-rw-r--r--COPYRIGHT.txt4
-rw-r--r--doc/classes/JavaScript.xml16
-rw-r--r--modules/raycast/SCsub18
-rw-r--r--modules/raycast/config.py3
-rw-r--r--modules/raycast/godot_update_embree.py17
-rw-r--r--platform/javascript/SCsub2
-rw-r--r--platform/javascript/api/api.cpp5
-rw-r--r--platform/javascript/api/javascript_singleton.h1
-rw-r--r--platform/javascript/api/javascript_tools_editor_plugin.cpp9
-rw-r--r--platform/javascript/dom_keys.inc4
-rw-r--r--platform/javascript/javascript_singleton.cpp5
-rw-r--r--platform/javascript/js/libs/library_godot_editor_tools.js57
-rw-r--r--platform/javascript/js/libs/library_godot_os.js17
-rw-r--r--thirdparty/README.md8
-rw-r--r--thirdparty/embree-aarch64/common/algorithms/parallel_filter.cpp56
-rw-r--r--thirdparty/embree-aarch64/common/algorithms/parallel_for.cpp48
-rw-r--r--thirdparty/embree-aarch64/common/algorithms/parallel_for_for.cpp63
-rw-r--r--thirdparty/embree-aarch64/common/algorithms/parallel_for_for_prefix_sum.cpp85
-rw-r--r--thirdparty/embree-aarch64/common/algorithms/parallel_map.cpp47
-rw-r--r--thirdparty/embree-aarch64/common/algorithms/parallel_partition.cpp53
-rw-r--r--thirdparty/embree-aarch64/common/algorithms/parallel_prefix_sum.cpp48
-rw-r--r--thirdparty/embree-aarch64/common/algorithms/parallel_reduce.cpp49
-rw-r--r--thirdparty/embree-aarch64/common/algorithms/parallel_set.cpp43
-rw-r--r--thirdparty/embree-aarch64/common/algorithms/parallel_sort.cpp50
-rw-r--r--thirdparty/embree-aarch64/common/math/AVX2NEON.h986
-rw-r--r--thirdparty/embree-aarch64/common/math/SSE2NEON.h1753
-rw-r--r--thirdparty/embree-aarch64/common/math/constants.cpp61
-rw-r--r--thirdparty/embree-aarch64/common/tasking/taskschedulergcd.h49
-rw-r--r--thirdparty/embree-aarch64/kernels/builders/primrefgen.h28
-rw-r--r--thirdparty/embree-aarch64/kernels/common/instance_stack.h199
-rw-r--r--thirdparty/embree-aarch64/kernels/common/scene_curves.h341
-rw-r--r--thirdparty/embree-aarch64/kernels/geometry/curve_intersector_virtual_bezier_curve.h21
-rw-r--r--thirdparty/embree-aarch64/kernels/geometry/curve_intersector_virtual_bspline_curve.h21
-rw-r--r--thirdparty/embree-aarch64/kernels/geometry/curve_intersector_virtual_catmullrom_curve.h21
-rw-r--r--thirdparty/embree-aarch64/kernels/geometry/curve_intersector_virtual_hermite_curve.h21
-rw-r--r--thirdparty/embree-aarch64/kernels/geometry/curve_intersector_virtual_linear_curve.h21
-rw-r--r--thirdparty/embree-aarch64/kernels/geometry/curve_intersector_virtual_point.h22
-rw-r--r--thirdparty/embree-aarch64/kernels/geometry/subgrid_intersector_moeller.h493
-rw-r--r--thirdparty/embree-aarch64/kernels/geometry/subgrid_intersector_pluecker.h508
-rw-r--r--thirdparty/embree/common/algorithms/parallel_any_of.h (renamed from thirdparty/embree-aarch64/common/algorithms/parallel_any_of.h)2
-rw-r--r--thirdparty/embree/common/algorithms/parallel_filter.h (renamed from thirdparty/embree-aarch64/common/algorithms/parallel_filter.h)2
-rw-r--r--thirdparty/embree/common/algorithms/parallel_for.h (renamed from thirdparty/embree-aarch64/common/algorithms/parallel_for.h)73
-rw-r--r--thirdparty/embree/common/algorithms/parallel_for_for.h (renamed from thirdparty/embree-aarch64/common/algorithms/parallel_for_for.h)2
-rw-r--r--thirdparty/embree/common/algorithms/parallel_for_for_prefix_sum.h (renamed from thirdparty/embree-aarch64/common/algorithms/parallel_for_for_prefix_sum.h)2
-rw-r--r--thirdparty/embree/common/algorithms/parallel_map.h (renamed from thirdparty/embree-aarch64/common/algorithms/parallel_map.h)2
-rw-r--r--thirdparty/embree/common/algorithms/parallel_partition.h (renamed from thirdparty/embree-aarch64/common/algorithms/parallel_partition.h)2
-rw-r--r--thirdparty/embree/common/algorithms/parallel_prefix_sum.h (renamed from thirdparty/embree-aarch64/common/algorithms/parallel_prefix_sum.h)2
-rw-r--r--thirdparty/embree/common/algorithms/parallel_reduce.h (renamed from thirdparty/embree-aarch64/common/algorithms/parallel_reduce.h)4
-rw-r--r--thirdparty/embree/common/algorithms/parallel_set.h (renamed from thirdparty/embree-aarch64/common/algorithms/parallel_set.h)2
-rw-r--r--thirdparty/embree/common/algorithms/parallel_sort.h (renamed from thirdparty/embree-aarch64/common/algorithms/parallel_sort.h)9
-rw-r--r--thirdparty/embree/common/lexers/parsestream.h (renamed from thirdparty/embree-aarch64/common/lexers/parsestream.h)2
-rw-r--r--thirdparty/embree/common/lexers/stream.h (renamed from thirdparty/embree-aarch64/common/lexers/stream.h)2
-rw-r--r--thirdparty/embree/common/lexers/streamfilters.h (renamed from thirdparty/embree-aarch64/common/lexers/streamfilters.h)2
-rw-r--r--thirdparty/embree/common/lexers/stringstream.cpp (renamed from thirdparty/embree-aarch64/common/lexers/stringstream.cpp)2
-rw-r--r--thirdparty/embree/common/lexers/stringstream.h (renamed from thirdparty/embree-aarch64/common/lexers/stringstream.h)2
-rw-r--r--thirdparty/embree/common/lexers/tokenstream.cpp (renamed from thirdparty/embree-aarch64/common/lexers/tokenstream.cpp)2
-rw-r--r--thirdparty/embree/common/lexers/tokenstream.h (renamed from thirdparty/embree-aarch64/common/lexers/tokenstream.h)2
-rw-r--r--thirdparty/embree/common/math/affinespace.h (renamed from thirdparty/embree-aarch64/common/math/affinespace.h)2
-rw-r--r--thirdparty/embree/common/math/bbox.h (renamed from thirdparty/embree-aarch64/common/math/bbox.h)8
-rw-r--r--thirdparty/embree/common/math/col3.h (renamed from thirdparty/embree-aarch64/common/math/col3.h)4
-rw-r--r--thirdparty/embree/common/math/col4.h (renamed from thirdparty/embree-aarch64/common/math/col4.h)4
-rw-r--r--thirdparty/embree/common/math/color.h (renamed from thirdparty/embree-aarch64/common/math/color.h)44
-rw-r--r--thirdparty/embree/common/math/constants.cpp27
-rw-r--r--thirdparty/embree/common/math/constants.h (renamed from thirdparty/embree-aarch64/common/math/constants.h)60
-rw-r--r--thirdparty/embree/common/math/interval.h (renamed from thirdparty/embree-aarch64/common/math/interval.h)2
-rw-r--r--thirdparty/embree/common/math/lbbox.h (renamed from thirdparty/embree-aarch64/common/math/lbbox.h)2
-rw-r--r--thirdparty/embree/common/math/linearspace2.h (renamed from thirdparty/embree-aarch64/common/math/linearspace2.h)2
-rw-r--r--thirdparty/embree/common/math/linearspace3.h (renamed from thirdparty/embree-aarch64/common/math/linearspace3.h)2
-rw-r--r--thirdparty/embree/common/math/math.h (renamed from thirdparty/embree-aarch64/common/math/math.h)140
-rw-r--r--thirdparty/embree/common/math/obbox.h (renamed from thirdparty/embree-aarch64/common/math/obbox.h)2
-rw-r--r--thirdparty/embree/common/math/quaternion.h (renamed from thirdparty/embree-aarch64/common/math/quaternion.h)2
-rw-r--r--thirdparty/embree/common/math/range.h (renamed from thirdparty/embree-aarch64/common/math/range.h)2
-rw-r--r--thirdparty/embree/common/math/transcendental.h (renamed from thirdparty/embree-aarch64/common/math/transcendental.h)4
-rw-r--r--thirdparty/embree/common/math/vec2.h (renamed from thirdparty/embree-aarch64/common/math/vec2.h)8
-rw-r--r--thirdparty/embree/common/math/vec2fa.h (renamed from thirdparty/embree-aarch64/common/math/vec2fa.h)28
-rw-r--r--thirdparty/embree/common/math/vec3.h (renamed from thirdparty/embree-aarch64/common/math/vec3.h)26
-rw-r--r--thirdparty/embree/common/math/vec3ba.h (renamed from thirdparty/embree-aarch64/common/math/vec3ba.h)2
-rw-r--r--thirdparty/embree/common/math/vec3fa.h (renamed from thirdparty/embree-aarch64/common/math/vec3fa.h)129
-rw-r--r--thirdparty/embree/common/math/vec3ia.h (renamed from thirdparty/embree-aarch64/common/math/vec3ia.h)40
-rw-r--r--thirdparty/embree/common/math/vec4.h (renamed from thirdparty/embree-aarch64/common/math/vec4.h)23
-rw-r--r--thirdparty/embree/common/simd/arm/emulation.h50
-rw-r--r--thirdparty/embree/common/simd/arm/sse2neon.h6996
-rw-r--r--thirdparty/embree/common/simd/avx.h (renamed from thirdparty/embree-aarch64/common/simd/avx.h)2
-rw-r--r--thirdparty/embree/common/simd/avx512.h (renamed from thirdparty/embree-aarch64/common/simd/avx512.h)2
-rw-r--r--thirdparty/embree/common/simd/simd.h (renamed from thirdparty/embree-aarch64/common/simd/simd.h)4
-rw-r--r--thirdparty/embree/common/simd/sse.cpp (renamed from thirdparty/embree-aarch64/common/simd/sse.cpp)2
-rw-r--r--thirdparty/embree/common/simd/sse.h (renamed from thirdparty/embree-aarch64/common/simd/sse.h)4
-rw-r--r--thirdparty/embree/common/simd/varying.h (renamed from thirdparty/embree-aarch64/common/simd/varying.h)71
-rw-r--r--thirdparty/embree/common/simd/vboold4_avx.h (renamed from thirdparty/embree-aarch64/common/simd/vboold4_avx.h)29
-rw-r--r--thirdparty/embree/common/simd/vboold4_avx512.h (renamed from thirdparty/embree-aarch64/common/simd/vboold4_avx512.h)18
-rw-r--r--thirdparty/embree/common/simd/vboold8_avx512.h (renamed from thirdparty/embree-aarch64/common/simd/vboold8_avx512.h)31
-rw-r--r--thirdparty/embree/common/simd/vboolf16_avx512.h (renamed from thirdparty/embree-aarch64/common/simd/vboolf16_avx512.h)31
-rw-r--r--thirdparty/embree/common/simd/vboolf4_avx512.h (renamed from thirdparty/embree-aarch64/common/simd/vboolf4_avx512.h)18
-rw-r--r--thirdparty/embree/common/simd/vboolf4_sse2.h (renamed from thirdparty/embree-aarch64/common/simd/vboolf4_sse2.h)51
-rw-r--r--thirdparty/embree/common/simd/vboolf8_avx.h (renamed from thirdparty/embree-aarch64/common/simd/vboolf8_avx.h)23
-rw-r--r--thirdparty/embree/common/simd/vboolf8_avx512.h (renamed from thirdparty/embree-aarch64/common/simd/vboolf8_avx512.h)18
-rw-r--r--thirdparty/embree/common/simd/vdouble4_avx.h (renamed from thirdparty/embree-aarch64/common/simd/vdouble4_avx.h)39
-rw-r--r--thirdparty/embree/common/simd/vdouble8_avx512.h (renamed from thirdparty/embree-aarch64/common/simd/vdouble8_avx512.h)39
-rw-r--r--thirdparty/embree/common/simd/vfloat16_avx512.h (renamed from thirdparty/embree-aarch64/common/simd/vfloat16_avx512.h)206
-rw-r--r--thirdparty/embree/common/simd/vfloat4_sse2.h (renamed from thirdparty/embree-aarch64/common/simd/vfloat4_sse2.h)425
-rw-r--r--thirdparty/embree/common/simd/vfloat8_avx.h (renamed from thirdparty/embree-aarch64/common/simd/vfloat8_avx.h)231
-rw-r--r--thirdparty/embree/common/simd/vint16_avx512.h (renamed from thirdparty/embree-aarch64/common/simd/vint16_avx512.h)56
-rw-r--r--thirdparty/embree/common/simd/vint4_sse2.h (renamed from thirdparty/embree-aarch64/common/simd/vint4_sse2.h)243
-rw-r--r--thirdparty/embree/common/simd/vint8_avx.h (renamed from thirdparty/embree-aarch64/common/simd/vint8_avx.h)98
-rw-r--r--thirdparty/embree/common/simd/vint8_avx2.h (renamed from thirdparty/embree-aarch64/common/simd/vint8_avx2.h)68
-rw-r--r--thirdparty/embree/common/simd/vllong4_avx2.h (renamed from thirdparty/embree-aarch64/common/simd/vllong4_avx2.h)40
-rw-r--r--thirdparty/embree/common/simd/vllong8_avx512.h (renamed from thirdparty/embree-aarch64/common/simd/vllong8_avx512.h)59
-rw-r--r--thirdparty/embree/common/simd/vuint16_avx512.h (renamed from thirdparty/embree-aarch64/common/simd/vuint16_avx512.h)55
-rw-r--r--thirdparty/embree/common/simd/vuint4_sse2.h (renamed from thirdparty/embree-aarch64/common/simd/vuint4_sse2.h)149
-rw-r--r--thirdparty/embree/common/simd/vuint8_avx.h (renamed from thirdparty/embree-aarch64/common/simd/vuint8_avx.h)99
-rw-r--r--thirdparty/embree/common/simd/vuint8_avx2.h (renamed from thirdparty/embree-aarch64/common/simd/vuint8_avx2.h)67
-rw-r--r--thirdparty/embree/common/sys/alloc.cpp (renamed from thirdparty/embree-aarch64/common/sys/alloc.cpp)4
-rw-r--r--thirdparty/embree/common/sys/alloc.h (renamed from thirdparty/embree-aarch64/common/sys/alloc.h)2
-rw-r--r--thirdparty/embree/common/sys/array.h (renamed from thirdparty/embree-aarch64/common/sys/array.h)6
-rw-r--r--thirdparty/embree/common/sys/atomic.h (renamed from thirdparty/embree-aarch64/common/sys/atomic.h)2
-rw-r--r--thirdparty/embree/common/sys/barrier.cpp (renamed from thirdparty/embree-aarch64/common/sys/barrier.cpp)2
-rw-r--r--thirdparty/embree/common/sys/barrier.h (renamed from thirdparty/embree-aarch64/common/sys/barrier.h)2
-rw-r--r--thirdparty/embree/common/sys/condition.cpp (renamed from thirdparty/embree-aarch64/common/sys/condition.cpp)16
-rw-r--r--thirdparty/embree/common/sys/condition.h (renamed from thirdparty/embree-aarch64/common/sys/condition.h)2
-rw-r--r--thirdparty/embree/common/sys/filename.cpp (renamed from thirdparty/embree-aarch64/common/sys/filename.cpp)2
-rw-r--r--thirdparty/embree/common/sys/filename.h (renamed from thirdparty/embree-aarch64/common/sys/filename.h)4
-rw-r--r--thirdparty/embree/common/sys/intrinsics.h (renamed from thirdparty/embree-aarch64/common/sys/intrinsics.h)346
-rw-r--r--thirdparty/embree/common/sys/library.cpp (renamed from thirdparty/embree-aarch64/common/sys/library.cpp)6
-rw-r--r--thirdparty/embree/common/sys/library.h (renamed from thirdparty/embree-aarch64/common/sys/library.h)2
-rw-r--r--thirdparty/embree/common/sys/mutex.cpp (renamed from thirdparty/embree-aarch64/common/sys/mutex.cpp)3
-rw-r--r--thirdparty/embree/common/sys/mutex.h (renamed from thirdparty/embree-aarch64/common/sys/mutex.h)2
-rw-r--r--thirdparty/embree/common/sys/platform.h (renamed from thirdparty/embree-aarch64/common/sys/platform.h)31
-rw-r--r--thirdparty/embree/common/sys/ref.h (renamed from thirdparty/embree-aarch64/common/sys/ref.h)2
-rw-r--r--thirdparty/embree/common/sys/regression.cpp (renamed from thirdparty/embree-aarch64/common/sys/regression.cpp)2
-rw-r--r--thirdparty/embree/common/sys/regression.h (renamed from thirdparty/embree-aarch64/common/sys/regression.h)2
-rw-r--r--thirdparty/embree/common/sys/string.cpp (renamed from thirdparty/embree-aarch64/common/sys/string.cpp)2
-rw-r--r--thirdparty/embree/common/sys/string.h (renamed from thirdparty/embree-aarch64/common/sys/string.h)2
-rw-r--r--thirdparty/embree/common/sys/sysinfo.cpp (renamed from thirdparty/embree-aarch64/common/sys/sysinfo.cpp)160
-rw-r--r--thirdparty/embree/common/sys/sysinfo.h (renamed from thirdparty/embree-aarch64/common/sys/sysinfo.h)34
-rw-r--r--thirdparty/embree/common/sys/thread.cpp (renamed from thirdparty/embree-aarch64/common/sys/thread.cpp)115
-rw-r--r--thirdparty/embree/common/sys/thread.h (renamed from thirdparty/embree-aarch64/common/sys/thread.h)5
-rw-r--r--thirdparty/embree/common/sys/vector.h (renamed from thirdparty/embree-aarch64/common/sys/vector.h)2
-rw-r--r--thirdparty/embree/common/tasking/taskscheduler.h (renamed from thirdparty/embree-aarch64/common/tasking/taskscheduler.h)4
-rw-r--r--thirdparty/embree/common/tasking/taskschedulerinternal.cpp (renamed from thirdparty/embree-aarch64/common/tasking/taskschedulerinternal.cpp)16
-rw-r--r--thirdparty/embree/common/tasking/taskschedulerinternal.h (renamed from thirdparty/embree-aarch64/common/tasking/taskschedulerinternal.h)13
-rw-r--r--thirdparty/embree/common/tasking/taskschedulerppl.h (renamed from thirdparty/embree-aarch64/common/tasking/taskschedulerppl.h)2
-rw-r--r--thirdparty/embree/common/tasking/taskschedulertbb.h (renamed from thirdparty/embree-aarch64/common/tasking/taskschedulertbb.h)8
-rw-r--r--thirdparty/embree/include/embree3/rtcore.h (renamed from thirdparty/embree-aarch64/include/embree3/rtcore.h)2
-rw-r--r--thirdparty/embree/include/embree3/rtcore_buffer.h (renamed from thirdparty/embree-aarch64/include/embree3/rtcore_buffer.h)2
-rw-r--r--thirdparty/embree/include/embree3/rtcore_builder.h (renamed from thirdparty/embree-aarch64/include/embree3/rtcore_builder.h)2
-rw-r--r--thirdparty/embree/include/embree3/rtcore_common.h (renamed from thirdparty/embree-aarch64/include/embree3/rtcore_common.h)26
-rw-r--r--thirdparty/embree/include/embree3/rtcore_config.h (renamed from thirdparty/embree-aarch64/include/embree3/rtcore_config.h)10
-rw-r--r--thirdparty/embree/include/embree3/rtcore_device.h (renamed from thirdparty/embree-aarch64/include/embree3/rtcore_device.h)2
-rw-r--r--thirdparty/embree/include/embree3/rtcore_geometry.h (renamed from thirdparty/embree-aarch64/include/embree3/rtcore_geometry.h)2
-rw-r--r--thirdparty/embree/include/embree3/rtcore_quaternion.h (renamed from thirdparty/embree-aarch64/include/embree3/rtcore_quaternion.h)2
-rw-r--r--thirdparty/embree/include/embree3/rtcore_ray.h (renamed from thirdparty/embree-aarch64/include/embree3/rtcore_ray.h)2
-rw-r--r--thirdparty/embree/include/embree3/rtcore_scene.h (renamed from thirdparty/embree-aarch64/include/embree3/rtcore_scene.h)2
-rw-r--r--thirdparty/embree/kernels/builders/bvh_builder_hair.h (renamed from thirdparty/embree-aarch64/kernels/builders/bvh_builder_hair.h)2
-rw-r--r--thirdparty/embree/kernels/builders/bvh_builder_morton.h (renamed from thirdparty/embree-aarch64/kernels/builders/bvh_builder_morton.h)2
-rw-r--r--thirdparty/embree/kernels/builders/bvh_builder_msmblur.h (renamed from thirdparty/embree-aarch64/kernels/builders/bvh_builder_msmblur.h)4
-rw-r--r--thirdparty/embree/kernels/builders/bvh_builder_msmblur_hair.h (renamed from thirdparty/embree-aarch64/kernels/builders/bvh_builder_msmblur_hair.h)2
-rw-r--r--thirdparty/embree/kernels/builders/bvh_builder_sah.h (renamed from thirdparty/embree-aarch64/kernels/builders/bvh_builder_sah.h)4
-rw-r--r--thirdparty/embree/kernels/builders/heuristic_binning.h (renamed from thirdparty/embree-aarch64/kernels/builders/heuristic_binning.h)478
-rw-r--r--thirdparty/embree/kernels/builders/heuristic_binning_array_aligned.h (renamed from thirdparty/embree-aarch64/kernels/builders/heuristic_binning_array_aligned.h)9
-rw-r--r--thirdparty/embree/kernels/builders/heuristic_binning_array_unaligned.h (renamed from thirdparty/embree-aarch64/kernels/builders/heuristic_binning_array_unaligned.h)2
-rw-r--r--thirdparty/embree/kernels/builders/heuristic_openmerge_array.h (renamed from thirdparty/embree-aarch64/kernels/builders/heuristic_openmerge_array.h)2
-rw-r--r--thirdparty/embree/kernels/builders/heuristic_spatial.h (renamed from thirdparty/embree-aarch64/kernels/builders/heuristic_spatial.h)2
-rw-r--r--thirdparty/embree/kernels/builders/heuristic_spatial_array.h (renamed from thirdparty/embree-aarch64/kernels/builders/heuristic_spatial_array.h)8
-rw-r--r--thirdparty/embree/kernels/builders/heuristic_strand_array.h (renamed from thirdparty/embree-aarch64/kernels/builders/heuristic_strand_array.h)2
-rw-r--r--thirdparty/embree/kernels/builders/heuristic_timesplit_array.h (renamed from thirdparty/embree-aarch64/kernels/builders/heuristic_timesplit_array.h)2
-rw-r--r--thirdparty/embree/kernels/builders/priminfo.h (renamed from thirdparty/embree-aarch64/kernels/builders/priminfo.h)2
-rw-r--r--thirdparty/embree/kernels/builders/primrefgen.cpp (renamed from thirdparty/embree-aarch64/kernels/builders/primrefgen.cpp)156
-rw-r--r--thirdparty/embree/kernels/builders/primrefgen.h34
-rw-r--r--thirdparty/embree/kernels/builders/primrefgen_presplit.h (renamed from thirdparty/embree-aarch64/kernels/builders/primrefgen_presplit.h)2
-rw-r--r--thirdparty/embree/kernels/builders/splitter.h (renamed from thirdparty/embree-aarch64/kernels/builders/splitter.h)24
-rw-r--r--thirdparty/embree/kernels/bvh/bvh.cpp (renamed from thirdparty/embree-aarch64/kernels/bvh/bvh.cpp)6
-rw-r--r--thirdparty/embree/kernels/bvh/bvh.h (renamed from thirdparty/embree-aarch64/kernels/bvh/bvh.h)4
-rw-r--r--thirdparty/embree/kernels/bvh/bvh4_factory.cpp (renamed from thirdparty/embree-aarch64/kernels/bvh/bvh4_factory.cpp)334
-rw-r--r--thirdparty/embree/kernels/bvh/bvh4_factory.h (renamed from thirdparty/embree-aarch64/kernels/bvh/bvh4_factory.h)2
-rw-r--r--thirdparty/embree/kernels/bvh/bvh8_factory.cpp (renamed from thirdparty/embree-aarch64/kernels/bvh/bvh8_factory.cpp)284
-rw-r--r--thirdparty/embree/kernels/bvh/bvh8_factory.h (renamed from thirdparty/embree-aarch64/kernels/bvh/bvh8_factory.h)2
-rw-r--r--thirdparty/embree/kernels/bvh/bvh_builder.cpp (renamed from thirdparty/embree-aarch64/kernels/bvh/bvh_builder.cpp)2
-rw-r--r--thirdparty/embree/kernels/bvh/bvh_builder.h (renamed from thirdparty/embree-aarch64/kernels/bvh/bvh_builder.h)3
-rw-r--r--thirdparty/embree/kernels/bvh/bvh_builder_morton.cpp (renamed from thirdparty/embree-aarch64/kernels/bvh/bvh_builder_morton.cpp)4
-rw-r--r--thirdparty/embree/kernels/bvh/bvh_builder_sah.cpp (renamed from thirdparty/embree-aarch64/kernels/bvh/bvh_builder_sah.cpp)119
-rw-r--r--thirdparty/embree/kernels/bvh/bvh_builder_sah_mb.cpp (renamed from thirdparty/embree-aarch64/kernels/bvh/bvh_builder_sah_mb.cpp)6
-rw-r--r--thirdparty/embree/kernels/bvh/bvh_builder_sah_spatial.cpp (renamed from thirdparty/embree-aarch64/kernels/bvh/bvh_builder_sah_spatial.cpp)6
-rw-r--r--thirdparty/embree/kernels/bvh/bvh_builder_twolevel.cpp (renamed from thirdparty/embree-aarch64/kernels/bvh/bvh_builder_twolevel.cpp)10
-rw-r--r--thirdparty/embree/kernels/bvh/bvh_builder_twolevel.h (renamed from thirdparty/embree-aarch64/kernels/bvh/bvh_builder_twolevel.h)4
-rw-r--r--thirdparty/embree/kernels/bvh/bvh_builder_twolevel_internal.h (renamed from thirdparty/embree-aarch64/kernels/bvh/bvh_builder_twolevel_internal.h)2
-rw-r--r--thirdparty/embree/kernels/bvh/bvh_collider.cpp (renamed from thirdparty/embree-aarch64/kernels/bvh/bvh_collider.cpp)2
-rw-r--r--thirdparty/embree/kernels/bvh/bvh_collider.h (renamed from thirdparty/embree-aarch64/kernels/bvh/bvh_collider.h)2
-rw-r--r--thirdparty/embree/kernels/bvh/bvh_factory.h (renamed from thirdparty/embree-aarch64/kernels/bvh/bvh_factory.h)2
-rw-r--r--thirdparty/embree/kernels/bvh/bvh_intersector1.cpp (renamed from thirdparty/embree-aarch64/kernels/bvh/bvh_intersector1.cpp)29
-rw-r--r--thirdparty/embree/kernels/bvh/bvh_intersector1.h (renamed from thirdparty/embree-aarch64/kernels/bvh/bvh_intersector1.h)5
-rw-r--r--thirdparty/embree/kernels/bvh/bvh_intersector1_bvh4.cpp (renamed from thirdparty/embree-aarch64/kernels/bvh/bvh_intersector1_bvh4.cpp)20
-rw-r--r--thirdparty/embree/kernels/bvh/bvh_intersector_hybrid.h (renamed from thirdparty/embree-aarch64/kernels/bvh/bvh_intersector_hybrid.h)5
-rw-r--r--thirdparty/embree/kernels/bvh/bvh_intersector_stream.h (renamed from thirdparty/embree-aarch64/kernels/bvh/bvh_intersector_stream.h)115
-rw-r--r--thirdparty/embree/kernels/bvh/bvh_intersector_stream_filters.h (renamed from thirdparty/embree-aarch64/kernels/bvh/bvh_intersector_stream_filters.h)2
-rw-r--r--thirdparty/embree/kernels/bvh/bvh_node_aabb.h (renamed from thirdparty/embree-aarch64/kernels/bvh/bvh_node_aabb.h)2
-rw-r--r--thirdparty/embree/kernels/bvh/bvh_node_aabb_mb.h (renamed from thirdparty/embree-aarch64/kernels/bvh/bvh_node_aabb_mb.h)2
-rw-r--r--thirdparty/embree/kernels/bvh/bvh_node_aabb_mb4d.h (renamed from thirdparty/embree-aarch64/kernels/bvh/bvh_node_aabb_mb4d.h)2
-rw-r--r--thirdparty/embree/kernels/bvh/bvh_node_base.h (renamed from thirdparty/embree-aarch64/kernels/bvh/bvh_node_base.h)2
-rw-r--r--thirdparty/embree/kernels/bvh/bvh_node_obb.h (renamed from thirdparty/embree-aarch64/kernels/bvh/bvh_node_obb.h)2
-rw-r--r--thirdparty/embree/kernels/bvh/bvh_node_obb_mb.h (renamed from thirdparty/embree-aarch64/kernels/bvh/bvh_node_obb_mb.h)2
-rw-r--r--thirdparty/embree/kernels/bvh/bvh_node_qaabb.h (renamed from thirdparty/embree-aarch64/kernels/bvh/bvh_node_qaabb.h)2
-rw-r--r--thirdparty/embree/kernels/bvh/bvh_node_ref.h (renamed from thirdparty/embree-aarch64/kernels/bvh/bvh_node_ref.h)6
-rw-r--r--thirdparty/embree/kernels/bvh/bvh_refit.cpp (renamed from thirdparty/embree-aarch64/kernels/bvh/bvh_refit.cpp)2
-rw-r--r--thirdparty/embree/kernels/bvh/bvh_refit.h (renamed from thirdparty/embree-aarch64/kernels/bvh/bvh_refit.h)2
-rw-r--r--thirdparty/embree/kernels/bvh/bvh_rotate.cpp (renamed from thirdparty/embree-aarch64/kernels/bvh/bvh_rotate.cpp)2
-rw-r--r--thirdparty/embree/kernels/bvh/bvh_rotate.h (renamed from thirdparty/embree-aarch64/kernels/bvh/bvh_rotate.h)2
-rw-r--r--thirdparty/embree/kernels/bvh/bvh_statistics.cpp (renamed from thirdparty/embree-aarch64/kernels/bvh/bvh_statistics.cpp)4
-rw-r--r--thirdparty/embree/kernels/bvh/bvh_statistics.h (renamed from thirdparty/embree-aarch64/kernels/bvh/bvh_statistics.h)2
-rw-r--r--thirdparty/embree/kernels/bvh/bvh_traverser1.h (renamed from thirdparty/embree-aarch64/kernels/bvh/bvh_traverser1.h)242
-rw-r--r--thirdparty/embree/kernels/bvh/bvh_traverser_stream.h (renamed from thirdparty/embree-aarch64/kernels/bvh/bvh_traverser_stream.h)21
-rw-r--r--thirdparty/embree/kernels/bvh/node_intersector.h (renamed from thirdparty/embree-aarch64/kernels/bvh/node_intersector.h)2
-rw-r--r--thirdparty/embree/kernels/bvh/node_intersector1.h (renamed from thirdparty/embree-aarch64/kernels/bvh/node_intersector1.h)651
-rw-r--r--thirdparty/embree/kernels/bvh/node_intersector_frustum.h (renamed from thirdparty/embree-aarch64/kernels/bvh/node_intersector_frustum.h)108
-rw-r--r--thirdparty/embree/kernels/bvh/node_intersector_packet.h (renamed from thirdparty/embree-aarch64/kernels/bvh/node_intersector_packet.h)94
-rw-r--r--thirdparty/embree/kernels/bvh/node_intersector_packet_stream.h (renamed from thirdparty/embree-aarch64/kernels/bvh/node_intersector_packet_stream.h)98
-rw-r--r--thirdparty/embree/kernels/common/accel.h (renamed from thirdparty/embree-aarch64/kernels/common/accel.h)6
-rw-r--r--thirdparty/embree/kernels/common/accelinstance.h (renamed from thirdparty/embree-aarch64/kernels/common/accelinstance.h)2
-rw-r--r--thirdparty/embree/kernels/common/acceln.cpp (renamed from thirdparty/embree-aarch64/kernels/common/acceln.cpp)8
-rw-r--r--thirdparty/embree/kernels/common/acceln.h (renamed from thirdparty/embree-aarch64/kernels/common/acceln.h)2
-rw-r--r--thirdparty/embree/kernels/common/accelset.cpp (renamed from thirdparty/embree-aarch64/kernels/common/accelset.cpp)2
-rw-r--r--thirdparty/embree/kernels/common/accelset.h (renamed from thirdparty/embree-aarch64/kernels/common/accelset.h)2
-rw-r--r--thirdparty/embree/kernels/common/alloc.cpp (renamed from thirdparty/embree-aarch64/kernels/common/alloc.cpp)5
-rw-r--r--thirdparty/embree/kernels/common/alloc.h (renamed from thirdparty/embree-aarch64/kernels/common/alloc.h)76
-rw-r--r--thirdparty/embree/kernels/common/buffer.h (renamed from thirdparty/embree-aarch64/kernels/common/buffer.h)2
-rw-r--r--thirdparty/embree/kernels/common/builder.h (renamed from thirdparty/embree-aarch64/kernels/common/builder.h)2
-rw-r--r--thirdparty/embree/kernels/common/context.h (renamed from thirdparty/embree-aarch64/kernels/common/context.h)2
-rw-r--r--thirdparty/embree/kernels/common/default.h (renamed from thirdparty/embree-aarch64/kernels/common/default.h)7
-rw-r--r--thirdparty/embree/kernels/common/device.cpp (renamed from thirdparty/embree-aarch64/kernels/common/device.cpp)27
-rw-r--r--thirdparty/embree/kernels/common/device.h (renamed from thirdparty/embree-aarch64/kernels/common/device.h)2
-rw-r--r--thirdparty/embree/kernels/common/geometry.cpp (renamed from thirdparty/embree-aarch64/kernels/common/geometry.cpp)2
-rw-r--r--thirdparty/embree/kernels/common/geometry.h (renamed from thirdparty/embree-aarch64/kernels/common/geometry.h)4
-rw-r--r--thirdparty/embree/kernels/common/hit.h (renamed from thirdparty/embree-aarch64/kernels/common/hit.h)10
-rw-r--r--thirdparty/embree/kernels/common/instance_stack.h179
-rw-r--r--thirdparty/embree/kernels/common/isa.h (renamed from thirdparty/embree-aarch64/kernels/common/isa.h)137
-rw-r--r--thirdparty/embree/kernels/common/motion_derivative.h (renamed from thirdparty/embree-aarch64/kernels/common/motion_derivative.h)2
-rw-r--r--thirdparty/embree/kernels/common/point_query.h (renamed from thirdparty/embree-aarch64/kernels/common/point_query.h)2
-rw-r--r--thirdparty/embree/kernels/common/primref.h (renamed from thirdparty/embree-aarch64/kernels/common/primref.h)6
-rw-r--r--thirdparty/embree/kernels/common/primref_mb.h (renamed from thirdparty/embree-aarch64/kernels/common/primref_mb.h)12
-rw-r--r--thirdparty/embree/kernels/common/profile.h (renamed from thirdparty/embree-aarch64/kernels/common/profile.h)2
-rw-r--r--thirdparty/embree/kernels/common/ray.h (renamed from thirdparty/embree-aarch64/kernels/common/ray.h)28
-rw-r--r--thirdparty/embree/kernels/common/rtcore.cpp (renamed from thirdparty/embree-aarch64/kernels/common/rtcore.cpp)87
-rw-r--r--thirdparty/embree/kernels/common/rtcore.h (renamed from thirdparty/embree-aarch64/kernels/common/rtcore.h)2
-rw-r--r--thirdparty/embree/kernels/common/rtcore_builder.cpp (renamed from thirdparty/embree-aarch64/kernels/common/rtcore_builder.cpp)2
-rw-r--r--thirdparty/embree/kernels/common/scene.cpp (renamed from thirdparty/embree-aarch64/kernels/common/scene.cpp)153
-rw-r--r--thirdparty/embree/kernels/common/scene.h (renamed from thirdparty/embree-aarch64/kernels/common/scene.h)2
-rw-r--r--thirdparty/embree/kernels/common/scene_curves.h688
-rw-r--r--thirdparty/embree/kernels/common/scene_grid_mesh.h (renamed from thirdparty/embree-aarch64/kernels/common/scene_grid_mesh.h)83
-rw-r--r--thirdparty/embree/kernels/common/scene_instance.h (renamed from thirdparty/embree-aarch64/kernels/common/scene_instance.h)10
-rw-r--r--thirdparty/embree/kernels/common/scene_line_segments.h (renamed from thirdparty/embree-aarch64/kernels/common/scene_line_segments.h)40
-rw-r--r--thirdparty/embree/kernels/common/scene_points.h (renamed from thirdparty/embree-aarch64/kernels/common/scene_points.h)2
-rw-r--r--thirdparty/embree/kernels/common/scene_quad_mesh.h (renamed from thirdparty/embree-aarch64/kernels/common/scene_quad_mesh.h)62
-rw-r--r--thirdparty/embree/kernels/common/scene_subdiv_mesh.h (renamed from thirdparty/embree-aarch64/kernels/common/scene_subdiv_mesh.h)8
-rw-r--r--thirdparty/embree/kernels/common/scene_triangle_mesh.cpp (renamed from thirdparty/embree-aarch64/kernels/common/scene_triangle_mesh.cpp)61
-rw-r--r--thirdparty/embree/kernels/common/scene_triangle_mesh.h (renamed from thirdparty/embree-aarch64/kernels/common/scene_triangle_mesh.h)58
-rw-r--r--thirdparty/embree/kernels/common/scene_user_geometry.h (renamed from thirdparty/embree-aarch64/kernels/common/scene_user_geometry.h)2
-rw-r--r--thirdparty/embree/kernels/common/stack_item.h (renamed from thirdparty/embree-aarch64/kernels/common/stack_item.h)2
-rw-r--r--thirdparty/embree/kernels/common/stat.cpp (renamed from thirdparty/embree-aarch64/kernels/common/stat.cpp)2
-rw-r--r--thirdparty/embree/kernels/common/stat.h (renamed from thirdparty/embree-aarch64/kernels/common/stat.h)2
-rw-r--r--thirdparty/embree/kernels/common/state.cpp (renamed from thirdparty/embree-aarch64/kernels/common/state.cpp)44
-rw-r--r--thirdparty/embree/kernels/common/state.h (renamed from thirdparty/embree-aarch64/kernels/common/state.h)3
-rw-r--r--thirdparty/embree/kernels/common/vector.h (renamed from thirdparty/embree-aarch64/kernels/common/vector.h)2
-rw-r--r--thirdparty/embree/kernels/config.h (renamed from thirdparty/embree-aarch64/kernels/config.h)0
-rw-r--r--thirdparty/embree/kernels/geometry/cone.h (renamed from thirdparty/embree-aarch64/kernels/geometry/cone.h)2
-rw-r--r--thirdparty/embree/kernels/geometry/coneline_intersector.h (renamed from thirdparty/embree-aarch64/kernels/geometry/coneline_intersector.h)14
-rw-r--r--thirdparty/embree/kernels/geometry/conelinei_intersector.h (renamed from thirdparty/embree-aarch64/kernels/geometry/conelinei_intersector.h)42
-rw-r--r--thirdparty/embree/kernels/geometry/curveNi.h (renamed from thirdparty/embree-aarch64/kernels/geometry/curveNi.h)130
-rw-r--r--thirdparty/embree/kernels/geometry/curveNi_intersector.h (renamed from thirdparty/embree-aarch64/kernels/geometry/curveNi_intersector.h)2
-rw-r--r--thirdparty/embree/kernels/geometry/curveNi_mb.h (renamed from thirdparty/embree-aarch64/kernels/geometry/curveNi_mb.h)164
-rw-r--r--thirdparty/embree/kernels/geometry/curveNi_mb_intersector.h (renamed from thirdparty/embree-aarch64/kernels/geometry/curveNi_mb_intersector.h)2
-rw-r--r--thirdparty/embree/kernels/geometry/curveNv.h (renamed from thirdparty/embree-aarch64/kernels/geometry/curveNv.h)2
-rw-r--r--thirdparty/embree/kernels/geometry/curveNv_intersector.h (renamed from thirdparty/embree-aarch64/kernels/geometry/curveNv_intersector.h)2
-rw-r--r--thirdparty/embree/kernels/geometry/curve_intersector.h (renamed from thirdparty/embree-aarch64/kernels/geometry/curve_intersector.h)10
-rw-r--r--thirdparty/embree/kernels/geometry/curve_intersector_distance.h (renamed from thirdparty/embree-aarch64/kernels/geometry/curve_intersector_distance.h)2
-rw-r--r--thirdparty/embree/kernels/geometry/curve_intersector_oriented.h (renamed from thirdparty/embree-aarch64/kernels/geometry/curve_intersector_oriented.h)2
-rw-r--r--thirdparty/embree/kernels/geometry/curve_intersector_precalculations.h (renamed from thirdparty/embree-aarch64/kernels/geometry/curve_intersector_precalculations.h)2
-rw-r--r--thirdparty/embree/kernels/geometry/curve_intersector_ribbon.h (renamed from thirdparty/embree-aarch64/kernels/geometry/curve_intersector_ribbon.h)14
-rw-r--r--thirdparty/embree/kernels/geometry/curve_intersector_sweep.h (renamed from thirdparty/embree-aarch64/kernels/geometry/curve_intersector_sweep.h)6
-rw-r--r--thirdparty/embree/kernels/geometry/curve_intersector_virtual.h (renamed from thirdparty/embree-aarch64/kernels/geometry/curve_intersector_virtual.h)210
-rw-r--r--thirdparty/embree/kernels/geometry/cylinder.h (renamed from thirdparty/embree-aarch64/kernels/geometry/cylinder.h)2
-rw-r--r--thirdparty/embree/kernels/geometry/disc_intersector.h (renamed from thirdparty/embree-aarch64/kernels/geometry/disc_intersector.h)10
-rw-r--r--thirdparty/embree/kernels/geometry/disci_intersector.h (renamed from thirdparty/embree-aarch64/kernels/geometry/disci_intersector.h)114
-rw-r--r--thirdparty/embree/kernels/geometry/filter.h (renamed from thirdparty/embree-aarch64/kernels/geometry/filter.h)2
-rw-r--r--thirdparty/embree/kernels/geometry/grid_intersector.h (renamed from thirdparty/embree-aarch64/kernels/geometry/grid_intersector.h)2
-rw-r--r--thirdparty/embree/kernels/geometry/grid_soa.h (renamed from thirdparty/embree-aarch64/kernels/geometry/grid_soa.h)14
-rw-r--r--thirdparty/embree/kernels/geometry/grid_soa_intersector1.h (renamed from thirdparty/embree-aarch64/kernels/geometry/grid_soa_intersector1.h)2
-rw-r--r--thirdparty/embree/kernels/geometry/grid_soa_intersector_packet.h (renamed from thirdparty/embree-aarch64/kernels/geometry/grid_soa_intersector_packet.h)10
-rw-r--r--thirdparty/embree/kernels/geometry/instance.h (renamed from thirdparty/embree-aarch64/kernels/geometry/instance.h)2
-rw-r--r--thirdparty/embree/kernels/geometry/instance_intersector.h (renamed from thirdparty/embree-aarch64/kernels/geometry/instance_intersector.h)2
-rw-r--r--thirdparty/embree/kernels/geometry/intersector_epilog.h (renamed from thirdparty/embree-aarch64/kernels/geometry/intersector_epilog.h)141
-rw-r--r--thirdparty/embree/kernels/geometry/intersector_iterators.h (renamed from thirdparty/embree-aarch64/kernels/geometry/intersector_iterators.h)18
-rw-r--r--thirdparty/embree/kernels/geometry/line_intersector.h (renamed from thirdparty/embree-aarch64/kernels/geometry/line_intersector.h)16
-rw-r--r--thirdparty/embree/kernels/geometry/linei.h (renamed from thirdparty/embree-aarch64/kernels/geometry/linei.h)6
-rw-r--r--thirdparty/embree/kernels/geometry/linei_intersector.h (renamed from thirdparty/embree-aarch64/kernels/geometry/linei_intersector.h)42
-rw-r--r--thirdparty/embree/kernels/geometry/object.h (renamed from thirdparty/embree-aarch64/kernels/geometry/object.h)2
-rw-r--r--thirdparty/embree/kernels/geometry/object_intersector.h (renamed from thirdparty/embree-aarch64/kernels/geometry/object_intersector.h)2
-rw-r--r--thirdparty/embree/kernels/geometry/plane.h (renamed from thirdparty/embree-aarch64/kernels/geometry/plane.h)2
-rw-r--r--thirdparty/embree/kernels/geometry/pointi.h (renamed from thirdparty/embree-aarch64/kernels/geometry/pointi.h)7
-rw-r--r--thirdparty/embree/kernels/geometry/primitive.h (renamed from thirdparty/embree-aarch64/kernels/geometry/primitive.h)2
-rw-r--r--thirdparty/embree/kernels/geometry/primitive4.cpp (renamed from thirdparty/embree-aarch64/kernels/geometry/primitive4.cpp)2
-rw-r--r--thirdparty/embree/kernels/geometry/quad_intersector.h (renamed from thirdparty/embree-aarch64/kernels/geometry/quad_intersector.h)2
-rw-r--r--thirdparty/embree/kernels/geometry/quad_intersector_moeller.h (renamed from thirdparty/embree-aarch64/kernels/geometry/quad_intersector_moeller.h)162
-rw-r--r--thirdparty/embree/kernels/geometry/quad_intersector_pluecker.h (renamed from thirdparty/embree-aarch64/kernels/geometry/quad_intersector_pluecker.h)139
-rw-r--r--thirdparty/embree/kernels/geometry/quadi.h (renamed from thirdparty/embree-aarch64/kernels/geometry/quadi.h)12
-rw-r--r--thirdparty/embree/kernels/geometry/quadi_intersector.h (renamed from thirdparty/embree-aarch64/kernels/geometry/quadi_intersector.h)10
-rw-r--r--thirdparty/embree/kernels/geometry/quadv.h (renamed from thirdparty/embree-aarch64/kernels/geometry/quadv.h)2
-rw-r--r--thirdparty/embree/kernels/geometry/quadv_intersector.h (renamed from thirdparty/embree-aarch64/kernels/geometry/quadv_intersector.h)2
-rw-r--r--thirdparty/embree/kernels/geometry/roundline_intersector.h (renamed from thirdparty/embree-aarch64/kernels/geometry/roundline_intersector.h)35
-rw-r--r--thirdparty/embree/kernels/geometry/roundlinei_intersector.h (renamed from thirdparty/embree-aarch64/kernels/geometry/roundlinei_intersector.h)57
-rw-r--r--thirdparty/embree/kernels/geometry/sphere_intersector.h (renamed from thirdparty/embree-aarch64/kernels/geometry/sphere_intersector.h)6
-rw-r--r--thirdparty/embree/kernels/geometry/spherei_intersector.h (renamed from thirdparty/embree-aarch64/kernels/geometry/spherei_intersector.h)58
-rw-r--r--thirdparty/embree/kernels/geometry/subdivpatch1.h (renamed from thirdparty/embree-aarch64/kernels/geometry/subdivpatch1.h)2
-rw-r--r--thirdparty/embree/kernels/geometry/subdivpatch1_intersector.h (renamed from thirdparty/embree-aarch64/kernels/geometry/subdivpatch1_intersector.h)52
-rw-r--r--thirdparty/embree/kernels/geometry/subgrid.h (renamed from thirdparty/embree-aarch64/kernels/geometry/subgrid.h)2
-rw-r--r--thirdparty/embree/kernels/geometry/subgrid_intersector.h (renamed from thirdparty/embree-aarch64/kernels/geometry/subgrid_intersector.h)73
-rw-r--r--thirdparty/embree/kernels/geometry/subgrid_intersector_moeller.h382
-rw-r--r--thirdparty/embree/kernels/geometry/subgrid_intersector_pluecker.h367
-rw-r--r--thirdparty/embree/kernels/geometry/subgrid_mb_intersector.h (renamed from thirdparty/embree-aarch64/kernels/geometry/subgrid_mb_intersector.h)38
-rw-r--r--thirdparty/embree/kernels/geometry/triangle.h (renamed from thirdparty/embree-aarch64/kernels/geometry/triangle.h)2
-rw-r--r--thirdparty/embree/kernels/geometry/triangle_intersector.h (renamed from thirdparty/embree-aarch64/kernels/geometry/triangle_intersector.h)22
-rw-r--r--thirdparty/embree/kernels/geometry/triangle_intersector_moeller.h (renamed from thirdparty/embree-aarch64/kernels/geometry/triangle_intersector_moeller.h)244
-rw-r--r--thirdparty/embree/kernels/geometry/triangle_intersector_pluecker.h (renamed from thirdparty/embree-aarch64/kernels/geometry/triangle_intersector_pluecker.h)250
-rw-r--r--thirdparty/embree/kernels/geometry/triangle_intersector_woop.h (renamed from thirdparty/embree-aarch64/kernels/geometry/triangle_intersector_woop.h)2
-rw-r--r--thirdparty/embree/kernels/geometry/triangle_triangle_intersector.h (renamed from thirdparty/embree-aarch64/kernels/geometry/triangle_triangle_intersector.h)2
-rw-r--r--thirdparty/embree/kernels/geometry/trianglei.h (renamed from thirdparty/embree-aarch64/kernels/geometry/trianglei.h)10
-rw-r--r--thirdparty/embree/kernels/geometry/trianglei_intersector.h (renamed from thirdparty/embree-aarch64/kernels/geometry/trianglei_intersector.h)90
-rw-r--r--thirdparty/embree/kernels/geometry/trianglev.h (renamed from thirdparty/embree-aarch64/kernels/geometry/trianglev.h)2
-rw-r--r--thirdparty/embree/kernels/geometry/trianglev_intersector.h (renamed from thirdparty/embree-aarch64/kernels/geometry/trianglev_intersector.h)42
-rw-r--r--thirdparty/embree/kernels/geometry/trianglev_mb.h (renamed from thirdparty/embree-aarch64/kernels/geometry/trianglev_mb.h)2
-rw-r--r--thirdparty/embree/kernels/geometry/trianglev_mb_intersector.h (renamed from thirdparty/embree-aarch64/kernels/geometry/trianglev_mb_intersector.h)98
-rw-r--r--thirdparty/embree/kernels/hash.h (renamed from thirdparty/embree-aarch64/kernels/hash.h)2
-rw-r--r--thirdparty/embree/kernels/subdiv/bezier_curve.h (renamed from thirdparty/embree-aarch64/kernels/subdiv/bezier_curve.h)6
-rw-r--r--thirdparty/embree/kernels/subdiv/bezier_patch.h (renamed from thirdparty/embree-aarch64/kernels/subdiv/bezier_patch.h)2
-rw-r--r--thirdparty/embree/kernels/subdiv/bilinear_patch.h (renamed from thirdparty/embree-aarch64/kernels/subdiv/bilinear_patch.h)2
-rw-r--r--thirdparty/embree/kernels/subdiv/bspline_curve.h (renamed from thirdparty/embree-aarch64/kernels/subdiv/bspline_curve.h)7
-rw-r--r--thirdparty/embree/kernels/subdiv/bspline_patch.h (renamed from thirdparty/embree-aarch64/kernels/subdiv/bspline_patch.h)2
-rw-r--r--thirdparty/embree/kernels/subdiv/catmullclark_coefficients.h (renamed from thirdparty/embree-aarch64/kernels/subdiv/catmullclark_coefficients.h)2
-rw-r--r--thirdparty/embree/kernels/subdiv/catmullclark_patch.h (renamed from thirdparty/embree-aarch64/kernels/subdiv/catmullclark_patch.h)2
-rw-r--r--thirdparty/embree/kernels/subdiv/catmullclark_ring.h (renamed from thirdparty/embree-aarch64/kernels/subdiv/catmullclark_ring.h)2
-rw-r--r--thirdparty/embree/kernels/subdiv/catmullrom_curve.h (renamed from thirdparty/embree-aarch64/kernels/subdiv/catmullrom_curve.h)7
-rw-r--r--thirdparty/embree/kernels/subdiv/feature_adaptive_eval.h (renamed from thirdparty/embree-aarch64/kernels/subdiv/feature_adaptive_eval.h)2
-rw-r--r--thirdparty/embree/kernels/subdiv/feature_adaptive_eval_grid.h (renamed from thirdparty/embree-aarch64/kernels/subdiv/feature_adaptive_eval_grid.h)2
-rw-r--r--thirdparty/embree/kernels/subdiv/feature_adaptive_eval_simd.h (renamed from thirdparty/embree-aarch64/kernels/subdiv/feature_adaptive_eval_simd.h)2
-rw-r--r--thirdparty/embree/kernels/subdiv/gregory_patch.h (renamed from thirdparty/embree-aarch64/kernels/subdiv/gregory_patch.h)2
-rw-r--r--thirdparty/embree/kernels/subdiv/gregory_patch_dense.h (renamed from thirdparty/embree-aarch64/kernels/subdiv/gregory_patch_dense.h)2
-rw-r--r--thirdparty/embree/kernels/subdiv/gridrange.h (renamed from thirdparty/embree-aarch64/kernels/subdiv/gridrange.h)2
-rw-r--r--thirdparty/embree/kernels/subdiv/half_edge.h (renamed from thirdparty/embree-aarch64/kernels/subdiv/half_edge.h)4
-rw-r--r--thirdparty/embree/kernels/subdiv/hermite_curve.h (renamed from thirdparty/embree-aarch64/kernels/subdiv/hermite_curve.h)3
-rw-r--r--thirdparty/embree/kernels/subdiv/linear_bezier_patch.h (renamed from thirdparty/embree-aarch64/kernels/subdiv/linear_bezier_patch.h)2
-rw-r--r--thirdparty/embree/kernels/subdiv/patch.h (renamed from thirdparty/embree-aarch64/kernels/subdiv/patch.h)2
-rw-r--r--thirdparty/embree/kernels/subdiv/patch_eval.h (renamed from thirdparty/embree-aarch64/kernels/subdiv/patch_eval.h)2
-rw-r--r--thirdparty/embree/kernels/subdiv/patch_eval_grid.h (renamed from thirdparty/embree-aarch64/kernels/subdiv/patch_eval_grid.h)2
-rw-r--r--thirdparty/embree/kernels/subdiv/patch_eval_simd.h (renamed from thirdparty/embree-aarch64/kernels/subdiv/patch_eval_simd.h)2
-rw-r--r--thirdparty/embree/kernels/subdiv/subdivpatch1base.h (renamed from thirdparty/embree-aarch64/kernels/subdiv/subdivpatch1base.h)2
-rw-r--r--thirdparty/embree/kernels/subdiv/tessellation.h (renamed from thirdparty/embree-aarch64/kernels/subdiv/tessellation.h)2
-rw-r--r--thirdparty/embree/kernels/subdiv/tessellation_cache.h (renamed from thirdparty/embree-aarch64/kernels/subdiv/tessellation_cache.h)4
-rw-r--r--thirdparty/embree/patches/godot-changes-android.patch103
-rw-r--r--thirdparty/embree/patches/godot-changes-misc.patch105
-rw-r--r--thirdparty/embree/patches/godot-changes-noexcept.patch (renamed from thirdparty/embree-aarch64/patches/godot-changes.patch)170
-rw-r--r--thirdparty/embree/patches/godot-changes-ubsan.patch24
-rw-r--r--thirdparty/misc/patches/polypartition-godot-types.patch14
-rw-r--r--thirdparty/misc/polypartition.cpp10
361 files changed, 13082 insertions, 11398 deletions
diff --git a/COPYRIGHT.txt b/COPYRIGHT.txt
index cfd983bb0a..eb82c42e6f 100644
--- a/COPYRIGHT.txt
+++ b/COPYRIGHT.txt
@@ -131,8 +131,8 @@ Comment: doctest
Copyright: 2016-2020, Viktor Kirilov
License: Expat
-Files: ./thirdparty/embree-aarch64/
-Comment: Embree-aarch64
+Files: ./thirdparty/embree/
+Comment: Embree
Copyright: 2009-2021 Intel Corporation
License: Apache-2.0
diff --git a/doc/classes/JavaScript.xml b/doc/classes/JavaScript.xml
index e6d74eeb21..c87e637ff5 100644
--- a/doc/classes/JavaScript.xml
+++ b/doc/classes/JavaScript.xml
@@ -29,6 +29,22 @@
Creates a new JavaScript object using the [code]new[/code] constructor. The [code]object[/code] must be a valid property of the JavaScript [code]window[/code]. See [JavaScriptObject] for usage.
</description>
</method>
+ <method name="download_buffer">
+ <return type="void">
+ </return>
+ <argument index="0" name="buffer" type="PackedByteArray">
+ </argument>
+ <argument index="1" name="name" type="String">
+ </argument>
+ <argument index="2" name="mime" type="String" default="&quot;application/octet-stream&quot;">
+ </argument>
+ <description>
+ Prompts the user to download a file containing the specified [code]buffer[/code]. The file will have the given [code]name[/code] and [code]mime[/code] type.
+ [b]Note:[/b] The browser may override the provided [url=https://en.wikipedia.org/wiki/Media_type]MIME type[/url] based on the file [code]name[/code]'s extension.
+ [b]Note:[/b] Browsers might block the download if [method download_buffer] is not called from a user interaction (e.g. a button click).
+ [b]Note:[/b] Browsers might ask the user for permission or block the download if multiple download requests are made in quick succession.
+ </description>
+ </method>
<method name="eval">
<return type="Variant">
</return>
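For reference, a minimal sketch of how the new singleton method can be driven from engine-side C++ code, mirroring the editor plugin change further down in this patch. The helper name and the FileAccess include path are assumptions for illustration, not part of the patch:

    // Hypothetical helper: read a file from the virtual filesystem and offer it
    // to the browser as a download through the new JavaScript singleton method.
    #include "core/os/file_access.h" // assumed include path for FileAccess
    #include "platform/javascript/api/javascript_singleton.h"

    void offer_download(const String &p_path) {
        FileAccess *f = FileAccess::open(p_path, FileAccess::READ);
        ERR_FAIL_COND_MSG(!f, "Unable to open file for download.");
        Vector<uint8_t> buf;
        buf.resize(f->get_len());
        f->get_buffer(buf.ptrw(), buf.size());
        f->close();
        memdelete(f);
        // The MIME type argument defaults to "application/octet-stream" when omitted.
        JavaScript::get_singleton()->download_buffer(buf, p_path.get_file());
    }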
diff --git a/modules/raycast/SCsub b/modules/raycast/SCsub
index 57120bff26..6e7b3e7b8d 100644
--- a/modules/raycast/SCsub
+++ b/modules/raycast/SCsub
@@ -10,7 +10,7 @@ env_raycast = env_modules.Clone()
thirdparty_obj = []
if env["builtin_embree"]:
- thirdparty_dir = "#thirdparty/embree-aarch64/"
+ thirdparty_dir = "#thirdparty/embree/"
embree_src = [
"common/sys/sysinfo.cpp",
@@ -28,16 +28,6 @@ if env["builtin_embree"]:
"common/lexers/stringstream.cpp",
"common/lexers/tokenstream.cpp",
"common/tasking/taskschedulerinternal.cpp",
- "common/algorithms/parallel_for.cpp",
- "common/algorithms/parallel_reduce.cpp",
- "common/algorithms/parallel_prefix_sum.cpp",
- "common/algorithms/parallel_for_for.cpp",
- "common/algorithms/parallel_for_for_prefix_sum.cpp",
- "common/algorithms/parallel_partition.cpp",
- "common/algorithms/parallel_sort.cpp",
- "common/algorithms/parallel_set.cpp",
- "common/algorithms/parallel_map.cpp",
- "common/algorithms/parallel_filter.cpp",
"kernels/common/device.cpp",
"kernels/common/stat.cpp",
"kernels/common/acceln.cpp",
@@ -82,13 +72,17 @@ if env["builtin_embree"]:
if env["platform"] == "windows":
if env.msvc:
env.Append(LINKFLAGS=["psapi.lib"])
- env_raycast.Append(CPPDEFINES=["__SSE2__", "__SSE__"])
else:
env.Append(LIBS=["psapi"])
env_thirdparty = env_raycast.Clone()
env_thirdparty.disable_warnings()
env_thirdparty.add_source_files(thirdparty_obj, thirdparty_sources)
+
+ if not env["arch"] in ["x86", "x86_64"] or env.msvc:
+ # Embree needs these defines; it will automatically use SSE2NEON on ARM.
+ env_thirdparty.Append(CPPDEFINES=["__SSE2__", "__SSE__"])
+
env.modules_sources += thirdparty_obj
diff --git a/modules/raycast/config.py b/modules/raycast/config.py
index 3da9ace9d8..5de36c5322 100644
--- a/modules/raycast/config.py
+++ b/modules/raycast/config.py
@@ -1,6 +1,5 @@
def can_build(env, platform):
- # Depends on Embree library, which supports only x86_64 (originally)
- # and aarch64 (thanks to the embree-aarch64 fork).
+ # Depends on Embree library, which only supports x86_64 and aarch64.
if platform == "android":
return env["android_arch"] in ["arm64v8", "x86_64"]
diff --git a/modules/raycast/godot_update_embree.py b/modules/raycast/godot_update_embree.py
index db4fa95c21..31a25a318f 100644
--- a/modules/raycast/godot_update_embree.py
+++ b/modules/raycast/godot_update_embree.py
@@ -11,6 +11,7 @@ include_dirs = [
"common/algorithms",
"common/lexers",
"common/simd",
+ "common/simd/arm",
"include/embree3",
"kernels/subdiv",
"kernels/geometry",
@@ -32,16 +33,6 @@ cpp_files = [
"common/lexers/stringstream.cpp",
"common/lexers/tokenstream.cpp",
"common/tasking/taskschedulerinternal.cpp",
- "common/algorithms/parallel_for.cpp",
- "common/algorithms/parallel_reduce.cpp",
- "common/algorithms/parallel_prefix_sum.cpp",
- "common/algorithms/parallel_for_for.cpp",
- "common/algorithms/parallel_for_for_prefix_sum.cpp",
- "common/algorithms/parallel_partition.cpp",
- "common/algorithms/parallel_sort.cpp",
- "common/algorithms/parallel_set.cpp",
- "common/algorithms/parallel_map.cpp",
- "common/algorithms/parallel_filter.cpp",
"kernels/common/device.cpp",
"kernels/common/stat.cpp",
"kernels/common/acceln.cpp",
@@ -74,11 +65,11 @@ cpp_files = [
os.chdir("../../thirdparty")
-dir_name = "embree-aarch64"
+dir_name = "embree"
if os.path.exists(dir_name):
shutil.rmtree(dir_name)
-subprocess.run(["git", "clone", "https://github.com/lighttransport/embree-aarch64.git", "embree-tmp"])
+subprocess.run(["git", "clone", "https://github.com/embree/embree.git", "embree-tmp"])
os.chdir("embree-tmp")
commit_hash = str(subprocess.check_output(["git", "rev-parse", "HEAD"], universal_newlines=True)).strip()
@@ -197,7 +188,7 @@ with open("CMakeLists.txt", "r") as cmake_file:
with open(os.path.join(dest_dir, "include/embree3/rtcore_config.h"), "w") as config_file:
config_file.write(
f"""
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
diff --git a/platform/javascript/SCsub b/platform/javascript/SCsub
index d68ab7f7c9..62a8660ae4 100644
--- a/platform/javascript/SCsub
+++ b/platform/javascript/SCsub
@@ -23,8 +23,6 @@ sys_env.AddJSLibraries(
]
)
-if env["tools"]:
- sys_env.AddJSLibraries(["js/libs/library_godot_editor_tools.js"])
if env["javascript_eval"]:
sys_env.AddJSLibraries(["js/libs/library_godot_javascript_singleton.js"])
diff --git a/platform/javascript/api/api.cpp b/platform/javascript/api/api.cpp
index 039ce815e4..5ad2bf56cf 100644
--- a/platform/javascript/api/api.cpp
+++ b/platform/javascript/api/api.cpp
@@ -70,6 +70,7 @@ void JavaScript::_bind_methods() {
mi.arguments.push_back(PropertyInfo(Variant::STRING, "object"));
ClassDB::bind_vararg_method(METHOD_FLAGS_DEFAULT, "create_object", &JavaScript::_create_object_bind, mi);
}
+ ClassDB::bind_method(D_METHOD("download_buffer", "buffer", "name", "mime"), &JavaScript::download_buffer, DEFVAL("application/octet-stream"));
}
#if !defined(JAVASCRIPT_ENABLED) || !defined(JAVASCRIPT_EVAL_ENABLED)
@@ -100,3 +101,7 @@ Variant JavaScript::_create_object_bind(const Variant **p_args, int p_argcount,
return Ref<JavaScriptObject>();
}
#endif
+#if !defined(JAVASCRIPT_ENABLED)
+void JavaScript::download_buffer(Vector<uint8_t> p_arr, const String &p_name, const String &p_mime) {
+}
+#endif
diff --git a/platform/javascript/api/javascript_singleton.h b/platform/javascript/api/javascript_singleton.h
index 45e9950acb..1615efa87e 100644
--- a/platform/javascript/api/javascript_singleton.h
+++ b/platform/javascript/api/javascript_singleton.h
@@ -58,6 +58,7 @@ public:
Ref<JavaScriptObject> get_interface(const String &p_interface);
Ref<JavaScriptObject> create_callback(const Callable &p_callable);
Variant _create_object_bind(const Variant **p_args, int p_argcount, Callable::CallError &r_error);
+ void download_buffer(Vector<uint8_t> p_arr, const String &p_name, const String &p_mime = "application/octet-stream");
static JavaScript *get_singleton();
JavaScript();
diff --git a/platform/javascript/api/javascript_tools_editor_plugin.cpp b/platform/javascript/api/javascript_tools_editor_plugin.cpp
index 015440f5be..ac4e6a1256 100644
--- a/platform/javascript/api/javascript_tools_editor_plugin.cpp
+++ b/platform/javascript/api/javascript_tools_editor_plugin.cpp
@@ -41,7 +41,7 @@
// JavaScript functions defined in library_godot_editor_tools.js
extern "C" {
-extern void godot_js_editor_download_file(const char *p_path, const char *p_name, const char *p_mime);
+extern int godot_js_os_download_buffer(const uint8_t *p_buf, int p_buf_size, const char *p_name, const char *p_mime);
}
static void _javascript_editor_init_callback() {
@@ -69,7 +69,12 @@ void JavaScriptToolsEditorPlugin::_download_zip(Variant p_v) {
String base_path = resource_path.substr(0, resource_path.rfind("/")) + "/";
_zip_recursive(resource_path, base_path, zip);
zipClose(zip, nullptr);
- godot_js_editor_download_file("/tmp/project.zip", "project.zip", "application/zip");
+ FileAccess *f = FileAccess::open("/tmp/project.zip", FileAccess::READ);
+ ERR_FAIL_COND_MSG(!f, "Unable to create zip file");
+ Vector<uint8_t> buf;
+ buf.resize(f->get_len());
+ f->get_buffer(buf.ptrw(), buf.size());
+ godot_js_os_download_buffer(buf.ptr(), buf.size(), "project.zip", "application/zip");
}
void JavaScriptToolsEditorPlugin::_zip_file(String p_path, String p_base_path, zipFile p_zip) {
diff --git a/platform/javascript/dom_keys.inc b/platform/javascript/dom_keys.inc
index 7902efafe0..69340ff58c 100644
--- a/platform/javascript/dom_keys.inc
+++ b/platform/javascript/dom_keys.inc
@@ -159,8 +159,8 @@ int dom_code2godot_scancode(EM_UTF8 const p_code[32], EM_UTF8 const p_key[32], b
DOM2GODOT("Backspace", BACKSPACE);
DOM2GODOT("CapsLock", CAPSLOCK);
DOM2GODOT("ContextMenu", MENU);
- DOM2GODOT("ControlLeft", CONTROL);
- DOM2GODOT("ControlRight", CONTROL);
+ DOM2GODOT("ControlLeft", CTRL);
+ DOM2GODOT("ControlRight", CTRL);
DOM2GODOT("Enter", ENTER);
DOM2GODOT("MetaLeft", SUPER_L);
DOM2GODOT("MetaRight", SUPER_R);
diff --git a/platform/javascript/javascript_singleton.cpp b/platform/javascript/javascript_singleton.cpp
index 67908a18da..5ef67c0cdd 100644
--- a/platform/javascript/javascript_singleton.cpp
+++ b/platform/javascript/javascript_singleton.cpp
@@ -301,6 +301,7 @@ union js_eval_ret {
};
extern int godot_js_eval(const char *p_js, int p_use_global_ctx, union js_eval_ret *p_union_ptr, void *p_byte_arr, void *p_byte_arr_write, void *(*p_callback)(void *p_ptr, void *p_ptr2, int p_len));
+extern int godot_js_os_download_buffer(const uint8_t *p_buf, int p_buf_size, const char *p_name, const char *p_mime);
}
void *resize_PackedByteArray_and_open_write(void *p_arr, void *r_write, int p_len) {
@@ -336,3 +337,7 @@ Variant JavaScript::eval(const String &p_code, bool p_use_global_exec_context) {
}
}
#endif // JAVASCRIPT_EVAL_ENABLED
+
+void JavaScript::download_buffer(Vector<uint8_t> p_arr, const String &p_name, const String &p_mime) {
+ godot_js_os_download_buffer(p_arr.ptr(), p_arr.size(), p_name.utf8().get_data(), p_mime.utf8().get_data());
+}
diff --git a/platform/javascript/js/libs/library_godot_editor_tools.js b/platform/javascript/js/libs/library_godot_editor_tools.js
deleted file mode 100644
index d7f1ad5ea1..0000000000
--- a/platform/javascript/js/libs/library_godot_editor_tools.js
+++ /dev/null
@@ -1,57 +0,0 @@
-/*************************************************************************/
-/* library_godot_editor_tools.js */
-/*************************************************************************/
-/* This file is part of: */
-/* GODOT ENGINE */
-/* https://godotengine.org */
-/*************************************************************************/
-/* Copyright (c) 2007-2021 Juan Linietsky, Ariel Manzur. */
-/* Copyright (c) 2014-2021 Godot Engine contributors (cf. AUTHORS.md). */
-/* */
-/* Permission is hereby granted, free of charge, to any person obtaining */
-/* a copy of this software and associated documentation files (the */
-/* "Software"), to deal in the Software without restriction, including */
-/* without limitation the rights to use, copy, modify, merge, publish, */
-/* distribute, sublicense, and/or sell copies of the Software, and to */
-/* permit persons to whom the Software is furnished to do so, subject to */
-/* the following conditions: */
-/* */
-/* The above copyright notice and this permission notice shall be */
-/* included in all copies or substantial portions of the Software. */
-/* */
-/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
-/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
-/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/
-/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
-/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
-/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
-/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
-/*************************************************************************/
-
-const GodotEditorTools = {
- godot_js_editor_download_file__deps: ['$FS'],
- godot_js_editor_download_file__sig: 'viii',
- godot_js_editor_download_file: function (p_path, p_name, p_mime) {
- const path = GodotRuntime.parseString(p_path);
- const name = GodotRuntime.parseString(p_name);
- const mime = GodotRuntime.parseString(p_mime);
- const size = FS.stat(path)['size'];
- const buf = new Uint8Array(size);
- const fd = FS.open(path, 'r');
- FS.read(fd, buf, 0, size);
- FS.close(fd);
- FS.unlink(path);
- const blob = new Blob([buf], { type: mime });
- const url = window.URL.createObjectURL(blob);
- const a = document.createElement('a');
- a.href = url;
- a.download = name;
- a.style.display = 'none';
- document.body.appendChild(a);
- a.click();
- a.remove();
- window.URL.revokeObjectURL(url);
- },
-};
-
-mergeInto(LibraryManager.library, GodotEditorTools);
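For context: the editor helper removed above staged the exported archive in the in-memory Emscripten filesystem and read it back manually (FS.stat, FS.open, FS.read, FS.unlink) before handing the data to the browser. The same read can be written more compactly with Emscripten's FS.readFile, which returns a Uint8Array when no text encoding is requested. A minimal sketch with a hypothetical path literal; it is illustration only, not code from this patch:

    // Read a temporary export out of the in-memory Emscripten FS.
    const bytes = FS.readFile('/tmp/export.zip'); // hypothetical path; returns a Uint8Array
    FS.unlink('/tmp/export.zip'); // drop the temporary once the bytes have been copied out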
diff --git a/platform/javascript/js/libs/library_godot_os.js b/platform/javascript/js/libs/library_godot_os.js
index 1d9f889bce..7414b8cc47 100644
--- a/platform/javascript/js/libs/library_godot_os.js
+++ b/platform/javascript/js/libs/library_godot_os.js
@@ -304,6 +304,23 @@ const GodotOS = {
godot_js_os_hw_concurrency_get: function () {
return navigator.hardwareConcurrency || 1;
},
+
+ godot_js_os_download_buffer__sig: 'viiii',
+ godot_js_os_download_buffer: function (p_ptr, p_size, p_name, p_mime) {
+ const buf = GodotRuntime.heapSlice(HEAP8, p_ptr, p_size);
+ const name = GodotRuntime.parseString(p_name);
+ const mime = GodotRuntime.parseString(p_mime);
+ const blob = new Blob([buf], { type: mime });
+ const url = window.URL.createObjectURL(blob);
+ const a = document.createElement('a');
+ a.href = url;
+ a.download = name;
+ a.style.display = 'none';
+ document.body.appendChild(a);
+ a.click();
+ a.remove();
+ window.URL.revokeObjectURL(url);
+ },
};
autoAddDeps(GodotOS, '$GodotOS');
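The replacement above performs the download entirely from a caller-supplied buffer: the bytes are copied out of the Emscripten heap, wrapped in a Blob, and handed to the browser through a temporary object URL and a synthetic click on a hidden anchor element. Stripped of the GodotRuntime plumbing, the underlying browser pattern looks roughly like this standalone sketch (the helper name saveBufferAs is ours, not part of the engine):

    function saveBufferAs(bytes, name, mime) {
        // Wrap the raw bytes in a Blob and expose it through a temporary object URL.
        const blob = new Blob([bytes], { type: mime });
        const url = window.URL.createObjectURL(blob);
        // A hidden anchor with the download attribute triggers the browser's save dialog.
        const a = document.createElement('a');
        a.href = url;
        a.download = name;
        a.style.display = 'none';
        document.body.appendChild(a);
        a.click();
        a.remove();
        // Release the temporary URL once the click has been dispatched.
        window.URL.revokeObjectURL(url);
    }

    // Example: offer two bytes for download as a generic binary file.
    saveBufferAs(new Uint8Array([71, 68]), 'example.bin', 'application/octet-stream');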
diff --git a/thirdparty/README.md b/thirdparty/README.md
index 1e7d990dda..7cedd1a2cb 100644
--- a/thirdparty/README.md
+++ b/thirdparty/README.md
@@ -61,10 +61,10 @@ Files extracted from upstream source:
Extracted from .zip provided. Extracted license and header only.
-## embree-aarch64
+## embree
-- Upstream: https://github.com/lighttransport/embree-aarch64
-- Version: 3.12.1 (6ef362f99af80c9dfe8dd2bfc582d9067897edc6, 2020)
+- Upstream: https://github.com/embree/embree
+- Version: 3.13.0 (7c53133eb21424f7f0ae1e25bf357e358feaf6ab, 2021)
- License: Apache 2.0
Files extracted from upstream:
@@ -73,7 +73,7 @@ Files extracted from upstream:
- All header files in the directories listed in `modules/raycast/godot_update_embree.py`
The `modules/raycast/godot_update_embree.py` script can be used to pull the
-relevant files from the latest Embree-aarch64 release and apply some automatic changes.
+relevant files from the latest Embree release and apply some automatic changes.
Some changes have been made in order to remove exceptions and fix minor build errors.
They are marked with `// -- GODOT start --` and `// -- GODOT end --`
diff --git a/thirdparty/embree-aarch64/common/algorithms/parallel_filter.cpp b/thirdparty/embree-aarch64/common/algorithms/parallel_filter.cpp
deleted file mode 100644
index acddc0ff81..0000000000
--- a/thirdparty/embree-aarch64/common/algorithms/parallel_filter.cpp
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright 2009-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-
-#include "parallel_filter.h"
-#include "../sys/regression.h"
-#include <map>
-
-namespace embree
-{
- struct parallel_filter_regression_test : public RegressionTest
- {
- parallel_filter_regression_test(const char* name) : RegressionTest(name) {
- registerRegressionTest(this);
- }
-
- bool run ()
- {
- bool passed = true;
- auto pred = [&]( uint32_t v ) { return (v & 0x3) == 0; };
-
- for (size_t N=10; N<1000000; N=size_t(2.1*N))
- {
- size_t N0 = rand() % N;
-
- /* initialize array with random numbers */
- std::vector<uint32_t> src(N);
- std::map<uint32_t,int> m;
- for (size_t i=0; i<N; i++) src[i] = rand();
-
- /* count elements up */
- for (size_t i=N0; i<N; i++)
- if (pred(src[i]))
- m[src[i]] = 0;
- for (size_t i=N0; i<N; i++)
- if (pred(src[i]))
- m[src[i]]++;
-
- /* filter array */
- //size_t M = sequential_filter(src.data(),N0,N,pred);
- size_t M = parallel_filter(src.data(),N0,N,size_t(1024),pred);
-
- /* check if filtered data is correct */
- for (size_t i=N0; i<M; i++) {
- passed &= pred(src[i]);
- m[src[i]]--;
- }
- for (size_t i=N0; i<M; i++)
- passed &= (m[src[i]] == 0);
- }
-
- return passed;
- }
- };
-
- parallel_filter_regression_test parallel_filter_regression("parallel_filter_regression");
-}
diff --git a/thirdparty/embree-aarch64/common/algorithms/parallel_for.cpp b/thirdparty/embree-aarch64/common/algorithms/parallel_for.cpp
deleted file mode 100644
index ef070ebc4d..0000000000
--- a/thirdparty/embree-aarch64/common/algorithms/parallel_for.cpp
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright 2009-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-
-#include "parallel_for.h"
-#include "../sys/regression.h"
-
-namespace embree
-{
- struct parallel_for_regression_test : public RegressionTest
- {
- parallel_for_regression_test(const char* name) : RegressionTest(name) {
- registerRegressionTest(this);
- }
-
- bool run ()
- {
- bool passed = true;
-
- const size_t M = 10;
- for (size_t N=10; N<10000000; N=size_t(2.1*N))
- {
- /* sequentially calculate sum of squares */
- size_t sum0 = 0;
- for (size_t i=0; i<N; i++) {
- sum0 += i*i;
- }
-
- /* parallel calculation of sum of squares */
- for (size_t m=0; m<M; m++)
- {
- std::atomic<size_t> sum1(0);
- parallel_for( size_t(0), size_t(N), size_t(1024), [&](const range<size_t>& r)
- {
- size_t s = 0;
- for (size_t i=r.begin(); i<r.end(); i++)
- s += i*i;
- sum1 += s;
- });
- passed = sum0 == sum1;
- }
- }
-
- return passed;
- }
- };
-
- parallel_for_regression_test parallel_for_regression("parallel_for_regression_test");
-}
diff --git a/thirdparty/embree-aarch64/common/algorithms/parallel_for_for.cpp b/thirdparty/embree-aarch64/common/algorithms/parallel_for_for.cpp
deleted file mode 100644
index 0337611b35..0000000000
--- a/thirdparty/embree-aarch64/common/algorithms/parallel_for_for.cpp
+++ /dev/null
@@ -1,63 +0,0 @@
-// Copyright 2009-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-
-#include "parallel_for_for.h"
-#include "../sys/regression.h"
-
-namespace embree
-{
- struct parallel_for_for_regression_test : public RegressionTest
- {
- parallel_for_for_regression_test(const char* name) : RegressionTest(name) {
- registerRegressionTest(this);
- }
-
- bool run ()
- {
- bool passed = true;
-
- /* create vector with random numbers */
- size_t sum0 = 0;
- size_t K = 0;
- const size_t M = 1000;
- std::vector<std::vector<size_t>* > array2(M);
- for (size_t i=0; i<M; i++) {
- const size_t N = rand() % 1024;
- K+=N;
- array2[i] = new std::vector<size_t>(N);
- for (size_t j=0; j<N; j++)
- sum0 += (*array2[i])[j] = rand();
- }
-
- /* array to test global index */
- std::vector<atomic<size_t>> verify_k(K);
- for (size_t i=0; i<K; i++) verify_k[i].store(0);
-
- /* add all numbers using parallel_for_for */
- std::atomic<size_t> sum1(0);
- parallel_for_for( array2, size_t(1), [&](std::vector<size_t>* v, const range<size_t>& r, size_t k) -> size_t
- {
- size_t s = 0;
- for (size_t i=r.begin(); i<r.end(); i++) {
- s += (*v)[i];
- verify_k[k++]++;
- }
- sum1 += s;
- return sum1;
- });
- passed &= (sum0 == sum1);
-
- /* check global index */
- for (size_t i=0; i<K; i++)
- passed &= (verify_k[i] == 1);
-
- /* delete vectors again */
- for (size_t i=0; i<array2.size(); i++)
- delete array2[i];
-
- return passed;
- }
- };
-
- parallel_for_for_regression_test parallel_for_for_regression("parallel_for_for_regression_test");
-}
diff --git a/thirdparty/embree-aarch64/common/algorithms/parallel_for_for_prefix_sum.cpp b/thirdparty/embree-aarch64/common/algorithms/parallel_for_for_prefix_sum.cpp
deleted file mode 100644
index 0169d8e481..0000000000
--- a/thirdparty/embree-aarch64/common/algorithms/parallel_for_for_prefix_sum.cpp
+++ /dev/null
@@ -1,85 +0,0 @@
-// Copyright 2009-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-
-#include "parallel_for_for_prefix_sum.h"
-#include "../sys/regression.h"
-
-namespace embree
-{
- struct parallel_for_for_prefix_sum_regression_test : public RegressionTest
- {
- parallel_for_for_prefix_sum_regression_test(const char* name) : RegressionTest(name) {
- registerRegressionTest(this);
- }
-
- bool run ()
- {
- bool passed = true;
-
- /* create vector with random numbers */
- const size_t M = 10;
- std::vector<atomic<size_t>> flattened;
- typedef std::vector<std::vector<size_t>* > ArrayArray;
- ArrayArray array2(M);
- size_t K = 0;
- for (size_t i=0; i<M; i++) {
- const size_t N = rand() % 10;
- K += N;
- array2[i] = new std::vector<size_t>(N);
- for (size_t j=0; j<N; j++)
- (*array2[i])[j] = rand() % 10;
- }
-
- /* array to test global index */
- std::vector<atomic<size_t>> verify_k(K);
- for (size_t i=0; i<K; i++) verify_k[i].store(0);
-
- ParallelForForPrefixSumState<size_t> state(array2,size_t(1));
-
- /* dry run only counts */
- size_t S = parallel_for_for_prefix_sum0( state, array2, size_t(0), [&](std::vector<size_t>* v, const range<size_t>& r, size_t k, size_t i) -> size_t
- {
- size_t s = 0;
- for (size_t i=r.begin(); i<r.end(); i++) {
- s += (*v)[i];
- verify_k[k++]++;
- }
- return s;
- }, [](size_t v0, size_t v1) { return v0+v1; });
-
- /* create properly sized output array */
- flattened.resize(S);
- for (auto& a : flattened) a.store(0);
-
- /* now we actually fill the flattened array */
- parallel_for_for_prefix_sum1( state, array2, size_t(0), [&](std::vector<size_t>* v, const range<size_t>& r, size_t k, size_t i, const size_t base) -> size_t
- {
- size_t s = 0;
- for (size_t i=r.begin(); i<r.end(); i++) {
- for (size_t j=0; j<(*v)[i]; j++) {
- flattened[base+s+j]++;
- }
- s += (*v)[i];
- verify_k[k++]++;
- }
- return s;
- }, [](size_t v0, size_t v1) { return v0+v1; });
-
- /* check global index */
- for (size_t i=0; i<K; i++)
- passed &= (verify_k[i] == 2);
-
- /* check if each element was assigned exactly once */
- for (size_t i=0; i<flattened.size(); i++)
- passed &= (flattened[i] == 1);
-
- /* delete arrays again */
- for (size_t i=0; i<array2.size(); i++)
- delete array2[i];
-
- return passed;
- }
- };
-
- parallel_for_for_prefix_sum_regression_test parallel_for_for_prefix_sum_regression("parallel_for_for_prefix_sum_regression_test");
-}
diff --git a/thirdparty/embree-aarch64/common/algorithms/parallel_map.cpp b/thirdparty/embree-aarch64/common/algorithms/parallel_map.cpp
deleted file mode 100644
index 09dc303f81..0000000000
--- a/thirdparty/embree-aarch64/common/algorithms/parallel_map.cpp
+++ /dev/null
@@ -1,47 +0,0 @@
-// Copyright 2009-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-
-#include "parallel_map.h"
-#include "../sys/regression.h"
-
-namespace embree
-{
- struct parallel_map_regression_test : public RegressionTest
- {
- parallel_map_regression_test(const char* name) : RegressionTest(name) {
- registerRegressionTest(this);
- }
-
- bool run ()
- {
- bool passed = true;
-
- /* create key/value vectors with random numbers */
- const size_t N = 10000;
- std::vector<uint32_t> keys(N);
- std::vector<uint32_t> vals(N);
- for (size_t i=0; i<N; i++) keys[i] = 2*unsigned(i)*647382649;
- for (size_t i=0; i<N; i++) std::swap(keys[i],keys[rand()%N]);
- for (size_t i=0; i<N; i++) vals[i] = 2*rand();
-
- /* create map */
- parallel_map<uint32_t,uint32_t> map;
- map.init(keys,vals);
-
- /* check that all keys are properly mapped */
- for (size_t i=0; i<N; i++) {
- const uint32_t* val = map.lookup(keys[i]);
- passed &= val && (*val == vals[i]);
- }
-
- /* check that these keys are not in the map */
- for (size_t i=0; i<N; i++) {
- passed &= !map.lookup(keys[i]+1);
- }
-
- return passed;
- }
- };
-
- parallel_map_regression_test parallel_map_regression("parallel_map_regression_test");
-}
diff --git a/thirdparty/embree-aarch64/common/algorithms/parallel_partition.cpp b/thirdparty/embree-aarch64/common/algorithms/parallel_partition.cpp
deleted file mode 100644
index eb20c4465d..0000000000
--- a/thirdparty/embree-aarch64/common/algorithms/parallel_partition.cpp
+++ /dev/null
@@ -1,53 +0,0 @@
-// Copyright 2009-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-
-#include "parallel_partition.h"
-#include "../sys/regression.h"
-
-namespace embree
-{
- struct parallel_partition_regression_test : public RegressionTest
- {
- parallel_partition_regression_test(const char* name) : RegressionTest(name) {
- registerRegressionTest(this);
- }
-
- bool run ()
- {
- bool passed = true;
-
- for (size_t i=0; i<100; i++)
- {
- /* create random permutation */
- size_t N = std::rand() % 1000000;
- std::vector<unsigned> array(N);
- for (unsigned i=0; i<N; i++) array[i] = i;
- for (auto& v : array) std::swap(v,array[std::rand()%array.size()]);
- size_t split = std::rand() % (N+1);
-
- /* perform parallel partitioning */
- size_t left_sum = 0, right_sum = 0;
- size_t mid = parallel_partitioning(array.data(),0,array.size(),0,left_sum,right_sum,
- [&] ( size_t i ) { return i < split; },
- [] ( size_t& sum, unsigned v) { sum += v; },
- [] ( size_t& sum, size_t v) { sum += v; },
- 128);
-
- /*serial_partitioning(array.data(),0,array.size(),left_sum,right_sum,
- [&] ( size_t i ) { return i < split; },
- [] ( size_t& left_sum, int v) { left_sum += v; });*/
-
- /* verify result */
- passed &= mid == split;
- passed &= left_sum == split*(split-1)/2;
- passed &= right_sum == N*(N-1)/2-left_sum;
- for (size_t i=0; i<split; i++) passed &= array[i] < split;
- for (size_t i=split; i<N; i++) passed &= array[i] >= split;
- }
-
- return passed;
- }
- };
-
- parallel_partition_regression_test parallel_partition_regression("parallel_partition_regression_test");
-}
diff --git a/thirdparty/embree-aarch64/common/algorithms/parallel_prefix_sum.cpp b/thirdparty/embree-aarch64/common/algorithms/parallel_prefix_sum.cpp
deleted file mode 100644
index 685952c3dc..0000000000
--- a/thirdparty/embree-aarch64/common/algorithms/parallel_prefix_sum.cpp
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright 2009-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-
-#include "parallel_prefix_sum.h"
-#include "../sys/regression.h"
-
-namespace embree
-{
- struct parallel_prefix_sum_regression_test : public RegressionTest
- {
- parallel_prefix_sum_regression_test(const char* name) : RegressionTest(name) {
- registerRegressionTest(this);
- }
-
- bool run ()
- {
- bool passed = true;
- const size_t M = 10;
-
- for (size_t N=10; N<10000000; N=size_t(2.1*N))
- {
- /* initialize array with random numbers */
- uint32_t sum0 = 0;
- std::vector<uint32_t> src(N);
- for (size_t i=0; i<N; i++) {
- sum0 += src[i] = rand();
- }
-
- /* calculate parallel prefix sum */
- std::vector<uint32_t> dst(N);
- for (auto& v : dst) v = 0;
-
- for (size_t i=0; i<M; i++) {
- uint32_t sum1 = parallel_prefix_sum(src,dst,N,0,std::plus<uint32_t>());
- passed &= (sum0 == sum1);
- }
-
- /* check if prefix sum is correct */
- for (size_t i=0, sum=0; i<N; sum+=src[i++])
- passed &= ((uint32_t)sum == dst[i]);
- }
-
- return passed;
- }
- };
-
- parallel_prefix_sum_regression_test parallel_prefix_sum_regression("parallel_prefix_sum_regression");
-}
diff --git a/thirdparty/embree-aarch64/common/algorithms/parallel_reduce.cpp b/thirdparty/embree-aarch64/common/algorithms/parallel_reduce.cpp
deleted file mode 100644
index 331fe4288e..0000000000
--- a/thirdparty/embree-aarch64/common/algorithms/parallel_reduce.cpp
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright 2009-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-
-#include "parallel_reduce.h"
-#include "../sys/regression.h"
-
-namespace embree
-{
- struct parallel_reduce_regression_test : public RegressionTest
- {
- parallel_reduce_regression_test(const char* name) : RegressionTest(name) {
- registerRegressionTest(this);
- }
-
- bool run ()
- {
- bool passed = true;
-
- const size_t M = 10;
- for (size_t N=10; N<10000000; N=size_t(2.1*N))
- {
- /* sequentially calculate sum of squares */
- size_t sum0 = 0;
- for (size_t i=0; i<N; i++) {
- sum0 += i*i;
- }
-
- /* parallel calculation of sum of squares */
- for (size_t m=0; m<M; m++)
- {
- size_t sum1 = parallel_reduce( size_t(0), size_t(N), size_t(1024), size_t(0), [&](const range<size_t>& r) -> size_t
- {
- size_t s = 0;
- for (size_t i=r.begin(); i<r.end(); i++)
- s += i*i;
- return s;
- },
- [](const size_t v0, const size_t v1) {
- return v0+v1;
- });
- passed = sum0 == sum1;
- }
- }
- return passed;
- }
- };
-
- parallel_reduce_regression_test parallel_reduce_regression("parallel_reduce_regression_test");
-}
diff --git a/thirdparty/embree-aarch64/common/algorithms/parallel_set.cpp b/thirdparty/embree-aarch64/common/algorithms/parallel_set.cpp
deleted file mode 100644
index 20b639c1c9..0000000000
--- a/thirdparty/embree-aarch64/common/algorithms/parallel_set.cpp
+++ /dev/null
@@ -1,43 +0,0 @@
-// Copyright 2009-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-
-#include "parallel_set.h"
-#include "../sys/regression.h"
-
-namespace embree
-{
- struct parallel_set_regression_test : public RegressionTest
- {
- parallel_set_regression_test(const char* name) : RegressionTest(name) {
- registerRegressionTest(this);
- }
-
- bool run ()
- {
- bool passed = true;
-
- /* create vector with random numbers */
- const size_t N = 10000;
- std::vector<uint32_t> unsorted(N);
- for (size_t i=0; i<N; i++) unsorted[i] = 2*rand();
-
- /* created set from numbers */
- parallel_set<uint32_t> sorted;
- sorted.init(unsorted);
-
- /* check that all elements are in the set */
- for (size_t i=0; i<N; i++) {
- passed &= sorted.lookup(unsorted[i]);
- }
-
- /* check that these elements are not in the set */
- for (size_t i=0; i<N; i++) {
- passed &= !sorted.lookup(unsorted[i]+1);
- }
-
- return passed;
- }
- };
-
- parallel_set_regression_test parallel_set_regression("parallel_set_regression_test");
-}
diff --git a/thirdparty/embree-aarch64/common/algorithms/parallel_sort.cpp b/thirdparty/embree-aarch64/common/algorithms/parallel_sort.cpp
deleted file mode 100644
index 5e7ec79ac1..0000000000
--- a/thirdparty/embree-aarch64/common/algorithms/parallel_sort.cpp
+++ /dev/null
@@ -1,50 +0,0 @@
-// Copyright 2009-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-
-#include "parallel_sort.h"
-#include "../sys/regression.h"
-
-namespace embree
-{
- template<typename Key>
- struct RadixSortRegressionTest : public RegressionTest
- {
- RadixSortRegressionTest(const char* name) : RegressionTest(name) {
- registerRegressionTest(this);
- }
-
- bool run ()
- {
- bool passed = true;
- const size_t M = 10;
-
- for (size_t N=10; N<1000000; N=size_t(2.1*N))
- {
- std::vector<Key> src(N); memset(src.data(),0,N*sizeof(Key));
- std::vector<Key> tmp(N); memset(tmp.data(),0,N*sizeof(Key));
- for (size_t i=0; i<N; i++) src[i] = uint64_t(rand())*uint64_t(rand());
-
- /* calculate checksum */
- Key sum0 = 0; for (size_t i=0; i<N; i++) sum0 += src[i];
-
- /* sort numbers */
- for (size_t i=0; i<M; i++) {
- radix_sort<Key>(src.data(),tmp.data(),N);
- }
-
- /* calculate checksum */
- Key sum1 = 0; for (size_t i=0; i<N; i++) sum1 += src[i];
- if (sum0 != sum1) passed = false;
-
- /* check if numbers are sorted */
- for (size_t i=1; i<N; i++)
- passed &= src[i-1] <= src[i];
- }
-
- return passed;
- }
- };
-
- RadixSortRegressionTest<uint32_t> test_u32("RadixSortRegressionTestU32");
- RadixSortRegressionTest<uint64_t> test_u64("RadixSortRegressionTestU64");
-}
diff --git a/thirdparty/embree-aarch64/common/math/AVX2NEON.h b/thirdparty/embree-aarch64/common/math/AVX2NEON.h
deleted file mode 100644
index e8698ac56d..0000000000
--- a/thirdparty/embree-aarch64/common/math/AVX2NEON.h
+++ /dev/null
@@ -1,986 +0,0 @@
-#pragma once
-
-#include "SSE2NEON.h"
-
-
-#define AVX2NEON_ABI static inline __attribute__((always_inline))
-
-
-struct __m256d;
-
-struct __m256 {
- __m128 lo,hi;
- __m256() {}
-};
-
-
-
-
-struct __m256i {
- __m128i lo,hi;
- explicit __m256i(const __m256 a) : lo(__m128i(a.lo)),hi(__m128i(a.hi)) {}
- operator __m256() const {__m256 res; res.lo = __m128(lo);res.hi = __m128(hi); return res;}
- __m256i() {}
-};
-
-
-
-
-struct __m256d {
- float64x2_t lo,hi;
- __m256d() {}
- __m256d(const __m256& a) : lo(float64x2_t(a.lo)),hi(float64x2_t(a.hi)) {}
- __m256d(const __m256i& a) : lo(float64x2_t(a.lo)),hi(float64x2_t(a.hi)) {}
-};
-
-#define UNARY_AVX_OP(type,func,basic_func) AVX2NEON_ABI type func(const type& a) {type res;res.lo=basic_func(a.lo);res.hi=basic_func(a.hi);return res;}
-
-
-#define BINARY_AVX_OP(type,func,basic_func) AVX2NEON_ABI type func(const type& a,const type& b) {type res;res.lo=basic_func(a.lo,b.lo);res.hi=basic_func(a.hi,b.hi);return res;}
-#define BINARY_AVX_OP_CAST(type,func,basic_func,bdst,bsrc) AVX2NEON_ABI type func(const type& a,const type& b) {type res;res.lo=bdst(basic_func(bsrc(a.lo),bsrc(b.lo)));res.hi=bdst(basic_func(bsrc(a.hi),bsrc(b.hi)));return res;}
-
-#define TERNARY_AVX_OP(type,func,basic_func) AVX2NEON_ABI type func(const type& a,const type& b,const type& c) {type res;res.lo=basic_func(a.lo,b.lo,c.lo);res.hi=basic_func(a.hi,b.hi,c.hi);return res;}
-
-
-#define CAST_SIMD_TYPE(to,name,from,basic_dst) AVX2NEON_ABI to name(const from& a) { to res; res.lo = basic_dst(a.lo); res.hi=basic_dst(a.hi); return res;}
-
-
-
-#define _mm_stream_load_si128 _mm_load_si128
-#define _mm256_stream_load_si256 _mm256_load_si256
-
-
-AVX2NEON_ABI
-__m128 _mm_blend_ps (__m128 a, __m128 b, const int imm8)
-{
- __m128 res;
- for (int i=0;i<4;i++)
- {
- if (imm8 & (1<<i))
- {
- res[i] = b[i];
- }
- else{
- res[i] = a[i];
- }
- }
-
- return res;
-}
-
-AVX2NEON_ABI
-__m128i _mm_blend_epi32 (__m128i a, __m128i b, const int imm8)
-{
- __m128i res;
- for (int i=0;i<4;i++)
- {
- if (imm8 & (1<<i))
- {
- res[i] = b[i];
- }
- else{
- res[i] = a[i];
- }
- }
- return res;
-}
-
-AVX2NEON_ABI
-__m128 _mm_cmpngt_ps (__m128 a, __m128 b)
-{
- return __m128(vmvnq_s32(__m128i(_mm_cmpgt_ps(a,b))));
-}
-
-
-AVX2NEON_ABI
-__m128i _mm_loadl_epi64 (__m128i const* mem_addr)
-{
- int64x2_t y;
- y[0] = *(int64_t *)mem_addr;
- y[1] = 0;
- return __m128i(y);
-}
-
-AVX2NEON_ABI
-int _mm_movemask_popcnt(__m128 a)
-{
- return __builtin_popcount(_mm_movemask_ps(a));
-}
-
-AVX2NEON_ABI
-__m128 _mm_maskload_ps (float const * mem_addr, __m128i mask)
-{
- __m128 res;
- for (int i=0;i<4;i++) {
- if (mask[i] & 0x80000000) res[i] = mem_addr[i]; else res[i] = 0;
- }
- return res;
-}
-
-AVX2NEON_ABI
-void _mm_maskstore_ps (float * mem_addr, __m128i mask, __m128 a)
-{
- for (int i=0;i<4;i++) {
- if (mask[i] & 0x80000000) mem_addr[i] = a[i];
- }
-}
-
-AVX2NEON_ABI
-void _mm_maskstore_epi32 (int * mem_addr, __m128i mask, __m128i a)
-{
- for (int i=0;i<4;i++) {
- if (mask[i] & 0x80000000) mem_addr[i] = a[i];
- }
-}
-
-AVX2NEON_ABI
-__m128 _mm_fnmsub_ps (__m128 a, __m128 b, __m128 c)
-{
- return vnegq_f32(vfmaq_f32(c,a,b));
-}
-
-#define _mm_fnmsub_ss _mm_fnmsub_ps
-
-AVX2NEON_ABI
-__m128 _mm_fnmadd_ps (__m128 a, __m128 b, __m128 c)
-{
- return vfmsq_f32(c,a,b);
-}
-
-#define _mm_fnmadd_ss _mm_fnmadd_ps
-
-
-AVX2NEON_ABI
-__m128 _mm_broadcast_ss (float const * mem_addr)
-{
- return vdupq_n_f32(*mem_addr);
-}
-
-
-AVX2NEON_ABI
-__m128 _mm_fmsub_ps (__m128 a, __m128 b, __m128 c)
-{
- return vfmaq_f32(vnegq_f32(c),a,b);
-}
-
-#define _mm_fmsub_ss _mm_fmsub_ps
-#define _mm_fmadd_ps _mm_madd_ps
-#define _mm_fmadd_ss _mm_madd_ps
-
-
-
-template<int code>
-AVX2NEON_ABI float32x4_t dpps_neon(const float32x4_t& a,const float32x4_t& b)
-{
- float v;
- v = 0;
- v += (code & 0x10) ? a[0]*b[0] : 0;
- v += (code & 0x20) ? a[1]*b[1] : 0;
- v += (code & 0x40) ? a[2]*b[2] : 0;
- v += (code & 0x80) ? a[3]*b[3] : 0;
- float32x4_t res;
- res[0] = (code & 0x1) ? v : 0;
- res[1] = (code & 0x2) ? v : 0;
- res[2] = (code & 0x4) ? v : 0;
- res[3] = (code & 0x8) ? v : 0;
- return res;
-}
-
-template<>
-inline float32x4_t dpps_neon<0x7f>(const float32x4_t& a,const float32x4_t& b)
-{
- float v;
- float32x4_t m = _mm_mul_ps(a,b);
- m[3] = 0;
- v = vaddvq_f32(m);
- return _mm_set1_ps(v);
-}
-
-template<>
-inline float32x4_t dpps_neon<0xff>(const float32x4_t& a,const float32x4_t& b)
-{
- float v;
- float32x4_t m = _mm_mul_ps(a,b);
- v = vaddvq_f32(m);
- return _mm_set1_ps(v);
-}
-
-#define _mm_dp_ps(a,b,c) dpps_neon<c>((a),(b))
-
-
-
-AVX2NEON_ABI
-__m128 _mm_cmpnge_ps (__m128 a, __m128 b)
-{
- return __m128(vmvnq_s32(__m128i(_mm_cmpge_ps(a,b))));
-}
-
-
-AVX2NEON_ABI
-__m128 _mm_permutevar_ps (__m128 a, __m128i b)
-{
- __m128 x;
- for (int i=0;i<4;i++)
- {
- x[i] = a[b[i&3]];
- }
- return x;
-}
-
-AVX2NEON_ABI
-__m256i _mm256_setzero_si256()
-{
- __m256i res;
- res.lo = res.hi = vdupq_n_s32(0);
- return res;
-}
-
-AVX2NEON_ABI
-__m256 _mm256_setzero_ps()
-{
- __m256 res;
- res.lo = res.hi = vdupq_n_f32(0.0f);
- return res;
-}
-
-AVX2NEON_ABI
-__m256i _mm256_undefined_si256()
-{
- return _mm256_setzero_si256();
-}
-
-AVX2NEON_ABI
-__m256 _mm256_undefined_ps()
-{
- return _mm256_setzero_ps();
-}
-
-CAST_SIMD_TYPE(__m256d,_mm256_castps_pd,__m256,float64x2_t)
-CAST_SIMD_TYPE(__m256i,_mm256_castps_si256,__m256,__m128i)
-CAST_SIMD_TYPE(__m256, _mm256_castsi256_ps, __m256i,__m128)
-CAST_SIMD_TYPE(__m256, _mm256_castpd_ps ,__m256d,__m128)
-CAST_SIMD_TYPE(__m256d, _mm256_castsi256_pd, __m256i,float64x2_t)
-CAST_SIMD_TYPE(__m256i, _mm256_castpd_si256, __m256d,__m128i)
-
-
-
-
-AVX2NEON_ABI
-__m128 _mm256_castps256_ps128 (__m256 a)
-{
- return a.lo;
-}
-
-AVX2NEON_ABI
-__m256i _mm256_castsi128_si256 (__m128i a)
-{
- __m256i res;
- res.lo = a ;
- res.hi = vdupq_n_s32(0);
- return res;
-}
-
-AVX2NEON_ABI
-__m128i _mm256_castsi256_si128 (__m256i a)
-{
- return a.lo;
-}
-
-AVX2NEON_ABI
-__m256 _mm256_castps128_ps256 (__m128 a)
-{
- __m256 res;
- res.lo = a;
- res.hi = vdupq_n_f32(0);
- return res;
-}
-
-
-AVX2NEON_ABI
-__m256 _mm256_broadcast_ss (float const * mem_addr)
-{
- __m256 res;
- res.lo = res.hi = vdupq_n_f32(*mem_addr);
- return res;
-}
-
-
-
-AVX2NEON_ABI
-__m256i _mm256_set_epi32 (int e7, int e6, int e5, int e4, int e3, int e2, int e1, int e0)
-{
- __m128i lo = {e0,e1,e2,e3}, hi = {e4,e5,e6,e7};
- __m256i res;
- res.lo = lo; res.hi = hi;
- return res;
-
-}
-
-AVX2NEON_ABI
-__m256i _mm256_set1_epi32 (int a)
-{
- __m256i res;
- res.lo = res.hi = vdupq_n_s32(a);
- return res;
-}
-
-
-
-
-AVX2NEON_ABI
-int _mm256_movemask_ps(const __m256& v)
-{
- return (_mm_movemask_ps(v.hi) << 4) | _mm_movemask_ps(v.lo);
-}
-
-template<int imm8>
-AVX2NEON_ABI
-__m256 __mm256_permute_ps (const __m256& a)
-{
- __m256 res;
- res.lo = _mm_shuffle_ps(a.lo,a.lo,imm8);
- res.hi = _mm_shuffle_ps(a.hi,a.hi,imm8);
- return res;
-
-}
-
-#define _mm256_permute_ps(a,c) __mm256_permute_ps<c>(a)
-
-
-template<int imm8>
-AVX2NEON_ABI
-__m256 __mm256_shuffle_ps (const __m256 a,const __m256& b)
-{
- __m256 res;
- res.lo = _mm_shuffle_ps(a.lo,b.lo,imm8);
- res.hi = _mm_shuffle_ps(a.hi,b.hi,imm8);
- return res;
-
-}
-
-#define _mm256_shuffle_ps(a,b,c) __mm256_shuffle_ps<c>(a,b)
-
-AVX2NEON_ABI
-__m256i _mm256_set1_epi64x (long long a)
-{
- __m256i res;
- int64x2_t t = vdupq_n_s64(a);
- res.lo = res.hi = __m128i(t);
- return res;
-}
-
-
-AVX2NEON_ABI
-__m256 _mm256_permute2f128_ps (__m256 a, __m256 b, int imm8)
-{
- __m256 res;
- __m128 tmp;
- switch (imm8 & 0x7)
- {
- case 0: tmp = a.lo; break;
- case 1: tmp = a.hi; break;
- case 2: tmp = b.lo; break;
- case 3: tmp = b.hi; break;
- }
- if (imm8 & 0x8)
- tmp = _mm_setzero_ps();
-
-
-
- res.lo = tmp;
- imm8 >>= 4;
-
- switch (imm8 & 0x7)
- {
- case 0: tmp = a.lo; break;
- case 1: tmp = a.hi; break;
- case 2: tmp = b.lo; break;
- case 3: tmp = b.hi; break;
- }
- if (imm8 & 0x8)
- tmp = _mm_setzero_ps();
-
- res.hi = tmp;
-
- return res;
-}
-
-AVX2NEON_ABI
-__m256 _mm256_moveldup_ps (__m256 a)
-{
- __m256 res;
- res.lo[0] = res.lo[1] = a.lo[0];
- res.lo[2] = res.lo[3] = a.lo[2];
- res.hi[0] = res.hi[1] = a.hi[0];
- res.hi[2] = res.hi[3] = a.hi[2];
- return res;
-
-}
-
-AVX2NEON_ABI
-__m256 _mm256_movehdup_ps (__m256 a)
-{
- __m256 res;
- res.lo[0] = res.lo[1] = a.lo[1];
- res.lo[2] = res.lo[3] = a.lo[3];
- res.hi[0] = res.hi[1] = a.hi[1];
- res.hi[2] = res.hi[3] = a.hi[3];
- return res;
-}
-
-AVX2NEON_ABI
-__m256 _mm256_insertf128_ps (__m256 a, __m128 b, int imm8)
-{
- __m256 res = a;
- if (imm8 & 1) res.hi = b;
- else res.lo = b;
- return res;
-}
-
-
-AVX2NEON_ABI
-__m128 _mm256_extractf128_ps (__m256 a, const int imm8)
-{
- if (imm8 & 1) return a.hi;
- return a.lo;
-}
-
-
-AVX2NEON_ABI
-__m256d _mm256_movedup_pd (__m256d a)
-{
- __m256d res;
- res.hi = a.hi;
- res.lo[0] = res.lo[1] = a.lo[0];
- return res;
-}
-
-AVX2NEON_ABI
-__m256i _mm256_abs_epi32(__m256i a)
-{
- __m256i res;
- res.lo = vabsq_s32(a.lo);
- res.hi = vabsq_s32(a.hi);
- return res;
-}
-
-UNARY_AVX_OP(__m256,_mm256_sqrt_ps,_mm_sqrt_ps)
-UNARY_AVX_OP(__m256,_mm256_rsqrt_ps,_mm_rsqrt_ps)
-UNARY_AVX_OP(__m256,_mm256_rcp_ps,_mm_rcp_ps)
-UNARY_AVX_OP(__m256,_mm256_floor_ps,vrndmq_f32)
-UNARY_AVX_OP(__m256,_mm256_ceil_ps,vrndpq_f32)
-
-
-BINARY_AVX_OP(__m256i,_mm256_add_epi32,_mm_add_epi32)
-BINARY_AVX_OP(__m256i,_mm256_sub_epi32,_mm_sub_epi32)
-BINARY_AVX_OP(__m256i,_mm256_mullo_epi32,_mm_mullo_epi32)
-
-BINARY_AVX_OP(__m256i,_mm256_min_epi32,_mm_min_epi32)
-BINARY_AVX_OP(__m256i,_mm256_max_epi32,_mm_max_epi32)
-BINARY_AVX_OP_CAST(__m256i,_mm256_min_epu32,vminq_u32,__m128i,uint32x4_t)
-BINARY_AVX_OP_CAST(__m256i,_mm256_max_epu32,vmaxq_u32,__m128i,uint32x4_t)
-
-BINARY_AVX_OP(__m256,_mm256_min_ps,_mm_min_ps)
-BINARY_AVX_OP(__m256,_mm256_max_ps,_mm_max_ps)
-
-BINARY_AVX_OP(__m256,_mm256_add_ps,_mm_add_ps)
-BINARY_AVX_OP(__m256,_mm256_mul_ps,_mm_mul_ps)
-BINARY_AVX_OP(__m256,_mm256_sub_ps,_mm_sub_ps)
-BINARY_AVX_OP(__m256,_mm256_div_ps,_mm_div_ps)
-
-BINARY_AVX_OP(__m256,_mm256_and_ps,_mm_and_ps)
-BINARY_AVX_OP(__m256,_mm256_andnot_ps,_mm_andnot_ps)
-BINARY_AVX_OP(__m256,_mm256_or_ps,_mm_or_ps)
-BINARY_AVX_OP(__m256,_mm256_xor_ps,_mm_xor_ps)
-
-BINARY_AVX_OP_CAST(__m256d,_mm256_and_pd,vandq_s64,float64x2_t,int64x2_t)
-BINARY_AVX_OP_CAST(__m256d,_mm256_or_pd,vorrq_s64,float64x2_t,int64x2_t)
-BINARY_AVX_OP_CAST(__m256d,_mm256_xor_pd,veorq_s64,float64x2_t,int64x2_t)
-
-
-
-BINARY_AVX_OP(__m256i,_mm256_and_si256,_mm_and_si128)
-BINARY_AVX_OP(__m256i,_mm256_or_si256,_mm_or_si128)
-BINARY_AVX_OP(__m256i,_mm256_xor_si256,_mm_xor_si128)
-
-
-BINARY_AVX_OP(__m256,_mm256_unpackhi_ps,_mm_unpackhi_ps)
-BINARY_AVX_OP(__m256,_mm256_unpacklo_ps,_mm_unpacklo_ps)
-TERNARY_AVX_OP(__m256,_mm256_blendv_ps,_mm_blendv_ps)
-
-
-TERNARY_AVX_OP(__m256,_mm256_fmadd_ps,_mm_fmadd_ps)
-TERNARY_AVX_OP(__m256,_mm256_fnmadd_ps,_mm_fnmadd_ps)
-TERNARY_AVX_OP(__m256,_mm256_fmsub_ps,_mm_fmsub_ps)
-TERNARY_AVX_OP(__m256,_mm256_fnmsub_ps,_mm_fnmsub_ps)
-
-
-BINARY_AVX_OP(__m256i,_mm256_unpackhi_epi32,_mm_unpackhi_epi32)
-BINARY_AVX_OP(__m256i,_mm256_unpacklo_epi32,_mm_unpacklo_epi32)
-
-
-BINARY_AVX_OP(__m256i,_mm256_cmpeq_epi32,_mm_cmpeq_epi32)
-BINARY_AVX_OP(__m256i,_mm256_cmpgt_epi32,_mm_cmpgt_epi32)
-BINARY_AVX_OP(__m256,_mm256_cmpeq_ps,_mm_cmpeq_ps)
-BINARY_AVX_OP(__m256,_mm256_cmpneq_ps,_mm_cmpneq_ps)
-BINARY_AVX_OP(__m256,_mm256_cmpnlt_ps,_mm_cmpnlt_ps)
-BINARY_AVX_OP(__m256,_mm256_cmpngt_ps,_mm_cmpngt_ps)
-BINARY_AVX_OP(__m256,_mm256_cmpge_ps,_mm_cmpge_ps)
-BINARY_AVX_OP(__m256,_mm256_cmpnge_ps,_mm_cmpnge_ps)
-BINARY_AVX_OP(__m256,_mm256_cmplt_ps,_mm_cmplt_ps)
-BINARY_AVX_OP(__m256,_mm256_cmple_ps,_mm_cmple_ps)
-BINARY_AVX_OP(__m256,_mm256_cmpgt_ps,_mm_cmpgt_ps)
-BINARY_AVX_OP(__m256,_mm256_cmpnle_ps,_mm_cmpnle_ps)
-
-
-AVX2NEON_ABI
-__m256i _mm256_cvtps_epi32 (__m256 a)
-{
- __m256i res;
- res.lo = _mm_cvtps_epi32(a.lo);
- res.hi = _mm_cvtps_epi32(a.hi);
- return res;
-
-}
-
-AVX2NEON_ABI
-__m256i _mm256_cvttps_epi32 (__m256 a)
-{
- __m256i res;
- res.lo = _mm_cvttps_epi32(a.lo);
- res.hi = _mm_cvttps_epi32(a.hi);
- return res;
-
-}
-
-AVX2NEON_ABI
-__m256 _mm256_loadu_ps (float const * mem_addr)
-{
- __m256 res;
- res.lo = *(__m128 *)(mem_addr + 0);
- res.hi = *(__m128 *)(mem_addr + 4);
- return res;
-}
-#define _mm256_load_ps _mm256_loadu_ps
-
-
-AVX2NEON_ABI
-int _mm256_testz_ps (const __m256& a, const __m256& b)
-{
- __m256 t = a;
- if (&a != &b)
- t = _mm256_and_ps(a,b);
-
- __m128i l = vshrq_n_s32(__m128i(t.lo),31);
- __m128i h = vshrq_n_s32(__m128i(t.hi),31);
- return vaddvq_s32(vaddq_s32(l,h)) == 0;
-}
-
-
-AVX2NEON_ABI
-__m256i _mm256_set_epi64x (int64_t e3, int64_t e2, int64_t e1, int64_t e0)
-{
- __m256i res;
- int64x2_t t0 = {e0,e1};
- int64x2_t t1 = {e2,e3};
- res.lo = __m128i(t0);
- res.hi = __m128i(t1);
- return res;
-}
-
-AVX2NEON_ABI
-__m256d _mm256_setzero_pd ()
-{
- __m256d res;
- res.lo = res.hi = vdupq_n_f64(0);
- return res;
-}
-
-AVX2NEON_ABI
-int _mm256_movemask_pd (__m256d a)
-{
- int res = 0;
- uint64x2_t x;
- x = uint64x2_t(a.lo);
- res |= (x[0] >> 63) ? 1 : 0;
- res |= (x[0] >> 63) ? 2 : 0;
- x = uint64x2_t(a.hi);
- res |= (x[0] >> 63) ? 4 : 0;
- res |= (x[0] >> 63) ? 8 : 0;
- return res;
-}
-
-AVX2NEON_ABI
-__m256i _mm256_cmpeq_epi64 (__m256i a, __m256i b)
-{
- __m256i res;
- res.lo = __m128i(vceqq_s64(int64x2_t(a.lo),int64x2_t(b.lo)));
- res.hi = __m128i(vceqq_s64(int64x2_t(a.hi),int64x2_t(b.hi)));
- return res;
-}
-
-AVX2NEON_ABI
-__m256i _mm256_cmpeq_pd (__m256d a, __m256d b)
-{
- __m256i res;
- res.lo = __m128i(vceqq_f64(a.lo,b.lo));
- res.hi = __m128i(vceqq_f64(a.hi,b.hi));
- return res;
-}
-
-
-AVX2NEON_ABI
-int _mm256_testz_pd (const __m256d& a, const __m256d& b)
-{
- __m256d t = a;
-
- if (&a != &b)
- t = _mm256_and_pd(a,b);
-
- return _mm256_movemask_pd(t) == 0;
-}
-
-AVX2NEON_ABI
-__m256d _mm256_blendv_pd (__m256d a, __m256d b, __m256d mask)
-{
- __m256d res;
- uint64x2_t t = uint64x2_t(mask.lo);
- res.lo[0] = (t[0] >> 63) ? b.lo[0] : a.lo[0];
- res.lo[1] = (t[1] >> 63) ? b.lo[1] : a.lo[1];
- t = uint64x2_t(mask.hi);
- res.hi[0] = (t[0] >> 63) ? b.hi[0] : a.hi[0];
- res.hi[1] = (t[1] >> 63) ? b.hi[1] : a.hi[1];
- return res;
-}
-
-template<int imm8>
-__m256 __mm256_dp_ps (__m256 a, __m256 b)
-{
- __m256 res;
- res.lo = _mm_dp_ps(a.lo,b.lo,imm8);
- res.hi = _mm_dp_ps(a.hi,b.hi,imm8);
- return res;
-}
-
-#define _mm256_dp_ps(a,b,c) __mm256_dp_ps<c>(a,b)
-
-AVX2NEON_ABI
-double _mm256_permute4x64_pd_select(__m256d a, const int imm8)
-{
- switch (imm8 & 3) {
- case 0:
- return a.lo[0];
- case 1:
- return a.lo[1];
- case 2:
- return a.hi[0];
- case 3:
- return a.hi[1];
- }
- __builtin_unreachable();
- return 0;
-}
-
-AVX2NEON_ABI
-__m256d _mm256_permute4x64_pd (__m256d a, const int imm8)
-{
- __m256d res;
- res.lo[0] = _mm256_permute4x64_pd_select(a,imm8 >> 0);
- res.lo[1] = _mm256_permute4x64_pd_select(a,imm8 >> 2);
- res.hi[0] = _mm256_permute4x64_pd_select(a,imm8 >> 4);
- res.hi[1] = _mm256_permute4x64_pd_select(a,imm8 >> 6);
-
- return res;
-}
-
-AVX2NEON_ABI
-__m256i _mm256_insertf128_si256 (__m256i a, __m128i b, int imm8)
-{
- return __m256i(_mm256_insertf128_ps((__m256)a,(__m128)b,imm8));
-}
-
-
-AVX2NEON_ABI
-__m256i _mm256_loadu_si256 (__m256i const * mem_addr)
-{
- __m256i res;
- res.lo = *(__m128i *)((int32_t *)mem_addr + 0);
- res.hi = *(__m128i *)((int32_t *)mem_addr + 4);
- return res;
-}
-
-#define _mm256_load_si256 _mm256_loadu_si256
-
-AVX2NEON_ABI
-void _mm256_storeu_ps (float * mem_addr, __m256 a)
-{
- *(__m128 *)(mem_addr + 0) = a.lo;
- *(__m128 *)(mem_addr + 4) = a.hi;
-
-}
-
-#define _mm256_store_ps _mm256_storeu_ps
-#define _mm256_stream_ps _mm256_storeu_ps
-
-
-AVX2NEON_ABI
-void _mm256_storeu_si256 (__m256i * mem_addr, __m256i a)
-{
- *(__m128i *)((int *)mem_addr + 0) = a.lo;
- *(__m128i *)((int *)mem_addr + 4) = a.hi;
-
-}
-
-#define _mm256_store_si256 _mm256_storeu_si256
-
-
-
-AVX2NEON_ABI
-__m256 _mm256_maskload_ps (float const * mem_addr, __m256i mask)
-{
- __m256 res;
- res.lo = _mm_maskload_ps(mem_addr,mask.lo);
- res.hi = _mm_maskload_ps(mem_addr + 4,mask.hi);
- return res;
-
-}
-
-
-AVX2NEON_ABI
-__m256i _mm256_cvtepu8_epi32 (__m128i a)
-{
- __m256i res;
- uint8x16_t x = uint8x16_t(a);
- for (int i=0;i<4;i++)
- {
- res.lo[i] = x[i];
- res.hi[i] = x[i+4];
- }
- return res;
-}
-
-
-AVX2NEON_ABI
-__m256i _mm256_cvtepi8_epi32 (__m128i a)
-{
- __m256i res;
- int8x16_t x = int8x16_t(a);
- for (int i=0;i<4;i++)
- {
- res.lo[i] = x[i];
- res.hi[i] = x[i+4];
- }
- return res;
-}
-
-
-AVX2NEON_ABI
-__m256i _mm256_cvtepu16_epi32 (__m128i a)
-{
- __m256i res;
- uint16x8_t x = uint16x8_t(a);
- for (int i=0;i<4;i++)
- {
- res.lo[i] = x[i];
- res.hi[i] = x[i+4];
- }
- return res;
-}
-
-AVX2NEON_ABI
-__m256i _mm256_cvtepi16_epi32 (__m128i a)
-{
- __m256i res;
- int16x8_t x = int16x8_t(a);
- for (int i=0;i<4;i++)
- {
- res.lo[i] = x[i];
- res.hi[i] = x[i+4];
- }
- return res;
-}
-
-
-
-AVX2NEON_ABI
-void _mm256_maskstore_epi32 (int* mem_addr, __m256i mask, __m256i a)
-{
- _mm_maskstore_epi32(mem_addr,mask.lo,a.lo);
- _mm_maskstore_epi32(mem_addr + 4,mask.hi,a.hi);
-}
-
-AVX2NEON_ABI
-__m256i _mm256_slli_epi32 (__m256i a, int imm8)
-{
- __m256i res;
- res.lo = _mm_slli_epi32(a.lo,imm8);
- res.hi = _mm_slli_epi32(a.hi,imm8);
- return res;
-}
-
-
-AVX2NEON_ABI
-__m256i _mm256_srli_epi32 (__m256i a, int imm8)
-{
- __m256i res;
- res.lo = _mm_srli_epi32(a.lo,imm8);
- res.hi = _mm_srli_epi32(a.hi,imm8);
- return res;
-}
-
-AVX2NEON_ABI
-__m256i _mm256_srai_epi32 (__m256i a, int imm8)
-{
- __m256i res;
- res.lo = _mm_srai_epi32(a.lo,imm8);
- res.hi = _mm_srai_epi32(a.hi,imm8);
- return res;
-}
-
-
-AVX2NEON_ABI
-__m256i _mm256_sllv_epi32 (__m256i a, __m256i count)
-{
- __m256i res;
- res.lo = vshlq_s32(a.lo,count.lo);
- res.hi = vshlq_s32(a.hi,count.hi);
- return res;
-
-}
-
-
-AVX2NEON_ABI
-__m256i _mm256_srav_epi32 (__m256i a, __m256i count)
-{
- __m256i res;
- res.lo = vshlq_s32(a.lo,vnegq_s32(count.lo));
- res.hi = vshlq_s32(a.hi,vnegq_s32(count.hi));
- return res;
-
-}
-
-AVX2NEON_ABI
-__m256i _mm256_srlv_epi32 (__m256i a, __m256i count)
-{
- __m256i res;
- res.lo = __m128i(vshlq_u32(uint32x4_t(a.lo),vnegq_s32(count.lo)));
- res.hi = __m128i(vshlq_u32(uint32x4_t(a.hi),vnegq_s32(count.hi)));
- return res;
-
-}
-
-
-AVX2NEON_ABI
-__m256i _mm256_permute2f128_si256 (__m256i a, __m256i b, int imm8)
-{
- return __m256i(_mm256_permute2f128_ps(__m256(a),__m256(b),imm8));
-}
-
-
-AVX2NEON_ABI
-__m128i _mm256_extractf128_si256 (__m256i a, const int imm8)
-{
- if (imm8 & 1) return a.hi;
- return a.lo;
-}
-
-AVX2NEON_ABI
-__m256 _mm256_set1_ps(float x)
-{
- __m256 res;
- res.lo = res.hi = vdupq_n_f32(x);
- return res;
-}
-
-AVX2NEON_ABI
-__m256 _mm256_set_ps (float e7, float e6, float e5, float e4, float e3, float e2, float e1, float e0)
-{
- __m256 res;
- res.lo = _mm_set_ps(e3,e2,e1,e0);
- res.hi = _mm_set_ps(e7,e6,e5,e4);
- return res;
-}
-
-AVX2NEON_ABI
-__m256 _mm256_broadcast_ps (__m128 const * mem_addr)
-{
- __m256 res;
- res.lo = res.hi = *mem_addr;
- return res;
-}
-
-AVX2NEON_ABI
-__m256 _mm256_cvtepi32_ps (__m256i a)
-{
- __m256 res;
- res.lo = _mm_cvtepi32_ps(a.lo);
- res.hi = _mm_cvtepi32_ps(a.hi);
- return res;
-}
-AVX2NEON_ABI
-void _mm256_maskstore_ps (float * mem_addr, __m256i mask, __m256 a)
-{
- for (int i=0;i<4;i++) {
- if (mask.lo[i] & 0x80000000) mem_addr[i] = a.lo[i];
- if (mask.hi[i] & 0x80000000) mem_addr[i+4] = a.hi[i];
- }
-}
-
-AVX2NEON_ABI
-__m256d _mm256_andnot_pd (__m256d a, __m256d b)
-{
- __m256d res;
- res.lo = float64x2_t(_mm_andnot_ps(__m128(a.lo),__m128(b.lo)));
- res.hi = float64x2_t(_mm_andnot_ps(__m128(a.hi),__m128(b.hi)));
- return res;
-}
-
-AVX2NEON_ABI
-__m256 _mm256_blend_ps (__m256 a, __m256 b, const int imm8)
-{
- __m256 res;
- res.lo = _mm_blend_ps(a.lo,b.lo,imm8 & 0xf);
- res.hi = _mm_blend_ps(a.hi,b.hi,imm8 >> 4);
- return res;
-
-}
-
-
-AVX2NEON_ABI
-__m256i _mm256_blend_epi32 (__m256i a, __m256i b, const int imm8)
-{
- __m256i res;
- res.lo = _mm_blend_epi32(a.lo,b.lo,imm8 & 0xf);
- res.hi = _mm_blend_epi32(a.hi,b.hi,imm8 >> 4);
- return res;
-
-}
-
-AVX2NEON_ABI
-__m256i _mm256_i32gather_epi32 (int const* base_addr, __m256i vindex, const int scale)
-{
- __m256i res;
- for (int i=0;i<4;i++)
- {
- res.lo[i] = *(int *)((char *) base_addr + (vindex.lo[i]*scale));
- res.hi[i] = *(int *)((char *) base_addr + (vindex.hi[i]*scale));
- }
- return res;
-}
-
-
-AVX2NEON_ABI
-__m256i _mm256_mask_i32gather_epi32 (__m256i src, int const* base_addr, __m256i vindex, __m256i mask, const int scale)
-{
- __m256i res = _mm256_setzero_si256();
- for (int i=0;i<4;i++)
- {
- if (mask.lo[i] >> 31) res.lo[i] = *(int *)((char *) base_addr + (vindex.lo[i]*scale));
- if (mask.hi[i] >> 31) res.hi[i] = *(int *)((char *) base_addr + (vindex.hi[i]*scale));
- }
-
- return res;
-
-}
-
-
diff --git a/thirdparty/embree-aarch64/common/math/SSE2NEON.h b/thirdparty/embree-aarch64/common/math/SSE2NEON.h
deleted file mode 100644
index 2013151d31..0000000000
--- a/thirdparty/embree-aarch64/common/math/SSE2NEON.h
+++ /dev/null
@@ -1,1753 +0,0 @@
-#ifndef SSE2NEON_H
-#define SSE2NEON_H
-
-// This header file provides a simple API translation layer
-// between SSE intrinsics to their corresponding ARM NEON versions
-//
-// This header file does not (yet) translate *all* of the SSE intrinsics.
-// Since this is in support of a specific porting effort, I have only
-// included the intrinsics I needed to get my port to work.
-//
-// Questions/Comments/Feedback send to: jratcliffscarab@gmail.com
-//
-// If you want to improve or add to this project, send me an
-// email and I will probably approve your access to the depot.
-//
-// Project is located here:
-//
-// https://github.com/jratcliff63367/sse2neon
-//
-// Show your appreciation for open source by sending me a bitcoin tip to the following
-// address.
-//
-// TipJar: 1PzgWDSyq4pmdAXRH8SPUtta4SWGrt4B1p :
-// https://blockchain.info/address/1PzgWDSyq4pmdAXRH8SPUtta4SWGrt4B1p
-//
-//
-// Contributors to this project are:
-//
-// John W. Ratcliff : jratcliffscarab@gmail.com
-// Brandon Rowlett : browlett@nvidia.com
-// Ken Fast : kfast@gdeb.com
-// Eric van Beurden : evanbeurden@nvidia.com
-//
-//
-// *********************************************************************************************************************
-// Release notes for January 20, 2017 version:
-//
-// The unit tests have been refactored. They no longer assert on an error, instead they return a pass/fail condition
-// The unit-tests now test 10,000 random float and int values against each intrinsic.
-//
-// SSE2NEON now supports 95 SSE intrinsics. 39 of them have formal unit tests which have been implemented and
-// fully tested on NEON/ARM. The remaining 56 still need unit tests implemented.
-//
-// A struct is now defined in this header file called 'SIMDVec' which can be used by applications which
-// attempt to access the contents of an _m128 struct directly. It is important to note that accessing the __m128
-// struct directly is bad coding practice by Microsoft: @see: https://msdn.microsoft.com/en-us/library/ayeb3ayc.aspx
-//
-// However, some legacy source code may try to access the contents of an __m128 struct directly so the developer
-// can use the SIMDVec as an alias for it. Any casting must be done manually by the developer, as you cannot
-// cast or otherwise alias the base NEON data type for intrinsic operations.
-//
-// A bug was found with the _mm_shuffle_ps intrinsic. If the shuffle permutation was not one of the ones with
-// a custom/unique implementation causing it to fall through to the default shuffle implementation it was failing
-// to return the correct value. This is now fixed.
-//
-// A bug was found with the _mm_cvtps_epi32 intrinsic. This converts floating point values to integers.
-// It was not honoring the correct rounding mode. In SSE the default rounding mode when converting from float to int
-// is to use 'round to even' otherwise known as 'bankers rounding'. ARMv7 did not support this feature but ARMv8 does.
-// As it stands today, this header file assumes ARMv8. If you are trying to target really old ARM devices, you may get
-// a build error.
-//
-// Support for a number of new intrinsics was added, however, none of them yet have unit-tests to 100% confirm they are
-// producing the correct results on NEON. These unit tests will be added as soon as possible.
-//
-// Here is the list of new instrinsics which have been added:
-//
-// _mm_cvtss_f32 : extracts the lower order floating point value from the parameter
-// _mm_add_ss : adds the scalar single - precision floating point values of a and b
-// _mm_div_ps : Divides the four single - precision, floating - point values of a and b.
-// _mm_div_ss : Divides the scalar single - precision floating point value of a by b.
-// _mm_sqrt_ss : Computes the approximation of the square root of the scalar single - precision floating point value of in.
-// _mm_rsqrt_ps : Computes the approximations of the reciprocal square roots of the four single - precision floating point values of in.
-// _mm_comilt_ss : Compares the lower single - precision floating point scalar values of a and b using a less than operation
-// _mm_comigt_ss : Compares the lower single - precision floating point scalar values of a and b using a greater than operation.
-// _mm_comile_ss : Compares the lower single - precision floating point scalar values of a and b using a less than or equal operation.
-// _mm_comige_ss : Compares the lower single - precision floating point scalar values of a and b using a greater than or equal operation.
-// _mm_comieq_ss : Compares the lower single - precision floating point scalar values of a and b using an equality operation.
-// _mm_comineq_s : Compares the lower single - precision floating point scalar values of a and b using an inequality operation
-// _mm_unpackhi_epi8 : Interleaves the upper 8 signed or unsigned 8 - bit integers in a with the upper 8 signed or unsigned 8 - bit integers in b.
-// _mm_unpackhi_epi16: Interleaves the upper 4 signed or unsigned 16 - bit integers in a with the upper 4 signed or unsigned 16 - bit integers in b.
-//
-// *********************************************************************************************************************
-/*
-** The MIT license:
-**
-** Permission is hereby granted, free of charge, to any person obtaining a copy
-** of this software and associated documentation files (the "Software"), to deal
-** in the Software without restriction, including without limitation the rights
-** to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-** copies of the Software, and to permit persons to whom the Software is furnished
-** to do so, subject to the following conditions:
-**
-** The above copyright notice and this permission notice shall be included in all
-** copies or substantial portions of the Software.
-
-** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
-** WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-** CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-*/
-
-#pragma once
-
-#define GCC 1
-#define ENABLE_CPP_VERSION 0
-
-// enable precise emulation of _mm_min_ps and _mm_max_ps?
-// This would slow down the computation a bit, but gives consistent result with x86 SSE2.
-// (e.g. would solve a hole or NaN pixel in the rendering result)
-#define USE_PRECISE_MINMAX_IMPLEMENTATION (1)
-
-#if GCC
-#define FORCE_INLINE inline __attribute__((always_inline))
-#define ALIGN_STRUCT(x) __attribute__((aligned(x)))
-#else
-#define FORCE_INLINE inline
-#define ALIGN_STRUCT(x) __declspec(align(x))
-#endif
-
-#include <stdint.h>
-#include "arm_neon.h"
-#if defined(__aarch64__)
-#include "constants.h"
-#endif
-
-
-#if !defined(__has_builtin)
-#define __has_builtin(x) (0)
-#endif
-
-/*******************************************************/
-/* MACRO for shuffle parameter for _mm_shuffle_ps(). */
-/* Argument fp3 is a digit[0123] that represents the fp*/
-/* from argument "b" of mm_shuffle_ps that will be */
-/* placed in fp3 of result. fp2 is the same for fp2 in */
-/* result. fp1 is a digit[0123] that represents the fp */
-/* from argument "a" of mm_shuffle_ps that will be */
-/* places in fp1 of result. fp0 is the same for fp0 of */
-/* result */
-/*******************************************************/
-#if defined(__aarch64__)
-#define _MN_SHUFFLE(fp3,fp2,fp1,fp0) ( (uint8x16_t){ (((fp3)*4)+0), (((fp3)*4)+1), (((fp3)*4)+2), (((fp3)*4)+3), (((fp2)*4)+0), (((fp2)*4)+1), (((fp2)*4)+2), (((fp2)*4)+3), (((fp1)*4)+0), (((fp1)*4)+1), (((fp1)*4)+2), (((fp1)*4)+3), (((fp0)*4)+0), (((fp0)*4)+1), (((fp0)*4)+2), (((fp0)*4)+3) } )
-#define _MF_SHUFFLE(fp3,fp2,fp1,fp0) ( (uint8x16_t){ (((fp3)*4)+0), (((fp3)*4)+1), (((fp3)*4)+2), (((fp3)*4)+3), (((fp2)*4)+0), (((fp2)*4)+1), (((fp2)*4)+2), (((fp2)*4)+3), (((fp1)*4)+16+0), (((fp1)*4)+16+1), (((fp1)*4)+16+2), (((fp1)*4)+16+3), (((fp0)*4)+16+0), (((fp0)*4)+16+1), (((fp0)*4)+16+2), (((fp0)*4)+16+3) } )
-#endif
-
-#define _MM_SHUFFLE(fp3,fp2,fp1,fp0) (((fp3) << 6) | ((fp2) << 4) | \
- ((fp1) << 2) | ((fp0)))
-
-typedef float32x4_t __m128;
-typedef int32x4_t __m128i;
-
-// union intended to allow direct access to an __m128 variable using the names that the MSVC
-// compiler provides. This union should really only be used when trying to access the members
-// of the vector as integer values. GCC/clang allow native access to the float members through
-// a simple array access operator (in C since 4.6, in C++ since 4.8).
-//
-// Ideally direct accesses to SIMD vectors should not be used since it can cause a performance
-// hit. If it really is needed however, the original __m128 variable can be aliased with a
-// pointer to this union and used to access individual components. The use of this union should
-// be hidden behind a macro that is used throughout the codebase to access the members instead
-// of always declaring this type of variable.
-typedef union ALIGN_STRUCT(16) SIMDVec
-{
- float m128_f32[4]; // as floats - do not to use this. Added for convenience.
- int8_t m128_i8[16]; // as signed 8-bit integers.
- int16_t m128_i16[8]; // as signed 16-bit integers.
- int32_t m128_i32[4]; // as signed 32-bit integers.
- int64_t m128_i64[2]; // as signed 64-bit integers.
- uint8_t m128_u8[16]; // as unsigned 8-bit integers.
- uint16_t m128_u16[8]; // as unsigned 16-bit integers.
- uint32_t m128_u32[4]; // as unsigned 32-bit integers.
- uint64_t m128_u64[2]; // as unsigned 64-bit integers.
- double m128_f64[2]; // as signed double
-} SIMDVec;
-
-// ******************************************
-// CPU stuff
-// ******************************************
-
-typedef SIMDVec __m128d;
-
-#include <stdlib.h>
-
-#ifndef _MM_MASK_MASK
-#define _MM_MASK_MASK 0x1f80
-#define _MM_MASK_DIV_ZERO 0x200
-#define _MM_FLUSH_ZERO_ON 0x8000
-#define _MM_DENORMALS_ZERO_ON 0x40
-#define _MM_MASK_DENORM 0x100
-#endif
-#define _MM_SET_EXCEPTION_MASK(x)
-#define _MM_SET_FLUSH_ZERO_MODE(x)
-#define _MM_SET_DENORMALS_ZERO_MODE(x)
-
-FORCE_INLINE void _mm_pause()
-{
-}
-
-FORCE_INLINE void _mm_mfence()
-{
- __sync_synchronize();
-}
-
-#define _MM_HINT_T0 3
-#define _MM_HINT_T1 2
-#define _MM_HINT_T2 1
-#define _MM_HINT_NTA 0
-
-FORCE_INLINE void _mm_prefetch(const void* ptr, unsigned int level)
-{
- __builtin_prefetch(ptr);
-
-}
-
-FORCE_INLINE void* _mm_malloc(int size, int align)
-{
- void *ptr;
- // align must be multiple of sizeof(void *) for posix_memalign.
- if (align < sizeof(void *)) {
- align = sizeof(void *);
- }
-
- if ((align % sizeof(void *)) != 0) {
- // fallback to malloc
- ptr = malloc(size);
- } else {
- if (posix_memalign(&ptr, align, size)) {
- return 0;
- }
- }
-
- return ptr;
-}
-
-FORCE_INLINE void _mm_free(void* ptr)
-{
- free(ptr);
-}
-
-FORCE_INLINE int _mm_getcsr()
-{
- return 0;
-}
-
-FORCE_INLINE void _mm_setcsr(int val)
-{
- return;
-}
-
-// ******************************************
-// Set/get methods
-// ******************************************
-
-// extracts the lower order floating point value from the parameter : https://msdn.microsoft.com/en-us/library/bb514059%28v=vs.120%29.aspx?f=255&MSPPError=-2147217396
-#if defined(__aarch64__)
-FORCE_INLINE float _mm_cvtss_f32(const __m128& x)
-{
- return x[0];
-}
-#else
-FORCE_INLINE float _mm_cvtss_f32(__m128 a)
-{
- return vgetq_lane_f32(a, 0);
-}
-#endif
-
-// Sets the 128-bit value to zero https://msdn.microsoft.com/en-us/library/vstudio/ys7dw0kh(v=vs.100).aspx
-FORCE_INLINE __m128i _mm_setzero_si128()
-{
- return vdupq_n_s32(0);
-}
-
-// Clears the four single-precision, floating-point values. https://msdn.microsoft.com/en-us/library/vstudio/tk1t2tbz(v=vs.100).aspx
-FORCE_INLINE __m128 _mm_setzero_ps(void)
-{
- return vdupq_n_f32(0);
-}
-
-// Sets the four single-precision, floating-point values to w. https://msdn.microsoft.com/en-us/library/vstudio/2x1se8ha(v=vs.100).aspx
-FORCE_INLINE __m128 _mm_set1_ps(float _w)
-{
- return vdupq_n_f32(_w);
-}
-
-// Sets the four single-precision, floating-point values to w. https://msdn.microsoft.com/en-us/library/vstudio/2x1se8ha(v=vs.100).aspx
-FORCE_INLINE __m128 _mm_set_ps1(float _w)
-{
- return vdupq_n_f32(_w);
-}
-
-// Sets the four single-precision, floating-point values to the four inputs. https://msdn.microsoft.com/en-us/library/vstudio/afh0zf75(v=vs.100).aspx
-#if defined(__aarch64__)
-FORCE_INLINE __m128 _mm_set_ps(const float w, const float z, const float y, const float x)
-{
- float32x4_t t = { x, y, z, w };
- return t;
-}
-
-// Sets the four single-precision, floating-point values to the four inputs in reverse order. https://msdn.microsoft.com/en-us/library/vstudio/d2172ct3(v=vs.100).aspx
-FORCE_INLINE __m128 _mm_setr_ps(const float w, const float z , const float y , const float x )
-{
- float32x4_t t = { w, z, y, x };
- return t;
-}
-#else
-FORCE_INLINE __m128 _mm_set_ps(float w, float z, float y, float x)
-{
- float __attribute__((aligned(16))) data[4] = { x, y, z, w };
- return vld1q_f32(data);
-}
-
-// Sets the four single-precision, floating-point values to the four inputs in reverse order. https://msdn.microsoft.com/en-us/library/vstudio/d2172ct3(v=vs.100).aspx
-FORCE_INLINE __m128 _mm_setr_ps(float w, float z , float y , float x )
-{
- float __attribute__ ((aligned (16))) data[4] = { w, z, y, x };
- return vld1q_f32(data);
-}
-#endif
-
-// Sets the 4 signed 32-bit integer values to i. https://msdn.microsoft.com/en-us/library/vstudio/h4xscxat(v=vs.100).aspx
-FORCE_INLINE __m128i _mm_set1_epi32(int _i)
-{
- return vdupq_n_s32(_i);
-}
-
-//Set the first lane to of 4 signed single-position, floating-point number to w
-#if defined(__aarch64__)
-FORCE_INLINE __m128 _mm_set_ss(float _w)
-{
- float32x4_t res = {_w, 0, 0, 0};
- return res;
-}
-
-// Sets the 4 signed 32-bit integer values. https://msdn.microsoft.com/en-us/library/vstudio/019beekt(v=vs.100).aspx
-FORCE_INLINE __m128i _mm_set_epi32(int i3, int i2, int i1, int i0)
-{
- int32x4_t t = {i0,i1,i2,i3};
- return t;
-}
-#else
-FORCE_INLINE __m128 _mm_set_ss(float _w)
-{
- __m128 val = _mm_setzero_ps();
- return vsetq_lane_f32(_w, val, 0);
-}
-
-// Sets the 4 signed 32-bit integer values. https://msdn.microsoft.com/en-us/library/vstudio/019beekt(v=vs.100).aspx
-FORCE_INLINE __m128i _mm_set_epi32(int i3, int i2, int i1, int i0)
-{
- int32_t __attribute__((aligned(16))) data[4] = { i0, i1, i2, i3 };
- return vld1q_s32(data);
-}
-#endif
-
-// Stores four single-precision, floating-point values. https://msdn.microsoft.com/en-us/library/vstudio/s3h4ay6y(v=vs.100).aspx
-FORCE_INLINE void _mm_store_ps(float *p, __m128 a)
-{
- vst1q_f32(p, a);
-}
-
-// Stores four single-precision, floating-point values. https://msdn.microsoft.com/en-us/library/44e30x22(v=vs.100).aspx
-FORCE_INLINE void _mm_storeu_ps(float *p, __m128 a)
-{
- vst1q_f32(p, a);
-}
-
-FORCE_INLINE void _mm_storeu_si128(__m128i *p, __m128i a)
-{
- vst1q_s32((int32_t*) p,a);
-}
-
-// Stores four 32-bit integer values as (as a __m128i value) at the address p. https://msdn.microsoft.com/en-us/library/vstudio/edk11s13(v=vs.100).aspx
-FORCE_INLINE void _mm_store_si128(__m128i *p, __m128i a )
-{
- vst1q_s32((int32_t*) p,a);
-}
-
-// Stores the lower single - precision, floating - point value. https://msdn.microsoft.com/en-us/library/tzz10fbx(v=vs.100).aspx
-FORCE_INLINE void _mm_store_ss(float *p, __m128 a)
-{
- vst1q_lane_f32(p, a, 0);
-}
-
-// Reads the lower 64 bits of b and stores them into the lower 64 bits of a. https://msdn.microsoft.com/en-us/library/hhwf428f%28v=vs.90%29.aspx
-FORCE_INLINE void _mm_storel_epi64(__m128i* a, __m128i b)
-{
- *a = (__m128i)vsetq_lane_s64((int64_t)vget_low_s32(b), *(int64x2_t*)a, 0);
-}
-
-// Loads a single single-precision, floating-point value, copying it into all four words https://msdn.microsoft.com/en-us/library/vstudio/5cdkf716(v=vs.100).aspx
-FORCE_INLINE __m128 _mm_load1_ps(const float * p)
-{
- return vld1q_dup_f32(p);
-}
-
-// Loads four single-precision, floating-point values. https://msdn.microsoft.com/en-us/library/vstudio/zzd50xxt(v=vs.100).aspx
-FORCE_INLINE __m128 _mm_load_ps(const float * p)
-{
- return vld1q_f32(p);
-}
-
-// Loads four single-precision, floating-point values. https://msdn.microsoft.com/en-us/library/x1b16s7z%28v=vs.90%29.aspx
-FORCE_INLINE __m128 _mm_loadu_ps(const float * p)
-{
- // for neon, alignment doesn't matter, so _mm_load_ps and _mm_loadu_ps are equivalent for neon
- return vld1q_f32(p);
-}
-
-// Loads a single-precision, floating-point value into the low word and clears the upper three words. https://msdn.microsoft.com/en-us/library/548bb9h4%28v=vs.90%29.aspx
-FORCE_INLINE __m128 _mm_load_ss(const float * p)
-{
- __m128 result = vdupq_n_f32(0);
- return vsetq_lane_f32(*p, result, 0);
-}
-
-FORCE_INLINE __m128i _mm_loadu_si128(__m128i *p)
-{
- return (__m128i)vld1q_s32((const int32_t*) p);
-}
-
-
-// ******************************************
-// Logic/Binary operations
-// ******************************************
-
-// Compares for inequality. https://msdn.microsoft.com/en-us/library/sf44thbx(v=vs.100).aspx
-FORCE_INLINE __m128 _mm_cmpneq_ps(__m128 a, __m128 b)
-{
- return (__m128)vmvnq_s32((__m128i)vceqq_f32(a, b));
-}
-
-// Computes the bitwise AND-NOT of the four single-precision, floating-point values of a and b. https://msdn.microsoft.com/en-us/library/vstudio/68h7wd02(v=vs.100).aspx
-FORCE_INLINE __m128 _mm_andnot_ps(__m128 a, __m128 b)
-{
- return (__m128)vbicq_s32((__m128i)b, (__m128i)a); // *NOTE* argument swap
-}
-
-// Computes the bitwise AND of the 128-bit value in b and the bitwise NOT of the 128-bit value in a. https://msdn.microsoft.com/en-us/library/vstudio/1beaceh8(v=vs.100).aspx
-FORCE_INLINE __m128i _mm_andnot_si128(__m128i a, __m128i b)
-{
- return (__m128i)vbicq_s32(b, a); // *NOTE* argument swap
-}
-
-// Computes the bitwise AND of the 128-bit value in a and the 128-bit value in b. https://msdn.microsoft.com/en-us/library/vstudio/6d1txsa8(v=vs.100).aspx
-FORCE_INLINE __m128i _mm_and_si128(__m128i a, __m128i b)
-{
- return (__m128i)vandq_s32(a, b);
-}
-
-// Computes the bitwise AND of the four single-precision, floating-point values of a and b. https://msdn.microsoft.com/en-us/library/vstudio/73ck1xc5(v=vs.100).aspx
-FORCE_INLINE __m128 _mm_and_ps(__m128 a, __m128 b)
-{
- return (__m128)vandq_s32((__m128i)a, (__m128i)b);
-}
-
-// Computes the bitwise OR of the four single-precision, floating-point values of a and b. https://msdn.microsoft.com/en-us/library/vstudio/7ctdsyy0(v=vs.100).aspx
-FORCE_INLINE __m128 _mm_or_ps(__m128 a, __m128 b)
-{
- return (__m128)vorrq_s32((__m128i)a, (__m128i)b);
-}
-
-// Computes bitwise EXOR (exclusive-or) of the four single-precision, floating-point values of a and b. https://msdn.microsoft.com/en-us/library/ss6k3wk8(v=vs.100).aspx
-FORCE_INLINE __m128 _mm_xor_ps(__m128 a, __m128 b)
-{
- return (__m128)veorq_s32((__m128i)a, (__m128i)b);
-}
-
-// Computes the bitwise OR of the 128-bit value in a and the 128-bit value in b. https://msdn.microsoft.com/en-us/library/vstudio/ew8ty0db(v=vs.100).aspx
-FORCE_INLINE __m128i _mm_or_si128(__m128i a, __m128i b)
-{
- return (__m128i)vorrq_s32(a, b);
-}
-
-// Computes the bitwise XOR of the 128-bit value in a and the 128-bit value in b. https://msdn.microsoft.com/en-us/library/fzt08www(v=vs.100).aspx
-FORCE_INLINE __m128i _mm_xor_si128(__m128i a, __m128i b)
-{
- return veorq_s32(a, b);
-}
-
-// NEON does not provide this method
-// Creates a 4-bit mask from the most significant bits of the four single-precision, floating-point values. https://msdn.microsoft.com/en-us/library/vstudio/4490ys29(v=vs.100).aspx
-FORCE_INLINE int _mm_movemask_ps(__m128 a)
-{
-#if ENABLE_CPP_VERSION // I am not yet convinced that the NEON version is faster than the C version of this
- uint32x4_t &ia = *(uint32x4_t *)&a;
- return (ia[0] >> 31) | ((ia[1] >> 30) & 2) | ((ia[2] >> 29) & 4) | ((ia[3] >> 28) & 8);
-#else
-
-#if defined(__aarch64__)
- uint32x4_t t2 = vandq_u32(vreinterpretq_u32_f32(a), embree::movemask_mask);
- return vaddvq_u32(t2);
-#else
- static const uint32x4_t movemask = { 1, 2, 4, 8 };
- static const uint32x4_t highbit = { 0x80000000, 0x80000000, 0x80000000, 0x80000000 };
- uint32x4_t t0 = vreinterpretq_u32_f32(a);
- uint32x4_t t1 = vtstq_u32(t0, highbit);
- uint32x4_t t2 = vandq_u32(t1, movemask);
- uint32x2_t t3 = vorr_u32(vget_low_u32(t2), vget_high_u32(t2));
- return vget_lane_u32(t3, 0) | vget_lane_u32(t3, 1);
-#endif
-
-#endif
-}
-
-#if defined(__aarch64__)
-FORCE_INLINE int _mm_movemask_popcnt_ps(__m128 a)
-{
- uint32x4_t t2 = vandq_u32(vreinterpretq_u32_f32(a), embree::movemask_mask);
- t2 = vreinterpretq_u32_u8(vcntq_u8(vreinterpretq_u8_u32(t2)));
- return vaddvq_u32(t2);
-
-}
-#endif
-
-// Takes the upper 64 bits of a and places it in the low end of the result
-// Takes the lower 64 bits of b and places it into the high end of the result.
-FORCE_INLINE __m128 _mm_shuffle_ps_1032(__m128 a, __m128 b)
-{
- return vcombine_f32(vget_high_f32(a), vget_low_f32(b));
-}
-
-// takes the lower two 32-bit values from a, swaps them, and places them in the low end of the result
-// takes the higher two 32-bit values from b, swaps them, and places them in the high end of the result.
-FORCE_INLINE __m128 _mm_shuffle_ps_2301(__m128 a, __m128 b)
-{
- return vcombine_f32(vrev64_f32(vget_low_f32(a)), vrev64_f32(vget_high_f32(b)));
-}
-
-// keeps the low 64 bits of a in the low half and puts the high 64 bits of b in the high half
-FORCE_INLINE __m128 _mm_shuffle_ps_3210(__m128 a, __m128 b)
-{
- return vcombine_f32(vget_low_f32(a), vget_high_f32(b));
-}
-
-FORCE_INLINE __m128 _mm_shuffle_ps_0011(__m128 a, __m128 b)
-{
- return vcombine_f32(vdup_n_f32(vgetq_lane_f32(a, 1)), vdup_n_f32(vgetq_lane_f32(b, 0)));
-}
-
-FORCE_INLINE __m128 _mm_shuffle_ps_0022(__m128 a, __m128 b)
-{
- return vcombine_f32(vdup_n_f32(vgetq_lane_f32(a, 2)), vdup_n_f32(vgetq_lane_f32(b, 0)));
-}
-
-FORCE_INLINE __m128 _mm_shuffle_ps_2200(__m128 a, __m128 b)
-{
- return vcombine_f32(vdup_n_f32(vgetq_lane_f32(a, 0)), vdup_n_f32(vgetq_lane_f32(b, 2)));
-}
-
-FORCE_INLINE __m128 _mm_shuffle_ps_3202(__m128 a, __m128 b)
-{
- float32_t a0 = vgetq_lane_f32(a, 0);
- float32_t a2 = vgetq_lane_f32(a, 2);
- float32x2_t aVal = vdup_n_f32(a2);
- aVal = vset_lane_f32(a0, aVal, 1);
- return vcombine_f32(aVal, vget_high_f32(b));
-}
-
-FORCE_INLINE __m128 _mm_shuffle_ps_1133(__m128 a, __m128 b)
-{
- return vcombine_f32(vdup_n_f32(vgetq_lane_f32(a, 3)), vdup_n_f32(vgetq_lane_f32(b, 1)));
-}
-
-FORCE_INLINE __m128 _mm_shuffle_ps_2010(__m128 a, __m128 b)
-{
- float32_t b0 = vgetq_lane_f32(b, 0);
- float32_t b2 = vgetq_lane_f32(b, 2);
- float32x2_t bVal = vdup_n_f32(b0);
- bVal = vset_lane_f32(b2, bVal, 1);
- return vcombine_f32(vget_low_f32(a), bVal);
-}
-
-FORCE_INLINE __m128 _mm_shuffle_ps_2001(__m128 a, __m128 b)
-{
- float32_t b0 = vgetq_lane_f32(b, 0);
- float32_t b2 = vgetq_lane_f32(b, 2);
- float32x2_t bVal = vdup_n_f32(b0);
- bVal = vset_lane_f32(b2, bVal, 1);
- return vcombine_f32(vrev64_f32(vget_low_f32(a)), bVal);
-}
-
-FORCE_INLINE __m128 _mm_shuffle_ps_2032(__m128 a, __m128 b)
-{
- float32_t b0 = vgetq_lane_f32(b, 0);
- float32_t b2 = vgetq_lane_f32(b, 2);
- float32x2_t bVal = vdup_n_f32(b0);
- bVal = vset_lane_f32(b2, bVal, 1);
- return vcombine_f32(vget_high_f32(a), bVal);
-}
-
-FORCE_INLINE __m128 _mm_shuffle_ps_0321(__m128 a, __m128 b)
-{
- float32x2_t a21 = vget_high_f32(vextq_f32(a, a, 3));
- float32x2_t b03 = vget_low_f32(vextq_f32(b, b, 3));
- return vcombine_f32(a21, b03);
-}
-
-FORCE_INLINE __m128 _mm_shuffle_ps_2103(__m128 a, __m128 b)
-{
- float32x2_t a03 = vget_low_f32(vextq_f32(a, a, 3));
- float32x2_t b21 = vget_high_f32(vextq_f32(b, b, 3));
- return vcombine_f32(a03, b21);
-}
-
-FORCE_INLINE __m128 _mm_shuffle_ps_1010(__m128 a, __m128 b)
-{
- float32x2_t a10 = vget_low_f32(a);
- float32x2_t b10 = vget_low_f32(b);
- return vcombine_f32(a10, b10);
-}
-
-FORCE_INLINE __m128 _mm_shuffle_ps_1001(__m128 a, __m128 b)
-{
- float32x2_t a01 = vrev64_f32(vget_low_f32(a));
- float32x2_t b10 = vget_low_f32(b);
- return vcombine_f32(a01, b10);
-}
-
-FORCE_INLINE __m128 _mm_shuffle_ps_0101(__m128 a, __m128 b)
-{
- float32x2_t a01 = vrev64_f32(vget_low_f32(a));
- float32x2_t b01 = vrev64_f32(vget_low_f32(b));
- return vcombine_f32(a01, b01);
-}
-
-// NEON does not support a general purpose permute intrinsic
-// Currently I am not sure whether the C implementation is faster or slower than the NEON version.
-// Note, this has to be expanded as a template because the shuffle value must be an immediate value.
-// The same is true on SSE as well.
-// Selects four specific single-precision, floating-point values from a and b, based on the mask i. https://msdn.microsoft.com/en-us/library/vstudio/5f0858x0(v=vs.100).aspx
-template <int i>
-FORCE_INLINE __m128 _mm_shuffle_ps_default(const __m128& a, const __m128& b)
-{
-#if ENABLE_CPP_VERSION // I am not convinced that the NEON version is faster than the C version yet.
- __m128 ret;
- ret[0] = a[i & 0x3];
- ret[1] = a[(i >> 2) & 0x3];
- ret[2] = b[(i >> 4) & 0x03];
- ret[3] = b[(i >> 6) & 0x03];
- return ret;
-#else
-# if __has_builtin(__builtin_shufflevector)
- return __builtin_shufflevector( \
- a, b, (i) & (0x3), ((i) >> 2) & 0x3,
- (((i) >> 4) & 0x3) + 4, (((i) >> 6) & 0x3) + 4);
-# else
- const int i0 = (i >> 0)&0x3;
- const int i1 = (i >> 2)&0x3;
- const int i2 = (i >> 4)&0x3;
- const int i3 = (i >> 6)&0x3;
-
- if (&a == &b)
- {
- if (i0 == i1 && i0 == i2 && i0 == i3)
- {
- return (float32x4_t)vdupq_laneq_f32(a,i0);
- }
- static const uint8_t tbl[16] = {
- (i0*4) + 0,(i0*4) + 1,(i0*4) + 2,(i0*4) + 3,
- (i1*4) + 0,(i1*4) + 1,(i1*4) + 2,(i1*4) + 3,
- (i2*4) + 0,(i2*4) + 1,(i2*4) + 2,(i2*4) + 3,
- (i3*4) + 0,(i3*4) + 1,(i3*4) + 2,(i3*4) + 3
- };
-
- return (float32x4_t)vqtbl1q_s8(int8x16_t(b),*(uint8x16_t *)tbl);
-
- }
- else
- {
-
- static const uint8_t tbl[16] = {
- (i0*4) + 0,(i0*4) + 1,(i0*4) + 2,(i0*4) + 3,
- (i1*4) + 0,(i1*4) + 1,(i1*4) + 2,(i1*4) + 3,
- (i2*4) + 0 + 16,(i2*4) + 1 + 16,(i2*4) + 2 + 16,(i2*4) + 3 + 16,
- (i3*4) + 0 + 16,(i3*4) + 1 + 16,(i3*4) + 2 + 16,(i3*4) + 3 + 16
- };
-
- return float32x4_t(vqtbl2q_s8((int8x16x2_t){int8x16_t(a),int8x16_t(b)},*(uint8x16_t *)tbl));
- }
-# endif //builtin(shufflevector)
-#endif
-}
-
-template <int i >
-FORCE_INLINE __m128 _mm_shuffle_ps_function(const __m128& a, const __m128& b)
-{
- switch (i)
- {
- case _MM_SHUFFLE(1, 0, 3, 2):
- return _mm_shuffle_ps_1032(a, b);
- break;
- case _MM_SHUFFLE(2, 3, 0, 1):
- return _mm_shuffle_ps_2301(a, b);
- break;
- case _MM_SHUFFLE(3, 2, 1, 0):
- return _mm_shuffle_ps_3210(a, b);
- break;
- case _MM_SHUFFLE(0, 0, 1, 1):
- return _mm_shuffle_ps_0011(a, b);
- break;
- case _MM_SHUFFLE(0, 0, 2, 2):
- return _mm_shuffle_ps_0022(a, b);
- break;
- case _MM_SHUFFLE(2, 2, 0, 0):
- return _mm_shuffle_ps_2200(a, b);
- break;
- case _MM_SHUFFLE(3, 2, 0, 2):
- return _mm_shuffle_ps_3202(a, b);
- break;
- case _MM_SHUFFLE(1, 1, 3, 3):
- return _mm_shuffle_ps_1133(a, b);
- break;
- case _MM_SHUFFLE(2, 0, 1, 0):
- return _mm_shuffle_ps_2010(a, b);
- break;
- case _MM_SHUFFLE(2, 0, 0, 1):
- return _mm_shuffle_ps_2001(a, b);
- break;
- case _MM_SHUFFLE(2, 0, 3, 2):
- return _mm_shuffle_ps_2032(a, b);
- break;
- case _MM_SHUFFLE(0, 3, 2, 1):
- return _mm_shuffle_ps_0321(a, b);
- break;
- case _MM_SHUFFLE(2, 1, 0, 3):
- return _mm_shuffle_ps_2103(a, b);
- break;
- case _MM_SHUFFLE(1, 0, 1, 0):
- return _mm_shuffle_ps_1010(a, b);
- break;
- case _MM_SHUFFLE(1, 0, 0, 1):
- return _mm_shuffle_ps_1001(a, b);
- break;
- case _MM_SHUFFLE(0, 1, 0, 1):
- return _mm_shuffle_ps_0101(a, b);
- break;
- }
- return _mm_shuffle_ps_default<i>(a, b);
-}
-
-# if __has_builtin(__builtin_shufflevector)
-#define _mm_shuffle_ps(a,b,i) _mm_shuffle_ps_default<i>(a,b)
-# else
-#define _mm_shuffle_ps(a,b,i) _mm_shuffle_ps_function<i>(a,b)
-#endif
-
-// Takes the upper 64 bits of a and places it in the low end of the result
-// Takes the lower 64 bits of b and places it into the high end of the result.
-FORCE_INLINE __m128i _mm_shuffle_epi_1032(__m128i a, __m128i b)
-{
- return vcombine_s32(vget_high_s32(a), vget_low_s32(b));
-}
-
-// takes the lower two 32-bit values from a and swaps them and places in low end of result
-// takes the higher two 32 bit values from b and swaps them and places in high end of result.
-FORCE_INLINE __m128i _mm_shuffle_epi_2301(__m128i a, __m128i b)
-{
- return vcombine_s32(vrev64_s32(vget_low_s32(a)), vrev64_s32(vget_high_s32(b)));
-}
-
-// takes the upper three 32-bit values of a as the low lanes and the lowest 32 bits of b as the top lane
-// when a and b are the same, rotates the least significant 32 bits into the most significant 32 bits, and shifts the rest down
-FORCE_INLINE __m128i _mm_shuffle_epi_0321(__m128i a, __m128i b)
-{
- return vextq_s32(a, b, 1);
-}
-
-// takes the top 32 bits of a as the lowest lane and the lower three 32-bit values of b as the upper lanes
-// when a and b are the same, rotates the most significant 32 bits into the least significant 32 bits, and shifts the rest up
-FORCE_INLINE __m128i _mm_shuffle_epi_2103(__m128i a, __m128i b)
-{
- return vextq_s32(a, b, 3);
-}
-
-// gets the lower 64 bits of a and places them in the lower 64 bits of the result
-// gets the lower 64 bits of b and places them in the upper 64 bits of the result
-FORCE_INLINE __m128i _mm_shuffle_epi_1010(__m128i a, __m128i b)
-{
- return vcombine_s32(vget_low_s32(a), vget_low_s32(b));
-}
-
-// gets the lower 64 bits of a, swaps the 0 and 1 elements, and places them in the lower 64 bits of the result
-// gets the lower 64 bits of b and places them in the upper 64 bits of the result
-FORCE_INLINE __m128i _mm_shuffle_epi_1001(__m128i a, __m128i b)
-{
- return vcombine_s32(vrev64_s32(vget_low_s32(a)), vget_low_s32(b));
-}
-
-// gets the lower 64 bits of a, swaps the 0 and 1 elements, and places them in the lower 64 bits of the result
-// gets the lower 64 bits of b, swaps the 0 and 1 elements, and places them in the upper 64 bits of the result
-FORCE_INLINE __m128i _mm_shuffle_epi_0101(__m128i a, __m128i b)
-{
- return vcombine_s32(vrev64_s32(vget_low_s32(a)), vrev64_s32(vget_low_s32(b)));
-}
-
-FORCE_INLINE __m128i _mm_shuffle_epi_2211(__m128i a, __m128i b)
-{
- return vcombine_s32(vdup_n_s32(vgetq_lane_s32(a, 1)), vdup_n_s32(vgetq_lane_s32(b, 2)));
-}
-
-FORCE_INLINE __m128i _mm_shuffle_epi_0122(__m128i a, __m128i b)
-{
- return vcombine_s32(vdup_n_s32(vgetq_lane_s32(a, 2)), vrev64_s32(vget_low_s32(b)));
-}
-
-FORCE_INLINE __m128i _mm_shuffle_epi_3332(__m128i a, __m128i b)
-{
- return vcombine_s32(vget_high_s32(a), vdup_n_s32(vgetq_lane_s32(b, 3)));
-}
-
-template <int i >
-FORCE_INLINE __m128i _mm_shuffle_epi32_default(__m128i a, __m128i b)
-{
-#if ENABLE_CPP_VERSION
- __m128i ret;
- ret[0] = a[i & 0x3];
- ret[1] = a[(i >> 2) & 0x3];
- ret[2] = b[(i >> 4) & 0x03];
- ret[3] = b[(i >> 6) & 0x03];
- return ret;
-#else
- __m128i ret = vmovq_n_s32(vgetq_lane_s32(a, i & 0x3));
- ret = vsetq_lane_s32(vgetq_lane_s32(a, (i >> 2) & 0x3), ret, 1);
- ret = vsetq_lane_s32(vgetq_lane_s32(b, (i >> 4) & 0x3), ret, 2);
- ret = vsetq_lane_s32(vgetq_lane_s32(b, (i >> 6) & 0x3), ret, 3);
- return ret;
-#endif
-}
-
-template <int i >
-FORCE_INLINE __m128i _mm_shuffle_epi32_function(__m128i a, __m128i b)
-{
- switch (i)
- {
- case _MM_SHUFFLE(1, 0, 3, 2): return _mm_shuffle_epi_1032(a, b); break;
- case _MM_SHUFFLE(2, 3, 0, 1): return _mm_shuffle_epi_2301(a, b); break;
- case _MM_SHUFFLE(0, 3, 2, 1): return _mm_shuffle_epi_0321(a, b); break;
- case _MM_SHUFFLE(2, 1, 0, 3): return _mm_shuffle_epi_2103(a, b); break;
- case _MM_SHUFFLE(1, 0, 1, 0): return _mm_shuffle_epi_1010(a, b); break;
- case _MM_SHUFFLE(1, 0, 0, 1): return _mm_shuffle_epi_1001(a, b); break;
- case _MM_SHUFFLE(0, 1, 0, 1): return _mm_shuffle_epi_0101(a, b); break;
- case _MM_SHUFFLE(2, 2, 1, 1): return _mm_shuffle_epi_2211(a, b); break;
- case _MM_SHUFFLE(0, 1, 2, 2): return _mm_shuffle_epi_0122(a, b); break;
- case _MM_SHUFFLE(3, 3, 3, 2): return _mm_shuffle_epi_3332(a, b); break;
- default: return _mm_shuffle_epi32_default<i>(a, b);
- }
-}
-
-template <int i >
-FORCE_INLINE __m128i _mm_shuffle_epi32_splat(__m128i a)
-{
- return vdupq_n_s32(vgetq_lane_s32(a, i));
-}
-
-template <int i>
-FORCE_INLINE __m128i _mm_shuffle_epi32_single(__m128i a)
-{
- switch (i)
- {
- case _MM_SHUFFLE(0, 0, 0, 0): return _mm_shuffle_epi32_splat<0>(a); break;
- case _MM_SHUFFLE(1, 1, 1, 1): return _mm_shuffle_epi32_splat<1>(a); break;
- case _MM_SHUFFLE(2, 2, 2, 2): return _mm_shuffle_epi32_splat<2>(a); break;
- case _MM_SHUFFLE(3, 3, 3, 3): return _mm_shuffle_epi32_splat<3>(a); break;
- default: return _mm_shuffle_epi32_function<i>(a, a);
- }
-}
-
-// Shuffles the 4 signed or unsigned 32-bit integers in a as specified by imm. https://msdn.microsoft.com/en-us/library/56f67xbk%28v=vs.90%29.aspx
-#define _mm_shuffle_epi32(a,i) _mm_shuffle_epi32_single<i>(a)
-
-template <int i>
-FORCE_INLINE __m128i _mm_shufflehi_epi16_function(__m128i a)
-{
- int16x8_t ret = (int16x8_t)a;
- int16x4_t highBits = vget_high_s16(ret);
- ret = vsetq_lane_s16(vget_lane_s16(highBits, i & 0x3), ret, 4);
- ret = vsetq_lane_s16(vget_lane_s16(highBits, (i >> 2) & 0x3), ret, 5);
- ret = vsetq_lane_s16(vget_lane_s16(highBits, (i >> 4) & 0x3), ret, 6);
- ret = vsetq_lane_s16(vget_lane_s16(highBits, (i >> 6) & 0x3), ret, 7);
- return (__m128i)ret;
-}
-
-// Shuffles the upper 4 signed or unsigned 16-bit integers in a as specified by imm. https://msdn.microsoft.com/en-us/library/13ywktbs(v=vs.100).aspx
-#define _mm_shufflehi_epi16(a,i) _mm_shufflehi_epi16_function<i>(a)
-
-// Shifts the 4 signed or unsigned 32-bit integers in a left by count bits while shifting in zeros. : https://msdn.microsoft.com/en-us/library/z2k3bbtb%28v=vs.90%29.aspx
-//#define _mm_slli_epi32(a, imm) (__m128i)vshlq_n_s32(a,imm)
-
-// Based on SIMDe
-FORCE_INLINE __m128i _mm_slli_epi32(__m128i a, const int imm8)
-{
-#if defined(__aarch64__)
- const int32x4_t s = vdupq_n_s32(imm8);
- return vshlq_s32(a, s);
-#else
- int32_t __attribute__((aligned(16))) data[4];
- vst1q_s32(data, a);
- const int s = (imm8 > 31) ? 0 : imm8;
- data[0] = data[0] << s;
- data[1] = data[1] << s;
- data[2] = data[2] << s;
- data[3] = data[3] << s;
-
- return vld1q_s32(data);
-#endif
-}
-
-
-//Shifts the 4 signed or unsigned 32-bit integers in a right by count bits while shifting in zeros. https://msdn.microsoft.com/en-us/library/w486zcfa(v=vs.100).aspx
-//#define _mm_srli_epi32( a, imm ) (__m128i)vshrq_n_u32((uint32x4_t)a, imm)
-
-// Based on SIMDe
-FORCE_INLINE __m128i _mm_srli_epi32(__m128i a, const int imm8)
-{
-#if defined(__aarch64__)
- const int shift = (imm8 > 31) ? 0 : imm8; // Unfortunately, we need to check for this case for embree.
- const int32x4_t s = vdupq_n_s32(-shift);
- return vreinterpretq_s32_u32(vshlq_u32(vreinterpretq_u32_s32(a), s));
-#else
- int32_t __attribute__((aligned(16))) data[4];
- vst1q_s32(data, a);
-
- const int s = (imm8 > 31) ? 0 : imm8;
-
- data[0] = data[0] >> s;
- data[1] = data[1] >> s;
- data[2] = data[2] >> s;
- data[3] = data[3] >> s;
-
- return vld1q_s32(data);
-#endif
-}
-
-
-// Shifts the 4 signed 32-bit integers in a right by count bits while shifting in the sign bit. https://msdn.microsoft.com/en-us/library/z1939387(v=vs.100).aspx
-//#define _mm_srai_epi32( a, imm ) vshrq_n_s32(a, imm)
-
-// Based on SIMDe
-FORCE_INLINE __m128i _mm_srai_epi32(__m128i a, const int imm8)
-{
-#if defined(__aarch64__)
- const int32x4_t s = vdupq_n_s32(-imm8);
- return vshlq_s32(a, s);
-#else
- int32_t __attribute__((aligned(16))) data[4];
- vst1q_s32(data, a);
- const uint32_t m = (uint32_t) ((~0U) << (32 - imm8));
-
- for (int i = 0; i < 4; i++) {
- uint32_t is_neg = ((uint32_t) (((data[i]) >> 31)));
- data[i] = (data[i] >> imm8) | (m * is_neg);
- }
-
- return vld1q_s32(data);
-#endif
-}
-
-// Shifts the 128-bit value in a right by imm bytes while shifting in zeros. imm must be an immediate. https://msdn.microsoft.com/en-us/library/305w28yz(v=vs.100).aspx
-//#define _mm_srli_si128( a, imm ) (__m128i)vmaxq_s8((int8x16_t)a, vextq_s8((int8x16_t)a, vdupq_n_s8(0), imm))
-#define _mm_srli_si128( a, imm ) (__m128i)vextq_s8((int8x16_t)a, vdupq_n_s8(0), (imm))
-
-// Shifts the 128-bit value in a left by imm bytes while shifting in zeros. imm must be an immediate. https://msdn.microsoft.com/en-us/library/34d3k2kt(v=vs.100).aspx
-#define _mm_slli_si128( a, imm ) (__m128i)vextq_s8(vdupq_n_s8(0), (int8x16_t)a, 16 - (imm))
-
-// NEON does not provide a version of this function; here is an article about some ways to reproduce the results.
-// http://stackoverflow.com/questions/11870910/sse-mm-movemask-epi8-equivalent-method-for-arm-neon
-// Creates a 16-bit mask from the most significant bits of the 16 signed or unsigned 8-bit integers in a and zero extends the upper bits. https://msdn.microsoft.com/en-us/library/vstudio/s090c8fk(v=vs.100).aspx
-FORCE_INLINE int _mm_movemask_epi8(__m128i _a)
-{
- uint8x16_t input = (uint8x16_t)_a;
- const int8_t __attribute__((aligned(16))) xr[8] = { -7, -6, -5, -4, -3, -2, -1, 0 };
- uint8x8_t mask_and = vdup_n_u8(0x80);
- int8x8_t mask_shift = vld1_s8(xr);
-
- uint8x8_t lo = vget_low_u8(input);
- uint8x8_t hi = vget_high_u8(input);
-
- lo = vand_u8(lo, mask_and);
- lo = vshl_u8(lo, mask_shift);
-
- hi = vand_u8(hi, mask_and);
- hi = vshl_u8(hi, mask_shift);
-
- lo = vpadd_u8(lo, lo);
- lo = vpadd_u8(lo, lo);
- lo = vpadd_u8(lo, lo);
-
- hi = vpadd_u8(hi, hi);
- hi = vpadd_u8(hi, hi);
- hi = vpadd_u8(hi, hi);
-
- return ((hi[0] << 8) | (lo[0] & 0xFF));
-}
-
-
-// ******************************************
-// Math operations
-// ******************************************
-
-// Subtracts the four single-precision, floating-point values of a and b. https://msdn.microsoft.com/en-us/library/vstudio/1zad2k61(v=vs.100).aspx
-FORCE_INLINE __m128 _mm_sub_ps(__m128 a, __m128 b)
-{
- return vsubq_f32(a, b);
-}
-
-FORCE_INLINE __m128 _mm_sub_ss(__m128 a, __m128 b)
-{
- return vsubq_f32(a, b);
-}
-
-// Subtracts the 4 signed or unsigned 32-bit integers of b from the 4 signed or unsigned 32-bit integers of a. https://msdn.microsoft.com/en-us/library/vstudio/fhh866h0(v=vs.100).aspx
-FORCE_INLINE __m128i _mm_sub_epi32(__m128i a, __m128i b)
-{
- return vsubq_s32(a, b);
-}
-
-// Adds the four single-precision, floating-point values of a and b. https://msdn.microsoft.com/en-us/library/vstudio/c9848chc(v=vs.100).aspx
-FORCE_INLINE __m128 _mm_add_ps(__m128 a, __m128 b)
-{
- return vaddq_f32(a, b);
-}
-
-// adds the scalar single-precision floating point values of a and b. https://msdn.microsoft.com/en-us/library/be94x2y6(v=vs.100).aspx
-FORCE_INLINE __m128 _mm_add_ss(__m128 a, __m128 b)
-{
- const float32_t b0 = vgetq_lane_f32(b, 0);
- float32x4_t value = vdupq_n_f32(0);
-
- //the upper values in the result must be the remnants of <a>.
- value = vsetq_lane_f32(b0, value, 0);
- return vaddq_f32(a, value);
-}
-
-// Adds the 4 signed or unsigned 32-bit integers in a to the 4 signed or unsigned 32-bit integers in b. https://msdn.microsoft.com/en-us/library/vstudio/09xs4fkk(v=vs.100).aspx
-FORCE_INLINE __m128i _mm_add_epi32(__m128i a, __m128i b)
-{
- return vaddq_s32(a, b);
-}
-
-// Adds the 8 signed or unsigned 16-bit integers in a to the 8 signed or unsigned 16-bit integers in b. https://msdn.microsoft.com/en-us/library/fceha5k4(v=vs.100).aspx
-FORCE_INLINE __m128i _mm_add_epi16(__m128i a, __m128i b)
-{
- return (__m128i)vaddq_s16((int16x8_t)a, (int16x8_t)b);
-}
-
-// Multiplies the 8 signed or unsigned 16-bit integers from a by the 8 signed or unsigned 16-bit integers from b. https://msdn.microsoft.com/en-us/library/vstudio/9ks1472s(v=vs.100).aspx
-FORCE_INLINE __m128i _mm_mullo_epi16(__m128i a, __m128i b)
-{
- return (__m128i)vmulq_s16((int16x8_t)a, (int16x8_t)b);
-}
-
-// Multiplies the 4 signed or unsigned 32-bit integers from a by the 4 signed or unsigned 32-bit integers from b. https://msdn.microsoft.com/en-us/library/vstudio/bb531409(v=vs.100).aspx
-FORCE_INLINE __m128i _mm_mullo_epi32 (__m128i a, __m128i b)
-{
- return (__m128i)vmulq_s32((int32x4_t)a,(int32x4_t)b);
-}
-
-// Multiplies the four single-precision, floating-point values of a and b. https://msdn.microsoft.com/en-us/library/vstudio/22kbk6t9(v=vs.100).aspx
-FORCE_INLINE __m128 _mm_mul_ps(__m128 a, __m128 b)
-{
- return vmulq_f32(a, b);
-}
-
-FORCE_INLINE __m128 _mm_mul_ss(__m128 a, __m128 b)
-{
- return vmulq_f32(a, b);
-}
-
-// Computes the approximations of reciprocals of the four single-precision, floating-point values of a. https://msdn.microsoft.com/en-us/library/vstudio/796k1tty(v=vs.100).aspx
-FORCE_INLINE __m128 _mm_rcp_ps(__m128 in)
-{
-#if defined(BUILD_IOS)
- return vdivq_f32(vdupq_n_f32(1.0f),in);
-
-#endif
- // Get an initial estimate of 1/in.
- float32x4_t reciprocal = vrecpeq_f32(in);
-
- // We only return an estimate of 1/in.
- // Further Newton-Raphson refinement, if needed, should be done outside of _mm_rcp_ps().
-
- // TODO(LTE): We could delete these ifdef?
- reciprocal = vmulq_f32(vrecpsq_f32(in, reciprocal), reciprocal);
- reciprocal = vmulq_f32(vrecpsq_f32(in, reciprocal), reciprocal);
- return reciprocal;
-
-}
-
-FORCE_INLINE __m128 _mm_rcp_ss(__m128 in)
-{
- float32x4_t value;
- float32x4_t result = in;
-
- value = _mm_rcp_ps(in);
- return vsetq_lane_f32(vgetq_lane_f32(value, 0), result, 0);
-}
-
-// Divides the four single-precision, floating-point values of a and b. https://msdn.microsoft.com/en-us/library/edaw8147(v=vs.100).aspx
-FORCE_INLINE __m128 _mm_div_ps(__m128 a, __m128 b)
-{
-#if defined(BUILD_IOS)
- return vdivq_f32(a,b);
-#else
- float32x4_t reciprocal = _mm_rcp_ps(b);
-
- reciprocal = vmulq_f32(vrecpsq_f32(b, reciprocal), reciprocal);
- reciprocal = vmulq_f32(vrecpsq_f32(b, reciprocal), reciprocal);
-
- // Add one more round of newton-raphson since NEON's reciprocal estimation has less accuracy compared to SSE2's rcp.
- reciprocal = vmulq_f32(vrecpsq_f32(b, reciprocal), reciprocal);
-
- // Another round for safety
- reciprocal = vmulq_f32(vrecpsq_f32(b, reciprocal), reciprocal);
-
-
- return vmulq_f32(a, reciprocal);
-#endif
-}
-
-// Divides the scalar single-precision floating point value of a by b. https://msdn.microsoft.com/en-us/library/4y73xa49(v=vs.100).aspx
-FORCE_INLINE __m128 _mm_div_ss(__m128 a, __m128 b)
-{
- float32x4_t value;
- float32x4_t result = a;
- value = _mm_div_ps(a, b);
- return vsetq_lane_f32(vgetq_lane_f32(value, 0), result, 0);
-}
-
-// Computes the approximations of the reciprocal square roots of the four single-precision floating point values of in. https://msdn.microsoft.com/en-us/library/22hfsh53(v=vs.100).aspx
-FORCE_INLINE __m128 _mm_rsqrt_ps(__m128 in)
-{
-
- float32x4_t value = vrsqrteq_f32(in);
-
- // TODO: We must debug and ensure that rsqrt(0) and rsqrt(-0) yield proper values.
- // Related code snippets can be found here: https://cpp.hotexamples.com/examples/-/-/vrsqrteq_f32/cpp-vrsqrteq_f32-function-examples.html
- // If we adapt this function, we might be able to avoid special zero treatment in _mm_sqrt_ps
-
- value = vmulq_f32(value, vrsqrtsq_f32(vmulq_f32(in, value), value));
- value = vmulq_f32(value, vrsqrtsq_f32(vmulq_f32(in, value), value));
-
- // one more round to get better precision
- value = vmulq_f32(value, vrsqrtsq_f32(vmulq_f32(in, value), value));
-
- // another round for safety
- value = vmulq_f32(value, vrsqrtsq_f32(vmulq_f32(in, value), value));
-
- return value;
-}
-
-FORCE_INLINE __m128 _mm_rsqrt_ss(__m128 in)
-{
- float32x4_t result = in;
-
- __m128 value = _mm_rsqrt_ps(in);
-
- return vsetq_lane_f32(vgetq_lane_f32(value, 0), result, 0);
-}
-
-
-// Computes the approximations of square roots of the four single-precision, floating-point values of a. First computes reciprocal square roots and then reciprocals of the four values. https://msdn.microsoft.com/en-us/library/vstudio/8z67bwwk(v=vs.100).aspx
-FORCE_INLINE __m128 _mm_sqrt_ps(__m128 in)
-{
-#if defined(BUILD_IOS)
- return vsqrtq_f32(in);
-#else
- __m128 reciprocal = _mm_rsqrt_ps(in);
-
- // We must treat sqrt(in == 0) in a special way. At this point reciprocal contains garbage due to vrsqrteq_f32(0) returning +inf.
- // We assign 0 to reciprocal wherever required.
- const float32x4_t vzero = vdupq_n_f32(0.0f);
- const uint32x4_t mask = vceqq_f32(in, vzero);
- reciprocal = vbslq_f32(mask, vzero, reciprocal);
-
- // sqrt(x) = x * (1 / sqrt(x))
- return vmulq_f32(in, reciprocal);
-#endif
-}
-
-// Computes the approximation of the square root of the scalar single-precision floating point value of in. https://msdn.microsoft.com/en-us/library/ahfsc22d(v=vs.100).aspx
-FORCE_INLINE __m128 _mm_sqrt_ss(__m128 in)
-{
- float32x4_t value;
- float32x4_t result = in;
-
- value = _mm_sqrt_ps(in);
- return vsetq_lane_f32(vgetq_lane_f32(value, 0), result, 0);
-}
-
-
-// Computes the maximums of the four single-precision, floating-point values of a and b. https://msdn.microsoft.com/en-us/library/vstudio/ff5d607a(v=vs.100).aspx
-FORCE_INLINE __m128 _mm_max_ps(__m128 a, __m128 b)
-{
-#if USE_PRECISE_MINMAX_IMPLEMENTATION
- return vbslq_f32(vcltq_f32(b,a),a,b);
-#else
- // Faster, but would give inconsistent rendering (e.g. holes, NaN pixels)
- return vmaxq_f32(a, b);
-#endif
-}
-
-// Computes the minima of the four single-precision, floating-point values of a and b. https://msdn.microsoft.com/en-us/library/vstudio/wh13kadz(v=vs.100).aspx
-FORCE_INLINE __m128 _mm_min_ps(__m128 a, __m128 b)
-{
-#if USE_PRECISE_MINMAX_IMPLEMENTATION
- return vbslq_f32(vcltq_f32(a,b),a,b);
-#else
- // Faster, but would give inconsistent rendering (e.g. holes, NaN pixels)
- return vminq_f32(a, b);
-#endif
-}
-
-// Computes the maximum of the two lower scalar single-precision floating point values of a and b. https://msdn.microsoft.com/en-us/library/s6db5esz(v=vs.100).aspx
-FORCE_INLINE __m128 _mm_max_ss(__m128 a, __m128 b)
-{
- float32x4_t value;
- float32x4_t result = a;
-
- value = _mm_max_ps(a, b);
- return vsetq_lane_f32(vgetq_lane_f32(value, 0), result, 0);
-}
-
-// Computes the minimum of the two lower scalar single-precision floating point values of a and b. https://msdn.microsoft.com/en-us/library/0a9y7xaa(v=vs.100).aspx
-FORCE_INLINE __m128 _mm_min_ss(__m128 a, __m128 b)
-{
- float32x4_t value;
- float32x4_t result = a;
-
-
- value = _mm_min_ps(a, b);
- return vsetq_lane_f32(vgetq_lane_f32(value, 0), result, 0);
-}
-
-// Computes the pairwise minima of the 8 signed 16-bit integers from a and the 8 signed 16-bit integers from b. https://msdn.microsoft.com/en-us/library/vstudio/6te997ew(v=vs.100).aspx
-FORCE_INLINE __m128i _mm_min_epi16(__m128i a, __m128i b)
-{
- return (__m128i)vminq_s16((int16x8_t)a, (int16x8_t)b);
-}
-
-// epi versions of min/max
-// Computes the pairwise maximums of the four signed 32-bit integer values of a and b. https://msdn.microsoft.com/en-us/library/vstudio/bb514055(v=vs.100).aspx
-FORCE_INLINE __m128i _mm_max_epi32(__m128i a, __m128i b )
-{
- return vmaxq_s32(a,b);
-}
-
-// Computes the pairwise minima of the four signed 32-bit integer values of a and b. https://msdn.microsoft.com/en-us/library/vstudio/bb531476(v=vs.100).aspx
-FORCE_INLINE __m128i _mm_min_epi32(__m128i a, __m128i b )
-{
- return vminq_s32(a,b);
-}
-
-// Multiplies the 8 signed 16-bit integers from a by the 8 signed 16-bit integers from b. https://msdn.microsoft.com/en-us/library/vstudio/59hddw1d(v=vs.100).aspx
-FORCE_INLINE __m128i _mm_mulhi_epi16(__m128i a, __m128i b)
-{
- int16x8_t ret = vqdmulhq_s16((int16x8_t)a, (int16x8_t)b);
- ret = vshrq_n_s16(ret, 1);
- return (__m128i)ret;
-}
-
-// Computes pairwise add of each argument as single-precision, floating-point values a and b.
-//https://msdn.microsoft.com/en-us/library/yd9wecaa.aspx
-FORCE_INLINE __m128 _mm_hadd_ps(__m128 a, __m128 b )
-{
-#if defined(__aarch64__)
- return vpaddq_f32(a,b);
-#else
-// This does not work, no vpaddq...
-// return (__m128) vpaddq_f32(a,b);
- //
- // get two f32x2_t values from a
- // do vpadd
- // put result in low half of f32x4 result
- //
- // get two f32x2_t values from b
- // do vpadd
- // put result in high half of f32x4 result
- //
- // combine
- return vcombine_f32( vpadd_f32( vget_low_f32(a), vget_high_f32(a) ), vpadd_f32( vget_low_f32(b), vget_high_f32(b) ) );
-#endif
-}
-
-// ******************************************
-// Compare operations
-// ******************************************
-
-// Compares for less than https://msdn.microsoft.com/en-us/library/vstudio/f330yhc8(v=vs.100).aspx
-FORCE_INLINE __m128 _mm_cmplt_ps(__m128 a, __m128 b)
-{
- return (__m128)vcltq_f32(a, b);
-}
-
-FORCE_INLINE __m128 _mm_cmpnlt_ps(__m128 a, __m128 b)
-{
- return (__m128) vmvnq_s32((__m128i)_mm_cmplt_ps(a,b));
-}
-
-// Compares for greater than. https://msdn.microsoft.com/en-us/library/vstudio/11dy102s(v=vs.100).aspx
-FORCE_INLINE __m128 _mm_cmpgt_ps(__m128 a, __m128 b)
-{
- return (__m128)vcgtq_f32(a, b);
-}
-
-FORCE_INLINE __m128 _mm_cmpnle_ps(__m128 a, __m128 b)
-{
- return (__m128) _mm_cmpgt_ps(a,b);
-}
-
-
-// Compares for greater than or equal. https://msdn.microsoft.com/en-us/library/vstudio/fs813y2t(v=vs.100).aspx
-FORCE_INLINE __m128 _mm_cmpge_ps(__m128 a, __m128 b)
-{
- return (__m128)vcgeq_f32(a, b);
-}
-
-// Compares for less than or equal. https://msdn.microsoft.com/en-us/library/vstudio/1s75w83z(v=vs.100).aspx
-FORCE_INLINE __m128 _mm_cmple_ps(__m128 a, __m128 b)
-{
- return (__m128)vcleq_f32(a, b);
-}
-
-// Compares for equality. https://msdn.microsoft.com/en-us/library/vstudio/36aectz5(v=vs.100).aspx
-FORCE_INLINE __m128 _mm_cmpeq_ps(__m128 a, __m128 b)
-{
- return (__m128)vceqq_f32(a, b);
-}
-
-// Compares the 4 signed 32-bit integers in a and the 4 signed 32-bit integers in b for less than. https://msdn.microsoft.com/en-us/library/vstudio/4ak0bf5d(v=vs.100).aspx
-FORCE_INLINE __m128i _mm_cmplt_epi32(__m128i a, __m128i b)
-{
- return (__m128i)vcltq_s32(a, b);
-}
-
-FORCE_INLINE __m128i _mm_cmpeq_epi32(__m128i a, __m128i b)
-{
- return (__m128i) vceqq_s32(a,b);
-}
-
-// Compares the 4 signed 32-bit integers in a and the 4 signed 32-bit integers in b for greater than. https://msdn.microsoft.com/en-us/library/vstudio/1s9f2z0y(v=vs.100).aspx
-FORCE_INLINE __m128i _mm_cmpgt_epi32(__m128i a, __m128i b)
-{
- return (__m128i)vcgtq_s32(a, b);
-}
-
-// Compares the four 32-bit floats in a and b to check if any values are NaN. Ordered compare between each value returns true for "orderable" and false for "not orderable" (NaN). https://msdn.microsoft.com/en-us/library/vstudio/0h9w00fx(v=vs.100).aspx
-// see also:
-// http://stackoverflow.com/questions/8627331/what-does-ordered-unordered-comparison-mean
-// http://stackoverflow.com/questions/29349621/neon-isnanval-intrinsics
-FORCE_INLINE __m128 _mm_cmpord_ps(__m128 a, __m128 b )
-{
- // Note: NEON does not have ordered compare builtin
- // Need to compare a eq a and b eq b to check for NaN
- // Do AND of results to get final
- return (__m128) vreinterpretq_f32_u32( vandq_u32( vceqq_f32(a,a), vceqq_f32(b,b) ) );
-}
-
-// Compares the lower single-precision floating point scalar values of a and b using a less than operation. : https://msdn.microsoft.com/en-us/library/2kwe606b(v=vs.90).aspx
-FORCE_INLINE int _mm_comilt_ss(__m128 a, __m128 b)
-{
- uint32x4_t value;
-
- value = vcltq_f32(a, b);
- return vgetq_lane_u32(value, 0);
-}
-
-// Compares the lower single-precision floating point scalar values of a and b using a greater than operation. : https://msdn.microsoft.com/en-us/library/b0738e0t(v=vs.100).aspx
-FORCE_INLINE int _mm_comigt_ss(__m128 a, __m128 b)
-{
- uint32x4_t value;
-
- value = vcgtq_f32(a, b);
- return vgetq_lane_u32(value, 0);
-}
-
-// Compares the lower single-precision floating point scalar values of a and b using a less than or equal operation. : https://msdn.microsoft.com/en-us/library/1w4t7c57(v=vs.90).aspx
-FORCE_INLINE int _mm_comile_ss(__m128 a, __m128 b)
-{
- uint32x4_t value;
-
- value = vcleq_f32(a, b);
- return vgetq_lane_u32(value, 0);
-}
-
-// Compares the lower single-precision floating point scalar values of a and b using a greater than or equal operation. : https://msdn.microsoft.com/en-us/library/8t80des6(v=vs.100).aspx
-FORCE_INLINE int _mm_comige_ss(__m128 a, __m128 b)
-{
- uint32x4_t value;
-
- value = vcgeq_f32(a, b);
- return vgetq_lane_u32(value, 0);
-}
-
-// Compares the lower single-precision floating point scalar values of a and b using an equality operation. : https://msdn.microsoft.com/en-us/library/93yx2h2b(v=vs.100).aspx
-FORCE_INLINE int _mm_comieq_ss(__m128 a, __m128 b)
-{
- uint32x4_t value;
-
- value = vceqq_f32(a, b);
- return vgetq_lane_u32(value, 0);
-}
-
-// Compares the lower single-precision floating point scalar values of a and b using an inequality operation. : https://msdn.microsoft.com/en-us/library/bafh5e0a(v=vs.90).aspx
-FORCE_INLINE int _mm_comineq_ss(__m128 a, __m128 b)
-{
- uint32x4_t value;
-
- value = vceqq_f32(a, b);
- return !vgetq_lane_u32(value, 0);
-}
-
-// according to the documentation, these intrinsics behave the same as the non-'u' versions. We'll just alias them here.
-#define _mm_ucomilt_ss _mm_comilt_ss
-#define _mm_ucomile_ss _mm_comile_ss
-#define _mm_ucomigt_ss _mm_comigt_ss
-#define _mm_ucomige_ss _mm_comige_ss
-#define _mm_ucomieq_ss _mm_comieq_ss
-#define _mm_ucomineq_ss _mm_comineq_ss
-
-// ******************************************
-// Conversions
-// ******************************************
-
-// Converts the four single-precision, floating-point values of a to signed 32-bit integer values using truncate. https://msdn.microsoft.com/en-us/library/vstudio/1h005y6x(v=vs.100).aspx
-FORCE_INLINE __m128i _mm_cvttps_epi32(__m128 a)
-{
- return vcvtq_s32_f32(a);
-}
-
-// Converts the four signed 32-bit integer values of a to single-precision, floating-point values https://msdn.microsoft.com/en-us/library/vstudio/36bwxcx5(v=vs.100).aspx
-FORCE_INLINE __m128 _mm_cvtepi32_ps(__m128i a)
-{
- return vcvtq_f32_s32(a);
-}
-
-// Converts the four single-precision, floating-point values of a to signed 32-bit integer values. https://msdn.microsoft.com/en-us/library/vstudio/xdc42k5e(v=vs.100).aspx
-// *NOTE*. The default rounding mode on SSE is 'round to even', which ArmV7 does not support!
-// It is supported on ARMv8 however.
-FORCE_INLINE __m128i _mm_cvtps_epi32(__m128 a)
-{
-#if 1
- return vcvtnq_s32_f32(a);
-#else
- __m128 half = vdupq_n_f32(0.5f);
- const __m128 sign = vcvtq_f32_u32((vshrq_n_u32(vreinterpretq_u32_f32(a), 31)));
- const __m128 aPlusHalf = vaddq_f32(a, half);
- const __m128 aRound = vsubq_f32(aPlusHalf, sign);
- return vcvtq_s32_f32(aRound);
-#endif
-}
-
-// Moves the least significant 32 bits of a to a 32-bit integer. https://msdn.microsoft.com/en-us/library/5z7a9642%28v=vs.90%29.aspx
-FORCE_INLINE int _mm_cvtsi128_si32(__m128i a)
-{
- return vgetq_lane_s32(a, 0);
-}
-
-// Moves 32-bit integer a to the least significant 32 bits of an __m128 object, zero extending the upper bits. https://msdn.microsoft.com/en-us/library/ct3539ha%28v=vs.90%29.aspx
-FORCE_INLINE __m128i _mm_cvtsi32_si128(int a)
-{
- __m128i result = vdupq_n_s32(0);
- return vsetq_lane_s32(a, result, 0);
-}
-
-
-// Applies a type cast to reinterpret four 32-bit floating point values passed in as a 128-bit parameter as packed 32-bit integers. https://msdn.microsoft.com/en-us/library/bb514099.aspx
-FORCE_INLINE __m128i _mm_castps_si128(__m128 a)
-{
-#if defined(__aarch64__)
- return (__m128i)a;
-#else
- return *(const __m128i *)&a;
-#endif
-}
-
-// Applies a type cast to reinterpret four 32-bit integers passed in as a 128-bit parameter as packed 32-bit floating point values. https://msdn.microsoft.com/en-us/library/bb514029.aspx
-FORCE_INLINE __m128 _mm_castsi128_ps(__m128i a)
-{
-#if defined(__aarch64__)
- return (__m128)a;
-#else
- return *(const __m128 *)&a;
-#endif
-}
-
-// Loads 128-bit value. : https://msdn.microsoft.com/en-us/library/atzzad1h(v=vs.80).aspx
-FORCE_INLINE __m128i _mm_load_si128(const __m128i *p)
-{
- return vld1q_s32((int32_t *)p);
-}
-
-FORCE_INLINE __m128d _mm_castps_pd(const __m128 a)
-{
- return *(const __m128d *)&a;
-}
-
-FORCE_INLINE __m128d _mm_castsi128_pd(__m128i a)
-{
- return *(const __m128d *)&a;
-}
-// ******************************************
-// Miscellaneous Operations
-// ******************************************
-
-// Packs the 16 signed 16-bit integers from a and b into 8-bit integers and saturates. https://msdn.microsoft.com/en-us/library/k4y4f7w5%28v=vs.90%29.aspx
-FORCE_INLINE __m128i _mm_packs_epi16(__m128i a, __m128i b)
-{
- return (__m128i)vcombine_s8(vqmovn_s16((int16x8_t)a), vqmovn_s16((int16x8_t)b));
-}
-
-// Packs the 16 signed 16-bit integers from a and b into 8-bit unsigned integers and saturates. https://msdn.microsoft.com/en-us/library/07ad1wx4(v=vs.100).aspx
-FORCE_INLINE __m128i _mm_packus_epi16(const __m128i a, const __m128i b)
-{
- return (__m128i)vcombine_u8(vqmovun_s16((int16x8_t)a), vqmovun_s16((int16x8_t)b));
-}
-
-// Packs the 8 signed 32-bit integers from a and b into signed 16-bit integers and saturates. https://msdn.microsoft.com/en-us/library/393t56f9%28v=vs.90%29.aspx
-FORCE_INLINE __m128i _mm_packs_epi32(__m128i a, __m128i b)
-{
- return (__m128i)vcombine_s16(vqmovn_s32(a), vqmovn_s32(b));
-}
-
-// Interleaves the lower 8 signed or unsigned 8-bit integers in a with the lower 8 signed or unsigned 8-bit integers in b. https://msdn.microsoft.com/en-us/library/xf7k860c%28v=vs.90%29.aspx
-FORCE_INLINE __m128i _mm_unpacklo_epi8(__m128i a, __m128i b)
-{
- int8x8_t a1 = (int8x8_t)vget_low_s16((int16x8_t)a);
- int8x8_t b1 = (int8x8_t)vget_low_s16((int16x8_t)b);
-
- int8x8x2_t result = vzip_s8(a1, b1);
-
- return (__m128i)vcombine_s8(result.val[0], result.val[1]);
-}
-
-// Interleaves the lower 4 signed or unsigned 16-bit integers in a with the lower 4 signed or unsigned 16-bit integers in b. https://msdn.microsoft.com/en-us/library/btxb17bw%28v=vs.90%29.aspx
-FORCE_INLINE __m128i _mm_unpacklo_epi16(__m128i a, __m128i b)
-{
- int16x4_t a1 = vget_low_s16((int16x8_t)a);
- int16x4_t b1 = vget_low_s16((int16x8_t)b);
-
- int16x4x2_t result = vzip_s16(a1, b1);
-
- return (__m128i)vcombine_s16(result.val[0], result.val[1]);
-}
-
-// Interleaves the lower 2 signed or unsigned 32-bit integers in a with the lower 2 signed or unsigned 32-bit integers in b. https://msdn.microsoft.com/en-us/library/x8atst9d(v=vs.100).aspx
-FORCE_INLINE __m128i _mm_unpacklo_epi32(__m128i a, __m128i b)
-{
- int32x2_t a1 = vget_low_s32(a);
- int32x2_t b1 = vget_low_s32(b);
-
- int32x2x2_t result = vzip_s32(a1, b1);
-
- return vcombine_s32(result.val[0], result.val[1]);
-}
-
-// Selects and interleaves the lower two single-precision, floating-point values from a and b. https://msdn.microsoft.com/en-us/library/25st103b%28v=vs.90%29.aspx
-FORCE_INLINE __m128 _mm_unpacklo_ps(__m128 a, __m128 b)
-{
- float32x2x2_t result = vzip_f32(vget_low_f32(a), vget_low_f32(b));
- return vcombine_f32(result.val[0], result.val[1]);
-}
-
-// Selects and interleaves the upper two single-precision, floating-point values from a and b. https://msdn.microsoft.com/en-us/library/skccxx7d%28v=vs.90%29.aspx
-FORCE_INLINE __m128 _mm_unpackhi_ps(__m128 a, __m128 b)
-{
- float32x2x2_t result = vzip_f32(vget_high_f32(a), vget_high_f32(b));
- return vcombine_f32(result.val[0], result.val[1]);
-}
-
-// Interleaves the upper 8 signed or unsigned 8-bit integers in a with the upper 8 signed or unsigned 8-bit integers in b. https://msdn.microsoft.com/en-us/library/t5h7783k(v=vs.100).aspx
-FORCE_INLINE __m128i _mm_unpackhi_epi8(__m128i a, __m128i b)
-{
- int8x8_t a1 = (int8x8_t)vget_high_s16((int16x8_t)a);
- int8x8_t b1 = (int8x8_t)vget_high_s16((int16x8_t)b);
-
- int8x8x2_t result = vzip_s8(a1, b1);
-
- return (__m128i)vcombine_s8(result.val[0], result.val[1]);
-}
-
-// Interleaves the upper 4 signed or unsigned 16-bit integers in a with the upper 4 signed or unsigned 16-bit integers in b. https://msdn.microsoft.com/en-us/library/03196cz7(v=vs.100).aspx
-FORCE_INLINE __m128i _mm_unpackhi_epi16(__m128i a, __m128i b)
-{
- int16x4_t a1 = vget_high_s16((int16x8_t)a);
- int16x4_t b1 = vget_high_s16((int16x8_t)b);
-
- int16x4x2_t result = vzip_s16(a1, b1);
-
- return (__m128i)vcombine_s16(result.val[0], result.val[1]);
-}
-
-// Interleaves the upper 2 signed or unsigned 32-bit integers in a with the upper 2 signed or unsigned 32-bit integers in b. https://msdn.microsoft.com/en-us/library/65sa7cbs(v=vs.100).aspx
-FORCE_INLINE __m128i _mm_unpackhi_epi32(__m128i a, __m128i b)
-{
- int32x2_t a1 = vget_high_s32(a);
- int32x2_t b1 = vget_high_s32(b);
-
- int32x2x2_t result = vzip_s32(a1, b1);
-
- return vcombine_s32(result.val[0], result.val[1]);
-}
-
-// Extracts the selected signed or unsigned 16-bit integer from a and zero extends. https://msdn.microsoft.com/en-us/library/6dceta0c(v=vs.100).aspx
-#define _mm_extract_epi16( a, imm ) vgetq_lane_s16((int16x8_t)a, imm)
-
-// ******************************************
-// Streaming Extensions
-// ******************************************
-
-// Guarantees that every preceding store is globally visible before any subsequent store. https://msdn.microsoft.com/en-us/library/5h2w73d1%28v=vs.90%29.aspx
-FORCE_INLINE void _mm_sfence(void)
-{
- __sync_synchronize();
-}
-
-// Stores the data in a to the address p without polluting the caches. If the cache line containing address p is already in the cache, the cache will be updated. Address p must be 16-byte aligned. https://msdn.microsoft.com/en-us/library/ba08y07y%28v=vs.90%29.aspx
-FORCE_INLINE void _mm_stream_si128(__m128i *p, __m128i a)
-{
- *p = a;
-}
-
-// Cache line containing p is flushed and invalidated from all caches in the coherency domain. : https://msdn.microsoft.com/en-us/library/ba08y07y(v=vs.100).aspx
-FORCE_INLINE void _mm_clflush(void const*p)
-{
- // No direct NEON equivalent, so this is a no-op here.
-}
-
-FORCE_INLINE __m128i _mm_set_epi64x(int64_t a, int64_t b)
-{
- // Stick to the flipped behavior of x86.
- int64_t __attribute__((aligned(16))) data[2] = { b, a };
- return (__m128i)vld1q_s64(data);
-}
-
-FORCE_INLINE __m128i _mm_set1_epi64x(int64_t _i)
-{
- return (__m128i)vmovq_n_s64(_i);
-}
-
-#if defined(__aarch64__)
-FORCE_INLINE __m128 _mm_blendv_ps(__m128 a, __m128 b, __m128 c)
-{
- int32x4_t mask = vshrq_n_s32(__m128i(c),31);
- return vbslq_f32( uint32x4_t(mask), b, a);
-}
-
-FORCE_INLINE __m128i _mm_load4epu8_epi32(__m128i *ptr)
-{
- uint8x8_t t0 = vld1_u8((uint8_t*)ptr);
- uint16x8_t t1 = vmovl_u8(t0);
- uint32x4_t t2 = vmovl_u16(vget_low_u16(t1));
- return vreinterpretq_s32_u32(t2);
-}
-
-FORCE_INLINE __m128i _mm_load4epu16_epi32(__m128i *ptr)
-{
- uint16x8_t t0 = vld1q_u16((uint16_t*)ptr);
- uint32x4_t t1 = vmovl_u16(vget_low_u16(t0));
- return vreinterpretq_s32_u32(t1);
-}
-
-FORCE_INLINE __m128i _mm_load4epi8_f32(__m128i *ptr)
-{
- int8x8_t t0 = vld1_s8((int8_t*)ptr);
- int16x8_t t1 = vmovl_s8(t0);
- int32x4_t t2 = vmovl_s16(vget_low_s16(t1));
- float32x4_t t3 = vcvtq_f32_s32(t2);
- return vreinterpretq_s32_f32(t3);
-}
-
-FORCE_INLINE __m128i _mm_load4epu8_f32(__m128i *ptr)
-{
- uint8x8_t t0 = vld1_u8((uint8_t*)ptr);
- uint16x8_t t1 = vmovl_u8(t0);
- uint32x4_t t2 = vmovl_u16(vget_low_u16(t1));
- return vreinterpretq_s32_u32(t2);
-}
-
-FORCE_INLINE __m128i _mm_load4epi16_f32(__m128i *ptr)
-{
- int16x8_t t0 = vld1q_s16((int16_t*)ptr);
- int32x4_t t1 = vmovl_s16(vget_low_s16(t0));
- float32x4_t t2 = vcvtq_f32_s32(t1);
- return vreinterpretq_s32_f32(t2);
-}
-
-FORCE_INLINE __m128i _mm_packus_epi32(__m128i a, __m128i b)
-{
- return (__m128i)vcombine_u8(vqmovun_s16((int16x8_t)a), vqmovun_s16((int16x8_t)b));
-}
-
-FORCE_INLINE __m128i _mm_stream_load_si128(__m128i* ptr)
-{
- // No non-temporal load on a single register on ARM.
- return vreinterpretq_s32_u8(vld1q_u8((uint8_t*)ptr));
-}
-
-FORCE_INLINE void _mm_stream_ps(float* ptr, __m128i a)
-{
- // No non-temporal store on a single register on ARM.
- vst1q_f32((float*)ptr, vreinterpretq_f32_s32(a));
-}
-
-FORCE_INLINE __m128i _mm_min_epu32(__m128i a, __m128i b)
-{
- return vreinterpretq_s32_u32(vminq_u32(vreinterpretq_u32_s32(a), vreinterpretq_u32_s32(b)));
-}
-
-FORCE_INLINE __m128i _mm_max_epu32(__m128i a, __m128i b)
-{
- return vreinterpretq_s32_u32(vmaxq_u32(vreinterpretq_u32_s32(a), vreinterpretq_u32_s32(b)));
-}
-
-FORCE_INLINE __m128 _mm_abs_ps(__m128 a)
-{
- return vabsq_f32(a);
-}
-
-FORCE_INLINE __m128 _mm_madd_ps(__m128 a, __m128 b, __m128 c)
-{
- return vmlaq_f32(c, a, b);
-}
-
-FORCE_INLINE __m128 _mm_msub_ps(__m128 a, __m128 b, __m128 c)
-{
- return vmlsq_f32(c, a, b);
-}
-
-FORCE_INLINE __m128i _mm_abs_epi32(__m128i a)
-{
- return vabsq_s32(a);
-}
-#endif //defined(__aarch64__)
-
-// Count the number of bits set to 1 in unsigned 32-bit integer a, and
-// return that count in dst.
-// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_popcnt_u32
-FORCE_INLINE int _mm_popcnt_u32(unsigned int a)
-{
- return (int)vaddlv_u8(vcnt_u8(vcreate_u8((uint64_t)a)));
-}
-
-// Count the number of bits set to 1 in unsigned 64-bit integer a, and
-// return that count in dst.
-// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_popcnt_u64
-FORCE_INLINE int64_t _mm_popcnt_u64(uint64_t a)
-{
- return (int64_t)vaddlv_u8(vcnt_u8(vcreate_u8(a)));
-}
-
-#endif
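Note: the deleted _mm_rcp_ps/_mm_div_ps/_mm_rsqrt_ps emulations above all rely on the same trick: start from a coarse NEON estimate (vrecpeq_f32/vrsqrteq_f32) and refine it with Newton-Raphson steps, where vrecpsq_f32(d, x) supplies the (2 - d*x) factor. The following plain scalar sketch is an editor's illustration, not part of the deleted header (refine_recip is a hypothetical helper name); it shows how quickly one such step converges toward 1/d:

#include <cstdio>

// One Newton-Raphson step toward 1/d: x' = x * (2 - d*x).
// On NEON, vrecpsq_f32(d, x) computes the (2 - d*x) factor.
static float refine_recip(float d, float x)
{
    return x * (2.0f - d * x);
}

int main()
{
    const float d = 3.0f;
    float x = 0.3f; // coarse initial estimate of 1/3, standing in for vrecpeq_f32
    for (int i = 0; i < 3; ++i) {
        x = refine_recip(d, x);
        std::printf("step %d: %.9f\n", i + 1, x); // converges rapidly toward 0.333333333
    }
    return 0;
}

Each iteration roughly doubles the number of correct bits, which is why the header applies two (and in places three or four) refinement rounds after the initial estimate.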
diff --git a/thirdparty/embree-aarch64/common/math/constants.cpp b/thirdparty/embree-aarch64/common/math/constants.cpp
deleted file mode 100644
index eeff131664..0000000000
--- a/thirdparty/embree-aarch64/common/math/constants.cpp
+++ /dev/null
@@ -1,61 +0,0 @@
-// Copyright 2009-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-
-#if defined(__aarch64__)
-#include <arm_neon.h>
-#endif
-
-#include "constants.h"
-
-namespace embree
-{
- TrueTy True;
- FalseTy False;
- ZeroTy zero;
- OneTy one;
- NegInfTy neg_inf;
- PosInfTy inf;
- PosInfTy pos_inf;
- NaNTy nan;
- UlpTy ulp;
- PiTy pi;
- OneOverPiTy one_over_pi;
- TwoPiTy two_pi;
- OneOverTwoPiTy one_over_two_pi;
- FourPiTy four_pi;
- OneOverFourPiTy one_over_four_pi;
- StepTy step;
- ReverseStepTy reverse_step;
- EmptyTy empty;
- UndefinedTy undefined;
-
-#if defined(__aarch64__)
-const uint32x4_t movemask_mask = { 1, 2, 4, 8 };
-const uint32x4_t vzero = { 0, 0, 0, 0 };
-const uint32x4_t v0x80000000 = { 0x80000000, 0x80000000, 0x80000000, 0x80000000 };
-const uint32x4_t v0x7fffffff = { 0x7fffffff, 0x7fffffff, 0x7fffffff, 0x7fffffff };
-const uint32x4_t v000F = { 0x00000000, 0x00000000, 0x00000000, 0xFFFFFFFF };
-const uint32x4_t v00F0 = { 0x00000000, 0x00000000, 0xFFFFFFFF, 0x00000000 };
-const uint32x4_t v00FF = { 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF };
-const uint32x4_t v0F00 = { 0x00000000, 0xFFFFFFFF, 0x00000000, 0x00000000 };
-const uint32x4_t v0F0F = { 0x00000000, 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF };
-const uint32x4_t v0FF0 = { 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000 };
-const uint32x4_t v0FFF = { 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF };
-const uint32x4_t vF000 = { 0xFFFFFFFF, 0x00000000, 0x00000000, 0x00000000 };
-const uint32x4_t vF00F = { 0xFFFFFFFF, 0x00000000, 0x00000000, 0xFFFFFFFF };
-const uint32x4_t vF0F0 = { 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x00000000 };
-const uint32x4_t vF0FF = { 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF };
-const uint32x4_t vFF00 = { 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0x00000000 };
-const uint32x4_t vFF0F = { 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF };
-const uint32x4_t vFFF0 = { 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000 };
-const uint32x4_t vFFFF = { 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF };
-const uint8x16_t v0022 = {0,1,2,3, 0,1,2,3, 8,9,10,11, 8,9,10,11};
-const uint8x16_t v1133 = {4,5,6,7, 4,5,6,7, 12,13,14,15, 12,13,14,15};
-const uint8x16_t v0101 = {0,1,2,3, 4,5,6,7, 0,1,2,3, 4,5,6,7};
-const float32x4_t vOne = { 1.0f, 1.0f, 1.0f, 1.0f };
-const float32x4_t vmOne = { -1.0f, -1.0f, -1.0f, -1.0f };
-const float32x4_t vInf = { INFINITY, INFINITY, INFINITY, INFINITY };
-const float32x4_t vmInf = { -INFINITY, -INFINITY, -INFINITY, -INFINITY };
-#endif
-
-}
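Note: the movemask_mask = { 1, 2, 4, 8 } constant deleted above backs the header's _mm_movemask_ps emulation: each lane whose sign bit is set contributes its per-lane weight, and a horizontal add (vaddvq_u32) produces the 4-bit mask. A scalar model of that computation follows as an editor's illustration; it is not part of the deleted file and uses no NEON intrinsics:

#include <cstdint>
#include <cstdio>
#include <cstring>

// Scalar model of the 4-bit movemask: lanes with the sign bit set
// contribute their weight (1, 2, 4, 8); the sum of weights is the mask.
static int model_movemask_ps(const float v[4])
{
    static const std::uint32_t lane_weight[4] = { 1, 2, 4, 8 }; // same values as movemask_mask
    std::uint32_t mask = 0;
    for (int i = 0; i < 4; ++i) {
        std::uint32_t bits;
        std::memcpy(&bits, &v[i], sizeof bits); // reinterpret the float's bit pattern
        if (bits & 0x80000000u)                 // sign bit set?
            mask += lane_weight[i];             // this lane contributes its weight
    }
    return static_cast<int>(mask);
}

int main()
{
    const float v[4] = { -1.0f, 2.0f, -0.0f, 4.0f };
    std::printf("%d\n", model_movemask_ps(v)); // lanes 0 and 2 are negative -> 1 + 4 = 5
    return 0;
}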
diff --git a/thirdparty/embree-aarch64/common/tasking/taskschedulergcd.h b/thirdparty/embree-aarch64/common/tasking/taskschedulergcd.h
deleted file mode 100644
index d31f8bb478..0000000000
--- a/thirdparty/embree-aarch64/common/tasking/taskschedulergcd.h
+++ /dev/null
@@ -1,49 +0,0 @@
-#pragma once
-
-#include "../sys/platform.h"
-#include "../sys/alloc.h"
-#include "../sys/barrier.h"
-#include "../sys/thread.h"
-#include "../sys/mutex.h"
-#include "../sys/condition.h"
-#include "../sys/ref.h"
-
-#include <dispatch/dispatch.h>
-
-namespace embree
-{
- struct TaskScheduler
- {
- /*! initializes the task scheduler */
- static void create(size_t numThreads, bool set_affinity, bool start_threads);
-
- /*! destroys the task scheduler again */
- static void destroy() {}
-
- /* returns the ID of the current thread */
- static __forceinline size_t threadID()
- {
- return threadIndex();
- }
-
- /* returns the index (0..threadCount-1) of the current thread */
- static __forceinline size_t threadIndex()
- {
- currentThreadIndex = (currentThreadIndex + 1) % GCDNumThreads;
- return currentThreadIndex;
- }
-
- /* returns the total number of threads */
- static __forceinline size_t threadCount()
- {
- return GCDNumThreads;
- }
-
- private:
- static size_t GCDNumThreads;
- static size_t currentThreadIndex;
-
- };
-
-};
-
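Note: the deleted TaskScheduler above is a thin wrapper over Grand Central Dispatch. For reference, a minimal sketch of how a parallel loop maps onto GCD via dispatch_apply_f (an editor's illustration, not part of the deleted file; it requires an Apple platform with libdispatch):

#include <dispatch/dispatch.h>
#include <cstdio>
#include <vector>

// Worker invoked once per loop index; context carries the shared output buffer.
static void fill_entry(void* context, size_t i)
{
    float* data = static_cast<float*>(context);
    data[i] = static_cast<float>(i) * 0.5f; // each index is written exactly once, so no race
}

int main()
{
    std::vector<float> data(16);
    dispatch_queue_t queue = dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0);
    // dispatch_apply_f blocks until all iterations have run, like a parallel_for.
    dispatch_apply_f(data.size(), queue, data.data(), fill_entry);
    std::printf("%.1f\n", data[15]);
    return 0;
}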
diff --git a/thirdparty/embree-aarch64/kernels/builders/primrefgen.h b/thirdparty/embree-aarch64/kernels/builders/primrefgen.h
deleted file mode 100644
index 9919c945c3..0000000000
--- a/thirdparty/embree-aarch64/kernels/builders/primrefgen.h
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright 2009-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-
-#pragma once
-
-#include "../common/scene.h"
-#include "../common/primref.h"
-#include "../common/primref_mb.h"
-#include "priminfo.h"
-#include "bvh_builder_morton.h"
-
-namespace embree
-{
- namespace isa
- {
- PrimInfo createPrimRefArray(Geometry* geometry, unsigned int geomID, mvector<PrimRef>& prims, BuildProgressMonitor& progressMonitor);
-
- PrimInfo createPrimRefArray(Scene* scene, Geometry::GTypeMask types, bool mblur, mvector<PrimRef>& prims, BuildProgressMonitor& progressMonitor);
-
- PrimInfo createPrimRefArrayMBlur(Scene* scene, Geometry::GTypeMask types, mvector<PrimRef>& prims, BuildProgressMonitor& progressMonitor, size_t itime = 0);
-
- PrimInfoMB createPrimRefArrayMSMBlur(Scene* scene, Geometry::GTypeMask types, mvector<PrimRefMB>& prims, BuildProgressMonitor& progressMonitor, BBox1f t0t1 = BBox1f(0.0f,1.0f));
-
- template<typename Mesh>
- size_t createMortonCodeArray(Mesh* mesh, mvector<BVHBuilderMorton::BuildPrim>& morton, BuildProgressMonitor& progressMonitor);
- }
-}
-
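Note: conceptually, the createPrimRefArray functions declared above walk a scene's geometry and emit one bounded reference per primitive together with aggregate geometry and centroid bounds for the BVH builders to consume. The following simplified, self-contained sketch uses hypothetical stand-in types (the real embree PrimRef/PrimInfo carry more data) and is an editor's illustration only:

#include <algorithm>
#include <cfloat>
#include <cstdio>
#include <vector>

struct Vec3 { float x, y, z; };

struct Box {
    Vec3 lower{ FLT_MAX, FLT_MAX, FLT_MAX };
    Vec3 upper{ -FLT_MAX, -FLT_MAX, -FLT_MAX };
    void extend(const Vec3& p) {
        lower = { std::min(lower.x, p.x), std::min(lower.y, p.y), std::min(lower.z, p.z) };
        upper = { std::max(upper.x, p.x), std::max(upper.y, p.y), std::max(upper.z, p.z) };
    }
};

struct PrimRefSketch { Box bounds; unsigned primID; };                    // hypothetical stand-in for PrimRef
struct PrimInfoSketch { Box geomBounds, centBounds; size_t count = 0; };  // hypothetical stand-in for PrimInfo

static PrimInfoSketch createPrimRefArraySketch(const std::vector<Box>& prims,
                                               std::vector<PrimRefSketch>& out)
{
    PrimInfoSketch info;
    for (size_t i = 0; i < prims.size(); ++i) {
        out.push_back({ prims[i], static_cast<unsigned>(i) });  // one bounded reference per primitive
        info.geomBounds.extend(prims[i].lower);                 // grow the scene bounds
        info.geomBounds.extend(prims[i].upper);
        info.centBounds.extend({ 0.5f * (prims[i].lower.x + prims[i].upper.x),
                                 0.5f * (prims[i].lower.y + prims[i].upper.y),
                                 0.5f * (prims[i].lower.z + prims[i].upper.z) });
        ++info.count;
    }
    return info;
}

int main()
{
    std::vector<Box> prims(2);
    prims[0].extend({ 0.0f, 0.0f, 0.0f }); prims[0].extend({ 1.0f, 1.0f, 1.0f });
    prims[1].extend({ 2.0f, 2.0f, 2.0f }); prims[1].extend({ 3.0f, 3.0f, 3.0f });
    std::vector<PrimRefSketch> refs;
    const PrimInfoSketch info = createPrimRefArraySketch(prims, refs);
    std::printf("%zu prims, scene max x = %g\n", info.count, static_cast<double>(info.geomBounds.upper.x));
    return 0;
}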
diff --git a/thirdparty/embree-aarch64/kernels/common/instance_stack.h b/thirdparty/embree-aarch64/kernels/common/instance_stack.h
deleted file mode 100644
index d7e3637f7b..0000000000
--- a/thirdparty/embree-aarch64/kernels/common/instance_stack.h
+++ /dev/null
@@ -1,199 +0,0 @@
-// Copyright 2009-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-
-#pragma once
-
-#include "rtcore.h"
-
-namespace embree {
-namespace instance_id_stack {
-
-static_assert(RTC_MAX_INSTANCE_LEVEL_COUNT > 0,
- "RTC_MAX_INSTANCE_LEVEL_COUNT must be greater than 0.");
-
-/*******************************************************************************
- * Instance ID stack manipulation.
- * This is used from the instance intersector.
- ******************************************************************************/
-
-/*
- * Push an instance to the stack.
- */
-RTC_FORCEINLINE bool push(RTCIntersectContext* context,
- unsigned instanceId)
-{
-#if RTC_MAX_INSTANCE_LEVEL_COUNT > 1
- const bool spaceAvailable = context->instStackSize < RTC_MAX_INSTANCE_LEVEL_COUNT;
- /* We assert here because instances are silently dropped when the stack is full.
- This might be quite hard to find in production. */
- assert(spaceAvailable);
- if (likely(spaceAvailable))
- context->instID[context->instStackSize++] = instanceId;
- return spaceAvailable;
-#else
- const bool spaceAvailable = (context->instID[0] == RTC_INVALID_GEOMETRY_ID);
- assert(spaceAvailable);
- if (likely(spaceAvailable))
- context->instID[0] = instanceId;
- return spaceAvailable;
-#endif
-}
-
-
-/*
- * Pop the last instance pushed to the stack.
- * Do not call on an empty stack.
- */
-RTC_FORCEINLINE void pop(RTCIntersectContext* context)
-{
- assert(context);
-#if RTC_MAX_INSTANCE_LEVEL_COUNT > 1
- assert(context->instStackSize > 0);
- context->instID[--context->instStackSize] = RTC_INVALID_GEOMETRY_ID;
-#else
- assert(context->instID[0] != RTC_INVALID_GEOMETRY_ID);
- context->instID[0] = RTC_INVALID_GEOMETRY_ID;
-#endif
-}
-
-/*******************************************************************************
- * Optimized instance id stack copy.
- * The copy() function at the bottom of this block will either copy full
- * stacks or copy only until the last valid element has been copied, depending
- * on RTC_MAX_INSTANCE_LEVEL_COUNT.
- ******************************************************************************/
-
-/*
- * Plain array assignment. This works for scalar->scalar,
- * scalar->vector, and vector->vector.
- */
-template <class Src, class Tgt>
-RTC_FORCEINLINE void level_copy(unsigned level, Src* src, Tgt* tgt)
-{
- tgt[level] = src[level];
-}
-
-/*
- * Masked SIMD vector->vector store.
- */
-template <int K>
-RTC_FORCEINLINE void level_copy(unsigned level, const vuint<K>* src, vuint<K>* tgt, const vbool<K>& mask)
-{
- vuint<K>::storeu(mask, tgt + level, src[level]);
-}
-
-/*
- * Masked scalar->SIMD vector store.
- */
-template <int K>
-RTC_FORCEINLINE void level_copy(unsigned level, const unsigned* src, vuint<K>* tgt, const vbool<K>& mask)
-{
- vuint<K>::store(mask, tgt + level, src[level]);
-}
-
-/*
- * Indexed assign from vector to scalar.
- */
-template <int K>
-RTC_FORCEINLINE void level_copy(unsigned level, const vuint<K>* src, unsigned* tgt, const size_t& idx)
-{
- tgt[level] = src[level][idx];
-}
-
-/*
- * Indexed assign from scalar to vector.
- */
-template <int K>
-RTC_FORCEINLINE void level_copy(unsigned level, const unsigned* src, vuint<K>* tgt, const size_t& idx)
-{
- tgt[level][idx] = src[level];
-}
-
-/*
- * Indexed assign from vector to vector.
- */
-template <int K>
-RTC_FORCEINLINE void level_copy(unsigned level, const vuint<K>* src, vuint<K>* tgt, const size_t& i, const size_t& j)
-{
- tgt[level][j] = src[level][i];
-}
-
-/*
- * Check if the given stack level is valid.
- * These are only used for large max stack sizes.
- */
-RTC_FORCEINLINE bool level_valid(unsigned level, const unsigned* stack)
-{
- return stack[level] != RTC_INVALID_GEOMETRY_ID;
-}
-RTC_FORCEINLINE bool level_valid(unsigned level, const unsigned* stack, const size_t& /*i*/)
-{
- return stack[level] != RTC_INVALID_GEOMETRY_ID;
-}
-template <int K>
-RTC_FORCEINLINE bool level_valid(unsigned level, const unsigned* stack, const vbool<K>& /*mask*/)
-{
- return stack[level] != RTC_INVALID_GEOMETRY_ID;
-}
-
-template <int K>
-RTC_FORCEINLINE bool level_valid(unsigned level, const vuint<K>* stack)
-{
- return any(stack[level] != RTC_INVALID_GEOMETRY_ID);
-}
-template <int K>
-RTC_FORCEINLINE bool level_valid(unsigned level, const vuint<K>* stack, const vbool<K>& mask)
-{
- return any(mask & (stack[level] != RTC_INVALID_GEOMETRY_ID));
-}
-
-template <int K>
-RTC_FORCEINLINE bool level_valid(unsigned level, const vuint<K>* stack, const size_t& i)
-{
- return stack[level][i] != RTC_INVALID_GEOMETRY_ID;
-}
-template <int K>
-RTC_FORCEINLINE bool level_valid(unsigned level, const vuint<K>* stack, const size_t& i, const size_t& /*j*/)
-{
- return stack[level][i] != RTC_INVALID_GEOMETRY_ID;
-}
-
-/*
- * Copy an instance ID stack.
- *
- * This function automatically selects a LevelFunctor from the above Assign
- * structs.
- */
-template <class Src, class Tgt, class... Args>
-RTC_FORCEINLINE void copy(Src src, Tgt tgt, Args&&... args)
-{
-#if (RTC_MAX_INSTANCE_LEVEL_COUNT == 1)
- /*
- * Avoid all loops for only one level.
- */
- level_copy(0, src, tgt, std::forward<Args>(args)...);
-
-#elif (RTC_MAX_INSTANCE_LEVEL_COUNT <= 4)
- /*
- * It is faster to avoid the valid test for low level counts.
- * Just copy the whole stack.
- */
- for (unsigned l = 0; l < RTC_MAX_INSTANCE_LEVEL_COUNT; ++l)
- level_copy(l, src, tgt, std::forward<Args>(args)...);
-
-#else
- /*
- * For general stack sizes, it pays off to test for validity.
- */
- bool valid = true;
- for (unsigned l = 0; l < RTC_MAX_INSTANCE_LEVEL_COUNT && valid; ++l)
- {
- level_copy(l, src, tgt, std::forward<Args>(args)...);
- valid = level_valid(l, src, std::forward<Args>(args)...);
- }
-#endif
-}
-
-} // namespace instance_id_stack
-} // namespace embree
-
diff --git a/thirdparty/embree-aarch64/kernels/common/scene_curves.h b/thirdparty/embree-aarch64/kernels/common/scene_curves.h
deleted file mode 100644
index 2649ab0e3e..0000000000
--- a/thirdparty/embree-aarch64/kernels/common/scene_curves.h
+++ /dev/null
@@ -1,341 +0,0 @@
-// Copyright 2009-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-
-#pragma once
-
-#include "default.h"
-#include "geometry.h"
-#include "buffer.h"
-
-namespace embree
-{
- /*! represents an array of bicubic bezier curves */
- struct CurveGeometry : public Geometry
- {
- /*! type of this geometry */
- static const Geometry::GTypeMask geom_type = Geometry::MTY_CURVE4;
-
- public:
-
- /*! bezier curve construction */
- CurveGeometry (Device* device, Geometry::GType gtype);
-
- public:
- void setMask(unsigned mask);
- void setNumTimeSteps (unsigned int numTimeSteps);
- void setVertexAttributeCount (unsigned int N);
- void setBuffer(RTCBufferType type, unsigned int slot, RTCFormat format, const Ref<Buffer>& buffer, size_t offset, size_t stride, unsigned int num);
- void* getBuffer(RTCBufferType type, unsigned int slot);
- void updateBuffer(RTCBufferType type, unsigned int slot);
- void commit();
- bool verify();
- void setTessellationRate(float N);
- void setMaxRadiusScale(float s);
- void addElementsToCount (GeometryCounts & counts) const;
-
- public:
-
- /*! returns the number of vertices */
- __forceinline size_t numVertices() const {
- return vertices[0].size();
- }
-
- /*! returns the i'th curve */
- __forceinline const unsigned int& curve(size_t i) const {
- return curves[i];
- }
-
- /*! returns i'th vertex of the first time step */
- __forceinline Vec3ff vertex(size_t i) const {
- return vertices0[i];
- }
-
- /*! returns i'th normal of the first time step */
- __forceinline Vec3fa normal(size_t i) const {
- return normals0[i];
- }
-
- /*! returns i'th tangent of the first time step */
- __forceinline Vec3ff tangent(size_t i) const {
- return tangents0[i];
- }
-
- /*! returns i'th normal derivative of the first time step */
- __forceinline Vec3fa dnormal(size_t i) const {
- return dnormals0[i];
- }
-
- /*! returns i'th radius of the first time step */
- __forceinline float radius(size_t i) const {
- return vertices0[i].w;
- }
-
- /*! returns i'th vertex of itime'th timestep */
- __forceinline Vec3ff vertex(size_t i, size_t itime) const {
- return vertices[itime][i];
- }
-
- /*! returns i'th normal of itime'th timestep */
- __forceinline Vec3fa normal(size_t i, size_t itime) const {
- return normals[itime][i];
- }
-
- /*! returns i'th tangent of itime'th timestep */
- __forceinline Vec3ff tangent(size_t i, size_t itime) const {
- return tangents[itime][i];
- }
-
- /*! returns i'th normal derivative of itime'th timestep */
- __forceinline Vec3fa dnormal(size_t i, size_t itime) const {
- return dnormals[itime][i];
- }
-
- /*! returns i'th radius of itime'th timestep */
- __forceinline float radius(size_t i, size_t itime) const {
- return vertices[itime][i].w;
- }
-
- /*! gathers the curve starting with i'th vertex */
- __forceinline void gather(Vec3ff& p0, Vec3ff& p1, Vec3ff& p2, Vec3ff& p3, size_t i) const
- {
- p0 = vertex(i+0);
- p1 = vertex(i+1);
- p2 = vertex(i+2);
- p3 = vertex(i+3);
- }
-
- /*! gathers the curve starting with i'th vertex of itime'th timestep */
- __forceinline void gather(Vec3ff& p0, Vec3ff& p1, Vec3ff& p2, Vec3ff& p3, size_t i, size_t itime) const
- {
- p0 = vertex(i+0,itime);
- p1 = vertex(i+1,itime);
- p2 = vertex(i+2,itime);
- p3 = vertex(i+3,itime);
- }
-
- /*! gathers the curve starting with i'th vertex */
- __forceinline void gather(Vec3ff& p0, Vec3ff& p1, Vec3ff& p2, Vec3ff& p3, Vec3fa& n0, Vec3fa& n1, Vec3fa& n2, Vec3fa& n3, size_t i) const
- {
- p0 = vertex(i+0);
- p1 = vertex(i+1);
- p2 = vertex(i+2);
- p3 = vertex(i+3);
- n0 = normal(i+0);
- n1 = normal(i+1);
- n2 = normal(i+2);
- n3 = normal(i+3);
- }
-
- /*! gathers the curve starting with i'th vertex of itime'th timestep */
- __forceinline void gather(Vec3ff& p0, Vec3ff& p1, Vec3ff& p2, Vec3ff& p3, Vec3fa& n0, Vec3fa& n1, Vec3fa& n2, Vec3fa& n3, size_t i, size_t itime) const
- {
- p0 = vertex(i+0,itime);
- p1 = vertex(i+1,itime);
- p2 = vertex(i+2,itime);
- p3 = vertex(i+3,itime);
- n0 = normal(i+0,itime);
- n1 = normal(i+1,itime);
- n2 = normal(i+2,itime);
- n3 = normal(i+3,itime);
- }
-
- /*! prefetches the curve starting with i'th vertex of itime'th timestep */
- __forceinline void prefetchL1_vertices(size_t i) const
- {
- prefetchL1(vertices0.getPtr(i)+0);
- prefetchL1(vertices0.getPtr(i)+64);
- }
-
- /*! prefetches the curve starting with i'th vertex of itime'th timestep */
- __forceinline void prefetchL2_vertices(size_t i) const
- {
- prefetchL2(vertices0.getPtr(i)+0);
- prefetchL2(vertices0.getPtr(i)+64);
- }
-
- /*! loads curve vertices for specified time */
- __forceinline void gather(Vec3ff& p0, Vec3ff& p1, Vec3ff& p2, Vec3ff& p3, size_t i, float time) const
- {
- float ftime;
- const size_t itime = timeSegment(time, ftime);
-
- const float t0 = 1.0f - ftime;
- const float t1 = ftime;
- Vec3ff a0,a1,a2,a3;
- gather(a0,a1,a2,a3,i,itime);
- Vec3ff b0,b1,b2,b3;
- gather(b0,b1,b2,b3,i,itime+1);
- p0 = madd(Vec3ff(t0),a0,t1*b0);
- p1 = madd(Vec3ff(t0),a1,t1*b1);
- p2 = madd(Vec3ff(t0),a2,t1*b2);
- p3 = madd(Vec3ff(t0),a3,t1*b3);
- }
-
- /*! loads curve vertices for specified time */
- __forceinline void gather(Vec3ff& p0, Vec3ff& p1, Vec3ff& p2, Vec3ff& p3, Vec3fa& n0, Vec3fa& n1, Vec3fa& n2, Vec3fa& n3, size_t i, float time) const
- {
- float ftime;
- const size_t itime = timeSegment(time, ftime);
-
- const float t0 = 1.0f - ftime;
- const float t1 = ftime;
- Vec3ff a0,a1,a2,a3; Vec3fa an0,an1,an2,an3;
- gather(a0,a1,a2,a3,an0,an1,an2,an3,i,itime);
- Vec3ff b0,b1,b2,b3; Vec3fa bn0,bn1,bn2,bn3;
- gather(b0,b1,b2,b3,bn0,bn1,bn2,bn3,i,itime+1);
- p0 = madd(Vec3ff(t0),a0,t1*b0);
- p1 = madd(Vec3ff(t0),a1,t1*b1);
- p2 = madd(Vec3ff(t0),a2,t1*b2);
- p3 = madd(Vec3ff(t0),a3,t1*b3);
- n0 = madd(Vec3ff(t0),an0,t1*bn0);
- n1 = madd(Vec3ff(t0),an1,t1*bn1);
- n2 = madd(Vec3ff(t0),an2,t1*bn2);
- n3 = madd(Vec3ff(t0),an3,t1*bn3);
- }
-
- template<typename SourceCurve3ff, typename SourceCurve3fa, typename TensorLinearCubicBezierSurface3fa>
- __forceinline TensorLinearCubicBezierSurface3fa getNormalOrientedCurve(IntersectContext* context, const Vec3fa& ray_org, const unsigned int primID, const size_t itime) const
- {
- Vec3ff v0,v1,v2,v3; Vec3fa n0,n1,n2,n3;
- unsigned int vertexID = curve(primID);
- gather(v0,v1,v2,v3,n0,n1,n2,n3,vertexID,itime);
- SourceCurve3ff ccurve(v0,v1,v2,v3);
- SourceCurve3fa ncurve(n0,n1,n2,n3);
- ccurve = enlargeRadiusToMinWidth(context,this,ray_org,ccurve);
- return TensorLinearCubicBezierSurface3fa::fromCenterAndNormalCurve(ccurve,ncurve);
- }
-
- template<typename SourceCurve3ff, typename SourceCurve3fa, typename TensorLinearCubicBezierSurface3fa>
- __forceinline TensorLinearCubicBezierSurface3fa getNormalOrientedCurve(IntersectContext* context, const Vec3fa& ray_org, const unsigned int primID, const float time) const
- {
- float ftime;
- const size_t itime = timeSegment(time, ftime);
- const TensorLinearCubicBezierSurface3fa curve0 = getNormalOrientedCurve<SourceCurve3ff, SourceCurve3fa, TensorLinearCubicBezierSurface3fa>(context,ray_org,primID,itime+0);
- const TensorLinearCubicBezierSurface3fa curve1 = getNormalOrientedCurve<SourceCurve3ff, SourceCurve3fa, TensorLinearCubicBezierSurface3fa>(context,ray_org,primID,itime+1);
- return clerp(curve0,curve1,ftime);
- }
-
- /*! gathers the hermite curve starting with i'th vertex */
- __forceinline void gather_hermite(Vec3ff& p0, Vec3ff& t0, Vec3ff& p1, Vec3ff& t1, size_t i) const
- {
- p0 = vertex (i+0);
- p1 = vertex (i+1);
- t0 = tangent(i+0);
- t1 = tangent(i+1);
- }
-
- /*! gathers the hermite curve starting with i'th vertex of itime'th timestep */
- __forceinline void gather_hermite(Vec3ff& p0, Vec3ff& t0, Vec3ff& p1, Vec3ff& t1, size_t i, size_t itime) const
- {
- p0 = vertex (i+0,itime);
- p1 = vertex (i+1,itime);
- t0 = tangent(i+0,itime);
- t1 = tangent(i+1,itime);
- }
-
- /*! loads curve vertices for specified time */
- __forceinline void gather_hermite(Vec3ff& p0, Vec3ff& t0, Vec3ff& p1, Vec3ff& t1, size_t i, float time) const
- {
- float ftime;
- const size_t itime = timeSegment(time, ftime);
- const float f0 = 1.0f - ftime, f1 = ftime;
- Vec3ff ap0,at0,ap1,at1;
- gather_hermite(ap0,at0,ap1,at1,i,itime);
- Vec3ff bp0,bt0,bp1,bt1;
- gather_hermite(bp0,bt0,bp1,bt1,i,itime+1);
- p0 = madd(Vec3ff(f0),ap0,f1*bp0);
- t0 = madd(Vec3ff(f0),at0,f1*bt0);
- p1 = madd(Vec3ff(f0),ap1,f1*bp1);
- t1 = madd(Vec3ff(f0),at1,f1*bt1);
- }
-
- /*! gathers the hermite curve starting with i'th vertex */
- __forceinline void gather_hermite(Vec3ff& p0, Vec3ff& t0, Vec3fa& n0, Vec3fa& dn0, Vec3ff& p1, Vec3ff& t1, Vec3fa& n1, Vec3fa& dn1, size_t i) const
- {
- p0 = vertex (i+0);
- p1 = vertex (i+1);
- t0 = tangent(i+0);
- t1 = tangent(i+1);
- n0 = normal(i+0);
- n1 = normal(i+1);
- dn0 = dnormal(i+0);
- dn1 = dnormal(i+1);
- }
-
- /*! gathers the hermite curve starting with i'th vertex of itime'th timestep */
- __forceinline void gather_hermite(Vec3ff& p0, Vec3ff& t0, Vec3fa& n0, Vec3fa& dn0, Vec3ff& p1, Vec3ff& t1, Vec3fa& n1, Vec3fa& dn1, size_t i, size_t itime) const
- {
- p0 = vertex (i+0,itime);
- p1 = vertex (i+1,itime);
- t0 = tangent(i+0,itime);
- t1 = tangent(i+1,itime);
- n0 = normal(i+0,itime);
- n1 = normal(i+1,itime);
- dn0 = dnormal(i+0,itime);
- dn1 = dnormal(i+1,itime);
- }
-
- /*! loads curve vertices for specified time */
- __forceinline void gather_hermite(Vec3ff& p0, Vec3fa& t0, Vec3fa& n0, Vec3fa& dn0, Vec3ff& p1, Vec3fa& t1, Vec3fa& n1, Vec3fa& dn1, size_t i, float time) const
- {
- float ftime;
- const size_t itime = timeSegment(time, ftime);
- const float f0 = 1.0f - ftime, f1 = ftime;
- Vec3ff ap0,at0,ap1,at1; Vec3fa an0,adn0,an1,adn1;
- gather_hermite(ap0,at0,an0,adn0,ap1,at1,an1,adn1,i,itime);
- Vec3ff bp0,bt0,bp1,bt1; Vec3fa bn0,bdn0,bn1,bdn1;
- gather_hermite(bp0,bt0,bn0,bdn0,bp1,bt1,bn1,bdn1,i,itime+1);
- p0 = madd(Vec3ff(f0),ap0,f1*bp0);
- t0 = madd(Vec3ff(f0),at0,f1*bt0);
- n0 = madd(Vec3ff(f0),an0,f1*bn0);
- dn0= madd(Vec3ff(f0),adn0,f1*bdn0);
- p1 = madd(Vec3ff(f0),ap1,f1*bp1);
- t1 = madd(Vec3ff(f0),at1,f1*bt1);
- n1 = madd(Vec3ff(f0),an1,f1*bn1);
- dn1= madd(Vec3ff(f0),adn1,f1*bdn1);
- }
-
- template<typename SourceCurve3ff, typename SourceCurve3fa, typename TensorLinearCubicBezierSurface3fa>
- __forceinline TensorLinearCubicBezierSurface3fa getNormalOrientedHermiteCurve(IntersectContext* context, const Vec3fa& ray_org, const unsigned int primID, const size_t itime) const
- {
- Vec3ff v0,t0,v1,t1; Vec3fa n0,dn0,n1,dn1;
- unsigned int vertexID = curve(primID);
- gather_hermite(v0,t0,n0,dn0,v1,t1,n1,dn1,vertexID,itime);
-
- SourceCurve3ff ccurve(v0,t0,v1,t1);
- SourceCurve3fa ncurve(n0,dn0,n1,dn1);
- ccurve = enlargeRadiusToMinWidth(context,this,ray_org,ccurve);
- return TensorLinearCubicBezierSurface3fa::fromCenterAndNormalCurve(ccurve,ncurve);
- }
-
- template<typename SourceCurve3ff, typename SourceCurve3fa, typename TensorLinearCubicBezierSurface3fa>
- __forceinline TensorLinearCubicBezierSurface3fa getNormalOrientedHermiteCurve(IntersectContext* context, const Vec3fa& ray_org, const unsigned int primID, const float time) const
- {
- float ftime;
- const size_t itime = timeSegment(time, ftime);
- const TensorLinearCubicBezierSurface3fa curve0 = getNormalOrientedHermiteCurve<SourceCurve3ff, SourceCurve3fa, TensorLinearCubicBezierSurface3fa>(context, ray_org, primID,itime+0);
- const TensorLinearCubicBezierSurface3fa curve1 = getNormalOrientedHermiteCurve<SourceCurve3ff, SourceCurve3fa, TensorLinearCubicBezierSurface3fa>(context, ray_org, primID,itime+1);
- return clerp(curve0,curve1,ftime);
- }
-
- private:
- void resizeBuffers(unsigned int numSteps);
-
- public:
- BufferView<unsigned int> curves; //!< array of curve indices
- BufferView<Vec3ff> vertices0; //!< fast access to first vertex buffer
- BufferView<Vec3fa> normals0; //!< fast access to first normal buffer
- BufferView<Vec3ff> tangents0; //!< fast access to first tangent buffer
- BufferView<Vec3fa> dnormals0; //!< fast access to first normal derivative buffer
- vector<BufferView<Vec3ff>> vertices; //!< vertex array for each timestep
- vector<BufferView<Vec3fa>> normals; //!< normal array for each timestep
- vector<BufferView<Vec3ff>> tangents; //!< tangent array for each timestep
- vector<BufferView<Vec3fa>> dnormals; //!< normal derivative array for each timestep
- BufferView<char> flags; //!< start, end flag per segment
- vector<BufferView<char>> vertexAttribs; //!< user buffers
- int tessellationRate; //!< tessellation rate for flat curve
- float maxRadiusScale = 1.0; //!< maximal min-width scaling of curve radii
- };
-
- DECLARE_ISA_FUNCTION(CurveGeometry*, createCurves, Device* COMMA Geometry::GType);
-}
diff --git a/thirdparty/embree-aarch64/kernels/geometry/curve_intersector_virtual_bezier_curve.h b/thirdparty/embree-aarch64/kernels/geometry/curve_intersector_virtual_bezier_curve.h
deleted file mode 100644
index 69cf612275..0000000000
--- a/thirdparty/embree-aarch64/kernels/geometry/curve_intersector_virtual_bezier_curve.h
+++ /dev/null
@@ -1,21 +0,0 @@
-// Copyright 2020 Light Transport Entertainment Inc.
-// SPDX-License-Identifier: Apache-2.0
-
-#pragma once
-
-#include "curve_intersector_virtual.h"
-
-namespace embree
-{
- namespace isa
- {
- void AddVirtualCurveBezierCurveInterector4i(VirtualCurveIntersector &prim);
- void AddVirtualCurveBezierCurveInterector4v(VirtualCurveIntersector &prim);
- void AddVirtualCurveBezierCurveInterector4iMB(VirtualCurveIntersector &prim);
-#if defined(__AVX__)
- void AddVirtualCurveBezierCurveInterector8i(VirtualCurveIntersector &prim);
- void AddVirtualCurveBezierCurveInterector8v(VirtualCurveIntersector &prim);
- void AddVirtualCurveBezierCurveInterector8iMB(VirtualCurveIntersector &prim);
-#endif
- }
-}
diff --git a/thirdparty/embree-aarch64/kernels/geometry/curve_intersector_virtual_bspline_curve.h b/thirdparty/embree-aarch64/kernels/geometry/curve_intersector_virtual_bspline_curve.h
deleted file mode 100644
index d37e41098e..0000000000
--- a/thirdparty/embree-aarch64/kernels/geometry/curve_intersector_virtual_bspline_curve.h
+++ /dev/null
@@ -1,21 +0,0 @@
-// Copyright 2020 Light Transport Entertainment Inc.
-// SPDX-License-Identifier: Apache-2.0
-
-#pragma once
-
-#include "curve_intersector_virtual.h"
-
-namespace embree
-{
- namespace isa
- {
- void AddVirtualCurveBSplineCurveInterector4i(VirtualCurveIntersector &prim);
- void AddVirtualCurveBSplineCurveInterector4v(VirtualCurveIntersector &prim);
- void AddVirtualCurveBSplineCurveInterector4iMB(VirtualCurveIntersector &prim);
-#if defined(__AVX__)
- void AddVirtualCurveBSplineCurveInterector8i(VirtualCurveIntersector &prim);
- void AddVirtualCurveBSplineCurveInterector8v(VirtualCurveIntersector &prim);
- void AddVirtualCurveBSplineCurveInterector8iMB(VirtualCurveIntersector &prim);
-#endif
- }
-}
diff --git a/thirdparty/embree-aarch64/kernels/geometry/curve_intersector_virtual_catmullrom_curve.h b/thirdparty/embree-aarch64/kernels/geometry/curve_intersector_virtual_catmullrom_curve.h
deleted file mode 100644
index a133a11d63..0000000000
--- a/thirdparty/embree-aarch64/kernels/geometry/curve_intersector_virtual_catmullrom_curve.h
+++ /dev/null
@@ -1,21 +0,0 @@
-// Copyright 2020 Light Transport Entertainment Inc.
-// SPDX-License-Identifier: Apache-2.0
-
-#pragma once
-
-#include "curve_intersector_virtual.h"
-
-namespace embree
-{
- namespace isa
- {
- void AddVirtualCurveCatmullRomCurveInterector4i(VirtualCurveIntersector &prim);
- void AddVirtualCurveCatmullRomCurveInterector4v(VirtualCurveIntersector &prim);
- void AddVirtualCurveCatmullRomCurveInterector4iMB(VirtualCurveIntersector &prim);
-#if defined(__AVX__)
- void AddVirtualCurveCatmullRomCurveInterector8i(VirtualCurveIntersector &prim);
- void AddVirtualCurveCatmullRomCurveInterector8v(VirtualCurveIntersector &prim);
- void AddVirtualCurveCatmullRomCurveInterector8iMB(VirtualCurveIntersector &prim);
-#endif
- }
-}
diff --git a/thirdparty/embree-aarch64/kernels/geometry/curve_intersector_virtual_hermite_curve.h b/thirdparty/embree-aarch64/kernels/geometry/curve_intersector_virtual_hermite_curve.h
deleted file mode 100644
index 9aec35da45..0000000000
--- a/thirdparty/embree-aarch64/kernels/geometry/curve_intersector_virtual_hermite_curve.h
+++ /dev/null
@@ -1,21 +0,0 @@
-// Copyright 2020 Light Transport Entertainment Inc.
-// SPDX-License-Identifier: Apache-2.0
-
-#pragma once
-
-#include "curve_intersector_virtual.h"
-
-namespace embree
-{
- namespace isa
- {
- void AddVirtualCurveHermiteCurveInterector4i(VirtualCurveIntersector &prim);
- void AddVirtualCurveHermiteCurveInterector4v(VirtualCurveIntersector &prim);
- void AddVirtualCurveHermiteCurveInterector4iMB(VirtualCurveIntersector &prim);
-#if defined(__AVX__)
- void AddVirtualCurveHermiteCurveInterector8i(VirtualCurveIntersector &prim);
- void AddVirtualCurveHermiteCurveInterector8v(VirtualCurveIntersector &prim);
- void AddVirtualCurveHermiteCurveInterector8iMB(VirtualCurveIntersector &prim);
-#endif
- }
-}
diff --git a/thirdparty/embree-aarch64/kernels/geometry/curve_intersector_virtual_linear_curve.h b/thirdparty/embree-aarch64/kernels/geometry/curve_intersector_virtual_linear_curve.h
deleted file mode 100644
index dd37d194f5..0000000000
--- a/thirdparty/embree-aarch64/kernels/geometry/curve_intersector_virtual_linear_curve.h
+++ /dev/null
@@ -1,21 +0,0 @@
-// Copyright 2020 Light Transport Entertainment Inc.
-// SPDX-License-Identifier: Apache-2.0
-
-#pragma once
-
-#include "curve_intersector_virtual.h"
-
-namespace embree
-{
- namespace isa
- {
- void AddVirtualCurveLinearCurveInterector4i(VirtualCurveIntersector &prim);
- void AddVirtualCurveLinearCurveInterector4v(VirtualCurveIntersector &prim);
- void AddVirtualCurveLinearCurveInterector4iMB(VirtualCurveIntersector &prim);
-#if defined(__AVX__)
- void AddVirtualCurveLinearCurveInterector8i(VirtualCurveIntersector &prim);
- void AddVirtualCurveLinearCurveInterector8v(VirtualCurveIntersector &prim);
- void AddVirtualCurveLinearCurveInterector8iMB(VirtualCurveIntersector &prim);
-#endif
- }
-}
diff --git a/thirdparty/embree-aarch64/kernels/geometry/curve_intersector_virtual_point.h b/thirdparty/embree-aarch64/kernels/geometry/curve_intersector_virtual_point.h
deleted file mode 100644
index fe5ceed840..0000000000
--- a/thirdparty/embree-aarch64/kernels/geometry/curve_intersector_virtual_point.h
+++ /dev/null
@@ -1,22 +0,0 @@
-// Copyright 2020 Light Transport Entertainment Inc.
-// SPDX-License-Identifier: Apache-2.0
-
-#pragma once
-
-#include "curve_intersector_virtual.h"
-
-namespace embree
-{
- namespace isa
- {
- void AddVirtualCurvePointInterector4i(VirtualCurveIntersector &prim);
- void AddVirtualCurvePointInterector4v(VirtualCurveIntersector &prim);
- void AddVirtualCurvePointInterector4iMB(VirtualCurveIntersector &prim);
-
-#if defined (__AVX__)
- void AddVirtualCurvePointInterector8i(VirtualCurveIntersector &prim);
- void AddVirtualCurvePointInterector8v(VirtualCurveIntersector &prim);
- void AddVirtualCurvePointInterector8iMB(VirtualCurveIntersector &prim);
-#endif
- }
-}
diff --git a/thirdparty/embree-aarch64/kernels/geometry/subgrid_intersector_moeller.h b/thirdparty/embree-aarch64/kernels/geometry/subgrid_intersector_moeller.h
deleted file mode 100644
index f65b4abf61..0000000000
--- a/thirdparty/embree-aarch64/kernels/geometry/subgrid_intersector_moeller.h
+++ /dev/null
@@ -1,493 +0,0 @@
-// Copyright 2009-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-
-#pragma once
-
-#include "subgrid.h"
-#include "quad_intersector_moeller.h"
-
-namespace embree
-{
- namespace isa
- {
-
- /* ----------------------------- */
- /* -- single ray intersectors -- */
- /* ----------------------------- */
-
- template<int M>
- __forceinline void interpolateUV(MoellerTrumboreHitM<M> &hit,const GridMesh::Grid &g, const SubGrid& subgrid)
- {
- /* correct U,V interpolation across the entire grid */
- const vint<M> sx((int)subgrid.x());
- const vint<M> sy((int)subgrid.y());
- const vint<M> sxM(sx + vint<M>(0,1,1,0));
- const vint<M> syM(sy + vint<M>(0,0,1,1));
- const float inv_resX = rcp((float)((int)g.resX-1));
- const float inv_resY = rcp((float)((int)g.resY-1));
- hit.U = (hit.U + (vfloat<M>)sxM * hit.absDen) * inv_resX;
- hit.V = (hit.V + (vfloat<M>)syM * hit.absDen) * inv_resY;
- }
-
- template<int M, bool filter>
- struct SubGridQuadMIntersector1MoellerTrumbore;
-
- template<int M, bool filter>
- struct SubGridQuadMIntersector1MoellerTrumbore
- {
- __forceinline SubGridQuadMIntersector1MoellerTrumbore() {}
-
- __forceinline SubGridQuadMIntersector1MoellerTrumbore(const Ray& ray, const void* ptr) {}
-
- __forceinline void intersect(RayHit& ray, IntersectContext* context,
- const Vec3vf<M>& v0, const Vec3vf<M>& v1, const Vec3vf<M>& v2, const Vec3vf<M>& v3,
- const GridMesh::Grid &g, const SubGrid& subgrid) const
- {
- MoellerTrumboreHitM<M> hit;
- MoellerTrumboreIntersector1<M> intersector(ray,nullptr);
- Intersect1EpilogMU<M,filter> epilog(ray,context,subgrid.geomID(),subgrid.primID());
-
- /* intersect first triangle */
- if (intersector.intersect(ray,v0,v1,v3,hit))
- {
- interpolateUV<M>(hit,g,subgrid);
- epilog(hit.valid,hit);
- }
-
- /* intersect second triangle */
- if (intersector.intersect(ray,v2,v3,v1,hit))
- {
- hit.U = hit.absDen - hit.U;
- hit.V = hit.absDen - hit.V;
- interpolateUV<M>(hit,g,subgrid);
- epilog(hit.valid,hit);
- }
- }
-
- __forceinline bool occluded(Ray& ray, IntersectContext* context,
- const Vec3vf<M>& v0, const Vec3vf<M>& v1, const Vec3vf<M>& v2, const Vec3vf<M>& v3,
- const GridMesh::Grid &g, const SubGrid& subgrid) const
- {
- MoellerTrumboreHitM<M> hit;
- MoellerTrumboreIntersector1<M> intersector(ray,nullptr);
- Occluded1EpilogMU<M,filter> epilog(ray,context,subgrid.geomID(),subgrid.primID());
-
- /* intersect first triangle */
- if (intersector.intersect(ray,v0,v1,v3,hit))
- {
- interpolateUV<M>(hit,g,subgrid);
- if (epilog(hit.valid,hit))
- return true;
- }
-
- /* intersect second triangle */
- if (intersector.intersect(ray,v2,v3,v1,hit))
- {
- hit.U = hit.absDen - hit.U;
- hit.V = hit.absDen - hit.V;
- interpolateUV<M>(hit,g,subgrid);
- if (epilog(hit.valid,hit))
- return true;
- }
- return false;
- }
- };
-
-#if defined (__AVX__)
-
- /*! Intersects 4 quads with 1 ray using AVX */
- template<bool filter>
- struct SubGridQuadMIntersector1MoellerTrumbore<4,filter>
- {
- __forceinline SubGridQuadMIntersector1MoellerTrumbore() {}
-
- __forceinline SubGridQuadMIntersector1MoellerTrumbore(const Ray& ray, const void* ptr) {}
-
- template<typename Epilog>
- __forceinline bool intersect(Ray& ray, const Vec3vf4& v0, const Vec3vf4& v1, const Vec3vf4& v2, const Vec3vf4& v3, const GridMesh::Grid &g, const SubGrid& subgrid, const Epilog& epilog) const
- {
- const Vec3vf8 vtx0(vfloat8(v0.x,v2.x),vfloat8(v0.y,v2.y),vfloat8(v0.z,v2.z));
-#if !defined(EMBREE_BACKFACE_CULLING)
- const Vec3vf8 vtx1(vfloat8(v1.x),vfloat8(v1.y),vfloat8(v1.z));
- const Vec3vf8 vtx2(vfloat8(v3.x),vfloat8(v3.y),vfloat8(v3.z));
-#else
- const Vec3vf8 vtx1(vfloat8(v1.x,v3.x),vfloat8(v1.y,v3.y),vfloat8(v1.z,v3.z));
- const Vec3vf8 vtx2(vfloat8(v3.x,v1.x),vfloat8(v3.y,v1.y),vfloat8(v3.z,v1.z));
-#endif
- MoellerTrumboreHitM<8> hit;
- MoellerTrumboreIntersector1<8> intersector(ray,nullptr);
- const vbool8 flags(0,0,0,0,1,1,1,1);
- if (unlikely(intersector.intersect(ray,vtx0,vtx1,vtx2,hit)))
- {
- vfloat8 U = hit.U, V = hit.V, absDen = hit.absDen;
-
-#if !defined(EMBREE_BACKFACE_CULLING)
- hit.U = select(flags,absDen-V,U);
- hit.V = select(flags,absDen-U,V);
- hit.vNg *= select(flags,vfloat8(-1.0f),vfloat8(1.0f));
-#else
- hit.U = select(flags,absDen-U,U);
- hit.V = select(flags,absDen-V,V);
-#endif
- /* correct U,V interpolation across the entire grid */
- const vint8 sx((int)subgrid.x());
- const vint8 sy((int)subgrid.y());
- const vint8 sx8(sx + vint8(0,1,1,0,0,1,1,0));
- const vint8 sy8(sy + vint8(0,0,1,1,0,0,1,1));
- const float inv_resX = rcp((float)((int)g.resX-1));
- const float inv_resY = rcp((float)((int)g.resY-1));
- hit.U = (hit.U + (vfloat8)sx8 * absDen) * inv_resX;
- hit.V = (hit.V + (vfloat8)sy8 * absDen) * inv_resY;
-
- if (unlikely(epilog(hit.valid,hit)))
- return true;
- }
- return false;
- }
-
- __forceinline bool intersect(RayHit& ray, IntersectContext* context,
- const Vec3vf4& v0, const Vec3vf4& v1, const Vec3vf4& v2, const Vec3vf4& v3,
- const GridMesh::Grid &g, const SubGrid& subgrid) const
- {
- return intersect(ray,v0,v1,v2,v3,g,subgrid,Intersect1EpilogMU<8,filter>(ray,context,subgrid.geomID(),subgrid.primID()));
- }
-
- __forceinline bool occluded(Ray& ray, IntersectContext* context,
- const Vec3vf4& v0, const Vec3vf4& v1, const Vec3vf4& v2, const Vec3vf4& v3,
- const GridMesh::Grid &g, const SubGrid& subgrid) const
- {
- return intersect(ray,v0,v1,v2,v3,g,subgrid,Occluded1EpilogMU<8,filter>(ray,context,subgrid.geomID(),subgrid.primID()));
- }
- };
-
-#endif
-
- // ============================================================================================================================
- // ============================================================================================================================
- // ============================================================================================================================
-
-
- /* ----------------------------- */
- /* -- ray packet intersectors -- */
- /* ----------------------------- */
-
- template<int K>
- struct SubGridQuadHitK
- {
- __forceinline SubGridQuadHitK(const vfloat<K>& U,
- const vfloat<K>& V,
- const vfloat<K>& T,
- const vfloat<K>& absDen,
- const Vec3vf<K>& Ng,
- const vbool<K>& flags,
- const GridMesh::Grid &g,
- const SubGrid& subgrid,
- const unsigned int i)
- : U(U), V(V), T(T), absDen(absDen), flags(flags), tri_Ng(Ng), g(g), subgrid(subgrid), i(i) {}
-
- __forceinline std::tuple<vfloat<K>,vfloat<K>,vfloat<K>,Vec3vf<K>> operator() () const
- {
- const vfloat<K> rcpAbsDen = rcp(absDen);
- const vfloat<K> t = T * rcpAbsDen;
- const vfloat<K> u0 = min(U * rcpAbsDen,1.0f);
- const vfloat<K> v0 = min(V * rcpAbsDen,1.0f);
- const vfloat<K> u1 = vfloat<K>(1.0f) - u0;
- const vfloat<K> v1 = vfloat<K>(1.0f) - v0;
- const vfloat<K> uu = select(flags,u1,u0);
- const vfloat<K> vv = select(flags,v1,v0);
- const unsigned int sx = subgrid.x() + (unsigned int)(i % 2);
- const unsigned int sy = subgrid.y() + (unsigned int)(i >>1);
- const float inv_resX = rcp((float)(int)(g.resX-1));
- const float inv_resY = rcp((float)(int)(g.resY-1));
- const vfloat<K> u = (uu + (float)(int)sx) * inv_resX;
- const vfloat<K> v = (vv + (float)(int)sy) * inv_resY;
- const Vec3vf<K> Ng(tri_Ng.x,tri_Ng.y,tri_Ng.z);
- return std::make_tuple(u,v,t,Ng);
- }
-
- private:
- const vfloat<K> U;
- const vfloat<K> V;
- const vfloat<K> T;
- const vfloat<K> absDen;
- const vbool<K> flags;
- const Vec3vf<K> tri_Ng;
-
- const GridMesh::Grid &g;
- const SubGrid& subgrid;
- const size_t i;
- };
-
- template<int M, int K, bool filter>
- struct SubGridQuadMIntersectorKMoellerTrumboreBase
- {
- __forceinline SubGridQuadMIntersectorKMoellerTrumboreBase(const vbool<K>& valid, const RayK<K>& ray) {}
-
- template<typename Epilog>
- __forceinline vbool<K> intersectK(const vbool<K>& valid0,
- RayK<K>& ray,
- const Vec3vf<K>& tri_v0,
- const Vec3vf<K>& tri_e1,
- const Vec3vf<K>& tri_e2,
- const Vec3vf<K>& tri_Ng,
- const vbool<K>& flags,
- const GridMesh::Grid &g,
- const SubGrid &subgrid,
- const unsigned int i,
- const Epilog& epilog) const
- {
- /* calculate denominator */
- vbool<K> valid = valid0;
- const Vec3vf<K> C = tri_v0 - ray.org;
- const Vec3vf<K> R = cross(C,ray.dir);
- const vfloat<K> den = dot(tri_Ng,ray.dir);
- const vfloat<K> absDen = abs(den);
- const vfloat<K> sgnDen = signmsk(den);
-
- /* test against edge p2 p0 */
- const vfloat<K> U = dot(R,tri_e2) ^ sgnDen;
- valid &= U >= 0.0f;
- if (likely(none(valid))) return false;
-
- /* test against edge p0 p1 */
- const vfloat<K> V = dot(R,tri_e1) ^ sgnDen;
- valid &= V >= 0.0f;
- if (likely(none(valid))) return false;
-
- /* test against edge p1 p2 */
- const vfloat<K> W = absDen-U-V;
- valid &= W >= 0.0f;
- if (likely(none(valid))) return false;
-
- /* perform depth test */
- const vfloat<K> T = dot(tri_Ng,C) ^ sgnDen;
- valid &= (absDen*ray.tnear() < T) & (T <= absDen*ray.tfar);
- if (unlikely(none(valid))) return false;
-
- /* perform backface culling */
-#if defined(EMBREE_BACKFACE_CULLING)
- valid &= den < vfloat<K>(zero);
- if (unlikely(none(valid))) return false;
-#else
- valid &= den != vfloat<K>(zero);
- if (unlikely(none(valid))) return false;
-#endif
-
- /* calculate hit information */
- SubGridQuadHitK<K> hit(U,V,T,absDen,tri_Ng,flags,g,subgrid,i);
- return epilog(valid,hit);
- }
-
- template<typename Epilog>
- __forceinline vbool<K> intersectK(const vbool<K>& valid0,
- RayK<K>& ray,
- const Vec3vf<K>& tri_v0,
- const Vec3vf<K>& tri_v1,
- const Vec3vf<K>& tri_v2,
- const vbool<K>& flags,
- const GridMesh::Grid &g,
- const SubGrid &subgrid,
- const unsigned int i,
- const Epilog& epilog) const
- {
- const Vec3vf<K> e1 = tri_v0-tri_v1;
- const Vec3vf<K> e2 = tri_v2-tri_v0;
- const Vec3vf<K> Ng = cross(e2,e1);
- return intersectK(valid0,ray,tri_v0,e1,e2,Ng,flags,g,subgrid,i,epilog);
- }
-
- template<typename Epilog>
- __forceinline bool intersectK(const vbool<K>& valid0,
- RayK<K>& ray,
- const Vec3vf<K>& v0,
- const Vec3vf<K>& v1,
- const Vec3vf<K>& v2,
- const Vec3vf<K>& v3,
- const GridMesh::Grid &g,
- const SubGrid &subgrid,
- const unsigned int i,
- const Epilog& epilog) const
- {
- intersectK(valid0,ray,v0,v1,v3,vbool<K>(false),g,subgrid,i,epilog);
- if (none(valid0)) return true;
- intersectK(valid0,ray,v2,v3,v1,vbool<K>(true ),g,subgrid,i,epilog);
- return none(valid0);
- }
-
- static __forceinline bool intersect1(RayK<K>& ray,
- size_t k,
- const Vec3vf<M>& tri_v0,
- const Vec3vf<M>& tri_e1,
- const Vec3vf<M>& tri_e2,
- const Vec3vf<M>& tri_Ng,
- MoellerTrumboreHitM<M> &hit)
- {
- /* calculate denominator */
- const Vec3vf<M> O = broadcast<vfloat<M>>(ray.org,k);
- const Vec3vf<M> D = broadcast<vfloat<M>>(ray.dir,k);
- const Vec3vf<M> C = Vec3vf<M>(tri_v0) - O;
- const Vec3vf<M> R = cross(C,D);
- const vfloat<M> den = dot(Vec3vf<M>(tri_Ng),D);
- const vfloat<M> absDen = abs(den);
- const vfloat<M> sgnDen = signmsk(den);
-
- /* perform edge tests */
- const vfloat<M> U = dot(R,Vec3vf<M>(tri_e2)) ^ sgnDen;
- const vfloat<M> V = dot(R,Vec3vf<M>(tri_e1)) ^ sgnDen;
-
- /* perform backface culling */
-#if defined(EMBREE_BACKFACE_CULLING)
- vbool<M> valid = (den < vfloat<M>(zero)) & (U >= 0.0f) & (V >= 0.0f) & (U+V<=absDen);
-#else
- vbool<M> valid = (den != vfloat<M>(zero)) & (U >= 0.0f) & (V >= 0.0f) & (U+V<=absDen);
-#endif
- if (likely(none(valid))) return false;
-
- /* perform depth test */
- const vfloat<M> T = dot(Vec3vf<M>(tri_Ng),C) ^ sgnDen;
- valid &= (absDen*vfloat<M>(ray.tnear()[k]) < T) & (T <= absDen*vfloat<M>(ray.tfar[k]));
- if (likely(none(valid))) return false;
-
- /* calculate hit information */
- new (&hit) MoellerTrumboreHitM<M>(valid,U,V,T,absDen,tri_Ng);
- return true;
- }
-
- static __forceinline bool intersect1(RayK<K>& ray,
- size_t k,
- const Vec3vf<M>& v0,
- const Vec3vf<M>& v1,
- const Vec3vf<M>& v2,
- MoellerTrumboreHitM<M> &hit)
- {
- const Vec3vf<M> e1 = v0-v1;
- const Vec3vf<M> e2 = v2-v0;
- const Vec3vf<M> Ng = cross(e2,e1);
- return intersect1(ray,k,v0,e1,e2,Ng,hit);
- }
-
- };
-
- template<int M, int K, bool filter>
- struct SubGridQuadMIntersectorKMoellerTrumbore : public SubGridQuadMIntersectorKMoellerTrumboreBase<M,K,filter>
- {
- __forceinline SubGridQuadMIntersectorKMoellerTrumbore(const vbool<K>& valid, const RayK<K>& ray)
- : SubGridQuadMIntersectorKMoellerTrumboreBase<M,K,filter>(valid,ray) {}
-
- __forceinline void intersect1(RayHitK<K>& ray, size_t k, IntersectContext* context,
- const Vec3vf<M>& v0, const Vec3vf<M>& v1, const Vec3vf<M>& v2, const Vec3vf<M>& v3, const GridMesh::Grid &g, const SubGrid &subgrid) const
- {
- Intersect1KEpilogMU<M,K,filter> epilog(ray,k,context,subgrid.geomID(),subgrid.primID());
-
- MoellerTrumboreHitM<4> hit;
- if (SubGridQuadMIntersectorKMoellerTrumboreBase<4,K,filter>::intersect1(ray,k,v0,v1,v3,hit))
- {
- interpolateUV<M>(hit,g,subgrid);
- epilog(hit.valid,hit);
- }
-
- if (SubGridQuadMIntersectorKMoellerTrumboreBase<4,K,filter>::intersect1(ray,k,v2,v3,v1,hit))
- {
- hit.U = hit.absDen - hit.U;
- hit.V = hit.absDen - hit.V;
- interpolateUV<M>(hit,g,subgrid);
- epilog(hit.valid,hit);
- }
-
- }
-
- __forceinline bool occluded1(RayK<K>& ray, size_t k, IntersectContext* context,
- const Vec3vf<M>& v0, const Vec3vf<M>& v1, const Vec3vf<M>& v2, const Vec3vf<M>& v3, const GridMesh::Grid &g, const SubGrid &subgrid) const
- {
- Occluded1KEpilogMU<M,K,filter> epilog(ray,k,context,subgrid.geomID(),subgrid.primID());
-
- MoellerTrumboreHitM<4> hit;
- if (SubGridQuadMIntersectorKMoellerTrumboreBase<4,K,filter>::intersect1(ray,k,v0,v1,v3,hit))
- {
- interpolateUV<M>(hit,g,subgrid);
- if (epilog(hit.valid,hit)) return true;
- }
-
- if (SubGridQuadMIntersectorKMoellerTrumboreBase<4,K,filter>::intersect1(ray,k,v2,v3,v1,hit))
- {
- hit.U = hit.absDen - hit.U;
- hit.V = hit.absDen - hit.V;
- interpolateUV<M>(hit,g,subgrid);
- if (epilog(hit.valid,hit)) return true;
- }
- return false;
- }
- };
-
-
-#if defined (__AVX__)
-
- /*! Intersects 4 quads with 1 ray using AVX */
- template<int K, bool filter>
- struct SubGridQuadMIntersectorKMoellerTrumbore<4,K,filter> : public SubGridQuadMIntersectorKMoellerTrumboreBase<4,K,filter>
- {
- __forceinline SubGridQuadMIntersectorKMoellerTrumbore(const vbool<K>& valid, const RayK<K>& ray)
- : SubGridQuadMIntersectorKMoellerTrumboreBase<4,K,filter>(valid,ray) {}
-
- template<typename Epilog>
- __forceinline bool intersect1(RayK<K>& ray, size_t k,const Vec3vf4& v0, const Vec3vf4& v1, const Vec3vf4& v2, const Vec3vf4& v3,
- const GridMesh::Grid &g, const SubGrid &subgrid, const Epilog& epilog) const
- {
- const Vec3vf8 vtx0(vfloat8(v0.x,v2.x),vfloat8(v0.y,v2.y),vfloat8(v0.z,v2.z));
-#if !defined(EMBREE_BACKFACE_CULLING)
- const Vec3vf8 vtx1(vfloat8(v1.x),vfloat8(v1.y),vfloat8(v1.z));
- const Vec3vf8 vtx2(vfloat8(v3.x),vfloat8(v3.y),vfloat8(v3.z));
-#else
- const Vec3vf8 vtx1(vfloat8(v1.x,v3.x),vfloat8(v1.y,v3.y),vfloat8(v1.z,v3.z));
- const Vec3vf8 vtx2(vfloat8(v3.x,v1.x),vfloat8(v3.y,v1.y),vfloat8(v3.z,v1.z));
-#endif
- const vbool8 flags(0,0,0,0,1,1,1,1);
-
- MoellerTrumboreHitM<8> hit;
- if (SubGridQuadMIntersectorKMoellerTrumboreBase<8,K,filter>::intersect1(ray,k,vtx0,vtx1,vtx2,hit))
- {
- vfloat8 U = hit.U, V = hit.V, absDen = hit.absDen;
-#if !defined(EMBREE_BACKFACE_CULLING)
- hit.U = select(flags,absDen-V,U);
- hit.V = select(flags,absDen-U,V);
- hit.vNg *= select(flags,vfloat8(-1.0f),vfloat8(1.0f));
-#else
- hit.U = select(flags,absDen-U,U);
- hit.V = select(flags,absDen-V,V);
-#endif
-
- /* correct U,V interpolation across the entire grid */
- const vint8 sx((int)subgrid.x());
- const vint8 sy((int)subgrid.y());
- const vint8 sx8(sx + vint8(0,1,1,0,0,1,1,0));
- const vint8 sy8(sy + vint8(0,0,1,1,0,0,1,1));
- const float inv_resX = rcp((float)((int)g.resX-1));
- const float inv_resY = rcp((float)((int)g.resY-1));
- hit.U = (hit.U + (vfloat8)sx8 * absDen) * inv_resX;
- hit.V = (hit.V + (vfloat8)sy8 * absDen) * inv_resY;
- if (unlikely(epilog(hit.valid,hit)))
- return true;
-
- }
- return false;
- }
-
- __forceinline bool intersect1(RayHitK<K>& ray, size_t k, IntersectContext* context,
- const Vec3vf4& v0, const Vec3vf4& v1, const Vec3vf4& v2, const Vec3vf4& v3, const GridMesh::Grid &g, const SubGrid &subgrid) const
- {
- return intersect1(ray,k,v0,v1,v2,v3,g,subgrid,Intersect1KEpilogMU<8,K,filter>(ray,k,context,subgrid.geomID(),subgrid.primID()));
- }
-
- __forceinline bool occluded1(RayK<K>& ray, size_t k, IntersectContext* context,
- const Vec3vf4& v0, const Vec3vf4& v1, const Vec3vf4& v2, const Vec3vf4& v3, const GridMesh::Grid &g, const SubGrid &subgrid) const
- {
- return intersect1(ray,k,v0,v1,v2,v3,g,subgrid,Occluded1KEpilogMU<8,K,filter>(ray,k,context,subgrid.geomID(),subgrid.primID()));
- }
- };
-
-#endif
-
-
-
- }
-}
diff --git a/thirdparty/embree-aarch64/kernels/geometry/subgrid_intersector_pluecker.h b/thirdparty/embree-aarch64/kernels/geometry/subgrid_intersector_pluecker.h
deleted file mode 100644
index 1cd88aa799..0000000000
--- a/thirdparty/embree-aarch64/kernels/geometry/subgrid_intersector_pluecker.h
+++ /dev/null
@@ -1,508 +0,0 @@
-// Copyright 2009-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-
-#pragma once
-
-#include "subgrid.h"
-#include "quad_intersector_moeller.h"
-#include "quad_intersector_pluecker.h"
-
-namespace embree
-{
- namespace isa
- {
-
- template<int M>
- struct SubGridQuadHitPlueckerM
- {
- __forceinline SubGridQuadHitPlueckerM() {}
-
- __forceinline SubGridQuadHitPlueckerM(const vbool<M>& valid,
- const vfloat<M>& U,
- const vfloat<M>& V,
- const vfloat<M>& UVW,
- const vfloat<M>& t,
- const Vec3vf<M>& Ng,
- const vbool<M>& flags) : valid(valid), vt(t)
- {
- const vbool<M> invalid = abs(UVW) < min_rcp_input;
- const vfloat<M> rcpUVW = select(invalid,vfloat<M>(0.0f),rcp(UVW));
- const vfloat<M> u = min(U * rcpUVW,1.0f);
- const vfloat<M> v = min(V * rcpUVW,1.0f);
- const vfloat<M> u1 = vfloat<M>(1.0f) - u;
- const vfloat<M> v1 = vfloat<M>(1.0f) - v;
-#if !defined(__AVX__) || defined(EMBREE_BACKFACE_CULLING)
- vu = select(flags,u1,u);
- vv = select(flags,v1,v);
- vNg = Vec3vf<M>(Ng.x,Ng.y,Ng.z);
-#else
- const vfloat<M> flip = select(flags,vfloat<M>(-1.0f),vfloat<M>(1.0f));
- vv = select(flags,u1,v);
- vu = select(flags,v1,u);
- vNg = Vec3vf<M>(flip*Ng.x,flip*Ng.y,flip*Ng.z);
-#endif
- }
-
- __forceinline void finalize()
- {
- }
-
- __forceinline Vec2f uv(const size_t i)
- {
- const float u = vu[i];
- const float v = vv[i];
- return Vec2f(u,v);
- }
-
- __forceinline float t(const size_t i) { return vt[i]; }
- __forceinline Vec3fa Ng(const size_t i) { return Vec3fa(vNg.x[i],vNg.y[i],vNg.z[i]); }
-
- public:
- vbool<M> valid;
- vfloat<M> vu;
- vfloat<M> vv;
- vfloat<M> vt;
- Vec3vf<M> vNg;
- };
-
- template<int M>
- __forceinline void interpolateUV(SubGridQuadHitPlueckerM<M> &hit,const GridMesh::Grid &g, const SubGrid& subgrid, const vint<M> &stepX, const vint<M> &stepY)
- {
- /* correct U,V interpolation across the entire grid */
- const vint<M> sx((int)subgrid.x());
- const vint<M> sy((int)subgrid.y());
- const vint<M> sxM(sx + stepX);
- const vint<M> syM(sy + stepY);
- const float inv_resX = rcp((float)((int)g.resX-1));
- const float inv_resY = rcp((float)((int)g.resY-1));
- hit.vu = (hit.vu + vfloat<M>(sxM)) * inv_resX;
- hit.vv = (hit.vv + vfloat<M>(syM)) * inv_resY;
- }
-
- template<int M>
- __forceinline static bool intersectPluecker(Ray& ray,
- const Vec3vf<M>& tri_v0,
- const Vec3vf<M>& tri_v1,
- const Vec3vf<M>& tri_v2,
- const vbool<M>& flags,
- SubGridQuadHitPlueckerM<M> &hit)
- {
- /* calculate vertices relative to ray origin */
- const Vec3vf<M> O = Vec3vf<M>((Vec3fa)ray.org);
- const Vec3vf<M> D = Vec3vf<M>((Vec3fa)ray.dir);
- const Vec3vf<M> v0 = tri_v0-O;
- const Vec3vf<M> v1 = tri_v1-O;
- const Vec3vf<M> v2 = tri_v2-O;
-
- /* calculate triangle edges */
- const Vec3vf<M> e0 = v2-v0;
- const Vec3vf<M> e1 = v0-v1;
- const Vec3vf<M> e2 = v1-v2;
-
- /* perform edge tests */
- const vfloat<M> U = dot(cross(e0,v2+v0),D);
- const vfloat<M> V = dot(cross(e1,v0+v1),D);
- const vfloat<M> W = dot(cross(e2,v1+v2),D);
- const vfloat<M> UVW = U+V+W;
- const vfloat<M> eps = float(ulp)*abs(UVW);
-#if defined(EMBREE_BACKFACE_CULLING)
- vbool<M> valid = max(U,V,W) <= eps;
-#else
- vbool<M> valid = (min(U,V,W) >= -eps) | (max(U,V,W) <= eps);
-#endif
- if (unlikely(none(valid))) return false;
-
- /* calculate geometry normal and denominator */
- const Vec3vf<M> Ng = stable_triangle_normal(e0,e1,e2);
- const vfloat<M> den = twice(dot(Ng,D));
-
- /* perform depth test */
- const vfloat<M> T = twice(dot(v0,Ng));
- const vfloat<M> t = rcp(den)*T;
- valid &= vfloat<M>(ray.tnear()) <= t & t <= vfloat<M>(ray.tfar);
- valid &= den != vfloat<M>(zero);
- if (unlikely(none(valid))) return false;
-
- /* update hit information */
- new (&hit) SubGridQuadHitPlueckerM<M>(valid,U,V,UVW,t,Ng,flags);
- return true;
- }
-
- template<int M, bool filter>
- struct SubGridQuadMIntersector1Pluecker;
-
- template<int M, bool filter>
- struct SubGridQuadMIntersector1Pluecker
- {
- __forceinline SubGridQuadMIntersector1Pluecker() {}
-
- __forceinline SubGridQuadMIntersector1Pluecker(const Ray& ray, const void* ptr) {}
-
- __forceinline void intersect(RayHit& ray, IntersectContext* context,
- const Vec3vf<M>& v0, const Vec3vf<M>& v1, const Vec3vf<M>& v2, const Vec3vf<M>& v3,
- const GridMesh::Grid &g, const SubGrid& subgrid) const
- {
- SubGridQuadHitPlueckerM<M> hit;
- Intersect1EpilogMU<M,filter> epilog(ray,context,subgrid.geomID(),subgrid.primID());
-
- /* intersect first triangle */
- if (intersectPluecker(ray,v0,v1,v3,vbool<M>(false),hit))
- {
- interpolateUV<M>(hit,g,subgrid,vint<M>(0,1,1,0),vint<M>(0,0,1,1));
- epilog(hit.valid,hit);
- }
-
- /* intersect second triangle */
- if (intersectPluecker(ray,v2,v3,v1,vbool<M>(true),hit))
- {
- interpolateUV<M>(hit,g,subgrid,vint<M>(0,1,1,0),vint<M>(0,0,1,1));
- epilog(hit.valid,hit);
- }
- }
-
- __forceinline bool occluded(Ray& ray, IntersectContext* context,
- const Vec3vf<M>& v0, const Vec3vf<M>& v1, const Vec3vf<M>& v2, const Vec3vf<M>& v3,
- const GridMesh::Grid &g, const SubGrid& subgrid) const
- {
- SubGridQuadHitPlueckerM<M> hit;
- Occluded1EpilogMU<M,filter> epilog(ray,context,subgrid.geomID(),subgrid.primID());
-
- /* intersect first triangle */
- if (intersectPluecker(ray,v0,v1,v3,vbool<M>(false),hit))
- {
- interpolateUV<M>(hit,g,subgrid,vint<M>(0,1,1,0),vint<M>(0,0,1,1));
- if (epilog(hit.valid,hit))
- return true;
- }
-
- /* intersect second triangle */
- if (intersectPluecker(ray,v2,v3,v1,vbool<M>(true),hit))
- {
- interpolateUV<M>(hit,g,subgrid,vint<M>(0,1,1,0),vint<M>(0,0,1,1));
- if (epilog(hit.valid,hit))
- return true;
- }
-
- return false;
- }
- };
-
-#if defined (__AVX__)
-
- /*! Intersects 4 quads with 1 ray using AVX */
- template<bool filter>
- struct SubGridQuadMIntersector1Pluecker<4,filter>
- {
- __forceinline SubGridQuadMIntersector1Pluecker() {}
-
- __forceinline SubGridQuadMIntersector1Pluecker(const Ray& ray, const void* ptr) {}
-
- template<typename Epilog>
- __forceinline bool intersect(Ray& ray, const Vec3vf4& v0, const Vec3vf4& v1, const Vec3vf4& v2, const Vec3vf4& v3, const GridMesh::Grid &g, const SubGrid& subgrid, const Epilog& epilog) const
- {
- const Vec3vf8 vtx0(vfloat8(v0.x,v2.x),vfloat8(v0.y,v2.y),vfloat8(v0.z,v2.z));
-#if !defined(EMBREE_BACKFACE_CULLING)
- const Vec3vf8 vtx1(vfloat8(v1.x),vfloat8(v1.y),vfloat8(v1.z));
- const Vec3vf8 vtx2(vfloat8(v3.x),vfloat8(v3.y),vfloat8(v3.z));
-#else
- const Vec3vf8 vtx1(vfloat8(v1.x,v3.x),vfloat8(v1.y,v3.y),vfloat8(v1.z,v3.z));
- const Vec3vf8 vtx2(vfloat8(v3.x,v1.x),vfloat8(v3.y,v1.y),vfloat8(v3.z,v1.z));
-#endif
- SubGridQuadHitPlueckerM<8> hit;
- const vbool8 flags(0,0,0,0,1,1,1,1);
- if (unlikely(intersectPluecker(ray,vtx0,vtx1,vtx2,flags,hit)))
- {
- /* correct U,V interpolation across the entire grid */
- interpolateUV<8>(hit,g,subgrid,vint<8>(0,1,1,0,0,1,1,0),vint<8>(0,0,1,1,0,0,1,1));
- if (unlikely(epilog(hit.valid,hit)))
- return true;
- }
- return false;
- }
-
- __forceinline bool intersect(RayHit& ray, IntersectContext* context,
- const Vec3vf4& v0, const Vec3vf4& v1, const Vec3vf4& v2, const Vec3vf4& v3,
- const GridMesh::Grid &g, const SubGrid& subgrid) const
- {
- return intersect(ray,v0,v1,v2,v3,g,subgrid,Intersect1EpilogMU<8,filter>(ray,context,subgrid.geomID(),subgrid.primID()));
- }
-
- __forceinline bool occluded(Ray& ray, IntersectContext* context,
- const Vec3vf4& v0, const Vec3vf4& v1, const Vec3vf4& v2, const Vec3vf4& v3,
- const GridMesh::Grid &g, const SubGrid& subgrid) const
- {
- return intersect(ray,v0,v1,v2,v3,g,subgrid,Occluded1EpilogMU<8,filter>(ray,context,subgrid.geomID(),subgrid.primID()));
- }
- };
-
-#endif
-
-
- /* ----------------------------- */
- /* -- ray packet intersectors -- */
- /* ----------------------------- */
-
- template<int K>
- struct SubGridQuadHitPlueckerK
- {
- __forceinline SubGridQuadHitPlueckerK(const vfloat<K>& U,
- const vfloat<K>& V,
- const vfloat<K>& UVW,
- const vfloat<K>& t,
- const Vec3vf<K>& Ng,
- const vbool<K>& flags,
- const GridMesh::Grid &g,
- const SubGrid& subgrid,
- const unsigned int i)
- : U(U), V(V), UVW(UVW), t(t), flags(flags), tri_Ng(Ng), g(g), subgrid(subgrid), i(i) {}
-
- __forceinline std::tuple<vfloat<K>,vfloat<K>,vfloat<K>,Vec3vf<K>> operator() () const
- {
- const vbool<K> invalid = abs(UVW) < min_rcp_input;
- const vfloat<K> rcpUVW = select(invalid,vfloat<K>(0.0f),rcp(UVW));
- const vfloat<K> u0 = min(U * rcpUVW,1.0f);
- const vfloat<K> v0 = min(V * rcpUVW,1.0f);
- const vfloat<K> u1 = vfloat<K>(1.0f) - u0;
- const vfloat<K> v1 = vfloat<K>(1.0f) - v0;
- const vfloat<K> uu = select(flags,u1,u0);
- const vfloat<K> vv = select(flags,v1,v0);
- const unsigned int sx = subgrid.x() + (unsigned int)(i % 2);
- const unsigned int sy = subgrid.y() + (unsigned int)(i >>1);
- const float inv_resX = rcp((float)(int)(g.resX-1));
- const float inv_resY = rcp((float)(int)(g.resY-1));
- const vfloat<K> u = (uu + (float)(int)sx) * inv_resX;
- const vfloat<K> v = (vv + (float)(int)sy) * inv_resY;
- const Vec3vf<K> Ng(tri_Ng.x,tri_Ng.y,tri_Ng.z);
- return std::make_tuple(u,v,t,Ng);
- }
-
- private:
- const vfloat<K> U;
- const vfloat<K> V;
- const vfloat<K> UVW;
- const vfloat<K> t;
- const vfloat<K> absDen;
- const vbool<K> flags;
- const Vec3vf<K> tri_Ng;
-
- const GridMesh::Grid &g;
- const SubGrid& subgrid;
- const size_t i;
- };
-
-
- template<int M, int K, bool filter>
- struct SubGridQuadMIntersectorKPlueckerBase
- {
- __forceinline SubGridQuadMIntersectorKPlueckerBase(const vbool<K>& valid, const RayK<K>& ray) {}
-
- template<typename Epilog>
- __forceinline vbool<K> intersectK(const vbool<K>& valid0,
- RayK<K>& ray,
- const Vec3vf<K>& tri_v0,
- const Vec3vf<K>& tri_v1,
- const Vec3vf<K>& tri_v2,
- const Vec3vf<K>& tri_Ng,
- const vbool<K>& flags,
- const GridMesh::Grid &g,
- const SubGrid &subgrid,
- const unsigned int i,
- const Epilog& epilog) const
- {
- /* calculate denominator */
- /* calculate vertices relative to ray origin */
- vbool<K> valid = valid0;
- const Vec3vf<K> O = ray.org;
- const Vec3vf<K> D = ray.dir;
- const Vec3vf<K> v0 = tri_v0-O;
- const Vec3vf<K> v1 = tri_v1-O;
- const Vec3vf<K> v2 = tri_v2-O;
-
- /* calculate triangle edges */
- const Vec3vf<K> e0 = v2-v0;
- const Vec3vf<K> e1 = v0-v1;
- const Vec3vf<K> e2 = v1-v2;
-
- /* perform edge tests */
- const vfloat<K> U = dot(Vec3vf<K>(cross(e0,v2+v0)),D);
- const vfloat<K> V = dot(Vec3vf<K>(cross(e1,v0+v1)),D);
- const vfloat<K> W = dot(Vec3vf<K>(cross(e2,v1+v2)),D);
- const vfloat<K> UVW = U+V+W;
- const vfloat<K> eps = float(ulp)*abs(UVW);
-#if defined(EMBREE_BACKFACE_CULLING)
- valid &= max(U,V,W) <= eps;
-#else
- valid &= (min(U,V,W) >= -eps) | (max(U,V,W) <= eps);
-#endif
- if (unlikely(none(valid))) return false;
-
- /* calculate geometry normal and denominator */
- const Vec3vf<K> Ng = stable_triangle_normal(e0,e1,e2);
- const vfloat<K> den = twice(dot(Vec3vf<K>(Ng),D));
-
- /* perform depth test */
- const vfloat<K> T = twice(dot(v0,Vec3vf<K>(Ng)));
- const vfloat<K> t = rcp(den)*T;
- valid &= ray.tnear() <= t & t <= ray.tfar;
- valid &= den != vfloat<K>(zero);
- if (unlikely(none(valid))) return false;
-
- /* calculate hit information */
- SubGridQuadHitPlueckerK<K> hit(U,V,UVW,t,tri_Ng,flags,g,subgrid,i);
- return epilog(valid,hit);
- }
-
- template<typename Epilog>
- __forceinline vbool<K> intersectK(const vbool<K>& valid0,
- RayK<K>& ray,
- const Vec3vf<K>& v0,
- const Vec3vf<K>& v1,
- const Vec3vf<K>& v2,
- const vbool<K>& flags,
- const GridMesh::Grid &g,
- const SubGrid &subgrid,
- const unsigned int i,
- const Epilog& epilog) const
- {
- const Vec3vf<K> e1 = v0-v1;
- const Vec3vf<K> e2 = v2-v0;
- const Vec3vf<K> Ng = cross(e2,e1);
- return intersectK(valid0,ray,v0,v1,v2,Ng,flags,g,subgrid,i,epilog);
- }
-
- template<typename Epilog>
- __forceinline bool intersectK(const vbool<K>& valid0,
- RayK<K>& ray,
- const Vec3vf<K>& v0,
- const Vec3vf<K>& v1,
- const Vec3vf<K>& v2,
- const Vec3vf<K>& v3,
- const GridMesh::Grid &g,
- const SubGrid &subgrid,
- const unsigned int i,
- const Epilog& epilog) const
- {
- intersectK(valid0,ray,v0,v1,v3,vbool<K>(false),g,subgrid,i,epilog);
- if (none(valid0)) return true;
- intersectK(valid0,ray,v2,v3,v1,vbool<K>(true ),g,subgrid,i,epilog);
- return none(valid0);
- }
-
- static __forceinline bool intersect1(RayK<K>& ray,
- size_t k,
- const Vec3vf<M>& tri_v0,
- const Vec3vf<M>& tri_v1,
- const Vec3vf<M>& tri_v2,
- const Vec3vf<M>& tri_Ng,
- const vbool<M>& flags,
- SubGridQuadHitPlueckerM<M> &hit)
- {
- /* calculate vertices relative to ray origin */
- const Vec3vf<M> O = broadcast<vfloat<M>>(ray.org,k);
- const Vec3vf<M> D = broadcast<vfloat<M>>(ray.dir,k);
- const Vec3vf<M> v0 = tri_v0-O;
- const Vec3vf<M> v1 = tri_v1-O;
- const Vec3vf<M> v2 = tri_v2-O;
-
- /* calculate triangle edges */
- const Vec3vf<M> e0 = v2-v0;
- const Vec3vf<M> e1 = v0-v1;
- const Vec3vf<M> e2 = v1-v2;
-
- /* perform edge tests */
- const vfloat<M> U = dot(cross(e0,v2+v0),D);
- const vfloat<M> V = dot(cross(e1,v0+v1),D);
- const vfloat<M> W = dot(cross(e2,v1+v2),D);
- const vfloat<M> UVW = U+V+W;
- const vfloat<M> eps = float(ulp)*abs(UVW);
-#if defined(EMBREE_BACKFACE_CULLING)
- vbool<M> valid = max(U,V,W) <= eps ;
-#else
- vbool<M> valid = (min(U,V,W) >= -eps) | (max(U,V,W) <= eps);
-#endif
- if (unlikely(none(valid))) return false;
-
- /* calculate geometry normal and denominator */
- const Vec3vf<M> Ng = stable_triangle_normal(e0,e1,e2);
- const vfloat<M> den = twice(dot(Ng,D));
-
- /* perform depth test */
- const vfloat<M> T = twice(dot(v0,Ng));
- const vfloat<M> t = rcp(den)*T;
- valid &= vfloat<M>(ray.tnear()[k]) <= t & t <= vfloat<M>(ray.tfar[k]);
- if (unlikely(none(valid))) return false;
-
- /* avoid division by 0 */
- valid &= den != vfloat<M>(zero);
- if (unlikely(none(valid))) return false;
-
- /* update hit information */
- new (&hit) SubGridQuadHitPlueckerM<M>(valid,U,V,UVW,t,tri_Ng,flags);
- return true;
- }
-
- static __forceinline bool intersect1(RayK<K>& ray,
- size_t k,
- const Vec3vf<M>& v0,
- const Vec3vf<M>& v1,
- const Vec3vf<M>& v2,
- const vbool<M>& flags,
- SubGridQuadHitPlueckerM<M> &hit)
- {
- const Vec3vf<M> e1 = v0-v1;
- const Vec3vf<M> e2 = v2-v0;
- const Vec3vf<M> Ng = cross(e2,e1); // FIXME: optimize!!!
- return intersect1(ray,k,v0,v1,v2,Ng,flags,hit);
- }
-
- };
-
- template<int M, int K, bool filter>
- struct SubGridQuadMIntersectorKPluecker : public SubGridQuadMIntersectorKPlueckerBase<M,K,filter>
- {
- __forceinline SubGridQuadMIntersectorKPluecker(const vbool<K>& valid, const RayK<K>& ray)
- : SubGridQuadMIntersectorKPlueckerBase<M,K,filter>(valid,ray) {}
-
- __forceinline void intersect1(RayHitK<K>& ray, size_t k, IntersectContext* context,
- const Vec3vf<M>& v0, const Vec3vf<M>& v1, const Vec3vf<M>& v2, const Vec3vf<M>& v3, const GridMesh::Grid &g, const SubGrid &subgrid) const
- {
- Intersect1KEpilogMU<M,K,filter> epilog(ray,k,context,subgrid.geomID(),subgrid.primID());
-
- SubGridQuadHitPlueckerM<4> hit;
- if (SubGridQuadMIntersectorKPlueckerBase<4,K,filter>::intersect1(ray,k,v0,v1,v3,vboolf4(false),hit))
- {
- interpolateUV<M>(hit,g,subgrid,vint<M>(0,1,1,0),vint<M>(0,0,1,1));
- epilog(hit.valid,hit);
- }
-
- if (SubGridQuadMIntersectorKPlueckerBase<4,K,filter>::intersect1(ray,k,v2,v3,v1,vboolf4(true),hit))
- {
- interpolateUV<M>(hit,g,subgrid,vint<M>(0,1,1,0),vint<M>(0,0,1,1));
- epilog(hit.valid,hit);
- }
-
- }
-
- __forceinline bool occluded1(RayK<K>& ray, size_t k, IntersectContext* context,
- const Vec3vf<M>& v0, const Vec3vf<M>& v1, const Vec3vf<M>& v2, const Vec3vf<M>& v3, const GridMesh::Grid &g, const SubGrid &subgrid) const
- {
- Occluded1KEpilogMU<M,K,filter> epilog(ray,k,context,subgrid.geomID(),subgrid.primID());
-
- SubGridQuadHitPlueckerM<4> hit;
- if (SubGridQuadMIntersectorKPlueckerBase<4,K,filter>::intersect1(ray,k,v0,v1,v3,vboolf4(false),hit))
- {
- interpolateUV<M>(hit,g,subgrid,vint<M>(0,1,1,0),vint<M>(0,0,1,1));
- if (epilog(hit.valid,hit)) return true;
- }
-
- if (SubGridQuadMIntersectorKPlueckerBase<4,K,filter>::intersect1(ray,k,v2,v3,v1,vboolf4(true),hit))
- {
- interpolateUV<M>(hit,g,subgrid,vint<M>(0,1,1,0),vint<M>(0,0,1,1));
- if (epilog(hit.valid,hit)) return true;
- }
- return false;
- }
- };
-
- }
-}
diff --git a/thirdparty/embree-aarch64/common/algorithms/parallel_any_of.h b/thirdparty/embree/common/algorithms/parallel_any_of.h
index 01f1f80f6c..a64e4a1889 100644
--- a/thirdparty/embree-aarch64/common/algorithms/parallel_any_of.h
+++ b/thirdparty/embree/common/algorithms/parallel_any_of.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
diff --git a/thirdparty/embree-aarch64/common/algorithms/parallel_filter.h b/thirdparty/embree/common/algorithms/parallel_filter.h
index 5823fc631f..090ef164c2 100644
--- a/thirdparty/embree-aarch64/common/algorithms/parallel_filter.h
+++ b/thirdparty/embree/common/algorithms/parallel_filter.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
diff --git a/thirdparty/embree-aarch64/common/algorithms/parallel_for.h b/thirdparty/embree/common/algorithms/parallel_for.h
index 51d296fb16..645681ac63 100644
--- a/thirdparty/embree-aarch64/common/algorithms/parallel_for.h
+++ b/thirdparty/embree/common/algorithms/parallel_for.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
@@ -8,12 +8,6 @@
#include "../math/math.h"
#include "../math/range.h"
-#if defined(TASKING_GCD) && defined(BUILD_IOS)
-#include <dispatch/dispatch.h>
-#include <algorithm>
-#include <type_traits>
-#endif
-
namespace embree
{
/* parallel_for without range */
@@ -29,28 +23,10 @@ namespace embree
if (!TaskScheduler::wait())
// -- GODOT start --
// throw std::runtime_error("task cancelled");
- abort();
+ abort();
// -- GODOT end --
}
-#elif defined(TASKING_GCD) && defined(BUILD_IOS)
-
- const size_t baselineNumBlocks = (TaskScheduler::threadCount() > 1)? TaskScheduler::threadCount() : 1;
- const size_t length = N;
- const size_t blockSize = (length + baselineNumBlocks-1) / baselineNumBlocks;
- const size_t numBlocks = (length + blockSize-1) / blockSize;
-
- dispatch_apply(numBlocks, DISPATCH_APPLY_AUTO, ^(size_t currentBlock) {
-
- const size_t start = (currentBlock * blockSize);
- const size_t blockLength = std::min(length - start, blockSize);
- const size_t end = start + blockLength;
-
- for(size_t i=start; i < end; i++)
- {
- func(i);
- }
- });
-
+
#elif defined(TASKING_TBB)
#if TBB_INTERFACE_VERSION >= 12002
tbb::task_group_context context;
@@ -60,7 +36,7 @@ namespace embree
if (context.is_group_execution_cancelled())
// -- GODOT start --
// throw std::runtime_error("task cancelled");
- abort();
+ abort();
// -- GODOT end --
#else
tbb::parallel_for(Index(0),N,Index(1),[&](Index i) {
@@ -69,7 +45,7 @@ namespace embree
if (tbb::task::self().is_cancelled())
// -- GODOT start --
// throw std::runtime_error("task cancelled");
- abort();
+ abort();
// -- GODOT end --
#endif
@@ -92,28 +68,9 @@ namespace embree
if (!TaskScheduler::wait())
// -- GODOT start --
// throw std::runtime_error("task cancelled");
- abort();
+ abort();
// -- GODOT end --
-#elif defined(TASKING_GCD) && defined(BUILD_IOS)
-
- const size_t baselineNumBlocks = (TaskScheduler::threadCount() > 1)? 4*TaskScheduler::threadCount() : 1;
- const size_t length = last - first;
- const size_t blockSizeByThreads = (length + baselineNumBlocks-1) / baselineNumBlocks;
- size_t blockSize = std::max<size_t>(minStepSize,blockSizeByThreads);
- blockSize += blockSize % 4;
-
- const size_t numBlocks = (length + blockSize-1) / blockSize;
-
- dispatch_apply(numBlocks, DISPATCH_APPLY_AUTO, ^(size_t currentBlock) {
-
- const size_t start = first + (currentBlock * blockSize);
- const size_t end = std::min<size_t>(last, start + blockSize);
-
- func( embree::range<Index>(start,end) );
- });
-
-
#elif defined(TASKING_TBB)
#if TBB_INTERFACE_VERSION >= 12002
tbb::task_group_context context;
@@ -123,7 +80,7 @@ namespace embree
if (context.is_group_execution_cancelled())
// -- GODOT start --
// throw std::runtime_error("task cancelled");
- abort();
+ abort();
// -- GODOT end --
#else
tbb::parallel_for(tbb::blocked_range<Index>(first,last,minStepSize),[&](const tbb::blocked_range<Index>& r) {
@@ -132,7 +89,7 @@ namespace embree
if (tbb::task::self().is_cancelled())
// -- GODOT start --
// throw std::runtime_error("task cancelled");
- abort();
+ abort();
// -- GODOT end --
#endif
@@ -167,7 +124,7 @@ namespace embree
if (context.is_group_execution_cancelled())
// -- GODOT start --
// throw std::runtime_error("task cancelled");
- abort();
+ abort();
// -- GODOT end --
#else
tbb::parallel_for(Index(0),N,Index(1),[&](Index i) {
@@ -176,7 +133,7 @@ namespace embree
if (tbb::task::self().is_cancelled())
// -- GODOT start --
// throw std::runtime_error("task cancelled");
- abort();
+ abort();
// -- GODOT end --
#endif
}
@@ -192,10 +149,10 @@ namespace embree
func(i);
},ap,context);
if (context.is_group_execution_cancelled())
- // -- GODOT start --
- // throw std::runtime_error("task cancelled");
- abort();
- // -- GODOT end --
+ // -- GODOT start --
+ // throw std::runtime_error("task cancelled");
+ abort();
+ // -- GODOT end --
#else
tbb::parallel_for(Index(0),N,Index(1),[&](Index i) {
func(i);
@@ -203,7 +160,7 @@ namespace embree
if (tbb::task::self().is_cancelled())
// -- GODOT start --
// throw std::runtime_error("task cancelled");
- abort();
+ abort();
// -- GODOT end --
#endif
}
diff --git a/thirdparty/embree-aarch64/common/algorithms/parallel_for_for.h b/thirdparty/embree/common/algorithms/parallel_for_for.h
index 852b8a0900..92c37a4a38 100644
--- a/thirdparty/embree-aarch64/common/algorithms/parallel_for_for.h
+++ b/thirdparty/embree/common/algorithms/parallel_for_for.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
diff --git a/thirdparty/embree-aarch64/common/algorithms/parallel_for_for_prefix_sum.h b/thirdparty/embree/common/algorithms/parallel_for_for_prefix_sum.h
index d2671d8a6a..b15b44a991 100644
--- a/thirdparty/embree-aarch64/common/algorithms/parallel_for_for_prefix_sum.h
+++ b/thirdparty/embree/common/algorithms/parallel_for_for_prefix_sum.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
diff --git a/thirdparty/embree-aarch64/common/algorithms/parallel_map.h b/thirdparty/embree/common/algorithms/parallel_map.h
index 02e1a8f8d0..15c098fe20 100644
--- a/thirdparty/embree-aarch64/common/algorithms/parallel_map.h
+++ b/thirdparty/embree/common/algorithms/parallel_map.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
diff --git a/thirdparty/embree-aarch64/common/algorithms/parallel_partition.h b/thirdparty/embree/common/algorithms/parallel_partition.h
index 3b3ad7c854..a1cbdc8e04 100644
--- a/thirdparty/embree-aarch64/common/algorithms/parallel_partition.h
+++ b/thirdparty/embree/common/algorithms/parallel_partition.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
diff --git a/thirdparty/embree-aarch64/common/algorithms/parallel_prefix_sum.h b/thirdparty/embree/common/algorithms/parallel_prefix_sum.h
index 117c7a79b0..208bb4e480 100644
--- a/thirdparty/embree-aarch64/common/algorithms/parallel_prefix_sum.h
+++ b/thirdparty/embree/common/algorithms/parallel_prefix_sum.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
diff --git a/thirdparty/embree-aarch64/common/algorithms/parallel_reduce.h b/thirdparty/embree/common/algorithms/parallel_reduce.h
index 0daf94e50e..8271372ea4 100644
--- a/thirdparty/embree-aarch64/common/algorithms/parallel_reduce.h
+++ b/thirdparty/embree/common/algorithms/parallel_reduce.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
@@ -43,7 +43,7 @@ namespace embree
template<typename Index, typename Value, typename Func, typename Reduction>
__forceinline Value parallel_reduce( const Index first, const Index last, const Index minStepSize, const Value& identity, const Func& func, const Reduction& reduction )
{
-#if defined(TASKING_INTERNAL) || (defined(TASKING_GCD) && defined(BUILD_IOS))
+#if defined(TASKING_INTERNAL)
/* fast path for small number of iterations */
Index taskCount = (last-first+minStepSize-1)/minStepSize;
diff --git a/thirdparty/embree-aarch64/common/algorithms/parallel_set.h b/thirdparty/embree/common/algorithms/parallel_set.h
index 640beba7ec..7eae577457 100644
--- a/thirdparty/embree-aarch64/common/algorithms/parallel_set.h
+++ b/thirdparty/embree/common/algorithms/parallel_set.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
diff --git a/thirdparty/embree-aarch64/common/algorithms/parallel_sort.h b/thirdparty/embree/common/algorithms/parallel_sort.h
index a758227c1b..30e56c2bfc 100644
--- a/thirdparty/embree-aarch64/common/algorithms/parallel_sort.h
+++ b/thirdparty/embree/common/algorithms/parallel_sort.h
@@ -1,13 +1,10 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
#include "../simd/simd.h"
#include "parallel_for.h"
-#if defined(TASKING_GCD) && defined(BUILD_IOS)
-#include "../sys/alloc.h"
-#endif
#include <algorithm>
namespace embree
@@ -323,7 +320,7 @@ namespace embree
#pragma nounroll
#endif
for (size_t i=startID; i<endID; i++) {
-#if defined(__X86_64__) || defined(__aarch64__)
+#if defined(__64BIT__)
const size_t index = ((size_t)(Key)src[i] >> (size_t)shift) & (size_t)mask;
#else
const Key index = ((Key)src[i] >> shift) & mask;
@@ -385,7 +382,7 @@ namespace embree
#endif
for (size_t i=startID; i<endID; i++) {
const Ty elt = src[i];
-#if defined(__X86_64__) || defined(__aarch64__)
+#if defined(__64BIT__)
const size_t index = ((size_t)(Key)src[i] >> (size_t)shift) & (size_t)mask;
#else
const size_t index = ((Key)src[i] >> shift) & mask;
diff --git a/thirdparty/embree-aarch64/common/lexers/parsestream.h b/thirdparty/embree/common/lexers/parsestream.h
index db46dc114f..f65a52cb47 100644
--- a/thirdparty/embree-aarch64/common/lexers/parsestream.h
+++ b/thirdparty/embree/common/lexers/parsestream.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
diff --git a/thirdparty/embree-aarch64/common/lexers/stream.h b/thirdparty/embree/common/lexers/stream.h
index 3f75677e68..a40c15f8eb 100644
--- a/thirdparty/embree-aarch64/common/lexers/stream.h
+++ b/thirdparty/embree/common/lexers/stream.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
diff --git a/thirdparty/embree-aarch64/common/lexers/streamfilters.h b/thirdparty/embree/common/lexers/streamfilters.h
index 25580a77b8..3592b77b03 100644
--- a/thirdparty/embree-aarch64/common/lexers/streamfilters.h
+++ b/thirdparty/embree/common/lexers/streamfilters.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
diff --git a/thirdparty/embree-aarch64/common/lexers/stringstream.cpp b/thirdparty/embree/common/lexers/stringstream.cpp
index 98dc80ad59..a037869506 100644
--- a/thirdparty/embree-aarch64/common/lexers/stringstream.cpp
+++ b/thirdparty/embree/common/lexers/stringstream.cpp
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#include "stringstream.h"
diff --git a/thirdparty/embree-aarch64/common/lexers/stringstream.h b/thirdparty/embree/common/lexers/stringstream.h
index e6dbd4aecc..6d9c27e3cd 100644
--- a/thirdparty/embree-aarch64/common/lexers/stringstream.h
+++ b/thirdparty/embree/common/lexers/stringstream.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
diff --git a/thirdparty/embree-aarch64/common/lexers/tokenstream.cpp b/thirdparty/embree/common/lexers/tokenstream.cpp
index d05be65862..6ed6f2045a 100644
--- a/thirdparty/embree-aarch64/common/lexers/tokenstream.cpp
+++ b/thirdparty/embree/common/lexers/tokenstream.cpp
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#include "tokenstream.h"
diff --git a/thirdparty/embree-aarch64/common/lexers/tokenstream.h b/thirdparty/embree/common/lexers/tokenstream.h
index 72a7b4f2f3..6e49dd0b39 100644
--- a/thirdparty/embree-aarch64/common/lexers/tokenstream.h
+++ b/thirdparty/embree/common/lexers/tokenstream.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
diff --git a/thirdparty/embree-aarch64/common/math/affinespace.h b/thirdparty/embree/common/math/affinespace.h
index 32452fbe72..9d4a0f0846 100644
--- a/thirdparty/embree-aarch64/common/math/affinespace.h
+++ b/thirdparty/embree/common/math/affinespace.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
diff --git a/thirdparty/embree-aarch64/common/math/bbox.h b/thirdparty/embree/common/math/bbox.h
index 29bb13912b..bc43155358 100644
--- a/thirdparty/embree-aarch64/common/math/bbox.h
+++ b/thirdparty/embree/common/math/bbox.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
@@ -77,7 +77,7 @@ namespace embree
return lower > upper;
}
-#if defined(__SSE__) || defined(__ARM_NEON)
+#if defined(__SSE__)
template<> __forceinline bool BBox<Vec3fa>::empty() const {
return !all(le_mask(lower,upper));
}
@@ -228,11 +228,11 @@ namespace embree
/// SSE / AVX / MIC specializations
////////////////////////////////////////////////////////////////////////////////
-#if defined (__SSE__) || defined(__ARM_NEON)
+#if defined __SSE__
#include "../simd/sse.h"
#endif
-#if defined (__AVX__)
+#if defined __AVX__
#include "../simd/avx.h"
#endif
diff --git a/thirdparty/embree-aarch64/common/math/col3.h b/thirdparty/embree/common/math/col3.h
index f52015fb88..3f50c04393 100644
--- a/thirdparty/embree-aarch64/common/math/col3.h
+++ b/thirdparty/embree/common/math/col3.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
@@ -42,6 +42,6 @@ namespace embree
}
/*! default template instantiations */
- typedef Col3<uint8_t > Col3uc;
+ typedef Col3<unsigned char> Col3uc;
typedef Col3<float > Col3f;
}
diff --git a/thirdparty/embree-aarch64/common/math/col4.h b/thirdparty/embree/common/math/col4.h
index 90df293f8e..788508516b 100644
--- a/thirdparty/embree-aarch64/common/math/col4.h
+++ b/thirdparty/embree/common/math/col4.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
@@ -42,6 +42,6 @@ namespace embree
}
/*! default template instantiations */
- typedef Col4<uint8_t > Col4uc;
+ typedef Col4<unsigned char> Col4uc;
typedef Col4<float > Col4f;
}
diff --git a/thirdparty/embree-aarch64/common/math/color.h b/thirdparty/embree/common/math/color.h
index c3083e4fc0..529584ea16 100644
--- a/thirdparty/embree-aarch64/common/math/color.h
+++ b/thirdparty/embree/common/math/color.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
@@ -52,17 +52,17 @@ namespace embree
__forceinline void set(Col3uc& d) const
{
vfloat4 s = clamp(vfloat4(m128))*255.0f;
- d.r = (uint8_t)(s[0]);
- d.g = (uint8_t)(s[1]);
- d.b = (uint8_t)(s[2]);
+ d.r = (unsigned char)(s[0]);
+ d.g = (unsigned char)(s[1]);
+ d.b = (unsigned char)(s[2]);
}
__forceinline void set(Col4uc& d) const
{
vfloat4 s = clamp(vfloat4(m128))*255.0f;
- d.r = (uint8_t)(s[0]);
- d.g = (uint8_t)(s[1]);
- d.b = (uint8_t)(s[2]);
- d.a = (uint8_t)(s[3]);
+ d.r = (unsigned char)(s[0]);
+ d.g = (unsigned char)(s[1]);
+ d.b = (unsigned char)(s[2]);
+ d.a = (unsigned char)(s[3]);
}
////////////////////////////////////////////////////////////////////////////////
@@ -114,16 +114,16 @@ namespace embree
__forceinline void set(Col3uc& d) const
{
vfloat4 s = clamp(vfloat4(m128))*255.0f;
- d.r = (uint8_t)(s[0]);
- d.g = (uint8_t)(s[1]);
- d.b = (uint8_t)(s[2]);
+ d.r = (unsigned char)(s[0]);
+ d.g = (unsigned char)(s[1]);
+ d.b = (unsigned char)(s[2]);
}
__forceinline void set(Col4uc& d) const
{
vfloat4 s = clamp(vfloat4(m128))*255.0f;
- d.r = (uint8_t)(s[0]);
- d.g = (uint8_t)(s[1]);
- d.b = (uint8_t)(s[2]);
+ d.r = (unsigned char)(s[0]);
+ d.g = (unsigned char)(s[1]);
+ d.b = (unsigned char)(s[2]);
d.a = 255;
}
@@ -152,37 +152,21 @@ namespace embree
}
__forceinline const Color rcp ( const Color& a )
{
-#if defined(__aarch64__) && defined(BUILD_IOS)
- __m128 reciprocal = _mm_rcp_ps(a.m128);
- reciprocal = vmulq_f32(vrecpsq_f32(a.m128, reciprocal), reciprocal);
- reciprocal = vmulq_f32(vrecpsq_f32(a.m128, reciprocal), reciprocal);
- return (const Color)reciprocal;
-#else
#if defined(__AVX512VL__)
const Color r = _mm_rcp14_ps(a.m128);
#else
const Color r = _mm_rcp_ps(a.m128);
#endif
return _mm_sub_ps(_mm_add_ps(r, r), _mm_mul_ps(_mm_mul_ps(r, r), a));
-#endif //defined(__aarch64__) && defined(BUILD_IOS)
}
__forceinline const Color rsqrt( const Color& a )
{
-#if defined(__aarch64__) && defined(BUILD_IOS)
- __m128 r = _mm_rsqrt_ps(a.m128);
- r = vmulq_f32(r, vrsqrtsq_f32(vmulq_f32(a.m128, r), r));
- r = vmulq_f32(r, vrsqrtsq_f32(vmulq_f32(a.m128, r), r));
- return r;
-#else
-
#if defined(__AVX512VL__)
__m128 r = _mm_rsqrt14_ps(a.m128);
#else
__m128 r = _mm_rsqrt_ps(a.m128);
#endif
return _mm_add_ps(_mm_mul_ps(_mm_set1_ps(1.5f),r), _mm_mul_ps(_mm_mul_ps(_mm_mul_ps(a, _mm_set1_ps(-0.5f)), r), _mm_mul_ps(r, r)));
-
-#endif //defined(__aarch64__) && defined(BUILD_IOS)
}
__forceinline const Color sqrt ( const Color& a ) { return _mm_sqrt_ps(a.m128); }
diff --git a/thirdparty/embree/common/math/constants.cpp b/thirdparty/embree/common/math/constants.cpp
new file mode 100644
index 0000000000..03919ae20c
--- /dev/null
+++ b/thirdparty/embree/common/math/constants.cpp
@@ -0,0 +1,27 @@
+// Copyright 2009-2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+
+#include "constants.h"
+
+namespace embree
+{
+ TrueTy True;
+ FalseTy False;
+ ZeroTy zero;
+ OneTy one;
+ NegInfTy neg_inf;
+ PosInfTy inf;
+ PosInfTy pos_inf;
+ NaNTy nan;
+ UlpTy ulp;
+ PiTy pi;
+ OneOverPiTy one_over_pi;
+ TwoPiTy two_pi;
+ OneOverTwoPiTy one_over_two_pi;
+ FourPiTy four_pi;
+ OneOverFourPiTy one_over_four_pi;
+ StepTy step;
+ ReverseStepTy reverse_step;
+ EmptyTy empty;
+ UndefinedTy undefined;
+}
diff --git a/thirdparty/embree-aarch64/common/math/constants.h b/thirdparty/embree/common/math/constants.h
index e80abec80f..578473a8ab 100644
--- a/thirdparty/embree-aarch64/common/math/constants.h
+++ b/thirdparty/embree/common/math/constants.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
@@ -12,19 +12,6 @@
#include <cfloat>
#include <climits>
-// Math constants may not be defined in libcxx + mingw + strict C++ standard
-#if defined(__MINGW32__)
-
-// TODO(LTE): use constexpr
-#ifndef M_PI
-#define M_PI 3.14159265358979323846
-#endif
-#ifndef M_1_PI
-#define M_1_PI 0.31830988618379067154
-#endif
-
-#endif // __MINGW32__
-
namespace embree
{
static MAYBE_UNUSED const float one_over_255 = 1.0f/255.0f;
@@ -57,8 +44,8 @@ namespace embree
__forceinline operator unsigned int ( ) const { return 0; }
__forceinline operator short ( ) const { return 0; }
__forceinline operator unsigned short ( ) const { return 0; }
- __forceinline operator int8_t ( ) const { return 0; }
- __forceinline operator uint8_t ( ) const { return 0; }
+ __forceinline operator char ( ) const { return 0; }
+ __forceinline operator unsigned char ( ) const { return 0; }
};
extern MAYBE_UNUSED ZeroTy zero;
@@ -75,8 +62,8 @@ namespace embree
__forceinline operator unsigned int ( ) const { return 1; }
__forceinline operator short ( ) const { return 1; }
__forceinline operator unsigned short ( ) const { return 1; }
- __forceinline operator int8_t ( ) const { return 1; }
- __forceinline operator uint8_t ( ) const { return 1; }
+ __forceinline operator char ( ) const { return 1; }
+ __forceinline operator unsigned char ( ) const { return 1; }
};
extern MAYBE_UNUSED OneTy one;
@@ -93,8 +80,8 @@ namespace embree
__forceinline operator unsigned int ( ) const { return std::numeric_limits<unsigned int>::min(); }
__forceinline operator short ( ) const { return std::numeric_limits<short>::min(); }
__forceinline operator unsigned short ( ) const { return std::numeric_limits<unsigned short>::min(); }
- __forceinline operator int8_t ( ) const { return std::numeric_limits<int8_t>::min(); }
- __forceinline operator uint8_t ( ) const { return std::numeric_limits<uint8_t>::min(); }
+ __forceinline operator char ( ) const { return std::numeric_limits<char>::min(); }
+ __forceinline operator unsigned char ( ) const { return std::numeric_limits<unsigned char>::min(); }
};
@@ -112,8 +99,8 @@ namespace embree
__forceinline operator unsigned int ( ) const { return std::numeric_limits<unsigned int>::max(); }
__forceinline operator short ( ) const { return std::numeric_limits<short>::max(); }
__forceinline operator unsigned short ( ) const { return std::numeric_limits<unsigned short>::max(); }
- __forceinline operator int8_t ( ) const { return std::numeric_limits<int8_t>::max(); }
- __forceinline operator uint8_t ( ) const { return std::numeric_limits<uint8_t>::max(); }
+ __forceinline operator char ( ) const { return std::numeric_limits<char>::max(); }
+ __forceinline operator unsigned char ( ) const { return std::numeric_limits<unsigned char>::max(); }
};
extern MAYBE_UNUSED PosInfTy inf;
@@ -207,33 +194,4 @@ namespace embree
};
extern MAYBE_UNUSED UndefinedTy undefined;
-
-#if defined(__aarch64__)
- extern const uint32x4_t movemask_mask;
- extern const uint32x4_t vzero;
- extern const uint32x4_t v0x80000000;
- extern const uint32x4_t v0x7fffffff;
- extern const uint32x4_t v000F;
- extern const uint32x4_t v00F0;
- extern const uint32x4_t v00FF;
- extern const uint32x4_t v0F00;
- extern const uint32x4_t v0F0F;
- extern const uint32x4_t v0FF0;
- extern const uint32x4_t v0FFF;
- extern const uint32x4_t vF000;
- extern const uint32x4_t vF00F;
- extern const uint32x4_t vF0F0;
- extern const uint32x4_t vF0FF;
- extern const uint32x4_t vFF00;
- extern const uint32x4_t vFF0F;
- extern const uint32x4_t vFFF0;
- extern const uint32x4_t vFFFF;
- extern const uint8x16_t v0022;
- extern const uint8x16_t v1133;
- extern const uint8x16_t v0101;
- extern const float32x4_t vOne;
- extern const float32x4_t vmOne;
- extern const float32x4_t vInf;
- extern const float32x4_t vmInf;
-#endif
}
diff --git a/thirdparty/embree-aarch64/common/math/interval.h b/thirdparty/embree/common/math/interval.h
index f06478e881..310add2129 100644
--- a/thirdparty/embree-aarch64/common/math/interval.h
+++ b/thirdparty/embree/common/math/interval.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
diff --git a/thirdparty/embree-aarch64/common/math/lbbox.h b/thirdparty/embree/common/math/lbbox.h
index 95df4a918d..2b397a05c8 100644
--- a/thirdparty/embree-aarch64/common/math/lbbox.h
+++ b/thirdparty/embree/common/math/lbbox.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
diff --git a/thirdparty/embree-aarch64/common/math/linearspace2.h b/thirdparty/embree/common/math/linearspace2.h
index b9a382962c..184ee695fb 100644
--- a/thirdparty/embree-aarch64/common/math/linearspace2.h
+++ b/thirdparty/embree/common/math/linearspace2.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
diff --git a/thirdparty/embree-aarch64/common/math/linearspace3.h b/thirdparty/embree/common/math/linearspace3.h
index 12b5bb776b..9eaa2cc2bb 100644
--- a/thirdparty/embree-aarch64/common/math/linearspace3.h
+++ b/thirdparty/embree/common/math/linearspace3.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
diff --git a/thirdparty/embree-aarch64/common/math/math.h b/thirdparty/embree/common/math/math.h
index 6d54abd44d..4bc54c1a6a 100644
--- a/thirdparty/embree-aarch64/common/math/math.h
+++ b/thirdparty/embree/common/math/math.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
@@ -9,18 +9,15 @@
#include <cmath>
#if defined(__ARM_NEON)
-#include "SSE2NEON.h"
-#if defined(NEON_AVX2_EMULATION)
-#include "AVX2NEON.h"
-#endif
+#include "../simd/arm/emulation.h"
#else
#include <emmintrin.h>
#include <xmmintrin.h>
#include <immintrin.h>
#endif
-#if defined(__WIN32__) && !defined(__MINGW32__)
-#if (__MSV_VER <= 1700)
+#if defined(__WIN32__)
+#if defined(_MSC_VER) && (_MSC_VER <= 1700)
namespace std
{
__forceinline bool isinf ( const float x ) { return _finite(x) == 0; }
@@ -47,7 +44,7 @@ namespace embree
__forceinline int toInt (const float& a) { return int(a); }
__forceinline float toFloat(const int& a) { return float(a); }
-#if defined(__WIN32__) && !defined(__MINGW32__)
+#if defined(__WIN32__)
__forceinline bool finite ( const float x ) { return _finite(x) != 0; }
#endif
@@ -56,16 +53,6 @@ namespace embree
__forceinline float rcp ( const float x )
{
-#if defined(__aarch64__)
- // Move scalar to vector register and do rcp.
- __m128 a;
- a[0] = x;
- float32x4_t reciprocal = vrecpeq_f32(a);
- reciprocal = vmulq_f32(vrecpsq_f32(a, reciprocal), reciprocal);
- reciprocal = vmulq_f32(vrecpsq_f32(a, reciprocal), reciprocal);
- return reciprocal[0];
-#else
-
const __m128 a = _mm_set_ss(x);
#if defined(__AVX512VL__)
@@ -79,74 +66,33 @@ namespace embree
#else
return _mm_cvtss_f32(_mm_mul_ss(r,_mm_sub_ss(_mm_set_ss(2.0f), _mm_mul_ss(r, a))));
#endif
-
-#endif //defined(__aarch64__)
}
__forceinline float signmsk ( const float x ) {
-#if defined(__aarch64__)
- // FP and Neon shares same vector register in arm64
- __m128 a;
- __m128i b;
- a[0] = x;
- b[0] = 0x80000000;
- a = _mm_and_ps(a, vreinterpretq_f32_s32(b));
- return a[0];
-#else
return _mm_cvtss_f32(_mm_and_ps(_mm_set_ss(x),_mm_castsi128_ps(_mm_set1_epi32(0x80000000))));
-#endif
}
__forceinline float xorf( const float x, const float y ) {
-#if defined(__aarch64__)
- // FP and Neon shares same vector register in arm64
- __m128 a;
- __m128 b;
- a[0] = x;
- b[0] = y;
- a = _mm_xor_ps(a, b);
- return a[0];
-#else
return _mm_cvtss_f32(_mm_xor_ps(_mm_set_ss(x),_mm_set_ss(y)));
-#endif
}
__forceinline float andf( const float x, const unsigned y ) {
-#if defined(__aarch64__)
- // FP and Neon shares same vector register in arm64
- __m128 a;
- __m128i b;
- a[0] = x;
- b[0] = y;
- a = _mm_and_ps(a, vreinterpretq_f32_s32(b));
- return a[0];
-#else
return _mm_cvtss_f32(_mm_and_ps(_mm_set_ss(x),_mm_castsi128_ps(_mm_set1_epi32(y))));
-#endif
}
__forceinline float rsqrt( const float x )
{
-#if defined(__aarch64__)
- // FP and Neon shares same vector register in arm64
- __m128 a;
- a[0] = x;
- __m128 value = _mm_rsqrt_ps(a);
- value = vmulq_f32(value, vrsqrtsq_f32(vmulq_f32(a, value), value));
- value = vmulq_f32(value, vrsqrtsq_f32(vmulq_f32(a, value), value));
- return value[0];
-#else
-
const __m128 a = _mm_set_ss(x);
#if defined(__AVX512VL__)
- const __m128 r = _mm_rsqrt14_ss(_mm_set_ss(0.0f),a);
+ __m128 r = _mm_rsqrt14_ss(_mm_set_ss(0.0f),a);
#else
- const __m128 r = _mm_rsqrt_ss(a);
+ __m128 r = _mm_rsqrt_ss(a);
#endif
- const __m128 c = _mm_add_ss(_mm_mul_ss(_mm_set_ss(1.5f), r),
- _mm_mul_ss(_mm_mul_ss(_mm_mul_ss(a, _mm_set_ss(-0.5f)), r), _mm_mul_ss(r, r)));
- return _mm_cvtss_f32(c);
+ r = _mm_add_ss(_mm_mul_ss(_mm_set_ss(1.5f), r), _mm_mul_ss(_mm_mul_ss(_mm_mul_ss(a, _mm_set_ss(-0.5f)), r), _mm_mul_ss(r, r)));
+#if defined(__ARM_NEON)
+ r = _mm_add_ss(_mm_mul_ss(_mm_set_ss(1.5f), r), _mm_mul_ss(_mm_mul_ss(_mm_mul_ss(a, _mm_set_ss(-0.5f)), r), _mm_mul_ss(r, r)));
#endif
+ return _mm_cvtss_f32(r);
}
-#if defined(__WIN32__) && (__MSC_VER <= 1700) && !defined(__MINGW32__)
+#if defined(__WIN32__) && defined(_MSC_VER) && (_MSC_VER <= 1700)
__forceinline float nextafter(float x, float y) { if ((x<y) == (x>0)) return x*(1.1f+float(ulp)); else return x*(0.9f-float(ulp)); }
__forceinline double nextafter(double x, double y) { return _nextafter(x, y); }
__forceinline int roundf(float f) { return (int)(f + 0.5f); }
@@ -200,17 +146,7 @@ namespace embree
__forceinline double floor( const double x ) { return ::floor (x); }
__forceinline double ceil ( const double x ) { return ::ceil (x); }
-#if defined(__aarch64__)
- __forceinline float mini(float a, float b) {
- // FP and Neon shares same vector register in arm64
- __m128 x;
- __m128 y;
- x[0] = a;
- y[0] = b;
- x = _mm_min_ps(x, y);
- return x[0];
- }
-#elif defined(__SSE4_1__)
+#if defined(__SSE4_1__)
__forceinline float mini(float a, float b) {
const __m128i ai = _mm_castps_si128(_mm_set_ss(a));
const __m128i bi = _mm_castps_si128(_mm_set_ss(b));
@@ -219,17 +155,7 @@ namespace embree
}
#endif
-#if defined(__aarch64__)
- __forceinline float maxi(float a, float b) {
- // FP and Neon shares same vector register in arm64
- __m128 x;
- __m128 y;
- x[0] = a;
- y[0] = b;
- x = _mm_max_ps(x, y);
- return x[0];
- }
-#elif defined(__SSE4_1__)
+#if defined(__SSE4_1__)
__forceinline float maxi(float a, float b) {
const __m128i ai = _mm_castps_si128(_mm_set_ss(a));
const __m128i bi = _mm_castps_si128(_mm_set_ss(b));
@@ -246,7 +172,7 @@ namespace embree
__forceinline int64_t min(int64_t a, int64_t b) { return a<b ? a:b; }
__forceinline float min(float a, float b) { return a<b ? a:b; }
__forceinline double min(double a, double b) { return a<b ? a:b; }
-#if defined(__X86_64__) || defined(__aarch64__)
+#if defined(__64BIT__)
__forceinline size_t min(size_t a, size_t b) { return a<b ? a:b; }
#endif
@@ -263,7 +189,7 @@ namespace embree
__forceinline int64_t max(int64_t a, int64_t b) { return a<b ? b:a; }
__forceinline float max(float a, float b) { return a<b ? b:a; }
__forceinline double max(double a, double b) { return a<b ? b:a; }
-#if defined(__X86_64__) || defined(__aarch64__)
+#if defined(__64BIT__)
__forceinline size_t max(size_t a, size_t b) { return a<b ? b:a; }
#endif
@@ -305,16 +231,6 @@ namespace embree
__forceinline float msub ( const float a, const float b, const float c) { return _mm_cvtss_f32(_mm_fmsub_ss(_mm_set_ss(a),_mm_set_ss(b),_mm_set_ss(c))); }
__forceinline float nmadd ( const float a, const float b, const float c) { return _mm_cvtss_f32(_mm_fnmadd_ss(_mm_set_ss(a),_mm_set_ss(b),_mm_set_ss(c))); }
__forceinline float nmsub ( const float a, const float b, const float c) { return _mm_cvtss_f32(_mm_fnmsub_ss(_mm_set_ss(a),_mm_set_ss(b),_mm_set_ss(c))); }
-#elif defined (__aarch64__) && defined(__clang__)
-#pragma clang fp contract(fast)
-
-
-__forceinline float madd ( const float a, const float b, const float c) { return a*b + c; }
-__forceinline float msub ( const float a, const float b, const float c) { return a*b - c; }
-__forceinline float nmadd ( const float a, const float b, const float c) { return c - a*b; }
-__forceinline float nmsub ( const float a, const float b, const float c) { return -(c + a*b); }
-
-#pragma clang fp contract(on)
#else
__forceinline float madd ( const float a, const float b, const float c) { return a*b+c; }
__forceinline float msub ( const float a, const float b, const float c) { return a*b-c; }
@@ -363,15 +279,17 @@ __forceinline float nmsub ( const float a, const float b, const float c) { retur
/*! exchange */
template<typename T> __forceinline void xchg ( T& a, T& b ) { const T tmp = a; a = b; b = tmp; }
-
- template<typename T> __forceinline T prod_diff(const T& a,const T& b,const T& c,const T& d) {
-#if 1//!defined(__aarch64__)
- return msub(a,b,c*d);
-#else
- return nmadd(c,d,a*b);
-#endif
- }
-
+ /* load/store */
+ template<typename Ty> struct mem;
+
+ template<> struct mem<float> {
+ static __forceinline float load (bool mask, const void* ptr) { return mask ? *(float*)ptr : 0.0f; }
+ static __forceinline float loadu(bool mask, const void* ptr) { return mask ? *(float*)ptr : 0.0f; }
+
+ static __forceinline void store (bool mask, void* ptr, const float v) { if (mask) *(float*)ptr = v; }
+ static __forceinline void storeu(bool mask, void* ptr, const float v) { if (mask) *(float*)ptr = v; }
+ };
+
/*! bit reverse operation */
template<class T>
__forceinline T bitReverse(const T& vin)
@@ -389,7 +307,7 @@ __forceinline float nmsub ( const float a, const float b, const float c) { retur
template<class T>
__forceinline T bitInterleave(const T& xin, const T& yin, const T& zin)
{
- T x = xin, y = yin, z = zin;
+ T x = xin, y = yin, z = zin;
x = (x | (x << 16)) & 0x030000FF;
x = (x | (x << 8)) & 0x0300F00F;
x = (x | (x << 4)) & 0x030C30C3;
@@ -408,7 +326,7 @@ __forceinline float nmsub ( const float a, const float b, const float c) { retur
return x | (y << 1) | (z << 2);
}
-#if defined(__AVX2__) && !defined(__aarch64__)
+#if defined(__AVX2__)
template<>
__forceinline unsigned int bitInterleave(const unsigned int &xi, const unsigned int& yi, const unsigned int& zi)
diff --git a/thirdparty/embree-aarch64/common/math/obbox.h b/thirdparty/embree/common/math/obbox.h
index 032b56904e..2fe8bbf071 100644
--- a/thirdparty/embree-aarch64/common/math/obbox.h
+++ b/thirdparty/embree/common/math/obbox.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
diff --git a/thirdparty/embree-aarch64/common/math/quaternion.h b/thirdparty/embree/common/math/quaternion.h
index 20c69bc62f..080800efcd 100644
--- a/thirdparty/embree-aarch64/common/math/quaternion.h
+++ b/thirdparty/embree/common/math/quaternion.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
diff --git a/thirdparty/embree-aarch64/common/math/range.h b/thirdparty/embree/common/math/range.h
index 762d9cd9ea..909fadb995 100644
--- a/thirdparty/embree-aarch64/common/math/range.h
+++ b/thirdparty/embree/common/math/range.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
diff --git a/thirdparty/embree-aarch64/common/math/transcendental.h b/thirdparty/embree/common/math/transcendental.h
index 6855d82b53..fd16c26e81 100644
--- a/thirdparty/embree-aarch64/common/math/transcendental.h
+++ b/thirdparty/embree/common/math/transcendental.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
@@ -418,7 +418,7 @@ __forceinline void __rangeReduceLog(const T &input,
}
template <typename T> struct ExponentType { };
-template <int N> struct ExponentType<vfloat<N>> { typedef vint<N> Ty; };
+template <int N> struct ExponentType<vfloat_impl<N>> { typedef vint<N> Ty; };
template <> struct ExponentType<float> { typedef int Ty; };
template <typename T>
diff --git a/thirdparty/embree-aarch64/common/math/vec2.h b/thirdparty/embree/common/math/vec2.h
index a619459e9c..d62aef51f3 100644
--- a/thirdparty/embree-aarch64/common/math/vec2.h
+++ b/thirdparty/embree/common/math/vec2.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
@@ -205,11 +205,11 @@ namespace embree
#include "vec2fa.h"
-#if defined(__SSE__) || defined(__ARM_NEON)
+#if defined __SSE__
#include "../simd/sse.h"
#endif
-#if defined(__AVX__)
+#if defined __AVX__
#include "../simd/avx.h"
#endif
@@ -221,7 +221,7 @@ namespace embree
{
template<> __forceinline Vec2<float>::Vec2(const Vec2fa& a) : x(a.x), y(a.y) {}
-#if defined(__SSE__) || defined(__ARM_NEON)
+#if defined(__SSE__)
template<> __forceinline Vec2<vfloat4>::Vec2(const Vec2fa& a) : x(a.x), y(a.y) {}
#endif
diff --git a/thirdparty/embree-aarch64/common/math/vec2fa.h b/thirdparty/embree/common/math/vec2fa.h
index 451ecd556c..a51fb68fd0 100644
--- a/thirdparty/embree-aarch64/common/math/vec2fa.h
+++ b/thirdparty/embree/common/math/vec2fa.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
@@ -97,12 +97,6 @@ namespace embree
__forceinline Vec2fa rcp ( const Vec2fa& a )
{
-#if defined(__aarch64__)
- __m128 reciprocal = _mm_rcp_ps(a.m128);
- reciprocal = vmulq_f32(vrecpsq_f32(a.m128, reciprocal), reciprocal);
- reciprocal = vmulq_f32(vrecpsq_f32(a.m128, reciprocal), reciprocal);
- return (const Vec2fa)reciprocal;
-#else
#if defined(__AVX512VL__)
const Vec2fa r = _mm_rcp14_ps(a.m128);
#else
@@ -117,7 +111,6 @@ namespace embree
#endif
return res;
-#endif //defined(__aarch64__)
}
__forceinline Vec2fa sqrt ( const Vec2fa& a ) { return _mm_sqrt_ps(a.m128); }
@@ -125,21 +118,12 @@ namespace embree
__forceinline Vec2fa rsqrt( const Vec2fa& a )
{
-#if defined(__aarch64__)
- __m128 r = _mm_rsqrt_ps(a.m128);
- r = vmulq_f32(r, vrsqrtsq_f32(vmulq_f32(a.m128, r), r));
- r = vmulq_f32(r, vrsqrtsq_f32(vmulq_f32(a.m128, r), r));
- return r;
-#else
-
#if defined(__AVX512VL__)
__m128 r = _mm_rsqrt14_ps(a.m128);
#else
__m128 r = _mm_rsqrt_ps(a.m128);
#endif
return _mm_add_ps(_mm_mul_ps(_mm_set1_ps(1.5f),r), _mm_mul_ps(_mm_mul_ps(_mm_mul_ps(a, _mm_set1_ps(-0.5f)), r), _mm_mul_ps(r, r)));
-
-#endif
}
__forceinline Vec2fa zero_fix(const Vec2fa& a) {
@@ -172,7 +156,7 @@ namespace embree
__forceinline Vec2fa min( const Vec2fa& a, const Vec2fa& b ) { return _mm_min_ps(a.m128,b.m128); }
__forceinline Vec2fa max( const Vec2fa& a, const Vec2fa& b ) { return _mm_max_ps(a.m128,b.m128); }
-#if defined(__aarch64__) || defined(__SSE4_1__)
+#if defined(__SSE4_1__)
__forceinline Vec2fa mini(const Vec2fa& a, const Vec2fa& b) {
const vint4 ai = _mm_castps_si128(a);
const vint4 bi = _mm_castps_si128(b);
@@ -181,7 +165,7 @@ namespace embree
}
#endif
-#if defined(__aarch64__) || defined(__SSE4_1__)
+#if defined(__SSE4_1__)
__forceinline Vec2fa maxi(const Vec2fa& a, const Vec2fa& b) {
const vint4 ai = _mm_castps_si128(a);
const vint4 bi = _mm_castps_si128(b);
@@ -292,9 +276,9 @@ namespace embree
////////////////////////////////////////////////////////////////////////////////
#if defined(__aarch64__)
-__forceinline Vec2fa floor(const Vec2fa& a) { return vrndmq_f32(a); }
-__forceinline Vec2fa ceil (const Vec2fa& a) { return vrndpq_f32(a); }
-//__forceinline Vec2fa trunc(const Vec2fa& a) { return vrndq_f32(a); }
+ //__forceinline Vec2fa trunc(const Vec2fa& a) { return vrndq_f32(a); }
+ __forceinline Vec2fa floor(const Vec2fa& a) { return vrndmq_f32(a); }
+ __forceinline Vec2fa ceil (const Vec2fa& a) { return vrndpq_f32(a); }
#elif defined (__SSE4_1__)
//__forceinline Vec2fa trunc( const Vec2fa& a ) { return _mm_round_ps(a, _MM_FROUND_TO_NEAREST_INT); }
__forceinline Vec2fa floor( const Vec2fa& a ) { return _mm_round_ps(a, _MM_FROUND_TO_NEG_INF ); }
diff --git a/thirdparty/embree-aarch64/common/math/vec3.h b/thirdparty/embree/common/math/vec3.h
index 1870321715..ce94eff327 100644
--- a/thirdparty/embree-aarch64/common/math/vec3.h
+++ b/thirdparty/embree/common/math/vec3.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
@@ -206,7 +206,8 @@ namespace embree
template<typename T> __forceinline T rcp_length( const Vec3<T>& a ) { return rsqrt(sqr(a)); }
template<typename T> __forceinline Vec3<T> normalize( const Vec3<T>& a ) { return a*rsqrt(sqr(a)); }
template<typename T> __forceinline T distance ( const Vec3<T>& a, const Vec3<T>& b ) { return length(a-b); }
- template<typename T> __forceinline Vec3<T> cross ( const Vec3<T>& a, const Vec3<T>& b ) { return Vec3<T>(prod_diff(a.y,b.z,a.z,b.y), prod_diff(a.z,b.x,a.x,b.z), prod_diff(a.x,b.y,a.y,b.x)); }
+ template<typename T> __forceinline Vec3<T> cross ( const Vec3<T>& a, const Vec3<T>& b ) { return Vec3<T>(msub(a.y,b.z,a.z*b.y), msub(a.z,b.x,a.x*b.z), msub(a.x,b.y,a.y*b.x)); }
+
template<typename T> __forceinline Vec3<T> stable_triangle_normal( const Vec3<T>& a, const Vec3<T>& b, const Vec3<T>& c )
{
const T ab_x = a.z*b.y, ab_y = a.x*b.z, ab_z = a.y*b.x;
@@ -265,11 +266,11 @@ namespace embree
/// SSE / AVX / MIC specializations
////////////////////////////////////////////////////////////////////////////////
-#if defined(__SSE__) || defined(__ARM_NEON)
+#if defined __SSE__
#include "../simd/sse.h"
#endif
-#if defined(__AVX__)
+#if defined __AVX__
#include "../simd/avx.h"
#endif
@@ -290,18 +291,14 @@ namespace embree
template<> __forceinline Vec3<vfloat4>::Vec3(const Vec3fa& a) {
x = a.x; y = a.y; z = a.z;
}
-#elif defined(__SSE__) || defined(__ARM_NEON)
+#elif defined(__SSE__)
template<>
__forceinline Vec3<vfloat4>::Vec3(const Vec3fa& a) {
const vfloat4 v = vfloat4(a.m128); x = shuffle<0,0,0,0>(v); y = shuffle<1,1,1,1>(v); z = shuffle<2,2,2,2>(v);
}
#endif
-#if defined(__SSE__) || defined(__ARM_NEON)
- __forceinline Vec3<vfloat4> broadcast4f(const Vec3<vfloat4>& a, const size_t k) {
- return Vec3<vfloat4>(vfloat4::broadcast(&a.x[k]), vfloat4::broadcast(&a.y[k]), vfloat4::broadcast(&a.z[k]));
- }
-
+#if defined(__SSE__)
template<>
__forceinline Vec3<vfloat4> broadcast<vfloat4,vfloat4>(const Vec3<vfloat4>& a, const size_t k) {
return Vec3<vfloat4>(vfloat4::broadcast(&a.x[k]), vfloat4::broadcast(&a.y[k]), vfloat4::broadcast(&a.z[k]));
@@ -318,15 +315,6 @@ namespace embree
__forceinline Vec3<vfloat8>::Vec3(const Vec3fa& a) {
x = a.x; y = a.y; z = a.z;
}
- __forceinline Vec3<vfloat4> broadcast4f(const Vec3<vfloat8>& a, const size_t k) {
- return Vec3<vfloat4>(vfloat4::broadcast(&a.x[k]), vfloat4::broadcast(&a.y[k]), vfloat4::broadcast(&a.z[k]));
- }
- __forceinline Vec3<vfloat8> broadcast8f(const Vec3<vfloat4>& a, const size_t k) {
- return Vec3<vfloat8>(vfloat8::broadcast(&a.x[k]), vfloat8::broadcast(&a.y[k]), vfloat8::broadcast(&a.z[k]));
- }
- __forceinline Vec3<vfloat8> broadcast8f(const Vec3<vfloat8>& a, const size_t k) {
- return Vec3<vfloat8>(vfloat8::broadcast(&a.x[k]), vfloat8::broadcast(&a.y[k]), vfloat8::broadcast(&a.z[k]));
- }
template<>
__forceinline Vec3<vfloat8> broadcast<vfloat8,vfloat4>(const Vec3<vfloat4>& a, const size_t k) {
diff --git a/thirdparty/embree-aarch64/common/math/vec3ba.h b/thirdparty/embree/common/math/vec3ba.h
index 90f31739c2..a021b522dc 100644
--- a/thirdparty/embree-aarch64/common/math/vec3ba.h
+++ b/thirdparty/embree/common/math/vec3ba.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
diff --git a/thirdparty/embree-aarch64/common/math/vec3fa.h b/thirdparty/embree/common/math/vec3fa.h
index 6163cfb596..586039741d 100644
--- a/thirdparty/embree-aarch64/common/math/vec3fa.h
+++ b/thirdparty/embree/common/math/vec3fa.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
@@ -55,13 +55,7 @@ namespace embree
////////////////////////////////////////////////////////////////////////////////
static __forceinline Vec3fa load( const void* const a ) {
-#if defined(__aarch64__)
- __m128 t = _mm_load_ps((float*)a);
- t[3] = 0.0f;
- return Vec3fa(t);
-#else
return Vec3fa(_mm_and_ps(_mm_load_ps((float*)a),_mm_castsi128_ps(_mm_set_epi32(0, -1, -1, -1))));
-#endif
}
static __forceinline Vec3fa loadu( const void* const a ) {
@@ -95,42 +89,19 @@ namespace embree
__forceinline Vec3fa operator +( const Vec3fa& a ) { return a; }
__forceinline Vec3fa operator -( const Vec3fa& a ) {
-#if defined(__aarch64__)
- return vnegq_f32(a.m128);
-#else
const __m128 mask = _mm_castsi128_ps(_mm_set1_epi32(0x80000000));
-
return _mm_xor_ps(a.m128, mask);
-#endif
}
__forceinline Vec3fa abs ( const Vec3fa& a ) {
-#if defined(__aarch64__)
- return _mm_abs_ps(a.m128);
-#else
const __m128 mask = _mm_castsi128_ps(_mm_set1_epi32(0x7fffffff));
return _mm_and_ps(a.m128, mask);
-#endif
}
__forceinline Vec3fa sign ( const Vec3fa& a ) {
-#if defined(__aarch64__)
- Vec3fa r = blendv_ps(vOne, vmOne, _mm_cmplt_ps (a.m128,vdupq_n_f32(0.0f)));
- return r;
-#else
return blendv_ps(Vec3fa(one).m128, (-Vec3fa(one)).m128, _mm_cmplt_ps (a.m128,Vec3fa(zero).m128));
-#endif
}
__forceinline Vec3fa rcp ( const Vec3fa& a )
{
-#if defined(__aarch64__) && defined(BUILD_IOS)
- return vdivq_f32(vdupq_n_f32(1.0f),a.m128);
-#elif defined(__aarch64__)
- __m128 reciprocal = _mm_rcp_ps(a.m128);
- reciprocal = vmulq_f32(vrecpsq_f32(a.m128, reciprocal), reciprocal);
- reciprocal = vmulq_f32(vrecpsq_f32(a.m128, reciprocal), reciprocal);
- return (const Vec3fa)reciprocal;
-#else
-
#if defined(__AVX512VL__)
const Vec3fa r = _mm_rcp14_ps(a.m128);
#else
@@ -145,7 +116,6 @@ namespace embree
#endif
return res;
-#endif //defined(__aarch64__)
}
__forceinline Vec3fa sqrt ( const Vec3fa& a ) { return _mm_sqrt_ps(a.m128); }
@@ -153,20 +123,12 @@ namespace embree
__forceinline Vec3fa rsqrt( const Vec3fa& a )
{
-#if defined(__aarch64__)
- __m128 r = _mm_rsqrt_ps(a.m128);
- r = vmulq_f32(r, vrsqrtsq_f32(vmulq_f32(a.m128, r), r));
- r = vmulq_f32(r, vrsqrtsq_f32(vmulq_f32(a.m128, r), r));
- return r;
-#else
-
#if defined(__AVX512VL__)
__m128 r = _mm_rsqrt14_ps(a.m128);
#else
__m128 r = _mm_rsqrt_ps(a.m128);
#endif
return _mm_add_ps(_mm_mul_ps(_mm_set1_ps(1.5f),r), _mm_mul_ps(_mm_mul_ps(_mm_mul_ps(a.m128, _mm_set1_ps(-0.5f)), r), _mm_mul_ps(r, r)));
-#endif
}
__forceinline Vec3fa zero_fix(const Vec3fa& a) {
@@ -199,7 +161,7 @@ namespace embree
__forceinline Vec3fa min( const Vec3fa& a, const Vec3fa& b ) { return _mm_min_ps(a.m128,b.m128); }
__forceinline Vec3fa max( const Vec3fa& a, const Vec3fa& b ) { return _mm_max_ps(a.m128,b.m128); }
-#if defined(__aarch64__) || defined(__SSE4_1__)
+#if defined(__SSE4_1__)
__forceinline Vec3fa mini(const Vec3fa& a, const Vec3fa& b) {
const vint4 ai = _mm_castps_si128(a.m128);
const vint4 bi = _mm_castps_si128(b.m128);
@@ -208,7 +170,7 @@ namespace embree
}
#endif
-#if defined(__aarch64__) || defined(__SSE4_1__)
+#if defined(__SSE4_1__)
__forceinline Vec3fa maxi(const Vec3fa& a, const Vec3fa& b) {
const vint4 ai = _mm_castps_si128(a.m128);
const vint4 bi = _mm_castps_si128(b.m128);
@@ -231,29 +193,10 @@ namespace embree
__forceinline Vec3fa nmadd ( const Vec3fa& a, const Vec3fa& b, const Vec3fa& c) { return _mm_fnmadd_ps(a.m128,b.m128,c.m128); }
__forceinline Vec3fa nmsub ( const Vec3fa& a, const Vec3fa& b, const Vec3fa& c) { return _mm_fnmsub_ps(a.m128,b.m128,c.m128); }
#else
-
-#if defined(__aarch64__)
- __forceinline Vec3fa madd ( const Vec3fa& a, const Vec3fa& b, const Vec3fa& c) {
- return _mm_madd_ps(a.m128, b.m128, c.m128); //a*b+c;
- }
- __forceinline Vec3fa nmadd ( const Vec3fa& a, const Vec3fa& b, const Vec3fa& c) {
- return _mm_msub_ps(a.m128, b.m128, c.m128); //-a*b+c;
- }
- __forceinline Vec3fa nmsub( const Vec3fa& a, const Vec3fa& b, const Vec3fa& c) {
- Vec3fa t = _mm_madd_ps(a.m128, b.m128, c.m128);
- return -t;
- }
- __forceinline Vec3fa msub( const Vec3fa& a, const Vec3fa& b, const Vec3fa& c) {
- return _mm_madd_ps(a.m128,b.m128,vnegq_f32(c.m128)); //a*b-c
- }
-
-#else
__forceinline Vec3fa madd ( const Vec3fa& a, const Vec3fa& b, const Vec3fa& c) { return a*b+c; }
+ __forceinline Vec3fa msub ( const Vec3fa& a, const Vec3fa& b, const Vec3fa& c) { return a*b-c; }
__forceinline Vec3fa nmadd ( const Vec3fa& a, const Vec3fa& b, const Vec3fa& c) { return -a*b+c;}
__forceinline Vec3fa nmsub ( const Vec3fa& a, const Vec3fa& b, const Vec3fa& c) { return -a*b-c; }
- __forceinline Vec3fa msub ( const Vec3fa& a, const Vec3fa& b, const Vec3fa& c) { return a*b-c; }
-#endif
-
#endif
__forceinline Vec3fa madd ( const float a, const Vec3fa& b, const Vec3fa& c) { return madd(Vec3fa(a),b,c); }
@@ -275,37 +218,18 @@ namespace embree
////////////////////////////////////////////////////////////////////////////////
/// Reductions
////////////////////////////////////////////////////////////////////////////////
-#if defined(__aarch64__) && defined(BUILD_IOS)
- __forceinline float reduce_add(const Vec3fa& v) {
- float32x4_t t = v.m128;
- t[3] = 0.0f;
- return vaddvq_f32(t);
- }
-
- __forceinline float reduce_mul(const Vec3fa& v) { return v.x*v.y*v.z; }
- __forceinline float reduce_min(const Vec3fa& v) {
- float32x4_t t = v.m128;
- t[3] = t[2];
- return vminvq_f32(t);
- }
- __forceinline float reduce_max(const Vec3fa& v) {
- float32x4_t t = v.m128;
- t[3] = t[2];
- return vmaxvq_f32(t);
- }
-#else
- __forceinline float reduce_add(const Vec3fa& v) {
+
+ __forceinline float reduce_add(const Vec3fa& v) {
const vfloat4 a(v.m128);
const vfloat4 b = shuffle<1>(a);
const vfloat4 c = shuffle<2>(a);
- return _mm_cvtss_f32(a+b+c);
+ return _mm_cvtss_f32(a+b+c);
}
__forceinline float reduce_mul(const Vec3fa& v) { return v.x*v.y*v.z; }
__forceinline float reduce_min(const Vec3fa& v) { return min(v.x,v.y,v.z); }
__forceinline float reduce_max(const Vec3fa& v) { return max(v.x,v.y,v.z); }
-#endif
-
+
////////////////////////////////////////////////////////////////////////////////
/// Comparison Operators
////////////////////////////////////////////////////////////////////////////////
@@ -317,13 +241,8 @@ namespace embree
__forceinline Vec3ba neq_mask(const Vec3fa& a, const Vec3fa& b ) { return _mm_cmpneq_ps(a.m128, b.m128); }
__forceinline Vec3ba lt_mask( const Vec3fa& a, const Vec3fa& b ) { return _mm_cmplt_ps (a.m128, b.m128); }
__forceinline Vec3ba le_mask( const Vec3fa& a, const Vec3fa& b ) { return _mm_cmple_ps (a.m128, b.m128); }
- #if defined(__aarch64__)
- __forceinline Vec3ba gt_mask( const Vec3fa& a, const Vec3fa& b ) { return _mm_cmpgt_ps (a.m128, b.m128); }
- __forceinline Vec3ba ge_mask( const Vec3fa& a, const Vec3fa& b ) { return _mm_cmpge_ps (a.m128, b.m128); }
-#else
- __forceinline Vec3ba gt_mask(const Vec3fa& a, const Vec3fa& b) { return _mm_cmpnle_ps(a.m128, b.m128); }
- __forceinline Vec3ba ge_mask(const Vec3fa& a, const Vec3fa& b) { return _mm_cmpnlt_ps(a.m128, b.m128); }
-#endif
+ __forceinline Vec3ba gt_mask( const Vec3fa& a, const Vec3fa& b ) { return _mm_cmpnle_ps(a.m128, b.m128); }
+ __forceinline Vec3ba ge_mask( const Vec3fa& a, const Vec3fa& b ) { return _mm_cmpnlt_ps(a.m128, b.m128); }
__forceinline bool isvalid ( const Vec3fa& v ) {
return all(gt_mask(v,Vec3fa(-FLT_LARGE)) & lt_mask(v,Vec3fa(+FLT_LARGE)));
@@ -361,7 +280,7 @@ namespace embree
vfloat4 b0 = shuffle<1,2,0,3>(vfloat4(b.m128));
vfloat4 a1 = shuffle<1,2,0,3>(vfloat4(a.m128));
vfloat4 b1 = vfloat4(b.m128);
- return Vec3fa(shuffle<1,2,0,3>(prod_diff(a0,b0,a1,b1)));
+ return Vec3fa(shuffle<1,2,0,3>(msub(a0,b0,a1*b1)));
}
__forceinline float sqr_length ( const Vec3fa& a ) { return dot(a,a); }
@@ -416,11 +335,7 @@ namespace embree
/// Rounding Functions
////////////////////////////////////////////////////////////////////////////////
-#if defined(__aarch64__)
- __forceinline Vec3fa floor(const Vec3fa& a) { return vrndmq_f32(a.m128); }
- __forceinline Vec3fa ceil (const Vec3fa& a) { return vrndpq_f32(a.m128); }
- __forceinline Vec3fa trunc(const Vec3fa& a) { return vrndq_f32(a.m128); }
-#elif defined (__SSE4_1__)
+#if defined (__SSE4_1__)
__forceinline Vec3fa trunc( const Vec3fa& a ) { return _mm_round_ps(a.m128, _MM_FROUND_TO_NEAREST_INT); }
__forceinline Vec3fa floor( const Vec3fa& a ) { return _mm_round_ps(a.m128, _MM_FROUND_TO_NEG_INF ); }
__forceinline Vec3fa ceil ( const Vec3fa& a ) { return _mm_round_ps(a.m128, _MM_FROUND_TO_POS_INF ); }
@@ -478,10 +393,8 @@ namespace embree
__forceinline Vec3fx( const Vec3fa& other, const int a1) { m128 = other.m128; a = a1; }
__forceinline Vec3fx( const Vec3fa& other, const unsigned a1) { m128 = other.m128; u = a1; }
- __forceinline Vec3fx( const Vec3fa& other, const float w1) {
-#if defined (__aarch64__)
- m128 = other.m128; m128[3] = w1;
-#elif defined (__SSE4_1__)
+ __forceinline Vec3fx( const Vec3fa& other, const float w1) {
+#if defined (__SSE4_1__)
m128 = _mm_insert_ps(other.m128, _mm_set_ss(w1),3 << 4);
#else
const vint4 mask(-1,-1,-1,0);
@@ -613,7 +526,7 @@ namespace embree
__forceinline Vec3fx min( const Vec3fx& a, const Vec3fx& b ) { return _mm_min_ps(a.m128,b.m128); }
__forceinline Vec3fx max( const Vec3fx& a, const Vec3fx& b ) { return _mm_max_ps(a.m128,b.m128); }
-#if defined(__SSE4_1__) || defined(__aarch64__)
+#if defined(__SSE4_1__)
__forceinline Vec3fx mini(const Vec3fx& a, const Vec3fx& b) {
const vint4 ai = _mm_castps_si128(a.m128);
const vint4 bi = _mm_castps_si128(b.m128);
@@ -622,7 +535,7 @@ namespace embree
}
#endif
-#if defined(__SSE4_1__) || defined(__aarch64__)
+#if defined(__SSE4_1__)
__forceinline Vec3fx maxi(const Vec3fx& a, const Vec3fx& b) {
const vint4 ai = _mm_castps_si128(a.m128);
const vint4 bi = _mm_castps_si128(b.m128);
@@ -671,11 +584,11 @@ namespace embree
/// Reductions
////////////////////////////////////////////////////////////////////////////////
- __forceinline float reduce_add(const Vec3fx& v) {
+ __forceinline float reduce_add(const Vec3fx& v) {
const vfloat4 a(v.m128);
const vfloat4 b = shuffle<1>(a);
const vfloat4 c = shuffle<2>(a);
- return _mm_cvtss_f32(a+b+c);
+ return _mm_cvtss_f32(a+b+c);
}
__forceinline float reduce_mul(const Vec3fx& v) { return v.x*v.y*v.z; }
@@ -787,7 +700,11 @@ namespace embree
/// Rounding Functions
////////////////////////////////////////////////////////////////////////////////
-#if defined (__SSE4_1__) && !defined(__aarch64__)
+#if defined(__aarch64__)
+ __forceinline Vec3fx trunc(const Vec3fx& a) { return vrndq_f32(a.m128); }
+ __forceinline Vec3fx floor(const Vec3fx& a) { return vrndmq_f32(a.m128); }
+ __forceinline Vec3fx ceil (const Vec3fx& a) { return vrndpq_f32(a.m128); }
+#elif defined (__SSE4_1__)
__forceinline Vec3fx trunc( const Vec3fx& a ) { return _mm_round_ps(a.m128, _MM_FROUND_TO_NEAREST_INT); }
__forceinline Vec3fx floor( const Vec3fx& a ) { return _mm_round_ps(a.m128, _MM_FROUND_TO_NEG_INF ); }
__forceinline Vec3fx ceil ( const Vec3fx& a ) { return _mm_round_ps(a.m128, _MM_FROUND_TO_POS_INF ); }
diff --git a/thirdparty/embree-aarch64/common/math/vec3ia.h b/thirdparty/embree/common/math/vec3ia.h
index 737f67fd72..694804c40d 100644
--- a/thirdparty/embree-aarch64/common/math/vec3ia.h
+++ b/thirdparty/embree/common/math/vec3ia.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
@@ -65,9 +65,7 @@ namespace embree
__forceinline Vec3ia operator +( const Vec3ia& a ) { return a; }
__forceinline Vec3ia operator -( const Vec3ia& a ) { return _mm_sub_epi32(_mm_setzero_si128(), a.m128); }
-#if (defined(__aarch64__))
- __forceinline Vec3ia abs ( const Vec3ia& a ) { return vabsq_s32(a.m128); }
-#elif defined(__SSSE3__)
+#if defined(__SSSE3__)
__forceinline Vec3ia abs ( const Vec3ia& a ) { return _mm_abs_epi32(a.m128); }
#endif
@@ -83,7 +81,7 @@ namespace embree
__forceinline Vec3ia operator -( const Vec3ia& a, const int b ) { return a-Vec3ia(b); }
__forceinline Vec3ia operator -( const int a, const Vec3ia& b ) { return Vec3ia(a)-b; }
-#if defined(__aarch64__) || defined(__SSE4_1__)
+#if defined(__SSE4_1__)
__forceinline Vec3ia operator *( const Vec3ia& a, const Vec3ia& b ) { return _mm_mullo_epi32(a.m128, b.m128); }
__forceinline Vec3ia operator *( const Vec3ia& a, const int b ) { return a * Vec3ia(b); }
__forceinline Vec3ia operator *( const int a, const Vec3ia& b ) { return Vec3ia(a) * b; }
@@ -101,14 +99,12 @@ namespace embree
__forceinline Vec3ia operator ^( const Vec3ia& a, const int b ) { return a ^ Vec3ia(b); }
__forceinline Vec3ia operator ^( const int a, const Vec3ia& b ) { return Vec3ia(a) ^ b; }
-#if !defined(__ARM_NEON)
__forceinline Vec3ia operator <<( const Vec3ia& a, const int n ) { return _mm_slli_epi32(a.m128, n); }
__forceinline Vec3ia operator >>( const Vec3ia& a, const int n ) { return _mm_srai_epi32(a.m128, n); }
__forceinline Vec3ia sll ( const Vec3ia& a, const int b ) { return _mm_slli_epi32(a.m128, b); }
__forceinline Vec3ia sra ( const Vec3ia& a, const int b ) { return _mm_srai_epi32(a.m128, b); }
__forceinline Vec3ia srl ( const Vec3ia& a, const int b ) { return _mm_srli_epi32(a.m128, b); }
-#endif
////////////////////////////////////////////////////////////////////////////////
/// Assignment Operators
@@ -120,7 +116,7 @@ namespace embree
__forceinline Vec3ia& operator -=( Vec3ia& a, const Vec3ia& b ) { return a = a - b; }
__forceinline Vec3ia& operator -=( Vec3ia& a, const int& b ) { return a = a - b; }
-#if defined(__aarch64__) || defined(__SSE4_1__)
+#if defined(__SSE4_1__)
__forceinline Vec3ia& operator *=( Vec3ia& a, const Vec3ia& b ) { return a = a * b; }
__forceinline Vec3ia& operator *=( Vec3ia& a, const int& b ) { return a = a * b; }
#endif
@@ -131,38 +127,18 @@ namespace embree
__forceinline Vec3ia& operator |=( Vec3ia& a, const Vec3ia& b ) { return a = a | b; }
__forceinline Vec3ia& operator |=( Vec3ia& a, const int& b ) { return a = a | b; }
-#if !defined(__ARM_NEON)
__forceinline Vec3ia& operator <<=( Vec3ia& a, const int& b ) { return a = a << b; }
__forceinline Vec3ia& operator >>=( Vec3ia& a, const int& b ) { return a = a >> b; }
-#endif
////////////////////////////////////////////////////////////////////////////////
/// Reductions
////////////////////////////////////////////////////////////////////////////////
-#if defined(__aarch64__)
- __forceinline int reduce_add(const Vec3ia& v) {
- int32x4_t t = v.m128;
- t[3] = 0;
- return vaddvq_s32(t);
-
- }
- __forceinline int reduce_mul(const Vec3ia& v) { return v.x*v.y*v.z; }
- __forceinline int reduce_min(const Vec3ia& v) {
- int32x4_t t = (__m128i)blendv_ps((__m128)v0x7fffffff, (__m128)v.m128, (__m128)vFFF0);
- return vminvq_s32(t);
-
- }
- __forceinline int reduce_max(const Vec3ia& v) {
- int32x4_t t = (__m128i)blendv_ps((__m128)v0x80000000, (__m128)v.m128, (__m128)vFFF0);
- return vmaxvq_s32(t);
-
- }
-#else
+
__forceinline int reduce_add(const Vec3ia& v) { return v.x+v.y+v.z; }
__forceinline int reduce_mul(const Vec3ia& v) { return v.x*v.y*v.z; }
__forceinline int reduce_min(const Vec3ia& v) { return min(v.x,v.y,v.z); }
__forceinline int reduce_max(const Vec3ia& v) { return max(v.x,v.y,v.z); }
-#endif
+
////////////////////////////////////////////////////////////////////////////////
/// Comparison Operators
////////////////////////////////////////////////////////////////////////////////
@@ -185,14 +161,14 @@ namespace embree
////////////////////////////////////////////////////////////////////////////////
__forceinline Vec3ia select( const Vec3ba& m, const Vec3ia& t, const Vec3ia& f ) {
-#if defined(__aarch64__) || defined(__SSE4_1__)
+#if defined(__SSE4_1__)
return _mm_castps_si128(_mm_blendv_ps(_mm_castsi128_ps(f), _mm_castsi128_ps(t), m));
#else
return _mm_or_si128(_mm_and_si128(_mm_castps_si128(m), t), _mm_andnot_si128(_mm_castps_si128(m), f));
#endif
}
-#if defined(__aarch64__) || defined(__SSE4_1__)
+#if defined(__SSE4_1__)
__forceinline Vec3ia min( const Vec3ia& a, const Vec3ia& b ) { return _mm_min_epi32(a.m128,b.m128); }
__forceinline Vec3ia max( const Vec3ia& a, const Vec3ia& b ) { return _mm_max_epi32(a.m128,b.m128); }
#else
diff --git a/thirdparty/embree-aarch64/common/math/vec4.h b/thirdparty/embree/common/math/vec4.h
index d16542f507..0ed107928a 100644
--- a/thirdparty/embree-aarch64/common/math/vec4.h
+++ b/thirdparty/embree/common/math/vec4.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
@@ -192,7 +192,7 @@ namespace embree
////////////////////////////////////////////////////////////////////////////////
typedef Vec4<bool > Vec4b;
- typedef Vec4<uint8_t > Vec4uc;
+ typedef Vec4<unsigned char> Vec4uc;
typedef Vec4<int > Vec4i;
typedef Vec4<float > Vec4f;
}
@@ -205,7 +205,7 @@ namespace embree
/// SSE / AVX / MIC specializations
////////////////////////////////////////////////////////////////////////////////
-#if defined(__SSE__) || defined(__ARM_NEON)
+#if defined __SSE__
#include "../simd/sse.h"
#endif
@@ -225,31 +225,16 @@ namespace embree
template<> __forceinline Vec4<vfloat4>::Vec4( const Vec3fx& a ) {
x = a.x; y = a.y; z = a.z; w = a.w;
}
-#elif defined(__SSE__) || defined(__ARM_NEON)
+#elif defined(__SSE__)
template<> __forceinline Vec4<vfloat4>::Vec4( const Vec3fx& a ) {
const vfloat4 v = vfloat4(a.m128); x = shuffle<0,0,0,0>(v); y = shuffle<1,1,1,1>(v); z = shuffle<2,2,2,2>(v); w = shuffle<3,3,3,3>(v);
}
#endif
-#if defined(__SSE__) || defined(__ARM_NEON)
- __forceinline Vec4<vfloat4> broadcast4f( const Vec4<vfloat4>& a, const size_t k ) {
- return Vec4<vfloat4>(vfloat4::broadcast(&a.x[k]), vfloat4::broadcast(&a.y[k]), vfloat4::broadcast(&a.z[k]), vfloat4::broadcast(&a.w[k]));
- }
-#endif
-
#if defined(__AVX__)
template<> __forceinline Vec4<vfloat8>::Vec4( const Vec3fx& a ) {
x = a.x; y = a.y; z = a.z; w = a.w;
}
- __forceinline Vec4<vfloat4> broadcast4f( const Vec4<vfloat8>& a, const size_t k ) {
- return Vec4<vfloat4>(vfloat4::broadcast(&a.x[k]), vfloat4::broadcast(&a.y[k]), vfloat4::broadcast(&a.z[k]), vfloat4::broadcast(&a.w[k]));
- }
- __forceinline Vec4<vfloat8> broadcast8f( const Vec4<vfloat4>& a, const size_t k ) {
- return Vec4<vfloat8>(vfloat8::broadcast(&a.x[k]), vfloat8::broadcast(&a.y[k]), vfloat8::broadcast(&a.z[k]), vfloat8::broadcast(&a.w[k]));
- }
- __forceinline Vec4<vfloat8> broadcast8f( const Vec4<vfloat8>& a, const size_t k ) {
- return Vec4<vfloat8>(vfloat8::broadcast(&a.x[k]), vfloat8::broadcast(&a.y[k]), vfloat8::broadcast(&a.z[k]), vfloat8::broadcast(&a.w[k]));
- }
#endif
#if defined(__AVX512F__)
diff --git a/thirdparty/embree/common/simd/arm/emulation.h b/thirdparty/embree/common/simd/arm/emulation.h
new file mode 100644
index 0000000000..1c3875fb27
--- /dev/null
+++ b/thirdparty/embree/common/simd/arm/emulation.h
@@ -0,0 +1,50 @@
+// Copyright 2009-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+
+#pragma once
+
+/* Make precision match SSE, at the cost of some performance */
+#if !defined(__aarch64__)
+# define SSE2NEON_PRECISE_DIV 1
+# define SSE2NEON_PRECISE_SQRT 1
+#endif
+
+#include "sse2neon.h"
+
+__forceinline __m128 _mm_fmsub_ps(__m128 a, __m128 b, __m128 c) {
+ __m128 neg_c = vreinterpretq_m128_f32(vnegq_f32(vreinterpretq_f32_m128(c)));
+ return _mm_fmadd_ps(a, b, neg_c);
+}
+
+__forceinline __m128 _mm_fnmadd_ps(__m128 a, __m128 b, __m128 c) {
+#if defined(__aarch64__)
+ return vreinterpretq_m128_f32(vfmsq_f32(vreinterpretq_f32_m128(c),
+ vreinterpretq_f32_m128(b),
+ vreinterpretq_f32_m128(a)));
+#else
+ return _mm_sub_ps(c, _mm_mul_ps(a, b));
+#endif
+}
+
+__forceinline __m128 _mm_fnmsub_ps(__m128 a, __m128 b, __m128 c) {
+ return vreinterpretq_m128_f32(vnegq_f32(vreinterpretq_f32_m128(_mm_fmadd_ps(a,b,c))));
+}
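+
+/* Editor's illustrative note (not part of the upstream emulation sources):
+ * the sign conventions implemented by the emulated FMA variants above match
+ * the x86 intrinsics:
+ *
+ *   _mm_fmadd_ps(a, b, c)  =  a*b + c   (provided by sse2neon.h)
+ *   _mm_fmsub_ps(a, b, c)  =  a*b - c
+ *   _mm_fnmadd_ps(a, b, c) = -a*b + c
+ *   _mm_fnmsub_ps(a, b, c) = -a*b - c
+ */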
+
+
+/* Dummy defines for floating point control */
+#define _MM_MASK_MASK 0x1f80
+#define _MM_MASK_DIV_ZERO 0x200
+#define _MM_FLUSH_ZERO_ON 0x8000
+#define _MM_MASK_DENORM 0x100
+#define _MM_SET_EXCEPTION_MASK(x)
+#define _MM_SET_FLUSH_ZERO_MODE(x)
+
+__forceinline int _mm_getcsr()
+{
+ return 0;
+}
+
+__forceinline void _mm_mfence()
+{
+ __sync_synchronize();
+}
diff --git a/thirdparty/embree/common/simd/arm/sse2neon.h b/thirdparty/embree/common/simd/arm/sse2neon.h
new file mode 100644
index 0000000000..7eb25cf2c5
--- /dev/null
+++ b/thirdparty/embree/common/simd/arm/sse2neon.h
@@ -0,0 +1,6996 @@
+#ifndef SSE2NEON_H
+#define SSE2NEON_H
+
+// This header file provides a simple API translation layer
+// between SSE intrinsics to their corresponding Arm/Aarch64 NEON versions
+//
+// This header file does not yet translate all of the SSE intrinsics.
+//
+// Contributors to this work are:
+// John W. Ratcliff <jratcliffscarab@gmail.com>
+// Brandon Rowlett <browlett@nvidia.com>
+// Ken Fast <kfast@gdeb.com>
+// Eric van Beurden <evanbeurden@nvidia.com>
+// Alexander Potylitsin <apotylitsin@nvidia.com>
+// Hasindu Gamaarachchi <hasindu2008@gmail.com>
+// Jim Huang <jserv@biilabs.io>
+// Mark Cheng <marktwtn@biilabs.io>
+// Malcolm James MacLeod <malcolm@gulden.com>
+// Devin Hussey (easyaspi314) <husseydevin@gmail.com>
+// Sebastian Pop <spop@amazon.com>
+// Developer Ecosystem Engineering <DeveloperEcosystemEngineering@apple.com>
+// Danila Kutenin <danilak@google.com>
+// François Turban (JishinMaster) <francois.turban@gmail.com>
+// Pei-Hsuan Hung <afcidk@gmail.com>
+// Yang-Hao Yuan <yanghau@biilabs.io>
+// Syoyo Fujita <syoyo@lighttransport.com>
+// Brecht Van Lommel <brecht@blender.org>
+
+/*
+ * sse2neon is freely redistributable under the MIT License.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/* Tunable configurations */
+
+/* Enable precise implementation of math operations
+ * This would slow down the computation a bit, but gives consistent result with
+ * x86 SSE2. (e.g. would solve a hole or NaN pixel in the rendering result)
+ */
+/* _mm_min_ps and _mm_max_ps */
+#ifndef SSE2NEON_PRECISE_MINMAX
+#define SSE2NEON_PRECISE_MINMAX (0)
+#endif
+/* _mm_rcp_ps and _mm_div_ps */
+#ifndef SSE2NEON_PRECISE_DIV
+#define SSE2NEON_PRECISE_DIV (0)
+#endif
+/* _mm_sqrt_ps and _mm_rsqrt_ps */
+#ifndef SSE2NEON_PRECISE_SQRT
+#define SSE2NEON_PRECISE_SQRT (0)
+#endif
+#ifndef SSE2NEON_PRECISE_RSQRT
+#define SSE2NEON_PRECISE_RSQRT (0)
+#endif
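+
+/* Editor's illustrative note (not part of the upstream header): a consumer
+ * that needs results closer to x86 SSE can enable the knobs above before
+ * including this header, at some cost in speed, e.g.:
+ *
+ *   #define SSE2NEON_PRECISE_DIV 1   // precise _mm_div_ps / _mm_rcp_ps
+ *   #define SSE2NEON_PRECISE_SQRT 1  // precise _mm_sqrt_ps / _mm_rsqrt_ps
+ *   #include "sse2neon.h"
+ *
+ * Leaving them at 0 keeps the faster NEON estimate-based paths.
+ */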
+
+#if defined(__GNUC__) || defined(__clang__)
+#pragma push_macro("FORCE_INLINE")
+#pragma push_macro("ALIGN_STRUCT")
+#define FORCE_INLINE static inline __attribute__((always_inline))
+#define ALIGN_STRUCT(x) __attribute__((aligned(x)))
+#ifndef likely
+#define likely(x) __builtin_expect(!!(x), 1)
+#endif
+#ifndef unlikely
+#define unlikely(x) __builtin_expect(!!(x), 0)
+#endif
+#else
+#error "Macro name collisions may happen with unsupported compiler."
+#ifdef FORCE_INLINE
+#undef FORCE_INLINE
+#endif
+#define FORCE_INLINE static inline
+#ifndef ALIGN_STRUCT
+#define ALIGN_STRUCT(x) __declspec(align(x))
+#endif
+#endif
+#ifndef likely
+#define likely(x) (x)
+#endif
+#ifndef unlikely
+#define unlikely(x) (x)
+#endif
+
+#include <stdint.h>
+#include <stdlib.h>
+
+/* Architecture-specific build options */
+/* FIXME: #pragma GCC push_options is only available on GCC */
+#if defined(__GNUC__)
+#if defined(__arm__) && __ARM_ARCH == 7
+/* According to ARM C Language Extensions Architecture specification,
+ * __ARM_NEON is defined to a value indicating the Advanced SIMD (NEON)
+ * architecture supported.
+ */
+#if !defined(__ARM_NEON) || !defined(__ARM_NEON__)
+#error "You must enable NEON instructions (e.g. -mfpu=neon) to use SSE2NEON."
+#endif
+#if !defined(__clang__)
+#pragma GCC push_options
+#pragma GCC target("fpu=neon")
+#endif
+#elif defined(__aarch64__)
+#if !defined(__clang__)
+#pragma GCC push_options
+#pragma GCC target("+simd")
+#endif
+#else
+#error "Unsupported target. Must be either ARMv7-A+NEON or ARMv8-A."
+#endif
+#endif
+
+#include <arm_neon.h>
+
+/* Rounding functions require either AArch64 instructions or a libm fallback */
+#if !defined(__aarch64__)
+#include <math.h>
+#endif
+
+/* "__has_builtin" can be used to query support for built-in functions
+ * provided by gcc/clang and other compilers that support it.
+ */
+#ifndef __has_builtin /* GCC prior to 10 or non-clang compilers */
+/* Compatibility with gcc <= 9 */
+#if __GNUC__ <= 9
+#define __has_builtin(x) HAS##x
+#define HAS__builtin_popcount 1
+#define HAS__builtin_popcountll 1
+#else
+#define __has_builtin(x) 0
+#endif
+#endif
+
+/**
+ * MACRO for shuffle parameter for _mm_shuffle_ps().
+ * Argument fp3 is a digit[0123] that represents the fp from argument "b"
+ * of mm_shuffle_ps that will be placed in fp3 of result. fp2 is the same
+ * for fp2 in result. fp1 is a digit[0123] that represents the fp from
+ * argument "a" of mm_shuffle_ps that will be placed in fp1 of result.
+ * fp0 is the same for fp0 of result.
+ */
+#define _MM_SHUFFLE(fp3, fp2, fp1, fp0) \
+ (((fp3) << 6) | ((fp2) << 4) | ((fp1) << 2) | ((fp0)))
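+
+/* Editor's illustrative note (not part of the upstream header), assuming the
+ * _mm_shuffle_ps translation defined later in this file:
+ *
+ *   __m128 a = _mm_setr_ps(1.0f, 2.0f, 3.0f, 4.0f);
+ *   __m128 r = _mm_shuffle_ps(a, a, _MM_SHUFFLE(0, 1, 2, 3));
+ *   // r == {4.0f, 3.0f, 2.0f, 1.0f}: fp0 of the result selects lane 3 of
+ *   // "a", fp3 selects lane 0, exactly as described above.
+ */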
+
+/* Rounding mode macros. */
+#define _MM_FROUND_TO_NEAREST_INT 0x00
+#define _MM_FROUND_TO_NEG_INF 0x01
+#define _MM_FROUND_TO_POS_INF 0x02
+#define _MM_FROUND_TO_ZERO 0x03
+#define _MM_FROUND_CUR_DIRECTION 0x04
+#define _MM_FROUND_NO_EXC 0x08
+#define _MM_ROUND_NEAREST 0x0000
+#define _MM_ROUND_DOWN 0x2000
+#define _MM_ROUND_UP 0x4000
+#define _MM_ROUND_TOWARD_ZERO 0x6000
+
+/* indicate immediate constant argument in a given range */
+#define __constrange(a, b) const
+
+/* A few intrinsics accept traditional data types like ints or floats, but
+ * most operate on data types that are specific to SSE.
+ * If a vector type ends in d, it contains doubles, and if it does not have
+ * a suffix, it contains floats. An integer vector type can contain any type
+ * of integer, from chars to shorts to unsigned long longs.
+ */
+typedef int64x1_t __m64;
+typedef float32x4_t __m128; /* 128-bit vector containing 4 floats */
+// On ARM 32-bit architecture, the float64x2_t is not supported.
+// The data type __m128d should be represented in a different way for related
+// intrinsic conversion.
+#if defined(__aarch64__)
+typedef float64x2_t __m128d; /* 128-bit vector containing 2 doubles */
+#else
+typedef float32x4_t __m128d;
+#endif
+typedef int64x2_t __m128i; /* 128-bit vector containing integers */
+
+/* type-safe casting between types */
+
+#define vreinterpretq_m128_f16(x) vreinterpretq_f32_f16(x)
+#define vreinterpretq_m128_f32(x) (x)
+#define vreinterpretq_m128_f64(x) vreinterpretq_f32_f64(x)
+
+#define vreinterpretq_m128_u8(x) vreinterpretq_f32_u8(x)
+#define vreinterpretq_m128_u16(x) vreinterpretq_f32_u16(x)
+#define vreinterpretq_m128_u32(x) vreinterpretq_f32_u32(x)
+#define vreinterpretq_m128_u64(x) vreinterpretq_f32_u64(x)
+
+#define vreinterpretq_m128_s8(x) vreinterpretq_f32_s8(x)
+#define vreinterpretq_m128_s16(x) vreinterpretq_f32_s16(x)
+#define vreinterpretq_m128_s32(x) vreinterpretq_f32_s32(x)
+#define vreinterpretq_m128_s64(x) vreinterpretq_f32_s64(x)
+
+#define vreinterpretq_f16_m128(x) vreinterpretq_f16_f32(x)
+#define vreinterpretq_f32_m128(x) (x)
+#define vreinterpretq_f64_m128(x) vreinterpretq_f64_f32(x)
+
+#define vreinterpretq_u8_m128(x) vreinterpretq_u8_f32(x)
+#define vreinterpretq_u16_m128(x) vreinterpretq_u16_f32(x)
+#define vreinterpretq_u32_m128(x) vreinterpretq_u32_f32(x)
+#define vreinterpretq_u64_m128(x) vreinterpretq_u64_f32(x)
+
+#define vreinterpretq_s8_m128(x) vreinterpretq_s8_f32(x)
+#define vreinterpretq_s16_m128(x) vreinterpretq_s16_f32(x)
+#define vreinterpretq_s32_m128(x) vreinterpretq_s32_f32(x)
+#define vreinterpretq_s64_m128(x) vreinterpretq_s64_f32(x)
+
+#define vreinterpretq_m128i_s8(x) vreinterpretq_s64_s8(x)
+#define vreinterpretq_m128i_s16(x) vreinterpretq_s64_s16(x)
+#define vreinterpretq_m128i_s32(x) vreinterpretq_s64_s32(x)
+#define vreinterpretq_m128i_s64(x) (x)
+
+#define vreinterpretq_m128i_u8(x) vreinterpretq_s64_u8(x)
+#define vreinterpretq_m128i_u16(x) vreinterpretq_s64_u16(x)
+#define vreinterpretq_m128i_u32(x) vreinterpretq_s64_u32(x)
+#define vreinterpretq_m128i_u64(x) vreinterpretq_s64_u64(x)
+
+#define vreinterpretq_f32_m128i(x) vreinterpretq_f32_s64(x)
+#define vreinterpretq_f64_m128i(x) vreinterpretq_f64_s64(x)
+
+#define vreinterpretq_s8_m128i(x) vreinterpretq_s8_s64(x)
+#define vreinterpretq_s16_m128i(x) vreinterpretq_s16_s64(x)
+#define vreinterpretq_s32_m128i(x) vreinterpretq_s32_s64(x)
+#define vreinterpretq_s64_m128i(x) (x)
+
+#define vreinterpretq_u8_m128i(x) vreinterpretq_u8_s64(x)
+#define vreinterpretq_u16_m128i(x) vreinterpretq_u16_s64(x)
+#define vreinterpretq_u32_m128i(x) vreinterpretq_u32_s64(x)
+#define vreinterpretq_u64_m128i(x) vreinterpretq_u64_s64(x)
+
+#define vreinterpret_m64_s8(x) vreinterpret_s64_s8(x)
+#define vreinterpret_m64_s16(x) vreinterpret_s64_s16(x)
+#define vreinterpret_m64_s32(x) vreinterpret_s64_s32(x)
+#define vreinterpret_m64_s64(x) (x)
+
+#define vreinterpret_m64_u8(x) vreinterpret_s64_u8(x)
+#define vreinterpret_m64_u16(x) vreinterpret_s64_u16(x)
+#define vreinterpret_m64_u32(x) vreinterpret_s64_u32(x)
+#define vreinterpret_m64_u64(x) vreinterpret_s64_u64(x)
+
+#define vreinterpret_m64_f16(x) vreinterpret_s64_f16(x)
+#define vreinterpret_m64_f32(x) vreinterpret_s64_f32(x)
+#define vreinterpret_m64_f64(x) vreinterpret_s64_f64(x)
+
+#define vreinterpret_u8_m64(x) vreinterpret_u8_s64(x)
+#define vreinterpret_u16_m64(x) vreinterpret_u16_s64(x)
+#define vreinterpret_u32_m64(x) vreinterpret_u32_s64(x)
+#define vreinterpret_u64_m64(x) vreinterpret_u64_s64(x)
+
+#define vreinterpret_s8_m64(x) vreinterpret_s8_s64(x)
+#define vreinterpret_s16_m64(x) vreinterpret_s16_s64(x)
+#define vreinterpret_s32_m64(x) vreinterpret_s32_s64(x)
+#define vreinterpret_s64_m64(x) (x)
+
+#define vreinterpret_f32_m64(x) vreinterpret_f32_s64(x)
+
+#if defined(__aarch64__)
+#define vreinterpretq_m128d_s32(x) vreinterpretq_f64_s32(x)
+#define vreinterpretq_m128d_s64(x) vreinterpretq_f64_s64(x)
+
+#define vreinterpretq_m128d_u64(x) vreinterpretq_f64_u64(x)
+
+#define vreinterpretq_m128d_f32(x) vreinterpretq_f64_f32(x)
+#define vreinterpretq_m128d_f64(x) (x)
+
+#define vreinterpretq_s64_m128d(x) vreinterpretq_s64_f64(x)
+
+#define vreinterpretq_u64_m128d(x) vreinterpretq_u64_f64(x)
+
+#define vreinterpretq_f64_m128d(x) (x)
+#define vreinterpretq_f32_m128d(x) vreinterpretq_f32_f64(x)
+#else
+#define vreinterpretq_m128d_s32(x) vreinterpretq_f32_s32(x)
+#define vreinterpretq_m128d_s64(x) vreinterpretq_f32_s64(x)
+
+#define vreinterpretq_m128d_u32(x) vreinterpretq_f32_u32(x)
+#define vreinterpretq_m128d_u64(x) vreinterpretq_f32_u64(x)
+
+#define vreinterpretq_m128d_f32(x) (x)
+
+#define vreinterpretq_s64_m128d(x) vreinterpretq_s64_f32(x)
+
+#define vreinterpretq_u32_m128d(x) vreinterpretq_u32_f32(x)
+#define vreinterpretq_u64_m128d(x) vreinterpretq_u64_f32(x)
+
+#define vreinterpretq_f32_m128d(x) (x)
+#endif
+
+// A struct called 'SIMDVec' is defined in this header file; it can be used
+// by applications that attempt to access the contents of an __m128 struct
+// directly. It is important to note that accessing the __m128 struct directly
+// is considered bad coding practice by Microsoft: @see:
+// https://msdn.microsoft.com/en-us/library/ayeb3ayc.aspx
+//
+// However, some legacy source code may try to access the contents of an __m128
+// struct directly so the developer can use the SIMDVec as an alias for it. Any
+// casting must be done manually by the developer, as you cannot cast or
+// otherwise alias the base NEON data type for intrinsic operations.
+//
+// union intended to allow direct access to an __m128 variable using the names
+// that the MSVC compiler provides. This union should really only be used when
+// trying to access the members of the vector as integer values. GCC/clang
+// allow native access to the float members through a simple array access
+// operator (in C since 4.6, in C++ since 4.8).
+//
+// Ideally, direct accesses to SIMD vectors should not be used, since they can
+// cause a performance hit. If such access really is needed, however, the original __m128
+// variable can be aliased with a pointer to this union and used to access
+// individual components. The use of this union should be hidden behind a macro
+// that is used throughout the codebase to access the members instead of always
+// declaring this type of variable.
+typedef union ALIGN_STRUCT(16) SIMDVec {
+ float m128_f32[4]; // as floats - DON'T USE. Added for convenience.
+ int8_t m128_i8[16]; // as signed 8-bit integers.
+ int16_t m128_i16[8]; // as signed 16-bit integers.
+ int32_t m128_i32[4]; // as signed 32-bit integers.
+ int64_t m128_i64[2]; // as signed 64-bit integers.
+ uint8_t m128_u8[16]; // as unsigned 8-bit integers.
+ uint16_t m128_u16[8]; // as unsigned 16-bit integers.
+ uint32_t m128_u32[4]; // as unsigned 32-bit integers.
+ uint64_t m128_u64[2]; // as unsigned 64-bit integers.
+} SIMDVec;
+
+// casting using SIMDVec
+#define vreinterpretq_nth_u64_m128i(x, n) (((SIMDVec *) &x)->m128_u64[n])
+#define vreinterpretq_nth_u32_m128i(x, n) (((SIMDVec *) &x)->m128_u32[n])
+#define vreinterpretq_nth_u8_m128i(x, n) (((SIMDVec *) &x)->m128_u8[n])
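+
+/* Editor's illustrative note (not part of the upstream header): legacy code
+ * that needs individual lanes of an __m128i can go through the SIMDVec
+ * aliasing macros above instead of casting the NEON type directly, e.g.:
+ *
+ *   __m128i v = _mm_set_epi32(4, 3, 2, 1);
+ *   uint32_t lane0 = vreinterpretq_nth_u32_m128i(v, 0);  // 1
+ *   uint32_t lane3 = vreinterpretq_nth_u32_m128i(v, 3);  // 4
+ *
+ * As noted above, such accesses go through memory and are best kept behind
+ * a macro rather than scattered through performance-sensitive code.
+ */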
+
+/* Backwards compatibility for compilers with lack of specific type support */
+
+// Older gcc does not define vld1q_u8_x4 type
+#if defined(__GNUC__) && !defined(__clang__) && \
+ ((__GNUC__ == 10 && (__GNUC_MINOR__ <= 1)) || \
+ (__GNUC__ == 9 && (__GNUC_MINOR__ <= 3)) || \
+ (__GNUC__ == 8 && (__GNUC_MINOR__ <= 4)) || __GNUC__ <= 7)
+FORCE_INLINE uint8x16x4_t _sse2neon_vld1q_u8_x4(const uint8_t *p)
+{
+ uint8x16x4_t ret;
+ ret.val[0] = vld1q_u8(p + 0);
+ ret.val[1] = vld1q_u8(p + 16);
+ ret.val[2] = vld1q_u8(p + 32);
+ ret.val[3] = vld1q_u8(p + 48);
+ return ret;
+}
+#else
+// Wraps vld1q_u8_x4
+FORCE_INLINE uint8x16x4_t _sse2neon_vld1q_u8_x4(const uint8_t *p)
+{
+ return vld1q_u8_x4(p);
+}
+#endif
+
+/* Function Naming Conventions
+ * The naming convention of SSE intrinsics is straightforward. A generic SSE
+ * intrinsic function is given as follows:
+ * _mm_<name>_<data_type>
+ *
+ * The parts of this format are given as follows:
+ * 1. <name> describes the operation performed by the intrinsic
+ * 2. <data_type> identifies the data type of the function's primary arguments
+ *
+ * This last part, <data_type>, is a little complicated. It identifies the
+ * content of the input values, and can be set to any of the following values:
+ * + ps - vectors contain floats (ps stands for packed single-precision)
+ * + pd - vectors contain doubles (pd stands for packed double-precision)
+ * + epi8/epi16/epi32/epi64 - vectors contain 8-bit/16-bit/32-bit/64-bit
+ * signed integers
+ * + epu8/epu16/epu32/epu64 - vectors contain 8-bit/16-bit/32-bit/64-bit
+ * unsigned integers
+ * + si128 - unspecified 128-bit vector or 256-bit vector
+ * + m128/m128i/m128d - identifies input vector types when they are different
+ * than the type of the returned vector
+ *
+ * For example, _mm_setzero_ps. The _mm implies that the function returns
+ * a 128-bit vector. The _ps at the end implies that the argument vectors
+ * contain floats.
+ *
+ * A complete example: Byte Shuffle - pshufb (_mm_shuffle_epi8)
+ * // Set packed 16-bit integers. 128 bits, 8 short, per 16 bits
+ * __m128i v_in = _mm_setr_epi16(1, 2, 3, 4, 5, 6, 7, 8);
+ * // Set packed 8-bit integers
+ * // 128 bits, 16 chars, per 8 bits
+ * __m128i v_perm = _mm_setr_epi8(1, 0, 2, 3, 8, 9, 10, 11,
+ * 4, 5, 12, 13, 6, 7, 14, 15);
+ * // Shuffle packed 8-bit integers
+ * __m128i v_out = _mm_shuffle_epi8(v_in, v_perm); // pshufb
+ *
+ * Data (Number, Binary, Byte Index):
+ +------+------+------+------+------+------+------+------+
+ | 1 | 2 | 3 | 4 | Number
+ +------+------+------+------+------+------+------+------+
+ | 0000 | 0001 | 0000 | 0010 | 0000 | 0011 | 0000 | 0100 | Binary
+ +------+------+------+------+------+------+------+------+
+ | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | Index
+ +------+------+------+------+------+------+------+------+
+
+ +------+------+------+------+------+------+------+------+
+ | 5 | 6 | 7 | 8 | Number
+ +------+------+------+------+------+------+------+------+
+ | 0000 | 0101 | 0000 | 0110 | 0000 | 0111 | 0000 | 1000 | Binary
+ +------+------+------+------+------+------+------+------+
+ | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | Index
+ +------+------+------+------+------+------+------+------+
+ * Index (Byte Index):
+ +------+------+------+------+------+------+------+------+
+ | 1 | 0 | 2 | 3 | 8 | 9 | 10 | 11 |
+ +------+------+------+------+------+------+------+------+
+
+ +------+------+------+------+------+------+------+------+
+ | 4 | 5 | 12 | 13 | 6 | 7 | 14 | 15 |
+ +------+------+------+------+------+------+------+------+
+ * Result:
+ +------+------+------+------+------+------+------+------+
+ | 1 | 0 | 2 | 3 | 8 | 9 | 10 | 11 | Index
+ +------+------+------+------+------+------+------+------+
+ | 0001 | 0000 | 0000 | 0010 | 0000 | 0101 | 0000 | 0110 | Binary
+ +------+------+------+------+------+------+------+------+
+ | 256 | 2 | 5 | 6 | Number
+ +------+------+------+------+------+------+------+------+
+
+ +------+------+------+------+------+------+------+------+
+ | 4 | 5 | 12 | 13 | 6 | 7 | 14 | 15 | Index
+ +------+------+------+------+------+------+------+------+
+ | 0000 | 0011 | 0000 | 0111 | 0000 | 0100 | 0000 | 1000 | Binary
+ +------+------+------+------+------+------+------+------+
+ | 3 | 7 | 4 | 8 | Number
+ +------+------+------+------+------+------+------+------+
+ */
+
+/* Set/get methods */
+
+/* Constants for use with _mm_prefetch. */
+enum _mm_hint {
+ _MM_HINT_NTA = 0, /* load data to L1 and L2 cache, mark it as NTA */
+ _MM_HINT_T0 = 1, /* load data to L1 and L2 cache */
+ _MM_HINT_T1 = 2, /* load data to L2 cache only */
+ _MM_HINT_T2 = 3, /* load data to L2 cache only, mark it as NTA */
+ _MM_HINT_ENTA = 4, /* exclusive version of _MM_HINT_NTA */
+ _MM_HINT_ET0 = 5, /* exclusive version of _MM_HINT_T0 */
+ _MM_HINT_ET1 = 6, /* exclusive version of _MM_HINT_T1 */
+ _MM_HINT_ET2 = 7 /* exclusive version of _MM_HINT_T2 */
+};
+
+// Loads one cache line of data from address p to a location closer to the
+// processor. https://msdn.microsoft.com/en-us/library/84szxsww(v=vs.100).aspx
+FORCE_INLINE void _mm_prefetch(const void *p, int i)
+{
+ (void) i;
+ __builtin_prefetch(p);
+}
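+
+/* Editor's illustrative note (not part of the upstream header): the hint
+ * argument is accepted for source compatibility but ignored above; every
+ * hint degrades to a plain __builtin_prefetch, e.g.:
+ *
+ *   _mm_prefetch(ptr + 16, _MM_HINT_T0);  // "ptr" is some caller-owned array
+ */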
+
+// Pause the processor. This is typically used in spin-wait loops and, depending
+// on the x86 processor, typical values are in the 40-100 cycle range. The
+// 'yield' instruction isn't a good fit because it's effectively a nop on most
+// Arm cores. Experience with several databases has shown that an 'isb' is
+// a reasonable approximation.
+FORCE_INLINE void _mm_pause()
+{
+ __asm__ __volatile__("isb\n");
+}
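+
+/* Editor's illustrative note (not part of the upstream header): a typical
+ * spin-wait loop around _mm_pause, where "ready" is some caller-owned
+ * C11 atomic flag:
+ *
+ *   while (!atomic_load_explicit(&ready, memory_order_acquire))
+ *       _mm_pause();  // back off with 'isb' instead of hammering the flag
+ */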
+
+// Copy the lower single-precision (32-bit) floating-point element of a to dst.
+//
+// dst[31:0] := a[31:0]
+//
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtss_f32
+FORCE_INLINE float _mm_cvtss_f32(__m128 a)
+{
+ return vgetq_lane_f32(vreinterpretq_f32_m128(a), 0);
+}
+
+// Convert the lower single-precision (32-bit) floating-point element in b to a
+// double-precision (64-bit) floating-point element, store the result in the
+// lower element of dst, and copy the upper element from a to the upper element
+// of dst.
+//
+// dst[63:0] := Convert_FP32_To_FP64(b[31:0])
+// dst[127:64] := a[127:64]
+//
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtss_sd
+FORCE_INLINE __m128d _mm_cvtss_sd(__m128d a, __m128 b)
+{
+ double d = (double) vgetq_lane_f32(vreinterpretq_f32_m128(b), 0);
+#if defined(__aarch64__)
+ return vreinterpretq_m128d_f64(
+ vsetq_lane_f64(d, vreinterpretq_f64_m128d(a), 0));
+#else
+ return vreinterpretq_m128d_s64(
+ vsetq_lane_s64(*(int64_t *) &d, vreinterpretq_s64_m128d(a), 0));
+#endif
+}
+
+// Convert the lower single-precision (32-bit) floating-point element in a to a
+// 32-bit integer, and store the result in dst.
+//
+// dst[31:0] := Convert_FP32_To_Int32(a[31:0])
+//
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtss_si32
+#define _mm_cvtss_si32(a) _mm_cvt_ss2si(a)
+
+// Convert the lower single-precision (32-bit) floating-point element in a to a
+// 64-bit integer, and store the result in dst.
+//
+// dst[63:0] := Convert_FP32_To_Int64(a[31:0])
+//
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtss_si64
+FORCE_INLINE int64_t _mm_cvtss_si64(__m128 a)
+{
+#if defined(__aarch64__)
+ return vgetq_lane_s64(
+ vreinterpretq_s64_s32(vcvtnq_s32_f32(vreinterpretq_f32_m128(a))), 0);
+#else
+ float32_t data = vgetq_lane_f32(vreinterpretq_f32_m128(a), 0);
+ float32_t diff = data - floor(data);
+ if (diff > 0.5)
+ return (int64_t) ceil(data);
+ if (unlikely(diff == 0.5)) {
+ int64_t f = (int64_t) floor(data);
+ int64_t c = (int64_t) ceil(data);
+ return c & 1 ? f : c;
+ }
+ return (int64_t) floor(data);
+#endif
+}
+
+// Convert packed single-precision (32-bit) floating-point elements in a to
+// packed 32-bit integers with truncation, and store the results in dst.
+//
+// FOR j := 0 to 1
+// i := 32*j
+// dst[i+31:i] := Convert_FP32_To_Int32_Truncate(a[i+31:i])
+// ENDFOR
+//
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtt_ps2pi
+FORCE_INLINE __m64 _mm_cvtt_ps2pi(__m128 a)
+{
+ return vreinterpret_m64_s32(
+ vget_low_s32(vcvtq_s32_f32(vreinterpretq_f32_m128(a))));
+}
+
+// Convert the lower single-precision (32-bit) floating-point element in a to a
+// 32-bit integer with truncation, and store the result in dst.
+//
+// dst[31:0] := Convert_FP32_To_Int32_Truncate(a[31:0])
+//
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtt_ss2si
+FORCE_INLINE int _mm_cvtt_ss2si(__m128 a)
+{
+ return vgetq_lane_s32(vcvtq_s32_f32(vreinterpretq_f32_m128(a)), 0);
+}
+
+// Convert packed single-precision (32-bit) floating-point elements in a to
+// packed 32-bit integers with truncation, and store the results in dst.
+//
+// FOR j := 0 to 1
+// i := 32*j
+// dst[i+31:i] := Convert_FP32_To_Int32_Truncate(a[i+31:i])
+// ENDFOR
+//
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvttps_pi32
+#define _mm_cvttps_pi32(a) _mm_cvtt_ps2pi(a)
+
+// Convert the lower single-precision (32-bit) floating-point element in a to a
+// 32-bit integer with truncation, and store the result in dst.
+//
+// dst[31:0] := Convert_FP32_To_Int32_Truncate(a[31:0])
+//
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvttss_si32
+#define _mm_cvttss_si32(a) _mm_cvtt_ss2si(a)
+
+// Convert the lower single-precision (32-bit) floating-point element in a to a
+// 64-bit integer with truncation, and store the result in dst.
+//
+// dst[63:0] := Convert_FP32_To_Int64_Truncate(a[31:0])
+//
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvttss_si64
+FORCE_INLINE int64_t _mm_cvttss_si64(__m128 a)
+{
+ return vgetq_lane_s64(
+ vmovl_s32(vget_low_s32(vcvtq_s32_f32(vreinterpretq_f32_m128(a)))), 0);
+}
+
+// Sets the 128-bit value to zero
+// https://msdn.microsoft.com/en-us/library/vstudio/ys7dw0kh(v=vs.100).aspx
+FORCE_INLINE __m128i _mm_setzero_si128(void)
+{
+ return vreinterpretq_m128i_s32(vdupq_n_s32(0));
+}
+
+// Clears the four single-precision, floating-point values.
+// https://msdn.microsoft.com/en-us/library/vstudio/tk1t2tbz(v=vs.100).aspx
+FORCE_INLINE __m128 _mm_setzero_ps(void)
+{
+ return vreinterpretq_m128_f32(vdupq_n_f32(0));
+}
+
+// Return vector of type __m128d with all elements set to zero.
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_setzero_pd
+FORCE_INLINE __m128d _mm_setzero_pd(void)
+{
+#if defined(__aarch64__)
+ return vreinterpretq_m128d_f64(vdupq_n_f64(0));
+#else
+ return vreinterpretq_m128d_f32(vdupq_n_f32(0));
+#endif
+}
+
+// Sets the four single-precision, floating-point values to w.
+//
+// r0 := r1 := r2 := r3 := w
+//
+// https://msdn.microsoft.com/en-us/library/vstudio/2x1se8ha(v=vs.100).aspx
+FORCE_INLINE __m128 _mm_set1_ps(float _w)
+{
+ return vreinterpretq_m128_f32(vdupq_n_f32(_w));
+}
+
+// Sets the four single-precision, floating-point values to w.
+// https://msdn.microsoft.com/en-us/library/vstudio/2x1se8ha(v=vs.100).aspx
+FORCE_INLINE __m128 _mm_set_ps1(float _w)
+{
+ return vreinterpretq_m128_f32(vdupq_n_f32(_w));
+}
+
+// Sets the four single-precision, floating-point values to the four inputs.
+// https://msdn.microsoft.com/en-us/library/vstudio/afh0zf75(v=vs.100).aspx
+FORCE_INLINE __m128 _mm_set_ps(float w, float z, float y, float x)
+{
+ float ALIGN_STRUCT(16) data[4] = {x, y, z, w};
+ return vreinterpretq_m128_f32(vld1q_f32(data));
+}
+
+// Copy single-precision (32-bit) floating-point element a to the lower element
+// of dst, and zero the upper 3 elements.
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_set_ss
+FORCE_INLINE __m128 _mm_set_ss(float a)
+{
+ float ALIGN_STRUCT(16) data[4] = {a, 0, 0, 0};
+ return vreinterpretq_m128_f32(vld1q_f32(data));
+}
+
+// Sets the four single-precision, floating-point values to the four inputs in
+// reverse order.
+// https://msdn.microsoft.com/en-us/library/vstudio/d2172ct3(v=vs.100).aspx
+FORCE_INLINE __m128 _mm_setr_ps(float w, float z, float y, float x)
+{
+ float ALIGN_STRUCT(16) data[4] = {w, z, y, x};
+ return vreinterpretq_m128_f32(vld1q_f32(data));
+}
+
+// Sets the 8 signed 16-bit integer values in reverse order.
+//
+// Return Value
+// r0 := w0
+// r1 := w1
+// ...
+// r7 := w7
+FORCE_INLINE __m128i _mm_setr_epi16(short w0,
+ short w1,
+ short w2,
+ short w3,
+ short w4,
+ short w5,
+ short w6,
+ short w7)
+{
+ int16_t ALIGN_STRUCT(16) data[8] = {w0, w1, w2, w3, w4, w5, w6, w7};
+ return vreinterpretq_m128i_s16(vld1q_s16((int16_t *) data));
+}
+
+// Sets the 4 signed 32-bit integer values in reverse order
+// https://technet.microsoft.com/en-us/library/security/27yb3ee5(v=vs.90).aspx
+FORCE_INLINE __m128i _mm_setr_epi32(int i3, int i2, int i1, int i0)
+{
+ int32_t ALIGN_STRUCT(16) data[4] = {i3, i2, i1, i0};
+ return vreinterpretq_m128i_s32(vld1q_s32(data));
+}
+
+// Set packed 64-bit integers in dst with the supplied values in reverse order.
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_setr_epi64
+FORCE_INLINE __m128i _mm_setr_epi64(__m64 e1, __m64 e0)
+{
+ return vreinterpretq_m128i_s64(vcombine_s64(e1, e0));
+}
+
+// Sets the 16 signed 8-bit integer values to b.
+//
+// r0 := b
+// r1 := b
+// ...
+// r15 := b
+//
+// https://msdn.microsoft.com/en-us/library/6e14xhyf(v=vs.100).aspx
+FORCE_INLINE __m128i _mm_set1_epi8(signed char w)
+{
+ return vreinterpretq_m128i_s8(vdupq_n_s8(w));
+}
+
+// Broadcast double-precision (64-bit) floating-point value a to all elements of
+// dst.
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_set1_pd
+FORCE_INLINE __m128d _mm_set1_pd(double d)
+{
+#if defined(__aarch64__)
+ return vreinterpretq_m128d_f64(vdupq_n_f64(d));
+#else
+ return vreinterpretq_m128d_s64(vdupq_n_s64(*(int64_t *) &d));
+#endif
+}
+
+// Sets the 8 signed 16-bit integer values to w.
+//
+// r0 := w
+// r1 := w
+// ...
+// r7 := w
+//
+// https://msdn.microsoft.com/en-us/library/k0ya3x0e(v=vs.90).aspx
+FORCE_INLINE __m128i _mm_set1_epi16(short w)
+{
+ return vreinterpretq_m128i_s16(vdupq_n_s16(w));
+}
+
+// Sets the 16 signed 8-bit integer values.
+// https://msdn.microsoft.com/en-us/library/x0cx8zd3(v=vs.90).aspx
+FORCE_INLINE __m128i _mm_set_epi8(signed char b15,
+ signed char b14,
+ signed char b13,
+ signed char b12,
+ signed char b11,
+ signed char b10,
+ signed char b9,
+ signed char b8,
+ signed char b7,
+ signed char b6,
+ signed char b5,
+ signed char b4,
+ signed char b3,
+ signed char b2,
+ signed char b1,
+ signed char b0)
+{
+ int8_t ALIGN_STRUCT(16)
+ data[16] = {(int8_t) b0, (int8_t) b1, (int8_t) b2, (int8_t) b3,
+ (int8_t) b4, (int8_t) b5, (int8_t) b6, (int8_t) b7,
+ (int8_t) b8, (int8_t) b9, (int8_t) b10, (int8_t) b11,
+ (int8_t) b12, (int8_t) b13, (int8_t) b14, (int8_t) b15};
+ return (__m128i) vld1q_s8(data);
+}
+
+// Sets the 8 signed 16-bit integer values.
+// https://msdn.microsoft.com/en-au/library/3e0fek84(v=vs.90).aspx
+FORCE_INLINE __m128i _mm_set_epi16(short i7,
+ short i6,
+ short i5,
+ short i4,
+ short i3,
+ short i2,
+ short i1,
+ short i0)
+{
+ int16_t ALIGN_STRUCT(16) data[8] = {i0, i1, i2, i3, i4, i5, i6, i7};
+ return vreinterpretq_m128i_s16(vld1q_s16(data));
+}
+
+// Sets the 16 signed 8-bit integer values in reverse order.
+// https://msdn.microsoft.com/en-us/library/2khb9c7k(v=vs.90).aspx
+FORCE_INLINE __m128i _mm_setr_epi8(signed char b0,
+ signed char b1,
+ signed char b2,
+ signed char b3,
+ signed char b4,
+ signed char b5,
+ signed char b6,
+ signed char b7,
+ signed char b8,
+ signed char b9,
+ signed char b10,
+ signed char b11,
+ signed char b12,
+ signed char b13,
+ signed char b14,
+ signed char b15)
+{
+ int8_t ALIGN_STRUCT(16)
+ data[16] = {(int8_t) b0, (int8_t) b1, (int8_t) b2, (int8_t) b3,
+ (int8_t) b4, (int8_t) b5, (int8_t) b6, (int8_t) b7,
+ (int8_t) b8, (int8_t) b9, (int8_t) b10, (int8_t) b11,
+ (int8_t) b12, (int8_t) b13, (int8_t) b14, (int8_t) b15};
+ return (__m128i) vld1q_s8(data);
+}
+
+// Sets the 4 signed 32-bit integer values to i.
+//
+// r0 := i
+// r1 := i
+// r2 := i
+// r3 := i
+//
+// https://msdn.microsoft.com/en-us/library/vstudio/h4xscxat(v=vs.100).aspx
+FORCE_INLINE __m128i _mm_set1_epi32(int _i)
+{
+ return vreinterpretq_m128i_s32(vdupq_n_s32(_i));
+}
+
+// Sets the 2 signed 64-bit integer values to i.
+// https://docs.microsoft.com/en-us/previous-versions/visualstudio/visual-studio-2010/whtfzhzk(v=vs.100)
+FORCE_INLINE __m128i _mm_set1_epi64(__m64 _i)
+{
+ return vreinterpretq_m128i_s64(vdupq_n_s64((int64_t) _i));
+}
+
+// Sets the 2 signed 64-bit integer values to i.
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_set1_epi64x
+FORCE_INLINE __m128i _mm_set1_epi64x(int64_t _i)
+{
+ return vreinterpretq_m128i_s64(vdupq_n_s64(_i));
+}
+
+// Sets the 4 signed 32-bit integer values.
+// https://msdn.microsoft.com/en-us/library/vstudio/019beekt(v=vs.100).aspx
+FORCE_INLINE __m128i _mm_set_epi32(int i3, int i2, int i1, int i0)
+{
+ int32_t ALIGN_STRUCT(16) data[4] = {i0, i1, i2, i3};
+ return vreinterpretq_m128i_s32(vld1q_s32(data));
+}
+
+// Returns the __m128i structure with its two 64-bit integer values
+// initialized to the values of the two 64-bit integers passed in.
+// https://msdn.microsoft.com/en-us/library/dk2sdw0h(v=vs.120).aspx
+FORCE_INLINE __m128i _mm_set_epi64x(int64_t i1, int64_t i2)
+{
+ return vreinterpretq_m128i_s64(
+ vcombine_s64(vcreate_s64(i2), vcreate_s64(i1)));
+}
+
+// Returns the __m128i structure with its two 64-bit integer values
+// initialized to the values of the two 64-bit integers passed in.
+// https://msdn.microsoft.com/en-us/library/dk2sdw0h(v=vs.120).aspx
+FORCE_INLINE __m128i _mm_set_epi64(__m64 i1, __m64 i2)
+{
+ return _mm_set_epi64x((int64_t) i1, (int64_t) i2);
+}
+
+// Set packed double-precision (64-bit) floating-point elements in dst with the
+// supplied values.
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_set_pd
+FORCE_INLINE __m128d _mm_set_pd(double e1, double e0)
+{
+ double ALIGN_STRUCT(16) data[2] = {e0, e1};
+#if defined(__aarch64__)
+ return vreinterpretq_m128d_f64(vld1q_f64((float64_t *) data));
+#else
+ return vreinterpretq_m128d_f32(vld1q_f32((float32_t *) data));
+#endif
+}
+
+// Set packed double-precision (64-bit) floating-point elements in dst with the
+// supplied values in reverse order.
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_setr_pd
+FORCE_INLINE __m128d _mm_setr_pd(double e1, double e0)
+{
+ return _mm_set_pd(e0, e1);
+}
+
+// Copy double-precision (64-bit) floating-point element a to the lower element
+// of dst, and zero the upper element.
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_set_sd
+FORCE_INLINE __m128d _mm_set_sd(double a)
+{
+ return _mm_set_pd(0, a);
+}
+
+// Broadcast double-precision (64-bit) floating-point value a to all elements of
+// dst.
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_set_pd1
+#define _mm_set_pd1 _mm_set1_pd
+
+// Stores four single-precision, floating-point values.
+// https://msdn.microsoft.com/en-us/library/vstudio/s3h4ay6y(v=vs.100).aspx
+FORCE_INLINE void _mm_store_ps(float *p, __m128 a)
+{
+ vst1q_f32(p, vreinterpretq_f32_m128(a));
+}
+
+// Store the lower single-precision (32-bit) floating-point element from a into
+// 4 contiguous elements in memory. mem_addr must be aligned on a 16-byte
+// boundary or a general-protection exception may be generated.
+//
+// MEM[mem_addr+31:mem_addr] := a[31:0]
+// MEM[mem_addr+63:mem_addr+32] := a[31:0]
+// MEM[mem_addr+95:mem_addr+64] := a[31:0]
+// MEM[mem_addr+127:mem_addr+96] := a[31:0]
+//
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_store_ps1
+FORCE_INLINE void _mm_store_ps1(float *p, __m128 a)
+{
+ float32_t a0 = vgetq_lane_f32(vreinterpretq_f32_m128(a), 0);
+ vst1q_f32(p, vdupq_n_f32(a0));
+}
+
+// Store the lower single-precision (32-bit) floating-point element from a into
+// 4 contiguous elements in memory. mem_addr must be aligned on a 16-byte
+// boundary or a general-protection exception may be generated.
+//
+// MEM[mem_addr+31:mem_addr] := a[31:0]
+// MEM[mem_addr+63:mem_addr+32] := a[31:0]
+// MEM[mem_addr+95:mem_addr+64] := a[31:0]
+// MEM[mem_addr+127:mem_addr+96] := a[31:0]
+//
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_store1_ps
+#define _mm_store1_ps _mm_store_ps1
+
+// Store 4 single-precision (32-bit) floating-point elements from a into memory
+// in reverse order. mem_addr must be aligned on a 16-byte boundary or a
+// general-protection exception may be generated.
+//
+// MEM[mem_addr+31:mem_addr] := a[127:96]
+// MEM[mem_addr+63:mem_addr+32] := a[95:64]
+// MEM[mem_addr+95:mem_addr+64] := a[63:32]
+// MEM[mem_addr+127:mem_addr+96] := a[31:0]
+//
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_storer_ps
+FORCE_INLINE void _mm_storer_ps(float *p, __m128 a)
+{
+ float32x4_t tmp = vrev64q_f32(vreinterpretq_f32_m128(a));
+ float32x4_t rev = vextq_f32(tmp, tmp, 2);
+ vst1q_f32(p, rev);
+}
+
+// Stores four single-precision, floating-point values.
+// https://msdn.microsoft.com/en-us/library/44e30x22(v=vs.100).aspx
+FORCE_INLINE void _mm_storeu_ps(float *p, __m128 a)
+{
+ vst1q_f32(p, vreinterpretq_f32_m128(a));
+}
+
+// Stores four 32-bit integer values (as a __m128i value) at the address p.
+// https://msdn.microsoft.com/en-us/library/vstudio/edk11s13(v=vs.100).aspx
+FORCE_INLINE void _mm_store_si128(__m128i *p, __m128i a)
+{
+ vst1q_s32((int32_t *) p, vreinterpretq_s32_m128i(a));
+}
+
+// Stores four 32-bit integer values (as a __m128i value) at the address p.
+// https://msdn.microsoft.com/en-us/library/vstudio/edk11s13(v=vs.100).aspx
+FORCE_INLINE void _mm_storeu_si128(__m128i *p, __m128i a)
+{
+ vst1q_s32((int32_t *) p, vreinterpretq_s32_m128i(a));
+}
+
+// Stores the lower single-precision, floating-point value.
+// https://msdn.microsoft.com/en-us/library/tzz10fbx(v=vs.100).aspx
+FORCE_INLINE void _mm_store_ss(float *p, __m128 a)
+{
+ vst1q_lane_f32(p, vreinterpretq_f32_m128(a), 0);
+}
+
+// Store 128-bits (composed of 2 packed double-precision (64-bit) floating-point
+// elements) from a into memory. mem_addr must be aligned on a 16-byte boundary
+// or a general-protection exception may be generated.
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_store_pd
+FORCE_INLINE void _mm_store_pd(double *mem_addr, __m128d a)
+{
+#if defined(__aarch64__)
+ vst1q_f64((float64_t *) mem_addr, vreinterpretq_f64_m128d(a));
+#else
+ vst1q_f32((float32_t *) mem_addr, vreinterpretq_f32_m128d(a));
+#endif
+}
+
+// Store the upper double-precision (64-bit) floating-point element from a into
+// memory.
+//
+// MEM[mem_addr+63:mem_addr] := a[127:64]
+//
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_storeh_pd
+FORCE_INLINE void _mm_storeh_pd(double *mem_addr, __m128d a)
+{
+#if defined(__aarch64__)
+ vst1_f64((float64_t *) mem_addr, vget_high_f64(vreinterpretq_f64_m128d(a)));
+#else
+ vst1_f32((float32_t *) mem_addr, vget_high_f32(vreinterpretq_f32_m128d(a)));
+#endif
+}
+
+// Store the lower double-precision (64-bit) floating-point element from a into
+// memory.
+//
+// MEM[mem_addr+63:mem_addr] := a[63:0]
+//
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_storel_pd
+FORCE_INLINE void _mm_storel_pd(double *mem_addr, __m128d a)
+{
+#if defined(__aarch64__)
+ vst1_f64((float64_t *) mem_addr, vget_low_f64(vreinterpretq_f64_m128d(a)));
+#else
+ vst1_f32((float32_t *) mem_addr, vget_low_f32(vreinterpretq_f32_m128d(a)));
+#endif
+}
+
+// Store 2 double-precision (64-bit) floating-point elements from a into memory
+// in reverse order. mem_addr must be aligned on a 16-byte boundary or a
+// general-protection exception may be generated.
+//
+// MEM[mem_addr+63:mem_addr] := a[127:64]
+// MEM[mem_addr+127:mem_addr+64] := a[63:0]
+//
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_storer_pd
+FORCE_INLINE void _mm_storer_pd(double *mem_addr, __m128d a)
+{
+ float32x4_t f = vreinterpretq_f32_m128d(a);
+ _mm_store_pd(mem_addr, vreinterpretq_m128d_f32(vextq_f32(f, f, 2)));
+}
+
+// Store the lower double-precision (64-bit) floating-point element from a into
+// 2 contiguous elements in memory. mem_addr must be aligned on a 16-byte
+// boundary or a general-protection exception may be generated.
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_store_pd1
+FORCE_INLINE void _mm_store_pd1(double *mem_addr, __m128d a)
+{
+#if defined(__aarch64__)
+ float64x1_t a_low = vget_low_f64(vreinterpretq_f64_m128d(a));
+ vst1q_f64((float64_t *) mem_addr,
+ vreinterpretq_f64_m128d(vcombine_f64(a_low, a_low)));
+#else
+ float32x2_t a_low = vget_low_f32(vreinterpretq_f32_m128d(a));
+ vst1q_f32((float32_t *) mem_addr,
+ vreinterpretq_f32_m128d(vcombine_f32(a_low, a_low)));
+#endif
+}
+
+// Store the lower double-precision (64-bit) floating-point element from a into
+// 2 contiguous elements in memory. mem_addr must be aligned on a 16-byte
+// boundary or a general-protection exception may be generated.
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#expand=9,526,5601&text=_mm_store1_pd
+#define _mm_store1_pd _mm_store_pd1
+
+// Store 128-bits (composed of 2 packed double-precision (64-bit) floating-point
+// elements) from a into memory. mem_addr does not need to be aligned on any
+// particular boundary.
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_storeu_pd
+FORCE_INLINE void _mm_storeu_pd(double *mem_addr, __m128d a)
+{
+ _mm_store_pd(mem_addr, a);
+}
+
+// Reads the lower 64 bits of b and stores them into the lower 64 bits of a.
+// https://msdn.microsoft.com/en-us/library/hhwf428f%28v=vs.90%29.aspx
+FORCE_INLINE void _mm_storel_epi64(__m128i *a, __m128i b)
+{
+ uint64x1_t hi = vget_high_u64(vreinterpretq_u64_m128i(*a));
+ uint64x1_t lo = vget_low_u64(vreinterpretq_u64_m128i(b));
+ *a = vreinterpretq_m128i_u64(vcombine_u64(lo, hi));
+}
+
+// Stores the lower two single-precision floating point values of a to the
+// address p.
+//
+// *p0 := a0
+// *p1 := a1
+//
+// https://msdn.microsoft.com/en-us/library/h54t98ks(v=vs.90).aspx
+FORCE_INLINE void _mm_storel_pi(__m64 *p, __m128 a)
+{
+ *p = vreinterpret_m64_f32(vget_low_f32(a));
+}
+
+// Stores the upper two single-precision, floating-point values of a to the
+// address p.
+//
+// *p0 := a2
+// *p1 := a3
+//
+// https://msdn.microsoft.com/en-us/library/a7525fs8(v%3dvs.90).aspx
+FORCE_INLINE void _mm_storeh_pi(__m64 *p, __m128 a)
+{
+ *p = vreinterpret_m64_f32(vget_high_f32(a));
+}
+
+// Loads a single single-precision, floating-point value, copying it into all
+// four words
+// https://msdn.microsoft.com/en-us/library/vstudio/5cdkf716(v=vs.100).aspx
+FORCE_INLINE __m128 _mm_load1_ps(const float *p)
+{
+ return vreinterpretq_m128_f32(vld1q_dup_f32(p));
+}
+
+// Load a single-precision (32-bit) floating-point element from memory into all
+// elements of dst.
+//
+// dst[31:0] := MEM[mem_addr+31:mem_addr]
+// dst[63:32] := MEM[mem_addr+31:mem_addr]
+// dst[95:64] := MEM[mem_addr+31:mem_addr]
+// dst[127:96] := MEM[mem_addr+31:mem_addr]
+//
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_load_ps1
+#define _mm_load_ps1 _mm_load1_ps
+
+// Sets the lower two single-precision, floating-point values with 64
+// bits of data loaded from the address p; the upper two values are passed
+// through from a.
+//
+// Return Value
+// r0 := *p0
+// r1 := *p1
+// r2 := a2
+// r3 := a3
+//
+// https://msdn.microsoft.com/en-us/library/s57cyak2(v=vs.100).aspx
+FORCE_INLINE __m128 _mm_loadl_pi(__m128 a, __m64 const *p)
+{
+ return vreinterpretq_m128_f32(
+ vcombine_f32(vld1_f32((const float32_t *) p), vget_high_f32(a)));
+}
+
+// Load 4 single-precision (32-bit) floating-point elements from memory into dst
+// in reverse order. mem_addr must be aligned on a 16-byte boundary or a
+// general-protection exception may be generated.
+//
+// dst[31:0] := MEM[mem_addr+127:mem_addr+96]
+// dst[63:32] := MEM[mem_addr+95:mem_addr+64]
+// dst[95:64] := MEM[mem_addr+63:mem_addr+32]
+// dst[127:96] := MEM[mem_addr+31:mem_addr]
+//
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_loadr_ps
+FORCE_INLINE __m128 _mm_loadr_ps(const float *p)
+{
+ float32x4_t v = vrev64q_f32(vld1q_f32(p));
+ return vreinterpretq_m128_f32(vextq_f32(v, v, 2));
+}
+
+// Sets the upper two single-precision, floating-point values with 64
+// bits of data loaded from the address p; the lower two values are passed
+// through from a.
+//
+// r0 := a0
+// r1 := a1
+// r2 := *p0
+// r3 := *p1
+//
+// https://msdn.microsoft.com/en-us/library/w92wta0x(v%3dvs.100).aspx
+FORCE_INLINE __m128 _mm_loadh_pi(__m128 a, __m64 const *p)
+{
+ return vreinterpretq_m128_f32(
+ vcombine_f32(vget_low_f32(a), vld1_f32((const float32_t *) p)));
+}
+
+// Loads four single-precision, floating-point values.
+// https://msdn.microsoft.com/en-us/library/vstudio/zzd50xxt(v=vs.100).aspx
+FORCE_INLINE __m128 _mm_load_ps(const float *p)
+{
+ return vreinterpretq_m128_f32(vld1q_f32(p));
+}
+
+// Loads four single-precision, floating-point values.
+// https://msdn.microsoft.com/en-us/library/x1b16s7z%28v=vs.90%29.aspx
+FORCE_INLINE __m128 _mm_loadu_ps(const float *p)
+{
+ // for neon, alignment doesn't matter, so _mm_load_ps and _mm_loadu_ps are
+ // equivalent for neon
+ return vreinterpretq_m128_f32(vld1q_f32(p));
+}
+
+// Load unaligned 16-bit integer from memory into the first element of dst.
+//
+// dst[15:0] := MEM[mem_addr+15:mem_addr]
+// dst[MAX:16] := 0
+//
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_loadu_si16
+FORCE_INLINE __m128i _mm_loadu_si16(const void *p)
+{
+ return vreinterpretq_m128i_s16(
+ vsetq_lane_s16(*(const int16_t *) p, vdupq_n_s16(0), 0));
+}
+
+// Load unaligned 64-bit integer from memory into the first element of dst.
+//
+// dst[63:0] := MEM[mem_addr+63:mem_addr]
+// dst[MAX:64] := 0
+//
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_loadu_si64
+FORCE_INLINE __m128i _mm_loadu_si64(const void *p)
+{
+ return vreinterpretq_m128i_s64(
+ vcombine_s64(vld1_s64((const int64_t *) p), vdup_n_s64(0)));
+}
+
+// Load a double-precision (64-bit) floating-point element from memory into the
+// lower of dst, and zero the upper element. mem_addr does not need to be
+// aligned on any particular boundary.
+//
+// dst[63:0] := MEM[mem_addr+63:mem_addr]
+// dst[127:64] := 0
+//
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_load_sd
+FORCE_INLINE __m128d _mm_load_sd(const double *p)
+{
+#if defined(__aarch64__)
+ return vreinterpretq_m128d_f64(vsetq_lane_f64(*p, vdupq_n_f64(0), 0));
+#else
+ const float *fp = (const float *) p;
+ float ALIGN_STRUCT(16) data[4] = {fp[0], fp[1], 0, 0};
+ return vreinterpretq_m128d_f32(vld1q_f32(data));
+#endif
+}
+
+// Loads two double-precision, floating-point values from 16-byte aligned
+// memory.
+//
+// dst[127:0] := MEM[mem_addr+127:mem_addr]
+//
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_load_pd
+FORCE_INLINE __m128d _mm_load_pd(const double *p)
+{
+#if defined(__aarch64__)
+ return vreinterpretq_m128d_f64(vld1q_f64(p));
+#else
+ const float *fp = (const float *) p;
+ float ALIGN_STRUCT(16) data[4] = {fp[0], fp[1], fp[2], fp[3]};
+ return vreinterpretq_m128d_f32(vld1q_f32(data));
+#endif
+}
+
+// Loads two double-precision, floating-point values from unaligned memory.
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_loadu_pd
+FORCE_INLINE __m128d _mm_loadu_pd(const double *p)
+{
+ return _mm_load_pd(p);
+}
+
+// Loads a single-precision, floating-point value into the low word and
+// clears the upper three words.
+// https://msdn.microsoft.com/en-us/library/548bb9h4%28v=vs.90%29.aspx
+FORCE_INLINE __m128 _mm_load_ss(const float *p)
+{
+ return vreinterpretq_m128_f32(vsetq_lane_f32(*p, vdupq_n_f32(0), 0));
+}
+
+// Load 64-bit integer from memory into the first element of dst.
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_loadl_epi64
+FORCE_INLINE __m128i _mm_loadl_epi64(__m128i const *p)
+{
+ /* Load the lower 64 bits of the value pointed to by p into the
+ * lower 64 bits of the result, zeroing the upper 64 bits of the result.
+ */
+ return vreinterpretq_m128i_s32(
+ vcombine_s32(vld1_s32((int32_t const *) p), vcreate_s32(0)));
+}
+
+// Load a double-precision (64-bit) floating-point element from memory into the
+// lower element of dst, and copy the upper element from a to dst. mem_addr does
+// not need to be aligned on any particular boundary.
+//
+// dst[63:0] := MEM[mem_addr+63:mem_addr]
+// dst[127:64] := a[127:64]
+//
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_loadl_pd
+FORCE_INLINE __m128d _mm_loadl_pd(__m128d a, const double *p)
+{
+#if defined(__aarch64__)
+ return vreinterpretq_m128d_f64(
+ vcombine_f64(vld1_f64(p), vget_high_f64(vreinterpretq_f64_m128d(a))));
+#else
+ return vreinterpretq_m128d_f32(
+ vcombine_f32(vld1_f32((const float *) p),
+ vget_high_f32(vreinterpretq_f32_m128d(a))));
+#endif
+}
+
+// Load 2 double-precision (64-bit) floating-point elements from memory into dst
+// in reverse order. mem_addr must be aligned on a 16-byte boundary or a
+// general-protection exception may be generated.
+//
+// dst[63:0] := MEM[mem_addr+127:mem_addr+64]
+// dst[127:64] := MEM[mem_addr+63:mem_addr]
+//
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_loadr_pd
+FORCE_INLINE __m128d _mm_loadr_pd(const double *p)
+{
+#if defined(__aarch64__)
+ float64x2_t v = vld1q_f64(p);
+ return vreinterpretq_m128d_f64(vextq_f64(v, v, 1));
+#else
+ int64x2_t v = vld1q_s64((const int64_t *) p);
+ return vreinterpretq_m128d_s64(vextq_s64(v, v, 1));
+#endif
+}
+
+// Sets the low word to the single-precision, floating-point value of b
+// https://docs.microsoft.com/en-us/previous-versions/visualstudio/visual-studio-2010/35hdzazd(v=vs.100)
+FORCE_INLINE __m128 _mm_move_ss(__m128 a, __m128 b)
+{
+ return vreinterpretq_m128_f32(
+ vsetq_lane_f32(vgetq_lane_f32(vreinterpretq_f32_m128(b), 0),
+ vreinterpretq_f32_m128(a), 0));
+}
+
+// Move the lower double-precision (64-bit) floating-point element from b to the
+// lower element of dst, and copy the upper element from a to the upper element
+// of dst.
+//
+// dst[63:0] := b[63:0]
+// dst[127:64] := a[127:64]
+//
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_move_sd
+FORCE_INLINE __m128d _mm_move_sd(__m128d a, __m128d b)
+{
+ return vreinterpretq_m128d_f32(
+ vcombine_f32(vget_low_f32(vreinterpretq_f32_m128d(b)),
+ vget_high_f32(vreinterpretq_f32_m128d(a))));
+}
+
+// Copy the lower 64-bit integer in a to the lower element of dst, and zero the
+// upper element.
+//
+// dst[63:0] := a[63:0]
+// dst[127:64] := 0
+//
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_move_epi64
+FORCE_INLINE __m128i _mm_move_epi64(__m128i a)
+{
+ return vreinterpretq_m128i_s64(
+ vsetq_lane_s64(0, vreinterpretq_s64_m128i(a), 1));
+}
+
+// Return vector of type __m128 with undefined elements.
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_undefined_ps
+FORCE_INLINE __m128 _mm_undefined_ps(void)
+{
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wuninitialized"
+#endif
+ __m128 a;
+ return a;
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic pop
+#endif
+}
+
+/* Logic/Binary operations */
+
+// Computes the bitwise AND-NOT of the four single-precision, floating-point
+// values of a and b.
+//
+// r0 := ~a0 & b0
+// r1 := ~a1 & b1
+// r2 := ~a2 & b2
+// r3 := ~a3 & b3
+//
+// https://msdn.microsoft.com/en-us/library/vstudio/68h7wd02(v=vs.100).aspx
+FORCE_INLINE __m128 _mm_andnot_ps(__m128 a, __m128 b)
+{
+ return vreinterpretq_m128_s32(
+ vbicq_s32(vreinterpretq_s32_m128(b),
+ vreinterpretq_s32_m128(a))); // *NOTE* argument swap
+}
+
+// Compute the bitwise NOT of packed double-precision (64-bit) floating-point
+// elements in a and then AND with b, and store the results in dst.
+//
+// FOR j := 0 to 1
+// i := j*64
+// dst[i+63:i] := ((NOT a[i+63:i]) AND b[i+63:i])
+// ENDFOR
+//
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_andnot_pd
+FORCE_INLINE __m128d _mm_andnot_pd(__m128d a, __m128d b)
+{
+ // *NOTE* argument swap
+ return vreinterpretq_m128d_s64(
+ vbicq_s64(vreinterpretq_s64_m128d(b), vreinterpretq_s64_m128d(a)));
+}
+
+// Computes the bitwise AND of the 128-bit value in b and the bitwise NOT of the
+// 128-bit value in a.
+//
+// r := (~a) & b
+//
+// https://msdn.microsoft.com/en-us/library/vstudio/1beaceh8(v=vs.100).aspx
+FORCE_INLINE __m128i _mm_andnot_si128(__m128i a, __m128i b)
+{
+ return vreinterpretq_m128i_s32(
+ vbicq_s32(vreinterpretq_s32_m128i(b),
+ vreinterpretq_s32_m128i(a))); // *NOTE* argument swap
+}
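+
+// Illustrative example (not part of the upstream documentation): with every
+// 32-bit lane of a holding 0x00FF00FF and every lane of b holding 0x0F0F0F0F,
+// _mm_andnot_si128(a, b) yields 0x0F000F00 in each lane, i.e. (~a) & b.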
+
+// Computes the bitwise AND of the 128-bit value in a and the 128-bit value in
+// b.
+//
+// r := a & b
+//
+// https://msdn.microsoft.com/en-us/library/vstudio/6d1txsa8(v=vs.100).aspx
+FORCE_INLINE __m128i _mm_and_si128(__m128i a, __m128i b)
+{
+ return vreinterpretq_m128i_s32(
+ vandq_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(b)));
+}
+
+// Computes the bitwise AND of the four single-precision, floating-point values
+// of a and b.
+//
+// r0 := a0 & b0
+// r1 := a1 & b1
+// r2 := a2 & b2
+// r3 := a3 & b3
+//
+// https://msdn.microsoft.com/en-us/library/vstudio/73ck1xc5(v=vs.100).aspx
+FORCE_INLINE __m128 _mm_and_ps(__m128 a, __m128 b)
+{
+ return vreinterpretq_m128_s32(
+ vandq_s32(vreinterpretq_s32_m128(a), vreinterpretq_s32_m128(b)));
+}
+
+// Compute the bitwise AND of packed double-precision (64-bit) floating-point
+// elements in a and b, and store the results in dst.
+//
+// FOR j := 0 to 1
+// i := j*64
+// dst[i+63:i] := a[i+63:i] AND b[i+63:i]
+// ENDFOR
+//
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_and_pd
+FORCE_INLINE __m128d _mm_and_pd(__m128d a, __m128d b)
+{
+ return vreinterpretq_m128d_s64(
+ vandq_s64(vreinterpretq_s64_m128d(a), vreinterpretq_s64_m128d(b)));
+}
+
+// Computes the bitwise OR of the four single-precision, floating-point values
+// of a and b.
+// https://msdn.microsoft.com/en-us/library/vstudio/7ctdsyy0(v=vs.100).aspx
+FORCE_INLINE __m128 _mm_or_ps(__m128 a, __m128 b)
+{
+ return vreinterpretq_m128_s32(
+ vorrq_s32(vreinterpretq_s32_m128(a), vreinterpretq_s32_m128(b)));
+}
+
+// Computes bitwise EXOR (exclusive-or) of the four single-precision,
+// floating-point values of a and b.
+// https://msdn.microsoft.com/en-us/library/ss6k3wk8(v=vs.100).aspx
+FORCE_INLINE __m128 _mm_xor_ps(__m128 a, __m128 b)
+{
+ return vreinterpretq_m128_s32(
+ veorq_s32(vreinterpretq_s32_m128(a), vreinterpretq_s32_m128(b)));
+}
+
+// Compute the bitwise XOR of packed double-precision (64-bit) floating-point
+// elements in a and b, and store the results in dst.
+//
+// FOR j := 0 to 1
+// i := j*64
+// dst[i+63:i] := a[i+63:i] XOR b[i+63:i]
+// ENDFOR
+//
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_xor_pd
+FORCE_INLINE __m128d _mm_xor_pd(__m128d a, __m128d b)
+{
+ return vreinterpretq_m128d_s64(
+ veorq_s64(vreinterpretq_s64_m128d(a), vreinterpretq_s64_m128d(b)));
+}
+
+// Compute the bitwise OR of packed double-precision (64-bit) floating-point
+// elements in a and b, and store the results in dst.
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_or_pd
+FORCE_INLINE __m128d _mm_or_pd(__m128d a, __m128d b)
+{
+ return vreinterpretq_m128d_s64(
+ vorrq_s64(vreinterpretq_s64_m128d(a), vreinterpretq_s64_m128d(b)));
+}
+
+// Computes the bitwise OR of the 128-bit value in a and the 128-bit value in b.
+//
+// r := a | b
+//
+// https://msdn.microsoft.com/en-us/library/vstudio/ew8ty0db(v=vs.100).aspx
+FORCE_INLINE __m128i _mm_or_si128(__m128i a, __m128i b)
+{
+ return vreinterpretq_m128i_s32(
+ vorrq_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(b)));
+}
+
+// Computes the bitwise XOR of the 128-bit value in a and the 128-bit value in
+// b. https://msdn.microsoft.com/en-us/library/fzt08www(v=vs.100).aspx
+FORCE_INLINE __m128i _mm_xor_si128(__m128i a, __m128i b)
+{
+ return vreinterpretq_m128i_s32(
+ veorq_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(b)));
+}
+
+// Duplicate the low double-precision (64-bit) floating-point element from a,
+// and store the results in dst.
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_movedup_pd
+FORCE_INLINE __m128d _mm_movedup_pd(__m128d a)
+{
+#if defined(__aarch64__)
+ return vreinterpretq_m128d_f64(
+ vdupq_laneq_f64(vreinterpretq_f64_m128d(a), 0));
+#else
+ return vreinterpretq_m128d_u64(
+ vdupq_n_u64(vgetq_lane_u64(vreinterpretq_u64_m128d(a), 0)));
+#endif
+}
+
+// Duplicate odd-indexed single-precision (32-bit) floating-point elements
+// from a, and store the results in dst.
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_movehdup_ps
+FORCE_INLINE __m128 _mm_movehdup_ps(__m128 a)
+{
+#if __has_builtin(__builtin_shufflevector)
+ return vreinterpretq_m128_f32(__builtin_shufflevector(
+ vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(a), 1, 1, 3, 3));
+#else
+ float32_t a1 = vgetq_lane_f32(vreinterpretq_f32_m128(a), 1);
+ float32_t a3 = vgetq_lane_f32(vreinterpretq_f32_m128(a), 3);
+ float ALIGN_STRUCT(16) data[4] = {a1, a1, a3, a3};
+ return vreinterpretq_m128_f32(vld1q_f32(data));
+#endif
+}
+
+// Duplicate even-indexed single-precision (32-bit) floating-point elements
+// from a, and store the results in dst.
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_moveldup_ps
+FORCE_INLINE __m128 _mm_moveldup_ps(__m128 a)
+{
+#if __has_builtin(__builtin_shufflevector)
+ return vreinterpretq_m128_f32(__builtin_shufflevector(
+ vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(a), 0, 0, 2, 2));
+#else
+ float32_t a0 = vgetq_lane_f32(vreinterpretq_f32_m128(a), 0);
+ float32_t a2 = vgetq_lane_f32(vreinterpretq_f32_m128(a), 2);
+ float ALIGN_STRUCT(16) data[4] = {a0, a0, a2, a2};
+ return vreinterpretq_m128_f32(vld1q_f32(data));
+#endif
+}
+
+// Moves the upper two values of B into the lower two values of A.
+//
+// r3 := a3
+// r2 := a2
+// r1 := b3
+// r0 := b2
+FORCE_INLINE __m128 _mm_movehl_ps(__m128 __A, __m128 __B)
+{
+ float32x2_t a32 = vget_high_f32(vreinterpretq_f32_m128(__A));
+ float32x2_t b32 = vget_high_f32(vreinterpretq_f32_m128(__B));
+ return vreinterpretq_m128_f32(vcombine_f32(b32, a32));
+}
+
+// Moves the lower two values of B into the upper two values of A.
+//
+// r3 := b1
+// r2 := b0
+// r1 := a1
+// r0 := a0
+FORCE_INLINE __m128 _mm_movelh_ps(__m128 __A, __m128 __B)
+{
+ float32x2_t a10 = vget_low_f32(vreinterpretq_f32_m128(__A));
+ float32x2_t b10 = vget_low_f32(vreinterpretq_f32_m128(__B));
+ return vreinterpretq_m128_f32(vcombine_f32(a10, b10));
+}
+
+// Compute the absolute value of packed signed 32-bit integers in a, and store
+// the unsigned results in dst.
+//
+// FOR j := 0 to 3
+// i := j*32
+// dst[i+31:i] := ABS(a[i+31:i])
+// ENDFOR
+//
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_abs_epi32
+FORCE_INLINE __m128i _mm_abs_epi32(__m128i a)
+{
+ return vreinterpretq_m128i_s32(vabsq_s32(vreinterpretq_s32_m128i(a)));
+}
+
+// Compute the absolute value of packed signed 16-bit integers in a, and store
+// the unsigned results in dst.
+//
+// FOR j := 0 to 7
+// i := j*16
+// dst[i+15:i] := ABS(a[i+15:i])
+// ENDFOR
+//
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_abs_epi16
+FORCE_INLINE __m128i _mm_abs_epi16(__m128i a)
+{
+ return vreinterpretq_m128i_s16(vabsq_s16(vreinterpretq_s16_m128i(a)));
+}
+
+// Compute the absolute value of packed signed 8-bit integers in a, and store
+// the unsigned results in dst.
+//
+// FOR j := 0 to 15
+// i := j*8
+// dst[i+7:i] := ABS(a[i+7:i])
+// ENDFOR
+//
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_abs_epi8
+FORCE_INLINE __m128i _mm_abs_epi8(__m128i a)
+{
+ return vreinterpretq_m128i_s8(vabsq_s8(vreinterpretq_s8_m128i(a)));
+}
+
+// Compute the absolute value of packed signed 32-bit integers in a, and store
+// the unsigned results in dst.
+//
+// FOR j := 0 to 1
+// i := j*32
+// dst[i+31:i] := ABS(a[i+31:i])
+// ENDFOR
+//
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_abs_pi32
+FORCE_INLINE __m64 _mm_abs_pi32(__m64 a)
+{
+ return vreinterpret_m64_s32(vabs_s32(vreinterpret_s32_m64(a)));
+}
+
+// Compute the absolute value of packed signed 16-bit integers in a, and store
+// the unsigned results in dst.
+//
+// FOR j := 0 to 3
+// i := j*16
+// dst[i+15:i] := ABS(a[i+15:i])
+// ENDFOR
+//
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_abs_pi16
+FORCE_INLINE __m64 _mm_abs_pi16(__m64 a)
+{
+ return vreinterpret_m64_s16(vabs_s16(vreinterpret_s16_m64(a)));
+}
+
+// Compute the absolute value of packed signed 8-bit integers in a, and store
+// the unsigned results in dst.
+//
+// FOR j := 0 to 7
+// i := j*8
+// dst[i+7:i] := ABS(a[i+7:i])
+// ENDFOR
+//
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_abs_pi8
+FORCE_INLINE __m64 _mm_abs_pi8(__m64 a)
+{
+ return vreinterpret_m64_s8(vabs_s8(vreinterpret_s8_m64(a)));
+}
+
+// Concatenate 16-byte blocks in a and b into a 32-byte temporary result, shift
+// the result right by imm8 bytes, and store the low 16 bytes in dst.
+//
+// tmp[255:0] := ((a[127:0] << 128)[255:0] OR b[127:0]) >> (imm8*8)
+// dst[127:0] := tmp[127:0]
+//
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_alignr_epi8
+#define _mm_alignr_epi8(a, b, imm) \
+ __extension__({ \
+ __m128i ret; \
+ if (unlikely((imm) >= 32)) { \
+ ret = _mm_setzero_si128(); \
+ } else { \
+ uint8x16_t tmp_low, tmp_high; \
+ if (imm >= 16) { \
+ const int idx = imm - 16; \
+ tmp_low = vreinterpretq_u8_m128i(a); \
+ tmp_high = vdupq_n_u8(0); \
+ ret = \
+ vreinterpretq_m128i_u8(vextq_u8(tmp_low, tmp_high, idx)); \
+ } else { \
+ const int idx = imm; \
+ tmp_low = vreinterpretq_u8_m128i(b); \
+ tmp_high = vreinterpretq_u8_m128i(a); \
+ ret = \
+ vreinterpretq_m128i_u8(vextq_u8(tmp_low, tmp_high, idx)); \
+ } \
+ } \
+ ret; \
+ })
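+
+// Illustrative example: with the byte lanes of a set to {16,17,...,31} and the
+// byte lanes of b set to {0,1,...,15}, _mm_alignr_epi8(a, b, 4) produces
+// {4,5,...,19} -- bytes 4..19 of the 32-byte concatenation a:b, matching the
+// vextq_u8 path above.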
+
+// Concatenate 8-byte blocks in a and b into a 16-byte temporary result, shift
+// the result right by imm8 bytes, and store the low 8 bytes in dst.
+//
+// tmp[127:0] := ((a[63:0] << 64)[127:0] OR b[63:0]) >> (imm8*8)
+// dst[63:0] := tmp[63:0]
+//
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_alignr_pi8
+#define _mm_alignr_pi8(a, b, imm) \
+ __extension__({ \
+ __m64 ret; \
+ if (unlikely((imm) >= 16)) { \
+ ret = vreinterpret_m64_s8(vdup_n_s8(0)); \
+ } else { \
+ uint8x8_t tmp_low, tmp_high; \
+ if (imm >= 8) { \
+ const int idx = imm - 8; \
+ tmp_low = vreinterpret_u8_m64(a); \
+ tmp_high = vdup_n_u8(0); \
+ ret = vreinterpret_m64_u8(vext_u8(tmp_low, tmp_high, idx)); \
+ } else { \
+ const int idx = imm; \
+ tmp_low = vreinterpret_u8_m64(b); \
+ tmp_high = vreinterpret_u8_m64(a); \
+ ret = vreinterpret_m64_u8(vext_u8(tmp_low, tmp_high, idx)); \
+ } \
+ } \
+ ret; \
+ })
+
+// Takes the upper 64 bits of a and places it in the low end of the result
+// Takes the lower 64 bits of b and places it into the high end of the result.
+FORCE_INLINE __m128 _mm_shuffle_ps_1032(__m128 a, __m128 b)
+{
+ float32x2_t a32 = vget_high_f32(vreinterpretq_f32_m128(a));
+ float32x2_t b10 = vget_low_f32(vreinterpretq_f32_m128(b));
+ return vreinterpretq_m128_f32(vcombine_f32(a32, b10));
+}
+
+// Takes the lower two 32-bit values from a, swaps them, and places them in the
+// low end of the result; takes the upper two 32-bit values from b, swaps them,
+// and places them in the high end of the result.
+FORCE_INLINE __m128 _mm_shuffle_ps_2301(__m128 a, __m128 b)
+{
+ float32x2_t a01 = vrev64_f32(vget_low_f32(vreinterpretq_f32_m128(a)));
+ float32x2_t b23 = vrev64_f32(vget_high_f32(vreinterpretq_f32_m128(b)));
+ return vreinterpretq_m128_f32(vcombine_f32(a01, b23));
+}
+
+FORCE_INLINE __m128 _mm_shuffle_ps_0321(__m128 a, __m128 b)
+{
+ float32x2_t a21 = vget_high_f32(
+ vextq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(a), 3));
+ float32x2_t b03 = vget_low_f32(
+ vextq_f32(vreinterpretq_f32_m128(b), vreinterpretq_f32_m128(b), 3));
+ return vreinterpretq_m128_f32(vcombine_f32(a21, b03));
+}
+
+FORCE_INLINE __m128 _mm_shuffle_ps_2103(__m128 a, __m128 b)
+{
+ float32x2_t a03 = vget_low_f32(
+ vextq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(a), 3));
+ float32x2_t b21 = vget_high_f32(
+ vextq_f32(vreinterpretq_f32_m128(b), vreinterpretq_f32_m128(b), 3));
+ return vreinterpretq_m128_f32(vcombine_f32(a03, b21));
+}
+
+FORCE_INLINE __m128 _mm_shuffle_ps_1010(__m128 a, __m128 b)
+{
+ float32x2_t a10 = vget_low_f32(vreinterpretq_f32_m128(a));
+ float32x2_t b10 = vget_low_f32(vreinterpretq_f32_m128(b));
+ return vreinterpretq_m128_f32(vcombine_f32(a10, b10));
+}
+
+FORCE_INLINE __m128 _mm_shuffle_ps_1001(__m128 a, __m128 b)
+{
+ float32x2_t a01 = vrev64_f32(vget_low_f32(vreinterpretq_f32_m128(a)));
+ float32x2_t b10 = vget_low_f32(vreinterpretq_f32_m128(b));
+ return vreinterpretq_m128_f32(vcombine_f32(a01, b10));
+}
+
+FORCE_INLINE __m128 _mm_shuffle_ps_0101(__m128 a, __m128 b)
+{
+ float32x2_t a01 = vrev64_f32(vget_low_f32(vreinterpretq_f32_m128(a)));
+ float32x2_t b01 = vrev64_f32(vget_low_f32(vreinterpretq_f32_m128(b)));
+ return vreinterpretq_m128_f32(vcombine_f32(a01, b01));
+}
+
+// Keeps the low 64 bits of a in the low end of the result and puts the high
+// 64 bits of b in the high end of the result.
+FORCE_INLINE __m128 _mm_shuffle_ps_3210(__m128 a, __m128 b)
+{
+ float32x2_t a10 = vget_low_f32(vreinterpretq_f32_m128(a));
+ float32x2_t b32 = vget_high_f32(vreinterpretq_f32_m128(b));
+ return vreinterpretq_m128_f32(vcombine_f32(a10, b32));
+}
+
+FORCE_INLINE __m128 _mm_shuffle_ps_0011(__m128 a, __m128 b)
+{
+ float32x2_t a11 = vdup_lane_f32(vget_low_f32(vreinterpretq_f32_m128(a)), 1);
+ float32x2_t b00 = vdup_lane_f32(vget_low_f32(vreinterpretq_f32_m128(b)), 0);
+ return vreinterpretq_m128_f32(vcombine_f32(a11, b00));
+}
+
+FORCE_INLINE __m128 _mm_shuffle_ps_0022(__m128 a, __m128 b)
+{
+ float32x2_t a22 =
+ vdup_lane_f32(vget_high_f32(vreinterpretq_f32_m128(a)), 0);
+ float32x2_t b00 = vdup_lane_f32(vget_low_f32(vreinterpretq_f32_m128(b)), 0);
+ return vreinterpretq_m128_f32(vcombine_f32(a22, b00));
+}
+
+FORCE_INLINE __m128 _mm_shuffle_ps_2200(__m128 a, __m128 b)
+{
+ float32x2_t a00 = vdup_lane_f32(vget_low_f32(vreinterpretq_f32_m128(a)), 0);
+ float32x2_t b22 =
+ vdup_lane_f32(vget_high_f32(vreinterpretq_f32_m128(b)), 0);
+ return vreinterpretq_m128_f32(vcombine_f32(a00, b22));
+}
+
+FORCE_INLINE __m128 _mm_shuffle_ps_3202(__m128 a, __m128 b)
+{
+ float32_t a0 = vgetq_lane_f32(vreinterpretq_f32_m128(a), 0);
+ float32x2_t a22 =
+ vdup_lane_f32(vget_high_f32(vreinterpretq_f32_m128(a)), 0);
+ float32x2_t a02 = vset_lane_f32(a0, a22, 1); /* TODO: use vzip ?*/
+ float32x2_t b32 = vget_high_f32(vreinterpretq_f32_m128(b));
+ return vreinterpretq_m128_f32(vcombine_f32(a02, b32));
+}
+
+FORCE_INLINE __m128 _mm_shuffle_ps_1133(__m128 a, __m128 b)
+{
+ float32x2_t a33 =
+ vdup_lane_f32(vget_high_f32(vreinterpretq_f32_m128(a)), 1);
+ float32x2_t b11 = vdup_lane_f32(vget_low_f32(vreinterpretq_f32_m128(b)), 1);
+ return vreinterpretq_m128_f32(vcombine_f32(a33, b11));
+}
+
+FORCE_INLINE __m128 _mm_shuffle_ps_2010(__m128 a, __m128 b)
+{
+ float32x2_t a10 = vget_low_f32(vreinterpretq_f32_m128(a));
+ float32_t b2 = vgetq_lane_f32(vreinterpretq_f32_m128(b), 2);
+ float32x2_t b00 = vdup_lane_f32(vget_low_f32(vreinterpretq_f32_m128(b)), 0);
+ float32x2_t b20 = vset_lane_f32(b2, b00, 1);
+ return vreinterpretq_m128_f32(vcombine_f32(a10, b20));
+}
+
+FORCE_INLINE __m128 _mm_shuffle_ps_2001(__m128 a, __m128 b)
+{
+ float32x2_t a01 = vrev64_f32(vget_low_f32(vreinterpretq_f32_m128(a)));
+    float32_t b2 = vgetq_lane_f32(vreinterpretq_f32_m128(b), 2);
+ float32x2_t b00 = vdup_lane_f32(vget_low_f32(vreinterpretq_f32_m128(b)), 0);
+ float32x2_t b20 = vset_lane_f32(b2, b00, 1);
+ return vreinterpretq_m128_f32(vcombine_f32(a01, b20));
+}
+
+FORCE_INLINE __m128 _mm_shuffle_ps_2032(__m128 a, __m128 b)
+{
+ float32x2_t a32 = vget_high_f32(vreinterpretq_f32_m128(a));
+    float32_t b2 = vgetq_lane_f32(vreinterpretq_f32_m128(b), 2);
+ float32x2_t b00 = vdup_lane_f32(vget_low_f32(vreinterpretq_f32_m128(b)), 0);
+ float32x2_t b20 = vset_lane_f32(b2, b00, 1);
+ return vreinterpretq_m128_f32(vcombine_f32(a32, b20));
+}
+
+// NEON does not support a general purpose permute intrinsic
+// Selects four specific single-precision, floating-point values from a and b,
+// based on the mask i.
+//
+// C equivalent:
+// __m128 _mm_shuffle_ps_default(__m128 a, __m128 b,
+// __constrange(0, 255) int imm) {
+// __m128 ret;
+// ret[0] = a[imm & 0x3]; ret[1] = a[(imm >> 2) & 0x3];
+// ret[2] = b[(imm >> 4) & 0x03]; ret[3] = b[(imm >> 6) & 0x03];
+// return ret;
+// }
+//
+// https://msdn.microsoft.com/en-us/library/vstudio/5f0858x0(v=vs.100).aspx
+#define _mm_shuffle_ps_default(a, b, imm) \
+ __extension__({ \
+ float32x4_t ret; \
+ ret = vmovq_n_f32( \
+ vgetq_lane_f32(vreinterpretq_f32_m128(a), (imm) & (0x3))); \
+ ret = vsetq_lane_f32( \
+ vgetq_lane_f32(vreinterpretq_f32_m128(a), ((imm) >> 2) & 0x3), \
+ ret, 1); \
+ ret = vsetq_lane_f32( \
+ vgetq_lane_f32(vreinterpretq_f32_m128(b), ((imm) >> 4) & 0x3), \
+ ret, 2); \
+ ret = vsetq_lane_f32( \
+ vgetq_lane_f32(vreinterpretq_f32_m128(b), ((imm) >> 6) & 0x3), \
+ ret, 3); \
+ vreinterpretq_m128_f32(ret); \
+ })
+
+// FORCE_INLINE __m128 _mm_shuffle_ps(__m128 a, __m128 b, __constrange(0,255)
+// int imm)
+#if __has_builtin(__builtin_shufflevector)
+#define _mm_shuffle_ps(a, b, imm) \
+ __extension__({ \
+ float32x4_t _input1 = vreinterpretq_f32_m128(a); \
+ float32x4_t _input2 = vreinterpretq_f32_m128(b); \
+ float32x4_t _shuf = __builtin_shufflevector( \
+ _input1, _input2, (imm) & (0x3), ((imm) >> 2) & 0x3, \
+ (((imm) >> 4) & 0x3) + 4, (((imm) >> 6) & 0x3) + 4); \
+ vreinterpretq_m128_f32(_shuf); \
+ })
+#else // generic
+#define _mm_shuffle_ps(a, b, imm) \
+ __extension__({ \
+ __m128 ret; \
+ switch (imm) { \
+ case _MM_SHUFFLE(1, 0, 3, 2): \
+ ret = _mm_shuffle_ps_1032((a), (b)); \
+ break; \
+ case _MM_SHUFFLE(2, 3, 0, 1): \
+ ret = _mm_shuffle_ps_2301((a), (b)); \
+ break; \
+ case _MM_SHUFFLE(0, 3, 2, 1): \
+ ret = _mm_shuffle_ps_0321((a), (b)); \
+ break; \
+ case _MM_SHUFFLE(2, 1, 0, 3): \
+ ret = _mm_shuffle_ps_2103((a), (b)); \
+ break; \
+ case _MM_SHUFFLE(1, 0, 1, 0): \
+ ret = _mm_movelh_ps((a), (b)); \
+ break; \
+ case _MM_SHUFFLE(1, 0, 0, 1): \
+ ret = _mm_shuffle_ps_1001((a), (b)); \
+ break; \
+ case _MM_SHUFFLE(0, 1, 0, 1): \
+ ret = _mm_shuffle_ps_0101((a), (b)); \
+ break; \
+ case _MM_SHUFFLE(3, 2, 1, 0): \
+ ret = _mm_shuffle_ps_3210((a), (b)); \
+ break; \
+ case _MM_SHUFFLE(0, 0, 1, 1): \
+ ret = _mm_shuffle_ps_0011((a), (b)); \
+ break; \
+ case _MM_SHUFFLE(0, 0, 2, 2): \
+ ret = _mm_shuffle_ps_0022((a), (b)); \
+ break; \
+ case _MM_SHUFFLE(2, 2, 0, 0): \
+ ret = _mm_shuffle_ps_2200((a), (b)); \
+ break; \
+ case _MM_SHUFFLE(3, 2, 0, 2): \
+ ret = _mm_shuffle_ps_3202((a), (b)); \
+ break; \
+ case _MM_SHUFFLE(3, 2, 3, 2): \
+ ret = _mm_movehl_ps((b), (a)); \
+ break; \
+ case _MM_SHUFFLE(1, 1, 3, 3): \
+ ret = _mm_shuffle_ps_1133((a), (b)); \
+ break; \
+ case _MM_SHUFFLE(2, 0, 1, 0): \
+ ret = _mm_shuffle_ps_2010((a), (b)); \
+ break; \
+ case _MM_SHUFFLE(2, 0, 0, 1): \
+ ret = _mm_shuffle_ps_2001((a), (b)); \
+ break; \
+ case _MM_SHUFFLE(2, 0, 3, 2): \
+ ret = _mm_shuffle_ps_2032((a), (b)); \
+ break; \
+ default: \
+ ret = _mm_shuffle_ps_default((a), (b), (imm)); \
+ break; \
+ } \
+ ret; \
+ })
+#endif
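+
+// Illustrative example: _MM_SHUFFLE(z, y, x, w) encodes (z << 6) | (y << 4) |
+// (x << 2) | w, so _mm_shuffle_ps(a, b, _MM_SHUFFLE(3, 2, 1, 0)) selects
+// {a[0], a[1], b[2], b[3]} and, in the generic path above, is dispatched to
+// _mm_shuffle_ps_3210.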
+
+// Takes the upper 64 bits of a and places it in the low end of the result
+// Takes the lower 64 bits of a and places it into the high end of the result.
+FORCE_INLINE __m128i _mm_shuffle_epi_1032(__m128i a)
+{
+ int32x2_t a32 = vget_high_s32(vreinterpretq_s32_m128i(a));
+ int32x2_t a10 = vget_low_s32(vreinterpretq_s32_m128i(a));
+ return vreinterpretq_m128i_s32(vcombine_s32(a32, a10));
+}
+
+// Takes the lower two 32-bit values from a, swaps them, and places them in the
+// low end of the result; takes the upper two 32-bit values from a, swaps them,
+// and places them in the high end of the result.
+FORCE_INLINE __m128i _mm_shuffle_epi_2301(__m128i a)
+{
+ int32x2_t a01 = vrev64_s32(vget_low_s32(vreinterpretq_s32_m128i(a)));
+ int32x2_t a23 = vrev64_s32(vget_high_s32(vreinterpretq_s32_m128i(a)));
+ return vreinterpretq_m128i_s32(vcombine_s32(a01, a23));
+}
+
+// Rotates the least significant 32 bits into the most significant 32 bits, and
+// shifts the rest down.
+FORCE_INLINE __m128i _mm_shuffle_epi_0321(__m128i a)
+{
+ return vreinterpretq_m128i_s32(
+ vextq_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(a), 1));
+}
+
+// Rotates the most significant 32 bits into the least significant 32 bits, and
+// shifts the rest up.
+FORCE_INLINE __m128i _mm_shuffle_epi_2103(__m128i a)
+{
+ return vreinterpretq_m128i_s32(
+ vextq_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(a), 3));
+}
+
+// gets the lower 64 bits of a, and places it in the upper 64 bits
+// gets the lower 64 bits of a and places it in the lower 64 bits
+FORCE_INLINE __m128i _mm_shuffle_epi_1010(__m128i a)
+{
+ int32x2_t a10 = vget_low_s32(vreinterpretq_s32_m128i(a));
+ return vreinterpretq_m128i_s32(vcombine_s32(a10, a10));
+}
+
+// gets the lower 64 bits of a, swaps the 0 and 1 elements, and places it in the
+// lower 64 bits gets the lower 64 bits of a, and places it in the upper 64 bits
+FORCE_INLINE __m128i _mm_shuffle_epi_1001(__m128i a)
+{
+ int32x2_t a01 = vrev64_s32(vget_low_s32(vreinterpretq_s32_m128i(a)));
+ int32x2_t a10 = vget_low_s32(vreinterpretq_s32_m128i(a));
+ return vreinterpretq_m128i_s32(vcombine_s32(a01, a10));
+}
+
+// gets the lower 64 bits of a, swaps the 0 and 1 elements and places it in the
+// upper 64 bits gets the lower 64 bits of a, swaps the 0 and 1 elements, and
+// places it in the lower 64 bits
+FORCE_INLINE __m128i _mm_shuffle_epi_0101(__m128i a)
+{
+ int32x2_t a01 = vrev64_s32(vget_low_s32(vreinterpretq_s32_m128i(a)));
+ return vreinterpretq_m128i_s32(vcombine_s32(a01, a01));
+}
+
+FORCE_INLINE __m128i _mm_shuffle_epi_2211(__m128i a)
+{
+ int32x2_t a11 = vdup_lane_s32(vget_low_s32(vreinterpretq_s32_m128i(a)), 1);
+ int32x2_t a22 = vdup_lane_s32(vget_high_s32(vreinterpretq_s32_m128i(a)), 0);
+ return vreinterpretq_m128i_s32(vcombine_s32(a11, a22));
+}
+
+FORCE_INLINE __m128i _mm_shuffle_epi_0122(__m128i a)
+{
+ int32x2_t a22 = vdup_lane_s32(vget_high_s32(vreinterpretq_s32_m128i(a)), 0);
+ int32x2_t a01 = vrev64_s32(vget_low_s32(vreinterpretq_s32_m128i(a)));
+ return vreinterpretq_m128i_s32(vcombine_s32(a22, a01));
+}
+
+FORCE_INLINE __m128i _mm_shuffle_epi_3332(__m128i a)
+{
+ int32x2_t a32 = vget_high_s32(vreinterpretq_s32_m128i(a));
+ int32x2_t a33 = vdup_lane_s32(vget_high_s32(vreinterpretq_s32_m128i(a)), 1);
+ return vreinterpretq_m128i_s32(vcombine_s32(a32, a33));
+}
+
+// Shuffle packed 8-bit integers in a according to shuffle control mask in the
+// corresponding 8-bit element of b, and store the results in dst.
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_shuffle_epi8
+FORCE_INLINE __m128i _mm_shuffle_epi8(__m128i a, __m128i b)
+{
+ int8x16_t tbl = vreinterpretq_s8_m128i(a); // input a
+ uint8x16_t idx = vreinterpretq_u8_m128i(b); // input b
+ uint8x16_t idx_masked =
+ vandq_u8(idx, vdupq_n_u8(0x8F)); // avoid using meaningless bits
+#if defined(__aarch64__)
+ return vreinterpretq_m128i_s8(vqtbl1q_s8(tbl, idx_masked));
+#elif defined(__GNUC__)
+ int8x16_t ret;
+ // %e and %f represent the even and odd D registers
+ // respectively.
+ __asm__ __volatile__(
+ "vtbl.8 %e[ret], {%e[tbl], %f[tbl]}, %e[idx]\n"
+ "vtbl.8 %f[ret], {%e[tbl], %f[tbl]}, %f[idx]\n"
+ : [ret] "=&w"(ret)
+ : [tbl] "w"(tbl), [idx] "w"(idx_masked));
+ return vreinterpretq_m128i_s8(ret);
+#else
+    // Generic fallback: split the table into two halves and look up with vtbl2.
+ int8x8x2_t a_split = {vget_low_s8(tbl), vget_high_s8(tbl)};
+ return vreinterpretq_m128i_s8(
+ vcombine_s8(vtbl2_s8(a_split, vget_low_u8(idx_masked)),
+ vtbl2_s8(a_split, vget_high_u8(idx_masked))));
+#endif
+}
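+
+// Illustrative example: _mm_shuffle_epi8(a, _mm_setzero_si128()) broadcasts
+// byte 0 of a to all 16 lanes, while any control byte in b with its top bit
+// set forces the corresponding output byte to zero (the 0x8F mask above keeps
+// that bit, so vqtbl1q_s8/vtbl2 sees an out-of-range index and returns 0).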
+
+// C equivalent:
+// __m128i _mm_shuffle_epi32_default(__m128i a,
+// __constrange(0, 255) int imm) {
+// __m128i ret;
+// ret[0] = a[imm & 0x3]; ret[1] = a[(imm >> 2) & 0x3];
+// ret[2] = a[(imm >> 4) & 0x03]; ret[3] = a[(imm >> 6) & 0x03];
+// return ret;
+// }
+#define _mm_shuffle_epi32_default(a, imm) \
+ __extension__({ \
+ int32x4_t ret; \
+ ret = vmovq_n_s32( \
+ vgetq_lane_s32(vreinterpretq_s32_m128i(a), (imm) & (0x3))); \
+ ret = vsetq_lane_s32( \
+ vgetq_lane_s32(vreinterpretq_s32_m128i(a), ((imm) >> 2) & 0x3), \
+ ret, 1); \
+ ret = vsetq_lane_s32( \
+ vgetq_lane_s32(vreinterpretq_s32_m128i(a), ((imm) >> 4) & 0x3), \
+ ret, 2); \
+ ret = vsetq_lane_s32( \
+ vgetq_lane_s32(vreinterpretq_s32_m128i(a), ((imm) >> 6) & 0x3), \
+ ret, 3); \
+ vreinterpretq_m128i_s32(ret); \
+ })
+
+// FORCE_INLINE __m128i _mm_shuffle_epi32_splat(__m128i a, __constrange(0,255)
+// int imm)
+#if defined(__aarch64__)
+#define _mm_shuffle_epi32_splat(a, imm) \
+ __extension__({ \
+ vreinterpretq_m128i_s32( \
+ vdupq_laneq_s32(vreinterpretq_s32_m128i(a), (imm))); \
+ })
+#else
+#define _mm_shuffle_epi32_splat(a, imm) \
+ __extension__({ \
+ vreinterpretq_m128i_s32( \
+ vdupq_n_s32(vgetq_lane_s32(vreinterpretq_s32_m128i(a), (imm)))); \
+ })
+#endif
+
+// Shuffles the 4 signed or unsigned 32-bit integers in a as specified by imm.
+// https://msdn.microsoft.com/en-us/library/56f67xbk%28v=vs.90%29.aspx
+// FORCE_INLINE __m128i _mm_shuffle_epi32(__m128i a,
+// __constrange(0,255) int imm)
+#if __has_builtin(__builtin_shufflevector)
+#define _mm_shuffle_epi32(a, imm) \
+ __extension__({ \
+ int32x4_t _input = vreinterpretq_s32_m128i(a); \
+ int32x4_t _shuf = __builtin_shufflevector( \
+ _input, _input, (imm) & (0x3), ((imm) >> 2) & 0x3, \
+ ((imm) >> 4) & 0x3, ((imm) >> 6) & 0x3); \
+ vreinterpretq_m128i_s32(_shuf); \
+ })
+#else // generic
+#define _mm_shuffle_epi32(a, imm) \
+ __extension__({ \
+ __m128i ret; \
+ switch (imm) { \
+ case _MM_SHUFFLE(1, 0, 3, 2): \
+ ret = _mm_shuffle_epi_1032((a)); \
+ break; \
+ case _MM_SHUFFLE(2, 3, 0, 1): \
+ ret = _mm_shuffle_epi_2301((a)); \
+ break; \
+ case _MM_SHUFFLE(0, 3, 2, 1): \
+ ret = _mm_shuffle_epi_0321((a)); \
+ break; \
+ case _MM_SHUFFLE(2, 1, 0, 3): \
+ ret = _mm_shuffle_epi_2103((a)); \
+ break; \
+ case _MM_SHUFFLE(1, 0, 1, 0): \
+ ret = _mm_shuffle_epi_1010((a)); \
+ break; \
+ case _MM_SHUFFLE(1, 0, 0, 1): \
+ ret = _mm_shuffle_epi_1001((a)); \
+ break; \
+ case _MM_SHUFFLE(0, 1, 0, 1): \
+ ret = _mm_shuffle_epi_0101((a)); \
+ break; \
+ case _MM_SHUFFLE(2, 2, 1, 1): \
+ ret = _mm_shuffle_epi_2211((a)); \
+ break; \
+ case _MM_SHUFFLE(0, 1, 2, 2): \
+ ret = _mm_shuffle_epi_0122((a)); \
+ break; \
+ case _MM_SHUFFLE(3, 3, 3, 2): \
+ ret = _mm_shuffle_epi_3332((a)); \
+ break; \
+ case _MM_SHUFFLE(0, 0, 0, 0): \
+ ret = _mm_shuffle_epi32_splat((a), 0); \
+ break; \
+ case _MM_SHUFFLE(1, 1, 1, 1): \
+ ret = _mm_shuffle_epi32_splat((a), 1); \
+ break; \
+ case _MM_SHUFFLE(2, 2, 2, 2): \
+ ret = _mm_shuffle_epi32_splat((a), 2); \
+ break; \
+ case _MM_SHUFFLE(3, 3, 3, 3): \
+ ret = _mm_shuffle_epi32_splat((a), 3); \
+ break; \
+ default: \
+ ret = _mm_shuffle_epi32_default((a), (imm)); \
+ break; \
+ } \
+ ret; \
+ })
+#endif
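+
+// Illustrative example: _mm_shuffle_epi32(a, _MM_SHUFFLE(0, 0, 0, 0))
+// broadcasts lane 0 of a to all four lanes; in the generic path above this is
+// routed to _mm_shuffle_epi32_splat, i.e. a single vdupq_laneq_s32/vdupq_n_s32.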
+
+// Shuffles the lower 4 signed or unsigned 16-bit integers in a as specified
+// by imm.
+// https://docs.microsoft.com/en-us/previous-versions/visualstudio/visual-studio-2010/y41dkk37(v=vs.100)
+// FORCE_INLINE __m128i _mm_shufflelo_epi16_function(__m128i a,
+// __constrange(0,255) int
+// imm)
+#define _mm_shufflelo_epi16_function(a, imm) \
+ __extension__({ \
+ int16x8_t ret = vreinterpretq_s16_m128i(a); \
+ int16x4_t lowBits = vget_low_s16(ret); \
+ ret = vsetq_lane_s16(vget_lane_s16(lowBits, (imm) & (0x3)), ret, 0); \
+ ret = vsetq_lane_s16(vget_lane_s16(lowBits, ((imm) >> 2) & 0x3), ret, \
+ 1); \
+ ret = vsetq_lane_s16(vget_lane_s16(lowBits, ((imm) >> 4) & 0x3), ret, \
+ 2); \
+ ret = vsetq_lane_s16(vget_lane_s16(lowBits, ((imm) >> 6) & 0x3), ret, \
+ 3); \
+ vreinterpretq_m128i_s16(ret); \
+ })
+
+// FORCE_INLINE __m128i _mm_shufflelo_epi16(__m128i a,
+// __constrange(0,255) int imm)
+#if __has_builtin(__builtin_shufflevector)
+#define _mm_shufflelo_epi16(a, imm) \
+ __extension__({ \
+ int16x8_t _input = vreinterpretq_s16_m128i(a); \
+ int16x8_t _shuf = __builtin_shufflevector( \
+ _input, _input, ((imm) & (0x3)), (((imm) >> 2) & 0x3), \
+ (((imm) >> 4) & 0x3), (((imm) >> 6) & 0x3), 4, 5, 6, 7); \
+ vreinterpretq_m128i_s16(_shuf); \
+ })
+#else // generic
+#define _mm_shufflelo_epi16(a, imm) _mm_shufflelo_epi16_function((a), (imm))
+#endif
+
+// Shuffles the upper 4 signed or unsigned 16-bit integers in a as specified
+// by imm.
+// https://msdn.microsoft.com/en-us/library/13ywktbs(v=vs.100).aspx
+// FORCE_INLINE __m128i _mm_shufflehi_epi16_function(__m128i a,
+// __constrange(0,255) int
+// imm)
+#define _mm_shufflehi_epi16_function(a, imm) \
+ __extension__({ \
+ int16x8_t ret = vreinterpretq_s16_m128i(a); \
+ int16x4_t highBits = vget_high_s16(ret); \
+ ret = vsetq_lane_s16(vget_lane_s16(highBits, (imm) & (0x3)), ret, 4); \
+ ret = vsetq_lane_s16(vget_lane_s16(highBits, ((imm) >> 2) & 0x3), ret, \
+ 5); \
+ ret = vsetq_lane_s16(vget_lane_s16(highBits, ((imm) >> 4) & 0x3), ret, \
+ 6); \
+ ret = vsetq_lane_s16(vget_lane_s16(highBits, ((imm) >> 6) & 0x3), ret, \
+ 7); \
+ vreinterpretq_m128i_s16(ret); \
+ })
+
+// FORCE_INLINE __m128i _mm_shufflehi_epi16(__m128i a,
+// __constrange(0,255) int imm)
+#if __has_builtin(__builtin_shufflevector)
+#define _mm_shufflehi_epi16(a, imm) \
+ __extension__({ \
+ int16x8_t _input = vreinterpretq_s16_m128i(a); \
+ int16x8_t _shuf = __builtin_shufflevector( \
+ _input, _input, 0, 1, 2, 3, ((imm) & (0x3)) + 4, \
+ (((imm) >> 2) & 0x3) + 4, (((imm) >> 4) & 0x3) + 4, \
+ (((imm) >> 6) & 0x3) + 4); \
+ vreinterpretq_m128i_s16(_shuf); \
+ })
+#else // generic
+#define _mm_shufflehi_epi16(a, imm) _mm_shufflehi_epi16_function((a), (imm))
+#endif
+
+// Shuffle double-precision (64-bit) floating-point elements using the control
+// in imm8, and store the results in dst.
+//
+// dst[63:0] := (imm8[0] == 0) ? a[63:0] : a[127:64]
+// dst[127:64] := (imm8[1] == 0) ? b[63:0] : b[127:64]
+//
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_shuffle_pd
+#if __has_builtin(__builtin_shufflevector)
+#define _mm_shuffle_pd(a, b, imm8) \
+ vreinterpretq_m128d_s64(__builtin_shufflevector( \
+ vreinterpretq_s64_m128d(a), vreinterpretq_s64_m128d(b), imm8 & 0x1, \
+ ((imm8 & 0x2) >> 1) + 2))
+#else
+#define _mm_shuffle_pd(a, b, imm8) \
+ _mm_castsi128_pd(_mm_set_epi64x( \
+ vgetq_lane_s64(vreinterpretq_s64_m128d(b), (imm8 & 0x2) >> 1), \
+ vgetq_lane_s64(vreinterpretq_s64_m128d(a), imm8 & 0x1)))
+#endif
+
+// Blend packed 16-bit integers from a and b using control mask imm8, and store
+// the results in dst.
+//
+// FOR j := 0 to 7
+// i := j*16
+// IF imm8[j]
+// dst[i+15:i] := b[i+15:i]
+// ELSE
+// dst[i+15:i] := a[i+15:i]
+// FI
+// ENDFOR
+// FORCE_INLINE __m128i _mm_blend_epi16(__m128i a, __m128i b,
+// __constrange(0,255) int imm)
+#define _mm_blend_epi16(a, b, imm) \
+ __extension__({ \
+ const uint16_t _mask[8] = {((imm) & (1 << 0)) ? 0xFFFF : 0x0000, \
+ ((imm) & (1 << 1)) ? 0xFFFF : 0x0000, \
+ ((imm) & (1 << 2)) ? 0xFFFF : 0x0000, \
+ ((imm) & (1 << 3)) ? 0xFFFF : 0x0000, \
+ ((imm) & (1 << 4)) ? 0xFFFF : 0x0000, \
+ ((imm) & (1 << 5)) ? 0xFFFF : 0x0000, \
+ ((imm) & (1 << 6)) ? 0xFFFF : 0x0000, \
+ ((imm) & (1 << 7)) ? 0xFFFF : 0x0000}; \
+ uint16x8_t _mask_vec = vld1q_u16(_mask); \
+ uint16x8_t _a = vreinterpretq_u16_m128i(a); \
+ uint16x8_t _b = vreinterpretq_u16_m128i(b); \
+ vreinterpretq_m128i_u16(vbslq_u16(_mask_vec, _b, _a)); \
+ })
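+
+// Illustrative example: _mm_blend_epi16(a, b, 0x0F) takes 16-bit lanes 0..3
+// from b and lanes 4..7 from a, since bit j of the immediate selects b for
+// lane j.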
+
+// Blend packed 8-bit integers from a and b using mask, and store the results in
+// dst.
+//
+// FOR j := 0 to 15
+// i := j*8
+// IF mask[i+7]
+// dst[i+7:i] := b[i+7:i]
+// ELSE
+// dst[i+7:i] := a[i+7:i]
+// FI
+// ENDFOR
+FORCE_INLINE __m128i _mm_blendv_epi8(__m128i _a, __m128i _b, __m128i _mask)
+{
+ // Use a signed shift right to create a mask with the sign bit
+ uint8x16_t mask =
+ vreinterpretq_u8_s8(vshrq_n_s8(vreinterpretq_s8_m128i(_mask), 7));
+ uint8x16_t a = vreinterpretq_u8_m128i(_a);
+ uint8x16_t b = vreinterpretq_u8_m128i(_b);
+ return vreinterpretq_m128i_u8(vbslq_u8(mask, b, a));
+}
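+
+// Illustrative note: only bit 7 of each mask byte matters, mirroring x86
+// PBLENDVB -- a mask byte of 0x80 selects the byte from b and 0x7F selects the
+// byte from a, which is why the signed shift right by 7 above is sufficient.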
+
+/* Shifts */
+
+// Shift packed 16-bit integers in a right by imm while shifting in sign
+// bits, and store the results in dst.
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_srai_epi16
+FORCE_INLINE __m128i _mm_srai_epi16(__m128i a, int imm)
+{
+ const int count = (imm & ~15) ? 15 : imm;
+ return (__m128i) vshlq_s16((int16x8_t) a, vdupq_n_s16(-count));
+}
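+
+// Illustrative example: counts above 15 are clamped to 15 so every lane
+// degenerates to 0 or -1, matching x86 sign-fill behaviour; e.g.
+// _mm_srai_epi16(_mm_set1_epi16(-4), 1) yields -2 in every lane.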
+
+// Shifts the 8 signed or unsigned 16-bit integers in a left by count bits while
+// shifting in zeros.
+//
+// r0 := a0 << count
+// r1 := a1 << count
+// ...
+// r7 := a7 << count
+//
+// https://msdn.microsoft.com/en-us/library/es73bcsy(v=vs.90).aspx
+#define _mm_slli_epi16(a, imm) \
+ __extension__({ \
+ __m128i ret; \
+        if (unlikely((imm) <= 0)) {                                           \
+            ret = a;                                                          \
+        } else if (unlikely((imm) > 15)) {                                    \
+ ret = _mm_setzero_si128(); \
+ } else { \
+ ret = vreinterpretq_m128i_s16( \
+ vshlq_n_s16(vreinterpretq_s16_m128i(a), (imm))); \
+ } \
+ ret; \
+ })
+
+// Shifts the 4 signed or unsigned 32-bit integers in a left by count bits while
+// shifting in zeros.
+// https://msdn.microsoft.com/en-us/library/z2k3bbtb%28v=vs.90%29.aspx
+// FORCE_INLINE __m128i _mm_slli_epi32(__m128i a, __constrange(0,255) int imm)
+FORCE_INLINE __m128i _mm_slli_epi32(__m128i a, int imm)
+{
+ if (unlikely(imm <= 0)) /* TODO: add constant range macro: [0, 255] */
+ return a;
+ if (unlikely(imm > 31))
+ return _mm_setzero_si128();
+ return vreinterpretq_m128i_s32(
+ vshlq_s32(vreinterpretq_s32_m128i(a), vdupq_n_s32(imm)));
+}
+
+// Shift packed 64-bit integers in a left by imm8 while shifting in zeros, and
+// store the results in dst.
+FORCE_INLINE __m128i _mm_slli_epi64(__m128i a, int imm)
+{
+ if (unlikely(imm <= 0)) /* TODO: add constant range macro: [0, 255] */
+ return a;
+ if (unlikely(imm > 63))
+ return _mm_setzero_si128();
+ return vreinterpretq_m128i_s64(
+ vshlq_s64(vreinterpretq_s64_m128i(a), vdupq_n_s64(imm)));
+}
+
+// Shift packed 16-bit integers in a right by imm8 while shifting in zeros, and
+// store the results in dst.
+//
+// FOR j := 0 to 7
+// i := j*16
+// IF imm8[7:0] > 15
+// dst[i+15:i] := 0
+// ELSE
+// dst[i+15:i] := ZeroExtend16(a[i+15:i] >> imm8[7:0])
+// FI
+// ENDFOR
+//
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_srli_epi16
+#define _mm_srli_epi16(a, imm) \
+ __extension__({ \
+ __m128i ret; \
+        if (unlikely((imm) == 0)) {                                           \
+            ret = a;                                                          \
+        } else if (likely(0 < (imm) && (imm) < 16)) {                         \
+ ret = vreinterpretq_m128i_u16( \
+ vshlq_u16(vreinterpretq_u16_m128i(a), vdupq_n_s16(-imm))); \
+ } else { \
+ ret = _mm_setzero_si128(); \
+ } \
+ ret; \
+ })
+
+// Shift packed 32-bit integers in a right by imm8 while shifting in zeros, and
+// store the results in dst.
+//
+// FOR j := 0 to 3
+// i := j*32
+// IF imm8[7:0] > 31
+// dst[i+31:i] := 0
+// ELSE
+// dst[i+31:i] := ZeroExtend32(a[i+31:i] >> imm8[7:0])
+// FI
+// ENDFOR
+//
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_srli_epi32
+// FORCE_INLINE __m128i _mm_srli_epi32(__m128i a, __constrange(0,255) int imm)
+#define _mm_srli_epi32(a, imm) \
+ __extension__({ \
+ __m128i ret; \
+ if (unlikely((imm) == 0)) { \
+ ret = a; \
+        } else if (likely(0 < (imm) && (imm) < 32)) {                         \
+ ret = vreinterpretq_m128i_u32( \
+ vshlq_u32(vreinterpretq_u32_m128i(a), vdupq_n_s32(-imm))); \
+ } else { \
+ ret = _mm_setzero_si128(); \
+ } \
+ ret; \
+ })
+
+// Shift packed 64-bit integers in a right by imm8 while shifting in zeros, and
+// store the results in dst.
+//
+// FOR j := 0 to 1
+// i := j*64
+// IF imm8[7:0] > 63
+// dst[i+63:i] := 0
+// ELSE
+// dst[i+63:i] := ZeroExtend64(a[i+63:i] >> imm8[7:0])
+// FI
+// ENDFOR
+//
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_srli_epi64
+#define _mm_srli_epi64(a, imm) \
+ __extension__({ \
+ __m128i ret; \
+ if (unlikely((imm) == 0)) { \
+ ret = a; \
+        } else if (likely(0 < (imm) && (imm) < 64)) {                         \
+ ret = vreinterpretq_m128i_u64( \
+ vshlq_u64(vreinterpretq_u64_m128i(a), vdupq_n_s64(-imm))); \
+ } else { \
+ ret = _mm_setzero_si128(); \
+ } \
+ ret; \
+ })
+
+// Shift packed 32-bit integers in a right by imm8 while shifting in sign bits,
+// and store the results in dst.
+//
+// FOR j := 0 to 3
+// i := j*32
+// IF imm8[7:0] > 31
+// dst[i+31:i] := (a[i+31] ? 0xFFFFFFFF : 0x0)
+// ELSE
+// dst[i+31:i] := SignExtend32(a[i+31:i] >> imm8[7:0])
+// FI
+// ENDFOR
+//
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_srai_epi32
+// FORCE_INLINE __m128i _mm_srai_epi32(__m128i a, __constrange(0,255) int imm)
+#define _mm_srai_epi32(a, imm) \
+ __extension__({ \
+ __m128i ret; \
+ if (unlikely((imm) == 0)) { \
+ ret = a; \
+        } else if (likely(0 < (imm) && (imm) < 32)) {                         \
+ ret = vreinterpretq_m128i_s32( \
+ vshlq_s32(vreinterpretq_s32_m128i(a), vdupq_n_s32(-imm))); \
+ } else { \
+ ret = vreinterpretq_m128i_s32( \
+ vshrq_n_s32(vreinterpretq_s32_m128i(a), 31)); \
+ } \
+ ret; \
+ })
+
+// Shifts the 128-bit value in a right by imm bytes while shifting in zeros.
+// imm must be an immediate.
+//
+// r := srl(a, imm*8)
+//
+// https://msdn.microsoft.com/en-us/library/305w28yz(v=vs.100).aspx
+// FORCE_INLINE _mm_srli_si128(__m128i a, __constrange(0,255) int imm)
+#define _mm_srli_si128(a, imm) \
+ __extension__({ \
+ __m128i ret; \
+ if (unlikely((imm) <= 0)) { \
+ ret = a; \
+        } else if (unlikely((imm) > 15)) {                                    \
+ ret = _mm_setzero_si128(); \
+ } else { \
+ ret = vreinterpretq_m128i_s8( \
+ vextq_s8(vreinterpretq_s8_m128i(a), vdupq_n_s8(0), (imm))); \
+ } \
+ ret; \
+ })
+
+// Shifts the 128-bit value in a left by imm bytes while shifting in zeros. imm
+// must be an immediate.
+//
+// r := a << (imm * 8)
+//
+// https://msdn.microsoft.com/en-us/library/34d3k2kt(v=vs.100).aspx
+// FORCE_INLINE __m128i _mm_slli_si128(__m128i a, __constrange(0,255) int imm)
+#define _mm_slli_si128(a, imm) \
+ __extension__({ \
+ __m128i ret; \
+ if (unlikely((imm) <= 0)) { \
+ ret = a; \
+        } else if (unlikely((imm) > 15)) {                                    \
+ ret = _mm_setzero_si128(); \
+ } else { \
+ ret = vreinterpretq_m128i_s8(vextq_s8( \
+ vdupq_n_s8(0), vreinterpretq_s8_m128i(a), 16 - (imm))); \
+ } \
+ ret; \
+ })
+
+// Shifts the 8 signed or unsigned 16-bit integers in a left by count bits while
+// shifting in zeros.
+//
+// r0 := a0 << count
+// r1 := a1 << count
+// ...
+// r7 := a7 << count
+//
+// https://msdn.microsoft.com/en-us/library/c79w388h(v%3dvs.90).aspx
+FORCE_INLINE __m128i _mm_sll_epi16(__m128i a, __m128i count)
+{
+ uint64_t c = vreinterpretq_nth_u64_m128i(count, 0);
+ if (unlikely(c > 15))
+ return _mm_setzero_si128();
+
+ int16x8_t vc = vdupq_n_s16((int16_t) c);
+ return vreinterpretq_m128i_s16(vshlq_s16(vreinterpretq_s16_m128i(a), vc));
+}
+
+// Shifts the 4 signed or unsigned 32-bit integers in a left by count bits while
+// shifting in zeros.
+//
+// r0 := a0 << count
+// r1 := a1 << count
+// r2 := a2 << count
+// r3 := a3 << count
+//
+// https://msdn.microsoft.com/en-us/library/6fe5a6s9(v%3dvs.90).aspx
+FORCE_INLINE __m128i _mm_sll_epi32(__m128i a, __m128i count)
+{
+ uint64_t c = vreinterpretq_nth_u64_m128i(count, 0);
+ if (unlikely(c > 31))
+ return _mm_setzero_si128();
+
+ int32x4_t vc = vdupq_n_s32((int32_t) c);
+ return vreinterpretq_m128i_s32(vshlq_s32(vreinterpretq_s32_m128i(a), vc));
+}
+
+// Shifts the 2 signed or unsigned 64-bit integers in a left by count bits while
+// shifting in zeros.
+//
+// r0 := a0 << count
+// r1 := a1 << count
+//
+// https://msdn.microsoft.com/en-us/library/6ta9dffd(v%3dvs.90).aspx
+FORCE_INLINE __m128i _mm_sll_epi64(__m128i a, __m128i count)
+{
+ uint64_t c = vreinterpretq_nth_u64_m128i(count, 0);
+ if (unlikely(c > 63))
+ return _mm_setzero_si128();
+
+ int64x2_t vc = vdupq_n_s64((int64_t) c);
+ return vreinterpretq_m128i_s64(vshlq_s64(vreinterpretq_s64_m128i(a), vc));
+}
+
+// Shifts the 8 signed or unsigned 16-bit integers in a right by count bits
+// while shifting in zeros.
+//
+// r0 := srl(a0, count)
+// r1 := srl(a1, count)
+// ...
+// r7 := srl(a7, count)
+//
+// https://msdn.microsoft.com/en-us/library/wd5ax830(v%3dvs.90).aspx
+FORCE_INLINE __m128i _mm_srl_epi16(__m128i a, __m128i count)
+{
+ uint64_t c = vreinterpretq_nth_u64_m128i(count, 0);
+ if (unlikely(c > 15))
+ return _mm_setzero_si128();
+
+ int16x8_t vc = vdupq_n_s16(-(int16_t) c);
+ return vreinterpretq_m128i_u16(vshlq_u16(vreinterpretq_u16_m128i(a), vc));
+}
+
+// Shifts the 4 signed or unsigned 32-bit integers in a right by count bits
+// while shifting in zeros.
+//
+// r0 := srl(a0, count)
+// r1 := srl(a1, count)
+// r2 := srl(a2, count)
+// r3 := srl(a3, count)
+//
+// https://msdn.microsoft.com/en-us/library/a9cbttf4(v%3dvs.90).aspx
+FORCE_INLINE __m128i _mm_srl_epi32(__m128i a, __m128i count)
+{
+ uint64_t c = vreinterpretq_nth_u64_m128i(count, 0);
+ if (unlikely(c > 31))
+ return _mm_setzero_si128();
+
+ int32x4_t vc = vdupq_n_s32(-(int32_t) c);
+ return vreinterpretq_m128i_u32(vshlq_u32(vreinterpretq_u32_m128i(a), vc));
+}
+
+// Shifts the 2 signed or unsigned 64-bit integers in a right by count bits
+// while shifting in zeros.
+//
+// r0 := srl(a0, count)
+// r1 := srl(a1, count)
+//
+// https://msdn.microsoft.com/en-us/library/yf6cf9k8(v%3dvs.90).aspx
+FORCE_INLINE __m128i _mm_srl_epi64(__m128i a, __m128i count)
+{
+ uint64_t c = vreinterpretq_nth_u64_m128i(count, 0);
+ if (unlikely(c > 63))
+ return _mm_setzero_si128();
+
+ int64x2_t vc = vdupq_n_s64(-(int64_t) c);
+ return vreinterpretq_m128i_u64(vshlq_u64(vreinterpretq_u64_m128i(a), vc));
+}
+
+// NEON does not provide a version of this function.
+// Creates a 16-bit mask from the most significant bits of the 16 signed or
+// unsigned 8-bit integers in a and zero extends the upper bits.
+// https://msdn.microsoft.com/en-us/library/vstudio/s090c8fk(v=vs.100).aspx
+FORCE_INLINE int _mm_movemask_epi8(__m128i a)
+{
+ // Use increasingly wide shifts+adds to collect the sign bits
+ // together.
+ // Since the widening shifts would be rather confusing to follow in little
+ // endian, everything will be illustrated in big endian order instead. This
+ // has a different result - the bits would actually be reversed on a big
+ // endian machine.
+
+ // Starting input (only half the elements are shown):
+ // 89 ff 1d c0 00 10 99 33
+ uint8x16_t input = vreinterpretq_u8_m128i(a);
+
+ // Shift out everything but the sign bits with an unsigned shift right.
+ //
+    // Bytes of the vector:
+ // 89 ff 1d c0 00 10 99 33
+ // \ \ \ \ \ \ \ \ high_bits = (uint16x4_t)(input >> 7)
+ // | | | | | | | |
+ // 01 01 00 01 00 00 01 00
+ //
+ // Bits of first important lane(s):
+ // 10001001 (89)
+ // \______
+ // |
+ // 00000001 (01)
+ uint16x8_t high_bits = vreinterpretq_u16_u8(vshrq_n_u8(input, 7));
+
+ // Merge the even lanes together with a 16-bit unsigned shift right + add.
+ // 'xx' represents garbage data which will be ignored in the final result.
+ // In the important bytes, the add functions like a binary OR.
+ //
+ // 01 01 00 01 00 00 01 00
+ // \_ | \_ | \_ | \_ | paired16 = (uint32x4_t)(input + (input >> 7))
+ // \| \| \| \|
+ // xx 03 xx 01 xx 00 xx 02
+ //
+ // 00000001 00000001 (01 01)
+ // \_______ |
+ // \|
+ // xxxxxxxx xxxxxx11 (xx 03)
+ uint32x4_t paired16 =
+ vreinterpretq_u32_u16(vsraq_n_u16(high_bits, high_bits, 7));
+
+ // Repeat with a wider 32-bit shift + add.
+ // xx 03 xx 01 xx 00 xx 02
+ // \____ | \____ | paired32 = (uint64x1_t)(paired16 + (paired16 >>
+ // 14))
+ // \| \|
+ // xx xx xx 0d xx xx xx 02
+ //
+ // 00000011 00000001 (03 01)
+ // \\_____ ||
+ // '----.\||
+ // xxxxxxxx xxxx1101 (xx 0d)
+ uint64x2_t paired32 =
+ vreinterpretq_u64_u32(vsraq_n_u32(paired16, paired16, 14));
+
+ // Last, an even wider 64-bit shift + add to get our result in the low 8 bit
+ // lanes. xx xx xx 0d xx xx xx 02
+ // \_________ | paired64 = (uint8x8_t)(paired32 + (paired32 >>
+ // 28))
+ // \|
+ // xx xx xx xx xx xx xx d2
+ //
+ // 00001101 00000010 (0d 02)
+ // \ \___ | |
+ // '---. \| |
+ // xxxxxxxx 11010010 (xx d2)
+ uint8x16_t paired64 =
+ vreinterpretq_u8_u64(vsraq_n_u64(paired32, paired32, 28));
+
+ // Extract the low 8 bits from each 64-bit lane with 2 8-bit extracts.
+ // xx xx xx xx xx xx xx d2
+ // || return paired64[0]
+ // d2
+ // Note: Little endian would return the correct value 4b (01001011) instead.
+ return vgetq_lane_u8(paired64, 0) | ((int) vgetq_lane_u8(paired64, 8) << 8);
+}
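+
+// Illustrative example: if only byte 0 and byte 15 of a have their sign bit
+// set, _mm_movemask_epi8(a) returns 0x8001 -- bit j of the result is the sign
+// bit of byte j.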
+
+// Copy the lower 64-bit integer in a to dst.
+//
+// dst[63:0] := a[63:0]
+//
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_movepi64_pi64
+FORCE_INLINE __m64 _mm_movepi64_pi64(__m128i a)
+{
+ return vreinterpret_m64_s64(vget_low_s64(vreinterpretq_s64_m128i(a)));
+}
+
+// Copy the 64-bit integer a to the lower element of dst, and zero the upper
+// element.
+//
+// dst[63:0] := a[63:0]
+// dst[127:64] := 0
+//
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_movpi64_epi64
+FORCE_INLINE __m128i _mm_movpi64_epi64(__m64 a)
+{
+ return vreinterpretq_m128i_s64(
+ vcombine_s64(vreinterpret_s64_m64(a), vdup_n_s64(0)));
+}
+
+// NEON does not provide this method
+// Creates a 4-bit mask from the most significant bits of the four
+// single-precision, floating-point values.
+// https://msdn.microsoft.com/en-us/library/vstudio/4490ys29(v=vs.100).aspx
+FORCE_INLINE int _mm_movemask_ps(__m128 a)
+{
+ uint32x4_t input = vreinterpretq_u32_m128(a);
+#if defined(__aarch64__)
+ static const int32x4_t shift = {0, 1, 2, 3};
+ uint32x4_t tmp = vshrq_n_u32(input, 31);
+ return vaddvq_u32(vshlq_u32(tmp, shift));
+#else
+ // Uses the exact same method as _mm_movemask_epi8, see that for details.
+ // Shift out everything but the sign bits with a 32-bit unsigned shift
+ // right.
+ uint64x2_t high_bits = vreinterpretq_u64_u32(vshrq_n_u32(input, 31));
+ // Merge the two pairs together with a 64-bit unsigned shift right + add.
+ uint8x16_t paired =
+ vreinterpretq_u8_u64(vsraq_n_u64(high_bits, high_bits, 31));
+ // Extract the result.
+ return vgetq_lane_u8(paired, 0) | (vgetq_lane_u8(paired, 8) << 2);
+#endif
+}
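+
+// Illustrative example: _mm_movemask_ps(_mm_set_ps(4.0f, -3.0f, 2.0f, -1.0f))
+// returns 0b0101 = 5, since lanes 0 and 2 are negative.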
+
+// Compute the bitwise NOT of a and then AND with a 128-bit vector containing
+// all 1's, and return 1 if the result is zero, otherwise return 0.
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_test_all_ones
+FORCE_INLINE int _mm_test_all_ones(__m128i a)
+{
+ return (uint64_t)(vgetq_lane_s64(a, 0) & vgetq_lane_s64(a, 1)) ==
+ ~(uint64_t) 0;
+}
+
+// Compute the bitwise AND of 128 bits (representing integer data) in a and
+// mask, and return 1 if the result is zero, otherwise return 0.
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_test_all_zeros
+FORCE_INLINE int _mm_test_all_zeros(__m128i a, __m128i mask)
+{
+ int64x2_t a_and_mask =
+ vandq_s64(vreinterpretq_s64_m128i(a), vreinterpretq_s64_m128i(mask));
+ return (vgetq_lane_s64(a_and_mask, 0) | vgetq_lane_s64(a_and_mask, 1)) ? 0
+ : 1;
+}
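+
+// Illustrative example: _mm_test_all_zeros(a, mask) returns 1 exactly when
+// (a & mask) has no bits set, e.g. a = _mm_set1_epi32(0xF0) against
+// mask = _mm_set1_epi32(0x0F) returns 1.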
+
+/* Math operations */
+
+// Subtracts the four single-precision, floating-point values of a and b.
+//
+// r0 := a0 - b0
+// r1 := a1 - b1
+// r2 := a2 - b2
+// r3 := a3 - b3
+//
+// https://msdn.microsoft.com/en-us/library/vstudio/1zad2k61(v=vs.100).aspx
+FORCE_INLINE __m128 _mm_sub_ps(__m128 a, __m128 b)
+{
+ return vreinterpretq_m128_f32(
+ vsubq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b)));
+}
+
+// Subtract the lower single-precision (32-bit) floating-point element in b from
+// the lower single-precision (32-bit) floating-point element in a, store the
+// result in the lower element of dst, and copy the upper 3 packed elements from
+// a to the upper elements of dst.
+//
+// dst[31:0] := a[31:0] - b[31:0]
+// dst[127:32] := a[127:32]
+//
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_sub_ss
+FORCE_INLINE __m128 _mm_sub_ss(__m128 a, __m128 b)
+{
+ return _mm_move_ss(a, _mm_sub_ps(a, b));
+}
+
+// Subtract 2 packed 64-bit integers in b from 2 packed 64-bit integers in a,
+// and store the results in dst.
+// r0 := a0 - b0
+// r1 := a1 - b1
+FORCE_INLINE __m128i _mm_sub_epi64(__m128i a, __m128i b)
+{
+ return vreinterpretq_m128i_s64(
+ vsubq_s64(vreinterpretq_s64_m128i(a), vreinterpretq_s64_m128i(b)));
+}
+
+// Subtracts the 4 signed or unsigned 32-bit integers of b from the 4 signed or
+// unsigned 32-bit integers of a.
+//
+// r0 := a0 - b0
+// r1 := a1 - b1
+// r2 := a2 - b2
+// r3 := a3 - b3
+//
+// https://msdn.microsoft.com/en-us/library/vstudio/fhh866h0(v=vs.100).aspx
+FORCE_INLINE __m128i _mm_sub_epi32(__m128i a, __m128i b)
+{
+ return vreinterpretq_m128i_s32(
+ vsubq_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(b)));
+}
+
+// Subtract packed 16-bit integers in b from packed 16-bit integers in a, and
+// store the results in dst.
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_sub_epi16
+FORCE_INLINE __m128i _mm_sub_epi16(__m128i a, __m128i b)
+{
+ return vreinterpretq_m128i_s16(
+ vsubq_s16(vreinterpretq_s16_m128i(a), vreinterpretq_s16_m128i(b)));
+}
+
+// Subtract packed 8-bit integers in b from packed 8-bit integers in a, and
+// store the results in dst.
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_sub_epi8
+FORCE_INLINE __m128i _mm_sub_epi8(__m128i a, __m128i b)
+{
+ return vreinterpretq_m128i_s8(
+ vsubq_s8(vreinterpretq_s8_m128i(a), vreinterpretq_s8_m128i(b)));
+}
+
+// Subtract 64-bit integer b from 64-bit integer a, and store the result in dst.
+//
+// dst[63:0] := a[63:0] - b[63:0]
+//
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_sub_si64
+FORCE_INLINE __m64 _mm_sub_si64(__m64 a, __m64 b)
+{
+ return vreinterpret_m64_s64(
+ vsub_s64(vreinterpret_s64_m64(a), vreinterpret_s64_m64(b)));
+}
+
+// Subtracts the 8 unsigned 16-bit integers of b from the 8 unsigned 16-bit
+// integers of a and saturates.
+// https://technet.microsoft.com/en-us/subscriptions/index/f44y0s19(v=vs.90).aspx
+FORCE_INLINE __m128i _mm_subs_epu16(__m128i a, __m128i b)
+{
+ return vreinterpretq_m128i_u16(
+ vqsubq_u16(vreinterpretq_u16_m128i(a), vreinterpretq_u16_m128i(b)));
+}
+
+// Subtracts the 16 unsigned 8-bit integers of b from the 16 unsigned 8-bit
+// integers of a and saturates.
+//
+// r0 := UnsignedSaturate(a0 - b0)
+// r1 := UnsignedSaturate(a1 - b1)
+// ...
+// r15 := UnsignedSaturate(a15 - b15)
+//
+// https://technet.microsoft.com/en-us/subscriptions/yadkxc18(v=vs.90)
+FORCE_INLINE __m128i _mm_subs_epu8(__m128i a, __m128i b)
+{
+ return vreinterpretq_m128i_u8(
+ vqsubq_u8(vreinterpretq_u8_m128i(a), vreinterpretq_u8_m128i(b)));
+}
+
+// Subtracts the 16 signed 8-bit integers of b from the 16 signed 8-bit integers
+// of a and saturates.
+//
+// r0 := SignedSaturate(a0 - b0)
+// r1 := SignedSaturate(a1 - b1)
+// ...
+// r15 := SignedSaturate(a15 - b15)
+//
+// https://technet.microsoft.com/en-us/subscriptions/by7kzks1(v=vs.90)
+FORCE_INLINE __m128i _mm_subs_epi8(__m128i a, __m128i b)
+{
+ return vreinterpretq_m128i_s8(
+ vqsubq_s8(vreinterpretq_s8_m128i(a), vreinterpretq_s8_m128i(b)));
+}
+
+// Subtracts the 8 signed 16-bit integers of b from the 8 signed 16-bit integers
+// of a and saturates.
+//
+// r0 := SignedSaturate(a0 - b0)
+// r1 := SignedSaturate(a1 - b1)
+// ...
+// r7 := SignedSaturate(a7 - b7)
+//
+// https://technet.microsoft.com/en-us/subscriptions/3247z5b8(v=vs.90)
+FORCE_INLINE __m128i _mm_subs_epi16(__m128i a, __m128i b)
+{
+ return vreinterpretq_m128i_s16(
+ vqsubq_s16(vreinterpretq_s16_m128i(a), vreinterpretq_s16_m128i(b)));
+}
+
+// Subtract packed double-precision (64-bit) floating-point elements in b from
+// packed double-precision (64-bit) floating-point elements in a, and store the
+// results in dst.
+//
+// FOR j := 0 to 1
+// i := j*64
+// dst[i+63:i] := a[i+63:i] - b[i+63:i]
+// ENDFOR
+//
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_sub_pd
+FORCE_INLINE __m128d _mm_sub_pd(__m128d a, __m128d b)
+{
+#if defined(__aarch64__)
+ return vreinterpretq_m128d_f64(
+ vsubq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b)));
+#else
+ double *da = (double *) &a;
+ double *db = (double *) &b;
+ double c[2];
+ c[0] = da[0] - db[0];
+ c[1] = da[1] - db[1];
+ return vld1q_f32((float32_t *) c);
+#endif
+}
+
+// Subtract the lower double-precision (64-bit) floating-point element in b from
+// the lower double-precision (64-bit) floating-point element in a, store the
+// result in the lower element of dst, and copy the upper element from a to the
+// upper element of dst.
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_sub_sd
+FORCE_INLINE __m128d _mm_sub_sd(__m128d a, __m128d b)
+{
+ return _mm_move_sd(a, _mm_sub_pd(a, b));
+}
+
+// Add packed unsigned 16-bit integers in a and b using saturation, and store
+// the results in dst.
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_adds_epu16
+FORCE_INLINE __m128i _mm_adds_epu16(__m128i a, __m128i b)
+{
+ return vreinterpretq_m128i_u16(
+ vqaddq_u16(vreinterpretq_u16_m128i(a), vreinterpretq_u16_m128i(b)));
+}
+
+// Negate packed 8-bit integers in a when the corresponding signed
+// 8-bit integer in b is negative, and store the results in dst.
+// Elements in dst are zeroed out when the corresponding element
+// in b is zero.
+//
+// for i in 0..15
+// if b[i] < 0
+// r[i] := -a[i]
+// else if b[i] == 0
+// r[i] := 0
+// else
+// r[i] := a[i]
+// fi
+// done
+FORCE_INLINE __m128i _mm_sign_epi8(__m128i _a, __m128i _b)
+{
+ int8x16_t a = vreinterpretq_s8_m128i(_a);
+ int8x16_t b = vreinterpretq_s8_m128i(_b);
+
+ // signed shift right: faster than vclt
+ // (b < 0) ? 0xFF : 0
+ uint8x16_t ltMask = vreinterpretq_u8_s8(vshrq_n_s8(b, 7));
+
+ // (b == 0) ? 0xFF : 0
+#if defined(__aarch64__)
+ int8x16_t zeroMask = vreinterpretq_s8_u8(vceqzq_s8(b));
+#else
+ int8x16_t zeroMask = vreinterpretq_s8_u8(vceqq_s8(b, vdupq_n_s8(0)));
+#endif
+
+    // bitwise select either a or negative 'a' (vnegq_s8(a) returns negative 'a')
+ // based on ltMask
+ int8x16_t masked = vbslq_s8(ltMask, vnegq_s8(a), a);
+ // res = masked & (~zeroMask)
+ int8x16_t res = vbicq_s8(masked, zeroMask);
+
+ return vreinterpretq_m128i_s8(res);
+}
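+
+// Illustrative example: for each byte lane, a negative control byte in b
+// negates the lane of a, a zero control zeroes it, and a positive control
+// passes it through, so _mm_sign_epi8(a, _mm_set1_epi8(-1)) negates every
+// byte of a.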
+
+// Negate packed 16-bit integers in a when the corresponding signed
+// 16-bit integer in b is negative, and store the results in dst.
+// Elements in dst are zeroed out when the corresponding element
+// in b is zero.
+//
+// for i in 0..7
+// if b[i] < 0
+// r[i] := -a[i]
+// else if b[i] == 0
+// r[i] := 0
+// else
+// r[i] := a[i]
+// fi
+// done
+FORCE_INLINE __m128i _mm_sign_epi16(__m128i _a, __m128i _b)
+{
+ int16x8_t a = vreinterpretq_s16_m128i(_a);
+ int16x8_t b = vreinterpretq_s16_m128i(_b);
+
+ // signed shift right: faster than vclt
+ // (b < 0) ? 0xFFFF : 0
+ uint16x8_t ltMask = vreinterpretq_u16_s16(vshrq_n_s16(b, 15));
+ // (b == 0) ? 0xFFFF : 0
+#if defined(__aarch64__)
+ int16x8_t zeroMask = vreinterpretq_s16_u16(vceqzq_s16(b));
+#else
+ int16x8_t zeroMask = vreinterpretq_s16_u16(vceqq_s16(b, vdupq_n_s16(0)));
+#endif
+
+    // bitwise select either a or negative 'a' (vnegq_s16(a) returns negative
+    // 'a') based on ltMask
+ int16x8_t masked = vbslq_s16(ltMask, vnegq_s16(a), a);
+ // res = masked & (~zeroMask)
+ int16x8_t res = vbicq_s16(masked, zeroMask);
+ return vreinterpretq_m128i_s16(res);
+}
+
+// Negate packed 32-bit integers in a when the corresponding signed
+// 32-bit integer in b is negative, and store the results in dst.
+// Elements in dst are zeroed out when the corresponding element
+// in b is zero.
+//
+// for i in 0..3
+// if b[i] < 0
+// r[i] := -a[i]
+// else if b[i] == 0
+// r[i] := 0
+// else
+// r[i] := a[i]
+// fi
+// done
+FORCE_INLINE __m128i _mm_sign_epi32(__m128i _a, __m128i _b)
+{
+ int32x4_t a = vreinterpretq_s32_m128i(_a);
+ int32x4_t b = vreinterpretq_s32_m128i(_b);
+
+ // signed shift right: faster than vclt
+ // (b < 0) ? 0xFFFFFFFF : 0
+ uint32x4_t ltMask = vreinterpretq_u32_s32(vshrq_n_s32(b, 31));
+
+ // (b == 0) ? 0xFFFFFFFF : 0
+#if defined(__aarch64__)
+ int32x4_t zeroMask = vreinterpretq_s32_u32(vceqzq_s32(b));
+#else
+ int32x4_t zeroMask = vreinterpretq_s32_u32(vceqq_s32(b, vdupq_n_s32(0)));
+#endif
+
+ // bitwise select either a or negative 'a' (vnegq_s32(a) returns negative
+ // 'a') based on ltMask
+ int32x4_t masked = vbslq_s32(ltMask, vnegq_s32(a), a);
+ // res = masked & (~zeroMask)
+ int32x4_t res = vbicq_s32(masked, zeroMask);
+ return vreinterpretq_m128i_s32(res);
+}
+
+// Negate packed 16-bit integers in a when the corresponding signed 16-bit
+// integer in b is negative, and store the results in dst. Elements in dst are
+// zeroed out when the corresponding element in b is zero.
+//
+// FOR j := 0 to 3
+// i := j*16
+// IF b[i+15:i] < 0
+// dst[i+15:i] := -(a[i+15:i])
+// ELSE IF b[i+15:i] == 0
+// dst[i+15:i] := 0
+// ELSE
+// dst[i+15:i] := a[i+15:i]
+// FI
+// ENDFOR
+//
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_sign_pi16
+FORCE_INLINE __m64 _mm_sign_pi16(__m64 _a, __m64 _b)
+{
+ int16x4_t a = vreinterpret_s16_m64(_a);
+ int16x4_t b = vreinterpret_s16_m64(_b);
+
+ // signed shift right: faster than vclt
+ // (b < 0) ? 0xFFFF : 0
+ uint16x4_t ltMask = vreinterpret_u16_s16(vshr_n_s16(b, 15));
+
+ // (b == 0) ? 0xFFFF : 0
+#if defined(__aarch64__)
+ int16x4_t zeroMask = vreinterpret_s16_u16(vceqz_s16(b));
+#else
+ int16x4_t zeroMask = vreinterpret_s16_u16(vceq_s16(b, vdup_n_s16(0)));
+#endif
+
+ // bitwise select either a or negative 'a' (vneg_s16(a) returns negative 'a')
+ // based on ltMask
+ int16x4_t masked = vbsl_s16(ltMask, vneg_s16(a), a);
+ // res = masked & (~zeroMask)
+ int16x4_t res = vbic_s16(masked, zeroMask);
+
+ return vreinterpret_m64_s16(res);
+}
+
+// Negate packed 32-bit integers in a when the corresponding signed 32-bit
+// integer in b is negative, and store the results in dst. Elements in dst are
+// zeroed out when the corresponding element in b is zero.
+//
+// FOR j := 0 to 1
+// i := j*32
+// IF b[i+31:i] < 0
+// dst[i+31:i] := -(a[i+31:i])
+// ELSE IF b[i+31:i] == 0
+// dst[i+31:i] := 0
+// ELSE
+// dst[i+31:i] := a[i+31:i]
+// FI
+// ENDFOR
+//
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_sign_pi32
+FORCE_INLINE __m64 _mm_sign_pi32(__m64 _a, __m64 _b)
+{
+ int32x2_t a = vreinterpret_s32_m64(_a);
+ int32x2_t b = vreinterpret_s32_m64(_b);
+
+ // signed shift right: faster than vclt
+ // (b < 0) ? 0xFFFFFFFF : 0
+ uint32x2_t ltMask = vreinterpret_u32_s32(vshr_n_s32(b, 31));
+
+ // (b == 0) ? 0xFFFFFFFF : 0
+#if defined(__aarch64__)
+ int32x2_t zeroMask = vreinterpret_s32_u32(vceqz_s32(b));
+#else
+ int32x2_t zeroMask = vreinterpret_s32_u32(vceq_s32(b, vdup_n_s32(0)));
+#endif
+
+ // bitwise select either a or negative 'a' (vneg_s32(a) returns negative 'a')
+ // based on ltMask
+ int32x2_t masked = vbsl_s32(ltMask, vneg_s32(a), a);
+ // res = masked & (~zeroMask)
+ int32x2_t res = vbic_s32(masked, zeroMask);
+
+ return vreinterpret_m64_s32(res);
+}
+
+// Negate packed 8-bit integers in a when the corresponding signed 8-bit integer
+// in b is negative, and store the results in dst. Elements in dst are zeroed out
+// when the corresponding element in b is zero.
+//
+// FOR j := 0 to 7
+// i := j*8
+// IF b[i+7:i] < 0
+// dst[i+7:i] := -(a[i+7:i])
+// ELSE IF b[i+7:i] == 0
+// dst[i+7:i] := 0
+// ELSE
+// dst[i+7:i] := a[i+7:i]
+// FI
+// ENDFOR
+//
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_sign_pi8
+FORCE_INLINE __m64 _mm_sign_pi8(__m64 _a, __m64 _b)
+{
+ int8x8_t a = vreinterpret_s8_m64(_a);
+ int8x8_t b = vreinterpret_s8_m64(_b);
+
+ // signed shift right: faster than vclt
+ // (b < 0) ? 0xFF : 0
+ uint8x8_t ltMask = vreinterpret_u8_s8(vshr_n_s8(b, 7));
+
+ // (b == 0) ? 0xFF : 0
+#if defined(__aarch64__)
+ int8x8_t zeroMask = vreinterpret_s8_u8(vceqz_s8(b));
+#else
+ int8x8_t zeroMask = vreinterpret_s8_u8(vceq_s8(b, vdup_n_s8(0)));
+#endif
+
+ // bitwise select either a or negative 'a' (vneg_s8(a) returns negative 'a')
+ // based on ltMask
+ int8x8_t masked = vbsl_s8(ltMask, vneg_s8(a), a);
+ // res = masked & (~zeroMask)
+ int8x8_t res = vbic_s8(masked, zeroMask);
+
+ return vreinterpret_m64_s8(res);
+}
+
+// Average packed unsigned 16-bit integers in a and b, and store the results in
+// dst.
+//
+// FOR j := 0 to 3
+// i := j*16
+// dst[i+15:i] := (a[i+15:i] + b[i+15:i] + 1) >> 1
+// ENDFOR
+//
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_avg_pu16
+FORCE_INLINE __m64 _mm_avg_pu16(__m64 a, __m64 b)
+{
+ return vreinterpret_m64_u16(
+ vrhadd_u16(vreinterpret_u16_m64(a), vreinterpret_u16_m64(b)));
+}
+
+// Average packed unsigned 8-bit integers in a and b, and store the results in
+// dst.
+//
+// FOR j := 0 to 7
+// i := j*8
+// dst[i+7:i] := (a[i+7:i] + b[i+7:i] + 1) >> 1
+// ENDFOR
+//
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_avg_pu8
+FORCE_INLINE __m64 _mm_avg_pu8(__m64 a, __m64 b)
+{
+ return vreinterpret_m64_u8(
+ vrhadd_u8(vreinterpret_u8_m64(a), vreinterpret_u8_m64(b)));
+}
+
+// Average packed unsigned 8-bit integers in a and b, and store the results in
+// dst.
+//
+// FOR j := 0 to 7
+// i := j*8
+// dst[i+7:i] := (a[i+7:i] + b[i+7:i] + 1) >> 1
+// ENDFOR
+//
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_m_pavgb
+#define _m_pavgb(a, b) _mm_avg_pu8(a, b)
+
+// Average packed unsigned 16-bit integers in a and b, and store the results in
+// dst.
+//
+// FOR j := 0 to 3
+// i := j*16
+// dst[i+15:i] := (a[i+15:i] + b[i+15:i] + 1) >> 1
+// ENDFOR
+//
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_m_pavgw
+#define _m_pavgw(a, b) _mm_avg_pu16(a, b)
+
+// Extract a 16-bit integer from a, selected with imm8, and store the result in
+// the lower element of dst.
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_m_pextrw
+#define _m_pextrw(a, imm) _mm_extract_pi16(a, imm)
+
+// Copy a to dst, and insert the 16-bit integer i into dst at the location
+// specified by imm8.
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=m_pinsrw
+#define _m_pinsrw(a, i, imm) _mm_insert_pi16(a, i, imm)
+
+// Compare packed signed 16-bit integers in a and b, and store packed maximum
+// values in dst.
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_m_pmaxsw
+#define _m_pmaxsw(a, b) _mm_max_pi16(a, b)
+
+// Compare packed unsigned 8-bit integers in a and b, and store packed maximum
+// values in dst.
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_m_pmaxub
+#define _m_pmaxub(a, b) _mm_max_pu8(a, b)
+
+// Compare packed signed 16-bit integers in a and b, and store packed minimum
+// values in dst.
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_m_pminsw
+#define _m_pminsw(a, b) _mm_min_pi16(a, b)
+
+// Compare packed unsigned 8-bit integers in a and b, and store packed minimum
+// values in dst.
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_m_pminub
+#define _m_pminub(a, b) _mm_min_pu8(a, b)
+
+// Computes the average of the 16 unsigned 8-bit integers in a and the 16
+// unsigned 8-bit integers in b and rounds.
+//
+// r0 := (a0 + b0) / 2
+// r1 := (a1 + b1) / 2
+// ...
+// r15 := (a15 + b15) / 2
+//
+// https://msdn.microsoft.com/en-us/library/vstudio/8zwh554a(v%3dvs.90).aspx
+FORCE_INLINE __m128i _mm_avg_epu8(__m128i a, __m128i b)
+{
+ return vreinterpretq_m128i_u8(
+ vrhaddq_u8(vreinterpretq_u8_m128i(a), vreinterpretq_u8_m128i(b)));
+}
+
+// Computes the average of the 8 unsigned 16-bit integers in a and the 8
+// unsigned 16-bit integers in b and rounds.
+//
+// r0 := (a0 + b0) / 2
+// r1 := (a1 + b1) / 2
+// ...
+// r7 := (a7 + b7) / 2
+//
+// https://msdn.microsoft.com/en-us/library/vstudio/y13ca3c8(v=vs.90).aspx
+FORCE_INLINE __m128i _mm_avg_epu16(__m128i a, __m128i b)
+{
+ return (__m128i) vrhaddq_u16(vreinterpretq_u16_m128i(a),
+ vreinterpretq_u16_m128i(b));
+}
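+
+// Illustrative check (an assumption, not part of the upstream header): the
+// NEON rounding halving add matches the SSE semantics (a + b + 1) >> 1, so
+// averaging 1 and 2 rounds up to 2.
+//   __m128i r = _mm_avg_epu8(_mm_set1_epi8(1), _mm_set1_epi8(2));
+//   // every byte of r is 2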
+
+// Adds the four single-precision, floating-point values of a and b.
+//
+// r0 := a0 + b0
+// r1 := a1 + b1
+// r2 := a2 + b2
+// r3 := a3 + b3
+//
+// https://msdn.microsoft.com/en-us/library/vstudio/c9848chc(v=vs.100).aspx
+FORCE_INLINE __m128 _mm_add_ps(__m128 a, __m128 b)
+{
+ return vreinterpretq_m128_f32(
+ vaddq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b)));
+}
+
+// Add packed double-precision (64-bit) floating-point elements in a and b, and
+// store the results in dst.
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_add_pd
+FORCE_INLINE __m128d _mm_add_pd(__m128d a, __m128d b)
+{
+#if defined(__aarch64__)
+ return vreinterpretq_m128d_f64(
+ vaddq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b)));
+#else
+ double *da = (double *) &a;
+ double *db = (double *) &b;
+ double c[2];
+ c[0] = da[0] + db[0];
+ c[1] = da[1] + db[1];
+ return vld1q_f32((float32_t *) c);
+#endif
+}
+
+// Add the lower double-precision (64-bit) floating-point element in a and b,
+// store the result in the lower element of dst, and copy the upper element from
+// a to the upper element of dst.
+//
+// dst[63:0] := a[63:0] + b[63:0]
+// dst[127:64] := a[127:64]
+//
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_add_sd
+FORCE_INLINE __m128d _mm_add_sd(__m128d a, __m128d b)
+{
+#if defined(__aarch64__)
+ return _mm_move_sd(a, _mm_add_pd(a, b));
+#else
+ double *da = (double *) &a;
+ double *db = (double *) &b;
+ double c[2];
+ c[0] = da[0] + db[0];
+ c[1] = da[1];
+ return vld1q_f32((float32_t *) c);
+#endif
+}
+
+// Add 64-bit integers a and b, and store the result in dst.
+//
+// dst[63:0] := a[63:0] + b[63:0]
+//
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_add_si64
+FORCE_INLINE __m64 _mm_add_si64(__m64 a, __m64 b)
+{
+ return vreinterpret_m64_s64(
+ vadd_s64(vreinterpret_s64_m64(a), vreinterpret_s64_m64(b)));
+}
+
+// adds the scalar single-precision floating point values of a and b.
+// https://msdn.microsoft.com/en-us/library/be94x2y6(v=vs.100).aspx
+FORCE_INLINE __m128 _mm_add_ss(__m128 a, __m128 b)
+{
+ float32_t b0 = vgetq_lane_f32(vreinterpretq_f32_m128(b), 0);
+ float32x4_t value = vsetq_lane_f32(b0, vdupq_n_f32(0), 0);
+ // the upper values in the result must be the remnants of <a>.
+ return vreinterpretq_m128_f32(vaddq_f32(a, value));
+}
+
+// Adds the 2 signed or unsigned 64-bit integers in a to the 2 signed or
+// unsigned 64-bit integers in b.
+// https://msdn.microsoft.com/en-us/library/vstudio/09xs4fkk(v=vs.100).aspx
+FORCE_INLINE __m128i _mm_add_epi64(__m128i a, __m128i b)
+{
+ return vreinterpretq_m128i_s64(
+ vaddq_s64(vreinterpretq_s64_m128i(a), vreinterpretq_s64_m128i(b)));
+}
+
+// Adds the 4 signed or unsigned 32-bit integers in a to the 4 signed or
+// unsigned 32-bit integers in b.
+//
+// r0 := a0 + b0
+// r1 := a1 + b1
+// r2 := a2 + b2
+// r3 := a3 + b3
+//
+// https://msdn.microsoft.com/en-us/library/vstudio/09xs4fkk(v=vs.100).aspx
+FORCE_INLINE __m128i _mm_add_epi32(__m128i a, __m128i b)
+{
+ return vreinterpretq_m128i_s32(
+ vaddq_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(b)));
+}
+
+// Adds the 8 signed or unsigned 16-bit integers in a to the 8 signed or
+// unsigned 16-bit integers in b.
+// https://msdn.microsoft.com/en-us/library/fceha5k4(v=vs.100).aspx
+FORCE_INLINE __m128i _mm_add_epi16(__m128i a, __m128i b)
+{
+ return vreinterpretq_m128i_s16(
+ vaddq_s16(vreinterpretq_s16_m128i(a), vreinterpretq_s16_m128i(b)));
+}
+
+// Adds the 16 signed or unsigned 8-bit integers in a to the 16 signed or
+// unsigned 8-bit integers in b.
+// https://technet.microsoft.com/en-us/subscriptions/yc7tcyzs(v=vs.90)
+FORCE_INLINE __m128i _mm_add_epi8(__m128i a, __m128i b)
+{
+ return vreinterpretq_m128i_s8(
+ vaddq_s8(vreinterpretq_s8_m128i(a), vreinterpretq_s8_m128i(b)));
+}
+
+// Adds the 8 signed 16-bit integers in a to the 8 signed 16-bit integers in b
+// and saturates.
+//
+// r0 := SignedSaturate(a0 + b0)
+// r1 := SignedSaturate(a1 + b1)
+// ...
+// r7 := SignedSaturate(a7 + b7)
+//
+// https://msdn.microsoft.com/en-us/library/1a306ef8(v=vs.100).aspx
+FORCE_INLINE __m128i _mm_adds_epi16(__m128i a, __m128i b)
+{
+ return vreinterpretq_m128i_s16(
+ vqaddq_s16(vreinterpretq_s16_m128i(a), vreinterpretq_s16_m128i(b)));
+}
+
+// Add packed signed 8-bit integers in a and b using saturation, and store the
+// results in dst.
+//
+// FOR j := 0 to 15
+// i := j*8
+// dst[i+7:i] := Saturate8( a[i+7:i] + b[i+7:i] )
+// ENDFOR
+//
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_adds_epi8
+FORCE_INLINE __m128i _mm_adds_epi8(__m128i a, __m128i b)
+{
+ return vreinterpretq_m128i_s8(
+ vqaddq_s8(vreinterpretq_s8_m128i(a), vreinterpretq_s8_m128i(b)));
+}
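+
+// Illustrative saturation example (an assumption, not part of the upstream
+// header): signed 8-bit saturating addition clamps at 127 instead of wrapping.
+//   __m128i r = _mm_adds_epi8(_mm_set1_epi8(100), _mm_set1_epi8(100));
+//   // every byte of r is 127, not the wrapped value -56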
+
+// Adds the 16 unsigned 8-bit integers in a to the 16 unsigned 8-bit integers in
+// b and saturates.
+// https://msdn.microsoft.com/en-us/library/9hahyddy(v=vs.100).aspx
+FORCE_INLINE __m128i _mm_adds_epu8(__m128i a, __m128i b)
+{
+ return vreinterpretq_m128i_u8(
+ vqaddq_u8(vreinterpretq_u8_m128i(a), vreinterpretq_u8_m128i(b)));
+}
+
+// Multiplies the 8 signed or unsigned 16-bit integers from a by the 8 signed or
+// unsigned 16-bit integers from b.
+//
+// r0 := (a0 * b0)[15:0]
+// r1 := (a1 * b1)[15:0]
+// ...
+// r7 := (a7 * b7)[15:0]
+//
+// https://msdn.microsoft.com/en-us/library/vstudio/9ks1472s(v=vs.100).aspx
+FORCE_INLINE __m128i _mm_mullo_epi16(__m128i a, __m128i b)
+{
+ return vreinterpretq_m128i_s16(
+ vmulq_s16(vreinterpretq_s16_m128i(a), vreinterpretq_s16_m128i(b)));
+}
+
+// Multiplies the 4 signed or unsigned 32-bit integers from a by the 4 signed or
+// unsigned 32-bit integers from b.
+// https://msdn.microsoft.com/en-us/library/vstudio/bb531409(v=vs.100).aspx
+FORCE_INLINE __m128i _mm_mullo_epi32(__m128i a, __m128i b)
+{
+ return vreinterpretq_m128i_s32(
+ vmulq_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(b)));
+}
+
+// Multiply the packed unsigned 16-bit integers in a and b, producing
+// intermediate 32-bit integers, and store the high 16 bits of the intermediate
+// integers in dst.
+//
+// FOR j := 0 to 3
+// i := j*16
+// tmp[31:0] := a[i+15:i] * b[i+15:i]
+// dst[i+15:i] := tmp[31:16]
+// ENDFOR
+//
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_m_pmulhuw
+#define _m_pmulhuw(a, b) _mm_mulhi_pu16(a, b)
+
+// Multiplies the four single-precision, floating-point values of a and b.
+//
+// r0 := a0 * b0
+// r1 := a1 * b1
+// r2 := a2 * b2
+// r3 := a3 * b3
+//
+// https://msdn.microsoft.com/en-us/library/vstudio/22kbk6t9(v=vs.100).aspx
+FORCE_INLINE __m128 _mm_mul_ps(__m128 a, __m128 b)
+{
+ return vreinterpretq_m128_f32(
+ vmulq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b)));
+}
+
+// Multiply packed double-precision (64-bit) floating-point elements in a and b,
+// and store the results in dst.
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mul_pd
+FORCE_INLINE __m128d _mm_mul_pd(__m128d a, __m128d b)
+{
+#if defined(__aarch64__)
+ return vreinterpretq_m128d_f64(
+ vmulq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b)));
+#else
+ double *da = (double *) &a;
+ double *db = (double *) &b;
+ double c[2];
+ c[0] = da[0] * db[0];
+ c[1] = da[1] * db[1];
+ return vld1q_f32((float32_t *) c);
+#endif
+}
+
+// Multiply the lower double-precision (64-bit) floating-point element in a and
+// b, store the result in the lower element of dst, and copy the upper element
+// from a to the upper element of dst.
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_mul_sd
+FORCE_INLINE __m128d _mm_mul_sd(__m128d a, __m128d b)
+{
+ return _mm_move_sd(a, _mm_mul_pd(a, b));
+}
+
+// Multiply the lower single-precision (32-bit) floating-point element in a and
+// b, store the result in the lower element of dst, and copy the upper 3 packed
+// elements from a to the upper elements of dst.
+//
+// dst[31:0] := a[31:0] * b[31:0]
+// dst[127:32] := a[127:32]
+//
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mul_ss
+FORCE_INLINE __m128 _mm_mul_ss(__m128 a, __m128 b)
+{
+ return _mm_move_ss(a, _mm_mul_ps(a, b));
+}
+
+// Multiply the low unsigned 32-bit integers from each packed 64-bit element in
+// a and b, and store the unsigned 64-bit results in dst.
+//
+// r0 := (a0 & 0xFFFFFFFF) * (b0 & 0xFFFFFFFF)
+// r1 := (a2 & 0xFFFFFFFF) * (b2 & 0xFFFFFFFF)
+FORCE_INLINE __m128i _mm_mul_epu32(__m128i a, __m128i b)
+{
+ // vmull_u32 upcasts instead of masking, so we downcast.
+ uint32x2_t a_lo = vmovn_u64(vreinterpretq_u64_m128i(a));
+ uint32x2_t b_lo = vmovn_u64(vreinterpretq_u64_m128i(b));
+ return vreinterpretq_m128i_u64(vmull_u32(a_lo, b_lo));
+}
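+
+// Illustrative sketch (an assumption, not part of the upstream header): only
+// the even 32-bit lanes (0 and 2) take part, matching the SSE2 PMULUDQ rule.
+//   __m128i a = _mm_set_epi32(7, 3, 5, 2);    // lanes 3..0
+//   __m128i b = _mm_set_epi32(9, 4, 6, 10);
+//   __m128i r = _mm_mul_epu32(a, b);          // two uint64 results: {2*10, 3*4}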
+
+// Multiply the low unsigned 32-bit integers from a and b, and store the
+// unsigned 64-bit result in dst.
+//
+// dst[63:0] := a[31:0] * b[31:0]
+//
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mul_su32
+FORCE_INLINE __m64 _mm_mul_su32(__m64 a, __m64 b)
+{
+ return vreinterpret_m64_u64(vget_low_u64(
+ vmull_u32(vreinterpret_u32_m64(a), vreinterpret_u32_m64(b))));
+}
+
+// Multiply the low signed 32-bit integers from each packed 64-bit element in
+// a and b, and store the signed 64-bit results in dst.
+//
+// r0 := (int64_t)(int32_t)a0 * (int64_t)(int32_t)b0
+// r1 := (int64_t)(int32_t)a2 * (int64_t)(int32_t)b2
+FORCE_INLINE __m128i _mm_mul_epi32(__m128i a, __m128i b)
+{
+ // vmull_s32 upcasts instead of masking, so we downcast.
+ int32x2_t a_lo = vmovn_s64(vreinterpretq_s64_m128i(a));
+ int32x2_t b_lo = vmovn_s64(vreinterpretq_s64_m128i(b));
+ return vreinterpretq_m128i_s64(vmull_s32(a_lo, b_lo));
+}
+
+// Multiplies the 8 signed 16-bit integers from a by the 8 signed 16-bit
+// integers from b.
+//
+// r0 := (a0 * b0) + (a1 * b1)
+// r1 := (a2 * b2) + (a3 * b3)
+// r2 := (a4 * b4) + (a5 * b5)
+// r3 := (a6 * b6) + (a7 * b7)
+// https://msdn.microsoft.com/en-us/library/yht36sa6(v=vs.90).aspx
+FORCE_INLINE __m128i _mm_madd_epi16(__m128i a, __m128i b)
+{
+ int32x4_t low = vmull_s16(vget_low_s16(vreinterpretq_s16_m128i(a)),
+ vget_low_s16(vreinterpretq_s16_m128i(b)));
+ int32x4_t high = vmull_s16(vget_high_s16(vreinterpretq_s16_m128i(a)),
+ vget_high_s16(vreinterpretq_s16_m128i(b)));
+
+ int32x2_t low_sum = vpadd_s32(vget_low_s32(low), vget_high_s32(low));
+ int32x2_t high_sum = vpadd_s32(vget_low_s32(high), vget_high_s32(high));
+
+ return vreinterpretq_m128i_s32(vcombine_s32(low_sum, high_sum));
+}
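+
+// Illustrative sketch (an assumption, not part of the upstream header): each
+// 32-bit result is the sum of two adjacent 16-bit products, the core step of
+// many fixed-point dot products.
+//   __m128i r = _mm_madd_epi16(_mm_set1_epi16(3), _mm_set1_epi16(4));
+//   // every 32-bit lane of r is 3*4 + 3*4 = 24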
+
+// Multiply packed signed 16-bit integers in a and b, producing intermediate
+// signed 32-bit integers. Shift right by 15 bits while rounding up, and store
+// the packed 16-bit integers in dst.
+//
+// r0 := Round(((int32_t)a0 * (int32_t)b0) >> 15)
+// r1 := Round(((int32_t)a1 * (int32_t)b1) >> 15)
+// r2 := Round(((int32_t)a2 * (int32_t)b2) >> 15)
+// ...
+// r7 := Round(((int32_t)a7 * (int32_t)b7) >> 15)
+FORCE_INLINE __m128i _mm_mulhrs_epi16(__m128i a, __m128i b)
+{
+ // Has issues due to saturation
+ // return vreinterpretq_m128i_s16(vqrdmulhq_s16(a, b));
+
+ // Multiply
+ int32x4_t mul_lo = vmull_s16(vget_low_s16(vreinterpretq_s16_m128i(a)),
+ vget_low_s16(vreinterpretq_s16_m128i(b)));
+ int32x4_t mul_hi = vmull_s16(vget_high_s16(vreinterpretq_s16_m128i(a)),
+ vget_high_s16(vreinterpretq_s16_m128i(b)));
+
+ // Rounding narrowing shift right
+ // narrow = (int16_t)((mul + 16384) >> 15);
+ int16x4_t narrow_lo = vrshrn_n_s32(mul_lo, 15);
+ int16x4_t narrow_hi = vrshrn_n_s32(mul_hi, 15);
+
+ // Join together
+ return vreinterpretq_m128i_s16(vcombine_s16(narrow_lo, narrow_hi));
+}
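+
+// Illustrative fixed-point sketch (an assumption, not part of the upstream
+// header): multiplying by a Q15 constant, e.g. 0.5 encoded as 16384.
+//   __m128i x = _mm_set1_epi16(1001);
+//   __m128i r = _mm_mulhrs_epi16(x, _mm_set1_epi16(16384));
+//   // each lane is round((1001 * 16384) >> 15) = 501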
+
+// Vertically multiply each unsigned 8-bit integer from a with the corresponding
+// signed 8-bit integer from b, producing intermediate signed 16-bit integers.
+// Horizontally add adjacent pairs of intermediate signed 16-bit integers,
+// and pack the saturated results in dst.
+//
+// FOR j := 0 to 7
+// i := j*16
+// dst[i+15:i] := Saturate_To_Int16( a[i+15:i+8]*b[i+15:i+8] +
+// a[i+7:i]*b[i+7:i] )
+// ENDFOR
+FORCE_INLINE __m128i _mm_maddubs_epi16(__m128i _a, __m128i _b)
+{
+#if defined(__aarch64__)
+ uint8x16_t a = vreinterpretq_u8_m128i(_a);
+ int8x16_t b = vreinterpretq_s8_m128i(_b);
+ int16x8_t tl = vmulq_s16(vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(a))),
+ vmovl_s8(vget_low_s8(b)));
+ int16x8_t th = vmulq_s16(vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(a))),
+ vmovl_s8(vget_high_s8(b)));
+ return vreinterpretq_m128i_s16(
+ vqaddq_s16(vuzp1q_s16(tl, th), vuzp2q_s16(tl, th)));
+#else
+ // This would be much simpler if x86 would choose to zero extend OR sign
+ // extend, not both. This could probably be optimized better.
+ uint16x8_t a = vreinterpretq_u16_m128i(_a);
+ int16x8_t b = vreinterpretq_s16_m128i(_b);
+
+ // Zero extend a
+ int16x8_t a_odd = vreinterpretq_s16_u16(vshrq_n_u16(a, 8));
+ int16x8_t a_even = vreinterpretq_s16_u16(vbicq_u16(a, vdupq_n_u16(0xff00)));
+
+ // Sign extend by shifting left then shifting right.
+ int16x8_t b_even = vshrq_n_s16(vshlq_n_s16(b, 8), 8);
+ int16x8_t b_odd = vshrq_n_s16(b, 8);
+
+ // multiply
+ int16x8_t prod1 = vmulq_s16(a_even, b_even);
+ int16x8_t prod2 = vmulq_s16(a_odd, b_odd);
+
+ // saturated add
+ return vreinterpretq_m128i_s16(vqaddq_s16(prod1, prod2));
+#endif
+}
+
+// Computes the fused multiply-add of 32-bit floating point numbers.
+//
+// Return Value
+// Multiplies A and B, and adds C to the temporary result before returning it.
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_fmadd
+FORCE_INLINE __m128 _mm_fmadd_ps(__m128 a, __m128 b, __m128 c)
+{
+#if defined(__aarch64__)
+ return vreinterpretq_m128_f32(vfmaq_f32(vreinterpretq_f32_m128(c),
+ vreinterpretq_f32_m128(b),
+ vreinterpretq_f32_m128(a)));
+#else
+ return _mm_add_ps(_mm_mul_ps(a, b), c);
+#endif
+}
+
+// Alternatively add and subtract packed single-precision (32-bit)
+// floating-point elements in a to/from packed elements in b, and store the
+// results in dst.
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=addsub_ps
+FORCE_INLINE __m128 _mm_addsub_ps(__m128 a, __m128 b)
+{
+ __m128 mask = {-1.0f, 1.0f, -1.0f, 1.0f};
+ return _mm_fmadd_ps(b, mask, a);
+}
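+
+// Illustrative sketch (an assumption, not part of the upstream header):
+// even-indexed lanes are subtracted, odd-indexed lanes are added.
+//   __m128 a = _mm_set_ps(4.0f, 3.0f, 2.0f, 1.0f);   // lanes {1, 2, 3, 4}
+//   __m128 r = _mm_addsub_ps(a, _mm_set1_ps(0.5f));  // {0.5, 2.5, 2.5, 4.5}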
+
+// Horizontally add adjacent pairs of double-precision (64-bit) floating-point
+// elements in a and b, and pack the results in dst.
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_hadd_pd
+FORCE_INLINE __m128d _mm_hadd_pd(__m128d a, __m128d b)
+{
+#if defined(__aarch64__)
+ return vreinterpretq_m128d_f64(
+ vpaddq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b)));
+#else
+ double *da = (double *) &a;
+ double *db = (double *) &b;
+ double c[] = {da[0] + da[1], db[0] + db[1]};
+ return vreinterpretq_m128d_u64(vld1q_u64((uint64_t *) c));
+#endif
+}
+
+// Compute the absolute differences of packed unsigned 8-bit integers in a and
+// b, then horizontally sum each consecutive 8 differences to produce two
+// unsigned 16-bit integers, and pack these unsigned 16-bit integers in the low
+// 16 bits of 64-bit elements in dst.
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_sad_epu8
+FORCE_INLINE __m128i _mm_sad_epu8(__m128i a, __m128i b)
+{
+ uint16x8_t t = vpaddlq_u8(vabdq_u8((uint8x16_t) a, (uint8x16_t) b));
+ uint16_t r0 = t[0] + t[1] + t[2] + t[3];
+ uint16_t r4 = t[4] + t[5] + t[6] + t[7];
+ uint16x8_t r = vsetq_lane_u16(r0, vdupq_n_u16(0), 0);
+ return (__m128i) vsetq_lane_u16(r4, r, 4);
+}
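+
+// Illustrative sketch (an assumption, not part of the upstream header): the
+// low 16 bits of each 64-bit half hold the sum of absolute differences of the
+// corresponding eight byte pairs, the building block of block-matching metrics.
+//   __m128i r = _mm_sad_epu8(_mm_set1_epi8(10), _mm_set1_epi8(7));
+//   // both 64-bit halves of r equal 8 * |10 - 7| = 24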
+
+// Compute the absolute differences of packed unsigned 8-bit integers in a and
+// b, then horizontally sum each consecutive 8 differences to produce four
+// unsigned 16-bit integers, and pack these unsigned 16-bit integers in the low
+// 16 bits of dst.
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_sad_pu8
+FORCE_INLINE __m64 _mm_sad_pu8(__m64 a, __m64 b)
+{
+ uint16x4_t t =
+ vpaddl_u8(vabd_u8(vreinterpret_u8_m64(a), vreinterpret_u8_m64(b)));
+ uint16_t r0 = t[0] + t[1] + t[2] + t[3];
+ return vreinterpret_m64_u16(vset_lane_u16(r0, vdup_n_u16(0), 0));
+}
+
+// Compute the absolute differences of packed unsigned 8-bit integers in a and
+// b, then horizontally sum each consecutive 8 differences to produce four
+// unsigned 16-bit integers, and pack these unsigned 16-bit integers in the low
+// 16 bits of dst.
+//
+// FOR j := 0 to 7
+// i := j*8
+// tmp[i+7:i] := ABS(a[i+7:i] - b[i+7:i])
+// ENDFOR
+// dst[15:0] := tmp[7:0] + tmp[15:8] + tmp[23:16] + tmp[31:24] + tmp[39:32] +
+//             tmp[47:40] + tmp[55:48] + tmp[63:56]
+// dst[63:16] := 0
+//
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_m_psadbw
+#define _m_psadbw(a, b) _mm_sad_pu8(a, b)
+
+// Divides the four single-precision, floating-point values of a and b.
+//
+// r0 := a0 / b0
+// r1 := a1 / b1
+// r2 := a2 / b2
+// r3 := a3 / b3
+//
+// https://msdn.microsoft.com/en-us/library/edaw8147(v=vs.100).aspx
+FORCE_INLINE __m128 _mm_div_ps(__m128 a, __m128 b)
+{
+#if defined(__aarch64__) && !SSE2NEON_PRECISE_DIV
+ return vreinterpretq_m128_f32(
+ vdivq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b)));
+#else
+ float32x4_t recip = vrecpeq_f32(vreinterpretq_f32_m128(b));
+ recip = vmulq_f32(recip, vrecpsq_f32(recip, vreinterpretq_f32_m128(b)));
+#if SSE2NEON_PRECISE_DIV
+ // Additional Newton-Raphson iteration for accuracy
+ recip = vmulq_f32(recip, vrecpsq_f32(recip, vreinterpretq_f32_m128(b)));
+#endif
+ return vreinterpretq_m128_f32(vmulq_f32(vreinterpretq_f32_m128(a), recip));
+#endif
+}
+
+// Divides the scalar single-precision floating point value of a by b.
+// https://msdn.microsoft.com/en-us/library/4y73xa49(v=vs.100).aspx
+FORCE_INLINE __m128 _mm_div_ss(__m128 a, __m128 b)
+{
+ float32_t value =
+ vgetq_lane_f32(vreinterpretq_f32_m128(_mm_div_ps(a, b)), 0);
+ return vreinterpretq_m128_f32(
+ vsetq_lane_f32(value, vreinterpretq_f32_m128(a), 0));
+}
+
+// Divide packed double-precision (64-bit) floating-point elements in a by
+// packed elements in b, and store the results in dst.
+//
+// FOR j := 0 to 1
+// i := 64*j
+// dst[i+63:i] := a[i+63:i] / b[i+63:i]
+// ENDFOR
+//
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_div_pd
+FORCE_INLINE __m128d _mm_div_pd(__m128d a, __m128d b)
+{
+#if defined(__aarch64__)
+ return vreinterpretq_m128d_f64(
+ vdivq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b)));
+#else
+ double *da = (double *) &a;
+ double *db = (double *) &b;
+ double c[2];
+ c[0] = da[0] / db[0];
+ c[1] = da[1] / db[1];
+ return vld1q_f32((float32_t *) c);
+#endif
+}
+
+// Divide the lower double-precision (64-bit) floating-point element in a by the
+// lower double-precision (64-bit) floating-point element in b, store the result
+// in the lower element of dst, and copy the upper element from a to the upper
+// element of dst.
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_div_sd
+FORCE_INLINE __m128d _mm_div_sd(__m128d a, __m128d b)
+{
+#if defined(__aarch64__)
+ float64x2_t tmp =
+ vdivq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b));
+ return vreinterpretq_m128d_f64(
+ vsetq_lane_f64(vgetq_lane_f64(vreinterpretq_f64_m128d(a), 1), tmp, 1));
+#else
+ return _mm_move_sd(a, _mm_div_pd(a, b));
+#endif
+}
+
+// Compute the approximate reciprocal of packed single-precision (32-bit)
+// floating-point elements in a, and store the results in dst. The maximum
+// relative error for this approximation is less than 1.5*2^-12.
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_rcp_ps
+FORCE_INLINE __m128 _mm_rcp_ps(__m128 in)
+{
+ float32x4_t recip = vrecpeq_f32(vreinterpretq_f32_m128(in));
+ recip = vmulq_f32(recip, vrecpsq_f32(recip, vreinterpretq_f32_m128(in)));
+#if SSE2NEON_PRECISE_DIV
+ // Additional Newton-Raphson iteration for accuracy
+ recip = vmulq_f32(recip, vrecpsq_f32(recip, vreinterpretq_f32_m128(in)));
+#endif
+ return vreinterpretq_m128_f32(recip);
+}
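+
+// Illustrative sketch (an assumption, not part of the upstream header): the
+// result is an approximation; defining SSE2NEON_PRECISE_DIV adds one more
+// Newton-Raphson step to tighten it.
+//   __m128 r = _mm_rcp_ps(_mm_set1_ps(3.0f));
+//   // each lane is close to 0.333333f, but not bit-exact with 1.0f / 3.0f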
+
+// Compute the approximate reciprocal of the lower single-precision (32-bit)
+// floating-point element in a, store the result in the lower element of dst,
+// and copy the upper 3 packed elements from a to the upper elements of dst. The
+// maximum relative error for this approximation is less than 1.5*2^-12.
+//
+// dst[31:0] := (1.0 / a[31:0])
+// dst[127:32] := a[127:32]
+//
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_rcp_ss
+FORCE_INLINE __m128 _mm_rcp_ss(__m128 a)
+{
+ return _mm_move_ss(a, _mm_rcp_ps(a));
+}
+
+// Computes the approximations of square roots of the four single-precision,
+// floating-point values of a. First computes reciprocal square roots and then
+// reciprocals of the four values.
+//
+// r0 := sqrt(a0)
+// r1 := sqrt(a1)
+// r2 := sqrt(a2)
+// r3 := sqrt(a3)
+//
+// https://msdn.microsoft.com/en-us/library/vstudio/8z67bwwk(v=vs.100).aspx
+FORCE_INLINE __m128 _mm_sqrt_ps(__m128 in)
+{
+#if SSE2NEON_PRECISE_SQRT
+ float32x4_t recip = vrsqrteq_f32(vreinterpretq_f32_m128(in));
+
+ // Test for vrsqrteq_f32(0) -> positive infinity case.
+ // Change to zero, so that s * 1/sqrt(s) result is zero too.
+ const uint32x4_t pos_inf = vdupq_n_u32(0x7F800000);
+ const uint32x4_t div_by_zero =
+ vceqq_u32(pos_inf, vreinterpretq_u32_f32(recip));
+ recip = vreinterpretq_f32_u32(
+ vandq_u32(vmvnq_u32(div_by_zero), vreinterpretq_u32_f32(recip)));
+
+ // Additional Newton-Raphson iteration for accuracy
+ recip = vmulq_f32(
+ vrsqrtsq_f32(vmulq_f32(recip, recip), vreinterpretq_f32_m128(in)),
+ recip);
+ recip = vmulq_f32(
+ vrsqrtsq_f32(vmulq_f32(recip, recip), vreinterpretq_f32_m128(in)),
+ recip);
+
+ // sqrt(s) = s * 1/sqrt(s)
+ return vreinterpretq_m128_f32(vmulq_f32(vreinterpretq_f32_m128(in), recip));
+#elif defined(__aarch64__)
+ return vreinterpretq_m128_f32(vsqrtq_f32(vreinterpretq_f32_m128(in)));
+#else
+ float32x4_t recipsq = vrsqrteq_f32(vreinterpretq_f32_m128(in));
+ float32x4_t sq = vrecpeq_f32(recipsq);
+ return vreinterpretq_m128_f32(sq);
+#endif
+}
+
+// Computes the approximation of the square root of the scalar single-precision
+// floating point value of in.
+// https://msdn.microsoft.com/en-us/library/ahfsc22d(v=vs.100).aspx
+FORCE_INLINE __m128 _mm_sqrt_ss(__m128 in)
+{
+ float32_t value =
+ vgetq_lane_f32(vreinterpretq_f32_m128(_mm_sqrt_ps(in)), 0);
+ return vreinterpretq_m128_f32(
+ vsetq_lane_f32(value, vreinterpretq_f32_m128(in), 0));
+}
+
+// Computes the approximations of the reciprocal square roots of the four
+// single-precision floating point values of in.
+// https://msdn.microsoft.com/en-us/library/22hfsh53(v=vs.100).aspx
+FORCE_INLINE __m128 _mm_rsqrt_ps(__m128 in)
+{
+ float32x4_t out = vrsqrteq_f32(vreinterpretq_f32_m128(in));
+#if SSE2NEON_PRECISE_RSQRT
+ // Additional Newton-Raphson iteration for accuracy
+ out = vmulq_f32(
+ out, vrsqrtsq_f32(vmulq_f32(vreinterpretq_f32_m128(in), out), out));
+ out = vmulq_f32(
+ out, vrsqrtsq_f32(vmulq_f32(vreinterpretq_f32_m128(in), out), out));
+#endif
+ return vreinterpretq_m128_f32(out);
+}
+
+// Compute the approximate reciprocal square root of the lower single-precision
+// (32-bit) floating-point element in a, store the result in the lower element
+// of dst, and copy the upper 3 packed elements from a to the upper elements of
+// dst.
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_rsqrt_ss
+FORCE_INLINE __m128 _mm_rsqrt_ss(__m128 in)
+{
+ return vsetq_lane_f32(vgetq_lane_f32(_mm_rsqrt_ps(in), 0), in, 0);
+}
+
+// Compare packed signed 16-bit integers in a and b, and store packed maximum
+// values in dst.
+//
+// FOR j := 0 to 3
+// i := j*16
+// dst[i+15:i] := MAX(a[i+15:i], b[i+15:i])
+// ENDFOR
+//
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_max_pi16
+FORCE_INLINE __m64 _mm_max_pi16(__m64 a, __m64 b)
+{
+ return vreinterpret_m64_s16(
+ vmax_s16(vreinterpret_s16_m64(a), vreinterpret_s16_m64(b)));
+}
+
+// Compare packed signed 16-bit integers in a and b, and store packed maximum
+// values in dst.
+//
+// FOR j := 0 to 3
+// i := j*16
+// dst[i+15:i] := MAX(a[i+15:i], b[i+15:i])
+// ENDFOR
+//
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_max_pi16
+#define _m_pmaxsw(a, b) _mm_max_pi16(a, b)
+
+// Computes the maximums of the four single-precision, floating-point values of
+// a and b.
+// https://msdn.microsoft.com/en-us/library/vstudio/ff5d607a(v=vs.100).aspx
+FORCE_INLINE __m128 _mm_max_ps(__m128 a, __m128 b)
+{
+#if SSE2NEON_PRECISE_MINMAX
+ float32x4_t _a = vreinterpretq_f32_m128(a);
+ float32x4_t _b = vreinterpretq_f32_m128(b);
+ return vbslq_f32(vcltq_f32(_b, _a), _a, _b);
+#else
+ return vreinterpretq_m128_f32(
+ vmaxq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b)));
+#endif
+}
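+
+// Illustrative note (an assumption, not part of the upstream header): the
+// SSE2NEON_PRECISE_MINMAX path computes (b < a) ? a : b, so, like x86 MAXPS,
+// it returns the second operand when the comparison is false (e.g. for NaN
+// inputs), whereas plain vmaxq_f32 would propagate the NaN.
+//   __m128 r = _mm_max_ps(_mm_set1_ps(NAN), _mm_set1_ps(1.0f));  // needs <math.h>
+//   // with the precise path, every lane of r is 1.0f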
+
+// Compare packed unsigned 8-bit integers in a and b, and store packed maximum
+// values in dst.
+//
+// FOR j := 0 to 7
+// i := j*8
+// dst[i+7:i] := MAX(a[i+7:i], b[i+7:i])
+// ENDFOR
+//
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_max_pu8
+FORCE_INLINE __m64 _mm_max_pu8(__m64 a, __m64 b)
+{
+ return vreinterpret_m64_u8(
+ vmax_u8(vreinterpret_u8_m64(a), vreinterpret_u8_m64(b)));
+}
+
+// Compare packed unsigned 8-bit integers in a and b, and store packed maximum
+// values in dst.
+//
+// FOR j := 0 to 7
+// i := j*8
+// dst[i+7:i] := MAX(a[i+7:i], b[i+7:i])
+// ENDFOR
+//
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_max_pu8
+#define _m_pmaxub(a, b) _mm_max_pu8(a, b)
+
+// Compare packed signed 16-bit integers in a and b, and store packed minimum
+// values in dst.
+//
+// FOR j := 0 to 3
+// i := j*16
+// dst[i+15:i] := MIN(a[i+15:i], b[i+15:i])
+// ENDFOR
+//
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_min_pi16
+FORCE_INLINE __m64 _mm_min_pi16(__m64 a, __m64 b)
+{
+ return vreinterpret_m64_s16(
+ vmin_s16(vreinterpret_s16_m64(a), vreinterpret_s16_m64(b)));
+}
+
+// Compare packed signed 16-bit integers in a and b, and store packed minimum
+// values in dst.
+//
+// FOR j := 0 to 3
+// i := j*16
+// dst[i+15:i] := MIN(a[i+15:i], b[i+15:i])
+// ENDFOR
+//
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_min_pi16
+#define _m_pminsw(a, b) _mm_min_pi16(a, b)
+
+// Computes the minima of the four single-precision, floating-point values of a
+// and b.
+// https://msdn.microsoft.com/en-us/library/vstudio/wh13kadz(v=vs.100).aspx
+FORCE_INLINE __m128 _mm_min_ps(__m128 a, __m128 b)
+{
+#if SSE2NEON_PRECISE_MINMAX
+ float32x4_t _a = vreinterpretq_f32_m128(a);
+ float32x4_t _b = vreinterpretq_f32_m128(b);
+ return vbslq_f32(vcltq_f32(_a, _b), _a, _b);
+#else
+ return vreinterpretq_m128_f32(
+ vminq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b)));
+#endif
+}
+
+// Compare packed unsigned 8-bit integers in a and b, and store packed minimum
+// values in dst.
+//
+// FOR j := 0 to 7
+// i := j*8
+// dst[i+7:i] := MIN(a[i+7:i], b[i+7:i])
+// ENDFOR
+//
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_min_pu8
+FORCE_INLINE __m64 _mm_min_pu8(__m64 a, __m64 b)
+{
+ return vreinterpret_m64_u8(
+ vmin_u8(vreinterpret_u8_m64(a), vreinterpret_u8_m64(b)));
+}
+
+// Compare packed unsigned 8-bit integers in a and b, and store packed minimum
+// values in dst.
+//
+// FOR j := 0 to 7
+// i := j*8
+// dst[i+7:i] := MIN(a[i+7:i], b[i+7:i])
+// ENDFOR
+//
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_min_pu8
+#define _m_pminub(a, b) _mm_min_pu8(a, b)
+
+// Computes the maximum of the two lower scalar single-precision floating point
+// values of a and b.
+// https://msdn.microsoft.com/en-us/library/s6db5esz(v=vs.100).aspx
+FORCE_INLINE __m128 _mm_max_ss(__m128 a, __m128 b)
+{
+ float32_t value = vgetq_lane_f32(_mm_max_ps(a, b), 0);
+ return vreinterpretq_m128_f32(
+ vsetq_lane_f32(value, vreinterpretq_f32_m128(a), 0));
+}
+
+// Computes the minimum of the two lower scalar single-precision floating point
+// values of a and b.
+// https://msdn.microsoft.com/en-us/library/0a9y7xaa(v=vs.100).aspx
+FORCE_INLINE __m128 _mm_min_ss(__m128 a, __m128 b)
+{
+ float32_t value = vgetq_lane_f32(_mm_min_ps(a, b), 0);
+ return vreinterpretq_m128_f32(
+ vsetq_lane_f32(value, vreinterpretq_f32_m128(a), 0));
+}
+
+// Computes the pairwise maxima of the 16 unsigned 8-bit integers from a and the
+// 16 unsigned 8-bit integers from b.
+// https://msdn.microsoft.com/en-us/library/st6634za(v=vs.100).aspx
+FORCE_INLINE __m128i _mm_max_epu8(__m128i a, __m128i b)
+{
+ return vreinterpretq_m128i_u8(
+ vmaxq_u8(vreinterpretq_u8_m128i(a), vreinterpretq_u8_m128i(b)));
+}
+
+// Computes the pairwise minima of the 16 unsigned 8-bit integers from a and the
+// 16 unsigned 8-bit integers from b.
+// https://msdn.microsoft.com/ko-kr/library/17k8cf58(v=vs.100).aspxx
+FORCE_INLINE __m128i _mm_min_epu8(__m128i a, __m128i b)
+{
+ return vreinterpretq_m128i_u8(
+ vminq_u8(vreinterpretq_u8_m128i(a), vreinterpretq_u8_m128i(b)));
+}
+
+// Computes the pairwise minima of the 8 signed 16-bit integers from a and the 8
+// signed 16-bit integers from b.
+// https://msdn.microsoft.com/en-us/library/vstudio/6te997ew(v=vs.100).aspx
+FORCE_INLINE __m128i _mm_min_epi16(__m128i a, __m128i b)
+{
+ return vreinterpretq_m128i_s16(
+ vminq_s16(vreinterpretq_s16_m128i(a), vreinterpretq_s16_m128i(b)));
+}
+
+// Compare packed signed 8-bit integers in a and b, and store packed maximum
+// values in dst.
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_max_epi8
+FORCE_INLINE __m128i _mm_max_epi8(__m128i a, __m128i b)
+{
+ return vreinterpretq_m128i_s8(
+ vmaxq_s8(vreinterpretq_s8_m128i(a), vreinterpretq_s8_m128i(b)));
+}
+
+// Compare packed unsigned 16-bit integers in a and b, and store packed maximum
+// values in dst.
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_max_epu16
+FORCE_INLINE __m128i _mm_max_epu16(__m128i a, __m128i b)
+{
+ return vreinterpretq_m128i_u16(
+ vmaxq_u16(vreinterpretq_u16_m128i(a), vreinterpretq_u16_m128i(b)));
+}
+
+// Compare packed signed 8-bit integers in a and b, and store packed minimum
+// values in dst.
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_min_epi8
+FORCE_INLINE __m128i _mm_min_epi8(__m128i a, __m128i b)
+{
+ return vreinterpretq_m128i_s8(
+ vminq_s8(vreinterpretq_s8_m128i(a), vreinterpretq_s8_m128i(b)));
+}
+
+// Compare packed unsigned 16-bit integers in a and b, and store packed minimum
+// values in dst.
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_min_epu16
+FORCE_INLINE __m128i _mm_min_epu16(__m128i a, __m128i b)
+{
+ return vreinterpretq_m128i_u16(
+ vminq_u16(vreinterpretq_u16_m128i(a), vreinterpretq_u16_m128i(b)));
+}
+
+// Computes the pairwise maxima of the 8 signed 16-bit integers from a and the 8
+// signed 16-bit integers from b.
+// https://msdn.microsoft.com/en-us/LIBRary/3x060h7c(v=vs.100).aspx
+FORCE_INLINE __m128i _mm_max_epi16(__m128i a, __m128i b)
+{
+ return vreinterpretq_m128i_s16(
+ vmaxq_s16(vreinterpretq_s16_m128i(a), vreinterpretq_s16_m128i(b)));
+}
+
+// epi versions of min/max
+// Computes the pairwise maxima of the four signed 32-bit integer values of a
+// and b.
+//
+// A 128-bit parameter that can be defined with the following equations:
+// r0 := (a0 > b0) ? a0 : b0
+// r1 := (a1 > b1) ? a1 : b1
+// r2 := (a2 > b2) ? a2 : b2
+// r3 := (a3 > b3) ? a3 : b3
+//
+// https://msdn.microsoft.com/en-us/library/vstudio/bb514055(v=vs.100).aspx
+FORCE_INLINE __m128i _mm_max_epi32(__m128i a, __m128i b)
+{
+ return vreinterpretq_m128i_s32(
+ vmaxq_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(b)));
+}
+
+// Computes the pairwise minima of the four signed 32-bit integer values of a
+// and b.
+//
+// A 128-bit parameter that can be defined with the following equations:
+// r0 := (a0 < b0) ? a0 : b0
+// r1 := (a1 < b1) ? a1 : b1
+// r2 := (a2 < b2) ? a2 : b2
+// r3 := (a3 < b3) ? a3 : b3
+//
+// https://msdn.microsoft.com/en-us/library/vstudio/bb531476(v=vs.100).aspx
+FORCE_INLINE __m128i _mm_min_epi32(__m128i a, __m128i b)
+{
+ return vreinterpretq_m128i_s32(
+ vminq_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(b)));
+}
+
+// Compare packed unsigned 32-bit integers in a and b, and store packed maximum
+// values in dst.
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_max_epu32
+FORCE_INLINE __m128i _mm_max_epu32(__m128i a, __m128i b)
+{
+ return vreinterpretq_m128i_u32(
+ vmaxq_u32(vreinterpretq_u32_m128i(a), vreinterpretq_u32_m128i(b)));
+}
+
+// Compare packed unsigned 32-bit integers in a and b, and store packed minimum
+// values in dst.
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_min_epu32
+FORCE_INLINE __m128i _mm_min_epu32(__m128i a, __m128i b)
+{
+ return vreinterpretq_m128i_u32(
+ vminq_u32(vreinterpretq_u32_m128i(a), vreinterpretq_u32_m128i(b)));
+}
+
+// Multiply the packed unsigned 16-bit integers in a and b, producing
+// intermediate 32-bit integers, and store the high 16 bits of the intermediate
+// integers in dst.
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mulhi_pu16
+FORCE_INLINE __m64 _mm_mulhi_pu16(__m64 a, __m64 b)
+{
+ return vreinterpret_m64_u16(vshrn_n_u32(
+ vmull_u16(vreinterpret_u16_m64(a), vreinterpret_u16_m64(b)), 16));
+}
+
+// Multiplies the 8 signed 16-bit integers from a by the 8 signed 16-bit
+// integers from b.
+//
+// r0 := (a0 * b0)[31:16]
+// r1 := (a1 * b1)[31:16]
+// ...
+// r7 := (a7 * b7)[31:16]
+//
+// https://msdn.microsoft.com/en-us/library/vstudio/59hddw1d(v=vs.100).aspx
+FORCE_INLINE __m128i _mm_mulhi_epi16(__m128i a, __m128i b)
+{
+ /* FIXME: issue with large values because of result saturation */
+ // int16x8_t ret = vqdmulhq_s16(vreinterpretq_s16_m128i(a),
+ // vreinterpretq_s16_m128i(b)); /* =2*a*b */ return
+ // vreinterpretq_m128i_s16(vshrq_n_s16(ret, 1));
+ int16x4_t a3210 = vget_low_s16(vreinterpretq_s16_m128i(a));
+ int16x4_t b3210 = vget_low_s16(vreinterpretq_s16_m128i(b));
+ int32x4_t ab3210 = vmull_s16(a3210, b3210); /* 3333222211110000 */
+ int16x4_t a7654 = vget_high_s16(vreinterpretq_s16_m128i(a));
+ int16x4_t b7654 = vget_high_s16(vreinterpretq_s16_m128i(b));
+ int32x4_t ab7654 = vmull_s16(a7654, b7654); /* 7777666655554444 */
+ uint16x8x2_t r =
+ vuzpq_u16(vreinterpretq_u16_s32(ab3210), vreinterpretq_u16_s32(ab7654));
+ return vreinterpretq_m128i_u16(r.val[1]);
+}
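+
+// Illustrative sketch (an assumption, not part of the upstream header): only
+// the high 16 bits of each 32-bit product are kept.
+//   __m128i r = _mm_mulhi_epi16(_mm_set1_epi16(300), _mm_set1_epi16(400));
+//   // each lane of r is (300 * 400) >> 16 = 1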
+
+// Multiply the packed unsigned 16-bit integers in a and b, producing
+// intermediate 32-bit integers, and store the high 16 bits of the intermediate
+// integers in dst.
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mulhi_epu16
+FORCE_INLINE __m128i _mm_mulhi_epu16(__m128i a, __m128i b)
+{
+ uint16x4_t a3210 = vget_low_u16(vreinterpretq_u16_m128i(a));
+ uint16x4_t b3210 = vget_low_u16(vreinterpretq_u16_m128i(b));
+ uint32x4_t ab3210 = vmull_u16(a3210, b3210);
+#if defined(__aarch64__)
+ uint32x4_t ab7654 =
+ vmull_high_u16(vreinterpretq_u16_m128i(a), vreinterpretq_u16_m128i(b));
+ uint16x8_t r = vuzp2q_u16(vreinterpretq_u16_u32(ab3210),
+ vreinterpretq_u16_u32(ab7654));
+ return vreinterpretq_m128i_u16(r);
+#else
+ uint16x4_t a7654 = vget_high_u16(vreinterpretq_u16_m128i(a));
+ uint16x4_t b7654 = vget_high_u16(vreinterpretq_u16_m128i(b));
+ uint32x4_t ab7654 = vmull_u16(a7654, b7654);
+ uint16x8x2_t r =
+ vuzpq_u16(vreinterpretq_u16_u32(ab3210), vreinterpretq_u16_u32(ab7654));
+ return vreinterpretq_m128i_u16(r.val[1]);
+#endif
+}
+
+// Computes the pairwise addition of the single-precision, floating-point
+// values of a and b.
+// https://msdn.microsoft.com/en-us/library/yd9wecaa.aspx
+FORCE_INLINE __m128 _mm_hadd_ps(__m128 a, __m128 b)
+{
+#if defined(__aarch64__)
+ return vreinterpretq_m128_f32(
+ vpaddq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b)));
+#else
+ float32x2_t a10 = vget_low_f32(vreinterpretq_f32_m128(a));
+ float32x2_t a32 = vget_high_f32(vreinterpretq_f32_m128(a));
+ float32x2_t b10 = vget_low_f32(vreinterpretq_f32_m128(b));
+ float32x2_t b32 = vget_high_f32(vreinterpretq_f32_m128(b));
+ return vreinterpretq_m128_f32(
+ vcombine_f32(vpadd_f32(a10, a32), vpadd_f32(b10, b32)));
+#endif
+}
+
+// Computes the pairwise addition of the 16-bit signed or unsigned integer
+// values of a and b.
+FORCE_INLINE __m128i _mm_hadd_epi16(__m128i _a, __m128i _b)
+{
+ int16x8_t a = vreinterpretq_s16_m128i(_a);
+ int16x8_t b = vreinterpretq_s16_m128i(_b);
+#if defined(__aarch64__)
+ return vreinterpretq_m128i_s16(vpaddq_s16(a, b));
+#else
+ return vreinterpretq_m128i_s16(
+ vcombine_s16(vpadd_s16(vget_low_s16(a), vget_high_s16(a)),
+ vpadd_s16(vget_low_s16(b), vget_high_s16(b))));
+#endif
+}
+
+// Horizontally subtract adjacent pairs of single-precision (32-bit)
+// floating-point elements in a and b, and pack the results in dst.
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_hsub_ps
+FORCE_INLINE __m128 _mm_hsub_ps(__m128 _a, __m128 _b)
+{
+#if defined(__aarch64__)
+ return vreinterpretq_m128_f32(vsubq_f32(
+ vuzp1q_f32(vreinterpretq_f32_m128(_a), vreinterpretq_f32_m128(_b)),
+ vuzp2q_f32(vreinterpretq_f32_m128(_a), vreinterpretq_f32_m128(_b))));
+#else
+ float32x4x2_t c =
+ vuzpq_f32(vreinterpretq_f32_m128(_a), vreinterpretq_f32_m128(_b));
+ return vreinterpretq_m128_f32(vsubq_f32(c.val[0], c.val[1]));
+#endif
+}
+
+// Horizontally add adjacent pairs of 16-bit integers in a and b, and pack the
+// signed 16-bit results in dst.
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_hadd_pi16
+FORCE_INLINE __m64 _mm_hadd_pi16(__m64 a, __m64 b)
+{
+ return vreinterpret_m64_s16(
+ vpadd_s16(vreinterpret_s16_m64(a), vreinterpret_s16_m64(b)));
+}
+
+// Horizontally add adjacent pairs of 32-bit integers in a and b, and pack the
+// signed 32-bit results in dst.
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_hadd_pi32
+FORCE_INLINE __m64 _mm_hadd_pi32(__m64 a, __m64 b)
+{
+ return vreinterpret_m64_s32(
+ vpadd_s32(vreinterpret_s32_m64(a), vreinterpret_s32_m64(b)));
+}
+
+// Computes the pairwise difference of the 16-bit signed or unsigned integer
+// values of a and b.
+FORCE_INLINE __m128i _mm_hsub_epi16(__m128i _a, __m128i _b)
+{
+ int32x4_t a = vreinterpretq_s32_m128i(_a);
+ int32x4_t b = vreinterpretq_s32_m128i(_b);
+ // Interleave using vshrn/vmovn
+ // [a0|a2|a4|a6|b0|b2|b4|b6]
+ // [a1|a3|a5|a7|b1|b3|b5|b7]
+ int16x8_t ab0246 = vcombine_s16(vmovn_s32(a), vmovn_s32(b));
+ int16x8_t ab1357 = vcombine_s16(vshrn_n_s32(a, 16), vshrn_n_s32(b, 16));
+ // Subtract
+ return vreinterpretq_m128i_s16(vsubq_s16(ab0246, ab1357));
+}
+
+// Computes the saturated pairwise addition of the 16-bit signed integer
+// values of a and b.
+FORCE_INLINE __m128i _mm_hadds_epi16(__m128i _a, __m128i _b)
+{
+#if defined(__aarch64__)
+ int16x8_t a = vreinterpretq_s16_m128i(_a);
+ int16x8_t b = vreinterpretq_s16_m128i(_b);
+ return vreinterpretq_s64_s16(
+ vqaddq_s16(vuzp1q_s16(a, b), vuzp2q_s16(a, b)));
+#else
+ int32x4_t a = vreinterpretq_s32_m128i(_a);
+ int32x4_t b = vreinterpretq_s32_m128i(_b);
+ // Interleave using vshrn/vmovn
+ // [a0|a2|a4|a6|b0|b2|b4|b6]
+ // [a1|a3|a5|a7|b1|b3|b5|b7]
+ int16x8_t ab0246 = vcombine_s16(vmovn_s32(a), vmovn_s32(b));
+ int16x8_t ab1357 = vcombine_s16(vshrn_n_s32(a, 16), vshrn_n_s32(b, 16));
+ // Saturated add
+ return vreinterpretq_m128i_s16(vqaddq_s16(ab0246, ab1357));
+#endif
+}
+
+// Computes the saturated pairwise difference of the 16-bit signed integer
+// values of a and b.
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_hsubs_epi16
+FORCE_INLINE __m128i _mm_hsubs_epi16(__m128i _a, __m128i _b)
+{
+#if defined(__aarch64__)
+ int16x8_t a = vreinterpretq_s16_m128i(_a);
+ int16x8_t b = vreinterpretq_s16_m128i(_b);
+ return vreinterpretq_s64_s16(
+ vqsubq_s16(vuzp1q_s16(a, b), vuzp2q_s16(a, b)));
+#else
+ int32x4_t a = vreinterpretq_s32_m128i(_a);
+ int32x4_t b = vreinterpretq_s32_m128i(_b);
+ // Interleave using vshrn/vmovn
+ // [a0|a2|a4|a6|b0|b2|b4|b6]
+ // [a1|a3|a5|a7|b1|b3|b5|b7]
+ int16x8_t ab0246 = vcombine_s16(vmovn_s32(a), vmovn_s32(b));
+ int16x8_t ab1357 = vcombine_s16(vshrn_n_s32(a, 16), vshrn_n_s32(b, 16));
+ // Saturated subtract
+ return vreinterpretq_m128i_s16(vqsubq_s16(ab0246, ab1357));
+#endif
+}
+
+// Computes the pairwise addition of the 32-bit signed or unsigned integer
+// values of a and b.
+FORCE_INLINE __m128i _mm_hadd_epi32(__m128i _a, __m128i _b)
+{
+ int32x4_t a = vreinterpretq_s32_m128i(_a);
+ int32x4_t b = vreinterpretq_s32_m128i(_b);
+ return vreinterpretq_m128i_s32(
+ vcombine_s32(vpadd_s32(vget_low_s32(a), vget_high_s32(a)),
+ vpadd_s32(vget_low_s32(b), vget_high_s32(b))));
+}
+
+// Computes the pairwise difference of the 32-bit signed or unsigned integer
+// values of a and b.
+FORCE_INLINE __m128i _mm_hsub_epi32(__m128i _a, __m128i _b)
+{
+ int64x2_t a = vreinterpretq_s64_m128i(_a);
+ int64x2_t b = vreinterpretq_s64_m128i(_b);
+ // Interleave using vshrn/vmovn
+ // [a0|a2|b0|b2]
+ // [a1|a3|b1|b3]
+ int32x4_t ab02 = vcombine_s32(vmovn_s64(a), vmovn_s64(b));
+ int32x4_t ab13 = vcombine_s32(vshrn_n_s64(a, 32), vshrn_n_s64(b, 32));
+ // Subtract
+ return vreinterpretq_m128i_s32(vsubq_s32(ab02, ab13));
+}
+
+// Kahan summation for accurate summation of floating-point numbers.
+// http://blog.zachbjornson.com/2019/08/11/fast-float-summation.html
+FORCE_INLINE void _sse2neon_kadd_f32(float *sum, float *c, float y)
+{
+ y -= *c;
+ float t = *sum + y;
+ *c = (t - *sum) - y;
+ *sum = t;
+}
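+
+// Illustrative sketch (an assumption, not part of the upstream header): the
+// running compensation term keeps addends smaller than one ulp of the sum from
+// being lost to rounding.
+//   float sum = 0.0f, c = 0.0f;
+//   _sse2neon_kadd_f32(&sum, &c, 1e8f);
+//   for (int i = 0; i < 1000; i++)
+//       _sse2neon_kadd_f32(&sum, &c, 0.5f);
+//   // sum ends up within a few ulps of 1e8f + 500, whereas naive accumulation
+//   // would stay stuck at 1e8f because each 0.5f is below one ulp there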
+
+// Conditionally multiply the packed single-precision (32-bit) floating-point
+// elements in a and b using the high 4 bits in imm8, sum the four products,
+// and conditionally store the sum in dst using the low 4 bits of imm.
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_dp_ps
+FORCE_INLINE __m128 _mm_dp_ps(__m128 a, __m128 b, const int imm)
+{
+#if defined(__aarch64__)
+ /* shortcuts */
+ if (imm == 0xFF) {
+ return _mm_set1_ps(vaddvq_f32(_mm_mul_ps(a, b)));
+ }
+ if (imm == 0x7F) {
+ float32x4_t m = _mm_mul_ps(a, b);
+ m[3] = 0;
+ return _mm_set1_ps(vaddvq_f32(m));
+ }
+#endif
+
+ float s = 0, c = 0;
+ float32x4_t f32a = vreinterpretq_f32_m128(a);
+ float32x4_t f32b = vreinterpretq_f32_m128(b);
+
+ /* To improve the accuracy of floating-point summation, Kahan algorithm
+ * is used for each operation.
+ */
+ if (imm & (1 << 4))
+ _sse2neon_kadd_f32(&s, &c, f32a[0] * f32b[0]);
+ if (imm & (1 << 5))
+ _sse2neon_kadd_f32(&s, &c, f32a[1] * f32b[1]);
+ if (imm & (1 << 6))
+ _sse2neon_kadd_f32(&s, &c, f32a[2] * f32b[2]);
+ if (imm & (1 << 7))
+ _sse2neon_kadd_f32(&s, &c, f32a[3] * f32b[3]);
+ s += c;
+
+ float32x4_t res = {
+ (imm & 0x1) ? s : 0,
+ (imm & 0x2) ? s : 0,
+ (imm & 0x4) ? s : 0,
+ (imm & 0x8) ? s : 0,
+ };
+ return vreinterpretq_m128_f32(res);
+}
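+
+// Illustrative sketch (an assumption, not part of the upstream header): a full
+// four-component dot product broadcast to every lane uses imm = 0xFF.
+//   __m128 a = _mm_set_ps(4.0f, 3.0f, 2.0f, 1.0f);
+//   __m128 b = _mm_set_ps(8.0f, 7.0f, 6.0f, 5.0f);
+//   __m128 d = _mm_dp_ps(a, b, 0xFF);   // every lane is 1*5 + 2*6 + 3*7 + 4*8 = 70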
+
+/* Compare operations */
+
+// Compares for less than
+// https://msdn.microsoft.com/en-us/library/vstudio/f330yhc8(v=vs.100).aspx
+FORCE_INLINE __m128 _mm_cmplt_ps(__m128 a, __m128 b)
+{
+ return vreinterpretq_m128_u32(
+ vcltq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b)));
+}
+
+// Compares for less than
+// https://docs.microsoft.com/en-us/previous-versions/visualstudio/visual-studio-2010/fy94wye7(v=vs.100)
+FORCE_INLINE __m128 _mm_cmplt_ss(__m128 a, __m128 b)
+{
+ return _mm_move_ss(a, _mm_cmplt_ps(a, b));
+}
+
+// Compares for greater than.
+//
+// r0 := (a0 > b0) ? 0xffffffff : 0x0
+// r1 := (a1 > b1) ? 0xffffffff : 0x0
+// r2 := (a2 > b2) ? 0xffffffff : 0x0
+// r3 := (a3 > b3) ? 0xffffffff : 0x0
+//
+// https://msdn.microsoft.com/en-us/library/vstudio/11dy102s(v=vs.100).aspx
+FORCE_INLINE __m128 _mm_cmpgt_ps(__m128 a, __m128 b)
+{
+ return vreinterpretq_m128_u32(
+ vcgtq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b)));
+}
+
+// Compares for greater than.
+// https://docs.microsoft.com/en-us/previous-versions/visualstudio/visual-studio-2010/1xyyyy9e(v=vs.100)
+FORCE_INLINE __m128 _mm_cmpgt_ss(__m128 a, __m128 b)
+{
+ return _mm_move_ss(a, _mm_cmpgt_ps(a, b));
+}
+
+// Compares for greater than or equal.
+// https://msdn.microsoft.com/en-us/library/vstudio/fs813y2t(v=vs.100).aspx
+FORCE_INLINE __m128 _mm_cmpge_ps(__m128 a, __m128 b)
+{
+ return vreinterpretq_m128_u32(
+ vcgeq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b)));
+}
+
+// Compares for greater than or equal.
+// https://docs.microsoft.com/en-us/previous-versions/visualstudio/visual-studio-2010/kesh3ddc(v=vs.100)
+FORCE_INLINE __m128 _mm_cmpge_ss(__m128 a, __m128 b)
+{
+ return _mm_move_ss(a, _mm_cmpge_ps(a, b));
+}
+
+// Compares for less than or equal.
+//
+// r0 := (a0 <= b0) ? 0xffffffff : 0x0
+// r1 := (a1 <= b1) ? 0xffffffff : 0x0
+// r2 := (a2 <= b2) ? 0xffffffff : 0x0
+// r3 := (a3 <= b3) ? 0xffffffff : 0x0
+//
+// https://msdn.microsoft.com/en-us/library/vstudio/1s75w83z(v=vs.100).aspx
+FORCE_INLINE __m128 _mm_cmple_ps(__m128 a, __m128 b)
+{
+ return vreinterpretq_m128_u32(
+ vcleq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b)));
+}
+
+// Compares for less than or equal.
+// https://docs.microsoft.com/en-us/previous-versions/visualstudio/visual-studio-2010/a7x0hbhw(v=vs.100)
+FORCE_INLINE __m128 _mm_cmple_ss(__m128 a, __m128 b)
+{
+ return _mm_move_ss(a, _mm_cmple_ps(a, b));
+}
+
+// Compares for equality.
+// https://msdn.microsoft.com/en-us/library/vstudio/36aectz5(v=vs.100).aspx
+FORCE_INLINE __m128 _mm_cmpeq_ps(__m128 a, __m128 b)
+{
+ return vreinterpretq_m128_u32(
+ vceqq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b)));
+}
+
+// Compares for equality.
+// https://docs.microsoft.com/en-us/previous-versions/visualstudio/visual-studio-2010/k423z28e(v=vs.100)
+FORCE_INLINE __m128 _mm_cmpeq_ss(__m128 a, __m128 b)
+{
+ return _mm_move_ss(a, _mm_cmpeq_ps(a, b));
+}
+
+// Compares for inequality.
+// https://msdn.microsoft.com/en-us/library/sf44thbx(v=vs.100).aspx
+FORCE_INLINE __m128 _mm_cmpneq_ps(__m128 a, __m128 b)
+{
+ return vreinterpretq_m128_u32(vmvnq_u32(
+ vceqq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b))));
+}
+
+// Compares for inequality.
+// https://docs.microsoft.com/en-us/previous-versions/visualstudio/visual-studio-2010/ekya8fh4(v=vs.100)
+FORCE_INLINE __m128 _mm_cmpneq_ss(__m128 a, __m128 b)
+{
+ return _mm_move_ss(a, _mm_cmpneq_ps(a, b));
+}
+
+// Compares for not greater than or equal.
+// https://docs.microsoft.com/en-us/previous-versions/visualstudio/visual-studio-2010/wsexys62(v=vs.100)
+FORCE_INLINE __m128 _mm_cmpnge_ps(__m128 a, __m128 b)
+{
+ return _mm_cmplt_ps(a, b);
+}
+
+// Compares for not greater than or equal.
+// https://docs.microsoft.com/en-us/previous-versions/visualstudio/visual-studio-2010/fk2y80s8(v=vs.100)
+FORCE_INLINE __m128 _mm_cmpnge_ss(__m128 a, __m128 b)
+{
+ return _mm_cmplt_ss(a, b);
+}
+
+// Compares for not greater than.
+// https://docs.microsoft.com/en-us/previous-versions/visualstudio/visual-studio-2010/d0xh7w0s(v=vs.100)
+FORCE_INLINE __m128 _mm_cmpngt_ps(__m128 a, __m128 b)
+{
+ return _mm_cmple_ps(a, b);
+}
+
+// Compares for not greater than.
+// https://docs.microsoft.com/en-us/previous-versions/visualstudio/visual-studio-2010/z7x9ydwh(v=vs.100)
+FORCE_INLINE __m128 _mm_cmpngt_ss(__m128 a, __m128 b)
+{
+ return _mm_cmple_ss(a, b);
+}
+
+// Compares for not less than or equal.
+// https://docs.microsoft.com/en-us/previous-versions/visualstudio/visual-studio-2010/6a330kxw(v=vs.100)
+FORCE_INLINE __m128 _mm_cmpnle_ps(__m128 a, __m128 b)
+{
+ return _mm_cmpgt_ps(a, b);
+}
+
+// Compares for not less than or equal.
+// https://docs.microsoft.com/en-us/previous-versions/visualstudio/visual-studio-2010/z7x9ydwh(v=vs.100)
+FORCE_INLINE __m128 _mm_cmpnle_ss(__m128 a, __m128 b)
+{
+ return _mm_cmpgt_ss(a, b);
+}
+
+// Compares for not less than.
+// https://docs.microsoft.com/en-us/previous-versions/visualstudio/visual-studio-2010/4686bbdw(v=vs.100)
+FORCE_INLINE __m128 _mm_cmpnlt_ps(__m128 a, __m128 b)
+{
+ return _mm_cmpge_ps(a, b);
+}
+
+// Compares for not less than.
+// https://docs.microsoft.com/en-us/previous-versions/visualstudio/visual-studio-2010/56b9z2wf(v=vs.100)
+FORCE_INLINE __m128 _mm_cmpnlt_ss(__m128 a, __m128 b)
+{
+ return _mm_cmpge_ss(a, b);
+}
+
+// Compares the 16 signed or unsigned 8-bit integers in a and the 16 signed or
+// unsigned 8-bit integers in b for equality.
+// https://msdn.microsoft.com/en-us/library/windows/desktop/bz5xk21a(v=vs.90).aspx
+FORCE_INLINE __m128i _mm_cmpeq_epi8(__m128i a, __m128i b)
+{
+ return vreinterpretq_m128i_u8(
+ vceqq_s8(vreinterpretq_s8_m128i(a), vreinterpretq_s8_m128i(b)));
+}
+
+// Compare packed double-precision (64-bit) floating-point elements in a and b
+// for equality, and store the results in dst.
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmpeq_pd
+FORCE_INLINE __m128d _mm_cmpeq_pd(__m128d a, __m128d b)
+{
+#if defined(__aarch64__)
+ return vreinterpretq_m128d_u64(
+ vceqq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b)));
+#else
+ // (a == b) -> (a_lo == b_lo) && (a_hi == b_hi)
+ uint32x4_t cmp =
+ vceqq_u32(vreinterpretq_u32_m128d(a), vreinterpretq_u32_m128d(b));
+ uint32x4_t swapped = vrev64q_u32(cmp);
+ return vreinterpretq_m128d_u32(vandq_u32(cmp, swapped));
+#endif
+}
+
+// Compares the 8 signed or unsigned 16-bit integers in a and the 8 signed or
+// unsigned 16-bit integers in b for equality.
+// https://msdn.microsoft.com/en-us/library/2ay060te(v=vs.100).aspx
+FORCE_INLINE __m128i _mm_cmpeq_epi16(__m128i a, __m128i b)
+{
+ return vreinterpretq_m128i_u16(
+ vceqq_s16(vreinterpretq_s16_m128i(a), vreinterpretq_s16_m128i(b)));
+}
+
+// Compare packed 32-bit integers in a and b for equality, and store the results
+// in dst
+FORCE_INLINE __m128i _mm_cmpeq_epi32(__m128i a, __m128i b)
+{
+ return vreinterpretq_m128i_u32(
+ vceqq_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(b)));
+}
+
+// Compare packed 64-bit integers in a and b for equality, and store the results
+// in dst
+FORCE_INLINE __m128i _mm_cmpeq_epi64(__m128i a, __m128i b)
+{
+#if defined(__aarch64__)
+ return vreinterpretq_m128i_u64(
+ vceqq_u64(vreinterpretq_u64_m128i(a), vreinterpretq_u64_m128i(b)));
+#else
+ // ARMv7 lacks vceqq_u64
+ // (a == b) -> (a_lo == b_lo) && (a_hi == b_hi)
+ uint32x4_t cmp =
+ vceqq_u32(vreinterpretq_u32_m128i(a), vreinterpretq_u32_m128i(b));
+ uint32x4_t swapped = vrev64q_u32(cmp);
+ return vreinterpretq_m128i_u32(vandq_u32(cmp, swapped));
+#endif
+}
+
+// Compares the 16 signed 8-bit integers in a and the 16 signed 8-bit integers
+// in b for less than.
+// https://msdn.microsoft.com/en-us/library/windows/desktop/9s46csht(v=vs.90).aspx
+FORCE_INLINE __m128i _mm_cmplt_epi8(__m128i a, __m128i b)
+{
+ return vreinterpretq_m128i_u8(
+ vcltq_s8(vreinterpretq_s8_m128i(a), vreinterpretq_s8_m128i(b)));
+}
+
+// Compares the 16 signed 8-bit integers in a and the 16 signed 8-bit integers
+// in b for greater than.
+//
+// r0 := (a0 > b0) ? 0xff : 0x0
+// r1 := (a1 > b1) ? 0xff : 0x0
+// ...
+// r15 := (a15 > b15) ? 0xff : 0x0
+//
+// https://msdn.microsoft.com/zh-tw/library/wf45zt2b(v=vs.100).aspx
+FORCE_INLINE __m128i _mm_cmpgt_epi8(__m128i a, __m128i b)
+{
+ return vreinterpretq_m128i_u8(
+ vcgtq_s8(vreinterpretq_s8_m128i(a), vreinterpretq_s8_m128i(b)));
+}
+
+// Compares the 8 signed 16-bit integers in a and the 8 signed 16-bit integers
+// in b for less than.
+//
+// r0 := (a0 < b0) ? 0xffff : 0x0
+// r1 := (a1 < b1) ? 0xffff : 0x0
+// ...
+// r7 := (a7 < b7) ? 0xffff : 0x0
+//
+// https://technet.microsoft.com/en-us/library/t863edb2(v=vs.100).aspx
+FORCE_INLINE __m128i _mm_cmplt_epi16(__m128i a, __m128i b)
+{
+ return vreinterpretq_m128i_u16(
+ vcltq_s16(vreinterpretq_s16_m128i(a), vreinterpretq_s16_m128i(b)));
+}
+
+// Compares the 8 signed 16-bit integers in a and the 8 signed 16-bit integers
+// in b for greater than.
+//
+// r0 := (a0 > b0) ? 0xffff : 0x0
+// r1 := (a1 > b1) ? 0xffff : 0x0
+// ...
+// r7 := (a7 > b7) ? 0xffff : 0x0
+//
+// https://technet.microsoft.com/en-us/library/xd43yfsa(v=vs.100).aspx
+FORCE_INLINE __m128i _mm_cmpgt_epi16(__m128i a, __m128i b)
+{
+ return vreinterpretq_m128i_u16(
+ vcgtq_s16(vreinterpretq_s16_m128i(a), vreinterpretq_s16_m128i(b)));
+}
+
+// Compares the 4 signed 32-bit integers in a and the 4 signed 32-bit integers
+// in b for less than.
+// https://msdn.microsoft.com/en-us/library/vstudio/4ak0bf5d(v=vs.100).aspx
+FORCE_INLINE __m128i _mm_cmplt_epi32(__m128i a, __m128i b)
+{
+ return vreinterpretq_m128i_u32(
+ vcltq_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(b)));
+}
+
+// Compares the 4 signed 32-bit integers in a and the 4 signed 32-bit integers
+// in b for greater than.
+// https://msdn.microsoft.com/en-us/library/vstudio/1s9f2z0y(v=vs.100).aspx
+FORCE_INLINE __m128i _mm_cmpgt_epi32(__m128i a, __m128i b)
+{
+ return vreinterpretq_m128i_u32(
+ vcgtq_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(b)));
+}
+
+// Compares the 2 signed 64-bit integers in a and the 2 signed 64-bit integers
+// in b for greater than.
+FORCE_INLINE __m128i _mm_cmpgt_epi64(__m128i a, __m128i b)
+{
+#if defined(__aarch64__)
+ return vreinterpretq_m128i_u64(
+ vcgtq_s64(vreinterpretq_s64_m128i(a), vreinterpretq_s64_m128i(b)));
+#else
+ // ARMv7 lacks vcgtq_s64.
+ // This is based off of Clang's SSE2 polyfill:
+ // (a > b) -> ((a_hi > b_hi) || (a_lo > b_lo && a_hi == b_hi))
+
+ // Mask the sign bit out since we need a signed AND an unsigned comparison
+ // and it is ugly to try and split them.
+ int32x4_t mask = vreinterpretq_s32_s64(vdupq_n_s64(0x80000000ull));
+ int32x4_t a_mask = veorq_s32(vreinterpretq_s32_m128i(a), mask);
+ int32x4_t b_mask = veorq_s32(vreinterpretq_s32_m128i(b), mask);
+ // Check if a > b
+ int64x2_t greater = vreinterpretq_s64_u32(vcgtq_s32(a_mask, b_mask));
+ // Copy upper mask to lower mask
+ // a_hi > b_hi
+ int64x2_t gt_hi = vshrq_n_s64(greater, 63);
+ // Copy lower mask to upper mask
+ // a_lo > b_lo
+ int64x2_t gt_lo = vsliq_n_s64(greater, greater, 32);
+ // Compare for equality
+ int64x2_t equal = vreinterpretq_s64_u32(vceqq_s32(a_mask, b_mask));
+ // Copy upper mask to lower mask
+ // a_hi == b_hi
+ int64x2_t eq_hi = vshrq_n_s64(equal, 63);
+ // a_hi > b_hi || (a_lo > b_lo && a_hi == b_hi)
+ int64x2_t ret = vorrq_s64(gt_hi, vandq_s64(gt_lo, eq_hi));
+ return vreinterpretq_m128i_s64(ret);
+#endif
+}
+
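+// For reference, the split used by the ARMv7 branch above can be written out
+// in scalar form: compare the high 32-bit halves as signed and, on a tie, the
+// low halves as unsigned. A minimal illustrative sketch; the helper name
+// example_cmpgt_epi64_scalar is hypothetical and not part of the intrinsic
+// set.
+static inline int64_t example_cmpgt_epi64_scalar(int64_t a, int64_t b)
+{
+    int32_t a_hi = (int32_t) (a >> 32), b_hi = (int32_t) (b >> 32);
+    uint32_t a_lo = (uint32_t) a, b_lo = (uint32_t) b;
+    int gt = (a_hi > b_hi) || (a_hi == b_hi && a_lo > b_lo);
+    return gt ? -1 : 0; // -1 reproduces the all-ones lane mask
+}
+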
+// Compares the four 32-bit floats in a and b to check if any values are NaN.
+// Ordered compare between each value returns true for "orderable" and false for
+// "not orderable" (NaN).
+// https://msdn.microsoft.com/en-us/library/vstudio/0h9w00fx(v=vs.100).aspx see
+// also:
+// http://stackoverflow.com/questions/8627331/what-does-ordered-unordered-comparison-mean
+// http://stackoverflow.com/questions/29349621/neon-isnanval-intrinsics
+FORCE_INLINE __m128 _mm_cmpord_ps(__m128 a, __m128 b)
+{
+ // Note: NEON does not have ordered compare builtin
+ // Need to compare a eq a and b eq b to check for NaN
+ // Do AND of results to get final
+ uint32x4_t ceqaa =
+ vceqq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(a));
+ uint32x4_t ceqbb =
+ vceqq_f32(vreinterpretq_f32_m128(b), vreinterpretq_f32_m128(b));
+ return vreinterpretq_m128_u32(vandq_u32(ceqaa, ceqbb));
+}
+
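+// A common use of the ordered compare above is masking out NaN lanes. A
+// minimal usage sketch; example_zero_nan_lanes is a hypothetical helper name,
+// and only the vreinterpret helpers defined near the top of this header plus
+// plain NEON are assumed.
+static inline __m128 example_zero_nan_lanes(__m128 v)
+{
+    __m128 ord = _mm_cmpord_ps(v, v); // all-ones lanes where v is not NaN
+    uint32x4_t mask = vreinterpretq_u32_f32(vreinterpretq_f32_m128(ord));
+    uint32x4_t bits = vreinterpretq_u32_f32(vreinterpretq_f32_m128(v));
+    return vreinterpretq_m128_u32(vandq_u32(mask, bits)); // NaN lanes -> +0.0f
+}
+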
+// Compares for ordered.
+// https://docs.microsoft.com/en-us/previous-versions/visualstudio/visual-studio-2010/343t62da(v=vs.100)
+FORCE_INLINE __m128 _mm_cmpord_ss(__m128 a, __m128 b)
+{
+ return _mm_move_ss(a, _mm_cmpord_ps(a, b));
+}
+
+// Compares for unordered.
+// https://docs.microsoft.com/en-us/previous-versions/visualstudio/visual-studio-2010/khy6fk1t(v=vs.100)
+FORCE_INLINE __m128 _mm_cmpunord_ps(__m128 a, __m128 b)
+{
+ uint32x4_t f32a =
+ vceqq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(a));
+ uint32x4_t f32b =
+ vceqq_f32(vreinterpretq_f32_m128(b), vreinterpretq_f32_m128(b));
+ return vreinterpretq_m128_u32(vmvnq_u32(vandq_u32(f32a, f32b)));
+}
+
+// Compares for unordered.
+// https://docs.microsoft.com/en-us/previous-versions/visualstudio/visual-studio-2010/2as2387b(v=vs.100)
+FORCE_INLINE __m128 _mm_cmpunord_ss(__m128 a, __m128 b)
+{
+ return _mm_move_ss(a, _mm_cmpunord_ps(a, b));
+}
+
+// Compares the lower single-precision floating point scalar values of a and b
+// using a less than operation. :
+// https://msdn.microsoft.com/en-us/library/2kwe606b(v=vs.90).aspx Important
+// note!! The documentation on MSDN is incorrect! If either of the values is a
+// NAN the docs say you will get a one, but in fact, it will return a zero!!
+FORCE_INLINE int _mm_comilt_ss(__m128 a, __m128 b)
+{
+ uint32x4_t a_not_nan =
+ vceqq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(a));
+ uint32x4_t b_not_nan =
+ vceqq_f32(vreinterpretq_f32_m128(b), vreinterpretq_f32_m128(b));
+ uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan);
+ uint32x4_t a_lt_b =
+ vcltq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b));
+ return (vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_lt_b), 0) != 0) ? 1 : 0;
+}
+
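+// A quick check of the NaN behaviour described above; purely illustrative
+// (example_comilt_nan_is_zero is a hypothetical helper name, and the quiet-NaN
+// lanes are built with plain NEON so nothing beyond this header is assumed).
+static inline int example_comilt_nan_is_zero(void)
+{
+    __m128 nans = vreinterpretq_m128_u32(vdupq_n_u32(0x7FC00000)); // quiet NaNs
+    __m128 ones = vreinterpretq_m128_f32(vdupq_n_f32(1.0f));
+    return _mm_comilt_ss(nans, ones); // yields 0, not the 1 the MSDN page claims
+}
+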
+// Compares the lower single-precision floating point scalar values of a and b
+// using a greater than operation. :
+// https://msdn.microsoft.com/en-us/library/b0738e0t(v=vs.100).aspx
+FORCE_INLINE int _mm_comigt_ss(__m128 a, __m128 b)
+{
+ // return vgetq_lane_u32(vcgtq_f32(vreinterpretq_f32_m128(a),
+ // vreinterpretq_f32_m128(b)), 0);
+ uint32x4_t a_not_nan =
+ vceqq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(a));
+ uint32x4_t b_not_nan =
+ vceqq_f32(vreinterpretq_f32_m128(b), vreinterpretq_f32_m128(b));
+ uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan);
+ uint32x4_t a_gt_b =
+ vcgtq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b));
+ return (vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_gt_b), 0) != 0) ? 1 : 0;
+}
+
+// Compares the lower single-precision floating point scalar values of a and b
+// using a less than or equal operation. :
+// https://msdn.microsoft.com/en-us/library/1w4t7c57(v=vs.90).aspx
+FORCE_INLINE int _mm_comile_ss(__m128 a, __m128 b)
+{
+ // return vgetq_lane_u32(vcleq_f32(vreinterpretq_f32_m128(a),
+ // vreinterpretq_f32_m128(b)), 0);
+ uint32x4_t a_not_nan =
+ vceqq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(a));
+ uint32x4_t b_not_nan =
+ vceqq_f32(vreinterpretq_f32_m128(b), vreinterpretq_f32_m128(b));
+ uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan);
+ uint32x4_t a_le_b =
+ vcleq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b));
+ return (vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_le_b), 0) != 0) ? 1 : 0;
+}
+
+// Compares the lower single-precision floating point scalar values of a and b
+// using a greater than or equal operation. :
+// https://msdn.microsoft.com/en-us/library/8t80des6(v=vs.100).aspx
+FORCE_INLINE int _mm_comige_ss(__m128 a, __m128 b)
+{
+ // return vgetq_lane_u32(vcgeq_f32(vreinterpretq_f32_m128(a),
+ // vreinterpretq_f32_m128(b)), 0);
+ uint32x4_t a_not_nan =
+ vceqq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(a));
+ uint32x4_t b_not_nan =
+ vceqq_f32(vreinterpretq_f32_m128(b), vreinterpretq_f32_m128(b));
+ uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan);
+ uint32x4_t a_ge_b =
+ vcgeq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b));
+ return (vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_ge_b), 0) != 0) ? 1 : 0;
+}
+
+// Compares the lower single-precision floating point scalar values of a and b
+// using an equality operation. :
+// https://msdn.microsoft.com/en-us/library/93yx2h2b(v=vs.100).aspx
+FORCE_INLINE int _mm_comieq_ss(__m128 a, __m128 b)
+{
+ // return vgetq_lane_u32(vceqq_f32(vreinterpretq_f32_m128(a),
+ // vreinterpretq_f32_m128(b)), 0);
+ uint32x4_t a_not_nan =
+ vceqq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(a));
+ uint32x4_t b_not_nan =
+ vceqq_f32(vreinterpretq_f32_m128(b), vreinterpretq_f32_m128(b));
+ uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan);
+ uint32x4_t a_eq_b =
+ vceqq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b));
+ return (vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_eq_b), 0) != 0) ? 1 : 0;
+}
+
+// Compares the lower single-precision floating point scalar values of a and b
+// using an inequality operation. :
+// https://msdn.microsoft.com/en-us/library/bafh5e0a(v=vs.90).aspx
+FORCE_INLINE int _mm_comineq_ss(__m128 a, __m128 b)
+{
+ // return !vgetq_lane_u32(vceqq_f32(vreinterpretq_f32_m128(a),
+ // vreinterpretq_f32_m128(b)), 0);
+ uint32x4_t a_not_nan =
+ vceqq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(a));
+ uint32x4_t b_not_nan =
+ vceqq_f32(vreinterpretq_f32_m128(b), vreinterpretq_f32_m128(b));
+ uint32x4_t a_or_b_nan = vmvnq_u32(vandq_u32(a_not_nan, b_not_nan));
+ uint32x4_t a_neq_b = vmvnq_u32(
+ vceqq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b)));
+ return (vgetq_lane_u32(vorrq_u32(a_or_b_nan, a_neq_b), 0) != 0) ? 1 : 0;
+}
+
+// According to the documentation, these intrinsics behave the same as the
+// non-'u' versions. We'll just alias them here.
+#define _mm_ucomieq_ss _mm_comieq_ss
+#define _mm_ucomige_ss _mm_comige_ss
+#define _mm_ucomigt_ss _mm_comigt_ss
+#define _mm_ucomile_ss _mm_comile_ss
+#define _mm_ucomilt_ss _mm_comilt_ss
+#define _mm_ucomineq_ss _mm_comineq_ss
+
+/* Conversions */
+
+// Convert packed signed 32-bit integers in b to packed single-precision
+// (32-bit) floating-point elements, store the results in the lower 2 elements
+// of dst, and copy the upper 2 packed elements from a to the upper elements of
+// dst.
+//
+// dst[31:0] := Convert_Int32_To_FP32(b[31:0])
+// dst[63:32] := Convert_Int32_To_FP32(b[63:32])
+// dst[95:64] := a[95:64]
+// dst[127:96] := a[127:96]
+//
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvt_pi2ps
+FORCE_INLINE __m128 _mm_cvt_pi2ps(__m128 a, __m64 b)
+{
+ return vreinterpretq_m128_f32(
+ vcombine_f32(vcvt_f32_s32(vreinterpret_s32_m64(b)),
+ vget_high_f32(vreinterpretq_f32_m128(a))));
+}
+
+// Convert the signed 32-bit integer b to a single-precision (32-bit)
+// floating-point element, store the result in the lower element of dst, and
+// copy the upper 3 packed elements from a to the upper elements of dst.
+//
+// dst[31:0] := Convert_Int32_To_FP32(b[31:0])
+// dst[127:32] := a[127:32]
+//
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvt_si2ss
+FORCE_INLINE __m128 _mm_cvt_si2ss(__m128 a, int b)
+{
+ return vreinterpretq_m128_f32(
+ vsetq_lane_f32((float) b, vreinterpretq_f32_m128(a), 0));
+}
+
+// Convert the signed 32-bit integer b to a single-precision (32-bit)
+// floating-point element, store the result in the lower element of dst, and
+// copy the upper 3 packed elements from a to the upper elements of dst.
+//
+// dst[31:0] := Convert_Int32_To_FP32(b[31:0])
+// dst[127:32] := a[127:32]
+//
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtsi32_ss
+#define _mm_cvtsi32_ss(a, b) _mm_cvt_si2ss(a, b)
+
+// Convert the signed 64-bit integer b to a single-precision (32-bit)
+// floating-point element, store the result in the lower element of dst, and
+// copy the upper 3 packed elements from a to the upper elements of dst.
+//
+// dst[31:0] := Convert_Int64_To_FP32(b[63:0])
+// dst[127:32] := a[127:32]
+//
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtsi64_ss
+FORCE_INLINE __m128 _mm_cvtsi64_ss(__m128 a, int64_t b)
+{
+ return vreinterpretq_m128_f32(
+ vsetq_lane_f32((float) b, vreinterpretq_f32_m128(a), 0));
+}
+
+// Convert the lower single-precision (32-bit) floating-point element in a to a
+// 32-bit integer, and store the result in dst.
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvt_ss2si
+FORCE_INLINE int _mm_cvt_ss2si(__m128 a)
+{
+#if defined(__aarch64__)
+ return vgetq_lane_s32(vcvtnq_s32_f32(vreinterpretq_f32_m128(a)), 0);
+#else
+ float32_t data = vgetq_lane_f32(vreinterpretq_f32_m128(a), 0);
+ float32_t diff = data - floor(data);
+ if (diff > 0.5)
+ return (int32_t) ceil(data);
+ if (unlikely(diff == 0.5)) {
+ int32_t f = (int32_t) floor(data);
+ int32_t c = (int32_t) ceil(data);
+ return c & 1 ? f : c;
+ }
+ return (int32_t) floor(data);
+#endif
+}
+
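+// Worked example of the tie-breaking above; both the AArch64 vcvtnq path and
+// the ARMv7 fallback round halfway cases to the even integer. Illustrative
+// only (example_cvt_ss2si_ties_to_even is a hypothetical helper name).
+static inline int example_cvt_ss2si_ties_to_even(void)
+{
+    // 2.5 sits exactly between 2 and 3; round-to-even picks 2.
+    return _mm_cvt_ss2si(vreinterpretq_m128_f32(vdupq_n_f32(2.5f)));
+}
+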
+// Convert packed 16-bit integers in a to packed single-precision (32-bit)
+// floating-point elements, and store the results in dst.
+//
+// FOR j := 0 to 3
+// i := j*16
+// m := j*32
+// dst[m+31:m] := Convert_Int16_To_FP32(a[i+15:i])
+// ENDFOR
+//
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtpi16_ps
+FORCE_INLINE __m128 _mm_cvtpi16_ps(__m64 a)
+{
+ return vreinterpretq_m128_f32(
+ vcvtq_f32_s32(vmovl_s16(vreinterpret_s16_m64(a))));
+}
+
+// Convert packed 32-bit integers in b to packed single-precision (32-bit)
+// floating-point elements, store the results in the lower 2 elements of dst,
+// and copy the upper 2 packed elements from a to the upper elements of dst.
+//
+// dst[31:0] := Convert_Int32_To_FP32(b[31:0])
+// dst[63:32] := Convert_Int32_To_FP32(b[63:32])
+// dst[95:64] := a[95:64]
+// dst[127:96] := a[127:96]
+//
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtpi32_ps
+FORCE_INLINE __m128 _mm_cvtpi32_ps(__m128 a, __m64 b)
+{
+ return vreinterpretq_m128_f32(
+ vcombine_f32(vcvt_f32_s32(vreinterpret_s32_m64(b)),
+ vget_high_f32(vreinterpretq_f32_m128(a))));
+}
+
+// Convert packed signed 32-bit integers in a to packed single-precision
+// (32-bit) floating-point elements, store the results in the lower 2 elements
+// of dst, then convert the packed signed 32-bit integers in b to
+// single-precision (32-bit) floating-point elements, and store the results in
+// the upper 2 elements of dst.
+//
+// dst[31:0] := Convert_Int32_To_FP32(a[31:0])
+// dst[63:32] := Convert_Int32_To_FP32(a[63:32])
+// dst[95:64] := Convert_Int32_To_FP32(b[31:0])
+// dst[127:96] := Convert_Int32_To_FP32(b[63:32])
+//
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtpi32x2_ps
+FORCE_INLINE __m128 _mm_cvtpi32x2_ps(__m64 a, __m64 b)
+{
+ return vreinterpretq_m128_f32(vcvtq_f32_s32(
+ vcombine_s32(vreinterpret_s32_m64(a), vreinterpret_s32_m64(b))));
+}
+
+// Convert the lower packed 8-bit integers in a to packed single-precision
+// (32-bit) floating-point elements, and store the results in dst.
+//
+// FOR j := 0 to 3
+// i := j*8
+// m := j*32
+// dst[m+31:m] := Convert_Int8_To_FP32(a[i+7:i])
+// ENDFOR
+//
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtpi8_ps
+FORCE_INLINE __m128 _mm_cvtpi8_ps(__m64 a)
+{
+ return vreinterpretq_m128_f32(vcvtq_f32_s32(
+ vmovl_s16(vget_low_s16(vmovl_s8(vreinterpret_s8_m64(a))))));
+}
+
+// Convert packed unsigned 16-bit integers in a to packed single-precision
+// (32-bit) floating-point elements, and store the results in dst.
+//
+// FOR j := 0 to 3
+// i := j*16
+// m := j*32
+// dst[m+31:m] := Convert_UInt16_To_FP32(a[i+15:i])
+// ENDFOR
+//
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtpu16_ps
+FORCE_INLINE __m128 _mm_cvtpu16_ps(__m64 a)
+{
+ return vreinterpretq_m128_f32(
+ vcvtq_f32_u32(vmovl_u16(vreinterpret_u16_m64(a))));
+}
+
+// Convert the lower packed unsigned 8-bit integers in a to packed
+// single-precision (32-bit) floating-point elements, and store the results in
+// dst.
+//
+// FOR j := 0 to 3
+// i := j*8
+// m := j*32
+// dst[m+31:m] := Convert_UInt8_To_FP32(a[i+7:i])
+// ENDFOR
+//
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtpu8_ps
+FORCE_INLINE __m128 _mm_cvtpu8_ps(__m64 a)
+{
+ return vreinterpretq_m128_f32(vcvtq_f32_u32(
+ vmovl_u16(vget_low_u16(vmovl_u8(vreinterpret_u8_m64(a))))));
+}
+
+// Converts the four single-precision, floating-point values of a to signed
+// 32-bit integer values using truncate.
+// https://msdn.microsoft.com/en-us/library/vstudio/1h005y6x(v=vs.100).aspx
+FORCE_INLINE __m128i _mm_cvttps_epi32(__m128 a)
+{
+ return vreinterpretq_m128i_s32(vcvtq_s32_f32(vreinterpretq_f32_m128(a)));
+}
+
+// Convert the lower double-precision (64-bit) floating-point element in a to a
+// 64-bit integer with truncation, and store the result in dst.
+//
+// dst[63:0] := Convert_FP64_To_Int64_Truncate(a[63:0])
+//
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvttsd_si64
+FORCE_INLINE int64_t _mm_cvttsd_si64(__m128d a)
+{
+#if defined(__aarch64__)
+ return vgetq_lane_s64(vcvtq_s64_f64(vreinterpretq_f64_m128d(a)), 0);
+#else
+ double ret = *((double *) &a);
+ return (int64_t) ret;
+#endif
+}
+
+// Convert the lower double-precision (64-bit) floating-point element in a to a
+// 64-bit integer with truncation, and store the result in dst.
+//
+// dst[63:0] := Convert_FP64_To_Int64_Truncate(a[63:0])
+//
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvttsd_si64x
+#define _mm_cvttsd_si64x(a) _mm_cvttsd_si64(a)
+
+// Converts the four signed 32-bit integer values of a to single-precision,
+// floating-point values
+// https://msdn.microsoft.com/en-us/library/vstudio/36bwxcx5(v=vs.100).aspx
+FORCE_INLINE __m128 _mm_cvtepi32_ps(__m128i a)
+{
+ return vreinterpretq_m128_f32(vcvtq_f32_s32(vreinterpretq_s32_m128i(a)));
+}
+
+// Converts the eight unsigned 8-bit integers in the lower 64 bits to eight
+// unsigned 16-bit integers.
+FORCE_INLINE __m128i _mm_cvtepu8_epi16(__m128i a)
+{
+ uint8x16_t u8x16 = vreinterpretq_u8_m128i(a); /* xxxx xxxx xxxx DCBA */
+ uint16x8_t u16x8 = vmovl_u8(vget_low_u8(u8x16)); /* 0x0x 0x0x 0D0C 0B0A */
+ return vreinterpretq_m128i_u16(u16x8);
+}
+
+// Converts the four unsigned 8-bit integers in the lower 32 bits to four
+// unsigned 32-bit integers.
+// https://msdn.microsoft.com/en-us/library/bb531467%28v=vs.100%29.aspx
+FORCE_INLINE __m128i _mm_cvtepu8_epi32(__m128i a)
+{
+ uint8x16_t u8x16 = vreinterpretq_u8_m128i(a); /* xxxx xxxx xxxx DCBA */
+ uint16x8_t u16x8 = vmovl_u8(vget_low_u8(u8x16)); /* 0x0x 0x0x 0D0C 0B0A */
+ uint32x4_t u32x4 = vmovl_u16(vget_low_u16(u16x8)); /* 000D 000C 000B 000A */
+ return vreinterpretq_m128i_u32(u32x4);
+}
+
+// Converts the two unsigned 8-bit integers in the lower 16 bits to two
+// unsigned 64-bit integers.
+FORCE_INLINE __m128i _mm_cvtepu8_epi64(__m128i a)
+{
+ uint8x16_t u8x16 = vreinterpretq_u8_m128i(a); /* xxxx xxxx xxxx xxBA */
+ uint16x8_t u16x8 = vmovl_u8(vget_low_u8(u8x16)); /* 0x0x 0x0x 0x0x 0B0A */
+ uint32x4_t u32x4 = vmovl_u16(vget_low_u16(u16x8)); /* 000x 000x 000B 000A */
+ uint64x2_t u64x2 = vmovl_u32(vget_low_u32(u32x4)); /* 0000 000B 0000 000A */
+ return vreinterpretq_m128i_u64(u64x2);
+}
+
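+// The conversions above all follow the same widening chain: each vmovl_* step
+// doubles the element width, so one, two or three steps reach 16-, 32- or
+// 64-bit lanes. A minimal illustrative sketch (example_widen_two_bytes is a
+// hypothetical helper name):
+static inline __m128i example_widen_two_bytes(void)
+{
+    uint8_t bytes[16] = {1, 2}; // only the two lowest bytes are used below
+    // The two 8-bit values end up as the 64-bit lanes {1, 2}.
+    return _mm_cvtepu8_epi64(vreinterpretq_m128i_u8(vld1q_u8(bytes)));
+}
+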
+// Converts the eight signed 8-bit integers in the lower 64 bits to eight
+// signed 16-bit integers.
+FORCE_INLINE __m128i _mm_cvtepi8_epi16(__m128i a)
+{
+ int8x16_t s8x16 = vreinterpretq_s8_m128i(a); /* xxxx xxxx xxxx DCBA */
+ int16x8_t s16x8 = vmovl_s8(vget_low_s8(s8x16)); /* 0x0x 0x0x 0D0C 0B0A */
+ return vreinterpretq_m128i_s16(s16x8);
+}
+
+// Converts the four signed 8-bit integers in the lower 32 bits to four
+// signed 32-bit integers.
+FORCE_INLINE __m128i _mm_cvtepi8_epi32(__m128i a)
+{
+ int8x16_t s8x16 = vreinterpretq_s8_m128i(a); /* xxxx xxxx xxxx DCBA */
+ int16x8_t s16x8 = vmovl_s8(vget_low_s8(s8x16)); /* 0x0x 0x0x 0D0C 0B0A */
+ int32x4_t s32x4 = vmovl_s16(vget_low_s16(s16x8)); /* 000D 000C 000B 000A */
+ return vreinterpretq_m128i_s32(s32x4);
+}
+
+// Converts the two signed 8-bit integers in the lower 16 bits to two
+// signed 64-bit integers.
+FORCE_INLINE __m128i _mm_cvtepi8_epi64(__m128i a)
+{
+ int8x16_t s8x16 = vreinterpretq_s8_m128i(a); /* xxxx xxxx xxxx xxBA */
+ int16x8_t s16x8 = vmovl_s8(vget_low_s8(s8x16)); /* 0x0x 0x0x 0x0x 0B0A */
+ int32x4_t s32x4 = vmovl_s16(vget_low_s16(s16x8)); /* 000x 000x 000B 000A */
+ int64x2_t s64x2 = vmovl_s32(vget_low_s32(s32x4)); /* 0000 000B 0000 000A */
+ return vreinterpretq_m128i_s64(s64x2);
+}
+
+// Converts the four signed 16-bit integers in the lower 64 bits to four signed
+// 32-bit integers.
+FORCE_INLINE __m128i _mm_cvtepi16_epi32(__m128i a)
+{
+ return vreinterpretq_m128i_s32(
+ vmovl_s16(vget_low_s16(vreinterpretq_s16_m128i(a))));
+}
+
+// Converts the two signed 16-bit integers in the lower 32 bits to two signed
+// 64-bit integers.
+FORCE_INLINE __m128i _mm_cvtepi16_epi64(__m128i a)
+{
+ int16x8_t s16x8 = vreinterpretq_s16_m128i(a); /* xxxx xxxx xxxx 0B0A */
+ int32x4_t s32x4 = vmovl_s16(vget_low_s16(s16x8)); /* 000x 000x 000B 000A */
+ int64x2_t s64x2 = vmovl_s32(vget_low_s32(s32x4)); /* 0000 000B 0000 000A */
+ return vreinterpretq_m128i_s64(s64x2);
+}
+
+// Converts the four unsigned 16-bit integers in the lower 64 bits to four
+// unsigned 32-bit integers.
+FORCE_INLINE __m128i _mm_cvtepu16_epi32(__m128i a)
+{
+ return vreinterpretq_m128i_u32(
+ vmovl_u16(vget_low_u16(vreinterpretq_u16_m128i(a))));
+}
+
+// Converts the two unsigned 16-bit integers in the lower 32 bits to two
+// unsigned 64-bit integers.
+FORCE_INLINE __m128i _mm_cvtepu16_epi64(__m128i a)
+{
+ uint16x8_t u16x8 = vreinterpretq_u16_m128i(a); /* xxxx xxxx xxxx 0B0A */
+ uint32x4_t u32x4 = vmovl_u16(vget_low_u16(u16x8)); /* 000x 000x 000B 000A */
+ uint64x2_t u64x2 = vmovl_u32(vget_low_u32(u32x4)); /* 0000 000B 0000 000A */
+ return vreinterpretq_m128i_u64(u64x2);
+}
+
+// Converts the two unsigned 32-bit integers in the lower 64 bits to two
+// unsigned 64-bit integers.
+FORCE_INLINE __m128i _mm_cvtepu32_epi64(__m128i a)
+{
+ return vreinterpretq_m128i_u64(
+ vmovl_u32(vget_low_u32(vreinterpretq_u32_m128i(a))));
+}
+
+// Converts the two signed 32-bit integers in the lower 64 bits to two signed
+// 64-bit integers.
+FORCE_INLINE __m128i _mm_cvtepi32_epi64(__m128i a)
+{
+ return vreinterpretq_m128i_s64(
+ vmovl_s32(vget_low_s32(vreinterpretq_s32_m128i(a))));
+}
+
+// Converts the four single-precision, floating-point values of a to signed
+// 32-bit integer values.
+//
+// r0 := (int) a0
+// r1 := (int) a1
+// r2 := (int) a2
+// r3 := (int) a3
+//
+// https://msdn.microsoft.com/en-us/library/vstudio/xdc42k5e(v=vs.100).aspx
+// *NOTE*. The default rounding mode on SSE is 'round to even', which ARMv7-A
+// does not support! It is supported on ARMv8-A however.
+FORCE_INLINE __m128i _mm_cvtps_epi32(__m128 a)
+{
+#if defined(__aarch64__)
+ return vreinterpretq_m128i_s32(vcvtnq_s32_f32(a));
+#else
+ uint32x4_t signmask = vdupq_n_u32(0x80000000);
+ float32x4_t half = vbslq_f32(signmask, vreinterpretq_f32_m128(a),
+ vdupq_n_f32(0.5f)); /* +/- 0.5 */
+ int32x4_t r_normal = vcvtq_s32_f32(vaddq_f32(
+ vreinterpretq_f32_m128(a), half)); /* round to integer: [a + 0.5]*/
+ int32x4_t r_trunc =
+ vcvtq_s32_f32(vreinterpretq_f32_m128(a)); /* truncate to integer: [a] */
+ int32x4_t plusone = vreinterpretq_s32_u32(vshrq_n_u32(
+ vreinterpretq_u32_s32(vnegq_s32(r_trunc)), 31)); /* 1 or 0 */
+ int32x4_t r_even = vbicq_s32(vaddq_s32(r_trunc, plusone),
+ vdupq_n_s32(1)); /* ([a] + {0,1}) & ~1 */
+ float32x4_t delta = vsubq_f32(
+ vreinterpretq_f32_m128(a),
+ vcvtq_f32_s32(r_trunc)); /* compute delta: delta = (a - [a]) */
+ uint32x4_t is_delta_half = vceqq_f32(delta, half); /* delta == +/- 0.5 */
+ return vreinterpretq_m128i_s32(vbslq_s32(is_delta_half, r_even, r_normal));
+#endif
+}
+
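+// Worked values for the rounding above; the ARMv7 branch reproduces the same
+// ties-to-even result that vcvtnq gives on AArch64. Illustrative only
+// (example_cvtps_epi32_ties_to_even is a hypothetical helper name).
+static inline __m128i example_cvtps_epi32_ties_to_even(void)
+{
+    float vals[4] = {0.5f, 1.5f, 2.5f, -0.5f};
+    // Halfway cases round to the even integer: result lanes are {0, 2, 2, 0}.
+    return _mm_cvtps_epi32(vreinterpretq_m128_f32(vld1q_f32(vals)));
+}
+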
+// Convert packed single-precision (32-bit) floating-point elements in a to
+// packed 16-bit integers, and store the results in dst. Note: this intrinsic
+// will generate 0x7FFF, rather than 0x8000, for input values between 0x7FFF and
+// 0x7FFFFFFF.
+//
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtps_pi16
+FORCE_INLINE __m64 _mm_cvtps_pi16(__m128 a)
+{
+ return vreinterpret_m64_s16(
+ vmovn_s32(vreinterpretq_s32_m128i(_mm_cvtps_epi32(a))));
+}
+
+// Copy the lower 32-bit integer in a to dst.
+//
+// dst[31:0] := a[31:0]
+//
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtsi128_si32
+FORCE_INLINE int _mm_cvtsi128_si32(__m128i a)
+{
+ return vgetq_lane_s32(vreinterpretq_s32_m128i(a), 0);
+}
+
+// Copy the lower 64-bit integer in a to dst.
+//
+// dst[63:0] := a[63:0]
+//
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtsi128_si64
+FORCE_INLINE int64_t _mm_cvtsi128_si64(__m128i a)
+{
+ return vgetq_lane_s64(vreinterpretq_s64_m128i(a), 0);
+}
+
+// Copy the lower 64-bit integer in a to dst.
+//
+// dst[63:0] := a[63:0]
+//
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtsi128_si64x
+#define _mm_cvtsi128_si64x(a) _mm_cvtsi128_si64(a)
+
+// Moves 32-bit integer a to the least significant 32 bits of an __m128 object,
+// zero extending the upper bits.
+//
+// r0 := a
+// r1 := 0x0
+// r2 := 0x0
+// r3 := 0x0
+//
+// https://msdn.microsoft.com/en-us/library/ct3539ha%28v=vs.90%29.aspx
+FORCE_INLINE __m128i _mm_cvtsi32_si128(int a)
+{
+ return vreinterpretq_m128i_s32(vsetq_lane_s32(a, vdupq_n_s32(0), 0));
+}
+
+// Moves 64-bit integer a to the least significant 64 bits of an __m128 object,
+// zero extending the upper bits.
+//
+// r0 := a
+// r1 := 0x0
+FORCE_INLINE __m128i _mm_cvtsi64_si128(int64_t a)
+{
+ return vreinterpretq_m128i_s64(vsetq_lane_s64(a, vdupq_n_s64(0), 0));
+}
+
+// Cast vector of type __m128 to type __m128d. This intrinsic is only used for
+// compilation and does not generate any instructions, thus it has zero latency.
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_castps_pd
+FORCE_INLINE __m128d _mm_castps_pd(__m128 a)
+{
+ return vreinterpretq_m128d_s32(vreinterpretq_s32_m128(a));
+}
+
+// Applies a type cast to reinterpret four 32-bit floating point values passed
+// in as a 128-bit parameter as packed 32-bit integers.
+// https://msdn.microsoft.com/en-us/library/bb514099.aspx
+FORCE_INLINE __m128i _mm_castps_si128(__m128 a)
+{
+ return vreinterpretq_m128i_s32(vreinterpretq_s32_m128(a));
+}
+
+// Cast vector of type __m128i to type __m128d. This intrinsic is only used for
+// compilation and does not generate any instructions, thus it has zero latency.
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_castsi128_pd
+FORCE_INLINE __m128d _mm_castsi128_pd(__m128i a)
+{
+#if defined(__aarch64__)
+ return vreinterpretq_m128d_f64(vreinterpretq_f64_m128i(a));
+#else
+ return vreinterpretq_m128d_f32(vreinterpretq_f32_m128i(a));
+#endif
+}
+
+// Applies a type cast to reinterpret four 32-bit integers passed in as a
+// 128-bit parameter as packed 32-bit floating point values.
+// https://msdn.microsoft.com/en-us/library/bb514029.aspx
+FORCE_INLINE __m128 _mm_castsi128_ps(__m128i a)
+{
+ return vreinterpretq_m128_s32(vreinterpretq_s32_m128i(a));
+}
+
+// Loads 128-bit value. :
+// https://msdn.microsoft.com/en-us/library/atzzad1h(v=vs.80).aspx
+FORCE_INLINE __m128i _mm_load_si128(const __m128i *p)
+{
+ return vreinterpretq_m128i_s32(vld1q_s32((const int32_t *) p));
+}
+
+// Load a double-precision (64-bit) floating-point element from memory into both
+// elements of dst.
+//
+// dst[63:0] := MEM[mem_addr+63:mem_addr]
+// dst[127:64] := MEM[mem_addr+63:mem_addr]
+//
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_load1_pd
+FORCE_INLINE __m128d _mm_load1_pd(const double *p)
+{
+#if defined(__aarch64__)
+ return vreinterpretq_m128d_f64(vld1q_dup_f64(p));
+#else
+ return vreinterpretq_m128d_s64(vdupq_n_s64(*(const int64_t *) p));
+#endif
+}
+
+// Load a double-precision (64-bit) floating-point element from memory into both
+// elements of dst.
+//
+// dst[63:0] := MEM[mem_addr+63:mem_addr]
+// dst[127:64] := MEM[mem_addr+63:mem_addr]
+//
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_load_pd1
+#define _mm_load_pd1 _mm_load1_pd
+
+// Load a double-precision (64-bit) floating-point element from memory into the
+// upper element of dst, and copy the lower element from a to dst. mem_addr does
+// not need to be aligned on any particular boundary.
+//
+// dst[63:0] := a[63:0]
+// dst[127:64] := MEM[mem_addr+63:mem_addr]
+//
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_loadh_pd
+FORCE_INLINE __m128d _mm_loadh_pd(__m128d a, const double *p)
+{
+#if defined(__aarch64__)
+ return vreinterpretq_m128d_f64(
+ vcombine_f64(vget_low_f64(vreinterpretq_f64_m128d(a)), vld1_f64(p)));
+#else
+ return vreinterpretq_m128d_f32(vcombine_f32(
+ vget_low_f32(vreinterpretq_f32_m128d(a)), vld1_f32((const float *) p)));
+#endif
+}
+
+// Load a double-precision (64-bit) floating-point element from memory into both
+// elements of dst.
+//
+// dst[63:0] := MEM[mem_addr+63:mem_addr]
+// dst[127:64] := MEM[mem_addr+63:mem_addr]
+//
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_loaddup_pd
+#define _mm_loaddup_pd _mm_load1_pd
+
+// Loads 128-bit value. :
+// https://msdn.microsoft.com/zh-cn/library/f4k12ae8(v=vs.90).aspx
+FORCE_INLINE __m128i _mm_loadu_si128(const __m128i *p)
+{
+ return vreinterpretq_m128i_s32(vld1q_s32((const int32_t *) p));
+}
+
+// Load unaligned 32-bit integer from memory into the first element of dst.
+//
+// dst[31:0] := MEM[mem_addr+31:mem_addr]
+// dst[MAX:32] := 0
+//
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_loadu_si32
+FORCE_INLINE __m128i _mm_loadu_si32(const void *p)
+{
+ return vreinterpretq_m128i_s32(
+ vsetq_lane_s32(*(const int32_t *) p, vdupq_n_s32(0), 0));
+}
+
+// Convert packed double-precision (64-bit) floating-point elements in a to
+// packed single-precision (32-bit) floating-point elements, and store the
+// results in dst.
+//
+// FOR j := 0 to 1
+// i := 32*j
+// k := 64*j
+// dst[i+31:i] := Convert_FP64_To_FP32(a[k+63:k])
+// ENDFOR
+// dst[127:64] := 0
+//
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtpd_ps
+FORCE_INLINE __m128 _mm_cvtpd_ps(__m128d a)
+{
+#if defined(__aarch64__)
+ float32x2_t tmp = vcvt_f32_f64(vreinterpretq_f64_m128d(a));
+ return vreinterpretq_m128_f32(vcombine_f32(tmp, vdup_n_f32(0)));
+#else
+ float a0 = (float) ((double *) &a)[0];
+ float a1 = (float) ((double *) &a)[1];
+ return _mm_set_ps(0, 0, a1, a0);
+#endif
+}
+
+// Copy the lower double-precision (64-bit) floating-point element of a to dst.
+//
+// dst[63:0] := a[63:0]
+//
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtsd_f64
+FORCE_INLINE double _mm_cvtsd_f64(__m128d a)
+{
+#if defined(__aarch64__)
+ return (double) vgetq_lane_f64(vreinterpretq_f64_m128d(a), 0);
+#else
+ return ((double *) &a)[0];
+#endif
+}
+
+// Convert packed single-precision (32-bit) floating-point elements in a to
+// packed double-precision (64-bit) floating-point elements, and store the
+// results in dst.
+//
+// FOR j := 0 to 1
+// i := 64*j
+// k := 32*j
+// dst[i+63:i] := Convert_FP32_To_FP64(a[k+31:k])
+// ENDFOR
+//
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtps_pd
+FORCE_INLINE __m128d _mm_cvtps_pd(__m128 a)
+{
+#if defined(__aarch64__)
+ return vreinterpretq_m128d_f64(
+ vcvt_f64_f32(vget_low_f32(vreinterpretq_f32_m128(a))));
+#else
+ double a0 = (double) vgetq_lane_f32(vreinterpretq_f32_m128(a), 0);
+ double a1 = (double) vgetq_lane_f32(vreinterpretq_f32_m128(a), 1);
+ return _mm_set_pd(a1, a0);
+#endif
+}
+
+// Cast vector of type __m128d to type __m128i. This intrinsic is only used for
+// compilation and does not generate any instructions, thus it has zero latency.
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_castpd_si128
+FORCE_INLINE __m128i _mm_castpd_si128(__m128d a)
+{
+ return vreinterpretq_m128i_s64(vreinterpretq_s64_m128d(a));
+}
+
+// Cast vector of type __m128d to type __m128. This intrinsic is only used for
+// compilation and does not generate any instructions, thus it has zero latency.
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_castpd_ps
+FORCE_INLINE __m128 _mm_castpd_ps(__m128d a)
+{
+ return vreinterpretq_m128_s64(vreinterpretq_s64_m128d(a));
+}
+
+// Blend packed single-precision (32-bit) floating-point elements from a and b
+// using mask, and store the results in dst.
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_blendv_ps
+FORCE_INLINE __m128 _mm_blendv_ps(__m128 _a, __m128 _b, __m128 _mask)
+{
+ // Use a signed shift right to create a mask with the sign bit
+ uint32x4_t mask =
+ vreinterpretq_u32_s32(vshrq_n_s32(vreinterpretq_s32_m128(_mask), 31));
+ float32x4_t a = vreinterpretq_f32_m128(_a);
+ float32x4_t b = vreinterpretq_f32_m128(_b);
+ return vreinterpretq_m128_f32(vbslq_f32(mask, b, a));
+}
+
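+// Because only the sign bit of each mask lane matters above, any float vector
+// can act as its own selector. A minimal usage sketch computing a branch-free
+// per-lane absolute value (example_abs_ps_via_blendv is a hypothetical helper
+// name):
+static inline __m128 example_abs_ps_via_blendv(__m128 v)
+{
+    __m128 neg = vreinterpretq_m128_f32(vnegq_f32(vreinterpretq_f32_m128(v)));
+    // Lanes with the sign bit set pick -v, the rest keep v.
+    return _mm_blendv_ps(v, neg, v);
+}
+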
+// Blend packed single-precision (32-bit) floating-point elements from a and b
+// using mask, and store the results in dst.
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_blend_ps
+FORCE_INLINE __m128 _mm_blend_ps(__m128 _a, __m128 _b, const char imm8)
+{
+ const uint32_t ALIGN_STRUCT(16)
+ data[4] = {((imm8) & (1 << 0)) ? UINT32_MAX : 0,
+ ((imm8) & (1 << 1)) ? UINT32_MAX : 0,
+ ((imm8) & (1 << 2)) ? UINT32_MAX : 0,
+ ((imm8) & (1 << 3)) ? UINT32_MAX : 0};
+ uint32x4_t mask = vld1q_u32(data);
+ float32x4_t a = vreinterpretq_f32_m128(_a);
+ float32x4_t b = vreinterpretq_f32_m128(_b);
+ return vreinterpretq_m128_f32(vbslq_f32(mask, b, a));
+}
+
+// Blend packed double-precision (64-bit) floating-point elements from a and b
+// using mask, and store the results in dst.
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_blendv_pd
+FORCE_INLINE __m128d _mm_blendv_pd(__m128d _a, __m128d _b, __m128d _mask)
+{
+ uint64x2_t mask =
+ vreinterpretq_u64_s64(vshrq_n_s64(vreinterpretq_s64_m128d(_mask), 63));
+#if defined(__aarch64__)
+ float64x2_t a = vreinterpretq_f64_m128d(_a);
+ float64x2_t b = vreinterpretq_f64_m128d(_b);
+ return vreinterpretq_m128d_f64(vbslq_f64(mask, b, a));
+#else
+ uint64x2_t a = vreinterpretq_u64_m128d(_a);
+ uint64x2_t b = vreinterpretq_u64_m128d(_b);
+ return vreinterpretq_m128d_u64(vbslq_u64(mask, b, a));
+#endif
+}
+
+typedef struct {
+ uint16_t res0;
+ uint8_t res1 : 6;
+ uint8_t bit22 : 1;
+ uint8_t bit23 : 1;
+ uint8_t res2;
+#if defined(__aarch64__)
+ uint32_t res3;
+#endif
+} fpcr_bitfield;
+
+// Macro: Set the rounding mode bits of the MXCSR control and status register to
+// the value in unsigned 32-bit integer a. The rounding mode may contain any of
+// the following flags: _MM_ROUND_NEAREST, _MM_ROUND_DOWN, _MM_ROUND_UP,
+// _MM_ROUND_TOWARD_ZERO
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_MM_SET_ROUNDING_MODE
+FORCE_INLINE void _MM_SET_ROUNDING_MODE(int rounding)
+{
+ union {
+ fpcr_bitfield field;
+#if defined(__aarch64__)
+ uint64_t value;
+#else
+ uint32_t value;
+#endif
+ } r;
+
+#if defined(__aarch64__)
+ asm volatile("mrs %0, FPCR" : "=r"(r.value)); /* read */
+#else
+ asm volatile("vmrs %0, FPSCR" : "=r"(r.value)); /* read */
+#endif
+
+ switch (rounding) {
+ case _MM_ROUND_TOWARD_ZERO:
+ r.field.bit22 = 1;
+ r.field.bit23 = 1;
+ break;
+ case _MM_ROUND_DOWN:
+ r.field.bit22 = 0;
+ r.field.bit23 = 1;
+ break;
+ case _MM_ROUND_UP:
+ r.field.bit22 = 1;
+ r.field.bit23 = 0;
+ break;
+ default: //_MM_ROUND_NEAREST
+ r.field.bit22 = 0;
+ r.field.bit23 = 0;
+ }
+
+#if defined(__aarch64__)
+ asm volatile("msr FPCR, %0" ::"r"(r)); /* write */
+#else
+ asm volatile("vmsr FPSCR, %0" ::"r"(r)); /* write */
+#endif
+}
+
+FORCE_INLINE void _mm_setcsr(unsigned int a)
+{
+ _MM_SET_ROUNDING_MODE(a);
+}
+
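+// Minimal usage sketch for the rounding-mode control above; note that this
+// implementation of _mm_setcsr only forwards the rounding bits. The helper
+// name example_truncating_rounding_mode is hypothetical.
+static inline void example_truncating_rounding_mode(void)
+{
+    _MM_SET_ROUNDING_MODE(_MM_ROUND_TOWARD_ZERO);
+    // Operations that honour the current FPCR/FPSCR rounding mode, such as
+    // _mm_round_ps with _MM_FROUND_CUR_DIRECTION below, now round toward zero.
+}
+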
+// Round the packed single-precision (32-bit) floating-point elements in a using
+// the rounding parameter, and store the results as packed single-precision
+// floating-point elements in dst.
+// software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_round_ps
+FORCE_INLINE __m128 _mm_round_ps(__m128 a, int rounding)
+{
+#if defined(__aarch64__)
+ switch (rounding) {
+ case (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC):
+ return vreinterpretq_m128_f32(vrndnq_f32(vreinterpretq_f32_m128(a)));
+ case (_MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC):
+ return vreinterpretq_m128_f32(vrndmq_f32(vreinterpretq_f32_m128(a)));
+ case (_MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC):
+ return vreinterpretq_m128_f32(vrndpq_f32(vreinterpretq_f32_m128(a)));
+ case (_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC):
+ return vreinterpretq_m128_f32(vrndq_f32(vreinterpretq_f32_m128(a)));
+ default: //_MM_FROUND_CUR_DIRECTION
+ return vreinterpretq_m128_f32(vrndiq_f32(vreinterpretq_f32_m128(a)));
+ }
+#else
+ float *v_float = (float *) &a;
+ __m128 zero, neg_inf, pos_inf;
+
+ switch (rounding) {
+ case (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC):
+ return _mm_cvtepi32_ps(_mm_cvtps_epi32(a));
+ case (_MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC):
+ return (__m128){floorf(v_float[0]), floorf(v_float[1]),
+ floorf(v_float[2]), floorf(v_float[3])};
+ case (_MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC):
+ return (__m128){ceilf(v_float[0]), ceilf(v_float[1]), ceilf(v_float[2]),
+ ceilf(v_float[3])};
+ case (_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC):
+ zero = _mm_set_ps(0.0f, 0.0f, 0.0f, 0.0f);
+ neg_inf = _mm_set_ps(floorf(v_float[0]), floorf(v_float[1]),
+ floorf(v_float[2]), floorf(v_float[3]));
+ pos_inf = _mm_set_ps(ceilf(v_float[0]), ceilf(v_float[1]),
+ ceilf(v_float[2]), ceilf(v_float[3]));
+ return _mm_blendv_ps(pos_inf, neg_inf, _mm_cmple_ps(a, zero));
+ default: //_MM_FROUND_CUR_DIRECTION
+ return (__m128){roundf(v_float[0]), roundf(v_float[1]),
+ roundf(v_float[2]), roundf(v_float[3])};
+ }
+#endif
+}
+
+// Convert packed single-precision (32-bit) floating-point elements in a to
+// packed 32-bit integers, and store the results in dst.
+//
+// FOR j := 0 to 1
+// i := 32*j
+// dst[i+31:i] := Convert_FP32_To_Int32(a[i+31:i])
+// ENDFOR
+//
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvt_ps2pi
+FORCE_INLINE __m64 _mm_cvt_ps2pi(__m128 a)
+{
+#if defined(__aarch64__)
+ return vreinterpret_m64_s32(
+ vget_low_s32(vcvtnq_s32_f32(vreinterpretq_f32_m128(a))));
+#else
+ return vreinterpret_m64_s32(
+ vcvt_s32_f32(vget_low_f32(vreinterpretq_f32_m128(
+ _mm_round_ps(a, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC)))));
+#endif
+}
+
+// Convert packed single-precision (32-bit) floating-point elements in a to
+// packed 32-bit integers, and store the results in dst.
+//
+// FOR j := 0 to 1
+// i := 32*j
+// dst[i+31:i] := Convert_FP32_To_Int32(a[i+31:i])
+// ENDFOR
+//
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtps_pi32
+#define _mm_cvtps_pi32(a) _mm_cvt_ps2pi(a)
+
+// Round the packed single-precision (32-bit) floating-point elements in a up to
+// an integer value, and store the results as packed single-precision
+// floating-point elements in dst.
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_ceil_ps
+FORCE_INLINE __m128 _mm_ceil_ps(__m128 a)
+{
+ return _mm_round_ps(a, _MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC);
+}
+
+// Round the lower single-precision (32-bit) floating-point element in b up to
+// an integer value, store the result as a single-precision floating-point
+// element in the lower element of dst, and copy the upper 3 packed elements
+// from a to the upper elements of dst.
+//
+// dst[31:0] := CEIL(b[31:0])
+// dst[127:32] := a[127:32]
+//
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_ceil_ss
+FORCE_INLINE __m128 _mm_ceil_ss(__m128 a, __m128 b)
+{
+ return _mm_move_ss(
+ a, _mm_round_ps(b, _MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC));
+}
+
+// Round the packed single-precision (32-bit) floating-point elements in a down
+// to an integer value, and store the results as packed single-precision
+// floating-point elements in dst.
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_floor_ps
+FORCE_INLINE __m128 _mm_floor_ps(__m128 a)
+{
+ return _mm_round_ps(a, _MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC);
+}
+
+// Round the lower single-precision (32-bit) floating-point element in b down to
+// an integer value, store the result as a single-precision floating-point
+// element in the lower element of dst, and copy the upper 3 packed elements
+// from a to the upper elements of dst.
+//
+// dst[31:0] := FLOOR(b[31:0])
+// dst[127:32] := a[127:32]
+//
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_floor_ss
+FORCE_INLINE __m128 _mm_floor_ss(__m128 a, __m128 b)
+{
+ return _mm_move_ss(
+ a, _mm_round_ps(b, _MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC));
+}
+
+// Load 128-bits of integer data from unaligned memory into dst. This intrinsic
+// may perform better than _mm_loadu_si128 when the data crosses a cache line
+// boundary.
+//
+// dst[127:0] := MEM[mem_addr+127:mem_addr]
+//
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_lddqu_si128
+#define _mm_lddqu_si128 _mm_loadu_si128
+
+/* Miscellaneous Operations */
+
+// Shifts the 8 signed 16-bit integers in a right by count bits while shifting
+// in the sign bit.
+//
+// r0 := a0 >> count
+// r1 := a1 >> count
+// ...
+// r7 := a7 >> count
+//
+// https://msdn.microsoft.com/en-us/library/3c9997dk(v%3dvs.90).aspx
+FORCE_INLINE __m128i _mm_sra_epi16(__m128i a, __m128i count)
+{
+ int64_t c = (int64_t) vget_low_s64((int64x2_t) count);
+ if (unlikely(c > 15))
+ return _mm_cmplt_epi16(a, _mm_setzero_si128());
+ return vreinterpretq_m128i_s16(vshlq_s16((int16x8_t) a, vdupq_n_s16(-c)));
+}
+
+// Shifts the 4 signed 32-bit integers in a right by count bits while shifting
+// in the sign bit.
+//
+// r0 := a0 >> count
+// r1 := a1 >> count
+// r2 := a2 >> count
+// r3 := a3 >> count
+//
+// https://msdn.microsoft.com/en-us/library/ce40009e(v%3dvs.100).aspx
+FORCE_INLINE __m128i _mm_sra_epi32(__m128i a, __m128i count)
+{
+ int64_t c = (int64_t) vget_low_s64((int64x2_t) count);
+ if (unlikely(c > 31))
+ return _mm_cmplt_epi32(a, _mm_setzero_si128());
+ return vreinterpretq_m128i_s32(vshlq_s32((int32x4_t) a, vdupq_n_s32(-c)));
+}
+
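+// The early return above mirrors the SSE behaviour for oversized shift counts:
+// every lane collapses to its sign bit. Illustrative only
+// (example_sra_epi32_big_count is a hypothetical helper name).
+static inline __m128i example_sra_epi32_big_count(__m128i a)
+{
+    // With a count of 40 (> 31), negative lanes become -1 and the rest 0.
+    return _mm_sra_epi32(a, _mm_cvtsi32_si128(40));
+}
+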
+// Packs the 16 signed 16-bit integers from a and b into 8-bit integers and
+// saturates.
+// https://msdn.microsoft.com/en-us/library/k4y4f7w5%28v=vs.90%29.aspx
+FORCE_INLINE __m128i _mm_packs_epi16(__m128i a, __m128i b)
+{
+ return vreinterpretq_m128i_s8(
+ vcombine_s8(vqmovn_s16(vreinterpretq_s16_m128i(a)),
+ vqmovn_s16(vreinterpretq_s16_m128i(b))));
+}
+
+// Packs the 16 signed 16-bit integers from a and b into 8-bit unsigned
+// integers and saturates.
+//
+// r0 := UnsignedSaturate(a0)
+// r1 := UnsignedSaturate(a1)
+// ...
+// r7 := UnsignedSaturate(a7)
+// r8 := UnsignedSaturate(b0)
+// r9 := UnsignedSaturate(b1)
+// ...
+// r15 := UnsignedSaturate(b7)
+//
+// https://msdn.microsoft.com/en-us/library/07ad1wx4(v=vs.100).aspx
+FORCE_INLINE __m128i _mm_packus_epi16(const __m128i a, const __m128i b)
+{
+ return vreinterpretq_m128i_u8(
+ vcombine_u8(vqmovun_s16(vreinterpretq_s16_m128i(a)),
+ vqmovun_s16(vreinterpretq_s16_m128i(b))));
+}
+
+// Packs the 8 signed 32-bit integers from a and b into signed 16-bit integers
+// and saturates.
+//
+// r0 := SignedSaturate(a0)
+// r1 := SignedSaturate(a1)
+// r2 := SignedSaturate(a2)
+// r3 := SignedSaturate(a3)
+// r4 := SignedSaturate(b0)
+// r5 := SignedSaturate(b1)
+// r6 := SignedSaturate(b2)
+// r7 := SignedSaturate(b3)
+//
+// https://msdn.microsoft.com/en-us/library/393t56f9%28v=vs.90%29.aspx
+FORCE_INLINE __m128i _mm_packs_epi32(__m128i a, __m128i b)
+{
+ return vreinterpretq_m128i_s16(
+ vcombine_s16(vqmovn_s32(vreinterpretq_s32_m128i(a)),
+ vqmovn_s32(vreinterpretq_s32_m128i(b))));
+}
+
+// Packs the 8 unsigned 32-bit integers from a and b into unsigned 16-bit
+// integers and saturates.
+//
+// r0 := UnsignedSaturate(a0)
+// r1 := UnsignedSaturate(a1)
+// r2 := UnsignedSaturate(a2)
+// r3 := UnsignedSaturate(a3)
+// r4 := UnsignedSaturate(b0)
+// r5 := UnsignedSaturate(b1)
+// r6 := UnsignedSaturate(b2)
+// r7 := UnsignedSaturate(b3)
+FORCE_INLINE __m128i _mm_packus_epi32(__m128i a, __m128i b)
+{
+ return vreinterpretq_m128i_u16(
+ vcombine_u16(vqmovun_s32(vreinterpretq_s32_m128i(a)),
+ vqmovun_s32(vreinterpretq_s32_m128i(b))));
+}
+
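+// Worked values for the saturating packs above; anything outside the target
+// range clamps to the nearest representable value. Illustrative only
+// (example_packs_epi32_saturates is a hypothetical helper name).
+static inline __m128i example_packs_epi32_saturates(void)
+{
+    int32_t vals[4] = {-70000, 1, 400000, -3};
+    __m128i v = vreinterpretq_m128i_s32(vld1q_s32(vals));
+    // 16-bit result lanes: {-32768, 1, 32767, -3, -32768, 1, 32767, -3}.
+    return _mm_packs_epi32(v, v);
+}
+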
+// Interleaves the lower 8 signed or unsigned 8-bit integers in a with the lower
+// 8 signed or unsigned 8-bit integers in b.
+//
+// r0 := a0
+// r1 := b0
+// r2 := a1
+// r3 := b1
+// ...
+// r14 := a7
+// r15 := b7
+//
+// https://msdn.microsoft.com/en-us/library/xf7k860c%28v=vs.90%29.aspx
+FORCE_INLINE __m128i _mm_unpacklo_epi8(__m128i a, __m128i b)
+{
+#if defined(__aarch64__)
+ return vreinterpretq_m128i_s8(
+ vzip1q_s8(vreinterpretq_s8_m128i(a), vreinterpretq_s8_m128i(b)));
+#else
+ int8x8_t a1 = vreinterpret_s8_s16(vget_low_s16(vreinterpretq_s16_m128i(a)));
+ int8x8_t b1 = vreinterpret_s8_s16(vget_low_s16(vreinterpretq_s16_m128i(b)));
+ int8x8x2_t result = vzip_s8(a1, b1);
+ return vreinterpretq_m128i_s8(vcombine_s8(result.val[0], result.val[1]));
+#endif
+}
+
+// Interleaves the lower 4 signed or unsigned 16-bit integers in a with the
+// lower 4 signed or unsigned 16-bit integers in b.
+//
+// r0 := a0
+// r1 := b0
+// r2 := a1
+// r3 := b1
+// r4 := a2
+// r5 := b2
+// r6 := a3
+// r7 := b3
+//
+// https://msdn.microsoft.com/en-us/library/btxb17bw%28v=vs.90%29.aspx
+FORCE_INLINE __m128i _mm_unpacklo_epi16(__m128i a, __m128i b)
+{
+#if defined(__aarch64__)
+ return vreinterpretq_m128i_s16(
+ vzip1q_s16(vreinterpretq_s16_m128i(a), vreinterpretq_s16_m128i(b)));
+#else
+ int16x4_t a1 = vget_low_s16(vreinterpretq_s16_m128i(a));
+ int16x4_t b1 = vget_low_s16(vreinterpretq_s16_m128i(b));
+ int16x4x2_t result = vzip_s16(a1, b1);
+ return vreinterpretq_m128i_s16(vcombine_s16(result.val[0], result.val[1]));
+#endif
+}
+
+// Interleaves the lower 2 signed or unsigned 32-bit integers in a with the
+// lower 2 signed or unsigned 32-bit integers in b.
+//
+// r0 := a0
+// r1 := b0
+// r2 := a1
+// r3 := b1
+//
+// https://msdn.microsoft.com/en-us/library/x8atst9d(v=vs.100).aspx
+FORCE_INLINE __m128i _mm_unpacklo_epi32(__m128i a, __m128i b)
+{
+#if defined(__aarch64__)
+ return vreinterpretq_m128i_s32(
+ vzip1q_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(b)));
+#else
+ int32x2_t a1 = vget_low_s32(vreinterpretq_s32_m128i(a));
+ int32x2_t b1 = vget_low_s32(vreinterpretq_s32_m128i(b));
+ int32x2x2_t result = vzip_s32(a1, b1);
+ return vreinterpretq_m128i_s32(vcombine_s32(result.val[0], result.val[1]));
+#endif
+}
+
+FORCE_INLINE __m128i _mm_unpacklo_epi64(__m128i a, __m128i b)
+{
+ int64x1_t a_l = vget_low_s64(vreinterpretq_s64_m128i(a));
+ int64x1_t b_l = vget_low_s64(vreinterpretq_s64_m128i(b));
+ return vreinterpretq_m128i_s64(vcombine_s64(a_l, b_l));
+}
+
+// Selects and interleaves the lower two single-precision, floating-point values
+// from a and b.
+//
+// r0 := a0
+// r1 := b0
+// r2 := a1
+// r3 := b1
+//
+// https://msdn.microsoft.com/en-us/library/25st103b%28v=vs.90%29.aspx
+FORCE_INLINE __m128 _mm_unpacklo_ps(__m128 a, __m128 b)
+{
+#if defined(__aarch64__)
+ return vreinterpretq_m128_f32(
+ vzip1q_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b)));
+#else
+ float32x2_t a1 = vget_low_f32(vreinterpretq_f32_m128(a));
+ float32x2_t b1 = vget_low_f32(vreinterpretq_f32_m128(b));
+ float32x2x2_t result = vzip_f32(a1, b1);
+ return vreinterpretq_m128_f32(vcombine_f32(result.val[0], result.val[1]));
+#endif
+}
+
+// Unpack and interleave double-precision (64-bit) floating-point elements from
+// the low half of a and b, and store the results in dst.
+//
+// DEFINE INTERLEAVE_QWORDS(src1[127:0], src2[127:0]) {
+// dst[63:0] := src1[63:0]
+// dst[127:64] := src2[63:0]
+// RETURN dst[127:0]
+// }
+// dst[127:0] := INTERLEAVE_QWORDS(a[127:0], b[127:0])
+//
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_unpacklo_pd
+FORCE_INLINE __m128d _mm_unpacklo_pd(__m128d a, __m128d b)
+{
+#if defined(__aarch64__)
+ return vreinterpretq_m128d_f64(
+ vzip1q_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b)));
+#else
+ return vreinterpretq_m128d_s64(
+ vcombine_s64(vget_low_s64(vreinterpretq_s64_m128d(a)),
+ vget_low_s64(vreinterpretq_s64_m128d(b))));
+#endif
+}
+
+// Unpack and interleave double-precision (64-bit) floating-point elements from
+// the high half of a and b, and store the results in dst.
+//
+// DEFINE INTERLEAVE_HIGH_QWORDS(src1[127:0], src2[127:0]) {
+// dst[63:0] := src1[127:64]
+// dst[127:64] := src2[127:64]
+// RETURN dst[127:0]
+// }
+// dst[127:0] := INTERLEAVE_HIGH_QWORDS(a[127:0], b[127:0])
+//
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_unpackhi_pd
+FORCE_INLINE __m128d _mm_unpackhi_pd(__m128d a, __m128d b)
+{
+#if defined(__aarch64__)
+ return vreinterpretq_m128d_f64(
+ vzip2q_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b)));
+#else
+ return vreinterpretq_m128d_s64(
+ vcombine_s64(vget_high_s64(vreinterpretq_s64_m128d(a)),
+ vget_high_s64(vreinterpretq_s64_m128d(b))));
+#endif
+}
+
+// Selects and interleaves the upper two single-precision, floating-point values
+// from a and b.
+//
+// r0 := a2
+// r1 := b2
+// r2 := a3
+// r3 := b3
+//
+// https://msdn.microsoft.com/en-us/library/skccxx7d%28v=vs.90%29.aspx
+FORCE_INLINE __m128 _mm_unpackhi_ps(__m128 a, __m128 b)
+{
+#if defined(__aarch64__)
+ return vreinterpretq_m128_f32(
+ vzip2q_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b)));
+#else
+ float32x2_t a1 = vget_high_f32(vreinterpretq_f32_m128(a));
+ float32x2_t b1 = vget_high_f32(vreinterpretq_f32_m128(b));
+ float32x2x2_t result = vzip_f32(a1, b1);
+ return vreinterpretq_m128_f32(vcombine_f32(result.val[0], result.val[1]));
+#endif
+}
+
+// Interleaves the upper 8 signed or unsigned 8-bit integers in a with the upper
+// 8 signed or unsigned 8-bit integers in b.
+//
+// r0 := a8
+// r1 := b8
+// r2 := a9
+// r3 := b9
+// ...
+// r14 := a15
+// r15 := b15
+//
+// https://msdn.microsoft.com/en-us/library/t5h7783k(v=vs.100).aspx
+FORCE_INLINE __m128i _mm_unpackhi_epi8(__m128i a, __m128i b)
+{
+#if defined(__aarch64__)
+ return vreinterpretq_m128i_s8(
+ vzip2q_s8(vreinterpretq_s8_m128i(a), vreinterpretq_s8_m128i(b)));
+#else
+ int8x8_t a1 =
+ vreinterpret_s8_s16(vget_high_s16(vreinterpretq_s16_m128i(a)));
+ int8x8_t b1 =
+ vreinterpret_s8_s16(vget_high_s16(vreinterpretq_s16_m128i(b)));
+ int8x8x2_t result = vzip_s8(a1, b1);
+ return vreinterpretq_m128i_s8(vcombine_s8(result.val[0], result.val[1]));
+#endif
+}
+
+// Interleaves the upper 4 signed or unsigned 16-bit integers in a with the
+// upper 4 signed or unsigned 16-bit integers in b.
+//
+// r0 := a4
+// r1 := b4
+// r2 := a5
+// r3 := b5
+// r4 := a6
+// r5 := b6
+// r6 := a7
+// r7 := b7
+//
+// https://msdn.microsoft.com/en-us/library/03196cz7(v=vs.100).aspx
+FORCE_INLINE __m128i _mm_unpackhi_epi16(__m128i a, __m128i b)
+{
+#if defined(__aarch64__)
+ return vreinterpretq_m128i_s16(
+ vzip2q_s16(vreinterpretq_s16_m128i(a), vreinterpretq_s16_m128i(b)));
+#else
+ int16x4_t a1 = vget_high_s16(vreinterpretq_s16_m128i(a));
+ int16x4_t b1 = vget_high_s16(vreinterpretq_s16_m128i(b));
+ int16x4x2_t result = vzip_s16(a1, b1);
+ return vreinterpretq_m128i_s16(vcombine_s16(result.val[0], result.val[1]));
+#endif
+}
+
+// Interleaves the upper 2 signed or unsigned 32-bit integers in a with the
+// upper 2 signed or unsigned 32-bit integers in b.
+// https://msdn.microsoft.com/en-us/library/65sa7cbs(v=vs.100).aspx
+FORCE_INLINE __m128i _mm_unpackhi_epi32(__m128i a, __m128i b)
+{
+#if defined(__aarch64__)
+ return vreinterpretq_m128i_s32(
+ vzip2q_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(b)));
+#else
+ int32x2_t a1 = vget_high_s32(vreinterpretq_s32_m128i(a));
+ int32x2_t b1 = vget_high_s32(vreinterpretq_s32_m128i(b));
+ int32x2x2_t result = vzip_s32(a1, b1);
+ return vreinterpretq_m128i_s32(vcombine_s32(result.val[0], result.val[1]));
+#endif
+}
+
+// Interleaves the upper signed or unsigned 64-bit integer in a with the
+// upper signed or unsigned 64-bit integer in b.
+//
+// r0 := a1
+// r1 := b1
+FORCE_INLINE __m128i _mm_unpackhi_epi64(__m128i a, __m128i b)
+{
+ int64x1_t a_h = vget_high_s64(vreinterpretq_s64_m128i(a));
+ int64x1_t b_h = vget_high_s64(vreinterpretq_s64_m128i(b));
+ return vreinterpretq_m128i_s64(vcombine_s64(a_h, b_h));
+}
+
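+// Taken together, the unpack pairs above give a full interleave (zip) of two
+// vectors. A minimal usage sketch on 32-bit lanes (example_zip_epi32 is a
+// hypothetical helper name):
+static inline void example_zip_epi32(__m128i a, __m128i b, __m128i *lo, __m128i *hi)
+{
+    *lo = _mm_unpacklo_epi32(a, b); // {a0, b0, a1, b1}
+    *hi = _mm_unpackhi_epi32(a, b); // {a2, b2, a3, b3}
+}
+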
+// Horizontally compute the minimum amongst the packed unsigned 16-bit integers
+// in a, store the minimum and index in dst, and zero the remaining bits in dst.
+//
+// index[2:0] := 0
+// min[15:0] := a[15:0]
+// FOR j := 0 to 7
+// i := j*16
+// IF a[i+15:i] < min[15:0]
+// index[2:0] := j
+// min[15:0] := a[i+15:i]
+// FI
+// ENDFOR
+// dst[15:0] := min[15:0]
+// dst[18:16] := index[2:0]
+// dst[127:19] := 0
+//
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_minpos_epu16
+FORCE_INLINE __m128i _mm_minpos_epu16(__m128i a)
+{
+ __m128i dst;
+ uint16_t min, idx = 0;
+ // Find the minimum value
+#if defined(__aarch64__)
+ min = vminvq_u16(vreinterpretq_u16_m128i(a));
+#else
+ __m64 tmp;
+ tmp = vreinterpret_m64_u16(
+ vmin_u16(vget_low_u16(vreinterpretq_u16_m128i(a)),
+ vget_high_u16(vreinterpretq_u16_m128i(a))));
+ tmp = vreinterpret_m64_u16(
+ vpmin_u16(vreinterpret_u16_m64(tmp), vreinterpret_u16_m64(tmp)));
+ tmp = vreinterpret_m64_u16(
+ vpmin_u16(vreinterpret_u16_m64(tmp), vreinterpret_u16_m64(tmp)));
+ min = vget_lane_u16(vreinterpret_u16_m64(tmp), 0);
+#endif
+ // Get the index of the minimum value
+ int i;
+ for (i = 0; i < 8; i++) {
+ if (min == vgetq_lane_u16(vreinterpretq_u16_m128i(a), 0)) {
+ idx = (uint16_t) i;
+ break;
+ }
+ a = _mm_srli_si128(a, 2);
+ }
+ // Generate result
+ dst = _mm_setzero_si128();
+ dst = vreinterpretq_m128i_u16(
+ vsetq_lane_u16(min, vreinterpretq_u16_m128i(dst), 0));
+ dst = vreinterpretq_m128i_u16(
+ vsetq_lane_u16(idx, vreinterpretq_u16_m128i(dst), 1));
+ return dst;
+}
+
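+// Usage sketch for the horizontal minimum above: the value sits in the lowest
+// 16-bit lane of the result and its index in the next one. Illustrative only
+// (example_minpos is a hypothetical helper name).
+static inline void example_minpos(__m128i a, uint16_t *min_val, unsigned *min_idx)
+{
+    __m128i r = _mm_minpos_epu16(a);
+    *min_val = vgetq_lane_u16(vreinterpretq_u16_m128i(r), 0);
+    *min_idx = vgetq_lane_u16(vreinterpretq_u16_m128i(r), 1);
+}
+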
+// Compute the bitwise AND of 128 bits (representing integer data) in a and b,
+// and set ZF to 1 if the result is zero, otherwise set ZF to 0. Compute the
+// bitwise NOT of a and then AND with b, and set CF to 1 if the result is zero,
+// otherwise set CF to 0. Return the CF value.
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_testc_si128
+FORCE_INLINE int _mm_testc_si128(__m128i a, __m128i b)
+{
+ int64x2_t s64 =
+ vandq_s64(vreinterpretq_s64_s32(vmvnq_s32(vreinterpretq_s32_m128i(a))),
+ vreinterpretq_s64_m128i(b));
+ return !(vgetq_lane_s64(s64, 0) | vgetq_lane_s64(s64, 1));
+}
+
+// Compute the bitwise AND of 128 bits (representing integer data) in a and b,
+// and set ZF to 1 if the result is zero, otherwise set ZF to 0. Compute the
+// bitwise NOT of a and then AND with b, and set CF to 1 if the result is zero,
+// otherwise set CF to 0. Return the ZF value.
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_testz_si128
+FORCE_INLINE int _mm_testz_si128(__m128i a, __m128i b)
+{
+ int64x2_t s64 =
+ vandq_s64(vreinterpretq_s64_m128i(a), vreinterpretq_s64_m128i(b));
+ return !(vgetq_lane_s64(s64, 0) | vgetq_lane_s64(s64, 1));
+}
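+// Usage note (illustrative): _mm_testz_si128(a, b) returns 1 iff (a & b) is
+// all zero, while _mm_testc_si128(a, b) returns 1 iff every bit set in b is
+// also set in a. For example, with mask = _mm_set1_epi8(0x0f),
+//   _mm_testc_si128(mask, v)
+// is 1 exactly when v uses only the low nibble of each byte.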
+
+// Extracts the selected signed or unsigned 8-bit integer from a and zero
+// extends.
+// FORCE_INLINE int _mm_extract_epi8(__m128i a, __constrange(0,16) int imm)
+#define _mm_extract_epi8(a, imm) vgetq_lane_u8(vreinterpretq_u8_m128i(a), (imm))
+
+// Inserts the least significant 8 bits of b into the selected 8-bit integer
+// of a.
+// FORCE_INLINE __m128i _mm_insert_epi8(__m128i a, int b,
+// __constrange(0,16) int imm)
+#define _mm_insert_epi8(a, b, imm) \
+ __extension__({ \
+ vreinterpretq_m128i_s8( \
+ vsetq_lane_s8((b), vreinterpretq_s8_m128i(a), (imm))); \
+ })
+
+// Extracts the selected signed or unsigned 16-bit integer from a and zero
+// extends.
+// https://msdn.microsoft.com/en-us/library/6dceta0c(v=vs.100).aspx
+// FORCE_INLINE int _mm_extract_epi16(__m128i a, __constrange(0,8) int imm)
+#define _mm_extract_epi16(a, imm) \
+ vgetq_lane_u16(vreinterpretq_u16_m128i(a), (imm))
+
+// Inserts the least significant 16 bits of b into the selected 16-bit integer
+// of a.
+// https://msdn.microsoft.com/en-us/library/kaze8hz1%28v=vs.100%29.aspx
+// FORCE_INLINE __m128i _mm_insert_epi16(__m128i a, int b,
+// __constrange(0,8) int imm)
+#define _mm_insert_epi16(a, b, imm) \
+ __extension__({ \
+ vreinterpretq_m128i_s16( \
+ vsetq_lane_s16((b), vreinterpretq_s16_m128i(a), (imm))); \
+ })
+
+// Copy a to dst, and insert the 16-bit integer i into dst at the location
+// specified by imm8.
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_insert_pi16
+#define _mm_insert_pi16(a, b, imm) \
+ __extension__({ \
+ vreinterpret_m64_s16( \
+ vset_lane_s16((b), vreinterpret_s16_m64(a), (imm))); \
+ })
+
+// Extracts the selected signed or unsigned 32-bit integer from a and zero
+// extends.
+// FORCE_INLINE int _mm_extract_epi32(__m128i a, __constrange(0,4) int imm)
+#define _mm_extract_epi32(a, imm) \
+ vgetq_lane_s32(vreinterpretq_s32_m128i(a), (imm))
+
+// Extracts the selected single-precision (32-bit) floating-point element
+// from a.
+// FORCE_INLINE int _mm_extract_ps(__m128 a, __constrange(0,4) int imm)
+#define _mm_extract_ps(a, imm) vgetq_lane_s32(vreinterpretq_s32_m128(a), (imm))
+
+// Inserts the least significant 32 bits of b into the selected 32-bit integer
+// of a.
+// FORCE_INLINE __m128i _mm_insert_epi32(__m128i a, int b,
+// __constrange(0,4) int imm)
+#define _mm_insert_epi32(a, b, imm) \
+ __extension__({ \
+ vreinterpretq_m128i_s32( \
+ vsetq_lane_s32((b), vreinterpretq_s32_m128i(a), (imm))); \
+ })
+
+// Extracts the selected signed or unsigned 64-bit integer from a and zero
+// extends.
+// FORCE_INLINE __int64 _mm_extract_epi64(__m128i a, __constrange(0,2) int imm)
+#define _mm_extract_epi64(a, imm) \
+ vgetq_lane_s64(vreinterpretq_s64_m128i(a), (imm))
+
+// Inserts the least significant 64 bits of b into the selected 64-bit integer
+// of a.
+// FORCE_INLINE __m128i _mm_insert_epi64(__m128i a, __int64 b,
+// __constrange(0,2) int imm)
+#define _mm_insert_epi64(a, b, imm) \
+ __extension__({ \
+ vreinterpretq_m128i_s64( \
+ vsetq_lane_s64((b), vreinterpretq_s64_m128i(a), (imm))); \
+ })
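+// Illustrative usage of the extract/insert pairs above (a sketch, not part of
+// the upstream header):
+//   __m128i v  = _mm_set_epi64x(0x2222222222222222LL, 0x1111111111111111LL);
+//   int64_t hi = _mm_extract_epi64(v, 1);              // 0x2222222222222222
+//   v = _mm_insert_epi64(v, 0x3333333333333333LL, 0);  // replaces the low lane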
+
+// Count the number of bits set to 1 in unsigned 32-bit integer a, and
+// return that count in dst.
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_popcnt_u32
+FORCE_INLINE int _mm_popcnt_u32(unsigned int a)
+{
+#if defined(__aarch64__)
+#if __has_builtin(__builtin_popcount)
+ return __builtin_popcount(a);
+#else
+ return (int) vaddlv_u8(vcnt_u8(vcreate_u8((uint64_t) a)));
+#endif
+#else
+ uint32_t count = 0;
+ uint8x8_t input_val, count8x8_val;
+ uint16x4_t count16x4_val;
+ uint32x2_t count32x2_val;
+
+ input_val = vld1_u8((uint8_t *) &a);
+ count8x8_val = vcnt_u8(input_val);
+ count16x4_val = vpaddl_u8(count8x8_val);
+ count32x2_val = vpaddl_u16(count16x4_val);
+
+ vst1_u32(&count, count32x2_val);
+ return count;
+#endif
+}
+
+// Count the number of bits set to 1 in unsigned 64-bit integer a, and
+// return that count in dst.
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_popcnt_u64
+FORCE_INLINE int64_t _mm_popcnt_u64(uint64_t a)
+{
+#if defined(__aarch64__)
+#if __has_builtin(__builtin_popcountll)
+ return __builtin_popcountll(a);
+#else
+ return (int64_t) vaddlv_u8(vcnt_u8(vcreate_u8(a)));
+#endif
+#else
+ uint64_t count = 0;
+ uint8x8_t input_val, count8x8_val;
+ uint16x4_t count16x4_val;
+ uint32x2_t count32x2_val;
+ uint64x1_t count64x1_val;
+
+ input_val = vld1_u8((uint8_t *) &a);
+ count8x8_val = vcnt_u8(input_val);
+ count16x4_val = vpaddl_u8(count8x8_val);
+ count32x2_val = vpaddl_u16(count16x4_val);
+ count64x1_val = vpaddl_u32(count32x2_val);
+ vst1_u64(&count, count64x1_val);
+ return count;
+#endif
+}
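+// Quick sanity examples (illustrative):
+//   _mm_popcnt_u32(0xF0F0u)               == 8
+//   _mm_popcnt_u64(0x8000000000000001ull) == 2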
+
+// Macro: Transpose the 4x4 matrix formed by the 4 rows of single-precision
+// (32-bit) floating-point elements in row0, row1, row2, and row3, and store the
+// transposed matrix in these vectors (row0 now contains column 0, etc.).
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=MM_TRANSPOSE4_PS
+#define _MM_TRANSPOSE4_PS(row0, row1, row2, row3) \
+ do { \
+ float32x4x2_t ROW01 = vtrnq_f32(row0, row1); \
+ float32x4x2_t ROW23 = vtrnq_f32(row2, row3); \
+ row0 = vcombine_f32(vget_low_f32(ROW01.val[0]), \
+ vget_low_f32(ROW23.val[0])); \
+ row1 = vcombine_f32(vget_low_f32(ROW01.val[1]), \
+ vget_low_f32(ROW23.val[1])); \
+ row2 = vcombine_f32(vget_high_f32(ROW01.val[0]), \
+ vget_high_f32(ROW23.val[0])); \
+ row3 = vcombine_f32(vget_high_f32(ROW01.val[1]), \
+ vget_high_f32(ROW23.val[1])); \
+ } while (0)
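+// Usage sketch (illustrative): transpose a 4x4 matrix held in four __m128 rows.
+//   __m128 r0 = _mm_setr_ps( 1,  2,  3,  4);
+//   __m128 r1 = _mm_setr_ps( 5,  6,  7,  8);
+//   __m128 r2 = _mm_setr_ps( 9, 10, 11, 12);
+//   __m128 r3 = _mm_setr_ps(13, 14, 15, 16);
+//   _MM_TRANSPOSE4_PS(r0, r1, r2, r3); // r0 = {1,5,9,13}, r1 = {2,6,10,14}, ...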
+
+/* Crypto Extensions */
+
+#if defined(__ARM_FEATURE_CRYPTO)
+// Wraps vmull_p64
+FORCE_INLINE uint64x2_t _sse2neon_vmull_p64(uint64x1_t _a, uint64x1_t _b)
+{
+ poly64_t a = vget_lane_p64(vreinterpret_p64_u64(_a), 0);
+ poly64_t b = vget_lane_p64(vreinterpret_p64_u64(_b), 0);
+ return vreinterpretq_u64_p128(vmull_p64(a, b));
+}
+#else // ARMv7 polyfill
+// ARMv7 (and some AArch64 cores) lacks vmull_p64, but it does have vmull_p8.
+//
+// vmull_p8 calculates 8 8-bit->16-bit polynomial multiplies, but we need a
+// 64-bit->128-bit polynomial multiply.
+//
+// It needs some work and is somewhat slow, but it is still faster than all
+// known scalar methods.
+//
+// Algorithm adapted to C from
+// https://www.workofard.com/2017/07/ghash-for-low-end-cores/, which is adapted
+// from "Fast Software Polynomial Multiplication on ARM Processors Using the
+// NEON Engine" by Danilo Camara, Conrado Gouvea, Julio Lopez and Ricardo Dahab
+// (https://hal.inria.fr/hal-01506572)
+static uint64x2_t _sse2neon_vmull_p64(uint64x1_t _a, uint64x1_t _b)
+{
+ poly8x8_t a = vreinterpret_p8_u64(_a);
+ poly8x8_t b = vreinterpret_p8_u64(_b);
+
+ // Masks
+ uint8x16_t k48_32 = vcombine_u8(vcreate_u8(0x0000ffffffffffff),
+ vcreate_u8(0x00000000ffffffff));
+ uint8x16_t k16_00 = vcombine_u8(vcreate_u8(0x000000000000ffff),
+ vcreate_u8(0x0000000000000000));
+
+ // Do the multiplies, rotating with vext to get all combinations
+ uint8x16_t d = vreinterpretq_u8_p16(vmull_p8(a, b)); // D = A0 * B0
+ uint8x16_t e =
+ vreinterpretq_u8_p16(vmull_p8(a, vext_p8(b, b, 1))); // E = A0 * B1
+ uint8x16_t f =
+ vreinterpretq_u8_p16(vmull_p8(vext_p8(a, a, 1), b)); // F = A1 * B0
+ uint8x16_t g =
+ vreinterpretq_u8_p16(vmull_p8(a, vext_p8(b, b, 2))); // G = A0 * B2
+ uint8x16_t h =
+ vreinterpretq_u8_p16(vmull_p8(vext_p8(a, a, 2), b)); // H = A2 * B0
+ uint8x16_t i =
+ vreinterpretq_u8_p16(vmull_p8(a, vext_p8(b, b, 3))); // I = A0 * B3
+ uint8x16_t j =
+ vreinterpretq_u8_p16(vmull_p8(vext_p8(a, a, 3), b)); // J = A3 * B0
+ uint8x16_t k =
+        vreinterpretq_u8_p16(vmull_p8(a, vext_p8(b, b, 4)));  // K = A0 * B4
+
+ // Add cross products
+ uint8x16_t l = veorq_u8(e, f); // L = E + F
+ uint8x16_t m = veorq_u8(g, h); // M = G + H
+ uint8x16_t n = veorq_u8(i, j); // N = I + J
+
+ // Interleave. Using vzip1 and vzip2 prevents Clang from emitting TBL
+ // instructions.
+#if defined(__aarch64__)
+ uint8x16_t lm_p0 = vreinterpretq_u8_u64(
+ vzip1q_u64(vreinterpretq_u64_u8(l), vreinterpretq_u64_u8(m)));
+ uint8x16_t lm_p1 = vreinterpretq_u8_u64(
+ vzip2q_u64(vreinterpretq_u64_u8(l), vreinterpretq_u64_u8(m)));
+ uint8x16_t nk_p0 = vreinterpretq_u8_u64(
+ vzip1q_u64(vreinterpretq_u64_u8(n), vreinterpretq_u64_u8(k)));
+ uint8x16_t nk_p1 = vreinterpretq_u8_u64(
+ vzip2q_u64(vreinterpretq_u64_u8(n), vreinterpretq_u64_u8(k)));
+#else
+ uint8x16_t lm_p0 = vcombine_u8(vget_low_u8(l), vget_low_u8(m));
+ uint8x16_t lm_p1 = vcombine_u8(vget_high_u8(l), vget_high_u8(m));
+ uint8x16_t nk_p0 = vcombine_u8(vget_low_u8(n), vget_low_u8(k));
+ uint8x16_t nk_p1 = vcombine_u8(vget_high_u8(n), vget_high_u8(k));
+#endif
+ // t0 = (L) (P0 + P1) << 8
+ // t1 = (M) (P2 + P3) << 16
+ uint8x16_t t0t1_tmp = veorq_u8(lm_p0, lm_p1);
+ uint8x16_t t0t1_h = vandq_u8(lm_p1, k48_32);
+ uint8x16_t t0t1_l = veorq_u8(t0t1_tmp, t0t1_h);
+
+ // t2 = (N) (P4 + P5) << 24
+ // t3 = (K) (P6 + P7) << 32
+ uint8x16_t t2t3_tmp = veorq_u8(nk_p0, nk_p1);
+ uint8x16_t t2t3_h = vandq_u8(nk_p1, k16_00);
+ uint8x16_t t2t3_l = veorq_u8(t2t3_tmp, t2t3_h);
+
+ // De-interleave
+#if defined(__aarch64__)
+ uint8x16_t t0 = vreinterpretq_u8_u64(
+ vuzp1q_u64(vreinterpretq_u64_u8(t0t1_l), vreinterpretq_u64_u8(t0t1_h)));
+ uint8x16_t t1 = vreinterpretq_u8_u64(
+ vuzp2q_u64(vreinterpretq_u64_u8(t0t1_l), vreinterpretq_u64_u8(t0t1_h)));
+ uint8x16_t t2 = vreinterpretq_u8_u64(
+ vuzp1q_u64(vreinterpretq_u64_u8(t2t3_l), vreinterpretq_u64_u8(t2t3_h)));
+ uint8x16_t t3 = vreinterpretq_u8_u64(
+ vuzp2q_u64(vreinterpretq_u64_u8(t2t3_l), vreinterpretq_u64_u8(t2t3_h)));
+#else
+ uint8x16_t t1 = vcombine_u8(vget_high_u8(t0t1_l), vget_high_u8(t0t1_h));
+ uint8x16_t t0 = vcombine_u8(vget_low_u8(t0t1_l), vget_low_u8(t0t1_h));
+ uint8x16_t t3 = vcombine_u8(vget_high_u8(t2t3_l), vget_high_u8(t2t3_h));
+ uint8x16_t t2 = vcombine_u8(vget_low_u8(t2t3_l), vget_low_u8(t2t3_h));
+#endif
+ // Shift the cross products
+ uint8x16_t t0_shift = vextq_u8(t0, t0, 15); // t0 << 8
+ uint8x16_t t1_shift = vextq_u8(t1, t1, 14); // t1 << 16
+ uint8x16_t t2_shift = vextq_u8(t2, t2, 13); // t2 << 24
+ uint8x16_t t3_shift = vextq_u8(t3, t3, 12); // t3 << 32
+
+ // Accumulate the products
+ uint8x16_t cross1 = veorq_u8(t0_shift, t1_shift);
+ uint8x16_t cross2 = veorq_u8(t2_shift, t3_shift);
+ uint8x16_t mix = veorq_u8(d, cross1);
+ uint8x16_t r = veorq_u8(mix, cross2);
+ return vreinterpretq_u64_u8(r);
+}
+#endif // ARMv7 polyfill
+
+// Perform a carry-less multiplication of two 64-bit integers, selected from a
+// and b according to imm8, and store the results in dst.
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_clmulepi64_si128
+FORCE_INLINE __m128i _mm_clmulepi64_si128(__m128i _a, __m128i _b, const int imm)
+{
+ uint64x2_t a = vreinterpretq_u64_m128i(_a);
+ uint64x2_t b = vreinterpretq_u64_m128i(_b);
+ switch (imm & 0x11) {
+ case 0x00:
+ return vreinterpretq_m128i_u64(
+ _sse2neon_vmull_p64(vget_low_u64(a), vget_low_u64(b)));
+ case 0x01:
+ return vreinterpretq_m128i_u64(
+ _sse2neon_vmull_p64(vget_high_u64(a), vget_low_u64(b)));
+ case 0x10:
+ return vreinterpretq_m128i_u64(
+ _sse2neon_vmull_p64(vget_low_u64(a), vget_high_u64(b)));
+ case 0x11:
+ return vreinterpretq_m128i_u64(
+ _sse2neon_vmull_p64(vget_high_u64(a), vget_high_u64(b)));
+ default:
+ abort();
+ }
+}
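+// Usage note (illustrative): imm selects which 64-bit halves are multiplied;
+// bit 0 picks the low/high half of a and bit 4 picks the low/high half of b:
+//   __m128i lo_lo = _mm_clmulepi64_si128(a, b, 0x00); // a[63:0]   * b[63:0]
+//   __m128i hi_hi = _mm_clmulepi64_si128(a, b, 0x11); // a[127:64] * b[127:64]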
+
+#if !defined(__ARM_FEATURE_CRYPTO)
+/* clang-format off */
+#define SSE2NEON_AES_DATA(w) \
+ { \
+ w(0x63), w(0x7c), w(0x77), w(0x7b), w(0xf2), w(0x6b), w(0x6f), \
+ w(0xc5), w(0x30), w(0x01), w(0x67), w(0x2b), w(0xfe), w(0xd7), \
+ w(0xab), w(0x76), w(0xca), w(0x82), w(0xc9), w(0x7d), w(0xfa), \
+ w(0x59), w(0x47), w(0xf0), w(0xad), w(0xd4), w(0xa2), w(0xaf), \
+ w(0x9c), w(0xa4), w(0x72), w(0xc0), w(0xb7), w(0xfd), w(0x93), \
+ w(0x26), w(0x36), w(0x3f), w(0xf7), w(0xcc), w(0x34), w(0xa5), \
+ w(0xe5), w(0xf1), w(0x71), w(0xd8), w(0x31), w(0x15), w(0x04), \
+ w(0xc7), w(0x23), w(0xc3), w(0x18), w(0x96), w(0x05), w(0x9a), \
+ w(0x07), w(0x12), w(0x80), w(0xe2), w(0xeb), w(0x27), w(0xb2), \
+ w(0x75), w(0x09), w(0x83), w(0x2c), w(0x1a), w(0x1b), w(0x6e), \
+ w(0x5a), w(0xa0), w(0x52), w(0x3b), w(0xd6), w(0xb3), w(0x29), \
+ w(0xe3), w(0x2f), w(0x84), w(0x53), w(0xd1), w(0x00), w(0xed), \
+ w(0x20), w(0xfc), w(0xb1), w(0x5b), w(0x6a), w(0xcb), w(0xbe), \
+ w(0x39), w(0x4a), w(0x4c), w(0x58), w(0xcf), w(0xd0), w(0xef), \
+ w(0xaa), w(0xfb), w(0x43), w(0x4d), w(0x33), w(0x85), w(0x45), \
+ w(0xf9), w(0x02), w(0x7f), w(0x50), w(0x3c), w(0x9f), w(0xa8), \
+ w(0x51), w(0xa3), w(0x40), w(0x8f), w(0x92), w(0x9d), w(0x38), \
+ w(0xf5), w(0xbc), w(0xb6), w(0xda), w(0x21), w(0x10), w(0xff), \
+ w(0xf3), w(0xd2), w(0xcd), w(0x0c), w(0x13), w(0xec), w(0x5f), \
+ w(0x97), w(0x44), w(0x17), w(0xc4), w(0xa7), w(0x7e), w(0x3d), \
+ w(0x64), w(0x5d), w(0x19), w(0x73), w(0x60), w(0x81), w(0x4f), \
+ w(0xdc), w(0x22), w(0x2a), w(0x90), w(0x88), w(0x46), w(0xee), \
+ w(0xb8), w(0x14), w(0xde), w(0x5e), w(0x0b), w(0xdb), w(0xe0), \
+ w(0x32), w(0x3a), w(0x0a), w(0x49), w(0x06), w(0x24), w(0x5c), \
+ w(0xc2), w(0xd3), w(0xac), w(0x62), w(0x91), w(0x95), w(0xe4), \
+ w(0x79), w(0xe7), w(0xc8), w(0x37), w(0x6d), w(0x8d), w(0xd5), \
+ w(0x4e), w(0xa9), w(0x6c), w(0x56), w(0xf4), w(0xea), w(0x65), \
+ w(0x7a), w(0xae), w(0x08), w(0xba), w(0x78), w(0x25), w(0x2e), \
+ w(0x1c), w(0xa6), w(0xb4), w(0xc6), w(0xe8), w(0xdd), w(0x74), \
+ w(0x1f), w(0x4b), w(0xbd), w(0x8b), w(0x8a), w(0x70), w(0x3e), \
+ w(0xb5), w(0x66), w(0x48), w(0x03), w(0xf6), w(0x0e), w(0x61), \
+ w(0x35), w(0x57), w(0xb9), w(0x86), w(0xc1), w(0x1d), w(0x9e), \
+ w(0xe1), w(0xf8), w(0x98), w(0x11), w(0x69), w(0xd9), w(0x8e), \
+ w(0x94), w(0x9b), w(0x1e), w(0x87), w(0xe9), w(0xce), w(0x55), \
+ w(0x28), w(0xdf), w(0x8c), w(0xa1), w(0x89), w(0x0d), w(0xbf), \
+ w(0xe6), w(0x42), w(0x68), w(0x41), w(0x99), w(0x2d), w(0x0f), \
+ w(0xb0), w(0x54), w(0xbb), w(0x16) \
+ }
+/* clang-format on */
+
+/* X Macro trick. See https://en.wikipedia.org/wiki/X_Macro */
+#define SSE2NEON_AES_H0(x) (x)
+static const uint8_t SSE2NEON_sbox[256] = SSE2NEON_AES_DATA(SSE2NEON_AES_H0);
+#undef SSE2NEON_AES_H0
+
+// In the absence of crypto extensions, implement aesenc using regular NEON
+// intrinsics instead. See:
+// https://www.workofard.com/2017/01/accelerated-aes-for-the-arm64-linux-kernel/
+// https://www.workofard.com/2017/07/ghash-for-low-end-cores/ and
+// https://github.com/ColinIanKing/linux-next-mirror/blob/b5f466091e130caaf0735976648f72bd5e09aa84/crypto/aegis128-neon-inner.c#L52
+// for more information. Reproduced with permission of the author.
+FORCE_INLINE __m128i _mm_aesenc_si128(__m128i EncBlock, __m128i RoundKey)
+{
+#if defined(__aarch64__)
+ static const uint8_t shift_rows[] = {0x0, 0x5, 0xa, 0xf, 0x4, 0x9,
+ 0xe, 0x3, 0x8, 0xd, 0x2, 0x7,
+ 0xc, 0x1, 0x6, 0xb};
+ static const uint8_t ror32by8[] = {0x1, 0x2, 0x3, 0x0, 0x5, 0x6, 0x7, 0x4,
+ 0x9, 0xa, 0xb, 0x8, 0xd, 0xe, 0xf, 0xc};
+
+ uint8x16_t v;
+ uint8x16_t w = vreinterpretq_u8_m128i(EncBlock);
+
+ // shift rows
+ w = vqtbl1q_u8(w, vld1q_u8(shift_rows));
+
+ // sub bytes
+ v = vqtbl4q_u8(_sse2neon_vld1q_u8_x4(SSE2NEON_sbox), w);
+ v = vqtbx4q_u8(v, _sse2neon_vld1q_u8_x4(SSE2NEON_sbox + 0x40), w - 0x40);
+ v = vqtbx4q_u8(v, _sse2neon_vld1q_u8_x4(SSE2NEON_sbox + 0x80), w - 0x80);
+ v = vqtbx4q_u8(v, _sse2neon_vld1q_u8_x4(SSE2NEON_sbox + 0xc0), w - 0xc0);
+
+ // mix columns
+ w = (v << 1) ^ (uint8x16_t)(((int8x16_t) v >> 7) & 0x1b);
+ w ^= (uint8x16_t) vrev32q_u16((uint16x8_t) v);
+ w ^= vqtbl1q_u8(v ^ w, vld1q_u8(ror32by8));
+
+ // add round key
+ return vreinterpretq_m128i_u8(w) ^ RoundKey;
+
+#else /* ARMv7-A NEON implementation */
+#define SSE2NEON_AES_B2W(b0, b1, b2, b3) \
+ (((uint32_t)(b3) << 24) | ((uint32_t)(b2) << 16) | ((uint32_t)(b1) << 8) | \
+ (b0))
+#define SSE2NEON_AES_F2(x) ((x << 1) ^ (((x >> 7) & 1) * 0x011b /* WPOLY */))
+#define SSE2NEON_AES_F3(x) (SSE2NEON_AES_F2(x) ^ x)
+#define SSE2NEON_AES_U0(p) \
+ SSE2NEON_AES_B2W(SSE2NEON_AES_F2(p), p, p, SSE2NEON_AES_F3(p))
+#define SSE2NEON_AES_U1(p) \
+ SSE2NEON_AES_B2W(SSE2NEON_AES_F3(p), SSE2NEON_AES_F2(p), p, p)
+#define SSE2NEON_AES_U2(p) \
+ SSE2NEON_AES_B2W(p, SSE2NEON_AES_F3(p), SSE2NEON_AES_F2(p), p)
+#define SSE2NEON_AES_U3(p) \
+ SSE2NEON_AES_B2W(p, p, SSE2NEON_AES_F3(p), SSE2NEON_AES_F2(p))
+ static const uint32_t ALIGN_STRUCT(16) aes_table[4][256] = {
+ SSE2NEON_AES_DATA(SSE2NEON_AES_U0),
+ SSE2NEON_AES_DATA(SSE2NEON_AES_U1),
+ SSE2NEON_AES_DATA(SSE2NEON_AES_U2),
+ SSE2NEON_AES_DATA(SSE2NEON_AES_U3),
+ };
+#undef SSE2NEON_AES_B2W
+#undef SSE2NEON_AES_F2
+#undef SSE2NEON_AES_F3
+#undef SSE2NEON_AES_U0
+#undef SSE2NEON_AES_U1
+#undef SSE2NEON_AES_U2
+#undef SSE2NEON_AES_U3
+
+ uint32_t x0 = _mm_cvtsi128_si32(EncBlock);
+ uint32_t x1 = _mm_cvtsi128_si32(_mm_shuffle_epi32(EncBlock, 0x55));
+ uint32_t x2 = _mm_cvtsi128_si32(_mm_shuffle_epi32(EncBlock, 0xAA));
+ uint32_t x3 = _mm_cvtsi128_si32(_mm_shuffle_epi32(EncBlock, 0xFF));
+
+ __m128i out = _mm_set_epi32(
+ (aes_table[0][x3 & 0xff] ^ aes_table[1][(x0 >> 8) & 0xff] ^
+ aes_table[2][(x1 >> 16) & 0xff] ^ aes_table[3][x2 >> 24]),
+ (aes_table[0][x2 & 0xff] ^ aes_table[1][(x3 >> 8) & 0xff] ^
+ aes_table[2][(x0 >> 16) & 0xff] ^ aes_table[3][x1 >> 24]),
+ (aes_table[0][x1 & 0xff] ^ aes_table[1][(x2 >> 8) & 0xff] ^
+ aes_table[2][(x3 >> 16) & 0xff] ^ aes_table[3][x0 >> 24]),
+ (aes_table[0][x0 & 0xff] ^ aes_table[1][(x1 >> 8) & 0xff] ^
+ aes_table[2][(x2 >> 16) & 0xff] ^ aes_table[3][x3 >> 24]));
+
+ return _mm_xor_si128(out, RoundKey);
+#endif
+}
+
+// Perform the last round of an AES encryption flow on data (state) in a using
+// the round key in RoundKey, and store the result in dst.
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_aesenclast_si128
+FORCE_INLINE __m128i _mm_aesenclast_si128(__m128i a, __m128i RoundKey)
+{
+    /* FIXME: optimize for NEON */
+ uint8_t v[4][4] = {
+ [0] = {SSE2NEON_sbox[vreinterpretq_nth_u8_m128i(a, 0)],
+ SSE2NEON_sbox[vreinterpretq_nth_u8_m128i(a, 5)],
+ SSE2NEON_sbox[vreinterpretq_nth_u8_m128i(a, 10)],
+ SSE2NEON_sbox[vreinterpretq_nth_u8_m128i(a, 15)]},
+ [1] = {SSE2NEON_sbox[vreinterpretq_nth_u8_m128i(a, 4)],
+ SSE2NEON_sbox[vreinterpretq_nth_u8_m128i(a, 9)],
+ SSE2NEON_sbox[vreinterpretq_nth_u8_m128i(a, 14)],
+ SSE2NEON_sbox[vreinterpretq_nth_u8_m128i(a, 3)]},
+ [2] = {SSE2NEON_sbox[vreinterpretq_nth_u8_m128i(a, 8)],
+ SSE2NEON_sbox[vreinterpretq_nth_u8_m128i(a, 13)],
+ SSE2NEON_sbox[vreinterpretq_nth_u8_m128i(a, 2)],
+ SSE2NEON_sbox[vreinterpretq_nth_u8_m128i(a, 7)]},
+ [3] = {SSE2NEON_sbox[vreinterpretq_nth_u8_m128i(a, 12)],
+ SSE2NEON_sbox[vreinterpretq_nth_u8_m128i(a, 1)],
+ SSE2NEON_sbox[vreinterpretq_nth_u8_m128i(a, 6)],
+ SSE2NEON_sbox[vreinterpretq_nth_u8_m128i(a, 11)]},
+ };
+ for (int i = 0; i < 16; i++)
+ vreinterpretq_nth_u8_m128i(a, i) =
+ v[i / 4][i % 4] ^ vreinterpretq_nth_u8_m128i(RoundKey, i);
+ return a;
+}
+
+// Emits the Advanced Encryption Standard (AES) instruction aeskeygenassist.
+// This instruction generates a round key for AES encryption. See
+// https://kazakov.life/2017/11/01/cryptocurrency-mining-on-ios-devices/
+// for details.
+//
+// https://msdn.microsoft.com/en-us/library/cc714138(v=vs.120).aspx
+FORCE_INLINE __m128i _mm_aeskeygenassist_si128(__m128i key, const int rcon)
+{
+ uint32_t X1 = _mm_cvtsi128_si32(_mm_shuffle_epi32(key, 0x55));
+ uint32_t X3 = _mm_cvtsi128_si32(_mm_shuffle_epi32(key, 0xFF));
+ for (int i = 0; i < 4; ++i) {
+ ((uint8_t *) &X1)[i] = SSE2NEON_sbox[((uint8_t *) &X1)[i]];
+ ((uint8_t *) &X3)[i] = SSE2NEON_sbox[((uint8_t *) &X3)[i]];
+ }
+ return _mm_set_epi32(((X3 >> 8) | (X3 << 24)) ^ rcon, X3,
+ ((X1 >> 8) | (X1 << 24)) ^ rcon, X1);
+}
+#undef SSE2NEON_AES_DATA
+
+#else /* __ARM_FEATURE_CRYPTO */
+// Implements the equivalent of 'aesenc' by combining AESE (with an empty key)
+// and AESMC, then manually applying the real key as an xor operation. This
+// unfortunately means an additional xor op; however, the compiler should be
+// able to optimize it away for repeated calls. See
+// https://blog.michaelbrase.com/2018/05/08/emulating-x86-aes-intrinsics-on-armv8-a
+// for more details.
+FORCE_INLINE __m128i _mm_aesenc_si128(__m128i a, __m128i b)
+{
+ return vreinterpretq_m128i_u8(
+ vaesmcq_u8(vaeseq_u8(vreinterpretq_u8_m128i(a), vdupq_n_u8(0))) ^
+ vreinterpretq_u8_m128i(b));
+}
+
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_aesenclast_si128
+FORCE_INLINE __m128i _mm_aesenclast_si128(__m128i a, __m128i RoundKey)
+{
+ return _mm_xor_si128(vreinterpretq_m128i_u8(vaeseq_u8(
+ vreinterpretq_u8_m128i(a), vdupq_n_u8(0))),
+ RoundKey);
+}
+
+FORCE_INLINE __m128i _mm_aeskeygenassist_si128(__m128i a, const int rcon)
+{
+ // AESE does ShiftRows and SubBytes on A
+ uint8x16_t u8 = vaeseq_u8(vreinterpretq_u8_m128i(a), vdupq_n_u8(0));
+
+ uint8x16_t dest = {
+ // Undo ShiftRows step from AESE and extract X1 and X3
+ u8[0x4], u8[0x1], u8[0xE], u8[0xB], // SubBytes(X1)
+ u8[0x1], u8[0xE], u8[0xB], u8[0x4], // ROT(SubBytes(X1))
+ u8[0xC], u8[0x9], u8[0x6], u8[0x3], // SubBytes(X3)
+ u8[0x9], u8[0x6], u8[0x3], u8[0xC], // ROT(SubBytes(X3))
+ };
+ uint32x4_t r = {0, (unsigned) rcon, 0, (unsigned) rcon};
+ return vreinterpretq_m128i_u8(dest) ^ vreinterpretq_m128i_u32(r);
+}
+#endif
+
+/* Streaming Extensions */
+
+// Guarantees that every preceding store is globally visible before any
+// subsequent store.
+// https://msdn.microsoft.com/en-us/library/5h2w73d1%28v=vs.90%29.aspx
+FORCE_INLINE void _mm_sfence(void)
+{
+ __sync_synchronize();
+}
+
+// Store 128-bits (composed of 4 packed single-precision (32-bit) floating-
+// point elements) from a into memory using a non-temporal memory hint.
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_stream_ps
+FORCE_INLINE void _mm_stream_ps(float *p, __m128 a)
+{
+#if __has_builtin(__builtin_nontemporal_store)
+ __builtin_nontemporal_store(a, (float32x4_t *) p);
+#else
+ vst1q_f32(p, vreinterpretq_f32_m128(a));
+#endif
+}
+
+// Stores the data in a to the address p without polluting the caches. If the
+// cache line containing address p is already in the cache, the cache will be
+// updated.
+// https://msdn.microsoft.com/en-us/library/ba08y07y%28v=vs.90%29.aspx
+FORCE_INLINE void _mm_stream_si128(__m128i *p, __m128i a)
+{
+#if __has_builtin(__builtin_nontemporal_store)
+ __builtin_nontemporal_store(a, p);
+#else
+ vst1q_s64((int64_t *) p, vreinterpretq_s64_m128i(a));
+#endif
+}
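+// Usage note (illustrative): as with the x86 originals, the non-temporal
+// stores above expect a 16-byte-aligned destination, e.g.
+//   ALIGN_STRUCT(16) int32_t out[4];
+//   _mm_stream_si128((__m128i *) out, _mm_set1_epi32(42));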
+
+// Load 128-bits of integer data from memory into dst using a non-temporal
+// memory hint. mem_addr must be aligned on a 16-byte boundary or a
+// general-protection exception may be generated.
+//
+// dst[127:0] := MEM[mem_addr+127:mem_addr]
+//
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_stream_load_si128
+FORCE_INLINE __m128i _mm_stream_load_si128(__m128i *p)
+{
+#if __has_builtin(__builtin_nontemporal_load)
+ return __builtin_nontemporal_load(p);
+#else
+ return vreinterpretq_m128i_s64(vld1q_s64((int64_t *) p));
+#endif
+}
+
+// The cache line containing p is flushed and invalidated from all caches in
+// the coherency domain.
+// https://msdn.microsoft.com/en-us/library/ba08y07y(v=vs.100).aspx
+FORCE_INLINE void _mm_clflush(void const *p)
+{
+ (void) p;
+    // No NEON/ARM equivalent is used here; this is intentionally a no-op.
+}
+
+// Allocate aligned blocks of memory.
+// https://software.intel.com/en-us/cpp-compiler-developer-guide-and-reference-allocating-and-freeing-aligned-memory-blocks
+FORCE_INLINE void *_mm_malloc(size_t size, size_t align)
+{
+ void *ptr;
+ if (align == 1)
+ return malloc(size);
+ if (align == 2 || (sizeof(void *) == 8 && align == 4))
+ align = sizeof(void *);
+ if (!posix_memalign(&ptr, align, size))
+ return ptr;
+ return NULL;
+}
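+// Usage sketch (illustrative): allocate a 64-byte-aligned buffer and release
+// it with the matching _mm_free defined below.
+//   float *buf = (float *) _mm_malloc(1024 * sizeof(float), 64);
+//   if (buf) { /* ... use buf ... */ _mm_free(buf); }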
+
+// Free aligned memory that was allocated with _mm_malloc.
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_free
+FORCE_INLINE void _mm_free(void *addr)
+{
+ free(addr);
+}
+
+// Starting with the initial value in crc, accumulates a CRC32 value for
+// unsigned 8-bit integer v.
+// https://msdn.microsoft.com/en-us/library/bb514036(v=vs.100)
+FORCE_INLINE uint32_t _mm_crc32_u8(uint32_t crc, uint8_t v)
+{
+#if defined(__aarch64__) && defined(__ARM_FEATURE_CRC32)
+ __asm__ __volatile__("crc32cb %w[c], %w[c], %w[v]\n\t"
+ : [c] "+r"(crc)
+ : [v] "r"(v));
+#else
+ crc ^= v;
+ for (int bit = 0; bit < 8; bit++) {
+ if (crc & 1)
+ crc = (crc >> 1) ^ UINT32_C(0x82f63b78);
+ else
+ crc = (crc >> 1);
+ }
+#endif
+ return crc;
+}
+
+// Starting with the initial value in crc, accumulates a CRC32 value for
+// unsigned 16-bit integer v.
+// https://msdn.microsoft.com/en-us/library/bb531411(v=vs.100)
+FORCE_INLINE uint32_t _mm_crc32_u16(uint32_t crc, uint16_t v)
+{
+#if defined(__aarch64__) && defined(__ARM_FEATURE_CRC32)
+ __asm__ __volatile__("crc32ch %w[c], %w[c], %w[v]\n\t"
+ : [c] "+r"(crc)
+ : [v] "r"(v));
+#else
+ crc = _mm_crc32_u8(crc, v & 0xff);
+ crc = _mm_crc32_u8(crc, (v >> 8) & 0xff);
+#endif
+ return crc;
+}
+
+// Starting with the initial value in crc, accumulates a CRC32 value for
+// unsigned 32-bit integer v.
+// https://msdn.microsoft.com/en-us/library/bb531394(v=vs.100)
+FORCE_INLINE uint32_t _mm_crc32_u32(uint32_t crc, uint32_t v)
+{
+#if defined(__aarch64__) && defined(__ARM_FEATURE_CRC32)
+ __asm__ __volatile__("crc32cw %w[c], %w[c], %w[v]\n\t"
+ : [c] "+r"(crc)
+ : [v] "r"(v));
+#else
+ crc = _mm_crc32_u16(crc, v & 0xffff);
+ crc = _mm_crc32_u16(crc, (v >> 16) & 0xffff);
+#endif
+ return crc;
+}
+
+// Starting with the initial value in crc, accumulates a CRC32 value for
+// unsigned 64-bit integer v.
+// https://msdn.microsoft.com/en-us/library/bb514033(v=vs.100)
+FORCE_INLINE uint64_t _mm_crc32_u64(uint64_t crc, uint64_t v)
+{
+#if defined(__aarch64__) && defined(__ARM_FEATURE_CRC32)
+ __asm__ __volatile__("crc32cx %w[c], %w[c], %x[v]\n\t"
+ : [c] "+r"(crc)
+ : [v] "r"(v));
+#else
+ crc = _mm_crc32_u32((uint32_t)(crc), v & 0xffffffff);
+ crc = _mm_crc32_u32((uint32_t)(crc), (v >> 32) & 0xffffffff);
+#endif
+ return crc;
+}
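+// Usage sketch (illustrative): CRC-32C over a byte buffer by chaining the
+// 8-bit step; one common convention seeds crc with ~0 and inverts the result.
+//   uint32_t crc = 0xFFFFFFFFu;
+//   for (size_t i = 0; i < len; i++)
+//       crc = _mm_crc32_u8(crc, data[i]);
+//   crc = ~crc;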
+
+#if defined(__GNUC__) || defined(__clang__)
+#pragma pop_macro("ALIGN_STRUCT")
+#pragma pop_macro("FORCE_INLINE")
+#endif
+
+#if defined(__GNUC__) && !defined(__clang__)
+#pragma GCC pop_options
+#endif
+
+#endif
diff --git a/thirdparty/embree-aarch64/common/simd/avx.h b/thirdparty/embree/common/simd/avx.h
index c840e41805..d3100306ee 100644
--- a/thirdparty/embree-aarch64/common/simd/avx.h
+++ b/thirdparty/embree/common/simd/avx.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
diff --git a/thirdparty/embree-aarch64/common/simd/avx512.h b/thirdparty/embree/common/simd/avx512.h
index 25414ab5b1..d43bbacea1 100644
--- a/thirdparty/embree-aarch64/common/simd/avx512.h
+++ b/thirdparty/embree/common/simd/avx512.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
diff --git a/thirdparty/embree-aarch64/common/simd/simd.h b/thirdparty/embree/common/simd/simd.h
index 647851110b..195506b530 100644
--- a/thirdparty/embree-aarch64/common/simd/simd.h
+++ b/thirdparty/embree/common/simd/simd.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
@@ -6,7 +6,7 @@
#include "../math/math.h"
/* include SSE wrapper classes */
-#if defined(__SSE__) || defined(__ARM_NEON)
+#if defined(__SSE__)
# include "sse.h"
#endif
diff --git a/thirdparty/embree-aarch64/common/simd/sse.cpp b/thirdparty/embree/common/simd/sse.cpp
index 1732cfa421..535d6943d8 100644
--- a/thirdparty/embree-aarch64/common/simd/sse.cpp
+++ b/thirdparty/embree/common/simd/sse.cpp
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#include "sse.h"
diff --git a/thirdparty/embree-aarch64/common/simd/sse.h b/thirdparty/embree/common/simd/sse.h
index 6bc818b55b..1465fb4fb0 100644
--- a/thirdparty/embree-aarch64/common/simd/sse.h
+++ b/thirdparty/embree/common/simd/sse.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
@@ -11,7 +11,7 @@
namespace embree
{
-#if (defined(__aarch64__) && defined(BUILD_IOS)) || defined(__SSE4_1__)
+#if defined(__SSE4_1__)
__forceinline __m128 blendv_ps(__m128 f, __m128 t, __m128 mask) {
return _mm_blendv_ps(f,t,mask);
}
diff --git a/thirdparty/embree-aarch64/common/simd/varying.h b/thirdparty/embree/common/simd/varying.h
index 9a46817da9..9b98d326be 100644
--- a/thirdparty/embree-aarch64/common/simd/varying.h
+++ b/thirdparty/embree/common/simd/varying.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
@@ -9,7 +9,7 @@ namespace embree
{
/* Varying numeric types */
template<int N>
- struct vfloat
+ struct vfloat_impl
{
union { float f[N]; int i[N]; };
__forceinline const float& operator [](size_t index) const { assert(index < N); return f[index]; }
@@ -17,7 +17,7 @@ namespace embree
};
template<int N>
- struct vdouble
+ struct vdouble_impl
{
union { double f[N]; long long i[N]; };
__forceinline const double& operator [](size_t index) const { assert(index < N); return f[index]; }
@@ -25,7 +25,7 @@ namespace embree
};
template<int N>
- struct vint
+ struct vint_impl
{
int i[N];
__forceinline const int& operator [](size_t index) const { assert(index < N); return i[index]; }
@@ -33,7 +33,7 @@ namespace embree
};
template<int N>
- struct vuint
+ struct vuint_impl
{
unsigned int i[N];
__forceinline const unsigned int& operator [](size_t index) const { assert(index < N); return i[index]; }
@@ -41,7 +41,7 @@ namespace embree
};
template<int N>
- struct vllong
+ struct vllong_impl
{
long long i[N];
__forceinline const long long& operator [](size_t index) const { assert(index < N); return i[index]; }
@@ -49,20 +49,13 @@ namespace embree
};
/* Varying bool types */
- template<int N> struct vboolf { int i[N]; }; // for float/int
- template<int N> struct vboold { long long i[N]; }; // for double/long long
-
- /* Aliases to default types */
- template<int N> using vreal = vfloat<N>;
- template<int N> using vbool = vboolf<N>;
-
+ template<int N> struct vboolf_impl { int i[N]; }; // for float/int
+ template<int N> struct vboold_impl { long long i[N]; }; // for double/long long
+
/* Varying size constants */
#if defined(__AVX512VL__) // SKX
const int VSIZEX = 8; // default size
const int VSIZEL = 16; // large size
-#elif defined(__AVX512F__) // KNL
- const int VSIZEX = 16;
- const int VSIZEL = 16;
#elif defined(__AVX__)
const int VSIZEX = 8;
const int VSIZEL = 8;
@@ -71,21 +64,41 @@ namespace embree
const int VSIZEL = 4;
#endif
- /* Extends varying size N to optimal or up to max(N, N2) */
- template<int N, int N2 = VSIZEX>
- struct vextend
- {
-#if defined(__AVX512F__) && !defined(__AVX512VL__) // KNL
- /* use 16-wide SIMD calculations on KNL even for 4 and 8 wide SIMD */
- static const int size = (N2 == VSIZEX) ? VSIZEX : N;
- #define SIMD_MODE(N) N, 16
-#else
- /* calculate with same SIMD width otherwise */
- static const int size = N;
- #define SIMD_MODE(N) N, N
-#endif
+ template<int N>
+ struct vtypes {
+ using vbool = vboolf_impl<N>;
+ using vboolf = vboolf_impl<N>;
+ using vboold = vboold_impl<N>;
+ using vint = vint_impl<N>;
+ using vuint = vuint_impl<N>;
+ using vllong = vllong_impl<N>;
+ using vfloat = vfloat_impl<N>;
+ using vdouble = vdouble_impl<N>;
+ };
+
+ template<>
+ struct vtypes<1> {
+ using vbool = bool;
+ using vboolf = bool;
+ using vboold = bool;
+ using vint = int;
+ using vuint = unsigned int;
+ using vllong = long long;
+ using vfloat = float;
+ using vdouble = double;
};
+ /* Aliases to default types */
+ template<int N> using vbool = typename vtypes<N>::vbool;
+ template<int N> using vboolf = typename vtypes<N>::vboolf;
+ template<int N> using vboold = typename vtypes<N>::vboold;
+ template<int N> using vint = typename vtypes<N>::vint;
+ template<int N> using vuint = typename vtypes<N>::vuint;
+ template<int N> using vllong = typename vtypes<N>::vllong;
+ template<int N> using vreal = typename vtypes<N>::vfloat;
+ template<int N> using vfloat = typename vtypes<N>::vfloat;
+ template<int N> using vdouble = typename vtypes<N>::vdouble;
+
/* 4-wide shortcuts */
typedef vfloat<4> vfloat4;
typedef vdouble<4> vdouble4;
diff --git a/thirdparty/embree-aarch64/common/simd/vboold4_avx.h b/thirdparty/embree/common/simd/vboold4_avx.h
index 6505ee56f3..7db0d1c5c1 100644
--- a/thirdparty/embree-aarch64/common/simd/vboold4_avx.h
+++ b/thirdparty/embree/common/simd/vboold4_avx.h
@@ -1,8 +1,16 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
+#define vboolf vboolf_impl
+#define vboold vboold_impl
+#define vint vint_impl
+#define vuint vuint_impl
+#define vllong vllong_impl
+#define vfloat vfloat_impl
+#define vdouble vdouble_impl
+
namespace embree
{
/* 4-wide AVX bool type for 64bit data types*/
@@ -49,19 +57,13 @@ namespace embree
#endif
}
- __forceinline vboold(__m128d a, __m128d b) : vl(a), vh(b) {}
-
////////////////////////////////////////////////////////////////////////////////
/// Constants
////////////////////////////////////////////////////////////////////////////////
__forceinline vboold(FalseTy) : v(_mm256_setzero_pd()) {}
-#if !defined(__aarch64__)
__forceinline vboold(TrueTy) : v(_mm256_cmp_pd(_mm256_setzero_pd(), _mm256_setzero_pd(), _CMP_EQ_OQ)) {}
-#else
- __forceinline vboold(TrueTy) : v(_mm256_cmpeq_pd(_mm256_setzero_pd(), _mm256_setzero_pd())) {}
-#endif
-
+
////////////////////////////////////////////////////////////////////////////////
/// Array Access
////////////////////////////////////////////////////////////////////////////////
@@ -105,10 +107,9 @@ namespace embree
/// Movement/Shifting/Shuffling Functions
////////////////////////////////////////////////////////////////////////////////
-#if !defined(__aarch64__)
__forceinline vboold4 unpacklo(const vboold4& a, const vboold4& b) { return _mm256_unpacklo_pd(a, b); }
__forceinline vboold4 unpackhi(const vboold4& a, const vboold4& b) { return _mm256_unpackhi_pd(a, b); }
-#endif
+
#if defined(__AVX2__)
template<int i0, int i1, int i2, int i3>
@@ -158,3 +159,11 @@ namespace embree
<< a[4] << ", " << a[5] << ", " << a[6] << ", " << a[7] << ">";
}
}
+
+#undef vboolf
+#undef vboold
+#undef vint
+#undef vuint
+#undef vllong
+#undef vfloat
+#undef vdouble
diff --git a/thirdparty/embree-aarch64/common/simd/vboold4_avx512.h b/thirdparty/embree/common/simd/vboold4_avx512.h
index 4fe730d713..ceaad7bba5 100644
--- a/thirdparty/embree-aarch64/common/simd/vboold4_avx512.h
+++ b/thirdparty/embree/common/simd/vboold4_avx512.h
@@ -1,8 +1,16 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
+#define vboolf vboolf_impl
+#define vboold vboold_impl
+#define vint vint_impl
+#define vuint vuint_impl
+#define vllong vllong_impl
+#define vfloat vfloat_impl
+#define vdouble vdouble_impl
+
namespace embree
{
/* 4-wide AVX-512 bool type */
@@ -138,3 +146,11 @@ namespace embree
return cout << ">";
}
}
+
+#undef vboolf
+#undef vboold
+#undef vint
+#undef vuint
+#undef vllong
+#undef vfloat
+#undef vdouble
diff --git a/thirdparty/embree-aarch64/common/simd/vboold8_avx512.h b/thirdparty/embree/common/simd/vboold8_avx512.h
index fdf3f00de5..66d2054872 100644
--- a/thirdparty/embree-aarch64/common/simd/vboold8_avx512.h
+++ b/thirdparty/embree/common/simd/vboold8_avx512.h
@@ -1,8 +1,16 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
+#define vboolf vboolf_impl
+#define vboold vboold_impl
+#define vint vint_impl
+#define vuint vuint_impl
+#define vllong vllong_impl
+#define vfloat vfloat_impl
+#define vdouble vdouble_impl
+
namespace embree
{
/* 8-wide AVX-512 bool type */
@@ -32,25 +40,12 @@ namespace embree
/* return int8 mask */
__forceinline __m128i mask8() const {
-#if defined(__AVX512BW__)
return _mm_movm_epi8(v);
-#else
- const __m512i f = _mm512_set1_epi64(0);
- const __m512i t = _mm512_set1_epi64(-1);
- const __m512i m = _mm512_mask_or_epi64(f,v,t,t);
- return _mm512_cvtepi64_epi8(m);
-#endif
}
/* return int64 mask */
__forceinline __m512i mask64() const {
-#if defined(__AVX512DQ__)
return _mm512_movm_epi64(v);
-#else
- const __m512i f = _mm512_set1_epi64(0);
- const __m512i t = _mm512_set1_epi64(-1);
- return _mm512_mask_or_epi64(f,v,t,t);
-#endif
}
////////////////////////////////////////////////////////////////////////////////
@@ -146,3 +141,11 @@ namespace embree
return cout << ">";
}
}
+
+#undef vboolf
+#undef vboold
+#undef vint
+#undef vuint
+#undef vllong
+#undef vfloat
+#undef vdouble
diff --git a/thirdparty/embree-aarch64/common/simd/vboolf16_avx512.h b/thirdparty/embree/common/simd/vboolf16_avx512.h
index 238cdc8eb9..19841dcea8 100644
--- a/thirdparty/embree-aarch64/common/simd/vboolf16_avx512.h
+++ b/thirdparty/embree/common/simd/vboolf16_avx512.h
@@ -1,8 +1,16 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
+#define vboolf vboolf_impl
+#define vboold vboold_impl
+#define vint vint_impl
+#define vuint vuint_impl
+#define vllong vllong_impl
+#define vfloat vfloat_impl
+#define vdouble vdouble_impl
+
namespace embree
{
/* 16-wide AVX-512 bool type */
@@ -33,25 +41,12 @@ namespace embree
/* return int8 mask */
__forceinline __m128i mask8() const {
-#if defined(__AVX512BW__)
return _mm_movm_epi8(v);
-#else
- const __m512i f = _mm512_set1_epi32(0);
- const __m512i t = _mm512_set1_epi32(-1);
- const __m512i m = _mm512_mask_or_epi32(f,v,t,t);
- return _mm512_cvtepi32_epi8(m);
-#endif
}
/* return int32 mask */
__forceinline __m512i mask32() const {
-#if defined(__AVX512DQ__)
return _mm512_movm_epi32(v);
-#else
- const __m512i f = _mm512_set1_epi32(0);
- const __m512i t = _mm512_set1_epi32(-1);
- return _mm512_mask_or_epi32(f,v,t,t);
-#endif
}
////////////////////////////////////////////////////////////////////////////////
@@ -148,3 +143,11 @@ namespace embree
return cout << ">";
}
}
+
+#undef vboolf
+#undef vboold
+#undef vint
+#undef vuint
+#undef vllong
+#undef vfloat
+#undef vdouble
diff --git a/thirdparty/embree-aarch64/common/simd/vboolf4_avx512.h b/thirdparty/embree/common/simd/vboolf4_avx512.h
index 2ae4c4470e..e65f66b025 100644
--- a/thirdparty/embree-aarch64/common/simd/vboolf4_avx512.h
+++ b/thirdparty/embree/common/simd/vboolf4_avx512.h
@@ -1,8 +1,16 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
+#define vboolf vboolf_impl
+#define vboold vboold_impl
+#define vint vint_impl
+#define vuint vuint_impl
+#define vllong vllong_impl
+#define vfloat vfloat_impl
+#define vdouble vdouble_impl
+
namespace embree
{
/* 4-wide AVX-512 bool type */
@@ -141,3 +149,11 @@ namespace embree
return cout << ">";
}
}
+
+#undef vboolf
+#undef vboold
+#undef vint
+#undef vuint
+#undef vllong
+#undef vfloat
+#undef vdouble
diff --git a/thirdparty/embree-aarch64/common/simd/vboolf4_sse2.h b/thirdparty/embree/common/simd/vboolf4_sse2.h
index ed53b3c783..fa84b1b6ee 100644
--- a/thirdparty/embree-aarch64/common/simd/vboolf4_sse2.h
+++ b/thirdparty/embree/common/simd/vboolf4_sse2.h
@@ -1,8 +1,16 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
+#define vboolf vboolf_impl
+#define vboold vboold_impl
+#define vint vint_impl
+#define vuint vuint_impl
+#define vllong vllong_impl
+#define vfloat vfloat_impl
+#define vdouble vdouble_impl
+
namespace embree
{
/* 4-wide SSE bool type */
@@ -37,13 +45,9 @@ namespace embree
: v(mm_lookupmask_ps[(size_t(b) << 3) | (size_t(a) << 2) | (size_t(b) << 1) | size_t(a)]) {}
__forceinline vboolf(bool a, bool b, bool c, bool d)
: v(mm_lookupmask_ps[(size_t(d) << 3) | (size_t(c) << 2) | (size_t(b) << 1) | size_t(a)]) {}
-#if defined(__aarch64__) && defined(BUILD_IOS)
- __forceinline vboolf(int mask) { v = mm_lookupmask_ps[mask]; }
- __forceinline vboolf(unsigned int mask) { v = mm_lookupmask_ps[mask]; }
-#else
__forceinline vboolf(int mask) { assert(mask >= 0 && mask < 16); v = mm_lookupmask_ps[mask]; }
__forceinline vboolf(unsigned int mask) { assert(mask < 16); v = mm_lookupmask_ps[mask]; }
-#endif
+
/* return int32 mask */
__forceinline __m128i mask32() const {
return _mm_castps_si128(v);
@@ -60,13 +64,8 @@ namespace embree
/// Array Access
////////////////////////////////////////////////////////////////////////////////
-#if defined(__aarch64__) && defined(BUILD_IOS)
- __forceinline bool operator [](size_t index) const { return (_mm_movemask_ps(v) >> index) & 1; }
- __forceinline int& operator [](size_t index) { return i[index]; }
-#else
__forceinline bool operator [](size_t index) const { assert(index < 4); return (_mm_movemask_ps(v) >> index) & 1; }
__forceinline int& operator [](size_t index) { assert(index < 4); return i[index]; }
-#endif
};
////////////////////////////////////////////////////////////////////////////////
@@ -101,7 +100,7 @@ namespace embree
__forceinline vboolf4 operator ==(const vboolf4& a, const vboolf4& b) { return _mm_castsi128_ps(_mm_cmpeq_epi32(a, b)); }
__forceinline vboolf4 select(const vboolf4& m, const vboolf4& t, const vboolf4& f) {
-#if (defined(__aarch64__) && defined(BUILD_IOS)) || defined(__SSE4_1__)
+#if defined(__SSE4_1__)
return _mm_blendv_ps(f, t, m);
#else
return _mm_or_ps(_mm_and_ps(m, t), _mm_andnot_ps(m, f));
@@ -115,17 +114,6 @@ namespace embree
__forceinline vboolf4 unpacklo(const vboolf4& a, const vboolf4& b) { return _mm_unpacklo_ps(a, b); }
__forceinline vboolf4 unpackhi(const vboolf4& a, const vboolf4& b) { return _mm_unpackhi_ps(a, b); }
-#if defined(__aarch64__)
- template<int i0, int i1, int i2, int i3>
- __forceinline vboolf4 shuffle(const vboolf4& v) {
- return vreinterpretq_f32_u8(vqtbl1q_u8( vreinterpretq_u8_s32(v), _MN_SHUFFLE(i0, i1, i2, i3)));
- }
-
- template<int i0, int i1, int i2, int i3>
- __forceinline vboolf4 shuffle(const vboolf4& a, const vboolf4& b) {
- return vreinterpretq_f32_u8(vqtbl2q_u8( (uint8x16x2_t){(uint8x16_t)a.v, (uint8x16_t)b.v}, _MF_SHUFFLE(i0, i1, i2, i3)));
- }
-#else
template<int i0, int i1, int i2, int i3>
__forceinline vboolf4 shuffle(const vboolf4& v) {
return _mm_castsi128_ps(_mm_shuffle_epi32(v, _MM_SHUFFLE(i3, i2, i1, i0)));
@@ -135,8 +123,7 @@ namespace embree
__forceinline vboolf4 shuffle(const vboolf4& a, const vboolf4& b) {
return _mm_shuffle_ps(a, b, _MM_SHUFFLE(i3, i2, i1, i0));
}
-#endif
-
+
template<int i0>
__forceinline vboolf4 shuffle(const vboolf4& v) {
return shuffle<i0,i0,i0,i0>(v);
@@ -148,7 +135,7 @@ namespace embree
template<> __forceinline vboolf4 shuffle<0, 1, 0, 1>(const vboolf4& v) { return _mm_castpd_ps(_mm_movedup_pd(v)); }
#endif
-#if defined(__SSE4_1__) && !defined(__aarch64__)
+#if defined(__SSE4_1__)
template<int dst, int src, int clr> __forceinline vboolf4 insert(const vboolf4& a, const vboolf4& b) { return _mm_insert_ps(a, b, (dst << 4) | (src << 6) | clr); }
template<int dst, int src> __forceinline vboolf4 insert(const vboolf4& a, const vboolf4& b) { return insert<dst, src, 0>(a, b); }
template<int dst> __forceinline vboolf4 insert(const vboolf4& a, const bool b) { return insert<dst, 0>(a, vboolf4(b)); }
@@ -170,15 +157,11 @@ namespace embree
__forceinline bool none(const vboolf4& valid, const vboolf4& b) { return none(valid & b); }
__forceinline size_t movemask(const vboolf4& a) { return _mm_movemask_ps(a); }
-#if defined(__aarch64__) && defined(BUILD_IOS)
-__forceinline size_t popcnt(const vboolf4& a) { return _mm_movemask_popcnt_ps(a); }
-#else
#if defined(__SSE4_2__)
__forceinline size_t popcnt(const vboolf4& a) { return popcnt((size_t)_mm_movemask_ps(a)); }
#else
__forceinline size_t popcnt(const vboolf4& a) { return bool(a[0])+bool(a[1])+bool(a[2])+bool(a[3]); }
#endif
-#endif
////////////////////////////////////////////////////////////////////////////////
/// Get/Set Functions
@@ -196,3 +179,11 @@ __forceinline size_t popcnt(const vboolf4& a) { return _mm_movemask_popcnt_ps(a)
return cout << "<" << a[0] << ", " << a[1] << ", " << a[2] << ", " << a[3] << ">";
}
}
+
+#undef vboolf
+#undef vboold
+#undef vint
+#undef vuint
+#undef vllong
+#undef vfloat
+#undef vdouble
diff --git a/thirdparty/embree-aarch64/common/simd/vboolf8_avx.h b/thirdparty/embree/common/simd/vboolf8_avx.h
index 4f64741b55..ba77cc3c5e 100644
--- a/thirdparty/embree-aarch64/common/simd/vboolf8_avx.h
+++ b/thirdparty/embree/common/simd/vboolf8_avx.h
@@ -1,8 +1,16 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
+#define vboolf vboolf_impl
+#define vboold vboold_impl
+#define vint vint_impl
+#define vuint vuint_impl
+#define vllong vllong_impl
+#define vfloat vfloat_impl
+#define vdouble vdouble_impl
+
namespace embree
{
/* 8-wide AVX bool type */
@@ -68,11 +76,8 @@ namespace embree
////////////////////////////////////////////////////////////////////////////////
__forceinline vboolf(FalseTy) : v(_mm256_setzero_ps()) {}
-#if !defined(__aarch64__)
__forceinline vboolf(TrueTy) : v(_mm256_cmp_ps(_mm256_setzero_ps(), _mm256_setzero_ps(), _CMP_EQ_OQ)) {}
-#else
- __forceinline vboolf(TrueTy) : v(_mm256_cmpeq_ps(_mm256_setzero_ps(), _mm256_setzero_ps())) {}
-#endif
+
////////////////////////////////////////////////////////////////////////////////
/// Array Access
////////////////////////////////////////////////////////////////////////////////
@@ -187,3 +192,11 @@ namespace embree
<< a[4] << ", " << a[5] << ", " << a[6] << ", " << a[7] << ">";
}
}
+
+#undef vboolf
+#undef vboold
+#undef vint
+#undef vuint
+#undef vllong
+#undef vfloat
+#undef vdouble
diff --git a/thirdparty/embree-aarch64/common/simd/vboolf8_avx512.h b/thirdparty/embree/common/simd/vboolf8_avx512.h
index 2a52b554c7..73ff5666e1 100644
--- a/thirdparty/embree-aarch64/common/simd/vboolf8_avx512.h
+++ b/thirdparty/embree/common/simd/vboolf8_avx512.h
@@ -1,8 +1,16 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
+#define vboolf vboolf_impl
+#define vboold vboold_impl
+#define vint vint_impl
+#define vuint vuint_impl
+#define vllong vllong_impl
+#define vfloat vfloat_impl
+#define vdouble vdouble_impl
+
namespace embree
{
/* 8-wide AVX-512 bool type */
@@ -141,3 +149,11 @@ namespace embree
return cout << ">";
}
}
+
+#undef vboolf
+#undef vboold
+#undef vint
+#undef vuint
+#undef vllong
+#undef vfloat
+#undef vdouble
diff --git a/thirdparty/embree-aarch64/common/simd/vdouble4_avx.h b/thirdparty/embree/common/simd/vdouble4_avx.h
index 1f65b45d7e..55326de7dd 100644
--- a/thirdparty/embree-aarch64/common/simd/vdouble4_avx.h
+++ b/thirdparty/embree/common/simd/vdouble4_avx.h
@@ -1,8 +1,16 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
+#define vboolf vboolf_impl
+#define vboold vboold_impl
+#define vint vint_impl
+#define vuint vuint_impl
+#define vllong vllong_impl
+#define vfloat vfloat_impl
+#define vdouble vdouble_impl
+
namespace embree
{
/* 4-wide AVX 64-bit double type */
@@ -181,20 +189,13 @@ namespace embree
__forceinline vboold4 operator >=(const vdouble4& a, const vdouble4& b) { return _mm256_cmp_pd_mask(a, b, _MM_CMPINT_GE); }
__forceinline vboold4 operator > (const vdouble4& a, const vdouble4& b) { return _mm256_cmp_pd_mask(a, b, _MM_CMPINT_GT); }
__forceinline vboold4 operator <=(const vdouble4& a, const vdouble4& b) { return _mm256_cmp_pd_mask(a, b, _MM_CMPINT_LE); }
-#elif !defined(__aarch64__)
+#else
__forceinline vboold4 operator ==(const vdouble4& a, const vdouble4& b) { return _mm256_cmp_pd(a, b, _CMP_EQ_OQ); }
__forceinline vboold4 operator !=(const vdouble4& a, const vdouble4& b) { return _mm256_cmp_pd(a, b, _CMP_NEQ_UQ); }
__forceinline vboold4 operator < (const vdouble4& a, const vdouble4& b) { return _mm256_cmp_pd(a, b, _CMP_LT_OS); }
__forceinline vboold4 operator >=(const vdouble4& a, const vdouble4& b) { return _mm256_cmp_pd(a, b, _CMP_NLT_US); }
__forceinline vboold4 operator > (const vdouble4& a, const vdouble4& b) { return _mm256_cmp_pd(a, b, _CMP_NLE_US); }
__forceinline vboold4 operator <=(const vdouble4& a, const vdouble4& b) { return _mm256_cmp_pd(a, b, _CMP_LE_OS); }
-#else
- __forceinline vboold4 operator ==(const vdouble4& a, const vdouble4& b) { return _mm256_cmpeq_pd(a, b); }
- __forceinline vboold4 operator !=(const vdouble4& a, const vdouble4& b) { return _mm256_cmpneq_pd(a, b); }
- __forceinline vboold4 operator < (const vdouble4& a, const vdouble4& b) { return _mm256_cmplt_pd(a, b); }
- __forceinline vboold4 operator >=(const vdouble4& a, const vdouble4& b) { return _mm256_cmpnlt_pd(a, b); }
- __forceinline vboold4 operator > (const vdouble4& a, const vdouble4& b) { return _mm256_cmpnle_pd(a, b); }
- __forceinline vboold4 operator <=(const vdouble4& a, const vdouble4& b) { return _mm256_cmple_pd(a, b); }
#endif
__forceinline vboold4 operator ==(const vdouble4& a, double b) { return a == vdouble4(b); }
@@ -246,18 +247,6 @@ namespace embree
#endif
}
- __forceinline void xchg(const vboold4& m, vdouble4& a, vdouble4& b) {
- const vdouble4 c = a; a = select(m,b,a); b = select(m,c,b);
- }
-
- __forceinline vboold4 test(const vdouble4& a, const vdouble4& b) {
-#if defined(__AVX512VL__)
- return _mm256_test_epi64_mask(_mm256_castpd_si256(a),_mm256_castpd_si256(b));
-#else
- return _mm256_testz_si256(_mm256_castpd_si256(a),_mm256_castpd_si256(b));
-#endif
- }
-
////////////////////////////////////////////////////////////////////////////////
// Movement/Shifting/Shuffling Functions
////////////////////////////////////////////////////////////////////////////////
@@ -322,3 +311,11 @@ namespace embree
return cout;
}
}
+
+#undef vboolf
+#undef vboold
+#undef vint
+#undef vuint
+#undef vllong
+#undef vfloat
+#undef vdouble
diff --git a/thirdparty/embree-aarch64/common/simd/vdouble8_avx512.h b/thirdparty/embree/common/simd/vdouble8_avx512.h
index 4eec7d2f6a..98d21bfe4a 100644
--- a/thirdparty/embree-aarch64/common/simd/vdouble8_avx512.h
+++ b/thirdparty/embree/common/simd/vdouble8_avx512.h
@@ -1,8 +1,16 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
+#define vboolf vboolf_impl
+#define vboold vboold_impl
+#define vint vint_impl
+#define vuint vuint_impl
+#define vllong vllong_impl
+#define vfloat vfloat_impl
+#define vdouble vdouble_impl
+
namespace embree
{
/* 8-wide AVX-512 64-bit double type */
@@ -91,15 +99,6 @@ namespace embree
_mm512_mask_store_pd(addr, mask, v2);
}
- /* pass by value to avoid compiler generating inefficient code */
- static __forceinline void storeu_compact(const vboold8 mask,void * addr, const vdouble8& reg) {
- _mm512_mask_compressstoreu_pd(addr, mask, reg);
- }
-
- static __forceinline vdouble8 compact64bit(const vboold8& mask, vdouble8& v) {
- return _mm512_mask_compress_pd(v, mask, v);
- }
-
static __forceinline vdouble8 compact(const vboold8& mask, vdouble8& v) {
return _mm512_mask_compress_pd(v, mask, v);
}
@@ -260,18 +259,6 @@ namespace embree
return _mm512_mask_or_pd(f,m,t,t);
}
- __forceinline void xchg(const vboold8& m, vdouble8& a, vdouble8& b) {
- const vdouble8 c = a; a = select(m,b,a); b = select(m,c,b);
- }
-
- __forceinline vboold8 test(const vboold8& m, const vdouble8& a, const vdouble8& b) {
- return _mm512_mask_test_epi64_mask(m,_mm512_castpd_si512(a),_mm512_castpd_si512(b));
- }
-
- __forceinline vboold8 test(const vdouble8& a, const vdouble8& b) {
- return _mm512_test_epi64_mask(_mm512_castpd_si512(a),_mm512_castpd_si512(b));
- }
-
////////////////////////////////////////////////////////////////////////////////
// Movement/Shifting/Shuffling Functions
////////////////////////////////////////////////////////////////////////////////
@@ -354,3 +341,11 @@ namespace embree
return cout;
}
}
+
+#undef vboolf
+#undef vboold
+#undef vint
+#undef vuint
+#undef vllong
+#undef vfloat
+#undef vdouble
diff --git a/thirdparty/embree-aarch64/common/simd/vfloat16_avx512.h b/thirdparty/embree/common/simd/vfloat16_avx512.h
index aed2419b77..9f1e2459c4 100644
--- a/thirdparty/embree-aarch64/common/simd/vfloat16_avx512.h
+++ b/thirdparty/embree/common/simd/vfloat16_avx512.h
@@ -1,8 +1,16 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
+#define vboolf vboolf_impl
+#define vboold vboold_impl
+#define vint vint_impl
+#define vuint vuint_impl
+#define vllong vllong_impl
+#define vfloat vfloat_impl
+#define vdouble vdouble_impl
+
namespace embree
{
/* 16-wide AVX-512 float type */
@@ -73,11 +81,11 @@ namespace embree
}
/* WARNING: due to f64x4 the mask is considered as an 8bit mask */
- __forceinline vfloat(const vboolf16& mask, const vfloat8& a, const vfloat8& b) {
+ /*__forceinline vfloat(const vboolf16& mask, const vfloat8& a, const vfloat8& b) {
__m512d aa = _mm512_broadcast_f64x4(_mm256_castps_pd(a));
aa = _mm512_mask_broadcast_f64x4(aa,mask,_mm256_castps_pd(b));
v = _mm512_castpd_ps(aa);
- }
+ }*/
__forceinline explicit vfloat(const vint16& a) {
v = _mm512_cvtepi32_ps(a);
@@ -123,30 +131,6 @@ namespace embree
return _mm512_set1_ps(*f);
}
- static __forceinline vfloat16 compact(const vboolf16& mask, vfloat16 &v) {
- return _mm512_mask_compress_ps(v, mask, v);
- }
- static __forceinline vfloat16 compact(const vboolf16& mask, vfloat16 &a, const vfloat16& b) {
- return _mm512_mask_compress_ps(a, mask, b);
- }
-
- static __forceinline vfloat16 expand(const vboolf16& mask, const vfloat16& a, vfloat16& b) {
- return _mm512_mask_expand_ps(b, mask, a);
- }
-
- static __forceinline vfloat16 loadu_compact(const vboolf16& mask, const void* ptr) {
- return _mm512_mask_expandloadu_ps(_mm512_setzero_ps(), mask, (float*)ptr);
- }
-
- static __forceinline void storeu_compact(const vboolf16& mask, float *addr, const vfloat16 reg) {
- _mm512_mask_compressstoreu_ps(addr, mask, reg);
- }
-
- static __forceinline void storeu_compact_single(const vboolf16& mask, float * addr, const vfloat16& reg) {
- //_mm512_mask_compressstoreu_ps(addr,mask,reg);
- *addr = mm512_cvtss_f32(_mm512_mask_compress_ps(reg, mask, reg));
- }
-
template<int scale = 4>
static __forceinline vfloat16 gather(const float* ptr, const vint16& index) {
return _mm512_i32gather_ps(index, ptr, scale);
@@ -194,12 +178,8 @@ namespace embree
__forceinline vfloat16 signmsk(const vfloat16& a) { return _mm512_castsi512_ps(_mm512_and_epi32(_mm512_castps_si512(a),_mm512_set1_epi32(0x80000000))); }
__forceinline vfloat16 rcp(const vfloat16& a) {
-#if defined(__AVX512ER__)
- return _mm512_rcp28_ps(a);
-#else
const vfloat16 r = _mm512_rcp14_ps(a);
return _mm512_mul_ps(r, _mm512_fnmadd_ps(r, a, vfloat16(2.0f)));
-#endif
}
__forceinline vfloat16 sqr (const vfloat16& a) { return _mm512_mul_ps(a,a); }
@@ -207,13 +187,9 @@ namespace embree
__forceinline vfloat16 rsqrt(const vfloat16& a)
{
-#if defined(__AVX512VL__)
const vfloat16 r = _mm512_rsqrt14_ps(a);
return _mm512_fmadd_ps(_mm512_set1_ps(1.5f), r,
_mm512_mul_ps(_mm512_mul_ps(_mm512_mul_ps(a, _mm512_set1_ps(-0.5f)), r), _mm512_mul_ps(r, r)));
-#else
- return _mm512_rsqrt28_ps(a);
-#endif
}
////////////////////////////////////////////////////////////////////////////////
@@ -242,54 +218,26 @@ namespace embree
return _mm512_castsi512_ps(_mm512_xor_epi32(_mm512_castps_si512(a),_mm512_castps_si512(b)));
}
- __forceinline vfloat16 min(const vfloat16& a, const vfloat16& b) {
- return _mm512_min_ps(a,b);
- }
- __forceinline vfloat16 min(const vfloat16& a, float b) {
- return _mm512_min_ps(a,vfloat16(b));
- }
- __forceinline vfloat16 min(const float& a, const vfloat16& b) {
- return _mm512_min_ps(vfloat16(a),b);
- }
-
- __forceinline vfloat16 max(const vfloat16& a, const vfloat16& b) {
- return _mm512_max_ps(a,b);
- }
- __forceinline vfloat16 max(const vfloat16& a, float b) {
- return _mm512_max_ps(a,vfloat16(b));
- }
- __forceinline vfloat16 max(const float& a, const vfloat16& b) {
- return _mm512_max_ps(vfloat16(a),b);
- }
+ __forceinline vfloat16 min(const vfloat16& a, const vfloat16& b) { return _mm512_min_ps(a,b); }
+ __forceinline vfloat16 min(const vfloat16& a, float b) { return _mm512_min_ps(a,vfloat16(b)); }
+ __forceinline vfloat16 min(const float& a, const vfloat16& b) { return _mm512_min_ps(vfloat16(a),b); }
- __forceinline vfloat16 mask_add(const vboolf16& mask, const vfloat16& c, const vfloat16& a, const vfloat16& b) { return _mm512_mask_add_ps (c,mask,a,b); }
- __forceinline vfloat16 mask_min(const vboolf16& mask, const vfloat16& c, const vfloat16& a, const vfloat16& b) {
- return _mm512_mask_min_ps(c,mask,a,b);
- };
- __forceinline vfloat16 mask_max(const vboolf16& mask, const vfloat16& c, const vfloat16& a, const vfloat16& b) {
- return _mm512_mask_max_ps(c,mask,a,b);
- };
+ __forceinline vfloat16 max(const vfloat16& a, const vfloat16& b) { return _mm512_max_ps(a,b); }
+ __forceinline vfloat16 max(const vfloat16& a, float b) { return _mm512_max_ps(a,vfloat16(b)); }
+ __forceinline vfloat16 max(const float& a, const vfloat16& b) { return _mm512_max_ps(vfloat16(a),b); }
__forceinline vfloat16 mini(const vfloat16& a, const vfloat16& b) {
-#if !defined(__AVX512ER__) // SKX
const vint16 ai = _mm512_castps_si512(a);
const vint16 bi = _mm512_castps_si512(b);
const vint16 ci = _mm512_min_epi32(ai,bi);
return _mm512_castsi512_ps(ci);
-#else // KNL
- return min(a,b);
-#endif
}
__forceinline vfloat16 maxi(const vfloat16& a, const vfloat16& b) {
-#if !defined(__AVX512ER__) // SKX
const vint16 ai = _mm512_castps_si512(a);
const vint16 bi = _mm512_castps_si512(b);
const vint16 ci = _mm512_max_epi32(ai,bi);
return _mm512_castsi512_ps(ci);
-#else // KNL
- return max(a,b);
-#endif
}
////////////////////////////////////////////////////////////////////////////////
@@ -300,43 +248,6 @@ namespace embree
__forceinline vfloat16 msub (const vfloat16& a, const vfloat16& b, const vfloat16& c) { return _mm512_fmsub_ps(a,b,c); }
__forceinline vfloat16 nmadd(const vfloat16& a, const vfloat16& b, const vfloat16& c) { return _mm512_fnmadd_ps(a,b,c); }
__forceinline vfloat16 nmsub(const vfloat16& a, const vfloat16& b, const vfloat16& c) { return _mm512_fnmsub_ps(a,b,c); }
-
- __forceinline vfloat16 mask_msub(const vboolf16& mask,const vfloat16& a, const vfloat16& b, const vfloat16& c) { return _mm512_mask_fmsub_ps(a,mask,b,c); }
-
- __forceinline vfloat16 madd231 (const vfloat16& a, const vfloat16& b, const vfloat16& c) { return _mm512_fmadd_ps(c,b,a); }
- __forceinline vfloat16 msub213 (const vfloat16& a, const vfloat16& b, const vfloat16& c) { return _mm512_fmsub_ps(a,b,c); }
- __forceinline vfloat16 msub231 (const vfloat16& a, const vfloat16& b, const vfloat16& c) { return _mm512_fmsub_ps(c,b,a); }
- __forceinline vfloat16 msubr231(const vfloat16& a, const vfloat16& b, const vfloat16& c) { return _mm512_fnmadd_ps(c,b,a); }
-
-
- ////////////////////////////////////////////////////////////////////////////////
- /// Operators with rounding
- ////////////////////////////////////////////////////////////////////////////////
-
- __forceinline vfloat16 madd_round_down(const vfloat16& a, const vfloat16& b, const vfloat16& c) { return _mm512_fmadd_round_ps(a,b,c,_MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC); }
- __forceinline vfloat16 madd_round_up (const vfloat16& a, const vfloat16& b, const vfloat16& c) { return _mm512_fmadd_round_ps(a,b,c,_MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC); }
-
- __forceinline vfloat16 mul_round_down(const vfloat16& a, const vfloat16& b) { return _mm512_mul_round_ps(a,b,_MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC); }
- __forceinline vfloat16 mul_round_up (const vfloat16& a, const vfloat16& b) { return _mm512_mul_round_ps(a,b,_MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC); }
-
- __forceinline vfloat16 add_round_down(const vfloat16& a, const vfloat16& b) { return _mm512_add_round_ps(a,b,_MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC); }
- __forceinline vfloat16 add_round_up (const vfloat16& a, const vfloat16& b) { return _mm512_add_round_ps(a,b,_MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC); }
-
- __forceinline vfloat16 sub_round_down(const vfloat16& a, const vfloat16& b) { return _mm512_sub_round_ps(a,b,_MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC); }
- __forceinline vfloat16 sub_round_up (const vfloat16& a, const vfloat16& b) { return _mm512_sub_round_ps(a,b,_MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC); }
-
- __forceinline vfloat16 div_round_down(const vfloat16& a, const vfloat16& b) { return _mm512_div_round_ps(a,b,_MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC); }
- __forceinline vfloat16 div_round_up (const vfloat16& a, const vfloat16& b) { return _mm512_div_round_ps(a,b,_MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC); }
-
- __forceinline vfloat16 mask_msub_round_down(const vboolf16& mask,const vfloat16& a, const vfloat16& b, const vfloat16& c) { return _mm512_mask_fmsub_round_ps(a,mask,b,c,_MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC); }
- __forceinline vfloat16 mask_msub_round_up (const vboolf16& mask,const vfloat16& a, const vfloat16& b, const vfloat16& c) { return _mm512_mask_fmsub_round_ps(a,mask,b,c,_MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC); }
-
- __forceinline vfloat16 mask_mul_round_down(const vboolf16& mask,const vfloat16& a, const vfloat16& b, const vfloat16& c) { return _mm512_mask_mul_round_ps(a,mask,b,c,_MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC); }
- __forceinline vfloat16 mask_mul_round_up (const vboolf16& mask,const vfloat16& a, const vfloat16& b, const vfloat16& c) { return _mm512_mask_mul_round_ps(a,mask,b,c,_MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC); }
-
- __forceinline vfloat16 mask_sub_round_down(const vboolf16& mask,const vfloat16& a, const vfloat16& b, const vfloat16& c) { return _mm512_mask_sub_round_ps(a,mask,b,c,_MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC); }
- __forceinline vfloat16 mask_sub_round_up (const vboolf16& mask,const vfloat16& a, const vfloat16& b, const vfloat16& c) { return _mm512_mask_sub_round_ps(a,mask,b,c,_MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC); }
-
////////////////////////////////////////////////////////////////////////////////
/// Assignment Operators
@@ -404,13 +315,6 @@ namespace embree
return madd(t,b-a,a);
}
- __forceinline void xchg(vboolf16 m, vfloat16& a, vfloat16& b)
- {
- vfloat16 c = a;
- a = select(m,b,a);
- b = select(m,c,b);
- }
-
////////////////////////////////////////////////////////////////////////////////
/// Rounding Functions
////////////////////////////////////////////////////////////////////////////////
@@ -455,24 +359,6 @@ namespace embree
return _mm512_shuffle_f32x4(v, v, _MM_SHUFFLE(i3, i2, i1, i0));
}
- __forceinline vfloat16 interleave_even(const vfloat16& a, const vfloat16& b) {
- return _mm512_castsi512_ps(_mm512_mask_shuffle_epi32(_mm512_castps_si512(a), mm512_int2mask(0xaaaa), _mm512_castps_si512(b), (_MM_PERM_ENUM)0xb1));
- }
-
- __forceinline vfloat16 interleave_odd(const vfloat16& a, const vfloat16& b) {
- return _mm512_castsi512_ps(_mm512_mask_shuffle_epi32(_mm512_castps_si512(b), mm512_int2mask(0x5555), _mm512_castps_si512(a), (_MM_PERM_ENUM)0xb1));
- }
-
- __forceinline vfloat16 interleave2_even(const vfloat16& a, const vfloat16& b) {
- /* mask should be 8-bit but is 16-bit to reuse for interleave_even */
- return _mm512_castsi512_ps(_mm512_mask_permutex_epi64(_mm512_castps_si512(a), mm512_int2mask(0xaaaa), _mm512_castps_si512(b), (_MM_PERM_ENUM)0xb1));
- }
-
- __forceinline vfloat16 interleave2_odd(const vfloat16& a, const vfloat16& b) {
- /* mask should be 8-bit but is 16-bit to reuse for interleave_odd */
- return _mm512_castsi512_ps(_mm512_mask_permutex_epi64(_mm512_castps_si512(b), mm512_int2mask(0x5555), _mm512_castps_si512(a), (_MM_PERM_ENUM)0xb1));
- }
-
__forceinline vfloat16 interleave4_even(const vfloat16& a, const vfloat16& b) {
return _mm512_castsi512_ps(_mm512_mask_permutex_epi64(_mm512_castps_si512(a), mm512_int2mask(0xcc), _mm512_castps_si512(b), (_MM_PERM_ENUM)0x4e));
}
@@ -537,17 +423,6 @@ namespace embree
__forceinline void transpose(const vfloat16& r0, const vfloat16& r1, const vfloat16& r2, const vfloat16& r3,
vfloat16& c0, vfloat16& c1, vfloat16& c2, vfloat16& c3)
{
-#if defined(__AVX512F__) && !defined(__AVX512VL__) // KNL
- vfloat16 a0a1_c0c1 = interleave_even(r0, r1);
- vfloat16 a2a3_c2c3 = interleave_even(r2, r3);
- vfloat16 b0b1_d0d1 = interleave_odd (r0, r1);
- vfloat16 b2b3_d2d3 = interleave_odd (r2, r3);
-
- c0 = interleave2_even(a0a1_c0c1, a2a3_c2c3);
- c1 = interleave2_even(b0b1_d0d1, b2b3_d2d3);
- c2 = interleave2_odd (a0a1_c0c1, a2a3_c2c3);
- c3 = interleave2_odd (b0b1_d0d1, b2b3_d2d3);
-#else
vfloat16 a0a2_b0b2 = unpacklo(r0, r2);
vfloat16 c0c2_d0d2 = unpackhi(r0, r2);
vfloat16 a1a3_b1b3 = unpacklo(r1, r3);
@@ -557,7 +432,6 @@ namespace embree
c1 = unpackhi(a0a2_b0b2, a1a3_b1b3);
c2 = unpacklo(c0c2_d0d2, c1c3_d1d3);
c3 = unpackhi(c0c2_d0d2, c1c3_d1d3);
-#endif
}
__forceinline void transpose(const vfloat4& r0, const vfloat4& r1, const vfloat4& r2, const vfloat4& r3,
@@ -715,44 +589,6 @@ namespace embree
return v;
}
- ////////////////////////////////////////////////////////////////////////////////
- /// Memory load and store operations
- ////////////////////////////////////////////////////////////////////////////////
-
- __forceinline vfloat16 loadAOS4to16f(const float& x, const float& y, const float& z)
- {
- vfloat16 f = zero;
- f = select(0x1111,vfloat16::broadcast(&x),f);
- f = select(0x2222,vfloat16::broadcast(&y),f);
- f = select(0x4444,vfloat16::broadcast(&z),f);
- return f;
- }
-
- __forceinline vfloat16 loadAOS4to16f(unsigned int index,
- const vfloat16& x,
- const vfloat16& y,
- const vfloat16& z)
- {
- vfloat16 f = zero;
- f = select(0x1111,vfloat16::broadcast((float*)&x + index),f);
- f = select(0x2222,vfloat16::broadcast((float*)&y + index),f);
- f = select(0x4444,vfloat16::broadcast((float*)&z + index),f);
- return f;
- }
-
- __forceinline vfloat16 loadAOS4to16f(unsigned int index,
- const vfloat16& x,
- const vfloat16& y,
- const vfloat16& z,
- const vfloat16& fill)
- {
- vfloat16 f = fill;
- f = select(0x1111,vfloat16::broadcast((float*)&x + index),f);
- f = select(0x2222,vfloat16::broadcast((float*)&y + index),f);
- f = select(0x4444,vfloat16::broadcast((float*)&z + index),f);
- return f;
- }
-
__forceinline vfloat16 rcp_safe(const vfloat16& a) {
return rcp(select(a != vfloat16(zero), a, vfloat16(min_rcp_input)));
}
@@ -769,3 +605,11 @@ namespace embree
return cout;
}
}
+
+#undef vboolf
+#undef vboold
+#undef vint
+#undef vuint
+#undef vllong
+#undef vfloat
+#undef vdouble
diff --git a/thirdparty/embree-aarch64/common/simd/vfloat4_sse2.h b/thirdparty/embree/common/simd/vfloat4_sse2.h
index 5732c0fbc8..5215bf9730 100644
--- a/thirdparty/embree-aarch64/common/simd/vfloat4_sse2.h
+++ b/thirdparty/embree/common/simd/vfloat4_sse2.h
@@ -1,8 +1,16 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
+#define vboolf vboolf_impl
+#define vboold vboold_impl
+#define vint vint_impl
+#define vuint vuint_impl
+#define vllong vllong_impl
+#define vfloat vfloat_impl
+#define vdouble vdouble_impl
+
namespace embree
{
/* 4-wide SSE float type */
@@ -10,18 +18,18 @@ namespace embree
struct vfloat<4>
{
ALIGNED_STRUCT_(16);
-
+
typedef vboolf4 Bool;
typedef vint4 Int;
typedef vfloat4 Float;
-
+
enum { size = 4 }; // number of SIMD elements
union { __m128 v; float f[4]; int i[4]; }; // data
////////////////////////////////////////////////////////////////////////////////
/// Constructors, Assignment & Cast Operators
////////////////////////////////////////////////////////////////////////////////
-
+
__forceinline vfloat() {}
__forceinline vfloat(const vfloat4& other) { v = other.v; }
__forceinline vfloat4& operator =(const vfloat4& other) { v = other.v; return *this; }
@@ -34,19 +42,14 @@ namespace embree
__forceinline vfloat(float a, float b, float c, float d) : v(_mm_set_ps(d, c, b, a)) {}
__forceinline explicit vfloat(const vint4& a) : v(_mm_cvtepi32_ps(a)) {}
-#if defined(__aarch64__)
- __forceinline explicit vfloat(const vuint4& x) {
- v = vcvtq_f32_u32(vreinterpretq_u32_s32(x.v));
- }
-#else
__forceinline explicit vfloat(const vuint4& x) {
const __m128i a = _mm_and_si128(x,_mm_set1_epi32(0x7FFFFFFF));
- const __m128i b = _mm_and_si128(_mm_srai_epi32(x,31),_mm_set1_epi32(0x4F000000)); //0x4F000000 = 2^31
+ const __m128i b = _mm_and_si128(_mm_srai_epi32(x,31),_mm_set1_epi32(0x4F000000)); //0x4F000000 = 2^31
const __m128 af = _mm_cvtepi32_ps(a);
- const __m128 bf = _mm_castsi128_ps(b);
+ const __m128 bf = _mm_castsi128_ps(b);
v = _mm_add_ps(af,bf);
}
-#endif
+
////////////////////////////////////////////////////////////////////////////////
/// Constants
////////////////////////////////////////////////////////////////////////////////
@@ -71,13 +74,6 @@ namespace embree
#if defined(__AVX512VL__)
- static __forceinline vfloat4 compact(const vboolf4& mask, vfloat4 &v) {
- return _mm_mask_compress_ps(v, mask, v);
- }
- static __forceinline vfloat4 compact(const vboolf4& mask, vfloat4 &a, const vfloat4& b) {
- return _mm_mask_compress_ps(a, mask, b);
- }
-
static __forceinline vfloat4 load (const vboolf4& mask, const void* ptr) { return _mm_mask_load_ps (_mm_setzero_ps(),mask,(float*)ptr); }
static __forceinline vfloat4 loadu(const vboolf4& mask, const void* ptr) { return _mm_mask_loadu_ps(_mm_setzero_ps(),mask,(float*)ptr); }
@@ -107,44 +103,32 @@ namespace embree
#if defined (__SSE4_1__)
return _mm_castsi128_ps(_mm_stream_load_si128((__m128i*)ptr));
#else
- return _mm_load_ps(ptr);
+ return _mm_load_ps(ptr);
#endif
}
-#if defined(__aarch64__)
- static __forceinline vfloat4 load(const int8_t* ptr) {
- return __m128(_mm_load4epi8_f32(((__m128i*)ptr)));
- }
-#elif defined(__SSE4_1__)
- static __forceinline vfloat4 load(const int8_t* ptr) {
+#if defined(__SSE4_1__)
+ static __forceinline vfloat4 load(const char* ptr) {
return _mm_cvtepi32_ps(_mm_cvtepi8_epi32(_mm_loadu_si128((__m128i*)ptr)));
}
#else
- static __forceinline vfloat4 load(const int8_t* ptr) {
+ static __forceinline vfloat4 load(const char* ptr) {
return vfloat4(ptr[0],ptr[1],ptr[2],ptr[3]);
}
#endif
-#if defined(__aarch64__)
- static __forceinline vfloat4 load(const uint8_t* ptr) {
- return __m128(_mm_load4epu8_f32(((__m128i*)ptr)));
- }
-#elif defined(__SSE4_1__)
- static __forceinline vfloat4 load(const uint8_t* ptr) {
+#if defined(__SSE4_1__)
+ static __forceinline vfloat4 load(const unsigned char* ptr) {
return _mm_cvtepi32_ps(_mm_cvtepu8_epi32(_mm_loadu_si128((__m128i*)ptr)));
}
#else
- static __forceinline vfloat4 load(const uint8_t* ptr) {
+ static __forceinline vfloat4 load(const unsigned char* ptr) {
//return _mm_cvtpu8_ps(*(__m64*)ptr); // don't enable, will use MMX instructions
return vfloat4(ptr[0],ptr[1],ptr[2],ptr[3]);
}
#endif
-#if defined(__aarch64__)
- static __forceinline vfloat4 load(const short* ptr) {
- return __m128(_mm_load4epi16_f32(((__m128i*)ptr)));
- }
-#elif defined(__SSE4_1__)
+#if defined(__SSE4_1__)
static __forceinline vfloat4 load(const short* ptr) {
return _mm_cvtepi32_ps(_mm_cvtepi16_epi32(_mm_loadu_si128((__m128i*)ptr)));
}
@@ -157,15 +141,11 @@ namespace embree
static __forceinline vfloat4 load(const unsigned short* ptr) {
return _mm_mul_ps(vfloat4(vint4::load(ptr)),vfloat4(1.0f/65535.0f));
}
-
+
static __forceinline void store_nt(void* ptr, const vfloat4& v)
{
#if defined (__SSE4_1__)
-#if defined(__aarch64__)
- _mm_stream_ps((float*)ptr,vreinterpretq_s32_f32(v.v));
-#else
_mm_stream_ps((float*)ptr,v);
-#endif
#else
_mm_store_ps((float*)ptr,v);
#endif
@@ -173,14 +153,14 @@ namespace embree
template<int scale = 4>
static __forceinline vfloat4 gather(const float* ptr, const vint4& index) {
-#if defined(__AVX2__) && !defined(__aarch64__)
+#if defined(__AVX2__)
return _mm_i32gather_ps(ptr, index, scale);
#else
return vfloat4(
- *(float*)(((int8_t*)ptr)+scale*index[0]),
- *(float*)(((int8_t*)ptr)+scale*index[1]),
- *(float*)(((int8_t*)ptr)+scale*index[2]),
- *(float*)(((int8_t*)ptr)+scale*index[3]));
+ *(float*)(((char*)ptr)+scale*index[0]),
+ *(float*)(((char*)ptr)+scale*index[1]),
+ *(float*)(((char*)ptr)+scale*index[2]),
+ *(float*)(((char*)ptr)+scale*index[3]));
#endif
}
@@ -189,13 +169,13 @@ namespace embree
vfloat4 r = zero;
#if defined(__AVX512VL__)
return _mm_mmask_i32gather_ps(r, mask, index, ptr, scale);
-#elif defined(__AVX2__) && !defined(__aarch64__)
+#elif defined(__AVX2__)
return _mm_mask_i32gather_ps(r, ptr, index, mask, scale);
#else
- if (likely(mask[0])) r[0] = *(float*)(((int8_t*)ptr)+scale*index[0]);
- if (likely(mask[1])) r[1] = *(float*)(((int8_t*)ptr)+scale*index[1]);
- if (likely(mask[2])) r[2] = *(float*)(((int8_t*)ptr)+scale*index[2]);
- if (likely(mask[3])) r[3] = *(float*)(((int8_t*)ptr)+scale*index[3]);
+ if (likely(mask[0])) r[0] = *(float*)(((char*)ptr)+scale*index[0]);
+ if (likely(mask[1])) r[1] = *(float*)(((char*)ptr)+scale*index[1]);
+ if (likely(mask[2])) r[2] = *(float*)(((char*)ptr)+scale*index[2]);
+ if (likely(mask[3])) r[3] = *(float*)(((char*)ptr)+scale*index[3]);
return r;
#endif
}
@@ -206,10 +186,10 @@ namespace embree
#if defined(__AVX512VL__)
_mm_i32scatter_ps((float*)ptr, index, v, scale);
#else
- *(float*)(((int8_t*)ptr)+scale*index[0]) = v[0];
- *(float*)(((int8_t*)ptr)+scale*index[1]) = v[1];
- *(float*)(((int8_t*)ptr)+scale*index[2]) = v[2];
- *(float*)(((int8_t*)ptr)+scale*index[3]) = v[3];
+ *(float*)(((char*)ptr)+scale*index[0]) = v[0];
+ *(float*)(((char*)ptr)+scale*index[1]) = v[1];
+ *(float*)(((char*)ptr)+scale*index[2]) = v[2];
+ *(float*)(((char*)ptr)+scale*index[3]) = v[3];
#endif
}
@@ -219,20 +199,20 @@ namespace embree
#if defined(__AVX512VL__)
_mm_mask_i32scatter_ps((float*)ptr ,mask, index, v, scale);
#else
- if (likely(mask[0])) *(float*)(((int8_t*)ptr)+scale*index[0]) = v[0];
- if (likely(mask[1])) *(float*)(((int8_t*)ptr)+scale*index[1]) = v[1];
- if (likely(mask[2])) *(float*)(((int8_t*)ptr)+scale*index[2]) = v[2];
- if (likely(mask[3])) *(float*)(((int8_t*)ptr)+scale*index[3]) = v[3];
+ if (likely(mask[0])) *(float*)(((char*)ptr)+scale*index[0]) = v[0];
+ if (likely(mask[1])) *(float*)(((char*)ptr)+scale*index[1]) = v[1];
+ if (likely(mask[2])) *(float*)(((char*)ptr)+scale*index[2]) = v[2];
+ if (likely(mask[3])) *(float*)(((char*)ptr)+scale*index[3]) = v[3];
#endif
}
- static __forceinline void store(const vboolf4& mask, int8_t* ptr, const vint4& ofs, const vfloat4& v) {
+ static __forceinline void store(const vboolf4& mask, char* ptr, const vint4& ofs, const vfloat4& v) {
scatter<1>(mask,ptr,ofs,v);
}
static __forceinline void store(const vboolf4& mask, float* ptr, const vint4& ofs, const vfloat4& v) {
scatter<4>(mask,ptr,ofs,v);
}
-
+
////////////////////////////////////////////////////////////////////////////////
/// Array Access
////////////////////////////////////////////////////////////////////////////////
@@ -243,15 +223,27 @@ namespace embree
friend __forceinline vfloat4 select(const vboolf4& m, const vfloat4& t, const vfloat4& f) {
#if defined(__AVX512VL__)
return _mm_mask_blend_ps(m, f, t);
-#elif defined(__SSE4_1__) || (defined(__aarch64__))
- return _mm_blendv_ps(f, t, m);
+#elif defined(__SSE4_1__)
+ return _mm_blendv_ps(f, t, m);
#else
- return _mm_or_ps(_mm_and_ps(m, t), _mm_andnot_ps(m, f));
+ return _mm_or_ps(_mm_and_ps(m, t), _mm_andnot_ps(m, f));
#endif
}
};
+ ////////////////////////////////////////////////////////////////////////////////
+ /// Load/Store
+ ////////////////////////////////////////////////////////////////////////////////
+ template<> struct mem<vfloat4>
+ {
+ static __forceinline vfloat4 load (const vboolf4& mask, const void* ptr) { return vfloat4::load (mask,ptr); }
+ static __forceinline vfloat4 loadu(const vboolf4& mask, const void* ptr) { return vfloat4::loadu(mask,ptr); }
+
+ static __forceinline void store (const vboolf4& mask, void* ptr, const vfloat4& v) { vfloat4::store (mask,ptr,v); }
+ static __forceinline void storeu(const vboolf4& mask, void* ptr, const vfloat4& v) { vfloat4::storeu(mask,ptr,v); }
+ };
+
////////////////////////////////////////////////////////////////////////////////
/// Unary Operators
////////////////////////////////////////////////////////////////////////////////
@@ -264,47 +256,18 @@ namespace embree
__forceinline vfloat4 toFloat(const vint4& a) { return vfloat4(a); }
__forceinline vfloat4 operator +(const vfloat4& a) { return a; }
-#if defined(__aarch64__)
- __forceinline vfloat4 operator -(const vfloat4& a) {
- return vnegq_f32(a);
- }
-#else
__forceinline vfloat4 operator -(const vfloat4& a) { return _mm_xor_ps(a, _mm_castsi128_ps(_mm_set1_epi32(0x80000000))); }
-#endif
-#if defined(__aarch64__)
- __forceinline vfloat4 abs(const vfloat4& a) { return _mm_abs_ps(a); }
-#else
__forceinline vfloat4 abs(const vfloat4& a) { return _mm_and_ps(a, _mm_castsi128_ps(_mm_set1_epi32(0x7fffffff))); }
-#endif
-
#if defined(__AVX512VL__)
__forceinline vfloat4 sign(const vfloat4& a) { return _mm_mask_blend_ps(_mm_cmp_ps_mask(a, vfloat4(zero), _CMP_LT_OQ), vfloat4(one), -vfloat4(one)); }
#else
__forceinline vfloat4 sign(const vfloat4& a) { return blendv_ps(vfloat4(one), -vfloat4(one), _mm_cmplt_ps(a, vfloat4(zero))); }
#endif
-
-#if defined(__aarch64__)
- __forceinline vfloat4 signmsk(const vfloat4& a) { return _mm_and_ps(a, vreinterpretq_f32_u32(v0x80000000)); }
-#else
__forceinline vfloat4 signmsk(const vfloat4& a) { return _mm_and_ps(a,_mm_castsi128_ps(_mm_set1_epi32(0x80000000))); }
-#endif
-
+
__forceinline vfloat4 rcp(const vfloat4& a)
{
-#if defined(__aarch64__)
-#if defined(BUILD_IOS)
- return vfloat4(vdivq_f32(vdupq_n_f32(1.0f),a.v));
-#else //BUILD_IOS
- __m128 reciprocal = _mm_rcp_ps(a);
- reciprocal = vmulq_f32(vrecpsq_f32(a, reciprocal), reciprocal);
- reciprocal = vmulq_f32(vrecpsq_f32(a, reciprocal), reciprocal);
- // +1 round since NEON's reciprocal estimate instruction has less accuracy than SSE2's rcp.
- reciprocal = vmulq_f32(vrecpsq_f32(a, reciprocal), reciprocal);
- return (const vfloat4)reciprocal;
-#endif // BUILD_IOS
-#else
-
#if defined(__AVX512VL__)
const vfloat4 r = _mm_rcp14_ps(a);
#else
@@ -316,45 +279,31 @@ namespace embree
#else
return _mm_mul_ps(r,_mm_sub_ps(vfloat4(2.0f), _mm_mul_ps(r, a)));
#endif
-
-#endif //defined(__aarch64__)
}
__forceinline vfloat4 sqr (const vfloat4& a) { return _mm_mul_ps(a,a); }
__forceinline vfloat4 sqrt(const vfloat4& a) { return _mm_sqrt_ps(a); }
__forceinline vfloat4 rsqrt(const vfloat4& a)
{
-#if defined(__aarch64__)
- vfloat4 r = _mm_rsqrt_ps(a);
- r = vmulq_f32(r, vrsqrtsq_f32(vmulq_f32(a, r), r));
- r = vmulq_f32(r, vrsqrtsq_f32(vmulq_f32(a, r), r));
- r = vmulq_f32(r, vrsqrtsq_f32(vmulq_f32(a, r), r));
- return r;
-#else
-
#if defined(__AVX512VL__)
- const vfloat4 r = _mm_rsqrt14_ps(a);
+ vfloat4 r = _mm_rsqrt14_ps(a);
#else
- const vfloat4 r = _mm_rsqrt_ps(a);
+ vfloat4 r = _mm_rsqrt_ps(a);
#endif
-#if defined(__AVX2__)
- return _mm_fmadd_ps(_mm_set1_ps(1.5f), r,
- _mm_mul_ps(_mm_mul_ps(_mm_mul_ps(a, _mm_set1_ps(-0.5f)), r), _mm_mul_ps(r, r)));
+#if defined(__ARM_NEON)
+ r = _mm_fmadd_ps(_mm_set1_ps(1.5f), r, _mm_mul_ps(_mm_mul_ps(_mm_mul_ps(a, _mm_set1_ps(-0.5f)), r), _mm_mul_ps(r, r)));
+ r = _mm_fmadd_ps(_mm_set1_ps(1.5f), r, _mm_mul_ps(_mm_mul_ps(_mm_mul_ps(a, _mm_set1_ps(-0.5f)), r), _mm_mul_ps(r, r)));
+#elif defined(__AVX2__)
+ r = _mm_fmadd_ps(_mm_set1_ps(1.5f), r, _mm_mul_ps(_mm_mul_ps(_mm_mul_ps(a, _mm_set1_ps(-0.5f)), r), _mm_mul_ps(r, r)));
#else
- return _mm_add_ps(_mm_mul_ps(_mm_set1_ps(1.5f), r),
- _mm_mul_ps(_mm_mul_ps(_mm_mul_ps(a, _mm_set1_ps(-0.5f)), r), _mm_mul_ps(r, r)));
-#endif
-
+ r = _mm_add_ps(_mm_mul_ps(_mm_set1_ps(1.5f), r), _mm_mul_ps(_mm_mul_ps(_mm_mul_ps(a, _mm_set1_ps(-0.5f)), r), _mm_mul_ps(r, r)));
#endif
+ return r;
}
__forceinline vboolf4 isnan(const vfloat4& a) {
-#if defined(__aarch64__)
- const vfloat4 b = _mm_and_ps(a, vreinterpretq_f32_u32(v0x7fffffff));
-#else
const vfloat4 b = _mm_and_ps(a, _mm_castsi128_ps(_mm_set1_epi32(0x7fffffff)));
-#endif
#if defined(__AVX512VL__)
return _mm_cmp_epi32_mask(_mm_castps_si128(b), _mm_set1_epi32(0x7f800000), _MM_CMPINT_GT);
#else
@@ -395,8 +344,7 @@ namespace embree
__forceinline vfloat4 max(const vfloat4& a, float b) { return _mm_max_ps(a,vfloat4(b)); }
__forceinline vfloat4 max(float a, const vfloat4& b) { return _mm_max_ps(vfloat4(a),b); }
-#if defined(__SSE4_1__) || defined(__aarch64__)
-
+#if defined(__SSE4_1__)
__forceinline vfloat4 mini(const vfloat4& a, const vfloat4& b) {
const vint4 ai = _mm_castps_si128(a);
const vint4 bi = _mm_castps_si128(b);
@@ -438,31 +386,17 @@ namespace embree
/// Ternary Operators
////////////////////////////////////////////////////////////////////////////////
-#if defined(__AVX2__)
+#if defined(__AVX2__) || defined(__ARM_NEON)
__forceinline vfloat4 madd (const vfloat4& a, const vfloat4& b, const vfloat4& c) { return _mm_fmadd_ps(a,b,c); }
__forceinline vfloat4 msub (const vfloat4& a, const vfloat4& b, const vfloat4& c) { return _mm_fmsub_ps(a,b,c); }
__forceinline vfloat4 nmadd(const vfloat4& a, const vfloat4& b, const vfloat4& c) { return _mm_fnmadd_ps(a,b,c); }
__forceinline vfloat4 nmsub(const vfloat4& a, const vfloat4& b, const vfloat4& c) { return _mm_fnmsub_ps(a,b,c); }
#else
-
-#if defined(__aarch64__)
- __forceinline vfloat4 madd (const vfloat4& a, const vfloat4& b, const vfloat4& c) {
- return _mm_madd_ps(a, b, c); //a*b+c;
- }
- __forceinline vfloat4 nmadd(const vfloat4& a, const vfloat4& b, const vfloat4& c) {
- return _mm_msub_ps(a, b, c); //-a*b+c;
- }
- __forceinline vfloat4 nmsub(const vfloat4& a, const vfloat4& b, const vfloat4& c) {
- return vnegq_f32(vfmaq_f32(c,a, b));
- }
-#else
__forceinline vfloat4 madd (const vfloat4& a, const vfloat4& b, const vfloat4& c) { return a*b+c; }
+ __forceinline vfloat4 msub (const vfloat4& a, const vfloat4& b, const vfloat4& c) { return a*b-c; }
__forceinline vfloat4 nmadd(const vfloat4& a, const vfloat4& b, const vfloat4& c) { return -a*b+c;}
__forceinline vfloat4 nmsub(const vfloat4& a, const vfloat4& b, const vfloat4& c) { return -a*b-c; }
#endif
- __forceinline vfloat4 msub (const vfloat4& a, const vfloat4& b, const vfloat4& c) { return a*b-c; }
-
-#endif
////////////////////////////////////////////////////////////////////////////////
/// Assignment Operators
@@ -495,13 +429,8 @@ namespace embree
__forceinline vboolf4 operator ==(const vfloat4& a, const vfloat4& b) { return _mm_cmpeq_ps (a, b); }
__forceinline vboolf4 operator !=(const vfloat4& a, const vfloat4& b) { return _mm_cmpneq_ps(a, b); }
__forceinline vboolf4 operator < (const vfloat4& a, const vfloat4& b) { return _mm_cmplt_ps (a, b); }
-#if defined(__aarch64__)
- __forceinline vboolf4 operator >=(const vfloat4& a, const vfloat4& b) { return _mm_cmpge_ps (a, b); }
- __forceinline vboolf4 operator > (const vfloat4& a, const vfloat4& b) { return _mm_cmpgt_ps (a, b); }
-#else
__forceinline vboolf4 operator >=(const vfloat4& a, const vfloat4& b) { return _mm_cmpnlt_ps(a, b); }
__forceinline vboolf4 operator > (const vfloat4& a, const vfloat4& b) { return _mm_cmpnle_ps(a, b); }
-#endif
__forceinline vboolf4 operator <=(const vfloat4& a, const vfloat4& b) { return _mm_cmple_ps (a, b); }
#endif
@@ -513,7 +442,7 @@ namespace embree
__forceinline vboolf4 operator < (const vfloat4& a, float b) { return a < vfloat4(b); }
__forceinline vboolf4 operator < (float a, const vfloat4& b) { return vfloat4(a) < b; }
-
+
__forceinline vboolf4 operator >=(const vfloat4& a, float b) { return a >= vfloat4(b); }
__forceinline vboolf4 operator >=(float a, const vfloat4& b) { return vfloat4(a) >= b; }
@@ -549,68 +478,17 @@ namespace embree
template<int mask>
__forceinline vfloat4 select(const vfloat4& t, const vfloat4& f)
{
-#if defined(__SSE4_1__)
+#if defined(__SSE4_1__)
return _mm_blend_ps(f, t, mask);
#else
return select(vboolf4(mask), t, f);
#endif
}
-
-#if defined(__aarch64__)
- template<> __forceinline vfloat4 select<0>(const vfloat4& t, const vfloat4& f) {
- return _mm_blendv_ps(f, t, vreinterpretq_f32_u32(vzero));
- }
- template<> __forceinline vfloat4 select<1>(const vfloat4& t, const vfloat4& f) {
- return _mm_blendv_ps(f, t, vreinterpretq_f32_u32(v000F));
- }
- template<> __forceinline vfloat4 select<2>(const vfloat4& t, const vfloat4& f) {
- return _mm_blendv_ps(f, t, vreinterpretq_f32_u32(v00F0));
- }
- template<> __forceinline vfloat4 select<3>(const vfloat4& t, const vfloat4& f) {
- return _mm_blendv_ps(f, t, vreinterpretq_f32_u32(v00FF));
- }
- template<> __forceinline vfloat4 select<4>(const vfloat4& t, const vfloat4& f) {
- return _mm_blendv_ps(f, t, vreinterpretq_f32_u32(v0F00));
- }
- template<> __forceinline vfloat4 select<5>(const vfloat4& t, const vfloat4& f) {
- return _mm_blendv_ps(f, t, vreinterpretq_f32_u32(v0F0F));
- }
- template<> __forceinline vfloat4 select<6>(const vfloat4& t, const vfloat4& f) {
- return _mm_blendv_ps(f, t, vreinterpretq_f32_u32(v0FF0));
- }
- template<> __forceinline vfloat4 select<7>(const vfloat4& t, const vfloat4& f) {
- return _mm_blendv_ps(f, t, vreinterpretq_f32_u32(v0FFF));
- }
- template<> __forceinline vfloat4 select<8>(const vfloat4& t, const vfloat4& f) {
- return _mm_blendv_ps(f, t, vreinterpretq_f32_u32(vF000));
- }
- template<> __forceinline vfloat4 select<9>(const vfloat4& t, const vfloat4& f) {
- return _mm_blendv_ps(f, t, vreinterpretq_f32_u32(vF00F));
- }
- template<> __forceinline vfloat4 select<10>(const vfloat4& t, const vfloat4& f) {
- return _mm_blendv_ps(f, t, vreinterpretq_f32_u32(vF0F0));
- }
- template<> __forceinline vfloat4 select<11>(const vfloat4& t, const vfloat4& f) {
- return _mm_blendv_ps(f, t, vreinterpretq_f32_u32(vF0FF));
- }
- template<> __forceinline vfloat4 select<12>(const vfloat4& t, const vfloat4& f) {
- return _mm_blendv_ps(f, t, vreinterpretq_f32_u32(vFF00));
- }
- template<> __forceinline vfloat4 select<13>(const vfloat4& t, const vfloat4& f) {
- return _mm_blendv_ps(f, t, vreinterpretq_f32_u32(vFF0F));
- }
- template<> __forceinline vfloat4 select<14>(const vfloat4& t, const vfloat4& f) {
- return _mm_blendv_ps(f, t, vreinterpretq_f32_u32(vFFF0));
- }
- template<> __forceinline vfloat4 select<15>(const vfloat4& t, const vfloat4& f) {
- return _mm_blendv_ps(f, t, vreinterpretq_f32_u32(vFFFF));
- }
-#endif
-
+
__forceinline vfloat4 lerp(const vfloat4& a, const vfloat4& b, const vfloat4& t) {
return madd(t,b-a,a);
}
-
+
__forceinline bool isvalid(const vfloat4& v) {
return all((v > vfloat4(-FLT_LARGE)) & (v < vfloat4(+FLT_LARGE)));
}
@@ -622,21 +500,21 @@ namespace embree
__forceinline bool is_finite(const vboolf4& valid, const vfloat4& a) {
return all(valid, (a >= vfloat4(-FLT_MAX)) & (a <= vfloat4(+FLT_MAX)));
}
-
+
////////////////////////////////////////////////////////////////////////////////
/// Rounding Functions
////////////////////////////////////////////////////////////////////////////////
#if defined(__aarch64__)
- __forceinline vfloat4 floor(const vfloat4& a) { return vrndmq_f32(a.v); } // towards -inf
- __forceinline vfloat4 ceil (const vfloat4& a) { return vrndpq_f32(a.v); } // toward +inf
- __forceinline vfloat4 trunc(const vfloat4& a) { return vrndq_f32(a.v); } // towards 0
- __forceinline vfloat4 round(const vfloat4& a) { return vrndnq_f32(a.v); } // to nearest, ties to even. NOTE(LTE): arm clang uses vrndnq, old gcc uses vrndqn?
+ __forceinline vfloat4 floor(const vfloat4& a) { return vrndmq_f32(a.v); }
+ __forceinline vfloat4 ceil (const vfloat4& a) { return vrndpq_f32(a.v); }
+ __forceinline vfloat4 trunc(const vfloat4& a) { return vrndq_f32(a.v); }
+ __forceinline vfloat4 round(const vfloat4& a) { return vrndnq_f32(a.v); }
#elif defined (__SSE4_1__)
- __forceinline vfloat4 floor(const vfloat4& a) { return _mm_round_ps(a, _MM_FROUND_TO_NEG_INF ); }
- __forceinline vfloat4 ceil (const vfloat4& a) { return _mm_round_ps(a, _MM_FROUND_TO_POS_INF ); }
- __forceinline vfloat4 trunc(const vfloat4& a) { return _mm_round_ps(a, _MM_FROUND_TO_ZERO ); }
- __forceinline vfloat4 round(const vfloat4& a) { return _mm_round_ps(a, _MM_FROUND_TO_NEAREST_INT); } // (even) https://www.felixcloutier.com/x86/roundpd
+ __forceinline vfloat4 floor(const vfloat4& a) { return _mm_round_ps(a, _MM_FROUND_TO_NEG_INF ); }
+ __forceinline vfloat4 ceil (const vfloat4& a) { return _mm_round_ps(a, _MM_FROUND_TO_POS_INF ); }
+ __forceinline vfloat4 trunc(const vfloat4& a) { return _mm_round_ps(a, _MM_FROUND_TO_ZERO ); }
+ __forceinline vfloat4 round(const vfloat4& a) { return _mm_round_ps(a, _MM_FROUND_TO_NEAREST_INT); }
#else
__forceinline vfloat4 floor(const vfloat4& a) { return vfloat4(floorf(a[0]),floorf(a[1]),floorf(a[2]),floorf(a[3])); }
__forceinline vfloat4 ceil (const vfloat4& a) { return vfloat4(ceilf (a[0]),ceilf (a[1]),ceilf (a[2]),ceilf (a[3])); }
@@ -646,9 +524,7 @@ namespace embree
__forceinline vfloat4 frac(const vfloat4& a) { return a-floor(a); }
__forceinline vint4 floori(const vfloat4& a) {
-#if defined(__aarch64__)
- return vcvtq_s32_f32(floor(a));
-#elif defined(__SSE4_1__)
+#if defined(__SSE4_1__)
return vint4(floor(a));
#else
return vint4(a-vfloat4(0.5f));
@@ -662,16 +538,6 @@ namespace embree
__forceinline vfloat4 unpacklo(const vfloat4& a, const vfloat4& b) { return _mm_unpacklo_ps(a, b); }
__forceinline vfloat4 unpackhi(const vfloat4& a, const vfloat4& b) { return _mm_unpackhi_ps(a, b); }
-#if defined(__aarch64__)
- template<int i0, int i1, int i2, int i3>
- __forceinline vfloat4 shuffle(const vfloat4& v) {
- return vreinterpretq_f32_u8(vqtbl1q_u8( (uint8x16_t)v.v, _MN_SHUFFLE(i0, i1, i2, i3)));
- }
- template<int i0, int i1, int i2, int i3>
- __forceinline vfloat4 shuffle(const vfloat4& a, const vfloat4& b) {
- return vreinterpretq_f32_u8(vqtbl2q_u8( (uint8x16x2_t){(uint8x16_t)a.v, (uint8x16_t)b.v}, _MF_SHUFFLE(i0, i1, i2, i3)));
- }
-#else
template<int i0, int i1, int i2, int i3>
__forceinline vfloat4 shuffle(const vfloat4& v) {
return _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(v), _MM_SHUFFLE(i3, i2, i1, i0)));
@@ -681,19 +547,8 @@ namespace embree
__forceinline vfloat4 shuffle(const vfloat4& a, const vfloat4& b) {
return _mm_shuffle_ps(a, b, _MM_SHUFFLE(i3, i2, i1, i0));
}
-#endif
-
-#if defined (__SSSE3__)
- __forceinline vfloat4 shuffle8(const vfloat4& a, const vint4& shuf) {
- return _mm_castsi128_ps(_mm_shuffle_epi8(_mm_castps_si128(a), shuf));
- }
-#endif
-#if defined(__aarch64__)
- template<> __forceinline vfloat4 shuffle<0, 0, 2, 2>(const vfloat4& v) { return __m128(vqtbl1q_u8( uint8x16_t(v.v), v0022 )); }
- template<> __forceinline vfloat4 shuffle<1, 1, 3, 3>(const vfloat4& v) { return __m128(vqtbl1q_u8( uint8x16_t(v.v), v1133)); }
- template<> __forceinline vfloat4 shuffle<0, 1, 0, 1>(const vfloat4& v) { return __m128(vqtbl1q_u8( uint8x16_t(v.v), v0101)); }
-#elif defined(__SSE3__)
+#if defined(__SSE3__)
template<> __forceinline vfloat4 shuffle<0, 0, 2, 2>(const vfloat4& v) { return _mm_moveldup_ps(v); }
template<> __forceinline vfloat4 shuffle<1, 1, 3, 3>(const vfloat4& v) { return _mm_movehdup_ps(v); }
template<> __forceinline vfloat4 shuffle<0, 1, 0, 1>(const vfloat4& v) { return _mm_castpd_ps(_mm_movedup_pd(_mm_castps_pd(v))); }
@@ -704,56 +559,10 @@ namespace embree
return shuffle<i,i,i,i>(v);
}
-#if defined(__aarch64__)
- template<int i> __forceinline float extract(const vfloat4& a);
- template<> __forceinline float extract<0>(const vfloat4& b) {
- return b[0];
- }
- template<> __forceinline float extract<1>(const vfloat4& b) {
- return b[1];
- }
- template<> __forceinline float extract<2>(const vfloat4& b) {
- return b[2];
- }
- template<> __forceinline float extract<3>(const vfloat4& b) {
- return b[3];
- }
-#elif defined (__SSE4_1__) && !defined(__GNUC__)
- template<int i> __forceinline float extract(const vfloat4& a) { return _mm_cvtss_f32(_mm_extract_ps(a,i)); }
- template<> __forceinline float extract<0>(const vfloat4& a) { return _mm_cvtss_f32(a); }
-#else
- template<int i> __forceinline float extract(const vfloat4& a) { return _mm_cvtss_f32(shuffle<i,i,i,i>(a)); }
- template<> __forceinline float extract<0>(const vfloat4& a) { return _mm_cvtss_f32(a); }
-#endif
-
+ template<int i> __forceinline float extract (const vfloat4& a) { return _mm_cvtss_f32(shuffle<i>(a)); }
+ template<> __forceinline float extract<0>(const vfloat4& a) { return _mm_cvtss_f32(a); }
-#if defined(__aarch64__)
- template<int dst> __forceinline vfloat4 insert(const vfloat4& a, float b);
- template<> __forceinline vfloat4 insert<0>(const vfloat4& a, float b)
- {
- vfloat4 c = a;
- c[0] = b;
- return c;
- }
- template<> __forceinline vfloat4 insert<1>(const vfloat4& a, float b)
- {
- vfloat4 c = a;
- c[1] = b;
- return c;
- }
- template<> __forceinline vfloat4 insert<2>(const vfloat4& a, float b)
- {
- vfloat4 c = a;
- c[2] = b;
- return c;
- }
- template<> __forceinline vfloat4 insert<3>(const vfloat4& a, float b)
- {
- vfloat4 c = a;
- c[3] = b;
- return c;
- }
-#elif defined (__SSE4_1__)
+#if defined (__SSE4_1__)
template<int dst, int src, int clr> __forceinline vfloat4 insert(const vfloat4& a, const vfloat4& b) { return _mm_insert_ps(a, b, (dst << 4) | (src << 6) | clr); }
template<int dst, int src> __forceinline vfloat4 insert(const vfloat4& a, const vfloat4& b) { return insert<dst, src, 0>(a, b); }
template<int dst> __forceinline vfloat4 insert(const vfloat4& a, const float b) { return insert<dst, 0>(a, _mm_set_ss(b)); }
@@ -762,19 +571,10 @@ namespace embree
template<int dst> __forceinline vfloat4 insert(const vfloat4& a, float b) { vfloat4 c = a; c[dst&3] = b; return c; }
#endif
-#if defined(__aarch64__)
- __forceinline float toScalar(const vfloat4& v) {
- return v[0];
- }
-#else
__forceinline float toScalar(const vfloat4& v) { return _mm_cvtss_f32(v); }
-#endif
- __forceinline vfloat4 broadcast4f(const vfloat4& a, size_t k) {
- return vfloat4::broadcast(&a[k]);
- }
__forceinline vfloat4 shift_right_1(const vfloat4& x) {
- return _mm_castsi128_ps(_mm_srli_si128(_mm_castps_si128(x), 4));
+ return _mm_castsi128_ps(_mm_srli_si128(_mm_castps_si128(x), 4));
}
#if defined (__AVX2__)
@@ -790,7 +590,7 @@ namespace embree
template<int i>
__forceinline vfloat4 align_shift_right(const vfloat4& a, const vfloat4& b) {
return _mm_castsi128_ps(_mm_alignr_epi32(_mm_castps_si128(a), _mm_castps_si128(b), i));
- }
+ }
#endif
@@ -864,39 +664,28 @@ namespace embree
////////////////////////////////////////////////////////////////////////////////
/// Reductions
////////////////////////////////////////////////////////////////////////////////
-#if defined(__aarch64__)
- __forceinline vfloat4 vreduce_min(const vfloat4& v) { float h = vminvq_f32(v); return vdupq_n_f32(h); }
- __forceinline vfloat4 vreduce_max(const vfloat4& v) { float h = vmaxvq_f32(v); return vdupq_n_f32(h); }
- __forceinline vfloat4 vreduce_add(const vfloat4& v) { float h = vaddvq_f32(v); return vdupq_n_f32(h); }
-#else
+
__forceinline vfloat4 vreduce_min(const vfloat4& v) { vfloat4 h = min(shuffle<1,0,3,2>(v),v); return min(shuffle<2,3,0,1>(h),h); }
__forceinline vfloat4 vreduce_max(const vfloat4& v) { vfloat4 h = max(shuffle<1,0,3,2>(v),v); return max(shuffle<2,3,0,1>(h),h); }
__forceinline vfloat4 vreduce_add(const vfloat4& v) { vfloat4 h = shuffle<1,0,3,2>(v) + v ; return shuffle<2,3,0,1>(h) + h ; }
-#endif
-#if defined(__aarch64__)
- __forceinline float reduce_min(const vfloat4& v) { return vminvq_f32(v); }
- __forceinline float reduce_max(const vfloat4& v) { return vmaxvq_f32(v); }
- __forceinline float reduce_add(const vfloat4& v) { return vaddvq_f32(v); }
-#else
__forceinline float reduce_min(const vfloat4& v) { return _mm_cvtss_f32(vreduce_min(v)); }
__forceinline float reduce_max(const vfloat4& v) { return _mm_cvtss_f32(vreduce_max(v)); }
__forceinline float reduce_add(const vfloat4& v) { return _mm_cvtss_f32(vreduce_add(v)); }
-#endif
- __forceinline size_t select_min(const vboolf4& valid, const vfloat4& v)
- {
- const vfloat4 a = select(valid,v,vfloat4(pos_inf));
+ __forceinline size_t select_min(const vboolf4& valid, const vfloat4& v)
+ {
+ const vfloat4 a = select(valid,v,vfloat4(pos_inf));
const vbool4 valid_min = valid & (a == vreduce_min(a));
- return bsf(movemask(any(valid_min) ? valid_min : valid));
+ return bsf(movemask(any(valid_min) ? valid_min : valid));
}
- __forceinline size_t select_max(const vboolf4& valid, const vfloat4& v)
- {
- const vfloat4 a = select(valid,v,vfloat4(neg_inf));
+ __forceinline size_t select_max(const vboolf4& valid, const vfloat4& v)
+ {
+ const vfloat4 a = select(valid,v,vfloat4(neg_inf));
const vbool4 valid_max = valid & (a == vreduce_max(a));
- return bsf(movemask(any(valid_max) ? valid_max : valid));
+ return bsf(movemask(any(valid_max) ? valid_max : valid));
}
-
+
////////////////////////////////////////////////////////////////////////////////
/// Euclidian Space Operators
////////////////////////////////////////////////////////////////////////////////
@@ -911,7 +700,7 @@ namespace embree
const vfloat4 b0 = shuffle<1,2,0,3>(b);
const vfloat4 a1 = shuffle<1,2,0,3>(a);
const vfloat4 b1 = b;
- return shuffle<1,2,0,3>(prod_diff(a0,b0,a1,b1));
+ return shuffle<1,2,0,3>(msub(a0,b0,a1*b1));
}
////////////////////////////////////////////////////////////////////////////////
@@ -923,3 +712,11 @@ namespace embree
}
}
+
+#undef vboolf
+#undef vboold
+#undef vint
+#undef vuint
+#undef vllong
+#undef vfloat
+#undef vdouble
diff --git a/thirdparty/embree-aarch64/common/simd/vfloat8_avx.h b/thirdparty/embree/common/simd/vfloat8_avx.h
index 3c7e4a8cdc..13446454e8 100644
--- a/thirdparty/embree-aarch64/common/simd/vfloat8_avx.h
+++ b/thirdparty/embree/common/simd/vfloat8_avx.h
@@ -1,8 +1,16 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
+#define vboolf vboolf_impl
+#define vboold vboold_impl
+#define vint vint_impl
+#define vuint vuint_impl
+#define vllong vllong_impl
+#define vfloat vfloat_impl
+#define vdouble vdouble_impl
+
namespace embree
{
/* 8-wide AVX float type */
@@ -33,7 +41,7 @@ namespace embree
__forceinline explicit vfloat(const vfloat4& a) : v(_mm256_insertf128_ps(_mm256_castps128_ps256(a),a,1)) {}
__forceinline vfloat(const vfloat4& a, const vfloat4& b) : v(_mm256_insertf128_ps(_mm256_castps128_ps256(a),b,1)) {}
- __forceinline explicit vfloat(const int8_t* a) : v(_mm256_loadu_ps((const float*)a)) {}
+ __forceinline explicit vfloat(const char* a) : v(_mm256_loadu_ps((const float*)a)) {}
__forceinline vfloat(float a) : v(_mm256_set1_ps(a)) {}
__forceinline vfloat(float a, float b) : v(_mm256_set_ps(b, a, b, a, b, a, b, a)) {}
__forceinline vfloat(float a, float b, float c, float d) : v(_mm256_set_ps(d, c, b, a, d, c, b, a)) {}
@@ -61,21 +69,7 @@ namespace embree
return _mm256_broadcast_ss((float*)a);
}
- static __forceinline vfloat8 broadcast2(const float* a, const float* b) {
-#if defined(__INTEL_COMPILER)
- const vfloat8 v0 = _mm256_broadcast_ss(a);
- const vfloat8 v1 = _mm256_broadcast_ss(b);
- return _mm256_blend_ps(v1, v0, 0xf);
-#else
- return _mm256_set_ps(*b,*b,*b,*b,*a,*a,*a,*a);
-#endif
- }
-
- static __forceinline vfloat8 broadcast4f(const vfloat4* ptr) {
- return _mm256_broadcast_ps((__m128*)ptr);
- }
-
- static __forceinline vfloat8 load(const int8_t* ptr) {
+ static __forceinline vfloat8 load(const char* ptr) {
#if defined(__AVX2__)
return _mm256_cvtepi32_ps(_mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)ptr)));
#else
@@ -83,7 +77,7 @@ namespace embree
#endif
}
- static __forceinline vfloat8 load(const uint8_t* ptr) {
+ static __forceinline vfloat8 load(const unsigned char* ptr) {
#if defined(__AVX2__)
return _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32(_mm_loadu_si128((__m128i*)ptr)));
#else
@@ -107,24 +101,11 @@ namespace embree
#if defined(__AVX512VL__)
- static __forceinline vfloat8 compact(const vboolf8& mask, vfloat8 &v) {
- return _mm256_mask_compress_ps(v, mask, v);
- }
- static __forceinline vfloat8 compact(const vboolf8& mask, vfloat8 &a, const vfloat8& b) {
- return _mm256_mask_compress_ps(a, mask, b);
- }
-
static __forceinline vfloat8 load (const vboolf8& mask, const void* ptr) { return _mm256_mask_load_ps (_mm256_setzero_ps(),mask,(float*)ptr); }
static __forceinline vfloat8 loadu(const vboolf8& mask, const void* ptr) { return _mm256_mask_loadu_ps(_mm256_setzero_ps(),mask,(float*)ptr); }
static __forceinline void store (const vboolf8& mask, void* ptr, const vfloat8& v) { _mm256_mask_store_ps ((float*)ptr,mask,v); }
static __forceinline void storeu(const vboolf8& mask, void* ptr, const vfloat8& v) { _mm256_mask_storeu_ps((float*)ptr,mask,v); }
-#elif defined(__aarch64__)
- static __forceinline vfloat8 load (const vboolf8& mask, const void* ptr) { return _mm256_maskload_ps((float*)ptr,(__m256i)mask.v); }
- static __forceinline vfloat8 loadu(const vboolf8& mask, const void* ptr) { return _mm256_maskload_ps((float*)ptr,(__m256i)mask.v); }
-
- static __forceinline void store (const vboolf8& mask, void* ptr, const vfloat8& v) { _mm256_maskstore_ps((float*)ptr,(__m256i)mask.v,v); }
- static __forceinline void storeu(const vboolf8& mask, void* ptr, const vfloat8& v) { _mm256_maskstore_ps((float*)ptr,(__m256i)mask.v,v); }
#else
static __forceinline vfloat8 load (const vboolf8& mask, const void* ptr) { return _mm256_maskload_ps((float*)ptr,(__m256i)mask); }
static __forceinline vfloat8 loadu(const vboolf8& mask, const void* ptr) { return _mm256_maskload_ps((float*)ptr,(__m256i)mask); }
@@ -145,18 +126,18 @@ namespace embree
template<int scale = 4>
static __forceinline vfloat8 gather(const float* ptr, const vint8& index) {
-#if defined(__AVX2__) && !defined(__aarch64__)
+#if defined(__AVX2__)
return _mm256_i32gather_ps(ptr, index ,scale);
#else
return vfloat8(
- *(float*)(((int8_t*)ptr)+scale*index[0]),
- *(float*)(((int8_t*)ptr)+scale*index[1]),
- *(float*)(((int8_t*)ptr)+scale*index[2]),
- *(float*)(((int8_t*)ptr)+scale*index[3]),
- *(float*)(((int8_t*)ptr)+scale*index[4]),
- *(float*)(((int8_t*)ptr)+scale*index[5]),
- *(float*)(((int8_t*)ptr)+scale*index[6]),
- *(float*)(((int8_t*)ptr)+scale*index[7]));
+ *(float*)(((char*)ptr)+scale*index[0]),
+ *(float*)(((char*)ptr)+scale*index[1]),
+ *(float*)(((char*)ptr)+scale*index[2]),
+ *(float*)(((char*)ptr)+scale*index[3]),
+ *(float*)(((char*)ptr)+scale*index[4]),
+ *(float*)(((char*)ptr)+scale*index[5]),
+ *(float*)(((char*)ptr)+scale*index[6]),
+ *(float*)(((char*)ptr)+scale*index[7]));
#endif
}
@@ -165,17 +146,17 @@ namespace embree
vfloat8 r = zero;
#if defined(__AVX512VL__)
return _mm256_mmask_i32gather_ps(r, mask, index, ptr, scale);
-#elif defined(__AVX2__) && !defined(__aarch64__)
+#elif defined(__AVX2__)
return _mm256_mask_i32gather_ps(r, ptr, index, mask, scale);
#else
- if (likely(mask[0])) r[0] = *(float*)(((int8_t*)ptr)+scale*index[0]);
- if (likely(mask[1])) r[1] = *(float*)(((int8_t*)ptr)+scale*index[1]);
- if (likely(mask[2])) r[2] = *(float*)(((int8_t*)ptr)+scale*index[2]);
- if (likely(mask[3])) r[3] = *(float*)(((int8_t*)ptr)+scale*index[3]);
- if (likely(mask[4])) r[4] = *(float*)(((int8_t*)ptr)+scale*index[4]);
- if (likely(mask[5])) r[5] = *(float*)(((int8_t*)ptr)+scale*index[5]);
- if (likely(mask[6])) r[6] = *(float*)(((int8_t*)ptr)+scale*index[6]);
- if (likely(mask[7])) r[7] = *(float*)(((int8_t*)ptr)+scale*index[7]);
+ if (likely(mask[0])) r[0] = *(float*)(((char*)ptr)+scale*index[0]);
+ if (likely(mask[1])) r[1] = *(float*)(((char*)ptr)+scale*index[1]);
+ if (likely(mask[2])) r[2] = *(float*)(((char*)ptr)+scale*index[2]);
+ if (likely(mask[3])) r[3] = *(float*)(((char*)ptr)+scale*index[3]);
+ if (likely(mask[4])) r[4] = *(float*)(((char*)ptr)+scale*index[4]);
+ if (likely(mask[5])) r[5] = *(float*)(((char*)ptr)+scale*index[5]);
+ if (likely(mask[6])) r[6] = *(float*)(((char*)ptr)+scale*index[6]);
+ if (likely(mask[7])) r[7] = *(float*)(((char*)ptr)+scale*index[7]);
return r;
#endif
}
@@ -186,14 +167,14 @@ namespace embree
#if defined(__AVX512VL__)
_mm256_i32scatter_ps((float*)ptr, ofs, v, scale);
#else
- *(float*)(((int8_t*)ptr)+scale*ofs[0]) = v[0];
- *(float*)(((int8_t*)ptr)+scale*ofs[1]) = v[1];
- *(float*)(((int8_t*)ptr)+scale*ofs[2]) = v[2];
- *(float*)(((int8_t*)ptr)+scale*ofs[3]) = v[3];
- *(float*)(((int8_t*)ptr)+scale*ofs[4]) = v[4];
- *(float*)(((int8_t*)ptr)+scale*ofs[5]) = v[5];
- *(float*)(((int8_t*)ptr)+scale*ofs[6]) = v[6];
- *(float*)(((int8_t*)ptr)+scale*ofs[7]) = v[7];
+ *(float*)(((char*)ptr)+scale*ofs[0]) = v[0];
+ *(float*)(((char*)ptr)+scale*ofs[1]) = v[1];
+ *(float*)(((char*)ptr)+scale*ofs[2]) = v[2];
+ *(float*)(((char*)ptr)+scale*ofs[3]) = v[3];
+ *(float*)(((char*)ptr)+scale*ofs[4]) = v[4];
+ *(float*)(((char*)ptr)+scale*ofs[5]) = v[5];
+ *(float*)(((char*)ptr)+scale*ofs[6]) = v[6];
+ *(float*)(((char*)ptr)+scale*ofs[7]) = v[7];
#endif
}
@@ -203,24 +184,17 @@ namespace embree
#if defined(__AVX512VL__)
_mm256_mask_i32scatter_ps((float*)ptr, mask, ofs, v, scale);
#else
- if (likely(mask[0])) *(float*)(((int8_t*)ptr)+scale*ofs[0]) = v[0];
- if (likely(mask[1])) *(float*)(((int8_t*)ptr)+scale*ofs[1]) = v[1];
- if (likely(mask[2])) *(float*)(((int8_t*)ptr)+scale*ofs[2]) = v[2];
- if (likely(mask[3])) *(float*)(((int8_t*)ptr)+scale*ofs[3]) = v[3];
- if (likely(mask[4])) *(float*)(((int8_t*)ptr)+scale*ofs[4]) = v[4];
- if (likely(mask[5])) *(float*)(((int8_t*)ptr)+scale*ofs[5]) = v[5];
- if (likely(mask[6])) *(float*)(((int8_t*)ptr)+scale*ofs[6]) = v[6];
- if (likely(mask[7])) *(float*)(((int8_t*)ptr)+scale*ofs[7]) = v[7];
+ if (likely(mask[0])) *(float*)(((char*)ptr)+scale*ofs[0]) = v[0];
+ if (likely(mask[1])) *(float*)(((char*)ptr)+scale*ofs[1]) = v[1];
+ if (likely(mask[2])) *(float*)(((char*)ptr)+scale*ofs[2]) = v[2];
+ if (likely(mask[3])) *(float*)(((char*)ptr)+scale*ofs[3]) = v[3];
+ if (likely(mask[4])) *(float*)(((char*)ptr)+scale*ofs[4]) = v[4];
+ if (likely(mask[5])) *(float*)(((char*)ptr)+scale*ofs[5]) = v[5];
+ if (likely(mask[6])) *(float*)(((char*)ptr)+scale*ofs[6]) = v[6];
+ if (likely(mask[7])) *(float*)(((char*)ptr)+scale*ofs[7]) = v[7];
#endif
}
- static __forceinline void store(const vboolf8& mask, int8_t* ptr, const vint8& ofs, const vfloat8& v) {
- scatter<1>(mask,ptr,ofs,v);
- }
- static __forceinline void store(const vboolf8& mask, float* ptr, const vint8& ofs, const vfloat8& v) {
- scatter<4>(mask,ptr,ofs,v);
- }
-
////////////////////////////////////////////////////////////////////////////////
/// Array Access
////////////////////////////////////////////////////////////////////////////////
@@ -241,60 +215,27 @@ namespace embree
__forceinline vfloat8 toFloat(const vint8& a) { return vfloat8(a); }
__forceinline vfloat8 operator +(const vfloat8& a) { return a; }
-#if !defined(__aarch64__)
__forceinline vfloat8 operator -(const vfloat8& a) {
const __m256 mask = _mm256_castsi256_ps(_mm256_set1_epi32(0x80000000));
return _mm256_xor_ps(a, mask);
}
-#else
- __forceinline vfloat8 operator -(const vfloat8& a) {
- __m256 res;
- res.lo = vnegq_f32(a.v.lo);
- res.hi = vnegq_f32(a.v.hi);
- return res;
-}
-#endif
-
-#if !defined(__aarch64__)
-__forceinline vfloat8 abs(const vfloat8& a) {
- const __m256 mask = _mm256_castsi256_ps(_mm256_set1_epi32(0x7fffffff));
- return _mm256_and_ps(a, mask);
-}
-#else
-__forceinline vfloat8 abs(const vfloat8& a) {
- __m256 res;
- res.lo = vabsq_f32(a.v.lo);
- res.hi = vabsq_f32(a.v.hi);
- return res;
-}
-#endif
-
-#if !defined(__aarch64__)
+ __forceinline vfloat8 abs(const vfloat8& a) {
+ const __m256 mask = _mm256_castsi256_ps(_mm256_set1_epi32(0x7fffffff));
+ return _mm256_and_ps(a, mask);
+ }
__forceinline vfloat8 sign (const vfloat8& a) { return _mm256_blendv_ps(vfloat8(one), -vfloat8(one), _mm256_cmp_ps(a, vfloat8(zero), _CMP_NGE_UQ)); }
-#else
- __forceinline vfloat8 sign (const vfloat8& a) { return _mm256_blendv_ps(vfloat8(one), -vfloat8(one), _mm256_cmplt_ps(a, vfloat8(zero))); }
-#endif
__forceinline vfloat8 signmsk(const vfloat8& a) { return _mm256_and_ps(a,_mm256_castsi256_ps(_mm256_set1_epi32(0x80000000))); }
static __forceinline vfloat8 rcp(const vfloat8& a)
{
-#if defined(BUILD_IOS) && defined(__aarch64__)
- // ios devices are faster doing full divide, no need for NR fixup
- vfloat8 ret;
- const float32x4_t one = vdupq_n_f32(1.0f);
- ret.v.lo = vdivq_f32(one, a.v.lo);
- ret.v.hi = vdivq_f32(one, a.v.hi);
- return ret;
-#endif
-
#if defined(__AVX512VL__)
const vfloat8 r = _mm256_rcp14_ps(a);
#else
const vfloat8 r = _mm256_rcp_ps(a);
#endif
-
-#if defined(__AVX2__) //&& !defined(aarch64)
+
+#if defined(__AVX2__)
return _mm256_mul_ps(r, _mm256_fnmadd_ps(r, a, vfloat8(2.0f)));
#else
return _mm256_mul_ps(r, _mm256_sub_ps(vfloat8(2.0f), _mm256_mul_ps(r, a)));
@@ -443,29 +384,17 @@ __forceinline vfloat8 abs(const vfloat8& a) {
static __forceinline vfloat8 select(const vboolf8& m, const vfloat8& t, const vfloat8& f) {
return _mm256_mask_blend_ps(m, f, t);
}
-#elif !defined(__aarch64__)
- __forceinline vboolf8 operator ==(const vfloat8& a, const vfloat8& b) { return _mm256_cmp_ps(a, b, _CMP_EQ_OQ); }
- __forceinline vboolf8 operator !=(const vfloat8& a, const vfloat8& b) { return _mm256_cmp_ps(a, b, _CMP_NEQ_UQ); }
- __forceinline vboolf8 operator < (const vfloat8& a, const vfloat8& b) { return _mm256_cmp_ps(a, b, _CMP_LT_OS); }
- __forceinline vboolf8 operator >=(const vfloat8& a, const vfloat8& b) { return _mm256_cmp_ps(a, b, _CMP_NLT_US); }
- __forceinline vboolf8 operator > (const vfloat8& a, const vfloat8& b) { return _mm256_cmp_ps(a, b, _CMP_NLE_US); }
- __forceinline vboolf8 operator <=(const vfloat8& a, const vfloat8& b) { return _mm256_cmp_ps(a, b, _CMP_LE_OS); }
-
- __forceinline vfloat8 select(const vboolf8& m, const vfloat8& t, const vfloat8& f) {
- return _mm256_blendv_ps(f, t, m);
- }
#else
- __forceinline vboolf8 operator ==(const vfloat8& a, const vfloat8& b) { return _mm256_cmpeq_ps(a, b); }
- __forceinline vboolf8 operator !=(const vfloat8& a, const vfloat8& b) { return _mm256_cmpneq_ps(a, b); }
- __forceinline vboolf8 operator < (const vfloat8& a, const vfloat8& b) { return _mm256_cmplt_ps(a, b); }
- __forceinline vboolf8 operator >=(const vfloat8& a, const vfloat8& b) { return _mm256_cmpge_ps(a, b); }
- __forceinline vboolf8 operator > (const vfloat8& a, const vfloat8& b) { return _mm256_cmpgt_ps(a, b); }
- __forceinline vboolf8 operator <=(const vfloat8& a, const vfloat8& b) { return _mm256_cmple_ps(a, b); }
+ static __forceinline vboolf8 operator ==(const vfloat8& a, const vfloat8& b) { return _mm256_cmp_ps(a, b, _CMP_EQ_OQ); }
+ static __forceinline vboolf8 operator !=(const vfloat8& a, const vfloat8& b) { return _mm256_cmp_ps(a, b, _CMP_NEQ_UQ); }
+ static __forceinline vboolf8 operator < (const vfloat8& a, const vfloat8& b) { return _mm256_cmp_ps(a, b, _CMP_LT_OS); }
+ static __forceinline vboolf8 operator >=(const vfloat8& a, const vfloat8& b) { return _mm256_cmp_ps(a, b, _CMP_NLT_US); }
+ static __forceinline vboolf8 operator > (const vfloat8& a, const vfloat8& b) { return _mm256_cmp_ps(a, b, _CMP_NLE_US); }
+ static __forceinline vboolf8 operator <=(const vfloat8& a, const vfloat8& b) { return _mm256_cmp_ps(a, b, _CMP_LE_OS); }
- __forceinline vfloat8 select(const vboolf8& m, const vfloat8& t, const vfloat8& f) {
- return _mm256_blendv_ps(f, t, m);
+ static __forceinline vfloat8 select(const vboolf8& m, const vfloat8& t, const vfloat8& f) {
+ return _mm256_blendv_ps(f, t, m);
}
-
#endif
template<int mask>
@@ -534,17 +463,10 @@ __forceinline vfloat8 abs(const vfloat8& a) {
/// Rounding Functions
////////////////////////////////////////////////////////////////////////////////
-#if !defined(__aarch64__)
__forceinline vfloat8 floor(const vfloat8& a) { return _mm256_round_ps(a, _MM_FROUND_TO_NEG_INF ); }
__forceinline vfloat8 ceil (const vfloat8& a) { return _mm256_round_ps(a, _MM_FROUND_TO_POS_INF ); }
__forceinline vfloat8 trunc(const vfloat8& a) { return _mm256_round_ps(a, _MM_FROUND_TO_ZERO ); }
__forceinline vfloat8 round(const vfloat8& a) { return _mm256_round_ps(a, _MM_FROUND_TO_NEAREST_INT); }
-#else
- __forceinline vfloat8 floor(const vfloat8& a) { return _mm256_floor_ps(a); }
- __forceinline vfloat8 ceil (const vfloat8& a) { return _mm256_ceil_ps(a); }
-#endif
-
-
__forceinline vfloat8 frac (const vfloat8& a) { return a-floor(a); }
////////////////////////////////////////////////////////////////////////////////
@@ -579,11 +501,9 @@ __forceinline vfloat8 abs(const vfloat8& a) {
return _mm256_shuffle_ps(a, b, _MM_SHUFFLE(i3, i2, i1, i0));
}
-#if !defined(__aarch64__)
template<> __forceinline vfloat8 shuffle<0, 0, 2, 2>(const vfloat8& v) { return _mm256_moveldup_ps(v); }
template<> __forceinline vfloat8 shuffle<1, 1, 3, 3>(const vfloat8& v) { return _mm256_movehdup_ps(v); }
template<> __forceinline vfloat8 shuffle<0, 1, 0, 1>(const vfloat8& v) { return _mm256_castpd_ps(_mm256_movedup_pd(_mm256_castps_pd(v))); }
-#endif
__forceinline vfloat8 broadcast(const float* ptr) { return _mm256_broadcast_ss(ptr); }
template<size_t i> __forceinline vfloat8 insert4(const vfloat8& a, const vfloat4& b) { return _mm256_insertf128_ps(a, b, i); }
@@ -592,10 +512,8 @@ __forceinline vfloat8 abs(const vfloat8& a) {
__forceinline float toScalar(const vfloat8& v) { return _mm_cvtss_f32(_mm256_castps256_ps128(v)); }
- __forceinline vfloat8 assign(const vfloat4& a) { return _mm256_castps128_ps256(a); }
-
-#if defined (__AVX2__) && !defined(__aarch64__)
- __forceinline vfloat8 permute(const vfloat8& a, const __m256i& index) {
+#if defined (__AVX2__)
+ static __forceinline vfloat8 permute(const vfloat8& a, const __m256i& index) {
return _mm256_permutevar8x32_ps(a, index);
}
#endif
@@ -618,14 +536,6 @@ __forceinline vfloat8 abs(const vfloat8& a) {
}
#endif
- __forceinline vfloat4 broadcast4f(const vfloat8& a, const size_t k) {
- return vfloat4::broadcast(&a[k]);
- }
-
- __forceinline vfloat8 broadcast8f(const vfloat8& a, const size_t k) {
- return vfloat8::broadcast(&a[k]);
- }
-
#if defined(__AVX512VL__)
static __forceinline vfloat8 shift_right_1(const vfloat8& x) {
return align_shift_right<1>(zero,x);
@@ -699,7 +609,7 @@ __forceinline vfloat8 abs(const vfloat8& a) {
////////////////////////////////////////////////////////////////////////////////
/// Reductions
////////////////////////////////////////////////////////////////////////////////
-#if !defined(__aarch64__)
+
__forceinline vfloat8 vreduce_min2(const vfloat8& v) { return min(v,shuffle<1,0,3,2>(v)); }
__forceinline vfloat8 vreduce_min4(const vfloat8& v) { vfloat8 v1 = vreduce_min2(v); return min(v1,shuffle<2,3,0,1>(v1)); }
__forceinline vfloat8 vreduce_min (const vfloat8& v) { vfloat8 v1 = vreduce_min4(v); return min(v1,shuffle4<1,0>(v1)); }
@@ -715,14 +625,7 @@ __forceinline vfloat8 abs(const vfloat8& a) {
__forceinline float reduce_min(const vfloat8& v) { return toScalar(vreduce_min(v)); }
__forceinline float reduce_max(const vfloat8& v) { return toScalar(vreduce_max(v)); }
__forceinline float reduce_add(const vfloat8& v) { return toScalar(vreduce_add(v)); }
-#else
- __forceinline float reduce_min(const vfloat8& v) { return vminvq_f32(_mm_min_ps(v.v.lo,v.v.hi)); }
- __forceinline float reduce_max(const vfloat8& v) { return vmaxvq_f32(_mm_max_ps(v.v.lo,v.v.hi)); }
- __forceinline vfloat8 vreduce_min(const vfloat8& v) { return vfloat8(reduce_min(v)); }
- __forceinline vfloat8 vreduce_max(const vfloat8& v) { return vfloat8(reduce_max(v)); }
- __forceinline float reduce_add(const vfloat8& v) { return vaddvq_f32(_mm_add_ps(v.v.lo,v.v.hi)); }
-#endif
__forceinline size_t select_min(const vboolf8& valid, const vfloat8& v)
{
const vfloat8 a = select(valid,v,vfloat8(pos_inf));
@@ -845,3 +748,11 @@ __forceinline vfloat8 abs(const vfloat8& a) {
return cout << "<" << a[0] << ", " << a[1] << ", " << a[2] << ", " << a[3] << ", " << a[4] << ", " << a[5] << ", " << a[6] << ", " << a[7] << ">";
}
}
+
+#undef vboolf
+#undef vboold
+#undef vint
+#undef vuint
+#undef vllong
+#undef vfloat
+#undef vdouble
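
// Editor's note: a minimal standalone sketch (not part of the patch) of how the
// vfloat8 comparison/select pair above maps to raw AVX intrinsics. The helper
// name clamp8 is illustrative only. Compile with e.g. -mavx.
#include <immintrin.h>

static inline __m256 clamp8(__m256 v, __m256 lo, __m256 hi)
{
    // _CMP_LT_OS is the ordered, signaling "less than" predicate used by
    // operator < above; AVX exposes all comparisons through _mm256_cmp_ps.
    __m256 below = _mm256_cmp_ps(v, lo, _CMP_LT_OS);
    __m256 above = _mm256_cmp_ps(hi, v, _CMP_LT_OS);
    // blendv takes lanes from the second operand where the mask is set,
    // mirroring select(m, t, f) == _mm256_blendv_ps(f, t, m) above.
    v = _mm256_blendv_ps(v, lo, below);
    v = _mm256_blendv_ps(v, hi, above);
    return v;
}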
diff --git a/thirdparty/embree-aarch64/common/simd/vint16_avx512.h b/thirdparty/embree/common/simd/vint16_avx512.h
index 3249bc2b45..3720c3c9d6 100644
--- a/thirdparty/embree-aarch64/common/simd/vint16_avx512.h
+++ b/thirdparty/embree/common/simd/vint16_avx512.h
@@ -1,8 +1,16 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
+#define vboolf vboolf_impl
+#define vboold vboold_impl
+#define vint vint_impl
+#define vuint vuint_impl
+#define vllong vllong_impl
+#define vfloat vfloat_impl
+#define vdouble vdouble_impl
+
namespace embree
{
/* 16-wide AVX-512 integer type */
@@ -90,10 +98,10 @@ namespace embree
static __forceinline vint16 load (const void* addr) { return _mm512_load_si512((int*)addr); }
- static __forceinline vint16 load(const uint8_t* ptr) { return _mm512_cvtepu8_epi32(_mm_load_si128((__m128i*)ptr)); }
+ static __forceinline vint16 load(const unsigned char* ptr) { return _mm512_cvtepu8_epi32(_mm_load_si128((__m128i*)ptr)); }
static __forceinline vint16 load(const unsigned short* ptr) { return _mm512_cvtepu16_epi32(_mm256_load_si256((__m256i*)ptr)); }
- static __forceinline vint16 loadu(const uint8_t* ptr) { return _mm512_cvtepu8_epi32(_mm_loadu_si128((__m128i*)ptr)); }
+ static __forceinline vint16 loadu(const unsigned char* ptr) { return _mm512_cvtepu8_epi32(_mm_loadu_si128((__m128i*)ptr)); }
static __forceinline vint16 loadu(const unsigned short* ptr) { return _mm512_cvtepu16_epi32(_mm256_loadu_si256((__m256i*)ptr)); }
static __forceinline vint16 loadu(const void* addr) { return _mm512_loadu_si512(addr); }
@@ -109,20 +117,6 @@ namespace embree
static __forceinline void store_nt(void* __restrict__ ptr, const vint16& a) { _mm512_stream_si512((__m512i*)ptr,a); }
- /* pass by value to avoid compiler generating inefficient code */
- static __forceinline void storeu_compact(const vboolf16 mask, void* addr, vint16 reg) {
- _mm512_mask_compressstoreu_epi32(addr,mask,reg);
- }
-
- static __forceinline void storeu_compact_single(const vboolf16 mask, void* addr, vint16 reg) {
- //_mm512_mask_compressstoreu_epi32(addr,mask,reg);
- *(float*)addr = mm512_cvtss_f32(_mm512_mask_compress_ps(_mm512_castsi512_ps(reg),mask,_mm512_castsi512_ps(reg)));
- }
-
- static __forceinline vint16 compact64bit(const vboolf16& mask, vint16 &v) {
- return _mm512_mask_compress_epi64(v,mask,v);
- }
-
static __forceinline vint16 compact(const vboolf16& mask, vint16 &v) {
return _mm512_mask_compress_epi32(v,mask,v);
}
@@ -160,10 +154,6 @@ namespace embree
_mm512_mask_i32scatter_epi32((int*)ptr,mask,index,v,scale);
}
- static __forceinline vint16 broadcast64bit(size_t v) {
- return _mm512_set1_epi64(v);
- }
-
////////////////////////////////////////////////////////////////////////////////
/// Array Access
////////////////////////////////////////////////////////////////////////////////
@@ -313,18 +303,6 @@ namespace embree
return _mm512_mask_or_epi32(f,m,t,t);
}
- __forceinline void xchg(const vboolf16& m, vint16& a, vint16& b) {
- const vint16 c = a; a = select(m,b,a); b = select(m,c,b);
- }
-
- __forceinline vboolf16 test(const vboolf16& m, const vint16& a, const vint16& b) {
- return _mm512_mask_test_epi32_mask(m,a,b);
- }
-
- __forceinline vboolf16 test(const vint16& a, const vint16& b) {
- return _mm512_test_epi32_mask(a,b);
- }
-
////////////////////////////////////////////////////////////////////////////////
// Movement/Shifting/Shuffling Functions
////////////////////////////////////////////////////////////////////////////////
@@ -363,10 +341,6 @@ namespace embree
template<int i> __forceinline vint16 insert4(const vint16& a, const vint4& b) { return _mm512_inserti32x4(a, b, i); }
- __forceinline size_t extract64bit(const vint16& v) {
- return _mm_cvtsi128_si64(_mm512_castsi512_si128(v));
- }
-
template<int N, int i>
vint<N> extractN(const vint16& v);
@@ -488,3 +462,11 @@ namespace embree
return cout;
}
}
+
+#undef vboolf
+#undef vboold
+#undef vint
+#undef vuint
+#undef vllong
+#undef vfloat
+#undef vdouble
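
// Editor's note: an illustrative sketch (not from the patch) of the
// "#define vint vint_impl ... #undef vint" wrapping that each of these headers
// now adds. The mechanism is plain token replacement: inside the header the
// short name actually declares the *_impl type, and the #undef releases the
// name again for including code. The demo namespace below is hypothetical.
namespace demo {

#define vint vint_impl

  template <int N> struct vint { int v[N]; };   // really defines demo::vint_impl

#undef vint

}

// After the #undef, "vint" is an ordinary identifier and user code may bind it
// to whichever width it wants:
using vint = demo::vint_impl<4>;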
diff --git a/thirdparty/embree-aarch64/common/simd/vint4_sse2.h b/thirdparty/embree/common/simd/vint4_sse2.h
index 96f105a7c5..9814d5c71c 100644
--- a/thirdparty/embree-aarch64/common/simd/vint4_sse2.h
+++ b/thirdparty/embree/common/simd/vint4_sse2.h
@@ -1,10 +1,18 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
#include "../math/math.h"
+#define vboolf vboolf_impl
+#define vboold vboold_impl
+#define vint vint_impl
+#define vuint vuint_impl
+#define vllong vllong_impl
+#define vfloat vfloat_impl
+#define vdouble vdouble_impl
+
namespace embree
{
/* 4-wide SSE integer type */
@@ -23,7 +31,7 @@ namespace embree
////////////////////////////////////////////////////////////////////////////////
/// Constructors, Assignment & Cast Operators
////////////////////////////////////////////////////////////////////////////////
-
+
__forceinline vint() {}
__forceinline vint(const vint4& a) { v = a.v; }
__forceinline vint4& operator =(const vint4& a) { v = a.v; return *this; }
@@ -68,7 +76,7 @@ namespace embree
static __forceinline void store (void* ptr, const vint4& v) { _mm_store_si128((__m128i*)ptr,v); }
static __forceinline void storeu(void* ptr, const vint4& v) { _mm_storeu_si128((__m128i*)ptr,v); }
-
+
#if defined(__AVX512VL__)
static __forceinline vint4 compact(const vboolf4& mask, vint4 &v) {
@@ -98,81 +106,61 @@ namespace embree
#endif
-#if defined(__aarch64__)
- static __forceinline vint4 load(const uint8_t* ptr) {
- return _mm_load4epu8_epi32(((__m128i*)ptr));
- }
- static __forceinline vint4 loadu(const uint8_t* ptr) {
- return _mm_load4epu8_epi32(((__m128i*)ptr));
- }
-#elif defined(__SSE4_1__)
- static __forceinline vint4 load(const uint8_t* ptr) {
+#if defined(__SSE4_1__)
+ static __forceinline vint4 load(const unsigned char* ptr) {
return _mm_cvtepu8_epi32(_mm_loadl_epi64((__m128i*)ptr));
}
- static __forceinline vint4 loadu(const uint8_t* ptr) {
+ static __forceinline vint4 loadu(const unsigned char* ptr) {
return _mm_cvtepu8_epi32(_mm_loadl_epi64((__m128i*)ptr));
}
#else
- static __forceinline vint4 load(const uint8_t* ptr) {
+ static __forceinline vint4 load(const unsigned char* ptr) {
return vint4(ptr[0],ptr[1],ptr[2],ptr[3]);
- }
+ }
- static __forceinline vint4 loadu(const uint8_t* ptr) {
+ static __forceinline vint4 loadu(const unsigned char* ptr) {
return vint4(ptr[0],ptr[1],ptr[2],ptr[3]);
}
#endif
static __forceinline vint4 load(const unsigned short* ptr) {
-#if defined(__aarch64__)
- return __m128i(vmovl_u16(vld1_u16(ptr)));
-#elif defined (__SSE4_1__)
+#if defined (__SSE4_1__)
return _mm_cvtepu16_epi32(_mm_loadu_si128((__m128i*)ptr));
#else
return vint4(ptr[0],ptr[1],ptr[2],ptr[3]);
#endif
- }
+ }
- static __forceinline void store(uint8_t* ptr, const vint4& v) {
-#if defined(__aarch64__)
- int32x4_t x = v;
- uint16x4_t y = vqmovn_u32(uint32x4_t(x));
- uint8x8_t z = vqmovn_u16(vcombine_u16(y, y));
- vst1_lane_u32((uint32_t *)ptr,uint32x2_t(z), 0);
-#elif defined(__SSE4_1__)
+ static __forceinline void store(unsigned char* ptr, const vint4& v) {
+#if defined(__SSE4_1__)
__m128i x = v;
x = _mm_packus_epi32(x, x);
x = _mm_packus_epi16(x, x);
*(int*)ptr = _mm_cvtsi128_si32(x);
#else
for (size_t i=0;i<4;i++)
- ptr[i] = (uint8_t)v[i];
+ ptr[i] = (unsigned char)v[i];
#endif
}
static __forceinline void store(unsigned short* ptr, const vint4& v) {
-#if defined(__aarch64__)
- uint32x4_t x = uint32x4_t(v.v);
- uint16x4_t y = vqmovn_u32(x);
- vst1_u16(ptr, y);
-#else
for (size_t i=0;i<4;i++)
ptr[i] = (unsigned short)v[i];
-#endif
}
static __forceinline vint4 load_nt(void* ptr) {
-#if defined(__aarch64__) || defined(__SSE4_1__)
- return _mm_stream_load_si128((__m128i*)ptr);
+#if defined(__SSE4_1__)
+ return _mm_stream_load_si128((__m128i*)ptr);
#else
- return _mm_load_si128((__m128i*)ptr);
+ return _mm_load_si128((__m128i*)ptr);
#endif
}
-
+
static __forceinline void store_nt(void* ptr, const vint4& v) {
-#if !defined(__aarch64__) && defined(__SSE4_1__)
+#if defined(__SSE4_1__)
_mm_stream_ps((float*)ptr, _mm_castsi128_ps(v));
#else
_mm_store_si128((__m128i*)ptr,v);
@@ -181,14 +169,14 @@ namespace embree
template<int scale = 4>
static __forceinline vint4 gather(const int* ptr, const vint4& index) {
-#if defined(__AVX2__) && !defined(__aarch64__)
+#if defined(__AVX2__)
return _mm_i32gather_epi32(ptr, index, scale);
#else
return vint4(
- *(int*)(((int8_t*)ptr)+scale*index[0]),
- *(int*)(((int8_t*)ptr)+scale*index[1]),
- *(int*)(((int8_t*)ptr)+scale*index[2]),
- *(int*)(((int8_t*)ptr)+scale*index[3]));
+ *(int*)(((char*)ptr)+scale*index[0]),
+ *(int*)(((char*)ptr)+scale*index[1]),
+ *(int*)(((char*)ptr)+scale*index[2]),
+ *(int*)(((char*)ptr)+scale*index[3]));
#endif
}
@@ -197,13 +185,13 @@ namespace embree
vint4 r = zero;
#if defined(__AVX512VL__)
return _mm_mmask_i32gather_epi32(r, mask, index, ptr, scale);
-#elif defined(__AVX2__) && !defined(__aarch64__)
+#elif defined(__AVX2__)
return _mm_mask_i32gather_epi32(r, ptr, index, mask, scale);
#else
- if (likely(mask[0])) r[0] = *(int*)(((int8_t*)ptr)+scale*index[0]);
- if (likely(mask[1])) r[1] = *(int*)(((int8_t*)ptr)+scale*index[1]);
- if (likely(mask[2])) r[2] = *(int*)(((int8_t*)ptr)+scale*index[2]);
- if (likely(mask[3])) r[3] = *(int*)(((int8_t*)ptr)+scale*index[3]);
+ if (likely(mask[0])) r[0] = *(int*)(((char*)ptr)+scale*index[0]);
+ if (likely(mask[1])) r[1] = *(int*)(((char*)ptr)+scale*index[1]);
+ if (likely(mask[2])) r[2] = *(int*)(((char*)ptr)+scale*index[2]);
+ if (likely(mask[3])) r[3] = *(int*)(((char*)ptr)+scale*index[3]);
return r;
#endif
}
@@ -214,10 +202,10 @@ namespace embree
#if defined(__AVX512VL__)
_mm_i32scatter_epi32((int*)ptr, index, v, scale);
#else
- *(int*)(((int8_t*)ptr)+scale*index[0]) = v[0];
- *(int*)(((int8_t*)ptr)+scale*index[1]) = v[1];
- *(int*)(((int8_t*)ptr)+scale*index[2]) = v[2];
- *(int*)(((int8_t*)ptr)+scale*index[3]) = v[3];
+ *(int*)(((char*)ptr)+scale*index[0]) = v[0];
+ *(int*)(((char*)ptr)+scale*index[1]) = v[1];
+ *(int*)(((char*)ptr)+scale*index[2]) = v[2];
+ *(int*)(((char*)ptr)+scale*index[3]) = v[3];
#endif
}
@@ -227,14 +215,14 @@ namespace embree
#if defined(__AVX512VL__)
_mm_mask_i32scatter_epi32((int*)ptr, mask, index, v, scale);
#else
- if (likely(mask[0])) *(int*)(((int8_t*)ptr)+scale*index[0]) = v[0];
- if (likely(mask[1])) *(int*)(((int8_t*)ptr)+scale*index[1]) = v[1];
- if (likely(mask[2])) *(int*)(((int8_t*)ptr)+scale*index[2]) = v[2];
- if (likely(mask[3])) *(int*)(((int8_t*)ptr)+scale*index[3]) = v[3];
+ if (likely(mask[0])) *(int*)(((char*)ptr)+scale*index[0]) = v[0];
+ if (likely(mask[1])) *(int*)(((char*)ptr)+scale*index[1]) = v[1];
+ if (likely(mask[2])) *(int*)(((char*)ptr)+scale*index[2]) = v[2];
+ if (likely(mask[3])) *(int*)(((char*)ptr)+scale*index[3]) = v[3];
#endif
}
-#if defined(__x86_64__) || defined(__aarch64__)
+#if defined(__x86_64__)
static __forceinline vint4 broadcast64(long long a) { return _mm_set1_epi64x(a); }
#endif
@@ -248,12 +236,10 @@ namespace embree
friend __forceinline vint4 select(const vboolf4& m, const vint4& t, const vint4& f) {
#if defined(__AVX512VL__)
return _mm_mask_blend_epi32(m, (__m128i)f, (__m128i)t);
-#elif defined(__aarch64__)
- return _mm_castps_si128(_mm_blendv_ps((__m128)f.v,(__m128) t.v, (__m128)m.v));
#elif defined(__SSE4_1__)
- return _mm_castps_si128(_mm_blendv_ps(_mm_castsi128_ps(f), _mm_castsi128_ps(t), m));
+ return _mm_castps_si128(_mm_blendv_ps(_mm_castsi128_ps(f), _mm_castsi128_ps(t), m));
#else
- return _mm_or_si128(_mm_and_si128(m, t), _mm_andnot_si128(m, f));
+ return _mm_or_si128(_mm_and_si128(m, t), _mm_andnot_si128(m, f));
#endif
}
};
@@ -270,9 +256,7 @@ namespace embree
__forceinline vint4 operator +(const vint4& a) { return a; }
__forceinline vint4 operator -(const vint4& a) { return _mm_sub_epi32(_mm_setzero_si128(), a); }
-#if defined(__aarch64__)
- __forceinline vint4 abs(const vint4& a) { return vabsq_s32(a.v); }
-#elif defined(__SSSE3__)
+#if defined(__SSSE3__)
__forceinline vint4 abs(const vint4& a) { return _mm_abs_epi32(a); }
#endif
@@ -288,7 +272,7 @@ namespace embree
__forceinline vint4 operator -(const vint4& a, int b) { return a - vint4(b); }
__forceinline vint4 operator -(int a, const vint4& b) { return vint4(a) - b; }
-#if (defined(__aarch64__)) || defined(__SSE4_1__)
+#if defined(__SSE4_1__)
__forceinline vint4 operator *(const vint4& a, const vint4& b) { return _mm_mullo_epi32(a, b); }
#else
__forceinline vint4 operator *(const vint4& a, const vint4& b) { return vint4(a[0]*b[0],a[1]*b[1],a[2]*b[2],a[3]*b[3]); }
@@ -308,34 +292,34 @@ namespace embree
__forceinline vint4 operator ^(const vint4& a, int b) { return a ^ vint4(b); }
__forceinline vint4 operator ^(int a, const vint4& b) { return vint4(a) ^ b; }
- __forceinline vint4 operator <<(const vint4& a, const int n) { return _mm_slli_epi32(a, n); }
- __forceinline vint4 operator >>(const vint4& a, const int n) { return _mm_srai_epi32(a, n); }
+ __forceinline vint4 operator <<(const vint4& a, int n) { return _mm_slli_epi32(a, n); }
+ __forceinline vint4 operator >>(const vint4& a, int n) { return _mm_srai_epi32(a, n); }
__forceinline vint4 sll (const vint4& a, int b) { return _mm_slli_epi32(a, b); }
__forceinline vint4 sra (const vint4& a, int b) { return _mm_srai_epi32(a, b); }
__forceinline vint4 srl (const vint4& a, int b) { return _mm_srli_epi32(a, b); }
-
+
////////////////////////////////////////////////////////////////////////////////
/// Assignment Operators
////////////////////////////////////////////////////////////////////////////////
__forceinline vint4& operator +=(vint4& a, const vint4& b) { return a = a + b; }
__forceinline vint4& operator +=(vint4& a, int b) { return a = a + b; }
-
+
__forceinline vint4& operator -=(vint4& a, const vint4& b) { return a = a - b; }
__forceinline vint4& operator -=(vint4& a, int b) { return a = a - b; }
-#if (defined(__aarch64__)) || defined(__SSE4_1__)
+#if defined(__SSE4_1__)
__forceinline vint4& operator *=(vint4& a, const vint4& b) { return a = a * b; }
__forceinline vint4& operator *=(vint4& a, int b) { return a = a * b; }
#endif
-
+
__forceinline vint4& operator &=(vint4& a, const vint4& b) { return a = a & b; }
__forceinline vint4& operator &=(vint4& a, int b) { return a = a & b; }
-
+
__forceinline vint4& operator |=(vint4& a, const vint4& b) { return a = a | b; }
__forceinline vint4& operator |=(vint4& a, int b) { return a = a | b; }
-
+
__forceinline vint4& operator <<=(vint4& a, int b) { return a = a << b; }
__forceinline vint4& operator >>=(vint4& a, int b) { return a = a >> b; }
@@ -402,15 +386,14 @@ namespace embree
template<int mask>
__forceinline vint4 select(const vint4& t, const vint4& f) {
-#if defined(__SSE4_1__)
+#if defined(__SSE4_1__)
return _mm_castps_si128(_mm_blend_ps(_mm_castsi128_ps(f), _mm_castsi128_ps(t), mask));
#else
return select(vboolf4(mask), t, f);
-#endif
+#endif
}
-
-#if defined(__aarch64__) || defined(__SSE4_1__)
+#if defined(__SSE4_1__)
__forceinline vint4 min(const vint4& a, const vint4& b) { return _mm_min_epi32(a, b); }
__forceinline vint4 max(const vint4& a, const vint4& b) { return _mm_max_epi32(a, b); }
@@ -434,25 +417,16 @@ namespace embree
__forceinline vint4 unpacklo(const vint4& a, const vint4& b) { return _mm_castps_si128(_mm_unpacklo_ps(_mm_castsi128_ps(a), _mm_castsi128_ps(b))); }
__forceinline vint4 unpackhi(const vint4& a, const vint4& b) { return _mm_castps_si128(_mm_unpackhi_ps(_mm_castsi128_ps(a), _mm_castsi128_ps(b))); }
-#if defined(__aarch64__)
- template<int i0, int i1, int i2, int i3>
- __forceinline vint4 shuffle(const vint4& v) {
- return vreinterpretq_s32_u8(vqtbl1q_u8( (uint8x16_t)v.v, _MN_SHUFFLE(i0, i1, i2, i3)));
- }
- template<int i0, int i1, int i2, int i3>
- __forceinline vint4 shuffle(const vint4& a, const vint4& b) {
- return vreinterpretq_s32_u8(vqtbl2q_u8( (uint8x16x2_t){(uint8x16_t)a.v, (uint8x16_t)b.v}, _MF_SHUFFLE(i0, i1, i2, i3)));
- }
-#else
template<int i0, int i1, int i2, int i3>
__forceinline vint4 shuffle(const vint4& v) {
return _mm_shuffle_epi32(v, _MM_SHUFFLE(i3, i2, i1, i0));
}
+
template<int i0, int i1, int i2, int i3>
__forceinline vint4 shuffle(const vint4& a, const vint4& b) {
return _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(a), _mm_castsi128_ps(b), _MM_SHUFFLE(i3, i2, i1, i0)));
}
-#endif
+
#if defined(__SSE3__)
template<> __forceinline vint4 shuffle<0, 0, 2, 2>(const vint4& v) { return _mm_castps_si128(_mm_moveldup_ps(_mm_castsi128_ps(v))); }
template<> __forceinline vint4 shuffle<1, 1, 3, 3>(const vint4& v) { return _mm_castps_si128(_mm_movehdup_ps(_mm_castsi128_ps(v))); }
@@ -464,10 +438,7 @@ namespace embree
return shuffle<i,i,i,i>(v);
}
-#if defined(__aarch64__)
- template<int src> __forceinline int extract(const vint4& b);
- template<int dst> __forceinline vint4 insert(const vint4& a, const int b);
-#elif defined(__SSE4_1__)
+#if defined(__SSE4_1__)
template<int src> __forceinline int extract(const vint4& b) { return _mm_extract_epi32(b, src); }
template<int dst> __forceinline vint4 insert(const vint4& a, const int b) { return _mm_insert_epi32(a, b, dst); }
#else
@@ -475,69 +446,19 @@ namespace embree
template<int dst> __forceinline vint4 insert(const vint4& a, int b) { vint4 c = a; c[dst&3] = b; return c; }
#endif
-#if defined(__aarch64__)
- template<> __forceinline int extract<0>(const vint4& b) {
- return b.v[0];
- }
- template<> __forceinline int extract<1>(const vint4& b) {
- return b.v[1];
- }
- template<> __forceinline int extract<2>(const vint4& b) {
- return b.v[2];
- }
- template<> __forceinline int extract<3>(const vint4& b) {
- return b.v[3];
- }
- template<> __forceinline vint4 insert<0>(const vint4& a, int b)
- {
- vint4 c = a;
- c[0] = b;
- return c;
- }
- template<> __forceinline vint4 insert<1>(const vint4& a, int b)
- {
- vint4 c = a;
- c[1] = b;
- return c;
- }
- template<> __forceinline vint4 insert<2>(const vint4& a, int b)
- {
- vint4 c = a;
- c[2] = b;
- return c;
- }
- template<> __forceinline vint4 insert<3>(const vint4& a, int b)
- {
- vint4 c = a;
- c[3] = b;
- return c;
- }
-
- __forceinline int toScalar(const vint4& v) {
- return v[0];
- }
-
- __forceinline size_t toSizeT(const vint4& v) {
- uint64x2_t x = uint64x2_t(v.v);
- return x[0];
- }
-#else
+
template<> __forceinline int extract<0>(const vint4& b) { return _mm_cvtsi128_si32(b); }
__forceinline int toScalar(const vint4& v) { return _mm_cvtsi128_si32(v); }
- __forceinline size_t toSizeT(const vint4& v) {
+ __forceinline size_t toSizeT(const vint4& v) {
#if defined(__WIN32__) && !defined(__X86_64__) // win32 workaround
return toScalar(v);
-#elif defined(__ARM_NEON)
- // FIXME(LTE): Do we need a swap(i.e. use lane 1)?
- return vgetq_lane_u64(*(reinterpret_cast<const uint64x2_t *>(&v)), 0);
#else
- return _mm_cvtsi128_si64(v);
+ return _mm_cvtsi128_si64(v);
#endif
}
-#endif
-
+
#if defined(__AVX512VL__)
__forceinline vint4 permute(const vint4 &a, const vint4 &index) {
@@ -546,25 +467,15 @@ namespace embree
template<int i>
__forceinline vint4 align_shift_right(const vint4& a, const vint4& b) {
- return _mm_alignr_epi32(a, b, i);
- }
+ return _mm_alignr_epi32(a, b, i);
+ }
#endif
////////////////////////////////////////////////////////////////////////////////
/// Reductions
////////////////////////////////////////////////////////////////////////////////
-#if defined(__aarch64__) || defined(__SSE4_1__)
-
-#if defined(__aarch64__)
- __forceinline vint4 vreduce_min(const vint4& v) { int h = vminvq_s32(v); return vdupq_n_s32(h); }
- __forceinline vint4 vreduce_max(const vint4& v) { int h = vmaxvq_s32(v); return vdupq_n_s32(h); }
- __forceinline vint4 vreduce_add(const vint4& v) { int h = vaddvq_s32(v); return vdupq_n_s32(h); }
-
- __forceinline int reduce_min(const vint4& v) { return vminvq_s32(v); }
- __forceinline int reduce_max(const vint4& v) { return vmaxvq_s32(v); }
- __forceinline int reduce_add(const vint4& v) { return vaddvq_s32(v); }
-#else
+#if defined(__SSE4_1__)
__forceinline vint4 vreduce_min(const vint4& v) { vint4 h = min(shuffle<1,0,3,2>(v),v); return min(shuffle<2,3,0,1>(h),h); }
__forceinline vint4 vreduce_max(const vint4& v) { vint4 h = max(shuffle<1,0,3,2>(v),v); return max(shuffle<2,3,0,1>(h),h); }
__forceinline vint4 vreduce_add(const vint4& v) { vint4 h = shuffle<1,0,3,2>(v) + v ; return shuffle<2,3,0,1>(h) + h ; }
@@ -572,8 +483,7 @@ namespace embree
__forceinline int reduce_min(const vint4& v) { return toScalar(vreduce_min(v)); }
__forceinline int reduce_max(const vint4& v) { return toScalar(vreduce_max(v)); }
__forceinline int reduce_add(const vint4& v) { return toScalar(vreduce_add(v)); }
-#endif
-
+
__forceinline size_t select_min(const vint4& v) { return bsf(movemask(v == vreduce_min(v))); }
__forceinline size_t select_max(const vint4& v) { return bsf(movemask(v == vreduce_max(v))); }
@@ -592,7 +502,7 @@ namespace embree
/// Sorting networks
////////////////////////////////////////////////////////////////////////////////
-#if (defined(__aarch64__)) || defined(__SSE4_1__)
+#if defined(__SSE4_1__)
__forceinline vint4 usort_ascending(const vint4& v)
{
@@ -679,3 +589,10 @@ namespace embree
}
}
+#undef vboolf
+#undef vboold
+#undef vint
+#undef vuint
+#undef vllong
+#undef vfloat
+#undef vdouble
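
// Editor's note: a standalone sketch (not part of the patch) of the
// unsigned char -> 32-bit lane widening load that vint4::load() performs above,
// with the same SSE4.1 / plain-SSE2 split. widen4_u8 is an illustrative name.
#include <immintrin.h>

static inline __m128i widen4_u8(const unsigned char* ptr)
{
#if defined(__SSE4_1__)
    // Zero-extend four bytes to four 32-bit ints in a single instruction.
    return _mm_cvtepu8_epi32(_mm_loadl_epi64((const __m128i*)ptr));
#else
    // SSE2 fallback: build the vector lane by lane (lane 0 = ptr[0]).
    return _mm_set_epi32(ptr[3], ptr[2], ptr[1], ptr[0]);
#endif
}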
diff --git a/thirdparty/embree-aarch64/common/simd/vint8_avx.h b/thirdparty/embree/common/simd/vint8_avx.h
index 25a771284d..f43e9a8c22 100644
--- a/thirdparty/embree-aarch64/common/simd/vint8_avx.h
+++ b/thirdparty/embree/common/simd/vint8_avx.h
@@ -1,8 +1,16 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
+#define vboolf vboolf_impl
+#define vboold vboold_impl
+#define vint vint_impl
+#define vuint vuint_impl
+#define vllong vllong_impl
+#define vfloat vfloat_impl
+#define vdouble vdouble_impl
+
namespace embree
{
/* 8-wide AVX integer type */
@@ -71,25 +79,20 @@ namespace embree
static __forceinline void store (void* ptr, const vint8& f) { _mm256_store_ps((float*)ptr,_mm256_castsi256_ps(f)); }
static __forceinline void storeu(void* ptr, const vint8& f) { _mm256_storeu_ps((float*)ptr,_mm256_castsi256_ps(f)); }
-#if !defined(__aarch64__)
static __forceinline void store (const vboolf8& mask, void* ptr, const vint8& f) { _mm256_maskstore_ps((float*)ptr,(__m256i)mask,_mm256_castsi256_ps(f)); }
static __forceinline void storeu(const vboolf8& mask, void* ptr, const vint8& f) { _mm256_maskstore_ps((float*)ptr,(__m256i)mask,_mm256_castsi256_ps(f)); }
-#else
- static __forceinline void store (const vboolf8& mask, void* ptr, const vint8& f) { _mm256_maskstore_ps((float*)ptr,(__m256i)mask.v,_mm256_castsi256_ps(f)); }
- static __forceinline void storeu(const vboolf8& mask, void* ptr, const vint8& f) { _mm256_maskstore_ps((float*)ptr,(__m256i)mask.v,_mm256_castsi256_ps(f)); }
-#endif
static __forceinline void store_nt(void* ptr, const vint8& v) {
_mm256_stream_ps((float*)ptr,_mm256_castsi256_ps(v));
}
- static __forceinline vint8 load(const uint8_t* ptr) {
+ static __forceinline vint8 load(const unsigned char* ptr) {
vint4 il = vint4::load(ptr+0);
vint4 ih = vint4::load(ptr+4);
return vint8(il,ih);
}
- static __forceinline vint8 loadu(const uint8_t* ptr) {
+ static __forceinline vint8 loadu(const unsigned char* ptr) {
vint4 il = vint4::loadu(ptr+0);
vint4 ih = vint4::loadu(ptr+4);
return vint8(il,ih);
@@ -107,7 +110,7 @@ namespace embree
return vint8(il,ih);
}
- static __forceinline void store(uint8_t* ptr, const vint8& i) {
+ static __forceinline void store(unsigned char* ptr, const vint8& i) {
vint4 il(i.vl);
vint4 ih(i.vh);
vint4::store(ptr + 0,il);
@@ -122,54 +125,54 @@ namespace embree
template<int scale = 4>
static __forceinline vint8 gather(const int* ptr, const vint8& index) {
return vint8(
- *(int*)(((int8_t*)ptr)+scale*index[0]),
- *(int*)(((int8_t*)ptr)+scale*index[1]),
- *(int*)(((int8_t*)ptr)+scale*index[2]),
- *(int*)(((int8_t*)ptr)+scale*index[3]),
- *(int*)(((int8_t*)ptr)+scale*index[4]),
- *(int*)(((int8_t*)ptr)+scale*index[5]),
- *(int*)(((int8_t*)ptr)+scale*index[6]),
- *(int*)(((int8_t*)ptr)+scale*index[7]));
+ *(int*)(((char*)ptr)+scale*index[0]),
+ *(int*)(((char*)ptr)+scale*index[1]),
+ *(int*)(((char*)ptr)+scale*index[2]),
+ *(int*)(((char*)ptr)+scale*index[3]),
+ *(int*)(((char*)ptr)+scale*index[4]),
+ *(int*)(((char*)ptr)+scale*index[5]),
+ *(int*)(((char*)ptr)+scale*index[6]),
+ *(int*)(((char*)ptr)+scale*index[7]));
}
template<int scale = 4>
static __forceinline vint8 gather(const vboolf8& mask, const int* ptr, const vint8& index) {
vint8 r = zero;
- if (likely(mask[0])) r[0] = *(int*)(((int8_t*)ptr)+scale*index[0]);
- if (likely(mask[1])) r[1] = *(int*)(((int8_t*)ptr)+scale*index[1]);
- if (likely(mask[2])) r[2] = *(int*)(((int8_t*)ptr)+scale*index[2]);
- if (likely(mask[3])) r[3] = *(int*)(((int8_t*)ptr)+scale*index[3]);
- if (likely(mask[4])) r[4] = *(int*)(((int8_t*)ptr)+scale*index[4]);
- if (likely(mask[5])) r[5] = *(int*)(((int8_t*)ptr)+scale*index[5]);
- if (likely(mask[6])) r[6] = *(int*)(((int8_t*)ptr)+scale*index[6]);
- if (likely(mask[7])) r[7] = *(int*)(((int8_t*)ptr)+scale*index[7]);
+ if (likely(mask[0])) r[0] = *(int*)(((char*)ptr)+scale*index[0]);
+ if (likely(mask[1])) r[1] = *(int*)(((char*)ptr)+scale*index[1]);
+ if (likely(mask[2])) r[2] = *(int*)(((char*)ptr)+scale*index[2]);
+ if (likely(mask[3])) r[3] = *(int*)(((char*)ptr)+scale*index[3]);
+ if (likely(mask[4])) r[4] = *(int*)(((char*)ptr)+scale*index[4]);
+ if (likely(mask[5])) r[5] = *(int*)(((char*)ptr)+scale*index[5]);
+ if (likely(mask[6])) r[6] = *(int*)(((char*)ptr)+scale*index[6]);
+ if (likely(mask[7])) r[7] = *(int*)(((char*)ptr)+scale*index[7]);
return r;
}
template<int scale = 4>
static __forceinline void scatter(void* ptr, const vint8& ofs, const vint8& v)
{
- *(int*)(((int8_t*)ptr)+scale*ofs[0]) = v[0];
- *(int*)(((int8_t*)ptr)+scale*ofs[1]) = v[1];
- *(int*)(((int8_t*)ptr)+scale*ofs[2]) = v[2];
- *(int*)(((int8_t*)ptr)+scale*ofs[3]) = v[3];
- *(int*)(((int8_t*)ptr)+scale*ofs[4]) = v[4];
- *(int*)(((int8_t*)ptr)+scale*ofs[5]) = v[5];
- *(int*)(((int8_t*)ptr)+scale*ofs[6]) = v[6];
- *(int*)(((int8_t*)ptr)+scale*ofs[7]) = v[7];
+ *(int*)(((char*)ptr)+scale*ofs[0]) = v[0];
+ *(int*)(((char*)ptr)+scale*ofs[1]) = v[1];
+ *(int*)(((char*)ptr)+scale*ofs[2]) = v[2];
+ *(int*)(((char*)ptr)+scale*ofs[3]) = v[3];
+ *(int*)(((char*)ptr)+scale*ofs[4]) = v[4];
+ *(int*)(((char*)ptr)+scale*ofs[5]) = v[5];
+ *(int*)(((char*)ptr)+scale*ofs[6]) = v[6];
+ *(int*)(((char*)ptr)+scale*ofs[7]) = v[7];
}
template<int scale = 4>
static __forceinline void scatter(const vboolf8& mask, void* ptr, const vint8& ofs, const vint8& v)
{
- if (likely(mask[0])) *(int*)(((int8_t*)ptr)+scale*ofs[0]) = v[0];
- if (likely(mask[1])) *(int*)(((int8_t*)ptr)+scale*ofs[1]) = v[1];
- if (likely(mask[2])) *(int*)(((int8_t*)ptr)+scale*ofs[2]) = v[2];
- if (likely(mask[3])) *(int*)(((int8_t*)ptr)+scale*ofs[3]) = v[3];
- if (likely(mask[4])) *(int*)(((int8_t*)ptr)+scale*ofs[4]) = v[4];
- if (likely(mask[5])) *(int*)(((int8_t*)ptr)+scale*ofs[5]) = v[5];
- if (likely(mask[6])) *(int*)(((int8_t*)ptr)+scale*ofs[6]) = v[6];
- if (likely(mask[7])) *(int*)(((int8_t*)ptr)+scale*ofs[7]) = v[7];
+ if (likely(mask[0])) *(int*)(((char*)ptr)+scale*ofs[0]) = v[0];
+ if (likely(mask[1])) *(int*)(((char*)ptr)+scale*ofs[1]) = v[1];
+ if (likely(mask[2])) *(int*)(((char*)ptr)+scale*ofs[2]) = v[2];
+ if (likely(mask[3])) *(int*)(((char*)ptr)+scale*ofs[3]) = v[3];
+ if (likely(mask[4])) *(int*)(((char*)ptr)+scale*ofs[4]) = v[4];
+ if (likely(mask[5])) *(int*)(((char*)ptr)+scale*ofs[5]) = v[5];
+ if (likely(mask[6])) *(int*)(((char*)ptr)+scale*ofs[6]) = v[6];
+ if (likely(mask[7])) *(int*)(((char*)ptr)+scale*ofs[7]) = v[7];
}
@@ -315,11 +318,6 @@ namespace embree
return _mm256_castps_si256(_mm256_blendv_ps(_mm256_castsi256_ps(f), _mm256_castsi256_ps(t), m));
}
- __forceinline vint8 notand(const vboolf8& m, const vint8& f) {
- return _mm256_castps_si256(_mm256_andnot_ps(m, _mm256_castsi256_ps(f)));
- }
-
-
////////////////////////////////////////////////////////////////////////////////
/// Movement/Shifting/Shuffling Functions
////////////////////////////////////////////////////////////////////////////////
@@ -462,3 +460,11 @@ namespace embree
return cout << "<" << a[0] << ", " << a[1] << ", " << a[2] << ", " << a[3] << ", " << a[4] << ", " << a[5] << ", " << a[6] << ", " << a[7] << ">";
}
}
+
+#undef vboolf
+#undef vboold
+#undef vint
+#undef vuint
+#undef vllong
+#undef vfloat
+#undef vdouble
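
// Editor's note: an illustrative sketch (not from the patch) of the scalar
// gather fallback used by vint8::gather() above. Each lane is fetched at a
// byte offset of scale * index[i], which is why the base pointer is cast to
// char* in the header. gather8_scalar is a hypothetical helper name.
#include <cstring>

template <int scale = 4>
static inline void gather8_scalar(const int* base, const int index[8], int out[8])
{
    const char* p = reinterpret_cast<const char*>(base);
    for (int i = 0; i < 8; ++i) {
        // memcpy avoids alignment assumptions; the original casts directly.
        std::memcpy(&out[i], p + scale * index[i], sizeof(int));
    }
}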
diff --git a/thirdparty/embree-aarch64/common/simd/vint8_avx2.h b/thirdparty/embree/common/simd/vint8_avx2.h
index 4937d972cf..e04737ffbe 100644
--- a/thirdparty/embree-aarch64/common/simd/vint8_avx2.h
+++ b/thirdparty/embree/common/simd/vint8_avx2.h
@@ -1,8 +1,16 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
+#define vboolf vboolf_impl
+#define vboold vboold_impl
+#define vint vint_impl
+#define vuint vuint_impl
+#define vllong vllong_impl
+#define vfloat vfloat_impl
+#define vdouble vdouble_impl
+
namespace embree
{
/* 8-wide AVX integer type */
@@ -67,8 +75,8 @@ namespace embree
/// Loads and Stores
////////////////////////////////////////////////////////////////////////////////
- static __forceinline vint8 load(const uint8_t* ptr) { return _mm256_cvtepu8_epi32(_mm_loadl_epi64((__m128i*)ptr)); }
- static __forceinline vint8 loadu(const uint8_t* ptr) { return _mm256_cvtepu8_epi32(_mm_loadl_epi64((__m128i*)ptr)); }
+ static __forceinline vint8 load(const unsigned char* ptr) { return _mm256_cvtepu8_epi32(_mm_loadl_epi64((__m128i*)ptr)); }
+ static __forceinline vint8 loadu(const unsigned char* ptr) { return _mm256_cvtepu8_epi32(_mm_loadl_epi64((__m128i*)ptr)); }
static __forceinline vint8 load(const unsigned short* ptr) { return _mm256_cvtepu16_epi32(_mm_load_si128((__m128i*)ptr)); }
static __forceinline vint8 loadu(const unsigned short* ptr) { return _mm256_cvtepu16_epi32(_mm_loadu_si128((__m128i*)ptr)); }
@@ -108,7 +116,7 @@ namespace embree
_mm256_stream_ps((float*)ptr,_mm256_castsi256_ps(v));
}
- static __forceinline void store(uint8_t* ptr, const vint8& i)
+ static __forceinline void store(unsigned char* ptr, const vint8& i)
{
for (size_t j=0; j<8; j++)
ptr[j] = i[j];
@@ -140,14 +148,14 @@ namespace embree
#if defined(__AVX512VL__)
_mm256_i32scatter_epi32((int*)ptr, ofs, v, scale);
#else
- *(int*)(((int8_t*)ptr)+scale*ofs[0]) = v[0];
- *(int*)(((int8_t*)ptr)+scale*ofs[1]) = v[1];
- *(int*)(((int8_t*)ptr)+scale*ofs[2]) = v[2];
- *(int*)(((int8_t*)ptr)+scale*ofs[3]) = v[3];
- *(int*)(((int8_t*)ptr)+scale*ofs[4]) = v[4];
- *(int*)(((int8_t*)ptr)+scale*ofs[5]) = v[5];
- *(int*)(((int8_t*)ptr)+scale*ofs[6]) = v[6];
- *(int*)(((int8_t*)ptr)+scale*ofs[7]) = v[7];
+ *(int*)(((char*)ptr)+scale*ofs[0]) = v[0];
+ *(int*)(((char*)ptr)+scale*ofs[1]) = v[1];
+ *(int*)(((char*)ptr)+scale*ofs[2]) = v[2];
+ *(int*)(((char*)ptr)+scale*ofs[3]) = v[3];
+ *(int*)(((char*)ptr)+scale*ofs[4]) = v[4];
+ *(int*)(((char*)ptr)+scale*ofs[5]) = v[5];
+ *(int*)(((char*)ptr)+scale*ofs[6]) = v[6];
+ *(int*)(((char*)ptr)+scale*ofs[7]) = v[7];
#endif
}
@@ -157,14 +165,14 @@ namespace embree
#if defined(__AVX512VL__)
_mm256_mask_i32scatter_epi32((int*)ptr, mask, ofs, v, scale);
#else
- if (likely(mask[0])) *(int*)(((int8_t*)ptr)+scale*ofs[0]) = v[0];
- if (likely(mask[1])) *(int*)(((int8_t*)ptr)+scale*ofs[1]) = v[1];
- if (likely(mask[2])) *(int*)(((int8_t*)ptr)+scale*ofs[2]) = v[2];
- if (likely(mask[3])) *(int*)(((int8_t*)ptr)+scale*ofs[3]) = v[3];
- if (likely(mask[4])) *(int*)(((int8_t*)ptr)+scale*ofs[4]) = v[4];
- if (likely(mask[5])) *(int*)(((int8_t*)ptr)+scale*ofs[5]) = v[5];
- if (likely(mask[6])) *(int*)(((int8_t*)ptr)+scale*ofs[6]) = v[6];
- if (likely(mask[7])) *(int*)(((int8_t*)ptr)+scale*ofs[7]) = v[7];
+ if (likely(mask[0])) *(int*)(((char*)ptr)+scale*ofs[0]) = v[0];
+ if (likely(mask[1])) *(int*)(((char*)ptr)+scale*ofs[1]) = v[1];
+ if (likely(mask[2])) *(int*)(((char*)ptr)+scale*ofs[2]) = v[2];
+ if (likely(mask[3])) *(int*)(((char*)ptr)+scale*ofs[3]) = v[3];
+ if (likely(mask[4])) *(int*)(((char*)ptr)+scale*ofs[4]) = v[4];
+ if (likely(mask[5])) *(int*)(((char*)ptr)+scale*ofs[5]) = v[5];
+ if (likely(mask[6])) *(int*)(((char*)ptr)+scale*ofs[6]) = v[6];
+ if (likely(mask[7])) *(int*)(((char*)ptr)+scale*ofs[7]) = v[7];
#endif
}
@@ -385,9 +393,7 @@ namespace embree
__forceinline int toScalar(const vint8& v) { return _mm_cvtsi128_si32(_mm256_castsi256_si128(v)); }
-#if !defined(__aarch64__)
-
-__forceinline vint8 permute(const vint8& v, const __m256i& index) {
+ __forceinline vint8 permute(const vint8& v, const __m256i& index) {
return _mm256_permutevar8x32_epi32(v, index);
}
@@ -395,8 +401,6 @@ __forceinline vint8 permute(const vint8& v, const __m256i& index) {
return _mm256_castps_si256(_mm256_permutevar_ps(_mm256_castsi256_ps(v), index));
}
-
-
template<int i>
static __forceinline vint8 align_shift_right(const vint8& a, const vint8& b) {
#if defined(__AVX512VL__)
@@ -406,9 +410,6 @@ __forceinline vint8 permute(const vint8& v, const __m256i& index) {
#endif
}
-#endif
-
-
////////////////////////////////////////////////////////////////////////////////
/// Reductions
////////////////////////////////////////////////////////////////////////////////
@@ -435,9 +436,6 @@ __forceinline vint8 permute(const vint8& v, const __m256i& index) {
__forceinline size_t select_min(const vboolf8& valid, const vint8& v) { const vint8 a = select(valid,v,vint8(pos_inf)); return bsf(movemask(valid & (a == vreduce_min(a)))); }
__forceinline size_t select_max(const vboolf8& valid, const vint8& v) { const vint8 a = select(valid,v,vint8(neg_inf)); return bsf(movemask(valid & (a == vreduce_max(a)))); }
-
- __forceinline vint8 assign(const vint4& a) { return _mm256_castsi128_si256(a); }
-
////////////////////////////////////////////////////////////////////////////////
/// Sorting networks
////////////////////////////////////////////////////////////////////////////////
@@ -510,3 +508,11 @@ __forceinline vint8 permute(const vint8& v, const __m256i& index) {
return cout << "<" << a[0] << ", " << a[1] << ", " << a[2] << ", " << a[3] << ", " << a[4] << ", " << a[5] << ", " << a[6] << ", " << a[7] << ">";
}
}
+
+#undef vboolf
+#undef vboold
+#undef vint
+#undef vuint
+#undef vllong
+#undef vfloat
+#undef vdouble
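
// Editor's note: a minimal AVX2 sketch (illustrative, not part of the patch)
// of the hardware gather that replaces the per-lane loop when __AVX2__ is
// available. gather8_avx2 is an assumed helper name; compile with -mavx2.
#include <immintrin.h>

static inline __m256i gather8_avx2(const int* base, __m256i index)
{
    // Scale of 4: index values address int-sized elements of base.
    return _mm256_i32gather_epi32(base, index, 4);
}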
diff --git a/thirdparty/embree-aarch64/common/simd/vllong4_avx2.h b/thirdparty/embree/common/simd/vllong4_avx2.h
index de3ebc16a7..6c86845877 100644
--- a/thirdparty/embree-aarch64/common/simd/vllong4_avx2.h
+++ b/thirdparty/embree/common/simd/vllong4_avx2.h
@@ -1,8 +1,16 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
+#define vboolf vboolf_impl
+#define vboold vboold_impl
+#define vint vint_impl
+#define vuint vuint_impl
+#define vllong vllong_impl
+#define vfloat vfloat_impl
+#define vdouble vdouble_impl
+
namespace embree
{
/* 4-wide AVX2 64-bit long long type */
@@ -95,16 +103,6 @@ namespace embree
#endif
}
- static __forceinline vllong4 broadcast64bit(size_t v) {
- return _mm256_set1_epi64x(v);
- }
-
- static __forceinline size_t extract64bit(const vllong4& v)
- {
- return _mm_cvtsi128_si64(_mm256_castsi256_si128(v));
- }
-
-
////////////////////////////////////////////////////////////////////////////////
/// Array Access
////////////////////////////////////////////////////////////////////////////////
@@ -276,18 +274,6 @@ namespace embree
__forceinline vboold4 le(const vboold4& mask, const vllong4& a, const vllong4& b) { return mask & (a <= b); }
#endif
- __forceinline void xchg(const vboold4& m, vllong4& a, vllong4& b) {
- const vllong4 c = a; a = select(m,b,a); b = select(m,c,b);
- }
-
- __forceinline vboold4 test(const vllong4& a, const vllong4& b) {
-#if defined(__AVX512VL__)
- return _mm256_test_epi64_mask(a,b);
-#else
- return _mm256_testz_si256(a,b);
-#endif
- }
-
////////////////////////////////////////////////////////////////////////////////
// Movement/Shifting/Shuffling Functions
////////////////////////////////////////////////////////////////////////////////
@@ -356,3 +342,11 @@ namespace embree
return cout;
}
}
+
+#undef vboolf
+#undef vboold
+#undef vint
+#undef vuint
+#undef vllong
+#undef vfloat
+#undef vdouble
diff --git a/thirdparty/embree-aarch64/common/simd/vllong8_avx512.h b/thirdparty/embree/common/simd/vllong8_avx512.h
index 76dddd8991..ee69411637 100644
--- a/thirdparty/embree-aarch64/common/simd/vllong8_avx512.h
+++ b/thirdparty/embree/common/simd/vllong8_avx512.h
@@ -1,8 +1,16 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
+#define vboolf vboolf_impl
+#define vboold vboold_impl
+#define vint vint_impl
+#define vuint vuint_impl
+#define vllong vllong_impl
+#define vfloat vfloat_impl
+#define vdouble vdouble_impl
+
namespace embree
{
/* 8-wide AVX-512 64-bit long long type */
@@ -78,7 +86,7 @@ namespace embree
return _mm512_load_si512(addr);
}
- static __forceinline vllong8 load(const uint8_t* ptr) {
+ static __forceinline vllong8 load(const unsigned char* ptr) {
return _mm512_cvtepu8_epi64(*(__m128i*)ptr);
}
@@ -98,19 +106,6 @@ namespace embree
_mm512_mask_store_epi64(addr,mask,v2);
}
- /* pass by value to avoid compiler generating inefficient code */
- static __forceinline void storeu_compact(const vboold8 mask, void* addr, const vllong8& reg) {
- _mm512_mask_compressstoreu_epi64(addr,mask,reg);
- }
-
- static __forceinline vllong8 compact64bit(const vboold8& mask, vllong8& v) {
- return _mm512_mask_compress_epi64(v,mask,v);
- }
-
- static __forceinline vllong8 compact64bit(const vboold8& mask, vllong8& dest, const vllong8& source) {
- return _mm512_mask_compress_epi64(dest,mask,source);
- }
-
static __forceinline vllong8 compact(const vboold8& mask, vllong8& v) {
return _mm512_mask_compress_epi64(v,mask,v);
}
@@ -123,16 +118,6 @@ namespace embree
return _mm512_mask_expand_epi64(b,mask,a);
}
- static __forceinline vllong8 broadcast64bit(size_t v) {
- return _mm512_set1_epi64(v);
- }
-
- static __forceinline size_t extract64bit(const vllong8& v)
- {
- return _mm_cvtsi128_si64(_mm512_castsi512_si128(v));
- }
-
-
////////////////////////////////////////////////////////////////////////////////
/// Array Access
////////////////////////////////////////////////////////////////////////////////
@@ -271,18 +256,6 @@ namespace embree
return _mm512_mask_or_epi64(f,m,t,t);
}
- __forceinline void xchg(const vboold8& m, vllong8& a, vllong8& b) {
- const vllong8 c = a; a = select(m,b,a); b = select(m,c,b);
- }
-
- __forceinline vboold8 test(const vboold8& m, const vllong8& a, const vllong8& b) {
- return _mm512_mask_test_epi64_mask(m,a,b);
- }
-
- __forceinline vboold8 test(const vllong8& a, const vllong8& b) {
- return _mm512_test_epi64_mask(a,b);
- }
-
////////////////////////////////////////////////////////////////////////////////
// Movement/Shifting/Shuffling Functions
////////////////////////////////////////////////////////////////////////////////
@@ -321,10 +294,6 @@ namespace embree
return _mm_cvtsi128_si64(_mm512_castsi512_si128(v));
}
- __forceinline vllong8 zeroExtend32Bit(const __m512i& a) {
- return _mm512_cvtepu32_epi64(_mm512_castsi512_si256(a));
- }
-
////////////////////////////////////////////////////////////////////////////////
/// Reductions
////////////////////////////////////////////////////////////////////////////////
@@ -379,3 +348,11 @@ namespace embree
return cout;
}
}
+
+#undef vboolf
+#undef vboold
+#undef vint
+#undef vuint
+#undef vllong
+#undef vfloat
+#undef vdouble
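
// Editor's note: a sketch (illustrative, not from the patch) of the masked-or
// select idiom visible above: OR-ing t with itself under write-mask m copies t
// into the masked lanes of f, i.e. select(m, t, f). Requires AVX-512F.
#include <immintrin.h>

static inline __m512i select_epi64(__mmask8 m, __m512i t, __m512i f)
{
    return _mm512_mask_or_epi64(f, m, t, t);
}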
diff --git a/thirdparty/embree-aarch64/common/simd/vuint16_avx512.h b/thirdparty/embree/common/simd/vuint16_avx512.h
index 39752611bb..c9eb6682ff 100644
--- a/thirdparty/embree-aarch64/common/simd/vuint16_avx512.h
+++ b/thirdparty/embree/common/simd/vuint16_avx512.h
@@ -1,8 +1,16 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
+#define vboolf vboolf_impl
+#define vboold vboold_impl
+#define vint vint_impl
+#define vuint vuint_impl
+#define vllong vllong_impl
+#define vfloat vfloat_impl
+#define vdouble vdouble_impl
+
namespace embree
{
/* 16-wide AVX-512 unsigned integer type */
@@ -83,7 +91,7 @@ namespace embree
return _mm512_loadu_si512(addr);
}
- static __forceinline vuint16 loadu(const uint8_t* ptr) { return _mm512_cvtepu8_epi32(_mm_loadu_si128((__m128i*)ptr)); }
+ static __forceinline vuint16 loadu(const unsigned char* ptr) { return _mm512_cvtepu8_epi32(_mm_loadu_si128((__m128i*)ptr)); }
static __forceinline vuint16 loadu(const unsigned short* ptr) { return _mm512_cvtepu16_epi32(_mm256_loadu_si256((__m256i*)ptr)); }
static __forceinline vuint16 load(const vuint16* addr) {
@@ -113,20 +121,6 @@ namespace embree
_mm512_mask_store_epi32(addr,mask,v2);
}
- /* pass by value to avoid compiler generating inefficient code */
- static __forceinline void storeu_compact(const vboolf16 mask, void* addr, const vuint16 reg) {
- _mm512_mask_compressstoreu_epi32(addr,mask,reg);
- }
-
- static __forceinline void storeu_compact_single(const vboolf16 mask, void* addr, vuint16 reg) {
- //_mm512_mask_compressstoreu_epi32(addr,mask,reg);
- *(float*)addr = mm512_cvtss_f32(_mm512_mask_compress_ps(_mm512_castsi512_ps(reg),mask,_mm512_castsi512_ps(reg)));
- }
-
- static __forceinline vuint16 compact64bit(const vboolf16& mask, vuint16& v) {
- return _mm512_mask_compress_epi64(v,mask,v);
- }
-
static __forceinline vuint16 compact(const vboolf16& mask, vuint16& v) {
return _mm512_mask_compress_epi32(v,mask,v);
}
@@ -164,15 +158,6 @@ namespace embree
_mm512_mask_i32scatter_epi32((int*)ptr,mask,index,v,scale);
}
- static __forceinline vuint16 broadcast64bit(size_t v) {
- return _mm512_set1_epi64(v);
- }
-
- static __forceinline size_t extract64bit(const vuint16& v)
- {
- return _mm_cvtsi128_si64(_mm512_castsi512_si128(v));
- }
-
////////////////////////////////////////////////////////////////////////////////
/// Array Access
////////////////////////////////////////////////////////////////////////////////
@@ -315,18 +300,6 @@ namespace embree
return _mm512_mask_or_epi32(f,m,t,t);
}
- __forceinline void xchg(const vboolf16& m, vuint16& a, vuint16& b) {
- const vuint16 c = a; a = select(m,b,a); b = select(m,c,b);
- }
-
- __forceinline vboolf16 test(const vboolf16& m, const vuint16& a, const vuint16& b) {
- return _mm512_mask_test_epi32_mask(m,a,b);
- }
-
- __forceinline vboolf16 test(const vuint16& a, const vuint16& b) {
- return _mm512_test_epi32_mask(a,b);
- }
-
////////////////////////////////////////////////////////////////////////////////
// Movement/Shifting/Shuffling Functions
////////////////////////////////////////////////////////////////////////////////
@@ -441,3 +414,11 @@ namespace embree
return cout;
}
}
+
+#undef vboolf
+#undef vboold
+#undef vint
+#undef vuint
+#undef vllong
+#undef vfloat
+#undef vdouble
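
// Editor's note: an illustrative AVX-512 sketch (not part of the patch) of the
// masked 32-bit scatter kept above; only lanes whose mask bit is set are
// written to memory. scatter16 is a hypothetical name; needs -mavx512f.
#include <immintrin.h>

static inline void scatter16(void* ptr, __mmask16 mask, __m512i index, __m512i v)
{
    // Scale of 4: index values address int-sized elements.
    _mm512_mask_i32scatter_epi32(ptr, mask, index, v, 4);
}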
diff --git a/thirdparty/embree-aarch64/common/simd/vuint4_sse2.h b/thirdparty/embree/common/simd/vuint4_sse2.h
index a3f393ebf2..0601b9ab80 100644
--- a/thirdparty/embree-aarch64/common/simd/vuint4_sse2.h
+++ b/thirdparty/embree/common/simd/vuint4_sse2.h
@@ -1,10 +1,18 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
#include "../math/math.h"
+#define vboolf vboolf_impl
+#define vboold vboold_impl
+#define vint vint_impl
+#define vuint vuint_impl
+#define vllong vllong_impl
+#define vfloat vfloat_impl
+#define vdouble vdouble_impl
+
namespace embree
{
/* 4-wide SSE integer type */
@@ -87,64 +95,27 @@ namespace embree
static __forceinline void storeu(const vboolf4& mask, void* ptr, const vuint4& i) { storeu(ptr,select(mask,i,loadu(ptr))); }
#endif
-#if defined(__aarch64__)
- static __forceinline vuint4 load(const uint8_t* ptr) {
- return _mm_load4epu8_epi32(((__m128i*)ptr));
- }
- static __forceinline vuint4 loadu(const uint8_t* ptr) {
- return _mm_load4epu8_epi32(((__m128i*)ptr));
- }
-#elif defined(__SSE4_1__)
- static __forceinline vuint4 load(const uint8_t* ptr) {
+#if defined(__SSE4_1__)
+ static __forceinline vuint4 load(const unsigned char* ptr) {
return _mm_cvtepu8_epi32(_mm_loadl_epi64((__m128i*)ptr));
}
- static __forceinline vuint4 loadu(const uint8_t* ptr) {
+ static __forceinline vuint4 loadu(const unsigned char* ptr) {
return _mm_cvtepu8_epi32(_mm_loadl_epi64((__m128i*)ptr));
}
#endif
static __forceinline vuint4 load(const unsigned short* ptr) {
-#if defined(__aarch64__)
- return _mm_load4epu16_epi32(((__m128i*)ptr));
-#elif defined (__SSE4_1__)
+#if defined (__SSE4_1__)
return _mm_cvtepu16_epi32(_mm_loadu_si128((__m128i*)ptr));
#else
return vuint4(ptr[0],ptr[1],ptr[2],ptr[3]);
#endif
}
- static __forceinline void store_uint8(uint8_t* ptr, const vuint4& v) {
-#if defined(__aarch64__)
- uint32x4_t x = uint32x4_t(v.v);
- uint16x4_t y = vqmovn_u32(x);
- uint8x8_t z = vqmovn_u16(vcombine_u16(y, y));
- vst1_lane_u32((uint32_t *)ptr, uint32x2_t(z), 0);
-#elif defined(__SSE4_1__)
- __m128i x = v;
- x = _mm_packus_epi32(x, x);
- x = _mm_packus_epi16(x, x);
- *(unsigned*)ptr = _mm_cvtsi128_si32(x);
-#else
- for (size_t i=0;i<4;i++)
- ptr[i] = (uint8_t)v[i];
-#endif
- }
-
- static __forceinline void store_uint8(unsigned short* ptr, const vuint4& v) {
-#if defined(__aarch64__)
- uint32x4_t x = (uint32x4_t)v.v;
- uint16x4_t y = vqmovn_u32(x);
- vst1_u16(ptr, y);
-#else
- for (size_t i=0;i<4;i++)
- ptr[i] = (unsigned short)v[i];
-#endif
- }
-
static __forceinline vuint4 load_nt(void* ptr) {
-#if (defined(__aarch64__)) || defined(__SSE4_1__)
+#if defined(__SSE4_1__)
return _mm_stream_load_si128((__m128i*)ptr);
#else
return _mm_load_si128((__m128i*)ptr);
@@ -152,8 +123,8 @@ namespace embree
}
static __forceinline void store_nt(void* ptr, const vuint4& v) {
-#if !defined(__aarch64__) && defined(__SSE4_1__)
- _mm_stream_ps((float*)ptr, _mm_castsi128_ps(v));
+#if defined(__SSE4_1__)
+ _mm_stream_ps((float*)ptr,_mm_castsi128_ps(v));
#else
_mm_store_si128((__m128i*)ptr,v);
#endif
@@ -161,14 +132,14 @@ namespace embree
template<int scale = 4>
static __forceinline vuint4 gather(const unsigned int* ptr, const vint4& index) {
-#if defined(__AVX2__) && !defined(__aarch64__)
+#if defined(__AVX2__)
return _mm_i32gather_epi32((const int*)ptr, index, scale);
#else
return vuint4(
- *(unsigned int*)(((int8_t*)ptr)+scale*index[0]),
- *(unsigned int*)(((int8_t*)ptr)+scale*index[1]),
- *(unsigned int*)(((int8_t*)ptr)+scale*index[2]),
- *(unsigned int*)(((int8_t*)ptr)+scale*index[3]));
+ *(unsigned int*)(((char*)ptr)+scale*index[0]),
+ *(unsigned int*)(((char*)ptr)+scale*index[1]),
+ *(unsigned int*)(((char*)ptr)+scale*index[2]),
+ *(unsigned int*)(((char*)ptr)+scale*index[3]));
#endif
}
@@ -177,13 +148,13 @@ namespace embree
vuint4 r = zero;
#if defined(__AVX512VL__)
return _mm_mmask_i32gather_epi32(r, mask, index, ptr, scale);
-#elif defined(__AVX2__) && !defined(__aarch64__)
+#elif defined(__AVX2__)
return _mm_mask_i32gather_epi32(r, (const int*)ptr, index, mask, scale);
#else
- if (likely(mask[0])) r[0] = *(unsigned int*)(((int8_t*)ptr)+scale*index[0]);
- if (likely(mask[1])) r[1] = *(unsigned int*)(((int8_t*)ptr)+scale*index[1]);
- if (likely(mask[2])) r[2] = *(unsigned int*)(((int8_t*)ptr)+scale*index[2]);
- if (likely(mask[3])) r[3] = *(unsigned int*)(((int8_t*)ptr)+scale*index[3]);
+ if (likely(mask[0])) r[0] = *(unsigned int*)(((char*)ptr)+scale*index[0]);
+ if (likely(mask[1])) r[1] = *(unsigned int*)(((char*)ptr)+scale*index[1]);
+ if (likely(mask[2])) r[2] = *(unsigned int*)(((char*)ptr)+scale*index[2]);
+ if (likely(mask[3])) r[3] = *(unsigned int*)(((char*)ptr)+scale*index[3]);
return r;
#endif
}
@@ -373,25 +344,16 @@ namespace embree
__forceinline vuint4 unpacklo(const vuint4& a, const vuint4& b) { return _mm_castps_si128(_mm_unpacklo_ps(_mm_castsi128_ps(a), _mm_castsi128_ps(b))); }
__forceinline vuint4 unpackhi(const vuint4& a, const vuint4& b) { return _mm_castps_si128(_mm_unpackhi_ps(_mm_castsi128_ps(a), _mm_castsi128_ps(b))); }
-#if defined(__aarch64__)
- template<int i0, int i1, int i2, int i3>
- __forceinline vuint4 shuffle(const vuint4& v) {
- return vreinterpretq_s32_u8(vqtbl1q_u8( (uint8x16_t)v.v, _MN_SHUFFLE(i0, i1, i2, i3)));
- }
- template<int i0, int i1, int i2, int i3>
- __forceinline vuint4 shuffle(const vuint4& a, const vuint4& b) {
- return vreinterpretq_s32_u8(vqtbl2q_u8( (uint8x16x2_t){(uint8x16_t)a.v, (uint8x16_t)b.v}, _MF_SHUFFLE(i0, i1, i2, i3)));
- }
-#else
template<int i0, int i1, int i2, int i3>
__forceinline vuint4 shuffle(const vuint4& v) {
return _mm_shuffle_epi32(v, _MM_SHUFFLE(i3, i2, i1, i0));
}
+
template<int i0, int i1, int i2, int i3>
__forceinline vuint4 shuffle(const vuint4& a, const vuint4& b) {
return _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(a), _mm_castsi128_ps(b), _MM_SHUFFLE(i3, i2, i1, i0)));
}
-#endif
+
#if defined(__SSE3__)
template<> __forceinline vuint4 shuffle<0, 0, 2, 2>(const vuint4& v) { return _mm_castps_si128(_mm_moveldup_ps(_mm_castsi128_ps(v))); }
template<> __forceinline vuint4 shuffle<1, 1, 3, 3>(const vuint4& v) { return _mm_castps_si128(_mm_movehdup_ps(_mm_castsi128_ps(v))); }
@@ -403,10 +365,7 @@ namespace embree
return shuffle<i,i,i,i>(v);
}
-#if defined(__aarch64__)
- template<int src> __forceinline unsigned int extract(const vuint4& b);
- template<int dst> __forceinline vuint4 insert(const vuint4& a, const unsigned b);
-#elif defined(__SSE4_1__)
+#if defined(__SSE4_1__)
template<int src> __forceinline unsigned int extract(const vuint4& b) { return _mm_extract_epi32(b, src); }
template<int dst> __forceinline vuint4 insert(const vuint4& a, const unsigned b) { return _mm_insert_epi32(a, b, dst); }
#else
@@ -414,50 +373,11 @@ namespace embree
template<int dst> __forceinline vuint4 insert(const vuint4& a, const unsigned b) { vuint4 c = a; c[dst&3] = b; return c; }
#endif
-#if defined(__aarch64__)
- template<> __forceinline unsigned int extract<0>(const vuint4& b) {
- return b[0];
- }
- template<> __forceinline unsigned int extract<1>(const vuint4& b) {
- return b[1];
- }
- template<> __forceinline unsigned int extract<2>(const vuint4& b) {
- return b[2];
- }
- template<> __forceinline unsigned int extract<3>(const vuint4& b) {
- return b[3];
- }
-
- template<> __forceinline vuint4 insert<0>(const vuint4& a, unsigned b){
- vuint4 c = a;
- c[0] = b;
- return c;
- }
- template<> __forceinline vuint4 insert<1>(const vuint4& a, unsigned b){
- vuint4 c = a;
- c[1] = b;
- return c;
- }
- template<> __forceinline vuint4 insert<2>(const vuint4& a, unsigned b){
- vuint4 c = a;
- c[2] = b;
- return c;
- }
- template<> __forceinline vuint4 insert<3>(const vuint4& a, unsigned b){
- vuint4 c = a;
- c[3] = b;
- return c;
- }
-
- __forceinline unsigned int toScalar(const vuint4& v) {
- return v[0];
- }
-#else
+
template<> __forceinline unsigned int extract<0>(const vuint4& b) { return _mm_cvtsi128_si32(b); }
__forceinline unsigned int toScalar(const vuint4& v) { return _mm_cvtsi128_si32(v); }
-#endif
-
+
////////////////////////////////////////////////////////////////////////////////
/// Reductions
////////////////////////////////////////////////////////////////////////////////
@@ -497,3 +417,10 @@ namespace embree
}
}
+#undef vboolf
+#undef vboold
+#undef vint
+#undef vuint
+#undef vllong
+#undef vfloat
+#undef vdouble
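
// Editor's note: a sketch (not part of the patch) of the non-temporal store
// split shown above. With the SSE4.1 path enabled the header streams the data
// through the float domain; otherwise it falls back to a normal aligned store.
#include <immintrin.h>

static inline void store_nt4(void* ptr, __m128i v)
{
#if defined(__SSE4_1__)
    _mm_stream_ps((float*)ptr, _mm_castsi128_ps(v));
#else
    _mm_store_si128((__m128i*)ptr, v);
#endif
}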
diff --git a/thirdparty/embree-aarch64/common/simd/vuint8_avx.h b/thirdparty/embree/common/simd/vuint8_avx.h
index d4e86ae92d..589cd9d731 100644
--- a/thirdparty/embree-aarch64/common/simd/vuint8_avx.h
+++ b/thirdparty/embree/common/simd/vuint8_avx.h
@@ -1,8 +1,16 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
+#define vboolf vboolf_impl
+#define vboold vboold_impl
+#define vint vint_impl
+#define vuint vuint_impl
+#define vllong vllong_impl
+#define vfloat vfloat_impl
+#define vdouble vdouble_impl
+
namespace embree
{
/* 8-wide AVX integer type */
@@ -69,24 +77,20 @@ namespace embree
static __forceinline void store (void* ptr, const vuint8& f) { _mm256_store_ps((float*)ptr,_mm256_castsi256_ps(f)); }
static __forceinline void storeu(void* ptr, const vuint8& f) { _mm256_storeu_ps((float*)ptr,_mm256_castsi256_ps(f)); }
-#if !defined(__aarch64__)
static __forceinline void store (const vboolf8& mask, void* ptr, const vuint8& f) { _mm256_maskstore_ps((float*)ptr,(__m256i)mask,_mm256_castsi256_ps(f)); }
static __forceinline void storeu(const vboolf8& mask, void* ptr, const vuint8& f) { _mm256_maskstore_ps((float*)ptr,(__m256i)mask,_mm256_castsi256_ps(f)); }
-#else
- static __forceinline void store (const vboolf8& mask, void* ptr, const vuint8& f) { _mm256_maskstore_ps((float*)ptr,(__m256i)mask.v,_mm256_castsi256_ps(f)); }
- static __forceinline void storeu(const vboolf8& mask, void* ptr, const vuint8& f) { _mm256_maskstore_ps((float*)ptr,(__m256i)mask.v,_mm256_castsi256_ps(f)); }
-#endif
+
static __forceinline void store_nt(void* ptr, const vuint8& v) {
_mm256_stream_ps((float*)ptr,_mm256_castsi256_ps(v));
}
- static __forceinline vuint8 load(const uint8_t* ptr) {
+ static __forceinline vuint8 load(const unsigned char* ptr) {
vuint4 il = vuint4::load(ptr+0);
vuint4 ih = vuint4::load(ptr+4);
return vuint8(il,ih);
}
- static __forceinline vuint8 loadu(const uint8_t* ptr) {
+ static __forceinline vuint8 loadu(const unsigned char* ptr) {
vuint4 il = vuint4::loadu(ptr+0);
vuint4 ih = vuint4::loadu(ptr+4);
return vuint8(il,ih);
@@ -104,7 +108,7 @@ namespace embree
return vuint8(il,ih);
}
- static __forceinline void store(uint8_t* ptr, const vuint8& i) {
+ static __forceinline void store(unsigned char* ptr, const vuint8& i) {
vuint4 il(i.vl);
vuint4 ih(i.vh);
vuint4::store(ptr + 0,il);
@@ -119,54 +123,54 @@ namespace embree
template<int scale = 4>
static __forceinline vuint8 gather(const unsigned int* ptr, const vint8& index) {
return vuint8(
- *(unsigned int*)(((int8_t*)ptr)+scale*index[0]),
- *(unsigned int*)(((int8_t*)ptr)+scale*index[1]),
- *(unsigned int*)(((int8_t*)ptr)+scale*index[2]),
- *(unsigned int*)(((int8_t*)ptr)+scale*index[3]),
- *(unsigned int*)(((int8_t*)ptr)+scale*index[4]),
- *(unsigned int*)(((int8_t*)ptr)+scale*index[5]),
- *(unsigned int*)(((int8_t*)ptr)+scale*index[6]),
- *(unsigned int*)(((int8_t*)ptr)+scale*index[7]));
+ *(unsigned int*)(((char*)ptr)+scale*index[0]),
+ *(unsigned int*)(((char*)ptr)+scale*index[1]),
+ *(unsigned int*)(((char*)ptr)+scale*index[2]),
+ *(unsigned int*)(((char*)ptr)+scale*index[3]),
+ *(unsigned int*)(((char*)ptr)+scale*index[4]),
+ *(unsigned int*)(((char*)ptr)+scale*index[5]),
+ *(unsigned int*)(((char*)ptr)+scale*index[6]),
+ *(unsigned int*)(((char*)ptr)+scale*index[7]));
}
template<int scale = 4>
static __forceinline vuint8 gather(const vboolf8& mask, const unsigned int* ptr, const vint8& index) {
vuint8 r = zero;
- if (likely(mask[0])) r[0] = *(unsigned int*)(((int8_t*)ptr)+scale*index[0]);
- if (likely(mask[1])) r[1] = *(unsigned int*)(((int8_t*)ptr)+scale*index[1]);
- if (likely(mask[2])) r[2] = *(unsigned int*)(((int8_t*)ptr)+scale*index[2]);
- if (likely(mask[3])) r[3] = *(unsigned int*)(((int8_t*)ptr)+scale*index[3]);
- if (likely(mask[4])) r[4] = *(unsigned int*)(((int8_t*)ptr)+scale*index[4]);
- if (likely(mask[5])) r[5] = *(unsigned int*)(((int8_t*)ptr)+scale*index[5]);
- if (likely(mask[6])) r[6] = *(unsigned int*)(((int8_t*)ptr)+scale*index[6]);
- if (likely(mask[7])) r[7] = *(unsigned int*)(((int8_t*)ptr)+scale*index[7]);
+ if (likely(mask[0])) r[0] = *(unsigned int*)(((char*)ptr)+scale*index[0]);
+ if (likely(mask[1])) r[1] = *(unsigned int*)(((char*)ptr)+scale*index[1]);
+ if (likely(mask[2])) r[2] = *(unsigned int*)(((char*)ptr)+scale*index[2]);
+ if (likely(mask[3])) r[3] = *(unsigned int*)(((char*)ptr)+scale*index[3]);
+ if (likely(mask[4])) r[4] = *(unsigned int*)(((char*)ptr)+scale*index[4]);
+ if (likely(mask[5])) r[5] = *(unsigned int*)(((char*)ptr)+scale*index[5]);
+ if (likely(mask[6])) r[6] = *(unsigned int*)(((char*)ptr)+scale*index[6]);
+ if (likely(mask[7])) r[7] = *(unsigned int*)(((char*)ptr)+scale*index[7]);
return r;
}
template<int scale = 4>
static __forceinline void scatter(void* ptr, const vint8& ofs, const vuint8& v)
{
- *(unsigned int*)(((int8_t*)ptr)+scale*ofs[0]) = v[0];
- *(unsigned int*)(((int8_t*)ptr)+scale*ofs[1]) = v[1];
- *(unsigned int*)(((int8_t*)ptr)+scale*ofs[2]) = v[2];
- *(unsigned int*)(((int8_t*)ptr)+scale*ofs[3]) = v[3];
- *(unsigned int*)(((int8_t*)ptr)+scale*ofs[4]) = v[4];
- *(unsigned int*)(((int8_t*)ptr)+scale*ofs[5]) = v[5];
- *(unsigned int*)(((int8_t*)ptr)+scale*ofs[6]) = v[6];
- *(unsigned int*)(((int8_t*)ptr)+scale*ofs[7]) = v[7];
+ *(unsigned int*)(((char*)ptr)+scale*ofs[0]) = v[0];
+ *(unsigned int*)(((char*)ptr)+scale*ofs[1]) = v[1];
+ *(unsigned int*)(((char*)ptr)+scale*ofs[2]) = v[2];
+ *(unsigned int*)(((char*)ptr)+scale*ofs[3]) = v[3];
+ *(unsigned int*)(((char*)ptr)+scale*ofs[4]) = v[4];
+ *(unsigned int*)(((char*)ptr)+scale*ofs[5]) = v[5];
+ *(unsigned int*)(((char*)ptr)+scale*ofs[6]) = v[6];
+ *(unsigned int*)(((char*)ptr)+scale*ofs[7]) = v[7];
}
template<int scale = 4>
static __forceinline void scatter(const vboolf8& mask, void* ptr, const vint8& ofs, const vuint8& v)
{
- if (likely(mask[0])) *(unsigned int*)(((int8_t*)ptr)+scale*ofs[0]) = v[0];
- if (likely(mask[1])) *(unsigned int*)(((int8_t*)ptr)+scale*ofs[1]) = v[1];
- if (likely(mask[2])) *(unsigned int*)(((int8_t*)ptr)+scale*ofs[2]) = v[2];
- if (likely(mask[3])) *(unsigned int*)(((int8_t*)ptr)+scale*ofs[3]) = v[3];
- if (likely(mask[4])) *(unsigned int*)(((int8_t*)ptr)+scale*ofs[4]) = v[4];
- if (likely(mask[5])) *(unsigned int*)(((int8_t*)ptr)+scale*ofs[5]) = v[5];
- if (likely(mask[6])) *(unsigned int*)(((int8_t*)ptr)+scale*ofs[6]) = v[6];
- if (likely(mask[7])) *(unsigned int*)(((int8_t*)ptr)+scale*ofs[7]) = v[7];
+ if (likely(mask[0])) *(unsigned int*)(((char*)ptr)+scale*ofs[0]) = v[0];
+ if (likely(mask[1])) *(unsigned int*)(((char*)ptr)+scale*ofs[1]) = v[1];
+ if (likely(mask[2])) *(unsigned int*)(((char*)ptr)+scale*ofs[2]) = v[2];
+ if (likely(mask[3])) *(unsigned int*)(((char*)ptr)+scale*ofs[3]) = v[3];
+ if (likely(mask[4])) *(unsigned int*)(((char*)ptr)+scale*ofs[4]) = v[4];
+ if (likely(mask[5])) *(unsigned int*)(((char*)ptr)+scale*ofs[5]) = v[5];
+ if (likely(mask[6])) *(unsigned int*)(((char*)ptr)+scale*ofs[6]) = v[6];
+ if (likely(mask[7])) *(unsigned int*)(((char*)ptr)+scale*ofs[7]) = v[7];
}
@@ -294,10 +298,6 @@ namespace embree
return _mm256_castps_si256(_mm256_blendv_ps(_mm256_castsi256_ps(f), _mm256_castsi256_ps(t), m));
}
- __forceinline vuint8 notand(const vboolf8& m, const vuint8& f) {
- return _mm256_castps_si256(_mm256_andnot_ps(m, _mm256_castsi256_ps(f)));
- }
-
////////////////////////////////////////////////////////////////////////////////
/// Movement/Shifting/Shuffling Functions
@@ -335,7 +335,6 @@ namespace embree
template<> __forceinline vuint8 shuffle<1, 1, 3, 3>(const vuint8& v) { return _mm256_castps_si256(_mm256_movehdup_ps(_mm256_castsi256_ps(v))); }
template<> __forceinline vuint8 shuffle<0, 1, 0, 1>(const vuint8& v) { return _mm256_castps_si256(_mm256_castpd_ps(_mm256_movedup_pd(_mm256_castps_pd(_mm256_castsi256_ps(v))))); }
- __forceinline vuint8 broadcast(const unsigned int* ptr) { return _mm256_castps_si256(_mm256_broadcast_ss((const float*)ptr)); }
template<int i> __forceinline vuint8 insert4(const vuint8& a, const vuint4& b) { return _mm256_insertf128_si256(a, b, i); }
template<int i> __forceinline vuint4 extract4(const vuint8& a) { return _mm256_extractf128_si256(a, i); }
template<> __forceinline vuint4 extract4<0>(const vuint8& a) { return _mm256_castsi256_si128(a); }
@@ -377,3 +376,11 @@ namespace embree
return cout << "<" << a[0] << ", " << a[1] << ", " << a[2] << ", " << a[3] << ", " << a[4] << ", " << a[5] << ", " << a[6] << ", " << a[7] << ">";
}
}
+
+#undef vboolf
+#undef vboold
+#undef vint
+#undef vuint
+#undef vllong
+#undef vfloat
+#undef vdouble
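Note: the gather/scatter fallbacks above compute byte addresses by casting the base pointer to char* and adding scale*index[i], which is what the hardware gather instructions do internally. A hedged scalar sketch of the same addressing (illustrative only, using memcpy to sidestep alignment concerns of a raw pointer cast):

    #include <cstring>

    // Scalar illustration of the byte-scaled gather used in the fallback paths
    // above; 'scale' is a byte multiplier applied to each 32-bit index
    // (4 for densely packed unsigned ints).
    template <int scale = 4>
    void gather8(unsigned int out[8], const unsigned int* base, const int index[8]) {
      for (int i = 0; i < 8; i++) {
        unsigned int v;
        std::memcpy(&v, (const char*)base + (long long)scale * index[i], sizeof(v));
        out[i] = v;
      }
    }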
diff --git a/thirdparty/embree-aarch64/common/simd/vuint8_avx2.h b/thirdparty/embree/common/simd/vuint8_avx2.h
index b2a965448d..17b994522f 100644
--- a/thirdparty/embree-aarch64/common/simd/vuint8_avx2.h
+++ b/thirdparty/embree/common/simd/vuint8_avx2.h
@@ -1,8 +1,16 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
+#define vboolf vboolf_impl
+#define vboold vboold_impl
+#define vint vint_impl
+#define vuint vuint_impl
+#define vllong vllong_impl
+#define vfloat vfloat_impl
+#define vdouble vdouble_impl
+
namespace embree
{
/* 8-wide AVX integer type */
@@ -66,8 +74,8 @@ namespace embree
/// Loads and Stores
////////////////////////////////////////////////////////////////////////////////
- static __forceinline vuint8 load(const uint8_t* ptr) { return _mm256_cvtepu8_epi32(_mm_loadl_epi64((__m128i*)ptr)); }
- static __forceinline vuint8 loadu(const uint8_t* ptr) { return _mm256_cvtepu8_epi32(_mm_loadl_epi64((__m128i*)ptr)); }
+ static __forceinline vuint8 load(const unsigned char* ptr) { return _mm256_cvtepu8_epi32(_mm_loadl_epi64((__m128i*)ptr)); }
+ static __forceinline vuint8 loadu(const unsigned char* ptr) { return _mm256_cvtepu8_epi32(_mm_loadl_epi64((__m128i*)ptr)); }
static __forceinline vuint8 load(const unsigned short* ptr) { return _mm256_cvtepu16_epi32(_mm_load_si128((__m128i*)ptr)); }
static __forceinline vuint8 loadu(const unsigned short* ptr) { return _mm256_cvtepu16_epi32(_mm_loadu_si128((__m128i*)ptr)); }
@@ -107,7 +115,7 @@ namespace embree
_mm256_stream_ps((float*)ptr,_mm256_castsi256_ps(v));
}
- static __forceinline void store(uint8_t* ptr, const vuint8& i)
+ static __forceinline void store(unsigned char* ptr, const vuint8& i)
{
for (size_t j=0; j<8; j++)
ptr[j] = i[j];
@@ -139,14 +147,14 @@ namespace embree
#if defined(__AVX512VL__)
_mm256_i32scatter_epi32((int*)ptr, ofs, v, scale);
#else
- *(unsigned int*)(((int8_t*)ptr) + scale * ofs[0]) = v[0];
- *(unsigned int*)(((int8_t*)ptr) + scale * ofs[1]) = v[1];
- *(unsigned int*)(((int8_t*)ptr) + scale * ofs[2]) = v[2];
- *(unsigned int*)(((int8_t*)ptr) + scale * ofs[3]) = v[3];
- *(unsigned int*)(((int8_t*)ptr) + scale * ofs[4]) = v[4];
- *(unsigned int*)(((int8_t*)ptr) + scale * ofs[5]) = v[5];
- *(unsigned int*)(((int8_t*)ptr) + scale * ofs[6]) = v[6];
- *(unsigned int*)(((int8_t*)ptr) + scale * ofs[7]) = v[7];
+ *(unsigned int*)(((char*)ptr)+scale*ofs[0]) = v[0];
+ *(unsigned int*)(((char*)ptr)+scale*ofs[1]) = v[1];
+ *(unsigned int*)(((char*)ptr)+scale*ofs[2]) = v[2];
+ *(unsigned int*)(((char*)ptr)+scale*ofs[3]) = v[3];
+ *(unsigned int*)(((char*)ptr)+scale*ofs[4]) = v[4];
+ *(unsigned int*)(((char*)ptr)+scale*ofs[5]) = v[5];
+ *(unsigned int*)(((char*)ptr)+scale*ofs[6]) = v[6];
+ *(unsigned int*)(((char*)ptr)+scale*ofs[7]) = v[7];
#endif
}
@@ -156,14 +164,14 @@ namespace embree
#if defined(__AVX512VL__)
_mm256_mask_i32scatter_epi32((int*)ptr, mask, ofs, v, scale);
#else
- if (likely(mask[0])) *(unsigned int*)(((int8_t*)ptr)+scale*ofs[0]) = v[0];
- if (likely(mask[1])) *(unsigned int*)(((int8_t*)ptr)+scale*ofs[1]) = v[1];
- if (likely(mask[2])) *(unsigned int*)(((int8_t*)ptr)+scale*ofs[2]) = v[2];
- if (likely(mask[3])) *(unsigned int*)(((int8_t*)ptr)+scale*ofs[3]) = v[3];
- if (likely(mask[4])) *(unsigned int*)(((int8_t*)ptr)+scale*ofs[4]) = v[4];
- if (likely(mask[5])) *(unsigned int*)(((int8_t*)ptr)+scale*ofs[5]) = v[5];
- if (likely(mask[6])) *(unsigned int*)(((int8_t*)ptr)+scale*ofs[6]) = v[6];
- if (likely(mask[7])) *(unsigned int*)(((int8_t*)ptr)+scale*ofs[7]) = v[7];
+ if (likely(mask[0])) *(unsigned int*)(((char*)ptr)+scale*ofs[0]) = v[0];
+ if (likely(mask[1])) *(unsigned int*)(((char*)ptr)+scale*ofs[1]) = v[1];
+ if (likely(mask[2])) *(unsigned int*)(((char*)ptr)+scale*ofs[2]) = v[2];
+ if (likely(mask[3])) *(unsigned int*)(((char*)ptr)+scale*ofs[3]) = v[3];
+ if (likely(mask[4])) *(unsigned int*)(((char*)ptr)+scale*ofs[4]) = v[4];
+ if (likely(mask[5])) *(unsigned int*)(((char*)ptr)+scale*ofs[5]) = v[5];
+ if (likely(mask[6])) *(unsigned int*)(((char*)ptr)+scale*ofs[6]) = v[6];
+ if (likely(mask[7])) *(unsigned int*)(((char*)ptr)+scale*ofs[7]) = v[7];
#endif
}
@@ -371,16 +379,12 @@ namespace embree
template<> __forceinline vuint8 shuffle<1, 1, 3, 3>(const vuint8& v) { return _mm256_castps_si256(_mm256_movehdup_ps(_mm256_castsi256_ps(v))); }
template<> __forceinline vuint8 shuffle<0, 1, 0, 1>(const vuint8& v) { return _mm256_castps_si256(_mm256_castpd_ps(_mm256_movedup_pd(_mm256_castps_pd(_mm256_castsi256_ps(v))))); }
- __forceinline vuint8 broadcast(const unsigned int* ptr) { return _mm256_castps_si256(_mm256_broadcast_ss((const float*)ptr)); }
-
template<int i> __forceinline vuint8 insert4(const vuint8& a, const vuint4& b) { return _mm256_insertf128_si256(a, b, i); }
template<int i> __forceinline vuint4 extract4(const vuint8& a) { return _mm256_extractf128_si256(a, i); }
template<> __forceinline vuint4 extract4<0>(const vuint8& a) { return _mm256_castsi256_si128(a); }
__forceinline int toScalar(const vuint8& v) { return _mm_cvtsi128_si32(_mm256_castsi256_si128(v)); }
-#if !defined(__aarch64__)
-
__forceinline vuint8 permute(const vuint8& v, const __m256i& index) {
return _mm256_permutevar8x32_epi32(v, index);
}
@@ -396,10 +400,7 @@ namespace embree
#else
return _mm256_alignr_epi8(a, b, 4*i);
#endif
- }
-
-#endif
-
+ }
////////////////////////////////////////////////////////////////////////////////
/// Reductions
@@ -427,8 +428,6 @@ namespace embree
//__forceinline size_t select_min(const vboolf8& valid, const vuint8& v) { const vuint8 a = select(valid,v,vuint8(pos_inf)); return bsf(movemask(valid & (a == vreduce_min(a)))); }
//__forceinline size_t select_max(const vboolf8& valid, const vuint8& v) { const vuint8 a = select(valid,v,vuint8(neg_inf)); return bsf(movemask(valid & (a == vreduce_max(a)))); }
- __forceinline vuint8 assign(const vuint4& a) { return _mm256_castsi128_si256(a); }
-
////////////////////////////////////////////////////////////////////////////////
/// Output Operators
////////////////////////////////////////////////////////////////////////////////
@@ -437,3 +436,11 @@ namespace embree
return cout << "<" << a[0] << ", " << a[1] << ", " << a[2] << ", " << a[3] << ", " << a[4] << ", " << a[5] << ", " << a[6] << ", " << a[7] << ">";
}
}
+
+#undef vboolf
+#undef vboold
+#undef vint
+#undef vuint
+#undef vllong
+#undef vfloat
+#undef vdouble
diff --git a/thirdparty/embree-aarch64/common/sys/alloc.cpp b/thirdparty/embree/common/sys/alloc.cpp
index 12f143f131..abdd269069 100644
--- a/thirdparty/embree-aarch64/common/sys/alloc.cpp
+++ b/thirdparty/embree/common/sys/alloc.cpp
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#include "alloc.h"
@@ -23,7 +23,7 @@ namespace embree
if (size != 0 && ptr == nullptr)
// -- GODOT start --
// throw std::bad_alloc();
- abort();
+ abort();
// -- GODOT end --
return ptr;
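Note: the GODOT-marked hunk above keeps the policy of aborting instead of throwing std::bad_alloc when an aligned allocation fails, since Godot builds with exceptions disabled. A minimal sketch of that policy around posix_memalign, with a hypothetical function name:

    #include <stdlib.h>

    // Sketch of the "abort instead of throw" allocation policy kept by the GODOT
    // markers above; alignedMallocOrDie is a hypothetical name, and 'align' must
    // be a power of two that is a multiple of sizeof(void*).
    void* alignedMallocOrDie(size_t size, size_t align) {
      if (size == 0) return nullptr;
      void* ptr = nullptr;
      if (posix_memalign(&ptr, align, size) != 0 || ptr == nullptr)
        abort();   // no std::bad_alloc: exceptions are disabled in these builds
      return ptr;
    }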
diff --git a/thirdparty/embree-aarch64/common/sys/alloc.h b/thirdparty/embree/common/sys/alloc.h
index 5898ecda70..4fa474ec1d 100644
--- a/thirdparty/embree-aarch64/common/sys/alloc.h
+++ b/thirdparty/embree/common/sys/alloc.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
diff --git a/thirdparty/embree-aarch64/common/sys/array.h b/thirdparty/embree/common/sys/array.h
index 77722a39f6..dd9190c52a 100644
--- a/thirdparty/embree-aarch64/common/sys/array.h
+++ b/thirdparty/embree/common/sys/array.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
@@ -139,7 +139,7 @@ namespace embree
__forceinline Ty& operator[](const unsigned i) { assert(i<N); return data[i]; }
__forceinline const Ty& operator[](const unsigned i) const { assert(i<N); return data[i]; }
-#if defined(__X86_64__) || defined(__aarch64__)
+#if defined(__64BIT__)
__forceinline Ty& operator[](const size_t i) { assert(i<N); return data[i]; }
__forceinline const Ty& operator[](const size_t i) const { assert(i<N); return data[i]; }
#endif
@@ -196,7 +196,7 @@ namespace embree
__forceinline Ty& operator[](const int i) { assert(i>=0 && i<max_total_elements); resize(i+1); return data[i]; }
__forceinline Ty& operator[](const unsigned i) { assert(i<max_total_elements); resize(i+1); return data[i]; }
-#if defined(__X86_64__) || defined(__aarch64__)
+#if defined(__64BIT__)
__forceinline Ty& operator[](const size_t i) { assert(i<max_total_elements); resize(i+1); return data[i]; }
#endif
diff --git a/thirdparty/embree-aarch64/common/sys/atomic.h b/thirdparty/embree/common/sys/atomic.h
index ebfb8552c3..67af254f36 100644
--- a/thirdparty/embree-aarch64/common/sys/atomic.h
+++ b/thirdparty/embree/common/sys/atomic.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
diff --git a/thirdparty/embree-aarch64/common/sys/barrier.cpp b/thirdparty/embree/common/sys/barrier.cpp
index 0061d18db2..0c0e39d92d 100644
--- a/thirdparty/embree-aarch64/common/sys/barrier.cpp
+++ b/thirdparty/embree/common/sys/barrier.cpp
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#include "barrier.h"
diff --git a/thirdparty/embree-aarch64/common/sys/barrier.h b/thirdparty/embree/common/sys/barrier.h
index 89607b8685..37fc036291 100644
--- a/thirdparty/embree-aarch64/common/sys/barrier.h
+++ b/thirdparty/embree/common/sys/barrier.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
diff --git a/thirdparty/embree-aarch64/common/sys/condition.cpp b/thirdparty/embree/common/sys/condition.cpp
index 0e7ca7af39..606a1d0b04 100644
--- a/thirdparty/embree-aarch64/common/sys/condition.cpp
+++ b/thirdparty/embree/common/sys/condition.cpp
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#include "condition.h"
@@ -40,19 +40,23 @@ namespace embree
struct ConditionImplementation
{
__forceinline ConditionImplementation () {
- pthread_cond_init(&cond,nullptr);
+ if (pthread_cond_init(&cond,nullptr) != 0)
+ THROW_RUNTIME_ERROR("pthread_cond_init failed");
}
__forceinline ~ConditionImplementation() {
- pthread_cond_destroy(&cond);
- }
+ MAYBE_UNUSED bool ok = pthread_cond_destroy(&cond) == 0;
+ assert(ok);
+ }
__forceinline void wait(MutexSys& mutex) {
- pthread_cond_wait(&cond, (pthread_mutex_t*)mutex.mutex);
+ if (pthread_cond_wait(&cond, (pthread_mutex_t*)mutex.mutex) != 0)
+ THROW_RUNTIME_ERROR("pthread_cond_wait failed");
}
__forceinline void notify_all() {
- pthread_cond_broadcast(&cond);
+ if (pthread_cond_broadcast(&cond) != 0)
+ THROW_RUNTIME_ERROR("pthread_cond_broadcast failed");
}
public:
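Note: condition.cpp now checks the return value of every pthread_cond_* call instead of ignoring it. A self-contained sketch of the same checking discipline, using std::runtime_error in place of embree's THROW_RUNTIME_ERROR macro:

    #include <pthread.h>
    #include <stdexcept>

    // Sketch of the "check every pthread_cond_* return code" pattern adopted above.
    struct Condition {
      pthread_cond_t cond;
      Condition() {
        if (pthread_cond_init(&cond, nullptr) != 0)
          throw std::runtime_error("pthread_cond_init failed");
      }
      ~Condition() { pthread_cond_destroy(&cond); }  // destructor must not throw
      void wait(pthread_mutex_t& m) {
        if (pthread_cond_wait(&cond, &m) != 0)
          throw std::runtime_error("pthread_cond_wait failed");
      }
      void notify_all() {
        if (pthread_cond_broadcast(&cond) != 0)
          throw std::runtime_error("pthread_cond_broadcast failed");
      }
    };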
diff --git a/thirdparty/embree-aarch64/common/sys/condition.h b/thirdparty/embree/common/sys/condition.h
index 7a3a05aa81..557c6e3482 100644
--- a/thirdparty/embree-aarch64/common/sys/condition.h
+++ b/thirdparty/embree/common/sys/condition.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
diff --git a/thirdparty/embree-aarch64/common/sys/filename.cpp b/thirdparty/embree/common/sys/filename.cpp
index 86182c1afb..f55b224302 100644
--- a/thirdparty/embree-aarch64/common/sys/filename.cpp
+++ b/thirdparty/embree/common/sys/filename.cpp
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#include "filename.h"
diff --git a/thirdparty/embree-aarch64/common/sys/filename.h b/thirdparty/embree/common/sys/filename.h
index 58f881b14d..d5929cd836 100644
--- a/thirdparty/embree-aarch64/common/sys/filename.h
+++ b/thirdparty/embree/common/sys/filename.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
@@ -73,7 +73,7 @@ namespace embree
friend bool operator!=(const FileName& a, const FileName& b);
/*! output operator */
- friend embree_ostream operator<<(embree_ostream cout, const FileName& filename);
+ friend std::ostream& operator<<(std::ostream& cout, const FileName& filename);
private:
std::string filename;
diff --git a/thirdparty/embree-aarch64/common/sys/intrinsics.h b/thirdparty/embree/common/sys/intrinsics.h
index 44cdbd8f0f..ed8dd7d40a 100644
--- a/thirdparty/embree-aarch64/common/sys/intrinsics.h
+++ b/thirdparty/embree/common/sys/intrinsics.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
@@ -10,10 +10,7 @@
#endif
#if defined(__ARM_NEON)
-#include "../math/SSE2NEON.h"
-#if defined(NEON_AVX2_EMULATION)
-#include "../math/AVX2NEON.h"
-#endif
+#include "../simd/arm/emulation.h"
#else
#include <immintrin.h>
#endif
@@ -27,14 +24,6 @@
#endif
#endif
-#if defined(__aarch64__)
-#if !defined(_lzcnt_u32)
- #define _lzcnt_u32 __builtin_clz
-#endif
-#if !defined(_lzcnt_u32)
- #define _lzcnt_u32 __builtin_clzll
-#endif
-#else
#if defined(__LZCNT__)
#if !defined(_lzcnt_u32)
#define _lzcnt_u32 __lzcnt32
@@ -43,13 +32,16 @@
#define _lzcnt_u64 __lzcnt64
#endif
#endif
-#endif
#if defined(__WIN32__)
-# ifndef NOMINMAX
-# define NOMINMAX
-# endif
-# include <windows.h>
+// -- GODOT start --
+#if !defined(NOMINMAX)
+// -- GODOT end --
+#define NOMINMAX
+// -- GODOT start --
+#endif
+#include "windows.h"
+// -- GODOT end --
#endif
/* normally defined in pmmintrin.h, but we always need this */
@@ -62,133 +54,133 @@
namespace embree
{
-
+
////////////////////////////////////////////////////////////////////////////////
/// Windows Platform
////////////////////////////////////////////////////////////////////////////////
-
+
#if defined(__WIN32__)
-
- __forceinline size_t read_tsc()
+
+ __forceinline size_t read_tsc()
{
LARGE_INTEGER li;
QueryPerformanceCounter(&li);
return (size_t)li.QuadPart;
}
-
+
__forceinline int bsf(int v) {
-#if defined(__AVX2__) && !defined(__aarch64__)
+#if defined(__AVX2__)
return _tzcnt_u32(v);
#else
unsigned long r = 0; _BitScanForward(&r,v); return r;
#endif
}
-
+
__forceinline unsigned bsf(unsigned v) {
-#if defined(__AVX2__) && !defined(__aarch64__)
+#if defined(__AVX2__)
return _tzcnt_u32(v);
#else
unsigned long r = 0; _BitScanForward(&r,v); return r;
#endif
}
-
+
#if defined(__X86_64__)
__forceinline size_t bsf(size_t v) {
-#if defined(__AVX2__)
+#if defined(__AVX2__)
return _tzcnt_u64(v);
#else
unsigned long r = 0; _BitScanForward64(&r,v); return r;
#endif
}
#endif
-
- __forceinline int bscf(int& v)
+
+ __forceinline int bscf(int& v)
{
int i = bsf(v);
v &= v-1;
return i;
}
-
- __forceinline unsigned bscf(unsigned& v)
+
+ __forceinline unsigned bscf(unsigned& v)
{
unsigned i = bsf(v);
v &= v-1;
return i;
}
-
+
#if defined(__X86_64__)
- __forceinline size_t bscf(size_t& v)
+ __forceinline size_t bscf(size_t& v)
{
size_t i = bsf(v);
v &= v-1;
return i;
}
#endif
-
+
__forceinline int bsr(int v) {
-#if defined(__AVX2__) && !defined(__aarch64__)
+#if defined(__AVX2__)
return 31 - _lzcnt_u32(v);
#else
unsigned long r = 0; _BitScanReverse(&r,v); return r;
#endif
}
-
+
__forceinline unsigned bsr(unsigned v) {
-#if defined(__AVX2__) && !defined(__aarch64__)
+#if defined(__AVX2__)
return 31 - _lzcnt_u32(v);
#else
unsigned long r = 0; _BitScanReverse(&r,v); return r;
#endif
}
-
+
#if defined(__X86_64__)
__forceinline size_t bsr(size_t v) {
-#if defined(__AVX2__)
+#if defined(__AVX2__)
return 63 -_lzcnt_u64(v);
#else
unsigned long r = 0; _BitScanReverse64(&r, v); return r;
#endif
}
#endif
-
+
__forceinline int lzcnt(const int x)
{
-#if defined(__AVX2__) && !defined(__aarch64__)
+#if defined(__AVX2__)
return _lzcnt_u32(x);
#else
if (unlikely(x == 0)) return 32;
- return 31 - bsr(x);
+ return 31 - bsr(x);
#endif
}
-
+
__forceinline int btc(int v, int i) {
long r = v; _bittestandcomplement(&r,i); return r;
}
-
+
__forceinline int bts(int v, int i) {
long r = v; _bittestandset(&r,i); return r;
}
-
+
__forceinline int btr(int v, int i) {
long r = v; _bittestandreset(&r,i); return r;
}
-
+
#if defined(__X86_64__)
-
+
__forceinline size_t btc(size_t v, size_t i) {
size_t r = v; _bittestandcomplement64((__int64*)&r,i); return r;
}
-
+
__forceinline size_t bts(size_t v, size_t i) {
__int64 r = v; _bittestandset64(&r,i); return r;
}
-
+
__forceinline size_t btr(size_t v, size_t i) {
__int64 r = v; _bittestandreset64(&r,i); return r;
}
-
+
#endif
-
+
__forceinline int32_t atomic_cmpxchg(volatile int32_t* p, const int32_t c, const int32_t v) {
return _InterlockedCompareExchange((volatile long*)p,v,c);
}
@@ -196,174 +188,160 @@ namespace embree
////////////////////////////////////////////////////////////////////////////////
/// Unix Platform
////////////////////////////////////////////////////////////////////////////////
-
+
#else
-
+
#if defined(__i386__) && defined(__PIC__)
-
- __forceinline void __cpuid(int out[4], int op)
+
+ __forceinline void __cpuid(int out[4], int op)
{
asm volatile ("xchg{l}\t{%%}ebx, %1\n\t"
"cpuid\n\t"
"xchg{l}\t{%%}ebx, %1\n\t"
- : "=a"(out[0]), "=r"(out[1]), "=c"(out[2]), "=d"(out[3])
- : "0"(op));
+ : "=a"(out[0]), "=r"(out[1]), "=c"(out[2]), "=d"(out[3])
+ : "0"(op));
}
-
- __forceinline void __cpuid_count(int out[4], int op1, int op2)
+
+ __forceinline void __cpuid_count(int out[4], int op1, int op2)
{
asm volatile ("xchg{l}\t{%%}ebx, %1\n\t"
"cpuid\n\t"
"xchg{l}\t{%%}ebx, %1\n\t"
: "=a" (out[0]), "=r" (out[1]), "=c" (out[2]), "=d" (out[3])
- : "0" (op1), "2" (op2));
+ : "0" (op1), "2" (op2));
}
-
-#else
+
+#elif defined(__X86_ASM__)
__forceinline void __cpuid(int out[4], int op) {
-#if defined(__ARM_NEON)
- if (op == 0) { // Get CPU name
- out[0] = 0x41524d20;
- out[1] = 0x41524d20;
- out[2] = 0x41524d20;
- out[3] = 0x41524d20;
- }
-#else
- asm volatile ("cpuid" : "=a"(out[0]), "=b"(out[1]), "=c"(out[2]), "=d"(out[3]) : "a"(op));
-#endif
+ asm volatile ("cpuid" : "=a"(out[0]), "=b"(out[1]), "=c"(out[2]), "=d"(out[3]) : "a"(op));
}
-
-#if !defined(__ARM_NEON)
+
__forceinline void __cpuid_count(int out[4], int op1, int op2) {
- asm volatile ("cpuid" : "=a"(out[0]), "=b"(out[1]), "=c"(out[2]), "=d"(out[3]) : "a"(op1), "c"(op2));
+ asm volatile ("cpuid" : "=a"(out[0]), "=b"(out[1]), "=c"(out[2]), "=d"(out[3]) : "a"(op1), "c"(op2));
}
+
#endif
-
-#endif
-
+
__forceinline uint64_t read_tsc() {
-#if defined(__ARM_NEON)
- return 0; // FIXME(LTE): mimic rdtsc
-#else
+#if defined(__X86_ASM__)
uint32_t high,low;
asm volatile ("rdtsc" : "=d"(high), "=a"(low));
return (((uint64_t)high) << 32) + (uint64_t)low;
+#else
+ /* Not supported yet, meaning measuring traversal cost per pixel does not work. */
+ return 0;
#endif
}
-
+
__forceinline int bsf(int v) {
-#if defined(__ARM_NEON)
- return __builtin_ctz(v);
-#else
-#if defined(__AVX2__)
+#if defined(__AVX2__)
return _tzcnt_u32(v);
-#else
+#elif defined(__X86_ASM__)
int r = 0; asm ("bsf %1,%0" : "=r"(r) : "r"(v)); return r;
-#endif
+#else
+ return __builtin_ctz(v);
#endif
}
-
-#if defined(__X86_64__) || defined(__aarch64__)
- __forceinline unsigned bsf(unsigned v)
+
+#if defined(__64BIT__)
+ __forceinline unsigned bsf(unsigned v)
{
-#if defined(__ARM_NEON)
- return __builtin_ctz(v);
-#else
-#if defined(__AVX2__)
+#if defined(__AVX2__)
return _tzcnt_u32(v);
-#else
+#elif defined(__X86_ASM__)
unsigned r = 0; asm ("bsf %1,%0" : "=r"(r) : "r"(v)); return r;
-#endif
+#else
+ return __builtin_ctz(v);
#endif
}
#endif
-
+
__forceinline size_t bsf(size_t v) {
-#if defined(__AVX2__) && !defined(__aarch64__)
+#if defined(__AVX2__)
#if defined(__X86_64__)
return _tzcnt_u64(v);
#else
return _tzcnt_u32(v);
#endif
-#elif defined(__ARM_NEON)
- return __builtin_ctzl(v);
-#else
+#elif defined(__X86_ASM__)
size_t r = 0; asm ("bsf %1,%0" : "=r"(r) : "r"(v)); return r;
+#else
+ return __builtin_ctzl(v);
#endif
}
- __forceinline int bscf(int& v)
+ __forceinline int bscf(int& v)
{
int i = bsf(v);
v &= v-1;
return i;
}
-
-#if defined(__X86_64__) || defined(__aarch64__)
- __forceinline unsigned int bscf(unsigned int& v)
+
+#if defined(__64BIT__)
+ __forceinline unsigned int bscf(unsigned int& v)
{
unsigned int i = bsf(v);
v &= v-1;
return i;
}
#endif
-
- __forceinline size_t bscf(size_t& v)
+
+ __forceinline size_t bscf(size_t& v)
{
size_t i = bsf(v);
v &= v-1;
return i;
}
-
+
__forceinline int bsr(int v) {
-#if defined(__AVX2__) && !defined(__aarch64__)
+#if defined(__AVX2__)
return 31 - _lzcnt_u32(v);
-#elif defined(__ARM_NEON)
- return __builtin_clz(v)^31;
-#else
+#elif defined(__X86_ASM__)
int r = 0; asm ("bsr %1,%0" : "=r"(r) : "r"(v)); return r;
+#else
+ return __builtin_clz(v) ^ 31;
#endif
}
-
-#if defined(__X86_64__) || defined(__aarch64__)
+
+#if defined(__64BIT__)
__forceinline unsigned bsr(unsigned v) {
-#if defined(__AVX2__)
+#if defined(__AVX2__)
return 31 - _lzcnt_u32(v);
-#elif defined(__ARM_NEON)
- return __builtin_clz(v)^31;
-#else
+#elif defined(__X86_ASM__)
unsigned r = 0; asm ("bsr %1,%0" : "=r"(r) : "r"(v)); return r;
+#else
+ return __builtin_clz(v) ^ 31;
#endif
}
#endif
-
+
__forceinline size_t bsr(size_t v) {
-#if defined(__AVX2__) && !defined(__aarch64__)
+#if defined(__AVX2__)
#if defined(__X86_64__)
return 63 - _lzcnt_u64(v);
#else
return 31 - _lzcnt_u32(v);
#endif
-#elif defined(__aarch64__)
- return (sizeof(v) * 8 - 1) - __builtin_clzl(v);
-#else
+#elif defined(__X86_ASM__)
size_t r = 0; asm ("bsr %1,%0" : "=r"(r) : "r"(v)); return r;
+#else
+ return (sizeof(v) * 8 - 1) - __builtin_clzl(v);
#endif
}
-
+
__forceinline int lzcnt(const int x)
{
-#if defined(__AVX2__) && !defined(__aarch64__)
+#if defined(__AVX2__)
return _lzcnt_u32(x);
#else
if (unlikely(x == 0)) return 32;
- return 31 - bsr(x);
+ return 31 - bsr(x);
#endif
}
__forceinline size_t blsr(size_t v) {
-#if defined(__AVX2__) && !defined(__aarch64__)
+#if defined(__AVX2__)
#if defined(__INTEL_COMPILER)
return _blsr_u64(v);
#else
@@ -377,79 +355,65 @@ namespace embree
return v & (v-1);
#endif
}
-
+
__forceinline int btc(int v, int i) {
-#if defined(__aarch64__)
- // _bittestandcomplement(long *a, long b) {
- // unsigned char x = (*a >> b) & 1;
- // *a = *a ^ (1 << b);
- // return x;
-
- // We only need `*a`
- return (v ^ (1 << i));
-#else
+#if defined(__X86_ASM__)
int r = 0; asm ("btc %1,%0" : "=r"(r) : "r"(i), "0"(v) : "flags" ); return r;
+#else
+ return (v ^ (1 << i));
#endif
}
-
+
__forceinline int bts(int v, int i) {
-#if defined(__aarch64__)
- // _bittestandset(long *a, long b) {
- // unsigned char x = (*a >> b) & 1;
- // *a = *a | (1 << b);
- // return x;
- return (v | (v << i));
-#else
+#if defined(__X86_ASM__)
int r = 0; asm ("bts %1,%0" : "=r"(r) : "r"(i), "0"(v) : "flags"); return r;
+#else
+ return (v | (v << i));
#endif
}
-
+
__forceinline int btr(int v, int i) {
-#if defined(__aarch64__)
- // _bittestandreset(long *a, long b) {
- // unsigned char x = (*a >> b) & 1;
- // *a = *a & ~(1 << b);
- // return x;
- return (v & ~(v << i));
-#else
+#if defined(__X86_ASM__)
int r = 0; asm ("btr %1,%0" : "=r"(r) : "r"(i), "0"(v) : "flags"); return r;
+#else
+ return (v & ~(v << i));
#endif
}
-
+
__forceinline size_t btc(size_t v, size_t i) {
-#if defined(__aarch64__)
- return (v ^ (1 << i));
-#else
+#if defined(__X86_ASM__)
size_t r = 0; asm ("btc %1,%0" : "=r"(r) : "r"(i), "0"(v) : "flags" ); return r;
+#else
+ return (v ^ (1 << i));
#endif
}
-
+
__forceinline size_t bts(size_t v, size_t i) {
-#if defined(__aarch64__)
- return (v | (v << i));
-#else
+#if defined(__X86_ASM__)
size_t r = 0; asm ("bts %1,%0" : "=r"(r) : "r"(i), "0"(v) : "flags"); return r;
+#else
+ return (v | (v << i));
#endif
}
-
+
__forceinline size_t btr(size_t v, size_t i) {
-#if defined(__ARM_NEON)
- return (v & ~(v << i));
-#else
+#if defined(__X86_ASM__)
size_t r = 0; asm ("btr %1,%0" : "=r"(r) : "r"(i), "0"(v) : "flags"); return r;
+#else
+ return (v & ~(v << i));
#endif
}
__forceinline int32_t atomic_cmpxchg(int32_t volatile* value, int32_t comparand, const int32_t input) {
return __sync_val_compare_and_swap(value, comparand, input);
}
-
+
#endif
-
+
////////////////////////////////////////////////////////////////////////////////
/// All Platforms
////////////////////////////////////////////////////////////////////////////////
-
+
#if defined(__clang__) || defined(__GNUC__)
#if !defined(_mm_undefined_ps)
__forceinline __m128 _mm_undefined_ps() { return _mm_setzero_ps(); }
@@ -471,39 +435,41 @@ namespace embree
#endif
#endif
-#if defined(__SSE4_2__) || defined(__ARM_NEON)
-
+#if defined(__SSE4_2__)
+
__forceinline int popcnt(int in) {
return _mm_popcnt_u32(in);
}
-
+
__forceinline unsigned popcnt(unsigned in) {
return _mm_popcnt_u32(in);
}
-
-#if defined(__X86_64__) || defined(__ARM_NEON)
+
+#if defined(__64BIT__)
__forceinline size_t popcnt(size_t in) {
return _mm_popcnt_u64(in);
}
#endif
-
+
#endif
+#if defined(__X86_ASM__)
__forceinline uint64_t rdtsc()
{
- int dummy[4];
- __cpuid(dummy,0);
- uint64_t clock = read_tsc();
- __cpuid(dummy,0);
+ int dummy[4];
+ __cpuid(dummy,0);
+ uint64_t clock = read_tsc();
+ __cpuid(dummy,0);
return clock;
}
-
+#endif
+
__forceinline void pause_cpu(const size_t N = 8)
{
for (size_t i=0; i<N; i++)
- _mm_pause();
+ _mm_pause();
}
-
+
/* prefetches */
__forceinline void prefetchL1 (const void* ptr) { _mm_prefetch((const char*)ptr,_MM_HINT_T0); }
__forceinline void prefetchL2 (const void* ptr) { _mm_prefetch((const char*)ptr,_MM_HINT_T1); }
@@ -513,18 +479,18 @@ namespace embree
#if defined(__INTEL_COMPILER)
_mm_prefetch((const char*)ptr,_MM_HINT_ET0);
#else
- _mm_prefetch((const char*)ptr,_MM_HINT_T0);
+ _mm_prefetch((const char*)ptr,_MM_HINT_T0);
#endif
}
- __forceinline void prefetchL1EX(const void* ptr) {
- prefetchEX(ptr);
+ __forceinline void prefetchL1EX(const void* ptr) {
+ prefetchEX(ptr);
}
-
- __forceinline void prefetchL2EX(const void* ptr) {
- prefetchEX(ptr);
+
+ __forceinline void prefetchL2EX(const void* ptr) {
+ prefetchEX(ptr);
}
-#if defined(__AVX2__) && !defined(__aarch64__)
+#if defined(__AVX2__)
__forceinline unsigned int pext(unsigned int a, unsigned int b) { return _pext_u32(a, b); }
__forceinline unsigned int pdep(unsigned int a, unsigned int b) { return _pdep_u32(a, b); }
#if defined(__X86_64__)
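Note: the reworked intrinsics.h routes bsf/bsr/lzcnt through three tiers: BMI/LZCNT intrinsics under __AVX2__, inline asm under the new __X86_ASM__, and compiler builtins otherwise. A minimal sketch of the builtin tier, assuming a GCC/Clang-compatible compiler (names are illustrative, not embree's):

    // Portable fallback tier used on non-x86 targets (e.g. aarch64).
    // As with the hardware bsf/bsr instructions, behaviour is undefined for v == 0.
    inline int bsf_portable(int v) { return __builtin_ctz(v); }        // index of lowest set bit
    inline int bsr_portable(int v) { return __builtin_clz(v) ^ 31; }   // index of highest set bit
    inline int lzcnt_portable(int x) { return x == 0 ? 32 : 31 - bsr_portable(x); }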
diff --git a/thirdparty/embree-aarch64/common/sys/library.cpp b/thirdparty/embree/common/sys/library.cpp
index 899267a1e4..fc983dffd5 100644
--- a/thirdparty/embree-aarch64/common/sys/library.cpp
+++ b/thirdparty/embree/common/sys/library.cpp
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#include "library.h"
@@ -27,7 +27,7 @@ namespace embree
/* returns address of a symbol from the library */
void* getSymbol(lib_t lib, const std::string& sym) {
- return reinterpret_cast<void *>(GetProcAddress(HMODULE(lib),sym.c_str()));
+ return (void*)GetProcAddress(HMODULE(lib),sym.c_str());
}
/* closes the shared library */
@@ -61,7 +61,7 @@ namespace embree
lib = dlopen((executable.path() + fullName).c_str(),RTLD_NOW);
if (lib == nullptr) {
const char* error = dlerror();
- if (error) {
+ if (error) {
THROW_RUNTIME_ERROR(error);
} else {
THROW_RUNTIME_ERROR("could not load library "+executable.str());
diff --git a/thirdparty/embree-aarch64/common/sys/library.h b/thirdparty/embree/common/sys/library.h
index c2164e9fbe..67e14d2420 100644
--- a/thirdparty/embree-aarch64/common/sys/library.h
+++ b/thirdparty/embree/common/sys/library.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
diff --git a/thirdparty/embree-aarch64/common/sys/mutex.cpp b/thirdparty/embree/common/sys/mutex.cpp
index 11779bc9b9..789feaf2d8 100644
--- a/thirdparty/embree-aarch64/common/sys/mutex.cpp
+++ b/thirdparty/embree/common/sys/mutex.cpp
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#include "mutex.h"
@@ -36,7 +36,6 @@ namespace embree
MAYBE_UNUSED bool ok = pthread_mutex_destroy((pthread_mutex_t*)mutex) == 0;
assert(ok);
delete (pthread_mutex_t*)mutex;
- mutex = nullptr;
}
void MutexSys::lock()
diff --git a/thirdparty/embree-aarch64/common/sys/mutex.h b/thirdparty/embree/common/sys/mutex.h
index 1164210f23..4cb3626d92 100644
--- a/thirdparty/embree-aarch64/common/sys/mutex.h
+++ b/thirdparty/embree/common/sys/mutex.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
diff --git a/thirdparty/embree-aarch64/common/sys/platform.h b/thirdparty/embree/common/sys/platform.h
index 737f14aa6e..697e07bb86 100644
--- a/thirdparty/embree-aarch64/common/sys/platform.h
+++ b/thirdparty/embree/common/sys/platform.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
@@ -23,9 +23,17 @@
/// detect platform
////////////////////////////////////////////////////////////////////////////////
-/* detect 32 or 64 platform */
+/* detect 32 or 64 Intel platform */
#if defined(__x86_64__) || defined(__ia64__) || defined(_M_X64)
#define __X86_64__
+#define __X86_ASM__
+#elif defined(__i386__) || defined(_M_IX86)
+#define __X86_ASM__
+#endif
+
+/* detect 64 bit platform */
+#if defined(__X86_64__) || defined(__aarch64__)
+#define __64BIT__
#endif
/* detect Linux platform */
@@ -88,10 +96,12 @@
#define dll_import __declspec(dllimport)
#else
#define dll_export __attribute__ ((visibility ("default")))
-#define dll_import
+#define dll_import
#endif
-#ifdef __WIN32__
+// -- GODOT start --
+#if defined(__WIN32__) && !defined(__MINGW32__)
+// -- GODOT end --
#if !defined(__noinline)
#define __noinline __declspec(noinline)
#endif
@@ -103,16 +113,11 @@
#define __restrict__ //__restrict // causes issues with MSVC
#endif
#if !defined(__thread)
-// NOTE: Require `-fms-extensions` for clang
#define __thread __declspec(thread)
#endif
#if !defined(__aligned)
-#if defined(__MINGW32__)
-#define __aligned(...) __attribute__((aligned(__VA_ARGS__)))
-#else
#define __aligned(...) __declspec(align(__VA_ARGS__))
#endif
-#endif
//#define __FUNCTION__ __FUNCTION__
#define debugbreak() __debugbreak()
@@ -147,7 +152,7 @@
#endif
// -- GODOT start --
-#ifndef likely
+#if !defined(likely)
// -- GODOT end --
#if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
#define likely(expr) (expr)
@@ -205,7 +210,7 @@ namespace embree {
/* windows does not have ssize_t */
#if defined(__WIN32__)
-#if defined(__X86_64__) || defined(__aarch64__)
+#if defined(__64BIT__)
typedef int64_t ssize_t;
#else
typedef int32_t ssize_t;
@@ -329,7 +334,7 @@ __forceinline std::string toString(long long value) {
/// Some macros for static profiling
////////////////////////////////////////////////////////////////////////////////
-#if defined (__GNUC__)
+#if defined (__GNUC__)
#define IACA_SSC_MARK( MARK_ID ) \
__asm__ __volatile__ ( \
"\n\t movl $"#MARK_ID", %%ebx" \
@@ -368,7 +373,7 @@ namespace embree
bool active;
const Closure f;
};
-
+
template <typename Closure>
OnScopeExitHelper<Closure> OnScopeExit(const Closure f) {
return OnScopeExitHelper<Closure>(f);
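Note: platform.h now derives two orthogonal macros, __X86_ASM__ (any 32- or 64-bit x86, where inline asm and cpuid are available) and __64BIT__ (x86-64 or aarch64), and the rest of the tree switches on those instead of ad-hoc __aarch64__ checks. An illustrative consumer of the pointer-width side, mirroring the Windows ssize_t fallback selected via __64BIT__ above (my_ssize_t is a hypothetical name):

    #include <cstdint>

    // Width-dependent typedef keyed off the same conditions __64BIT__ is built from.
    #if defined(__x86_64__) || defined(__ia64__) || defined(_M_X64) || defined(__aarch64__)
    typedef int64_t my_ssize_t;   // what the __64BIT__ branch picks
    #else
    typedef int32_t my_ssize_t;
    #endif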
diff --git a/thirdparty/embree-aarch64/common/sys/ref.h b/thirdparty/embree/common/sys/ref.h
index 24648e6234..c2b56c1908 100644
--- a/thirdparty/embree-aarch64/common/sys/ref.h
+++ b/thirdparty/embree/common/sys/ref.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
diff --git a/thirdparty/embree-aarch64/common/sys/regression.cpp b/thirdparty/embree/common/sys/regression.cpp
index d95ff8dfe0..45315b1105 100644
--- a/thirdparty/embree-aarch64/common/sys/regression.cpp
+++ b/thirdparty/embree/common/sys/regression.cpp
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#include "regression.h"
diff --git a/thirdparty/embree-aarch64/common/sys/regression.h b/thirdparty/embree/common/sys/regression.h
index 632f8d92cf..bb0bb94006 100644
--- a/thirdparty/embree-aarch64/common/sys/regression.h
+++ b/thirdparty/embree/common/sys/regression.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
diff --git a/thirdparty/embree-aarch64/common/sys/string.cpp b/thirdparty/embree/common/sys/string.cpp
index 931244383e..f42fdc8536 100644
--- a/thirdparty/embree-aarch64/common/sys/string.cpp
+++ b/thirdparty/embree/common/sys/string.cpp
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#include "string.h"
diff --git a/thirdparty/embree-aarch64/common/sys/string.h b/thirdparty/embree/common/sys/string.h
index 2e9b0f88c3..820076b21c 100644
--- a/thirdparty/embree-aarch64/common/sys/string.h
+++ b/thirdparty/embree/common/sys/string.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
diff --git a/thirdparty/embree-aarch64/common/sys/sysinfo.cpp b/thirdparty/embree/common/sys/sysinfo.cpp
index 1d11436770..f1a59e511e 100644
--- a/thirdparty/embree-aarch64/common/sys/sysinfo.cpp
+++ b/thirdparty/embree/common/sys/sysinfo.cpp
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#include "sysinfo.h"
@@ -18,44 +18,32 @@ typedef cpuset_t cpu_set_t;
namespace embree
{
NullTy null;
-
- std::string getPlatformName()
+
+ std::string getPlatformName()
{
-#if defined(__LINUX__) && defined(__ANDROID__) && defined(__aarch64__) && defined(__ARM_NEON)
- return "Android Linux (aarch64 / arm64)";
-#elif defined(__LINUX__) && defined(__ANDROID__) && defined(__X86_64__)
- return "Android Linux (x64)";
-#elif defined(__LINUX__) && defined(__ANDROID__) && (defined(_X86_) || defined(__X86__) || defined(_M_IX86))
- return "Android Linux (x86)";
-#elif defined(__LINUX__) && !defined(__X86_64__)
+#if defined(__LINUX__) && !defined(__64BIT__)
return "Linux (32bit)";
-#elif defined(__LINUX__) && defined(__X86_64__)
+#elif defined(__LINUX__) && defined(__64BIT__)
return "Linux (64bit)";
-#elif defined(__FREEBSD__) && !defined(__X86_64__)
+#elif defined(__FREEBSD__) && !defined(__64BIT__)
return "FreeBSD (32bit)";
-#elif defined(__FREEBSD__) && defined(__X86_64__)
+#elif defined(__FREEBSD__) && defined(__64BIT__)
return "FreeBSD (64bit)";
-#elif defined(__CYGWIN__) && !defined(__X86_64__)
+#elif defined(__CYGWIN__) && !defined(__64BIT__)
return "Cygwin (32bit)";
-#elif defined(__CYGWIN__) && defined(__X86_64__)
+#elif defined(__CYGWIN__) && defined(__64BIT__)
return "Cygwin (64bit)";
-#elif defined(__WIN32__) && !defined(__X86_64__)
+#elif defined(__WIN32__) && !defined(__64BIT__)
return "Windows (32bit)";
-#elif defined(__WIN32__) && defined(__X86_64__)
+#elif defined(__WIN32__) && defined(__64BIT__)
return "Windows (64bit)";
-#elif defined(TARGET_IPHONE_SIMULATOR) && defined(__X86_64__)
- return "iOS Simulator (x64)";
-#elif defined(TARGET_OS_IPHONE) && defined(__aarch64__) && defined(__ARM_NEON)
- return "iOS (aarch64 / arm64)";
-#elif defined(__MACOSX__) && !defined(__X86_64__)
+#elif defined(__MACOSX__) && !defined(__64BIT__)
return "Mac OS X (32bit)";
-#elif defined(__MACOSX__) && defined(__X86_64__)
+#elif defined(__MACOSX__) && defined(__64BIT__)
return "Mac OS X (64bit)";
-#elif defined(__UNIX__) && defined(__aarch64__)
- return "Unix (aarch64)";
-#elif defined(__UNIX__) && !defined(__X86_64__)
+#elif defined(__UNIX__) && !defined(__64BIT__)
return "Unix (32bit)";
-#elif defined(__UNIX__) && defined(__X86_64__)
+#elif defined(__UNIX__) && defined(__64BIT__)
return "Unix (64bit)";
#else
return "Unknown";
@@ -91,21 +79,28 @@ namespace embree
std::string getCPUVendor()
{
- int cpuinfo[4];
- __cpuid (cpuinfo, 0);
+#if defined(__X86_ASM__)
+ int cpuinfo[4];
+ __cpuid (cpuinfo, 0);
int name[4];
name[0] = cpuinfo[1];
name[1] = cpuinfo[3];
name[2] = cpuinfo[2];
name[3] = 0;
return (char*)name;
+#elif defined(__ARM_NEON)
+ return "ARM";
+#else
+ return "Unknown";
+#endif
}
- CPU getCPUModel()
+ CPU getCPUModel()
{
+#if defined(__X86_ASM__)
if (getCPUVendor() != "GenuineIntel")
return CPU::UNKNOWN;
-
+
int out[4];
__cpuid(out, 0);
if (out[0] < 1) return CPU::UNKNOWN;
@@ -169,6 +164,10 @@ namespace embree
if (DisplayFamily_DisplayModel == 0x0685) return CPU::XEON_PHI_KNIGHTS_MILL;
if (DisplayFamily_DisplayModel == 0x0657) return CPU::XEON_PHI_KNIGHTS_LANDING;
+#elif defined(__ARM_NEON)
+ return CPU::ARM;
+#endif
+
return CPU::UNKNOWN;
}
@@ -195,13 +194,13 @@ namespace embree
case CPU::NEHALEM : return "Nehalem";
case CPU::CORE2 : return "Core2";
case CPU::CORE1 : return "Core";
- case CPU::ARM : return "Arm";
+ case CPU::ARM : return "ARM";
case CPU::UNKNOWN : return "Unknown CPU";
}
return "Unknown CPU (error)";
}
-#if !defined(__ARM_NEON)
+#if defined(__X86_ASM__)
/* constants to access destination registers of CPUID instruction */
static const int EAX = 0;
static const int EBX = 1;
@@ -241,16 +240,17 @@ namespace embree
static const int CPU_FEATURE_BIT_AVX512BW = 1 << 30; // AVX512BW (byte and word instructions)
static const int CPU_FEATURE_BIT_AVX512VL = 1 << 31; // AVX512VL (vector length extensions)
static const int CPU_FEATURE_BIT_AVX512IFMA = 1 << 21; // AVX512IFMA (integer fused multiple-add instructions)
-
+
/* cpuid[eax=7,ecx=0].ecx */
static const int CPU_FEATURE_BIT_AVX512VBMI = 1 << 1; // AVX512VBMI (vector bit manipulation instructions)
#endif
-#if !defined(__ARM_NEON)
- __noinline int64_t get_xcr0()
+#if defined(__X86_ASM__)
+ __noinline int64_t get_xcr0()
{
- // https://github.com/opencv/opencv/blob/master/modules/core/src/system.cpp#L466
-#if defined (__WIN32__) && defined(_XCR_XFEATURE_ENABLED_MASK)
+// -- GODOT start --
+#if defined (__WIN32__) && !defined (__MINGW32__)
+// -- GODOT end --
int64_t xcr0 = 0; // int64_t is workaround for compiler bug under VS2013, Win32
xcr0 = _xgetbv(0);
return xcr0;
@@ -264,40 +264,19 @@ namespace embree
int getCPUFeatures()
{
-#if defined(__ARM_NEON)
- int cpu_features = CPU_FEATURE_NEON|CPU_FEATURE_SSE|CPU_FEATURE_SSE2;
-#if defined(NEON_AVX2_EMULATION)
- cpu_features |= CPU_FEATURE_SSE3|CPU_FEATURE_SSSE3|CPU_FEATURE_SSE42;
- cpu_features |= CPU_FEATURE_XMM_ENABLED;
- cpu_features |= CPU_FEATURE_YMM_ENABLED;
- cpu_features |= CPU_FEATURE_SSE41 | CPU_FEATURE_RDRAND | CPU_FEATURE_F16C;
- cpu_features |= CPU_FEATURE_POPCNT;
- cpu_features |= CPU_FEATURE_AVX;
- cpu_features |= CPU_FEATURE_AVX2;
- cpu_features |= CPU_FEATURE_FMA3;
- cpu_features |= CPU_FEATURE_LZCNT;
- cpu_features |= CPU_FEATURE_BMI1;
- cpu_features |= CPU_FEATURE_BMI2;
- cpu_features |= CPU_FEATURE_NEON_2X;
-
-
-
-#endif
- return cpu_features;
-
-#else
+#if defined(__X86_ASM__)
/* cache CPU features access */
static int cpu_features = 0;
- if (cpu_features)
+ if (cpu_features)
return cpu_features;
/* get number of CPUID leaves */
- int cpuid_leaf0[4];
+ int cpuid_leaf0[4];
__cpuid(cpuid_leaf0, 0x00000000);
- unsigned nIds = cpuid_leaf0[EAX];
+ unsigned nIds = cpuid_leaf0[EAX];
/* get number of extended CPUID leaves */
- int cpuid_leafe[4];
+ int cpuid_leafe[4];
__cpuid(cpuid_leafe, 0x80000000);
unsigned nExIds = cpuid_leafe[EAX];
@@ -329,7 +308,7 @@ namespace embree
if (xmm_enabled) cpu_features |= CPU_FEATURE_XMM_ENABLED;
if (ymm_enabled) cpu_features |= CPU_FEATURE_YMM_ENABLED;
if (zmm_enabled) cpu_features |= CPU_FEATURE_ZMM_ENABLED;
-
+
if (cpuid_leaf_1[EDX] & CPU_FEATURE_BIT_SSE ) cpu_features |= CPU_FEATURE_SSE;
if (cpuid_leaf_1[EDX] & CPU_FEATURE_BIT_SSE2 ) cpu_features |= CPU_FEATURE_SSE2;
if (cpuid_leaf_1[ECX] & CPU_FEATURE_BIT_SSE3 ) cpu_features |= CPU_FEATURE_SSE3;
@@ -337,8 +316,8 @@ namespace embree
if (cpuid_leaf_1[ECX] & CPU_FEATURE_BIT_SSE4_1) cpu_features |= CPU_FEATURE_SSE41;
if (cpuid_leaf_1[ECX] & CPU_FEATURE_BIT_SSE4_2) cpu_features |= CPU_FEATURE_SSE42;
if (cpuid_leaf_1[ECX] & CPU_FEATURE_BIT_POPCNT) cpu_features |= CPU_FEATURE_POPCNT;
+
if (cpuid_leaf_1[ECX] & CPU_FEATURE_BIT_AVX ) cpu_features |= CPU_FEATURE_AVX;
-
if (cpuid_leaf_1[ECX] & CPU_FEATURE_BIT_F16C ) cpu_features |= CPU_FEATURE_F16C;
if (cpuid_leaf_1[ECX] & CPU_FEATURE_BIT_RDRAND) cpu_features |= CPU_FEATURE_RDRAND;
if (cpuid_leaf_7[EBX] & CPU_FEATURE_BIT_AVX2 ) cpu_features |= CPU_FEATURE_AVX2;
@@ -350,7 +329,7 @@ namespace embree
if (cpuid_leaf_7[EBX] & CPU_FEATURE_BIT_AVX512F ) cpu_features |= CPU_FEATURE_AVX512F;
if (cpuid_leaf_7[EBX] & CPU_FEATURE_BIT_AVX512DQ ) cpu_features |= CPU_FEATURE_AVX512DQ;
if (cpuid_leaf_7[EBX] & CPU_FEATURE_BIT_AVX512PF ) cpu_features |= CPU_FEATURE_AVX512PF;
- if (cpuid_leaf_7[EBX] & CPU_FEATURE_BIT_AVX512ER ) cpu_features |= CPU_FEATURE_AVX512ER;
+ if (cpuid_leaf_7[EBX] & CPU_FEATURE_BIT_AVX512ER ) cpu_features |= CPU_FEATURE_AVX512ER;
if (cpuid_leaf_7[EBX] & CPU_FEATURE_BIT_AVX512CD ) cpu_features |= CPU_FEATURE_AVX512CD;
if (cpuid_leaf_7[EBX] & CPU_FEATURE_BIT_AVX512BW ) cpu_features |= CPU_FEATURE_AVX512BW;
if (cpuid_leaf_7[EBX] & CPU_FEATURE_BIT_AVX512IFMA) cpu_features |= CPU_FEATURE_AVX512IFMA;
@@ -358,6 +337,12 @@ namespace embree
if (cpuid_leaf_7[ECX] & CPU_FEATURE_BIT_AVX512VBMI) cpu_features |= CPU_FEATURE_AVX512VBMI;
return cpu_features;
+#elif defined(__ARM_NEON)
+ /* emulated features with sse2neon */
+ return CPU_FEATURE_SSE|CPU_FEATURE_SSE2|CPU_FEATURE_XMM_ENABLED;
+#else
+ /* Unknown CPU. */
+ return 0;
#endif
}
@@ -391,11 +376,9 @@ namespace embree
if (features & CPU_FEATURE_AVX512VL) str += "AVX512VL ";
if (features & CPU_FEATURE_AVX512IFMA) str += "AVX512IFMA ";
if (features & CPU_FEATURE_AVX512VBMI) str += "AVX512VBMI ";
- if (features & CPU_FEATURE_NEON) str += "NEON ";
- if (features & CPU_FEATURE_NEON_2X) str += "2xNEON ";
return str;
}
-
+
std::string stringOfISA (int isa)
{
if (isa == SSE) return "SSE";
@@ -406,17 +389,14 @@ namespace embree
if (isa == SSE42) return "SSE4.2";
if (isa == AVX) return "AVX";
if (isa == AVX2) return "AVX2";
- if (isa == AVX512KNL) return "AVX512KNL";
- if (isa == AVX512SKX) return "AVX512SKX";
- if (isa == NEON) return "NEON";
- if (isa == NEON_2X) return "2xNEON";
+ if (isa == AVX512) return "AVX512";
return "UNKNOWN";
}
bool hasISA(int features, int isa) {
return (features & isa) == isa;
}
-
+
std::string supportedTargetList (int features)
{
std::string v;
@@ -429,10 +409,7 @@ namespace embree
if (hasISA(features,AVX)) v += "AVX ";
if (hasISA(features,AVXI)) v += "AVXI ";
if (hasISA(features,AVX2)) v += "AVX2 ";
- if (hasISA(features,AVX512KNL)) v += "AVX512KNL ";
- if (hasISA(features,AVX512SKX)) v += "AVX512SKX ";
- if (hasISA(features,NEON)) v += "NEON ";
- if (hasISA(features,NEON_2X)) v += "2xNEON ";
+ if (hasISA(features,AVX512)) v += "AVX512 ";
return v;
}
}
@@ -456,7 +433,7 @@ namespace embree
return std::string(filename);
}
- unsigned int getNumberOfLogicalThreads()
+ unsigned int getNumberOfLogicalThreads()
{
static int nThreads = -1;
if (nThreads != -1) return nThreads;
@@ -467,11 +444,11 @@ namespace embree
GetActiveProcessorGroupCountFunc pGetActiveProcessorGroupCount = (GetActiveProcessorGroupCountFunc)GetProcAddress(hlib, "GetActiveProcessorGroupCount");
GetActiveProcessorCountFunc pGetActiveProcessorCount = (GetActiveProcessorCountFunc) GetProcAddress(hlib, "GetActiveProcessorCount");
- if (pGetActiveProcessorGroupCount && pGetActiveProcessorCount)
+ if (pGetActiveProcessorGroupCount && pGetActiveProcessorCount)
{
int groups = pGetActiveProcessorGroupCount();
int totalProcessors = 0;
- for (int i = 0; i < groups; i++)
+ for (int i = 0; i < groups; i++)
totalProcessors += pGetActiveProcessorCount(i);
nThreads = totalProcessors;
}
@@ -485,7 +462,7 @@ namespace embree
return nThreads;
}
- int getTerminalWidth()
+ int getTerminalWidth()
{
HANDLE handle = GetStdHandle(STD_OUTPUT_HANDLE);
if (handle == INVALID_HANDLE_VALUE) return 80;
@@ -495,7 +472,7 @@ namespace embree
return info.dwSize.X;
}
- double getSeconds()
+ double getSeconds()
{
LARGE_INTEGER freq, val;
QueryPerformanceFrequency(&freq);
@@ -534,7 +511,7 @@ namespace embree
namespace embree
{
- std::string getExecutableFileName()
+ std::string getExecutableFileName()
{
std::string pid = "/proc/" + toString(getpid()) + "/exe";
char buf[4096];
@@ -587,7 +564,7 @@ namespace embree
size_t getVirtualMemoryBytes() {
return 0;
}
-
+
size_t getResidentMemoryBytes() {
return 0;
}
@@ -617,7 +594,7 @@ namespace embree
size_t getVirtualMemoryBytes() {
return 0;
}
-
+
size_t getResidentMemoryBytes() {
return 0;
}
@@ -638,12 +615,15 @@ namespace embree
namespace embree
{
- unsigned int getNumberOfLogicalThreads()
+ unsigned int getNumberOfLogicalThreads()
{
static int nThreads = -1;
if (nThreads != -1) return nThreads;
+// -- GODOT start --
+// #if defined(__MACOSX__)
#if defined(__MACOSX__) || defined(__ANDROID__)
+// -- GODOT end --
nThreads = sysconf(_SC_NPROCESSORS_ONLN); // does not work in Linux LXC container
assert(nThreads);
#else
@@ -651,12 +631,12 @@ namespace embree
if (pthread_getaffinity_np(pthread_self(), sizeof(set), &set) == 0)
nThreads = CPU_COUNT(&set);
#endif
-
+
assert(nThreads);
return nThreads;
}
- int getTerminalWidth()
+ int getTerminalWidth()
{
struct winsize info;
if (ioctl(STDOUT_FILENO, TIOCGWINSZ, &info) < 0) return 80;
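Note: getCPUVendor() above now reads the vendor string via cpuid only when __X86_ASM__ is defined and returns "ARM"/"Unknown" otherwise. A hedged standalone sketch of the x86 path using the GCC/Clang cpuid.h helper (the vendor string is returned in EBX, EDX, ECX of leaf 0):

    #include <cpuid.h>   // GCC/Clang helper, x86 targets only
    #include <cstring>
    #include <string>

    // Standalone sketch of cpuid-based vendor detection, mirroring the reworked
    // getCPUVendor() above.
    std::string cpu_vendor() {
      unsigned eax, ebx, ecx, edx;
      if (!__get_cpuid(0, &eax, &ebx, &ecx, &edx))
        return "Unknown";
      char name[13];
      std::memcpy(name + 0, &ebx, 4);
      std::memcpy(name + 4, &edx, 4);
      std::memcpy(name + 8, &ecx, 4);
      name[12] = '\0';
      return name;
    }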
diff --git a/thirdparty/embree-aarch64/common/sys/sysinfo.h b/thirdparty/embree/common/sys/sysinfo.h
index 8e313a59b3..72351d12e4 100644
--- a/thirdparty/embree-aarch64/common/sys/sysinfo.h
+++ b/thirdparty/embree/common/sys/sysinfo.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
@@ -16,13 +16,9 @@
/* define isa namespace and ISA bitvector */
#if defined (__AVX512VL__)
-# define isa avx512skx
-# define ISA AVX512SKX
-# define ISA_STR "AVX512SKX"
-#elif defined (__AVX512F__)
-# define isa avx512knl
-# define ISA AVX512KNL
-# define ISA_STR "AVX512KNL"
+# define isa avx512
+# define ISA AVX512
+# define ISA_STR "AVX512"
#elif defined (__AVX2__)
# define isa avx2
# define ISA AVX2
@@ -59,12 +55,7 @@
# define isa sse
# define ISA SSE
# define ISA_STR "SSE"
-#elif defined(__ARM_NEON)
-// NOTE(LTE): Use sse2 for `isa` for the compatibility at the moment.
-#define isa sse2
-#define ISA NEON
-#define ISA_STR "NEON"
-#else
+#else
#error Unknown ISA
#endif
@@ -120,7 +111,7 @@ namespace embree
static const int CPU_FEATURE_SSE3 = 1 << 2;
static const int CPU_FEATURE_SSSE3 = 1 << 3;
static const int CPU_FEATURE_SSE41 = 1 << 4;
- static const int CPU_FEATURE_SSE42 = 1 << 5;
+ static const int CPU_FEATURE_SSE42 = 1 << 5;
static const int CPU_FEATURE_POPCNT = 1 << 6;
static const int CPU_FEATURE_AVX = 1 << 7;
static const int CPU_FEATURE_F16C = 1 << 8;
@@ -131,7 +122,7 @@ namespace embree
static const int CPU_FEATURE_BMI1 = 1 << 13;
static const int CPU_FEATURE_BMI2 = 1 << 14;
static const int CPU_FEATURE_AVX512F = 1 << 16;
- static const int CPU_FEATURE_AVX512DQ = 1 << 17;
+ static const int CPU_FEATURE_AVX512DQ = 1 << 17;
static const int CPU_FEATURE_AVX512PF = 1 << 18;
static const int CPU_FEATURE_AVX512ER = 1 << 19;
static const int CPU_FEATURE_AVX512CD = 1 << 20;
@@ -142,9 +133,7 @@ namespace embree
static const int CPU_FEATURE_XMM_ENABLED = 1 << 25;
static const int CPU_FEATURE_YMM_ENABLED = 1 << 26;
static const int CPU_FEATURE_ZMM_ENABLED = 1 << 27;
- static const int CPU_FEATURE_NEON = 1 << 28;
- static const int CPU_FEATURE_NEON_2X = 1 << 29;
-
+
/*! get CPU features */
int getCPUFeatures();
@@ -155,7 +144,7 @@ namespace embree
std::string supportedTargetList (int isa);
/*! ISAs */
- static const int SSE = CPU_FEATURE_SSE | CPU_FEATURE_XMM_ENABLED;
+ static const int SSE = CPU_FEATURE_SSE | CPU_FEATURE_XMM_ENABLED;
static const int SSE2 = SSE | CPU_FEATURE_SSE2;
static const int SSE3 = SSE2 | CPU_FEATURE_SSE3;
static const int SSSE3 = SSE3 | CPU_FEATURE_SSSE3;
@@ -164,10 +153,7 @@ namespace embree
static const int AVX = SSE42 | CPU_FEATURE_AVX | CPU_FEATURE_YMM_ENABLED;
static const int AVXI = AVX | CPU_FEATURE_F16C | CPU_FEATURE_RDRAND;
static const int AVX2 = AVXI | CPU_FEATURE_AVX2 | CPU_FEATURE_FMA3 | CPU_FEATURE_BMI1 | CPU_FEATURE_BMI2 | CPU_FEATURE_LZCNT;
- static const int AVX512KNL = AVX2 | CPU_FEATURE_AVX512F | CPU_FEATURE_AVX512PF | CPU_FEATURE_AVX512ER | CPU_FEATURE_AVX512CD | CPU_FEATURE_ZMM_ENABLED;
- static const int AVX512SKX = AVX2 | CPU_FEATURE_AVX512F | CPU_FEATURE_AVX512DQ | CPU_FEATURE_AVX512CD | CPU_FEATURE_AVX512BW | CPU_FEATURE_AVX512VL | CPU_FEATURE_ZMM_ENABLED;
- static const int NEON = CPU_FEATURE_NEON | CPU_FEATURE_SSE | CPU_FEATURE_SSE2;
- static const int NEON_2X = CPU_FEATURE_NEON_2X | AVX2;
+ static const int AVX512 = AVX2 | CPU_FEATURE_AVX512F | CPU_FEATURE_AVX512DQ | CPU_FEATURE_AVX512CD | CPU_FEATURE_AVX512BW | CPU_FEATURE_AVX512VL | CPU_FEATURE_ZMM_ENABLED;
/*! converts ISA bitvector into a string */
std::string stringOfISA(int features);
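Note: sysinfo.h collapses the AVX512KNL/AVX512SKX targets into a single AVX512 constant; each ISA constant is the previous one OR'd with its new feature bits, so hasISA(features, isa) is a plain subset test. A small sketch of that cumulative-bitvector idea (the bit assignments are illustrative, not embree's):

    // Each target is the previous target plus its new feature bits; support is a
    // subset test on the feature bitvector.
    enum : int {
      F_SSE     = 1 << 0,
      F_SSE2    = 1 << 1,
      F_AVX     = 1 << 2,
      F_AVX2    = 1 << 3,
      F_AVX512F = 1 << 4,
    };

    constexpr int ISA_SSE2   = F_SSE | F_SSE2;
    constexpr int ISA_AVX    = ISA_SSE2 | F_AVX;
    constexpr int ISA_AVX2   = ISA_AVX | F_AVX2;
    constexpr int ISA_AVX512 = ISA_AVX2 | F_AVX512F;

    constexpr bool hasISA(int features, int isa) { return (features & isa) == isa; }

    static_assert(hasISA(ISA_AVX512, ISA_AVX2), "an AVX512 machine satisfies every lower target");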
diff --git a/thirdparty/embree-aarch64/common/sys/thread.cpp b/thirdparty/embree/common/sys/thread.cpp
index f9ea5b7d96..f4014be89b 100644
--- a/thirdparty/embree-aarch64/common/sys/thread.cpp
+++ b/thirdparty/embree/common/sys/thread.cpp
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#include "thread.h"
@@ -7,7 +7,7 @@
#include <iostream>
#if defined(__ARM_NEON)
-#include "../math/SSE2NEON.h"
+#include "../simd/arm/emulation.h"
#else
#include <xmmintrin.h>
#endif
@@ -39,7 +39,7 @@ namespace embree
GetActiveProcessorCountFunc pGetActiveProcessorCount = (GetActiveProcessorCountFunc)GetProcAddress(hlib, "GetActiveProcessorCount");
SetThreadGroupAffinityFunc pSetThreadGroupAffinity = (SetThreadGroupAffinityFunc)GetProcAddress(hlib, "SetThreadGroupAffinity");
SetThreadIdealProcessorExFunc pSetThreadIdealProcessorEx = (SetThreadIdealProcessorExFunc)GetProcAddress(hlib, "SetThreadIdealProcessorEx");
- if (pGetActiveProcessorGroupCount && pGetActiveProcessorCount && pSetThreadGroupAffinity && pSetThreadIdealProcessorEx)
+ if (pGetActiveProcessorGroupCount && pGetActiveProcessorCount && pSetThreadGroupAffinity && pSetThreadIdealProcessorEx)
{
int groups = pGetActiveProcessorGroupCount();
int totalProcessors = 0, group = 0, number = 0;
@@ -52,7 +52,7 @@ namespace embree
}
totalProcessors += processors;
}
-
+
GROUP_AFFINITY groupAffinity;
groupAffinity.Group = (WORD)group;
groupAffinity.Mask = (KAFFINITY)(uint64_t(1) << number);
@@ -61,15 +61,15 @@ namespace embree
groupAffinity.Reserved[2] = 0;
if (!pSetThreadGroupAffinity(thread, &groupAffinity, nullptr))
WARNING("SetThreadGroupAffinity failed"); // on purpose only a warning
-
+
PROCESSOR_NUMBER processorNumber;
processorNumber.Group = group;
processorNumber.Number = number;
processorNumber.Reserved = 0;
if (!pSetThreadIdealProcessorEx(thread, &processorNumber, nullptr))
WARNING("SetThreadIdealProcessorEx failed"); // on purpose only a warning
- }
- else
+ }
+ else
{
if (!SetThreadAffinityMask(thread, DWORD_PTR(uint64_t(1) << affinity)))
WARNING("SetThreadAffinityMask failed"); // on purpose only a warning
@@ -83,10 +83,10 @@ namespace embree
setAffinity(GetCurrentThread(), affinity);
}
- struct ThreadStartupData
+ struct ThreadStartupData
{
public:
- ThreadStartupData (thread_func f, void* arg)
+ ThreadStartupData (thread_func f, void* arg)
: f(f), arg(arg) {}
public:
thread_func f;
@@ -99,7 +99,6 @@ namespace embree
_mm_setcsr(_mm_getcsr() | /*FTZ:*/ (1<<15) | /*DAZ:*/ (1<<6));
parg->f(parg->arg);
delete parg;
- parg = nullptr;
return 0;
}
@@ -125,6 +124,12 @@ namespace embree
CloseHandle(HANDLE(tid));
}
+ /*! destroy a hardware thread by its handle */
+ void destroyThread(thread_t tid) {
+ TerminateThread(HANDLE(tid),0);
+ CloseHandle(HANDLE(tid));
+ }
+
/*! creates thread local storage */
tls_t createTls() {
return tls_t(size_t(TlsAlloc()));
@@ -153,27 +158,24 @@ namespace embree
/// Linux Platform
////////////////////////////////////////////////////////////////////////////////
-#if defined(__LINUX__)
+// -- GODOT start --
+#if defined(__LINUX__) && !defined(__ANDROID__)
+// -- GODOT end --
#include <fstream>
#include <sstream>
#include <algorithm>
-#if defined(__ANDROID__)
-#include <pthread.h>
-#endif
-
namespace embree
{
static MutexSys mutex;
static std::vector<size_t> threadIDs;
-
-#if !defined(__ANDROID__) // TODO(LTE): Implement for Android target
+
/* changes thread ID mapping such that we first fill up all thread on one core */
size_t mapThreadID(size_t threadID)
{
Lock<MutexSys> lock(mutex);
-
+
if (threadIDs.size() == 0)
{
/* parse thread/CPU topology */
@@ -185,11 +187,11 @@ namespace embree
if (fs.fail()) break;
int i;
- while (fs >> i)
+ while (fs >> i)
{
if (std::none_of(threadIDs.begin(),threadIDs.end(),[&] (int id) { return id == i; }))
threadIDs.push_back(i);
- if (fs.peek() == ',')
+ if (fs.peek() == ',')
fs.ignore();
}
fs.close();
@@ -233,24 +235,41 @@ namespace embree
return ID;
}
-#endif
/*! set affinity of the calling thread */
void setAffinity(ssize_t affinity)
{
-#if defined(__ANDROID__)
- // TODO(LTE): Implement
-#else
cpu_set_t cset;
CPU_ZERO(&cset);
size_t threadID = mapThreadID(affinity);
CPU_SET(threadID, &cset);
pthread_setaffinity_np(pthread_self(), sizeof(cset), &cset);
+ }
+}
#endif
+
+// -- GODOT start --
+////////////////////////////////////////////////////////////////////////////////
+/// Android Platform
+////////////////////////////////////////////////////////////////////////////////
+
+#if defined(__ANDROID__)
+
+namespace embree
+{
+ /*! set affinity of the calling thread */
+ void setAffinity(ssize_t affinity)
+ {
+ cpu_set_t cset;
+ CPU_ZERO(&cset);
+ CPU_SET(affinity, &cset);
+
+ sched_setaffinity(0, sizeof(cset), &cset);
}
}
#endif
+// -- GODOT end --
////////////////////////////////////////////////////////////////////////////////
/// FreeBSD Platform
@@ -289,10 +308,14 @@ namespace embree
/*! set affinity of the calling thread */
void setAffinity(ssize_t affinity)
{
+#if !defined(__ARM_NEON) // affinity seems not supported on M1 chip
+
thread_affinity_policy ap;
ap.affinity_tag = affinity;
if (thread_policy_set(mach_thread_self(),THREAD_AFFINITY_POLICY,(thread_policy_t)&ap,THREAD_AFFINITY_POLICY_COUNT) != KERN_SUCCESS)
WARNING("setting thread affinity failed"); // on purpose only a warning
+
+#endif
}
}
#endif
@@ -312,21 +335,21 @@ namespace embree
namespace embree
{
- struct ThreadStartupData
+ struct ThreadStartupData
{
public:
- ThreadStartupData (thread_func f, void* arg, int affinity)
+ ThreadStartupData (thread_func f, void* arg, int affinity)
: f(f), arg(arg), affinity(affinity) {}
- public:
+ public:
thread_func f;
void* arg;
ssize_t affinity;
};
-
+
static void* threadStartup(ThreadStartupData* parg)
{
_mm_setcsr(_mm_getcsr() | /*FTZ:*/ (1<<15) | /*DAZ:*/ (1<<6));
-
+
/*! Mac OS X does not support setting affinity at thread creation time */
#if defined(__MACOSX__)
if (parg->affinity >= 0)
@@ -335,7 +358,6 @@ namespace embree
parg->f(parg->arg);
delete parg;
- parg = nullptr;
return nullptr;
}
@@ -351,13 +373,15 @@ namespace embree
pthread_t* tid = new pthread_t;
if (pthread_create(tid,&attr,(void*(*)(void*))threadStartup,new ThreadStartupData(f,arg,threadID)) != 0) {
pthread_attr_destroy(&attr);
- delete tid;
+ delete tid;
FATAL("pthread_create failed");
}
pthread_attr_destroy(&attr);
/* set affinity */
+// -- GODOT start --
#if defined(__LINUX__) && !defined(__ANDROID__)
+// -- GODOT end --
if (threadID >= 0) {
cpu_set_t cset;
CPU_ZERO(&cset);
@@ -372,7 +396,16 @@ namespace embree
CPU_SET(threadID, &cset);
pthread_setaffinity_np(*tid, sizeof(cset), &cset);
}
+// -- GODOT start --
+#elif defined(__ANDROID__)
+ if (threadID >= 0) {
+ cpu_set_t cset;
+ CPU_ZERO(&cset);
+ CPU_SET(threadID, &cset);
+ sched_setaffinity(pthread_gettid_np(*tid), sizeof(cset), &cset);
+ }
#endif
+// -- GODOT end --
return thread_t(tid);
}
@@ -389,8 +422,20 @@ namespace embree
delete (pthread_t*)tid;
}
+ /*! destroy a hardware thread by its handle */
+ void destroyThread(thread_t tid) {
+// -- GODOT start --
+#if defined(__ANDROID__)
+ FATAL("Can't destroy threads on Android.");
+#else
+ pthread_cancel(*(pthread_t*)tid);
+ delete (pthread_t*)tid;
+#endif
+// -- GODOT end --
+ }
+
/*! creates thread local storage */
- tls_t createTls()
+ tls_t createTls()
{
pthread_key_t* key = new pthread_key_t;
if (pthread_key_create(key,nullptr) != 0) {
@@ -402,14 +447,14 @@ namespace embree
}
/*! return the thread local storage pointer */
- void* getTls(tls_t tls)
+ void* getTls(tls_t tls)
{
assert(tls);
return pthread_getspecific(*(pthread_key_t*)tls);
}
/*! set the thread local storage pointer */
- void setTls(tls_t tls, void* const ptr)
+ void setTls(tls_t tls, void* const ptr)
{
assert(tls);
if (pthread_setspecific(*(pthread_key_t*)tls, ptr) != 0)
@@ -417,7 +462,7 @@ namespace embree
}
/*! destroys thread local storage identifier */
- void destroyTls(tls_t tls)
+ void destroyTls(tls_t tls)
{
assert(tls);
if (pthread_key_delete(*(pthread_key_t*)tls) != 0)
diff --git a/thirdparty/embree-aarch64/common/sys/thread.h b/thirdparty/embree/common/sys/thread.h
index 45da6e6a70..92a10d5c5d 100644
--- a/thirdparty/embree-aarch64/common/sys/thread.h
+++ b/thirdparty/embree/common/sys/thread.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
@@ -29,6 +29,9 @@ namespace embree
/*! waits until the given thread has terminated */
void join(thread_t tid);
+ /*! destroy handle of a thread */
+ void destroyThread(thread_t tid);
+
/*! type for handle to thread local storage */
typedef struct opaque_tls_t* tls_t;
diff --git a/thirdparty/embree-aarch64/common/sys/vector.h b/thirdparty/embree/common/sys/vector.h
index e41794de7c..f832626789 100644
--- a/thirdparty/embree-aarch64/common/sys/vector.h
+++ b/thirdparty/embree/common/sys/vector.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
diff --git a/thirdparty/embree-aarch64/common/tasking/taskscheduler.h b/thirdparty/embree/common/tasking/taskscheduler.h
index 9940e068d0..8f3dd87689 100644
--- a/thirdparty/embree-aarch64/common/tasking/taskscheduler.h
+++ b/thirdparty/embree/common/tasking/taskscheduler.h
@@ -1,12 +1,10 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
#if defined(TASKING_INTERNAL)
# include "taskschedulerinternal.h"
-#elif defined(TASKING_GCD) && defined(BUILD_IOS)
-# include "taskschedulergcd.h"
#elif defined(TASKING_TBB)
# include "taskschedulertbb.h"
#elif defined(TASKING_PPL)
diff --git a/thirdparty/embree-aarch64/common/tasking/taskschedulerinternal.cpp b/thirdparty/embree/common/tasking/taskschedulerinternal.cpp
index ebf656d1a0..ad438588a3 100644
--- a/thirdparty/embree-aarch64/common/tasking/taskschedulerinternal.cpp
+++ b/thirdparty/embree/common/tasking/taskschedulerinternal.cpp
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#include "taskschedulerinternal.h"
@@ -154,12 +154,6 @@ namespace embree
assert(newNumThreads);
newNumThreads = min(newNumThreads, (size_t) getNumberOfLogicalThreads());
- // We are observing a few % gain by increasing number threads by 2 on aarch64.
-#if defined(__aarch64__) && defined(BUILD_IOS)
- numThreads = newNumThreads*2;
-#else
- numThreads = newNumThreads;
-#endif
numThreads = newNumThreads;
if (!startThreads && !running) return;
running = true;
@@ -382,10 +376,10 @@ namespace embree
yield();
#endif
}
- // -- GODOT start --
- // return except;
- return;
- // -- GODOT end --
+ // -- GODOT start --
+ // return except;
+ return;
+ // -- GODOT end --
}
bool TaskScheduler::steal_from_other_threads(Thread& thread)
diff --git a/thirdparty/embree-aarch64/common/tasking/taskschedulerinternal.h b/thirdparty/embree/common/tasking/taskschedulerinternal.h
index 8bd70b2b8c..8fa6bb12fa 100644
--- a/thirdparty/embree-aarch64/common/tasking/taskschedulerinternal.h
+++ b/thirdparty/embree/common/tasking/taskschedulerinternal.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
@@ -135,16 +135,15 @@ namespace embree
__forceinline void push_right(Thread& thread, const size_t size, const Closure& closure)
{
if (right >= TASK_STACK_SIZE)
- // -- GODOT start --
- // throw std::runtime_error("task stack overflow");
- abort();
- // -- GODOT end --
+ // -- GODOT start --
+ // throw std::runtime_error("task stack overflow");
+ abort();
+ // -- GODOT end --
/* allocate new task on right side of stack */
size_t oldStackPtr = stackPtr;
TaskFunction* func = new (alloc(sizeof(ClosureTaskFunction<Closure>))) ClosureTaskFunction<Closure>(closure);
- /* gcc 8 or later fails to compile without explicit .load() */
- new (&(tasks[right.load()])) Task(func,thread.task,oldStackPtr,size);
+ new (&tasks[right]) Task(func,thread.task,oldStackPtr,size);
right++;
/* also move left pointer */
diff --git a/thirdparty/embree-aarch64/common/tasking/taskschedulerppl.h b/thirdparty/embree/common/tasking/taskschedulerppl.h
index 776f98cdac..cbc2ecdbb8 100644
--- a/thirdparty/embree-aarch64/common/tasking/taskschedulerppl.h
+++ b/thirdparty/embree/common/tasking/taskschedulerppl.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
diff --git a/thirdparty/embree-aarch64/common/tasking/taskschedulertbb.h b/thirdparty/embree/common/tasking/taskschedulertbb.h
index 98dba26871..35bd49849f 100644
--- a/thirdparty/embree-aarch64/common/tasking/taskschedulertbb.h
+++ b/thirdparty/embree/common/tasking/taskschedulertbb.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
@@ -12,7 +12,13 @@
#include "../sys/ref.h"
#if defined(__WIN32__)
+// -- GODOT start --
+#if !defined(NOMINMAX)
+// -- GODOT end --
# define NOMINMAX
+// -- GODOT start --
+#endif
+// -- GODOT end --
#endif
// We need to define these to avoid implicit linkage against
diff --git a/thirdparty/embree-aarch64/include/embree3/rtcore.h b/thirdparty/embree/include/embree3/rtcore.h
index 5830bb5880..450ab4c535 100644
--- a/thirdparty/embree-aarch64/include/embree3/rtcore.h
+++ b/thirdparty/embree/include/embree3/rtcore.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
diff --git a/thirdparty/embree-aarch64/include/embree3/rtcore_buffer.h b/thirdparty/embree/include/embree3/rtcore_buffer.h
index 400b604aa5..6b8eba9769 100644
--- a/thirdparty/embree-aarch64/include/embree3/rtcore_buffer.h
+++ b/thirdparty/embree/include/embree3/rtcore_buffer.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
diff --git a/thirdparty/embree-aarch64/include/embree3/rtcore_builder.h b/thirdparty/embree/include/embree3/rtcore_builder.h
index d62a7f72cc..4bff999fed 100644
--- a/thirdparty/embree-aarch64/include/embree3/rtcore_builder.h
+++ b/thirdparty/embree/include/embree3/rtcore_builder.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
diff --git a/thirdparty/embree-aarch64/include/embree3/rtcore_common.h b/thirdparty/embree/include/embree3/rtcore_common.h
index 890e06faa3..4857e1e05e 100644
--- a/thirdparty/embree-aarch64/include/embree3/rtcore_common.h
+++ b/thirdparty/embree/include/embree3/rtcore_common.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
@@ -19,7 +19,9 @@ typedef int ssize_t;
#endif
#endif
-#if defined(_WIN32) && !defined(__MINGW32__)
+// -- GODOT start --
+#if defined(_WIN32) && defined(_MSC_VER)
+// -- GODOT end --
# define RTC_ALIGN(...) __declspec(align(__VA_ARGS__))
#else
# define RTC_ALIGN(...) __attribute__((aligned(__VA_ARGS__)))
@@ -35,7 +37,7 @@ typedef int ssize_t;
#endif
#endif
-#if defined(_WIN32)
+#if defined(_WIN32)
# define RTC_FORCEINLINE __forceinline
#else
# define RTC_FORCEINLINE inline __attribute__((always_inline))
@@ -224,13 +226,13 @@ RTC_FORCEINLINE void rtcInitIntersectContext(struct RTCIntersectContext* context
}
/* Point query structure for closest point query */
-struct RTC_ALIGN(16) RTCPointQuery
+struct RTC_ALIGN(16) RTCPointQuery
{
float x; // x coordinate of the query point
float y; // y coordinate of the query point
float z; // z coordinate of the query point
float time; // time of the point query
- float radius; // radius of the point query
+ float radius; // radius of the point query
};
/* Structure of a packet of 4 query points */
@@ -250,7 +252,7 @@ struct RTC_ALIGN(32) RTCPointQuery8
float y[8]; // y coordinate of the query point
float z[8]; // z coordinate of the query point
float time[8]; // time of the point query
- float radius[8]; // radius ofr the point query
+      float radius[8];              // radius of the point query
};
/* Structure of a packet of 16 query points */
@@ -269,11 +271,11 @@ struct RTC_ALIGN(16) RTCPointQueryContext
{
// accumulated 4x4 column major matrices from world space to instance space.
// undefined if size == 0.
- float world2inst[RTC_MAX_INSTANCE_LEVEL_COUNT][16];
+ float world2inst[RTC_MAX_INSTANCE_LEVEL_COUNT][16];
// accumulated 4x4 column major matrices from instance space to world space.
// undefined if size == 0.
- float inst2world[RTC_MAX_INSTANCE_LEVEL_COUNT][16];
+ float inst2world[RTC_MAX_INSTANCE_LEVEL_COUNT][16];
// instance ids.
unsigned int instID[RTC_MAX_INSTANCE_LEVEL_COUNT];
@@ -301,13 +303,13 @@ struct RTC_ALIGN(16) RTCPointQueryFunctionArguments
void* userPtr;
// primitive and geometry ID of primitive
- unsigned int primID;
- unsigned int geomID;
+ unsigned int primID;
+ unsigned int geomID;
// the context with transformation and instance ID stack
struct RTCPointQueryContext* context;
- // If the current instance transform M (= context->world2inst[context->instStackSize])
+ // If the current instance transform M (= context->world2inst[context->instStackSize])
// is a similarity matrix, i.e there is a constant factor similarityScale such that,
// for all x,y: dist(Mx, My) = similarityScale * dist(x, y),
// The similarity scale is 0, if the current instance transform is not a
@@ -322,5 +324,5 @@ struct RTC_ALIGN(16) RTCPointQueryFunctionArguments
};
typedef bool (*RTCPointQueryFunction)(struct RTCPointQueryFunctionArguments* args);
-
+
RTC_NAMESPACE_END
diff --git a/thirdparty/embree-aarch64/include/embree3/rtcore_config.h b/thirdparty/embree/include/embree3/rtcore_config.h
index 337d4e9487..3a9819c9f1 100644
--- a/thirdparty/embree-aarch64/include/embree3/rtcore_config.h
+++ b/thirdparty/embree/include/embree3/rtcore_config.h
@@ -1,14 +1,14 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
#define RTC_VERSION_MAJOR 3
-#define RTC_VERSION_MINOR 12
-#define RTC_VERSION_PATCH 1
-#define RTC_VERSION 31201
-#define RTC_VERSION_STRING "3.12.1"
+#define RTC_VERSION_MINOR 13
+#define RTC_VERSION_PATCH 0
+#define RTC_VERSION 31300
+#define RTC_VERSION_STRING "3.13.0"
#define RTC_MAX_INSTANCE_LEVEL_COUNT 1
diff --git a/thirdparty/embree-aarch64/include/embree3/rtcore_device.h b/thirdparty/embree/include/embree3/rtcore_device.h
index 594e2b755d..2dd3047603 100644
--- a/thirdparty/embree-aarch64/include/embree3/rtcore_device.h
+++ b/thirdparty/embree/include/embree3/rtcore_device.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
diff --git a/thirdparty/embree-aarch64/include/embree3/rtcore_geometry.h b/thirdparty/embree/include/embree3/rtcore_geometry.h
index c70f1b0e5c..d1de17491c 100644
--- a/thirdparty/embree-aarch64/include/embree3/rtcore_geometry.h
+++ b/thirdparty/embree/include/embree3/rtcore_geometry.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
diff --git a/thirdparty/embree-aarch64/include/embree3/rtcore_quaternion.h b/thirdparty/embree/include/embree3/rtcore_quaternion.h
index 449cdedfdc..6489fa3467 100644
--- a/thirdparty/embree-aarch64/include/embree3/rtcore_quaternion.h
+++ b/thirdparty/embree/include/embree3/rtcore_quaternion.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
diff --git a/thirdparty/embree-aarch64/include/embree3/rtcore_ray.h b/thirdparty/embree/include/embree3/rtcore_ray.h
index 1ae3309ef1..a2ee6dabbb 100644
--- a/thirdparty/embree-aarch64/include/embree3/rtcore_ray.h
+++ b/thirdparty/embree/include/embree3/rtcore_ray.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
diff --git a/thirdparty/embree-aarch64/include/embree3/rtcore_scene.h b/thirdparty/embree/include/embree3/rtcore_scene.h
index 0cd6401593..5878a3d402 100644
--- a/thirdparty/embree-aarch64/include/embree3/rtcore_scene.h
+++ b/thirdparty/embree/include/embree3/rtcore_scene.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
diff --git a/thirdparty/embree-aarch64/kernels/builders/bvh_builder_hair.h b/thirdparty/embree/kernels/builders/bvh_builder_hair.h
index 755ce255fb..d83e8918a1 100644
--- a/thirdparty/embree-aarch64/kernels/builders/bvh_builder_hair.h
+++ b/thirdparty/embree/kernels/builders/bvh_builder_hair.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
diff --git a/thirdparty/embree-aarch64/kernels/builders/bvh_builder_morton.h b/thirdparty/embree/kernels/builders/bvh_builder_morton.h
index 92be2f7e65..8f21e3254f 100644
--- a/thirdparty/embree-aarch64/kernels/builders/bvh_builder_morton.h
+++ b/thirdparty/embree/kernels/builders/bvh_builder_morton.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
diff --git a/thirdparty/embree-aarch64/kernels/builders/bvh_builder_msmblur.h b/thirdparty/embree/kernels/builders/bvh_builder_msmblur.h
index 4c138dacdb..f9a08d65cd 100644
--- a/thirdparty/embree-aarch64/kernels/builders/bvh_builder_msmblur.h
+++ b/thirdparty/embree/kernels/builders/bvh_builder_msmblur.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
@@ -297,7 +297,7 @@ namespace embree
if (object_split_sah < 0.50f*leaf_sah)
return object_split;
- /* do temporal splits only if the the time range is big enough */
+ /* do temporal splits only if the time range is big enough */
if (set.time_range.size() > 1.01f/float(set.max_num_time_segments))
{
const Split temporal_split = heuristicTemporalSplit.find(set,cfg.logBlockSize);
diff --git a/thirdparty/embree-aarch64/kernels/builders/bvh_builder_msmblur_hair.h b/thirdparty/embree/kernels/builders/bvh_builder_msmblur_hair.h
index e477c313a3..397e8636b1 100644
--- a/thirdparty/embree-aarch64/kernels/builders/bvh_builder_msmblur_hair.h
+++ b/thirdparty/embree/kernels/builders/bvh_builder_msmblur_hair.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
diff --git a/thirdparty/embree-aarch64/kernels/builders/bvh_builder_sah.h b/thirdparty/embree/kernels/builders/bvh_builder_sah.h
index 3f7e678a10..fff4bf2a35 100644
--- a/thirdparty/embree-aarch64/kernels/builders/bvh_builder_sah.h
+++ b/thirdparty/embree/kernels/builders/bvh_builder_sah.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
@@ -43,7 +43,7 @@ namespace embree
{
if (RTC_BUILD_ARGUMENTS_HAS(settings,maxBranchingFactor)) branchingFactor = settings.maxBranchingFactor;
if (RTC_BUILD_ARGUMENTS_HAS(settings,maxDepth )) maxDepth = settings.maxDepth;
- if (RTC_BUILD_ARGUMENTS_HAS(settings,sahBlockSize )) logBlockSize = bsr(static_cast<size_t>(settings.sahBlockSize));
+ if (RTC_BUILD_ARGUMENTS_HAS(settings,sahBlockSize )) logBlockSize = bsr(settings.sahBlockSize);
if (RTC_BUILD_ARGUMENTS_HAS(settings,minLeafSize )) minLeafSize = settings.minLeafSize;
if (RTC_BUILD_ARGUMENTS_HAS(settings,maxLeafSize )) maxLeafSize = settings.maxLeafSize;
if (RTC_BUILD_ARGUMENTS_HAS(settings,traversalCost )) travCost = settings.traversalCost;
diff --git a/thirdparty/embree-aarch64/kernels/builders/heuristic_binning.h b/thirdparty/embree/kernels/builders/heuristic_binning.h
index a4d3b68e46..ee29d09ac9 100644
--- a/thirdparty/embree-aarch64/kernels/builders/heuristic_binning.h
+++ b/thirdparty/embree/kernels/builders/heuristic_binning.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
@@ -444,482 +444,6 @@ namespace embree
BBox _bounds[BINS][3]; //!< geometry bounds for each bin in each dimension
vuint4 _counts[BINS]; //!< counts number of primitives that map into the bins
};
-
-#if defined(__AVX512ER__) // KNL
-
- /*! mapping into bins */
- template<>
- struct BinMapping<16>
- {
- public:
- __forceinline BinMapping() {}
-
- /*! calculates the mapping */
- template<typename PrimInfo>
- __forceinline BinMapping(const PrimInfo& pinfo)
- {
- num = 16;
- const vfloat4 eps = 1E-34f;
- const vfloat4 diag = max(eps,(vfloat4) pinfo.centBounds.size());
- scale = select(diag > eps,vfloat4(0.99f*num)/diag,vfloat4(0.0f));
- ofs = (vfloat4) pinfo.centBounds.lower;
- scale16 = scale;
- ofs16 = ofs;
- }
-
- /*! returns number of bins */
- __forceinline size_t size() const { return num; }
-
- __forceinline vint16 bin16(const Vec3fa& p) const {
- return vint16(vint4(floori((vfloat4(p)-ofs)*scale)));
- }
-
- __forceinline vint16 bin16(const vfloat16& p) const {
- return floori((p-ofs16)*scale16);
- }
-
- __forceinline int bin_unsafe(const PrimRef& ref,
- const vint16& vSplitPos,
- const vbool16& splitDimMask) const // FIXME: rename to isLeft
- {
- const vfloat16 lower(*(vfloat4*)&ref.lower);
- const vfloat16 upper(*(vfloat4*)&ref.upper);
- const vfloat16 p = lower + upper;
- const vint16 i = floori((p-ofs16)*scale16);
- return lt(splitDimMask,i,vSplitPos);
- }
-
- /*! returns true if the mapping is invalid in some dimension */
- __forceinline bool invalid(const size_t dim) const {
- return scale[dim] == 0.0f;
- }
-
- public:
- size_t num;
- vfloat4 ofs,scale; //!< linear function that maps to bin ID
- vfloat16 ofs16,scale16; //!< linear function that maps to bin ID
- };
-
- /* 16 bins in-register binner */
- template<typename PrimRef>
- struct __aligned(64) BinInfoT<16,PrimRef,BBox3fa>
- {
- typedef BinSplit<16> Split;
- typedef vbool16 vbool;
- typedef vint16 vint;
- typedef vfloat16 vfloat;
-
- __forceinline BinInfoT() {
- }
-
- __forceinline BinInfoT(EmptyTy) {
- clear();
- }
-
- /*! clears the bin info */
- __forceinline void clear()
- {
- lower[0] = lower[1] = lower[2] = pos_inf;
- upper[0] = upper[1] = upper[2] = neg_inf;
- count[0] = count[1] = count[2] = 0;
- }
-
-
- static __forceinline vfloat16 prefix_area_rl(const vfloat16 min_x,
- const vfloat16 min_y,
- const vfloat16 min_z,
- const vfloat16 max_x,
- const vfloat16 max_y,
- const vfloat16 max_z)
- {
- const vfloat16 r_min_x = reverse_prefix_min(min_x);
- const vfloat16 r_min_y = reverse_prefix_min(min_y);
- const vfloat16 r_min_z = reverse_prefix_min(min_z);
- const vfloat16 r_max_x = reverse_prefix_max(max_x);
- const vfloat16 r_max_y = reverse_prefix_max(max_y);
- const vfloat16 r_max_z = reverse_prefix_max(max_z);
- const vfloat16 dx = r_max_x - r_min_x;
- const vfloat16 dy = r_max_y - r_min_y;
- const vfloat16 dz = r_max_z - r_min_z;
- const vfloat16 area_rl = madd(dx,dy,madd(dx,dz,dy*dz));
- return area_rl;
- }
-
- static __forceinline vfloat16 prefix_area_lr(const vfloat16 min_x,
- const vfloat16 min_y,
- const vfloat16 min_z,
- const vfloat16 max_x,
- const vfloat16 max_y,
- const vfloat16 max_z)
- {
- const vfloat16 r_min_x = prefix_min(min_x);
- const vfloat16 r_min_y = prefix_min(min_y);
- const vfloat16 r_min_z = prefix_min(min_z);
- const vfloat16 r_max_x = prefix_max(max_x);
- const vfloat16 r_max_y = prefix_max(max_y);
- const vfloat16 r_max_z = prefix_max(max_z);
- const vfloat16 dx = r_max_x - r_min_x;
- const vfloat16 dy = r_max_y - r_min_y;
- const vfloat16 dz = r_max_z - r_min_z;
- const vfloat16 area_lr = madd(dx,dy,madd(dx,dz,dy*dz));
- return area_lr;
- }
-
-
- /*! bins an array of primitives */
- __forceinline void bin (const PrimRef* prims, size_t N, const BinMapping<16>& mapping)
- {
- if (unlikely(N == 0)) return;
-
- const vfloat16 init_min(pos_inf);
- const vfloat16 init_max(neg_inf);
-
- vfloat16 min_x0,min_x1,min_x2;
- vfloat16 min_y0,min_y1,min_y2;
- vfloat16 min_z0,min_z1,min_z2;
- vfloat16 max_x0,max_x1,max_x2;
- vfloat16 max_y0,max_y1,max_y2;
- vfloat16 max_z0,max_z1,max_z2;
- vuint16 count0,count1,count2;
-
- min_x0 = init_min;
- min_x1 = init_min;
- min_x2 = init_min;
- min_y0 = init_min;
- min_y1 = init_min;
- min_y2 = init_min;
- min_z0 = init_min;
- min_z1 = init_min;
- min_z2 = init_min;
-
- max_x0 = init_max;
- max_x1 = init_max;
- max_x2 = init_max;
- max_y0 = init_max;
- max_y1 = init_max;
- max_y2 = init_max;
- max_z0 = init_max;
- max_z1 = init_max;
- max_z2 = init_max;
-
- count0 = zero;
- count1 = zero;
- count2 = zero;
-
- const vint16 step16(step);
- size_t i;
- for (i=0; i<N-1; i+=2)
- {
- /*! map even and odd primitive to bin */
- const BBox3fa primA = prims[i+0].bounds();
- const vfloat16 centerA = vfloat16((vfloat4)primA.lower) + vfloat16((vfloat4)primA.upper);
- const vint16 binA = mapping.bin16(centerA);
-
- const BBox3fa primB = prims[i+1].bounds();
- const vfloat16 centerB = vfloat16((vfloat4)primB.lower) + vfloat16((vfloat4)primB.upper);
- const vint16 binB = mapping.bin16(centerB);
-
- /* A */
- {
- const vfloat16 b_min_x = prims[i+0].lower.x;
- const vfloat16 b_min_y = prims[i+0].lower.y;
- const vfloat16 b_min_z = prims[i+0].lower.z;
- const vfloat16 b_max_x = prims[i+0].upper.x;
- const vfloat16 b_max_y = prims[i+0].upper.y;
- const vfloat16 b_max_z = prims[i+0].upper.z;
-
- const vint16 bin0 = shuffle<0>(binA);
- const vint16 bin1 = shuffle<1>(binA);
- const vint16 bin2 = shuffle<2>(binA);
-
- const vbool16 m_update_x = step16 == bin0;
- const vbool16 m_update_y = step16 == bin1;
- const vbool16 m_update_z = step16 == bin2;
-
- assert(popcnt((size_t)m_update_x) == 1);
- assert(popcnt((size_t)m_update_y) == 1);
- assert(popcnt((size_t)m_update_z) == 1);
-
- min_x0 = mask_min(m_update_x,min_x0,min_x0,b_min_x);
- min_y0 = mask_min(m_update_x,min_y0,min_y0,b_min_y);
- min_z0 = mask_min(m_update_x,min_z0,min_z0,b_min_z);
- // ------------------------------------------------------------------------
- max_x0 = mask_max(m_update_x,max_x0,max_x0,b_max_x);
- max_y0 = mask_max(m_update_x,max_y0,max_y0,b_max_y);
- max_z0 = mask_max(m_update_x,max_z0,max_z0,b_max_z);
- // ------------------------------------------------------------------------
- min_x1 = mask_min(m_update_y,min_x1,min_x1,b_min_x);
- min_y1 = mask_min(m_update_y,min_y1,min_y1,b_min_y);
- min_z1 = mask_min(m_update_y,min_z1,min_z1,b_min_z);
- // ------------------------------------------------------------------------
- max_x1 = mask_max(m_update_y,max_x1,max_x1,b_max_x);
- max_y1 = mask_max(m_update_y,max_y1,max_y1,b_max_y);
- max_z1 = mask_max(m_update_y,max_z1,max_z1,b_max_z);
- // ------------------------------------------------------------------------
- min_x2 = mask_min(m_update_z,min_x2,min_x2,b_min_x);
- min_y2 = mask_min(m_update_z,min_y2,min_y2,b_min_y);
- min_z2 = mask_min(m_update_z,min_z2,min_z2,b_min_z);
- // ------------------------------------------------------------------------
- max_x2 = mask_max(m_update_z,max_x2,max_x2,b_max_x);
- max_y2 = mask_max(m_update_z,max_y2,max_y2,b_max_y);
- max_z2 = mask_max(m_update_z,max_z2,max_z2,b_max_z);
- // ------------------------------------------------------------------------
- count0 = mask_add(m_update_x,count0,count0,vuint16(1));
- count1 = mask_add(m_update_y,count1,count1,vuint16(1));
- count2 = mask_add(m_update_z,count2,count2,vuint16(1));
- }
-
-
- /* B */
- {
- const vfloat16 b_min_x = prims[i+1].lower.x;
- const vfloat16 b_min_y = prims[i+1].lower.y;
- const vfloat16 b_min_z = prims[i+1].lower.z;
- const vfloat16 b_max_x = prims[i+1].upper.x;
- const vfloat16 b_max_y = prims[i+1].upper.y;
- const vfloat16 b_max_z = prims[i+1].upper.z;
-
- const vint16 bin0 = shuffle<0>(binB);
- const vint16 bin1 = shuffle<1>(binB);
- const vint16 bin2 = shuffle<2>(binB);
-
- const vbool16 m_update_x = step16 == bin0;
- const vbool16 m_update_y = step16 == bin1;
- const vbool16 m_update_z = step16 == bin2;
-
- assert(popcnt((size_t)m_update_x) == 1);
- assert(popcnt((size_t)m_update_y) == 1);
- assert(popcnt((size_t)m_update_z) == 1);
-
- min_x0 = mask_min(m_update_x,min_x0,min_x0,b_min_x);
- min_y0 = mask_min(m_update_x,min_y0,min_y0,b_min_y);
- min_z0 = mask_min(m_update_x,min_z0,min_z0,b_min_z);
- // ------------------------------------------------------------------------
- max_x0 = mask_max(m_update_x,max_x0,max_x0,b_max_x);
- max_y0 = mask_max(m_update_x,max_y0,max_y0,b_max_y);
- max_z0 = mask_max(m_update_x,max_z0,max_z0,b_max_z);
- // ------------------------------------------------------------------------
- min_x1 = mask_min(m_update_y,min_x1,min_x1,b_min_x);
- min_y1 = mask_min(m_update_y,min_y1,min_y1,b_min_y);
- min_z1 = mask_min(m_update_y,min_z1,min_z1,b_min_z);
- // ------------------------------------------------------------------------
- max_x1 = mask_max(m_update_y,max_x1,max_x1,b_max_x);
- max_y1 = mask_max(m_update_y,max_y1,max_y1,b_max_y);
- max_z1 = mask_max(m_update_y,max_z1,max_z1,b_max_z);
- // ------------------------------------------------------------------------
- min_x2 = mask_min(m_update_z,min_x2,min_x2,b_min_x);
- min_y2 = mask_min(m_update_z,min_y2,min_y2,b_min_y);
- min_z2 = mask_min(m_update_z,min_z2,min_z2,b_min_z);
- // ------------------------------------------------------------------------
- max_x2 = mask_max(m_update_z,max_x2,max_x2,b_max_x);
- max_y2 = mask_max(m_update_z,max_y2,max_y2,b_max_y);
- max_z2 = mask_max(m_update_z,max_z2,max_z2,b_max_z);
- // ------------------------------------------------------------------------
- count0 = mask_add(m_update_x,count0,count0,vuint16(1));
- count1 = mask_add(m_update_y,count1,count1,vuint16(1));
- count2 = mask_add(m_update_z,count2,count2,vuint16(1));
- }
-
- }
-
- if (i < N)
- {
- const BBox3fa prim0 = prims[i].bounds();
- const vfloat16 center0 = vfloat16((vfloat4)prim0.lower) + vfloat16((vfloat4)prim0.upper);
- const vint16 bin = mapping.bin16(center0);
-
- const vfloat16 b_min_x = prims[i].lower.x;
- const vfloat16 b_min_y = prims[i].lower.y;
- const vfloat16 b_min_z = prims[i].lower.z;
- const vfloat16 b_max_x = prims[i].upper.x;
- const vfloat16 b_max_y = prims[i].upper.y;
- const vfloat16 b_max_z = prims[i].upper.z;
-
- const vint16 bin0 = shuffle<0>(bin);
- const vint16 bin1 = shuffle<1>(bin);
- const vint16 bin2 = shuffle<2>(bin);
-
- const vbool16 m_update_x = step16 == bin0;
- const vbool16 m_update_y = step16 == bin1;
- const vbool16 m_update_z = step16 == bin2;
-
- assert(popcnt((size_t)m_update_x) == 1);
- assert(popcnt((size_t)m_update_y) == 1);
- assert(popcnt((size_t)m_update_z) == 1);
-
- min_x0 = mask_min(m_update_x,min_x0,min_x0,b_min_x);
- min_y0 = mask_min(m_update_x,min_y0,min_y0,b_min_y);
- min_z0 = mask_min(m_update_x,min_z0,min_z0,b_min_z);
- // ------------------------------------------------------------------------
- max_x0 = mask_max(m_update_x,max_x0,max_x0,b_max_x);
- max_y0 = mask_max(m_update_x,max_y0,max_y0,b_max_y);
- max_z0 = mask_max(m_update_x,max_z0,max_z0,b_max_z);
- // ------------------------------------------------------------------------
- min_x1 = mask_min(m_update_y,min_x1,min_x1,b_min_x);
- min_y1 = mask_min(m_update_y,min_y1,min_y1,b_min_y);
- min_z1 = mask_min(m_update_y,min_z1,min_z1,b_min_z);
- // ------------------------------------------------------------------------
- max_x1 = mask_max(m_update_y,max_x1,max_x1,b_max_x);
- max_y1 = mask_max(m_update_y,max_y1,max_y1,b_max_y);
- max_z1 = mask_max(m_update_y,max_z1,max_z1,b_max_z);
- // ------------------------------------------------------------------------
- min_x2 = mask_min(m_update_z,min_x2,min_x2,b_min_x);
- min_y2 = mask_min(m_update_z,min_y2,min_y2,b_min_y);
- min_z2 = mask_min(m_update_z,min_z2,min_z2,b_min_z);
- // ------------------------------------------------------------------------
- max_x2 = mask_max(m_update_z,max_x2,max_x2,b_max_x);
- max_y2 = mask_max(m_update_z,max_y2,max_y2,b_max_y);
- max_z2 = mask_max(m_update_z,max_z2,max_z2,b_max_z);
- // ------------------------------------------------------------------------
- count0 = mask_add(m_update_x,count0,count0,vuint16(1));
- count1 = mask_add(m_update_y,count1,count1,vuint16(1));
- count2 = mask_add(m_update_z,count2,count2,vuint16(1));
- }
-
- lower[0] = Vec3vf16( min_x0, min_y0, min_z0 );
- lower[1] = Vec3vf16( min_x1, min_y1, min_z1 );
- lower[2] = Vec3vf16( min_x2, min_y2, min_z2 );
-
- upper[0] = Vec3vf16( max_x0, max_y0, max_z0 );
- upper[1] = Vec3vf16( max_x1, max_y1, max_z1 );
- upper[2] = Vec3vf16( max_x2, max_y2, max_z2 );
-
- count[0] = count0;
- count[1] = count1;
- count[2] = count2;
- }
-
- __forceinline void bin(const PrimRef* prims, size_t begin, size_t end, const BinMapping<16>& mapping) {
- bin(prims+begin,end-begin,mapping);
- }
-
- /*! merges in other binning information */
- __forceinline void merge (const BinInfoT& other, size_t numBins)
- {
- for (size_t i=0; i<3; i++)
- {
- lower[i] = min(lower[i],other.lower[i]);
- upper[i] = max(upper[i],other.upper[i]);
- count[i] += other.count[i];
- }
- }
-
- /*! reducesr binning information */
- static __forceinline const BinInfoT reduce (const BinInfoT& a, const BinInfoT& b)
- {
- BinInfoT c;
- for (size_t i=0; i<3; i++)
- {
- c.counts[i] = a.counts[i] + b.counts[i];
- c.lower[i] = min(a.lower[i],b.lower[i]);
- c.upper[i] = max(a.upper[i],b.upper[i]);
- }
- return c;
- }
-
- /*! finds the best split by scanning binning information */
- __forceinline Split best(const BinMapping<16>& mapping, const size_t blocks_shift) const
- {
- /* find best dimension */
- float bestSAH = inf;
- int bestDim = -1;
- int bestPos = 0;
- const vuint16 blocks_add = (1 << blocks_shift)-1;
- const vfloat16 inf(pos_inf);
- for (size_t dim=0; dim<3; dim++)
- {
- /* ignore zero sized dimensions */
- if (unlikely(mapping.invalid(dim)))
- continue;
-
- const vfloat16 rArea16 = prefix_area_rl(lower[dim].x,lower[dim].y,lower[dim].z, upper[dim].x,upper[dim].y,upper[dim].z);
- const vfloat16 lArea16 = prefix_area_lr(lower[dim].x,lower[dim].y,lower[dim].z, upper[dim].x,upper[dim].y,upper[dim].z);
- const vuint16 lCount16 = prefix_sum(count[dim]);
- const vuint16 rCount16 = reverse_prefix_sum(count[dim]);
-
- /* compute best split in this dimension */
- const vfloat16 leftArea = lArea16;
- const vfloat16 rightArea = align_shift_right<1>(zero,rArea16);
- const vuint16 lC = lCount16;
- const vuint16 rC = align_shift_right<1>(zero,rCount16);
- const vuint16 leftCount = ( lC + blocks_add) >> blocks_shift;
- const vuint16 rightCount = ( rC + blocks_add) >> blocks_shift;
- const vbool16 valid = (leftArea < inf) & (rightArea < inf) & vbool16(0x7fff); // handles inf entries
- const vfloat16 sah = select(valid,madd(leftArea,vfloat16(leftCount),rightArea*vfloat16(rightCount)),vfloat16(pos_inf));
- /* test if this is a better dimension */
- if (any(sah < vfloat16(bestSAH)))
- {
- const size_t index = select_min(sah);
- assert(index < 15);
- assert(sah[index] < bestSAH);
- bestDim = dim;
- bestPos = index+1;
- bestSAH = sah[index];
- }
- }
-
- return Split(bestSAH,bestDim,bestPos,mapping);
-
- }
-
- /*! calculates extended split information */
- __forceinline void getSplitInfo(const BinMapping<16>& mapping, const Split& split, SplitInfo& info) const
- {
- if (split.dim == -1) {
- new (&info) SplitInfo(0,empty,0,empty);
- return;
- }
- // FIXME: horizontal reduction!
-
- size_t leftCount = 0;
- BBox3fa leftBounds = empty;
- for (size_t i=0; i<(size_t)split.pos; i++) {
- leftCount += count[split.dim][i];
- Vec3fa bounds_lower(lower[split.dim].x[i],lower[split.dim].y[i],lower[split.dim].z[i]);
- Vec3fa bounds_upper(upper[split.dim].x[i],upper[split.dim].y[i],upper[split.dim].z[i]);
- leftBounds.extend(BBox3fa(bounds_lower,bounds_upper));
- }
- size_t rightCount = 0;
- BBox3fa rightBounds = empty;
- for (size_t i=split.pos; i<mapping.size(); i++) {
- rightCount += count[split.dim][i];
- Vec3fa bounds_lower(lower[split.dim].x[i],lower[split.dim].y[i],lower[split.dim].z[i]);
- Vec3fa bounds_upper(upper[split.dim].x[i],upper[split.dim].y[i],upper[split.dim].z[i]);
- rightBounds.extend(BBox3fa(bounds_lower,bounds_upper));
- }
- new (&info) SplitInfo(leftCount,leftBounds,rightCount,rightBounds);
- }
-
- /*! gets the number of primitives left of the split */
- __forceinline size_t getLeftCount(const BinMapping<16>& mapping, const Split& split) const
- {
- if (unlikely(split.dim == -1)) return -1;
-
- size_t leftCount = 0;
- for (size_t i = 0; i < (size_t)split.pos; i++) {
- leftCount += count[split.dim][i];
- }
- return leftCount;
- }
-
- /*! gets the number of primitives right of the split */
- __forceinline size_t getRightCount(const BinMapping<16>& mapping, const Split& split) const
- {
- if (unlikely(split.dim == -1)) return -1;
-
- size_t rightCount = 0;
- for (size_t i = (size_t)split.pos; i<mapping.size(); i++) {
- rightCount += count[split.dim][i];
- }
- return rightCount;
- }
-
- private:
- Vec3vf16 lower[3];
- Vec3vf16 upper[3];
- vuint16 count[3];
- };
-#endif
}
template<typename BinInfoT, typename BinMapping, typename PrimRef>
diff --git a/thirdparty/embree-aarch64/kernels/builders/heuristic_binning_array_aligned.h b/thirdparty/embree/kernels/builders/heuristic_binning_array_aligned.h
index a4c272f015..ab3b97efb9 100644
--- a/thirdparty/embree-aarch64/kernels/builders/heuristic_binning_array_aligned.h
+++ b/thirdparty/embree/kernels/builders/heuristic_binning_array_aligned.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
@@ -40,15 +40,10 @@ namespace embree
typedef BinInfoT<BINS,PrimRef,BBox3fa> Binner;
typedef range<size_t> Set;
-#if defined(__AVX512ER__) // KNL
- static const size_t PARALLEL_THRESHOLD = 4*768;
- static const size_t PARALLEL_FIND_BLOCK_SIZE = 768;
- static const size_t PARALLEL_PARTITION_BLOCK_SIZE = 768;
-#else
static const size_t PARALLEL_THRESHOLD = 3 * 1024;
static const size_t PARALLEL_FIND_BLOCK_SIZE = 1024;
static const size_t PARALLEL_PARTITION_BLOCK_SIZE = 128;
-#endif
+
__forceinline HeuristicArrayBinningSAH ()
: prims(nullptr) {}
diff --git a/thirdparty/embree-aarch64/kernels/builders/heuristic_binning_array_unaligned.h b/thirdparty/embree/kernels/builders/heuristic_binning_array_unaligned.h
index 1370244586..34a7f121bb 100644
--- a/thirdparty/embree-aarch64/kernels/builders/heuristic_binning_array_unaligned.h
+++ b/thirdparty/embree/kernels/builders/heuristic_binning_array_unaligned.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
diff --git a/thirdparty/embree-aarch64/kernels/builders/heuristic_openmerge_array.h b/thirdparty/embree/kernels/builders/heuristic_openmerge_array.h
index 21f18c0208..4249d16ea1 100644
--- a/thirdparty/embree-aarch64/kernels/builders/heuristic_openmerge_array.h
+++ b/thirdparty/embree/kernels/builders/heuristic_openmerge_array.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
// TODO:
diff --git a/thirdparty/embree-aarch64/kernels/builders/heuristic_spatial.h b/thirdparty/embree/kernels/builders/heuristic_spatial.h
index d8ca6cb92c..a6939ba258 100644
--- a/thirdparty/embree-aarch64/kernels/builders/heuristic_spatial.h
+++ b/thirdparty/embree/kernels/builders/heuristic_spatial.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
diff --git a/thirdparty/embree-aarch64/kernels/builders/heuristic_spatial_array.h b/thirdparty/embree/kernels/builders/heuristic_spatial_array.h
index 911dcf950c..60d235f48d 100644
--- a/thirdparty/embree-aarch64/kernels/builders/heuristic_spatial_array.h
+++ b/thirdparty/embree/kernels/builders/heuristic_spatial_array.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
@@ -107,15 +107,9 @@ namespace embree
//typedef extended_range<size_t> Set;
typedef Split2<ObjectSplit,SpatialSplit> Split;
-#if defined(__AVX512ER__) // KNL
- static const size_t PARALLEL_THRESHOLD = 3*1024;
- static const size_t PARALLEL_FIND_BLOCK_SIZE = 768;
- static const size_t PARALLEL_PARTITION_BLOCK_SIZE = 128;
-#else
static const size_t PARALLEL_THRESHOLD = 3*1024;
static const size_t PARALLEL_FIND_BLOCK_SIZE = 1024;
static const size_t PARALLEL_PARTITION_BLOCK_SIZE = 128;
-#endif
static const size_t MOVE_STEP_SIZE = 64;
static const size_t CREATE_SPLITS_STEP_SIZE = 64;
diff --git a/thirdparty/embree-aarch64/kernels/builders/heuristic_strand_array.h b/thirdparty/embree/kernels/builders/heuristic_strand_array.h
index ede0d04c78..19c7fcdaa8 100644
--- a/thirdparty/embree-aarch64/kernels/builders/heuristic_strand_array.h
+++ b/thirdparty/embree/kernels/builders/heuristic_strand_array.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
diff --git a/thirdparty/embree-aarch64/kernels/builders/heuristic_timesplit_array.h b/thirdparty/embree/kernels/builders/heuristic_timesplit_array.h
index c999941a11..b968e01c90 100644
--- a/thirdparty/embree-aarch64/kernels/builders/heuristic_timesplit_array.h
+++ b/thirdparty/embree/kernels/builders/heuristic_timesplit_array.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
diff --git a/thirdparty/embree-aarch64/kernels/builders/priminfo.h b/thirdparty/embree/kernels/builders/priminfo.h
index 06c1388742..fee515247a 100644
--- a/thirdparty/embree-aarch64/kernels/builders/priminfo.h
+++ b/thirdparty/embree/kernels/builders/priminfo.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
diff --git a/thirdparty/embree-aarch64/kernels/builders/primrefgen.cpp b/thirdparty/embree/kernels/builders/primrefgen.cpp
index e23de3df28..d279dc4993 100644
--- a/thirdparty/embree-aarch64/kernels/builders/primrefgen.cpp
+++ b/thirdparty/embree/kernels/builders/primrefgen.cpp
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#include "primrefgen.h"
@@ -11,7 +11,7 @@ namespace embree
{
namespace isa
{
- PrimInfo createPrimRefArray(Geometry* geometry, unsigned int geomID, mvector<PrimRef>& prims, BuildProgressMonitor& progressMonitor)
+ PrimInfo createPrimRefArray(Geometry* geometry, unsigned int geomID, const size_t numPrimRefs, mvector<PrimRef>& prims, BuildProgressMonitor& progressMonitor)
{
ParallelPrefixSumState<PrimInfo> pstate;
@@ -22,7 +22,7 @@ namespace embree
}, [](const PrimInfo& a, const PrimInfo& b) -> PrimInfo { return PrimInfo::merge(a,b); });
/* if we need to filter out geometry, run again */
- if (pinfo.size() != prims.size())
+ if (pinfo.size() != numPrimRefs)
{
progressMonitor(0);
pinfo = parallel_prefix_sum( pstate, size_t(0), geometry->size(), size_t(1024), PrimInfo(empty), [&](const range<size_t>& r, const PrimInfo& base) -> PrimInfo {
@@ -32,7 +32,7 @@ namespace embree
return pinfo;
}
- PrimInfo createPrimRefArray(Scene* scene, Geometry::GTypeMask types, bool mblur, mvector<PrimRef>& prims, BuildProgressMonitor& progressMonitor)
+ PrimInfo createPrimRefArray(Scene* scene, Geometry::GTypeMask types, bool mblur, const size_t numPrimRefs, mvector<PrimRef>& prims, BuildProgressMonitor& progressMonitor)
{
ParallelForForPrefixSumState<PrimInfo> pstate;
Scene::Iterator2 iter(scene,types,mblur);
@@ -45,7 +45,7 @@ namespace embree
}, [](const PrimInfo& a, const PrimInfo& b) -> PrimInfo { return PrimInfo::merge(a,b); });
/* if we need to filter out geometry, run again */
- if (pinfo.size() != prims.size())
+ if (pinfo.size() != numPrimRefs)
{
progressMonitor(0);
pinfo = parallel_for_for_prefix_sum1( pstate, iter, PrimInfo(empty), [&](Geometry* mesh, const range<size_t>& r, size_t k, size_t geomID, const PrimInfo& base) -> PrimInfo {
@@ -55,7 +55,7 @@ namespace embree
return pinfo;
}
- PrimInfo createPrimRefArrayMBlur(Scene* scene, Geometry::GTypeMask types, mvector<PrimRef>& prims, BuildProgressMonitor& progressMonitor, size_t itime)
+ PrimInfo createPrimRefArrayMBlur(Scene* scene, Geometry::GTypeMask types, const size_t numPrimRefs, mvector<PrimRef>& prims, BuildProgressMonitor& progressMonitor, size_t itime)
{
ParallelForForPrefixSumState<PrimInfo> pstate;
Scene::Iterator2 iter(scene,types,true);
@@ -68,7 +68,7 @@ namespace embree
}, [](const PrimInfo& a, const PrimInfo& b) -> PrimInfo { return PrimInfo::merge(a,b); });
/* if we need to filter out geometry, run again */
- if (pinfo.size() != prims.size())
+ if (pinfo.size() != numPrimRefs)
{
progressMonitor(0);
pinfo = parallel_for_for_prefix_sum1( pstate, iter, PrimInfo(empty), [&](Geometry* mesh, const range<size_t>& r, size_t k, size_t geomID, const PrimInfo& base) -> PrimInfo {
@@ -78,7 +78,7 @@ namespace embree
return pinfo;
}
- PrimInfoMB createPrimRefArrayMSMBlur(Scene* scene, Geometry::GTypeMask types, mvector<PrimRefMB>& prims, BuildProgressMonitor& progressMonitor, BBox1f t0t1)
+ PrimInfoMB createPrimRefArrayMSMBlur(Scene* scene, Geometry::GTypeMask types, const size_t numPrimRefs, mvector<PrimRefMB>& prims, BuildProgressMonitor& progressMonitor, BBox1f t0t1)
{
ParallelForForPrefixSumState<PrimInfoMB> pstate;
Scene::Iterator2 iter(scene,types,true);
@@ -91,7 +91,7 @@ namespace embree
}, [](const PrimInfoMB& a, const PrimInfoMB& b) -> PrimInfoMB { return PrimInfoMB::merge2(a,b); });
/* if we need to filter out geometry, run again */
- if (pinfo.size() != prims.size())
+ if (pinfo.size() != numPrimRefs)
{
progressMonitor(0);
pinfo = parallel_for_for_prefix_sum1( pstate, iter, PrimInfoMB(empty), [&](Geometry* mesh, const range<size_t>& r, size_t k, size_t geomID, const PrimInfoMB& base) -> PrimInfoMB {
@@ -182,56 +182,124 @@ namespace embree
// ====================================================================================================
// ====================================================================================================
- // template for grid meshes
+ // special variants for grid meshes
-#if 0
- template<>
- PrimInfo createPrimRefArray<GridMesh,false>(Scene* scene, mvector<PrimRef>& prims, BuildProgressMonitor& progressMonitor)
+// -- GODOT start --
+#if defined(EMBREE_GEOMETRY_GRID)
+// -- GODOT end --
+ PrimInfo createPrimRefArrayGrids(Scene* scene, mvector<PrimRef>& prims, mvector<SubGridBuildData>& sgrids)
{
- PING;
+ PrimInfo pinfo(empty);
+ size_t numPrimitives = 0;
+
+ /* first run to get #primitives */
+
ParallelForForPrefixSumState<PrimInfo> pstate;
Scene::Iterator<GridMesh,false> iter(scene);
-
- /* first try */
- progressMonitor(0);
+
pstate.init(iter,size_t(1024));
- PrimInfo pinfo = parallel_for_for_prefix_sum0( pstate, iter, PrimInfo(empty), [&](GridMesh* mesh, const range<size_t>& r, size_t k) -> PrimInfo
- {
- PrimInfo pinfo(empty);
- for (size_t j=r.begin(); j<r.end(); j++)
- {
- BBox3fa bounds = empty;
- if (!mesh->buildBounds(j,&bounds)) continue;
- const PrimRef prim(bounds,mesh->geomID,unsigned(j));
- pinfo.add_center2(prim);
- prims[k++] = prim;
- }
- return pinfo;
- }, [](const PrimInfo& a, const PrimInfo& b) -> PrimInfo { return PrimInfo::merge(a,b); });
-
- /* if we need to filter out geometry, run again */
- if (pinfo.size() != prims.size())
- {
- progressMonitor(0);
- pinfo = parallel_for_for_prefix_sum1( pstate, iter, PrimInfo(empty), [&](GridMesh* mesh, const range<size_t>& r, size_t k, const PrimInfo& base) -> PrimInfo
- {
- k = base.size();
+
+ /* iterate over all meshes in the scene */
+ pinfo = parallel_for_for_prefix_sum0( pstate, iter, PrimInfo(empty), [&](GridMesh* mesh, const range<size_t>& r, size_t k, size_t geomID) -> PrimInfo {
PrimInfo pinfo(empty);
for (size_t j=r.begin(); j<r.end(); j++)
{
+ if (!mesh->valid(j)) continue;
BBox3fa bounds = empty;
- if (!mesh->buildBounds(j,&bounds)) continue;
- const PrimRef prim(bounds,mesh->geomID,unsigned(j));
- pinfo.add_center2(prim);
- prims[k++] = prim;
+ const PrimRef prim(bounds,(unsigned)geomID,(unsigned)j);
+ if (!mesh->valid(j)) continue;
+ pinfo.add_center2(prim,mesh->getNumSubGrids(j));
}
return pinfo;
}, [](const PrimInfo& a, const PrimInfo& b) -> PrimInfo { return PrimInfo::merge(a,b); });
- }
+ numPrimitives = pinfo.size();
+
+ /* resize arrays */
+ sgrids.resize(numPrimitives);
+ prims.resize(numPrimitives);
+
+ /* second run to fill primrefs and SubGridBuildData arrays */
+ pinfo = parallel_for_for_prefix_sum1( pstate, iter, PrimInfo(empty), [&](GridMesh* mesh, const range<size_t>& r, size_t k, size_t geomID, const PrimInfo& base) -> PrimInfo {
+ k = base.size();
+ size_t p_index = k;
+ PrimInfo pinfo(empty);
+ for (size_t j=r.begin(); j<r.end(); j++)
+ {
+ if (!mesh->valid(j)) continue;
+ const GridMesh::Grid &g = mesh->grid(j);
+ for (unsigned int y=0; y<g.resY-1u; y+=2)
+ for (unsigned int x=0; x<g.resX-1u; x+=2)
+ {
+ BBox3fa bounds = empty;
+ if (!mesh->buildBounds(g,x,y,bounds)) continue; // get bounds of subgrid
+ const PrimRef prim(bounds,(unsigned)geomID,(unsigned)p_index);
+ pinfo.add_center2(prim);
+ sgrids[p_index] = SubGridBuildData(x | g.get3x3FlagsX(x), y | g.get3x3FlagsY(y), unsigned(j));
+ prims[p_index++] = prim;
+ }
+ }
+ return pinfo;
+ }, [](const PrimInfo& a, const PrimInfo& b) -> PrimInfo { return PrimInfo::merge(a,b); });
+ assert(pinfo.size() == numPrimitives);
return pinfo;
}
-#endif
+ PrimInfo createPrimRefArrayGrids(GridMesh* mesh, mvector<PrimRef>& prims, mvector<SubGridBuildData>& sgrids)
+ {
+ unsigned int geomID_ = std::numeric_limits<unsigned int>::max ();
+
+ PrimInfo pinfo(empty);
+ size_t numPrimitives = 0;
+
+ ParallelPrefixSumState<PrimInfo> pstate;
+ /* iterate over all grids in a single mesh */
+ pinfo = parallel_prefix_sum( pstate, size_t(0), mesh->size(), size_t(1024), PrimInfo(empty), [&](const range<size_t>& r, const PrimInfo& base) -> PrimInfo
+ {
+ PrimInfo pinfo(empty);
+ for (size_t j=r.begin(); j<r.end(); j++)
+ {
+ if (!mesh->valid(j)) continue;
+ BBox3fa bounds = empty;
+ const PrimRef prim(bounds,geomID_,unsigned(j));
+ pinfo.add_center2(prim,mesh->getNumSubGrids(j));
+ }
+ return pinfo;
+ }, [](const PrimInfo& a, const PrimInfo& b) -> PrimInfo { return PrimInfo::merge(a,b); });
+ numPrimitives = pinfo.size();
+ /* resize arrays */
+ sgrids.resize(numPrimitives);
+ prims.resize(numPrimitives);
+
+ /* second run to fill primrefs and SubGridBuildData arrays */
+ pinfo = parallel_prefix_sum( pstate, size_t(0), mesh->size(), size_t(1024), PrimInfo(empty), [&](const range<size_t>& r, const PrimInfo& base) -> PrimInfo
+ {
+
+ size_t p_index = base.size();
+ PrimInfo pinfo(empty);
+ for (size_t j=r.begin(); j<r.end(); j++)
+ {
+ if (!mesh->valid(j)) continue;
+ const GridMesh::Grid &g = mesh->grid(j);
+ for (unsigned int y=0; y<g.resY-1u; y+=2)
+ for (unsigned int x=0; x<g.resX-1u; x+=2)
+ {
+ BBox3fa bounds = empty;
+ if (!mesh->buildBounds(g,x,y,bounds)) continue; // get bounds of subgrid
+ const PrimRef prim(bounds,geomID_,unsigned(p_index));
+ pinfo.add_center2(prim);
+ sgrids[p_index] = SubGridBuildData(x | g.get3x3FlagsX(x), y | g.get3x3FlagsY(y), unsigned(j));
+ prims[p_index++] = prim;
+ }
+ }
+ return pinfo;
+ }, [](const PrimInfo& a, const PrimInfo& b) -> PrimInfo { return PrimInfo::merge(a,b); });
+
+ return pinfo;
+ }
+// -- GODOT start --
+#endif
+// -- GODOT end --
+
// ====================================================================================================
// ====================================================================================================
// ====================================================================================================
diff --git a/thirdparty/embree/kernels/builders/primrefgen.h b/thirdparty/embree/kernels/builders/primrefgen.h
new file mode 100644
index 0000000000..c09a848ba3
--- /dev/null
+++ b/thirdparty/embree/kernels/builders/primrefgen.h
@@ -0,0 +1,34 @@
+// Copyright 2009-2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+
+#pragma once
+
+#include "../common/scene.h"
+#include "../common/primref.h"
+#include "../common/primref_mb.h"
+#include "priminfo.h"
+#include "bvh_builder_morton.h"
+
+namespace embree
+{
+ namespace isa
+ {
+ PrimInfo createPrimRefArray(Geometry* geometry, unsigned int geomID, size_t numPrimitives, mvector<PrimRef>& prims, BuildProgressMonitor& progressMonitor);
+
+ PrimInfo createPrimRefArray(Scene* scene, Geometry::GTypeMask types, bool mblur, size_t numPrimitives, mvector<PrimRef>& prims, BuildProgressMonitor& progressMonitor);
+
+ PrimInfo createPrimRefArrayMBlur(Scene* scene, Geometry::GTypeMask types, size_t numPrimitives, mvector<PrimRef>& prims, BuildProgressMonitor& progressMonitor, size_t itime = 0);
+
+ PrimInfoMB createPrimRefArrayMSMBlur(Scene* scene, Geometry::GTypeMask types, size_t numPrimitives, mvector<PrimRefMB>& prims, BuildProgressMonitor& progressMonitor, BBox1f t0t1 = BBox1f(0.0f,1.0f));
+
+ template<typename Mesh>
+ size_t createMortonCodeArray(Mesh* mesh, mvector<BVHBuilderMorton::BuildPrim>& morton, BuildProgressMonitor& progressMonitor);
+
+ /* special variants for grids */
+ PrimInfo createPrimRefArrayGrids(Scene* scene, mvector<PrimRef>& prims, mvector<SubGridBuildData>& sgrids);
+
+ PrimInfo createPrimRefArrayGrids(GridMesh* mesh, mvector<PrimRef>& prims, mvector<SubGridBuildData>& sgrids);
+
+ }
+}
+
diff --git a/thirdparty/embree-aarch64/kernels/builders/primrefgen_presplit.h b/thirdparty/embree/kernels/builders/primrefgen_presplit.h
index 8bdb38b955..8cd251ddd2 100644
--- a/thirdparty/embree-aarch64/kernels/builders/primrefgen_presplit.h
+++ b/thirdparty/embree/kernels/builders/primrefgen_presplit.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
diff --git a/thirdparty/embree-aarch64/kernels/builders/splitter.h b/thirdparty/embree/kernels/builders/splitter.h
index dbd6cf07c7..f7720bd284 100644
--- a/thirdparty/embree-aarch64/kernels/builders/splitter.h
+++ b/thirdparty/embree/kernels/builders/splitter.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
@@ -164,6 +164,28 @@ namespace embree
private:
const Scene* scene;
};
+
+
+ struct DummySplitter
+ {
+ __forceinline DummySplitter(const Scene* scene, const PrimRef& prim)
+ {
+ }
+ };
+
+ struct DummySplitterFactory
+ {
+ __forceinline DummySplitterFactory(const Scene* scene)
+ : scene(scene) {}
+
+ __forceinline DummySplitter operator() (const PrimRef& prim) const {
+ return DummySplitter(scene,prim);
+ }
+
+ private:
+ const Scene* scene;
+ };
+
}
}
diff --git a/thirdparty/embree-aarch64/kernels/bvh/bvh.cpp b/thirdparty/embree/kernels/bvh/bvh.cpp
index bd102bd6ef..a84295f0da 100644
--- a/thirdparty/embree-aarch64/kernels/bvh/bvh.cpp
+++ b/thirdparty/embree/kernels/bvh/bvh.cpp
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#include "bvh.h"
@@ -51,7 +51,7 @@ namespace embree
template<int N>
void BVHN<N>::layoutLargeNodes(size_t num)
{
-#if defined(__X86_64__) || defined(__aarch64__) // do not use tree rotations on 32 bit platforms, barrier bit in NodeRef will cause issues
+#if defined(__64BIT__) // do not use tree rotations on 32 bit platforms, barrier bit in NodeRef will cause issues
struct NodeArea
{
__forceinline NodeArea() {}
@@ -183,7 +183,7 @@ namespace embree
template class BVHN<8>;
#endif
-#if !defined(__AVX__) || !defined(EMBREE_TARGET_SSE2) && !defined(EMBREE_TARGET_SSE42) || defined(__aarch64__)
+#if !defined(__AVX__) || !defined(EMBREE_TARGET_SSE2) && !defined(EMBREE_TARGET_SSE42)
template class BVHN<4>;
#endif
}
diff --git a/thirdparty/embree-aarch64/kernels/bvh/bvh.h b/thirdparty/embree/kernels/bvh/bvh.h
index 8fdf912e52..565eec5a58 100644
--- a/thirdparty/embree-aarch64/kernels/bvh/bvh.h
+++ b/thirdparty/embree/kernels/bvh/bvh.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
@@ -81,7 +81,7 @@ namespace embree
struct CreateAlloc : public FastAllocator::Create {
__forceinline CreateAlloc (BVHN* bvh) : FastAllocator::Create(&bvh->alloc) {}
};
-
+
typedef BVHNodeRecord<NodeRef> NodeRecord;
typedef BVHNodeRecordMB<NodeRef> NodeRecordMB;
typedef BVHNodeRecordMB4D<NodeRef> NodeRecordMB4D;
diff --git a/thirdparty/embree-aarch64/kernels/bvh/bvh4_factory.cpp b/thirdparty/embree/kernels/bvh/bvh4_factory.cpp
index 23f4f63d45..890d5e7b7c 100644
--- a/thirdparty/embree-aarch64/kernels/bvh/bvh4_factory.cpp
+++ b/thirdparty/embree/kernels/bvh/bvh4_factory.cpp
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#include "bvh4_factory.h"
@@ -260,12 +260,12 @@ namespace embree
void BVH4Factory::selectBuilders(int features)
{
- IF_ENABLED_TRIS (SELECT_SYMBOL_DEFAULT_AVX_AVX512KNL(features,BVH4BuilderTwoLevelTriangle4MeshSAH));
- IF_ENABLED_TRIS (SELECT_SYMBOL_DEFAULT_AVX_AVX512KNL(features,BVH4BuilderTwoLevelTriangle4iMeshSAH));
- IF_ENABLED_TRIS (SELECT_SYMBOL_DEFAULT_AVX_AVX512KNL(features,BVH4BuilderTwoLevelTriangle4vMeshSAH));
- IF_ENABLED_QUADS (SELECT_SYMBOL_DEFAULT_AVX_AVX512KNL(features,BVH4BuilderTwoLevelQuadMeshSAH));
- IF_ENABLED_USER (SELECT_SYMBOL_DEFAULT_AVX_AVX512KNL(features,BVH4BuilderTwoLevelVirtualSAH));
- IF_ENABLED_INSTANCE (SELECT_SYMBOL_DEFAULT_AVX_AVX512KNL(features,BVH4BuilderTwoLevelInstanceSAH));
+ IF_ENABLED_TRIS (SELECT_SYMBOL_DEFAULT_AVX(features,BVH4BuilderTwoLevelTriangle4MeshSAH));
+ IF_ENABLED_TRIS (SELECT_SYMBOL_DEFAULT_AVX(features,BVH4BuilderTwoLevelTriangle4iMeshSAH));
+ IF_ENABLED_TRIS (SELECT_SYMBOL_DEFAULT_AVX(features,BVH4BuilderTwoLevelTriangle4vMeshSAH));
+ IF_ENABLED_QUADS (SELECT_SYMBOL_DEFAULT_AVX(features,BVH4BuilderTwoLevelQuadMeshSAH));
+ IF_ENABLED_USER (SELECT_SYMBOL_DEFAULT_AVX(features,BVH4BuilderTwoLevelVirtualSAH));
+ IF_ENABLED_INSTANCE (SELECT_SYMBOL_DEFAULT_AVX(features,BVH4BuilderTwoLevelInstanceSAH));
IF_ENABLED_CURVES_OR_POINTS(SELECT_SYMBOL_DEFAULT_AVX(features,BVH4Curve4vBuilder_OBB_New));
IF_ENABLED_CURVES_OR_POINTS(SELECT_SYMBOL_DEFAULT_AVX(features,BVH4Curve4iBuilder_OBB_New));
@@ -273,15 +273,15 @@ namespace embree
IF_ENABLED_CURVES_OR_POINTS(SELECT_SYMBOL_INIT_AVX(features,BVH4Curve8iBuilder_OBB_New));
IF_ENABLED_CURVES_OR_POINTS(SELECT_SYMBOL_INIT_AVX(features,BVH4OBBCurve8iMBBuilder_OBB));
- IF_ENABLED_TRIS(SELECT_SYMBOL_DEFAULT_AVX_AVX512KNL(features,BVH4Triangle4SceneBuilderSAH));
- IF_ENABLED_TRIS(SELECT_SYMBOL_DEFAULT_AVX_AVX512KNL(features,BVH4Triangle4vSceneBuilderSAH));
- IF_ENABLED_TRIS(SELECT_SYMBOL_DEFAULT_AVX_AVX512KNL(features,BVH4Triangle4iSceneBuilderSAH));
+ IF_ENABLED_TRIS(SELECT_SYMBOL_DEFAULT_AVX(features,BVH4Triangle4SceneBuilderSAH));
+ IF_ENABLED_TRIS(SELECT_SYMBOL_DEFAULT_AVX(features,BVH4Triangle4vSceneBuilderSAH));
+ IF_ENABLED_TRIS(SELECT_SYMBOL_DEFAULT_AVX(features,BVH4Triangle4iSceneBuilderSAH));
IF_ENABLED_TRIS(SELECT_SYMBOL_DEFAULT_AVX(features,BVH4Triangle4iMBSceneBuilderSAH));
IF_ENABLED_TRIS(SELECT_SYMBOL_DEFAULT_AVX(features,BVH4Triangle4vMBSceneBuilderSAH));
IF_ENABLED_TRIS(SELECT_SYMBOL_DEFAULT_AVX(features,BVH4QuantizedTriangle4iSceneBuilderSAH));
- IF_ENABLED_QUADS(SELECT_SYMBOL_DEFAULT_AVX_AVX512KNL(features,BVH4Quad4vSceneBuilderSAH));
- IF_ENABLED_QUADS(SELECT_SYMBOL_DEFAULT_AVX_AVX512KNL(features,BVH4Quad4iSceneBuilderSAH));
+ IF_ENABLED_QUADS(SELECT_SYMBOL_DEFAULT_AVX(features,BVH4Quad4vSceneBuilderSAH));
+ IF_ENABLED_QUADS(SELECT_SYMBOL_DEFAULT_AVX(features,BVH4Quad4iSceneBuilderSAH));
IF_ENABLED_QUADS(SELECT_SYMBOL_DEFAULT_AVX(features,BVH4Quad4iMBSceneBuilderSAH));
IF_ENABLED_QUADS(SELECT_SYMBOL_DEFAULT_AVX(features,BVH4QuantizedQuad4iSceneBuilderSAH));
@@ -291,207 +291,207 @@ namespace embree
IF_ENABLED_QUADS(SELECT_SYMBOL_DEFAULT_AVX(features,BVH4Quad4vSceneBuilderFastSpatialSAH));
- IF_ENABLED_USER(SELECT_SYMBOL_DEFAULT_AVX_AVX512KNL(features,BVH4VirtualSceneBuilderSAH));
+ IF_ENABLED_USER(SELECT_SYMBOL_DEFAULT_AVX(features,BVH4VirtualSceneBuilderSAH));
IF_ENABLED_USER(SELECT_SYMBOL_DEFAULT_AVX(features,BVH4VirtualMBSceneBuilderSAH));
- IF_ENABLED_INSTANCE(SELECT_SYMBOL_DEFAULT_AVX_AVX512KNL(features,BVH4InstanceSceneBuilderSAH));
+ IF_ENABLED_INSTANCE(SELECT_SYMBOL_DEFAULT_AVX(features,BVH4InstanceSceneBuilderSAH));
IF_ENABLED_INSTANCE(SELECT_SYMBOL_DEFAULT_AVX(features,BVH4InstanceMBSceneBuilderSAH));
IF_ENABLED_GRIDS(SELECT_SYMBOL_DEFAULT_AVX(features,BVH4GridSceneBuilderSAH));
IF_ENABLED_GRIDS(SELECT_SYMBOL_DEFAULT_AVX(features,BVH4GridMBSceneBuilderSAH));
- IF_ENABLED_SUBDIV(SELECT_SYMBOL_DEFAULT_AVX_AVX512KNL(features,BVH4SubdivPatch1BuilderSAH));
- IF_ENABLED_SUBDIV(SELECT_SYMBOL_DEFAULT_AVX_AVX512KNL(features,BVH4SubdivPatch1MBBuilderSAH));
+ IF_ENABLED_SUBDIV(SELECT_SYMBOL_DEFAULT_AVX(features,BVH4SubdivPatch1BuilderSAH));
+ IF_ENABLED_SUBDIV(SELECT_SYMBOL_DEFAULT_AVX(features,BVH4SubdivPatch1MBBuilderSAH));
}
void BVH4Factory::selectIntersectors(int features)
{
- IF_ENABLED_CURVES_OR_POINTS(SELECT_SYMBOL_DEFAULT_AVX_AVX2_AVX512KNL_AVX512SKX(features,VirtualCurveIntersector4i));
- IF_ENABLED_CURVES_OR_POINTS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512KNL_AVX512SKX(features,VirtualCurveIntersector8i));
- IF_ENABLED_CURVES_OR_POINTS(SELECT_SYMBOL_DEFAULT_AVX_AVX2_AVX512KNL_AVX512SKX(features,VirtualCurveIntersector4v));
- IF_ENABLED_CURVES_OR_POINTS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512KNL_AVX512SKX(features,VirtualCurveIntersector8v));
- IF_ENABLED_CURVES_OR_POINTS(SELECT_SYMBOL_DEFAULT_AVX_AVX2_AVX512KNL_AVX512SKX(features,VirtualCurveIntersector4iMB));
- IF_ENABLED_CURVES_OR_POINTS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512KNL_AVX512SKX(features,VirtualCurveIntersector8iMB));
+ IF_ENABLED_CURVES_OR_POINTS(SELECT_SYMBOL_DEFAULT_AVX_AVX2_AVX512(features,VirtualCurveIntersector4i));
+ IF_ENABLED_CURVES_OR_POINTS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512(features,VirtualCurveIntersector8i));
+ IF_ENABLED_CURVES_OR_POINTS(SELECT_SYMBOL_DEFAULT_AVX_AVX2_AVX512(features,VirtualCurveIntersector4v));
+ IF_ENABLED_CURVES_OR_POINTS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512(features,VirtualCurveIntersector8v));
+ IF_ENABLED_CURVES_OR_POINTS(SELECT_SYMBOL_DEFAULT_AVX_AVX2_AVX512(features,VirtualCurveIntersector4iMB));
+ IF_ENABLED_CURVES_OR_POINTS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512(features,VirtualCurveIntersector8iMB));
/* select intersectors1 */
- IF_ENABLED_CURVES_OR_POINTS(SELECT_SYMBOL_DEFAULT_AVX_AVX2_AVX512SKX(features,BVH4OBBVirtualCurveIntersector1));
- IF_ENABLED_CURVES_OR_POINTS(SELECT_SYMBOL_DEFAULT_AVX_AVX2_AVX512SKX(features,BVH4OBBVirtualCurveIntersector1MB));
- IF_ENABLED_CURVES_OR_POINTS(SELECT_SYMBOL_DEFAULT_AVX_AVX2_AVX512SKX(features,BVH4OBBVirtualCurveIntersectorRobust1));
- IF_ENABLED_CURVES_OR_POINTS(SELECT_SYMBOL_DEFAULT_AVX_AVX2_AVX512SKX(features,BVH4OBBVirtualCurveIntersectorRobust1MB));
+ IF_ENABLED_CURVES_OR_POINTS(SELECT_SYMBOL_DEFAULT_AVX_AVX2_AVX512(features,BVH4OBBVirtualCurveIntersector1));
+ IF_ENABLED_CURVES_OR_POINTS(SELECT_SYMBOL_DEFAULT_AVX_AVX2_AVX512(features,BVH4OBBVirtualCurveIntersector1MB));
+ IF_ENABLED_CURVES_OR_POINTS(SELECT_SYMBOL_DEFAULT_AVX_AVX2_AVX512(features,BVH4OBBVirtualCurveIntersectorRobust1));
+ IF_ENABLED_CURVES_OR_POINTS(SELECT_SYMBOL_DEFAULT_AVX_AVX2_AVX512(features,BVH4OBBVirtualCurveIntersectorRobust1MB));
- IF_ENABLED_TRIS(SELECT_SYMBOL_DEFAULT_AVX_AVX2_AVX512KNL_AVX512SKX(features,BVH4Triangle4Intersector1Moeller));
- IF_ENABLED_TRIS(SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX512SKX(features,BVH4Triangle4iIntersector1Moeller));
- IF_ENABLED_TRIS(SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX512SKX(features,BVH4Triangle4vIntersector1Pluecker));
- IF_ENABLED_TRIS(SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX512SKX(features,BVH4Triangle4iIntersector1Pluecker));
+ IF_ENABLED_TRIS(SELECT_SYMBOL_DEFAULT_AVX_AVX2_AVX512(features,BVH4Triangle4Intersector1Moeller));
+ IF_ENABLED_TRIS(SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX512(features,BVH4Triangle4iIntersector1Moeller));
+ IF_ENABLED_TRIS(SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX512(features,BVH4Triangle4vIntersector1Pluecker));
+ IF_ENABLED_TRIS(SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX512(features,BVH4Triangle4iIntersector1Pluecker));
- IF_ENABLED_TRIS(SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX2_AVX512SKX(features,BVH4Triangle4vMBIntersector1Moeller));
- IF_ENABLED_TRIS(SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX2_AVX512SKX(features,BVH4Triangle4iMBIntersector1Moeller));
- IF_ENABLED_TRIS(SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX2_AVX512SKX(features,BVH4Triangle4vMBIntersector1Pluecker));
- IF_ENABLED_TRIS(SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX2_AVX512SKX(features,BVH4Triangle4iMBIntersector1Pluecker));
+ IF_ENABLED_TRIS(SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX2_AVX512(features,BVH4Triangle4vMBIntersector1Moeller));
+ IF_ENABLED_TRIS(SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX2_AVX512(features,BVH4Triangle4iMBIntersector1Moeller));
+ IF_ENABLED_TRIS(SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX2_AVX512(features,BVH4Triangle4vMBIntersector1Pluecker));
+ IF_ENABLED_TRIS(SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX2_AVX512(features,BVH4Triangle4iMBIntersector1Pluecker));
- IF_ENABLED_QUADS(SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX2_AVX512SKX(features,BVH4Quad4vIntersector1Moeller));
- IF_ENABLED_QUADS(SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX2_AVX512SKX(features,BVH4Quad4iIntersector1Moeller));
- IF_ENABLED_QUADS(SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX2_AVX512SKX(features,BVH4Quad4vIntersector1Pluecker));
- IF_ENABLED_QUADS(SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX2_AVX512SKX(features,BVH4Quad4iIntersector1Pluecker));
+ IF_ENABLED_QUADS(SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX2_AVX512(features,BVH4Quad4vIntersector1Moeller));
+ IF_ENABLED_QUADS(SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX2_AVX512(features,BVH4Quad4iIntersector1Moeller));
+ IF_ENABLED_QUADS(SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX2_AVX512(features,BVH4Quad4vIntersector1Pluecker));
+ IF_ENABLED_QUADS(SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX2_AVX512(features,BVH4Quad4iIntersector1Pluecker));
- IF_ENABLED_QUADS(SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX2_AVX512SKX(features,BVH4Quad4iMBIntersector1Pluecker));
- IF_ENABLED_QUADS(SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX2_AVX512SKX(features,BVH4Quad4iMBIntersector1Moeller));
+ IF_ENABLED_QUADS(SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX2_AVX512(features,BVH4Quad4iMBIntersector1Pluecker));
+ IF_ENABLED_QUADS(SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX2_AVX512(features,BVH4Quad4iMBIntersector1Moeller));
- IF_ENABLED_TRIS(SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX512SKX(features,QBVH4Triangle4iIntersector1Pluecker));
- IF_ENABLED_QUADS(SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX512SKX(features,QBVH4Quad4iIntersector1Pluecker));
+ IF_ENABLED_TRIS(SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX512(features,QBVH4Triangle4iIntersector1Pluecker));
+ IF_ENABLED_QUADS(SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX512(features,QBVH4Quad4iIntersector1Pluecker));
- IF_ENABLED_SUBDIV(SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX2_AVX512SKX(features,BVH4SubdivPatch1Intersector1));
- IF_ENABLED_SUBDIV(SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX2_AVX512SKX(features,BVH4SubdivPatch1MBIntersector1));
+ IF_ENABLED_SUBDIV(SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX2_AVX512(features,BVH4SubdivPatch1Intersector1));
+ IF_ENABLED_SUBDIV(SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX2_AVX512(features,BVH4SubdivPatch1MBIntersector1));
- IF_ENABLED_USER(SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX2_AVX512SKX(features,BVH4VirtualIntersector1));
- IF_ENABLED_USER(SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX2_AVX512SKX(features,BVH4VirtualMBIntersector1));
+ IF_ENABLED_USER(SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX2_AVX512(features,BVH4VirtualIntersector1));
+ IF_ENABLED_USER(SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX2_AVX512(features,BVH4VirtualMBIntersector1));
- IF_ENABLED_INSTANCE(SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX2_AVX512SKX(features,BVH4InstanceIntersector1));
- IF_ENABLED_INSTANCE(SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX2_AVX512SKX(features,BVH4InstanceMBIntersector1));
+ IF_ENABLED_INSTANCE(SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX2_AVX512(features,BVH4InstanceIntersector1));
+ IF_ENABLED_INSTANCE(SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX2_AVX512(features,BVH4InstanceMBIntersector1));
- IF_ENABLED_GRIDS(SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX2_AVX512SKX(features,BVH4GridIntersector1Moeller));
- IF_ENABLED_GRIDS(SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX2_AVX512SKX(features,BVH4GridMBIntersector1Moeller))
- IF_ENABLED_GRIDS(SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX2_AVX512SKX(features,BVH4GridIntersector1Pluecker));
+ IF_ENABLED_GRIDS(SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX2_AVX512(features,BVH4GridIntersector1Moeller));
+ IF_ENABLED_GRIDS(SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX2_AVX512(features,BVH4GridMBIntersector1Moeller))
+ IF_ENABLED_GRIDS(SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX2_AVX512(features,BVH4GridIntersector1Pluecker));
#if defined (EMBREE_RAY_PACKETS)
/* select intersectors4 */
- IF_ENABLED_CURVES_OR_POINTS(SELECT_SYMBOL_DEFAULT_AVX_AVX2_AVX512SKX(features,BVH4OBBVirtualCurveIntersector4Hybrid));
- IF_ENABLED_CURVES_OR_POINTS(SELECT_SYMBOL_DEFAULT_AVX_AVX2_AVX512SKX(features,BVH4OBBVirtualCurveIntersector4HybridMB));
- IF_ENABLED_CURVES_OR_POINTS(SELECT_SYMBOL_DEFAULT_AVX_AVX2_AVX512SKX(features,BVH4OBBVirtualCurveIntersectorRobust4Hybrid));
- IF_ENABLED_CURVES_OR_POINTS(SELECT_SYMBOL_DEFAULT_AVX_AVX2_AVX512SKX(features,BVH4OBBVirtualCurveIntersectorRobust4HybridMB));
-
- IF_ENABLED_TRIS(SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX2_AVX512SKX(features,BVH4Triangle4Intersector4HybridMoeller));
- IF_ENABLED_TRIS(SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX2_AVX512SKX(features,BVH4Triangle4Intersector4HybridMoellerNoFilter));
- IF_ENABLED_TRIS(SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX2_AVX512SKX(features,BVH4Triangle4iIntersector4HybridMoeller));
- IF_ENABLED_TRIS(SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX2_AVX512SKX(features,BVH4Triangle4vIntersector4HybridPluecker));
- IF_ENABLED_TRIS(SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX2_AVX512SKX(features,BVH4Triangle4iIntersector4HybridPluecker));
-
- IF_ENABLED_TRIS(SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX2_AVX512SKX(features,BVH4Triangle4vMBIntersector4HybridMoeller));
- IF_ENABLED_TRIS(SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX2_AVX512SKX(features,BVH4Triangle4iMBIntersector4HybridMoeller));
- IF_ENABLED_TRIS(SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX2_AVX512SKX(features,BVH4Triangle4vMBIntersector4HybridPluecker));
- IF_ENABLED_TRIS(SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX2_AVX512SKX(features,BVH4Triangle4iMBIntersector4HybridPluecker));
-
- IF_ENABLED_QUADS(SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX2_AVX512SKX(features,BVH4Quad4vIntersector4HybridMoeller));
- IF_ENABLED_QUADS(SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX2_AVX512SKX(features,BVH4Quad4vIntersector4HybridMoellerNoFilter));
- IF_ENABLED_QUADS(SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX2_AVX512SKX(features,BVH4Quad4iIntersector4HybridMoeller));
- IF_ENABLED_QUADS(SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX2_AVX512SKX(features,BVH4Quad4vIntersector4HybridPluecker));
- IF_ENABLED_QUADS(SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX2_AVX512SKX(features,BVH4Quad4iIntersector4HybridPluecker));
-
- IF_ENABLED_QUADS(SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX2_AVX512SKX(features,BVH4Quad4iMBIntersector4HybridMoeller));
- IF_ENABLED_QUADS(SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX2_AVX512SKX(features,BVH4Quad4iMBIntersector4HybridPluecker));
-
- IF_ENABLED_SUBDIV(SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX2_AVX512SKX(features,BVH4SubdivPatch1Intersector4));
- IF_ENABLED_SUBDIV(SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX2_AVX512SKX(features,BVH4SubdivPatch1MBIntersector4));
+ IF_ENABLED_CURVES_OR_POINTS(SELECT_SYMBOL_DEFAULT_AVX_AVX2_AVX512(features,BVH4OBBVirtualCurveIntersector4Hybrid));
+ IF_ENABLED_CURVES_OR_POINTS(SELECT_SYMBOL_DEFAULT_AVX_AVX2_AVX512(features,BVH4OBBVirtualCurveIntersector4HybridMB));
+ IF_ENABLED_CURVES_OR_POINTS(SELECT_SYMBOL_DEFAULT_AVX_AVX2_AVX512(features,BVH4OBBVirtualCurveIntersectorRobust4Hybrid));
+ IF_ENABLED_CURVES_OR_POINTS(SELECT_SYMBOL_DEFAULT_AVX_AVX2_AVX512(features,BVH4OBBVirtualCurveIntersectorRobust4HybridMB));
+
+ IF_ENABLED_TRIS(SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX2_AVX512(features,BVH4Triangle4Intersector4HybridMoeller));
+ IF_ENABLED_TRIS(SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX2_AVX512(features,BVH4Triangle4Intersector4HybridMoellerNoFilter));
+ IF_ENABLED_TRIS(SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX2_AVX512(features,BVH4Triangle4iIntersector4HybridMoeller));
+ IF_ENABLED_TRIS(SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX2_AVX512(features,BVH4Triangle4vIntersector4HybridPluecker));
+ IF_ENABLED_TRIS(SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX2_AVX512(features,BVH4Triangle4iIntersector4HybridPluecker));
+
+ IF_ENABLED_TRIS(SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX2_AVX512(features,BVH4Triangle4vMBIntersector4HybridMoeller));
+ IF_ENABLED_TRIS(SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX2_AVX512(features,BVH4Triangle4iMBIntersector4HybridMoeller));
+ IF_ENABLED_TRIS(SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX2_AVX512(features,BVH4Triangle4vMBIntersector4HybridPluecker));
+ IF_ENABLED_TRIS(SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX2_AVX512(features,BVH4Triangle4iMBIntersector4HybridPluecker));
+
+ IF_ENABLED_QUADS(SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX2_AVX512(features,BVH4Quad4vIntersector4HybridMoeller));
+ IF_ENABLED_QUADS(SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX2_AVX512(features,BVH4Quad4vIntersector4HybridMoellerNoFilter));
+ IF_ENABLED_QUADS(SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX2_AVX512(features,BVH4Quad4iIntersector4HybridMoeller));
+ IF_ENABLED_QUADS(SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX2_AVX512(features,BVH4Quad4vIntersector4HybridPluecker));
+ IF_ENABLED_QUADS(SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX2_AVX512(features,BVH4Quad4iIntersector4HybridPluecker));
+
+ IF_ENABLED_QUADS(SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX2_AVX512(features,BVH4Quad4iMBIntersector4HybridMoeller));
+ IF_ENABLED_QUADS(SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX2_AVX512(features,BVH4Quad4iMBIntersector4HybridPluecker));
+
+ IF_ENABLED_SUBDIV(SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX2_AVX512(features,BVH4SubdivPatch1Intersector4));
+ IF_ENABLED_SUBDIV(SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX2_AVX512(features,BVH4SubdivPatch1MBIntersector4));
- IF_ENABLED_USER(SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX2_AVX512SKX(features,BVH4VirtualIntersector4Chunk));
- IF_ENABLED_USER(SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX2_AVX512SKX(features,BVH4VirtualMBIntersector4Chunk));
+ IF_ENABLED_USER(SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX2_AVX512(features,BVH4VirtualIntersector4Chunk));
+ IF_ENABLED_USER(SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX2_AVX512(features,BVH4VirtualMBIntersector4Chunk));
- IF_ENABLED_INSTANCE(SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX2_AVX512SKX(features,BVH4InstanceIntersector4Chunk));
- IF_ENABLED_INSTANCE(SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX2_AVX512SKX(features,BVH4InstanceMBIntersector4Chunk));
+ IF_ENABLED_INSTANCE(SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX2_AVX512(features,BVH4InstanceIntersector4Chunk));
+ IF_ENABLED_INSTANCE(SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX2_AVX512(features,BVH4InstanceMBIntersector4Chunk));
- IF_ENABLED_QUADS(SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX2_AVX512SKX(features,BVH4Quad4vIntersector4HybridMoeller));
+ IF_ENABLED_QUADS(SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX2_AVX512(features,BVH4Quad4vIntersector4HybridMoeller));
- IF_ENABLED_GRIDS(SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX2_AVX512SKX(features,BVH4GridIntersector4HybridMoeller));
- IF_ENABLED_GRIDS(SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX2_AVX512SKX(features,BVH4GridMBIntersector4HybridMoeller));
- IF_ENABLED_GRIDS(SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX2_AVX512SKX(features,BVH4GridIntersector4HybridPluecker));
+ IF_ENABLED_GRIDS(SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX2_AVX512(features,BVH4GridIntersector4HybridMoeller));
+ IF_ENABLED_GRIDS(SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX2_AVX512(features,BVH4GridMBIntersector4HybridMoeller));
+ IF_ENABLED_GRIDS(SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX2_AVX512(features,BVH4GridIntersector4HybridPluecker));
/* select intersectors8 */
- IF_ENABLED_CURVES_OR_POINTS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512SKX(features,BVH4OBBVirtualCurveIntersector8Hybrid));
- IF_ENABLED_CURVES_OR_POINTS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512SKX(features,BVH4OBBVirtualCurveIntersector8HybridMB));
- IF_ENABLED_CURVES_OR_POINTS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512SKX(features,BVH4OBBVirtualCurveIntersectorRobust8Hybrid));
- IF_ENABLED_CURVES_OR_POINTS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512SKX(features,BVH4OBBVirtualCurveIntersectorRobust8HybridMB));
-
- IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512SKX(features,BVH4Triangle4Intersector8HybridMoeller));
- IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512SKX(features,BVH4Triangle4Intersector8HybridMoellerNoFilter));
- IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512SKX(features,BVH4Triangle4iIntersector8HybridMoeller));
- IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512SKX(features,BVH4Triangle4vIntersector8HybridPluecker));
- IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512SKX(features,BVH4Triangle4iIntersector8HybridPluecker));
-
- IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512SKX(features,BVH4Triangle4vMBIntersector8HybridMoeller));
- IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512SKX(features,BVH4Triangle4iMBIntersector8HybridMoeller));
- IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512SKX(features,BVH4Triangle4vMBIntersector8HybridPluecker));
- IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512SKX(features,BVH4Triangle4iMBIntersector8HybridPluecker));
-
- IF_ENABLED_QUADS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512SKX(features,BVH4Quad4vIntersector8HybridMoeller));
- IF_ENABLED_QUADS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512SKX(features,BVH4Quad4vIntersector8HybridMoellerNoFilter));
- IF_ENABLED_QUADS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512SKX(features,BVH4Quad4iIntersector8HybridMoeller));
- IF_ENABLED_QUADS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512SKX(features,BVH4Quad4vIntersector8HybridPluecker));
- IF_ENABLED_QUADS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512SKX(features,BVH4Quad4iIntersector8HybridPluecker));
-
- IF_ENABLED_QUADS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512SKX(features,BVH4Quad4iMBIntersector8HybridMoeller));
- IF_ENABLED_QUADS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512SKX(features,BVH4Quad4iMBIntersector8HybridPluecker));
-
- IF_ENABLED_SUBDIV(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512SKX(features,BVH4SubdivPatch1Intersector8));
- IF_ENABLED_SUBDIV(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512SKX(features,BVH4SubdivPatch1MBIntersector8));
+ IF_ENABLED_CURVES_OR_POINTS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512(features,BVH4OBBVirtualCurveIntersector8Hybrid));
+ IF_ENABLED_CURVES_OR_POINTS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512(features,BVH4OBBVirtualCurveIntersector8HybridMB));
+ IF_ENABLED_CURVES_OR_POINTS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512(features,BVH4OBBVirtualCurveIntersectorRobust8Hybrid));
+ IF_ENABLED_CURVES_OR_POINTS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512(features,BVH4OBBVirtualCurveIntersectorRobust8HybridMB));
+
+ IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512(features,BVH4Triangle4Intersector8HybridMoeller));
+ IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512(features,BVH4Triangle4Intersector8HybridMoellerNoFilter));
+ IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512(features,BVH4Triangle4iIntersector8HybridMoeller));
+ IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512(features,BVH4Triangle4vIntersector8HybridPluecker));
+ IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512(features,BVH4Triangle4iIntersector8HybridPluecker));
+
+ IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512(features,BVH4Triangle4vMBIntersector8HybridMoeller));
+ IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512(features,BVH4Triangle4iMBIntersector8HybridMoeller));
+ IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512(features,BVH4Triangle4vMBIntersector8HybridPluecker));
+ IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512(features,BVH4Triangle4iMBIntersector8HybridPluecker));
+
+ IF_ENABLED_QUADS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512(features,BVH4Quad4vIntersector8HybridMoeller));
+ IF_ENABLED_QUADS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512(features,BVH4Quad4vIntersector8HybridMoellerNoFilter));
+ IF_ENABLED_QUADS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512(features,BVH4Quad4iIntersector8HybridMoeller));
+ IF_ENABLED_QUADS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512(features,BVH4Quad4vIntersector8HybridPluecker));
+ IF_ENABLED_QUADS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512(features,BVH4Quad4iIntersector8HybridPluecker));
+
+ IF_ENABLED_QUADS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512(features,BVH4Quad4iMBIntersector8HybridMoeller));
+ IF_ENABLED_QUADS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512(features,BVH4Quad4iMBIntersector8HybridPluecker));
+
+ IF_ENABLED_SUBDIV(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512(features,BVH4SubdivPatch1Intersector8));
+ IF_ENABLED_SUBDIV(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512(features,BVH4SubdivPatch1MBIntersector8));
- IF_ENABLED_USER(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512SKX(features,BVH4VirtualIntersector8Chunk));
- IF_ENABLED_USER(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512SKX(features,BVH4VirtualMBIntersector8Chunk));
+ IF_ENABLED_USER(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512(features,BVH4VirtualIntersector8Chunk));
+ IF_ENABLED_USER(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512(features,BVH4VirtualMBIntersector8Chunk));
- IF_ENABLED_INSTANCE(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512SKX(features,BVH4InstanceIntersector8Chunk));
- IF_ENABLED_INSTANCE(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512SKX(features,BVH4InstanceMBIntersector8Chunk));
+ IF_ENABLED_INSTANCE(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512(features,BVH4InstanceIntersector8Chunk));
+ IF_ENABLED_INSTANCE(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512(features,BVH4InstanceMBIntersector8Chunk));
- IF_ENABLED_GRIDS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512SKX(features,BVH4GridIntersector8HybridMoeller));
- IF_ENABLED_GRIDS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512SKX(features,BVH4GridMBIntersector8HybridMoeller));
- IF_ENABLED_GRIDS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512SKX(features,BVH4GridIntersector8HybridPluecker));
+ IF_ENABLED_GRIDS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512(features,BVH4GridIntersector8HybridMoeller));
+ IF_ENABLED_GRIDS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512(features,BVH4GridMBIntersector8HybridMoeller));
+ IF_ENABLED_GRIDS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512(features,BVH4GridIntersector8HybridPluecker));
/* select intersectors16 */
- IF_ENABLED_CURVES_OR_POINTS(SELECT_SYMBOL_INIT_AVX512KNL_AVX512SKX(features,BVH4OBBVirtualCurveIntersector16Hybrid));
- IF_ENABLED_CURVES_OR_POINTS(SELECT_SYMBOL_INIT_AVX512KNL_AVX512SKX(features,BVH4OBBVirtualCurveIntersector16HybridMB));
- IF_ENABLED_CURVES_OR_POINTS(SELECT_SYMBOL_INIT_AVX512KNL_AVX512SKX(features,BVH4OBBVirtualCurveIntersectorRobust16Hybrid));
- IF_ENABLED_CURVES_OR_POINTS(SELECT_SYMBOL_INIT_AVX512KNL_AVX512SKX(features,BVH4OBBVirtualCurveIntersectorRobust16HybridMB));
-
- IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX512KNL_AVX512SKX(features,BVH4Triangle4Intersector16HybridMoeller));
- IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX512KNL_AVX512SKX(features,BVH4Triangle4Intersector16HybridMoellerNoFilter));
- IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX512KNL_AVX512SKX(features,BVH4Triangle4iIntersector16HybridMoeller));
- IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX512KNL_AVX512SKX(features,BVH4Triangle4vIntersector16HybridPluecker));
- IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX512KNL_AVX512SKX(features,BVH4Triangle4iIntersector16HybridPluecker));
-
- IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX512KNL_AVX512SKX(features,BVH4Triangle4vMBIntersector16HybridMoeller));
- IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX512KNL_AVX512SKX(features,BVH4Triangle4iMBIntersector16HybridMoeller));
- IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX512KNL_AVX512SKX(features,BVH4Triangle4vMBIntersector16HybridPluecker));
- IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX512KNL_AVX512SKX(features,BVH4Triangle4iMBIntersector16HybridPluecker));
-
- IF_ENABLED_QUADS(SELECT_SYMBOL_INIT_AVX512KNL_AVX512SKX(features,BVH4Quad4vIntersector16HybridMoeller));
- IF_ENABLED_QUADS(SELECT_SYMBOL_INIT_AVX512KNL_AVX512SKX(features,BVH4Quad4vIntersector16HybridMoellerNoFilter));
- IF_ENABLED_QUADS(SELECT_SYMBOL_INIT_AVX512KNL_AVX512SKX(features,BVH4Quad4iIntersector16HybridMoeller));
- IF_ENABLED_QUADS(SELECT_SYMBOL_INIT_AVX512KNL_AVX512SKX(features,BVH4Quad4vIntersector16HybridPluecker));
- IF_ENABLED_QUADS(SELECT_SYMBOL_INIT_AVX512KNL_AVX512SKX(features,BVH4Quad4iIntersector16HybridPluecker));
-
- IF_ENABLED_QUADS(SELECT_SYMBOL_INIT_AVX512KNL_AVX512SKX(features,BVH4Quad4iMBIntersector16HybridMoeller));
- IF_ENABLED_QUADS(SELECT_SYMBOL_INIT_AVX512KNL_AVX512SKX(features,BVH4Quad4iMBIntersector16HybridPluecker));
-
- IF_ENABLED_SUBDIV(SELECT_SYMBOL_INIT_AVX512KNL_AVX512SKX(features,BVH4SubdivPatch1Intersector16));
- IF_ENABLED_SUBDIV(SELECT_SYMBOL_INIT_AVX512KNL_AVX512SKX(features,BVH4SubdivPatch1MBIntersector16));
+ IF_ENABLED_CURVES_OR_POINTS(SELECT_SYMBOL_INIT_AVX512(features,BVH4OBBVirtualCurveIntersector16Hybrid));
+ IF_ENABLED_CURVES_OR_POINTS(SELECT_SYMBOL_INIT_AVX512(features,BVH4OBBVirtualCurveIntersector16HybridMB));
+ IF_ENABLED_CURVES_OR_POINTS(SELECT_SYMBOL_INIT_AVX512(features,BVH4OBBVirtualCurveIntersectorRobust16Hybrid));
+ IF_ENABLED_CURVES_OR_POINTS(SELECT_SYMBOL_INIT_AVX512(features,BVH4OBBVirtualCurveIntersectorRobust16HybridMB));
+
+ IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX512(features,BVH4Triangle4Intersector16HybridMoeller));
+ IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX512(features,BVH4Triangle4Intersector16HybridMoellerNoFilter));
+ IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX512(features,BVH4Triangle4iIntersector16HybridMoeller));
+ IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX512(features,BVH4Triangle4vIntersector16HybridPluecker));
+ IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX512(features,BVH4Triangle4iIntersector16HybridPluecker));
+
+ IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX512(features,BVH4Triangle4vMBIntersector16HybridMoeller));
+ IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX512(features,BVH4Triangle4iMBIntersector16HybridMoeller));
+ IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX512(features,BVH4Triangle4vMBIntersector16HybridPluecker));
+ IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX512(features,BVH4Triangle4iMBIntersector16HybridPluecker));
+
+ IF_ENABLED_QUADS(SELECT_SYMBOL_INIT_AVX512(features,BVH4Quad4vIntersector16HybridMoeller));
+ IF_ENABLED_QUADS(SELECT_SYMBOL_INIT_AVX512(features,BVH4Quad4vIntersector16HybridMoellerNoFilter));
+ IF_ENABLED_QUADS(SELECT_SYMBOL_INIT_AVX512(features,BVH4Quad4iIntersector16HybridMoeller));
+ IF_ENABLED_QUADS(SELECT_SYMBOL_INIT_AVX512(features,BVH4Quad4vIntersector16HybridPluecker));
+ IF_ENABLED_QUADS(SELECT_SYMBOL_INIT_AVX512(features,BVH4Quad4iIntersector16HybridPluecker));
+
+ IF_ENABLED_QUADS(SELECT_SYMBOL_INIT_AVX512(features,BVH4Quad4iMBIntersector16HybridMoeller));
+ IF_ENABLED_QUADS(SELECT_SYMBOL_INIT_AVX512(features,BVH4Quad4iMBIntersector16HybridPluecker));
+
+ IF_ENABLED_SUBDIV(SELECT_SYMBOL_INIT_AVX512(features,BVH4SubdivPatch1Intersector16));
+ IF_ENABLED_SUBDIV(SELECT_SYMBOL_INIT_AVX512(features,BVH4SubdivPatch1MBIntersector16));
- IF_ENABLED_USER(SELECT_SYMBOL_INIT_AVX512KNL_AVX512SKX(features,BVH4VirtualIntersector16Chunk));
- IF_ENABLED_USER(SELECT_SYMBOL_INIT_AVX512KNL_AVX512SKX(features,BVH4VirtualMBIntersector16Chunk));
+ IF_ENABLED_USER(SELECT_SYMBOL_INIT_AVX512(features,BVH4VirtualIntersector16Chunk));
+ IF_ENABLED_USER(SELECT_SYMBOL_INIT_AVX512(features,BVH4VirtualMBIntersector16Chunk));
- IF_ENABLED_INSTANCE(SELECT_SYMBOL_INIT_AVX512KNL_AVX512SKX(features,BVH4InstanceIntersector16Chunk));
- IF_ENABLED_INSTANCE(SELECT_SYMBOL_INIT_AVX512KNL_AVX512SKX(features,BVH4InstanceMBIntersector16Chunk));
+ IF_ENABLED_INSTANCE(SELECT_SYMBOL_INIT_AVX512(features,BVH4InstanceIntersector16Chunk));
+ IF_ENABLED_INSTANCE(SELECT_SYMBOL_INIT_AVX512(features,BVH4InstanceMBIntersector16Chunk));
- IF_ENABLED_GRIDS(SELECT_SYMBOL_INIT_AVX512KNL_AVX512SKX(features,BVH4GridIntersector16HybridMoeller));
- IF_ENABLED_GRIDS(SELECT_SYMBOL_INIT_AVX512KNL_AVX512SKX(features,BVH4GridMBIntersector16HybridMoeller));
- IF_ENABLED_GRIDS(SELECT_SYMBOL_INIT_AVX512KNL_AVX512SKX(features,BVH4GridIntersector16HybridPluecker));
+ IF_ENABLED_GRIDS(SELECT_SYMBOL_INIT_AVX512(features,BVH4GridIntersector16HybridMoeller));
+ IF_ENABLED_GRIDS(SELECT_SYMBOL_INIT_AVX512(features,BVH4GridMBIntersector16HybridMoeller));
+ IF_ENABLED_GRIDS(SELECT_SYMBOL_INIT_AVX512(features,BVH4GridIntersector16HybridPluecker));
/* select stream intersectors */
- SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX2_AVX512KNL_AVX512SKX(features,BVH4IntersectorStreamPacketFallback);
+ SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX2_AVX512(features,BVH4IntersectorStreamPacketFallback);
- IF_ENABLED_TRIS(SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX2_AVX512KNL_AVX512SKX(features,BVH4Triangle4IntersectorStreamMoeller));
- IF_ENABLED_TRIS(SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX2_AVX512KNL_AVX512SKX(features,BVH4Triangle4IntersectorStreamMoellerNoFilter));
- IF_ENABLED_TRIS(SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX2_AVX512KNL_AVX512SKX(features,BVH4Triangle4iIntersectorStreamMoeller));
- IF_ENABLED_TRIS(SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX2_AVX512KNL_AVX512SKX(features,BVH4Triangle4vIntersectorStreamPluecker));
- IF_ENABLED_TRIS(SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX2_AVX512KNL_AVX512SKX(features,BVH4Triangle4iIntersectorStreamPluecker));
+ IF_ENABLED_TRIS(SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX2_AVX512(features,BVH4Triangle4IntersectorStreamMoeller));
+ IF_ENABLED_TRIS(SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX2_AVX512(features,BVH4Triangle4IntersectorStreamMoellerNoFilter));
+ IF_ENABLED_TRIS(SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX2_AVX512(features,BVH4Triangle4iIntersectorStreamMoeller));
+ IF_ENABLED_TRIS(SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX2_AVX512(features,BVH4Triangle4vIntersectorStreamPluecker));
+ IF_ENABLED_TRIS(SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX2_AVX512(features,BVH4Triangle4iIntersectorStreamPluecker));
- IF_ENABLED_QUADS(SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX2_AVX512KNL_AVX512SKX(features,BVH4Quad4vIntersectorStreamMoeller));
- IF_ENABLED_QUADS(SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX2_AVX512KNL_AVX512SKX(features,BVH4Quad4vIntersectorStreamMoellerNoFilter));
- IF_ENABLED_QUADS(SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX2_AVX512KNL_AVX512SKX(features,BVH4Quad4iIntersectorStreamMoeller));
- IF_ENABLED_QUADS(SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX2_AVX512KNL_AVX512SKX(features,BVH4Quad4vIntersectorStreamPluecker));
- IF_ENABLED_QUADS(SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX2_AVX512KNL_AVX512SKX(features,BVH4Quad4iIntersectorStreamPluecker));
+ IF_ENABLED_QUADS(SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX2_AVX512(features,BVH4Quad4vIntersectorStreamMoeller));
+ IF_ENABLED_QUADS(SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX2_AVX512(features,BVH4Quad4vIntersectorStreamMoellerNoFilter));
+ IF_ENABLED_QUADS(SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX2_AVX512(features,BVH4Quad4iIntersectorStreamMoeller));
+ IF_ENABLED_QUADS(SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX2_AVX512(features,BVH4Quad4vIntersectorStreamPluecker));
+ IF_ENABLED_QUADS(SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX2_AVX512(features,BVH4Quad4iIntersectorStreamPluecker));
- IF_ENABLED_USER(SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX2_AVX512KNL_AVX512SKX(features,BVH4VirtualIntersectorStream));
+ IF_ENABLED_USER(SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX2_AVX512(features,BVH4VirtualIntersectorStream));
- IF_ENABLED_INSTANCE(SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX2_AVX512KNL_AVX512SKX(features,BVH4InstanceIntersectorStream));
+ IF_ENABLED_INSTANCE(SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX2_AVX512(features,BVH4InstanceIntersectorStream));
#endif
}
diff --git a/thirdparty/embree-aarch64/kernels/bvh/bvh4_factory.h b/thirdparty/embree/kernels/bvh/bvh4_factory.h
index a68227b41f..30973971a4 100644
--- a/thirdparty/embree-aarch64/kernels/bvh/bvh4_factory.h
+++ b/thirdparty/embree/kernels/bvh/bvh4_factory.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
diff --git a/thirdparty/embree-aarch64/kernels/bvh/bvh8_factory.cpp b/thirdparty/embree/kernels/bvh/bvh8_factory.cpp
index 9fe057c392..d4521af241 100644
--- a/thirdparty/embree-aarch64/kernels/bvh/bvh8_factory.cpp
+++ b/thirdparty/embree/kernels/bvh/bvh8_factory.cpp
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#include "../common/isa.h" // to define EMBREE_TARGET_SIMD8
@@ -238,17 +238,17 @@ namespace embree
IF_ENABLED_CURVES_OR_POINTS(SELECT_SYMBOL_INIT_AVX(features,BVH8Curve8vBuilder_OBB_New));
IF_ENABLED_CURVES_OR_POINTS(SELECT_SYMBOL_INIT_AVX(features,BVH8OBBCurve8iMBBuilder_OBB));
- IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX_AVX512KNL(features,BVH8Triangle4SceneBuilderSAH));
- IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX_AVX512KNL(features,BVH8Triangle4vSceneBuilderSAH));
- IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX_AVX512KNL(features,BVH8Triangle4iSceneBuilderSAH));
- IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX_AVX512KNL(features,BVH8Triangle4iMBSceneBuilderSAH));
- IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX_AVX512KNL(features,BVH8Triangle4vMBSceneBuilderSAH));
+ IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX(features,BVH8Triangle4SceneBuilderSAH));
+ IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX(features,BVH8Triangle4vSceneBuilderSAH));
+ IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX(features,BVH8Triangle4iSceneBuilderSAH));
+ IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX(features,BVH8Triangle4iMBSceneBuilderSAH));
+ IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX(features,BVH8Triangle4vMBSceneBuilderSAH));
IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX(features,BVH8QuantizedTriangle4iSceneBuilderSAH));
IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX(features,BVH8QuantizedTriangle4SceneBuilderSAH));
- IF_ENABLED_QUADS(SELECT_SYMBOL_INIT_AVX_AVX512KNL(features,BVH8Quad4vSceneBuilderSAH));
- IF_ENABLED_QUADS(SELECT_SYMBOL_INIT_AVX_AVX512KNL(features,BVH8Quad4iSceneBuilderSAH));
- IF_ENABLED_QUADS(SELECT_SYMBOL_INIT_AVX_AVX512KNL(features,BVH8Quad4iMBSceneBuilderSAH));
+ IF_ENABLED_QUADS(SELECT_SYMBOL_INIT_AVX(features,BVH8Quad4vSceneBuilderSAH));
+ IF_ENABLED_QUADS(SELECT_SYMBOL_INIT_AVX(features,BVH8Quad4iSceneBuilderSAH));
+ IF_ENABLED_QUADS(SELECT_SYMBOL_INIT_AVX(features,BVH8Quad4iMBSceneBuilderSAH));
IF_ENABLED_QUADS(SELECT_SYMBOL_INIT_AVX(features,BVH8QuantizedQuad4iSceneBuilderSAH));
IF_ENABLED_USER(SELECT_SYMBOL_INIT_AVX(features,BVH8VirtualSceneBuilderSAH));
@@ -260,189 +260,189 @@ namespace embree
IF_ENABLED_GRIDS(SELECT_SYMBOL_INIT_AVX(features,BVH8GridSceneBuilderSAH));
IF_ENABLED_GRIDS(SELECT_SYMBOL_INIT_AVX(features,BVH8GridMBSceneBuilderSAH));
- IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX_AVX512KNL(features,BVH8Triangle4SceneBuilderFastSpatialSAH));
- IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX_AVX512KNL(features,BVH8Triangle4vSceneBuilderFastSpatialSAH));
- IF_ENABLED_QUADS(SELECT_SYMBOL_INIT_AVX_AVX512KNL(features,BVH8Quad4vSceneBuilderFastSpatialSAH));
+ IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX(features,BVH8Triangle4SceneBuilderFastSpatialSAH));
+ IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX(features,BVH8Triangle4vSceneBuilderFastSpatialSAH));
+ IF_ENABLED_QUADS(SELECT_SYMBOL_INIT_AVX(features,BVH8Quad4vSceneBuilderFastSpatialSAH));
- IF_ENABLED_TRIS (SELECT_SYMBOL_INIT_AVX_AVX512KNL(features,BVH8BuilderTwoLevelTriangle4MeshSAH));
- IF_ENABLED_TRIS (SELECT_SYMBOL_INIT_AVX_AVX512KNL(features,BVH8BuilderTwoLevelTriangle4vMeshSAH));
- IF_ENABLED_TRIS (SELECT_SYMBOL_INIT_AVX_AVX512KNL(features,BVH8BuilderTwoLevelTriangle4iMeshSAH));
- IF_ENABLED_QUADS (SELECT_SYMBOL_INIT_AVX_AVX512KNL(features,BVH8BuilderTwoLevelQuadMeshSAH));
- IF_ENABLED_USER (SELECT_SYMBOL_INIT_AVX_AVX512KNL(features,BVH8BuilderTwoLevelVirtualSAH));
- IF_ENABLED_INSTANCE (SELECT_SYMBOL_INIT_AVX_AVX512KNL(features,BVH8BuilderTwoLevelInstanceSAH));
+ IF_ENABLED_TRIS (SELECT_SYMBOL_INIT_AVX(features,BVH8BuilderTwoLevelTriangle4MeshSAH));
+ IF_ENABLED_TRIS (SELECT_SYMBOL_INIT_AVX(features,BVH8BuilderTwoLevelTriangle4vMeshSAH));
+ IF_ENABLED_TRIS (SELECT_SYMBOL_INIT_AVX(features,BVH8BuilderTwoLevelTriangle4iMeshSAH));
+ IF_ENABLED_QUADS (SELECT_SYMBOL_INIT_AVX(features,BVH8BuilderTwoLevelQuadMeshSAH));
+ IF_ENABLED_USER (SELECT_SYMBOL_INIT_AVX(features,BVH8BuilderTwoLevelVirtualSAH));
+ IF_ENABLED_INSTANCE (SELECT_SYMBOL_INIT_AVX(features,BVH8BuilderTwoLevelInstanceSAH));
}
void BVH8Factory::selectIntersectors(int features)
{
- IF_ENABLED_CURVES_OR_POINTS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512KNL_AVX512SKX(features,VirtualCurveIntersector8v));
- IF_ENABLED_CURVES_OR_POINTS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512KNL_AVX512SKX(features,VirtualCurveIntersector8iMB));
+ IF_ENABLED_CURVES_OR_POINTS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512(features,VirtualCurveIntersector8v));
+ IF_ENABLED_CURVES_OR_POINTS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512(features,VirtualCurveIntersector8iMB));
/* select intersectors1 */
- IF_ENABLED_CURVES_OR_POINTS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512KNL_AVX512SKX(features,BVH8OBBVirtualCurveIntersector1));
- IF_ENABLED_CURVES_OR_POINTS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512KNL_AVX512SKX(features,BVH8OBBVirtualCurveIntersector1MB));
- IF_ENABLED_CURVES_OR_POINTS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512KNL_AVX512SKX(features,BVH8OBBVirtualCurveIntersectorRobust1));
- IF_ENABLED_CURVES_OR_POINTS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512KNL_AVX512SKX(features,BVH8OBBVirtualCurveIntersectorRobust1MB));
+ IF_ENABLED_CURVES_OR_POINTS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512(features,BVH8OBBVirtualCurveIntersector1));
+ IF_ENABLED_CURVES_OR_POINTS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512(features,BVH8OBBVirtualCurveIntersector1MB));
+ IF_ENABLED_CURVES_OR_POINTS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512(features,BVH8OBBVirtualCurveIntersectorRobust1));
+ IF_ENABLED_CURVES_OR_POINTS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512(features,BVH8OBBVirtualCurveIntersectorRobust1MB));
- IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512KNL_AVX512SKX(features,BVH8Triangle4Intersector1Moeller));
- IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512KNL_AVX512SKX(features,BVH8Triangle4iIntersector1Moeller));
- IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512KNL_AVX512SKX(features,BVH8Triangle4vIntersector1Pluecker));
- IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512KNL_AVX512SKX(features,BVH8Triangle4iIntersector1Pluecker));
+ IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512(features,BVH8Triangle4Intersector1Moeller));
+ IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512(features,BVH8Triangle4iIntersector1Moeller));
+ IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512(features,BVH8Triangle4vIntersector1Pluecker));
+ IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512(features,BVH8Triangle4iIntersector1Pluecker));
- IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512KNL_AVX512SKX(features,BVH8Triangle4vIntersector1Woop));
+ IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512(features,BVH8Triangle4vIntersector1Woop));
- IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512KNL_AVX512SKX(features,BVH8Triangle4vMBIntersector1Moeller));
- IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512KNL_AVX512SKX(features,BVH8Triangle4iMBIntersector1Moeller));
- IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512KNL_AVX512SKX(features,BVH8Triangle4vMBIntersector1Pluecker));
- IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512KNL_AVX512SKX(features,BVH8Triangle4iMBIntersector1Pluecker));
+ IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512(features,BVH8Triangle4vMBIntersector1Moeller));
+ IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512(features,BVH8Triangle4iMBIntersector1Moeller));
+ IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512(features,BVH8Triangle4vMBIntersector1Pluecker));
+ IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512(features,BVH8Triangle4iMBIntersector1Pluecker));
- IF_ENABLED_QUADS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512KNL_AVX512SKX(features,BVH8Quad4vIntersector1Moeller));
- IF_ENABLED_QUADS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512KNL_AVX512SKX(features,BVH8Quad4iIntersector1Moeller));
- IF_ENABLED_QUADS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512KNL_AVX512SKX(features,BVH8Quad4vIntersector1Pluecker));
- IF_ENABLED_QUADS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512KNL_AVX512SKX(features,BVH8Quad4iIntersector1Pluecker));
+ IF_ENABLED_QUADS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512(features,BVH8Quad4vIntersector1Moeller));
+ IF_ENABLED_QUADS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512(features,BVH8Quad4iIntersector1Moeller));
+ IF_ENABLED_QUADS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512(features,BVH8Quad4vIntersector1Pluecker));
+ IF_ENABLED_QUADS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512(features,BVH8Quad4iIntersector1Pluecker));
- IF_ENABLED_QUADS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512KNL_AVX512SKX(features,BVH8Quad4iMBIntersector1Moeller));
- IF_ENABLED_QUADS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512KNL_AVX512SKX(features,BVH8Quad4iMBIntersector1Pluecker));
+ IF_ENABLED_QUADS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512(features,BVH8Quad4iMBIntersector1Moeller));
+ IF_ENABLED_QUADS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512(features,BVH8Quad4iMBIntersector1Pluecker));
- IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512KNL_AVX512SKX(features,QBVH8Triangle4iIntersector1Pluecker));
- IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512KNL_AVX512SKX(features,QBVH8Triangle4Intersector1Moeller));
- IF_ENABLED_QUADS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512KNL_AVX512SKX(features,QBVH8Quad4iIntersector1Pluecker));
+ IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512(features,QBVH8Triangle4iIntersector1Pluecker));
+ IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512(features,QBVH8Triangle4Intersector1Moeller));
+ IF_ENABLED_QUADS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512(features,QBVH8Quad4iIntersector1Pluecker));
- IF_ENABLED_USER(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512KNL_AVX512SKX(features,BVH8VirtualIntersector1));
- IF_ENABLED_USER(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512KNL_AVX512SKX(features,BVH8VirtualMBIntersector1));
+ IF_ENABLED_USER(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512(features,BVH8VirtualIntersector1));
+ IF_ENABLED_USER(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512(features,BVH8VirtualMBIntersector1));
- IF_ENABLED_INSTANCE(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512KNL_AVX512SKX(features,BVH8InstanceIntersector1));
- IF_ENABLED_INSTANCE(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512KNL_AVX512SKX(features,BVH8InstanceMBIntersector1));
+ IF_ENABLED_INSTANCE(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512(features,BVH8InstanceIntersector1));
+ IF_ENABLED_INSTANCE(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512(features,BVH8InstanceMBIntersector1));
- IF_ENABLED_GRIDS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512SKX(features,BVH8GridIntersector1Moeller));
- IF_ENABLED_GRIDS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512SKX(features,BVH8GridMBIntersector1Moeller))
- IF_ENABLED_GRIDS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512SKX(features,BVH8GridIntersector1Pluecker));
+ IF_ENABLED_GRIDS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512(features,BVH8GridIntersector1Moeller));
+ IF_ENABLED_GRIDS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512(features,BVH8GridMBIntersector1Moeller))
+ IF_ENABLED_GRIDS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512(features,BVH8GridIntersector1Pluecker));
#if defined (EMBREE_RAY_PACKETS)
/* select intersectors4 */
- IF_ENABLED_CURVES_OR_POINTS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512SKX(features,BVH8OBBVirtualCurveIntersector4Hybrid));
- IF_ENABLED_CURVES_OR_POINTS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512SKX(features,BVH8OBBVirtualCurveIntersector4HybridMB));
- IF_ENABLED_CURVES_OR_POINTS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512SKX(features,BVH8OBBVirtualCurveIntersectorRobust4Hybrid));
- IF_ENABLED_CURVES_OR_POINTS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512SKX(features,BVH8OBBVirtualCurveIntersectorRobust4HybridMB));
-
- IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512SKX(features,BVH8Triangle4Intersector4HybridMoeller));
- IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512SKX(features,BVH8Triangle4Intersector4HybridMoellerNoFilter));
- IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512SKX(features,BVH8Triangle4iIntersector4HybridMoeller));
- IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512SKX(features,BVH8Triangle4vIntersector4HybridPluecker));
- IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512SKX(features,BVH8Triangle4iIntersector4HybridPluecker));
-
- IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512SKX(features,BVH8Triangle4vMBIntersector4HybridMoeller));
- IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512SKX(features,BVH8Triangle4iMBIntersector4HybridMoeller));
- IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512SKX(features,BVH8Triangle4vMBIntersector4HybridPluecker));
- IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512SKX(features,BVH8Triangle4iMBIntersector4HybridPluecker));
-
- IF_ENABLED_QUADS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512SKX(features,BVH8Quad4vIntersector4HybridMoeller));
- IF_ENABLED_QUADS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512SKX(features,BVH8Quad4vIntersector4HybridMoellerNoFilter));
- IF_ENABLED_QUADS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512SKX(features,BVH8Quad4iIntersector4HybridMoeller));
- IF_ENABLED_QUADS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512SKX(features,BVH8Quad4vIntersector4HybridPluecker));
- IF_ENABLED_QUADS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512SKX(features,BVH8Quad4iIntersector4HybridPluecker));
+ IF_ENABLED_CURVES_OR_POINTS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512(features,BVH8OBBVirtualCurveIntersector4Hybrid));
+ IF_ENABLED_CURVES_OR_POINTS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512(features,BVH8OBBVirtualCurveIntersector4HybridMB));
+ IF_ENABLED_CURVES_OR_POINTS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512(features,BVH8OBBVirtualCurveIntersectorRobust4Hybrid));
+ IF_ENABLED_CURVES_OR_POINTS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512(features,BVH8OBBVirtualCurveIntersectorRobust4HybridMB));
+
+ IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512(features,BVH8Triangle4Intersector4HybridMoeller));
+ IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512(features,BVH8Triangle4Intersector4HybridMoellerNoFilter));
+ IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512(features,BVH8Triangle4iIntersector4HybridMoeller));
+ IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512(features,BVH8Triangle4vIntersector4HybridPluecker));
+ IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512(features,BVH8Triangle4iIntersector4HybridPluecker));
+
+ IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512(features,BVH8Triangle4vMBIntersector4HybridMoeller));
+ IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512(features,BVH8Triangle4iMBIntersector4HybridMoeller));
+ IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512(features,BVH8Triangle4vMBIntersector4HybridPluecker));
+ IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512(features,BVH8Triangle4iMBIntersector4HybridPluecker));
+
+ IF_ENABLED_QUADS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512(features,BVH8Quad4vIntersector4HybridMoeller));
+ IF_ENABLED_QUADS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512(features,BVH8Quad4vIntersector4HybridMoellerNoFilter));
+ IF_ENABLED_QUADS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512(features,BVH8Quad4iIntersector4HybridMoeller));
+ IF_ENABLED_QUADS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512(features,BVH8Quad4vIntersector4HybridPluecker));
+ IF_ENABLED_QUADS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512(features,BVH8Quad4iIntersector4HybridPluecker));
IF_ENABLED_QUADS(SELECT_SYMBOL_INIT_AVX_AVX2(features,BVH8Quad4iMBIntersector4HybridMoeller));
IF_ENABLED_QUADS(SELECT_SYMBOL_INIT_AVX_AVX2(features,BVH8Quad4iMBIntersector4HybridPluecker));
- IF_ENABLED_USER(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512SKX(features,BVH8VirtualIntersector4Chunk));
- IF_ENABLED_USER(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512SKX(features,BVH8VirtualMBIntersector4Chunk));
+ IF_ENABLED_USER(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512(features,BVH8VirtualIntersector4Chunk));
+ IF_ENABLED_USER(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512(features,BVH8VirtualMBIntersector4Chunk));
- IF_ENABLED_INSTANCE(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512SKX(features,BVH8InstanceIntersector4Chunk));
- IF_ENABLED_INSTANCE(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512SKX(features,BVH8InstanceMBIntersector4Chunk));
+ IF_ENABLED_INSTANCE(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512(features,BVH8InstanceIntersector4Chunk));
+ IF_ENABLED_INSTANCE(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512(features,BVH8InstanceMBIntersector4Chunk));
- IF_ENABLED_GRIDS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512SKX(features,BVH8GridIntersector4HybridMoeller));
- IF_ENABLED_GRIDS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512SKX(features,BVH8GridIntersector4HybridPluecker));
+ IF_ENABLED_GRIDS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512(features,BVH8GridIntersector4HybridMoeller));
+ IF_ENABLED_GRIDS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512(features,BVH8GridIntersector4HybridPluecker));
/* select intersectors8 */
- IF_ENABLED_CURVES_OR_POINTS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512SKX(features,BVH8OBBVirtualCurveIntersector8Hybrid));
- IF_ENABLED_CURVES_OR_POINTS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512SKX(features,BVH8OBBVirtualCurveIntersector8HybridMB));
- IF_ENABLED_CURVES_OR_POINTS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512SKX(features,BVH8OBBVirtualCurveIntersectorRobust8Hybrid));
- IF_ENABLED_CURVES_OR_POINTS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512SKX(features,BVH8OBBVirtualCurveIntersectorRobust8HybridMB));
-
- IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512SKX(features,BVH8Triangle4Intersector8HybridMoeller));
- IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512SKX(features,BVH8Triangle4Intersector8HybridMoellerNoFilter));
- IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512SKX(features,BVH8Triangle4iIntersector8HybridMoeller));
- IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512SKX(features,BVH8Triangle4vIntersector8HybridPluecker));
- IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512SKX(features,BVH8Triangle4iIntersector8HybridPluecker));
-
- IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512SKX(features,BVH8Triangle4vMBIntersector8HybridMoeller));
- IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512SKX(features,BVH8Triangle4iMBIntersector8HybridMoeller));
- IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512SKX(features,BVH8Triangle4vMBIntersector8HybridPluecker));
- IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512SKX(features,BVH8Triangle4iMBIntersector8HybridPluecker));
-
- IF_ENABLED_QUADS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512SKX(features,BVH8Quad4vIntersector8HybridMoeller));
- IF_ENABLED_QUADS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512SKX(features,BVH8Quad4vIntersector8HybridMoellerNoFilter));
- IF_ENABLED_QUADS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512SKX(features,BVH8Quad4iIntersector8HybridMoeller));
- IF_ENABLED_QUADS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512SKX(features,BVH8Quad4vIntersector8HybridPluecker));
- IF_ENABLED_QUADS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512SKX(features,BVH8Quad4iIntersector8HybridPluecker));
+ IF_ENABLED_CURVES_OR_POINTS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512(features,BVH8OBBVirtualCurveIntersector8Hybrid));
+ IF_ENABLED_CURVES_OR_POINTS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512(features,BVH8OBBVirtualCurveIntersector8HybridMB));
+ IF_ENABLED_CURVES_OR_POINTS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512(features,BVH8OBBVirtualCurveIntersectorRobust8Hybrid));
+ IF_ENABLED_CURVES_OR_POINTS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512(features,BVH8OBBVirtualCurveIntersectorRobust8HybridMB));
+
+ IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512(features,BVH8Triangle4Intersector8HybridMoeller));
+ IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512(features,BVH8Triangle4Intersector8HybridMoellerNoFilter));
+ IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512(features,BVH8Triangle4iIntersector8HybridMoeller));
+ IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512(features,BVH8Triangle4vIntersector8HybridPluecker));
+ IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512(features,BVH8Triangle4iIntersector8HybridPluecker));
+
+ IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512(features,BVH8Triangle4vMBIntersector8HybridMoeller));
+ IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512(features,BVH8Triangle4iMBIntersector8HybridMoeller));
+ IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512(features,BVH8Triangle4vMBIntersector8HybridPluecker));
+ IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512(features,BVH8Triangle4iMBIntersector8HybridPluecker));
+
+ IF_ENABLED_QUADS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512(features,BVH8Quad4vIntersector8HybridMoeller));
+ IF_ENABLED_QUADS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512(features,BVH8Quad4vIntersector8HybridMoellerNoFilter));
+ IF_ENABLED_QUADS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512(features,BVH8Quad4iIntersector8HybridMoeller));
+ IF_ENABLED_QUADS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512(features,BVH8Quad4vIntersector8HybridPluecker));
+ IF_ENABLED_QUADS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512(features,BVH8Quad4iIntersector8HybridPluecker));
IF_ENABLED_QUADS(SELECT_SYMBOL_INIT_AVX_AVX2(features,BVH8Quad4iMBIntersector8HybridMoeller));
IF_ENABLED_QUADS(SELECT_SYMBOL_INIT_AVX_AVX2(features,BVH8Quad4iMBIntersector8HybridPluecker));
- IF_ENABLED_USER(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512SKX(features,BVH8VirtualIntersector8Chunk));
- IF_ENABLED_USER(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512SKX(features,BVH8VirtualMBIntersector8Chunk));
+ IF_ENABLED_USER(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512(features,BVH8VirtualIntersector8Chunk));
+ IF_ENABLED_USER(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512(features,BVH8VirtualMBIntersector8Chunk));
- IF_ENABLED_INSTANCE(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512SKX(features,BVH8InstanceIntersector8Chunk));
- IF_ENABLED_INSTANCE(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512SKX(features,BVH8InstanceMBIntersector8Chunk));
+ IF_ENABLED_INSTANCE(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512(features,BVH8InstanceIntersector8Chunk));
+ IF_ENABLED_INSTANCE(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512(features,BVH8InstanceMBIntersector8Chunk));
- IF_ENABLED_GRIDS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512SKX(features,BVH8GridIntersector8HybridMoeller));
- IF_ENABLED_GRIDS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512SKX(features,BVH8GridIntersector8HybridPluecker));
+ IF_ENABLED_GRIDS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512(features,BVH8GridIntersector8HybridMoeller));
+ IF_ENABLED_GRIDS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512(features,BVH8GridIntersector8HybridPluecker));
/* select intersectors16 */
- IF_ENABLED_CURVES_OR_POINTS(SELECT_SYMBOL_INIT_AVX512KNL_AVX512SKX(features,BVH8OBBVirtualCurveIntersector16Hybrid));
- IF_ENABLED_CURVES_OR_POINTS(SELECT_SYMBOL_INIT_AVX512KNL_AVX512SKX(features,BVH8OBBVirtualCurveIntersector16HybridMB));
- IF_ENABLED_CURVES_OR_POINTS(SELECT_SYMBOL_INIT_AVX512KNL_AVX512SKX(features,BVH8OBBVirtualCurveIntersectorRobust16Hybrid));
- IF_ENABLED_CURVES_OR_POINTS(SELECT_SYMBOL_INIT_AVX512KNL_AVX512SKX(features,BVH8OBBVirtualCurveIntersectorRobust16HybridMB));
+ IF_ENABLED_CURVES_OR_POINTS(SELECT_SYMBOL_INIT_AVX512(features,BVH8OBBVirtualCurveIntersector16Hybrid));
+ IF_ENABLED_CURVES_OR_POINTS(SELECT_SYMBOL_INIT_AVX512(features,BVH8OBBVirtualCurveIntersector16HybridMB));
+ IF_ENABLED_CURVES_OR_POINTS(SELECT_SYMBOL_INIT_AVX512(features,BVH8OBBVirtualCurveIntersectorRobust16Hybrid));
+ IF_ENABLED_CURVES_OR_POINTS(SELECT_SYMBOL_INIT_AVX512(features,BVH8OBBVirtualCurveIntersectorRobust16HybridMB));
- IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX512KNL_AVX512SKX(features,BVH8Triangle4Intersector16HybridMoeller));
- IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX512KNL_AVX512SKX(features,BVH8Triangle4Intersector16HybridMoellerNoFilter));
- IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX512KNL_AVX512SKX(features,BVH8Triangle4iIntersector16HybridMoeller));
- IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX512KNL_AVX512SKX(features,BVH8Triangle4vIntersector16HybridPluecker));
- IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX512KNL_AVX512SKX(features,BVH8Triangle4iIntersector16HybridPluecker));
+ IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX512(features,BVH8Triangle4Intersector16HybridMoeller));
+ IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX512(features,BVH8Triangle4Intersector16HybridMoellerNoFilter));
+ IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX512(features,BVH8Triangle4iIntersector16HybridMoeller));
+ IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX512(features,BVH8Triangle4vIntersector16HybridPluecker));
+ IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX512(features,BVH8Triangle4iIntersector16HybridPluecker));
- IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX512KNL_AVX512SKX(features,BVH8Triangle4vMBIntersector16HybridMoeller));
- IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX512KNL_AVX512SKX(features,BVH8Triangle4iMBIntersector16HybridMoeller));
- IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX512KNL_AVX512SKX(features,BVH8Triangle4vMBIntersector16HybridPluecker));
- IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX512KNL_AVX512SKX(features,BVH8Triangle4iMBIntersector16HybridPluecker));
+ IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX512(features,BVH8Triangle4vMBIntersector16HybridMoeller));
+ IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX512(features,BVH8Triangle4iMBIntersector16HybridMoeller));
+ IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX512(features,BVH8Triangle4vMBIntersector16HybridPluecker));
+ IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX512(features,BVH8Triangle4iMBIntersector16HybridPluecker));
- IF_ENABLED_QUADS(SELECT_SYMBOL_INIT_AVX512KNL_AVX512SKX(features,BVH8Quad4vIntersector16HybridMoeller));
- IF_ENABLED_QUADS(SELECT_SYMBOL_INIT_AVX512KNL_AVX512SKX(features,BVH8Quad4vIntersector16HybridMoellerNoFilter));
- IF_ENABLED_QUADS(SELECT_SYMBOL_INIT_AVX512KNL_AVX512SKX(features,BVH8Quad4iIntersector16HybridMoeller));
- IF_ENABLED_QUADS(SELECT_SYMBOL_INIT_AVX512KNL_AVX512SKX(features,BVH8Quad4vIntersector16HybridPluecker));
- IF_ENABLED_QUADS(SELECT_SYMBOL_INIT_AVX512KNL_AVX512SKX(features,BVH8Quad4iIntersector16HybridPluecker));
+ IF_ENABLED_QUADS(SELECT_SYMBOL_INIT_AVX512(features,BVH8Quad4vIntersector16HybridMoeller));
+ IF_ENABLED_QUADS(SELECT_SYMBOL_INIT_AVX512(features,BVH8Quad4vIntersector16HybridMoellerNoFilter));
+ IF_ENABLED_QUADS(SELECT_SYMBOL_INIT_AVX512(features,BVH8Quad4iIntersector16HybridMoeller));
+ IF_ENABLED_QUADS(SELECT_SYMBOL_INIT_AVX512(features,BVH8Quad4vIntersector16HybridPluecker));
+ IF_ENABLED_QUADS(SELECT_SYMBOL_INIT_AVX512(features,BVH8Quad4iIntersector16HybridPluecker));
- IF_ENABLED_QUADS(SELECT_SYMBOL_INIT_AVX512KNL_AVX512SKX(features,BVH8Quad4iMBIntersector16HybridMoeller));
- IF_ENABLED_QUADS(SELECT_SYMBOL_INIT_AVX512KNL_AVX512SKX(features,BVH8Quad4iMBIntersector16HybridPluecker));
+ IF_ENABLED_QUADS(SELECT_SYMBOL_INIT_AVX512(features,BVH8Quad4iMBIntersector16HybridMoeller));
+ IF_ENABLED_QUADS(SELECT_SYMBOL_INIT_AVX512(features,BVH8Quad4iMBIntersector16HybridPluecker));
- IF_ENABLED_USER(SELECT_SYMBOL_INIT_AVX512KNL_AVX512SKX(features,BVH8VirtualIntersector16Chunk));
- IF_ENABLED_USER(SELECT_SYMBOL_INIT_AVX512KNL_AVX512SKX(features,BVH8VirtualMBIntersector16Chunk));
+ IF_ENABLED_USER(SELECT_SYMBOL_INIT_AVX512(features,BVH8VirtualIntersector16Chunk));
+ IF_ENABLED_USER(SELECT_SYMBOL_INIT_AVX512(features,BVH8VirtualMBIntersector16Chunk));
- IF_ENABLED_INSTANCE(SELECT_SYMBOL_INIT_AVX512KNL_AVX512SKX(features,BVH8InstanceIntersector16Chunk));
- IF_ENABLED_INSTANCE(SELECT_SYMBOL_INIT_AVX512KNL_AVX512SKX(features,BVH8InstanceMBIntersector16Chunk));
+ IF_ENABLED_INSTANCE(SELECT_SYMBOL_INIT_AVX512(features,BVH8InstanceIntersector16Chunk));
+ IF_ENABLED_INSTANCE(SELECT_SYMBOL_INIT_AVX512(features,BVH8InstanceMBIntersector16Chunk));
- IF_ENABLED_GRIDS(SELECT_SYMBOL_INIT_AVX512KNL_AVX512SKX(features,BVH8GridIntersector16HybridMoeller));
- IF_ENABLED_GRIDS(SELECT_SYMBOL_INIT_AVX512KNL_AVX512SKX(features,BVH8GridIntersector16HybridPluecker));
+ IF_ENABLED_GRIDS(SELECT_SYMBOL_INIT_AVX512(features,BVH8GridIntersector16HybridMoeller));
+ IF_ENABLED_GRIDS(SELECT_SYMBOL_INIT_AVX512(features,BVH8GridIntersector16HybridPluecker));
/* select stream intersectors */
- SELECT_SYMBOL_INIT_AVX_AVX2_AVX512KNL_AVX512SKX(features,BVH8IntersectorStreamPacketFallback);
+ SELECT_SYMBOL_INIT_AVX_AVX2_AVX512(features,BVH8IntersectorStreamPacketFallback);
- IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512KNL_AVX512SKX(features,BVH8Triangle4IntersectorStreamMoeller));
- IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512KNL_AVX512SKX(features,BVH8Triangle4IntersectorStreamMoellerNoFilter));
- IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512KNL_AVX512SKX(features,BVH8Triangle4iIntersectorStreamMoeller));
- IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512KNL_AVX512SKX(features,BVH8Triangle4vIntersectorStreamPluecker));
- IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512KNL_AVX512SKX(features,BVH8Triangle4iIntersectorStreamPluecker));
+ IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512(features,BVH8Triangle4IntersectorStreamMoeller));
+ IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512(features,BVH8Triangle4IntersectorStreamMoellerNoFilter));
+ IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512(features,BVH8Triangle4iIntersectorStreamMoeller));
+ IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512(features,BVH8Triangle4vIntersectorStreamPluecker));
+ IF_ENABLED_TRIS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512(features,BVH8Triangle4iIntersectorStreamPluecker));
- IF_ENABLED_QUADS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512KNL_AVX512SKX(features,BVH8Quad4vIntersectorStreamMoeller));
- IF_ENABLED_QUADS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512KNL_AVX512SKX(features,BVH8Quad4vIntersectorStreamMoellerNoFilter));
- IF_ENABLED_QUADS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512KNL_AVX512SKX(features,BVH8Quad4iIntersectorStreamMoeller));
- IF_ENABLED_QUADS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512KNL_AVX512SKX(features,BVH8Quad4vIntersectorStreamPluecker));
- IF_ENABLED_QUADS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512KNL_AVX512SKX(features,BVH8Quad4iIntersectorStreamPluecker));
+ IF_ENABLED_QUADS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512(features,BVH8Quad4vIntersectorStreamMoeller));
+ IF_ENABLED_QUADS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512(features,BVH8Quad4vIntersectorStreamMoellerNoFilter));
+ IF_ENABLED_QUADS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512(features,BVH8Quad4iIntersectorStreamMoeller));
+ IF_ENABLED_QUADS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512(features,BVH8Quad4vIntersectorStreamPluecker));
+ IF_ENABLED_QUADS(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512(features,BVH8Quad4iIntersectorStreamPluecker));
- IF_ENABLED_USER(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512KNL_AVX512SKX(features,BVH8VirtualIntersectorStream));
+ IF_ENABLED_USER(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512(features,BVH8VirtualIntersectorStream));
- IF_ENABLED_INSTANCE(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512KNL_AVX512SKX(features,BVH8InstanceIntersectorStream));
+ IF_ENABLED_INSTANCE(SELECT_SYMBOL_INIT_AVX_AVX2_AVX512(features,BVH8InstanceIntersectorStream));
#endif
}
diff --git a/thirdparty/embree-aarch64/kernels/bvh/bvh8_factory.h b/thirdparty/embree/kernels/bvh/bvh8_factory.h
index b92188e7d3..198d6f1df0 100644
--- a/thirdparty/embree-aarch64/kernels/bvh/bvh8_factory.h
+++ b/thirdparty/embree/kernels/bvh/bvh8_factory.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
diff --git a/thirdparty/embree-aarch64/kernels/bvh/bvh_builder.cpp b/thirdparty/embree/kernels/bvh/bvh_builder.cpp
index e832537ec5..161d01bb5c 100644
--- a/thirdparty/embree-aarch64/kernels/bvh/bvh_builder.cpp
+++ b/thirdparty/embree/kernels/bvh/bvh_builder.cpp
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#include "bvh_builder.h"
diff --git a/thirdparty/embree-aarch64/kernels/bvh/bvh_builder.h b/thirdparty/embree/kernels/bvh/bvh_builder.h
index 1b86bb45ad..e35d052a62 100644
--- a/thirdparty/embree-aarch64/kernels/bvh/bvh_builder.h
+++ b/thirdparty/embree/kernels/bvh/bvh_builder.h
@@ -1,8 +1,9 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#include "bvh.h"
#include "../builders/bvh_builder_sah.h"
+#include "../builders/bvh_builder_msmblur.h"
namespace embree
{
diff --git a/thirdparty/embree-aarch64/kernels/bvh/bvh_builder_morton.cpp b/thirdparty/embree/kernels/bvh/bvh_builder_morton.cpp
index 64759c1294..4a4d8d71df 100644
--- a/thirdparty/embree-aarch64/kernels/bvh/bvh_builder_morton.cpp
+++ b/thirdparty/embree/kernels/bvh/bvh_builder_morton.cpp
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#include "bvh.h"
@@ -18,7 +18,7 @@
#include "../geometry/object.h"
#include "../geometry/instance.h"
-#if defined(__X86_64__) || defined(__aarch64__)
+#if defined(__64BIT__)
# define ROTATE_TREE 1 // specifies number of tree rotation rounds to perform
#else
# define ROTATE_TREE 0 // do not use tree rotations on 32 bit platforms, barrier bit in NodeRef will cause issues
diff --git a/thirdparty/embree-aarch64/kernels/bvh/bvh_builder_sah.cpp b/thirdparty/embree/kernels/bvh/bvh_builder_sah.cpp
index cf5b2eb47f..fad02fcc04 100644
--- a/thirdparty/embree-aarch64/kernels/bvh/bvh_builder_sah.cpp
+++ b/thirdparty/embree/kernels/bvh/bvh_builder_sah.cpp
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#include "bvh.h"
@@ -153,8 +153,8 @@ namespace embree
prims.resize(numPrimitives);
PrimInfo pinfo = mesh ?
- createPrimRefArray(mesh,geomID_,prims,bvh->scene->progressInterface) :
- createPrimRefArray(scene,gtype_,false,prims,bvh->scene->progressInterface);
+ createPrimRefArray(mesh,geomID_,numPrimitives,prims,bvh->scene->progressInterface) :
+ createPrimRefArray(scene,gtype_,false,numPrimitives,prims,bvh->scene->progressInterface);
     /* pinfo might have zero size due to invalid geometry */
if (unlikely(pinfo.size() == 0))
@@ -242,8 +242,8 @@ namespace embree
/* create primref array */
prims.resize(numPrimitives);
PrimInfo pinfo = mesh ?
- createPrimRefArray(mesh,geomID_,prims,bvh->scene->progressInterface) :
- createPrimRefArray(scene,gtype_,false,prims,bvh->scene->progressInterface);
+ createPrimRefArray(mesh,geomID_,numPrimitives,prims,bvh->scene->progressInterface) :
+ createPrimRefArray(scene,gtype_,false,numPrimitives,prims,bvh->scene->progressInterface);
/* enable os_malloc for two level build */
if (mesh)
@@ -356,7 +356,7 @@ namespace embree
mvector<PrimRef> prims;
mvector<SubGridBuildData> sgrids;
GeneralBVHBuilder::Settings settings;
- unsigned int geomID_ = std::numeric_limits<unsigned int>::max();
+ const unsigned int geomID_ = std::numeric_limits<unsigned int>::max();
unsigned int numPreviousPrimitives = 0;
BVHNBuilderSAHGrid (BVH* bvh, Scene* scene, const size_t sahBlockSize, const float intCost, const size_t minLeafSize, const size_t maxLeafSize, const size_t mode)
@@ -378,109 +378,10 @@ namespace embree
const size_t numGridPrimitives = mesh ? mesh->size() : scene->getNumPrimitives(GridMesh::geom_type,false);
numPreviousPrimitives = numGridPrimitives;
-
- PrimInfo pinfo(empty);
- size_t numPrimitives = 0;
-
- if (!mesh)
- {
- /* first run to get #primitives */
-
- ParallelForForPrefixSumState<PrimInfo> pstate;
- Scene::Iterator<GridMesh,false> iter(scene);
-
- pstate.init(iter,size_t(1024));
-
- /* iterate over all meshes in the scene */
- pinfo = parallel_for_for_prefix_sum0( pstate, iter, PrimInfo(empty), [&](GridMesh* mesh, const range<size_t>& r, size_t k, size_t geomID) -> PrimInfo {
- PrimInfo pinfo(empty);
- for (size_t j=r.begin(); j<r.end(); j++)
- {
- if (!mesh->valid(j)) continue;
- BBox3fa bounds = empty;
- const PrimRef prim(bounds,(unsigned)geomID,(unsigned)j);
- if (!mesh->valid(j)) continue;
- pinfo.add_center2(prim,mesh->getNumSubGrids(j));
- }
- return pinfo;
- }, [](const PrimInfo& a, const PrimInfo& b) -> PrimInfo { return PrimInfo::merge(a,b); });
- numPrimitives = pinfo.size();
-
- /* resize arrays */
- sgrids.resize(numPrimitives);
- prims.resize(numPrimitives);
-
- /* second run to fill primrefs and SubGridBuildData arrays */
- pinfo = parallel_for_for_prefix_sum1( pstate, iter, PrimInfo(empty), [&](GridMesh* mesh, const range<size_t>& r, size_t k, size_t geomID, const PrimInfo& base) -> PrimInfo {
- k = base.size();
- size_t p_index = k;
- PrimInfo pinfo(empty);
- for (size_t j=r.begin(); j<r.end(); j++)
- {
- if (!mesh->valid(j)) continue;
- const GridMesh::Grid &g = mesh->grid(j);
- for (unsigned int y=0; y<g.resY-1u; y+=2)
- for (unsigned int x=0; x<g.resX-1u; x+=2)
- {
- BBox3fa bounds = empty;
- if (!mesh->buildBounds(g,x,y,bounds)) continue; // get bounds of subgrid
- const PrimRef prim(bounds,(unsigned)geomID,(unsigned)p_index);
- pinfo.add_center2(prim);
- sgrids[p_index] = SubGridBuildData(x | g.get3x3FlagsX(x), y | g.get3x3FlagsY(y), unsigned(j));
- prims[p_index++] = prim;
- }
- }
- return pinfo;
- }, [](const PrimInfo& a, const PrimInfo& b) -> PrimInfo { return PrimInfo::merge(a,b); });
- assert(pinfo.size() == numPrimitives);
- }
- else
- {
- ParallelPrefixSumState<PrimInfo> pstate;
- /* iterate over all grids in a single mesh */
- pinfo = parallel_prefix_sum( pstate, size_t(0), mesh->size(), size_t(1024), PrimInfo(empty), [&](const range<size_t>& r, const PrimInfo& base) -> PrimInfo
- {
- PrimInfo pinfo(empty);
- for (size_t j=r.begin(); j<r.end(); j++)
- {
- if (!mesh->valid(j)) continue;
- BBox3fa bounds = empty;
- const PrimRef prim(bounds,geomID_,unsigned(j));
- pinfo.add_center2(prim,mesh->getNumSubGrids(j));
- }
- return pinfo;
- }, [](const PrimInfo& a, const PrimInfo& b) -> PrimInfo { return PrimInfo::merge(a,b); });
- numPrimitives = pinfo.size();
- /* resize arrays */
- sgrids.resize(numPrimitives);
- prims.resize(numPrimitives);
-
- /* second run to fill primrefs and SubGridBuildData arrays */
- pinfo = parallel_prefix_sum( pstate, size_t(0), mesh->size(), size_t(1024), PrimInfo(empty), [&](const range<size_t>& r, const PrimInfo& base) -> PrimInfo
- {
-
- size_t p_index = base.size();
- PrimInfo pinfo(empty);
- for (size_t j=r.begin(); j<r.end(); j++)
- {
- if (!mesh->valid(j)) continue;
- const GridMesh::Grid &g = mesh->grid(j);
- for (unsigned int y=0; y<g.resY-1u; y+=2)
- for (unsigned int x=0; x<g.resX-1u; x+=2)
- {
- BBox3fa bounds = empty;
- if (!mesh->buildBounds(g,x,y,bounds)) continue; // get bounds of subgrid
- const PrimRef prim(bounds,geomID_,unsigned(p_index));
- pinfo.add_center2(prim);
- sgrids[p_index] = SubGridBuildData(x | g.get3x3FlagsX(x), y | g.get3x3FlagsY(y), unsigned(j));
- prims[p_index++] = prim;
- }
- }
- return pinfo;
- }, [](const PrimInfo& a, const PrimInfo& b) -> PrimInfo { return PrimInfo::merge(a,b); });
- }
+ PrimInfo pinfo = mesh ? createPrimRefArrayGrids(mesh,prims,sgrids) : createPrimRefArrayGrids(scene,prims,sgrids);
+ const size_t numPrimitives = pinfo.size();
/* no primitives */
if (numPrimitives == 0) {
bvh->clear();
@@ -546,6 +447,7 @@ namespace embree
/************************************************************************************/
/************************************************************************************/
+
#if defined(EMBREE_GEOMETRY_TRIANGLE)
Builder* BVH4Triangle4MeshBuilderSAH (void* bvh, TriangleMesh* mesh, unsigned int geomID, size_t mode) { return new BVHNBuilderSAH<4,Triangle4>((BVH4*)bvh,mesh,geomID,4,1.0f,4,inf,TriangleMesh::geom_type); }
Builder* BVH4Triangle4vMeshBuilderSAH (void* bvh, TriangleMesh* mesh, unsigned int geomID, size_t mode) { return new BVHNBuilderSAH<4,Triangle4v>((BVH4*)bvh,mesh,geomID,4,1.0f,4,inf,TriangleMesh::geom_type); }
@@ -555,7 +457,6 @@ namespace embree
Builder* BVH4Triangle4vSceneBuilderSAH (void* bvh, Scene* scene, size_t mode) { return new BVHNBuilderSAH<4,Triangle4v>((BVH4*)bvh,scene,4,1.0f,4,inf,TriangleMesh::geom_type); }
Builder* BVH4Triangle4iSceneBuilderSAH (void* bvh, Scene* scene, size_t mode) { return new BVHNBuilderSAH<4,Triangle4i>((BVH4*)bvh,scene,4,1.0f,4,inf,TriangleMesh::geom_type,true); }
-
Builder* BVH4QuantizedTriangle4iSceneBuilderSAH (void* bvh, Scene* scene, size_t mode) { return new BVHNBuilderSAHQuantized<4,Triangle4i>((BVH4*)bvh,scene,4,1.0f,4,inf,TriangleMesh::geom_type); }
#if defined(__AVX__)
Builder* BVH8Triangle4MeshBuilderSAH (void* bvh, TriangleMesh* mesh, unsigned int geomID, size_t mode) { return new BVHNBuilderSAH<8,Triangle4>((BVH8*)bvh,mesh,geomID,4,1.0f,4,inf,TriangleMesh::geom_type); }
@@ -568,6 +469,8 @@ namespace embree
Builder* BVH8QuantizedTriangle4iSceneBuilderSAH (void* bvh, Scene* scene, size_t mode) { return new BVHNBuilderSAHQuantized<8,Triangle4i>((BVH8*)bvh,scene,4,1.0f,4,inf,TriangleMesh::geom_type); }
Builder* BVH8QuantizedTriangle4SceneBuilderSAH (void* bvh, Scene* scene, size_t mode) { return new BVHNBuilderSAHQuantized<8,Triangle4>((BVH8*)bvh,scene,4,1.0f,4,inf,TriangleMesh::geom_type); }
+
+
#endif
#endif
diff --git a/thirdparty/embree-aarch64/kernels/bvh/bvh_builder_sah_mb.cpp b/thirdparty/embree/kernels/bvh/bvh_builder_sah_mb.cpp
index 9c01553ec6..d163a80ab1 100644
--- a/thirdparty/embree-aarch64/kernels/bvh/bvh_builder_sah_mb.cpp
+++ b/thirdparty/embree/kernels/bvh/bvh_builder_sah_mb.cpp
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#include "bvh.h"
@@ -142,7 +142,7 @@ namespace embree
{
/* create primref array */
mvector<PrimRef> prims(scene->device,numPrimitives);
- const PrimInfo pinfo = createPrimRefArrayMBlur(scene,gtype_,prims,bvh->scene->progressInterface,0);
+ const PrimInfo pinfo = createPrimRefArrayMBlur(scene,gtype_,numPrimitives,prims,bvh->scene->progressInterface,0);
/* early out if no valid primitives */
if (pinfo.size() == 0) { bvh->clear(); return; }
/* estimate acceleration structure size */
@@ -175,7 +175,7 @@ namespace embree
{
/* create primref array */
mvector<PrimRefMB> prims(scene->device,numPrimitives);
- PrimInfoMB pinfo = createPrimRefArrayMSMBlur(scene,gtype_,prims,bvh->scene->progressInterface);
+ PrimInfoMB pinfo = createPrimRefArrayMSMBlur(scene,gtype_,numPrimitives,prims,bvh->scene->progressInterface);
/* early out if no valid primitives */
if (pinfo.size() == 0) { bvh->clear(); return; }
diff --git a/thirdparty/embree-aarch64/kernels/bvh/bvh_builder_sah_spatial.cpp b/thirdparty/embree/kernels/bvh/bvh_builder_sah_spatial.cpp
index 285b38c39d..a4e55d7484 100644
--- a/thirdparty/embree-aarch64/kernels/bvh/bvh_builder_sah_spatial.cpp
+++ b/thirdparty/embree/kernels/bvh/bvh_builder_sah_spatial.cpp
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#include "bvh.h"
@@ -127,8 +127,8 @@ namespace embree
{
/* standard spatial split SAH BVH builder */
pinfo = mesh ?
- createPrimRefArray(mesh,geomID_,/*numSplitPrimitives,*/prims0,bvh->scene->progressInterface) :
- createPrimRefArray(scene,Mesh::geom_type,false,/*numSplitPrimitives,*/prims0,bvh->scene->progressInterface);
+ createPrimRefArray(mesh,geomID_,numSplitPrimitives,prims0,bvh->scene->progressInterface) :
+ createPrimRefArray(scene,Mesh::geom_type,false,numSplitPrimitives,prims0,bvh->scene->progressInterface);
Splitter splitter(scene);
diff --git a/thirdparty/embree-aarch64/kernels/bvh/bvh_builder_twolevel.cpp b/thirdparty/embree/kernels/bvh/bvh_builder_twolevel.cpp
index 1a78f347ac..5d45ed3748 100644
--- a/thirdparty/embree-aarch64/kernels/bvh/bvh_builder_twolevel.cpp
+++ b/thirdparty/embree/kernels/bvh/bvh_builder_twolevel.cpp
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#include "bvh_builder_twolevel.h"
@@ -129,10 +129,6 @@ namespace embree
prims.resize(refs.size());
#endif
-#if defined(TASKING_TBB) && defined(__AVX512ER__) && USE_TASK_ARENA // KNL
- tbb::task_arena limited(min(32,(int)TaskScheduler::threadCount()));
- limited.execute([&]
-#endif
{
#if ENABLE_DIRECT_SAH_MERGE_BUILDER
@@ -211,10 +207,6 @@ namespace embree
bvh->set(root,LBBox3fa(pinfo.geomBounds),numPrimitives);
}
}
-#if defined(TASKING_TBB) && defined(__AVX512ER__) && USE_TASK_ARENA // KNL
- );
-#endif
-
}
bvh->alloc.cleanup();
diff --git a/thirdparty/embree-aarch64/kernels/bvh/bvh_builder_twolevel.h b/thirdparty/embree/kernels/bvh/bvh_builder_twolevel.h
index 8f57c3b406..dc7ec7d278 100644
--- a/thirdparty/embree-aarch64/kernels/bvh/bvh_builder_twolevel.h
+++ b/thirdparty/embree/kernels/bvh/bvh_builder_twolevel.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
@@ -137,7 +137,7 @@ namespace embree
assert(isSmallGeometry(mesh));
mvector<PrimRef> prefs(topBuilder->scene->device, meshSize);
- auto pinfo = createPrimRefArray(mesh,objectID_,prefs,topBuilder->bvh->scene->progressInterface);
+ auto pinfo = createPrimRefArray(mesh,objectID_,meshSize,prefs,topBuilder->bvh->scene->progressInterface);
size_t begin=0;
while (begin < pinfo.size())
diff --git a/thirdparty/embree-aarch64/kernels/bvh/bvh_builder_twolevel_internal.h b/thirdparty/embree/kernels/bvh/bvh_builder_twolevel_internal.h
index 1c1ae8d6a7..023b52b780 100644
--- a/thirdparty/embree-aarch64/kernels/bvh/bvh_builder_twolevel_internal.h
+++ b/thirdparty/embree/kernels/bvh/bvh_builder_twolevel_internal.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
diff --git a/thirdparty/embree-aarch64/kernels/bvh/bvh_collider.cpp b/thirdparty/embree/kernels/bvh/bvh_collider.cpp
index a27be8bae8..9428c0b88e 100644
--- a/thirdparty/embree-aarch64/kernels/bvh/bvh_collider.cpp
+++ b/thirdparty/embree/kernels/bvh/bvh_collider.cpp
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#include "bvh_collider.h"
diff --git a/thirdparty/embree-aarch64/kernels/bvh/bvh_collider.h b/thirdparty/embree/kernels/bvh/bvh_collider.h
index ac4f99c96a..3c42f211c1 100644
--- a/thirdparty/embree-aarch64/kernels/bvh/bvh_collider.h
+++ b/thirdparty/embree/kernels/bvh/bvh_collider.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
diff --git a/thirdparty/embree-aarch64/kernels/bvh/bvh_factory.h b/thirdparty/embree/kernels/bvh/bvh_factory.h
index 54021ca6eb..453d455bd9 100644
--- a/thirdparty/embree-aarch64/kernels/bvh/bvh_factory.h
+++ b/thirdparty/embree/kernels/bvh/bvh_factory.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
diff --git a/thirdparty/embree-aarch64/kernels/bvh/bvh_intersector1.cpp b/thirdparty/embree/kernels/bvh/bvh_intersector1.cpp
index ea6adc2717..9594f402c3 100644
--- a/thirdparty/embree-aarch64/kernels/bvh/bvh_intersector1.cpp
+++ b/thirdparty/embree/kernels/bvh/bvh_intersector1.cpp
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#include "bvh_intersector1.h"
@@ -61,10 +61,10 @@ namespace embree
assert(!(types & BVH_MB) || (ray.time() >= 0.0f && ray.time() <= 1.0f));
/* load the ray into SIMD registers */
- TravRay<N,Nx,robust> tray(ray.org, ray.dir, max(ray.tnear(), 0.0f), max(ray.tfar, 0.0f));
+ TravRay<N,robust> tray(ray.org, ray.dir, max(ray.tnear(), 0.0f), max(ray.tfar, 0.0f));
/* initialize the node traverser */
- BVHNNodeTraverser1Hit<N, Nx, types> nodeTraverser;
+ BVHNNodeTraverser1Hit<N, types> nodeTraverser;
/* pop loop */
while (true) pop:
@@ -75,22 +75,16 @@ namespace embree
NodeRef cur = NodeRef(stackPtr->ptr);
/* if popped node is too far, pop next one */
-#if defined(__AVX512ER__)
- /* much faster on KNL */
- if (unlikely(any(vfloat<Nx>(*(float*)&stackPtr->dist) > tray.tfar)))
- continue;
-#else
if (unlikely(*(float*)&stackPtr->dist > ray.tfar))
continue;
-#endif
/* downtraversal loop */
while (true)
{
/* intersect node */
- size_t mask; vfloat<Nx> tNear;
+ size_t mask; vfloat<N> tNear;
STAT3(normal.trav_nodes,1,1,1);
- bool nodeIntersected = BVHNNodeIntersector1<N, Nx, types, robust>::intersect(cur, tray, ray.time(), tNear, mask);
+ bool nodeIntersected = BVHNNodeIntersector1<N, types, robust>::intersect(cur, tray, ray.time(), tNear, mask);
if (unlikely(!nodeIntersected)) { STAT3(normal.trav_nodes,-1,-1,-1); break; }
/* if no child is hit, pop next node */
@@ -153,10 +147,10 @@ namespace embree
assert(!(types & BVH_MB) || (ray.time() >= 0.0f && ray.time() <= 1.0f));
/* load the ray into SIMD registers */
- TravRay<N,Nx,robust> tray(ray.org, ray.dir, max(ray.tnear(), 0.0f), max(ray.tfar, 0.0f));
+ TravRay<N,robust> tray(ray.org, ray.dir, max(ray.tnear(), 0.0f), max(ray.tfar, 0.0f));
/* initialize the node traverser */
- BVHNNodeTraverser1Hit<N, Nx, types> nodeTraverser;
+ BVHNNodeTraverser1Hit<N, types> nodeTraverser;
/* pop loop */
while (true) pop:
@@ -170,9 +164,9 @@ namespace embree
while (true)
{
/* intersect node */
- size_t mask; vfloat<Nx> tNear;
+ size_t mask; vfloat<N> tNear;
STAT3(shadow.trav_nodes,1,1,1);
- bool nodeIntersected = BVHNNodeIntersector1<N, Nx, types, robust>::intersect(cur, tray, ray.time(), tNear, mask);
+ bool nodeIntersected = BVHNNodeIntersector1<N, types, robust>::intersect(cur, tray, ray.time(), tNear, mask);
if (unlikely(!nodeIntersected)) { STAT3(shadow.trav_nodes,-1,-1,-1); break; }
/* if no child is hit, pop next node */
@@ -213,9 +207,6 @@ namespace embree
static const size_t stackSize = 1+(N-1)*BVH::maxDepth+3; // +3 due to 16-wide store
- /* right now AVX512KNL SIMD extension only for standard node types */
- static const size_t Nx = (types == BVH_AN1 || types == BVH_QN1) ? vextend<N>::size : N;
-
static __forceinline bool pointQuery(const Accel::Intersectors* This, PointQuery* query, PointQueryContext* context)
{
const BVH* __restrict__ bvh = (const BVH*)This->ptr;
@@ -238,7 +229,7 @@ namespace embree
TravPointQuery<N> tquery(query->p, context->query_radius);
/* initialize the node traverser */
- BVHNNodeTraverser1Hit<N, N, types> nodeTraverser;
+ BVHNNodeTraverser1Hit<N,types> nodeTraverser;
bool changed = false;
float cull_radius = context->query_type == POINT_QUERY_TYPE_SPHERE
diff --git a/thirdparty/embree-aarch64/kernels/bvh/bvh_intersector1.h b/thirdparty/embree/kernels/bvh/bvh_intersector1.h
index 1a269c319a..2df3d6eddb 100644
--- a/thirdparty/embree-aarch64/kernels/bvh/bvh_intersector1.h
+++ b/thirdparty/embree/kernels/bvh/bvh_intersector1.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
@@ -25,9 +25,6 @@ namespace embree
static const size_t stackSize = 1+(N-1)*BVH::maxDepth+3; // +3 due to 16-wide store
- /* right now AVX512KNL SIMD extension only for standard node types */
- static const size_t Nx = (types == BVH_AN1 || types == BVH_QN1) ? vextend<N>::size : N;
-
public:
static void intersect (const Accel::Intersectors* This, RayHit& ray, IntersectContext* context);
static void occluded (const Accel::Intersectors* This, Ray& ray, IntersectContext* context);
diff --git a/thirdparty/embree-aarch64/kernels/bvh/bvh_intersector1_bvh4.cpp b/thirdparty/embree/kernels/bvh/bvh_intersector1_bvh4.cpp
index 989f7354fd..831d613367 100644
--- a/thirdparty/embree-aarch64/kernels/bvh/bvh_intersector1_bvh4.cpp
+++ b/thirdparty/embree/kernels/bvh/bvh_intersector1_bvh4.cpp
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#include "bvh_intersector1.cpp"
@@ -21,15 +21,15 @@ namespace embree
IF_ENABLED_CURVES_OR_POINTS(DEFINE_INTERSECTOR1(BVH4OBBVirtualCurveIntersectorRobust1,BVHNIntersector1<4 COMMA BVH_AN1_UN1 COMMA true COMMA VirtualCurveIntersector1 >));
IF_ENABLED_CURVES_OR_POINTS(DEFINE_INTERSECTOR1(BVH4OBBVirtualCurveIntersectorRobust1MB,BVHNIntersector1<4 COMMA BVH_AN2_AN4D_UN2 COMMA true COMMA VirtualCurveIntersector1 >));
- IF_ENABLED_TRIS(DEFINE_INTERSECTOR1(BVH4Triangle4Intersector1Moeller, BVHNIntersector1<4 COMMA BVH_AN1 COMMA false COMMA ArrayIntersector1<TriangleMIntersector1Moeller <SIMD_MODE(4) COMMA true> > >));
- IF_ENABLED_TRIS(DEFINE_INTERSECTOR1(BVH4Triangle4iIntersector1Moeller, BVHNIntersector1<4 COMMA BVH_AN1 COMMA false COMMA ArrayIntersector1<TriangleMiIntersector1Moeller <SIMD_MODE(4) COMMA true> > >));
- IF_ENABLED_TRIS(DEFINE_INTERSECTOR1(BVH4Triangle4vIntersector1Pluecker,BVHNIntersector1<4 COMMA BVH_AN1 COMMA true COMMA ArrayIntersector1<TriangleMvIntersector1Pluecker<SIMD_MODE(4) COMMA true> > >));
- IF_ENABLED_TRIS(DEFINE_INTERSECTOR1(BVH4Triangle4iIntersector1Pluecker,BVHNIntersector1<4 COMMA BVH_AN1 COMMA true COMMA ArrayIntersector1<TriangleMiIntersector1Pluecker<SIMD_MODE(4) COMMA true> > >));
+ IF_ENABLED_TRIS(DEFINE_INTERSECTOR1(BVH4Triangle4Intersector1Moeller, BVHNIntersector1<4 COMMA BVH_AN1 COMMA false COMMA ArrayIntersector1<TriangleMIntersector1Moeller <4 COMMA true> > >));
+ IF_ENABLED_TRIS(DEFINE_INTERSECTOR1(BVH4Triangle4iIntersector1Moeller, BVHNIntersector1<4 COMMA BVH_AN1 COMMA false COMMA ArrayIntersector1<TriangleMiIntersector1Moeller <4 COMMA true> > >));
+ IF_ENABLED_TRIS(DEFINE_INTERSECTOR1(BVH4Triangle4vIntersector1Pluecker,BVHNIntersector1<4 COMMA BVH_AN1 COMMA true COMMA ArrayIntersector1<TriangleMvIntersector1Pluecker<4 COMMA true> > >));
+ IF_ENABLED_TRIS(DEFINE_INTERSECTOR1(BVH4Triangle4iIntersector1Pluecker,BVHNIntersector1<4 COMMA BVH_AN1 COMMA true COMMA ArrayIntersector1<TriangleMiIntersector1Pluecker<4 COMMA true> > >));
- IF_ENABLED_TRIS(DEFINE_INTERSECTOR1(BVH4Triangle4vMBIntersector1Moeller, BVHNIntersector1<4 COMMA BVH_AN2_AN4D COMMA false COMMA ArrayIntersector1<TriangleMvMBIntersector1Moeller <SIMD_MODE(4) COMMA true> > >));
- IF_ENABLED_TRIS(DEFINE_INTERSECTOR1(BVH4Triangle4iMBIntersector1Moeller, BVHNIntersector1<4 COMMA BVH_AN2_AN4D COMMA false COMMA ArrayIntersector1<TriangleMiMBIntersector1Moeller <SIMD_MODE(4) COMMA true> > >));
- IF_ENABLED_TRIS(DEFINE_INTERSECTOR1(BVH4Triangle4vMBIntersector1Pluecker,BVHNIntersector1<4 COMMA BVH_AN2_AN4D COMMA true COMMA ArrayIntersector1<TriangleMvMBIntersector1Pluecker<SIMD_MODE(4) COMMA true> > >));
- IF_ENABLED_TRIS(DEFINE_INTERSECTOR1(BVH4Triangle4iMBIntersector1Pluecker,BVHNIntersector1<4 COMMA BVH_AN2_AN4D COMMA true COMMA ArrayIntersector1<TriangleMiMBIntersector1Pluecker<SIMD_MODE(4) COMMA true> > >));
+ IF_ENABLED_TRIS(DEFINE_INTERSECTOR1(BVH4Triangle4vMBIntersector1Moeller, BVHNIntersector1<4 COMMA BVH_AN2_AN4D COMMA false COMMA ArrayIntersector1<TriangleMvMBIntersector1Moeller <4 COMMA true> > >));
+ IF_ENABLED_TRIS(DEFINE_INTERSECTOR1(BVH4Triangle4iMBIntersector1Moeller, BVHNIntersector1<4 COMMA BVH_AN2_AN4D COMMA false COMMA ArrayIntersector1<TriangleMiMBIntersector1Moeller <4 COMMA true> > >));
+ IF_ENABLED_TRIS(DEFINE_INTERSECTOR1(BVH4Triangle4vMBIntersector1Pluecker,BVHNIntersector1<4 COMMA BVH_AN2_AN4D COMMA true COMMA ArrayIntersector1<TriangleMvMBIntersector1Pluecker<4 COMMA true> > >));
+ IF_ENABLED_TRIS(DEFINE_INTERSECTOR1(BVH4Triangle4iMBIntersector1Pluecker,BVHNIntersector1<4 COMMA BVH_AN2_AN4D COMMA true COMMA ArrayIntersector1<TriangleMiMBIntersector1Pluecker<4 COMMA true> > >));
IF_ENABLED_QUADS(DEFINE_INTERSECTOR1(BVH4Quad4vIntersector1Moeller, BVHNIntersector1<4 COMMA BVH_AN1 COMMA false COMMA ArrayIntersector1<QuadMvIntersector1Moeller <4 COMMA true> > >));
IF_ENABLED_QUADS(DEFINE_INTERSECTOR1(BVH4Quad4iIntersector1Moeller, BVHNIntersector1<4 COMMA BVH_AN1 COMMA false COMMA ArrayIntersector1<QuadMiIntersector1Moeller <4 COMMA true> > >));
@@ -48,7 +48,7 @@ namespace embree
IF_ENABLED_INSTANCE(DEFINE_INTERSECTOR1(BVH4InstanceIntersector1,BVHNIntersector1<4 COMMA BVH_AN1 COMMA false COMMA ArrayIntersector1<InstanceIntersector1> >));
IF_ENABLED_INSTANCE(DEFINE_INTERSECTOR1(BVH4InstanceMBIntersector1,BVHNIntersector1<4 COMMA BVH_AN2_AN4D COMMA false COMMA ArrayIntersector1<InstanceIntersector1MB> >));
- IF_ENABLED_TRIS(DEFINE_INTERSECTOR1(QBVH4Triangle4iIntersector1Pluecker,BVHNIntersector1<4 COMMA BVH_QN1 COMMA false COMMA ArrayIntersector1<TriangleMiIntersector1Pluecker<SIMD_MODE(4) COMMA true> > >));
+ IF_ENABLED_TRIS(DEFINE_INTERSECTOR1(QBVH4Triangle4iIntersector1Pluecker,BVHNIntersector1<4 COMMA BVH_QN1 COMMA false COMMA ArrayIntersector1<TriangleMiIntersector1Pluecker<4 COMMA true> > >));
IF_ENABLED_QUADS(DEFINE_INTERSECTOR1(QBVH4Quad4iIntersector1Pluecker,BVHNIntersector1<4 COMMA BVH_QN1 COMMA false COMMA ArrayIntersector1<QuadMiIntersector1Pluecker<4 COMMA true> > >));
IF_ENABLED_GRIDS(DEFINE_INTERSECTOR1(BVH4GridIntersector1Moeller,BVHNIntersector1<4 COMMA BVH_AN1 COMMA false COMMA SubGridIntersector1Moeller<4 COMMA true> >));
diff --git a/thirdparty/embree-aarch64/kernels/bvh/bvh_intersector_hybrid.h b/thirdparty/embree/kernels/bvh/bvh_intersector_hybrid.h
index d764cc928d..50ebf375c4 100644
--- a/thirdparty/embree-aarch64/kernels/bvh/bvh_intersector_hybrid.h
+++ b/thirdparty/embree/kernels/bvh/bvh_intersector_hybrid.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
@@ -19,9 +19,6 @@ namespace embree
template<int N, int K, int types, bool robust, typename PrimitiveIntersectorK, bool single = true>
class BVHNIntersectorKHybrid
{
- /* right now AVX512KNL SIMD extension only for standard node types */
- static const size_t Nx = types == BVH_AN1 ? vextend<N>::size : N;
-
/* shortcuts for frequently used types */
typedef typename PrimitiveIntersectorK::Precalculations Precalculations;
typedef typename PrimitiveIntersectorK::Primitive Primitive;
diff --git a/thirdparty/embree-aarch64/kernels/bvh/bvh_intersector_stream.h b/thirdparty/embree/kernels/bvh/bvh_intersector_stream.h
index 83d1fb4d3d..717f559677 100644
--- a/thirdparty/embree-aarch64/kernels/bvh/bvh_intersector_stream.h
+++ b/thirdparty/embree/kernels/bvh/bvh_intersector_stream.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
@@ -12,11 +12,9 @@ namespace embree
namespace isa
{
/*! BVH ray stream intersector. */
- template<int N, int Nx, int types, bool robust, typename PrimitiveIntersector>
+ template<int N, int types, bool robust, typename PrimitiveIntersector>
class BVHNIntersectorStream
{
- static const int Nxd = (Nx == N) ? N : Nx/2;
-
/* shortcuts for frequently used types */
template<int K> using PrimitiveIntersectorK = typename PrimitiveIntersector::template Type<K>;
template<int K> using PrimitiveK = typename PrimitiveIntersectorK<K>::PrimitiveK;
@@ -128,13 +126,13 @@ namespace embree
const AABBNode* __restrict__ node,
const Frustum<robust>& frustum,
size_t* maskK,
- vfloat<Nx>& dist)
+ vfloat<N>& dist)
{
- size_t m_node_hit = intersectNodeFrustum<N,Nx>(node, frustum, dist);
+ size_t m_node_hit = intersectNodeFrustum<N>(node, frustum, dist);
const size_t first_index = bsf(m_active);
const size_t first_packetID = first_index / K;
const size_t first_rayID = first_index % K;
- size_t m_first_hit = intersectNode1<N,Nx>(node, packets[first_packetID], first_rayID, frustum.nf);
+ size_t m_first_hit = intersectNode1<N>(node, packets[first_packetID], first_rayID, frustum.nf);
         /* this makes traversal independent of the ordering of rays */
size_t m_node = m_node_hit ^ m_first_hit;
@@ -150,20 +148,20 @@ namespace embree
// TODO: explicit 16-wide path for KNL
template<int K>
- __forceinline static vint<Nx> traverseIncoherentStream(size_t m_active,
+ __forceinline static vint<N> traverseIncoherentStream(size_t m_active,
TravRayKStreamFast<K>* __restrict__ packets,
const AABBNode* __restrict__ node,
const NearFarPrecalculations& nf,
const int shiftTable[32])
{
- const vfloat<Nx> bminX = vfloat<Nx>(*(const vfloat<N>*)((const char*)&node->lower_x + nf.nearX));
- const vfloat<Nx> bminY = vfloat<Nx>(*(const vfloat<N>*)((const char*)&node->lower_x + nf.nearY));
- const vfloat<Nx> bminZ = vfloat<Nx>(*(const vfloat<N>*)((const char*)&node->lower_x + nf.nearZ));
- const vfloat<Nx> bmaxX = vfloat<Nx>(*(const vfloat<N>*)((const char*)&node->lower_x + nf.farX));
- const vfloat<Nx> bmaxY = vfloat<Nx>(*(const vfloat<N>*)((const char*)&node->lower_x + nf.farY));
- const vfloat<Nx> bmaxZ = vfloat<Nx>(*(const vfloat<N>*)((const char*)&node->lower_x + nf.farZ));
+ const vfloat<N> bminX = vfloat<N>(*(const vfloat<N>*)((const char*)&node->lower_x + nf.nearX));
+ const vfloat<N> bminY = vfloat<N>(*(const vfloat<N>*)((const char*)&node->lower_x + nf.nearY));
+ const vfloat<N> bminZ = vfloat<N>(*(const vfloat<N>*)((const char*)&node->lower_x + nf.nearZ));
+ const vfloat<N> bmaxX = vfloat<N>(*(const vfloat<N>*)((const char*)&node->lower_x + nf.farX));
+ const vfloat<N> bmaxY = vfloat<N>(*(const vfloat<N>*)((const char*)&node->lower_x + nf.farY));
+ const vfloat<N> bmaxZ = vfloat<N>(*(const vfloat<N>*)((const char*)&node->lower_x + nf.farZ));
assert(m_active);
- vint<Nx> vmask(zero);
+ vint<N> vmask(zero);
do
{
STAT3(shadow.trav_nodes,1,1,1);
@@ -171,58 +169,41 @@ namespace embree
assert(rayID < MAX_INTERNAL_STREAM_SIZE);
TravRayKStream<K,robust> &p = packets[rayID / K];
const size_t i = rayID % K;
- const vint<Nx> bitmask(shiftTable[rayID]);
-
-#if defined (__aarch64__)
- const vfloat<Nx> tNearX = madd(bminX, p.rdir.x[i], p.neg_org_rdir.x[i]);
- const vfloat<Nx> tNearY = madd(bminY, p.rdir.y[i], p.neg_org_rdir.y[i]);
- const vfloat<Nx> tNearZ = madd(bminZ, p.rdir.z[i], p.neg_org_rdir.z[i]);
- const vfloat<Nx> tFarX = madd(bmaxX, p.rdir.x[i], p.neg_org_rdir.x[i]);
- const vfloat<Nx> tFarY = madd(bmaxY, p.rdir.y[i], p.neg_org_rdir.y[i]);
- const vfloat<Nx> tFarZ = madd(bmaxZ, p.rdir.z[i], p.neg_org_rdir.z[i]);
-#else
- const vfloat<Nx> tNearX = msub(bminX, p.rdir.x[i], p.org_rdir.x[i]);
- const vfloat<Nx> tNearY = msub(bminY, p.rdir.y[i], p.org_rdir.y[i]);
- const vfloat<Nx> tNearZ = msub(bminZ, p.rdir.z[i], p.org_rdir.z[i]);
- const vfloat<Nx> tFarX = msub(bmaxX, p.rdir.x[i], p.org_rdir.x[i]);
- const vfloat<Nx> tFarY = msub(bmaxY, p.rdir.y[i], p.org_rdir.y[i]);
- const vfloat<Nx> tFarZ = msub(bmaxZ, p.rdir.z[i], p.org_rdir.z[i]);
-#endif
-
- const vfloat<Nx> tNear = maxi(tNearX, tNearY, tNearZ, vfloat<Nx>(p.tnear[i]));
- const vfloat<Nx> tFar = mini(tFarX , tFarY , tFarZ, vfloat<Nx>(p.tfar[i]));
-
-#if defined(__AVX512ER__)
- const vboolx m_node((1 << N)-1);
- const vbool<Nx> hit_mask = le(m_node, tNear, tFar);
- vmask = mask_or(hit_mask, vmask, vmask, bitmask);
-#else
- const vbool<Nx> hit_mask = tNear <= tFar;
+ const vint<N> bitmask(shiftTable[rayID]);
+ const vfloat<N> tNearX = msub(bminX, p.rdir.x[i], p.org_rdir.x[i]);
+ const vfloat<N> tNearY = msub(bminY, p.rdir.y[i], p.org_rdir.y[i]);
+ const vfloat<N> tNearZ = msub(bminZ, p.rdir.z[i], p.org_rdir.z[i]);
+ const vfloat<N> tFarX = msub(bmaxX, p.rdir.x[i], p.org_rdir.x[i]);
+ const vfloat<N> tFarY = msub(bmaxY, p.rdir.y[i], p.org_rdir.y[i]);
+ const vfloat<N> tFarZ = msub(bmaxZ, p.rdir.z[i], p.org_rdir.z[i]);
+ const vfloat<N> tNear = maxi(tNearX, tNearY, tNearZ, vfloat<N>(p.tnear[i]));
+ const vfloat<N> tFar = mini(tFarX , tFarY , tFarZ, vfloat<N>(p.tfar[i]));
+
+ const vbool<N> hit_mask = tNear <= tFar;
#if defined(__AVX2__)
- vmask = vmask | (bitmask & vint<Nx>(hit_mask));
+ vmask = vmask | (bitmask & vint<N>(hit_mask));
#else
vmask = select(hit_mask, vmask | bitmask, vmask);
#endif
-#endif
} while(m_active);
return vmask;
}
template<int K>
- __forceinline static vint<Nx> traverseIncoherentStream(size_t m_active,
+ __forceinline static vint<N> traverseIncoherentStream(size_t m_active,
TravRayKStreamRobust<K>* __restrict__ packets,
const AABBNode* __restrict__ node,
const NearFarPrecalculations& nf,
const int shiftTable[32])
{
- const vfloat<Nx> bminX = vfloat<Nx>(*(const vfloat<N>*)((const char*)&node->lower_x + nf.nearX));
- const vfloat<Nx> bminY = vfloat<Nx>(*(const vfloat<N>*)((const char*)&node->lower_x + nf.nearY));
- const vfloat<Nx> bminZ = vfloat<Nx>(*(const vfloat<N>*)((const char*)&node->lower_x + nf.nearZ));
- const vfloat<Nx> bmaxX = vfloat<Nx>(*(const vfloat<N>*)((const char*)&node->lower_x + nf.farX));
- const vfloat<Nx> bmaxY = vfloat<Nx>(*(const vfloat<N>*)((const char*)&node->lower_x + nf.farY));
- const vfloat<Nx> bmaxZ = vfloat<Nx>(*(const vfloat<N>*)((const char*)&node->lower_x + nf.farZ));
+ const vfloat<N> bminX = vfloat<N>(*(const vfloat<N>*)((const char*)&node->lower_x + nf.nearX));
+ const vfloat<N> bminY = vfloat<N>(*(const vfloat<N>*)((const char*)&node->lower_x + nf.nearY));
+ const vfloat<N> bminZ = vfloat<N>(*(const vfloat<N>*)((const char*)&node->lower_x + nf.nearZ));
+ const vfloat<N> bmaxX = vfloat<N>(*(const vfloat<N>*)((const char*)&node->lower_x + nf.farX));
+ const vfloat<N> bmaxY = vfloat<N>(*(const vfloat<N>*)((const char*)&node->lower_x + nf.farY));
+ const vfloat<N> bmaxZ = vfloat<N>(*(const vfloat<N>*)((const char*)&node->lower_x + nf.farZ));
assert(m_active);
- vint<Nx> vmask(zero);
+ vint<N> vmask(zero);
do
{
STAT3(shadow.trav_nodes,1,1,1);
@@ -230,29 +211,23 @@ namespace embree
assert(rayID < MAX_INTERNAL_STREAM_SIZE);
TravRayKStream<K,robust> &p = packets[rayID / K];
const size_t i = rayID % K;
- const vint<Nx> bitmask(shiftTable[rayID]);
- const vfloat<Nx> tNearX = (bminX - p.org.x[i]) * p.rdir.x[i];
- const vfloat<Nx> tNearY = (bminY - p.org.y[i]) * p.rdir.y[i];
- const vfloat<Nx> tNearZ = (bminZ - p.org.z[i]) * p.rdir.z[i];
- const vfloat<Nx> tFarX = (bmaxX - p.org.x[i]) * p.rdir.x[i];
- const vfloat<Nx> tFarY = (bmaxY - p.org.y[i]) * p.rdir.y[i];
- const vfloat<Nx> tFarZ = (bmaxZ - p.org.z[i]) * p.rdir.z[i];
- const vfloat<Nx> tNear = maxi(tNearX, tNearY, tNearZ, vfloat<Nx>(p.tnear[i]));
- const vfloat<Nx> tFar = mini(tFarX , tFarY , tFarZ, vfloat<Nx>(p.tfar[i]));
+ const vint<N> bitmask(shiftTable[rayID]);
+ const vfloat<N> tNearX = (bminX - p.org.x[i]) * p.rdir.x[i];
+ const vfloat<N> tNearY = (bminY - p.org.y[i]) * p.rdir.y[i];
+ const vfloat<N> tNearZ = (bminZ - p.org.z[i]) * p.rdir.z[i];
+ const vfloat<N> tFarX = (bmaxX - p.org.x[i]) * p.rdir.x[i];
+ const vfloat<N> tFarY = (bmaxY - p.org.y[i]) * p.rdir.y[i];
+ const vfloat<N> tFarZ = (bmaxZ - p.org.z[i]) * p.rdir.z[i];
+ const vfloat<N> tNear = maxi(tNearX, tNearY, tNearZ, vfloat<N>(p.tnear[i]));
+ const vfloat<N> tFar = mini(tFarX , tFarY , tFarZ, vfloat<N>(p.tfar[i]));
const float round_down = 1.0f-2.0f*float(ulp);
const float round_up = 1.0f+2.0f*float(ulp);
-#if defined(__AVX512ER__)
- const vboolx m_node((1 << N)-1);
- const vbool<Nx> hit_mask = le(m_node, round_down*tNear, round_up*tFar);
- vmask = mask_or(hit_mask, vmask, vmask, bitmask);
-#else
- const vbool<Nx> hit_mask = round_down*tNear <= round_up*tFar;
+ const vbool<N> hit_mask = round_down*tNear <= round_up*tFar;
#if defined(__AVX2__)
- vmask = vmask | (bitmask & vint<Nx>(hit_mask));
+ vmask = vmask | (bitmask & vint<N>(hit_mask));
#else
vmask = select(hit_mask, vmask | bitmask, vmask);
#endif
-#endif
} while(m_active);
return vmask;
}
@@ -277,7 +252,7 @@ namespace embree
/*! BVH ray stream intersector with direct fallback to packets. */
- template<int N, int Nx>
+ template<int N>
class BVHNIntersectorStreamPacketFallback
{
public:
diff --git a/thirdparty/embree-aarch64/kernels/bvh/bvh_intersector_stream_filters.h b/thirdparty/embree/kernels/bvh/bvh_intersector_stream_filters.h
index cdeb923637..e7df7c2ae2 100644
--- a/thirdparty/embree-aarch64/kernels/bvh/bvh_intersector_stream_filters.h
+++ b/thirdparty/embree/kernels/bvh/bvh_intersector_stream_filters.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
diff --git a/thirdparty/embree-aarch64/kernels/bvh/bvh_node_aabb.h b/thirdparty/embree/kernels/bvh/bvh_node_aabb.h
index baa4a8d805..57530692bc 100644
--- a/thirdparty/embree-aarch64/kernels/bvh/bvh_node_aabb.h
+++ b/thirdparty/embree/kernels/bvh/bvh_node_aabb.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
diff --git a/thirdparty/embree-aarch64/kernels/bvh/bvh_node_aabb_mb.h b/thirdparty/embree/kernels/bvh/bvh_node_aabb_mb.h
index 501f4bce5b..c4cea7d8ba 100644
--- a/thirdparty/embree-aarch64/kernels/bvh/bvh_node_aabb_mb.h
+++ b/thirdparty/embree/kernels/bvh/bvh_node_aabb_mb.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
diff --git a/thirdparty/embree-aarch64/kernels/bvh/bvh_node_aabb_mb4d.h b/thirdparty/embree/kernels/bvh/bvh_node_aabb_mb4d.h
index e968bbbc39..46a81d7581 100644
--- a/thirdparty/embree-aarch64/kernels/bvh/bvh_node_aabb_mb4d.h
+++ b/thirdparty/embree/kernels/bvh/bvh_node_aabb_mb4d.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
diff --git a/thirdparty/embree-aarch64/kernels/bvh/bvh_node_base.h b/thirdparty/embree/kernels/bvh/bvh_node_base.h
index 8268f3b932..a5570a7b9e 100644
--- a/thirdparty/embree-aarch64/kernels/bvh/bvh_node_base.h
+++ b/thirdparty/embree/kernels/bvh/bvh_node_base.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
diff --git a/thirdparty/embree-aarch64/kernels/bvh/bvh_node_obb.h b/thirdparty/embree/kernels/bvh/bvh_node_obb.h
index fa7cc08211..e6b500691e 100644
--- a/thirdparty/embree-aarch64/kernels/bvh/bvh_node_obb.h
+++ b/thirdparty/embree/kernels/bvh/bvh_node_obb.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
diff --git a/thirdparty/embree-aarch64/kernels/bvh/bvh_node_obb_mb.h b/thirdparty/embree/kernels/bvh/bvh_node_obb_mb.h
index 834cf5ec28..c06b1aea5e 100644
--- a/thirdparty/embree-aarch64/kernels/bvh/bvh_node_obb_mb.h
+++ b/thirdparty/embree/kernels/bvh/bvh_node_obb_mb.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
diff --git a/thirdparty/embree-aarch64/kernels/bvh/bvh_node_qaabb.h b/thirdparty/embree/kernels/bvh/bvh_node_qaabb.h
index 5212821f3f..2afc8c98e7 100644
--- a/thirdparty/embree-aarch64/kernels/bvh/bvh_node_qaabb.h
+++ b/thirdparty/embree/kernels/bvh/bvh_node_qaabb.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
diff --git a/thirdparty/embree-aarch64/kernels/bvh/bvh_node_ref.h b/thirdparty/embree/kernels/bvh/bvh_node_ref.h
index 0f6d4dac7e..6f6da758de 100644
--- a/thirdparty/embree-aarch64/kernels/bvh/bvh_node_ref.h
+++ b/thirdparty/embree/kernels/bvh/bvh_node_ref.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
@@ -102,7 +102,7 @@ namespace embree
/*! Sets the barrier bit. */
__forceinline void setBarrier() {
-#if defined(__X86_64__) || defined(__aarch64__)
+#if defined(__64BIT__)
assert(!isBarrier());
ptr |= barrier_mask;
#else
@@ -112,7 +112,7 @@ namespace embree
/*! Clears the barrier bit. */
__forceinline void clearBarrier() {
-#if defined(__X86_64__) || defined(__aarch64__)
+#if defined(__64BIT__)
ptr &= ~barrier_mask;
#else
assert(false);
diff --git a/thirdparty/embree-aarch64/kernels/bvh/bvh_refit.cpp b/thirdparty/embree/kernels/bvh/bvh_refit.cpp
index a273c21e8b..bf5c8538ba 100644
--- a/thirdparty/embree-aarch64/kernels/bvh/bvh_refit.cpp
+++ b/thirdparty/embree/kernels/bvh/bvh_refit.cpp
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#include "bvh_refit.h"
diff --git a/thirdparty/embree-aarch64/kernels/bvh/bvh_refit.h b/thirdparty/embree/kernels/bvh/bvh_refit.h
index 4aa9bdd7cc..09bb3d8da5 100644
--- a/thirdparty/embree-aarch64/kernels/bvh/bvh_refit.h
+++ b/thirdparty/embree/kernels/bvh/bvh_refit.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
diff --git a/thirdparty/embree-aarch64/kernels/bvh/bvh_rotate.cpp b/thirdparty/embree/kernels/bvh/bvh_rotate.cpp
index 2bb431bf0e..460bd60c62 100644
--- a/thirdparty/embree-aarch64/kernels/bvh/bvh_rotate.cpp
+++ b/thirdparty/embree/kernels/bvh/bvh_rotate.cpp
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#include "bvh_rotate.h"
diff --git a/thirdparty/embree-aarch64/kernels/bvh/bvh_rotate.h b/thirdparty/embree/kernels/bvh/bvh_rotate.h
index 009bef339e..61ef64a679 100644
--- a/thirdparty/embree-aarch64/kernels/bvh/bvh_rotate.h
+++ b/thirdparty/embree/kernels/bvh/bvh_rotate.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
diff --git a/thirdparty/embree-aarch64/kernels/bvh/bvh_statistics.cpp b/thirdparty/embree/kernels/bvh/bvh_statistics.cpp
index aa56035026..d857ff7d95 100644
--- a/thirdparty/embree-aarch64/kernels/bvh/bvh_statistics.cpp
+++ b/thirdparty/embree/kernels/bvh/bvh_statistics.cpp
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#include "bvh_statistics.h"
@@ -162,7 +162,7 @@ namespace embree
template class BVHNStatistics<8>;
#endif
-#if !defined(__AVX__) || (!defined(EMBREE_TARGET_SSE2) && !defined(EMBREE_TARGET_SSE42)) || defined(__aarch64__)
+#if !defined(__AVX__) || !defined(EMBREE_TARGET_SSE2) && !defined(EMBREE_TARGET_SSE42)
template class BVHNStatistics<4>;
#endif
}
diff --git a/thirdparty/embree-aarch64/kernels/bvh/bvh_statistics.h b/thirdparty/embree/kernels/bvh/bvh_statistics.h
index 73dfc6fbcc..a28e115f1c 100644
--- a/thirdparty/embree-aarch64/kernels/bvh/bvh_statistics.h
+++ b/thirdparty/embree/kernels/bvh/bvh_statistics.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
diff --git a/thirdparty/embree-aarch64/kernels/bvh/bvh_traverser1.h b/thirdparty/embree/kernels/bvh/bvh_traverser1.h
index 7f17084b81..8ce01b57f5 100644
--- a/thirdparty/embree-aarch64/kernels/bvh/bvh_traverser1.h
+++ b/thirdparty/embree/kernels/bvh/bvh_traverser1.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
@@ -14,213 +14,9 @@ namespace embree
namespace isa
{
/*! BVH regular node traversal for single rays. */
- template<int N, int Nx, int types>
+ template<int N, int types>
class BVHNNodeTraverser1Hit;
- /*! Helper functions for fast sorting using AVX512 instructions. */
-#if defined(__AVX512ER__)
-
- /* KNL code path */
- __forceinline void isort_update(vfloat16 &dist, vllong8 &ptr, const vfloat16 &d, const vllong8 &p)
- {
- const vfloat16 dist_shift = align_shift_right<15>(dist,dist);
- const vllong8 ptr_shift = align_shift_right<7>(ptr,ptr);
- const vbool16 m_geq = d >= dist;
- const vbool16 m_geq_shift = m_geq << 1;
- dist = select(m_geq,d,dist);
- ptr = select(vboold8(m_geq),p,ptr);
- dist = select(m_geq_shift,dist_shift,dist);
- ptr = select(vboold8(m_geq_shift),ptr_shift,ptr);
- }
-
- __forceinline void isort_quick_update(vfloat16 &dist, vllong8 &ptr, const vfloat16 &d, const vllong8 &p)
- {
- //dist = align_shift_right<15>(dist,d);
- //ptr = align_shift_right<7>(ptr,p);
- dist = align_shift_right<15>(dist,permute(d,vint16(zero)));
- ptr = align_shift_right<7>(ptr,permute(p,vllong8(zero)));
- }
-
- template<int N, int Nx, int types, class NodeRef, class BaseNode>
- __forceinline void traverseClosestHitAVX512(NodeRef& cur,
- size_t mask,
- const vfloat<Nx>& tNear,
- StackItemT<NodeRef>*& stackPtr,
- StackItemT<NodeRef>* stackEnd)
- {
- assert(mask != 0);
- const BaseNode* node = cur.baseNode();
-
- vllong8 children( vllong<N>::loadu((void*)node->children) );
- children = vllong8::compact((int)mask,children);
- vfloat16 distance = tNear;
- distance = vfloat16::compact((int)mask,distance,tNear);
-
- cur = toScalar(children);
- BVHN<N>::prefetch(cur,types);
-
- mask &= mask-1;
- if (likely(mask == 0)) return;
-
- /* 2 hits: order A0 B0 */
- const vllong8 c0(children);
- const vfloat16 d0(distance);
- children = align_shift_right<1>(children,children);
- distance = align_shift_right<1>(distance,distance);
- const vllong8 c1(children);
- const vfloat16 d1(distance);
-
- cur = toScalar(children);
- BVHN<N>::prefetch(cur,types);
-
- /* a '<' keeps the order for equal distances, scenes like powerplant largely benefit from it */
- const vboolf16 m_dist = d0 < d1;
- const vfloat16 dist_A0 = select(m_dist, d0, d1);
- const vfloat16 dist_B0 = select(m_dist, d1, d0);
- const vllong8 ptr_A0 = select(vboold8(m_dist), c0, c1);
- const vllong8 ptr_B0 = select(vboold8(m_dist), c1, c0);
-
- mask &= mask-1;
- if (likely(mask == 0)) {
- cur = toScalar(ptr_A0);
- stackPtr[0].ptr = toScalar(ptr_B0);
- *(float*)&stackPtr[0].dist = toScalar(dist_B0);
- stackPtr++;
- return;
- }
-
- /* 3 hits: order A1 B1 C1 */
-
- children = align_shift_right<1>(children,children);
- distance = align_shift_right<1>(distance,distance);
-
- const vllong8 c2(children);
- const vfloat16 d2(distance);
-
- cur = toScalar(children);
- BVHN<N>::prefetch(cur,types);
-
- const vboolf16 m_dist1 = dist_A0 <= d2;
- const vfloat16 dist_tmp_B1 = select(m_dist1, d2, dist_A0);
- const vllong8 ptr_A1 = select(vboold8(m_dist1), ptr_A0, c2);
- const vllong8 ptr_tmp_B1 = select(vboold8(m_dist1), c2, ptr_A0);
-
- const vboolf16 m_dist2 = dist_B0 <= dist_tmp_B1;
- const vfloat16 dist_B1 = select(m_dist2, dist_B0 , dist_tmp_B1);
- const vfloat16 dist_C1 = select(m_dist2, dist_tmp_B1, dist_B0);
- const vllong8 ptr_B1 = select(vboold8(m_dist2), ptr_B0, ptr_tmp_B1);
- const vllong8 ptr_C1 = select(vboold8(m_dist2), ptr_tmp_B1, ptr_B0);
-
- mask &= mask-1;
- if (likely(mask == 0)) {
- cur = toScalar(ptr_A1);
- stackPtr[0].ptr = toScalar(ptr_C1);
- *(float*)&stackPtr[0].dist = toScalar(dist_C1);
- stackPtr[1].ptr = toScalar(ptr_B1);
- *(float*)&stackPtr[1].dist = toScalar(dist_B1);
- stackPtr+=2;
- return;
- }
-
- /* 4 hits: order A2 B2 C2 D2 */
-
- const vfloat16 dist_A1 = select(m_dist1, dist_A0, d2);
-
- children = align_shift_right<1>(children,children);
- distance = align_shift_right<1>(distance,distance);
-
- const vllong8 c3(children);
- const vfloat16 d3(distance);
-
- cur = toScalar(children);
- BVHN<N>::prefetch(cur,types);
-
- const vboolf16 m_dist3 = dist_A1 <= d3;
- const vfloat16 dist_tmp_B2 = select(m_dist3, d3, dist_A1);
- const vllong8 ptr_A2 = select(vboold8(m_dist3), ptr_A1, c3);
- const vllong8 ptr_tmp_B2 = select(vboold8(m_dist3), c3, ptr_A1);
-
- const vboolf16 m_dist4 = dist_B1 <= dist_tmp_B2;
- const vfloat16 dist_B2 = select(m_dist4, dist_B1 , dist_tmp_B2);
- const vfloat16 dist_tmp_C2 = select(m_dist4, dist_tmp_B2, dist_B1);
- const vllong8 ptr_B2 = select(vboold8(m_dist4), ptr_B1, ptr_tmp_B2);
- const vllong8 ptr_tmp_C2 = select(vboold8(m_dist4), ptr_tmp_B2, ptr_B1);
-
- const vboolf16 m_dist5 = dist_C1 <= dist_tmp_C2;
- const vfloat16 dist_C2 = select(m_dist5, dist_C1 , dist_tmp_C2);
- const vfloat16 dist_D2 = select(m_dist5, dist_tmp_C2, dist_C1);
- const vllong8 ptr_C2 = select(vboold8(m_dist5), ptr_C1, ptr_tmp_C2);
- const vllong8 ptr_D2 = select(vboold8(m_dist5), ptr_tmp_C2, ptr_C1);
-
- mask &= mask-1;
- if (likely(mask == 0)) {
- cur = toScalar(ptr_A2);
- stackPtr[0].ptr = toScalar(ptr_D2);
- *(float*)&stackPtr[0].dist = toScalar(dist_D2);
- stackPtr[1].ptr = toScalar(ptr_C2);
- *(float*)&stackPtr[1].dist = toScalar(dist_C2);
- stackPtr[2].ptr = toScalar(ptr_B2);
- *(float*)&stackPtr[2].dist = toScalar(dist_B2);
- stackPtr+=3;
- return;
- }
-
- /* >=5 hits: reverse to descending order for writing to stack */
-
- const size_t hits = 4 + popcnt(mask);
- const vfloat16 dist_A2 = select(m_dist3, dist_A1, d3);
- vfloat16 dist(neg_inf);
- vllong8 ptr(zero);
-
-
- isort_quick_update(dist,ptr,dist_A2,ptr_A2);
- isort_quick_update(dist,ptr,dist_B2,ptr_B2);
- isort_quick_update(dist,ptr,dist_C2,ptr_C2);
- isort_quick_update(dist,ptr,dist_D2,ptr_D2);
-
- do {
-
- children = align_shift_right<1>(children,children);
- distance = align_shift_right<1>(distance,distance);
-
- cur = toScalar(children);
- BVHN<N>::prefetch(cur,types);
-
- const vfloat16 new_dist(permute(distance,vint16(zero)));
- const vllong8 new_ptr(permute(children,vllong8(zero)));
-
- mask &= mask-1;
- isort_update(dist,ptr,new_dist,new_ptr);
-
- } while(mask);
-
- const vboold8 m_stack_ptr(0x55); // 10101010 (lsb -> msb)
- const vboolf16 m_stack_dist(0x4444); // 0010001000100010 (lsb -> msb)
-
- /* extract current noderef */
- cur = toScalar(permute(ptr,vllong8(hits-1)));
- /* rearrange pointers to beginning of 16 bytes block */
- vllong8 stackElementA0;
- stackElementA0 = vllong8::expand(m_stack_ptr,ptr,stackElementA0);
- /* put distances in between */
- vuint16 stackElementA1((__m512i)stackElementA0);
- stackElementA1 = vuint16::expand(m_stack_dist,asUInt(dist),stackElementA1);
- /* write out first 4 x 16 bytes block to stack */
- vuint16::storeu(stackPtr,stackElementA1);
- /* get upper half of dist and ptr */
- dist = align_shift_right<4>(dist,dist);
- ptr = align_shift_right<4>(ptr,ptr);
- /* assemble and write out second block */
- vllong8 stackElementB0;
- stackElementB0 = vllong8::expand(m_stack_ptr,ptr,stackElementB0);
- vuint16 stackElementB1((__m512i)stackElementB0);
- stackElementB1 = vuint16::expand(m_stack_dist,asUInt(dist),stackElementB1);
- vuint16::storeu(stackPtr + 4,stackElementB1);
- /* increase stack pointer */
- stackPtr += hits-1;
- }
-#endif
-
#if defined(__AVX512VL__) // SKX
template<int N>
@@ -249,8 +45,8 @@ namespace embree
#endif
/* Specialization for BVH4. */
- template<int Nx, int types>
- class BVHNNodeTraverser1Hit<4, Nx, types>
+ template<int types>
+ class BVHNNodeTraverser1Hit<4, types>
{
typedef BVH4 BVH;
typedef BVH4::NodeRef NodeRef;
@@ -261,14 +57,11 @@ namespace embree
/* Traverses a node with at least one hit child. Optimized for finding the closest hit (intersection). */
static __forceinline void traverseClosestHit(NodeRef& cur,
size_t mask,
- const vfloat<Nx>& tNear,
+ const vfloat4& tNear,
StackItemT<NodeRef>*& stackPtr,
StackItemT<NodeRef>* stackEnd)
{
assert(mask != 0);
-#if defined(__AVX512ER__)
- traverseClosestHitAVX512<4,Nx,types,NodeRef,BaseNode>(cur,mask,tNear,stackPtr,stackEnd);
-#else
const BaseNode* node = cur.baseNode();
/*! one child is hit, continue with that child */
@@ -344,13 +137,12 @@ namespace embree
sort(stackPtr[-1],stackPtr[-2],stackPtr[-3],stackPtr[-4]);
cur = (NodeRef) stackPtr[-1].ptr; stackPtr--;
#endif
-#endif
}
/* Traverses a node with at least one hit child. Optimized for finding any hit (occlusion). */
static __forceinline void traverseAnyHit(NodeRef& cur,
size_t mask,
- const vfloat<Nx>& tNear,
+ const vfloat4& tNear,
NodeRef*& stackPtr,
NodeRef* stackEnd)
{
@@ -380,8 +172,8 @@ namespace embree
};
/* Specialization for BVH8. */
- template<int Nx, int types>
- class BVHNNodeTraverser1Hit<8, Nx, types>
+ template<int types>
+ class BVHNNodeTraverser1Hit<8, types>
{
typedef BVH8 BVH;
typedef BVH8::NodeRef NodeRef;
@@ -485,10 +277,10 @@ namespace embree
const size_t hits = 4 + popcnt(mask);
vint8 dist(INT_MIN); // this will work with -0.0f (0x80000000) as distance, isort_update uses >= to insert
- isort_quick_update(dist,dist_A2);
- isort_quick_update(dist,dist_B2);
- isort_quick_update(dist,dist_C2);
- isort_quick_update(dist,dist_D2);
+ isort_quick_update<8>(dist,dist_A2);
+ isort_quick_update<8>(dist,dist_B2);
+ isort_quick_update<8>(dist,dist_C2);
+ isort_quick_update<8>(dist,dist_D2);
do {
@@ -497,7 +289,7 @@ namespace embree
BVH::prefetch(cur,types);
const vint8 new_dist(permute(distance_i,vint8(zero)));
mask &= mask-1;
- isort_update(dist,new_dist);
+ isort_update<8>(dist,new_dist);
} while(mask);
@@ -518,14 +310,12 @@ namespace embree
public:
static __forceinline void traverseClosestHit(NodeRef& cur,
size_t mask,
- const vfloat<Nx>& tNear,
+ const vfloat8& tNear,
StackItemT<NodeRef>*& stackPtr,
StackItemT<NodeRef>* stackEnd)
{
assert(mask != 0);
-#if defined(__AVX512ER__)
- traverseClosestHitAVX512<8,Nx,types,NodeRef,BaseNode>(cur,mask,tNear,stackPtr,stackEnd);
-#elif defined(__AVX512VL__)
+#if defined(__AVX512VL__)
traverseClosestHitAVX512VL8<NodeRef,BaseNode>(cur,mask,tNear,stackPtr,stackEnd);
#else
@@ -644,7 +434,7 @@ namespace embree
static __forceinline void traverseAnyHit(NodeRef& cur,
size_t mask,
- const vfloat<Nx>& tNear,
+ const vfloat8& tNear,
NodeRef*& stackPtr,
NodeRef* stackEnd)
{
diff --git a/thirdparty/embree-aarch64/kernels/bvh/bvh_traverser_stream.h b/thirdparty/embree/kernels/bvh/bvh_traverser_stream.h
index 9c603babf0..852981e69d 100644
--- a/thirdparty/embree-aarch64/kernels/bvh/bvh_traverser_stream.h
+++ b/thirdparty/embree/kernels/bvh/bvh_traverser_stream.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
@@ -11,7 +11,7 @@ namespace embree
{
namespace isa
{
- template<int N, int Nx, int types>
+ template<int N, int types>
class BVHNNodeTraverserStreamHitCoherent
{
typedef BVHN<N> BVH;
@@ -22,8 +22,8 @@ namespace embree
template<class T>
static __forceinline void traverseClosestHit(NodeRef& cur,
size_t& m_trav_active,
- const vbool<Nx>& vmask,
- const vfloat<Nx>& tNear,
+ const vbool<N>& vmask,
+ const vfloat<N>& tNear,
const T* const tMask,
StackItemMaskCoherent*& stackPtr)
{
@@ -79,14 +79,9 @@ namespace embree
/*! slow path for more than two hits */
size_t hits = movemask(vmask);
- const vint<Nx> dist_i = select(vmask, (asInt(tNear) & 0xfffffff8) | vint<Nx>(step), 0);
- #if defined(__AVX512F__) && !defined(__AVX512VL__) // KNL
- const vint<N> tmp = extractN<N,0>(dist_i);
- const vint<Nx> dist_i_sorted = usort_descending(tmp);
- #else
- const vint<Nx> dist_i_sorted = usort_descending(dist_i);
- #endif
- const vint<Nx> sorted_index = dist_i_sorted & 7;
+ const vint<N> dist_i = select(vmask, (asInt(tNear) & 0xfffffff8) | vint<N>(step), 0);
+ const vint<N> dist_i_sorted = usort_descending(dist_i);
+ const vint<N> sorted_index = dist_i_sorted & 7;
size_t i = 0;
for (;;)
@@ -112,7 +107,7 @@ namespace embree
template<class T>
static __forceinline void traverseAnyHit(NodeRef& cur,
size_t& m_trav_active,
- const vbool<Nx>& vmask,
+ const vbool<N>& vmask,
const T* const tMask,
StackItemMaskCoherent*& stackPtr)
{
diff --git a/thirdparty/embree-aarch64/kernels/bvh/node_intersector.h b/thirdparty/embree/kernels/bvh/node_intersector.h
index a978c0c459..25edaf295d 100644
--- a/thirdparty/embree-aarch64/kernels/bvh/node_intersector.h
+++ b/thirdparty/embree/kernels/bvh/node_intersector.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
diff --git a/thirdparty/embree-aarch64/kernels/bvh/node_intersector1.h b/thirdparty/embree/kernels/bvh/node_intersector1.h
index aa0d4ba4d7..1ec4fc63fc 100644
--- a/thirdparty/embree-aarch64/kernels/bvh/node_intersector1.h
+++ b/thirdparty/embree/kernels/bvh/node_intersector1.h
@@ -1,19 +1,10 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
#include "node_intersector.h"
-#if defined(__AVX2__)
-#define __FMA_X4__
-#endif
-
-#if defined(__aarch64__)
-#define __FMA_X4__
-#endif
-
-
namespace embree
{
namespace isa
@@ -22,12 +13,12 @@ namespace embree
// Ray structure used in single-ray traversal
//////////////////////////////////////////////////////////////////////////////////////
- template<int N, int Nx, bool robust>
+ template<int N, bool robust>
struct TravRayBase;
/* Base (without tnear and tfar) */
- template<int N, int Nx>
- struct TravRayBase<N,Nx,false>
+ template<int N>
+ struct TravRayBase<N,false>
{
__forceinline TravRayBase() {}
@@ -38,15 +29,9 @@ namespace embree
org = Vec3vf<N>(ray_org.x,ray_org.y,ray_org.z);
dir = Vec3vf<N>(ray_dir.x,ray_dir.y,ray_dir.z);
rdir = Vec3vf<N>(ray_rdir.x,ray_rdir.y,ray_rdir.z);
-#if defined(__FMA_X4__)
+#if defined(__AVX2__) || defined(__ARM_NEON)
const Vec3fa ray_org_rdir = ray_org*ray_rdir;
-#if !defined(__aarch64__)
org_rdir = Vec3vf<N>(ray_org_rdir.x,ray_org_rdir.y,ray_org_rdir.z);
-#else
- //for aarch64, we do not have msub equal instruction, so we negeate orig and use madd
- //x86 will use msub
- neg_org_rdir = Vec3vf<N>(-ray_org_rdir.x,-ray_org_rdir.y,-ray_org_rdir.z);
-#endif
#endif
nearX = ray_rdir.x >= 0.0f ? 0*sizeof(vfloat<N>) : 1*sizeof(vfloat<N>);
nearY = ray_rdir.y >= 0.0f ? 2*sizeof(vfloat<N>) : 3*sizeof(vfloat<N>);
@@ -54,32 +39,18 @@ namespace embree
farX = nearX ^ sizeof(vfloat<N>);
farY = nearY ^ sizeof(vfloat<N>);
farZ = nearZ ^ sizeof(vfloat<N>);
-
-#if defined(__AVX512ER__) // KNL+
- /* optimization works only for 8-wide BVHs with 16-wide SIMD */
- const vint<16> id(step);
- const vint<16> id2 = align_shift_right<16/2>(id, id);
- permX = select(vfloat<16>(dir.x) >= 0.0f, id, id2);
- permY = select(vfloat<16>(dir.y) >= 0.0f, id, id2);
- permZ = select(vfloat<16>(dir.z) >= 0.0f, id, id2);
-#endif
-
}
template<int K>
- __forceinline TravRayBase(size_t k, const Vec3vf<K>& ray_org, const Vec3vf<K>& ray_dir,
- const Vec3vf<K>& ray_rdir, const Vec3vi<K>& nearXYZ,
- size_t flip = sizeof(vfloat<N>))
+ __forceinline void init(size_t k, const Vec3vf<K>& ray_org, const Vec3vf<K>& ray_dir,
+ const Vec3vf<K>& ray_rdir, const Vec3vi<K>& nearXYZ,
+ size_t flip = sizeof(vfloat<N>))
{
- org = Vec3vf<Nx>(ray_org.x[k], ray_org.y[k], ray_org.z[k]);
- dir = Vec3vf<Nx>(ray_dir.x[k], ray_dir.y[k], ray_dir.z[k]);
- rdir = Vec3vf<Nx>(ray_rdir.x[k], ray_rdir.y[k], ray_rdir.z[k]);
-#if defined(__FMA_X4__)
-#if !defined(__aarch64__)
- org_rdir = org*rdir;
-#else
- neg_org_rdir = -(org*rdir);
-#endif
+ org = Vec3vf<N>(ray_org.x[k], ray_org.y[k], ray_org.z[k]);
+ dir = Vec3vf<N>(ray_dir.x[k], ray_dir.y[k], ray_dir.z[k]);
+ rdir = Vec3vf<N>(ray_rdir.x[k], ray_rdir.y[k], ray_rdir.z[k]);
+#if defined(__AVX2__) || defined(__ARM_NEON)
+ org_rdir = org*rdir;
#endif
nearX = nearXYZ.x[k];
nearY = nearXYZ.y[k];
@@ -87,39 +58,20 @@ namespace embree
farX = nearX ^ flip;
farY = nearY ^ flip;
farZ = nearZ ^ flip;
-
-#if defined(__AVX512ER__) // KNL+
- /* optimization works only for 8-wide BVHs with 16-wide SIMD */
- const vint<16> id(step);
- const vint<16> id2 = align_shift_right<16/2>(id, id);
- permX = select(vfloat<16>(dir.x) >= 0.0f, id, id2);
- permY = select(vfloat<16>(dir.y) >= 0.0f, id, id2);
- permZ = select(vfloat<16>(dir.z) >= 0.0f, id, id2);
-#endif
}
Vec3fa org_xyz, dir_xyz;
- Vec3vf<Nx> org, dir, rdir;
-#if defined(__FMA_X4__)
-#if !defined(__aarch64__)
- Vec3vf<Nx> org_rdir;
-#else
- //aarch64 version are keeping negation of the org_rdir and use madd
- //x86 uses msub
- Vec3vf<Nx> neg_org_rdir;
-#endif
+ Vec3vf<N> org, dir, rdir;
+#if defined(__AVX2__) || defined(__ARM_NEON)
+ Vec3vf<N> org_rdir;
#endif
-#if defined(__AVX512ER__) // KNL+
- vint16 permX, permY, permZ;
-#endif
-
size_t nearX, nearY, nearZ;
size_t farX, farY, farZ;
};
/* Base (without tnear and tfar) */
- template<int N, int Nx>
- struct TravRayBase<N,Nx,true>
+ template<int N>
+ struct TravRayBase<N,true>
{
__forceinline TravRayBase() {}
@@ -135,34 +87,26 @@ namespace embree
dir = Vec3vf<N>(ray_dir.x,ray_dir.y,ray_dir.z);
rdir_near = Vec3vf<N>(ray_rdir_near.x,ray_rdir_near.y,ray_rdir_near.z);
rdir_far = Vec3vf<N>(ray_rdir_far .x,ray_rdir_far .y,ray_rdir_far .z);
+
nearX = ray_rdir_near.x >= 0.0f ? 0*sizeof(vfloat<N>) : 1*sizeof(vfloat<N>);
nearY = ray_rdir_near.y >= 0.0f ? 2*sizeof(vfloat<N>) : 3*sizeof(vfloat<N>);
nearZ = ray_rdir_near.z >= 0.0f ? 4*sizeof(vfloat<N>) : 5*sizeof(vfloat<N>);
farX = nearX ^ sizeof(vfloat<N>);
farY = nearY ^ sizeof(vfloat<N>);
farZ = nearZ ^ sizeof(vfloat<N>);
-
-#if defined(__AVX512ER__) // KNL+
- /* optimization works only for 8-wide BVHs with 16-wide SIMD */
- const vint<16> id(step);
- const vint<16> id2 = align_shift_right<16/2>(id, id);
- permX = select(vfloat<16>(dir.x) >= 0.0f, id, id2);
- permY = select(vfloat<16>(dir.y) >= 0.0f, id, id2);
- permZ = select(vfloat<16>(dir.z) >= 0.0f, id, id2);
-#endif
}
template<int K>
- __forceinline TravRayBase(size_t k, const Vec3vf<K>& ray_org, const Vec3vf<K>& ray_dir,
- const Vec3vf<K>& ray_rdir, const Vec3vi<K>& nearXYZ,
- size_t flip = sizeof(vfloat<N>))
+ __forceinline void init(size_t k, const Vec3vf<K>& ray_org, const Vec3vf<K>& ray_dir,
+ const Vec3vf<K>& ray_rdir, const Vec3vi<K>& nearXYZ,
+ size_t flip = sizeof(vfloat<N>))
{
- const vfloat<Nx> round_down = 1.0f-3.0f*float(ulp);
- const vfloat<Nx> round_up = 1.0f+3.0f*float(ulp);
- org = Vec3vf<Nx>(ray_org.x[k], ray_org.y[k], ray_org.z[k]);
- dir = Vec3vf<Nx>(ray_dir.x[k], ray_dir.y[k], ray_dir.z[k]);
- rdir_near = round_down*Vec3vf<Nx>(ray_rdir.x[k], ray_rdir.y[k], ray_rdir.z[k]);
- rdir_far = round_up *Vec3vf<Nx>(ray_rdir.x[k], ray_rdir.y[k], ray_rdir.z[k]);
+ const vfloat<N> round_down = 1.0f-3.0f*float(ulp);
+ const vfloat<N> round_up = 1.0f+3.0f*float(ulp);
+ org = Vec3vf<N>(ray_org.x[k], ray_org.y[k], ray_org.z[k]);
+ dir = Vec3vf<N>(ray_dir.x[k], ray_dir.y[k], ray_dir.z[k]);
+ rdir_near = round_down*Vec3vf<N>(ray_rdir.x[k], ray_rdir.y[k], ray_rdir.z[k]);
+ rdir_far = round_up *Vec3vf<N>(ray_rdir.x[k], ray_rdir.y[k], ray_rdir.z[k]);
nearX = nearXYZ.x[k];
nearY = nearXYZ.y[k];
@@ -170,47 +114,36 @@ namespace embree
farX = nearX ^ flip;
farY = nearY ^ flip;
farZ = nearZ ^ flip;
-
-#if defined(__AVX512ER__) // KNL+
- /* optimization works only for 8-wide BVHs with 16-wide SIMD */
- const vint<16> id(step);
- const vint<16> id2 = align_shift_right<16/2>(id, id);
- permX = select(vfloat<16>(dir.x) >= 0.0f, id, id2);
- permY = select(vfloat<16>(dir.y) >= 0.0f, id, id2);
- permZ = select(vfloat<16>(dir.z) >= 0.0f, id, id2);
-#endif
}
Vec3fa org_xyz, dir_xyz;
- Vec3vf<Nx> org, dir, rdir_near, rdir_far;
-#if defined(__AVX512ER__) // KNL+
- vint16 permX, permY, permZ;
-#endif
-
+ Vec3vf<N> org, dir, rdir_near, rdir_far;
size_t nearX, nearY, nearZ;
size_t farX, farY, farZ;
};
/* Full (with tnear and tfar) */
- template<int N, int Nx, bool robust>
- struct TravRay : TravRayBase<N,Nx,robust>
+ template<int N, bool robust>
+ struct TravRay : TravRayBase<N,robust>
{
__forceinline TravRay() {}
__forceinline TravRay(const Vec3fa& ray_org, const Vec3fa& ray_dir, float ray_tnear, float ray_tfar)
- : TravRayBase<N,Nx,robust>(ray_org, ray_dir),
+ : TravRayBase<N,robust>(ray_org, ray_dir),
tnear(ray_tnear), tfar(ray_tfar) {}
template<int K>
- __forceinline TravRay(size_t k, const Vec3vf<K>& ray_org, const Vec3vf<K>& ray_dir,
- const Vec3vf<K>& ray_rdir, const Vec3vi<K>& nearXYZ,
- float ray_tnear, float ray_tfar,
- size_t flip = sizeof(vfloat<N>))
- : TravRayBase<N,Nx,robust>(k, ray_org, ray_dir, ray_rdir, nearXYZ, flip),
- tnear(ray_tnear), tfar(ray_tfar) {}
+ __forceinline void init(size_t k, const Vec3vf<K>& ray_org, const Vec3vf<K>& ray_dir,
+ const Vec3vf<K>& ray_rdir, const Vec3vi<K>& nearXYZ,
+ float ray_tnear, float ray_tfar,
+ size_t flip = sizeof(vfloat<N>))
+ {
+ TravRayBase<N,robust>::template init<K>(k, ray_org, ray_dir, ray_rdir, nearXYZ, flip);
+ tnear = ray_tnear; tfar = ray_tfar;
+ }
- vfloat<Nx> tnear;
- vfloat<Nx> tfar;
+ vfloat<N> tnear;
+ vfloat<N> tfar;
};
//////////////////////////////////////////////////////////////////////////////////////
@@ -465,28 +398,19 @@ namespace embree
// Fast AABBNode intersection
//////////////////////////////////////////////////////////////////////////////////////
- template<int N, int Nx, bool robust>
- __forceinline size_t intersectNode(const typename BVHN<N>::AABBNode* node, const TravRay<N,Nx,robust>& ray, vfloat<Nx>& dist);
+ template<int N, bool robust>
+ __forceinline size_t intersectNode(const typename BVHN<N>::AABBNode* node, const TravRay<N,robust>& ray, vfloat<N>& dist);
template<>
- __forceinline size_t intersectNode<4,4>(const typename BVH4::AABBNode* node, const TravRay<4,4,false>& ray, vfloat4& dist)
+ __forceinline size_t intersectNode<4>(const typename BVH4::AABBNode* node, const TravRay<4,false>& ray, vfloat4& dist)
{
-#if defined(__FMA_X4__)
-#if defined(__aarch64__)
- const vfloat4 tNearX = madd(vfloat4::load((float*)((const char*)&node->lower_x+ray.nearX)), ray.rdir.x, ray.neg_org_rdir.x);
- const vfloat4 tNearY = madd(vfloat4::load((float*)((const char*)&node->lower_x+ray.nearY)), ray.rdir.y, ray.neg_org_rdir.y);
- const vfloat4 tNearZ = madd(vfloat4::load((float*)((const char*)&node->lower_x+ray.nearZ)), ray.rdir.z, ray.neg_org_rdir.z);
- const vfloat4 tFarX = madd(vfloat4::load((float*)((const char*)&node->lower_x+ray.farX )), ray.rdir.x, ray.neg_org_rdir.x);
- const vfloat4 tFarY = madd(vfloat4::load((float*)((const char*)&node->lower_x+ray.farY )), ray.rdir.y, ray.neg_org_rdir.y);
- const vfloat4 tFarZ = madd(vfloat4::load((float*)((const char*)&node->lower_x+ray.farZ )), ray.rdir.z, ray.neg_org_rdir.z);
-#else
+#if defined(__AVX2__) || defined(__ARM_NEON)
const vfloat4 tNearX = msub(vfloat4::load((float*)((const char*)&node->lower_x+ray.nearX)), ray.rdir.x, ray.org_rdir.x);
const vfloat4 tNearY = msub(vfloat4::load((float*)((const char*)&node->lower_x+ray.nearY)), ray.rdir.y, ray.org_rdir.y);
const vfloat4 tNearZ = msub(vfloat4::load((float*)((const char*)&node->lower_x+ray.nearZ)), ray.rdir.z, ray.org_rdir.z);
const vfloat4 tFarX = msub(vfloat4::load((float*)((const char*)&node->lower_x+ray.farX )), ray.rdir.x, ray.org_rdir.x);
const vfloat4 tFarY = msub(vfloat4::load((float*)((const char*)&node->lower_x+ray.farY )), ray.rdir.y, ray.org_rdir.y);
const vfloat4 tFarZ = msub(vfloat4::load((float*)((const char*)&node->lower_x+ray.farZ )), ray.rdir.z, ray.org_rdir.z);
-#endif
#else
const vfloat4 tNearX = (vfloat4::load((float*)((const char*)&node->lower_x+ray.nearX)) - ray.org.x) * ray.rdir.x;
const vfloat4 tNearY = (vfloat4::load((float*)((const char*)&node->lower_x+ray.nearY)) - ray.org.y) * ray.rdir.y;
@@ -506,7 +430,7 @@ namespace embree
const vfloat4 tFar = mini(tFarX ,tFarY ,tFarZ ,ray.tfar);
const vbool4 vmask = asInt(tNear) > asInt(tFar);
const size_t mask = movemask(vmask) ^ ((1<<4)-1);
-#elif defined(__AVX512F__) && !defined(__AVX512ER__) // SKX
+#elif defined(__AVX512F__) // SKX
const vfloat4 tNear = maxi(tNearX,tNearY,tNearZ,ray.tnear);
const vfloat4 tFar = mini(tFarX ,tFarY ,tFarZ ,ray.tfar);
const vbool4 vmask = asInt(tNear) <= asInt(tFar);
@@ -524,25 +448,15 @@ namespace embree
#if defined(__AVX__)
template<>
- __forceinline size_t intersectNode<8,8>(const typename BVH8::AABBNode* node, const TravRay<8,8,false>& ray, vfloat8& dist)
+ __forceinline size_t intersectNode<8>(const typename BVH8::AABBNode* node, const TravRay<8,false>& ray, vfloat8& dist)
{
-#if defined(__AVX2__)
-#if defined(__aarch64__)
- const vfloat8 tNearX = madd(vfloat8::load((float*)((const char*)&node->lower_x+ray.nearX)), ray.rdir.x, ray.neg_org_rdir.x);
- const vfloat8 tNearY = madd(vfloat8::load((float*)((const char*)&node->lower_x+ray.nearY)), ray.rdir.y, ray.neg_org_rdir.y);
- const vfloat8 tNearZ = madd(vfloat8::load((float*)((const char*)&node->lower_x+ray.nearZ)), ray.rdir.z, ray.neg_org_rdir.z);
- const vfloat8 tFarX = madd(vfloat8::load((float*)((const char*)&node->lower_x+ray.farX )), ray.rdir.x, ray.neg_org_rdir.x);
- const vfloat8 tFarY = madd(vfloat8::load((float*)((const char*)&node->lower_x+ray.farY )), ray.rdir.y, ray.neg_org_rdir.y);
- const vfloat8 tFarZ = madd(vfloat8::load((float*)((const char*)&node->lower_x+ray.farZ )), ray.rdir.z, ray.neg_org_rdir.z);
-#else
+#if defined(__AVX2__) || defined(__ARM_NEON)
const vfloat8 tNearX = msub(vfloat8::load((float*)((const char*)&node->lower_x+ray.nearX)), ray.rdir.x, ray.org_rdir.x);
const vfloat8 tNearY = msub(vfloat8::load((float*)((const char*)&node->lower_x+ray.nearY)), ray.rdir.y, ray.org_rdir.y);
const vfloat8 tNearZ = msub(vfloat8::load((float*)((const char*)&node->lower_x+ray.nearZ)), ray.rdir.z, ray.org_rdir.z);
const vfloat8 tFarX = msub(vfloat8::load((float*)((const char*)&node->lower_x+ray.farX )), ray.rdir.x, ray.org_rdir.x);
const vfloat8 tFarY = msub(vfloat8::load((float*)((const char*)&node->lower_x+ray.farY )), ray.rdir.y, ray.org_rdir.y);
const vfloat8 tFarZ = msub(vfloat8::load((float*)((const char*)&node->lower_x+ray.farZ )), ray.rdir.z, ray.org_rdir.z);
-#endif
-
#else
const vfloat8 tNearX = (vfloat8::load((float*)((const char*)&node->lower_x+ray.nearX)) - ray.org.x) * ray.rdir.x;
const vfloat8 tNearY = (vfloat8::load((float*)((const char*)&node->lower_x+ray.nearY)) - ray.org.y) * ray.rdir.y;
@@ -557,7 +471,7 @@ namespace embree
const vfloat8 tFar = mini(tFarX ,tFarY ,tFarZ ,ray.tfar);
const vbool8 vmask = asInt(tNear) > asInt(tFar);
const size_t mask = movemask(vmask) ^ ((1<<8)-1);
-#elif defined(__AVX512F__) && !defined(__AVX512ER__) // SKX
+#elif defined(__AVX512F__) // SKX
const vfloat8 tNear = maxi(tNearX,tNearY,tNearZ,ray.tnear);
const vfloat8 tFar = mini(tFarX ,tFarY ,tFarZ ,ray.tfar);
const vbool8 vmask = asInt(tNear) <= asInt(tFar);
@@ -574,52 +488,12 @@ namespace embree
#endif
-#if defined(__AVX512F__) && !defined(__AVX512VL__) // KNL
-
- template<>
- __forceinline size_t intersectNode<4,16>(const typename BVH4::AABBNode* node, const TravRay<4,16,false>& ray, vfloat16& dist)
- {
- const vfloat16 tNearX = msub(vfloat16(*(vfloat4*)((const char*)&node->lower_x+ray.nearX)), ray.rdir.x, ray.org_rdir.x);
- const vfloat16 tNearY = msub(vfloat16(*(vfloat4*)((const char*)&node->lower_x+ray.nearY)), ray.rdir.y, ray.org_rdir.y);
- const vfloat16 tNearZ = msub(vfloat16(*(vfloat4*)((const char*)&node->lower_x+ray.nearZ)), ray.rdir.z, ray.org_rdir.z);
- const vfloat16 tFarX = msub(vfloat16(*(vfloat4*)((const char*)&node->lower_x+ray.farX )), ray.rdir.x, ray.org_rdir.x);
- const vfloat16 tFarY = msub(vfloat16(*(vfloat4*)((const char*)&node->lower_x+ray.farY )), ray.rdir.y, ray.org_rdir.y);
- const vfloat16 tFarZ = msub(vfloat16(*(vfloat4*)((const char*)&node->lower_x+ray.farZ )), ray.rdir.z, ray.org_rdir.z);
- const vfloat16 tNear = max(tNearX,tNearY,tNearZ,ray.tnear);
- const vfloat16 tFar = min(tFarX ,tFarY ,tFarZ ,ray.tfar);
- const vbool16 vmask = le(vbool16(0xf),tNear,tFar);
- const size_t mask = movemask(vmask);
- dist = tNear;
- return mask;
- }
-
- template<>
- __forceinline size_t intersectNode<8,16>(const typename BVH8::AABBNode* node, const TravRay<8,16,false>& ray, vfloat16& dist)
- {
- const vllong8 invalid((size_t)BVH8::emptyNode);
- const vboold8 m_valid(invalid != vllong8::loadu(node->children));
- const vfloat16 bminmaxX = permute(vfloat16::load((const float*)&node->lower_x), ray.permX);
- const vfloat16 bminmaxY = permute(vfloat16::load((const float*)&node->lower_y), ray.permY);
- const vfloat16 bminmaxZ = permute(vfloat16::load((const float*)&node->lower_z), ray.permZ);
- const vfloat16 tNearFarX = msub(bminmaxX, ray.rdir.x, ray.org_rdir.x);
- const vfloat16 tNearFarY = msub(bminmaxY, ray.rdir.y, ray.org_rdir.y);
- const vfloat16 tNearFarZ = msub(bminmaxZ, ray.rdir.z, ray.org_rdir.z);
- const vfloat16 tNear = max(tNearFarX, tNearFarY, tNearFarZ, ray.tnear);
- const vfloat16 tFar = min(tNearFarX, tNearFarY, tNearFarZ, ray.tfar);
- const vbool16 vmask = le(vboolf16(m_valid),tNear,align_shift_right<8>(tFar, tFar));
- const size_t mask = movemask(vmask);
- dist = tNear;
- return mask;
- }
-
-#endif
-
//////////////////////////////////////////////////////////////////////////////////////
// Robust AABBNode intersection
//////////////////////////////////////////////////////////////////////////////////////
- template<int N, int Nx>
- __forceinline size_t intersectNodeRobust(const typename BVHN<N>::AABBNode* node, const TravRay<N,Nx,true>& ray, vfloat<Nx>& dist)
+ template<int N>
+ __forceinline size_t intersectNodeRobust(const typename BVHN<N>::AABBNode* node, const TravRay<N,true>& ray, vfloat<N>& dist)
{
const vfloat<N> tNearX = (vfloat<N>::load((float*)((const char*)&node->lower_x+ray.nearX)) - ray.org.x) * ray.rdir_near.x;
const vfloat<N> tNearY = (vfloat<N>::load((float*)((const char*)&node->lower_x+ray.nearY)) - ray.org.y) * ray.rdir_near.y;
@@ -635,50 +509,12 @@ namespace embree
return mask;
}
-#if defined(__AVX512F__) && !defined(__AVX512VL__) // KNL
-
- template<>
- __forceinline size_t intersectNodeRobust<4,16>(const typename BVHN<4>::AABBNode* node, const TravRay<4,16,true>& ray, vfloat<16>& dist)
- {
- const vfloat16 tNearX = (vfloat16(*(vfloat<4>*)((const char*)&node->lower_x+ray.nearX)) - ray.org.x) * ray.rdir_near.x;
- const vfloat16 tNearY = (vfloat16(*(vfloat<4>*)((const char*)&node->lower_x+ray.nearY)) - ray.org.y) * ray.rdir_near.y;
- const vfloat16 tNearZ = (vfloat16(*(vfloat<4>*)((const char*)&node->lower_x+ray.nearZ)) - ray.org.z) * ray.rdir_near.z;
- const vfloat16 tFarX = (vfloat16(*(vfloat<4>*)((const char*)&node->lower_x+ray.farX )) - ray.org.x) * ray.rdir_far.x;
- const vfloat16 tFarY = (vfloat16(*(vfloat<4>*)((const char*)&node->lower_x+ray.farY )) - ray.org.y) * ray.rdir_far.y;
- const vfloat16 tFarZ = (vfloat16(*(vfloat<4>*)((const char*)&node->lower_x+ray.farZ )) - ray.org.z) * ray.rdir_far.z;
- const vfloat16 tNear = max(tNearX,tNearY,tNearZ,ray.tnear);
- const vfloat16 tFar = min(tFarX ,tFarY ,tFarZ ,ray.tfar);
- const vbool16 vmask = le((1 << 4)-1,tNear,tFar);
- const size_t mask = movemask(vmask);
- dist = tNear;
- return mask;
- }
-
- template<>
- __forceinline size_t intersectNodeRobust<8,16>(const typename BVHN<8>::AABBNode* node, const TravRay<8,16,true>& ray, vfloat<16>& dist)
- {
- const vfloat16 tNearX = (vfloat16(*(vfloat<8>*)((const char*)&node->lower_x+ray.nearX)) - ray.org.x) * ray.rdir_near.x;
- const vfloat16 tNearY = (vfloat16(*(vfloat<8>*)((const char*)&node->lower_x+ray.nearY)) - ray.org.y) * ray.rdir_near.y;
- const vfloat16 tNearZ = (vfloat16(*(vfloat<8>*)((const char*)&node->lower_x+ray.nearZ)) - ray.org.z) * ray.rdir_near.z;
- const vfloat16 tFarX = (vfloat16(*(vfloat<8>*)((const char*)&node->lower_x+ray.farX )) - ray.org.x) * ray.rdir_far.x;
- const vfloat16 tFarY = (vfloat16(*(vfloat<8>*)((const char*)&node->lower_x+ray.farY )) - ray.org.y) * ray.rdir_far.y;
- const vfloat16 tFarZ = (vfloat16(*(vfloat<8>*)((const char*)&node->lower_x+ray.farZ )) - ray.org.z) * ray.rdir_far.z;
- const vfloat16 tNear = max(tNearX,tNearY,tNearZ,ray.tnear);
- const vfloat16 tFar = min(tFarX ,tFarY ,tFarZ ,ray.tfar);
- const vbool16 vmask = le((1 << 8)-1,tNear,tFar);
- const size_t mask = movemask(vmask);
- dist = tNear;
- return mask;
- }
-
-#endif
-
//////////////////////////////////////////////////////////////////////////////////////
// Fast AABBNodeMB intersection
//////////////////////////////////////////////////////////////////////////////////////
template<int N>
- __forceinline size_t intersectNode(const typename BVHN<N>::AABBNodeMB* node, const TravRay<N,N,false>& ray, const float time, vfloat<N>& dist)
+ __forceinline size_t intersectNode(const typename BVHN<N>::AABBNodeMB* node, const TravRay<N,false>& ray, const float time, vfloat<N>& dist)
{
const vfloat<N>* pNearX = (const vfloat<N>*)((const char*)&node->lower_x+ray.nearX);
const vfloat<N>* pNearY = (const vfloat<N>*)((const char*)&node->lower_x+ray.nearY);
@@ -686,22 +522,13 @@ namespace embree
const vfloat<N>* pFarX = (const vfloat<N>*)((const char*)&node->lower_x+ray.farX);
const vfloat<N>* pFarY = (const vfloat<N>*)((const char*)&node->lower_x+ray.farY);
const vfloat<N>* pFarZ = (const vfloat<N>*)((const char*)&node->lower_x+ray.farZ);
-#if defined(__FMA_X4__)
-#if defined(__aarch64__)
- const vfloat<N> tNearX = madd(madd(time,pNearX[6],vfloat<N>(pNearX[0])), ray.rdir.x, ray.neg_org_rdir.x);
- const vfloat<N> tNearY = madd(madd(time,pNearY[6],vfloat<N>(pNearY[0])), ray.rdir.y, ray.neg_org_rdir.y);
- const vfloat<N> tNearZ = madd(madd(time,pNearZ[6],vfloat<N>(pNearZ[0])), ray.rdir.z, ray.neg_org_rdir.z);
- const vfloat<N> tFarX = madd(madd(time,pFarX [6],vfloat<N>(pFarX [0])), ray.rdir.x, ray.neg_org_rdir.x);
- const vfloat<N> tFarY = madd(madd(time,pFarY [6],vfloat<N>(pFarY [0])), ray.rdir.y, ray.neg_org_rdir.y);
- const vfloat<N> tFarZ = madd(madd(time,pFarZ [6],vfloat<N>(pFarZ [0])), ray.rdir.z, ray.neg_org_rdir.z);
-#else
+#if defined(__AVX2__) || defined(__ARM_NEON)
const vfloat<N> tNearX = msub(madd(time,pNearX[6],vfloat<N>(pNearX[0])), ray.rdir.x, ray.org_rdir.x);
const vfloat<N> tNearY = msub(madd(time,pNearY[6],vfloat<N>(pNearY[0])), ray.rdir.y, ray.org_rdir.y);
const vfloat<N> tNearZ = msub(madd(time,pNearZ[6],vfloat<N>(pNearZ[0])), ray.rdir.z, ray.org_rdir.z);
const vfloat<N> tFarX = msub(madd(time,pFarX [6],vfloat<N>(pFarX [0])), ray.rdir.x, ray.org_rdir.x);
const vfloat<N> tFarY = msub(madd(time,pFarY [6],vfloat<N>(pFarY [0])), ray.rdir.y, ray.org_rdir.y);
const vfloat<N> tFarZ = msub(madd(time,pFarZ [6],vfloat<N>(pFarZ [0])), ray.rdir.z, ray.org_rdir.z);
-#endif
#else
const vfloat<N> tNearX = (madd(time,pNearX[6],vfloat<N>(pNearX[0])) - ray.org.x) * ray.rdir.x;
const vfloat<N> tNearY = (madd(time,pNearY[6],vfloat<N>(pNearY[0])) - ray.org.y) * ray.rdir.y;
@@ -710,12 +537,12 @@ namespace embree
const vfloat<N> tFarY = (madd(time,pFarY [6],vfloat<N>(pFarY [0])) - ray.org.y) * ray.rdir.y;
const vfloat<N> tFarZ = (madd(time,pFarZ [6],vfloat<N>(pFarZ [0])) - ray.org.z) * ray.rdir.z;
#endif
-#if defined(__FMA_X4__) && !defined(__AVX512F__) // HSW
+#if defined(__AVX2__) && !defined(__AVX512F__) // HSW
const vfloat<N> tNear = maxi(tNearX,tNearY,tNearZ,ray.tnear);
const vfloat<N> tFar = mini(tFarX ,tFarY ,tFarZ ,ray.tfar);
const vbool<N> vmask = asInt(tNear) > asInt(tFar);
const size_t mask = movemask(vmask) ^ ((1<<N)-1);
-#elif defined(__AVX512F__) && !defined(__AVX512ER__) // SKX
+#elif defined(__AVX512F__) // SKX
const vfloat<N> tNear = maxi(tNearX,tNearY,tNearZ,ray.tnear);
const vfloat<N> tFar = mini(tFarX ,tFarY ,tFarZ ,ray.tfar);
const vbool<N> vmask = asInt(tNear) <= asInt(tFar);
@@ -735,7 +562,7 @@ namespace embree
//////////////////////////////////////////////////////////////////////////////////////
template<int N>
- __forceinline size_t intersectNodeRobust(const typename BVHN<N>::AABBNodeMB* node, const TravRay<N,N,true>& ray, const float time, vfloat<N>& dist)
+ __forceinline size_t intersectNodeRobust(const typename BVHN<N>::AABBNodeMB* node, const TravRay<N,true>& ray, const float time, vfloat<N>& dist)
{
const vfloat<N>* pNearX = (const vfloat<N>*)((const char*)&node->lower_x+ray.nearX);
const vfloat<N>* pNearY = (const vfloat<N>*)((const char*)&node->lower_x+ray.nearY);
@@ -761,7 +588,7 @@ namespace embree
//////////////////////////////////////////////////////////////////////////////////////
template<int N>
- __forceinline size_t intersectNodeMB4D(const typename BVHN<N>::NodeRef ref, const TravRay<N,N,false>& ray, const float time, vfloat<N>& dist)
+ __forceinline size_t intersectNodeMB4D(const typename BVHN<N>::NodeRef ref, const TravRay<N,false>& ray, const float time, vfloat<N>& dist)
{
const typename BVHN<N>::AABBNodeMB* node = ref.getAABBNodeMB();
@@ -771,22 +598,13 @@ namespace embree
const vfloat<N>* pFarX = (const vfloat<N>*)((const char*)&node->lower_x+ray.farX);
const vfloat<N>* pFarY = (const vfloat<N>*)((const char*)&node->lower_x+ray.farY);
const vfloat<N>* pFarZ = (const vfloat<N>*)((const char*)&node->lower_x+ray.farZ);
-#if defined (__FMA_X4__)
-#if defined(__aarch64__)
- const vfloat<N> tNearX = madd(madd(time,pNearX[6],vfloat<N>(pNearX[0])), ray.rdir.x, ray.neg_org_rdir.x);
- const vfloat<N> tNearY = madd(madd(time,pNearY[6],vfloat<N>(pNearY[0])), ray.rdir.y, ray.neg_org_rdir.y);
- const vfloat<N> tNearZ = madd(madd(time,pNearZ[6],vfloat<N>(pNearZ[0])), ray.rdir.z, ray.neg_org_rdir.z);
- const vfloat<N> tFarX = madd(madd(time,pFarX [6],vfloat<N>(pFarX [0])), ray.rdir.x, ray.neg_org_rdir.x);
- const vfloat<N> tFarY = madd(madd(time,pFarY [6],vfloat<N>(pFarY [0])), ray.rdir.y, ray.neg_org_rdir.y);
- const vfloat<N> tFarZ = madd(madd(time,pFarZ [6],vfloat<N>(pFarZ [0])), ray.rdir.z, ray.neg_org_rdir.z);
-#else
+#if defined (__AVX2__) || defined(__ARM_NEON)
const vfloat<N> tNearX = msub(madd(time,pNearX[6],vfloat<N>(pNearX[0])), ray.rdir.x, ray.org_rdir.x);
const vfloat<N> tNearY = msub(madd(time,pNearY[6],vfloat<N>(pNearY[0])), ray.rdir.y, ray.org_rdir.y);
const vfloat<N> tNearZ = msub(madd(time,pNearZ[6],vfloat<N>(pNearZ[0])), ray.rdir.z, ray.org_rdir.z);
const vfloat<N> tFarX = msub(madd(time,pFarX [6],vfloat<N>(pFarX [0])), ray.rdir.x, ray.org_rdir.x);
const vfloat<N> tFarY = msub(madd(time,pFarY [6],vfloat<N>(pFarY [0])), ray.rdir.y, ray.org_rdir.y);
const vfloat<N> tFarZ = msub(madd(time,pFarZ [6],vfloat<N>(pFarZ [0])), ray.rdir.z, ray.org_rdir.z);
-#endif
#else
const vfloat<N> tNearX = (madd(time,pNearX[6],vfloat<N>(pNearX[0])) - ray.org.x) * ray.rdir.x;
const vfloat<N> tNearY = (madd(time,pNearY[6],vfloat<N>(pNearY[0])) - ray.org.y) * ray.rdir.y;
@@ -795,7 +613,7 @@ namespace embree
const vfloat<N> tFarY = (madd(time,pFarY [6],vfloat<N>(pFarY [0])) - ray.org.y) * ray.rdir.y;
const vfloat<N> tFarZ = (madd(time,pFarZ [6],vfloat<N>(pFarZ [0])) - ray.org.z) * ray.rdir.z;
#endif
-#if defined(__FMA_X4__) && !defined(__AVX512F__)
+#if defined(__AVX2__) && !defined(__AVX512F__)
const vfloat<N> tNear = maxi(maxi(tNearX,tNearY),maxi(tNearZ,ray.tnear));
const vfloat<N> tFar = mini(mini(tFarX ,tFarY ),mini(tFarZ ,ray.tfar ));
#else
@@ -817,7 +635,7 @@ namespace embree
//////////////////////////////////////////////////////////////////////////////////////
template<int N>
- __forceinline size_t intersectNodeMB4DRobust(const typename BVHN<N>::NodeRef ref, const TravRay<N,N,true>& ray, const float time, vfloat<N>& dist)
+ __forceinline size_t intersectNodeMB4DRobust(const typename BVHN<N>::NodeRef ref, const TravRay<N,true>& ray, const float time, vfloat<N>& dist)
{
const typename BVHN<N>::AABBNodeMB* node = ref.getAABBNodeMB();
@@ -849,11 +667,11 @@ namespace embree
// Fast QuantizedBaseNode intersection
//////////////////////////////////////////////////////////////////////////////////////
- template<int N, int Nx, bool robust>
- __forceinline size_t intersectNode(const typename BVHN<N>::QuantizedBaseNode* node, const TravRay<N,Nx,robust>& ray, vfloat<Nx>& dist);
+ template<int N, bool robust>
+ __forceinline size_t intersectNode(const typename BVHN<N>::QuantizedBaseNode* node, const TravRay<N,robust>& ray, vfloat<N>& dist);
template<>
- __forceinline size_t intersectNode<4,4>(const typename BVH4::QuantizedBaseNode* node, const TravRay<4,4,false>& ray, vfloat4& dist)
+ __forceinline size_t intersectNode<4>(const typename BVH4::QuantizedBaseNode* node, const TravRay<4,false>& ray, vfloat4& dist)
{
const size_t mvalid = movemask(node->validMask());
const vfloat4 start_x(node->start.x);
@@ -869,22 +687,13 @@ namespace embree
const vfloat4 lower_z = madd(node->dequantize<4>(ray.nearZ >> 2),scale_z,start_z);
const vfloat4 upper_z = madd(node->dequantize<4>(ray.farZ >> 2),scale_z,start_z);
-#if defined(__FMA_X4__)
-#if defined(__aarch64__)
- const vfloat4 tNearX = madd(lower_x, ray.rdir.x, ray.neg_org_rdir.x);
- const vfloat4 tNearY = madd(lower_y, ray.rdir.y, ray.neg_org_rdir.y);
- const vfloat4 tNearZ = madd(lower_z, ray.rdir.z, ray.neg_org_rdir.z);
- const vfloat4 tFarX = madd(upper_x, ray.rdir.x, ray.neg_org_rdir.x);
- const vfloat4 tFarY = madd(upper_y, ray.rdir.y, ray.neg_org_rdir.y);
- const vfloat4 tFarZ = madd(upper_z, ray.rdir.z, ray.neg_org_rdir.z);
-#else
+#if defined(__AVX2__) || defined(__ARM_NEON)
const vfloat4 tNearX = msub(lower_x, ray.rdir.x, ray.org_rdir.x);
const vfloat4 tNearY = msub(lower_y, ray.rdir.y, ray.org_rdir.y);
const vfloat4 tNearZ = msub(lower_z, ray.rdir.z, ray.org_rdir.z);
const vfloat4 tFarX = msub(upper_x, ray.rdir.x, ray.org_rdir.x);
const vfloat4 tFarY = msub(upper_y, ray.rdir.y, ray.org_rdir.y);
const vfloat4 tFarZ = msub(upper_z, ray.rdir.z, ray.org_rdir.z);
-#endif
#else
const vfloat4 tNearX = (lower_x - ray.org.x) * ray.rdir.x;
const vfloat4 tNearY = (lower_y - ray.org.y) * ray.rdir.y;
@@ -894,12 +703,12 @@ namespace embree
const vfloat4 tFarZ = (upper_z - ray.org.z) * ray.rdir.z;
#endif
-#if (defined(__aarch64__) && defined(BUILD_IOS)) || defined(__SSE4_1__) && !defined(__AVX512F__) // up to HSW
+#if defined(__SSE4_1__) && !defined(__AVX512F__) // up to HSW
const vfloat4 tNear = maxi(tNearX,tNearY,tNearZ,ray.tnear);
const vfloat4 tFar = mini(tFarX ,tFarY ,tFarZ ,ray.tfar);
const vbool4 vmask = asInt(tNear) > asInt(tFar);
const size_t mask = movemask(vmask) ^ ((1<<4)-1);
-#elif defined(__AVX512F__) && !defined(__AVX512ER__) // SKX
+#elif defined(__AVX512F__) // SKX
const vfloat4 tNear = maxi(tNearX,tNearY,tNearZ,ray.tnear);
const vfloat4 tFar = mini(tFarX ,tFarY ,tFarZ ,ray.tfar);
const vbool4 vmask = asInt(tNear) <= asInt(tFar);
@@ -915,7 +724,7 @@ namespace embree
}
template<>
- __forceinline size_t intersectNode<4,4>(const typename BVH4::QuantizedBaseNode* node, const TravRay<4,4,true>& ray, vfloat4& dist)
+ __forceinline size_t intersectNode<4>(const typename BVH4::QuantizedBaseNode* node, const TravRay<4,true>& ray, vfloat4& dist)
{
const size_t mvalid = movemask(node->validMask());
const vfloat4 start_x(node->start.x);
@@ -950,7 +759,7 @@ namespace embree
#if defined(__AVX__)
template<>
- __forceinline size_t intersectNode<8,8>(const typename BVH8::QuantizedBaseNode* node, const TravRay<8,8,false>& ray, vfloat8& dist)
+ __forceinline size_t intersectNode<8>(const typename BVH8::QuantizedBaseNode* node, const TravRay<8,false>& ray, vfloat8& dist)
{
const size_t mvalid = movemask(node->validMask());
const vfloat8 start_x(node->start.x);
@@ -966,22 +775,13 @@ namespace embree
const vfloat8 lower_z = madd(node->dequantize<8>(ray.nearZ >> 2),scale_z,start_z);
const vfloat8 upper_z = madd(node->dequantize<8>(ray.farZ >> 2),scale_z,start_z);
-#if defined(__AVX2__)
-#if defined(__aarch64__)
- const vfloat8 tNearX = madd(lower_x, ray.rdir.x, ray.neg_org_rdir.x);
- const vfloat8 tNearY = madd(lower_y, ray.rdir.y, ray.neg_org_rdir.y);
- const vfloat8 tNearZ = madd(lower_z, ray.rdir.z, ray.neg_org_rdir.z);
- const vfloat8 tFarX = madd(upper_x, ray.rdir.x, ray.neg_org_rdir.x);
- const vfloat8 tFarY = madd(upper_y, ray.rdir.y, ray.neg_org_rdir.y);
- const vfloat8 tFarZ = madd(upper_z, ray.rdir.z, ray.neg_org_rdir.z);
-#else
+#if defined(__AVX2__) || defined(__ARM_NEON)
const vfloat8 tNearX = msub(lower_x, ray.rdir.x, ray.org_rdir.x);
const vfloat8 tNearY = msub(lower_y, ray.rdir.y, ray.org_rdir.y);
const vfloat8 tNearZ = msub(lower_z, ray.rdir.z, ray.org_rdir.z);
const vfloat8 tFarX = msub(upper_x, ray.rdir.x, ray.org_rdir.x);
const vfloat8 tFarY = msub(upper_y, ray.rdir.y, ray.org_rdir.y);
const vfloat8 tFarZ = msub(upper_z, ray.rdir.z, ray.org_rdir.z);
-#endif
#else
const vfloat8 tNearX = (lower_x - ray.org.x) * ray.rdir.x;
const vfloat8 tNearY = (lower_y - ray.org.y) * ray.rdir.y;
@@ -996,7 +796,7 @@ namespace embree
const vfloat8 tFar = mini(tFarX ,tFarY ,tFarZ ,ray.tfar);
const vbool8 vmask = asInt(tNear) > asInt(tFar);
const size_t mask = movemask(vmask) ^ ((1<<8)-1);
-#elif defined(__AVX512F__) && !defined(__AVX512ER__) // SKX
+#elif defined(__AVX512F__) // SKX
const vfloat8 tNear = maxi(tNearX,tNearY,tNearZ,ray.tnear);
const vfloat8 tFar = mini(tFarX ,tFarY ,tFarZ ,ray.tfar);
const vbool8 vmask = asInt(tNear) <= asInt(tFar);
@@ -1012,7 +812,7 @@ namespace embree
}
template<>
- __forceinline size_t intersectNode<8,8>(const typename BVH8::QuantizedBaseNode* node, const TravRay<8,8,true>& ray, vfloat8& dist)
+ __forceinline size_t intersectNode<8>(const typename BVH8::QuantizedBaseNode* node, const TravRay<8,true>& ray, vfloat8& dist)
{
const size_t mvalid = movemask(node->validMask());
const vfloat8 start_x(node->start.x);
@@ -1047,113 +847,8 @@ namespace embree
#endif
-#if defined(__AVX512F__) && !defined(__AVX512VL__) // KNL
-
- template<>
- __forceinline size_t intersectNode<4,16>(const typename BVH4::QuantizedBaseNode* node, const TravRay<4,16,false>& ray, vfloat16& dist)
- {
- const size_t mvalid = movemask(node->validMask());
- const vfloat16 start_x(node->start.x);
- const vfloat16 scale_x(node->scale.x);
- const vfloat16 lower_x = madd(vfloat16(node->dequantize<4>(ray.nearX >> 2)),scale_x,start_x);
- const vfloat16 upper_x = madd(vfloat16(node->dequantize<4>(ray.farX >> 2)),scale_x,start_x);
- const vfloat16 start_y(node->start.y);
- const vfloat16 scale_y(node->scale.y);
- const vfloat16 lower_y = madd(vfloat16(node->dequantize<4>(ray.nearY >> 2)),scale_y,start_y);
- const vfloat16 upper_y = madd(vfloat16(node->dequantize<4>(ray.farY >> 2)),scale_y,start_y);
- const vfloat16 start_z(node->start.z);
- const vfloat16 scale_z(node->scale.z);
- const vfloat16 lower_z = madd(vfloat16(node->dequantize<4>(ray.nearZ >> 2)),scale_z,start_z);
- const vfloat16 upper_z = madd(vfloat16(node->dequantize<4>(ray.farZ >> 2)),scale_z,start_z);
-
- const vfloat16 tNearX = msub(lower_x, ray.rdir.x, ray.org_rdir.x);
- const vfloat16 tNearY = msub(lower_y, ray.rdir.y, ray.org_rdir.y);
- const vfloat16 tNearZ = msub(lower_z, ray.rdir.z, ray.org_rdir.z);
- const vfloat16 tFarX = msub(upper_x, ray.rdir.x, ray.org_rdir.x);
- const vfloat16 tFarY = msub(upper_y, ray.rdir.y, ray.org_rdir.y);
- const vfloat16 tFarZ = msub(upper_z, ray.rdir.z, ray.org_rdir.z);
- const vfloat16 tNear = max(tNearX,tNearY,tNearZ,ray.tnear);
- const vfloat16 tFar = min(tFarX ,tFarY ,tFarZ ,ray.tfar);
- const vbool16 vmask = le(vbool16(0xf),tNear,tFar);
- const size_t mask = movemask(vmask) & mvalid;
- dist = tNear;
- return mask;
- }
-
- template<>
- __forceinline size_t intersectNode<4,16>(const typename BVH4::QuantizedBaseNode* node, const TravRay<4,16,true>& ray, vfloat16& dist)
- {
- const size_t mvalid = movemask(node->validMask());
- const vfloat16 start_x(node->start.x);
- const vfloat16 scale_x(node->scale.x);
- const vfloat16 lower_x = madd(vfloat16(node->dequantize<4>(ray.nearX >> 2)),scale_x,start_x);
- const vfloat16 upper_x = madd(vfloat16(node->dequantize<4>(ray.farX >> 2)),scale_x,start_x);
- const vfloat16 start_y(node->start.y);
- const vfloat16 scale_y(node->scale.y);
- const vfloat16 lower_y = madd(vfloat16(node->dequantize<4>(ray.nearY >> 2)),scale_y,start_y);
- const vfloat16 upper_y = madd(vfloat16(node->dequantize<4>(ray.farY >> 2)),scale_y,start_y);
- const vfloat16 start_z(node->start.z);
- const vfloat16 scale_z(node->scale.z);
- const vfloat16 lower_z = madd(vfloat16(node->dequantize<4>(ray.nearZ >> 2)),scale_z,start_z);
- const vfloat16 upper_z = madd(vfloat16(node->dequantize<4>(ray.farZ >> 2)),scale_z,start_z);
-
- const vfloat16 tNearX = (lower_x - ray.org.x) * ray.rdir_near.x;
- const vfloat16 tNearY = (lower_y - ray.org.y) * ray.rdir_near.y;
- const vfloat16 tNearZ = (lower_z - ray.org.z) * ray.rdir_near.z;
- const vfloat16 tFarX = (upper_x - ray.org.x) * ray.rdir_far.x;
- const vfloat16 tFarY = (upper_y - ray.org.y) * ray.rdir_far.y;
- const vfloat16 tFarZ = (upper_z - ray.org.z) * ray.rdir_far.z;
-
- const vfloat16 tNear = max(tNearX,tNearY,tNearZ,ray.tnear);
- const vfloat16 tFar = min(tFarX ,tFarY ,tFarZ ,ray.tfar);
- const vbool16 vmask = le(vbool16(0xf),tNear,tFar);
- const size_t mask = movemask(vmask) & mvalid;
- dist = tNear;
- return mask;
- }
-
- template<>
- __forceinline size_t intersectNode<8,16>(const typename BVH8::QuantizedBaseNode* node, const TravRay<8,16,false>& ray, vfloat16& dist)
- {
- const vbool16 m_valid(node->validMask16());
- const vfloat16 bminmaxX = node->dequantizeLowerUpperX(ray.permX);
- const vfloat16 bminmaxY = node->dequantizeLowerUpperY(ray.permY);
- const vfloat16 bminmaxZ = node->dequantizeLowerUpperZ(ray.permZ);
- const vfloat16 tNearFarX = msub(bminmaxX, ray.rdir.x, ray.org_rdir.x);
- const vfloat16 tNearFarY = msub(bminmaxY, ray.rdir.y, ray.org_rdir.y);
- const vfloat16 tNearFarZ = msub(bminmaxZ, ray.rdir.z, ray.org_rdir.z);
- const vfloat16 tNear = max(tNearFarX, tNearFarY, tNearFarZ, ray.tnear);
- const vfloat16 tFar = min(tNearFarX, tNearFarY, tNearFarZ, ray.tfar);
- const vbool16 vmask = le(m_valid,tNear,align_shift_right<8>(tFar, tFar));
- const size_t mask = movemask(vmask);
- dist = tNear;
- return mask;
- }
-
- template<>
- __forceinline size_t intersectNode<8,16>(const typename BVH8::QuantizedBaseNode* node, const TravRay<8,16,true>& ray, vfloat16& dist)
- {
- const vbool16 m_valid(node->validMask16());
- const vfloat16 bminmaxX = node->dequantizeLowerUpperX(ray.permX);
- const vfloat16 bminmaxY = node->dequantizeLowerUpperY(ray.permY);
- const vfloat16 bminmaxZ = node->dequantizeLowerUpperZ(ray.permZ);
- const vfloat16 tNearFarX = (bminmaxX - ray.org.x) * ray.rdir_far.x; // FIXME: this is not conservative !!!!!!!!!
- const vfloat16 tNearFarY = (bminmaxY - ray.org.y) * ray.rdir_far.y;
- const vfloat16 tNearFarZ = (bminmaxZ - ray.org.z) * ray.rdir_far.z;
- const vfloat16 tNear = max(tNearFarX, tNearFarY, tNearFarZ, ray.tnear);
- const vfloat16 tFar = min(tNearFarX, tNearFarY, tNearFarZ, ray.tfar);
- const vbool16 vmask = le(m_valid,tNear,align_shift_right<8>(tFar, tFar));
- const size_t mask = movemask(vmask);
- dist = tNear;
- return mask;
- }
-
-
-#endif
-
-
- template<int N, int Nx>
- __forceinline size_t intersectNode(const typename BVHN<N>::QuantizedBaseNodeMB* node, const TravRay<N,Nx,false>& ray, const float time, vfloat<N>& dist)
+ template<int N>
+ __forceinline size_t intersectNode(const typename BVHN<N>::QuantizedBaseNodeMB* node, const TravRay<N,false>& ray, const float time, vfloat<N>& dist)
{
const vboolf<N> mvalid = node->validMask();
const vfloat<N> lower_x = node->dequantizeLowerX(time);
@@ -1162,22 +857,13 @@ namespace embree
const vfloat<N> upper_y = node->dequantizeUpperY(time);
const vfloat<N> lower_z = node->dequantizeLowerZ(time);
const vfloat<N> upper_z = node->dequantizeUpperZ(time);
-#if defined(__FMA_X4__)
-#if defined(__aarch64__)
- const vfloat<N> tNearX = madd(lower_x, ray.rdir.x, ray.neg_org_rdir.x);
- const vfloat<N> tNearY = madd(lower_y, ray.rdir.y, ray.neg_org_rdir.y);
- const vfloat<N> tNearZ = madd(lower_z, ray.rdir.z, ray.neg_org_rdir.z);
- const vfloat<N> tFarX = madd(upper_x, ray.rdir.x, ray.neg_org_rdir.x);
- const vfloat<N> tFarY = madd(upper_y, ray.rdir.y, ray.neg_org_rdir.y);
- const vfloat<N> tFarZ = madd(upper_z, ray.rdir.z, ray.neg_org_rdir.z);
-#else
+#if defined(__AVX2__) || defined(__ARM_NEON)
const vfloat<N> tNearX = msub(lower_x, ray.rdir.x, ray.org_rdir.x);
const vfloat<N> tNearY = msub(lower_y, ray.rdir.y, ray.org_rdir.y);
const vfloat<N> tNearZ = msub(lower_z, ray.rdir.z, ray.org_rdir.z);
const vfloat<N> tFarX = msub(upper_x, ray.rdir.x, ray.org_rdir.x);
const vfloat<N> tFarY = msub(upper_y, ray.rdir.y, ray.org_rdir.y);
const vfloat<N> tFarZ = msub(upper_z, ray.rdir.z, ray.org_rdir.z);
-#endif
#else
const vfloat<N> tNearX = (lower_x - ray.org.x) * ray.rdir.x;
const vfloat<N> tNearY = (lower_y - ray.org.y) * ray.rdir.y;
@@ -1195,7 +881,7 @@ namespace embree
const vfloat<N> tmaxZ = maxi(tNearZ,tFarZ);
const vfloat<N> tNear = maxi(tminX,tminY,tminZ,ray.tnear);
const vfloat<N> tFar = mini(tmaxX,tmaxY,tmaxZ,ray.tfar);
-#if defined(__AVX512F__) && !defined(__AVX512ER__) // SKX
+#if defined(__AVX512F__) // SKX
const vbool<N> vmask = le(mvalid,asInt(tNear),asInt(tFar));
#else
const vbool<N> vmask = (asInt(tNear) <= asInt(tFar)) & mvalid;
@@ -1205,8 +891,8 @@ namespace embree
return mask;
}
- template<int N, int Nx>
- __forceinline size_t intersectNode(const typename BVHN<N>::QuantizedBaseNodeMB* node, const TravRay<N,Nx,true>& ray, const float time, vfloat<N>& dist)
+ template<int N>
+ __forceinline size_t intersectNode(const typename BVHN<N>::QuantizedBaseNodeMB* node, const TravRay<N,true>& ray, const float time, vfloat<N>& dist)
{
const vboolf<N> mvalid = node->validMask();
const vfloat<N> lower_x = node->dequantizeLowerX(time);
@@ -1230,7 +916,7 @@ namespace embree
const vfloat<N> tmaxZ = maxi(tNearZ,tFarZ);
const vfloat<N> tNear = maxi(tminX,tminY,tminZ,ray.tnear);
const vfloat<N> tFar = mini(tmaxX,tmaxY,tmaxZ,ray.tfar);
-#if defined(__AVX512F__) && !defined(__AVX512ER__) // SKX
+#if defined(__AVX512F__) // SKX
const vbool<N> vmask = le(mvalid,asInt(tNear),asInt(tFar));
#else
const vbool<N> vmask = (asInt(tNear) <= asInt(tFar)) & mvalid;
@@ -1240,83 +926,12 @@ namespace embree
return mask;
}
-
-#if defined(__AVX512ER__)
- // for KNL
- template<>
- __forceinline size_t intersectNode<4,16>(const typename BVHN<4>::QuantizedBaseNodeMB* node, const TravRay<4,16,false>& ray, const float time, vfloat<4>& dist)
- {
- const size_t mvalid = movemask(node->validMask());
- const vfloat16 lower_x = node->dequantizeLowerX(time);
- const vfloat16 upper_x = node->dequantizeUpperX(time);
- const vfloat16 lower_y = node->dequantizeLowerY(time);
- const vfloat16 upper_y = node->dequantizeUpperY(time);
- const vfloat16 lower_z = node->dequantizeLowerZ(time);
- const vfloat16 upper_z = node->dequantizeUpperZ(time);
-
- const vfloat16 tNearX = msub(lower_x, ray.rdir.x, ray.org_rdir.x);
- const vfloat16 tNearY = msub(lower_y, ray.rdir.y, ray.org_rdir.y);
- const vfloat16 tNearZ = msub(lower_z, ray.rdir.z, ray.org_rdir.z);
- const vfloat16 tFarX = msub(upper_x, ray.rdir.x, ray.org_rdir.x);
- const vfloat16 tFarY = msub(upper_y, ray.rdir.y, ray.org_rdir.y);
- const vfloat16 tFarZ = msub(upper_z, ray.rdir.z, ray.org_rdir.z);
-
- const vfloat16 tminX = min(tNearX,tFarX);
- const vfloat16 tmaxX = max(tNearX,tFarX);
- const vfloat16 tminY = min(tNearY,tFarY);
- const vfloat16 tmaxY = max(tNearY,tFarY);
- const vfloat16 tminZ = min(tNearZ,tFarZ);
- const vfloat16 tmaxZ = max(tNearZ,tFarZ);
- const vfloat16 tNear = max(tminX,tminY,tminZ,ray.tnear);
- const vfloat16 tFar = min(tmaxX,tmaxY,tmaxZ,ray.tfar );
- const vbool16 vmask = tNear <= tFar;
- const size_t mask = movemask(vmask) & mvalid;
- dist = extractN<4,0>(tNear);
- return mask;
- }
-
-
- // for KNL
- template<>
- __forceinline size_t intersectNode<4,16>(const typename BVHN<4>::QuantizedBaseNodeMB* node, const TravRay<4,16,true>& ray, const float time, vfloat<4>& dist)
- {
- const size_t mvalid = movemask(node->validMask());
- const vfloat16 lower_x = node->dequantizeLowerX(time);
- const vfloat16 upper_x = node->dequantizeUpperX(time);
- const vfloat16 lower_y = node->dequantizeLowerY(time);
- const vfloat16 upper_y = node->dequantizeUpperY(time);
- const vfloat16 lower_z = node->dequantizeLowerZ(time);
- const vfloat16 upper_z = node->dequantizeUpperZ(time);
-
- const vfloat16 tNearX = (lower_x - ray.org.x) * ray.rdir_near.x;
- const vfloat16 tNearY = (lower_y - ray.org.y) * ray.rdir_near.y;
- const vfloat16 tNearZ = (lower_z - ray.org.z) * ray.rdir_near.z;
- const vfloat16 tFarX = (upper_x - ray.org.x) * ray.rdir_far.x;
- const vfloat16 tFarY = (upper_y - ray.org.y) * ray.rdir_far.y;
- const vfloat16 tFarZ = (upper_z - ray.org.z) * ray.rdir_far.z;
-
- const vfloat16 tminX = min(tNearX,tFarX);
- const vfloat16 tmaxX = max(tNearX,tFarX);
- const vfloat16 tminY = min(tNearY,tFarY);
- const vfloat16 tmaxY = max(tNearY,tFarY);
- const vfloat16 tminZ = min(tNearZ,tFarZ);
- const vfloat16 tmaxZ = max(tNearZ,tFarZ);
- const vfloat16 tNear = max(tminX,tminY,tminZ,ray.tnear);
- const vfloat16 tFar = min(tmaxX,tmaxY,tmaxZ,ray.tfar );
- const vbool16 vmask = tNear <= tFar;
- const size_t mask = movemask(vmask) & mvalid;
- dist = extractN<4,0>(tNear);
- return mask;
- }
-
-#endif
-
//////////////////////////////////////////////////////////////////////////////////////
// Fast OBBNode intersection
//////////////////////////////////////////////////////////////////////////////////////
template<int N, bool robust>
- __forceinline size_t intersectNode(const typename BVHN<N>::OBBNode* node, const TravRay<N,N,robust>& ray, vfloat<N>& dist)
+ __forceinline size_t intersectNode(const typename BVHN<N>::OBBNode* node, const TravRay<N,robust>& ray, vfloat<N>& dist)
{
const Vec3vf<N> dir = xfmVector(node->naabb,ray.dir);
//const Vec3vf<N> nrdir = Vec3vf<N>(vfloat<N>(-1.0f))/dir;
@@ -1347,7 +962,7 @@ namespace embree
//////////////////////////////////////////////////////////////////////////////////////
template<int N, bool robust>
- __forceinline size_t intersectNode(const typename BVHN<N>::OBBNodeMB* node, const TravRay<N,N,robust>& ray, const float time, vfloat<N>& dist)
+ __forceinline size_t intersectNode(const typename BVHN<N>::OBBNodeMB* node, const TravRay<N,robust>& ray, const float time, vfloat<N>& dist)
{
const AffineSpace3vf<N> xfm = node->space0;
const Vec3vf<N> b0_lower = zero;
@@ -1586,13 +1201,13 @@ namespace embree
//////////////////////////////////////////////////////////////////////////////////////
/*! Intersects N nodes with 1 ray */
- template<int N, int Nx, int types, bool robust>
+ template<int N, int types, bool robust>
struct BVHNNodeIntersector1;
- template<int N, int Nx>
- struct BVHNNodeIntersector1<N, Nx, BVH_AN1, false>
+ template<int N>
+ struct BVHNNodeIntersector1<N, BVH_AN1, false>
{
- static __forceinline bool intersect(const typename BVHN<N>::NodeRef& node, const TravRay<N,Nx,false>& ray, float time, vfloat<Nx>& dist, size_t& mask)
+ static __forceinline bool intersect(const typename BVHN<N>::NodeRef& node, const TravRay<N,false>& ray, float time, vfloat<N>& dist, size_t& mask)
{
if (unlikely(node.isLeaf())) return false;
mask = intersectNode(node.getAABBNode(), ray, dist);
@@ -1600,10 +1215,10 @@ namespace embree
}
};
- template<int N, int Nx>
- struct BVHNNodeIntersector1<N, Nx, BVH_AN1, true>
+ template<int N>
+ struct BVHNNodeIntersector1<N, BVH_AN1, true>
{
- static __forceinline bool intersect(const typename BVHN<N>::NodeRef& node, const TravRay<N,Nx,true>& ray, float time, vfloat<Nx>& dist, size_t& mask)
+ static __forceinline bool intersect(const typename BVHN<N>::NodeRef& node, const TravRay<N,true>& ray, float time, vfloat<N>& dist, size_t& mask)
{
if (unlikely(node.isLeaf())) return false;
mask = intersectNodeRobust(node.getAABBNode(), ray, dist);
@@ -1611,10 +1226,10 @@ namespace embree
}
};
- template<int N, int Nx>
- struct BVHNNodeIntersector1<N, Nx, BVH_AN2, false>
+ template<int N>
+ struct BVHNNodeIntersector1<N, BVH_AN2, false>
{
- static __forceinline bool intersect(const typename BVHN<N>::NodeRef& node, const TravRay<N,Nx,false>& ray, float time, vfloat<Nx>& dist, size_t& mask)
+ static __forceinline bool intersect(const typename BVHN<N>::NodeRef& node, const TravRay<N,false>& ray, float time, vfloat<N>& dist, size_t& mask)
{
if (unlikely(node.isLeaf())) return false;
mask = intersectNode(node.getAABBNodeMB(), ray, time, dist);
@@ -1622,10 +1237,10 @@ namespace embree
}
};
- template<int N, int Nx>
- struct BVHNNodeIntersector1<N, Nx, BVH_AN2, true>
+ template<int N>
+ struct BVHNNodeIntersector1<N, BVH_AN2, true>
{
- static __forceinline bool intersect(const typename BVHN<N>::NodeRef& node, const TravRay<N,Nx,true>& ray, float time, vfloat<Nx>& dist, size_t& mask)
+ static __forceinline bool intersect(const typename BVHN<N>::NodeRef& node, const TravRay<N,true>& ray, float time, vfloat<N>& dist, size_t& mask)
{
if (unlikely(node.isLeaf())) return false;
mask = intersectNodeRobust(node.getAABBNodeMB(), ray, time, dist);
@@ -1633,10 +1248,10 @@ namespace embree
}
};
- template<int N, int Nx>
- struct BVHNNodeIntersector1<N, Nx, BVH_AN2_AN4D, false>
+ template<int N>
+ struct BVHNNodeIntersector1<N, BVH_AN2_AN4D, false>
{
- static __forceinline bool intersect(const typename BVHN<N>::NodeRef& node, const TravRay<N,Nx,false>& ray, float time, vfloat<Nx>& dist, size_t& mask)
+ static __forceinline bool intersect(const typename BVHN<N>::NodeRef& node, const TravRay<N,false>& ray, float time, vfloat<N>& dist, size_t& mask)
{
if (unlikely(node.isLeaf())) return false;
mask = intersectNodeMB4D<N>(node, ray, time, dist);
@@ -1644,10 +1259,10 @@ namespace embree
}
};
- template<int N, int Nx>
- struct BVHNNodeIntersector1<N, Nx, BVH_AN2_AN4D, true>
+ template<int N>
+ struct BVHNNodeIntersector1<N, BVH_AN2_AN4D, true>
{
- static __forceinline bool intersect(const typename BVHN<N>::NodeRef& node, const TravRay<N,Nx,true>& ray, float time, vfloat<Nx>& dist, size_t& mask)
+ static __forceinline bool intersect(const typename BVHN<N>::NodeRef& node, const TravRay<N,true>& ray, float time, vfloat<N>& dist, size_t& mask)
{
if (unlikely(node.isLeaf())) return false;
mask = intersectNodeMB4DRobust<N>(node, ray, time, dist);
@@ -1655,10 +1270,10 @@ namespace embree
}
};
- template<int N, int Nx>
- struct BVHNNodeIntersector1<N, Nx, BVH_AN1_UN1, false>
+ template<int N>
+ struct BVHNNodeIntersector1<N, BVH_AN1_UN1, false>
{
- static __forceinline bool intersect(const typename BVHN<N>::NodeRef& node, const TravRay<N,Nx,false>& ray, float time, vfloat<Nx>& dist, size_t& mask)
+ static __forceinline bool intersect(const typename BVHN<N>::NodeRef& node, const TravRay<N,false>& ray, float time, vfloat<N>& dist, size_t& mask)
{
if (likely(node.isAABBNode())) mask = intersectNode(node.getAABBNode(), ray, dist);
else if (unlikely(node.isOBBNode())) mask = intersectNode(node.ungetAABBNode(), ray, dist);
@@ -1667,10 +1282,10 @@ namespace embree
}
};
- template<int N, int Nx>
- struct BVHNNodeIntersector1<N, Nx, BVH_AN1_UN1, true>
+ template<int N>
+ struct BVHNNodeIntersector1<N, BVH_AN1_UN1, true>
{
- static __forceinline bool intersect(const typename BVHN<N>::NodeRef& node, const TravRay<N,Nx,true>& ray, float time, vfloat<Nx>& dist, size_t& mask)
+ static __forceinline bool intersect(const typename BVHN<N>::NodeRef& node, const TravRay<N,true>& ray, float time, vfloat<N>& dist, size_t& mask)
{
if (likely(node.isAABBNode())) mask = intersectNodeRobust(node.getAABBNode(), ray, dist);
else if (unlikely(node.isOBBNode())) mask = intersectNode(node.ungetAABBNode(), ray, dist);
@@ -1679,10 +1294,10 @@ namespace embree
}
};
- template<int N, int Nx>
- struct BVHNNodeIntersector1<N, Nx, BVH_AN2_UN2, false>
+ template<int N>
+ struct BVHNNodeIntersector1<N, BVH_AN2_UN2, false>
{
- static __forceinline bool intersect(const typename BVHN<N>::NodeRef& node, const TravRay<N,Nx,false>& ray, float time, vfloat<Nx>& dist, size_t& mask)
+ static __forceinline bool intersect(const typename BVHN<N>::NodeRef& node, const TravRay<N,false>& ray, float time, vfloat<N>& dist, size_t& mask)
{
if (likely(node.isAABBNodeMB())) mask = intersectNode(node.getAABBNodeMB(), ray, time, dist);
else if (unlikely(node.isOBBNodeMB())) mask = intersectNode(node.ungetAABBNodeMB(), ray, time, dist);
@@ -1691,10 +1306,10 @@ namespace embree
}
};
- template<int N, int Nx>
- struct BVHNNodeIntersector1<N, Nx, BVH_AN2_UN2, true>
+ template<int N>
+ struct BVHNNodeIntersector1<N, BVH_AN2_UN2, true>
{
- static __forceinline bool intersect(const typename BVHN<N>::NodeRef& node, const TravRay<N,Nx,true>& ray, float time, vfloat<Nx>& dist, size_t& mask)
+ static __forceinline bool intersect(const typename BVHN<N>::NodeRef& node, const TravRay<N,true>& ray, float time, vfloat<N>& dist, size_t& mask)
{
if (likely(node.isAABBNodeMB())) mask = intersectNodeRobust(node.getAABBNodeMB(), ray, time, dist);
else if (unlikely(node.isOBBNodeMB())) mask = intersectNode(node.ungetAABBNodeMB(), ray, time, dist);
@@ -1703,10 +1318,10 @@ namespace embree
}
};
- template<int N, int Nx>
- struct BVHNNodeIntersector1<N, Nx, BVH_AN2_AN4D_UN2, false>
+ template<int N>
+ struct BVHNNodeIntersector1<N, BVH_AN2_AN4D_UN2, false>
{
- static __forceinline bool intersect(const typename BVHN<N>::NodeRef& node, const TravRay<N,Nx,false>& ray, float time, vfloat<Nx>& dist, size_t& mask)
+ static __forceinline bool intersect(const typename BVHN<N>::NodeRef& node, const TravRay<N,false>& ray, float time, vfloat<N>& dist, size_t& mask)
{
if (unlikely(node.isLeaf())) return false;
if (unlikely(node.isOBBNodeMB())) mask = intersectNode(node.ungetAABBNodeMB(), ray, time, dist);
@@ -1715,10 +1330,10 @@ namespace embree
}
};
- template<int N, int Nx>
- struct BVHNNodeIntersector1<N, Nx, BVH_AN2_AN4D_UN2, true>
+ template<int N>
+ struct BVHNNodeIntersector1<N, BVH_AN2_AN4D_UN2, true>
{
- static __forceinline bool intersect(const typename BVHN<N>::NodeRef& node, const TravRay<N,Nx,true>& ray, float time, vfloat<Nx>& dist, size_t& mask)
+ static __forceinline bool intersect(const typename BVHN<N>::NodeRef& node, const TravRay<N,true>& ray, float time, vfloat<N>& dist, size_t& mask)
{
if (unlikely(node.isLeaf())) return false;
if (unlikely(node.isOBBNodeMB())) mask = intersectNode(node.ungetAABBNodeMB(), ray, time, dist);
@@ -1727,10 +1342,10 @@ namespace embree
}
};
- template<int N, int Nx>
- struct BVHNNodeIntersector1<N, Nx, BVH_QN1, false>
+ template<int N>
+ struct BVHNNodeIntersector1<N, BVH_QN1, false>
{
- static __forceinline bool intersect(const typename BVHN<N>::NodeRef& node, const TravRay<N,Nx,false>& ray, float time, vfloat<Nx>& dist, size_t& mask)
+ static __forceinline bool intersect(const typename BVHN<N>::NodeRef& node, const TravRay<N,false>& ray, float time, vfloat<N>& dist, size_t& mask)
{
if (unlikely(node.isLeaf())) return false;
mask = intersectNode((const typename BVHN<N>::QuantizedNode*)node.quantizedNode(), ray, dist);
@@ -1738,10 +1353,10 @@ namespace embree
}
};
- template<int N, int Nx>
- struct BVHNNodeIntersector1<N, Nx, BVH_QN1, true>
+ template<int N>
+ struct BVHNNodeIntersector1<N, BVH_QN1, true>
{
- static __forceinline bool intersect(const typename BVHN<N>::NodeRef& node, const TravRay<N,Nx,true>& ray, float time, vfloat<Nx>& dist, size_t& mask)
+ static __forceinline bool intersect(const typename BVHN<N>::NodeRef& node, const TravRay<N,true>& ray, float time, vfloat<N>& dist, size_t& mask)
{
if (unlikely(node.isLeaf())) return false;
mask = intersectNodeRobust((const typename BVHN<N>::QuantizedNode*)node.quantizedNode(), ray, dist);
@@ -1750,33 +1365,33 @@ namespace embree
};
/*! Intersects N nodes with K rays */
- template<int N, int Nx, bool robust>
+ template<int N, bool robust>
struct BVHNQuantizedBaseNodeIntersector1;
- template<int N, int Nx>
- struct BVHNQuantizedBaseNodeIntersector1<N, Nx, false>
+ template<int N>
+ struct BVHNQuantizedBaseNodeIntersector1<N, false>
{
- static __forceinline size_t intersect(const typename BVHN<N>::QuantizedBaseNode* node, const TravRay<N,Nx,false>& ray, vfloat<Nx>& dist)
+ static __forceinline size_t intersect(const typename BVHN<N>::QuantizedBaseNode* node, const TravRay<N,false>& ray, vfloat<N>& dist)
{
return intersectNode(node,ray,dist);
}
- static __forceinline size_t intersect(const typename BVHN<N>::QuantizedBaseNodeMB* node, const TravRay<N,Nx,false>& ray, const float time, vfloat<N>& dist)
+ static __forceinline size_t intersect(const typename BVHN<N>::QuantizedBaseNodeMB* node, const TravRay<N,false>& ray, const float time, vfloat<N>& dist)
{
return intersectNode(node,ray,time,dist);
}
};
- template<int N, int Nx>
- struct BVHNQuantizedBaseNodeIntersector1<N, Nx, true>
+ template<int N>
+ struct BVHNQuantizedBaseNodeIntersector1<N, true>
{
- static __forceinline size_t intersect(const typename BVHN<N>::QuantizedBaseNode* node, const TravRay<N,Nx,true>& ray, vfloat<Nx>& dist)
+ static __forceinline size_t intersect(const typename BVHN<N>::QuantizedBaseNode* node, const TravRay<N,true>& ray, vfloat<N>& dist)
{
return intersectNode(node,ray,dist);
}
- static __forceinline size_t intersect(const typename BVHN<N>::QuantizedBaseNodeMB* node, const TravRay<N,Nx,true>& ray, const float time, vfloat<N>& dist)
+ static __forceinline size_t intersect(const typename BVHN<N>::QuantizedBaseNodeMB* node, const TravRay<N,true>& ray, const float time, vfloat<N>& dist)
{
return intersectNode(node,ray,time,dist);
}
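[Editor note] The specializations above drop the extra Nx width parameter but keep the same compile-time dispatch idea: the robust flag (and the node-type flags) select which node test is called, with no runtime branching. A tiny self-contained analogue, with stand-in functions instead of embree's intersectNode/intersectNodeRobust:

    #include <cstdio>

    static int testFast(int node)   { return node + 1; }   // stand-in for intersectNode()
    static int testRobust(int node) { return node + 2; }   // stand-in for intersectNodeRobust()

    template<bool robust> struct NodeIntersector1;
    template<> struct NodeIntersector1<false> { static int intersect(int n) { return testFast(n);   } };
    template<> struct NodeIntersector1<true>  { static int intersect(int n) { return testRobust(n); } };

    int main() {
        std::printf("%d %d\n", NodeIntersector1<false>::intersect(1),
                               NodeIntersector1<true>::intersect(1));   // prints "2 3"
    }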
diff --git a/thirdparty/embree-aarch64/kernels/bvh/node_intersector_frustum.h b/thirdparty/embree/kernels/bvh/node_intersector_frustum.h
index 800ac8b478..1f7215e5df 100644
--- a/thirdparty/embree-aarch64/kernels/bvh/node_intersector_frustum.h
+++ b/thirdparty/embree/kernels/bvh/node_intersector_frustum.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
@@ -38,12 +38,6 @@ namespace embree
__forceinline Frustum() {}
template<int K>
- __forceinline Frustum(const vbool<K>& valid, const Vec3vf<K>& org, const Vec3vf<K>& rdir, const vfloat<K>& ray_tnear, const vfloat<K>& ray_tfar, int N)
- {
- init(valid, org, rdir, ray_tnear, ray_tfar, N);
- }
-
- template<int K>
__forceinline void init(const vbool<K>& valid, const Vec3vf<K>& org, const Vec3vf<K>& rdir, const vfloat<K>& ray_tnear, const vfloat<K>& ray_tfar, int N)
{
const Vec3fa reduced_min_org(reduce_min(select(valid, org.x, pos_inf)),
@@ -81,13 +75,9 @@ namespace embree
min_rdir = select(pos_rdir, reduced_min_rdir, reduced_max_rdir);
max_rdir = select(pos_rdir, reduced_max_rdir, reduced_min_rdir);
-#if defined (__aarch64__)
- neg_min_org_rdir = -(min_rdir * select(pos_rdir, reduced_max_org, reduced_min_org));
- neg_max_org_rdir = -(max_rdir * select(pos_rdir, reduced_min_org, reduced_max_org));
-#else
min_org_rdir = min_rdir * select(pos_rdir, reduced_max_org, reduced_min_org);
max_org_rdir = max_rdir * select(pos_rdir, reduced_min_org, reduced_max_org);
-#endif
+
min_dist = reduced_min_dist;
max_dist = reduced_max_dist;
@@ -105,13 +95,9 @@ namespace embree
Vec3fa min_rdir;
Vec3fa max_rdir;
-#if defined (__aarch64__)
- Vec3fa neg_min_org_rdir;
- Vec3fa neg_max_org_rdir;
-#else
Vec3fa min_org_rdir;
Vec3fa max_org_rdir;
-#endif
+
float min_dist;
float max_dist;
};
@@ -125,12 +111,6 @@ namespace embree
__forceinline Frustum() {}
template<int K>
- __forceinline Frustum(const vbool<K>& valid, const Vec3vf<K>& org, const Vec3vf<K>& rdir, const vfloat<K>& ray_tnear, const vfloat<K>& ray_tfar, int N)
- {
- init(valid, org, rdir, ray_tnear, ray_tfar, N);
- }
-
- template<int K>
__forceinline void init(const vbool<K>& valid, const Vec3vf<K>& org, const Vec3vf<K>& rdir, const vfloat<K>& ray_tnear, const vfloat<K>& ray_tfar, int N)
{
const Vec3fa reduced_min_org(reduce_min(select(valid, org.x, pos_inf)),
@@ -200,36 +180,28 @@ namespace embree
// Fast AABBNode intersection
//////////////////////////////////////////////////////////////////////////////////////
- template<int N, int Nx>
+ template<int N>
__forceinline size_t intersectNodeFrustum(const typename BVHN<N>::AABBNode* __restrict__ node,
- const FrustumFast& frustum, vfloat<Nx>& dist)
+ const FrustumFast& frustum, vfloat<N>& dist)
{
- const vfloat<Nx> bminX = *(const vfloat<N>*)((const char*)&node->lower_x + frustum.nf.nearX);
- const vfloat<Nx> bminY = *(const vfloat<N>*)((const char*)&node->lower_x + frustum.nf.nearY);
- const vfloat<Nx> bminZ = *(const vfloat<N>*)((const char*)&node->lower_x + frustum.nf.nearZ);
- const vfloat<Nx> bmaxX = *(const vfloat<N>*)((const char*)&node->lower_x + frustum.nf.farX);
- const vfloat<Nx> bmaxY = *(const vfloat<N>*)((const char*)&node->lower_x + frustum.nf.farY);
- const vfloat<Nx> bmaxZ = *(const vfloat<N>*)((const char*)&node->lower_x + frustum.nf.farZ);
-
-#if defined (__aarch64__)
- const vfloat<Nx> fminX = madd(bminX, vfloat<Nx>(frustum.min_rdir.x), vfloat<Nx>(frustum.neg_min_org_rdir.x));
- const vfloat<Nx> fminY = madd(bminY, vfloat<Nx>(frustum.min_rdir.y), vfloat<Nx>(frustum.neg_min_org_rdir.y));
- const vfloat<Nx> fminZ = madd(bminZ, vfloat<Nx>(frustum.min_rdir.z), vfloat<Nx>(frustum.neg_min_org_rdir.z));
- const vfloat<Nx> fmaxX = madd(bmaxX, vfloat<Nx>(frustum.max_rdir.x), vfloat<Nx>(frustum.neg_max_org_rdir.x));
- const vfloat<Nx> fmaxY = madd(bmaxY, vfloat<Nx>(frustum.max_rdir.y), vfloat<Nx>(frustum.neg_max_org_rdir.y));
- const vfloat<Nx> fmaxZ = madd(bmaxZ, vfloat<Nx>(frustum.max_rdir.z), vfloat<Nx>(frustum.neg_max_org_rdir.z));
-#else
- const vfloat<Nx> fminX = msub(bminX, vfloat<Nx>(frustum.min_rdir.x), vfloat<Nx>(frustum.min_org_rdir.x));
- const vfloat<Nx> fminY = msub(bminY, vfloat<Nx>(frustum.min_rdir.y), vfloat<Nx>(frustum.min_org_rdir.y));
- const vfloat<Nx> fminZ = msub(bminZ, vfloat<Nx>(frustum.min_rdir.z), vfloat<Nx>(frustum.min_org_rdir.z));
- const vfloat<Nx> fmaxX = msub(bmaxX, vfloat<Nx>(frustum.max_rdir.x), vfloat<Nx>(frustum.max_org_rdir.x));
- const vfloat<Nx> fmaxY = msub(bmaxY, vfloat<Nx>(frustum.max_rdir.y), vfloat<Nx>(frustum.max_org_rdir.y));
- const vfloat<Nx> fmaxZ = msub(bmaxZ, vfloat<Nx>(frustum.max_rdir.z), vfloat<Nx>(frustum.max_org_rdir.z));
-#endif
- const vfloat<Nx> fmin = maxi(fminX, fminY, fminZ, vfloat<Nx>(frustum.min_dist));
+ const vfloat<N> bminX = *(const vfloat<N>*)((const char*)&node->lower_x + frustum.nf.nearX);
+ const vfloat<N> bminY = *(const vfloat<N>*)((const char*)&node->lower_x + frustum.nf.nearY);
+ const vfloat<N> bminZ = *(const vfloat<N>*)((const char*)&node->lower_x + frustum.nf.nearZ);
+ const vfloat<N> bmaxX = *(const vfloat<N>*)((const char*)&node->lower_x + frustum.nf.farX);
+ const vfloat<N> bmaxY = *(const vfloat<N>*)((const char*)&node->lower_x + frustum.nf.farY);
+ const vfloat<N> bmaxZ = *(const vfloat<N>*)((const char*)&node->lower_x + frustum.nf.farZ);
+
+ const vfloat<N> fminX = msub(bminX, vfloat<N>(frustum.min_rdir.x), vfloat<N>(frustum.min_org_rdir.x));
+ const vfloat<N> fminY = msub(bminY, vfloat<N>(frustum.min_rdir.y), vfloat<N>(frustum.min_org_rdir.y));
+ const vfloat<N> fminZ = msub(bminZ, vfloat<N>(frustum.min_rdir.z), vfloat<N>(frustum.min_org_rdir.z));
+ const vfloat<N> fmaxX = msub(bmaxX, vfloat<N>(frustum.max_rdir.x), vfloat<N>(frustum.max_org_rdir.x));
+ const vfloat<N> fmaxY = msub(bmaxY, vfloat<N>(frustum.max_rdir.y), vfloat<N>(frustum.max_org_rdir.y));
+ const vfloat<N> fmaxZ = msub(bmaxZ, vfloat<N>(frustum.max_rdir.z), vfloat<N>(frustum.max_org_rdir.z));
+
+ const vfloat<N> fmin = maxi(fminX, fminY, fminZ, vfloat<N>(frustum.min_dist));
dist = fmin;
- const vfloat<Nx> fmax = mini(fmaxX, fmaxY, fmaxZ, vfloat<Nx>(frustum.max_dist));
- const vbool<Nx> vmask_node_hit = fmin <= fmax;
+ const vfloat<N> fmax = mini(fmaxX, fmaxY, fmaxZ, vfloat<N>(frustum.max_dist));
+ const vbool<N> vmask_node_hit = fmin <= fmax;
size_t m_node = movemask(vmask_node_hit) & (((size_t)1 << N)-1);
return m_node;
}
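[Editor note] The bminX/bmaxX loads above use precomputed byte offsets (frustum.nf.nearX / farX) so that, per axis, the "near" plane is the lower bound for a positive direction and the upper bound for a negative one, with no branching in the inner loop. A sketch of that precomputation; the struct layout is illustrative, not embree's actual node layout:

    #include <cstddef>

    struct Node4 { float lower_x[4]; float upper_x[4]; /* y/z planes omitted */ };

    struct NearFarX { size_t nearX, farX; };               // byte offsets into the node

    inline NearFarX precomputeX(float dir_x)
    {
        NearFarX nf;
        nf.nearX = (dir_x >= 0.0f) ? offsetof(Node4, lower_x) : offsetof(Node4, upper_x);
        nf.farX  = (dir_x >= 0.0f) ? offsetof(Node4, upper_x) : offsetof(Node4, lower_x);
        return nf;
    }

    inline const float* nearPlaneX(const Node4& node, const NearFarX& nf) {
        return (const float*)((const char*)&node + nf.nearX);   // same cast pattern as the kernel
    }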
@@ -238,30 +210,30 @@ namespace embree
// Robust AABBNode intersection
//////////////////////////////////////////////////////////////////////////////////////
- template<int N, int Nx>
+ template<int N>
__forceinline size_t intersectNodeFrustum(const typename BVHN<N>::AABBNode* __restrict__ node,
- const FrustumRobust& frustum, vfloat<Nx>& dist)
+ const FrustumRobust& frustum, vfloat<N>& dist)
{
- const vfloat<Nx> bminX = *(const vfloat<N>*)((const char*)&node->lower_x + frustum.nf.nearX);
- const vfloat<Nx> bminY = *(const vfloat<N>*)((const char*)&node->lower_x + frustum.nf.nearY);
- const vfloat<Nx> bminZ = *(const vfloat<N>*)((const char*)&node->lower_x + frustum.nf.nearZ);
- const vfloat<Nx> bmaxX = *(const vfloat<N>*)((const char*)&node->lower_x + frustum.nf.farX);
- const vfloat<Nx> bmaxY = *(const vfloat<N>*)((const char*)&node->lower_x + frustum.nf.farY);
- const vfloat<Nx> bmaxZ = *(const vfloat<N>*)((const char*)&node->lower_x + frustum.nf.farZ);
-
- const vfloat<Nx> fminX = (bminX - vfloat<Nx>(frustum.min_org.x)) * vfloat<Nx>(frustum.min_rdir.x);
- const vfloat<Nx> fminY = (bminY - vfloat<Nx>(frustum.min_org.y)) * vfloat<Nx>(frustum.min_rdir.y);
- const vfloat<Nx> fminZ = (bminZ - vfloat<Nx>(frustum.min_org.z)) * vfloat<Nx>(frustum.min_rdir.z);
- const vfloat<Nx> fmaxX = (bmaxX - vfloat<Nx>(frustum.max_org.x)) * vfloat<Nx>(frustum.max_rdir.x);
- const vfloat<Nx> fmaxY = (bmaxY - vfloat<Nx>(frustum.max_org.y)) * vfloat<Nx>(frustum.max_rdir.y);
- const vfloat<Nx> fmaxZ = (bmaxZ - vfloat<Nx>(frustum.max_org.z)) * vfloat<Nx>(frustum.max_rdir.z);
+ const vfloat<N> bminX = *(const vfloat<N>*)((const char*)&node->lower_x + frustum.nf.nearX);
+ const vfloat<N> bminY = *(const vfloat<N>*)((const char*)&node->lower_x + frustum.nf.nearY);
+ const vfloat<N> bminZ = *(const vfloat<N>*)((const char*)&node->lower_x + frustum.nf.nearZ);
+ const vfloat<N> bmaxX = *(const vfloat<N>*)((const char*)&node->lower_x + frustum.nf.farX);
+ const vfloat<N> bmaxY = *(const vfloat<N>*)((const char*)&node->lower_x + frustum.nf.farY);
+ const vfloat<N> bmaxZ = *(const vfloat<N>*)((const char*)&node->lower_x + frustum.nf.farZ);
+
+ const vfloat<N> fminX = (bminX - vfloat<N>(frustum.min_org.x)) * vfloat<N>(frustum.min_rdir.x);
+ const vfloat<N> fminY = (bminY - vfloat<N>(frustum.min_org.y)) * vfloat<N>(frustum.min_rdir.y);
+ const vfloat<N> fminZ = (bminZ - vfloat<N>(frustum.min_org.z)) * vfloat<N>(frustum.min_rdir.z);
+ const vfloat<N> fmaxX = (bmaxX - vfloat<N>(frustum.max_org.x)) * vfloat<N>(frustum.max_rdir.x);
+ const vfloat<N> fmaxY = (bmaxY - vfloat<N>(frustum.max_org.y)) * vfloat<N>(frustum.max_rdir.y);
+ const vfloat<N> fmaxZ = (bmaxZ - vfloat<N>(frustum.max_org.z)) * vfloat<N>(frustum.max_rdir.z);
const float round_down = 1.0f-2.0f*float(ulp); // FIXME: use per instruction rounding for AVX512
const float round_up = 1.0f+2.0f*float(ulp);
- const vfloat<Nx> fmin = max(fminX, fminY, fminZ, vfloat<Nx>(frustum.min_dist));
+ const vfloat<N> fmin = max(fminX, fminY, fminZ, vfloat<N>(frustum.min_dist));
dist = fmin;
- const vfloat<Nx> fmax = min(fmaxX, fmaxY, fmaxZ, vfloat<Nx>(frustum.max_dist));
- const vbool<Nx> vmask_node_hit = (round_down*fmin <= round_up*fmax);
+ const vfloat<N> fmax = min(fmaxX, fmaxY, fmaxZ, vfloat<N>(frustum.max_dist));
+ const vbool<N> vmask_node_hit = (round_down*fmin <= round_up*fmax);
size_t m_node = movemask(vmask_node_hit) & (((size_t)1 << N)-1);
return m_node;
}
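[Editor note] The robust path above widens the interval by a couple of ulps before comparing, because the multiply by the reciprocal direction can round fmin slightly up and fmax slightly down; without the widening, a ray grazing a plane shared by two neighbouring boxes could be missed. The same check in isolation (assuming non-negative distances, as in traversal):

    #include <limits>

    inline bool conservativeOverlap(float fmin, float fmax)   // fmin, fmax >= 0
    {
        const float ulp        = std::numeric_limits<float>::epsilon();
        const float round_down = 1.0f - 2.0f * ulp;
        const float round_up   = 1.0f + 2.0f * ulp;
        return round_down * fmin <= round_up * fmax;           // never rejects a true overlap
    }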
diff --git a/thirdparty/embree-aarch64/kernels/bvh/node_intersector_packet.h b/thirdparty/embree/kernels/bvh/node_intersector_packet.h
index 0543e56f8e..d5498fc5db 100644
--- a/thirdparty/embree-aarch64/kernels/bvh/node_intersector_packet.h
+++ b/thirdparty/embree/kernels/bvh/node_intersector_packet.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
@@ -39,11 +39,10 @@ namespace embree
org = ray_org;
dir = ray_dir;
rdir = rcp_safe(ray_dir);
-#if defined(__aarch64__)
- neg_org_rdir = -(org * rdir);
-#elif defined(__AVX2__)
+#if defined(__AVX2__) || defined(__ARM_NEON)
org_rdir = org * rdir;
#endif
+
if (N)
{
const int size = sizeof(float)*N;
@@ -56,9 +55,7 @@ namespace embree
Vec3vf<K> org;
Vec3vf<K> dir;
Vec3vf<K> rdir;
-#if defined(__aarch64__)
- Vec3vf<K> neg_org_rdir;
-#elif defined(__AVX2__)
+#if defined(__AVX2__) || defined(__ARM_NEON)
Vec3vf<K> org_rdir;
#endif
Vec3vi<K> nearXYZ;
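[Editor note] org_rdir above is the per-ray precomputation that lets each slab distance be computed with a single fused multiply-subtract on AVX2/NEON: (bound - org) * rdir is refactored into bound * rdir - org * rdir, and org * rdir is cached once per ray. A scalar version of the same refactoring:

    struct Precomp { float rdir, org_rdir; };

    inline Precomp makePrecomp(float org, float dir)
    {
        const float rdir = 1.0f / dir;          // rcp_safe() guards dir == 0 in the real code
        return { rdir, org * rdir };
    }

    inline float planeDistance(float bound, const Precomp& p)
    {
        // msub(bound, p.rdir, p.org_rdir) in the kernel; algebraically (bound - org) * rdir
        return bound * p.rdir - p.org_rdir;
    }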
@@ -122,14 +119,7 @@ namespace embree
const TravRayKFast<K>& ray, vfloat<K>& dist)
{
-#if defined(__aarch64__)
- const vfloat<K> lclipMinX = madd(node->lower_x[i], ray.rdir.x, ray.neg_org_rdir.x);
- const vfloat<K> lclipMinY = madd(node->lower_y[i], ray.rdir.y, ray.neg_org_rdir.y);
- const vfloat<K> lclipMinZ = madd(node->lower_z[i], ray.rdir.z, ray.neg_org_rdir.z);
- const vfloat<K> lclipMaxX = madd(node->upper_x[i], ray.rdir.x, ray.neg_org_rdir.x);
- const vfloat<K> lclipMaxY = madd(node->upper_y[i], ray.rdir.y, ray.neg_org_rdir.y);
- const vfloat<K> lclipMaxZ = madd(node->upper_z[i], ray.rdir.z, ray.neg_org_rdir.z);
-#elif defined(__AVX2__)
+ #if defined(__AVX2__) || defined(__ARM_NEON)
const vfloat<K> lclipMinX = msub(node->lower_x[i], ray.rdir.x, ray.org_rdir.x);
const vfloat<K> lclipMinY = msub(node->lower_y[i], ray.rdir.y, ray.org_rdir.y);
const vfloat<K> lclipMinZ = msub(node->lower_z[i], ray.rdir.z, ray.org_rdir.z);
@@ -145,7 +135,7 @@ namespace embree
const vfloat<K> lclipMaxZ = (node->upper_z[i] - ray.org.z) * ray.rdir.z;
#endif
- #if defined(__AVX512F__) && !defined(__AVX512ER__) // SKX
+ #if defined(__AVX512F__) // SKX
if (K == 16)
{
/* use mixed float/int min/max */
@@ -160,7 +150,7 @@ namespace embree
{
const vfloat<K> lnearP = maxi(mini(lclipMinX, lclipMaxX), mini(lclipMinY, lclipMaxY), mini(lclipMinZ, lclipMaxZ));
const vfloat<K> lfarP = mini(maxi(lclipMinX, lclipMaxX), maxi(lclipMinY, lclipMaxY), maxi(lclipMinZ, lclipMaxZ));
- #if defined(__AVX512F__) && !defined(__AVX512ER__) // SKX
+ #if defined(__AVX512F__) // SKX
const vbool<K> lhit = asInt(maxi(lnearP, ray.tnear)) <= asInt(mini(lfarP, ray.tfar));
#else
const vbool<K> lhit = maxi(lnearP, ray.tnear) <= mini(lfarP, ray.tfar);
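[Editor note] The SKX path above compares the clipped distances as integers (asInt(...) <= asInt(...)). That is valid because for non-negative finite IEEE-754 floats the bit pattern, read as a signed integer, is ordered exactly like the float values — and tnear/tfar are non-negative here — which lets the compare pair with the integer min/max on that ISA. A self-contained check of the property:

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    inline int32_t asInt(float f) { int32_t i; std::memcpy(&i, &f, sizeof f); return i; }

    int main()
    {
        const float a = 0.25f, b = 3.5f;
        assert((a <= b) == (asInt(a) <= asInt(b)));   // holds for all non-negative finite floats
        return 0;
    }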
@@ -209,14 +199,7 @@ namespace embree
const vfloat<K> vupper_y = madd(time, vfloat<K>(node->upper_dy[i]), vfloat<K>(node->upper_y[i]));
const vfloat<K> vupper_z = madd(time, vfloat<K>(node->upper_dz[i]), vfloat<K>(node->upper_z[i]));
-#if defined(__aarch64__)
- const vfloat<K> lclipMinX = madd(vlower_x, ray.rdir.x, ray.neg_org_rdir.x);
- const vfloat<K> lclipMinY = madd(vlower_y, ray.rdir.y, ray.neg_org_rdir.y);
- const vfloat<K> lclipMinZ = madd(vlower_z, ray.rdir.z, ray.neg_org_rdir.z);
- const vfloat<K> lclipMaxX = madd(vupper_x, ray.rdir.x, ray.neg_org_rdir.x);
- const vfloat<K> lclipMaxY = madd(vupper_y, ray.rdir.y, ray.neg_org_rdir.y);
- const vfloat<K> lclipMaxZ = madd(vupper_z, ray.rdir.z, ray.neg_org_rdir.z);
-#elif defined(__AVX2__)
+#if defined(__AVX2__) || defined(__ARM_NEON)
const vfloat<K> lclipMinX = msub(vlower_x, ray.rdir.x, ray.org_rdir.x);
const vfloat<K> lclipMinY = msub(vlower_y, ray.rdir.y, ray.org_rdir.y);
const vfloat<K> lclipMinZ = msub(vlower_z, ray.rdir.z, ray.org_rdir.z);
@@ -232,7 +215,7 @@ namespace embree
const vfloat<K> lclipMaxZ = (vupper_z - ray.org.z) * ray.rdir.z;
#endif
-#if defined(__AVX512F__) && !defined(__AVX512ER__) // SKX
+#if defined(__AVX512F__) // SKX
if (K == 16)
{
/* use mixed float/int min/max */
@@ -247,7 +230,7 @@ namespace embree
{
const vfloat<K> lnearP = maxi(mini(lclipMinX, lclipMaxX), mini(lclipMinY, lclipMaxY), mini(lclipMinZ, lclipMaxZ));
const vfloat<K> lfarP = mini(maxi(lclipMinX, lclipMaxX), maxi(lclipMinY, lclipMaxY), maxi(lclipMinZ, lclipMaxZ));
-#if defined(__AVX512F__) && !defined(__AVX512ER__) // SKX
+#if defined(__AVX512F__) // SKX
const vbool<K> lhit = asInt(maxi(lnearP, ray.tnear)) <= asInt(mini(lfarP, ray.tfar));
#else
const vbool<K> lhit = maxi(lnearP, ray.tnear) <= mini(lfarP, ray.tfar);
@@ -282,7 +265,7 @@ namespace embree
const float round_up = 1.0f+3.0f*float(ulp);
const float round_down = 1.0f-3.0f*float(ulp);
-#if defined(__AVX512F__) && !defined(__AVX512ER__) // SKX
+#if defined(__AVX512F__) // SKX
if (K == 16)
{
const vfloat<K> lnearP = round_down*maxi(min(lclipMinX, lclipMaxX), min(lclipMinY, lclipMaxY), min(lclipMinZ, lclipMaxZ));
@@ -319,14 +302,7 @@ namespace embree
const vfloat<K> vupper_y = madd(time, vfloat<K>(node->upper_dy[i]), vfloat<K>(node->upper_y[i]));
const vfloat<K> vupper_z = madd(time, vfloat<K>(node->upper_dz[i]), vfloat<K>(node->upper_z[i]));
-#if defined(__aarch64__)
- const vfloat<K> lclipMinX = madd(vlower_x, ray.rdir.x, ray.neg_org_rdir.x);
- const vfloat<K> lclipMinY = madd(vlower_y, ray.rdir.y, ray.neg_org_rdir.y);
- const vfloat<K> lclipMinZ = madd(vlower_z, ray.rdir.z, ray.neg_org_rdir.z);
- const vfloat<K> lclipMaxX = madd(vupper_x, ray.rdir.x, ray.neg_org_rdir.x);
- const vfloat<K> lclipMaxY = madd(vupper_y, ray.rdir.y, ray.neg_org_rdir.y);
- const vfloat<K> lclipMaxZ = madd(vupper_z, ray.rdir.z, ray.neg_org_rdir.z);
-#elif defined(__AVX2__)
+#if defined(__AVX2__) || defined(__ARM_NEON)
const vfloat<K> lclipMinX = msub(vlower_x, ray.rdir.x, ray.org_rdir.x);
const vfloat<K> lclipMinY = msub(vlower_y, ray.rdir.y, ray.org_rdir.y);
const vfloat<K> lclipMinZ = msub(vlower_z, ray.rdir.z, ray.org_rdir.z);
@@ -488,14 +464,7 @@ namespace embree
const vfloat<N> lower_z = node->dequantizeLowerZ();
const vfloat<N> upper_z = node->dequantizeUpperZ();
- #if defined(__aarch64__)
- const vfloat<K> lclipMinX = madd(lower_x[i], ray.rdir.x, ray.neg_org_rdir.x);
- const vfloat<K> lclipMinY = madd(lower_y[i], ray.rdir.y, ray.neg_org_rdir.y);
- const vfloat<K> lclipMinZ = madd(lower_z[i], ray.rdir.z, ray.neg_org_rdir.z);
- const vfloat<K> lclipMaxX = madd(upper_x[i], ray.rdir.x, ray.neg_org_rdir.x);
- const vfloat<K> lclipMaxY = madd(upper_y[i], ray.rdir.y, ray.neg_org_rdir.y);
- const vfloat<K> lclipMaxZ = madd(upper_z[i], ray.rdir.z, ray.neg_org_rdir.z);
- #elif defined(__AVX2__)
+ #if defined(__AVX2__) || defined(__ARM_NEON)
const vfloat<K> lclipMinX = msub(lower_x[i], ray.rdir.x, ray.org_rdir.x);
const vfloat<K> lclipMinY = msub(lower_y[i], ray.rdir.y, ray.org_rdir.y);
const vfloat<K> lclipMinZ = msub(lower_z[i], ray.rdir.z, ray.org_rdir.z);
@@ -511,7 +480,7 @@ namespace embree
const vfloat<K> lclipMaxZ = (upper_z[i] - ray.org.z) * ray.rdir.z;
#endif
- #if defined(__AVX512F__) && !defined(__AVX512ER__) // SKX
+ #if defined(__AVX512F__) // SKX
if (K == 16)
{
/* use mixed float/int min/max */
@@ -526,7 +495,7 @@ namespace embree
{
const vfloat<K> lnearP = maxi(mini(lclipMinX, lclipMaxX), mini(lclipMinY, lclipMaxY), mini(lclipMinZ, lclipMaxZ));
const vfloat<K> lfarP = mini(maxi(lclipMinX, lclipMaxX), maxi(lclipMinY, lclipMaxY), maxi(lclipMinZ, lclipMaxZ));
- #if defined(__AVX512F__) && !defined(__AVX512ER__) // SKX
+ #if defined(__AVX512F__) // SKX
const vbool<K> lhit = asInt(maxi(lnearP, ray.tnear)) <= asInt(mini(lfarP, ray.tfar));
#else
const vbool<K> lhit = maxi(lnearP, ray.tnear) <= mini(lfarP, ray.tfar);
@@ -573,21 +542,14 @@ namespace embree
{
assert(movemask(node->validMask()) & ((size_t)1 << i));
- const vfloat<K> lower_x = node->dequantizeLowerX(i,time);
- const vfloat<K> upper_x = node->dequantizeUpperX(i,time);
- const vfloat<K> lower_y = node->dequantizeLowerY(i,time);
- const vfloat<K> upper_y = node->dequantizeUpperY(i,time);
- const vfloat<K> lower_z = node->dequantizeLowerZ(i,time);
- const vfloat<K> upper_z = node->dequantizeUpperZ(i,time);
+ const vfloat<K> lower_x = node->template dequantizeLowerX<K>(i,time);
+ const vfloat<K> upper_x = node->template dequantizeUpperX<K>(i,time);
+ const vfloat<K> lower_y = node->template dequantizeLowerY<K>(i,time);
+ const vfloat<K> upper_y = node->template dequantizeUpperY<K>(i,time);
+ const vfloat<K> lower_z = node->template dequantizeLowerZ<K>(i,time);
+ const vfloat<K> upper_z = node->template dequantizeUpperZ<K>(i,time);
-#if defined(__aarch64__)
- const vfloat<K> lclipMinX = madd(lower_x, ray.rdir.x, ray.neg_org_rdir.x);
- const vfloat<K> lclipMinY = madd(lower_y, ray.rdir.y, ray.neg_org_rdir.y);
- const vfloat<K> lclipMinZ = madd(lower_z, ray.rdir.z, ray.neg_org_rdir.z);
- const vfloat<K> lclipMaxX = madd(upper_x, ray.rdir.x, ray.neg_org_rdir.x);
- const vfloat<K> lclipMaxY = madd(upper_y, ray.rdir.y, ray.neg_org_rdir.y);
- const vfloat<K> lclipMaxZ = madd(upper_z, ray.rdir.z, ray.neg_org_rdir.z);
-#elif defined(__AVX2__)
+#if defined(__AVX2__) || defined(__ARM_NEON)
const vfloat<K> lclipMinX = msub(lower_x, ray.rdir.x, ray.org_rdir.x);
const vfloat<K> lclipMinY = msub(lower_y, ray.rdir.y, ray.org_rdir.y);
const vfloat<K> lclipMinZ = msub(lower_z, ray.rdir.z, ray.org_rdir.z);
@@ -617,12 +579,12 @@ namespace embree
{
assert(movemask(node->validMask()) & ((size_t)1 << i));
- const vfloat<K> lower_x = node->dequantizeLowerX(i,time);
- const vfloat<K> upper_x = node->dequantizeUpperX(i,time);
- const vfloat<K> lower_y = node->dequantizeLowerY(i,time);
- const vfloat<K> upper_y = node->dequantizeUpperY(i,time);
- const vfloat<K> lower_z = node->dequantizeLowerZ(i,time);
- const vfloat<K> upper_z = node->dequantizeUpperZ(i,time);
+ const vfloat<K> lower_x = node->template dequantizeLowerX<K>(i,time);
+ const vfloat<K> upper_x = node->template dequantizeUpperX<K>(i,time);
+ const vfloat<K> lower_y = node->template dequantizeLowerY<K>(i,time);
+ const vfloat<K> upper_y = node->template dequantizeUpperY<K>(i,time);
+ const vfloat<K> lower_z = node->template dequantizeLowerZ<K>(i,time);
+ const vfloat<K> upper_z = node->template dequantizeUpperZ<K>(i,time);
const vfloat<K> lclipMinX = (lower_x - ray.org.x) * ray.rdir.x;
const vfloat<K> lclipMinY = (lower_y - ray.org.y) * ray.rdir.y;
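[Editor note] The dequantize calls above gain an explicit "node->template ...<K>(...)" spelling. Inside a function template, node has a dependent type, so calling one of its member templates with explicit arguments requires the template keyword for "<K>" to parse as a template-argument list rather than a less-than comparison. A minimal illustration (the struct and member here are made up):

    #include <cstdio>

    struct Quantized {
        template<int K> float dequantizeLowerX(int i) const { return float(K * 10 + i); }
    };

    template<int K, class NodeT>
    float readLowerX(const NodeT* node, int i)
    {
        return node->template dequantizeLowerX<K>(i);   // without 'template' this does not parse
    }

    int main()
    {
        Quantized q;
        std::printf("%g\n", readLowerX<4>(&q, 1));      // prints 41
        return 0;
    }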
diff --git a/thirdparty/embree-aarch64/kernels/bvh/node_intersector_packet_stream.h b/thirdparty/embree/kernels/bvh/node_intersector_packet_stream.h
index f379b57aea..55b2c27231 100644
--- a/thirdparty/embree-aarch64/kernels/bvh/node_intersector_packet_stream.h
+++ b/thirdparty/embree/kernels/bvh/node_intersector_packet_stream.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
@@ -32,19 +32,11 @@ namespace embree
__forceinline void init(const Vec3vf<K>& ray_org, const Vec3vf<K>& ray_dir)
{
rdir = rcp_safe(ray_dir);
-#if defined(__aarch64__)
- neg_org_rdir = -(ray_org * rdir);
-#else
org_rdir = ray_org * rdir;
-#endif
}
Vec3vf<K> rdir;
-#if defined(__aarch64__)
- Vec3vf<K> neg_org_rdir;
-#else
Vec3vf<K> org_rdir;
-#endif
vfloat<K> tnear;
vfloat<K> tfar;
};
@@ -84,36 +76,27 @@ namespace embree
// Fast AABBNode intersection
//////////////////////////////////////////////////////////////////////////////////////
- template<int N, int Nx, int K>
+ template<int N, int K>
__forceinline size_t intersectNode1(const typename BVHN<N>::AABBNode* __restrict__ node,
const TravRayKStreamFast<K>& ray, size_t k, const NearFarPrecalculations& nf)
{
- const vfloat<Nx> bminX = vfloat<Nx>(*(const vfloat<N>*)((const char*)&node->lower_x + nf.nearX));
- const vfloat<Nx> bminY = vfloat<Nx>(*(const vfloat<N>*)((const char*)&node->lower_x + nf.nearY));
- const vfloat<Nx> bminZ = vfloat<Nx>(*(const vfloat<N>*)((const char*)&node->lower_x + nf.nearZ));
- const vfloat<Nx> bmaxX = vfloat<Nx>(*(const vfloat<N>*)((const char*)&node->lower_x + nf.farX));
- const vfloat<Nx> bmaxY = vfloat<Nx>(*(const vfloat<N>*)((const char*)&node->lower_x + nf.farY));
- const vfloat<Nx> bmaxZ = vfloat<Nx>(*(const vfloat<N>*)((const char*)&node->lower_x + nf.farZ));
-
-#if defined (__aarch64__)
- const vfloat<Nx> rminX = madd(bminX, vfloat<Nx>(ray.rdir.x[k]), vfloat<Nx>(ray.neg_org_rdir.x[k]));
- const vfloat<Nx> rminY = madd(bminY, vfloat<Nx>(ray.rdir.y[k]), vfloat<Nx>(ray.neg_org_rdir.y[k]));
- const vfloat<Nx> rminZ = madd(bminZ, vfloat<Nx>(ray.rdir.z[k]), vfloat<Nx>(ray.neg_org_rdir.z[k]));
- const vfloat<Nx> rmaxX = madd(bmaxX, vfloat<Nx>(ray.rdir.x[k]), vfloat<Nx>(ray.neg_org_rdir.x[k]));
- const vfloat<Nx> rmaxY = madd(bmaxY, vfloat<Nx>(ray.rdir.y[k]), vfloat<Nx>(ray.neg_org_rdir.y[k]));
- const vfloat<Nx> rmaxZ = madd(bmaxZ, vfloat<Nx>(ray.rdir.z[k]), vfloat<Nx>(ray.neg_org_rdir.z[k]));
-#else
- const vfloat<Nx> rminX = msub(bminX, vfloat<Nx>(ray.rdir.x[k]), vfloat<Nx>(ray.org_rdir.x[k]));
- const vfloat<Nx> rminY = msub(bminY, vfloat<Nx>(ray.rdir.y[k]), vfloat<Nx>(ray.org_rdir.y[k]));
- const vfloat<Nx> rminZ = msub(bminZ, vfloat<Nx>(ray.rdir.z[k]), vfloat<Nx>(ray.org_rdir.z[k]));
- const vfloat<Nx> rmaxX = msub(bmaxX, vfloat<Nx>(ray.rdir.x[k]), vfloat<Nx>(ray.org_rdir.x[k]));
- const vfloat<Nx> rmaxY = msub(bmaxY, vfloat<Nx>(ray.rdir.y[k]), vfloat<Nx>(ray.org_rdir.y[k]));
- const vfloat<Nx> rmaxZ = msub(bmaxZ, vfloat<Nx>(ray.rdir.z[k]), vfloat<Nx>(ray.org_rdir.z[k]));
-#endif
- const vfloat<Nx> rmin = maxi(rminX, rminY, rminZ, vfloat<Nx>(ray.tnear[k]));
- const vfloat<Nx> rmax = mini(rmaxX, rmaxY, rmaxZ, vfloat<Nx>(ray.tfar[k]));
-
- const vbool<Nx> vmask_first_hit = rmin <= rmax;
+ const vfloat<N> bminX = vfloat<N>(*(const vfloat<N>*)((const char*)&node->lower_x + nf.nearX));
+ const vfloat<N> bminY = vfloat<N>(*(const vfloat<N>*)((const char*)&node->lower_x + nf.nearY));
+ const vfloat<N> bminZ = vfloat<N>(*(const vfloat<N>*)((const char*)&node->lower_x + nf.nearZ));
+ const vfloat<N> bmaxX = vfloat<N>(*(const vfloat<N>*)((const char*)&node->lower_x + nf.farX));
+ const vfloat<N> bmaxY = vfloat<N>(*(const vfloat<N>*)((const char*)&node->lower_x + nf.farY));
+ const vfloat<N> bmaxZ = vfloat<N>(*(const vfloat<N>*)((const char*)&node->lower_x + nf.farZ));
+
+ const vfloat<N> rminX = msub(bminX, vfloat<N>(ray.rdir.x[k]), vfloat<N>(ray.org_rdir.x[k]));
+ const vfloat<N> rminY = msub(bminY, vfloat<N>(ray.rdir.y[k]), vfloat<N>(ray.org_rdir.y[k]));
+ const vfloat<N> rminZ = msub(bminZ, vfloat<N>(ray.rdir.z[k]), vfloat<N>(ray.org_rdir.z[k]));
+ const vfloat<N> rmaxX = msub(bmaxX, vfloat<N>(ray.rdir.x[k]), vfloat<N>(ray.org_rdir.x[k]));
+ const vfloat<N> rmaxY = msub(bmaxY, vfloat<N>(ray.rdir.y[k]), vfloat<N>(ray.org_rdir.y[k]));
+ const vfloat<N> rmaxZ = msub(bmaxZ, vfloat<N>(ray.rdir.z[k]), vfloat<N>(ray.org_rdir.z[k]));
+ const vfloat<N> rmin = maxi(rminX, rminY, rminZ, vfloat<N>(ray.tnear[k]));
+ const vfloat<N> rmax = mini(rmaxX, rmaxY, rmaxZ, vfloat<N>(ray.tfar[k]));
+
+ const vbool<N> vmask_first_hit = rmin <= rmax;
return movemask(vmask_first_hit) & (((size_t)1 << N)-1);
}
@@ -130,21 +113,12 @@ namespace embree
const vfloat<K> bmaxY = *(const float*)(ptr + nf.farY);
const vfloat<K> bmaxZ = *(const float*)(ptr + nf.farZ);
-#if defined (__aarch64__)
- const vfloat<K> rminX = madd(bminX, ray.rdir.x, ray.neg_org_rdir.x);
- const vfloat<K> rminY = madd(bminY, ray.rdir.y, ray.neg_org_rdir.y);
- const vfloat<K> rminZ = madd(bminZ, ray.rdir.z, ray.neg_org_rdir.z);
- const vfloat<K> rmaxX = madd(bmaxX, ray.rdir.x, ray.neg_org_rdir.x);
- const vfloat<K> rmaxY = madd(bmaxY, ray.rdir.y, ray.neg_org_rdir.y);
- const vfloat<K> rmaxZ = madd(bmaxZ, ray.rdir.z, ray.neg_org_rdir.z);
-#else
const vfloat<K> rminX = msub(bminX, ray.rdir.x, ray.org_rdir.x);
const vfloat<K> rminY = msub(bminY, ray.rdir.y, ray.org_rdir.y);
const vfloat<K> rminZ = msub(bminZ, ray.rdir.z, ray.org_rdir.z);
const vfloat<K> rmaxX = msub(bmaxX, ray.rdir.x, ray.org_rdir.x);
const vfloat<K> rmaxY = msub(bmaxY, ray.rdir.y, ray.org_rdir.y);
const vfloat<K> rmaxZ = msub(bmaxZ, ray.rdir.z, ray.org_rdir.z);
-#endif
const vfloat<K> rmin = maxi(rminX, rminY, rminZ, ray.tnear);
const vfloat<K> rmax = mini(rmaxX, rmaxY, rmaxZ, ray.tfar);
@@ -158,28 +132,28 @@ namespace embree
// Robust AABBNode intersection
//////////////////////////////////////////////////////////////////////////////////////
- template<int N, int Nx, int K>
+ template<int N, int K>
__forceinline size_t intersectNode1(const typename BVHN<N>::AABBNode* __restrict__ node,
const TravRayKStreamRobust<K>& ray, size_t k, const NearFarPrecalculations& nf)
{
- const vfloat<Nx> bminX = vfloat<Nx>(*(const vfloat<N>*)((const char*)&node->lower_x + nf.nearX));
- const vfloat<Nx> bminY = vfloat<Nx>(*(const vfloat<N>*)((const char*)&node->lower_x + nf.nearY));
- const vfloat<Nx> bminZ = vfloat<Nx>(*(const vfloat<N>*)((const char*)&node->lower_x + nf.nearZ));
- const vfloat<Nx> bmaxX = vfloat<Nx>(*(const vfloat<N>*)((const char*)&node->lower_x + nf.farX));
- const vfloat<Nx> bmaxY = vfloat<Nx>(*(const vfloat<N>*)((const char*)&node->lower_x + nf.farY));
- const vfloat<Nx> bmaxZ = vfloat<Nx>(*(const vfloat<N>*)((const char*)&node->lower_x + nf.farZ));
-
- const vfloat<Nx> rminX = (bminX - vfloat<Nx>(ray.org.x[k])) * vfloat<Nx>(ray.rdir.x[k]);
- const vfloat<Nx> rminY = (bminY - vfloat<Nx>(ray.org.y[k])) * vfloat<Nx>(ray.rdir.y[k]);
- const vfloat<Nx> rminZ = (bminZ - vfloat<Nx>(ray.org.z[k])) * vfloat<Nx>(ray.rdir.z[k]);
- const vfloat<Nx> rmaxX = (bmaxX - vfloat<Nx>(ray.org.x[k])) * vfloat<Nx>(ray.rdir.x[k]);
- const vfloat<Nx> rmaxY = (bmaxY - vfloat<Nx>(ray.org.y[k])) * vfloat<Nx>(ray.rdir.y[k]);
- const vfloat<Nx> rmaxZ = (bmaxZ - vfloat<Nx>(ray.org.z[k])) * vfloat<Nx>(ray.rdir.z[k]);
+ const vfloat<N> bminX = vfloat<N>(*(const vfloat<N>*)((const char*)&node->lower_x + nf.nearX));
+ const vfloat<N> bminY = vfloat<N>(*(const vfloat<N>*)((const char*)&node->lower_x + nf.nearY));
+ const vfloat<N> bminZ = vfloat<N>(*(const vfloat<N>*)((const char*)&node->lower_x + nf.nearZ));
+ const vfloat<N> bmaxX = vfloat<N>(*(const vfloat<N>*)((const char*)&node->lower_x + nf.farX));
+ const vfloat<N> bmaxY = vfloat<N>(*(const vfloat<N>*)((const char*)&node->lower_x + nf.farY));
+ const vfloat<N> bmaxZ = vfloat<N>(*(const vfloat<N>*)((const char*)&node->lower_x + nf.farZ));
+
+ const vfloat<N> rminX = (bminX - vfloat<N>(ray.org.x[k])) * vfloat<N>(ray.rdir.x[k]);
+ const vfloat<N> rminY = (bminY - vfloat<N>(ray.org.y[k])) * vfloat<N>(ray.rdir.y[k]);
+ const vfloat<N> rminZ = (bminZ - vfloat<N>(ray.org.z[k])) * vfloat<N>(ray.rdir.z[k]);
+ const vfloat<N> rmaxX = (bmaxX - vfloat<N>(ray.org.x[k])) * vfloat<N>(ray.rdir.x[k]);
+ const vfloat<N> rmaxY = (bmaxY - vfloat<N>(ray.org.y[k])) * vfloat<N>(ray.rdir.y[k]);
+ const vfloat<N> rmaxZ = (bmaxZ - vfloat<N>(ray.org.z[k])) * vfloat<N>(ray.rdir.z[k]);
const float round_up = 1.0f+3.0f*float(ulp); // FIXME: use per instruction rounding for AVX512
- const vfloat<Nx> rmin = max(rminX, rminY, rminZ, vfloat<Nx>(ray.tnear[k]));
- const vfloat<Nx> rmax = round_up *min(rmaxX, rmaxY, rmaxZ, vfloat<Nx>(ray.tfar[k]));
+ const vfloat<N> rmin = max(rminX, rminY, rminZ, vfloat<N>(ray.tnear[k]));
+ const vfloat<N> rmax = round_up *min(rmaxX, rmaxY, rmaxZ, vfloat<N>(ray.tfar[k]));
- const vbool<Nx> vmask_first_hit = rmin <= rmax;
+ const vbool<N> vmask_first_hit = rmin <= rmax;
return movemask(vmask_first_hit) & (((size_t)1 << N)-1);
}
diff --git a/thirdparty/embree-aarch64/kernels/common/accel.h b/thirdparty/embree/kernels/common/accel.h
index c038d3cf21..cc4ea1805b 100644
--- a/thirdparty/embree-aarch64/kernels/common/accel.h
+++ b/thirdparty/embree/kernels/common/accel.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
@@ -332,7 +332,7 @@ namespace embree
intersectorN.intersect(this,rayN,N,context);
}
-#if defined(__SSE__) || defined(__ARM_NEON)
+#if defined(__SSE__)
__forceinline void intersect(const vbool4& valid, RayHitK<4>& ray, IntersectContext* context) {
const vint<4> mask = valid.mask32();
intersect4(&mask,(RTCRayHit4&)ray,context);
@@ -388,7 +388,7 @@ namespace embree
intersectorN.occluded(this,rayN,N,context);
}
-#if defined(__SSE__) || defined(__ARM_NEON)
+#if defined(__SSE__)
__forceinline void occluded(const vbool4& valid, RayK<4>& ray, IntersectContext* context) {
const vint<4> mask = valid.mask32();
occluded4(&mask,(RTCRay4&)ray,context);
diff --git a/thirdparty/embree-aarch64/kernels/common/accelinstance.h b/thirdparty/embree/kernels/common/accelinstance.h
index d74b96df3f..c63ef998bd 100644
--- a/thirdparty/embree-aarch64/kernels/common/accelinstance.h
+++ b/thirdparty/embree/kernels/common/accelinstance.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
diff --git a/thirdparty/embree-aarch64/kernels/common/acceln.cpp b/thirdparty/embree/kernels/common/acceln.cpp
index aadb4a64ef..32a27c560a 100644
--- a/thirdparty/embree-aarch64/kernels/common/acceln.cpp
+++ b/thirdparty/embree/kernels/common/acceln.cpp
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#include "acceln.h"
@@ -97,7 +97,7 @@ namespace embree
for (size_t i=0; i<This->accels.size(); i++) {
if (This->accels[i]->isEmpty()) continue;
This->accels[i]->intersectors.occluded4(valid,ray,context);
-#if defined(__SSE2__) || defined(__ARM_NEON)
+#if defined(__SSE2__)
vbool4 valid0 = asBool(((vint4*)valid)[0]);
vbool4 hit0 = ((vfloat4*)ray.tfar)[0] >= vfloat4(zero);
if (unlikely(none(valid0 & hit0))) break;
@@ -111,7 +111,7 @@ namespace embree
for (size_t i=0; i<This->accels.size(); i++) {
if (This->accels[i]->isEmpty()) continue;
This->accels[i]->intersectors.occluded8(valid,ray,context);
-#if defined(__SSE2__) || defined(__ARM_NEON) // FIXME: use higher ISA
+#if defined(__SSE2__) // FIXME: use higher ISA
vbool4 valid0 = asBool(((vint4*)valid)[0]);
vbool4 hit0 = ((vfloat4*)ray.tfar)[0] >= vfloat4(zero);
vbool4 valid1 = asBool(((vint4*)valid)[1]);
@@ -127,7 +127,7 @@ namespace embree
for (size_t i=0; i<This->accels.size(); i++) {
if (This->accels[i]->isEmpty()) continue;
This->accels[i]->intersectors.occluded16(valid,ray,context);
-#if defined(__SSE2__) || defined(__ARM_NEON) // FIXME: use higher ISA
+#if defined(__SSE2__) // FIXME: use higher ISA
vbool4 valid0 = asBool(((vint4*)valid)[0]);
vbool4 hit0 = ((vfloat4*)ray.tfar)[0] >= vfloat4(zero);
vbool4 valid1 = asBool(((vint4*)valid)[1]);
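[Editor note] The hunks above keep the early-out in the occluded4/8/16 loops: embree's occlusion kernels mark a hit by writing a negative tfar, so once every still-valid lane has tfar < 0 the remaining acceleration structures can be skipped. A scalar sketch of that termination test:

    #include <array>

    inline bool anyUnoccluded(const std::array<bool, 4>& valid, const std::array<float, 4>& tfar)
    {
        for (int i = 0; i < 4; ++i)
            if (valid[i] && tfar[i] >= 0.0f)   // lane still active and not yet occluded
                return true;
        return false;                          // caller breaks out of the accel loop
    }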
diff --git a/thirdparty/embree-aarch64/kernels/common/acceln.h b/thirdparty/embree/kernels/common/acceln.h
index 2edd98f647..0445b2e811 100644
--- a/thirdparty/embree-aarch64/kernels/common/acceln.h
+++ b/thirdparty/embree/kernels/common/acceln.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
diff --git a/thirdparty/embree-aarch64/kernels/common/accelset.cpp b/thirdparty/embree/kernels/common/accelset.cpp
index 79be1c4301..8c18f31776 100644
--- a/thirdparty/embree-aarch64/kernels/common/accelset.cpp
+++ b/thirdparty/embree/kernels/common/accelset.cpp
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#include "accelset.h"
diff --git a/thirdparty/embree-aarch64/kernels/common/accelset.h b/thirdparty/embree/kernels/common/accelset.h
index 3774b2accb..90b184a07b 100644
--- a/thirdparty/embree-aarch64/kernels/common/accelset.h
+++ b/thirdparty/embree/kernels/common/accelset.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
diff --git a/thirdparty/embree-aarch64/kernels/common/alloc.cpp b/thirdparty/embree/kernels/common/alloc.cpp
index 6fa406f03a..1a0e1aeed3 100644
--- a/thirdparty/embree-aarch64/kernels/common/alloc.cpp
+++ b/thirdparty/embree/kernels/common/alloc.cpp
@@ -1,11 +1,8 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#include "alloc.h"
#include "../../common/sys/thread.h"
-#if defined(__aarch64__) && defined(BUILD_IOS)
-#include "../../common/sys/barrier.h"
-#endif
namespace embree
{
diff --git a/thirdparty/embree-aarch64/kernels/common/alloc.h b/thirdparty/embree/kernels/common/alloc.h
index 488fa707ef..4458e35c24 100644
--- a/thirdparty/embree-aarch64/kernels/common/alloc.h
+++ b/thirdparty/embree/kernels/common/alloc.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
@@ -8,10 +8,6 @@
#include "scene.h"
#include "primref.h"
-#if defined(__aarch64__) && defined(BUILD_IOS)
-#include <mutex>
-#endif
-
namespace embree
{
class FastAllocator
@@ -30,7 +26,7 @@ namespace embree
public:
struct ThreadLocal2;
- enum AllocationType { ALIGNED_MALLOC, EMBREE_OS_MALLOC, SHARED, ANY_TYPE };
+ enum AllocationType { ALIGNED_MALLOC, OS_MALLOC, SHARED, ANY_TYPE };
/*! Per thread structure holding the current memory block. */
struct __aligned(64) ThreadLocal
@@ -136,11 +132,7 @@ namespace embree
{
assert(alloc_i);
if (alloc.load() == alloc_i) return;
-#if defined(__aarch64__) && defined(BUILD_IOS)
- std::scoped_lock lock(mutex);
-#else
Lock<SpinLock> lock(mutex);
-#endif
//if (alloc.load() == alloc_i) return; // not required as only one thread calls bind
if (alloc.load()) {
alloc.load()->bytesUsed += alloc0.getUsedBytes() + alloc1.getUsedBytes();
@@ -158,11 +150,7 @@ namespace embree
{
assert(alloc_i);
if (alloc.load() != alloc_i) return;
-#if defined(__aarch64__) && defined(BUILD_IOS)
- std::scoped_lock lock(mutex);
-#else
Lock<SpinLock> lock(mutex);
-#endif
if (alloc.load() != alloc_i) return; // required as a different thread calls unbind
alloc.load()->bytesUsed += alloc0.getUsedBytes() + alloc1.getUsedBytes();
alloc.load()->bytesFree += alloc0.getFreeBytes() + alloc1.getFreeBytes();
@@ -173,11 +161,7 @@ namespace embree
}
public:
-#if defined(__aarch64__) && defined(BUILD_IOS)
- std::mutex mutex;
-#else
SpinLock mutex; //!< required as unbind is called from other threads
-#endif
std::atomic<FastAllocator*> alloc; //!< parent allocator
ThreadLocal alloc0;
ThreadLocal alloc1;
@@ -185,7 +169,7 @@ namespace embree
FastAllocator (Device* device, bool osAllocation)
: device(device), slotMask(0), usedBlocks(nullptr), freeBlocks(nullptr), use_single_mode(false), defaultBlockSize(PAGE_SIZE), estimatedSize(0),
- growSize(PAGE_SIZE), maxGrowSize(maxAllocationSize), log2_grow_size_scale(0), bytesUsed(0), bytesFree(0), bytesWasted(0), atype(osAllocation ? EMBREE_OS_MALLOC : ALIGNED_MALLOC),
+ growSize(PAGE_SIZE), maxGrowSize(maxAllocationSize), log2_grow_size_scale(0), bytesUsed(0), bytesFree(0), bytesWasted(0), atype(osAllocation ? OS_MALLOC : ALIGNED_MALLOC),
primrefarray(device,0)
{
for (size_t i=0; i<MAX_THREAD_USED_BLOCK_SLOTS; i++)
@@ -222,7 +206,7 @@ namespace embree
void setOSallocation(bool flag)
{
- atype = flag ? EMBREE_OS_MALLOC : ALIGNED_MALLOC;
+ atype = flag ? OS_MALLOC : ALIGNED_MALLOC;
}
private:
@@ -233,11 +217,7 @@ namespace embree
ThreadLocal2* alloc = thread_local_allocator2;
if (alloc == nullptr) {
thread_local_allocator2 = alloc = new ThreadLocal2;
-#if defined(__aarch64__) && defined(BUILD_IOS)
- std::scoped_lock lock(s_thread_local_allocators_lock);
-#else
Lock<SpinLock> lock(s_thread_local_allocators_lock);
-#endif
s_thread_local_allocators.push_back(make_unique(alloc));
}
return alloc;
@@ -247,11 +227,7 @@ namespace embree
__forceinline void join(ThreadLocal2* alloc)
{
-#if defined(__aarch64__) && defined(BUILD_IOS)
- std::scoped_lock lock(s_thread_local_allocators_lock);
-#else
Lock<SpinLock> lock(thread_local_allocators_lock);
-#endif
thread_local_allocators.push_back(alloc);
}
@@ -321,11 +297,7 @@ namespace embree
}
static const size_t threadLocalAllocOverhead = 20; //! 20 means 5% parallel allocation overhead through unfilled thread local blocks
-#if defined(__AVX512ER__) // KNL
- static const size_t mainAllocOverheadStatic = 15; //! 15 means 7.5% allocation overhead through unfilled main alloc blocks
-#else
static const size_t mainAllocOverheadStatic = 20; //! 20 means 5% allocation overhead through unfilled main alloc blocks
-#endif
static const size_t mainAllocOverheadDynamic = 8; //! 20 means 12.5% allocation overhead through unfilled main alloc blocks
/* calculates a single threaded threshold for the builders such
@@ -520,11 +492,7 @@ namespace embree
/* parallel block creation in case of no freeBlocks, avoids single global mutex */
if (likely(freeBlocks.load() == nullptr))
{
-#if defined(__aarch64__) && defined(BUILD_IOS)
- std::scoped_lock lock(slotMutex[slot]);
-#else
Lock<SpinLock> lock(slotMutex[slot]);
-#endif
if (myUsedBlocks == threadUsedBlocks[slot]) {
const size_t alignedBytes = (bytes+(align-1)) & ~(align-1);
const size_t allocSize = max(min(growSize,maxGrowSize),alignedBytes);
@@ -537,11 +505,7 @@ namespace embree
/* if this fails allocate new block */
{
-#if defined(__aarch64__) && defined(BUILD_IOS)
- std::scoped_lock lock(mutex);
-#else
- Lock<SpinLock> lock(mutex);
-#endif
+ Lock<SpinLock> lock(mutex);
if (myUsedBlocks == threadUsedBlocks[slot])
{
if (freeBlocks.load() != nullptr) {
@@ -563,11 +527,7 @@ namespace embree
/*! add new block */
void addBlock(void* ptr, ssize_t bytes)
{
-#if defined(__aarch64__) && defined(BUILD_IOS)
- std::scoped_lock lock(mutex);
-#else
Lock<SpinLock> lock(mutex);
-#endif
const size_t sizeof_Header = offsetof(Block,data[0]);
void* aptr = (void*) ((((size_t)ptr)+maxAlignment-1) & ~(maxAlignment-1));
size_t ofs = (size_t) aptr - (size_t) ptr;
@@ -653,8 +613,8 @@ namespace embree
bytesWasted(alloc->bytesWasted),
stat_all(alloc,ANY_TYPE),
stat_malloc(alloc,ALIGNED_MALLOC),
- stat_4K(alloc,EMBREE_OS_MALLOC,false),
- stat_2M(alloc,EMBREE_OS_MALLOC,true),
+ stat_4K(alloc,OS_MALLOC,false),
+ stat_2M(alloc,OS_MALLOC,true),
stat_shared(alloc,SHARED) {}
AllStatistics (size_t bytesUsed,
@@ -747,7 +707,7 @@ namespace embree
/* We avoid using os_malloc for small blocks as this could
* cause a risk of fragmenting the virtual address space and
* reach the limit of vm.max_map_count = 65k under Linux. */
- if (atype == EMBREE_OS_MALLOC && bytesAllocate < maxAllocationSize)
+ if (atype == OS_MALLOC && bytesAllocate < maxAllocationSize)
atype = ALIGNED_MALLOC;
/* we need to additionally allocate some header */
@@ -756,7 +716,7 @@ namespace embree
bytesReserve = sizeof_Header+bytesReserve;
/* consume full 4k pages with using os_malloc */
- if (atype == EMBREE_OS_MALLOC) {
+ if (atype == OS_MALLOC) {
bytesAllocate = ((bytesAllocate+PAGE_SIZE-1) & ~(PAGE_SIZE-1));
bytesReserve = ((bytesReserve +PAGE_SIZE-1) & ~(PAGE_SIZE-1));
}
@@ -788,11 +748,11 @@ namespace embree
return new (ptr) Block(ALIGNED_MALLOC,bytesAllocate-sizeof_Header,bytesAllocate-sizeof_Header,next,alignment);
}
}
- else if (atype == EMBREE_OS_MALLOC)
+ else if (atype == OS_MALLOC)
{
if (device) device->memoryMonitor(bytesAllocate,false);
bool huge_pages; ptr = os_malloc(bytesReserve,huge_pages);
- return new (ptr) Block(EMBREE_OS_MALLOC,bytesAllocate-sizeof_Header,bytesReserve-sizeof_Header,next,0,huge_pages);
+ return new (ptr) Block(OS_MALLOC,bytesAllocate-sizeof_Header,bytesReserve-sizeof_Header,next,0,huge_pages);
}
else
assert(false);
@@ -836,7 +796,7 @@ namespace embree
if (device) device->memoryMonitor(-sizeof_Alloced,true);
}
- else if (atype == EMBREE_OS_MALLOC) {
+ else if (atype == OS_MALLOC) {
size_t sizeof_This = sizeof_Header+reserveEnd;
os_free(this,sizeof_This,huge_pages);
if (device) device->memoryMonitor(-sizeof_Alloced,true);
@@ -897,7 +857,7 @@ namespace embree
bool hasType(AllocationType atype_i, bool huge_pages_i) const
{
if (atype_i == ANY_TYPE ) return true;
- else if (atype == EMBREE_OS_MALLOC) return atype_i == atype && huge_pages_i == huge_pages;
+ else if (atype == OS_MALLOC) return atype_i == atype && huge_pages_i == huge_pages;
else return atype_i == atype;
}
@@ -946,7 +906,7 @@ namespace embree
void print_block() const
{
if (atype == ALIGNED_MALLOC) std::cout << "A";
- else if (atype == EMBREE_OS_MALLOC) std::cout << "O";
+ else if (atype == OS_MALLOC) std::cout << "O";
else if (atype == SHARED) std::cout << "S";
if (huge_pages) std::cout << "H";
size_t bytesUsed = getBlockUsedBytes();
@@ -976,11 +936,7 @@ namespace embree
std::atomic<Block*> freeBlocks;
std::atomic<Block*> threadBlocks[MAX_THREAD_USED_BLOCK_SLOTS];
-#if defined(__aarch64__) && defined(BUILD_IOS)
- std::mutex slotMutex[MAX_THREAD_USED_BLOCK_SLOTS];
-#else
SpinLock slotMutex[MAX_THREAD_USED_BLOCK_SLOTS];
-#endif
bool use_single_mode;
size_t defaultBlockSize;
@@ -994,11 +950,7 @@ namespace embree
static __thread ThreadLocal2* thread_local_allocator2;
static SpinLock s_thread_local_allocators_lock;
static std::vector<std::unique_ptr<ThreadLocal2>> s_thread_local_allocators;
-#if defined(__aarch64__) && defined(BUILD_IOS)
- std::mutex thread_local_allocators_lock;
-#else
SpinLock thread_local_allocators_lock;
-#endif
std::vector<ThreadLocal2*> thread_local_allocators;
AllocationType atype;
mvector<PrimRef> primrefarray; //!< primrefarray used to allocate nodes
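[Editor note] The allocator hunks above swap the iOS std::mutex fallback back to embree's SpinLock; the structure being guarded is a set of per-thread bump allocators that only take the shared lock when a thread's current block is exhausted. A rough, single-threaded sketch of that bump-allocation idea (block size and refill policy are illustrative; blocks are never freed here):

    #include <algorithm>
    #include <cstdint>
    #include <cstdlib>
    #include <vector>

    class ThreadLocalBump {                       // one per thread in the real FastAllocator
    public:
        void* allocate(size_t bytes, size_t align = 16)
        {
            cur = (cur + align - 1) & ~uintptr_t(align - 1);    // align inside the current block
            if (cur + bytes > end) {                            // rare slow path: need a new block
                refill(bytes + align);
                cur = (cur + align - 1) & ~uintptr_t(align - 1);
            }
            void* ptr = reinterpret_cast<void*>(cur);
            cur += bytes;
            return ptr;
        }
    private:
        void refill(size_t bytes)
        {
            // In embree this path takes the parent allocator's SpinLock and carves the block
            // out of shared storage (aligned or OS allocation depending on atype).
            const size_t blockSize = std::max(bytes, size_t(64) * 1024);
            blocks.push_back(std::malloc(blockSize));
            cur = reinterpret_cast<uintptr_t>(blocks.back());
            end = cur + blockSize;
        }
        uintptr_t cur = 0, end = 0;
        std::vector<void*> blocks;
    };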
diff --git a/thirdparty/embree-aarch64/kernels/common/buffer.h b/thirdparty/embree/kernels/common/buffer.h
index 02d319c59d..793012c04d 100644
--- a/thirdparty/embree-aarch64/kernels/common/buffer.h
+++ b/thirdparty/embree/kernels/common/buffer.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
diff --git a/thirdparty/embree-aarch64/kernels/common/builder.h b/thirdparty/embree/kernels/common/builder.h
index d2a1cfe3ce..07fe7b069b 100644
--- a/thirdparty/embree-aarch64/kernels/common/builder.h
+++ b/thirdparty/embree/kernels/common/builder.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
diff --git a/thirdparty/embree-aarch64/kernels/common/context.h b/thirdparty/embree/kernels/common/context.h
index d0185a74f2..ccd88bdeac 100644
--- a/thirdparty/embree-aarch64/kernels/common/context.h
+++ b/thirdparty/embree/kernels/common/context.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
diff --git a/thirdparty/embree-aarch64/kernels/common/default.h b/thirdparty/embree/kernels/common/default.h
index 709119163b..f15d61b768 100644
--- a/thirdparty/embree-aarch64/kernels/common/default.h
+++ b/thirdparty/embree/kernels/common/default.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
@@ -55,11 +55,6 @@
#include <utility>
#include <sstream>
-#if !defined(_DEBUG) && defined(BUILD_IOS)
-#undef assert
-#define assert(_EXPR)
-#endif
-
namespace embree
{
////////////////////////////////////////////////////////////////////////////////
diff --git a/thirdparty/embree-aarch64/kernels/common/device.cpp b/thirdparty/embree/kernels/common/device.cpp
index 16ec11b892..068e0c2983 100644
--- a/thirdparty/embree-aarch64/kernels/common/device.cpp
+++ b/thirdparty/embree/kernels/common/device.cpp
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#include "device.h"
@@ -64,6 +64,9 @@ namespace embree
case CPU::NEHALEM: frequency_level = FREQUENCY_SIMD128; break;
case CPU::CORE2: frequency_level = FREQUENCY_SIMD128; break;
case CPU::CORE1: frequency_level = FREQUENCY_SIMD128; break;
+ case CPU::XEON_PHI_KNIGHTS_MILL : frequency_level = FREQUENCY_SIMD512; break;
+ case CPU::XEON_PHI_KNIGHTS_LANDING: frequency_level = FREQUENCY_SIMD512; break;
+ case CPU::ARM: frequency_level = FREQUENCY_SIMD128; break;
}
/* initialize global state */
@@ -71,10 +74,6 @@ namespace embree
State::parseString(EMBREE_CONFIG);
#endif
State::parseString(cfg);
- if (!ignore_config_files && FileName::executableFolder() != FileName(""))
- State::parseFile(FileName::executableFolder()+FileName(".embree" TOSTRING(RTC_VERSION_MAJOR)));
- if (!ignore_config_files && FileName::homeFolder() != FileName(""))
- State::parseFile(FileName::homeFolder()+FileName(".embree" TOSTRING(RTC_VERSION_MAJOR)));
State::verify();
/* check whether selected ISA is supported by the HW, as the user could have forced an unsupported ISA */
@@ -127,7 +126,7 @@ namespace embree
/* ray stream SOA to AOS conversion */
#if defined(EMBREE_RAY_PACKETS)
RayStreamFilterFuncsType rayStreamFilterFuncs;
- SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX2_AVX512KNL_AVX512SKX(enabled_cpu_features,rayStreamFilterFuncs);
+ SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX2_AVX512(enabled_cpu_features,rayStreamFilterFuncs);
rayStreamFilters = rayStreamFilterFuncs();
#endif
}
@@ -153,11 +152,8 @@ namespace embree
#if defined(EMBREE_TARGET_AVX2)
v += "AVX2 ";
#endif
-#if defined(EMBREE_TARGET_AVX512KNL)
- v += "AVX512KNL ";
-#endif
-#if defined(EMBREE_TARGET_AVX512SKX)
- v += "AVX512SKX ";
+#if defined(EMBREE_TARGET_AVX512)
+ v += "AVX512 ";
#endif
return v;
}
@@ -221,9 +217,6 @@ namespace embree
#if defined(TASKING_INTERNAL)
std::cout << "internal_tasking_system ";
#endif
-#if defined(TASKING_GCD) && defined(BUILD_IOS)
- std::cout << "GCD tasking system ";
-#endif
#if defined(TASKING_PPL)
std::cout << "PPL ";
#endif
@@ -448,7 +441,7 @@ namespace embree
#endif
#if defined(EMBREE_TARGET_SIMD16) && defined(EMBREE_RAY_PACKETS)
- case RTC_DEVICE_PROPERTY_NATIVE_RAY16_SUPPORTED: return hasISA(AVX512KNL) | hasISA(AVX512SKX);
+ case RTC_DEVICE_PROPERTY_NATIVE_RAY16_SUPPORTED: return hasISA(AVX512);
#else
case RTC_DEVICE_PROPERTY_NATIVE_RAY16_SUPPORTED: return 0;
#endif
@@ -506,10 +499,6 @@ namespace embree
#if defined(TASKING_PPL)
case RTC_DEVICE_PROPERTY_TASKING_SYSTEM: return 2;
#endif
-
-#if defined(TASKING_GCD) && defined(BUILD_IOS)
- case RTC_DEVICE_PROPERTY_TASKING_SYSTEM: return 3;
-#endif
#if defined(EMBREE_GEOMETRY_TRIANGLE)
case RTC_DEVICE_PROPERTY_TRIANGLE_GEOMETRY_SUPPORTED: return 1;
diff --git a/thirdparty/embree-aarch64/kernels/common/device.h b/thirdparty/embree/kernels/common/device.h
index e9a81bb109..21c42c654d 100644
--- a/thirdparty/embree-aarch64/kernels/common/device.h
+++ b/thirdparty/embree/kernels/common/device.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
diff --git a/thirdparty/embree-aarch64/kernels/common/geometry.cpp b/thirdparty/embree/kernels/common/geometry.cpp
index b3aa8e3396..d8d3f65a5c 100644
--- a/thirdparty/embree-aarch64/kernels/common/geometry.cpp
+++ b/thirdparty/embree/kernels/common/geometry.cpp
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#include "geometry.h"
diff --git a/thirdparty/embree-aarch64/kernels/common/geometry.h b/thirdparty/embree/kernels/common/geometry.h
index 953974bfd2..2f9f2e7c94 100644
--- a/thirdparty/embree-aarch64/kernels/common/geometry.h
+++ b/thirdparty/embree/kernels/common/geometry.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
@@ -301,7 +301,7 @@ namespace embree
template<int N>
__forceinline vint<N> timeSegment(const vfloat<N>& time, vfloat<N>& ftime) const {
- return getTimeSegment(time,vfloat<N>(time_range.lower),vfloat<N>(time_range.upper),vfloat<N>(fnumTimeSegments),ftime);
+ return getTimeSegment<N>(time,vfloat<N>(time_range.lower),vfloat<N>(time_range.upper),vfloat<N>(fnumTimeSegments),ftime);
}
/* calculate overlapping time segment range */
diff --git a/thirdparty/embree-aarch64/kernels/common/hit.h b/thirdparty/embree/kernels/common/hit.h
index 32a198cdfe..fd1a9d6391 100644
--- a/thirdparty/embree-aarch64/kernels/common/hit.h
+++ b/thirdparty/embree/kernels/common/hit.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
@@ -22,7 +22,7 @@ namespace embree
{
for (unsigned l = 0; l < RTC_MAX_INSTANCE_LEVEL_COUNT; ++l)
instID[l] = RTC_INVALID_GEOMETRY_ID;
- instance_id_stack::copy(context->instID, instID);
+ instance_id_stack::copy_UV<K>(context->instID, instID);
}
/* Returns the size of the hit */
@@ -48,7 +48,7 @@ namespace embree
__forceinline HitK(const RTCIntersectContext* context, unsigned int geomID, unsigned int primID, float u, float v, const Vec3fa& Ng)
: Ng(Ng.x,Ng.y,Ng.z), u(u), v(v), primID(primID), geomID(geomID)
{
- instance_id_stack::copy(context->instID, instID);
+ instance_id_stack::copy_UU(context->instID, instID);
}
/* Returns the size of the hit */
@@ -96,7 +96,7 @@ namespace embree
ray.v = hit.v;
ray.primID = hit.primID;
ray.geomID = hit.geomID;
- instance_id_stack::copy(hit.instID, ray.instID);
+ instance_id_stack::copy_UU(hit.instID, ray.instID);
}
template<int K>
@@ -109,6 +109,6 @@ namespace embree
vfloat<K>::storeu(mask,&ray.v, hit.v);
vuint<K>::storeu(mask,&ray.primID, hit.primID);
vuint<K>::storeu(mask,&ray.geomID, hit.geomID);
- instance_id_stack::copy(hit.instID, ray.instID, mask);
+ instance_id_stack::copy_VV<K>(hit.instID, ray.instID, mask);
}
}
diff --git a/thirdparty/embree/kernels/common/instance_stack.h b/thirdparty/embree/kernels/common/instance_stack.h
new file mode 100644
index 0000000000..d3c0a643f1
--- /dev/null
+++ b/thirdparty/embree/kernels/common/instance_stack.h
@@ -0,0 +1,179 @@
+// Copyright 2009-2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+
+#pragma once
+
+#include "rtcore.h"
+
+namespace embree {
+namespace instance_id_stack {
+
+static_assert(RTC_MAX_INSTANCE_LEVEL_COUNT > 0,
+ "RTC_MAX_INSTANCE_LEVEL_COUNT must be greater than 0.");
+
+/*******************************************************************************
+ * Instance ID stack manipulation.
+ * This is used from the instance intersector.
+ ******************************************************************************/
+
+/*
+ * Push an instance to the stack.
+ */
+RTC_FORCEINLINE bool push(RTCIntersectContext* context,
+ unsigned instanceId)
+{
+#if RTC_MAX_INSTANCE_LEVEL_COUNT > 1
+ const bool spaceAvailable = context->instStackSize < RTC_MAX_INSTANCE_LEVEL_COUNT;
+ /* We assert here because instances are silently dropped when the stack is full.
+ This might be quite hard to find in production. */
+ assert(spaceAvailable);
+ if (likely(spaceAvailable))
+ context->instID[context->instStackSize++] = instanceId;
+ return spaceAvailable;
+#else
+ const bool spaceAvailable = (context->instID[0] == RTC_INVALID_GEOMETRY_ID);
+ assert(spaceAvailable);
+ if (likely(spaceAvailable))
+ context->instID[0] = instanceId;
+ return spaceAvailable;
+#endif
+}
+
+
+/*
+ * Pop the last instance pushed to the stack.
+ * Do not call on an empty stack.
+ */
+RTC_FORCEINLINE void pop(RTCIntersectContext* context)
+{
+ assert(context);
+#if RTC_MAX_INSTANCE_LEVEL_COUNT > 1
+ assert(context->instStackSize > 0);
+ context->instID[--context->instStackSize] = RTC_INVALID_GEOMETRY_ID;
+#else
+ assert(context->instID[0] != RTC_INVALID_GEOMETRY_ID);
+ context->instID[0] = RTC_INVALID_GEOMETRY_ID;
+#endif
+}
+
+/*
+ * Optimized instance id stack copy.
+ * The copy() functions will either copy full
+ * stacks or copy only until the last valid element has been copied, depending
+ * on RTC_MAX_INSTANCE_LEVEL_COUNT.
+ */
+RTC_FORCEINLINE void copy_UU(const unsigned* src, unsigned* tgt)
+{
+#if (RTC_MAX_INSTANCE_LEVEL_COUNT == 1)
+ tgt[0] = src[0];
+
+#else
+ for (unsigned l = 0; l < RTC_MAX_INSTANCE_LEVEL_COUNT; ++l) {
+ tgt[l] = src[l];
+ if (RTC_MAX_INSTANCE_LEVEL_COUNT > 4)
+ if (src[l] == RTC_INVALID_GEOMETRY_ID)
+ break;
+ }
+#endif
+}
+
+template <int K>
+RTC_FORCEINLINE void copy_UV(const unsigned* src, vuint<K>* tgt)
+{
+#if (RTC_MAX_INSTANCE_LEVEL_COUNT == 1)
+ tgt[0] = src[0];
+
+#else
+ for (unsigned l = 0; l < RTC_MAX_INSTANCE_LEVEL_COUNT; ++l) {
+ tgt[l] = src[l];
+ if (RTC_MAX_INSTANCE_LEVEL_COUNT > 4)
+ if (src[l] == RTC_INVALID_GEOMETRY_ID)
+ break;
+ }
+#endif
+}
+
+template <int K>
+RTC_FORCEINLINE void copy_UV(const unsigned* src, vuint<K>* tgt, size_t j)
+{
+#if (RTC_MAX_INSTANCE_LEVEL_COUNT == 1)
+ tgt[0][j] = src[0];
+
+#else
+ for (unsigned l = 0; l < RTC_MAX_INSTANCE_LEVEL_COUNT; ++l) {
+ tgt[l][j] = src[l];
+ if (RTC_MAX_INSTANCE_LEVEL_COUNT > 4)
+ if (src[l] == RTC_INVALID_GEOMETRY_ID)
+ break;
+ }
+#endif
+}
+
+template <int K>
+RTC_FORCEINLINE void copy_UV(const unsigned* src, vuint<K>* tgt, const vbool<K>& mask)
+{
+#if (RTC_MAX_INSTANCE_LEVEL_COUNT == 1)
+ vuint<K>::store(mask, tgt, src[0]);
+
+#else
+ for (unsigned l = 0; l < RTC_MAX_INSTANCE_LEVEL_COUNT; ++l) {
+ vuint<K>::store(mask, tgt + l, src[l]);
+ if (RTC_MAX_INSTANCE_LEVEL_COUNT > 4)
+ if (src[l] == RTC_INVALID_GEOMETRY_ID)
+ break;
+ }
+#endif
+}
+
+template <int K>
+RTC_FORCEINLINE void copy_VU(const vuint<K>* src, unsigned* tgt, size_t i)
+{
+#if (RTC_MAX_INSTANCE_LEVEL_COUNT == 1)
+ tgt[0] = src[0][i];
+
+#else
+ for (unsigned l = 0; l < RTC_MAX_INSTANCE_LEVEL_COUNT; ++l) {
+ tgt[l] = src[l][i];
+ if (RTC_MAX_INSTANCE_LEVEL_COUNT > 4)
+ if (src[l][i] == RTC_INVALID_GEOMETRY_ID)
+ break;
+ }
+#endif
+}
+
+template <int K>
+RTC_FORCEINLINE void copy_VV(const vuint<K>* src, vuint<K>* tgt, size_t i, size_t j)
+{
+#if (RTC_MAX_INSTANCE_LEVEL_COUNT == 1)
+ tgt[0][j] = src[0][i];
+
+#else
+ for (unsigned l = 0; l < RTC_MAX_INSTANCE_LEVEL_COUNT; ++l) {
+ tgt[l][j] = src[l][i];
+ if (RTC_MAX_INSTANCE_LEVEL_COUNT > 4)
+ if (src[l][i] == RTC_INVALID_GEOMETRY_ID)
+ break;
+ }
+#endif
+}
+
+template <int K>
+RTC_FORCEINLINE void copy_VV(const vuint<K>* src, vuint<K>* tgt, const vbool<K>& mask)
+{
+#if (RTC_MAX_INSTANCE_LEVEL_COUNT == 1)
+ vuint<K>::store(mask, tgt, src[0]);
+
+#else
+ vbool<K> done = !mask;
+ for (unsigned l = 0; l < RTC_MAX_INSTANCE_LEVEL_COUNT; ++l) {
+ vuint<K>::store(mask, tgt + l, src[l]);
+ if (RTC_MAX_INSTANCE_LEVEL_COUNT > 4) {
+ done |= src[l] == RTC_INVALID_GEOMETRY_ID;
+ if (all(done)) break;
+ }
+ }
+#endif
+}
+
+} // namespace instance_id_stack
+} // namespace embree
diff --git a/thirdparty/embree-aarch64/kernels/common/isa.h b/thirdparty/embree/kernels/common/isa.h
index 63fb8d3351..ae6556336c 100644
--- a/thirdparty/embree-aarch64/kernels/common/isa.h
+++ b/thirdparty/embree/kernels/common/isa.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
@@ -13,23 +13,21 @@ namespace embree
name##Func name;
#define DECLARE_SYMBOL2(type,name) \
- namespace sse2 { extern type name(); } \
- namespace sse42 { extern type name(); } \
- namespace avx { extern type name(); } \
- namespace avx2 { extern type name(); } \
- namespace avx512knl { extern type name(); } \
- namespace avx512skx { extern type name(); } \
+ namespace sse2 { extern type name(); } \
+ namespace sse42 { extern type name(); } \
+ namespace avx { extern type name(); } \
+ namespace avx2 { extern type name(); } \
+ namespace avx512 { extern type name(); } \
void name##_error2() { throw_RTCError(RTC_ERROR_UNKNOWN,"internal error in ISA selection for " TOSTRING(name)); } \
type name##_error() { return type(name##_error2); } \
type name##_zero() { return type(nullptr); }
#define DECLARE_ISA_FUNCTION(type,symbol,args) \
- namespace sse2 { extern type symbol(args); } \
- namespace sse42 { extern type symbol(args); } \
- namespace avx { extern type symbol(args); } \
- namespace avx2 { extern type symbol(args); } \
- namespace avx512knl { extern type symbol(args); } \
- namespace avx512skx { extern type symbol(args); } \
+ namespace sse2 { extern type symbol(args); } \
+ namespace sse42 { extern type symbol(args); } \
+ namespace avx { extern type symbol(args); } \
+ namespace avx2 { extern type symbol(args); } \
+ namespace avx512 { extern type symbol(args); } \
inline type symbol##_error(args) { throw_RTCError(RTC_ERROR_UNSUPPORTED_CPU,"function " TOSTRING(symbol) " not supported by your CPU"); } \
typedef type (*symbol##Ty)(args); \
@@ -46,7 +44,7 @@ namespace embree
#define SELECT_SYMBOL_DEFAULT(features,intersector) \
intersector = isa::intersector;
-#if defined(__SSE__) || defined(__ARM_NEON)
+#if defined(__SSE__)
#if !defined(EMBREE_TARGET_SIMD4)
#define EMBREE_TARGET_SIMD4
#endif
@@ -84,24 +82,14 @@ namespace embree
#define SELECT_SYMBOL_AVX2(features,intersector)
#endif
-#if defined(EMBREE_TARGET_AVX512KNL)
+#if defined(EMBREE_TARGET_AVX512)
#if !defined(EMBREE_TARGET_SIMD16)
#define EMBREE_TARGET_SIMD16
#endif
-#define SELECT_SYMBOL_AVX512KNL(features,intersector) \
- if ((features & AVX512KNL) == AVX512KNL) intersector = avx512knl::intersector;
+#define SELECT_SYMBOL_AVX512(features,intersector) \
+ if ((features & AVX512) == AVX512) intersector = avx512::intersector;
#else
-#define SELECT_SYMBOL_AVX512KNL(features,intersector)
-#endif
-
-#if defined(EMBREE_TARGET_AVX512SKX)
-#if !defined(EMBREE_TARGET_SIMD16)
-#define EMBREE_TARGET_SIMD16
-#endif
-#define SELECT_SYMBOL_AVX512SKX(features,intersector) \
- if ((features & AVX512SKX) == AVX512SKX) intersector = avx512skx::intersector;
-#else
-#define SELECT_SYMBOL_AVX512SKX(features,intersector)
+#define SELECT_SYMBOL_AVX512(features,intersector)
#endif
#define SELECT_SYMBOL_DEFAULT_SSE42(features,intersector) \
@@ -119,39 +107,37 @@ namespace embree
SELECT_SYMBOL_AVX(features,intersector); \
SELECT_SYMBOL_AVX2(features,intersector);
-#define SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX512SKX(features,intersector) \
+#define SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX512(features,intersector) \
SELECT_SYMBOL_DEFAULT(features,intersector); \
SELECT_SYMBOL_SSE42(features,intersector); \
SELECT_SYMBOL_AVX(features,intersector); \
- SELECT_SYMBOL_AVX512SKX(features,intersector);
+ SELECT_SYMBOL_AVX512(features,intersector);
-#define SELECT_SYMBOL_DEFAULT_AVX_AVX2_AVX512KNL_AVX512SKX(features,intersector) \
+#define SELECT_SYMBOL_DEFAULT_AVX_AVX2_AVX512(features,intersector) \
SELECT_SYMBOL_DEFAULT(features,intersector); \
SELECT_SYMBOL_AVX(features,intersector); \
SELECT_SYMBOL_AVX2(features,intersector); \
- SELECT_SYMBOL_AVX512KNL(features,intersector); \
- SELECT_SYMBOL_AVX512SKX(features,intersector);
+ SELECT_SYMBOL_AVX512(features,intersector);
-#define SELECT_SYMBOL_DEFAULT_AVX_AVX2_AVX512SKX(features,intersector) \
+#define SELECT_SYMBOL_DEFAULT_AVX_AVX2_AVX512(features,intersector) \
SELECT_SYMBOL_DEFAULT(features,intersector); \
SELECT_SYMBOL_AVX(features,intersector); \
SELECT_SYMBOL_AVX2(features,intersector); \
- SELECT_SYMBOL_AVX512SKX(features,intersector);
+ SELECT_SYMBOL_AVX512(features,intersector);
-#define SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX2_AVX512KNL_AVX512SKX(features,intersector) \
+#define SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX2_AVX512(features,intersector) \
SELECT_SYMBOL_DEFAULT(features,intersector); \
SELECT_SYMBOL_SSE42(features,intersector); \
SELECT_SYMBOL_AVX(features,intersector); \
SELECT_SYMBOL_AVX2(features,intersector); \
- SELECT_SYMBOL_AVX512KNL(features,intersector); \
- SELECT_SYMBOL_AVX512SKX(features,intersector);
+ SELECT_SYMBOL_AVX512(features,intersector);
-#define SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX2_AVX512SKX(features,intersector) \
+#define SELECT_SYMBOL_DEFAULT_SSE42_AVX_AVX2_AVX512(features,intersector) \
SELECT_SYMBOL_DEFAULT(features,intersector); \
SELECT_SYMBOL_SSE42(features,intersector); \
SELECT_SYMBOL_AVX(features,intersector); \
SELECT_SYMBOL_AVX2(features,intersector); \
- SELECT_SYMBOL_AVX512SKX(features,intersector);
+ SELECT_SYMBOL_AVX512(features,intersector);
#define SELECT_SYMBOL_DEFAULT_AVX(features,intersector) \
SELECT_SYMBOL_DEFAULT(features,intersector); \
@@ -162,21 +148,19 @@ namespace embree
SELECT_SYMBOL_AVX(features,intersector); \
SELECT_SYMBOL_AVX2(features,intersector);
-#define SELECT_SYMBOL_DEFAULT_AVX_AVX512KNL(features,intersector) \
+#define SELECT_SYMBOL_DEFAULT_AVX(features,intersector) \
SELECT_SYMBOL_DEFAULT(features,intersector); \
- SELECT_SYMBOL_AVX(features,intersector); \
- SELECT_SYMBOL_AVX512KNL(features,intersector);
-
-#define SELECT_SYMBOL_DEFAULT_AVX_AVX512KNL_AVX512SKX(features,intersector) \
+ SELECT_SYMBOL_AVX(features,intersector);
+
+#define SELECT_SYMBOL_DEFAULT_AVX_AVX512(features,intersector) \
SELECT_SYMBOL_DEFAULT(features,intersector); \
SELECT_SYMBOL_AVX(features,intersector); \
- SELECT_SYMBOL_AVX512KNL(features,intersector); \
- SELECT_SYMBOL_AVX512SKX(features,intersector);
+ SELECT_SYMBOL_AVX512(features,intersector);
-#define SELECT_SYMBOL_DEFAULT_AVX_AVX512SKX(features,intersector) \
+#define SELECT_SYMBOL_DEFAULT_AVX_AVX512(features,intersector) \
SELECT_SYMBOL_DEFAULT(features,intersector); \
SELECT_SYMBOL_AVX(features,intersector); \
- SELECT_SYMBOL_AVX512SKX(features,intersector);
+ SELECT_SYMBOL_AVX512(features,intersector);
#define SELECT_SYMBOL_INIT_AVX(features,intersector) \
INIT_SYMBOL(features,intersector); \
@@ -187,11 +171,11 @@ namespace embree
SELECT_SYMBOL_AVX(features,intersector); \
SELECT_SYMBOL_AVX2(features,intersector);
-#define SELECT_SYMBOL_INIT_AVX_AVX2_AVX512SKX(features,intersector) \
+#define SELECT_SYMBOL_INIT_AVX_AVX2_AVX512(features,intersector) \
INIT_SYMBOL(features,intersector); \
SELECT_SYMBOL_AVX(features,intersector); \
SELECT_SYMBOL_AVX2(features,intersector); \
- SELECT_SYMBOL_AVX512SKX(features,intersector);
+ SELECT_SYMBOL_AVX512(features,intersector);
#define SELECT_SYMBOL_INIT_SSE42_AVX_AVX2(features,intersector) \
INIT_SYMBOL(features,intersector); \
@@ -199,57 +183,49 @@ namespace embree
SELECT_SYMBOL_AVX(features,intersector); \
SELECT_SYMBOL_AVX2(features,intersector);
-#define SELECT_SYMBOL_INIT_AVX_AVX512KNL(features,intersector) \
+#define SELECT_SYMBOL_INIT_AVX(features,intersector) \
INIT_SYMBOL(features,intersector); \
- SELECT_SYMBOL_AVX(features,intersector); \
- SELECT_SYMBOL_AVX512KNL(features,intersector);
+ SELECT_SYMBOL_AVX(features,intersector);
-#define SELECT_SYMBOL_INIT_AVX_AVX512KNL_AVX512SKX(features,intersector) \
+#define SELECT_SYMBOL_INIT_AVX_AVX512(features,intersector) \
INIT_SYMBOL(features,intersector); \
SELECT_SYMBOL_AVX(features,intersector); \
- SELECT_SYMBOL_AVX512KNL(features,intersector); \
- SELECT_SYMBOL_AVX512SKX(features,intersector);
+ SELECT_SYMBOL_AVX512(features,intersector);
-#define SELECT_SYMBOL_INIT_AVX_AVX2_AVX512KNL(features,intersector) \
+#define SELECT_SYMBOL_INIT_AVX_AVX2(features,intersector) \
INIT_SYMBOL(features,intersector); \
SELECT_SYMBOL_AVX(features,intersector); \
- SELECT_SYMBOL_AVX2(features,intersector); \
- SELECT_SYMBOL_AVX512KNL(features,intersector);
+ SELECT_SYMBOL_AVX2(features,intersector);
-#define SELECT_SYMBOL_INIT_AVX_AVX2_AVX512KNL_AVX512SKX(features,intersector) \
+#define SELECT_SYMBOL_INIT_AVX_AVX2_AVX512(features,intersector) \
INIT_SYMBOL(features,intersector); \
SELECT_SYMBOL_AVX(features,intersector); \
SELECT_SYMBOL_AVX2(features,intersector); \
- SELECT_SYMBOL_AVX512KNL(features,intersector); \
- SELECT_SYMBOL_AVX512SKX(features,intersector);
+ SELECT_SYMBOL_AVX512(features,intersector);
-#define SELECT_SYMBOL_INIT_SSE42_AVX_AVX2_AVX512KNL_AVX512SKX(features,intersector) \
+#define SELECT_SYMBOL_INIT_SSE42_AVX_AVX2_AVX512(features,intersector) \
INIT_SYMBOL(features,intersector); \
SELECT_SYMBOL_SSE42(features,intersector); \
SELECT_SYMBOL_AVX(features,intersector); \
SELECT_SYMBOL_AVX2(features,intersector); \
- SELECT_SYMBOL_AVX512KNL(features,intersector); \
- SELECT_SYMBOL_AVX512SKX(features,intersector);
+ SELECT_SYMBOL_AVX512(features,intersector);
-#define SELECT_SYMBOL_ZERO_SSE42_AVX_AVX2_AVX512KNL_AVX512SKX(features,intersector) \
+#define SELECT_SYMBOL_ZERO_SSE42_AVX_AVX2_AVX512(features,intersector) \
ZERO_SYMBOL(features,intersector); \
SELECT_SYMBOL_SSE42(features,intersector); \
SELECT_SYMBOL_AVX(features,intersector); \
SELECT_SYMBOL_AVX2(features,intersector); \
- SELECT_SYMBOL_AVX512KNL(features,intersector); \
- SELECT_SYMBOL_AVX512SKX(features,intersector);
+ SELECT_SYMBOL_AVX512(features,intersector);
-#define SELECT_SYMBOL_DEFAULT_AVX_AVX2_AVX512KNL_AVX512SKX(features,intersector) \
+#define SELECT_SYMBOL_DEFAULT_AVX_AVX2_AVX512(features,intersector) \
SELECT_SYMBOL_DEFAULT(features,intersector); \
SELECT_SYMBOL_AVX(features,intersector); \
SELECT_SYMBOL_AVX2(features,intersector); \
- SELECT_SYMBOL_AVX512KNL(features,intersector); \
- SELECT_SYMBOL_AVX512SKX(features,intersector);
+ SELECT_SYMBOL_AVX512(features,intersector);
-#define SELECT_SYMBOL_INIT_AVX512KNL_AVX512SKX(features,intersector) \
+#define SELECT_SYMBOL_INIT_AVX512(features,intersector) \
INIT_SYMBOL(features,intersector); \
- SELECT_SYMBOL_AVX512KNL(features,intersector); \
- SELECT_SYMBOL_AVX512SKX(features,intersector);
+ SELECT_SYMBOL_AVX512(features,intersector);
#define SELECT_SYMBOL_SSE42_AVX_AVX2(features,intersector) \
SELECT_SYMBOL_SSE42(features,intersector); \
@@ -262,10 +238,9 @@ namespace embree
else return getISA(depth-1);
}
};
- namespace sse2 { int getISA(); };
- namespace sse42 { int getISA(); };
- namespace avx { int getISA(); };
- namespace avx2 { int getISA(); };
- namespace avx512knl { int getISA(); };
- namespace avx512skx { int getISA(); };
+ namespace sse2 { int getISA(); };
+ namespace sse42 { int getISA(); };
+ namespace avx { int getISA(); };
+ namespace avx2 { int getISA(); };
+ namespace avx512 { int getISA(); };
}
diff --git a/thirdparty/embree-aarch64/kernels/common/motion_derivative.h b/thirdparty/embree/kernels/common/motion_derivative.h
index 82953f0e89..c619d6a675 100644
--- a/thirdparty/embree-aarch64/kernels/common/motion_derivative.h
+++ b/thirdparty/embree/kernels/common/motion_derivative.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
diff --git a/thirdparty/embree-aarch64/kernels/common/point_query.h b/thirdparty/embree/kernels/common/point_query.h
index 27d158ca3a..7d55c91fff 100644
--- a/thirdparty/embree-aarch64/kernels/common/point_query.h
+++ b/thirdparty/embree/kernels/common/point_query.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
diff --git a/thirdparty/embree-aarch64/kernels/common/primref.h b/thirdparty/embree/kernels/common/primref.h
index ce75c982bb..d61763487b 100644
--- a/thirdparty/embree-aarch64/kernels/common/primref.h
+++ b/thirdparty/embree/kernels/common/primref.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
@@ -29,7 +29,7 @@ namespace embree
__forceinline PrimRef (const BBox3fa& bounds, size_t id)
{
-#if defined(__X86_64__) || defined(__aarch64__)
+#if defined(__64BIT__)
lower = Vec3fx(bounds.lower, (unsigned)(id & 0xFFFFFFFF));
upper = Vec3fx(bounds.upper, (unsigned)((id >> 32) & 0xFFFFFFFF));
#else
@@ -79,7 +79,7 @@ namespace embree
/*! returns an size_t sized ID */
__forceinline size_t ID() const {
-#if defined(__X86_64__) || defined(__aarch64__)
+#if defined(__64BIT__)
return size_t(lower.u) + (size_t(upper.u) << 32);
#else
return size_t(lower.u);
diff --git a/thirdparty/embree-aarch64/kernels/common/primref_mb.h b/thirdparty/embree/kernels/common/primref_mb.h
index b6c1ad5712..fb08a05003 100644
--- a/thirdparty/embree-aarch64/kernels/common/primref_mb.h
+++ b/thirdparty/embree/kernels/common/primref_mb.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
@@ -32,7 +32,7 @@ namespace embree
: lbounds((LBBox3fx)lbounds_i), time_range(time_range)
{
assert(activeTimeSegments > 0);
-#if defined(__X86_64__) || defined(__aarch64__)
+#if defined(__64BIT__)
lbounds.bounds0.lower.a = id & 0xFFFFFFFF;
lbounds.bounds0.upper.a = (id >> 32) & 0xFFFFFFFF;
#else
@@ -47,7 +47,7 @@ namespace embree
: lbounds((LBBox3fx)lbounds_i), time_range(time_range)
{
assert(activeTimeSegments > 0);
-#if defined(__X86_64__) || defined(__aarch64__)
+#if defined(__64BIT__)
lbounds.bounds0.lower.u = id & 0xFFFFFFFF;
lbounds.bounds0.upper.u = (id >> 32) & 0xFFFFFFFF;
#else
@@ -115,7 +115,7 @@ namespace embree
/*! returns an size_t sized ID */
__forceinline size_t ID() const {
-#if defined(__X86_64__) || defined(__aarch64__)
+#if defined(__64BIT__)
return size_t(lbounds.bounds0.lower.u) + (size_t(lbounds.bounds0.upper.u) << 32);
#else
return size_t(lbounds.bounds0.lower.u);
@@ -163,7 +163,7 @@ namespace embree
: bbox(bounds.interpolate(0.5f)), _activeTimeSegments(activeTimeSegments), _totalTimeSegments(totalTimeSegments), time_range(time_range)
{
assert(activeTimeSegments > 0);
-#if defined(__X86_64__) || defined(__aarch64__)
+#if defined(__64BIT__)
bbox.lower.u = id & 0xFFFFFFFF;
bbox.upper.u = (id >> 32) & 0xFFFFFFFF;
#else
@@ -229,7 +229,7 @@ namespace embree
/*! returns an size_t sized ID */
__forceinline size_t ID() const {
-#if defined(__X86_64__) || defined(__aarch64__)
+#if defined(__64BIT__)
return size_t(bbox.lower.u) + (size_t(bbox.upper.u) << 32);
#else
return size_t(bbox.lower.u);
diff --git a/thirdparty/embree-aarch64/kernels/common/profile.h b/thirdparty/embree/kernels/common/profile.h
index a7de36414d..5ef7f6ec0f 100644
--- a/thirdparty/embree-aarch64/kernels/common/profile.h
+++ b/thirdparty/embree/kernels/common/profile.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
diff --git a/thirdparty/embree-aarch64/kernels/common/ray.h b/thirdparty/embree/kernels/common/ray.h
index 336d48942c..7b951cc1e8 100644
--- a/thirdparty/embree-aarch64/kernels/common/ray.h
+++ b/thirdparty/embree/kernels/common/ray.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
@@ -292,7 +292,7 @@ namespace embree
ray.u = u[i]; ray.v = v[i];
ray.primID = primID[i]; ray.geomID = geomID[i];
- instance_id_stack::copy(instID, ray.instID, i);
+ instance_id_stack::copy_VU<K>(instID, ray.instID, i);
}
/* Converts single rays to ray packet */
@@ -331,7 +331,7 @@ namespace embree
u[i] = ray.u; v[i] = ray.v;
primID[i] = ray.primID; geomID[i] = ray.geomID;
- instance_id_stack::copy(ray.instID, instID, i);
+ instance_id_stack::copy_UV<K>(ray.instID, instID, i);
}
/* copies a ray packet element into another element*/
@@ -353,7 +353,7 @@ namespace embree
u[dest] = u[source]; v[dest] = v[source];
primID[dest] = primID[source]; geomID[dest] = geomID[source];
- instance_id_stack::copy(instID, instID, source, dest);
+ instance_id_stack::copy_VV<K>(instID, instID, source, dest);
}
/* Shortcuts */
@@ -1112,7 +1112,7 @@ namespace embree
__forceinline RayK<K> getRayByOffset(const vbool<K>& valid, const vint<K>& offset)
{
const vint<K> valid_offset = select(valid, offset, vintx(zero));
- return getRayByOffset(valid_offset);
+ return getRayByOffset<K>(valid_offset);
}
template<int K>
@@ -1153,7 +1153,7 @@ namespace embree
ray_k->primID = ray.primID[k];
ray_k->geomID = ray.geomID[k];
- instance_id_stack::copy(ray.instID, ray_k->instID, k);
+ instance_id_stack::copy_VU<K>(ray.instID, ray_k->instID, k);
}
#endif
}
@@ -1185,7 +1185,7 @@ namespace embree
};
template<>
- __forceinline Ray4 RayStreamAOS::getRayByOffset(const vint4& offset)
+ __forceinline Ray4 RayStreamAOS::getRayByOffset<4>(const vint4& offset)
{
Ray4 ray;
@@ -1222,7 +1222,7 @@ namespace embree
#if defined(__AVX__)
template<>
- __forceinline Ray8 RayStreamAOS::getRayByOffset(const vint8& offset)
+ __forceinline Ray8 RayStreamAOS::getRayByOffset<8>(const vint8& offset)
{
Ray8 ray;
@@ -1260,7 +1260,7 @@ namespace embree
#if defined(__AVX512F__)
template<>
- __forceinline Ray16 RayStreamAOS::getRayByOffset(const vint16& offset)
+ __forceinline Ray16 RayStreamAOS::getRayByOffset<16>(const vint16& offset)
{
Ray16 ray;
@@ -1332,7 +1332,7 @@ namespace embree
__forceinline RayK<K> getRayByIndex(const vbool<K>& valid, const vint<K>& index)
{
const vint<K> valid_index = select(valid, index, vintx(zero));
- return getRayByIndex(valid_index);
+ return getRayByIndex<K>(valid_index);
}
template<int K>
@@ -1357,7 +1357,7 @@ namespace embree
ray_k->v = ray.v[k];
ray_k->primID = ray.primID[k];
ray_k->geomID = ray.geomID[k];
- instance_id_stack::copy(ray.instID, ray_k->instID, k);
+ instance_id_stack::copy_VU<K>(ray.instID, ray_k->instID, k);
}
}
}
@@ -1385,7 +1385,7 @@ namespace embree
};
template<>
- __forceinline Ray4 RayStreamAOP::getRayByIndex(const vint4& index)
+ __forceinline Ray4 RayStreamAOP::getRayByIndex<4>(const vint4& index)
{
Ray4 ray;
@@ -1422,7 +1422,7 @@ namespace embree
#if defined(__AVX__)
template<>
- __forceinline Ray8 RayStreamAOP::getRayByIndex(const vint8& index)
+ __forceinline Ray8 RayStreamAOP::getRayByIndex<8>(const vint8& index)
{
Ray8 ray;
@@ -1460,7 +1460,7 @@ namespace embree
#if defined(__AVX512F__)
template<>
- __forceinline Ray16 RayStreamAOP::getRayByIndex(const vint16& index)
+ __forceinline Ray16 RayStreamAOP::getRayByIndex<16>(const vint16& index)
{
Ray16 ray;
diff --git a/thirdparty/embree-aarch64/kernels/common/rtcore.cpp b/thirdparty/embree/kernels/common/rtcore.cpp
index 625fbf6d4f..94b3819e42 100644
--- a/thirdparty/embree-aarch64/kernels/common/rtcore.cpp
+++ b/thirdparty/embree/kernels/common/rtcore.cpp
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#define RTC_EXPORT_API
@@ -8,31 +8,18 @@
#include "scene.h"
#include "context.h"
#include "../../include/embree3/rtcore_ray.h"
-
-#if defined(__aarch64__) && defined(BUILD_IOS)
-#include <mutex>
-#endif
-
using namespace embree;
RTC_NAMESPACE_BEGIN;
/* mutex to make API thread safe */
-#if defined(__aarch64__) && defined(BUILD_IOS)
- static std::mutex g_mutex;
-#else
- static MutexSys g_mutex;
-#endif
+ static MutexSys g_mutex;
RTC_API RTCDevice rtcNewDevice(const char* config)
{
RTC_CATCH_BEGIN;
RTC_TRACE(rtcNewDevice);
-#if defined(__aarch64__) && defined(BUILD_IOS)
- std::scoped_lock lock(g_mutex);
-#else
Lock<MutexSys> lock(g_mutex);
-#endif
Device* device = new Device(config);
return (RTCDevice) device->refInc();
RTC_CATCH_END(nullptr);
@@ -45,11 +32,7 @@ RTC_NAMESPACE_BEGIN;
RTC_CATCH_BEGIN;
RTC_TRACE(rtcRetainDevice);
RTC_VERIFY_HANDLE(hdevice);
-#if defined(__aarch64__) && defined(BUILD_IOS)
- std::scoped_lock lock(g_mutex);
-#else
Lock<MutexSys> lock(g_mutex);
-#endif
device->refInc();
RTC_CATCH_END(nullptr);
}
@@ -60,11 +43,7 @@ RTC_NAMESPACE_BEGIN;
RTC_CATCH_BEGIN;
RTC_TRACE(rtcReleaseDevice);
RTC_VERIFY_HANDLE(hdevice);
-#if defined(__aarch64__) && defined(BUILD_IOS)
- std::scoped_lock lock(g_mutex);
-#else
Lock<MutexSys> lock(g_mutex);
-#endif
device->refDec();
RTC_CATCH_END(nullptr);
}
@@ -75,11 +54,7 @@ RTC_NAMESPACE_BEGIN;
RTC_CATCH_BEGIN;
RTC_TRACE(rtcGetDeviceProperty);
RTC_VERIFY_HANDLE(hdevice);
-#if defined(__aarch64__) && defined(BUILD_IOS)
- std::scoped_lock lock(g_mutex);
-#else
Lock<MutexSys> lock(g_mutex);
-#endif
return device->getProperty(prop);
RTC_CATCH_END(device);
return 0;
@@ -92,11 +67,7 @@ RTC_NAMESPACE_BEGIN;
RTC_TRACE(rtcSetDeviceProperty);
const bool internal_prop = (size_t)prop >= 1000000 && (size_t)prop < 1000004;
if (!internal_prop) RTC_VERIFY_HANDLE(hdevice); // allow NULL device for special internal settings
-#if defined(__aarch64__) && defined(BUILD_IOS)
- std::scoped_lock lock(g_mutex);
-#else
Lock<MutexSys> lock(g_mutex);
-#endif
device->setProperty(prop,val);
RTC_CATCH_END(device);
}
@@ -212,11 +183,7 @@ RTC_NAMESPACE_BEGIN;
RTC_CATCH_BEGIN;
RTC_TRACE(rtcSetSceneProgressMonitorFunction);
RTC_VERIFY_HANDLE(hscene);
-#if defined(__aarch64__) && defined(BUILD_IOS)
- std::scoped_lock lock(g_mutex);
-#else
Lock<MutexSys> lock(g_mutex);
-#endif
scene->setProgressMonitorFunction(progress,ptr);
RTC_CATCH_END2(scene);
}
@@ -515,12 +482,12 @@ RTC_NAMESPACE_BEGIN;
IntersectContext context(scene,user_context);
#if !defined(EMBREE_RAY_PACKETS)
- RayHit4* rayhit4 = (RayHit4*)rayhit;
+ Ray4* ray4 = (Ray4*) rayhit;
for (size_t i=0; i<4; i++) {
if (!valid[i]) continue;
- RayHit ray1; rayhit4->get(i,ray1);
+ RayHit ray1; ray4->get(i,ray1);
scene->intersectors.intersect((RTCRayHit&)ray1,&context);
- rayhit4->set(i,ray1);
+ ray4->set(i,ray1);
}
#else
scene->intersectors.intersect4(valid,*rayhit,&context);
@@ -546,12 +513,12 @@ RTC_NAMESPACE_BEGIN;
IntersectContext context(scene,user_context);
#if !defined(EMBREE_RAY_PACKETS)
- RayHit8* rayhit8 = (RayHit8*) rayhit;
+ Ray8* ray8 = (Ray8*) rayhit;
for (size_t i=0; i<8; i++) {
if (!valid[i]) continue;
- RayHit ray1; rayhit8->get(i,ray1);
+ RayHit ray1; ray8->get(i,ray1);
scene->intersectors.intersect((RTCRayHit&)ray1,&context);
- rayhit8->set(i,ray1);
+ ray8->set(i,ray1);
}
#else
if (likely(scene->intersectors.intersector8))
@@ -579,12 +546,12 @@ RTC_NAMESPACE_BEGIN;
IntersectContext context(scene,user_context);
#if !defined(EMBREE_RAY_PACKETS)
- RayHit16* rayhit16 = (RayHit16*) rayhit;
+ Ray16* ray16 = (Ray16*) rayhit;
for (size_t i=0; i<16; i++) {
if (!valid[i]) continue;
- RayHit ray1; rayhit16->get(i,ray1);
+ RayHit ray1; ray16->get(i,ray1);
scene->intersectors.intersect((RTCRayHit&)ray1,&context);
- rayhit16->set(i,ray1);
+ ray16->set(i,ray1);
}
#else
if (likely(scene->intersectors.intersector16))
@@ -766,12 +733,12 @@ RTC_NAMESPACE_BEGIN;
IntersectContext context(scene,user_context);
#if !defined(EMBREE_RAY_PACKETS)
- Ray4* ray4 = (Ray4*) ray;
+ RayHit4* ray4 = (RayHit4*) ray;
for (size_t i=0; i<4; i++) {
if (!valid[i]) continue;
- Ray ray1; ray4->get(i,ray1);
+ RayHit ray1; ray4->get(i,ray1);
scene->intersectors.occluded((RTCRay&)ray1,&context);
- ray4->set(i,ray1);
+ ray4->geomID[i] = ray1.geomID;
}
#else
scene->intersectors.occluded4(valid,*ray,&context);
@@ -797,10 +764,10 @@ RTC_NAMESPACE_BEGIN;
IntersectContext context(scene,user_context);
#if !defined(EMBREE_RAY_PACKETS)
- Ray8* ray8 = (Ray8*) ray;
+ RayHit8* ray8 = (RayHit8*) ray;
for (size_t i=0; i<8; i++) {
if (!valid[i]) continue;
- Ray ray1; ray8->get(i,ray1);
+ RayHit ray1; ray8->get(i,ray1);
scene->intersectors.occluded((RTCRay&)ray1,&context);
ray8->set(i,ray1);
}
@@ -831,10 +798,10 @@ RTC_NAMESPACE_BEGIN;
IntersectContext context(scene,user_context);
#if !defined(EMBREE_RAY_PACKETS)
- Ray16* ray16 = (Ray16*) ray;
+ RayHit16* ray16 = (RayHit16*) ray;
for (size_t i=0; i<16; i++) {
if (!valid[i]) continue;
- Ray ray1; ray16->get(i,ray1);
+ RayHit ray1; ray16->get(i,ray1);
scene->intersectors.occluded((RTCRay&)ray1,&context);
ray16->set(i,ray1);
}
@@ -1152,7 +1119,7 @@ RTC_NAMESPACE_BEGIN;
{
#if defined(EMBREE_GEOMETRY_TRIANGLE)
createTriangleMeshTy createTriangleMesh = nullptr;
- SELECT_SYMBOL_DEFAULT_AVX_AVX2_AVX512KNL_AVX512SKX(device->enabled_cpu_features,createTriangleMesh);
+ SELECT_SYMBOL_DEFAULT_AVX_AVX2_AVX512(device->enabled_cpu_features,createTriangleMesh);
Geometry* geom = createTriangleMesh(device);
return (RTCGeometry) geom->refInc();
#else
@@ -1164,7 +1131,7 @@ RTC_NAMESPACE_BEGIN;
{
#if defined(EMBREE_GEOMETRY_QUAD)
createQuadMeshTy createQuadMesh = nullptr;
- SELECT_SYMBOL_DEFAULT_AVX_AVX2_AVX512KNL_AVX512SKX(device->enabled_cpu_features,createQuadMesh);
+ SELECT_SYMBOL_DEFAULT_AVX_AVX2_AVX512(device->enabled_cpu_features,createQuadMesh);
Geometry* geom = createQuadMesh(device);
return (RTCGeometry) geom->refInc();
#else
@@ -1178,7 +1145,7 @@ RTC_NAMESPACE_BEGIN;
{
#if defined(EMBREE_GEOMETRY_POINT)
createPointsTy createPoints = nullptr;
- SELECT_SYMBOL_DEFAULT_AVX_AVX2_AVX512KNL_AVX512SKX(device->enabled_builder_cpu_features, createPoints);
+ SELECT_SYMBOL_DEFAULT_AVX_AVX2_AVX512(device->enabled_builder_cpu_features, createPoints);
Geometry *geom;
switch(type) {
@@ -1223,9 +1190,9 @@ RTC_NAMESPACE_BEGIN;
{
#if defined(EMBREE_GEOMETRY_CURVE)
createLineSegmentsTy createLineSegments = nullptr;
- SELECT_SYMBOL_DEFAULT_AVX_AVX2_AVX512KNL_AVX512SKX(device->enabled_cpu_features,createLineSegments);
+ SELECT_SYMBOL_DEFAULT_AVX_AVX2_AVX512(device->enabled_cpu_features,createLineSegments);
createCurvesTy createCurves = nullptr;
- SELECT_SYMBOL_DEFAULT_AVX_AVX2_AVX512KNL_AVX512SKX(device->enabled_cpu_features,createCurves);
+ SELECT_SYMBOL_DEFAULT_AVX_AVX2_AVX512(device->enabled_cpu_features,createCurves);
Geometry* geom;
switch (type) {
@@ -1262,7 +1229,7 @@ RTC_NAMESPACE_BEGIN;
#if defined(EMBREE_GEOMETRY_SUBDIVISION)
createSubdivMeshTy createSubdivMesh = nullptr;
SELECT_SYMBOL_DEFAULT_AVX(device->enabled_cpu_features,createSubdivMesh);
- //SELECT_SYMBOL_DEFAULT_AVX_AVX2_AVX512KNL_AVX512SKX(device->enabled_cpu_features,createSubdivMesh); // FIXME: this does not work for some reason?
+ //SELECT_SYMBOL_DEFAULT_AVX_AVX2_AVX512(device->enabled_cpu_features,createSubdivMesh); // FIXME: this does not work for some reason?
Geometry* geom = createSubdivMesh(device);
return (RTCGeometry) geom->refInc();
#else
@@ -1274,7 +1241,7 @@ RTC_NAMESPACE_BEGIN;
{
#if defined(EMBREE_GEOMETRY_USER)
createUserGeometryTy createUserGeometry = nullptr;
- SELECT_SYMBOL_DEFAULT_AVX_AVX2_AVX512KNL_AVX512SKX(device->enabled_cpu_features,createUserGeometry);
+ SELECT_SYMBOL_DEFAULT_AVX_AVX2_AVX512(device->enabled_cpu_features,createUserGeometry);
Geometry* geom = createUserGeometry(device);
return (RTCGeometry) geom->refInc();
#else
@@ -1286,7 +1253,7 @@ RTC_NAMESPACE_BEGIN;
{
#if defined(EMBREE_GEOMETRY_INSTANCE)
createInstanceTy createInstance = nullptr;
- SELECT_SYMBOL_DEFAULT_AVX_AVX2_AVX512KNL_AVX512SKX(device->enabled_cpu_features,createInstance);
+ SELECT_SYMBOL_DEFAULT_AVX_AVX2_AVX512(device->enabled_cpu_features,createInstance);
Geometry* geom = createInstance(device);
return (RTCGeometry) geom->refInc();
#else
@@ -1298,7 +1265,7 @@ RTC_NAMESPACE_BEGIN;
{
#if defined(EMBREE_GEOMETRY_GRID)
createGridMeshTy createGridMesh = nullptr;
- SELECT_SYMBOL_DEFAULT_AVX_AVX2_AVX512KNL_AVX512SKX(device->enabled_cpu_features,createGridMesh);
+ SELECT_SYMBOL_DEFAULT_AVX_AVX2_AVX512(device->enabled_cpu_features,createGridMesh);
Geometry* geom = createGridMesh(device);
return (RTCGeometry) geom->refInc();
#else
diff --git a/thirdparty/embree-aarch64/kernels/common/rtcore.h b/thirdparty/embree/kernels/common/rtcore.h
index 4b070e122b..373e49a689 100644
--- a/thirdparty/embree-aarch64/kernels/common/rtcore.h
+++ b/thirdparty/embree/kernels/common/rtcore.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
diff --git a/thirdparty/embree-aarch64/kernels/common/rtcore_builder.cpp b/thirdparty/embree/kernels/common/rtcore_builder.cpp
index 6bb96bba07..1f1b6f6ddf 100644
--- a/thirdparty/embree-aarch64/kernels/common/rtcore_builder.cpp
+++ b/thirdparty/embree/kernels/common/rtcore_builder.cpp
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#define RTC_EXPORT_API
diff --git a/thirdparty/embree-aarch64/kernels/common/scene.cpp b/thirdparty/embree/kernels/common/scene.cpp
index 1e23aeb415..408d7eae6f 100644
--- a/thirdparty/embree-aarch64/kernels/common/scene.cpp
+++ b/thirdparty/embree/kernels/common/scene.cpp
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#include "scene.h"
@@ -6,7 +6,7 @@
#include "../bvh/bvh4_factory.h"
#include "../bvh/bvh8_factory.h"
#include "../../common/algorithms/parallel_reduce.h"
-
+
namespace embree
{
/* error raising rtcIntersect and rtcOccluded functions */
@@ -40,7 +40,7 @@ namespace embree
{
device->refDec();
}
-
+
void Scene::printStatistics()
{
/* calculate maximum number of time segments */
@@ -56,12 +56,12 @@ namespace embree
statistics[i].resize(max_time_steps);
/* gather statistics */
- for (size_t i=0; i<size(); i++)
+ for (size_t i=0; i<size(); i++)
{
if (!get(i)) continue;
- int ty = get(i)->getType();
+ int ty = get(i)->getType();
assert(ty<Geometry::GTY_END);
- int timesegments = get(i)->numTimeSegments();
+ int timesegments = get(i)->numTimeSegments();
assert((unsigned int)timesegments < max_time_steps);
statistics[ty][timesegments] += get(i)->size();
}
@@ -76,7 +76,7 @@ namespace embree
for (size_t t=0; t<max_time_steps; t++)
std::cout << "----------";
std::cout << std::endl;
-
+
for (size_t p=0; p<Geometry::GTY_END; p++)
{
if (std::string(Geometry::gtype_names[p]) == "") continue;
@@ -90,34 +90,34 @@ namespace embree
void Scene::createTriangleAccel()
{
#if defined(EMBREE_GEOMETRY_TRIANGLE)
- if (device->tri_accel == "default")
+ if (device->tri_accel == "default")
{
if (quality_flags != RTC_BUILD_QUALITY_LOW)
{
- int mode = 2*(int)isCompactAccel() + 1*(int)isRobustAccel();
+ int mode = 2*(int)isCompactAccel() + 1*(int)isRobustAccel();
switch (mode) {
- case /*0b00*/ 0:
+ case /*0b00*/ 0:
#if defined (EMBREE_TARGET_SIMD8)
if (device->canUseAVX())
{
- if (quality_flags == RTC_BUILD_QUALITY_HIGH)
+ if (quality_flags == RTC_BUILD_QUALITY_HIGH)
accels_add(device->bvh8_factory->BVH8Triangle4(this,BVHFactory::BuildVariant::HIGH_QUALITY,BVHFactory::IntersectVariant::FAST));
else
accels_add(device->bvh8_factory->BVH8Triangle4(this,BVHFactory::BuildVariant::STATIC,BVHFactory::IntersectVariant::FAST));
}
- else
+ else
#endif
- {
- if (quality_flags == RTC_BUILD_QUALITY_HIGH)
+ {
+ if (quality_flags == RTC_BUILD_QUALITY_HIGH)
accels_add(device->bvh4_factory->BVH4Triangle4(this,BVHFactory::BuildVariant::HIGH_QUALITY,BVHFactory::IntersectVariant::FAST));
- else
+ else
accels_add(device->bvh4_factory->BVH4Triangle4(this,BVHFactory::BuildVariant::STATIC,BVHFactory::IntersectVariant::FAST));
}
break;
- case /*0b01*/ 1:
+ case /*0b01*/ 1:
#if defined (EMBREE_TARGET_SIMD8)
- if (device->canUseAVX())
+ if (device->canUseAVX())
accels_add(device->bvh8_factory->BVH8Triangle4v(this,BVHFactory::BuildVariant::STATIC,BVHFactory::IntersectVariant::ROBUST));
else
#endif
@@ -175,8 +175,8 @@ namespace embree
#if defined(EMBREE_GEOMETRY_TRIANGLE)
if (device->tri_accel_mb == "default")
{
- int mode = 2*(int)isCompactAccel() + 1*(int)isRobustAccel();
-
+ int mode = 2*(int)isCompactAccel() + 1*(int)isRobustAccel();
+
#if defined (EMBREE_TARGET_SIMD8)
if (device->canUseAVX2()) // BVH8 reduces performance on AVX only-machines
{
@@ -211,18 +211,18 @@ namespace embree
void Scene::createQuadAccel()
{
#if defined(EMBREE_GEOMETRY_QUAD)
- if (device->quad_accel == "default")
+ if (device->quad_accel == "default")
{
if (quality_flags != RTC_BUILD_QUALITY_LOW)
{
/* static */
- int mode = 2*(int)isCompactAccel() + 1*(int)isRobustAccel();
+ int mode = 2*(int)isCompactAccel() + 1*(int)isRobustAccel();
switch (mode) {
case /*0b00*/ 0:
#if defined (EMBREE_TARGET_SIMD8)
if (device->canUseAVX())
{
- if (quality_flags == RTC_BUILD_QUALITY_HIGH)
+ if (quality_flags == RTC_BUILD_QUALITY_HIGH)
accels_add(device->bvh8_factory->BVH8Quad4v(this,BVHFactory::BuildVariant::HIGH_QUALITY,BVHFactory::IntersectVariant::FAST));
else
accels_add(device->bvh8_factory->BVH8Quad4v(this,BVHFactory::BuildVariant::STATIC,BVHFactory::IntersectVariant::FAST));
@@ -230,7 +230,7 @@ namespace embree
else
#endif
{
- if (quality_flags == RTC_BUILD_QUALITY_HIGH)
+ if (quality_flags == RTC_BUILD_QUALITY_HIGH)
accels_add(device->bvh4_factory->BVH4Quad4v(this,BVHFactory::BuildVariant::HIGH_QUALITY,BVHFactory::IntersectVariant::FAST));
else
accels_add(device->bvh4_factory->BVH4Quad4v(this,BVHFactory::BuildVariant::STATIC,BVHFactory::IntersectVariant::FAST));
@@ -292,9 +292,9 @@ namespace embree
void Scene::createQuadMBAccel()
{
#if defined(EMBREE_GEOMETRY_QUAD)
- if (device->quad_accel_mb == "default")
+ if (device->quad_accel_mb == "default")
{
- int mode = 2*(int)isCompactAccel() + 1*(int)isRobustAccel();
+ int mode = 2*(int)isCompactAccel() + 1*(int)isRobustAccel();
switch (mode) {
case /*0b00*/ 0:
#if defined (EMBREE_TARGET_SIMD8)
@@ -416,7 +416,7 @@ namespace embree
void Scene::createUserGeometryAccel()
{
#if defined(EMBREE_GEOMETRY_USER)
- if (device->object_accel == "default")
+ if (device->object_accel == "default")
{
#if defined (EMBREE_TARGET_SIMD8)
if (device->canUseAVX() && !isCompactAccel())
@@ -554,7 +554,7 @@ namespace embree
{
BVHFactory::IntersectVariant ivariant = isRobustAccel() ? BVHFactory::IntersectVariant::ROBUST : BVHFactory::IntersectVariant::FAST;
#if defined(EMBREE_GEOMETRY_GRID)
- if (device->grid_accel == "default")
+ if (device->grid_accel == "default")
{
#if defined (EMBREE_TARGET_SIMD8)
if (device->canUseAVX() && !isCompactAccel())
@@ -579,7 +579,7 @@ namespace embree
void Scene::createGridMBAccel()
{
#if defined(EMBREE_GEOMETRY_GRID)
- if (device->grid_accel_mb == "default")
+ if (device->grid_accel_mb == "default")
{
accels_add(device->bvh4_factory->BVH4GridMB(this,BVHFactory::BuildVariant::STATIC));
}
@@ -588,17 +588,13 @@ namespace embree
#endif
}
-
+
void Scene::clear() {
}
- unsigned Scene::bind(unsigned geomID, Ref<Geometry> geometry)
+ unsigned Scene::bind(unsigned geomID, Ref<Geometry> geometry)
{
-#if defined(__aarch64__) && defined(BUILD_IOS)
- std::scoped_lock lock(geometriesMutex);
-#else
Lock<SpinLock> lock(geometriesMutex);
-#endif
if (geomID == RTC_INVALID_GEOMETRY_ID) {
geomID = id_pool.allocate();
if (geomID == RTC_INVALID_GEOMETRY_ID)
@@ -624,19 +620,15 @@ namespace embree
void Scene::detachGeometry(size_t geomID)
{
-#if defined(__aarch64__) && defined(BUILD_IOS)
- std::scoped_lock lock(geometriesMutex);
-#else
Lock<SpinLock> lock(geometriesMutex);
-#endif
-
+
if (geomID >= geometries.size())
throw_RTCError(RTC_ERROR_INVALID_OPERATION,"invalid geometry ID");
Ref<Geometry>& geometry = geometries[geomID];
if (geometry == null)
throw_RTCError(RTC_ERROR_INVALID_OPERATION,"invalid geometry");
-
+
if (geometry->isEnabled()) {
setModified ();
}
@@ -658,21 +650,21 @@ namespace embree
if (!isModified()) {
return;
}
-
+
/* print scene statistics */
if (device->verbosity(2))
printStatistics();
progress_monitor_counter = 0;
-
+
/* gather scene stats and call preCommit function of each geometry */
- this->world = parallel_reduce (size_t(0), geometries.size(), GeometryCounts (),
+ this->world = parallel_reduce (size_t(0), geometries.size(), GeometryCounts (),
[this](const range<size_t>& r)->GeometryCounts
{
GeometryCounts c;
- for (auto i=r.begin(); i<r.end(); ++i)
+ for (auto i=r.begin(); i<r.end(); ++i)
{
- if (geometries[i] && geometries[i]->isEnabled())
+ if (geometries[i] && geometries[i]->isEnabled())
{
geometries[i]->preCommit();
geometries[i]->addElementsToCount (c);
@@ -683,19 +675,19 @@ namespace embree
},
std::plus<GeometryCounts>()
);
-
+
/* select acceleration structures to build */
unsigned int new_enabled_geometry_types = world.enabledGeometryTypesMask();
if (flags_modified || new_enabled_geometry_types != enabled_geometry_types)
{
accels_init();
- /* we need to make all geometries modified, otherwise two level builder will
+ /* we need to make all geometries modified, otherwise two level builder will
not rebuild currently not modified geometries */
parallel_for(geometryModCounters_.size(), [&] ( const size_t i ) {
geometryModCounters_[i] = 0;
});
-
+
if (getNumPrimitives(TriangleMesh::geom_type,false)) createTriangleAccel();
if (getNumPrimitives(TriangleMesh::geom_type,true)) createTriangleMBAccel();
if (getNumPrimitives(QuadMesh::geom_type,false)) createQuadAccel();
@@ -712,14 +704,14 @@ namespace embree
if (getNumPrimitives(Geometry::MTY_INSTANCE_CHEAP,true)) createInstanceMBAccel();
if (getNumPrimitives(Geometry::MTY_INSTANCE_EXPENSIVE,false)) createInstanceExpensiveAccel();
if (getNumPrimitives(Geometry::MTY_INSTANCE_EXPENSIVE,true)) createInstanceExpensiveMBAccel();
-
+
flags_modified = false;
enabled_geometry_types = new_enabled_geometry_types;
}
-
+
/* select fast code path if no filter function is present */
accels_select(hasFilterFunction());
-
+
/* build all hierarchies of this scene */
accels_build();
@@ -737,7 +729,7 @@ namespace embree
geometryModCounters_[i] = geometries[i]->getModCounter();
}
});
-
+
updateInterface();
if (device->verbosity(2)) {
@@ -746,7 +738,7 @@ namespace embree
std::cout << "selected scene intersector" << std::endl;
intersectors.print(2);
}
-
+
setModified(false);
}
@@ -771,16 +763,16 @@ namespace embree
RTCSceneFlags Scene::getSceneFlags() const {
return scene_flags;
}
-
+
#if defined(TASKING_INTERNAL)
- void Scene::commit (bool join)
+ void Scene::commit (bool join)
{
Lock<MutexSys> buildLock(buildMutex,false);
/* allocates own taskscheduler for each build */
Ref<TaskScheduler> scheduler = nullptr;
- {
+ {
Lock<MutexSys> lock(schedulerMutex);
scheduler = this->scheduler;
if (scheduler == null) {
@@ -792,9 +784,9 @@ namespace embree
/* worker threads join build */
if (!buildLock.isLocked())
{
- if (!join)
+ if (!join)
throw_RTCError(RTC_ERROR_INVALID_OPERATION,"use rtcJoinCommitScene to join a build operation");
-
+
scheduler->join();
return;
}
@@ -816,9 +808,9 @@ namespace embree
#endif
-#if defined(TASKING_TBB) || defined(TASKING_GCD)
+#if defined(TASKING_TBB)
- void Scene::commit (bool join)
+ void Scene::commit (bool join)
{
#if defined(TASKING_TBB) && (TBB_INTERFACE_VERSION_MAJOR < 8)
if (join)
@@ -832,15 +824,12 @@ namespace embree
if (!lock.isLocked())
{
#if !TASKING_TBB_USE_TASK_ISOLATION
- if (!join)
+ if (!join)
throw_RTCError(RTC_ERROR_INVALID_OPERATION,"invoking rtcCommitScene from multiple threads is not supported with this TBB version");
#endif
-
+
do {
-#if defined(TASKING_GCD)
- // Do Nothing
-#else
#if USE_TASK_ARENA
if (join) {
device->arena->execute([&]{ group.wait(); });
@@ -850,24 +839,21 @@ namespace embree
{
group.wait();
}
-#endif
pause_cpu();
yield();
-
} while (!buildMutex.try_lock());
-
+
buildMutex.unlock();
return;
- }
+ }
/* for best performance set FTZ and DAZ flags in the MXCSR control and status register */
const unsigned int mxcsr = _mm_getcsr();
_mm_setcsr(mxcsr | /* FTZ */ (1<<15) | /* DAZ */ (1<<6));
-
+
try {
-#if defined(TASKING_TBB)
-#if TBB_INTERFACE_VERSION_MAJOR < 8
+#if TBB_INTERFACE_VERSION_MAJOR < 8
tbb::task_group_context ctx( tbb::task_group_context::isolated, tbb::task_group_context::default_traits);
#else
tbb::task_group_context ctx( tbb::task_group_context::isolated, tbb::task_group_context::default_traits | tbb::task_group_context::fp_settings );
@@ -892,22 +878,15 @@ namespace embree
});
group.wait();
}
-
+
/* reset MXCSR register again */
_mm_setcsr(mxcsr);
-
-#elif defined(TASKING_GCD)
-
- commit_task();
-
-#endif // #if defined(TASKING_TBB)
-
- }
+ }
catch (...)
{
/* reset MXCSR register again */
_mm_setcsr(mxcsr);
-
+
accels_clear();
updateInterface();
throw;
@@ -917,7 +896,7 @@ namespace embree
#if defined(TASKING_PPL)
- void Scene::commit (bool join)
+ void Scene::commit (bool join)
{
#if defined(TASKING_PPL)
if (join)
@@ -935,7 +914,7 @@ namespace embree
/* for best performance set FTZ and DAZ flags in the MXCSR control and status register */
const unsigned int mxcsr = _mm_getcsr();
_mm_setcsr(mxcsr | /* FTZ */ (1<<15) | /* DAZ */ (1<<6));
-
+
try {
group.run([&]{
@@ -945,12 +924,12 @@ namespace embree
/* reset MXCSR register again */
_mm_setcsr(mxcsr);
- }
+ }
catch (...)
{
/* reset MXCSR register again */
_mm_setcsr(mxcsr);
-
+
accels_clear();
updateInterface();
throw;
@@ -958,7 +937,7 @@ namespace embree
}
#endif
- void Scene::setProgressMonitorFunction(RTCProgressMonitorFunction func, void* ptr)
+ void Scene::setProgressMonitorFunction(RTCProgressMonitorFunction func, void* ptr)
{
progress_monitor_function = func;
progress_monitor_ptr = ptr;
diff --git a/thirdparty/embree-aarch64/kernels/common/scene.h b/thirdparty/embree/kernels/common/scene.h
index b41c6cde91..5ed80a63f6 100644
--- a/thirdparty/embree-aarch64/kernels/common/scene.h
+++ b/thirdparty/embree/kernels/common/scene.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
diff --git a/thirdparty/embree/kernels/common/scene_curves.h b/thirdparty/embree/kernels/common/scene_curves.h
new file mode 100644
index 0000000000..a5a39e42d4
--- /dev/null
+++ b/thirdparty/embree/kernels/common/scene_curves.h
@@ -0,0 +1,688 @@
+// Copyright 2009-2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+
+#pragma once
+
+#include "default.h"
+#include "geometry.h"
+#include "buffer.h"
+
+#include "../subdiv/bezier_curve.h"
+#include "../subdiv/hermite_curve.h"
+#include "../subdiv/bspline_curve.h"
+#include "../subdiv/catmullrom_curve.h"
+#include "../subdiv/linear_bezier_patch.h"
+
+namespace embree
+{
+ /*! represents an array of bicubic bezier curves */
+ struct CurveGeometry : public Geometry
+ {
+ /*! type of this geometry */
+ static const Geometry::GTypeMask geom_type = Geometry::MTY_CURVE4;
+
+ public:
+
+ /*! bezier curve construction */
+ CurveGeometry (Device* device, Geometry::GType gtype);
+
+ public:
+ void setMask(unsigned mask);
+ void setNumTimeSteps (unsigned int numTimeSteps);
+ void setVertexAttributeCount (unsigned int N);
+ void setBuffer(RTCBufferType type, unsigned int slot, RTCFormat format, const Ref<Buffer>& buffer, size_t offset, size_t stride, unsigned int num);
+ void* getBuffer(RTCBufferType type, unsigned int slot);
+ void updateBuffer(RTCBufferType type, unsigned int slot);
+ void commit();
+ bool verify();
+ void setTessellationRate(float N);
+ void setMaxRadiusScale(float s);
+ void addElementsToCount (GeometryCounts & counts) const;
+
+ public:
+
+ /*! returns the number of vertices */
+ __forceinline size_t numVertices() const {
+ return vertices[0].size();
+ }
+
+ /*! returns the i'th curve */
+ __forceinline const unsigned int& curve(size_t i) const {
+ return curves[i];
+ }
+
+ /*! returns i'th vertex of the first time step */
+ __forceinline Vec3ff vertex(size_t i) const {
+ return vertices0[i];
+ }
+
+ /*! returns i'th normal of the first time step */
+ __forceinline Vec3fa normal(size_t i) const {
+ return normals0[i];
+ }
+
+ /*! returns i'th tangent of the first time step */
+ __forceinline Vec3ff tangent(size_t i) const {
+ return tangents0[i];
+ }
+
+ /*! returns i'th normal derivative of the first time step */
+ __forceinline Vec3fa dnormal(size_t i) const {
+ return dnormals0[i];
+ }
+
+ /*! returns i'th radius of the first time step */
+ __forceinline float radius(size_t i) const {
+ return vertices0[i].w;
+ }
+
+ /*! returns i'th vertex of itime'th timestep */
+ __forceinline Vec3ff vertex(size_t i, size_t itime) const {
+ return vertices[itime][i];
+ }
+
+ /*! returns i'th normal of itime'th timestep */
+ __forceinline Vec3fa normal(size_t i, size_t itime) const {
+ return normals[itime][i];
+ }
+
+ /*! returns i'th tangent of itime'th timestep */
+ __forceinline Vec3ff tangent(size_t i, size_t itime) const {
+ return tangents[itime][i];
+ }
+
+ /*! returns i'th normal derivative of itime'th timestep */
+ __forceinline Vec3fa dnormal(size_t i, size_t itime) const {
+ return dnormals[itime][i];
+ }
+
+ /*! returns i'th radius of itime'th timestep */
+ __forceinline float radius(size_t i, size_t itime) const {
+ return vertices[itime][i].w;
+ }
+
+ /*! gathers the curve starting with i'th vertex */
+ __forceinline void gather(Vec3ff& p0, Vec3ff& p1, Vec3ff& p2, Vec3ff& p3, size_t i) const
+ {
+ p0 = vertex(i+0);
+ p1 = vertex(i+1);
+ p2 = vertex(i+2);
+ p3 = vertex(i+3);
+ }
+
+ /*! gathers the curve starting with i'th vertex of itime'th timestep */
+ __forceinline void gather(Vec3ff& p0, Vec3ff& p1, Vec3ff& p2, Vec3ff& p3, size_t i, size_t itime) const
+ {
+ p0 = vertex(i+0,itime);
+ p1 = vertex(i+1,itime);
+ p2 = vertex(i+2,itime);
+ p3 = vertex(i+3,itime);
+ }
+
+ /*! gathers the curve starting with i'th vertex */
+ __forceinline void gather(Vec3ff& p0, Vec3ff& p1, Vec3ff& p2, Vec3ff& p3, Vec3fa& n0, Vec3fa& n1, Vec3fa& n2, Vec3fa& n3, size_t i) const
+ {
+ p0 = vertex(i+0);
+ p1 = vertex(i+1);
+ p2 = vertex(i+2);
+ p3 = vertex(i+3);
+ n0 = normal(i+0);
+ n1 = normal(i+1);
+ n2 = normal(i+2);
+ n3 = normal(i+3);
+ }
+
+ /*! gathers the curve starting with i'th vertex of itime'th timestep */
+ __forceinline void gather(Vec3ff& p0, Vec3ff& p1, Vec3ff& p2, Vec3ff& p3, Vec3fa& n0, Vec3fa& n1, Vec3fa& n2, Vec3fa& n3, size_t i, size_t itime) const
+ {
+ p0 = vertex(i+0,itime);
+ p1 = vertex(i+1,itime);
+ p2 = vertex(i+2,itime);
+ p3 = vertex(i+3,itime);
+ n0 = normal(i+0,itime);
+ n1 = normal(i+1,itime);
+ n2 = normal(i+2,itime);
+ n3 = normal(i+3,itime);
+ }
+
+ /*! prefetches the curve starting with i'th vertex of itime'th timestep */
+ __forceinline void prefetchL1_vertices(size_t i) const
+ {
+ prefetchL1(vertices0.getPtr(i)+0);
+ prefetchL1(vertices0.getPtr(i)+64);
+ }
+
+ /*! prefetches the curve starting with i'th vertex of itime'th timestep */
+ __forceinline void prefetchL2_vertices(size_t i) const
+ {
+ prefetchL2(vertices0.getPtr(i)+0);
+ prefetchL2(vertices0.getPtr(i)+64);
+ }
+
+ /*! loads curve vertices for specified time */
+ __forceinline void gather(Vec3ff& p0, Vec3ff& p1, Vec3ff& p2, Vec3ff& p3, size_t i, float time) const
+ {
+ float ftime;
+ const size_t itime = timeSegment(time, ftime);
+
+ const float t0 = 1.0f - ftime;
+ const float t1 = ftime;
+ Vec3ff a0,a1,a2,a3;
+ gather(a0,a1,a2,a3,i,itime);
+ Vec3ff b0,b1,b2,b3;
+ gather(b0,b1,b2,b3,i,itime+1);
+ p0 = madd(Vec3ff(t0),a0,t1*b0);
+ p1 = madd(Vec3ff(t0),a1,t1*b1);
+ p2 = madd(Vec3ff(t0),a2,t1*b2);
+ p3 = madd(Vec3ff(t0),a3,t1*b3);
+ }
+
+ /*! loads curve vertices for specified time */
+ __forceinline void gather(Vec3ff& p0, Vec3ff& p1, Vec3ff& p2, Vec3ff& p3, Vec3fa& n0, Vec3fa& n1, Vec3fa& n2, Vec3fa& n3, size_t i, float time) const
+ {
+ float ftime;
+ const size_t itime = timeSegment(time, ftime);
+
+ const float t0 = 1.0f - ftime;
+ const float t1 = ftime;
+ Vec3ff a0,a1,a2,a3; Vec3fa an0,an1,an2,an3;
+ gather(a0,a1,a2,a3,an0,an1,an2,an3,i,itime);
+ Vec3ff b0,b1,b2,b3; Vec3fa bn0,bn1,bn2,bn3;
+ gather(b0,b1,b2,b3,bn0,bn1,bn2,bn3,i,itime+1);
+ p0 = madd(Vec3ff(t0),a0,t1*b0);
+ p1 = madd(Vec3ff(t0),a1,t1*b1);
+ p2 = madd(Vec3ff(t0),a2,t1*b2);
+ p3 = madd(Vec3ff(t0),a3,t1*b3);
+ n0 = madd(Vec3ff(t0),an0,t1*bn0);
+ n1 = madd(Vec3ff(t0),an1,t1*bn1);
+ n2 = madd(Vec3ff(t0),an2,t1*bn2);
+ n3 = madd(Vec3ff(t0),an3,t1*bn3);
+ }
+
+ template<typename SourceCurve3ff, typename SourceCurve3fa, typename TensorLinearCubicBezierSurface3fa>
+ __forceinline TensorLinearCubicBezierSurface3fa getNormalOrientedCurve(IntersectContext* context, const Vec3fa& ray_org, const unsigned int primID, const size_t itime) const
+ {
+ Vec3ff v0,v1,v2,v3; Vec3fa n0,n1,n2,n3;
+ unsigned int vertexID = curve(primID);
+ gather(v0,v1,v2,v3,n0,n1,n2,n3,vertexID,itime);
+ SourceCurve3ff ccurve(v0,v1,v2,v3);
+ SourceCurve3fa ncurve(n0,n1,n2,n3);
+ ccurve = enlargeRadiusToMinWidth(context,this,ray_org,ccurve);
+ return TensorLinearCubicBezierSurface3fa::fromCenterAndNormalCurve(ccurve,ncurve);
+ }
+
+ template<typename SourceCurve3ff, typename SourceCurve3fa, typename TensorLinearCubicBezierSurface3fa>
+ __forceinline TensorLinearCubicBezierSurface3fa getNormalOrientedCurve(IntersectContext* context, const Vec3fa& ray_org, const unsigned int primID, const float time) const
+ {
+ float ftime;
+ const size_t itime = timeSegment(time, ftime);
+ const TensorLinearCubicBezierSurface3fa curve0 = getNormalOrientedCurve<SourceCurve3ff, SourceCurve3fa, TensorLinearCubicBezierSurface3fa>(context,ray_org,primID,itime+0);
+ const TensorLinearCubicBezierSurface3fa curve1 = getNormalOrientedCurve<SourceCurve3ff, SourceCurve3fa, TensorLinearCubicBezierSurface3fa>(context,ray_org,primID,itime+1);
+ return clerp(curve0,curve1,ftime);
+ }
+
+ /*! gathers the hermite curve starting with i'th vertex */
+ __forceinline void gather_hermite(Vec3ff& p0, Vec3ff& t0, Vec3ff& p1, Vec3ff& t1, size_t i) const
+ {
+ p0 = vertex (i+0);
+ p1 = vertex (i+1);
+ t0 = tangent(i+0);
+ t1 = tangent(i+1);
+ }
+
+ /*! gathers the hermite curve starting with i'th vertex of itime'th timestep */
+ __forceinline void gather_hermite(Vec3ff& p0, Vec3ff& t0, Vec3ff& p1, Vec3ff& t1, size_t i, size_t itime) const
+ {
+ p0 = vertex (i+0,itime);
+ p1 = vertex (i+1,itime);
+ t0 = tangent(i+0,itime);
+ t1 = tangent(i+1,itime);
+ }
+
+ /*! loads curve vertices for specified time */
+ __forceinline void gather_hermite(Vec3ff& p0, Vec3ff& t0, Vec3ff& p1, Vec3ff& t1, size_t i, float time) const
+ {
+ float ftime;
+ const size_t itime = timeSegment(time, ftime);
+ const float f0 = 1.0f - ftime, f1 = ftime;
+ Vec3ff ap0,at0,ap1,at1;
+ gather_hermite(ap0,at0,ap1,at1,i,itime);
+ Vec3ff bp0,bt0,bp1,bt1;
+ gather_hermite(bp0,bt0,bp1,bt1,i,itime+1);
+ p0 = madd(Vec3ff(f0),ap0,f1*bp0);
+ t0 = madd(Vec3ff(f0),at0,f1*bt0);
+ p1 = madd(Vec3ff(f0),ap1,f1*bp1);
+ t1 = madd(Vec3ff(f0),at1,f1*bt1);
+ }
+
+ /*! gathers the hermite curve starting with i'th vertex */
+ __forceinline void gather_hermite(Vec3ff& p0, Vec3ff& t0, Vec3fa& n0, Vec3fa& dn0, Vec3ff& p1, Vec3ff& t1, Vec3fa& n1, Vec3fa& dn1, size_t i) const
+ {
+ p0 = vertex (i+0);
+ p1 = vertex (i+1);
+ t0 = tangent(i+0);
+ t1 = tangent(i+1);
+ n0 = normal(i+0);
+ n1 = normal(i+1);
+ dn0 = dnormal(i+0);
+ dn1 = dnormal(i+1);
+ }
+
+ /*! gathers the hermite curve starting with i'th vertex of itime'th timestep */
+ __forceinline void gather_hermite(Vec3ff& p0, Vec3ff& t0, Vec3fa& n0, Vec3fa& dn0, Vec3ff& p1, Vec3ff& t1, Vec3fa& n1, Vec3fa& dn1, size_t i, size_t itime) const
+ {
+ p0 = vertex (i+0,itime);
+ p1 = vertex (i+1,itime);
+ t0 = tangent(i+0,itime);
+ t1 = tangent(i+1,itime);
+ n0 = normal(i+0,itime);
+ n1 = normal(i+1,itime);
+ dn0 = dnormal(i+0,itime);
+ dn1 = dnormal(i+1,itime);
+ }
+
+ /*! loads curve vertices for specified time */
+ __forceinline void gather_hermite(Vec3ff& p0, Vec3fa& t0, Vec3fa& n0, Vec3fa& dn0, Vec3ff& p1, Vec3fa& t1, Vec3fa& n1, Vec3fa& dn1, size_t i, float time) const
+ {
+ float ftime;
+ const size_t itime = timeSegment(time, ftime);
+ const float f0 = 1.0f - ftime, f1 = ftime;
+ Vec3ff ap0,at0,ap1,at1; Vec3fa an0,adn0,an1,adn1;
+ gather_hermite(ap0,at0,an0,adn0,ap1,at1,an1,adn1,i,itime);
+ Vec3ff bp0,bt0,bp1,bt1; Vec3fa bn0,bdn0,bn1,bdn1;
+ gather_hermite(bp0,bt0,bn0,bdn0,bp1,bt1,bn1,bdn1,i,itime+1);
+ p0 = madd(Vec3ff(f0),ap0,f1*bp0);
+ t0 = madd(Vec3ff(f0),at0,f1*bt0);
+ n0 = madd(Vec3ff(f0),an0,f1*bn0);
+ dn0= madd(Vec3ff(f0),adn0,f1*bdn0);
+ p1 = madd(Vec3ff(f0),ap1,f1*bp1);
+ t1 = madd(Vec3ff(f0),at1,f1*bt1);
+ n1 = madd(Vec3ff(f0),an1,f1*bn1);
+ dn1= madd(Vec3ff(f0),adn1,f1*bdn1);
+ }
+
+ template<typename SourceCurve3ff, typename SourceCurve3fa, typename TensorLinearCubicBezierSurface3fa>
+ __forceinline TensorLinearCubicBezierSurface3fa getNormalOrientedHermiteCurve(IntersectContext* context, const Vec3fa& ray_org, const unsigned int primID, const size_t itime) const
+ {
+ Vec3ff v0,t0,v1,t1; Vec3fa n0,dn0,n1,dn1;
+ unsigned int vertexID = curve(primID);
+ gather_hermite(v0,t0,n0,dn0,v1,t1,n1,dn1,vertexID,itime);
+
+ SourceCurve3ff ccurve(v0,t0,v1,t1);
+ SourceCurve3fa ncurve(n0,dn0,n1,dn1);
+ ccurve = enlargeRadiusToMinWidth(context,this,ray_org,ccurve);
+ return TensorLinearCubicBezierSurface3fa::fromCenterAndNormalCurve(ccurve,ncurve);
+ }
+
+ template<typename SourceCurve3ff, typename SourceCurve3fa, typename TensorLinearCubicBezierSurface3fa>
+ __forceinline TensorLinearCubicBezierSurface3fa getNormalOrientedHermiteCurve(IntersectContext* context, const Vec3fa& ray_org, const unsigned int primID, const float time) const
+ {
+ float ftime;
+ const size_t itime = timeSegment(time, ftime);
+ const TensorLinearCubicBezierSurface3fa curve0 = getNormalOrientedHermiteCurve<SourceCurve3ff, SourceCurve3fa, TensorLinearCubicBezierSurface3fa>(context, ray_org, primID,itime+0);
+ const TensorLinearCubicBezierSurface3fa curve1 = getNormalOrientedHermiteCurve<SourceCurve3ff, SourceCurve3fa, TensorLinearCubicBezierSurface3fa>(context, ray_org, primID,itime+1);
+ return clerp(curve0,curve1,ftime);
+ }
+
+ private:
+ void resizeBuffers(unsigned int numSteps);
+
+ public:
+ BufferView<unsigned int> curves; //!< array of curve indices
+ BufferView<Vec3ff> vertices0; //!< fast access to first vertex buffer
+ BufferView<Vec3fa> normals0; //!< fast access to first normal buffer
+ BufferView<Vec3ff> tangents0; //!< fast access to first tangent buffer
+ BufferView<Vec3fa> dnormals0; //!< fast access to first normal derivative buffer
+ vector<BufferView<Vec3ff>> vertices; //!< vertex array for each timestep
+ vector<BufferView<Vec3fa>> normals; //!< normal array for each timestep
+ vector<BufferView<Vec3ff>> tangents; //!< tangent array for each timestep
+ vector<BufferView<Vec3fa>> dnormals; //!< normal derivative array for each timestep
+ BufferView<char> flags; //!< start, end flag per segment
+ vector<BufferView<char>> vertexAttribs; //!< user buffers
+ int tessellationRate; //!< tessellation rate for flat curve
+ float maxRadiusScale = 1.0; //!< maximal min-width scaling of curve radii
+ };
+
+ namespace isa
+ {
+
+ template<template<typename Ty> class Curve>
+ struct CurveGeometryInterface : public CurveGeometry
+ {
+ typedef Curve<Vec3ff> Curve3ff;
+ typedef Curve<Vec3fa> Curve3fa;
+
+ CurveGeometryInterface (Device* device, Geometry::GType gtype)
+ : CurveGeometry(device,gtype) {}
+
+ __forceinline const Curve3ff getCurveScaledRadius(size_t i, size_t itime = 0) const
+ {
+ const unsigned int index = curve(i);
+ Vec3ff v0 = vertex(index+0,itime);
+ Vec3ff v1 = vertex(index+1,itime);
+ Vec3ff v2 = vertex(index+2,itime);
+ Vec3ff v3 = vertex(index+3,itime);
+ v0.w *= maxRadiusScale;
+ v1.w *= maxRadiusScale;
+ v2.w *= maxRadiusScale;
+ v3.w *= maxRadiusScale;
+ return Curve3ff (v0,v1,v2,v3);
+ }
+
+ __forceinline const Curve3ff getCurveScaledRadius(const LinearSpace3fa& space, size_t i, size_t itime = 0) const
+ {
+ const unsigned int index = curve(i);
+ const Vec3ff v0 = vertex(index+0,itime);
+ const Vec3ff v1 = vertex(index+1,itime);
+ const Vec3ff v2 = vertex(index+2,itime);
+ const Vec3ff v3 = vertex(index+3,itime);
+ const Vec3ff w0(xfmPoint(space,(Vec3fa)v0), maxRadiusScale*v0.w);
+ const Vec3ff w1(xfmPoint(space,(Vec3fa)v1), maxRadiusScale*v1.w);
+ const Vec3ff w2(xfmPoint(space,(Vec3fa)v2), maxRadiusScale*v2.w);
+ const Vec3ff w3(xfmPoint(space,(Vec3fa)v3), maxRadiusScale*v3.w);
+ return Curve3ff(w0,w1,w2,w3);
+ }
+
+ __forceinline const Curve3ff getCurveScaledRadius(const Vec3fa& ofs, const float scale, const float r_scale0, const LinearSpace3fa& space, size_t i, size_t itime = 0) const
+ {
+ const float r_scale = r_scale0*scale;
+ const unsigned int index = curve(i);
+ const Vec3ff v0 = vertex(index+0,itime);
+ const Vec3ff v1 = vertex(index+1,itime);
+ const Vec3ff v2 = vertex(index+2,itime);
+ const Vec3ff v3 = vertex(index+3,itime);
+ const Vec3ff w0(xfmPoint(space,((Vec3fa)v0-ofs)*Vec3fa(scale)), maxRadiusScale*v0.w*r_scale);
+ const Vec3ff w1(xfmPoint(space,((Vec3fa)v1-ofs)*Vec3fa(scale)), maxRadiusScale*v1.w*r_scale);
+ const Vec3ff w2(xfmPoint(space,((Vec3fa)v2-ofs)*Vec3fa(scale)), maxRadiusScale*v2.w*r_scale);
+ const Vec3ff w3(xfmPoint(space,((Vec3fa)v3-ofs)*Vec3fa(scale)), maxRadiusScale*v3.w*r_scale);
+ return Curve3ff(w0,w1,w2,w3);
+ }
+
+ __forceinline const Curve3fa getNormalCurve(size_t i, size_t itime = 0) const
+ {
+ const unsigned int index = curve(i);
+ const Vec3fa n0 = normal(index+0,itime);
+ const Vec3fa n1 = normal(index+1,itime);
+ const Vec3fa n2 = normal(index+2,itime);
+ const Vec3fa n3 = normal(index+3,itime);
+ return Curve3fa (n0,n1,n2,n3);
+ }
+
+ __forceinline const TensorLinearCubicBezierSurface3fa getOrientedCurveScaledRadius(size_t i, size_t itime = 0) const
+ {
+ const Curve3ff center = getCurveScaledRadius(i,itime);
+ const Curve3fa normal = getNormalCurve(i,itime);
+ const TensorLinearCubicBezierSurface3fa ocurve = TensorLinearCubicBezierSurface3fa::fromCenterAndNormalCurve(center,normal);
+ return ocurve;
+ }
+
+ __forceinline const TensorLinearCubicBezierSurface3fa getOrientedCurveScaledRadius(const LinearSpace3fa& space, size_t i, size_t itime = 0) const {
+ return getOrientedCurveScaledRadius(i,itime).xfm(space);
+ }
+
+ __forceinline const TensorLinearCubicBezierSurface3fa getOrientedCurveScaledRadius(const Vec3fa& ofs, const float scale, const LinearSpace3fa& space, size_t i, size_t itime = 0) const {
+ return getOrientedCurveScaledRadius(i,itime).xfm(space,ofs,scale);
+ }
+
+ /*! check if the i'th primitive is valid at the itime'th time step */
+ __forceinline bool valid(Geometry::GType ctype, size_t i, const range<size_t>& itime_range) const
+ {
+ const unsigned int index = curve(i);
+ if (index+3 >= numVertices()) return false;
+
+ for (size_t itime = itime_range.begin(); itime <= itime_range.end(); itime++)
+ {
+ const float r0 = radius(index+0,itime);
+ const float r1 = radius(index+1,itime);
+ const float r2 = radius(index+2,itime);
+ const float r3 = radius(index+3,itime);
+ if (!isvalid(r0) || !isvalid(r1) || !isvalid(r2) || !isvalid(r3))
+ return false;
+
+ const Vec3fa v0 = vertex(index+0,itime);
+ const Vec3fa v1 = vertex(index+1,itime);
+ const Vec3fa v2 = vertex(index+2,itime);
+ const Vec3fa v3 = vertex(index+3,itime);
+ if (!isvalid(v0) || !isvalid(v1) || !isvalid(v2) || !isvalid(v3))
+ return false;
+
+ if (ctype == Geometry::GTY_SUBTYPE_ORIENTED_CURVE)
+ {
+ const Vec3fa n0 = normal(index+0,itime);
+ const Vec3fa n1 = normal(index+1,itime);
+ if (!isvalid(n0) || !isvalid(n1))
+ return false;
+ }
+ }
+
+ return true;
+ }
+
+ template<int N>
+ void interpolate_impl(const RTCInterpolateArguments* const args)
+ {
+ unsigned int primID = args->primID;
+ float u = args->u;
+ RTCBufferType bufferType = args->bufferType;
+ unsigned int bufferSlot = args->bufferSlot;
+ float* P = args->P;
+ float* dPdu = args->dPdu;
+ float* ddPdudu = args->ddPdudu;
+ unsigned int valueCount = args->valueCount;
+
+ /* calculate base pointer and stride */
+ assert((bufferType == RTC_BUFFER_TYPE_VERTEX && bufferSlot < numTimeSteps) ||
+ (bufferType == RTC_BUFFER_TYPE_VERTEX_ATTRIBUTE && bufferSlot <= vertexAttribs.size()));
+ const char* src = nullptr;
+ size_t stride = 0;
+ if (bufferType == RTC_BUFFER_TYPE_VERTEX_ATTRIBUTE) {
+ src = vertexAttribs[bufferSlot].getPtr();
+ stride = vertexAttribs[bufferSlot].getStride();
+ } else {
+ src = vertices[bufferSlot].getPtr();
+ stride = vertices[bufferSlot].getStride();
+ }
+
+ for (unsigned int i=0; i<valueCount; i+=N)
+ {
+ size_t ofs = i*sizeof(float);
+ const size_t index = curves[primID];
+ const vbool<N> valid = vint<N>((int)i)+vint<N>(step) < vint<N>((int)valueCount);
+ const vfloat<N> p0 = mem<vfloat<N>>::loadu(valid,(float*)&src[(index+0)*stride+ofs]);
+ const vfloat<N> p1 = mem<vfloat<N>>::loadu(valid,(float*)&src[(index+1)*stride+ofs]);
+ const vfloat<N> p2 = mem<vfloat<N>>::loadu(valid,(float*)&src[(index+2)*stride+ofs]);
+ const vfloat<N> p3 = mem<vfloat<N>>::loadu(valid,(float*)&src[(index+3)*stride+ofs]);
+
+ const Curve<vfloat<N>> curve(p0,p1,p2,p3);
+ if (P ) mem<vfloat<N>>::storeu(valid,P+i, curve.eval(u));
+ if (dPdu ) mem<vfloat<N>>::storeu(valid,dPdu+i, curve.eval_du(u));
+ if (ddPdudu) mem<vfloat<N>>::storeu(valid,ddPdudu+i,curve.eval_dudu(u));
+ }
+ }
+
+ void interpolate(const RTCInterpolateArguments* const args) {
+ interpolate_impl<4>(args);
+ }
+ };
+
+ template<template<typename Ty> class Curve>
+ struct HermiteCurveGeometryInterface : public CurveGeometry
+ {
+ typedef Curve<Vec3ff> HermiteCurve3ff;
+ typedef Curve<Vec3fa> HermiteCurve3fa;
+
+ HermiteCurveGeometryInterface (Device* device, Geometry::GType gtype)
+ : CurveGeometry(device,gtype) {}
+
+ __forceinline const HermiteCurve3ff getCurveScaledRadius(size_t i, size_t itime = 0) const
+ {
+ const unsigned int index = curve(i);
+ Vec3ff v0 = vertex(index+0,itime);
+ Vec3ff v1 = vertex(index+1,itime);
+ Vec3ff t0 = tangent(index+0,itime);
+ Vec3ff t1 = tangent(index+1,itime);
+ v0.w *= maxRadiusScale;
+ v1.w *= maxRadiusScale;
+ t0.w *= maxRadiusScale;
+ t1.w *= maxRadiusScale;
+ return HermiteCurve3ff (v0,t0,v1,t1);
+ }
+
+ __forceinline const HermiteCurve3ff getCurveScaledRadius(const LinearSpace3fa& space, size_t i, size_t itime = 0) const
+ {
+ const unsigned int index = curve(i);
+ const Vec3ff v0 = vertex(index+0,itime);
+ const Vec3ff v1 = vertex(index+1,itime);
+ const Vec3ff t0 = tangent(index+0,itime);
+ const Vec3ff t1 = tangent(index+1,itime);
+ const Vec3ff V0(xfmPoint(space,(Vec3fa)v0),maxRadiusScale*v0.w);
+ const Vec3ff V1(xfmPoint(space,(Vec3fa)v1),maxRadiusScale*v1.w);
+ const Vec3ff T0(xfmVector(space,(Vec3fa)t0),maxRadiusScale*t0.w);
+ const Vec3ff T1(xfmVector(space,(Vec3fa)t1),maxRadiusScale*t1.w);
+ return HermiteCurve3ff(V0,T0,V1,T1);
+ }
+
+ __forceinline const HermiteCurve3ff getCurveScaledRadius(const Vec3fa& ofs, const float scale, const float r_scale0, const LinearSpace3fa& space, size_t i, size_t itime = 0) const
+ {
+ const float r_scale = r_scale0*scale;
+ const unsigned int index = curve(i);
+ const Vec3ff v0 = vertex(index+0,itime);
+ const Vec3ff v1 = vertex(index+1,itime);
+ const Vec3ff t0 = tangent(index+0,itime);
+ const Vec3ff t1 = tangent(index+1,itime);
+ const Vec3ff V0(xfmPoint(space,(v0-ofs)*Vec3fa(scale)), maxRadiusScale*v0.w*r_scale);
+ const Vec3ff V1(xfmPoint(space,(v1-ofs)*Vec3fa(scale)), maxRadiusScale*v1.w*r_scale);
+ const Vec3ff T0(xfmVector(space,t0*Vec3fa(scale)), maxRadiusScale*t0.w*r_scale);
+ const Vec3ff T1(xfmVector(space,t1*Vec3fa(scale)), maxRadiusScale*t1.w*r_scale);
+ return HermiteCurve3ff(V0,T0,V1,T1);
+ }
+
+ __forceinline const HermiteCurve3fa getNormalCurve(size_t i, size_t itime = 0) const
+ {
+ const unsigned int index = curve(i);
+ const Vec3fa n0 = normal(index+0,itime);
+ const Vec3fa n1 = normal(index+1,itime);
+ const Vec3fa dn0 = dnormal(index+0,itime);
+ const Vec3fa dn1 = dnormal(index+1,itime);
+ return HermiteCurve3fa (n0,dn0,n1,dn1);
+ }
+
+ __forceinline const TensorLinearCubicBezierSurface3fa getOrientedCurveScaledRadius(size_t i, size_t itime = 0) const
+ {
+ const HermiteCurve3ff center = getCurveScaledRadius(i,itime);
+ const HermiteCurve3fa normal = getNormalCurve(i,itime);
+ const TensorLinearCubicBezierSurface3fa ocurve = TensorLinearCubicBezierSurface3fa::fromCenterAndNormalCurve(center,normal);
+ return ocurve;
+ }
+
+ __forceinline const TensorLinearCubicBezierSurface3fa getOrientedCurveScaledRadius(const LinearSpace3fa& space, size_t i, size_t itime = 0) const {
+ return getOrientedCurveScaledRadius(i,itime).xfm(space);
+ }
+
+ __forceinline const TensorLinearCubicBezierSurface3fa getOrientedCurveScaledRadius(const Vec3fa& ofs, const float scale, const LinearSpace3fa& space, size_t i, size_t itime = 0) const {
+ return getOrientedCurveScaledRadius(i,itime).xfm(space,ofs,scale);
+ }
+
+ /*! check if the i'th primitive is valid at the itime'th time step */
+ __forceinline bool valid(Geometry::GType ctype, size_t i, const range<size_t>& itime_range) const
+ {
+ const unsigned int index = curve(i);
+ if (index+1 >= numVertices()) return false;
+
+ for (size_t itime = itime_range.begin(); itime <= itime_range.end(); itime++)
+ {
+ const Vec3ff v0 = vertex(index+0,itime);
+ const Vec3ff v1 = vertex(index+1,itime);
+ if (!isvalid4(v0) || !isvalid4(v1))
+ return false;
+
+ const Vec3ff t0 = tangent(index+0,itime);
+ const Vec3ff t1 = tangent(index+1,itime);
+ if (!isvalid4(t0) || !isvalid4(t1))
+ return false;
+
+ if (ctype == Geometry::GTY_SUBTYPE_ORIENTED_CURVE)
+ {
+ const Vec3fa n0 = normal(index+0,itime);
+ const Vec3fa n1 = normal(index+1,itime);
+ if (!isvalid(n0) || !isvalid(n1))
+ return false;
+
+ const Vec3fa dn0 = dnormal(index+0,itime);
+ const Vec3fa dn1 = dnormal(index+1,itime);
+ if (!isvalid(dn0) || !isvalid(dn1))
+ return false;
+ }
+ }
+
+ return true;
+ }
+
+ template<int N>
+ void interpolate_impl(const RTCInterpolateArguments* const args)
+ {
+ unsigned int primID = args->primID;
+ float u = args->u;
+ RTCBufferType bufferType = args->bufferType;
+ unsigned int bufferSlot = args->bufferSlot;
+ float* P = args->P;
+ float* dPdu = args->dPdu;
+ float* ddPdudu = args->ddPdudu;
+ unsigned int valueCount = args->valueCount;
+
+ /* we interpolate vertex attributes linearly for hermite basis */
+ if (bufferType == RTC_BUFFER_TYPE_VERTEX_ATTRIBUTE)
+ {
+ assert(bufferSlot <= vertexAttribs.size());
+ const char* vsrc = vertexAttribs[bufferSlot].getPtr();
+ const size_t vstride = vertexAttribs[bufferSlot].getStride();
+
+ for (unsigned int i=0; i<valueCount; i+=N)
+ {
+ const size_t ofs = i*sizeof(float);
+ const size_t index = curves[primID];
+ const vbool<N> valid = vint<N>((int)i)+vint<N>(step) < vint<N>((int)valueCount);
+ const vfloat<N> p0 = mem<vfloat<N>>::loadu(valid,(float*)&vsrc[(index+0)*vstride+ofs]);
+ const vfloat<N> p1 = mem<vfloat<N>>::loadu(valid,(float*)&vsrc[(index+1)*vstride+ofs]);
+
+ if (P ) mem<vfloat<N>>::storeu(valid,P+i, madd(1.0f-u,p0,u*p1));
+ if (dPdu ) mem<vfloat<N>>::storeu(valid,dPdu+i, p1-p0);
+ if (ddPdudu) mem<vfloat<N>>::storeu(valid,ddPdudu+i,vfloat<N>(zero));
+ }
+ }
+
+ /* interpolation for vertex buffers */
+ else
+ {
+ assert(bufferSlot < numTimeSteps);
+ const char* vsrc = vertices[bufferSlot].getPtr();
+ const char* tsrc = tangents[bufferSlot].getPtr();
+ const size_t vstride = vertices[bufferSlot].getStride();
+ const size_t tstride = tangents[bufferSlot].getStride();
+
+ for (unsigned int i=0; i<valueCount; i+=N)
+ {
+ const size_t ofs = i*sizeof(float);
+ const size_t index = curves[primID];
+ const vbool<N> valid = vint<N>((int)i)+vint<N>(step) < vint<N>((int)valueCount);
+ const vfloat<N> p0 = mem<vfloat<N>>::loadu(valid,(float*)&vsrc[(index+0)*vstride+ofs]);
+ const vfloat<N> p1 = mem<vfloat<N>>::loadu(valid,(float*)&vsrc[(index+1)*vstride+ofs]);
+ const vfloat<N> t0 = mem<vfloat<N>>::loadu(valid,(float*)&tsrc[(index+0)*tstride+ofs]);
+ const vfloat<N> t1 = mem<vfloat<N>>::loadu(valid,(float*)&tsrc[(index+1)*tstride+ofs]);
+
+ const HermiteCurveT<vfloat<N>> curve(p0,t0,p1,t1);
+ if (P ) mem<vfloat<N>>::storeu(valid,P+i, curve.eval(u));
+ if (dPdu ) mem<vfloat<N>>::storeu(valid,dPdu+i, curve.eval_du(u));
+ if (ddPdudu) mem<vfloat<N>>::storeu(valid,ddPdudu+i,curve.eval_dudu(u));
+ }
+ }
+ }
+
+ void interpolate(const RTCInterpolateArguments* const args) {
+ interpolate_impl<4>(args);
+ }
+ };
+ }
+
+ DECLARE_ISA_FUNCTION(CurveGeometry*, createCurves, Device* COMMA Geometry::GType);
+}
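
For reference, the time-parameterized gather() overloads added above map the query time to a motion segment via timeSegment(), gather the control points at the two neighboring time steps, and blend them with madd(t0,a,t1*b), i.e. a per-component lerp. A minimal standalone sketch of that blend, assuming uniformly spaced time steps over [0,1] (at least two of them) and plain structs in place of the embree vector classes:

#include <cstddef>
#include <cmath>

// Plain stand-in for Vec3ff (xyz position, w radius) -- an assumption for
// illustration, not the embree type.
struct Vec4 { float x, y, z, w; };

// Component-wise lerp: p = (1 - t)*a + t*b.
static Vec4 lerp4(const Vec4& a, const Vec4& b, float t) {
  return { (1.0f - t)*a.x + t*b.x, (1.0f - t)*a.y + t*b.y,
           (1.0f - t)*a.z + t*b.z, (1.0f - t)*a.w + t*b.w };
}

// Map a global time in [0,1] to a segment index and local fraction, assuming
// numTimeSteps >= 2 uniformly spaced samples (simplified timeSegment()).
static size_t timeSegment(float time, size_t numTimeSteps, float& ftime) {
  const float f = time * float(numTimeSteps - 1);
  size_t itime = (size_t)std::floor(f);
  if (itime > numTimeSteps - 2) itime = numTimeSteps - 2; // clamp to last segment
  ftime = f - float(itime);
  return itime;
}

// Blend the four control points of one cubic curve between adjacent time
// steps, mirroring the structure of the time-parameterized gather() above.
void gatherBlended(const Vec4* const* vertices, size_t numTimeSteps,
                   size_t i, float time, Vec4 p[4]) {
  float ftime;
  const size_t itime = timeSegment(time, numTimeSteps, ftime);
  for (int k = 0; k < 4; k++)
    p[k] = lerp4(vertices[itime][i + k], vertices[itime + 1][i + k], ftime);
}
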
diff --git a/thirdparty/embree-aarch64/kernels/common/scene_grid_mesh.h b/thirdparty/embree/kernels/common/scene_grid_mesh.h
index c08658466a..fb6fed445b 100644
--- a/thirdparty/embree-aarch64/kernels/common/scene_grid_mesh.h
+++ b/thirdparty/embree/kernels/common/scene_grid_mesh.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
@@ -55,8 +55,87 @@ namespace embree
void commit();
bool verify();
void interpolate(const RTCInterpolateArguments* const args);
- void addElementsToCount (GeometryCounts & counts) const;
+ template<int N>
+ void interpolate_impl(const RTCInterpolateArguments* const args)
+ {
+ unsigned int primID = args->primID;
+ float U = args->u;
+ float V = args->v;
+
+ /* clamp input u,v to [0;1] range */
+ U = max(min(U,1.0f),0.0f);
+ V = max(min(V,1.0f),0.0f);
+
+ RTCBufferType bufferType = args->bufferType;
+ unsigned int bufferSlot = args->bufferSlot;
+ float* P = args->P;
+ float* dPdu = args->dPdu;
+ float* dPdv = args->dPdv;
+ float* ddPdudu = args->ddPdudu;
+ float* ddPdvdv = args->ddPdvdv;
+ float* ddPdudv = args->ddPdudv;
+ unsigned int valueCount = args->valueCount;
+
+ /* calculate base pointer and stride */
+ assert((bufferType == RTC_BUFFER_TYPE_VERTEX && bufferSlot < numTimeSteps) ||
+ (bufferType == RTC_BUFFER_TYPE_VERTEX_ATTRIBUTE && bufferSlot <= vertexAttribs.size()));
+ const char* src = nullptr;
+ size_t stride = 0;
+ if (bufferType == RTC_BUFFER_TYPE_VERTEX_ATTRIBUTE) {
+ src = vertexAttribs[bufferSlot].getPtr();
+ stride = vertexAttribs[bufferSlot].getStride();
+ } else {
+ src = vertices[bufferSlot].getPtr();
+ stride = vertices[bufferSlot].getStride();
+ }
+
+ const Grid& grid = grids[primID];
+ const int grid_width = grid.resX-1;
+ const int grid_height = grid.resY-1;
+ const float rcp_grid_width = rcp(float(grid_width));
+ const float rcp_grid_height = rcp(float(grid_height));
+ const int iu = min((int)floor(U*grid_width ),grid_width);
+ const int iv = min((int)floor(V*grid_height),grid_height);
+ const float u = U*grid_width-float(iu);
+ const float v = V*grid_height-float(iv);
+
+ for (unsigned int i=0; i<valueCount; i+=N)
+ {
+ const size_t ofs = i*sizeof(float);
+ const unsigned int idx0 = grid.startVtxID + (iv+0)*grid.lineVtxOffset + iu;
+ const unsigned int idx1 = grid.startVtxID + (iv+1)*grid.lineVtxOffset + iu;
+
+ const vbool<N> valid = vint<N>((int)i)+vint<N>(step) < vint<N>(int(valueCount));
+ const vfloat<N> p0 = mem<vfloat<N>>::loadu(valid,(float*)&src[(idx0+0)*stride+ofs]);
+ const vfloat<N> p1 = mem<vfloat<N>>::loadu(valid,(float*)&src[(idx0+1)*stride+ofs]);
+ const vfloat<N> p2 = mem<vfloat<N>>::loadu(valid,(float*)&src[(idx1+1)*stride+ofs]);
+ const vfloat<N> p3 = mem<vfloat<N>>::loadu(valid,(float*)&src[(idx1+0)*stride+ofs]);
+ const vbool<N> left = u+v <= 1.0f;
+ const vfloat<N> Q0 = select(left,p0,p2);
+ const vfloat<N> Q1 = select(left,p1,p3);
+ const vfloat<N> Q2 = select(left,p3,p1);
+ const vfloat<N> U = select(left,u,vfloat<N>(1.0f)-u);
+ const vfloat<N> V = select(left,v,vfloat<N>(1.0f)-v);
+ const vfloat<N> W = 1.0f-U-V;
+
+ if (P) {
+ mem<vfloat<N>>::storeu(valid,P+i,madd(W,Q0,madd(U,Q1,V*Q2)));
+ }
+ if (dPdu) {
+ assert(dPdu); mem<vfloat<N>>::storeu(valid,dPdu+i,select(left,Q1-Q0,Q0-Q1)*rcp_grid_width);
+ assert(dPdv); mem<vfloat<N>>::storeu(valid,dPdv+i,select(left,Q2-Q0,Q0-Q2)*rcp_grid_height);
+ }
+ if (ddPdudu) {
+ assert(ddPdudu); mem<vfloat<N>>::storeu(valid,ddPdudu+i,vfloat<N>(zero));
+ assert(ddPdvdv); mem<vfloat<N>>::storeu(valid,ddPdvdv+i,vfloat<N>(zero));
+ assert(ddPdudv); mem<vfloat<N>>::storeu(valid,ddPdudv+i,vfloat<N>(zero));
+ }
+ }
+ }
+
+ void addElementsToCount (GeometryCounts & counts) const;
+
__forceinline unsigned int getNumSubGrids(const size_t gridID)
{
const Grid &g = grid(gridID);
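
The grid interpolation added above first locates the (iu,iv) cell containing the clamped (U,V), then treats the cell's quad as two triangles split along its diagonal: the triangle is picked by u+v <= 1 and the value and first derivatives are barycentric blends (the derivatives are additionally scaled by rcp_grid_width/rcp_grid_height because u,v are local to one cell). A scalar sketch of the per-cell math, with plain floats standing in for the vfloat<N> lanes (an assumption for readability):

struct QuadInterp { float P, dPdu, dPdv; };

// Interpolate one value over a quad (p0,p1,p2,p3) split along its diagonal,
// matching the select(left, ...) pattern in interpolate_impl above.
QuadInterp interpolateQuad(float p0, float p1, float p2, float p3,
                           float u, float v) {
  const bool  left = (u + v) <= 1.0f;    // lower-left or upper-right triangle
  const float Q0 = left ? p0 : p2;
  const float Q1 = left ? p1 : p3;
  const float Q2 = left ? p3 : p1;
  const float U  = left ? u : 1.0f - u;  // reflect (u,v) for the upper triangle
  const float V  = left ? v : 1.0f - v;
  const float W  = 1.0f - U - V;
  QuadInterp r;
  r.P    = W*Q0 + U*Q1 + V*Q2;           // madd(W,Q0,madd(U,Q1,V*Q2))
  r.dPdu = left ? (Q1 - Q0) : (Q0 - Q1); // sign flips on the mirrored triangle
  r.dPdv = left ? (Q2 - Q0) : (Q0 - Q2);
  return r;
}
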
diff --git a/thirdparty/embree-aarch64/kernels/common/scene_instance.h b/thirdparty/embree/kernels/common/scene_instance.h
index 7ff82a4fb8..773f2b6fec 100644
--- a/thirdparty/embree-aarch64/kernels/common/scene_instance.h
+++ b/thirdparty/embree/kernels/common/scene_instance.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
@@ -150,8 +150,8 @@ namespace embree
__forceinline AffineSpace3vf<K> getWorld2Local(const vbool<K>& valid, const vfloat<K>& t) const
{
if (unlikely(gsubtype == GTY_SUBTYPE_INSTANCE_QUATERNION))
- return getWorld2LocalSlerp(valid, t);
- return getWorld2LocalLerp(valid, t);
+ return getWorld2LocalSlerp<K>(valid, t);
+ return getWorld2LocalLerp<K>(valid, t);
}
private:
@@ -160,7 +160,7 @@ namespace embree
__forceinline AffineSpace3vf<K> getWorld2LocalSlerp(const vbool<K>& valid, const vfloat<K>& t) const
{
vfloat<K> ftime;
- const vint<K> itime_k = timeSegment(t, ftime);
+ const vint<K> itime_k = timeSegment<K>(t, ftime);
assert(any(valid));
const size_t index = bsf(movemask(valid));
const int itime = itime_k[index];
@@ -186,7 +186,7 @@ namespace embree
__forceinline AffineSpace3vf<K> getWorld2LocalLerp(const vbool<K>& valid, const vfloat<K>& t) const
{
vfloat<K> ftime;
- const vint<K> itime_k = timeSegment(t, ftime);
+ const vint<K> itime_k = timeSegment<K>(t, ftime);
assert(any(valid));
const size_t index = bsf(movemask(valid));
const int itime = itime_k[index];
diff --git a/thirdparty/embree-aarch64/kernels/common/scene_line_segments.h b/thirdparty/embree/kernels/common/scene_line_segments.h
index c0f9ee8f77..3c9fdb39db 100644
--- a/thirdparty/embree-aarch64/kernels/common/scene_line_segments.h
+++ b/thirdparty/embree/kernels/common/scene_line_segments.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
@@ -34,6 +34,44 @@ namespace embree
void setMaxRadiusScale(float s);
void addElementsToCount (GeometryCounts & counts) const;
+ template<int N>
+ void interpolate_impl(const RTCInterpolateArguments* const args)
+ {
+ unsigned int primID = args->primID;
+ float u = args->u;
+ RTCBufferType bufferType = args->bufferType;
+ unsigned int bufferSlot = args->bufferSlot;
+ float* P = args->P;
+ float* dPdu = args->dPdu;
+ float* ddPdudu = args->ddPdudu;
+ unsigned int valueCount = args->valueCount;
+
+ /* calculate base pointer and stride */
+ assert((bufferType == RTC_BUFFER_TYPE_VERTEX && bufferSlot < numTimeSteps) ||
+ (bufferType == RTC_BUFFER_TYPE_VERTEX_ATTRIBUTE && bufferSlot <= vertexAttribs.size()));
+ const char* src = nullptr;
+ size_t stride = 0;
+ if (bufferType == RTC_BUFFER_TYPE_VERTEX_ATTRIBUTE) {
+ src = vertexAttribs[bufferSlot].getPtr();
+ stride = vertexAttribs[bufferSlot].getStride();
+ } else {
+ src = vertices[bufferSlot].getPtr();
+ stride = vertices[bufferSlot].getStride();
+ }
+
+ for (unsigned int i=0; i<valueCount; i+=N)
+ {
+ const size_t ofs = i*sizeof(float);
+ const size_t segment = segments[primID];
+ const vbool<N> valid = vint<N>((int)i)+vint<N>(step) < vint<N>(int(valueCount));
+ const vfloat<N> p0 = mem<vfloat<N>>::loadu(valid,(float*)&src[(segment+0)*stride+ofs]);
+ const vfloat<N> p1 = mem<vfloat<N>>::loadu(valid,(float*)&src[(segment+1)*stride+ofs]);
+ if (P ) mem<vfloat<N>>::storeu(valid,P+i,lerp(p0,p1,u));
+ if (dPdu ) mem<vfloat<N>>::storeu(valid,dPdu+i,p1-p0);
+ if (ddPdudu) mem<vfloat<N>>::storeu(valid,ddPdudu+i,vfloat<N>(zero));
+ }
+ }
+
public:
/*! returns the number of vertices */
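
The line-segment case above is the simplest of these interpolators: the value is a straight lerp of the two endpoints, the first derivative is their constant difference, and the second derivative is zero. A scalar sketch, using plain floats instead of the vfloat<N> lanes:

// Scalar equivalent of the masked SIMD loop body in interpolate_impl above.
void interpolateSegment(float p0, float p1, float u,
                        float* P, float* dPdu, float* ddPdudu) {
  if (P)       *P       = (1.0f - u)*p0 + u*p1; // lerp(p0, p1, u)
  if (dPdu)    *dPdu    = p1 - p0;              // constant along the segment
  if (ddPdudu) *ddPdudu = 0.0f;                 // linear, so zero curvature
}
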
diff --git a/thirdparty/embree-aarch64/kernels/common/scene_points.h b/thirdparty/embree/kernels/common/scene_points.h
index 1d39ed07ba..017e098a51 100644
--- a/thirdparty/embree-aarch64/kernels/common/scene_points.h
+++ b/thirdparty/embree/kernels/common/scene_points.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
diff --git a/thirdparty/embree-aarch64/kernels/common/scene_quad_mesh.h b/thirdparty/embree/kernels/common/scene_quad_mesh.h
index d5bb054b14..bd8eeaaeb7 100644
--- a/thirdparty/embree-aarch64/kernels/common/scene_quad_mesh.h
+++ b/thirdparty/embree/kernels/common/scene_quad_mesh.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
@@ -43,6 +43,66 @@ namespace embree
void interpolate(const RTCInterpolateArguments* const args);
void addElementsToCount (GeometryCounts & counts) const;
+ template<int N>
+ void interpolate_impl(const RTCInterpolateArguments* const args)
+ {
+ unsigned int primID = args->primID;
+ float u = args->u;
+ float v = args->v;
+ RTCBufferType bufferType = args->bufferType;
+ unsigned int bufferSlot = args->bufferSlot;
+ float* P = args->P;
+ float* dPdu = args->dPdu;
+ float* dPdv = args->dPdv;
+ float* ddPdudu = args->ddPdudu;
+ float* ddPdvdv = args->ddPdvdv;
+ float* ddPdudv = args->ddPdudv;
+ unsigned int valueCount = args->valueCount;
+
+ /* calculate base pointer and stride */
+ assert((bufferType == RTC_BUFFER_TYPE_VERTEX && bufferSlot < numTimeSteps) ||
+ (bufferType == RTC_BUFFER_TYPE_VERTEX_ATTRIBUTE && bufferSlot <= vertexAttribs.size()));
+ const char* src = nullptr;
+ size_t stride = 0;
+ if (bufferType == RTC_BUFFER_TYPE_VERTEX_ATTRIBUTE) {
+ src = vertexAttribs[bufferSlot].getPtr();
+ stride = vertexAttribs[bufferSlot].getStride();
+ } else {
+ src = vertices[bufferSlot].getPtr();
+ stride = vertices[bufferSlot].getStride();
+ }
+
+ for (unsigned int i=0; i<valueCount; i+=N)
+ {
+ const vbool<N> valid = vint<N>((int)i)+vint<N>(step) < vint<N>(int(valueCount));
+ const size_t ofs = i*sizeof(float);
+ const Quad& tri = quad(primID);
+ const vfloat<N> p0 = mem<vfloat<N>>::loadu(valid,(float*)&src[tri.v[0]*stride+ofs]);
+ const vfloat<N> p1 = mem<vfloat<N>>::loadu(valid,(float*)&src[tri.v[1]*stride+ofs]);
+ const vfloat<N> p2 = mem<vfloat<N>>::loadu(valid,(float*)&src[tri.v[2]*stride+ofs]);
+ const vfloat<N> p3 = mem<vfloat<N>>::loadu(valid,(float*)&src[tri.v[3]*stride+ofs]);
+ const vbool<N> left = u+v <= 1.0f;
+ const vfloat<N> Q0 = select(left,p0,p2);
+ const vfloat<N> Q1 = select(left,p1,p3);
+ const vfloat<N> Q2 = select(left,p3,p1);
+ const vfloat<N> U = select(left,u,vfloat<N>(1.0f)-u);
+ const vfloat<N> V = select(left,v,vfloat<N>(1.0f)-v);
+ const vfloat<N> W = 1.0f-U-V;
+ if (P) {
+ mem<vfloat<N>>::storeu(valid,P+i,madd(W,Q0,madd(U,Q1,V*Q2)));
+ }
+ if (dPdu) {
+ assert(dPdu); mem<vfloat<N>>::storeu(valid,dPdu+i,select(left,Q1-Q0,Q0-Q1));
+ assert(dPdv); mem<vfloat<N>>::storeu(valid,dPdv+i,select(left,Q2-Q0,Q0-Q2));
+ }
+ if (ddPdudu) {
+ assert(ddPdudu); mem<vfloat<N>>::storeu(valid,ddPdudu+i,vfloat<N>(zero));
+ assert(ddPdvdv); mem<vfloat<N>>::storeu(valid,ddPdvdv+i,vfloat<N>(zero));
+ assert(ddPdudv); mem<vfloat<N>>::storeu(valid,ddPdudv+i,vfloat<N>(zero));
+ }
+ }
+ }
+
public:
/*! returns number of vertices */
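
All of these interpolate_impl loops share the same masking pattern: valueCount floats are processed N lanes at a time, and the lane mask vint<N>(i)+step < valueCount disables the tail lanes of the last chunk so the partial loads and stores stay in bounds. A rough scalar illustration of what that mask expresses, with plain arrays standing in for vfloat<N>/vbool<N> (an assumption for clarity):

// Copy `valueCount` floats in chunks of N, masking off the tail lanes --
// the same shape as the masked loadu/storeu loops above.
template <int N>
void copyChunked(const float* src, float* dst, unsigned valueCount) {
  for (unsigned i = 0; i < valueCount; i += N) {
    bool valid[N];
    for (int lane = 0; lane < N; lane++)
      valid[lane] = (i + lane) < valueCount;  // vint<N>(i) + step < valueCount
    for (int lane = 0; lane < N; lane++)
      if (valid[lane])
        dst[i + lane] = src[i + lane];        // masked load/store
  }
}
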
diff --git a/thirdparty/embree-aarch64/kernels/common/scene_subdiv_mesh.h b/thirdparty/embree/kernels/common/scene_subdiv_mesh.h
index d0246009db..1db170196d 100644
--- a/thirdparty/embree-aarch64/kernels/common/scene_subdiv_mesh.h
+++ b/thirdparty/embree/kernels/common/scene_subdiv_mesh.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
@@ -275,11 +275,11 @@ namespace embree
parallel_set<uint32_t> holeSet;
/*! fast lookup table to detect invalid faces */
- mvector<int8_t> invalid_face;
+ mvector<char> invalid_face;
/*! test if face i is invalid in timestep j */
- __forceinline int8_t& invalidFace(size_t i, size_t j = 0) { return invalid_face[i*numTimeSteps+j]; }
- __forceinline const int8_t& invalidFace(size_t i, size_t j = 0) const { return invalid_face[i*numTimeSteps+j]; }
+ __forceinline char& invalidFace(size_t i, size_t j = 0) { return invalid_face[i*numTimeSteps+j]; }
+ __forceinline const char& invalidFace(size_t i, size_t j = 0) const { return invalid_face[i*numTimeSteps+j]; }
/*! interpolation cache */
public:
diff --git a/thirdparty/embree-aarch64/kernels/common/scene_triangle_mesh.cpp b/thirdparty/embree/kernels/common/scene_triangle_mesh.cpp
index d1c2750f14..3bbd7e51ae 100644
--- a/thirdparty/embree-aarch64/kernels/common/scene_triangle_mesh.cpp
+++ b/thirdparty/embree/kernels/common/scene_triangle_mesh.cpp
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#include "scene_triangle_mesh.h"
@@ -178,62 +178,13 @@ namespace embree
return true;
}
-
- void TriangleMesh::interpolate(const RTCInterpolateArguments* const args)
- {
- unsigned int primID = args->primID;
- float u = args->u;
- float v = args->v;
- RTCBufferType bufferType = args->bufferType;
- unsigned int bufferSlot = args->bufferSlot;
- float* P = args->P;
- float* dPdu = args->dPdu;
- float* dPdv = args->dPdv;
- float* ddPdudu = args->ddPdudu;
- float* ddPdvdv = args->ddPdvdv;
- float* ddPdudv = args->ddPdudv;
- unsigned int valueCount = args->valueCount;
-
- /* calculate base pointer and stride */
- assert((bufferType == RTC_BUFFER_TYPE_VERTEX && bufferSlot < numTimeSteps) ||
- (bufferType == RTC_BUFFER_TYPE_VERTEX_ATTRIBUTE && bufferSlot <= vertexAttribs.size()));
- const char* src = nullptr;
- size_t stride = 0;
- if (bufferType == RTC_BUFFER_TYPE_VERTEX_ATTRIBUTE) {
- src = vertexAttribs[bufferSlot].getPtr();
- stride = vertexAttribs[bufferSlot].getStride();
- } else {
- src = vertices[bufferSlot].getPtr();
- stride = vertices[bufferSlot].getStride();
- }
-
- for (unsigned int i=0; i<valueCount; i+=4)
- {
- size_t ofs = i*sizeof(float);
- const float w = 1.0f-u-v;
- const Triangle& tri = triangle(primID);
- const vbool4 valid = vint4((int)i)+vint4(step) < vint4(int(valueCount));
- const vfloat4 p0 = vfloat4::loadu(valid,(float*)&src[tri.v[0]*stride+ofs]);
- const vfloat4 p1 = vfloat4::loadu(valid,(float*)&src[tri.v[1]*stride+ofs]);
- const vfloat4 p2 = vfloat4::loadu(valid,(float*)&src[tri.v[2]*stride+ofs]);
-
- if (P) {
- vfloat4::storeu(valid,P+i,madd(w,p0,madd(u,p1,v*p2)));
- }
- if (dPdu) {
- assert(dPdu); vfloat4::storeu(valid,dPdu+i,p1-p0);
- assert(dPdv); vfloat4::storeu(valid,dPdv+i,p2-p0);
- }
- if (ddPdudu) {
- assert(ddPdudu); vfloat4::storeu(valid,ddPdudu+i,vfloat4(zero));
- assert(ddPdvdv); vfloat4::storeu(valid,ddPdvdv+i,vfloat4(zero));
- assert(ddPdudv); vfloat4::storeu(valid,ddPdudv+i,vfloat4(zero));
- }
- }
+
+ void TriangleMesh::interpolate(const RTCInterpolateArguments* const args) {
+ interpolate_impl<4>(args);
}
-
+
#endif
-
+
namespace isa
{
TriangleMesh* createTriangleMesh(Device* device) {
diff --git a/thirdparty/embree-aarch64/kernels/common/scene_triangle_mesh.h b/thirdparty/embree/kernels/common/scene_triangle_mesh.h
index eaf2e1799a..ad3f602fde 100644
--- a/thirdparty/embree-aarch64/kernels/common/scene_triangle_mesh.h
+++ b/thirdparty/embree/kernels/common/scene_triangle_mesh.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
@@ -43,8 +43,62 @@ namespace embree
void interpolate(const RTCInterpolateArguments* const args);
void addElementsToCount (GeometryCounts & counts) const;
+ template<int N>
+ void interpolate_impl(const RTCInterpolateArguments* const args)
+ {
+ unsigned int primID = args->primID;
+ float u = args->u;
+ float v = args->v;
+ RTCBufferType bufferType = args->bufferType;
+ unsigned int bufferSlot = args->bufferSlot;
+ float* P = args->P;
+ float* dPdu = args->dPdu;
+ float* dPdv = args->dPdv;
+ float* ddPdudu = args->ddPdudu;
+ float* ddPdvdv = args->ddPdvdv;
+ float* ddPdudv = args->ddPdudv;
+ unsigned int valueCount = args->valueCount;
+
+ /* calculate base pointer and stride */
+ assert((bufferType == RTC_BUFFER_TYPE_VERTEX && bufferSlot < numTimeSteps) ||
+ (bufferType == RTC_BUFFER_TYPE_VERTEX_ATTRIBUTE && bufferSlot <= vertexAttribs.size()));
+ const char* src = nullptr;
+ size_t stride = 0;
+ if (bufferType == RTC_BUFFER_TYPE_VERTEX_ATTRIBUTE) {
+ src = vertexAttribs[bufferSlot].getPtr();
+ stride = vertexAttribs[bufferSlot].getStride();
+ } else {
+ src = vertices[bufferSlot].getPtr();
+ stride = vertices[bufferSlot].getStride();
+ }
+
+ for (unsigned int i=0; i<valueCount; i+=N)
+ {
+ size_t ofs = i*sizeof(float);
+ const float w = 1.0f-u-v;
+ const Triangle& tri = triangle(primID);
+ const vbool<N> valid = vint<N>((int)i)+vint<N>(step) < vint<N>(int(valueCount));
+ const vfloat<N> p0 = mem<vfloat<N>>::loadu(valid,(float*)&src[tri.v[0]*stride+ofs]);
+ const vfloat<N> p1 = mem<vfloat<N>>::loadu(valid,(float*)&src[tri.v[1]*stride+ofs]);
+ const vfloat<N> p2 = mem<vfloat<N>>::loadu(valid,(float*)&src[tri.v[2]*stride+ofs]);
+
+ if (P) {
+ mem<vfloat<N>>::storeu(valid,P+i,madd(w,p0,madd(u,p1,v*p2)));
+ }
+ if (dPdu) {
+ assert(dPdu); mem<vfloat<N>>::storeu(valid,dPdu+i,p1-p0);
+ assert(dPdv); mem<vfloat<N>>::storeu(valid,dPdv+i,p2-p0);
+ }
+ if (ddPdudu) {
+ assert(ddPdudu); mem<vfloat<N>>::storeu(valid,ddPdudu+i,vfloat<N>(zero));
+ assert(ddPdvdv); mem<vfloat<N>>::storeu(valid,ddPdvdv+i,vfloat<N>(zero));
+ assert(ddPdudv); mem<vfloat<N>>::storeu(valid,ddPdudv+i,vfloat<N>(zero));
+ }
+ }
+ }
+
public:
-
+
/*! returns number of vertices */
__forceinline size_t numVertices() const {
return vertices[0].size();
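
The triangle path added above is plain barycentric interpolation: with w = 1-u-v the value is w*p0 + u*p1 + v*p2, the first derivatives are the constant edge differences, and all second derivatives are zero because the blend is affine in (u,v). A scalar sketch with plain floats in place of the SIMD lanes (hypothetical helper name, not the embree API):

// Scalar equivalent of the per-lane math in TriangleMesh::interpolate_impl.
void interpolateTriangle(float p0, float p1, float p2, float u, float v,
                         float* P, float* dPdu, float* dPdv) {
  const float w = 1.0f - u - v;
  if (P)    *P    = w*p0 + u*p1 + v*p2; // madd(w,p0,madd(u,p1,v*p2))
  if (dPdu) *dPdu = p1 - p0;            // affine blend => constant derivatives
  if (dPdv) *dPdv = p2 - p0;
}
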
diff --git a/thirdparty/embree-aarch64/kernels/common/scene_user_geometry.h b/thirdparty/embree/kernels/common/scene_user_geometry.h
index 8d11ed6986..2867b18b79 100644
--- a/thirdparty/embree-aarch64/kernels/common/scene_user_geometry.h
+++ b/thirdparty/embree/kernels/common/scene_user_geometry.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
diff --git a/thirdparty/embree-aarch64/kernels/common/stack_item.h b/thirdparty/embree/kernels/common/stack_item.h
index 533c385365..c31c64e862 100644
--- a/thirdparty/embree-aarch64/kernels/common/stack_item.h
+++ b/thirdparty/embree/kernels/common/stack_item.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
diff --git a/thirdparty/embree-aarch64/kernels/common/stat.cpp b/thirdparty/embree/kernels/common/stat.cpp
index b73c3a8c76..ebb77cd534 100644
--- a/thirdparty/embree-aarch64/kernels/common/stat.cpp
+++ b/thirdparty/embree/kernels/common/stat.cpp
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#include "stat.h"
diff --git a/thirdparty/embree-aarch64/kernels/common/stat.h b/thirdparty/embree/kernels/common/stat.h
index 3cda2bd014..02fc07e67f 100644
--- a/thirdparty/embree-aarch64/kernels/common/stat.h
+++ b/thirdparty/embree/kernels/common/stat.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
diff --git a/thirdparty/embree-aarch64/kernels/common/state.cpp b/thirdparty/embree/kernels/common/state.cpp
index 51fc9b7826..01c862da0c 100644
--- a/thirdparty/embree-aarch64/kernels/common/state.cpp
+++ b/thirdparty/embree/kernels/common/state.cpp
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#include "state.h"
@@ -100,7 +100,6 @@ namespace embree
instancing_open_max_depth = 32;
instancing_open_max = 50000000;
- ignore_config_files = false;
float_exceptions = false;
quality_flags = -1;
scene_flags = -1;
@@ -115,8 +114,6 @@ namespace embree
#else
set_affinity = false;
#endif
- /* per default enable affinity on KNL */
- if (hasISA(AVX512KNL)) set_affinity = true;
start_threads = false;
enable_selockmemoryprivilege = false;
@@ -147,20 +144,7 @@ namespace embree
}
bool State::checkISASupport() {
-#if defined(__ARM_NEON)
- /*
- * NEON CPU type is a mixture of NEON and SSE2
- */
-
- bool hasSSE2 = (getCPUFeatures() & enabled_cpu_features) & CPU_FEATURE_SSE2;
-
- /* this will be true when explicitly initialize Device with `isa=neon` config */
- bool hasNEON = (getCPUFeatures() & enabled_cpu_features) & CPU_FEATURE_NEON;
-
- return hasSSE2 || hasNEON;
-#else
return (getCPUFeatures() & enabled_cpu_features) == enabled_cpu_features;
-#endif
}
void State::verify()
@@ -173,10 +157,8 @@ namespace embree
* functions */
#if defined(DEBUG)
#if defined(EMBREE_TARGET_SSE2)
-#if !defined(__ARM_NEON)
assert(sse2::getISA() <= SSE2);
#endif
-#endif
#if defined(EMBREE_TARGET_SSE42)
assert(sse42::getISA() <= SSE42);
#endif
@@ -186,11 +168,8 @@ namespace embree
#if defined(EMBREE_TARGET_AVX2)
assert(avx2::getISA() <= AVX2);
#endif
-#if defined (EMBREE_TARGET_AVX512KNL)
- assert(avx512knl::getISA() <= AVX512KNL);
-#endif
-#if defined (EMBREE_TARGET_AVX512SKX)
- assert(avx512skx::getISA() <= AVX512SKX);
+#if defined (EMBREE_TARGET_AVX512)
+ assert(avx512::getISA() <= AVX512);
#endif
#endif
}
@@ -241,8 +220,7 @@ namespace embree
else if (isa == "avx") return AVX;
else if (isa == "avxi") return AVXI;
else if (isa == "avx2") return AVX2;
- else if (isa == "avx512knl") return AVX512KNL;
- else if (isa == "avx512skx") return AVX512SKX;
+ else if (isa == "avx512") return AVX512;
else return SSE2;
}
@@ -269,20 +247,20 @@ namespace embree
start_threads = cin->get().Int();
else if (tok == Token::Id("isa") && cin->trySymbol("=")) {
- std::string isa = toLowerCase(cin->get().Identifier());
- enabled_cpu_features = string_to_cpufeatures(isa);
+ std::string isa_str = toLowerCase(cin->get().Identifier());
+ enabled_cpu_features = string_to_cpufeatures(isa_str);
enabled_builder_cpu_features = enabled_cpu_features;
}
else if (tok == Token::Id("max_isa") && cin->trySymbol("=")) {
- std::string isa = toLowerCase(cin->get().Identifier());
- enabled_cpu_features &= string_to_cpufeatures(isa);
+ std::string isa_str = toLowerCase(cin->get().Identifier());
+ enabled_cpu_features &= string_to_cpufeatures(isa_str);
enabled_builder_cpu_features &= enabled_cpu_features;
}
else if (tok == Token::Id("max_builder_isa") && cin->trySymbol("=")) {
- std::string isa = toLowerCase(cin->get().Identifier());
- enabled_builder_cpu_features &= string_to_cpufeatures(isa);
+ std::string isa_str = toLowerCase(cin->get().Identifier());
+ enabled_builder_cpu_features &= string_to_cpufeatures(isa_str);
}
else if (tok == Token::Id("frequency_level") && cin->trySymbol("=")) {
@@ -299,8 +277,6 @@ namespace embree
hugepages = cin->get().Int();
}
- else if (tok == Token::Id("ignore_config_files") && cin->trySymbol("="))
- ignore_config_files = cin->get().Int();
else if (tok == Token::Id("float_exceptions") && cin->trySymbol("="))
float_exceptions = cin->get().Int();
diff --git a/thirdparty/embree-aarch64/kernels/common/state.h b/thirdparty/embree/kernels/common/state.h
index d0fccc023f..33bcc843b2 100644
--- a/thirdparty/embree-aarch64/kernels/common/state.h
+++ b/thirdparty/embree/kernels/common/state.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
@@ -118,7 +118,6 @@ namespace embree
size_t instancing_open_max; //!< instancing opens tree to maximally that number of subtrees
public:
- bool ignore_config_files; //!< if true no more config files get parse
bool float_exceptions; //!< enable floating point exceptions
int quality_flags;
int scene_flags;
diff --git a/thirdparty/embree-aarch64/kernels/common/vector.h b/thirdparty/embree/kernels/common/vector.h
index b478762240..4b08275f3b 100644
--- a/thirdparty/embree-aarch64/kernels/common/vector.h
+++ b/thirdparty/embree/kernels/common/vector.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#include "default.h"
diff --git a/thirdparty/embree-aarch64/kernels/config.h b/thirdparty/embree/kernels/config.h
index 80a8ab2a56..80a8ab2a56 100644
--- a/thirdparty/embree-aarch64/kernels/config.h
+++ b/thirdparty/embree/kernels/config.h
diff --git a/thirdparty/embree-aarch64/kernels/geometry/cone.h b/thirdparty/embree/kernels/geometry/cone.h
index 961ef86160..17429bab32 100644
--- a/thirdparty/embree-aarch64/kernels/geometry/cone.h
+++ b/thirdparty/embree/kernels/geometry/cone.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
diff --git a/thirdparty/embree-aarch64/kernels/geometry/coneline_intersector.h b/thirdparty/embree/kernels/geometry/coneline_intersector.h
index 0902baff7d..90f3792eff 100644
--- a/thirdparty/embree-aarch64/kernels/geometry/coneline_intersector.h
+++ b/thirdparty/embree/kernels/geometry/coneline_intersector.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
@@ -169,9 +169,9 @@ namespace embree
const Vec3vf<M> ray_org(ray.org.x, ray.org.y, ray.org.z);
const Vec3vf<M> ray_dir(ray.dir.x, ray.dir.y, ray.dir.z);
const vfloat<M> ray_tnear(ray.tnear());
- const Vec4vf<M> v0 = enlargeRadiusToMinWidth(context,geom,ray_org,v0i);
- const Vec4vf<M> v1 = enlargeRadiusToMinWidth(context,geom,ray_org,v1i);
- return __coneline_internal::intersectCone(valid_i,ray_org,ray_dir,ray_tnear,ray_tfar(ray),v0,v1,cL,cR,epilog);
+ const Vec4vf<M> v0 = enlargeRadiusToMinWidth<M>(context,geom,ray_org,v0i);
+ const Vec4vf<M> v1 = enlargeRadiusToMinWidth<M>(context,geom,ray_org,v1i);
+ return __coneline_internal::intersectCone<M>(valid_i,ray_org,ray_dir,ray_tnear,ray_tfar(ray),v0,v1,cL,cR,epilog);
}
};
@@ -200,9 +200,9 @@ namespace embree
const Vec3vf<M> ray_org(ray.org.x[k], ray.org.y[k], ray.org.z[k]);
const Vec3vf<M> ray_dir(ray.dir.x[k], ray.dir.y[k], ray.dir.z[k]);
const vfloat<M> ray_tnear = ray.tnear()[k];
- const Vec4vf<M> v0 = enlargeRadiusToMinWidth(context,geom,ray_org,v0i);
- const Vec4vf<M> v1 = enlargeRadiusToMinWidth(context,geom,ray_org,v1i);
- return __coneline_internal::intersectCone(valid_i,ray_org,ray_dir,ray_tnear,ray_tfar(ray,k),v0,v1,cL,cR,epilog);
+ const Vec4vf<M> v0 = enlargeRadiusToMinWidth<M>(context,geom,ray_org,v0i);
+ const Vec4vf<M> v1 = enlargeRadiusToMinWidth<M>(context,geom,ray_org,v1i);
+ return __coneline_internal::intersectCone<M>(valid_i,ray_org,ray_dir,ray_tnear,ray_tfar(ray,k),v0,v1,cL,cR,epilog);
}
};
}
diff --git a/thirdparty/embree-aarch64/kernels/geometry/conelinei_intersector.h b/thirdparty/embree/kernels/geometry/conelinei_intersector.h
index d47218eb8b..6a985ebcad 100644
--- a/thirdparty/embree-aarch64/kernels/geometry/conelinei_intersector.h
+++ b/thirdparty/embree/kernels/geometry/conelinei_intersector.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
@@ -10,7 +10,7 @@ namespace embree
{
namespace isa
{
- template<int M, int Mx, bool filter>
+ template<int M, bool filter>
struct ConeCurveMiIntersector1
{
typedef LineMi<M> Primitive;
@@ -23,8 +23,8 @@ namespace embree
Vec4vf<M> v0,v1;
vbool<M> cL,cR;
line.gather(v0,v1,cL,cR,geom);
- const vbool<Mx> valid = line.template valid<Mx>();
- ConeCurveIntersector1<Mx>::intersect(valid,ray,context,geom,pre,v0,v1,cL,cR,Intersect1EpilogM<M,Mx,filter>(ray,context,line.geomID(),line.primID()));
+ const vbool<M> valid = line.valid();
+ ConeCurveIntersector1<M>::intersect(valid,ray,context,geom,pre,v0,v1,cL,cR,Intersect1EpilogM<M,filter>(ray,context,line.geomID(),line.primID()));
}
static __forceinline bool occluded(const Precalculations& pre, Ray& ray, IntersectContext* context, const Primitive& line)
@@ -34,8 +34,8 @@ namespace embree
Vec4vf<M> v0,v1;
vbool<M> cL,cR;
line.gather(v0,v1,cL,cR,geom);
- const vbool<Mx> valid = line.template valid<Mx>();
- return ConeCurveIntersector1<Mx>::intersect(valid,ray,context,geom,pre,v0,v1,cL,cR,Occluded1EpilogM<M,Mx,filter>(ray,context,line.geomID(),line.primID()));
+ const vbool<M> valid = line.valid();
+ return ConeCurveIntersector1<M>::intersect(valid,ray,context,geom,pre,v0,v1,cL,cR,Occluded1EpilogM<M,filter>(ray,context,line.geomID(),line.primID()));
return false;
}
@@ -45,7 +45,7 @@ namespace embree
}
};
- template<int M, int Mx, bool filter>
+ template<int M, bool filter>
struct ConeCurveMiMBIntersector1
{
typedef LineMi<M> Primitive;
@@ -58,8 +58,8 @@ namespace embree
Vec4vf<M> v0,v1;
vbool<M> cL,cR;
line.gather(v0,v1,cL,cR,geom,ray.time());
- const vbool<Mx> valid = line.template valid<Mx>();
- ConeCurveIntersector1<Mx>::intersect(valid,ray,context,geom,pre,v0,v1,cL,cR,Intersect1EpilogM<M,Mx,filter>(ray,context,line.geomID(),line.primID()));
+ const vbool<M> valid = line.valid();
+ ConeCurveIntersector1<M>::intersect(valid,ray,context,geom,pre,v0,v1,cL,cR,Intersect1EpilogM<M,filter>(ray,context,line.geomID(),line.primID()));
}
static __forceinline bool occluded(const Precalculations& pre, Ray& ray, IntersectContext* context, const Primitive& line)
@@ -69,8 +69,8 @@ namespace embree
Vec4vf<M> v0,v1;
vbool<M> cL,cR;
line.gather(v0,v1,cL,cR,geom,ray.time());
- const vbool<Mx> valid = line.template valid<Mx>();
- return ConeCurveIntersector1<Mx>::intersect(valid,ray,context,geom,pre,v0,v1,cL,cR,Occluded1EpilogM<M,Mx,filter>(ray,context,line.geomID(),line.primID()));
+ const vbool<M> valid = line.valid();
+ return ConeCurveIntersector1<M>::intersect(valid,ray,context,geom,pre,v0,v1,cL,cR,Occluded1EpilogM<M,filter>(ray,context,line.geomID(),line.primID()));
return false;
}
@@ -80,7 +80,7 @@ namespace embree
}
};
- template<int M, int Mx, int K, bool filter>
+ template<int M, int K, bool filter>
struct ConeCurveMiIntersectorK
{
typedef LineMi<M> Primitive;
@@ -93,8 +93,8 @@ namespace embree
Vec4vf<M> v0,v1;
vbool<M> cL,cR;
line.gather(v0,v1,cL,cR,geom);
- const vbool<Mx> valid = line.template valid<Mx>();
- ConeCurveIntersectorK<Mx,K>::intersect(valid,ray,k,context,geom,pre,v0,v1,cL,cR,Intersect1KEpilogM<M,Mx,K,filter>(ray,k,context,line.geomID(),line.primID()));
+ const vbool<M> valid = line.valid();
+ ConeCurveIntersectorK<M,K>::intersect(valid,ray,k,context,geom,pre,v0,v1,cL,cR,Intersect1KEpilogM<M,K,filter>(ray,k,context,line.geomID(),line.primID()));
}
static __forceinline bool occluded(const Precalculations& pre, RayK<K>& ray, size_t k, IntersectContext* context, const Primitive& line)
@@ -104,12 +104,12 @@ namespace embree
Vec4vf<M> v0,v1;
vbool<M> cL,cR;
line.gather(v0,v1,cL,cR,geom);
- const vbool<Mx> valid = line.template valid<Mx>();
- return ConeCurveIntersectorK<Mx,K>::intersect(valid,ray,k,context,geom,pre,v0,v1,cL,cR,Occluded1KEpilogM<M,Mx,K,filter>(ray,k,context,line.geomID(),line.primID()));
+ const vbool<M> valid = line.valid();
+ return ConeCurveIntersectorK<M,K>::intersect(valid,ray,k,context,geom,pre,v0,v1,cL,cR,Occluded1KEpilogM<M,K,filter>(ray,k,context,line.geomID(),line.primID()));
}
};
- template<int M, int Mx, int K, bool filter>
+ template<int M, int K, bool filter>
struct ConeCurveMiMBIntersectorK
{
typedef LineMi<M> Primitive;
@@ -122,8 +122,8 @@ namespace embree
Vec4vf<M> v0,v1;
vbool<M> cL,cR;
line.gather(v0,v1,cL,cR,geom,ray.time()[k]);
- const vbool<Mx> valid = line.template valid<Mx>();
- ConeCurveIntersectorK<Mx,K>::intersect(valid,ray,k,context,geom,pre,v0,v1,cL,cR,Intersect1KEpilogM<M,Mx,K,filter>(ray,k,context,line.geomID(),line.primID()));
+ const vbool<M> valid = line.valid();
+ ConeCurveIntersectorK<M,K>::intersect(valid,ray,k,context,geom,pre,v0,v1,cL,cR,Intersect1KEpilogM<M,K,filter>(ray,k,context,line.geomID(),line.primID()));
}
static __forceinline bool occluded(const Precalculations& pre, RayK<K>& ray, size_t k, IntersectContext* context, const Primitive& line)
@@ -133,8 +133,8 @@ namespace embree
Vec4vf<M> v0,v1;
vbool<M> cL,cR;
line.gather(v0,v1,cL,cR,geom,ray.time()[k]);
- const vbool<Mx> valid = line.template valid<Mx>();
- return ConeCurveIntersectorK<Mx,K>::intersect(valid,ray,k,context,geom,pre,v0,v1,cL,cR,Occluded1KEpilogM<M,Mx,K,filter>(ray,k,context,line.geomID(),line.primID()));
+ const vbool<M> valid = line.valid();
+ return ConeCurveIntersectorK<M,K>::intersect(valid,ray,k,context,geom,pre,v0,v1,cL,cR,Occluded1KEpilogM<M,K,filter>(ray,k,context,line.geomID(),line.primID()));
}
};
}
diff --git a/thirdparty/embree-aarch64/kernels/geometry/curveNi.h b/thirdparty/embree/kernels/geometry/curveNi.h
index 51384f1959..6366a6fb9c 100644
--- a/thirdparty/embree-aarch64/kernels/geometry/curveNi.h
+++ b/thirdparty/embree/kernels/geometry/curveNi.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
@@ -43,10 +43,10 @@ namespace embree
__forceinline void fill(const PrimRef* prims, size_t& begin, size_t _end, Scene* scene)
{
size_t end = min(begin+M,_end);
- N = (uint8_t)(end-begin);
+ N = (unsigned char)(end-begin);
const unsigned int geomID0 = prims[begin].geomID();
this->geomID(N) = geomID0;
- ty = (uint8_t) scene->get(geomID0)->getType();
+ ty = (unsigned char) scene->get(geomID0)->getType();
/* encode all primitives */
BBox3fa bounds = empty;
@@ -76,25 +76,25 @@ namespace embree
const LinearSpace3fa space3(trunc(126.0f*space2.vx),trunc(126.0f*space2.vy),trunc(126.0f*space2.vz));
const BBox3fa bounds = scene->get(geomID)->vbounds(loffset,lscale,max(length(space3.vx),length(space3.vy),length(space3.vz)),space3.transposed(),primID);
- bounds_vx_x(N)[i] = (int8_t) space3.vx.x;
- bounds_vx_y(N)[i] = (int8_t) space3.vx.y;
- bounds_vx_z(N)[i] = (int8_t) space3.vx.z;
+ bounds_vx_x(N)[i] = (char) space3.vx.x;
+ bounds_vx_y(N)[i] = (char) space3.vx.y;
+ bounds_vx_z(N)[i] = (char) space3.vx.z;
bounds_vx_lower(N)[i] = (short) clamp(floor(bounds.lower.x),-32767.0f,32767.0f);
bounds_vx_upper(N)[i] = (short) clamp(ceil (bounds.upper.x),-32767.0f,32767.0f);
assert(-32767.0f <= floor(bounds.lower.x) && floor(bounds.lower.x) <= 32767.0f);
assert(-32767.0f <= ceil (bounds.upper.x) && ceil (bounds.upper.x) <= 32767.0f);
- bounds_vy_x(N)[i] = (int8_t) space3.vy.x;
- bounds_vy_y(N)[i] = (int8_t) space3.vy.y;
- bounds_vy_z(N)[i] = (int8_t) space3.vy.z;
+ bounds_vy_x(N)[i] = (char) space3.vy.x;
+ bounds_vy_y(N)[i] = (char) space3.vy.y;
+ bounds_vy_z(N)[i] = (char) space3.vy.z;
bounds_vy_lower(N)[i] = (short) clamp(floor(bounds.lower.y),-32767.0f,32767.0f);
bounds_vy_upper(N)[i] = (short) clamp(ceil (bounds.upper.y),-32767.0f,32767.0f);
assert(-32767.0f <= floor(bounds.lower.y) && floor(bounds.lower.y) <= 32767.0f);
assert(-32767.0f <= ceil (bounds.upper.y) && ceil (bounds.upper.y) <= 32767.0f);
- bounds_vz_x(N)[i] = (int8_t) space3.vz.x;
- bounds_vz_y(N)[i] = (int8_t) space3.vz.y;
- bounds_vz_z(N)[i] = (int8_t) space3.vz.z;
+ bounds_vz_x(N)[i] = (char) space3.vz.x;
+ bounds_vz_y(N)[i] = (char) space3.vz.y;
+ bounds_vz_z(N)[i] = (char) space3.vz.z;
bounds_vz_lower(N)[i] = (short) clamp(floor(bounds.lower.z),-32767.0f,32767.0f);
bounds_vz_upper(N)[i] = (short) clamp(ceil (bounds.upper.z),-32767.0f,32767.0f);
assert(-32767.0f <= floor(bounds.lower.z) && floor(bounds.lower.z) <= 32767.0f);
@@ -114,15 +114,15 @@ namespace embree
for (size_t i=0; i<items; i++) {
accel[i].fill(prims,start,set.end(),bvh->scene);
}
- return bvh->encodeLeaf((int8_t*)accel,items);
+ return bvh->encodeLeaf((char*)accel,items);
};
public:
// 27.6 - 46 bytes per primitive
- uint8_t ty;
- uint8_t N;
- uint8_t data[4+25*M+16];
+ unsigned char ty;
+ unsigned char N;
+ unsigned char data[4+25*M+16];
/*
struct Layout
@@ -130,21 +130,21 @@ namespace embree
unsigned int geomID;
unsigned int primID[N];
- int8_t bounds_vx_x[N];
- int8_t bounds_vx_y[N];
- int8_t bounds_vx_z[N];
+ char bounds_vx_x[N];
+ char bounds_vx_y[N];
+ char bounds_vx_z[N];
short bounds_vx_lower[N];
short bounds_vx_upper[N];
- int8_t bounds_vy_x[N];
- int8_t bounds_vy_y[N];
- int8_t bounds_vy_z[N];
+ char bounds_vy_x[N];
+ char bounds_vy_y[N];
+ char bounds_vy_z[N];
short bounds_vy_lower[N];
short bounds_vy_upper[N];
- int8_t bounds_vz_x[N];
- int8_t bounds_vz_y[N];
- int8_t bounds_vz_z[N];
+ char bounds_vz_x[N];
+ char bounds_vz_y[N];
+ char bounds_vz_z[N];
short bounds_vz_lower[N];
short bounds_vz_upper[N];
@@ -153,65 +153,65 @@ namespace embree
};
*/
- __forceinline unsigned int& geomID(size_t N) { return *(unsigned int*)((int8_t*)this+2); }
- __forceinline const unsigned int& geomID(size_t N) const { return *(unsigned int*)((int8_t*)this+2); }
+ __forceinline unsigned int& geomID(size_t N) { return *(unsigned int*)((char*)this+2); }
+ __forceinline const unsigned int& geomID(size_t N) const { return *(unsigned int*)((char*)this+2); }
- __forceinline unsigned int* primID(size_t N) { return (unsigned int*)((int8_t*)this+6); }
- __forceinline const unsigned int* primID(size_t N) const { return (unsigned int*)((int8_t*)this+6); }
+ __forceinline unsigned int* primID(size_t N) { return (unsigned int*)((char*)this+6); }
+ __forceinline const unsigned int* primID(size_t N) const { return (unsigned int*)((char*)this+6); }
- __forceinline int8_t* bounds_vx_x(size_t N) { return (int8_t*)((int8_t*)this+6+4*N); }
- __forceinline const int8_t* bounds_vx_x(size_t N) const { return (int8_t*)((int8_t*)this+6+4*N); }
+ __forceinline char* bounds_vx_x(size_t N) { return (char*)((char*)this+6+4*N); }
+ __forceinline const char* bounds_vx_x(size_t N) const { return (char*)((char*)this+6+4*N); }
- __forceinline int8_t* bounds_vx_y(size_t N) { return (int8_t*)((int8_t*)this+6+5*N); }
- __forceinline const int8_t* bounds_vx_y(size_t N) const { return (int8_t*)((int8_t*)this+6+5*N); }
+ __forceinline char* bounds_vx_y(size_t N) { return (char*)((char*)this+6+5*N); }
+ __forceinline const char* bounds_vx_y(size_t N) const { return (char*)((char*)this+6+5*N); }
- __forceinline int8_t* bounds_vx_z(size_t N) { return (int8_t*)((int8_t*)this+6+6*N); }
- __forceinline const int8_t* bounds_vx_z(size_t N) const { return (int8_t*)((int8_t*)this+6+6*N); }
+ __forceinline char* bounds_vx_z(size_t N) { return (char*)((char*)this+6+6*N); }
+ __forceinline const char* bounds_vx_z(size_t N) const { return (char*)((char*)this+6+6*N); }
- __forceinline short* bounds_vx_lower(size_t N) { return (short*)((int8_t*)this+6+7*N); }
- __forceinline const short* bounds_vx_lower(size_t N) const { return (short*)((int8_t*)this+6+7*N); }
+ __forceinline short* bounds_vx_lower(size_t N) { return (short*)((char*)this+6+7*N); }
+ __forceinline const short* bounds_vx_lower(size_t N) const { return (short*)((char*)this+6+7*N); }
- __forceinline short* bounds_vx_upper(size_t N) { return (short*)((int8_t*)this+6+9*N); }
- __forceinline const short* bounds_vx_upper(size_t N) const { return (short*)((int8_t*)this+6+9*N); }
+ __forceinline short* bounds_vx_upper(size_t N) { return (short*)((char*)this+6+9*N); }
+ __forceinline const short* bounds_vx_upper(size_t N) const { return (short*)((char*)this+6+9*N); }
- __forceinline int8_t* bounds_vy_x(size_t N) { return (int8_t*)((int8_t*)this+6+11*N); }
- __forceinline const int8_t* bounds_vy_x(size_t N) const { return (int8_t*)((int8_t*)this+6+11*N); }
+ __forceinline char* bounds_vy_x(size_t N) { return (char*)((char*)this+6+11*N); }
+ __forceinline const char* bounds_vy_x(size_t N) const { return (char*)((char*)this+6+11*N); }
- __forceinline int8_t* bounds_vy_y(size_t N) { return (int8_t*)((int8_t*)this+6+12*N); }
- __forceinline const int8_t* bounds_vy_y(size_t N) const { return (int8_t*)((int8_t*)this+6+12*N); }
+ __forceinline char* bounds_vy_y(size_t N) { return (char*)((char*)this+6+12*N); }
+ __forceinline const char* bounds_vy_y(size_t N) const { return (char*)((char*)this+6+12*N); }
- __forceinline int8_t* bounds_vy_z(size_t N) { return (int8_t*)((int8_t*)this+6+13*N); }
- __forceinline const int8_t* bounds_vy_z(size_t N) const { return (int8_t*)((int8_t*)this+6+13*N); }
+ __forceinline char* bounds_vy_z(size_t N) { return (char*)((char*)this+6+13*N); }
+ __forceinline const char* bounds_vy_z(size_t N) const { return (char*)((char*)this+6+13*N); }
- __forceinline short* bounds_vy_lower(size_t N) { return (short*)((int8_t*)this+6+14*N); }
- __forceinline const short* bounds_vy_lower(size_t N) const { return (short*)((int8_t*)this+6+14*N); }
+ __forceinline short* bounds_vy_lower(size_t N) { return (short*)((char*)this+6+14*N); }
+ __forceinline const short* bounds_vy_lower(size_t N) const { return (short*)((char*)this+6+14*N); }
- __forceinline short* bounds_vy_upper(size_t N) { return (short*)((int8_t*)this+6+16*N); }
- __forceinline const short* bounds_vy_upper(size_t N) const { return (short*)((int8_t*)this+6+16*N); }
+ __forceinline short* bounds_vy_upper(size_t N) { return (short*)((char*)this+6+16*N); }
+ __forceinline const short* bounds_vy_upper(size_t N) const { return (short*)((char*)this+6+16*N); }
- __forceinline int8_t* bounds_vz_x(size_t N) { return (int8_t*)((int8_t*)this+6+18*N); }
- __forceinline const int8_t* bounds_vz_x(size_t N) const { return (int8_t*)((int8_t*)this+6+18*N); }
+ __forceinline char* bounds_vz_x(size_t N) { return (char*)((char*)this+6+18*N); }
+ __forceinline const char* bounds_vz_x(size_t N) const { return (char*)((char*)this+6+18*N); }
- __forceinline int8_t* bounds_vz_y(size_t N) { return (int8_t*)((int8_t*)this+6+19*N); }
- __forceinline const int8_t* bounds_vz_y(size_t N) const { return (int8_t*)((int8_t*)this+6+19*N); }
+ __forceinline char* bounds_vz_y(size_t N) { return (char*)((char*)this+6+19*N); }
+ __forceinline const char* bounds_vz_y(size_t N) const { return (char*)((char*)this+6+19*N); }
- __forceinline int8_t* bounds_vz_z(size_t N) { return (int8_t*)((int8_t*)this+6+20*N); }
- __forceinline const int8_t* bounds_vz_z(size_t N) const { return (int8_t*)((int8_t*)this+6+20*N); }
+ __forceinline char* bounds_vz_z(size_t N) { return (char*)((char*)this+6+20*N); }
+ __forceinline const char* bounds_vz_z(size_t N) const { return (char*)((char*)this+6+20*N); }
- __forceinline short* bounds_vz_lower(size_t N) { return (short*)((int8_t*)this+6+21*N); }
- __forceinline const short* bounds_vz_lower(size_t N) const { return (short*)((int8_t*)this+6+21*N); }
+ __forceinline short* bounds_vz_lower(size_t N) { return (short*)((char*)this+6+21*N); }
+ __forceinline const short* bounds_vz_lower(size_t N) const { return (short*)((char*)this+6+21*N); }
- __forceinline short* bounds_vz_upper(size_t N) { return (short*)((int8_t*)this+6+23*N); }
- __forceinline const short* bounds_vz_upper(size_t N) const { return (short*)((int8_t*)this+6+23*N); }
+ __forceinline short* bounds_vz_upper(size_t N) { return (short*)((char*)this+6+23*N); }
+ __forceinline const short* bounds_vz_upper(size_t N) const { return (short*)((char*)this+6+23*N); }
- __forceinline Vec3f* offset(size_t N) { return (Vec3f*)((int8_t*)this+6+25*N); }
- __forceinline const Vec3f* offset(size_t N) const { return (Vec3f*)((int8_t*)this+6+25*N); }
+ __forceinline Vec3f* offset(size_t N) { return (Vec3f*)((char*)this+6+25*N); }
+ __forceinline const Vec3f* offset(size_t N) const { return (Vec3f*)((char*)this+6+25*N); }
- __forceinline float* scale(size_t N) { return (float*)((int8_t*)this+6+25*N+12); }
- __forceinline const float* scale(size_t N) const { return (float*)((int8_t*)this+6+25*N+12); }
+ __forceinline float* scale(size_t N) { return (float*)((char*)this+6+25*N+12); }
+ __forceinline const float* scale(size_t N) const { return (float*)((char*)this+6+25*N+12); }
- __forceinline int8_t* end(size_t N) { return (int8_t*)this+6+25*N+16; }
- __forceinline const int8_t* end(size_t N) const { return (int8_t*)this+6+25*N+16; }
+ __forceinline char* end(size_t N) { return (char*)this+6+25*N+16; }
+ __forceinline const char* end(size_t N) const { return (char*)this+6+25*N+16; }
};
template<int M>
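The commented-out Layout struct and the geomID/primID/bounds accessors above describe a packed, variable-size leaf: every field lives inside data[], and each accessor simply recomputes a byte offset from (char*)this (previously (int8_t*)this). A minimal standalone sketch of that pattern follows; the field names, sizes, and 2-primitive payload are simplified stand-ins, not the actual embree layout:

    // Minimal standalone sketch of the packed-leaf accessor pattern; field names,
    // sizes, and offsets are illustrative stand-ins, not embree's definitions.
    #include <cstdio>
    #include <cstring>

    struct PackedLeaf {
        unsigned char ty;              // geometry type tag
        unsigned char N;               // number of primitives stored in this leaf
        unsigned char data[4 + 8*4];   // geomID + per-primitive payload, up to 4 prims

        // Every field lives inside data[]; accessors recompute byte offsets from 'this'.
        unsigned int& geomID()          { return *(unsigned int*)((char*)this + 2); }
        unsigned int* primID(size_t)    { return  (unsigned int*)((char*)this + 6); }
        char*         payload(size_t n) { return   (char*)this + 6 + 4*n; }
    };

    int main() {
        PackedLeaf leaf;
        std::memset(&leaf, 0, sizeof(leaf));
        leaf.N = 2;
        leaf.geomID() = 7;
        leaf.primID(leaf.N)[0] = 11;
        leaf.primID(leaf.N)[1] = 12;
        std::printf("geomID=%u primIDs=%u,%u\n",
                    leaf.geomID(), leaf.primID(leaf.N)[0], leaf.primID(leaf.N)[1]);
        return 0;
    }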
diff --git a/thirdparty/embree-aarch64/kernels/geometry/curveNi_intersector.h b/thirdparty/embree/kernels/geometry/curveNi_intersector.h
index 0f9038c9fc..c0b66515c1 100644
--- a/thirdparty/embree-aarch64/kernels/geometry/curveNi_intersector.h
+++ b/thirdparty/embree/kernels/geometry/curveNi_intersector.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
diff --git a/thirdparty/embree-aarch64/kernels/geometry/curveNi_mb.h b/thirdparty/embree/kernels/geometry/curveNi_mb.h
index 0cd8f833fd..5d972b43a0 100644
--- a/thirdparty/embree-aarch64/kernels/geometry/curveNi_mb.h
+++ b/thirdparty/embree/kernels/geometry/curveNi_mb.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
@@ -43,10 +43,10 @@ namespace embree
__forceinline LBBox3fa fillMB(const PrimRefMB* prims, size_t& begin, size_t _end, Scene* scene, const BBox1f time_range)
{
size_t end = min(begin+M,_end);
- N = (uint8_t)(end-begin);
+ N = (unsigned char)(end-begin);
const unsigned int geomID0 = prims[begin].geomID();
this->geomID(N) = geomID0;
- ty = (uint8_t) scene->get(geomID0)->getType();
+ ty = (unsigned char) scene->get(geomID0)->getType();
/* encode all primitives */
LBBox3fa lbounds = empty;
@@ -79,10 +79,10 @@ namespace embree
const LinearSpace3fa space3(trunc(126.0f*space2.vx),trunc(126.0f*space2.vy),trunc(126.0f*space2.vz));
const LBBox3fa bounds = scene->get(geomID)->vlinearBounds(loffset,lscale,max(length(space3.vx),length(space3.vy),length(space3.vz)),space3.transposed(),primID,time_range);
- // NOTE: this weird (int8_t) (short) cast works around VS2015 Win32 compiler bug
- bounds_vx_x(N)[i] = (int8_t) (short) space3.vx.x;
- bounds_vx_y(N)[i] = (int8_t) (short) space3.vx.y;
- bounds_vx_z(N)[i] = (int8_t) (short) space3.vx.z;
+ // NOTE: this weird (char) (short) cast works around VS2015 Win32 compiler bug
+ bounds_vx_x(N)[i] = (char) (short) space3.vx.x;
+ bounds_vx_y(N)[i] = (char) (short) space3.vx.y;
+ bounds_vx_z(N)[i] = (char) (short) space3.vx.z;
bounds_vx_lower0(N)[i] = (short) clamp(floor(bounds.bounds0.lower.x),-32767.0f,32767.0f);
bounds_vx_upper0(N)[i] = (short) clamp(ceil (bounds.bounds0.upper.x),-32767.0f,32767.0f);
bounds_vx_lower1(N)[i] = (short) clamp(floor(bounds.bounds1.lower.x),-32767.0f,32767.0f);
@@ -92,9 +92,9 @@ namespace embree
assert(-32767.0f <= floor(bounds.bounds1.lower.x) && floor(bounds.bounds1.lower.x) <= 32767.0f);
assert(-32767.0f <= ceil (bounds.bounds1.upper.x) && ceil (bounds.bounds1.upper.x) <= 32767.0f);
- bounds_vy_x(N)[i] = (int8_t) (short) space3.vy.x;
- bounds_vy_y(N)[i] = (int8_t) (short) space3.vy.y;
- bounds_vy_z(N)[i] = (int8_t) (short) space3.vy.z;
+ bounds_vy_x(N)[i] = (char) (short) space3.vy.x;
+ bounds_vy_y(N)[i] = (char) (short) space3.vy.y;
+ bounds_vy_z(N)[i] = (char) (short) space3.vy.z;
bounds_vy_lower0(N)[i] = (short) clamp(floor(bounds.bounds0.lower.y),-32767.0f,32767.0f);
bounds_vy_upper0(N)[i] = (short) clamp(ceil (bounds.bounds0.upper.y),-32767.0f,32767.0f);
bounds_vy_lower1(N)[i] = (short) clamp(floor(bounds.bounds1.lower.y),-32767.0f,32767.0f);
@@ -104,9 +104,9 @@ namespace embree
assert(-32767.0f <= floor(bounds.bounds1.lower.y) && floor(bounds.bounds1.lower.y) <= 32767.0f);
assert(-32767.0f <= ceil (bounds.bounds1.upper.y) && ceil (bounds.bounds1.upper.y) <= 32767.0f);
- bounds_vz_x(N)[i] = (int8_t) (short) space3.vz.x;
- bounds_vz_y(N)[i] = (int8_t) (short) space3.vz.y;
- bounds_vz_z(N)[i] = (int8_t) (short) space3.vz.z;
+ bounds_vz_x(N)[i] = (char) (short) space3.vz.x;
+ bounds_vz_y(N)[i] = (char) (short) space3.vz.y;
+ bounds_vz_z(N)[i] = (char) (short) space3.vz.z;
bounds_vz_lower0(N)[i] = (short) clamp(floor(bounds.bounds0.lower.z),-32767.0f,32767.0f);
bounds_vz_upper0(N)[i] = (short) clamp(ceil (bounds.bounds0.upper.z),-32767.0f,32767.0f);
bounds_vz_lower1(N)[i] = (short) clamp(floor(bounds.bounds1.lower.z),-32767.0f,32767.0f);
@@ -130,7 +130,7 @@ namespace embree
size_t items = CurveNiMB::blocks(prims.size());
size_t numbytes = CurveNiMB::bytes(prims.size());
CurveNiMB* accel = (CurveNiMB*) alloc.malloc1(numbytes,BVH::byteAlignment);
- const typename BVH::NodeRef node = bvh->encodeLeaf((int8_t*)accel,items);
+ const typename BVH::NodeRef node = bvh->encodeLeaf((char*)accel,items);
LBBox3fa bounds = empty;
for (size_t i=0; i<items; i++)
@@ -143,9 +143,9 @@ namespace embree
public:
// 27.6 - 46 bytes per primitive
- uint8_t ty;
- uint8_t N;
- uint8_t data[4+37*M+24];
+ unsigned char ty;
+ unsigned char N;
+ unsigned char data[4+37*M+24];
/*
struct Layout
@@ -153,25 +153,25 @@ namespace embree
unsigned int geomID;
unsigned int primID[N];
- int8_t bounds_vx_x[N];
- int8_t bounds_vx_y[N];
- int8_t bounds_vx_z[N];
+ char bounds_vx_x[N];
+ char bounds_vx_y[N];
+ char bounds_vx_z[N];
short bounds_vx_lower0[N];
short bounds_vx_upper0[N];
short bounds_vx_lower1[N];
short bounds_vx_upper1[N];
- int8_t bounds_vy_x[N];
- int8_t bounds_vy_y[N];
- int8_t bounds_vy_z[N];
+ char bounds_vy_x[N];
+ char bounds_vy_y[N];
+ char bounds_vy_z[N];
short bounds_vy_lower0[N];
short bounds_vy_upper0[N];
short bounds_vy_lower1[N];
short bounds_vy_upper1[N];
- int8_t bounds_vz_x[N];
- int8_t bounds_vz_y[N];
- int8_t bounds_vz_z[N];
+ char bounds_vz_x[N];
+ char bounds_vz_y[N];
+ char bounds_vz_z[N];
short bounds_vz_lower0[N];
short bounds_vz_upper0[N];
short bounds_vz_lower1[N];
@@ -185,89 +185,89 @@ namespace embree
};
*/
- __forceinline unsigned int& geomID(size_t N) { return *(unsigned int*)((int8_t*)this+2); }
- __forceinline const unsigned int& geomID(size_t N) const { return *(unsigned int*)((int8_t*)this+2); }
+ __forceinline unsigned int& geomID(size_t N) { return *(unsigned int*)((char*)this+2); }
+ __forceinline const unsigned int& geomID(size_t N) const { return *(unsigned int*)((char*)this+2); }
- __forceinline unsigned int* primID(size_t N) { return (unsigned int*)((int8_t*)this+6); }
- __forceinline const unsigned int* primID(size_t N) const { return (unsigned int*)((int8_t*)this+6); }
+ __forceinline unsigned int* primID(size_t N) { return (unsigned int*)((char*)this+6); }
+ __forceinline const unsigned int* primID(size_t N) const { return (unsigned int*)((char*)this+6); }
- __forceinline int8_t* bounds_vx_x(size_t N) { return (int8_t*)((int8_t*)this+6+4*N); }
- __forceinline const int8_t* bounds_vx_x(size_t N) const { return (int8_t*)((int8_t*)this+6+4*N); }
+ __forceinline char* bounds_vx_x(size_t N) { return (char*)((char*)this+6+4*N); }
+ __forceinline const char* bounds_vx_x(size_t N) const { return (char*)((char*)this+6+4*N); }
- __forceinline int8_t* bounds_vx_y(size_t N) { return (int8_t*)((int8_t*)this+6+5*N); }
- __forceinline const int8_t* bounds_vx_y(size_t N) const { return (int8_t*)((int8_t*)this+6+5*N); }
+ __forceinline char* bounds_vx_y(size_t N) { return (char*)((char*)this+6+5*N); }
+ __forceinline const char* bounds_vx_y(size_t N) const { return (char*)((char*)this+6+5*N); }
- __forceinline int8_t* bounds_vx_z(size_t N) { return (int8_t*)((int8_t*)this+6+6*N); }
- __forceinline const int8_t* bounds_vx_z(size_t N) const { return (int8_t*)((int8_t*)this+6+6*N); }
+ __forceinline char* bounds_vx_z(size_t N) { return (char*)((char*)this+6+6*N); }
+ __forceinline const char* bounds_vx_z(size_t N) const { return (char*)((char*)this+6+6*N); }
- __forceinline short* bounds_vx_lower0(size_t N) { return (short*)((int8_t*)this+6+7*N); }
- __forceinline const short* bounds_vx_lower0(size_t N) const { return (short*)((int8_t*)this+6+7*N); }
+ __forceinline short* bounds_vx_lower0(size_t N) { return (short*)((char*)this+6+7*N); }
+ __forceinline const short* bounds_vx_lower0(size_t N) const { return (short*)((char*)this+6+7*N); }
- __forceinline short* bounds_vx_upper0(size_t N) { return (short*)((int8_t*)this+6+9*N); }
- __forceinline const short* bounds_vx_upper0(size_t N) const { return (short*)((int8_t*)this+6+9*N); }
+ __forceinline short* bounds_vx_upper0(size_t N) { return (short*)((char*)this+6+9*N); }
+ __forceinline const short* bounds_vx_upper0(size_t N) const { return (short*)((char*)this+6+9*N); }
- __forceinline short* bounds_vx_lower1(size_t N) { return (short*)((int8_t*)this+6+11*N); }
- __forceinline const short* bounds_vx_lower1(size_t N) const { return (short*)((int8_t*)this+6+11*N); }
+ __forceinline short* bounds_vx_lower1(size_t N) { return (short*)((char*)this+6+11*N); }
+ __forceinline const short* bounds_vx_lower1(size_t N) const { return (short*)((char*)this+6+11*N); }
- __forceinline short* bounds_vx_upper1(size_t N) { return (short*)((int8_t*)this+6+13*N); }
- __forceinline const short* bounds_vx_upper1(size_t N) const { return (short*)((int8_t*)this+6+13*N); }
+ __forceinline short* bounds_vx_upper1(size_t N) { return (short*)((char*)this+6+13*N); }
+ __forceinline const short* bounds_vx_upper1(size_t N) const { return (short*)((char*)this+6+13*N); }
- __forceinline int8_t* bounds_vy_x(size_t N) { return (int8_t*)((int8_t*)this+6+15*N); }
- __forceinline const int8_t* bounds_vy_x(size_t N) const { return (int8_t*)((int8_t*)this+6+15*N); }
+ __forceinline char* bounds_vy_x(size_t N) { return (char*)((char*)this+6+15*N); }
+ __forceinline const char* bounds_vy_x(size_t N) const { return (char*)((char*)this+6+15*N); }
- __forceinline int8_t* bounds_vy_y(size_t N) { return (int8_t*)((int8_t*)this+6+16*N); }
- __forceinline const int8_t* bounds_vy_y(size_t N) const { return (int8_t*)((int8_t*)this+6+16*N); }
+ __forceinline char* bounds_vy_y(size_t N) { return (char*)((char*)this+6+16*N); }
+ __forceinline const char* bounds_vy_y(size_t N) const { return (char*)((char*)this+6+16*N); }
- __forceinline int8_t* bounds_vy_z(size_t N) { return (int8_t*)((int8_t*)this+6+17*N); }
- __forceinline const int8_t* bounds_vy_z(size_t N) const { return (int8_t*)((int8_t*)this+6+17*N); }
+ __forceinline char* bounds_vy_z(size_t N) { return (char*)((char*)this+6+17*N); }
+ __forceinline const char* bounds_vy_z(size_t N) const { return (char*)((char*)this+6+17*N); }
- __forceinline short* bounds_vy_lower0(size_t N) { return (short*)((int8_t*)this+6+18*N); }
- __forceinline const short* bounds_vy_lower0(size_t N) const { return (short*)((int8_t*)this+6+18*N); }
+ __forceinline short* bounds_vy_lower0(size_t N) { return (short*)((char*)this+6+18*N); }
+ __forceinline const short* bounds_vy_lower0(size_t N) const { return (short*)((char*)this+6+18*N); }
- __forceinline short* bounds_vy_upper0(size_t N) { return (short*)((int8_t*)this+6+20*N); }
- __forceinline const short* bounds_vy_upper0(size_t N) const { return (short*)((int8_t*)this+6+20*N); }
+ __forceinline short* bounds_vy_upper0(size_t N) { return (short*)((char*)this+6+20*N); }
+ __forceinline const short* bounds_vy_upper0(size_t N) const { return (short*)((char*)this+6+20*N); }
- __forceinline short* bounds_vy_lower1(size_t N) { return (short*)((int8_t*)this+6+22*N); }
- __forceinline const short* bounds_vy_lower1(size_t N) const { return (short*)((int8_t*)this+6+22*N); }
+ __forceinline short* bounds_vy_lower1(size_t N) { return (short*)((char*)this+6+22*N); }
+ __forceinline const short* bounds_vy_lower1(size_t N) const { return (short*)((char*)this+6+22*N); }
- __forceinline short* bounds_vy_upper1(size_t N) { return (short*)((int8_t*)this+6+24*N); }
- __forceinline const short* bounds_vy_upper1(size_t N) const { return (short*)((int8_t*)this+6+24*N); }
+ __forceinline short* bounds_vy_upper1(size_t N) { return (short*)((char*)this+6+24*N); }
+ __forceinline const short* bounds_vy_upper1(size_t N) const { return (short*)((char*)this+6+24*N); }
- __forceinline int8_t* bounds_vz_x(size_t N) { return (int8_t*)((int8_t*)this+6+26*N); }
- __forceinline const int8_t* bounds_vz_x(size_t N) const { return (int8_t*)((int8_t*)this+6+26*N); }
+ __forceinline char* bounds_vz_x(size_t N) { return (char*)((char*)this+6+26*N); }
+ __forceinline const char* bounds_vz_x(size_t N) const { return (char*)((char*)this+6+26*N); }
- __forceinline int8_t* bounds_vz_y(size_t N) { return (int8_t*)((int8_t*)this+6+27*N); }
- __forceinline const int8_t* bounds_vz_y(size_t N) const { return (int8_t*)((int8_t*)this+6+27*N); }
+ __forceinline char* bounds_vz_y(size_t N) { return (char*)((char*)this+6+27*N); }
+ __forceinline const char* bounds_vz_y(size_t N) const { return (char*)((char*)this+6+27*N); }
- __forceinline int8_t* bounds_vz_z(size_t N) { return (int8_t*)((int8_t*)this+6+28*N); }
- __forceinline const int8_t* bounds_vz_z(size_t N) const { return (int8_t*)((int8_t*)this+6+28*N); }
+ __forceinline char* bounds_vz_z(size_t N) { return (char*)((char*)this+6+28*N); }
+ __forceinline const char* bounds_vz_z(size_t N) const { return (char*)((char*)this+6+28*N); }
- __forceinline short* bounds_vz_lower0(size_t N) { return (short*)((int8_t*)this+6+29*N); }
- __forceinline const short* bounds_vz_lower0(size_t N) const { return (short*)((int8_t*)this+6+29*N); }
+ __forceinline short* bounds_vz_lower0(size_t N) { return (short*)((char*)this+6+29*N); }
+ __forceinline const short* bounds_vz_lower0(size_t N) const { return (short*)((char*)this+6+29*N); }
- __forceinline short* bounds_vz_upper0(size_t N) { return (short*)((int8_t*)this+6+31*N); }
- __forceinline const short* bounds_vz_upper0(size_t N) const { return (short*)((int8_t*)this+6+31*N); }
+ __forceinline short* bounds_vz_upper0(size_t N) { return (short*)((char*)this+6+31*N); }
+ __forceinline const short* bounds_vz_upper0(size_t N) const { return (short*)((char*)this+6+31*N); }
- __forceinline short* bounds_vz_lower1(size_t N) { return (short*)((int8_t*)this+6+33*N); }
- __forceinline const short* bounds_vz_lower1(size_t N) const { return (short*)((int8_t*)this+6+33*N); }
+ __forceinline short* bounds_vz_lower1(size_t N) { return (short*)((char*)this+6+33*N); }
+ __forceinline const short* bounds_vz_lower1(size_t N) const { return (short*)((char*)this+6+33*N); }
- __forceinline short* bounds_vz_upper1(size_t N) { return (short*)((int8_t*)this+6+35*N); }
- __forceinline const short* bounds_vz_upper1(size_t N) const { return (short*)((int8_t*)this+6+35*N); }
+ __forceinline short* bounds_vz_upper1(size_t N) { return (short*)((char*)this+6+35*N); }
+ __forceinline const short* bounds_vz_upper1(size_t N) const { return (short*)((char*)this+6+35*N); }
- __forceinline Vec3f* offset(size_t N) { return (Vec3f*)((int8_t*)this+6+37*N); }
- __forceinline const Vec3f* offset(size_t N) const { return (Vec3f*)((int8_t*)this+6+37*N); }
+ __forceinline Vec3f* offset(size_t N) { return (Vec3f*)((char*)this+6+37*N); }
+ __forceinline const Vec3f* offset(size_t N) const { return (Vec3f*)((char*)this+6+37*N); }
- __forceinline float* scale(size_t N) { return (float*)((int8_t*)this+6+37*N+12); }
- __forceinline const float* scale(size_t N) const { return (float*)((int8_t*)this+6+37*N+12); }
+ __forceinline float* scale(size_t N) { return (float*)((char*)this+6+37*N+12); }
+ __forceinline const float* scale(size_t N) const { return (float*)((char*)this+6+37*N+12); }
- __forceinline float& time_offset(size_t N) { return *(float*)((int8_t*)this+6+37*N+16); }
- __forceinline const float& time_offset(size_t N) const { return *(float*)((int8_t*)this+6+37*N+16); }
+ __forceinline float& time_offset(size_t N) { return *(float*)((char*)this+6+37*N+16); }
+ __forceinline const float& time_offset(size_t N) const { return *(float*)((char*)this+6+37*N+16); }
- __forceinline float& time_scale(size_t N) { return *(float*)((int8_t*)this+6+37*N+20); }
- __forceinline const float& time_scale(size_t N) const { return *(float*)((int8_t*)this+6+37*N+20); }
+ __forceinline float& time_scale(size_t N) { return *(float*)((char*)this+6+37*N+20); }
+ __forceinline const float& time_scale(size_t N) const { return *(float*)((char*)this+6+37*N+20); }
- __forceinline int8_t* end(size_t N) { return (int8_t*)this+6+37*N+24; }
- __forceinline const int8_t* end(size_t N) const { return (int8_t*)this+6+37*N+24; }
+ __forceinline char* end(size_t N) { return (char*)this+6+37*N+24; }
+ __forceinline const char* end(size_t N) const { return (char*)this+6+37*N+24; }
};
template<int M>
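The NOTE above keeps the two-step narrowing even after the int8_t -> char rename: converting the float to short first, and only then to the 8-bit type, reportedly side-steps a VS2015 Win32 miscompile in the direct float-to-int8 conversion. A tiny self-contained illustration of the pattern; the value is arbitrary, and the real code first scales the frame vectors into roughly [-126, 126]:

    // Two-step narrowing as used in fillMB() above; both results should match
    // on a correct compiler, the indirection only matters for the VS2015 Win32 bug.
    #include <cstdio>

    int main() {
        float v = 125.0f;
        char direct  = (char) v;           // the conversion the workaround avoids
        char twostep = (char)(short) v;    // float -> short -> char
        std::printf("direct=%d twostep=%d\n", (int)direct, (int)twostep);
        return 0;
    }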
diff --git a/thirdparty/embree-aarch64/kernels/geometry/curveNi_mb_intersector.h b/thirdparty/embree/kernels/geometry/curveNi_mb_intersector.h
index 0cbc764668..bab796b33b 100644
--- a/thirdparty/embree-aarch64/kernels/geometry/curveNi_mb_intersector.h
+++ b/thirdparty/embree/kernels/geometry/curveNi_mb_intersector.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
diff --git a/thirdparty/embree-aarch64/kernels/geometry/curveNv.h b/thirdparty/embree/kernels/geometry/curveNv.h
index 6eb5e30b39..e41a381706 100644
--- a/thirdparty/embree-aarch64/kernels/geometry/curveNv.h
+++ b/thirdparty/embree/kernels/geometry/curveNv.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
diff --git a/thirdparty/embree-aarch64/kernels/geometry/curveNv_intersector.h b/thirdparty/embree/kernels/geometry/curveNv_intersector.h
index e20da2882e..2742725aec 100644
--- a/thirdparty/embree-aarch64/kernels/geometry/curveNv_intersector.h
+++ b/thirdparty/embree/kernels/geometry/curveNv_intersector.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
diff --git a/thirdparty/embree-aarch64/kernels/geometry/curve_intersector.h b/thirdparty/embree/kernels/geometry/curve_intersector.h
index 204958f7cc..1e8ac26125 100644
--- a/thirdparty/embree-aarch64/kernels/geometry/curve_intersector.h
+++ b/thirdparty/embree/kernels/geometry/curve_intersector.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
@@ -23,8 +23,8 @@ namespace embree
typedef unsigned char Primitive;
typedef CurvePrecalculations1 Precalculations;
- template<int N, int Nx, bool robust>
- static __forceinline void intersect(const Accel::Intersectors* This, Precalculations& pre, RayHit& ray, IntersectContext* context, const Primitive* prim, size_t num, const TravRay<N,Nx,robust> &tray, size_t& lazy_node)
+ template<int N, bool robust>
+ static __forceinline void intersect(const Accel::Intersectors* This, Precalculations& pre, RayHit& ray, IntersectContext* context, const Primitive* prim, size_t num, const TravRay<N,robust> &tray, size_t& lazy_node)
{
assert(num == 1);
RTCGeometryType ty = (RTCGeometryType)(*prim);
@@ -33,8 +33,8 @@ namespace embree
leafIntersector.intersect<1>(&pre,&ray,context,prim);
}
- template<int N, int Nx, bool robust>
- static __forceinline bool occluded(const Accel::Intersectors* This, Precalculations& pre, Ray& ray, IntersectContext* context, const Primitive* prim, size_t num, const TravRay<N,Nx,robust> &tray, size_t& lazy_node)
+ template<int N, bool robust>
+ static __forceinline bool occluded(const Accel::Intersectors* This, Precalculations& pre, Ray& ray, IntersectContext* context, const Primitive* prim, size_t num, const TravRay<N,robust> &tray, size_t& lazy_node)
{
assert(num == 1);
RTCGeometryType ty = (RTCGeometryType)(*prim);
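Throughout this patch the extra Nx width parameter is dropped from TravRay and from the leaf intersector entry points; in every instantiation touched here it was always equal to N. A compressed sketch of the simplified signatures, using placeholder types rather than the real TravRay/Accel machinery:

    // Placeholder types standing in for TravRay and a leaf intersector entry point;
    // only the shape of the template parameter list is the point of this sketch.
    #include <cstdio>

    template<int N, bool robust>
    struct TravRay { };                        // traversal ray packet of width N

    template<int N, bool robust>
    bool occluded(const TravRay<N, robust>&) { // was <N, Nx, robust> with Nx always == N
        return robust;                         // dummy body; the real code dispatches per curve type
    }

    int main() {
        TravRay<4, true> tray;
        std::printf("occluded=%d\n", (int)occluded(tray));
        return 0;
    }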
diff --git a/thirdparty/embree-aarch64/kernels/geometry/curve_intersector_distance.h b/thirdparty/embree/kernels/geometry/curve_intersector_distance.h
index 343cc8ff28..748a9511a5 100644
--- a/thirdparty/embree-aarch64/kernels/geometry/curve_intersector_distance.h
+++ b/thirdparty/embree/kernels/geometry/curve_intersector_distance.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
diff --git a/thirdparty/embree-aarch64/kernels/geometry/curve_intersector_oriented.h b/thirdparty/embree/kernels/geometry/curve_intersector_oriented.h
index 47531027fc..3d8900c2aa 100644
--- a/thirdparty/embree-aarch64/kernels/geometry/curve_intersector_oriented.h
+++ b/thirdparty/embree/kernels/geometry/curve_intersector_oriented.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
diff --git a/thirdparty/embree-aarch64/kernels/geometry/curve_intersector_precalculations.h b/thirdparty/embree/kernels/geometry/curve_intersector_precalculations.h
index 6e9fc91925..de6b70be1b 100644
--- a/thirdparty/embree-aarch64/kernels/geometry/curve_intersector_precalculations.h
+++ b/thirdparty/embree/kernels/geometry/curve_intersector_precalculations.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
diff --git a/thirdparty/embree-aarch64/kernels/geometry/curve_intersector_ribbon.h b/thirdparty/embree/kernels/geometry/curve_intersector_ribbon.h
index a99cf99d56..c3272e99fd 100644
--- a/thirdparty/embree-aarch64/kernels/geometry/curve_intersector_ribbon.h
+++ b/thirdparty/embree/kernels/geometry/curve_intersector_ribbon.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
@@ -32,9 +32,11 @@ namespace embree
__forceinline Vec2f uv (const size_t i) const { return Vec2f(vu[i],vv[i]); }
__forceinline float t (const size_t i) const { return vt[i]; }
- __forceinline Vec3fa Ng(const size_t i) const {
- return curve3D.eval_du(vu[i]);
- }
+ __forceinline Vec3fa Ng(const size_t i) const { return curve3D.eval_du(vu[i]); }
+
+ __forceinline Vec2vf<M> uv() const { return Vec2vf<M>(vu,vv); }
+ __forceinline vfloat<M> t () const { return vt; }
+ __forceinline Vec3vf<M> Ng() const { return (Vec3vf<M>) curve3D.template veval_du<M>(vu); }
public:
vfloat<M> U;
@@ -98,7 +100,7 @@ namespace embree
const Vec3vfx up1 = nmadd(p1.w,nn1,Vec3vfx(p1));
vfloatx vu,vv,vt;
- vboolx valid0 = intersect_quad_backface_culling(valid,zero,Vec3fa(0,0,1),ray_tnear,ray_tfar,lp0,lp1,up1,up0,vu,vv,vt);
+ vboolx valid0 = intersect_quad_backface_culling<VSIZEX>(valid,zero,Vec3fa(0,0,1),ray_tnear,ray_tfar,lp0,lp1,up1,up0,vu,vv,vt);
if (any(valid0))
{
@@ -143,7 +145,7 @@ namespace embree
const Vec3vfx up1 = nmadd(p1.w,nn1,Vec3vfx(p1));
vfloatx vu,vv,vt;
- vboolx valid0 = intersect_quad_backface_culling(valid,zero,Vec3fa(0,0,1),ray_tnear,ray_tfar,lp0,lp1,up1,up0,vu,vv,vt);
+ vboolx valid0 = intersect_quad_backface_culling<VSIZEX>(valid,zero,Vec3fa(0,0,1),ray_tnear,ray_tfar,lp0,lp1,up1,up0,vu,vv,vt);
if (any(valid0))
{
diff --git a/thirdparty/embree-aarch64/kernels/geometry/curve_intersector_sweep.h b/thirdparty/embree/kernels/geometry/curve_intersector_sweep.h
index 883cedc3d2..2d4abd73ac 100644
--- a/thirdparty/embree-aarch64/kernels/geometry/curve_intersector_sweep.h
+++ b/thirdparty/embree/kernels/geometry/curve_intersector_sweep.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
@@ -137,10 +137,12 @@ namespace embree
float u0, float u1, unsigned int depth, const Epilog& epilog)
{
#if defined(__AVX__)
+ enum { VSIZEX_ = 8 };
typedef vbool8 vboolx; // maximally 8-wide to work around KNL issues
typedef vint8 vintx;
typedef vfloat8 vfloatx;
#else
+ enum { VSIZEX_ = 4 };
typedef vbool4 vboolx;
typedef vint4 vintx;
typedef vfloat4 vfloatx;
@@ -192,7 +194,7 @@ namespace embree
/* subdivide curve */
const float dscale = (u1-u0)*(1.0f/(3.0f*(vfloatx::size-1)));
const vfloatx vu0 = lerp(u0,u1,vfloatx(step)*(1.0f/(vfloatx::size-1)));
- Vec4vfx P0, dP0du; curve.veval(vu0,P0,dP0du); dP0du = dP0du * Vec4vfx(dscale);
+ Vec4vfx P0, dP0du; curve.template veval<VSIZEX_>(vu0,P0,dP0du); dP0du = dP0du * Vec4vfx(dscale);
const Vec4vfx P3 = shift_right_1(P0);
const Vec4vfx dP3du = shift_right_1(dP0du);
const Vec4vfx P1 = P0 + dP0du;
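The hunk above introduces a local VSIZEX_ constant (8 with AVX, 4 otherwise) and passes it explicitly to curve.template veval<...>() instead of relying on the previous implicit width. A small standalone sketch of that compile-time width selection, with veval replaced by a placeholder:

    // Compile-time width selection; veval is a stub standing in for curve.template veval<W>(...).
    #include <cstdio>

    #if defined(__AVX__)
    enum { VSIZEX_ = 8 };
    #else
    enum { VSIZEX_ = 4 };
    #endif

    template<int W>
    void veval(float u, float out[W]) {        // evaluate W samples along the curve (stub)
        for (int i = 0; i < W; ++i) out[i] = u + (float)i;
    }

    int main() {
        float P[VSIZEX_];
        veval<VSIZEX_>(0.5f, P);               // width passed explicitly, as in the hunk above
        std::printf("width=%d first=%.1f\n", (int)VSIZEX_, P[0]);
        return 0;
    }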
diff --git a/thirdparty/embree-aarch64/kernels/geometry/curve_intersector_virtual.h b/thirdparty/embree/kernels/geometry/curve_intersector_virtual.h
index e1f4238130..cffa8e46ad 100644
--- a/thirdparty/embree-aarch64/kernels/geometry/curve_intersector_virtual.h
+++ b/thirdparty/embree/kernels/geometry/curve_intersector_virtual.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
@@ -97,8 +97,8 @@ namespace embree
typedef unsigned char Primitive;
typedef CurvePrecalculations1 Precalculations;
- template<int N, int Nx, bool robust>
- static __forceinline void intersect(const Accel::Intersectors* This, Precalculations& pre, RayHit& ray, IntersectContext* context, const Primitive* prim, size_t num, const TravRay<N,Nx,robust> &tray, size_t& lazy_node)
+ template<int N, bool robust>
+ static __forceinline void intersect(const Accel::Intersectors* This, Precalculations& pre, RayHit& ray, IntersectContext* context, const Primitive* prim, size_t num, const TravRay<N,robust> &tray, size_t& lazy_node)
{
assert(num == 1);
RTCGeometryType ty = (RTCGeometryType)(*prim);
@@ -107,8 +107,8 @@ namespace embree
leafIntersector.intersect<1>(&pre,&ray,context,prim);
}
- template<int N, int Nx, bool robust>
- static __forceinline bool occluded(const Accel::Intersectors* This, Precalculations& pre, Ray& ray, IntersectContext* context, const Primitive* prim, size_t num, const TravRay<N,Nx,robust> &tray, size_t& lazy_node)
+ template<int N, bool robust>
+ static __forceinline bool occluded(const Accel::Intersectors* This, Precalculations& pre, Ray& ray, IntersectContext* context, const Primitive* prim, size_t num, const TravRay<N,robust> &tray, size_t& lazy_node)
{
assert(num == 1);
RTCGeometryType ty = (RTCGeometryType)(*prim);
@@ -152,8 +152,8 @@ namespace embree
return valid_o;
}
- template<int N, int Nx, bool robust>
- static __forceinline void intersect(const Accel::Intersectors* This, Precalculations& pre, RayHitK<K>& ray, size_t k, IntersectContext* context, const Primitive* prim, size_t num, const TravRay<N,Nx,robust> &tray, size_t& lazy_node)
+ template<int N, bool robust>
+ static __forceinline void intersect(const Accel::Intersectors* This, Precalculations& pre, RayHitK<K>& ray, size_t k, IntersectContext* context, const Primitive* prim, size_t num, const TravRay<N,robust> &tray, size_t& lazy_node)
{
assert(num == 1);
RTCGeometryType ty = (RTCGeometryType)(*prim);
@@ -162,8 +162,8 @@ namespace embree
leafIntersector.intersect<K>(&pre,&ray,k,context,prim);
}
- template<int N, int Nx, bool robust>
- static __forceinline bool occluded(const Accel::Intersectors* This, Precalculations& pre, RayK<K>& ray, size_t k, IntersectContext* context, const Primitive* prim, size_t num, const TravRay<N,Nx,robust> &tray, size_t& lazy_node)
+ template<int N, bool robust>
+ static __forceinline bool occluded(const Accel::Intersectors* This, Precalculations& pre, RayK<K>& ray, size_t k, IntersectContext* context, const Primitive* prim, size_t num, const TravRay<N,robust> &tray, size_t& lazy_node)
{
assert(num == 1);
RTCGeometryType ty = (RTCGeometryType)(*prim);
@@ -177,17 +177,17 @@ namespace embree
static VirtualCurveIntersector::Intersectors LinearRoundConeNiIntersectors()
{
VirtualCurveIntersector::Intersectors intersectors;
- intersectors.intersect1 = (VirtualCurveIntersector::Intersect1Ty) &RoundLinearCurveMiIntersector1<N,N,true>::intersect;
- intersectors.occluded1 = (VirtualCurveIntersector::Occluded1Ty) &RoundLinearCurveMiIntersector1<N,N,true>::occluded;
- intersectors.intersect4 = (VirtualCurveIntersector::Intersect4Ty) &RoundLinearCurveMiIntersectorK<N,N,4,true>::intersect;
- intersectors.occluded4 = (VirtualCurveIntersector::Occluded4Ty) &RoundLinearCurveMiIntersectorK<N,N,4,true>::occluded;
+ intersectors.intersect1 = (VirtualCurveIntersector::Intersect1Ty) &RoundLinearCurveMiIntersector1<N,true>::intersect;
+ intersectors.occluded1 = (VirtualCurveIntersector::Occluded1Ty) &RoundLinearCurveMiIntersector1<N,true>::occluded;
+ intersectors.intersect4 = (VirtualCurveIntersector::Intersect4Ty) &RoundLinearCurveMiIntersectorK<N,4,true>::intersect;
+ intersectors.occluded4 = (VirtualCurveIntersector::Occluded4Ty) &RoundLinearCurveMiIntersectorK<N,4,true>::occluded;
#if defined(__AVX__)
- intersectors.intersect8 = (VirtualCurveIntersector::Intersect8Ty)&RoundLinearCurveMiIntersectorK<N,N,8,true>::intersect;
- intersectors.occluded8 = (VirtualCurveIntersector::Occluded8Ty) &RoundLinearCurveMiIntersectorK<N,N,8,true>::occluded;
+ intersectors.intersect8 = (VirtualCurveIntersector::Intersect8Ty)&RoundLinearCurveMiIntersectorK<N,8,true>::intersect;
+ intersectors.occluded8 = (VirtualCurveIntersector::Occluded8Ty) &RoundLinearCurveMiIntersectorK<N,8,true>::occluded;
#endif
#if defined(__AVX512F__)
- intersectors.intersect16 = (VirtualCurveIntersector::Intersect16Ty)&RoundLinearCurveMiIntersectorK<N,N,16,true>::intersect;
- intersectors.occluded16 = (VirtualCurveIntersector::Occluded16Ty) &RoundLinearCurveMiIntersectorK<N,N,16,true>::occluded;
+ intersectors.intersect16 = (VirtualCurveIntersector::Intersect16Ty)&RoundLinearCurveMiIntersectorK<N,16,true>::intersect;
+ intersectors.occluded16 = (VirtualCurveIntersector::Occluded16Ty) &RoundLinearCurveMiIntersectorK<N,16,true>::occluded;
#endif
return intersectors;
}
@@ -196,17 +196,17 @@ namespace embree
static VirtualCurveIntersector::Intersectors LinearConeNiIntersectors()
{
VirtualCurveIntersector::Intersectors intersectors;
- intersectors.intersect1 = (VirtualCurveIntersector::Intersect1Ty) &ConeCurveMiIntersector1<N,N,true>::intersect;
- intersectors.occluded1 = (VirtualCurveIntersector::Occluded1Ty) &ConeCurveMiIntersector1<N,N,true>::occluded;
- intersectors.intersect4 = (VirtualCurveIntersector::Intersect4Ty) &ConeCurveMiIntersectorK<N,N,4,true>::intersect;
- intersectors.occluded4 = (VirtualCurveIntersector::Occluded4Ty) &ConeCurveMiIntersectorK<N,N,4,true>::occluded;
+ intersectors.intersect1 = (VirtualCurveIntersector::Intersect1Ty) &ConeCurveMiIntersector1<N,true>::intersect;
+ intersectors.occluded1 = (VirtualCurveIntersector::Occluded1Ty) &ConeCurveMiIntersector1<N,true>::occluded;
+ intersectors.intersect4 = (VirtualCurveIntersector::Intersect4Ty) &ConeCurveMiIntersectorK<N,4,true>::intersect;
+ intersectors.occluded4 = (VirtualCurveIntersector::Occluded4Ty) &ConeCurveMiIntersectorK<N,4,true>::occluded;
#if defined(__AVX__)
- intersectors.intersect8 = (VirtualCurveIntersector::Intersect8Ty)&ConeCurveMiIntersectorK<N,N,8,true>::intersect;
- intersectors.occluded8 = (VirtualCurveIntersector::Occluded8Ty) &ConeCurveMiIntersectorK<N,N,8,true>::occluded;
+ intersectors.intersect8 = (VirtualCurveIntersector::Intersect8Ty)&ConeCurveMiIntersectorK<N,8,true>::intersect;
+ intersectors.occluded8 = (VirtualCurveIntersector::Occluded8Ty) &ConeCurveMiIntersectorK<N,8,true>::occluded;
#endif
#if defined(__AVX512F__)
- intersectors.intersect16 = (VirtualCurveIntersector::Intersect16Ty)&ConeCurveMiIntersectorK<N,N,16,true>::intersect;
- intersectors.occluded16 = (VirtualCurveIntersector::Occluded16Ty) &ConeCurveMiIntersectorK<N,N,16,true>::occluded;
+ intersectors.intersect16 = (VirtualCurveIntersector::Intersect16Ty)&ConeCurveMiIntersectorK<N,16,true>::intersect;
+ intersectors.occluded16 = (VirtualCurveIntersector::Occluded16Ty) &ConeCurveMiIntersectorK<N,16,true>::occluded;
#endif
return intersectors;
}
@@ -215,17 +215,17 @@ namespace embree
static VirtualCurveIntersector::Intersectors LinearRoundConeNiMBIntersectors()
{
VirtualCurveIntersector::Intersectors intersectors;
- intersectors.intersect1 = (VirtualCurveIntersector::Intersect1Ty) &RoundLinearCurveMiMBIntersector1<N,N,true>::intersect;
- intersectors.occluded1 = (VirtualCurveIntersector::Occluded1Ty) &RoundLinearCurveMiMBIntersector1<N,N,true>::occluded;
- intersectors.intersect4 = (VirtualCurveIntersector::Intersect4Ty) &RoundLinearCurveMiMBIntersectorK<N,N,4,true>::intersect;
- intersectors.occluded4 = (VirtualCurveIntersector::Occluded4Ty) &RoundLinearCurveMiMBIntersectorK<N,N,4,true>::occluded;
+ intersectors.intersect1 = (VirtualCurveIntersector::Intersect1Ty) &RoundLinearCurveMiMBIntersector1<N,true>::intersect;
+ intersectors.occluded1 = (VirtualCurveIntersector::Occluded1Ty) &RoundLinearCurveMiMBIntersector1<N,true>::occluded;
+ intersectors.intersect4 = (VirtualCurveIntersector::Intersect4Ty) &RoundLinearCurveMiMBIntersectorK<N,4,true>::intersect;
+ intersectors.occluded4 = (VirtualCurveIntersector::Occluded4Ty) &RoundLinearCurveMiMBIntersectorK<N,4,true>::occluded;
#if defined(__AVX__)
- intersectors.intersect8 = (VirtualCurveIntersector::Intersect8Ty)&RoundLinearCurveMiMBIntersectorK<N,N,8,true>::intersect;
- intersectors.occluded8 = (VirtualCurveIntersector::Occluded8Ty) &RoundLinearCurveMiMBIntersectorK<N,N,8,true>::occluded;
+ intersectors.intersect8 = (VirtualCurveIntersector::Intersect8Ty)&RoundLinearCurveMiMBIntersectorK<N,8,true>::intersect;
+ intersectors.occluded8 = (VirtualCurveIntersector::Occluded8Ty) &RoundLinearCurveMiMBIntersectorK<N,8,true>::occluded;
#endif
#if defined(__AVX512F__)
- intersectors.intersect16 = (VirtualCurveIntersector::Intersect16Ty)&RoundLinearCurveMiMBIntersectorK<N,N,16,true>::intersect;
- intersectors.occluded16 = (VirtualCurveIntersector::Occluded16Ty) &RoundLinearCurveMiMBIntersectorK<N,N,16,true>::occluded;
+ intersectors.intersect16 = (VirtualCurveIntersector::Intersect16Ty)&RoundLinearCurveMiMBIntersectorK<N,16,true>::intersect;
+ intersectors.occluded16 = (VirtualCurveIntersector::Occluded16Ty) &RoundLinearCurveMiMBIntersectorK<N,16,true>::occluded;
#endif
return intersectors;
}
@@ -234,17 +234,17 @@ namespace embree
static VirtualCurveIntersector::Intersectors LinearConeNiMBIntersectors()
{
VirtualCurveIntersector::Intersectors intersectors;
- intersectors.intersect1 = (VirtualCurveIntersector::Intersect1Ty) &ConeCurveMiMBIntersector1<N,N,true>::intersect;
- intersectors.occluded1 = (VirtualCurveIntersector::Occluded1Ty) &ConeCurveMiMBIntersector1<N,N,true>::occluded;
- intersectors.intersect4 = (VirtualCurveIntersector::Intersect4Ty) &ConeCurveMiMBIntersectorK<N,N,4,true>::intersect;
- intersectors.occluded4 = (VirtualCurveIntersector::Occluded4Ty) &ConeCurveMiMBIntersectorK<N,N,4,true>::occluded;
+ intersectors.intersect1 = (VirtualCurveIntersector::Intersect1Ty) &ConeCurveMiMBIntersector1<N,true>::intersect;
+ intersectors.occluded1 = (VirtualCurveIntersector::Occluded1Ty) &ConeCurveMiMBIntersector1<N,true>::occluded;
+ intersectors.intersect4 = (VirtualCurveIntersector::Intersect4Ty) &ConeCurveMiMBIntersectorK<N,4,true>::intersect;
+ intersectors.occluded4 = (VirtualCurveIntersector::Occluded4Ty) &ConeCurveMiMBIntersectorK<N,4,true>::occluded;
#if defined(__AVX__)
- intersectors.intersect8 = (VirtualCurveIntersector::Intersect8Ty)&ConeCurveMiMBIntersectorK<N,N,8,true>::intersect;
- intersectors.occluded8 = (VirtualCurveIntersector::Occluded8Ty) &ConeCurveMiMBIntersectorK<N,N,8,true>::occluded;
+ intersectors.intersect8 = (VirtualCurveIntersector::Intersect8Ty)&ConeCurveMiMBIntersectorK<N,8,true>::intersect;
+ intersectors.occluded8 = (VirtualCurveIntersector::Occluded8Ty) &ConeCurveMiMBIntersectorK<N,8,true>::occluded;
#endif
#if defined(__AVX512F__)
- intersectors.intersect16 = (VirtualCurveIntersector::Intersect16Ty)&ConeCurveMiMBIntersectorK<N,N,16,true>::intersect;
- intersectors.occluded16 = (VirtualCurveIntersector::Occluded16Ty) &ConeCurveMiMBIntersectorK<N,N,16,true>::occluded;
+ intersectors.intersect16 = (VirtualCurveIntersector::Intersect16Ty)&ConeCurveMiMBIntersectorK<N,16,true>::intersect;
+ intersectors.occluded16 = (VirtualCurveIntersector::Occluded16Ty) &ConeCurveMiMBIntersectorK<N,16,true>::occluded;
#endif
return intersectors;
}
@@ -254,17 +254,17 @@ namespace embree
static VirtualCurveIntersector::Intersectors LinearRibbonNiIntersectors()
{
VirtualCurveIntersector::Intersectors intersectors;
- intersectors.intersect1 = (VirtualCurveIntersector::Intersect1Ty) &FlatLinearCurveMiIntersector1<N,N,true>::intersect;
- intersectors.occluded1 = (VirtualCurveIntersector::Occluded1Ty) &FlatLinearCurveMiIntersector1<N,N,true>::occluded;
- intersectors.intersect4 = (VirtualCurveIntersector::Intersect4Ty) &FlatLinearCurveMiIntersectorK<N,N,4,true>::intersect;
- intersectors.occluded4 = (VirtualCurveIntersector::Occluded4Ty) &FlatLinearCurveMiIntersectorK<N,N,4,true>::occluded;
+ intersectors.intersect1 = (VirtualCurveIntersector::Intersect1Ty) &FlatLinearCurveMiIntersector1<N,true>::intersect;
+ intersectors.occluded1 = (VirtualCurveIntersector::Occluded1Ty) &FlatLinearCurveMiIntersector1<N,true>::occluded;
+ intersectors.intersect4 = (VirtualCurveIntersector::Intersect4Ty) &FlatLinearCurveMiIntersectorK<N,4,true>::intersect;
+ intersectors.occluded4 = (VirtualCurveIntersector::Occluded4Ty) &FlatLinearCurveMiIntersectorK<N,4,true>::occluded;
#if defined(__AVX__)
- intersectors.intersect8 = (VirtualCurveIntersector::Intersect8Ty)&FlatLinearCurveMiIntersectorK<N,N,8,true>::intersect;
- intersectors.occluded8 = (VirtualCurveIntersector::Occluded8Ty) &FlatLinearCurveMiIntersectorK<N,N,8,true>::occluded;
+ intersectors.intersect8 = (VirtualCurveIntersector::Intersect8Ty)&FlatLinearCurveMiIntersectorK<N,8,true>::intersect;
+ intersectors.occluded8 = (VirtualCurveIntersector::Occluded8Ty) &FlatLinearCurveMiIntersectorK<N,8,true>::occluded;
#endif
#if defined(__AVX512F__)
- intersectors.intersect16 = (VirtualCurveIntersector::Intersect16Ty)&FlatLinearCurveMiIntersectorK<N,N,16,true>::intersect;
- intersectors.occluded16 = (VirtualCurveIntersector::Occluded16Ty) &FlatLinearCurveMiIntersectorK<N,N,16,true>::occluded;
+ intersectors.intersect16 = (VirtualCurveIntersector::Intersect16Ty)&FlatLinearCurveMiIntersectorK<N,16,true>::intersect;
+ intersectors.occluded16 = (VirtualCurveIntersector::Occluded16Ty) &FlatLinearCurveMiIntersectorK<N,16,true>::occluded;
#endif
return intersectors;
}
@@ -273,17 +273,17 @@ namespace embree
static VirtualCurveIntersector::Intersectors LinearRibbonNiMBIntersectors()
{
VirtualCurveIntersector::Intersectors intersectors;
- intersectors.intersect1 = (VirtualCurveIntersector::Intersect1Ty) &FlatLinearCurveMiMBIntersector1<N,N,true>::intersect;
- intersectors.occluded1 = (VirtualCurveIntersector::Occluded1Ty) &FlatLinearCurveMiMBIntersector1<N,N,true>::occluded;
- intersectors.intersect4 = (VirtualCurveIntersector::Intersect4Ty) &FlatLinearCurveMiMBIntersectorK<N,N,4,true>::intersect;
- intersectors.occluded4 = (VirtualCurveIntersector::Occluded4Ty) &FlatLinearCurveMiMBIntersectorK<N,N,4,true>::occluded;
+ intersectors.intersect1 = (VirtualCurveIntersector::Intersect1Ty) &FlatLinearCurveMiMBIntersector1<N,true>::intersect;
+ intersectors.occluded1 = (VirtualCurveIntersector::Occluded1Ty) &FlatLinearCurveMiMBIntersector1<N,true>::occluded;
+ intersectors.intersect4 = (VirtualCurveIntersector::Intersect4Ty) &FlatLinearCurveMiMBIntersectorK<N,4,true>::intersect;
+ intersectors.occluded4 = (VirtualCurveIntersector::Occluded4Ty) &FlatLinearCurveMiMBIntersectorK<N,4,true>::occluded;
#if defined(__AVX__)
- intersectors.intersect8 = (VirtualCurveIntersector::Intersect8Ty)&FlatLinearCurveMiMBIntersectorK<N,N,8,true>::intersect;
- intersectors.occluded8 = (VirtualCurveIntersector::Occluded8Ty) &FlatLinearCurveMiMBIntersectorK<N,N,8,true>::occluded;
+ intersectors.intersect8 = (VirtualCurveIntersector::Intersect8Ty)&FlatLinearCurveMiMBIntersectorK<N,8,true>::intersect;
+ intersectors.occluded8 = (VirtualCurveIntersector::Occluded8Ty) &FlatLinearCurveMiMBIntersectorK<N,8,true>::occluded;
#endif
#if defined(__AVX512F__)
- intersectors.intersect16 = (VirtualCurveIntersector::Intersect16Ty)&FlatLinearCurveMiMBIntersectorK<N,N,16,true>::intersect;
- intersectors.occluded16 = (VirtualCurveIntersector::Occluded16Ty) &FlatLinearCurveMiMBIntersectorK<N,N,16,true>::occluded;
+ intersectors.intersect16 = (VirtualCurveIntersector::Intersect16Ty)&FlatLinearCurveMiMBIntersectorK<N,16,true>::intersect;
+ intersectors.occluded16 = (VirtualCurveIntersector::Occluded16Ty) &FlatLinearCurveMiMBIntersectorK<N,16,true>::occluded;
#endif
return intersectors;
}
@@ -292,17 +292,17 @@ namespace embree
static VirtualCurveIntersector::Intersectors SphereNiIntersectors()
{
VirtualCurveIntersector::Intersectors intersectors;
- intersectors.intersect1 = (VirtualCurveIntersector::Intersect1Ty) &SphereMiIntersector1<N,N,true>::intersect;
- intersectors.occluded1 = (VirtualCurveIntersector::Occluded1Ty) &SphereMiIntersector1<N,N,true>::occluded;
- intersectors.intersect4 = (VirtualCurveIntersector::Intersect4Ty) &SphereMiIntersectorK<N,N,4,true>::intersect;
- intersectors.occluded4 = (VirtualCurveIntersector::Occluded4Ty) &SphereMiIntersectorK<N,N,4,true>::occluded;
+ intersectors.intersect1 = (VirtualCurveIntersector::Intersect1Ty) &SphereMiIntersector1<N,true>::intersect;
+ intersectors.occluded1 = (VirtualCurveIntersector::Occluded1Ty) &SphereMiIntersector1<N,true>::occluded;
+ intersectors.intersect4 = (VirtualCurveIntersector::Intersect4Ty) &SphereMiIntersectorK<N,4,true>::intersect;
+ intersectors.occluded4 = (VirtualCurveIntersector::Occluded4Ty) &SphereMiIntersectorK<N,4,true>::occluded;
#if defined(__AVX__)
- intersectors.intersect8 = (VirtualCurveIntersector::Intersect8Ty)&SphereMiIntersectorK<N,N,8,true>::intersect;
- intersectors.occluded8 = (VirtualCurveIntersector::Occluded8Ty) &SphereMiIntersectorK<N,N,8,true>::occluded;
+ intersectors.intersect8 = (VirtualCurveIntersector::Intersect8Ty)&SphereMiIntersectorK<N,8,true>::intersect;
+ intersectors.occluded8 = (VirtualCurveIntersector::Occluded8Ty) &SphereMiIntersectorK<N,8,true>::occluded;
#endif
#if defined(__AVX512F__)
- intersectors.intersect16 = (VirtualCurveIntersector::Intersect16Ty)&SphereMiIntersectorK<N,N,16,true>::intersect;
- intersectors.occluded16 = (VirtualCurveIntersector::Occluded16Ty) &SphereMiIntersectorK<N,N,16,true>::occluded;
+ intersectors.intersect16 = (VirtualCurveIntersector::Intersect16Ty)&SphereMiIntersectorK<N,16,true>::intersect;
+ intersectors.occluded16 = (VirtualCurveIntersector::Occluded16Ty) &SphereMiIntersectorK<N,16,true>::occluded;
#endif
return intersectors;
}
@@ -311,17 +311,17 @@ namespace embree
static VirtualCurveIntersector::Intersectors SphereNiMBIntersectors()
{
VirtualCurveIntersector::Intersectors intersectors;
- intersectors.intersect1 = (VirtualCurveIntersector::Intersect1Ty) &SphereMiMBIntersector1<N,N,true>::intersect;
- intersectors.occluded1 = (VirtualCurveIntersector::Occluded1Ty) &SphereMiMBIntersector1<N,N,true>::occluded;
- intersectors.intersect4 = (VirtualCurveIntersector::Intersect4Ty) &SphereMiMBIntersectorK<N,N,4,true>::intersect;
- intersectors.occluded4 = (VirtualCurveIntersector::Occluded4Ty) &SphereMiMBIntersectorK<N,N,4,true>::occluded;
+ intersectors.intersect1 = (VirtualCurveIntersector::Intersect1Ty) &SphereMiMBIntersector1<N,true>::intersect;
+ intersectors.occluded1 = (VirtualCurveIntersector::Occluded1Ty) &SphereMiMBIntersector1<N,true>::occluded;
+ intersectors.intersect4 = (VirtualCurveIntersector::Intersect4Ty) &SphereMiMBIntersectorK<N,4,true>::intersect;
+ intersectors.occluded4 = (VirtualCurveIntersector::Occluded4Ty) &SphereMiMBIntersectorK<N,4,true>::occluded;
#if defined(__AVX__)
- intersectors.intersect8 = (VirtualCurveIntersector::Intersect8Ty)&SphereMiMBIntersectorK<N,N,8,true>::intersect;
- intersectors.occluded8 = (VirtualCurveIntersector::Occluded8Ty) &SphereMiMBIntersectorK<N,N,8,true>::occluded;
+ intersectors.intersect8 = (VirtualCurveIntersector::Intersect8Ty)&SphereMiMBIntersectorK<N,8,true>::intersect;
+ intersectors.occluded8 = (VirtualCurveIntersector::Occluded8Ty) &SphereMiMBIntersectorK<N,8,true>::occluded;
#endif
#if defined(__AVX512F__)
- intersectors.intersect16 = (VirtualCurveIntersector::Intersect16Ty)&SphereMiMBIntersectorK<N,N,16,true>::intersect;
- intersectors.occluded16 = (VirtualCurveIntersector::Occluded16Ty) &SphereMiMBIntersectorK<N,N,16,true>::occluded;
+ intersectors.intersect16 = (VirtualCurveIntersector::Intersect16Ty)&SphereMiMBIntersectorK<N,16,true>::intersect;
+ intersectors.occluded16 = (VirtualCurveIntersector::Occluded16Ty) &SphereMiMBIntersectorK<N,16,true>::occluded;
#endif
return intersectors;
}
@@ -330,17 +330,17 @@ namespace embree
static VirtualCurveIntersector::Intersectors DiscNiIntersectors()
{
VirtualCurveIntersector::Intersectors intersectors;
- intersectors.intersect1 = (VirtualCurveIntersector::Intersect1Ty) &DiscMiIntersector1<N,N,true>::intersect;
- intersectors.occluded1 = (VirtualCurveIntersector::Occluded1Ty) &DiscMiIntersector1<N,N,true>::occluded;
- intersectors.intersect4 = (VirtualCurveIntersector::Intersect4Ty) &DiscMiIntersectorK<N,N,4,true>::intersect;
- intersectors.occluded4 = (VirtualCurveIntersector::Occluded4Ty) &DiscMiIntersectorK<N,N,4,true>::occluded;
+ intersectors.intersect1 = (VirtualCurveIntersector::Intersect1Ty) &DiscMiIntersector1<N,true>::intersect;
+ intersectors.occluded1 = (VirtualCurveIntersector::Occluded1Ty) &DiscMiIntersector1<N,true>::occluded;
+ intersectors.intersect4 = (VirtualCurveIntersector::Intersect4Ty) &DiscMiIntersectorK<N,4,true>::intersect;
+ intersectors.occluded4 = (VirtualCurveIntersector::Occluded4Ty) &DiscMiIntersectorK<N,4,true>::occluded;
#if defined(__AVX__)
- intersectors.intersect8 = (VirtualCurveIntersector::Intersect8Ty)&DiscMiIntersectorK<N,N,8,true>::intersect;
- intersectors.occluded8 = (VirtualCurveIntersector::Occluded8Ty) &DiscMiIntersectorK<N,N,8,true>::occluded;
+ intersectors.intersect8 = (VirtualCurveIntersector::Intersect8Ty)&DiscMiIntersectorK<N,8,true>::intersect;
+ intersectors.occluded8 = (VirtualCurveIntersector::Occluded8Ty) &DiscMiIntersectorK<N,8,true>::occluded;
#endif
#if defined(__AVX512F__)
- intersectors.intersect16 = (VirtualCurveIntersector::Intersect16Ty)&DiscMiIntersectorK<N,N,16,true>::intersect;
- intersectors.occluded16 = (VirtualCurveIntersector::Occluded16Ty) &DiscMiIntersectorK<N,N,16,true>::occluded;
+ intersectors.intersect16 = (VirtualCurveIntersector::Intersect16Ty)&DiscMiIntersectorK<N,16,true>::intersect;
+ intersectors.occluded16 = (VirtualCurveIntersector::Occluded16Ty) &DiscMiIntersectorK<N,16,true>::occluded;
#endif
return intersectors;
}
@@ -349,17 +349,17 @@ namespace embree
static VirtualCurveIntersector::Intersectors DiscNiMBIntersectors()
{
VirtualCurveIntersector::Intersectors intersectors;
- intersectors.intersect1 = (VirtualCurveIntersector::Intersect1Ty) &DiscMiMBIntersector1<N,N,true>::intersect;
- intersectors.occluded1 = (VirtualCurveIntersector::Occluded1Ty) &DiscMiMBIntersector1<N,N,true>::occluded;
- intersectors.intersect4 = (VirtualCurveIntersector::Intersect4Ty) &DiscMiMBIntersectorK<N,N,4,true>::intersect;
- intersectors.occluded4 = (VirtualCurveIntersector::Occluded4Ty) &DiscMiMBIntersectorK<N,N,4,true>::occluded;
+ intersectors.intersect1 = (VirtualCurveIntersector::Intersect1Ty) &DiscMiMBIntersector1<N,true>::intersect;
+ intersectors.occluded1 = (VirtualCurveIntersector::Occluded1Ty) &DiscMiMBIntersector1<N,true>::occluded;
+ intersectors.intersect4 = (VirtualCurveIntersector::Intersect4Ty) &DiscMiMBIntersectorK<N,4,true>::intersect;
+ intersectors.occluded4 = (VirtualCurveIntersector::Occluded4Ty) &DiscMiMBIntersectorK<N,4,true>::occluded;
#if defined(__AVX__)
- intersectors.intersect8 = (VirtualCurveIntersector::Intersect8Ty)&DiscMiMBIntersectorK<N,N,8,true>::intersect;
- intersectors.occluded8 = (VirtualCurveIntersector::Occluded8Ty) &DiscMiMBIntersectorK<N,N,8,true>::occluded;
+ intersectors.intersect8 = (VirtualCurveIntersector::Intersect8Ty)&DiscMiMBIntersectorK<N,8,true>::intersect;
+ intersectors.occluded8 = (VirtualCurveIntersector::Occluded8Ty) &DiscMiMBIntersectorK<N,8,true>::occluded;
#endif
#if defined(__AVX512F__)
- intersectors.intersect16 = (VirtualCurveIntersector::Intersect16Ty)&DiscMiMBIntersectorK<N,N,16,true>::intersect;
- intersectors.occluded16 = (VirtualCurveIntersector::Occluded16Ty) &DiscMiMBIntersectorK<N,N,16,true>::occluded;
+ intersectors.intersect16 = (VirtualCurveIntersector::Intersect16Ty)&DiscMiMBIntersectorK<N,16,true>::intersect;
+ intersectors.occluded16 = (VirtualCurveIntersector::Occluded16Ty) &DiscMiMBIntersectorK<N,16,true>::occluded;
#endif
return intersectors;
}
@@ -368,17 +368,17 @@ namespace embree
static VirtualCurveIntersector::Intersectors OrientedDiscNiIntersectors()
{
VirtualCurveIntersector::Intersectors intersectors;
- intersectors.intersect1 = (VirtualCurveIntersector::Intersect1Ty) &OrientedDiscMiIntersector1<N,N,true>::intersect;
- intersectors.occluded1 = (VirtualCurveIntersector::Occluded1Ty) &OrientedDiscMiIntersector1<N,N,true>::occluded;
- intersectors.intersect4 = (VirtualCurveIntersector::Intersect4Ty) &OrientedDiscMiIntersectorK<N,N,4,true>::intersect;
- intersectors.occluded4 = (VirtualCurveIntersector::Occluded4Ty) &OrientedDiscMiIntersectorK<N,N,4,true>::occluded;
+ intersectors.intersect1 = (VirtualCurveIntersector::Intersect1Ty) &OrientedDiscMiIntersector1<N,true>::intersect;
+ intersectors.occluded1 = (VirtualCurveIntersector::Occluded1Ty) &OrientedDiscMiIntersector1<N,true>::occluded;
+ intersectors.intersect4 = (VirtualCurveIntersector::Intersect4Ty) &OrientedDiscMiIntersectorK<N,4,true>::intersect;
+ intersectors.occluded4 = (VirtualCurveIntersector::Occluded4Ty) &OrientedDiscMiIntersectorK<N,4,true>::occluded;
#if defined(__AVX__)
- intersectors.intersect8 = (VirtualCurveIntersector::Intersect8Ty)&OrientedDiscMiIntersectorK<N,N,8,true>::intersect;
- intersectors.occluded8 = (VirtualCurveIntersector::Occluded8Ty) &OrientedDiscMiIntersectorK<N,N,8,true>::occluded;
+ intersectors.intersect8 = (VirtualCurveIntersector::Intersect8Ty)&OrientedDiscMiIntersectorK<N,8,true>::intersect;
+ intersectors.occluded8 = (VirtualCurveIntersector::Occluded8Ty) &OrientedDiscMiIntersectorK<N,8,true>::occluded;
#endif
#if defined(__AVX512F__)
- intersectors.intersect16 = (VirtualCurveIntersector::Intersect16Ty)&OrientedDiscMiIntersectorK<N,N,16,true>::intersect;
- intersectors.occluded16 = (VirtualCurveIntersector::Occluded16Ty) &OrientedDiscMiIntersectorK<N,N,16,true>::occluded;
+ intersectors.intersect16 = (VirtualCurveIntersector::Intersect16Ty)&OrientedDiscMiIntersectorK<N,16,true>::intersect;
+ intersectors.occluded16 = (VirtualCurveIntersector::Occluded16Ty) &OrientedDiscMiIntersectorK<N,16,true>::occluded;
#endif
return intersectors;
}
@@ -387,17 +387,17 @@ namespace embree
static VirtualCurveIntersector::Intersectors OrientedDiscNiMBIntersectors()
{
VirtualCurveIntersector::Intersectors intersectors;
- intersectors.intersect1 = (VirtualCurveIntersector::Intersect1Ty) &OrientedDiscMiMBIntersector1<N,N,true>::intersect;
- intersectors.occluded1 = (VirtualCurveIntersector::Occluded1Ty) &OrientedDiscMiMBIntersector1<N,N,true>::occluded;
- intersectors.intersect4 = (VirtualCurveIntersector::Intersect4Ty) &OrientedDiscMiMBIntersectorK<N,N,4,true>::intersect;
- intersectors.occluded4 = (VirtualCurveIntersector::Occluded4Ty) &OrientedDiscMiMBIntersectorK<N,N,4,true>::occluded;
+ intersectors.intersect1 = (VirtualCurveIntersector::Intersect1Ty) &OrientedDiscMiMBIntersector1<N,true>::intersect;
+ intersectors.occluded1 = (VirtualCurveIntersector::Occluded1Ty) &OrientedDiscMiMBIntersector1<N,true>::occluded;
+ intersectors.intersect4 = (VirtualCurveIntersector::Intersect4Ty) &OrientedDiscMiMBIntersectorK<N,4,true>::intersect;
+ intersectors.occluded4 = (VirtualCurveIntersector::Occluded4Ty) &OrientedDiscMiMBIntersectorK<N,4,true>::occluded;
#if defined(__AVX__)
- intersectors.intersect8 = (VirtualCurveIntersector::Intersect8Ty)&OrientedDiscMiMBIntersectorK<N,N,8,true>::intersect;
- intersectors.occluded8 = (VirtualCurveIntersector::Occluded8Ty) &OrientedDiscMiMBIntersectorK<N,N,8,true>::occluded;
+ intersectors.intersect8 = (VirtualCurveIntersector::Intersect8Ty)&OrientedDiscMiMBIntersectorK<N,8,true>::intersect;
+ intersectors.occluded8 = (VirtualCurveIntersector::Occluded8Ty) &OrientedDiscMiMBIntersectorK<N,8,true>::occluded;
#endif
#if defined(__AVX512F__)
- intersectors.intersect16 = (VirtualCurveIntersector::Intersect16Ty)&OrientedDiscMiMBIntersectorK<N,N,16,true>::intersect;
- intersectors.occluded16 = (VirtualCurveIntersector::Occluded16Ty) &OrientedDiscMiMBIntersectorK<N,N,16,true>::occluded;
+ intersectors.intersect16 = (VirtualCurveIntersector::Intersect16Ty)&OrientedDiscMiMBIntersectorK<N,16,true>::intersect;
+ intersectors.occluded16 = (VirtualCurveIntersector::Occluded16Ty) &OrientedDiscMiMBIntersectorK<N,16,true>::occluded;
#endif
return intersectors;
}
diff --git a/thirdparty/embree-aarch64/kernels/geometry/cylinder.h b/thirdparty/embree/kernels/geometry/cylinder.h
index 39a582864c..dab02989ce 100644
--- a/thirdparty/embree-aarch64/kernels/geometry/cylinder.h
+++ b/thirdparty/embree/kernels/geometry/cylinder.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
diff --git a/thirdparty/embree-aarch64/kernels/geometry/disc_intersector.h b/thirdparty/embree/kernels/geometry/disc_intersector.h
index e8305780e5..816c066899 100644
--- a/thirdparty/embree-aarch64/kernels/geometry/disc_intersector.h
+++ b/thirdparty/embree/kernels/geometry/disc_intersector.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
@@ -64,7 +64,7 @@ namespace embree
const Vec3vf<M> ray_dir(ray.dir.x, ray.dir.y, ray.dir.z);
const vfloat<M> rd2 = rcp(dot(ray_dir, ray_dir));
- const Vec4vf<M> v0 = enlargeRadiusToMinWidth(context,geom,ray_org,v0i);
+ const Vec4vf<M> v0 = enlargeRadiusToMinWidth<M>(context,geom,ray_org,v0i);
const Vec3vf<M> center = v0.xyz();
const vfloat<M> radius = v0.w;
@@ -101,7 +101,7 @@ namespace embree
vbool<M> valid = valid_i;
const Vec3vf<M> ray_org(ray.org.x, ray.org.y, ray.org.z);
- const Vec4vf<M> v0 = enlargeRadiusToMinWidth(context,geom,ray_org,v0i);
+ const Vec4vf<M> v0 = enlargeRadiusToMinWidth<M>(context,geom,ray_org,v0i);
const Vec3vf<M> center = v0.xyz();
const vfloat<M> radius = v0.w;
@@ -148,7 +148,7 @@ namespace embree
const Vec3vf<M> ray_dir(ray.dir.x[k], ray.dir.y[k], ray.dir.z[k]);
const vfloat<M> rd2 = rcp(dot(ray_dir, ray_dir));
- const Vec4vf<M> v0 = enlargeRadiusToMinWidth(context,geom,ray_org,v0i);
+ const Vec4vf<M> v0 = enlargeRadiusToMinWidth<M>(context,geom,ray_org,v0i);
const Vec3vf<M> center = v0.xyz();
const vfloat<M> radius = v0.w;
@@ -187,7 +187,7 @@ namespace embree
const Vec3vf<M> ray_org(ray.org.x[k], ray.org.y[k], ray.org.z[k]);
const Vec3vf<M> ray_dir(ray.dir.x[k], ray.dir.y[k], ray.dir.z[k]);
- const Vec4vf<M> v0 = enlargeRadiusToMinWidth(context,geom,ray_org,v0i);
+ const Vec4vf<M> v0 = enlargeRadiusToMinWidth<M>(context,geom,ray_org,v0i);
const Vec3vf<M> center = v0.xyz();
const vfloat<M> radius = v0.w;
diff --git a/thirdparty/embree-aarch64/kernels/geometry/disci_intersector.h b/thirdparty/embree/kernels/geometry/disci_intersector.h
index e1dc3aa98e..bb9d396f6e 100644
--- a/thirdparty/embree-aarch64/kernels/geometry/disci_intersector.h
+++ b/thirdparty/embree/kernels/geometry/disci_intersector.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
@@ -11,7 +11,7 @@ namespace embree
{
namespace isa
{
- template<int M, int Mx, bool filter>
+ template<int M, bool filter>
struct DiscMiIntersector1
{
typedef PointMi<M> Primitive;
@@ -25,9 +25,9 @@ namespace embree
STAT3(normal.trav_prims, 1, 1, 1);
const Points* geom = context->scene->get<Points>(Disc.geomID());
Vec4vf<M> v0; Disc.gather(v0, geom);
- const vbool<Mx> valid = Disc.template valid<Mx>();
- DiscIntersector1<Mx>::intersect(
- valid, ray, context, geom, pre, v0, Intersect1EpilogM<M, Mx, filter>(ray, context, Disc.geomID(), Disc.primID()));
+ const vbool<M> valid = Disc.valid();
+ DiscIntersector1<M>::intersect(
+ valid, ray, context, geom, pre, v0, Intersect1EpilogM<M, filter>(ray, context, Disc.geomID(), Disc.primID()));
}
static __forceinline bool occluded(const Precalculations& pre,
@@ -38,13 +38,13 @@ namespace embree
STAT3(shadow.trav_prims, 1, 1, 1);
const Points* geom = context->scene->get<Points>(Disc.geomID());
Vec4vf<M> v0; Disc.gather(v0, geom);
- const vbool<Mx> valid = Disc.template valid<Mx>();
- return DiscIntersector1<Mx>::intersect(
- valid, ray, context, geom, pre, v0, Occluded1EpilogM<M, Mx, filter>(ray, context, Disc.geomID(), Disc.primID()));
+ const vbool<M> valid = Disc.valid();
+ return DiscIntersector1<M>::intersect(
+ valid, ray, context, geom, pre, v0, Occluded1EpilogM<M, filter>(ray, context, Disc.geomID(), Disc.primID()));
}
};
- template<int M, int Mx, bool filter>
+ template<int M, bool filter>
struct DiscMiMBIntersector1
{
typedef PointMi<M> Primitive;
@@ -58,9 +58,9 @@ namespace embree
STAT3(normal.trav_prims, 1, 1, 1);
const Points* geom = context->scene->get<Points>(Disc.geomID());
Vec4vf<M> v0; Disc.gather(v0, geom, ray.time());
- const vbool<Mx> valid = Disc.template valid<Mx>();
- DiscIntersector1<Mx>::intersect(
- valid, ray, context, geom, pre, v0, Intersect1EpilogM<M, Mx, filter>(ray, context, Disc.geomID(), Disc.primID()));
+ const vbool<M> valid = Disc.valid();
+ DiscIntersector1<M>::intersect(
+ valid, ray, context, geom, pre, v0, Intersect1EpilogM<M, filter>(ray, context, Disc.geomID(), Disc.primID()));
}
static __forceinline bool occluded(const Precalculations& pre,
@@ -71,13 +71,13 @@ namespace embree
STAT3(shadow.trav_prims, 1, 1, 1);
const Points* geom = context->scene->get<Points>(Disc.geomID());
Vec4vf<M> v0; Disc.gather(v0, geom, ray.time());
- const vbool<Mx> valid = Disc.template valid<Mx>();
- return DiscIntersector1<Mx>::intersect(
- valid, ray, context, geom, pre, v0, Occluded1EpilogM<M, Mx, filter>(ray, context, Disc.geomID(), Disc.primID()));
+ const vbool<M> valid = Disc.valid();
+ return DiscIntersector1<M>::intersect(
+ valid, ray, context, geom, pre, v0, Occluded1EpilogM<M, filter>(ray, context, Disc.geomID(), Disc.primID()));
}
};
- template<int M, int Mx, int K, bool filter>
+ template<int M, int K, bool filter>
struct DiscMiIntersectorK
{
typedef PointMi<M> Primitive;
@@ -89,10 +89,10 @@ namespace embree
STAT3(normal.trav_prims, 1, 1, 1);
const Points* geom = context->scene->get<Points>(Disc.geomID());
Vec4vf<M> v0; Disc.gather(v0, geom);
- const vbool<Mx> valid = Disc.template valid<Mx>();
- DiscIntersectorK<Mx, K>::intersect(
+ const vbool<M> valid = Disc.valid();
+ DiscIntersectorK<M, K>::intersect(
valid, ray, k, context, geom, pre, v0,
- Intersect1KEpilogM<M, Mx, K, filter>(ray, k, context, Disc.geomID(), Disc.primID()));
+ Intersect1KEpilogM<M, K, filter>(ray, k, context, Disc.geomID(), Disc.primID()));
}
static __forceinline bool occluded(
@@ -101,14 +101,14 @@ namespace embree
STAT3(shadow.trav_prims, 1, 1, 1);
const Points* geom = context->scene->get<Points>(Disc.geomID());
Vec4vf<M> v0; Disc.gather(v0, geom);
- const vbool<Mx> valid = Disc.template valid<Mx>();
- return DiscIntersectorK<Mx, K>::intersect(
+ const vbool<M> valid = Disc.valid();
+ return DiscIntersectorK<M, K>::intersect(
valid, ray, k, context, geom, pre, v0,
- Occluded1KEpilogM<M, Mx, K, filter>(ray, k, context, Disc.geomID(), Disc.primID()));
+ Occluded1KEpilogM<M, K, filter>(ray, k, context, Disc.geomID(), Disc.primID()));
}
};
- template<int M, int Mx, int K, bool filter>
+ template<int M, int K, bool filter>
struct DiscMiMBIntersectorK
{
typedef PointMi<M> Primitive;
@@ -120,10 +120,10 @@ namespace embree
STAT3(normal.trav_prims, 1, 1, 1);
const Points* geom = context->scene->get<Points>(Disc.geomID());
Vec4vf<M> v0; Disc.gather(v0, geom, ray.time()[k]);
- const vbool<Mx> valid = Disc.template valid<Mx>();
- DiscIntersectorK<Mx, K>::intersect(
+ const vbool<M> valid = Disc.valid();
+ DiscIntersectorK<M, K>::intersect(
valid, ray, k, context, geom, pre, v0,
- Intersect1KEpilogM<M, Mx, K, filter>(ray, k, context, Disc.geomID(), Disc.primID()));
+ Intersect1KEpilogM<M, K, filter>(ray, k, context, Disc.geomID(), Disc.primID()));
}
static __forceinline bool occluded(
@@ -132,13 +132,13 @@ namespace embree
STAT3(shadow.trav_prims, 1, 1, 1);
const Points* geom = context->scene->get<Points>(Disc.geomID());
Vec4vf<M> v0; Disc.gather(v0, geom, ray.time()[k]);
- const vbool<Mx> valid = Disc.template valid<Mx>();
- return DiscIntersectorK<Mx, K>::intersect(
- valid, ray, k, context, geom, pre, v0, Occluded1KEpilogM<M, Mx, K, filter>(ray, k, context, Disc.geomID(), Disc.primID()));
+ const vbool<M> valid = Disc.valid();
+ return DiscIntersectorK<M, K>::intersect(
+ valid, ray, k, context, geom, pre, v0, Occluded1KEpilogM<M, K, filter>(ray, k, context, Disc.geomID(), Disc.primID()));
}
};
- template<int M, int Mx, bool filter>
+ template<int M, bool filter>
struct OrientedDiscMiIntersector1
{
typedef PointMi<M> Primitive;
@@ -153,9 +153,9 @@ namespace embree
const Points* geom = context->scene->get<Points>(Disc.geomID());
Vec4vf<M> v0; Vec3vf<M> n0;
Disc.gather(v0, n0, geom);
- const vbool<Mx> valid = Disc.template valid<Mx>();
- DiscIntersector1<Mx>::intersect(
- valid, ray, context, geom, pre, v0, n0, Intersect1EpilogM<M, Mx, filter>(ray, context, Disc.geomID(), Disc.primID()));
+ const vbool<M> valid = Disc.valid();
+ DiscIntersector1<M>::intersect(
+ valid, ray, context, geom, pre, v0, n0, Intersect1EpilogM<M, filter>(ray, context, Disc.geomID(), Disc.primID()));
}
static __forceinline bool occluded(const Precalculations& pre,
@@ -167,13 +167,13 @@ namespace embree
const Points* geom = context->scene->get<Points>(Disc.geomID());
Vec4vf<M> v0; Vec3vf<M> n0;
Disc.gather(v0, n0, geom);
- const vbool<Mx> valid = Disc.template valid<Mx>();
- return DiscIntersector1<Mx>::intersect(
- valid, ray, context, geom, pre, v0, n0, Occluded1EpilogM<M, Mx, filter>(ray, context, Disc.geomID(), Disc.primID()));
+ const vbool<M> valid = Disc.valid();
+ return DiscIntersector1<M>::intersect(
+ valid, ray, context, geom, pre, v0, n0, Occluded1EpilogM<M, filter>(ray, context, Disc.geomID(), Disc.primID()));
}
};
- template<int M, int Mx, bool filter>
+ template<int M, bool filter>
struct OrientedDiscMiMBIntersector1
{
typedef PointMi<M> Primitive;
@@ -188,9 +188,9 @@ namespace embree
const Points* geom = context->scene->get<Points>(Disc.geomID());
Vec4vf<M> v0; Vec3vf<M> n0;
Disc.gather(v0, n0, geom, ray.time());
- const vbool<Mx> valid = Disc.template valid<Mx>();
- DiscIntersector1<Mx>::intersect(
- valid, ray, context, geom, pre, v0, n0, Intersect1EpilogM<M, Mx, filter>(ray, context, Disc.geomID(), Disc.primID()));
+ const vbool<M> valid = Disc.valid();
+ DiscIntersector1<M>::intersect(
+ valid, ray, context, geom, pre, v0, n0, Intersect1EpilogM<M, filter>(ray, context, Disc.geomID(), Disc.primID()));
}
static __forceinline bool occluded(const Precalculations& pre,
@@ -202,13 +202,13 @@ namespace embree
const Points* geom = context->scene->get<Points>(Disc.geomID());
Vec4vf<M> v0; Vec3vf<M> n0;
Disc.gather(v0, n0, geom, ray.time());
- const vbool<Mx> valid = Disc.template valid<Mx>();
- return DiscIntersector1<Mx>::intersect(
- valid, ray, context, geom, pre, v0, n0, Occluded1EpilogM<M, Mx, filter>(ray, context, Disc.geomID(), Disc.primID()));
+ const vbool<M> valid = Disc.valid();
+ return DiscIntersector1<M>::intersect(
+ valid, ray, context, geom, pre, v0, n0, Occluded1EpilogM<M, filter>(ray, context, Disc.geomID(), Disc.primID()));
}
};
- template<int M, int Mx, int K, bool filter>
+ template<int M, int K, bool filter>
struct OrientedDiscMiIntersectorK
{
typedef PointMi<M> Primitive;
@@ -221,10 +221,10 @@ namespace embree
const Points* geom = context->scene->get<Points>(Disc.geomID());
Vec4vf<M> v0; Vec3vf<M> n0;
Disc.gather(v0, n0, geom);
- const vbool<Mx> valid = Disc.template valid<Mx>();
- DiscIntersectorK<Mx, K>::intersect(
+ const vbool<M> valid = Disc.valid();
+ DiscIntersectorK<M, K>::intersect(
valid, ray, k, context, geom, pre, v0, n0,
- Intersect1KEpilogM<M, Mx, K, filter>(ray, k, context, Disc.geomID(), Disc.primID()));
+ Intersect1KEpilogM<M, K, filter>(ray, k, context, Disc.geomID(), Disc.primID()));
}
static __forceinline bool occluded(
@@ -234,14 +234,14 @@ namespace embree
const Points* geom = context->scene->get<Points>(Disc.geomID());
Vec4vf<M> v0; Vec3vf<M> n0;
Disc.gather(v0, n0, geom);
- const vbool<Mx> valid = Disc.template valid<Mx>();
- return DiscIntersectorK<Mx, K>::intersect(
+ const vbool<M> valid = Disc.valid();
+ return DiscIntersectorK<M, K>::intersect(
valid, ray, k, context, geom, pre, v0, n0,
- Occluded1KEpilogM<M, Mx, K, filter>(ray, k, context, Disc.geomID(), Disc.primID()));
+ Occluded1KEpilogM<M, K, filter>(ray, k, context, Disc.geomID(), Disc.primID()));
}
};
- template<int M, int Mx, int K, bool filter>
+ template<int M, int K, bool filter>
struct OrientedDiscMiMBIntersectorK
{
typedef PointMi<M> Primitive;
@@ -254,10 +254,10 @@ namespace embree
const Points* geom = context->scene->get<Points>(Disc.geomID());
Vec4vf<M> v0; Vec3vf<M> n0;
Disc.gather(v0, n0, geom, ray.time()[k]);
- const vbool<Mx> valid = Disc.template valid<Mx>();
- DiscIntersectorK<Mx, K>::intersect(
+ const vbool<M> valid = Disc.valid();
+ DiscIntersectorK<M, K>::intersect(
valid, ray, k, context, geom, pre, v0, n0,
- Intersect1KEpilogM<M, Mx, K, filter>(ray, k, context, Disc.geomID(), Disc.primID()));
+ Intersect1KEpilogM<M, K, filter>(ray, k, context, Disc.geomID(), Disc.primID()));
}
static __forceinline bool occluded(
@@ -267,10 +267,10 @@ namespace embree
const Points* geom = context->scene->get<Points>(Disc.geomID());
Vec4vf<M> v0; Vec3vf<M> n0;
Disc.gather(v0, n0, geom, ray.time()[k]);
- const vbool<Mx> valid = Disc.template valid<Mx>();
- return DiscIntersectorK<Mx, K>::intersect(
+ const vbool<M> valid = Disc.valid();
+ return DiscIntersectorK<M, K>::intersect(
valid, ray, k, context, geom, pre, v0, n0,
- Occluded1KEpilogM<M, Mx, K, filter>(ray, k, context, Disc.geomID(), Disc.primID()));
+ Occluded1KEpilogM<M, K, filter>(ray, k, context, Disc.geomID(), Disc.primID()));
}
};
} // namespace isa
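
Note: the disci_intersector.h hunks above all apply one refactor — the extra SIMD-width template parameter Mx is dropped, so validity masks and epilogs are instantiated with the primitive width M directly. The following is a minimal standalone sketch of that pattern; LaneMask, DiscIntersectorSketch and validMask are illustrative stand-ins, not Embree code.

    // A stand-in for Embree's vbool<M>: a fixed-width lane mask (illustrative only).
    template<int M>
    struct LaneMask { bool lanes[M]; };

    // Before the change, intersectors carried two width parameters, e.g.
    //   template<int M, int Mx, bool filter> struct DiscMiIntersector1;
    // and built a vbool<Mx> mask via Disc.template valid<Mx>().
    // After the change a single width M is used everywhere:
    template<int M, bool filter>
    struct DiscIntersectorSketch {
        // Mirrors PointMi<M>/LineMi<M>::valid(): a lane is active while its primID is not -1.
        static LaneMask<M> validMask(const unsigned (&primIDs)[M]) {
            LaneMask<M> mask{};
            for (int i = 0; i < M; ++i)
                mask.lanes[i] = (primIDs[i] != unsigned(-1));
            return mask;
        }
    };

For example, DiscIntersectorSketch<4, true>::validMask(ids) yields a 4-lane mask, matching how the epilogs above are now instantiated as Intersect1EpilogM<M, filter> rather than Intersect1EpilogM<M, Mx, filter>.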
diff --git a/thirdparty/embree-aarch64/kernels/geometry/filter.h b/thirdparty/embree/kernels/geometry/filter.h
index 4cdf7a395a..3b4d924ea7 100644
--- a/thirdparty/embree-aarch64/kernels/geometry/filter.h
+++ b/thirdparty/embree/kernels/geometry/filter.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
diff --git a/thirdparty/embree-aarch64/kernels/geometry/grid_intersector.h b/thirdparty/embree/kernels/geometry/grid_intersector.h
index 46a0af0827..9c59cef119 100644
--- a/thirdparty/embree-aarch64/kernels/geometry/grid_intersector.h
+++ b/thirdparty/embree/kernels/geometry/grid_intersector.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
diff --git a/thirdparty/embree-aarch64/kernels/geometry/grid_soa.h b/thirdparty/embree/kernels/geometry/grid_soa.h
index d3b275586c..cea90aedf6 100644
--- a/thirdparty/embree-aarch64/kernels/geometry/grid_soa.h
+++ b/thirdparty/embree/kernels/geometry/grid_soa.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
@@ -41,7 +41,7 @@ namespace embree
}
const size_t gridBytes = 4*size_t(width)*size_t(height)*sizeof(float);
size_t rootBytes = time_steps*sizeof(BVH4::NodeRef);
-#if !defined(__X86_64__) && !defined(__aarch64__)
+#if !defined(__64BIT__)
rootBytes += 4; // We read 2 elements behind the grid. As we store at least 8 root bytes after the grid we are fine in 64 bit mode. But in 32 bit mode we have to do additional padding.
#endif
void* data = alloc(offsetof(GridSOA,data)+bvhBytes+time_steps*gridBytes+rootBytes);
@@ -62,8 +62,8 @@ namespace embree
__forceinline const BVH4::NodeRef& root(size_t t = 0) const { return (BVH4::NodeRef&)data[rootOffset + t*sizeof(BVH4::NodeRef)]; }
/*! returns pointer to BVH array */
- __forceinline int8_t* bvhData() { return &data[0]; }
- __forceinline const int8_t* bvhData() const { return &data[0]; }
+ __forceinline char* bvhData() { return &data[0]; }
+ __forceinline const char* bvhData() const { return &data[0]; }
/*! returns pointer to Grid array */
__forceinline float* gridData(size_t t = 0) { return (float*) &data[gridOffset + t*gridBytes]; }
@@ -132,7 +132,7 @@ namespace embree
__forceinline MapUV(const float* const grid_uv, size_t line_offset, const size_t lines)
: grid_uv(grid_uv), line_offset(line_offset), lines(lines) {}
- __forceinline void operator() (vfloat& u, vfloat& v) const {
+ __forceinline void operator() (vfloat& u, vfloat& v, Vec3<vfloat>& Ng) const {
const Vec3<vfloat> tri_v012_uv = Loader::gather(grid_uv,line_offset,lines);
const Vec2<vfloat> uv0 = GridSOA::decodeUV(tri_v012_uv[0]);
const Vec2<vfloat> uv1 = GridSOA::decodeUV(tri_v012_uv[1]);
@@ -253,7 +253,7 @@ namespace embree
public:
BVH4::NodeRef troot;
-#if !defined(__X86_64__) && !defined(__aarch64__)
+#if !defined(__64BIT__)
unsigned align1;
#endif
unsigned time_steps;
@@ -269,7 +269,7 @@ namespace embree
unsigned gridBytes;
unsigned rootOffset;
- int8_t data[1]; //!< after the struct we first store the BVH, then the grid, and finally the roots
+ char data[1]; //!< after the struct we first store the BVH, then the grid, and finally the roots
};
}
}
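
Note: the grid_soa.h hunk above replaces the x86_64/aarch64 check with a single __64BIT__ test and keeps the 32-bit padding described in the source comment. A small standalone sketch of that allocation-size computation follows; gridSOAAllocSize and its parameters are hypothetical stand-ins for the real GridSOA::create() logic, and sizeof(void*) stands in for sizeof(BVH4::NodeRef).

    #include <cstddef>

    // Illustrative only: mirrors the size computation shown in the hunk above.
    static size_t gridSOAAllocSize(size_t width, size_t height, size_t time_steps,
                                   size_t bvhBytes, size_t headerBytes) {
        const size_t gridBytes = 4 * width * height * sizeof(float); // 4 floats per grid vertex
        size_t rootBytes = time_steps * sizeof(void*);                // one BVH4 root per time step
        if (sizeof(void*) == 4)   // 32-bit: grid loads read 2 floats past the end of the grid,
            rootBytes += 4;       // so pad to guarantee at least 8 bytes behind it
        return headerBytes + bvhBytes + time_steps * gridBytes + rootBytes;
    }

On a 64-bit build the per-time-step roots already provide 8 bytes after the grid, which is why the padding is only applied when the pointer size is 4 bytes.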
diff --git a/thirdparty/embree-aarch64/kernels/geometry/grid_soa_intersector1.h b/thirdparty/embree/kernels/geometry/grid_soa_intersector1.h
index 2ed922a5ae..8fbf0d4bdf 100644
--- a/thirdparty/embree-aarch64/kernels/geometry/grid_soa_intersector1.h
+++ b/thirdparty/embree/kernels/geometry/grid_soa_intersector1.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
diff --git a/thirdparty/embree-aarch64/kernels/geometry/grid_soa_intersector_packet.h b/thirdparty/embree/kernels/geometry/grid_soa_intersector_packet.h
index 41d66e1e28..14cacab5fe 100644
--- a/thirdparty/embree-aarch64/kernels/geometry/grid_soa_intersector_packet.h
+++ b/thirdparty/embree/kernels/geometry/grid_soa_intersector_packet.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
@@ -20,7 +20,7 @@ namespace embree
__forceinline MapUV0(const float* const grid_uv, size_t ofs00, size_t ofs01, size_t ofs10, size_t ofs11)
: grid_uv(grid_uv), ofs00(ofs00), ofs01(ofs01), ofs10(ofs10), ofs11(ofs11) {}
- __forceinline void operator() (vfloat<K>& u, vfloat<K>& v) const {
+ __forceinline void operator() (vfloat<K>& u, vfloat<K>& v, Vec3vf<K>& Ng) const {
const vfloat<K> uv00(grid_uv[ofs00]);
const vfloat<K> uv01(grid_uv[ofs01]);
const vfloat<K> uv10(grid_uv[ofs10]);
@@ -42,7 +42,7 @@ namespace embree
__forceinline MapUV1(const float* const grid_uv, size_t ofs00, size_t ofs01, size_t ofs10, size_t ofs11)
: grid_uv(grid_uv), ofs00(ofs00), ofs01(ofs01), ofs10(ofs10), ofs11(ofs11) {}
- __forceinline void operator() (vfloat<K>& u, vfloat<K>& v) const {
+ __forceinline void operator() (vfloat<K>& u, vfloat<K>& v, Vec3vf<K>& Ng) const {
const vfloat<K> uv00(grid_uv[ofs00]);
const vfloat<K> uv01(grid_uv[ofs01]);
const vfloat<K> uv10(grid_uv[ofs10]);
@@ -222,7 +222,7 @@ namespace embree
static __forceinline void intersect(const vbool<K>& valid_i, Precalculations& pre, RayHitK<K>& ray, IntersectContext* context, const Primitive* prim, size_t& lazy_node)
{
vfloat<K> vftime;
- vint<K> vitime = getTimeSegment(ray.time(), vfloat<K>((float)(pre.grid->time_steps-1)), vftime);
+ vint<K> vitime = getTimeSegment<K>(ray.time(), vfloat<K>((float)(pre.grid->time_steps-1)), vftime);
vbool<K> valid1 = valid_i;
while (any(valid1)) {
@@ -282,7 +282,7 @@ namespace embree
static __forceinline vbool<K> occluded(const vbool<K>& valid_i, Precalculations& pre, RayK<K>& ray, IntersectContext* context, const Primitive* prim, size_t& lazy_node)
{
vfloat<K> vftime;
- vint<K> vitime = getTimeSegment(ray.time(), vfloat<K>((float)(pre.grid->time_steps-1)), vftime);
+ vint<K> vitime = getTimeSegment<K>(ray.time(), vfloat<K>((float)(pre.grid->time_steps-1)), vftime);
vbool<K> valid_o = valid_i;
vbool<K> valid1 = valid_i;
diff --git a/thirdparty/embree-aarch64/kernels/geometry/instance.h b/thirdparty/embree/kernels/geometry/instance.h
index 66893d581f..7c0e7e0f49 100644
--- a/thirdparty/embree-aarch64/kernels/geometry/instance.h
+++ b/thirdparty/embree/kernels/geometry/instance.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
diff --git a/thirdparty/embree-aarch64/kernels/geometry/instance_intersector.h b/thirdparty/embree/kernels/geometry/instance_intersector.h
index 91731a39c5..28a7b728e5 100644
--- a/thirdparty/embree-aarch64/kernels/geometry/instance_intersector.h
+++ b/thirdparty/embree/kernels/geometry/instance_intersector.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
diff --git a/thirdparty/embree-aarch64/kernels/geometry/intersector_epilog.h b/thirdparty/embree/kernels/geometry/intersector_epilog.h
index 0df49dd6e9..7bf134cc54 100644
--- a/thirdparty/embree-aarch64/kernels/geometry/intersector_epilog.h
+++ b/thirdparty/embree/kernels/geometry/intersector_epilog.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
@@ -13,7 +13,7 @@ namespace embree
{
template<int M>
struct UVIdentity {
- __forceinline void operator() (vfloat<M>& u, vfloat<M>& v) const {}
+ __forceinline void operator() (vfloat<M>& u, vfloat<M>& v, Vec3vf<M>& Ng) const {}
};
@@ -63,7 +63,7 @@ namespace embree
ray.v = hit.v;
ray.primID = primID;
ray.geomID = geomID;
- instance_id_stack::copy(context->user->instID, ray.instID);
+ instance_id_stack::copy_UU(context->user->instID, ray.instID);
return true;
}
};
@@ -162,7 +162,7 @@ namespace embree
ray.v[k] = hit.v;
ray.primID[k] = primID;
ray.geomID[k] = geomID;
- instance_id_stack::copy<const unsigned*, vuint<K>*, const size_t&>(context->user->instID, ray.instID, k);
+ instance_id_stack::copy_UV<K>(context->user->instID, ray.instID, k);
return true;
}
};
@@ -211,7 +211,7 @@ namespace embree
}
};
- template<int M, int Mx, bool filter>
+ template<int M, bool filter>
struct Intersect1EpilogM
{
RayHit& ray;
@@ -226,11 +226,10 @@ namespace embree
: ray(ray), context(context), geomIDs(geomIDs), primIDs(primIDs) {}
template<typename Hit>
- __forceinline bool operator() (const vbool<Mx>& valid_i, Hit& hit) const
+ __forceinline bool operator() (const vbool<M>& valid_i, Hit& hit) const
{
Scene* scene MAYBE_UNUSED = context->scene;
- vbool<Mx> valid = valid_i;
- if (Mx > M) valid &= (1<<M)-1;
+ vbool<M> valid = valid_i;
hit.finalize();
size_t i = select_min(valid,hit.vt);
unsigned int geomID = geomIDs[i];
@@ -287,94 +286,13 @@ namespace embree
ray.v = uv.y;
ray.primID = primIDs[i];
ray.geomID = geomID;
- instance_id_stack::copy(context->user->instID, ray.instID);
+ instance_id_stack::copy_UU(context->user->instID, ray.instID);
return true;
}
};
-#if 0 && defined(__AVX512F__) // do not enable, this reduced frequency for BVH4
template<int M, bool filter>
- struct Intersect1EpilogM<M,16,filter>
- {
- static const size_t Mx = 16;
- RayHit& ray;
- IntersectContext* context;
- const vuint<M>& geomIDs;
- const vuint<M>& primIDs;
-
- __forceinline Intersect1EpilogM(RayHit& ray,
- IntersectContext* context,
- const vuint<M>& geomIDs,
- const vuint<M>& primIDs)
- : ray(ray), context(context), geomIDs(geomIDs), primIDs(primIDs) {}
-
- template<typename Hit>
- __forceinline bool operator() (const vbool<Mx>& valid_i, Hit& hit) const
- {
- Scene* MAYBE_UNUSED scene = context->scene;
- vbool<Mx> valid = valid_i;
- if (Mx > M) valid &= (1<<M)-1;
- hit.finalize();
- size_t i = select_min(valid,hit.vt);
- unsigned int geomID = geomIDs[i];
-
- /* intersection filter test */
-#if defined(EMBREE_FILTER_FUNCTION) || defined(EMBREE_RAY_MASK)
- bool foundhit = false;
- goto entry;
- while (true)
- {
- if (unlikely(none(valid))) return foundhit;
- i = select_min(valid,hit.vt);
-
- geomID = geomIDs[i];
- entry:
- Geometry* geometry MAYBE_UNUSED = scene->get(geomID);
-
-#if defined(EMBREE_RAY_MASK)
- /* goto next hit if mask test fails */
- if ((geometry->mask & ray.mask) == 0) {
- clear(valid,i);
- continue;
- }
-#endif
-
-#if defined(EMBREE_FILTER_FUNCTION)
- /* call intersection filter function */
- if (filter) {
- if (unlikely(context->hasContextFilter() || geometry->hasIntersectionFilter())) {
- const Vec2f uv = hit.uv(i);
- HitK<1> h(context->user,geomID,primIDs[i],uv.x,uv.y,hit.Ng(i));
- const float old_t = ray.tfar;
- ray.tfar = hit.t(i);
- const bool found = runIntersectionFilter1(geometry,ray,context,h);
- if (!found) ray.tfar = old_t;
- foundhit |= found;
- clear(valid,i);
- valid &= hit.vt <= ray.tfar; // intersection filters may modify tfar value
- continue;
- }
- }
-#endif
- break;
- }
-#endif
-
- vbool<Mx> finalMask(((unsigned int)1 << i));
- ray.update(finalMask,hit.vt,hit.vu,hit.vv,hit.vNg.x,hit.vNg.y,hit.vNg.z,geomID,primIDs);
- instance_id_stack::foreach([&](unsigned level)
- {
- ray.instID[level] = context->user->instID[level];
- return (context->user->instID[level] != RTC_INVALID_GEOMETRY_ID);
- });
- return true;
-
- }
- };
-#endif
-
- template<int M, int Mx, bool filter>
struct Occluded1EpilogM
{
Ray& ray;
@@ -389,7 +307,7 @@ namespace embree
: ray(ray), context(context), geomIDs(geomIDs), primIDs(primIDs) {}
template<typename Hit>
- __forceinline bool operator() (const vbool<Mx>& valid_i, Hit& hit) const
+ __forceinline bool operator() (const vbool<M>& valid_i, Hit& hit) const
{
Scene* scene MAYBE_UNUSED = context->scene;
/* intersection filter test */
@@ -397,8 +315,7 @@ namespace embree
if (unlikely(filter))
hit.finalize(); /* called only once */
- vbool<Mx> valid = valid_i;
- if (Mx > M) valid &= (1<<M)-1;
+ vbool<M> valid = valid_i;
size_t m=movemask(valid);
goto entry;
while (true)
@@ -506,7 +423,7 @@ namespace embree
ray.v = uv.y;
ray.primID = primID;
ray.geomID = geomID;
- instance_id_stack::copy(context->user->instID, ray.instID);
+ instance_id_stack::copy_UU(context->user->instID, ray.instID);
return true;
}
};
@@ -616,7 +533,7 @@ namespace embree
vfloat<K>::store(valid,&ray.v,v);
vuint<K>::store(valid,&ray.primID,primID);
vuint<K>::store(valid,&ray.geomID,geomID);
- instance_id_stack::copy<const unsigned*, vuint<K>*, const vbool<K>&>(context->user->instID, ray.instID, valid);
+ instance_id_stack::copy_UV<K>(context->user->instID, ray.instID, valid);
return valid;
}
};
@@ -646,8 +563,8 @@ namespace embree
/* ray masking test */
Scene* scene MAYBE_UNUSED = context->scene;
- const unsigned int geomID = geomIDs[i];
- const unsigned int primID = primIDs[i];
+ const unsigned int geomID MAYBE_UNUSED = geomIDs[i];
+ const unsigned int primID MAYBE_UNUSED = primIDs[i];
Geometry* geometry MAYBE_UNUSED = scene->get(geomID);
#if defined(EMBREE_RAY_MASK)
valid &= (geometry->mask & ray.mask) != 0;
@@ -731,8 +648,7 @@ namespace embree
vfloat<K>::store(valid,&ray.v,v);
vuint<K>::store(valid,&ray.primID,primID);
vuint<K>::store(valid,&ray.geomID,geomID);
- instance_id_stack::copy<const unsigned*, vuint<K>*, const vbool<K>&>(context->user->instID, ray.instID, valid);
-
+ instance_id_stack::copy_UV<K>(context->user->instID, ray.instID, valid);
return valid;
}
};
@@ -788,7 +704,7 @@ namespace embree
}
};
- template<int M, int Mx, int K, bool filter>
+ template<int M, int K, bool filter>
struct Intersect1KEpilogM
{
RayHitK<K>& ray;
@@ -804,12 +720,11 @@ namespace embree
: ray(ray), k(k), context(context), geomIDs(geomIDs), primIDs(primIDs) {}
template<typename Hit>
- __forceinline bool operator() (const vbool<Mx>& valid_i, Hit& hit) const
+ __forceinline bool operator() (const vbool<M>& valid_i, Hit& hit) const
{
Scene* scene MAYBE_UNUSED = context->scene;
- vbool<Mx> valid = valid_i;
+ vbool<M> valid = valid_i;
hit.finalize();
- if (Mx > M) valid &= (1<<M)-1;
size_t i = select_min(valid,hit.vt);
assert(i<M);
unsigned int geomID = geomIDs[i];
@@ -858,9 +773,6 @@ namespace embree
#endif
assert(i<M);
/* update hit information */
-#if 0 && defined(__AVX512F__) // do not enable, this reduced frequency for BVH4
- ray.updateK(i,k,hit.vt,hit.vu,hit.vv,vfloat<Mx>(hit.vNg.x),vfloat<Mx>(hit.vNg.y),vfloat<Mx>(hit.vNg.z),geomID,vuint<Mx>(primIDs));
-#else
const Vec2f uv = hit.uv(i);
ray.tfar[k] = hit.t(i);
ray.Ng.x[k] = hit.vNg.x[i];
@@ -870,13 +782,12 @@ namespace embree
ray.v[k] = uv.y;
ray.primID[k] = primIDs[i];
ray.geomID[k] = geomID;
- instance_id_stack::copy<const unsigned*, vuint<K>*, const size_t&>(context->user->instID, ray.instID, k);
-#endif
+ instance_id_stack::copy_UV<K>(context->user->instID, ray.instID, k);
return true;
}
};
- template<int M, int Mx, int K, bool filter>
+ template<int M, int K, bool filter>
struct Occluded1KEpilogM
{
RayK<K>& ray;
@@ -892,7 +803,7 @@ namespace embree
: ray(ray), k(k), context(context), geomIDs(geomIDs), primIDs(primIDs) {}
template<typename Hit>
- __forceinline bool operator() (const vbool<Mx>& valid_i, Hit& hit) const
+ __forceinline bool operator() (const vbool<M>& valid_i, Hit& hit) const
{
Scene* scene MAYBE_UNUSED = context->scene;
@@ -901,8 +812,7 @@ namespace embree
if (unlikely(filter))
hit.finalize(); /* called only once */
- vbool<Mx> valid = valid_i;
- if (Mx > M) valid &= (1<<M)-1;
+ vbool<M> valid = valid_i;
size_t m=movemask(valid);
goto entry;
while (true)
@@ -1002,10 +912,6 @@ namespace embree
#endif
/* update hit information */
-#if 0 && defined(__AVX512F__) // do not enable, this reduced frequency for BVH4
- const Vec3fa Ng = hit.Ng(i);
- ray.updateK(i,k,hit.vt,hit.vu,hit.vv,vfloat<M>(Ng.x),vfloat<M>(Ng.y),vfloat<M>(Ng.z),geomID,vuint<M>(primID));
-#else
const Vec2f uv = hit.uv(i);
const Vec3fa Ng = hit.Ng(i);
ray.tfar[k] = hit.t(i);
@@ -1016,8 +922,7 @@ namespace embree
ray.v[k] = uv.y;
ray.primID[k] = primID;
ray.geomID[k] = geomID;
- instance_id_stack::copy<const unsigned*, vuint<K>*, const size_t&>(context->user->instID, ray.instID, k);
-#endif
+ instance_id_stack::copy_UV<K>(context->user->instID, ray.instID, k);
return true;
}
};
diff --git a/thirdparty/embree-aarch64/kernels/geometry/intersector_iterators.h b/thirdparty/embree/kernels/geometry/intersector_iterators.h
index 5c1ba5cb61..9cac1cd25c 100644
--- a/thirdparty/embree-aarch64/kernels/geometry/intersector_iterators.h
+++ b/thirdparty/embree/kernels/geometry/intersector_iterators.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
@@ -19,15 +19,15 @@ namespace embree
typedef typename Intersector::Primitive Primitive;
typedef typename Intersector::Precalculations Precalculations;
- template<int N, int Nx, bool robust>
- static __forceinline void intersect(const Accel::Intersectors* This, Precalculations& pre, RayHit& ray, IntersectContext* context, const Primitive* prim, size_t num, const TravRay<N,Nx,robust> &tray, size_t& lazy_node)
+ template<int N, bool robust>
+ static __forceinline void intersect(const Accel::Intersectors* This, Precalculations& pre, RayHit& ray, IntersectContext* context, const Primitive* prim, size_t num, const TravRay<N,robust> &tray, size_t& lazy_node)
{
for (size_t i=0; i<num; i++)
Intersector::intersect(pre,ray,context,prim[i]);
}
- template<int N, int Nx, bool robust>
- static __forceinline bool occluded(const Accel::Intersectors* This, Precalculations& pre, Ray& ray, IntersectContext* context, const Primitive* prim, size_t num, const TravRay<N,Nx,robust> &tray, size_t& lazy_node)
+ template<int N, bool robust>
+ static __forceinline bool occluded(const Accel::Intersectors* This, Precalculations& pre, Ray& ray, IntersectContext* context, const Primitive* prim, size_t num, const TravRay<N,robust> &tray, size_t& lazy_node)
{
for (size_t i=0; i<num; i++) {
if (Intersector::occluded(pre,ray,context,prim[i]))
@@ -82,16 +82,16 @@ namespace embree
return !valid0;
}
- template<int N, int Nx, bool robust>
- static __forceinline void intersect(const Accel::Intersectors* This, Precalculations& pre, RayHitK<K>& ray, size_t k, IntersectContext* context, const Primitive* prim, size_t num, const TravRay<N,Nx,robust> &tray, size_t& lazy_node)
+ template<int N, bool robust>
+ static __forceinline void intersect(const Accel::Intersectors* This, Precalculations& pre, RayHitK<K>& ray, size_t k, IntersectContext* context, const Primitive* prim, size_t num, const TravRay<N,robust> &tray, size_t& lazy_node)
{
for (size_t i=0; i<num; i++) {
Intersector::intersect(pre,ray,k,context,prim[i]);
}
}
- template<int N, int Nx, bool robust>
- static __forceinline bool occluded(const Accel::Intersectors* This, Precalculations& pre, RayK<K>& ray, size_t k, IntersectContext* context, const Primitive* prim, size_t num, const TravRay<N,Nx,robust> &tray, size_t& lazy_node)
+ template<int N, bool robust>
+ static __forceinline bool occluded(const Accel::Intersectors* This, Precalculations& pre, RayK<K>& ray, size_t k, IntersectContext* context, const Primitive* prim, size_t num, const TravRay<N,robust> &tray, size_t& lazy_node)
{
for (size_t i=0; i<num; i++) {
if (Intersector::occluded(pre,ray,k,context,prim[i]))
diff --git a/thirdparty/embree-aarch64/kernels/geometry/line_intersector.h b/thirdparty/embree/kernels/geometry/line_intersector.h
index eef5b0b1fd..41096d8794 100644
--- a/thirdparty/embree-aarch64/kernels/geometry/line_intersector.h
+++ b/thirdparty/embree/kernels/geometry/line_intersector.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
@@ -23,6 +23,10 @@ namespace embree
__forceinline Vec2f uv (const size_t i) const { return Vec2f(vu[i],vv[i]); }
__forceinline float t (const size_t i) const { return vt[i]; }
__forceinline Vec3fa Ng(const size_t i) const { return Vec3fa(vNg.x[i],vNg.y[i],vNg.z[i]); }
+
+ __forceinline Vec2vf<M> uv() const { return Vec2vf<M>(vu,vv); }
+ __forceinline vfloat<M> t () const { return vt; }
+ __forceinline Vec3vf<M> Ng() const { return vNg; }
public:
vfloat<M> vu;
@@ -36,7 +40,7 @@ namespace embree
{
typedef CurvePrecalculations1 Precalculations;
- template<typename Epilog>
+ template<typename Ray, typename Epilog>
static __forceinline bool intersect(const vbool<M>& valid_i,
Ray& ray,
IntersectContext* context,
@@ -51,8 +55,8 @@ namespace embree
LinearSpace3<Vec3vf<M>> ray_space = pre.ray_space;
const Vec3vf<M> ray_org ((Vec3fa)ray.org);
- const Vec4vf<M> v0 = enlargeRadiusToMinWidth(context,geom,ray_org,v0i);
- const Vec4vf<M> v1 = enlargeRadiusToMinWidth(context,geom,ray_org,v1i);
+ const Vec4vf<M> v0 = enlargeRadiusToMinWidth<M>(context,geom,ray_org,v0i);
+ const Vec4vf<M> v1 = enlargeRadiusToMinWidth<M>(context,geom,ray_org,v1i);
Vec4vf<M> p0(xfmVector(ray_space,v0.xyz()-ray_org), v0.w);
Vec4vf<M> p1(xfmVector(ray_space,v1.xyz()-ray_org), v1.w);
@@ -105,8 +109,8 @@ namespace embree
const Vec3vf<M> ray_org(ray.org.x[k],ray.org.y[k],ray.org.z[k]);
const Vec3vf<M> ray_dir(ray.dir.x[k],ray.dir.y[k],ray.dir.z[k]);
- const Vec4vf<M> v0 = enlargeRadiusToMinWidth(context,geom,ray_org,v0i);
- const Vec4vf<M> v1 = enlargeRadiusToMinWidth(context,geom,ray_org,v1i);
+ const Vec4vf<M> v0 = enlargeRadiusToMinWidth<M>(context,geom,ray_org,v0i);
+ const Vec4vf<M> v1 = enlargeRadiusToMinWidth<M>(context,geom,ray_org,v1i);
Vec4vf<M> p0(xfmVector(ray_space,v0.xyz()-ray_org), v0.w);
Vec4vf<M> p1(xfmVector(ray_space,v1.xyz()-ray_org), v1.w);
diff --git a/thirdparty/embree-aarch64/kernels/geometry/linei.h b/thirdparty/embree/kernels/geometry/linei.h
index a72029ca53..3ee70ac012 100644
--- a/thirdparty/embree-aarch64/kernels/geometry/linei.h
+++ b/thirdparty/embree/kernels/geometry/linei.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
@@ -49,10 +49,6 @@ namespace embree
/* Returns a mask that tells which line segments are valid */
__forceinline vbool<M> valid() const { return primIDs != vuint<M>(-1); }
- /* Returns a mask that tells which line segments are valid */
- template<int Mx>
- __forceinline vbool<Mx> valid() const { return vuint<Mx>(primIDs) != vuint<Mx>(-1); }
-
/* Returns if the specified line segment is valid */
__forceinline bool valid(const size_t i) const { assert(i<M); return primIDs[i] != -1; }
diff --git a/thirdparty/embree-aarch64/kernels/geometry/linei_intersector.h b/thirdparty/embree/kernels/geometry/linei_intersector.h
index a431796a88..5992827f5b 100644
--- a/thirdparty/embree-aarch64/kernels/geometry/linei_intersector.h
+++ b/thirdparty/embree/kernels/geometry/linei_intersector.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
@@ -11,7 +11,7 @@ namespace embree
{
namespace isa
{
- template<int M, int Mx, bool filter>
+ template<int M, bool filter>
struct FlatLinearCurveMiIntersector1
{
typedef LineMi<M> Primitive;
@@ -22,8 +22,8 @@ namespace embree
STAT3(normal.trav_prims,1,1,1);
const LineSegments* geom = context->scene->get<LineSegments>(line.geomID());
Vec4vf<M> v0,v1; line.gather(v0,v1,geom);
- const vbool<Mx> valid = line.template valid<Mx>();
- FlatLinearCurveIntersector1<Mx>::intersect(valid,ray,context,geom,pre,v0,v1,Intersect1EpilogM<M,Mx,filter>(ray,context,line.geomID(),line.primID()));
+ const vbool<M> valid = line.valid();
+ FlatLinearCurveIntersector1<M>::intersect(valid,ray,context,geom,pre,v0,v1,Intersect1EpilogM<M,filter>(ray,context,line.geomID(),line.primID()));
}
static __forceinline bool occluded(const Precalculations& pre, Ray& ray, IntersectContext* context, const Primitive& line)
@@ -31,8 +31,8 @@ namespace embree
STAT3(shadow.trav_prims,1,1,1);
const LineSegments* geom = context->scene->get<LineSegments>(line.geomID());
Vec4vf<M> v0,v1; line.gather(v0,v1,geom);
- const vbool<Mx> valid = line.template valid<Mx>();
- return FlatLinearCurveIntersector1<Mx>::intersect(valid,ray,context,geom,pre,v0,v1,Occluded1EpilogM<M,Mx,filter>(ray,context,line.geomID(),line.primID()));
+ const vbool<M> valid = line.valid();
+ return FlatLinearCurveIntersector1<M>::intersect(valid,ray,context,geom,pre,v0,v1,Occluded1EpilogM<M,filter>(ray,context,line.geomID(),line.primID()));
}
static __forceinline bool pointQuery(PointQuery* query, PointQueryContext* context, const Primitive& line)
@@ -41,7 +41,7 @@ namespace embree
}
};
- template<int M, int Mx, bool filter>
+ template<int M, bool filter>
struct FlatLinearCurveMiMBIntersector1
{
typedef LineMi<M> Primitive;
@@ -52,8 +52,8 @@ namespace embree
STAT3(normal.trav_prims,1,1,1);
const LineSegments* geom = context->scene->get<LineSegments>(line.geomID());
Vec4vf<M> v0,v1; line.gather(v0,v1,geom,ray.time());
- const vbool<Mx> valid = line.template valid<Mx>();
- FlatLinearCurveIntersector1<Mx>::intersect(valid,ray,context,geom,pre,v0,v1,Intersect1EpilogM<M,Mx,filter>(ray,context,line.geomID(),line.primID()));
+ const vbool<M> valid = line.valid();
+ FlatLinearCurveIntersector1<M>::intersect(valid,ray,context,geom,pre,v0,v1,Intersect1EpilogM<M,filter>(ray,context,line.geomID(),line.primID()));
}
static __forceinline bool occluded(const Precalculations& pre, Ray& ray, IntersectContext* context, const Primitive& line)
@@ -61,8 +61,8 @@ namespace embree
STAT3(shadow.trav_prims,1,1,1);
const LineSegments* geom = context->scene->get<LineSegments>(line.geomID());
Vec4vf<M> v0,v1; line.gather(v0,v1,geom,ray.time());
- const vbool<Mx> valid = line.template valid<Mx>();
- return FlatLinearCurveIntersector1<Mx>::intersect(valid,ray,context,geom,pre,v0,v1,Occluded1EpilogM<M,Mx,filter>(ray,context,line.geomID(),line.primID()));
+ const vbool<M> valid = line.valid();
+ return FlatLinearCurveIntersector1<M>::intersect(valid,ray,context,geom,pre,v0,v1,Occluded1EpilogM<M,filter>(ray,context,line.geomID(),line.primID()));
}
static __forceinline bool pointQuery(PointQuery* query, PointQueryContext* context, const Primitive& line)
@@ -71,7 +71,7 @@ namespace embree
}
};
- template<int M, int Mx, int K, bool filter>
+ template<int M, int K, bool filter>
struct FlatLinearCurveMiIntersectorK
{
typedef LineMi<M> Primitive;
@@ -82,8 +82,8 @@ namespace embree
STAT3(normal.trav_prims,1,1,1);
const LineSegments* geom = context->scene->get<LineSegments>(line.geomID());
Vec4vf<M> v0,v1; line.gather(v0,v1,geom);
- const vbool<Mx> valid = line.template valid<Mx>();
- FlatLinearCurveIntersectorK<Mx,K>::intersect(valid,ray,k,context,geom,pre,v0,v1,Intersect1KEpilogM<M,Mx,K,filter>(ray,k,context,line.geomID(),line.primID()));
+ const vbool<M> valid = line.valid();
+ FlatLinearCurveIntersectorK<M,K>::intersect(valid,ray,k,context,geom,pre,v0,v1,Intersect1KEpilogM<M,K,filter>(ray,k,context,line.geomID(),line.primID()));
}
static __forceinline bool occluded(const Precalculations& pre, RayK<K>& ray, size_t k, IntersectContext* context, const Primitive& line)
@@ -91,12 +91,12 @@ namespace embree
STAT3(shadow.trav_prims,1,1,1);
const LineSegments* geom = context->scene->get<LineSegments>(line.geomID());
Vec4vf<M> v0,v1; line.gather(v0,v1,geom);
- const vbool<Mx> valid = line.template valid<Mx>();
- return FlatLinearCurveIntersectorK<Mx,K>::intersect(valid,ray,k,context,geom,pre,v0,v1,Occluded1KEpilogM<M,Mx,K,filter>(ray,k,context,line.geomID(),line.primID()));
+ const vbool<M> valid = line.valid();
+ return FlatLinearCurveIntersectorK<M,K>::intersect(valid,ray,k,context,geom,pre,v0,v1,Occluded1KEpilogM<M,K,filter>(ray,k,context,line.geomID(),line.primID()));
}
};
- template<int M, int Mx, int K, bool filter>
+ template<int M, int K, bool filter>
struct FlatLinearCurveMiMBIntersectorK
{
typedef LineMi<M> Primitive;
@@ -107,8 +107,8 @@ namespace embree
STAT3(normal.trav_prims,1,1,1);
const LineSegments* geom = context->scene->get<LineSegments>(line.geomID());
Vec4vf<M> v0,v1; line.gather(v0,v1,geom,ray.time()[k]);
- const vbool<Mx> valid = line.template valid<Mx>();
- FlatLinearCurveIntersectorK<Mx,K>::intersect(valid,ray,k,context,geom,pre,v0,v1,Intersect1KEpilogM<M,Mx,K,filter>(ray,k,context,line.geomID(),line.primID()));
+ const vbool<M> valid = line.valid();
+ FlatLinearCurveIntersectorK<M,K>::intersect(valid,ray,k,context,geom,pre,v0,v1,Intersect1KEpilogM<M,K,filter>(ray,k,context,line.geomID(),line.primID()));
}
static __forceinline bool occluded(const Precalculations& pre, RayK<K>& ray, size_t k, IntersectContext* context, const Primitive& line)
@@ -116,8 +116,8 @@ namespace embree
STAT3(shadow.trav_prims,1,1,1);
const LineSegments* geom = context->scene->get<LineSegments>(line.geomID());
Vec4vf<M> v0,v1; line.gather(v0,v1,geom,ray.time()[k]);
- const vbool<Mx> valid = line.template valid<Mx>();
- return FlatLinearCurveIntersectorK<Mx,K>::intersect(valid,ray,k,context,geom,pre,v0,v1,Occluded1KEpilogM<M,Mx,K,filter>(ray,k,context,line.geomID(),line.primID()));
+ const vbool<M> valid = line.valid();
+ return FlatLinearCurveIntersectorK<M,K>::intersect(valid,ray,k,context,geom,pre,v0,v1,Occluded1KEpilogM<M,K,filter>(ray,k,context,line.geomID(),line.primID()));
}
};
}
diff --git a/thirdparty/embree-aarch64/kernels/geometry/object.h b/thirdparty/embree/kernels/geometry/object.h
index f26391de52..2a61829ffd 100644
--- a/thirdparty/embree-aarch64/kernels/geometry/object.h
+++ b/thirdparty/embree/kernels/geometry/object.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
diff --git a/thirdparty/embree-aarch64/kernels/geometry/object_intersector.h b/thirdparty/embree/kernels/geometry/object_intersector.h
index 97882e0e59..11ceb2f7fe 100644
--- a/thirdparty/embree-aarch64/kernels/geometry/object_intersector.h
+++ b/thirdparty/embree/kernels/geometry/object_intersector.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
diff --git a/thirdparty/embree-aarch64/kernels/geometry/plane.h b/thirdparty/embree/kernels/geometry/plane.h
index ebe45db558..e447122eab 100644
--- a/thirdparty/embree-aarch64/kernels/geometry/plane.h
+++ b/thirdparty/embree/kernels/geometry/plane.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
diff --git a/thirdparty/embree-aarch64/kernels/geometry/pointi.h b/thirdparty/embree/kernels/geometry/pointi.h
index 4ba298e86b..bed04116b0 100644
--- a/thirdparty/embree-aarch64/kernels/geometry/pointi.h
+++ b/thirdparty/embree/kernels/geometry/pointi.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
@@ -61,11 +61,6 @@ namespace embree
return vint<M>(step) < vint<M>(numPrimitives);
}
- /* Returns a mask that tells which line segments are valid */
- template<int Mx> __forceinline vbool<Mx> valid() const {
- return vint<Mx>(step) < vint<Mx>(numPrimitives);
- }
-
/* Returns if the specified line segment is valid */
__forceinline bool valid(const size_t i) const
{
diff --git a/thirdparty/embree-aarch64/kernels/geometry/primitive.h b/thirdparty/embree/kernels/geometry/primitive.h
index 41e5b2b304..608d981dd7 100644
--- a/thirdparty/embree-aarch64/kernels/geometry/primitive.h
+++ b/thirdparty/embree/kernels/geometry/primitive.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
diff --git a/thirdparty/embree-aarch64/kernels/geometry/primitive4.cpp b/thirdparty/embree/kernels/geometry/primitive4.cpp
index f93574c9c8..9c953c5d35 100644
--- a/thirdparty/embree-aarch64/kernels/geometry/primitive4.cpp
+++ b/thirdparty/embree/kernels/geometry/primitive4.cpp
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#include "primitive.h"
diff --git a/thirdparty/embree-aarch64/kernels/geometry/quad_intersector.h b/thirdparty/embree/kernels/geometry/quad_intersector.h
index 57ff4e60e5..93c9526912 100644
--- a/thirdparty/embree-aarch64/kernels/geometry/quad_intersector.h
+++ b/thirdparty/embree/kernels/geometry/quad_intersector.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
diff --git a/thirdparty/embree-aarch64/kernels/geometry/quad_intersector_moeller.h b/thirdparty/embree/kernels/geometry/quad_intersector_moeller.h
index 74e8c7720c..3abc9d6f70 100644
--- a/thirdparty/embree-aarch64/kernels/geometry/quad_intersector_moeller.h
+++ b/thirdparty/embree/kernels/geometry/quad_intersector_moeller.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
@@ -126,16 +126,17 @@ namespace embree
const Vec3vf<M>& v0, const Vec3vf<M>& v1, const Vec3vf<M>& v2, const Vec3vf<M>& v3,
const vuint<M>& geomID, const vuint<M>& primID) const
{
- MoellerTrumboreHitM<M> hit;
+ UVIdentity<M> mapUV;
+ MoellerTrumboreHitM<M,UVIdentity<M>> hit(mapUV);
MoellerTrumboreIntersector1<M> intersector(ray,nullptr);
- Intersect1EpilogM<M,M,filter> epilog(ray,context,geomID,primID);
+ Intersect1EpilogM<M,filter> epilog(ray,context,geomID,primID);
/* intersect first triangle */
- if (intersector.intersect(ray,v0,v1,v3,hit))
+ if (intersector.intersect(ray,v0,v1,v3,mapUV,hit))
epilog(hit.valid,hit);
/* intersect second triangle */
- if (intersector.intersect(ray,v2,v3,v1,hit))
+ if (intersector.intersect(ray,v2,v3,v1,mapUV,hit))
{
hit.U = hit.absDen - hit.U;
hit.V = hit.absDen - hit.V;
@@ -147,19 +148,20 @@ namespace embree
const Vec3vf<M>& v0, const Vec3vf<M>& v1, const Vec3vf<M>& v2, const Vec3vf<M>& v3,
const vuint<M>& geomID, const vuint<M>& primID) const
{
- MoellerTrumboreHitM<M> hit;
+ UVIdentity<M> mapUV;
+ MoellerTrumboreHitM<M,UVIdentity<M>> hit(mapUV);
MoellerTrumboreIntersector1<M> intersector(ray,nullptr);
- Occluded1EpilogM<M,M,filter> epilog(ray,context,geomID,primID);
+ Occluded1EpilogM<M,filter> epilog(ray,context,geomID,primID);
/* intersect first triangle */
- if (intersector.intersect(ray,v0,v1,v3,hit))
+ if (intersector.intersect(ray,v0,v1,v3,mapUV,hit))
{
if (epilog(hit.valid,hit))
return true;
}
/* intersect second triangle */
- if (intersector.intersect(ray,v2,v3,v1,hit))
+ if (intersector.intersect(ray,v2,v3,v1,mapUV,hit))
{
hit.U = hit.absDen - hit.U;
hit.V = hit.absDen - hit.V;
@@ -170,70 +172,7 @@ namespace embree
}
};
-#if defined(__AVX512ER__) // KNL
-
- /*! Intersects 4 quads with 1 ray using AVX512 */
- template<bool filter>
- struct QuadMIntersector1MoellerTrumbore<4,filter>
- {
- __forceinline QuadMIntersector1MoellerTrumbore() {}
-
- __forceinline QuadMIntersector1MoellerTrumbore(const Ray& ray, const void* ptr) {}
-
- template<typename Epilog>
- __forceinline bool intersect(Ray& ray, const Vec3vf4& v0, const Vec3vf4& v1, const Vec3vf4& v2, const Vec3vf4& v3, const Epilog& epilog) const
- {
- const Vec3vf16 vtx0(select(0x0f0f,vfloat16(v0.x),vfloat16(v2.x)),
- select(0x0f0f,vfloat16(v0.y),vfloat16(v2.y)),
- select(0x0f0f,vfloat16(v0.z),vfloat16(v2.z)));
-#if !defined(EMBREE_BACKFACE_CULLING)
- const Vec3vf16 vtx1(vfloat16(v1.x),vfloat16(v1.y),vfloat16(v1.z));
- const Vec3vf16 vtx2(vfloat16(v3.x),vfloat16(v3.y),vfloat16(v3.z));
-#else
- const Vec3vf16 vtx1(select(0x0f0f,vfloat16(v1.x),vfloat16(v3.x)),
- select(0x0f0f,vfloat16(v1.y),vfloat16(v3.y)),
- select(0x0f0f,vfloat16(v1.z),vfloat16(v3.z)));
- const Vec3vf16 vtx2(select(0x0f0f,vfloat16(v3.x),vfloat16(v1.x)),
- select(0x0f0f,vfloat16(v3.y),vfloat16(v1.y)),
- select(0x0f0f,vfloat16(v3.z),vfloat16(v1.z)));
-#endif
- const vbool16 flags(0xf0f0);
-
- MoellerTrumboreHitM<16> hit;
- MoellerTrumboreIntersector1<16> intersector(ray,nullptr);
- if (unlikely(intersector.intersect(ray,vtx0,vtx1,vtx2,hit)))
- {
- vfloat16 U = hit.U, V = hit.V, absDen = hit.absDen;
-#if !defined(EMBREE_BACKFACE_CULLING)
- hit.U = select(flags,absDen-V,U);
- hit.V = select(flags,absDen-U,V);
- hit.vNg *= select(flags,vfloat16(-1.0f),vfloat16(1.0f)); // FIXME: use XOR
-#else
- hit.U = select(flags,absDen-U,U);
- hit.V = select(flags,absDen-V,V);
-#endif
- if (likely(epilog(hit.valid,hit)))
- return true;
- }
- return false;
- }
-
- __forceinline bool intersect(RayHit& ray, IntersectContext* context,
- const Vec3vf4& v0, const Vec3vf4& v1, const Vec3vf4& v2, const Vec3vf4& v3,
- const vuint4& geomID, const vuint4& primID) const
- {
- return intersect(ray,v0,v1,v2,v3,Intersect1EpilogM<8,16,filter>(ray,context,vuint8(geomID),vuint8(primID)));
- }
-
- __forceinline bool occluded(Ray& ray, IntersectContext* context,
- const Vec3vf4& v0, const Vec3vf4& v1, const Vec3vf4& v2, const Vec3vf4& v3,
- const vuint4& geomID, const vuint4& primID) const
- {
- return intersect(ray,v0,v1,v2,v3,Occluded1EpilogM<8,16,filter>(ray,context,vuint8(geomID),vuint8(primID)));
- }
- };
-
-#elif defined(__AVX__)
+#if defined(__AVX__)
/*! Intersects 4 quads with 1 ray using AVX */
template<bool filter>
@@ -254,10 +193,11 @@ namespace embree
const Vec3vf8 vtx1(vfloat8(v1.x,v3.x),vfloat8(v1.y,v3.y),vfloat8(v1.z,v3.z));
const Vec3vf8 vtx2(vfloat8(v3.x,v1.x),vfloat8(v3.y,v1.y),vfloat8(v3.z,v1.z));
#endif
- MoellerTrumboreHitM<8> hit;
+ UVIdentity<8> mapUV;
+ MoellerTrumboreHitM<8,UVIdentity<8>> hit(mapUV);
MoellerTrumboreIntersector1<8> intersector(ray,nullptr);
const vbool8 flags(0,0,0,0,1,1,1,1);
- if (unlikely(intersector.intersect(ray,vtx0,vtx1,vtx2,hit)))
+ if (unlikely(intersector.intersect(ray,vtx0,vtx1,vtx2,mapUV,hit)))
{
vfloat8 U = hit.U, V = hit.V, absDen = hit.absDen;
@@ -279,14 +219,14 @@ namespace embree
const Vec3vf4& v0, const Vec3vf4& v1, const Vec3vf4& v2, const Vec3vf4& v3,
const vuint4& geomID, const vuint4& primID) const
{
- return intersect(ray,v0,v1,v2,v3,Intersect1EpilogM<8,8,filter>(ray,context,vuint8(geomID),vuint8(primID)));
+ return intersect(ray,v0,v1,v2,v3,Intersect1EpilogM<8,filter>(ray,context,vuint8(geomID),vuint8(primID)));
}
__forceinline bool occluded(Ray& ray, IntersectContext* context,
const Vec3vf4& v0, const Vec3vf4& v1, const Vec3vf4& v2, const Vec3vf4& v3,
const vuint4& geomID, const vuint4& primID) const
{
- return intersect(ray,v0,v1,v2,v3,Occluded1EpilogM<8,8,filter>(ray,context,vuint8(geomID),vuint8(primID)));
+ return intersect(ray,v0,v1,v2,v3,Occluded1EpilogM<8,filter>(ray,context,vuint8(geomID),vuint8(primID)));
}
};
@@ -353,7 +293,7 @@ namespace embree
const Vec3vf<M> e1 = v0-v1;
const Vec3vf<M> e2 = v2-v0;
const Vec3vf<M> Ng = cross(e2,e1);
- return intersect(ray,k,v0,e1,e2,Ng,flags,epilog);
+ return intersect<M,K>(ray,k,v0,e1,e2,Ng,flags,epilog);
}
};
@@ -458,70 +398,24 @@ namespace embree
const Vec3vf<M>& v0, const Vec3vf<M>& v1, const Vec3vf<M>& v2, const Vec3vf<M>& v3,
const vuint<M>& geomID, const vuint<M>& primID) const
{
- Intersect1KEpilogM<M,M,K,filter> epilog(ray,k,context,geomID,primID);
- MoellerTrumboreIntersector1KTriangleM::intersect1(ray,k,v0,v1,v3,vbool<M>(false),epilog);
- MoellerTrumboreIntersector1KTriangleM::intersect1(ray,k,v2,v3,v1,vbool<M>(true ),epilog);
+ Intersect1KEpilogM<M,K,filter> epilog(ray,k,context,geomID,primID);
+ MoellerTrumboreIntersector1KTriangleM::intersect1<M,K>(ray,k,v0,v1,v3,vbool<M>(false),epilog);
+ MoellerTrumboreIntersector1KTriangleM::intersect1<M,K>(ray,k,v2,v3,v1,vbool<M>(true ),epilog);
}
__forceinline bool occluded1(RayK<K>& ray, size_t k, IntersectContext* context,
const Vec3vf<M>& v0, const Vec3vf<M>& v1, const Vec3vf<M>& v2, const Vec3vf<M>& v3,
const vuint<M>& geomID, const vuint<M>& primID) const
{
- Occluded1KEpilogM<M,M,K,filter> epilog(ray,k,context,geomID,primID);
- if (MoellerTrumboreIntersector1KTriangleM::intersect1(ray,k,v0,v1,v3,vbool<M>(false),epilog)) return true;
- if (MoellerTrumboreIntersector1KTriangleM::intersect1(ray,k,v2,v3,v1,vbool<M>(true ),epilog)) return true;
+ Occluded1KEpilogM<M,K,filter> epilog(ray,k,context,geomID,primID);
+ if (MoellerTrumboreIntersector1KTriangleM::intersect1<M,K>(ray,k,v0,v1,v3,vbool<M>(false),epilog)) return true;
+ if (MoellerTrumboreIntersector1KTriangleM::intersect1<M,K>(ray,k,v2,v3,v1,vbool<M>(true ),epilog)) return true;
return false;
}
};
-#if defined(__AVX512ER__) // KNL
-
- /*! Intersects 4 quads with 1 ray using AVX512 */
- template<int K, bool filter>
- struct QuadMIntersectorKMoellerTrumbore<4,K,filter> : public QuadMIntersectorKMoellerTrumboreBase<4,K,filter>
- {
- __forceinline QuadMIntersectorKMoellerTrumbore(const vbool<K>& valid, const RayK<K>& ray)
- : QuadMIntersectorKMoellerTrumboreBase<4,K,filter>(valid,ray) {}
-
- template<typename Epilog>
- __forceinline bool intersect1(RayK<K>& ray, size_t k,
- const Vec3vf4& v0, const Vec3vf4& v1, const Vec3vf4& v2, const Vec3vf4& v3, const Epilog& epilog) const
- {
- const Vec3vf16 vtx0(select(0x0f0f,vfloat16(v0.x),vfloat16(v2.x)),
- select(0x0f0f,vfloat16(v0.y),vfloat16(v2.y)),
- select(0x0f0f,vfloat16(v0.z),vfloat16(v2.z)));
-#if !defined(EMBREE_BACKFACE_CULLING)
- const Vec3vf16 vtx1(vfloat16(v1.x),vfloat16(v1.y),vfloat16(v1.z));
- const Vec3vf16 vtx2(vfloat16(v3.x),vfloat16(v3.y),vfloat16(v3.z));
-#else
- const Vec3vf16 vtx1(select(0x0f0f,vfloat16(v1.x),vfloat16(v3.x)),
- select(0x0f0f,vfloat16(v1.y),vfloat16(v3.y)),
- select(0x0f0f,vfloat16(v1.z),vfloat16(v3.z)));
- const Vec3vf16 vtx2(select(0x0f0f,vfloat16(v3.x),vfloat16(v1.x)),
- select(0x0f0f,vfloat16(v3.y),vfloat16(v1.y)),
- select(0x0f0f,vfloat16(v3.z),vfloat16(v1.z)));
-#endif
- const vbool16 flags(0xf0f0);
- return MoellerTrumboreIntersector1KTriangleM::intersect1(ray,k,vtx0,vtx1,vtx2,flags,epilog);
- }
-
- __forceinline bool intersect1(RayHitK<K>& ray, size_t k, IntersectContext* context,
- const Vec3vf4& v0, const Vec3vf4& v1, const Vec3vf4& v2, const Vec3vf4& v3,
- const vuint4& geomID, const vuint4& primID) const
- {
- return intersect1(ray,k,v0,v1,v2,v3,Intersect1KEpilogM<8,16,K,filter>(ray,k,context,vuint8(geomID),vuint8(primID)));
- }
-
- __forceinline bool occluded1(RayK<K>& ray, size_t k, IntersectContext* context,
- const Vec3vf4& v0, const Vec3vf4& v1, const Vec3vf4& v2, const Vec3vf4& v3,
- const vuint4& geomID, const vuint4& primID) const
- {
- return intersect1(ray,k,v0,v1,v2,v3,Occluded1KEpilogM<8,16,K,filter>(ray,k,context,vuint8(geomID),vuint8(primID)));
- }
- };
-
-#elif defined(__AVX__)
+#if defined(__AVX__)
/*! Intersects 4 quads with 1 ray using AVX */
template<int K, bool filter>
@@ -543,21 +437,21 @@ namespace embree
const Vec3vf8 vtx2(vfloat8(v3.x,v1.x),vfloat8(v3.y,v1.y),vfloat8(v3.z,v1.z));
#endif
const vbool8 flags(0,0,0,0,1,1,1,1);
- return MoellerTrumboreIntersector1KTriangleM::intersect1(ray,k,vtx0,vtx1,vtx2,flags,epilog);
+ return MoellerTrumboreIntersector1KTriangleM::intersect1<8,K>(ray,k,vtx0,vtx1,vtx2,flags,epilog);
}
__forceinline bool intersect1(RayHitK<K>& ray, size_t k, IntersectContext* context,
const Vec3vf4& v0, const Vec3vf4& v1, const Vec3vf4& v2, const Vec3vf4& v3,
const vuint4& geomID, const vuint4& primID) const
{
- return intersect1(ray,k,v0,v1,v2,v3,Intersect1KEpilogM<8,8,K,filter>(ray,k,context,vuint8(geomID),vuint8(primID)));
+ return intersect1(ray,k,v0,v1,v2,v3,Intersect1KEpilogM<8,K,filter>(ray,k,context,vuint8(geomID),vuint8(primID)));
}
__forceinline bool occluded1(RayK<K>& ray, size_t k, IntersectContext* context,
const Vec3vf4& v0, const Vec3vf4& v1, const Vec3vf4& v2, const Vec3vf4& v3,
const vuint4& geomID, const vuint4& primID) const
{
- return intersect1(ray,k,v0,v1,v2,v3,Occluded1KEpilogM<8,8,K,filter>(ray,k,context,vuint8(geomID),vuint8(primID)));
+ return intersect1(ray,k,v0,v1,v2,v3,Occluded1KEpilogM<8,K,filter>(ray,k,context,vuint8(geomID),vuint8(primID)));
}
};
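Editor's note: the hunks above thread a UV-mapping object (UVIdentity<8> mapUV) through the Möller–Trumbore hit and intersector. Below is a reduced, hypothetical sketch of that pattern, not Embree's actual types: the hit carries a UV policy, with identity as the default, so callers can remap barycentrics without changing the core intersector.

// Hypothetical reduced sketch (not Embree's API) of the mapUV policy above.
template<int M>
struct UVIdentity_sketch {
  void operator()(float& /*u*/, float& /*v*/) const {}  // leave barycentrics unchanged
};

template<int M, typename UVMapper>
struct HitSketch {
  UVMapper mapUV;
  float u, v;
  explicit HitSketch(const UVMapper& m) : mapUV(m), u(0.0f), v(0.0f) {}
  void finalize() { mapUV(u, v); }  // apply the policy before the epilog reads u,v
};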
diff --git a/thirdparty/embree-aarch64/kernels/geometry/quad_intersector_pluecker.h b/thirdparty/embree/kernels/geometry/quad_intersector_pluecker.h
index 7ca3aed0a0..9873ff76ac 100644
--- a/thirdparty/embree-aarch64/kernels/geometry/quad_intersector_pluecker.h
+++ b/thirdparty/embree/kernels/geometry/quad_intersector_pluecker.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
@@ -175,69 +175,23 @@ namespace embree
const Vec3vf<M>& v0, const Vec3vf<M>& v1, const Vec3vf<M>& v2, const Vec3vf<M>& v3,
const vuint<M>& geomID, const vuint<M>& primID) const
{
- Intersect1EpilogM<M,M,filter> epilog(ray,context,geomID,primID);
- PlueckerIntersectorTriangle1::intersect(ray,v0,v1,v3,vbool<M>(false),epilog);
- PlueckerIntersectorTriangle1::intersect(ray,v2,v3,v1,vbool<M>(true),epilog);
+ Intersect1EpilogM<M,filter> epilog(ray,context,geomID,primID);
+ PlueckerIntersectorTriangle1::intersect<M>(ray,v0,v1,v3,vbool<M>(false),epilog);
+ PlueckerIntersectorTriangle1::intersect<M>(ray,v2,v3,v1,vbool<M>(true),epilog);
}
__forceinline bool occluded(Ray& ray, IntersectContext* context,
const Vec3vf<M>& v0, const Vec3vf<M>& v1, const Vec3vf<M>& v2, const Vec3vf<M>& v3,
const vuint<M>& geomID, const vuint<M>& primID) const
{
- Occluded1EpilogM<M,M,filter> epilog(ray,context,geomID,primID);
- if (PlueckerIntersectorTriangle1::intersect(ray,v0,v1,v3,vbool<M>(false),epilog)) return true;
- if (PlueckerIntersectorTriangle1::intersect(ray,v2,v3,v1,vbool<M>(true ),epilog)) return true;
+ Occluded1EpilogM<M,filter> epilog(ray,context,geomID,primID);
+ if (PlueckerIntersectorTriangle1::intersect<M>(ray,v0,v1,v3,vbool<M>(false),epilog)) return true;
+ if (PlueckerIntersectorTriangle1::intersect<M>(ray,v2,v3,v1,vbool<M>(true ),epilog)) return true;
return false;
}
};
-#if defined(__AVX512ER__) // KNL
-
- /*! Intersects 4 quads with 1 ray using AVX512 */
- template<bool filter>
- struct QuadMIntersector1Pluecker<4,filter>
- {
- __forceinline QuadMIntersector1Pluecker() {}
-
- __forceinline QuadMIntersector1Pluecker(const Ray& ray, const void* ptr) {}
-
- template<typename Epilog>
- __forceinline bool intersect(Ray& ray, const Vec3vf4& v0, const Vec3vf4& v1, const Vec3vf4& v2, const Vec3vf4& v3, const Epilog& epilog) const
- {
- const Vec3vf16 vtx0(select(0x0f0f,vfloat16(v0.x),vfloat16(v2.x)),
- select(0x0f0f,vfloat16(v0.y),vfloat16(v2.y)),
- select(0x0f0f,vfloat16(v0.z),vfloat16(v2.z)));
-#if !defined(EMBREE_BACKFACE_CULLING)
- const Vec3vf16 vtx1(vfloat16(v1.x),vfloat16(v1.y),vfloat16(v1.z));
- const Vec3vf16 vtx2(vfloat16(v3.x),vfloat16(v3.y),vfloat16(v3.z));
-#else
- const Vec3vf16 vtx1(select(0x0f0f,vfloat16(v1.x),vfloat16(v3.x)),
- select(0x0f0f,vfloat16(v1.y),vfloat16(v3.y)),
- select(0x0f0f,vfloat16(v1.z),vfloat16(v3.z)));
- const Vec3vf16 vtx2(select(0x0f0f,vfloat16(v3.x),vfloat16(v1.x)),
- select(0x0f0f,vfloat16(v3.y),vfloat16(v1.y)),
- select(0x0f0f,vfloat16(v3.z),vfloat16(v1.z)));
-#endif
- const vbool16 flags(0xf0f0);
- return PlueckerIntersectorTriangle1::intersect(ray,vtx0,vtx1,vtx2,flags,epilog);
- }
-
- __forceinline bool intersect(RayHit& ray, IntersectContext* context,
- const Vec3vf4& v0, const Vec3vf4& v1, const Vec3vf4& v2, const Vec3vf4& v3,
- const vuint4& geomID, const vuint4& primID) const
- {
- return intersect(ray,v0,v1,v2,v3,Intersect1EpilogM<8,16,filter>(ray,context,vuint8(geomID),vuint8(primID)));
- }
-
- __forceinline bool occluded(Ray& ray, IntersectContext* context,
- const Vec3vf4& v0, const Vec3vf4& v1, const Vec3vf4& v2, const Vec3vf4& v3,
- const vuint4& geomID, const vuint4& primID) const
- {
- return intersect(ray,v0,v1,v2,v3,Occluded1EpilogM<8,16,filter>(ray,context,vuint8(geomID),vuint8(primID)));
- }
- };
-
-#elif defined(__AVX__)
+#if defined(__AVX__)
/*! Intersects 4 quads with 1 ray using AVX */
template<bool filter>
@@ -259,19 +213,19 @@ namespace embree
const Vec3vf8 vtx2(vfloat8(v3.x,v1.x),vfloat8(v3.y,v1.y),vfloat8(v3.z,v1.z));
#endif
const vbool8 flags(0,0,0,0,1,1,1,1);
- return PlueckerIntersectorTriangle1::intersect(ray,vtx0,vtx1,vtx2,flags,epilog);
+ return PlueckerIntersectorTriangle1::intersect<8>(ray,vtx0,vtx1,vtx2,flags,epilog);
}
__forceinline bool intersect(RayHit& ray, IntersectContext* context, const Vec3vf4& v0, const Vec3vf4& v1, const Vec3vf4& v2, const Vec3vf4& v3,
const vuint4& geomID, const vuint4& primID) const
{
- return intersect(ray,v0,v1,v2,v3,Intersect1EpilogM<8,8,filter>(ray,context,vuint8(geomID),vuint8(primID)));
+ return intersect(ray,v0,v1,v2,v3,Intersect1EpilogM<8,filter>(ray,context,vuint8(geomID),vuint8(primID)));
}
__forceinline bool occluded(Ray& ray, IntersectContext* context, const Vec3vf4& v0, const Vec3vf4& v1, const Vec3vf4& v2, const Vec3vf4& v3,
const vuint4& geomID, const vuint4& primID) const
{
- return intersect(ray,v0,v1,v2,v3,Occluded1EpilogM<8,8,filter>(ray,context,vuint8(geomID),vuint8(primID)));
+ return intersect(ray,v0,v1,v2,v3,Occluded1EpilogM<8,filter>(ray,context,vuint8(geomID),vuint8(primID)));
}
};
@@ -305,18 +259,19 @@ namespace embree
const Vec3vf<M> e0 = v2-v0;
const Vec3vf<M> e1 = v0-v1;
const Vec3vf<M> e2 = v1-v2;
-
+
/* perform edge tests */
const vfloat<M> U = dot(cross(e0,v2+v0),D);
const vfloat<M> V = dot(cross(e1,v0+v1),D);
const vfloat<M> W = dot(cross(e2,v1+v2),D);
+
const vfloat<M> UVW = U+V+W;
const vfloat<M> eps = float(ulp)*abs(UVW);
#if defined(EMBREE_BACKFACE_CULLING)
vbool<M> valid = max(U,V,W) <= eps;
#else
vbool<M> valid = (min(U,V,W) >= -eps) | (max(U,V,W) <= eps);
-#endif
+#endif
if (unlikely(none(valid))) return false;
/* calculate geometry normal and denominator */
@@ -423,69 +378,23 @@ namespace embree
const Vec3vf<M>& v0, const Vec3vf<M>& v1, const Vec3vf<M>& v2, const Vec3vf<M>& v3,
const vuint<M>& geomID, const vuint<M>& primID) const
{
- Intersect1KEpilogM<M,M,K,filter> epilog(ray,k,context,geomID,primID);
- PlueckerIntersector1KTriangleM::intersect1(ray,k,v0,v1,v3,vbool<M>(false),epilog);
- PlueckerIntersector1KTriangleM::intersect1(ray,k,v2,v3,v1,vbool<M>(true ),epilog);
+ Intersect1KEpilogM<M,K,filter> epilog(ray,k,context,geomID,primID);
+ PlueckerIntersector1KTriangleM::intersect1<M,K>(ray,k,v0,v1,v3,vbool<M>(false),epilog);
+ PlueckerIntersector1KTriangleM::intersect1<M,K>(ray,k,v2,v3,v1,vbool<M>(true ),epilog);
}
__forceinline bool occluded1(RayK<K>& ray, size_t k, IntersectContext* context,
const Vec3vf<M>& v0, const Vec3vf<M>& v1, const Vec3vf<M>& v2, const Vec3vf<M>& v3,
const vuint<M>& geomID, const vuint<M>& primID) const
{
- Occluded1KEpilogM<M,M,K,filter> epilog(ray,k,context,geomID,primID);
- if (PlueckerIntersector1KTriangleM::intersect1(ray,k,v0,v1,v3,vbool<M>(false),epilog)) return true;
- if (PlueckerIntersector1KTriangleM::intersect1(ray,k,v2,v3,v1,vbool<M>(true ),epilog)) return true;
+ Occluded1KEpilogM<M,K,filter> epilog(ray,k,context,geomID,primID);
+ if (PlueckerIntersector1KTriangleM::intersect1<M,K>(ray,k,v0,v1,v3,vbool<M>(false),epilog)) return true;
+ if (PlueckerIntersector1KTriangleM::intersect1<M,K>(ray,k,v2,v3,v1,vbool<M>(true ),epilog)) return true;
return false;
}
};
-#if defined(__AVX512ER__) // KNL
-
- /*! Intersects 4 quads with 1 ray using AVX512 */
- template<int K, bool filter>
- struct QuadMIntersectorKPluecker<4,K,filter> : public QuadMIntersectorKPlueckerBase<4,K,filter>
- {
- __forceinline QuadMIntersectorKPluecker(const vbool<K>& valid, const RayK<K>& ray)
- : QuadMIntersectorKPlueckerBase<4,K,filter>(valid,ray) {}
-
- template<typename Epilog>
- __forceinline bool intersect1(RayK<K>& ray, size_t k, const Vec3vf4& v0, const Vec3vf4& v1, const Vec3vf4& v2, const Vec3vf4& v3, const Epilog& epilog) const
- {
- const Vec3vf16 vtx0(select(0x0f0f,vfloat16(v0.x),vfloat16(v2.x)),
- select(0x0f0f,vfloat16(v0.y),vfloat16(v2.y)),
- select(0x0f0f,vfloat16(v0.z),vfloat16(v2.z)));
-#if !defined(EMBREE_BACKFACE_CULLING)
- const Vec3vf16 vtx1(vfloat16(v1.x),vfloat16(v1.y),vfloat16(v1.z));
- const Vec3vf16 vtx2(vfloat16(v3.x),vfloat16(v3.y),vfloat16(v3.z));
-#else
- const Vec3vf16 vtx1(select(0x0f0f,vfloat16(v1.x),vfloat16(v3.x)),
- select(0x0f0f,vfloat16(v1.y),vfloat16(v3.y)),
- select(0x0f0f,vfloat16(v1.z),vfloat16(v3.z)));
- const Vec3vf16 vtx2(select(0x0f0f,vfloat16(v3.x),vfloat16(v1.x)),
- select(0x0f0f,vfloat16(v3.y),vfloat16(v1.y)),
- select(0x0f0f,vfloat16(v3.z),vfloat16(v1.z)));
-#endif
-
- const vbool16 flags(0xf0f0);
- return PlueckerIntersector1KTriangleM::intersect1(ray,k,vtx0,vtx1,vtx2,flags,epilog);
- }
-
- __forceinline bool intersect1(RayHitK<K>& ray, size_t k, IntersectContext* context,
- const Vec3vf4& v0, const Vec3vf4& v1, const Vec3vf4& v2, const Vec3vf4& v3,
- const vuint4& geomID, const vuint4& primID) const
- {
- return intersect1(ray,k,v0,v1,v2,v3,Intersect1KEpilogM<8,16,K,filter>(ray,k,context,vuint8(geomID),vuint8(primID)));
- }
-
- __forceinline bool occluded1(RayK<K>& ray, size_t k, IntersectContext* context,
- const Vec3vf4& v0, const Vec3vf4& v1, const Vec3vf4& v2, const Vec3vf4& v3,
- const vuint4& geomID, const vuint4& primID) const
- {
- return intersect1(ray,k,v0,v1,v2,v3,Occluded1KEpilogM<8,16,K,filter>(ray,k,context,vuint8(geomID),vuint8(primID)));
- }
- };
-
-#elif defined(__AVX__)
+#if defined(__AVX__)
/*! Intersects 4 quads with 1 ray using AVX */
template<int K, bool filter>
@@ -506,21 +415,21 @@ namespace embree
const Vec3vf8 vtx1(vfloat8(v1.x,v3.x),vfloat8(v1.y,v3.y),vfloat8(v1.z,v3.z));
const Vec3vf8 vtx2(vfloat8(v3.x,v1.x),vfloat8(v3.y,v1.y),vfloat8(v3.z,v1.z));
#endif
- return PlueckerIntersector1KTriangleM::intersect1(ray,k,vtx0,vtx1,vtx2,flags,epilog);
+ return PlueckerIntersector1KTriangleM::intersect1<8,K>(ray,k,vtx0,vtx1,vtx2,flags,epilog);
}
__forceinline bool intersect1(RayHitK<K>& ray, size_t k, IntersectContext* context,
const Vec3vf4& v0, const Vec3vf4& v1, const Vec3vf4& v2, const Vec3vf4& v3,
const vuint4& geomID, const vuint4& primID) const
{
- return intersect1(ray,k,v0,v1,v2,v3,Intersect1KEpilogM<8,8,K,filter>(ray,k,context,vuint8(geomID),vuint8(primID)));
+ return intersect1(ray,k,v0,v1,v2,v3,Intersect1KEpilogM<8,K,filter>(ray,k,context,vuint8(geomID),vuint8(primID)));
}
__forceinline bool occluded1(RayK<K>& ray, size_t k, IntersectContext* context,
const Vec3vf4& v0, const Vec3vf4& v1, const Vec3vf4& v2, const Vec3vf4& v3,
const vuint4& geomID, const vuint4& primID) const
{
- return intersect1(ray,k,v0,v1,v2,v3,Occluded1KEpilogM<8,8,K,filter>(ray,k,context,vuint8(geomID),vuint8(primID)));
+ return intersect1(ray,k,v0,v1,v2,v3,Occluded1KEpilogM<8,K,filter>(ray,k,context,vuint8(geomID),vuint8(primID)));
}
};
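Editor's note: the quad_intersector_pluecker.h hunks above keep the edge tests U = dot(cross(e0,v2+v0),D), V = dot(cross(e1,v0+v1),D), W = dot(cross(e2,v1+v2),D) and accept a hit when all three share a sign within an epsilon scaled by |U+V+W|. A minimal scalar sketch of that test follows; names and the epsilon constant are illustrative stand-ins, and the vertices are assumed to be pre-translated so the ray origin sits at 0, as in the vectorized code.

#include <cmath>

struct Vec3 { float x, y, z; };
static Vec3  sub  (Vec3 a, Vec3 b) { return {a.x-b.x, a.y-b.y, a.z-b.z}; }
static Vec3  add  (Vec3 a, Vec3 b) { return {a.x+b.x, a.y+b.y, a.z+b.z}; }
static Vec3  cross(Vec3 a, Vec3 b) { return {a.y*b.z-a.z*b.y, a.z*b.x-a.x*b.z, a.x*b.y-a.y*b.x}; }
static float dot  (Vec3 a, Vec3 b) { return a.x*b.x + a.y*b.y + a.z*b.z; }

// Pluecker-style edge test: true when the ray (direction dir, origin at 0)
// passes on the same side of all three edges, front- or back-facing.
static bool pluecker_edge_test(Vec3 v0, Vec3 v1, Vec3 v2, Vec3 dir)
{
  const float U = dot(cross(sub(v2,v0), add(v2,v0)), dir);
  const float V = dot(cross(sub(v0,v1), add(v0,v1)), dir);
  const float W = dot(cross(sub(v1,v2), add(v1,v2)), dir);
  const float eps = 1e-6f * std::fabs(U+V+W);   // stand-in for float(ulp)*abs(UVW)
  return (std::fmin(std::fmin(U,V),W) >= -eps) || (std::fmax(std::fmax(U,V),W) <= eps);
}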
diff --git a/thirdparty/embree-aarch64/kernels/geometry/quadi.h b/thirdparty/embree/kernels/geometry/quadi.h
index 741ec519ab..70a7bdf158 100644
--- a/thirdparty/embree-aarch64/kernels/geometry/quadi.h
+++ b/thirdparty/embree/kernels/geometry/quadi.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
@@ -349,7 +349,7 @@ namespace embree
const QuadMesh* mesh = scene->get<QuadMesh>(geomID(index));
vfloat<K> ftime;
- const vint<K> itime = mesh->timeSegment(time, ftime);
+ const vint<K> itime = mesh->timeSegment<K>(time, ftime);
const size_t first = bsf(movemask(valid));
if (likely(all(valid,itime[first] == itime)))
@@ -361,10 +361,10 @@ namespace embree
}
else
{
- p0 = getVertex<0>(valid, index, scene, itime, ftime);
- p1 = getVertex<1>(valid, index, scene, itime, ftime);
- p2 = getVertex<2>(valid, index, scene, itime, ftime);
- p3 = getVertex<3>(valid, index, scene, itime, ftime);
+ p0 = getVertex<0,K>(valid, index, scene, itime, ftime);
+ p1 = getVertex<1,K>(valid, index, scene, itime, ftime);
+ p2 = getVertex<2,K>(valid, index, scene, itime, ftime);
+ p3 = getVertex<3,K>(valid, index, scene, itime, ftime);
}
}
diff --git a/thirdparty/embree-aarch64/kernels/geometry/quadi_intersector.h b/thirdparty/embree/kernels/geometry/quadi_intersector.h
index 96cf7f1ca2..20a98c3406 100644
--- a/thirdparty/embree-aarch64/kernels/geometry/quadi_intersector.h
+++ b/thirdparty/embree/kernels/geometry/quadi_intersector.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
@@ -230,7 +230,7 @@ namespace embree
{
if (!quad.valid(i)) break;
STAT3(normal.trav_prims,1,popcnt(valid_i),K);
- Vec3vf<K> v0,v1,v2,v3; quad.gather(valid_i,v0,v1,v2,v3,i,context->scene,ray.time());
+ Vec3vf<K> v0,v1,v2,v3; quad.template gather<K>(valid_i,v0,v1,v2,v3,i,context->scene,ray.time());
pre.intersectK(valid_i,ray,v0,v1,v2,v3,IntersectKEpilogM<M,K,filter>(ray,context,quad.geomID(),quad.primID(),i));
}
}
@@ -243,7 +243,7 @@ namespace embree
{
if (!quad.valid(i)) break;
STAT3(shadow.trav_prims,1,popcnt(valid0),K);
- Vec3vf<K> v0,v1,v2,v3; quad.gather(valid_i,v0,v1,v2,v3,i,context->scene,ray.time());
+ Vec3vf<K> v0,v1,v2,v3; quad.template gather<K>(valid_i,v0,v1,v2,v3,i,context->scene,ray.time());
if (pre.intersectK(valid0,ray,v0,v1,v2,v3,OccludedKEpilogM<M,K,filter>(valid0,ray,context,quad.geomID(),quad.primID(),i)))
break;
}
@@ -310,7 +310,7 @@ namespace embree
{
if (!quad.valid(i)) break;
STAT3(normal.trav_prims,1,popcnt(valid_i),K);
- Vec3vf<K> v0,v1,v2,v3; quad.gather(valid_i,v0,v1,v2,v3,i,context->scene,ray.time());
+ Vec3vf<K> v0,v1,v2,v3; quad.template gather<K>(valid_i,v0,v1,v2,v3,i,context->scene,ray.time());
pre.intersectK(valid_i,ray,v0,v1,v2,v3,IntersectKEpilogM<M,K,filter>(ray,context,quad.geomID(),quad.primID(),i));
}
}
@@ -323,7 +323,7 @@ namespace embree
{
if (!quad.valid(i)) break;
STAT3(shadow.trav_prims,1,popcnt(valid0),K);
- Vec3vf<K> v0,v1,v2,v3; quad.gather(valid_i,v0,v1,v2,v3,i,context->scene,ray.time());
+ Vec3vf<K> v0,v1,v2,v3; quad.template gather<K>(valid_i,v0,v1,v2,v3,i,context->scene,ray.time());
if (pre.intersectK(valid0,ray,v0,v1,v2,v3,OccludedKEpilogM<M,K,filter>(valid0,ray,context,quad.geomID(),quad.primID(),i)))
break;
}
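Editor's note: the quadi_intersector.h hunks above change quad.gather(...) to quad.template gather<K>(...). This is the standard dependent-name rule: when the object's type depends on a template parameter and the member call supplies explicit template arguments, the call must be introduced with the template keyword or the '<' parses as less-than. A tiny self-contained example with hypothetical types:

template<int M>
struct QuadSketch {
  template<int K> void gather(int& out) const { out = M + K; }
};

template<int M, int K>
void caller(const QuadSketch<M>& quad, int& out)
{
  // quad.gather<K>(out);          // ill-formed: '<' is parsed as a comparison
  quad.template gather<K>(out);    // OK: names the member template explicitly
}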
diff --git a/thirdparty/embree-aarch64/kernels/geometry/quadv.h b/thirdparty/embree/kernels/geometry/quadv.h
index 0a1fe4d128..2137356ff2 100644
--- a/thirdparty/embree-aarch64/kernels/geometry/quadv.h
+++ b/thirdparty/embree/kernels/geometry/quadv.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
diff --git a/thirdparty/embree-aarch64/kernels/geometry/quadv_intersector.h b/thirdparty/embree/kernels/geometry/quadv_intersector.h
index 30a24b291a..9b28e05614 100644
--- a/thirdparty/embree-aarch64/kernels/geometry/quadv_intersector.h
+++ b/thirdparty/embree/kernels/geometry/quadv_intersector.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
diff --git a/thirdparty/embree-aarch64/kernels/geometry/roundline_intersector.h b/thirdparty/embree/kernels/geometry/roundline_intersector.h
index cdf68f486b..0e9393442b 100644
--- a/thirdparty/embree-aarch64/kernels/geometry/roundline_intersector.h
+++ b/thirdparty/embree/kernels/geometry/roundline_intersector.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
@@ -81,7 +81,11 @@ namespace embree
__forceinline Vec2f uv (const size_t i) const { return Vec2f(vu[i],vv[i]); }
__forceinline float t (const size_t i) const { return vt[i]; }
__forceinline Vec3fa Ng(const size_t i) const { return Vec3fa(vNg.x[i],vNg.y[i],vNg.z[i]); }
-
+
+ __forceinline Vec2vf<M> uv() const { return Vec2vf<M>(vu,vv); }
+ __forceinline vfloat<M> t () const { return vt; }
+ __forceinline Vec3vf<M> Ng() const { return vNg; }
+
public:
vfloat<M> vu;
vfloat<M> vv;
@@ -646,14 +650,15 @@ namespace embree
struct RoundLinearCurveIntersector1
{
typedef CurvePrecalculations1 Precalculations;
-
+
+ template<typename Ray>
struct ray_tfar {
Ray& ray;
__forceinline ray_tfar(Ray& ray) : ray(ray) {}
__forceinline vfloat<M> operator() () const { return ray.tfar; };
};
-
- template<typename Epilog>
+
+ template<typename Ray, typename Epilog>
static __forceinline bool intersect(const vbool<M>& valid_i,
Ray& ray,
IntersectContext* context,
@@ -666,11 +671,11 @@ namespace embree
const Vec3vf<M> ray_org(ray.org.x, ray.org.y, ray.org.z);
const Vec3vf<M> ray_dir(ray.dir.x, ray.dir.y, ray.dir.z);
const vfloat<M> ray_tnear(ray.tnear());
- const Vec4vf<M> v0 = enlargeRadiusToMinWidth(context,geom,ray_org,v0i);
- const Vec4vf<M> v1 = enlargeRadiusToMinWidth(context,geom,ray_org,v1i);
- const Vec4vf<M> vL = enlargeRadiusToMinWidth(context,geom,ray_org,vLi);
- const Vec4vf<M> vR = enlargeRadiusToMinWidth(context,geom,ray_org,vRi);
- return __roundline_internal::intersectConeSphere(valid_i,ray_org,ray_dir,ray_tnear,ray_tfar(ray),v0,v1,vL,vR,epilog);
+ const Vec4vf<M> v0 = enlargeRadiusToMinWidth<M>(context,geom,ray_org,v0i);
+ const Vec4vf<M> v1 = enlargeRadiusToMinWidth<M>(context,geom,ray_org,v1i);
+ const Vec4vf<M> vL = enlargeRadiusToMinWidth<M>(context,geom,ray_org,vLi);
+ const Vec4vf<M> vR = enlargeRadiusToMinWidth<M>(context,geom,ray_org,vRi);
+ return __roundline_internal::intersectConeSphere<M>(valid_i,ray_org,ray_dir,ray_tnear,ray_tfar<Ray>(ray),v0,v1,vL,vR,epilog);
}
};
@@ -699,11 +704,11 @@ namespace embree
const Vec3vf<M> ray_org(ray.org.x[k], ray.org.y[k], ray.org.z[k]);
const Vec3vf<M> ray_dir(ray.dir.x[k], ray.dir.y[k], ray.dir.z[k]);
const vfloat<M> ray_tnear = ray.tnear()[k];
- const Vec4vf<M> v0 = enlargeRadiusToMinWidth(context,geom,ray_org,v0i);
- const Vec4vf<M> v1 = enlargeRadiusToMinWidth(context,geom,ray_org,v1i);
- const Vec4vf<M> vL = enlargeRadiusToMinWidth(context,geom,ray_org,vLi);
- const Vec4vf<M> vR = enlargeRadiusToMinWidth(context,geom,ray_org,vRi);
- return __roundline_internal::intersectConeSphere(valid_i,ray_org,ray_dir,ray_tnear,ray_tfar(ray,k),v0,v1,vL,vR,epilog);
+ const Vec4vf<M> v0 = enlargeRadiusToMinWidth<M>(context,geom,ray_org,v0i);
+ const Vec4vf<M> v1 = enlargeRadiusToMinWidth<M>(context,geom,ray_org,v1i);
+ const Vec4vf<M> vL = enlargeRadiusToMinWidth<M>(context,geom,ray_org,vLi);
+ const Vec4vf<M> vR = enlargeRadiusToMinWidth<M>(context,geom,ray_org,vRi);
+ return __roundline_internal::intersectConeSphere<M>(valid_i,ray_org,ray_dir,ray_tnear,ray_tfar(ray,k),v0,v1,vL,vR,epilog);
}
};
}
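Editor's note: the roundline_intersector.h hunk above turns ray_tfar into a functor templated on the ray type. One way to read this: the far bound is wrapped in a small callable so the shared cone-sphere routine can fetch it the same way for a single ray and for lane k of a ray packet, reading the current value at the point of use. A hypothetical reduced sketch (names are illustrative, not Embree's):

#include <cstddef>

struct Ray1 { float tfar; };
struct Ray8 { float tfar[8]; };

struct TFar1 { const Ray1& r;           float operator()() const { return r.tfar; } };
struct TFarK { const Ray8& r; size_t k; float operator()() const { return r.tfar[k]; } };

template<typename GetTFar>
static bool hit_in_range(float t, float tnear, const GetTFar& tfar)
{
  return t >= tnear && t <= tfar();   // far bound fetched where it is needed
}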
diff --git a/thirdparty/embree-aarch64/kernels/geometry/roundlinei_intersector.h b/thirdparty/embree/kernels/geometry/roundlinei_intersector.h
index 079817335e..29061d6475 100644
--- a/thirdparty/embree-aarch64/kernels/geometry/roundlinei_intersector.h
+++ b/thirdparty/embree/kernels/geometry/roundlinei_intersector.h
@@ -1,18 +1,5 @@
-// ======================================================================== //
-// Copyright 2009-2020 Intel Corporation //
-// //
-// Licensed under the Apache License, Version 2.0 (the "License"); //
-// you may not use this file except in compliance with the License. //
-// You may obtain a copy of the License at //
-// //
-// http://www.apache.org/licenses/LICENSE-2.0 //
-// //
-// Unless required by applicable law or agreed to in writing, software //
-// distributed under the License is distributed on an "AS IS" BASIS, //
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. //
-// See the License for the specific language governing permissions and //
-// limitations under the License. //
-// ======================================================================== //
+// Copyright 2009-2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
#pragma once
@@ -23,7 +10,7 @@ namespace embree
{
namespace isa
{
- template<int M, int Mx, bool filter>
+ template<int M, bool filter>
struct RoundLinearCurveMiIntersector1
{
typedef LineMi<M> Primitive;
@@ -34,8 +21,8 @@ namespace embree
STAT3(normal.trav_prims,1,1,1);
const LineSegments* geom = context->scene->get<LineSegments>(line.geomID());
Vec4vf<M> v0,v1,vL,vR; line.gather(v0,v1,vL,vR,geom);
- const vbool<Mx> valid = line.template valid<Mx>();
- RoundLinearCurveIntersector1<Mx>::intersect(valid,ray,context,geom,pre,v0,v1,vL,vR,Intersect1EpilogM<M,Mx,filter>(ray,context,line.geomID(),line.primID()));
+ const vbool<M> valid = line.valid();
+ RoundLinearCurveIntersector1<M>::intersect(valid,ray,context,geom,pre,v0,v1,vL,vR,Intersect1EpilogM<M,filter>(ray,context,line.geomID(),line.primID()));
}
static __forceinline bool occluded(const Precalculations& pre, Ray& ray, IntersectContext* context, const Primitive& line)
@@ -43,8 +30,8 @@ namespace embree
STAT3(shadow.trav_prims,1,1,1);
const LineSegments* geom = context->scene->get<LineSegments>(line.geomID());
Vec4vf<M> v0,v1,vL,vR; line.gather(v0,v1,vL,vR,geom);
- const vbool<Mx> valid = line.template valid<Mx>();
- return RoundLinearCurveIntersector1<Mx>::intersect(valid,ray,context,geom,pre,v0,v1,vL,vR,Occluded1EpilogM<M,Mx,filter>(ray,context,line.geomID(),line.primID()));
+ const vbool<M> valid = line.valid();
+ return RoundLinearCurveIntersector1<M>::intersect(valid,ray,context,geom,pre,v0,v1,vL,vR,Occluded1EpilogM<M,filter>(ray,context,line.geomID(),line.primID()));
}
static __forceinline bool pointQuery(PointQuery* query, PointQueryContext* context, const Primitive& line)
@@ -53,7 +40,7 @@ namespace embree
}
};
- template<int M, int Mx, bool filter>
+ template<int M, bool filter>
struct RoundLinearCurveMiMBIntersector1
{
typedef LineMi<M> Primitive;
@@ -64,8 +51,8 @@ namespace embree
STAT3(normal.trav_prims,1,1,1);
const LineSegments* geom = context->scene->get<LineSegments>(line.geomID());
Vec4vf<M> v0,v1,vL,vR; line.gather(v0,v1,vL,vR,geom,ray.time());
- const vbool<Mx> valid = line.template valid<Mx>();
- RoundLinearCurveIntersector1<Mx>::intersect(valid,ray,context,geom,pre,v0,v1,vL,vR,Intersect1EpilogM<M,Mx,filter>(ray,context,line.geomID(),line.primID()));
+ const vbool<M> valid = line.valid();
+ RoundLinearCurveIntersector1<M>::intersect(valid,ray,context,geom,pre,v0,v1,vL,vR,Intersect1EpilogM<M,filter>(ray,context,line.geomID(),line.primID()));
}
static __forceinline bool occluded(const Precalculations& pre, Ray& ray, IntersectContext* context, const Primitive& line)
@@ -73,8 +60,8 @@ namespace embree
STAT3(shadow.trav_prims,1,1,1);
const LineSegments* geom = context->scene->get<LineSegments>(line.geomID());
Vec4vf<M> v0,v1,vL,vR; line.gather(v0,v1,vL,vR,geom,ray.time());
- const vbool<Mx> valid = line.template valid<Mx>();
- return RoundLinearCurveIntersector1<Mx>::intersect(valid,ray,context,geom,pre,v0,v1,vL,vR,Occluded1EpilogM<M,Mx,filter>(ray,context,line.geomID(),line.primID()));
+ const vbool<M> valid = line.valid();
+ return RoundLinearCurveIntersector1<M>::intersect(valid,ray,context,geom,pre,v0,v1,vL,vR,Occluded1EpilogM<M,filter>(ray,context,line.geomID(),line.primID()));
}
static __forceinline bool pointQuery(PointQuery* query, PointQueryContext* context, const Primitive& line)
@@ -83,7 +70,7 @@ namespace embree
}
};
- template<int M, int Mx, int K, bool filter>
+ template<int M, int K, bool filter>
struct RoundLinearCurveMiIntersectorK
{
typedef LineMi<M> Primitive;
@@ -94,8 +81,8 @@ namespace embree
STAT3(normal.trav_prims,1,1,1);
const LineSegments* geom = context->scene->get<LineSegments>(line.geomID());
Vec4vf<M> v0,v1,vL,vR; line.gather(v0,v1,vL,vR,geom);
- const vbool<Mx> valid = line.template valid<Mx>();
- RoundLinearCurveIntersectorK<Mx,K>::intersect(valid,ray,k,context,geom,pre,v0,v1,vL,vR,Intersect1KEpilogM<M,Mx,K,filter>(ray,k,context,line.geomID(),line.primID()));
+ const vbool<M> valid = line.valid();
+ RoundLinearCurveIntersectorK<M,K>::intersect(valid,ray,k,context,geom,pre,v0,v1,vL,vR,Intersect1KEpilogM<M,K,filter>(ray,k,context,line.geomID(),line.primID()));
}
static __forceinline bool occluded(const Precalculations& pre, RayK<K>& ray, size_t k, IntersectContext* context, const Primitive& line)
@@ -103,12 +90,12 @@ namespace embree
STAT3(shadow.trav_prims,1,1,1);
const LineSegments* geom = context->scene->get<LineSegments>(line.geomID());
Vec4vf<M> v0,v1,vL,vR; line.gather(v0,v1,vL,vR,geom);
- const vbool<Mx> valid = line.template valid<Mx>();
- return RoundLinearCurveIntersectorK<Mx,K>::intersect(valid,ray,k,context,geom,pre,v0,v1,vL,vR,Occluded1KEpilogM<M,Mx,K,filter>(ray,k,context,line.geomID(),line.primID()));
+ const vbool<M> valid = line.valid();
+ return RoundLinearCurveIntersectorK<M,K>::intersect(valid,ray,k,context,geom,pre,v0,v1,vL,vR,Occluded1KEpilogM<M,K,filter>(ray,k,context,line.geomID(),line.primID()));
}
};
- template<int M, int Mx, int K, bool filter>
+ template<int M, int K, bool filter>
struct RoundLinearCurveMiMBIntersectorK
{
typedef LineMi<M> Primitive;
@@ -119,8 +106,8 @@ namespace embree
STAT3(normal.trav_prims,1,1,1);
const LineSegments* geom = context->scene->get<LineSegments>(line.geomID());
Vec4vf<M> v0,v1,vL,vR; line.gather(v0,v1,vL,vR,geom,ray.time()[k]);
- const vbool<Mx> valid = line.template valid<Mx>();
- RoundLinearCurveIntersectorK<Mx,K>::intersect(valid,ray,k,context,geom,pre,v0,v1,vL,vR,Intersect1KEpilogM<M,Mx,K,filter>(ray,k,context,line.geomID(),line.primID()));
+ const vbool<M> valid = line.valid();
+ RoundLinearCurveIntersectorK<M,K>::intersect(valid,ray,k,context,geom,pre,v0,v1,vL,vR,Intersect1KEpilogM<M,K,filter>(ray,k,context,line.geomID(),line.primID()));
}
static __forceinline bool occluded(const Precalculations& pre, RayK<K>& ray, size_t k, IntersectContext* context, const Primitive& line)
@@ -128,8 +115,8 @@ namespace embree
STAT3(shadow.trav_prims,1,1,1);
const LineSegments* geom = context->scene->get<LineSegments>(line.geomID());
Vec4vf<M> v0,v1,vL,vR; line.gather(v0,v1,vL,vR,geom,ray.time()[k]);
- const vbool<Mx> valid = line.template valid<Mx>();
- return RoundLinearCurveIntersectorK<Mx,K>::intersect(valid,ray,k,context,geom,pre,v0,v1,vL,vR,Occluded1KEpilogM<M,Mx,K,filter>(ray,k,context,line.geomID(),line.primID()));
+ const vbool<M> valid = line.valid();
+ return RoundLinearCurveIntersectorK<M,K>::intersect(valid,ray,k,context,geom,pre,v0,v1,vL,vR,Occluded1KEpilogM<M,K,filter>(ray,k,context,line.geomID(),line.primID()));
}
};
}
diff --git a/thirdparty/embree-aarch64/kernels/geometry/sphere_intersector.h b/thirdparty/embree/kernels/geometry/sphere_intersector.h
index 3ab90c29ef..2670f9762d 100644
--- a/thirdparty/embree-aarch64/kernels/geometry/sphere_intersector.h
+++ b/thirdparty/embree/kernels/geometry/sphere_intersector.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
@@ -105,7 +105,7 @@ namespace embree
const Precalculations& pre, const Vec4vf<M>& v0i, const Epilog& epilog)
{
const Vec3vf<M> ray_org(ray.org.x, ray.org.y, ray.org.z);
- const Vec4vf<M> v0 = enlargeRadiusToMinWidth(context,geom,ray_org,v0i);
+ const Vec4vf<M> v0 = enlargeRadiusToMinWidth<M>(context,geom,ray_org,v0i);
return intersect(valid_i,ray,pre,v0,epilog);
}
};
@@ -130,7 +130,7 @@ namespace embree
const Vec3vf<M> ray_dir(ray.dir.x[k], ray.dir.y[k], ray.dir.z[k]);
const vfloat<M> rd2 = rcp(dot(ray_dir, ray_dir));
- const Vec4vf<M> v0 = enlargeRadiusToMinWidth(context,geom,ray_org,v0i);
+ const Vec4vf<M> v0 = enlargeRadiusToMinWidth<M>(context,geom,ray_org,v0i);
const Vec3vf<M> center = v0.xyz();
const vfloat<M> radius = v0.w;
diff --git a/thirdparty/embree-aarch64/kernels/geometry/spherei_intersector.h b/thirdparty/embree/kernels/geometry/spherei_intersector.h
index 1146847602..7a0b428117 100644
--- a/thirdparty/embree-aarch64/kernels/geometry/spherei_intersector.h
+++ b/thirdparty/embree/kernels/geometry/spherei_intersector.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
@@ -11,7 +11,7 @@ namespace embree
{
namespace isa
{
- template<int M, int Mx, bool filter>
+ template<int M, bool filter>
struct SphereMiIntersector1
{
typedef PointMi<M> Primitive;
@@ -25,9 +25,9 @@ namespace embree
STAT3(normal.trav_prims, 1, 1, 1);
const Points* geom = context->scene->get<Points>(sphere.geomID());
Vec4vf<M> v0; sphere.gather(v0, geom);
- const vbool<Mx> valid = sphere.template valid<Mx>();
- SphereIntersector1<Mx>::intersect(
- valid, ray, context, geom, pre, v0, Intersect1EpilogM<M, Mx, filter>(ray, context, sphere.geomID(), sphere.primID()));
+ const vbool<M> valid = sphere.valid();
+ SphereIntersector1<M>::intersect(
+ valid, ray, context, geom, pre, v0, Intersect1EpilogM<M, filter>(ray, context, sphere.geomID(), sphere.primID()));
}
static __forceinline bool occluded(const Precalculations& pre,
@@ -38,9 +38,9 @@ namespace embree
STAT3(shadow.trav_prims, 1, 1, 1);
const Points* geom = context->scene->get<Points>(sphere.geomID());
Vec4vf<M> v0; sphere.gather(v0, geom);
- const vbool<Mx> valid = sphere.template valid<Mx>();
- return SphereIntersector1<Mx>::intersect(
- valid, ray, context, geom, pre, v0, Occluded1EpilogM<M, Mx, filter>(ray, context, sphere.geomID(), sphere.primID()));
+ const vbool<M> valid = sphere.valid();
+ return SphereIntersector1<M>::intersect(
+ valid, ray, context, geom, pre, v0, Occluded1EpilogM<M, filter>(ray, context, sphere.geomID(), sphere.primID()));
}
static __forceinline bool pointQuery(PointQuery* query,
@@ -51,7 +51,7 @@ namespace embree
}
};
- template<int M, int Mx, bool filter>
+ template<int M, bool filter>
struct SphereMiMBIntersector1
{
typedef PointMi<M> Primitive;
@@ -65,9 +65,9 @@ namespace embree
STAT3(normal.trav_prims, 1, 1, 1);
const Points* geom = context->scene->get<Points>(sphere.geomID());
Vec4vf<M> v0; sphere.gather(v0, geom, ray.time());
- const vbool<Mx> valid = sphere.template valid<Mx>();
- SphereIntersector1<Mx>::intersect(
- valid, ray, context, geom, pre, v0, Intersect1EpilogM<M, Mx, filter>(ray, context, sphere.geomID(), sphere.primID()));
+ const vbool<M> valid = sphere.valid();
+ SphereIntersector1<M>::intersect(
+ valid, ray, context, geom, pre, v0, Intersect1EpilogM<M, filter>(ray, context, sphere.geomID(), sphere.primID()));
}
static __forceinline bool occluded(const Precalculations& pre,
@@ -78,9 +78,9 @@ namespace embree
STAT3(shadow.trav_prims, 1, 1, 1);
const Points* geom = context->scene->get<Points>(sphere.geomID());
Vec4vf<M> v0; sphere.gather(v0, geom, ray.time());
- const vbool<Mx> valid = sphere.template valid<Mx>();
- return SphereIntersector1<Mx>::intersect(
- valid, ray, context, geom, pre, v0, Occluded1EpilogM<M, Mx, filter>(ray, context, sphere.geomID(), sphere.primID()));
+ const vbool<M> valid = sphere.valid();
+ return SphereIntersector1<M>::intersect(
+ valid, ray, context, geom, pre, v0, Occluded1EpilogM<M, filter>(ray, context, sphere.geomID(), sphere.primID()));
}
static __forceinline bool pointQuery(PointQuery* query,
@@ -91,7 +91,7 @@ namespace embree
}
};
- template<int M, int Mx, int K, bool filter>
+ template<int M, int K, bool filter>
struct SphereMiIntersectorK
{
typedef PointMi<M> Primitive;
@@ -103,10 +103,10 @@ namespace embree
STAT3(normal.trav_prims, 1, 1, 1);
const Points* geom = context->scene->get<Points>(sphere.geomID());
Vec4vf<M> v0; sphere.gather(v0, geom);
- const vbool<Mx> valid = sphere.template valid<Mx>();
- SphereIntersectorK<Mx, K>::intersect(
+ const vbool<M> valid = sphere.valid();
+ SphereIntersectorK<M, K>::intersect(
valid, ray, k, context, geom, pre, v0,
- Intersect1KEpilogM<M, Mx, K, filter>(ray, k, context, sphere.geomID(), sphere.primID()));
+ Intersect1KEpilogM<M, K, filter>(ray, k, context, sphere.geomID(), sphere.primID()));
}
static __forceinline bool occluded(
@@ -115,14 +115,14 @@ namespace embree
STAT3(shadow.trav_prims, 1, 1, 1);
const Points* geom = context->scene->get<Points>(sphere.geomID());
Vec4vf<M> v0; sphere.gather(v0, geom);
- const vbool<Mx> valid = sphere.template valid<Mx>();
- return SphereIntersectorK<Mx, K>::intersect(
+ const vbool<M> valid = sphere.valid();
+ return SphereIntersectorK<M, K>::intersect(
valid, ray, k, context, geom, pre, v0,
- Occluded1KEpilogM<M, Mx, K, filter>(ray, k, context, sphere.geomID(), sphere.primID()));
+ Occluded1KEpilogM<M, K, filter>(ray, k, context, sphere.geomID(), sphere.primID()));
}
};
- template<int M, int Mx, int K, bool filter>
+ template<int M, int K, bool filter>
struct SphereMiMBIntersectorK
{
typedef PointMi<M> Primitive;
@@ -134,10 +134,10 @@ namespace embree
STAT3(normal.trav_prims, 1, 1, 1);
const Points* geom = context->scene->get<Points>(sphere.geomID());
Vec4vf<M> v0; sphere.gather(v0, geom, ray.time()[k]);
- const vbool<Mx> valid = sphere.template valid<Mx>();
- SphereIntersectorK<Mx, K>::intersect(
+ const vbool<M> valid = sphere.valid();
+ SphereIntersectorK<M, K>::intersect(
valid, ray, k, context, geom, pre, v0,
- Intersect1KEpilogM<M, Mx, K, filter>(ray, k, context, sphere.geomID(), sphere.primID()));
+ Intersect1KEpilogM<M, K, filter>(ray, k, context, sphere.geomID(), sphere.primID()));
}
static __forceinline bool occluded(
@@ -146,10 +146,10 @@ namespace embree
STAT3(shadow.trav_prims, 1, 1, 1);
const Points* geom = context->scene->get<Points>(sphere.geomID());
Vec4vf<M> v0; sphere.gather(v0, geom, ray.time()[k]);
- const vbool<Mx> valid = sphere.template valid<Mx>();
- return SphereIntersectorK<Mx, K>::intersect(
+ const vbool<M> valid = sphere.valid();
+ return SphereIntersectorK<M, K>::intersect(
valid, ray, k, context, geom, pre, v0,
- Occluded1KEpilogM<M, Mx, K, filter>(ray, k, context, sphere.geomID(), sphere.primID()));
+ Occluded1KEpilogM<M, K, filter>(ray, k, context, sphere.geomID(), sphere.primID()));
}
};
} // namespace isa
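Editor's note: the sphere and round-line intersector hunks above repeatedly collapse template<int M, int Mx, ...> to template<int M, ...>. With the KNL-only (__AVX512ER__) 16-wide specializations removed earlier in this commit, the extended mask width Mx always equals the primitive width M, so the extra parameter and the widened vbool<Mx> masks can be dropped. Reduced, hypothetical sketch of the simplification:

template<int M> struct vbool_sketch { bool lanes[M]; };

// before: template<int M, int Mx> vbool_sketch<Mx> valid_widened();   // padded to Mx lanes
template<int M>
static vbool_sketch<M> valid_sketch()    // after: one width, no padding lanes
{
  vbool_sketch<M> v{};
  for (int i = 0; i < M; ++i) v.lanes[i] = true;
  return v;
}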
diff --git a/thirdparty/embree-aarch64/kernels/geometry/subdivpatch1.h b/thirdparty/embree/kernels/geometry/subdivpatch1.h
index 94ad46ad87..ae0d4e2616 100644
--- a/thirdparty/embree-aarch64/kernels/geometry/subdivpatch1.h
+++ b/thirdparty/embree/kernels/geometry/subdivpatch1.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
diff --git a/thirdparty/embree-aarch64/kernels/geometry/subdivpatch1_intersector.h b/thirdparty/embree/kernels/geometry/subdivpatch1_intersector.h
index 74ec1de258..b4b15a1210 100644
--- a/thirdparty/embree-aarch64/kernels/geometry/subdivpatch1_intersector.h
+++ b/thirdparty/embree/kernels/geometry/subdivpatch1_intersector.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
@@ -43,28 +43,28 @@ namespace embree
}
/*! Intersect a ray with the primitive. */
- template<int N, int Nx, bool robust>
- static __forceinline void intersect(const Accel::Intersectors* This, Precalculations& pre, RayHit& ray, IntersectContext* context, const Primitive* prim, size_t ty, const TravRay<N,Nx,robust> &tray, size_t& lazy_node)
+ template<int N, bool robust>
+ static __forceinline void intersect(const Accel::Intersectors* This, Precalculations& pre, RayHit& ray, IntersectContext* context, const Primitive* prim, size_t ty, const TravRay<N,robust> &tray, size_t& lazy_node)
{
if (likely(ty == 0)) GridSOAIntersector1::intersect(pre,ray,context,prim,lazy_node);
else processLazyNode(pre,context,prim,lazy_node);
}
- template<int N, int Nx, bool robust>
- static __forceinline void intersect(const Accel::Intersectors* This, Precalculations& pre, RayHit& ray, IntersectContext* context, size_t ty0, const Primitive* prim, size_t ty, const TravRay<N,Nx,robust> &tray, size_t& lazy_node) {
+ template<int N, bool robust>
+ static __forceinline void intersect(const Accel::Intersectors* This, Precalculations& pre, RayHit& ray, IntersectContext* context, size_t ty0, const Primitive* prim, size_t ty, const TravRay<N,robust> &tray, size_t& lazy_node) {
intersect(This,pre,ray,context,prim,ty,tray,lazy_node);
}
/*! Test if the ray is occluded by the primitive */
- template<int N, int Nx, bool robust>
- static __forceinline bool occluded(const Accel::Intersectors* This, Precalculations& pre, Ray& ray, IntersectContext* context, const Primitive* prim, size_t ty, const TravRay<N,Nx,robust> &tray, size_t& lazy_node)
+ template<int N, bool robust>
+ static __forceinline bool occluded(const Accel::Intersectors* This, Precalculations& pre, Ray& ray, IntersectContext* context, const Primitive* prim, size_t ty, const TravRay<N,robust> &tray, size_t& lazy_node)
{
if (likely(ty == 0)) return GridSOAIntersector1::occluded(pre,ray,context,prim,lazy_node);
else return processLazyNode(pre,context,prim,lazy_node);
}
- template<int N, int Nx, bool robust>
- static __forceinline bool occluded(const Accel::Intersectors* This, Precalculations& pre, Ray& ray, IntersectContext* context, size_t ty0, const Primitive* prim, size_t ty, const TravRay<N,Nx,robust> &tray, size_t& lazy_node) {
+ template<int N, bool robust>
+ static __forceinline bool occluded(const Accel::Intersectors* This, Precalculations& pre, Ray& ray, IntersectContext* context, size_t ty0, const Primitive* prim, size_t ty, const TravRay<N,robust> &tray, size_t& lazy_node) {
return occluded(This,pre,ray,context,prim,ty,tray,lazy_node);
}
@@ -100,28 +100,28 @@ namespace embree
}
/*! Intersect a ray with the primitive. */
- template<int N, int Nx, bool robust>
- static __forceinline void intersect(const Accel::Intersectors* This, Precalculations& pre, RayHit& ray, IntersectContext* context, const Primitive* prim, size_t ty, const TravRay<N,Nx,robust> &tray, size_t& lazy_node)
+ template<int N, bool robust>
+ static __forceinline void intersect(const Accel::Intersectors* This, Precalculations& pre, RayHit& ray, IntersectContext* context, const Primitive* prim, size_t ty, const TravRay<N,robust> &tray, size_t& lazy_node)
{
if (likely(ty == 0)) GridSOAMBIntersector1::intersect(pre,ray,context,prim,lazy_node);
else processLazyNode(pre,ray,context,prim,lazy_node);
}
- template<int N, int Nx, bool robust>
- static __forceinline void intersect(const Accel::Intersectors* This, Precalculations& pre, RayHit& ray, IntersectContext* context, size_t ty0, const Primitive* prim, size_t ty, const TravRay<N,Nx,robust> &tray, size_t& lazy_node) {
+ template<int N, bool robust>
+ static __forceinline void intersect(const Accel::Intersectors* This, Precalculations& pre, RayHit& ray, IntersectContext* context, size_t ty0, const Primitive* prim, size_t ty, const TravRay<N,robust> &tray, size_t& lazy_node) {
intersect(This,pre,ray,context,prim,ty,tray,lazy_node);
}
/*! Test if the ray is occluded by the primitive */
- template<int N, int Nx, bool robust>
- static __forceinline bool occluded(const Accel::Intersectors* This, Precalculations& pre, Ray& ray, IntersectContext* context, const Primitive* prim, size_t ty, const TravRay<N,Nx,robust> &tray, size_t& lazy_node)
+ template<int N, bool robust>
+ static __forceinline bool occluded(const Accel::Intersectors* This, Precalculations& pre, Ray& ray, IntersectContext* context, const Primitive* prim, size_t ty, const TravRay<N,robust> &tray, size_t& lazy_node)
{
if (likely(ty == 0)) return GridSOAMBIntersector1::occluded(pre,ray,context,prim,lazy_node);
else return processLazyNode(pre,ray,context,prim,lazy_node);
}
- template<int N, int Nx, bool robust>
- static __forceinline bool occluded(const Accel::Intersectors* This, Precalculations& pre, Ray& ray, IntersectContext* context, size_t ty0, const Primitive* prim, size_t ty, const TravRay<N,Nx,robust> &tray, size_t& lazy_node) {
+ template<int N, bool robust>
+ static __forceinline bool occluded(const Accel::Intersectors* This, Precalculations& pre, Ray& ray, IntersectContext* context, size_t ty0, const Primitive* prim, size_t ty, const TravRay<N,robust> &tray, size_t& lazy_node) {
return occluded(This,pre,ray,context,prim,ty,tray,lazy_node);
}
@@ -133,7 +133,7 @@ namespace embree
return false;
}
- template<int N, int Nx, bool robust>
+ template<int N, bool robust>
static __forceinline bool pointQuery(const Accel::Intersectors* This, PointQuery* query, PointQueryContext* context, size_t ty0, const Primitive* prim, size_t ty, const TravPointQuery<N> &tquery, size_t& lazy_node) {
return pointQuery(This,query,context,prim,ty,tquery,lazy_node);
}
@@ -166,15 +166,15 @@ namespace embree
else return processLazyNode(pre,context,prim,lazy_node);
}
- template<int N, int Nx, bool robust>
- static __forceinline void intersect(const Accel::Intersectors* This, Precalculations& pre, RayHitK<K>& ray, size_t k, IntersectContext* context, const Primitive* prim, size_t ty, const TravRay<N,Nx,robust> &tray, size_t& lazy_node)
+ template<int N, bool robust>
+ static __forceinline void intersect(const Accel::Intersectors* This, Precalculations& pre, RayHitK<K>& ray, size_t k, IntersectContext* context, const Primitive* prim, size_t ty, const TravRay<N,robust> &tray, size_t& lazy_node)
{
if (likely(ty == 0)) GridSOAIntersectorK<K>::intersect(pre,ray,k,context,prim,lazy_node);
else processLazyNode(pre,context,prim,lazy_node);
}
- template<int N, int Nx, bool robust>
- static __forceinline bool occluded(const Accel::Intersectors* This, Precalculations& pre, RayK<K>& ray, size_t k, IntersectContext* context, const Primitive* prim, size_t ty, const TravRay<N,Nx,robust> &tray, size_t& lazy_node)
+ template<int N, bool robust>
+ static __forceinline bool occluded(const Accel::Intersectors* This, Precalculations& pre, RayK<K>& ray, size_t k, IntersectContext* context, const Primitive* prim, size_t ty, const TravRay<N,robust> &tray, size_t& lazy_node)
{
if (likely(ty == 0)) return GridSOAIntersectorK<K>::occluded(pre,ray,k,context,prim,lazy_node);
else return processLazyNode(pre,context,prim,lazy_node);
@@ -215,15 +215,15 @@ namespace embree
else return processLazyNode(pre,context,prim,lazy_node);
}
- template<int N, int Nx, bool robust>
- static __forceinline void intersect(const Accel::Intersectors* This, Precalculations& pre, RayHitK<K>& ray, size_t k, IntersectContext* context, const Primitive* prim, size_t ty, const TravRay<N,Nx,robust> &tray, size_t& lazy_node)
+ template<int N, bool robust>
+ static __forceinline void intersect(const Accel::Intersectors* This, Precalculations& pre, RayHitK<K>& ray, size_t k, IntersectContext* context, const Primitive* prim, size_t ty, const TravRay<N,robust> &tray, size_t& lazy_node)
{
if (likely(ty == 0)) GridSOAMBIntersectorK<K>::intersect(pre,ray,k,context,prim,lazy_node);
else processLazyNode(pre,context,prim,lazy_node);
}
- template<int N, int Nx, bool robust>
- static __forceinline bool occluded(const Accel::Intersectors* This, Precalculations& pre, RayK<K>& ray, size_t k, IntersectContext* context, const Primitive* prim, size_t ty, const TravRay<N,Nx,robust> &tray, size_t& lazy_node)
+ template<int N, bool robust>
+ static __forceinline bool occluded(const Accel::Intersectors* This, Precalculations& pre, RayK<K>& ray, size_t k, IntersectContext* context, const Primitive* prim, size_t ty, const TravRay<N,robust> &tray, size_t& lazy_node)
{
if (likely(ty == 0)) return GridSOAMBIntersectorK<K>::occluded(pre,ray,k,context,prim,lazy_node);
else return processLazyNode(pre,context,prim,lazy_node);
diff --git a/thirdparty/embree-aarch64/kernels/geometry/subgrid.h b/thirdparty/embree/kernels/geometry/subgrid.h
index 39fa6fb0f0..ce54421cab 100644
--- a/thirdparty/embree-aarch64/kernels/geometry/subgrid.h
+++ b/thirdparty/embree/kernels/geometry/subgrid.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
diff --git a/thirdparty/embree-aarch64/kernels/geometry/subgrid_intersector.h b/thirdparty/embree/kernels/geometry/subgrid_intersector.h
index 045eee4329..ad5fee2e4e 100644
--- a/thirdparty/embree-aarch64/kernels/geometry/subgrid_intersector.h
+++ b/thirdparty/embree/kernels/geometry/subgrid_intersector.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
@@ -53,14 +53,14 @@ namespace embree
return accel->pointQuery(query, context);
}
- template<int Nx, bool robust>
- static __forceinline void intersect(const Accel::Intersectors* This, Precalculations& pre, RayHit& ray, IntersectContext* context, const Primitive* prim, size_t num, const TravRay<N,Nx,robust> &tray, size_t& lazy_node)
+ template<bool robust>
+ static __forceinline void intersect(const Accel::Intersectors* This, Precalculations& pre, RayHit& ray, IntersectContext* context, const Primitive* prim, size_t num, const TravRay<N,robust> &tray, size_t& lazy_node)
{
- BVHNQuantizedBaseNodeIntersector1<N,Nx,robust> isec1;
+ BVHNQuantizedBaseNodeIntersector1<N,robust> isec1;
for (size_t i=0;i<num;i++)
{
- vfloat<Nx> dist;
+ vfloat<N> dist;
size_t mask = isec1.intersect(&prim[i].qnode,tray,dist);
#if defined(__AVX__)
STAT3(normal.trav_hit_boxes[popcnt(mask)],1,1,1);
@@ -75,15 +75,15 @@ namespace embree
}
}
}
- template<int Nx, bool robust>
- static __forceinline bool occluded(const Accel::Intersectors* This, Precalculations& pre, Ray& ray, IntersectContext* context, const Primitive* prim, size_t num, const TravRay<N,Nx,robust> &tray, size_t& lazy_node)
+ template<bool robust>
+ static __forceinline bool occluded(const Accel::Intersectors* This, Precalculations& pre, Ray& ray, IntersectContext* context, const Primitive* prim, size_t num, const TravRay<N,robust> &tray, size_t& lazy_node)
{
- BVHNQuantizedBaseNodeIntersector1<N,Nx,robust> isec1;
+ BVHNQuantizedBaseNodeIntersector1<N,robust> isec1;
for (size_t i=0;i<num;i++)
{
- vfloat<Nx> dist;
+ vfloat<N> dist;
size_t mask = isec1.intersect(&prim[i].qnode,tray,dist);
while(mask != 0)
{
@@ -155,14 +155,14 @@ namespace embree
return accel->pointQuery(query, context);
}
- template<int Nx, bool robust>
- static __forceinline void intersect(const Accel::Intersectors* This, Precalculations& pre, RayHit& ray, IntersectContext* context, const Primitive* prim, size_t num, const TravRay<N,Nx,robust> &tray, size_t& lazy_node)
+ template<bool robust>
+ static __forceinline void intersect(const Accel::Intersectors* This, Precalculations& pre, RayHit& ray, IntersectContext* context, const Primitive* prim, size_t num, const TravRay<N,robust> &tray, size_t& lazy_node)
{
- BVHNQuantizedBaseNodeIntersector1<N,Nx,robust> isec1;
+ BVHNQuantizedBaseNodeIntersector1<N,robust> isec1;
for (size_t i=0;i<num;i++)
{
- vfloat<Nx> dist;
+ vfloat<N> dist;
size_t mask = isec1.intersect(&prim[i].qnode,tray,dist);
#if defined(__AVX__)
STAT3(normal.trav_hit_boxes[popcnt(mask)],1,1,1);
@@ -178,14 +178,14 @@ namespace embree
}
}
- template<int Nx, bool robust>
- static __forceinline bool occluded(const Accel::Intersectors* This, Precalculations& pre, Ray& ray, IntersectContext* context, const Primitive* prim, size_t num, const TravRay<N,Nx,robust> &tray, size_t& lazy_node)
+ template<bool robust>
+ static __forceinline bool occluded(const Accel::Intersectors* This, Precalculations& pre, Ray& ray, IntersectContext* context, const Primitive* prim, size_t num, const TravRay<N,robust> &tray, size_t& lazy_node)
{
- BVHNQuantizedBaseNodeIntersector1<N,Nx,robust> isec1;
+ BVHNQuantizedBaseNodeIntersector1<N,robust> isec1;
for (size_t i=0;i<num;i++)
{
- vfloat<Nx> dist;
+ vfloat<N> dist;
size_t mask = isec1.intersect(&prim[i].qnode,tray,dist);
while(mask != 0)
{
@@ -326,14 +326,14 @@ namespace embree
return !valid0;
}
- template<int Nx, bool robust>
- static __forceinline void intersect(const Accel::Intersectors* This, Precalculations& pre, RayHitK<K>& ray, size_t k, IntersectContext* context, const Primitive* prim, size_t num, const TravRay<N,Nx,robust> &tray, size_t& lazy_node)
+ template<bool robust>
+ static __forceinline void intersect(const Accel::Intersectors* This, Precalculations& pre, RayHitK<K>& ray, size_t k, IntersectContext* context, const Primitive* prim, size_t num, const TravRay<N,robust> &tray, size_t& lazy_node)
{
- BVHNQuantizedBaseNodeIntersector1<N,Nx,robust> isec1;
+ BVHNQuantizedBaseNodeIntersector1<N,robust> isec1;
for (size_t i=0;i<num;i++)
{
- vfloat<Nx> dist;
+ vfloat<N> dist;
size_t mask = isec1.intersect(&prim[i].qnode,tray,dist);
while(mask != 0)
{
@@ -346,14 +346,14 @@ namespace embree
}
}
- template<int Nx, bool robust>
- static __forceinline bool occluded(const Accel::Intersectors* This, Precalculations& pre, RayK<K>& ray, size_t k, IntersectContext* context, const Primitive* prim, size_t num, const TravRay<N,Nx,robust> &tray, size_t& lazy_node)
+ template<bool robust>
+ static __forceinline bool occluded(const Accel::Intersectors* This, Precalculations& pre, RayK<K>& ray, size_t k, IntersectContext* context, const Primitive* prim, size_t num, const TravRay<N,robust> &tray, size_t& lazy_node)
{
- BVHNQuantizedBaseNodeIntersector1<N,Nx,robust> isec1;
+ BVHNQuantizedBaseNodeIntersector1<N,robust> isec1;
for (size_t i=0;i<num;i++)
{
- vfloat<Nx> dist;
+ vfloat<N> dist;
size_t mask = isec1.intersect(&prim[i].qnode,tray,dist);
while(mask != 0)
{
@@ -408,7 +408,9 @@ namespace embree
const Vec3vf<K> p2 = vtx[i*4+2];
const Vec3vf<K> p3 = vtx[i*4+3];
STAT3(shadow.trav_prims,1,popcnt(valid0),K);
- if (pre.intersectK(valid0,ray,p0,p1,p2,p3,g,subgrid,i,OccludedKEpilogM<4,K,filter>(valid0,ray,context,subgrid.geomID(),subgrid.primID(),i)))
+ //if (pre.intersectK(valid0,ray,p0,p1,p2,p3,g,subgrid,i,OccludedKEpilogM<4,K,filter>(valid0,ray,context,subgrid.geomID(),subgrid.primID(),i)))
+ if (pre.occludedK(valid0,ray,p0,p1,p2,p3,g,subgrid,i,OccludedKEpilogM<4,K,filter>(valid0,ray,context,subgrid.geomID(),subgrid.primID(),i)))
+
break;
}
return !valid0;
@@ -470,14 +472,14 @@ namespace embree
return !valid0;
}
- template<int Nx, bool robust>
- static __forceinline void intersect(const Accel::Intersectors* This, Precalculations& pre, RayHitK<K>& ray, size_t k, IntersectContext* context, const Primitive* prim, size_t num, const TravRay<N,Nx,robust> &tray, size_t& lazy_node)
+ template<bool robust>
+ static __forceinline void intersect(const Accel::Intersectors* This, Precalculations& pre, RayHitK<K>& ray, size_t k, IntersectContext* context, const Primitive* prim, size_t num, const TravRay<N,robust> &tray, size_t& lazy_node)
{
- BVHNQuantizedBaseNodeIntersector1<N,Nx,robust> isec1;
+ BVHNQuantizedBaseNodeIntersector1<N,robust> isec1;
for (size_t i=0;i<num;i++)
{
- vfloat<Nx> dist;
+ vfloat<N> dist;
size_t mask = isec1.intersect(&prim[i].qnode,tray,dist);
while(mask != 0)
{
@@ -490,14 +492,14 @@ namespace embree
}
}
- template<int Nx, bool robust>
- static __forceinline bool occluded(const Accel::Intersectors* This, Precalculations& pre, RayK<K>& ray, size_t k, IntersectContext* context, const Primitive* prim, size_t num, const TravRay<N,Nx,robust> &tray, size_t& lazy_node)
+ template<bool robust>
+ static __forceinline bool occluded(const Accel::Intersectors* This, Precalculations& pre, RayK<K>& ray, size_t k, IntersectContext* context, const Primitive* prim, size_t num, const TravRay<N,robust> &tray, size_t& lazy_node)
{
- BVHNQuantizedBaseNodeIntersector1<N,Nx,robust> isec1;
+ BVHNQuantizedBaseNodeIntersector1<N,robust> isec1;
for (size_t i=0;i<num;i++)
{
- vfloat<Nx> dist;
+ vfloat<N> dist;
size_t mask = isec1.intersect(&prim[i].qnode,tray,dist);
while(mask != 0)
{
@@ -511,8 +513,5 @@ namespace embree
return false;
}
};
-
-
-
}
}
diff --git a/thirdparty/embree/kernels/geometry/subgrid_intersector_moeller.h b/thirdparty/embree/kernels/geometry/subgrid_intersector_moeller.h
new file mode 100644
index 0000000000..64937d34fe
--- /dev/null
+++ b/thirdparty/embree/kernels/geometry/subgrid_intersector_moeller.h
@@ -0,0 +1,382 @@
+// Copyright 2009-2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+
+#pragma once
+
+#include "subgrid.h"
+#include "quad_intersector_moeller.h"
+
+namespace embree
+{
+ namespace isa
+ {
+
+ /* ----------------------------- */
+ /* -- single ray intersectors -- */
+ /* ----------------------------- */
+
+ template<int M>
+ __forceinline void interpolateUV(MoellerTrumboreHitM<M,UVIdentity<M>> &hit,const GridMesh::Grid &g, const SubGrid& subgrid, const vint<M> &stepX, const vint<M> &stepY)
+ {
+ /* correct U,V interpolation across the entire grid */
+ const vint<M> sx((int)subgrid.x());
+ const vint<M> sy((int)subgrid.y());
+ const vint<M> sxM(sx + stepX);
+ const vint<M> syM(sy + stepY);
+ const float inv_resX = rcp((float)((int)g.resX-1));
+ const float inv_resY = rcp((float)((int)g.resY-1));
+ hit.U = (hit.U + (vfloat<M>)sxM * hit.absDen) * inv_resX;
+ hit.V = (hit.V + (vfloat<M>)syM * hit.absDen) * inv_resY;
+ }
+
+ template<int M, bool filter>
+ struct SubGridQuadMIntersector1MoellerTrumbore;
+
+ template<int M, bool filter>
+ struct SubGridQuadMIntersector1MoellerTrumbore
+ {
+ __forceinline SubGridQuadMIntersector1MoellerTrumbore() {}
+
+ __forceinline SubGridQuadMIntersector1MoellerTrumbore(const Ray& ray, const void* ptr) {}
+
+ __forceinline void intersect(RayHit& ray, IntersectContext* context,
+ const Vec3vf<M>& v0, const Vec3vf<M>& v1, const Vec3vf<M>& v2, const Vec3vf<M>& v3,
+ const GridMesh::Grid &g, const SubGrid& subgrid) const
+ {
+ UVIdentity<M> mapUV;
+ MoellerTrumboreHitM<M,UVIdentity<M>> hit(mapUV);
+ MoellerTrumboreIntersector1<M> intersector(ray,nullptr);
+ Intersect1EpilogMU<M,filter> epilog(ray,context,subgrid.geomID(),subgrid.primID());
+
+ /* intersect first triangle */
+ if (intersector.intersect(ray,v0,v1,v3,mapUV,hit))
+ {
+ interpolateUV<M>(hit,g,subgrid,vint<M>(0,1,1,0),vint<M>(0,0,1,1));
+ epilog(hit.valid,hit);
+ }
+
+ /* intersect second triangle */
+ if (intersector.intersect(ray,v2,v3,v1,mapUV,hit))
+ {
+ hit.U = hit.absDen - hit.U;
+ hit.V = hit.absDen - hit.V;
+ interpolateUV<M>(hit,g,subgrid,vint<M>(0,1,1,0),vint<M>(0,0,1,1));
+ epilog(hit.valid,hit);
+ }
+ }
+
+ __forceinline bool occluded(Ray& ray, IntersectContext* context,
+ const Vec3vf<M>& v0, const Vec3vf<M>& v1, const Vec3vf<M>& v2, const Vec3vf<M>& v3,
+ const GridMesh::Grid &g, const SubGrid& subgrid) const
+ {
+ UVIdentity<M> mapUV;
+ MoellerTrumboreHitM<M,UVIdentity<M>> hit(mapUV);
+ MoellerTrumboreIntersector1<M> intersector(ray,nullptr);
+ Occluded1EpilogMU<M,filter> epilog(ray,context,subgrid.geomID(),subgrid.primID());
+
+ /* intersect first triangle */
+ if (intersector.intersect(ray,v0,v1,v3,mapUV,hit))
+ {
+ interpolateUV<M>(hit,g,subgrid,vint<M>(0,1,1,0),vint<M>(0,0,1,1));
+ if (epilog(hit.valid,hit))
+ return true;
+ }
+
+ /* intersect second triangle */
+ if (intersector.intersect(ray,v2,v3,v1,mapUV,hit))
+ {
+ hit.U = hit.absDen - hit.U;
+ hit.V = hit.absDen - hit.V;
+ interpolateUV<M>(hit,g,subgrid,vint<M>(0,1,1,0),vint<M>(0,0,1,1));
+ if (epilog(hit.valid,hit))
+ return true;
+ }
+ return false;
+ }
+ };
+
+#if defined (__AVX__)
+
+ /*! Intersects 4 quads with 1 ray using AVX */
+ template<bool filter>
+ struct SubGridQuadMIntersector1MoellerTrumbore<4,filter>
+ {
+ __forceinline SubGridQuadMIntersector1MoellerTrumbore() {}
+
+ __forceinline SubGridQuadMIntersector1MoellerTrumbore(const Ray& ray, const void* ptr) {}
+
+ template<typename Epilog>
+ __forceinline bool intersect(Ray& ray, const Vec3vf4& v0, const Vec3vf4& v1, const Vec3vf4& v2, const Vec3vf4& v3, const GridMesh::Grid &g, const SubGrid& subgrid, const Epilog& epilog) const
+ {
+ const Vec3vf8 vtx0(vfloat8(v0.x,v2.x),vfloat8(v0.y,v2.y),vfloat8(v0.z,v2.z));
+#if !defined(EMBREE_BACKFACE_CULLING)
+ const Vec3vf8 vtx1(vfloat8(v1.x),vfloat8(v1.y),vfloat8(v1.z));
+ const Vec3vf8 vtx2(vfloat8(v3.x),vfloat8(v3.y),vfloat8(v3.z));
+#else
+ const Vec3vf8 vtx1(vfloat8(v1.x,v3.x),vfloat8(v1.y,v3.y),vfloat8(v1.z,v3.z));
+ const Vec3vf8 vtx2(vfloat8(v3.x,v1.x),vfloat8(v3.y,v1.y),vfloat8(v3.z,v1.z));
+#endif
+ UVIdentity<8> mapUV;
+ MoellerTrumboreHitM<8,UVIdentity<8>> hit(mapUV);
+ MoellerTrumboreIntersector1<8> intersector(ray,nullptr);
+ const vbool8 flags(0,0,0,0,1,1,1,1);
+ if (unlikely(intersector.intersect(ray,vtx0,vtx1,vtx2,mapUV,hit)))
+ {
+ /* correct U,V interpolation across the entire grid */
+ const vfloat8 U = select(flags,hit.absDen - hit.V,hit.U);
+ const vfloat8 V = select(flags,hit.absDen - hit.U,hit.V);
+ hit.U = U;
+ hit.V = V;
+ hit.vNg *= select(flags,vfloat8(-1.0f),vfloat8(1.0f));
+ interpolateUV<8>(hit,g,subgrid,vint<8>(0,1,1,0,0,1,1,0),vint<8>(0,0,1,1,0,0,1,1));
+ if (unlikely(epilog(hit.valid,hit)))
+ return true;
+ }
+ return false;
+ }
+
+ __forceinline bool intersect(RayHit& ray, IntersectContext* context,
+ const Vec3vf4& v0, const Vec3vf4& v1, const Vec3vf4& v2, const Vec3vf4& v3,
+ const GridMesh::Grid &g, const SubGrid& subgrid) const
+ {
+ return intersect(ray,v0,v1,v2,v3,g,subgrid,Intersect1EpilogMU<8,filter>(ray,context,subgrid.geomID(),subgrid.primID()));
+ }
+
+ __forceinline bool occluded(Ray& ray, IntersectContext* context,
+ const Vec3vf4& v0, const Vec3vf4& v1, const Vec3vf4& v2, const Vec3vf4& v3,
+ const GridMesh::Grid &g, const SubGrid& subgrid) const
+ {
+ return intersect(ray,v0,v1,v2,v3,g,subgrid,Occluded1EpilogMU<8,filter>(ray,context,subgrid.geomID(),subgrid.primID()));
+ }
+ };
+
+#endif
+
+ // ============================================================================================================================
+ // ============================================================================================================================
+ // ============================================================================================================================
+
+
+ /* ----------------------------- */
+ /* -- ray packet intersectors -- */
+ /* ----------------------------- */
+
+ template<int K>
+ __forceinline void interpolateUV(const vbool<K>& valid, MoellerTrumboreHitK<K,UVIdentity<K>> &hit,const GridMesh::Grid &g, const SubGrid& subgrid, const unsigned int i)
+ {
+ /* correct U,V interpolation across the entire grid */
+ const unsigned int sx = subgrid.x() + (unsigned int)(i % 2);
+ const unsigned int sy = subgrid.y() + (unsigned int)(i >>1);
+ const float inv_resX = rcp((float)(int)(g.resX-1));
+ const float inv_resY = rcp((float)(int)(g.resY-1));
+ hit.U = select(valid,(hit.U + vfloat<K>((float)sx) * hit.absDen) * inv_resX,hit.U);
+ hit.V = select(valid,(hit.V + vfloat<K>((float)sy) * hit.absDen) * inv_resY,hit.V);
+ }
+
+ template<int M, int K, bool filter>
+ struct SubGridQuadMIntersectorKMoellerTrumboreBase
+ {
+ __forceinline SubGridQuadMIntersectorKMoellerTrumboreBase(const vbool<K>& valid, const RayK<K>& ray) {}
+
+ template<typename Epilog>
+ __forceinline bool intersectK(const vbool<K>& valid,
+ RayK<K>& ray,
+ const Vec3vf<K>& v0,
+ const Vec3vf<K>& v1,
+ const Vec3vf<K>& v2,
+ const Vec3vf<K>& v3,
+ const GridMesh::Grid &g,
+ const SubGrid &subgrid,
+ const unsigned int i,
+ const Epilog& epilog) const
+ {
+ UVIdentity<K> mapUV;
+ MoellerTrumboreHitK<K,UVIdentity<K>> hit(mapUV);
+ MoellerTrumboreIntersectorK<M,K> intersector;
+
+ const vbool<K> valid0 = intersector.intersectK(valid,ray,v0,v1,v3,mapUV,hit);
+ if (any(valid0))
+ {
+ interpolateUV(valid0,hit,g,subgrid,i);
+ epilog(valid0,hit);
+ }
+ const vbool<K> valid1 = intersector.intersectK(valid,ray,v2,v3,v1,mapUV,hit);
+ if (any(valid1))
+ {
+ hit.U = hit.absDen - hit.U;
+ hit.V = hit.absDen - hit.V;
+ interpolateUV(valid1,hit,g,subgrid,i);
+ epilog(valid1,hit);
+ }
+ return any(valid0|valid1);
+ }
+
+ template<typename Epilog>
+ __forceinline bool occludedK(const vbool<K>& valid,
+ RayK<K>& ray,
+ const Vec3vf<K>& v0,
+ const Vec3vf<K>& v1,
+ const Vec3vf<K>& v2,
+ const Vec3vf<K>& v3,
+ const GridMesh::Grid &g,
+ const SubGrid &subgrid,
+ const unsigned int i,
+ const Epilog& epilog) const
+ {
+ UVIdentity<K> mapUV;
+ MoellerTrumboreHitK<K,UVIdentity<K>> hit(mapUV);
+ MoellerTrumboreIntersectorK<M,K> intersector;
+
+ vbool<K> valid_final = valid;
+ const vbool<K> valid0 = intersector.intersectK(valid,ray,v0,v1,v3,mapUV,hit);
+ if (any(valid0))
+ {
+ interpolateUV(valid0,hit,g,subgrid,i);
+ epilog(valid0,hit);
+ valid_final &= !valid0;
+ }
+ if (none(valid_final)) return true;
+ const vbool<K> valid1 = intersector.intersectK(valid,ray,v2,v3,v1,mapUV,hit);
+ if (any(valid1))
+ {
+ hit.U = hit.absDen - hit.U;
+ hit.V = hit.absDen - hit.V;
+ interpolateUV(valid1,hit,g,subgrid,i);
+ epilog(valid1,hit);
+ valid_final &= !valid1;
+ }
+ return none(valid_final);
+ }
+
+ static __forceinline bool intersect1(RayK<K>& ray,
+ size_t k,
+ const Vec3vf<M>& v0,
+ const Vec3vf<M>& v1,
+ const Vec3vf<M>& v2,
+ MoellerTrumboreHitM<M,UVIdentity<M>> &hit)
+ {
+ const Vec3vf<M> e1 = v0-v1;
+ const Vec3vf<M> e2 = v2-v0;
+ MoellerTrumboreIntersectorK<8,K> intersector;
+ UVIdentity<M> mapUV;
+ return intersector.intersectEdge(ray,k,v0,e1,e2,mapUV,hit);
+ }
+
+ };
+
+ template<int M, int K, bool filter>
+ struct SubGridQuadMIntersectorKMoellerTrumbore : public SubGridQuadMIntersectorKMoellerTrumboreBase<M,K,filter>
+ {
+ __forceinline SubGridQuadMIntersectorKMoellerTrumbore(const vbool<K>& valid, const RayK<K>& ray)
+ : SubGridQuadMIntersectorKMoellerTrumboreBase<M,K,filter>(valid,ray) {}
+
+ __forceinline void intersect1(RayHitK<K>& ray, size_t k, IntersectContext* context,
+ const Vec3vf<M>& v0, const Vec3vf<M>& v1, const Vec3vf<M>& v2, const Vec3vf<M>& v3, const GridMesh::Grid &g, const SubGrid &subgrid) const
+ {
+ UVIdentity<M> mapUV;
+ MoellerTrumboreHitM<M,UVIdentity<M>> hit(mapUV);
+ Intersect1KEpilogMU<M,K,filter> epilog(ray,k,context,subgrid.geomID(),subgrid.primID());
+ MoellerTrumboreIntersectorK<M,K> intersector;
+ /* intersect first triangle */
+ if (intersector.intersect(ray,k,v0,v1,v3,mapUV,hit))
+ {
+ interpolateUV<M>(hit,g,subgrid,vint<M>(0,1,1,0),vint<M>(0,0,1,1));
+ epilog(hit.valid,hit);
+ }
+
+ /* intersect second triangle */
+ if (intersector.intersect(ray,k,v2,v3,v1,mapUV,hit))
+ {
+ hit.U = hit.absDen - hit.U;
+ hit.V = hit.absDen - hit.V;
+ interpolateUV<M>(hit,g,subgrid,vint<M>(0,1,1,0),vint<M>(0,0,1,1));
+ epilog(hit.valid,hit);
+ }
+ }
+
+ __forceinline bool occluded1(RayK<K>& ray, size_t k, IntersectContext* context,
+ const Vec3vf<M>& v0, const Vec3vf<M>& v1, const Vec3vf<M>& v2, const Vec3vf<M>& v3, const GridMesh::Grid &g, const SubGrid &subgrid) const
+ {
+ UVIdentity<M> mapUV;
+ MoellerTrumboreHitM<M,UVIdentity<M>> hit(mapUV);
+ Occluded1KEpilogMU<M,K,filter> epilog(ray,k,context,subgrid.geomID(),subgrid.primID());
+ MoellerTrumboreIntersectorK<M,K> intersector;
+ /* intersect first triangle */
+ if (intersector.intersect(ray,k,v0,v1,v3,mapUV,hit))
+ {
+ interpolateUV<M>(hit,g,subgrid,vint<M>(0,1,1,0),vint<M>(0,0,1,1));
+ if (epilog(hit.valid,hit)) return true;
+ }
+
+ /* intersect second triangle */
+ if (intersector.intersect(ray,k,v2,v3,v1,mapUV,hit))
+ {
+ hit.U = hit.absDen - hit.U;
+ hit.V = hit.absDen - hit.V;
+ interpolateUV<M>(hit,g,subgrid,vint<M>(0,1,1,0),vint<M>(0,0,1,1));
+ if (epilog(hit.valid,hit)) return true;
+ }
+ return false;
+ }
+ };
+
+
+#if defined (__AVX__)
+
+ /*! Intersects 4 quads with 1 ray using AVX */
+ template<int K, bool filter>
+ struct SubGridQuadMIntersectorKMoellerTrumbore<4,K,filter> : public SubGridQuadMIntersectorKMoellerTrumboreBase<4,K,filter>
+ {
+ __forceinline SubGridQuadMIntersectorKMoellerTrumbore(const vbool<K>& valid, const RayK<K>& ray)
+ : SubGridQuadMIntersectorKMoellerTrumboreBase<4,K,filter>(valid,ray) {}
+
+ template<typename Epilog>
+ __forceinline bool intersect1(RayK<K>& ray, size_t k,const Vec3vf4& v0, const Vec3vf4& v1, const Vec3vf4& v2, const Vec3vf4& v3,
+ const GridMesh::Grid &g, const SubGrid &subgrid, const Epilog& epilog) const
+ {
+ const Vec3vf8 vtx0(vfloat8(v0.x,v2.x),vfloat8(v0.y,v2.y),vfloat8(v0.z,v2.z));
+#if !defined(EMBREE_BACKFACE_CULLING)
+ const Vec3vf8 vtx1(vfloat8(v1.x),vfloat8(v1.y),vfloat8(v1.z));
+ const Vec3vf8 vtx2(vfloat8(v3.x),vfloat8(v3.y),vfloat8(v3.z));
+#else
+ const Vec3vf8 vtx1(vfloat8(v1.x,v3.x),vfloat8(v1.y,v3.y),vfloat8(v1.z,v3.z));
+ const Vec3vf8 vtx2(vfloat8(v3.x,v1.x),vfloat8(v3.y,v1.y),vfloat8(v3.z,v1.z));
+#endif
+ const vbool8 flags(0,0,0,0,1,1,1,1);
+
+ UVIdentity<8> mapUV;
+ MoellerTrumboreHitM<8,UVIdentity<8>> hit(mapUV);
+ if (SubGridQuadMIntersectorKMoellerTrumboreBase<8,K,filter>::intersect1(ray,k,vtx0,vtx1,vtx2,hit))
+ {
+ const vfloat8 U = select(flags,hit.absDen - hit.V,hit.U);
+ const vfloat8 V = select(flags,hit.absDen - hit.U,hit.V);
+ hit.U = U;
+ hit.V = V;
+ hit.vNg *= select(flags,vfloat8(-1.0f),vfloat8(1.0f));
+ interpolateUV<8>(hit,g,subgrid,vint<8>(0,1,1,0,0,1,1,0),vint<8>(0,0,1,1,0,0,1,1));
+ if (unlikely(epilog(hit.valid,hit)))
+ return true;
+
+ }
+ return false;
+ }
+
+ __forceinline bool intersect1(RayHitK<K>& ray, size_t k, IntersectContext* context,
+ const Vec3vf4& v0, const Vec3vf4& v1, const Vec3vf4& v2, const Vec3vf4& v3, const GridMesh::Grid &g, const SubGrid &subgrid) const
+ {
+ return intersect1(ray,k,v0,v1,v2,v3,g,subgrid,Intersect1KEpilogMU<8,K,filter>(ray,k,context,subgrid.geomID(),subgrid.primID()));
+ }
+
+ __forceinline bool occluded1(RayK<K>& ray, size_t k, IntersectContext* context,
+ const Vec3vf4& v0, const Vec3vf4& v1, const Vec3vf4& v2, const Vec3vf4& v3, const GridMesh::Grid &g, const SubGrid &subgrid) const
+ {
+ return intersect1(ray,k,v0,v1,v2,v3,g,subgrid,Occluded1KEpilogMU<8,K,filter>(ray,k,context,subgrid.geomID(),subgrid.primID()));
+ }
+ };
+
+#endif
+
+
+
+ }
+}
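
For orientation, the SubGridQuadMIntersector1MoellerTrumbore code above evaluates the classic Moeller-Trumbore ray/triangle test once per SIMD lane (Embree uses an equivalent edge/sign formulation with unnormalized barycentrics scaled by absDen). A minimal scalar sketch of the same test follows; the struct and function names are illustrative only and are not part of the patch.

// Illustrative scalar sketch of the Moeller-Trumbore test that the SIMD code
// above evaluates per lane; names are hypothetical, not Embree API.
#include <cmath>

struct Vec3 { float x, y, z; };

static Vec3  sub(Vec3 a, Vec3 b)   { return {a.x-b.x, a.y-b.y, a.z-b.z}; }
static Vec3  cross(Vec3 a, Vec3 b) { return {a.y*b.z-a.z*b.y, a.z*b.x-a.x*b.z, a.x*b.y-a.y*b.x}; }
static float dot(Vec3 a, Vec3 b)   { return a.x*b.x + a.y*b.y + a.z*b.z; }

// Returns true on hit and fills barycentric (u,v) and distance t along the ray.
bool intersectTriangle(Vec3 org, Vec3 dir, float tnear, float tfar,
                       Vec3 v0, Vec3 v1, Vec3 v2,
                       float& u, float& v, float& t)
{
  const Vec3  e1  = sub(v1, v0);
  const Vec3  e2  = sub(v2, v0);
  const Vec3  p   = cross(dir, e2);
  const float den = dot(e1, p);
  if (std::fabs(den) < 1e-12f) return false;      // ray parallel to triangle plane
  const float inv = 1.0f / den;
  const Vec3  s   = sub(org, v0);
  u = dot(s, p) * inv;                             // first barycentric coordinate
  if (u < 0.0f || u > 1.0f) return false;
  const Vec3 q = cross(s, e1);
  v = dot(dir, q) * inv;                           // second barycentric coordinate
  if (v < 0.0f || u + v > 1.0f) return false;
  t = dot(e2, q) * inv;                            // hit distance along the ray
  return t >= tnear && t <= tfar;
}

// interpolateUV above then shifts each quad corner's local (u,v) into the grid:
// once finalize() has divided by absDen, the effective mapping is
//   u_grid = (u_local + subgrid.x() + stepX) / (resX - 1)
// and analogously for v with subgrid.y(), stepY and resY.
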
diff --git a/thirdparty/embree/kernels/geometry/subgrid_intersector_pluecker.h b/thirdparty/embree/kernels/geometry/subgrid_intersector_pluecker.h
new file mode 100644
index 0000000000..5ded56e1f7
--- /dev/null
+++ b/thirdparty/embree/kernels/geometry/subgrid_intersector_pluecker.h
@@ -0,0 +1,367 @@
+// Copyright 2009-2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+
+#pragma once
+
+#include "subgrid.h"
+#include "quad_intersector_moeller.h"
+#include "quad_intersector_pluecker.h"
+
+namespace embree
+{
+ namespace isa
+ {
+
+ template<int M>
+ __forceinline void interpolateUV(PlueckerHitM<M,UVIdentity<M>> &hit,const GridMesh::Grid &g, const SubGrid& subgrid, const vint<M> &stepX, const vint<M> &stepY)
+ {
+ /* correct U,V interpolation across the entire grid */
+ const vint<M> sx((int)subgrid.x());
+ const vint<M> sy((int)subgrid.y());
+ const vint<M> sxM(sx + stepX);
+ const vint<M> syM(sy + stepY);
+ const float inv_resX = rcp((float)((int)g.resX-1));
+ const float inv_resY = rcp((float)((int)g.resY-1));
+ hit.U = (hit.U + vfloat<M>(sxM) * hit.UVW) * inv_resX;
+ hit.V = (hit.V + vfloat<M>(syM) * hit.UVW) * inv_resY;
+ }
+
+ template<int M, bool filter>
+ struct SubGridQuadMIntersector1Pluecker;
+
+ template<int M, bool filter>
+ struct SubGridQuadMIntersector1Pluecker
+ {
+ __forceinline SubGridQuadMIntersector1Pluecker() {}
+
+ __forceinline SubGridQuadMIntersector1Pluecker(const Ray& ray, const void* ptr) {}
+
+ __forceinline void intersect(RayHit& ray, IntersectContext* context,
+ const Vec3vf<M>& v0, const Vec3vf<M>& v1, const Vec3vf<M>& v2, const Vec3vf<M>& v3,
+ const GridMesh::Grid &g, const SubGrid& subgrid) const
+ {
+ UVIdentity<M> mapUV;
+ PlueckerHitM<M,UVIdentity<M>> hit(mapUV);
+ PlueckerIntersector1<M> intersector(ray,nullptr);
+
+ Intersect1EpilogMU<M,filter> epilog(ray,context,subgrid.geomID(),subgrid.primID());
+
+ /* intersect first triangle */
+ if (intersector.intersect(ray,v0,v1,v3,mapUV,hit))
+ {
+ interpolateUV<M>(hit,g,subgrid,vint<M>(0,1,1,0),vint<M>(0,0,1,1));
+ epilog(hit.valid,hit);
+ }
+
+ /* intersect second triangle */
+ if (intersector.intersect(ray,v2,v3,v1,mapUV,hit))
+ {
+ hit.U = hit.UVW - hit.U;
+ hit.V = hit.UVW - hit.V;
+ interpolateUV<M>(hit,g,subgrid,vint<M>(0,1,1,0),vint<M>(0,0,1,1));
+ epilog(hit.valid,hit);
+ }
+ }
+
+ __forceinline bool occluded(Ray& ray, IntersectContext* context,
+ const Vec3vf<M>& v0, const Vec3vf<M>& v1, const Vec3vf<M>& v2, const Vec3vf<M>& v3,
+ const GridMesh::Grid &g, const SubGrid& subgrid) const
+ {
+ UVIdentity<M> mapUV;
+ PlueckerHitM<M,UVIdentity<M>> hit(mapUV);
+ PlueckerIntersector1<M> intersector(ray,nullptr);
+ Occluded1EpilogMU<M,filter> epilog(ray,context,subgrid.geomID(),subgrid.primID());
+
+ /* intersect first triangle */
+ if (intersector.intersect(ray,v0,v1,v3,mapUV,hit))
+ {
+ interpolateUV<M>(hit,g,subgrid,vint<M>(0,1,1,0),vint<M>(0,0,1,1));
+ if (epilog(hit.valid,hit))
+ return true;
+ }
+
+ /* intersect second triangle */
+ if (intersector.intersect(ray,v2,v3,v1,mapUV,hit))
+ {
+ hit.U = hit.UVW - hit.U;
+ hit.V = hit.UVW - hit.V;
+ interpolateUV<M>(hit,g,subgrid,vint<M>(0,1,1,0),vint<M>(0,0,1,1));
+ if (epilog(hit.valid,hit))
+ return true;
+ }
+ return false;
+ }
+ };
+
+#if defined (__AVX__)
+
+ /*! Intersects 4 quads with 1 ray using AVX */
+ template<bool filter>
+ struct SubGridQuadMIntersector1Pluecker<4,filter>
+ {
+ __forceinline SubGridQuadMIntersector1Pluecker() {}
+
+ __forceinline SubGridQuadMIntersector1Pluecker(const Ray& ray, const void* ptr) {}
+
+ template<typename Epilog>
+ __forceinline bool intersect(Ray& ray, const Vec3vf4& v0, const Vec3vf4& v1, const Vec3vf4& v2, const Vec3vf4& v3, const GridMesh::Grid &g, const SubGrid& subgrid, const Epilog& epilog) const
+ {
+ const Vec3vf8 vtx0(vfloat8(v0.x,v2.x),vfloat8(v0.y,v2.y),vfloat8(v0.z,v2.z));
+#if !defined(EMBREE_BACKFACE_CULLING)
+ const Vec3vf8 vtx1(vfloat8(v1.x),vfloat8(v1.y),vfloat8(v1.z));
+ const Vec3vf8 vtx2(vfloat8(v3.x),vfloat8(v3.y),vfloat8(v3.z));
+#else
+ const Vec3vf8 vtx1(vfloat8(v1.x,v3.x),vfloat8(v1.y,v3.y),vfloat8(v1.z,v3.z));
+ const Vec3vf8 vtx2(vfloat8(v3.x,v1.x),vfloat8(v3.y,v1.y),vfloat8(v3.z,v1.z));
+#endif
+
+ UVIdentity<8> mapUV;
+ PlueckerHitM<8,UVIdentity<8>> hit(mapUV);
+ PlueckerIntersector1<8> intersector(ray,nullptr);
+ const vbool8 flags(0,0,0,0,1,1,1,1);
+ if (unlikely(intersector.intersect(ray,vtx0,vtx1,vtx2,mapUV,hit)))
+ {
+ /* correct U,V interpolation across the entire grid */
+ const vfloat8 U = select(flags,hit.UVW - hit.V,hit.U);
+ const vfloat8 V = select(flags,hit.UVW - hit.U,hit.V);
+ hit.U = U;
+ hit.V = V;
+ hit.vNg *= select(flags,vfloat8(-1.0f),vfloat8(1.0f));
+ interpolateUV<8>(hit,g,subgrid,vint<8>(0,1,1,0,0,1,1,0),vint<8>(0,0,1,1,0,0,1,1));
+ if (unlikely(epilog(hit.valid,hit)))
+ return true;
+ }
+ return false;
+ }
+
+ __forceinline bool intersect(RayHit& ray, IntersectContext* context,
+ const Vec3vf4& v0, const Vec3vf4& v1, const Vec3vf4& v2, const Vec3vf4& v3,
+ const GridMesh::Grid &g, const SubGrid& subgrid) const
+ {
+ return intersect(ray,v0,v1,v2,v3,g,subgrid,Intersect1EpilogMU<8,filter>(ray,context,subgrid.geomID(),subgrid.primID()));
+ }
+
+ __forceinline bool occluded(Ray& ray, IntersectContext* context,
+ const Vec3vf4& v0, const Vec3vf4& v1, const Vec3vf4& v2, const Vec3vf4& v3,
+ const GridMesh::Grid &g, const SubGrid& subgrid) const
+ {
+ return intersect(ray,v0,v1,v2,v3,g,subgrid,Occluded1EpilogMU<8,filter>(ray,context,subgrid.geomID(),subgrid.primID()));
+ }
+ };
+
+#endif
+
+
+ /* ----------------------------- */
+ /* -- ray packet intersectors -- */
+ /* ----------------------------- */
+
+ template<int K>
+ __forceinline void interpolateUV(const vbool<K>& valid, PlueckerHitK<K,UVIdentity<K>> &hit,const GridMesh::Grid &g, const SubGrid& subgrid, const unsigned int i)
+ {
+ /* correct U,V interpolation across the entire grid */
+ const unsigned int sx = subgrid.x() + (unsigned int)(i % 2);
+ const unsigned int sy = subgrid.y() + (unsigned int)(i >>1);
+ const float inv_resX = rcp((float)(int)(g.resX-1));
+ const float inv_resY = rcp((float)(int)(g.resY-1));
+ hit.U = select(valid,(hit.U + vfloat<K>((float)sx) * hit.UVW) * inv_resX,hit.U);
+ hit.V = select(valid,(hit.V + vfloat<K>((float)sy) * hit.UVW) * inv_resY,hit.V);
+ }
+
+ template<int M, int K, bool filter>
+ struct SubGridQuadMIntersectorKPlueckerBase
+ {
+ __forceinline SubGridQuadMIntersectorKPlueckerBase(const vbool<K>& valid, const RayK<K>& ray) {}
+
+ template<typename Epilog>
+ __forceinline bool intersectK(const vbool<K>& valid,
+ RayK<K>& ray,
+ const Vec3vf<K>& v0,
+ const Vec3vf<K>& v1,
+ const Vec3vf<K>& v2,
+ const Vec3vf<K>& v3,
+ const GridMesh::Grid &g,
+ const SubGrid &subgrid,
+ const unsigned int i,
+ const Epilog& epilog) const
+ {
+ UVIdentity<K> mapUV;
+ PlueckerHitK<K,UVIdentity<K>> hit(mapUV);
+ PlueckerIntersectorK<M,K> intersector;
+
+ const vbool<K> valid0 = intersector.intersectK(valid,ray,v0,v1,v3,mapUV,hit);
+ if (any(valid0))
+ {
+ interpolateUV(valid0,hit,g,subgrid,i);
+ epilog(valid0,hit);
+ }
+ const vbool<K> valid1 = intersector.intersectK(valid,ray,v2,v3,v1,mapUV,hit);
+ if (any(valid1))
+ {
+ hit.U = hit.UVW - hit.U;
+ hit.V = hit.UVW - hit.V;
+ interpolateUV(valid1,hit,g,subgrid,i);
+ epilog(valid1,hit);
+ }
+ return any(valid0|valid1);
+ }
+
+ template<typename Epilog>
+ __forceinline bool occludedK(const vbool<K>& valid,
+ RayK<K>& ray,
+ const Vec3vf<K>& v0,
+ const Vec3vf<K>& v1,
+ const Vec3vf<K>& v2,
+ const Vec3vf<K>& v3,
+ const GridMesh::Grid &g,
+ const SubGrid &subgrid,
+ const unsigned int i,
+ const Epilog& epilog) const
+ {
+ UVIdentity<K> mapUV;
+ PlueckerHitK<K,UVIdentity<K>> hit(mapUV);
+ PlueckerIntersectorK<M,K> intersector;
+
+ vbool<K> valid_final = valid;
+ const vbool<K> valid0 = intersector.intersectK(valid,ray,v0,v1,v3,mapUV,hit);
+ if (any(valid0))
+ {
+ interpolateUV(valid0,hit,g,subgrid,i);
+ epilog(valid0,hit);
+ valid_final &= !valid0;
+ }
+ if (none(valid_final)) return true;
+ const vbool<K> valid1 = intersector.intersectK(valid,ray,v2,v3,v1,mapUV,hit);
+ if (any(valid1))
+ {
+ hit.U = hit.UVW - hit.U;
+ hit.V = hit.UVW - hit.V;
+ interpolateUV(valid1,hit,g,subgrid,i);
+ epilog(valid1,hit);
+ valid_final &= !valid1;
+ }
+ return none(valid_final);
+ }
+
+
+ };
+
+
+
+
+ template<int M, int K, bool filter>
+ struct SubGridQuadMIntersectorKPluecker : public SubGridQuadMIntersectorKPlueckerBase<M,K,filter>
+ {
+ __forceinline SubGridQuadMIntersectorKPluecker(const vbool<K>& valid, const RayK<K>& ray)
+ : SubGridQuadMIntersectorKPlueckerBase<M,K,filter>(valid,ray) {}
+
+ __forceinline void intersect1(RayHitK<K>& ray, size_t k, IntersectContext* context,
+ const Vec3vf<M>& v0, const Vec3vf<M>& v1, const Vec3vf<M>& v2, const Vec3vf<M>& v3, const GridMesh::Grid &g, const SubGrid &subgrid) const
+ {
+ UVIdentity<M> mapUV;
+ PlueckerHitM<M,UVIdentity<M>> hit(mapUV);
+ Intersect1KEpilogMU<M,K,filter> epilog(ray,k,context,subgrid.geomID(),subgrid.primID());
+ PlueckerIntersectorK<M,K> intersector;
+
+ /* intersect first triangle */
+ if (intersector.intersect(ray,k,v0,v1,v3,mapUV,hit))
+ {
+ interpolateUV<M>(hit,g,subgrid,vint<M>(0,1,1,0),vint<M>(0,0,1,1));
+ epilog(hit.valid,hit);
+ }
+
+ /* intersect second triangle */
+ if (intersector.intersect(ray,k,v2,v3,v1,mapUV,hit))
+ {
+ hit.U = hit.UVW - hit.U;
+ hit.V = hit.UVW - hit.V;
+ interpolateUV<M>(hit,g,subgrid,vint<M>(0,1,1,0),vint<M>(0,0,1,1));
+ epilog(hit.valid,hit);
+ }
+ }
+
+ __forceinline bool occluded1(RayK<K>& ray, size_t k, IntersectContext* context,
+ const Vec3vf<M>& v0, const Vec3vf<M>& v1, const Vec3vf<M>& v2, const Vec3vf<M>& v3, const GridMesh::Grid &g, const SubGrid &subgrid) const
+ {
+ UVIdentity<M> mapUV;
+ PlueckerHitM<M,UVIdentity<M>> hit(mapUV);
+ Occluded1KEpilogMU<M,K,filter> epilog(ray,k,context,subgrid.geomID(),subgrid.primID());
+ PlueckerIntersectorK<M,K> intersector;
+
+ /* intersect first triangle */
+ if (intersector.intersect(ray,k,v0,v1,v3,mapUV,hit))
+ {
+ interpolateUV<M>(hit,g,subgrid,vint<M>(0,1,1,0),vint<M>(0,0,1,1));
+ if (epilog(hit.valid,hit)) return true;
+ }
+
+ /* intersect second triangle */
+ if (intersector.intersect(ray,k,v2,v3,v1,mapUV,hit))
+ {
+ hit.U = hit.UVW - hit.U;
+ hit.V = hit.UVW - hit.V;
+ interpolateUV<M>(hit,g,subgrid,vint<M>(0,1,1,0),vint<M>(0,0,1,1));
+ if (epilog(hit.valid,hit)) return true;
+ }
+ return false;
+ }
+ };
+
+
+#if defined (__AVX__)
+
+ /*! Intersects 4 quads with 1 ray using AVX */
+ template<int K, bool filter>
+ struct SubGridQuadMIntersectorKPluecker<4,K,filter> : public SubGridQuadMIntersectorKPlueckerBase<4,K,filter>
+ {
+ __forceinline SubGridQuadMIntersectorKPluecker(const vbool<K>& valid, const RayK<K>& ray)
+ : SubGridQuadMIntersectorKPlueckerBase<4,K,filter>(valid,ray) {}
+
+ template<typename Epilog>
+ __forceinline bool intersect1(RayK<K>& ray, size_t k,const Vec3vf4& v0, const Vec3vf4& v1, const Vec3vf4& v2, const Vec3vf4& v3,
+ const GridMesh::Grid &g, const SubGrid &subgrid, const Epilog& epilog) const
+ {
+ const Vec3vf8 vtx0(vfloat8(v0.x,v2.x),vfloat8(v0.y,v2.y),vfloat8(v0.z,v2.z));
+#if !defined(EMBREE_BACKFACE_CULLING)
+ const Vec3vf8 vtx1(vfloat8(v1.x),vfloat8(v1.y),vfloat8(v1.z));
+ const Vec3vf8 vtx2(vfloat8(v3.x),vfloat8(v3.y),vfloat8(v3.z));
+#else
+ const Vec3vf8 vtx1(vfloat8(v1.x,v3.x),vfloat8(v1.y,v3.y),vfloat8(v1.z,v3.z));
+ const Vec3vf8 vtx2(vfloat8(v3.x,v1.x),vfloat8(v3.y,v1.y),vfloat8(v3.z,v1.z));
+#endif
+ UVIdentity<8> mapUV;
+ PlueckerHitM<8,UVIdentity<8>> hit(mapUV);
+ PlueckerIntersectorK<8,K> intersector;
+ const vbool8 flags(0,0,0,0,1,1,1,1);
+ if (unlikely(intersector.intersect(ray,k,vtx0,vtx1,vtx2,mapUV,hit)))
+ {
+ /* correct U,V interpolation across the entire grid */
+ const vfloat8 U = select(flags,hit.UVW - hit.V,hit.U);
+ const vfloat8 V = select(flags,hit.UVW - hit.U,hit.V);
+ hit.U = U;
+ hit.V = V;
+ hit.vNg *= select(flags,vfloat8(-1.0f),vfloat8(1.0f));
+ interpolateUV<8>(hit,g,subgrid,vint<8>(0,1,1,0,0,1,1,0),vint<8>(0,0,1,1,0,0,1,1));
+ if (unlikely(epilog(hit.valid,hit)))
+ return true;
+ }
+ return false;
+ }
+
+ __forceinline bool intersect1(RayHitK<K>& ray, size_t k, IntersectContext* context,
+ const Vec3vf4& v0, const Vec3vf4& v1, const Vec3vf4& v2, const Vec3vf4& v3, const GridMesh::Grid &g, const SubGrid &subgrid) const
+ {
+ return intersect1(ray,k,v0,v1,v2,v3,g,subgrid,Intersect1KEpilogMU<8,K,filter>(ray,k,context,subgrid.geomID(),subgrid.primID()));
+ }
+
+ __forceinline bool occluded1(RayK<K>& ray, size_t k, IntersectContext* context,
+ const Vec3vf4& v0, const Vec3vf4& v1, const Vec3vf4& v2, const Vec3vf4& v3, const GridMesh::Grid &g, const SubGrid &subgrid) const
+ {
+ return intersect1(ray,k,v0,v1,v2,v3,g,subgrid,Occluded1KEpilogMU<8,K,filter>(ray,k,context,subgrid.geomID(),subgrid.primID()));
+ }
+ };
+#endif
+
+
+ }
+}
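
The Pluecker variant above replaces the determinant-based test with three signed edge terms U, V, W computed from the triangle edges and the ray direction relative to the ray origin; the ray passes through the triangle when all three terms share a sign, and the normalized barycentrics are U/(U+V+W) and V/(U+V+W). A scalar sketch of that edge test follows, with hypothetical helper names and a fixed tolerance standing in for Embree's ulp-scaled epsilon.

// Illustrative scalar sketch of the Pluecker-style edge test used by
// PlueckerIntersector1 above; names are hypothetical, not Embree API.
#include <algorithm>
#include <cmath>

struct Vec3 { float x, y, z; };
static Vec3  sub(Vec3 a, Vec3 b)   { return {a.x-b.x, a.y-b.y, a.z-b.z}; }
static Vec3  add(Vec3 a, Vec3 b)   { return {a.x+b.x, a.y+b.y, a.z+b.z}; }
static Vec3  cross(Vec3 a, Vec3 b) { return {a.y*b.z-a.z*b.y, a.z*b.x-a.x*b.z, a.x*b.y-a.y*b.x}; }
static float dot(Vec3 a, Vec3 b)   { return a.x*b.x + a.y*b.y + a.z*b.z; }

// Edge test only: the ray (org, dir) passes through triangle (a,b,c) if the
// three signed edge terms U, V, W all share a sign (within tolerance).
bool plueckerEdgeTest(Vec3 org, Vec3 dir, Vec3 a, Vec3 b, Vec3 c,
                      float& U, float& V, float& W)
{
  // vertices relative to the ray origin
  const Vec3 v0 = sub(a, org), v1 = sub(b, org), v2 = sub(c, org);
  // triangle edges
  const Vec3 e0 = sub(v2, v0), e1 = sub(v0, v1), e2 = sub(v1, v2);
  // each term is proportional to the signed volume spanned by an edge and the ray
  U = dot(cross(e0, add(v2, v0)), dir);
  V = dot(cross(e1, add(v0, v1)), dir);
  W = dot(cross(e2, add(v1, v2)), dir);
  const float eps = 1e-6f * std::fabs(U + V + W);   // Embree scales this by ulp
  const float lo  = std::min({U, V, W});
  const float hi  = std::max({U, V, W});
  return (lo >= -eps) || (hi <= eps);               // all same sign within tolerance
}

// The normalized barycentrics are U/(U+V+W) and V/(U+V+W), which is why the
// subgrid code above flips the second triangle with hit.U = hit.UVW - hit.U.
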
diff --git a/thirdparty/embree-aarch64/kernels/geometry/subgrid_mb_intersector.h b/thirdparty/embree/kernels/geometry/subgrid_mb_intersector.h
index 400a88b985..473d656e24 100644
--- a/thirdparty/embree-aarch64/kernels/geometry/subgrid_mb_intersector.h
+++ b/thirdparty/embree/kernels/geometry/subgrid_mb_intersector.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
@@ -45,13 +45,13 @@ namespace embree
return PrimitivePointQuery1<Primitive>::pointQuery(query, context, subgrid);
}
- template<int Nx, bool robust>
- static __forceinline void intersect(const Accel::Intersectors* This, Precalculations& pre, RayHit& ray, IntersectContext* context, const Primitive* prim, size_t num, const TravRay<N,Nx,robust> &tray, size_t& lazy_node)
+ template<bool robust>
+ static __forceinline void intersect(const Accel::Intersectors* This, Precalculations& pre, RayHit& ray, IntersectContext* context, const Primitive* prim, size_t num, const TravRay<N,robust> &tray, size_t& lazy_node)
{
- BVHNQuantizedBaseNodeIntersector1<N,Nx,robust> isec1;
+ BVHNQuantizedBaseNodeIntersector1<N,robust> isec1;
for (size_t i=0;i<num;i++)
{
- vfloat<Nx> dist;
+ vfloat<N> dist;
const float time = prim[i].adjustTime(ray.time());
assert(time <= 1.0f);
@@ -68,15 +68,15 @@ namespace embree
}
}
- template<int Nx, bool robust>
- static __forceinline bool occluded(const Accel::Intersectors* This, Precalculations& pre, Ray& ray, IntersectContext* context, const Primitive* prim, size_t num, const TravRay<N,Nx,robust> &tray, size_t& lazy_node)
+ template<bool robust>
+ static __forceinline bool occluded(const Accel::Intersectors* This, Precalculations& pre, Ray& ray, IntersectContext* context, const Primitive* prim, size_t num, const TravRay<N,robust> &tray, size_t& lazy_node)
{
- BVHNQuantizedBaseNodeIntersector1<N,Nx,robust> isec1;
+ BVHNQuantizedBaseNodeIntersector1<N,robust> isec1;
for (size_t i=0;i<num;i++)
{
const float time = prim[i].adjustTime(ray.time());
assert(time <= 1.0f);
- vfloat<Nx> dist;
+ vfloat<N> dist;
size_t mask = isec1.intersect(&prim[i].qnode,tray,time,dist);
while(mask != 0)
{
@@ -132,7 +132,7 @@ namespace embree
const GridMesh::Grid &g = mesh->grid(subgrid.primID());
vfloat<K> ftime;
- const vint<K> itime = mesh->timeSegment(ray.time(), ftime);
+ const vint<K> itime = mesh->timeSegment<K>(ray.time(), ftime);
Vec3vf4 v0,v1,v2,v3; subgrid.gatherMB(v0,v1,v2,v3,context->scene,itime[k],ftime[k]);
pre.intersect1(ray,k,context,v0,v1,v2,v3,g,subgrid);
}
@@ -144,7 +144,7 @@ namespace embree
const GridMesh::Grid &g = mesh->grid(subgrid.primID());
vfloat<K> ftime;
- const vint<K> itime = mesh->timeSegment(ray.time(), ftime);
+ const vint<K> itime = mesh->timeSegment<K>(ray.time(), ftime);
Vec3vf4 v0,v1,v2,v3; subgrid.gatherMB(v0,v1,v2,v3,context->scene,itime[k],ftime[k]);
return pre.occluded1(ray,k,context,v0,v1,v2,v3,g,subgrid);
}
@@ -156,7 +156,7 @@ namespace embree
for (size_t j=0;j<num;j++)
{
size_t m_valid = movemask(prim[j].qnode.validMask());
- const vfloat<K> time = prim[j].adjustTime(ray.time());
+ const vfloat<K> time = prim[j].template adjustTime<K>(ray.time());
vfloat<K> dist;
while(m_valid)
@@ -177,7 +177,7 @@ namespace embree
for (size_t j=0;j<num;j++)
{
size_t m_valid = movemask(prim[j].qnode.validMask());
- const vfloat<K> time = prim[j].adjustTime(ray.time());
+ const vfloat<K> time = prim[j].template adjustTime<K>(ray.time());
vfloat<K> dist;
while(m_valid)
{
@@ -190,10 +190,10 @@ namespace embree
return !valid0;
}
- template<int Nx, bool robust>
- static __forceinline void intersect(const Accel::Intersectors* This, Precalculations& pre, RayHitK<K>& ray, size_t k, IntersectContext* context, const Primitive* prim, size_t num, const TravRay<N,Nx,robust> &tray, size_t& lazy_node)
+ template<bool robust>
+ static __forceinline void intersect(const Accel::Intersectors* This, Precalculations& pre, RayHitK<K>& ray, size_t k, IntersectContext* context, const Primitive* prim, size_t num, const TravRay<N,robust> &tray, size_t& lazy_node)
{
- BVHNQuantizedBaseNodeIntersector1<N,Nx,robust> isec1;
+ BVHNQuantizedBaseNodeIntersector1<N,robust> isec1;
for (size_t i=0;i<num;i++)
{
vfloat<N> dist;
@@ -210,10 +210,10 @@ namespace embree
}
}
- template<int Nx, bool robust>
- static __forceinline bool occluded(const Accel::Intersectors* This, Precalculations& pre, RayK<K>& ray, size_t k, IntersectContext* context, const Primitive* prim, size_t num, const TravRay<N,Nx,robust> &tray, size_t& lazy_node)
+ template<bool robust>
+ static __forceinline bool occluded(const Accel::Intersectors* This, Precalculations& pre, RayK<K>& ray, size_t k, IntersectContext* context, const Primitive* prim, size_t num, const TravRay<N,robust> &tray, size_t& lazy_node)
{
- BVHNQuantizedBaseNodeIntersector1<N,Nx,robust> isec1;
+ BVHNQuantizedBaseNodeIntersector1<N,robust> isec1;
for (size_t i=0;i<num;i++)
{
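
The subgrid_mb_intersector.h hunks above also spell out dependent template calls explicitly (timeSegment<K>(...) and prim[j].template adjustTime<K>(...)). Inside a template, a member of an object whose type depends on a template parameter is a dependent name, so the compiler needs the explicit template argument list and, for member templates accessed through such an object, the template keyword to parse the call. A minimal standalone illustration of the rule, with hypothetical names:

// Minimal example of the dependent-name rule behind the
// "prim[j].template adjustTime<K>(...)" change above; names are hypothetical.
template <int K>
struct Prim {
  template <int N>
  float adjustTime(float t) const { return t * static_cast<float>(N) / K; }
};

template <int K>
float caller(const Prim<K>& p, float t) {
  // Without ".template", "p.adjustTime<K>(t)" would parse as
  // (p.adjustTime < K) > (t), because adjustTime is a dependent name here.
  return p.template adjustTime<K>(t);
}

int main() {
  const float t = caller(Prim<4>{}, 1.0f);   // instantiates caller<4>
  return t == 1.0f ? 0 : 1;
}
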
diff --git a/thirdparty/embree-aarch64/kernels/geometry/triangle.h b/thirdparty/embree/kernels/geometry/triangle.h
index 0dedf6dc4c..24b758ae48 100644
--- a/thirdparty/embree-aarch64/kernels/geometry/triangle.h
+++ b/thirdparty/embree/kernels/geometry/triangle.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
diff --git a/thirdparty/embree-aarch64/kernels/geometry/triangle_intersector.h b/thirdparty/embree/kernels/geometry/triangle_intersector.h
index 125a42c5fe..2cdff78ec8 100644
--- a/thirdparty/embree-aarch64/kernels/geometry/triangle_intersector.h
+++ b/thirdparty/embree/kernels/geometry/triangle_intersector.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
@@ -11,24 +11,24 @@ namespace embree
namespace isa
{
/*! Intersects M triangles with 1 ray */
- template<int M, int Mx, bool filter>
+ template<int M, bool filter>
struct TriangleMIntersector1Moeller
{
typedef TriangleM<M> Primitive;
- typedef MoellerTrumboreIntersector1<Mx> Precalculations;
+ typedef MoellerTrumboreIntersector1<M> Precalculations;
/*! Intersect a ray with the M triangles and updates the hit. */
static __forceinline void intersect(const Precalculations& pre, RayHit& ray, IntersectContext* context, const TriangleM<M>& tri)
{
STAT3(normal.trav_prims,1,1,1);
- pre.intersectEdge(ray,tri.v0,tri.e1,tri.e2,Intersect1EpilogM<M,Mx,filter>(ray,context,tri.geomID(),tri.primID()));
+ pre.intersectEdge(ray,tri.v0,tri.e1,tri.e2,UVIdentity<M>(),Intersect1EpilogM<M,filter>(ray,context,tri.geomID(),tri.primID()));
}
/*! Test if the ray is occluded by one of M triangles. */
static __forceinline bool occluded(const Precalculations& pre, Ray& ray, IntersectContext* context, const TriangleM<M>& tri)
{
STAT3(shadow.trav_prims,1,1,1);
- return pre.intersectEdge(ray,tri.v0,tri.e1,tri.e2,Occluded1EpilogM<M,Mx,filter>(ray,context,tri.geomID(),tri.primID()));
+ return pre.intersectEdge(ray,tri.v0,tri.e1,tri.e2,UVIdentity<M>(),Occluded1EpilogM<M,filter>(ray,context,tri.geomID(),tri.primID()));
}
static __forceinline bool pointQuery(PointQuery* query, PointQueryContext* context, const Primitive& tri)
@@ -39,11 +39,11 @@ namespace embree
};
/*! Intersects M triangles with K rays. */
- template<int M, int Mx, int K, bool filter>
+ template<int M, int K, bool filter>
struct TriangleMIntersectorKMoeller
{
typedef TriangleM<M> Primitive;
- typedef MoellerTrumboreIntersectorK<Mx,K> Precalculations;
+ typedef MoellerTrumboreIntersectorK<M,K> Precalculations;
/*! Intersects K rays with M triangles. */
static __forceinline void intersect(const vbool<K>& valid_i, Precalculations& pre, RayHitK<K>& ray, IntersectContext* context, const TriangleM<M>& tri)
@@ -56,7 +56,7 @@ namespace embree
const Vec3vf<K> p0 = broadcast<vfloat<K>>(tri.v0,i);
const Vec3vf<K> e1 = broadcast<vfloat<K>>(tri.e1,i);
const Vec3vf<K> e2 = broadcast<vfloat<K>>(tri.e2,i);
- pre.intersectEdgeK(valid_i,ray,p0,e1,e2,IntersectKEpilogM<M,K,filter>(ray,context,tri.geomID(),tri.primID(),i));
+ pre.intersectEdgeK(valid_i,ray,p0,e1,e2,UVIdentity<K>(),IntersectKEpilogM<M,K,filter>(ray,context,tri.geomID(),tri.primID(),i));
}
}
@@ -72,7 +72,7 @@ namespace embree
const Vec3vf<K> p0 = broadcast<vfloat<K>>(tri.v0,i);
const Vec3vf<K> e1 = broadcast<vfloat<K>>(tri.e1,i);
const Vec3vf<K> e2 = broadcast<vfloat<K>>(tri.e2,i);
- pre.intersectEdgeK(valid0,ray,p0,e1,e2,OccludedKEpilogM<M,K,filter>(valid0,ray,context,tri.geomID(),tri.primID(),i));
+ pre.intersectEdgeK(valid0,ray,p0,e1,e2,UVIdentity<K>(),OccludedKEpilogM<M,K,filter>(valid0,ray,context,tri.geomID(),tri.primID(),i));
if (none(valid0)) break;
}
return !valid0;
@@ -82,14 +82,14 @@ namespace embree
static __forceinline void intersect(Precalculations& pre, RayHitK<K>& ray, size_t k, IntersectContext* context, const TriangleM<M>& tri)
{
STAT3(normal.trav_prims,1,1,1);
- pre.intersectEdge(ray,k,tri.v0,tri.e1,tri.e2,Intersect1KEpilogM<M,Mx,K,filter>(ray,k,context,tri.geomID(),tri.primID()));
+ pre.intersectEdge(ray,k,tri.v0,tri.e1,tri.e2,UVIdentity<M>(),Intersect1KEpilogM<M,K,filter>(ray,k,context,tri.geomID(),tri.primID()));
}
/*! Test if the ray is occluded by one of the M triangles. */
static __forceinline bool occluded(Precalculations& pre, RayK<K>& ray, size_t k, IntersectContext* context, const TriangleM<M>& tri)
{
STAT3(shadow.trav_prims,1,1,1);
- return pre.intersectEdge(ray,k,tri.v0,tri.e1,tri.e2,Occluded1KEpilogM<M,Mx,K,filter>(ray,k,context,tri.geomID(),tri.primID()));
+ return pre.intersectEdge(ray,k,tri.v0,tri.e1,tri.e2,UVIdentity<M>(),Occluded1KEpilogM<M,K,filter>(ray,k,context,tri.geomID(),tri.primID()));
}
};
}
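
The triangle_intersector.h hunks above thread a UV mapper (UVIdentity<M>() / UVIdentity<K>()) into every intersection epilog, and the hit types in the following hunks invoke it as mapUV(vu, vv, vNg) from finalize(). A simplified sketch of what such mappers can look like, assuming their only contract is to rewrite the interpolated u, v and geometric normal in place; scalar stand-in types are used instead of the real SIMD vfloat<M>/Vec3vf<M>, and FlipUV is purely hypothetical.

// Hedged sketch of UV mappers consistent with the mapUV(vu, vv, vNg) call
// sites in the hunks that follow; this mirrors the role of Embree's
// UVIdentity, not its exact definition.
struct Vec3 { float x, y, z; };

struct UVIdentitySketch {
  void operator()(float& /*u*/, float& /*v*/, Vec3& /*Ng*/) const {
    // identity: leave barycentric u,v and the geometric normal unchanged
  }
};

// A hypothetical non-identity mapper: remap the second triangle of a quad
// into the quad's parameter space (the subgrid code above instead does this
// explicitly via hit.U = absDen - hit.U before calling interpolateUV).
struct FlipUV {
  void operator()(float& u, float& v, Vec3& /*Ng*/) const { u = 1.0f - u; v = 1.0f - v; }
};
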
diff --git a/thirdparty/embree-aarch64/kernels/geometry/triangle_intersector_moeller.h b/thirdparty/embree/kernels/geometry/triangle_intersector_moeller.h
index b5a8519236..0a42d8f08b 100644
--- a/thirdparty/embree-aarch64/kernels/geometry/triangle_intersector_moeller.h
+++ b/thirdparty/embree/kernels/geometry/triangle_intersector_moeller.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
@@ -18,13 +18,13 @@ namespace embree
{
namespace isa
{
- template<int M>
+ template<int M, typename UVMapper>
struct MoellerTrumboreHitM
{
- __forceinline MoellerTrumboreHitM() {}
+ __forceinline MoellerTrumboreHitM(const UVMapper& mapUV) : mapUV(mapUV) {}
- __forceinline MoellerTrumboreHitM(const vbool<M>& valid, const vfloat<M>& U, const vfloat<M>& V, const vfloat<M>& T, const vfloat<M>& absDen, const Vec3vf<M>& Ng)
- : U(U), V(V), T(T), absDen(absDen), valid(valid), vNg(Ng) {}
+ __forceinline MoellerTrumboreHitM(const vbool<M>& valid, const vfloat<M>& U, const vfloat<M>& V, const vfloat<M>& T, const vfloat<M>& absDen, const Vec3vf<M>& Ng, const UVMapper& mapUV)
+ : U(U), V(V), T(T), absDen(absDen), mapUV(mapUV), valid(valid), vNg(Ng) {}
__forceinline void finalize()
{
@@ -32,8 +32,13 @@ namespace embree
vt = T * rcpAbsDen;
vu = U * rcpAbsDen;
vv = V * rcpAbsDen;
+ mapUV(vu,vv,vNg);
}
+ __forceinline Vec2vf<M> uv() const { return Vec2vf<M>(vu,vv); }
+ __forceinline vfloat<M> t () const { return vt; }
+ __forceinline Vec3vf<M> Ng() const { return vNg; }
+
__forceinline Vec2f uv (const size_t i) const { return Vec2f(vu[i],vv[i]); }
__forceinline float t (const size_t i) const { return vt[i]; }
__forceinline Vec3fa Ng(const size_t i) const { return Vec3fa(vNg.x[i],vNg.y[i],vNg.z[i]); }
@@ -43,6 +48,7 @@ namespace embree
vfloat<M> V;
vfloat<M> T;
vfloat<M> absDen;
+ UVMapper mapUV;
public:
vbool<M> valid;
@@ -52,20 +58,22 @@ namespace embree
Vec3vf<M> vNg;
};
- template<int M>
+ template<int M, bool early_out = true>
struct MoellerTrumboreIntersector1
{
__forceinline MoellerTrumboreIntersector1() {}
__forceinline MoellerTrumboreIntersector1(const Ray& ray, const void* ptr) {}
+ template<typename UVMapper>
__forceinline bool intersect(const vbool<M>& valid0,
Ray& ray,
const Vec3vf<M>& tri_v0,
const Vec3vf<M>& tri_e1,
const Vec3vf<M>& tri_e2,
const Vec3vf<M>& tri_Ng,
- MoellerTrumboreHitM<M>& hit) const
+ const UVMapper& mapUV,
+ MoellerTrumboreHitM<M,UVMapper>& hit) const
{
/* calculate denominator */
vbool<M> valid = valid0;
@@ -88,122 +96,160 @@ namespace embree
#else
valid &= (den != vfloat<M>(zero)) & (U >= 0.0f) & (V >= 0.0f) & (U+V<=absDen);
#endif
- if (likely(none(valid))) return false;
+ if (likely(early_out && none(valid))) return false;
/* perform depth test */
const vfloat<M> T = dot(Vec3vf<M>(tri_Ng),C) ^ sgnDen;
valid &= (absDen*vfloat<M>(ray.tnear()) < T) & (T <= absDen*vfloat<M>(ray.tfar));
- if (likely(none(valid))) return false;
-
-
+ if (likely(early_out && none(valid))) return false;
+
/* update hit information */
- new (&hit) MoellerTrumboreHitM<M>(valid,U,V,T,absDen,tri_Ng);
+ new (&hit) MoellerTrumboreHitM<M,UVMapper>(valid,U,V,T,absDen,tri_Ng,mapUV);
return true;
}
+ template<typename UVMapper>
+ __forceinline bool intersectEdge(const vbool<M>& valid,
+ Ray& ray,
+ const Vec3vf<M>& tri_v0,
+ const Vec3vf<M>& tri_e1,
+ const Vec3vf<M>& tri_e2,
+ const UVMapper& mapUV,
+ MoellerTrumboreHitM<M,UVMapper>& hit) const
+ {
+ const Vec3<vfloat<M>> tri_Ng = cross(tri_e2,tri_e1);
+ return intersect(valid,ray,tri_v0,tri_e1,tri_e2,tri_Ng,mapUV,hit);
+ }
+
+ template<typename UVMapper>
__forceinline bool intersectEdge(Ray& ray,
const Vec3vf<M>& tri_v0,
const Vec3vf<M>& tri_e1,
const Vec3vf<M>& tri_e2,
- MoellerTrumboreHitM<M>& hit) const
+ const UVMapper& mapUV,
+ MoellerTrumboreHitM<M,UVMapper>& hit) const
{
vbool<M> valid = true;
const Vec3<vfloat<M>> tri_Ng = cross(tri_e2,tri_e1);
- return intersect(valid,ray,tri_v0,tri_e1,tri_e2,tri_Ng,hit);
+ return intersect(valid,ray,tri_v0,tri_e1,tri_e2,tri_Ng,mapUV,hit);
}
-
+
+ template<typename UVMapper>
__forceinline bool intersect(Ray& ray,
const Vec3vf<M>& v0,
const Vec3vf<M>& v1,
const Vec3vf<M>& v2,
- MoellerTrumboreHitM<M>& hit) const
+ const UVMapper& mapUV,
+ MoellerTrumboreHitM<M,UVMapper>& hit) const
{
const Vec3vf<M> e1 = v0-v1;
const Vec3vf<M> e2 = v2-v0;
- return intersectEdge(ray,v0,e1,e2,hit);
+ return intersectEdge(ray,v0,e1,e2,mapUV,hit);
}
+ template<typename UVMapper>
__forceinline bool intersect(const vbool<M>& valid,
Ray& ray,
const Vec3vf<M>& v0,
const Vec3vf<M>& v1,
const Vec3vf<M>& v2,
- MoellerTrumboreHitM<M>& hit) const
+ const UVMapper& mapUV,
+ MoellerTrumboreHitM<M,UVMapper>& hit) const
{
const Vec3vf<M> e1 = v0-v1;
const Vec3vf<M> e2 = v2-v0;
- return intersectEdge(valid,ray,v0,e1,e2,hit);
+ return intersectEdge(valid,ray,v0,e1,e2,mapUV,hit);
}
- template<typename Epilog>
+ template<typename UVMapper, typename Epilog>
__forceinline bool intersectEdge(Ray& ray,
const Vec3vf<M>& v0,
const Vec3vf<M>& e1,
const Vec3vf<M>& e2,
+ const UVMapper& mapUV,
const Epilog& epilog) const
{
- MoellerTrumboreHitM<M> hit;
- if (likely(intersectEdge(ray,v0,e1,e2,hit))) return epilog(hit.valid,hit);
+ MoellerTrumboreHitM<M,UVMapper> hit(mapUV);
+ if (likely(intersectEdge(ray,v0,e1,e2,mapUV,hit))) return epilog(hit.valid,hit);
return false;
}
- template<typename Epilog>
+ template<typename UVMapper, typename Epilog>
__forceinline bool intersect(Ray& ray,
const Vec3vf<M>& v0,
const Vec3vf<M>& v1,
const Vec3vf<M>& v2,
+ const UVMapper& mapUV,
const Epilog& epilog) const
{
- MoellerTrumboreHitM<M> hit;
- if (likely(intersect(ray,v0,v1,v2,hit))) return epilog(hit.valid,hit);
+ MoellerTrumboreHitM<M,UVMapper> hit(mapUV);
+ if (likely(intersect(ray,v0,v1,v2,mapUV,hit))) return epilog(hit.valid,hit);
return false;
}
template<typename Epilog>
+ __forceinline bool intersect(Ray& ray,
+ const Vec3vf<M>& v0,
+ const Vec3vf<M>& v1,
+ const Vec3vf<M>& v2,
+ const Epilog& epilog) const
+ {
+ auto mapUV = UVIdentity<M>();
+ MoellerTrumboreHitM<M,UVIdentity<M>> hit(mapUV);
+ if (likely(intersect(ray,v0,v1,v2,mapUV,hit))) return epilog(hit.valid,hit);
+ return false;
+ }
+
+ template<typename UVMapper, typename Epilog>
__forceinline bool intersect(const vbool<M>& valid,
Ray& ray,
const Vec3vf<M>& v0,
const Vec3vf<M>& v1,
const Vec3vf<M>& v2,
+ const UVMapper& mapUV,
const Epilog& epilog) const
{
- MoellerTrumboreHitM<M> hit;
- if (likely(intersect(valid,ray,v0,v1,v2,hit))) return epilog(hit.valid,hit);
+ MoellerTrumboreHitM<M,UVMapper> hit(mapUV);
+ if (likely(intersect(valid,ray,v0,v1,v2,mapUV,hit))) return epilog(hit.valid,hit);
return false;
}
};
- template<int K>
+ template<int K, typename UVMapper>
struct MoellerTrumboreHitK
{
- __forceinline MoellerTrumboreHitK(const vfloat<K>& U, const vfloat<K>& V, const vfloat<K>& T, const vfloat<K>& absDen, const Vec3vf<K>& Ng)
- : U(U), V(V), T(T), absDen(absDen), Ng(Ng) {}
+ __forceinline MoellerTrumboreHitK(const UVMapper& mapUV) : mapUV(mapUV) {}
+ __forceinline MoellerTrumboreHitK(const vfloat<K>& U, const vfloat<K>& V, const vfloat<K>& T, const vfloat<K>& absDen, const Vec3vf<K>& Ng, const UVMapper& mapUV)
+ : U(U), V(V), T(T), absDen(absDen), Ng(Ng), mapUV(mapUV) {}
__forceinline std::tuple<vfloat<K>,vfloat<K>,vfloat<K>,Vec3vf<K>> operator() () const
{
const vfloat<K> rcpAbsDen = rcp(absDen);
const vfloat<K> t = T * rcpAbsDen;
- const vfloat<K> u = U * rcpAbsDen;
- const vfloat<K> v = V * rcpAbsDen;
- return std::make_tuple(u,v,t,Ng);
+ vfloat<K> u = U * rcpAbsDen;
+ vfloat<K> v = V * rcpAbsDen;
+ Vec3vf<K> vNg = Ng;
+ mapUV(u,v,vNg);
+ return std::make_tuple(u,v,t,vNg);
}
- private:
- const vfloat<K> U;
- const vfloat<K> V;
+ vfloat<K> U;
+ vfloat<K> V;
const vfloat<K> T;
const vfloat<K> absDen;
const Vec3vf<K> Ng;
+ const UVMapper& mapUV;
};
template<int M, int K>
struct MoellerTrumboreIntersectorK
{
+ __forceinline MoellerTrumboreIntersectorK() {}
__forceinline MoellerTrumboreIntersectorK(const vbool<K>& valid, const RayK<K>& ray) {}
/*! Intersects K rays with one of M triangles. */
- template<typename Epilog>
+ template<typename UVMapper>
__forceinline vbool<K> intersectK(const vbool<K>& valid0,
//RayK<K>& ray,
const Vec3vf<K>& ray_org,
@@ -214,7 +260,8 @@ namespace embree
const Vec3vf<K>& tri_e1,
const Vec3vf<K>& tri_e2,
const Vec3vf<K>& tri_Ng,
- const Epilog& epilog) const
+ const UVMapper& mapUV,
+ MoellerTrumboreHitK<K,UVMapper> &hit) const
{
/* calculate denominator */
vbool<K> valid = valid0;
@@ -254,11 +301,47 @@ namespace embree
#endif
/* calculate hit information */
- MoellerTrumboreHitK<K> hit(U,V,T,absDen,tri_Ng);
- return epilog(valid,hit);
+ new (&hit) MoellerTrumboreHitK<K,UVMapper>(U,V,T,absDen,tri_Ng,mapUV);
+ return valid;
+ }
+
+ /*! Intersects K rays with one of M triangles. */
+ template<typename UVMapper>
+ __forceinline vbool<K> intersectK(const vbool<K>& valid0,
+ RayK<K>& ray,
+ const Vec3vf<K>& tri_v0,
+ const Vec3vf<K>& tri_v1,
+ const Vec3vf<K>& tri_v2,
+ const UVMapper& mapUV,
+ MoellerTrumboreHitK<K,UVMapper> &hit) const
+ {
+ const Vec3vf<K> e1 = tri_v0-tri_v1;
+ const Vec3vf<K> e2 = tri_v2-tri_v0;
+ const Vec3vf<K> Ng = cross(e2,e1);
+ return intersectK(valid0,ray.org,ray.dir,ray.tnear(),ray.tfar,tri_v0,e1,e2,Ng,mapUV,hit);
}
+
/*! Intersects K rays with one of M triangles. */
+ template<typename UVMapper, typename Epilog>
+ __forceinline vbool<K> intersectK(const vbool<K>& valid0,
+ RayK<K>& ray,
+ const Vec3vf<K>& tri_v0,
+ const Vec3vf<K>& tri_v1,
+ const Vec3vf<K>& tri_v2,
+ const UVMapper& mapUV,
+ const Epilog& epilog) const
+ {
+ MoellerTrumboreHitK<K,UVIdentity<K>> hit(mapUV);
+ const Vec3vf<K> e1 = tri_v0-tri_v1;
+ const Vec3vf<K> e2 = tri_v2-tri_v0;
+ const Vec3vf<K> Ng = cross(e2,e1);
+ const vbool<K> valid = intersectK(valid0,ray.org,ray.dir,ray.tnear(),ray.tfar,tri_v0,e1,e2,Ng,mapUV,hit);
+ return epilog(valid,hit);
+ }
+
+
+
template<typename Epilog>
__forceinline vbool<K> intersectK(const vbool<K>& valid0,
RayK<K>& ray,
@@ -267,32 +350,40 @@ namespace embree
const Vec3vf<K>& tri_v2,
const Epilog& epilog) const
{
+ UVIdentity<K> mapUV;
+ MoellerTrumboreHitK<K,UVIdentity<K>> hit(mapUV);
const Vec3vf<K> e1 = tri_v0-tri_v1;
const Vec3vf<K> e2 = tri_v2-tri_v0;
const Vec3vf<K> Ng = cross(e2,e1);
- return intersectK(valid0,ray.org,ray.dir,ray.tnear(),ray.tfar,tri_v0,e1,e2,Ng,epilog);
+ const vbool<K> valid = intersectK(valid0,ray.org,ray.dir,ray.tnear(),ray.tfar,tri_v0,e1,e2,Ng,mapUV,hit);
+ return epilog(valid,hit);
}
/*! Intersects K rays with one of M triangles. */
- template<typename Epilog>
+ template<typename UVMapper, typename Epilog>
__forceinline vbool<K> intersectEdgeK(const vbool<K>& valid0,
RayK<K>& ray,
const Vec3vf<K>& tri_v0,
const Vec3vf<K>& tri_e1,
- const Vec3vf<K>& tri_e2,
+ const Vec3vf<K>& tri_e2,
+ const UVMapper& mapUV,
const Epilog& epilog) const
{
+ MoellerTrumboreHitK<K,UVIdentity<K>> hit(mapUV);
const Vec3vf<K> tri_Ng = cross(tri_e2,tri_e1);
- return intersectK(valid0,ray.org,ray.dir,ray.tnear(),ray.tfar,tri_v0,tri_e1,tri_e2,tri_Ng,epilog);
+ const vbool<K> valid = intersectK(valid0,ray.org,ray.dir,ray.tnear(),ray.tfar,tri_v0,tri_e1,tri_e2,tri_Ng,mapUV,hit);
+ return epilog(valid,hit);
}
/*! Intersect k'th ray from ray packet of size K with M triangles. */
+ template<typename UVMapper>
__forceinline bool intersectEdge(RayK<K>& ray,
size_t k,
const Vec3vf<M>& tri_v0,
const Vec3vf<M>& tri_e1,
const Vec3vf<M>& tri_e2,
- MoellerTrumboreHitM<M>& hit) const
+ const UVMapper& mapUV,
+ MoellerTrumboreHitM<M,UVMapper>& hit) const
{
/* calculate denominator */
typedef Vec3vf<M> Vec3vfM;
@@ -324,19 +415,21 @@ namespace embree
if (likely(none(valid))) return false;
/* calculate hit information */
- new (&hit) MoellerTrumboreHitM<M>(valid,U,V,T,absDen,tri_Ng);
+ new (&hit) MoellerTrumboreHitM<M,UVMapper>(valid,U,V,T,absDen,tri_Ng,mapUV);
return true;
}
+ template<typename UVMapper>
__forceinline bool intersectEdge(RayK<K>& ray,
size_t k,
const BBox<vfloat<M>>& time_range,
const Vec3vf<M>& tri_v0,
const Vec3vf<M>& tri_e1,
- const Vec3vf<M>& tri_e2,
- MoellerTrumboreHitM<M>& hit) const
+ const Vec3vf<M>& tri_e2,
+ const UVMapper& mapUV,
+ MoellerTrumboreHitM<M,UVMapper>& hit) const
{
- if (likely(intersect(ray,k,tri_v0,tri_e1,tri_e2,hit)))
+ if (likely(intersect(ray,k,tri_v0,tri_e1,tri_e2,mapUV,hit)))
{
hit.valid &= time_range.lower <= vfloat<M>(ray.time[k]);
hit.valid &= vfloat<M>(ray.time[k]) < time_range.upper;
@@ -345,58 +438,87 @@ namespace embree
return false;
}
- template<typename Epilog>
+ template<typename UVMapper>
+ __forceinline bool intersect(RayK<K>& ray,
+ size_t k,
+ const Vec3vf<M>& v0,
+ const Vec3vf<M>& v1,
+ const Vec3vf<M>& v2,
+ const UVMapper& mapUV,
+ MoellerTrumboreHitM<M,UVMapper>& hit) const
+ {
+ const Vec3vf<M> e1 = v0-v1;
+ const Vec3vf<M> e2 = v2-v0;
+ return intersectEdge(ray,k,v0,e1,e2,mapUV,hit);
+ }
+
+ template<typename UVMapper, typename Epilog>
__forceinline bool intersectEdge(RayK<K>& ray,
size_t k,
const Vec3vf<M>& tri_v0,
const Vec3vf<M>& tri_e1,
- const Vec3vf<M>& tri_e2,
+ const Vec3vf<M>& tri_e2,
+ const UVMapper& mapUV,
const Epilog& epilog) const
{
- MoellerTrumboreHitM<M> hit;
- if (likely(intersectEdge(ray,k,tri_v0,tri_e1,tri_e2,hit))) return epilog(hit.valid,hit);
+ MoellerTrumboreHitM<M,UVMapper> hit(mapUV);
+ if (likely(intersectEdge(ray,k,tri_v0,tri_e1,tri_e2,mapUV,hit))) return epilog(hit.valid,hit);
return false;
}
- template<typename Epilog>
+ template<typename UVMapper, typename Epilog>
__forceinline bool intersectEdge(RayK<K>& ray,
size_t k,
const BBox<vfloat<M>>& time_range,
const Vec3vf<M>& tri_v0,
const Vec3vf<M>& tri_e1,
- const Vec3vf<M>& tri_e2,
+ const Vec3vf<M>& tri_e2,
+ const UVMapper& mapUV,
const Epilog& epilog) const
{
- MoellerTrumboreHitM<M> hit;
- if (likely(intersectEdge(ray,k,time_range,tri_v0,tri_e1,tri_e2,hit))) return epilog(hit.valid,hit);
+ MoellerTrumboreHitM<M,UVMapper> hit(mapUV);
+ if (likely(intersectEdge(ray,k,time_range,tri_v0,tri_e1,tri_e2,mapUV,hit))) return epilog(hit.valid,hit);
return false;
}
- template<typename Epilog>
+ template<typename UVMapper, typename Epilog>
__forceinline bool intersect(RayK<K>& ray,
size_t k,
const Vec3vf<M>& v0,
const Vec3vf<M>& v1,
- const Vec3vf<M>& v2,
+ const Vec3vf<M>& v2,
+ const UVMapper& mapUV,
const Epilog& epilog) const
{
const Vec3vf<M> e1 = v0-v1;
const Vec3vf<M> e2 = v2-v0;
- return intersectEdge(ray,k,v0,e1,e2,epilog);
+ return intersectEdge(ray,k,v0,e1,e2,mapUV,epilog);
}
template<typename Epilog>
__forceinline bool intersect(RayK<K>& ray,
size_t k,
+ const Vec3vf<M>& v0,
+ const Vec3vf<M>& v1,
+ const Vec3vf<M>& v2,
+ const Epilog& epilog) const
+ {
+ return intersect(ray,k,v0,v1,v2,UVIdentity<M>(),epilog);
+ }
+
+ template<typename UVMapper, typename Epilog>
+ __forceinline bool intersect(RayK<K>& ray,
+ size_t k,
const BBox<vfloat<M>>& time_range,
const Vec3vf<M>& v0,
const Vec3vf<M>& v1,
const Vec3vf<M>& v2,
+ const UVMapper& mapUV,
const Epilog& epilog) const
{
const Vec3vf<M> e1 = v0-v1;
const Vec3vf<M> e2 = v2-v0;
- return intersectEdge(ray,k,time_range,v0,e1,e2,epilog);
+ return intersectEdge(ray,k,time_range,v0,e1,e2,mapUV,epilog);
}
};
}
diff --git a/thirdparty/embree-aarch64/kernels/geometry/triangle_intersector_pluecker.h b/thirdparty/embree/kernels/geometry/triangle_intersector_pluecker.h
index f1de99d208..8fbefcea88 100644
--- a/thirdparty/embree-aarch64/kernels/geometry/triangle_intersector_pluecker.h
+++ b/thirdparty/embree/kernels/geometry/triangle_intersector_pluecker.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
@@ -22,50 +22,60 @@ namespace embree
template<int M, typename UVMapper>
struct PlueckerHitM
{
- __forceinline PlueckerHitM(const vfloat<M>& U, const vfloat<M>& V, const vfloat<M>& UVW, const vfloat<M>& t, const Vec3vf<M>& Ng, const UVMapper& mapUV)
- : U(U), V(V), UVW(UVW), mapUV(mapUV), vt(t), vNg(Ng) {}
+ __forceinline PlueckerHitM(const UVMapper& mapUV) : mapUV(mapUV) {}
+
+ __forceinline PlueckerHitM(const vbool<M>& valid, const vfloat<M>& U, const vfloat<M>& V, const vfloat<M>& UVW, const vfloat<M>& t, const Vec3vf<M>& Ng, const UVMapper& mapUV)
+ : U(U), V(V), UVW(UVW), mapUV(mapUV), valid(valid), vt(t), vNg(Ng) {}
__forceinline void finalize()
{
const vbool<M> invalid = abs(UVW) < min_rcp_input;
const vfloat<M> rcpUVW = select(invalid,vfloat<M>(0.0f),rcp(UVW));
- vu = U * rcpUVW;
- vv = V * rcpUVW;
- mapUV(vu,vv);
+ vu = min(U * rcpUVW,1.0f);
+ vv = min(V * rcpUVW,1.0f);
+ mapUV(vu,vv,vNg);
}
-
+
+ __forceinline Vec2vf<M> uv() const { return Vec2vf<M>(vu,vv); }
+ __forceinline vfloat<M> t () const { return vt; }
+ __forceinline Vec3vf<M> Ng() const { return vNg; }
+
__forceinline Vec2f uv (const size_t i) const { return Vec2f(vu[i],vv[i]); }
__forceinline float t (const size_t i) const { return vt[i]; }
__forceinline Vec3fa Ng(const size_t i) const { return Vec3fa(vNg.x[i],vNg.y[i],vNg.z[i]); }
- private:
- const vfloat<M> U;
- const vfloat<M> V;
- const vfloat<M> UVW;
+ public:
+ vfloat<M> U;
+ vfloat<M> V;
+ vfloat<M> UVW;
const UVMapper& mapUV;
public:
+ vbool<M> valid;
vfloat<M> vu;
vfloat<M> vv;
vfloat<M> vt;
Vec3vf<M> vNg;
};
- template<int M>
+ template<int M, bool early_out = true>
struct PlueckerIntersector1
{
__forceinline PlueckerIntersector1() {}
__forceinline PlueckerIntersector1(const Ray& ray, const void* ptr) {}
- template<typename UVMapper, typename Epilog>
- __forceinline bool intersect(Ray& ray,
+ template<typename UVMapper>
+ __forceinline bool intersect(const vbool<M>& valid0,
+ Ray& ray,
const Vec3vf<M>& tri_v0,
const Vec3vf<M>& tri_v1,
const Vec3vf<M>& tri_v2,
const UVMapper& mapUV,
- const Epilog& epilog) const
+ PlueckerHitM<M,UVMapper>& hit) const
{
+ vbool<M> valid = valid0;
+
/* calculate vertices relative to ray origin */
const Vec3vf<M> O = Vec3vf<M>((Vec3fa)ray.org);
const Vec3vf<M> D = Vec3vf<M>((Vec3fa)ray.dir);
@@ -85,11 +95,11 @@ namespace embree
const vfloat<M> UVW = U+V+W;
const vfloat<M> eps = float(ulp)*abs(UVW);
#if defined(EMBREE_BACKFACE_CULLING)
- vbool<M> valid = max(U,V,W) <= eps;
+ valid &= max(U,V,W) <= eps;
#else
- vbool<M> valid = (min(U,V,W) >= -eps) | (max(U,V,W) <= eps);
+ valid &= (min(U,V,W) >= -eps) | (max(U,V,W) <= eps);
#endif
- if (unlikely(none(valid))) return false;
+ if (unlikely(early_out && none(valid))) return false;
/* calculate geometry normal and denominator */
const Vec3vf<M> Ng = stable_triangle_normal(e0,e1,e2);
@@ -100,33 +110,123 @@ namespace embree
const vfloat<M> t = rcp(den)*T;
valid &= vfloat<M>(ray.tnear()) <= t & t <= vfloat<M>(ray.tfar);
valid &= den != vfloat<M>(zero);
- if (unlikely(none(valid))) return false;
+ if (unlikely(early_out && none(valid))) return false;
/* update hit information */
- PlueckerHitM<M,UVMapper> hit(U,V,UVW,t,Ng,mapUV);
- return epilog(valid,hit);
+ new (&hit) PlueckerHitM<M,UVMapper>(valid,U,V,UVW,t,Ng,mapUV);
+ return true;
+ }
+
+ template<typename UVMapper>
+ __forceinline bool intersectEdge(const vbool<M>& valid,
+ Ray& ray,
+ const Vec3vf<M>& tri_v0,
+ const Vec3vf<M>& tri_v1,
+ const Vec3vf<M>& tri_v2,
+ const UVMapper& mapUV,
+ PlueckerHitM<M,UVMapper>& hit) const
+ {
+ return intersect(valid,ray,tri_v0,tri_v1,tri_v2,mapUV,hit);
+ }
+
+ template<typename UVMapper>
+ __forceinline bool intersectEdge(Ray& ray,
+ const Vec3vf<M>& tri_v0,
+ const Vec3vf<M>& tri_v1,
+ const Vec3vf<M>& tri_v2,
+ const UVMapper& mapUV,
+ PlueckerHitM<M,UVMapper>& hit) const
+ {
+ vbool<M> valid = true;
+ return intersect(valid,ray,tri_v0,tri_v1,tri_v2,mapUV,hit);
+ }
+
+ template<typename UVMapper>
+ __forceinline bool intersect(Ray& ray,
+ const Vec3vf<M>& tri_v0,
+ const Vec3vf<M>& tri_v1,
+ const Vec3vf<M>& tri_v2,
+ const UVMapper& mapUV,
+ PlueckerHitM<M,UVMapper>& hit) const
+ {
+ return intersectEdge(ray,tri_v0,tri_v1,tri_v2,mapUV,hit);
+ }
+
+ template<typename UVMapper, typename Epilog>
+ __forceinline bool intersectEdge(Ray& ray,
+ const Vec3vf<M>& v0,
+ const Vec3vf<M>& e1,
+ const Vec3vf<M>& e2,
+ const UVMapper& mapUV,
+ const Epilog& epilog) const
+ {
+ PlueckerHitM<M,UVMapper> hit(mapUV);
+ if (likely(intersectEdge(ray,v0,e1,e2,mapUV,hit))) return epilog(hit.valid,hit);
+ return false;
+ }
+
+ template<typename UVMapper, typename Epilog>
+ __forceinline bool intersect(Ray& ray,
+ const Vec3vf<M>& v0,
+ const Vec3vf<M>& v1,
+ const Vec3vf<M>& v2,
+ const UVMapper& mapUV,
+ const Epilog& epilog) const
+ {
+ PlueckerHitM<M,UVMapper> hit(mapUV);
+ if (likely(intersect(ray,v0,v1,v2,mapUV,hit))) return epilog(hit.valid,hit);
+ return false;
}
+
+ template<typename Epilog>
+ __forceinline bool intersect(Ray& ray,
+ const Vec3vf<M>& v0,
+ const Vec3vf<M>& v1,
+ const Vec3vf<M>& v2,
+ const Epilog& epilog) const
+ {
+ auto mapUV = UVIdentity<M>();
+ PlueckerHitM<M,UVIdentity<M>> hit(mapUV);
+ if (likely(intersect(ray,v0,v1,v2,mapUV,hit))) return epilog(hit.valid,hit);
+ return false;
+ }
+
+ template<typename UVMapper, typename Epilog>
+ __forceinline bool intersect(const vbool<M>& valid,
+ Ray& ray,
+ const Vec3vf<M>& v0,
+ const Vec3vf<M>& v1,
+ const Vec3vf<M>& v2,
+ const UVMapper& mapUV,
+ const Epilog& epilog) const
+ {
+ PlueckerHitM<M,UVMapper> hit(mapUV);
+ if (likely(intersect(valid,ray,v0,v1,v2,mapUV,hit))) return epilog(hit.valid,hit);
+ return false;
+ }
+
};
template<int K, typename UVMapper>
struct PlueckerHitK
{
+ __forceinline PlueckerHitK(const UVMapper& mapUV) : mapUV(mapUV) {}
+
__forceinline PlueckerHitK(const vfloat<K>& U, const vfloat<K>& V, const vfloat<K>& UVW, const vfloat<K>& t, const Vec3vf<K>& Ng, const UVMapper& mapUV)
- : U(U), V(V), UVW(UVW), t(t), Ng(Ng), mapUV(mapUV) {}
+ : U(U), V(V), UVW(UVW), t(t), Ng(Ng), mapUV(mapUV) {}
__forceinline std::tuple<vfloat<K>,vfloat<K>,vfloat<K>,Vec3vf<K>> operator() () const
{
const vbool<K> invalid = abs(UVW) < min_rcp_input;
const vfloat<K> rcpUVW = select(invalid,vfloat<K>(0.0f),rcp(UVW));
- vfloat<K> u = U * rcpUVW;
- vfloat<K> v = V * rcpUVW;
- mapUV(u,v);
- return std::make_tuple(u,v,t,Ng);
+ vfloat<K> u = min(U * rcpUVW,1.0f);
+ vfloat<K> v = min(V * rcpUVW,1.0f);
+ Vec3vf<K> vNg = Ng;
+ mapUV(u,v,vNg);
+ return std::make_tuple(u,v,t,vNg);
}
-
- private:
- const vfloat<K> U;
- const vfloat<K> V;
+ vfloat<K> U;
+ vfloat<K> V;
const vfloat<K> UVW;
const vfloat<K> t;
const Vec3vf<K> Ng;
@@ -136,17 +236,18 @@ namespace embree
template<int M, int K>
struct PlueckerIntersectorK
{
+ __forceinline PlueckerIntersectorK() {}
__forceinline PlueckerIntersectorK(const vbool<K>& valid, const RayK<K>& ray) {}
/*! Intersects K rays with one of M triangles. */
- template<typename UVMapper, typename Epilog>
+ template<typename UVMapper>
__forceinline vbool<K> intersectK(const vbool<K>& valid0,
- RayK<K>& ray,
- const Vec3vf<K>& tri_v0,
- const Vec3vf<K>& tri_v1,
- const Vec3vf<K>& tri_v2,
- const UVMapper& mapUV,
- const Epilog& epilog) const
+ RayK<K>& ray,
+ const Vec3vf<K>& tri_v0,
+ const Vec3vf<K>& tri_v1,
+ const Vec3vf<K>& tri_v2,
+ const UVMapper& mapUV,
+ PlueckerHitK<K,UVMapper> &hit) const
{
/* calculate vertices relative to ray origin */
vbool<K> valid = valid0;
@@ -172,7 +273,7 @@ namespace embree
#else
valid &= (min(U,V,W) >= -eps) | (max(U,V,W) <= eps);
#endif
- if (unlikely(none(valid))) return false;
+ if (unlikely(none(valid))) return valid;
/* calculate geometry normal and denominator */
const Vec3vf<K> Ng = stable_triangle_normal(e0,e1,e2);
@@ -183,21 +284,49 @@ namespace embree
const vfloat<K> t = rcp(den)*T;
valid &= ray.tnear() <= t & t <= ray.tfar;
valid &= den != vfloat<K>(zero);
- if (unlikely(none(valid))) return false;
+ if (unlikely(none(valid))) return valid;
/* calculate hit information */
- PlueckerHitK<K,UVMapper> hit(U,V,UVW,t,Ng,mapUV);
- return epilog(valid,hit);
+ new (&hit) PlueckerHitK<K,UVMapper>(U,V,UVW,t,Ng,mapUV);
+ return valid;
+ }
+
+ template<typename Epilog>
+ __forceinline vbool<K> intersectK(const vbool<K>& valid0,
+ RayK<K>& ray,
+ const Vec3vf<K>& tri_v0,
+ const Vec3vf<K>& tri_v1,
+ const Vec3vf<K>& tri_v2,
+ const Epilog& epilog) const
+ {
+ UVIdentity<K> mapUV;
+ PlueckerHitK<K,UVIdentity<K>> hit(mapUV);
+ const vbool<K> valid = intersectK(valid0,ray,tri_v0,tri_v1,tri_v2,mapUV,hit);
+ return epilog(valid,hit);
}
- /*! Intersect k'th ray from ray packet of size K with M triangles. */
template<typename UVMapper, typename Epilog>
+ __forceinline vbool<K> intersectK(const vbool<K>& valid0,
+ RayK<K>& ray,
+ const Vec3vf<K>& tri_v0,
+ const Vec3vf<K>& tri_v1,
+ const Vec3vf<K>& tri_v2,
+ const UVMapper& mapUV,
+ const Epilog& epilog) const
+ {
+ PlueckerHitK<K,UVMapper> hit(mapUV);
+ const vbool<K> valid = intersectK(valid0,ray,tri_v0,tri_v1,tri_v2,mapUV,hit);
+ return epilog(valid,hit);
+ }
+
+ /*! Intersect k'th ray from ray packet of size K with M triangles. */
+ template<typename UVMapper>
__forceinline bool intersect(RayK<K>& ray, size_t k,
const Vec3vf<M>& tri_v0,
const Vec3vf<M>& tri_v1,
const Vec3vf<M>& tri_v2,
const UVMapper& mapUV,
- const Epilog& epilog) const
+ PlueckerHitM<M,UVMapper> &hit) const
{
/* calculate vertices relative to ray origin */
const Vec3vf<M> O = broadcast<vfloat<M>>(ray.org,k);
@@ -211,10 +340,12 @@ namespace embree
const Vec3vf<M> e1 = v0-v1;
const Vec3vf<M> e2 = v1-v2;
+
/* perform edge tests */
const vfloat<M> U = dot(cross(e0,v2+v0),D);
const vfloat<M> V = dot(cross(e1,v0+v1),D);
const vfloat<M> W = dot(cross(e2,v1+v2),D);
+
const vfloat<M> UVW = U+V+W;
const vfloat<M> eps = float(ulp)*abs(UVW);
#if defined(EMBREE_BACKFACE_CULLING)
@@ -239,9 +370,38 @@ namespace embree
if (unlikely(none(valid))) return false;
/* update hit information */
- PlueckerHitM<M,UVMapper> hit(U,V,UVW,t,Ng,mapUV);
- return epilog(valid,hit);
+ new (&hit) PlueckerHitM<M,UVMapper>(valid,U,V,UVW,t,Ng,mapUV);
+ return true;
}
+
+ template<typename UVMapper, typename Epilog>
+ __forceinline bool intersect(RayK<K>& ray, size_t k,
+ const Vec3vf<M>& tri_v0,
+ const Vec3vf<M>& tri_v1,
+ const Vec3vf<M>& tri_v2,
+ const UVMapper& mapUV,
+ const Epilog& epilog) const
+ {
+ PlueckerHitM<M,UVMapper> hit(mapUV);
+ if (intersect(ray,k,tri_v0,tri_v1,tri_v2,mapUV,hit))
+ return epilog(hit.valid,hit);
+ return false;
+ }
+
+ template<typename Epilog>
+ __forceinline bool intersect(RayK<K>& ray, size_t k,
+ const Vec3vf<M>& tri_v0,
+ const Vec3vf<M>& tri_v1,
+ const Vec3vf<M>& tri_v2,
+ const Epilog& epilog) const
+ {
+ UVIdentity<M> mapUV;
+ PlueckerHitM<M,UVIdentity<M>> hit(mapUV);
+ if (intersect(ray,k,tri_v0,tri_v1,tri_v2,mapUV,hit))
+ return epilog(hit.valid,hit);
+ return false;
+ }
+
};
}
}
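
For readers following the Pluecker intersector hunks above: the U/V/W edge tests and the UVW-normalized barycentrics they produce are easier to see in plain scalar form. Below is a minimal, illustrative sketch of the same test; the Vec3 helpers and function names are invented for this sketch and are not Embree API, and the SIMD kernels above differ in detail (masking, epsilon handling, stabilized normal computation).

// pluecker_sketch.cpp -- illustrative only (see note above).
#include <cmath>
#include <cstdio>

struct Vec3 { float x, y, z; };
static Vec3  sub(Vec3 a, Vec3 b)   { return {a.x - b.x, a.y - b.y, a.z - b.z}; }
static Vec3  add(Vec3 a, Vec3 b)   { return {a.x + b.x, a.y + b.y, a.z + b.z}; }
static Vec3  cross(Vec3 a, Vec3 b) { return {a.y*b.z - a.z*b.y, a.z*b.x - a.x*b.z, a.x*b.y - a.y*b.x}; }
static float dot(Vec3 a, Vec3 b)   { return a.x*b.x + a.y*b.y + a.z*b.z; }

// Returns true on a hit and fills t plus the unmapped barycentrics u,v
// (the same values PlueckerHitM hands to the UV mapper).
static bool intersect_pluecker(Vec3 org, Vec3 dir, Vec3 a, Vec3 b, Vec3 c,
                               float tnear, float tfar, float& t, float& u, float& v)
{
  // vertices relative to the ray origin
  const Vec3 v0 = sub(a, org), v1 = sub(b, org), v2 = sub(c, org);
  // edge vectors
  const Vec3 e0 = sub(v2, v0), e1 = sub(v0, v1), e2 = sub(v1, v2);
  // Pluecker-style edge tests (same formulas as the U/V/W lines above)
  const float U = dot(cross(e0, add(v2, v0)), dir);
  const float V = dot(cross(e1, add(v0, v1)), dir);
  const float W = dot(cross(e2, add(v1, v2)), dir);
  const float UVW = U + V + W;
  const float eps = 1e-7f * std::fabs(UVW);
  // without backface culling the three edge tests must share a sign
  if (!(std::fmin(std::fmin(U, V), W) >= -eps || std::fmax(std::fmax(U, V), W) <= eps))
    return false;
  // geometric normal and depth test (Embree uses a numerically stabilized normal)
  const Vec3 Ng = cross(e2, e0);
  const float den = dot(Ng, dir);
  if (den == 0.0f) return false;
  t = dot(v0, Ng) / den;
  if (!(tnear <= t && t <= tfar)) return false;
  // barycentric coordinates, normalized by UVW
  u = (UVW != 0.0f) ? U / UVW : 0.0f;
  v = (UVW != 0.0f) ? V / UVW : 0.0f;
  return true;
}

int main()
{
  float t, u, v;
  const bool hit = intersect_pluecker({0, 0, -1}, {0, 0, 1},
                                      {-1, -1, 1}, {1, -1, 1}, {0, 1, 1},
                                      0.0f, 100.0f, t, u, v);
  std::printf("hit=%d t=%f u=%f v=%f\n", hit, t, u, v);   // hit=1 t=2 u=0.25 v=0.5
  return 0;
}
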
diff --git a/thirdparty/embree-aarch64/kernels/geometry/triangle_intersector_woop.h b/thirdparty/embree/kernels/geometry/triangle_intersector_woop.h
index 63e649d8fb..f05dcc4537 100644
--- a/thirdparty/embree-aarch64/kernels/geometry/triangle_intersector_woop.h
+++ b/thirdparty/embree/kernels/geometry/triangle_intersector_woop.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
diff --git a/thirdparty/embree-aarch64/kernels/geometry/triangle_triangle_intersector.h b/thirdparty/embree/kernels/geometry/triangle_triangle_intersector.h
index 91b35c36f3..50106bcc16 100644
--- a/thirdparty/embree-aarch64/kernels/geometry/triangle_triangle_intersector.h
+++ b/thirdparty/embree/kernels/geometry/triangle_triangle_intersector.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#include "primitive.h"
diff --git a/thirdparty/embree-aarch64/kernels/geometry/trianglei.h b/thirdparty/embree/kernels/geometry/trianglei.h
index 4f3118cc0c..6aad48a5ef 100644
--- a/thirdparty/embree-aarch64/kernels/geometry/trianglei.h
+++ b/thirdparty/embree/kernels/geometry/trianglei.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
@@ -343,7 +343,7 @@ namespace embree
const TriangleMesh* mesh = scene->get<TriangleMesh>(geomID(index));
vfloat<K> ftime;
- const vint<K> itime = mesh->timeSegment(time, ftime);
+ const vint<K> itime = mesh->timeSegment<K>(time, ftime);
const size_t first = bsf(movemask(valid));
if (likely(all(valid,itime[first] == itime)))
@@ -352,9 +352,9 @@ namespace embree
p1 = getVertex<1>(index, scene, itime[first], ftime);
p2 = getVertex<2>(index, scene, itime[first], ftime);
} else {
- p0 = getVertex<0>(valid, index, scene, itime, ftime);
- p1 = getVertex<1>(valid, index, scene, itime, ftime);
- p2 = getVertex<2>(valid, index, scene, itime, ftime);
+ p0 = getVertex<0,K>(valid, index, scene, itime, ftime);
+ p1 = getVertex<1,K>(valid, index, scene, itime, ftime);
+ p2 = getVertex<2,K>(valid, index, scene, itime, ftime);
}
}
diff --git a/thirdparty/embree-aarch64/kernels/geometry/trianglei_intersector.h b/thirdparty/embree/kernels/geometry/trianglei_intersector.h
index e2f106a62c..f7deb9e72d 100644
--- a/thirdparty/embree-aarch64/kernels/geometry/trianglei_intersector.h
+++ b/thirdparty/embree/kernels/geometry/trianglei_intersector.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
@@ -12,24 +12,24 @@ namespace embree
namespace isa
{
/*! Intersects M triangles with 1 ray */
- template<int M, int Mx, bool filter>
+ template<int M, bool filter>
struct TriangleMiIntersector1Moeller
{
typedef TriangleMi<M> Primitive;
- typedef MoellerTrumboreIntersector1<Mx> Precalculations;
+ typedef MoellerTrumboreIntersector1<M> Precalculations;
static __forceinline void intersect(const Precalculations& pre, RayHit& ray, IntersectContext* context, const Primitive& tri)
{
STAT3(normal.trav_prims,1,1,1);
Vec3vf<M> v0, v1, v2; tri.gather(v0,v1,v2,context->scene);
- pre.intersect(ray,v0,v1,v2,/*UVIdentity<Mx>(),*/Intersect1EpilogM<M,Mx,filter>(ray,context,tri.geomID(),tri.primID()));
+ pre.intersect(ray,v0,v1,v2,Intersect1EpilogM<M,filter>(ray,context,tri.geomID(),tri.primID()));
}
static __forceinline bool occluded(const Precalculations& pre, Ray& ray, IntersectContext* context, const Primitive& tri)
{
STAT3(shadow.trav_prims,1,1,1);
Vec3vf<M> v0, v1, v2; tri.gather(v0,v1,v2,context->scene);
- return pre.intersect(ray,v0,v1,v2,/*UVIdentity<Mx>(),*/Occluded1EpilogM<M,Mx,filter>(ray,context,tri.geomID(),tri.primID()));
+ return pre.intersect(ray,v0,v1,v2,Occluded1EpilogM<M,filter>(ray,context,tri.geomID(),tri.primID()));
}
static __forceinline bool pointQuery(PointQuery* query, PointQueryContext* context, const Primitive& tri)
@@ -39,11 +39,11 @@ namespace embree
};
/*! Intersects M triangles with K rays */
- template<int M, int Mx, int K, bool filter>
+ template<int M, int K, bool filter>
struct TriangleMiIntersectorKMoeller
{
typedef TriangleMi<M> Primitive;
- typedef MoellerTrumboreIntersectorK<Mx,K> Precalculations;
+ typedef MoellerTrumboreIntersectorK<M,K> Precalculations;
static __forceinline void intersect(const vbool<K>& valid_i, Precalculations& pre, RayHitK<K>& ray, IntersectContext* context, const Primitive& tri)
{
@@ -55,7 +55,7 @@ namespace embree
const Vec3vf<K> v0 = tri.template getVertex<0>(i,scene);
const Vec3vf<K> v1 = tri.template getVertex<1>(i,scene);
const Vec3vf<K> v2 = tri.template getVertex<2>(i,scene);
- pre.intersectK(valid_i,ray,v0,v1,v2,/*UVIdentity<K>(),*/IntersectKEpilogM<M,K,filter>(ray,context,tri.geomID(),tri.primID(),i));
+ pre.intersectK(valid_i,ray,v0,v1,v2,IntersectKEpilogM<M,K,filter>(ray,context,tri.geomID(),tri.primID(),i));
}
}
@@ -71,7 +71,7 @@ namespace embree
const Vec3vf<K> v0 = tri.template getVertex<0>(i,scene);
const Vec3vf<K> v1 = tri.template getVertex<1>(i,scene);
const Vec3vf<K> v2 = tri.template getVertex<2>(i,scene);
- pre.intersectK(valid0,ray,v0,v1,v2,/*UVIdentity<K>(),*/OccludedKEpilogM<M,K,filter>(valid0,ray,context,tri.geomID(),tri.primID(),i));
+ pre.intersectK(valid0,ray,v0,v1,v2,OccludedKEpilogM<M,K,filter>(valid0,ray,context,tri.geomID(),tri.primID(),i));
if (none(valid0)) break;
}
return !valid0;
@@ -81,36 +81,36 @@ namespace embree
{
STAT3(normal.trav_prims,1,1,1);
Vec3vf<M> v0, v1, v2; tri.gather(v0,v1,v2,context->scene);
- pre.intersect(ray,k,v0,v1,v2,/*UVIdentity<Mx>(),*/Intersect1KEpilogM<M,Mx,K,filter>(ray,k,context,tri.geomID(),tri.primID()));
+ pre.intersect(ray,k,v0,v1,v2,Intersect1KEpilogM<M,K,filter>(ray,k,context,tri.geomID(),tri.primID()));
}
static __forceinline bool occluded(Precalculations& pre, RayK<K>& ray, size_t k, IntersectContext* context, const Primitive& tri)
{
STAT3(shadow.trav_prims,1,1,1);
Vec3vf<M> v0, v1, v2; tri.gather(v0,v1,v2,context->scene);
- return pre.intersect(ray,k,v0,v1,v2,/*UVIdentity<Mx>(),*/Occluded1KEpilogM<M,Mx,K,filter>(ray,k,context,tri.geomID(),tri.primID()));
+ return pre.intersect(ray,k,v0,v1,v2,Occluded1KEpilogM<M,K,filter>(ray,k,context,tri.geomID(),tri.primID()));
}
};
/*! Intersects M triangles with 1 ray */
- template<int M, int Mx, bool filter>
+ template<int M, bool filter>
struct TriangleMiIntersector1Pluecker
{
typedef TriangleMi<M> Primitive;
- typedef PlueckerIntersector1<Mx> Precalculations;
+ typedef PlueckerIntersector1<M> Precalculations;
static __forceinline void intersect(const Precalculations& pre, RayHit& ray, IntersectContext* context, const Primitive& tri)
{
STAT3(normal.trav_prims,1,1,1);
Vec3vf<M> v0, v1, v2; tri.gather(v0,v1,v2,context->scene);
- pre.intersect(ray,v0,v1,v2,UVIdentity<Mx>(),Intersect1EpilogM<M,Mx,filter>(ray,context,tri.geomID(),tri.primID()));
+ pre.intersect(ray,v0,v1,v2,Intersect1EpilogM<M,filter>(ray,context,tri.geomID(),tri.primID()));
}
static __forceinline bool occluded(const Precalculations& pre, Ray& ray, IntersectContext* context, const Primitive& tri)
{
STAT3(shadow.trav_prims,1,1,1);
Vec3vf<M> v0, v1, v2; tri.gather(v0,v1,v2,context->scene);
- return pre.intersect(ray,v0,v1,v2,UVIdentity<Mx>(),Occluded1EpilogM<M,Mx,filter>(ray,context,tri.geomID(),tri.primID()));
+ return pre.intersect(ray,v0,v1,v2,Occluded1EpilogM<M,filter>(ray,context,tri.geomID(),tri.primID()));
}
static __forceinline bool pointQuery(PointQuery* query, PointQueryContext* context, const Primitive& tri)
@@ -120,11 +120,11 @@ namespace embree
};
/*! Intersects M triangles with K rays */
- template<int M, int Mx, int K, bool filter>
+ template<int M, int K, bool filter>
struct TriangleMiIntersectorKPluecker
{
typedef TriangleMi<M> Primitive;
- typedef PlueckerIntersectorK<Mx,K> Precalculations;
+ typedef PlueckerIntersectorK<M,K> Precalculations;
static __forceinline void intersect(const vbool<K>& valid_i, Precalculations& pre, RayHitK<K>& ray, IntersectContext* context, const Primitive& tri)
{
@@ -136,7 +136,7 @@ namespace embree
const Vec3vf<K> v0 = tri.template getVertex<0>(i,scene);
const Vec3vf<K> v1 = tri.template getVertex<1>(i,scene);
const Vec3vf<K> v2 = tri.template getVertex<2>(i,scene);
- pre.intersectK(valid_i,ray,v0,v1,v2,UVIdentity<K>(),IntersectKEpilogM<M,K,filter>(ray,context,tri.geomID(),tri.primID(),i));
+ pre.intersectK(valid_i,ray,v0,v1,v2,IntersectKEpilogM<M,K,filter>(ray,context,tri.geomID(),tri.primID(),i));
}
}
@@ -152,7 +152,7 @@ namespace embree
const Vec3vf<K> v0 = tri.template getVertex<0>(i,scene);
const Vec3vf<K> v1 = tri.template getVertex<1>(i,scene);
const Vec3vf<K> v2 = tri.template getVertex<2>(i,scene);
- pre.intersectK(valid0,ray,v0,v1,v2,UVIdentity<K>(),OccludedKEpilogM<M,K,filter>(valid0,ray,context,tri.geomID(),tri.primID(),i));
+ pre.intersectK(valid0,ray,v0,v1,v2,OccludedKEpilogM<M,K,filter>(valid0,ray,context,tri.geomID(),tri.primID(),i));
if (none(valid0)) break;
}
return !valid0;
@@ -162,30 +162,30 @@ namespace embree
{
STAT3(normal.trav_prims,1,1,1);
Vec3vf<M> v0, v1, v2; tri.gather(v0,v1,v2,context->scene);
- pre.intersect(ray,k,v0,v1,v2,UVIdentity<Mx>(),Intersect1KEpilogM<M,Mx,K,filter>(ray,k,context,tri.geomID(),tri.primID()));
+ pre.intersect(ray,k,v0,v1,v2,Intersect1KEpilogM<M,K,filter>(ray,k,context,tri.geomID(),tri.primID()));
}
static __forceinline bool occluded(Precalculations& pre, RayK<K>& ray, size_t k, IntersectContext* context, const Primitive& tri)
{
STAT3(shadow.trav_prims,1,1,1);
Vec3vf<M> v0, v1, v2; tri.gather(v0,v1,v2,context->scene);
- return pre.intersect(ray,k,v0,v1,v2,UVIdentity<Mx>(),Occluded1KEpilogM<M,Mx,K,filter>(ray,k,context,tri.geomID(),tri.primID()));
+ return pre.intersect(ray,k,v0,v1,v2,Occluded1KEpilogM<M,K,filter>(ray,k,context,tri.geomID(),tri.primID()));
}
};
/*! Intersects M motion blur triangles with 1 ray */
- template<int M, int Mx, bool filter>
+ template<int M, bool filter>
struct TriangleMiMBIntersector1Moeller
{
typedef TriangleMi<M> Primitive;
- typedef MoellerTrumboreIntersector1<Mx> Precalculations;
+ typedef MoellerTrumboreIntersector1<M> Precalculations;
/*! Intersect a ray with the M triangles and updates the hit. */
static __forceinline void intersect(const Precalculations& pre, RayHit& ray, IntersectContext* context, const Primitive& tri)
{
STAT3(normal.trav_prims,1,1,1);
Vec3vf<M> v0,v1,v2; tri.gather(v0,v1,v2,context->scene,ray.time());
- pre.intersect(ray,v0,v1,v2,/*UVIdentity<Mx>(),*/Intersect1EpilogM<M,Mx,filter>(ray,context,tri.geomID(),tri.primID()));
+ pre.intersect(ray,v0,v1,v2,Intersect1EpilogM<M,filter>(ray,context,tri.geomID(),tri.primID()));
}
/*! Test if the ray is occluded by one of M triangles. */
@@ -193,7 +193,7 @@ namespace embree
{
STAT3(shadow.trav_prims,1,1,1);
Vec3vf<M> v0,v1,v2; tri.gather(v0,v1,v2,context->scene,ray.time());
- return pre.intersect(ray,v0,v1,v2,/*UVIdentity<Mx>(),*/Occluded1EpilogM<M,Mx,filter>(ray,context,tri.geomID(),tri.primID()));
+ return pre.intersect(ray,v0,v1,v2,Occluded1EpilogM<M,filter>(ray,context,tri.geomID(),tri.primID()));
}
static __forceinline bool pointQuery(PointQuery* query, PointQueryContext* context, const Primitive& tri)
@@ -203,11 +203,11 @@ namespace embree
};
/*! Intersects M motion blur triangles with K rays. */
- template<int M, int Mx, int K, bool filter>
+ template<int M, int K, bool filter>
struct TriangleMiMBIntersectorKMoeller
{
typedef TriangleMi<M> Primitive;
- typedef MoellerTrumboreIntersectorK<Mx,K> Precalculations;
+ typedef MoellerTrumboreIntersectorK<M,K> Precalculations;
/*! Intersects K rays with M triangles. */
static __forceinline void intersect(const vbool<K>& valid_i, Precalculations& pre, RayHitK<K>& ray, IntersectContext* context, const TriangleMi<M>& tri)
@@ -216,8 +216,8 @@ namespace embree
{
if (!tri.valid(i)) break;
STAT3(normal.trav_prims,1,popcnt(valid_i),K);
- Vec3vf<K> v0,v1,v2; tri.gather(valid_i,v0,v1,v2,i,context->scene,ray.time());
- pre.intersectK(valid_i,ray,v0,v1,v2,/*UVIdentity<K>(),*/IntersectKEpilogM<M,K,filter>(ray,context,tri.geomID(),tri.primID(),i));
+ Vec3vf<K> v0,v1,v2; tri.template gather<K>(valid_i,v0,v1,v2,i,context->scene,ray.time());
+ pre.intersectK(valid_i,ray,v0,v1,v2,IntersectKEpilogM<M,K,filter>(ray,context,tri.geomID(),tri.primID(),i));
}
}
@@ -229,8 +229,8 @@ namespace embree
{
if (!tri.valid(i)) break;
STAT3(shadow.trav_prims,1,popcnt(valid0),K);
- Vec3vf<K> v0,v1,v2; tri.gather(valid_i,v0,v1,v2,i,context->scene,ray.time());
- pre.intersectK(valid0,ray,v0,v1,v2,/*UVIdentity<K>(),*/OccludedKEpilogM<M,K,filter>(valid0,ray,context,tri.geomID(),tri.primID(),i));
+ Vec3vf<K> v0,v1,v2; tri.template gather<K>(valid_i,v0,v1,v2,i,context->scene,ray.time());
+ pre.intersectK(valid0,ray,v0,v1,v2,OccludedKEpilogM<M,K,filter>(valid0,ray,context,tri.geomID(),tri.primID(),i));
if (none(valid0)) break;
}
return !valid0;
@@ -241,7 +241,7 @@ namespace embree
{
STAT3(normal.trav_prims,1,1,1);
Vec3vf<M> v0,v1,v2; tri.gather(v0,v1,v2,context->scene,ray.time()[k]);
- pre.intersect(ray,k,v0,v1,v2,/*UVIdentity<Mx>(),*/Intersect1KEpilogM<M,Mx,K,filter>(ray,k,context,tri.geomID(),tri.primID()));
+ pre.intersect(ray,k,v0,v1,v2,Intersect1KEpilogM<M,K,filter>(ray,k,context,tri.geomID(),tri.primID()));
}
/*! Test if the ray is occluded by one of the M triangles. */
@@ -249,23 +249,23 @@ namespace embree
{
STAT3(shadow.trav_prims,1,1,1);
Vec3vf<M> v0,v1,v2; tri.gather(v0,v1,v2,context->scene,ray.time()[k]);
- return pre.intersect(ray,k,v0,v1,v2,/*UVIdentity<Mx>(),*/Occluded1KEpilogM<M,Mx,K,filter>(ray,k,context,tri.geomID(),tri.primID()));
+ return pre.intersect(ray,k,v0,v1,v2,Occluded1KEpilogM<M,K,filter>(ray,k,context,tri.geomID(),tri.primID()));
}
};
/*! Intersects M motion blur triangles with 1 ray */
- template<int M, int Mx, bool filter>
+ template<int M, bool filter>
struct TriangleMiMBIntersector1Pluecker
{
typedef TriangleMi<M> Primitive;
- typedef PlueckerIntersector1<Mx> Precalculations;
+ typedef PlueckerIntersector1<M> Precalculations;
/*! Intersect a ray with the M triangles and updates the hit. */
static __forceinline void intersect(const Precalculations& pre, RayHit& ray, IntersectContext* context, const Primitive& tri)
{
STAT3(normal.trav_prims,1,1,1);
Vec3vf<M> v0,v1,v2; tri.gather(v0,v1,v2,context->scene,ray.time());
- pre.intersect(ray,v0,v1,v2,UVIdentity<Mx>(),Intersect1EpilogM<M,Mx,filter>(ray,context,tri.geomID(),tri.primID()));
+ pre.intersect(ray,v0,v1,v2,Intersect1EpilogM<M,filter>(ray,context,tri.geomID(),tri.primID()));
}
/*! Test if the ray is occluded by one of M triangles. */
@@ -273,7 +273,7 @@ namespace embree
{
STAT3(shadow.trav_prims,1,1,1);
Vec3vf<M> v0,v1,v2; tri.gather(v0,v1,v2,context->scene,ray.time());
- return pre.intersect(ray,v0,v1,v2,UVIdentity<Mx>(),Occluded1EpilogM<M,Mx,filter>(ray,context,tri.geomID(),tri.primID()));
+ return pre.intersect(ray,v0,v1,v2,Occluded1EpilogM<M,filter>(ray,context,tri.geomID(),tri.primID()));
}
static __forceinline bool pointQuery(PointQuery* query, PointQueryContext* context, const Primitive& tri)
@@ -283,11 +283,11 @@ namespace embree
};
/*! Intersects M motion blur triangles with K rays. */
- template<int M, int Mx, int K, bool filter>
+ template<int M, int K, bool filter>
struct TriangleMiMBIntersectorKPluecker
{
typedef TriangleMi<M> Primitive;
- typedef PlueckerIntersectorK<Mx,K> Precalculations;
+ typedef PlueckerIntersectorK<M,K> Precalculations;
/*! Intersects K rays with M triangles. */
static __forceinline void intersect(const vbool<K>& valid_i, Precalculations& pre, RayHitK<K>& ray, IntersectContext* context, const TriangleMi<M>& tri)
@@ -296,8 +296,8 @@ namespace embree
{
if (!tri.valid(i)) break;
STAT3(normal.trav_prims,1,popcnt(valid_i),K);
- Vec3vf<K> v0,v1,v2; tri.gather(valid_i,v0,v1,v2,i,context->scene,ray.time());
- pre.intersectK(valid_i,ray,v0,v1,v2,UVIdentity<K>(),IntersectKEpilogM<M,K,filter>(ray,context,tri.geomID(),tri.primID(),i));
+ Vec3vf<K> v0,v1,v2; tri.template gather<K>(valid_i,v0,v1,v2,i,context->scene,ray.time());
+ pre.intersectK(valid_i,ray,v0,v1,v2,IntersectKEpilogM<M,K,filter>(ray,context,tri.geomID(),tri.primID(),i));
}
}
@@ -309,8 +309,8 @@ namespace embree
{
if (!tri.valid(i)) break;
STAT3(shadow.trav_prims,1,popcnt(valid0),K);
- Vec3vf<K> v0,v1,v2; tri.gather(valid_i,v0,v1,v2,i,context->scene,ray.time());
- pre.intersectK(valid0,ray,v0,v1,v2,UVIdentity<K>(),OccludedKEpilogM<M,K,filter>(valid0,ray,context,tri.geomID(),tri.primID(),i));
+ Vec3vf<K> v0,v1,v2; tri.template gather<K>(valid_i,v0,v1,v2,i,context->scene,ray.time());
+ pre.intersectK(valid0,ray,v0,v1,v2,OccludedKEpilogM<M,K,filter>(valid0,ray,context,tri.geomID(),tri.primID(),i));
if (none(valid0)) break;
}
return !valid0;
@@ -321,7 +321,7 @@ namespace embree
{
STAT3(normal.trav_prims,1,1,1);
Vec3vf<M> v0,v1,v2; tri.gather(v0,v1,v2,context->scene,ray.time()[k]);
- pre.intersect(ray,k,v0,v1,v2,UVIdentity<Mx>(),Intersect1KEpilogM<M,Mx,K,filter>(ray,k,context,tri.geomID(),tri.primID()));
+ pre.intersect(ray,k,v0,v1,v2,Intersect1KEpilogM<M,K,filter>(ray,k,context,tri.geomID(),tri.primID()));
}
/*! Test if the ray is occluded by one of the M triangles. */
@@ -329,7 +329,7 @@ namespace embree
{
STAT3(shadow.trav_prims,1,1,1);
Vec3vf<M> v0,v1,v2; tri.gather(v0,v1,v2,context->scene,ray.time()[k]);
- return pre.intersect(ray,k,v0,v1,v2,UVIdentity<Mx>(),Occluded1KEpilogM<M,Mx,K,filter>(ray,k,context,tri.geomID(),tri.primID()));
+ return pre.intersect(ray,k,v0,v1,v2,Occluded1KEpilogM<M,K,filter>(ray,k,context,tri.geomID(),tri.primID()));
}
};
}
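
The Moeller intersectors used throughout this file (TriangleMiIntersector*Moeller) are SIMD forms of the classic Moeller-Trumbore test. For reference, here is a minimal scalar sketch of the textbook algorithm; the helper types and names are invented for this sketch, and Embree's actual kernel uses a different edge-based formulation.

// moeller_sketch.cpp -- illustrative only (see note above).
#include <cmath>
#include <cstdio>

struct Vec3 { float x, y, z; };
static Vec3  sub(Vec3 a, Vec3 b)   { return {a.x - b.x, a.y - b.y, a.z - b.z}; }
static Vec3  cross(Vec3 a, Vec3 b) { return {a.y*b.z - a.z*b.y, a.z*b.x - a.x*b.z, a.x*b.y - a.y*b.x}; }
static float dot(Vec3 a, Vec3 b)   { return a.x*b.x + a.y*b.y + a.z*b.z; }

static bool intersect_moeller(Vec3 org, Vec3 dir, Vec3 a, Vec3 b, Vec3 c,
                              float tnear, float tfar, float& t, float& u, float& v)
{
  const Vec3 e1 = sub(b, a), e2 = sub(c, a);
  const Vec3 pvec = cross(dir, e2);
  const float det = dot(e1, pvec);
  if (std::fabs(det) < 1e-8f) return false;        // ray (nearly) parallel to the triangle plane
  const float inv_det = 1.0f / det;
  const Vec3 tvec = sub(org, a);
  u = dot(tvec, pvec) * inv_det;
  if (u < 0.0f || u > 1.0f) return false;
  const Vec3 qvec = cross(tvec, e1);
  v = dot(dir, qvec) * inv_det;
  if (v < 0.0f || u + v > 1.0f) return false;
  t = dot(e2, qvec) * inv_det;
  return tnear <= t && t <= tfar;                   // same depth test as ray.tnear()/ray.tfar
}

int main()
{
  float t, u, v;
  const bool hit = intersect_moeller({0, 0, -1}, {0, 0, 1},
                                     {-1, -1, 1}, {1, -1, 1}, {0, 1, 1},
                                     0.0f, 100.0f, t, u, v);
  std::printf("hit=%d t=%f u=%f v=%f\n", hit, t, u, v);   // hit=1 t=2 u=0.25 v=0.5
  return 0;
}
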
diff --git a/thirdparty/embree-aarch64/kernels/geometry/trianglev.h b/thirdparty/embree/kernels/geometry/trianglev.h
index 19af389e73..cd94756b9e 100644
--- a/thirdparty/embree-aarch64/kernels/geometry/trianglev.h
+++ b/thirdparty/embree/kernels/geometry/trianglev.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
diff --git a/thirdparty/embree-aarch64/kernels/geometry/trianglev_intersector.h b/thirdparty/embree/kernels/geometry/trianglev_intersector.h
index 6af0d5a11c..3abb7f8e32 100644
--- a/thirdparty/embree-aarch64/kernels/geometry/trianglev_intersector.h
+++ b/thirdparty/embree/kernels/geometry/trianglev_intersector.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
@@ -13,24 +13,24 @@ namespace embree
namespace isa
{
/*! Intersects M triangles with 1 ray */
- template<int M, int Mx, bool filter>
+ template<int M, bool filter>
struct TriangleMvIntersector1Moeller
{
typedef TriangleMv<M> Primitive;
- typedef MoellerTrumboreIntersector1<Mx> Precalculations;
+ typedef MoellerTrumboreIntersector1<M> Precalculations;
/*! Intersect a ray with M triangles and updates the hit. */
static __forceinline void intersect(Precalculations& pre, RayHit& ray, IntersectContext* context, const Primitive& tri)
{
STAT3(normal.trav_prims,1,1,1);
- pre.intersect(ray,tri.v0,tri.v1,tri.v2,/*UVIdentity<Mx>(),*/Intersect1EpilogM<M,Mx,filter>(ray,context,tri.geomID(),tri.primID()));
+ pre.intersect(ray,tri.v0,tri.v1,tri.v2,/*UVIdentity<M>(),*/Intersect1EpilogM<M,filter>(ray,context,tri.geomID(),tri.primID()));
}
/*! Test if the ray is occluded by one of the M triangles. */
static __forceinline bool occluded(const Precalculations& pre, Ray& ray, IntersectContext* context, const Primitive& tri)
{
STAT3(shadow.trav_prims,1,1,1);
- return pre.intersect(ray,tri.v0,tri.v1,tri.v2,/*UVIdentity<Mx>(),*/Occluded1EpilogM<M,Mx,filter>(ray,context,tri.geomID(),tri.primID()));
+ return pre.intersect(ray,tri.v0,tri.v1,tri.v2,/*UVIdentity<M>(),*/Occluded1EpilogM<M,filter>(ray,context,tri.geomID(),tri.primID()));
}
static __forceinline bool pointQuery(PointQuery* query, PointQueryContext* context, const Primitive& tri)
@@ -40,25 +40,25 @@ namespace embree
};
- template<int M, int Mx, bool filter>
+ template<int M, bool filter>
struct TriangleMvIntersector1Woop
{
typedef TriangleMv<M> Primitive;
- typedef WoopIntersector1<Mx> intersec;
+ typedef WoopIntersector1<M> intersec;
typedef WoopPrecalculations1<M> Precalculations;
/*! Intersect a ray with M triangles and updates the hit. */
static __forceinline void intersect(const Precalculations& pre, RayHit& ray, IntersectContext* context, const Primitive& tri)
{
STAT3(normal.trav_prims,1,1,1);
- intersec::intersect(ray,pre,tri.v0,tri.v1,tri.v2,Intersect1EpilogM<M,Mx,filter>(ray,context,tri.geomID(),tri.primID()));
+ intersec::intersect(ray,pre,tri.v0,tri.v1,tri.v2,Intersect1EpilogM<M,filter>(ray,context,tri.geomID(),tri.primID()));
}
/*! Test if the ray is occluded by one of the M triangles. */
static __forceinline bool occluded(const Precalculations& pre, Ray& ray, IntersectContext* context, const Primitive& tri)
{
STAT3(shadow.trav_prims,1,1,1);
- return intersec::intersect(ray,pre,tri.v0,tri.v1,tri.v2,Occluded1EpilogM<M,Mx,filter>(ray,context,tri.geomID(),tri.primID()));
+ return intersec::intersect(ray,pre,tri.v0,tri.v1,tri.v2,Occluded1EpilogM<M,filter>(ray,context,tri.geomID(),tri.primID()));
}
static __forceinline bool pointQuery(PointQuery* query, PointQueryContext* context, const Primitive& tri)
@@ -69,11 +69,11 @@ namespace embree
/*! Intersects M triangles with K rays */
- template<int M, int Mx, int K, bool filter>
+ template<int M, int K, bool filter>
struct TriangleMvIntersectorKMoeller
{
typedef TriangleMv<M> Primitive;
- typedef MoellerTrumboreIntersectorK<Mx,K> Precalculations;
+ typedef MoellerTrumboreIntersectorK<M,K> Precalculations;
/*! Intersects K rays with M triangles. */
static __forceinline void intersect(const vbool<K>& valid_i, Precalculations& pre, RayHitK<K>& ray, IntersectContext* context, const Primitive& tri)
@@ -111,36 +111,36 @@ namespace embree
static __forceinline void intersect(Precalculations& pre, RayHitK<K>& ray, size_t k, IntersectContext* context, const Primitive& tri)
{
STAT3(normal.trav_prims,1,1,1);
- pre.intersect(ray,k,tri.v0,tri.v1,tri.v2,/*UVIdentity<Mx>(),*/Intersect1KEpilogM<M,Mx,K,filter>(ray,k,context,tri.geomID(),tri.primID())); //FIXME: M,Mx
+ pre.intersect(ray,k,tri.v0,tri.v1,tri.v2,/*UVIdentity<M>(),*/Intersect1KEpilogM<M,K,filter>(ray,k,context,tri.geomID(),tri.primID())); //FIXME: M
}
/*! Test if the ray is occluded by one of the M triangles. */
static __forceinline bool occluded(Precalculations& pre, RayK<K>& ray, size_t k, IntersectContext* context, const Primitive& tri)
{
STAT3(shadow.trav_prims,1,1,1);
- return pre.intersect(ray,k,tri.v0,tri.v1,tri.v2,/*UVIdentity<Mx>(),*/Occluded1KEpilogM<M,Mx,K,filter>(ray,k,context,tri.geomID(),tri.primID())); //FIXME: M,Mx
+ return pre.intersect(ray,k,tri.v0,tri.v1,tri.v2,/*UVIdentity<M>(),*/Occluded1KEpilogM<M,K,filter>(ray,k,context,tri.geomID(),tri.primID())); //FIXME: M
}
};
/*! Intersects M triangles with 1 ray */
- template<int M, int Mx, bool filter>
+ template<int M, bool filter>
struct TriangleMvIntersector1Pluecker
{
typedef TriangleMv<M> Primitive;
- typedef PlueckerIntersector1<Mx> Precalculations;
+ typedef PlueckerIntersector1<M> Precalculations;
/*! Intersect a ray with M triangles and updates the hit. */
static __forceinline void intersect(Precalculations& pre, RayHit& ray, IntersectContext* context, const Primitive& tri)
{
STAT3(normal.trav_prims,1,1,1);
- pre.intersect(ray,tri.v0,tri.v1,tri.v2,UVIdentity<Mx>(),Intersect1EpilogM<M,Mx,filter>(ray,context,tri.geomID(),tri.primID()));
+ pre.intersect(ray,tri.v0,tri.v1,tri.v2,UVIdentity<M>(),Intersect1EpilogM<M,filter>(ray,context,tri.geomID(),tri.primID()));
}
/*! Test if the ray is occluded by one of the M triangles. */
static __forceinline bool occluded(const Precalculations& pre, Ray& ray, IntersectContext* context, const Primitive& tri)
{
STAT3(shadow.trav_prims,1,1,1);
- return pre.intersect(ray,tri.v0,tri.v1,tri.v2,UVIdentity<Mx>(),Occluded1EpilogM<M,Mx,filter>(ray,context,tri.geomID(),tri.primID()));
+ return pre.intersect(ray,tri.v0,tri.v1,tri.v2,UVIdentity<M>(),Occluded1EpilogM<M,filter>(ray,context,tri.geomID(),tri.primID()));
}
static __forceinline bool pointQuery(PointQuery* query, PointQueryContext* context, const Primitive& tri)
@@ -150,11 +150,11 @@ namespace embree
};
/*! Intersects M triangles with K rays */
- template<int M, int Mx, int K, bool filter>
+ template<int M, int K, bool filter>
struct TriangleMvIntersectorKPluecker
{
typedef TriangleMv<M> Primitive;
- typedef PlueckerIntersectorK<Mx,K> Precalculations;
+ typedef PlueckerIntersectorK<M,K> Precalculations;
/*! Intersects K rays with M triangles. */
static __forceinline void intersect(const vbool<K>& valid_i, Precalculations& pre, RayHitK<K>& ray, IntersectContext* context, const Primitive& tri)
@@ -192,14 +192,14 @@ namespace embree
static __forceinline void intersect(Precalculations& pre, RayHitK<K>& ray, size_t k, IntersectContext* context, const Primitive& tri)
{
STAT3(normal.trav_prims,1,1,1);
- pre.intersect(ray,k,tri.v0,tri.v1,tri.v2,UVIdentity<Mx>(),Intersect1KEpilogM<M,Mx,K,filter>(ray,k,context,tri.geomID(),tri.primID())); //FIXME: M,Mx
+ pre.intersect(ray,k,tri.v0,tri.v1,tri.v2,UVIdentity<M>(),Intersect1KEpilogM<M,K,filter>(ray,k,context,tri.geomID(),tri.primID()));
}
/*! Test if the ray is occluded by one of the M triangles. */
static __forceinline bool occluded(Precalculations& pre, RayK<K>& ray, size_t k, IntersectContext* context, const Primitive& tri)
{
STAT3(shadow.trav_prims,1,1,1);
- return pre.intersect(ray,k,tri.v0,tri.v1,tri.v2,UVIdentity<Mx>(),Occluded1KEpilogM<M,Mx,K,filter>(ray,k,context,tri.geomID(),tri.primID())); //FIXME: M,Mx
+ return pre.intersect(ray,k,tri.v0,tri.v1,tri.v2,UVIdentity<M>(),Occluded1KEpilogM<M,K,filter>(ray,k,context,tri.geomID(),tri.primID()));
}
};
}
diff --git a/thirdparty/embree-aarch64/kernels/geometry/trianglev_mb.h b/thirdparty/embree/kernels/geometry/trianglev_mb.h
index 63137aee16..b550a29fd5 100644
--- a/thirdparty/embree-aarch64/kernels/geometry/trianglev_mb.h
+++ b/thirdparty/embree/kernels/geometry/trianglev_mb.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
diff --git a/thirdparty/embree-aarch64/kernels/geometry/trianglev_mb_intersector.h b/thirdparty/embree/kernels/geometry/trianglev_mb_intersector.h
index 35a260d826..38cd52e85d 100644
--- a/thirdparty/embree-aarch64/kernels/geometry/trianglev_mb_intersector.h
+++ b/thirdparty/embree/kernels/geometry/trianglev_mb_intersector.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
@@ -11,32 +11,32 @@ namespace embree
namespace isa
{
/*! Intersects M motion blur triangles with 1 ray */
- template<int M, int Mx, bool filter>
+ template<int M, bool filter>
struct TriangleMvMBIntersector1Moeller
{
typedef TriangleMvMB<M> Primitive;
- typedef MoellerTrumboreIntersector1<Mx> Precalculations;
+ typedef MoellerTrumboreIntersector1<M> Precalculations;
/*! Intersect a ray with the M triangles and updates the hit. */
static __forceinline void intersect(const Precalculations& pre, RayHit& ray, IntersectContext* context, const TriangleMvMB<M>& tri)
{
STAT3(normal.trav_prims,1,1,1);
- const Vec3vf<Mx> time(ray.time());
- const Vec3vf<Mx> v0 = madd(time,Vec3vf<Mx>(tri.dv0),Vec3vf<Mx>(tri.v0));
- const Vec3vf<Mx> v1 = madd(time,Vec3vf<Mx>(tri.dv1),Vec3vf<Mx>(tri.v1));
- const Vec3vf<Mx> v2 = madd(time,Vec3vf<Mx>(tri.dv2),Vec3vf<Mx>(tri.v2));
- pre.intersect(ray,v0,v1,v2,Intersect1EpilogM<M,Mx,filter>(ray,context,tri.geomID(),tri.primID()));
+ const Vec3vf<M> time(ray.time());
+ const Vec3vf<M> v0 = madd(time,Vec3vf<M>(tri.dv0),Vec3vf<M>(tri.v0));
+ const Vec3vf<M> v1 = madd(time,Vec3vf<M>(tri.dv1),Vec3vf<M>(tri.v1));
+ const Vec3vf<M> v2 = madd(time,Vec3vf<M>(tri.dv2),Vec3vf<M>(tri.v2));
+ pre.intersect(ray,v0,v1,v2,Intersect1EpilogM<M,filter>(ray,context,tri.geomID(),tri.primID()));
}
/*! Test if the ray is occluded by one of M triangles. */
static __forceinline bool occluded(const Precalculations& pre, Ray& ray, IntersectContext* context, const TriangleMvMB<M>& tri)
{
STAT3(shadow.trav_prims,1,1,1);
- const Vec3vf<Mx> time(ray.time());
- const Vec3vf<Mx> v0 = madd(time,Vec3vf<Mx>(tri.dv0),Vec3vf<Mx>(tri.v0));
- const Vec3vf<Mx> v1 = madd(time,Vec3vf<Mx>(tri.dv1),Vec3vf<Mx>(tri.v1));
- const Vec3vf<Mx> v2 = madd(time,Vec3vf<Mx>(tri.dv2),Vec3vf<Mx>(tri.v2));
- return pre.intersect(ray,v0,v1,v2,Occluded1EpilogM<M,Mx,filter>(ray,context,tri.geomID(),tri.primID()));
+ const Vec3vf<M> time(ray.time());
+ const Vec3vf<M> v0 = madd(time,Vec3vf<M>(tri.dv0),Vec3vf<M>(tri.v0));
+ const Vec3vf<M> v1 = madd(time,Vec3vf<M>(tri.dv1),Vec3vf<M>(tri.v1));
+ const Vec3vf<M> v2 = madd(time,Vec3vf<M>(tri.dv2),Vec3vf<M>(tri.v2));
+ return pre.intersect(ray,v0,v1,v2,Occluded1EpilogM<M,filter>(ray,context,tri.geomID(),tri.primID()));
}
static __forceinline bool pointQuery(PointQuery* query, PointQueryContext* context, const Primitive& tri)
@@ -46,11 +46,11 @@ namespace embree
};
/*! Intersects M motion blur triangles with K rays. */
- template<int M, int Mx, int K, bool filter>
+ template<int M, int K, bool filter>
struct TriangleMvMBIntersectorKMoeller
{
typedef TriangleMvMB<M> Primitive;
- typedef MoellerTrumboreIntersectorK<Mx,K> Precalculations;
+ typedef MoellerTrumboreIntersectorK<M,K> Precalculations;
/*! Intersects K rays with M triangles. */
static __forceinline void intersect(const vbool<K>& valid_i, Precalculations& pre, RayHitK<K>& ray, IntersectContext* context, const TriangleMvMB<M>& tri)
@@ -90,52 +90,52 @@ namespace embree
static __forceinline void intersect(Precalculations& pre, RayHitK<K>& ray, size_t k, IntersectContext* context, const TriangleMvMB<M>& tri)
{
STAT3(normal.trav_prims,1,1,1);
- const Vec3vf<Mx> time(ray.time()[k]);
- const Vec3vf<Mx> v0 = madd(time,Vec3vf<Mx>(tri.dv0),Vec3vf<Mx>(tri.v0));
- const Vec3vf<Mx> v1 = madd(time,Vec3vf<Mx>(tri.dv1),Vec3vf<Mx>(tri.v1));
- const Vec3vf<Mx> v2 = madd(time,Vec3vf<Mx>(tri.dv2),Vec3vf<Mx>(tri.v2));
- pre.intersect(ray,k,v0,v1,v2,Intersect1KEpilogM<M,Mx,K,filter>(ray,k,context,tri.geomID(),tri.primID()));
+ const Vec3vf<M> time(ray.time()[k]);
+ const Vec3vf<M> v0 = madd(time,Vec3vf<M>(tri.dv0),Vec3vf<M>(tri.v0));
+ const Vec3vf<M> v1 = madd(time,Vec3vf<M>(tri.dv1),Vec3vf<M>(tri.v1));
+ const Vec3vf<M> v2 = madd(time,Vec3vf<M>(tri.dv2),Vec3vf<M>(tri.v2));
+ pre.intersect(ray,k,v0,v1,v2,Intersect1KEpilogM<M,K,filter>(ray,k,context,tri.geomID(),tri.primID()));
}
/*! Test if the ray is occluded by one of the M triangles. */
static __forceinline bool occluded(Precalculations& pre, RayK<K>& ray, size_t k, IntersectContext* context, const TriangleMvMB<M>& tri)
{
STAT3(shadow.trav_prims,1,1,1);
- const Vec3vf<Mx> time(ray.time()[k]);
- const Vec3vf<Mx> v0 = madd(time,Vec3vf<Mx>(tri.dv0),Vec3vf<Mx>(tri.v0));
- const Vec3vf<Mx> v1 = madd(time,Vec3vf<Mx>(tri.dv1),Vec3vf<Mx>(tri.v1));
- const Vec3vf<Mx> v2 = madd(time,Vec3vf<Mx>(tri.dv2),Vec3vf<Mx>(tri.v2));
- return pre.intersect(ray,k,v0,v1,v2,Occluded1KEpilogM<M,Mx,K,filter>(ray,k,context,tri.geomID(),tri.primID()));
+ const Vec3vf<M> time(ray.time()[k]);
+ const Vec3vf<M> v0 = madd(time,Vec3vf<M>(tri.dv0),Vec3vf<M>(tri.v0));
+ const Vec3vf<M> v1 = madd(time,Vec3vf<M>(tri.dv1),Vec3vf<M>(tri.v1));
+ const Vec3vf<M> v2 = madd(time,Vec3vf<M>(tri.dv2),Vec3vf<M>(tri.v2));
+ return pre.intersect(ray,k,v0,v1,v2,Occluded1KEpilogM<M,K,filter>(ray,k,context,tri.geomID(),tri.primID()));
}
};
/*! Intersects M motion blur triangles with 1 ray */
- template<int M, int Mx, bool filter>
+ template<int M, bool filter>
struct TriangleMvMBIntersector1Pluecker
{
typedef TriangleMvMB<M> Primitive;
- typedef PlueckerIntersector1<Mx> Precalculations;
+ typedef PlueckerIntersector1<M> Precalculations;
/*! Intersect a ray with the M triangles and updates the hit. */
static __forceinline void intersect(const Precalculations& pre, RayHit& ray, IntersectContext* context, const TriangleMvMB<M>& tri)
{
STAT3(normal.trav_prims,1,1,1);
- const Vec3vf<Mx> time(ray.time());
- const Vec3vf<Mx> v0 = madd(time,Vec3vf<Mx>(tri.dv0),Vec3vf<Mx>(tri.v0));
- const Vec3vf<Mx> v1 = madd(time,Vec3vf<Mx>(tri.dv1),Vec3vf<Mx>(tri.v1));
- const Vec3vf<Mx> v2 = madd(time,Vec3vf<Mx>(tri.dv2),Vec3vf<Mx>(tri.v2));
- pre.intersect(ray,v0,v1,v2,UVIdentity<Mx>(),Intersect1EpilogM<M,Mx,filter>(ray,context,tri.geomID(),tri.primID()));
+ const Vec3vf<M> time(ray.time());
+ const Vec3vf<M> v0 = madd(time,Vec3vf<M>(tri.dv0),Vec3vf<M>(tri.v0));
+ const Vec3vf<M> v1 = madd(time,Vec3vf<M>(tri.dv1),Vec3vf<M>(tri.v1));
+ const Vec3vf<M> v2 = madd(time,Vec3vf<M>(tri.dv2),Vec3vf<M>(tri.v2));
+ pre.intersect(ray,v0,v1,v2,UVIdentity<M>(),Intersect1EpilogM<M,filter>(ray,context,tri.geomID(),tri.primID()));
}
/*! Test if the ray is occluded by one of M triangles. */
static __forceinline bool occluded(const Precalculations& pre, Ray& ray, IntersectContext* context, const TriangleMvMB<M>& tri)
{
STAT3(shadow.trav_prims,1,1,1);
- const Vec3vf<Mx> time(ray.time());
- const Vec3vf<Mx> v0 = madd(time,Vec3vf<Mx>(tri.dv0),Vec3vf<Mx>(tri.v0));
- const Vec3vf<Mx> v1 = madd(time,Vec3vf<Mx>(tri.dv1),Vec3vf<Mx>(tri.v1));
- const Vec3vf<Mx> v2 = madd(time,Vec3vf<Mx>(tri.dv2),Vec3vf<Mx>(tri.v2));
- return pre.intersect(ray,v0,v1,v2,UVIdentity<Mx>(),Occluded1EpilogM<M,Mx,filter>(ray,context,tri.geomID(),tri.primID()));
+ const Vec3vf<M> time(ray.time());
+ const Vec3vf<M> v0 = madd(time,Vec3vf<M>(tri.dv0),Vec3vf<M>(tri.v0));
+ const Vec3vf<M> v1 = madd(time,Vec3vf<M>(tri.dv1),Vec3vf<M>(tri.v1));
+ const Vec3vf<M> v2 = madd(time,Vec3vf<M>(tri.dv2),Vec3vf<M>(tri.v2));
+ return pre.intersect(ray,v0,v1,v2,UVIdentity<M>(),Occluded1EpilogM<M,filter>(ray,context,tri.geomID(),tri.primID()));
}
static __forceinline bool pointQuery(PointQuery* query, PointQueryContext* context, const Primitive& tri)
@@ -145,11 +145,11 @@ namespace embree
};
/*! Intersects M motion blur triangles with K rays. */
- template<int M, int Mx, int K, bool filter>
+ template<int M, int K, bool filter>
struct TriangleMvMBIntersectorKPluecker
{
typedef TriangleMvMB<M> Primitive;
- typedef PlueckerIntersectorK<Mx,K> Precalculations;
+ typedef PlueckerIntersectorK<M,K> Precalculations;
/*! Intersects K rays with M triangles. */
static __forceinline void intersect(const vbool<K>& valid_i, Precalculations& pre, RayHitK<K>& ray, IntersectContext* context, const TriangleMvMB<M>& tri)
@@ -189,22 +189,22 @@ namespace embree
static __forceinline void intersect(Precalculations& pre, RayHitK<K>& ray, size_t k, IntersectContext* context, const TriangleMvMB<M>& tri)
{
STAT3(normal.trav_prims,1,1,1);
- const Vec3vf<Mx> time(ray.time()[k]);
- const Vec3vf<Mx> v0 = madd(time,Vec3vf<Mx>(tri.dv0),Vec3vf<Mx>(tri.v0));
- const Vec3vf<Mx> v1 = madd(time,Vec3vf<Mx>(tri.dv1),Vec3vf<Mx>(tri.v1));
- const Vec3vf<Mx> v2 = madd(time,Vec3vf<Mx>(tri.dv2),Vec3vf<Mx>(tri.v2));
- pre.intersect(ray,k,v0,v1,v2,UVIdentity<Mx>(),Intersect1KEpilogM<M,Mx,K,filter>(ray,k,context,tri.geomID(),tri.primID()));
+ const Vec3vf<M> time(ray.time()[k]);
+ const Vec3vf<M> v0 = madd(time,Vec3vf<M>(tri.dv0),Vec3vf<M>(tri.v0));
+ const Vec3vf<M> v1 = madd(time,Vec3vf<M>(tri.dv1),Vec3vf<M>(tri.v1));
+ const Vec3vf<M> v2 = madd(time,Vec3vf<M>(tri.dv2),Vec3vf<M>(tri.v2));
+ pre.intersect(ray,k,v0,v1,v2,UVIdentity<M>(),Intersect1KEpilogM<M,K,filter>(ray,k,context,tri.geomID(),tri.primID()));
}
/*! Test if the ray is occluded by one of the M triangles. */
static __forceinline bool occluded(Precalculations& pre, RayK<K>& ray, size_t k, IntersectContext* context, const TriangleMvMB<M>& tri)
{
STAT3(shadow.trav_prims,1,1,1);
- const Vec3vf<Mx> time(ray.time()[k]);
- const Vec3vf<Mx> v0 = madd(time,Vec3vf<Mx>(tri.dv0),Vec3vf<Mx>(tri.v0));
- const Vec3vf<Mx> v1 = madd(time,Vec3vf<Mx>(tri.dv1),Vec3vf<Mx>(tri.v1));
- const Vec3vf<Mx> v2 = madd(time,Vec3vf<Mx>(tri.dv2),Vec3vf<Mx>(tri.v2));
- return pre.intersect(ray,k,v0,v1,v2,UVIdentity<Mx>(),Occluded1KEpilogM<M,Mx,K,filter>(ray,k,context,tri.geomID(),tri.primID()));
+ const Vec3vf<M> time(ray.time()[k]);
+ const Vec3vf<M> v0 = madd(time,Vec3vf<M>(tri.dv0),Vec3vf<M>(tri.v0));
+ const Vec3vf<M> v1 = madd(time,Vec3vf<M>(tri.dv1),Vec3vf<M>(tri.v1));
+ const Vec3vf<M> v2 = madd(time,Vec3vf<M>(tri.dv2),Vec3vf<M>(tri.v2));
+ return pre.intersect(ray,k,v0,v1,v2,UVIdentity<M>(),Occluded1KEpilogM<M,K,filter>(ray,k,context,tri.geomID(),tri.primID()));
}
};
}
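
In the motion-blur variants above, each intersect/occluded call first rebuilds the vertices at the ray's time via madd(time, dv, v), i.e. v(t) = v + t*dv where dv is the displacement from time 0 to time 1, and only then runs the static triangle test. A tiny illustrative sketch of that interpolation step (all names invented for the sketch, not Embree API):

// motion_blur_vertices.cpp -- illustrative only (see note above).
#include <cstdio>

struct Vec3 { float x, y, z; };

// per-component multiply-add, like Embree's madd(time, dv, v) = time*dv + v
static Vec3 madd(float t, Vec3 dv, Vec3 v) {
  return { v.x + t*dv.x, v.y + t*dv.y, v.z + t*dv.z };
}

int main()
{
  const Vec3 v0  = {0.0f, 0.0f, 0.0f};   // vertex position at time 0
  const Vec3 dv0 = {1.0f, 0.0f, 0.0f};   // displacement from time 0 to time 1
  const float ray_time = 0.25f;          // normalized time carried by the ray
  const Vec3 v0_t = madd(ray_time, dv0, v0);
  std::printf("v0(t=%.2f) = (%f, %f, %f)\n", ray_time, v0_t.x, v0_t.y, v0_t.z);
  return 0;
}
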
diff --git a/thirdparty/embree-aarch64/kernels/hash.h b/thirdparty/embree/kernels/hash.h
index 4abbe203d6..10f315cee7 100644
--- a/thirdparty/embree-aarch64/kernels/hash.h
+++ b/thirdparty/embree/kernels/hash.h
@@ -2,4 +2,4 @@
// Copyright 2009-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
-#define RTC_HASH "6ef362f99af80c9dfe8dd2bfc582d9067897edc6"
+#define RTC_HASH "7c53133eb21424f7f0ae1e25bf357e358feaf6ab"
diff --git a/thirdparty/embree-aarch64/kernels/subdiv/bezier_curve.h b/thirdparty/embree/kernels/subdiv/bezier_curve.h
index c0e78820f8..a5adad5cc9 100644
--- a/thirdparty/embree-aarch64/kernels/subdiv/bezier_curve.h
+++ b/thirdparty/embree/kernels/subdiv/bezier_curve.h
@@ -1,10 +1,11 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
#include "../common/default.h"
-#include "../common/scene_curves.h"
+//#include "../common/scene_curves.h"
+#include "../common/context.h"
namespace embree
{
@@ -659,6 +660,7 @@ namespace embree
return numRoots(v0,v1) + numRoots(v1,v2) + numRoots(v2,v3);
}
+ template<typename CurveGeometry>
__forceinline CubicBezierCurve<Vec3ff> enlargeRadiusToMinWidth(const IntersectContext* context, const CurveGeometry* geom, const Vec3fa& ray_org, const CubicBezierCurve<Vec3ff>& curve)
{
return CubicBezierCurve<Vec3ff>(enlargeRadiusToMinWidth(context,geom,ray_org,curve.v0),
diff --git a/thirdparty/embree-aarch64/kernels/subdiv/bezier_patch.h b/thirdparty/embree/kernels/subdiv/bezier_patch.h
index d87ed41ccb..2ff03902a7 100644
--- a/thirdparty/embree-aarch64/kernels/subdiv/bezier_patch.h
+++ b/thirdparty/embree/kernels/subdiv/bezier_patch.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
diff --git a/thirdparty/embree-aarch64/kernels/subdiv/bilinear_patch.h b/thirdparty/embree/kernels/subdiv/bilinear_patch.h
index 35748754bd..cade104a6c 100644
--- a/thirdparty/embree-aarch64/kernels/subdiv/bilinear_patch.h
+++ b/thirdparty/embree/kernels/subdiv/bilinear_patch.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
diff --git a/thirdparty/embree-aarch64/kernels/subdiv/bspline_curve.h b/thirdparty/embree/kernels/subdiv/bspline_curve.h
index a325667328..51489ef37c 100644
--- a/thirdparty/embree-aarch64/kernels/subdiv/bspline_curve.h
+++ b/thirdparty/embree/kernels/subdiv/bspline_curve.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
@@ -161,8 +161,8 @@ namespace embree
template<int M>
__forceinline void veval(const vfloat<M>& t, Vec4vf<M>& p, Vec4vf<M>& dp) const
{
- p = veval(t);
- dp = veval_du(t);
+ p = veval<M>(t);
+ dp = veval_du<M>(t);
}
template<int M>
@@ -306,6 +306,7 @@ namespace embree
ocurve = BezierCurveT<Vertex>(v0,v1,v2,v3);
}
+ template<typename CurveGeometry>
__forceinline BSplineCurveT<Vec3ff> enlargeRadiusToMinWidth(const IntersectContext* context, const CurveGeometry* geom, const Vec3fa& ray_org, const BSplineCurveT<Vec3ff>& curve)
{
return BSplineCurveT<Vec3ff>(enlargeRadiusToMinWidth(context,geom,ray_org,curve.v0),
diff --git a/thirdparty/embree-aarch64/kernels/subdiv/bspline_patch.h b/thirdparty/embree/kernels/subdiv/bspline_patch.h
index 9769bc17bd..ff47f01c7a 100644
--- a/thirdparty/embree-aarch64/kernels/subdiv/bspline_patch.h
+++ b/thirdparty/embree/kernels/subdiv/bspline_patch.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
diff --git a/thirdparty/embree-aarch64/kernels/subdiv/catmullclark_coefficients.h b/thirdparty/embree/kernels/subdiv/catmullclark_coefficients.h
index 05031cf6b9..46959797bf 100644
--- a/thirdparty/embree-aarch64/kernels/subdiv/catmullclark_coefficients.h
+++ b/thirdparty/embree/kernels/subdiv/catmullclark_coefficients.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
diff --git a/thirdparty/embree-aarch64/kernels/subdiv/catmullclark_patch.h b/thirdparty/embree/kernels/subdiv/catmullclark_patch.h
index ab1d63594a..91772d94ed 100644
--- a/thirdparty/embree-aarch64/kernels/subdiv/catmullclark_patch.h
+++ b/thirdparty/embree/kernels/subdiv/catmullclark_patch.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
diff --git a/thirdparty/embree-aarch64/kernels/subdiv/catmullclark_ring.h b/thirdparty/embree/kernels/subdiv/catmullclark_ring.h
index 73b41fd4ff..e5ad5dadfe 100644
--- a/thirdparty/embree-aarch64/kernels/subdiv/catmullclark_ring.h
+++ b/thirdparty/embree/kernels/subdiv/catmullclark_ring.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
diff --git a/thirdparty/embree-aarch64/kernels/subdiv/catmullrom_curve.h b/thirdparty/embree/kernels/subdiv/catmullrom_curve.h
index b244af481c..74fc4c1230 100644
--- a/thirdparty/embree-aarch64/kernels/subdiv/catmullrom_curve.h
+++ b/thirdparty/embree/kernels/subdiv/catmullrom_curve.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
@@ -168,8 +168,8 @@ namespace embree
template<int M>
__forceinline void veval(const vfloat<M>& t, Vec4vf<M>& p, Vec4vf<M>& dp) const
{
- p = veval(t);
- dp = veval_du(t);
+ p = veval<M>(t);
+ dp = veval_du<M>(t);
}
template<int M>
@@ -283,6 +283,7 @@ namespace embree
}
};
+ template<typename CurveGeometry>
__forceinline CatmullRomCurveT<Vec3ff> enlargeRadiusToMinWidth(const IntersectContext* context, const CurveGeometry* geom, const Vec3fa& ray_org, const CatmullRomCurveT<Vec3ff>& curve)
{
return CatmullRomCurveT<Vec3ff>(enlargeRadiusToMinWidth(context,geom,ray_org,curve.v0),
diff --git a/thirdparty/embree-aarch64/kernels/subdiv/feature_adaptive_eval.h b/thirdparty/embree/kernels/subdiv/feature_adaptive_eval.h
index 23f24c360c..58c0b63e62 100644
--- a/thirdparty/embree-aarch64/kernels/subdiv/feature_adaptive_eval.h
+++ b/thirdparty/embree/kernels/subdiv/feature_adaptive_eval.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
diff --git a/thirdparty/embree-aarch64/kernels/subdiv/feature_adaptive_eval_grid.h b/thirdparty/embree/kernels/subdiv/feature_adaptive_eval_grid.h
index 76583b2e5d..4755aba28d 100644
--- a/thirdparty/embree-aarch64/kernels/subdiv/feature_adaptive_eval_grid.h
+++ b/thirdparty/embree/kernels/subdiv/feature_adaptive_eval_grid.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
diff --git a/thirdparty/embree-aarch64/kernels/subdiv/feature_adaptive_eval_simd.h b/thirdparty/embree/kernels/subdiv/feature_adaptive_eval_simd.h
index fa3216730f..edab0db12f 100644
--- a/thirdparty/embree-aarch64/kernels/subdiv/feature_adaptive_eval_simd.h
+++ b/thirdparty/embree/kernels/subdiv/feature_adaptive_eval_simd.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
diff --git a/thirdparty/embree-aarch64/kernels/subdiv/gregory_patch.h b/thirdparty/embree/kernels/subdiv/gregory_patch.h
index 2a7c4b1f2c..9026d5c407 100644
--- a/thirdparty/embree-aarch64/kernels/subdiv/gregory_patch.h
+++ b/thirdparty/embree/kernels/subdiv/gregory_patch.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
diff --git a/thirdparty/embree-aarch64/kernels/subdiv/gregory_patch_dense.h b/thirdparty/embree/kernels/subdiv/gregory_patch_dense.h
index 85effd02cf..4cf9a7e98f 100644
--- a/thirdparty/embree-aarch64/kernels/subdiv/gregory_patch_dense.h
+++ b/thirdparty/embree/kernels/subdiv/gregory_patch_dense.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
diff --git a/thirdparty/embree-aarch64/kernels/subdiv/gridrange.h b/thirdparty/embree/kernels/subdiv/gridrange.h
index 4fd741c879..4f2b90d7bd 100644
--- a/thirdparty/embree-aarch64/kernels/subdiv/gridrange.h
+++ b/thirdparty/embree/kernels/subdiv/gridrange.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
diff --git a/thirdparty/embree-aarch64/kernels/subdiv/half_edge.h b/thirdparty/embree/kernels/subdiv/half_edge.h
index fb350ca71f..baf019cd79 100644
--- a/thirdparty/embree-aarch64/kernels/subdiv/half_edge.h
+++ b/thirdparty/embree/kernels/subdiv/half_edge.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
@@ -186,7 +186,7 @@ namespace embree
{
const HalfEdge* p = this;
do {
- if (p->vertexHasBorder()) return true;
+ if (p->vertexHasBorder() && (p->vertex_type != HalfEdge::NON_MANIFOLD_EDGE_VERTEX)) return true;
p = p->next();
} while (p != this);
return false;
diff --git a/thirdparty/embree-aarch64/kernels/subdiv/hermite_curve.h b/thirdparty/embree/kernels/subdiv/hermite_curve.h
index 9fab79cf0c..ffef5a4315 100644
--- a/thirdparty/embree-aarch64/kernels/subdiv/hermite_curve.h
+++ b/thirdparty/embree/kernels/subdiv/hermite_curve.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
@@ -29,6 +29,7 @@ namespace embree
}
};
+ template<typename CurveGeometry>
__forceinline HermiteCurveT<Vec3ff> enlargeRadiusToMinWidth(const IntersectContext* context, const CurveGeometry* geom, const Vec3fa& ray_org, const HermiteCurveT<Vec3ff>& curve) {
return HermiteCurveT<Vec3ff>(enlargeRadiusToMinWidth(context,geom,ray_org,BezierCurveT<Vec3ff>(curve)));
}
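
The curve headers above (Bezier, B-spline, Catmull-Rom, Hermite) all gain a template<typename CurveGeometry> parameter on enlargeRadiusToMinWidth(), and bezier_curve.h swaps its scene_curves.h include for context.h, so the geometry type is presumably resolved only at the point of instantiation rather than in the header itself. A minimal, purely illustrative sketch of that general C++ pattern; every name below is invented and none of it is Embree API:

// geometry_template_sketch.cpp -- illustrative only (see note above).
#include <algorithm>
#include <cstdio>

template <typename CurveGeometry>
float enlarge_radius_to_min_width(const CurveGeometry* geom, float radius)
{
  // geom has a dependent type: this header-style code compiles without the
  // geometry's definition, which is only needed where the call is instantiated.
  return std::max(radius, geom->min_width());
}

struct FakeCurveGeometry {               // stands in for the real geometry class
  float min_width() const { return 0.5f; }
};

int main()
{
  FakeCurveGeometry g;
  std::printf("%f\n", enlarge_radius_to_min_width(&g, 0.1f));   // prints 0.500000
  return 0;
}
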
diff --git a/thirdparty/embree-aarch64/kernels/subdiv/linear_bezier_patch.h b/thirdparty/embree/kernels/subdiv/linear_bezier_patch.h
index f4a854af7f..f8e8a25f35 100644
--- a/thirdparty/embree-aarch64/kernels/subdiv/linear_bezier_patch.h
+++ b/thirdparty/embree/kernels/subdiv/linear_bezier_patch.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
diff --git a/thirdparty/embree-aarch64/kernels/subdiv/patch.h b/thirdparty/embree/kernels/subdiv/patch.h
index d58241b96d..c4340ea9b6 100644
--- a/thirdparty/embree-aarch64/kernels/subdiv/patch.h
+++ b/thirdparty/embree/kernels/subdiv/patch.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
diff --git a/thirdparty/embree-aarch64/kernels/subdiv/patch_eval.h b/thirdparty/embree/kernels/subdiv/patch_eval.h
index 482d015fa3..a3fafa72f4 100644
--- a/thirdparty/embree-aarch64/kernels/subdiv/patch_eval.h
+++ b/thirdparty/embree/kernels/subdiv/patch_eval.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
diff --git a/thirdparty/embree-aarch64/kernels/subdiv/patch_eval_grid.h b/thirdparty/embree/kernels/subdiv/patch_eval_grid.h
index c05db55f4c..167e1ebe1c 100644
--- a/thirdparty/embree-aarch64/kernels/subdiv/patch_eval_grid.h
+++ b/thirdparty/embree/kernels/subdiv/patch_eval_grid.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
diff --git a/thirdparty/embree-aarch64/kernels/subdiv/patch_eval_simd.h b/thirdparty/embree/kernels/subdiv/patch_eval_simd.h
index 28016d9e20..fef88a4492 100644
--- a/thirdparty/embree-aarch64/kernels/subdiv/patch_eval_simd.h
+++ b/thirdparty/embree/kernels/subdiv/patch_eval_simd.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
diff --git a/thirdparty/embree-aarch64/kernels/subdiv/subdivpatch1base.h b/thirdparty/embree/kernels/subdiv/subdivpatch1base.h
index d5bc403cca..c3069dadee 100644
--- a/thirdparty/embree-aarch64/kernels/subdiv/subdivpatch1base.h
+++ b/thirdparty/embree/kernels/subdiv/subdivpatch1base.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
diff --git a/thirdparty/embree-aarch64/kernels/subdiv/tessellation.h b/thirdparty/embree/kernels/subdiv/tessellation.h
index bda1e2d559..abde4f2bde 100644
--- a/thirdparty/embree-aarch64/kernels/subdiv/tessellation.h
+++ b/thirdparty/embree/kernels/subdiv/tessellation.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
diff --git a/thirdparty/embree-aarch64/kernels/subdiv/tessellation_cache.h b/thirdparty/embree/kernels/subdiv/tessellation_cache.h
index 5c215288b6..99edf49be4 100644
--- a/thirdparty/embree-aarch64/kernels/subdiv/tessellation_cache.h
+++ b/thirdparty/embree/kernels/subdiv/tessellation_cache.h
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
@@ -63,7 +63,7 @@ namespace embree
static const size_t NUM_CACHE_SEGMENTS = 8;
static const size_t NUM_PREALLOC_THREAD_WORK_STATES = 512;
static const size_t COMMIT_INDEX_SHIFT = 32+8;
-#if defined(__X86_64__) || defined(__aarch64__)
+#if defined(__64BIT__)
static const size_t REF_TAG_MASK = 0xffffffffff;
#else
static const size_t REF_TAG_MASK = 0x7FFFFFFF;
diff --git a/thirdparty/embree/patches/godot-changes-android.patch b/thirdparty/embree/patches/godot-changes-android.patch
new file mode 100644
index 0000000000..a27f924bde
--- /dev/null
+++ b/thirdparty/embree/patches/godot-changes-android.patch
@@ -0,0 +1,103 @@
+diff --git a/thirdparty/embree/common/sys/sysinfo.cpp b/thirdparty/embree/common/sys/sysinfo.cpp
+index ba97dc227b..1679599608 100644
+--- a/thirdparty/embree/common/sys/sysinfo.cpp
++++ b/thirdparty/embree/common/sys/sysinfo.cpp
+@@ -618,7 +618,10 @@ namespace embree
+ static int nThreads = -1;
+ if (nThreads != -1) return nThreads;
+
+-#if defined(__MACOSX__)
++// -- GODOT start --
++// #if defined(__MACOSX__)
++#if defined(__MACOSX__) || defined(__ANDROID__)
++// -- GODOT end --
+ nThreads = sysconf(_SC_NPROCESSORS_ONLN); // does not work in Linux LXC container
+ assert(nThreads);
+ #else
+diff --git a/thirdparty/embree/common/sys/thread.cpp b/thirdparty/embree/common/sys/thread.cpp
+index a7827e18f7..f4014be89b 100644
+--- a/thirdparty/embree/common/sys/thread.cpp
++++ b/thirdparty/embree/common/sys/thread.cpp
+@@ -158,7 +158,9 @@ namespace embree
+ /// Linux Platform
+ ////////////////////////////////////////////////////////////////////////////////
+
+-#if defined(__LINUX__)
++// -- GODOT start --
++#if defined(__LINUX__) && !defined(__ANDROID__)
++// -- GODOT end --
+
+ #include <fstream>
+ #include <sstream>
+@@ -247,6 +249,28 @@ namespace embree
+ }
+ #endif
+
++// -- GODOT start --
++////////////////////////////////////////////////////////////////////////////////
++/// Android Platform
++////////////////////////////////////////////////////////////////////////////////
++
++#if defined(__ANDROID__)
++
++namespace embree
++{
++ /*! set affinity of the calling thread */
++ void setAffinity(ssize_t affinity)
++ {
++ cpu_set_t cset;
++ CPU_ZERO(&cset);
++ CPU_SET(affinity, &cset);
++
++ sched_setaffinity(0, sizeof(cset), &cset);
++ }
++}
++#endif
++// -- GODOT end --
++
+ ////////////////////////////////////////////////////////////////////////////////
+ /// FreeBSD Platform
+ ////////////////////////////////////////////////////////////////////////////////
+@@ -355,7 +379,9 @@ namespace embree
+ pthread_attr_destroy(&attr);
+
+ /* set affinity */
+-#if defined(__LINUX__)
++// -- GODOT start --
++#if defined(__LINUX__) && !defined(__ANDROID__)
++// -- GODOT end --
+ if (threadID >= 0) {
+ cpu_set_t cset;
+ CPU_ZERO(&cset);
+@@ -370,7 +396,16 @@ namespace embree
+ CPU_SET(threadID, &cset);
+ pthread_setaffinity_np(*tid, sizeof(cset), &cset);
+ }
++// -- GODOT start --
++#elif defined(__ANDROID__)
++ if (threadID >= 0) {
++ cpu_set_t cset;
++ CPU_ZERO(&cset);
++ CPU_SET(threadID, &cset);
++ sched_setaffinity(pthread_gettid_np(*tid), sizeof(cset), &cset);
++ }
+ #endif
++// -- GODOT end --
+
+ return thread_t(tid);
+ }
+@@ -389,8 +424,14 @@ namespace embree
+
+ /*! destroy a hardware thread by its handle */
+ void destroyThread(thread_t tid) {
++// -- GODOT start --
++#if defined(__ANDROID__)
++ FATAL("Can't destroy threads on Android.");
++#else
+ pthread_cancel(*(pthread_t*)tid);
+ delete (pthread_t*)tid;
++#endif
++// -- GODOT end --
+ }
+
+ /*! creates thread local storage */
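The Android hunks above reduce to one substitution: where embree normally pins worker threads with pthread_setaffinity_np, the patch treats that call as unavailable on Android and routes through sched_setaffinity with the thread's kernel tid instead, and it counts cores via sysconf(_SC_NPROCESSORS_ONLN) as on macOS. A minimal standalone sketch of the same affinity idiom, assuming a Bionic toolchain that provides pthread_gettid_np (as the patch does); error handling is omitted just as in the patched code:

#ifndef _GNU_SOURCE
#define _GNU_SOURCE               // glibc needs this for the CPU_* macros and pthread_setaffinity_np
#endif
#include <pthread.h>
#include <sched.h>

static void pin_thread_to_core(pthread_t thread, int core) {
  cpu_set_t cset;
  CPU_ZERO(&cset);                // start from an empty CPU mask
  CPU_SET(core, &cset);           // allow only the requested core
#if defined(__ANDROID__)
  // Route used by the patch on Android: resolve the kernel tid, then apply the
  // process-level affinity call to that tid.
  sched_setaffinity(pthread_gettid_np(thread), sizeof(cset), &cset);
#else
  // Route kept by the patch for non-Android Linux builds.
  pthread_setaffinity_np(thread, sizeof(cset), &cset);
#endif
}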
diff --git a/thirdparty/embree/patches/godot-changes-misc.patch b/thirdparty/embree/patches/godot-changes-misc.patch
new file mode 100644
index 0000000000..8bf0d9fa97
--- /dev/null
+++ b/thirdparty/embree/patches/godot-changes-misc.patch
@@ -0,0 +1,105 @@
+diff --git a/thirdparty/embree/common/sys/intrinsics.h b/thirdparty/embree/common/sys/intrinsics.h
+index 79729c87ab..ed8dd7d40a 100644
+--- a/thirdparty/embree/common/sys/intrinsics.h
++++ b/thirdparty/embree/common/sys/intrinsics.h
+@@ -34,8 +34,14 @@
+ #endif
+
+ #if defined(__WIN32__)
+-# define NOMINMAX
+-# include <windows.h>
++// -- GODOT start --
++#if !defined(NOMINMAX)
++// -- GODOT end --
++#define NOMINMAX
++// -- GODOT start --
++#endif
++#include "windows.h"
++// -- GODOT end --
+ #endif
+
+ /* normally defined in pmmintrin.h, but we always need this */
+diff --git a/thirdparty/embree/common/sys/platform.h b/thirdparty/embree/common/sys/platform.h
+index 3fc5e99b8d..697e07bb86 100644
+--- a/thirdparty/embree/common/sys/platform.h
++++ b/thirdparty/embree/common/sys/platform.h
+@@ -99,7 +99,9 @@
+ #define dll_import
+ #endif
+
+-#ifdef __WIN32__
++// -- GODOT start --
++#if defined(__WIN32__) && !defined(__MINGW32__)
++// -- GODOT end --
+ #if !defined(__noinline)
+ #define __noinline __declspec(noinline)
+ #endif
+@@ -149,6 +151,9 @@
+ #define DELETED = delete
+ #endif
+
++// -- GODOT start --
++#if !defined(likely)
++// -- GODOT end --
+ #if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
+ #define likely(expr) (expr)
+ #define unlikely(expr) (expr)
+@@ -156,6 +161,9 @@
+ #define likely(expr) __builtin_expect((bool)(expr),true )
+ #define unlikely(expr) __builtin_expect((bool)(expr),false)
+ #endif
++// -- GODOT start --
++#endif
++// -- GODOT end --
+
+ ////////////////////////////////////////////////////////////////////////////////
+ /// Error handling and debugging
+diff --git a/thirdparty/embree/common/sys/sysinfo.cpp b/thirdparty/embree/common/sys/sysinfo.cpp
+index ba97dc227b..f1a59e511e 100644
+--- a/thirdparty/embree/common/sys/sysinfo.cpp
++++ b/thirdparty/embree/common/sys/sysinfo.cpp
+@@ -248,7 +248,9 @@ namespace embree
+ #if defined(__X86_ASM__)
+ __noinline int64_t get_xcr0()
+ {
+-#if defined (__WIN32__)
++// -- GODOT start --
++#if defined (__WIN32__) && !defined (__MINGW32__)
++// -- GODOT end --
+ int64_t xcr0 = 0; // int64_t is workaround for compiler bug under VS2013, Win32
+ xcr0 = _xgetbv(0);
+ return xcr0;
+diff --git a/thirdparty/embree/include/embree3/rtcore_common.h b/thirdparty/embree/include/embree3/rtcore_common.h
+index 9c14b28745..4857e1e05e 100644
+--- a/thirdparty/embree/include/embree3/rtcore_common.h
++++ b/thirdparty/embree/include/embree3/rtcore_common.h
+@@ -19,7 +19,9 @@ typedef int ssize_t;
+ #endif
+ #endif
+
+-#ifdef _WIN32
++// -- GODOT start --
++#if defined(_WIN32) && defined(_MSC_VER)
++// -- GODOT end --
+ # define RTC_ALIGN(...) __declspec(align(__VA_ARGS__))
+ #else
+ # define RTC_ALIGN(...) __attribute__((aligned(__VA_ARGS__)))
+diff --git a/thirdparty/embree/common/tasking/taskschedulertbb.h b/thirdparty/embree/common/tasking/taskschedulertbb.h
+index 3fd15816e9..35bd49849f 100644
+--- a/thirdparty/embree/common/tasking/taskschedulertbb.h
++++ b/thirdparty/embree/common/tasking/taskschedulertbb.h
+@@ -12,7 +12,13 @@
+ #include "../sys/ref.h"
+
+ #if defined(__WIN32__)
++// -- GODOT start --
++#if !defined(NOMINMAX)
++// -- GODOT end --
+ # define NOMINMAX
++// -- GODOT start --
++#endif
++// -- GODOT end --
+ #endif
+
+ // We need to define these to avoid implicit linkage against
+ \ No newline at end of file
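Most of godot-changes-misc.patch is the same defensive pattern: wrap a macro definition in an existence check so embree's headers can coexist with Godot's engine headers and with MinGW. An illustration of that pattern, lifted directly from the hunks above (the guards are the point, not the surrounding build setup):

// Only define NOMINMAX if nobody else (e.g. Godot's Windows headers) already did,
// so <windows.h> does not inject min()/max() macros or trigger a redefinition warning.
#if defined(__WIN32__)
#if !defined(NOMINMAX)
#define NOMINMAX
#endif
#include <windows.h>
#endif

// Same idea for likely()/unlikely(): respect an engine-provided definition.
#if !defined(likely)
#if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
#define likely(expr) (expr)
#define unlikely(expr) (expr)
#else
#define likely(expr) __builtin_expect((bool)(expr), true)
#define unlikely(expr) __builtin_expect((bool)(expr), false)
#endif
#endif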
diff --git a/thirdparty/embree-aarch64/patches/godot-changes.patch b/thirdparty/embree/patches/godot-changes-noexcept.patch
index 86fbf226d2..c587a0e2be 100644
--- a/thirdparty/embree-aarch64/patches/godot-changes.patch
+++ b/thirdparty/embree/patches/godot-changes-noexcept.patch
@@ -1,27 +1,27 @@
-diff --git a/thirdparty/embree-aarch64/common/algorithms/parallel_for.h b/thirdparty/embree-aarch64/common/algorithms/parallel_for.h
-index 76c6b740aa..51d296fb16 100644
---- a/thirdparty/embree-aarch64/common/algorithms/parallel_for.h
-+++ b/thirdparty/embree-aarch64/common/algorithms/parallel_for.h
-@@ -27,7 +27,10 @@ namespace embree
+diff --git a/thirdparty/embree/common/algorithms/parallel_for.h b/thirdparty/embree/common/algorithms/parallel_for.h
+index f052d8b468..645681ac63 100644
+--- a/thirdparty/embree/common/algorithms/parallel_for.h
++++ b/thirdparty/embree/common/algorithms/parallel_for.h
+@@ -21,7 +21,10 @@ namespace embree
func(r.begin());
});
if (!TaskScheduler::wait())
- throw std::runtime_error("task cancelled");
+ // -- GODOT start --
+ // throw std::runtime_error("task cancelled");
-+ abort();
++ abort();
+ // -- GODOT end --
}
- #elif defined(TASKING_GCD) && defined(BUILD_IOS)
-
-@@ -55,13 +58,19 @@ namespace embree
+
+ #elif defined(TASKING_TBB)
+@@ -31,13 +34,19 @@ namespace embree
func(i);
},context);
if (context.is_group_execution_cancelled())
- throw std::runtime_error("task cancelled");
+ // -- GODOT start --
+ // throw std::runtime_error("task cancelled");
-+ abort();
++ abort();
+ // -- GODOT end --
#else
tbb::parallel_for(Index(0),N,Index(1),[&](Index i) {
@@ -31,31 +31,31 @@ index 76c6b740aa..51d296fb16 100644
- throw std::runtime_error("task cancelled");
+ // -- GODOT start --
+ // throw std::runtime_error("task cancelled");
-+ abort();
++ abort();
+ // -- GODOT end --
#endif
#elif defined(TASKING_PPL)
-@@ -81,7 +90,10 @@ namespace embree
+@@ -57,7 +66,10 @@ namespace embree
#if defined(TASKING_INTERNAL)
TaskScheduler::spawn(first,last,minStepSize,func);
if (!TaskScheduler::wait())
- throw std::runtime_error("task cancelled");
+ // -- GODOT start --
+ // throw std::runtime_error("task cancelled");
-+ abort();
++ abort();
+ // -- GODOT end --
- #elif defined(TASKING_GCD) && defined(BUILD_IOS)
-
-@@ -109,13 +121,19 @@ namespace embree
+ #elif defined(TASKING_TBB)
+ #if TBB_INTERFACE_VERSION >= 12002
+@@ -66,13 +78,19 @@ namespace embree
func(range<Index>(r.begin(),r.end()));
},context);
if (context.is_group_execution_cancelled())
- throw std::runtime_error("task cancelled");
+ // -- GODOT start --
+ // throw std::runtime_error("task cancelled");
-+ abort();
++ abort();
+ // -- GODOT end --
#else
tbb::parallel_for(tbb::blocked_range<Index>(first,last,minStepSize),[&](const tbb::blocked_range<Index>& r) {
@@ -65,19 +65,19 @@ index 76c6b740aa..51d296fb16 100644
- throw std::runtime_error("task cancelled");
+ // -- GODOT start --
+ // throw std::runtime_error("task cancelled");
-+ abort();
++ abort();
+ // -- GODOT end --
#endif
#elif defined(TASKING_PPL)
-@@ -147,13 +165,19 @@ namespace embree
+@@ -104,13 +122,19 @@ namespace embree
func(i);
},tbb::simple_partitioner(),context);
if (context.is_group_execution_cancelled())
- throw std::runtime_error("task cancelled");
+ // -- GODOT start --
+ // throw std::runtime_error("task cancelled");
-+ abort();
++ abort();
+ // -- GODOT end --
#else
tbb::parallel_for(Index(0),N,Index(1),[&](Index i) {
@@ -87,20 +87,20 @@ index 76c6b740aa..51d296fb16 100644
- throw std::runtime_error("task cancelled");
+ // -- GODOT start --
+ // throw std::runtime_error("task cancelled");
-+ abort();
++ abort();
+ // -- GODOT end --
#endif
}
-@@ -168,13 +192,19 @@ namespace embree
+@@ -125,13 +149,19 @@ namespace embree
func(i);
},ap,context);
if (context.is_group_execution_cancelled())
- throw std::runtime_error("task cancelled");
-+ // -- GODOT start --
-+ // throw std::runtime_error("task cancelled");
-+ abort();
-+ // -- GODOT end --
++ // -- GODOT start --
++ // throw std::runtime_error("task cancelled");
++ abort();
++ // -- GODOT end --
#else
tbb::parallel_for(Index(0),N,Index(1),[&](Index i) {
func(i);
@@ -109,15 +109,15 @@ index 76c6b740aa..51d296fb16 100644
- throw std::runtime_error("task cancelled");
+ // -- GODOT start --
+ // throw std::runtime_error("task cancelled");
-+ abort();
++ abort();
+ // -- GODOT end --
#endif
}
-diff --git a/thirdparty/embree-aarch64/common/algorithms/parallel_reduce.h b/thirdparty/embree-aarch64/common/algorithms/parallel_reduce.h
-index d444b6a2e4..0daf94e50e 100644
---- a/thirdparty/embree-aarch64/common/algorithms/parallel_reduce.h
-+++ b/thirdparty/embree-aarch64/common/algorithms/parallel_reduce.h
+diff --git a/thirdparty/embree/common/algorithms/parallel_reduce.h b/thirdparty/embree/common/algorithms/parallel_reduce.h
+index f42ae2ec50..8271372ea4 100644
+--- a/thirdparty/embree/common/algorithms/parallel_reduce.h
++++ b/thirdparty/embree/common/algorithms/parallel_reduce.h
@@ -58,15 +58,19 @@ namespace embree
const Value v = tbb::parallel_reduce(tbb::blocked_range<Index>(first,last,minStepSize),identity,
[&](const tbb::blocked_range<Index>& r, const Value& start) { return reduction(start,func(range<Index>(r.begin(),r.end()))); },
@@ -142,10 +142,10 @@ index d444b6a2e4..0daf94e50e 100644
return v;
#endif
#else // TASKING_PPL
-diff --git a/thirdparty/embree-aarch64/common/lexers/stringstream.cpp b/thirdparty/embree-aarch64/common/lexers/stringstream.cpp
-index 7e7b9faef8..98dc80ad59 100644
---- a/thirdparty/embree-aarch64/common/lexers/stringstream.cpp
-+++ b/thirdparty/embree-aarch64/common/lexers/stringstream.cpp
+diff --git a/thirdparty/embree/common/lexers/stringstream.cpp b/thirdparty/embree/common/lexers/stringstream.cpp
+index 42ffb10176..a037869506 100644
+--- a/thirdparty/embree/common/lexers/stringstream.cpp
++++ b/thirdparty/embree/common/lexers/stringstream.cpp
@@ -39,7 +39,10 @@ namespace embree
std::vector<char> str; str.reserve(64);
while (cin->peek() != EOF && !isSeparator(cin->peek())) {
@@ -158,10 +158,10 @@ index 7e7b9faef8..98dc80ad59 100644
str.push_back((char)c);
}
str.push_back(0);
-diff --git a/thirdparty/embree-aarch64/common/sys/alloc.cpp b/thirdparty/embree-aarch64/common/sys/alloc.cpp
-index 4e8928242e..12f143f131 100644
---- a/thirdparty/embree-aarch64/common/sys/alloc.cpp
-+++ b/thirdparty/embree-aarch64/common/sys/alloc.cpp
+diff --git a/thirdparty/embree/common/sys/alloc.cpp b/thirdparty/embree/common/sys/alloc.cpp
+index 1bc30fe9a5..abdd269069 100644
+--- a/thirdparty/embree/common/sys/alloc.cpp
++++ b/thirdparty/embree/common/sys/alloc.cpp
@@ -21,7 +21,10 @@ namespace embree
void* ptr = _mm_malloc(size,align);
@@ -169,7 +169,7 @@ index 4e8928242e..12f143f131 100644
- throw std::bad_alloc();
+ // -- GODOT start --
+ // throw std::bad_alloc();
-+ abort();
++ abort();
+ // -- GODOT end --
return ptr;
@@ -246,11 +246,11 @@ index 4e8928242e..12f143f131 100644
}
/* hint for transparent huge pages (THP) */
-diff --git a/thirdparty/embree-aarch64/common/sys/platform.h b/thirdparty/embree-aarch64/common/sys/platform.h
-index 7914eb7a52..737f14aa6e 100644
---- a/thirdparty/embree-aarch64/common/sys/platform.h
-+++ b/thirdparty/embree-aarch64/common/sys/platform.h
-@@ -174,11 +174,19 @@
+diff --git a/thirdparty/embree/common/sys/platform.h b/thirdparty/embree/common/sys/platform.h
+index 8a6d9fa0a9..697e07bb86 100644
+--- a/thirdparty/embree/common/sys/platform.h
++++ b/thirdparty/embree/common/sys/platform.h
+@@ -179,11 +179,19 @@
#define PRINT4(x,y,z,w) embree_cout << STRING(x) << " = " << (x) << ", " << STRING(y) << " = " << (y) << ", " << STRING(z) << " = " << (z) << ", " << STRING(w) << " = " << (w) << embree_endl
#if defined(DEBUG) // only report file and line in debug mode
@@ -272,10 +272,10 @@ index 7914eb7a52..737f14aa6e 100644
#endif
#define FATAL(x) THROW_RUNTIME_ERROR(x)
-diff --git a/thirdparty/embree-aarch64/common/tasking/taskschedulerinternal.cpp b/thirdparty/embree-aarch64/common/tasking/taskschedulerinternal.cpp
-index 98d7fb9249..ebf656d1a0 100644
---- a/thirdparty/embree-aarch64/common/tasking/taskschedulerinternal.cpp
-+++ b/thirdparty/embree-aarch64/common/tasking/taskschedulerinternal.cpp
+diff --git a/thirdparty/embree/common/tasking/taskschedulerinternal.cpp b/thirdparty/embree/common/tasking/taskschedulerinternal.cpp
+index dca835a716..ad438588a3 100644
+--- a/thirdparty/embree/common/tasking/taskschedulerinternal.cpp
++++ b/thirdparty/embree/common/tasking/taskschedulerinternal.cpp
@@ -48,13 +48,15 @@ namespace embree
{
Task* prevTask = thread.task;
@@ -298,7 +298,7 @@ index 98d7fb9249..ebf656d1a0 100644
thread.task = prevTask;
add_dependencies(-1);
}
-@@ -297,8 +299,11 @@ namespace embree
+@@ -291,8 +293,11 @@ namespace embree
size_t threadIndex = allocThreadIndex();
condition.wait(mutex, [&] () { return hasRootTask.load(); });
mutex.unlock();
@@ -312,7 +312,7 @@ index 98d7fb9249..ebf656d1a0 100644
}
void TaskScheduler::reset() {
-@@ -330,7 +335,10 @@ namespace embree
+@@ -324,7 +329,10 @@ namespace embree
return thread->scheduler->cancellingException == nullptr;
}
@@ -324,7 +324,7 @@ index 98d7fb9249..ebf656d1a0 100644
{
/* allocate thread structure */
std::unique_ptr<Thread> mthread(new Thread(threadIndex,this)); // too large for stack allocation
-@@ -353,9 +361,10 @@ namespace embree
+@@ -347,9 +355,10 @@ namespace embree
swapThread(oldThread);
/* remember exception to throw */
@@ -338,22 +338,22 @@ index 98d7fb9249..ebf656d1a0 100644
/* wait for all threads to terminate */
threadCounter--;
#if defined(__WIN32__)
-@@ -373,7 +382,10 @@ namespace embree
+@@ -367,7 +376,10 @@ namespace embree
yield();
#endif
}
- return except;
-+ // -- GODOT start --
-+ // return except;
-+ return;
-+ // -- GODOT end --
++ // -- GODOT start --
++ // return except;
++ return;
++ // -- GODOT end --
}
bool TaskScheduler::steal_from_other_threads(Thread& thread)
-diff --git a/thirdparty/embree-aarch64/common/tasking/taskschedulerinternal.h b/thirdparty/embree-aarch64/common/tasking/taskschedulerinternal.h
-index c2a9391aea..8bd70b2b8c 100644
---- a/thirdparty/embree-aarch64/common/tasking/taskschedulerinternal.h
-+++ b/thirdparty/embree-aarch64/common/tasking/taskschedulerinternal.h
+diff --git a/thirdparty/embree/common/tasking/taskschedulerinternal.h b/thirdparty/embree/common/tasking/taskschedulerinternal.h
+index c766a0bb6a..8fa6bb12fa 100644
+--- a/thirdparty/embree/common/tasking/taskschedulerinternal.h
++++ b/thirdparty/embree/common/tasking/taskschedulerinternal.h
@@ -123,7 +123,10 @@ namespace embree
{
size_t ofs = bytes + ((align - stackPtr) & (align-1));
@@ -371,14 +371,14 @@ index c2a9391aea..8bd70b2b8c 100644
{
if (right >= TASK_STACK_SIZE)
- throw std::runtime_error("task stack overflow");
-+ // -- GODOT start --
-+ // throw std::runtime_error("task stack overflow");
-+ abort();
-+ // -- GODOT end --
++ // -- GODOT start --
++ // throw std::runtime_error("task stack overflow");
++ abort();
++ // -- GODOT end --
/* allocate new task on right side of stack */
size_t oldStackPtr = stackPtr;
-@@ -239,7 +245,10 @@ namespace embree
+@@ -238,7 +244,10 @@ namespace embree
void wait_for_threads(size_t threadCount);
/*! thread loop for all worker threads */
@@ -390,10 +390,10 @@ index c2a9391aea..8bd70b2b8c 100644
/*! steals a task from a different thread */
bool steal_from_other_threads(Thread& thread);
-diff --git a/thirdparty/embree-aarch64/kernels/bvh/bvh_statistics.cpp b/thirdparty/embree-aarch64/kernels/bvh/bvh_statistics.cpp
-index 20cdd2d320..aa56035026 100644
---- a/thirdparty/embree-aarch64/kernels/bvh/bvh_statistics.cpp
-+++ b/thirdparty/embree-aarch64/kernels/bvh/bvh_statistics.cpp
+diff --git a/thirdparty/embree/kernels/bvh/bvh_statistics.cpp b/thirdparty/embree/kernels/bvh/bvh_statistics.cpp
+index d8da78eed7..d857ff7d95 100644
+--- a/thirdparty/embree/kernels/bvh/bvh_statistics.cpp
++++ b/thirdparty/embree/kernels/bvh/bvh_statistics.cpp
@@ -150,7 +150,10 @@ namespace embree
}
}
@@ -406,11 +406,11 @@ index 20cdd2d320..aa56035026 100644
}
return s;
}
-diff --git a/thirdparty/embree-aarch64/kernels/common/rtcore.cpp b/thirdparty/embree-aarch64/kernels/common/rtcore.cpp
-index ee5c37b238..625fbf6d4f 100644
---- a/thirdparty/embree-aarch64/kernels/common/rtcore.cpp
-+++ b/thirdparty/embree-aarch64/kernels/common/rtcore.cpp
-@@ -230,7 +230,10 @@ RTC_NAMESPACE_BEGIN;
+diff --git a/thirdparty/embree/kernels/common/rtcore.cpp b/thirdparty/embree/kernels/common/rtcore.cpp
+index 74e9fb335c..94b3819e42 100644
+--- a/thirdparty/embree/kernels/common/rtcore.cpp
++++ b/thirdparty/embree/kernels/common/rtcore.cpp
+@@ -197,7 +197,10 @@ RTC_NAMESPACE_BEGIN;
if (quality != RTC_BUILD_QUALITY_LOW &&
quality != RTC_BUILD_QUALITY_MEDIUM &&
quality != RTC_BUILD_QUALITY_HIGH)
@@ -422,7 +422,7 @@ index ee5c37b238..625fbf6d4f 100644
scene->setBuildQuality(quality);
RTC_CATCH_END2(scene);
}
-@@ -1383,7 +1386,10 @@ RTC_NAMESPACE_BEGIN;
+@@ -1350,7 +1353,10 @@ RTC_NAMESPACE_BEGIN;
quality != RTC_BUILD_QUALITY_MEDIUM &&
quality != RTC_BUILD_QUALITY_HIGH &&
quality != RTC_BUILD_QUALITY_REFIT)
@@ -434,10 +434,10 @@ index ee5c37b238..625fbf6d4f 100644
geometry->setBuildQuality(quality);
RTC_CATCH_END2(geometry);
}
-diff --git a/thirdparty/embree-aarch64/kernels/common/rtcore.h b/thirdparty/embree-aarch64/kernels/common/rtcore.h
-index 6583d12d57..4b070e122b 100644
---- a/thirdparty/embree-aarch64/kernels/common/rtcore.h
-+++ b/thirdparty/embree-aarch64/kernels/common/rtcore.h
+diff --git a/thirdparty/embree/kernels/common/rtcore.h b/thirdparty/embree/kernels/common/rtcore.h
+index 4e4b24e9c2..373e49a689 100644
+--- a/thirdparty/embree/kernels/common/rtcore.h
++++ b/thirdparty/embree/kernels/common/rtcore.h
@@ -25,52 +25,58 @@ namespace embree
#endif
@@ -596,11 +596,11 @@ index 6583d12d57..4b070e122b 100644
#endif
#define RTC_BUILD_ARGUMENTS_HAS(settings,member) \
-diff --git a/thirdparty/embree-aarch64/kernels/common/scene.cpp b/thirdparty/embree-aarch64/kernels/common/scene.cpp
-index e75aa968f9..1e23aeb415 100644
---- a/thirdparty/embree-aarch64/kernels/common/scene.cpp
-+++ b/thirdparty/embree-aarch64/kernels/common/scene.cpp
-@@ -800,16 +800,18 @@ namespace embree
+diff --git a/thirdparty/embree/kernels/common/scene.cpp b/thirdparty/embree/kernels/common/scene.cpp
+index 0149055f2c..408d7eae6f 100644
+--- a/thirdparty/embree/kernels/common/scene.cpp
++++ b/thirdparty/embree/kernels/common/scene.cpp
+@@ -792,16 +792,18 @@ namespace embree
}
/* initiate build */
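The renamed godot-changes-noexcept.patch applies one mechanical change throughout embree's task scheduler and allocators: every throw of std::runtime_error or std::bad_alloc becomes an immediate abort(), so the library can be built with exceptions disabled. A hedged sketch of the substitution shape (the helper name is illustrative, not embree's):

#include <cstdlib>

// Pattern used across the patch: keep the original throw as a comment and
// terminate instead of unwinding, since the throw is unusable without exceptions.
inline void report_task_cancelled() {
  // throw std::runtime_error("task cancelled");   // upstream embree behaviour
  abort();                                          // patched behaviour: hard stop
}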
diff --git a/thirdparty/embree/patches/godot-changes-ubsan.patch b/thirdparty/embree/patches/godot-changes-ubsan.patch
new file mode 100644
index 0000000000..1336246f0d
--- /dev/null
+++ b/thirdparty/embree/patches/godot-changes-ubsan.patch
@@ -0,0 +1,24 @@
+diff --git a/thirdparty/embree/kernels/builders/primrefgen.cpp b/thirdparty/embree/kernels/builders/primrefgen.cpp
+index bb4fc81dfe..d279dc4993 100644
+--- a/thirdparty/embree/kernels/builders/primrefgen.cpp
++++ b/thirdparty/embree/kernels/builders/primrefgen.cpp
+@@ -184,6 +184,9 @@ namespace embree
+
+ // special variants for grid meshes
+
++// -- GODOT start --
++#if defined(EMBREE_GEOMETRY_GRID)
++// -- GODOT end --
+ PrimInfo createPrimRefArrayGrids(Scene* scene, mvector<PrimRef>& prims, mvector<SubGridBuildData>& sgrids)
+ {
+ PrimInfo pinfo(empty);
+@@ -293,6 +296,9 @@ namespace embree
+
+ return pinfo;
+ }
++// -- GODOT start --
++#endif
++// -- GODOT end --
+
+ // ====================================================================================================
+ // ====================================================================================================
diff --git a/thirdparty/misc/patches/polypartition-godot-types.patch b/thirdparty/misc/patches/polypartition-godot-types.patch
index 59fdb2707c..782f02e8dc 100644
--- a/thirdparty/misc/patches/polypartition-godot-types.patch
+++ b/thirdparty/misc/patches/polypartition-godot-types.patch
@@ -1,5 +1,5 @@
diff --git a/thirdparty/misc/polypartition.cpp b/thirdparty/misc/polypartition.cpp
-index 3a8a6efa8319..4f1b6dcb21d8 100644
+index 3a8a6efa83..5e94793b79 100644
--- a/thirdparty/misc/polypartition.cpp
+++ b/thirdparty/misc/polypartition.cpp
@@ -23,10 +23,7 @@
@@ -510,7 +510,7 @@ index 3a8a6efa8319..4f1b6dcb21d8 100644
- return 0;
- }
- numvertices += iter->GetNumPoints();
-+ for (iter = inpolys->front(); iter; iter++) {
++ for (iter = inpolys->front(); iter; iter = iter->next()) {
+ numvertices += iter->get().GetNumPoints();
}
@@ -521,7 +521,7 @@ index 3a8a6efa8319..4f1b6dcb21d8 100644
polystartindex = 0;
- for (iter = inpolys->begin(); iter != inpolys->end(); iter++) {
- poly = &(*iter);
-+ for (iter = inpolys->front(); iter; iter++) {
++ for (iter = inpolys->front(); iter; iter = iter->next()) {
+ poly = &(iter->get());
polyendindex = polystartindex + poly->GetNumPoints() - 1;
for (i = 0; i < poly->GetNumPoints(); i++) {
@@ -569,7 +569,7 @@ index 3a8a6efa8319..4f1b6dcb21d8 100644
newedge.p2 = v->p;
edgeIter = edgeTree.lower_bound(newedge);
- if (edgeIter == edgeTree.begin()) {
-+ if (edgeIter == edgeTree.front()) {
++ if (edgeIter == nullptr || edgeIter == edgeTree.front()) {
error = true;
break;
}
@@ -606,7 +606,7 @@ index 3a8a6efa8319..4f1b6dcb21d8 100644
newedge.p2 = v->p;
edgeIter = edgeTree.lower_bound(newedge);
- if (edgeIter == edgeTree.begin()) {
-+ if (edgeIter == edgeTree.front()) {
++ if (edgeIter == nullptr || edgeIter == edgeTree.front()) {
error = true;
break;
}
@@ -648,7 +648,7 @@ index 3a8a6efa8319..4f1b6dcb21d8 100644
newedge.p2 = v->p;
edgeIter = edgeTree.lower_bound(newedge);
- if (edgeIter == edgeTree.begin()) {
-+ if (edgeIter == edgeTree.front()) {
++ if (edgeIter == nullptr || edgeIter == edgeTree.front()) {
error = true;
break;
}
@@ -716,7 +716,7 @@ index 3a8a6efa8319..4f1b6dcb21d8 100644
}
}
diff --git a/thirdparty/misc/polypartition.h b/thirdparty/misc/polypartition.h
-index f163f5d2173f..b2d905a3ef76 100644
+index f163f5d217..b2d905a3ef 100644
--- a/thirdparty/misc/polypartition.h
+++ b/thirdparty/misc/polypartition.h
@@ -24,8 +24,9 @@
diff --git a/thirdparty/misc/polypartition.cpp b/thirdparty/misc/polypartition.cpp
index 4f1b6dcb21..5e94793b79 100644
--- a/thirdparty/misc/polypartition.cpp
+++ b/thirdparty/misc/polypartition.cpp
@@ -1289,7 +1289,7 @@ int TPPLPartition::MonotonePartition(TPPLPolyList *inpolys, TPPLPolyList *monoto
bool error = false;
numvertices = 0;
- for (iter = inpolys->front(); iter; iter++) {
+ for (iter = inpolys->front(); iter; iter = iter->next()) {
numvertices += iter->get().GetNumPoints();
}
@@ -1298,7 +1298,7 @@ int TPPLPartition::MonotonePartition(TPPLPolyList *inpolys, TPPLPolyList *monoto
newnumvertices = numvertices;
polystartindex = 0;
- for (iter = inpolys->front(); iter; iter++) {
+ for (iter = inpolys->front(); iter; iter = iter->next()) {
poly = &(iter->get());
polyendindex = polystartindex + poly->GetNumPoints() - 1;
for (i = 0; i < poly->GetNumPoints(); i++) {
@@ -1408,7 +1408,7 @@ int TPPLPartition::MonotonePartition(TPPLPolyList *inpolys, TPPLPolyList *monoto
newedge.p1 = v->p;
newedge.p2 = v->p;
edgeIter = edgeTree.lower_bound(newedge);
- if (edgeIter == edgeTree.front()) {
+ if (edgeIter == nullptr || edgeIter == edgeTree.front()) {
error = true;
break;
}
@@ -1449,7 +1449,7 @@ int TPPLPartition::MonotonePartition(TPPLPolyList *inpolys, TPPLPolyList *monoto
newedge.p1 = v->p;
newedge.p2 = v->p;
edgeIter = edgeTree.lower_bound(newedge);
- if (edgeIter == edgeTree.front()) {
+ if (edgeIter == nullptr || edgeIter == edgeTree.front()) {
error = true;
break;
}
@@ -1494,7 +1494,7 @@ int TPPLPartition::MonotonePartition(TPPLPolyList *inpolys, TPPLPolyList *monoto
newedge.p1 = v->p;
newedge.p2 = v->p;
edgeIter = edgeTree.lower_bound(newedge);
- if (edgeIter == edgeTree.front()) {
+ if (edgeIter == nullptr || edgeIter == edgeTree.front()) {
error = true;
break;
}
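The polypartition hunks replace STL iterator idioms with Godot's List<T>::Element traversal and add a null guard on the result of the tree's lower_bound. A short sketch of the traversal idiom as the patched code uses it (List, front(), next(), and get() are the Godot container API already shown in the hunks; TPPLPoly comes from polypartition.h):

// Count vertices across all input polygons, Godot-List style.
int count_vertices(List<TPPLPoly> *inpolys) {
  int numvertices = 0;
  // A List<T>::Element* is advanced explicitly with next(), which is why the
  // patch rewrites `iter++` as `iter = iter->next()`.
  for (List<TPPLPoly>::Element *iter = inpolys->front(); iter; iter = iter->next()) {
    numvertices += iter->get().GetNumPoints();
  }
  return numvertices;
}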