-rw-r--r--  core/oa_hash_map.h | 16
-rw-r--r--  drivers/SCsub | 1
-rw-r--r--  drivers/dummy/rasterizer_dummy.h | 2
-rw-r--r--  drivers/vulkan/SCsub | 66
-rw-r--r--  drivers/vulkan/rendering_device_vulkan.cpp | 5164
-rw-r--r--  drivers/vulkan/rendering_device_vulkan.h | 830
-rw-r--r--  drivers/vulkan/vk_enum_string_helper.h | 3722
-rw-r--r--  drivers/vulkan/vk_mem_alloc.cpp | 2
-rw-r--r--  drivers/vulkan/vk_mem_alloc.h | 15448
-rw-r--r--  drivers/vulkan/vulkan_context.cpp | 1314
-rw-r--r--  drivers/vulkan/vulkan_context.h | 158
-rw-r--r--  platform/x11/SCsub | 3
-rw-r--r--  platform/x11/detect.py | 4
-rw-r--r--  platform/x11/os_x11.cpp | 314
-rw-r--r--  platform/x11/os_x11.h | 9
-rw-r--r--  platform/x11/vulkan_context_x11.cpp | 22
-rw-r--r--  platform/x11/vulkan_context_x11.h | 18
-rw-r--r--  servers/visual/rendering_device.cpp | 6
-rw-r--r--  servers/visual/rendering_device.h | 849
-rw-r--r--  servers/visual/visual_server_viewport.cpp | 5
-rw-r--r--  thirdparty/glslang/OGLCompilersDLL/InitializeDll.cpp | 165
-rw-r--r--  thirdparty/glslang/OGLCompilersDLL/InitializeDll.h | 49
-rw-r--r--  thirdparty/glslang/SPIRV/GLSL.ext.AMD.h | 108
-rw-r--r--  thirdparty/glslang/SPIRV/GLSL.ext.EXT.h | 38
-rw-r--r--  thirdparty/glslang/SPIRV/GLSL.ext.KHR.h | 45
-rw-r--r--  thirdparty/glslang/SPIRV/GLSL.ext.NV.h | 78
-rw-r--r--  thirdparty/glslang/SPIRV/GLSL.std.450.h | 131
-rw-r--r--  thirdparty/glslang/SPIRV/GlslangToSpv.cpp | 8066
-rw-r--r--  thirdparty/glslang/SPIRV/GlslangToSpv.h | 61
-rw-r--r--  thirdparty/glslang/SPIRV/InReadableOrder.cpp | 113
-rw-r--r--  thirdparty/glslang/SPIRV/Logger.cpp | 68
-rw-r--r--  thirdparty/glslang/SPIRV/Logger.h | 74
-rw-r--r--  thirdparty/glslang/SPIRV/SPVRemapper.cpp | 1487
-rw-r--r--  thirdparty/glslang/SPIRV/SPVRemapper.h | 304
-rw-r--r--  thirdparty/glslang/SPIRV/SpvBuilder.cpp | 3058
-rw-r--r--  thirdparty/glslang/SPIRV/SpvBuilder.h | 758
-rw-r--r--  thirdparty/glslang/SPIRV/SpvPostProcess.cpp | 426
-rw-r--r--  thirdparty/glslang/SPIRV/SpvTools.cpp | 214
-rw-r--r--  thirdparty/glslang/SPIRV/SpvTools.h | 80
-rw-r--r--  thirdparty/glslang/SPIRV/bitutils.h | 81
-rw-r--r--  thirdparty/glslang/SPIRV/disassemble.cpp | 759
-rw-r--r--  thirdparty/glslang/SPIRV/disassemble.h | 53
-rw-r--r--  thirdparty/glslang/SPIRV/doc.cpp | 2767
-rw-r--r--  thirdparty/glslang/SPIRV/doc.h | 258
-rw-r--r--  thirdparty/glslang/SPIRV/hex_float.h | 1078
-rw-r--r--  thirdparty/glslang/SPIRV/spirv.hpp | 1881
-rw-r--r--  thirdparty/glslang/SPIRV/spvIR.h | 441
-rw-r--r--  thirdparty/glslang/glslang/GenericCodeGen/CodeGen.cpp | 76
-rw-r--r--  thirdparty/glslang/glslang/GenericCodeGen/Link.cpp | 91
-rw-r--r--  thirdparty/glslang/glslang/Include/BaseTypes.h | 545
-rw-r--r--  thirdparty/glslang/glslang/Include/Common.h | 292
-rw-r--r--  thirdparty/glslang/glslang/Include/ConstantUnion.h | 938
-rw-r--r--  thirdparty/glslang/glslang/Include/InfoSink.h | 144
-rw-r--r--  thirdparty/glslang/glslang/Include/InitializeGlobals.h | 44
-rw-r--r--  thirdparty/glslang/glslang/Include/PoolAlloc.h | 317
-rw-r--r--  thirdparty/glslang/glslang/Include/ResourceLimits.h | 149
-rw-r--r--  thirdparty/glslang/glslang/Include/ShHandle.h | 176
-rw-r--r--  thirdparty/glslang/glslang/Include/Types.h | 2276
-rw-r--r--  thirdparty/glslang/glslang/Include/arrays.h | 341
-rw-r--r--  thirdparty/glslang/glslang/Include/intermediate.h | 1764
-rw-r--r--  thirdparty/glslang/glslang/Include/revision.h | 3
-rw-r--r--  thirdparty/glslang/glslang/Include/revision.template | 13
-rw-r--r--  thirdparty/glslang/glslang/MachineIndependent/Constant.cpp | 1405
-rw-r--r--  thirdparty/glslang/glslang/MachineIndependent/InfoSink.cpp | 113
-rw-r--r--  thirdparty/glslang/glslang/MachineIndependent/Initialize.cpp | 9634
-rw-r--r--  thirdparty/glslang/glslang/MachineIndependent/Initialize.h | 110
-rw-r--r--  thirdparty/glslang/glslang/MachineIndependent/IntermTraverse.cpp | 302
-rw-r--r--  thirdparty/glslang/glslang/MachineIndependent/Intermediate.cpp | 4095
-rw-r--r--  thirdparty/glslang/glslang/MachineIndependent/LiveTraverser.h | 138
-rw-r--r--  thirdparty/glslang/glslang/MachineIndependent/ParseContextBase.cpp | 628
-rw-r--r--  thirdparty/glslang/glslang/MachineIndependent/ParseHelper.cpp | 8062
-rw-r--r--  thirdparty/glslang/glslang/MachineIndependent/ParseHelper.h | 510
-rw-r--r--  thirdparty/glslang/glslang/MachineIndependent/PoolAlloc.cpp | 315
-rw-r--r--  thirdparty/glslang/glslang/MachineIndependent/RemoveTree.cpp | 118
-rw-r--r--  thirdparty/glslang/glslang/MachineIndependent/RemoveTree.h | 41
-rw-r--r--  thirdparty/glslang/glslang/MachineIndependent/Scan.cpp | 1793
-rw-r--r--  thirdparty/glslang/glslang/MachineIndependent/Scan.h | 276
-rw-r--r--  thirdparty/glslang/glslang/MachineIndependent/ScanContext.h | 93
-rw-r--r--  thirdparty/glslang/glslang/MachineIndependent/ShaderLang.cpp | 2056
-rw-r--r--  thirdparty/glslang/glslang/MachineIndependent/SymbolTable.cpp | 436
-rw-r--r--  thirdparty/glslang/glslang/MachineIndependent/SymbolTable.h | 872
-rw-r--r--  thirdparty/glslang/glslang/MachineIndependent/Versions.cpp | 1130
-rw-r--r--  thirdparty/glslang/glslang/MachineIndependent/Versions.h | 300
-rw-r--r--  thirdparty/glslang/glslang/MachineIndependent/attribute.cpp | 343
-rw-r--r--  thirdparty/glslang/glslang/MachineIndependent/attribute.h | 107
-rw-r--r--  thirdparty/glslang/glslang/MachineIndependent/gl_types.h | 214
-rw-r--r--  thirdparty/glslang/glslang/MachineIndependent/glslang.y | 3796
-rw-r--r--  thirdparty/glslang/glslang/MachineIndependent/glslang_tab.cpp | 10468
-rw-r--r--  thirdparty/glslang/glslang/MachineIndependent/glslang_tab.cpp.h | 509
-rw-r--r--  thirdparty/glslang/glslang/MachineIndependent/intermOut.cpp | 1519
-rw-r--r--  thirdparty/glslang/glslang/MachineIndependent/iomapper.cpp | 818
-rw-r--r--  thirdparty/glslang/glslang/MachineIndependent/iomapper.h | 63
-rw-r--r--  thirdparty/glslang/glslang/MachineIndependent/limits.cpp | 198
-rw-r--r--  thirdparty/glslang/glslang/MachineIndependent/linkValidate.cpp | 1756
-rw-r--r--  thirdparty/glslang/glslang/MachineIndependent/localintermediate.h | 900
-rw-r--r--  thirdparty/glslang/glslang/MachineIndependent/parseConst.cpp | 204
-rw-r--r--  thirdparty/glslang/glslang/MachineIndependent/parseVersions.h | 159
-rw-r--r--  thirdparty/glslang/glslang/MachineIndependent/pch.cpp | 35
-rw-r--r--  thirdparty/glslang/glslang/MachineIndependent/pch.h | 49
-rw-r--r--  thirdparty/glslang/glslang/MachineIndependent/preprocessor/Pp.cpp | 1320
-rw-r--r--  thirdparty/glslang/glslang/MachineIndependent/preprocessor/PpAtom.cpp | 181
-rw-r--r--  thirdparty/glslang/glslang/MachineIndependent/preprocessor/PpContext.cpp | 119
-rw-r--r--  thirdparty/glslang/glslang/MachineIndependent/preprocessor/PpContext.h | 702
-rw-r--r--  thirdparty/glslang/glslang/MachineIndependent/preprocessor/PpScanner.cpp | 1246
-rw-r--r--  thirdparty/glslang/glslang/MachineIndependent/preprocessor/PpTokens.cpp | 219
-rw-r--r--  thirdparty/glslang/glslang/MachineIndependent/preprocessor/PpTokens.h | 179
-rw-r--r--  thirdparty/glslang/glslang/MachineIndependent/propagateNoContraction.cpp | 866
-rw-r--r--  thirdparty/glslang/glslang/MachineIndependent/propagateNoContraction.h | 55
-rw-r--r--  thirdparty/glslang/glslang/MachineIndependent/reflection.cpp | 1200
-rw-r--r--  thirdparty/glslang/glslang/MachineIndependent/reflection.h | 203
-rw-r--r--  thirdparty/glslang/glslang/OSDependent/Unix/ossource.cpp | 207
-rw-r--r--  thirdparty/glslang/glslang/OSDependent/Windows/main.cpp | 74
-rw-r--r--  thirdparty/glslang/glslang/OSDependent/Windows/ossource.cpp | 147
-rw-r--r--  thirdparty/glslang/glslang/OSDependent/osinclude.h | 63
-rw-r--r--  thirdparty/glslang/glslang/Public/ShaderLang.h | 847
115 files changed, 118949 insertions(+), 7 deletions(-)
diff --git a/core/oa_hash_map.h b/core/oa_hash_map.h
index 7407c52816..182ed8b116 100644
--- a/core/oa_hash_map.h
+++ b/core/oa_hash_map.h
@@ -240,6 +240,22 @@ public:
return false;
}
+ /**
+ * Returns a pointer to the value for the given key,
+ * or NULL if the key is not present.
+ */
+ TValue *lookup_ptr(const TKey &p_key) const {
+ uint32_t pos = 0;
+ bool exists = _lookup_pos(p_key, pos);
+
+ if (exists) {
+ return &values[pos];
+ }
+ return NULL;
+ }
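+
+ // Usage sketch (illustrative; `map`, `key` and the mutation are placeholders):
+ // if (TValue *v = map.lookup_ptr(key)) { /* mutate *v in place, no copy */ }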
+
_FORCE_INLINE_ bool has(const TKey &p_key) const {
uint32_t _pos = 0;
return _lookup_pos(p_key, _pos);
diff --git a/drivers/SCsub b/drivers/SCsub
index d91d98a713..3ea7f23bc4 100644
--- a/drivers/SCsub
+++ b/drivers/SCsub
@@ -26,6 +26,7 @@ SConscript('winmidi/SCsub')
if (env["platform"] != "server"):
SConscript('gles3/SCsub')
SConscript('gles2/SCsub')
+ SConscript('vulkan/SCsub')
SConscript('gl_context/SCsub')
else:
SConscript('dummy/SCsub')
diff --git a/drivers/dummy/rasterizer_dummy.h b/drivers/dummy/rasterizer_dummy.h
index 00758a73a4..418e18cb78 100644
--- a/drivers/dummy/rasterizer_dummy.h
+++ b/drivers/dummy/rasterizer_dummy.h
@@ -793,7 +793,7 @@ public:
void clear_render_target(const Color &p_color) {}
void blit_render_target_to_screen(RID p_render_target, const Rect2 &p_screen_rect, int p_screen = 0) {}
void output_lens_distorted_to_screen(RID p_render_target, const Rect2 &p_screen_rect, float p_k1, float p_k2, const Vector2 &p_eye_center, float p_oversample) {}
- void end_frame(bool p_swap_buffers) {}
+ void end_frame(bool p_swap_buffers) { OS::get_singleton()->swap_buffers(); }
void finalize() {}
static Error is_viable() {
diff --git a/drivers/vulkan/SCsub b/drivers/vulkan/SCsub
new file mode 100644
index 0000000000..8ecfd47a0a
--- /dev/null
+++ b/drivers/vulkan/SCsub
@@ -0,0 +1,66 @@
+#!/usr/bin/env python
+
+Import('env')
+
+env.add_source_files(env.drivers_sources, "*.cpp")
+
+# Thirdparty source files
+# Not unbundled so far since not widespread as shared library
+thirdparty_dir = "#thirdparty/glslang/"
+thirdparty_sources = [
+"glslang/MachineIndependent/RemoveTree.cpp",
+"glslang/MachineIndependent/ParseHelper.cpp",
+"glslang/MachineIndependent/iomapper.cpp",
+"glslang/MachineIndependent/propagateNoContraction.cpp",
+"glslang/MachineIndependent/Intermediate.cpp",
+"glslang/MachineIndependent/linkValidate.cpp",
+"glslang/MachineIndependent/attribute.cpp",
+"glslang/MachineIndependent/Scan.cpp",
+"glslang/MachineIndependent/Initialize.cpp",
+"glslang/MachineIndependent/Constant.cpp",
+"glslang/MachineIndependent/reflection.cpp",
+"glslang/MachineIndependent/limits.cpp",
+"glslang/MachineIndependent/preprocessor/PpScanner.cpp",
+"glslang/MachineIndependent/preprocessor/PpTokens.cpp",
+"glslang/MachineIndependent/preprocessor/PpAtom.cpp",
+"glslang/MachineIndependent/preprocessor/PpContext.cpp",
+"glslang/MachineIndependent/preprocessor/Pp.cpp",
+"glslang/MachineIndependent/InfoSink.cpp",
+"glslang/MachineIndependent/intermOut.cpp",
+"glslang/MachineIndependent/SymbolTable.cpp",
+"glslang/MachineIndependent/glslang_tab.cpp",
+"glslang/MachineIndependent/pch.cpp",
+"glslang/MachineIndependent/Versions.cpp",
+"glslang/MachineIndependent/ShaderLang.cpp",
+"glslang/MachineIndependent/parseConst.cpp",
+"glslang/MachineIndependent/PoolAlloc.cpp",
+"glslang/MachineIndependent/ParseContextBase.cpp",
+"glslang/MachineIndependent/IntermTraverse.cpp",
+"glslang/GenericCodeGen/Link.cpp",
+"glslang/GenericCodeGen/CodeGen.cpp",
+"OGLCompilersDLL/InitializeDll.cpp",
+"SPIRV/InReadableOrder.cpp",
+"SPIRV/GlslangToSpv.cpp",
+"SPIRV/SpvBuilder.cpp",
+"SPIRV/SpvTools.cpp",
+"SPIRV/disassemble.cpp",
+"SPIRV/doc.cpp",
+"SPIRV/SPVRemapper.cpp",
+"SPIRV/SpvPostProcess.cpp",
+"SPIRV/Logger.cpp"
+]
+
+if (env["platform"]=="windows"):
+ thirdparty_sources.append("glslang/OSDependent/Windows/ossource.cpp")
+else:
+ thirdparty_sources.append("glslang/OSDependent/Unix/ossource.cpp")
+
+thirdparty_sources = [thirdparty_dir + file for file in thirdparty_sources]
+
+env_thirdparty = env.Clone()
+#env_thirdparty.disable_warnings()
+env_thirdparty.add_source_files(env.drivers_sources, thirdparty_sources)
+
+env.Prepend(CPPPATH=[thirdparty_dir])
+
+#SConscript("shaders/SCsub")
diff --git a/drivers/vulkan/rendering_device_vulkan.cpp b/drivers/vulkan/rendering_device_vulkan.cpp
new file mode 100644
index 0000000000..bcd103b67b
--- /dev/null
+++ b/drivers/vulkan/rendering_device_vulkan.cpp
@@ -0,0 +1,5164 @@
+#include "rendering_device_vulkan.h"
+#include "drivers/vulkan/vulkan_context.h"
+
+#include "core/hashfuncs.h"
+#include "core/project_settings.h"
+#include "thirdparty/glslang/SPIRV/GlslangToSpv.h"
+#include "thirdparty/glslang/glslang/Include/Types.h"
+
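+// Dependency tracking: dependency_map maps a resource ID to the set of IDs
+// that depend on it, while reverse_dependency_map maps an ID to the set of
+// IDs it depends on. Freeing a resource therefore also frees its dependents
+// (e.g. freeing a texture frees framebuffers created from it).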
+void RenderingDeviceVulkan::_add_dependency(ID p_id, ID p_depends_on) {
+
+ if (!dependency_map.has(p_depends_on)) {
+ dependency_map[p_depends_on] = Set<ID>();
+ }
+
+ dependency_map[p_depends_on].insert(p_id);
+
+ if (!reverse_dependency_map.has(p_id)) {
+ reverse_dependency_map[p_id] = Set<ID>();
+ }
+
+ reverse_dependency_map[p_id].insert(p_depends_on);
+}
+
+void RenderingDeviceVulkan::_free_dependencies(ID p_id) {
+
+ //direct dependencies must be freed
+ List<ID> to_free;
+ Map<ID, Set<ID> >::Element *E = dependency_map.find(p_id);
+ if (E) {
+
+ for (Set<ID>::Element *F = E->get().front(); F; F = F->next()) {
+ to_free.push_back(F->get());
+ }
+
+ dependency_map.erase(E);
+
+ while (to_free.front()) {
+ free(to_free.front()->get());
+ to_free.pop_front();
+ }
+ }
+
+ //reverse dependencies must be unreferenced
+ E = reverse_dependency_map.find(p_id);
+
+ if (E) {
+
+ for (Set<ID>::Element *F = E->get().front(); F; F = F->next()) {
+ Map<ID, Set<ID> >::Element *G = dependency_map.find(F->get());
+ if (G) {
+ G->get().erase(p_id);
+ }
+ }
+
+ reverse_dependency_map.erase(E);
+ }
+}
+
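+// The lookup tables below are indexed by RenderingDevice enum values; each
+// array must stay in the same order (and have the same length) as its enum.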
+const VkFormat RenderingDeviceVulkan::vulkan_formats[RenderingDevice::DATA_FORMAT_MAX] = {
+ VK_FORMAT_R4G4_UNORM_PACK8,
+ VK_FORMAT_R4G4B4A4_UNORM_PACK16,
+ VK_FORMAT_B4G4R4A4_UNORM_PACK16,
+ VK_FORMAT_R5G6B5_UNORM_PACK16,
+ VK_FORMAT_B5G6R5_UNORM_PACK16,
+ VK_FORMAT_R5G5B5A1_UNORM_PACK16,
+ VK_FORMAT_B5G5R5A1_UNORM_PACK16,
+ VK_FORMAT_A1R5G5B5_UNORM_PACK16,
+ VK_FORMAT_R8_UNORM,
+ VK_FORMAT_R8_SNORM,
+ VK_FORMAT_R8_USCALED,
+ VK_FORMAT_R8_SSCALED,
+ VK_FORMAT_R8_UINT,
+ VK_FORMAT_R8_SINT,
+ VK_FORMAT_R8_SRGB,
+ VK_FORMAT_R8G8_UNORM,
+ VK_FORMAT_R8G8_SNORM,
+ VK_FORMAT_R8G8_USCALED,
+ VK_FORMAT_R8G8_SSCALED,
+ VK_FORMAT_R8G8_UINT,
+ VK_FORMAT_R8G8_SINT,
+ VK_FORMAT_R8G8_SRGB,
+ VK_FORMAT_R8G8B8_UNORM,
+ VK_FORMAT_R8G8B8_SNORM,
+ VK_FORMAT_R8G8B8_USCALED,
+ VK_FORMAT_R8G8B8_SSCALED,
+ VK_FORMAT_R8G8B8_UINT,
+ VK_FORMAT_R8G8B8_SINT,
+ VK_FORMAT_R8G8B8_SRGB,
+ VK_FORMAT_B8G8R8_UNORM,
+ VK_FORMAT_B8G8R8_SNORM,
+ VK_FORMAT_B8G8R8_USCALED,
+ VK_FORMAT_B8G8R8_SSCALED,
+ VK_FORMAT_B8G8R8_UINT,
+ VK_FORMAT_B8G8R8_SINT,
+ VK_FORMAT_B8G8R8_SRGB,
+ VK_FORMAT_R8G8B8A8_UNORM,
+ VK_FORMAT_R8G8B8A8_SNORM,
+ VK_FORMAT_R8G8B8A8_USCALED,
+ VK_FORMAT_R8G8B8A8_SSCALED,
+ VK_FORMAT_R8G8B8A8_UINT,
+ VK_FORMAT_R8G8B8A8_SINT,
+ VK_FORMAT_R8G8B8A8_SRGB,
+ VK_FORMAT_B8G8R8A8_UNORM,
+ VK_FORMAT_B8G8R8A8_SNORM,
+ VK_FORMAT_B8G8R8A8_USCALED,
+ VK_FORMAT_B8G8R8A8_SSCALED,
+ VK_FORMAT_B8G8R8A8_UINT,
+ VK_FORMAT_B8G8R8A8_SINT,
+ VK_FORMAT_B8G8R8A8_SRGB,
+ VK_FORMAT_A8B8G8R8_UNORM_PACK32,
+ VK_FORMAT_A8B8G8R8_SNORM_PACK32,
+ VK_FORMAT_A8B8G8R8_USCALED_PACK32,
+ VK_FORMAT_A8B8G8R8_SSCALED_PACK32,
+ VK_FORMAT_A8B8G8R8_UINT_PACK32,
+ VK_FORMAT_A8B8G8R8_SINT_PACK32,
+ VK_FORMAT_A8B8G8R8_SRGB_PACK32,
+ VK_FORMAT_A2R10G10B10_UNORM_PACK32,
+ VK_FORMAT_A2R10G10B10_SNORM_PACK32,
+ VK_FORMAT_A2R10G10B10_USCALED_PACK32,
+ VK_FORMAT_A2R10G10B10_SSCALED_PACK32,
+ VK_FORMAT_A2R10G10B10_UINT_PACK32,
+ VK_FORMAT_A2R10G10B10_SINT_PACK32,
+ VK_FORMAT_A2B10G10R10_UNORM_PACK32,
+ VK_FORMAT_A2B10G10R10_SNORM_PACK32,
+ VK_FORMAT_A2B10G10R10_USCALED_PACK32,
+ VK_FORMAT_A2B10G10R10_SSCALED_PACK32,
+ VK_FORMAT_A2B10G10R10_UINT_PACK32,
+ VK_FORMAT_A2B10G10R10_SINT_PACK32,
+ VK_FORMAT_R16_UNORM,
+ VK_FORMAT_R16_SNORM,
+ VK_FORMAT_R16_USCALED,
+ VK_FORMAT_R16_SSCALED,
+ VK_FORMAT_R16_UINT,
+ VK_FORMAT_R16_SINT,
+ VK_FORMAT_R16_SFLOAT,
+ VK_FORMAT_R16G16_UNORM,
+ VK_FORMAT_R16G16_SNORM,
+ VK_FORMAT_R16G16_USCALED,
+ VK_FORMAT_R16G16_SSCALED,
+ VK_FORMAT_R16G16_UINT,
+ VK_FORMAT_R16G16_SINT,
+ VK_FORMAT_R16G16_SFLOAT,
+ VK_FORMAT_R16G16B16_UNORM,
+ VK_FORMAT_R16G16B16_SNORM,
+ VK_FORMAT_R16G16B16_USCALED,
+ VK_FORMAT_R16G16B16_SSCALED,
+ VK_FORMAT_R16G16B16_UINT,
+ VK_FORMAT_R16G16B16_SINT,
+ VK_FORMAT_R16G16B16_SFLOAT,
+ VK_FORMAT_R16G16B16A16_UNORM,
+ VK_FORMAT_R16G16B16A16_SNORM,
+ VK_FORMAT_R16G16B16A16_USCALED,
+ VK_FORMAT_R16G16B16A16_SSCALED,
+ VK_FORMAT_R16G16B16A16_UINT,
+ VK_FORMAT_R16G16B16A16_SINT,
+ VK_FORMAT_R16G16B16A16_SFLOAT,
+ VK_FORMAT_R32_UINT,
+ VK_FORMAT_R32_SINT,
+ VK_FORMAT_R32_SFLOAT,
+ VK_FORMAT_R32G32_UINT,
+ VK_FORMAT_R32G32_SINT,
+ VK_FORMAT_R32G32_SFLOAT,
+ VK_FORMAT_R32G32B32_UINT,
+ VK_FORMAT_R32G32B32_SINT,
+ VK_FORMAT_R32G32B32_SFLOAT,
+ VK_FORMAT_R32G32B32A32_UINT,
+ VK_FORMAT_R32G32B32A32_SINT,
+ VK_FORMAT_R32G32B32A32_SFLOAT,
+ VK_FORMAT_R64_UINT,
+ VK_FORMAT_R64_SINT,
+ VK_FORMAT_R64_SFLOAT,
+ VK_FORMAT_R64G64_UINT,
+ VK_FORMAT_R64G64_SINT,
+ VK_FORMAT_R64G64_SFLOAT,
+ VK_FORMAT_R64G64B64_UINT,
+ VK_FORMAT_R64G64B64_SINT,
+ VK_FORMAT_R64G64B64_SFLOAT,
+ VK_FORMAT_R64G64B64A64_UINT,
+ VK_FORMAT_R64G64B64A64_SINT,
+ VK_FORMAT_R64G64B64A64_SFLOAT,
+ VK_FORMAT_B10G11R11_UFLOAT_PACK32,
+ VK_FORMAT_E5B9G9R9_UFLOAT_PACK32,
+ VK_FORMAT_D16_UNORM,
+ VK_FORMAT_X8_D24_UNORM_PACK32,
+ VK_FORMAT_D32_SFLOAT,
+ VK_FORMAT_S8_UINT,
+ VK_FORMAT_D16_UNORM_S8_UINT,
+ VK_FORMAT_D24_UNORM_S8_UINT,
+ VK_FORMAT_D32_SFLOAT_S8_UINT,
+ VK_FORMAT_BC1_RGB_UNORM_BLOCK,
+ VK_FORMAT_BC1_RGB_SRGB_BLOCK,
+ VK_FORMAT_BC1_RGBA_UNORM_BLOCK,
+ VK_FORMAT_BC1_RGBA_SRGB_BLOCK,
+ VK_FORMAT_BC2_UNORM_BLOCK,
+ VK_FORMAT_BC2_SRGB_BLOCK,
+ VK_FORMAT_BC3_UNORM_BLOCK,
+ VK_FORMAT_BC3_SRGB_BLOCK,
+ VK_FORMAT_BC4_UNORM_BLOCK,
+ VK_FORMAT_BC4_SNORM_BLOCK,
+ VK_FORMAT_BC5_UNORM_BLOCK,
+ VK_FORMAT_BC5_SNORM_BLOCK,
+ VK_FORMAT_BC6H_UFLOAT_BLOCK,
+ VK_FORMAT_BC6H_SFLOAT_BLOCK,
+ VK_FORMAT_BC7_UNORM_BLOCK,
+ VK_FORMAT_BC7_SRGB_BLOCK,
+ VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK,
+ VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK,
+ VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK,
+ VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK,
+ VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK,
+ VK_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK,
+ VK_FORMAT_EAC_R11_UNORM_BLOCK,
+ VK_FORMAT_EAC_R11_SNORM_BLOCK,
+ VK_FORMAT_EAC_R11G11_UNORM_BLOCK,
+ VK_FORMAT_EAC_R11G11_SNORM_BLOCK,
+ VK_FORMAT_ASTC_4x4_UNORM_BLOCK,
+ VK_FORMAT_ASTC_4x4_SRGB_BLOCK,
+ VK_FORMAT_ASTC_5x4_UNORM_BLOCK,
+ VK_FORMAT_ASTC_5x4_SRGB_BLOCK,
+ VK_FORMAT_ASTC_5x5_UNORM_BLOCK,
+ VK_FORMAT_ASTC_5x5_SRGB_BLOCK,
+ VK_FORMAT_ASTC_6x5_UNORM_BLOCK,
+ VK_FORMAT_ASTC_6x5_SRGB_BLOCK,
+ VK_FORMAT_ASTC_6x6_UNORM_BLOCK,
+ VK_FORMAT_ASTC_6x6_SRGB_BLOCK,
+ VK_FORMAT_ASTC_8x5_UNORM_BLOCK,
+ VK_FORMAT_ASTC_8x5_SRGB_BLOCK,
+ VK_FORMAT_ASTC_8x6_UNORM_BLOCK,
+ VK_FORMAT_ASTC_8x6_SRGB_BLOCK,
+ VK_FORMAT_ASTC_8x8_UNORM_BLOCK,
+ VK_FORMAT_ASTC_8x8_SRGB_BLOCK,
+ VK_FORMAT_ASTC_10x5_UNORM_BLOCK,
+ VK_FORMAT_ASTC_10x5_SRGB_BLOCK,
+ VK_FORMAT_ASTC_10x6_UNORM_BLOCK,
+ VK_FORMAT_ASTC_10x6_SRGB_BLOCK,
+ VK_FORMAT_ASTC_10x8_UNORM_BLOCK,
+ VK_FORMAT_ASTC_10x8_SRGB_BLOCK,
+ VK_FORMAT_ASTC_10x10_UNORM_BLOCK,
+ VK_FORMAT_ASTC_10x10_SRGB_BLOCK,
+ VK_FORMAT_ASTC_12x10_UNORM_BLOCK,
+ VK_FORMAT_ASTC_12x10_SRGB_BLOCK,
+ VK_FORMAT_ASTC_12x12_UNORM_BLOCK,
+ VK_FORMAT_ASTC_12x12_SRGB_BLOCK,
+ VK_FORMAT_G8B8G8R8_422_UNORM,
+ VK_FORMAT_B8G8R8G8_422_UNORM,
+ VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM,
+ VK_FORMAT_G8_B8R8_2PLANE_420_UNORM,
+ VK_FORMAT_G8_B8_R8_3PLANE_422_UNORM,
+ VK_FORMAT_G8_B8R8_2PLANE_422_UNORM,
+ VK_FORMAT_G8_B8_R8_3PLANE_444_UNORM,
+ VK_FORMAT_R10X6_UNORM_PACK16,
+ VK_FORMAT_R10X6G10X6_UNORM_2PACK16,
+ VK_FORMAT_R10X6G10X6B10X6A10X6_UNORM_4PACK16,
+ VK_FORMAT_G10X6B10X6G10X6R10X6_422_UNORM_4PACK16,
+ VK_FORMAT_B10X6G10X6R10X6G10X6_422_UNORM_4PACK16,
+ VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_420_UNORM_3PACK16,
+ VK_FORMAT_G10X6_B10X6R10X6_2PLANE_420_UNORM_3PACK16,
+ VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_422_UNORM_3PACK16,
+ VK_FORMAT_G10X6_B10X6R10X6_2PLANE_422_UNORM_3PACK16,
+ VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_444_UNORM_3PACK16,
+ VK_FORMAT_R12X4_UNORM_PACK16,
+ VK_FORMAT_R12X4G12X4_UNORM_2PACK16,
+ VK_FORMAT_R12X4G12X4B12X4A12X4_UNORM_4PACK16,
+ VK_FORMAT_G12X4B12X4G12X4R12X4_422_UNORM_4PACK16,
+ VK_FORMAT_B12X4G12X4R12X4G12X4_422_UNORM_4PACK16,
+ VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_420_UNORM_3PACK16,
+ VK_FORMAT_G12X4_B12X4R12X4_2PLANE_420_UNORM_3PACK16,
+ VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_422_UNORM_3PACK16,
+ VK_FORMAT_G12X4_B12X4R12X4_2PLANE_422_UNORM_3PACK16,
+ VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_444_UNORM_3PACK16,
+ VK_FORMAT_G16B16G16R16_422_UNORM,
+ VK_FORMAT_B16G16R16G16_422_UNORM,
+ VK_FORMAT_G16_B16_R16_3PLANE_420_UNORM,
+ VK_FORMAT_G16_B16R16_2PLANE_420_UNORM,
+ VK_FORMAT_G16_B16_R16_3PLANE_422_UNORM,
+ VK_FORMAT_G16_B16R16_2PLANE_422_UNORM,
+ VK_FORMAT_G16_B16_R16_3PLANE_444_UNORM,
+ VK_FORMAT_PVRTC1_2BPP_UNORM_BLOCK_IMG,
+ VK_FORMAT_PVRTC1_4BPP_UNORM_BLOCK_IMG,
+ VK_FORMAT_PVRTC2_2BPP_UNORM_BLOCK_IMG,
+ VK_FORMAT_PVRTC2_4BPP_UNORM_BLOCK_IMG,
+ VK_FORMAT_PVRTC1_2BPP_SRGB_BLOCK_IMG,
+ VK_FORMAT_PVRTC1_4BPP_SRGB_BLOCK_IMG,
+ VK_FORMAT_PVRTC2_2BPP_SRGB_BLOCK_IMG,
+ VK_FORMAT_PVRTC2_4BPP_SRGB_BLOCK_IMG,
+};
+
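+// Human-readable format names, index-matched to vulkan_formats above.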
+const char *RenderingDeviceVulkan::named_formats[RenderingDevice::DATA_FORMAT_MAX] = {
+ "R4G4_Unorm_Pack8",
+ "R4G4B4A4_Unorm_Pack16",
+ "B4G4R4A4_Unorm_Pack16",
+ "R5G6B5_Unorm_Pack16",
+ "B5G6R5_Unorm_Pack16",
+ "R5G5B5A1_Unorm_Pack16",
+ "B5G5R5A1_Unorm_Pack16",
+ "A1R5G5B5_Unorm_Pack16",
+ "R8_Unorm",
+ "R8_Snorm",
+ "R8_Uscaled",
+ "R8_Sscaled",
+ "R8_Uint",
+ "R8_Sint",
+ "R8_Srgb",
+ "R8G8_Unorm",
+ "R8G8_Snorm",
+ "R8G8_Uscaled",
+ "R8G8_Sscaled",
+ "R8G8_Uint",
+ "R8G8_Sint",
+ "R8G8_Srgb",
+ "R8G8B8_Unorm",
+ "R8G8B8_Snorm",
+ "R8G8B8_Uscaled",
+ "R8G8B8_Sscaled",
+ "R8G8B8_Uint",
+ "R8G8B8_Sint",
+ "R8G8B8_Srgb",
+ "B8G8R8_Unorm",
+ "B8G8R8_Snorm",
+ "B8G8R8_Uscaled",
+ "B8G8R8_Sscaled",
+ "B8G8R8_Uint",
+ "B8G8R8_Sint",
+ "B8G8R8_Srgb",
+ "R8G8B8A8_Unorm",
+ "R8G8B8A8_Snorm",
+ "R8G8B8A8_Uscaled",
+ "R8G8B8A8_Sscaled",
+ "R8G8B8A8_Uint",
+ "R8G8B8A8_Sint",
+ "R8G8B8A8_Srgb",
+ "B8G8R8A8_Unorm",
+ "B8G8R8A8_Snorm",
+ "B8G8R8A8_Uscaled",
+ "B8G8R8A8_Sscaled",
+ "B8G8R8A8_Uint",
+ "B8G8R8A8_Sint",
+ "B8G8R8A8_Srgb",
+ "A8B8G8R8_Unorm_Pack32",
+ "A8B8G8R8_Snorm_Pack32",
+ "A8B8G8R8_Uscaled_Pack32",
+ "A8B8G8R8_Sscaled_Pack32",
+ "A8B8G8R8_Uint_Pack32",
+ "A8B8G8R8_Sint_Pack32",
+ "A8B8G8R8_Srgb_Pack32",
+ "A2R10G10B10_Unorm_Pack32",
+ "A2R10G10B10_Snorm_Pack32",
+ "A2R10G10B10_Uscaled_Pack32",
+ "A2R10G10B10_Sscaled_Pack32",
+ "A2R10G10B10_Uint_Pack32",
+ "A2R10G10B10_Sint_Pack32",
+ "A2B10G10R10_Unorm_Pack32",
+ "A2B10G10R10_Snorm_Pack32",
+ "A2B10G10R10_Uscaled_Pack32",
+ "A2B10G10R10_Sscaled_Pack32",
+ "A2B10G10R10_Uint_Pack32",
+ "A2B10G10R10_Sint_Pack32",
+ "R16_Unorm",
+ "R16_Snorm",
+ "R16_Uscaled",
+ "R16_Sscaled",
+ "R16_Uint",
+ "R16_Sint",
+ "R16_Sfloat",
+ "R16G16_Unorm",
+ "R16G16_Snorm",
+ "R16G16_Uscaled",
+ "R16G16_Sscaled",
+ "R16G16_Uint",
+ "R16G16_Sint",
+ "R16G16_Sfloat",
+ "R16G16B16_Unorm",
+ "R16G16B16_Snorm",
+ "R16G16B16_Uscaled",
+ "R16G16B16_Sscaled",
+ "R16G16B16_Uint",
+ "R16G16B16_Sint",
+ "R16G16B16_Sfloat",
+ "R16G16B16A16_Unorm",
+ "R16G16B16A16_Snorm",
+ "R16G16B16A16_Uscaled",
+ "R16G16B16A16_Sscaled",
+ "R16G16B16A16_Uint",
+ "R16G16B16A16_Sint",
+ "R16G16B16A16_Sfloat",
+ "R32_Uint",
+ "R32_Sint",
+ "R32_Sfloat",
+ "R32G32_Uint",
+ "R32G32_Sint",
+ "R32G32_Sfloat",
+ "R32G32B32_Uint",
+ "R32G32B32_Sint",
+ "R32G32B32_Sfloat",
+ "R32G32B32A32_Uint",
+ "R32G32B32A32_Sint",
+ "R32G32B32A32_Sfloat",
+ "R64_Uint",
+ "R64_Sint",
+ "R64_Sfloat",
+ "R64G64_Uint",
+ "R64G64_Sint",
+ "R64G64_Sfloat",
+ "R64G64B64_Uint",
+ "R64G64B64_Sint",
+ "R64G64B64_Sfloat",
+ "R64G64B64A64_Uint",
+ "R64G64B64A64_Sint",
+ "R64G64B64A64_Sfloat",
+ "B10G11R11_Ufloat_Pack32",
+ "E5B9G9R9_Ufloat_Pack32",
+ "D16_Unorm",
+ "X8_D24_Unorm_Pack32",
+ "D32_Sfloat",
+ "S8_Uint",
+ "D16_Unorm_S8_Uint",
+ "D24_Unorm_S8_Uint",
+ "D32_Sfloat_S8_Uint",
+ "Bc1_Rgb_Unorm_Block",
+ "Bc1_Rgb_Srgb_Block",
+ "Bc1_Rgba_Unorm_Block",
+ "Bc1_Rgba_Srgb_Block",
+ "Bc2_Unorm_Block",
+ "Bc2_Srgb_Block",
+ "Bc3_Unorm_Block",
+ "Bc3_Srgb_Block",
+ "Bc4_Unorm_Block",
+ "Bc4_Snorm_Block",
+ "Bc5_Unorm_Block",
+ "Bc5_Snorm_Block",
+ "Bc6H_Ufloat_Block",
+ "Bc6H_Sfloat_Block",
+ "Bc7_Unorm_Block",
+ "Bc7_Srgb_Block",
+ "Etc2_R8G8B8_Unorm_Block",
+ "Etc2_R8G8B8_Srgb_Block",
+ "Etc2_R8G8B8A1_Unorm_Block",
+ "Etc2_R8G8B8A1_Srgb_Block",
+ "Etc2_R8G8B8A8_Unorm_Block",
+ "Etc2_R8G8B8A8_Srgb_Block",
+ "Eac_R11_Unorm_Block",
+ "Eac_R11_Snorm_Block",
+ "Eac_R11G11_Unorm_Block",
+ "Eac_R11G11_Snorm_Block",
+ "Astc_4X4_Unorm_Block",
+ "Astc_4X4_Srgb_Block",
+ "Astc_5X4_Unorm_Block",
+ "Astc_5X4_Srgb_Block",
+ "Astc_5X5_Unorm_Block",
+ "Astc_5X5_Srgb_Block",
+ "Astc_6X5_Unorm_Block",
+ "Astc_6X5_Srgb_Block",
+ "Astc_6X6_Unorm_Block",
+ "Astc_6X6_Srgb_Block",
+ "Astc_8X5_Unorm_Block",
+ "Astc_8X5_Srgb_Block",
+ "Astc_8X6_Unorm_Block",
+ "Astc_8X6_Srgb_Block",
+ "Astc_8X8_Unorm_Block",
+ "Astc_8X8_Srgb_Block",
+ "Astc_10X5_Unorm_Block",
+ "Astc_10X5_Srgb_Block",
+ "Astc_10X6_Unorm_Block",
+ "Astc_10X6_Srgb_Block",
+ "Astc_10X8_Unorm_Block",
+ "Astc_10X8_Srgb_Block",
+ "Astc_10X10_Unorm_Block",
+ "Astc_10X10_Srgb_Block",
+ "Astc_12X10_Unorm_Block",
+ "Astc_12X10_Srgb_Block",
+ "Astc_12X12_Unorm_Block",
+ "Astc_12X12_Srgb_Block",
+ "G8B8G8R8_422_Unorm",
+ "B8G8R8G8_422_Unorm",
+ "G8_B8_R8_3Plane_420_Unorm",
+ "G8_B8R8_2Plane_420_Unorm",
+ "G8_B8_R8_3Plane_422_Unorm",
+ "G8_B8R8_2Plane_422_Unorm",
+ "G8_B8_R8_3Plane_444_Unorm",
+ "R10X6_Unorm_Pack16",
+ "R10X6G10X6_Unorm_2Pack16",
+ "R10X6G10X6B10X6A10X6_Unorm_4Pack16",
+ "G10X6B10X6G10X6R10X6_422_Unorm_4Pack16",
+ "B10X6G10X6R10X6G10X6_422_Unorm_4Pack16",
+ "G10X6_B10X6_R10X6_3Plane_420_Unorm_3Pack16",
+ "G10X6_B10X6R10X6_2Plane_420_Unorm_3Pack16",
+ "G10X6_B10X6_R10X6_3Plane_422_Unorm_3Pack16",
+ "G10X6_B10X6R10X6_2Plane_422_Unorm_3Pack16",
+ "G10X6_B10X6_R10X6_3Plane_444_Unorm_3Pack16",
+ "R12X4_Unorm_Pack16",
+ "R12X4G12X4_Unorm_2Pack16",
+ "R12X4G12X4B12X4A12X4_Unorm_4Pack16",
+ "G12X4B12X4G12X4R12X4_422_Unorm_4Pack16",
+ "B12X4G12X4R12X4G12X4_422_Unorm_4Pack16",
+ "G12X4_B12X4_R12X4_3Plane_420_Unorm_3Pack16",
+ "G12X4_B12X4R12X4_2Plane_420_Unorm_3Pack16",
+ "G12X4_B12X4_R12X4_3Plane_422_Unorm_3Pack16",
+ "G12X4_B12X4R12X4_2Plane_422_Unorm_3Pack16",
+ "G12X4_B12X4_R12X4_3Plane_444_Unorm_3Pack16",
+ "G16B16G16R16_422_Unorm",
+ "B16G16R16G16_422_Unorm",
+ "G16_B16_R16_3Plane_420_Unorm",
+ "G16_B16R16_2Plane_420_Unorm",
+ "G16_B16_R16_3Plane_422_Unorm",
+ "G16_B16R16_2Plane_422_Unorm",
+ "G16_B16_R16_3Plane_444_Unorm",
+ "Pvrtc1_2Bpp_Unorm_Block_Img",
+ "Pvrtc1_4Bpp_Unorm_Block_Img",
+ "Pvrtc2_2Bpp_Unorm_Block_Img",
+ "Pvrtc2_4Bpp_Unorm_Block_Img",
+ "Pvrtc1_2Bpp_Srgb_Block_Img",
+ "Pvrtc1_4Bpp_Srgb_Block_Img",
+ "Pvrtc2_2Bpp_Srgb_Block_Img",
+ "Pvrtc2_4Bpp_Srgb_Block_Img"
+};
+
+int RenderingDeviceVulkan::get_format_vertex_size(DataFormat p_format) {
+ switch (p_format) {
+ case DATA_FORMAT_R8_UNORM:
+ case DATA_FORMAT_R8_SNORM:
+ case DATA_FORMAT_R8_UINT:
+ case DATA_FORMAT_R8_SINT:
+ case DATA_FORMAT_R8G8_UNORM:
+ case DATA_FORMAT_R8G8_SNORM:
+ case DATA_FORMAT_R8G8_UINT:
+ case DATA_FORMAT_R8G8_SINT:
+ case DATA_FORMAT_R8G8B8_UNORM:
+ case DATA_FORMAT_R8G8B8_SNORM:
+ case DATA_FORMAT_R8G8B8_UINT:
+ case DATA_FORMAT_R8G8B8_SINT:
+ case DATA_FORMAT_B8G8R8_UNORM:
+ case DATA_FORMAT_B8G8R8_SNORM:
+ case DATA_FORMAT_B8G8R8_UINT:
+ case DATA_FORMAT_B8G8R8_SINT:
+ case DATA_FORMAT_R8G8B8A8_UNORM:
+ case DATA_FORMAT_R8G8B8A8_SNORM:
+ case DATA_FORMAT_R8G8B8A8_UINT:
+ case DATA_FORMAT_R8G8B8A8_SINT:
+ case DATA_FORMAT_B8G8R8A8_UNORM:
+ case DATA_FORMAT_B8G8R8A8_SNORM:
+ case DATA_FORMAT_B8G8R8A8_UINT:
+ case DATA_FORMAT_B8G8R8A8_SINT: return 4;
+ case DATA_FORMAT_R16_UNORM:
+ case DATA_FORMAT_R16_SNORM:
+ case DATA_FORMAT_R16_UINT:
+ case DATA_FORMAT_R16_SINT:
+ case DATA_FORMAT_R16_SFLOAT: return 4;
+ case DATA_FORMAT_R16G16_UNORM:
+ case DATA_FORMAT_R16G16_SNORM:
+ case DATA_FORMAT_R16G16_UINT:
+ case DATA_FORMAT_R16G16_SINT:
+ case DATA_FORMAT_R16G16_SFLOAT: return 4;
+ case DATA_FORMAT_R16G16B16_UNORM:
+ case DATA_FORMAT_R16G16B16_SNORM:
+ case DATA_FORMAT_R16G16B16_UINT:
+ case DATA_FORMAT_R16G16B16_SINT:
+ case DATA_FORMAT_R16G16B16_SFLOAT: return 8;
+ case DATA_FORMAT_R16G16B16A16_UNORM:
+ case DATA_FORMAT_R16G16B16A16_SNORM:
+ case DATA_FORMAT_R16G16B16A16_UINT:
+ case DATA_FORMAT_R16G16B16A16_SINT:
+ case DATA_FORMAT_R16G16B16A16_SFLOAT: return 8;
+ case DATA_FORMAT_R32_UINT:
+ case DATA_FORMAT_R32_SINT:
+ case DATA_FORMAT_R32_SFLOAT: return 4;
+ case DATA_FORMAT_R32G32_UINT:
+ case DATA_FORMAT_R32G32_SINT:
+ case DATA_FORMAT_R32G32_SFLOAT: return 8;
+ case DATA_FORMAT_R32G32B32_UINT:
+ case DATA_FORMAT_R32G32B32_SINT:
+ case DATA_FORMAT_R32G32B32_SFLOAT: return 12;
+ case DATA_FORMAT_R32G32B32A32_UINT:
+ case DATA_FORMAT_R32G32B32A32_SINT:
+ case DATA_FORMAT_R32G32B32A32_SFLOAT: return 16;
+ case DATA_FORMAT_R64_UINT:
+ case DATA_FORMAT_R64_SINT:
+ case DATA_FORMAT_R64_SFLOAT: return 8;
+ case DATA_FORMAT_R64G64_UINT:
+ case DATA_FORMAT_R64G64_SINT:
+ case DATA_FORMAT_R64G64_SFLOAT: return 16;
+ case DATA_FORMAT_R64G64B64_UINT:
+ case DATA_FORMAT_R64G64B64_SINT:
+ case DATA_FORMAT_R64G64B64_SFLOAT: return 24;
+ case DATA_FORMAT_R64G64B64A64_UINT:
+ case DATA_FORMAT_R64G64B64A64_SINT:
+ case DATA_FORMAT_R64G64B64A64_SFLOAT: return 32;
+ default: return 0;
+ }
+}
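+
+// Note: these appear to be vertex fetch sizes rather than tightly packed
+// sizes; narrow formats are reported padded to 4 bytes (e.g. R8 and R16 both
+// return 4), which is presumably why they differ from
+// get_image_format_pixel_size() below.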
+
+uint32_t RenderingDeviceVulkan::get_image_format_pixel_size(DataFormat p_format) {
+
+ switch (p_format) {
+
+ case DATA_FORMAT_R4G4_UNORM_PACK8: return 1;
+ case DATA_FORMAT_R4G4B4A4_UNORM_PACK16:
+ case DATA_FORMAT_B4G4R4A4_UNORM_PACK16:
+ case DATA_FORMAT_R5G6B5_UNORM_PACK16:
+ case DATA_FORMAT_B5G6R5_UNORM_PACK16:
+ case DATA_FORMAT_R5G5B5A1_UNORM_PACK16:
+ case DATA_FORMAT_B5G5R5A1_UNORM_PACK16:
+ case DATA_FORMAT_A1R5G5B5_UNORM_PACK16: return 2;
+ case DATA_FORMAT_R8_UNORM:
+ case DATA_FORMAT_R8_SNORM:
+ case DATA_FORMAT_R8_USCALED:
+ case DATA_FORMAT_R8_SSCALED:
+ case DATA_FORMAT_R8_UINT:
+ case DATA_FORMAT_R8_SINT:
+ case DATA_FORMAT_R8_SRGB: return 1;
+ case DATA_FORMAT_R8G8_UNORM:
+ case DATA_FORMAT_R8G8_SNORM:
+ case DATA_FORMAT_R8G8_USCALED:
+ case DATA_FORMAT_R8G8_SSCALED:
+ case DATA_FORMAT_R8G8_UINT:
+ case DATA_FORMAT_R8G8_SINT:
+ case DATA_FORMAT_R8G8_SRGB: return 2;
+ case DATA_FORMAT_R8G8B8_UNORM:
+ case DATA_FORMAT_R8G8B8_SNORM:
+ case DATA_FORMAT_R8G8B8_USCALED:
+ case DATA_FORMAT_R8G8B8_SSCALED:
+ case DATA_FORMAT_R8G8B8_UINT:
+ case DATA_FORMAT_R8G8B8_SINT:
+ case DATA_FORMAT_R8G8B8_SRGB:
+ case DATA_FORMAT_B8G8R8_UNORM:
+ case DATA_FORMAT_B8G8R8_SNORM:
+ case DATA_FORMAT_B8G8R8_USCALED:
+ case DATA_FORMAT_B8G8R8_SSCALED:
+ case DATA_FORMAT_B8G8R8_UINT:
+ case DATA_FORMAT_B8G8R8_SINT:
+ case DATA_FORMAT_B8G8R8_SRGB: return 3;
+ case DATA_FORMAT_R8G8B8A8_UNORM:
+ case DATA_FORMAT_R8G8B8A8_SNORM:
+ case DATA_FORMAT_R8G8B8A8_USCALED:
+ case DATA_FORMAT_R8G8B8A8_SSCALED:
+ case DATA_FORMAT_R8G8B8A8_UINT:
+ case DATA_FORMAT_R8G8B8A8_SINT:
+ case DATA_FORMAT_R8G8B8A8_SRGB:
+ case DATA_FORMAT_B8G8R8A8_UNORM:
+ case DATA_FORMAT_B8G8R8A8_SNORM:
+ case DATA_FORMAT_B8G8R8A8_USCALED:
+ case DATA_FORMAT_B8G8R8A8_SSCALED:
+ case DATA_FORMAT_B8G8R8A8_UINT:
+ case DATA_FORMAT_B8G8R8A8_SINT:
+ case DATA_FORMAT_B8G8R8A8_SRGB: return 4;
+ case DATA_FORMAT_A8B8G8R8_UNORM_PACK32:
+ case DATA_FORMAT_A8B8G8R8_SNORM_PACK32:
+ case DATA_FORMAT_A8B8G8R8_USCALED_PACK32:
+ case DATA_FORMAT_A8B8G8R8_SSCALED_PACK32:
+ case DATA_FORMAT_A8B8G8R8_UINT_PACK32:
+ case DATA_FORMAT_A8B8G8R8_SINT_PACK32:
+ case DATA_FORMAT_A8B8G8R8_SRGB_PACK32:
+ case DATA_FORMAT_A2R10G10B10_UNORM_PACK32:
+ case DATA_FORMAT_A2R10G10B10_SNORM_PACK32:
+ case DATA_FORMAT_A2R10G10B10_USCALED_PACK32:
+ case DATA_FORMAT_A2R10G10B10_SSCALED_PACK32:
+ case DATA_FORMAT_A2R10G10B10_UINT_PACK32:
+ case DATA_FORMAT_A2R10G10B10_SINT_PACK32:
+ case DATA_FORMAT_A2B10G10R10_UNORM_PACK32:
+ case DATA_FORMAT_A2B10G10R10_SNORM_PACK32:
+ case DATA_FORMAT_A2B10G10R10_USCALED_PACK32:
+ case DATA_FORMAT_A2B10G10R10_SSCALED_PACK32:
+ case DATA_FORMAT_A2B10G10R10_UINT_PACK32:
+ case DATA_FORMAT_A2B10G10R10_SINT_PACK32: return 4;
+ case DATA_FORMAT_R16_UNORM:
+ case DATA_FORMAT_R16_SNORM:
+ case DATA_FORMAT_R16_USCALED:
+ case DATA_FORMAT_R16_SSCALED:
+ case DATA_FORMAT_R16_UINT:
+ case DATA_FORMAT_R16_SINT:
+ case DATA_FORMAT_R16_SFLOAT: return 2;
+ case DATA_FORMAT_R16G16_UNORM:
+ case DATA_FORMAT_R16G16_SNORM:
+ case DATA_FORMAT_R16G16_USCALED:
+ case DATA_FORMAT_R16G16_SSCALED:
+ case DATA_FORMAT_R16G16_UINT:
+ case DATA_FORMAT_R16G16_SINT:
+ case DATA_FORMAT_R16G16_SFLOAT: return 4;
+ case DATA_FORMAT_R16G16B16_UNORM:
+ case DATA_FORMAT_R16G16B16_SNORM:
+ case DATA_FORMAT_R16G16B16_USCALED:
+ case DATA_FORMAT_R16G16B16_SSCALED:
+ case DATA_FORMAT_R16G16B16_UINT:
+ case DATA_FORMAT_R16G16B16_SINT:
+ case DATA_FORMAT_R16G16B16_SFLOAT: return 6;
+ case DATA_FORMAT_R16G16B16A16_UNORM:
+ case DATA_FORMAT_R16G16B16A16_SNORM:
+ case DATA_FORMAT_R16G16B16A16_USCALED:
+ case DATA_FORMAT_R16G16B16A16_SSCALED:
+ case DATA_FORMAT_R16G16B16A16_UINT:
+ case DATA_FORMAT_R16G16B16A16_SINT:
+ case DATA_FORMAT_R16G16B16A16_SFLOAT: return 8;
+ case DATA_FORMAT_R32_UINT:
+ case DATA_FORMAT_R32_SINT:
+ case DATA_FORMAT_R32_SFLOAT: return 4;
+ case DATA_FORMAT_R32G32_UINT:
+ case DATA_FORMAT_R32G32_SINT:
+ case DATA_FORMAT_R32G32_SFLOAT: return 8;
+ case DATA_FORMAT_R32G32B32_UINT:
+ case DATA_FORMAT_R32G32B32_SINT:
+ case DATA_FORMAT_R32G32B32_SFLOAT: return 12;
+ case DATA_FORMAT_R32G32B32A32_UINT:
+ case DATA_FORMAT_R32G32B32A32_SINT:
+ case DATA_FORMAT_R32G32B32A32_SFLOAT: return 16;
+ case DATA_FORMAT_R64_UINT:
+ case DATA_FORMAT_R64_SINT:
+ case DATA_FORMAT_R64_SFLOAT: return 8;
+ case DATA_FORMAT_R64G64_UINT:
+ case DATA_FORMAT_R64G64_SINT:
+ case DATA_FORMAT_R64G64_SFLOAT: return 16;
+ case DATA_FORMAT_R64G64B64_UINT:
+ case DATA_FORMAT_R64G64B64_SINT:
+ case DATA_FORMAT_R64G64B64_SFLOAT: return 24;
+ case DATA_FORMAT_R64G64B64A64_UINT:
+ case DATA_FORMAT_R64G64B64A64_SINT:
+ case DATA_FORMAT_R64G64B64A64_SFLOAT: return 32;
+ case DATA_FORMAT_B10G11R11_UFLOAT_PACK32:
+ case DATA_FORMAT_E5B9G9R9_UFLOAT_PACK32: return 4;
+ case DATA_FORMAT_D16_UNORM: return 2;
+ case DATA_FORMAT_X8_D24_UNORM_PACK32: return 4;
+ case DATA_FORMAT_D32_SFLOAT: return 4;
+ case DATA_FORMAT_S8_UINT: return 1;
+ case DATA_FORMAT_D16_UNORM_S8_UINT: return 4;
+ case DATA_FORMAT_D24_UNORM_S8_UINT: return 4;
+ case DATA_FORMAT_D32_SFLOAT_S8_UINT: return 5; //?
+ case DATA_FORMAT_BC1_RGB_UNORM_BLOCK:
+ case DATA_FORMAT_BC1_RGB_SRGB_BLOCK:
+ case DATA_FORMAT_BC1_RGBA_UNORM_BLOCK:
+ case DATA_FORMAT_BC1_RGBA_SRGB_BLOCK:
+ case DATA_FORMAT_BC2_UNORM_BLOCK:
+ case DATA_FORMAT_BC2_SRGB_BLOCK:
+ case DATA_FORMAT_BC3_UNORM_BLOCK:
+ case DATA_FORMAT_BC3_SRGB_BLOCK:
+ case DATA_FORMAT_BC4_UNORM_BLOCK:
+ case DATA_FORMAT_BC4_SNORM_BLOCK:
+ case DATA_FORMAT_BC5_UNORM_BLOCK:
+ case DATA_FORMAT_BC5_SNORM_BLOCK:
+ case DATA_FORMAT_BC6H_UFLOAT_BLOCK:
+ case DATA_FORMAT_BC6H_SFLOAT_BLOCK:
+ case DATA_FORMAT_BC7_UNORM_BLOCK:
+ case DATA_FORMAT_BC7_SRGB_BLOCK: return 1;
+ case DATA_FORMAT_ETC2_R8G8B8_UNORM_BLOCK:
+ case DATA_FORMAT_ETC2_R8G8B8_SRGB_BLOCK:
+ case DATA_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK:
+ case DATA_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK:
+ case DATA_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK:
+ case DATA_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK: return 1;
+ case DATA_FORMAT_EAC_R11_UNORM_BLOCK:
+ case DATA_FORMAT_EAC_R11_SNORM_BLOCK:
+ case DATA_FORMAT_EAC_R11G11_UNORM_BLOCK:
+ case DATA_FORMAT_EAC_R11G11_SNORM_BLOCK: return 1;
+ case DATA_FORMAT_ASTC_4x4_UNORM_BLOCK:
+ case DATA_FORMAT_ASTC_4x4_SRGB_BLOCK:
+ case DATA_FORMAT_ASTC_5x4_UNORM_BLOCK:
+ case DATA_FORMAT_ASTC_5x4_SRGB_BLOCK:
+ case DATA_FORMAT_ASTC_5x5_UNORM_BLOCK:
+ case DATA_FORMAT_ASTC_5x5_SRGB_BLOCK:
+ case DATA_FORMAT_ASTC_6x5_UNORM_BLOCK:
+ case DATA_FORMAT_ASTC_6x5_SRGB_BLOCK:
+ case DATA_FORMAT_ASTC_6x6_UNORM_BLOCK:
+ case DATA_FORMAT_ASTC_6x6_SRGB_BLOCK:
+ case DATA_FORMAT_ASTC_8x5_UNORM_BLOCK:
+ case DATA_FORMAT_ASTC_8x5_SRGB_BLOCK:
+ case DATA_FORMAT_ASTC_8x6_UNORM_BLOCK:
+ case DATA_FORMAT_ASTC_8x6_SRGB_BLOCK:
+ case DATA_FORMAT_ASTC_8x8_UNORM_BLOCK:
+ case DATA_FORMAT_ASTC_8x8_SRGB_BLOCK:
+ case DATA_FORMAT_ASTC_10x5_UNORM_BLOCK:
+ case DATA_FORMAT_ASTC_10x5_SRGB_BLOCK:
+ case DATA_FORMAT_ASTC_10x6_UNORM_BLOCK:
+ case DATA_FORMAT_ASTC_10x6_SRGB_BLOCK:
+ case DATA_FORMAT_ASTC_10x8_UNORM_BLOCK:
+ case DATA_FORMAT_ASTC_10x8_SRGB_BLOCK:
+ case DATA_FORMAT_ASTC_10x10_UNORM_BLOCK:
+ case DATA_FORMAT_ASTC_10x10_SRGB_BLOCK:
+ case DATA_FORMAT_ASTC_12x10_UNORM_BLOCK:
+ case DATA_FORMAT_ASTC_12x10_SRGB_BLOCK:
+ case DATA_FORMAT_ASTC_12x12_UNORM_BLOCK:
+ case DATA_FORMAT_ASTC_12x12_SRGB_BLOCK: return 1;
+ case DATA_FORMAT_G8B8G8R8_422_UNORM:
+ case DATA_FORMAT_B8G8R8G8_422_UNORM: return 4;
+ case DATA_FORMAT_G8_B8_R8_3PLANE_420_UNORM:
+ case DATA_FORMAT_G8_B8R8_2PLANE_420_UNORM:
+ case DATA_FORMAT_G8_B8_R8_3PLANE_422_UNORM:
+ case DATA_FORMAT_G8_B8R8_2PLANE_422_UNORM:
+ case DATA_FORMAT_G8_B8_R8_3PLANE_444_UNORM: return 4;
+ case DATA_FORMAT_R10X6_UNORM_PACK16:
+ case DATA_FORMAT_R10X6G10X6_UNORM_2PACK16:
+ case DATA_FORMAT_R10X6G10X6B10X6A10X6_UNORM_4PACK16:
+ case DATA_FORMAT_G10X6B10X6G10X6R10X6_422_UNORM_4PACK16:
+ case DATA_FORMAT_B10X6G10X6R10X6G10X6_422_UNORM_4PACK16:
+ case DATA_FORMAT_G10X6_B10X6_R10X6_3PLANE_420_UNORM_3PACK16:
+ case DATA_FORMAT_G10X6_B10X6R10X6_2PLANE_420_UNORM_3PACK16:
+ case DATA_FORMAT_G10X6_B10X6_R10X6_3PLANE_422_UNORM_3PACK16:
+ case DATA_FORMAT_G10X6_B10X6R10X6_2PLANE_422_UNORM_3PACK16:
+ case DATA_FORMAT_G10X6_B10X6_R10X6_3PLANE_444_UNORM_3PACK16:
+ case DATA_FORMAT_R12X4_UNORM_PACK16:
+ case DATA_FORMAT_R12X4G12X4_UNORM_2PACK16:
+ case DATA_FORMAT_R12X4G12X4B12X4A12X4_UNORM_4PACK16:
+ case DATA_FORMAT_G12X4B12X4G12X4R12X4_422_UNORM_4PACK16:
+ case DATA_FORMAT_B12X4G12X4R12X4G12X4_422_UNORM_4PACK16:
+ case DATA_FORMAT_G12X4_B12X4_R12X4_3PLANE_420_UNORM_3PACK16:
+ case DATA_FORMAT_G12X4_B12X4R12X4_2PLANE_420_UNORM_3PACK16:
+ case DATA_FORMAT_G12X4_B12X4_R12X4_3PLANE_422_UNORM_3PACK16:
+ case DATA_FORMAT_G12X4_B12X4R12X4_2PLANE_422_UNORM_3PACK16:
+ case DATA_FORMAT_G12X4_B12X4_R12X4_3PLANE_444_UNORM_3PACK16: return 2;
+ case DATA_FORMAT_G16B16G16R16_422_UNORM:
+ case DATA_FORMAT_B16G16R16G16_422_UNORM:
+ case DATA_FORMAT_G16_B16_R16_3PLANE_420_UNORM:
+ case DATA_FORMAT_G16_B16R16_2PLANE_420_UNORM:
+ case DATA_FORMAT_G16_B16_R16_3PLANE_422_UNORM:
+ case DATA_FORMAT_G16_B16R16_2PLANE_422_UNORM:
+ case DATA_FORMAT_G16_B16_R16_3PLANE_444_UNORM: return 8;
+ case DATA_FORMAT_PVRTC1_2BPP_UNORM_BLOCK_IMG:
+ case DATA_FORMAT_PVRTC1_4BPP_UNORM_BLOCK_IMG:
+ case DATA_FORMAT_PVRTC2_2BPP_UNORM_BLOCK_IMG:
+ case DATA_FORMAT_PVRTC2_4BPP_UNORM_BLOCK_IMG:
+ case DATA_FORMAT_PVRTC1_2BPP_SRGB_BLOCK_IMG:
+ case DATA_FORMAT_PVRTC1_4BPP_SRGB_BLOCK_IMG:
+ case DATA_FORMAT_PVRTC2_2BPP_SRGB_BLOCK_IMG:
+ case DATA_FORMAT_PVRTC2_4BPP_SRGB_BLOCK_IMG: return 1;
+ default: {
+ ERR_PRINT("Format not handled, bug");
+ }
+ }
+
+ return 1;
+}
+
+// https://www.khronos.org/registry/DataFormat/specs/1.1/dataformat.1.1.pdf
+
+void RenderingDeviceVulkan::get_compressed_image_format_block_dimensions(DataFormat p_format, uint32_t &r_w, uint32_t &r_h) {
+
+ switch (p_format) {
+ case DATA_FORMAT_BC1_RGB_UNORM_BLOCK:
+ case DATA_FORMAT_BC1_RGB_SRGB_BLOCK:
+ case DATA_FORMAT_BC1_RGBA_UNORM_BLOCK:
+ case DATA_FORMAT_BC1_RGBA_SRGB_BLOCK:
+ case DATA_FORMAT_BC2_UNORM_BLOCK:
+ case DATA_FORMAT_BC2_SRGB_BLOCK:
+ case DATA_FORMAT_BC3_UNORM_BLOCK:
+ case DATA_FORMAT_BC3_SRGB_BLOCK:
+ case DATA_FORMAT_BC4_UNORM_BLOCK:
+ case DATA_FORMAT_BC4_SNORM_BLOCK:
+ case DATA_FORMAT_BC5_UNORM_BLOCK:
+ case DATA_FORMAT_BC5_SNORM_BLOCK:
+ case DATA_FORMAT_BC6H_UFLOAT_BLOCK:
+ case DATA_FORMAT_BC6H_SFLOAT_BLOCK:
+ case DATA_FORMAT_BC7_UNORM_BLOCK:
+ case DATA_FORMAT_BC7_SRGB_BLOCK:
+ case DATA_FORMAT_ETC2_R8G8B8_UNORM_BLOCK:
+ case DATA_FORMAT_ETC2_R8G8B8_SRGB_BLOCK:
+ case DATA_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK:
+ case DATA_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK:
+ case DATA_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK:
+ case DATA_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK:
+ case DATA_FORMAT_EAC_R11_UNORM_BLOCK:
+ case DATA_FORMAT_EAC_R11_SNORM_BLOCK:
+ case DATA_FORMAT_EAC_R11G11_UNORM_BLOCK:
+ case DATA_FORMAT_EAC_R11G11_SNORM_BLOCK:
+ case DATA_FORMAT_ASTC_4x4_UNORM_BLOCK: //ASTC block footprints actually vary with the format (5x4, 8x8, etc.); 4x4 is assumed for all of them here
+ case DATA_FORMAT_ASTC_4x4_SRGB_BLOCK:
+ case DATA_FORMAT_ASTC_5x4_UNORM_BLOCK:
+ case DATA_FORMAT_ASTC_5x4_SRGB_BLOCK:
+ case DATA_FORMAT_ASTC_5x5_UNORM_BLOCK:
+ case DATA_FORMAT_ASTC_5x5_SRGB_BLOCK:
+ case DATA_FORMAT_ASTC_6x5_UNORM_BLOCK:
+ case DATA_FORMAT_ASTC_6x5_SRGB_BLOCK:
+ case DATA_FORMAT_ASTC_6x6_UNORM_BLOCK:
+ case DATA_FORMAT_ASTC_6x6_SRGB_BLOCK:
+ case DATA_FORMAT_ASTC_8x5_UNORM_BLOCK:
+ case DATA_FORMAT_ASTC_8x5_SRGB_BLOCK:
+ case DATA_FORMAT_ASTC_8x6_UNORM_BLOCK:
+ case DATA_FORMAT_ASTC_8x6_SRGB_BLOCK:
+ case DATA_FORMAT_ASTC_8x8_UNORM_BLOCK:
+ case DATA_FORMAT_ASTC_8x8_SRGB_BLOCK:
+ case DATA_FORMAT_ASTC_10x5_UNORM_BLOCK:
+ case DATA_FORMAT_ASTC_10x5_SRGB_BLOCK:
+ case DATA_FORMAT_ASTC_10x6_UNORM_BLOCK:
+ case DATA_FORMAT_ASTC_10x6_SRGB_BLOCK:
+ case DATA_FORMAT_ASTC_10x8_UNORM_BLOCK:
+ case DATA_FORMAT_ASTC_10x8_SRGB_BLOCK:
+ case DATA_FORMAT_ASTC_10x10_UNORM_BLOCK:
+ case DATA_FORMAT_ASTC_10x10_SRGB_BLOCK:
+ case DATA_FORMAT_ASTC_12x10_UNORM_BLOCK:
+ case DATA_FORMAT_ASTC_12x10_SRGB_BLOCK:
+ case DATA_FORMAT_ASTC_12x12_UNORM_BLOCK:
+ case DATA_FORMAT_ASTC_12x12_SRGB_BLOCK:
+ r_w = 4;
+ r_h = 4;
+ return;
+ case DATA_FORMAT_PVRTC1_4BPP_UNORM_BLOCK_IMG:
+ case DATA_FORMAT_PVRTC2_4BPP_UNORM_BLOCK_IMG:
+ case DATA_FORMAT_PVRTC1_4BPP_SRGB_BLOCK_IMG:
+ case DATA_FORMAT_PVRTC2_4BPP_SRGB_BLOCK_IMG:
+ r_w = 4;
+ r_h = 4;
+ return;
+ case DATA_FORMAT_PVRTC1_2BPP_UNORM_BLOCK_IMG:
+ case DATA_FORMAT_PVRTC2_2BPP_UNORM_BLOCK_IMG:
+ case DATA_FORMAT_PVRTC1_2BPP_SRGB_BLOCK_IMG:
+ case DATA_FORMAT_PVRTC2_2BPP_SRGB_BLOCK_IMG:
+ r_w = 8;
+ r_h = 4;
+ return;
+ default: {
+ r_w = 1;
+ r_h = 1;
+ }
+ }
+}
+
+uint32_t RenderingDeviceVulkan::get_compressed_image_format_block_byte_size(DataFormat p_format) {
+
+ switch (p_format) {
+ case DATA_FORMAT_BC1_RGB_UNORM_BLOCK:
+ case DATA_FORMAT_BC1_RGB_SRGB_BLOCK:
+ case DATA_FORMAT_BC1_RGBA_UNORM_BLOCK:
+ case DATA_FORMAT_BC1_RGBA_SRGB_BLOCK: return 8;
+ case DATA_FORMAT_BC2_UNORM_BLOCK:
+ case DATA_FORMAT_BC2_SRGB_BLOCK: return 16;
+ case DATA_FORMAT_BC3_UNORM_BLOCK:
+ case DATA_FORMAT_BC3_SRGB_BLOCK: return 16;
+ case DATA_FORMAT_BC4_UNORM_BLOCK:
+ case DATA_FORMAT_BC4_SNORM_BLOCK: return 8;
+ case DATA_FORMAT_BC5_UNORM_BLOCK:
+ case DATA_FORMAT_BC5_SNORM_BLOCK: return 16;
+ case DATA_FORMAT_BC6H_UFLOAT_BLOCK:
+ case DATA_FORMAT_BC6H_SFLOAT_BLOCK: return 16;
+ case DATA_FORMAT_BC7_UNORM_BLOCK:
+ case DATA_FORMAT_BC7_SRGB_BLOCK: return 16;
+ case DATA_FORMAT_ETC2_R8G8B8_UNORM_BLOCK:
+ case DATA_FORMAT_ETC2_R8G8B8_SRGB_BLOCK: return 8;
+ case DATA_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK:
+ case DATA_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK: return 8;
+ case DATA_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK:
+ case DATA_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK: return 16;
+ case DATA_FORMAT_EAC_R11_UNORM_BLOCK:
+ case DATA_FORMAT_EAC_R11_SNORM_BLOCK: return 8;
+ case DATA_FORMAT_EAC_R11G11_UNORM_BLOCK:
+ case DATA_FORMAT_EAC_R11G11_SNORM_BLOCK: return 16;
+ case DATA_FORMAT_ASTC_4x4_UNORM_BLOCK:
+ case DATA_FORMAT_ASTC_4x4_SRGB_BLOCK:
+ case DATA_FORMAT_ASTC_5x4_UNORM_BLOCK:
+ case DATA_FORMAT_ASTC_5x4_SRGB_BLOCK:
+ case DATA_FORMAT_ASTC_5x5_UNORM_BLOCK:
+ case DATA_FORMAT_ASTC_5x5_SRGB_BLOCK:
+ case DATA_FORMAT_ASTC_6x5_UNORM_BLOCK:
+ case DATA_FORMAT_ASTC_6x5_SRGB_BLOCK:
+ case DATA_FORMAT_ASTC_6x6_UNORM_BLOCK:
+ case DATA_FORMAT_ASTC_6x6_SRGB_BLOCK:
+ case DATA_FORMAT_ASTC_8x5_UNORM_BLOCK:
+ case DATA_FORMAT_ASTC_8x5_SRGB_BLOCK:
+ case DATA_FORMAT_ASTC_8x6_UNORM_BLOCK:
+ case DATA_FORMAT_ASTC_8x6_SRGB_BLOCK:
+ case DATA_FORMAT_ASTC_8x8_UNORM_BLOCK:
+ case DATA_FORMAT_ASTC_8x8_SRGB_BLOCK:
+ case DATA_FORMAT_ASTC_10x5_UNORM_BLOCK:
+ case DATA_FORMAT_ASTC_10x5_SRGB_BLOCK:
+ case DATA_FORMAT_ASTC_10x6_UNORM_BLOCK:
+ case DATA_FORMAT_ASTC_10x6_SRGB_BLOCK:
+ case DATA_FORMAT_ASTC_10x8_UNORM_BLOCK:
+ case DATA_FORMAT_ASTC_10x8_SRGB_BLOCK:
+ case DATA_FORMAT_ASTC_10x10_UNORM_BLOCK:
+ case DATA_FORMAT_ASTC_10x10_SRGB_BLOCK:
+ case DATA_FORMAT_ASTC_12x10_UNORM_BLOCK:
+ case DATA_FORMAT_ASTC_12x10_SRGB_BLOCK:
+ case DATA_FORMAT_ASTC_12x12_UNORM_BLOCK:
+ case DATA_FORMAT_ASTC_12x12_SRGB_BLOCK: return 16; //ASTC blocks are always 128 bits (16 bytes), regardless of footprint
+ case DATA_FORMAT_PVRTC1_4BPP_UNORM_BLOCK_IMG:
+ case DATA_FORMAT_PVRTC2_4BPP_UNORM_BLOCK_IMG:
+ case DATA_FORMAT_PVRTC1_4BPP_SRGB_BLOCK_IMG:
+ case DATA_FORMAT_PVRTC2_4BPP_SRGB_BLOCK_IMG:
+ case DATA_FORMAT_PVRTC1_2BPP_UNORM_BLOCK_IMG:
+ case DATA_FORMAT_PVRTC2_2BPP_UNORM_BLOCK_IMG:
+ case DATA_FORMAT_PVRTC1_2BPP_SRGB_BLOCK_IMG:
+ case DATA_FORMAT_PVRTC2_2BPP_SRGB_BLOCK_IMG: return 8; //what varies is resolution
+ default: {
+ }
+ }
+ return 1;
+}
+
+uint32_t RenderingDeviceVulkan::get_compressed_image_format_pixel_rshift(DataFormat p_format) {
+
+ switch (p_format) {
+ case DATA_FORMAT_BC1_RGB_UNORM_BLOCK: //these formats are half byte size, so rshift is 1
+ case DATA_FORMAT_BC1_RGB_SRGB_BLOCK:
+ case DATA_FORMAT_BC1_RGBA_UNORM_BLOCK:
+ case DATA_FORMAT_BC1_RGBA_SRGB_BLOCK:
+ case DATA_FORMAT_BC4_UNORM_BLOCK:
+ case DATA_FORMAT_BC4_SNORM_BLOCK:
+ case DATA_FORMAT_ETC2_R8G8B8_UNORM_BLOCK:
+ case DATA_FORMAT_ETC2_R8G8B8_SRGB_BLOCK:
+ case DATA_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK:
+ case DATA_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK:
+ case DATA_FORMAT_EAC_R11_UNORM_BLOCK:
+ case DATA_FORMAT_EAC_R11_SNORM_BLOCK:
+ case DATA_FORMAT_PVRTC1_4BPP_UNORM_BLOCK_IMG:
+ case DATA_FORMAT_PVRTC2_4BPP_UNORM_BLOCK_IMG:
+ case DATA_FORMAT_PVRTC1_4BPP_SRGB_BLOCK_IMG:
+ case DATA_FORMAT_PVRTC2_4BPP_SRGB_BLOCK_IMG: return 1;
+ case DATA_FORMAT_PVRTC1_2BPP_UNORM_BLOCK_IMG: //these formats are quarter byte size, so rshift is 2
+ case DATA_FORMAT_PVRTC2_2BPP_UNORM_BLOCK_IMG:
+ case DATA_FORMAT_PVRTC1_2BPP_SRGB_BLOCK_IMG:
+ case DATA_FORMAT_PVRTC2_2BPP_SRGB_BLOCK_IMG: return 2;
+ default: {
+ }
+ }
+
+ return 0;
+}
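+
+// Illustrative arithmetic: BC1 stores a 4x4 block (16 pixels) in 8 bytes,
+// i.e. half a byte per pixel. With pixel_size 1 and rshift 1,
+//   bytes = (width * height * 1) >> 1.
+// PVRTC 2bpp stores an 8x4 block (32 pixels) in 8 bytes, a quarter byte per
+// pixel, hence rshift 2.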
+
+uint32_t RenderingDeviceVulkan::get_image_format_required_size(DataFormat p_format, uint32_t p_width, uint32_t p_height, uint32_t p_depth, uint32_t p_mipmap, uint32_t *r_blockw, uint32_t *r_blockh) {
+
+ uint32_t w = p_width;
+ uint32_t h = p_height;
+ uint32_t d = p_depth;
+
+ uint32_t size = 0;
+
+ uint32_t pixel_size = get_image_format_pixel_size(p_format);
+ uint32_t pixel_rshift = get_compressed_image_format_pixel_rshift(p_format);
+ uint32_t blockw, blockh;
+ get_compressed_image_format_block_dimensions(p_format, blockw, blockh);
+
+ for (uint32_t i = 0; i <= p_mipmap; i++) {
+ uint32_t bw = w % blockw != 0 ? w + (blockw - w % blockw) : w;
+ uint32_t bh = h % blockh != 0 ? h + (blockh - h % blockh) : h;
+
+ print_line("bw " + itos(bw) + " bh " + itos(bh) + " pixsize " + itos(pixel_size) + " shift " + itos(pixel_rshift));
+ uint32_t s = bw * bh;
+
+ s *= pixel_size;
+ s >>= pixel_rshift;
+ size = s * d;
+ if (r_blockw) {
+ *r_blockw = bw;
+ }
+ if (r_blockh) {
+ *r_blockh = bh;
+ }
+ w = MAX(blockw, w >> 1);
+ h = MAX(blockh, h >> 1);
+ d = MAX(1, d >> 1);
+ }
+
+ return size;
+}
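+
+// Worked example (illustrative): a 10x10 BC1 image at mip 0. Dimensions round
+// up to the 4x4 block grid, so bw = 12, bh = 12, and
+// s = 144 * pixel_size(1) >> rshift(1) = 72 bytes,
+// which matches 3x3 blocks of 8 bytes each.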
+
+uint32_t RenderingDeviceVulkan::get_image_required_mipmaps(uint32_t p_width, uint32_t p_height, uint32_t p_depth) {
+
+ //formats and block size don't really matter here since they can all go down to 1px (even if block is larger)
+ int w = p_width;
+ int h = p_height;
+ int d = p_depth;
+
+ int mipmaps = 1;
+
+ while (true) {
+
+ if (w == 1 && h == 1 && d == 1) {
+ break;
+ }
+
+ w = MAX(1, w >> 1);
+ h = MAX(1, h >> 1);
+ d = MAX(1, d >> 1);
+
+ mipmaps++;
+ }
+
+ return mipmaps;
+}
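+
+// e.g. 1024x512x1 needs 11 levels: 1024x512, 512x256, ..., 2x1, 1x1.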
+
+///////////////////////
+
+const VkCompareOp RenderingDeviceVulkan::compare_operators[RenderingDevice::COMPARE_OP_MAX] = {
+ VK_COMPARE_OP_NEVER,
+ VK_COMPARE_OP_LESS,
+ VK_COMPARE_OP_EQUAL,
+ VK_COMPARE_OP_LESS_OR_EQUAL,
+ VK_COMPARE_OP_GREATER,
+ VK_COMPARE_OP_NOT_EQUAL,
+ VK_COMPARE_OP_GREATER_OR_EQUAL,
+ VK_COMPARE_OP_ALWAYS
+};
+
+const VkStencilOp RenderingDeviceVulkan::stencil_operations[RenderingDevice::STENCIL_OP_MAX] = {
+ VK_STENCIL_OP_KEEP,
+ VK_STENCIL_OP_ZERO,
+ VK_STENCIL_OP_REPLACE,
+ VK_STENCIL_OP_INCREMENT_AND_CLAMP,
+ VK_STENCIL_OP_DECREMENT_AND_CLAMP,
+ VK_STENCIL_OP_INVERT,
+ VK_STENCIL_OP_INCREMENT_AND_WRAP,
+ VK_STENCIL_OP_DECREMENT_AND_WRAP
+};
+
+const VkSampleCountFlagBits RenderingDeviceVulkan::rasterization_sample_count[RenderingDevice::TEXTURE_SAMPLES_MAX] = {
+ VK_SAMPLE_COUNT_1_BIT,
+ VK_SAMPLE_COUNT_2_BIT,
+ VK_SAMPLE_COUNT_4_BIT,
+ VK_SAMPLE_COUNT_8_BIT,
+ VK_SAMPLE_COUNT_16_BIT,
+ VK_SAMPLE_COUNT_32_BIT,
+ VK_SAMPLE_COUNT_64_BIT,
+};
+
+const VkLogicOp RenderingDeviceVulkan::logic_operations[RenderingDevice::LOGIC_OP_MAX] = {
+ VK_LOGIC_OP_CLEAR,
+ VK_LOGIC_OP_AND,
+ VK_LOGIC_OP_AND_REVERSE,
+ VK_LOGIC_OP_COPY,
+ VK_LOGIC_OP_AND_INVERTED,
+ VK_LOGIC_OP_NO_OP,
+ VK_LOGIC_OP_XOR,
+ VK_LOGIC_OP_OR,
+ VK_LOGIC_OP_NOR,
+ VK_LOGIC_OP_EQUIVALENT,
+ VK_LOGIC_OP_INVERT,
+ VK_LOGIC_OP_OR_REVERSE,
+ VK_LOGIC_OP_COPY_INVERTED,
+ VK_LOGIC_OP_OR_INVERTED,
+ VK_LOGIC_OP_NAND,
+ VK_LOGIC_OP_SET
+};
+
+const VkBlendFactor RenderingDeviceVulkan::blend_factors[RenderingDevice::BLEND_FACTOR_MAX] = {
+ VK_BLEND_FACTOR_ZERO,
+ VK_BLEND_FACTOR_ONE,
+ VK_BLEND_FACTOR_SRC_COLOR,
+ VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR,
+ VK_BLEND_FACTOR_DST_COLOR,
+ VK_BLEND_FACTOR_ONE_MINUS_DST_COLOR,
+ VK_BLEND_FACTOR_SRC_ALPHA,
+ VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA,
+ VK_BLEND_FACTOR_DST_ALPHA,
+ VK_BLEND_FACTOR_ONE_MINUS_DST_ALPHA,
+ VK_BLEND_FACTOR_CONSTANT_COLOR,
+ VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_COLOR,
+ VK_BLEND_FACTOR_CONSTANT_ALPHA,
+ VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA,
+ VK_BLEND_FACTOR_SRC_ALPHA_SATURATE,
+ VK_BLEND_FACTOR_SRC1_COLOR,
+ VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR,
+ VK_BLEND_FACTOR_SRC1_ALPHA,
+ VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA
+};
+const VkBlendOp RenderingDeviceVulkan::blend_operations[RenderingDevice::BLEND_OP_MAX] = {
+ VK_BLEND_OP_ADD,
+ VK_BLEND_OP_SUBTRACT,
+ VK_BLEND_OP_REVERSE_SUBTRACT,
+ VK_BLEND_OP_MIN,
+ VK_BLEND_OP_MAX
+};
+
+const VkSamplerAddressMode RenderingDeviceVulkan::address_modes[RenderingDevice::SAMPLER_REPEAT_MODE_MAX] = {
+ VK_SAMPLER_ADDRESS_MODE_REPEAT,
+ VK_SAMPLER_ADDRESS_MODE_MIRRORED_REPEAT,
+ VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE,
+ VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER,
+ VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE
+};
+
+const VkBorderColor RenderingDeviceVulkan::sampler_border_colors[RenderingDevice::SAMPLER_BORDER_COLOR_MAX] = {
+ VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK,
+ VK_BORDER_COLOR_INT_TRANSPARENT_BLACK,
+ VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK,
+ VK_BORDER_COLOR_INT_OPAQUE_BLACK,
+ VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE,
+ VK_BORDER_COLOR_INT_OPAQUE_WHITE
+};
+/***************************/
+/**** BUFFER MANAGEMENT ****/
+/***************************/
+
+Error RenderingDeviceVulkan::_buffer_allocate(Buffer *p_buffer, uint32_t p_size, uint32_t p_usage, VmaMemoryUsage p_mapping) {
+ VkBufferCreateInfo bufferInfo;
+ bufferInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
+ bufferInfo.pNext = NULL;
+ bufferInfo.flags = 0;
+ bufferInfo.size = p_size;
+ bufferInfo.usage = p_usage;
+ bufferInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
+ bufferInfo.queueFamilyIndexCount = 0;
+ bufferInfo.pQueueFamilyIndices = NULL;
+
+ VmaAllocationCreateInfo allocInfo;
+ allocInfo.flags = 0;
+ allocInfo.usage = p_mapping;
+ allocInfo.requiredFlags = 0;
+ allocInfo.preferredFlags = 0;
+ allocInfo.memoryTypeBits = 0;
+ allocInfo.pool = NULL;
+ allocInfo.pUserData = NULL;
+
+ VkResult err = vmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &p_buffer->buffer, &p_buffer->allocation, NULL);
+ ERR_FAIL_COND_V(err, ERR_CANT_CREATE);
+ p_buffer->size = p_size;
+ p_buffer->buffer_info.buffer = p_buffer->buffer;
+ p_buffer->buffer_info.offset = 0;
+ p_buffer->buffer_info.range = p_size;
+
+ return OK;
+}
+
+Error RenderingDeviceVulkan::_buffer_free(Buffer *p_buffer) {
+ ERR_FAIL_COND_V(p_buffer->size == 0, ERR_INVALID_PARAMETER);
+
+ //vmaDestroyBuffer() destroys the buffer and frees its allocation; calling
+ //vmaFreeMemory() on the same allocation afterwards would be a double free.
+ vmaDestroyBuffer(allocator, p_buffer->buffer, p_buffer->allocation);
+ p_buffer->buffer = NULL;
+ p_buffer->allocation = NULL;
+ p_buffer->size = 0;
+
+ return OK;
+}
+
+Error RenderingDeviceVulkan::_insert_staging_block() {
+
+ VkBufferCreateInfo bufferInfo;
+ bufferInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
+ bufferInfo.pNext = NULL;
+ bufferInfo.flags = 0;
+ bufferInfo.size = staging_buffer_block_size;
+ bufferInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
+ bufferInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
+ bufferInfo.queueFamilyIndexCount = 0;
+ bufferInfo.pQueueFamilyIndices = NULL;
+
+ VmaAllocationCreateInfo allocInfo;
+ allocInfo.flags = 0;
+ allocInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;
+ allocInfo.requiredFlags = 0;
+ allocInfo.preferredFlags = 0;
+ allocInfo.memoryTypeBits = 0;
+ allocInfo.pool = NULL;
+ allocInfo.pUserData = NULL;
+
+ StagingBufferBlock block;
+
+ VkResult err = vmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &block.buffer, &block.allocation, NULL);
+ ERR_FAIL_COND_V(err, ERR_CANT_CREATE);
+
+ block.frame_used = 0;
+ block.fill_amount = 0;
+
+ staging_buffer_blocks.insert(staging_buffer_current, block);
+ return OK;
+}
+
+Error RenderingDeviceVulkan::_staging_buffer_allocate(uint32_t p_amount, uint32_t p_required_align, uint32_t &r_alloc_offset, uint32_t &r_alloc_size, bool p_can_segment, bool p_on_draw_command_buffer) {
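+
+ // Staging memory is a ring of fixed-size CPU-visible blocks. Allocation
+ // tries, in order: append to the block already used this frame, advance to
+ // a block old enough to be safely reused, grow the ring (up to
+ // staging_buffer_max_size), and as a last resort flush the GPU and reclaim
+ // everything.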
+ //determine a block to use
+
+ r_alloc_size = p_amount;
+
+ while (true) {
+
+ r_alloc_offset = 0;
+
+ //see if we can use current block
+ if (staging_buffer_blocks[staging_buffer_current].frame_used == frames_drawn) {
+ //we used this block this frame, let's see if there is still room
+
+ uint32_t write_from = staging_buffer_blocks[staging_buffer_current].fill_amount;
+
+ {
+ uint32_t align_remainder = write_from % p_required_align;
+ if (align_remainder != 0) {
+ write_from += p_required_align - align_remainder;
+ }
+ }
+
+ int32_t available_bytes = int32_t(staging_buffer_block_size) - int32_t(write_from);
+
+ if ((int32_t)p_amount < available_bytes) {
+ //all is good, we should be ok, all will fit
+ r_alloc_offset = write_from;
+ } else if (p_can_segment && available_bytes >= (int32_t)p_required_align) {
+ //ok, it won't all fit, but at least we can fit an aligned chunk
+ //all is good, update what needs to be written to
+ r_alloc_offset = write_from;
+ r_alloc_size = available_bytes - (available_bytes % p_required_align);
+
+ } else {
+ //can't fit it into this buffer.
+ //will need to try next buffer
+
+ staging_buffer_current = (staging_buffer_current + 1) % staging_buffer_blocks.size();
+
+ // before doing anything, though, check that we didn't already manage to
+ // fill all the blocks within this same frame
+ if (staging_buffer_blocks[staging_buffer_current].frame_used == frames_drawn) {
+ //guess we did.. ok, let's see if we can insert a new block..
+ if (staging_buffer_blocks.size() * staging_buffer_block_size < staging_buffer_max_size) {
+ //we can, so we are safe
+ Error err = _insert_staging_block();
+ if (err) {
+ return err;
+ }
+ //claim for this frame
+ staging_buffer_blocks.write[staging_buffer_current].frame_used = frames_drawn;
+ } else {
+ // Ok, worst case scenario, all the staging buffers belong to this frame
+ // and this frame is not even done.
+ // If this is the main thread, it means the user is likely loading a lot of resources at once,
+ // otherwise, the thread should just be blocked until the next frame (currently unimplemented)
+
+ if (false) { //separate thread from render
+
+ //block_until_next_frame()
+ continue;
+ } else {
+
+ //flush EVERYTHING, including setup commands. If not immediate, the draw commands also need to be flushed
+ context->flush(true, p_on_draw_command_buffer);
+ //re-create the setup command
+ {
+ VkCommandBufferBeginInfo cmdbuf_begin;
+ cmdbuf_begin.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
+ cmdbuf_begin.pNext = NULL;
+ cmdbuf_begin.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
+ cmdbuf_begin.pInheritanceInfo = NULL;
+
+ VkResult err = vkBeginCommandBuffer(frames[frame].setup_command_buffer, &cmdbuf_begin);
+ ERR_FAIL_COND_V(err, ERR_CANT_CREATE);
+ context->set_setup_buffer(frames[frame].setup_command_buffer); //append now so it's added before everything else
+
+ if (p_on_draw_command_buffer) {
+
+ err = vkBeginCommandBuffer(frames[frame].draw_command_buffer, &cmdbuf_begin);
+ ERR_FAIL_COND_V(err, ERR_CANT_CREATE);
+ context->append_command_buffer(frames[frame].draw_command_buffer);
+ }
+ }
+
+ //clear the whole staging buffer
+ for (int i = 0; i < staging_buffer_blocks.size(); i++) {
+ staging_buffer_blocks.write[i].frame_used = 0;
+ staging_buffer_blocks.write[i].fill_amount = 0;
+ }
+ //claim current
+ staging_buffer_blocks.write[staging_buffer_current].frame_used = frames_drawn;
+ }
+ }
+
+ } else {
+ //not from current frame, so continue and try again
+ continue;
+ }
+ }
+
+ } else if (staging_buffer_blocks[staging_buffer_current].frame_used <= frames_drawn - frame_count) {
+ //this is an old block, which was already processed, let's reuse
+ staging_buffer_blocks.write[staging_buffer_current].frame_used = frames_drawn;
+ staging_buffer_blocks.write[staging_buffer_current].fill_amount = 0;
+ } else if (staging_buffer_blocks[staging_buffer_current].frame_used > frames_drawn - frame_count) {
+ //this block may still be in use, let's not touch it unless we have to, so.. can we create a new one?
+ if (staging_buffer_blocks.size() * staging_buffer_block_size < staging_buffer_max_size) {
+ //we are still allowed to create a new block, so let's do that and insert it for current pos
+ Error err = _insert_staging_block();
+ if (err) {
+ return err;
+ }
+ //claim for this frame
+ staging_buffer_blocks.write[staging_buffer_current].frame_used = frames_drawn;
+ } else {
+ // oops, we are out of room and we can't create more.
+ // let's flush older frames.
+ // The logic here is that if a game is loading a lot of data from the main thread, it will need to be stalled anyway.
+ // If loading from a separate thread, we can block that thread until next frame when more room is made (not currently implemented, though).
+
+ if (false) {
+ //separate thread from render
+ //block_until_next_frame()
+ continue; //and try again
+ } else {
+
+ context->flush(false); // flush previous frames (but don't touch setup command, so this frame)
+
+ for (int i = 0; i < staging_buffer_blocks.size(); i++) {
+ //clear all blocks but the ones from this frame
+ int block_idx = (i + staging_buffer_current) % staging_buffer_blocks.size();
+ if (staging_buffer_blocks[block_idx].frame_used == frames_drawn) {
+ break; //ok, we reached something from this frame, abort
+ }
+
+ staging_buffer_blocks.write[block_idx].frame_used = 0;
+ staging_buffer_blocks.write[block_idx].fill_amount = 0;
+ }
+
+ //claim for current frame
+ staging_buffer_blocks.write[staging_buffer_current].frame_used = frames_drawn;
+ }
+ }
+ }
+
+ //all was good, break
+ break;
+ }
+
+ staging_buffer_used = true;
+
+ return OK;
+}
+
+Error RenderingDeviceVulkan::_buffer_update(Buffer *p_buffer, size_t p_offset, const uint8_t *p_data, size_t p_data_size, bool p_use_draw_command_buffer, uint32_t p_required_align) {
+
+ //submitting may get chunked for various reasons, so convert this to a task
+ size_t to_submit = p_data_size;
+ size_t submit_from = 0;
+
+ while (to_submit > 0) {
+
+ uint32_t block_write_offset;
+ uint32_t block_write_amount;
+
+ Error err = _staging_buffer_allocate(MIN(to_submit, staging_buffer_block_size), p_required_align, block_write_offset, block_write_amount, true, p_use_draw_command_buffer);
+ if (err) {
+ return err;
+ }
+
+ //map staging buffer (it's host-visible and coherent)
+
+ void *data_ptr = NULL;
+ {
+ VkResult vkerr = vmaMapMemory(allocator, staging_buffer_blocks[staging_buffer_current].allocation, &data_ptr);
+ if (vkerr) {
+ ERR_FAIL_V(ERR_CANT_CREATE);
+ }
+ }
+
+ //copy to staging buffer
+ copymem(((uint8_t *)data_ptr) + block_write_offset, p_data + submit_from, block_write_amount);
+
+ //unmap
+ vmaUnmapMemory(allocator, staging_buffer_blocks[staging_buffer_current].allocation);
+ //insert a command to copy this
+
+ VkBufferCopy region;
+ region.srcOffset = block_write_offset;
+ region.dstOffset = submit_from + p_offset;
+ region.size = block_write_amount;
+
+ vkCmdCopyBuffer(p_use_draw_command_buffer ? frames[frame].draw_command_buffer : frames[frame].setup_command_buffer, staging_buffer_blocks[staging_buffer_current].buffer, p_buffer->buffer, 1, &region);
+
+ staging_buffer_blocks.write[staging_buffer_current].fill_amount = block_write_offset + block_write_amount;
+
+ to_submit -= block_write_amount;
+ submit_from += block_write_amount;
+ }
+
+ return OK;
+}
+
+/*****************/
+/**** TEXTURE ****/
+/*****************/
+
+RenderingDevice::ID RenderingDeviceVulkan::texture_create(const TextureFormat &p_format, const TextureView &p_view, const Vector<PoolVector<uint8_t> > &p_data) {
+
+ _THREAD_SAFE_METHOD_
+
+ VkImageCreateInfo image_create_info;
+ image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
+ image_create_info.pNext = NULL;
+ image_create_info.flags = 0;
+
+ if (p_format.type == TEXTURE_TYPE_CUBE || p_format.type == TEXTURE_TYPE_CUBE_ARRAY) {
+ image_create_info.flags |= VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT;
+ }
+ /*if (p_format.type == TEXTURE_TYPE_2D || p_format.type == TEXTURE_TYPE_2D_ARRAY) {
+ image_create_info.flags |= VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT;
+ }*/
+
+ const VkImageType image_type[TEXTURE_TYPE_MAX] = {
+ VK_IMAGE_TYPE_1D,
+ VK_IMAGE_TYPE_2D,
+ VK_IMAGE_TYPE_3D,
+ VK_IMAGE_TYPE_2D,
+ VK_IMAGE_TYPE_1D,
+ VK_IMAGE_TYPE_2D,
+ VK_IMAGE_TYPE_3D
+ };
+
+ ERR_FAIL_INDEX_V(p_format.type, TEXTURE_TYPE_MAX, INVALID_ID);
+
+ image_create_info.imageType = image_type[p_format.type];
+
+ ERR_FAIL_COND_V_MSG(p_format.width < 1, INVALID_ID, "Width must be equal to or greater than 1 for all textures.");
+
+ image_create_info.format = vulkan_formats[p_format.format];
+
+ image_create_info.extent.width = p_format.width;
+ if (image_create_info.imageType == VK_IMAGE_TYPE_3D || image_create_info.imageType == VK_IMAGE_TYPE_2D) {
+ ERR_FAIL_COND_V_MSG(p_format.height < 1, INVALID_ID, "Height must be equal to or greater than 1 for 2D and 3D textures.");
+ image_create_info.extent.height = p_format.height;
+ } else {
+ image_create_info.extent.height = 1;
+ }
+
+ if (image_create_info.imageType == VK_IMAGE_TYPE_3D) {
+ ERR_FAIL_COND_V_MSG(p_format.depth < 1, INVALID_ID, "Depth must be equal to or greater than 1 for 3D textures.");
+ image_create_info.extent.depth = p_format.depth;
+ } else {
+ image_create_info.extent.depth = 1;
+ }
+
+ ERR_FAIL_COND_V(p_format.mipmaps < 1, INVALID_ID);
+
+ image_create_info.mipLevels = p_format.mipmaps;
+
+ uint32_t array_layer_multiplier = 1;
+ if (p_format.type == TEXTURE_TYPE_CUBE_ARRAY || p_format.type == TEXTURE_TYPE_CUBE) {
+ array_layer_multiplier = 6;
+ }
+ if (p_format.type == TEXTURE_TYPE_1D_ARRAY || p_format.type == TEXTURE_TYPE_2D_ARRAY || p_format.type == TEXTURE_TYPE_CUBE_ARRAY || p_format.type == TEXTURE_TYPE_CUBE) {
+ ERR_FAIL_COND_V_MSG(p_format.array_layers < 1, INVALID_ID,
+ "Number of layers must be equal to or greater than 1 for arrays and cubemaps.");
+ if ((p_format.type == TEXTURE_TYPE_CUBE_ARRAY || p_format.type == TEXTURE_TYPE_CUBE) && (p_format.array_layers % 6) != 0) {
+ ERR_FAIL_V_MSG(INVALID_ID, "Cubemap and cubemap array textures must provide a layer number that is multiple of 6");
+ }
+ image_create_info.arrayLayers = p_format.array_layers;
+ } else {
+ image_create_info.arrayLayers = 1;
+ }
+
+ ERR_FAIL_INDEX_V(p_format.samples, TEXTURE_SAMPLES_MAX, INVALID_ID);
+
+ image_create_info.samples = rasterization_sample_count[p_format.samples];
+ image_create_info.tiling = (p_format.usage_bits & TEXTURE_USAGE_CPU_READ_BIT) ? VK_IMAGE_TILING_LINEAR : VK_IMAGE_TILING_OPTIMAL;
+
+ //usage
+ image_create_info.usage = 0;
+
+ if (p_format.usage_bits & TEXTURE_USAGE_SAMPLING_BIT) {
+ image_create_info.usage |= VK_IMAGE_USAGE_SAMPLED_BIT;
+ }
+
+ if (p_format.usage_bits & TEXTURE_USAGE_COLOR_ATTACHMENT_BIT) {
+ image_create_info.usage |= VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
+ }
+
+ if (p_format.usage_bits & TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) {
+ image_create_info.usage |= VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT;
+ }
+
+ if (p_format.usage_bits & TEXTURE_USAGE_CAN_UPDATE_BIT) {
+ image_create_info.usage |= VK_IMAGE_USAGE_TRANSFER_DST_BIT;
+ }
+
+ image_create_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
+ image_create_info.queueFamilyIndexCount = 0;
+ image_create_info.pQueueFamilyIndices = NULL;
+ image_create_info.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
+
+ uint32_t required_mipmaps = get_image_required_mipmaps(image_create_info.extent.width, image_create_info.extent.height, image_create_info.extent.depth);
+
+ ERR_FAIL_COND_V_MSG(required_mipmaps < image_create_info.mipLevels, INVALID_ID,
+ "Too many mipmaps requested for texture format and dimensions (" + itos(image_create_info.mipLevels) + "), maximum allowed: (" + itos(required_mipmaps) + ").");
+
+ if (p_data.size()) {
+
+ ERR_FAIL_COND_V_MSG(!(p_format.usage_bits & TEXTURE_USAGE_CAN_UPDATE_BIT), INVALID_ID,
+ "Texture needs the TEXTURE_USAGE_CAN_UPDATE_BIT usage flag in order to be updated at initialization or later");
+
+ int expected_images = image_create_info.mipLevels * image_create_info.arrayLayers * array_layer_multiplier;
+ ERR_FAIL_COND_V_MSG(p_data.size() != expected_images, INVALID_ID,
+ "Supplied initial data has invalid length (" + itos(p_data.size()) + " slices), expected (" + itos(expected_images) + ").");
+
+ int idx = 0;
+ for (uint32_t i = 0; i < image_create_info.arrayLayers * array_layer_multiplier; i++) {
+ for (uint32_t j = 0; j < image_create_info.mipLevels; j++) {
+ print_line("computed size from " + Vector3(image_create_info.extent.width, image_create_info.extent.height, image_create_info.extent.depth));
+ uint32_t required_size = get_image_format_required_size(p_format.format, image_create_info.extent.width, image_create_info.extent.height, image_create_info.extent.depth, j);
+ ERR_FAIL_COND_V_MSG((uint32_t)p_data[idx].size() != required_size, INVALID_ID,
+ "Data for slice index " + itos(idx) + " (mapped to layer " + itos(i) + ", mipmap " + itos(j) + ") has a size (" + itos(p_data[idx].size()) + ") different from what the format requires (" + itos(required_size) + ").");
+ idx++;
+ }
+ }
+ }
+
+ {
+ //validate that this image is supported for the intended use
+ VkFormatProperties properties;
+ vkGetPhysicalDeviceFormatProperties(context->get_physical_device(), image_create_info.format, &properties);
+ VkFormatFeatureFlags flags;
+
+ String format_text = "'" + String(named_formats[p_format.format]) + "'";
+
+ if (p_format.usage_bits & TEXTURE_USAGE_CPU_READ_BIT) {
+ flags = properties.linearTilingFeatures;
+ format_text += " (with CPU read bit)";
+ } else {
+ flags = properties.optimalTilingFeatures;
+ }
+
+ if (p_format.usage_bits & TEXTURE_USAGE_SAMPLING_BIT && !(flags & VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT)) {
+ ERR_FAIL_V_MSG(INVALID_ID, "Format " + format_text + " does not support usage as sampling texture.");
+ }
+
+ if (p_format.usage_bits & TEXTURE_USAGE_COLOR_ATTACHMENT_BIT && !(flags & VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT)) {
+ ERR_FAIL_V_MSG(INVALID_ID, "Format " + format_text + " does not support usage as color attachment.");
+ }
+
+ if (p_format.usage_bits & TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT && !(flags & VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT)) {
+ ERR_FAIL_V_MSG(INVALID_ID, "Format " + format_text + " does not support usage as depth-stencil attachment.");
+ }
+
+ if (p_format.usage_bits & TEXTURE_USAGE_STORAGE_BIT && !(flags & VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT)) {
+ ERR_FAIL_V_MSG(INVALID_ID, "Format " + format_text + " does not support usage as storage image.");
+ }
+
+ if (p_format.usage_bits & TEXTURE_USAGE_STORAGE_ATOMIC_BIT && !(flags & VK_FORMAT_FEATURE_STORAGE_IMAGE_ATOMIC_BIT)) {
+ ERR_FAIL_V_MSG(INVALID_ID, "Format " + format_text + " does not support usage as atomic storage image.");
+ }
+ }
+
+ //some view validation
+
+ if (p_view.format_override != DATA_FORMAT_MAX) {
+ ERR_FAIL_INDEX_V(p_view.format_override, DATA_FORMAT_MAX, INVALID_ID);
+ }
+ ERR_FAIL_INDEX_V(p_view.swizzle_r, TEXTURE_SWIZZLE_MAX, INVALID_ID);
+ ERR_FAIL_INDEX_V(p_view.swizzle_g, TEXTURE_SWIZZLE_MAX, INVALID_ID);
+ ERR_FAIL_INDEX_V(p_view.swizzle_b, TEXTURE_SWIZZLE_MAX, INVALID_ID);
+ ERR_FAIL_INDEX_V(p_view.swizzle_a, TEXTURE_SWIZZLE_MAX, INVALID_ID);
+
+ //allocate memory
+
+ VmaAllocationCreateInfo allocInfo;
+ allocInfo.flags = 0;
+ allocInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
+ allocInfo.requiredFlags = 0;
+ allocInfo.preferredFlags = 0;
+ allocInfo.memoryTypeBits = 0;
+ allocInfo.pool = NULL;
+ allocInfo.pUserData = NULL;
+
+ Texture texture;
+
+ VkResult err = vmaCreateImage(allocator, &image_create_info, &allocInfo, &texture.image, &texture.allocation, &texture.allocation_info);
+ ERR_FAIL_COND_V(err, ERR_CANT_CREATE);
+
+ texture.type = p_format.type;
+ texture.format = p_format.format;
+ texture.width = image_create_info.extent.width;
+ texture.height = image_create_info.extent.height;
+ texture.depth = image_create_info.extent.depth;
+ texture.layers = image_create_info.arrayLayers;
+ texture.mipmaps = image_create_info.mipLevels;
+ texture.usage_flags = p_format.usage_bits;
+ texture.samples = p_format.samples;
+
+ if (p_format.usage_bits & TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) {
+ texture.aspect_mask = VK_IMAGE_ASPECT_DEPTH_BIT;
+ texture.reading_layout = VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL;
+ texture.bound_layout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
+ } else {
+ texture.aspect_mask = VK_IMAGE_ASPECT_COLOR_BIT;
+ texture.reading_layout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
+ texture.bound_layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
+ }
+
+ texture.bound = false;
+ texture.owner = INVALID_ID;
+
+ //create view
+
+ VkImageViewCreateInfo image_view_create_info;
+ image_view_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
+ image_view_create_info.pNext = NULL;
+ image_view_create_info.flags = 0;
+ image_view_create_info.image = texture.image;
+
+ static const VkImageViewType view_types[TEXTURE_TYPE_MAX] = {
+ VK_IMAGE_VIEW_TYPE_1D,
+ VK_IMAGE_VIEW_TYPE_2D,
+ VK_IMAGE_VIEW_TYPE_3D,
+ VK_IMAGE_VIEW_TYPE_CUBE,
+ VK_IMAGE_VIEW_TYPE_1D_ARRAY,
+ VK_IMAGE_VIEW_TYPE_2D_ARRAY,
+ VK_IMAGE_VIEW_TYPE_CUBE_ARRAY,
+ };
+
+ image_view_create_info.viewType = view_types[p_format.type];
+ if (p_view.format_override == DATA_FORMAT_MAX) {
+ image_view_create_info.format = image_create_info.format;
+ } else {
+ image_view_create_info.format = vulkan_formats[p_view.format_override];
+ }
+
+ static const VkComponentSwizzle component_swizzles[TEXTURE_SWIZZLE_MAX] = {
+ VK_COMPONENT_SWIZZLE_IDENTITY,
+ VK_COMPONENT_SWIZZLE_ZERO,
+ VK_COMPONENT_SWIZZLE_ONE,
+ VK_COMPONENT_SWIZZLE_R,
+ VK_COMPONENT_SWIZZLE_G,
+ VK_COMPONENT_SWIZZLE_B,
+ VK_COMPONENT_SWIZZLE_A
+ };
+
+ image_view_create_info.components.r = component_swizzles[p_view.swizzle_r];
+ image_view_create_info.components.g = component_swizzles[p_view.swizzle_g];
+ image_view_create_info.components.b = component_swizzles[p_view.swizzle_b];
+ image_view_create_info.components.a = component_swizzles[p_view.swizzle_a];
+
+ image_view_create_info.subresourceRange.baseMipLevel = 0;
+ image_view_create_info.subresourceRange.levelCount = image_create_info.mipLevels;
+ image_view_create_info.subresourceRange.baseArrayLayer = 0;
+ image_view_create_info.subresourceRange.layerCount = array_layer_multiplier * image_create_info.arrayLayers;
+ if (p_format.usage_bits & TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) {
+ image_view_create_info.subresourceRange.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
+ } else {
+ image_view_create_info.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
+ }
+
+ err = vkCreateImageView(device, &image_view_create_info, NULL, &texture.view);
+
+ if (err) {
+ vmaDestroyImage(allocator, texture.image, texture.allocation); //also frees the allocation
+ ERR_FAIL_V(INVALID_ID);
+ }
+
+ //barrier to set layout
+ {
+ VkImageMemoryBarrier image_memory_barrier;
+ image_memory_barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
+ image_memory_barrier.pNext = NULL;
+ image_memory_barrier.srcAccessMask = 0;
+ image_memory_barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
+ image_memory_barrier.oldLayout = VK_IMAGE_LAYOUT_UNDEFINED;
+ image_memory_barrier.newLayout = texture.reading_layout;
+ image_memory_barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
+ image_memory_barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
+ image_memory_barrier.image = texture.image;
+ image_memory_barrier.subresourceRange.aspectMask = texture.aspect_mask;
+ image_memory_barrier.subresourceRange.baseMipLevel = 0;
+ image_memory_barrier.subresourceRange.levelCount = image_create_info.mipLevels;
+ image_memory_barrier.subresourceRange.baseArrayLayer = 0;
+ image_memory_barrier.subresourceRange.layerCount = image_create_info.arrayLayers * array_layer_multiplier;
+
+ vkCmdPipelineBarrier(frames[frame].setup_command_buffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, 0, 0, NULL, 0, NULL, 1, &image_memory_barrier);
+ }
+
+ ID id = texture_owner.make_id(texture);
+
+ if (p_data.size()) {
+
+ for (uint32_t i = 0; i < image_create_info.arrayLayers * array_layer_multiplier; i++) { //cover cubemap faces too, matching the validation above
+ for (uint32_t j = 0; j < image_create_info.mipLevels; j++) {
+ texture_update(id, j, i, p_data[i * image_create_info.mipLevels + j], true);
+ }
+ }
+ }
+ return id;
+}
+
+RenderingDevice::ID RenderingDeviceVulkan::texture_create_shared(const TextureView &p_view, ID p_with_texture) {
+
+ Texture *src_texture = texture_owner.getornull(p_with_texture);
+ ERR_FAIL_COND_V(!src_texture, INVALID_ID);
+
+ if (src_texture->owner != INVALID_ID) { //this is a shared texture; use the original instead
+ p_with_texture = src_texture->owner;
+ src_texture = texture_owner.getornull(src_texture->owner);
+ ERR_FAIL_COND_V(!src_texture, INVALID_ID); //this is a bug
+ }
+
+ //create view
+
+ Texture texture = *src_texture;
+
+ uint32_t array_layer_multiplier = 1;
+ if (texture.type == TEXTURE_TYPE_CUBE_ARRAY || texture.type == TEXTURE_TYPE_CUBE) {
+ array_layer_multiplier = 6;
+ }
+
+ VkImageViewCreateInfo image_view_create_info;
+ image_view_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
+ image_view_create_info.pNext = NULL;
+ image_view_create_info.flags = 0;
+ image_view_create_info.image = texture.image;
+
+ static const VkImageViewType view_types[TEXTURE_TYPE_MAX] = {
+ VK_IMAGE_VIEW_TYPE_1D,
+ VK_IMAGE_VIEW_TYPE_2D,
+ VK_IMAGE_VIEW_TYPE_3D,
+ VK_IMAGE_VIEW_TYPE_CUBE,
+ VK_IMAGE_VIEW_TYPE_1D_ARRAY,
+ VK_IMAGE_VIEW_TYPE_2D_ARRAY,
+ VK_IMAGE_VIEW_TYPE_CUBE_ARRAY,
+ };
+
+ image_view_create_info.viewType = view_types[texture.type];
+ if (p_view.format_override == DATA_FORMAT_MAX) {
+ image_view_create_info.format = vulkan_formats[texture.format];
+ } else {
+ ERR_FAIL_INDEX_V(p_view.format_override, DATA_FORMAT_MAX, INVALID_ID);
+ image_view_create_info.format = vulkan_formats[p_view.format_override];
+ }
+
+ static const VkComponentSwizzle component_swizzles[TEXTURE_SWIZZLE_MAX] = {
+ VK_COMPONENT_SWIZZLE_IDENTITY,
+ VK_COMPONENT_SWIZZLE_ZERO,
+ VK_COMPONENT_SWIZZLE_ONE,
+ VK_COMPONENT_SWIZZLE_R,
+ VK_COMPONENT_SWIZZLE_G,
+ VK_COMPONENT_SWIZZLE_B,
+ VK_COMPONENT_SWIZZLE_A
+ };
+
+ image_view_create_info.components.r = component_swizzles[p_view.swizzle_r];
+ image_view_create_info.components.g = component_swizzles[p_view.swizzle_g];
+ image_view_create_info.components.b = component_swizzles[p_view.swizzle_b];
+ image_view_create_info.components.a = component_swizzles[p_view.swizzle_a];
+
+ image_view_create_info.subresourceRange.baseMipLevel = 0;
+ image_view_create_info.subresourceRange.levelCount = texture.mipmaps;
+ image_view_create_info.subresourceRange.layerCount = array_layer_multiplier * texture.layers;
+ image_view_create_info.subresourceRange.baseArrayLayer = 0;
+
+ if (texture.usage_flags & TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) {
+ image_view_create_info.subresourceRange.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
+ } else {
+ image_view_create_info.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
+ }
+
+ VkResult err = vkCreateImageView(device, &image_view_create_info, NULL, &texture.view);
+
+ if (err) {
+ ERR_FAIL_V(INVALID_ID);
+ }
+
+ texture.owner = p_with_texture;
+ ID id = texture_owner.make_id(texture);
+ _add_dependency(id, p_with_texture);
+
+ return id;
+}
+
+Error RenderingDeviceVulkan::texture_update(ID p_texture, uint32_t p_mipmap, uint32_t p_layer, const PoolVector<uint8_t> &p_data, bool p_sync_with_draw) {
+
+ _THREAD_SAFE_METHOD_
+
+ Texture *texture = texture_owner.getornull(p_texture);
+ ERR_FAIL_COND_V(!texture, ERR_INVALID_PARAMETER);
+
+ if (texture->owner != INVALID_ID) {
+ p_texture = texture->owner;
+ texture = texture_owner.getornull(texture->owner);
+ ERR_FAIL_COND_V(!texture, ERR_BUG); //this is a bug
+ }
+
+ ERR_FAIL_COND_V_MSG(texture->bound, ERR_CANT_ACQUIRE_RESOURCE,
+ "Texture can't be updated while a render pass that uses it is being created. Ensure render pass is finalized (and that it was created with RENDER_PASS_CONTENTS_FINISH) to unbind this texture.");
+
+ ERR_FAIL_COND_V_MSG(!(texture->usage_flags & TEXTURE_USAGE_CAN_UPDATE_BIT), ERR_INVALID_PARAMETER,
+ "Texture requires the TEXTURE_USAGE_CAN_UPDATE_BIT in order to be updatable.");
+
+ ERR_FAIL_COND_V(p_mipmap >= texture->mipmaps, ERR_INVALID_PARAMETER);
+ uint32_t layer_count = texture->layers;
+ if (texture->type == TEXTURE_TYPE_CUBE || texture->type == TEXTURE_TYPE_CUBE_ARRAY) {
+ layer_count *= 6;
+ }
+ ERR_FAIL_COND_V(p_layer >= layer_count, ERR_INVALID_PARAMETER);
+
+ uint32_t width, height;
+ uint32_t image_size = get_image_format_required_size(texture->format, texture->width, texture->height, 1, p_mipmap, &width, &height);
+ uint32_t required_size = image_size * texture->depth;
+
+ ERR_FAIL_COND_V_MSG(required_size != (uint32_t)p_data.size(), ERR_INVALID_PARAMETER,
+ "Required size for texture update (" + itos(required_size) + ") does not match the supplied data size (" + itos(p_data.size()) + ").");
+
+ uint32_t region_size = texture_upload_region_size_px;
+
+ PoolVector<uint8_t>::Read r = p_data.read();
+
+ VkCommandBuffer command_buffer = p_sync_with_draw ? frames[frame].draw_command_buffer : frames[frame].setup_command_buffer;
+
+ //barrier to transfer
+ {
+ VkImageMemoryBarrier image_memory_barrier;
+ image_memory_barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
+ image_memory_barrier.pNext = NULL;
+ image_memory_barrier.srcAccessMask = 0;
+ image_memory_barrier.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
+ image_memory_barrier.oldLayout = texture->reading_layout;
+ image_memory_barrier.newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
+
+ image_memory_barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
+ image_memory_barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
+ image_memory_barrier.image = texture->image;
+ image_memory_barrier.subresourceRange.aspectMask = texture->aspect_mask;
+ image_memory_barrier.subresourceRange.baseMipLevel = p_mipmap;
+ image_memory_barrier.subresourceRange.levelCount = 1;
+ image_memory_barrier.subresourceRange.baseArrayLayer = p_layer;
+ image_memory_barrier.subresourceRange.layerCount = 1;
+
+ vkCmdPipelineBarrier(command_buffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0, 0, NULL, 0, NULL, 1, &image_memory_barrier);
+ }
+
+ for (uint32_t z = 0; z < texture->depth; z++) { //for 3D textures, depth may be > 1
+
+ const uint8_t *read_ptr = r.ptr();
+ read_ptr += image_size * z;
+
+ for (uint32_t x = 0; x < width; x += region_size) {
+ for (uint32_t y = 0; y < height; y += region_size) {
+
+ uint32_t region_w = MIN(region_size, width - x);
+ uint32_t region_h = MIN(region_size, height - y);
+
+ uint32_t pixel_size = get_image_format_pixel_size(texture->format);
+ uint32_t to_allocate = region_w * region_h * pixel_size;
+ to_allocate >>= get_compressed_image_format_pixel_rshift(texture->format);
+
+ uint32_t alloc_offset, alloc_size;
+ Error err = _staging_buffer_allocate(to_allocate, 32, alloc_offset, alloc_size, false, p_sync_with_draw);
+ ERR_FAIL_COND_V(err, ERR_CANT_CREATE);
+
+ uint8_t *write_ptr;
+
+ { //map
+ void *data_ptr = NULL;
+ VkResult vkerr = vmaMapMemory(allocator, staging_buffer_blocks[staging_buffer_current].allocation, &data_ptr);
+ if (vkerr) {
+ ERR_FAIL_V(ERR_CANT_CREATE);
+ }
+ write_ptr = (uint8_t *)data_ptr;
+ write_ptr += alloc_offset;
+ }
+
+ uint32_t block_w, block_h;
+ get_compressed_image_format_block_dimensions(texture->format, block_w, block_h);
+
+ ERR_FAIL_COND_V(region_w % block_w, ERR_BUG);
+ ERR_FAIL_COND_V(region_h % block_h, ERR_BUG);
+
+ if (block_w != 1 || block_h != 1) {
+ //compressed image (blocks)
+ //must copy a block region
+
+ uint32_t block_size = get_compressed_image_format_block_byte_size(texture->format);
+ //re-create current variables in blocky format
+ uint32_t xb = x / block_w;
+ uint32_t yb = y / block_h;
+ uint32_t wb = width / block_w;
+ //uint32_t hb = height / block_h;
+ uint32_t region_wb = region_w / block_w;
+ uint32_t region_hb = region_h / block_h;
+ for (uint32_t xr = 0; xr < region_wb; xr++) {
+ for (uint32_t yr = 0; yr < region_hb; yr++) {
+ uint32_t src_offset = ((yr + yb) * wb + xr + xb) * block_size;
+ uint32_t dst_offset = (yr * region_wb + xr) * block_size;
+ //copy block
+ for (uint32_t i = 0; i < block_size; i++) {
+ write_ptr[dst_offset + i] = read_ptr[src_offset + i];
+ }
+ }
+ }
+
+ } else {
+ //regular image (pixels)
+ //must copy a pixel region
+
+ for (uint32_t xr = 0; xr < region_w; xr++) {
+ for (uint32_t yr = 0; yr < region_h; yr++) {
+ uint32_t src_offset = ((yr + y) * width + xr + x) * pixel_size;
+ uint32_t dst_offset = (yr * region_w + xr) * pixel_size;
+ //copy block
+ for (uint32_t i = 0; i < pixel_size; i++) {
+
+ write_ptr[dst_offset + i] = read_ptr[src_offset + i];
+ }
+ }
+ }
+ }
+
+ { //unmap
+ vmaUnmapMemory(allocator, staging_buffer_blocks[staging_buffer_current].allocation);
+ }
+
+ VkBufferImageCopy buffer_image_copy;
+ buffer_image_copy.bufferOffset = alloc_offset;
+ buffer_image_copy.bufferRowLength = 0; //tightly packed
+ buffer_image_copy.bufferImageHeight = 0; //tightly packed
+
+ buffer_image_copy.imageSubresource.aspectMask = texture->aspect_mask;
+ buffer_image_copy.imageSubresource.baseArrayLayer = p_layer;
+ buffer_image_copy.imageSubresource.mipLevel = p_mipmap;
+ buffer_image_copy.imageSubresource.layerCount = 1;
+
+ buffer_image_copy.imageOffset.x = x;
+ buffer_image_copy.imageOffset.y = y;
+ buffer_image_copy.imageOffset.z = z;
+
+ buffer_image_copy.imageExtent.width = region_w;
+ buffer_image_copy.imageExtent.height = region_h;
+ buffer_image_copy.imageExtent.depth = 1;
+
+ vkCmdCopyBufferToImage(command_buffer, staging_buffer_blocks[staging_buffer_current].buffer, texture->image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &buffer_image_copy);
+
+ staging_buffer_blocks.write[staging_buffer_current].fill_amount = alloc_offset + alloc_size;
+ }
+ }
+ }
+
+ //barrier to restore layout
+ {
+ VkImageMemoryBarrier image_memory_barrier;
+ image_memory_barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
+ image_memory_barrier.pNext = NULL;
+ image_memory_barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
+ image_memory_barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
+ image_memory_barrier.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
+ image_memory_barrier.newLayout = texture->reading_layout;
+ image_memory_barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
+ image_memory_barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
+ image_memory_barrier.image = texture->image;
+ image_memory_barrier.subresourceRange.aspectMask = texture->aspect_mask;
+ image_memory_barrier.subresourceRange.baseMipLevel = p_mipmap;
+ image_memory_barrier.subresourceRange.levelCount = 1;
+ image_memory_barrier.subresourceRange.baseArrayLayer = p_layer;
+ image_memory_barrier.subresourceRange.layerCount = 1;
+
+ vkCmdPipelineBarrier(command_buffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, 0, 0, NULL, 0, NULL, 1, &image_memory_barrier);
+ }
+
+ return OK;
+}
+
+bool RenderingDeviceVulkan::texture_is_format_supported_for_usage(DataFormat p_format, TextureUsageBits p_usage) const {
+ ERR_FAIL_INDEX_V(p_format, DATA_FORMAT_MAX, false);
+
+ _THREAD_SAFE_METHOD_
+
+ //validate that this image is supported for the intended use
+ VkFormatProperties properties;
+ vkGetPhysicalDeviceFormatProperties(context->get_physical_device(), vulkan_formats[p_format], &properties);
+ VkFormatFeatureFlags flags;
+
+ if (p_usage & TEXTURE_USAGE_CPU_READ_BIT) {
+ flags = properties.linearTilingFeatures;
+ } else {
+ flags = properties.optimalTilingFeatures;
+ }
+
+ if (p_usage & TEXTURE_USAGE_SAMPLING_BIT && !(flags & VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT)) {
+ return false;
+ }
+
+ if (p_usage & TEXTURE_USAGE_COLOR_ATTACHMENT_BIT && !(flags & VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT)) {
+ return false;
+ }
+
+ if (p_usage & TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT && !(flags & VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT)) {
+ return false;
+ }
+
+ if (p_usage & TEXTURE_USAGE_STORAGE_BIT && !(flags & VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT)) {
+ return false;
+ }
+
+ if (p_usage & TEXTURE_USAGE_STORAGE_ATOMIC_BIT && !(flags & VK_FORMAT_FEATURE_STORAGE_IMAGE_ATOMIC_BIT)) {
+ return false;
+ }
+
+ return true;
+}
+
+/********************/
+/**** ATTACHMENT ****/
+/********************/
+
+VkRenderPass RenderingDeviceVulkan::_render_pass_create(const Vector<AttachmentFormat> &p_format, InitialAction p_initial_action, FinalAction p_final_action, int *r_color_attachment_count) {
+
+ Vector<VkAttachmentDescription> attachments;
+ Vector<VkAttachmentReference> color_references;
+ Vector<VkAttachmentReference> depth_stencil_references;
+ Vector<VkAttachmentReference> resolve_references;
+
+ for (int i = 0; i < p_format.size(); i++) {
+
+ VkAttachmentDescription description;
+
+ description.flags = 0;
+ ERR_FAIL_INDEX_V(p_format[i].format, DATA_FORMAT_MAX, VK_NULL_HANDLE);
+ description.format = vulkan_formats[p_format[i].format];
+ ERR_FAIL_INDEX_V(p_format[i].samples, TEXTURE_SAMPLES_MAX, VK_NULL_HANDLE);
+ description.samples = rasterization_sample_count[p_format[i].samples];
+ //anything below does not really matter, as vulkan just ignores it when creating a pipeline
+
+ switch (p_initial_action) {
+
+ case INITIAL_ACTION_CLEAR: {
+ description.loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;
+ description.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;
+ description.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; //don't care what is there
+ } break;
+ case INITIAL_ACTION_KEEP_COLOR: {
+ if (p_format[i].usage_flags & TEXTURE_USAGE_COLOR_ATTACHMENT_BIT) {
+ description.loadOp = VK_ATTACHMENT_LOAD_OP_LOAD;
+ description.initialLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
+ description.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
+ } else if (p_format[i].usage_flags & TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) {
+ description.loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;
+ description.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; //don't care what is there
+ description.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;
+ } else {
+ description.loadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
+ description.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
+ description.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; //don't care what is there
+ }
+ } break;
+ case INITIAL_ACTION_KEEP_COLOR_AND_DEPTH: {
+
+ if (p_format[i].usage_flags & TEXTURE_USAGE_COLOR_ATTACHMENT_BIT) {
+ description.loadOp = VK_ATTACHMENT_LOAD_OP_LOAD;
+ description.initialLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
+ } else if (p_format[i].usage_flags & TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) {
+ description.loadOp = VK_ATTACHMENT_LOAD_OP_LOAD;
+ description.initialLayout = VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL;
+ description.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;
+ } else {
+ description.loadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
+ description.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
+ description.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; //don't care what is there
+ }
+
+ } break;
+ case INITIAL_ACTION_CONTINUE: {
+ if (p_format[i].usage_flags & TEXTURE_USAGE_COLOR_ATTACHMENT_BIT) {
+ description.loadOp = VK_ATTACHMENT_LOAD_OP_LOAD;
+ description.initialLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
+ description.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
+ } else if (p_format[i].usage_flags & TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) {
+ description.loadOp = VK_ATTACHMENT_LOAD_OP_LOAD;
+ description.initialLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
+ description.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_LOAD;
+ } else {
+ description.loadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
+ description.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
+ description.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; //don't care what is there
+ }
+ } break;
+ default: {
+ ERR_FAIL_V(VK_NULL_HANDLE); //should never reach here
+ }
+ }
+
+ switch (p_final_action) {
+ case FINAL_ACTION_READ_COLOR_AND_DEPTH: {
+ if (p_format[i].usage_flags & TEXTURE_USAGE_COLOR_ATTACHMENT_BIT) {
+ description.storeOp = VK_ATTACHMENT_STORE_OP_STORE;
+ description.stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
+ description.finalLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
+ } else if (p_format[i].usage_flags & TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) {
+
+ description.storeOp = VK_ATTACHMENT_STORE_OP_STORE;
+ description.stencilStoreOp = VK_ATTACHMENT_STORE_OP_STORE;
+ description.finalLayout = VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL;
+ } else {
+ description.storeOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
+ description.stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
+ description.finalLayout = VK_IMAGE_LAYOUT_GENERAL; //don't care about the contents afterwards
+ }
+ } break;
+ case FINAL_ACTION_READ_COLOR_DISCARD_DEPTH: {
+ if (p_format[i].usage_flags & TEXTURE_USAGE_COLOR_ATTACHMENT_BIT) {
+ description.storeOp = VK_ATTACHMENT_STORE_OP_STORE;
+ description.stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
+ description.finalLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
+ } else if (p_format[i].usage_flags & TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) {
+
+ description.storeOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
+ description.stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
+ description.finalLayout = VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL;
+ } else {
+ description.storeOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
+ description.stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
+ description.finalLayout = VK_IMAGE_LAYOUT_GENERAL; //don't care about the contents afterwards
+ }
+ } break;
+ case FINAL_ACTION_DISCARD: {
+ if (p_format[i].usage_flags & TEXTURE_USAGE_COLOR_ATTACHMENT_BIT) {
+ description.storeOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
+ description.stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
+ description.finalLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
+ } else if (p_format[i].usage_flags & TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) {
+
+ description.storeOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
+ description.stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
+ description.finalLayout = VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL;
+ } else {
+ description.storeOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
+ description.stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
+ description.finalLayout = VK_IMAGE_LAYOUT_GENERAL; //don't care about the contents afterwards
+ }
+ } break;
+ case FINAL_ACTION_CONTINUE: {
+ if (p_format[i].usage_flags & TEXTURE_USAGE_COLOR_ATTACHMENT_BIT) {
+ description.storeOp = VK_ATTACHMENT_STORE_OP_STORE;
+ description.stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
+ description.finalLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
+ } else if (p_format[i].usage_flags & TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) {
+
+ description.storeOp = VK_ATTACHMENT_STORE_OP_STORE;
+ description.stencilStoreOp = VK_ATTACHMENT_STORE_OP_STORE;
+ description.finalLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
+ } else {
+ description.storeOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
+ description.stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
+ description.finalLayout = VK_IMAGE_LAYOUT_GENERAL; //don't care about the contents afterwards
+ }
+
+ } break;
+ default: {
+ ERR_FAIL_V(VK_NULL_HANDLE); //should never reach here
+ }
+ }
+
+ attachments.push_back(description);
+
+ VkAttachmentReference reference;
+ reference.attachment = i;
+
+ if (p_format[i].usage_flags & TEXTURE_USAGE_COLOR_ATTACHMENT_BIT) {
+ reference.layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
+ color_references.push_back(reference);
+ } else if (p_format[i].usage_flags & TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) {
+ reference.layout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
+ depth_stencil_references.push_back(reference);
+ } else if (p_format[i].usage_flags & TEXTURE_USAGE_RESOLVE_ATTACHMENT_BIT) {
+ reference.layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
+ resolve_references.push_back(reference);
+ } else {
+ ERR_FAIL_V_MSG(VK_NULL_HANDLE, "Texture index " + itos(i) + " is neither color, depth stencil or resolve so it can't be used as attachment.");
+ }
+ }
+
+ ERR_FAIL_COND_V(depth_stencil_references.size() > 1, VK_NULL_HANDLE);
+ ERR_FAIL_COND_V(resolve_references.size() > 1, VK_NULL_HANDLE);
+
+ VkSubpassDescription subpass;
+ subpass.flags = 0;
+ subpass.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;
+ subpass.inputAttachmentCount = 0; //unsupported for now
+ subpass.pInputAttachments = NULL;
+ subpass.colorAttachmentCount = color_references.size();
+ subpass.pColorAttachments = color_references.ptr();
+ subpass.pDepthStencilAttachment = depth_stencil_references.ptr();
+ subpass.pResolveAttachments = resolve_references.ptr();
+ subpass.preserveAttachmentCount = 0;
+ subpass.pPreserveAttachments = NULL;
+
+ VkRenderPassCreateInfo render_pass_create_info;
+ render_pass_create_info.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO;
+ render_pass_create_info.pNext = NULL;
+ render_pass_create_info.flags = 0;
+ render_pass_create_info.attachmentCount = attachments.size();
+ render_pass_create_info.pAttachments = attachments.ptr();
+ render_pass_create_info.subpassCount = 1;
+ render_pass_create_info.pSubpasses = &subpass;
+ render_pass_create_info.dependencyCount = 0;
+ render_pass_create_info.pDependencies = NULL;
+
+ VkRenderPass render_pass;
+ VkResult res = vkCreateRenderPass(device, &render_pass_create_info, NULL, &render_pass);
+ ERR_FAIL_COND_V(res, VK_NULL_HANDLE);
+
+ if (r_color_attachment_count) {
+ *r_color_attachment_count = color_references.size();
+ }
+ return render_pass;
+}
+
+RenderingDevice::ID RenderingDeviceVulkan::framebuffer_format_create(const Vector<AttachmentFormat> &p_format) {
+
+ _THREAD_SAFE_METHOD_
+
+ FramebufferFormatKey key;
+ key.attachments = p_format;
+
+ const Map<FramebufferFormatKey, ID>::Element *E = framebuffer_format_cache.find(key);
+ if (E) {
+ //exists, return
+ return E->get();
+ }
+
+ int color_references;
+ VkRenderPass render_pass = _render_pass_create(p_format, INITIAL_ACTION_CLEAR, FINAL_ACTION_DISCARD, &color_references); //actions don't matter for this use case
+
+ ID id = ID(framebuffer_format_cache.size()) | (ID(ID_TYPE_FRAMEBUFFER_FORMAT) << ID(ID_BASE_SHIFT));
+
+ E = framebuffer_format_cache.insert(key, id);
+ FramebufferFormat fb_format;
+ fb_format.E = E;
+ fb_format.color_attachments = color_references;
+ fb_format.render_pass = render_pass;
+ framebuffer_formats[id] = fb_format;
+ return id;
+}
+
+/***********************/
+/**** RENDER TARGET ****/
+/***********************/
+
+RenderingDevice::ID RenderingDeviceVulkan::framebuffer_create(const Vector<ID> &p_texture_attachments, ID p_format_check) {
+
+ _THREAD_SAFE_METHOD_
+
+ Vector<AttachmentFormat> attachments;
+ Size2i size;
+
+ for (int i = 0; i < p_texture_attachments.size(); i++) {
+ Texture *texture = texture_owner.getornull(p_texture_attachments[i]);
+ ERR_FAIL_COND_V_MSG(!texture, INVALID_ID, "Texture index supplied for framebuffer (" + itos(i) + ") is not a valid texture.");
+
+ if (i == 0) {
+ size.width = texture->width;
+ size.height = texture->height;
+ } else {
+ ERR_FAIL_COND_V_MSG((uint32_t)size.width != texture->width || (uint32_t)size.height != texture->height, INVALID_ID,
+ "All textures in a framebuffer should be the same size.");
+ }
+
+ AttachmentFormat af;
+ af.format = texture->format;
+ af.samples = texture->samples;
+ af.usage_flags = texture->usage_flags;
+ attachments.push_back(af);
+ }
+
+ ID format_id = framebuffer_format_create(attachments);
+ if (format_id == INVALID_ID) {
+ return INVALID_ID;
+ }
+
+ ERR_FAIL_COND_V_MSG(p_format_check != INVALID_ID && format_id != p_format_check, INVALID_ID,
+ "The format used to check this framebuffer differs from the intended framebuffer format.");
+
+ Framebuffer framebuffer;
+ framebuffer.format_id = format_id;
+ framebuffer.texture_ids = p_texture_attachments;
+ framebuffer.size = size;
+
+ ID id = framebuffer_owner.make_id(framebuffer);
+
+ for (int i = 0; i < p_texture_attachments.size(); i++) {
+ _add_dependency(id, p_texture_attachments[i]);
+ }
+
+ return id;
+}
+
+RenderingDevice::ID RenderingDeviceVulkan::framebuffer_get_format(ID p_framebuffer) {
+
+ _THREAD_SAFE_METHOD_
+
+ Framebuffer *framebuffer = framebuffer_owner.getornull(p_framebuffer);
+ ERR_FAIL_COND_V(!framebuffer, INVALID_ID);
+
+ return framebuffer->format_id;
+}
+
+/*****************/
+/**** SAMPLER ****/
+/*****************/
+
+RenderingDevice::ID RenderingDeviceVulkan::sampler_create(const SamplerState &p_state) {
+
+ _THREAD_SAFE_METHOD_
+
+ VkSamplerCreateInfo sampler_create_info;
+ sampler_create_info.sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO;
+ sampler_create_info.pNext = NULL;
+ sampler_create_info.flags = 0;
+ sampler_create_info.magFilter = p_state.mag_filter == SAMPLER_FILTER_LINEAR ? VK_FILTER_LINEAR : VK_FILTER_NEAREST;
+ sampler_create_info.minFilter = p_state.min_filter == SAMPLER_FILTER_LINEAR ? VK_FILTER_LINEAR : VK_FILTER_NEAREST;
+ sampler_create_info.mipmapMode = p_state.mip_filter == SAMPLER_FILTER_LINEAR ? VK_SAMPLER_MIPMAP_MODE_LINEAR : VK_SAMPLER_MIPMAP_MODE_NEAREST;
+
+ ERR_FAIL_INDEX_V(p_state.repeat_u, SAMPLER_REPEAT_MODE_MAX, INVALID_ID);
+ sampler_create_info.addressModeU = address_modes[p_state.repeat_u];
+ ERR_FAIL_INDEX_V(p_state.repeat_v, SAMPLER_REPEAT_MODE_MAX, INVALID_ID);
+ sampler_create_info.addressModeV = address_modes[p_state.repeat_v];
+ ERR_FAIL_INDEX_V(p_state.repeat_w, SAMPLER_REPEAT_MODE_MAX, INVALID_ID);
+ sampler_create_info.addressModeW = address_modes[p_state.repeat_w];
+
+ sampler_create_info.mipLodBias = p_state.lod_bias;
+ sampler_create_info.anisotropyEnable = p_state.use_anisotropy;
+ sampler_create_info.maxAnisotropy = p_state.anisotropy_max;
+ sampler_create_info.compareEnable = p_state.enable_compare;
+
+ ERR_FAIL_INDEX_V(p_state.compare_op, COMPARE_OP_MAX, INVALID_ID);
+ sampler_create_info.compareOp = compare_operators[p_state.compare_op];
+
+ sampler_create_info.minLod = p_state.min_lod;
+ sampler_create_info.maxLod = p_state.max_lod;
+
+ ERR_FAIL_INDEX_V(p_state.border_color, SAMPLER_BORDER_COLOR_MAX, INVALID_ID);
+ sampler_create_info.borderColor = sampler_border_colors[p_state.border_color];
+
+ sampler_create_info.unnormalizedCoordinates = p_state.unnormalized_uvw;
+
+ VkSampler sampler;
+ VkResult res = vkCreateSampler(device, &sampler_create_info, NULL, &sampler);
+ ERR_FAIL_COND_V(res, INVALID_ID);
+
+ return sampler_owner.make_id(sampler);
+}
+
+/**********************/
+/**** VERTEX ARRAY ****/
+/**********************/
+
+RenderingDevice::ID RenderingDeviceVulkan::vertex_buffer_create(uint32_t p_size_bytes, const PoolVector<uint8_t> &p_data) {
+
+ _THREAD_SAFE_METHOD_
+
+ ERR_FAIL_COND_V(p_data.size() && (uint32_t)p_data.size() != p_size_bytes, INVALID_ID);
+
+ Buffer buffer;
+ _buffer_allocate(&buffer, p_size_bytes, VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_VERTEX_BUFFER_BIT, VMA_MEMORY_USAGE_GPU_ONLY);
+ if (p_data.size()) {
+ uint64_t data_size = p_data.size();
+ PoolVector<uint8_t>::Read r = p_data.read();
+ _buffer_update(&buffer, 0, r.ptr(), data_size);
+ }
+ return vertex_buffer_owner.make_id(buffer);
+}
+
+// Internally reference counted; this ID is guaranteed to be unique for a given description, but it must be freed as many times as it was allocated.
+RenderingDevice::ID RenderingDeviceVulkan::vertex_description_create(const Vector<VertexDescription> &p_vertex_descriptions) {
+
+ _THREAD_SAFE_METHOD_
+
+ VertexDescriptionKey key;
+ key.vertex_descriptions = p_vertex_descriptions;
+ const Map<VertexDescriptionKey, ID>::Element *E = vertex_description_cache.find(key);
+ if (E) {
+ return E->get();
+ }
+ //does not exist, create one and cache it
+ VertexDescriptionCache vdcache;
+ vdcache.bindings = memnew_arr(VkVertexInputBindingDescription, p_vertex_descriptions.size());
+ vdcache.attributes = memnew_arr(VkVertexInputAttributeDescription, p_vertex_descriptions.size());
+ Set<int> used_locations;
+ for (int i = 0; i < p_vertex_descriptions.size(); i++) {
+ ERR_CONTINUE(p_vertex_descriptions[i].format >= DATA_FORMAT_MAX);
+ ERR_FAIL_COND_V(used_locations.has(p_vertex_descriptions[i].location), INVALID_ID);
+
+ ERR_FAIL_COND_V_MSG(get_format_vertex_size(p_vertex_descriptions[i].format) == 0, INVALID_ID,
+ "Data format for attachment (" + itos(i) + ") is not valid for a vertex array.");
+
+ vdcache.bindings[i].binding = i;
+ vdcache.bindings[i].stride = p_vertex_descriptions[i].stride;
+ vdcache.bindings[i].inputRate = p_vertex_descriptions[i].frequency == VERTEX_FREQUENCY_INSTANCE ? VK_VERTEX_INPUT_RATE_INSTANCE : VK_VERTEX_INPUT_RATE_VERTEX;
+ vdcache.attributes[i].binding = i;
+ vdcache.attributes[i].location = p_vertex_descriptions[i].location;
+ vdcache.attributes[i].format = vulkan_formats[p_vertex_descriptions[i].format];
+ vdcache.attributes[i].offset = p_vertex_descriptions[i].offset;
+ used_locations.insert(p_vertex_descriptions[i].location);
+ }
+
+ vdcache.create_info.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO;
+ vdcache.create_info.pNext = NULL;
+ vdcache.create_info.flags = 0;
+
+ vdcache.create_info.vertexAttributeDescriptionCount = p_vertex_descriptions.size();
+ vdcache.create_info.pVertexAttributeDescriptions = vdcache.attributes;
+
+ vdcache.create_info.vertexBindingDescriptionCount = p_vertex_descriptions.size();
+ vdcache.create_info.pVertexBindingDescriptions = vdcache.bindings;
+
+ ID id = ID(vertex_description_cache.size()) | (ID(ID_TYPE_VERTEX_DESCRIPTION) << ID_BASE_SHIFT);
+ vdcache.E = vertex_description_cache.insert(key, id);
+ vertex_descriptions[id] = vdcache;
+ return id;
+}
+
+RenderingDevice::ID RenderingDeviceVulkan::vertex_array_create(uint32_t p_vertex_count, ID p_vertex_description, const Vector<ID> &p_src_buffers) {
+
+ _THREAD_SAFE_METHOD_
+
+ ERR_FAIL_COND_V(!vertex_descriptions.has(p_vertex_description), INVALID_ID);
+ const VertexDescriptionCache &vd = vertex_descriptions[p_vertex_description];
+
+ ERR_FAIL_COND_V(vd.E->key().vertex_descriptions.size() != p_src_buffers.size(), INVALID_ID);
+
+ for (int i = 0; i < p_src_buffers.size(); i++) {
+ ERR_FAIL_COND_V(!vertex_buffer_owner.owns(p_src_buffers[i]), INVALID_ID);
+ }
+
+ VertexArray vertex_array;
+
+ vertex_array.vertex_count = p_vertex_count;
+ vertex_array.description = p_vertex_description;
+ vertex_array.max_instances_allowed = 0xFFFFFFFF; //by default as many as you want
+ for (int i = 0; i < p_src_buffers.size(); i++) {
+ Buffer *buffer = vertex_buffer_owner.getornull(p_src_buffers[i]);
+
+ //validate with buffer
+ {
+ const VertexDescription &atf = vd.E->key().vertex_descriptions[i];
+ uint32_t element_size = get_format_vertex_size(atf.format);
+ ERR_FAIL_COND_V(element_size == 0, INVALID_ID); //should never happen, since this was prevalidated
+
+ if (atf.frequency == VERTEX_FREQUENCY_VERTEX) {
+ //validate size for regular drawing
+ uint64_t total_size = uint64_t(atf.stride) * (p_vertex_count - 1) + atf.offset + element_size;
+ ERR_FAIL_COND_V_MSG(total_size > buffer->size, INVALID_ID,
+ "Attachment (" + itos(i) + ") will read past the end of the buffer.");
+
+ } else {
+ //validate size for instances drawing
+ uint64_t available = buffer->size - atf.offset;
+ ERR_FAIL_COND_V_MSG(available < element_size, INVALID_ID,
+ "Attachment (" + itos(i) + ") uses instancing, but the buffer is too small.");
+
+ uint32_t instances_allowed = available / atf.stride;
+ vertex_array.max_instances_allowed = MIN(instances_allowed, vertex_array.max_instances_allowed);
+ }
+ }
+
+ vertex_array.buffers.push_back(buffer->buffer);
+ vertex_array.offsets.push_back(0); //offset unused, but passing anyway
+ }
+
+ ID id = vertex_array_owner.make_id(vertex_array);
+ for (int i = 0; i < p_src_buffers.size(); i++) {
+ _add_dependency(id, p_src_buffers[i]);
+ }
+
+ return id;
+}
+
+RenderingDevice::ID RenderingDeviceVulkan::index_buffer_create(uint32_t p_index_count, IndexBufferFormat p_format, const PoolVector<uint8_t> &p_data, bool p_use_restart_indices) {
+
+ _THREAD_SAFE_METHOD_
+
+ ERR_FAIL_COND_V(p_index_count == 0, INVALID_ID);
+
+ IndexBuffer index_buffer;
+ index_buffer.index_type = (p_format == INDEX_BUFFER_FORMAT_UINT16) ? VK_INDEX_TYPE_UINT16 : VK_INDEX_TYPE_UINT32;
+ index_buffer.supports_restart_indices = p_use_restart_indices;
+ index_buffer.index_count = p_index_count;
+ uint32_t size_bytes = p_index_count * ((p_format == INDEX_BUFFER_FORMAT_UINT16) ? 2 : 4);
+#ifdef DEBUG_ENABLED
+ if (p_data.size()) {
+ index_buffer.max_index = 0;
+ ERR_FAIL_COND_V_MSG((uint32_t)p_data.size() != size_bytes, INVALID_ID,
+ "Default index buffer initializer array size (" + itos(p_data.size()) + ") does not match format required size (" + itos(size_bytes) + ").");
+ PoolVector<uint8_t>::Read r = p_data.read();
+ if (p_format == INDEX_BUFFER_FORMAT_UINT16) {
+ const uint16_t *index16 = (const uint16_t *)r.ptr();
+ for (uint32_t i = 0; i < p_index_count; i++) {
+ if (p_use_restart_indices && index16[i] == 0xFFFF) {
+ continue; //restart index, ignore
+ }
+ index_buffer.max_index = MAX(index16[i], index_buffer.max_index);
+ }
+ } else {
+ const uint32_t *index32 = (const uint32_t *)r.ptr();
+ for (uint32_t i = 0; i < p_index_count; i++) {
+ if (p_use_restart_indices && index32[i] == 0xFFFFFFFF) {
+ continue; //restart index, ignore
+ }
+ index_buffer.max_index = MAX(index32[i], index_buffer.max_index);
+ }
+ }
+ } else {
+ index_buffer.max_index = 0xFFFFFFFF;
+ }
+#else
+ index_buffer.max_index = 0xFFFFFFFF;
+#endif
+ _buffer_allocate(&index_buffer, size_bytes, VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_INDEX_BUFFER_BIT, VMA_MEMORY_USAGE_GPU_ONLY);
+ if (p_data.size()) {
+ uint64_t data_size = p_data.size();
+ PoolVector<uint8_t>::Read r = p_data.read();
+ _buffer_update(&index_buffer, 0, r.ptr(), data_size);
+ }
+ return index_buffer_owner.make_id(index_buffer);
+}
+
+RenderingDevice::ID RenderingDeviceVulkan::index_array_create(ID p_index_buffer, uint32_t p_index_offset, uint32_t p_index_count) {
+
+ _THREAD_SAFE_METHOD_
+
+ ERR_FAIL_COND_V(!index_buffer_owner.owns(p_index_buffer), INVALID_ID);
+
+ IndexBuffer *index_buffer = index_buffer_owner.getornull(p_index_buffer);
+
+ ERR_FAIL_COND_V(p_index_count == 0, INVALID_ID);
+ ERR_FAIL_COND_V(p_index_offset + p_index_count > index_buffer->index_count, INVALID_ID);
+
+ IndexArray index_array;
+ index_array.max_index = index_buffer->max_index;
+ index_array.buffer = index_buffer->buffer;
+ index_array.offset = p_index_offset;
+ index_array.indices = p_index_count;
+ index_array.index_type = index_buffer->index_type;
+ index_array.supports_restart_indices = index_buffer->supports_restart_indices;
+
+ ID id = index_array_owner.make_id(index_array);
+ _add_dependency(id, p_index_buffer);
+ return id;
+}
+
+/****************/
+/**** SHADER ****/
+/****************/
+
+static const TBuiltInResource default_builtin_resource = {
+ .maxLights = 32,
+ .maxClipPlanes = 6,
+ .maxTextureUnits = 32,
+ .maxTextureCoords = 32,
+ .maxVertexAttribs = 64,
+ .maxVertexUniformComponents = 4096,
+ .maxVaryingFloats = 64,
+ .maxVertexTextureImageUnits = 32,
+ .maxCombinedTextureImageUnits = 80,
+ .maxTextureImageUnits = 32,
+ .maxFragmentUniformComponents = 4096,
+ .maxDrawBuffers = 32,
+ .maxVertexUniformVectors = 128,
+ .maxVaryingVectors = 8,
+ .maxFragmentUniformVectors = 16,
+ .maxVertexOutputVectors = 16,
+ .maxFragmentInputVectors = 15,
+ .minProgramTexelOffset = -8,
+ .maxProgramTexelOffset = 7,
+ .maxClipDistances = 8,
+ .maxComputeWorkGroupCountX = 65535,
+ .maxComputeWorkGroupCountY = 65535,
+ .maxComputeWorkGroupCountZ = 65535,
+ .maxComputeWorkGroupSizeX = 1024,
+ .maxComputeWorkGroupSizeY = 1024,
+ .maxComputeWorkGroupSizeZ = 64,
+ .maxComputeUniformComponents = 1024,
+ .maxComputeTextureImageUnits = 16,
+ .maxComputeImageUniforms = 8,
+ .maxComputeAtomicCounters = 8,
+ .maxComputeAtomicCounterBuffers = 1,
+ .maxVaryingComponents = 60,
+ .maxVertexOutputComponents = 64,
+ .maxGeometryInputComponents = 64,
+ .maxGeometryOutputComponents = 128,
+ .maxFragmentInputComponents = 128,
+ .maxImageUnits = 8,
+ .maxCombinedImageUnitsAndFragmentOutputs = 8,
+ .maxCombinedShaderOutputResources = 8,
+ .maxImageSamples = 0,
+ .maxVertexImageUniforms = 0,
+ .maxTessControlImageUniforms = 0,
+ .maxTessEvaluationImageUniforms = 0,
+ .maxGeometryImageUniforms = 0,
+ .maxFragmentImageUniforms = 8,
+ .maxCombinedImageUniforms = 8,
+ .maxGeometryTextureImageUnits = 16,
+ .maxGeometryOutputVertices = 256,
+ .maxGeometryTotalOutputComponents = 1024,
+ .maxGeometryUniformComponents = 1024,
+ .maxGeometryVaryingComponents = 64,
+ .maxTessControlInputComponents = 128,
+ .maxTessControlOutputComponents = 128,
+ .maxTessControlTextureImageUnits = 16,
+ .maxTessControlUniformComponents = 1024,
+ .maxTessControlTotalOutputComponents = 4096,
+ .maxTessEvaluationInputComponents = 128,
+ .maxTessEvaluationOutputComponents = 128,
+ .maxTessEvaluationTextureImageUnits = 16,
+ .maxTessEvaluationUniformComponents = 1024,
+ .maxTessPatchComponents = 120,
+ .maxPatchVertices = 32,
+ .maxTessGenLevel = 64,
+ .maxViewports = 16,
+ .maxVertexAtomicCounters = 0,
+ .maxTessControlAtomicCounters = 0,
+ .maxTessEvaluationAtomicCounters = 0,
+ .maxGeometryAtomicCounters = 0,
+ .maxFragmentAtomicCounters = 8,
+ .maxCombinedAtomicCounters = 8,
+ .maxAtomicCounterBindings = 1,
+ .maxVertexAtomicCounterBuffers = 0,
+ .maxTessControlAtomicCounterBuffers = 0,
+ .maxTessEvaluationAtomicCounterBuffers = 0,
+ .maxGeometryAtomicCounterBuffers = 0,
+ .maxFragmentAtomicCounterBuffers = 1,
+ .maxCombinedAtomicCounterBuffers = 1,
+ .maxAtomicCounterBufferSize = 16384,
+ .maxTransformFeedbackBuffers = 4,
+ .maxTransformFeedbackInterleavedComponents = 64,
+ .maxCullDistances = 8,
+ .maxCombinedClipAndCullDistances = 8,
+ .maxSamples = 4,
+ .limits = {
+ .nonInductiveForLoops = 1,
+ .whileLoops = 1,
+ .doWhileLoops = 1,
+ .generalUniformIndexing = 1,
+ .generalAttributeMatrixVectorIndexing = 1,
+ .generalVaryingIndexing = 1,
+ .generalSamplerIndexing = 1,
+ .generalVariableIndexing = 1,
+ .generalConstantMatrixVectorIndexing = 1,
+ }
+};
+
+static const char *shader_stage_names[RenderingDevice::SHADER_STAGE_MAX] = {
+ "Vertex",
+ "Fragment",
+ "TesselationControl",
+ "TesselationEvaluation",
+ "Compute"
+};
+
+static VkShaderStageFlagBits shader_stage_masks[RenderingDevice::SHADER_STAGE_MAX] = {
+ VK_SHADER_STAGE_VERTEX_BIT,
+ VK_SHADER_STAGE_FRAGMENT_BIT,
+ VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT,
+ VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT,
+ VK_SHADER_STAGE_COMPUTE_BIT,
+};
+
+bool RenderingDeviceVulkan::_uniform_add_binding(Vector<Vector<VkDescriptorSetLayoutBinding> > &bindings, Vector<Vector<Shader::UniformInfo> > &uniform_infos, const glslang::TObjectReflection &reflection, RenderingDevice::ShaderStage p_stage, String *r_error) {
+
+ VkDescriptorSetLayoutBinding layout_binding;
+ Shader::UniformInfo info;
+
+ print_line("*** Stage " + itos(p_stage) + " uniform: " + reflection.name.c_str());
+
+ switch (reflection.getType()->getBasicType()) {
+ case glslang::EbtSampler: {
+
+ print_line("DEBUG: IsSampler");
+ if (reflection.getType()->getSampler().dim == glslang::EsdBuffer) {
+ //texture buffers
+ if (reflection.getType()->getSampler().isCombined()) {
+ layout_binding.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER;
+ info.type = UNIFORM_TYPE_SAMPLER_WITH_TEXTURE_BUFFER;
+ print_line("DEBUG: texel combined");
+ } else if (reflection.getType()->getSampler().isTexture()) {
+ layout_binding.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER;
+ info.type = UNIFORM_TYPE_TEXTURE_BUFFER;
+ print_line("DEBUG: texel alone");
+ } else if (reflection.getType()->getSampler().isImage()) {
+ layout_binding.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER;
+ info.type = UNIFORM_TYPE_IMAGE_BUFFER;
+ print_line("DEBUG: texel buffer");
+ } else {
+ if (r_error) {
+ *r_error = "On shader stage '" + String(shader_stage_names[p_stage]) + "', uniform '" + reflection.name.c_str() + "' is of unsupported buffer type.";
+ }
+ return false;
+ }
+ } else if (reflection.getType()->getSampler().isCombined()) {
+ layout_binding.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
+ info.type = UNIFORM_TYPE_SAMPLER_WITH_TEXTURE;
+ print_line("DEBUG: combined");
+ } else if (reflection.getType()->getSampler().isPureSampler()) {
+ layout_binding.descriptorType = VK_DESCRIPTOR_TYPE_SAMPLER;
+ info.type = UNIFORM_TYPE_SAMPLER;
+ print_line("DEBUG: sampler");
+ } else if (reflection.getType()->getSampler().isTexture()) {
+ layout_binding.descriptorType = VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE;
+ info.type = UNIFORM_TYPE_TEXTURE;
+ print_line("DEBUG: image");
+ } else if (reflection.getType()->getSampler().isImage()) {
+ layout_binding.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE;
+ info.type = UNIFORM_TYPE_IMAGE;
+ print_line("DEBUG: storage image");
+ } else {
+ print_line("DEBUG: sampler unknown");
+ if (r_error) {
+ *r_error = "On shader stage '" + String(shader_stage_names[p_stage]) + "', uniform '" + reflection.name.c_str() + "' is of unsupported sampler type.";
+ }
+ return false;
+ }
+
+ if (reflection.getType()->isArray()) {
+ layout_binding.descriptorCount = reflection.getType()->getArraySizes()->getCumulativeSize();
+ print_line("DEBUG: array of size: " + itos(layout_binding.descriptorCount));
+ } else {
+ layout_binding.descriptorCount = 1;
+ }
+
+ info.length = layout_binding.descriptorCount;
+
+ } break;
+ /*case glslang::EbtStruct: {
+ print_line("DEBUG: Struct");
+
+ } break;*/
+ case glslang::EbtBlock: {
+ print_line("DEBUG: Block");
+ if (reflection.getType()->getQualifier().storage == glslang::EvqUniform) {
+ print_line("DEBUG: Uniform buffer");
+ layout_binding.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
+ info.type = UNIFORM_TYPE_UNIFORM_BUFFER;
+
+ } else if (reflection.getType()->getQualifier().storage == glslang::EvqBuffer) {
+ layout_binding.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
+ info.type = UNIFORM_TYPE_STORAGE_BUFFER;
+ print_line("DEBUG: Storage buffer");
+ } else {
+ if (r_error) {
+ *r_error = "On shader stage '" + String(shader_stage_names[p_stage]) + "', uniform '" + reflection.name.c_str() + "' is of unsupported block type.";
+ }
+ return false;
+ }
+
+ if (reflection.getType()->isArray()) {
+ layout_binding.descriptorCount = reflection.getType()->getArraySizes()->getCumulativeSize();
+ print_line("DEBUG: array of size: " + itos(layout_binding.descriptorCount));
+ } else {
+ layout_binding.descriptorCount = 1;
+ }
+
+ info.length = reflection.size;
+
+ } break;
+ /*case glslang::EbtReference: {
+
+ } break;*/
+ /*case glslang::EbtAtomicUint: {
+
+ } break;*/
+ default: {
+
+ if (reflection.getType()->getQualifier().hasOffset()) {
+ //member of uniform block?
+ return true;
+ }
+
+ if (r_error) {
+ *r_error = "On shader stage '" + String(shader_stage_names[p_stage]) + "', uniform '" + reflection.name.c_str() + "' unsupported uniform type.";
+ }
+ return false;
+ }
+ }
+
+ if (!reflection.getType()->getQualifier().hasBinding()) {
+ if (r_error) {
+ *r_error = "On shader stage '" + String(shader_stage_names[p_stage]) + "', uniform '" + reflection.name.c_str() + "' lacks a binding number.";
+ }
+ return false;
+ }
+
+ uint32_t set = reflection.getType()->getQualifier().hasSet() ? reflection.getType()->getQualifier().layoutSet : 0;
+
+ if (set >= limits.maxBoundDescriptorSets) {
+ if (r_error) {
+ *r_error = "On shader stage '" + String(shader_stage_names[p_stage]) + "', uniform '" + reflection.name.c_str() + "' uses a set (" + itos(set) + ") index larger than what is supported by the hardware (" + itos(limits.maxBoundDescriptorSets) + ").";
+ }
+ return false;
+ }
+
+ uint32_t binding = reflection.getType()->getQualifier().layoutBinding;
+
+ if (set < (uint32_t)bindings.size()) {
+ //check if this already exists
+ for (int i = 0; i < bindings[set].size(); i++) {
+ if (bindings[set][i].binding == binding) {
+ //already exists, verify that it's the same type
+ if (bindings[set][i].descriptorType != layout_binding.descriptorType) {
+ if (r_error) {
+ *r_error = "On shader stage '" + String(shader_stage_names[p_stage]) + "', uniform '" + reflection.name.c_str() + "' trying to re-use location for set=" + itos(set) + ", binding=" + itos(binding) + " with different uniform type.";
+ }
+ return false;
+ }
+
+ //also, verify that it's the same size
+ if (bindings[set][i].descriptorCount != layout_binding.descriptorCount || uniform_infos[set][i].length != info.length) {
+ if (r_error) {
+ *r_error = "On shader stage '" + String(shader_stage_names[p_stage]) + "', uniform '" + reflection.name.c_str() + "' trying to re-use location for set=" + itos(set) + ", binding=" + itos(binding) + " with different uniform size.";
+ }
+ return false;
+ }
+
+ //just append stage mask and return
+ bindings.write[set].write[i].stageFlags |= shader_stage_masks[p_stage];
+ uniform_infos.write[set].write[i].stages |= 1 << p_stage;
+ return true;
+ }
+ }
+ }
+ layout_binding.binding = binding;
+ layout_binding.stageFlags = shader_stage_masks[p_stage];
+ layout_binding.pImmutableSamplers = NULL; //no support for this yet
+
+ info.stages = 1 << p_stage;
+ info.binding = binding;
+
+ if (set >= (uint32_t)bindings.size()) {
+ bindings.resize(set + 1);
+ uniform_infos.resize(set + 1);
+ }
+
+ bindings.write[set].push_back(layout_binding);
+ uniform_infos.write[set].push_back(info);
+
+ return true;
+}
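+
+// Illustrative mapping (a sketch, not extra validation): a GLSL declaration
+// such as
+//
+//   layout(set = 0, binding = 1) uniform sampler2D albedo;
+//
+// is reported by glslang as an EbtSampler with isCombined() == true, so the
+// code above emits a VkDescriptorSetLayoutBinding with binding = 1,
+// descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER and
+// descriptorCount = 1 in set 0; declaring the same set/binding in another
+// stage only ORs that stage's bit into stageFlags, as handled above.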
+
+RenderingDevice::ID RenderingDeviceVulkan::shader_create_from_source(const Vector<ShaderStageSource> &p_stages, String *r_error, bool p_allow_cache) {
+
+ _THREAD_SAFE_METHOD_
+
+ // initialize glslang if it isn't initialized already; this needs to happen
+ // only once per process, and repeated calls are safe
+ glslang::InitializeProcess();
+ EShLanguage stages[SHADER_STAGE_MAX] = {
+ EShLangVertex,
+ EShLangFragment,
+ EShLangTessControl,
+ EShLangTessEvaluation,
+ EShLangCompute
+ };
+
+ int ClientInputSemanticsVersion = 100; // maps to, say, #define VULKAN 100
+ glslang::EShTargetClientVersion VulkanClientVersion = glslang::EShTargetVulkan_1_0;
+ glslang::EShTargetLanguageVersion TargetVersion = glslang::EShTargetSpv_1_0;
+
+ Vector<std::vector<unsigned int> > spirv_code;
+
+ glslang::TShader::ForbidIncluder includer;
+
+ //descriptor layouts
+ Vector<Vector<VkDescriptorSetLayoutBinding> > bindings;
+ Vector<Vector<Shader::UniformInfo> > uniform_info;
+ Vector<int> vertex_input_locations;
+ int fragment_outputs = 0;
+
+ uint32_t stages_processed = 0;
+
+ for (int i = 0; i < p_stages.size(); i++) {
+
+ if (stages_processed & (1 << p_stages[i].shader_stage)) {
+ if (r_error) {
+ (*r_error) = "Stage " + String(shader_stage_names[p_stages[i].shader_stage]) + " submitted more than once.";
+ }
+ return INVALID_ID;
+ }
+ glslang::TShader shader(stages[p_stages[i].shader_stage]);
+ CharString cs = p_stages[i].shader_source.utf8();
+ const char *cs_strings = cs.get_data();
+ shader.setStrings(&cs_strings, 1);
+ shader.setEnvInput(glslang::EShSourceGlsl, stages[p_stages[i].shader_stage], glslang::EShClientVulkan, ClientInputSemanticsVersion);
+ shader.setEnvClient(glslang::EShClientVulkan, VulkanClientVersion);
+ shader.setEnvTarget(glslang::EShTargetSpv, TargetVersion);
+
+ EShMessages messages = (EShMessages)(EShMsgSpvRules | EShMsgVulkanRules);
+ const int DefaultVersion = 100;
+ std::string pre_processed_code;
+
+ //preprocess
+ if (!shader.preprocess(&default_builtin_resource, DefaultVersion, ENoProfile, false, false, messages, &pre_processed_code, includer)) {
+
+ if (r_error) {
+ (*r_error) = "Failed pre-processing on shader stage: " + String(shader_stage_names[p_stages[i].shader_stage]) + "\n";
+ (*r_error) += shader.getInfoLog();
+ (*r_error) += "\n";
+ (*r_error) += shader.getInfoDebugLog();
+ }
+
+ return INVALID_ID;
+ }
+ //feed the preprocessed source back in
+ cs_strings = pre_processed_code.c_str();
+ shader.setStrings(&cs_strings, 1);
+
+ //parse
+ if (!shader.parse(&default_builtin_resource, DefaultVersion, false, messages)) {
+ if (r_error) {
+ (*r_error) = "Failed parsing on shader stage: " + String(shader_stage_names[p_stages[i].shader_stage]) + "\n";
+ (*r_error) += shader.getInfoLog();
+ (*r_error) += "\n";
+ (*r_error) += shader.getInfoDebugLog();
+ }
+ return INVALID_ID;
+ }
+
+ //link
+ glslang::TProgram program;
+ program.addShader(&shader);
+
+ if (!program.link(messages)) {
+ if (r_error) {
+ (*r_error) = "Failed linking on shader stage: " + String(shader_stage_names[p_stages[i].shader_stage]) + "\n";
+ (*r_error) += program.getInfoLog();
+ (*r_error) += "\n";
+ (*r_error) += program.getInfoDebugLog();
+ }
+ return INVALID_ID;
+ }
+
+ //obtain bindings for descriptor layout
+ program.mapIO();
+ program.buildReflection();
+ program.dumpReflection();
+
+ for (int j = 0; j < program.getNumUniformVariables(); j++) {
+ if (!_uniform_add_binding(bindings, uniform_info, program.getUniform(j), p_stages[i].shader_stage, r_error)) {
+ return INVALID_ID;
+ }
+ }
+
+ for (int j = 0; j < program.getNumUniformBlocks(); j++) {
+ if (!_uniform_add_binding(bindings, uniform_info, program.getUniformBlock(j), p_stages[i].shader_stage, r_error)) {
+ return INVALID_ID;
+ }
+ }
+
+ for (int j = 0; j < program.getNumBufferVariables(); j++) {
+ if (!_uniform_add_binding(bindings, uniform_info, program.getBufferVariable(j), p_stages[i].shader_stage, r_error)) {
+ return INVALID_ID;
+ }
+ }
+
+ for (int j = 0; j < program.getNumBufferBlocks(); j++) {
+ if (!_uniform_add_binding(bindings, uniform_info, program.getBufferBlock(j), p_stages[i].shader_stage, r_error)) {
+ return INVALID_ID;
+ }
+ }
+
+ if (p_stages[i].shader_stage == SHADER_STAGE_VERTEX) {
+ for (int j = 0; j < program.getNumPipeInputs(); j++) {
+ if (program.getPipeInput(j).getType()->getQualifier().hasLocation()) {
+ int location = program.getPipeInput(j).getType()->getQualifier().layoutLocation;
+ print_line("found vertex location: " + itos(location));
+ if (vertex_input_locations.find(location) == -1) {
+ vertex_input_locations.push_back(location);
+ }
+ }
+ }
+ }
+
+ if (p_stages[i].shader_stage == SHADER_STAGE_FRAGMENT) {
+
+ fragment_outputs = program.getNumPipeOutputs();
+ }
+
+ std::vector<uint32_t> SpirV;
+ spv::SpvBuildLogger logger;
+ glslang::SpvOptions spvOptions;
+ glslang::GlslangToSpv(*program.getIntermediate(stages[p_stages[i].shader_stage]), SpirV, &logger, &spvOptions);
+
+ spirv_code.push_back(SpirV);
+
+ stages_processed |= (1 << p_stages[i].shader_stage);
+ }
+
+ //all good, let's create modules
+
+ Shader shader;
+
+ shader.vertex_input_locations = vertex_input_locations;
+ shader.fragment_outputs = fragment_outputs;
+
+ bool success = true;
+ for (int i = 0; i < p_stages.size(); i++) {
+ VkShaderModuleCreateInfo shader_module_create_info;
+ shader_module_create_info.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO;
+ shader_module_create_info.pNext = NULL;
+ shader_module_create_info.flags = 0;
+ shader_module_create_info.codeSize = spirv_code[i].size() * sizeof(uint32_t);
+ shader_module_create_info.pCode = &spirv_code[i][0];
+
+ VkShaderModule module;
+ VkResult res = vkCreateShaderModule(device, &shader_module_create_info, NULL, &module);
+ if (res) {
+ success = false;
+ ERR_PRINT("Error creating shader module for stage: " + String(shader_stage_names[p_stages[i].shader_stage]));
+ break;
+ }
+
+ VkPipelineShaderStageCreateInfo shader_stage;
+ shader_stage.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
+ shader_stage.pNext = NULL;
+ shader_stage.flags = 0;
+ shader_stage.stage = shader_stage_masks[p_stages[i].shader_stage];
+ shader_stage.module = module;
+ shader_stage.pName = "main";
+ shader_stage.pSpecializationInfo = NULL;
+
+ shader.pipeline_stages.push_back(shader_stage);
+ }
+ //proceed to create descriptor sets
+
+ if (success) {
+
+ for (int i = 0; i < bindings.size(); i++) {
+
+ //per the spec, empty set layouts are fine if they are unused (binding count will be 0)
+ VkDescriptorSetLayoutCreateInfo layout_create_info;
+ layout_create_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
+ layout_create_info.pNext = NULL;
+ layout_create_info.flags = 0;
+ layout_create_info.bindingCount = bindings[i].size();
+ layout_create_info.pBindings = bindings[i].ptr();
+
+ VkDescriptorSetLayout layout;
+ VkResult res = vkCreateDescriptorSetLayout(device, &layout_create_info, NULL, &layout);
+ if (res) {
+ ERR_PRINT("Error creating descriptor set layout for set " + itos(i));
+ success = false;
+ break;
+ }
+
+ Shader::Set set;
+ set.descriptor_set_layout = layout;
+ set.uniform_info = uniform_info[i];
+ //sort and hash
+ set.uniform_info.sort();
+
+ uint32_t h = set.uniform_info.size() ? hash_djb2_one_32(0) : 0;
+ for (int j = 0; j < set.uniform_info.size(); j++) {
+ const Shader::UniformInfo &ui = set.uniform_info[j];
+ h = hash_djb2_one_32(ui.type, h);
+ h = hash_djb2_one_32(ui.binding, h);
+ h = hash_djb2_one_32(ui.length, h);
+ h = hash_djb2_one_32(ui.stages, h);
+ }
+
+ shader.sets.push_back(set);
+ shader.set_hashes.push_back(h);
+ }
+ }
+
+ if (success) {
+ //create pipeline layout
+ VkPipelineLayoutCreateInfo pipeline_layout_create_info;
+ pipeline_layout_create_info.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO;
+ pipeline_layout_create_info.pNext = NULL;
+ pipeline_layout_create_info.flags = 0;
+ pipeline_layout_create_info.setLayoutCount = shader.sets.size();
+
+ Vector<VkDescriptorSetLayout> layouts;
+ layouts.resize(shader.sets.size());
+
+ for (int i = 0; i < layouts.size(); i++) {
+ layouts.write[i] = shader.sets[i].descriptor_set_layout;
+ }
+
+ pipeline_layout_create_info.pSetLayouts = layouts.ptr();
+ //push constants are unsupported for now
+ pipeline_layout_create_info.pushConstantRangeCount = 0;
+ pipeline_layout_create_info.pPushConstantRanges = NULL;
+
+ VkResult err = vkCreatePipelineLayout(device, &pipeline_layout_create_info, NULL, &shader.pipeline_layout);
+
+ if (err) {
+ ERR_PRINT("Error creating pipeline layout.");
+ success = false;
+ }
+ }
+
+ if (!success) {
+ //clean up if failed
+ for (int i = 0; i < shader.pipeline_stages.size(); i++) {
+ vkDestroyShaderModule(device, shader.pipeline_stages[i].module, NULL);
+ }
+
+ for (int i = 0; i < shader.sets.size(); i++) {
+ vkDestroyDescriptorSetLayout(device, shader.sets[i].descriptor_set_layout, NULL);
+ }
+
+ return INVALID_ID;
+ }
+
+ return shader_owner.make_id(shader);
+}
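+
+// Usage sketch (illustrative only; `rd` is an assumed RenderingDevice
+// pointer, and the GLSL is a placeholder):
+//
+//   Vector<RenderingDevice::ShaderStageSource> stages;
+//   RenderingDevice::ShaderStageSource vertex;
+//   vertex.shader_stage = RenderingDevice::SHADER_STAGE_VERTEX;
+//   vertex.shader_source = "#version 450\nvoid main() { gl_Position = vec4(0.0); }";
+//   stages.push_back(vertex);
+//   String error;
+//   RenderingDevice::ID shader = rd->shader_create_from_source(stages, &error, true);
+//   if (shader == RenderingDevice::INVALID_ID) {
+//       print_line(error);
+//   }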
+
+/******************/
+/**** UNIFORMS ****/
+/******************/
+
+RenderingDevice::ID RenderingDeviceVulkan::uniform_buffer_create(uint32_t p_size_bytes, const PoolVector<uint8_t> &p_data) {
+
+ _THREAD_SAFE_METHOD_
+
+ ERR_FAIL_COND_V(p_data.size() && (uint32_t)p_data.size() != p_size_bytes, INVALID_ID);
+
+ Buffer buffer;
+ Error err = _buffer_allocate(&buffer, p_size_bytes, VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT, VMA_MEMORY_USAGE_GPU_ONLY);
+ ERR_FAIL_COND_V(err != OK, INVALID_ID);
+ if (p_data.size()) {
+ uint64_t data_size = p_data.size();
+ PoolVector<uint8_t>::Read r = p_data.read();
+ _buffer_update(&buffer, 0, r.ptr(), data_size);
+ }
+ return uniform_buffer_owner.make_id(buffer);
+}
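+
+// Usage sketch (illustrative; `rd` is an assumed RenderingDevice pointer):
+// when initial data is provided, its size must equal p_size_bytes exactly.
+//
+//   PoolVector<uint8_t> data;
+//   data.resize(16);
+//   RenderingDevice::ID ubo = rd->uniform_buffer_create(16, data);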
+
+RenderingDevice::ID RenderingDeviceVulkan::storage_buffer_create(uint32_t p_size_bytes, const PoolVector<uint8_t> &p_data) {
+
+ _THREAD_SAFE_METHOD_
+
+ ERR_FAIL_COND_V(p_data.size() && (uint32_t)p_data.size() != p_size_bytes, INVALID_ID);
+
+ Buffer buffer;
+ Error err = _buffer_allocate(&buffer, p_size_bytes, VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_STORAGE_BUFFER_BIT, VMA_MEMORY_USAGE_GPU_ONLY);
+ ERR_FAIL_COND_V(err != OK, INVALID_ID);
+
+ if (p_data.size()) {
+ uint64_t data_size = p_data.size();
+ PoolVector<uint8_t>::Read r = p_data.read();
+ _buffer_update(&buffer, 0, r.ptr(), data_size);
+ }
+ return storage_buffer_owner.make_id(buffer);
+}
+
+RenderingDevice::ID RenderingDeviceVulkan::texture_buffer_create(uint32_t p_size_elements, DataFormat p_format, const PoolVector<uint8_t> &p_data) {
+
+ _THREAD_SAFE_METHOD_
+
+ uint32_t element_size = get_format_vertex_size(p_format);
+ ERR_FAIL_COND_V_MSG(element_size == 0, INVALID_ID, "Format requested is not supported for texture buffers.");
+ uint64_t size_bytes = uint64_t(element_size) * p_size_elements;
+
+ ERR_FAIL_COND_V(p_data.size() && (uint32_t)p_data.size() != size_bytes, INVALID_ID);
+
+ TextureBuffer texture_buffer;
+ Error err = _buffer_allocate(&texture_buffer.buffer, size_bytes, VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT, VMA_MEMORY_USAGE_GPU_ONLY);
+ ERR_FAIL_COND_V(err != OK, INVALID_ID);
+
+ if (p_data.size()) {
+ uint64_t data_size = p_data.size();
+ PoolVector<uint8_t>::Read r = p_data.read();
+ _buffer_update(&texture_buffer.buffer, 0, r.ptr(), data_size);
+ }
+
+ VkBufferViewCreateInfo view_create_info;
+ view_create_info.sType = VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO;
+ view_create_info.pNext = NULL;
+ view_create_info.flags = 0;
+ view_create_info.buffer = texture_buffer.buffer.buffer;
+ view_create_info.format = vulkan_formats[p_format];
+ view_create_info.offset = 0;
+ view_create_info.range = size_bytes;
+
+ texture_buffer.view = VK_NULL_HANDLE;
+
+ VkResult res = vkCreateBufferView(device, &view_create_info, NULL, &texture_buffer.view);
+ if (res) {
+ _buffer_free(&texture_buffer.buffer);
+ ERR_FAIL_V_MSG(INVALID_ID, "Unable to create buffer view");
+ }
+
+ //allocate the view
+ return texture_buffer_owner.make_id(texture_buffer);
+}
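+
+// Usage sketch (illustrative; the format name is assumed to exist in
+// DataFormat): texture buffers are sized in elements, not bytes. With
+// DATA_FORMAT_R32G32B32A32_SFLOAT each element is 16 bytes, so 256 elements
+// imply a 4096-byte p_data (or an empty one, as here):
+//
+//   RenderingDevice::ID tb = rd->texture_buffer_create(256,
+//           RenderingDevice::DATA_FORMAT_R32G32B32A32_SFLOAT,
+//           PoolVector<uint8_t>());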
+
+RenderingDeviceVulkan::DescriptorPool *RenderingDeviceVulkan::_descriptor_pool_allocate(const DescriptorPoolKey &p_key) {
+ if (!descriptor_pools.has(p_key)) {
+ descriptor_pools[p_key] = Set<DescriptorPool *>();
+ }
+
+ DescriptorPool *pool = NULL;
+
+ for (Set<DescriptorPool *>::Element *E = descriptor_pools[p_key].front(); E; E = E->next()) {
+ if (E->get()->usage < max_descriptors_per_pool) {
+ pool = E->get();
+ break;
+ }
+ }
+
+ if (!pool) {
+ //create a new one
+ pool = memnew(DescriptorPool);
+ pool->usage = 0;
+
+ VkDescriptorPoolCreateInfo descriptor_pool_create_info;
+ descriptor_pool_create_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
+ descriptor_pool_create_info.pNext = NULL;
+ descriptor_pool_create_info.flags = VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT; // hard to think of a case where this flag would NOT be needed..
+ descriptor_pool_create_info.maxSets = max_descriptors_per_pool;
+ Vector<VkDescriptorPoolSize> sizes;
+ //here comes more vulkan API strangeness
+
+ if (p_key.uniform_type[UNIFORM_TYPE_SAMPLER]) {
+ VkDescriptorPoolSize s;
+ s.type = VK_DESCRIPTOR_TYPE_SAMPLER;
+ s.descriptorCount = p_key.uniform_type[UNIFORM_TYPE_SAMPLER] * max_descriptors_per_pool;
+ sizes.push_back(s);
+ }
+ if (p_key.uniform_type[UNIFORM_TYPE_SAMPLER_WITH_TEXTURE]) {
+ VkDescriptorPoolSize s;
+ s.type = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
+ s.descriptorCount = p_key.uniform_type[UNIFORM_TYPE_SAMPLER_WITH_TEXTURE] * max_descriptors_per_pool;
+ sizes.push_back(s);
+ }
+ if (p_key.uniform_type[UNIFORM_TYPE_TEXTURE]) {
+ VkDescriptorPoolSize s;
+ s.type = VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE;
+ s.descriptorCount = p_key.uniform_type[UNIFORM_TYPE_TEXTURE] * max_descriptors_per_pool;
+ sizes.push_back(s);
+ }
+ if (p_key.uniform_type[UNIFORM_TYPE_IMAGE]) {
+ VkDescriptorPoolSize s;
+ s.type = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE;
+ s.descriptorCount = p_key.uniform_type[UNIFORM_TYPE_IMAGE] * max_descriptors_per_pool;
+ sizes.push_back(s);
+ }
+ if (p_key.uniform_type[UNIFORM_TYPE_TEXTURE_BUFFER] || p_key.uniform_type[UNIFORM_TYPE_SAMPLER_WITH_TEXTURE_BUFFER]) {
+ VkDescriptorPoolSize s;
+ s.type = VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER;
+ s.descriptorCount = (p_key.uniform_type[UNIFORM_TYPE_TEXTURE_BUFFER] + p_key.uniform_type[UNIFORM_TYPE_SAMPLER_WITH_TEXTURE_BUFFER]) * max_descriptors_per_pool;
+ sizes.push_back(s);
+ }
+ if (p_key.uniform_type[UNIFORM_TYPE_IMAGE_BUFFER]) {
+ VkDescriptorPoolSize s;
+ s.type = VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER;
+ s.descriptorCount = p_key.uniform_type[UNIFORM_TYPE_IMAGE_BUFFER] * max_descriptors_per_pool;
+ sizes.push_back(s);
+ }
+ if (p_key.uniform_type[UNIFORM_TYPE_UNIFORM_BUFFER]) {
+ VkDescriptorPoolSize s;
+ s.type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
+ s.descriptorCount = p_key.uniform_type[UNIFORM_TYPE_UNIFORM_BUFFER] * max_descriptors_per_pool;
+ sizes.push_back(s);
+ }
+
+ if (p_key.uniform_type[UNIFORM_TYPE_STORAGE_BUFFER]) {
+ VkDescriptorPoolSize s;
+ s.type = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
+ s.descriptorCount = p_key.uniform_type[UNIFORM_TYPE_STORAGE_BUFFER] * max_descriptors_per_pool;
+ sizes.push_back(s);
+ }
+
+ if (p_key.uniform_type[UNIFORM_TYPE_INPUT_ATTACHMENT]) {
+ VkDescriptorPoolSize s;
+ s.type = VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT;
+ s.descriptorCount = p_key.uniform_type[UNIFORM_TYPE_INPUT_ATTACHMENT] * max_descriptors_per_pool;
+ sizes.push_back(s);
+ }
+
+ descriptor_pool_create_info.poolSizeCount = sizes.size();
+ descriptor_pool_create_info.pPoolSizes = sizes.ptr();
+ VkResult res = vkCreateDescriptorPool(device, &descriptor_pool_create_info, NULL, &pool->pool);
+ ERR_FAIL_COND_V(res, NULL);
+ descriptor_pools[p_key].insert(pool);
+ }
+
+ pool->usage++;
+
+ return pool;
+}
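+
+// Worked example of the sizing math above (a sketch, not extra logic): a set
+// needing 2 combined image samplers and 1 uniform buffer yields a key of
+// {UNIFORM_TYPE_SAMPLER_WITH_TEXTURE: 2, UNIFORM_TYPE_UNIFORM_BUFFER: 1};
+// the pool is created with maxSets = max_descriptors_per_pool and per-type
+// capacities of 2 * max_descriptors_per_pool and 1 * max_descriptors_per_pool,
+// so max_descriptors_per_pool sets of that exact shape always fit.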
+
+void RenderingDeviceVulkan::_descriptor_pool_free(const DescriptorPoolKey &p_key, DescriptorPool *p_pool) {
+#ifdef DEBUG_ENABLED
+ ERR_FAIL_COND(!descriptor_pools[p_key].has(p_pool));
+#endif
+ ERR_FAIL_COND(p_pool->usage == 0);
+ p_pool->usage--;
+ if (p_pool->usage == 0) {
+ vkDestroyDescriptorPool(device, p_pool->pool, NULL);
+ descriptor_pools[p_key].erase(p_pool);
+ memdelete(p_pool);
+ }
+}
+
+RenderingDevice::ID RenderingDeviceVulkan::uniform_set_create(const Vector<Uniform> &p_uniforms, ID p_shader, uint32_t p_shader_set) {
+
+ _THREAD_SAFE_METHOD_
+
+ ERR_FAIL_COND_V(p_uniforms.size() == 0, INVALID_ID);
+
+ Shader *shader = shader_owner.getornull(p_shader);
+ ERR_FAIL_COND_V(!shader, INVALID_ID);
+
+ ERR_FAIL_COND_V_MSG(p_shader_set >= (uint32_t)shader->sets.size() || shader->sets[p_shader_set].uniform_info.size() == 0, INVALID_ID,
+ "Desired set (" + itos(p_shader_set) + ") not used by shader.");
+ //see that all sets in shader are satisfied
+
+ const Shader::Set &set = shader->sets[p_shader_set];
+
+ uint32_t uniform_count = p_uniforms.size();
+ const Uniform *uniforms = p_uniforms.ptr();
+ print_line("uniform count: " + itos(uniform_count));
+
+ uint32_t set_uniform_count = set.uniform_info.size();
+ const Shader::UniformInfo *set_uniforms = set.uniform_info.ptr();
+
+ print_line("set_uniform count: " + itos(set_uniform_count));
+
+ Vector<VkWriteDescriptorSet> writes;
+ DescriptorPoolKey pool_key;
+
+ //to keep them alive until update call
+ List<Vector<VkDescriptorBufferInfo> > buffer_infos;
+ List<Vector<VkBufferView> > buffer_views;
+ List<Vector<VkDescriptorImageInfo> > image_infos;
+#ifdef DEBUG_ENABLED
+ //used for verification to make sure a uniform set does not use a framebuffer bound texture
+ Vector<ID> bound_textures;
+#endif
+
+ for (uint32_t i = 0; i < set_uniform_count; i++) {
+ const Shader::UniformInfo &set_uniform = set_uniforms[i];
+ int uniform_idx = -1;
+ for (int j = 0; j < (int)uniform_count; j++) {
+ if (uniforms[j].binding == set_uniform.binding) {
+ uniform_idx = j;
+ }
+ }
+ ERR_FAIL_COND_V_MSG(uniform_idx == -1, INVALID_ID,
+ "All the shader bindings for the given set must be covered by the uniforms provided.");
+
+ const Uniform &uniform = uniforms[uniform_idx];
+
+ ERR_FAIL_COND_V_MSG(uniform.type != set_uniform.type, INVALID_ID,
+ "Mismatch uniform type for binding (" + itos(set_uniform.binding) + ").");
+
+ VkWriteDescriptorSet write; //common header
+ write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
+ write.pNext = NULL;
+ write.dstSet = NULL; //will assign afterwards when everything is valid
+ write.dstBinding = set_uniform.binding;
+ uint32_t type_size = 1;
+
+ switch (uniform.type) {
+ case UNIFORM_TYPE_SAMPLER: {
+ if (uniform.ids.size() != set_uniform.length) {
+ if (set_uniform.length > 1) {
+ ERR_FAIL_V_MSG(INVALID_ID, "Sampler (binding: " + itos(uniform.binding) + ") is an array of (" + itos(set_uniform.length) + ") sampler elements, so it should be provided equal number of sampler IDs to satisfy it (IDs provided: " + itos(uniform.ids.size()) + ").");
+ } else {
+ ERR_FAIL_V_MSG(INVALID_ID, "Sampler (binding: " + itos(uniform.binding) + ") should provide one ID referencing a sampler (IDs provided: " + itos(uniform.ids.size()) + ").");
+ }
+ }
+
+ Vector<VkDescriptorImageInfo> image_info;
+
+ for (int j = 0; j < uniform.ids.size(); j++) {
+ VkSampler *sampler = sampler_owner.getornull(uniform.ids[j]);
+ ERR_FAIL_COND_V_MSG(!sampler, INVALID_ID, "Sampler (binding: " + itos(uniform.binding) + ", index " + itos(j) + ") is not a valid sampler.");
+
+ VkDescriptorImageInfo img_info;
+ img_info.sampler = *sampler;
+ img_info.imageView = VK_NULL_HANDLE;
+ img_info.imageLayout = VK_IMAGE_LAYOUT_UNDEFINED;
+
+ image_info.push_back(img_info);
+ }
+
+ write.dstArrayElement = 0;
+ write.descriptorCount = uniform.ids.size();
+ write.descriptorType = VK_DESCRIPTOR_TYPE_SAMPLER;
+ write.pImageInfo = image_infos.push_back(image_info)->get().ptr();
+ write.pBufferInfo = NULL;
+ write.pTexelBufferView = NULL;
+
+ type_size = uniform.ids.size();
+
+ } break;
+ case UNIFORM_TYPE_SAMPLER_WITH_TEXTURE: {
+
+ if (uniform.ids.size() != set_uniform.length * 2) {
+ if (set_uniform.length > 1) {
+ ERR_FAIL_V_MSG(INVALID_ID, "SamplerTexture (binding: " + itos(uniform.binding) + ") is an array of (" + itos(set_uniform.length) + ") sampler&texture elements, so it should provided twice the amount of IDs (sampler,texture pairs) to satisfy it (IDs provided: " + itos(uniform.ids.size()) + ").");
+ } else {
+ ERR_FAIL_V_MSG(INVALID_ID, "SamplerTexture (binding: " + itos(uniform.binding) + ") should provide two IDs referencing a sampler and then a texture (IDs provided: " + itos(uniform.ids.size()) + ").");
+ }
+ }
+
+ Vector<VkDescriptorImageInfo> image_info;
+
+ for (int j = 0; j < uniform.ids.size(); j += 2) {
+ VkSampler *sampler = sampler_owner.getornull(uniform.ids[j + 0]);
+ ERR_FAIL_COND_V_MSG(!sampler, INVALID_ID, "SamplerBuffer (binding: " + itos(uniform.binding) + ", index " + itos(j + 1) + ") is not a valid sampler.");
+
+ Texture *texture = texture_owner.getornull(uniform.ids[j + 1]);
+ ERR_FAIL_COND_V_MSG(!texture, INVALID_ID, "Texture (binding: " + itos(uniform.binding) + ", index " + itos(j) + ") is not a valid texture.");
+
+ ERR_FAIL_COND_V_MSG(!(texture->usage_flags & TEXTURE_USAGE_SAMPLING_BIT), INVALID_ID,
+ "Texture (binding: " + itos(uniform.binding) + ", index " + itos(j) + ") needs the TEXTURE_USAGE_SAMPLING_BIT usage flag set in order to be used as uniform.");
+
+ VkDescriptorImageInfo img_info;
+ img_info.sampler = *sampler;
+ img_info.imageView = texture->view;
+
+#ifdef DEBUG_ENABLED
+ bound_textures.push_back(texture->owner != INVALID_ID ? texture->owner : uniform.ids[j + 1]);
+#endif
+
+ if (texture->owner != INVALID_ID) {
+ texture = texture_owner.getornull(texture->owner);
+ ERR_FAIL_COND_V(!texture, INVALID_ID); //bug, should never happen
+ }
+
+ img_info.imageLayout = texture->reading_layout;
+
+ image_info.push_back(img_info);
+ }
+
+ write.dstArrayElement = 0;
+ write.descriptorCount = uniform.ids.size() / 2;
+ write.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
+ write.pImageInfo = image_infos.push_back(image_info)->get().ptr();
+ write.pBufferInfo = NULL;
+ write.pTexelBufferView = NULL;
+
+ type_size = uniform.ids.size() / 2;
+
+ } break;
+ case UNIFORM_TYPE_TEXTURE: {
+
+ if (uniform.ids.size() != set_uniform.length) {
+ if (set_uniform.length > 1) {
+ ERR_FAIL_V_MSG(INVALID_ID, "Texture (binding: " + itos(uniform.binding) + ") is an array of (" + itos(set_uniform.length) + ") textures, so it should be provided equal number of texture IDs to satisfy it (IDs provided: " + itos(uniform.ids.size()) + ").");
+ } else {
+ ERR_FAIL_V_MSG(INVALID_ID, "Texture (binding: " + itos(uniform.binding) + ") should provide one ID referencing a texture (IDs provided: " + itos(uniform.ids.size()) + ").");
+ }
+ }
+
+ Vector<VkDescriptorImageInfo> image_info;
+
+ for (int j = 0; j < uniform.ids.size(); j++) {
+ Texture *texture = texture_owner.getornull(uniform.ids[j]);
+ ERR_FAIL_COND_V_MSG(!texture, INVALID_ID, "Texture (binding: " + itos(uniform.binding) + ", index " + itos(j) + ") is not a valid texture.");
+
+ ERR_FAIL_COND_V_MSG(!(texture->usage_flags & TEXTURE_USAGE_SAMPLING_BIT), INVALID_ID,
+ "Texture (binding: " + itos(uniform.binding) + ", index " + itos(j) + ") needs the TEXTURE_USAGE_SAMPLING_BIT usage flag set in order to be used as uniform.");
+
+ VkDescriptorImageInfo img_info;
+ img_info.sampler = VK_NULL_HANDLE; //not needed for a sampled image descriptor
+ img_info.imageView = texture->view;
+
+#ifdef DEBUG_ENABLED
+ bound_textures.push_back(texture->owner != INVALID_ID ? texture->owner : uniform.ids[j]);
+#endif
+
+ if (texture->owner != INVALID_ID) {
+ texture = texture_owner.getornull(texture->owner);
+ ERR_FAIL_COND_V(!texture, INVALID_ID); //bug, should never happen
+ }
+
+ img_info.imageLayout = texture->reading_layout;
+
+ image_info.push_back(img_info);
+ }
+
+ write.dstArrayElement = 0;
+ write.descriptorCount = uniform.ids.size();
+ write.descriptorType = VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE;
+ write.pImageInfo = image_infos.push_back(image_info)->get().ptr();
+ write.pBufferInfo = NULL;
+ write.pTexelBufferView = NULL;
+
+ type_size = uniform.ids.size();
+ } break;
+ case UNIFORM_TYPE_IMAGE: {
+ //todo
+ } break;
+ case UNIFORM_TYPE_TEXTURE_BUFFER: {
+ if (uniform.ids.size() != set_uniform.length) {
+ if (set_uniform.length > 1) {
+ ERR_FAIL_V_MSG(INVALID_ID, "Buffer (binding: " + itos(uniform.binding) + ") is an array of (" + itos(set_uniform.length) + ") texture buffer elements, so it should be provided equal number of texture buffer IDs to satisfy it (IDs provided: " + itos(uniform.ids.size()) + ").");
+ } else {
+ ERR_FAIL_V_MSG(INVALID_ID, "Buffer (binding: " + itos(uniform.binding) + ") should provide one ID referencing a texture buffer (IDs provided: " + itos(uniform.ids.size()) + ").");
+ }
+ }
+
+ Vector<VkDescriptorBufferInfo> buffer_info;
+ Vector<VkBufferView> buffer_view;
+
+ for (int j = 0; j < uniform.ids.size(); j++) {
+ TextureBuffer *buffer = texture_buffer_owner.getornull(uniform.ids[j]);
+ ERR_FAIL_COND_V_MSG(!buffer, INVALID_ID, "Texture Buffer (binding: " + itos(uniform.binding) + ", index " + itos(j) + ") is not a valid texture buffer.");
+
+ buffer_info.push_back(buffer->buffer.buffer_info);
+ buffer_view.push_back(buffer->view);
+ }
+
+ write.dstArrayElement = 0;
+ write.descriptorCount = uniform.ids.size();
+ write.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER;
+ write.pImageInfo = NULL;
+ write.pBufferInfo = buffer_infos.push_back(buffer_info)->get().ptr();
+ write.pTexelBufferView = buffer_views.push_back(buffer_view)->get().ptr();
+
+ type_size = uniform.ids.size();
+
+ } break;
+ case UNIFORM_TYPE_SAMPLER_WITH_TEXTURE_BUFFER: {
+
+ if (uniform.ids.size() != set_uniform.length * 2) {
+ if (set_uniform.length > 1) {
+ ERR_FAIL_V_MSG(INVALID_ID, "SamplerBuffer (binding: " + itos(uniform.binding) + ") is an array of (" + itos(set_uniform.length) + ") sampler buffer elements, so it should provided twice the amount of IDs (sampler,buffer pairs) to satisfy it (IDs provided: " + itos(uniform.ids.size()) + ").");
+ } else {
+ ERR_FAIL_V_MSG(INVALID_ID, "SamplerBuffer (binding: " + itos(uniform.binding) + ") should provide two IDs referencing a sampler and then a texture buffer (IDs provided: " + itos(uniform.ids.size()) + ").");
+ }
+ }
+
+ Vector<VkDescriptorImageInfo> image_info;
+ Vector<VkDescriptorBufferInfo> buffer_info;
+ Vector<VkBufferView> buffer_view;
+
+ for (int j = 0; j < uniform.ids.size(); j += 2) {
+ VkSampler *sampler = sampler_owner.getornull(uniform.ids[j + 0]);
+ ERR_FAIL_COND_V_MSG(!sampler, INVALID_ID, "SamplerBuffer (binding: " + itos(uniform.binding) + ", index " + itos(j + 1) + ") is not a valid sampler.");
+
+ TextureBuffer *buffer = texture_buffer_owner.getornull(uniform.ids[j + 1]);
+
+ VkDescriptorImageInfo img_info;
+ img_info.sampler = *sampler;
+ img_info.imageView = VK_NULL_HANDLE;
+ img_info.imageLayout = VK_IMAGE_LAYOUT_UNDEFINED;
+
+ image_info.push_back(img_info);
+
+ ERR_FAIL_COND_V_MSG(!buffer, INVALID_ID, "SamplerBuffer (binding: " + itos(uniform.binding) + ", index " + itos(j + 1) + ") is not a valid texture buffer.");
+
+ buffer_info.push_back(buffer->buffer.buffer_info);
+ buffer_view.push_back(buffer->view);
+ }
+
+ write.dstArrayElement = 0;
+ write.descriptorCount = uniform.ids.size() / 2;
+ write.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER;
+ write.pImageInfo = image_infos.push_back(image_info)->get().ptr();
+ write.pBufferInfo = buffer_infos.push_back(buffer_info)->get().ptr();
+ write.pTexelBufferView = buffer_views.push_back(buffer_view)->get().ptr();
+
+ type_size = uniform.ids.size() / 2;
+ } break;
+ case UNIFORM_TYPE_IMAGE_BUFFER: {
+ //todo
+
+ } break;
+ case UNIFORM_TYPE_UNIFORM_BUFFER: {
+ ERR_FAIL_COND_V_MSG(uniform.ids.size() != 1, INVALID_ID,
+ "Uniform buffer (binding: " + itos(uniform.binding) + ") must provide one ID (" + itos(uniform.ids.size()) + " provided).");
+
+ Buffer *buffer = uniform_buffer_owner.getornull(uniform.ids[0]);
+ ERR_FAIL_COND_V_MSG(!buffer, INVALID_ID, "Uniform buffer (binding: " + itos(uniform.binding) + ") is invalid.");
+
+ ERR_FAIL_COND_V_MSG(buffer->size != (uint32_t)set_uniform.length, INVALID_ID,
+ "Uniform buffer (binding: " + itos(uniform.binding) + ") size (" + itos(buffer->size) + " does not match size of shader uniform: (" + itos(set_uniform.length) + ").");
+
+ write.dstArrayElement = 0;
+ write.descriptorCount = 1;
+ write.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
+ write.pImageInfo = NULL;
+ write.pBufferInfo = &buffer->buffer_info;
+ write.pTexelBufferView = NULL;
+
+ } break;
+ case UNIFORM_TYPE_STORAGE_BUFFER: {
+ ERR_FAIL_COND_V_MSG(uniform.ids.size() != 1, INVALID_ID,
+ "Storage buffer (binding: " + itos(uniform.binding) + ") must provide one ID (" + itos(uniform.ids.size()) + " provided).");
+
+ Buffer *buffer = storage_buffer_owner.getornull(uniform.ids[0]);
+ ERR_FAIL_COND_V_MSG(!buffer, INVALID_ID, "Storage buffer (binding: " + itos(uniform.binding) + ") is invalid.");
+
+ ERR_FAIL_COND_V_MSG(buffer->size != (uint32_t)set_uniform.length, INVALID_ID,
+ "Storage buffer (binding: " + itos(uniform.binding) + ") size (" + itos(buffer->size) + " does not match size of shader uniform: (" + itos(set_uniform.length) + ").");
+
+ write.dstArrayElement = 0;
+ write.descriptorCount = 1;
+ write.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
+ write.pImageInfo = NULL;
+ write.pBufferInfo = &buffer->buffer_info;
+ write.pTexelBufferView = NULL;
+ } break;
+ case UNIFORM_TYPE_INPUT_ATTACHMENT: {
+
+ } break;
+ default: {
+ }
+ }
+
+ writes.push_back(write);
+
+ ERR_FAIL_COND_V_MSG(pool_key.uniform_type[set_uniform.type] == MAX_DESCRIPTOR_POOL_ELEMENT, INVALID_ID,
+ "Uniform set reached the limit of bindings for the same type (" + itos(MAX_DESCRIPTOR_POOL_ELEMENT) + ").");
+ pool_key.uniform_type[set_uniform.type] += type_size;
+ }
+
+ //need a descriptor pool
+ DescriptorPool *pool = _descriptor_pool_allocate(pool_key);
+
+ ERR_FAIL_COND_V(!pool, INVALID_ID);
+
+ VkDescriptorSetAllocateInfo descriptor_set_allocate_info;
+
+ descriptor_set_allocate_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
+ descriptor_set_allocate_info.pNext = NULL;
+ descriptor_set_allocate_info.descriptorPool = pool->pool;
+ descriptor_set_allocate_info.descriptorSetCount = 1;
+ descriptor_set_allocate_info.pSetLayouts = &shader->sets[p_shader_set].descriptor_set_layout;
+
+ VkDescriptorSet descriptor_set;
+
+ VkResult res = vkAllocateDescriptorSets(device, &descriptor_set_allocate_info, &descriptor_set);
+ if (res) {
+ _descriptor_pool_free(pool_key, pool); // meh
+ ERR_FAIL_V_MSG(INVALID_ID, "Cannot allocate descriptor sets.");
+ }
+
+ UniformSet uniform_set;
+ uniform_set.pool = pool;
+ uniform_set.pool_key = pool_key;
+ uniform_set.descriptor_set = descriptor_set;
+ uniform_set.pipeline_layout = shader->pipeline_layout;
+ uniform_set.hash = shader->set_hashes[p_shader_set];
+
+ ID id = uniform_set_owner.make_id(uniform_set);
+ //add dependencies
+ _add_dependency(id, p_shader);
+ for (uint32_t i = 0; i < uniform_count; i++) {
+ const Uniform &uniform = uniforms[i];
+ int id_count = uniform.ids.size();
+ const ID *ids = uniform.ids.ptr();
+ for (int j = 0; j < id_count; j++) {
+ _add_dependency(id, ids[j]);
+ }
+ }
+
+ //write the contents
+ if (writes.size()) {
+ for (int i = 0; i < writes.size(); i++) {
+ writes.write[i].dstSet = descriptor_set;
+ }
+ vkUpdateDescriptorSets(device, writes.size(), writes.ptr(), 0, NULL);
+ }
+
+ return id;
+}
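+
+// Usage sketch (illustrative; `rd`, `sampler`, `texture`, `ubo` and `shader`
+// are assumed IDs): satisfying a set with a combined sampler+texture at
+// binding 0 and a uniform buffer at binding 1 might look like:
+//
+//   Vector<RenderingDevice::Uniform> uniforms;
+//   RenderingDevice::Uniform u0;
+//   u0.type = RenderingDevice::UNIFORM_TYPE_SAMPLER_WITH_TEXTURE;
+//   u0.binding = 0;
+//   u0.ids.push_back(sampler); //sampler,texture pairs
+//   u0.ids.push_back(texture);
+//   uniforms.push_back(u0);
+//   RenderingDevice::Uniform u1;
+//   u1.type = RenderingDevice::UNIFORM_TYPE_UNIFORM_BUFFER;
+//   u1.binding = 1;
+//   u1.ids.push_back(ubo);
+//   uniforms.push_back(u1);
+//   RenderingDevice::ID set = rd->uniform_set_create(uniforms, shader, 0);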
+
+Error RenderingDeviceVulkan::buffer_update(ID p_buffer, uint32_t p_offset, uint32_t p_size, void *p_data, bool p_sync_with_draw) {
+ _THREAD_SAFE_METHOD_
+
+ Buffer *buffer = NULL;
+ if (vertex_buffer_owner.owns(p_buffer)) {
+ buffer = vertex_buffer_owner.getornull(p_buffer);
+ } else if (index_buffer_owner.owns(p_buffer)) {
+ buffer = index_buffer_owner.getornull(p_buffer);
+ } else if (uniform_buffer_owner.owns(p_buffer)) {
+ buffer = uniform_buffer_owner.getornull(p_buffer);
+ } else if (texture_buffer_owner.owns(p_buffer)) {
+ buffer = &texture_buffer_owner.getornull(p_buffer)->buffer;
+ } else if (storage_buffer_owner.owns(p_buffer)) {
+ buffer = storage_buffer_owner.getornull(p_buffer);
+ } else {
+ ERR_FAIL_V_MSG(ERR_INVALID_PARAMETER, "Buffer argument is not a valid buffer of any type.");
+ }
+
+ ERR_FAIL_COND_V_MSG(p_offset + p_size > buffer->size, ERR_INVALID_PARAMETER,
+ "Attempted to write buffer (" + itos((p_offset + p_size) - buffer->size) + " bytes) past the end.");
+
+ return _buffer_update(buffer, p_offset, (uint8_t *)p_data, p_size, p_sync_with_draw);
+}
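+
+// Usage sketch (illustrative; `rd` and `ubo` are assumed): updates must stay
+// within the buffer, e.g. writing the second 16-byte block of a 64-byte
+// uniform buffer:
+//
+//   float data[4] = { 0.0f, 1.0f, 2.0f, 3.0f };
+//   Error err = rd->buffer_update(ubo, 16, sizeof(data), data, false);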
+
+/*************************/
+/**** RENDER PIPELINE ****/
+/*************************/
+
+RenderingDevice::ID RenderingDeviceVulkan::render_pipeline_create(ID p_shader, ID p_framebuffer_format, ID p_vertex_description, RenderPrimitive p_render_primitive, const PipelineRasterizationState &p_rasterization_state, const PipelineMultisampleState &p_multisample_state, const PipelineDepthStencilState &p_depth_stencil_state, const PipelineColorBlendState &p_blend_state, int p_dynamic_state_flags) {
+
+ _THREAD_SAFE_METHOD_
+
+ //needs a shader
+ Shader *shader = shader_owner.getornull(p_shader);
+ ERR_FAIL_COND_V(!shader, INVALID_ID);
+
+ if (p_framebuffer_format == INVALID_ID) {
+ //if nothing provided, use an empty one (no attachments)
+ p_framebuffer_format = framebuffer_format_create(Vector<AttachmentFormat>());
+ }
+ ERR_FAIL_COND_V(!framebuffer_formats.has(p_framebuffer_format), INVALID_ID);
+ const FramebufferFormat &fb_format = framebuffer_formats[p_framebuffer_format];
+
+ { //validate shader vs framebuffer
+
+ ERR_FAIL_COND_V_MSG(shader->fragment_outputs != fb_format.color_attachments, INVALID_ID,
+ "Mismatch fragment output bindings (" + itos(shader->fragment_outputs) + ") and framebuffer color buffers (" + itos(fb_format.color_attachments) + ") when binding both in render pipeline.");
+ }
+ //vertex
+ VkPipelineVertexInputStateCreateInfo pipeline_vertex_input_state_create_info;
+
+ if (p_vertex_description != INVALID_ID) {
+ //uses vertices; the else branch below handles pipelines without them
+ ERR_FAIL_COND_V(!vertex_descriptions.has(p_vertex_description), INVALID_ID);
+ VertexDescriptionCache &vd = vertex_descriptions[p_vertex_description];
+
+ pipeline_vertex_input_state_create_info = vd.create_info;
+
+ //validate with inputs
+ for (int i = 0; i < shader->vertex_input_locations.size(); i++) {
+ uint32_t location = shader->vertex_input_locations[i];
+ const VertexDescriptionKey &k = vd.E->key();
+ bool found = false;
+ for (int j = 0; j < k.vertex_descriptions.size(); j++) {
+ if (k.vertex_descriptions[j].location == location) {
+ found = true;
+ }
+ }
+
+ ERR_FAIL_COND_V_MSG(!found, INVALID_ID,
+ "Shader vertex input location (" + itos(location) + ") not provided in vertex input description for pipeline creation.");
+ }
+
+ } else {
+ //does not use vertices
+ pipeline_vertex_input_state_create_info.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO;
+ pipeline_vertex_input_state_create_info.pNext = NULL;
+ pipeline_vertex_input_state_create_info.flags = 0;
+ pipeline_vertex_input_state_create_info.vertexBindingDescriptionCount = 0;
+ pipeline_vertex_input_state_create_info.pVertexBindingDescriptions = NULL;
+ pipeline_vertex_input_state_create_info.vertexAttributeDescriptionCount = 0;
+ pipeline_vertex_input_state_create_info.pVertexAttributeDescriptions = NULL;
+
+ ERR_FAIL_COND_V_MSG(shader->vertex_input_locations.size(), INVALID_ID,
+ "Shader contains vertex inputs (" + itos(shader->vertex_input_locations.size()) + ") but no vertex input description was provided for pipeline creation.");
+ }
+ //input assembly
+
+ ERR_FAIL_INDEX_V(p_render_primitive, RENDER_PRIMITIVE_MAX, INVALID_ID);
+
+ VkPipelineInputAssemblyStateCreateInfo input_assembly_create_info;
+ input_assembly_create_info.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO;
+ input_assembly_create_info.pNext = NULL;
+ input_assembly_create_info.flags = 0;
+
+ static const VkPrimitiveTopology topology_list[RENDER_PRIMITIVE_MAX] = {
+ VK_PRIMITIVE_TOPOLOGY_POINT_LIST,
+ VK_PRIMITIVE_TOPOLOGY_LINE_LIST,
+ VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY,
+ VK_PRIMITIVE_TOPOLOGY_LINE_STRIP,
+ VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY,
+ VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST,
+ VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY,
+ VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP,
+ VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY,
+ VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP,
+ VK_PRIMITIVE_TOPOLOGY_PATCH_LIST
+ };
+
+ input_assembly_create_info.topology = topology_list[p_render_primitive];
+ input_assembly_create_info.primitiveRestartEnable = (p_render_primitive == RENDER_PRIMITIVE_TRIANGLE_STRIPS_WITH_RESTART_INDEX);
+
+ //tessellation
+ VkPipelineTessellationStateCreateInfo tessellation_create_info;
+ tessellation_create_info.sType = VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_STATE_CREATE_INFO;
+ tessellation_create_info.pNext = NULL;
+ tessellation_create_info.flags = 0;
+ ERR_FAIL_COND_V(p_rasterization_state.patch_control_points < 1 || p_rasterization_state.patch_control_points > limits.maxTessellationPatchSize, INVALID_ID);
+ tessellation_create_info.patchControlPoints = p_rasterization_state.patch_control_points;
+
+ VkPipelineViewportStateCreateInfo viewport_state_create_info;
+ viewport_state_create_info.sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO;
+ viewport_state_create_info.pNext = NULL;
+ viewport_state_create_info.flags = 0;
+ viewport_state_create_info.viewportCount = 1; //if VR extensions are supported at some point, this will have to be customizable in the framebuffer format
+ viewport_state_create_info.pViewports = NULL;
+ viewport_state_create_info.scissorCount = 1;
+ viewport_state_create_info.pScissors = NULL;
+
+ //rasterization
+ VkPipelineRasterizationStateCreateInfo rasterization_state_create_info;
+ rasterization_state_create_info.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO;
+ rasterization_state_create_info.pNext = NULL;
+ rasterization_state_create_info.flags = 0;
+ rasterization_state_create_info.depthClampEnable = p_rasterization_state.enable_depth_clamp;
+ rasterization_state_create_info.rasterizerDiscardEnable = p_rasterization_state.discard_primitives;
+ rasterization_state_create_info.polygonMode = (p_rasterization_state.wireframe ? VK_POLYGON_MODE_LINE : VK_POLYGON_MODE_FILL);
+ static VkCullModeFlags cull_mode[3] = {
+ VK_CULL_MODE_NONE,
+ VK_CULL_MODE_FRONT_BIT,
+ VK_CULL_MODE_BACK_BIT
+ };
+
+ ERR_FAIL_INDEX_V(p_rasterization_state.cull_mode, 3, INVALID_ID);
+ rasterization_state_create_info.cullMode = cull_mode[p_rasterization_state.cull_mode];
+ rasterization_state_create_info.frontFace = (p_rasterization_state.front_face == POLYGON_FRONT_FACE_CLOCKWISE ? VK_FRONT_FACE_CLOCKWISE : VK_FRONT_FACE_COUNTER_CLOCKWISE);
+ rasterization_state_create_info.depthBiasEnable = p_rasterization_state.depth_bias_enable;
+ rasterization_state_create_info.depthBiasConstantFactor = p_rasterization_state.depth_bias_constant_factor;
+ rasterization_state_create_info.depthBiasClamp = p_rasterization_state.depth_bias_clamp;
+ rasterization_state_create_info.depthBiasSlopeFactor = p_rasterization_state.depth_bias_slope_factor;
+ rasterization_state_create_info.lineWidth = p_rasterization_state.line_width;
+
+ //multisample
+ VkPipelineMultisampleStateCreateInfo multisample_state_create_info;
+ multisample_state_create_info.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO;
+ multisample_state_create_info.pNext = NULL;
+ multisample_state_create_info.flags = 0;
+
+ multisample_state_create_info.rasterizationSamples = rasterization_sample_count[p_multisample_state.sample_count];
+ multisample_state_create_info.sampleShadingEnable = p_multisample_state.enable_sample_shading;
+ multisample_state_create_info.minSampleShading = p_multisample_state.min_sample_shading;
+ Vector<VkSampleMask> sample_mask;
+ if (p_multisample_state.sample_mask.size()) {
+ //use sample mask
+ int rasterization_sample_mask_expected_size[TEXTURE_SAMPLES_MAX] = {
+ 1, 2, 4, 8, 16, 32, 64
+ };
+ ERR_FAIL_COND_V(rasterization_sample_mask_expected_size[p_multisample_state.sample_count] != p_multisample_state.sample_mask.size(), INVALID_ID);
+ sample_mask.resize(p_multisample_state.sample_mask.size());
+ for (int i = 0; i < p_multisample_state.sample_mask.size(); i++) {
+ VkSampleMask mask = p_multisample_state.sample_mask[i];
+ sample_mask.write[i] = mask; //write in place; push_back after resize() would double the array
+ }
+ multisample_state_create_info.pSampleMask = sample_mask.ptr();
+ } else {
+ multisample_state_create_info.pSampleMask = NULL;
+ }
+
+ multisample_state_create_info.alphaToCoverageEnable = p_multisample_state.enable_alpha_to_coverage;
+ multisample_state_create_info.alphaToOneEnable = p_multisample_state.enable_alpha_to_one;
+
+ //depth stencil
+
+ VkPipelineDepthStencilStateCreateInfo depth_stencil_state_create_info;
+ depth_stencil_state_create_info.sType = VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO;
+ depth_stencil_state_create_info.pNext = NULL;
+ depth_stencil_state_create_info.flags = 0;
+ depth_stencil_state_create_info.depthTestEnable = p_depth_stencil_state.enable_depth_test;
+ depth_stencil_state_create_info.depthWriteEnable = p_depth_stencil_state.enable_depth_write;
+ ERR_FAIL_INDEX_V(p_depth_stencil_state.depth_compare_operator, COMPARE_OP_MAX, INVALID_ID);
+ depth_stencil_state_create_info.depthCompareOp = compare_operators[p_depth_stencil_state.depth_compare_operator];
+ depth_stencil_state_create_info.depthBoundsTestEnable = p_depth_stencil_state.enable_depth_range;
+ depth_stencil_state_create_info.stencilTestEnable = p_depth_stencil_state.enable_stencil;
+
+ ERR_FAIL_INDEX_V(p_depth_stencil_state.stencil_operation_front.fail, STENCIL_OP_MAX, INVALID_ID);
+ depth_stencil_state_create_info.front.failOp = stencil_operations[p_depth_stencil_state.stencil_operation_front.fail];
+ ERR_FAIL_INDEX_V(p_depth_stencil_state.stencil_operation_front.pass, STENCIL_OP_MAX, INVALID_ID);
+ depth_stencil_state_create_info.front.passOp = stencil_operations[p_depth_stencil_state.stencil_operation_front.pass];
+ ERR_FAIL_INDEX_V(p_depth_stencil_state.stencil_operation_front.depth_fail, STENCIL_OP_MAX, INVALID_ID);
+ depth_stencil_state_create_info.front.depthFailOp = stencil_operations[p_depth_stencil_state.stencil_operation_front.depth_fail];
+ ERR_FAIL_INDEX_V(p_depth_stencil_state.stencil_operation_front.compare, COMPARE_OP_MAX, INVALID_ID);
+ depth_stencil_state_create_info.front.compareOp = compare_operators[p_depth_stencil_state.stencil_operation_front.compare];
+ depth_stencil_state_create_info.front.compareMask = p_depth_stencil_state.stencil_operation_front.compare_mask;
+ depth_stencil_state_create_info.front.writeMask = p_depth_stencil_state.stencil_operation_front.write_mask;
+ depth_stencil_state_create_info.front.reference = p_depth_stencil_state.stencil_operation_front.reference;
+
+ ERR_FAIL_INDEX_V(p_depth_stencil_state.stencil_operation_back.fail, STENCIL_OP_MAX, INVALID_ID);
+ depth_stencil_state_create_info.back.failOp = stencil_operations[p_depth_stencil_state.stencil_operation_back.fail];
+ ERR_FAIL_INDEX_V(p_depth_stencil_state.stencil_operation_back.pass, STENCIL_OP_MAX, INVALID_ID);
+ depth_stencil_state_create_info.back.passOp = stencil_operations[p_depth_stencil_state.stencil_operation_back.pass];
+ ERR_FAIL_INDEX_V(p_depth_stencil_state.stencil_operation_back.depth_fail, STENCIL_OP_MAX, INVALID_ID);
+ depth_stencil_state_create_info.back.depthFailOp = stencil_operations[p_depth_stencil_state.stencil_operation_back.depth_fail];
+ ERR_FAIL_INDEX_V(p_depth_stencil_state.stencil_operation_back.compare, COMPARE_OP_MAX, INVALID_ID);
+ depth_stencil_state_create_info.back.compareOp = compare_operators[p_depth_stencil_state.stencil_operation_back.compare];
+ depth_stencil_state_create_info.back.compareMask = p_depth_stencil_state.stencil_operation_back.compare_mask;
+ depth_stencil_state_create_info.back.writeMask = p_depth_stencil_state.stencil_operation_back.write_mask;
+ depth_stencil_state_create_info.back.reference = p_depth_stencil_state.stencil_operation_back.reference;
+
+ depth_stencil_state_create_info.minDepthBounds = p_depth_stencil_state.depth_range_min;
+ depth_stencil_state_create_info.maxDepthBounds = p_depth_stencil_state.depth_range_max;
+
+ //blend state
+ VkPipelineColorBlendStateCreateInfo color_blend_state_create_info;
+ color_blend_state_create_info.sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO;
+ color_blend_state_create_info.pNext = NULL;
+ color_blend_state_create_info.flags = 0;
+ color_blend_state_create_info.logicOpEnable = p_blend_state.enable_logic_op;
+ ERR_FAIL_INDEX_V(p_blend_state.logic_op, LOGIC_OP_MAX, INVALID_ID);
+ color_blend_state_create_info.logicOp = logic_operations[p_blend_state.logic_op];
+
+ ERR_FAIL_COND_V(fb_format.color_attachments != p_blend_state.attachments.size(), INVALID_ID);
+
+ Vector<VkPipelineColorBlendAttachmentState> attachment_states;
+
+ for (int i = 0; i < p_blend_state.attachments.size(); i++) {
+ VkPipelineColorBlendAttachmentState state;
+ state.blendEnable = p_blend_state.attachments[i].enable_blend;
+
+ ERR_FAIL_INDEX_V(p_blend_state.attachments[i].src_color_blend_factor, BLEND_FACTOR_MAX, INVALID_ID);
+ state.srcColorBlendFactor = blend_factors[p_blend_state.attachments[i].src_color_blend_factor];
+ ERR_FAIL_INDEX_V(p_blend_state.attachments[i].dst_color_blend_factor, BLEND_FACTOR_MAX, INVALID_ID);
+ state.dstColorBlendFactor = blend_factors[p_blend_state.attachments[i].dst_color_blend_factor];
+ ERR_FAIL_INDEX_V(p_blend_state.attachments[i].color_blend_op, BLEND_OP_MAX, INVALID_ID);
+ state.colorBlendOp = blend_operations[p_blend_state.attachments[i].color_blend_op];
+
+ ERR_FAIL_INDEX_V(p_blend_state.attachments[i].src_alpha_blend_factor, BLEND_FACTOR_MAX, INVALID_ID);
+ state.srcAlphaBlendFactor = blend_factors[p_blend_state.attachments[i].src_alpha_blend_factor];
+ ERR_FAIL_INDEX_V(p_blend_state.attachments[i].dst_alpha_blend_factor, BLEND_FACTOR_MAX, INVALID_ID);
+ state.dstAlphaBlendFactor = blend_factors[p_blend_state.attachments[i].dst_alpha_blend_factor];
+ ERR_FAIL_INDEX_V(p_blend_state.attachments[i].alpha_blend_op, BLEND_OP_MAX, INVALID_ID);
+ state.alphaBlendOp = blend_operations[p_blend_state.attachments[i].alpha_blend_op];
+
+ state.colorWriteMask = 0;
+ if (p_blend_state.attachments[i].write_r) {
+ state.colorWriteMask |= VK_COLOR_COMPONENT_R_BIT;
+ }
+ if (p_blend_state.attachments[i].write_g) {
+ state.colorWriteMask |= VK_COLOR_COMPONENT_G_BIT;
+ }
+ if (p_blend_state.attachments[i].write_b) {
+ state.colorWriteMask |= VK_COLOR_COMPONENT_B_BIT;
+ }
+ if (p_blend_state.attachments[i].write_a) {
+ state.colorWriteMask |= VK_COLOR_COMPONENT_A_BIT;
+ }
+
+ attachment_states.push_back(state);
+ }
+
+ color_blend_state_create_info.attachmentCount = attachment_states.size();
+ color_blend_state_create_info.pAttachments = attachment_states.ptr();
+
+ color_blend_state_create_info.blendConstants[0] = p_blend_state.blend_constant.r;
+ color_blend_state_create_info.blendConstants[1] = p_blend_state.blend_constant.g;
+ color_blend_state_create_info.blendConstants[2] = p_blend_state.blend_constant.b;
+ color_blend_state_create_info.blendConstants[3] = p_blend_state.blend_constant.a;
+
+ //dynamic state
+
+ VkPipelineDynamicStateCreateInfo dynamic_state_create_info;
+ dynamic_state_create_info.sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO;
+ dynamic_state_create_info.pNext = NULL;
+ dynamic_state_create_info.flags = 0;
+ Vector<VkDynamicState> dynamic_states; //vulkan is weird..
+
+ dynamic_states.push_back(VK_DYNAMIC_STATE_VIEWPORT); //viewport and scissor are always dynamic
+ dynamic_states.push_back(VK_DYNAMIC_STATE_SCISSOR);
+
+ if (p_dynamic_state_flags & DYNAMIC_STATE_LINE_WIDTH) {
+ dynamic_states.push_back(VK_DYNAMIC_STATE_LINE_WIDTH);
+ }
+
+ if (p_dynamic_state_flags & DYNAMIC_STATE_DEPTH_BIAS) {
+ dynamic_states.push_back(VK_DYNAMIC_STATE_DEPTH_BIAS);
+ }
+
+ if (p_dynamic_state_flags & DYNAMIC_STATE_BLEND_CONSTANTS) {
+ dynamic_states.push_back(VK_DYNAMIC_STATE_BLEND_CONSTANTS);
+ }
+
+ if (p_dynamic_state_flags & DYNAMIC_STATE_DEPTH_BOUNDS) {
+ dynamic_states.push_back(VK_DYNAMIC_STATE_DEPTH_BOUNDS);
+ }
+
+ if (p_dynamic_state_flags & DYNAMIC_STATE_STENCIL_COMPARE_MASK) {
+ dynamic_states.push_back(VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK);
+ }
+
+ if (p_dynamic_state_flags & DYNAMIC_STATE_STENCIL_WRITE_MASK) {
+ dynamic_states.push_back(VK_DYNAMIC_STATE_STENCIL_WRITE_MASK);
+ }
+
+ if (p_dynamic_state_flags & DYNAMIC_STATE_STENCIL_REFERENCE) {
+ dynamic_states.push_back(VK_DYNAMIC_STATE_STENCIL_REFERENCE);
+ }
+
+ dynamic_state_create_info.dynamicStateCount = dynamic_states.size();
+ dynamic_state_create_info.pDynamicStates = dynamic_states.ptr();
+
+ //finally, pipeline create info
+ VkGraphicsPipelineCreateInfo graphics_pipeline_create_info;
+
+ graphics_pipeline_create_info.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO;
+ graphics_pipeline_create_info.pNext = NULL;
+
+ graphics_pipeline_create_info.stageCount = shader->pipeline_stages.size();
+ graphics_pipeline_create_info.pStages = shader->pipeline_stages.ptr();
+ graphics_pipeline_create_info.pVertexInputState = &pipeline_vertex_input_state_create_info;
+ graphics_pipeline_create_info.pInputAssemblyState = &input_assembly_create_info;
+ graphics_pipeline_create_info.pTessellationState = &tessellation_create_info;
+ graphics_pipeline_create_info.pViewportState = &viewport_state_create_info;
+ graphics_pipeline_create_info.pRasterizationState = &rasterization_state_create_info;
+ graphics_pipeline_create_info.pMultisampleState = &multisample_state_create_info;
+ graphics_pipeline_create_info.pDepthStencilState = &depth_stencil_state_create_info;
+ graphics_pipeline_create_info.pColorBlendState = &color_blend_state_create_info;
+ graphics_pipeline_create_info.pDynamicState = &dynamic_state_create_info;
+ graphics_pipeline_create_info.layout = shader->pipeline_layout;
+ graphics_pipeline_create_info.renderPass = fb_format.render_pass;
+
+ graphics_pipeline_create_info.subpass = 0;
+ graphics_pipeline_create_info.basePipelineHandle = NULL;
+ graphics_pipeline_create_info.basePipelineIndex = 0;
+
+ RenderPipeline pipeline;
+ VkResult err = vkCreateGraphicsPipelines(device, NULL, 1, &graphics_pipeline_create_info, NULL, &pipeline.pipeline);
+ ERR_FAIL_COND_V(err, INVALID_ID);
+
+ pipeline.dynamic_state = p_dynamic_state_flags;
+ pipeline.framebuffer_format = p_framebuffer_format;
+ pipeline.vertex_format = p_vertex_description;
+ pipeline.uses_restart_indices = input_assembly_create_info.primitiveRestartEnable;
+ pipeline.set_hashes = shader->set_hashes;
+
+ static const uint32_t primitive_divisor[RENDER_PRIMITIVE_MAX] = {
+ 1, 2, 1, 1, 1, 3, 1, 1, 1, 1, 1
+ };
+ pipeline.primitive_divisor = primitive_divisor[p_render_primitive];
+ static const uint32_t primitive_minimum[RENDER_PRIMITIVE_MAX] = {
+ 1,
+ 2,
+ 2,
+ 2,
+ 2,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 1,
+ };
+ pipeline.primitive_minimum = primitive_minimum[p_render_primitive];
+
+ //create ID to associate with this pipeline
+ ID id = pipeline_owner.make_id(pipeline);
+ //now add all the dependencies
+ _add_dependency(id, p_shader);
+ return id;
+}
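+
+// Usage sketch (illustrative; `rd`, `shader`, `fb_format`, `vertex_desc` and
+// the nested Attachment type name are assumptions based on the header, and
+// the state structs are assumed to default-construct to sensible values): an
+// opaque triangle pipeline with one color attachment and no blending might be
+// created as:
+//
+//   RenderingDevice::PipelineColorBlendState blend;
+//   blend.attachments.push_back(RenderingDevice::PipelineColorBlendState::Attachment());
+//   RenderingDevice::ID pipeline = rd->render_pipeline_create(shader,
+//           fb_format, vertex_desc, RenderingDevice::RENDER_PRIMITIVE_TRIANGLES,
+//           RenderingDevice::PipelineRasterizationState(),
+//           RenderingDevice::PipelineMultisampleState(),
+//           RenderingDevice::PipelineDepthStencilState(),
+//           blend, 0);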
+
+/****************/
+/**** SCREEN ****/
+/****************/
+
+int RenderingDeviceVulkan::screen_get_width(int p_screen) const {
+ _THREAD_SAFE_METHOD_
+
+ return context->get_screen_width(p_screen);
+}
+int RenderingDeviceVulkan::screen_get_height(int p_screen) const {
+ _THREAD_SAFE_METHOD_
+
+ return context->get_screen_height(p_screen);
+}
+RenderingDevice::ID RenderingDeviceVulkan::screen_get_framebuffer_format() const {
+
+ _THREAD_SAFE_METHOD_
+
+ //very hacky, but not used often per frame so I guess ok
+ VkFormat vkformat = context->get_screen_format();
+ DataFormat format = DATA_FORMAT_MAX;
+ for (int i = 0; i < DATA_FORMAT_MAX; i++) {
+ if (vkformat == vulkan_formats[i]) {
+ format = DataFormat(i);
+ break;
+ }
+ }
+
+ ERR_FAIL_COND_V(format == DATA_FORMAT_MAX, INVALID_ID);
+
+ AttachmentFormat attachment;
+ attachment.format = format;
+ attachment.samples = TEXTURE_SAMPLES_1;
+ attachment.usage_flags = TEXTURE_USAGE_COLOR_ATTACHMENT_BIT;
+ Vector<AttachmentFormat> screen_attachment;
+ screen_attachment.push_back(attachment);
+ return const_cast<RenderingDeviceVulkan *>(this)->framebuffer_format_create(screen_attachment);
+}
+
+/*******************/
+/**** DRAW LIST ****/
+/*******************/
+
+RenderingDevice::ID RenderingDeviceVulkan::draw_list_begin_for_screen(int p_screen, const Color &p_clear_color) {
+
+ _THREAD_SAFE_METHOD_
+
+ ERR_FAIL_COND_V_MSG(draw_list != NULL, INVALID_ID, "Only one draw list can be active at the same time.");
+ VkCommandBuffer command_buffer = frames[frame].draw_command_buffer;
+ draw_list = memnew(DrawList);
+ draw_list->command_buffer = command_buffer;
+ draw_list->validation.framebuffer_format = screen_get_framebuffer_format();
+ draw_list_count = 0;
+ draw_list_split = false;
+
+ VkRenderPassBeginInfo render_pass_begin;
+ render_pass_begin.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
+ render_pass_begin.pNext = NULL;
+ render_pass_begin.renderPass = context->get_render_pass();
+ render_pass_begin.framebuffer = context->get_frame_framebuffer(frame);
+
+ render_pass_begin.renderArea.extent.width = context->get_screen_width(p_screen);
+ render_pass_begin.renderArea.extent.height = context->get_screen_height(p_screen);
+ render_pass_begin.renderArea.offset.x = 0;
+ render_pass_begin.renderArea.offset.y = 0;
+
+ render_pass_begin.clearValueCount = 1;
+
+ VkClearValue clear_value;
+ clear_value.color.float32[0] = p_clear_color.r;
+ clear_value.color.float32[1] = p_clear_color.g;
+ clear_value.color.float32[2] = p_clear_color.b;
+ clear_value.color.float32[3] = p_clear_color.a;
+
+ render_pass_begin.pClearValues = &clear_value;
+
+ vkCmdBeginRenderPass(command_buffer, &render_pass_begin, VK_SUBPASS_CONTENTS_INLINE);
+
+ uint32_t size_x = screen_get_width(p_screen);
+ uint32_t size_y = screen_get_height(p_screen);
+
+ VkViewport viewport;
+ viewport.x = 0;
+ viewport.y = 0;
+ viewport.width = size_x;
+ viewport.height = size_y;
+ viewport.minDepth = 0;
+ viewport.maxDepth = 1.0;
+
+ vkCmdSetViewport(command_buffer, 0, 1, &viewport);
+
+ VkRect2D scissor;
+ scissor.offset.x = 0;
+ scissor.offset.y = 0;
+ scissor.extent.width = size_x;
+	scissor.extent.height = size_y;
+
+ vkCmdSetScissor(command_buffer, 0, 1, &scissor);
+
+ return ID_TYPE_DRAW_LIST;
+}
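+
+//A minimal usage sketch for the function above (hypothetical caller code;
+//"rd", "pipeline" and "vertices" are assumed to exist and are not part of this file):
+//
+//	RenderingDevice::ID list = rd->draw_list_begin_for_screen(0, Color(0, 0, 0, 1));
+//	rd->draw_list_bind_render_pipeline(list, pipeline);
+//	rd->draw_list_bind_vertex_array(list, vertices);
+//	rd->draw_list_draw(list, false, 1);
+//	rd->draw_list_end();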
+
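+//Framebuffer versions are cached per (initial_action, final_action) pair,
+//since the render pass (and thus the VkFramebuffer) depends on the load and
+//store operations; repeat calls with the same actions reuse the cached version.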
+Error RenderingDeviceVulkan::_draw_list_setup_framebuffer(Framebuffer *p_framebuffer, InitialAction p_initial_action, FinalAction p_final_action, VkFramebuffer *r_framebuffer, VkRenderPass *r_render_pass) {
+
+ Framebuffer::VersionKey vk;
+ vk.initial_action = p_initial_action;
+ vk.final_action = p_final_action;
+
+ if (!p_framebuffer->framebuffers.has(vk)) {
+ //need to create this version
+ Framebuffer::Version version;
+
+ version.render_pass = _render_pass_create(framebuffer_formats[p_framebuffer->format_id].E->key().attachments, p_initial_action, p_final_action);
+
+ VkFramebufferCreateInfo framebuffer_create_info;
+ framebuffer_create_info.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO;
+ framebuffer_create_info.pNext = NULL;
+ framebuffer_create_info.flags = 0;
+ framebuffer_create_info.renderPass = version.render_pass;
+ Vector<VkImageView> attachments;
+ for (int i = 0; i < p_framebuffer->texture_ids.size(); i++) {
+ Texture *texture = texture_owner.getornull(p_framebuffer->texture_ids[i]);
+ ERR_FAIL_COND_V(!texture, ERR_BUG);
+ attachments.push_back(texture->view);
+ }
+ framebuffer_create_info.attachmentCount = attachments.size();
+ framebuffer_create_info.pAttachments = attachments.ptr();
+ framebuffer_create_info.width = p_framebuffer->size.width;
+ framebuffer_create_info.height = p_framebuffer->size.height;
+ framebuffer_create_info.layers = 1;
+
+ VkResult err = vkCreateFramebuffer(device, &framebuffer_create_info, NULL, &version.framebuffer);
+ ERR_FAIL_COND_V(err, ERR_CANT_CREATE);
+
+ p_framebuffer->framebuffers.insert(vk, version);
+ }
+ const Framebuffer::Version &version = p_framebuffer->framebuffers[vk];
+ *r_framebuffer = version.framebuffer;
+ *r_render_pass = version.render_pass;
+
+ return OK;
+}
+
+Error RenderingDeviceVulkan::_draw_list_render_pass_begin(Framebuffer *framebuffer, InitialAction p_initial_action, FinalAction p_final_action, const Vector<Color> &p_clear_colors, Point2i viewport_offset, Point2i viewport_size, VkFramebuffer vkframebuffer, VkRenderPass render_pass, VkCommandBuffer command_buffer, VkSubpassContents subpass_contents) {
+
+ VkRenderPassBeginInfo render_pass_begin;
+ render_pass_begin.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
+ render_pass_begin.pNext = NULL;
+ render_pass_begin.renderPass = render_pass;
+ render_pass_begin.framebuffer = vkframebuffer;
+
+ render_pass_begin.renderArea.extent.width = viewport_size.width;
+ render_pass_begin.renderArea.extent.height = viewport_size.height;
+ render_pass_begin.renderArea.offset.x = viewport_offset.x;
+ render_pass_begin.renderArea.offset.y = viewport_offset.y;
+
+ Vector<VkClearValue> clear_values;
+ if (p_initial_action == INITIAL_ACTION_CLEAR) {
+ int color_index = 0;
+ for (int i = 0; i < framebuffer->texture_ids.size(); i++) {
+ Texture *texture = texture_owner.getornull(framebuffer->texture_ids[i]);
+ VkClearValue clear_value;
+ if (texture->usage_flags & TEXTURE_USAGE_COLOR_ATTACHMENT_BIT) {
+ ERR_FAIL_INDEX_V(color_index, p_clear_colors.size(), ERR_BUG); //a bug
+ Color clear_color = p_clear_colors[color_index];
+ clear_value.color.float32[0] = clear_color.r;
+ clear_value.color.float32[1] = clear_color.g;
+ clear_value.color.float32[2] = clear_color.b;
+ clear_value.color.float32[3] = clear_color.a;
+ color_index++;
+ } else if (texture->usage_flags & TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) {
+ clear_value.depthStencil.depth = 1.0;
+ clear_value.depthStencil.stencil = 0;
+ } else {
+ clear_value.color.float32[0] = 0;
+ clear_value.color.float32[1] = 0;
+ clear_value.color.float32[2] = 0;
+ clear_value.color.float32[3] = 0;
+ }
+ clear_values.push_back(clear_value);
+ }
+ }
+
+ render_pass_begin.clearValueCount = clear_values.size();
+ render_pass_begin.pClearValues = clear_values.ptr();
+
+ vkCmdBeginRenderPass(command_buffer, &render_pass_begin, subpass_contents);
+
+ //mark textures as bound
+ draw_list_bound_textures.clear();
+ draw_list_unbind_textures = p_final_action != FINAL_ACTION_CONTINUE;
+ for (int i = 0; i < framebuffer->texture_ids.size(); i++) {
+ Texture *texture = texture_owner.getornull(framebuffer->texture_ids[i]);
+ texture->bound = true;
+ draw_list_bound_textures.push_back(framebuffer->texture_ids[i]);
+ }
+
+ return OK;
+}
+
+RenderingDevice::ID RenderingDeviceVulkan::draw_list_begin(ID p_framebuffer, InitialAction p_initial_action, FinalAction p_final_action, const Vector<Color> &p_clear_colors, const Rect2 &p_region) {
+
+ _THREAD_SAFE_METHOD_
+
+ Framebuffer *framebuffer = framebuffer_owner.getornull(p_framebuffer);
+ ERR_FAIL_COND_V(!framebuffer, INVALID_ID);
+
+ Point2i viewport_offset;
+ Point2i viewport_size = framebuffer->size;
+
+ if (p_region != Rect2()) { //check custom region
+ Rect2i viewport(viewport_offset, viewport_size);
+ Rect2i regioni = p_region;
+		if (!((regioni.position.x >= viewport.position.x) && (regioni.position.y >= viewport.position.y) &&
+					((regioni.position.x + regioni.size.x) <= (viewport.position.x + viewport.size.x)) &&
+					((regioni.position.y + regioni.size.y) <= (viewport.position.y + viewport.size.y)))) {
+			ERR_FAIL_V_MSG(INVALID_ID, "When supplying a custom region, it must be contained within the framebuffer rectangle.");
+		}
+
+ viewport_offset = regioni.position;
+ viewport_size = regioni.size;
+ }
+
+ if (p_initial_action == INITIAL_ACTION_CLEAR) { //check clear values
+
+ int color_attachments = framebuffer_formats[framebuffer->format_id].color_attachments;
+ ERR_FAIL_COND_V_MSG(p_clear_colors.size() != color_attachments, INVALID_ID,
+ "Clear color values supplied (" + itos(p_clear_colors.size()) + ") differ from the amount required for framebuffer (" + itos(color_attachments) + ").");
+ }
+
+ VkFramebuffer vkframebuffer;
+ VkRenderPass render_pass;
+
+ Error err = _draw_list_setup_framebuffer(framebuffer, p_initial_action, p_final_action, &vkframebuffer, &render_pass);
+ ERR_FAIL_COND_V(err != OK, INVALID_ID);
+
+ VkCommandBuffer command_buffer = frames[frame].draw_command_buffer;
+ err = _draw_list_render_pass_begin(framebuffer, p_initial_action, p_final_action, p_clear_colors, viewport_offset, viewport_size, vkframebuffer, render_pass, command_buffer, VK_SUBPASS_CONTENTS_INLINE);
+
+ if (err != OK) {
+ return INVALID_ID;
+ }
+
+ draw_list = memnew(DrawList);
+ draw_list->command_buffer = command_buffer;
+ draw_list->validation.framebuffer_format = framebuffer->format_id;
+ draw_list_count = 0;
+ draw_list_split = false;
+
+ VkViewport viewport;
+ viewport.x = viewport_offset.x;
+ viewport.y = viewport_offset.y;
+ viewport.width = viewport_size.width;
+ viewport.height = viewport_size.height;
+ viewport.minDepth = 0;
+ viewport.maxDepth = 1.0;
+
+ vkCmdSetViewport(command_buffer, 0, 1, &viewport);
+
+ VkRect2D scissor;
+ scissor.offset.x = viewport_offset.x;
+ scissor.offset.y = viewport_offset.y;
+ scissor.extent.width = viewport_size.width;
+ scissor.extent.height = viewport_size.height;
+
+ vkCmdSetScissor(command_buffer, 0, 1, &scissor);
+
+ return ID_TYPE_DRAW_LIST;
+}
+
+Error RenderingDeviceVulkan::draw_list_begin_split(ID p_framebuffer, uint32_t p_splits, ID *r_split_ids, InitialAction p_initial_action, FinalAction p_final_action, const Vector<Color> &p_clear_colors, const Rect2 &p_region) {
+
+ _THREAD_SAFE_METHOD_
+
+ ERR_FAIL_COND_V(p_splits < 1, ERR_INVALID_DECLARATION);
+
+ Framebuffer *framebuffer = framebuffer_owner.getornull(p_framebuffer);
+ ERR_FAIL_COND_V(!framebuffer, ERR_INVALID_DECLARATION);
+
+ Point2i viewport_offset;
+ Point2i viewport_size = framebuffer->size;
+
+ if (p_region != Rect2()) { //check custom region
+ Rect2i viewport(viewport_offset, viewport_size);
+ Rect2i regioni = p_region;
+		if (!((regioni.position.x >= viewport.position.x) && (regioni.position.y >= viewport.position.y) &&
+					((regioni.position.x + regioni.size.x) <= (viewport.position.x + viewport.size.x)) &&
+					((regioni.position.y + regioni.size.y) <= (viewport.position.y + viewport.size.y)))) {
+			ERR_FAIL_V_MSG(ERR_INVALID_PARAMETER, "When supplying a custom region, it must be contained within the framebuffer rectangle.");
+		}
+
+ viewport_offset = regioni.position;
+ viewport_size = regioni.size;
+ }
+
+ if (p_initial_action == INITIAL_ACTION_CLEAR) { //check clear values
+
+ int color_attachments = framebuffer_formats[framebuffer->format_id].color_attachments;
+ ERR_FAIL_COND_V_MSG(p_clear_colors.size() != color_attachments, ERR_INVALID_PARAMETER,
+ "Clear color values supplied (" + itos(p_clear_colors.size()) + ") differ from the amount required for framebuffer (" + itos(color_attachments) + ").");
+ }
+
+ if (p_splits > (uint32_t)split_draw_list_allocators.size()) {
+ uint32_t from = split_draw_list_allocators.size();
+ split_draw_list_allocators.resize(p_splits);
+ for (uint32_t i = from; i < p_splits; i++) {
+
+ VkCommandPoolCreateInfo cmd_pool_info;
+ cmd_pool_info.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
+ cmd_pool_info.pNext = NULL;
+ cmd_pool_info.queueFamilyIndex = context->get_graphics_queue();
+ cmd_pool_info.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
+
+ VkResult res = vkCreateCommandPool(device, &cmd_pool_info, NULL, &split_draw_list_allocators.write[i].command_pool);
+ ERR_FAIL_COND_V(res, ERR_CANT_CREATE);
+
+ for (int j = 0; j < frame_count; j++) {
+
+ VkCommandBuffer command_buffer;
+
+ VkCommandBufferAllocateInfo cmdbuf;
+ //no command buffer exists, create it.
+ cmdbuf.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
+ cmdbuf.pNext = NULL;
+ cmdbuf.commandPool = split_draw_list_allocators[i].command_pool;
+ cmdbuf.level = VK_COMMAND_BUFFER_LEVEL_SECONDARY;
+ cmdbuf.commandBufferCount = 1;
+
+ VkResult err = vkAllocateCommandBuffers(device, &cmdbuf, &command_buffer);
+ ERR_FAIL_COND_V(err, ERR_CANT_CREATE);
+
+ split_draw_list_allocators.write[i].command_buffers.push_back(command_buffer);
+ }
+ }
+ }
+
+ VkFramebuffer vkframebuffer;
+ VkRenderPass render_pass;
+
+ Error err = _draw_list_setup_framebuffer(framebuffer, p_initial_action, p_final_action, &vkframebuffer, &render_pass);
+ ERR_FAIL_COND_V(err != OK, ERR_CANT_CREATE);
+
+ VkCommandBuffer frame_command_buffer = frames[frame].draw_command_buffer;
+ err = _draw_list_render_pass_begin(framebuffer, p_initial_action, p_final_action, p_clear_colors, viewport_offset, viewport_size, vkframebuffer, render_pass, frame_command_buffer, VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS);
+
+ if (err != OK) {
+ return ERR_CANT_CREATE;
+ }
+
+ draw_list = memnew_arr(DrawList, p_splits);
+ draw_list_count = p_splits;
+ draw_list_split = true;
+
+ for (uint32_t i = 0; i < p_splits; i++) {
+
+ //take a command buffer and initialize it
+		VkCommandBuffer command_buffer = split_draw_list_allocators[i].command_buffers[frame];
+
+ VkCommandBufferInheritanceInfo inheritance_info;
+ inheritance_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO;
+ inheritance_info.pNext = NULL;
+ inheritance_info.renderPass = render_pass;
+ inheritance_info.subpass = 0;
+ inheritance_info.framebuffer = vkframebuffer;
+ inheritance_info.occlusionQueryEnable = false;
+ inheritance_info.queryFlags = 0; //?
+ inheritance_info.pipelineStatistics = 0;
+
+ VkCommandBufferBeginInfo cmdbuf_begin;
+ cmdbuf_begin.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
+ cmdbuf_begin.pNext = NULL;
+ cmdbuf_begin.flags = VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT | VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT;
+ cmdbuf_begin.pInheritanceInfo = &inheritance_info;
+
+ VkResult res = vkResetCommandBuffer(command_buffer, 0);
+ if (res) {
+ memdelete_arr(draw_list);
+ draw_list = NULL;
+ ERR_FAIL_V(ERR_CANT_CREATE);
+ }
+
+ res = vkBeginCommandBuffer(command_buffer, &cmdbuf_begin);
+ if (res) {
+ memdelete_arr(draw_list);
+ draw_list = NULL;
+ ERR_FAIL_V(ERR_CANT_CREATE);
+ }
+
+ draw_list[i].command_buffer = command_buffer;
+ draw_list[i].validation.framebuffer_format = framebuffer->format_id;
+
+ VkViewport viewport;
+ viewport.x = viewport_offset.x;
+ viewport.y = viewport_offset.y;
+ viewport.width = viewport_size.width;
+ viewport.height = viewport_size.height;
+ viewport.minDepth = 0;
+ viewport.maxDepth = 1.0;
+
+ vkCmdSetViewport(command_buffer, 0, 1, &viewport);
+
+ VkRect2D scissor;
+ scissor.offset.x = viewport_offset.x;
+ scissor.offset.y = viewport_offset.y;
+ scissor.extent.width = viewport_size.width;
+ scissor.extent.height = viewport_size.height;
+
+ vkCmdSetScissor(command_buffer, 0, 1, &scissor);
+		r_split_ids[i] = (ID(ID_TYPE_SPLIT_DRAW_LIST) << ID(ID_BASE_SHIFT)) + i;
+ }
+
+ return OK;
+}
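+
+//A minimal sketch of threaded recording with split lists (hypothetical caller
+//code; "rd", "fb", "final_action" and "clear_colors" are assumed to exist and
+//are not part of this file):
+//
+//	RenderingDevice::ID splits[2];
+//	rd->draw_list_begin_split(fb, 2, splits, RenderingDevice::INITIAL_ACTION_CLEAR, final_action, clear_colors, Rect2());
+//	//each thread then records into its own list, e.g. splits[0] and splits[1]
+//	rd->draw_list_end(); //executes the secondary buffers in split order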
+
+RenderingDeviceVulkan::DrawList *RenderingDeviceVulkan::_get_draw_list_ptr(ID p_id) {
+ if (p_id < 0) {
+ return NULL;
+ }
+
+ if (!draw_list) {
+ return NULL;
+ } else if (p_id == ID_TYPE_DRAW_LIST) {
+ if (draw_list_split) {
+ return NULL;
+ }
+ return draw_list;
+ } else if (p_id >> ID(ID_BASE_SHIFT) == ID_TYPE_SPLIT_DRAW_LIST) {
+ if (!draw_list_split) {
+ return NULL;
+ }
+
+ uint64_t index = p_id & ((ID(1) << ID(ID_BASE_SHIFT)) - 1); //mask
+
+ if (index >= draw_list_count) {
+ return NULL;
+ }
+
+ return &draw_list[index];
+ } else {
+ return NULL;
+ }
+}
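+
+//Example of the encoding resolved above (with ID_BASE_SHIFT = 58): the second
+//split list has ID (ID_TYPE_SPLIT_DRAW_LIST << 58) + 1, so the high bits select
+//the list type and the masked low bits are the split index.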
+
+void RenderingDeviceVulkan::draw_list_bind_render_pipeline(ID p_list, ID p_render_pipeline) {
+
+ DrawList *dl = _get_draw_list_ptr(p_list);
+ ERR_FAIL_COND(!dl);
+ ERR_FAIL_COND_MSG(!dl->validation.active, "Submitted Draw Lists can no longer be modified.");
+
+ const RenderPipeline *pipeline = pipeline_owner.getornull(p_render_pipeline);
+ ERR_FAIL_COND(!pipeline);
+
+ ERR_FAIL_COND(pipeline->framebuffer_format != dl->validation.framebuffer_format);
+
+ vkCmdBindPipeline(dl->command_buffer, VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline->pipeline);
+
+ //update render pass pipeline info
+ dl->validation.pipeline_active = true;
+ dl->validation.pipeline_dynamic_state = pipeline->dynamic_state;
+ dl->validation.pipeline_vertex_format = pipeline->vertex_format;
+ dl->validation.pipeline_uses_restart_indices = pipeline->uses_restart_indices;
+ dl->validation.pipeline_primitive_divisor = pipeline->primitive_divisor;
+
+ dl->validation.pipeline_primitive_minimum = pipeline->primitive_minimum;
+ dl->validation.pipeline_set_hashes = pipeline->set_hashes;
+}
+
+void RenderingDeviceVulkan::draw_list_bind_uniform_set(ID p_list, ID p_uniform_set, uint32_t p_index) {
+ ERR_FAIL_COND_MSG(p_index >= limits.maxBoundDescriptorSets,
+ "Attempting to bind a descriptor set (" + itos(p_index) + ") greater than what the hardware supports (" + itos(limits.maxBoundDescriptorSets) + ").");
+ DrawList *dl = _get_draw_list_ptr(p_list);
+ ERR_FAIL_COND(!dl);
+ ERR_FAIL_COND_MSG(!dl->validation.active, "Submitted Draw Lists can no longer be modified.");
+
+ const UniformSet *uniform_set = uniform_set_owner.getornull(p_uniform_set);
+ ERR_FAIL_COND(!uniform_set);
+
+ if ((uint32_t)dl->validation.set_hashes.size() <= p_index) {
+ uint32_t csize = dl->validation.set_hashes.size();
+		uint32_t new_size = p_index + 1;
+ dl->validation.set_hashes.resize(new_size);
+ for (uint32_t i = csize; i < new_size; i++) {
+ dl->validation.set_hashes.write[i] = 0;
+ }
+ }
+#ifdef DEBUG_ENABLED
+ //validate that textures used are not bound
+ //this can be a bit slow in large descriptor sets,
+ //so it's disabled on release
+ uint32_t tb_count = uniform_set->textures.size();
+ const ID *tb_ptr = uniform_set->textures.ptr();
+ uint32_t bound_count = draw_list_bound_textures.size();
+ const ID *bound_ptr = draw_list_bound_textures.ptr();
+ for (uint32_t i = 0; i < tb_count; i++) {
+ for (uint32_t j = 0; j < bound_count; j++) {
+ ERR_FAIL_COND_MSG(tb_ptr[i] == bound_ptr[j],
+ "Attempted to use the same texture in framebuffer attachment and a uniform set, this is not allowed.");
+ }
+ }
+#endif
+ dl->validation.set_hashes.write[p_index] = uniform_set->hash;
+
+ vkCmdBindDescriptorSets(dl->command_buffer, VK_PIPELINE_BIND_POINT_GRAPHICS, uniform_set->pipeline_layout, p_index, 1, &uniform_set->descriptor_set, 0, NULL);
+}
+
+void RenderingDeviceVulkan::draw_list_bind_vertex_array(ID p_list, ID p_vertex_array) {
+ DrawList *dl = _get_draw_list_ptr(p_list);
+ ERR_FAIL_COND(!dl);
+ ERR_FAIL_COND_MSG(!dl->validation.active, "Submitted Draw Lists can no longer be modified.");
+
+ const VertexArray *vertex_array = vertex_array_owner.getornull(p_vertex_array);
+ ERR_FAIL_COND(!vertex_array);
+ dl->validation.vertex_format = vertex_array->description;
+ dl->validation.vertex_array_size = vertex_array->vertex_count;
+ dl->validation.vertex_max_instances_allowed = vertex_array->max_instances_allowed;
+
+ vkCmdBindVertexBuffers(dl->command_buffer, 0, vertex_array->buffers.size(), vertex_array->buffers.ptr(), vertex_array->offsets.ptr());
+}
+void RenderingDeviceVulkan::draw_list_bind_index_array(ID p_list, ID p_index_array) {
+
+ DrawList *dl = _get_draw_list_ptr(p_list);
+ ERR_FAIL_COND(!dl);
+ ERR_FAIL_COND_MSG(!dl->validation.active, "Submitted Draw Lists can no longer be modified.");
+
+ const IndexArray *index_array = index_array_owner.getornull(p_index_array);
+ ERR_FAIL_COND(!index_array);
+
+ dl->validation.index_array_size = index_array->indices;
+ dl->validation.index_array_max_index = index_array->max_index;
+ dl->validation.index_array_offset = index_array->offset;
+
+ vkCmdBindIndexBuffer(dl->command_buffer, index_array->buffer, index_array->offset, index_array->index_type);
+}
+
+void RenderingDeviceVulkan::draw_list_draw(ID p_list, bool p_use_indices, uint32_t p_instances) {
+
+ DrawList *dl = _get_draw_list_ptr(p_list);
+ ERR_FAIL_COND(!dl);
+ ERR_FAIL_COND_MSG(!dl->validation.active, "Submitted Draw Lists can no longer be modified.");
+
+ ERR_FAIL_COND_MSG(!dl->validation.pipeline_active,
+ "No render pipeline was set before attempting to draw.");
+ if (dl->validation.pipeline_vertex_format != INVALID_ID) {
+ //pipeline uses vertices, validate format
+ ERR_FAIL_COND_MSG(dl->validation.vertex_format == INVALID_ID,
+ "No vertex array was bound, and render pipeline expects vertices.");
+ //make sure format is right
+ ERR_FAIL_COND_MSG(dl->validation.pipeline_vertex_format != dl->validation.vertex_format,
+ "The vertex format used to create the pipeline does not match the vertex format bound.");
+ //make sure amount of instances is valid
+ ERR_FAIL_COND_MSG(p_instances > dl->validation.vertex_max_instances_allowed,
+ "Amount of instances requested (" + itos(p_instances) + " is larger than the maximum amount suported by the bound vertex array (" + itos(dl->validation.vertex_max_instances_allowed) + ").");
+ }
+ //compare hashes
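+	//set hashes are a cheap stand-in for comparing full uniform layouts: the
+	//pipeline stores one hash per set it uses, and each must match the hash of
+	//the uniform set bound at that index (a 0 hash means the pipeline ignores that set)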
+ if (dl->validation.pipeline_set_hashes.size()) {
+ ERR_FAIL_COND_MSG(dl->validation.pipeline_set_hashes.size() > dl->validation.set_hashes.size(),
+ "Render pipeline requires uniform sets which were not set at the time of drawing.");
+
+ uint32_t hash_count = dl->validation.pipeline_set_hashes.size();
+ const uint32_t *phashes = dl->validation.pipeline_set_hashes.ptr();
+ const uint32_t *shashes = dl->validation.set_hashes.ptr();
+
+ for (uint32_t i = 0; i < hash_count; i++) {
+ if (phashes[i] == 0) {
+ continue; //not used by pipeline, no need to check
+ }
+ if (phashes[i] != shashes[i]) {
+ if (shashes[i] == 0) {
+ ERR_FAIL_MSG("Uniforms were never supplied for set (" + itos(i) + ") at the time of drawing, which are required by the pipeline");
+ } else {
+ ERR_FAIL_MSG("Uniforms supplied for set (" + itos(i) + ") are not the same format as required by the pipeline shader.");
+ }
+ }
+ }
+ }
+
+ if (p_use_indices) {
+ ERR_FAIL_COND_MSG(!dl->validation.index_array_size,
+ "Draw command requested indices, but no index buffer was set.");
+ if (dl->validation.pipeline_vertex_format != INVALID_ID) {
+ //uses vertices, do some vertex validations
+ ERR_FAIL_COND_MSG(dl->validation.vertex_array_size < dl->validation.index_array_max_index,
+ "Index array references (max index: " + itos(dl->validation.index_array_max_index) + ") indices beyond the vertex array size (" + itos(dl->validation.vertex_array_size) + ").");
+ }
+
+ ERR_FAIL_COND_MSG(dl->validation.pipeline_uses_restart_indices != dl->validation.index_buffer_uses_restart_indices,
+ "The usage of restart indices in index buffer does not match the render primitive in the pipeline.");
+
+ uint32_t to_draw = dl->validation.index_array_size;
+
+ ERR_FAIL_COND_MSG(to_draw < dl->validation.pipeline_primitive_minimum,
+ "Too few indices (" + itos(to_draw) + ") for the render primitive set in the render pipeline (" + itos(dl->validation.pipeline_primitive_minimum) + ").");
+
+ ERR_FAIL_COND_MSG((to_draw % dl->validation.pipeline_primitive_divisor) != 0,
+ "Index amount (" + itos(to_draw) + ") must be a multiple of the amount of indices required by the render primitive (" + itos(dl->validation.pipeline_primitive_divisor) + ").");
+
+ vkCmdDrawIndexed(dl->command_buffer, to_draw, p_instances, dl->validation.index_array_offset, 0, 0);
+ } else {
+ ERR_FAIL_COND_MSG(dl->validation.pipeline_vertex_format == INVALID_ID,
+ "Draw command lacks indices, but pipeline format does not use vertices.");
+
+ uint32_t to_draw = dl->validation.vertex_array_size;
+
+ ERR_FAIL_COND_MSG(to_draw < dl->validation.pipeline_primitive_minimum,
+ "Too few vertices (" + itos(to_draw) + ") for the render primitive set in the render pipeline (" + itos(dl->validation.pipeline_primitive_minimum) + ").");
+
+ ERR_FAIL_COND_MSG((to_draw % dl->validation.pipeline_primitive_divisor) != 0,
+ "Vertex amount (" + itos(to_draw) + ") must be a multiple of the amount of vertices required by the render primitive (" + itos(dl->validation.pipeline_primitive_divisor) + ").");
+
+ vkCmdDraw(dl->command_buffer, to_draw, p_instances, 0, 0);
+ }
+}
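+
+//Example of the primitive checks above: for triangles the divisor and minimum
+//are both 3, so a draw with 2 indices fails the minimum check and a draw with
+//7 indices fails the multiple-of-3 check.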
+
+void RenderingDeviceVulkan::draw_list_enable_scissor(ID p_list, const Rect2 &p_rect) {
+}
+void RenderingDeviceVulkan::draw_list_disable_scissor(ID p_list) {
+}
+
+void RenderingDeviceVulkan::draw_list_end() {
+
+ _THREAD_SAFE_METHOD_
+
+ ERR_FAIL_COND_MSG(!draw_list, "Immediate draw list is already inactive.");
+
+ if (draw_list_split) {
+ //send all command buffers
+ VkCommandBuffer *command_buffers = (VkCommandBuffer *)alloca(sizeof(VkCommandBuffer) * draw_list_count);
+ for (uint32_t i = 0; i < draw_list_count; i++) {
+			vkEndCommandBuffer(draw_list[i].command_buffer);
+			command_buffers[i] = draw_list[i].command_buffer;
+ }
+
+ vkCmdExecuteCommands(frames[frame].draw_command_buffer, draw_list_count, command_buffers);
+ vkCmdEndRenderPass(frames[frame].draw_command_buffer);
+ memdelete_arr(draw_list);
+ draw_list = NULL;
+
+ } else {
+ //just end the list
+ vkCmdEndRenderPass(draw_list->command_buffer);
+ memdelete(draw_list);
+ draw_list = NULL;
+ }
+
+ if (draw_list_unbind_textures) {
+ for (int i = 0; i < draw_list_bound_textures.size(); i++) {
+ Texture *texture = texture_owner.getornull(draw_list_bound_textures[i]);
+ ERR_CONTINUE(!texture); //wtf
+ texture->bound = false;
+ }
+ }
+ draw_list_bound_textures.clear();
+}
+#if 0
+void RenderingDeviceVulkan::draw_list_render_secondary_to_framebuffer(ID p_framebuffer, ID *p_draw_lists, uint32_t p_draw_list_count, InitialAction p_initial_action, FinalAction p_final_action, const Vector<Variant> &p_clear_colors) {
+
+ VkCommandBuffer frame_cmdbuf = frames[frame].frame_buffer;
+ ERR_FAIL_COND(!frame_cmdbuf);
+
+ VkRenderPassBeginInfo render_pass_begin;
+ render_pass_begin.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
+ render_pass_begin.pNext = NULL;
+ render_pass_begin.renderPass = context->get_render_pass();
+ render_pass_begin.framebuffer = context->get_frame_framebuffer(frame);
+
+ render_pass_begin.renderArea.extent.width = context->get_screen_width(p_screen);
+ render_pass_begin.renderArea.extent.height = context->get_screen_height(p_screen);
+ render_pass_begin.renderArea.offset.x = 0;
+ render_pass_begin.renderArea.offset.y = 0;
+
+ render_pass_begin.clearValueCount = 1;
+
+ VkClearValue clear_value;
+ clear_value.color.float32[0] = p_clear_color.r;
+ clear_value.color.float32[1] = p_clear_color.g;
+ clear_value.color.float32[2] = p_clear_color.b;
+ clear_value.color.float32[3] = p_clear_color.a;
+
+ render_pass_begin.pClearValues = &clear_value;
+
+ vkCmdBeginRenderPass(frame_cmdbuf, &render_pass_begin, VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS);
+
+ ID screen_format = screen_get_framebuffer_format();
+ {
+
+ VkCommandBuffer *command_buffers = (VkCommandBuffer *)alloca(sizeof(VkCommandBuffer) * p_draw_list_count);
+ uint32_t command_buffer_count = 0;
+
+ for (uint32_t i = 0; i < p_draw_list_count; i++) {
+ DrawList *dl = _get_draw_list_ptr(p_draw_lists[i]);
+ ERR_CONTINUE_MSG(!dl, "Draw list index (" + itos(i) + ") is not a valid draw list ID.");
+ ERR_CONTINUE_MSG(dl->validation.framebuffer_format != p_format_check,
+ "Draw list index (" + itos(i) + ") is created with a framebuffer format incompatible with this render pass.");
+
+ if (dl->validation.active) {
+ //needs to be closed, so close it.
+ vkEndCommandBuffer(dl->command_buffer);
+ dl->validation.active = false;
+ }
+
+ command_buffers[command_buffer_count++] = dl->command_buffer;
+ }
+
+ print_line("to draw: " + itos(command_buffer_count));
+ vkCmdExecuteCommands(p_primary, command_buffer_count, command_buffers);
+ }
+
+ vkCmdEndRenderPass(frame_cmdbuf);
+
+}
+#endif
+
+void RenderingDeviceVulkan::_free_internal(ID p_id) {
+
+	//push everything so it's disposed of the next time this frame index is processed (by then it's safe to delete it)
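+	//for example, with frame_count == 3, a texture freed while recording frame N
+	//is only destroyed when frame N comes around again, by which time the GPU
+	//can no longer be using it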
+ if (texture_owner.owns(p_id)) {
+ Texture *texture = texture_owner.getornull(p_id);
+ frames[frame].textures_to_dispose_of.push_back(*texture);
+ texture_owner.free(p_id);
+ } else if (framebuffer_owner.owns(p_id)) {
+ Framebuffer *framebuffer = framebuffer_owner.getornull(p_id);
+ frames[frame].framebuffers_to_dispose_of.push_back(*framebuffer);
+ framebuffer_owner.free(p_id);
+ } else if (sampler_owner.owns(p_id)) {
+ VkSampler *sampler = sampler_owner.getornull(p_id);
+ frames[frame].samplers_to_dispose_of.push_back(*sampler);
+ sampler_owner.free(p_id);
+ } else if (vertex_buffer_owner.owns(p_id)) {
+ Buffer *vertex_buffer = vertex_buffer_owner.getornull(p_id);
+ frames[frame].buffers_to_dispose_of.push_back(*vertex_buffer);
+ vertex_buffer_owner.free(p_id);
+ } else if (vertex_array_owner.owns(p_id)) {
+ vertex_array_owner.free(p_id);
+ } else if (index_buffer_owner.owns(p_id)) {
+ IndexBuffer *index_buffer = index_buffer_owner.getornull(p_id);
+ Buffer b;
+ b.allocation = index_buffer->allocation;
+ b.buffer = index_buffer->buffer;
+ frames[frame].buffers_to_dispose_of.push_back(b);
+ index_buffer_owner.free(p_id);
+ } else if (index_array_owner.owns(p_id)) {
+ index_array_owner.free(p_id);
+ } else if (shader_owner.owns(p_id)) {
+ Shader *shader = shader_owner.getornull(p_id);
+ frames[frame].shaders_to_dispose_of.push_back(*shader);
+ shader_owner.free(p_id);
+ } else if (uniform_buffer_owner.owns(p_id)) {
+ Buffer *uniform_buffer = uniform_buffer_owner.getornull(p_id);
+ frames[frame].buffers_to_dispose_of.push_back(*uniform_buffer);
+ uniform_buffer_owner.free(p_id);
+ } else if (texture_buffer_owner.owns(p_id)) {
+ TextureBuffer *texture_buffer = texture_buffer_owner.getornull(p_id);
+ frames[frame].buffers_to_dispose_of.push_back(texture_buffer->buffer);
+ frames[frame].buffer_views_to_dispose_of.push_back(texture_buffer->view);
+ texture_buffer_owner.free(p_id);
+ } else if (storage_buffer_owner.owns(p_id)) {
+ Buffer *storage_buffer = storage_buffer_owner.getornull(p_id);
+ frames[frame].buffers_to_dispose_of.push_back(*storage_buffer);
+ storage_buffer_owner.free(p_id);
+ } else if (uniform_set_owner.owns(p_id)) {
+ UniformSet *uniform_set = uniform_set_owner.getornull(p_id);
+ frames[frame].uniform_sets_to_dispose_of.push_back(*uniform_set);
+ uniform_set_owner.free(p_id);
+ } else if (pipeline_owner.owns(p_id)) {
+ RenderPipeline *pipeline = pipeline_owner.getornull(p_id);
+ frames[frame].pipelines_to_dispose_of.push_back(*pipeline);
+ pipeline_owner.free(p_id);
+ } else {
+ ERR_PRINT("Attempted to free invalid ID: " + itos(p_id));
+ }
+}
+void RenderingDeviceVulkan::free(ID p_id) {
+
+ _THREAD_SAFE_METHOD_
+
+ _free_dependencies(p_id); //recursively erase dependencies first, to avoid potential API problems
+ _free_internal(p_id);
+}
+
+void RenderingDeviceVulkan::finalize_frame() {
+
+ _THREAD_SAFE_METHOD_
+
+ if (draw_list) {
+ ERR_PRINT("Found open draw list at the end of the frame, this should never happen (further drawing will likely not work).");
+ }
+
+	{ //complete the setup and draw command buffers (the setup buffer needs to be processed before anything else)
+ vkEndCommandBuffer(frames[frame].setup_command_buffer);
+ vkEndCommandBuffer(frames[frame].draw_command_buffer);
+ }
+}
+
+void RenderingDeviceVulkan::_free_pending_resources() {
+ //free in dependency usage order, so nothing weird happens
+
+ //pipelines
+ while (frames[frame].pipelines_to_dispose_of.front()) {
+ RenderPipeline *pipeline = &frames[frame].pipelines_to_dispose_of.front()->get();
+
+ vkDestroyPipeline(device, pipeline->pipeline, NULL);
+
+ frames[frame].pipelines_to_dispose_of.pop_front();
+ }
+
+ //uniform sets
+ while (frames[frame].uniform_sets_to_dispose_of.front()) {
+ UniformSet *uniform_set = &frames[frame].uniform_sets_to_dispose_of.front()->get();
+
+ vkFreeDescriptorSets(device, uniform_set->pool->pool, 1, &uniform_set->descriptor_set);
+ _descriptor_pool_free(uniform_set->pool_key, uniform_set->pool);
+
+ frames[frame].uniform_sets_to_dispose_of.pop_front();
+ }
+
+ //buffer views
+ while (frames[frame].buffer_views_to_dispose_of.front()) {
+ VkBufferView buffer_view = frames[frame].buffer_views_to_dispose_of.front()->get();
+
+ vkDestroyBufferView(device, buffer_view, NULL);
+
+ frames[frame].buffer_views_to_dispose_of.pop_front();
+ }
+
+ //shaders
+ while (frames[frame].shaders_to_dispose_of.front()) {
+ Shader *shader = &frames[frame].shaders_to_dispose_of.front()->get();
+
+ //descriptor set layout for each set
+ for (int i = 0; i < shader->sets.size(); i++) {
+ vkDestroyDescriptorSetLayout(device, shader->sets[i].descriptor_set_layout, NULL);
+ }
+
+ //pipeline layout
+ vkDestroyPipelineLayout(device, shader->pipeline_layout, NULL);
+
+ //shaders themselves
+ for (int i = 0; i < shader->pipeline_stages.size(); i++) {
+ vkDestroyShaderModule(device, shader->pipeline_stages[i].module, NULL);
+ }
+
+ frames[frame].shaders_to_dispose_of.pop_front();
+ }
+
+ //samplers
+ while (frames[frame].samplers_to_dispose_of.front()) {
+ VkSampler sampler = frames[frame].samplers_to_dispose_of.front()->get();
+
+ vkDestroySampler(device, sampler, NULL);
+
+ frames[frame].samplers_to_dispose_of.pop_front();
+ }
+
+ //framebuffers
+ while (frames[frame].framebuffers_to_dispose_of.front()) {
+ Framebuffer *framebuffer = &frames[frame].framebuffers_to_dispose_of.front()->get();
+
+ for (Map<Framebuffer::VersionKey, Framebuffer::Version>::Element *E = framebuffer->framebuffers.front(); E; E = E->next()) {
+ //first framebuffer, then render pass because it depends on it
+ vkDestroyFramebuffer(device, E->get().framebuffer, NULL);
+ vkDestroyRenderPass(device, E->get().render_pass, NULL);
+ }
+
+ frames[frame].framebuffers_to_dispose_of.pop_front();
+ }
+
+ //textures
+ while (frames[frame].textures_to_dispose_of.front()) {
+ Texture *texture = &frames[frame].textures_to_dispose_of.front()->get();
+
+ if (texture->bound) {
+ WARN_PRINT("Deleted a texture while it was bound..");
+ }
+ vkDestroyImageView(device, texture->view, NULL);
+ if (texture->owner == INVALID_ID) {
+ //actually owns the image and the allocation too
+			vmaDestroyImage(allocator, texture->image, texture->allocation); //this also frees the allocation, so a separate vmaFreeMemory() would double free it
+ }
+ frames[frame].textures_to_dispose_of.pop_front();
+ }
+
+ //buffers
+ while (frames[frame].buffers_to_dispose_of.front()) {
+ _buffer_free(&frames[frame].buffers_to_dispose_of.front()->get());
+
+ frames[frame].buffers_to_dispose_of.pop_front();
+ }
+}
+
+void RenderingDeviceVulkan::advance_frame() {
+
+ _THREAD_SAFE_METHOD_
+
+ //advance the frame
+ frame = (frame + 1) % frame_count;
+
+ //erase pending resources
+ _free_pending_resources();
+
+ //create setup command buffer and set as the setup buffer
+
+ {
+ VkCommandBufferBeginInfo cmdbuf_begin;
+ cmdbuf_begin.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
+ cmdbuf_begin.pNext = NULL;
+ cmdbuf_begin.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
+ cmdbuf_begin.pInheritanceInfo = NULL;
+
+ VkResult err = vkResetCommandBuffer(frames[frame].setup_command_buffer, 0);
+ ERR_FAIL_COND(err);
+
+ err = vkBeginCommandBuffer(frames[frame].setup_command_buffer, &cmdbuf_begin);
+ ERR_FAIL_COND(err);
+ context->set_setup_buffer(frames[frame].setup_command_buffer); //append now so it's added before everything else
+ err = vkBeginCommandBuffer(frames[frame].draw_command_buffer, &cmdbuf_begin);
+ ERR_FAIL_COND(err);
+ context->append_command_buffer(frames[frame].draw_command_buffer);
+ }
+
+ //advance current frame
+ frames_drawn++;
+ //advance staging buffer if used
+ if (staging_buffer_used) {
+ staging_buffer_current = (staging_buffer_current + 1) % staging_buffer_blocks.size();
+ staging_buffer_used = false;
+ }
+}
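+
+//The intended per-frame cycle (a sketch of the calling order, driven from the
+//OS layer rather than from this file): record work on the setup/draw buffers,
+//then finalize_frame() closes them, the context submits and presents, and
+//advance_frame() recycles resources and begins the next frame's buffers.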
+
+void RenderingDeviceVulkan::initialize(VulkanContext *p_context) {
+
+ context = p_context;
+ device = p_context->get_device();
+ frame_count = p_context->get_frame_count();
+ limits = p_context->get_device_limits();
+
+ { //initialize allocator
+
+ VmaAllocatorCreateInfo allocatorInfo;
+ memset(&allocatorInfo, 0, sizeof(VmaAllocatorCreateInfo));
+ allocatorInfo.physicalDevice = p_context->get_physical_device();
+ allocatorInfo.device = device;
+ vmaCreateAllocator(&allocatorInfo, &allocator);
+ }
+
+ frames = memnew_arr(Frame, frame_count);
+ frame = 0;
+ //create setup and frame buffers
+ for (int i = 0; i < frame_count; i++) {
+
+ { //create command pool, one per frame is recommended
+ VkCommandPoolCreateInfo cmd_pool_info;
+ cmd_pool_info.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
+ cmd_pool_info.pNext = NULL;
+ cmd_pool_info.queueFamilyIndex = p_context->get_graphics_queue();
+ cmd_pool_info.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
+
+ VkResult res = vkCreateCommandPool(device, &cmd_pool_info, NULL, &frames[i].command_pool);
+ ERR_FAIL_COND(res);
+ }
+
+ { //create command buffers
+
+ VkCommandBufferAllocateInfo cmdbuf;
+ //no command buffer exists, create it.
+ cmdbuf.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
+ cmdbuf.pNext = NULL;
+ cmdbuf.commandPool = frames[i].command_pool;
+ cmdbuf.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
+ cmdbuf.commandBufferCount = 1;
+
+ VkResult err = vkAllocateCommandBuffers(device, &cmdbuf, &frames[i].setup_command_buffer);
+ ERR_CONTINUE(err);
+
+ err = vkAllocateCommandBuffers(device, &cmdbuf, &frames[i].draw_command_buffer);
+ ERR_CONTINUE(err);
+ }
+ }
+
+ {
+ //begin the first command buffer for the first frame, so
+ //setting up things can be done in the meantime until finalize_frame(), which is called before advance.
+ VkCommandBufferBeginInfo cmdbuf_begin;
+ cmdbuf_begin.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
+ cmdbuf_begin.pNext = NULL;
+ cmdbuf_begin.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
+ cmdbuf_begin.pInheritanceInfo = NULL;
+
+ VkResult err = vkBeginCommandBuffer(frames[0].setup_command_buffer, &cmdbuf_begin);
+ ERR_FAIL_COND(err);
+ context->set_setup_buffer(frames[0].setup_command_buffer); //append now so it's added before everything else
+
+ err = vkBeginCommandBuffer(frames[0].draw_command_buffer, &cmdbuf_begin);
+ ERR_FAIL_COND(err);
+ context->append_command_buffer(frames[0].draw_command_buffer);
+ }
+
+ staging_buffer_block_size = GLOBAL_DEF("rendering/vulkan/staging_buffer/block_size_kb", 256);
+ staging_buffer_block_size = MAX(4, staging_buffer_block_size);
+ staging_buffer_block_size *= 1024; //kb -> bytes
+ staging_buffer_max_size = GLOBAL_DEF("rendering/vulkan/staging_buffer/max_size_mb", 128);
+ staging_buffer_max_size = MAX(1, staging_buffer_max_size);
+ staging_buffer_max_size *= 1024 * 1024;
+
+ if (staging_buffer_max_size < staging_buffer_block_size * 4) {
+ //validate enough blocks
+ staging_buffer_max_size = staging_buffer_block_size * 4;
+ }
+ texture_upload_region_size_px = GLOBAL_DEF("rendering/vulkan/staging_buffer/texture_upload_region_size_px", 64);
+ texture_upload_region_size_px = nearest_power_of_2_templated(texture_upload_region_size_px);
+ print_line("update size: " + itos(texture_upload_region_size_px));
+
+ frames_drawn = frame_count; //start from frame count, so everything else is immediately old
+
+ //ensure current staging block is valid and at least one per frame exists
+ staging_buffer_current = 0;
+ staging_buffer_used = false;
+
+ for (int i = 0; i < frame_count; i++) {
+ //staging was never used, create a block
+ Error err = _insert_staging_block();
+ ERR_CONTINUE(err != OK);
+ }
+
+ max_descriptors_per_pool = GLOBAL_DEF("rendering/vulkan/descriptor_pools/max_descriptors_per_pool", 64);
+
+ //check to make sure DescriptorPoolKey is good
+ ERR_FAIL_COND(sizeof(uint64_t) * 3 < UNIFORM_TYPE_MAX * sizeof(uint16_t));
+
+ draw_list = NULL;
+ draw_list_count = 0;
+ draw_list_split = false;
+}
+void RenderingDeviceVulkan::finalize() {
+
+ memdelete_arr(frames);
+}
+
+RenderingDeviceVulkan::RenderingDeviceVulkan() {
+}
diff --git a/drivers/vulkan/rendering_device_vulkan.h b/drivers/vulkan/rendering_device_vulkan.h
new file mode 100644
index 0000000000..ca1cb89480
--- /dev/null
+++ b/drivers/vulkan/rendering_device_vulkan.h
@@ -0,0 +1,830 @@
+#ifndef RENDERING_DEVICE_VULKAN_H
+#define RENDERING_DEVICE_VULKAN_H
+
+#include "core/oa_hash_map.h"
+#include "core/os/thread_safe.h"
+#include "servers/visual/rendering_device.h"
+#include "thirdparty/glslang/glslang/Public/ShaderLang.h"
+#include "vk_mem_alloc.h"
+#include <vulkan/vulkan.h>
+
+//todo:
+//compute
+//push constants
+//views of texture slices
+
+class VulkanContext;
+
+class RenderingDeviceVulkan : public RenderingDevice {
+
+ _THREAD_SAFE_CLASS_
+
+ // Miscellaneous tables that map
+ // our enums to enums used
+	// by Vulkan.
+
+ VkPhysicalDeviceLimits limits;
+ static const VkFormat vulkan_formats[DATA_FORMAT_MAX];
+ static const char *named_formats[DATA_FORMAT_MAX];
+ static const VkCompareOp compare_operators[COMPARE_OP_MAX];
+ static const VkStencilOp stencil_operations[STENCIL_OP_MAX];
+ static const VkSampleCountFlagBits rasterization_sample_count[TEXTURE_SAMPLES_MAX];
+ static const VkLogicOp logic_operations[RenderingDevice::LOGIC_OP_MAX];
+ static const VkBlendFactor blend_factors[RenderingDevice::BLEND_FACTOR_MAX];
+ static const VkBlendOp blend_operations[RenderingDevice::BLEND_OP_MAX];
+ static const VkSamplerAddressMode address_modes[SAMPLER_REPEAT_MODE_MAX];
+ static const VkBorderColor sampler_border_colors[SAMPLER_BORDER_COLOR_MAX];
+
+	// Functions used for format
+	// validation; they ensure the
+	// user passes valid data.
+
+ static int get_format_vertex_size(DataFormat p_format);
+ static uint32_t get_image_format_pixel_size(DataFormat p_format);
+ static void get_compressed_image_format_block_dimensions(DataFormat p_format, uint32_t &r_w, uint32_t &r_h);
+ uint32_t get_compressed_image_format_block_byte_size(DataFormat p_format);
+ static uint32_t get_compressed_image_format_pixel_rshift(DataFormat p_format);
+ static uint32_t get_image_format_required_size(DataFormat p_format, uint32_t p_width, uint32_t p_height, uint32_t p_depth, uint32_t p_mipmap, uint32_t *r_blockw = NULL, uint32_t *r_blockh = NULL);
+ static uint32_t get_image_required_mipmaps(uint32_t p_width, uint32_t p_height, uint32_t p_depth);
+
+ /***************************/
+ /**** ID INFRASTRUCTURE ****/
+ /***************************/
+
+	// Everything is exposed to the user
+	// as IDs instead of pointers. This
+	// has a negligible CPU performance
+	// impact (open addressing is used to
+	// improve cache efficiency), and it
+	// adds a safety layer that makes sure
+	// the user can't screw up.
+
+ enum IDType {
+ ID_TYPE_TEXTURE,
+ ID_TYPE_FRAMEBUFFER_FORMAT,
+ ID_TYPE_FRAMEBUFFER,
+ ID_TYPE_SAMPLER,
+ ID_TYPE_VERTEX_DESCRIPTION,
+ ID_TYPE_VERTEX_BUFFER,
+ ID_TYPE_INDEX_BUFFER,
+ ID_TYPE_VERTEX_ARRAY,
+ ID_TYPE_INDEX_ARRAY,
+ ID_TYPE_SHADER,
+ ID_TYPE_UNIFORM_BUFFER,
+ ID_TYPE_STORAGE_BUFFER,
+ ID_TYPE_TEXTURE_BUFFER,
+ ID_TYPE_UNIFORM_SET,
+ ID_TYPE_RENDER_PIPELINE,
+ ID_TYPE_DRAW_LIST_THREAD_CONTEXT,
+ ID_TYPE_DRAW_LIST,
+ ID_TYPE_SPLIT_DRAW_LIST,
+ ID_TYPE_MAX,
+ ID_BASE_SHIFT = 58 //5 bits for ID types
+ };
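+
+	// With ID_BASE_SHIFT = 58, an ID is laid out as (type << 58) + counter;
+	// e.g. the first texture ever created gets (ID_TYPE_TEXTURE << 58) + 1 == 1.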
+
+ VkDevice device;
+
+ // this is meant to be fast, not flexible
+ // so never keep pointers to the elements
+ // inside this structure
+
+ template <class T, IDType id_type>
+ class ID_Pool {
+ ID counter;
+ OAHashMap<ID, T> map;
+
+ public:
+ ID make_id(const T &p_instance) {
+ ID new_id = (ID(id_type) << ID_BASE_SHIFT) + counter;
+ counter++;
+ map.insert(new_id, p_instance);
+ return new_id;
+ }
+
+ bool owns(ID p_id) const {
+ if (p_id <= 0 || (p_id >> ID_BASE_SHIFT) != id_type) {
+ return false;
+ }
+
+ return map.has(p_id);
+ }
+
+ T *getornull(ID p_id) const {
+ if (p_id <= 0 || (p_id >> ID_BASE_SHIFT) != id_type) {
+ return NULL;
+ }
+
+ return map.lookup_ptr(p_id);
+ }
+
+ void free(ID p_id) {
+ ERR_FAIL_COND(p_id <= 0 || (p_id >> ID_BASE_SHIFT) != id_type);
+ map.remove(p_id);
+ }
+
+ ID_Pool() {
+ counter = 1;
+ }
+ };
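+
+	// A minimal sketch of how these pools are used internally
+	// (see the .cpp for the real calls):
+	//
+	//	Texture t;
+	//	ID id = texture_owner.make_id(t); //(ID_TYPE_TEXTURE << 58) + n
+	//	Texture *ptr = texture_owner.getornull(id); //NULL if the ID is of another type
+	//	texture_owner.free(id);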
+
+ Map<ID, Set<ID> > dependency_map; //IDs to IDs that depend on it
+ Map<ID, Set<ID> > reverse_dependency_map; //same as above, but in reverse
+
+ void _add_dependency(ID p_id, ID p_depends_on);
+ void _free_dependencies(ID p_id);
+
+ /*****************/
+ /**** TEXTURE ****/
+ /*****************/
+
+	// In Vulkan, the concept of textures does not exist.
+	// Instead there is the image (pretty much the memory),
+	// the view (how the memory is interpreted) and the
+	// sampler (how it's sampled from the shader).
+	//
+	// Texture here includes the first two stages, but
+	// it's possible to create textures sharing the image
+	// but with different views. The main use case for this
+	// is textures that can be read as both SRGB/Linear,
+	// or slices of a texture (a mipmap, a layer, a 3D slice)
+	// for a framebuffer to render into.
+
+ struct Texture {
+
+ VkImage image;
+ VmaAllocation allocation;
+ VmaAllocationInfo allocation_info;
+ VkImageView view;
+
+ TextureType type;
+ DataFormat format;
+ TextureSamples samples;
+ uint32_t width;
+ uint32_t height;
+ uint32_t depth;
+ uint32_t layers;
+ uint32_t mipmaps;
+ uint32_t usage_flags;
+
+		VkImageLayout bound_layout; //layout used when bound to a framebuffer
+		VkImageLayout reading_layout; //layout used for reading
+ uint32_t aspect_mask;
+		bool bound; //bound to framebuffer
+ ID owner;
+ };
+
+ ID_Pool<Texture, ID_TYPE_TEXTURE> texture_owner;
+ uint32_t texture_upload_region_size_px;
+
+ /*****************/
+ /**** SAMPLER ****/
+ /*****************/
+
+ ID_Pool<VkSampler, ID_TYPE_SAMPLER> sampler_owner;
+
+ /***************************/
+ /**** BUFFER MANAGEMENT ****/
+ /***************************/
+
+	// These are temporary buffers in CPU-visible memory that hold
+	// the information until the GPU fetches it and places it
+	// either in GPU buffers, or in images (textures). This ensures
+	// updates are properly synchronized with whatever the
+	// GPU is doing.
+	//
+	// The logic here is as follows: only 3 of these
+	// blocks are created at the beginning (one per frame).
+	// Each can belong to a frame (assigned to the current one
+	// when used), and they can only be reused after the same
+	// frame comes around again.
+	//
+	// When the CPU needs to allocate more than what is available,
+	// more of these buffers are created. If a limit is reached,
+	// then a fence will wait for blocks allocated
+	// in previous frames to be processed. If that fails, then
+	// another fence will wait for everything pending in the
+	// current frame to be processed (effectively stalling).
+	//
+	// See the comments in the code to understand better how it works.
+
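+	// For example, with the default 256 KiB block size
+	// and 3 frames in flight: a block filled while
+	// recording frame 0 can only be reused once frame 0
+	// comes around again, because by then the GPU is
+	// guaranteed to have consumed it.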
+ struct StagingBufferBlock {
+ VkBuffer buffer;
+ VmaAllocation allocation;
+ uint64_t frame_used;
+ uint32_t fill_amount;
+ };
+
+ Vector<StagingBufferBlock> staging_buffer_blocks;
+ int staging_buffer_current;
+ uint32_t staging_buffer_block_size;
+ uint64_t staging_buffer_max_size;
+ bool staging_buffer_used;
+
+ Error _staging_buffer_allocate(uint32_t p_amount, uint32_t p_required_align, uint32_t &r_alloc_offset, uint32_t &r_alloc_size, bool p_can_segment = true, bool p_on_draw_command_buffer = false);
+ Error _insert_staging_block();
+
+ struct Buffer {
+
+ uint32_t size;
+ VkBuffer buffer;
+ VmaAllocation allocation;
+ VkDescriptorBufferInfo buffer_info; //used for binding
+ Buffer() {
+ size = 0;
+ buffer = NULL;
+ allocation = NULL;
+ }
+ };
+
+ Error _buffer_allocate(Buffer *p_buffer, uint32_t p_size, uint32_t p_usage, VmaMemoryUsage p_mapping);
+ Error _buffer_free(Buffer *p_buffer);
+ Error _buffer_update(Buffer *p_buffer, size_t p_offset, const uint8_t *p_data, size_t p_data_size, bool p_use_draw_command_buffer = false, uint32_t p_required_align = 32);
+
+ /*********************/
+ /**** FRAMEBUFFER ****/
+ /*********************/
+
+ // In Vulkan, framebuffers work similar to how they
+ // do in OpenGL, with the exception that
+ // the "format" (vkRenderPass) is not dynamic
+ // and must be more or less the same as the one
+ // used for the render pipelines.
+
+ struct FramebufferFormatKey {
+ Vector<AttachmentFormat> attachments;
+ bool operator<(const FramebufferFormatKey &p_key) const {
+
+ int as = attachments.size();
+ int bs = p_key.attachments.size();
+ if (as != bs) {
+ return as < bs;
+ }
+
+ const AttachmentFormat *af_a = attachments.ptr();
+ const AttachmentFormat *af_b = p_key.attachments.ptr();
+ for (int i = 0; i < as; i++) {
+ const AttachmentFormat &a = af_a[i];
+ const AttachmentFormat &b = af_b[i];
+ if (a.format != b.format) {
+ return a.format < b.format;
+ }
+ if (a.samples != b.samples) {
+ return a.samples < b.samples;
+ }
+ if (a.usage_flags != b.usage_flags) {
+ return a.usage_flags < b.usage_flags;
+ }
+ }
+
+ return false; //equal
+ }
+ };
+
+ VkRenderPass _render_pass_create(const Vector<AttachmentFormat> &p_format, InitialAction p_initial_action, FinalAction p_final_action, int *r_color_attachment_count = NULL);
+
+	// This is a cache and it's never freed; it ensures
+ // IDs for a given format are always unique.
+ Map<FramebufferFormatKey, ID> framebuffer_format_cache;
+ struct FramebufferFormat {
+ const Map<FramebufferFormatKey, ID>::Element *E;
+ VkRenderPass render_pass; //here for constructing shaders, never used, see section (7.2. Render Pass Compatibility from Vulkan spec)
+ int color_attachments; //used for pipeline validation
+ };
+
+ Map<ID, FramebufferFormat> framebuffer_formats;
+
+ struct Framebuffer {
+ ID format_id;
+ struct VersionKey {
+ InitialAction initial_action;
+ FinalAction final_action;
+ bool operator<(const VersionKey &p_key) const {
+ if (initial_action == p_key.initial_action) {
+ return final_action < p_key.final_action;
+ } else {
+ return initial_action < p_key.initial_action;
+ }
+ }
+ };
+
+ Vector<ID> texture_ids;
+
+ struct Version {
+ VkFramebuffer framebuffer;
+ VkRenderPass render_pass; //this one is owned
+ };
+
+ Map<VersionKey, Version> framebuffers;
+ Size2 size;
+ };
+
+ ID_Pool<Framebuffer, ID_TYPE_FRAMEBUFFER> framebuffer_owner;
+
+ /***********************/
+ /**** VERTEX BUFFER ****/
+ /***********************/
+
+ // Vertex buffers in Vulkan are similar to how
+ // they work in OpenGL, except that instead of
+	// an attribute index, there is a buffer binding
+ // index (for binding the buffers in real-time)
+ // and a location index (what is used in the shader).
+ //
+ // This mapping is done here internally, and it's not
+ // exposed.
+
+ ID_Pool<Buffer, ID_TYPE_VERTEX_BUFFER> vertex_buffer_owner;
+
+ struct VertexDescriptionKey {
+ Vector<VertexDescription> vertex_descriptions;
+ int buffer_count;
+ bool operator<(const VertexDescriptionKey &p_key) const {
+ if (buffer_count != p_key.buffer_count) {
+ return buffer_count < p_key.buffer_count;
+ }
+ if (vertex_descriptions.size() != p_key.vertex_descriptions.size()) {
+ return vertex_descriptions.size() < p_key.vertex_descriptions.size();
+ } else {
+ int vdc = vertex_descriptions.size();
+ const VertexDescription *a_ptr = vertex_descriptions.ptr();
+ const VertexDescription *b_ptr = p_key.vertex_descriptions.ptr();
+ for (int i = 0; i < vdc; i++) {
+ const VertexDescription &a = a_ptr[i];
+ const VertexDescription &b = b_ptr[i];
+
+ if (a.location != b.location) {
+ return a.location < b.location;
+ }
+ if (a.offset != b.offset) {
+ return a.offset < b.offset;
+ }
+ if (a.format != b.format) {
+ return a.format < b.format;
+ }
+ if (a.stride != b.stride) {
+ return a.stride < b.stride;
+ }
+					if (a.frequency != b.frequency) {
+						return a.frequency < b.frequency;
+					}
+ }
+ return false; //they are equal
+ }
+ }
+ };
+
+	// This is a cache and it's never freed; it ensures that
+	// the ID used for a specific format always remains the same.
+ Map<VertexDescriptionKey, ID> vertex_description_cache;
+ struct VertexDescriptionCache {
+ const Map<VertexDescriptionKey, ID>::Element *E;
+ VkVertexInputBindingDescription *bindings;
+ VkVertexInputAttributeDescription *attributes;
+ VkPipelineVertexInputStateCreateInfo create_info;
+ };
+
+ Map<ID, VertexDescriptionCache> vertex_descriptions;
+
+ struct VertexArray {
+ ID buffer;
+ ID description;
+ int vertex_count;
+ uint32_t max_instances_allowed;
+
+ Vector<VkBuffer> buffers; //not owned, just referenced
+ Vector<VkDeviceSize> offsets;
+ };
+
+ ID_Pool<VertexArray, ID_TYPE_VERTEX_ARRAY> vertex_array_owner;
+
+ struct IndexBuffer : public Buffer {
+ uint32_t max_index; //used for validation
+ uint32_t index_count;
+ VkIndexType index_type;
+ bool supports_restart_indices;
+ };
+
+ ID_Pool<IndexBuffer, ID_TYPE_INDEX_BUFFER> index_buffer_owner;
+
+ struct IndexArray {
+ uint32_t max_index; //remember the maximum index here too, for validation
+ VkBuffer buffer; //not owned, inherited from index buffer
+ uint32_t offset;
+ uint32_t indices;
+ VkIndexType index_type;
+ bool supports_restart_indices;
+ };
+
+ ID_Pool<IndexArray, ID_TYPE_INDEX_ARRAY> index_array_owner;
+
+ /****************/
+ /**** SHADER ****/
+ /****************/
+
+ // Shaders in Vulkan are just pretty much
+ // precompiled blocks of SPIR-V bytecode. They
+	// are most likely not really compiled to native
+	// GPU code until a pipeline is created.
+ //
+ // When supplying the shaders, this implementation
+ // will use the reflection abilities of glslang to
+ // understand and cache everything required to
+ // create and use the descriptor sets (Vulkan's
+ // biggest pain).
+ //
+ // Additionally, hashes are created for every set
+	// to do quick validation and ensure the user
+ // does not submit something invalid.
+
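+	// For example, a shader using set 0 for per-frame
+	// data and set 1 for per-material data gets one hash
+	// per set; any uniform set whose layout reproduces
+	// that hash can be bound at that index.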
+ struct Shader {
+
+ struct UniformInfo {
+ UniformType type;
+ int binding;
+ uint32_t stages;
+ int length; //size of arrays (in total elements), or ubos (in bytes * total elements)
+ bool operator<(const UniformInfo &p_info) const {
+ if (type != p_info.type) {
+ return type < p_info.type;
+ }
+ if (binding != p_info.binding) {
+ return binding < p_info.binding;
+ }
+ if (stages != p_info.stages) {
+ return stages < p_info.stages;
+ }
+ return length < p_info.length;
+ }
+ };
+
+ struct Set {
+
+ Vector<UniformInfo> uniform_info;
+ VkDescriptorSetLayout descriptor_set_layout;
+ };
+
+ Vector<int> vertex_input_locations; //inputs used, this is mostly for validation
+ int fragment_outputs;
+
+ int max_output;
+ Vector<Set> sets;
+ Vector<uint32_t> set_hashes;
+ Vector<VkPipelineShaderStageCreateInfo> pipeline_stages;
+ VkPipelineLayout pipeline_layout;
+ };
+
+ bool _uniform_add_binding(Vector<Vector<VkDescriptorSetLayoutBinding> > &bindings, Vector<Vector<Shader::UniformInfo> > &uniform_infos, const glslang::TObjectReflection &reflection, RenderingDevice::ShaderStage p_stage, String *r_error);
+
+ ID_Pool<Shader, ID_TYPE_SHADER> shader_owner;
+
+ /******************/
+ /**** UNIFORMS ****/
+ /******************/
+
+	// Descriptor sets require allocation from a pool.
+	// The documentation on how to use pools properly
+	// is scarce, and what little exists is confusing.
+	//
+	// Basically, you can mix and match pools as you
+	// like, but you'll run into fragmentation issues.
+	// Because of this, the recommended approach is to
+	// create a pool for every descriptor set type,
+	// as this prevents fragmentation.
+	//
+	// This is implemented here by keeping a list of
+	// pools (each can contain up to 64 sets) for each
+	// set layout. The amount of sets of each type
+	// is used as the key.
+
+ enum {
+ MAX_DESCRIPTOR_POOL_ELEMENT = 65535
+ };
+
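+	// For example, a hypothetical set with two sampled
+	// textures and one uniform buffer yields a key whose
+	// per-type counts are 2 and 1; every set with the
+	// same counts allocates from the same list of pools.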
+ struct DescriptorPoolKey {
+ union {
+ struct {
+ uint16_t uniform_type[UNIFORM_TYPE_MAX]; //using 16 bits because, for sending arrays, each element is a pool set.
+ };
+ struct {
+ uint64_t key1;
+ uint64_t key2;
+ uint64_t key3;
+ };
+ };
+ bool operator<(const DescriptorPoolKey &p_key) const {
+ if (key1 != p_key.key1) {
+ return key1 < p_key.key1;
+ }
+ if (key2 != p_key.key2) {
+ return key2 < p_key.key2;
+ }
+
+ return key3 < p_key.key3;
+ }
+ DescriptorPoolKey() {
+ key1 = 0;
+ key2 = 0;
+ key3 = 0;
+ }
+ };
+
+ struct DescriptorPool {
+ VkDescriptorPool pool;
+ uint32_t usage;
+ };
+
+ Map<DescriptorPoolKey, Set<DescriptorPool *> > descriptor_pools;
+ uint32_t max_descriptors_per_pool;
+
+ DescriptorPool *_descriptor_pool_allocate(const DescriptorPoolKey &p_key);
+ void _descriptor_pool_free(const DescriptorPoolKey &p_key, DescriptorPool *p_pool);
+
+ ID_Pool<Buffer, ID_TYPE_UNIFORM_BUFFER> uniform_buffer_owner;
+ ID_Pool<Buffer, ID_TYPE_STORAGE_BUFFER> storage_buffer_owner;
+
+ //texture buffer needs a view
+ struct TextureBuffer {
+ Buffer buffer;
+ VkBufferView view;
+ };
+
+ ID_Pool<TextureBuffer, ID_TYPE_TEXTURE_BUFFER> texture_buffer_owner;
+
+	// This structure contains the descriptor set. Descriptor sets _need_ to be
+	// allocated for a given shader (and will be erased when that shader is
+	// erased), but they work for other shaders as long as the hash matches.
+	// This covers using them in shader variants.
+	//
+	// Also keep in mind that you can share buffers between descriptor sets, so
+	// the above restriction is not too serious.
+
+ struct UniformSet {
+ uint32_t hash;
+ ID shader_id;
+ DescriptorPool *pool;
+ DescriptorPoolKey pool_key;
+ VkDescriptorSet descriptor_set;
+ VkPipelineLayout pipeline_layout; //not owned, inherited from shader
+ Vector<ID> textures;
+ };
+
+ ID_Pool<UniformSet, ID_TYPE_UNIFORM_SET> uniform_set_owner;
+
+ /*******************/
+ /**** PIPELINES ****/
+ /*******************/
+
+	// A render pipeline contains ALL the
+	// information required for drawing.
+	// This includes all the rasterizer state,
+	// as well as the shader used, the framebuffer
+	// format, etc.
+	// While the pipeline is just a single object
+	// (VkPipeline), a lot of values are also saved
+	// here to do validation (Vulkan does none by
+	// default) and warn the user if something
+	// was not supplied as intended.
+
+ struct RenderPipeline {
+ //Cached values for validation
+ ID framebuffer_format;
+ uint32_t dynamic_state;
+ ID vertex_format;
+ bool uses_restart_indices;
+ uint32_t primitive_minimum;
+ uint32_t primitive_divisor;
+ Vector<uint32_t> set_hashes;
+ //Actual pipeline
+ VkPipeline pipeline;
+ };
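+
+ // With the cached values above, draw-time validation can be a handful of
+ // cheap comparisons. A sketch with assumed names, not the real checks:
+ //
+ //   // when binding a pipeline to a draw list:
+ //   ERR_FAIL_COND(pipeline->framebuffer_format != dl->validation.framebuffer_format);
+ //   dl->validation.pipeline_vertex_format = pipeline->vertex_format;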
+
+ ID_Pool<RenderPipeline, ID_TYPE_RENDER_PIPELINE> pipeline_owner;
+
+ /*******************/
+ /**** DRAW LIST ****/
+ /*******************/
+
+ // A draw list contains both the command buffer
+ // used for drawing as well as a LOT of
+ // information used for validation. This
+ // validation is cheap, so most of it can
+ // also run in release builds.
+
+ // When using split command lists, this is
+ // implemented internally using secondary command
+ // buffers. As these can be created from threads,
+ // each needs its own command pool.
+
+ struct SplitDrawListAllocator {
+ VkCommandPool command_pool;
+ Vector<VkCommandBuffer> command_buffers; //one for each frame
+ };
+
+ Vector<SplitDrawListAllocator> split_draw_list_allocators;
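+
+ // For reference, secondary command buffers are allocated like primaries,
+ // only with a different level (a sketch, assuming a valid command pool):
+ //
+ //   VkCommandBufferAllocateInfo cmdbuf_info = {};
+ //   cmdbuf_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
+ //   cmdbuf_info.commandPool = allocator.command_pool;
+ //   cmdbuf_info.level = VK_COMMAND_BUFFER_LEVEL_SECONDARY;
+ //   cmdbuf_info.commandBufferCount = 1;
+ //   VkCommandBuffer cmdbuf;
+ //   vkAllocateCommandBuffers(device, &cmdbuf_info, &cmdbuf);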
+
+ struct DrawList {
+
+ VkCommandBuffer command_buffer; //if persistent, this is owned, otherwise it's shared with the ringbuffer
+
+ struct Validation {
+ bool active; //means the command buffer was not closed yet, so you can keep adding things
+ ID framebuffer_format;
+ //actual render pass values
+ uint32_t dynamic_state;
+ ID vertex_format; //INVALID_ID if not set
+ uint32_t vertex_array_size; //0 if not set
+ uint32_t vertex_max_instances_allowed;
+ bool index_buffer_uses_restart_indices;
+ uint32_t index_array_size; //0 if index buffer not set
+ uint32_t index_array_max_index;
+ uint32_t index_array_offset;
+ Vector<uint32_t> set_hashes;
+ //last pipeline set values
+ bool pipeline_active;
+ uint32_t pipeline_dynamic_state;
+ ID pipeline_vertex_format;
+ bool pipeline_uses_restart_indices;
+ uint32_t pipeline_primitive_divisor;
+ uint32_t pipeline_primitive_minimum;
+ Vector<uint32_t> pipeline_set_hashes;
+
+ Validation() {
+ active = true;
+ dynamic_state = 0;
+ vertex_format = INVALID_ID;
+ vertex_array_size = 0; //0 if not set, as documented above
+ vertex_max_instances_allowed = 0xFFFFFFFF;
+ framebuffer_format = INVALID_ID;
+ index_array_size = 0; //not set
+ index_array_max_index = 0; //not set
+ index_buffer_uses_restart_indices = false;
+
+ //pipeline state initialize
+ pipeline_active = false;
+ pipeline_dynamic_state = 0;
+ pipeline_vertex_format = INVALID_ID;
+ pipeline_uses_restart_indices = false;
+ }
+ } validation;
+ };
+
+ DrawList *draw_list; //one for regular draw lists, multiple for split.
+ uint32_t draw_list_count;
+ bool draw_list_split;
+ Vector<ID> draw_list_bound_textures;
+ bool draw_list_unbind_textures;
+
+ Error _draw_list_setup_framebuffer(Framebuffer *p_framebuffer, InitialAction p_initial_action, FinalAction p_final_action, VkFramebuffer *r_framebuffer, VkRenderPass *r_render_pass);
+ Error _draw_list_render_pass_begin(Framebuffer *framebuffer, InitialAction p_initial_action, FinalAction p_final_action, const Vector<Color> &p_clear_colors, Point2i viewport_offset, Point2i viewport_size, VkFramebuffer vkframebuffer, VkRenderPass render_pass, VkCommandBuffer command_buffer, VkSubpassContents subpass_contents);
+ _FORCE_INLINE_ DrawList *_get_draw_list_ptr(ID p_id);
+
+ /**************************/
+ /**** FRAME MANAGEMENT ****/
+ /**************************/
+
+ // This is the frame structure. There are normally
+ // 3 of these (for triple buffering), or 2
+ // (for double buffering). They are cycled constantly.
+ //
+ // Each frame contains two command buffers, one that is
+ // used internally for setting up (creating stuff)
+ // and another used mostly for drawing.
+ //
+ // Each frame also contains a list of things that
+ // need to be disposed of, which can't
+ // happen immediately due to the asynchronous
+ // nature of the GPU. They will get deleted
+ // when the frame is cycled.
+
+ struct Frame {
+ //lists in usage order, from last to be freed to first to be freed
+ List<Buffer> buffers_to_dispose_of;
+ List<Texture> textures_to_dispose_of;
+ List<Framebuffer> framebuffers_to_dispose_of;
+ List<VkSampler> samplers_to_dispose_of;
+ List<Shader> shaders_to_dispose_of;
+ List<VkBufferView> buffer_views_to_dispose_of;
+ List<UniformSet> uniform_sets_to_dispose_of;
+ List<RenderPipeline> pipelines_to_dispose_of;
+
+ VkCommandPool command_pool;
+ VkCommandBuffer setup_command_buffer; //used at the begining of every frame for set-up
+ VkCommandBuffer draw_command_buffer; //used at the begining of every frame for set-up
+ };
+
+ Frame *frames; //frames available, they are cycled (usually 3)
+ int frame; //current frame
+ int frame_count; //total number of frames
+ uint64_t frames_drawn;
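+
+ // Cycling a frame could then look roughly like this (a sketch under assumed
+ // synchronization, not the engine's actual sequence):
+ //
+ //   frame = (frame + 1) % frame_count;
+ //   // once the GPU is known to be done with this frame:
+ //   _free_pending_resources(); // now safe to delete the queued objects
+ //   vkResetCommandPool(device, frames[frame].command_pool, 0);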
+
+ void _free_pending_resources();
+
+ VmaAllocator allocator;
+
+ VulkanContext *context;
+
+ void _free_internal(ID p_id);
+
+public:
+ virtual ID texture_create(const TextureFormat &p_format, const TextureView &p_view, const Vector<PoolVector<uint8_t> > &p_data = Vector<PoolVector<uint8_t> >());
+ virtual ID texture_create_shared(const TextureView &p_view, ID p_with_texture);
+ virtual Error texture_update(ID p_texture, uint32_t p_mipmap, uint32_t p_layer, const PoolVector<uint8_t> &p_data, bool p_sync_with_draw = false);
+
+ virtual bool texture_is_format_supported_for_usage(DataFormat p_format, TextureUsageBits p_usage) const;
+
+ /*********************/
+ /**** FRAMEBUFFER ****/
+ /*********************/
+
+ ID framebuffer_format_create(const Vector<AttachmentFormat> &p_format);
+
+ virtual ID framebuffer_create(const Vector<ID> &p_texture_attachments, ID p_format_check = INVALID_ID);
+
+ virtual ID framebuffer_get_format(ID p_framebuffer);
+
+ /*****************/
+ /**** SAMPLER ****/
+ /*****************/
+
+ virtual ID sampler_create(const SamplerState &p_state);
+
+ /**********************/
+ /**** VERTEX ARRAY ****/
+ /**********************/
+
+ virtual ID vertex_buffer_create(uint32_t p_size_bytes, const PoolVector<uint8_t> &p_data = PoolVector<uint8_t>());
+
+ // Internally reference counted, this ID is guaranteed to be unique for the same description, but needs to be freed as many times as it was allocated
+ virtual ID vertex_description_create(const Vector<VertexDescription> &p_vertex_descriptions);
+ virtual ID vertex_array_create(uint32_t p_vertex_count, ID p_vertex_description, const Vector<ID> &p_src_buffers);
+
+ virtual ID index_buffer_create(uint32_t p_size_indices, IndexBufferFormat p_format, const PoolVector<uint8_t> &p_data = PoolVector<uint8_t>(), bool p_use_restart_indices = false);
+
+ virtual ID index_array_create(ID p_index_buffer, uint32_t p_index_offset, uint32_t p_index_count);
+
+ /****************/
+ /**** SHADER ****/
+ /****************/
+
+ virtual ID shader_create_from_source(const Vector<ShaderStageSource> &p_stages, String *r_error = NULL, bool p_allow_cache = true);
+
+ /*****************/
+ /**** UNIFORM ****/
+ /*****************/
+
+ virtual ID uniform_buffer_create(uint32_t p_size_bytes, const PoolVector<uint8_t> &p_data = PoolVector<uint8_t>());
+ virtual ID storage_buffer_create(uint32_t p_size_bytes, const PoolVector<uint8_t> &p_data = PoolVector<uint8_t>());
+ virtual ID texture_buffer_create(uint32_t p_size_elements, DataFormat p_format, const PoolVector<uint8_t> &p_data = PoolVector<uint8_t>());
+
+ virtual ID uniform_set_create(const Vector<Uniform> &p_uniforms, ID p_shader, uint32_t p_shader_set);
+
+ virtual Error buffer_update(ID p_buffer, uint32_t p_offset, uint32_t p_size, void *p_data, bool p_sync_with_draw = false); //works for any buffer
+
+ /*************************/
+ /**** RENDER PIPELINE ****/
+ /*************************/
+
+ virtual ID render_pipeline_create(ID p_shader, ID p_framebuffer_format, ID p_vertex_description, RenderPrimitive p_render_primitive, const PipelineRasterizationState &p_rasterization_state, const PipelineMultisampleState &p_multisample_state, const PipelineDepthStencilState &p_depth_stencil_state, const PipelineColorBlendState &p_blend_state, int p_dynamic_state_flags = 0);
+
+ /****************/
+ /**** SCREEN ****/
+ /****************/
+
+ virtual int screen_get_width(int p_screen = 0) const;
+ virtual int screen_get_height(int p_screen = 0) const;
+ virtual ID screen_get_framebuffer_format() const;
+
+ /********************/
+ /**** DRAW LISTS ****/
+ /********************/
+
+ virtual ID draw_list_begin_for_screen(int p_screen = 0, const Color &p_clear_color = Color());
+ virtual ID draw_list_begin(ID p_framebuffer, InitialAction p_initial_action, FinalAction p_final_action, const Vector<Color> &p_clear_colors = Vector<Color>(), const Rect2 &p_region = Rect2());
+ virtual Error draw_list_begin_split(ID p_framebuffer, uint32_t p_splits, ID *r_split_ids, InitialAction p_initial_action, FinalAction p_final_action, const Vector<Color> &p_clear_colors = Vector<Color>(), const Rect2 &p_region = Rect2());
+
+ virtual void draw_list_bind_render_pipeline(ID p_list, ID p_render_pipeline);
+ virtual void draw_list_bind_uniform_set(ID p_list, ID p_uniform_set, uint32_t p_index);
+ virtual void draw_list_bind_vertex_array(ID p_list, ID p_vertex_array);
+ virtual void draw_list_bind_index_array(ID p_list, ID p_index_array);
+
+ virtual void draw_list_draw(ID p_list, bool p_use_indices, uint32_t p_instances = 1);
+
+ virtual void draw_list_enable_scissor(ID p_list, const Rect2 &p_rect);
+ virtual void draw_list_disable_scissor(ID p_list);
+
+ virtual void draw_list_end();
+
+ /**************/
+ /**** FREE ****/
+ /**************/
+
+ virtual void free(ID p_id);
+
+ void initialize(VulkanContext *p_context);
+ void finalize();
+
+ void finalize_frame();
+ void advance_frame();
+
+ RenderingDeviceVulkan();
+};
+
+#endif // RENDERING_DEVICE_VULKAN_H
diff --git a/drivers/vulkan/vk_enum_string_helper.h b/drivers/vulkan/vk_enum_string_helper.h
new file mode 100644
index 0000000000..a0b955e32b
--- /dev/null
+++ b/drivers/vulkan/vk_enum_string_helper.h
@@ -0,0 +1,3722 @@
+// *** THIS FILE IS GENERATED - DO NOT EDIT ***
+// See helper_file_generator.py for modifications
+
+
+/***************************************************************************
+ *
+ * Copyright (c) 2015-2017 The Khronos Group Inc.
+ * Copyright (c) 2015-2017 Valve Corporation
+ * Copyright (c) 2015-2017 LunarG, Inc.
+ * Copyright (c) 2015-2017 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * Author: Mark Lobodzinski <mark@lunarg.com>
+ * Author: Courtney Goeltzenleuchter <courtneygo@google.com>
+ * Author: Tobin Ehlis <tobine@google.com>
+ * Author: Chris Forbes <chrisforbes@google.com>
+ * Author: John Zulauf <jzulauf@lunarg.com>
+ *
+ ****************************************************************************/
+
+
+#pragma once
+#ifdef _WIN32
+#pragma warning( disable : 4065 )
+#endif
+
+#include <vulkan/vulkan.h>
+
+
+static inline const char* string_VkPipelineCacheHeaderVersion(VkPipelineCacheHeaderVersion input_value)
+{
+ switch ((VkPipelineCacheHeaderVersion)input_value)
+ {
+ case VK_PIPELINE_CACHE_HEADER_VERSION_ONE:
+ return "VK_PIPELINE_CACHE_HEADER_VERSION_ONE";
+ default:
+ return "Unhandled VkPipelineCacheHeaderVersion";
+ }
+}
+
+static inline const char* string_VkResult(VkResult input_value)
+{
+ switch ((VkResult)input_value)
+ {
+ case VK_ERROR_INITIALIZATION_FAILED:
+ return "VK_ERROR_INITIALIZATION_FAILED";
+ case VK_ERROR_OUT_OF_DEVICE_MEMORY:
+ return "VK_ERROR_OUT_OF_DEVICE_MEMORY";
+ case VK_ERROR_NOT_PERMITTED_EXT:
+ return "VK_ERROR_NOT_PERMITTED_EXT";
+ case VK_ERROR_INVALID_EXTERNAL_HANDLE:
+ return "VK_ERROR_INVALID_EXTERNAL_HANDLE";
+ case VK_NOT_READY:
+ return "VK_NOT_READY";
+ case VK_ERROR_FEATURE_NOT_PRESENT:
+ return "VK_ERROR_FEATURE_NOT_PRESENT";
+ case VK_TIMEOUT:
+ return "VK_TIMEOUT";
+ case VK_ERROR_FRAGMENTED_POOL:
+ return "VK_ERROR_FRAGMENTED_POOL";
+ case VK_ERROR_LAYER_NOT_PRESENT:
+ return "VK_ERROR_LAYER_NOT_PRESENT";
+ case VK_ERROR_FRAGMENTATION_EXT:
+ return "VK_ERROR_FRAGMENTATION_EXT";
+ case VK_ERROR_INCOMPATIBLE_DISPLAY_KHR:
+ return "VK_ERROR_INCOMPATIBLE_DISPLAY_KHR";
+ case VK_SUCCESS:
+ return "VK_SUCCESS";
+ case VK_ERROR_INVALID_SHADER_NV:
+ return "VK_ERROR_INVALID_SHADER_NV";
+ case VK_ERROR_FORMAT_NOT_SUPPORTED:
+ return "VK_ERROR_FORMAT_NOT_SUPPORTED";
+ case VK_ERROR_SURFACE_LOST_KHR:
+ return "VK_ERROR_SURFACE_LOST_KHR";
+ case VK_ERROR_VALIDATION_FAILED_EXT:
+ return "VK_ERROR_VALIDATION_FAILED_EXT";
+ case VK_SUBOPTIMAL_KHR:
+ return "VK_SUBOPTIMAL_KHR";
+ case VK_ERROR_TOO_MANY_OBJECTS:
+ return "VK_ERROR_TOO_MANY_OBJECTS";
+ case VK_EVENT_RESET:
+ return "VK_EVENT_RESET";
+ case VK_ERROR_OUT_OF_DATE_KHR:
+ return "VK_ERROR_OUT_OF_DATE_KHR";
+ case VK_ERROR_NATIVE_WINDOW_IN_USE_KHR:
+ return "VK_ERROR_NATIVE_WINDOW_IN_USE_KHR";
+ case VK_ERROR_MEMORY_MAP_FAILED:
+ return "VK_ERROR_MEMORY_MAP_FAILED";
+ case VK_EVENT_SET:
+ return "VK_EVENT_SET";
+ case VK_ERROR_INCOMPATIBLE_DRIVER:
+ return "VK_ERROR_INCOMPATIBLE_DRIVER";
+ case VK_INCOMPLETE:
+ return "VK_INCOMPLETE";
+ case VK_ERROR_DEVICE_LOST:
+ return "VK_ERROR_DEVICE_LOST";
+ case VK_ERROR_EXTENSION_NOT_PRESENT:
+ return "VK_ERROR_EXTENSION_NOT_PRESENT";
+ case VK_ERROR_OUT_OF_POOL_MEMORY:
+ return "VK_ERROR_OUT_OF_POOL_MEMORY";
+ case VK_ERROR_OUT_OF_HOST_MEMORY:
+ return "VK_ERROR_OUT_OF_HOST_MEMORY";
+ default:
+ return "Unhandled VkResult";
+ }
+}
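+
+// Example usage (illustration only, not part of the generated tables):
+// turning a VkResult into a readable message:
+//
+//   VkResult err = vkDeviceWaitIdle(device);
+//   if (err != VK_SUCCESS) {
+//       printf("vkDeviceWaitIdle failed: %s\n", string_VkResult(err));
+//   }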
+
+static inline const char* string_VkStructureType(VkStructureType input_value)
+{
+ switch ((VkStructureType)input_value)
+ {
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_PROPERTIES_EXT:
+ return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_PROPERTIES_EXT";
+ case VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER:
+ return "VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER";
+ case VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO:
+ return "VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO";
+ case VK_STRUCTURE_TYPE_D3D12_FENCE_SUBMIT_INFO_KHR:
+ return "VK_STRUCTURE_TYPE_D3D12_FENCE_SUBMIT_INFO_KHR";
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PUSH_DESCRIPTOR_PROPERTIES_KHR:
+ return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PUSH_DESCRIPTOR_PROPERTIES_KHR";
+ case VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR:
+ return "VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR";
+ case VK_STRUCTURE_TYPE_LOADER_INSTANCE_CREATE_INFO:
+ return "VK_STRUCTURE_TYPE_LOADER_INSTANCE_CREATE_INFO";
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PROPERTIES_2:
+ return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PROPERTIES_2";
+ case VK_STRUCTURE_TYPE_DEBUG_MARKER_OBJECT_TAG_INFO_EXT:
+ return "VK_STRUCTURE_TYPE_DEBUG_MARKER_OBJECT_TAG_INFO_EXT";
+ case VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO:
+ return "VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO";
+ case VK_STRUCTURE_TYPE_SHADER_MODULE_VALIDATION_CACHE_CREATE_INFO_EXT:
+ return "VK_STRUCTURE_TYPE_SHADER_MODULE_VALIDATION_CACHE_CREATE_INFO_EXT";
+ case VK_STRUCTURE_TYPE_LOADER_DEVICE_CREATE_INFO:
+ return "VK_STRUCTURE_TYPE_LOADER_DEVICE_CREATE_INFO";
+ case VK_STRUCTURE_TYPE_PIPELINE_SAMPLE_LOCATIONS_STATE_CREATE_INFO_EXT:
+ return "VK_STRUCTURE_TYPE_PIPELINE_SAMPLE_LOCATIONS_STATE_CREATE_INFO_EXT";
+ case VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CALLBACK_DATA_EXT:
+ return "VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CALLBACK_DATA_EXT";
+ case VK_STRUCTURE_TYPE_BIND_IMAGE_PLANE_MEMORY_INFO:
+ return "VK_STRUCTURE_TYPE_BIND_IMAGE_PLANE_MEMORY_INFO";
+ case VK_STRUCTURE_TYPE_PIPELINE_DISCARD_RECTANGLE_STATE_CREATE_INFO_EXT:
+ return "VK_STRUCTURE_TYPE_PIPELINE_DISCARD_RECTANGLE_STATE_CREATE_INFO_EXT";
+ case VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_CONSERVATIVE_STATE_CREATE_INFO_EXT:
+ return "VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_CONSERVATIVE_STATE_CREATE_INFO_EXT";
+ case VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_BINDING_FLAGS_CREATE_INFO_EXT:
+ return "VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_BINDING_FLAGS_CREATE_INFO_EXT";
+ case VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO:
+ return "VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO";
+ case VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO:
+ return "VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO";
+ case VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2:
+ return "VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2";
+ case VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO_NV:
+ return "VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO_NV";
+ case VK_STRUCTURE_TYPE_IMAGE_PLANE_MEMORY_REQUIREMENTS_INFO:
+ return "VK_STRUCTURE_TYPE_IMAGE_PLANE_MEMORY_REQUIREMENTS_INFO";
+ case VK_STRUCTURE_TYPE_IMPORT_MEMORY_WIN32_HANDLE_INFO_NV:
+ return "VK_STRUCTURE_TYPE_IMPORT_MEMORY_WIN32_HANDLE_INFO_NV";
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_FILTER_MINMAX_PROPERTIES_EXT:
+ return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_FILTER_MINMAX_PROPERTIES_EXT";
+ case VK_STRUCTURE_TYPE_MULTISAMPLE_PROPERTIES_EXT:
+ return "VK_STRUCTURE_TYPE_MULTISAMPLE_PROPERTIES_EXT";
+ case VK_STRUCTURE_TYPE_BIND_SPARSE_INFO:
+ return "VK_STRUCTURE_TYPE_BIND_SPARSE_INFO";
+ case VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO:
+ return "VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO";
+ case VK_STRUCTURE_TYPE_DEVICE_QUEUE_INFO_2:
+ return "VK_STRUCTURE_TYPE_DEVICE_QUEUE_INFO_2";
+ case VK_STRUCTURE_TYPE_IMPORT_FENCE_FD_INFO_KHR:
+ return "VK_STRUCTURE_TYPE_IMPORT_FENCE_FD_INFO_KHR";
+ case VK_STRUCTURE_TYPE_DEBUG_REPORT_CALLBACK_CREATE_INFO_EXT:
+ return "VK_STRUCTURE_TYPE_DEBUG_REPORT_CALLBACK_CREATE_INFO_EXT";
+ case VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO:
+ return "VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO";
+ case VK_STRUCTURE_TYPE_MEMORY_GET_FD_INFO_KHR:
+ return "VK_STRUCTURE_TYPE_MEMORY_GET_FD_INFO_KHR";
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES:
+ return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES";
+ case VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO:
+ return "VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO";
+ case VK_STRUCTURE_TYPE_FENCE_GET_FD_INFO_KHR:
+ return "VK_STRUCTURE_TYPE_FENCE_GET_FD_INFO_KHR";
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SURFACE_INFO_2_KHR:
+ return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SURFACE_INFO_2_KHR";
+ case VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO:
+ return "VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO";
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES:
+ return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES";
+ case VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO:
+ return "VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO";
+ case VK_STRUCTURE_TYPE_MEMORY_HOST_POINTER_PROPERTIES_EXT:
+ return "VK_STRUCTURE_TYPE_MEMORY_HOST_POINTER_PROPERTIES_EXT";
+ case VK_STRUCTURE_TYPE_APPLICATION_INFO:
+ return "VK_STRUCTURE_TYPE_APPLICATION_INFO";
+ case VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO:
+ return "VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO";
+ case VK_STRUCTURE_TYPE_EXPORT_MEMORY_WIN32_HANDLE_INFO_KHR:
+ return "VK_STRUCTURE_TYPE_EXPORT_MEMORY_WIN32_HANDLE_INFO_KHR";
+ case VK_STRUCTURE_TYPE_DEVICE_GROUP_BIND_SPARSE_INFO:
+ return "VK_STRUCTURE_TYPE_DEVICE_GROUP_BIND_SPARSE_INFO";
+ case VK_STRUCTURE_TYPE_DISPLAY_EVENT_INFO_EXT:
+ return "VK_STRUCTURE_TYPE_DISPLAY_EVENT_INFO_EXT";
+ case VK_STRUCTURE_TYPE_DEBUG_UTILS_OBJECT_NAME_INFO_EXT:
+ return "VK_STRUCTURE_TYPE_DEBUG_UTILS_OBJECT_NAME_INFO_EXT";
+ case VK_STRUCTURE_TYPE_EXTERNAL_BUFFER_PROPERTIES:
+ return "VK_STRUCTURE_TYPE_EXTERNAL_BUFFER_PROPERTIES";
+ case VK_STRUCTURE_TYPE_DEDICATED_ALLOCATION_IMAGE_CREATE_INFO_NV:
+ return "VK_STRUCTURE_TYPE_DEDICATED_ALLOCATION_IMAGE_CREATE_INFO_NV";
+ case VK_STRUCTURE_TYPE_VALIDATION_FLAGS_EXT:
+ return "VK_STRUCTURE_TYPE_VALIDATION_FLAGS_EXT";
+ case VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_DIVISOR_STATE_CREATE_INFO_EXT:
+ return "VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_DIVISOR_STATE_CREATE_INFO_EXT";
+ case VK_STRUCTURE_TYPE_EVENT_CREATE_INFO:
+ return "VK_STRUCTURE_TYPE_EVENT_CREATE_INFO";
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_PROPERTIES:
+ return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_PROPERTIES";
+ case VK_STRUCTURE_TYPE_MEMORY_FD_PROPERTIES_KHR:
+ return "VK_STRUCTURE_TYPE_MEMORY_FD_PROPERTIES_KHR";
+ case VK_STRUCTURE_TYPE_DEVICE_GENERATED_COMMANDS_LIMITS_NVX:
+ return "VK_STRUCTURE_TYPE_DEVICE_GENERATED_COMMANDS_LIMITS_NVX";
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_CORE_PROPERTIES_AMD:
+ return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_CORE_PROPERTIES_AMD";
+ case VK_STRUCTURE_TYPE_IMPORT_MEMORY_HOST_POINTER_INFO_EXT:
+ return "VK_STRUCTURE_TYPE_IMPORT_MEMORY_HOST_POINTER_INFO_EXT";
+ case VK_STRUCTURE_TYPE_DEDICATED_ALLOCATION_BUFFER_CREATE_INFO_NV:
+ return "VK_STRUCTURE_TYPE_DEDICATED_ALLOCATION_BUFFER_CREATE_INFO_NV";
+ case VK_STRUCTURE_TYPE_RENDER_PASS_SAMPLE_LOCATIONS_BEGIN_INFO_EXT:
+ return "VK_STRUCTURE_TYPE_RENDER_PASS_SAMPLE_LOCATIONS_BEGIN_INFO_EXT";
+ case VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_BUFFER_CREATE_INFO:
+ return "VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_BUFFER_CREATE_INFO";
+ case VK_STRUCTURE_TYPE_IMPORT_FENCE_WIN32_HANDLE_INFO_KHR:
+ return "VK_STRUCTURE_TYPE_IMPORT_FENCE_WIN32_HANDLE_INFO_KHR";
+ case VK_STRUCTURE_TYPE_VI_SURFACE_CREATE_INFO_NN:
+ return "VK_STRUCTURE_TYPE_VI_SURFACE_CREATE_INFO_NN";
+ case VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO:
+ return "VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO";
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES_EXT:
+ return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES_EXT";
+ case VK_STRUCTURE_TYPE_TEXTURE_LOD_GATHER_FORMAT_PROPERTIES_AMD:
+ return "VK_STRUCTURE_TYPE_TEXTURE_LOD_GATHER_FORMAT_PROPERTIES_AMD";
+ case VK_STRUCTURE_TYPE_XLIB_SURFACE_CREATE_INFO_KHR:
+ return "VK_STRUCTURE_TYPE_XLIB_SURFACE_CREATE_INFO_KHR";
+ case VK_STRUCTURE_TYPE_WIN32_SURFACE_CREATE_INFO_KHR:
+ return "VK_STRUCTURE_TYPE_WIN32_SURFACE_CREATE_INFO_KHR";
+ case VK_STRUCTURE_TYPE_SURFACE_FORMAT_2_KHR:
+ return "VK_STRUCTURE_TYPE_SURFACE_FORMAT_2_KHR";
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTER_FEATURES:
+ return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTER_FEATURES";
+ case VK_STRUCTURE_TYPE_DISPLAY_PRESENT_INFO_KHR:
+ return "VK_STRUCTURE_TYPE_DISPLAY_PRESENT_INFO_KHR";
+ case VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO:
+ return "VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO";
+ case VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_W_SCALING_STATE_CREATE_INFO_NV:
+ return "VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_W_SCALING_STATE_CREATE_INFO_NV";
+ case VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO:
+ return "VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO";
+ case VK_STRUCTURE_TYPE_DISPLAY_POWER_INFO_EXT:
+ return "VK_STRUCTURE_TYPE_DISPLAY_POWER_INFO_EXT";
+ case VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO:
+ return "VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO";
+ case VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO_NV:
+ return "VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO_NV";
+ case VK_STRUCTURE_TYPE_DEVICE_QUEUE_GLOBAL_PRIORITY_CREATE_INFO_EXT:
+ return "VK_STRUCTURE_TYPE_DEVICE_QUEUE_GLOBAL_PRIORITY_CREATE_INFO_EXT";
+ case VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_SWAPCHAIN_INFO_KHR:
+ return "VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_SWAPCHAIN_INFO_KHR";
+ case VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO:
+ return "VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO";
+ case VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_SWIZZLE_STATE_CREATE_INFO_NV:
+ return "VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_SWIZZLE_STATE_CREATE_INFO_NV";
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES:
+ return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES";
+ case VK_STRUCTURE_TYPE_DEVICE_GROUP_PRESENT_CAPABILITIES_KHR:
+ return "VK_STRUCTURE_TYPE_DEVICE_GROUP_PRESENT_CAPABILITIES_KHR";
+ case VK_STRUCTURE_TYPE_OBJECT_TABLE_CREATE_INFO_NVX:
+ return "VK_STRUCTURE_TYPE_OBJECT_TABLE_CREATE_INFO_NVX";
+ case VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2:
+ return "VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2";
+ case VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO:
+ return "VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO";
+ case VK_STRUCTURE_TYPE_ACQUIRE_NEXT_IMAGE_INFO_KHR:
+ return "VK_STRUCTURE_TYPE_ACQUIRE_NEXT_IMAGE_INFO_KHR";
+ case VK_STRUCTURE_TYPE_DEDICATED_ALLOCATION_MEMORY_ALLOCATE_INFO_NV:
+ return "VK_STRUCTURE_TYPE_DEDICATED_ALLOCATION_MEMORY_ALLOCATE_INFO_NV";
+ case VK_STRUCTURE_TYPE_SUBMIT_INFO:
+ return "VK_STRUCTURE_TYPE_SUBMIT_INFO";
+ case VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_ADVANCED_STATE_CREATE_INFO_EXT:
+ return "VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_ADVANCED_STATE_CREATE_INFO_EXT";
+ case VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_RASTERIZATION_ORDER_AMD:
+ return "VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_RASTERIZATION_ORDER_AMD";
+ case VK_STRUCTURE_TYPE_DEVICE_EVENT_INFO_EXT:
+ return "VK_STRUCTURE_TYPE_DEVICE_EVENT_INFO_EXT";
+ case VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_IMAGE_FORMAT_PROPERTIES:
+ return "VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_IMAGE_FORMAT_PROPERTIES";
+ case VK_STRUCTURE_TYPE_DEBUG_UTILS_OBJECT_TAG_INFO_EXT:
+ return "VK_STRUCTURE_TYPE_DEBUG_UTILS_OBJECT_TAG_INFO_EXT";
+ case VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO:
+ return "VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO";
+ case VK_STRUCTURE_TYPE_EXPORT_MEMORY_WIN32_HANDLE_INFO_NV:
+ return "VK_STRUCTURE_TYPE_EXPORT_MEMORY_WIN32_HANDLE_INFO_NV";
+ case VK_STRUCTURE_TYPE_SEMAPHORE_GET_FD_INFO_KHR:
+ return "VK_STRUCTURE_TYPE_SEMAPHORE_GET_FD_INFO_KHR";
+ case VK_STRUCTURE_TYPE_IMPORT_MEMORY_FD_INFO_KHR:
+ return "VK_STRUCTURE_TYPE_IMPORT_MEMORY_FD_INFO_KHR";
+ case VK_STRUCTURE_TYPE_PRESENT_REGIONS_KHR:
+ return "VK_STRUCTURE_TYPE_PRESENT_REGIONS_KHR";
+ case VK_STRUCTURE_TYPE_IMAGE_SWAPCHAIN_CREATE_INFO_KHR:
+ return "VK_STRUCTURE_TYPE_IMAGE_SWAPCHAIN_CREATE_INFO_KHR";
+ case VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_USAGE_ANDROID:
+ return "VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_USAGE_ANDROID";
+ case VK_STRUCTURE_TYPE_SAMPLER_REDUCTION_MODE_CREATE_INFO_EXT:
+ return "VK_STRUCTURE_TYPE_SAMPLER_REDUCTION_MODE_CREATE_INFO_EXT";
+ case VK_STRUCTURE_TYPE_FENCE_CREATE_INFO:
+ return "VK_STRUCTURE_TYPE_FENCE_CREATE_INFO";
+ case VK_STRUCTURE_TYPE_DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_ALLOCATE_INFO_EXT:
+ return "VK_STRUCTURE_TYPE_DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_ALLOCATE_INFO_EXT";
+ case VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO:
+ return "VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO";
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BLEND_OPERATION_ADVANCED_PROPERTIES_EXT:
+ return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BLEND_OPERATION_ADVANCED_PROPERTIES_EXT";
+ case VK_STRUCTURE_TYPE_PROTECTED_SUBMIT_INFO:
+ return "VK_STRUCTURE_TYPE_PROTECTED_SUBMIT_INFO";
+ case VK_STRUCTURE_TYPE_WAYLAND_SURFACE_CREATE_INFO_KHR:
+ return "VK_STRUCTURE_TYPE_WAYLAND_SURFACE_CREATE_INFO_KHR";
+ case VK_STRUCTURE_TYPE_DISPLAY_MODE_CREATE_INFO_KHR:
+ return "VK_STRUCTURE_TYPE_DISPLAY_MODE_CREATE_INFO_KHR";
+ case VK_STRUCTURE_TYPE_INDIRECT_COMMANDS_LAYOUT_CREATE_INFO_NVX:
+ return "VK_STRUCTURE_TYPE_INDIRECT_COMMANDS_LAYOUT_CREATE_INFO_NVX";
+ case VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_FORMAT_PROPERTIES_ANDROID:
+ return "VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_FORMAT_PROPERTIES_ANDROID";
+ case VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_WIN32_HANDLE_INFO_KHR:
+ return "VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_WIN32_HANDLE_INFO_KHR";
+ case VK_STRUCTURE_TYPE_MEMORY_GET_ANDROID_HARDWARE_BUFFER_INFO_ANDROID:
+ return "VK_STRUCTURE_TYPE_MEMORY_GET_ANDROID_HARDWARE_BUFFER_INFO_ANDROID";
+ case VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO:
+ return "VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO";
+ case VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO:
+ return "VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO";
+ case VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_DEVICE_GROUP_INFO:
+ return "VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_DEVICE_GROUP_INFO";
+ case VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO:
+ return "VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO";
+ case VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO:
+ return "VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO";
+ case VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_WIN32_HANDLE_INFO_KHR:
+ return "VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_WIN32_HANDLE_INFO_KHR";
+ case VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO:
+ return "VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO";
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES:
+ return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES";
+ case VK_STRUCTURE_TYPE_EXPORT_FENCE_WIN32_HANDLE_INFO_KHR:
+ return "VK_STRUCTURE_TYPE_EXPORT_FENCE_WIN32_HANDLE_INFO_KHR";
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PER_VIEW_ATTRIBUTES_PROPERTIES_NVX:
+ return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PER_VIEW_ATTRIBUTES_PROPERTIES_NVX";
+ case VK_STRUCTURE_TYPE_CMD_PROCESS_COMMANDS_INFO_NVX:
+ return "VK_STRUCTURE_TYPE_CMD_PROCESS_COMMANDS_INFO_NVX";
+ case VK_STRUCTURE_TYPE_ANDROID_SURFACE_CREATE_INFO_KHR:
+ return "VK_STRUCTURE_TYPE_ANDROID_SURFACE_CREATE_INFO_KHR";
+ case VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE:
+ return "VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE";
+ case VK_STRUCTURE_TYPE_PRESENT_INFO_KHR:
+ return "VK_STRUCTURE_TYPE_PRESENT_INFO_KHR";
+ case VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO:
+ return "VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO";
+ case VK_STRUCTURE_TYPE_EXPORT_FENCE_CREATE_INFO:
+ return "VK_STRUCTURE_TYPE_EXPORT_FENCE_CREATE_INFO";
+ case VK_STRUCTURE_TYPE_PRESENT_TIMES_INFO_GOOGLE:
+ return "VK_STRUCTURE_TYPE_PRESENT_TIMES_INFO_GOOGLE";
+ case VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO:
+ return "VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO";
+ case VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_INFO:
+ return "VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_INFO";
+ case VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO:
+ return "VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO";
+ case VK_STRUCTURE_TYPE_PIPELINE_COVERAGE_TO_COLOR_STATE_CREATE_INFO_NV:
+ return "VK_STRUCTURE_TYPE_PIPELINE_COVERAGE_TO_COLOR_STATE_CREATE_INFO_NV";
+ case VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO:
+ return "VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO";
+ case VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO:
+ return "VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO";
+ case VK_STRUCTURE_TYPE_DEBUG_MARKER_MARKER_INFO_EXT:
+ return "VK_STRUCTURE_TYPE_DEBUG_MARKER_MARKER_INFO_EXT";
+ case VK_STRUCTURE_TYPE_IOS_SURFACE_CREATE_INFO_MVK:
+ return "VK_STRUCTURE_TYPE_IOS_SURFACE_CREATE_INFO_MVK";
+ case VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_2_KHR:
+ return "VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_2_KHR";
+ case VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO:
+ return "VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO";
+ case VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER:
+ return "VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER";
+ case VK_STRUCTURE_TYPE_IMAGE_VIEW_USAGE_CREATE_INFO:
+ return "VK_STRUCTURE_TYPE_IMAGE_VIEW_USAGE_CREATE_INFO";
+ case VK_STRUCTURE_TYPE_MEMORY_WIN32_HANDLE_PROPERTIES_KHR:
+ return "VK_STRUCTURE_TYPE_MEMORY_WIN32_HANDLE_PROPERTIES_KHR";
+ case VK_STRUCTURE_TYPE_QUEUE_FAMILY_PROPERTIES_2:
+ return "VK_STRUCTURE_TYPE_QUEUE_FAMILY_PROPERTIES_2";
+ case VK_STRUCTURE_TYPE_RENDER_PASS_INPUT_ATTACHMENT_ASPECT_CREATE_INFO:
+ return "VK_STRUCTURE_TYPE_RENDER_PASS_INPUT_ATTACHMENT_ASPECT_CREATE_INFO";
+ case VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO:
+ return "VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO";
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_BUFFER_INFO:
+ return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_BUFFER_INFO";
+ case VK_STRUCTURE_TYPE_DEVICE_GROUP_PRESENT_INFO_KHR:
+ return "VK_STRUCTURE_TYPE_DEVICE_GROUP_PRESENT_INFO_KHR";
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_FORMAT_INFO_2:
+ return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_FORMAT_INFO_2";
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BLEND_OPERATION_ADVANCED_FEATURES_EXT:
+ return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BLEND_OPERATION_ADVANCED_FEATURES_EXT";
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES:
+ return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES";
+ case VK_STRUCTURE_TYPE_HDR_METADATA_EXT:
+ return "VK_STRUCTURE_TYPE_HDR_METADATA_EXT";
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2:
+ return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2";
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SPARSE_IMAGE_FORMAT_INFO_2:
+ return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SPARSE_IMAGE_FORMAT_INFO_2";
+ case VK_STRUCTURE_TYPE_EXTERNAL_SEMAPHORE_PROPERTIES:
+ return "VK_STRUCTURE_TYPE_EXTERNAL_SEMAPHORE_PROPERTIES";
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES:
+ return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES";
+ case VK_STRUCTURE_TYPE_SPARSE_IMAGE_MEMORY_REQUIREMENTS_2:
+ return "VK_STRUCTURE_TYPE_SPARSE_IMAGE_MEMORY_REQUIREMENTS_2";
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_FENCE_INFO:
+ return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_FENCE_INFO";
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_PROPERTIES_EXT:
+ return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_PROPERTIES_EXT";
+ case VK_STRUCTURE_TYPE_RENDER_PASS_MULTIVIEW_CREATE_INFO:
+ return "VK_STRUCTURE_TYPE_RENDER_PASS_MULTIVIEW_CREATE_INFO";
+ case VK_STRUCTURE_TYPE_DISPLAY_SURFACE_CREATE_INFO_KHR:
+ return "VK_STRUCTURE_TYPE_DISPLAY_SURFACE_CREATE_INFO_KHR";
+ case VK_STRUCTURE_TYPE_VALIDATION_CACHE_CREATE_INFO_EXT:
+ return "VK_STRUCTURE_TYPE_VALIDATION_CACHE_CREATE_INFO_EXT";
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_IMAGE_FORMAT_INFO:
+ return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_IMAGE_FORMAT_INFO";
+ case VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO:
+ return "VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO";
+ case VK_STRUCTURE_TYPE_DEVICE_GROUP_SUBMIT_INFO:
+ return "VK_STRUCTURE_TYPE_DEVICE_GROUP_SUBMIT_INFO";
+ case VK_STRUCTURE_TYPE_EXTERNAL_IMAGE_FORMAT_PROPERTIES:
+ return "VK_STRUCTURE_TYPE_EXTERNAL_IMAGE_FORMAT_PROPERTIES";
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLE_LOCATIONS_PROPERTIES_EXT:
+ return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLE_LOCATIONS_PROPERTIES_EXT";
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES:
+ return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES";
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONSERVATIVE_RASTERIZATION_PROPERTIES_EXT:
+ return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONSERVATIVE_RASTERIZATION_PROPERTIES_EXT";
+ case VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_CREATE_INFO:
+ return "VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_CREATE_INFO";
+ case VK_STRUCTURE_TYPE_FENCE_GET_WIN32_HANDLE_INFO_KHR:
+ return "VK_STRUCTURE_TYPE_FENCE_GET_WIN32_HANDLE_INFO_KHR";
+ case VK_STRUCTURE_TYPE_CMD_RESERVE_SPACE_FOR_COMMANDS_INFO_NVX:
+ return "VK_STRUCTURE_TYPE_CMD_RESERVE_SPACE_FOR_COMMANDS_INFO_NVX";
+ case VK_STRUCTURE_TYPE_IMPORT_MEMORY_WIN32_HANDLE_INFO_KHR:
+ return "VK_STRUCTURE_TYPE_IMPORT_MEMORY_WIN32_HANDLE_INFO_KHR";
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_MEMORY_HOST_PROPERTIES_EXT:
+ return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_MEMORY_HOST_PROPERTIES_EXT";
+ case VK_STRUCTURE_TYPE_DEVICE_GROUP_COMMAND_BUFFER_BEGIN_INFO:
+ return "VK_STRUCTURE_TYPE_DEVICE_GROUP_COMMAND_BUFFER_BEGIN_INFO";
+ case VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_DOMAIN_ORIGIN_STATE_CREATE_INFO:
+ return "VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_DOMAIN_ORIGIN_STATE_CREATE_INFO";
+ case VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS:
+ return "VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS";
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES:
+ return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES";
+ case VK_STRUCTURE_TYPE_FORMAT_PROPERTIES_2:
+ return "VK_STRUCTURE_TYPE_FORMAT_PROPERTIES_2";
+ case VK_STRUCTURE_TYPE_DEVICE_GROUP_SWAPCHAIN_CREATE_INFO_KHR:
+ return "VK_STRUCTURE_TYPE_DEVICE_GROUP_SWAPCHAIN_CREATE_INFO_KHR";
+ case VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO:
+ return "VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO";
+ case VK_STRUCTURE_TYPE_MEMORY_BARRIER:
+ return "VK_STRUCTURE_TYPE_MEMORY_BARRIER";
+ case VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO:
+ return "VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO";
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_SEMAPHORE_INFO:
+ return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_SEMAPHORE_INFO";
+ case VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_2_EXT:
+ return "VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_2_EXT";
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_PROPERTIES:
+ return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_PROPERTIES";
+ case VK_STRUCTURE_TYPE_IMAGE_FORMAT_PROPERTIES_2:
+ return "VK_STRUCTURE_TYPE_IMAGE_FORMAT_PROPERTIES_2";
+ case VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO:
+ return "VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO";
+ case VK_STRUCTURE_TYPE_XCB_SURFACE_CREATE_INFO_KHR:
+ return "VK_STRUCTURE_TYPE_XCB_SURFACE_CREATE_INFO_KHR";
+ case VK_STRUCTURE_TYPE_DEBUG_UTILS_LABEL_EXT:
+ return "VK_STRUCTURE_TYPE_DEBUG_UTILS_LABEL_EXT";
+ case VK_STRUCTURE_TYPE_IMPORT_ANDROID_HARDWARE_BUFFER_INFO_ANDROID:
+ return "VK_STRUCTURE_TYPE_IMPORT_ANDROID_HARDWARE_BUFFER_INFO_ANDROID";
+ case VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CREATE_INFO_EXT:
+ return "VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CREATE_INFO_EXT";
+ case VK_STRUCTURE_TYPE_EXTERNAL_FENCE_PROPERTIES:
+ return "VK_STRUCTURE_TYPE_EXTERNAL_FENCE_PROPERTIES";
+ case VK_STRUCTURE_TYPE_SPARSE_IMAGE_FORMAT_PROPERTIES_2:
+ return "VK_STRUCTURE_TYPE_SPARSE_IMAGE_FORMAT_PROPERTIES_2";
+ case VK_STRUCTURE_TYPE_WIN32_KEYED_MUTEX_ACQUIRE_RELEASE_INFO_NV:
+ return "VK_STRUCTURE_TYPE_WIN32_KEYED_MUTEX_ACQUIRE_RELEASE_INFO_NV";
+ case VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO:
+ return "VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO";
+ case VK_STRUCTURE_TYPE_DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_LAYOUT_SUPPORT_EXT:
+ return "VK_STRUCTURE_TYPE_DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_LAYOUT_SUPPORT_EXT";
+ case VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO:
+ return "VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO";
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DISCARD_RECTANGLE_PROPERTIES_EXT:
+ return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DISCARD_RECTANGLE_PROPERTIES_EXT";
+ case VK_STRUCTURE_TYPE_DEVICE_GROUP_RENDER_PASS_BEGIN_INFO:
+ return "VK_STRUCTURE_TYPE_DEVICE_GROUP_RENDER_PASS_BEGIN_INFO";
+ case VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_DEVICE_GROUP_INFO:
+ return "VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_DEVICE_GROUP_INFO";
+ case VK_STRUCTURE_TYPE_DEVICE_GROUP_DEVICE_CREATE_INFO:
+ return "VK_STRUCTURE_TYPE_DEVICE_GROUP_DEVICE_CREATE_INFO";
+ case VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO:
+ return "VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO";
+ case VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_STATE_CREATE_INFO:
+ return "VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_STATE_CREATE_INFO";
+ case VK_STRUCTURE_TYPE_DEBUG_MARKER_OBJECT_NAME_INFO_EXT:
+ return "VK_STRUCTURE_TYPE_DEBUG_MARKER_OBJECT_NAME_INFO_EXT";
+ case VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_SUPPORT:
+ return "VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_SUPPORT";
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GROUP_PROPERTIES:
+ return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GROUP_PROPERTIES";
+ case VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO:
+ return "VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO";
+ case VK_STRUCTURE_TYPE_WIN32_KEYED_MUTEX_ACQUIRE_RELEASE_INFO_KHR:
+ return "VK_STRUCTURE_TYPE_WIN32_KEYED_MUTEX_ACQUIRE_RELEASE_INFO_KHR";
+ case VK_STRUCTURE_TYPE_SWAPCHAIN_COUNTER_CREATE_INFO_EXT:
+ return "VK_STRUCTURE_TYPE_SWAPCHAIN_COUNTER_CREATE_INFO_EXT";
+ case VK_STRUCTURE_TYPE_SHARED_PRESENT_SURFACE_CAPABILITIES_KHR:
+ return "VK_STRUCTURE_TYPE_SHARED_PRESENT_SURFACE_CAPABILITIES_KHR";
+ case VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO:
+ return "VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO";
+ case VK_STRUCTURE_TYPE_SEMAPHORE_GET_WIN32_HANDLE_INFO_KHR:
+ return "VK_STRUCTURE_TYPE_SEMAPHORE_GET_WIN32_HANDLE_INFO_KHR";
+ case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
+ return "VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET";
+ case VK_STRUCTURE_TYPE_MEMORY_GET_WIN32_HANDLE_INFO_KHR:
+ return "VK_STRUCTURE_TYPE_MEMORY_GET_WIN32_HANDLE_INFO_KHR";
+ case VK_STRUCTURE_TYPE_PIPELINE_COVERAGE_MODULATION_STATE_CREATE_INFO_NV:
+ return "VK_STRUCTURE_TYPE_PIPELINE_COVERAGE_MODULATION_STATE_CREATE_INFO_NV";
+ case VK_STRUCTURE_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_CREATE_INFO:
+ return "VK_STRUCTURE_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_CREATE_INFO";
+ case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
+ return "VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET";
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETER_FEATURES:
+ return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETER_FEATURES";
+ case VK_STRUCTURE_TYPE_MACOS_SURFACE_CREATE_INFO_MVK:
+ return "VK_STRUCTURE_TYPE_MACOS_SURFACE_CREATE_INFO_MVK";
+ case VK_STRUCTURE_TYPE_EXTERNAL_FORMAT_ANDROID:
+ return "VK_STRUCTURE_TYPE_EXTERNAL_FORMAT_ANDROID";
+ case VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO:
+ return "VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO";
+ case VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_FD_INFO_KHR:
+ return "VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_FD_INFO_KHR";
+ case VK_STRUCTURE_TYPE_SAMPLE_LOCATIONS_INFO_EXT:
+ return "VK_STRUCTURE_TYPE_SAMPLE_LOCATIONS_INFO_EXT";
+ case VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO:
+ return "VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO";
+ case VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO:
+ return "VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO";
+ case VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_PROPERTIES_ANDROID:
+ return "VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_PROPERTIES_ANDROID";
+ case VK_STRUCTURE_TYPE_DEVICE_GENERATED_COMMANDS_FEATURES_NVX:
+ return "VK_STRUCTURE_TYPE_DEVICE_GENERATED_COMMANDS_FEATURES_NVX";
+ case VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2:
+ return "VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2";
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2:
+ return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2";
+ case VK_STRUCTURE_TYPE_IMAGE_FORMAT_LIST_CREATE_INFO_KHR:
+ return "VK_STRUCTURE_TYPE_IMAGE_FORMAT_LIST_CREATE_INFO_KHR";
+ case VK_STRUCTURE_TYPE_IMAGE_SPARSE_MEMORY_REQUIREMENTS_INFO_2:
+ return "VK_STRUCTURE_TYPE_IMAGE_SPARSE_MEMORY_REQUIREMENTS_INFO_2";
+ default:
+ return "Unhandled VkStructureType";
+ }
+}
+
+static inline const char* string_VkSystemAllocationScope(VkSystemAllocationScope input_value)
+{
+ switch ((VkSystemAllocationScope)input_value)
+ {
+ case VK_SYSTEM_ALLOCATION_SCOPE_COMMAND:
+ return "VK_SYSTEM_ALLOCATION_SCOPE_COMMAND";
+ case VK_SYSTEM_ALLOCATION_SCOPE_CACHE:
+ return "VK_SYSTEM_ALLOCATION_SCOPE_CACHE";
+ case VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE:
+ return "VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE";
+ case VK_SYSTEM_ALLOCATION_SCOPE_OBJECT:
+ return "VK_SYSTEM_ALLOCATION_SCOPE_OBJECT";
+ case VK_SYSTEM_ALLOCATION_SCOPE_DEVICE:
+ return "VK_SYSTEM_ALLOCATION_SCOPE_DEVICE";
+ default:
+ return "Unhandled VkSystemAllocationScope";
+ }
+}
+
+static inline const char* string_VkInternalAllocationType(VkInternalAllocationType input_value)
+{
+ switch ((VkInternalAllocationType)input_value)
+ {
+ case VK_INTERNAL_ALLOCATION_TYPE_EXECUTABLE:
+ return "VK_INTERNAL_ALLOCATION_TYPE_EXECUTABLE";
+ default:
+ return "Unhandled VkInternalAllocationType";
+ }
+}
+
+static inline const char* string_VkFormat(VkFormat input_value)
+{
+ switch ((VkFormat)input_value)
+ {
+ case VK_FORMAT_R32G32B32_SINT:
+ return "VK_FORMAT_R32G32B32_SINT";
+ case VK_FORMAT_G16_B16_R16_3PLANE_422_UNORM:
+ return "VK_FORMAT_G16_B16_R16_3PLANE_422_UNORM";
+ case VK_FORMAT_B8G8R8A8_UINT:
+ return "VK_FORMAT_B8G8R8A8_UINT";
+ case VK_FORMAT_ASTC_5x5_SRGB_BLOCK:
+ return "VK_FORMAT_ASTC_5x5_SRGB_BLOCK";
+ case VK_FORMAT_A2R10G10B10_UINT_PACK32:
+ return "VK_FORMAT_A2R10G10B10_UINT_PACK32";
+ case VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK:
+ return "VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK";
+ case VK_FORMAT_ASTC_8x6_UNORM_BLOCK:
+ return "VK_FORMAT_ASTC_8x6_UNORM_BLOCK";
+ case VK_FORMAT_B4G4R4A4_UNORM_PACK16:
+ return "VK_FORMAT_B4G4R4A4_UNORM_PACK16";
+ case VK_FORMAT_R16G16_SINT:
+ return "VK_FORMAT_R16G16_SINT";
+ case VK_FORMAT_BC1_RGB_SRGB_BLOCK:
+ return "VK_FORMAT_BC1_RGB_SRGB_BLOCK";
+ case VK_FORMAT_R8G8_USCALED:
+ return "VK_FORMAT_R8G8_USCALED";
+ case VK_FORMAT_ASTC_10x8_UNORM_BLOCK:
+ return "VK_FORMAT_ASTC_10x8_UNORM_BLOCK";
+ case VK_FORMAT_G8_B8R8_2PLANE_420_UNORM:
+ return "VK_FORMAT_G8_B8R8_2PLANE_420_UNORM";
+ case VK_FORMAT_B8G8R8A8_SNORM:
+ return "VK_FORMAT_B8G8R8A8_SNORM";
+ case VK_FORMAT_B5G5R5A1_UNORM_PACK16:
+ return "VK_FORMAT_B5G5R5A1_UNORM_PACK16";
+ case VK_FORMAT_R64G64_UINT:
+ return "VK_FORMAT_R64G64_UINT";
+ case VK_FORMAT_R5G5B5A1_UNORM_PACK16:
+ return "VK_FORMAT_R5G5B5A1_UNORM_PACK16";
+ case VK_FORMAT_A2B10G10R10_UNORM_PACK32:
+ return "VK_FORMAT_A2B10G10R10_UNORM_PACK32";
+ case VK_FORMAT_R16G16_USCALED:
+ return "VK_FORMAT_R16G16_USCALED";
+ case VK_FORMAT_G8_B8_R8_3PLANE_422_UNORM:
+ return "VK_FORMAT_G8_B8_R8_3PLANE_422_UNORM";
+ case VK_FORMAT_ASTC_8x8_UNORM_BLOCK:
+ return "VK_FORMAT_ASTC_8x8_UNORM_BLOCK";
+ case VK_FORMAT_R8G8_SSCALED:
+ return "VK_FORMAT_R8G8_SSCALED";
+ case VK_FORMAT_R16G16_SSCALED:
+ return "VK_FORMAT_R16G16_SSCALED";
+ case VK_FORMAT_ASTC_8x5_UNORM_BLOCK:
+ return "VK_FORMAT_ASTC_8x5_UNORM_BLOCK";
+ case VK_FORMAT_EAC_R11_UNORM_BLOCK:
+ return "VK_FORMAT_EAC_R11_UNORM_BLOCK";
+ case VK_FORMAT_A1R5G5B5_UNORM_PACK16:
+ return "VK_FORMAT_A1R5G5B5_UNORM_PACK16";
+ case VK_FORMAT_R16_USCALED:
+ return "VK_FORMAT_R16_USCALED";
+ case VK_FORMAT_BC2_UNORM_BLOCK:
+ return "VK_FORMAT_BC2_UNORM_BLOCK";
+ case VK_FORMAT_R16_UNORM:
+ return "VK_FORMAT_R16_UNORM";
+ case VK_FORMAT_R8_USCALED:
+ return "VK_FORMAT_R8_USCALED";
+ case VK_FORMAT_R16G16_UNORM:
+ return "VK_FORMAT_R16G16_UNORM";
+ case VK_FORMAT_ASTC_10x5_UNORM_BLOCK:
+ return "VK_FORMAT_ASTC_10x5_UNORM_BLOCK";
+ case VK_FORMAT_R16G16B16_SFLOAT:
+ return "VK_FORMAT_R16G16B16_SFLOAT";
+ case VK_FORMAT_PVRTC1_2BPP_UNORM_BLOCK_IMG:
+ return "VK_FORMAT_PVRTC1_2BPP_UNORM_BLOCK_IMG";
+ case VK_FORMAT_A2R10G10B10_SNORM_PACK32:
+ return "VK_FORMAT_A2R10G10B10_SNORM_PACK32";
+ case VK_FORMAT_ASTC_10x6_SRGB_BLOCK:
+ return "VK_FORMAT_ASTC_10x6_SRGB_BLOCK";
+ case VK_FORMAT_R8_UNORM:
+ return "VK_FORMAT_R8_UNORM";
+ case VK_FORMAT_PVRTC2_4BPP_SRGB_BLOCK_IMG:
+ return "VK_FORMAT_PVRTC2_4BPP_SRGB_BLOCK_IMG";
+ case VK_FORMAT_A8B8G8R8_SINT_PACK32:
+ return "VK_FORMAT_A8B8G8R8_SINT_PACK32";
+ case VK_FORMAT_B8G8R8_UNORM:
+ return "VK_FORMAT_B8G8R8_UNORM";
+ case VK_FORMAT_R8G8_UINT:
+ return "VK_FORMAT_R8G8_UINT";
+ case VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK:
+ return "VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK";
+ case VK_FORMAT_R8_SSCALED:
+ return "VK_FORMAT_R8_SSCALED";
+ case VK_FORMAT_A8B8G8R8_SRGB_PACK32:
+ return "VK_FORMAT_A8B8G8R8_SRGB_PACK32";
+ case VK_FORMAT_BC7_UNORM_BLOCK:
+ return "VK_FORMAT_BC7_UNORM_BLOCK";
+ case VK_FORMAT_A2R10G10B10_SSCALED_PACK32:
+ return "VK_FORMAT_A2R10G10B10_SSCALED_PACK32";
+ case VK_FORMAT_R16G16B16A16_SINT:
+ return "VK_FORMAT_R16G16B16A16_SINT";
+ case VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_444_UNORM_3PACK16:
+ return "VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_444_UNORM_3PACK16";
+ case VK_FORMAT_B8G8R8A8_SSCALED:
+ return "VK_FORMAT_B8G8R8A8_SSCALED";
+ case VK_FORMAT_G16_B16_R16_3PLANE_444_UNORM:
+ return "VK_FORMAT_G16_B16_R16_3PLANE_444_UNORM";
+ case VK_FORMAT_R8G8B8_USCALED:
+ return "VK_FORMAT_R8G8B8_USCALED";
+ case VK_FORMAT_PVRTC1_4BPP_UNORM_BLOCK_IMG:
+ return "VK_FORMAT_PVRTC1_4BPP_UNORM_BLOCK_IMG";
+ case VK_FORMAT_B8G8R8_SRGB:
+ return "VK_FORMAT_B8G8R8_SRGB";
+ case VK_FORMAT_A2B10G10R10_UINT_PACK32:
+ return "VK_FORMAT_A2B10G10R10_UINT_PACK32";
+ case VK_FORMAT_R64G64_SINT:
+ return "VK_FORMAT_R64G64_SINT";
+ case VK_FORMAT_B8G8R8G8_422_UNORM:
+ return "VK_FORMAT_B8G8R8G8_422_UNORM";
+ case VK_FORMAT_G16_B16_R16_3PLANE_420_UNORM:
+ return "VK_FORMAT_G16_B16_R16_3PLANE_420_UNORM";
+ case VK_FORMAT_R64_UINT:
+ return "VK_FORMAT_R64_UINT";
+ case VK_FORMAT_EAC_R11G11_UNORM_BLOCK:
+ return "VK_FORMAT_EAC_R11G11_UNORM_BLOCK";
+ case VK_FORMAT_BC5_SNORM_BLOCK:
+ return "VK_FORMAT_BC5_SNORM_BLOCK";
+ case VK_FORMAT_ASTC_6x5_SRGB_BLOCK:
+ return "VK_FORMAT_ASTC_6x5_SRGB_BLOCK";
+ case VK_FORMAT_R16G16B16A16_SSCALED:
+ return "VK_FORMAT_R16G16B16A16_SSCALED";
+ case VK_FORMAT_G8_B8R8_2PLANE_422_UNORM:
+ return "VK_FORMAT_G8_B8R8_2PLANE_422_UNORM";
+ case VK_FORMAT_R32G32B32_UINT:
+ return "VK_FORMAT_R32G32B32_UINT";
+ case VK_FORMAT_R8G8_SNORM:
+ return "VK_FORMAT_R8G8_SNORM";
+ case VK_FORMAT_B8G8R8_USCALED:
+ return "VK_FORMAT_B8G8R8_USCALED";
+ case VK_FORMAT_R16G16B16A16_SFLOAT:
+ return "VK_FORMAT_R16G16B16A16_SFLOAT";
+ case VK_FORMAT_R16G16B16_USCALED:
+ return "VK_FORMAT_R16G16B16_USCALED";
+ case VK_FORMAT_A2R10G10B10_SINT_PACK32:
+ return "VK_FORMAT_A2R10G10B10_SINT_PACK32";
+ case VK_FORMAT_R32_SINT:
+ return "VK_FORMAT_R32_SINT";
+ case VK_FORMAT_R64_SINT:
+ return "VK_FORMAT_R64_SINT";
+ case VK_FORMAT_A8B8G8R8_USCALED_PACK32:
+ return "VK_FORMAT_A8B8G8R8_USCALED_PACK32";
+ case VK_FORMAT_D24_UNORM_S8_UINT:
+ return "VK_FORMAT_D24_UNORM_S8_UINT";
+ case VK_FORMAT_G8B8G8R8_422_UNORM:
+ return "VK_FORMAT_G8B8G8R8_422_UNORM";
+ case VK_FORMAT_BC4_SNORM_BLOCK:
+ return "VK_FORMAT_BC4_SNORM_BLOCK";
+ case VK_FORMAT_R16G16_SFLOAT:
+ return "VK_FORMAT_R16G16_SFLOAT";
+ case VK_FORMAT_BC1_RGB_UNORM_BLOCK:
+ return "VK_FORMAT_BC1_RGB_UNORM_BLOCK";
+ case VK_FORMAT_R64_SFLOAT:
+ return "VK_FORMAT_R64_SFLOAT";
+ case VK_FORMAT_R64G64B64_SFLOAT:
+ return "VK_FORMAT_R64G64B64_SFLOAT";
+ case VK_FORMAT_BC3_SRGB_BLOCK:
+ return "VK_FORMAT_BC3_SRGB_BLOCK";
+ case VK_FORMAT_S8_UINT:
+ return "VK_FORMAT_S8_UINT";
+ case VK_FORMAT_PVRTC2_2BPP_SRGB_BLOCK_IMG:
+ return "VK_FORMAT_PVRTC2_2BPP_SRGB_BLOCK_IMG";
+ case VK_FORMAT_R8G8B8_SNORM:
+ return "VK_FORMAT_R8G8B8_SNORM";
+ case VK_FORMAT_D32_SFLOAT:
+ return "VK_FORMAT_D32_SFLOAT";
+ case VK_FORMAT_ASTC_10x10_SRGB_BLOCK:
+ return "VK_FORMAT_ASTC_10x10_SRGB_BLOCK";
+ case VK_FORMAT_ASTC_4x4_SRGB_BLOCK:
+ return "VK_FORMAT_ASTC_4x4_SRGB_BLOCK";
+ case VK_FORMAT_R12X4G12X4_UNORM_2PACK16:
+ return "VK_FORMAT_R12X4G12X4_UNORM_2PACK16";
+ case VK_FORMAT_G16B16G16R16_422_UNORM:
+ return "VK_FORMAT_G16B16G16R16_422_UNORM";
+ case VK_FORMAT_BC7_SRGB_BLOCK:
+ return "VK_FORMAT_BC7_SRGB_BLOCK";
+ case VK_FORMAT_R16G16_SNORM:
+ return "VK_FORMAT_R16G16_SNORM";
+ case VK_FORMAT_R32_UINT:
+ return "VK_FORMAT_R32_UINT";
+ case VK_FORMAT_R4G4B4A4_UNORM_PACK16:
+ return "VK_FORMAT_R4G4B4A4_UNORM_PACK16";
+ case VK_FORMAT_A2R10G10B10_USCALED_PACK32:
+ return "VK_FORMAT_A2R10G10B10_USCALED_PACK32";
+ case VK_FORMAT_R32_SFLOAT:
+ return "VK_FORMAT_R32_SFLOAT";
+ case VK_FORMAT_ASTC_10x5_SRGB_BLOCK:
+ return "VK_FORMAT_ASTC_10x5_SRGB_BLOCK";
+ case VK_FORMAT_R32G32B32_SFLOAT:
+ return "VK_FORMAT_R32G32B32_SFLOAT";
+ case VK_FORMAT_R16_UINT:
+ return "VK_FORMAT_R16_UINT";
+ case VK_FORMAT_ASTC_12x12_UNORM_BLOCK:
+ return "VK_FORMAT_ASTC_12x12_UNORM_BLOCK";
+ case VK_FORMAT_R8G8_SRGB:
+ return "VK_FORMAT_R8G8_SRGB";
+ case VK_FORMAT_R64G64B64A64_UINT:
+ return "VK_FORMAT_R64G64B64A64_UINT";
+ case VK_FORMAT_ASTC_12x10_SRGB_BLOCK:
+ return "VK_FORMAT_ASTC_12x10_SRGB_BLOCK";
+ case VK_FORMAT_R16G16B16_SNORM:
+ return "VK_FORMAT_R16G16B16_SNORM";
+ case VK_FORMAT_R32G32_UINT:
+ return "VK_FORMAT_R32G32_UINT";
+ case VK_FORMAT_BC1_RGBA_UNORM_BLOCK:
+ return "VK_FORMAT_BC1_RGBA_UNORM_BLOCK";
+ case VK_FORMAT_R8G8B8_UNORM:
+ return "VK_FORMAT_R8G8B8_UNORM";
+ case VK_FORMAT_R8G8B8A8_SSCALED:
+ return "VK_FORMAT_R8G8B8A8_SSCALED";
+ case VK_FORMAT_G12X4_B12X4R12X4_2PLANE_420_UNORM_3PACK16:
+ return "VK_FORMAT_G12X4_B12X4R12X4_2PLANE_420_UNORM_3PACK16";
+ case VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_420_UNORM_3PACK16:
+ return "VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_420_UNORM_3PACK16";
+ case VK_FORMAT_G8_B8_R8_3PLANE_444_UNORM:
+ return "VK_FORMAT_G8_B8_R8_3PLANE_444_UNORM";
+ case VK_FORMAT_PVRTC2_2BPP_UNORM_BLOCK_IMG:
+ return "VK_FORMAT_PVRTC2_2BPP_UNORM_BLOCK_IMG";
+ case VK_FORMAT_R16G16B16A16_USCALED:
+ return "VK_FORMAT_R16G16B16A16_USCALED";
+ case VK_FORMAT_R8G8B8_SINT:
+ return "VK_FORMAT_R8G8B8_SINT";
+ case VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_422_UNORM_3PACK16:
+ return "VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_422_UNORM_3PACK16";
+ case VK_FORMAT_G10X6_B10X6R10X6_2PLANE_420_UNORM_3PACK16:
+ return "VK_FORMAT_G10X6_B10X6R10X6_2PLANE_420_UNORM_3PACK16";
+ case VK_FORMAT_B16G16R16G16_422_UNORM:
+ return "VK_FORMAT_B16G16R16G16_422_UNORM";
+ case VK_FORMAT_R16G16B16_SINT:
+ return "VK_FORMAT_R16G16B16_SINT";
+ case VK_FORMAT_UNDEFINED:
+ return "VK_FORMAT_UNDEFINED";
+ case VK_FORMAT_B5G6R5_UNORM_PACK16:
+ return "VK_FORMAT_B5G6R5_UNORM_PACK16";
+ case VK_FORMAT_R8G8B8A8_SRGB:
+ return "VK_FORMAT_R8G8B8A8_SRGB";
+ case VK_FORMAT_A2B10G10R10_SSCALED_PACK32:
+ return "VK_FORMAT_A2B10G10R10_SSCALED_PACK32";
+ case VK_FORMAT_B8G8R8_SINT:
+ return "VK_FORMAT_B8G8R8_SINT";
+ case VK_FORMAT_B10G11R11_UFLOAT_PACK32:
+ return "VK_FORMAT_B10G11R11_UFLOAT_PACK32";
+ case VK_FORMAT_BC5_UNORM_BLOCK:
+ return "VK_FORMAT_BC5_UNORM_BLOCK";
+ case VK_FORMAT_ASTC_5x4_SRGB_BLOCK:
+ return "VK_FORMAT_ASTC_5x4_SRGB_BLOCK";
+ case VK_FORMAT_ASTC_5x4_UNORM_BLOCK:
+ return "VK_FORMAT_ASTC_5x4_UNORM_BLOCK";
+ case VK_FORMAT_R8G8B8A8_SINT:
+ return "VK_FORMAT_R8G8B8A8_SINT";
+ case VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_422_UNORM_3PACK16:
+ return "VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_422_UNORM_3PACK16";
+ case VK_FORMAT_R8G8B8A8_UNORM:
+ return "VK_FORMAT_R8G8B8A8_UNORM";
+ case VK_FORMAT_G16_B16R16_2PLANE_420_UNORM:
+ return "VK_FORMAT_G16_B16R16_2PLANE_420_UNORM";
+ case VK_FORMAT_G16_B16R16_2PLANE_422_UNORM:
+ return "VK_FORMAT_G16_B16R16_2PLANE_422_UNORM";
+ case VK_FORMAT_EAC_R11G11_SNORM_BLOCK:
+ return "VK_FORMAT_EAC_R11G11_SNORM_BLOCK";
+ case VK_FORMAT_R8G8_UNORM:
+ return "VK_FORMAT_R8G8_UNORM";
+ case VK_FORMAT_A2B10G10R10_SINT_PACK32:
+ return "VK_FORMAT_A2B10G10R10_SINT_PACK32";
+ case VK_FORMAT_ASTC_4x4_UNORM_BLOCK:
+ return "VK_FORMAT_ASTC_4x4_UNORM_BLOCK";
+ case VK_FORMAT_G10X6_B10X6R10X6_2PLANE_422_UNORM_3PACK16:
+ return "VK_FORMAT_G10X6_B10X6R10X6_2PLANE_422_UNORM_3PACK16";
+ case VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_444_UNORM_3PACK16:
+ return "VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_444_UNORM_3PACK16";
+ case VK_FORMAT_R16_SINT:
+ return "VK_FORMAT_R16_SINT";
+ case VK_FORMAT_R8G8B8_SRGB:
+ return "VK_FORMAT_R8G8B8_SRGB";
+ case VK_FORMAT_B8G8R8_SNORM:
+ return "VK_FORMAT_B8G8R8_SNORM";
+ case VK_FORMAT_ASTC_12x12_SRGB_BLOCK:
+ return "VK_FORMAT_ASTC_12x12_SRGB_BLOCK";
+ case VK_FORMAT_BC2_SRGB_BLOCK:
+ return "VK_FORMAT_BC2_SRGB_BLOCK";
+ case VK_FORMAT_R10X6_UNORM_PACK16:
+ return "VK_FORMAT_R10X6_UNORM_PACK16";
+ case VK_FORMAT_R64G64_SFLOAT:
+ return "VK_FORMAT_R64G64_SFLOAT";
+ case VK_FORMAT_R4G4_UNORM_PACK8:
+ return "VK_FORMAT_R4G4_UNORM_PACK8";
+ case VK_FORMAT_R16_SSCALED:
+ return "VK_FORMAT_R16_SSCALED";
+ case VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_420_UNORM_3PACK16:
+ return "VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_420_UNORM_3PACK16";
+ case VK_FORMAT_R32G32B32A32_SINT:
+ return "VK_FORMAT_R32G32B32A32_SINT";
+ case VK_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK:
+ return "VK_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK";
+ case VK_FORMAT_PVRTC1_2BPP_SRGB_BLOCK_IMG:
+ return "VK_FORMAT_PVRTC1_2BPP_SRGB_BLOCK_IMG";
+ case VK_FORMAT_R8G8B8_UINT:
+ return "VK_FORMAT_R8G8B8_UINT";
+ case VK_FORMAT_R16G16B16_UNORM:
+ return "VK_FORMAT_R16G16B16_UNORM";
+ case VK_FORMAT_R16G16B16_UINT:
+ return "VK_FORMAT_R16G16B16_UINT";
+ case VK_FORMAT_A8B8G8R8_UNORM_PACK32:
+ return "VK_FORMAT_A8B8G8R8_UNORM_PACK32";
+ case VK_FORMAT_B8G8R8_SSCALED:
+ return "VK_FORMAT_B8G8R8_SSCALED";
+ case VK_FORMAT_X8_D24_UNORM_PACK32:
+ return "VK_FORMAT_X8_D24_UNORM_PACK32";
+ case VK_FORMAT_R32G32_SFLOAT:
+ return "VK_FORMAT_R32G32_SFLOAT";
+ case VK_FORMAT_E5B9G9R9_UFLOAT_PACK32:
+ return "VK_FORMAT_E5B9G9R9_UFLOAT_PACK32";
+ case VK_FORMAT_ASTC_6x6_SRGB_BLOCK:
+ return "VK_FORMAT_ASTC_6x6_SRGB_BLOCK";
+ case VK_FORMAT_PVRTC2_4BPP_UNORM_BLOCK_IMG:
+ return "VK_FORMAT_PVRTC2_4BPP_UNORM_BLOCK_IMG";
+ case VK_FORMAT_R16G16B16A16_UINT:
+ return "VK_FORMAT_R16G16B16A16_UINT";
+ case VK_FORMAT_R8G8B8A8_USCALED:
+ return "VK_FORMAT_R8G8B8A8_USCALED";
+ case VK_FORMAT_R16G16B16A16_SNORM:
+ return "VK_FORMAT_R16G16B16A16_SNORM";
+ case VK_FORMAT_R16G16B16A16_UNORM:
+ return "VK_FORMAT_R16G16B16A16_UNORM";
+ case VK_FORMAT_D16_UNORM:
+ return "VK_FORMAT_D16_UNORM";
+ case VK_FORMAT_G12X4B12X4G12X4R12X4_422_UNORM_4PACK16:
+ return "VK_FORMAT_G12X4B12X4G12X4R12X4_422_UNORM_4PACK16";
+ case VK_FORMAT_BC3_UNORM_BLOCK:
+ return "VK_FORMAT_BC3_UNORM_BLOCK";
+ case VK_FORMAT_A2B10G10R10_USCALED_PACK32:
+ return "VK_FORMAT_A2B10G10R10_USCALED_PACK32";
+ case VK_FORMAT_R8_SRGB:
+ return "VK_FORMAT_R8_SRGB";
+ case VK_FORMAT_R32G32B32A32_SFLOAT:
+ return "VK_FORMAT_R32G32B32A32_SFLOAT";
+ case VK_FORMAT_A2R10G10B10_UNORM_PACK32:
+ return "VK_FORMAT_A2R10G10B10_UNORM_PACK32";
+ case VK_FORMAT_R8G8_SINT:
+ return "VK_FORMAT_R8G8_SINT";
+ case VK_FORMAT_R10X6G10X6B10X6A10X6_UNORM_4PACK16:
+ return "VK_FORMAT_R10X6G10X6B10X6A10X6_UNORM_4PACK16";
+ case VK_FORMAT_B10X6G10X6R10X6G10X6_422_UNORM_4PACK16:
+ return "VK_FORMAT_B10X6G10X6R10X6G10X6_422_UNORM_4PACK16";
+ case VK_FORMAT_A2B10G10R10_SNORM_PACK32:
+ return "VK_FORMAT_A2B10G10R10_SNORM_PACK32";
+ case VK_FORMAT_BC1_RGBA_SRGB_BLOCK:
+ return "VK_FORMAT_BC1_RGBA_SRGB_BLOCK";
+ case VK_FORMAT_D32_SFLOAT_S8_UINT:
+ return "VK_FORMAT_D32_SFLOAT_S8_UINT";
+ case VK_FORMAT_B8G8R8A8_USCALED:
+ return "VK_FORMAT_B8G8R8A8_USCALED";
+ case VK_FORMAT_ASTC_6x6_UNORM_BLOCK:
+ return "VK_FORMAT_ASTC_6x6_UNORM_BLOCK";
+ case VK_FORMAT_R5G6B5_UNORM_PACK16:
+ return "VK_FORMAT_R5G6B5_UNORM_PACK16";
+ case VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK:
+ return "VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK";
+ case VK_FORMAT_R12X4G12X4B12X4A12X4_UNORM_4PACK16:
+ return "VK_FORMAT_R12X4G12X4B12X4A12X4_UNORM_4PACK16";
+ case VK_FORMAT_PVRTC1_4BPP_SRGB_BLOCK_IMG:
+ return "VK_FORMAT_PVRTC1_4BPP_SRGB_BLOCK_IMG";
+ case VK_FORMAT_R8G8B8A8_SNORM:
+ return "VK_FORMAT_R8G8B8A8_SNORM";
+ case VK_FORMAT_ASTC_10x10_UNORM_BLOCK:
+ return "VK_FORMAT_ASTC_10x10_UNORM_BLOCK";
+ case VK_FORMAT_BC6H_SFLOAT_BLOCK:
+ return "VK_FORMAT_BC6H_SFLOAT_BLOCK";
+ case VK_FORMAT_R16_SFLOAT:
+ return "VK_FORMAT_R16_SFLOAT";
+ case VK_FORMAT_A8B8G8R8_SSCALED_PACK32:
+ return "VK_FORMAT_A8B8G8R8_SSCALED_PACK32";
+ case VK_FORMAT_ASTC_10x8_SRGB_BLOCK:
+ return "VK_FORMAT_ASTC_10x8_SRGB_BLOCK";
+ case VK_FORMAT_B8G8R8A8_SINT:
+ return "VK_FORMAT_B8G8R8A8_SINT";
+ case VK_FORMAT_R8_SNORM:
+ return "VK_FORMAT_R8_SNORM";
+ case VK_FORMAT_R32G32_SINT:
+ return "VK_FORMAT_R32G32_SINT";
+ case VK_FORMAT_R32G32B32A32_UINT:
+ return "VK_FORMAT_R32G32B32A32_UINT";
+ case VK_FORMAT_A8B8G8R8_SNORM_PACK32:
+ return "VK_FORMAT_A8B8G8R8_SNORM_PACK32";
+ case VK_FORMAT_A8B8G8R8_UINT_PACK32:
+ return "VK_FORMAT_A8B8G8R8_UINT_PACK32";
+ case VK_FORMAT_BC4_UNORM_BLOCK:
+ return "VK_FORMAT_BC4_UNORM_BLOCK";
+ case VK_FORMAT_B8G8R8_UINT:
+ return "VK_FORMAT_B8G8R8_UINT";
+ case VK_FORMAT_D16_UNORM_S8_UINT:
+ return "VK_FORMAT_D16_UNORM_S8_UINT";
+ case VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK:
+ return "VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK";
+ case VK_FORMAT_R8G8B8A8_UINT:
+ return "VK_FORMAT_R8G8B8A8_UINT";
+ case VK_FORMAT_R12X4_UNORM_PACK16:
+ return "VK_FORMAT_R12X4_UNORM_PACK16";
+ case VK_FORMAT_R64G64B64_SINT:
+ return "VK_FORMAT_R64G64B64_SINT";
+ case VK_FORMAT_EAC_R11_SNORM_BLOCK:
+ return "VK_FORMAT_EAC_R11_SNORM_BLOCK";
+ case VK_FORMAT_R64G64B64_UINT:
+ return "VK_FORMAT_R64G64B64_UINT";
+ case VK_FORMAT_R64G64B64A64_SINT:
+ return "VK_FORMAT_R64G64B64A64_SINT";
+ case VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK:
+ return "VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK";
+ case VK_FORMAT_ASTC_6x5_UNORM_BLOCK:
+ return "VK_FORMAT_ASTC_6x5_UNORM_BLOCK";
+ case VK_FORMAT_ASTC_8x5_SRGB_BLOCK:
+ return "VK_FORMAT_ASTC_8x5_SRGB_BLOCK";
+ case VK_FORMAT_ASTC_12x10_UNORM_BLOCK:
+ return "VK_FORMAT_ASTC_12x10_UNORM_BLOCK";
+ case VK_FORMAT_ASTC_8x6_SRGB_BLOCK:
+ return "VK_FORMAT_ASTC_8x6_SRGB_BLOCK";
+ case VK_FORMAT_R8G8B8_SSCALED:
+ return "VK_FORMAT_R8G8B8_SSCALED";
+ case VK_FORMAT_B8G8R8A8_UNORM:
+ return "VK_FORMAT_B8G8R8A8_UNORM";
+ case VK_FORMAT_R16_SNORM:
+ return "VK_FORMAT_R16_SNORM";
+ case VK_FORMAT_R8_UINT:
+ return "VK_FORMAT_R8_UINT";
+ case VK_FORMAT_R64G64B64A64_SFLOAT:
+ return "VK_FORMAT_R64G64B64A64_SFLOAT";
+ case VK_FORMAT_ASTC_5x5_UNORM_BLOCK:
+ return "VK_FORMAT_ASTC_5x5_UNORM_BLOCK";
+ case VK_FORMAT_ASTC_8x8_SRGB_BLOCK:
+ return "VK_FORMAT_ASTC_8x8_SRGB_BLOCK";
+ case VK_FORMAT_R8_SINT:
+ return "VK_FORMAT_R8_SINT";
+ case VK_FORMAT_B8G8R8A8_SRGB:
+ return "VK_FORMAT_B8G8R8A8_SRGB";
+ case VK_FORMAT_G12X4_B12X4R12X4_2PLANE_422_UNORM_3PACK16:
+ return "VK_FORMAT_G12X4_B12X4R12X4_2PLANE_422_UNORM_3PACK16";
+ case VK_FORMAT_G10X6B10X6G10X6R10X6_422_UNORM_4PACK16:
+ return "VK_FORMAT_G10X6B10X6G10X6R10X6_422_UNORM_4PACK16";
+ case VK_FORMAT_BC6H_UFLOAT_BLOCK:
+ return "VK_FORMAT_BC6H_UFLOAT_BLOCK";
+ case VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM:
+ return "VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM";
+ case VK_FORMAT_R10X6G10X6_UNORM_2PACK16:
+ return "VK_FORMAT_R10X6G10X6_UNORM_2PACK16";
+ case VK_FORMAT_R16G16_UINT:
+ return "VK_FORMAT_R16G16_UINT";
+ case VK_FORMAT_ASTC_10x6_UNORM_BLOCK:
+ return "VK_FORMAT_ASTC_10x6_UNORM_BLOCK";
+ case VK_FORMAT_R16G16B16_SSCALED:
+ return "VK_FORMAT_R16G16B16_SSCALED";
+ case VK_FORMAT_B12X4G12X4R12X4G12X4_422_UNORM_4PACK16:
+ return "VK_FORMAT_B12X4G12X4R12X4G12X4_422_UNORM_4PACK16";
+ default:
+ return "Unhandled VkFormat";
+ }
+}
+
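Every helper in this generated header follows the same pattern as string_VkFormat above: a switch that maps one enum value to its literal spelling, falling back to an "Unhandled ..." string for values the generator did not know about. A minimal sketch of the intended use, assuming only <vulkan/vulkan.h>, this header, and a hypothetical already-selected VkPhysicalDevice (the function name and 'gpu' parameter are illustrative, not part of this commit):

    #include <stdio.h>
    #include <vulkan/vulkan.h>
    /* plus this header, for string_VkFormat() */

    /* Report, by name, a format that cannot be used as a color attachment. */
    void check_color_format(VkPhysicalDevice gpu, VkFormat format)
    {
        VkFormatProperties props;
        vkGetPhysicalDeviceFormatProperties(gpu, format, &props);
        if (!(props.optimalTilingFeatures & VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT)) {
            fprintf(stderr, "%s lacks optimal-tiling color attachment support\n",
                    string_VkFormat(format));
        }
    }

Because the helpers return string literals, the results are valid for the life of the program and never need to be freed.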
+static inline const char* string_VkFormatFeatureFlagBits(VkFormatFeatureFlagBits input_value)
+{
+ switch ((VkFormatFeatureFlagBits)input_value)
+ {
+ case VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BLEND_BIT:
+ return "VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BLEND_BIT";
+ case VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT:
+ return "VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT";
+ case VK_FORMAT_FEATURE_TRANSFER_DST_BIT:
+ return "VK_FORMAT_FEATURE_TRANSFER_DST_BIT";
+ case VK_FORMAT_FEATURE_COSITED_CHROMA_SAMPLES_BIT:
+ return "VK_FORMAT_FEATURE_COSITED_CHROMA_SAMPLES_BIT";
+ case VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_LINEAR_BIT:
+ return "VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_LINEAR_BIT";
+ case VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_LINEAR_FILTER_BIT:
+ return "VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_LINEAR_FILTER_BIT";
+ case VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT:
+ return "VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT";
+ case VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT:
+ return "VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT";
+ case VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_ATOMIC_BIT:
+ return "VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_ATOMIC_BIT";
+ case VK_FORMAT_FEATURE_DISJOINT_BIT:
+ return "VK_FORMAT_FEATURE_DISJOINT_BIT";
+ case VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_BIT:
+ return "VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_BIT";
+ case VK_FORMAT_FEATURE_TRANSFER_SRC_BIT:
+ return "VK_FORMAT_FEATURE_TRANSFER_SRC_BIT";
+ case VK_FORMAT_FEATURE_STORAGE_IMAGE_ATOMIC_BIT:
+ return "VK_FORMAT_FEATURE_STORAGE_IMAGE_ATOMIC_BIT";
+ case VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_CUBIC_BIT_IMG:
+ return "VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_CUBIC_BIT_IMG";
+ case VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT:
+ return "VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT";
+ case VK_FORMAT_FEATURE_UNIFORM_TEXEL_BUFFER_BIT:
+ return "VK_FORMAT_FEATURE_UNIFORM_TEXEL_BUFFER_BIT";
+ case VK_FORMAT_FEATURE_BLIT_DST_BIT:
+ return "VK_FORMAT_FEATURE_BLIT_DST_BIT";
+ case VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT:
+ return "VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT";
+ case VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_MINMAX_BIT_EXT:
+ return "VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_MINMAX_BIT_EXT";
+ case VK_FORMAT_FEATURE_MIDPOINT_CHROMA_SAMPLES_BIT:
+ return "VK_FORMAT_FEATURE_MIDPOINT_CHROMA_SAMPLES_BIT";
+ case VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_SEPARATE_RECONSTRUCTION_FILTER_BIT:
+ return "VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_SEPARATE_RECONSTRUCTION_FILTER_BIT";
+ case VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_BIT:
+ return "VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_BIT";
+ case VK_FORMAT_FEATURE_BLIT_SRC_BIT:
+ return "VK_FORMAT_FEATURE_BLIT_SRC_BIT";
+ case VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_FORCEABLE_BIT:
+ return "VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_FORCEABLE_BIT";
+ default:
+ return "Unhandled VkFormatFeatureFlagBits";
+ }
+}
+
+static inline const char* string_VkImageType(VkImageType input_value)
+{
+ switch ((VkImageType)input_value)
+ {
+ case VK_IMAGE_TYPE_2D:
+ return "VK_IMAGE_TYPE_2D";
+ case VK_IMAGE_TYPE_1D:
+ return "VK_IMAGE_TYPE_1D";
+ case VK_IMAGE_TYPE_3D:
+ return "VK_IMAGE_TYPE_3D";
+ default:
+ return "Unhandled VkImageType";
+ }
+}
+
+static inline const char* string_VkImageTiling(VkImageTiling input_value)
+{
+ switch ((VkImageTiling)input_value)
+ {
+ case VK_IMAGE_TILING_OPTIMAL:
+ return "VK_IMAGE_TILING_OPTIMAL";
+ case VK_IMAGE_TILING_LINEAR:
+ return "VK_IMAGE_TILING_LINEAR";
+ default:
+ return "Unhandled VkImageTiling";
+ }
+}
+
+static inline const char* string_VkImageUsageFlagBits(VkImageUsageFlagBits input_value)
+{
+ switch ((VkImageUsageFlagBits)input_value)
+ {
+ case VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT:
+ return "VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT";
+ case VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT:
+ return "VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT";
+ case VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT:
+ return "VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT";
+ case VK_IMAGE_USAGE_SAMPLED_BIT:
+ return "VK_IMAGE_USAGE_SAMPLED_BIT";
+ case VK_IMAGE_USAGE_TRANSFER_DST_BIT:
+ return "VK_IMAGE_USAGE_TRANSFER_DST_BIT";
+ case VK_IMAGE_USAGE_STORAGE_BIT:
+ return "VK_IMAGE_USAGE_STORAGE_BIT";
+ case VK_IMAGE_USAGE_TRANSFER_SRC_BIT:
+ return "VK_IMAGE_USAGE_TRANSFER_SRC_BIT";
+ case VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT:
+ return "VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT";
+ default:
+ return "Unhandled VkImageUsageFlagBits";
+ }
+}
+
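Note that the *FlagBits helpers such as string_VkImageUsageFlagBits above translate exactly one set bit; a combined VkImageUsageFlags mask matches no case and falls through to "Unhandled VkImageUsageFlagBits". A short sketch of decomposing a mask bit by bit before stringifying (the function name is illustrative):

    #include <stdio.h>
    #include <vulkan/vulkan.h>

    /* Print every set bit of a usage mask on its own line. The loop visits
       all 32 bit positions; the unsigned shift wraps to 0 and ends the loop. */
    static void print_image_usage(VkImageUsageFlags usage)
    {
        for (uint32_t bit = 1; bit != 0; bit <<= 1) {
            if (usage & bit)
                printf("  %s\n", string_VkImageUsageFlagBits((VkImageUsageFlagBits)bit));
        }
    }

The same decomposition applies to every FlagBits helper in this header.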
+static inline const char* string_VkImageCreateFlagBits(VkImageCreateFlagBits input_value)
+{
+ switch ((VkImageCreateFlagBits)input_value)
+ {
+ case VK_IMAGE_CREATE_ALIAS_BIT:
+ return "VK_IMAGE_CREATE_ALIAS_BIT";
+ case VK_IMAGE_CREATE_PROTECTED_BIT:
+ return "VK_IMAGE_CREATE_PROTECTED_BIT";
+ case VK_IMAGE_CREATE_BLOCK_TEXEL_VIEW_COMPATIBLE_BIT:
+ return "VK_IMAGE_CREATE_BLOCK_TEXEL_VIEW_COMPATIBLE_BIT";
+ case VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT:
+ return "VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT";
+ case VK_IMAGE_CREATE_EXTENDED_USAGE_BIT:
+ return "VK_IMAGE_CREATE_EXTENDED_USAGE_BIT";
+ case VK_IMAGE_CREATE_DISJOINT_BIT:
+ return "VK_IMAGE_CREATE_DISJOINT_BIT";
+ case VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT:
+ return "VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT";
+ case VK_IMAGE_CREATE_SPARSE_BINDING_BIT:
+ return "VK_IMAGE_CREATE_SPARSE_BINDING_BIT";
+ case VK_IMAGE_CREATE_SAMPLE_LOCATIONS_COMPATIBLE_DEPTH_BIT_EXT:
+ return "VK_IMAGE_CREATE_SAMPLE_LOCATIONS_COMPATIBLE_DEPTH_BIT_EXT";
+ case VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT:
+ return "VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT";
+ case VK_IMAGE_CREATE_SPARSE_ALIASED_BIT:
+ return "VK_IMAGE_CREATE_SPARSE_ALIASED_BIT";
+ case VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT:
+ return "VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT";
+ case VK_IMAGE_CREATE_SPLIT_INSTANCE_BIND_REGIONS_BIT:
+ return "VK_IMAGE_CREATE_SPLIT_INSTANCE_BIND_REGIONS_BIT";
+ default:
+ return "Unhandled VkImageCreateFlagBits";
+ }
+}
+
+static inline const char* string_VkSampleCountFlagBits(VkSampleCountFlagBits input_value)
+{
+ switch ((VkSampleCountFlagBits)input_value)
+ {
+ case VK_SAMPLE_COUNT_32_BIT:
+ return "VK_SAMPLE_COUNT_32_BIT";
+ case VK_SAMPLE_COUNT_1_BIT:
+ return "VK_SAMPLE_COUNT_1_BIT";
+ case VK_SAMPLE_COUNT_2_BIT:
+ return "VK_SAMPLE_COUNT_2_BIT";
+ case VK_SAMPLE_COUNT_64_BIT:
+ return "VK_SAMPLE_COUNT_64_BIT";
+ case VK_SAMPLE_COUNT_16_BIT:
+ return "VK_SAMPLE_COUNT_16_BIT";
+ case VK_SAMPLE_COUNT_4_BIT:
+ return "VK_SAMPLE_COUNT_4_BIT";
+ case VK_SAMPLE_COUNT_8_BIT:
+ return "VK_SAMPLE_COUNT_8_BIT";
+ default:
+ return "Unhandled VkSampleCountFlagBits";
+ }
+}
+
+static inline const char* string_VkPhysicalDeviceType(VkPhysicalDeviceType input_value)
+{
+ switch ((VkPhysicalDeviceType)input_value)
+ {
+ case VK_PHYSICAL_DEVICE_TYPE_DISCRETE_GPU:
+ return "VK_PHYSICAL_DEVICE_TYPE_DISCRETE_GPU";
+ case VK_PHYSICAL_DEVICE_TYPE_VIRTUAL_GPU:
+ return "VK_PHYSICAL_DEVICE_TYPE_VIRTUAL_GPU";
+ case VK_PHYSICAL_DEVICE_TYPE_OTHER:
+ return "VK_PHYSICAL_DEVICE_TYPE_OTHER";
+ case VK_PHYSICAL_DEVICE_TYPE_CPU:
+ return "VK_PHYSICAL_DEVICE_TYPE_CPU";
+ case VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU:
+ return "VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU";
+ default:
+ return "Unhandled VkPhysicalDeviceType";
+ }
+}
+
+static inline const char* string_VkQueueFlagBits(VkQueueFlagBits input_value)
+{
+ switch ((VkQueueFlagBits)input_value)
+ {
+ case VK_QUEUE_SPARSE_BINDING_BIT:
+ return "VK_QUEUE_SPARSE_BINDING_BIT";
+ case VK_QUEUE_GRAPHICS_BIT:
+ return "VK_QUEUE_GRAPHICS_BIT";
+ case VK_QUEUE_COMPUTE_BIT:
+ return "VK_QUEUE_COMPUTE_BIT";
+ case VK_QUEUE_PROTECTED_BIT:
+ return "VK_QUEUE_PROTECTED_BIT";
+ case VK_QUEUE_TRANSFER_BIT:
+ return "VK_QUEUE_TRANSFER_BIT";
+ default:
+ return "Unhandled VkQueueFlagBits";
+ }
+}
+
+static inline const char* string_VkMemoryPropertyFlagBits(VkMemoryPropertyFlagBits input_value)
+{
+ switch ((VkMemoryPropertyFlagBits)input_value)
+ {
+ case VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT:
+ return "VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT";
+ case VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT:
+ return "VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT";
+ case VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT:
+ return "VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT";
+ case VK_MEMORY_PROPERTY_HOST_CACHED_BIT:
+ return "VK_MEMORY_PROPERTY_HOST_CACHED_BIT";
+ case VK_MEMORY_PROPERTY_HOST_COHERENT_BIT:
+ return "VK_MEMORY_PROPERTY_HOST_COHERENT_BIT";
+ case VK_MEMORY_PROPERTY_PROTECTED_BIT:
+ return "VK_MEMORY_PROPERTY_PROTECTED_BIT";
+ default:
+ return "Unhandled VkMemoryPropertyFlagBits";
+ }
+}
+
+static inline const char* string_VkMemoryHeapFlagBits(VkMemoryHeapFlagBits input_value)
+{
+ switch ((VkMemoryHeapFlagBits)input_value)
+ {
+ case VK_MEMORY_HEAP_DEVICE_LOCAL_BIT:
+ return "VK_MEMORY_HEAP_DEVICE_LOCAL_BIT";
+ case VK_MEMORY_HEAP_MULTI_INSTANCE_BIT:
+ return "VK_MEMORY_HEAP_MULTI_INSTANCE_BIT";
+ default:
+ return "Unhandled VkMemoryHeapFlagBits";
+ }
+}
+
+static inline const char* string_VkDeviceQueueCreateFlagBits(VkDeviceQueueCreateFlagBits input_value)
+{
+ switch ((VkDeviceQueueCreateFlagBits)input_value)
+ {
+ case VK_DEVICE_QUEUE_CREATE_PROTECTED_BIT:
+ return "VK_DEVICE_QUEUE_CREATE_PROTECTED_BIT";
+ default:
+ return "Unhandled VkDeviceQueueCreateFlagBits";
+ }
+}
+
+static inline const char* string_VkPipelineStageFlagBits(VkPipelineStageFlagBits input_value)
+{
+ switch ((VkPipelineStageFlagBits)input_value)
+ {
+ case VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT:
+ return "VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT";
+ case VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT:
+ return "VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT";
+ case VK_PIPELINE_STAGE_TRANSFER_BIT:
+ return "VK_PIPELINE_STAGE_TRANSFER_BIT";
+ case VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT:
+ return "VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT";
+ case VK_PIPELINE_STAGE_COMMAND_PROCESS_BIT_NVX:
+ return "VK_PIPELINE_STAGE_COMMAND_PROCESS_BIT_NVX";
+ case VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT:
+ return "VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT";
+ case VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT:
+ return "VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT";
+ case VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT:
+ return "VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT";
+ case VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT:
+ return "VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT";
+ case VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT:
+ return "VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT";
+ case VK_PIPELINE_STAGE_ALL_COMMANDS_BIT:
+ return "VK_PIPELINE_STAGE_ALL_COMMANDS_BIT";
+ case VK_PIPELINE_STAGE_VERTEX_SHADER_BIT:
+ return "VK_PIPELINE_STAGE_VERTEX_SHADER_BIT";
+ case VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT:
+ return "VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT";
+ case VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT:
+ return "VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT";
+ case VK_PIPELINE_STAGE_HOST_BIT:
+ return "VK_PIPELINE_STAGE_HOST_BIT";
+ case VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT:
+ return "VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT";
+ case VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT:
+ return "VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT";
+ case VK_PIPELINE_STAGE_VERTEX_INPUT_BIT:
+ return "VK_PIPELINE_STAGE_VERTEX_INPUT_BIT";
+ default:
+ return "Unhandled VkPipelineStageFlagBits";
+ }
+}
+
+static inline const char* string_VkImageAspectFlagBits(VkImageAspectFlagBits input_value)
+{
+ switch ((VkImageAspectFlagBits)input_value)
+ {
+ case VK_IMAGE_ASPECT_PLANE_0_BIT:
+ return "VK_IMAGE_ASPECT_PLANE_0_BIT";
+ case VK_IMAGE_ASPECT_PLANE_2_BIT:
+ return "VK_IMAGE_ASPECT_PLANE_2_BIT";
+ case VK_IMAGE_ASPECT_STENCIL_BIT:
+ return "VK_IMAGE_ASPECT_STENCIL_BIT";
+ case VK_IMAGE_ASPECT_PLANE_1_BIT:
+ return "VK_IMAGE_ASPECT_PLANE_1_BIT";
+ case VK_IMAGE_ASPECT_COLOR_BIT:
+ return "VK_IMAGE_ASPECT_COLOR_BIT";
+ case VK_IMAGE_ASPECT_METADATA_BIT:
+ return "VK_IMAGE_ASPECT_METADATA_BIT";
+ case VK_IMAGE_ASPECT_DEPTH_BIT:
+ return "VK_IMAGE_ASPECT_DEPTH_BIT";
+ default:
+ return "Unhandled VkImageAspectFlagBits";
+ }
+}
+
+static inline const char* string_VkSparseImageFormatFlagBits(VkSparseImageFormatFlagBits input_value)
+{
+ switch ((VkSparseImageFormatFlagBits)input_value)
+ {
+ case VK_SPARSE_IMAGE_FORMAT_ALIGNED_MIP_SIZE_BIT:
+ return "VK_SPARSE_IMAGE_FORMAT_ALIGNED_MIP_SIZE_BIT";
+ case VK_SPARSE_IMAGE_FORMAT_SINGLE_MIPTAIL_BIT:
+ return "VK_SPARSE_IMAGE_FORMAT_SINGLE_MIPTAIL_BIT";
+ case VK_SPARSE_IMAGE_FORMAT_NONSTANDARD_BLOCK_SIZE_BIT:
+ return "VK_SPARSE_IMAGE_FORMAT_NONSTANDARD_BLOCK_SIZE_BIT";
+ default:
+ return "Unhandled VkSparseImageFormatFlagBits";
+ }
+}
+
+static inline const char* string_VkSparseMemoryBindFlagBits(VkSparseMemoryBindFlagBits input_value)
+{
+ switch ((VkSparseMemoryBindFlagBits)input_value)
+ {
+ case VK_SPARSE_MEMORY_BIND_METADATA_BIT:
+ return "VK_SPARSE_MEMORY_BIND_METADATA_BIT";
+ default:
+ return "Unhandled VkSparseMemoryBindFlagBits";
+ }
+}
+
+static inline const char* string_VkFenceCreateFlagBits(VkFenceCreateFlagBits input_value)
+{
+ switch ((VkFenceCreateFlagBits)input_value)
+ {
+ case VK_FENCE_CREATE_SIGNALED_BIT:
+ return "VK_FENCE_CREATE_SIGNALED_BIT";
+ default:
+ return "Unhandled VkFenceCreateFlagBits";
+ }
+}
+
+static inline const char* string_VkQueryType(VkQueryType input_value)
+{
+ switch ((VkQueryType)input_value)
+ {
+ case VK_QUERY_TYPE_TIMESTAMP:
+ return "VK_QUERY_TYPE_TIMESTAMP";
+ case VK_QUERY_TYPE_PIPELINE_STATISTICS:
+ return "VK_QUERY_TYPE_PIPELINE_STATISTICS";
+ case VK_QUERY_TYPE_OCCLUSION:
+ return "VK_QUERY_TYPE_OCCLUSION";
+ default:
+ return "Unhandled VkQueryType";
+ }
+}
+
+static inline const char* string_VkQueryPipelineStatisticFlagBits(VkQueryPipelineStatisticFlagBits input_value)
+{
+ switch ((VkQueryPipelineStatisticFlagBits)input_value)
+ {
+ case VK_QUERY_PIPELINE_STATISTIC_GEOMETRY_SHADER_PRIMITIVES_BIT:
+ return "VK_QUERY_PIPELINE_STATISTIC_GEOMETRY_SHADER_PRIMITIVES_BIT";
+ case VK_QUERY_PIPELINE_STATISTIC_TESSELLATION_EVALUATION_SHADER_INVOCATIONS_BIT:
+ return "VK_QUERY_PIPELINE_STATISTIC_TESSELLATION_EVALUATION_SHADER_INVOCATIONS_BIT";
+ case VK_QUERY_PIPELINE_STATISTIC_VERTEX_SHADER_INVOCATIONS_BIT:
+ return "VK_QUERY_PIPELINE_STATISTIC_VERTEX_SHADER_INVOCATIONS_BIT";
+ case VK_QUERY_PIPELINE_STATISTIC_TESSELLATION_CONTROL_SHADER_PATCHES_BIT:
+ return "VK_QUERY_PIPELINE_STATISTIC_TESSELLATION_CONTROL_SHADER_PATCHES_BIT";
+ case VK_QUERY_PIPELINE_STATISTIC_COMPUTE_SHADER_INVOCATIONS_BIT:
+ return "VK_QUERY_PIPELINE_STATISTIC_COMPUTE_SHADER_INVOCATIONS_BIT";
+ case VK_QUERY_PIPELINE_STATISTIC_GEOMETRY_SHADER_INVOCATIONS_BIT:
+ return "VK_QUERY_PIPELINE_STATISTIC_GEOMETRY_SHADER_INVOCATIONS_BIT";
+ case VK_QUERY_PIPELINE_STATISTIC_FRAGMENT_SHADER_INVOCATIONS_BIT:
+ return "VK_QUERY_PIPELINE_STATISTIC_FRAGMENT_SHADER_INVOCATIONS_BIT";
+ case VK_QUERY_PIPELINE_STATISTIC_CLIPPING_INVOCATIONS_BIT:
+ return "VK_QUERY_PIPELINE_STATISTIC_CLIPPING_INVOCATIONS_BIT";
+ case VK_QUERY_PIPELINE_STATISTIC_CLIPPING_PRIMITIVES_BIT:
+ return "VK_QUERY_PIPELINE_STATISTIC_CLIPPING_PRIMITIVES_BIT";
+ case VK_QUERY_PIPELINE_STATISTIC_INPUT_ASSEMBLY_VERTICES_BIT:
+ return "VK_QUERY_PIPELINE_STATISTIC_INPUT_ASSEMBLY_VERTICES_BIT";
+ case VK_QUERY_PIPELINE_STATISTIC_INPUT_ASSEMBLY_PRIMITIVES_BIT:
+ return "VK_QUERY_PIPELINE_STATISTIC_INPUT_ASSEMBLY_PRIMITIVES_BIT";
+ default:
+ return "Unhandled VkQueryPipelineStatisticFlagBits";
+ }
+}
+
+static inline const char* string_VkQueryResultFlagBits(VkQueryResultFlagBits input_value)
+{
+ switch ((VkQueryResultFlagBits)input_value)
+ {
+ case VK_QUERY_RESULT_64_BIT:
+ return "VK_QUERY_RESULT_64_BIT";
+ case VK_QUERY_RESULT_WITH_AVAILABILITY_BIT:
+ return "VK_QUERY_RESULT_WITH_AVAILABILITY_BIT";
+ case VK_QUERY_RESULT_WAIT_BIT:
+ return "VK_QUERY_RESULT_WAIT_BIT";
+ case VK_QUERY_RESULT_PARTIAL_BIT:
+ return "VK_QUERY_RESULT_PARTIAL_BIT";
+ default:
+ return "Unhandled VkQueryResultFlagBits";
+ }
+}
+
+static inline const char* string_VkBufferCreateFlagBits(VkBufferCreateFlagBits input_value)
+{
+ switch ((VkBufferCreateFlagBits)input_value)
+ {
+ case VK_BUFFER_CREATE_SPARSE_RESIDENCY_BIT:
+ return "VK_BUFFER_CREATE_SPARSE_RESIDENCY_BIT";
+ case VK_BUFFER_CREATE_SPARSE_BINDING_BIT:
+ return "VK_BUFFER_CREATE_SPARSE_BINDING_BIT";
+ case VK_BUFFER_CREATE_PROTECTED_BIT:
+ return "VK_BUFFER_CREATE_PROTECTED_BIT";
+ case VK_BUFFER_CREATE_SPARSE_ALIASED_BIT:
+ return "VK_BUFFER_CREATE_SPARSE_ALIASED_BIT";
+ default:
+ return "Unhandled VkBufferCreateFlagBits";
+ }
+}
+
+static inline const char* string_VkBufferUsageFlagBits(VkBufferUsageFlagBits input_value)
+{
+ switch ((VkBufferUsageFlagBits)input_value)
+ {
+ case VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT:
+ return "VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT";
+ case VK_BUFFER_USAGE_STORAGE_BUFFER_BIT:
+ return "VK_BUFFER_USAGE_STORAGE_BUFFER_BIT";
+ case VK_BUFFER_USAGE_VERTEX_BUFFER_BIT:
+ return "VK_BUFFER_USAGE_VERTEX_BUFFER_BIT";
+ case VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT:
+ return "VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT";
+ case VK_BUFFER_USAGE_TRANSFER_SRC_BIT:
+ return "VK_BUFFER_USAGE_TRANSFER_SRC_BIT";
+ case VK_BUFFER_USAGE_TRANSFER_DST_BIT:
+ return "VK_BUFFER_USAGE_TRANSFER_DST_BIT";
+ case VK_BUFFER_USAGE_INDEX_BUFFER_BIT:
+ return "VK_BUFFER_USAGE_INDEX_BUFFER_BIT";
+ case VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT:
+ return "VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT";
+ case VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT:
+ return "VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT";
+ default:
+ return "Unhandled VkBufferUsageFlagBits";
+ }
+}
+
+static inline const char* string_VkSharingMode(VkSharingMode input_value)
+{
+ switch ((VkSharingMode)input_value)
+ {
+ case VK_SHARING_MODE_EXCLUSIVE:
+ return "VK_SHARING_MODE_EXCLUSIVE";
+ case VK_SHARING_MODE_CONCURRENT:
+ return "VK_SHARING_MODE_CONCURRENT";
+ default:
+ return "Unhandled VkSharingMode";
+ }
+}
+
+static inline const char* string_VkImageLayout(VkImageLayout input_value)
+{
+ switch ((VkImageLayout)input_value)
+ {
+ case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL:
+ return "VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL";
+ case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL:
+ return "VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL";
+ case VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL:
+ return "VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL";
+ case VK_IMAGE_LAYOUT_GENERAL:
+ return "VK_IMAGE_LAYOUT_GENERAL";
+ case VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL:
+ return "VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL";
+ case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
+ return "VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL";
+ case VK_IMAGE_LAYOUT_UNDEFINED:
+ return "VK_IMAGE_LAYOUT_UNDEFINED";
+ case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL:
+ return "VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL";
+ case VK_IMAGE_LAYOUT_PREINITIALIZED:
+ return "VK_IMAGE_LAYOUT_PREINITIALIZED";
+ case VK_IMAGE_LAYOUT_PRESENT_SRC_KHR:
+ return "VK_IMAGE_LAYOUT_PRESENT_SRC_KHR";
+ case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL:
+ return "VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL";
+ case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
+ return "VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL";
+ case VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR:
+ return "VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR";
+ default:
+ return "Unhandled VkImageLayout";
+ }
+}
+
+static inline const char* string_VkImageViewType(VkImageViewType input_value)
+{
+ switch ((VkImageViewType)input_value)
+ {
+ case VK_IMAGE_VIEW_TYPE_2D_ARRAY:
+ return "VK_IMAGE_VIEW_TYPE_2D_ARRAY";
+ case VK_IMAGE_VIEW_TYPE_1D_ARRAY:
+ return "VK_IMAGE_VIEW_TYPE_1D_ARRAY";
+ case VK_IMAGE_VIEW_TYPE_1D:
+ return "VK_IMAGE_VIEW_TYPE_1D";
+ case VK_IMAGE_VIEW_TYPE_3D:
+ return "VK_IMAGE_VIEW_TYPE_3D";
+ case VK_IMAGE_VIEW_TYPE_CUBE:
+ return "VK_IMAGE_VIEW_TYPE_CUBE";
+ case VK_IMAGE_VIEW_TYPE_CUBE_ARRAY:
+ return "VK_IMAGE_VIEW_TYPE_CUBE_ARRAY";
+ case VK_IMAGE_VIEW_TYPE_2D:
+ return "VK_IMAGE_VIEW_TYPE_2D";
+ default:
+ return "Unhandled VkImageViewType";
+ }
+}
+
+static inline const char* string_VkComponentSwizzle(VkComponentSwizzle input_value)
+{
+ switch ((VkComponentSwizzle)input_value)
+ {
+ case VK_COMPONENT_SWIZZLE_ONE:
+ return "VK_COMPONENT_SWIZZLE_ONE";
+ case VK_COMPONENT_SWIZZLE_R:
+ return "VK_COMPONENT_SWIZZLE_R";
+ case VK_COMPONENT_SWIZZLE_ZERO:
+ return "VK_COMPONENT_SWIZZLE_ZERO";
+ case VK_COMPONENT_SWIZZLE_IDENTITY:
+ return "VK_COMPONENT_SWIZZLE_IDENTITY";
+ case VK_COMPONENT_SWIZZLE_G:
+ return "VK_COMPONENT_SWIZZLE_G";
+ case VK_COMPONENT_SWIZZLE_A:
+ return "VK_COMPONENT_SWIZZLE_A";
+ case VK_COMPONENT_SWIZZLE_B:
+ return "VK_COMPONENT_SWIZZLE_B";
+ default:
+ return "Unhandled VkComponentSwizzle";
+ }
+}
+
+static inline const char* string_VkPipelineCreateFlagBits(VkPipelineCreateFlagBits input_value)
+{
+ switch ((VkPipelineCreateFlagBits)input_value)
+ {
+ case VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT:
+ return "VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT";
+ case VK_PIPELINE_CREATE_VIEW_INDEX_FROM_DEVICE_INDEX_BIT:
+ return "VK_PIPELINE_CREATE_VIEW_INDEX_FROM_DEVICE_INDEX_BIT";
+ case VK_PIPELINE_CREATE_DISPATCH_BASE:
+ return "VK_PIPELINE_CREATE_DISPATCH_BASE";
+ case VK_PIPELINE_CREATE_DERIVATIVE_BIT:
+ return "VK_PIPELINE_CREATE_DERIVATIVE_BIT";
+ case VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT:
+ return "VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT";
+ default:
+ return "Unhandled VkPipelineCreateFlagBits";
+ }
+}
+
+static inline const char* string_VkShaderStageFlagBits(VkShaderStageFlagBits input_value)
+{
+ switch ((VkShaderStageFlagBits)input_value)
+ {
+ case VK_SHADER_STAGE_VERTEX_BIT:
+ return "VK_SHADER_STAGE_VERTEX_BIT";
+ case VK_SHADER_STAGE_ALL:
+ return "VK_SHADER_STAGE_ALL";
+ case VK_SHADER_STAGE_FRAGMENT_BIT:
+ return "VK_SHADER_STAGE_FRAGMENT_BIT";
+ case VK_SHADER_STAGE_COMPUTE_BIT:
+ return "VK_SHADER_STAGE_COMPUTE_BIT";
+ case VK_SHADER_STAGE_ALL_GRAPHICS:
+ return "VK_SHADER_STAGE_ALL_GRAPHICS";
+ case VK_SHADER_STAGE_GEOMETRY_BIT:
+ return "VK_SHADER_STAGE_GEOMETRY_BIT";
+ case VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT:
+ return "VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT";
+ case VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT:
+ return "VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT";
+ default:
+ return "Unhandled VkShaderStageFlagBits";
+ }
+}
+
+static inline const char* string_VkVertexInputRate(VkVertexInputRate input_value)
+{
+ switch ((VkVertexInputRate)input_value)
+ {
+ case VK_VERTEX_INPUT_RATE_VERTEX:
+ return "VK_VERTEX_INPUT_RATE_VERTEX";
+ case VK_VERTEX_INPUT_RATE_INSTANCE:
+ return "VK_VERTEX_INPUT_RATE_INSTANCE";
+ default:
+ return "Unhandled VkVertexInputRate";
+ }
+}
+
+static inline const char* string_VkPrimitiveTopology(VkPrimitiveTopology input_value)
+{
+ switch ((VkPrimitiveTopology)input_value)
+ {
+ case VK_PRIMITIVE_TOPOLOGY_PATCH_LIST:
+ return "VK_PRIMITIVE_TOPOLOGY_PATCH_LIST";
+ case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST:
+ return "VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST";
+ case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN:
+ return "VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN";
+ case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY:
+ return "VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY";
+ case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY:
+ return "VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY";
+ case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP:
+ return "VK_PRIMITIVE_TOPOLOGY_LINE_STRIP";
+ case VK_PRIMITIVE_TOPOLOGY_POINT_LIST:
+ return "VK_PRIMITIVE_TOPOLOGY_POINT_LIST";
+ case VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY:
+ return "VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY";
+ case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY:
+ return "VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY";
+ case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP:
+ return "VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP";
+ case VK_PRIMITIVE_TOPOLOGY_LINE_LIST:
+ return "VK_PRIMITIVE_TOPOLOGY_LINE_LIST";
+ default:
+ return "Unhandled VkPrimitiveTopology";
+ }
+}
+
+static inline const char* string_VkPolygonMode(VkPolygonMode input_value)
+{
+ switch ((VkPolygonMode)input_value)
+ {
+ case VK_POLYGON_MODE_POINT:
+ return "VK_POLYGON_MODE_POINT";
+ case VK_POLYGON_MODE_FILL:
+ return "VK_POLYGON_MODE_FILL";
+ case VK_POLYGON_MODE_LINE:
+ return "VK_POLYGON_MODE_LINE";
+ case VK_POLYGON_MODE_FILL_RECTANGLE_NV:
+ return "VK_POLYGON_MODE_FILL_RECTANGLE_NV";
+ default:
+ return "Unhandled VkPolygonMode";
+ }
+}
+
+static inline const char* string_VkCullModeFlagBits(VkCullModeFlagBits input_value)
+{
+ switch ((VkCullModeFlagBits)input_value)
+ {
+ case VK_CULL_MODE_FRONT_BIT:
+ return "VK_CULL_MODE_FRONT_BIT";
+ case VK_CULL_MODE_FRONT_AND_BACK:
+ return "VK_CULL_MODE_FRONT_AND_BACK";
+ case VK_CULL_MODE_BACK_BIT:
+ return "VK_CULL_MODE_BACK_BIT";
+ case VK_CULL_MODE_NONE:
+ return "VK_CULL_MODE_NONE";
+ default:
+ return "Unhandled VkCullModeFlagBits";
+ }
+}
+
+static inline const char* string_VkFrontFace(VkFrontFace input_value)
+{
+ switch ((VkFrontFace)input_value)
+ {
+ case VK_FRONT_FACE_CLOCKWISE:
+ return "VK_FRONT_FACE_CLOCKWISE";
+ case VK_FRONT_FACE_COUNTER_CLOCKWISE:
+ return "VK_FRONT_FACE_COUNTER_CLOCKWISE";
+ default:
+ return "Unhandled VkFrontFace";
+ }
+}
+
+static inline const char* string_VkCompareOp(VkCompareOp input_value)
+{
+ switch ((VkCompareOp)input_value)
+ {
+ case VK_COMPARE_OP_ALWAYS:
+ return "VK_COMPARE_OP_ALWAYS";
+ case VK_COMPARE_OP_NOT_EQUAL:
+ return "VK_COMPARE_OP_NOT_EQUAL";
+ case VK_COMPARE_OP_LESS:
+ return "VK_COMPARE_OP_LESS";
+ case VK_COMPARE_OP_LESS_OR_EQUAL:
+ return "VK_COMPARE_OP_LESS_OR_EQUAL";
+ case VK_COMPARE_OP_NEVER:
+ return "VK_COMPARE_OP_NEVER";
+ case VK_COMPARE_OP_GREATER:
+ return "VK_COMPARE_OP_GREATER";
+ case VK_COMPARE_OP_EQUAL:
+ return "VK_COMPARE_OP_EQUAL";
+ case VK_COMPARE_OP_GREATER_OR_EQUAL:
+ return "VK_COMPARE_OP_GREATER_OR_EQUAL";
+ default:
+ return "Unhandled VkCompareOp";
+ }
+}
+
+static inline const char* string_VkStencilOp(VkStencilOp input_value)
+{
+ switch ((VkStencilOp)input_value)
+ {
+ case VK_STENCIL_OP_INVERT:
+ return "VK_STENCIL_OP_INVERT";
+ case VK_STENCIL_OP_KEEP:
+ return "VK_STENCIL_OP_KEEP";
+ case VK_STENCIL_OP_DECREMENT_AND_CLAMP:
+ return "VK_STENCIL_OP_DECREMENT_AND_CLAMP";
+ case VK_STENCIL_OP_REPLACE:
+ return "VK_STENCIL_OP_REPLACE";
+ case VK_STENCIL_OP_INCREMENT_AND_WRAP:
+ return "VK_STENCIL_OP_INCREMENT_AND_WRAP";
+ case VK_STENCIL_OP_ZERO:
+ return "VK_STENCIL_OP_ZERO";
+ case VK_STENCIL_OP_INCREMENT_AND_CLAMP:
+ return "VK_STENCIL_OP_INCREMENT_AND_CLAMP";
+ case VK_STENCIL_OP_DECREMENT_AND_WRAP:
+ return "VK_STENCIL_OP_DECREMENT_AND_WRAP";
+ default:
+ return "Unhandled VkStencilOp";
+ }
+}
+
+static inline const char* string_VkLogicOp(VkLogicOp input_value)
+{
+ switch ((VkLogicOp)input_value)
+ {
+ case VK_LOGIC_OP_NOR:
+ return "VK_LOGIC_OP_NOR";
+ case VK_LOGIC_OP_OR:
+ return "VK_LOGIC_OP_OR";
+ case VK_LOGIC_OP_NO_OP:
+ return "VK_LOGIC_OP_NO_OP";
+ case VK_LOGIC_OP_NAND:
+ return "VK_LOGIC_OP_NAND";
+ case VK_LOGIC_OP_XOR:
+ return "VK_LOGIC_OP_XOR";
+ case VK_LOGIC_OP_AND_REVERSE:
+ return "VK_LOGIC_OP_AND_REVERSE";
+ case VK_LOGIC_OP_COPY:
+ return "VK_LOGIC_OP_COPY";
+ case VK_LOGIC_OP_AND:
+ return "VK_LOGIC_OP_AND";
+ case VK_LOGIC_OP_CLEAR:
+ return "VK_LOGIC_OP_CLEAR";
+ case VK_LOGIC_OP_COPY_INVERTED:
+ return "VK_LOGIC_OP_COPY_INVERTED";
+ case VK_LOGIC_OP_SET:
+ return "VK_LOGIC_OP_SET";
+ case VK_LOGIC_OP_INVERT:
+ return "VK_LOGIC_OP_INVERT";
+ case VK_LOGIC_OP_AND_INVERTED:
+ return "VK_LOGIC_OP_AND_INVERTED";
+ case VK_LOGIC_OP_OR_REVERSE:
+ return "VK_LOGIC_OP_OR_REVERSE";
+ case VK_LOGIC_OP_OR_INVERTED:
+ return "VK_LOGIC_OP_OR_INVERTED";
+ case VK_LOGIC_OP_EQUIVALENT:
+ return "VK_LOGIC_OP_EQUIVALENT";
+ default:
+ return "Unhandled VkLogicOp";
+ }
+}
+
+static inline const char* string_VkBlendFactor(VkBlendFactor input_value)
+{
+ switch ((VkBlendFactor)input_value)
+ {
+ case VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA:
+ return "VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA";
+ case VK_BLEND_FACTOR_CONSTANT_ALPHA:
+ return "VK_BLEND_FACTOR_CONSTANT_ALPHA";
+ case VK_BLEND_FACTOR_ONE:
+ return "VK_BLEND_FACTOR_ONE";
+ case VK_BLEND_FACTOR_DST_COLOR:
+ return "VK_BLEND_FACTOR_DST_COLOR";
+ case VK_BLEND_FACTOR_ONE_MINUS_DST_COLOR:
+ return "VK_BLEND_FACTOR_ONE_MINUS_DST_COLOR";
+ case VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR:
+ return "VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR";
+ case VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA:
+ return "VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA";
+ case VK_BLEND_FACTOR_ONE_MINUS_DST_ALPHA:
+ return "VK_BLEND_FACTOR_ONE_MINUS_DST_ALPHA";
+ case VK_BLEND_FACTOR_SRC1_COLOR:
+ return "VK_BLEND_FACTOR_SRC1_COLOR";
+ case VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA:
+ return "VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA";
+ case VK_BLEND_FACTOR_SRC_ALPHA_SATURATE:
+ return "VK_BLEND_FACTOR_SRC_ALPHA_SATURATE";
+ case VK_BLEND_FACTOR_SRC_COLOR:
+ return "VK_BLEND_FACTOR_SRC_COLOR";
+ case VK_BLEND_FACTOR_DST_ALPHA:
+ return "VK_BLEND_FACTOR_DST_ALPHA";
+ case VK_BLEND_FACTOR_SRC_ALPHA:
+ return "VK_BLEND_FACTOR_SRC_ALPHA";
+ case VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR:
+ return "VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR";
+ case VK_BLEND_FACTOR_SRC1_ALPHA:
+ return "VK_BLEND_FACTOR_SRC1_ALPHA";
+ case VK_BLEND_FACTOR_CONSTANT_COLOR:
+ return "VK_BLEND_FACTOR_CONSTANT_COLOR";
+ case VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_COLOR:
+ return "VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_COLOR";
+ case VK_BLEND_FACTOR_ZERO:
+ return "VK_BLEND_FACTOR_ZERO";
+ default:
+ return "Unhandled VkBlendFactor";
+ }
+}
+
+static inline const char* string_VkBlendOp(VkBlendOp input_value)
+{
+ switch ((VkBlendOp)input_value)
+ {
+ case VK_BLEND_OP_ADD:
+ return "VK_BLEND_OP_ADD";
+ case VK_BLEND_OP_SRC_EXT:
+ return "VK_BLEND_OP_SRC_EXT";
+ case VK_BLEND_OP_DST_EXT:
+ return "VK_BLEND_OP_DST_EXT";
+ case VK_BLEND_OP_DIFFERENCE_EXT:
+ return "VK_BLEND_OP_DIFFERENCE_EXT";
+ case VK_BLEND_OP_MINUS_EXT:
+ return "VK_BLEND_OP_MINUS_EXT";
+ case VK_BLEND_OP_MINUS_CLAMPED_EXT:
+ return "VK_BLEND_OP_MINUS_CLAMPED_EXT";
+ case VK_BLEND_OP_SOFTLIGHT_EXT:
+ return "VK_BLEND_OP_SOFTLIGHT_EXT";
+ case VK_BLEND_OP_LINEARDODGE_EXT:
+ return "VK_BLEND_OP_LINEARDODGE_EXT";
+ case VK_BLEND_OP_HARDMIX_EXT:
+ return "VK_BLEND_OP_HARDMIX_EXT";
+ case VK_BLEND_OP_MIN:
+ return "VK_BLEND_OP_MIN";
+ case VK_BLEND_OP_HSL_LUMINOSITY_EXT:
+ return "VK_BLEND_OP_HSL_LUMINOSITY_EXT";
+ case VK_BLEND_OP_SRC_ATOP_EXT:
+ return "VK_BLEND_OP_SRC_ATOP_EXT";
+ case VK_BLEND_OP_SUBTRACT:
+ return "VK_BLEND_OP_SUBTRACT";
+ case VK_BLEND_OP_HSL_HUE_EXT:
+ return "VK_BLEND_OP_HSL_HUE_EXT";
+ case VK_BLEND_OP_REVERSE_SUBTRACT:
+ return "VK_BLEND_OP_REVERSE_SUBTRACT";
+ case VK_BLEND_OP_DST_OVER_EXT:
+ return "VK_BLEND_OP_DST_OVER_EXT";
+ case VK_BLEND_OP_VIVIDLIGHT_EXT:
+ return "VK_BLEND_OP_VIVIDLIGHT_EXT";
+ case VK_BLEND_OP_HSL_COLOR_EXT:
+ return "VK_BLEND_OP_HSL_COLOR_EXT";
+ case VK_BLEND_OP_EXCLUSION_EXT:
+ return "VK_BLEND_OP_EXCLUSION_EXT";
+ case VK_BLEND_OP_PLUS_DARKER_EXT:
+ return "VK_BLEND_OP_PLUS_DARKER_EXT";
+ case VK_BLEND_OP_DST_IN_EXT:
+ return "VK_BLEND_OP_DST_IN_EXT";
+ case VK_BLEND_OP_INVERT_OVG_EXT:
+ return "VK_BLEND_OP_INVERT_OVG_EXT";
+ case VK_BLEND_OP_CONTRAST_EXT:
+ return "VK_BLEND_OP_CONTRAST_EXT";
+ case VK_BLEND_OP_SRC_OUT_EXT:
+ return "VK_BLEND_OP_SRC_OUT_EXT";
+ case VK_BLEND_OP_COLORDODGE_EXT:
+ return "VK_BLEND_OP_COLORDODGE_EXT";
+ case VK_BLEND_OP_SRC_IN_EXT:
+ return "VK_BLEND_OP_SRC_IN_EXT";
+ case VK_BLEND_OP_MAX:
+ return "VK_BLEND_OP_MAX";
+ case VK_BLEND_OP_HSL_SATURATION_EXT:
+ return "VK_BLEND_OP_HSL_SATURATION_EXT";
+ case VK_BLEND_OP_PLUS_CLAMPED_ALPHA_EXT:
+ return "VK_BLEND_OP_PLUS_CLAMPED_ALPHA_EXT";
+ case VK_BLEND_OP_DARKEN_EXT:
+ return "VK_BLEND_OP_DARKEN_EXT";
+ case VK_BLEND_OP_BLUE_EXT:
+ return "VK_BLEND_OP_BLUE_EXT";
+ case VK_BLEND_OP_XOR_EXT:
+ return "VK_BLEND_OP_XOR_EXT";
+ case VK_BLEND_OP_HARDLIGHT_EXT:
+ return "VK_BLEND_OP_HARDLIGHT_EXT";
+ case VK_BLEND_OP_RED_EXT:
+ return "VK_BLEND_OP_RED_EXT";
+ case VK_BLEND_OP_INVERT_EXT:
+ return "VK_BLEND_OP_INVERT_EXT";
+ case VK_BLEND_OP_ZERO_EXT:
+ return "VK_BLEND_OP_ZERO_EXT";
+ case VK_BLEND_OP_LIGHTEN_EXT:
+ return "VK_BLEND_OP_LIGHTEN_EXT";
+ case VK_BLEND_OP_SCREEN_EXT:
+ return "VK_BLEND_OP_SCREEN_EXT";
+ case VK_BLEND_OP_DST_OUT_EXT:
+ return "VK_BLEND_OP_DST_OUT_EXT";
+ case VK_BLEND_OP_MULTIPLY_EXT:
+ return "VK_BLEND_OP_MULTIPLY_EXT";
+ case VK_BLEND_OP_OVERLAY_EXT:
+ return "VK_BLEND_OP_OVERLAY_EXT";
+ case VK_BLEND_OP_LINEARLIGHT_EXT:
+ return "VK_BLEND_OP_LINEARLIGHT_EXT";
+ case VK_BLEND_OP_PLUS_EXT:
+ return "VK_BLEND_OP_PLUS_EXT";
+ case VK_BLEND_OP_PLUS_CLAMPED_EXT:
+ return "VK_BLEND_OP_PLUS_CLAMPED_EXT";
+ case VK_BLEND_OP_INVERT_RGB_EXT:
+ return "VK_BLEND_OP_INVERT_RGB_EXT";
+ case VK_BLEND_OP_DST_ATOP_EXT:
+ return "VK_BLEND_OP_DST_ATOP_EXT";
+ case VK_BLEND_OP_LINEARBURN_EXT:
+ return "VK_BLEND_OP_LINEARBURN_EXT";
+ case VK_BLEND_OP_GREEN_EXT:
+ return "VK_BLEND_OP_GREEN_EXT";
+ case VK_BLEND_OP_COLORBURN_EXT:
+ return "VK_BLEND_OP_COLORBURN_EXT";
+ case VK_BLEND_OP_PINLIGHT_EXT:
+ return "VK_BLEND_OP_PINLIGHT_EXT";
+ case VK_BLEND_OP_SRC_OVER_EXT:
+ return "VK_BLEND_OP_SRC_OVER_EXT";
+ default:
+ return "Unhandled VkBlendOp";
+ }
+}
+
+static inline const char* string_VkColorComponentFlagBits(VkColorComponentFlagBits input_value)
+{
+ switch ((VkColorComponentFlagBits)input_value)
+ {
+ case VK_COLOR_COMPONENT_R_BIT:
+ return "VK_COLOR_COMPONENT_R_BIT";
+ case VK_COLOR_COMPONENT_B_BIT:
+ return "VK_COLOR_COMPONENT_B_BIT";
+ case VK_COLOR_COMPONENT_G_BIT:
+ return "VK_COLOR_COMPONENT_G_BIT";
+ case VK_COLOR_COMPONENT_A_BIT:
+ return "VK_COLOR_COMPONENT_A_BIT";
+ default:
+ return "Unhandled VkColorComponentFlagBits";
+ }
+}
+
+static inline const char* string_VkDynamicState(VkDynamicState input_value)
+{
+ switch ((VkDynamicState)input_value)
+ {
+ case VK_DYNAMIC_STATE_LINE_WIDTH:
+ return "VK_DYNAMIC_STATE_LINE_WIDTH";
+ case VK_DYNAMIC_STATE_DEPTH_BIAS:
+ return "VK_DYNAMIC_STATE_DEPTH_BIAS";
+ case VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK:
+ return "VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK";
+ case VK_DYNAMIC_STATE_STENCIL_REFERENCE:
+ return "VK_DYNAMIC_STATE_STENCIL_REFERENCE";
+ case VK_DYNAMIC_STATE_VIEWPORT_W_SCALING_NV:
+ return "VK_DYNAMIC_STATE_VIEWPORT_W_SCALING_NV";
+ case VK_DYNAMIC_STATE_STENCIL_WRITE_MASK:
+ return "VK_DYNAMIC_STATE_STENCIL_WRITE_MASK";
+ case VK_DYNAMIC_STATE_SCISSOR:
+ return "VK_DYNAMIC_STATE_SCISSOR";
+ case VK_DYNAMIC_STATE_VIEWPORT:
+ return "VK_DYNAMIC_STATE_VIEWPORT";
+ case VK_DYNAMIC_STATE_DEPTH_BOUNDS:
+ return "VK_DYNAMIC_STATE_DEPTH_BOUNDS";
+ case VK_DYNAMIC_STATE_DISCARD_RECTANGLE_EXT:
+ return "VK_DYNAMIC_STATE_DISCARD_RECTANGLE_EXT";
+ case VK_DYNAMIC_STATE_SAMPLE_LOCATIONS_EXT:
+ return "VK_DYNAMIC_STATE_SAMPLE_LOCATIONS_EXT";
+ case VK_DYNAMIC_STATE_BLEND_CONSTANTS:
+ return "VK_DYNAMIC_STATE_BLEND_CONSTANTS";
+ default:
+ return "Unhandled VkDynamicState";
+ }
+}
+
+static inline const char* string_VkFilter(VkFilter input_value)
+{
+ switch ((VkFilter)input_value)
+ {
+ case VK_FILTER_LINEAR:
+ return "VK_FILTER_LINEAR";
+ case VK_FILTER_CUBIC_IMG:
+ return "VK_FILTER_CUBIC_IMG";
+ case VK_FILTER_NEAREST:
+ return "VK_FILTER_NEAREST";
+ default:
+ return "Unhandled VkFilter";
+ }
+}
+
+static inline const char* string_VkSamplerMipmapMode(VkSamplerMipmapMode input_value)
+{
+ switch ((VkSamplerMipmapMode)input_value)
+ {
+ case VK_SAMPLER_MIPMAP_MODE_NEAREST:
+ return "VK_SAMPLER_MIPMAP_MODE_NEAREST";
+ case VK_SAMPLER_MIPMAP_MODE_LINEAR:
+ return "VK_SAMPLER_MIPMAP_MODE_LINEAR";
+ default:
+ return "Unhandled VkSamplerMipmapMode";
+ }
+}
+
+static inline const char* string_VkSamplerAddressMode(VkSamplerAddressMode input_value)
+{
+ switch ((VkSamplerAddressMode)input_value)
+ {
+ case VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE:
+ return "VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE";
+ case VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER:
+ return "VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER";
+ case VK_SAMPLER_ADDRESS_MODE_MIRRORED_REPEAT:
+ return "VK_SAMPLER_ADDRESS_MODE_MIRRORED_REPEAT";
+ case VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE:
+ return "VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE";
+ case VK_SAMPLER_ADDRESS_MODE_REPEAT:
+ return "VK_SAMPLER_ADDRESS_MODE_REPEAT";
+ default:
+ return "Unhandled VkSamplerAddressMode";
+ }
+}
+
+static inline const char* string_VkBorderColor(VkBorderColor input_value)
+{
+ switch ((VkBorderColor)input_value)
+ {
+ case VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK:
+ return "VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK";
+ case VK_BORDER_COLOR_INT_OPAQUE_BLACK:
+ return "VK_BORDER_COLOR_INT_OPAQUE_BLACK";
+ case VK_BORDER_COLOR_INT_TRANSPARENT_BLACK:
+ return "VK_BORDER_COLOR_INT_TRANSPARENT_BLACK";
+ case VK_BORDER_COLOR_INT_OPAQUE_WHITE:
+ return "VK_BORDER_COLOR_INT_OPAQUE_WHITE";
+ case VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE:
+ return "VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE";
+ case VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK:
+ return "VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK";
+ default:
+ return "Unhandled VkBorderColor";
+ }
+}
+
+static inline const char* string_VkDescriptorSetLayoutCreateFlagBits(VkDescriptorSetLayoutCreateFlagBits input_value)
+{
+ switch ((VkDescriptorSetLayoutCreateFlagBits)input_value)
+ {
+ case VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT_EXT:
+ return "VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT_EXT";
+ case VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR:
+ return "VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR";
+ default:
+ return "Unhandled VkDescriptorSetLayoutCreateFlagBits";
+ }
+}
+
+static inline const char* string_VkDescriptorType(VkDescriptorType input_value)
+{
+ switch ((VkDescriptorType)input_value)
+ {
+ case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
+ return "VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER";
+ case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
+ return "VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER";
+ case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
+ return "VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC";
+ case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
+ return "VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT";
+ case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
+ return "VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER";
+ case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
+ return "VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER";
+ case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
+ return "VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE";
+ case VK_DESCRIPTOR_TYPE_SAMPLER:
+ return "VK_DESCRIPTOR_TYPE_SAMPLER";
+ case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
+ return "VK_DESCRIPTOR_TYPE_STORAGE_IMAGE";
+ case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
+ return "VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC";
+ case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
+ return "VK_DESCRIPTOR_TYPE_STORAGE_BUFFER";
+ default:
+ return "Unhandled VkDescriptorType";
+ }
+}
+
+static inline const char* string_VkDescriptorPoolCreateFlagBits(VkDescriptorPoolCreateFlagBits input_value)
+{
+ switch ((VkDescriptorPoolCreateFlagBits)input_value)
+ {
+ case VK_DESCRIPTOR_POOL_CREATE_UPDATE_AFTER_BIND_BIT_EXT:
+ return "VK_DESCRIPTOR_POOL_CREATE_UPDATE_AFTER_BIND_BIT_EXT";
+ case VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT:
+ return "VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT";
+ default:
+ return "Unhandled VkDescriptorPoolCreateFlagBits";
+ }
+}
+
+static inline const char* string_VkAttachmentDescriptionFlagBits(VkAttachmentDescriptionFlagBits input_value)
+{
+ switch ((VkAttachmentDescriptionFlagBits)input_value)
+ {
+ case VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT:
+ return "VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT";
+ default:
+ return "Unhandled VkAttachmentDescriptionFlagBits";
+ }
+}
+
+static inline const char* string_VkAttachmentLoadOp(VkAttachmentLoadOp input_value)
+{
+ switch ((VkAttachmentLoadOp)input_value)
+ {
+ case VK_ATTACHMENT_LOAD_OP_DONT_CARE:
+ return "VK_ATTACHMENT_LOAD_OP_DONT_CARE";
+ case VK_ATTACHMENT_LOAD_OP_CLEAR:
+ return "VK_ATTACHMENT_LOAD_OP_CLEAR";
+ case VK_ATTACHMENT_LOAD_OP_LOAD:
+ return "VK_ATTACHMENT_LOAD_OP_LOAD";
+ default:
+ return "Unhandled VkAttachmentLoadOp";
+ }
+}
+
+static inline const char* string_VkAttachmentStoreOp(VkAttachmentStoreOp input_value)
+{
+ switch ((VkAttachmentStoreOp)input_value)
+ {
+ case VK_ATTACHMENT_STORE_OP_DONT_CARE:
+ return "VK_ATTACHMENT_STORE_OP_DONT_CARE";
+ case VK_ATTACHMENT_STORE_OP_STORE:
+ return "VK_ATTACHMENT_STORE_OP_STORE";
+ default:
+ return "Unhandled VkAttachmentStoreOp";
+ }
+}
+
+static inline const char* string_VkSubpassDescriptionFlagBits(VkSubpassDescriptionFlagBits input_value)
+{
+ switch ((VkSubpassDescriptionFlagBits)input_value)
+ {
+ case VK_SUBPASS_DESCRIPTION_PER_VIEW_ATTRIBUTES_BIT_NVX:
+ return "VK_SUBPASS_DESCRIPTION_PER_VIEW_ATTRIBUTES_BIT_NVX";
+ case VK_SUBPASS_DESCRIPTION_PER_VIEW_POSITION_X_ONLY_BIT_NVX:
+ return "VK_SUBPASS_DESCRIPTION_PER_VIEW_POSITION_X_ONLY_BIT_NVX";
+ default:
+ return "Unhandled VkSubpassDescriptionFlagBits";
+ }
+}
+
+static inline const char* string_VkPipelineBindPoint(VkPipelineBindPoint input_value)
+{
+ switch ((VkPipelineBindPoint)input_value)
+ {
+ case VK_PIPELINE_BIND_POINT_COMPUTE:
+ return "VK_PIPELINE_BIND_POINT_COMPUTE";
+ case VK_PIPELINE_BIND_POINT_GRAPHICS:
+ return "VK_PIPELINE_BIND_POINT_GRAPHICS";
+ default:
+ return "Unhandled VkPipelineBindPoint";
+ }
+}
+
+static inline const char* string_VkAccessFlagBits(VkAccessFlagBits input_value)
+{
+ switch ((VkAccessFlagBits)input_value)
+ {
+ case VK_ACCESS_UNIFORM_READ_BIT:
+ return "VK_ACCESS_UNIFORM_READ_BIT";
+ case VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT:
+ return "VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT";
+ case VK_ACCESS_INDIRECT_COMMAND_READ_BIT:
+ return "VK_ACCESS_INDIRECT_COMMAND_READ_BIT";
+ case VK_ACCESS_HOST_READ_BIT:
+ return "VK_ACCESS_HOST_READ_BIT";
+ case VK_ACCESS_HOST_WRITE_BIT:
+ return "VK_ACCESS_HOST_WRITE_BIT";
+ case VK_ACCESS_COLOR_ATTACHMENT_READ_NONCOHERENT_BIT_EXT:
+ return "VK_ACCESS_COLOR_ATTACHMENT_READ_NONCOHERENT_BIT_EXT";
+ case VK_ACCESS_COLOR_ATTACHMENT_READ_BIT:
+ return "VK_ACCESS_COLOR_ATTACHMENT_READ_BIT";
+ case VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT:
+ return "VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT";
+ case VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT:
+ return "VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT";
+ case VK_ACCESS_TRANSFER_WRITE_BIT:
+ return "VK_ACCESS_TRANSFER_WRITE_BIT";
+ case VK_ACCESS_COMMAND_PROCESS_WRITE_BIT_NVX:
+ return "VK_ACCESS_COMMAND_PROCESS_WRITE_BIT_NVX";
+ case VK_ACCESS_INPUT_ATTACHMENT_READ_BIT:
+ return "VK_ACCESS_INPUT_ATTACHMENT_READ_BIT";
+ case VK_ACCESS_SHADER_READ_BIT:
+ return "VK_ACCESS_SHADER_READ_BIT";
+ case VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT:
+ return "VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT";
+ case VK_ACCESS_COMMAND_PROCESS_READ_BIT_NVX:
+ return "VK_ACCESS_COMMAND_PROCESS_READ_BIT_NVX";
+ case VK_ACCESS_MEMORY_READ_BIT:
+ return "VK_ACCESS_MEMORY_READ_BIT";
+ case VK_ACCESS_SHADER_WRITE_BIT:
+ return "VK_ACCESS_SHADER_WRITE_BIT";
+ case VK_ACCESS_INDEX_READ_BIT:
+ return "VK_ACCESS_INDEX_READ_BIT";
+ case VK_ACCESS_MEMORY_WRITE_BIT:
+ return "VK_ACCESS_MEMORY_WRITE_BIT";
+ case VK_ACCESS_TRANSFER_READ_BIT:
+ return "VK_ACCESS_TRANSFER_READ_BIT";
+ default:
+ return "Unhandled VkAccessFlagBits";
+ }
+}
+
+static inline const char* string_VkDependencyFlagBits(VkDependencyFlagBits input_value)
+{
+ switch ((VkDependencyFlagBits)input_value)
+ {
+ case VK_DEPENDENCY_DEVICE_GROUP_BIT:
+ return "VK_DEPENDENCY_DEVICE_GROUP_BIT";
+ case VK_DEPENDENCY_BY_REGION_BIT:
+ return "VK_DEPENDENCY_BY_REGION_BIT";
+ case VK_DEPENDENCY_VIEW_LOCAL_BIT:
+ return "VK_DEPENDENCY_VIEW_LOCAL_BIT";
+ default:
+ return "Unhandled VkDependencyFlagBits";
+ }
+}
+
+static inline const char* string_VkCommandPoolCreateFlagBits(VkCommandPoolCreateFlagBits input_value)
+{
+ switch ((VkCommandPoolCreateFlagBits)input_value)
+ {
+ case VK_COMMAND_POOL_CREATE_TRANSIENT_BIT:
+ return "VK_COMMAND_POOL_CREATE_TRANSIENT_BIT";
+ case VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT:
+ return "VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT";
+ case VK_COMMAND_POOL_CREATE_PROTECTED_BIT:
+ return "VK_COMMAND_POOL_CREATE_PROTECTED_BIT";
+ default:
+ return "Unhandled VkCommandPoolCreateFlagBits";
+ }
+}
+
+static inline const char* string_VkCommandPoolResetFlagBits(VkCommandPoolResetFlagBits input_value)
+{
+ switch ((VkCommandPoolResetFlagBits)input_value)
+ {
+ case VK_COMMAND_POOL_RESET_RELEASE_RESOURCES_BIT:
+ return "VK_COMMAND_POOL_RESET_RELEASE_RESOURCES_BIT";
+ default:
+ return "Unhandled VkCommandPoolResetFlagBits";
+ }
+}
+
+static inline const char* string_VkCommandBufferLevel(VkCommandBufferLevel input_value)
+{
+ switch ((VkCommandBufferLevel)input_value)
+ {
+ case VK_COMMAND_BUFFER_LEVEL_SECONDARY:
+ return "VK_COMMAND_BUFFER_LEVEL_SECONDARY";
+ case VK_COMMAND_BUFFER_LEVEL_PRIMARY:
+ return "VK_COMMAND_BUFFER_LEVEL_PRIMARY";
+ default:
+ return "Unhandled VkCommandBufferLevel";
+ }
+}
+
+static inline const char* string_VkCommandBufferUsageFlagBits(VkCommandBufferUsageFlagBits input_value)
+{
+ switch ((VkCommandBufferUsageFlagBits)input_value)
+ {
+ case VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT:
+ return "VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT";
+ case VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT:
+ return "VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT";
+ case VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT:
+ return "VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT";
+ default:
+ return "Unhandled VkCommandBufferUsageFlagBits";
+ }
+}
+
+static inline const char* string_VkQueryControlFlagBits(VkQueryControlFlagBits input_value)
+{
+ switch ((VkQueryControlFlagBits)input_value)
+ {
+ case VK_QUERY_CONTROL_PRECISE_BIT:
+ return "VK_QUERY_CONTROL_PRECISE_BIT";
+ default:
+ return "Unhandled VkQueryControlFlagBits";
+ }
+}
+
+static inline const char* string_VkCommandBufferResetFlagBits(VkCommandBufferResetFlagBits input_value)
+{
+ switch ((VkCommandBufferResetFlagBits)input_value)
+ {
+ case VK_COMMAND_BUFFER_RESET_RELEASE_RESOURCES_BIT:
+ return "VK_COMMAND_BUFFER_RESET_RELEASE_RESOURCES_BIT";
+ default:
+ return "Unhandled VkCommandBufferResetFlagBits";
+ }
+}
+
+static inline const char* string_VkStencilFaceFlagBits(VkStencilFaceFlagBits input_value)
+{
+ switch ((VkStencilFaceFlagBits)input_value)
+ {
+ case VK_STENCIL_FACE_BACK_BIT:
+ return "VK_STENCIL_FACE_BACK_BIT";
+ case VK_STENCIL_FRONT_AND_BACK:
+ return "VK_STENCIL_FRONT_AND_BACK";
+ case VK_STENCIL_FACE_FRONT_BIT:
+ return "VK_STENCIL_FACE_FRONT_BIT";
+ default:
+ return "Unhandled VkStencilFaceFlagBits";
+ }
+}
+
+static inline const char* string_VkIndexType(VkIndexType input_value)
+{
+ switch ((VkIndexType)input_value)
+ {
+ case VK_INDEX_TYPE_UINT16:
+ return "VK_INDEX_TYPE_UINT16";
+ case VK_INDEX_TYPE_UINT32:
+ return "VK_INDEX_TYPE_UINT32";
+ default:
+ return "Unhandled VkIndexType";
+ }
+}
+
+static inline const char* string_VkSubpassContents(VkSubpassContents input_value)
+{
+ switch ((VkSubpassContents)input_value)
+ {
+ case VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS:
+ return "VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS";
+ case VK_SUBPASS_CONTENTS_INLINE:
+ return "VK_SUBPASS_CONTENTS_INLINE";
+ default:
+ return "Unhandled VkSubpassContents";
+ }
+}
+
+static inline const char* string_VkObjectType(VkObjectType input_value)
+{
+ switch ((VkObjectType)input_value)
+ {
+ case VK_OBJECT_TYPE_SEMAPHORE:
+ return "VK_OBJECT_TYPE_SEMAPHORE";
+ case VK_OBJECT_TYPE_PIPELINE:
+ return "VK_OBJECT_TYPE_PIPELINE";
+ case VK_OBJECT_TYPE_DEBUG_UTILS_MESSENGER_EXT:
+ return "VK_OBJECT_TYPE_DEBUG_UTILS_MESSENGER_EXT";
+ case VK_OBJECT_TYPE_SURFACE_KHR:
+ return "VK_OBJECT_TYPE_SURFACE_KHR";
+ case VK_OBJECT_TYPE_BUFFER:
+ return "VK_OBJECT_TYPE_BUFFER";
+ case VK_OBJECT_TYPE_PHYSICAL_DEVICE:
+ return "VK_OBJECT_TYPE_PHYSICAL_DEVICE";
+ case VK_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION:
+ return "VK_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION";
+ case VK_OBJECT_TYPE_QUEUE:
+ return "VK_OBJECT_TYPE_QUEUE";
+ case VK_OBJECT_TYPE_DEVICE:
+ return "VK_OBJECT_TYPE_DEVICE";
+ case VK_OBJECT_TYPE_COMMAND_BUFFER:
+ return "VK_OBJECT_TYPE_COMMAND_BUFFER";
+ case VK_OBJECT_TYPE_DESCRIPTOR_SET:
+ return "VK_OBJECT_TYPE_DESCRIPTOR_SET";
+ case VK_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT:
+ return "VK_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT";
+ case VK_OBJECT_TYPE_COMMAND_POOL:
+ return "VK_OBJECT_TYPE_COMMAND_POOL";
+ case VK_OBJECT_TYPE_UNKNOWN:
+ return "VK_OBJECT_TYPE_UNKNOWN";
+ case VK_OBJECT_TYPE_DESCRIPTOR_POOL:
+ return "VK_OBJECT_TYPE_DESCRIPTOR_POOL";
+ case VK_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE:
+ return "VK_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE";
+ case VK_OBJECT_TYPE_BUFFER_VIEW:
+ return "VK_OBJECT_TYPE_BUFFER_VIEW";
+ case VK_OBJECT_TYPE_DEVICE_MEMORY:
+ return "VK_OBJECT_TYPE_DEVICE_MEMORY";
+ case VK_OBJECT_TYPE_DEBUG_REPORT_CALLBACK_EXT:
+ return "VK_OBJECT_TYPE_DEBUG_REPORT_CALLBACK_EXT";
+ case VK_OBJECT_TYPE_IMAGE:
+ return "VK_OBJECT_TYPE_IMAGE";
+ case VK_OBJECT_TYPE_INSTANCE:
+ return "VK_OBJECT_TYPE_INSTANCE";
+ case VK_OBJECT_TYPE_DISPLAY_MODE_KHR:
+ return "VK_OBJECT_TYPE_DISPLAY_MODE_KHR";
+ case VK_OBJECT_TYPE_IMAGE_VIEW:
+ return "VK_OBJECT_TYPE_IMAGE_VIEW";
+ case VK_OBJECT_TYPE_PIPELINE_LAYOUT:
+ return "VK_OBJECT_TYPE_PIPELINE_LAYOUT";
+ case VK_OBJECT_TYPE_EVENT:
+ return "VK_OBJECT_TYPE_EVENT";
+ case VK_OBJECT_TYPE_RENDER_PASS:
+ return "VK_OBJECT_TYPE_RENDER_PASS";
+ case VK_OBJECT_TYPE_FRAMEBUFFER:
+ return "VK_OBJECT_TYPE_FRAMEBUFFER";
+ case VK_OBJECT_TYPE_SAMPLER:
+ return "VK_OBJECT_TYPE_SAMPLER";
+ case VK_OBJECT_TYPE_SWAPCHAIN_KHR:
+ return "VK_OBJECT_TYPE_SWAPCHAIN_KHR";
+ case VK_OBJECT_TYPE_QUERY_POOL:
+ return "VK_OBJECT_TYPE_QUERY_POOL";
+ case VK_OBJECT_TYPE_DISPLAY_KHR:
+ return "VK_OBJECT_TYPE_DISPLAY_KHR";
+ case VK_OBJECT_TYPE_SHADER_MODULE:
+ return "VK_OBJECT_TYPE_SHADER_MODULE";
+ case VK_OBJECT_TYPE_PIPELINE_CACHE:
+ return "VK_OBJECT_TYPE_PIPELINE_CACHE";
+ case VK_OBJECT_TYPE_FENCE:
+ return "VK_OBJECT_TYPE_FENCE";
+ case VK_OBJECT_TYPE_INDIRECT_COMMANDS_LAYOUT_NVX:
+ return "VK_OBJECT_TYPE_INDIRECT_COMMANDS_LAYOUT_NVX";
+ case VK_OBJECT_TYPE_OBJECT_TABLE_NVX:
+ return "VK_OBJECT_TYPE_OBJECT_TABLE_NVX";
+ case VK_OBJECT_TYPE_VALIDATION_CACHE_EXT:
+ return "VK_OBJECT_TYPE_VALIDATION_CACHE_EXT";
+ default:
+ return "Unhandled VkObjectType";
+ }
+}
+
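string_VkObjectType above is most useful inside a VK_EXT_debug_utils messenger, where each object touched by a validation message arrives with an objectType field. A sketch under that assumption (the callback name is illustrative; the structure fields are from VK_EXT_debug_utils, not from this header):

    #include <stdio.h>
    #include <vulkan/vulkan.h>

    /* Name the objects referenced by a validation message before printing it. */
    static VKAPI_ATTR VkBool32 VKAPI_CALL debug_cb(
            VkDebugUtilsMessageSeverityFlagBitsEXT severity,
            VkDebugUtilsMessageTypeFlagsEXT types,
            const VkDebugUtilsMessengerCallbackDataEXT *pCallbackData,
            void *pUserData)
    {
        (void)severity; (void)types; (void)pUserData;
        for (uint32_t i = 0; i < pCallbackData->objectCount; i++) {
            printf("object %u: %s\n", i,
                   string_VkObjectType(pCallbackData->pObjects[i].objectType));
        }
        printf("%s\n", pCallbackData->pMessage);
        return VK_FALSE; /* do not abort the call that triggered the message */
    }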
+static inline const char* string_VkSubgroupFeatureFlagBits(VkSubgroupFeatureFlagBits input_value)
+{
+ switch ((VkSubgroupFeatureFlagBits)input_value)
+ {
+ case VK_SUBGROUP_FEATURE_SHUFFLE_BIT:
+ return "VK_SUBGROUP_FEATURE_SHUFFLE_BIT";
+ case VK_SUBGROUP_FEATURE_SHUFFLE_RELATIVE_BIT:
+ return "VK_SUBGROUP_FEATURE_SHUFFLE_RELATIVE_BIT";
+ case VK_SUBGROUP_FEATURE_QUAD_BIT:
+ return "VK_SUBGROUP_FEATURE_QUAD_BIT";
+ case VK_SUBGROUP_FEATURE_BALLOT_BIT:
+ return "VK_SUBGROUP_FEATURE_BALLOT_BIT";
+ case VK_SUBGROUP_FEATURE_CLUSTERED_BIT:
+ return "VK_SUBGROUP_FEATURE_CLUSTERED_BIT";
+ case VK_SUBGROUP_FEATURE_ARITHMETIC_BIT:
+ return "VK_SUBGROUP_FEATURE_ARITHMETIC_BIT";
+ case VK_SUBGROUP_FEATURE_VOTE_BIT:
+ return "VK_SUBGROUP_FEATURE_VOTE_BIT";
+ case VK_SUBGROUP_FEATURE_PARTITIONED_BIT_NV:
+ return "VK_SUBGROUP_FEATURE_PARTITIONED_BIT_NV";
+ case VK_SUBGROUP_FEATURE_BASIC_BIT:
+ return "VK_SUBGROUP_FEATURE_BASIC_BIT";
+ default:
+ return "Unhandled VkSubgroupFeatureFlagBits";
+ }
+}
+
+static inline const char* string_VkPeerMemoryFeatureFlagBits(VkPeerMemoryFeatureFlagBits input_value)
+{
+ switch ((VkPeerMemoryFeatureFlagBits)input_value)
+ {
+ case VK_PEER_MEMORY_FEATURE_GENERIC_DST_BIT:
+ return "VK_PEER_MEMORY_FEATURE_GENERIC_DST_BIT";
+ case VK_PEER_MEMORY_FEATURE_COPY_SRC_BIT:
+ return "VK_PEER_MEMORY_FEATURE_COPY_SRC_BIT";
+ case VK_PEER_MEMORY_FEATURE_GENERIC_SRC_BIT:
+ return "VK_PEER_MEMORY_FEATURE_GENERIC_SRC_BIT";
+ case VK_PEER_MEMORY_FEATURE_COPY_DST_BIT:
+ return "VK_PEER_MEMORY_FEATURE_COPY_DST_BIT";
+ default:
+ return "Unhandled VkPeerMemoryFeatureFlagBits";
+ }
+}
+
+static inline const char* string_VkMemoryAllocateFlagBits(VkMemoryAllocateFlagBits input_value)
+{
+ switch ((VkMemoryAllocateFlagBits)input_value)
+ {
+ case VK_MEMORY_ALLOCATE_DEVICE_MASK_BIT:
+ return "VK_MEMORY_ALLOCATE_DEVICE_MASK_BIT";
+ default:
+ return "Unhandled VkMemoryAllocateFlagBits";
+ }
+}
+
+static inline const char* string_VkPointClippingBehavior(VkPointClippingBehavior input_value)
+{
+ switch ((VkPointClippingBehavior)input_value)
+ {
+ case VK_POINT_CLIPPING_BEHAVIOR_ALL_CLIP_PLANES:
+ return "VK_POINT_CLIPPING_BEHAVIOR_ALL_CLIP_PLANES";
+ case VK_POINT_CLIPPING_BEHAVIOR_USER_CLIP_PLANES_ONLY:
+ return "VK_POINT_CLIPPING_BEHAVIOR_USER_CLIP_PLANES_ONLY";
+ default:
+ return "Unhandled VkPointClippingBehavior";
+ }
+}
+
+static inline const char* string_VkTessellationDomainOrigin(VkTessellationDomainOrigin input_value)
+{
+ switch ((VkTessellationDomainOrigin)input_value)
+ {
+ case VK_TESSELLATION_DOMAIN_ORIGIN_LOWER_LEFT:
+ return "VK_TESSELLATION_DOMAIN_ORIGIN_LOWER_LEFT";
+ case VK_TESSELLATION_DOMAIN_ORIGIN_UPPER_LEFT:
+ return "VK_TESSELLATION_DOMAIN_ORIGIN_UPPER_LEFT";
+ default:
+ return "Unhandled VkTessellationDomainOrigin";
+ }
+}
+
+static inline const char* string_VkSamplerYcbcrModelConversion(VkSamplerYcbcrModelConversion input_value)
+{
+ switch ((VkSamplerYcbcrModelConversion)input_value)
+ {
+ case VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_2020:
+ return "VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_2020";
+ case VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_IDENTITY:
+ return "VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_IDENTITY";
+ case VK_SAMPLER_YCBCR_MODEL_CONVERSION_RGB_IDENTITY:
+ return "VK_SAMPLER_YCBCR_MODEL_CONVERSION_RGB_IDENTITY";
+ case VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_709:
+ return "VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_709";
+ case VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_601:
+ return "VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_601";
+ default:
+ return "Unhandled VkSamplerYcbcrModelConversion";
+ }
+}
+
+static inline const char* string_VkSamplerYcbcrRange(VkSamplerYcbcrRange input_value)
+{
+ switch ((VkSamplerYcbcrRange)input_value)
+ {
+ case VK_SAMPLER_YCBCR_RANGE_ITU_FULL:
+ return "VK_SAMPLER_YCBCR_RANGE_ITU_FULL";
+ case VK_SAMPLER_YCBCR_RANGE_ITU_NARROW:
+ return "VK_SAMPLER_YCBCR_RANGE_ITU_NARROW";
+ default:
+ return "Unhandled VkSamplerYcbcrRange";
+ }
+}
+
+static inline const char* string_VkChromaLocation(VkChromaLocation input_value)
+{
+ switch ((VkChromaLocation)input_value)
+ {
+ case VK_CHROMA_LOCATION_COSITED_EVEN:
+ return "VK_CHROMA_LOCATION_COSITED_EVEN";
+ case VK_CHROMA_LOCATION_MIDPOINT:
+ return "VK_CHROMA_LOCATION_MIDPOINT";
+ default:
+ return "Unhandled VkChromaLocation";
+ }
+}
+
+static inline const char* string_VkDescriptorUpdateTemplateType(VkDescriptorUpdateTemplateType input_value)
+{
+ switch ((VkDescriptorUpdateTemplateType)input_value)
+ {
+ case VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_PUSH_DESCRIPTORS_KHR:
+ return "VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_PUSH_DESCRIPTORS_KHR";
+ case VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET:
+ return "VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET";
+ default:
+ return "Unhandled VkDescriptorUpdateTemplateType";
+ }
+}
+
+static inline const char* string_VkExternalMemoryHandleTypeFlagBits(VkExternalMemoryHandleTypeFlagBits input_value)
+{
+ switch ((VkExternalMemoryHandleTypeFlagBits)input_value)
+ {
+ case VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_TEXTURE_KMT_BIT:
+ return "VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_TEXTURE_KMT_BIT";
+ case VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT:
+ return "VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT";
+ case VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_HEAP_BIT:
+ return "VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_HEAP_BIT";
+ case VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_MAPPED_FOREIGN_MEMORY_BIT_EXT:
+ return "VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_MAPPED_FOREIGN_MEMORY_BIT_EXT";
+ case VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT:
+ return "VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT";
+ case VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID:
+ return "VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID";
+ case VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT:
+ return "VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT";
+ case VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT:
+ return "VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT";
+ case VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_RESOURCE_BIT:
+ return "VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_RESOURCE_BIT";
+ case VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_BIT:
+ return "VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_BIT";
+ case VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_TEXTURE_BIT:
+ return "VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_TEXTURE_BIT";
+ default:
+ return "Unhandled VkExternalMemoryHandleTypeFlagBits";
+ }
+}
+
+static inline const char* string_VkExternalMemoryFeatureFlagBits(VkExternalMemoryFeatureFlagBits input_value)
+{
+ switch ((VkExternalMemoryFeatureFlagBits)input_value)
+ {
+ case VK_EXTERNAL_MEMORY_FEATURE_EXPORTABLE_BIT:
+ return "VK_EXTERNAL_MEMORY_FEATURE_EXPORTABLE_BIT";
+ case VK_EXTERNAL_MEMORY_FEATURE_DEDICATED_ONLY_BIT:
+ return "VK_EXTERNAL_MEMORY_FEATURE_DEDICATED_ONLY_BIT";
+ case VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT:
+ return "VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT";
+ default:
+ return "Unhandled VkExternalMemoryFeatureFlagBits";
+ }
+}
+
+static inline const char* string_VkExternalFenceHandleTypeFlagBits(VkExternalFenceHandleTypeFlagBits input_value)
+{
+ switch ((VkExternalFenceHandleTypeFlagBits)input_value)
+ {
+ case VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT:
+ return "VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT";
+ case VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT:
+ return "VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT";
+ case VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT:
+ return "VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT";
+ case VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_WIN32_BIT:
+ return "VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_WIN32_BIT";
+ default:
+ return "Unhandled VkExternalFenceHandleTypeFlagBits";
+ }
+}
+
+static inline const char* string_VkExternalFenceFeatureFlagBits(VkExternalFenceFeatureFlagBits input_value)
+{
+ switch ((VkExternalFenceFeatureFlagBits)input_value)
+ {
+ case VK_EXTERNAL_FENCE_FEATURE_IMPORTABLE_BIT:
+ return "VK_EXTERNAL_FENCE_FEATURE_IMPORTABLE_BIT";
+ case VK_EXTERNAL_FENCE_FEATURE_EXPORTABLE_BIT:
+ return "VK_EXTERNAL_FENCE_FEATURE_EXPORTABLE_BIT";
+ default:
+ return "Unhandled VkExternalFenceFeatureFlagBits";
+ }
+}
+
+static inline const char* string_VkFenceImportFlagBits(VkFenceImportFlagBits input_value)
+{
+ switch ((VkFenceImportFlagBits)input_value)
+ {
+ case VK_FENCE_IMPORT_TEMPORARY_BIT:
+ return "VK_FENCE_IMPORT_TEMPORARY_BIT";
+ default:
+ return "Unhandled VkFenceImportFlagBits";
+ }
+}
+
+static inline const char* string_VkSemaphoreImportFlagBits(VkSemaphoreImportFlagBits input_value)
+{
+ switch ((VkSemaphoreImportFlagBits)input_value)
+ {
+ case VK_SEMAPHORE_IMPORT_TEMPORARY_BIT:
+ return "VK_SEMAPHORE_IMPORT_TEMPORARY_BIT";
+ default:
+ return "Unhandled VkSemaphoreImportFlagBits";
+ }
+}
+
+static inline const char* string_VkExternalSemaphoreHandleTypeFlagBits(VkExternalSemaphoreHandleTypeFlagBits input_value)
+{
+ switch ((VkExternalSemaphoreHandleTypeFlagBits)input_value)
+ {
+ case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT:
+ return "VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT";
+ case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_BIT:
+ return "VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_BIT";
+ case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE_BIT:
+ return "VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE_BIT";
+ case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT:
+ return "VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT";
+ case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT:
+ return "VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT";
+ default:
+ return "Unhandled VkExternalSemaphoreHandleTypeFlagBits";
+ }
+}
+
+static inline const char* string_VkExternalSemaphoreFeatureFlagBits(VkExternalSemaphoreFeatureFlagBits input_value)
+{
+ switch ((VkExternalSemaphoreFeatureFlagBits)input_value)
+ {
+ case VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT:
+ return "VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT";
+ case VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT:
+ return "VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT";
+ default:
+ return "Unhandled VkExternalSemaphoreFeatureFlagBits";
+ }
+}
+
+static inline const char* string_VkSurfaceTransformFlagBitsKHR(VkSurfaceTransformFlagBitsKHR input_value)
+{
+ switch ((VkSurfaceTransformFlagBitsKHR)input_value)
+ {
+ case VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_ROTATE_90_BIT_KHR:
+ return "VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_ROTATE_90_BIT_KHR";
+ case VK_SURFACE_TRANSFORM_ROTATE_270_BIT_KHR:
+ return "VK_SURFACE_TRANSFORM_ROTATE_270_BIT_KHR";
+ case VK_SURFACE_TRANSFORM_ROTATE_90_BIT_KHR:
+ return "VK_SURFACE_TRANSFORM_ROTATE_90_BIT_KHR";
+ case VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR:
+ return "VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR";
+ case VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_ROTATE_270_BIT_KHR:
+ return "VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_ROTATE_270_BIT_KHR";
+ case VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_BIT_KHR:
+ return "VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_BIT_KHR";
+ case VK_SURFACE_TRANSFORM_ROTATE_180_BIT_KHR:
+ return "VK_SURFACE_TRANSFORM_ROTATE_180_BIT_KHR";
+ case VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_ROTATE_180_BIT_KHR:
+ return "VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_ROTATE_180_BIT_KHR";
+ case VK_SURFACE_TRANSFORM_INHERIT_BIT_KHR:
+ return "VK_SURFACE_TRANSFORM_INHERIT_BIT_KHR";
+ default:
+ return "Unhandled VkSurfaceTransformFlagBitsKHR";
+ }
+}
+
+static inline const char* string_VkCompositeAlphaFlagBitsKHR(VkCompositeAlphaFlagBitsKHR input_value)
+{
+ switch ((VkCompositeAlphaFlagBitsKHR)input_value)
+ {
+ case VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR:
+ return "VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR";
+ case VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR:
+ return "VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR";
+ case VK_COMPOSITE_ALPHA_POST_MULTIPLIED_BIT_KHR:
+ return "VK_COMPOSITE_ALPHA_POST_MULTIPLIED_BIT_KHR";
+ case VK_COMPOSITE_ALPHA_PRE_MULTIPLIED_BIT_KHR:
+ return "VK_COMPOSITE_ALPHA_PRE_MULTIPLIED_BIT_KHR";
+ default:
+ return "Unhandled VkCompositeAlphaFlagBitsKHR";
+ }
+}
+
+static inline const char* string_VkColorSpaceKHR(VkColorSpaceKHR input_value)
+{
+ switch ((VkColorSpaceKHR)input_value)
+ {
+ case VK_COLOR_SPACE_DISPLAY_P3_NONLINEAR_EXT:
+ return "VK_COLOR_SPACE_DISPLAY_P3_NONLINEAR_EXT";
+ case VK_COLOR_SPACE_DCI_P3_LINEAR_EXT:
+ return "VK_COLOR_SPACE_DCI_P3_LINEAR_EXT";
+ case VK_COLOR_SPACE_ADOBERGB_NONLINEAR_EXT:
+ return "VK_COLOR_SPACE_ADOBERGB_NONLINEAR_EXT";
+ case VK_COLOR_SPACE_BT709_NONLINEAR_EXT:
+ return "VK_COLOR_SPACE_BT709_NONLINEAR_EXT";
+ case VK_COLOR_SPACE_DCI_P3_NONLINEAR_EXT:
+ return "VK_COLOR_SPACE_DCI_P3_NONLINEAR_EXT";
+ case VK_COLOR_SPACE_EXTENDED_SRGB_NONLINEAR_EXT:
+ return "VK_COLOR_SPACE_EXTENDED_SRGB_NONLINEAR_EXT";
+ case VK_COLOR_SPACE_HDR10_HLG_EXT:
+ return "VK_COLOR_SPACE_HDR10_HLG_EXT";
+ case VK_COLOR_SPACE_EXTENDED_SRGB_LINEAR_EXT:
+ return "VK_COLOR_SPACE_EXTENDED_SRGB_LINEAR_EXT";
+ case VK_COLOR_SPACE_ADOBERGB_LINEAR_EXT:
+ return "VK_COLOR_SPACE_ADOBERGB_LINEAR_EXT";
+ case VK_COLOR_SPACE_PASS_THROUGH_EXT:
+ return "VK_COLOR_SPACE_PASS_THROUGH_EXT";
+ case VK_COLOR_SPACE_HDR10_ST2084_EXT:
+ return "VK_COLOR_SPACE_HDR10_ST2084_EXT";
+ case VK_COLOR_SPACE_SRGB_NONLINEAR_KHR:
+ return "VK_COLOR_SPACE_SRGB_NONLINEAR_KHR";
+ case VK_COLOR_SPACE_BT2020_LINEAR_EXT:
+ return "VK_COLOR_SPACE_BT2020_LINEAR_EXT";
+ case VK_COLOR_SPACE_BT709_LINEAR_EXT:
+ return "VK_COLOR_SPACE_BT709_LINEAR_EXT";
+ case VK_COLOR_SPACE_DOLBYVISION_EXT:
+ return "VK_COLOR_SPACE_DOLBYVISION_EXT";
+ default:
+ return "Unhandled VkColorSpaceKHR";
+ }
+}
+
+static inline const char* string_VkPresentModeKHR(VkPresentModeKHR input_value)
+{
+ switch ((VkPresentModeKHR)input_value)
+ {
+ case VK_PRESENT_MODE_FIFO_KHR:
+ return "VK_PRESENT_MODE_FIFO_KHR";
+ case VK_PRESENT_MODE_SHARED_DEMAND_REFRESH_KHR:
+ return "VK_PRESENT_MODE_SHARED_DEMAND_REFRESH_KHR";
+ case VK_PRESENT_MODE_SHARED_CONTINUOUS_REFRESH_KHR:
+ return "VK_PRESENT_MODE_SHARED_CONTINUOUS_REFRESH_KHR";
+ case VK_PRESENT_MODE_MAILBOX_KHR:
+ return "VK_PRESENT_MODE_MAILBOX_KHR";
+ case VK_PRESENT_MODE_IMMEDIATE_KHR:
+ return "VK_PRESENT_MODE_IMMEDIATE_KHR";
+ case VK_PRESENT_MODE_FIFO_RELAXED_KHR:
+ return "VK_PRESENT_MODE_FIFO_RELAXED_KHR";
+ default:
+ return "Unhandled VkPresentModeKHR";
+ }
+}
+
+static inline const char* string_VkSwapchainCreateFlagBitsKHR(VkSwapchainCreateFlagBitsKHR input_value)
+{
+ switch ((VkSwapchainCreateFlagBitsKHR)input_value)
+ {
+ case VK_SWAPCHAIN_CREATE_PROTECTED_BIT_KHR:
+ return "VK_SWAPCHAIN_CREATE_PROTECTED_BIT_KHR";
+ case VK_SWAPCHAIN_CREATE_SPLIT_INSTANCE_BIND_REGIONS_BIT_KHR:
+ return "VK_SWAPCHAIN_CREATE_SPLIT_INSTANCE_BIND_REGIONS_BIT_KHR";
+ default:
+ return "Unhandled VkSwapchainCreateFlagBitsKHR";
+ }
+}
+
+static inline const char* string_VkDeviceGroupPresentModeFlagBitsKHR(VkDeviceGroupPresentModeFlagBitsKHR input_value)
+{
+ switch ((VkDeviceGroupPresentModeFlagBitsKHR)input_value)
+ {
+ case VK_DEVICE_GROUP_PRESENT_MODE_SUM_BIT_KHR:
+ return "VK_DEVICE_GROUP_PRESENT_MODE_SUM_BIT_KHR";
+ case VK_DEVICE_GROUP_PRESENT_MODE_REMOTE_BIT_KHR:
+ return "VK_DEVICE_GROUP_PRESENT_MODE_REMOTE_BIT_KHR";
+ case VK_DEVICE_GROUP_PRESENT_MODE_LOCAL_MULTI_DEVICE_BIT_KHR:
+ return "VK_DEVICE_GROUP_PRESENT_MODE_LOCAL_MULTI_DEVICE_BIT_KHR";
+ case VK_DEVICE_GROUP_PRESENT_MODE_LOCAL_BIT_KHR:
+ return "VK_DEVICE_GROUP_PRESENT_MODE_LOCAL_BIT_KHR";
+ default:
+ return "Unhandled VkDeviceGroupPresentModeFlagBitsKHR";
+ }
+}
+
+static inline const char* string_VkDisplayPlaneAlphaFlagBitsKHR(VkDisplayPlaneAlphaFlagBitsKHR input_value)
+{
+ switch ((VkDisplayPlaneAlphaFlagBitsKHR)input_value)
+ {
+ case VK_DISPLAY_PLANE_ALPHA_PER_PIXEL_BIT_KHR:
+ return "VK_DISPLAY_PLANE_ALPHA_PER_PIXEL_BIT_KHR";
+ case VK_DISPLAY_PLANE_ALPHA_GLOBAL_BIT_KHR:
+ return "VK_DISPLAY_PLANE_ALPHA_GLOBAL_BIT_KHR";
+ case VK_DISPLAY_PLANE_ALPHA_OPAQUE_BIT_KHR:
+ return "VK_DISPLAY_PLANE_ALPHA_OPAQUE_BIT_KHR";
+ case VK_DISPLAY_PLANE_ALPHA_PER_PIXEL_PREMULTIPLIED_BIT_KHR:
+ return "VK_DISPLAY_PLANE_ALPHA_PER_PIXEL_PREMULTIPLIED_BIT_KHR";
+ default:
+ return "Unhandled VkDisplayPlaneAlphaFlagBitsKHR";
+ }
+}
+
+static inline const char* string_VkPeerMemoryFeatureFlagBitsKHR(VkPeerMemoryFeatureFlagBitsKHR input_value)
+{
+ switch ((VkPeerMemoryFeatureFlagBitsKHR)input_value)
+ {
+ case VK_PEER_MEMORY_FEATURE_GENERIC_DST_BIT:
+ return "VK_PEER_MEMORY_FEATURE_GENERIC_DST_BIT";
+ case VK_PEER_MEMORY_FEATURE_COPY_SRC_BIT:
+ return "VK_PEER_MEMORY_FEATURE_COPY_SRC_BIT";
+ case VK_PEER_MEMORY_FEATURE_GENERIC_SRC_BIT:
+ return "VK_PEER_MEMORY_FEATURE_GENERIC_SRC_BIT";
+ case VK_PEER_MEMORY_FEATURE_COPY_DST_BIT:
+ return "VK_PEER_MEMORY_FEATURE_COPY_DST_BIT";
+ default:
+ return "Unhandled VkPeerMemoryFeatureFlagBitsKHR";
+ }
+}
+
+static inline const char* string_VkMemoryAllocateFlagBitsKHR(VkMemoryAllocateFlagBitsKHR input_value)
+{
+ switch ((VkMemoryAllocateFlagBitsKHR)input_value)
+ {
+ case VK_MEMORY_ALLOCATE_DEVICE_MASK_BIT:
+ return "VK_MEMORY_ALLOCATE_DEVICE_MASK_BIT";
+ default:
+ return "Unhandled VkMemoryAllocateFlagBitsKHR";
+ }
+}
+
+static inline const char* string_VkExternalMemoryHandleTypeFlagBitsKHR(VkExternalMemoryHandleTypeFlagBitsKHR input_value)
+{
+ switch ((VkExternalMemoryHandleTypeFlagBitsKHR)input_value)
+ {
+ case VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_TEXTURE_KMT_BIT:
+ return "VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_TEXTURE_KMT_BIT";
+ case VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT:
+ return "VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT";
+ case VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_HEAP_BIT:
+ return "VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_HEAP_BIT";
+ case VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_MAPPED_FOREIGN_MEMORY_BIT_EXT:
+ return "VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_MAPPED_FOREIGN_MEMORY_BIT_EXT";
+ case VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT:
+ return "VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT";
+ case VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID:
+ return "VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID";
+ case VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT:
+ return "VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT";
+ case VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT:
+ return "VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT";
+ case VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_RESOURCE_BIT:
+ return "VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_RESOURCE_BIT";
+ case VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_BIT:
+ return "VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_BIT";
+ case VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_TEXTURE_BIT:
+ return "VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_TEXTURE_BIT";
+ default:
+ return "Unhandled VkExternalMemoryHandleTypeFlagBitsKHR";
+ }
+}
+
+static inline const char* string_VkExternalMemoryFeatureFlagBitsKHR(VkExternalMemoryFeatureFlagBitsKHR input_value)
+{
+ switch ((VkExternalMemoryFeatureFlagBitsKHR)input_value)
+ {
+ case VK_EXTERNAL_MEMORY_FEATURE_EXPORTABLE_BIT:
+ return "VK_EXTERNAL_MEMORY_FEATURE_EXPORTABLE_BIT";
+ case VK_EXTERNAL_MEMORY_FEATURE_DEDICATED_ONLY_BIT:
+ return "VK_EXTERNAL_MEMORY_FEATURE_DEDICATED_ONLY_BIT";
+ case VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT:
+ return "VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT";
+ default:
+ return "Unhandled VkExternalMemoryFeatureFlagBitsKHR";
+ }
+}
+
+static inline const char* string_VkExternalSemaphoreHandleTypeFlagBitsKHR(VkExternalSemaphoreHandleTypeFlagBitsKHR input_value)
+{
+ switch ((VkExternalSemaphoreHandleTypeFlagBitsKHR)input_value)
+ {
+ case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT:
+ return "VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT";
+ case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_BIT:
+ return "VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_BIT";
+ case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE_BIT:
+ return "VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE_BIT";
+ case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT:
+ return "VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT";
+ case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT:
+ return "VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT";
+ default:
+ return "Unhandled VkExternalSemaphoreHandleTypeFlagBitsKHR";
+ }
+}
+
+static inline const char* string_VkExternalSemaphoreFeatureFlagBitsKHR(VkExternalSemaphoreFeatureFlagBitsKHR input_value)
+{
+ switch ((VkExternalSemaphoreFeatureFlagBitsKHR)input_value)
+ {
+ case VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT:
+ return "VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT";
+ case VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT:
+ return "VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT";
+ default:
+ return "Unhandled VkExternalSemaphoreFeatureFlagBitsKHR";
+ }
+}
+
+static inline const char* string_VkSemaphoreImportFlagBitsKHR(VkSemaphoreImportFlagBitsKHR input_value)
+{
+ switch ((VkSemaphoreImportFlagBitsKHR)input_value)
+ {
+ case VK_SEMAPHORE_IMPORT_TEMPORARY_BIT:
+ return "VK_SEMAPHORE_IMPORT_TEMPORARY_BIT";
+ default:
+ return "Unhandled VkSemaphoreImportFlagBitsKHR";
+ }
+}
+
+static inline const char* string_VkDescriptorUpdateTemplateTypeKHR(VkDescriptorUpdateTemplateTypeKHR input_value)
+{
+ switch ((VkDescriptorUpdateTemplateTypeKHR)input_value)
+ {
+ case VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_PUSH_DESCRIPTORS_KHR:
+ return "VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_PUSH_DESCRIPTORS_KHR";
+ case VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET:
+ return "VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET";
+ default:
+ return "Unhandled VkDescriptorUpdateTemplateTypeKHR";
+ }
+}
+
+static inline const char* string_VkExternalFenceHandleTypeFlagBitsKHR(VkExternalFenceHandleTypeFlagBitsKHR input_value)
+{
+ switch ((VkExternalFenceHandleTypeFlagBitsKHR)input_value)
+ {
+ case VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT:
+ return "VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT";
+ case VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT:
+ return "VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT";
+ case VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT:
+ return "VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT";
+ case VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_WIN32_BIT:
+ return "VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_WIN32_BIT";
+ default:
+ return "Unhandled VkExternalFenceHandleTypeFlagBitsKHR";
+ }
+}
+
+static inline const char* string_VkExternalFenceFeatureFlagBitsKHR(VkExternalFenceFeatureFlagBitsKHR input_value)
+{
+ switch ((VkExternalFenceFeatureFlagBitsKHR)input_value)
+ {
+ case VK_EXTERNAL_FENCE_FEATURE_IMPORTABLE_BIT:
+ return "VK_EXTERNAL_FENCE_FEATURE_IMPORTABLE_BIT";
+ case VK_EXTERNAL_FENCE_FEATURE_EXPORTABLE_BIT:
+ return "VK_EXTERNAL_FENCE_FEATURE_EXPORTABLE_BIT";
+ default:
+ return "Unhandled VkExternalFenceFeatureFlagBitsKHR";
+ }
+}
+
+static inline const char* string_VkFenceImportFlagBitsKHR(VkFenceImportFlagBitsKHR input_value)
+{
+ switch ((VkFenceImportFlagBitsKHR)input_value)
+ {
+ case VK_FENCE_IMPORT_TEMPORARY_BIT:
+ return "VK_FENCE_IMPORT_TEMPORARY_BIT";
+ default:
+ return "Unhandled VkFenceImportFlagBitsKHR";
+ }
+}
+
+static inline const char* string_VkPointClippingBehaviorKHR(VkPointClippingBehaviorKHR input_value)
+{
+ switch ((VkPointClippingBehaviorKHR)input_value)
+ {
+ case VK_POINT_CLIPPING_BEHAVIOR_ALL_CLIP_PLANES:
+ return "VK_POINT_CLIPPING_BEHAVIOR_ALL_CLIP_PLANES";
+ case VK_POINT_CLIPPING_BEHAVIOR_USER_CLIP_PLANES_ONLY:
+ return "VK_POINT_CLIPPING_BEHAVIOR_USER_CLIP_PLANES_ONLY";
+ default:
+ return "Unhandled VkPointClippingBehaviorKHR";
+ }
+}
+
+static inline const char* string_VkTessellationDomainOriginKHR(VkTessellationDomainOriginKHR input_value)
+{
+ switch ((VkTessellationDomainOriginKHR)input_value)
+ {
+ case VK_TESSELLATION_DOMAIN_ORIGIN_LOWER_LEFT:
+ return "VK_TESSELLATION_DOMAIN_ORIGIN_LOWER_LEFT";
+ case VK_TESSELLATION_DOMAIN_ORIGIN_UPPER_LEFT:
+ return "VK_TESSELLATION_DOMAIN_ORIGIN_UPPER_LEFT";
+ default:
+ return "Unhandled VkTessellationDomainOriginKHR";
+ }
+}
+
+static inline const char* string_VkSamplerYcbcrModelConversionKHR(VkSamplerYcbcrModelConversionKHR input_value)
+{
+ switch ((VkSamplerYcbcrModelConversionKHR)input_value)
+ {
+ case VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_2020:
+ return "VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_2020";
+ case VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_IDENTITY:
+ return "VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_IDENTITY";
+ case VK_SAMPLER_YCBCR_MODEL_CONVERSION_RGB_IDENTITY:
+ return "VK_SAMPLER_YCBCR_MODEL_CONVERSION_RGB_IDENTITY";
+ case VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_709:
+ return "VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_709";
+ case VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_601:
+ return "VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_601";
+ default:
+ return "Unhandled VkSamplerYcbcrModelConversionKHR";
+ }
+}
+
+static inline const char* string_VkSamplerYcbcrRangeKHR(VkSamplerYcbcrRangeKHR input_value)
+{
+ switch ((VkSamplerYcbcrRangeKHR)input_value)
+ {
+ case VK_SAMPLER_YCBCR_RANGE_ITU_FULL:
+ return "VK_SAMPLER_YCBCR_RANGE_ITU_FULL";
+ case VK_SAMPLER_YCBCR_RANGE_ITU_NARROW:
+ return "VK_SAMPLER_YCBCR_RANGE_ITU_NARROW";
+ default:
+ return "Unhandled VkSamplerYcbcrRangeKHR";
+ }
+}
+
+static inline const char* string_VkChromaLocationKHR(VkChromaLocationKHR input_value)
+{
+ switch ((VkChromaLocationKHR)input_value)
+ {
+ case VK_CHROMA_LOCATION_COSITED_EVEN:
+ return "VK_CHROMA_LOCATION_COSITED_EVEN";
+ case VK_CHROMA_LOCATION_MIDPOINT:
+ return "VK_CHROMA_LOCATION_MIDPOINT";
+ default:
+ return "Unhandled VkChromaLocationKHR";
+ }
+}
+
+static inline const char* string_VkDebugReportObjectTypeEXT(VkDebugReportObjectTypeEXT input_value)
+{
+ switch ((VkDebugReportObjectTypeEXT)input_value)
+ {
+ case VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT:
+ return "VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT";
+ case VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT:
+ return "VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT";
+ case VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT:
+ return "VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT";
+ case VK_DEBUG_REPORT_OBJECT_TYPE_OBJECT_TABLE_NVX_EXT:
+ return "VK_DEBUG_REPORT_OBJECT_TYPE_OBJECT_TABLE_NVX_EXT";
+ case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT:
+ return "VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT";
+ case VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT:
+ return "VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT";
+ case VK_DEBUG_REPORT_OBJECT_TYPE_SHADER_MODULE_EXT:
+ return "VK_DEBUG_REPORT_OBJECT_TYPE_SHADER_MODULE_EXT";
+ case VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT:
+ return "VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT";
+ case VK_DEBUG_REPORT_OBJECT_TYPE_DISPLAY_KHR_EXT:
+ return "VK_DEBUG_REPORT_OBJECT_TYPE_DISPLAY_KHR_EXT";
+ case VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT:
+ return "VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT";
+ case VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT:
+ return "VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT";
+ case VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT:
+ return "VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT";
+ case VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT:
+ return "VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT";
+ case VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT:
+ return "VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT";
+ case VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_CACHE_EXT:
+ return "VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_CACHE_EXT";
+ case VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT:
+ return "VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT";
+ case VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION_EXT:
+ return "VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION_EXT";
+ case VK_DEBUG_REPORT_OBJECT_TYPE_INDIRECT_COMMANDS_LAYOUT_NVX_EXT:
+ return "VK_DEBUG_REPORT_OBJECT_TYPE_INDIRECT_COMMANDS_LAYOUT_NVX_EXT";
+ case VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT:
+ return "VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT";
+ case VK_DEBUG_REPORT_OBJECT_TYPE_DEBUG_REPORT_CALLBACK_EXT_EXT:
+ return "VK_DEBUG_REPORT_OBJECT_TYPE_DEBUG_REPORT_CALLBACK_EXT_EXT";
+ case VK_DEBUG_REPORT_OBJECT_TYPE_DISPLAY_MODE_KHR_EXT:
+ return "VK_DEBUG_REPORT_OBJECT_TYPE_DISPLAY_MODE_KHR_EXT";
+ case VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT:
+ return "VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT";
+ case VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT:
+ return "VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT";
+ case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT:
+ return "VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT";
+ case VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT:
+ return "VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT";
+ case VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT:
+ return "VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT";
+ case VK_DEBUG_REPORT_OBJECT_TYPE_VALIDATION_CACHE_EXT_EXT:
+ return "VK_DEBUG_REPORT_OBJECT_TYPE_VALIDATION_CACHE_EXT_EXT";
+ case VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT:
+ return "VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT";
+ case VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT:
+ return "VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT";
+ case VK_DEBUG_REPORT_OBJECT_TYPE_SURFACE_KHR_EXT:
+ return "VK_DEBUG_REPORT_OBJECT_TYPE_SURFACE_KHR_EXT";
+ case VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT:
+ return "VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT";
+ case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT:
+ return "VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT";
+ case VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT:
+ return "VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT";
+ case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT:
+ return "VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT";
+ case VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT:
+ return "VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT";
+ case VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_EXT:
+ return "VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_EXT";
+ default:
+ return "Unhandled VkDebugReportObjectTypeEXT";
+ }
+}
+
+static inline const char* string_VkDebugReportFlagBitsEXT(VkDebugReportFlagBitsEXT input_value)
+{
+ switch ((VkDebugReportFlagBitsEXT)input_value)
+ {
+ case VK_DEBUG_REPORT_DEBUG_BIT_EXT:
+ return "VK_DEBUG_REPORT_DEBUG_BIT_EXT";
+ case VK_DEBUG_REPORT_ERROR_BIT_EXT:
+ return "VK_DEBUG_REPORT_ERROR_BIT_EXT";
+ case VK_DEBUG_REPORT_INFORMATION_BIT_EXT:
+ return "VK_DEBUG_REPORT_INFORMATION_BIT_EXT";
+ case VK_DEBUG_REPORT_WARNING_BIT_EXT:
+ return "VK_DEBUG_REPORT_WARNING_BIT_EXT";
+ case VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT:
+ return "VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT";
+ default:
+ return "Unhandled VkDebugReportFlagBitsEXT";
+ }
+}
+
+static inline const char* string_VkRasterizationOrderAMD(VkRasterizationOrderAMD input_value)
+{
+ switch ((VkRasterizationOrderAMD)input_value)
+ {
+ case VK_RASTERIZATION_ORDER_STRICT_AMD:
+ return "VK_RASTERIZATION_ORDER_STRICT_AMD";
+ case VK_RASTERIZATION_ORDER_RELAXED_AMD:
+ return "VK_RASTERIZATION_ORDER_RELAXED_AMD";
+ default:
+ return "Unhandled VkRasterizationOrderAMD";
+ }
+}
+
+static inline const char* string_VkShaderInfoTypeAMD(VkShaderInfoTypeAMD input_value)
+{
+ switch ((VkShaderInfoTypeAMD)input_value)
+ {
+ case VK_SHADER_INFO_TYPE_STATISTICS_AMD:
+ return "VK_SHADER_INFO_TYPE_STATISTICS_AMD";
+ case VK_SHADER_INFO_TYPE_BINARY_AMD:
+ return "VK_SHADER_INFO_TYPE_BINARY_AMD";
+ case VK_SHADER_INFO_TYPE_DISASSEMBLY_AMD:
+ return "VK_SHADER_INFO_TYPE_DISASSEMBLY_AMD";
+ default:
+ return "Unhandled VkShaderInfoTypeAMD";
+ }
+}
+
+static inline const char* string_VkExternalMemoryHandleTypeFlagBitsNV(VkExternalMemoryHandleTypeFlagBitsNV input_value)
+{
+ switch ((VkExternalMemoryHandleTypeFlagBitsNV)input_value)
+ {
+ case VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_IMAGE_KMT_BIT_NV:
+ return "VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_IMAGE_KMT_BIT_NV";
+ case VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_IMAGE_BIT_NV:
+ return "VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_IMAGE_BIT_NV";
+ case VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT_NV:
+ return "VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT_NV";
+ case VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_BIT_NV:
+ return "VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_BIT_NV";
+ default:
+ return "Unhandled VkExternalMemoryHandleTypeFlagBitsNV";
+ }
+}
+
+static inline const char* string_VkExternalMemoryFeatureFlagBitsNV(VkExternalMemoryFeatureFlagBitsNV input_value)
+{
+ switch ((VkExternalMemoryFeatureFlagBitsNV)input_value)
+ {
+ case VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT_NV:
+ return "VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT_NV";
+ case VK_EXTERNAL_MEMORY_FEATURE_EXPORTABLE_BIT_NV:
+ return "VK_EXTERNAL_MEMORY_FEATURE_EXPORTABLE_BIT_NV";
+ case VK_EXTERNAL_MEMORY_FEATURE_DEDICATED_ONLY_BIT_NV:
+ return "VK_EXTERNAL_MEMORY_FEATURE_DEDICATED_ONLY_BIT_NV";
+ default:
+ return "Unhandled VkExternalMemoryFeatureFlagBitsNV";
+ }
+}
+
+static inline const char* string_VkValidationCheckEXT(VkValidationCheckEXT input_value)
+{
+ switch ((VkValidationCheckEXT)input_value)
+ {
+ case VK_VALIDATION_CHECK_SHADERS_EXT:
+ return "VK_VALIDATION_CHECK_SHADERS_EXT";
+ case VK_VALIDATION_CHECK_ALL_EXT:
+ return "VK_VALIDATION_CHECK_ALL_EXT";
+ default:
+ return "Unhandled VkValidationCheckEXT";
+ }
+}
+
+static inline const char* string_VkIndirectCommandsLayoutUsageFlagBitsNVX(VkIndirectCommandsLayoutUsageFlagBitsNVX input_value)
+{
+ switch ((VkIndirectCommandsLayoutUsageFlagBitsNVX)input_value)
+ {
+ case VK_INDIRECT_COMMANDS_LAYOUT_USAGE_SPARSE_SEQUENCES_BIT_NVX:
+ return "VK_INDIRECT_COMMANDS_LAYOUT_USAGE_SPARSE_SEQUENCES_BIT_NVX";
+ case VK_INDIRECT_COMMANDS_LAYOUT_USAGE_UNORDERED_SEQUENCES_BIT_NVX:
+ return "VK_INDIRECT_COMMANDS_LAYOUT_USAGE_UNORDERED_SEQUENCES_BIT_NVX";
+ case VK_INDIRECT_COMMANDS_LAYOUT_USAGE_INDEXED_SEQUENCES_BIT_NVX:
+ return "VK_INDIRECT_COMMANDS_LAYOUT_USAGE_INDEXED_SEQUENCES_BIT_NVX";
+ case VK_INDIRECT_COMMANDS_LAYOUT_USAGE_EMPTY_EXECUTIONS_BIT_NVX:
+ return "VK_INDIRECT_COMMANDS_LAYOUT_USAGE_EMPTY_EXECUTIONS_BIT_NVX";
+ default:
+ return "Unhandled VkIndirectCommandsLayoutUsageFlagBitsNVX";
+ }
+}
+
+static inline const char* string_VkObjectEntryUsageFlagBitsNVX(VkObjectEntryUsageFlagBitsNVX input_value)
+{
+ switch ((VkObjectEntryUsageFlagBitsNVX)input_value)
+ {
+ case VK_OBJECT_ENTRY_USAGE_GRAPHICS_BIT_NVX:
+ return "VK_OBJECT_ENTRY_USAGE_GRAPHICS_BIT_NVX";
+ case VK_OBJECT_ENTRY_USAGE_COMPUTE_BIT_NVX:
+ return "VK_OBJECT_ENTRY_USAGE_COMPUTE_BIT_NVX";
+ default:
+ return "Unhandled VkObjectEntryUsageFlagBitsNVX";
+ }
+}
+
+static inline const char* string_VkIndirectCommandsTokenTypeNVX(VkIndirectCommandsTokenTypeNVX input_value)
+{
+ switch ((VkIndirectCommandsTokenTypeNVX)input_value)
+ {
+ case VK_INDIRECT_COMMANDS_TOKEN_TYPE_DRAW_INDEXED_NVX:
+ return "VK_INDIRECT_COMMANDS_TOKEN_TYPE_DRAW_INDEXED_NVX";
+ case VK_INDIRECT_COMMANDS_TOKEN_TYPE_PUSH_CONSTANT_NVX:
+ return "VK_INDIRECT_COMMANDS_TOKEN_TYPE_PUSH_CONSTANT_NVX";
+ case VK_INDIRECT_COMMANDS_TOKEN_TYPE_DESCRIPTOR_SET_NVX:
+ return "VK_INDIRECT_COMMANDS_TOKEN_TYPE_DESCRIPTOR_SET_NVX";
+ case VK_INDIRECT_COMMANDS_TOKEN_TYPE_DISPATCH_NVX:
+ return "VK_INDIRECT_COMMANDS_TOKEN_TYPE_DISPATCH_NVX";
+ case VK_INDIRECT_COMMANDS_TOKEN_TYPE_VERTEX_BUFFER_NVX:
+ return "VK_INDIRECT_COMMANDS_TOKEN_TYPE_VERTEX_BUFFER_NVX";
+ case VK_INDIRECT_COMMANDS_TOKEN_TYPE_PIPELINE_NVX:
+ return "VK_INDIRECT_COMMANDS_TOKEN_TYPE_PIPELINE_NVX";
+ case VK_INDIRECT_COMMANDS_TOKEN_TYPE_DRAW_NVX:
+ return "VK_INDIRECT_COMMANDS_TOKEN_TYPE_DRAW_NVX";
+ case VK_INDIRECT_COMMANDS_TOKEN_TYPE_INDEX_BUFFER_NVX:
+ return "VK_INDIRECT_COMMANDS_TOKEN_TYPE_INDEX_BUFFER_NVX";
+ default:
+ return "Unhandled VkIndirectCommandsTokenTypeNVX";
+ }
+}
+
+static inline const char* string_VkObjectEntryTypeNVX(VkObjectEntryTypeNVX input_value)
+{
+ switch ((VkObjectEntryTypeNVX)input_value)
+ {
+ case VK_OBJECT_ENTRY_TYPE_INDEX_BUFFER_NVX:
+ return "VK_OBJECT_ENTRY_TYPE_INDEX_BUFFER_NVX";
+ case VK_OBJECT_ENTRY_TYPE_DESCRIPTOR_SET_NVX:
+ return "VK_OBJECT_ENTRY_TYPE_DESCRIPTOR_SET_NVX";
+ case VK_OBJECT_ENTRY_TYPE_VERTEX_BUFFER_NVX:
+ return "VK_OBJECT_ENTRY_TYPE_VERTEX_BUFFER_NVX";
+ case VK_OBJECT_ENTRY_TYPE_PIPELINE_NVX:
+ return "VK_OBJECT_ENTRY_TYPE_PIPELINE_NVX";
+ case VK_OBJECT_ENTRY_TYPE_PUSH_CONSTANT_NVX:
+ return "VK_OBJECT_ENTRY_TYPE_PUSH_CONSTANT_NVX";
+ default:
+ return "Unhandled VkObjectEntryTypeNVX";
+ }
+}
+
+static inline const char* string_VkSurfaceCounterFlagBitsEXT(VkSurfaceCounterFlagBitsEXT input_value)
+{
+ switch ((VkSurfaceCounterFlagBitsEXT)input_value)
+ {
+ case VK_SURFACE_COUNTER_VBLANK_EXT:
+ return "VK_SURFACE_COUNTER_VBLANK_EXT";
+ default:
+ return "Unhandled VkSurfaceCounterFlagBitsEXT";
+ }
+}
+
+static inline const char* string_VkDisplayPowerStateEXT(VkDisplayPowerStateEXT input_value)
+{
+ switch ((VkDisplayPowerStateEXT)input_value)
+ {
+ case VK_DISPLAY_POWER_STATE_SUSPEND_EXT:
+ return "VK_DISPLAY_POWER_STATE_SUSPEND_EXT";
+ case VK_DISPLAY_POWER_STATE_ON_EXT:
+ return "VK_DISPLAY_POWER_STATE_ON_EXT";
+ case VK_DISPLAY_POWER_STATE_OFF_EXT:
+ return "VK_DISPLAY_POWER_STATE_OFF_EXT";
+ default:
+ return "Unhandled VkDisplayPowerStateEXT";
+ }
+}
+
+static inline const char* string_VkDeviceEventTypeEXT(VkDeviceEventTypeEXT input_value)
+{
+ switch ((VkDeviceEventTypeEXT)input_value)
+ {
+ case VK_DEVICE_EVENT_TYPE_DISPLAY_HOTPLUG_EXT:
+ return "VK_DEVICE_EVENT_TYPE_DISPLAY_HOTPLUG_EXT";
+ default:
+ return "Unhandled VkDeviceEventTypeEXT";
+ }
+}
+
+static inline const char* string_VkDisplayEventTypeEXT(VkDisplayEventTypeEXT input_value)
+{
+ switch ((VkDisplayEventTypeEXT)input_value)
+ {
+ case VK_DISPLAY_EVENT_TYPE_FIRST_PIXEL_OUT_EXT:
+ return "VK_DISPLAY_EVENT_TYPE_FIRST_PIXEL_OUT_EXT";
+ default:
+ return "Unhandled VkDisplayEventTypeEXT";
+ }
+}
+
+static inline const char* string_VkViewportCoordinateSwizzleNV(VkViewportCoordinateSwizzleNV input_value)
+{
+ switch ((VkViewportCoordinateSwizzleNV)input_value)
+ {
+ case VK_VIEWPORT_COORDINATE_SWIZZLE_NEGATIVE_Y_NV:
+ return "VK_VIEWPORT_COORDINATE_SWIZZLE_NEGATIVE_Y_NV";
+ case VK_VIEWPORT_COORDINATE_SWIZZLE_NEGATIVE_Z_NV:
+ return "VK_VIEWPORT_COORDINATE_SWIZZLE_NEGATIVE_Z_NV";
+ case VK_VIEWPORT_COORDINATE_SWIZZLE_POSITIVE_X_NV:
+ return "VK_VIEWPORT_COORDINATE_SWIZZLE_POSITIVE_X_NV";
+ case VK_VIEWPORT_COORDINATE_SWIZZLE_NEGATIVE_X_NV:
+ return "VK_VIEWPORT_COORDINATE_SWIZZLE_NEGATIVE_X_NV";
+ case VK_VIEWPORT_COORDINATE_SWIZZLE_NEGATIVE_W_NV:
+ return "VK_VIEWPORT_COORDINATE_SWIZZLE_NEGATIVE_W_NV";
+ case VK_VIEWPORT_COORDINATE_SWIZZLE_POSITIVE_W_NV:
+ return "VK_VIEWPORT_COORDINATE_SWIZZLE_POSITIVE_W_NV";
+ case VK_VIEWPORT_COORDINATE_SWIZZLE_POSITIVE_Z_NV:
+ return "VK_VIEWPORT_COORDINATE_SWIZZLE_POSITIVE_Z_NV";
+ case VK_VIEWPORT_COORDINATE_SWIZZLE_POSITIVE_Y_NV:
+ return "VK_VIEWPORT_COORDINATE_SWIZZLE_POSITIVE_Y_NV";
+ default:
+ return "Unhandled VkViewportCoordinateSwizzleNV";
+ }
+}
+
+static inline const char* string_VkDiscardRectangleModeEXT(VkDiscardRectangleModeEXT input_value)
+{
+ switch ((VkDiscardRectangleModeEXT)input_value)
+ {
+ case VK_DISCARD_RECTANGLE_MODE_EXCLUSIVE_EXT:
+ return "VK_DISCARD_RECTANGLE_MODE_EXCLUSIVE_EXT";
+ case VK_DISCARD_RECTANGLE_MODE_INCLUSIVE_EXT:
+ return "VK_DISCARD_RECTANGLE_MODE_INCLUSIVE_EXT";
+ default:
+ return "Unhandled VkDiscardRectangleModeEXT";
+ }
+}
+
+static inline const char* string_VkConservativeRasterizationModeEXT(VkConservativeRasterizationModeEXT input_value)
+{
+ switch ((VkConservativeRasterizationModeEXT)input_value)
+ {
+ case VK_CONSERVATIVE_RASTERIZATION_MODE_OVERESTIMATE_EXT:
+ return "VK_CONSERVATIVE_RASTERIZATION_MODE_OVERESTIMATE_EXT";
+ case VK_CONSERVATIVE_RASTERIZATION_MODE_DISABLED_EXT:
+ return "VK_CONSERVATIVE_RASTERIZATION_MODE_DISABLED_EXT";
+ case VK_CONSERVATIVE_RASTERIZATION_MODE_UNDERESTIMATE_EXT:
+ return "VK_CONSERVATIVE_RASTERIZATION_MODE_UNDERESTIMATE_EXT";
+ default:
+ return "Unhandled VkConservativeRasterizationModeEXT";
+ }
+}
+
+static inline const char* string_VkDebugUtilsMessageSeverityFlagBitsEXT(VkDebugUtilsMessageSeverityFlagBitsEXT input_value)
+{
+ switch ((VkDebugUtilsMessageSeverityFlagBitsEXT)input_value)
+ {
+ case VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT:
+ return "VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT";
+ case VK_DEBUG_UTILS_MESSAGE_SEVERITY_INFO_BIT_EXT:
+ return "VK_DEBUG_UTILS_MESSAGE_SEVERITY_INFO_BIT_EXT";
+ case VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT:
+ return "VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT";
+ case VK_DEBUG_UTILS_MESSAGE_SEVERITY_VERBOSE_BIT_EXT:
+ return "VK_DEBUG_UTILS_MESSAGE_SEVERITY_VERBOSE_BIT_EXT";
+ default:
+ return "Unhandled VkDebugUtilsMessageSeverityFlagBitsEXT";
+ }
+}
+
+static inline const char* string_VkDebugUtilsMessageTypeFlagBitsEXT(VkDebugUtilsMessageTypeFlagBitsEXT input_value)
+{
+ switch ((VkDebugUtilsMessageTypeFlagBitsEXT)input_value)
+ {
+ case VK_DEBUG_UTILS_MESSAGE_TYPE_PERFORMANCE_BIT_EXT:
+ return "VK_DEBUG_UTILS_MESSAGE_TYPE_PERFORMANCE_BIT_EXT";
+ case VK_DEBUG_UTILS_MESSAGE_TYPE_VALIDATION_BIT_EXT:
+ return "VK_DEBUG_UTILS_MESSAGE_TYPE_VALIDATION_BIT_EXT";
+ case VK_DEBUG_UTILS_MESSAGE_TYPE_GENERAL_BIT_EXT:
+ return "VK_DEBUG_UTILS_MESSAGE_TYPE_GENERAL_BIT_EXT";
+ default:
+ return "Unhandled VkDebugUtilsMessageTypeFlagBitsEXT";
+ }
+}
+
+static inline const char* string_VkSamplerReductionModeEXT(VkSamplerReductionModeEXT input_value)
+{
+ switch ((VkSamplerReductionModeEXT)input_value)
+ {
+ case VK_SAMPLER_REDUCTION_MODE_MAX_EXT:
+ return "VK_SAMPLER_REDUCTION_MODE_MAX_EXT";
+ case VK_SAMPLER_REDUCTION_MODE_MIN_EXT:
+ return "VK_SAMPLER_REDUCTION_MODE_MIN_EXT";
+ case VK_SAMPLER_REDUCTION_MODE_WEIGHTED_AVERAGE_EXT:
+ return "VK_SAMPLER_REDUCTION_MODE_WEIGHTED_AVERAGE_EXT";
+ default:
+ return "Unhandled VkSamplerReductionModeEXT";
+ }
+}
+
+static inline const char* string_VkBlendOverlapEXT(VkBlendOverlapEXT input_value)
+{
+ switch ((VkBlendOverlapEXT)input_value)
+ {
+ case VK_BLEND_OVERLAP_DISJOINT_EXT:
+ return "VK_BLEND_OVERLAP_DISJOINT_EXT";
+ case VK_BLEND_OVERLAP_UNCORRELATED_EXT:
+ return "VK_BLEND_OVERLAP_UNCORRELATED_EXT";
+ case VK_BLEND_OVERLAP_CONJOINT_EXT:
+ return "VK_BLEND_OVERLAP_CONJOINT_EXT";
+ default:
+ return "Unhandled VkBlendOverlapEXT";
+ }
+}
+
+static inline const char* string_VkCoverageModulationModeNV(VkCoverageModulationModeNV input_value)
+{
+ switch ((VkCoverageModulationModeNV)input_value)
+ {
+ case VK_COVERAGE_MODULATION_MODE_RGBA_NV:
+ return "VK_COVERAGE_MODULATION_MODE_RGBA_NV";
+ case VK_COVERAGE_MODULATION_MODE_ALPHA_NV:
+ return "VK_COVERAGE_MODULATION_MODE_ALPHA_NV";
+ case VK_COVERAGE_MODULATION_MODE_RGB_NV:
+ return "VK_COVERAGE_MODULATION_MODE_RGB_NV";
+ case VK_COVERAGE_MODULATION_MODE_NONE_NV:
+ return "VK_COVERAGE_MODULATION_MODE_NONE_NV";
+ default:
+ return "Unhandled VkCoverageModulationModeNV";
+ }
+}
+
+static inline const char* string_VkValidationCacheHeaderVersionEXT(VkValidationCacheHeaderVersionEXT input_value)
+{
+ switch ((VkValidationCacheHeaderVersionEXT)input_value)
+ {
+ case VK_VALIDATION_CACHE_HEADER_VERSION_ONE_EXT:
+ return "VK_VALIDATION_CACHE_HEADER_VERSION_ONE_EXT";
+ default:
+ return "Unhandled VkValidationCacheHeaderVersionEXT";
+ }
+}
+
+static inline const char* string_VkDescriptorBindingFlagBitsEXT(VkDescriptorBindingFlagBitsEXT input_value)
+{
+ switch ((VkDescriptorBindingFlagBitsEXT)input_value)
+ {
+ case VK_DESCRIPTOR_BINDING_VARIABLE_DESCRIPTOR_COUNT_BIT_EXT:
+ return "VK_DESCRIPTOR_BINDING_VARIABLE_DESCRIPTOR_COUNT_BIT_EXT";
+ case VK_DESCRIPTOR_BINDING_UPDATE_UNUSED_WHILE_PENDING_BIT_EXT:
+ return "VK_DESCRIPTOR_BINDING_UPDATE_UNUSED_WHILE_PENDING_BIT_EXT";
+ case VK_DESCRIPTOR_BINDING_PARTIALLY_BOUND_BIT_EXT:
+ return "VK_DESCRIPTOR_BINDING_PARTIALLY_BOUND_BIT_EXT";
+ case VK_DESCRIPTOR_BINDING_UPDATE_AFTER_BIND_BIT_EXT:
+ return "VK_DESCRIPTOR_BINDING_UPDATE_AFTER_BIND_BIT_EXT";
+ default:
+ return "Unhandled VkDescriptorBindingFlagBitsEXT";
+ }
+}
+
+static inline const char* string_VkQueueGlobalPriorityEXT(VkQueueGlobalPriorityEXT input_value)
+{
+ switch ((VkQueueGlobalPriorityEXT)input_value)
+ {
+ case VK_QUEUE_GLOBAL_PRIORITY_LOW_EXT:
+ return "VK_QUEUE_GLOBAL_PRIORITY_LOW_EXT";
+ case VK_QUEUE_GLOBAL_PRIORITY_HIGH_EXT:
+ return "VK_QUEUE_GLOBAL_PRIORITY_HIGH_EXT";
+ case VK_QUEUE_GLOBAL_PRIORITY_MEDIUM_EXT:
+ return "VK_QUEUE_GLOBAL_PRIORITY_MEDIUM_EXT";
+ case VK_QUEUE_GLOBAL_PRIORITY_REALTIME_EXT:
+ return "VK_QUEUE_GLOBAL_PRIORITY_REALTIME_EXT";
+ default:
+ return "Unhandled VkQueueGlobalPriorityEXT";
+ }
+}
+
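+// Maps an index to the name of the corresponding VkBool32 member of
+// VkPhysicalDeviceFeatures, in declaration order (55 entries in this header).
+// Note: there is no bounds check; the caller must pass index < 55.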
+static inline const char * GetPhysDevFeatureString(uint32_t index) {
+ static const char * const IndexToPhysDevFeatureString[] = {
+ "robustBufferAccess",
+ "fullDrawIndexUint32",
+ "imageCubeArray",
+ "independentBlend",
+ "geometryShader",
+ "tessellationShader",
+ "sampleRateShading",
+ "dualSrcBlend",
+ "logicOp",
+ "multiDrawIndirect",
+ "drawIndirectFirstInstance",
+ "depthClamp",
+ "depthBiasClamp",
+ "fillModeNonSolid",
+ "depthBounds",
+ "wideLines",
+ "largePoints",
+ "alphaToOne",
+ "multiViewport",
+ "samplerAnisotropy",
+ "textureCompressionETC2",
+ "textureCompressionASTC_LDR",
+ "textureCompressionBC",
+ "occlusionQueryPrecise",
+ "pipelineStatisticsQuery",
+ "vertexPipelineStoresAndAtomics",
+ "fragmentStoresAndAtomics",
+ "shaderTessellationAndGeometryPointSize",
+ "shaderImageGatherExtended",
+ "shaderStorageImageExtendedFormats",
+ "shaderStorageImageMultisample",
+ "shaderStorageImageReadWithoutFormat",
+ "shaderStorageImageWriteWithoutFormat",
+ "shaderUniformBufferArrayDynamicIndexing",
+ "shaderSampledImageArrayDynamicIndexing",
+ "shaderStorageBufferArrayDynamicIndexing",
+ "shaderStorageImageArrayDynamicIndexing",
+ "shaderClipDistance",
+ "shaderCullDistance",
+ "shaderFloat64",
+ "shaderInt64",
+ "shaderInt16",
+ "shaderResourceResidency",
+ "shaderResourceMinLod",
+ "sparseBinding",
+ "sparseResidencyBuffer",
+ "sparseResidencyImage2D",
+ "sparseResidencyImage3D",
+ "sparseResidency2Samples",
+ "sparseResidency4Samples",
+ "sparseResidency8Samples",
+ "sparseResidency16Samples",
+ "sparseResidencyAliased",
+ "variableMultisampleRate",
+ "inheritedQueries",
+ };
+
+ return IndexToPhysDevFeatureString[index];
+}
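+
+// Usage sketch (illustrative, not part of the generated helpers): the string
+// functions above are typically used to turn enum values into readable text
+// in validation and debug messages, e.g.:
+//
+//     printf("Present mode: %s\n", string_VkPresentModeKHR(mode));
+//
+// GetPhysDevFeatureString(i) likewise names the i-th feature when reporting
+// unsupported VkPhysicalDeviceFeatures members.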
diff --git a/drivers/vulkan/vk_mem_alloc.cpp b/drivers/vulkan/vk_mem_alloc.cpp
new file mode 100644
index 0000000000..a2023d33b2
--- /dev/null
+++ b/drivers/vulkan/vk_mem_alloc.cpp
@@ -0,0 +1,2 @@
+#define VMA_IMPLEMENTATION
+#include "vk_mem_alloc.h"
diff --git a/drivers/vulkan/vk_mem_alloc.h b/drivers/vulkan/vk_mem_alloc.h
new file mode 100644
index 0000000000..862ea312a6
--- /dev/null
+++ b/drivers/vulkan/vk_mem_alloc.h
@@ -0,0 +1,15448 @@
+//
+// Copyright (c) 2017-2019 Advanced Micro Devices, Inc. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+//
+
+#ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
+#define AMD_VULKAN_MEMORY_ALLOCATOR_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/** \mainpage Vulkan Memory Allocator
+
+<b>Version 2.3.0-development</b> (2019-03-05)
+
+Copyright (c) 2017-2018 Advanced Micro Devices, Inc. All rights reserved. \n
+License: MIT
+
+Documentation of all members: vk_mem_alloc.h
+
+\section main_table_of_contents Table of contents
+
+- <b>User guide</b>
+ - \subpage quick_start
+ - [Project setup](@ref quick_start_project_setup)
+ - [Initialization](@ref quick_start_initialization)
+ - [Resource allocation](@ref quick_start_resource_allocation)
+ - \subpage choosing_memory_type
+ - [Usage](@ref choosing_memory_type_usage)
+ - [Required and preferred flags](@ref choosing_memory_type_required_preferred_flags)
+ - [Explicit memory types](@ref choosing_memory_type_explicit_memory_types)
+ - [Custom memory pools](@ref choosing_memory_type_custom_memory_pools)
+ - [Dedicated allocations](@ref choosing_memory_type_dedicated_allocations)
+ - \subpage memory_mapping
+ - [Mapping functions](@ref memory_mapping_mapping_functions)
+ - [Persistently mapped memory](@ref memory_mapping_persistently_mapped_memory)
+ - [Cache control](@ref memory_mapping_cache_control)
+ - [Finding out if memory is mappable](@ref memory_mapping_finding_if_memory_mappable)
+ - \subpage custom_memory_pools
+ - [Choosing memory type index](@ref custom_memory_pools_MemTypeIndex)
+ - [Linear allocation algorithm](@ref linear_algorithm)
+ - [Free-at-once](@ref linear_algorithm_free_at_once)
+ - [Stack](@ref linear_algorithm_stack)
+ - [Double stack](@ref linear_algorithm_double_stack)
+ - [Ring buffer](@ref linear_algorithm_ring_buffer)
+ - [Buddy allocation algorithm](@ref buddy_algorithm)
+ - \subpage defragmentation
+ - [Defragmenting CPU memory](@ref defragmentation_cpu)
+ - [Defragmenting GPU memory](@ref defragmentation_gpu)
+ - [Additional notes](@ref defragmentation_additional_notes)
+ - [Writing custom allocation algorithm](@ref defragmentation_custom_algorithm)
+ - \subpage lost_allocations
+ - \subpage statistics
+ - [Numeric statistics](@ref statistics_numeric_statistics)
+ - [JSON dump](@ref statistics_json_dump)
+ - \subpage allocation_annotation
+ - [Allocation user data](@ref allocation_user_data)
+ - [Allocation names](@ref allocation_names)
+ - \subpage debugging_memory_usage
+ - [Memory initialization](@ref debugging_memory_usage_initialization)
+ - [Margins](@ref debugging_memory_usage_margins)
+ - [Corruption detection](@ref debugging_memory_usage_corruption_detection)
+ - \subpage record_and_replay
+- \subpage usage_patterns
+ - [Simple patterns](@ref usage_patterns_simple)
+ - [Advanced patterns](@ref usage_patterns_advanced)
+- \subpage configuration
+ - [Pointers to Vulkan functions](@ref config_Vulkan_functions)
+ - [Custom host memory allocator](@ref custom_memory_allocator)
+ - [Device memory allocation callbacks](@ref allocation_callbacks)
+ - [Device heap memory limit](@ref heap_memory_limit)
+ - \subpage vk_khr_dedicated_allocation
+- \subpage general_considerations
+ - [Thread safety](@ref general_considerations_thread_safety)
+ - [Validation layer warnings](@ref general_considerations_validation_layer_warnings)
+ - [Allocation algorithm](@ref general_considerations_allocation_algorithm)
+ - [Features not supported](@ref general_considerations_features_not_supported)
+
+\section main_see_also See also
+
+- [Product page on GPUOpen](https://gpuopen.com/gaming-product/vulkan-memory-allocator/)
+- [Source repository on GitHub](https://github.com/GPUOpen-LibrariesAndSDKs/VulkanMemoryAllocator)
+
+
+
+
+\page quick_start Quick start
+
+\section quick_start_project_setup Project setup
+
+Vulkan Memory Allocator comes in the form of an "stb-style" single header file.
+You don't need to build it as a separate library project.
+You can add this file directly to your project and commit it to your code repository next to your other source files.
+
+"Single header" doesn't mean that everything is contained in C/C++ declarations,
+like it tends to be in case of inline functions or C++ templates.
+It means that implementation is bundled with interface in a single file and needs to be extracted using preprocessor macro.
+If you don't do it properly, you will get linker errors.
+
+To do it properly:
+
+-# Include "vk_mem_alloc.h" file in each CPP file where you want to use the library.
+ This includes declarations of all members of the library.
+-# In exacly one CPP file define following macro before this include.
+ It enables also internal definitions.
+
+\code
+#define VMA_IMPLEMENTATION
+#include "vk_mem_alloc.h"
+\endcode
+
+It may be a good idea to create a dedicated CPP file just for this purpose.
+
+Note on language: This library is written in C++, but has a C-compatible interface.
+Thus you can include and use vk_mem_alloc.h in C or C++ code, but the full
+implementation with the `VMA_IMPLEMENTATION` macro must be compiled as C++, NOT as C.
+
+Please note that this library includes header `<vulkan/vulkan.h>`, which in turn
+includes `<windows.h>` on Windows. If you need some specific macros defined
+before including these headers (like `WIN32_LEAN_AND_MEAN` or
+`WINVER` for Windows, `VK_USE_PLATFORM_WIN32_KHR` for Vulkan), you must define
+them before every `#include` of this library.
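+
+For example, a minimal sketch (the macros shown are the standard platform
+macros mentioned above, not additions of this library):
+
+\code
+// These must come before every #include of this header.
+#define WIN32_LEAN_AND_MEAN
+#define VK_USE_PLATFORM_WIN32_KHR
+#include "vk_mem_alloc.h"
+\endcode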
+
+
+\section quick_start_initialization Initialization
+
+At program startup:
+
+-# Initialize Vulkan to have `VkPhysicalDevice` and `VkDevice` objects.
+-# Fill VmaAllocatorCreateInfo structure and create #VmaAllocator object by
+ calling vmaCreateAllocator().
+
+\code
+VmaAllocatorCreateInfo allocatorInfo = {};
+allocatorInfo.physicalDevice = physicalDevice;
+allocatorInfo.device = device;
+
+VmaAllocator allocator;
+vmaCreateAllocator(&allocatorInfo, &allocator);
+\endcode
+
+\section quick_start_resource_allocation Resource allocation
+
+When you want to create a buffer or image:
+
+-# Fill `VkBufferCreateInfo` / `VkImageCreateInfo` structure.
+-# Fill VmaAllocationCreateInfo structure.
+-# Call vmaCreateBuffer() / vmaCreateImage() to get `VkBuffer`/`VkImage` with memory
+ already allocated and bound to it.
+
+\code
+VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
+bufferInfo.size = 65536;
+bufferInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
+
+VmaAllocationCreateInfo allocInfo = {};
+allocInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
+
+VkBuffer buffer;
+VmaAllocation allocation;
+vmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &buffer, &allocation, nullptr);
+\endcode
+
+Don't forget to destroy your objects when no longer needed:
+
+\code
+vmaDestroyBuffer(allocator, buffer, allocation);
+vmaDestroyAllocator(allocator);
+\endcode
+
+
+\page choosing_memory_type Choosing memory type
+
+Physical devices in Vulkan support various combinations of memory heaps and
+types. Helping you choose the correct and optimal memory type for your specific
+resource is one of the key features of this library. You can use it by filling
+the appropriate members of the VmaAllocationCreateInfo structure, as described below.
+You can also combine multiple methods.
+
+-# If you just want to find a memory type index that meets your requirements, you
+ can use the functions vmaFindMemoryTypeIndex(), vmaFindMemoryTypeIndexForBufferInfo(),
+ or vmaFindMemoryTypeIndexForImageInfo().
+-# If you want to allocate a region of device memory without association with any
+ specific image or buffer, you can use the function vmaAllocateMemory(). Usage of
+ this function is not recommended and usually not needed.
+ The vmaAllocateMemoryPages() function is also provided for creating multiple allocations at once,
+ which may be useful for sparse binding.
+-# If you already have a buffer or an image created and want to allocate memory
+ for it and then bind it yourself, you can use vmaAllocateMemoryForBuffer()
+ or vmaAllocateMemoryForImage().
+ For binding, use vmaBindBufferMemory() or vmaBindImageMemory().
+-# If you want to create a buffer or an image, allocate memory for it, and bind
+ them together, all in one call, you can use vmaCreateBuffer() or
+ vmaCreateImage(). This is the easiest and recommended way to use this library.
+
+When using 3. or 4., the library internally queries Vulkan for the memory types
+supported for that buffer or image (via `vkGetBufferMemoryRequirements()`)
+and uses only one of these types.
+
+If no memory type can be found that meets all the requirements, these functions
+return `VK_ERROR_FEATURE_NOT_PRESENT`.
+
+You can leave the VmaAllocationCreateInfo structure completely filled with zeros.
+It means no requirements are specified for the memory type.
+It is valid, although not very useful.
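+
+For example, a minimal sketch of method 1. alone (just finding a memory type
+index; `allocator` and `bufferInfo` are assumed to be initialized as in the
+earlier examples):
+
+\code
+VmaAllocationCreateInfo allocInfo = {};
+allocInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;
+
+uint32_t memTypeIndex;
+vmaFindMemoryTypeIndexForBufferInfo(allocator, &bufferInfo, &allocInfo, &memTypeIndex);
+\endcode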
+
+\section choosing_memory_type_usage Usage
+
+The easiest way to specify memory requirements is to fill the member
+VmaAllocationCreateInfo::usage using one of the values of the enum #VmaMemoryUsage.
+It defines high-level, common usage types.
+For more details, see the description of this enum.
+
+For example, if you want to create a uniform buffer that will be filled using
+transfer only once or infrequently and used for rendering every frame, you can
+do it using the following code:
+
+\code
+VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
+bufferInfo.size = 65536;
+bufferInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
+
+VmaAllocationCreateInfo allocInfo = {};
+allocInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
+
+VkBuffer buffer;
+VmaAllocation allocation;
+vmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &buffer, &allocation, nullptr);
+\endcode
+
+\section choosing_memory_type_required_preferred_flags Required and preferred flags
+
+You can specify more detailed requirements by filling the members
+VmaAllocationCreateInfo::requiredFlags and VmaAllocationCreateInfo::preferredFlags
+with a combination of bits from the enum `VkMemoryPropertyFlags`. For example,
+if you want to create a buffer that will be persistently mapped on the host (so it
+must be `HOST_VISIBLE`) and preferably will also be `HOST_COHERENT` and `HOST_CACHED`,
+use the following code:
+
+\code
+VmaAllocationCreateInfo allocInfo = {};
+allocInfo.requiredFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
+allocInfo.preferredFlags = VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
+allocInfo.flags = VMA_ALLOCATION_CREATE_MAPPED_BIT;
+
+VkBuffer buffer;
+VmaAllocation allocation;
+vmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &buffer, &allocation, nullptr);
+\endcode
+
+A memory type is chosen that has all the required flags and as many preferred
+flags set as possible.
+
+If you use VmaAllocationCreateInfo::usage, it is just internally converted to
+a set of required and preferred flags.
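+
+As a rough illustration (not the exact internals, only the documented mapping
+for this value), `VMA_MEMORY_USAGE_CPU_TO_GPU` behaves approximately like:
+
+\code
+VmaAllocationCreateInfo allocInfo = {};
+allocInfo.requiredFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
+allocInfo.preferredFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
+\endcode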
+
+\section choosing_memory_type_explicit_memory_types Explicit memory types
+
+If you inspected memory types available on the physical device and you have
+a preference for memory types that you want to use, you can fill the member
+VmaAllocationCreateInfo::memoryTypeBits. It is a bit mask, where each set bit
+means that a memory type with that index is allowed to be used for the
+allocation. The special value 0, just like `UINT32_MAX`, means there are no
+restrictions on the memory type index.
+
+Please note that this member is NOT just a memory type index.
+Still, you can use it to choose just one specific memory type.
+For example, if you already determined that your buffer should be created in
+memory type 2, use the following code:
+
+\code
+uint32_t memoryTypeIndex = 2;
+
+VmaAllocationCreateInfo allocInfo = {};
+allocInfo.memoryTypeBits = 1u << memoryTypeIndex;
+
+VkBuffer buffer;
+VmaAllocation allocation;
+vmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &buffer, &allocation, nullptr);
+\endcode
+
+\section choosing_memory_type_custom_memory_pools Custom memory pools
+
+If you allocate from a custom memory pool, none of the ways of specifying memory
+requirements described above apply, and the aforementioned members
+of the VmaAllocationCreateInfo structure are ignored. The memory type is selected
+explicitly when creating the pool and is then used to make all the allocations from
+that pool. For further details, see \ref custom_memory_pools.
+
+\section choosing_memory_type_dedicated_allocations Dedicated allocations
+
+Memory for allocations is reserved out of a larger block of `VkDeviceMemory`
+allocated from Vulkan internally. That's the main feature of this whole library.
+You can still request a separate memory block to be created for an allocation,
+just like you would do in a trivial solution without using any allocator.
+In that case, a buffer or image is always bound to that memory at offset 0.
+This is called a "dedicated allocation".
+You can explicitly request it by using the flag #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT
+(see the sketch after the list below).
+The library can also internally decide to use a dedicated allocation in some cases, e.g.:
+
+- When the size of the allocation is large.
+- When the [VK_KHR_dedicated_allocation](@ref vk_khr_dedicated_allocation) extension is enabled
+ and it reports that a dedicated allocation is required or recommended for the resource.
+- When allocation of the next big memory block fails due to insufficient device memory,
+ but allocation of a block with the exact requested size succeeds.
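+
+A minimal sketch of explicitly requesting a dedicated allocation, reusing
+`allocator` and a `bufCreateInfo` like in the other examples here (the exact
+names are assumptions):
+
+\code
+VmaAllocationCreateInfo allocCreateInfo = {};
+allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
+allocCreateInfo.flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
+
+VkBuffer buf;
+VmaAllocation alloc;
+// The resulting allocation gets its own VkDeviceMemory block, bound at offset 0.
+vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, nullptr);
+\endcode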
+
+
+\page memory_mapping Memory mapping
+
+To "map memory" in Vulkan means to obtain a CPU pointer to `VkDeviceMemory`,
+to be able to read from it or write to it in CPU code.
+Mapping is possible only of memory allocated from a memory type that has
+`VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT` flag.
+Functions `vkMapMemory()`, `vkUnmapMemory()` are designed for this purpose.
+You can use them directly with memory allocated by this library,
+but it is not recommended because of following issue:
+Mapping the same `VkDeviceMemory` block multiple times is illegal - only one mapping at a time is allowed.
+This includes mapping disjoint regions. Mapping is not reference-counted internally by Vulkan.
+Because of this, Vulkan Memory Allocator provides following facilities:
+
+\section memory_mapping_mapping_functions Mapping functions
+
+The library provides the following functions for mapping of a specific #VmaAllocation: vmaMapMemory() and vmaUnmapMemory().
+They are safer and more convenient to use than the standard Vulkan functions.
+You can map an allocation multiple times simultaneously - mapping is reference-counted internally.
+You can also map different allocations simultaneously, regardless of whether they use the same `VkDeviceMemory` block.
+The library always maps the entire memory block, not just the region of the allocation.
+For further details, see the description of the vmaMapMemory() function.
+Example:
+
+\code
+// Having these objects initialized:
+
+struct ConstantBuffer
+{
+ ...
+};
+ConstantBuffer constantBufferData;
+
+VmaAllocator allocator;
+VkBuffer constantBuffer;
+VmaAllocation constantBufferAllocation;
+
+// You can map and fill your buffer using following code:
+
+void* mappedData;
+vmaMapMemory(allocator, constantBufferAllocation, &mappedData);
+memcpy(mappedData, &constantBufferData, sizeof(constantBufferData));
+vmaUnmapMemory(allocator, constantBufferAllocation);
+\endcode
+
+When mapping, you may see a warning from the Vulkan validation layer similar to this one:
+
+<i>Mapping an image with layout VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL can result in undefined behavior if this memory is used by the device. Only GENERAL or PREINITIALIZED should be used.</i>
+
+It happens because the library maps the entire `VkDeviceMemory` block, where different
+types of images and buffers may end up together, especially on GPUs with unified memory like Intel.
+You can safely ignore it if you are sure you access only the memory of the
+object that you intended to map.
+
+
+\section memory_mapping_persistently_mapped_memory Persistently mapped memory
+
+Keeping your memory persistently mapped is generally OK in Vulkan.
+You don't need to unmap it before using its data on the GPU.
+The library provides a special feature designed for that:
+Allocations made with the #VMA_ALLOCATION_CREATE_MAPPED_BIT flag set in
+VmaAllocationCreateInfo::flags stay mapped all the time,
+so you can access their CPU pointer at any time
+without needing to call any "map" or "unmap" function.
+Example:
+
+\code
+VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
+bufCreateInfo.size = sizeof(ConstantBuffer);
+bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
+
+VmaAllocationCreateInfo allocCreateInfo = {};
+allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;
+allocCreateInfo.flags = VMA_ALLOCATION_CREATE_MAPPED_BIT;
+
+VkBuffer buf;
+VmaAllocation alloc;
+VmaAllocationInfo allocInfo;
+vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
+
+// Buffer is already mapped. You can access its memory.
+memcpy(allocInfo.pMappedData, &constantBufferData, sizeof(constantBufferData));
+\endcode
+
+There are some exceptions though, when you should consider mapping memory only for a short period of time:
+
+- When the operating system is Windows 7 or 8.x (Windows 10 is not affected because it uses WDDM2),
+ the device is a discrete AMD GPU,
+ and the memory type is the special 256 MiB pool of `DEVICE_LOCAL + HOST_VISIBLE` memory
+ (selected when you use #VMA_MEMORY_USAGE_CPU_TO_GPU),
+ then whenever a memory block allocated from this memory type stays mapped
+ during any call to `vkQueueSubmit()` or `vkQueuePresentKHR()`, the
+ block is migrated by WDDM to system RAM, which degrades performance. It doesn't
+ matter whether that particular memory block is actually used by the command buffer
+ being submitted.
+- On Mac/MoltenVK there is a known bug - [Issue #175](https://github.com/KhronosGroup/MoltenVK/issues/175) -
+ which requires unmapping before the GPU can see an updated texture.
+- Keeping many large memory blocks mapped may impact the performance or stability of some debugging tools.
+
+\section memory_mapping_cache_control Cache control
+
+Memory in Vulkan doesn't need to be unmapped before using it on the GPU,
+but unless a memory type has the `VK_MEMORY_PROPERTY_HOST_COHERENT_BIT` flag set,
+you need to manually invalidate the cache before reading from a mapped pointer
+and flush the cache after writing to it.
+Vulkan provides the following functions for this purpose: `vkFlushMappedMemoryRanges()`
+and `vkInvalidateMappedMemoryRanges()`, but this library provides more convenient
+functions that operate on a given allocation object: vmaFlushAllocation() and
+vmaInvalidateAllocation().
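+
+For example, a minimal sketch of flushing after a CPU write, assuming `allocator`
+and `alloc` as in the earlier examples (`VK_WHOLE_SIZE` covers the entire allocation):
+
+\code
+void* mappedData;
+vmaMapMemory(allocator, alloc, &mappedData);
+memcpy(mappedData, &constantBufferData, sizeof(constantBufferData));
+// Make the write visible to the GPU if the memory type is not HOST_COHERENT.
+vmaFlushAllocation(allocator, alloc, 0, VK_WHOLE_SIZE);
+vmaUnmapMemory(allocator, alloc);
+\endcode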
+
+Regions of memory specified for flush/invalidate must be aligned to
+`VkPhysicalDeviceLimits::nonCoherentAtomSize`. This is automatically ensured by the library.
+In any memory type that is `HOST_VISIBLE` but not `HOST_COHERENT`, all allocations
+within blocks are aligned to this value, so their offsets are always a multiple of
+`nonCoherentAtomSize` and two different allocations never share the same "line" of this size.
+
+Please note that memory allocated with #VMA_MEMORY_USAGE_CPU_ONLY is guaranteed to be `HOST_COHERENT`.
+
+Also, Windows drivers from all 3 PC GPU vendors (AMD, Intel, NVIDIA)
+currently provide the `HOST_COHERENT` flag on all memory types that are
+`HOST_VISIBLE`, so on this platform you may not need to bother.
+
+\section memory_mapping_finding_if_memory_mappable Finding out if memory is mappable
+
+It may happen that your allocation ends up in memory that is `HOST_VISIBLE` (available for mapping)
+even though you didn't explicitly request it.
+For example, the application may run on integrated graphics with unified memory (like Intel), or
+allocation from video memory might have failed, so the library chose system memory as a fallback.
+
+You can detect this case and map such an allocation to access its memory on the CPU directly,
+instead of launching a transfer operation.
+To do that: inspect `allocInfo.memoryType`, call vmaGetMemoryTypeProperties(),
+and look for the `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT` flag in the properties of that memory type.
+
+\code
+VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
+bufCreateInfo.size = sizeof(ConstantBuffer);
+bufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
+
+VmaAllocationCreateInfo allocCreateInfo = {};
+allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
+allocCreateInfo.preferredFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
+
+VkBuffer buf;
+VmaAllocation alloc;
+VmaAllocationInfo allocInfo;
+vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
+
+VkMemoryPropertyFlags memFlags;
+vmaGetMemoryTypeProperties(allocator, allocInfo.memoryType, &memFlags);
+if((memFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
+{
+ // Allocation ended up in mappable memory. You can map it and access it directly.
+ void* mappedData;
+ vmaMapMemory(allocator, alloc, &mappedData);
+ memcpy(mappedData, &constantBufferData, sizeof(constantBufferData));
+ vmaUnmapMemory(allocator, alloc);
+}
+else
+{
+ // Allocation ended up in non-mappable memory.
+ // You need to create CPU-side buffer in VMA_MEMORY_USAGE_CPU_ONLY and make a transfer.
+}
+\endcode
+
+You can even use the #VMA_ALLOCATION_CREATE_MAPPED_BIT flag while creating allocations
+that are not necessarily `HOST_VISIBLE` (e.g. using #VMA_MEMORY_USAGE_GPU_ONLY).
+If the allocation ends up in a memory type that is `HOST_VISIBLE`, it will be persistently mapped and you can use it directly.
+If not, the flag is simply ignored.
+Example:
+
+\code
+VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
+bufCreateInfo.size = sizeof(ConstantBuffer);
+bufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
+
+VmaAllocationCreateInfo allocCreateInfo = {};
+allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
+allocCreateInfo.flags = VMA_ALLOCATION_CREATE_MAPPED_BIT;
+
+VkBuffer buf;
+VmaAllocation alloc;
+VmaAllocationInfo allocInfo;
+vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
+
+if(allocInfo.pMappedData != nullptr)
+{
+ // Allocation ended up in mappable memory.
+ // It's persistently mapped. You can access it directly.
+ memcpy(allocInfo.pMappedData, &constantBufferData, sizeof(constantBufferData));
+}
+else
+{
+ // Allocation ended up in non-mappable memory.
+ // You need to create CPU-side buffer in VMA_MEMORY_USAGE_CPU_ONLY and make a transfer.
+}
+\endcode
+
+
+\page custom_memory_pools Custom memory pools
+
+A memory pool contains a number of `VkDeviceMemory` blocks.
+The library automatically creates and manages a default pool for each memory type available on the device.
+The default memory pool automatically grows in size.
+The size of allocated blocks is also variable and managed automatically.
+
+You can create a custom pool and allocate memory out of it.
+This can be useful if you want to:
+
+- Keep a certain kind of allocations separate from others.
+- Enforce a particular, fixed size of Vulkan memory blocks.
+- Limit the maximum amount of Vulkan memory allocated for that pool.
+- Reserve a minimum or fixed amount of Vulkan memory always preallocated for that pool.
+
+To use custom memory pools:
+
+-# Fill VmaPoolCreateInfo structure.
+-# Call vmaCreatePool() to obtain #VmaPool handle.
+-# When making an allocation, set VmaAllocationCreateInfo::pool to this handle.
+ You don't need to specify any other parameters of this structure, like `usage`.
+
+Example:
+
+\code
+// Create a pool that can have at most 2 blocks, 128 MiB each.
+VmaPoolCreateInfo poolCreateInfo = {};
+poolCreateInfo.memoryTypeIndex = ...
+poolCreateInfo.blockSize = 128ull * 1024 * 1024;
+poolCreateInfo.maxBlockCount = 2;
+
+VmaPool pool;
+vmaCreatePool(allocator, &poolCreateInfo, &pool);
+
+// Allocate a buffer out of it.
+VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
+bufCreateInfo.size = 1024;
+bufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
+
+VmaAllocationCreateInfo allocCreateInfo = {};
+allocCreateInfo.pool = pool;
+
+VkBuffer buf;
+VmaAllocation alloc;
+VmaAllocationInfo allocInfo;
+vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
+\endcode
+
+You have to free all allocations made from this pool before destroying it.
+
+\code
+vmaDestroyBuffer(allocator, buf, alloc);
+vmaDestroyPool(allocator, pool);
+\endcode
+
+\section custom_memory_pools_MemTypeIndex Choosing memory type index
+
+When creating a pool, you must explicitly specify a memory type index.
+To find the one suitable for your buffers or images, you can use the helper functions
+vmaFindMemoryTypeIndexForBufferInfo() and vmaFindMemoryTypeIndexForImageInfo().
+You need to provide structures with example parameters of the buffers or images
+that you are going to create in that pool.
+
+\code
+VkBufferCreateInfo exampleBufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
+exampleBufCreateInfo.size = 1024; // Whatever.
+exampleBufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT; // Change if needed.
+
+VmaAllocationCreateInfo allocCreateInfo = {};
+allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY; // Change if needed.
+
+uint32_t memTypeIndex;
+vmaFindMemoryTypeIndexForBufferInfo(allocator, &exampleBufCreateInfo, &allocCreateInfo, &memTypeIndex);
+
+VmaPoolCreateInfo poolCreateInfo = {};
+poolCreateInfo.memoryTypeIndex = memTypeIndex;
+// ...
+\endcode
+
+When creating buffers/images allocated in that pool, provide the following parameters:
+
+- `VkBufferCreateInfo`: Prefer to pass the same parameters as above.
+ Otherwise you risk creating resources in a memory type that is not suitable for them, which may result in undefined behavior.
+ Using different `VK_BUFFER_USAGE_` flags may work, but you shouldn't create images in a pool intended for buffers
+ or the other way around.
+- VmaAllocationCreateInfo: You don't need to pass the same parameters. Fill only the `pool` member.
+ Other members are ignored anyway.
+
+\section linear_algorithm Linear allocation algorithm
+
+Each Vulkan memory block managed by this library has accompanying metadata that
+keeps track of used and unused regions. By default, the metadata structure and
+algorithm try to find the best place for new allocations among free regions to
+optimize memory usage. This way you can allocate and free objects in any order.
+
+![Default allocation algorithm](../gfx/Linear_allocator_1_algo_default.png)
+
+Sometimes there is a need for a simpler, linear allocation algorithm. You can
+create a custom pool that uses such an algorithm by adding the flag
+#VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT to VmaPoolCreateInfo::flags while creating
+the #VmaPool object. An alternative metadata management is then used. It always
+creates new allocations after the last one and doesn't reuse free regions left by
+allocations freed in the middle. This results in better allocation performance and
+less memory consumed by metadata.
+
+![Linear allocation algorithm](../gfx/Linear_allocator_2_algo_linear.png)
+
+With this one flag, you can create a custom pool that can be used in many ways:
+free-at-once, stack, double stack, and ring buffer. See the creation sketch below
+and the subsections that follow for details.
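+
+A minimal sketch of creating such a pool, reusing `memTypeIndex` found as
+described earlier (the block size is an arbitrary assumption):
+
+\code
+VmaPoolCreateInfo poolCreateInfo = {};
+poolCreateInfo.memoryTypeIndex = memTypeIndex;
+poolCreateInfo.flags = VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT;
+poolCreateInfo.blockSize = 16ull * 1024 * 1024;
+poolCreateInfo.maxBlockCount = 1; // Required if you want ring buffer or double stack behavior.
+
+VmaPool pool;
+vmaCreatePool(allocator, &poolCreateInfo, &pool);
+\endcode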
+
+\subsection linear_algorithm_free_at_once Free-at-once
+
+In a pool that uses the linear algorithm, you still need to free all the allocations
+individually, e.g. by using vmaFreeMemory() or vmaDestroyBuffer(). You can free
+them in any order. New allocations are always made after the last one - free space
+in the middle is not reused. However, when you release all the allocations and
+the pool becomes empty, allocation starts from the beginning again. This way you
+can use the linear algorithm to speed up creation of allocations that you are going
+to release all at once.
+
+![Free-at-once](../gfx/Linear_allocator_3_free_at_once.png)
+
+This mode is also available for pools created with a VmaPoolCreateInfo::maxBlockCount
+value that allows multiple memory blocks.
+
+\subsection linear_algorithm_stack Stack
+
+When you free an allocation that was created last, its space can be reused.
+Thanks to this, if you always release allocations in the order opposite to their
+creation (LIFO - Last In, First Out), you can achieve the behavior of a stack.
+
+![Stack](../gfx/Linear_allocator_4_stack.png)
+
+This mode is also available for pools created with a VmaPoolCreateInfo::maxBlockCount
+value that allows multiple memory blocks.
+
+\subsection linear_algorithm_double_stack Double stack
+
+The space reserved by a custom pool with the linear algorithm may be used as two
+stacks:
+
+- The first, default one, growing up from offset 0.
+- The second, "upper" one, growing down from the end towards lower offsets.
+
+To make an allocation from the upper stack, add the flag #VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT
+to VmaAllocationCreateInfo::flags, as in the sketch below.
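+
+A minimal sketch, assuming a linear pool `pool` created as above and a
+`bufCreateInfo` like in the earlier examples:
+
+\code
+VmaAllocationCreateInfo allocCreateInfo = {};
+allocCreateInfo.pool = pool;
+allocCreateInfo.flags = VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT; // Allocate from the upper stack.
+
+VkBuffer buf;
+VmaAllocation alloc;
+vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, nullptr);
+\endcode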
+
+![Double stack](../gfx/Linear_allocator_7_double_stack.png)
+
+The double stack is available only in pools with one memory block -
+VmaPoolCreateInfo::maxBlockCount must be 1. Otherwise behavior is undefined.
+
+When the two stacks' ends meet, so that there is not enough space between them for a
+new allocation, such an allocation fails with the usual
+`VK_ERROR_OUT_OF_DEVICE_MEMORY` error.
+
+\subsection linear_algorithm_ring_buffer Ring buffer
+
+When you free some allocations from the beginning and there is not enough free space
+for a new one at the end of the pool, the allocator's "cursor" wraps around to the
+beginning and starts allocating there. Thanks to this, if you always release
+allocations in the same order as you created them (FIFO - First In, First Out),
+you can achieve the behavior of a ring buffer / queue.
+
+![Ring buffer](../gfx/Linear_allocator_5_ring_buffer.png)
+
+Pools with the linear algorithm support [lost allocations](@ref lost_allocations) when used as a ring buffer.
+If there is not enough free space for a new allocation, but existing allocations
+from the front of the queue can become lost, they become lost and the allocation
+succeeds.
+
+![Ring buffer with lost allocations](../gfx/Linear_allocator_6_ring_buffer_lost.png)
+
+The ring buffer is available only in pools with one memory block -
+VmaPoolCreateInfo::maxBlockCount must be 1. Otherwise behavior is undefined.
+
+\section buddy_algorithm Buddy allocation algorithm
+
+There is another allocation algorithm that can be used with custom pools, called
+"buddy". Its internal data structure is based on a tree of blocks, each with a
+size that is a power of two and half of its parent's size. When you want to
+allocate memory of a certain size, a free node in the tree is located. If it's too
+large, it is recursively split into two halves (called "buddies"). However, if the
+requested allocation size is not a power of two, the size of the tree node is
+aligned up to the nearest power of two and the remaining space is wasted. When
+two buddy nodes become free, they are merged back into one larger node.
+
+![Buddy allocator](../gfx/Buddy_allocator.png)
+
+The advantage of the buddy allocation algorithm over the default algorithm is faster
+allocation and deallocation, as well as smaller external fragmentation. The
+disadvantage is more wasted space (internal fragmentation).
+
+For more information, please read ["Buddy memory allocation" on Wikipedia](https://en.wikipedia.org/wiki/Buddy_memory_allocation)
+or other sources that describe this concept in general.
+
+To use the buddy allocation algorithm with a custom pool, add the flag
+#VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT to VmaPoolCreateInfo::flags while creating
+the #VmaPool object, as in the sketch below.
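+
+A minimal sketch, reusing `memTypeIndex` from earlier (the power-of-two block
+size is an arbitrary assumption):
+
+\code
+VmaPoolCreateInfo poolCreateInfo = {};
+poolCreateInfo.memoryTypeIndex = memTypeIndex;
+poolCreateInfo.flags = VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT;
+poolCreateInfo.blockSize = 64ull * 1024 * 1024; // Preferably a power of two.
+
+VmaPool pool;
+vmaCreatePool(allocator, &poolCreateInfo, &pool);
+\endcode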
+
+Several limitations apply to pools that use the buddy algorithm:
+
+- It is recommended to use a VmaPoolCreateInfo::blockSize that is a power of two.
+ Otherwise, only the largest power of two smaller than the size is used for
+ allocations. The remaining space always stays unused.
+- [Margins](@ref debugging_memory_usage_margins) and
+ [corruption detection](@ref debugging_memory_usage_corruption_detection)
+ don't work in such pools.
+- [Lost allocations](@ref lost_allocations) don't work in such pools. You can
+ use them, but they never become lost. Support may be added in the future.
+- [Defragmentation](@ref defragmentation) doesn't work with allocations made from
+ such a pool.
+
+\page defragmentation Defragmentation
+
+Interleaved allocations and deallocations of many objects of varying size can
+cause fragmentation over time, which can lead to a situation where the library is unable
+to find a contiguous range of free memory for a new allocation even though there is
+enough free space in total, just scattered across many small free ranges between existing
+allocations.
+
+To mitigate this problem, you can use the defragmentation feature:
+structure #VmaDefragmentationInfo2 and functions vmaDefragmentationBegin() and vmaDefragmentationEnd().
+Given a set of allocations,
+these functions can move them to compact used memory, ensure more contiguous free
+space, and possibly also free some `VkDeviceMemory` blocks.
+
+What the defragmentation does is:
+
+- Updates #VmaAllocation objects to point to the new `VkDeviceMemory` and offset.
+ After an allocation has been moved, its VmaAllocationInfo::deviceMemory and/or
+ VmaAllocationInfo::offset change. You must query them again using
+ vmaGetAllocationInfo() if you need them.
+- Moves the actual data in memory.
+
+What it doesn't do, so you need to do it yourself:
+
+- Recreate buffers and images that were bound to defragmented allocations and
+ bind them to their new places in memory.
+ You must use `vkDestroyBuffer()`, `vkDestroyImage()`,
+ `vkCreateBuffer()`, and `vkCreateImage()` for that purpose, NOT vmaDestroyBuffer(),
+ vmaDestroyImage(), vmaCreateBuffer(), or vmaCreateImage(), because you don't need to
+ destroy or create the allocation objects!
+- Recreate views and update descriptors that point to these buffers and images.
+
+\section defragmentation_cpu Defragmenting CPU memory
+
+The following example demonstrates how you can run defragmentation on the CPU.
+Only allocations created in memory types that are `HOST_VISIBLE` can be defragmented this way.
+Others are ignored.
+
+The way it works:
+
+- It temporarily maps entire memory blocks when necessary.
+- It moves data using the `memmove()` function.
+
+\code
+// Given following variables already initialized:
+VkDevice device;
+VmaAllocator allocator;
+std::vector<VkBuffer> buffers;
+std::vector<VmaAllocation> allocations;
+
+
+const uint32_t allocCount = (uint32_t)allocations.size();
+std::vector<VkBool32> allocationsChanged(allocCount);
+
+VmaDefragmentationInfo2 defragInfo = {};
+defragInfo.allocationCount = allocCount;
+defragInfo.pAllocations = allocations.data();
+defragInfo.pAllocationsChanged = allocationsChanged.data();
+defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE; // No limit.
+defragInfo.maxCpuAllocationsToMove = UINT32_MAX; // No limit.
+
+VmaDefragmentationContext defragCtx;
+vmaDefragmentationBegin(allocator, &defragInfo, nullptr, &defragCtx);
+vmaDefragmentationEnd(allocator, defragCtx);
+
+for(uint32_t i = 0; i < allocCount; ++i)
+{
+ if(allocationsChanged[i])
+ {
+ // Destroy buffer that is immutably bound to memory region which is no longer valid.
+ vkDestroyBuffer(device, buffers[i], nullptr);
+
+ // Create new buffer with same parameters.
+ VkBufferCreateInfo bufferInfo = ...;
+ vkCreateBuffer(device, &bufferInfo, nullptr, &buffers[i]);
+
+ // You can make a dummy call to vkGetBufferMemoryRequirements() here to silence validation layer warnings.
+
+ // Bind new buffer to new memory region. Data contained in it is already moved.
+ VmaAllocationInfo allocInfo;
+ vmaGetAllocationInfo(allocator, allocations[i], &allocInfo);
+ vkBindBufferMemory(device, buffers[i], allocInfo.deviceMemory, allocInfo.offset);
+ }
+}
+\endcode
+
+Setting VmaDefragmentationInfo2::pAllocationsChanged is optional.
+This output array tells whether a particular allocation in VmaDefragmentationInfo2::pAllocations at the same index
+has been modified during defragmentation.
+You can pass null, but then you need to query every allocation passed to defragmentation
+for its new parameters using vmaGetAllocationInfo() if you might need to recreate and rebind a buffer or image associated with it.
+
+If you use [Custom memory pools](@ref choosing_memory_type_custom_memory_pools),
+you can fill VmaDefragmentationInfo2::poolCount and VmaDefragmentationInfo2::pPools
+instead of VmaDefragmentationInfo2::allocationCount and VmaDefragmentationInfo2::pAllocations
+to defragment all allocations in given pools.
+You cannot use VmaDefragmentationInfo2::pAllocationsChanged in that case.
+You can also combine both methods.
+
+\section defragmentation_gpu Defragmenting GPU memory
+
+It is also possible to defragment allocations created in memory types that are not `HOST_VISIBLE`.
+To do that, you need to pass a command buffer that meets the requirements described in
+VmaDefragmentationInfo2::commandBuffer. The way it works:
+
+- It creates temporary buffers and binds them to entire memory blocks when necessary.
+- It issues `vkCmdCopyBuffer()` to passed command buffer.
+
+Example:
+
+\code
+// Given following variables already initialized:
+VkDevice device;
+VmaAllocator allocator;
+VkCommandBuffer commandBuffer;
+std::vector<VkBuffer> buffers;
+std::vector<VmaAllocation> allocations;
+
+
+const uint32_t allocCount = (uint32_t)allocations.size();
+std::vector<VkBool32> allocationsChanged(allocCount);
+
+VkCommandBufferBeginInfo cmdBufBeginInfo = ...;
+vkBeginCommandBuffer(commandBuffer, &cmdBufBeginInfo);
+
+VmaDefragmentationInfo2 defragInfo = {};
+defragInfo.allocationCount = allocCount;
+defragInfo.pAllocations = allocations.data();
+defragInfo.pAllocationsChanged = allocationsChanged.data();
+defragInfo.maxGpuBytesToMove = VK_WHOLE_SIZE; // Notice it's "GPU" this time.
+defragInfo.maxGpuAllocationsToMove = UINT32_MAX; // Notice it's "GPU" this time.
+defragInfo.commandBuffer = commandBuffer;
+
+VmaDefragmentationContext defragCtx;
+vmaDefragmentationBegin(allocator, &defragInfo, nullptr, &defragCtx);
+
+vkEndCommandBuffer(commandBuffer);
+
+// Submit commandBuffer.
+// Wait for a fence that ensures commandBuffer execution finished.
+
+vmaDefragmentationEnd(allocator, defragCtx);
+
+for(uint32_t i = 0; i < allocCount; ++i)
+{
+ if(allocationsChanged[i])
+ {
+ // Destroy buffer that is immutably bound to memory region which is no longer valid.
+ vkDestroyBuffer(device, buffers[i], nullptr);
+
+ // Create new buffer with same parameters.
+ VkBufferCreateInfo bufferInfo = ...;
+ vkCreateBuffer(device, &bufferInfo, nullptr, &buffers[i]);
+
+ // You can make a dummy call to vkGetBufferMemoryRequirements() here to silence validation layer warnings.
+
+ // Bind new buffer to new memory region. Data contained in it is already moved.
+ VmaAllocationInfo allocInfo;
+ vmaGetAllocationInfo(allocator, allocations[i], &allocInfo);
+ vkBindBufferMemory(device, buffers[i], allocInfo.deviceMemory, allocInfo.offset);
+ }
+}
+\endcode
+
+You can combine these two methods by specifying non-zero `maxGpu*` as well as `maxCpu*` parameters.
+The library automatically chooses the best method to defragment each memory pool.
+
+You may choose not to block your entire program waiting until defragmentation finishes,
+but run it in the background instead, as long as you carefully fulfill the requirements
+described for the function vmaDefragmentationBegin().
+
+\section defragmentation_additional_notes Additional notes
+
+It is only legal to defragment allocations bound to:
+
+- buffers
+- images created with `VK_IMAGE_CREATE_ALIAS_BIT` and `VK_IMAGE_TILING_LINEAR`,
+ currently in the `VK_IMAGE_LAYOUT_GENERAL` or `VK_IMAGE_LAYOUT_PREINITIALIZED` layout.
+
+Defragmentation of images created with `VK_IMAGE_TILING_OPTIMAL` or in any other
+layout may give undefined results.
+
+If you defragment allocations bound to images, new images to be bound to the new
+memory region after defragmentation should be created with `VK_IMAGE_LAYOUT_PREINITIALIZED`
+and then transitioned to their original layout from before defragmentation, if
+needed, using an image memory barrier.
+
+While using defragmentation, you may experience validation layer warnings, which you just need to ignore.
+See [Validation layer warnings](@ref general_considerations_validation_layer_warnings).
+
+Please don't expect memory to be fully compacted after defragmentation.
+The algorithms inside are based on heuristics that try to maximize the number of Vulkan
+memory blocks that become totally empty (so they can be released), as well as to maximize contiguous
+empty space inside the remaining blocks, while minimizing the number and size of allocations that
+need to be moved. Some fragmentation may still remain - this is normal.
+
+\section defragmentation_custom_algorithm Writing custom defragmentation algorithm
+
+If you want to implement your own, custom defragmentation algorithm,
+there is infrastructure prepared for that,
+but it is not exposed through the library API - you need to hack its source code.
+Here are the steps needed to do this:
+
+-# The main thing you need to do is define your own class derived from the abstract base
+ class `VmaDefragmentationAlgorithm` and implement your version of its pure virtual methods.
+ See the definition and comments of this class for details.
+-# Your code needs to interact with device memory block metadata.
+ If you need more access to its data than is provided by its public interface,
+ declare your new class as a friend class, e.g. in class `VmaBlockMetadata_Generic`.
+-# If you want to create a flag that would enable your algorithm or pass some additional
+ flags to configure it, add them to `VmaDefragmentationFlagBits` and use them in
+ VmaDefragmentationInfo2::flags.
+-# Modify function `VmaBlockVectorDefragmentationContext::Begin` to create object
+ of your new class whenever needed.
+
+
+\page lost_allocations Lost allocations
+
+If your game oversubscribes video memory, it may work OK in previous-generation
+graphics APIs (DirectX 9, 10, 11, OpenGL) because resources are automatically
+paged to system RAM. In Vulkan you can't rely on that, because when you run out of
+memory, an allocation just fails. If you have more data (e.g. textures) than can
+fit into VRAM and you don't need it all at once, you may want to upload it to the
+GPU on demand and "push out" resources that are not used for a long time to make room
+for new ones, effectively using VRAM (or a certain memory pool) as a form of
+cache. Vulkan Memory Allocator can help you with that by supporting a concept of
+"lost allocations".
+
+To create an allocation that can become lost, include the #VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT
+flag in VmaAllocationCreateInfo::flags. Before using a buffer or image bound to
+such an allocation in every new frame, you need to check that it is not lost.
+To check it, call vmaTouchAllocation().
+If the allocation is lost, you should not use it or the buffer/image bound to it.
+You mustn't forget to destroy this allocation and this buffer/image.
+vmaGetAllocationInfo() can also be used to check the status of the allocation.
+An allocation is lost when the returned VmaAllocationInfo::deviceMemory == `VK_NULL_HANDLE`.
+
+To create an allocation that can make some other allocations lost to make room
+for it, use #VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT flag. You will
+usually use both flags #VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT and
+#VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT at the same time.
+
+Warning! The current implementation uses a quite naive, brute-force algorithm,
+which can make allocation calls that use the #VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT
+flag quite slow. A new, more optimal algorithm and data structure to speed this
+up is planned for the future.
+
+<b>Q: When interleaving creation of new allocations with usage of existing ones,
+how do you make sure that an allocation won't become lost while it's used in the
+current frame?</b>
+
+It is ensured because vmaTouchAllocation() / vmaGetAllocationInfo() not only returns the allocation
+status/parameters and checks whether it is not lost, but when it is not, it also
+atomically marks it as used in the current frame, which makes it impossible for it to
+become lost in that frame. It uses a lockless algorithm, so it works fast and
+doesn't involve locking any internal mutex.
+
+<b>Q: What if my allocation may still be in use by the GPU when it's rendering a
+previous frame while I already submit new frame on the CPU?</b>
+
+You can make sure that allocations "touched" by vmaTouchAllocation() / vmaGetAllocationInfo() will not
+become lost for a number of additional frames back from the current one by
+specifying this number as VmaAllocatorCreateInfo::frameInUseCount (for the default
+memory pools) and VmaPoolCreateInfo::frameInUseCount (for custom pools).
+
+<b>Q: How do you inform the library when new frame starts?</b>
+
+You need to call the function vmaSetCurrentFrameIndex().
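+
+A minimal sketch (the frame counter is your own; its name is an assumption):
+
+\code
+uint32_t frameIndex = 0; // Maintained by your application.
+
+// At the beginning of each new frame:
+frameIndex++;
+vmaSetCurrentFrameIndex(allocator, frameIndex);
+\endcode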
+
+Example code:
+
+\code
+struct MyBuffer
+{
+ VkBuffer m_Buf = VK_NULL_HANDLE;
+ VmaAllocation m_Alloc = VK_NULL_HANDLE;
+
+ // Called when the buffer is really needed in the current frame.
+ void EnsureBuffer();
+};
+
+void MyBuffer::EnsureBuffer()
+{
+ // Buffer has been created.
+ if(m_Buf != VK_NULL_HANDLE)
+ {
+ // Check if its allocation is not lost + mark it as used in current frame.
+ if(vmaTouchAllocation(allocator, m_Alloc))
+ {
+ // It's all OK - safe to use m_Buf.
+ return;
+ }
+ }
+
+ // Buffer doesn't exist yet or is lost - destroy and recreate it.
+
+ vmaDestroyBuffer(allocator, m_Buf, m_Alloc);
+
+ VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
+ bufCreateInfo.size = 1024;
+ bufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
+
+ VmaAllocationCreateInfo allocCreateInfo = {};
+ allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
+ allocCreateInfo.flags = VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT |
+ VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT;
+
+ vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &m_Buf, &m_Alloc, nullptr);
+}
+\endcode
+
+When using lost allocations, you may see some Vulkan validation layer warnings
+about overlapping regions of memory bound to different kinds of buffers and
+images. This is still valid as long as you implement proper handling of lost
+allocations (like in the example above) and don't actually use the lost ones.
+
+You can create an allocation that is already in lost state from the beginning using function
+vmaCreateLostAllocation(). It may be useful if you need a "dummy" allocation that is not null.
+
+You can call the function vmaMakePoolAllocationsLost() to set all eligible allocations
+in a specified custom pool to the lost state.
+Allocations that have been "touched" in the current frame or in the last
+VmaPoolCreateInfo::frameInUseCount frames cannot become lost.
+
+<b>Q: Can I touch allocation that cannot become lost?</b>
+
+Yes, although it has no visible effect.
+Calls to vmaGetAllocationInfo() and vmaTouchAllocation() update the last-use frame index
+also for allocations that cannot become lost, but the only way to observe this is to dump the
+internal allocator state using vmaBuildStatsString().
+You can use this feature for debugging purposes to explicitly mark allocations that you use
+in the current frame and then analyze the JSON dump to see how long each allocation stays unused.
+
+
+\page statistics Statistics
+
+This library contains functions that return information about its internal state,
+especially the amount of memory allocated from Vulkan.
+Please keep in mind that these functions need to traverse all internal data structures
+to gather this information, so they may be quite time-consuming.
+Don't call them too often.
+
+\section statistics_numeric_statistics Numeric statistics
+
+You can query for overall statistics of the allocator using the function vmaCalculateStats().
+Information is returned in the structure #VmaStats.
+It contains #VmaStatInfo - the number of allocated blocks, the number of allocations
+(occupied ranges in these blocks), the number of unused (free) ranges in these blocks,
+the number of bytes used and unused (but still allocated from Vulkan), and other information.
+Statistics are provided per memory heap, per memory type, and as a total for the whole allocator.
+
+You can query for statistics of a custom pool using the function vmaGetPoolStats().
+Information is returned in the structure #VmaPoolStats.
+
+You can query for information about a specific allocation using the function vmaGetAllocationInfo().
+It fills the structure #VmaAllocationInfo.
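+
+For example, a minimal sketch of reading a few fields of the total statistics
+(summed over all heaps and types):
+
+\code
+VmaStats stats;
+vmaCalculateStats(allocator, &stats);
+
+printf("Blocks: %u, allocations: %u\n",
+    stats.total.blockCount, stats.total.allocationCount);
+printf("Used bytes: %llu, unused bytes: %llu\n",
+    (unsigned long long)stats.total.usedBytes,
+    (unsigned long long)stats.total.unusedBytes);
+\endcode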
+
+\section statistics_json_dump JSON dump
+
+You can dump the internal state of the allocator to a string in JSON format using the function vmaBuildStatsString().
+The result is guaranteed to be valid JSON.
+It uses ANSI encoding.
+Any strings provided by the user (see [Allocation names](@ref allocation_names))
+are copied as-is and properly escaped for JSON, so if they use UTF-8, ISO-8859-2 or any other encoding,
+this JSON string can be treated as using that encoding.
+It must be freed using the function vmaFreeStatsString().
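+
+A minimal sketch of creating and freeing such a dump (the third parameter
+enables the detailed map of memory blocks):
+
+\code
+char* statsString = nullptr;
+vmaBuildStatsString(allocator, &statsString, VK_TRUE);
+// ... save statsString to a file or print it ...
+vmaFreeStatsString(allocator, statsString);
+\endcode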
+
+The format of this JSON string is not part of the official documentation of the library,
+but it will not change in a backward-incompatible way without an increase of the library's
+major version number and an appropriate mention in the changelog.
+
+The JSON string contains all the data that can be obtained using vmaCalculateStats().
+It can also contain a detailed map of allocated memory blocks and their regions -
+free and occupied by allocations.
+This allows you e.g. to visualize the memory or assess fragmentation.
+
+
+\page allocation_annotation Allocation names and user data
+
+\section allocation_user_data Allocation user data
+
+You can annotate allocations with your own information, e.g. for debugging purposes.
+To do that, fill the VmaAllocationCreateInfo::pUserData field when creating
+an allocation. It's an opaque `void*` pointer. You can use it e.g. as a pointer,
+some handle, index, key, ordinal number, or any other value that associates
+the allocation with your custom metadata.
+
+\code
+VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
+// Fill bufferInfo...
+
+MyBufferMetadata* pMetadata = CreateBufferMetadata();
+
+VmaAllocationCreateInfo allocCreateInfo = {};
+allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
+allocCreateInfo.pUserData = pMetadata;
+
+VkBuffer buffer;
+VmaAllocation allocation;
+vmaCreateBuffer(allocator, &bufferInfo, &allocCreateInfo, &buffer, &allocation, nullptr);
+\endcode
+
+The pointer may be later retrieved as VmaAllocationInfo::pUserData:
+
+\code
+VmaAllocationInfo allocInfo;
+vmaGetAllocationInfo(allocator, allocation, &allocInfo);
+MyBufferMetadata* pMetadata = (MyBufferMetadata*)allocInfo.pUserData;
+\endcode
+
+It can also be changed later using the function vmaSetAllocationUserData().
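+
+A minimal sketch (the new metadata pointer is an assumption):
+
+\code
+// Replace the user data of an existing allocation.
+vmaSetAllocationUserData(allocator, allocation, pNewMetadata);
+\endcode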
+
+Values of (non-zero) allocations' `pUserData` are printed in the JSON report created by
+vmaBuildStatsString(), in hexadecimal form.
+
+\section allocation_names Allocation names
+
+There is an alternative mode available, where the `pUserData` pointer is used to point to
+a null-terminated string, giving a name to the allocation. To use this mode,
+set the #VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT flag in VmaAllocationCreateInfo::flags.
+Then `pUserData` passed as VmaAllocationCreateInfo::pUserData or as an argument to
+vmaSetAllocationUserData() must be either null or a pointer to a null-terminated string.
+The library creates an internal copy of the string, so the pointer you pass doesn't need
+to be valid for the whole lifetime of the allocation. You can free it after the call.
+
+\code
+VkImageCreateInfo imageInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
+// Fill imageInfo...
+
+std::string imageName = "Texture: ";
+imageName += fileName;
+
+VmaAllocationCreateInfo allocCreateInfo = {};
+allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
+allocCreateInfo.flags = VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT;
+allocCreateInfo.pUserData = imageName.c_str();
+
+VkImage image;
+VmaAllocation allocation;
+vmaCreateImage(allocator, &imageInfo, &allocCreateInfo, &image, &allocation, nullptr);
+\endcode
+
+The value of the allocation's `pUserData` pointer will then be different from the one
+you passed when setting the allocation's name - it points to an internally managed
+buffer that holds a copy of the string.
+
+\code
+VmaAllocationInfo allocInfo;
+vmaGetAllocationInfo(allocator, allocation, &allocInfo);
+const char* imageName = (const char*)allocInfo.pUserData;
+printf("Image name: %s\n", imageName);
+\endcode
+
+That string is also printed in JSON report created by vmaBuildStatsString().
+
+
+\page debugging_memory_usage Debugging incorrect memory usage
+
+If you suspect a bug with memory usage, like usage of uninitialized memory or
+memory being overwritten out of bounds of an allocation,
+you can use debug features of this library to verify this.
+
+\section debugging_memory_usage_initialization Memory initialization
+
+If you experience a bug with incorrect and nondeterministic data in your program and you
+suspect that uninitialized memory is being read, you can enable automatic memory
+initialization to verify this. To do it, define the macro `VMA_DEBUG_INITIALIZE_ALLOCATIONS` to 1.
+
+\code
+#define VMA_DEBUG_INITIALIZE_ALLOCATIONS 1
+#include "vk_mem_alloc.h"
+\endcode
+
+It makes the memory of all new allocations initialized to the bit pattern `0xDCDCDCDC`.
+Before an allocation is destroyed, its memory is filled with the bit pattern `0xEFEFEFEF`.
+Memory is automatically mapped and unmapped if necessary.
+
+If you find these values while debugging your program, chances are good that you incorrectly
+read Vulkan memory that is allocated but not initialized, or already freed, respectively.
+
+Memory initialization works only with memory types that are `HOST_VISIBLE`.
+It also works with dedicated allocations.
+It doesn't work with allocations created with the #VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT flag,
+as they cannot be mapped.
+
+\section debugging_memory_usage_margins Margins
+
+By default, allocations are laid out in memory blocks next to each other if possible
+(considering required alignment, `bufferImageGranularity`, and `nonCoherentAtomSize`).
+
+![Allocations without margin](../gfx/Margins_1.png)
+
+Define the macro `VMA_DEBUG_MARGIN` to some non-zero value (e.g. 16) to enforce the specified
+number of bytes as a margin before and after every allocation.
+
+\code
+#define VMA_DEBUG_MARGIN 16
+#include "vk_mem_alloc.h"
+\endcode
+
+![Allocations with margin](../gfx/Margins_2.png)
+
+If your bug goes away after enabling margins, it may be caused by memory
+being overwritten outside of allocation boundaries. This is not 100% certain though:
+a change in application behavior may also be caused by a different order and distribution
+of allocations across memory blocks after margins are applied.
+
+The margin is applied also before the first and after the last allocation in a block.
+Between two adjacent allocations, it occurs only once.
+
+Margins work with all types of memory.
+
+The margin is applied only to allocations made out of memory blocks, not to dedicated
+allocations, which have their own memory block of a specific size.
+It is thus not applied to allocations made using the #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT flag
+or to those automatically turned into dedicated allocations, e.g. due to their
+large size or as recommended by the VK_KHR_dedicated_allocation extension.
+Margins are also not active in custom pools created with the #VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT flag.
+
+Margins appear in [JSON dump](@ref statistics_json_dump) as part of free space.
+
+Note that enabling margins increases memory usage and fragmentation.
+
+\section debugging_memory_usage_corruption_detection Corruption detection
+
+You can additionally define the macro `VMA_DEBUG_DETECT_CORRUPTION` to 1 to enable validation
+of the contents of the margins.
+
+\code
+#define VMA_DEBUG_MARGIN 16
+#define VMA_DEBUG_DETECT_CORRUPTION 1
+#include "vk_mem_alloc.h"
+\endcode
+
+When this feature is enabled, the number of bytes specified as `VMA_DEBUG_MARGIN`
+(it must be a multiple of 4) before and after every allocation is filled with a magic number.
+This idea is also known as a "canary".
+Memory is automatically mapped and unmapped if necessary.
+
+This number is validated automatically when the allocation is destroyed.
+If it's not equal to the expected value, `VMA_ASSERT()` is executed.
+It clearly means that either the CPU or the GPU overwrote memory outside the boundaries
+of the allocation, which indicates a serious bug.
+
+You can also explicitly request checking the margins of all allocations in all memory blocks
+that belong to specified memory types by using the function vmaCheckCorruption(),
+or in memory blocks that belong to a specified custom pool, by using the function
+vmaCheckPoolCorruption(), as in the sketch below.
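+
+A minimal sketch of checking all memory types at once (`UINT32_MAX` acts as a
+bit mask allowing every memory type):
+
+\code
+VkResult res = vmaCheckCorruption(allocator, UINT32_MAX);
+// VK_SUCCESS: validation ran and found no corruption.
+// VK_ERROR_FEATURE_NOT_PRESENT: no memory type supported the check.
+\endcode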
+
+Margin validation (corruption detection) works only for memory types that are
+`HOST_VISIBLE` and `HOST_COHERENT`.
+
+
+\page record_and_replay Record and replay
+
+\section record_and_replay_introduction Introduction
+
+While using the library, the sequence of calls to its functions together with their
+parameters can be recorded to a file and later replayed using a standalone player
+application. It can be useful to:
+
+- Test correctness - check whether the same sequence of calls causes a crash or
+ failures on a target platform.
+- Gather statistics - see the number of allocations, peak memory usage, number of
+ calls, etc.
+- Benchmark performance - see how much time it takes to replay the whole
+ sequence.
+
+\section record_and_replay_usage Usage
+
+<b>To record a sequence of calls to a file:</b> Fill in the
+VmaAllocatorCreateInfo::pRecordSettings member while creating the #VmaAllocator
+object. The file is opened and written to during the whole lifetime of the allocator.
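+
+A minimal sketch (the file path is an arbitrary assumption):
+
+\code
+VmaRecordSettings recordSettings = {};
+recordSettings.pFilePath = "MyRecording.csv";
+
+VmaAllocatorCreateInfo allocatorInfo = {};
+// ... fill physicalDevice, device, and other members as usual ...
+allocatorInfo.pRecordSettings = &recordSettings;
+
+VmaAllocator allocator;
+vmaCreateAllocator(&allocatorInfo, &allocator);
+\endcode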
+
+<b>To replay a file:</b> Use VmaReplay - a standalone command-line program.
+A precompiled binary can be found in the "bin" directory.
+Its source can be found in the "src/VmaReplay" directory.
+Its project is generated by Premake.
+The command-line syntax is printed when the program is launched without parameters.
+Basic usage:
+
+ VmaReplay.exe MyRecording.csv
+
+<b>Documentation of the file format</b> can be found in the file "docs/Recording file format.md".
+It is a human-readable text file in CSV format (Comma Separated Values).
+
+\section record_and_replay_additional_considerations Additional considerations
+
+- Replaying a file that was recorded on a different GPU (with different parameters
+ like `bufferImageGranularity`, `nonCoherentAtomSize`, and especially a different
+ set of memory heaps and types) may give different performance and memory usage
+ results, as well as issue some warnings and errors.
+- The current implementation of recording in VMA, as well as the VmaReplay application, is
+ coded and tested only on Windows. Inclusion of the recording code is driven by the
+ `VMA_RECORDING_ENABLED` macro. Support for other platforms should be easy to
+ add. Contributions are welcome.
+- Currently, calls to the vmaDefragment() function are not recorded.
+
+
+\page usage_patterns Recommended usage patterns
+
+See also slides from talk:
+[Sawicki, Adam. Advanced Graphics Techniques Tutorial: Memory management in Vulkan and DX12. Game Developers Conference, 2018](https://www.gdcvault.com/play/1025458/Advanced-Graphics-Techniques-Tutorial-New)
+
+
+\section usage_patterns_simple Simple patterns
+
+\subsection usage_patterns_simple_render_targets Render targets
+
+<b>When:</b>
+Any resources that you frequently write and read on the GPU,
+e.g. images used as color attachments (aka "render targets"), depth-stencil attachments,
+and images/buffers used as storage images/buffers (aka "Unordered Access Views (UAV)").
+
+<b>What to do:</b>
+Create them in video memory that is fastest to access from GPU using
+#VMA_MEMORY_USAGE_GPU_ONLY.
+
+Consider using [VK_KHR_dedicated_allocation](@ref vk_khr_dedicated_allocation) extension
+and/or manually creating them as dedicated allocations using #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT,
+especially if they are large or if you plan to destroy and recreate them e.g. when
+display resolution changes.
+Prefer to create such resources first and all other GPU resources (like textures and vertex buffers) later.
+
+\subsection usage_patterns_simple_immutable_resources Immutable resources
+
+<b>When:</b>
+Any resources that you fill on CPU only once (aka "immutable") or infrequently
+and then read frequently on GPU,
+e.g. textures, vertex and index buffers, constant buffers that don't change often.
+
+<b>What to do:</b>
+Create them in video memory that is fastest to access from GPU using
+#VMA_MEMORY_USAGE_GPU_ONLY.
+
+To initialize the content of such a resource, create a CPU-side (aka "staging") copy of it
+in system memory - #VMA_MEMORY_USAGE_CPU_ONLY, map it, fill it,
+and submit a transfer from it to the GPU resource.
+You can keep the staging copy if you need it for another upload transfer in the future.
+If you don't, you can destroy it or reuse the buffer for uploading a different resource
+after the transfer finishes.
+
+Prefer to create just buffers in system memory rather than images, even for uploading textures.
+Use `vkCmdCopyBufferToImage()`.
+Don't use images with `VK_IMAGE_TILING_LINEAR`.
+
+\subsection usage_patterns_dynamic_resources Dynamic resources
+
+<b>When:</b>
+Any resources that change frequently (aka "dynamic"), e.g. every frame or every draw call,
+written on CPU, read on GPU.
+
+<b>What to do:</b>
+Create them using #VMA_MEMORY_USAGE_CPU_TO_GPU.
+You can map them and write to them directly on the CPU, as well as read from them on the GPU.
+
+This is a more complex situation. Different solutions are possible,
+and the best one depends on the specific GPU type, but you can use this simple approach to start with.
+Prefer to write to such resources sequentially (e.g. using `memcpy`).
+Don't perform random access or any reads from them on the CPU, as it may be very slow.
+
+\subsection usage_patterns_readback Readback
+
+<b>When:</b>
+Resources that contain data written by GPU that you want to read back on CPU,
+e.g. results of some computations.
+
+<b>What to do:</b>
+Create them using #VMA_MEMORY_USAGE_GPU_TO_CPU.
+You can write to them directly on the GPU, as well as map and read them on the CPU.
+
+\section usage_patterns_advanced Advanced patterns
+
+\subsection usage_patterns_integrated_graphics Detecting integrated graphics
+
+You can support integrated graphics (like Intel HD Graphics, AMD APU) better
+by detecting it in Vulkan.
+To do it, call `vkGetPhysicalDeviceProperties()`, inspect
+`VkPhysicalDeviceProperties::deviceType` and look for `VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU`,
+as in the sketch below.
+When you find it, you can assume that memory is unified and all memory types are comparably fast
+to access from the GPU, regardless of `VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT`.
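+
+A minimal sketch of the check (a `physicalDevice` you already selected is assumed):
+
+\code
+VkPhysicalDeviceProperties props;
+vkGetPhysicalDeviceProperties(physicalDevice, &props);
+const bool isIntegrated = props.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
+\endcode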
+
+You can then sum up the sizes of all available memory heaps and treat them all as useful for
+your GPU resources, instead of only the `DEVICE_LOCAL` ones.
+You can also prefer to create your resources in memory types that are `HOST_VISIBLE` to map them
+directly, instead of submitting an explicit transfer (see below).
+
+\subsection usage_patterns_direct_vs_transfer Direct access versus transfer
+
+For resources that you frequently write on the CPU and read on the GPU, many solutions are possible:
+
+-# Create one copy in video memory using #VMA_MEMORY_USAGE_GPU_ONLY,
+ a second copy in system memory using #VMA_MEMORY_USAGE_CPU_ONLY, and submit an explicit transfer each time.
+-# Create just a single copy using #VMA_MEMORY_USAGE_CPU_TO_GPU, map it and fill it on the CPU,
+ read it directly on the GPU.
+-# Create just a single copy using #VMA_MEMORY_USAGE_CPU_ONLY, map it and fill it on the CPU,
+ read it directly on the GPU.
+
+Which solution is the most efficient depends on your resource and especially on the GPU.
+It is best to measure it and then make the decision.
+Some general recommendations:
+
+- On integrated graphics use (2) or (3) to avoid unnecessary time and memory overhead
+ related to using a second copy and making a transfer.
+- For small resources (e.g. constant buffers) use (2).
+ Discrete AMD cards have a special 256 MiB pool of video memory that is directly mappable.
+ Even if the resource ends up in system memory, its data may be cached on the GPU after the first
+ fetch over the PCIe bus.
+- For larger resources (e.g. textures), decide between (1) and (2).
+ You may want to differentiate NVIDIA and AMD, e.g. by looking for a memory type that is
+ both `DEVICE_LOCAL` and `HOST_VISIBLE`. When you find it, use (2), otherwise use (1).
+
+Similarly, for resources that you frequently write on the GPU and read on the CPU, multiple
+solutions are possible:
+
+-# Create one copy in video memory using #VMA_MEMORY_USAGE_GPU_ONLY,
+ a second copy in system memory using #VMA_MEMORY_USAGE_GPU_TO_CPU, and submit an explicit transfer each time.
+-# Create just single copy using #VMA_MEMORY_USAGE_GPU_TO_CPU, write to it directly on GPU,
+ map it and read it on CPU.
+
+You should take some measurements to decide which option is faster for your specific
+resource.
+
+If you don't want to specialize your code for specific types of GPUs, you can still make
+a simple optimization for cases when your resource ends up in mappable memory: use it
+directly in this case instead of creating a CPU-side staging copy.
+For details, see [Finding out if memory is mappable](@ref memory_mapping_finding_if_memory_mappable).
+
+
+\page configuration Configuration
+
+Please check "CONFIGURATION SECTION" in the code to find macros that you can define
+before each include of this file or change directly in this file to provide
+your own implementation of basic facilities like assert, `min()` and `max()` functions,
+mutex, atomic etc.
+The library uses its own implementation of containers by default, but you can switch to using
+STL containers instead.
+
+\section config_Vulkan_functions Pointers to Vulkan functions
+
+The library uses Vulkan functions straight from the `vulkan.h` header by default.
+If you want to provide your own pointers to these functions, e.g. fetched using
+`vkGetInstanceProcAddr()` and `vkGetDeviceProcAddr()`:
+
+-# Define `VMA_STATIC_VULKAN_FUNCTIONS 0`.
+-# Provide valid pointers through VmaAllocatorCreateInfo::pVulkanFunctions.
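+
+A minimal sketch, assuming you fetched the pointers yourself (only two members
+shown; #VmaVulkanFunctions has one member per Vulkan function used by the library):
+
+\code
+VmaVulkanFunctions vulkanFunctions = {};
+vulkanFunctions.vkAllocateMemory = fetchedVkAllocateMemory; // Fetched via vkGetDeviceProcAddr().
+vulkanFunctions.vkFreeMemory = fetchedVkFreeMemory;
+// ... fill all remaining members the same way ...
+
+VmaAllocatorCreateInfo allocatorInfo = {};
+allocatorInfo.pVulkanFunctions = &vulkanFunctions;
+\endcode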
+
+\section custom_memory_allocator Custom host memory allocator
+
+If you use a custom allocator for CPU memory rather than the default operators `new`
+and `delete` from C++, you can make this library use your allocator as well,
+by filling the optional member VmaAllocatorCreateInfo::pAllocationCallbacks. These
+functions will be passed to Vulkan, as well as used by the library itself, to
+make any CPU-side allocations.
+
+\section allocation_callbacks Device memory allocation callbacks
+
+The library makes calls to `vkAllocateMemory()` and `vkFreeMemory()` internally.
+You can set up callbacks to be informed about these calls, e.g. for the purpose
+of gathering some statistics. To do it, fill the optional member
+VmaAllocatorCreateInfo::pDeviceMemoryCallbacks.
+
+\section heap_memory_limit Device heap memory limit
+
+When device memory of a certain heap runs out of free space, new allocations may
+fail (returning an error code) or they may succeed, silently pushing some existing
+memory blocks from GPU VRAM to system RAM (which degrades performance). This
+behavior is implementation-dependent - it depends on the GPU vendor and graphics
+driver.
+
+On AMD cards it can be controlled while creating the Vulkan device object by using
+the VK_AMD_memory_allocation_behavior extension, if available.
+
+Alternatively, if you want to test how your program behaves with a limited amount of Vulkan device
+memory available, without switching to a graphics card that really has
+smaller VRAM, you can use a feature of this library intended for this purpose.
+To do it, fill the optional member VmaAllocatorCreateInfo::pHeapSizeLimit, as in the sketch below.
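+
+A minimal sketch, assuming you want to limit heap 0 to 1 GiB and leave the other
+heaps unrestricted (`VK_WHOLE_SIZE` means no limit; the array is sized generously
+for illustration):
+
+\code
+VkDeviceSize heapSizeLimit[VK_MAX_MEMORY_HEAPS];
+for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
+    heapSizeLimit[i] = VK_WHOLE_SIZE; // No limit on this heap.
+heapSizeLimit[0] = 1ull * 1024 * 1024 * 1024; // Limit heap 0 to 1 GiB.
+
+VmaAllocatorCreateInfo allocatorInfo = {};
+allocatorInfo.pHeapSizeLimit = heapSizeLimit;
+\endcode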
+
+
+
+\page vk_khr_dedicated_allocation VK_KHR_dedicated_allocation
+
+VK_KHR_dedicated_allocation is a Vulkan extension which can be used to improve
+performance on some GPUs. It augments the Vulkan API with the possibility to query
+the driver whether it prefers a particular buffer or image to have its own, dedicated
+allocation (a separate `VkDeviceMemory` block) for better efficiency - to be able
+to do some internal optimizations.
+
+The extension is supported by this library. It will be used automatically when
+enabled. To enable it:
+
+1 . When creating Vulkan device, check if the following 2 device extensions are
+supported (call `vkEnumerateDeviceExtensionProperties()` - see the sketch after the
+example below). If yes, enable them (fill `VkDeviceCreateInfo::ppEnabledExtensionNames`).
+
+- VK_KHR_get_memory_requirements2
+- VK_KHR_dedicated_allocation
+
+If you enabled these extensions:
+
+2 . Use #VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT flag when creating
+your #VmaAllocator to inform the library that you enabled the required extensions
+and you want the library to use them.
+
+\code
+allocatorInfo.flags |= VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT;
+
+vmaCreateAllocator(&allocatorInfo, &allocator);
+\endcode
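+
+For step 1, the check itself might look like this - a sketch assuming C++ and standard
+Vulkan enumeration:
+
+\code
+uint32_t extCount = 0;
+vkEnumerateDeviceExtensionProperties(physicalDevice, nullptr, &extCount, nullptr);
+std::vector<VkExtensionProperties> extensions(extCount);
+vkEnumerateDeviceExtensionProperties(physicalDevice, nullptr, &extCount, extensions.data());
+
+bool dedicatedAllocationSupported = false;
+for(const VkExtensionProperties& ext : extensions)
+{
+    if(strcmp(ext.extensionName, VK_KHR_DEDICATED_ALLOCATION_EXTENSION_NAME) == 0)
+        dedicatedAllocationSupported = true;
+    // Check VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME the same way.
+}
+\endcode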
+
+That's all. The extension will be automatically used whenever you create a
+buffer using vmaCreateBuffer() or image using vmaCreateImage().
+
+When using the extension together with Vulkan Validation Layer, you will receive
+warnings like this:
+
+ vkBindBufferMemory(): Binding memory to buffer 0x33 but vkGetBufferMemoryRequirements() has not been called on that buffer.
+
+It is OK - just ignore it. It happens because the library uses
+`vkGetBufferMemoryRequirements2KHR()` instead of the standard
+`vkGetBufferMemoryRequirements()`, while the validation layer seems to be
+unaware of it.
+
+To learn more about this extension, see:
+
+- [VK_KHR_dedicated_allocation in Vulkan specification](https://www.khronos.org/registry/vulkan/specs/1.0-extensions/html/vkspec.html#VK_KHR_dedicated_allocation)
+- [VK_KHR_dedicated_allocation unofficial manual](http://asawicki.info/articles/VK_KHR_dedicated_allocation.php5)
+
+
+
+\page general_considerations General considerations
+
+\section general_considerations_thread_safety Thread safety
+
+- The library has no global state, so separate #VmaAllocator objects can be used
+ independently.
+ There should be no need to create multiple such objects though - one per `VkDevice` is enough.
+- By default, all calls to functions that take #VmaAllocator as first parameter
+ are safe to call from multiple threads simultaneously because they are
+ synchronized internally when needed.
+- When the allocator is created with #VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT
+ flag, calls to functions that take such #VmaAllocator object must be
+ synchronized externally.
+- Access to a #VmaAllocation object must be externally synchronized. For example,
+ you must not call vmaGetAllocationInfo() and vmaMapMemory() from different
+ threads at the same time if you pass the same #VmaAllocation object to these
+ functions.
+
+\section general_considerations_validation_layer_warnings Validation layer warnings
+
+When using this library, you may encounter the following types of warnings issued by
+the Vulkan validation layer. They don't necessarily indicate a bug, so you may need
+to just ignore them.
+
+- *vkBindBufferMemory(): Binding memory to buffer 0xeb8e4 but vkGetBufferMemoryRequirements() has not been called on that buffer.*
+ - It happens when VK_KHR_dedicated_allocation extension is enabled.
+ `vkGetBufferMemoryRequirements2KHR` function is used instead, while validation layer seems to be unaware of it.
+- *Mapping an image with layout VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL can result in undefined behavior if this memory is used by the device. Only GENERAL or PREINITIALIZED should be used.*
+ - It happens when you map a buffer or image, because the library maps entire
+ `VkDeviceMemory` block, where different types of images and buffers may end
+ up together, especially on GPUs with unified memory like Intel.
+- *Non-linear image 0xebc91 is aliased with linear buffer 0xeb8e4 which may indicate a bug.*
+ - It happens when you use lost allocations, and a new image or buffer is
+ created in place of an existing object that became lost.
+ - It may happen also when you use [defragmentation](@ref defragmentation).
+
+\section general_considerations_allocation_algorithm Allocation algorithm
+
+The library uses the following algorithm for allocation, in order:
+
+-# Try to find free range of memory in existing blocks.
+-# If failed, try to create a new block of `VkDeviceMemory`, with preferred block size.
+-# If failed, try to create such block with size/2, size/4, size/8.
+-# If failed and #VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT flag was
+ specified, try to find space in existing blocks, possibly making some other
+ allocations lost.
+-# If failed, try to allocate separate `VkDeviceMemory` for this allocation,
+ just like when you use #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT.
+-# If failed, choose other memory type that meets the requirements specified in
+ VmaAllocationCreateInfo and go to point 1.
+-# If failed, return `VK_ERROR_OUT_OF_DEVICE_MEMORY`.
+
+\section general_considerations_features_not_supported Features not supported
+
+Features deliberately excluded from the scope of this library:
+
+- Data transfer. Uploading (streaming) and downloading data of buffers and images
+ between CPU and GPU memory and related synchronization is the responsibility of the user.
+ Defining some "texture" object that would automatically stream its data from a
+ staging copy in CPU memory to GPU memory would rather be a feature of another,
+ higher-level library implemented on top of VMA.
+- Allocations for imported/exported external memory. They tend to require
+ explicit memory type index and dedicated allocation anyway, so they don't
+ interact with main features of this library. Such special purpose allocations
+ should be made manually, using `vkCreateBuffer()` and `vkAllocateMemory()`.
+- Recreation of buffers and images. Although the library has functions for
+ buffer and image creation (vmaCreateBuffer(), vmaCreateImage()), you need to
+ recreate these objects yourself after defragmentation. That's because the big
+ structures `VkBufferCreateInfo`, `VkImageCreateInfo` are not stored in
+ #VmaAllocation object.
+- Handling CPU memory allocation failures. When dynamically creating small C++
+ objects in CPU memory (not Vulkan memory), allocation failures are not checked
+ and handled gracefully, because that would complicate code significantly and
+ is usually not needed in desktop PC applications anyway.
+- Code free of any compiler warnings. Maintaining the library to compile and
+ work correctly on so many different platforms is hard enough. Being free of
+ any warnings, on any version of any compiler, is simply not feasible.
+- This is a C++ library with C interface.
+ Bindings or ports to any other programming languages are welcomed as external projects and
+ are not going to be included into this repository.
+
+*/
+
+/*
+Define this macro to 0/1 to disable/enable support for recording functionality,
+available through VmaAllocatorCreateInfo::pRecordSettings.
+*/
+#ifndef VMA_RECORDING_ENABLED
+#ifdef _WIN32
+#define VMA_RECORDING_ENABLED 1
+#else
+#define VMA_RECORDING_ENABLED 0
+#endif
+#endif
+
+#ifndef NOMINMAX
+#define NOMINMAX // For windows.h
+#endif
+
+#ifndef VULKAN_H_
+#include <vulkan/vulkan.h>
+#endif
+
+#if VMA_RECORDING_ENABLED
+#include <windows.h>
+#endif
+
+#if !defined(VMA_DEDICATED_ALLOCATION)
+#if VK_KHR_get_memory_requirements2 && VK_KHR_dedicated_allocation
+#define VMA_DEDICATED_ALLOCATION 1
+#else
+#define VMA_DEDICATED_ALLOCATION 0
+#endif
+#endif
+
+/** \struct VmaAllocator
+\brief Represents the main object of this library, once initialized.
+
+Fill structure #VmaAllocatorCreateInfo and call function vmaCreateAllocator() to create it.
+Call function vmaDestroyAllocator() to destroy it.
+
+It is recommended to create just one object of this type per `VkDevice` object,
+right after Vulkan is initialized, and keep it alive until the Vulkan device is destroyed.
+*/
+VK_DEFINE_HANDLE(VmaAllocator)
+
+/// Callback function called after successful vkAllocateMemory.
+typedef void(VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
+ VmaAllocator allocator,
+ uint32_t memoryType,
+ VkDeviceMemory memory,
+ VkDeviceSize size);
+/// Callback function called before vkFreeMemory.
+typedef void(VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
+ VmaAllocator allocator,
+ uint32_t memoryType,
+ VkDeviceMemory memory,
+ VkDeviceSize size);
+
+/** \brief Set of callbacks that the library will call for `vkAllocateMemory` and `vkFreeMemory`.
+
+Provided for informative purpose, e.g. to gather statistics about number of
+allocations or total amount of memory allocated in Vulkan.
+
+Used in VmaAllocatorCreateInfo::pDeviceMemoryCallbacks.
+*/
+typedef struct VmaDeviceMemoryCallbacks {
+ /// Optional, can be null.
+ PFN_vmaAllocateDeviceMemoryFunction pfnAllocate;
+ /// Optional, can be null.
+ PFN_vmaFreeDeviceMemoryFunction pfnFree;
+} VmaDeviceMemoryCallbacks;
+
+/// Flags for created #VmaAllocator.
+typedef enum VmaAllocatorCreateFlagBits {
+ /** \brief Allocator and all objects created from it will not be synchronized internally, so you must guarantee they are used from only one thread at a time or synchronized externally by you.
+
+ Using this flag may increase performance because internal mutexes are not used.
+ */
+ VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT = 0x00000001,
+ /** \brief Enables usage of VK_KHR_dedicated_allocation extension.
+
+ Using this extension will automatically allocate dedicated blocks of memory for
+ some buffers and images instead of suballocating place for them out of bigger
+ memory blocks (as if you explicitly used #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT
+ flag) when it is recommended by the driver. It may improve performance on some
+ GPUs.
+
+ You may set this flag only if you found out that following device extensions are
+ supported, you enabled them while creating Vulkan device passed as
+ VmaAllocatorCreateInfo::device, and you want them to be used internally by this
+ library:
+
+ - VK_KHR_get_memory_requirements2
+ - VK_KHR_dedicated_allocation
+
+When this flag is set, you can experience the following warnings reported by the Vulkan
+validation layer. You can ignore them.
+
+> vkBindBufferMemory(): Binding memory to buffer 0x2d but vkGetBufferMemoryRequirements() has not been called on that buffer.
+ */
+ VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT = 0x00000002,
+
+ VMA_ALLOCATOR_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VmaAllocatorCreateFlagBits;
+typedef VkFlags VmaAllocatorCreateFlags;
+
+/** \brief Pointers to some Vulkan functions - a subset used by the library.
+
+Used in VmaAllocatorCreateInfo::pVulkanFunctions.
+*/
+typedef struct VmaVulkanFunctions {
+ PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
+ PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
+ PFN_vkAllocateMemory vkAllocateMemory;
+ PFN_vkFreeMemory vkFreeMemory;
+ PFN_vkMapMemory vkMapMemory;
+ PFN_vkUnmapMemory vkUnmapMemory;
+ PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;
+ PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;
+ PFN_vkBindBufferMemory vkBindBufferMemory;
+ PFN_vkBindImageMemory vkBindImageMemory;
+ PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
+ PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
+ PFN_vkCreateBuffer vkCreateBuffer;
+ PFN_vkDestroyBuffer vkDestroyBuffer;
+ PFN_vkCreateImage vkCreateImage;
+ PFN_vkDestroyImage vkDestroyImage;
+ PFN_vkCmdCopyBuffer vkCmdCopyBuffer;
+#if VMA_DEDICATED_ALLOCATION
+ PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
+ PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
+#endif
+} VmaVulkanFunctions;
+
+/// Flags to be used in VmaRecordSettings::flags.
+typedef enum VmaRecordFlagBits {
+ /** \brief Enables flush after recording every function call.
+
+ Enable it if you expect your application to crash, which may leave the recording file truncated.
+ It may degrade performance though.
+ */
+ VMA_RECORD_FLUSH_AFTER_CALL_BIT = 0x00000001,
+
+ VMA_RECORD_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VmaRecordFlagBits;
+typedef VkFlags VmaRecordFlags;
+
+/// Parameters for recording calls to VMA functions. To be used in VmaAllocatorCreateInfo::pRecordSettings.
+typedef struct VmaRecordSettings {
+ /// Flags for recording. Use #VmaRecordFlagBits enum.
+ VmaRecordFlags flags;
+ /** \brief Path to the file that should be written by the recording.
+
+ Suggested extension: "csv".
+ If the file already exists, it will be overwritten.
+ It will be opened for the whole time #VmaAllocator object is alive.
+ If opening this file fails, creation of the whole allocator object fails.
+ */
+ const char *pFilePath;
+} VmaRecordSettings;
+
+/// Description of an Allocator to be created.
+typedef struct VmaAllocatorCreateInfo {
+ /// Flags for created allocator. Use #VmaAllocatorCreateFlagBits enum.
+ VmaAllocatorCreateFlags flags;
+ /// Vulkan physical device.
+ /** It must be valid throughout whole lifetime of created allocator. */
+ VkPhysicalDevice physicalDevice;
+ /// Vulkan device.
+ /** It must be valid throughout whole lifetime of created allocator. */
+ VkDevice device;
+ /// Preferred size of a single `VkDeviceMemory` block to be allocated from large heaps > 1 GiB. Optional.
+ /** Set to 0 to use default, which is currently 256 MiB. */
+ VkDeviceSize preferredLargeHeapBlockSize;
+ /// Custom CPU memory allocation callbacks. Optional.
+ /** Optional, can be null. When specified, will also be used for all CPU-side memory allocations. */
+ const VkAllocationCallbacks *pAllocationCallbacks;
+ /// Informative callbacks for `vkAllocateMemory`, `vkFreeMemory`. Optional.
+ /** Optional, can be null. */
+ const VmaDeviceMemoryCallbacks *pDeviceMemoryCallbacks;
+ /** \brief Maximum number of additional frames that are in use at the same time as current frame.
+
+ This value is used only when you make allocations with
+ VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT flag. Such allocation cannot become
+ lost if allocation.lastUseFrameIndex >= allocator.currentFrameIndex - frameInUseCount.
+
+ For example, if you double-buffer your command buffers, so resources used for
+ rendering in previous frame may still be in use by the GPU at the moment you
+ allocate resources needed for the current frame, set this value to 1.
+
+ If you want to allow any allocations other than used in the current frame to
+ become lost, set this value to 0.
+ */
+ uint32_t frameInUseCount;
+ /** \brief Either null or a pointer to an array of limits on maximum number of bytes that can be allocated out of particular Vulkan memory heap.
+
+ If not NULL, it must be a pointer to an array of
+ `VkPhysicalDeviceMemoryProperties::memoryHeapCount` elements, defining limit on
+ maximum number of bytes that can be allocated out of particular Vulkan memory
+ heap.
+
+ Any of the elements may be equal to `VK_WHOLE_SIZE`, which means no limit on that
+ heap. This is also the default in case of `pHeapSizeLimit` = NULL.
+
+ If there is a limit defined for a heap:
+
+ - If user tries to allocate more memory from that heap using this allocator,
+ the allocation fails with `VK_ERROR_OUT_OF_DEVICE_MEMORY`.
+ - If the limit is smaller than heap size reported in `VkMemoryHeap::size`, the
+ value of this limit will be reported instead when using vmaGetMemoryProperties().
+
+ Warning! Using this feature may not be equivalent to installing a GPU with
+ smaller amount of memory, because graphics driver doesn't necessarily fail new
+ allocations with `VK_ERROR_OUT_OF_DEVICE_MEMORY` result when memory capacity is
+ exceeded. It may return success and just silently migrate some device memory
+ blocks to system RAM. This driver behavior can also be controlled using
+ VK_AMD_memory_overallocation_behavior extension.
+ */
+ const VkDeviceSize *pHeapSizeLimit;
+ /** \brief Pointers to Vulkan functions. Can be null if you leave `VMA_STATIC_VULKAN_FUNCTIONS` defined to 1.
+
+ If you leave `VMA_STATIC_VULKAN_FUNCTIONS` defined to 1 in the configuration section,
+ you can pass null as this member, because the library will fetch pointers to
+ Vulkan functions internally in a static way, like:
+
+ vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
+
+ Fill this member if you want to provide your own pointers to Vulkan functions,
+ e.g. fetched using `vkGetInstanceProcAddr()` and `vkGetDeviceProcAddr()`.
+ */
+ const VmaVulkanFunctions *pVulkanFunctions;
+ /** \brief Parameters for recording of VMA calls. Can be null.
+
+ If not null, it enables recording of calls to VMA functions to a file.
+ If support for recording is not enabled using `VMA_RECORDING_ENABLED` macro,
+ creation of the allocator object fails with `VK_ERROR_FEATURE_NOT_PRESENT`.
+ */
+ const VmaRecordSettings *pRecordSettings;
+} VmaAllocatorCreateInfo;
+
+/// Creates Allocator object.
+VkResult vmaCreateAllocator(
+ const VmaAllocatorCreateInfo *pCreateInfo,
+ VmaAllocator *pAllocator);
+
+/// Destroys allocator object.
+void vmaDestroyAllocator(
+ VmaAllocator allocator);
+
+/**
+PhysicalDeviceProperties are fetched from physicalDevice by the allocator.
+You can access it here, without fetching it again on your own.
+*/
+void vmaGetPhysicalDeviceProperties(
+ VmaAllocator allocator,
+ const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties);
+
+/**
+PhysicalDeviceMemoryProperties are fetched from physicalDevice by the allocator.
+You can access it here, without fetching it again on your own.
+*/
+void vmaGetMemoryProperties(
+ VmaAllocator allocator,
+ const VkPhysicalDeviceMemoryProperties **ppPhysicalDeviceMemoryProperties);
+
+/**
+\brief Given Memory Type Index, returns Property Flags of this memory type.
+
+This is just a convenience function. Same information can be obtained using
+vmaGetMemoryProperties().
+*/
+void vmaGetMemoryTypeProperties(
+ VmaAllocator allocator,
+ uint32_t memoryTypeIndex,
+ VkMemoryPropertyFlags *pFlags);
+
+/** \brief Sets index of the current frame.
+
+This function must be used if you make allocations with
+#VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT and
+#VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT flags to inform the allocator
+when a new frame begins. Allocations queried using vmaGetAllocationInfo() cannot
+become lost in the current frame.
+*/
+void vmaSetCurrentFrameIndex(
+ VmaAllocator allocator,
+ uint32_t frameIndex);
+
+/** \brief Calculated statistics of memory usage in entire allocator.
+*/
+typedef struct VmaStatInfo {
+ /// Number of `VkDeviceMemory` Vulkan memory blocks allocated.
+ uint32_t blockCount;
+ /// Number of #VmaAllocation allocation objects allocated.
+ uint32_t allocationCount;
+ /// Number of free ranges of memory between allocations.
+ uint32_t unusedRangeCount;
+ /// Total number of bytes occupied by all allocations.
+ VkDeviceSize usedBytes;
+ /// Total number of bytes occupied by unused ranges.
+ VkDeviceSize unusedBytes;
+ VkDeviceSize allocationSizeMin, allocationSizeAvg, allocationSizeMax;
+ VkDeviceSize unusedRangeSizeMin, unusedRangeSizeAvg, unusedRangeSizeMax;
+} VmaStatInfo;
+
+/// General statistics from current state of Allocator.
+typedef struct VmaStats {
+ VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
+ VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
+ VmaStatInfo total;
+} VmaStats;
+
+/// Retrieves statistics from current state of the Allocator.
+void vmaCalculateStats(
+ VmaAllocator allocator,
+ VmaStats *pStats);
+
+#ifndef VMA_STATS_STRING_ENABLED
+#define VMA_STATS_STRING_ENABLED 1
+#endif
+
+#if VMA_STATS_STRING_ENABLED
+
+/// Builds and returns statistics as string in JSON format.
+/** @param[out] ppStatsString Must be freed using vmaFreeStatsString() function.
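+
+A sketch of typical use (writing the string to a log is illustrative):
+
+\code
+char* statsString = nullptr;
+vmaBuildStatsString(allocator, &statsString, VK_TRUE);
+// ...write statsString to a log or file...
+vmaFreeStatsString(allocator, statsString);
+\endcode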
+*/
+void vmaBuildStatsString(
+ VmaAllocator allocator,
+ char **ppStatsString,
+ VkBool32 detailedMap);
+
+void vmaFreeStatsString(
+ VmaAllocator allocator,
+ char *pStatsString);
+
+#endif // #if VMA_STATS_STRING_ENABLED
+
+/** \struct VmaPool
+\brief Represents custom memory pool
+
+Fill structure VmaPoolCreateInfo and call function vmaCreatePool() to create it.
+Call function vmaDestroyPool() to destroy it.
+
+For more information see [Custom memory pools](@ref choosing_memory_type_custom_memory_pools).
+*/
+VK_DEFINE_HANDLE(VmaPool)
+
+typedef enum VmaMemoryUsage {
+ /** No intended memory usage specified.
+ Use other members of VmaAllocationCreateInfo to specify your requirements.
+ */
+ VMA_MEMORY_USAGE_UNKNOWN = 0,
+ /** Memory will be used on device only, so fast access from the device is preferred.
+ It usually means device-local GPU (video) memory.
+ No need to be mappable on host.
+ It is roughly equivalent of `D3D12_HEAP_TYPE_DEFAULT`.
+
+ Usage:
+
+ - Resources written and read by device, e.g. images used as attachments.
+ - Resources transferred from host once (immutable) or infrequently and read by
+ device multiple times, e.g. textures to be sampled, vertex buffers, uniform
+ (constant) buffers, and majority of other types of resources used on GPU.
+
+ Allocation may still end up in `HOST_VISIBLE` memory on some implementations.
+ In such case, you are free to map it.
+ You can use #VMA_ALLOCATION_CREATE_MAPPED_BIT with this usage type.
+ */
+ VMA_MEMORY_USAGE_GPU_ONLY = 1,
+ /** Memory will be mappable on host.
+ It usually means CPU (system) memory.
+ Guarantees to be `HOST_VISIBLE` and `HOST_COHERENT`.
+ CPU access is typically uncached. Writes may be write-combined.
+ Resources created in this pool may still be accessible to the device, but access to them can be slow.
+ It is roughly equivalent of `D3D12_HEAP_TYPE_UPLOAD`.
+
+ Usage: Staging copy of resources used as transfer source.
+ */
+ VMA_MEMORY_USAGE_CPU_ONLY = 2,
+ /**
+ Memory that is both mappable on host (guarantees to be `HOST_VISIBLE`) and preferably fast to access by GPU.
+ CPU access is typically uncached. Writes may be write-combined.
+
+ Usage: Resources written frequently by host (dynamic), read by device. E.g. textures, vertex buffers, uniform buffers updated every frame or every draw call.
+ */
+ VMA_MEMORY_USAGE_CPU_TO_GPU = 3,
+ /** Memory mappable on host (guarantees to be `HOST_VISIBLE`) and cached.
+ It is roughly equivalent of `D3D12_HEAP_TYPE_READBACK`.
+
+ Usage:
+
+ - Resources written by device, read by host - results of some computations, e.g. screen capture, average scene luminance for HDR tone mapping.
+ - Any resources read or accessed randomly on host, e.g. CPU-side copy of vertex buffer used as source of transfer, but also used for collision detection.
+ */
+ VMA_MEMORY_USAGE_GPU_TO_CPU = 4,
+ VMA_MEMORY_USAGE_MAX_ENUM = 0x7FFFFFFF
+} VmaMemoryUsage;
+
+/// Flags to be passed as VmaAllocationCreateInfo::flags.
+typedef enum VmaAllocationCreateFlagBits {
+ /** \brief Set this flag if the allocation should have its own memory block.
+
+ Use it for special, big resources, like fullscreen images used as attachments.
+
+ You should not use this flag if VmaAllocationCreateInfo::pool is not null.
+ */
+ VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT = 0x00000001,
+
+ /** \brief Set this flag to only try to allocate from existing `VkDeviceMemory` blocks and never create new such block.
+
+ If new allocation cannot be placed in any of the existing blocks, allocation
+ fails with `VK_ERROR_OUT_OF_DEVICE_MEMORY` error.
+
+ You should not use #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT and
+ #VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT at the same time. It makes no sense.
+
+ If VmaAllocationCreateInfo::pool is not null, this flag is implied and ignored. */
+ VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT = 0x00000002,
+ /** \brief Set this flag to use a memory that will be persistently mapped and retrieve pointer to it.
+
+ Pointer to mapped memory will be returned through VmaAllocationInfo::pMappedData.
+
+ It is valid to use this flag for an allocation made from a memory type that is not
+ `HOST_VISIBLE`. This flag is then ignored and memory is not mapped. This is
+ useful if you need an allocation that is efficient to use on GPU
+ (`DEVICE_LOCAL`) and still want to map it directly if possible on platforms that
+ support it (e.g. Intel GPU).
+
+ You should not use this flag together with #VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT.
+ */
+ VMA_ALLOCATION_CREATE_MAPPED_BIT = 0x00000004,
+ /** Allocation created with this flag can become lost as a result of another
+ allocation with #VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT flag, so you
+ must check it before use.
+
+ To check if allocation is not lost, call vmaGetAllocationInfo() and check if
+ VmaAllocationInfo::deviceMemory is not `VK_NULL_HANDLE`.
+
+ For details about supporting lost allocations, see Lost Allocations
+ chapter of User Guide on Main Page.
+
+ You should not use this flag together with #VMA_ALLOCATION_CREATE_MAPPED_BIT.
+ */
+ VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT = 0x00000008,
+ /** While creating allocation using this flag, other allocations that were
+ created with flag #VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT can become lost.
+
+ For details about supporting lost allocations, see Lost Allocations
+ chapter of User Guide on Main Page.
+ */
+ VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT = 0x00000010,
+ /** Set this flag to treat VmaAllocationCreateInfo::pUserData as pointer to a
+ null-terminated string. Instead of copying pointer value, a local copy of the
+ string is made and stored in allocation's `pUserData`. The string is automatically
+ freed together with the allocation. It is also used in vmaBuildStatsString().
+ */
+ VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT = 0x00000020,
+ /** Allocation will be created from upper stack in a double stack pool.
+
+ This flag is only allowed for custom pools created with #VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT flag.
+ */
+ VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT = 0x00000040,
+ /** Create both buffer/image and allocation, but don't bind them together.
+ It is useful when you want to do the binding yourself, e.g. some more advanced binding using extensions.
+ The flag is meaningful only with functions that bind by default: vmaCreateBuffer(), vmaCreateImage().
+ Otherwise it is ignored.
+ */
+ VMA_ALLOCATION_CREATE_DONT_BIND_BIT = 0x00000080,
+
+ /** Allocation strategy that chooses smallest possible free range for the
+ allocation.
+ */
+ VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT = 0x00010000,
+ /** Allocation strategy that chooses biggest possible free range for the
+ allocation.
+ */
+ VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT = 0x00020000,
+ /** Allocation strategy that chooses first suitable free range for the
+ allocation.
+
+ "First" doesn't necessarily means the one with smallest offset in memory,
+ but rather the one that is easiest and fastest to find.
+ */
+ VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT = 0x00040000,
+
+ /** Allocation strategy that tries to minimize memory usage.
+ */
+ VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT = VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT,
+ /** Allocation strategy that tries to minimize allocation time.
+ */
+ VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT = VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT,
+ /** Allocation strategy that tries to minimize memory fragmentation.
+ */
+ VMA_ALLOCATION_CREATE_STRATEGY_MIN_FRAGMENTATION_BIT = VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT,
+
+ /** A bit mask to extract only `STRATEGY` bits from entire set of flags.
+ */
+ VMA_ALLOCATION_CREATE_STRATEGY_MASK =
+ VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT |
+ VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT |
+ VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT,
+
+ VMA_ALLOCATION_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VmaAllocationCreateFlagBits;
+typedef VkFlags VmaAllocationCreateFlags;
+
+typedef struct VmaAllocationCreateInfo {
+ /// Use #VmaAllocationCreateFlagBits enum.
+ VmaAllocationCreateFlags flags;
+ /** \brief Intended usage of memory.
+
+ You can leave #VMA_MEMORY_USAGE_UNKNOWN if you specify memory requirements in another way. \n
+ If `pool` is not null, this member is ignored.
+ */
+ VmaMemoryUsage usage;
+ /** \brief Flags that must be set in a Memory Type chosen for an allocation.
+
+ Leave 0 if you specify memory requirements in another way. \n
+ If `pool` is not null, this member is ignored.*/
+ VkMemoryPropertyFlags requiredFlags;
+ /** \brief Flags that preferably should be set in a memory type chosen for an allocation.
+
+ Set to 0 if no additional flags are preferred. \n
+ If `pool` is not null, this member is ignored. */
+ VkMemoryPropertyFlags preferredFlags;
+ /** \brief Bitmask containing one bit set for every memory type acceptable for this allocation.
+
+ Value 0 is equivalent to `UINT32_MAX` - it means any memory type is accepted if
+ it meets other requirements specified by this structure, with no further
+ restrictions on memory type index. \n
+ If `pool` is not null, this member is ignored.
+ */
+ uint32_t memoryTypeBits;
+ /** \brief Pool that this allocation should be created in.
+
+ Leave `VK_NULL_HANDLE` to allocate from default pool. If not null, members:
+ `usage`, `requiredFlags`, `preferredFlags`, `memoryTypeBits` are ignored.
+ */
+ VmaPool pool;
+ /** \brief Custom general-purpose pointer that will be stored in #VmaAllocation, can be read as VmaAllocationInfo::pUserData and changed using vmaSetAllocationUserData().
+
+ If #VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT is used, it must be either
+ null or pointer to a null-terminated string. The string will be then copied to
+ internal buffer, so it doesn't need to be valid after allocation call.
+ */
+ void *pUserData;
+} VmaAllocationCreateInfo;
+
+/**
+\brief Helps to find memoryTypeIndex, given memoryTypeBits and VmaAllocationCreateInfo.
+
+This algorithm tries to find a memory type that:
+
+- Is allowed by memoryTypeBits.
+- Contains all the flags from pAllocationCreateInfo->requiredFlags.
+- Matches intended usage.
+- Has as many flags from pAllocationCreateInfo->preferredFlags as possible.
+
+\return Returns VK_ERROR_FEATURE_NOT_PRESENT if not found. Receiving such result
+from this function or any other allocating function probably means that your
+device doesn't support any memory type with requested features for the specific
+type of resource you want to use it for. Please check parameters of your
+resource, like image layout (OPTIMAL versus LINEAR) or mip level count.
+*/
+VkResult vmaFindMemoryTypeIndex(
+ VmaAllocator allocator,
+ uint32_t memoryTypeBits,
+ const VmaAllocationCreateInfo *pAllocationCreateInfo,
+ uint32_t *pMemoryTypeIndex);
+
+/**
+\brief Helps to find memoryTypeIndex, given VkBufferCreateInfo and VmaAllocationCreateInfo.
+
+It can be useful e.g. to determine value to be used as VmaPoolCreateInfo::memoryTypeIndex.
+It internally creates a temporary, dummy buffer that never has memory bound.
+It is just a convenience function, equivalent to calling:
+
+- `vkCreateBuffer`
+- `vkGetBufferMemoryRequirements`
+- `vmaFindMemoryTypeIndex`
+- `vkDestroyBuffer`
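+
+For example, a sketch finding a memory type for a uniform buffer to be placed in a custom pool:
+
+\code
+VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
+bufCreateInfo.size = 65536;
+bufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
+
+VmaAllocationCreateInfo allocCreateInfo = {};
+allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
+
+uint32_t memTypeIndex;
+vmaFindMemoryTypeIndexForBufferInfo(allocator, &bufCreateInfo, &allocCreateInfo, &memTypeIndex);
+\endcode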
+*/
+VkResult vmaFindMemoryTypeIndexForBufferInfo(
+ VmaAllocator allocator,
+ const VkBufferCreateInfo *pBufferCreateInfo,
+ const VmaAllocationCreateInfo *pAllocationCreateInfo,
+ uint32_t *pMemoryTypeIndex);
+
+/**
+\brief Helps to find memoryTypeIndex, given VkImageCreateInfo and VmaAllocationCreateInfo.
+
+It can be useful e.g. to determine value to be used as VmaPoolCreateInfo::memoryTypeIndex.
+It internally creates a temporary, dummy image that never has memory bound.
+It is just a convenience function, equivalent to calling:
+
+- `vkCreateImage`
+- `vkGetImageMemoryRequirements`
+- `vmaFindMemoryTypeIndex`
+- `vkDestroyImage`
+*/
+VkResult vmaFindMemoryTypeIndexForImageInfo(
+ VmaAllocator allocator,
+ const VkImageCreateInfo *pImageCreateInfo,
+ const VmaAllocationCreateInfo *pAllocationCreateInfo,
+ uint32_t *pMemoryTypeIndex);
+
+/// Flags to be passed as VmaPoolCreateInfo::flags.
+typedef enum VmaPoolCreateFlagBits {
+ /** \brief Use this flag if you always allocate only buffers and linear images or only optimal images out of this pool and so Buffer-Image Granularity can be ignored.
+
+ This is an optional optimization flag.
+
+ If you always allocate using vmaCreateBuffer(), vmaCreateImage(),
+ vmaAllocateMemoryForBuffer(), then you don't need to use it because allocator
+ knows exact type of your allocations so it can handle Buffer-Image Granularity
+ in the optimal way.
+
+ If you also allocate using vmaAllocateMemoryForImage() or vmaAllocateMemory(),
+ exact type of such allocations is not known, so allocator must be conservative
+ in handling Buffer-Image Granularity, which can lead to suboptimal allocation
+ (wasted memory). In that case, if you can make sure you always allocate only
+ buffers and linear images or only optimal images out of this pool, use this flag
+ to make allocator disregard Buffer-Image Granularity and so make allocations
+ faster and more optimal.
+ */
+ VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT = 0x00000002,
+
+ /** \brief Enables alternative, linear allocation algorithm in this pool.
+
+ Specify this flag to enable linear allocation algorithm, which always creates
+ new allocations after last one and doesn't reuse space from allocations freed in
+ between. It trades memory consumption for simplified algorithm and data
+ structure, which has better performance and uses less memory for metadata.
+
+ By using this flag, you can achieve behavior of free-at-once, stack,
+ ring buffer, and double stack. For details, see documentation chapter
+ \ref linear_algorithm.
+
+ When using this flag, you must specify VmaPoolCreateInfo::maxBlockCount == 1 (or 0 for default).
+
+ For more details, see [Linear allocation algorithm](@ref linear_algorithm).
+ */
+ VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT = 0x00000004,
+
+ /** \brief Enables alternative, buddy allocation algorithm in this pool.
+
+ It operates on a tree of blocks, each having size that is a power of two and
+ a half of its parent's size. Comparing to default algorithm, this one provides
+ faster allocation and deallocation and decreased external fragmentation,
+ at the expense of more memory wasted (internal fragmentation).
+
+ For more details, see [Buddy allocation algorithm](@ref buddy_algorithm).
+ */
+ VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT = 0x00000008,
+
+ /** Bit mask to extract only `ALGORITHM` bits from entire set of flags.
+ */
+ VMA_POOL_CREATE_ALGORITHM_MASK =
+ VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT |
+ VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT,
+
+ VMA_POOL_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VmaPoolCreateFlagBits;
+typedef VkFlags VmaPoolCreateFlags;
+
+/** \brief Describes parameters of created #VmaPool.
+*/
+typedef struct VmaPoolCreateInfo {
+ /** \brief Vulkan memory type index to allocate this pool from.
+ */
+ uint32_t memoryTypeIndex;
+ /** \brief Use combination of #VmaPoolCreateFlagBits.
+ */
+ VmaPoolCreateFlags flags;
+ /** \brief Size of a single `VkDeviceMemory` block to be allocated as part of this pool, in bytes. Optional.
+
+ Specify nonzero to set explicit, constant size of memory blocks used by this
+ pool.
+
+ Leave 0 to use default and let the library manage block sizes automatically.
+ Sizes of particular blocks may vary.
+ */
+ VkDeviceSize blockSize;
+ /** \brief Minimum number of blocks to be always allocated in this pool, even if they stay empty.
+
+ Set to 0 to have no preallocated blocks and allow the pool to be completely empty.
+ */
+ size_t minBlockCount;
+ /** \brief Maximum number of blocks that can be allocated in this pool. Optional.
+
+ Set to 0 to use default, which is `SIZE_MAX`, which means no limit.
+
+ Set to same value as VmaPoolCreateInfo::minBlockCount to have fixed amount of memory allocated
+ throughout whole lifetime of this pool.
+ */
+ size_t maxBlockCount;
+ /** \brief Maximum number of additional frames that are in use at the same time as current frame.
+
+ This value is used only when you make allocations with
+ #VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT flag. Such allocation cannot become
+ lost if allocation.lastUseFrameIndex >= allocator.currentFrameIndex - frameInUseCount.
+
+ For example, if you double-buffer your command buffers, so resources used for
+ rendering in previous frame may still be in use by the GPU at the moment you
+ allocate resources needed for the current frame, set this value to 1.
+
+ If you want to allow any allocations other than used in the current frame to
+ become lost, set this value to 0.
+ */
+ uint32_t frameInUseCount;
+} VmaPoolCreateInfo;
+
+/** \brief Describes parameters of existing #VmaPool.
+*/
+typedef struct VmaPoolStats {
+ /** \brief Total amount of `VkDeviceMemory` allocated from Vulkan for this pool, in bytes.
+ */
+ VkDeviceSize size;
+ /** \brief Total number of bytes in the pool not used by any #VmaAllocation.
+ */
+ VkDeviceSize unusedSize;
+ /** \brief Number of #VmaAllocation objects created from this pool that were not destroyed or lost.
+ */
+ size_t allocationCount;
+ /** \brief Number of continuous memory ranges in the pool not used by any #VmaAllocation.
+ */
+ size_t unusedRangeCount;
+ /** \brief Size of the largest continuous free memory region available for new allocation.
+
+ Making a new allocation of that size is not guaranteed to succeed because of
+ possible additional margin required to respect alignment and buffer/image
+ granularity.
+ */
+ VkDeviceSize unusedRangeSizeMax;
+ /** \brief Number of `VkDeviceMemory` blocks allocated for this pool.
+ */
+ size_t blockCount;
+} VmaPoolStats;
+
+/** \brief Allocates Vulkan device memory and creates #VmaPool object.
+
+@param allocator Allocator object.
+@param pCreateInfo Parameters of pool to create.
+@param[out] pPool Handle to created pool.
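+
+A sketch, assuming `memTypeIndex` was found earlier, e.g. with vmaFindMemoryTypeIndexForBufferInfo():
+
+\code
+VmaPoolCreateInfo poolCreateInfo = {};
+poolCreateInfo.memoryTypeIndex = memTypeIndex;
+poolCreateInfo.blockSize = 128ull * 1024 * 1024;
+poolCreateInfo.maxBlockCount = 2;
+
+VmaPool pool;
+vmaCreatePool(allocator, &poolCreateInfo, &pool);
+\endcode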
+*/
+VkResult vmaCreatePool(
+ VmaAllocator allocator,
+ const VmaPoolCreateInfo *pCreateInfo,
+ VmaPool *pPool);
+
+/** \brief Destroys #VmaPool object and frees Vulkan device memory.
+*/
+void vmaDestroyPool(
+ VmaAllocator allocator,
+ VmaPool pool);
+
+/** \brief Retrieves statistics of existing #VmaPool object.
+
+@param allocator Allocator object.
+@param pool Pool object.
+@param[out] pPoolStats Statistics of specified pool.
+*/
+void vmaGetPoolStats(
+ VmaAllocator allocator,
+ VmaPool pool,
+ VmaPoolStats *pPoolStats);
+
+/** \brief Marks all allocations in given pool as lost if they are not used in current frame or VmaPoolCreateInfo::frameInUseCount back from now.
+
+@param allocator Allocator object.
+@param pool Pool.
+@param[out] pLostAllocationCount Number of allocations marked as lost. Optional - pass null if you don't need this information.
+*/
+void vmaMakePoolAllocationsLost(
+ VmaAllocator allocator,
+ VmaPool pool,
+ size_t *pLostAllocationCount);
+
+/** \brief Checks magic number in margins around all allocations in given memory pool in search for corruptions.
+
+Corruption detection is enabled only when `VMA_DEBUG_DETECT_CORRUPTION` macro is defined to nonzero,
+`VMA_DEBUG_MARGIN` is defined to nonzero and the pool is created in memory type that is
+`HOST_VISIBLE` and `HOST_COHERENT`. For more information, see [Corruption detection](@ref debugging_memory_usage_corruption_detection).
+
+Possible return values:
+
+- `VK_ERROR_FEATURE_NOT_PRESENT` - corruption detection is not enabled for specified pool.
+- `VK_SUCCESS` - corruption detection has been performed and succeeded.
+- `VK_ERROR_VALIDATION_FAILED_EXT` - corruption detection has been performed and found memory corruptions around one of the allocations.
+ `VMA_ASSERT` is also fired in that case.
+- Other value: Error returned by Vulkan, e.g. memory mapping failure.
+*/
+VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool);
+
+/** \struct VmaAllocation
+\brief Represents single memory allocation.
+
+It may be either a dedicated block of `VkDeviceMemory` or a specific region of a bigger block of this type,
+plus a unique offset.
+
+There are multiple ways to create such object.
+You need to fill structure VmaAllocationCreateInfo.
+For more information see [Choosing memory type](@ref choosing_memory_type).
+
+Although the library provides convenience functions that create Vulkan buffer or image,
+allocate memory for it and bind them together,
+binding of the allocation to a buffer or an image is out of scope of the allocation itself.
+Allocation object can exist without buffer/image bound,
+binding can be done manually by the user, and destruction of it can be done
+independently of destruction of the allocation.
+
+The object also remembers its size and some other information.
+To retrieve this information, use function vmaGetAllocationInfo() and inspect
+returned structure VmaAllocationInfo.
+
+Some kinds of allocations can be in lost state.
+For more information, see [Lost allocations](@ref lost_allocations).
+*/
+VK_DEFINE_HANDLE(VmaAllocation)
+
+/** \brief Parameters of #VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
+*/
+typedef struct VmaAllocationInfo {
+ /** \brief Memory type index that this allocation was allocated from.
+
+ It never changes.
+ */
+ uint32_t memoryType;
+ /** \brief Handle to Vulkan memory object.
+
+ Same memory object can be shared by multiple allocations.
+
+ It can change after call to vmaDefragment() if this allocation is passed to the function, or if allocation is lost.
+
+ If the allocation is lost, it is equal to `VK_NULL_HANDLE`.
+ */
+ VkDeviceMemory deviceMemory;
+ /** \brief Offset into deviceMemory object to the beginning of this allocation, in bytes. (deviceMemory, offset) pair is unique to this allocation.
+
+ It can change after call to vmaDefragment() if this allocation is passed to the function, or if allocation is lost.
+ */
+ VkDeviceSize offset;
+ /** \brief Size of this allocation, in bytes.
+
+ It never changes, unless allocation is lost.
+ */
+ VkDeviceSize size;
+ /** \brief Pointer to the beginning of this allocation as mapped data.
+
+ If the allocation hasn't been mapped using vmaMapMemory() and hasn't been
+ created with #VMA_ALLOCATION_CREATE_MAPPED_BIT flag, this value is null.
+
+ It can change after call to vmaMapMemory(), vmaUnmapMemory().
+ It can also change after call to vmaDefragment() if this allocation is passed to the function.
+ */
+ void *pMappedData;
+ /** \brief Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vmaSetAllocationUserData().
+
+ It can change after call to vmaSetAllocationUserData() for this allocation.
+ */
+ void *pUserData;
+} VmaAllocationInfo;
+
+/** \brief General purpose memory allocation.
+
+@param[out] pAllocation Handle to allocated memory.
+@param[out] pAllocationInfo Optional. Information about allocated memory. It can be later fetched using function vmaGetAllocationInfo().
+
+You should free the memory using vmaFreeMemory() or vmaFreeMemoryPages().
+
+It is recommended to use vmaAllocateMemoryForBuffer(), vmaAllocateMemoryForImage(),
+vmaCreateBuffer(), vmaCreateImage() instead whenever possible.
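+
+A sketch, assuming `myBuffer` is a buffer you created yourself and want to bind manually:
+
+\code
+VkMemoryRequirements memReq;
+vkGetBufferMemoryRequirements(device, myBuffer, &memReq);
+
+VmaAllocationCreateInfo allocCreateInfo = {};
+allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
+
+VmaAllocation allocation;
+vmaAllocateMemory(allocator, &memReq, &allocCreateInfo, &allocation, nullptr);
+// Bind yourself, e.g. with vkBindBufferMemory(), using info from vmaGetAllocationInfo().
+\endcode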
+*/
+VkResult vmaAllocateMemory(
+ VmaAllocator allocator,
+ const VkMemoryRequirements *pVkMemoryRequirements,
+ const VmaAllocationCreateInfo *pCreateInfo,
+ VmaAllocation *pAllocation,
+ VmaAllocationInfo *pAllocationInfo);
+
+/** \brief General purpose memory allocation for multiple allocation objects at once.
+
+@param allocator Allocator object.
+@param pVkMemoryRequirements Memory requirements for each allocation.
+@param pCreateInfo Creation parameters for each allocation.
+@param allocationCount Number of allocations to make.
+@param[out] pAllocations Pointer to array that will be filled with handles to created allocations.
+@param[out] pAllocationInfo Optional. Pointer to array that will be filled with parameters of created allocations.
+
+You should free the memory using vmaFreeMemory() or vmaFreeMemoryPages().
+
+Word "pages" is just a suggestion to use this function to allocate pieces of memory needed for sparse binding.
+It is just a general purpose allocation function able to make multiple allocations at once.
+It may be internally optimized to be more efficient than calling vmaAllocateMemory() `allocationCount` times.
+
+All allocations are made using same parameters. All of them are created out of the same memory pool and type.
+If any allocation fails, all allocations already made within this function call are also freed, so that when
+returned result is not `VK_SUCCESS`, `pAllocations` array is always entirely filled with `VK_NULL_HANDLE`.
+*/
+VkResult vmaAllocateMemoryPages(
+ VmaAllocator allocator,
+ const VkMemoryRequirements *pVkMemoryRequirements,
+ const VmaAllocationCreateInfo *pCreateInfo,
+ size_t allocationCount,
+ VmaAllocation *pAllocations,
+ VmaAllocationInfo *pAllocationInfo);
+
+/**
+@param[out] pAllocation Handle to allocated memory.
+@param[out] pAllocationInfo Optional. Information about allocated memory. It can be later fetched using function vmaGetAllocationInfo().
+
+You should free the memory using vmaFreeMemory().
+*/
+VkResult vmaAllocateMemoryForBuffer(
+ VmaAllocator allocator,
+ VkBuffer buffer,
+ const VmaAllocationCreateInfo *pCreateInfo,
+ VmaAllocation *pAllocation,
+ VmaAllocationInfo *pAllocationInfo);
+
+/// Function similar to vmaAllocateMemoryForBuffer().
+VkResult vmaAllocateMemoryForImage(
+ VmaAllocator allocator,
+ VkImage image,
+ const VmaAllocationCreateInfo *pCreateInfo,
+ VmaAllocation *pAllocation,
+ VmaAllocationInfo *pAllocationInfo);
+
+/** \brief Frees memory previously allocated using vmaAllocateMemory(), vmaAllocateMemoryForBuffer(), or vmaAllocateMemoryForImage().
+
+Passing `VK_NULL_HANDLE` as `allocation` is valid. Such function call is just skipped.
+*/
+void vmaFreeMemory(
+ VmaAllocator allocator,
+ VmaAllocation allocation);
+
+/** \brief Frees memory and destroys multiple allocations.
+
+Word "pages" is just a suggestion to use this function to free pieces of memory used for sparse binding.
+It is just a general purpose function to free memory and destroy allocations made using e.g. vmaAllocateMemory(),
+vmaAllocateMemoryPages() and other functions.
+It may be internally optimized to be more efficient than calling vmaFreeMemory() `allocationCount` times.
+
+Allocations in `pAllocations` array can come from any memory pools and types.
+Passing `VK_NULL_HANDLE` as elements of `pAllocations` array is valid. Such entries are just skipped.
+*/
+void vmaFreeMemoryPages(
+ VmaAllocator allocator,
+ size_t allocationCount,
+ VmaAllocation *pAllocations);
+
+/** \brief Tries to resize an allocation in place, if there is enough free memory after it.
+
+Tries to change allocation's size without moving or reallocating it.
+You can both shrink and grow allocation size.
+When growing, it succeeds only when the allocation belongs to a memory block with enough
+free space after it.
+
+Returns `VK_SUCCESS` if allocation's size has been successfully changed.
+Returns `VK_ERROR_OUT_OF_POOL_MEMORY` if allocation's size could not be changed.
+
+After successful call to this function, VmaAllocationInfo::size of this allocation changes.
+All other parameters stay the same: memory pool and type, alignment, offset, mapped pointer.
+
+- Calling this function on allocation that is in lost state fails with result `VK_ERROR_VALIDATION_FAILED_EXT`.
+- Calling this function with `newSize` same as current allocation size does nothing and returns `VK_SUCCESS`.
+- Resizing dedicated allocations, as well as allocations created in pools that use linear
+ or buddy algorithm, is not supported.
+ The function returns `VK_ERROR_FEATURE_NOT_PRESENT` in such cases.
+ Support may be added in the future.
+*/
+VkResult vmaResizeAllocation(
+ VmaAllocator allocator,
+ VmaAllocation allocation,
+ VkDeviceSize newSize);
+
+/** \brief Returns current information about specified allocation and atomically marks it as used in current frame.
+
+Current parameters of given allocation are returned in `pAllocationInfo`.
+
+This function also atomically "touches" allocation - marks it as used in current frame,
+just like vmaTouchAllocation().
+If the allocation is in lost state, `pAllocationInfo->deviceMemory == VK_NULL_HANDLE`.
+
+Although this function uses atomics and doesn't lock any mutex, so it should be quite efficient,
+you should still avoid calling it too often.
+
+- You can retrieve same VmaAllocationInfo structure while creating your resource, from function
+ vmaCreateBuffer(), vmaCreateImage(). You can remember it if you are sure parameters don't change
+ (e.g. due to defragmentation or allocation becoming lost).
+- If you just want to check if allocation is not lost, vmaTouchAllocation() will work faster.
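+
+A sketch of typical use:
+
+\code
+VmaAllocationInfo allocInfo;
+vmaGetAllocationInfo(allocator, allocation, &allocInfo);
+if(allocInfo.deviceMemory == VK_NULL_HANDLE)
+{
+    // Allocation is lost - destroy and recreate the resource.
+}
+\endcode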
+*/
+void vmaGetAllocationInfo(
+ VmaAllocator allocator,
+ VmaAllocation allocation,
+ VmaAllocationInfo *pAllocationInfo);
+
+/** \brief Returns `VK_TRUE` if allocation is not lost and atomically marks it as used in current frame.
+
+If the allocation has been created with #VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT flag,
+this function returns `VK_TRUE` if it's not in lost state, so it can still be used.
+It then also atomically "touches" the allocation - marks it as used in current frame,
+so that you can be sure it won't become lost in current frame or next `frameInUseCount` frames.
+
+If the allocation is in lost state, the function returns `VK_FALSE`.
+Memory of such allocation, as well as buffer or image bound to it, should not be used.
+Lost allocation and the buffer/image still need to be destroyed.
+
+If the allocation has been created without #VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT flag,
+this function always returns `VK_TRUE`.
+*/
+VkBool32 vmaTouchAllocation(
+ VmaAllocator allocator,
+ VmaAllocation allocation);
+
+/** \brief Sets pUserData in given allocation to new value.
+
+If the allocation was created with VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT,
+pUserData must be either null, or pointer to a null-terminated string. The function
+makes local copy of the string and sets it as allocation's `pUserData`. String
+passed as pUserData doesn't need to be valid for whole lifetime of the allocation -
+you can free it after this call. String previously pointed by allocation's
+pUserData is freed from memory.
+
+If the flag was not used, the value of pointer `pUserData` is just copied to
+allocation's `pUserData`. It is opaque, so you can use it however you want - e.g.
+as a pointer, ordinal number or some handle to your own data.
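+
+For example, assuming the allocation was created with #VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT:
+
+\code
+vmaSetAllocationUserData(allocator, allocation, (void*)"My texture");
+\endcode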
+*/
+void vmaSetAllocationUserData(
+ VmaAllocator allocator,
+ VmaAllocation allocation,
+ void *pUserData);
+
+/** \brief Creates new allocation that is in lost state from the beginning.
+
+It can be useful if you need a dummy, non-null allocation.
+
+You still need to destroy created object using vmaFreeMemory().
+
+Returned allocation is not tied to any specific memory pool or memory type and
+not bound to any image or buffer. It has size = 0. It cannot be turned into
+a real, non-empty allocation.
+*/
+void vmaCreateLostAllocation(
+ VmaAllocator allocator,
+ VmaAllocation *pAllocation);
+
+/** \brief Maps memory represented by given allocation and returns pointer to it.
+
+Maps memory represented by given allocation to make it accessible to CPU code.
+When succeeded, `*ppData` contains pointer to first byte of this memory.
+If the allocation is part of bigger `VkDeviceMemory` block, the pointer is
+correctly offset to the beginning of region assigned to this particular
+allocation.
+
+Mapping is internally reference-counted and synchronized, so although the raw Vulkan
+function `vkMapMemory()` cannot be used to map the same block of `VkDeviceMemory`
+multiple times simultaneously, it is safe to call this function on allocations
+assigned to the same memory block. Actual Vulkan memory will be mapped on first
+mapping and unmapped on last unmapping.
+
+If the function succeeded, you must call vmaUnmapMemory() to unmap the
+allocation when mapping is no longer needed or before freeing the allocation, at
+the latest.
+
+It is also safe to call this function multiple times on the same allocation. You
+must call vmaUnmapMemory() same number of times as you called vmaMapMemory().
+
+It is also safe to call this function on allocation created with
+#VMA_ALLOCATION_CREATE_MAPPED_BIT flag. Its memory stays mapped all the time.
+You must still call vmaUnmapMemory() same number of times as you called
+vmaMapMemory(). You must not call vmaUnmapMemory() additional time to free the
+"0-th" mapping made automatically due to #VMA_ALLOCATION_CREATE_MAPPED_BIT flag.
+
+This function fails when used on allocation made in memory type that is not
+`HOST_VISIBLE`.
+
+This function always fails when called for allocation that was created with
+#VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT flag. Such allocations cannot be
+mapped.
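+
+A sketch of typical use (assuming `myData` is data you want to upload):
+
+\code
+void* mappedData;
+if(vmaMapMemory(allocator, allocation, &mappedData) == VK_SUCCESS)
+{
+    memcpy(mappedData, &myData, sizeof(myData));
+    vmaUnmapMemory(allocator, allocation);
+}
+\endcode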
+*/
+VkResult vmaMapMemory(
+ VmaAllocator allocator,
+ VmaAllocation allocation,
+ void **ppData);
+
+/** \brief Unmaps memory represented by given allocation, mapped previously using vmaMapMemory().
+
+For details, see description of vmaMapMemory().
+*/
+void vmaUnmapMemory(
+ VmaAllocator allocator,
+ VmaAllocation allocation);
+
+/** \brief Flushes memory of given allocation.
+
+Calls `vkFlushMappedMemoryRanges()` for memory associated with given range of given allocation.
+
+- `offset` must be relative to the beginning of allocation.
+- `size` can be `VK_WHOLE_SIZE`. It means all memory from `offset` to the end of given allocation.
+- `offset` and `size` don't have to be aligned.
+ They are internally rounded down/up to a multiple of `nonCoherentAtomSize`.
+- If `size` is 0, this call is ignored.
+- If memory type that the `allocation` belongs to is not `HOST_VISIBLE` or it is `HOST_COHERENT`,
+ this call is ignored.
+
+Warning! `offset` and `size` are relative to the contents of given `allocation`.
+If you mean whole allocation, you can pass 0 and `VK_WHOLE_SIZE`, respectively.
+Do not pass allocation's offset as `offset`!!!
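+
+For example, to flush the whole allocation after a CPU write:
+
+\code
+vmaFlushAllocation(allocator, allocation, 0, VK_WHOLE_SIZE);
+\endcode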
+*/
+void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
+
+/** \brief Invalidates memory of given allocation.
+
+Calls `vkInvalidateMappedMemoryRanges()` for memory associated with given range of given allocation.
+
+- `offset` must be relative to the beginning of allocation.
+- `size` can be `VK_WHOLE_SIZE`. It means all memory from `offset` to the end of given allocation.
+- `offset` and `size` don't have to be aligned.
+ They are internally rounded down/up to a multiple of `nonCoherentAtomSize`.
+- If `size` is 0, this call is ignored.
+- If memory type that the `allocation` belongs to is not `HOST_VISIBLE` or it is `HOST_COHERENT`,
+ this call is ignored.
+
+Warning! `offset` and `size` are relative to the contents of given `allocation`.
+If you mean whole allocation, you can pass 0 and `VK_WHOLE_SIZE`, respectively.
+Do not pass allocation's offset as `offset`!!!
+*/
+void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
+
+/** \brief Checks magic number in margins around all allocations in given memory types (in both default and custom pools) in search for corruptions.
+
+@param memoryTypeBits Bit mask, where each bit set means that a memory type with that index should be checked.
+
+Corruption detection is enabled only when `VMA_DEBUG_DETECT_CORRUPTION` macro is defined to nonzero,
+`VMA_DEBUG_MARGIN` is defined to nonzero and only for memory types that are
+`HOST_VISIBLE` and `HOST_COHERENT`. For more information, see [Corruption detection](@ref debugging_memory_usage_corruption_detection).
+
+Possible return values:
+
+- `VK_ERROR_FEATURE_NOT_PRESENT` - corruption detection is not enabled for any of specified memory types.
+- `VK_SUCCESS` - corruption detection has been performed and succeeded.
+- `VK_ERROR_VALIDATION_FAILED_EXT` - corruption detection has been performed and found memory corruptions around one of the allocations.
+ `VMA_ASSERT` is also fired in that case.
+- Other value: Error returned by Vulkan, e.g. memory mapping failure.
+*/
+VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits);
+
+/** \struct VmaDefragmentationContext
+\brief An opaque object that represents a started defragmentation process.
+
+Fill structure #VmaDefragmentationInfo2 and call function vmaDefragmentationBegin() to create it.
+Call function vmaDefragmentationEnd() to destroy it.
+*/
+VK_DEFINE_HANDLE(VmaDefragmentationContext)
+
+/// Flags to be used in vmaDefragmentationBegin(). None at the moment. Reserved for future use.
+typedef enum VmaDefragmentationFlagBits {
+ VMA_DEFRAGMENTATION_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VmaDefragmentationFlagBits;
+typedef VkFlags VmaDefragmentationFlags;
+
+/** \brief Parameters for defragmentation.
+
+To be used with function vmaDefragmentationBegin().
+*/
+typedef struct VmaDefragmentationInfo2 {
+ /** \brief Reserved for future use. Should be 0.
+ */
+ VmaDefragmentationFlags flags;
+ /** \brief Number of allocations in `pAllocations` array.
+ */
+ uint32_t allocationCount;
+ /** \brief Pointer to array of allocations that can be defragmented.
+
+ The array should have `allocationCount` elements.
+ The array should not contain nulls.
+ Elements in the array should be unique - same allocation cannot occur twice.
+ It is safe to pass allocations that are in the lost state - they are ignored.
+ All allocations not present in this array are considered non-moveable during this defragmentation.
+ */
+ VmaAllocation *pAllocations;
+ /** \brief Optional, output. Pointer to an array that will be filled with information about whether the allocation at a given index has been changed during defragmentation.
+
+ The array should have `allocationCount` elements.
+ You can pass null if you are not interested in this information.
+ */
+ VkBool32 *pAllocationsChanged;
+ /** \brief Number of pools in `pPools` array.
+ */
+ uint32_t poolCount;
+ /** \brief Either null or pointer to array of pools to be defragmented.
+
+ All the allocations in the specified pools can be moved during defragmentation
+ and there is no way to check if they were really moved as in `pAllocationsChanged`,
+ so you must query all the allocations in all these pools for new `VkDeviceMemory`
+ and offset using vmaGetAllocationInfo() if you might need to recreate buffers
+ and images bound to them.
+
+ The array should have `poolCount` elements.
+ The array should not contain nulls.
+ Elements in the array should be unique - same pool cannot occur twice.
+
+ Using this array is equivalent to specifying all allocations from the pools in `pAllocations`.
+ It might be more efficient.
+ */
+ VmaPool *pPools;
+ /** \brief Maximum total number of bytes that can be copied while moving allocations to different places using transfers on the CPU side, like `memcpy()`, `memmove()`.
+
+ `VK_WHOLE_SIZE` means no limit.
+ */
+ VkDeviceSize maxCpuBytesToMove;
+ /** \brief Maximum number of allocations that can be moved to a different place using transfers on CPU side, like `memcpy()`, `memmove()`.
+
+ `UINT32_MAX` means no limit.
+ */
+ uint32_t maxCpuAllocationsToMove;
+ /** \brief Maximum total number of bytes that can be copied while moving allocations to different places using transfers on the GPU side, posted to `commandBuffer`.
+
+ `VK_WHOLE_SIZE` means no limit.
+ */
+ VkDeviceSize maxGpuBytesToMove;
+ /** \brief Maximum number of allocations that can be moved to a different place using transfers on GPU side, posted to `commandBuffer`.
+
+ `UINT32_MAX` means no limit.
+ */
+ uint32_t maxGpuAllocationsToMove;
+ /** \brief Optional. Command buffer where GPU copy commands will be posted.
+
+ If not null, it must be a valid command buffer handle that supports Transfer queue type.
+ It must be in the recording state and outside of a render pass instance.
+ You need to submit it and make sure it finished execution before calling vmaDefragmentationEnd().
+
+ Passing null means that only CPU defragmentation will be performed.
+ */
+ VkCommandBuffer commandBuffer;
+} VmaDefragmentationInfo2;
+
+/** \brief Deprecated. Optional configuration parameters to be passed to function vmaDefragment().
+
+\deprecated This is a part of the old interface. It is recommended to use structure #VmaDefragmentationInfo2 and function vmaDefragmentationBegin() instead.
+*/
+typedef struct VmaDefragmentationInfo {
+ /** \brief Maximum total number of bytes that can be copied while moving allocations to different places.
+
+ Default is `VK_WHOLE_SIZE`, which means no limit.
+ */
+ VkDeviceSize maxBytesToMove;
+ /** \brief Maximum number of allocations that can be moved to a different place.
+
+ Default is `UINT32_MAX`, which means no limit.
+ */
+ uint32_t maxAllocationsToMove;
+} VmaDefragmentationInfo;
+
+/** \brief Statistics returned by function vmaDefragment(). */
+typedef struct VmaDefragmentationStats {
+ /// Total number of bytes that have been copied while moving allocations to different places.
+ VkDeviceSize bytesMoved;
+ /// Total number of bytes that have been released to the system by freeing empty `VkDeviceMemory` objects.
+ VkDeviceSize bytesFreed;
+ /// Number of allocations that have been moved to different places.
+ uint32_t allocationsMoved;
+ /// Number of empty `VkDeviceMemory` objects that have been released to the system.
+ uint32_t deviceMemoryBlocksFreed;
+} VmaDefragmentationStats;
+
+/** \brief Begins defragmentation process.
+
+@param allocator Allocator object.
+@param pInfo Structure filled with parameters of defragmentation.
+@param[out] pStats Optional. Statistics of defragmentation. You can pass null if you are not interested in this information.
+@param[out] pContext Context object that must be passed to vmaDefragmentationEnd() to finish defragmentation.
+@return `VK_SUCCESS` and `*pContext == null` if defragmentation finished within this function call. `VK_NOT_READY` and `*pContext != null` if defragmentation has been started and you need to call vmaDefragmentationEnd() to finish it. Negative value in case of error.
+
+Use this function instead of the old, deprecated vmaDefragment().
+
+Warning! Between the call to vmaDefragmentationBegin() and vmaDefragmentationEnd():
+
+- You should not use any of the allocations passed as `pInfo->pAllocations` or
+ any allocations that belong to pools passed as `pInfo->pPools`,
+ including calling vmaGetAllocationInfo() or vmaTouchAllocation(), or accessing
+ their data.
+- Some mutexes protecting internal data structures may be locked, so trying to
+ make or free any allocations, bind buffers or images, map memory, or launch
+ another simultaneous defragmentation in between may cause a stall (when done on
+ another thread) or a deadlock (when done on the same thread), unless you are
+ 100% sure that the defragmented allocations are in different pools.
+- Information returned via `pStats` and `pInfo->pAllocationsChanged` is undefined.
+ It becomes valid after a call to vmaDefragmentationEnd().
+- If `pInfo->commandBuffer` is not null, you must submit that command buffer
+ and make sure it finished execution before calling vmaDefragmentationEnd().
+
+For more information and important limitations regarding defragmentation, see documentation chapter:
+[Defragmentation](@ref defragmentation).
+*/
+VkResult vmaDefragmentationBegin(
+ VmaAllocator allocator,
+ const VmaDefragmentationInfo2 *pInfo,
+ VmaDefragmentationStats *pStats,
+ VmaDefragmentationContext *pContext);
+
+/** \brief Ends defragmentation process.
+
+Use this function to finish defragmentation started by vmaDefragmentationBegin().
+It is safe to pass `context == null`. The function then does nothing.
+*/
+VkResult vmaDefragmentationEnd(
+ VmaAllocator allocator,
+ VmaDefragmentationContext context);
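+
+/* A minimal CPU-only sketch of the begin/end pair above, assuming an existing
+`allocator` and a placeholder array `allocs` of `allocCount` allocations;
+`commandBuffer` stays null, so no GPU copies are recorded:
+
+VmaDefragmentationInfo2 defragInfo = {};
+defragInfo.allocationCount = allocCount;
+defragInfo.pAllocations = allocs;
+defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE;
+defragInfo.maxCpuAllocationsToMove = UINT32_MAX;
+
+VmaDefragmentationContext defragCtx;
+vmaDefragmentationBegin(allocator, &defragInfo, nullptr, &defragCtx);
+// Recreate and rebind any buffers/images bound to moved allocations here,
+// as described in the Defragmentation chapter.
+vmaDefragmentationEnd(allocator, defragCtx);
+*/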
+
+/** \brief Deprecated. Compacts memory by moving allocations.
+
+@param pAllocations Array of allocations that can be moved during this compaction.
+@param allocationCount Number of elements in pAllocations and pAllocationsChanged arrays.
+@param[out] pAllocationsChanged Array of boolean values that will indicate whether matching allocation in pAllocations array has been moved. This parameter is optional. Pass null if you don't need this information.
+@param pDefragmentationInfo Configuration parameters. Optional - pass null to use default values.
+@param[out] pDefragmentationStats Statistics returned by the function. Optional - pass null if you don't need this information.
+@return `VK_SUCCESS` if completed, negative error code in case of error.
+
+\deprecated This is a part of the old interface. It is recommended to use structure #VmaDefragmentationInfo2 and function vmaDefragmentationBegin() instead.
+
+This function works by moving allocations to different places (different
+`VkDeviceMemory` objects and/or different offsets) in order to optimize memory
+usage. Only allocations that are in `pAllocations` array can be moved. All other
+allocations are considered nonmovable in this call. Basic rules:
+
+- Only allocations made in memory types that have
+ `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT` and `VK_MEMORY_PROPERTY_HOST_COHERENT_BIT`
+ flags can be compacted. You may pass other allocations but it makes no sense -
+ these will never be moved.
+- Custom pools created with #VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT or
+ #VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT flag are not defragmented. Allocations
+ passed to this function that come from such pools are ignored.
+- Allocations created with #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT or
+ created as dedicated allocations for any other reason are also ignored.
+- Both allocations made with or without #VMA_ALLOCATION_CREATE_MAPPED_BIT
+ flag can be compacted. If not persistently mapped, memory will be mapped
+ temporarily inside this function if needed.
+- You must not pass same #VmaAllocation object multiple times in `pAllocations` array.
+
+The function also frees empty `VkDeviceMemory` blocks.
+
+Warning: This function may be time-consuming, so you shouldn't call it too often
+(like after every resource creation/destruction).
+You can call it on special occasions (like when reloading a game level or
+when you just destroyed a lot of objects). Calling it every frame may be OK, but
+you should measure that on your platform.
+
+For more information, see [Defragmentation](@ref defragmentation) chapter.
+*/
+VkResult vmaDefragment(
+ VmaAllocator allocator,
+ VmaAllocation *pAllocations,
+ size_t allocationCount,
+ VkBool32 *pAllocationsChanged,
+ const VmaDefragmentationInfo *pDefragmentationInfo,
+ VmaDefragmentationStats *pDefragmentationStats);
+
+/** \brief Binds buffer to allocation.
+
+Binds the specified buffer to the region of memory represented by the specified allocation.
+Gets the `VkDeviceMemory` handle and offset from the allocation.
+If you want to create a buffer, allocate memory for it and bind them together separately,
+you should use this function for binding instead of standard `vkBindBufferMemory()`,
+because it ensures proper synchronization so that when a `VkDeviceMemory` object is used by multiple
+allocations, calls to `vkBind*Memory()` or `vkMapMemory()` won't happen from multiple threads simultaneously
+(which is illegal in Vulkan).
+
+It is recommended to use function vmaCreateBuffer() instead of this one.
+*/
+VkResult vmaBindBufferMemory(
+ VmaAllocator allocator,
+ VmaAllocation allocation,
+ VkBuffer buffer);
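+
+/* A sketch of the separate create/allocate/bind path, assuming existing `device`
+and `allocator` handles and filled `bufCreateInfo`/`allocCreateInfo` structures,
+and using vmaAllocateMemoryForBuffer() declared earlier in this header:
+
+VkBuffer buf;
+vkCreateBuffer(device, &bufCreateInfo, nullptr, &buf);
+VmaAllocation alloc;
+vmaAllocateMemoryForBuffer(allocator, buf, &allocCreateInfo, &alloc, nullptr);
+vmaBindBufferMemory(allocator, alloc, buf); // Instead of vkBindBufferMemory().
+*/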
+
+/** \brief Binds image to allocation.
+
+Binds the specified image to the region of memory represented by the specified allocation.
+Gets the `VkDeviceMemory` handle and offset from the allocation.
+If you want to create an image, allocate memory for it and bind them together separately,
+you should use this function for binding instead of standard `vkBindImageMemory()`,
+because it ensures proper synchronization so that when a `VkDeviceMemory` object is used by multiple
+allocations, calls to `vkBind*Memory()` or `vkMapMemory()` won't happen from multiple threads simultaneously
+(which is illegal in Vulkan).
+
+It is recommended to use function vmaCreateImage() instead of this one.
+*/
+VkResult vmaBindImageMemory(
+ VmaAllocator allocator,
+ VmaAllocation allocation,
+ VkImage image);
+
+/**
+@param[out] pBuffer Buffer that was created.
+@param[out] pAllocation Allocation that was created.
+@param[out] pAllocationInfo Optional. Information about allocated memory. It can be later fetched using function vmaGetAllocationInfo().
+
+This function automatically:
+
+-# Creates buffer.
+-# Allocates appropriate memory for it.
+-# Binds the buffer with the memory.
+
+If any of these operations fail, the buffer and allocation are not created,
+the returned value is a negative error code, and `*pBuffer` and `*pAllocation` are null.
+
+If the function succeeded, you must destroy both buffer and allocation when you
+no longer need them using either convenience function vmaDestroyBuffer() or
+separately, using `vkDestroyBuffer()` and vmaFreeMemory().
+
+If the VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT flag was used,
+the VK_KHR_dedicated_allocation extension is used internally to query the driver whether
+it requires or prefers the new buffer to have a dedicated allocation. If yes,
+and if a dedicated allocation is possible (VmaAllocationCreateInfo::pool is null
+and VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT is not used), it creates a dedicated
+allocation for this buffer, just like when using
+VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT.
+*/
+VkResult vmaCreateBuffer(
+ VmaAllocator allocator,
+ const VkBufferCreateInfo *pBufferCreateInfo,
+ const VmaAllocationCreateInfo *pAllocationCreateInfo,
+ VkBuffer *pBuffer,
+ VmaAllocation *pAllocation,
+ VmaAllocationInfo *pAllocationInfo);
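+
+/* A minimal sketch, assuming an existing `allocator`; the size and usage flags
+are illustrative:
+
+VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
+bufferInfo.size = 65536;
+bufferInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
+
+VmaAllocationCreateInfo allocInfo = {};
+allocInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
+
+VkBuffer buffer;
+VmaAllocation allocation;
+vmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &buffer, &allocation, nullptr);
+*/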
+
+/** \brief Destroys Vulkan buffer and frees allocated memory.
+
+This is just a convenience function equivalent to:
+
+\code
+vkDestroyBuffer(device, buffer, allocationCallbacks);
+vmaFreeMemory(allocator, allocation);
+\endcode
+
+It is safe to pass null as buffer and/or allocation.
+*/
+void vmaDestroyBuffer(
+ VmaAllocator allocator,
+ VkBuffer buffer,
+ VmaAllocation allocation);
+
+/// Function similar to vmaCreateBuffer().
+VkResult vmaCreateImage(
+ VmaAllocator allocator,
+ const VkImageCreateInfo *pImageCreateInfo,
+ const VmaAllocationCreateInfo *pAllocationCreateInfo,
+ VkImage *pImage,
+ VmaAllocation *pAllocation,
+ VmaAllocationInfo *pAllocationInfo);
+
+/** \brief Destroys Vulkan image and frees allocated memory.
+
+This is just a convenience function equivalent to:
+
+\code
+vkDestroyImage(device, image, allocationCallbacks);
+vmaFreeMemory(allocator, allocation);
+\endcode
+
+It is safe to pass null as image and/or allocation.
+*/
+void vmaDestroyImage(
+ VmaAllocator allocator,
+ VkImage image,
+ VmaAllocation allocation);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // AMD_VULKAN_MEMORY_ALLOCATOR_H
+
+// For Visual Studio IntelliSense.
+#if defined(__cplusplus) && defined(__INTELLISENSE__)
+#define VMA_IMPLEMENTATION
+#endif
+
+#ifdef VMA_IMPLEMENTATION
+#undef VMA_IMPLEMENTATION
+
+#include <cstdint>
+#include <cstdlib>
+#include <cstring>
+
+/*******************************************************************************
+CONFIGURATION SECTION
+
+Define some of these macros before each #include of this header or change them
+here if you need behavior other than the default, depending on your environment.
+*/
+
+/*
+Define this macro to 1 to make the library fetch pointers to Vulkan functions
+internally, like:
+
+ vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
+
+Define to 0 if you are going to provide your own pointers to Vulkan functions via
+VmaAllocatorCreateInfo::pVulkanFunctions.
+*/
+#if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
+#define VMA_STATIC_VULKAN_FUNCTIONS 1
+#endif
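+
+/* A sketch of the alternative, assuming VMA_STATIC_VULKAN_FUNCTIONS is defined
+to 0; `myVkAllocateMemory` is a placeholder for a pointer loaded e.g. via
+vkGetDeviceProcAddr(), and `allocatorCreateInfo` is the VmaAllocatorCreateInfo
+being filled before vmaCreateAllocator():
+
+VmaVulkanFunctions vulkanFunctions = {};
+vulkanFunctions.vkAllocateMemory = myVkAllocateMemory;
+// ... fill the remaining members the same way ...
+allocatorCreateInfo.pVulkanFunctions = &vulkanFunctions;
+*/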
+
+// Define this macro to 1 to make the library use STL containers instead of its own implementation.
+//#define VMA_USE_STL_CONTAINERS 1
+
+/* Set this macro to 1 to make the library include and use STL containers:
+std::pair, std::vector, std::list, std::unordered_map.
+
+Set it to 0 or leave it undefined to make the library use its own implementation of
+the containers.
+*/
+#if VMA_USE_STL_CONTAINERS
+#define VMA_USE_STL_VECTOR 1
+#define VMA_USE_STL_UNORDERED_MAP 1
+#define VMA_USE_STL_LIST 1
+#endif
+
+#ifndef VMA_USE_STL_SHARED_MUTEX
+// Compiler conforms to C++17.
+#if __cplusplus >= 201703L
+#define VMA_USE_STL_SHARED_MUTEX 1
+// Visual Studio defines __cplusplus properly only when passed the additional parameter /Zc:__cplusplus.
+// Otherwise it's always 199711L, even though shared_mutex has worked since Visual Studio 2015 Update 2.
+// See: https://blogs.msdn.microsoft.com/vcblog/2018/04/09/msvc-now-correctly-reports-__cplusplus/
+#elif defined(_MSC_FULL_VER) && _MSC_FULL_VER >= 190023918 && __cplusplus == 199711L && _MSVC_LANG >= 201703L
+#define VMA_USE_STL_SHARED_MUTEX 1
+#else
+#define VMA_USE_STL_SHARED_MUTEX 0
+#endif
+#endif
+
+/*
+THESE INCLUDES ARE NOT ENABLED BY DEFAULT.
+The library has its own container implementation.
+*/
+#if VMA_USE_STL_VECTOR
+#include <vector>
+#endif
+
+#if VMA_USE_STL_UNORDERED_MAP
+#include <unordered_map>
+#endif
+
+#if VMA_USE_STL_LIST
+#include <list>
+#endif
+
+/*
+The following headers are used in this CONFIGURATION section only, so feel free to
+remove them if not needed.
+*/
+#include <algorithm> // for min, max
+#include <cassert> // for assert
+#include <mutex>
+
+#ifndef VMA_NULL
+// Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
+#define VMA_NULL nullptr
+#endif
+
+#if defined(__ANDROID_API__) && (__ANDROID_API__ < 16)
+#include <cstdlib>
+void *aligned_alloc(size_t alignment, size_t size) {
+ // alignment must be >= sizeof(void*)
+ if (alignment < sizeof(void *)) {
+ alignment = sizeof(void *);
+ }
+
+ return memalign(alignment, size);
+}
+#elif defined(__APPLE__) || defined(__ANDROID__)
+#include <cstdlib>
+void *aligned_alloc(size_t alignment, size_t size) {
+ // alignment must be >= sizeof(void*)
+ if (alignment < sizeof(void *)) {
+ alignment = sizeof(void *);
+ }
+
+ void *pointer;
+ if (posix_memalign(&pointer, alignment, size) == 0)
+ return pointer;
+ return VMA_NULL;
+}
+#endif
+
+// If your compiler is not compatible with C++11 and the definition of the
+// aligned_alloc() function is missing, uncommenting the following line may help:
+
+//#include <malloc.h>
+
+// Normal assert to check for programmer's errors, especially in Debug configuration.
+#ifndef VMA_ASSERT
+#ifdef _DEBUG
+#define VMA_ASSERT(expr) assert(expr)
+#else
+#define VMA_ASSERT(expr)
+#endif
+#endif
+
+// Assert that will be called very often, e.g. inside data structures like operator[].
+// Making it non-empty can make the program slow.
+#ifndef VMA_HEAVY_ASSERT
+#ifdef _DEBUG
+#define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
+#else
+#define VMA_HEAVY_ASSERT(expr)
+#endif
+#endif
+
+#ifndef VMA_ALIGN_OF
+#define VMA_ALIGN_OF(type) (__alignof(type))
+#endif
+
+#ifndef VMA_SYSTEM_ALIGNED_MALLOC
+#if defined(_WIN32)
+#define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
+#else
+#define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size)))
+#endif
+#endif
+
+#ifndef VMA_SYSTEM_FREE
+#if defined(_WIN32)
+#define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
+#else
+#define VMA_SYSTEM_FREE(ptr) free(ptr)
+#endif
+#endif
+
+#ifndef VMA_MIN
+#define VMA_MIN(v1, v2) (std::min((v1), (v2)))
+#endif
+
+#ifndef VMA_MAX
+#define VMA_MAX(v1, v2) (std::max((v1), (v2)))
+#endif
+
+#ifndef VMA_SWAP
+#define VMA_SWAP(v1, v2) std::swap((v1), (v2))
+#endif
+
+#ifndef VMA_SORT
+#define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
+#endif
+
+#ifndef VMA_DEBUG_LOG
+#define VMA_DEBUG_LOG(format, ...)
+/*
+ #define VMA_DEBUG_LOG(format, ...) do { \
+ printf(format, __VA_ARGS__); \
+ printf("\n"); \
+ } while(false)
+ */
+#endif
+
+// Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
+#if VMA_STATS_STRING_ENABLED
+static inline void VmaUint32ToStr(char *outStr, size_t strLen, uint32_t num) {
+ snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
+}
+static inline void VmaUint64ToStr(char *outStr, size_t strLen, uint64_t num) {
+ snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
+}
+static inline void VmaPtrToStr(char *outStr, size_t strLen, const void *ptr) {
+ snprintf(outStr, strLen, "%p", ptr);
+}
+#endif
+
+#ifndef VMA_MUTEX
+class VmaMutex {
+public:
+ void Lock() { m_Mutex.lock(); }
+ void Unlock() { m_Mutex.unlock(); }
+
+private:
+ std::mutex m_Mutex;
+};
+#define VMA_MUTEX VmaMutex
+#endif
+
+// Read-write mutex, where "read" is shared access, "write" is exclusive access.
+#ifndef VMA_RW_MUTEX
+#if VMA_USE_STL_SHARED_MUTEX
+// Use std::shared_mutex from C++17.
+#include <shared_mutex>
+class VmaRWMutex {
+public:
+ void LockRead() { m_Mutex.lock_shared(); }
+ void UnlockRead() { m_Mutex.unlock_shared(); }
+ void LockWrite() { m_Mutex.lock(); }
+ void UnlockWrite() { m_Mutex.unlock(); }
+
+private:
+ std::shared_mutex m_Mutex;
+};
+#define VMA_RW_MUTEX VmaRWMutex
+#elif defined(_WIN32) && defined(WINVER) && WINVER >= 0x0600
+// Use SRWLOCK from WinAPI.
+// Minimum supported client = Windows Vista, server = Windows Server 2008.
+class VmaRWMutex {
+public:
+ VmaRWMutex() { InitializeSRWLock(&m_Lock); }
+ void LockRead() { AcquireSRWLockShared(&m_Lock); }
+ void UnlockRead() { ReleaseSRWLockShared(&m_Lock); }
+ void LockWrite() { AcquireSRWLockExclusive(&m_Lock); }
+ void UnlockWrite() { ReleaseSRWLockExclusive(&m_Lock); }
+
+private:
+ SRWLOCK m_Lock;
+};
+#define VMA_RW_MUTEX VmaRWMutex
+#else
+// Less efficient fallback: Use normal mutex.
+class VmaRWMutex {
+public:
+ void LockRead() { m_Mutex.Lock(); }
+ void UnlockRead() { m_Mutex.Unlock(); }
+ void LockWrite() { m_Mutex.Lock(); }
+ void UnlockWrite() { m_Mutex.Unlock(); }
+
+private:
+ VMA_MUTEX m_Mutex;
+};
+#define VMA_RW_MUTEX VmaRWMutex
+#endif // #if VMA_USE_STL_SHARED_MUTEX
+#endif // #ifndef VMA_RW_MUTEX
+
+/*
+If providing your own implementation, you need to implement a subset of std::atomic:
+
+- Constructor(uint32_t desired)
+- uint32_t load() const
+- void store(uint32_t desired)
+- bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
+*/
+#ifndef VMA_ATOMIC_UINT32
+#include <atomic>
+#define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
+#endif
+
+#ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
+/**
+ Every allocation will have its own memory block.
+ Define to 1 for debugging purposes only.
+ */
+#define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
+#endif
+
+#ifndef VMA_DEBUG_ALIGNMENT
+/**
+ Minimum alignment of all allocations, in bytes.
+ Set to more than 1 for debugging purposes only. Must be power of two.
+ */
+#define VMA_DEBUG_ALIGNMENT (1)
+#endif
+
+#ifndef VMA_DEBUG_MARGIN
+/**
+ Minimum margin before and after every allocation, in bytes.
+ Set nonzero for debugging purposes only.
+ */
+#define VMA_DEBUG_MARGIN (0)
+#endif
+
+#ifndef VMA_DEBUG_INITIALIZE_ALLOCATIONS
+/**
+ Define this macro to 1 to automatically fill new allocations and destroyed
+ allocations with some bit pattern.
+ */
+#define VMA_DEBUG_INITIALIZE_ALLOCATIONS (0)
+#endif
+
+#ifndef VMA_DEBUG_DETECT_CORRUPTION
+/**
+ Define this macro to 1 together with non-zero value of VMA_DEBUG_MARGIN to
+ enable writing magic value to the margin before and after every allocation and
+ validating it, so that memory corruptions (out-of-bounds writes) are detected.
+ */
+#define VMA_DEBUG_DETECT_CORRUPTION (0)
+#endif
+
+#ifndef VMA_DEBUG_GLOBAL_MUTEX
+/**
+ Set this to 1 for debugging purposes only, to enable single mutex protecting all
+ entry calls to the library. Can be useful for debugging multithreading issues.
+ */
+#define VMA_DEBUG_GLOBAL_MUTEX (0)
+#endif
+
+#ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
+/**
+ Minimum value for VkPhysicalDeviceLimits::bufferImageGranularity.
+ Set to more than 1 for debugging purposes only. Must be power of two.
+ */
+#define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
+#endif
+
+#ifndef VMA_SMALL_HEAP_MAX_SIZE
+/// Maximum size of a memory heap in Vulkan to consider it "small".
+#define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
+#endif
+
+#ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
+/// Default size of a block allocated as single VkDeviceMemory from a "large" heap.
+#define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
+#endif
+
+#ifndef VMA_CLASS_NO_COPY
+#define VMA_CLASS_NO_COPY(className) \
+private: \
+ className(const className &) = delete; \
+ className &operator=(const className &) = delete;
+#endif
+
+static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;
+
+// Decimal 2139416166, float NaN, little-endian binary 66 E6 84 7F.
+static const uint32_t VMA_CORRUPTION_DETECTION_MAGIC_VALUE = 0x7F84E666;
+
+static const uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED = 0xDC;
+static const uint8_t VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF;
+
+/*******************************************************************************
+END OF CONFIGURATION
+*/
+
+static const uint32_t VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET = 0x10000000u;
+
+static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
+ VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL
+};
+
+// Returns number of bits set to 1 in (v).
+static inline uint32_t VmaCountBitsSet(uint32_t v) {
+ uint32_t c = v - ((v >> 1) & 0x55555555);
+ c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
+ c = ((c >> 4) + c) & 0x0F0F0F0F;
+ c = ((c >> 8) + c) & 0x00FF00FF;
+ c = ((c >> 16) + c) & 0x0000FFFF;
+ return c;
+}
+
+// Aligns given value up to the nearest multiple of align value. For example: VmaAlignUp(11, 8) = 16.
+// Use types like uint32_t, uint64_t as T.
+template <typename T>
+static inline T VmaAlignUp(T val, T align) {
+ return (val + align - 1) / align * align;
+}
+// Aligns given value down to the nearest multiple of align value. For example: VmaAlignDown(11, 8) = 8.
+// Use types like uint32_t, uint64_t as T.
+template <typename T>
+static inline T VmaAlignDown(T val, T align) {
+ return val / align * align;
+}
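+
+// Example values: VmaAlignUp<uint32_t>(11, 8) == 16, VmaAlignUp<uint32_t>(16, 8) == 16,
+// VmaAlignDown<uint32_t>(11, 8) == 8. Note these work for any positive align,
+// not only powers of 2.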
+
+// Division with mathematical rounding to nearest number.
+template <typename T>
+static inline T VmaRoundDiv(T x, T y) {
+ return (x + (y / (T)2)) / y;
+}
+
+/*
+Returns true if the given number is a power of two.
+T must be an unsigned integer type, or a signed integer that is always nonnegative.
+For 0, it returns true.
+*/
+template <typename T>
+inline bool VmaIsPow2(T x) {
+ return (x & (x - 1)) == 0;
+}
+
+// Returns the smallest power of 2 greater than or equal to v.
+static inline uint32_t VmaNextPow2(uint32_t v) {
+ v--;
+ v |= v >> 1;
+ v |= v >> 2;
+ v |= v >> 4;
+ v |= v >> 8;
+ v |= v >> 16;
+ v++;
+ return v;
+}
+static inline uint64_t VmaNextPow2(uint64_t v) {
+ v--;
+ v |= v >> 1;
+ v |= v >> 2;
+ v |= v >> 4;
+ v |= v >> 8;
+ v |= v >> 16;
+ v |= v >> 32;
+ v++;
+ return v;
+}
+
+// Returns the largest power of 2 less than or equal to v.
+static inline uint32_t VmaPrevPow2(uint32_t v) {
+ v |= v >> 1;
+ v |= v >> 2;
+ v |= v >> 4;
+ v |= v >> 8;
+ v |= v >> 16;
+ v = v ^ (v >> 1);
+ return v;
+}
+static inline uint64_t VmaPrevPow2(uint64_t v) {
+ v |= v >> 1;
+ v |= v >> 2;
+ v |= v >> 4;
+ v |= v >> 8;
+ v |= v >> 16;
+ v |= v >> 32;
+ v = v ^ (v >> 1);
+ return v;
+}
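+
+// Example values: VmaNextPow2(17u) == 32, VmaNextPow2(32u) == 32, VmaPrevPow2(17u) == 16.
+// The shift cascade smears the highest set bit into all lower bits; the final
+// step then reduces that mask to a single power of 2.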
+
+static inline bool VmaStrIsEmpty(const char *pStr) {
+ return pStr == VMA_NULL || *pStr == '\0';
+}
+
+#if VMA_STATS_STRING_ENABLED
+
+static const char *VmaAlgorithmToStr(uint32_t algorithm) {
+ switch (algorithm) {
+ case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT:
+ return "Linear";
+ case VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT:
+ return "Buddy";
+ case 0:
+ return "Default";
+ default:
+ VMA_ASSERT(0);
+ return "";
+ }
+}
+
+#endif // #if VMA_STATS_STRING_ENABLED
+
+#ifndef VMA_SORT
+
+template <typename Iterator, typename Compare>
+Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp) {
+ Iterator centerValue = end;
+ --centerValue;
+ Iterator insertIndex = beg;
+ for (Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex) {
+ if (cmp(*memTypeIndex, *centerValue)) {
+ if (insertIndex != memTypeIndex) {
+ VMA_SWAP(*memTypeIndex, *insertIndex);
+ }
+ ++insertIndex;
+ }
+ }
+ if (insertIndex != centerValue) {
+ VMA_SWAP(*insertIndex, *centerValue);
+ }
+ return insertIndex;
+}
+
+template <typename Iterator, typename Compare>
+void VmaQuickSort(Iterator beg, Iterator end, Compare cmp) {
+ if (beg < end) {
+ Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
+ VmaQuickSort<Iterator, Compare>(beg, it, cmp);
+ VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
+ }
+}
+
+#define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)
+
+#endif // #ifndef VMA_SORT
+
+/*
+Returns true if two memory blocks occupy overlapping pages.
+ResourceA must be at a lower memory offset than ResourceB.
+
+Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
+chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
+*/
+static inline bool VmaBlocksOnSamePage(
+ VkDeviceSize resourceAOffset,
+ VkDeviceSize resourceASize,
+ VkDeviceSize resourceBOffset,
+ VkDeviceSize pageSize) {
+ VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
+ VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
+ VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
+ VkDeviceSize resourceBStart = resourceBOffset;
+ VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
+ return resourceAEndPage == resourceBStartPage;
+}
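+
+// Worked example with pageSize = 4096: a resource occupying [0, 4000) ends on
+// page 0, so a second resource starting at offset 4096 (page 1) yields false,
+// while one starting at offset 4000 (still page 0) yields true.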
+
+enum VmaSuballocationType {
+ VMA_SUBALLOCATION_TYPE_FREE = 0,
+ VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
+ VMA_SUBALLOCATION_TYPE_BUFFER = 2,
+ VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
+ VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
+ VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
+ VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
+};
+
+/*
+Returns true if the given suballocation types could conflict and must respect
+VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is a buffer
+or linear image and the other is an optimal image. If a type is unknown, it behaves
+conservatively.
+*/
+static inline bool VmaIsBufferImageGranularityConflict(
+ VmaSuballocationType suballocType1,
+ VmaSuballocationType suballocType2) {
+ if (suballocType1 > suballocType2) {
+ VMA_SWAP(suballocType1, suballocType2);
+ }
+
+ switch (suballocType1) {
+ case VMA_SUBALLOCATION_TYPE_FREE:
+ return false;
+ case VMA_SUBALLOCATION_TYPE_UNKNOWN:
+ return true;
+ case VMA_SUBALLOCATION_TYPE_BUFFER:
+ return suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
+ suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
+ case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
+ return suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
+ suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
+ suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
+ case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
+ return suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
+ case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
+ return false;
+ default:
+ VMA_ASSERT(0);
+ return true;
+ }
+}
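+
+// For example, a BUFFER placed next to an IMAGE_OPTIMAL conflicts (returns true),
+// while two buffers, or a buffer next to an IMAGE_LINEAR, do not (returns false).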
+
+static void VmaWriteMagicValue(void *pData, VkDeviceSize offset) {
+ uint32_t *pDst = (uint32_t *)((char *)pData + offset);
+ const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
+ for (size_t i = 0; i < numberCount; ++i, ++pDst) {
+ *pDst = VMA_CORRUPTION_DETECTION_MAGIC_VALUE;
+ }
+}
+
+static bool VmaValidateMagicValue(const void *pData, VkDeviceSize offset) {
+ const uint32_t *pSrc = (const uint32_t *)((const char *)pData + offset);
+ const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
+ for (size_t i = 0; i < numberCount; ++i, ++pSrc) {
+ if (*pSrc != VMA_CORRUPTION_DETECTION_MAGIC_VALUE) {
+ return false;
+ }
+ }
+ return true;
+}
+
+/*
+Fills structure with parameters of an example buffer to be used for transfers
+during GPU memory defragmentation.
+*/
+static void VmaFillGpuDefragmentationBufferCreateInfo(VkBufferCreateInfo &outBufCreateInfo) {
+ memset(&outBufCreateInfo, 0, sizeof(outBufCreateInfo));
+ outBufCreateInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
+ outBufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
+ outBufCreateInfo.size = (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE; // Example size.
+}
+
+// Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
+struct VmaMutexLock {
+ VMA_CLASS_NO_COPY(VmaMutexLock)
+public:
+ VmaMutexLock(VMA_MUTEX &mutex, bool useMutex = true) :
+ m_pMutex(useMutex ? &mutex : VMA_NULL) {
+ if (m_pMutex) {
+ m_pMutex->Lock();
+ }
+ }
+ ~VmaMutexLock() {
+ if (m_pMutex) {
+ m_pMutex->Unlock();
+ }
+ }
+
+private:
+ VMA_MUTEX *m_pMutex;
+};
+
+// Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for reading.
+struct VmaMutexLockRead {
+ VMA_CLASS_NO_COPY(VmaMutexLockRead)
+public:
+ VmaMutexLockRead(VMA_RW_MUTEX &mutex, bool useMutex) :
+ m_pMutex(useMutex ? &mutex : VMA_NULL) {
+ if (m_pMutex) {
+ m_pMutex->LockRead();
+ }
+ }
+ ~VmaMutexLockRead() {
+ if (m_pMutex) {
+ m_pMutex->UnlockRead();
+ }
+ }
+
+private:
+ VMA_RW_MUTEX *m_pMutex;
+};
+
+// Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for writing.
+struct VmaMutexLockWrite {
+ VMA_CLASS_NO_COPY(VmaMutexLockWrite)
+public:
+ VmaMutexLockWrite(VMA_RW_MUTEX &mutex, bool useMutex) :
+ m_pMutex(useMutex ? &mutex : VMA_NULL) {
+ if (m_pMutex) {
+ m_pMutex->LockWrite();
+ }
+ }
+ ~VmaMutexLockWrite() {
+ if (m_pMutex) {
+ m_pMutex->UnlockWrite();
+ }
+ }
+
+private:
+ VMA_RW_MUTEX *m_pMutex;
+};
+
+#if VMA_DEBUG_GLOBAL_MUTEX
+static VMA_MUTEX gDebugGlobalMutex;
+#define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
+#else
+#define VMA_DEBUG_GLOBAL_MUTEX_LOCK
+#endif
+
+// Minimum size of a free suballocation to register it in the free suballocation collection.
+static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;
+
+/*
+Performs binary search and returns an iterator to the first element that is greater
+than or equal to (key), according to comparison (cmp).
+
+Cmp should return true if its first argument is less than its second argument.
+
+The returned iterator points to the found element, if present in the collection,
+or to the place where a new element with value (key) should be inserted.
+*/
+template <typename CmpLess, typename IterT, typename KeyT>
+static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, CmpLess cmp) {
+ size_t down = 0, up = (end - beg);
+ while (down < up) {
+ const size_t mid = (down + up) / 2;
+ if (cmp(*(beg + mid), key)) {
+ down = mid + 1;
+ } else {
+ up = mid;
+ }
+ }
+ return beg + down;
+}
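+
+// E.g. for the sorted array {1, 3, 3, 7} with cmp being operator<: key 3 yields
+// an iterator to the first 3 (index 1); key 4 yields an iterator to 7 (index 3),
+// which is also the insertion point - the same contract as std::lower_bound.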
+
+/*
+Returns true if all pointers in the array are non-null and unique.
+Warning! O(n^2) complexity. Use only inside VMA_HEAVY_ASSERT.
+T must be pointer type, e.g. VmaAllocation, VmaPool.
+*/
+template <typename T>
+static bool VmaValidatePointerArray(uint32_t count, const T *arr) {
+ for (uint32_t i = 0; i < count; ++i) {
+ const T iPtr = arr[i];
+ if (iPtr == VMA_NULL) {
+ return false;
+ }
+ for (uint32_t j = i + 1; j < count; ++j) {
+ if (iPtr == arr[j]) {
+ return false;
+ }
+ }
+ }
+ return true;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// Memory allocation
+
+static void *VmaMalloc(const VkAllocationCallbacks *pAllocationCallbacks, size_t size, size_t alignment) {
+ if ((pAllocationCallbacks != VMA_NULL) &&
+ (pAllocationCallbacks->pfnAllocation != VMA_NULL)) {
+ return (*pAllocationCallbacks->pfnAllocation)(
+ pAllocationCallbacks->pUserData,
+ size,
+ alignment,
+ VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ } else {
+ return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
+ }
+}
+
+static void VmaFree(const VkAllocationCallbacks *pAllocationCallbacks, void *ptr) {
+ if ((pAllocationCallbacks != VMA_NULL) &&
+ (pAllocationCallbacks->pfnFree != VMA_NULL)) {
+ (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
+ } else {
+ VMA_SYSTEM_FREE(ptr);
+ }
+}
+
+template <typename T>
+static T *VmaAllocate(const VkAllocationCallbacks *pAllocationCallbacks) {
+ return (T *)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
+}
+
+template <typename T>
+static T *VmaAllocateArray(const VkAllocationCallbacks *pAllocationCallbacks, size_t count) {
+ return (T *)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
+}
+
+#define vma_new(allocator, type) new (VmaAllocate<type>(allocator))(type)
+
+#define vma_new_array(allocator, type, count) new (VmaAllocateArray<type>((allocator), (count)))(type)
+
+template <typename T>
+static void vma_delete(const VkAllocationCallbacks *pAllocationCallbacks, T *ptr) {
+ ptr->~T();
+ VmaFree(pAllocationCallbacks, ptr);
+}
+
+template <typename T>
+static void vma_delete_array(const VkAllocationCallbacks *pAllocationCallbacks, T *ptr, size_t count) {
+ if (ptr != VMA_NULL) {
+ for (size_t i = count; i--;) {
+ ptr[i].~T();
+ }
+ VmaFree(pAllocationCallbacks, ptr);
+ }
+}
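+
+// Memory obtained with vma_new()/vma_new_array() must be released with the
+// matching vma_delete()/vma_delete_array(), e.g. (with a placeholder Item type):
+//   Item *items = vma_new_array(pAllocationCallbacks, Item, 16);
+//   vma_delete_array(pAllocationCallbacks, items, 16);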
+
+// STL-compatible allocator.
+template <typename T>
+class VmaStlAllocator {
+public:
+ const VkAllocationCallbacks *const m_pCallbacks;
+ typedef T value_type;
+
+ VmaStlAllocator(const VkAllocationCallbacks *pCallbacks) :
+ m_pCallbacks(pCallbacks) {}
+ template <typename U>
+ VmaStlAllocator(const VmaStlAllocator<U> &src) :
+ m_pCallbacks(src.m_pCallbacks) {}
+
+ T *allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
+ void deallocate(T *p, size_t n) { VmaFree(m_pCallbacks, p); }
+
+ template <typename U>
+ bool operator==(const VmaStlAllocator<U> &rhs) const {
+ return m_pCallbacks == rhs.m_pCallbacks;
+ }
+ template <typename U>
+ bool operator!=(const VmaStlAllocator<U> &rhs) const {
+ return m_pCallbacks != rhs.m_pCallbacks;
+ }
+
+ VmaStlAllocator &operator=(const VmaStlAllocator &x) = delete;
+};
+
+#if VMA_USE_STL_VECTOR
+
+#define VmaVector std::vector
+
+template <typename T, typename allocatorT>
+static void VmaVectorInsert(std::vector<T, allocatorT> &vec, size_t index, const T &item) {
+ vec.insert(vec.begin() + index, item);
+}
+
+template <typename T, typename allocatorT>
+static void VmaVectorRemove(std::vector<T, allocatorT> &vec, size_t index) {
+ vec.erase(vec.begin() + index);
+}
+
+#else // #if VMA_USE_STL_VECTOR
+
+/* Class with an interface compatible with a subset of std::vector.
+T must be POD because constructors and destructors are not called and memcpy is
+used for these objects. */
+template <typename T, typename AllocatorT>
+class VmaVector {
+public:
+ typedef T value_type;
+
+ VmaVector(const AllocatorT &allocator) :
+ m_Allocator(allocator),
+ m_pArray(VMA_NULL),
+ m_Count(0),
+ m_Capacity(0) {
+ }
+
+ VmaVector(size_t count, const AllocatorT &allocator) :
+ m_Allocator(allocator),
+ m_pArray(count ? (T *)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
+ m_Count(count),
+ m_Capacity(count) {
+ }
+
+ VmaVector(const VmaVector<T, AllocatorT> &src) :
+ m_Allocator(src.m_Allocator),
+ m_pArray(src.m_Count ? (T *)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
+ m_Count(src.m_Count),
+ m_Capacity(src.m_Count) {
+ if (m_Count != 0) {
+ memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
+ }
+ }
+
+ ~VmaVector() {
+ VmaFree(m_Allocator.m_pCallbacks, m_pArray);
+ }
+
+ VmaVector &operator=(const VmaVector<T, AllocatorT> &rhs) {
+ if (&rhs != this) {
+ resize(rhs.m_Count);
+ if (m_Count != 0) {
+ memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
+ }
+ }
+ return *this;
+ }
+
+ bool empty() const { return m_Count == 0; }
+ size_t size() const { return m_Count; }
+ T *data() { return m_pArray; }
+ const T *data() const { return m_pArray; }
+
+ T &operator[](size_t index) {
+ VMA_HEAVY_ASSERT(index < m_Count);
+ return m_pArray[index];
+ }
+ const T &operator[](size_t index) const {
+ VMA_HEAVY_ASSERT(index < m_Count);
+ return m_pArray[index];
+ }
+
+ T &front() {
+ VMA_HEAVY_ASSERT(m_Count > 0);
+ return m_pArray[0];
+ }
+ const T &front() const {
+ VMA_HEAVY_ASSERT(m_Count > 0);
+ return m_pArray[0];
+ }
+ T &back() {
+ VMA_HEAVY_ASSERT(m_Count > 0);
+ return m_pArray[m_Count - 1];
+ }
+ const T &back() const {
+ VMA_HEAVY_ASSERT(m_Count > 0);
+ return m_pArray[m_Count - 1];
+ }
+
+ void reserve(size_t newCapacity, bool freeMemory = false) {
+ newCapacity = VMA_MAX(newCapacity, m_Count);
+
+ if ((newCapacity < m_Capacity) && !freeMemory) {
+ newCapacity = m_Capacity;
+ }
+
+ if (newCapacity != m_Capacity) {
+ T *const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator, newCapacity) : VMA_NULL;
+ if (m_Count != 0) {
+ memcpy(newArray, m_pArray, m_Count * sizeof(T));
+ }
+ VmaFree(m_Allocator.m_pCallbacks, m_pArray);
+ m_Capacity = newCapacity;
+ m_pArray = newArray;
+ }
+ }
+
+ void resize(size_t newCount, bool freeMemory = false) {
+ size_t newCapacity = m_Capacity;
+ if (newCount > m_Capacity) {
+ newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
+ } else if (freeMemory) {
+ newCapacity = newCount;
+ }
+
+ if (newCapacity != m_Capacity) {
+ T *const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
+ const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
+ if (elementsToCopy != 0) {
+ memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
+ }
+ VmaFree(m_Allocator.m_pCallbacks, m_pArray);
+ m_Capacity = newCapacity;
+ m_pArray = newArray;
+ }
+
+ m_Count = newCount;
+ }
+
+ void clear(bool freeMemory = false) {
+ resize(0, freeMemory);
+ }
+
+ void insert(size_t index, const T &src) {
+ VMA_HEAVY_ASSERT(index <= m_Count);
+ const size_t oldCount = size();
+ resize(oldCount + 1);
+ if (index < oldCount) {
+ memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
+ }
+ m_pArray[index] = src;
+ }
+
+ void remove(size_t index) {
+ VMA_HEAVY_ASSERT(index < m_Count);
+ const size_t oldCount = size();
+ if (index < oldCount - 1) {
+ memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
+ }
+ resize(oldCount - 1);
+ }
+
+ void push_back(const T &src) {
+ const size_t newIndex = size();
+ resize(newIndex + 1);
+ m_pArray[newIndex] = src;
+ }
+
+ void pop_back() {
+ VMA_HEAVY_ASSERT(m_Count > 0);
+ resize(size() - 1);
+ }
+
+ void push_front(const T &src) {
+ insert(0, src);
+ }
+
+ void pop_front() {
+ VMA_HEAVY_ASSERT(m_Count > 0);
+ remove(0);
+ }
+
+ typedef T *iterator;
+
+ iterator begin() { return m_pArray; }
+ iterator end() { return m_pArray + m_Count; }
+
+private:
+ AllocatorT m_Allocator;
+ T *m_pArray;
+ size_t m_Count;
+ size_t m_Capacity;
+};
+
+template <typename T, typename allocatorT>
+static void VmaVectorInsert(VmaVector<T, allocatorT> &vec, size_t index, const T &item) {
+ vec.insert(index, item);
+}
+
+template <typename T, typename allocatorT>
+static void VmaVectorRemove(VmaVector<T, allocatorT> &vec, size_t index) {
+ vec.remove(index);
+}
+
+#endif // #if VMA_USE_STL_VECTOR
+
+template <typename CmpLess, typename VectorT>
+size_t VmaVectorInsertSorted(VectorT &vector, const typename VectorT::value_type &value) {
+ const size_t indexToInsert = VmaBinaryFindFirstNotLess(
+ vector.data(),
+ vector.data() + vector.size(),
+ value,
+ CmpLess()) -
+ vector.data();
+ VmaVectorInsert(vector, indexToInsert, value);
+ return indexToInsert;
+}
+
+template <typename CmpLess, typename VectorT>
+bool VmaVectorRemoveSorted(VectorT &vector, const typename VectorT::value_type &value) {
+ CmpLess comparator;
+ typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
+ vector.begin(),
+ vector.end(),
+ value,
+ comparator);
+ if ((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it)) {
+ size_t indexToRemove = it - vector.begin();
+ VmaVectorRemove(vector, indexToRemove);
+ return true;
+ }
+ return false;
+}
+
+template <typename CmpLess, typename IterT, typename KeyT>
+IterT VmaVectorFindSorted(const IterT &beg, const IterT &end, const KeyT &value) {
+ CmpLess comparator;
+ IterT it = VmaBinaryFindFirstNotLess<CmpLess, IterT, KeyT>(
+ beg, end, value, comparator);
+ if (it == end ||
+ (!comparator(*it, value) && !comparator(value, *it))) {
+ return it;
+ }
+ return end;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// class VmaPoolAllocator
+
+/*
+Allocator for objects of type T using a list of arrays (pools) to speed up
+allocation. The number of elements that can be allocated is not bounded, because
+the allocator can create multiple blocks.
+*/
+template <typename T>
+class VmaPoolAllocator {
+ VMA_CLASS_NO_COPY(VmaPoolAllocator)
+public:
+ VmaPoolAllocator(const VkAllocationCallbacks *pAllocationCallbacks, uint32_t firstBlockCapacity);
+ ~VmaPoolAllocator();
+ void Clear();
+ T *Alloc();
+ void Free(T *ptr);
+
+private:
+ union Item {
+ uint32_t NextFreeIndex;
+ T Value;
+ };
+
+ struct ItemBlock {
+ Item *pItems;
+ uint32_t Capacity;
+ uint32_t FirstFreeIndex;
+ };
+
+ const VkAllocationCallbacks *m_pAllocationCallbacks;
+ const uint32_t m_FirstBlockCapacity;
+ VmaVector<ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;
+
+ ItemBlock &CreateNewBlock();
+};
+
+template <typename T>
+VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks *pAllocationCallbacks, uint32_t firstBlockCapacity) :
+ m_pAllocationCallbacks(pAllocationCallbacks),
+ m_FirstBlockCapacity(firstBlockCapacity),
+ m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks)) {
+ VMA_ASSERT(m_FirstBlockCapacity > 1);
+}
+
+template <typename T>
+VmaPoolAllocator<T>::~VmaPoolAllocator() {
+ Clear();
+}
+
+template <typename T>
+void VmaPoolAllocator<T>::Clear() {
+ for (size_t i = m_ItemBlocks.size(); i--;)
+ vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemBlocks[i].Capacity);
+ m_ItemBlocks.clear();
+}
+
+template <typename T>
+T *VmaPoolAllocator<T>::Alloc() {
+ for (size_t i = m_ItemBlocks.size(); i--;) {
+ ItemBlock &block = m_ItemBlocks[i];
+ // This block has some free items: Use first one.
+ if (block.FirstFreeIndex != UINT32_MAX) {
+ Item *const pItem = &block.pItems[block.FirstFreeIndex];
+ block.FirstFreeIndex = pItem->NextFreeIndex;
+ return &pItem->Value;
+ }
+ }
+
+ // No block has free item: Create new one and use it.
+ ItemBlock &newBlock = CreateNewBlock();
+ Item *const pItem = &newBlock.pItems[0];
+ newBlock.FirstFreeIndex = pItem->NextFreeIndex;
+ return &pItem->Value;
+}
+
+template <typename T>
+void VmaPoolAllocator<T>::Free(T *ptr) {
+ // Search all memory blocks to find ptr.
+ for (size_t i = m_ItemBlocks.size(); i--;) {
+ ItemBlock &block = m_ItemBlocks[i];
+
+ // Casting to union.
+ Item *pItemPtr;
+ memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));
+
+ // Check if pItemPtr is in address range of this block.
+ if ((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + block.Capacity)) {
+ const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
+ pItemPtr->NextFreeIndex = block.FirstFreeIndex;
+ block.FirstFreeIndex = index;
+ return;
+ }
+ }
+ VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
+}
+
+template <typename T>
+typename VmaPoolAllocator<T>::ItemBlock &VmaPoolAllocator<T>::CreateNewBlock() {
+ const uint32_t newBlockCapacity = m_ItemBlocks.empty() ?
+ m_FirstBlockCapacity :
+ m_ItemBlocks.back().Capacity * 3 / 2;
+
+ const ItemBlock newBlock = {
+ vma_new_array(m_pAllocationCallbacks, Item, newBlockCapacity),
+ newBlockCapacity,
+ 0
+ };
+
+ m_ItemBlocks.push_back(newBlock);
+
+ // Setup singly-linked list of all free items in this block.
+ for (uint32_t i = 0; i < newBlockCapacity - 1; ++i)
+ newBlock.pItems[i].NextFreeIndex = i + 1;
+ newBlock.pItems[newBlockCapacity - 1].NextFreeIndex = UINT32_MAX;
+ return m_ItemBlocks.back();
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// class VmaRawList, VmaList
+
+#if VMA_USE_STL_LIST
+
+#define VmaList std::list
+
+#else // #if VMA_USE_STL_LIST
+
+template <typename T>
+struct VmaListItem {
+ VmaListItem *pPrev;
+ VmaListItem *pNext;
+ T Value;
+};
+
+// Doubly linked list.
+template <typename T>
+class VmaRawList {
+ VMA_CLASS_NO_COPY(VmaRawList)
+public:
+ typedef VmaListItem<T> ItemType;
+
+ VmaRawList(const VkAllocationCallbacks *pAllocationCallbacks);
+ ~VmaRawList();
+ void Clear();
+
+ size_t GetCount() const { return m_Count; }
+ bool IsEmpty() const { return m_Count == 0; }
+
+ ItemType *Front() { return m_pFront; }
+ const ItemType *Front() const { return m_pFront; }
+ ItemType *Back() { return m_pBack; }
+ const ItemType *Back() const { return m_pBack; }
+
+ ItemType *PushBack();
+ ItemType *PushFront();
+ ItemType *PushBack(const T &value);
+ ItemType *PushFront(const T &value);
+ void PopBack();
+ void PopFront();
+
+ // Item can be null - it means PushBack.
+ ItemType *InsertBefore(ItemType *pItem);
+ // Item can be null - it means PushFront.
+ ItemType *InsertAfter(ItemType *pItem);
+
+ ItemType *InsertBefore(ItemType *pItem, const T &value);
+ ItemType *InsertAfter(ItemType *pItem, const T &value);
+
+ void Remove(ItemType *pItem);
+
+private:
+ const VkAllocationCallbacks *const m_pAllocationCallbacks;
+ VmaPoolAllocator<ItemType> m_ItemAllocator;
+ ItemType *m_pFront;
+ ItemType *m_pBack;
+ size_t m_Count;
+};
+
+template <typename T>
+VmaRawList<T>::VmaRawList(const VkAllocationCallbacks *pAllocationCallbacks) :
+ m_pAllocationCallbacks(pAllocationCallbacks),
+ m_ItemAllocator(pAllocationCallbacks, 128),
+ m_pFront(VMA_NULL),
+ m_pBack(VMA_NULL),
+ m_Count(0) {
+}
+
+template <typename T>
+VmaRawList<T>::~VmaRawList() {
+ // Intentionally not calling Clear, because that would spend unnecessary
+ // computation returning all items to m_ItemAllocator as free.
+}
+
+template <typename T>
+void VmaRawList<T>::Clear() {
+ if (IsEmpty() == false) {
+ ItemType *pItem = m_pBack;
+ while (pItem != VMA_NULL) {
+ ItemType *const pPrevItem = pItem->pPrev;
+ m_ItemAllocator.Free(pItem);
+ pItem = pPrevItem;
+ }
+ m_pFront = VMA_NULL;
+ m_pBack = VMA_NULL;
+ m_Count = 0;
+ }
+}
+
+template <typename T>
+VmaListItem<T> *VmaRawList<T>::PushBack() {
+ ItemType *const pNewItem = m_ItemAllocator.Alloc();
+ pNewItem->pNext = VMA_NULL;
+ if (IsEmpty()) {
+ pNewItem->pPrev = VMA_NULL;
+ m_pFront = pNewItem;
+ m_pBack = pNewItem;
+ m_Count = 1;
+ } else {
+ pNewItem->pPrev = m_pBack;
+ m_pBack->pNext = pNewItem;
+ m_pBack = pNewItem;
+ ++m_Count;
+ }
+ return pNewItem;
+}
+
+template <typename T>
+VmaListItem<T> *VmaRawList<T>::PushFront() {
+ ItemType *const pNewItem = m_ItemAllocator.Alloc();
+ pNewItem->pPrev = VMA_NULL;
+ if (IsEmpty()) {
+ pNewItem->pNext = VMA_NULL;
+ m_pFront = pNewItem;
+ m_pBack = pNewItem;
+ m_Count = 1;
+ } else {
+ pNewItem->pNext = m_pFront;
+ m_pFront->pPrev = pNewItem;
+ m_pFront = pNewItem;
+ ++m_Count;
+ }
+ return pNewItem;
+}
+
+template <typename T>
+VmaListItem<T> *VmaRawList<T>::PushBack(const T &value) {
+ ItemType *const pNewItem = PushBack();
+ pNewItem->Value = value;
+ return pNewItem;
+}
+
+template <typename T>
+VmaListItem<T> *VmaRawList<T>::PushFront(const T &value) {
+ ItemType *const pNewItem = PushFront();
+ pNewItem->Value = value;
+ return pNewItem;
+}
+
+template <typename T>
+void VmaRawList<T>::PopBack() {
+ VMA_HEAVY_ASSERT(m_Count > 0);
+ ItemType *const pBackItem = m_pBack;
+ ItemType *const pPrevItem = pBackItem->pPrev;
+ if (pPrevItem != VMA_NULL) {
+ pPrevItem->pNext = VMA_NULL;
+ }
+ m_pBack = pPrevItem;
+ m_ItemAllocator.Free(pBackItem);
+ --m_Count;
+}
+
+template <typename T>
+void VmaRawList<T>::PopFront() {
+ VMA_HEAVY_ASSERT(m_Count > 0);
+ ItemType *const pFrontItem = m_pFront;
+ ItemType *const pNextItem = pFrontItem->pNext;
+ if (pNextItem != VMA_NULL) {
+ pNextItem->pPrev = VMA_NULL;
+ }
+ m_pFront = pNextItem;
+ m_ItemAllocator.Free(pFrontItem);
+ --m_Count;
+}
+
+template <typename T>
+void VmaRawList<T>::Remove(ItemType *pItem) {
+ VMA_HEAVY_ASSERT(pItem != VMA_NULL);
+ VMA_HEAVY_ASSERT(m_Count > 0);
+
+ if (pItem->pPrev != VMA_NULL) {
+ pItem->pPrev->pNext = pItem->pNext;
+ } else {
+ VMA_HEAVY_ASSERT(m_pFront == pItem);
+ m_pFront = pItem->pNext;
+ }
+
+ if (pItem->pNext != VMA_NULL) {
+ pItem->pNext->pPrev = pItem->pPrev;
+ } else {
+ VMA_HEAVY_ASSERT(m_pBack == pItem);
+ m_pBack = pItem->pPrev;
+ }
+
+ m_ItemAllocator.Free(pItem);
+ --m_Count;
+}
+
+template <typename T>
+VmaListItem<T> *VmaRawList<T>::InsertBefore(ItemType *pItem) {
+ if (pItem != VMA_NULL) {
+ ItemType *const prevItem = pItem->pPrev;
+ ItemType *const newItem = m_ItemAllocator.Alloc();
+ newItem->pPrev = prevItem;
+ newItem->pNext = pItem;
+ pItem->pPrev = newItem;
+ if (prevItem != VMA_NULL) {
+ prevItem->pNext = newItem;
+ } else {
+ VMA_HEAVY_ASSERT(m_pFront == pItem);
+ m_pFront = newItem;
+ }
+ ++m_Count;
+ return newItem;
+ } else
+ return PushBack();
+}
+
+template <typename T>
+VmaListItem<T> *VmaRawList<T>::InsertAfter(ItemType *pItem) {
+ if (pItem != VMA_NULL) {
+ ItemType *const nextItem = pItem->pNext;
+ ItemType *const newItem = m_ItemAllocator.Alloc();
+ newItem->pNext = nextItem;
+ newItem->pPrev = pItem;
+ pItem->pNext = newItem;
+ if (nextItem != VMA_NULL) {
+ nextItem->pPrev = newItem;
+ } else {
+ VMA_HEAVY_ASSERT(m_pBack == pItem);
+ m_pBack = newItem;
+ }
+ ++m_Count;
+ return newItem;
+ } else
+ return PushFront();
+}
+
+template <typename T>
+VmaListItem<T> *VmaRawList<T>::InsertBefore(ItemType *pItem, const T &value) {
+ ItemType *const newItem = InsertBefore(pItem);
+ newItem->Value = value;
+ return newItem;
+}
+
+template <typename T>
+VmaListItem<T> *VmaRawList<T>::InsertAfter(ItemType *pItem, const T &value) {
+ ItemType *const newItem = InsertAfter(pItem);
+ newItem->Value = value;
+ return newItem;
+}
+
+template <typename T, typename AllocatorT>
+class VmaList {
+ VMA_CLASS_NO_COPY(VmaList)
+public:
+ class iterator {
+ public:
+ iterator() :
+ m_pList(VMA_NULL),
+ m_pItem(VMA_NULL) {
+ }
+
+ T &operator*() const {
+ VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
+ return m_pItem->Value;
+ }
+ T *operator->() const {
+ VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
+ return &m_pItem->Value;
+ }
+
+ iterator &operator++() {
+ VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
+ m_pItem = m_pItem->pNext;
+ return *this;
+ }
+ iterator &operator--() {
+ if (m_pItem != VMA_NULL) {
+ m_pItem = m_pItem->pPrev;
+ } else {
+ VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
+ m_pItem = m_pList->Back();
+ }
+ return *this;
+ }
+
+ iterator operator++(int) {
+ iterator result = *this;
+ ++*this;
+ return result;
+ }
+ iterator operator--(int) {
+ iterator result = *this;
+ --*this;
+ return result;
+ }
+
+ bool operator==(const iterator &rhs) const {
+ VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
+ return m_pItem == rhs.m_pItem;
+ }
+ bool operator!=(const iterator &rhs) const {
+ VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
+ return m_pItem != rhs.m_pItem;
+ }
+
+ private:
+ VmaRawList<T> *m_pList;
+ VmaListItem<T> *m_pItem;
+
+ iterator(VmaRawList<T> *pList, VmaListItem<T> *pItem) :
+ m_pList(pList),
+ m_pItem(pItem) {
+ }
+
+ friend class VmaList<T, AllocatorT>;
+ };
+
+ class const_iterator {
+ public:
+ const_iterator() :
+ m_pList(VMA_NULL),
+ m_pItem(VMA_NULL) {
+ }
+
+ const_iterator(const iterator &src) :
+ m_pList(src.m_pList),
+ m_pItem(src.m_pItem) {
+ }
+
+ const T &operator*() const {
+ VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
+ return m_pItem->Value;
+ }
+ const T *operator->() const {
+ VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
+ return &m_pItem->Value;
+ }
+
+ const_iterator &operator++() {
+ VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
+ m_pItem = m_pItem->pNext;
+ return *this;
+ }
+ const_iterator &operator--() {
+ if (m_pItem != VMA_NULL) {
+ m_pItem = m_pItem->pPrev;
+ } else {
+ VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
+ m_pItem = m_pList->Back();
+ }
+ return *this;
+ }
+
+ const_iterator operator++(int) {
+ const_iterator result = *this;
+ ++*this;
+ return result;
+ }
+ const_iterator operator--(int) {
+ const_iterator result = *this;
+ --*this;
+ return result;
+ }
+
+ bool operator==(const const_iterator &rhs) const {
+ VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
+ return m_pItem == rhs.m_pItem;
+ }
+ bool operator!=(const const_iterator &rhs) const {
+ VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
+ return m_pItem != rhs.m_pItem;
+ }
+
+ private:
+ const_iterator(const VmaRawList<T> *pList, const VmaListItem<T> *pItem) :
+ m_pList(pList),
+ m_pItem(pItem) {
+ }
+
+ const VmaRawList<T> *m_pList;
+ const VmaListItem<T> *m_pItem;
+
+ friend class VmaList<T, AllocatorT>;
+ };
+
+ VmaList(const AllocatorT &allocator) :
+ m_RawList(allocator.m_pCallbacks) {}
+
+ bool empty() const { return m_RawList.IsEmpty(); }
+ size_t size() const { return m_RawList.GetCount(); }
+
+ iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
+ iterator end() { return iterator(&m_RawList, VMA_NULL); }
+
+ const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
+ const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }
+
+ void clear() { m_RawList.Clear(); }
+ void push_back(const T &value) { m_RawList.PushBack(value); }
+ void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
+ iterator insert(iterator it, const T &value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }
+
+private:
+ VmaRawList<T> m_RawList;
+};
+
+#endif // #if VMA_USE_STL_LIST
+
+////////////////////////////////////////////////////////////////////////////////
+// class VmaMap
+
+// Unused in this version.
+#if 0
+
+#if VMA_USE_STL_UNORDERED_MAP
+
+#define VmaPair std::pair
+
+#define VMA_MAP_TYPE(KeyT, ValueT) \
+ std::unordered_map<KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator<std::pair<KeyT, ValueT> > >
+
+#else // #if VMA_USE_STL_UNORDERED_MAP
+
+template<typename T1, typename T2>
+struct VmaPair
+{
+ T1 first;
+ T2 second;
+
+ VmaPair() : first(), second() { }
+ VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
+};
+
+/* Class compatible with subset of interface of std::unordered_map.
+KeyT, ValueT must be POD because they will be stored in VmaVector.
+*/
+template<typename KeyT, typename ValueT>
+class VmaMap
+{
+public:
+ typedef VmaPair<KeyT, ValueT> PairType;
+ typedef PairType* iterator;
+
+ VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }
+
+ iterator begin() { return m_Vector.begin(); }
+ iterator end() { return m_Vector.end(); }
+
+ void insert(const PairType& pair);
+ iterator find(const KeyT& key);
+ void erase(iterator it);
+
+private:
+ VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
+};
+
+#define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>
+
+template<typename FirstT, typename SecondT>
+struct VmaPairFirstLess
+{
+ bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
+ {
+ return lhs.first < rhs.first;
+ }
+ bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
+ {
+ return lhs.first < rhsFirst;
+ }
+};
+
+template<typename KeyT, typename ValueT>
+void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
+{
+ const size_t indexToInsert = VmaBinaryFindFirstNotLess(
+ m_Vector.data(),
+ m_Vector.data() + m_Vector.size(),
+ pair,
+ VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
+ VmaVectorInsert(m_Vector, indexToInsert, pair);
+}
+
+template<typename KeyT, typename ValueT>
+VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
+{
+ PairType* it = VmaBinaryFindFirstNotLess(
+ m_Vector.data(),
+ m_Vector.data() + m_Vector.size(),
+ key,
+ VmaPairFirstLess<KeyT, ValueT>());
+ if((it != m_Vector.end()) && (it->first == key))
+ {
+ return it;
+ }
+ else
+ {
+ return m_Vector.end();
+ }
+}
+
+template<typename KeyT, typename ValueT>
+void VmaMap<KeyT, ValueT>::erase(iterator it)
+{
+ VmaVectorRemove(m_Vector, it - m_Vector.begin());
+}
+
+#endif // #if VMA_USE_STL_UNORDERED_MAP
+
+#endif // #if 0
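+
+// Worked note on the disabled VmaMap above (illustrative, hedged): it is a
+// "flat map" - a VmaVector of pairs kept sorted by key. insert()
+// binary-searches for the first pair not less than the new one and inserts at
+// that index, so inserting keys 5, 1, 3 yields the order {1, 3, 5}; find()
+// then binary-searches in O(log n) and confirms the key matches before
+// returning the iterator, otherwise it returns end().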
+
+////////////////////////////////////////////////////////////////////////////////
+
+class VmaDeviceMemoryBlock;
+
+enum VMA_CACHE_OPERATION { VMA_CACHE_FLUSH,
+ VMA_CACHE_INVALIDATE };
+
+struct VmaAllocation_T {
+private:
+ static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;
+
+ enum FLAGS {
+ FLAG_USER_DATA_STRING = 0x01,
+ };
+
+public:
+ enum ALLOCATION_TYPE {
+ ALLOCATION_TYPE_NONE,
+ ALLOCATION_TYPE_BLOCK,
+ ALLOCATION_TYPE_DEDICATED,
+ };
+
+	/*
+	This struct cannot have a constructor or destructor. It must be POD because it
+	is allocated using VmaPoolAllocator; initialization and cleanup are therefore
+	done explicitly, via Ctor() and Dtor() below.
+	*/
+
+ void Ctor(uint32_t currentFrameIndex, bool userDataString) {
+ m_Alignment = 1;
+ m_Size = 0;
+ m_pUserData = VMA_NULL;
+ m_LastUseFrameIndex = currentFrameIndex;
+ m_Type = (uint8_t)ALLOCATION_TYPE_NONE;
+ m_SuballocationType = (uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN;
+ m_MapCount = 0;
+ m_Flags = userDataString ? (uint8_t)FLAG_USER_DATA_STRING : 0;
+
+#if VMA_STATS_STRING_ENABLED
+ m_CreationFrameIndex = currentFrameIndex;
+ m_BufferImageUsage = 0;
+#endif
+ }
+
+ void Dtor() {
+ VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");
+
+ // Check if owned string was freed.
+ VMA_ASSERT(m_pUserData == VMA_NULL);
+ }
+
+ void InitBlockAllocation(
+ VmaDeviceMemoryBlock *block,
+ VkDeviceSize offset,
+ VkDeviceSize alignment,
+ VkDeviceSize size,
+ VmaSuballocationType suballocationType,
+ bool mapped,
+ bool canBecomeLost) {
+ VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
+ VMA_ASSERT(block != VMA_NULL);
+ m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
+ m_Alignment = alignment;
+ m_Size = size;
+ m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
+ m_SuballocationType = (uint8_t)suballocationType;
+ m_BlockAllocation.m_Block = block;
+ m_BlockAllocation.m_Offset = offset;
+ m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
+ }
+
+ void InitLost() {
+ VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
+ VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
+ m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
+ m_BlockAllocation.m_Block = VMA_NULL;
+ m_BlockAllocation.m_Offset = 0;
+ m_BlockAllocation.m_CanBecomeLost = true;
+ }
+
+ void ChangeBlockAllocation(
+ VmaAllocator hAllocator,
+ VmaDeviceMemoryBlock *block,
+ VkDeviceSize offset);
+
+ void ChangeSize(VkDeviceSize newSize);
+ void ChangeOffset(VkDeviceSize newOffset);
+
+	// A non-null pMappedData means the allocation was created with the MAPPED flag.
+ void InitDedicatedAllocation(
+ uint32_t memoryTypeIndex,
+ VkDeviceMemory hMemory,
+ VmaSuballocationType suballocationType,
+ void *pMappedData,
+ VkDeviceSize size) {
+ VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
+ VMA_ASSERT(hMemory != VK_NULL_HANDLE);
+ m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
+ m_Alignment = 0;
+ m_Size = size;
+ m_SuballocationType = (uint8_t)suballocationType;
+ m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
+ m_DedicatedAllocation.m_MemoryTypeIndex = memoryTypeIndex;
+ m_DedicatedAllocation.m_hMemory = hMemory;
+ m_DedicatedAllocation.m_pMappedData = pMappedData;
+ }
+
+ ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
+ VkDeviceSize GetAlignment() const { return m_Alignment; }
+ VkDeviceSize GetSize() const { return m_Size; }
+ bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; }
+ void *GetUserData() const { return m_pUserData; }
+ void SetUserData(VmaAllocator hAllocator, void *pUserData);
+ VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }
+
+ VmaDeviceMemoryBlock *GetBlock() const {
+ VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
+ return m_BlockAllocation.m_Block;
+ }
+ VkDeviceSize GetOffset() const;
+ VkDeviceMemory GetMemory() const;
+ uint32_t GetMemoryTypeIndex() const;
+ bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
+ void *GetMappedData() const;
+ bool CanBecomeLost() const;
+
+ uint32_t GetLastUseFrameIndex() const {
+ return m_LastUseFrameIndex.load();
+ }
+ bool CompareExchangeLastUseFrameIndex(uint32_t &expected, uint32_t desired) {
+ return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
+ }
+ /*
+ - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
+ makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
+ - Else, returns false.
+
+ If hAllocation is already lost, assert - you should not call it then.
+ If hAllocation was not created with CAN_BECOME_LOST_BIT, assert.
+ */
+ bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
+
+ void DedicatedAllocCalcStatsInfo(VmaStatInfo &outInfo) {
+ VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
+ outInfo.blockCount = 1;
+ outInfo.allocationCount = 1;
+ outInfo.unusedRangeCount = 0;
+ outInfo.usedBytes = m_Size;
+ outInfo.unusedBytes = 0;
+ outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
+ outInfo.unusedRangeSizeMin = UINT64_MAX;
+ outInfo.unusedRangeSizeMax = 0;
+ }
+
+ void BlockAllocMap();
+ void BlockAllocUnmap();
+ VkResult DedicatedAllocMap(VmaAllocator hAllocator, void **ppData);
+ void DedicatedAllocUnmap(VmaAllocator hAllocator);
+
+#if VMA_STATS_STRING_ENABLED
+ uint32_t GetCreationFrameIndex() const { return m_CreationFrameIndex; }
+ uint32_t GetBufferImageUsage() const { return m_BufferImageUsage; }
+
+ void InitBufferImageUsage(uint32_t bufferImageUsage) {
+ VMA_ASSERT(m_BufferImageUsage == 0);
+ m_BufferImageUsage = bufferImageUsage;
+ }
+
+ void PrintParameters(class VmaJsonWriter &json) const;
+#endif
+
+private:
+ VkDeviceSize m_Alignment;
+ VkDeviceSize m_Size;
+ void *m_pUserData;
+ VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
+ uint8_t m_Type; // ALLOCATION_TYPE
+ uint8_t m_SuballocationType; // VmaSuballocationType
+ // Bit 0x80 is set when allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
+ // Bits with mask 0x7F are reference counter for vmaMapMemory()/vmaUnmapMemory().
+ uint8_t m_MapCount;
+ uint8_t m_Flags; // enum FLAGS
+
+ // Allocation out of VmaDeviceMemoryBlock.
+ struct BlockAllocation {
+ VmaDeviceMemoryBlock *m_Block;
+ VkDeviceSize m_Offset;
+ bool m_CanBecomeLost;
+ };
+
+ // Allocation for an object that has its own private VkDeviceMemory.
+ struct DedicatedAllocation {
+ uint32_t m_MemoryTypeIndex;
+ VkDeviceMemory m_hMemory;
+ void *m_pMappedData; // Not null means memory is mapped.
+ };
+
+ union {
+ // Allocation out of VmaDeviceMemoryBlock.
+ BlockAllocation m_BlockAllocation;
+ // Allocation for an object that has its own private VkDeviceMemory.
+ DedicatedAllocation m_DedicatedAllocation;
+ };
+
+#if VMA_STATS_STRING_ENABLED
+ uint32_t m_CreationFrameIndex;
+ uint32_t m_BufferImageUsage; // 0 if unknown.
+#endif
+
+ void FreeUserDataString(VmaAllocator hAllocator);
+};
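+
+// Lifecycle sketch (illustrative only, kept disabled): what the POD design of
+// VmaAllocation_T implies. The owner - VmaAllocationObjectAllocator, declared
+// further below - hands out raw storage from a VmaPoolAllocator, so
+// Ctor()/Dtor() are called explicitly instead of relying on C++ construction.
+// Names below are assumptions for illustration, not library entry points.
+#if 0
+static void AllocationLifecycleSketch(VmaAllocationObjectAllocator &objAlloc, uint32_t currentFrameIndex) {
+	VmaAllocation alloc = objAlloc.Allocate(); // Raw storage from the pool.
+	alloc->Ctor(currentFrameIndex, false /* userDataString */);
+	// ... InitBlockAllocation() or InitDedicatedAllocation(), use, unmap ...
+	alloc->Dtor(); // Asserts the allocation was unmapped and user data freed.
+	objAlloc.Free(alloc);
+}
+#endif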
+
+/*
+Represents a region of a VmaDeviceMemoryBlock that is either free or assigned
+and returned as an allocated memory block.
+*/
+struct VmaSuballocation {
+ VkDeviceSize offset;
+ VkDeviceSize size;
+ VmaAllocation hAllocation;
+ VmaSuballocationType type;
+};
+
+// Comparator for offsets.
+struct VmaSuballocationOffsetLess {
+ bool operator()(const VmaSuballocation &lhs, const VmaSuballocation &rhs) const {
+ return lhs.offset < rhs.offset;
+ }
+};
+struct VmaSuballocationOffsetGreater {
+ bool operator()(const VmaSuballocation &lhs, const VmaSuballocation &rhs) const {
+ return lhs.offset > rhs.offset;
+ }
+};
+
+typedef VmaList<VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;
+
+// Cost of making one more allocation lost, expressed as an equivalent number of bytes.
+static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
+
+enum class VmaAllocationRequestType {
+ Normal,
+ // Used by "Linear" algorithm.
+ UpperAddress,
+ EndOf1st,
+ EndOf2nd,
+};
+
+/*
+Parameters of planned allocation inside a VmaDeviceMemoryBlock.
+
+If canMakeOtherLost was false:
+- item points to a FREE suballocation.
+- itemsToMakeLostCount is 0.
+
+If canMakeOtherLost was true:
+- item points to the first of a sequence of suballocations, each of which is
+  either FREE or points to a VmaAllocation that can become lost.
+- itemsToMakeLostCount is the number of VmaAllocations that need to be made lost
+  for the requested allocation to succeed.
+*/
+struct VmaAllocationRequest {
+ VkDeviceSize offset;
+ VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
+ VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
+ VmaSuballocationList::iterator item;
+ size_t itemsToMakeLostCount;
+ void *customData;
+ VmaAllocationRequestType type;
+
+ VkDeviceSize CalcCost() const {
+ return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
+ }
+};
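+
+// Worked example of the cost metric above (illustrative): with
+// VMA_LOST_ALLOCATION_COST = 1048576, a candidate region overlapping 4096
+// bytes of lost-able items (sumItemSize) that requires making 2 allocations
+// lost costs 4096 + 2 * 1048576 = 2101248 equivalent bytes, while a fully
+// free region costs 0 - so free regions always win a CalcCost() comparison.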
+
+/*
+Data structure used for bookkeeping of allocations and unused ranges of memory
+in a single VkDeviceMemory block.
+*/
+class VmaBlockMetadata {
+public:
+ VmaBlockMetadata(VmaAllocator hAllocator);
+ virtual ~VmaBlockMetadata() {}
+ virtual void Init(VkDeviceSize size) { m_Size = size; }
+
+ // Validates all data structures inside this object. If not valid, returns false.
+ virtual bool Validate() const = 0;
+ VkDeviceSize GetSize() const { return m_Size; }
+ virtual size_t GetAllocationCount() const = 0;
+ virtual VkDeviceSize GetSumFreeSize() const = 0;
+ virtual VkDeviceSize GetUnusedRangeSizeMax() const = 0;
+	// Returns true if this block is empty - contains only a single free suballocation.
+ virtual bool IsEmpty() const = 0;
+
+ virtual void CalcAllocationStatInfo(VmaStatInfo &outInfo) const = 0;
+ // Shouldn't modify blockCount.
+ virtual void AddPoolStats(VmaPoolStats &inoutStats) const = 0;
+
+#if VMA_STATS_STRING_ENABLED
+ virtual void PrintDetailedMap(class VmaJsonWriter &json) const = 0;
+#endif
+
+ // Tries to find a place for suballocation with given parameters inside this block.
+ // If succeeded, fills pAllocationRequest and returns true.
+ // If failed, returns false.
+ virtual bool CreateAllocationRequest(
+ uint32_t currentFrameIndex,
+ uint32_t frameInUseCount,
+ VkDeviceSize bufferImageGranularity,
+ VkDeviceSize allocSize,
+ VkDeviceSize allocAlignment,
+ bool upperAddress,
+ VmaSuballocationType allocType,
+ bool canMakeOtherLost,
+ // Always one of VMA_ALLOCATION_CREATE_STRATEGY_* or VMA_ALLOCATION_INTERNAL_STRATEGY_* flags.
+ uint32_t strategy,
+ VmaAllocationRequest *pAllocationRequest) = 0;
+
+ virtual bool MakeRequestedAllocationsLost(
+ uint32_t currentFrameIndex,
+ uint32_t frameInUseCount,
+ VmaAllocationRequest *pAllocationRequest) = 0;
+
+ virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount) = 0;
+
+ virtual VkResult CheckCorruption(const void *pBlockData) = 0;
+
+ // Makes actual allocation based on request. Request must already be checked and valid.
+ virtual void Alloc(
+ const VmaAllocationRequest &request,
+ VmaSuballocationType type,
+ VkDeviceSize allocSize,
+ VmaAllocation hAllocation) = 0;
+
+ // Frees suballocation assigned to given memory region.
+ virtual void Free(const VmaAllocation allocation) = 0;
+ virtual void FreeAtOffset(VkDeviceSize offset) = 0;
+
+ // Tries to resize (grow or shrink) space for given allocation, in place.
+ virtual bool ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize) { return false; }
+
+protected:
+ const VkAllocationCallbacks *GetAllocationCallbacks() const { return m_pAllocationCallbacks; }
+
+#if VMA_STATS_STRING_ENABLED
+ void PrintDetailedMap_Begin(class VmaJsonWriter &json,
+ VkDeviceSize unusedBytes,
+ size_t allocationCount,
+ size_t unusedRangeCount) const;
+ void PrintDetailedMap_Allocation(class VmaJsonWriter &json,
+ VkDeviceSize offset,
+ VmaAllocation hAllocation) const;
+ void PrintDetailedMap_UnusedRange(class VmaJsonWriter &json,
+ VkDeviceSize offset,
+ VkDeviceSize size) const;
+ void PrintDetailedMap_End(class VmaJsonWriter &json) const;
+#endif
+
+private:
+ VkDeviceSize m_Size;
+ const VkAllocationCallbacks *m_pAllocationCallbacks;
+};
+
+#define VMA_VALIDATE(cond) \
+ do { \
+ if (!(cond)) { \
+ VMA_ASSERT(0 && "Validation failed: " #cond); \
+ return false; \
+ } \
+ } while (false)
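+
+// Note: VMA_VALIDATE is meant for the Validate() overrides below: a failed
+// condition both asserts in debug builds and makes the surrounding Validate()
+// return false. A hedged illustration of a typical check:
+//   VMA_VALIDATE(suballoc.offset + suballoc.size <= GetSize());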
+
+class VmaBlockMetadata_Generic : public VmaBlockMetadata {
+ VMA_CLASS_NO_COPY(VmaBlockMetadata_Generic)
+public:
+ VmaBlockMetadata_Generic(VmaAllocator hAllocator);
+ virtual ~VmaBlockMetadata_Generic();
+ virtual void Init(VkDeviceSize size);
+
+ virtual bool Validate() const;
+ virtual size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
+ virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
+ virtual VkDeviceSize GetUnusedRangeSizeMax() const;
+ virtual bool IsEmpty() const;
+
+ virtual void CalcAllocationStatInfo(VmaStatInfo &outInfo) const;
+ virtual void AddPoolStats(VmaPoolStats &inoutStats) const;
+
+#if VMA_STATS_STRING_ENABLED
+ virtual void PrintDetailedMap(class VmaJsonWriter &json) const;
+#endif
+
+ virtual bool CreateAllocationRequest(
+ uint32_t currentFrameIndex,
+ uint32_t frameInUseCount,
+ VkDeviceSize bufferImageGranularity,
+ VkDeviceSize allocSize,
+ VkDeviceSize allocAlignment,
+ bool upperAddress,
+ VmaSuballocationType allocType,
+ bool canMakeOtherLost,
+ uint32_t strategy,
+ VmaAllocationRequest *pAllocationRequest);
+
+ virtual bool MakeRequestedAllocationsLost(
+ uint32_t currentFrameIndex,
+ uint32_t frameInUseCount,
+ VmaAllocationRequest *pAllocationRequest);
+
+ virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
+
+ virtual VkResult CheckCorruption(const void *pBlockData);
+
+ virtual void Alloc(
+ const VmaAllocationRequest &request,
+ VmaSuballocationType type,
+ VkDeviceSize allocSize,
+ VmaAllocation hAllocation);
+
+ virtual void Free(const VmaAllocation allocation);
+ virtual void FreeAtOffset(VkDeviceSize offset);
+
+ virtual bool ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize);
+
+ ////////////////////////////////////////////////////////////////////////////////
+ // For defragmentation
+
+ bool IsBufferImageGranularityConflictPossible(
+ VkDeviceSize bufferImageGranularity,
+ VmaSuballocationType &inOutPrevSuballocType) const;
+
+private:
+ friend class VmaDefragmentationAlgorithm_Generic;
+ friend class VmaDefragmentationAlgorithm_Fast;
+
+ uint32_t m_FreeCount;
+ VkDeviceSize m_SumFreeSize;
+ VmaSuballocationList m_Suballocations;
+	// Suballocations that are free and have a size greater than a certain threshold.
+	// Sorted by size, ascending.
+ VmaVector<VmaSuballocationList::iterator, VmaStlAllocator<VmaSuballocationList::iterator> > m_FreeSuballocationsBySize;
+
+ bool ValidateFreeSuballocationList() const;
+
+	// Checks whether a suballocation with the given parameters can be placed at the given suballocItem.
+	// If it can, fills pOffset and returns true; otherwise returns false.
+ bool CheckAllocation(
+ uint32_t currentFrameIndex,
+ uint32_t frameInUseCount,
+ VkDeviceSize bufferImageGranularity,
+ VkDeviceSize allocSize,
+ VkDeviceSize allocAlignment,
+ VmaSuballocationType allocType,
+ VmaSuballocationList::const_iterator suballocItem,
+ bool canMakeOtherLost,
+ VkDeviceSize *pOffset,
+ size_t *itemsToMakeLostCount,
+ VkDeviceSize *pSumFreeSize,
+ VkDeviceSize *pSumItemSize) const;
+	// Given a free suballocation, merges it with the following one, which must also be free.
+ void MergeFreeWithNext(VmaSuballocationList::iterator item);
+	// Releases the given suballocation, making it free.
+	// Merges it with adjacent free suballocations if applicable.
+	// Returns an iterator to the new free suballocation at this place.
+ VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
+	// Given a free suballocation, inserts it into the sorted list
+	// m_FreeSuballocationsBySize if it is large enough to qualify.
+ void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
+	// Given a free suballocation, removes it from the sorted list
+	// m_FreeSuballocationsBySize if it was registered there.
+ void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
+};
+
+/*
+Allocations and their references in internal data structure look like this:
+
+if(m_2ndVectorMode == SECOND_VECTOR_EMPTY):
+
+ 0 +-------+
+ | |
+ | |
+ | |
+ +-------+
+ | Alloc | 1st[m_1stNullItemsBeginCount]
+ +-------+
+ | Alloc | 1st[m_1stNullItemsBeginCount + 1]
+ +-------+
+ | ... |
+ +-------+
+ | Alloc | 1st[1st.size() - 1]
+ +-------+
+ | |
+ | |
+ | |
+GetSize() +-------+
+
+if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER):
+
+ 0 +-------+
+ | Alloc | 2nd[0]
+ +-------+
+ | Alloc | 2nd[1]
+ +-------+
+ | ... |
+ +-------+
+ | Alloc | 2nd[2nd.size() - 1]
+ +-------+
+ | |
+ | |
+ | |
+ +-------+
+ | Alloc | 1st[m_1stNullItemsBeginCount]
+ +-------+
+ | Alloc | 1st[m_1stNullItemsBeginCount + 1]
+ +-------+
+ | ... |
+ +-------+
+ | Alloc | 1st[1st.size() - 1]
+ +-------+
+ | |
+GetSize() +-------+
+
+if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK):
+
+ 0 +-------+
+ | |
+ | |
+ | |
+ +-------+
+ | Alloc | 1st[m_1stNullItemsBeginCount]
+ +-------+
+ | Alloc | 1st[m_1stNullItemsBeginCount + 1]
+ +-------+
+ | ... |
+ +-------+
+ | Alloc | 1st[1st.size() - 1]
+ +-------+
+ | |
+ | |
+ | |
+ +-------+
+ | Alloc | 2nd[2nd.size() - 1]
+ +-------+
+ | ... |
+ +-------+
+ | Alloc | 2nd[1]
+ +-------+
+ | Alloc | 2nd[0]
+GetSize() +-------+
+
+*/
+class VmaBlockMetadata_Linear : public VmaBlockMetadata {
+ VMA_CLASS_NO_COPY(VmaBlockMetadata_Linear)
+public:
+ VmaBlockMetadata_Linear(VmaAllocator hAllocator);
+ virtual ~VmaBlockMetadata_Linear();
+ virtual void Init(VkDeviceSize size);
+
+ virtual bool Validate() const;
+ virtual size_t GetAllocationCount() const;
+ virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
+ virtual VkDeviceSize GetUnusedRangeSizeMax() const;
+ virtual bool IsEmpty() const { return GetAllocationCount() == 0; }
+
+ virtual void CalcAllocationStatInfo(VmaStatInfo &outInfo) const;
+ virtual void AddPoolStats(VmaPoolStats &inoutStats) const;
+
+#if VMA_STATS_STRING_ENABLED
+ virtual void PrintDetailedMap(class VmaJsonWriter &json) const;
+#endif
+
+ virtual bool CreateAllocationRequest(
+ uint32_t currentFrameIndex,
+ uint32_t frameInUseCount,
+ VkDeviceSize bufferImageGranularity,
+ VkDeviceSize allocSize,
+ VkDeviceSize allocAlignment,
+ bool upperAddress,
+ VmaSuballocationType allocType,
+ bool canMakeOtherLost,
+ uint32_t strategy,
+ VmaAllocationRequest *pAllocationRequest);
+
+ virtual bool MakeRequestedAllocationsLost(
+ uint32_t currentFrameIndex,
+ uint32_t frameInUseCount,
+ VmaAllocationRequest *pAllocationRequest);
+
+ virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
+
+ virtual VkResult CheckCorruption(const void *pBlockData);
+
+ virtual void Alloc(
+ const VmaAllocationRequest &request,
+ VmaSuballocationType type,
+ VkDeviceSize allocSize,
+ VmaAllocation hAllocation);
+
+ virtual void Free(const VmaAllocation allocation);
+ virtual void FreeAtOffset(VkDeviceSize offset);
+
+private:
+ /*
+	There are two suballocation vectors, used in a ping-pong fashion.
+ The one with index m_1stVectorIndex is called 1st.
+ The one with index (m_1stVectorIndex ^ 1) is called 2nd.
+ 2nd can be non-empty only when 1st is not empty.
+ When 2nd is not empty, m_2ndVectorMode indicates its mode of operation.
+ */
+ typedef VmaVector<VmaSuballocation, VmaStlAllocator<VmaSuballocation> > SuballocationVectorType;
+
+ enum SECOND_VECTOR_MODE {
+ SECOND_VECTOR_EMPTY,
+ /*
+ Suballocations in 2nd vector are created later than the ones in 1st, but they
+		all have smaller offsets.
+ */
+ SECOND_VECTOR_RING_BUFFER,
+ /*
+ Suballocations in 2nd vector are upper side of double stack.
+ They all have offsets higher than those in 1st vector.
+ Top of this stack means smaller offsets, but higher indices in this vector.
+ */
+ SECOND_VECTOR_DOUBLE_STACK,
+ };
+
+ VkDeviceSize m_SumFreeSize;
+ SuballocationVectorType m_Suballocations0, m_Suballocations1;
+ uint32_t m_1stVectorIndex;
+ SECOND_VECTOR_MODE m_2ndVectorMode;
+
+ SuballocationVectorType &AccessSuballocations1st() { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
+ SuballocationVectorType &AccessSuballocations2nd() { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
+ const SuballocationVectorType &AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
+ const SuballocationVectorType &AccessSuballocations2nd() const { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
+
+ // Number of items in 1st vector with hAllocation = null at the beginning.
+ size_t m_1stNullItemsBeginCount;
+ // Number of other items in 1st vector with hAllocation = null somewhere in the middle.
+ size_t m_1stNullItemsMiddleCount;
+ // Number of items in 2nd vector with hAllocation = null.
+ size_t m_2ndNullItemsCount;
+
+ bool ShouldCompact1st() const;
+ void CleanupAfterFree();
+
+ bool CreateAllocationRequest_LowerAddress(
+ uint32_t currentFrameIndex,
+ uint32_t frameInUseCount,
+ VkDeviceSize bufferImageGranularity,
+ VkDeviceSize allocSize,
+ VkDeviceSize allocAlignment,
+ VmaSuballocationType allocType,
+ bool canMakeOtherLost,
+ uint32_t strategy,
+ VmaAllocationRequest *pAllocationRequest);
+ bool CreateAllocationRequest_UpperAddress(
+ uint32_t currentFrameIndex,
+ uint32_t frameInUseCount,
+ VkDeviceSize bufferImageGranularity,
+ VkDeviceSize allocSize,
+ VkDeviceSize allocAlignment,
+ VmaSuballocationType allocType,
+ bool canMakeOtherLost,
+ uint32_t strategy,
+ VmaAllocationRequest *pAllocationRequest);
+};
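+
+// Sketch (illustrative only, kept disabled) of the ping-pong arithmetic used
+// by the AccessSuballocations1st/2nd accessors above: with index i in {0, 1},
+// the "1st" vector is the one at index i and the "2nd" the one at index
+// i ^ 1, so swapping their roles (e.g. after compacting 1st) is a single XOR.
+#if 0
+static uint32_t FlipVectorIndexSketch(uint32_t firstVectorIndex) {
+	return firstVectorIndex ^ 1; // New m_1stVectorIndex after a swap.
+}
+#endif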
+
+/*
+- GetSize() is the original size of the allocated memory block.
+- m_UsableSize is this size aligned down to a power of two.
+  All allocations and calculations happen relative to m_UsableSize.
+- GetUnusableSize() is the difference between them.
+  It is reported as a separate, unused range, not available for allocations.
+
+The node at level 0 has size = m_UsableSize.
+Each subsequent level contains nodes half the size of those in the previous level.
+m_LevelCount is the maximum number of levels to use in the current object.
+*/
+class VmaBlockMetadata_Buddy : public VmaBlockMetadata {
+ VMA_CLASS_NO_COPY(VmaBlockMetadata_Buddy)
+public:
+ VmaBlockMetadata_Buddy(VmaAllocator hAllocator);
+ virtual ~VmaBlockMetadata_Buddy();
+ virtual void Init(VkDeviceSize size);
+
+ virtual bool Validate() const;
+ virtual size_t GetAllocationCount() const { return m_AllocationCount; }
+ virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize + GetUnusableSize(); }
+ virtual VkDeviceSize GetUnusedRangeSizeMax() const;
+ virtual bool IsEmpty() const { return m_Root->type == Node::TYPE_FREE; }
+
+ virtual void CalcAllocationStatInfo(VmaStatInfo &outInfo) const;
+ virtual void AddPoolStats(VmaPoolStats &inoutStats) const;
+
+#if VMA_STATS_STRING_ENABLED
+ virtual void PrintDetailedMap(class VmaJsonWriter &json) const;
+#endif
+
+ virtual bool CreateAllocationRequest(
+ uint32_t currentFrameIndex,
+ uint32_t frameInUseCount,
+ VkDeviceSize bufferImageGranularity,
+ VkDeviceSize allocSize,
+ VkDeviceSize allocAlignment,
+ bool upperAddress,
+ VmaSuballocationType allocType,
+ bool canMakeOtherLost,
+ uint32_t strategy,
+ VmaAllocationRequest *pAllocationRequest);
+
+ virtual bool MakeRequestedAllocationsLost(
+ uint32_t currentFrameIndex,
+ uint32_t frameInUseCount,
+ VmaAllocationRequest *pAllocationRequest);
+
+ virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
+
+ virtual VkResult CheckCorruption(const void *pBlockData) { return VK_ERROR_FEATURE_NOT_PRESENT; }
+
+ virtual void Alloc(
+ const VmaAllocationRequest &request,
+ VmaSuballocationType type,
+ VkDeviceSize allocSize,
+ VmaAllocation hAllocation);
+
+ virtual void Free(const VmaAllocation allocation) { FreeAtOffset(allocation, allocation->GetOffset()); }
+ virtual void FreeAtOffset(VkDeviceSize offset) { FreeAtOffset(VMA_NULL, offset); }
+
+private:
+ static const VkDeviceSize MIN_NODE_SIZE = 32;
+ static const size_t MAX_LEVELS = 30;
+
+ struct ValidationContext {
+ size_t calculatedAllocationCount;
+ size_t calculatedFreeCount;
+ VkDeviceSize calculatedSumFreeSize;
+
+ ValidationContext() :
+ calculatedAllocationCount(0),
+ calculatedFreeCount(0),
+ calculatedSumFreeSize(0) {}
+ };
+
+ struct Node {
+ VkDeviceSize offset;
+ enum TYPE {
+ TYPE_FREE,
+ TYPE_ALLOCATION,
+ TYPE_SPLIT,
+ TYPE_COUNT
+ } type;
+ Node *parent;
+ Node *buddy;
+
+ union {
+ struct
+ {
+ Node *prev;
+ Node *next;
+ } free;
+ struct
+ {
+ VmaAllocation alloc;
+ } allocation;
+ struct
+ {
+ Node *leftChild;
+ } split;
+ };
+ };
+
+ // Size of the memory block aligned down to a power of two.
+ VkDeviceSize m_UsableSize;
+ uint32_t m_LevelCount;
+
+ Node *m_Root;
+ struct {
+ Node *front;
+ Node *back;
+ } m_FreeList[MAX_LEVELS];
+ // Number of nodes in the tree with type == TYPE_ALLOCATION.
+ size_t m_AllocationCount;
+ // Number of nodes in the tree with type == TYPE_FREE.
+ size_t m_FreeCount;
+ // This includes space wasted due to internal fragmentation. Doesn't include unusable size.
+ VkDeviceSize m_SumFreeSize;
+
+ VkDeviceSize GetUnusableSize() const { return GetSize() - m_UsableSize; }
+ void DeleteNode(Node *node);
+ bool ValidateNode(ValidationContext &ctx, const Node *parent, const Node *curr, uint32_t level, VkDeviceSize levelNodeSize) const;
+ uint32_t AllocSizeToLevel(VkDeviceSize allocSize) const;
+ inline VkDeviceSize LevelToNodeSize(uint32_t level) const { return m_UsableSize >> level; }
+ // Alloc passed just for validation. Can be null.
+ void FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset);
+ void CalcAllocationStatInfoNode(VmaStatInfo &outInfo, const Node *node, VkDeviceSize levelNodeSize) const;
+ // Adds node to the front of FreeList at given level.
+ // node->type must be FREE.
+ // node->free.prev, next can be undefined.
+ void AddToFreeListFront(uint32_t level, Node *node);
+ // Removes node from FreeList at given level.
+ // node->type must be FREE.
+ // node->free.prev, next stay untouched.
+ void RemoveFromFreeList(uint32_t level, Node *node);
+
+#if VMA_STATS_STRING_ENABLED
+ void PrintDetailedMapNode(class VmaJsonWriter &json, const Node *node, VkDeviceSize levelNodeSize) const;
+#endif
+};
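+
+// Worked example of the level math above (illustrative): for a block of
+// 10 MiB, m_UsableSize is 8 MiB (10 MiB aligned down to a power of two) and
+// GetUnusableSize() is 2 MiB. LevelToNodeSize(0) = 8 MiB,
+// LevelToNodeSize(1) = 4 MiB, LevelToNodeSize(2) = 2 MiB, and so on. An
+// allocation of 3 MiB is served from a level-1 node (the smallest node size
+// >= 3 MiB), wasting 1 MiB to internal fragmentation - which, per the comment
+// on m_SumFreeSize above, remains counted as free space.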
+
+/*
+Represents a single block of device memory (`VkDeviceMemory`) with all the
+data about its regions (aka suballocations, #VmaAllocation), assigned and free.
+
+Thread-safety: This class must be externally synchronized.
+*/
+class VmaDeviceMemoryBlock {
+ VMA_CLASS_NO_COPY(VmaDeviceMemoryBlock)
+public:
+ VmaBlockMetadata *m_pMetadata;
+
+ VmaDeviceMemoryBlock(VmaAllocator hAllocator);
+
+ ~VmaDeviceMemoryBlock() {
+ VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
+ VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
+ }
+
+ // Always call after construction.
+ void Init(
+ VmaAllocator hAllocator,
+ VmaPool hParentPool,
+ uint32_t newMemoryTypeIndex,
+ VkDeviceMemory newMemory,
+ VkDeviceSize newSize,
+ uint32_t id,
+ uint32_t algorithm);
+ // Always call before destruction.
+ void Destroy(VmaAllocator allocator);
+
+ VmaPool GetParentPool() const { return m_hParentPool; }
+ VkDeviceMemory GetDeviceMemory() const { return m_hMemory; }
+ uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
+ uint32_t GetId() const { return m_Id; }
+ void *GetMappedData() const { return m_pMappedData; }
+
+ // Validates all data structures inside this object. If not valid, returns false.
+ bool Validate() const;
+
+ VkResult CheckCorruption(VmaAllocator hAllocator);
+
+ // ppData can be null.
+ VkResult Map(VmaAllocator hAllocator, uint32_t count, void **ppData);
+ void Unmap(VmaAllocator hAllocator, uint32_t count);
+
+ VkResult WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
+ VkResult ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
+
+ VkResult BindBufferMemory(
+ const VmaAllocator hAllocator,
+ const VmaAllocation hAllocation,
+ VkBuffer hBuffer);
+ VkResult BindImageMemory(
+ const VmaAllocator hAllocator,
+ const VmaAllocation hAllocation,
+ VkImage hImage);
+
+private:
+	VmaPool m_hParentPool; // VK_NULL_HANDLE if this block does not belong to a custom pool.
+ uint32_t m_MemoryTypeIndex;
+ uint32_t m_Id;
+ VkDeviceMemory m_hMemory;
+
+ /*
+ Protects access to m_hMemory so it's not used by multiple threads simultaneously, e.g. vkMapMemory, vkBindBufferMemory.
+ Also protects m_MapCount, m_pMappedData.
+	Allocations, deallocations, and any change in m_pMetadata are protected by the parent's VmaBlockVector::m_Mutex.
+ */
+ VMA_MUTEX m_Mutex;
+ uint32_t m_MapCount;
+ void *m_pMappedData;
+};
+
+struct VmaPointerLess {
+ bool operator()(const void *lhs, const void *rhs) const {
+ return lhs < rhs;
+ }
+};
+
+struct VmaDefragmentationMove {
+ size_t srcBlockIndex;
+ size_t dstBlockIndex;
+ VkDeviceSize srcOffset;
+ VkDeviceSize dstOffset;
+ VkDeviceSize size;
+};
+
+class VmaDefragmentationAlgorithm;
+
+/*
+Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
+Vulkan memory type.
+
+Synchronized internally with a mutex.
+*/
+struct VmaBlockVector {
+ VMA_CLASS_NO_COPY(VmaBlockVector)
+public:
+ VmaBlockVector(
+ VmaAllocator hAllocator,
+ VmaPool hParentPool,
+ uint32_t memoryTypeIndex,
+ VkDeviceSize preferredBlockSize,
+ size_t minBlockCount,
+ size_t maxBlockCount,
+ VkDeviceSize bufferImageGranularity,
+ uint32_t frameInUseCount,
+ bool isCustomPool,
+ bool explicitBlockSize,
+ uint32_t algorithm);
+ ~VmaBlockVector();
+
+ VkResult CreateMinBlocks();
+
+ VmaPool GetParentPool() const { return m_hParentPool; }
+ uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
+ VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
+ VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
+ uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
+ uint32_t GetAlgorithm() const { return m_Algorithm; }
+
+ void GetPoolStats(VmaPoolStats *pStats);
+
+ bool IsEmpty() const { return m_Blocks.empty(); }
+ bool IsCorruptionDetectionEnabled() const;
+
+ VkResult Allocate(
+ uint32_t currentFrameIndex,
+ VkDeviceSize size,
+ VkDeviceSize alignment,
+ const VmaAllocationCreateInfo &createInfo,
+ VmaSuballocationType suballocType,
+ size_t allocationCount,
+ VmaAllocation *pAllocations);
+
+ void Free(
+ VmaAllocation hAllocation);
+
+ // Adds statistics of this BlockVector to pStats.
+ void AddStats(VmaStats *pStats);
+
+#if VMA_STATS_STRING_ENABLED
+ void PrintDetailedMap(class VmaJsonWriter &json);
+#endif
+
+ void MakePoolAllocationsLost(
+ uint32_t currentFrameIndex,
+ size_t *pLostAllocationCount);
+ VkResult CheckCorruption();
+
+ // Saves results in pCtx->res.
+ void Defragment(
+ class VmaBlockVectorDefragmentationContext *pCtx,
+ VmaDefragmentationStats *pStats,
+ VkDeviceSize &maxCpuBytesToMove, uint32_t &maxCpuAllocationsToMove,
+ VkDeviceSize &maxGpuBytesToMove, uint32_t &maxGpuAllocationsToMove,
+ VkCommandBuffer commandBuffer);
+ void DefragmentationEnd(
+ class VmaBlockVectorDefragmentationContext *pCtx,
+ VmaDefragmentationStats *pStats);
+
+ ////////////////////////////////////////////////////////////////////////////////
+ // To be used only while the m_Mutex is locked. Used during defragmentation.
+
+ size_t GetBlockCount() const { return m_Blocks.size(); }
+ VmaDeviceMemoryBlock *GetBlock(size_t index) const { return m_Blocks[index]; }
+ size_t CalcAllocationCount() const;
+ bool IsBufferImageGranularityConflictPossible() const;
+
+private:
+ friend class VmaDefragmentationAlgorithm_Generic;
+
+ const VmaAllocator m_hAllocator;
+ const VmaPool m_hParentPool;
+ const uint32_t m_MemoryTypeIndex;
+ const VkDeviceSize m_PreferredBlockSize;
+ const size_t m_MinBlockCount;
+ const size_t m_MaxBlockCount;
+ const VkDeviceSize m_BufferImageGranularity;
+ const uint32_t m_FrameInUseCount;
+ const bool m_IsCustomPool;
+ const bool m_ExplicitBlockSize;
+ const uint32_t m_Algorithm;
+	/* There can be at most one block that is completely empty - a hysteresis to
+	avoid the pessimistic case of alternating creation and destruction of a
+	VkDeviceMemory. */
+ bool m_HasEmptyBlock;
+ VMA_RW_MUTEX m_Mutex;
+ // Incrementally sorted by sumFreeSize, ascending.
+ VmaVector<VmaDeviceMemoryBlock *, VmaStlAllocator<VmaDeviceMemoryBlock *> > m_Blocks;
+ uint32_t m_NextBlockId;
+
+ VkDeviceSize CalcMaxBlockSize() const;
+
+ // Finds and removes given block from vector.
+ void Remove(VmaDeviceMemoryBlock *pBlock);
+
+ // Performs single step in sorting m_Blocks. They may not be fully sorted
+ // after this call.
+ void IncrementallySortBlocks();
+
+ VkResult AllocatePage(
+ uint32_t currentFrameIndex,
+ VkDeviceSize size,
+ VkDeviceSize alignment,
+ const VmaAllocationCreateInfo &createInfo,
+ VmaSuballocationType suballocType,
+ VmaAllocation *pAllocation);
+
+ // To be used only without CAN_MAKE_OTHER_LOST flag.
+ VkResult AllocateFromBlock(
+ VmaDeviceMemoryBlock *pBlock,
+ uint32_t currentFrameIndex,
+ VkDeviceSize size,
+ VkDeviceSize alignment,
+ VmaAllocationCreateFlags allocFlags,
+ void *pUserData,
+ VmaSuballocationType suballocType,
+ uint32_t strategy,
+ VmaAllocation *pAllocation);
+
+ VkResult CreateBlock(VkDeviceSize blockSize, size_t *pNewBlockIndex);
+
+ // Saves result to pCtx->res.
+ void ApplyDefragmentationMovesCpu(
+ class VmaBlockVectorDefragmentationContext *pDefragCtx,
+ const VmaVector<VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> > &moves);
+ // Saves result to pCtx->res.
+ void ApplyDefragmentationMovesGpu(
+ class VmaBlockVectorDefragmentationContext *pDefragCtx,
+ const VmaVector<VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> > &moves,
+ VkCommandBuffer commandBuffer);
+
+ /*
+	Used during defragmentation. pDefragmentationStats is an optional in/out
+	parameter: if provided, it is updated with new data.
+ */
+ void FreeEmptyBlocks(VmaDefragmentationStats *pDefragmentationStats);
+};
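+
+// Sketch (illustrative only, kept disabled) of the "single step in sorting"
+// that IncrementallySortBlocks() above describes: at most one bubble-sort swap
+// per call keeps m_Blocks approximately sorted by sumFreeSize, ascending,
+// without paying for a full sort on every allocation. Assumes VMA_SWAP as
+// defined earlier in this file; the function name is illustrative.
+#if 0
+static void IncrementallySortSketch(VmaVector<VmaDeviceMemoryBlock *, VmaStlAllocator<VmaDeviceMemoryBlock *> > &blocks) {
+	for (size_t i = 1; i < blocks.size(); ++i) {
+		if (blocks[i - 1]->m_pMetadata->GetSumFreeSize() > blocks[i]->m_pMetadata->GetSumFreeSize()) {
+			VMA_SWAP(blocks[i - 1], blocks[i]);
+			return; // At most one swap per call.
+		}
+	}
+}
+#endif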
+
+struct VmaPool_T {
+ VMA_CLASS_NO_COPY(VmaPool_T)
+public:
+ VmaBlockVector m_BlockVector;
+
+ VmaPool_T(
+ VmaAllocator hAllocator,
+ const VmaPoolCreateInfo &createInfo,
+ VkDeviceSize preferredBlockSize);
+ ~VmaPool_T();
+
+ uint32_t GetId() const { return m_Id; }
+ void SetId(uint32_t id) {
+ VMA_ASSERT(m_Id == 0);
+ m_Id = id;
+ }
+
+#if VMA_STATS_STRING_ENABLED
+ //void PrintDetailedMap(class VmaStringBuilder& sb);
+#endif
+
+private:
+ uint32_t m_Id;
+};
+
+/*
+Performs defragmentation:
+
+- Updates `pBlockVector->m_pMetadata`.
+- Updates allocations by calling ChangeBlockAllocation() or ChangeOffset().
+- Does not move actual data, only returns requested moves as `moves`.
+*/
+class VmaDefragmentationAlgorithm {
+ VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm)
+public:
+ VmaDefragmentationAlgorithm(
+ VmaAllocator hAllocator,
+ VmaBlockVector *pBlockVector,
+ uint32_t currentFrameIndex) :
+ m_hAllocator(hAllocator),
+ m_pBlockVector(pBlockVector),
+ m_CurrentFrameIndex(currentFrameIndex) {
+ }
+ virtual ~VmaDefragmentationAlgorithm() {
+ }
+
+ virtual void AddAllocation(VmaAllocation hAlloc, VkBool32 *pChanged) = 0;
+ virtual void AddAll() = 0;
+
+ virtual VkResult Defragment(
+ VmaVector<VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> > &moves,
+ VkDeviceSize maxBytesToMove,
+ uint32_t maxAllocationsToMove) = 0;
+
+ virtual VkDeviceSize GetBytesMoved() const = 0;
+ virtual uint32_t GetAllocationsMoved() const = 0;
+
+protected:
+ VmaAllocator const m_hAllocator;
+ VmaBlockVector *const m_pBlockVector;
+ const uint32_t m_CurrentFrameIndex;
+
+ struct AllocationInfo {
+ VmaAllocation m_hAllocation;
+ VkBool32 *m_pChanged;
+
+ AllocationInfo() :
+ m_hAllocation(VK_NULL_HANDLE),
+ m_pChanged(VMA_NULL) {
+ }
+ AllocationInfo(VmaAllocation hAlloc, VkBool32 *pChanged) :
+ m_hAllocation(hAlloc),
+ m_pChanged(pChanged) {
+ }
+ };
+};
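+
+// Sketch (illustrative only, kept disabled) of how the moves returned by
+// Defragment() are consumed; the real work happens in
+// VmaBlockVector::ApplyDefragmentationMovesCpu/Gpu. Mapping, flushing and
+// synchronization are elided, and the use of memmove for possibly overlapping
+// in-block ranges is this sketch's choice; names are illustrative.
+#if 0
+static void ApplyMovesCpuSketch(
+		const VmaVector<VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> > &moves,
+		char *const *blockMappedData) { // Mapped pointer per block index.
+	for (size_t i = 0; i < moves.size(); ++i) {
+		const VmaDefragmentationMove &move = moves[i];
+		// memmove, because source and destination may overlap within a block.
+		memmove(
+				blockMappedData[move.dstBlockIndex] + move.dstOffset,
+				blockMappedData[move.srcBlockIndex] + move.srcOffset,
+				(size_t)move.size);
+	}
+}
+#endif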
+
+class VmaDefragmentationAlgorithm_Generic : public VmaDefragmentationAlgorithm {
+ VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm_Generic)
+public:
+ VmaDefragmentationAlgorithm_Generic(
+ VmaAllocator hAllocator,
+ VmaBlockVector *pBlockVector,
+ uint32_t currentFrameIndex,
+ bool overlappingMoveSupported);
+ virtual ~VmaDefragmentationAlgorithm_Generic();
+
+ virtual void AddAllocation(VmaAllocation hAlloc, VkBool32 *pChanged);
+ virtual void AddAll() { m_AllAllocations = true; }
+
+ virtual VkResult Defragment(
+ VmaVector<VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> > &moves,
+ VkDeviceSize maxBytesToMove,
+ uint32_t maxAllocationsToMove);
+
+ virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
+ virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
+
+private:
+ uint32_t m_AllocationCount;
+ bool m_AllAllocations;
+
+ VkDeviceSize m_BytesMoved;
+ uint32_t m_AllocationsMoved;
+
+ struct AllocationInfoSizeGreater {
+ bool operator()(const AllocationInfo &lhs, const AllocationInfo &rhs) const {
+ return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
+ }
+ };
+
+ struct AllocationInfoOffsetGreater {
+ bool operator()(const AllocationInfo &lhs, const AllocationInfo &rhs) const {
+ return lhs.m_hAllocation->GetOffset() > rhs.m_hAllocation->GetOffset();
+ }
+ };
+
+ struct BlockInfo {
+ size_t m_OriginalBlockIndex;
+ VmaDeviceMemoryBlock *m_pBlock;
+ bool m_HasNonMovableAllocations;
+ VmaVector<AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
+
+ BlockInfo(const VkAllocationCallbacks *pAllocationCallbacks) :
+ m_OriginalBlockIndex(SIZE_MAX),
+ m_pBlock(VMA_NULL),
+ m_HasNonMovableAllocations(true),
+ m_Allocations(pAllocationCallbacks) {
+ }
+
+ void CalcHasNonMovableAllocations() {
+ const size_t blockAllocCount = m_pBlock->m_pMetadata->GetAllocationCount();
+ const size_t defragmentAllocCount = m_Allocations.size();
+ m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
+ }
+
+ void SortAllocationsBySizeDescending() {
+ VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
+ }
+
+ void SortAllocationsByOffsetDescending() {
+ VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoOffsetGreater());
+ }
+ };
+
+ struct BlockPointerLess {
+ bool operator()(const BlockInfo *pLhsBlockInfo, const VmaDeviceMemoryBlock *pRhsBlock) const {
+ return pLhsBlockInfo->m_pBlock < pRhsBlock;
+ }
+ bool operator()(const BlockInfo *pLhsBlockInfo, const BlockInfo *pRhsBlockInfo) const {
+ return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
+ }
+ };
+
+ // 1. Blocks with some non-movable allocations go first.
+ // 2. Blocks with smaller sumFreeSize go first.
+ struct BlockInfoCompareMoveDestination {
+ bool operator()(const BlockInfo *pLhsBlockInfo, const BlockInfo *pRhsBlockInfo) const {
+ if (pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations) {
+ return true;
+ }
+ if (!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations) {
+ return false;
+ }
+ if (pLhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize()) {
+ return true;
+ }
+ return false;
+ }
+ };
+
+ typedef VmaVector<BlockInfo *, VmaStlAllocator<BlockInfo *> > BlockInfoVector;
+ BlockInfoVector m_Blocks;
+
+ VkResult DefragmentRound(
+ VmaVector<VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> > &moves,
+ VkDeviceSize maxBytesToMove,
+ uint32_t maxAllocationsToMove);
+
+ size_t CalcBlocksWithNonMovableCount() const;
+
+ static bool MoveMakesSense(
+ size_t dstBlockIndex, VkDeviceSize dstOffset,
+ size_t srcBlockIndex, VkDeviceSize srcOffset);
+};
+
+class VmaDefragmentationAlgorithm_Fast : public VmaDefragmentationAlgorithm {
+ VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm_Fast)
+public:
+ VmaDefragmentationAlgorithm_Fast(
+ VmaAllocator hAllocator,
+ VmaBlockVector *pBlockVector,
+ uint32_t currentFrameIndex,
+ bool overlappingMoveSupported);
+ virtual ~VmaDefragmentationAlgorithm_Fast();
+
+ virtual void AddAllocation(VmaAllocation hAlloc, VkBool32 *pChanged) { ++m_AllocationCount; }
+ virtual void AddAll() { m_AllAllocations = true; }
+
+ virtual VkResult Defragment(
+ VmaVector<VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> > &moves,
+ VkDeviceSize maxBytesToMove,
+ uint32_t maxAllocationsToMove);
+
+ virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
+ virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
+
+private:
+ struct BlockInfo {
+ size_t origBlockIndex;
+ };
+
+ class FreeSpaceDatabase {
+ public:
+ FreeSpaceDatabase() {
+ FreeSpace s = {};
+ s.blockInfoIndex = SIZE_MAX;
+ for (size_t i = 0; i < MAX_COUNT; ++i) {
+ m_FreeSpaces[i] = s;
+ }
+ }
+
+ void Register(size_t blockInfoIndex, VkDeviceSize offset, VkDeviceSize size) {
+ if (size < VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER) {
+ return;
+ }
+
+ // Find first invalid or the smallest structure.
+ size_t bestIndex = SIZE_MAX;
+ for (size_t i = 0; i < MAX_COUNT; ++i) {
+ // Empty structure.
+ if (m_FreeSpaces[i].blockInfoIndex == SIZE_MAX) {
+ bestIndex = i;
+ break;
+ }
+ if (m_FreeSpaces[i].size < size &&
+ (bestIndex == SIZE_MAX || m_FreeSpaces[bestIndex].size > m_FreeSpaces[i].size)) {
+ bestIndex = i;
+ }
+ }
+
+ if (bestIndex != SIZE_MAX) {
+ m_FreeSpaces[bestIndex].blockInfoIndex = blockInfoIndex;
+ m_FreeSpaces[bestIndex].offset = offset;
+ m_FreeSpaces[bestIndex].size = size;
+ }
+ }
+
+ bool Fetch(VkDeviceSize alignment, VkDeviceSize size,
+ size_t &outBlockInfoIndex, VkDeviceSize &outDstOffset) {
+ size_t bestIndex = SIZE_MAX;
+ VkDeviceSize bestFreeSpaceAfter = 0;
+ for (size_t i = 0; i < MAX_COUNT; ++i) {
+ // Structure is valid.
+ if (m_FreeSpaces[i].blockInfoIndex != SIZE_MAX) {
+ const VkDeviceSize dstOffset = VmaAlignUp(m_FreeSpaces[i].offset, alignment);
+ // Allocation fits into this structure.
+ if (dstOffset + size <= m_FreeSpaces[i].offset + m_FreeSpaces[i].size) {
+ const VkDeviceSize freeSpaceAfter = (m_FreeSpaces[i].offset + m_FreeSpaces[i].size) -
+ (dstOffset + size);
+ if (bestIndex == SIZE_MAX || freeSpaceAfter > bestFreeSpaceAfter) {
+ bestIndex = i;
+ bestFreeSpaceAfter = freeSpaceAfter;
+ }
+ }
+ }
+ }
+
+ if (bestIndex != SIZE_MAX) {
+ outBlockInfoIndex = m_FreeSpaces[bestIndex].blockInfoIndex;
+ outDstOffset = VmaAlignUp(m_FreeSpaces[bestIndex].offset, alignment);
+
+ if (bestFreeSpaceAfter >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER) {
+ // Leave this structure for remaining empty space.
+ const VkDeviceSize alignmentPlusSize = (outDstOffset - m_FreeSpaces[bestIndex].offset) + size;
+ m_FreeSpaces[bestIndex].offset += alignmentPlusSize;
+ m_FreeSpaces[bestIndex].size -= alignmentPlusSize;
+ } else {
+ // This structure becomes invalid.
+ m_FreeSpaces[bestIndex].blockInfoIndex = SIZE_MAX;
+ }
+
+ return true;
+ }
+
+ return false;
+ }
+
+ private:
+ static const size_t MAX_COUNT = 4;
+
+ struct FreeSpace {
+ size_t blockInfoIndex; // SIZE_MAX means this structure is invalid.
+ VkDeviceSize offset;
+ VkDeviceSize size;
+ } m_FreeSpaces[MAX_COUNT];
+ };
+
+ const bool m_OverlappingMoveSupported;
+
+ uint32_t m_AllocationCount;
+ bool m_AllAllocations;
+
+ VkDeviceSize m_BytesMoved;
+ uint32_t m_AllocationsMoved;
+
+ VmaVector<BlockInfo, VmaStlAllocator<BlockInfo> > m_BlockInfos;
+
+ void PreprocessMetadata();
+ void PostprocessMetadata();
+ void InsertSuballoc(VmaBlockMetadata_Generic *pMetadata, const VmaSuballocation &suballoc);
+};
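+
+// Worked example of FreeSpaceDatabase::Fetch() above (illustrative): for a
+// registered free space at offset = 100, size = 200 and a request with
+// alignment = 64, size = 150: dstOffset = VmaAlignUp(100, 64) = 128, and
+// 128 + 150 = 278 <= 100 + 200 = 300, so the request fits with
+// freeSpaceAfter = 300 - 278 = 22 bytes. The consumed span of
+// (128 - 100) + 150 = 178 bytes is cut from the front of the entry; the
+// 22-byte remainder stays registered only if it is at least
+// VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER, otherwise the entry is
+// invalidated.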
+
+struct VmaBlockDefragmentationContext {
+ enum BLOCK_FLAG {
+ BLOCK_FLAG_USED = 0x00000001,
+ };
+ uint32_t flags;
+ VkBuffer hBuffer;
+
+ VmaBlockDefragmentationContext() :
+ flags(0),
+ hBuffer(VK_NULL_HANDLE) {
+ }
+};
+
+class VmaBlockVectorDefragmentationContext {
+ VMA_CLASS_NO_COPY(VmaBlockVectorDefragmentationContext)
+public:
+ VkResult res;
+ bool mutexLocked;
+ VmaVector<VmaBlockDefragmentationContext, VmaStlAllocator<VmaBlockDefragmentationContext> > blockContexts;
+
+ VmaBlockVectorDefragmentationContext(
+ VmaAllocator hAllocator,
+ VmaPool hCustomPool, // Optional.
+ VmaBlockVector *pBlockVector,
+ uint32_t currFrameIndex,
+ uint32_t flags);
+ ~VmaBlockVectorDefragmentationContext();
+
+ VmaPool GetCustomPool() const { return m_hCustomPool; }
+ VmaBlockVector *GetBlockVector() const { return m_pBlockVector; }
+ VmaDefragmentationAlgorithm *GetAlgorithm() const { return m_pAlgorithm; }
+
+ void AddAllocation(VmaAllocation hAlloc, VkBool32 *pChanged);
+ void AddAll() { m_AllAllocations = true; }
+
+ void Begin(bool overlappingMoveSupported);
+
+private:
+ const VmaAllocator m_hAllocator;
+ // Null if not from custom pool.
+ const VmaPool m_hCustomPool;
+	// Redundant, kept for convenience so it does not have to be fetched from m_hCustomPool->m_BlockVector or m_hAllocator->m_pBlockVectors.
+ VmaBlockVector *const m_pBlockVector;
+ const uint32_t m_CurrFrameIndex;
+ const uint32_t m_AlgorithmFlags;
+ // Owner of this object.
+ VmaDefragmentationAlgorithm *m_pAlgorithm;
+
+ struct AllocInfo {
+ VmaAllocation hAlloc;
+ VkBool32 *pChanged;
+ };
+ // Used between constructor and Begin.
+ VmaVector<AllocInfo, VmaStlAllocator<AllocInfo> > m_Allocations;
+ bool m_AllAllocations;
+};
+
+struct VmaDefragmentationContext_T {
+private:
+ VMA_CLASS_NO_COPY(VmaDefragmentationContext_T)
+public:
+ VmaDefragmentationContext_T(
+ VmaAllocator hAllocator,
+ uint32_t currFrameIndex,
+ uint32_t flags,
+ VmaDefragmentationStats *pStats);
+ ~VmaDefragmentationContext_T();
+
+ void AddPools(uint32_t poolCount, VmaPool *pPools);
+ void AddAllocations(
+ uint32_t allocationCount,
+ VmaAllocation *pAllocations,
+ VkBool32 *pAllocationsChanged);
+
+ /*
+ Returns:
+	- `VK_SUCCESS` if it succeeded and the object can be destroyed immediately.
+	- `VK_NOT_READY` if it succeeded but the object must remain alive until vmaDefragmentationEnd().
+	- A negative value if an error occurred; the object can be destroyed immediately.
+ */
+ VkResult Defragment(
+ VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove,
+ VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove,
+ VkCommandBuffer commandBuffer, VmaDefragmentationStats *pStats);
+
+private:
+ const VmaAllocator m_hAllocator;
+ const uint32_t m_CurrFrameIndex;
+ const uint32_t m_Flags;
+ VmaDefragmentationStats *const m_pStats;
+ // Owner of these objects.
+ VmaBlockVectorDefragmentationContext *m_DefaultPoolContexts[VK_MAX_MEMORY_TYPES];
+ // Owner of these objects.
+ VmaVector<VmaBlockVectorDefragmentationContext *, VmaStlAllocator<VmaBlockVectorDefragmentationContext *> > m_CustomPoolContexts;
+};
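+
+// Sketch (illustrative only, kept disabled) of handling the Defragment()
+// return protocol documented above; vmaDefragmentationBegin/End are the public
+// entry points that drive this object. The function name is illustrative.
+#if 0
+static void DefragmentSketch(VmaDefragmentationContext_T *pCtx, VkCommandBuffer cmdBuf, VmaDefragmentationStats *pStats) {
+	VkResult res = pCtx->Defragment(
+			VK_WHOLE_SIZE, UINT32_MAX, // No CPU-side limits.
+			VK_WHOLE_SIZE, UINT32_MAX, // No GPU-side limits.
+			cmdBuf, pStats);
+	if (res == VK_SUCCESS) {
+		// Finished: the context can be destroyed immediately.
+	} else if (res == VK_NOT_READY) {
+		// GPU moves were recorded into cmdBuf: submit it, wait, then call
+		// vmaDefragmentationEnd() to finish and destroy the context.
+	} else {
+		// Negative VkResult: an error occurred; destroy the context.
+	}
+}
+#endif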
+
+#if VMA_RECORDING_ENABLED
+
+class VmaRecorder {
+public:
+ VmaRecorder();
+ VkResult Init(const VmaRecordSettings &settings, bool useMutex);
+ void WriteConfiguration(
+ const VkPhysicalDeviceProperties &devProps,
+ const VkPhysicalDeviceMemoryProperties &memProps,
+ bool dedicatedAllocationExtensionEnabled);
+ ~VmaRecorder();
+
+ void RecordCreateAllocator(uint32_t frameIndex);
+ void RecordDestroyAllocator(uint32_t frameIndex);
+ void RecordCreatePool(uint32_t frameIndex,
+ const VmaPoolCreateInfo &createInfo,
+ VmaPool pool);
+ void RecordDestroyPool(uint32_t frameIndex, VmaPool pool);
+ void RecordAllocateMemory(uint32_t frameIndex,
+ const VkMemoryRequirements &vkMemReq,
+ const VmaAllocationCreateInfo &createInfo,
+ VmaAllocation allocation);
+ void RecordAllocateMemoryPages(uint32_t frameIndex,
+ const VkMemoryRequirements &vkMemReq,
+ const VmaAllocationCreateInfo &createInfo,
+ uint64_t allocationCount,
+ const VmaAllocation *pAllocations);
+ void RecordAllocateMemoryForBuffer(uint32_t frameIndex,
+ const VkMemoryRequirements &vkMemReq,
+ bool requiresDedicatedAllocation,
+ bool prefersDedicatedAllocation,
+ const VmaAllocationCreateInfo &createInfo,
+ VmaAllocation allocation);
+ void RecordAllocateMemoryForImage(uint32_t frameIndex,
+ const VkMemoryRequirements &vkMemReq,
+ bool requiresDedicatedAllocation,
+ bool prefersDedicatedAllocation,
+ const VmaAllocationCreateInfo &createInfo,
+ VmaAllocation allocation);
+ void RecordFreeMemory(uint32_t frameIndex,
+ VmaAllocation allocation);
+ void RecordFreeMemoryPages(uint32_t frameIndex,
+ uint64_t allocationCount,
+ const VmaAllocation *pAllocations);
+ void RecordResizeAllocation(
+ uint32_t frameIndex,
+ VmaAllocation allocation,
+ VkDeviceSize newSize);
+ void RecordSetAllocationUserData(uint32_t frameIndex,
+ VmaAllocation allocation,
+ const void *pUserData);
+ void RecordCreateLostAllocation(uint32_t frameIndex,
+ VmaAllocation allocation);
+ void RecordMapMemory(uint32_t frameIndex,
+ VmaAllocation allocation);
+ void RecordUnmapMemory(uint32_t frameIndex,
+ VmaAllocation allocation);
+ void RecordFlushAllocation(uint32_t frameIndex,
+ VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
+ void RecordInvalidateAllocation(uint32_t frameIndex,
+ VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
+ void RecordCreateBuffer(uint32_t frameIndex,
+ const VkBufferCreateInfo &bufCreateInfo,
+ const VmaAllocationCreateInfo &allocCreateInfo,
+ VmaAllocation allocation);
+ void RecordCreateImage(uint32_t frameIndex,
+ const VkImageCreateInfo &imageCreateInfo,
+ const VmaAllocationCreateInfo &allocCreateInfo,
+ VmaAllocation allocation);
+ void RecordDestroyBuffer(uint32_t frameIndex,
+ VmaAllocation allocation);
+ void RecordDestroyImage(uint32_t frameIndex,
+ VmaAllocation allocation);
+ void RecordTouchAllocation(uint32_t frameIndex,
+ VmaAllocation allocation);
+ void RecordGetAllocationInfo(uint32_t frameIndex,
+ VmaAllocation allocation);
+ void RecordMakePoolAllocationsLost(uint32_t frameIndex,
+ VmaPool pool);
+ void RecordDefragmentationBegin(uint32_t frameIndex,
+ const VmaDefragmentationInfo2 &info,
+ VmaDefragmentationContext ctx);
+ void RecordDefragmentationEnd(uint32_t frameIndex,
+ VmaDefragmentationContext ctx);
+
+private:
+ struct CallParams {
+ uint32_t threadId;
+ double time;
+ };
+
+ class UserDataString {
+ public:
+ UserDataString(VmaAllocationCreateFlags allocFlags, const void *pUserData);
+ const char *GetString() const { return m_Str; }
+
+ private:
+ char m_PtrStr[17];
+ const char *m_Str;
+ };
+
+ bool m_UseMutex;
+ VmaRecordFlags m_Flags;
+ FILE *m_File;
+ VMA_MUTEX m_FileMutex;
+ int64_t m_Freq;
+ int64_t m_StartCounter;
+
+ void GetBasicParams(CallParams &outParams);
+
+ // T must be a pointer type, e.g. VmaAllocation, VmaPool.
+ template <typename T>
+ void PrintPointerList(uint64_t count, const T *pItems) {
+ if (count) {
+ fprintf(m_File, "%p", pItems[0]);
+ for (uint64_t i = 1; i < count; ++i) {
+ fprintf(m_File, " %p", pItems[i]);
+ }
+ }
+ }
+
+ void PrintPointerList(uint64_t count, const VmaAllocation *pItems);
+ void Flush();
+};
+
+#endif // #if VMA_RECORDING_ENABLED
+
+/*
+Thread-safe wrapper over VmaPoolAllocator free list, for allocation of VmaAllocation_T objects.
+*/
+class VmaAllocationObjectAllocator {
+ VMA_CLASS_NO_COPY(VmaAllocationObjectAllocator)
+public:
+ VmaAllocationObjectAllocator(const VkAllocationCallbacks *pAllocationCallbacks);
+
+ VmaAllocation Allocate();
+ void Free(VmaAllocation hAlloc);
+
+private:
+ VMA_MUTEX m_Mutex;
+ VmaPoolAllocator<VmaAllocation_T> m_Allocator;
+};
+
+// Main allocator object.
+struct VmaAllocator_T {
+ VMA_CLASS_NO_COPY(VmaAllocator_T)
+public:
+ bool m_UseMutex;
+ bool m_UseKhrDedicatedAllocation;
+ VkDevice m_hDevice;
+ bool m_AllocationCallbacksSpecified;
+ VkAllocationCallbacks m_AllocationCallbacks;
+ VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
+ VmaAllocationObjectAllocator m_AllocationObjectAllocator;
+
+	// Number of bytes still free out of the heap's size limit, or VK_WHOLE_SIZE if that heap has no limit.
+ VkDeviceSize m_HeapSizeLimit[VK_MAX_MEMORY_HEAPS];
+ VMA_MUTEX m_HeapSizeLimitMutex;
+
+ VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
+ VkPhysicalDeviceMemoryProperties m_MemProps;
+
+ // Default pools.
+ VmaBlockVector *m_pBlockVectors[VK_MAX_MEMORY_TYPES];
+
+ // Each vector is sorted by memory (handle value).
+ typedef VmaVector<VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
+ AllocationVectorType *m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES];
+ VMA_RW_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];
+
+ VmaAllocator_T(const VmaAllocatorCreateInfo *pCreateInfo);
+ VkResult Init(const VmaAllocatorCreateInfo *pCreateInfo);
+ ~VmaAllocator_T();
+
+ const VkAllocationCallbacks *GetAllocationCallbacks() const {
+ return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
+ }
+ const VmaVulkanFunctions &GetVulkanFunctions() const {
+ return m_VulkanFunctions;
+ }
+
+ VkDeviceSize GetBufferImageGranularity() const {
+ return VMA_MAX(
+ static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
+ m_PhysicalDeviceProperties.limits.bufferImageGranularity);
+ }
+
+ uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
+ uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }
+
+ uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const {
+ VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
+ return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
+ }
+	// True when the given memory type is HOST_VISIBLE but not HOST_COHERENT.
+ bool IsMemoryTypeNonCoherent(uint32_t memTypeIndex) const {
+ return (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) ==
+ VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
+ }
+	// Minimum alignment for all allocations in the given memory type.
+ VkDeviceSize GetMemoryTypeMinAlignment(uint32_t memTypeIndex) const {
+ return IsMemoryTypeNonCoherent(memTypeIndex) ?
+ VMA_MAX((VkDeviceSize)VMA_DEBUG_ALIGNMENT, m_PhysicalDeviceProperties.limits.nonCoherentAtomSize) :
+ (VkDeviceSize)VMA_DEBUG_ALIGNMENT;
+ }
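+
+	// Worked example (illustrative): a memory type whose flags include
+	// HOST_VISIBLE but not HOST_COHERENT is non-coherent, so with
+	// nonCoherentAtomSize = 64 and VMA_DEBUG_ALIGNMENT = 1 the minimum
+	// alignment above is VMA_MAX(1, 64) = 64; coherent types keep
+	// VMA_DEBUG_ALIGNMENT.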
+
+ bool IsIntegratedGpu() const {
+ return m_PhysicalDeviceProperties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
+ }
+
+#if VMA_RECORDING_ENABLED
+ VmaRecorder *GetRecorder() const { return m_pRecorder; }
+#endif
+
+ void GetBufferMemoryRequirements(
+ VkBuffer hBuffer,
+ VkMemoryRequirements &memReq,
+ bool &requiresDedicatedAllocation,
+ bool &prefersDedicatedAllocation) const;
+ void GetImageMemoryRequirements(
+ VkImage hImage,
+ VkMemoryRequirements &memReq,
+ bool &requiresDedicatedAllocation,
+ bool &prefersDedicatedAllocation) const;
+
+ // Main allocation function.
+ VkResult AllocateMemory(
+ const VkMemoryRequirements &vkMemReq,
+ bool requiresDedicatedAllocation,
+ bool prefersDedicatedAllocation,
+ VkBuffer dedicatedBuffer,
+ VkImage dedicatedImage,
+ const VmaAllocationCreateInfo &createInfo,
+ VmaSuballocationType suballocType,
+ size_t allocationCount,
+ VmaAllocation *pAllocations);
+
+ // Main deallocation function.
+ void FreeMemory(
+ size_t allocationCount,
+ const VmaAllocation *pAllocations);
+
+ VkResult ResizeAllocation(
+ const VmaAllocation alloc,
+ VkDeviceSize newSize);
+
+ void CalculateStats(VmaStats *pStats);
+
+#if VMA_STATS_STRING_ENABLED
+ void PrintDetailedMap(class VmaJsonWriter &json);
+#endif
+
+ VkResult DefragmentationBegin(
+ const VmaDefragmentationInfo2 &info,
+ VmaDefragmentationStats *pStats,
+ VmaDefragmentationContext *pContext);
+ VkResult DefragmentationEnd(
+ VmaDefragmentationContext context);
+
+ void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo *pAllocationInfo);
+ bool TouchAllocation(VmaAllocation hAllocation);
+
+ VkResult CreatePool(const VmaPoolCreateInfo *pCreateInfo, VmaPool *pPool);
+ void DestroyPool(VmaPool pool);
+ void GetPoolStats(VmaPool pool, VmaPoolStats *pPoolStats);
+
+ void SetCurrentFrameIndex(uint32_t frameIndex);
+ uint32_t GetCurrentFrameIndex() const { return m_CurrentFrameIndex.load(); }
+
+ void MakePoolAllocationsLost(
+ VmaPool hPool,
+ size_t *pLostAllocationCount);
+ VkResult CheckPoolCorruption(VmaPool hPool);
+ VkResult CheckCorruption(uint32_t memoryTypeBits);
+
+ void CreateLostAllocation(VmaAllocation *pAllocation);
+
+ VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo *pAllocateInfo, VkDeviceMemory *pMemory);
+ void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);
+
+ VkResult Map(VmaAllocation hAllocation, void **ppData);
+ void Unmap(VmaAllocation hAllocation);
+
+ VkResult BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer);
+ VkResult BindImageMemory(VmaAllocation hAllocation, VkImage hImage);
+
+ void FlushOrInvalidateAllocation(
+ VmaAllocation hAllocation,
+ VkDeviceSize offset, VkDeviceSize size,
+ VMA_CACHE_OPERATION op);
+
+ void FillAllocation(const VmaAllocation hAllocation, uint8_t pattern);
+
+ /*
+ Returns bit mask of memory types that can support defragmentation on GPU as
+ they support creation of required buffer for copy operations.
+ */
+ uint32_t GetGpuDefragmentationMemoryTypeBits();
+
+private:
+ VkDeviceSize m_PreferredLargeHeapBlockSize;
+
+ VkPhysicalDevice m_PhysicalDevice;
+ VMA_ATOMIC_UINT32 m_CurrentFrameIndex;
+ VMA_ATOMIC_UINT32 m_GpuDefragmentationMemoryTypeBits; // UINT32_MAX means uninitialized.
+
+ VMA_RW_MUTEX m_PoolsMutex;
+ // Protected by m_PoolsMutex. Sorted by pointer value.
+ VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
+ uint32_t m_NextPoolId;
+
+ VmaVulkanFunctions m_VulkanFunctions;
+
+#if VMA_RECORDING_ENABLED
+ VmaRecorder *m_pRecorder;
+#endif
+
+ void ImportVulkanFunctions(const VmaVulkanFunctions *pVulkanFunctions);
+
+ VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);
+
+ VkResult AllocateMemoryOfType(
+ VkDeviceSize size,
+ VkDeviceSize alignment,
+ bool dedicatedAllocation,
+ VkBuffer dedicatedBuffer,
+ VkImage dedicatedImage,
+ const VmaAllocationCreateInfo &createInfo,
+ uint32_t memTypeIndex,
+ VmaSuballocationType suballocType,
+ size_t allocationCount,
+ VmaAllocation *pAllocations);
+
+ // Helper function only to be used inside AllocateDedicatedMemory.
+ VkResult AllocateDedicatedMemoryPage(
+ VkDeviceSize size,
+ VmaSuballocationType suballocType,
+ uint32_t memTypeIndex,
+ const VkMemoryAllocateInfo &allocInfo,
+ bool map,
+ bool isUserDataString,
+ void *pUserData,
+ VmaAllocation *pAllocation);
+
+ // Allocates and registers new VkDeviceMemory specifically for dedicated allocations.
+ VkResult AllocateDedicatedMemory(
+ VkDeviceSize size,
+ VmaSuballocationType suballocType,
+ uint32_t memTypeIndex,
+ bool map,
+ bool isUserDataString,
+ void *pUserData,
+ VkBuffer dedicatedBuffer,
+ VkImage dedicatedImage,
+ size_t allocationCount,
+ VmaAllocation *pAllocations);
+
+ // Frees the given allocation as dedicated memory: unregisters it and releases its VkDeviceMemory.
+ void FreeDedicatedMemory(VmaAllocation allocation);
+
+ /*
+ Calculates and returns bit mask of memory types that can support defragmentation
+ on GPU as they support creation of required buffer for copy operations.
+ */
+ uint32_t CalculateGpuDefragmentationMemoryTypeBits() const;
+};
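+
+/*
+Illustrative sketch (not part of the library): how the public VMA entry points
+are expected to drive VmaAllocator_T above. Assumes `physicalDevice` and
+`device` are already-created Vulkan handles; error handling is omitted.
+
+    VmaAllocatorCreateInfo allocatorInfo = {};
+    allocatorInfo.physicalDevice = physicalDevice;
+    allocatorInfo.device = device;
+    VmaAllocator allocator;
+    vmaCreateAllocator(&allocatorInfo, &allocator); // constructs a VmaAllocator_T
+
+    VkBufferCreateInfo bufInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
+    bufInfo.size = 65536;
+    bufInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT;
+    VmaAllocationCreateInfo allocCreateInfo = {};
+    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
+    VkBuffer buffer;
+    VmaAllocation allocation;
+    // Routes internally through AllocateMemory() and BindBufferMemory() above.
+    vmaCreateBuffer(allocator, &bufInfo, &allocCreateInfo, &buffer, &allocation, VMA_NULL);
+
+    vmaDestroyBuffer(allocator, buffer, allocation); // FreeMemory() + vkDestroyBuffer
+    vmaDestroyAllocator(allocator);
+*/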
+
+////////////////////////////////////////////////////////////////////////////////
+// Memory allocation #2 after VmaAllocator_T definition
+
+static void *VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment) {
+ return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
+}
+
+static void VmaFree(VmaAllocator hAllocator, void *ptr) {
+ VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
+}
+
+template <typename T>
+static T *VmaAllocate(VmaAllocator hAllocator) {
+ return (T *)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
+}
+
+template <typename T>
+static T *VmaAllocateArray(VmaAllocator hAllocator, size_t count) {
+ return (T *)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
+}
+
+template <typename T>
+static void vma_delete(VmaAllocator hAllocator, T *ptr) {
+ if (ptr != VMA_NULL) {
+ ptr->~T();
+ VmaFree(hAllocator, ptr);
+ }
+}
+
+template <typename T>
+static void vma_delete_array(VmaAllocator hAllocator, T *ptr, size_t count) {
+ if (ptr != VMA_NULL) {
+ for (size_t i = count; i--;)
+ ptr[i].~T();
+ VmaFree(hAllocator, ptr);
+ }
+}
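+
+/*
+Usage sketch for the helpers above (illustrative only; `ids` is a hypothetical
+name). VmaAllocate/VmaAllocateArray return raw, suitably aligned storage, so
+non-trivial objects must be constructed with placement new before
+vma_delete/vma_delete_array run their destructors:
+
+    uint32_t *ids = VmaAllocateArray<uint32_t>(hAllocator, 16); // raw storage
+    for (size_t i = 0; i < 16; ++i)
+        new (ids + i) uint32_t(0); // placement-construct each element
+    vma_delete_array(hAllocator, ids, 16); // destroys elements, then frees
+*/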
+
+////////////////////////////////////////////////////////////////////////////////
+// VmaStringBuilder
+
+#if VMA_STATS_STRING_ENABLED
+
+class VmaStringBuilder {
+public:
+ VmaStringBuilder(VmaAllocator alloc) :
+ m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) {}
+ size_t GetLength() const { return m_Data.size(); }
+ const char *GetData() const { return m_Data.data(); }
+
+ void Add(char ch) { m_Data.push_back(ch); }
+ void Add(const char *pStr);
+ void AddNewLine() { Add('\n'); }
+ void AddNumber(uint32_t num);
+ void AddNumber(uint64_t num);
+ void AddPointer(const void *ptr);
+
+private:
+ VmaVector<char, VmaStlAllocator<char> > m_Data;
+};
+
+void VmaStringBuilder::Add(const char *pStr) {
+ const size_t strLen = strlen(pStr);
+ if (strLen > 0) {
+ const size_t oldCount = m_Data.size();
+ m_Data.resize(oldCount + strLen);
+ memcpy(m_Data.data() + oldCount, pStr, strLen);
+ }
+}
+
+void VmaStringBuilder::AddNumber(uint32_t num) {
+ char buf[11];
+ VmaUint32ToStr(buf, sizeof(buf), num);
+ Add(buf);
+}
+
+void VmaStringBuilder::AddNumber(uint64_t num) {
+ char buf[21];
+ VmaUint64ToStr(buf, sizeof(buf), num);
+ Add(buf);
+}
+
+void VmaStringBuilder::AddPointer(const void *ptr) {
+ char buf[21];
+ VmaPtrToStr(buf, sizeof(buf), ptr);
+ Add(buf);
+}
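+
+/*
+Illustrative sketch: building a small message with VmaStringBuilder. Note that
+nothing here appends a terminating '\0', so consumers must pair GetData() with
+GetLength():
+
+    VmaStringBuilder sb(hAllocator);
+    sb.Add("Heap count: ");
+    sb.AddNumber(hAllocator->GetMemoryHeapCount());
+    sb.AddNewLine();
+    // Consume as (sb.GetData(), sb.GetLength()) - the buffer is not null-terminated.
+*/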
+
+#endif // #if VMA_STATS_STRING_ENABLED
+
+////////////////////////////////////////////////////////////////////////////////
+// VmaJsonWriter
+
+#if VMA_STATS_STRING_ENABLED
+
+class VmaJsonWriter {
+ VMA_CLASS_NO_COPY(VmaJsonWriter)
+public:
+ VmaJsonWriter(const VkAllocationCallbacks *pAllocationCallbacks, VmaStringBuilder &sb);
+ ~VmaJsonWriter();
+
+ void BeginObject(bool singleLine = false);
+ void EndObject();
+
+ void BeginArray(bool singleLine = false);
+ void EndArray();
+
+ void WriteString(const char *pStr);
+ void BeginString(const char *pStr = VMA_NULL);
+ void ContinueString(const char *pStr);
+ void ContinueString(uint32_t n);
+ void ContinueString(uint64_t n);
+ void ContinueString_Pointer(const void *ptr);
+ void EndString(const char *pStr = VMA_NULL);
+
+ void WriteNumber(uint32_t n);
+ void WriteNumber(uint64_t n);
+ void WriteBool(bool b);
+ void WriteNull();
+
+private:
+ static const char *const INDENT;
+
+ enum COLLECTION_TYPE {
+ COLLECTION_TYPE_OBJECT,
+ COLLECTION_TYPE_ARRAY,
+ };
+ struct StackItem {
+ COLLECTION_TYPE type;
+ uint32_t valueCount;
+ bool singleLineMode;
+ };
+
+ VmaStringBuilder &m_SB;
+ VmaVector<StackItem, VmaStlAllocator<StackItem> > m_Stack;
+ bool m_InsideString;
+
+ void BeginValue(bool isString);
+ void WriteIndent(bool oneLess = false);
+};
+
+const char *const VmaJsonWriter::INDENT = " ";
+
+VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks *pAllocationCallbacks, VmaStringBuilder &sb) :
+ m_SB(sb),
+ m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
+ m_InsideString(false) {
+}
+
+VmaJsonWriter::~VmaJsonWriter() {
+ VMA_ASSERT(!m_InsideString);
+ VMA_ASSERT(m_Stack.empty());
+}
+
+void VmaJsonWriter::BeginObject(bool singleLine) {
+ VMA_ASSERT(!m_InsideString);
+
+ BeginValue(false);
+ m_SB.Add('{');
+
+ StackItem item;
+ item.type = COLLECTION_TYPE_OBJECT;
+ item.valueCount = 0;
+ item.singleLineMode = singleLine;
+ m_Stack.push_back(item);
+}
+
+void VmaJsonWriter::EndObject() {
+ VMA_ASSERT(!m_InsideString);
+
+ WriteIndent(true);
+ m_SB.Add('}');
+
+ VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
+ m_Stack.pop_back();
+}
+
+void VmaJsonWriter::BeginArray(bool singleLine) {
+ VMA_ASSERT(!m_InsideString);
+
+ BeginValue(false);
+ m_SB.Add('[');
+
+ StackItem item;
+ item.type = COLLECTION_TYPE_ARRAY;
+ item.valueCount = 0;
+ item.singleLineMode = singleLine;
+ m_Stack.push_back(item);
+}
+
+void VmaJsonWriter::EndArray() {
+ VMA_ASSERT(!m_InsideString);
+
+ WriteIndent(true);
+ m_SB.Add(']');
+
+ VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
+ m_Stack.pop_back();
+}
+
+void VmaJsonWriter::WriteString(const char *pStr) {
+ BeginString(pStr);
+ EndString();
+}
+
+void VmaJsonWriter::BeginString(const char *pStr) {
+ VMA_ASSERT(!m_InsideString);
+
+ BeginValue(true);
+ m_SB.Add('"');
+ m_InsideString = true;
+ if (pStr != VMA_NULL && pStr[0] != '\0') {
+ ContinueString(pStr);
+ }
+}
+
+void VmaJsonWriter::ContinueString(const char *pStr) {
+ VMA_ASSERT(m_InsideString);
+
+ const size_t strLen = strlen(pStr);
+ for (size_t i = 0; i < strLen; ++i) {
+ char ch = pStr[i];
+ if (ch == '\\') {
+ m_SB.Add("\\\\");
+ } else if (ch == '"') {
+ m_SB.Add("\\\"");
+ } else if (ch >= 32) {
+ m_SB.Add(ch);
+ } else
+ switch (ch) {
+ case '\b':
+ m_SB.Add("\\b");
+ break;
+ case '\f':
+ m_SB.Add("\\f");
+ break;
+ case '\n':
+ m_SB.Add("\\n");
+ break;
+ case '\r':
+ m_SB.Add("\\r");
+ break;
+ case '\t':
+ m_SB.Add("\\t");
+ break;
+ default:
+ VMA_ASSERT(0 && "Character not currently supported.");
+ break;
+ }
+ }
+}
+
+void VmaJsonWriter::ContinueString(uint32_t n) {
+ VMA_ASSERT(m_InsideString);
+ m_SB.AddNumber(n);
+}
+
+void VmaJsonWriter::ContinueString(uint64_t n) {
+ VMA_ASSERT(m_InsideString);
+ m_SB.AddNumber(n);
+}
+
+void VmaJsonWriter::ContinueString_Pointer(const void *ptr) {
+ VMA_ASSERT(m_InsideString);
+ m_SB.AddPointer(ptr);
+}
+
+void VmaJsonWriter::EndString(const char *pStr) {
+ VMA_ASSERT(m_InsideString);
+ if (pStr != VMA_NULL && pStr[0] != '\0') {
+ ContinueString(pStr);
+ }
+ m_SB.Add('"');
+ m_InsideString = false;
+}
+
+void VmaJsonWriter::WriteNumber(uint32_t n) {
+ VMA_ASSERT(!m_InsideString);
+ BeginValue(false);
+ m_SB.AddNumber(n);
+}
+
+void VmaJsonWriter::WriteNumber(uint64_t n) {
+ VMA_ASSERT(!m_InsideString);
+ BeginValue(false);
+ m_SB.AddNumber(n);
+}
+
+void VmaJsonWriter::WriteBool(bool b) {
+ VMA_ASSERT(!m_InsideString);
+ BeginValue(false);
+ m_SB.Add(b ? "true" : "false");
+}
+
+void VmaJsonWriter::WriteNull() {
+ VMA_ASSERT(!m_InsideString);
+ BeginValue(false);
+ m_SB.Add("null");
+}
+
+void VmaJsonWriter::BeginValue(bool isString) {
+ if (!m_Stack.empty()) {
+ StackItem &currItem = m_Stack.back();
+ if (currItem.type == COLLECTION_TYPE_OBJECT &&
+ currItem.valueCount % 2 == 0) {
+ VMA_ASSERT(isString);
+ }
+
+ if (currItem.type == COLLECTION_TYPE_OBJECT &&
+ currItem.valueCount % 2 != 0) {
+ m_SB.Add(": ");
+ } else if (currItem.valueCount > 0) {
+ m_SB.Add(", ");
+ WriteIndent();
+ } else {
+ WriteIndent();
+ }
+ ++currItem.valueCount;
+ }
+}
+
+void VmaJsonWriter::WriteIndent(bool oneLess) {
+ if (!m_Stack.empty() && !m_Stack.back().singleLineMode) {
+ m_SB.AddNewLine();
+
+ size_t count = m_Stack.size();
+ if (count > 0 && oneLess) {
+ --count;
+ }
+ for (size_t i = 0; i < count; ++i) {
+ m_SB.Add(INDENT);
+ }
+ }
+}
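+
+/*
+Illustrative sketch of driving the writer above. Inside an object, values must
+alternate key/value: BeginValue() asserts that every even-positioned value is a
+string, and inserts the ": " and ", " separators automatically.
+
+    VmaStringBuilder sb(hAllocator);
+    VmaJsonWriter json(hAllocator->GetAllocationCallbacks(), sb);
+    json.BeginObject();
+    json.WriteString("Blocks"); // key - even position, must be a string
+    json.WriteNumber(1u);       // value
+    json.WriteString("Empty");
+    json.WriteBool(false);
+    json.EndObject();
+    // sb now holds a pretty-printed { "Blocks": 1, "Empty": false }
+*/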
+
+#endif // #if VMA_STATS_STRING_ENABLED
+
+////////////////////////////////////////////////////////////////////////////////
+
+void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void *pUserData) {
+ if (IsUserDataString()) {
+ VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData);
+
+ FreeUserDataString(hAllocator);
+
+ if (pUserData != VMA_NULL) {
+ const char *const newStrSrc = (char *)pUserData;
+ const size_t newStrLen = strlen(newStrSrc);
+ char *const newStrDst = vma_new_array(hAllocator, char, newStrLen + 1);
+ memcpy(newStrDst, newStrSrc, newStrLen + 1);
+ m_pUserData = newStrDst;
+ }
+ } else {
+ m_pUserData = pUserData;
+ }
+}
+
+void VmaAllocation_T::ChangeBlockAllocation(
+ VmaAllocator hAllocator,
+ VmaDeviceMemoryBlock *block,
+ VkDeviceSize offset) {
+ VMA_ASSERT(block != VMA_NULL);
+ VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
+
+ // Move mapping reference counter from old block to new block.
+ if (block != m_BlockAllocation.m_Block) {
+ uint32_t mapRefCount = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP;
+ if (IsPersistentMap())
+ ++mapRefCount;
+ m_BlockAllocation.m_Block->Unmap(hAllocator, mapRefCount);
+ block->Map(hAllocator, mapRefCount, VMA_NULL);
+ }
+
+ m_BlockAllocation.m_Block = block;
+ m_BlockAllocation.m_Offset = offset;
+}
+
+void VmaAllocation_T::ChangeSize(VkDeviceSize newSize) {
+ VMA_ASSERT(newSize > 0);
+ m_Size = newSize;
+}
+
+void VmaAllocation_T::ChangeOffset(VkDeviceSize newOffset) {
+ VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
+ m_BlockAllocation.m_Offset = newOffset;
+}
+
+VkDeviceSize VmaAllocation_T::GetOffset() const {
+ switch (m_Type) {
+ case ALLOCATION_TYPE_BLOCK:
+ return m_BlockAllocation.m_Offset;
+ case ALLOCATION_TYPE_DEDICATED:
+ return 0;
+ default:
+ VMA_ASSERT(0);
+ return 0;
+ }
+}
+
+VkDeviceMemory VmaAllocation_T::GetMemory() const {
+ switch (m_Type) {
+ case ALLOCATION_TYPE_BLOCK:
+ return m_BlockAllocation.m_Block->GetDeviceMemory();
+ case ALLOCATION_TYPE_DEDICATED:
+ return m_DedicatedAllocation.m_hMemory;
+ default:
+ VMA_ASSERT(0);
+ return VK_NULL_HANDLE;
+ }
+}
+
+uint32_t VmaAllocation_T::GetMemoryTypeIndex() const {
+ switch (m_Type) {
+ case ALLOCATION_TYPE_BLOCK:
+ return m_BlockAllocation.m_Block->GetMemoryTypeIndex();
+ case ALLOCATION_TYPE_DEDICATED:
+ return m_DedicatedAllocation.m_MemoryTypeIndex;
+ default:
+ VMA_ASSERT(0);
+ return UINT32_MAX;
+ }
+}
+
+void *VmaAllocation_T::GetMappedData() const {
+ switch (m_Type) {
+ case ALLOCATION_TYPE_BLOCK:
+ if (m_MapCount != 0) {
+ void *pBlockData = m_BlockAllocation.m_Block->GetMappedData();
+ VMA_ASSERT(pBlockData != VMA_NULL);
+ return (char *)pBlockData + m_BlockAllocation.m_Offset;
+ } else {
+ return VMA_NULL;
+ }
+ break;
+ case ALLOCATION_TYPE_DEDICATED:
+ VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
+ return m_DedicatedAllocation.m_pMappedData;
+ default:
+ VMA_ASSERT(0);
+ return VMA_NULL;
+ }
+}
+
+bool VmaAllocation_T::CanBecomeLost() const {
+ switch (m_Type) {
+ case ALLOCATION_TYPE_BLOCK:
+ return m_BlockAllocation.m_CanBecomeLost;
+ case ALLOCATION_TYPE_DEDICATED:
+ return false;
+ default:
+ VMA_ASSERT(0);
+ return false;
+ }
+}
+
+bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount) {
+ VMA_ASSERT(CanBecomeLost());
+
+ /*
+ Warning: This is a carefully designed algorithm.
+ Do not modify unless you really know what you're doing :)
+ */
+ uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
+ for (;;) {
+ if (localLastUseFrameIndex == VMA_FRAME_INDEX_LOST) {
+ VMA_ASSERT(0);
+ return false;
+ } else if (localLastUseFrameIndex + frameInUseCount >= currentFrameIndex) {
+ return false;
+ } else { // Last use time earlier than current time.
+ if (CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST)) {
+ // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
+ // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
+ return true;
+ }
+ }
+ }
+}
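+
+/*
+The loop above is a lock-free compare-and-swap retry. Sketched with std::atomic
+for illustration (names hypothetical; the real code goes through
+GetLastUseFrameIndex/CompareExchangeLastUseFrameIndex):
+
+    uint32_t expected = lastUseFrameIndex.load();
+    for (;;) {
+        if (expected == VMA_FRAME_INDEX_LOST)
+            return false; // already lost - should not happen here
+        if (expected + frameInUseCount >= currentFrameIndex)
+            return false; // still potentially in use by the GPU
+        if (lastUseFrameIndex.compare_exchange_weak(expected, VMA_FRAME_INDEX_LOST))
+            return true; // we won the race; the allocation is now lost
+        // On failure `expected` was reloaded - retry with the fresh value.
+    }
+*/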
+
+#if VMA_STATS_STRING_ENABLED
+
+// Names correspond to values of enum VmaSuballocationType.
+static const char *VMA_SUBALLOCATION_TYPE_NAMES[] = {
+ "FREE",
+ "UNKNOWN",
+ "BUFFER",
+ "IMAGE_UNKNOWN",
+ "IMAGE_LINEAR",
+ "IMAGE_OPTIMAL",
+};
+
+void VmaAllocation_T::PrintParameters(class VmaJsonWriter &json) const {
+ json.WriteString("Type");
+ json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[m_SuballocationType]);
+
+ json.WriteString("Size");
+ json.WriteNumber(m_Size);
+
+ if (m_pUserData != VMA_NULL) {
+ json.WriteString("UserData");
+ if (IsUserDataString()) {
+ json.WriteString((const char *)m_pUserData);
+ } else {
+ json.BeginString();
+ json.ContinueString_Pointer(m_pUserData);
+ json.EndString();
+ }
+ }
+
+ json.WriteString("CreationFrameIndex");
+ json.WriteNumber(m_CreationFrameIndex);
+
+ json.WriteString("LastUseFrameIndex");
+ json.WriteNumber(GetLastUseFrameIndex());
+
+ if (m_BufferImageUsage != 0) {
+ json.WriteString("Usage");
+ json.WriteNumber(m_BufferImageUsage);
+ }
+}
+
+#endif
+
+void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator) {
+ VMA_ASSERT(IsUserDataString());
+ if (m_pUserData != VMA_NULL) {
+ char *const oldStr = (char *)m_pUserData;
+ const size_t oldStrLen = strlen(oldStr);
+ vma_delete_array(hAllocator, oldStr, oldStrLen + 1);
+ m_pUserData = VMA_NULL;
+ }
+}
+
+void VmaAllocation_T::BlockAllocMap() {
+ VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
+
+ if ((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F) {
+ ++m_MapCount;
+ } else {
+ VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
+ }
+}
+
+void VmaAllocation_T::BlockAllocUnmap() {
+ VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
+
+ if ((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0) {
+ --m_MapCount;
+ } else {
+ VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
+ }
+}
+
+VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void **ppData) {
+ VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
+
+ if (m_MapCount != 0) {
+ if ((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F) {
+ VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
+ *ppData = m_DedicatedAllocation.m_pMappedData;
+ ++m_MapCount;
+ return VK_SUCCESS;
+ } else {
+ VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
+ return VK_ERROR_MEMORY_MAP_FAILED;
+ }
+ } else {
+ VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
+ hAllocator->m_hDevice,
+ m_DedicatedAllocation.m_hMemory,
+ 0, // offset
+ VK_WHOLE_SIZE,
+ 0, // flags
+ ppData);
+ if (result == VK_SUCCESS) {
+ m_DedicatedAllocation.m_pMappedData = *ppData;
+ m_MapCount = 1;
+ }
+ return result;
+ }
+}
+
+void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator) {
+ VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
+
+ if ((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0) {
+ --m_MapCount;
+ if (m_MapCount == 0) {
+ m_DedicatedAllocation.m_pMappedData = VMA_NULL;
+ (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
+ hAllocator->m_hDevice,
+ m_DedicatedAllocation.m_hMemory);
+ }
+ } else {
+ VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
+ }
+}
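+
+/*
+Illustrative sketch of the reference-counted mapping contract implemented by
+DedicatedAllocMap/DedicatedAllocUnmap above, for a non-persistently-mapped
+dedicated allocation (error handling omitted):
+
+    void *p1, *p2;
+    alloc->DedicatedAllocMap(hAllocator, &p1); // vkMapMemory, m_MapCount == 1
+    alloc->DedicatedAllocMap(hAllocator, &p2); // reuses mapping, p2 == p1
+    alloc->DedicatedAllocUnmap(hAllocator);    // m_MapCount == 1, still mapped
+    alloc->DedicatedAllocUnmap(hAllocator);    // vkUnmapMemory, fully unmapped
+*/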
+
+#if VMA_STATS_STRING_ENABLED
+
+static void VmaPrintStatInfo(VmaJsonWriter &json, const VmaStatInfo &stat) {
+ json.BeginObject();
+
+ json.WriteString("Blocks");
+ json.WriteNumber(stat.blockCount);
+
+ json.WriteString("Allocations");
+ json.WriteNumber(stat.allocationCount);
+
+ json.WriteString("UnusedRanges");
+ json.WriteNumber(stat.unusedRangeCount);
+
+ json.WriteString("UsedBytes");
+ json.WriteNumber(stat.usedBytes);
+
+ json.WriteString("UnusedBytes");
+ json.WriteNumber(stat.unusedBytes);
+
+ if (stat.allocationCount > 1) {
+ json.WriteString("AllocationSize");
+ json.BeginObject(true);
+ json.WriteString("Min");
+ json.WriteNumber(stat.allocationSizeMin);
+ json.WriteString("Avg");
+ json.WriteNumber(stat.allocationSizeAvg);
+ json.WriteString("Max");
+ json.WriteNumber(stat.allocationSizeMax);
+ json.EndObject();
+ }
+
+ if (stat.unusedRangeCount > 1) {
+ json.WriteString("UnusedRangeSize");
+ json.BeginObject(true);
+ json.WriteString("Min");
+ json.WriteNumber(stat.unusedRangeSizeMin);
+ json.WriteString("Avg");
+ json.WriteNumber(stat.unusedRangeSizeAvg);
+ json.WriteString("Max");
+ json.WriteNumber(stat.unusedRangeSizeMax);
+ json.EndObject();
+ }
+
+ json.EndObject();
+}
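+
+/*
+Shape of the JSON emitted by VmaPrintStatInfo above (values are illustrative):
+
+    {
+      "Blocks": 1,
+      "Allocations": 3,
+      "UnusedRanges": 2,
+      "UsedBytes": 12582912,
+      "UnusedBytes": 4194304,
+      "AllocationSize": { "Min": 2097152, "Avg": 4194304, "Max": 6291456 },
+      "UnusedRangeSize": { "Min": 1048576, "Avg": 2097152, "Max": 3145728 }
+    }
+
+The two size sub-objects are only written when allocationCount / unusedRangeCount
+exceed 1, since min/avg/max carry no information for a single sample.
+*/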
+
+#endif // #if VMA_STATS_STRING_ENABLED
+
+struct VmaSuballocationItemSizeLess {
+ bool operator()(
+ const VmaSuballocationList::iterator lhs,
+ const VmaSuballocationList::iterator rhs) const {
+ return lhs->size < rhs->size;
+ }
+ bool operator()(
+ const VmaSuballocationList::iterator lhs,
+ VkDeviceSize rhsSize) const {
+ return lhs->size < rhsSize;
+ }
+};
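+
+/*
+Note: the second operator() overload compares an iterator against a plain
+VkDeviceSize, which lets VmaBinaryFindFirstNotLess search
+m_FreeSuballocationsBySize by a size key without materializing a dummy list
+node, e.g. (sketch; `requiredSize` is a hypothetical name):
+
+    VmaSuballocationList::iterator *const it = VmaBinaryFindFirstNotLess(
+        m_FreeSuballocationsBySize.data(),
+        m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
+        requiredSize, // plain VkDeviceSize key - uses the second overload
+        VmaSuballocationItemSizeLess());
+*/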
+
+////////////////////////////////////////////////////////////////////////////////
+// class VmaBlockMetadata
+
+VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
+ m_Size(0),
+ m_pAllocationCallbacks(hAllocator->GetAllocationCallbacks()) {
+}
+
+#if VMA_STATS_STRING_ENABLED
+
+void VmaBlockMetadata::PrintDetailedMap_Begin(class VmaJsonWriter &json,
+ VkDeviceSize unusedBytes,
+ size_t allocationCount,
+ size_t unusedRangeCount) const {
+ json.BeginObject();
+
+ json.WriteString("TotalBytes");
+ json.WriteNumber(GetSize());
+
+ json.WriteString("UnusedBytes");
+ json.WriteNumber(unusedBytes);
+
+ json.WriteString("Allocations");
+ json.WriteNumber((uint64_t)allocationCount);
+
+ json.WriteString("UnusedRanges");
+ json.WriteNumber((uint64_t)unusedRangeCount);
+
+ json.WriteString("Suballocations");
+ json.BeginArray();
+}
+
+void VmaBlockMetadata::PrintDetailedMap_Allocation(class VmaJsonWriter &json,
+ VkDeviceSize offset,
+ VmaAllocation hAllocation) const {
+ json.BeginObject(true);
+
+ json.WriteString("Offset");
+ json.WriteNumber(offset);
+
+ hAllocation->PrintParameters(json);
+
+ json.EndObject();
+}
+
+void VmaBlockMetadata::PrintDetailedMap_UnusedRange(class VmaJsonWriter &json,
+ VkDeviceSize offset,
+ VkDeviceSize size) const {
+ json.BeginObject(true);
+
+ json.WriteString("Offset");
+ json.WriteNumber(offset);
+
+ json.WriteString("Type");
+ json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[VMA_SUBALLOCATION_TYPE_FREE]);
+
+ json.WriteString("Size");
+ json.WriteNumber(size);
+
+ json.EndObject();
+}
+
+void VmaBlockMetadata::PrintDetailedMap_End(class VmaJsonWriter &json) const {
+ json.EndArray();
+ json.EndObject();
+}
+
+#endif // #if VMA_STATS_STRING_ENABLED
+
+////////////////////////////////////////////////////////////////////////////////
+// class VmaBlockMetadata_Generic
+
+VmaBlockMetadata_Generic::VmaBlockMetadata_Generic(VmaAllocator hAllocator) :
+ VmaBlockMetadata(hAllocator),
+ m_FreeCount(0),
+ m_SumFreeSize(0),
+ m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
+ m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks())) {
+}
+
+VmaBlockMetadata_Generic::~VmaBlockMetadata_Generic() {
+}
+
+void VmaBlockMetadata_Generic::Init(VkDeviceSize size) {
+ VmaBlockMetadata::Init(size);
+
+ m_FreeCount = 1;
+ m_SumFreeSize = size;
+
+ VmaSuballocation suballoc = {};
+ suballoc.offset = 0;
+ suballoc.size = size;
+ suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
+ suballoc.hAllocation = VK_NULL_HANDLE;
+
+ VMA_ASSERT(size > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
+ m_Suballocations.push_back(suballoc);
+ VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
+ --suballocItem;
+ m_FreeSuballocationsBySize.push_back(suballocItem);
+}
+
+bool VmaBlockMetadata_Generic::Validate() const {
+ VMA_VALIDATE(!m_Suballocations.empty());
+
+ // Expected offset of new suballocation as calculated from previous ones.
+ VkDeviceSize calculatedOffset = 0;
+ // Expected number of free suballocations as calculated from traversing their list.
+ uint32_t calculatedFreeCount = 0;
+ // Expected sum size of free suballocations as calculated from traversing their list.
+ VkDeviceSize calculatedSumFreeSize = 0;
+ // Expected number of free suballocations that should be registered in
+ // m_FreeSuballocationsBySize, as calculated from traversing their list.
+ size_t freeSuballocationsToRegister = 0;
+ // True if the previously visited suballocation was free.
+ bool prevFree = false;
+
+ for (VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
+ suballocItem != m_Suballocations.cend();
+ ++suballocItem) {
+ const VmaSuballocation &subAlloc = *suballocItem;
+
+ // Actual offset of this suballocation doesn't match expected one.
+ VMA_VALIDATE(subAlloc.offset == calculatedOffset);
+
+ const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
+ // Two adjacent free suballocations are invalid. They should be merged.
+ VMA_VALIDATE(!prevFree || !currFree);
+
+ VMA_VALIDATE(currFree == (subAlloc.hAllocation == VK_NULL_HANDLE));
+
+ if (currFree) {
+ calculatedSumFreeSize += subAlloc.size;
+ ++calculatedFreeCount;
+ if (subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER) {
+ ++freeSuballocationsToRegister;
+ }
+
+ // Margin required between allocations - every free space must be at least that large.
+ VMA_VALIDATE(subAlloc.size >= VMA_DEBUG_MARGIN);
+ } else {
+ VMA_VALIDATE(subAlloc.hAllocation->GetOffset() == subAlloc.offset);
+ VMA_VALIDATE(subAlloc.hAllocation->GetSize() == subAlloc.size);
+
+ // Margin required between allocations - previous allocation must be free.
+ VMA_VALIDATE(VMA_DEBUG_MARGIN == 0 || prevFree);
+ }
+
+ calculatedOffset += subAlloc.size;
+ prevFree = currFree;
+ }
+
+ // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
+ // match expected one.
+ VMA_VALIDATE(m_FreeSuballocationsBySize.size() == freeSuballocationsToRegister);
+
+ VkDeviceSize lastSize = 0;
+ for (size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i) {
+ VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];
+
+ // Only free suballocations can be registered in m_FreeSuballocationsBySize.
+ VMA_VALIDATE(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE);
+ // They must be sorted by size ascending.
+ VMA_VALIDATE(suballocItem->size >= lastSize);
+
+ lastSize = suballocItem->size;
+ }
+
+ // Check if totals match calculated values.
+ VMA_VALIDATE(ValidateFreeSuballocationList());
+ VMA_VALIDATE(calculatedOffset == GetSize());
+ VMA_VALIDATE(calculatedSumFreeSize == m_SumFreeSize);
+ VMA_VALIDATE(calculatedFreeCount == m_FreeCount);
+
+ return true;
+}
+
+VkDeviceSize VmaBlockMetadata_Generic::GetUnusedRangeSizeMax() const {
+ if (!m_FreeSuballocationsBySize.empty()) {
+ return m_FreeSuballocationsBySize.back()->size;
+ } else {
+ return 0;
+ }
+}
+
+bool VmaBlockMetadata_Generic::IsEmpty() const {
+ return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
+}
+
+void VmaBlockMetadata_Generic::CalcAllocationStatInfo(VmaStatInfo &outInfo) const {
+ outInfo.blockCount = 1;
+
+ const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
+ outInfo.allocationCount = rangeCount - m_FreeCount;
+ outInfo.unusedRangeCount = m_FreeCount;
+
+ outInfo.unusedBytes = m_SumFreeSize;
+ outInfo.usedBytes = GetSize() - outInfo.unusedBytes;
+
+ outInfo.allocationSizeMin = UINT64_MAX;
+ outInfo.allocationSizeMax = 0;
+ outInfo.unusedRangeSizeMin = UINT64_MAX;
+ outInfo.unusedRangeSizeMax = 0;
+
+ for (VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
+ suballocItem != m_Suballocations.cend();
+ ++suballocItem) {
+ const VmaSuballocation &suballoc = *suballocItem;
+ if (suballoc.type != VMA_SUBALLOCATION_TYPE_FREE) {
+ outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
+ outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
+ } else {
+ outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
+ outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
+ }
+ }
+}
+
+void VmaBlockMetadata_Generic::AddPoolStats(VmaPoolStats &inoutStats) const {
+ const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
+
+ inoutStats.size += GetSize();
+ inoutStats.unusedSize += m_SumFreeSize;
+ inoutStats.allocationCount += rangeCount - m_FreeCount;
+ inoutStats.unusedRangeCount += m_FreeCount;
+ inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
+}
+
+#if VMA_STATS_STRING_ENABLED
+
+void VmaBlockMetadata_Generic::PrintDetailedMap(class VmaJsonWriter &json) const {
+ PrintDetailedMap_Begin(json,
+ m_SumFreeSize, // unusedBytes
+ m_Suballocations.size() - (size_t)m_FreeCount, // allocationCount
+ m_FreeCount); // unusedRangeCount
+
+ size_t i = 0;
+ for (VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
+ suballocItem != m_Suballocations.cend();
+ ++suballocItem, ++i) {
+ if (suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE) {
+ PrintDetailedMap_UnusedRange(json, suballocItem->offset, suballocItem->size);
+ } else {
+ PrintDetailedMap_Allocation(json, suballocItem->offset, suballocItem->hAllocation);
+ }
+ }
+
+ PrintDetailedMap_End(json);
+}
+
+#endif // #if VMA_STATS_STRING_ENABLED
+
+bool VmaBlockMetadata_Generic::CreateAllocationRequest(
+ uint32_t currentFrameIndex,
+ uint32_t frameInUseCount,
+ VkDeviceSize bufferImageGranularity,
+ VkDeviceSize allocSize,
+ VkDeviceSize allocAlignment,
+ bool upperAddress,
+ VmaSuballocationType allocType,
+ bool canMakeOtherLost,
+ uint32_t strategy,
+ VmaAllocationRequest *pAllocationRequest) {
+ VMA_ASSERT(allocSize > 0);
+ VMA_ASSERT(!upperAddress);
+ VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
+ VMA_ASSERT(pAllocationRequest != VMA_NULL);
+ VMA_HEAVY_ASSERT(Validate());
+
+ pAllocationRequest->type = VmaAllocationRequestType::Normal;
+
+ // There is not enough total free space in this block to fulfill the request: Early return.
+ if (canMakeOtherLost == false &&
+ m_SumFreeSize < allocSize + 2 * VMA_DEBUG_MARGIN) {
+ return false;
+ }
+
+ // New algorithm, efficiently searching freeSuballocationsBySize.
+ const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
+ if (freeSuballocCount > 0) {
+ if (strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT) {
+ // Find first free suballocation with size not less than allocSize + 2 * VMA_DEBUG_MARGIN.
+ VmaSuballocationList::iterator *const it = VmaBinaryFindFirstNotLess(
+ m_FreeSuballocationsBySize.data(),
+ m_FreeSuballocationsBySize.data() + freeSuballocCount,
+ allocSize + 2 * VMA_DEBUG_MARGIN,
+ VmaSuballocationItemSizeLess());
+ size_t index = it - m_FreeSuballocationsBySize.data();
+ for (; index < freeSuballocCount; ++index) {
+ if (CheckAllocation(
+ currentFrameIndex,
+ frameInUseCount,
+ bufferImageGranularity,
+ allocSize,
+ allocAlignment,
+ allocType,
+ m_FreeSuballocationsBySize[index],
+ false, // canMakeOtherLost
+ &pAllocationRequest->offset,
+ &pAllocationRequest->itemsToMakeLostCount,
+ &pAllocationRequest->sumFreeSize,
+ &pAllocationRequest->sumItemSize)) {
+ pAllocationRequest->item = m_FreeSuballocationsBySize[index];
+ return true;
+ }
+ }
+ } else if (strategy == VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET) {
+ for (VmaSuballocationList::iterator it = m_Suballocations.begin();
+ it != m_Suballocations.end();
+ ++it) {
+ if (it->type == VMA_SUBALLOCATION_TYPE_FREE && CheckAllocation(
+ currentFrameIndex,
+ frameInUseCount,
+ bufferImageGranularity,
+ allocSize,
+ allocAlignment,
+ allocType,
+ it,
+ false, // canMakeOtherLost
+ &pAllocationRequest->offset,
+ &pAllocationRequest->itemsToMakeLostCount,
+ &pAllocationRequest->sumFreeSize,
+ &pAllocationRequest->sumItemSize)) {
+ pAllocationRequest->item = it;
+ return true;
+ }
+ }
+ } else // WORST_FIT, FIRST_FIT
+ {
+ // Search starting from biggest suballocations.
+ for (size_t index = freeSuballocCount; index--;) {
+ if (CheckAllocation(
+ currentFrameIndex,
+ frameInUseCount,
+ bufferImageGranularity,
+ allocSize,
+ allocAlignment,
+ allocType,
+ m_FreeSuballocationsBySize[index],
+ false, // canMakeOtherLost
+ &pAllocationRequest->offset,
+ &pAllocationRequest->itemsToMakeLostCount,
+ &pAllocationRequest->sumFreeSize,
+ &pAllocationRequest->sumItemSize)) {
+ pAllocationRequest->item = m_FreeSuballocationsBySize[index];
+ return true;
+ }
+ }
+ }
+ }
+
+ if (canMakeOtherLost) {
+ // Brute-force algorithm. TODO: Come up with something better.
+
+ bool found = false;
+ VmaAllocationRequest tmpAllocRequest = {};
+ tmpAllocRequest.type = VmaAllocationRequestType::Normal;
+ for (VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
+ suballocIt != m_Suballocations.end();
+ ++suballocIt) {
+ if (suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
+ suballocIt->hAllocation->CanBecomeLost()) {
+ if (CheckAllocation(
+ currentFrameIndex,
+ frameInUseCount,
+ bufferImageGranularity,
+ allocSize,
+ allocAlignment,
+ allocType,
+ suballocIt,
+ canMakeOtherLost,
+ &tmpAllocRequest.offset,
+ &tmpAllocRequest.itemsToMakeLostCount,
+ &tmpAllocRequest.sumFreeSize,
+ &tmpAllocRequest.sumItemSize)) {
+ if (strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT) {
+ *pAllocationRequest = tmpAllocRequest;
+ pAllocationRequest->item = suballocIt;
+ break;
+ }
+ if (!found || tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost()) {
+ *pAllocationRequest = tmpAllocRequest;
+ pAllocationRequest->item = suballocIt;
+ found = true;
+ }
+ }
+ }
+ }
+
+ return found;
+ }
+
+ return false;
+}
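+
+/*
+Strategy dispatch in CreateAllocationRequest above, summarized:
+  - BEST_FIT:   binary-search m_FreeSuballocationsBySize for the smallest free
+                range that can fit, then scan upward until CheckAllocation passes.
+  - MIN_OFFSET: walk the full suballocation list front to back and take the
+                lowest-offset fit (internal strategy, used e.g. by defragmentation).
+  - WORST_FIT / FIRST_FIT: scan the by-size vector from the biggest range down.
+The canMakeOtherLost fallback is a brute-force pass that may sacrifice
+allocations whose last use is older than frameInUseCount frames.
+*/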
+
+bool VmaBlockMetadata_Generic::MakeRequestedAllocationsLost(
+ uint32_t currentFrameIndex,
+ uint32_t frameInUseCount,
+ VmaAllocationRequest *pAllocationRequest) {
+ VMA_ASSERT(pAllocationRequest && pAllocationRequest->type == VmaAllocationRequestType::Normal);
+
+ while (pAllocationRequest->itemsToMakeLostCount > 0) {
+ if (pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE) {
+ ++pAllocationRequest->item;
+ }
+ VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
+ VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
+ VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
+ if (pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount)) {
+ pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
+ --pAllocationRequest->itemsToMakeLostCount;
+ } else {
+ return false;
+ }
+ }
+
+ VMA_HEAVY_ASSERT(Validate());
+ VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
+ VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);
+
+ return true;
+}
+
+uint32_t VmaBlockMetadata_Generic::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount) {
+ uint32_t lostAllocationCount = 0;
+ for (VmaSuballocationList::iterator it = m_Suballocations.begin();
+ it != m_Suballocations.end();
+ ++it) {
+ if (it->type != VMA_SUBALLOCATION_TYPE_FREE &&
+ it->hAllocation->CanBecomeLost() &&
+ it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount)) {
+ it = FreeSuballocation(it);
+ ++lostAllocationCount;
+ }
+ }
+ return lostAllocationCount;
+}
+
+VkResult VmaBlockMetadata_Generic::CheckCorruption(const void *pBlockData) {
+ for (VmaSuballocationList::iterator it = m_Suballocations.begin();
+ it != m_Suballocations.end();
+ ++it) {
+ if (it->type != VMA_SUBALLOCATION_TYPE_FREE) {
+ if (!VmaValidateMagicValue(pBlockData, it->offset - VMA_DEBUG_MARGIN)) {
+ VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
+ return VK_ERROR_VALIDATION_FAILED_EXT;
+ }
+ if (!VmaValidateMagicValue(pBlockData, it->offset + it->size)) {
+ VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
+ return VK_ERROR_VALIDATION_FAILED_EXT;
+ }
+ }
+ }
+
+ return VK_SUCCESS;
+}
+
+void VmaBlockMetadata_Generic::Alloc(
+ const VmaAllocationRequest &request,
+ VmaSuballocationType type,
+ VkDeviceSize allocSize,
+ VmaAllocation hAllocation) {
+ VMA_ASSERT(request.type == VmaAllocationRequestType::Normal);
+ VMA_ASSERT(request.item != m_Suballocations.end());
+ VmaSuballocation &suballoc = *request.item;
+ // Given suballocation is a free block.
+ VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
+ // Given offset is inside this suballocation.
+ VMA_ASSERT(request.offset >= suballoc.offset);
+ const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
+ VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
+ const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;
+
+ // Unregister this free suballocation from m_FreeSuballocationsBySize and update
+ // it to become used.
+ UnregisterFreeSuballocation(request.item);
+
+ suballoc.offset = request.offset;
+ suballoc.size = allocSize;
+ suballoc.type = type;
+ suballoc.hAllocation = hAllocation;
+
+ // If there are any free bytes remaining at the end, insert new free suballocation after current one.
+ if (paddingEnd) {
+ VmaSuballocation paddingSuballoc = {};
+ paddingSuballoc.offset = request.offset + allocSize;
+ paddingSuballoc.size = paddingEnd;
+ paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
+ VmaSuballocationList::iterator next = request.item;
+ ++next;
+ const VmaSuballocationList::iterator paddingEndItem =
+ m_Suballocations.insert(next, paddingSuballoc);
+ RegisterFreeSuballocation(paddingEndItem);
+ }
+
+ // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
+ if (paddingBegin) {
+ VmaSuballocation paddingSuballoc = {};
+ paddingSuballoc.offset = request.offset - paddingBegin;
+ paddingSuballoc.size = paddingBegin;
+ paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
+ const VmaSuballocationList::iterator paddingBeginItem =
+ m_Suballocations.insert(request.item, paddingSuballoc);
+ RegisterFreeSuballocation(paddingBeginItem);
+ }
+
+ // Update totals.
+ m_FreeCount = m_FreeCount - 1;
+ if (paddingBegin > 0) {
+ ++m_FreeCount;
+ }
+ if (paddingEnd > 0) {
+ ++m_FreeCount;
+ }
+ m_SumFreeSize -= allocSize;
+}
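+
+/*
+Layout produced by Alloc() above when the chosen free range is larger than the
+request (offsets grow to the right):
+
+    |<--------------- original free suballocation --------------->|
+    |<- paddingBegin ->|<------ allocSize ------>|<- paddingEnd ->|
+        (new FREE)          (becomes used)           (new FREE)
+
+Non-empty padding ranges are re-inserted as FREE suballocations and registered
+in m_FreeSuballocationsBySize, which is why m_FreeCount is first decremented
+and then incremented once per non-empty padding range.
+*/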
+
+void VmaBlockMetadata_Generic::Free(const VmaAllocation allocation) {
+ for (VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
+ suballocItem != m_Suballocations.end();
+ ++suballocItem) {
+ VmaSuballocation &suballoc = *suballocItem;
+ if (suballoc.hAllocation == allocation) {
+ FreeSuballocation(suballocItem);
+ VMA_HEAVY_ASSERT(Validate());
+ return;
+ }
+ }
+ VMA_ASSERT(0 && "Not found!");
+}
+
+void VmaBlockMetadata_Generic::FreeAtOffset(VkDeviceSize offset) {
+ for (VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
+ suballocItem != m_Suballocations.end();
+ ++suballocItem) {
+ VmaSuballocation &suballoc = *suballocItem;
+ if (suballoc.offset == offset) {
+ FreeSuballocation(suballocItem);
+ return;
+ }
+ }
+ VMA_ASSERT(0 && "Not found!");
+}
+
+bool VmaBlockMetadata_Generic::ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize) {
+ typedef VmaSuballocationList::iterator iter_type;
+ for (iter_type suballocItem = m_Suballocations.begin();
+ suballocItem != m_Suballocations.end();
+ ++suballocItem) {
+ VmaSuballocation &suballoc = *suballocItem;
+ if (suballoc.hAllocation == alloc) {
+ iter_type nextItem = suballocItem;
+ ++nextItem;
+
+ // Should have been ensured on higher level.
+ VMA_ASSERT(newSize != alloc->GetSize() && newSize > 0);
+
+ // Shrinking.
+ if (newSize < alloc->GetSize()) {
+ const VkDeviceSize sizeDiff = suballoc.size - newSize;
+
+ // There is next item.
+ if (nextItem != m_Suballocations.end()) {
+ // Next item is free.
+ if (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE) {
+ // Grow this next item backward.
+ UnregisterFreeSuballocation(nextItem);
+ nextItem->offset -= sizeDiff;
+ nextItem->size += sizeDiff;
+ RegisterFreeSuballocation(nextItem);
+ }
+ // Next item is not free.
+ else {
+ // Create free item after current one.
+ VmaSuballocation newFreeSuballoc;
+ newFreeSuballoc.hAllocation = VK_NULL_HANDLE;
+ newFreeSuballoc.offset = suballoc.offset + newSize;
+ newFreeSuballoc.size = sizeDiff;
+ newFreeSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
+ iter_type newFreeSuballocIt = m_Suballocations.insert(nextItem, newFreeSuballoc);
+ RegisterFreeSuballocation(newFreeSuballocIt);
+
+ ++m_FreeCount;
+ }
+ }
+ // This is the last item.
+ else {
+ // Create free item at the end.
+ VmaSuballocation newFreeSuballoc;
+ newFreeSuballoc.hAllocation = VK_NULL_HANDLE;
+ newFreeSuballoc.offset = suballoc.offset + newSize;
+ newFreeSuballoc.size = sizeDiff;
+ newFreeSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
+ m_Suballocations.push_back(newFreeSuballoc);
+
+ iter_type newFreeSuballocIt = m_Suballocations.end();
+ RegisterFreeSuballocation(--newFreeSuballocIt);
+
+ ++m_FreeCount;
+ }
+
+ suballoc.size = newSize;
+ m_SumFreeSize += sizeDiff;
+ }
+ // Growing.
+ else {
+ const VkDeviceSize sizeDiff = newSize - suballoc.size;
+
+ // There is next item.
+ if (nextItem != m_Suballocations.end()) {
+ // Next item is free.
+ if (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE) {
+ // There is not enough free space, including margin.
+ if (nextItem->size < sizeDiff + VMA_DEBUG_MARGIN) {
+ return false;
+ }
+
+ // There is more free space than required.
+ if (nextItem->size > sizeDiff) {
+ // Move and shrink this next item.
+ UnregisterFreeSuballocation(nextItem);
+ nextItem->offset += sizeDiff;
+ nextItem->size -= sizeDiff;
+ RegisterFreeSuballocation(nextItem);
+ }
+ // There is exactly the amount of free space required.
+ else {
+ // Remove this next free item.
+ UnregisterFreeSuballocation(nextItem);
+ m_Suballocations.erase(nextItem);
+ --m_FreeCount;
+ }
+ }
+ // Next item is not free - there is no space to grow.
+ else {
+ return false;
+ }
+ }
+ // This is the last item - there is no space to grow.
+ else {
+ return false;
+ }
+
+ suballoc.size = newSize;
+ m_SumFreeSize -= sizeDiff;
+ }
+
+ // We cannot call Validate() here because the alloc object is updated to its new size outside of this call.
+ return true;
+ }
+ }
+ VMA_ASSERT(0 && "Not found!");
+ return false;
+}
+
+bool VmaBlockMetadata_Generic::ValidateFreeSuballocationList() const {
+ VkDeviceSize lastSize = 0;
+ for (size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i) {
+ const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
+
+ VMA_VALIDATE(it->type == VMA_SUBALLOCATION_TYPE_FREE);
+ VMA_VALIDATE(it->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
+ VMA_VALIDATE(it->size >= lastSize);
+ lastSize = it->size;
+ }
+ return true;
+}
+
+bool VmaBlockMetadata_Generic::CheckAllocation(
+ uint32_t currentFrameIndex,
+ uint32_t frameInUseCount,
+ VkDeviceSize bufferImageGranularity,
+ VkDeviceSize allocSize,
+ VkDeviceSize allocAlignment,
+ VmaSuballocationType allocType,
+ VmaSuballocationList::const_iterator suballocItem,
+ bool canMakeOtherLost,
+ VkDeviceSize *pOffset,
+ size_t *itemsToMakeLostCount,
+ VkDeviceSize *pSumFreeSize,
+ VkDeviceSize *pSumItemSize) const {
+ VMA_ASSERT(allocSize > 0);
+ VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
+ VMA_ASSERT(suballocItem != m_Suballocations.cend());
+ VMA_ASSERT(pOffset != VMA_NULL);
+
+ *itemsToMakeLostCount = 0;
+ *pSumFreeSize = 0;
+ *pSumItemSize = 0;
+
+ if (canMakeOtherLost) {
+ if (suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE) {
+ *pSumFreeSize = suballocItem->size;
+ } else {
+ if (suballocItem->hAllocation->CanBecomeLost() &&
+ suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex) {
+ ++*itemsToMakeLostCount;
+ *pSumItemSize = suballocItem->size;
+ } else {
+ return false;
+ }
+ }
+
+ // Remaining size is too small for this request: Early return.
+ if (GetSize() - suballocItem->offset < allocSize) {
+ return false;
+ }
+
+ // Start from offset equal to beginning of this suballocation.
+ *pOffset = suballocItem->offset;
+
+ // Apply VMA_DEBUG_MARGIN at the beginning.
+ if (VMA_DEBUG_MARGIN > 0) {
+ *pOffset += VMA_DEBUG_MARGIN;
+ }
+
+ // Apply alignment.
+ *pOffset = VmaAlignUp(*pOffset, allocAlignment);
+
+ // Check previous suballocations for BufferImageGranularity conflicts.
+ // Make bigger alignment if necessary.
+ if (bufferImageGranularity > 1) {
+ bool bufferImageGranularityConflict = false;
+ VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
+ while (prevSuballocItem != m_Suballocations.cbegin()) {
+ --prevSuballocItem;
+ const VmaSuballocation &prevSuballoc = *prevSuballocItem;
+ if (VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity)) {
+ if (VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType)) {
+ bufferImageGranularityConflict = true;
+ break;
+ }
+ } else
+ // Already on previous page.
+ break;
+ }
+ if (bufferImageGranularityConflict) {
+ *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
+ }
+ }
+
+ // Now that we have final *pOffset, check if we are past suballocItem.
+ // If yes, return false - this function should be called for another suballocItem as starting point.
+ if (*pOffset >= suballocItem->offset + suballocItem->size) {
+ return false;
+ }
+
+ // Calculate padding at the beginning based on current offset.
+ const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;
+
+ // Calculate required margin at the end.
+ const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;
+
+ const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
+ // Another early return check.
+ if (suballocItem->offset + totalSize > GetSize()) {
+ return false;
+ }
+
+ // Advance lastSuballocItem until desired size is reached.
+ // Update itemsToMakeLostCount.
+ VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
+ if (totalSize > suballocItem->size) {
+ VkDeviceSize remainingSize = totalSize - suballocItem->size;
+ while (remainingSize > 0) {
+ ++lastSuballocItem;
+ if (lastSuballocItem == m_Suballocations.cend()) {
+ return false;
+ }
+ if (lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE) {
+ *pSumFreeSize += lastSuballocItem->size;
+ } else {
+ VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
+ if (lastSuballocItem->hAllocation->CanBecomeLost() &&
+ lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex) {
+ ++*itemsToMakeLostCount;
+ *pSumItemSize += lastSuballocItem->size;
+ } else {
+ return false;
+ }
+ }
+ remainingSize = (lastSuballocItem->size < remainingSize) ?
+ remainingSize - lastSuballocItem->size :
+ 0;
+ }
+ }
+
+ // Check next suballocations for BufferImageGranularity conflicts.
+ // If conflict exists, we must mark more allocations lost or fail.
+ if (bufferImageGranularity > 1) {
+ VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
+ ++nextSuballocItem;
+ while (nextSuballocItem != m_Suballocations.cend()) {
+ const VmaSuballocation &nextSuballoc = *nextSuballocItem;
+ if (VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity)) {
+ if (VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type)) {
+ VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
+ if (nextSuballoc.hAllocation->CanBecomeLost() &&
+ nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex) {
+ ++*itemsToMakeLostCount;
+ } else {
+ return false;
+ }
+ }
+ } else {
+ // Already on next page.
+ break;
+ }
+ ++nextSuballocItem;
+ }
+ }
+ } else {
+ const VmaSuballocation &suballoc = *suballocItem;
+ VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
+
+ *pSumFreeSize = suballoc.size;
+
+ // Size of this suballocation is too small for this request: Early return.
+ if (suballoc.size < allocSize) {
+ return false;
+ }
+
+ // Start from offset equal to beginning of this suballocation.
+ *pOffset = suballoc.offset;
+
+ // Apply VMA_DEBUG_MARGIN at the beginning.
+ if (VMA_DEBUG_MARGIN > 0) {
+ *pOffset += VMA_DEBUG_MARGIN;
+ }
+
+ // Apply alignment.
+ *pOffset = VmaAlignUp(*pOffset, allocAlignment);
+
+ // Check previous suballocations for BufferImageGranularity conflicts.
+ // Make bigger alignment if necessary.
+ if (bufferImageGranularity > 1) {
+ bool bufferImageGranularityConflict = false;
+ VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
+ while (prevSuballocItem != m_Suballocations.cbegin()) {
+ --prevSuballocItem;
+ const VmaSuballocation &prevSuballoc = *prevSuballocItem;
+ if (VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity)) {
+ if (VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType)) {
+ bufferImageGranularityConflict = true;
+ break;
+ }
+ } else
+ // Already on previous page.
+ break;
+ }
+ if (bufferImageGranularityConflict) {
+ *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
+ }
+ }
+
+ // Calculate padding at the beginning based on current offset.
+ const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;
+
+ // Calculate required margin at the end.
+ const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;
+
+ // Fail if requested size plus margin before and after is bigger than size of this suballocation.
+ if (paddingBegin + allocSize + requiredEndMargin > suballoc.size) {
+ return false;
+ }
+
+ // Check next suballocations for BufferImageGranularity conflicts.
+ // If conflict exists, allocation cannot be made here.
+ if (bufferImageGranularity > 1) {
+ VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
+ ++nextSuballocItem;
+ while (nextSuballocItem != m_Suballocations.cend()) {
+ const VmaSuballocation &nextSuballoc = *nextSuballocItem;
+ if (VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity)) {
+ if (VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type)) {
+ return false;
+ }
+ } else {
+ // Already on next page.
+ break;
+ }
+ ++nextSuballocItem;
+ }
+ }
+ }
+
+ // All tests passed: Success. pOffset is already filled.
+ return true;
+}
+
+void VmaBlockMetadata_Generic::MergeFreeWithNext(VmaSuballocationList::iterator item) {
+ VMA_ASSERT(item != m_Suballocations.end());
+ VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
+
+ VmaSuballocationList::iterator nextItem = item;
+ ++nextItem;
+ VMA_ASSERT(nextItem != m_Suballocations.end());
+ VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
+
+ item->size += nextItem->size;
+ --m_FreeCount;
+ m_Suballocations.erase(nextItem);
+}
+
+VmaSuballocationList::iterator VmaBlockMetadata_Generic::FreeSuballocation(VmaSuballocationList::iterator suballocItem) {
+ // Change this suballocation to be marked as free.
+ VmaSuballocation &suballoc = *suballocItem;
+ suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
+ suballoc.hAllocation = VK_NULL_HANDLE;
+
+ // Update totals.
+ ++m_FreeCount;
+ m_SumFreeSize += suballoc.size;
+
+ // Merge with previous and/or next suballocation if it's also free.
+ bool mergeWithNext = false;
+ bool mergeWithPrev = false;
+
+ VmaSuballocationList::iterator nextItem = suballocItem;
+ ++nextItem;
+ if ((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE)) {
+ mergeWithNext = true;
+ }
+
+ VmaSuballocationList::iterator prevItem = suballocItem;
+ if (suballocItem != m_Suballocations.begin()) {
+ --prevItem;
+ if (prevItem->type == VMA_SUBALLOCATION_TYPE_FREE) {
+ mergeWithPrev = true;
+ }
+ }
+
+ if (mergeWithNext) {
+ UnregisterFreeSuballocation(nextItem);
+ MergeFreeWithNext(suballocItem);
+ }
+
+ if (mergeWithPrev) {
+ UnregisterFreeSuballocation(prevItem);
+ MergeFreeWithNext(prevItem);
+ RegisterFreeSuballocation(prevItem);
+ return prevItem;
+ } else {
+ RegisterFreeSuballocation(suballocItem);
+ return suballocItem;
+ }
+}
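+
+/*
+Merge cases handled by FreeSuballocation() above:
+
+    [ FREE ][ freed ][ FREE ]  ->  [         FREE         ]  (merge prev and next)
+    [ used ][ freed ][ FREE ]  ->  [ used ][     FREE     ]  (merge next only)
+    [ FREE ][ freed ][ used ]  ->  [     FREE     ][ used ]  (merge prev only)
+
+Each merged neighbor is unregistered from m_FreeSuballocationsBySize first,
+because merging changes its size and would otherwise corrupt the by-size order.
+*/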
+
+void VmaBlockMetadata_Generic::RegisterFreeSuballocation(VmaSuballocationList::iterator item) {
+ VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
+ VMA_ASSERT(item->size > 0);
+
+ // You may want to enable this validation at the beginning or at the end of
+ // this function, depending on what you want to check.
+ VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
+
+ if (item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER) {
+ if (m_FreeSuballocationsBySize.empty()) {
+ m_FreeSuballocationsBySize.push_back(item);
+ } else {
+ VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
+ }
+ }
+
+ //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
+}
+
+void VmaBlockMetadata_Generic::UnregisterFreeSuballocation(VmaSuballocationList::iterator item) {
+ VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
+ VMA_ASSERT(item->size > 0);
+
+ // You may want to enable this validation at the beginning or at the end of
+ // this function, depending on what you want to check.
+ VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
+
+ if (item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER) {
+ VmaSuballocationList::iterator *const it = VmaBinaryFindFirstNotLess(
+ m_FreeSuballocationsBySize.data(),
+ m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
+ item,
+ VmaSuballocationItemSizeLess());
+ for (size_t index = it - m_FreeSuballocationsBySize.data();
+ index < m_FreeSuballocationsBySize.size();
+ ++index) {
+ if (m_FreeSuballocationsBySize[index] == item) {
+ VmaVectorRemove(m_FreeSuballocationsBySize, index);
+ return;
+ }
+ VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
+ }
+ VMA_ASSERT(0 && "Not found.");
+ }
+
+ //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
+}
+
+bool VmaBlockMetadata_Generic::IsBufferImageGranularityConflictPossible(
+ VkDeviceSize bufferImageGranularity,
+ VmaSuballocationType &inOutPrevSuballocType) const {
+ if (bufferImageGranularity == 1 || IsEmpty()) {
+ return false;
+ }
+
+ VkDeviceSize minAlignment = VK_WHOLE_SIZE;
+ bool typeConflictFound = false;
+ for (VmaSuballocationList::const_iterator it = m_Suballocations.cbegin();
+ it != m_Suballocations.cend();
+ ++it) {
+ const VmaSuballocationType suballocType = it->type;
+ if (suballocType != VMA_SUBALLOCATION_TYPE_FREE) {
+ minAlignment = VMA_MIN(minAlignment, it->hAllocation->GetAlignment());
+ if (VmaIsBufferImageGranularityConflict(inOutPrevSuballocType, suballocType)) {
+ typeConflictFound = true;
+ }
+ inOutPrevSuballocType = suballocType;
+ }
+ }
+
+ return typeConflictFound || minAlignment >= bufferImageGranularity;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// class VmaBlockMetadata_Linear
+
+VmaBlockMetadata_Linear::VmaBlockMetadata_Linear(VmaAllocator hAllocator) :
+ VmaBlockMetadata(hAllocator),
+ m_SumFreeSize(0),
+ m_Suballocations0(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
+ m_Suballocations1(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
+ m_1stVectorIndex(0),
+ m_2ndVectorMode(SECOND_VECTOR_EMPTY),
+ m_1stNullItemsBeginCount(0),
+ m_1stNullItemsMiddleCount(0),
+ m_2ndNullItemsCount(0) {
+}
+
+VmaBlockMetadata_Linear::~VmaBlockMetadata_Linear() {
+}
+
+void VmaBlockMetadata_Linear::Init(VkDeviceSize size) {
+ VmaBlockMetadata::Init(size);
+ m_SumFreeSize = size;
+}
+
+bool VmaBlockMetadata_Linear::Validate() const {
+ const SuballocationVectorType &suballocations1st = AccessSuballocations1st();
+ const SuballocationVectorType &suballocations2nd = AccessSuballocations2nd();
+
+ VMA_VALIDATE(suballocations2nd.empty() == (m_2ndVectorMode == SECOND_VECTOR_EMPTY));
+ VMA_VALIDATE(!suballocations1st.empty() ||
+ suballocations2nd.empty() ||
+ m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER);
+
+ if (!suballocations1st.empty()) {
+ // Null items at the beginning should be accounted for in m_1stNullItemsBeginCount.
+ VMA_VALIDATE(suballocations1st[m_1stNullItemsBeginCount].hAllocation != VK_NULL_HANDLE);
+ // Null item at the end should be just pop_back().
+ VMA_VALIDATE(suballocations1st.back().hAllocation != VK_NULL_HANDLE);
+ }
+ if (!suballocations2nd.empty()) {
+ // Null item at the end should be just pop_back().
+ VMA_VALIDATE(suballocations2nd.back().hAllocation != VK_NULL_HANDLE);
+ }
+
+ VMA_VALIDATE(m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount <= suballocations1st.size());
+ VMA_VALIDATE(m_2ndNullItemsCount <= suballocations2nd.size());
+
+ VkDeviceSize sumUsedSize = 0;
+ const size_t suballoc1stCount = suballocations1st.size();
+ VkDeviceSize offset = VMA_DEBUG_MARGIN;
+
+ if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER) {
+ const size_t suballoc2ndCount = suballocations2nd.size();
+ size_t nullItem2ndCount = 0;
+ for (size_t i = 0; i < suballoc2ndCount; ++i) {
+ const VmaSuballocation &suballoc = suballocations2nd[i];
+ const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
+
+ VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
+ VMA_VALIDATE(suballoc.offset >= offset);
+
+ if (!currFree) {
+ VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
+ VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
+ sumUsedSize += suballoc.size;
+ } else {
+ ++nullItem2ndCount;
+ }
+
+ offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
+ }
+
+ VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
+ }
+
+ for (size_t i = 0; i < m_1stNullItemsBeginCount; ++i) {
+ const VmaSuballocation &suballoc = suballocations1st[i];
+ VMA_VALIDATE(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE &&
+ suballoc.hAllocation == VK_NULL_HANDLE);
+ }
+
+ size_t nullItem1stCount = m_1stNullItemsBeginCount;
+
+ for (size_t i = m_1stNullItemsBeginCount; i < suballoc1stCount; ++i) {
+ const VmaSuballocation &suballoc = suballocations1st[i];
+ const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
+
+ VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
+ VMA_VALIDATE(suballoc.offset >= offset);
+ VMA_VALIDATE(i >= m_1stNullItemsBeginCount || currFree);
+
+ if (!currFree) {
+ VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
+ VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
+ sumUsedSize += suballoc.size;
+ } else {
+ ++nullItem1stCount;
+ }
+
+ offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
+ }
+ VMA_VALIDATE(nullItem1stCount == m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount);
+
+ if (m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK) {
+ const size_t suballoc2ndCount = suballocations2nd.size();
+ size_t nullItem2ndCount = 0;
+ for (size_t i = suballoc2ndCount; i--;) {
+ const VmaSuballocation &suballoc = suballocations2nd[i];
+ const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
+
+ VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
+ VMA_VALIDATE(suballoc.offset >= offset);
+
+ if (!currFree) {
+ VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
+ VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
+ sumUsedSize += suballoc.size;
+ } else {
+ ++nullItem2ndCount;
+ }
+
+ offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
+ }
+
+ VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
+ }
+
+ VMA_VALIDATE(offset <= GetSize());
+ VMA_VALIDATE(m_SumFreeSize == GetSize() - sumUsedSize);
+
+ return true;
+}
+
+size_t VmaBlockMetadata_Linear::GetAllocationCount() const {
+ return AccessSuballocations1st().size() - (m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount) +
+ AccessSuballocations2nd().size() - m_2ndNullItemsCount;
+}
+
+VkDeviceSize VmaBlockMetadata_Linear::GetUnusedRangeSizeMax() const {
+ const VkDeviceSize size = GetSize();
+
+ /*
+ We don't consider gaps inside allocation vectors with freed allocations because
+ they are not suitable for reuse in a linear allocator. We consider only space that
+ is available for new allocations.
+ */
+ if (IsEmpty()) {
+ return size;
+ }
+
+ const SuballocationVectorType &suballocations1st = AccessSuballocations1st();
+
+ switch (m_2ndVectorMode) {
+ case SECOND_VECTOR_EMPTY:
+ /*
+ Available space is after the end of 1st, as well as before the beginning of 1st
+ (which would make it a ring buffer).
+ */
+ {
+ const size_t suballocations1stCount = suballocations1st.size();
+ VMA_ASSERT(suballocations1stCount > m_1stNullItemsBeginCount);
+ const VmaSuballocation &firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
+ const VmaSuballocation &lastSuballoc = suballocations1st[suballocations1stCount - 1];
+ return VMA_MAX(
+ firstSuballoc.offset,
+ size - (lastSuballoc.offset + lastSuballoc.size));
+ }
+ break;
+
+ case SECOND_VECTOR_RING_BUFFER:
+ /*
+ Available space is only between end of 2nd and beginning of 1st.
+ */
+ {
+ const SuballocationVectorType &suballocations2nd = AccessSuballocations2nd();
+ const VmaSuballocation &lastSuballoc2nd = suballocations2nd.back();
+ const VmaSuballocation &firstSuballoc1st = suballocations1st[m_1stNullItemsBeginCount];
+ return firstSuballoc1st.offset - (lastSuballoc2nd.offset + lastSuballoc2nd.size);
+ }
+ break;
+
+ case SECOND_VECTOR_DOUBLE_STACK:
+ /*
+ Available space is only between end of 1st and top of 2nd.
+ */
+ {
+ const SuballocationVectorType &suballocations2nd = AccessSuballocations2nd();
+ const VmaSuballocation &topSuballoc2nd = suballocations2nd.back();
+ const VmaSuballocation &lastSuballoc1st = suballocations1st.back();
+ return topSuballoc2nd.offset - (lastSuballoc1st.offset + lastSuballoc1st.size);
+ }
+ break;
+
+ default:
+ VMA_ASSERT(0);
+ return 0;
+ }
+}
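+
+/*
+Illustrative layout of the three m_2ndVectorMode states (A/B are items of the
+1st vector, C of the 2nd, dots are free space):
+
+    SECOND_VECTOR_EMPTY:        |..AAAA.BBBB......|  1st only
+    SECOND_VECTOR_RING_BUFFER:  |CCC....AAAA.BBBB.|  2nd wrapped around before 1st
+    SECOND_VECTOR_DOUBLE_STACK: |AAAA.BBBB.....CCC|  2nd grows down from the end
+
+GetUnusedRangeSizeMax() above returns, respectively: the larger of the spaces
+before and after 1st; the gap between the end of 2nd and the start of 1st; or
+the gap between the end of 1st and the top of 2nd.
+*/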
+
+void VmaBlockMetadata_Linear::CalcAllocationStatInfo(VmaStatInfo &outInfo) const {
+ const VkDeviceSize size = GetSize();
+ const SuballocationVectorType &suballocations1st = AccessSuballocations1st();
+ const SuballocationVectorType &suballocations2nd = AccessSuballocations2nd();
+ const size_t suballoc1stCount = suballocations1st.size();
+ const size_t suballoc2ndCount = suballocations2nd.size();
+
+ outInfo.blockCount = 1;
+ outInfo.allocationCount = (uint32_t)GetAllocationCount();
+ outInfo.unusedRangeCount = 0;
+ outInfo.usedBytes = 0;
+ outInfo.allocationSizeMin = UINT64_MAX;
+ outInfo.allocationSizeMax = 0;
+ outInfo.unusedRangeSizeMin = UINT64_MAX;
+ outInfo.unusedRangeSizeMax = 0;
+
+ VkDeviceSize lastOffset = 0;
+
+ if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER) {
+ const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
+ size_t nextAlloc2ndIndex = 0;
+ while (lastOffset < freeSpace2ndTo1stEnd) {
+ // Find next non-null allocation or move nextAllocIndex to the end.
+ while (nextAlloc2ndIndex < suballoc2ndCount &&
+ suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE) {
+ ++nextAlloc2ndIndex;
+ }
+
+ // Found non-null allocation.
+ if (nextAlloc2ndIndex < suballoc2ndCount) {
+ const VmaSuballocation &suballoc = suballocations2nd[nextAlloc2ndIndex];
+
+ // 1. Process free space before this allocation.
+ if (lastOffset < suballoc.offset) {
+ // There is free space from lastOffset to suballoc.offset.
+ const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
+ ++outInfo.unusedRangeCount;
+ outInfo.unusedBytes += unusedRangeSize;
+ outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
+ outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
+ }
+
+ // 2. Process this allocation.
+ // There is allocation with suballoc.offset, suballoc.size.
+ outInfo.usedBytes += suballoc.size;
+ outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
+ outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
+
+ // 3. Prepare for next iteration.
+ lastOffset = suballoc.offset + suballoc.size;
+ ++nextAlloc2ndIndex;
+ }
+ // We are at the end.
+ else {
+ // There is free space from lastOffset to freeSpace2ndTo1stEnd.
+ if (lastOffset < freeSpace2ndTo1stEnd) {
+ const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
+ ++outInfo.unusedRangeCount;
+ outInfo.unusedBytes += unusedRangeSize;
+ outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
+ outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
+ }
+
+ // End of loop.
+ lastOffset = freeSpace2ndTo1stEnd;
+ }
+ }
+ }
+
+ size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
+ const VkDeviceSize freeSpace1stTo2ndEnd =
+ m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
+ while (lastOffset < freeSpace1stTo2ndEnd) {
+ // Find next non-null allocation or move nextAllocIndex to the end.
+ while (nextAlloc1stIndex < suballoc1stCount &&
+ suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE) {
+ ++nextAlloc1stIndex;
+ }
+
+ // Found non-null allocation.
+ if (nextAlloc1stIndex < suballoc1stCount) {
+ const VmaSuballocation &suballoc = suballocations1st[nextAlloc1stIndex];
+
+ // 1. Process free space before this allocation.
+ if (lastOffset < suballoc.offset) {
+ // There is free space from lastOffset to suballoc.offset.
+ const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
+ ++outInfo.unusedRangeCount;
+ outInfo.unusedBytes += unusedRangeSize;
+ outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
+ outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
+ }
+
+ // 2. Process this allocation.
+ // There is allocation with suballoc.offset, suballoc.size.
+ outInfo.usedBytes += suballoc.size;
+ outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
+ outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
+
+ // 3. Prepare for next iteration.
+ lastOffset = suballoc.offset + suballoc.size;
+ ++nextAlloc1stIndex;
+ }
+ // We are at the end.
+ else {
+ // There is free space from lastOffset to freeSpace1stTo2ndEnd.
+ if (lastOffset < freeSpace1stTo2ndEnd) {
+ const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
+ ++outInfo.unusedRangeCount;
+ outInfo.unusedBytes += unusedRangeSize;
+ outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
+ outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
+ }
+
+ // End of loop.
+ lastOffset = freeSpace1stTo2ndEnd;
+ }
+ }
+
+ if (m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK) {
+ size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
+ while (lastOffset < size) {
+ // Find next non-null allocation or move nextAllocIndex to the end.
+ while (nextAlloc2ndIndex != SIZE_MAX &&
+ suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE) {
+ --nextAlloc2ndIndex;
+ }
+
+ // Found non-null allocation.
+ if (nextAlloc2ndIndex != SIZE_MAX) {
+ const VmaSuballocation &suballoc = suballocations2nd[nextAlloc2ndIndex];
+
+ // 1. Process free space before this allocation.
+ if (lastOffset < suballoc.offset) {
+ // There is free space from lastOffset to suballoc.offset.
+ const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
+ ++outInfo.unusedRangeCount;
+ outInfo.unusedBytes += unusedRangeSize;
+ outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
+ outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
+ }
+
+ // 2. Process this allocation.
+ // There is allocation with suballoc.offset, suballoc.size.
+ outInfo.usedBytes += suballoc.size;
+ outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
+ outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
+
+ // 3. Prepare for next iteration.
+ lastOffset = suballoc.offset + suballoc.size;
+ --nextAlloc2ndIndex;
+ }
+ // We are at the end.
+ else {
+ // There is free space from lastOffset to size.
+ if (lastOffset < size) {
+ const VkDeviceSize unusedRangeSize = size - lastOffset;
+ ++outInfo.unusedRangeCount;
+ outInfo.unusedBytes += unusedRangeSize;
+ outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
+ outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
+ }
+
+ // End of loop.
+ lastOffset = size;
+ }
+ }
+ }
+
+ outInfo.unusedBytes = size - outInfo.usedBytes;
+}
+
+void VmaBlockMetadata_Linear::AddPoolStats(VmaPoolStats &inoutStats) const {
+ const SuballocationVectorType &suballocations1st = AccessSuballocations1st();
+ const SuballocationVectorType &suballocations2nd = AccessSuballocations2nd();
+ const VkDeviceSize size = GetSize();
+ const size_t suballoc1stCount = suballocations1st.size();
+ const size_t suballoc2ndCount = suballocations2nd.size();
+
+ inoutStats.size += size;
+
+ VkDeviceSize lastOffset = 0;
+
+ if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER) {
+ const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
+ size_t nextAlloc2ndIndex = 0;
+ while (lastOffset < freeSpace2ndTo1stEnd) {
+ // Find next non-null allocation or move nextAlloc2ndIndex to the end.
+ while (nextAlloc2ndIndex < suballoc2ndCount &&
+ suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE) {
+ ++nextAlloc2ndIndex;
+ }
+
+ // Found non-null allocation.
+ if (nextAlloc2ndIndex < suballoc2ndCount) {
+ const VmaSuballocation &suballoc = suballocations2nd[nextAlloc2ndIndex];
+
+ // 1. Process free space before this allocation.
+ if (lastOffset < suballoc.offset) {
+ // There is free space from lastOffset to suballoc.offset.
+ const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
+ inoutStats.unusedSize += unusedRangeSize;
+ ++inoutStats.unusedRangeCount;
+ inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
+ }
+
+ // 2. Process this allocation.
+ // There is allocation with suballoc.offset, suballoc.size.
+ ++inoutStats.allocationCount;
+
+ // 3. Prepare for next iteration.
+ lastOffset = suballoc.offset + suballoc.size;
+ ++nextAlloc2ndIndex;
+ }
+ // We are at the end.
+ else {
+ if (lastOffset < freeSpace2ndTo1stEnd) {
+ // There is free space from lastOffset to freeSpace2ndTo1stEnd.
+ const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
+ inoutStats.unusedSize += unusedRangeSize;
+ ++inoutStats.unusedRangeCount;
+ inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
+ }
+
+ // End of loop.
+ lastOffset = freeSpace2ndTo1stEnd;
+ }
+ }
+ }
+
+ size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
+ const VkDeviceSize freeSpace1stTo2ndEnd =
+ m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
+ while (lastOffset < freeSpace1stTo2ndEnd) {
+ // Find next non-null allocation or move nextAllocIndex to the end.
+ while (nextAlloc1stIndex < suballoc1stCount &&
+ suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE) {
+ ++nextAlloc1stIndex;
+ }
+
+ // Found non-null allocation.
+ if (nextAlloc1stIndex < suballoc1stCount) {
+ const VmaSuballocation &suballoc = suballocations1st[nextAlloc1stIndex];
+
+ // 1. Process free space before this allocation.
+ if (lastOffset < suballoc.offset) {
+ // There is free space from lastOffset to suballoc.offset.
+ const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
+ inoutStats.unusedSize += unusedRangeSize;
+ ++inoutStats.unusedRangeCount;
+ inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
+ }
+
+ // 2. Process this allocation.
+ // There is allocation with suballoc.offset, suballoc.size.
+ ++inoutStats.allocationCount;
+
+ // 3. Prepare for next iteration.
+ lastOffset = suballoc.offset + suballoc.size;
+ ++nextAlloc1stIndex;
+ }
+ // We are at the end.
+ else {
+ if (lastOffset < freeSpace1stTo2ndEnd) {
+ // There is free space from lastOffset to freeSpace1stTo2ndEnd.
+ const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
+ inoutStats.unusedSize += unusedRangeSize;
+ ++inoutStats.unusedRangeCount;
+ inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
+ }
+
+ // End of loop.
+ lastOffset = freeSpace1stTo2ndEnd;
+ }
+ }
+
+ if (m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK) {
+ size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
+ while (lastOffset < size) {
+ // Find next non-null allocation or move nextAlloc2ndIndex to the end.
+ while (nextAlloc2ndIndex != SIZE_MAX &&
+ suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE) {
+ --nextAlloc2ndIndex;
+ }
+
+ // Found non-null allocation.
+ if (nextAlloc2ndIndex != SIZE_MAX) {
+ const VmaSuballocation &suballoc = suballocations2nd[nextAlloc2ndIndex];
+
+ // 1. Process free space before this allocation.
+ if (lastOffset < suballoc.offset) {
+ // There is free space from lastOffset to suballoc.offset.
+ const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
+ inoutStats.unusedSize += unusedRangeSize;
+ ++inoutStats.unusedRangeCount;
+ inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
+ }
+
+ // 2. Process this allocation.
+ // There is allocation with suballoc.offset, suballoc.size.
+ ++inoutStats.allocationCount;
+
+ // 3. Prepare for next iteration.
+ lastOffset = suballoc.offset + suballoc.size;
+ --nextAlloc2ndIndex;
+ }
+ // We are at the end.
+ else {
+ if (lastOffset < size) {
+ // There is free space from lastOffset to size.
+ const VkDeviceSize unusedRangeSize = size - lastOffset;
+ inoutStats.unusedSize += unusedRangeSize;
+ ++inoutStats.unusedRangeCount;
+ inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
+ }
+
+ // End of loop.
+ lastOffset = size;
+ }
+ }
+ }
+}
+
+#if VMA_STATS_STRING_ENABLED
+void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter &json) const {
+ const VkDeviceSize size = GetSize();
+ const SuballocationVectorType &suballocations1st = AccessSuballocations1st();
+ const SuballocationVectorType &suballocations2nd = AccessSuballocations2nd();
+ const size_t suballoc1stCount = suballocations1st.size();
+ const size_t suballoc2ndCount = suballocations2nd.size();
+
+ // FIRST PASS
+
+ size_t unusedRangeCount = 0;
+ VkDeviceSize usedBytes = 0;
+
+ VkDeviceSize lastOffset = 0;
+
+ size_t alloc2ndCount = 0;
+ if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER) {
+ const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
+ size_t nextAlloc2ndIndex = 0;
+ while (lastOffset < freeSpace2ndTo1stEnd) {
+ // Find next non-null allocation or move nextAlloc2ndIndex to the end.
+ while (nextAlloc2ndIndex < suballoc2ndCount &&
+ suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE) {
+ ++nextAlloc2ndIndex;
+ }
+
+ // Found non-null allocation.
+ if (nextAlloc2ndIndex < suballoc2ndCount) {
+ const VmaSuballocation &suballoc = suballocations2nd[nextAlloc2ndIndex];
+
+ // 1. Process free space before this allocation.
+ if (lastOffset < suballoc.offset) {
+ // There is free space from lastOffset to suballoc.offset.
+ ++unusedRangeCount;
+ }
+
+ // 2. Process this allocation.
+ // There is allocation with suballoc.offset, suballoc.size.
+ ++alloc2ndCount;
+ usedBytes += suballoc.size;
+
+ // 3. Prepare for next iteration.
+ lastOffset = suballoc.offset + suballoc.size;
+ ++nextAlloc2ndIndex;
+ }
+ // We are at the end.
+ else {
+ if (lastOffset < freeSpace2ndTo1stEnd) {
+ // There is free space from lastOffset to freeSpace2ndTo1stEnd.
+ ++unusedRangeCount;
+ }
+
+ // End of loop.
+ lastOffset = freeSpace2ndTo1stEnd;
+ }
+ }
+ }
+
+ size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
+ size_t alloc1stCount = 0;
+ const VkDeviceSize freeSpace1stTo2ndEnd =
+ m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
+ while (lastOffset < freeSpace1stTo2ndEnd) {
+ // Find next non-null allocation or move nextAllocIndex to the end.
+ while (nextAlloc1stIndex < suballoc1stCount &&
+ suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE) {
+ ++nextAlloc1stIndex;
+ }
+
+ // Found non-null allocation.
+ if (nextAlloc1stIndex < suballoc1stCount) {
+ const VmaSuballocation &suballoc = suballocations1st[nextAlloc1stIndex];
+
+ // 1. Process free space before this allocation.
+ if (lastOffset < suballoc.offset) {
+ // There is free space from lastOffset to suballoc.offset.
+ ++unusedRangeCount;
+ }
+
+ // 2. Process this allocation.
+ // There is allocation with suballoc.offset, suballoc.size.
+ ++alloc1stCount;
+ usedBytes += suballoc.size;
+
+ // 3. Prepare for next iteration.
+ lastOffset = suballoc.offset + suballoc.size;
+ ++nextAlloc1stIndex;
+ }
+ // We are at the end.
+ else {
+ if (lastOffset < freeSpace1stTo2ndEnd) {
+ // There is free space from lastOffset to freeSpace1stTo2ndEnd.
+ ++unusedRangeCount;
+ }
+
+ // End of loop.
+ lastOffset = freeSpace1stTo2ndEnd;
+ }
+ }
+
+ if (m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK) {
+ size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
+ while (lastOffset < size) {
+ // Find next non-null allocation or move nextAlloc2ndIndex to the end.
+ while (nextAlloc2ndIndex != SIZE_MAX &&
+ suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE) {
+ --nextAlloc2ndIndex;
+ }
+
+ // Found non-null allocation.
+ if (nextAlloc2ndIndex != SIZE_MAX) {
+ const VmaSuballocation &suballoc = suballocations2nd[nextAlloc2ndIndex];
+
+ // 1. Process free space before this allocation.
+ if (lastOffset < suballoc.offset) {
+ // There is free space from lastOffset to suballoc.offset.
+ ++unusedRangeCount;
+ }
+
+ // 2. Process this allocation.
+ // There is allocation with suballoc.offset, suballoc.size.
+ ++alloc2ndCount;
+ usedBytes += suballoc.size;
+
+ // 3. Prepare for next iteration.
+ lastOffset = suballoc.offset + suballoc.size;
+ --nextAlloc2ndIndex;
+ }
+ // We are at the end.
+ else {
+ if (lastOffset < size) {
+ // There is free space from lastOffset to size.
+ ++unusedRangeCount;
+ }
+
+ // End of loop.
+ lastOffset = size;
+ }
+ }
+ }
+
+ const VkDeviceSize unusedBytes = size - usedBytes;
+ PrintDetailedMap_Begin(json, unusedBytes, alloc1stCount + alloc2ndCount, unusedRangeCount);
+
+ // SECOND PASS
+ lastOffset = 0;
+
+ if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER) {
+ const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
+ size_t nextAlloc2ndIndex = 0;
+ while (lastOffset < freeSpace2ndTo1stEnd) {
+ // Find next non-null allocation or move nextAlloc2ndIndex to the end.
+ while (nextAlloc2ndIndex < suballoc2ndCount &&
+ suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE) {
+ ++nextAlloc2ndIndex;
+ }
+
+ // Found non-null allocation.
+ if (nextAlloc2ndIndex < suballoc2ndCount) {
+ const VmaSuballocation &suballoc = suballocations2nd[nextAlloc2ndIndex];
+
+ // 1. Process free space before this allocation.
+ if (lastOffset < suballoc.offset) {
+ // There is free space from lastOffset to suballoc.offset.
+ const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
+ PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
+ }
+
+ // 2. Process this allocation.
+ // There is allocation with suballoc.offset, suballoc.size.
+ PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
+
+ // 3. Prepare for next iteration.
+ lastOffset = suballoc.offset + suballoc.size;
+ ++nextAlloc2ndIndex;
+ }
+ // We are at the end.
+ else {
+ if (lastOffset < freeSpace2ndTo1stEnd) {
+ // There is free space from lastOffset to freeSpace2ndTo1stEnd.
+ const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
+ PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
+ }
+
+ // End of loop.
+ lastOffset = freeSpace2ndTo1stEnd;
+ }
+ }
+ }
+
+ nextAlloc1stIndex = m_1stNullItemsBeginCount;
+ while (lastOffset < freeSpace1stTo2ndEnd) {
+ // Find next non-null allocation or move nextAllocIndex to the end.
+ while (nextAlloc1stIndex < suballoc1stCount &&
+ suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE) {
+ ++nextAlloc1stIndex;
+ }
+
+ // Found non-null allocation.
+ if (nextAlloc1stIndex < suballoc1stCount) {
+ const VmaSuballocation &suballoc = suballocations1st[nextAlloc1stIndex];
+
+ // 1. Process free space before this allocation.
+ if (lastOffset < suballoc.offset) {
+ // There is free space from lastOffset to suballoc.offset.
+ const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
+ PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
+ }
+
+ // 2. Process this allocation.
+ // There is allocation with suballoc.offset, suballoc.size.
+ PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
+
+ // 3. Prepare for next iteration.
+ lastOffset = suballoc.offset + suballoc.size;
+ ++nextAlloc1stIndex;
+ }
+ // We are at the end.
+ else {
+ if (lastOffset < freeSpace1stTo2ndEnd) {
+ // There is free space from lastOffset to freeSpace1stTo2ndEnd.
+ const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
+ PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
+ }
+
+ // End of loop.
+ lastOffset = freeSpace1stTo2ndEnd;
+ }
+ }
+
+ if (m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK) {
+ size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
+ while (lastOffset < size) {
+ // Find next non-null allocation or move nextAlloc2ndIndex to the end.
+ while (nextAlloc2ndIndex != SIZE_MAX &&
+ suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE) {
+ --nextAlloc2ndIndex;
+ }
+
+ // Found non-null allocation.
+ if (nextAlloc2ndIndex != SIZE_MAX) {
+ const VmaSuballocation &suballoc = suballocations2nd[nextAlloc2ndIndex];
+
+ // 1. Process free space before this allocation.
+ if (lastOffset < suballoc.offset) {
+ // There is free space from lastOffset to suballoc.offset.
+ const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
+ PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
+ }
+
+ // 2. Process this allocation.
+ // There is allocation with suballoc.offset, suballoc.size.
+ PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
+
+ // 3. Prepare for next iteration.
+ lastOffset = suballoc.offset + suballoc.size;
+ --nextAlloc2ndIndex;
+ }
+ // We are at the end.
+ else {
+ if (lastOffset < size) {
+ // There is free space from lastOffset to size.
+ const VkDeviceSize unusedRangeSize = size - lastOffset;
+ PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
+ }
+
+ // End of loop.
+ lastOffset = size;
+ }
+ }
+ }
+
+ PrintDetailedMap_End(json);
+}
+#endif // #if VMA_STATS_STRING_ENABLED
+
+bool VmaBlockMetadata_Linear::CreateAllocationRequest(
+ uint32_t currentFrameIndex,
+ uint32_t frameInUseCount,
+ VkDeviceSize bufferImageGranularity,
+ VkDeviceSize allocSize,
+ VkDeviceSize allocAlignment,
+ bool upperAddress,
+ VmaSuballocationType allocType,
+ bool canMakeOtherLost,
+ uint32_t strategy,
+ VmaAllocationRequest *pAllocationRequest) {
+ VMA_ASSERT(allocSize > 0);
+ VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
+ VMA_ASSERT(pAllocationRequest != VMA_NULL);
+ VMA_HEAVY_ASSERT(Validate());
+ return upperAddress ?
+ CreateAllocationRequest_UpperAddress(
+ currentFrameIndex, frameInUseCount, bufferImageGranularity,
+ allocSize, allocAlignment, allocType, canMakeOtherLost, strategy, pAllocationRequest) :
+ CreateAllocationRequest_LowerAddress(
+ currentFrameIndex, frameInUseCount, bufferImageGranularity,
+ allocSize, allocAlignment, allocType, canMakeOtherLost, strategy, pAllocationRequest);
+}
+
+bool VmaBlockMetadata_Linear::CreateAllocationRequest_UpperAddress(
+ uint32_t currentFrameIndex,
+ uint32_t frameInUseCount,
+ VkDeviceSize bufferImageGranularity,
+ VkDeviceSize allocSize,
+ VkDeviceSize allocAlignment,
+ VmaSuballocationType allocType,
+ bool canMakeOtherLost,
+ uint32_t strategy,
+ VmaAllocationRequest *pAllocationRequest) {
+ const VkDeviceSize size = GetSize();
+ SuballocationVectorType &suballocations1st = AccessSuballocations1st();
+ SuballocationVectorType &suballocations2nd = AccessSuballocations2nd();
+
+ if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER) {
+ VMA_ASSERT(0 && "Trying to use pool with linear algorithm as double stack, while it is already being used as ring buffer.");
+ return false;
+ }
+
+ // Try to allocate before 2nd.back(), or end of block if 2nd.empty().
+ if (allocSize > size) {
+ return false;
+ }
+ VkDeviceSize resultBaseOffset = size - allocSize;
+ if (!suballocations2nd.empty()) {
+ const VmaSuballocation &lastSuballoc = suballocations2nd.back();
+ resultBaseOffset = lastSuballoc.offset - allocSize;
+ if (allocSize > lastSuballoc.offset) {
+ return false;
+ }
+ }
+
+ // Start from offset equal to end of free space.
+ VkDeviceSize resultOffset = resultBaseOffset;
+
+ // Apply VMA_DEBUG_MARGIN at the end.
+ if (VMA_DEBUG_MARGIN > 0) {
+ if (resultOffset < VMA_DEBUG_MARGIN) {
+ return false;
+ }
+ resultOffset -= VMA_DEBUG_MARGIN;
+ }
+
+ // Apply alignment.
+ resultOffset = VmaAlignDown(resultOffset, allocAlignment);
+
+ // Check next suballocations from 2nd for BufferImageGranularity conflicts.
+ // Make bigger alignment if necessary.
+ if (bufferImageGranularity > 1 && !suballocations2nd.empty()) {
+ bool bufferImageGranularityConflict = false;
+ for (size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--;) {
+ const VmaSuballocation &nextSuballoc = suballocations2nd[nextSuballocIndex];
+ if (VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity)) {
+ if (VmaIsBufferImageGranularityConflict(nextSuballoc.type, allocType)) {
+ bufferImageGranularityConflict = true;
+ break;
+ }
+ } else {
+ // Already on previous page.
+ break;
+ }
+ }
+ if (bufferImageGranularityConflict) {
+ resultOffset = VmaAlignDown(resultOffset, bufferImageGranularity);
+ }
+ }
+
+ // There is enough free space.
+ const VkDeviceSize endOf1st = !suballocations1st.empty() ?
+ suballocations1st.back().offset + suballocations1st.back().size :
+ 0;
+ if (endOf1st + VMA_DEBUG_MARGIN <= resultOffset) {
+ // Check previous suballocations for BufferImageGranularity conflicts.
+ // If conflict exists, allocation cannot be made here.
+ if (bufferImageGranularity > 1) {
+ for (size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--;) {
+ const VmaSuballocation &prevSuballoc = suballocations1st[prevSuballocIndex];
+ if (VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity)) {
+ if (VmaIsBufferImageGranularityConflict(allocType, prevSuballoc.type)) {
+ return false;
+ }
+ } else {
+ // Already on next page.
+ break;
+ }
+ }
+ }
+
+ // All tests passed: Success.
+ pAllocationRequest->offset = resultOffset;
+ pAllocationRequest->sumFreeSize = resultBaseOffset + allocSize - endOf1st;
+ pAllocationRequest->sumItemSize = 0;
+ // pAllocationRequest->item unused.
+ pAllocationRequest->itemsToMakeLostCount = 0;
+ pAllocationRequest->type = VmaAllocationRequestType::UpperAddress;
+ return true;
+ }
+
+ return false;
+}
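+
+/*
+A hedged usage sketch (not part of the implementation): the upper-address path
+above is reached when an allocation from a linear pool requests
+VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT, which turns the block into a double
+stack (1st vector grows up from offset 0, 2nd grows down from the end).
+Assuming `pool` uses the linear algorithm and `bufCreateInfo` is a filled-in
+VkBufferCreateInfo:
+
+    VmaAllocationCreateInfo allocInfo = {};
+    allocInfo.pool = pool;
+    allocInfo.flags = VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT;
+
+    VkBuffer buf = VK_NULL_HANDLE;
+    VmaAllocation alloc = VK_NULL_HANDLE;
+    VkResult res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocInfo, &buf, &alloc, VMA_NULL);
+*/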
+
+bool VmaBlockMetadata_Linear::CreateAllocationRequest_LowerAddress(
+ uint32_t currentFrameIndex,
+ uint32_t frameInUseCount,
+ VkDeviceSize bufferImageGranularity,
+ VkDeviceSize allocSize,
+ VkDeviceSize allocAlignment,
+ VmaSuballocationType allocType,
+ bool canMakeOtherLost,
+ uint32_t strategy,
+ VmaAllocationRequest *pAllocationRequest) {
+ const VkDeviceSize size = GetSize();
+ SuballocationVectorType &suballocations1st = AccessSuballocations1st();
+ SuballocationVectorType &suballocations2nd = AccessSuballocations2nd();
+
+ if (m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK) {
+ // Try to allocate at the end of 1st vector.
+
+ VkDeviceSize resultBaseOffset = 0;
+ if (!suballocations1st.empty()) {
+ const VmaSuballocation &lastSuballoc = suballocations1st.back();
+ resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
+ }
+
+ // Start from offset equal to beginning of free space.
+ VkDeviceSize resultOffset = resultBaseOffset;
+
+ // Apply VMA_DEBUG_MARGIN at the beginning.
+ if (VMA_DEBUG_MARGIN > 0) {
+ resultOffset += VMA_DEBUG_MARGIN;
+ }
+
+ // Apply alignment.
+ resultOffset = VmaAlignUp(resultOffset, allocAlignment);
+
+ // Check previous suballocations for BufferImageGranularity conflicts.
+ // Make bigger alignment if necessary.
+ if (bufferImageGranularity > 1 && !suballocations1st.empty()) {
+ bool bufferImageGranularityConflict = false;
+ for (size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--;) {
+ const VmaSuballocation &prevSuballoc = suballocations1st[prevSuballocIndex];
+ if (VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity)) {
+ if (VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType)) {
+ bufferImageGranularityConflict = true;
+ break;
+ }
+ } else {
+ // Already on previous page.
+ break;
+ }
+ }
+ if (bufferImageGranularityConflict) {
+ resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
+ }
+ }
+
+ const VkDeviceSize freeSpaceEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ?
+ suballocations2nd.back().offset :
+ size;
+
+ // There is enough free space at the end after alignment.
+ if (resultOffset + allocSize + VMA_DEBUG_MARGIN <= freeSpaceEnd) {
+ // Check next suballocations for BufferImageGranularity conflicts.
+ // If conflict exists, allocation cannot be made here.
+ if (bufferImageGranularity > 1 && m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK) {
+ for (size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--;) {
+ const VmaSuballocation &nextSuballoc = suballocations2nd[nextSuballocIndex];
+ if (VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity)) {
+ if (VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type)) {
+ return false;
+ }
+ } else {
+ // Already on previous page.
+ break;
+ }
+ }
+ }
+
+ // All tests passed: Success.
+ pAllocationRequest->offset = resultOffset;
+ pAllocationRequest->sumFreeSize = freeSpaceEnd - resultBaseOffset;
+ pAllocationRequest->sumItemSize = 0;
+ // pAllocationRequest->item, customData unused.
+ pAllocationRequest->type = VmaAllocationRequestType::EndOf1st;
+ pAllocationRequest->itemsToMakeLostCount = 0;
+ return true;
+ }
+ }
+
+ // Wrap around to the end of the 2nd vector and try to allocate there,
+ // treating the beginning of the 1st vector as the end of free space.
+ if (m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER) {
+ VMA_ASSERT(!suballocations1st.empty());
+
+ VkDeviceSize resultBaseOffset = 0;
+ if (!suballocations2nd.empty()) {
+ const VmaSuballocation &lastSuballoc = suballocations2nd.back();
+ resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
+ }
+
+ // Start from offset equal to beginning of free space.
+ VkDeviceSize resultOffset = resultBaseOffset;
+
+ // Apply VMA_DEBUG_MARGIN at the beginning.
+ if (VMA_DEBUG_MARGIN > 0) {
+ resultOffset += VMA_DEBUG_MARGIN;
+ }
+
+ // Apply alignment.
+ resultOffset = VmaAlignUp(resultOffset, allocAlignment);
+
+ // Check previous suballocations for BufferImageGranularity conflicts.
+ // Make bigger alignment if necessary.
+ if (bufferImageGranularity > 1 && !suballocations2nd.empty()) {
+ bool bufferImageGranularityConflict = false;
+ for (size_t prevSuballocIndex = suballocations2nd.size(); prevSuballocIndex--;) {
+ const VmaSuballocation &prevSuballoc = suballocations2nd[prevSuballocIndex];
+ if (VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity)) {
+ if (VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType)) {
+ bufferImageGranularityConflict = true;
+ break;
+ }
+ } else {
+ // Already on previous page.
+ break;
+ }
+ }
+ if (bufferImageGranularityConflict) {
+ resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
+ }
+ }
+
+ pAllocationRequest->itemsToMakeLostCount = 0;
+ pAllocationRequest->sumItemSize = 0;
+ size_t index1st = m_1stNullItemsBeginCount;
+
+ if (canMakeOtherLost) {
+ while (index1st < suballocations1st.size() &&
+ resultOffset + allocSize + VMA_DEBUG_MARGIN > suballocations1st[index1st].offset) {
+ // Next colliding allocation at the beginning of 1st vector found. Try to make it lost.
+ const VmaSuballocation &suballoc = suballocations1st[index1st];
+ if (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE) {
+ // No problem.
+ } else {
+ VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
+ if (suballoc.hAllocation->CanBecomeLost() &&
+ suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex) {
+ ++pAllocationRequest->itemsToMakeLostCount;
+ pAllocationRequest->sumItemSize += suballoc.size;
+ } else {
+ return false;
+ }
+ }
+ ++index1st;
+ }
+
+ // Check next suballocations for BufferImageGranularity conflicts.
+ // If conflict exists, we must mark more allocations lost or fail.
+ if (bufferImageGranularity > 1) {
+ while (index1st < suballocations1st.size()) {
+ const VmaSuballocation &suballoc = suballocations1st[index1st];
+ if (VmaBlocksOnSamePage(resultOffset, allocSize, suballoc.offset, bufferImageGranularity)) {
+ if (suballoc.hAllocation != VK_NULL_HANDLE) {
+ // Not checking actual VmaIsBufferImageGranularityConflict(allocType, suballoc.type).
+ if (suballoc.hAllocation->CanBecomeLost() &&
+ suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex) {
+ ++pAllocationRequest->itemsToMakeLostCount;
+ pAllocationRequest->sumItemSize += suballoc.size;
+ } else {
+ return false;
+ }
+ }
+ } else {
+ // Already on next page.
+ break;
+ }
+ ++index1st;
+ }
+ }
+
+ // Special case: There is not enough room at the end for this allocation, even after making all from the 1st lost.
+ if (index1st == suballocations1st.size() &&
+ resultOffset + allocSize + VMA_DEBUG_MARGIN > size) {
+ // TODO: Known limitation - this case is not implemented yet, so the allocation fails.
+ VMA_DEBUG_LOG("Unsupported special case in custom pool with linear allocation algorithm used as ring buffer with allocations that can be lost.");
+ }
+ }
+
+ // There is enough free space at the end after alignment.
+ if ((index1st == suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= size) ||
+ (index1st < suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= suballocations1st[index1st].offset)) {
+ // Check next suballocations for BufferImageGranularity conflicts.
+ // If conflict exists, allocation cannot be made here.
+ if (bufferImageGranularity > 1) {
+ for (size_t nextSuballocIndex = index1st;
+ nextSuballocIndex < suballocations1st.size();
+ nextSuballocIndex++) {
+ const VmaSuballocation &nextSuballoc = suballocations1st[nextSuballocIndex];
+ if (VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity)) {
+ if (VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type)) {
+ return false;
+ }
+ } else {
+ // Already on next page.
+ break;
+ }
+ }
+ }
+
+ // All tests passed: Success.
+ pAllocationRequest->offset = resultOffset;
+ pAllocationRequest->sumFreeSize =
+ (index1st < suballocations1st.size() ? suballocations1st[index1st].offset : size) - resultBaseOffset - pAllocationRequest->sumItemSize;
+ pAllocationRequest->type = VmaAllocationRequestType::EndOf2nd;
+ // pAllocationRequest->item, customData unused.
+ return true;
+ }
+ }
+
+ return false;
+}
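+
+/*
+Summary of the lower-address path above: it tries two placements in order.
+(1) At the end of the 1st vector, possible while 2nd is empty or a double
+stack; free space ends at the block end or at the top of the 2nd stack.
+(2) Wrapped around at the end of the 2nd vector, just before the first live
+item of 1st, possible while 2nd is empty or already a ring buffer; a request
+of type EndOf2nd later switches the block into ring-buffer mode in Alloc().
+In both cases VMA_DEBUG_MARGIN, the requested alignment, and
+bufferImageGranularity only push the offset forward; if the padded request no
+longer fits before the next obstacle, the function returns false and the
+caller falls back to another block.
+*/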
+
+bool VmaBlockMetadata_Linear::MakeRequestedAllocationsLost(
+ uint32_t currentFrameIndex,
+ uint32_t frameInUseCount,
+ VmaAllocationRequest *pAllocationRequest) {
+ if (pAllocationRequest->itemsToMakeLostCount == 0) {
+ return true;
+ }
+
+ VMA_ASSERT(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER);
+
+ // We always start from 1st.
+ SuballocationVectorType *suballocations = &AccessSuballocations1st();
+ size_t index = m_1stNullItemsBeginCount;
+ size_t madeLostCount = 0;
+ while (madeLostCount < pAllocationRequest->itemsToMakeLostCount) {
+ if (index == suballocations->size()) {
+ index = 0;
+ // If we reach the end of 1st, wrap around to the beginning of 2nd (ring-buffer mode) or of 1st.
+ if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER) {
+ suballocations = &AccessSuballocations2nd();
+ }
+ // else: m_2ndVectorMode == SECOND_VECTOR_EMPTY:
+ // suballocations continues pointing at AccessSuballocations1st().
+ VMA_ASSERT(!suballocations->empty());
+ }
+ VmaSuballocation &suballoc = (*suballocations)[index];
+ if (suballoc.type != VMA_SUBALLOCATION_TYPE_FREE) {
+ VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
+ VMA_ASSERT(suballoc.hAllocation->CanBecomeLost());
+ if (suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount)) {
+ suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
+ suballoc.hAllocation = VK_NULL_HANDLE;
+ m_SumFreeSize += suballoc.size;
+ if (suballocations == &AccessSuballocations1st()) {
+ ++m_1stNullItemsMiddleCount;
+ } else {
+ ++m_2ndNullItemsCount;
+ }
+ ++madeLostCount;
+ } else {
+ return false;
+ }
+ }
+ ++index;
+ }
+
+ CleanupAfterFree();
+ //VMA_HEAVY_ASSERT(Validate()); // Already called by CleanupAfterFree().
+
+ return true;
+}
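+
+/*
+A hedged sketch of the "lost allocations" contract this function serves (the
+names are the public VMA API; the usage around them is illustrative). An
+allocation opts in with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT and must be
+touched while in use; a later request may then reclaim untouched allocations
+via MakeRequestedAllocationsLost():
+
+    vmaSetCurrentFrameIndex(allocator, frameIndex); // once per frame
+
+    if (vmaTouchAllocation(allocator, alloc) == VK_FALSE) {
+        // The allocation was made lost by a competing request:
+        // recreate the resource before using it again.
+    }
+*/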
+
+uint32_t VmaBlockMetadata_Linear::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount) {
+ uint32_t lostAllocationCount = 0;
+
+ SuballocationVectorType &suballocations1st = AccessSuballocations1st();
+ for (size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i) {
+ VmaSuballocation &suballoc = suballocations1st[i];
+ if (suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
+ suballoc.hAllocation->CanBecomeLost() &&
+ suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount)) {
+ suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
+ suballoc.hAllocation = VK_NULL_HANDLE;
+ ++m_1stNullItemsMiddleCount;
+ m_SumFreeSize += suballoc.size;
+ ++lostAllocationCount;
+ }
+ }
+
+ SuballocationVectorType &suballocations2nd = AccessSuballocations2nd();
+ for (size_t i = 0, count = suballocations2nd.size(); i < count; ++i) {
+ VmaSuballocation &suballoc = suballocations2nd[i];
+ if (suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
+ suballoc.hAllocation->CanBecomeLost() &&
+ suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount)) {
+ suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
+ suballoc.hAllocation = VK_NULL_HANDLE;
+ ++m_2ndNullItemsCount;
+ m_SumFreeSize += suballoc.size;
+ ++lostAllocationCount;
+ }
+ }
+
+ if (lostAllocationCount) {
+ CleanupAfterFree();
+ }
+
+ return lostAllocationCount;
+}
+
+VkResult VmaBlockMetadata_Linear::CheckCorruption(const void *pBlockData) {
+ SuballocationVectorType &suballocations1st = AccessSuballocations1st();
+ for (size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i) {
+ const VmaSuballocation &suballoc = suballocations1st[i];
+ if (suballoc.type != VMA_SUBALLOCATION_TYPE_FREE) {
+ if (!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN)) {
+ VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
+ return VK_ERROR_VALIDATION_FAILED_EXT;
+ }
+ if (!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size)) {
+ VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
+ return VK_ERROR_VALIDATION_FAILED_EXT;
+ }
+ }
+ }
+
+ SuballocationVectorType &suballocations2nd = AccessSuballocations2nd();
+ for (size_t i = 0, count = suballocations2nd.size(); i < count; ++i) {
+ const VmaSuballocation &suballoc = suballocations2nd[i];
+ if (suballoc.type != VMA_SUBALLOCATION_TYPE_FREE) {
+ if (!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN)) {
+ VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
+ return VK_ERROR_VALIDATION_FAILED_EXT;
+ }
+ if (!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size)) {
+ VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
+ return VK_ERROR_VALIDATION_FAILED_EXT;
+ }
+ }
+ }
+
+ return VK_SUCCESS;
+}
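+
+/*
+A hedged configuration sketch (illustrative): the magic values validated above
+are only written when a non-zero debug margin is enabled at compile time,
+before this implementation is compiled:
+
+    #define VMA_DEBUG_MARGIN 16
+    #define VMA_DEBUG_DETECT_CORRUPTION 1
+    #include "vk_mem_alloc.h"
+
+    // At runtime, UINT32_MAX means "check all memory types":
+    VkResult res = vmaCheckCorruption(allocator, UINT32_MAX);
+
+A VK_ERROR_VALIDATION_FAILED_EXT result reports a write that strayed into the
+margin before or after an allocation.
+*/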
+
+void VmaBlockMetadata_Linear::Alloc(
+ const VmaAllocationRequest &request,
+ VmaSuballocationType type,
+ VkDeviceSize allocSize,
+ VmaAllocation hAllocation) {
+ const VmaSuballocation newSuballoc = { request.offset, allocSize, hAllocation, type };
+
+ switch (request.type) {
+ case VmaAllocationRequestType::UpperAddress: {
+ VMA_ASSERT(m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER &&
+ "CRITICAL ERROR: Trying to use linear allocator as double stack while it was already used as ring buffer.");
+ SuballocationVectorType &suballocations2nd = AccessSuballocations2nd();
+ suballocations2nd.push_back(newSuballoc);
+ m_2ndVectorMode = SECOND_VECTOR_DOUBLE_STACK;
+ } break;
+ case VmaAllocationRequestType::EndOf1st: {
+ SuballocationVectorType &suballocations1st = AccessSuballocations1st();
+
+ VMA_ASSERT(suballocations1st.empty() ||
+ request.offset >= suballocations1st.back().offset + suballocations1st.back().size);
+ // Check if it fits before the end of the block.
+ VMA_ASSERT(request.offset + allocSize <= GetSize());
+
+ suballocations1st.push_back(newSuballoc);
+ } break;
+ case VmaAllocationRequestType::EndOf2nd: {
+ SuballocationVectorType &suballocations1st = AccessSuballocations1st();
+ // New allocation at the end of 2-part ring buffer, so before first allocation from 1st vector.
+ VMA_ASSERT(!suballocations1st.empty() &&
+ request.offset + allocSize <= suballocations1st[m_1stNullItemsBeginCount].offset);
+ SuballocationVectorType &suballocations2nd = AccessSuballocations2nd();
+
+ switch (m_2ndVectorMode) {
+ case SECOND_VECTOR_EMPTY:
+ // First allocation from second part ring buffer.
+ VMA_ASSERT(suballocations2nd.empty());
+ m_2ndVectorMode = SECOND_VECTOR_RING_BUFFER;
+ break;
+ case SECOND_VECTOR_RING_BUFFER:
+ // 2-part ring buffer is already started.
+ VMA_ASSERT(!suballocations2nd.empty());
+ break;
+ case SECOND_VECTOR_DOUBLE_STACK:
+ VMA_ASSERT(0 && "CRITICAL ERROR: Trying to use linear allocator as ring buffer while it was already used as double stack.");
+ break;
+ default:
+ VMA_ASSERT(0);
+ }
+
+ suballocations2nd.push_back(newSuballoc);
+ } break;
+ default:
+ VMA_ASSERT(0 && "CRITICAL INTERNAL ERROR.");
+ }
+
+ m_SumFreeSize -= newSuballoc.size;
+}
+
+void VmaBlockMetadata_Linear::Free(const VmaAllocation allocation) {
+ FreeAtOffset(allocation->GetOffset());
+}
+
+void VmaBlockMetadata_Linear::FreeAtOffset(VkDeviceSize offset) {
+ SuballocationVectorType &suballocations1st = AccessSuballocations1st();
+ SuballocationVectorType &suballocations2nd = AccessSuballocations2nd();
+
+ if (!suballocations1st.empty()) {
+ // If it is the first live allocation of 1st: mark it free and extend the null prefix.
+ VmaSuballocation &firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
+ if (firstSuballoc.offset == offset) {
+ firstSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
+ firstSuballoc.hAllocation = VK_NULL_HANDLE;
+ m_SumFreeSize += firstSuballoc.size;
+ ++m_1stNullItemsBeginCount;
+ CleanupAfterFree();
+ return;
+ }
+ }
+
+ // Last allocation in 2-part ring buffer or top of upper stack (same logic).
+ if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ||
+ m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK) {
+ VmaSuballocation &lastSuballoc = suballocations2nd.back();
+ if (lastSuballoc.offset == offset) {
+ m_SumFreeSize += lastSuballoc.size;
+ suballocations2nd.pop_back();
+ CleanupAfterFree();
+ return;
+ }
+ }
+ // Last allocation in 1st vector.
+ else if (m_2ndVectorMode == SECOND_VECTOR_EMPTY) {
+ VmaSuballocation &lastSuballoc = suballocations1st.back();
+ if (lastSuballoc.offset == offset) {
+ m_SumFreeSize += lastSuballoc.size;
+ suballocations1st.pop_back();
+ CleanupAfterFree();
+ return;
+ }
+ }
+
+ // Item from the middle of 1st vector.
+ {
+ VmaSuballocation refSuballoc;
+ refSuballoc.offset = offset;
+ // Rest of members stays uninitialized intentionally for better performance.
+ SuballocationVectorType::iterator it = VmaVectorFindSorted<VmaSuballocationOffsetLess>(
+ suballocations1st.begin() + m_1stNullItemsBeginCount,
+ suballocations1st.end(),
+ refSuballoc);
+ if (it != suballocations1st.end()) {
+ it->type = VMA_SUBALLOCATION_TYPE_FREE;
+ it->hAllocation = VK_NULL_HANDLE;
+ ++m_1stNullItemsMiddleCount;
+ m_SumFreeSize += it->size;
+ CleanupAfterFree();
+ return;
+ }
+ }
+
+ if (m_2ndVectorMode != SECOND_VECTOR_EMPTY) {
+ // Item from the middle of 2nd vector.
+ VmaSuballocation refSuballoc;
+ refSuballoc.offset = offset;
+ // Rest of members stays uninitialized intentionally for better performance.
+ SuballocationVectorType::iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ?
+ VmaVectorFindSorted<VmaSuballocationOffsetLess>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc) :
+ VmaVectorFindSorted<VmaSuballocationOffsetGreater>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc);
+ if (it != suballocations2nd.end()) {
+ it->type = VMA_SUBALLOCATION_TYPE_FREE;
+ it->hAllocation = VK_NULL_HANDLE;
+ ++m_2ndNullItemsCount;
+ m_SumFreeSize += it->size;
+ CleanupAfterFree();
+ return;
+ }
+ }
+
+ VMA_ASSERT(0 && "Allocation to free not found in linear allocator!");
+}
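+
+/*
+Summary of the fast paths above: FreeAtOffset() checks, in order, (1) the
+first live item of 1st (cheap release from the front), (2) the back of 2nd
+(top of the upper stack / newest ring-buffer item), (3) the back of 1st when
+2nd is empty, then falls back to binary search for (4) the middle of 1st and
+(5) the middle of 2nd. Middle items are only marked free and reclaimed later
+by CleanupAfterFree(); end items can be popped immediately.
+*/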
+
+bool VmaBlockMetadata_Linear::ShouldCompact1st() const {
+ const size_t nullItemCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
+ const size_t suballocCount = AccessSuballocations1st().size();
+ return suballocCount > 32 && nullItemCount * 2 >= (suballocCount - nullItemCount) * 3;
+}
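+
+/*
+Worked numbers for the heuristic above: nullItemCount * 2 >=
+(suballocCount - nullItemCount) * 3 rearranges to nullItemCount >= 0.6 *
+suballocCount, so compaction triggers once at least 60% of the (more than 32)
+entries are null. E.g. suballocCount = 100: 60 nulls gives 120 >= 120 ->
+compact, while 59 nulls gives 118 >= 123 -> keep.
+*/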
+
+void VmaBlockMetadata_Linear::CleanupAfterFree() {
+ SuballocationVectorType &suballocations1st = AccessSuballocations1st();
+ SuballocationVectorType &suballocations2nd = AccessSuballocations2nd();
+
+ if (IsEmpty()) {
+ suballocations1st.clear();
+ suballocations2nd.clear();
+ m_1stNullItemsBeginCount = 0;
+ m_1stNullItemsMiddleCount = 0;
+ m_2ndNullItemsCount = 0;
+ m_2ndVectorMode = SECOND_VECTOR_EMPTY;
+ } else {
+ const size_t suballoc1stCount = suballocations1st.size();
+ const size_t nullItem1stCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
+ VMA_ASSERT(nullItem1stCount <= suballoc1stCount);
+
+ // Find more null items at the beginning of 1st vector.
+ while (m_1stNullItemsBeginCount < suballoc1stCount &&
+ suballocations1st[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE) {
+ ++m_1stNullItemsBeginCount;
+ --m_1stNullItemsMiddleCount;
+ }
+
+ // Find more null items at the end of 1st vector.
+ while (m_1stNullItemsMiddleCount > 0 &&
+ suballocations1st.back().hAllocation == VK_NULL_HANDLE) {
+ --m_1stNullItemsMiddleCount;
+ suballocations1st.pop_back();
+ }
+
+ // Find more null items at the end of 2nd vector.
+ while (m_2ndNullItemsCount > 0 &&
+ suballocations2nd.back().hAllocation == VK_NULL_HANDLE) {
+ --m_2ndNullItemsCount;
+ suballocations2nd.pop_back();
+ }
+
+ // Find more null items at the beginning of 2nd vector.
+ while (m_2ndNullItemsCount > 0 &&
+ suballocations2nd[0].hAllocation == VK_NULL_HANDLE) {
+ --m_2ndNullItemsCount;
+ VmaVectorRemove(suballocations2nd, 0);
+ }
+
+ if (ShouldCompact1st()) {
+ const size_t nonNullItemCount = suballoc1stCount - nullItem1stCount;
+ size_t srcIndex = m_1stNullItemsBeginCount;
+ for (size_t dstIndex = 0; dstIndex < nonNullItemCount; ++dstIndex) {
+ while (suballocations1st[srcIndex].hAllocation == VK_NULL_HANDLE) {
+ ++srcIndex;
+ }
+ if (dstIndex != srcIndex) {
+ suballocations1st[dstIndex] = suballocations1st[srcIndex];
+ }
+ ++srcIndex;
+ }
+ suballocations1st.resize(nonNullItemCount);
+ m_1stNullItemsBeginCount = 0;
+ m_1stNullItemsMiddleCount = 0;
+ }
+
+ // 2nd vector became empty.
+ if (suballocations2nd.empty()) {
+ m_2ndVectorMode = SECOND_VECTOR_EMPTY;
+ }
+
+ // 1st vector became empty.
+ if (suballocations1st.size() - m_1stNullItemsBeginCount == 0) {
+ suballocations1st.clear();
+ m_1stNullItemsBeginCount = 0;
+
+ if (!suballocations2nd.empty() && m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER) {
+ // Swap 1st with 2nd. Now 2nd is empty.
+ m_2ndVectorMode = SECOND_VECTOR_EMPTY;
+ m_1stNullItemsMiddleCount = m_2ndNullItemsCount;
+ while (m_1stNullItemsBeginCount < suballocations2nd.size() &&
+ suballocations2nd[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE) {
+ ++m_1stNullItemsBeginCount;
+ --m_1stNullItemsMiddleCount;
+ }
+ m_2ndNullItemsCount = 0;
+ m_1stVectorIndex ^= 1;
+ }
+ }
+ }
+
+ VMA_HEAVY_ASSERT(Validate());
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// class VmaBlockMetadata_Buddy
+
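+/*
+A minimal usage sketch (illustrative only): this metadata class backs pools
+created with VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT. The usable block size is
+rounded down to a power of two, each level halves the node size, and every
+allocation occupies the smallest power-of-two node that fits it, trading up to
+50% internal fragmentation per node for O(log N) allocation and free:
+
+    VmaPoolCreateInfo poolInfo = {};
+    poolInfo.memoryTypeIndex = memTypeIndex;
+    poolInfo.flags = VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT;
+
+    VmaPool pool = VK_NULL_HANDLE;
+    VkResult res = vmaCreatePool(allocator, &poolInfo, &pool);
+*/
+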
+VmaBlockMetadata_Buddy::VmaBlockMetadata_Buddy(VmaAllocator hAllocator) :
+ VmaBlockMetadata(hAllocator),
+ m_Root(VMA_NULL),
+ m_AllocationCount(0),
+ m_FreeCount(1),
+ m_SumFreeSize(0) {
+ memset(m_FreeList, 0, sizeof(m_FreeList));
+}
+
+VmaBlockMetadata_Buddy::~VmaBlockMetadata_Buddy() {
+ DeleteNode(m_Root);
+}
+
+void VmaBlockMetadata_Buddy::Init(VkDeviceSize size) {
+ VmaBlockMetadata::Init(size);
+
+ m_UsableSize = VmaPrevPow2(size);
+ m_SumFreeSize = m_UsableSize;
+
+ // Calculate m_LevelCount.
+ m_LevelCount = 1;
+ while (m_LevelCount < MAX_LEVELS &&
+ LevelToNodeSize(m_LevelCount) >= MIN_NODE_SIZE) {
+ ++m_LevelCount;
+ }
+
+ Node *rootNode = vma_new(GetAllocationCallbacks(), Node)();
+ rootNode->offset = 0;
+ rootNode->type = Node::TYPE_FREE;
+ rootNode->parent = VMA_NULL;
+ rootNode->buddy = VMA_NULL;
+
+ m_Root = rootNode;
+ AddToFreeListFront(0, rootNode);
+}
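+
+/*
+Worked example for Init() above (numbers illustrative): for a block of
+100 MiB, m_UsableSize = VmaPrevPow2(100 MiB) = 64 MiB and the remaining
+~36 MiB is unusable by this algorithm. With LevelToNodeSize() halving per
+level, level 0 covers 64 MiB, level 1 has two 32 MiB nodes, level 2 four
+16 MiB nodes, and so on until MAX_LEVELS or the minimum node size stops the
+loop.
+*/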
+
+bool VmaBlockMetadata_Buddy::Validate() const {
+ // Validate tree.
+ ValidationContext ctx;
+ if (!ValidateNode(ctx, VMA_NULL, m_Root, 0, LevelToNodeSize(0))) {
+ VMA_VALIDATE(false && "ValidateNode failed.");
+ }
+ VMA_VALIDATE(m_AllocationCount == ctx.calculatedAllocationCount);
+ VMA_VALIDATE(m_SumFreeSize == ctx.calculatedSumFreeSize);
+
+ // Validate free node lists.
+ for (uint32_t level = 0; level < m_LevelCount; ++level) {
+ VMA_VALIDATE(m_FreeList[level].front == VMA_NULL ||
+ m_FreeList[level].front->free.prev == VMA_NULL);
+
+ for (Node *node = m_FreeList[level].front;
+ node != VMA_NULL;
+ node = node->free.next) {
+ VMA_VALIDATE(node->type == Node::TYPE_FREE);
+
+ if (node->free.next == VMA_NULL) {
+ VMA_VALIDATE(m_FreeList[level].back == node);
+ } else {
+ VMA_VALIDATE(node->free.next->free.prev == node);
+ }
+ }
+ }
+
+ // Validate that free lists at higher levels are empty.
+ for (uint32_t level = m_LevelCount; level < MAX_LEVELS; ++level) {
+ VMA_VALIDATE(m_FreeList[level].front == VMA_NULL && m_FreeList[level].back == VMA_NULL);
+ }
+
+ return true;
+}
+
+VkDeviceSize VmaBlockMetadata_Buddy::GetUnusedRangeSizeMax() const {
+ for (uint32_t level = 0; level < m_LevelCount; ++level) {
+ if (m_FreeList[level].front != VMA_NULL) {
+ return LevelToNodeSize(level);
+ }
+ }
+ return 0;
+}
+
+void VmaBlockMetadata_Buddy::CalcAllocationStatInfo(VmaStatInfo &outInfo) const {
+ const VkDeviceSize unusableSize = GetUnusableSize();
+
+ outInfo.blockCount = 1;
+
+ outInfo.allocationCount = outInfo.unusedRangeCount = 0;
+ outInfo.usedBytes = outInfo.unusedBytes = 0;
+
+ outInfo.allocationSizeMax = outInfo.unusedRangeSizeMax = 0;
+ outInfo.allocationSizeMin = outInfo.unusedRangeSizeMin = UINT64_MAX;
+ outInfo.allocationSizeAvg = outInfo.unusedRangeSizeAvg = 0; // Unused.
+
+ CalcAllocationStatInfoNode(outInfo, m_Root, LevelToNodeSize(0));
+
+ if (unusableSize > 0) {
+ ++outInfo.unusedRangeCount;
+ outInfo.unusedBytes += unusableSize;
+ outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusableSize);
+ outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusableSize);
+ }
+}
+
+void VmaBlockMetadata_Buddy::AddPoolStats(VmaPoolStats &inoutStats) const {
+ const VkDeviceSize unusableSize = GetUnusableSize();
+
+ inoutStats.size += GetSize();
+ inoutStats.unusedSize += m_SumFreeSize + unusableSize;
+ inoutStats.allocationCount += m_AllocationCount;
+ inoutStats.unusedRangeCount += m_FreeCount;
+ inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
+
+ if (unusableSize > 0) {
+ ++inoutStats.unusedRangeCount;
+ // Not updating inoutStats.unusedRangeSizeMax with unusableSize because this space is not available for allocations.
+ }
+}
+
+#if VMA_STATS_STRING_ENABLED
+
+void VmaBlockMetadata_Buddy::PrintDetailedMap(class VmaJsonWriter &json) const {
+ // TODO optimize
+ VmaStatInfo stat;
+ CalcAllocationStatInfo(stat);
+
+ PrintDetailedMap_Begin(
+ json,
+ stat.unusedBytes,
+ stat.allocationCount,
+ stat.unusedRangeCount);
+
+ PrintDetailedMapNode(json, m_Root, LevelToNodeSize(0));
+
+ const VkDeviceSize unusableSize = GetUnusableSize();
+ if (unusableSize > 0) {
+ PrintDetailedMap_UnusedRange(json,
+ m_UsableSize, // offset
+ unusableSize); // size
+ }
+
+ PrintDetailedMap_End(json);
+}
+
+#endif // #if VMA_STATS_STRING_ENABLED
+
+bool VmaBlockMetadata_Buddy::CreateAllocationRequest(
+ uint32_t currentFrameIndex,
+ uint32_t frameInUseCount,
+ VkDeviceSize bufferImageGranularity,
+ VkDeviceSize allocSize,
+ VkDeviceSize allocAlignment,
+ bool upperAddress,
+ VmaSuballocationType allocType,
+ bool canMakeOtherLost,
+ uint32_t strategy,
+ VmaAllocationRequest *pAllocationRequest) {
+ VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm.");
+
+ // Simple way to respect bufferImageGranularity; may be optimized some day.
+ // Whenever the allocation might contain an OPTIMAL image, pad alignment and size up to the granularity:
+ if (allocType == VMA_SUBALLOCATION_TYPE_UNKNOWN ||
+ allocType == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
+ allocType == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL) {
+ allocAlignment = VMA_MAX(allocAlignment, bufferImageGranularity);
+ allocSize = VMA_MAX(allocSize, bufferImageGranularity);
+ }
+
+ if (allocSize > m_UsableSize) {
+ return false;
+ }
+
+ const uint32_t targetLevel = AllocSizeToLevel(allocSize);
+ for (uint32_t level = targetLevel + 1; level--;) {
+ for (Node *freeNode = m_FreeList[level].front;
+ freeNode != VMA_NULL;
+ freeNode = freeNode->free.next) {
+ if (freeNode->offset % allocAlignment == 0) {
+ pAllocationRequest->type = VmaAllocationRequestType::Normal;
+ pAllocationRequest->offset = freeNode->offset;
+ pAllocationRequest->sumFreeSize = LevelToNodeSize(level);
+ pAllocationRequest->sumItemSize = 0;
+ pAllocationRequest->itemsToMakeLostCount = 0;
+ pAllocationRequest->customData = (void *)(uintptr_t)level;
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
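+
+/*
+Summary of the search above: the loop walks levels from targetLevel (the
+smallest node size that fits allocSize) back toward level 0 (larger nodes) and
+returns the first free node whose offset already satisfies allocAlignment.
+customData carries the chosen level to Alloc(), which splits the node down to
+targetLevel if it came from a coarser level.
+*/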
+
+bool VmaBlockMetadata_Buddy::MakeRequestedAllocationsLost(
+ uint32_t currentFrameIndex,
+ uint32_t frameInUseCount,
+ VmaAllocationRequest *pAllocationRequest) {
+ /*
+ Lost allocations are not supported in buddy allocator at the moment.
+ Support might be added in the future.
+ */
+ return pAllocationRequest->itemsToMakeLostCount == 0;
+}
+
+uint32_t VmaBlockMetadata_Buddy::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount) {
+ /*
+ Lost allocations are not supported in buddy allocator at the moment.
+ Support might be added in the future.
+ */
+ return 0;
+}
+
+void VmaBlockMetadata_Buddy::Alloc(
+ const VmaAllocationRequest &request,
+ VmaSuballocationType type,
+ VkDeviceSize allocSize,
+ VmaAllocation hAllocation) {
+ VMA_ASSERT(request.type == VmaAllocationRequestType::Normal);
+
+ const uint32_t targetLevel = AllocSizeToLevel(allocSize);
+ uint32_t currLevel = (uint32_t)(uintptr_t)request.customData;
+
+ Node *currNode = m_FreeList[currLevel].front;
+ VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
+ while (currNode->offset != request.offset) {
+ currNode = currNode->free.next;
+ VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
+ }
+
+ // Go down, splitting free nodes.
+ while (currLevel < targetLevel) {
+ // currNode is already first free node at currLevel.
+ // Remove it from list of free nodes at this currLevel.
+ RemoveFromFreeList(currLevel, currNode);
+
+ const uint32_t childrenLevel = currLevel + 1;
+
+ // Create two free sub-nodes.
+ Node *leftChild = vma_new(GetAllocationCallbacks(), Node)();
+ Node *rightChild = vma_new(GetAllocationCallbacks(), Node)();
+
+ leftChild->offset = currNode->offset;
+ leftChild->type = Node::TYPE_FREE;
+ leftChild->parent = currNode;
+ leftChild->buddy = rightChild;
+
+ rightChild->offset = currNode->offset + LevelToNodeSize(childrenLevel);
+ rightChild->type = Node::TYPE_FREE;
+ rightChild->parent = currNode;
+ rightChild->buddy = leftChild;
+
+ // Convert current currNode to split type.
+ currNode->type = Node::TYPE_SPLIT;
+ currNode->split.leftChild = leftChild;
+
+ // Add child nodes to free list. Order is important!
+ AddToFreeListFront(childrenLevel, rightChild);
+ AddToFreeListFront(childrenLevel, leftChild);
+
+ ++m_FreeCount;
+ //m_SumFreeSize -= LevelToNodeSize(currLevel) % 2; // Useful only when level node sizes can be non power of 2.
+ ++currLevel;
+ currNode = m_FreeList[currLevel].front;
+
+ /*
+ We can be sure that currNode, as the left child of the node previously split,
+ also fulfills the alignment requirement.
+ */
+ }
+
+ // Remove from free list.
+ VMA_ASSERT(currLevel == targetLevel &&
+ currNode != VMA_NULL &&
+ currNode->type == Node::TYPE_FREE);
+ RemoveFromFreeList(currLevel, currNode);
+
+ // Convert to allocation node.
+ currNode->type = Node::TYPE_ALLOCATION;
+ currNode->allocation.alloc = hAllocation;
+
+ ++m_AllocationCount;
+ --m_FreeCount;
+ m_SumFreeSize -= allocSize;
+}
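+
+// Illustrative walkthrough (not part of the library): allocating 48 B from a
+// 256 B buddy block, with node sizes 256/128/64 at levels 0/1/2, targets
+// level 2 and performs two splits:
+//
+//   level 0: [256 free]                 -> split into two 128 B children
+//   level 1: [128 free][128 free]       -> split the left child again
+//   level 2: [64 alloc][64 free][128 free]
+//
+// Each iteration of the split loop above removes one free node and adds two,
+// hence the single ++m_FreeCount per split.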
+
+void VmaBlockMetadata_Buddy::DeleteNode(Node *node) {
+ if (node->type == Node::TYPE_SPLIT) {
+ DeleteNode(node->split.leftChild->buddy);
+ DeleteNode(node->split.leftChild);
+ }
+
+ vma_delete(GetAllocationCallbacks(), node);
+}
+
+bool VmaBlockMetadata_Buddy::ValidateNode(ValidationContext &ctx, const Node *parent, const Node *curr, uint32_t level, VkDeviceSize levelNodeSize) const {
+ VMA_VALIDATE(level < m_LevelCount);
+ VMA_VALIDATE(curr->parent == parent);
+ VMA_VALIDATE((curr->buddy == VMA_NULL) == (parent == VMA_NULL));
+ VMA_VALIDATE(curr->buddy == VMA_NULL || curr->buddy->buddy == curr);
+ switch (curr->type) {
+ case Node::TYPE_FREE:
+ // curr->free.prev, next are validated separately.
+ ctx.calculatedSumFreeSize += levelNodeSize;
+ ++ctx.calculatedFreeCount;
+ break;
+ case Node::TYPE_ALLOCATION:
+ // Validate the handle before dereferencing it below.
+ VMA_VALIDATE(curr->allocation.alloc != VK_NULL_HANDLE);
+ ++ctx.calculatedAllocationCount;
+ ctx.calculatedSumFreeSize += levelNodeSize - curr->allocation.alloc->GetSize();
+ break;
+ case Node::TYPE_SPLIT: {
+ const uint32_t childrenLevel = level + 1;
+ const VkDeviceSize childrenLevelNodeSize = levelNodeSize / 2;
+ const Node *const leftChild = curr->split.leftChild;
+ VMA_VALIDATE(leftChild != VMA_NULL);
+ VMA_VALIDATE(leftChild->offset == curr->offset);
+ if (!ValidateNode(ctx, curr, leftChild, childrenLevel, childrenLevelNodeSize)) {
+ VMA_VALIDATE(false && "ValidateNode for left child failed.");
+ }
+ const Node *const rightChild = leftChild->buddy;
+ VMA_VALIDATE(rightChild->offset == curr->offset + childrenLevelNodeSize);
+ if (!ValidateNode(ctx, curr, rightChild, childrenLevel, childrenLevelNodeSize)) {
+ VMA_VALIDATE(false && "ValidateNode for right child failed.");
+ }
+ } break;
+ default:
+ return false;
+ }
+
+ return true;
+}
+
+uint32_t VmaBlockMetadata_Buddy::AllocSizeToLevel(VkDeviceSize allocSize) const {
+ // This could be optimized, e.g. by using std::bit_width (formerly std::log2p1) from C++20.
+ uint32_t level = 0;
+ VkDeviceSize currLevelNodeSize = m_UsableSize;
+ VkDeviceSize nextLevelNodeSize = currLevelNodeSize >> 1;
+ while (allocSize <= nextLevelNodeSize && level + 1 < m_LevelCount) {
+ ++level;
+ currLevelNodeSize = nextLevelNodeSize;
+ nextLevelNodeSize = currLevelNodeSize >> 1;
+ }
+ return level;
+}
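+
+// Worked example (illustrative): with m_UsableSize = 256 and m_LevelCount = 4,
+// node sizes per level are 256, 128, 64, 32. For allocSize = 100 the loop
+// advances while the next (smaller) node still fits: 100 <= 128 -> level 1,
+// 100 <= 64 fails -> return level 1 (node size 128), the deepest level whose
+// node size still covers the request.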
+
+void VmaBlockMetadata_Buddy::FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset) {
+ // Find node and level.
+ Node *node = m_Root;
+ VkDeviceSize nodeOffset = 0;
+ uint32_t level = 0;
+ VkDeviceSize levelNodeSize = LevelToNodeSize(0);
+ while (node->type == Node::TYPE_SPLIT) {
+ const VkDeviceSize nextLevelSize = levelNodeSize >> 1;
+ if (offset < nodeOffset + nextLevelSize) {
+ node = node->split.leftChild;
+ } else {
+ node = node->split.leftChild->buddy;
+ nodeOffset += nextLevelSize;
+ }
+ ++level;
+ levelNodeSize = nextLevelSize;
+ }
+
+ VMA_ASSERT(node != VMA_NULL && node->type == Node::TYPE_ALLOCATION);
+ VMA_ASSERT(alloc == VK_NULL_HANDLE || node->allocation.alloc == alloc);
+
+ ++m_FreeCount;
+ --m_AllocationCount;
+ m_SumFreeSize += alloc->GetSize();
+
+ node->type = Node::TYPE_FREE;
+
+ // Join free nodes if possible.
+ while (level > 0 && node->buddy->type == Node::TYPE_FREE) {
+ RemoveFromFreeList(level, node->buddy);
+ Node *const parent = node->parent;
+
+ vma_delete(GetAllocationCallbacks(), node->buddy);
+ vma_delete(GetAllocationCallbacks(), node);
+ parent->type = Node::TYPE_FREE;
+
+ node = parent;
+ --level;
+ //m_SumFreeSize += LevelToNodeSize(level) % 2; // Useful only when level node sizes can be non power of 2.
+ --m_FreeCount;
+ }
+
+ AddToFreeListFront(level, node);
+}
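+
+// Illustrative note: freeing an allocation whose buddy is also free triggers
+// the merge loop above. E.g. two free 64 B siblings collapse back into one
+// 128 B node; if that node's buddy is free too, the merge repeats up the tree,
+// decrementing m_FreeCount once per merge (two free children collapse into one
+// parent).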
+
+void VmaBlockMetadata_Buddy::CalcAllocationStatInfoNode(VmaStatInfo &outInfo, const Node *node, VkDeviceSize levelNodeSize) const {
+ switch (node->type) {
+ case Node::TYPE_FREE:
+ ++outInfo.unusedRangeCount;
+ outInfo.unusedBytes += levelNodeSize;
+ outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, levelNodeSize);
+ outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, levelNodeSize);
+ break;
+ case Node::TYPE_ALLOCATION: {
+ const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
+ ++outInfo.allocationCount;
+ outInfo.usedBytes += allocSize;
+ outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, allocSize);
+ outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, allocSize);
+
+ const VkDeviceSize unusedRangeSize = levelNodeSize - allocSize;
+ if (unusedRangeSize > 0) {
+ ++outInfo.unusedRangeCount;
+ outInfo.unusedBytes += unusedRangeSize;
+ outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
+ outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
+ }
+ } break;
+ case Node::TYPE_SPLIT: {
+ const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
+ const Node *const leftChild = node->split.leftChild;
+ CalcAllocationStatInfoNode(outInfo, leftChild, childrenNodeSize);
+ const Node *const rightChild = leftChild->buddy;
+ CalcAllocationStatInfoNode(outInfo, rightChild, childrenNodeSize);
+ } break;
+ default:
+ VMA_ASSERT(0);
+ }
+}
+
+void VmaBlockMetadata_Buddy::AddToFreeListFront(uint32_t level, Node *node) {
+ VMA_ASSERT(node->type == Node::TYPE_FREE);
+
+ // List is empty.
+ Node *const frontNode = m_FreeList[level].front;
+ if (frontNode == VMA_NULL) {
+ VMA_ASSERT(m_FreeList[level].back == VMA_NULL);
+ node->free.prev = node->free.next = VMA_NULL;
+ m_FreeList[level].front = m_FreeList[level].back = node;
+ } else {
+ VMA_ASSERT(frontNode->free.prev == VMA_NULL);
+ node->free.prev = VMA_NULL;
+ node->free.next = frontNode;
+ frontNode->free.prev = node;
+ m_FreeList[level].front = node;
+ }
+}
+
+void VmaBlockMetadata_Buddy::RemoveFromFreeList(uint32_t level, Node *node) {
+ VMA_ASSERT(m_FreeList[level].front != VMA_NULL);
+
+ // It is at the front.
+ if (node->free.prev == VMA_NULL) {
+ VMA_ASSERT(m_FreeList[level].front == node);
+ m_FreeList[level].front = node->free.next;
+ } else {
+ Node *const prevFreeNode = node->free.prev;
+ VMA_ASSERT(prevFreeNode->free.next == node);
+ prevFreeNode->free.next = node->free.next;
+ }
+
+ // It is at the back.
+ if (node->free.next == VMA_NULL) {
+ VMA_ASSERT(m_FreeList[level].back == node);
+ m_FreeList[level].back = node->free.prev;
+ } else {
+ Node *const nextFreeNode = node->free.next;
+ VMA_ASSERT(nextFreeNode->free.prev == node);
+ nextFreeNode->free.prev = node->free.prev;
+ }
+}
+
+#if VMA_STATS_STRING_ENABLED
+void VmaBlockMetadata_Buddy::PrintDetailedMapNode(class VmaJsonWriter &json, const Node *node, VkDeviceSize levelNodeSize) const {
+ switch (node->type) {
+ case Node::TYPE_FREE:
+ PrintDetailedMap_UnusedRange(json, node->offset, levelNodeSize);
+ break;
+ case Node::TYPE_ALLOCATION: {
+ PrintDetailedMap_Allocation(json, node->offset, node->allocation.alloc);
+ const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
+ if (allocSize < levelNodeSize) {
+ PrintDetailedMap_UnusedRange(json, node->offset + allocSize, levelNodeSize - allocSize);
+ }
+ } break;
+ case Node::TYPE_SPLIT: {
+ const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
+ const Node *const leftChild = node->split.leftChild;
+ PrintDetailedMapNode(json, leftChild, childrenNodeSize);
+ const Node *const rightChild = leftChild->buddy;
+ PrintDetailedMapNode(json, rightChild, childrenNodeSize);
+ } break;
+ default:
+ VMA_ASSERT(0);
+ }
+}
+#endif // #if VMA_STATS_STRING_ENABLED
+
+////////////////////////////////////////////////////////////////////////////////
+// class VmaDeviceMemoryBlock
+
+VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
+ m_pMetadata(VMA_NULL),
+ m_MemoryTypeIndex(UINT32_MAX),
+ m_Id(0),
+ m_hMemory(VK_NULL_HANDLE),
+ m_MapCount(0),
+ m_pMappedData(VMA_NULL) {
+}
+
+void VmaDeviceMemoryBlock::Init(
+ VmaAllocator hAllocator,
+ VmaPool hParentPool,
+ uint32_t newMemoryTypeIndex,
+ VkDeviceMemory newMemory,
+ VkDeviceSize newSize,
+ uint32_t id,
+ uint32_t algorithm) {
+ VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
+
+ m_hParentPool = hParentPool;
+ m_MemoryTypeIndex = newMemoryTypeIndex;
+ m_Id = id;
+ m_hMemory = newMemory;
+
+ switch (algorithm) {
+ case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT:
+ m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Linear)(hAllocator);
+ break;
+ case VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT:
+ m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Buddy)(hAllocator);
+ break;
+ default:
+ VMA_ASSERT(0);
+ // Fall-through.
+ case 0:
+ m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Generic)(hAllocator);
+ }
+ m_pMetadata->Init(newSize);
+}
+
+void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator) {
+ // This is the most important assert in the entire library.
+ // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
+ VMA_ASSERT(m_pMetadata->IsEmpty() && "Some allocations were not freed before destruction of this memory block!");
+
+ VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
+ allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_pMetadata->GetSize(), m_hMemory);
+ m_hMemory = VK_NULL_HANDLE;
+
+ vma_delete(allocator, m_pMetadata);
+ m_pMetadata = VMA_NULL;
+}
+
+bool VmaDeviceMemoryBlock::Validate() const {
+ VMA_VALIDATE((m_hMemory != VK_NULL_HANDLE) &&
+ (m_pMetadata->GetSize() != 0));
+
+ return m_pMetadata->Validate();
+}
+
+VkResult VmaDeviceMemoryBlock::CheckCorruption(VmaAllocator hAllocator) {
+ void *pData = nullptr;
+ VkResult res = Map(hAllocator, 1, &pData);
+ if (res != VK_SUCCESS) {
+ return res;
+ }
+
+ res = m_pMetadata->CheckCorruption(pData);
+
+ Unmap(hAllocator, 1);
+
+ return res;
+}
+
+VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void **ppData) {
+ if (count == 0) {
+ return VK_SUCCESS;
+ }
+
+ VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
+ if (m_MapCount != 0) {
+ m_MapCount += count;
+ VMA_ASSERT(m_pMappedData != VMA_NULL);
+ if (ppData != VMA_NULL) {
+ *ppData = m_pMappedData;
+ }
+ return VK_SUCCESS;
+ } else {
+ VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
+ hAllocator->m_hDevice,
+ m_hMemory,
+ 0, // offset
+ VK_WHOLE_SIZE,
+ 0, // flags
+ &m_pMappedData);
+ if (result == VK_SUCCESS) {
+ if (ppData != VMA_NULL) {
+ *ppData = m_pMappedData;
+ }
+ m_MapCount = count;
+ }
+ return result;
+ }
+}
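+
+// Usage sketch (hypothetical caller; `block` and `hAllocator` are assumed to
+// be a valid VmaDeviceMemoryBlock* and VmaAllocator):
+//
+//   void *p1 = VMA_NULL, *p2 = VMA_NULL;
+//   block->Map(hAllocator, 1, &p1);  // first call: vkMapMemory is invoked
+//   block->Map(hAllocator, 1, &p2);  // second call: only m_MapCount is bumped
+//   VMA_ASSERT(p1 == p2);            // the same persistent pointer is shared
+//   block->Unmap(hAllocator, 1);     // count 2 -> 1, memory stays mapped
+//   block->Unmap(hAllocator, 1);     // count 1 -> 0, vkUnmapMemory is invoked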
+
+void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count) {
+ if (count == 0) {
+ return;
+ }
+
+ VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
+ if (m_MapCount >= count) {
+ m_MapCount -= count;
+ if (m_MapCount == 0) {
+ m_pMappedData = VMA_NULL;
+ (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory);
+ }
+ } else {
+ VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
+ }
+}
+
+VkResult VmaDeviceMemoryBlock::WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize) {
+ VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
+ VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
+
+ void *pData;
+ VkResult res = Map(hAllocator, 1, &pData);
+ if (res != VK_SUCCESS) {
+ return res;
+ }
+
+ VmaWriteMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN);
+ VmaWriteMagicValue(pData, allocOffset + allocSize);
+
+ Unmap(hAllocator, 1);
+
+ return VK_SUCCESS;
+}
+
+VkResult VmaDeviceMemoryBlock::ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize) {
+ VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
+ VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
+
+ void *pData;
+ VkResult res = Map(hAllocator, 1, &pData);
+ if (res != VK_SUCCESS) {
+ return res;
+ }
+
+ if (!VmaValidateMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN)) {
+ VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE FREED ALLOCATION!");
+ } else if (!VmaValidateMagicValue(pData, allocOffset + allocSize)) {
+ VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER FREED ALLOCATION!");
+ }
+
+ Unmap(hAllocator, 1);
+
+ return VK_SUCCESS;
+}
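+
+// Layout guarded by the magic values (illustrative):
+//
+//   ... | margin | allocation [allocOffset, allocOffset + allocSize) | margin | ...
+//        ^ magic written at allocOffset - VMA_DEBUG_MARGIN
+//                               magic written at allocOffset + allocSize ^
+//
+// A corrupted magic value on either side indicates an out-of-bounds write by
+// whoever used the neighboring memory.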
+
+VkResult VmaDeviceMemoryBlock::BindBufferMemory(
+ const VmaAllocator hAllocator,
+ const VmaAllocation hAllocation,
+ VkBuffer hBuffer) {
+ VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
+ hAllocation->GetBlock() == this);
+ // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
+ VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
+ return hAllocator->GetVulkanFunctions().vkBindBufferMemory(
+ hAllocator->m_hDevice,
+ hBuffer,
+ m_hMemory,
+ hAllocation->GetOffset());
+}
+
+VkResult VmaDeviceMemoryBlock::BindImageMemory(
+ const VmaAllocator hAllocator,
+ const VmaAllocation hAllocation,
+ VkImage hImage) {
+ VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
+ hAllocation->GetBlock() == this);
+ // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
+ VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
+ return hAllocator->GetVulkanFunctions().vkBindImageMemory(
+ hAllocator->m_hDevice,
+ hImage,
+ m_hMemory,
+ hAllocation->GetOffset());
+}
+
+static void InitStatInfo(VmaStatInfo &outInfo) {
+ memset(&outInfo, 0, sizeof(outInfo));
+ outInfo.allocationSizeMin = UINT64_MAX;
+ outInfo.unusedRangeSizeMin = UINT64_MAX;
+}
+
+// Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
+static void VmaAddStatInfo(VmaStatInfo &inoutInfo, const VmaStatInfo &srcInfo) {
+ inoutInfo.blockCount += srcInfo.blockCount;
+ inoutInfo.allocationCount += srcInfo.allocationCount;
+ inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
+ inoutInfo.usedBytes += srcInfo.usedBytes;
+ inoutInfo.unusedBytes += srcInfo.unusedBytes;
+ inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
+ inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
+ inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
+ inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
+}
+
+static void VmaPostprocessCalcStatInfo(VmaStatInfo &inoutInfo) {
+ inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
+ VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) :
+ 0;
+ inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
+ VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) :
+ 0;
+}
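+
+// Aggregation sketch (illustrative): per-block VmaStatInfo values are meant to
+// be combined with VmaAddStatInfo and finalized once at the end:
+//
+//   VmaStatInfo total;
+//   InitStatInfo(total); // min fields start at UINT64_MAX so VMA_MIN works
+//   // for each block: VmaAddStatInfo(total, blockStatInfo);
+//   VmaPostprocessCalcStatInfo(total); // fills in the *Avg fields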
+
+VmaPool_T::VmaPool_T(
+ VmaAllocator hAllocator,
+ const VmaPoolCreateInfo &createInfo,
+ VkDeviceSize preferredBlockSize) :
+ m_BlockVector(
+ hAllocator,
+ this, // hParentPool
+ createInfo.memoryTypeIndex,
+ createInfo.blockSize != 0 ? createInfo.blockSize : preferredBlockSize,
+ createInfo.minBlockCount,
+ createInfo.maxBlockCount,
+ (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
+ createInfo.frameInUseCount,
+ true, // isCustomPool
+ createInfo.blockSize != 0, // explicitBlockSize
+ createInfo.flags & VMA_POOL_CREATE_ALGORITHM_MASK), // algorithm
+ m_Id(0) {
+}
+
+VmaPool_T::~VmaPool_T() {
+}
+
+#if VMA_STATS_STRING_ENABLED
+
+#endif // #if VMA_STATS_STRING_ENABLED
+
+VmaBlockVector::VmaBlockVector(
+ VmaAllocator hAllocator,
+ VmaPool hParentPool,
+ uint32_t memoryTypeIndex,
+ VkDeviceSize preferredBlockSize,
+ size_t minBlockCount,
+ size_t maxBlockCount,
+ VkDeviceSize bufferImageGranularity,
+ uint32_t frameInUseCount,
+ bool isCustomPool,
+ bool explicitBlockSize,
+ uint32_t algorithm) :
+ m_hAllocator(hAllocator),
+ m_hParentPool(hParentPool),
+ m_MemoryTypeIndex(memoryTypeIndex),
+ m_PreferredBlockSize(preferredBlockSize),
+ m_MinBlockCount(minBlockCount),
+ m_MaxBlockCount(maxBlockCount),
+ m_BufferImageGranularity(bufferImageGranularity),
+ m_FrameInUseCount(frameInUseCount),
+ m_IsCustomPool(isCustomPool),
+ m_ExplicitBlockSize(explicitBlockSize),
+ m_Algorithm(algorithm),
+ m_HasEmptyBlock(false),
+ m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock *>(hAllocator->GetAllocationCallbacks())),
+ m_NextBlockId(0) {
+}
+
+VmaBlockVector::~VmaBlockVector() {
+ for (size_t i = m_Blocks.size(); i--;) {
+ m_Blocks[i]->Destroy(m_hAllocator);
+ vma_delete(m_hAllocator, m_Blocks[i]);
+ }
+}
+
+VkResult VmaBlockVector::CreateMinBlocks() {
+ for (size_t i = 0; i < m_MinBlockCount; ++i) {
+ VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
+ if (res != VK_SUCCESS) {
+ return res;
+ }
+ }
+ return VK_SUCCESS;
+}
+
+void VmaBlockVector::GetPoolStats(VmaPoolStats *pStats) {
+ VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
+
+ const size_t blockCount = m_Blocks.size();
+
+ pStats->size = 0;
+ pStats->unusedSize = 0;
+ pStats->allocationCount = 0;
+ pStats->unusedRangeCount = 0;
+ pStats->unusedRangeSizeMax = 0;
+ pStats->blockCount = blockCount;
+
+ for (uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex) {
+ const VmaDeviceMemoryBlock *const pBlock = m_Blocks[blockIndex];
+ VMA_ASSERT(pBlock);
+ VMA_HEAVY_ASSERT(pBlock->Validate());
+ pBlock->m_pMetadata->AddPoolStats(*pStats);
+ }
+}
+
+bool VmaBlockVector::IsCorruptionDetectionEnabled() const {
+ const uint32_t requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
+ return (VMA_DEBUG_DETECT_CORRUPTION != 0) &&
+ (VMA_DEBUG_MARGIN > 0) &&
+ (m_Algorithm == 0 || m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT) &&
+ (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags;
+}
+
+static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
+
+VkResult VmaBlockVector::Allocate(
+ uint32_t currentFrameIndex,
+ VkDeviceSize size,
+ VkDeviceSize alignment,
+ const VmaAllocationCreateInfo &createInfo,
+ VmaSuballocationType suballocType,
+ size_t allocationCount,
+ VmaAllocation *pAllocations) {
+ size_t allocIndex;
+ VkResult res = VK_SUCCESS;
+
+ if (IsCorruptionDetectionEnabled()) {
+ size = VmaAlignUp<VkDeviceSize>(size, sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE));
+ alignment = VmaAlignUp<VkDeviceSize>(alignment, sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE));
+ }
+
+ {
+ VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
+ for (allocIndex = 0; allocIndex < allocationCount; ++allocIndex) {
+ res = AllocatePage(
+ currentFrameIndex,
+ size,
+ alignment,
+ createInfo,
+ suballocType,
+ pAllocations + allocIndex);
+ if (res != VK_SUCCESS) {
+ break;
+ }
+ }
+ }
+
+ if (res != VK_SUCCESS) {
+ // Free all already created allocations.
+ while (allocIndex--) {
+ Free(pAllocations[allocIndex]);
+ }
+ memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
+ }
+
+ return res;
+}
+
+VkResult VmaBlockVector::AllocatePage(
+ uint32_t currentFrameIndex,
+ VkDeviceSize size,
+ VkDeviceSize alignment,
+ const VmaAllocationCreateInfo &createInfo,
+ VmaSuballocationType suballocType,
+ VmaAllocation *pAllocation) {
+ const bool isUpperAddress = (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
+ bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
+ const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
+ const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
+ const bool canCreateNewBlock =
+ ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
+ (m_Blocks.size() < m_MaxBlockCount);
+ uint32_t strategy = createInfo.flags & VMA_ALLOCATION_CREATE_STRATEGY_MASK;
+
+ // If the linear algorithm is used, canMakeOtherLost is available only when the pool is used as a ring buffer,
+ // which in turn is available only when maxBlockCount = 1.
+ if (m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT && m_MaxBlockCount > 1) {
+ canMakeOtherLost = false;
+ }
+
+ // Upper address can only be used with the linear allocator, and only within a single memory block.
+ if (isUpperAddress &&
+ (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT || m_MaxBlockCount > 1)) {
+ return VK_ERROR_FEATURE_NOT_PRESENT;
+ }
+
+ // Validate strategy.
+ switch (strategy) {
+ case 0:
+ strategy = VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT;
+ break;
+ case VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT:
+ case VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT:
+ case VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT:
+ break;
+ default:
+ return VK_ERROR_FEATURE_NOT_PRESENT;
+ }
+
+ // Early reject: requested allocation size is larger than the maximum block size for this block vector.
+ if (size + 2 * VMA_DEBUG_MARGIN > m_PreferredBlockSize) {
+ return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+ }
+
+ /*
+ Under certain conditions, this whole section can be skipped for optimization, so
+ we move on directly to trying to allocate with canMakeOtherLost. That is the case
+ e.g. for custom pools with the linear algorithm.
+ */
+ if (!canMakeOtherLost || canCreateNewBlock) {
+ // 1. Search existing allocations. Try to allocate without making other allocations lost.
+ VmaAllocationCreateFlags allocFlagsCopy = createInfo.flags;
+ allocFlagsCopy &= ~VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT;
+
+ if (m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT) {
+ // Use only last block.
+ if (!m_Blocks.empty()) {
+ VmaDeviceMemoryBlock *const pCurrBlock = m_Blocks.back();
+ VMA_ASSERT(pCurrBlock);
+ VkResult res = AllocateFromBlock(
+ pCurrBlock,
+ currentFrameIndex,
+ size,
+ alignment,
+ allocFlagsCopy,
+ createInfo.pUserData,
+ suballocType,
+ strategy,
+ pAllocation);
+ if (res == VK_SUCCESS) {
+ VMA_DEBUG_LOG(" Returned from last block #%u", (uint32_t)(m_Blocks.size() - 1));
+ return VK_SUCCESS;
+ }
+ }
+ } else {
+ if (strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT) {
+ // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
+ for (size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex) {
+ VmaDeviceMemoryBlock *const pCurrBlock = m_Blocks[blockIndex];
+ VMA_ASSERT(pCurrBlock);
+ VkResult res = AllocateFromBlock(
+ pCurrBlock,
+ currentFrameIndex,
+ size,
+ alignment,
+ allocFlagsCopy,
+ createInfo.pUserData,
+ suballocType,
+ strategy,
+ pAllocation);
+ if (res == VK_SUCCESS) {
+ VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
+ return VK_SUCCESS;
+ }
+ }
+ } else // WORST_FIT, FIRST_FIT
+ {
+ // Backward order in m_Blocks - prefer blocks with largest amount of free space.
+ for (size_t blockIndex = m_Blocks.size(); blockIndex--;) {
+ VmaDeviceMemoryBlock *const pCurrBlock = m_Blocks[blockIndex];
+ VMA_ASSERT(pCurrBlock);
+ VkResult res = AllocateFromBlock(
+ pCurrBlock,
+ currentFrameIndex,
+ size,
+ alignment,
+ allocFlagsCopy,
+ createInfo.pUserData,
+ suballocType,
+ strategy,
+ pAllocation);
+ if (res == VK_SUCCESS) {
+ VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
+ return VK_SUCCESS;
+ }
+ }
+ }
+ }
+
+ // 2. Try to create new block.
+ if (canCreateNewBlock) {
+ // Calculate optimal size for new block.
+ VkDeviceSize newBlockSize = m_PreferredBlockSize;
+ uint32_t newBlockSizeShift = 0;
+ const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;
+
+ if (!m_ExplicitBlockSize) {
+ // Allocate 1/8, 1/4, 1/2 as first blocks.
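+ // Example (illustrative): with m_PreferredBlockSize = 256 MiB and no
+ // existing blocks, a small first allocation creates a 32 MiB block
+ // (3 halvings); subsequent allocations then step up to 64, 128 and
+ // finally the full 256 MiB as maxExistingBlockSize grows.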
+ const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
+ for (uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i) {
+ const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
+ if (smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= size * 2) {
+ newBlockSize = smallerNewBlockSize;
+ ++newBlockSizeShift;
+ } else {
+ break;
+ }
+ }
+ }
+
+ size_t newBlockIndex = 0;
+ VkResult res = CreateBlock(newBlockSize, &newBlockIndex);
+ // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
+ if (!m_ExplicitBlockSize) {
+ while (res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX) {
+ const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
+ if (smallerNewBlockSize >= size) {
+ newBlockSize = smallerNewBlockSize;
+ ++newBlockSizeShift;
+ res = CreateBlock(newBlockSize, &newBlockIndex);
+ } else {
+ break;
+ }
+ }
+ }
+
+ if (res == VK_SUCCESS) {
+ VmaDeviceMemoryBlock *const pBlock = m_Blocks[newBlockIndex];
+ VMA_ASSERT(pBlock->m_pMetadata->GetSize() >= size);
+
+ res = AllocateFromBlock(
+ pBlock,
+ currentFrameIndex,
+ size,
+ alignment,
+ allocFlagsCopy,
+ createInfo.pUserData,
+ suballocType,
+ strategy,
+ pAllocation);
+ if (res == VK_SUCCESS) {
+ VMA_DEBUG_LOG(" Created new block Size=%llu", newBlockSize);
+ return VK_SUCCESS;
+ } else {
+ // Allocation from new block failed, possibly due to VMA_DEBUG_MARGIN or alignment.
+ return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+ }
+ }
+ }
+ }
+
+ // 3. Try to allocate from existing blocks, making other allocations lost.
+ if (canMakeOtherLost) {
+ uint32_t tryIndex = 0;
+ for (; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex) {
+ VmaDeviceMemoryBlock *pBestRequestBlock = VMA_NULL;
+ VmaAllocationRequest bestRequest = {};
+ VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
+
+ // 1. Search existing allocations.
+ if (strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT) {
+ // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
+ for (size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex) {
+ VmaDeviceMemoryBlock *const pCurrBlock = m_Blocks[blockIndex];
+ VMA_ASSERT(pCurrBlock);
+ VmaAllocationRequest currRequest = {};
+ if (pCurrBlock->m_pMetadata->CreateAllocationRequest(
+ currentFrameIndex,
+ m_FrameInUseCount,
+ m_BufferImageGranularity,
+ size,
+ alignment,
+ (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
+ suballocType,
+ canMakeOtherLost,
+ strategy,
+ &currRequest)) {
+ const VkDeviceSize currRequestCost = currRequest.CalcCost();
+ if (pBestRequestBlock == VMA_NULL ||
+ currRequestCost < bestRequestCost) {
+ pBestRequestBlock = pCurrBlock;
+ bestRequest = currRequest;
+ bestRequestCost = currRequestCost;
+
+ if (bestRequestCost == 0) {
+ break;
+ }
+ }
+ }
+ }
+ } else // WORST_FIT, FIRST_FIT
+ {
+ // Backward order in m_Blocks - prefer blocks with largest amount of free space.
+ for (size_t blockIndex = m_Blocks.size(); blockIndex--;) {
+ VmaDeviceMemoryBlock *const pCurrBlock = m_Blocks[blockIndex];
+ VMA_ASSERT(pCurrBlock);
+ VmaAllocationRequest currRequest = {};
+ if (pCurrBlock->m_pMetadata->CreateAllocationRequest(
+ currentFrameIndex,
+ m_FrameInUseCount,
+ m_BufferImageGranularity,
+ size,
+ alignment,
+ (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
+ suballocType,
+ canMakeOtherLost,
+ strategy,
+ &currRequest)) {
+ const VkDeviceSize currRequestCost = currRequest.CalcCost();
+ if (pBestRequestBlock == VMA_NULL ||
+ currRequestCost < bestRequestCost ||
+ strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT) {
+ pBestRequestBlock = pCurrBlock;
+ bestRequest = currRequest;
+ bestRequestCost = currRequestCost;
+
+ if (bestRequestCost == 0 ||
+ strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT) {
+ break;
+ }
+ }
+ }
+ }
+ }
+
+ if (pBestRequestBlock != VMA_NULL) {
+ if (mapped) {
+ VkResult res = pBestRequestBlock->Map(m_hAllocator, 1, VMA_NULL);
+ if (res != VK_SUCCESS) {
+ return res;
+ }
+ }
+
+ if (pBestRequestBlock->m_pMetadata->MakeRequestedAllocationsLost(
+ currentFrameIndex,
+ m_FrameInUseCount,
+ &bestRequest)) {
+ // We no longer have an empty block.
+ if (pBestRequestBlock->m_pMetadata->IsEmpty()) {
+ m_HasEmptyBlock = false;
+ }
+ // Allocate from pBestRequestBlock.
+ *pAllocation = m_hAllocator->m_AllocationObjectAllocator.Allocate();
+ (*pAllocation)->Ctor(currentFrameIndex, isUserDataString);
+ pBestRequestBlock->m_pMetadata->Alloc(bestRequest, suballocType, size, *pAllocation);
+ (*pAllocation)->InitBlockAllocation(pBestRequestBlock, bestRequest.offset, alignment, size, suballocType, mapped, (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
+ VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
+ VMA_DEBUG_LOG(" Returned from existing block");
+ (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
+ if (VMA_DEBUG_INITIALIZE_ALLOCATIONS) {
+ m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
+ }
+ if (IsCorruptionDetectionEnabled()) {
+ VkResult res = pBestRequestBlock->WriteMagicValueAroundAllocation(m_hAllocator, bestRequest.offset, size);
+ VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
+ }
+ return VK_SUCCESS;
+ }
+ // else: Some allocations must have been touched in the meantime. Next try.
+ } else {
+ // Could not find place in any of the blocks - break outer loop.
+ break;
+ }
+ }
+ /* Maximum number of tries exceeded - a very unlikely event that can occur when
+ many other threads are simultaneously touching allocations, making it impossible
+ to make them lost at the same time as we try to allocate. */
+ if (tryIndex == VMA_ALLOCATION_TRY_COUNT) {
+ return VK_ERROR_TOO_MANY_OBJECTS;
+ }
+ }
+
+ return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+}
+
+void VmaBlockVector::Free(
+ VmaAllocation hAllocation) {
+ VmaDeviceMemoryBlock *pBlockToDelete = VMA_NULL;
+
+ // Scope for lock.
+ {
+ VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
+
+ VmaDeviceMemoryBlock *pBlock = hAllocation->GetBlock();
+
+ if (IsCorruptionDetectionEnabled()) {
+ VkResult res = pBlock->ValidateMagicValueAroundAllocation(m_hAllocator, hAllocation->GetOffset(), hAllocation->GetSize());
+ VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to validate magic value.");
+ }
+
+ if (hAllocation->IsPersistentMap()) {
+ pBlock->Unmap(m_hAllocator, 1);
+ }
+
+ pBlock->m_pMetadata->Free(hAllocation);
+ VMA_HEAVY_ASSERT(pBlock->Validate());
+
+ VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", m_MemoryTypeIndex);
+
+ // pBlock became empty after this deallocation.
+ if (pBlock->m_pMetadata->IsEmpty()) {
+ // We already have an empty block. We don't want to have two, so delete this one.
+ if (m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount) {
+ pBlockToDelete = pBlock;
+ Remove(pBlock);
+ }
+ // We now have our first empty block.
+ else {
+ m_HasEmptyBlock = true;
+ }
+ }
+ // pBlock didn't become empty, but we have another empty block - find and free that one.
+ // (This is optional, heuristics.)
+ else if (m_HasEmptyBlock) {
+ VmaDeviceMemoryBlock *pLastBlock = m_Blocks.back();
+ if (pLastBlock->m_pMetadata->IsEmpty() && m_Blocks.size() > m_MinBlockCount) {
+ pBlockToDelete = pLastBlock;
+ m_Blocks.pop_back();
+ m_HasEmptyBlock = false;
+ }
+ }
+
+ IncrementallySortBlocks();
+ }
+
+ // Destruction of a free block. Deferred until this point, outside of the mutex
+ // lock, for performance reasons.
+ if (pBlockToDelete != VMA_NULL) {
+ VMA_DEBUG_LOG(" Deleted empty allocation");
+ pBlockToDelete->Destroy(m_hAllocator);
+ vma_delete(m_hAllocator, pBlockToDelete);
+ }
+}
+
+VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const {
+ VkDeviceSize result = 0;
+ for (size_t i = m_Blocks.size(); i--;) {
+ result = VMA_MAX(result, m_Blocks[i]->m_pMetadata->GetSize());
+ if (result >= m_PreferredBlockSize) {
+ break;
+ }
+ }
+ return result;
+}
+
+void VmaBlockVector::Remove(VmaDeviceMemoryBlock *pBlock) {
+ for (uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex) {
+ if (m_Blocks[blockIndex] == pBlock) {
+ VmaVectorRemove(m_Blocks, blockIndex);
+ return;
+ }
+ }
+ VMA_ASSERT(0);
+}
+
+void VmaBlockVector::IncrementallySortBlocks() {
+ if (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT) {
+ // Bubble sort only until first swap.
+ for (size_t i = 1; i < m_Blocks.size(); ++i) {
+ if (m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() > m_Blocks[i]->m_pMetadata->GetSumFreeSize()) {
+ VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
+ return;
+ }
+ }
+ }
+}
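+
+// Design note: a full sort after every free would cost O(n log n); a single
+// bubble step that stops at the first swap is O(n) and keeps m_Blocks only
+// approximately ordered by free size, which is sufficient for the best-fit
+// scan in AllocatePage that walks the blocks in order anyway.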
+
+VkResult VmaBlockVector::AllocateFromBlock(
+ VmaDeviceMemoryBlock *pBlock,
+ uint32_t currentFrameIndex,
+ VkDeviceSize size,
+ VkDeviceSize alignment,
+ VmaAllocationCreateFlags allocFlags,
+ void *pUserData,
+ VmaSuballocationType suballocType,
+ uint32_t strategy,
+ VmaAllocation *pAllocation) {
+ VMA_ASSERT((allocFlags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) == 0);
+ const bool isUpperAddress = (allocFlags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
+ const bool mapped = (allocFlags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
+ const bool isUserDataString = (allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
+
+ VmaAllocationRequest currRequest = {};
+ if (pBlock->m_pMetadata->CreateAllocationRequest(
+ currentFrameIndex,
+ m_FrameInUseCount,
+ m_BufferImageGranularity,
+ size,
+ alignment,
+ isUpperAddress,
+ suballocType,
+ false, // canMakeOtherLost
+ strategy,
+ &currRequest)) {
+ // Allocate from pBlock.
+ VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);
+
+ if (mapped) {
+ VkResult res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
+ if (res != VK_SUCCESS) {
+ return res;
+ }
+ }
+
+ // We no longer have an empty block.
+ if (pBlock->m_pMetadata->IsEmpty()) {
+ m_HasEmptyBlock = false;
+ }
+
+ *pAllocation = m_hAllocator->m_AllocationObjectAllocator.Allocate();
+ (*pAllocation)->Ctor(currentFrameIndex, isUserDataString);
+ pBlock->m_pMetadata->Alloc(currRequest, suballocType, size, *pAllocation);
+ (*pAllocation)->InitBlockAllocation(pBlock, currRequest.offset, alignment, size, suballocType, mapped, (allocFlags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
+ VMA_HEAVY_ASSERT(pBlock->Validate());
+ (*pAllocation)->SetUserData(m_hAllocator, pUserData);
+ if (VMA_DEBUG_INITIALIZE_ALLOCATIONS) {
+ m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
+ }
+ if (IsCorruptionDetectionEnabled()) {
+ VkResult res = pBlock->WriteMagicValueAroundAllocation(m_hAllocator, currRequest.offset, size);
+ VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
+ }
+ return VK_SUCCESS;
+ }
+ return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+}
+
+VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t *pNewBlockIndex) {
+ VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
+ allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
+ allocInfo.allocationSize = blockSize;
+ VkDeviceMemory mem = VK_NULL_HANDLE;
+ VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
+ if (res < 0) {
+ return res;
+ }
+
+ // New VkDeviceMemory successfully created.
+
+ // Create a new VmaDeviceMemoryBlock object for it.
+ VmaDeviceMemoryBlock *const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
+ pBlock->Init(
+ m_hAllocator,
+ m_hParentPool,
+ m_MemoryTypeIndex,
+ mem,
+ allocInfo.allocationSize,
+ m_NextBlockId++,
+ m_Algorithm);
+
+ m_Blocks.push_back(pBlock);
+ if (pNewBlockIndex != VMA_NULL) {
+ *pNewBlockIndex = m_Blocks.size() - 1;
+ }
+
+ return VK_SUCCESS;
+}
+
+void VmaBlockVector::ApplyDefragmentationMovesCpu(
+ class VmaBlockVectorDefragmentationContext *pDefragCtx,
+ const VmaVector<VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> > &moves) {
+ const size_t blockCount = m_Blocks.size();
+ const bool isNonCoherent = m_hAllocator->IsMemoryTypeNonCoherent(m_MemoryTypeIndex);
+
+ enum BLOCK_FLAG {
+ BLOCK_FLAG_USED = 0x00000001,
+ BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION = 0x00000002,
+ };
+
+ struct BlockInfo {
+ uint32_t flags;
+ void *pMappedData;
+ };
+ VmaVector<BlockInfo, VmaStlAllocator<BlockInfo> >
+ blockInfo(blockCount, VmaStlAllocator<BlockInfo>(m_hAllocator->GetAllocationCallbacks()));
+ memset(blockInfo.data(), 0, blockCount * sizeof(BlockInfo));
+
+ // Go over all moves. Mark blocks that are used with BLOCK_FLAG_USED.
+ const size_t moveCount = moves.size();
+ for (size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex) {
+ const VmaDefragmentationMove &move = moves[moveIndex];
+ blockInfo[move.srcBlockIndex].flags |= BLOCK_FLAG_USED;
+ blockInfo[move.dstBlockIndex].flags |= BLOCK_FLAG_USED;
+ }
+
+ VMA_ASSERT(pDefragCtx->res == VK_SUCCESS);
+
+ // Go over all blocks. Get mapped pointer or map if necessary.
+ for (size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex) {
+ BlockInfo &currBlockInfo = blockInfo[blockIndex];
+ VmaDeviceMemoryBlock *pBlock = m_Blocks[blockIndex];
+ if ((currBlockInfo.flags & BLOCK_FLAG_USED) != 0) {
+ currBlockInfo.pMappedData = pBlock->GetMappedData();
+ // If the block is not originally mapped, map it.
+ if (currBlockInfo.pMappedData == VMA_NULL) {
+ pDefragCtx->res = pBlock->Map(m_hAllocator, 1, &currBlockInfo.pMappedData);
+ if (pDefragCtx->res == VK_SUCCESS) {
+ currBlockInfo.flags |= BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION;
+ }
+ }
+ }
+ }
+
+ // Go over all moves. Do actual data transfer.
+ if (pDefragCtx->res == VK_SUCCESS) {
+ const VkDeviceSize nonCoherentAtomSize = m_hAllocator->m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
+ VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
+
+ for (size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex) {
+ const VmaDefragmentationMove &move = moves[moveIndex];
+
+ const BlockInfo &srcBlockInfo = blockInfo[move.srcBlockIndex];
+ const BlockInfo &dstBlockInfo = blockInfo[move.dstBlockIndex];
+
+ VMA_ASSERT(srcBlockInfo.pMappedData && dstBlockInfo.pMappedData);
+
+ // Invalidate source.
+ if (isNonCoherent) {
+ VmaDeviceMemoryBlock *const pSrcBlock = m_Blocks[move.srcBlockIndex];
+ memRange.memory = pSrcBlock->GetDeviceMemory();
+ memRange.offset = VmaAlignDown(move.srcOffset, nonCoherentAtomSize);
+ memRange.size = VMA_MIN(
+ VmaAlignUp(move.size + (move.srcOffset - memRange.offset), nonCoherentAtomSize),
+ pSrcBlock->m_pMetadata->GetSize() - memRange.offset);
+ (*m_hAllocator->GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange);
+ }
+
+ // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
+ memmove(
+ reinterpret_cast<char *>(dstBlockInfo.pMappedData) + move.dstOffset,
+ reinterpret_cast<char *>(srcBlockInfo.pMappedData) + move.srcOffset,
+ static_cast<size_t>(move.size));
+
+ if (IsCorruptionDetectionEnabled()) {
+ VmaWriteMagicValue(dstBlockInfo.pMappedData, move.dstOffset - VMA_DEBUG_MARGIN);
+ VmaWriteMagicValue(dstBlockInfo.pMappedData, move.dstOffset + move.size);
+ }
+
+ // Flush destination.
+ if (isNonCoherent) {
+ VmaDeviceMemoryBlock *const pDstBlock = m_Blocks[move.dstBlockIndex];
+ memRange.memory = pDstBlock->GetDeviceMemory();
+ memRange.offset = VmaAlignDown(move.dstOffset, nonCoherentAtomSize);
+ memRange.size = VMA_MIN(
+ VmaAlignUp(move.size + (move.dstOffset - memRange.offset), nonCoherentAtomSize),
+ pDstBlock->m_pMetadata->GetSize() - memRange.offset);
+ (*m_hAllocator->GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange);
+ }
+ }
+ }
+
+ // Go over all blocks in reverse order. Unmap those that were mapped just for defragmentation.
+ // This is done regardless of whether pDefragCtx->res == VK_SUCCESS.
+ for (size_t blockIndex = blockCount; blockIndex--;) {
+ const BlockInfo &currBlockInfo = blockInfo[blockIndex];
+ if ((currBlockInfo.flags & BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION) != 0) {
+ VmaDeviceMemoryBlock *pBlock = m_Blocks[blockIndex];
+ pBlock->Unmap(m_hAllocator, 1);
+ }
+ }
+}
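+
+// Note: memmove (rather than memcpy) is required above because a move's source
+// and destination ranges may overlap when compacting within a single block.
+// The vkInvalidateMappedMemoryRanges/vkFlushMappedMemoryRanges calls bracket
+// the copy only for non-coherent memory types, with offsets and sizes aligned
+// to nonCoherentAtomSize as the Vulkan spec requires.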
+
+void VmaBlockVector::ApplyDefragmentationMovesGpu(
+ class VmaBlockVectorDefragmentationContext *pDefragCtx,
+ const VmaVector<VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> > &moves,
+ VkCommandBuffer commandBuffer) {
+ const size_t blockCount = m_Blocks.size();
+
+ pDefragCtx->blockContexts.resize(blockCount);
+ memset(pDefragCtx->blockContexts.data(), 0, blockCount * sizeof(VmaBlockDefragmentationContext));
+
+ // Go over all moves. Mark blocks that are used with BLOCK_FLAG_USED.
+ const size_t moveCount = moves.size();
+ for (size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex) {
+ const VmaDefragmentationMove &move = moves[moveIndex];
+ pDefragCtx->blockContexts[move.srcBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED;
+ pDefragCtx->blockContexts[move.dstBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED;
+ }
+
+ VMA_ASSERT(pDefragCtx->res == VK_SUCCESS);
+
+ // Go over all blocks. Create and bind buffer for whole block if necessary.
+ {
+ VkBufferCreateInfo bufCreateInfo;
+ VmaFillGpuDefragmentationBufferCreateInfo(bufCreateInfo);
+
+ for (size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex) {
+ VmaBlockDefragmentationContext &currBlockCtx = pDefragCtx->blockContexts[blockIndex];
+ VmaDeviceMemoryBlock *pBlock = m_Blocks[blockIndex];
+ if ((currBlockCtx.flags & VmaBlockDefragmentationContext::BLOCK_FLAG_USED) != 0) {
+ bufCreateInfo.size = pBlock->m_pMetadata->GetSize();
+ pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkCreateBuffer)(
+ m_hAllocator->m_hDevice, &bufCreateInfo, m_hAllocator->GetAllocationCallbacks(), &currBlockCtx.hBuffer);
+ if (pDefragCtx->res == VK_SUCCESS) {
+ pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkBindBufferMemory)(
+ m_hAllocator->m_hDevice, currBlockCtx.hBuffer, pBlock->GetDeviceMemory(), 0);
+ }
+ }
+ }
+ }
+
+ // Go over all moves. Post data transfer commands to command buffer.
+ if (pDefragCtx->res == VK_SUCCESS) {
+ for (size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex) {
+ const VmaDefragmentationMove &move = moves[moveIndex];
+
+ const VmaBlockDefragmentationContext &srcBlockCtx = pDefragCtx->blockContexts[move.srcBlockIndex];
+ const VmaBlockDefragmentationContext &dstBlockCtx = pDefragCtx->blockContexts[move.dstBlockIndex];
+
+ VMA_ASSERT(srcBlockCtx.hBuffer && dstBlockCtx.hBuffer);
+
+ VkBufferCopy region = {
+ move.srcOffset,
+ move.dstOffset,
+ move.size
+ };
+ (*m_hAllocator->GetVulkanFunctions().vkCmdCopyBuffer)(
+ commandBuffer, srcBlockCtx.hBuffer, dstBlockCtx.hBuffer, 1, &region);
+ }
+ }
+
+ // The buffers are kept in pDefragCtx for later destruction. If any moves were
+ // recorded, mark the context as VK_NOT_READY until the command buffer has executed.
+ if (pDefragCtx->res == VK_SUCCESS && moveCount > 0) {
+ pDefragCtx->res = VK_NOT_READY;
+ }
+}
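+
+// Note: each temporary buffer created above is bound at offset 0 and spans the
+// whole VkDeviceMemory of its block, so a single vkCmdCopyBuffer can move data
+// between arbitrary offsets of any two participating blocks without
+// per-allocation staging buffers.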
+
+void VmaBlockVector::FreeEmptyBlocks(VmaDefragmentationStats *pDefragmentationStats) {
+ m_HasEmptyBlock = false;
+ for (size_t blockIndex = m_Blocks.size(); blockIndex--;) {
+ VmaDeviceMemoryBlock *pBlock = m_Blocks[blockIndex];
+ if (pBlock->m_pMetadata->IsEmpty()) {
+ if (m_Blocks.size() > m_MinBlockCount) {
+ if (pDefragmentationStats != VMA_NULL) {
+ ++pDefragmentationStats->deviceMemoryBlocksFreed;
+ pDefragmentationStats->bytesFreed += pBlock->m_pMetadata->GetSize();
+ }
+
+ VmaVectorRemove(m_Blocks, blockIndex);
+ pBlock->Destroy(m_hAllocator);
+ vma_delete(m_hAllocator, pBlock);
+ } else {
+ m_HasEmptyBlock = true;
+ }
+ }
+ }
+}
+
+#if VMA_STATS_STRING_ENABLED
+
+void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter &json) {
+ VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
+
+ json.BeginObject();
+
+ if (m_IsCustomPool) {
+ json.WriteString("MemoryTypeIndex");
+ json.WriteNumber(m_MemoryTypeIndex);
+
+ json.WriteString("BlockSize");
+ json.WriteNumber(m_PreferredBlockSize);
+
+ json.WriteString("BlockCount");
+ json.BeginObject(true);
+ if (m_MinBlockCount > 0) {
+ json.WriteString("Min");
+ json.WriteNumber((uint64_t)m_MinBlockCount);
+ }
+ if (m_MaxBlockCount < SIZE_MAX) {
+ json.WriteString("Max");
+ json.WriteNumber((uint64_t)m_MaxBlockCount);
+ }
+ json.WriteString("Cur");
+ json.WriteNumber((uint64_t)m_Blocks.size());
+ json.EndObject();
+
+ if (m_FrameInUseCount > 0) {
+ json.WriteString("FrameInUseCount");
+ json.WriteNumber(m_FrameInUseCount);
+ }
+
+ if (m_Algorithm != 0) {
+ json.WriteString("Algorithm");
+ json.WriteString(VmaAlgorithmToStr(m_Algorithm));
+ }
+ } else {
+ json.WriteString("PreferredBlockSize");
+ json.WriteNumber(m_PreferredBlockSize);
+ }
+
+ json.WriteString("Blocks");
+ json.BeginObject();
+ for (size_t i = 0; i < m_Blocks.size(); ++i) {
+ json.BeginString();
+ json.ContinueString(m_Blocks[i]->GetId());
+ json.EndString();
+
+ m_Blocks[i]->m_pMetadata->PrintDetailedMap(json);
+ }
+ json.EndObject();
+
+ json.EndObject();
+}
+
+#endif // #if VMA_STATS_STRING_ENABLED
+
+void VmaBlockVector::Defragment(
+ class VmaBlockVectorDefragmentationContext *pCtx,
+ VmaDefragmentationStats *pStats,
+ VkDeviceSize &maxCpuBytesToMove, uint32_t &maxCpuAllocationsToMove,
+ VkDeviceSize &maxGpuBytesToMove, uint32_t &maxGpuAllocationsToMove,
+ VkCommandBuffer commandBuffer) {
+ pCtx->res = VK_SUCCESS;
+
+ const VkMemoryPropertyFlags memPropFlags =
+ m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags;
+ const bool isHostVisible = (memPropFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0;
+ const bool isHostCoherent = (memPropFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0;
+
+ const bool canDefragmentOnCpu = maxCpuBytesToMove > 0 && maxCpuAllocationsToMove > 0 &&
+ isHostVisible;
+ const bool canDefragmentOnGpu = maxGpuBytesToMove > 0 && maxGpuAllocationsToMove > 0 &&
+ !IsCorruptionDetectionEnabled() &&
+ ((1u << m_MemoryTypeIndex) & m_hAllocator->GetGpuDefragmentationMemoryTypeBits()) != 0;
+
+ // There are options to defragment this memory type.
+ if (canDefragmentOnCpu || canDefragmentOnGpu) {
+ bool defragmentOnGpu;
+ // There is only one option to defragment this memory type.
+ if (canDefragmentOnGpu != canDefragmentOnCpu) {
+ defragmentOnGpu = canDefragmentOnGpu;
+ }
+ // Both options are available: use heuristics to choose the better one.
+ else {
+ defragmentOnGpu = (memPropFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0 ||
+ m_hAllocator->IsIntegratedGpu();
+ }
+
+ bool overlappingMoveSupported = !defragmentOnGpu;
+
+ if (m_hAllocator->m_UseMutex) {
+ m_Mutex.LockWrite();
+ pCtx->mutexLocked = true;
+ }
+
+ pCtx->Begin(overlappingMoveSupported);
+
+ // Defragment.
+
+ const VkDeviceSize maxBytesToMove = defragmentOnGpu ? maxGpuBytesToMove : maxCpuBytesToMove;
+ const uint32_t maxAllocationsToMove = defragmentOnGpu ? maxGpuAllocationsToMove : maxCpuAllocationsToMove;
+ VmaVector<VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> > moves =
+ VmaVector<VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >(VmaStlAllocator<VmaDefragmentationMove>(m_hAllocator->GetAllocationCallbacks()));
+ pCtx->res = pCtx->GetAlgorithm()->Defragment(moves, maxBytesToMove, maxAllocationsToMove);
+
+ // Accumulate statistics.
+ if (pStats != VMA_NULL) {
+ const VkDeviceSize bytesMoved = pCtx->GetAlgorithm()->GetBytesMoved();
+ const uint32_t allocationsMoved = pCtx->GetAlgorithm()->GetAllocationsMoved();
+ pStats->bytesMoved += bytesMoved;
+ pStats->allocationsMoved += allocationsMoved;
+ VMA_ASSERT(bytesMoved <= maxBytesToMove);
+ VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
+ if (defragmentOnGpu) {
+ maxGpuBytesToMove -= bytesMoved;
+ maxGpuAllocationsToMove -= allocationsMoved;
+ } else {
+ maxCpuBytesToMove -= bytesMoved;
+ maxCpuAllocationsToMove -= allocationsMoved;
+ }
+ }
+
+ if (pCtx->res >= VK_SUCCESS) {
+ if (defragmentOnGpu) {
+ ApplyDefragmentationMovesGpu(pCtx, moves, commandBuffer);
+ } else {
+ ApplyDefragmentationMovesCpu(pCtx, moves);
+ }
+ }
+ }
+}
+
+void VmaBlockVector::DefragmentationEnd(
+ class VmaBlockVectorDefragmentationContext *pCtx,
+ VmaDefragmentationStats *pStats) {
+ // Destroy buffers.
+ for (size_t blockIndex = pCtx->blockContexts.size(); blockIndex--;) {
+ VmaBlockDefragmentationContext &blockCtx = pCtx->blockContexts[blockIndex];
+ if (blockCtx.hBuffer) {
+ (*m_hAllocator->GetVulkanFunctions().vkDestroyBuffer)(
+ m_hAllocator->m_hDevice, blockCtx.hBuffer, m_hAllocator->GetAllocationCallbacks());
+ }
+ }
+
+ if (pCtx->res >= VK_SUCCESS) {
+ FreeEmptyBlocks(pStats);
+ }
+
+ if (pCtx->mutexLocked) {
+ VMA_ASSERT(m_hAllocator->m_UseMutex);
+ m_Mutex.UnlockWrite();
+ }
+}
+
+size_t VmaBlockVector::CalcAllocationCount() const {
+ size_t result = 0;
+ for (size_t i = 0; i < m_Blocks.size(); ++i) {
+ result += m_Blocks[i]->m_pMetadata->GetAllocationCount();
+ }
+ return result;
+}
+
+bool VmaBlockVector::IsBufferImageGranularityConflictPossible() const {
+ if (m_BufferImageGranularity == 1) {
+ return false;
+ }
+ VmaSuballocationType lastSuballocType = VMA_SUBALLOCATION_TYPE_FREE;
+ for (size_t i = 0, count = m_Blocks.size(); i < count; ++i) {
+ VmaDeviceMemoryBlock *const pBlock = m_Blocks[i];
+ VMA_ASSERT(m_Algorithm == 0);
+ VmaBlockMetadata_Generic *const pMetadata = (VmaBlockMetadata_Generic *)pBlock->m_pMetadata;
+ if (pMetadata->IsBufferImageGranularityConflictPossible(m_BufferImageGranularity, lastSuballocType)) {
+ return true;
+ }
+ }
+ return false;
+}
+
+void VmaBlockVector::MakePoolAllocationsLost(
+ uint32_t currentFrameIndex,
+ size_t *pLostAllocationCount) {
+ VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
+ size_t lostAllocationCount = 0;
+ for (uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex) {
+ VmaDeviceMemoryBlock *const pBlock = m_Blocks[blockIndex];
+ VMA_ASSERT(pBlock);
+ lostAllocationCount += pBlock->m_pMetadata->MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
+ }
+ if (pLostAllocationCount != VMA_NULL) {
+ *pLostAllocationCount = lostAllocationCount;
+ }
+}
+
+VkResult VmaBlockVector::CheckCorruption() {
+ if (!IsCorruptionDetectionEnabled()) {
+ return VK_ERROR_FEATURE_NOT_PRESENT;
+ }
+
+ VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
+ for (uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex) {
+ VmaDeviceMemoryBlock *const pBlock = m_Blocks[blockIndex];
+ VMA_ASSERT(pBlock);
+ VkResult res = pBlock->CheckCorruption(m_hAllocator);
+ if (res != VK_SUCCESS) {
+ return res;
+ }
+ }
+ return VK_SUCCESS;
+}
+
+void VmaBlockVector::AddStats(VmaStats *pStats) {
+ const uint32_t memTypeIndex = m_MemoryTypeIndex;
+ const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
+
+ VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
+
+ for (uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex) {
+ const VmaDeviceMemoryBlock *const pBlock = m_Blocks[blockIndex];
+ VMA_ASSERT(pBlock);
+ VMA_HEAVY_ASSERT(pBlock->Validate());
+ VmaStatInfo allocationStatInfo;
+ pBlock->m_pMetadata->CalcAllocationStatInfo(allocationStatInfo);
+ VmaAddStatInfo(pStats->total, allocationStatInfo);
+ VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
+ VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
+ }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// VmaDefragmentationAlgorithm_Generic members definition
+
+VmaDefragmentationAlgorithm_Generic::VmaDefragmentationAlgorithm_Generic(
+ VmaAllocator hAllocator,
+ VmaBlockVector *pBlockVector,
+ uint32_t currentFrameIndex,
+ bool overlappingMoveSupported) :
+ VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex),
+ m_AllocationCount(0),
+ m_AllAllocations(false),
+ m_BytesMoved(0),
+ m_AllocationsMoved(0),
+ m_Blocks(VmaStlAllocator<BlockInfo *>(hAllocator->GetAllocationCallbacks())) {
+ // Create block info for each block.
+ const size_t blockCount = m_pBlockVector->m_Blocks.size();
+ for (size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex) {
+ BlockInfo *pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
+ pBlockInfo->m_OriginalBlockIndex = blockIndex;
+ pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
+ m_Blocks.push_back(pBlockInfo);
+ }
+
+ // Sort them by m_pBlock pointer value.
+ VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());
+}
+
+VmaDefragmentationAlgorithm_Generic::~VmaDefragmentationAlgorithm_Generic() {
+ for (size_t i = m_Blocks.size(); i--;) {
+ vma_delete(m_hAllocator, m_Blocks[i]);
+ }
+}
+
+void VmaDefragmentationAlgorithm_Generic::AddAllocation(VmaAllocation hAlloc, VkBool32 *pChanged) {
+ // Now that we are inside VmaBlockVector::m_Mutex, we can make the final check whether this allocation was not lost.
+ if (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST) {
+ VmaDeviceMemoryBlock *pBlock = hAlloc->GetBlock();
+ BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
+ if (it != m_Blocks.end() && (*it)->m_pBlock == pBlock) {
+ AllocationInfo allocInfo = AllocationInfo(hAlloc, pChanged);
+ (*it)->m_Allocations.push_back(allocInfo);
+ } else {
+ VMA_ASSERT(0);
+ }
+
+ ++m_AllocationCount;
+ }
+}
+
+VkResult VmaDefragmentationAlgorithm_Generic::DefragmentRound(
+ VmaVector<VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> > &moves,
+ VkDeviceSize maxBytesToMove,
+ uint32_t maxAllocationsToMove) {
+ if (m_Blocks.empty()) {
+ return VK_SUCCESS;
+ }
+
+ // This is a choice based on research.
+ // Option 1:
+ uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT;
+ // Option 2:
+ //uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT;
+ // Option 3:
+ //uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_FRAGMENTATION_BIT;
+
+ size_t srcBlockMinIndex = 0;
+ // With FAST_ALGORITHM, move allocations only out of the last of the blocks that contain non-movable allocations.
+ /*
+ if(m_AlgorithmFlags & VMA_DEFRAGMENTATION_FAST_ALGORITHM_BIT)
+ {
+ const size_t blocksWithNonMovableCount = CalcBlocksWithNonMovableCount();
+ if(blocksWithNonMovableCount > 0)
+ {
+ srcBlockMinIndex = blocksWithNonMovableCount - 1;
+ }
+ }
+ */
+
+ size_t srcBlockIndex = m_Blocks.size() - 1;
+ size_t srcAllocIndex = SIZE_MAX;
+ for (;;) {
+ // 1. Find next allocation to move.
+ // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
+ // 1.2. Then start from last to first m_Allocations.
+ while (srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size()) {
+ if (m_Blocks[srcBlockIndex]->m_Allocations.empty()) {
+ // Finished: no more allocations to process.
+ if (srcBlockIndex == srcBlockMinIndex) {
+ return VK_SUCCESS;
+ } else {
+ --srcBlockIndex;
+ srcAllocIndex = SIZE_MAX;
+ }
+ } else {
+ srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
+ }
+ }
+
+ BlockInfo *pSrcBlockInfo = m_Blocks[srcBlockIndex];
+ AllocationInfo &allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];
+
+ const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
+ const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
+ const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
+ const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();
+
+ // 2. Try to find new place for this allocation in preceding or current block.
+ for (size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex) {
+ BlockInfo *pDstBlockInfo = m_Blocks[dstBlockIndex];
+ VmaAllocationRequest dstAllocRequest;
+ if (pDstBlockInfo->m_pBlock->m_pMetadata->CreateAllocationRequest(
+ m_CurrentFrameIndex,
+ m_pBlockVector->GetFrameInUseCount(),
+ m_pBlockVector->GetBufferImageGranularity(),
+ size,
+ alignment,
+ false, // upperAddress
+ suballocType,
+ false, // canMakeOtherLost
+ strategy,
+ &dstAllocRequest) &&
+ MoveMakesSense(
+ dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset)) {
+ VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);
+
+ // Reached limit on number of allocations or bytes to move.
+ if ((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
+ (m_BytesMoved + size > maxBytesToMove)) {
+ return VK_SUCCESS;
+ }
+
+ VmaDefragmentationMove move;
+ move.srcBlockIndex = pSrcBlockInfo->m_OriginalBlockIndex;
+ move.dstBlockIndex = pDstBlockInfo->m_OriginalBlockIndex;
+ move.srcOffset = srcOffset;
+ move.dstOffset = dstAllocRequest.offset;
+ move.size = size;
+ moves.push_back(move);
+
+ pDstBlockInfo->m_pBlock->m_pMetadata->Alloc(
+ dstAllocRequest,
+ suballocType,
+ size,
+ allocInfo.m_hAllocation);
+ pSrcBlockInfo->m_pBlock->m_pMetadata->FreeAtOffset(srcOffset);
+
+ allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);
+
+ if (allocInfo.m_pChanged != VMA_NULL) {
+ *allocInfo.m_pChanged = VK_TRUE;
+ }
+
+ ++m_AllocationsMoved;
+ m_BytesMoved += size;
+
+ VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);
+
+ break;
+ }
+ }
+
+		// If not processed, this allocInfo remains in pSrcBlockInfo->m_Allocations for the next round.
+
+ if (srcAllocIndex > 0) {
+ --srcAllocIndex;
+ } else {
+ if (srcBlockIndex > 0) {
+ --srcBlockIndex;
+ srcAllocIndex = SIZE_MAX;
+ } else {
+ return VK_SUCCESS;
+ }
+ }
+ }
+}
+
+size_t VmaDefragmentationAlgorithm_Generic::CalcBlocksWithNonMovableCount() const {
+ size_t result = 0;
+ for (size_t i = 0; i < m_Blocks.size(); ++i) {
+ if (m_Blocks[i]->m_HasNonMovableAllocations) {
+ ++result;
+ }
+ }
+ return result;
+}
+
+VkResult VmaDefragmentationAlgorithm_Generic::Defragment(
+ VmaVector<VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> > &moves,
+ VkDeviceSize maxBytesToMove,
+ uint32_t maxAllocationsToMove) {
+ if (!m_AllAllocations && m_AllocationCount == 0) {
+ return VK_SUCCESS;
+ }
+
+ const size_t blockCount = m_Blocks.size();
+ for (size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex) {
+ BlockInfo *pBlockInfo = m_Blocks[blockIndex];
+
+ if (m_AllAllocations) {
+ VmaBlockMetadata_Generic *pMetadata = (VmaBlockMetadata_Generic *)pBlockInfo->m_pBlock->m_pMetadata;
+ for (VmaSuballocationList::const_iterator it = pMetadata->m_Suballocations.begin();
+ it != pMetadata->m_Suballocations.end();
+ ++it) {
+ if (it->type != VMA_SUBALLOCATION_TYPE_FREE) {
+ AllocationInfo allocInfo = AllocationInfo(it->hAllocation, VMA_NULL);
+ pBlockInfo->m_Allocations.push_back(allocInfo);
+ }
+ }
+ }
+
+ pBlockInfo->CalcHasNonMovableAllocations();
+
+ // This is a choice based on research.
+ // Option 1:
+ pBlockInfo->SortAllocationsByOffsetDescending();
+ // Option 2:
+ //pBlockInfo->SortAllocationsBySizeDescending();
+ }
+
+	// Sort m_Blocks this time by the main criterion, from most "destination" to most "source" blocks.
+ VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());
+
+ // This is a choice based on research.
+ const uint32_t roundCount = 2;
+
+ // Execute defragmentation rounds (the main part).
+ VkResult result = VK_SUCCESS;
+ for (uint32_t round = 0; (round < roundCount) && (result == VK_SUCCESS); ++round) {
+ result = DefragmentRound(moves, maxBytesToMove, maxAllocationsToMove);
+ }
+
+ return result;
+}
+
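+// A move "makes sense" only if it transports the allocation strictly toward the
+// front of the sorted block list: into a lower-indexed block, or to a lower offset
+// within the same block. This keeps every round monotonically compacting.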
+bool VmaDefragmentationAlgorithm_Generic::MoveMakesSense(
+ size_t dstBlockIndex, VkDeviceSize dstOffset,
+ size_t srcBlockIndex, VkDeviceSize srcOffset) {
+ if (dstBlockIndex < srcBlockIndex) {
+ return true;
+ }
+ if (dstBlockIndex > srcBlockIndex) {
+ return false;
+ }
+ if (dstOffset < srcOffset) {
+ return true;
+ }
+ return false;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// VmaDefragmentationAlgorithm_Fast
+
+VmaDefragmentationAlgorithm_Fast::VmaDefragmentationAlgorithm_Fast(
+ VmaAllocator hAllocator,
+ VmaBlockVector *pBlockVector,
+ uint32_t currentFrameIndex,
+ bool overlappingMoveSupported) :
+ VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex),
+ m_OverlappingMoveSupported(overlappingMoveSupported),
+ m_AllocationCount(0),
+ m_AllAllocations(false),
+ m_BytesMoved(0),
+ m_AllocationsMoved(0),
+ m_BlockInfos(VmaStlAllocator<BlockInfo>(hAllocator->GetAllocationCallbacks())) {
+ VMA_ASSERT(VMA_DEBUG_MARGIN == 0);
+}
+
+VmaDefragmentationAlgorithm_Fast::~VmaDefragmentationAlgorithm_Fast() {
+}
+
+VkResult VmaDefragmentationAlgorithm_Fast::Defragment(
+ VmaVector<VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> > &moves,
+ VkDeviceSize maxBytesToMove,
+ uint32_t maxAllocationsToMove) {
+ VMA_ASSERT(m_AllAllocations || m_pBlockVector->CalcAllocationCount() == m_AllocationCount);
+
+ const size_t blockCount = m_pBlockVector->GetBlockCount();
+ if (blockCount == 0 || maxBytesToMove == 0 || maxAllocationsToMove == 0) {
+ return VK_SUCCESS;
+ }
+
+ PreprocessMetadata();
+
+	// Sort blocks in order from most "destination" (least free space) to most "source" (most free space).
+
+ m_BlockInfos.resize(blockCount);
+ for (size_t i = 0; i < blockCount; ++i) {
+ m_BlockInfos[i].origBlockIndex = i;
+ }
+
+ VMA_SORT(m_BlockInfos.begin(), m_BlockInfos.end(), [this](const BlockInfo &lhs, const BlockInfo &rhs) -> bool {
+ return m_pBlockVector->GetBlock(lhs.origBlockIndex)->m_pMetadata->GetSumFreeSize() <
+ m_pBlockVector->GetBlock(rhs.origBlockIndex)->m_pMetadata->GetSumFreeSize();
+ });
+
+ // THE MAIN ALGORITHM
+
+ FreeSpaceDatabase freeSpaceDb;
+
+ size_t dstBlockInfoIndex = 0;
+ size_t dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex;
+ VmaDeviceMemoryBlock *pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex);
+ VmaBlockMetadata_Generic *pDstMetadata = (VmaBlockMetadata_Generic *)pDstBlock->m_pMetadata;
+ VkDeviceSize dstBlockSize = pDstMetadata->GetSize();
+ VkDeviceSize dstOffset = 0;
+
+ bool end = false;
+ for (size_t srcBlockInfoIndex = 0; !end && srcBlockInfoIndex < blockCount; ++srcBlockInfoIndex) {
+ const size_t srcOrigBlockIndex = m_BlockInfos[srcBlockInfoIndex].origBlockIndex;
+ VmaDeviceMemoryBlock *const pSrcBlock = m_pBlockVector->GetBlock(srcOrigBlockIndex);
+ VmaBlockMetadata_Generic *const pSrcMetadata = (VmaBlockMetadata_Generic *)pSrcBlock->m_pMetadata;
+ for (VmaSuballocationList::iterator srcSuballocIt = pSrcMetadata->m_Suballocations.begin();
+ !end && srcSuballocIt != pSrcMetadata->m_Suballocations.end();) {
+ VmaAllocation_T *const pAlloc = srcSuballocIt->hAllocation;
+ const VkDeviceSize srcAllocAlignment = pAlloc->GetAlignment();
+ const VkDeviceSize srcAllocSize = srcSuballocIt->size;
+ if (m_AllocationsMoved == maxAllocationsToMove ||
+ m_BytesMoved + srcAllocSize > maxBytesToMove) {
+ end = true;
+ break;
+ }
+ const VkDeviceSize srcAllocOffset = srcSuballocIt->offset;
+
+ // Try to place it in one of free spaces from the database.
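+			// freeSpaceDb remembers free ranges this pass has already skipped over or
+			// left at the tail of filled destination blocks (see the Register() calls below).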
+ size_t freeSpaceInfoIndex;
+ VkDeviceSize dstAllocOffset;
+ if (freeSpaceDb.Fetch(srcAllocAlignment, srcAllocSize,
+ freeSpaceInfoIndex, dstAllocOffset)) {
+ size_t freeSpaceOrigBlockIndex = m_BlockInfos[freeSpaceInfoIndex].origBlockIndex;
+ VmaDeviceMemoryBlock *pFreeSpaceBlock = m_pBlockVector->GetBlock(freeSpaceOrigBlockIndex);
+ VmaBlockMetadata_Generic *pFreeSpaceMetadata = (VmaBlockMetadata_Generic *)pFreeSpaceBlock->m_pMetadata;
+
+ // Same block
+ if (freeSpaceInfoIndex == srcBlockInfoIndex) {
+ VMA_ASSERT(dstAllocOffset <= srcAllocOffset);
+
+ // MOVE OPTION 1: Move the allocation inside the same block by decreasing offset.
+
+ VmaSuballocation suballoc = *srcSuballocIt;
+ suballoc.offset = dstAllocOffset;
+ suballoc.hAllocation->ChangeOffset(dstAllocOffset);
+ m_BytesMoved += srcAllocSize;
+ ++m_AllocationsMoved;
+
+ VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
+ ++nextSuballocIt;
+ pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
+ srcSuballocIt = nextSuballocIt;
+
+ InsertSuballoc(pFreeSpaceMetadata, suballoc);
+
+ VmaDefragmentationMove move = {
+ srcOrigBlockIndex, freeSpaceOrigBlockIndex,
+ srcAllocOffset, dstAllocOffset,
+ srcAllocSize
+ };
+ moves.push_back(move);
+ }
+ // Different block
+ else {
+ // MOVE OPTION 2: Move the allocation to a different block.
+
+ VMA_ASSERT(freeSpaceInfoIndex < srcBlockInfoIndex);
+
+ VmaSuballocation suballoc = *srcSuballocIt;
+ suballoc.offset = dstAllocOffset;
+ suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pFreeSpaceBlock, dstAllocOffset);
+ m_BytesMoved += srcAllocSize;
+ ++m_AllocationsMoved;
+
+ VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
+ ++nextSuballocIt;
+ pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
+ srcSuballocIt = nextSuballocIt;
+
+ InsertSuballoc(pFreeSpaceMetadata, suballoc);
+
+ VmaDefragmentationMove move = {
+ srcOrigBlockIndex, freeSpaceOrigBlockIndex,
+ srcAllocOffset, dstAllocOffset,
+ srcAllocSize
+ };
+ moves.push_back(move);
+ }
+ } else {
+ dstAllocOffset = VmaAlignUp(dstOffset, srcAllocAlignment);
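+				// e.g. dstOffset == 100 with alignment 16 rounds up to dstAllocOffset == 112.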
+
+				// If the allocation doesn't fit before the end of dstBlock, advance to the next block.
+ while (dstBlockInfoIndex < srcBlockInfoIndex &&
+ dstAllocOffset + srcAllocSize > dstBlockSize) {
+ // But before that, register remaining free space at the end of dst block.
+ freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, dstBlockSize - dstOffset);
+
+ ++dstBlockInfoIndex;
+ dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex;
+ pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex);
+ pDstMetadata = (VmaBlockMetadata_Generic *)pDstBlock->m_pMetadata;
+ dstBlockSize = pDstMetadata->GetSize();
+ dstOffset = 0;
+ dstAllocOffset = 0;
+ }
+
+ // Same block
+ if (dstBlockInfoIndex == srcBlockInfoIndex) {
+ VMA_ASSERT(dstAllocOffset <= srcAllocOffset);
+
+ const bool overlap = dstAllocOffset + srcAllocSize > srcAllocOffset;
+
+ bool skipOver = overlap;
+ if (overlap && m_OverlappingMoveSupported && dstAllocOffset < srcAllocOffset) {
+						// If destination and source places overlap, skip the move if it
+						// would shift the allocation by less than 1/64 of its size.
+ skipOver = (srcAllocOffset - dstAllocOffset) * 64 < srcAllocSize;
+ }
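+					// e.g. for a 64 KiB allocation, an in-place shift of less than 1 KiB
+					// is considered not worth the copy and is skipped.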
+
+ if (skipOver) {
+ freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, srcAllocOffset - dstOffset);
+
+ dstOffset = srcAllocOffset + srcAllocSize;
+ ++srcSuballocIt;
+ }
+ // MOVE OPTION 1: Move the allocation inside the same block by decreasing offset.
+ else {
+ srcSuballocIt->offset = dstAllocOffset;
+ srcSuballocIt->hAllocation->ChangeOffset(dstAllocOffset);
+ dstOffset = dstAllocOffset + srcAllocSize;
+ m_BytesMoved += srcAllocSize;
+ ++m_AllocationsMoved;
+ ++srcSuballocIt;
+ VmaDefragmentationMove move = {
+ srcOrigBlockIndex, dstOrigBlockIndex,
+ srcAllocOffset, dstAllocOffset,
+ srcAllocSize
+ };
+ moves.push_back(move);
+ }
+ }
+ // Different block
+ else {
+ // MOVE OPTION 2: Move the allocation to a different block.
+
+ VMA_ASSERT(dstBlockInfoIndex < srcBlockInfoIndex);
+ VMA_ASSERT(dstAllocOffset + srcAllocSize <= dstBlockSize);
+
+ VmaSuballocation suballoc = *srcSuballocIt;
+ suballoc.offset = dstAllocOffset;
+ suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlock, dstAllocOffset);
+ dstOffset = dstAllocOffset + srcAllocSize;
+ m_BytesMoved += srcAllocSize;
+ ++m_AllocationsMoved;
+
+ VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
+ ++nextSuballocIt;
+ pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
+ srcSuballocIt = nextSuballocIt;
+
+ pDstMetadata->m_Suballocations.push_back(suballoc);
+
+ VmaDefragmentationMove move = {
+ srcOrigBlockIndex, dstOrigBlockIndex,
+ srcAllocOffset, dstAllocOffset,
+ srcAllocSize
+ };
+ moves.push_back(move);
+ }
+ }
+ }
+ }
+
+ m_BlockInfos.clear();
+
+ PostprocessMetadata();
+
+ return VK_SUCCESS;
+}
+
+void VmaDefragmentationAlgorithm_Fast::PreprocessMetadata() {
+ const size_t blockCount = m_pBlockVector->GetBlockCount();
+ for (size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex) {
+ VmaBlockMetadata_Generic *const pMetadata =
+ (VmaBlockMetadata_Generic *)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata;
+ pMetadata->m_FreeCount = 0;
+ pMetadata->m_SumFreeSize = pMetadata->GetSize();
+ pMetadata->m_FreeSuballocationsBySize.clear();
+ for (VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin();
+ it != pMetadata->m_Suballocations.end();) {
+ if (it->type == VMA_SUBALLOCATION_TYPE_FREE) {
+ VmaSuballocationList::iterator nextIt = it;
+ ++nextIt;
+ pMetadata->m_Suballocations.erase(it);
+ it = nextIt;
+ } else {
+ ++it;
+ }
+ }
+ }
+}
+
+void VmaDefragmentationAlgorithm_Fast::PostprocessMetadata() {
+ const size_t blockCount = m_pBlockVector->GetBlockCount();
+ for (size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex) {
+ VmaBlockMetadata_Generic *const pMetadata =
+ (VmaBlockMetadata_Generic *)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata;
+ const VkDeviceSize blockSize = pMetadata->GetSize();
+
+ // No allocations in this block - entire area is free.
+ if (pMetadata->m_Suballocations.empty()) {
+ pMetadata->m_FreeCount = 1;
+ //pMetadata->m_SumFreeSize is already set to blockSize.
+ VmaSuballocation suballoc = {
+ 0, // offset
+ blockSize, // size
+ VMA_NULL, // hAllocation
+ VMA_SUBALLOCATION_TYPE_FREE
+ };
+ pMetadata->m_Suballocations.push_back(suballoc);
+ pMetadata->RegisterFreeSuballocation(pMetadata->m_Suballocations.begin());
+ }
+ // There are some allocations in this block.
+ else {
+ VkDeviceSize offset = 0;
+ VmaSuballocationList::iterator it;
+ for (it = pMetadata->m_Suballocations.begin();
+ it != pMetadata->m_Suballocations.end();
+ ++it) {
+ VMA_ASSERT(it->type != VMA_SUBALLOCATION_TYPE_FREE);
+ VMA_ASSERT(it->offset >= offset);
+
+ // Need to insert preceding free space.
+ if (it->offset > offset) {
+ ++pMetadata->m_FreeCount;
+ const VkDeviceSize freeSize = it->offset - offset;
+ VmaSuballocation suballoc = {
+ offset, // offset
+ freeSize, // size
+ VMA_NULL, // hAllocation
+ VMA_SUBALLOCATION_TYPE_FREE
+ };
+ VmaSuballocationList::iterator precedingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc);
+ if (freeSize >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER) {
+ pMetadata->m_FreeSuballocationsBySize.push_back(precedingFreeIt);
+ }
+ }
+
+ pMetadata->m_SumFreeSize -= it->size;
+ offset = it->offset + it->size;
+ }
+
+ // Need to insert trailing free space.
+ if (offset < blockSize) {
+ ++pMetadata->m_FreeCount;
+ const VkDeviceSize freeSize = blockSize - offset;
+ VmaSuballocation suballoc = {
+ offset, // offset
+ freeSize, // size
+ VMA_NULL, // hAllocation
+ VMA_SUBALLOCATION_TYPE_FREE
+ };
+ VMA_ASSERT(it == pMetadata->m_Suballocations.end());
+ VmaSuballocationList::iterator trailingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc);
+				if (freeSize >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER) {
+ pMetadata->m_FreeSuballocationsBySize.push_back(trailingFreeIt);
+ }
+ }
+
+ VMA_SORT(
+ pMetadata->m_FreeSuballocationsBySize.begin(),
+ pMetadata->m_FreeSuballocationsBySize.end(),
+ VmaSuballocationItemSizeLess());
+ }
+
+ VMA_HEAVY_ASSERT(pMetadata->Validate());
+ }
+}
+
+void VmaDefragmentationAlgorithm_Fast::InsertSuballoc(VmaBlockMetadata_Generic *pMetadata, const VmaSuballocation &suballoc) {
+ // TODO: Optimize somehow. Remember iterator instead of searching for it linearly.
+ VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin();
+	while (it != pMetadata->m_Suballocations.end() && it->offset < suballoc.offset) {
+		++it;
+	}
+ pMetadata->m_Suballocations.insert(it, suballoc);
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// VmaBlockVectorDefragmentationContext
+
+VmaBlockVectorDefragmentationContext::VmaBlockVectorDefragmentationContext(
+ VmaAllocator hAllocator,
+ VmaPool hCustomPool,
+ VmaBlockVector *pBlockVector,
+ uint32_t currFrameIndex,
+ uint32_t algorithmFlags) :
+ res(VK_SUCCESS),
+ mutexLocked(false),
+ blockContexts(VmaStlAllocator<VmaBlockDefragmentationContext>(hAllocator->GetAllocationCallbacks())),
+ m_hAllocator(hAllocator),
+ m_hCustomPool(hCustomPool),
+ m_pBlockVector(pBlockVector),
+ m_CurrFrameIndex(currFrameIndex),
+ m_AlgorithmFlags(algorithmFlags),
+ m_pAlgorithm(VMA_NULL),
+ m_Allocations(VmaStlAllocator<AllocInfo>(hAllocator->GetAllocationCallbacks())),
+ m_AllAllocations(false) {
+}
+
+VmaBlockVectorDefragmentationContext::~VmaBlockVectorDefragmentationContext() {
+ vma_delete(m_hAllocator, m_pAlgorithm);
+}
+
+void VmaBlockVectorDefragmentationContext::AddAllocation(VmaAllocation hAlloc, VkBool32 *pChanged) {
+ AllocInfo info = { hAlloc, pChanged };
+ m_Allocations.push_back(info);
+}
+
+void VmaBlockVectorDefragmentationContext::Begin(bool overlappingMoveSupported) {
+ const bool allAllocations = m_AllAllocations ||
+ m_Allocations.size() == m_pBlockVector->CalcAllocationCount();
+
+ /********************************
+ HERE IS THE CHOICE OF DEFRAGMENTATION ALGORITHM.
+ ********************************/
+
+ /*
+ Fast algorithm is supported only when certain criteria are met:
+ - VMA_DEBUG_MARGIN is 0.
+ - All allocations in this block vector are moveable.
+ - There is no possibility of image/buffer granularity conflict.
+ */
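+	// Under these conditions every suballocation is movable and buffer/image
+	// granularity cannot cause conflicts, so the single-pass compacting algorithm is safe.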
+ if (VMA_DEBUG_MARGIN == 0 &&
+ allAllocations &&
+ !m_pBlockVector->IsBufferImageGranularityConflictPossible()) {
+ m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Fast)(
+ m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported);
+ } else {
+ m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Generic)(
+ m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported);
+ }
+
+ if (allAllocations) {
+ m_pAlgorithm->AddAll();
+ } else {
+ for (size_t i = 0, count = m_Allocations.size(); i < count; ++i) {
+ m_pAlgorithm->AddAllocation(m_Allocations[i].hAlloc, m_Allocations[i].pChanged);
+ }
+ }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// VmaDefragmentationContext
+
+VmaDefragmentationContext_T::VmaDefragmentationContext_T(
+ VmaAllocator hAllocator,
+ uint32_t currFrameIndex,
+ uint32_t flags,
+ VmaDefragmentationStats *pStats) :
+ m_hAllocator(hAllocator),
+ m_CurrFrameIndex(currFrameIndex),
+ m_Flags(flags),
+ m_pStats(pStats),
+ m_CustomPoolContexts(VmaStlAllocator<VmaBlockVectorDefragmentationContext *>(hAllocator->GetAllocationCallbacks())) {
+ memset(m_DefaultPoolContexts, 0, sizeof(m_DefaultPoolContexts));
+}
+
+VmaDefragmentationContext_T::~VmaDefragmentationContext_T() {
+ for (size_t i = m_CustomPoolContexts.size(); i--;) {
+ VmaBlockVectorDefragmentationContext *pBlockVectorCtx = m_CustomPoolContexts[i];
+ pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_pStats);
+ vma_delete(m_hAllocator, pBlockVectorCtx);
+ }
+ for (size_t i = m_hAllocator->m_MemProps.memoryTypeCount; i--;) {
+ VmaBlockVectorDefragmentationContext *pBlockVectorCtx = m_DefaultPoolContexts[i];
+ if (pBlockVectorCtx) {
+ pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_pStats);
+ vma_delete(m_hAllocator, pBlockVectorCtx);
+ }
+ }
+}
+
+void VmaDefragmentationContext_T::AddPools(uint32_t poolCount, VmaPool *pPools) {
+ for (uint32_t poolIndex = 0; poolIndex < poolCount; ++poolIndex) {
+ VmaPool pool = pPools[poolIndex];
+ VMA_ASSERT(pool);
+ // Pools with algorithm other than default are not defragmented.
+ if (pool->m_BlockVector.GetAlgorithm() == 0) {
+ VmaBlockVectorDefragmentationContext *pBlockVectorDefragCtx = VMA_NULL;
+
+ for (size_t i = m_CustomPoolContexts.size(); i--;) {
+ if (m_CustomPoolContexts[i]->GetCustomPool() == pool) {
+ pBlockVectorDefragCtx = m_CustomPoolContexts[i];
+ break;
+ }
+ }
+
+ if (!pBlockVectorDefragCtx) {
+ pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
+ m_hAllocator,
+ pool,
+ &pool->m_BlockVector,
+ m_CurrFrameIndex,
+ m_Flags);
+ m_CustomPoolContexts.push_back(pBlockVectorDefragCtx);
+ }
+
+ pBlockVectorDefragCtx->AddAll();
+ }
+ }
+}
+
+void VmaDefragmentationContext_T::AddAllocations(
+ uint32_t allocationCount,
+ VmaAllocation *pAllocations,
+ VkBool32 *pAllocationsChanged) {
+	// Dispatch pAllocations among per-block-vector defragmentation contexts. Create them when necessary.
+ for (uint32_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex) {
+ const VmaAllocation hAlloc = pAllocations[allocIndex];
+ VMA_ASSERT(hAlloc);
+ // DedicatedAlloc cannot be defragmented.
+ if ((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
+ // Lost allocation cannot be defragmented.
+ (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)) {
+ VmaBlockVectorDefragmentationContext *pBlockVectorDefragCtx = VMA_NULL;
+
+ const VmaPool hAllocPool = hAlloc->GetBlock()->GetParentPool();
+ // This allocation belongs to custom pool.
+ if (hAllocPool != VK_NULL_HANDLE) {
+ // Pools with algorithm other than default are not defragmented.
+ if (hAllocPool->m_BlockVector.GetAlgorithm() == 0) {
+ for (size_t i = m_CustomPoolContexts.size(); i--;) {
+ if (m_CustomPoolContexts[i]->GetCustomPool() == hAllocPool) {
+ pBlockVectorDefragCtx = m_CustomPoolContexts[i];
+ break;
+ }
+ }
+ if (!pBlockVectorDefragCtx) {
+ pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
+ m_hAllocator,
+ hAllocPool,
+ &hAllocPool->m_BlockVector,
+ m_CurrFrameIndex,
+ m_Flags);
+ m_CustomPoolContexts.push_back(pBlockVectorDefragCtx);
+ }
+ }
+ }
+ // This allocation belongs to default pool.
+ else {
+ const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
+ pBlockVectorDefragCtx = m_DefaultPoolContexts[memTypeIndex];
+ if (!pBlockVectorDefragCtx) {
+ pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
+ m_hAllocator,
+ VMA_NULL, // hCustomPool
+ m_hAllocator->m_pBlockVectors[memTypeIndex],
+ m_CurrFrameIndex,
+ m_Flags);
+ m_DefaultPoolContexts[memTypeIndex] = pBlockVectorDefragCtx;
+ }
+ }
+
+ if (pBlockVectorDefragCtx) {
+ VkBool32 *const pChanged = (pAllocationsChanged != VMA_NULL) ?
+ &pAllocationsChanged[allocIndex] :
+ VMA_NULL;
+ pBlockVectorDefragCtx->AddAllocation(hAlloc, pChanged);
+ }
+ }
+ }
+}
+
+VkResult VmaDefragmentationContext_T::Defragment(
+ VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove,
+ VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove,
+ VkCommandBuffer commandBuffer, VmaDefragmentationStats *pStats) {
+ if (pStats) {
+ memset(pStats, 0, sizeof(VmaDefragmentationStats));
+ }
+
+ if (commandBuffer == VK_NULL_HANDLE) {
+ maxGpuBytesToMove = 0;
+ maxGpuAllocationsToMove = 0;
+ }
+
+ VkResult res = VK_SUCCESS;
+
+ // Process default pools.
+ for (uint32_t memTypeIndex = 0;
+ memTypeIndex < m_hAllocator->GetMemoryTypeCount() && res >= VK_SUCCESS;
+ ++memTypeIndex) {
+ VmaBlockVectorDefragmentationContext *pBlockVectorCtx = m_DefaultPoolContexts[memTypeIndex];
+ if (pBlockVectorCtx) {
+ VMA_ASSERT(pBlockVectorCtx->GetBlockVector());
+ pBlockVectorCtx->GetBlockVector()->Defragment(
+ pBlockVectorCtx,
+ pStats,
+ maxCpuBytesToMove, maxCpuAllocationsToMove,
+ maxGpuBytesToMove, maxGpuAllocationsToMove,
+ commandBuffer);
+ if (pBlockVectorCtx->res != VK_SUCCESS) {
+ res = pBlockVectorCtx->res;
+ }
+ }
+ }
+
+ // Process custom pools.
+ for (size_t customCtxIndex = 0, customCtxCount = m_CustomPoolContexts.size();
+ customCtxIndex < customCtxCount && res >= VK_SUCCESS;
+ ++customCtxIndex) {
+ VmaBlockVectorDefragmentationContext *pBlockVectorCtx = m_CustomPoolContexts[customCtxIndex];
+ VMA_ASSERT(pBlockVectorCtx && pBlockVectorCtx->GetBlockVector());
+ pBlockVectorCtx->GetBlockVector()->Defragment(
+ pBlockVectorCtx,
+ pStats,
+ maxCpuBytesToMove, maxCpuAllocationsToMove,
+ maxGpuBytesToMove, maxGpuAllocationsToMove,
+ commandBuffer);
+ if (pBlockVectorCtx->res != VK_SUCCESS) {
+ res = pBlockVectorCtx->res;
+ }
+ }
+
+ return res;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// VmaRecorder
+
+#if VMA_RECORDING_ENABLED
+
+VmaRecorder::VmaRecorder() :
+ m_UseMutex(true),
+ m_Flags(0),
+ m_File(VMA_NULL),
+ m_Freq(INT64_MAX),
+ m_StartCounter(INT64_MAX) {
+}
+
+VkResult VmaRecorder::Init(const VmaRecordSettings &settings, bool useMutex) {
+ m_UseMutex = useMutex;
+ m_Flags = settings.flags;
+
+ QueryPerformanceFrequency((LARGE_INTEGER *)&m_Freq);
+ QueryPerformanceCounter((LARGE_INTEGER *)&m_StartCounter);
+
+ // Open file for writing.
+ errno_t err = fopen_s(&m_File, settings.pFilePath, "wb");
+ if (err != 0) {
+ return VK_ERROR_INITIALIZATION_FAILED;
+ }
+
+ // Write header.
+ fprintf(m_File, "%s\n", "Vulkan Memory Allocator,Calls recording");
+ fprintf(m_File, "%s\n", "1,5");
+
+ return VK_SUCCESS;
+}
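+
+// Record file format: after the header written in Init(), each intercepted call
+// becomes one CSV line of the form
+//   threadId,time,frameIndex,functionName[,args...]
+// as produced by the fprintf calls in the Record* methods below.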
+
+VmaRecorder::~VmaRecorder() {
+ if (m_File != VMA_NULL) {
+ fclose(m_File);
+ }
+}
+
+void VmaRecorder::RecordCreateAllocator(uint32_t frameIndex) {
+ CallParams callParams;
+ GetBasicParams(callParams);
+
+ VmaMutexLock lock(m_FileMutex, m_UseMutex);
+ fprintf(m_File, "%u,%.3f,%u,vmaCreateAllocator\n", callParams.threadId, callParams.time, frameIndex);
+ Flush();
+}
+
+void VmaRecorder::RecordDestroyAllocator(uint32_t frameIndex) {
+ CallParams callParams;
+ GetBasicParams(callParams);
+
+ VmaMutexLock lock(m_FileMutex, m_UseMutex);
+ fprintf(m_File, "%u,%.3f,%u,vmaDestroyAllocator\n", callParams.threadId, callParams.time, frameIndex);
+ Flush();
+}
+
+void VmaRecorder::RecordCreatePool(uint32_t frameIndex, const VmaPoolCreateInfo &createInfo, VmaPool pool) {
+ CallParams callParams;
+ GetBasicParams(callParams);
+
+ VmaMutexLock lock(m_FileMutex, m_UseMutex);
+ fprintf(m_File, "%u,%.3f,%u,vmaCreatePool,%u,%u,%llu,%llu,%llu,%u,%p\n", callParams.threadId, callParams.time, frameIndex,
+ createInfo.memoryTypeIndex,
+ createInfo.flags,
+ createInfo.blockSize,
+ (uint64_t)createInfo.minBlockCount,
+ (uint64_t)createInfo.maxBlockCount,
+ createInfo.frameInUseCount,
+ pool);
+ Flush();
+}
+
+void VmaRecorder::RecordDestroyPool(uint32_t frameIndex, VmaPool pool) {
+ CallParams callParams;
+ GetBasicParams(callParams);
+
+ VmaMutexLock lock(m_FileMutex, m_UseMutex);
+ fprintf(m_File, "%u,%.3f,%u,vmaDestroyPool,%p\n", callParams.threadId, callParams.time, frameIndex,
+ pool);
+ Flush();
+}
+
+void VmaRecorder::RecordAllocateMemory(uint32_t frameIndex,
+ const VkMemoryRequirements &vkMemReq,
+ const VmaAllocationCreateInfo &createInfo,
+ VmaAllocation allocation) {
+ CallParams callParams;
+ GetBasicParams(callParams);
+
+ VmaMutexLock lock(m_FileMutex, m_UseMutex);
+ UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
+ fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemory,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
+ vkMemReq.size,
+ vkMemReq.alignment,
+ vkMemReq.memoryTypeBits,
+ createInfo.flags,
+ createInfo.usage,
+ createInfo.requiredFlags,
+ createInfo.preferredFlags,
+ createInfo.memoryTypeBits,
+ createInfo.pool,
+ allocation,
+ userDataStr.GetString());
+ Flush();
+}
+
+void VmaRecorder::RecordAllocateMemoryPages(uint32_t frameIndex,
+ const VkMemoryRequirements &vkMemReq,
+ const VmaAllocationCreateInfo &createInfo,
+ uint64_t allocationCount,
+ const VmaAllocation *pAllocations) {
+ CallParams callParams;
+ GetBasicParams(callParams);
+
+ VmaMutexLock lock(m_FileMutex, m_UseMutex);
+ UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
+ fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryPages,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,", callParams.threadId, callParams.time, frameIndex,
+ vkMemReq.size,
+ vkMemReq.alignment,
+ vkMemReq.memoryTypeBits,
+ createInfo.flags,
+ createInfo.usage,
+ createInfo.requiredFlags,
+ createInfo.preferredFlags,
+ createInfo.memoryTypeBits,
+ createInfo.pool);
+ PrintPointerList(allocationCount, pAllocations);
+ fprintf(m_File, ",%s\n", userDataStr.GetString());
+ Flush();
+}
+
+void VmaRecorder::RecordAllocateMemoryForBuffer(uint32_t frameIndex,
+ const VkMemoryRequirements &vkMemReq,
+ bool requiresDedicatedAllocation,
+ bool prefersDedicatedAllocation,
+ const VmaAllocationCreateInfo &createInfo,
+ VmaAllocation allocation) {
+ CallParams callParams;
+ GetBasicParams(callParams);
+
+ VmaMutexLock lock(m_FileMutex, m_UseMutex);
+ UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
+ fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForBuffer,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
+ vkMemReq.size,
+ vkMemReq.alignment,
+ vkMemReq.memoryTypeBits,
+ requiresDedicatedAllocation ? 1 : 0,
+ prefersDedicatedAllocation ? 1 : 0,
+ createInfo.flags,
+ createInfo.usage,
+ createInfo.requiredFlags,
+ createInfo.preferredFlags,
+ createInfo.memoryTypeBits,
+ createInfo.pool,
+ allocation,
+ userDataStr.GetString());
+ Flush();
+}
+
+void VmaRecorder::RecordAllocateMemoryForImage(uint32_t frameIndex,
+ const VkMemoryRequirements &vkMemReq,
+ bool requiresDedicatedAllocation,
+ bool prefersDedicatedAllocation,
+ const VmaAllocationCreateInfo &createInfo,
+ VmaAllocation allocation) {
+ CallParams callParams;
+ GetBasicParams(callParams);
+
+ VmaMutexLock lock(m_FileMutex, m_UseMutex);
+ UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
+ fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForImage,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
+ vkMemReq.size,
+ vkMemReq.alignment,
+ vkMemReq.memoryTypeBits,
+ requiresDedicatedAllocation ? 1 : 0,
+ prefersDedicatedAllocation ? 1 : 0,
+ createInfo.flags,
+ createInfo.usage,
+ createInfo.requiredFlags,
+ createInfo.preferredFlags,
+ createInfo.memoryTypeBits,
+ createInfo.pool,
+ allocation,
+ userDataStr.GetString());
+ Flush();
+}
+
+void VmaRecorder::RecordFreeMemory(uint32_t frameIndex,
+ VmaAllocation allocation) {
+ CallParams callParams;
+ GetBasicParams(callParams);
+
+ VmaMutexLock lock(m_FileMutex, m_UseMutex);
+ fprintf(m_File, "%u,%.3f,%u,vmaFreeMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
+ allocation);
+ Flush();
+}
+
+void VmaRecorder::RecordFreeMemoryPages(uint32_t frameIndex,
+ uint64_t allocationCount,
+ const VmaAllocation *pAllocations) {
+ CallParams callParams;
+ GetBasicParams(callParams);
+
+ VmaMutexLock lock(m_FileMutex, m_UseMutex);
+ fprintf(m_File, "%u,%.3f,%u,vmaFreeMemoryPages,", callParams.threadId, callParams.time, frameIndex);
+ PrintPointerList(allocationCount, pAllocations);
+ fprintf(m_File, "\n");
+ Flush();
+}
+
+void VmaRecorder::RecordResizeAllocation(
+ uint32_t frameIndex,
+ VmaAllocation allocation,
+ VkDeviceSize newSize) {
+ CallParams callParams;
+ GetBasicParams(callParams);
+
+ VmaMutexLock lock(m_FileMutex, m_UseMutex);
+ fprintf(m_File, "%u,%.3f,%u,vmaResizeAllocation,%p,%llu\n", callParams.threadId, callParams.time, frameIndex,
+ allocation, newSize);
+ Flush();
+}
+
+void VmaRecorder::RecordSetAllocationUserData(uint32_t frameIndex,
+ VmaAllocation allocation,
+ const void *pUserData) {
+ CallParams callParams;
+ GetBasicParams(callParams);
+
+ VmaMutexLock lock(m_FileMutex, m_UseMutex);
+ UserDataString userDataStr(
+ allocation->IsUserDataString() ? VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT : 0,
+ pUserData);
+ fprintf(m_File, "%u,%.3f,%u,vmaSetAllocationUserData,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
+ allocation,
+ userDataStr.GetString());
+ Flush();
+}
+
+void VmaRecorder::RecordCreateLostAllocation(uint32_t frameIndex,
+ VmaAllocation allocation) {
+ CallParams callParams;
+ GetBasicParams(callParams);
+
+ VmaMutexLock lock(m_FileMutex, m_UseMutex);
+ fprintf(m_File, "%u,%.3f,%u,vmaCreateLostAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
+ allocation);
+ Flush();
+}
+
+void VmaRecorder::RecordMapMemory(uint32_t frameIndex,
+ VmaAllocation allocation) {
+ CallParams callParams;
+ GetBasicParams(callParams);
+
+ VmaMutexLock lock(m_FileMutex, m_UseMutex);
+ fprintf(m_File, "%u,%.3f,%u,vmaMapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
+ allocation);
+ Flush();
+}
+
+void VmaRecorder::RecordUnmapMemory(uint32_t frameIndex,
+ VmaAllocation allocation) {
+ CallParams callParams;
+ GetBasicParams(callParams);
+
+ VmaMutexLock lock(m_FileMutex, m_UseMutex);
+ fprintf(m_File, "%u,%.3f,%u,vmaUnmapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
+ allocation);
+ Flush();
+}
+
+void VmaRecorder::RecordFlushAllocation(uint32_t frameIndex,
+ VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size) {
+ CallParams callParams;
+ GetBasicParams(callParams);
+
+ VmaMutexLock lock(m_FileMutex, m_UseMutex);
+ fprintf(m_File, "%u,%.3f,%u,vmaFlushAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
+ allocation,
+ offset,
+ size);
+ Flush();
+}
+
+void VmaRecorder::RecordInvalidateAllocation(uint32_t frameIndex,
+ VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size) {
+ CallParams callParams;
+ GetBasicParams(callParams);
+
+ VmaMutexLock lock(m_FileMutex, m_UseMutex);
+ fprintf(m_File, "%u,%.3f,%u,vmaInvalidateAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
+ allocation,
+ offset,
+ size);
+ Flush();
+}
+
+void VmaRecorder::RecordCreateBuffer(uint32_t frameIndex,
+ const VkBufferCreateInfo &bufCreateInfo,
+ const VmaAllocationCreateInfo &allocCreateInfo,
+ VmaAllocation allocation) {
+ CallParams callParams;
+ GetBasicParams(callParams);
+
+ VmaMutexLock lock(m_FileMutex, m_UseMutex);
+ UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
+ fprintf(m_File, "%u,%.3f,%u,vmaCreateBuffer,%u,%llu,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
+ bufCreateInfo.flags,
+ bufCreateInfo.size,
+ bufCreateInfo.usage,
+ bufCreateInfo.sharingMode,
+ allocCreateInfo.flags,
+ allocCreateInfo.usage,
+ allocCreateInfo.requiredFlags,
+ allocCreateInfo.preferredFlags,
+ allocCreateInfo.memoryTypeBits,
+ allocCreateInfo.pool,
+ allocation,
+ userDataStr.GetString());
+ Flush();
+}
+
+void VmaRecorder::RecordCreateImage(uint32_t frameIndex,
+ const VkImageCreateInfo &imageCreateInfo,
+ const VmaAllocationCreateInfo &allocCreateInfo,
+ VmaAllocation allocation) {
+ CallParams callParams;
+ GetBasicParams(callParams);
+
+ VmaMutexLock lock(m_FileMutex, m_UseMutex);
+ UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
+ fprintf(m_File, "%u,%.3f,%u,vmaCreateImage,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
+ imageCreateInfo.flags,
+ imageCreateInfo.imageType,
+ imageCreateInfo.format,
+ imageCreateInfo.extent.width,
+ imageCreateInfo.extent.height,
+ imageCreateInfo.extent.depth,
+ imageCreateInfo.mipLevels,
+ imageCreateInfo.arrayLayers,
+ imageCreateInfo.samples,
+ imageCreateInfo.tiling,
+ imageCreateInfo.usage,
+ imageCreateInfo.sharingMode,
+ imageCreateInfo.initialLayout,
+ allocCreateInfo.flags,
+ allocCreateInfo.usage,
+ allocCreateInfo.requiredFlags,
+ allocCreateInfo.preferredFlags,
+ allocCreateInfo.memoryTypeBits,
+ allocCreateInfo.pool,
+ allocation,
+ userDataStr.GetString());
+ Flush();
+}
+
+void VmaRecorder::RecordDestroyBuffer(uint32_t frameIndex,
+ VmaAllocation allocation) {
+ CallParams callParams;
+ GetBasicParams(callParams);
+
+ VmaMutexLock lock(m_FileMutex, m_UseMutex);
+ fprintf(m_File, "%u,%.3f,%u,vmaDestroyBuffer,%p\n", callParams.threadId, callParams.time, frameIndex,
+ allocation);
+ Flush();
+}
+
+void VmaRecorder::RecordDestroyImage(uint32_t frameIndex,
+ VmaAllocation allocation) {
+ CallParams callParams;
+ GetBasicParams(callParams);
+
+ VmaMutexLock lock(m_FileMutex, m_UseMutex);
+ fprintf(m_File, "%u,%.3f,%u,vmaDestroyImage,%p\n", callParams.threadId, callParams.time, frameIndex,
+ allocation);
+ Flush();
+}
+
+void VmaRecorder::RecordTouchAllocation(uint32_t frameIndex,
+ VmaAllocation allocation) {
+ CallParams callParams;
+ GetBasicParams(callParams);
+
+ VmaMutexLock lock(m_FileMutex, m_UseMutex);
+ fprintf(m_File, "%u,%.3f,%u,vmaTouchAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
+ allocation);
+ Flush();
+}
+
+void VmaRecorder::RecordGetAllocationInfo(uint32_t frameIndex,
+ VmaAllocation allocation) {
+ CallParams callParams;
+ GetBasicParams(callParams);
+
+ VmaMutexLock lock(m_FileMutex, m_UseMutex);
+ fprintf(m_File, "%u,%.3f,%u,vmaGetAllocationInfo,%p\n", callParams.threadId, callParams.time, frameIndex,
+ allocation);
+ Flush();
+}
+
+void VmaRecorder::RecordMakePoolAllocationsLost(uint32_t frameIndex,
+ VmaPool pool) {
+ CallParams callParams;
+ GetBasicParams(callParams);
+
+ VmaMutexLock lock(m_FileMutex, m_UseMutex);
+ fprintf(m_File, "%u,%.3f,%u,vmaMakePoolAllocationsLost,%p\n", callParams.threadId, callParams.time, frameIndex,
+ pool);
+ Flush();
+}
+
+void VmaRecorder::RecordDefragmentationBegin(uint32_t frameIndex,
+ const VmaDefragmentationInfo2 &info,
+ VmaDefragmentationContext ctx) {
+ CallParams callParams;
+ GetBasicParams(callParams);
+
+ VmaMutexLock lock(m_FileMutex, m_UseMutex);
+ fprintf(m_File, "%u,%.3f,%u,vmaDefragmentationBegin,%u,", callParams.threadId, callParams.time, frameIndex,
+ info.flags);
+ PrintPointerList(info.allocationCount, info.pAllocations);
+ fprintf(m_File, ",");
+ PrintPointerList(info.poolCount, info.pPools);
+ fprintf(m_File, ",%llu,%u,%llu,%u,%p,%p\n",
+ info.maxCpuBytesToMove,
+ info.maxCpuAllocationsToMove,
+ info.maxGpuBytesToMove,
+ info.maxGpuAllocationsToMove,
+ info.commandBuffer,
+ ctx);
+ Flush();
+}
+
+void VmaRecorder::RecordDefragmentationEnd(uint32_t frameIndex,
+ VmaDefragmentationContext ctx) {
+ CallParams callParams;
+ GetBasicParams(callParams);
+
+ VmaMutexLock lock(m_FileMutex, m_UseMutex);
+ fprintf(m_File, "%u,%.3f,%u,vmaDefragmentationEnd,%p\n", callParams.threadId, callParams.time, frameIndex,
+ ctx);
+ Flush();
+}
+
+VmaRecorder::UserDataString::UserDataString(VmaAllocationCreateFlags allocFlags, const void *pUserData) {
+ if (pUserData != VMA_NULL) {
+ if ((allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0) {
+ m_Str = (const char *)pUserData;
+ } else {
+ sprintf_s(m_PtrStr, "%p", pUserData);
+ m_Str = m_PtrStr;
+ }
+ } else {
+ m_Str = "";
+ }
+}
+
+void VmaRecorder::WriteConfiguration(
+ const VkPhysicalDeviceProperties &devProps,
+ const VkPhysicalDeviceMemoryProperties &memProps,
+ bool dedicatedAllocationExtensionEnabled) {
+ fprintf(m_File, "Config,Begin\n");
+
+ fprintf(m_File, "PhysicalDevice,apiVersion,%u\n", devProps.apiVersion);
+ fprintf(m_File, "PhysicalDevice,driverVersion,%u\n", devProps.driverVersion);
+ fprintf(m_File, "PhysicalDevice,vendorID,%u\n", devProps.vendorID);
+ fprintf(m_File, "PhysicalDevice,deviceID,%u\n", devProps.deviceID);
+ fprintf(m_File, "PhysicalDevice,deviceType,%u\n", devProps.deviceType);
+ fprintf(m_File, "PhysicalDevice,deviceName,%s\n", devProps.deviceName);
+
+ fprintf(m_File, "PhysicalDeviceLimits,maxMemoryAllocationCount,%u\n", devProps.limits.maxMemoryAllocationCount);
+ fprintf(m_File, "PhysicalDeviceLimits,bufferImageGranularity,%llu\n", devProps.limits.bufferImageGranularity);
+ fprintf(m_File, "PhysicalDeviceLimits,nonCoherentAtomSize,%llu\n", devProps.limits.nonCoherentAtomSize);
+
+ fprintf(m_File, "PhysicalDeviceMemory,HeapCount,%u\n", memProps.memoryHeapCount);
+ for (uint32_t i = 0; i < memProps.memoryHeapCount; ++i) {
+ fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,size,%llu\n", i, memProps.memoryHeaps[i].size);
+ fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,flags,%u\n", i, memProps.memoryHeaps[i].flags);
+ }
+ fprintf(m_File, "PhysicalDeviceMemory,TypeCount,%u\n", memProps.memoryTypeCount);
+ for (uint32_t i = 0; i < memProps.memoryTypeCount; ++i) {
+ fprintf(m_File, "PhysicalDeviceMemory,Type,%u,heapIndex,%u\n", i, memProps.memoryTypes[i].heapIndex);
+ fprintf(m_File, "PhysicalDeviceMemory,Type,%u,propertyFlags,%u\n", i, memProps.memoryTypes[i].propertyFlags);
+ }
+
+ fprintf(m_File, "Extension,VK_KHR_dedicated_allocation,%u\n", dedicatedAllocationExtensionEnabled ? 1 : 0);
+
+ fprintf(m_File, "Macro,VMA_DEBUG_ALWAYS_DEDICATED_MEMORY,%u\n", VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ? 1 : 0);
+ fprintf(m_File, "Macro,VMA_DEBUG_ALIGNMENT,%llu\n", (VkDeviceSize)VMA_DEBUG_ALIGNMENT);
+ fprintf(m_File, "Macro,VMA_DEBUG_MARGIN,%llu\n", (VkDeviceSize)VMA_DEBUG_MARGIN);
+ fprintf(m_File, "Macro,VMA_DEBUG_INITIALIZE_ALLOCATIONS,%u\n", VMA_DEBUG_INITIALIZE_ALLOCATIONS ? 1 : 0);
+ fprintf(m_File, "Macro,VMA_DEBUG_DETECT_CORRUPTION,%u\n", VMA_DEBUG_DETECT_CORRUPTION ? 1 : 0);
+ fprintf(m_File, "Macro,VMA_DEBUG_GLOBAL_MUTEX,%u\n", VMA_DEBUG_GLOBAL_MUTEX ? 1 : 0);
+ fprintf(m_File, "Macro,VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY,%llu\n", (VkDeviceSize)VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY);
+ fprintf(m_File, "Macro,VMA_SMALL_HEAP_MAX_SIZE,%llu\n", (VkDeviceSize)VMA_SMALL_HEAP_MAX_SIZE);
+ fprintf(m_File, "Macro,VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE,%llu\n", (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
+
+ fprintf(m_File, "Config,End\n");
+}
+
+void VmaRecorder::GetBasicParams(CallParams &outParams) {
+ outParams.threadId = GetCurrentThreadId();
+
+ LARGE_INTEGER counter;
+ QueryPerformanceCounter(&counter);
+ outParams.time = (double)(counter.QuadPart - m_StartCounter) / (double)m_Freq;
+}
+
+void VmaRecorder::PrintPointerList(uint64_t count, const VmaAllocation *pItems) {
+ if (count) {
+ fprintf(m_File, "%p", pItems[0]);
+ for (uint64_t i = 1; i < count; ++i) {
+ fprintf(m_File, " %p", pItems[i]);
+ }
+ }
+}
+
+void VmaRecorder::Flush() {
+ if ((m_Flags & VMA_RECORD_FLUSH_AFTER_CALL_BIT) != 0) {
+ fflush(m_File);
+ }
+}
+
+#endif // #if VMA_RECORDING_ENABLED
+
+////////////////////////////////////////////////////////////////////////////////
+// VmaAllocationObjectAllocator
+
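+// Thread-safe pool of VmaAllocation_T objects; m_Allocator carves them out of
+// 1024-item blocks to avoid a heap round-trip per allocation object.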
+VmaAllocationObjectAllocator::VmaAllocationObjectAllocator(const VkAllocationCallbacks *pAllocationCallbacks) :
+ m_Allocator(pAllocationCallbacks, 1024) {
+}
+
+VmaAllocation VmaAllocationObjectAllocator::Allocate() {
+ VmaMutexLock mutexLock(m_Mutex);
+ return m_Allocator.Alloc();
+}
+
+void VmaAllocationObjectAllocator::Free(VmaAllocation hAlloc) {
+ VmaMutexLock mutexLock(m_Mutex);
+ m_Allocator.Free(hAlloc);
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// VmaAllocator_T
+
+VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo *pCreateInfo) :
+ m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
+ m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
+ m_hDevice(pCreateInfo->device),
+ m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
+ m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
+ *pCreateInfo->pAllocationCallbacks :
+ VmaEmptyAllocationCallbacks),
+ m_AllocationObjectAllocator(&m_AllocationCallbacks),
+ m_PreferredLargeHeapBlockSize(0),
+ m_PhysicalDevice(pCreateInfo->physicalDevice),
+ m_CurrentFrameIndex(0),
+ m_GpuDefragmentationMemoryTypeBits(UINT32_MAX),
+ m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks())),
+ m_NextPoolId(0)
+#if VMA_RECORDING_ENABLED
+ ,
+ m_pRecorder(VMA_NULL)
+#endif
+{
+ if (VMA_DEBUG_DETECT_CORRUPTION) {
+		// Needs to be a multiple of uint32_t size because we are going to write VMA_CORRUPTION_DETECTION_MAGIC_VALUE to it.
+ VMA_ASSERT(VMA_DEBUG_MARGIN % sizeof(uint32_t) == 0);
+ }
+
+ VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
+
+#if !(VMA_DEDICATED_ALLOCATION)
+ if ((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0) {
+ VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros.");
+ }
+#endif
+
+ memset(&m_DeviceMemoryCallbacks, 0, sizeof(m_DeviceMemoryCallbacks));
+ memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
+ memset(&m_MemProps, 0, sizeof(m_MemProps));
+
+ memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
+ memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
+
+ for (uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i) {
+ m_HeapSizeLimit[i] = VK_WHOLE_SIZE;
+ }
+
+ if (pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL) {
+ m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
+ m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
+ }
+
+ ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
+
+ (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
+ (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
+
+ VMA_ASSERT(VmaIsPow2(VMA_DEBUG_ALIGNMENT));
+ VMA_ASSERT(VmaIsPow2(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY));
+ VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.bufferImageGranularity));
+ VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.nonCoherentAtomSize));
+
+ m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
+ pCreateInfo->preferredLargeHeapBlockSize :
+ static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
+
+ if (pCreateInfo->pHeapSizeLimit != VMA_NULL) {
+ for (uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex) {
+ const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
+ if (limit != VK_WHOLE_SIZE) {
+ m_HeapSizeLimit[heapIndex] = limit;
+ if (limit < m_MemProps.memoryHeaps[heapIndex].size) {
+ m_MemProps.memoryHeaps[heapIndex].size = limit;
+ }
+ }
+ }
+ }
+
+ for (uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex) {
+ const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
+
+ m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
+ this,
+ VK_NULL_HANDLE, // hParentPool
+ memTypeIndex,
+ preferredBlockSize,
+ 0,
+ SIZE_MAX,
+ GetBufferImageGranularity(),
+ pCreateInfo->frameInUseCount,
+ false, // isCustomPool
+ false, // explicitBlockSize
+ false); // linearAlgorithm
+		// No need to call m_pBlockVectors[memTypeIndex]->CreateMinBlocks here,
+		// because minBlockCount is 0.
+ m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
+ }
+}
+
+VkResult VmaAllocator_T::Init(const VmaAllocatorCreateInfo *pCreateInfo) {
+ VkResult res = VK_SUCCESS;
+
+ if (pCreateInfo->pRecordSettings != VMA_NULL &&
+ !VmaStrIsEmpty(pCreateInfo->pRecordSettings->pFilePath)) {
+#if VMA_RECORDING_ENABLED
+ m_pRecorder = vma_new(this, VmaRecorder)();
+ res = m_pRecorder->Init(*pCreateInfo->pRecordSettings, m_UseMutex);
+ if (res != VK_SUCCESS) {
+ return res;
+ }
+ m_pRecorder->WriteConfiguration(
+ m_PhysicalDeviceProperties,
+ m_MemProps,
+ m_UseKhrDedicatedAllocation);
+ m_pRecorder->RecordCreateAllocator(GetCurrentFrameIndex());
+#else
+ VMA_ASSERT(0 && "VmaAllocatorCreateInfo::pRecordSettings used, but not supported due to VMA_RECORDING_ENABLED not defined to 1.");
+ return VK_ERROR_FEATURE_NOT_PRESENT;
+#endif
+ }
+
+ return res;
+}
+
+VmaAllocator_T::~VmaAllocator_T() {
+#if VMA_RECORDING_ENABLED
+ if (m_pRecorder != VMA_NULL) {
+ m_pRecorder->RecordDestroyAllocator(GetCurrentFrameIndex());
+ vma_delete(this, m_pRecorder);
+ }
+#endif
+
+ VMA_ASSERT(m_Pools.empty());
+
+ for (size_t i = GetMemoryTypeCount(); i--;) {
+ if (m_pDedicatedAllocations[i] != VMA_NULL && !m_pDedicatedAllocations[i]->empty()) {
+ VMA_ASSERT(0 && "Unfreed dedicated allocations found.");
+ }
+
+ vma_delete(this, m_pDedicatedAllocations[i]);
+ vma_delete(this, m_pBlockVectors[i]);
+ }
+}
+
+void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions *pVulkanFunctions) {
+#if VMA_STATIC_VULKAN_FUNCTIONS == 1
+ m_VulkanFunctions.vkGetPhysicalDeviceProperties = (PFN_vkGetPhysicalDeviceProperties)vkGetPhysicalDeviceProperties;
+ m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = (PFN_vkGetPhysicalDeviceMemoryProperties)vkGetPhysicalDeviceMemoryProperties;
+ m_VulkanFunctions.vkAllocateMemory = (PFN_vkAllocateMemory)vkAllocateMemory;
+ m_VulkanFunctions.vkFreeMemory = (PFN_vkFreeMemory)vkFreeMemory;
+ m_VulkanFunctions.vkMapMemory = (PFN_vkMapMemory)vkMapMemory;
+ m_VulkanFunctions.vkUnmapMemory = (PFN_vkUnmapMemory)vkUnmapMemory;
+ m_VulkanFunctions.vkFlushMappedMemoryRanges = (PFN_vkFlushMappedMemoryRanges)vkFlushMappedMemoryRanges;
+ m_VulkanFunctions.vkInvalidateMappedMemoryRanges = (PFN_vkInvalidateMappedMemoryRanges)vkInvalidateMappedMemoryRanges;
+ m_VulkanFunctions.vkBindBufferMemory = (PFN_vkBindBufferMemory)vkBindBufferMemory;
+ m_VulkanFunctions.vkBindImageMemory = (PFN_vkBindImageMemory)vkBindImageMemory;
+ m_VulkanFunctions.vkGetBufferMemoryRequirements = (PFN_vkGetBufferMemoryRequirements)vkGetBufferMemoryRequirements;
+ m_VulkanFunctions.vkGetImageMemoryRequirements = (PFN_vkGetImageMemoryRequirements)vkGetImageMemoryRequirements;
+ m_VulkanFunctions.vkCreateBuffer = (PFN_vkCreateBuffer)vkCreateBuffer;
+ m_VulkanFunctions.vkDestroyBuffer = (PFN_vkDestroyBuffer)vkDestroyBuffer;
+ m_VulkanFunctions.vkCreateImage = (PFN_vkCreateImage)vkCreateImage;
+ m_VulkanFunctions.vkDestroyImage = (PFN_vkDestroyImage)vkDestroyImage;
+ m_VulkanFunctions.vkCmdCopyBuffer = (PFN_vkCmdCopyBuffer)vkCmdCopyBuffer;
+#if VMA_DEDICATED_ALLOCATION
+ if (m_UseKhrDedicatedAllocation) {
+ m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR =
+ (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2KHR");
+ m_VulkanFunctions.vkGetImageMemoryRequirements2KHR =
+ (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2KHR");
+ }
+#endif // #if VMA_DEDICATED_ALLOCATION
+#endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1
+
+#define VMA_COPY_IF_NOT_NULL(funcName) \
+ if (pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;
+
+ if (pVulkanFunctions != VMA_NULL) {
+ VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
+ VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
+ VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
+ VMA_COPY_IF_NOT_NULL(vkFreeMemory);
+ VMA_COPY_IF_NOT_NULL(vkMapMemory);
+ VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
+ VMA_COPY_IF_NOT_NULL(vkFlushMappedMemoryRanges);
+ VMA_COPY_IF_NOT_NULL(vkInvalidateMappedMemoryRanges);
+ VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
+ VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
+ VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
+ VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
+ VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
+ VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
+ VMA_COPY_IF_NOT_NULL(vkCreateImage);
+ VMA_COPY_IF_NOT_NULL(vkDestroyImage);
+ VMA_COPY_IF_NOT_NULL(vkCmdCopyBuffer);
+#if VMA_DEDICATED_ALLOCATION
+ VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
+ VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
+#endif
+ }
+
+#undef VMA_COPY_IF_NOT_NULL
+
+ // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
+ // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
+ VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
+ VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
+ VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
+ VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
+ VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
+ VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
+ VMA_ASSERT(m_VulkanFunctions.vkFlushMappedMemoryRanges != VMA_NULL);
+ VMA_ASSERT(m_VulkanFunctions.vkInvalidateMappedMemoryRanges != VMA_NULL);
+ VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
+ VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
+ VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
+ VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
+ VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
+ VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
+ VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
+ VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
+ VMA_ASSERT(m_VulkanFunctions.vkCmdCopyBuffer != VMA_NULL);
+#if VMA_DEDICATED_ALLOCATION
+ if (m_UseKhrDedicatedAllocation) {
+ VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
+ VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
+ }
+#endif
+}
+
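+// Heaps no larger than VMA_SMALL_HEAP_MAX_SIZE get blocks of 1/8 of the heap size;
+// larger heaps use m_PreferredLargeHeapBlockSize. With the default macros
+// (1 GiB threshold, 256 MiB block size), a 256 MiB heap gets 32 MiB blocks.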
+VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex) {
+ const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
+ const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
+ const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
+ return isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize;
+}
+
+VkResult VmaAllocator_T::AllocateMemoryOfType(
+ VkDeviceSize size,
+ VkDeviceSize alignment,
+ bool dedicatedAllocation,
+ VkBuffer dedicatedBuffer,
+ VkImage dedicatedImage,
+ const VmaAllocationCreateInfo &createInfo,
+ uint32_t memTypeIndex,
+ VmaSuballocationType suballocType,
+ size_t allocationCount,
+ VmaAllocation *pAllocations) {
+ VMA_ASSERT(pAllocations != VMA_NULL);
+ VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, AllocationCount=%zu, Size=%llu", memTypeIndex, allocationCount, size);
+
+ VmaAllocationCreateInfo finalCreateInfo = createInfo;
+
+ // If memory type is not HOST_VISIBLE, disable MAPPED.
+ if ((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
+ (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0) {
+ finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
+ }
+
+ VmaBlockVector *const blockVector = m_pBlockVectors[memTypeIndex];
+ VMA_ASSERT(blockVector);
+
+ const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
+ bool preferDedicatedMemory =
+ VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
+ dedicatedAllocation ||
+			// Heuristic: Allocate dedicated memory if the requested size is greater than half of the preferred block size.
+ size > preferredBlockSize / 2;
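+	// e.g. with a 256 MiB preferred block size, any request larger than 128 MiB is
+	// promoted to a dedicated allocation by this heuristic.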
+
+ if (preferDedicatedMemory &&
+ (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
+ finalCreateInfo.pool == VK_NULL_HANDLE) {
+ finalCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
+ }
+
+ if ((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0) {
+ if ((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0) {
+ return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+ } else {
+ return AllocateDedicatedMemory(
+ size,
+ suballocType,
+ memTypeIndex,
+ (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
+ (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
+ finalCreateInfo.pUserData,
+ dedicatedBuffer,
+ dedicatedImage,
+ allocationCount,
+ pAllocations);
+ }
+ } else {
+ VkResult res = blockVector->Allocate(
+ m_CurrentFrameIndex.load(),
+ size,
+ alignment,
+ finalCreateInfo,
+ suballocType,
+ allocationCount,
+ pAllocations);
+ if (res == VK_SUCCESS) {
+ return res;
+ }
+
+		// Block allocation failed: fall back to dedicated memory.
+ if ((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0) {
+ return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+ } else {
+ res = AllocateDedicatedMemory(
+ size,
+ suballocType,
+ memTypeIndex,
+ (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
+ (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
+ finalCreateInfo.pUserData,
+ dedicatedBuffer,
+ dedicatedImage,
+ allocationCount,
+ pAllocations);
+ if (res == VK_SUCCESS) {
+				// Succeeded: AllocateDedicatedMemory already filled pAllocations, nothing more to do here.
+ VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
+ return VK_SUCCESS;
+ } else {
+ // Everything failed: Return error code.
+ VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
+ return res;
+ }
+ }
+ }
+}
+
+VkResult VmaAllocator_T::AllocateDedicatedMemory(
+ VkDeviceSize size,
+ VmaSuballocationType suballocType,
+ uint32_t memTypeIndex,
+ bool map,
+ bool isUserDataString,
+ void *pUserData,
+ VkBuffer dedicatedBuffer,
+ VkImage dedicatedImage,
+ size_t allocationCount,
+ VmaAllocation *pAllocations) {
+ VMA_ASSERT(allocationCount > 0 && pAllocations);
+
+ VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
+ allocInfo.memoryTypeIndex = memTypeIndex;
+ allocInfo.allocationSize = size;
+
+#if VMA_DEDICATED_ALLOCATION
+ VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
+ if (m_UseKhrDedicatedAllocation) {
+ if (dedicatedBuffer != VK_NULL_HANDLE) {
+ VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
+ dedicatedAllocInfo.buffer = dedicatedBuffer;
+ allocInfo.pNext = &dedicatedAllocInfo;
+ } else if (dedicatedImage != VK_NULL_HANDLE) {
+ dedicatedAllocInfo.image = dedicatedImage;
+ allocInfo.pNext = &dedicatedAllocInfo;
+ }
+ }
+#endif // #if VMA_DEDICATED_ALLOCATION
+
+ size_t allocIndex;
+ VkResult res = VK_SUCCESS;
+ for (allocIndex = 0; allocIndex < allocationCount; ++allocIndex) {
+ res = AllocateDedicatedMemoryPage(
+ size,
+ suballocType,
+ memTypeIndex,
+ allocInfo,
+ map,
+ isUserDataString,
+ pUserData,
+ pAllocations + allocIndex);
+ if (res != VK_SUCCESS) {
+ break;
+ }
+ }
+
+ if (res == VK_SUCCESS) {
+ // Register them in m_pDedicatedAllocations.
+ {
+ VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
+ AllocationVectorType *pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
+ VMA_ASSERT(pDedicatedAllocations);
+ for (allocIndex = 0; allocIndex < allocationCount; ++allocIndex) {
+ VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, pAllocations[allocIndex]);
+ }
+ }
+
+ VMA_DEBUG_LOG(" Allocated DedicatedMemory Count=%zu, MemoryTypeIndex=#%u", allocationCount, memTypeIndex);
+ } else {
+ // Free all already created allocations.
+ while (allocIndex--) {
+ VmaAllocation currAlloc = pAllocations[allocIndex];
+ VkDeviceMemory hMemory = currAlloc->GetMemory();
+
+ /*
+ There is no need to call this, because the Vulkan spec allows skipping vkUnmapMemory
+ before vkFreeMemory.
+
+ if(currAlloc->GetMappedData() != VMA_NULL)
+ {
+ (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
+ }
+ */
+
+ FreeVulkanMemory(memTypeIndex, currAlloc->GetSize(), hMemory);
+
+ currAlloc->SetUserData(this, VMA_NULL);
+ currAlloc->Dtor();
+ m_AllocationObjectAllocator.Free(currAlloc);
+ }
+
+ memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
+ }
+
+ return res;
+}
+
+VkResult VmaAllocator_T::AllocateDedicatedMemoryPage(
+ VkDeviceSize size,
+ VmaSuballocationType suballocType,
+ uint32_t memTypeIndex,
+ const VkMemoryAllocateInfo &allocInfo,
+ bool map,
+ bool isUserDataString,
+ void *pUserData,
+ VmaAllocation *pAllocation) {
+ VkDeviceMemory hMemory = VK_NULL_HANDLE;
+ VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
+ if (res < 0) {
+ VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
+ return res;
+ }
+
+ void *pMappedData = VMA_NULL;
+ if (map) {
+ res = (*m_VulkanFunctions.vkMapMemory)(
+ m_hDevice,
+ hMemory,
+ 0,
+ VK_WHOLE_SIZE,
+ 0,
+ &pMappedData);
+ if (res < 0) {
+ VMA_DEBUG_LOG(" vkMapMemory FAILED");
+ FreeVulkanMemory(memTypeIndex, size, hMemory);
+ return res;
+ }
+ }
+
+ *pAllocation = m_AllocationObjectAllocator.Allocate();
+ (*pAllocation)->Ctor(m_CurrentFrameIndex.load(), isUserDataString);
+ (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
+ (*pAllocation)->SetUserData(this, pUserData);
+ if (VMA_DEBUG_INITIALIZE_ALLOCATIONS) {
+ FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
+ }
+
+ return VK_SUCCESS;
+}
+
+void VmaAllocator_T::GetBufferMemoryRequirements(
+ VkBuffer hBuffer,
+ VkMemoryRequirements &memReq,
+ bool &requiresDedicatedAllocation,
+ bool &prefersDedicatedAllocation) const {
+#if VMA_DEDICATED_ALLOCATION
+ if (m_UseKhrDedicatedAllocation) {
+ VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
+ memReqInfo.buffer = hBuffer;
+
+ VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
+
+ VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
+ memReq2.pNext = &memDedicatedReq;
+
+ (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
+
+ memReq = memReq2.memoryRequirements;
+ requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
+ prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
+ } else
+#endif // #if VMA_DEDICATED_ALLOCATION
+ {
+ (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
+ requiresDedicatedAllocation = false;
+ prefersDedicatedAllocation = false;
+ }
+}
+
+void VmaAllocator_T::GetImageMemoryRequirements(
+ VkImage hImage,
+ VkMemoryRequirements &memReq,
+ bool &requiresDedicatedAllocation,
+ bool &prefersDedicatedAllocation) const {
+#if VMA_DEDICATED_ALLOCATION
+ if (m_UseKhrDedicatedAllocation) {
+ VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
+ memReqInfo.image = hImage;
+
+ VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
+
+ VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
+ memReq2.pNext = &memDedicatedReq;
+
+ (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
+
+ memReq = memReq2.memoryRequirements;
+ requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
+ prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
+ } else
+#endif // #if VMA_DEDICATED_ALLOCATION
+ {
+ (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
+ requiresDedicatedAllocation = false;
+ prefersDedicatedAllocation = false;
+ }
+}
+
+VkResult VmaAllocator_T::AllocateMemory(
+ const VkMemoryRequirements &vkMemReq,
+ bool requiresDedicatedAllocation,
+ bool prefersDedicatedAllocation,
+ VkBuffer dedicatedBuffer,
+ VkImage dedicatedImage,
+ const VmaAllocationCreateInfo &createInfo,
+ VmaSuballocationType suballocType,
+ size_t allocationCount,
+ VmaAllocation *pAllocations) {
+ memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
+
+ VMA_ASSERT(VmaIsPow2(vkMemReq.alignment));
+
+ if (vkMemReq.size == 0) {
+ return VK_ERROR_VALIDATION_FAILED_EXT;
+ }
+ if ((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
+ (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0) {
+ VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
+ return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+ }
+ if ((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
+ (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0) {
+ VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
+ return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+ }
+ if (requiresDedicatedAllocation) {
+ if ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0) {
+ VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
+ return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+ }
+ if (createInfo.pool != VK_NULL_HANDLE) {
+ VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
+ return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+ }
+ }
+ if ((createInfo.pool != VK_NULL_HANDLE) &&
+ ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0)) {
+ VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
+ return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+ }
+
+ if (createInfo.pool != VK_NULL_HANDLE) {
+ const VkDeviceSize alignmentForPool = VMA_MAX(
+ vkMemReq.alignment,
+ GetMemoryTypeMinAlignment(createInfo.pool->m_BlockVector.GetMemoryTypeIndex()));
+ return createInfo.pool->m_BlockVector.Allocate(
+ m_CurrentFrameIndex.load(),
+ vkMemReq.size,
+ alignmentForPool,
+ createInfo,
+ suballocType,
+ allocationCount,
+ pAllocations);
+ } else {
+ // Bit mask of Vulkan memory types acceptable for this allocation.
+ uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
+ uint32_t memTypeIndex = UINT32_MAX;
+ VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
+ if (res == VK_SUCCESS) {
+ VkDeviceSize alignmentForMemType = VMA_MAX(
+ vkMemReq.alignment,
+ GetMemoryTypeMinAlignment(memTypeIndex));
+
+ res = AllocateMemoryOfType(
+ vkMemReq.size,
+ alignmentForMemType,
+ requiresDedicatedAllocation || prefersDedicatedAllocation,
+ dedicatedBuffer,
+ dedicatedImage,
+ createInfo,
+ memTypeIndex,
+ suballocType,
+ allocationCount,
+ pAllocations);
+ // Succeeded on first try.
+ if (res == VK_SUCCESS) {
+ return res;
+ }
+ // Allocation from this memory type failed. Try other compatible memory types.
+ else {
+ for (;;) {
+ // Remove old memTypeIndex from list of possibilities.
+ memoryTypeBits &= ~(1u << memTypeIndex);
+ // Find alternative memTypeIndex.
+ res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
+ if (res == VK_SUCCESS) {
+ alignmentForMemType = VMA_MAX(
+ vkMemReq.alignment,
+ GetMemoryTypeMinAlignment(memTypeIndex));
+
+ res = AllocateMemoryOfType(
+ vkMemReq.size,
+ alignmentForMemType,
+ requiresDedicatedAllocation || prefersDedicatedAllocation,
+ dedicatedBuffer,
+ dedicatedImage,
+ createInfo,
+ memTypeIndex,
+ suballocType,
+ allocationCount,
+ pAllocations);
+ // Allocation from this alternative memory type succeeded.
+ if (res == VK_SUCCESS) {
+ return res;
+ }
+ // else: Allocation from this memory type failed. Try next one - next loop iteration.
+ }
+ // No other matching memory type index could be found.
+ else {
+ // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
+ return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+ }
+ }
+ }
+ }
+ // Can't find any single memory type matching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
+ else
+ return res;
+ }
+}
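+
+/*
+Illustrative walk-through of the fallback loop above (a sketch, not library
+code): if vkMemReq.memoryTypeBits is 0b1011 and allocation from type 1 fails,
+the mask is narrowed to 0b1001 and vmaFindMemoryTypeIndex() is queried again,
+so the next-cheapest compatible type (0, then 3) is tried until one succeeds
+or none remain, at which point VK_ERROR_OUT_OF_DEVICE_MEMORY is returned.
+*/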
+
+void VmaAllocator_T::FreeMemory(
+ size_t allocationCount,
+ const VmaAllocation *pAllocations) {
+ VMA_ASSERT(pAllocations);
+
+ for (size_t allocIndex = allocationCount; allocIndex--;) {
+ VmaAllocation allocation = pAllocations[allocIndex];
+
+ if (allocation != VK_NULL_HANDLE) {
+ if (TouchAllocation(allocation)) {
+ if (VMA_DEBUG_INITIALIZE_ALLOCATIONS) {
+ FillAllocation(allocation, VMA_ALLOCATION_FILL_PATTERN_DESTROYED);
+ }
+
+ switch (allocation->GetType()) {
+ case VmaAllocation_T::ALLOCATION_TYPE_BLOCK: {
+ VmaBlockVector *pBlockVector = VMA_NULL;
+ VmaPool hPool = allocation->GetBlock()->GetParentPool();
+ if (hPool != VK_NULL_HANDLE) {
+ pBlockVector = &hPool->m_BlockVector;
+ } else {
+ const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
+ pBlockVector = m_pBlockVectors[memTypeIndex];
+ }
+ pBlockVector->Free(allocation);
+ } break;
+ case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
+ FreeDedicatedMemory(allocation);
+ break;
+ default:
+ VMA_ASSERT(0);
+ }
+ }
+
+ allocation->SetUserData(this, VMA_NULL);
+ allocation->Dtor();
+ m_AllocationObjectAllocator.Free(allocation);
+ }
+ }
+}
+
+VkResult VmaAllocator_T::ResizeAllocation(
+ const VmaAllocation alloc,
+ VkDeviceSize newSize) {
+ if (newSize == 0 || alloc->GetLastUseFrameIndex() == VMA_FRAME_INDEX_LOST) {
+ return VK_ERROR_VALIDATION_FAILED_EXT;
+ }
+ if (newSize == alloc->GetSize()) {
+ return VK_SUCCESS;
+ }
+
+ switch (alloc->GetType()) {
+ case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
+ return VK_ERROR_FEATURE_NOT_PRESENT;
+ case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
+ if (alloc->GetBlock()->m_pMetadata->ResizeAllocation(alloc, newSize)) {
+ alloc->ChangeSize(newSize);
+ VMA_HEAVY_ASSERT(alloc->GetBlock()->m_pMetadata->Validate());
+ return VK_SUCCESS;
+ } else {
+ return VK_ERROR_OUT_OF_POOL_MEMORY;
+ }
+ default:
+ VMA_ASSERT(0);
+ return VK_ERROR_VALIDATION_FAILED_EXT;
+ }
+}
+
+void VmaAllocator_T::CalculateStats(VmaStats *pStats) {
+ // Initialize.
+ InitStatInfo(pStats->total);
+ for (size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
+ InitStatInfo(pStats->memoryType[i]);
+ for (size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
+ InitStatInfo(pStats->memoryHeap[i]);
+
+ // Process default pools.
+ for (uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex) {
+ VmaBlockVector *const pBlockVector = m_pBlockVectors[memTypeIndex];
+ VMA_ASSERT(pBlockVector);
+ pBlockVector->AddStats(pStats);
+ }
+
+ // Process custom pools.
+ {
+ VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
+ for (size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex) {
+ m_Pools[poolIndex]->m_BlockVector.AddStats(pStats);
+ }
+ }
+
+ // Process dedicated allocations.
+ for (uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex) {
+ const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
+ VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
+ AllocationVectorType *const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
+ VMA_ASSERT(pDedicatedAllocVector);
+ for (size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex) {
+ VmaStatInfo allocationStatInfo;
+ (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
+ VmaAddStatInfo(pStats->total, allocationStatInfo);
+ VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
+ VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
+ }
+ }
+
+ // Postprocess.
+ VmaPostprocessCalcStatInfo(pStats->total);
+ for (size_t i = 0; i < GetMemoryTypeCount(); ++i)
+ VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
+ for (size_t i = 0; i < GetMemoryHeapCount(); ++i)
+ VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
+}
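+
+/*
+Usage sketch for the statistics gathered above (illustrative; `allocator` is
+assumed to be a valid VmaAllocator and <cstdio> to be available):
+
+    VmaStats stats;
+    vmaCalculateStats(allocator, &stats);
+    printf("Used %llu bytes in %u allocations across %u blocks.\n",
+            (unsigned long long)stats.total.usedBytes,
+            stats.total.allocationCount,
+            stats.total.blockCount);
+*/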
+
+static const uint32_t VMA_VENDOR_ID_AMD = 4098;
+
+VkResult VmaAllocator_T::DefragmentationBegin(
+ const VmaDefragmentationInfo2 &info,
+ VmaDefragmentationStats *pStats,
+ VmaDefragmentationContext *pContext) {
+ if (info.pAllocationsChanged != VMA_NULL) {
+ memset(info.pAllocationsChanged, 0, info.allocationCount * sizeof(VkBool32));
+ }
+
+ *pContext = vma_new(this, VmaDefragmentationContext_T)(
+ this, m_CurrentFrameIndex.load(), info.flags, pStats);
+
+ (*pContext)->AddPools(info.poolCount, info.pPools);
+ (*pContext)->AddAllocations(
+ info.allocationCount, info.pAllocations, info.pAllocationsChanged);
+
+ VkResult res = (*pContext)->Defragment(
+ info.maxCpuBytesToMove, info.maxCpuAllocationsToMove,
+ info.maxGpuBytesToMove, info.maxGpuAllocationsToMove,
+ info.commandBuffer, pStats);
+
+ if (res != VK_NOT_READY) {
+ vma_delete(this, *pContext);
+ *pContext = VMA_NULL;
+ }
+
+ return res;
+}
+
+VkResult VmaAllocator_T::DefragmentationEnd(
+ VmaDefragmentationContext context) {
+ vma_delete(this, context);
+ return VK_SUCCESS;
+}
+
+void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo *pAllocationInfo) {
+ if (hAllocation->CanBecomeLost()) {
+ /*
+ Warning: This is a carefully designed algorithm.
+ Do not modify unless you really know what you're doing :)
+ */
+ const uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
+ uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
+ for (;;) {
+ if (localLastUseFrameIndex == VMA_FRAME_INDEX_LOST) {
+ pAllocationInfo->memoryType = UINT32_MAX;
+ pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
+ pAllocationInfo->offset = 0;
+ pAllocationInfo->size = hAllocation->GetSize();
+ pAllocationInfo->pMappedData = VMA_NULL;
+ pAllocationInfo->pUserData = hAllocation->GetUserData();
+ return;
+ } else if (localLastUseFrameIndex == localCurrFrameIndex) {
+ pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
+ pAllocationInfo->deviceMemory = hAllocation->GetMemory();
+ pAllocationInfo->offset = hAllocation->GetOffset();
+ pAllocationInfo->size = hAllocation->GetSize();
+ pAllocationInfo->pMappedData = VMA_NULL;
+ pAllocationInfo->pUserData = hAllocation->GetUserData();
+ return;
+ } else // Last use time earlier than current time.
+ {
+ if (hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex)) {
+ localLastUseFrameIndex = localCurrFrameIndex;
+ }
+ }
+ }
+ } else {
+#if VMA_STATS_STRING_ENABLED
+ uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
+ uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
+ for (;;) {
+ VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
+ if (localLastUseFrameIndex == localCurrFrameIndex) {
+ break;
+ } else // Last use time earlier than current time.
+ {
+ if (hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex)) {
+ localLastUseFrameIndex = localCurrFrameIndex;
+ }
+ }
+ }
+#endif
+
+ pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
+ pAllocationInfo->deviceMemory = hAllocation->GetMemory();
+ pAllocationInfo->offset = hAllocation->GetOffset();
+ pAllocationInfo->size = hAllocation->GetSize();
+ pAllocationInfo->pMappedData = hAllocation->GetMappedData();
+ pAllocationInfo->pUserData = hAllocation->GetUserData();
+ }
+}
+
+bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation) {
+ // This is a stripped-down version of VmaAllocator_T::GetAllocationInfo.
+ if (hAllocation->CanBecomeLost()) {
+ uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
+ uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
+ for (;;) {
+ if (localLastUseFrameIndex == VMA_FRAME_INDEX_LOST) {
+ return false;
+ } else if (localLastUseFrameIndex == localCurrFrameIndex) {
+ return true;
+ } else // Last use time earlier than current time.
+ {
+ if (hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex)) {
+ localLastUseFrameIndex = localCurrFrameIndex;
+ }
+ }
+ }
+ } else {
+#if VMA_STATS_STRING_ENABLED
+ uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
+ uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
+ for (;;) {
+ VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
+ if (localLastUseFrameIndex == localCurrFrameIndex) {
+ break;
+ } else // Last use time earlier than current time.
+ {
+ if (hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex)) {
+ localLastUseFrameIndex = localCurrFrameIndex;
+ }
+ }
+ }
+#endif
+
+ return true;
+ }
+}
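+
+/*
+Typical per-frame pattern for allocations created with
+VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT (a sketch; `frameIndex` and the
+resource-recreation logic are assumed to live in the caller's code):
+
+    vmaSetCurrentFrameIndex(allocator, frameIndex);
+    if (vmaTouchAllocation(allocator, allocation) == VK_FALSE) {
+        // Allocation was lost: destroy the old resource and recreate it.
+    }
+*/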
+
+VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo *pCreateInfo, VmaPool *pPool) {
+ VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u, flags=%u", pCreateInfo->memoryTypeIndex, pCreateInfo->flags);
+
+ VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
+
+ if (newCreateInfo.maxBlockCount == 0) {
+ newCreateInfo.maxBlockCount = SIZE_MAX;
+ }
+ if (newCreateInfo.minBlockCount > newCreateInfo.maxBlockCount) {
+ return VK_ERROR_INITIALIZATION_FAILED;
+ }
+
+ const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
+
+ *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo, preferredBlockSize);
+
+ VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
+ if (res != VK_SUCCESS) {
+ vma_delete(this, *pPool);
+ *pPool = VMA_NULL;
+ return res;
+ }
+
+ // Add to m_Pools.
+ {
+ VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
+ (*pPool)->SetId(m_NextPoolId++);
+ VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
+ }
+
+ return VK_SUCCESS;
+}
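+
+/*
+Usage sketch (illustrative; `memTypeIndex` would normally come from
+vmaFindMemoryTypeIndex() or one of its ForBufferInfo/ForImageInfo variants):
+
+    VmaPoolCreateInfo poolInfo = {};
+    poolInfo.memoryTypeIndex = memTypeIndex;
+    poolInfo.blockSize = 64ull * 1024 * 1024; // optional; 0 = use default
+    poolInfo.minBlockCount = 1; // keep one block alive permanently
+    VmaPool pool;
+    VkResult res = vmaCreatePool(allocator, &poolInfo, &pool);
+*/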
+
+void VmaAllocator_T::DestroyPool(VmaPool pool) {
+ // Remove from m_Pools.
+ {
+ VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
+ bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
+ VMA_ASSERT(success && "Pool not found in Allocator.");
+ }
+
+ vma_delete(this, pool);
+}
+
+void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats *pPoolStats) {
+ pool->m_BlockVector.GetPoolStats(pPoolStats);
+}
+
+void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex) {
+ m_CurrentFrameIndex.store(frameIndex);
+}
+
+void VmaAllocator_T::MakePoolAllocationsLost(
+ VmaPool hPool,
+ size_t *pLostAllocationCount) {
+ hPool->m_BlockVector.MakePoolAllocationsLost(
+ m_CurrentFrameIndex.load(),
+ pLostAllocationCount);
+}
+
+VkResult VmaAllocator_T::CheckPoolCorruption(VmaPool hPool) {
+ return hPool->m_BlockVector.CheckCorruption();
+}
+
+VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits) {
+ VkResult finalRes = VK_ERROR_FEATURE_NOT_PRESENT;
+
+ // Process default pools.
+ for (uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex) {
+ if (((1u << memTypeIndex) & memoryTypeBits) != 0) {
+ VmaBlockVector *const pBlockVector = m_pBlockVectors[memTypeIndex];
+ VMA_ASSERT(pBlockVector);
+ VkResult localRes = pBlockVector->CheckCorruption();
+ switch (localRes) {
+ case VK_ERROR_FEATURE_NOT_PRESENT:
+ break;
+ case VK_SUCCESS:
+ finalRes = VK_SUCCESS;
+ break;
+ default:
+ return localRes;
+ }
+ }
+ }
+
+ // Process custom pools.
+ {
+ VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
+ for (size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex) {
+ if (((1u << m_Pools[poolIndex]->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0) {
+ VkResult localRes = m_Pools[poolIndex]->m_BlockVector.CheckCorruption();
+ switch (localRes) {
+ case VK_ERROR_FEATURE_NOT_PRESENT:
+ break;
+ case VK_SUCCESS:
+ finalRes = VK_SUCCESS;
+ break;
+ default:
+ return localRes;
+ }
+ }
+ }
+ }
+
+ return finalRes;
+}
+
+void VmaAllocator_T::CreateLostAllocation(VmaAllocation *pAllocation) {
+ *pAllocation = m_AllocationObjectAllocator.Allocate();
+ (*pAllocation)->Ctor(VMA_FRAME_INDEX_LOST, false);
+ (*pAllocation)->InitLost();
+}
+
+VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo *pAllocateInfo, VkDeviceMemory *pMemory) {
+ const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
+
+ VkResult res;
+ if (m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE) {
+ VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
+ if (m_HeapSizeLimit[heapIndex] >= pAllocateInfo->allocationSize) {
+ res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
+ if (res == VK_SUCCESS) {
+ m_HeapSizeLimit[heapIndex] -= pAllocateInfo->allocationSize;
+ }
+ } else {
+ res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
+ }
+ } else {
+ res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
+ }
+
+ if (res == VK_SUCCESS && m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL) {
+ (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
+ }
+
+ return res;
+}
+
+void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory) {
+ if (m_DeviceMemoryCallbacks.pfnFree != VMA_NULL) {
+ (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
+ }
+
+ (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
+
+ const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
+ if (m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE) {
+ VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
+ m_HeapSizeLimit[heapIndex] += size;
+ }
+}
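+
+/*
+The m_HeapSizeLimit bookkeeping in the two functions above pairs with
+VmaAllocatorCreateInfo::pHeapSizeLimit (a sketch; VK_WHOLE_SIZE means
+"no limit" for that heap):
+
+    VkDeviceSize heapLimits[VK_MAX_MEMORY_HEAPS];
+    for (uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i) {
+        heapLimits[i] = VK_WHOLE_SIZE;
+    }
+    heapLimits[0] = 512ull * 1024 * 1024; // cap heap 0 at 512 MiB
+    allocatorCreateInfo.pHeapSizeLimit = heapLimits;
+*/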
+
+VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void **ppData) {
+ if (hAllocation->CanBecomeLost()) {
+ return VK_ERROR_MEMORY_MAP_FAILED;
+ }
+
+ switch (hAllocation->GetType()) {
+ case VmaAllocation_T::ALLOCATION_TYPE_BLOCK: {
+ VmaDeviceMemoryBlock *const pBlock = hAllocation->GetBlock();
+ char *pBytes = VMA_NULL;
+ VkResult res = pBlock->Map(this, 1, (void **)&pBytes);
+ if (res == VK_SUCCESS) {
+ *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
+ hAllocation->BlockAllocMap();
+ }
+ return res;
+ }
+ case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
+ return hAllocation->DedicatedAllocMap(this, ppData);
+ default:
+ VMA_ASSERT(0);
+ return VK_ERROR_MEMORY_MAP_FAILED;
+ }
+}
+
+void VmaAllocator_T::Unmap(VmaAllocation hAllocation) {
+ switch (hAllocation->GetType()) {
+ case VmaAllocation_T::ALLOCATION_TYPE_BLOCK: {
+ VmaDeviceMemoryBlock *const pBlock = hAllocation->GetBlock();
+ hAllocation->BlockAllocUnmap();
+ pBlock->Unmap(this, 1);
+ } break;
+ case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
+ hAllocation->DedicatedAllocUnmap(this);
+ break;
+ default:
+ VMA_ASSERT(0);
+ }
+}
+
+VkResult VmaAllocator_T::BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer) {
+ VkResult res = VK_SUCCESS;
+ switch (hAllocation->GetType()) {
+ case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
+ res = GetVulkanFunctions().vkBindBufferMemory(
+ m_hDevice,
+ hBuffer,
+ hAllocation->GetMemory(),
+ 0); //memoryOffset
+ break;
+ case VmaAllocation_T::ALLOCATION_TYPE_BLOCK: {
+ VmaDeviceMemoryBlock *pBlock = hAllocation->GetBlock();
+ VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block. Is the allocation lost?");
+ res = pBlock->BindBufferMemory(this, hAllocation, hBuffer);
+ break;
+ }
+ default:
+ VMA_ASSERT(0);
+ }
+ return res;
+}
+
+VkResult VmaAllocator_T::BindImageMemory(VmaAllocation hAllocation, VkImage hImage) {
+ VkResult res = VK_SUCCESS;
+ switch (hAllocation->GetType()) {
+ case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
+ res = GetVulkanFunctions().vkBindImageMemory(
+ m_hDevice,
+ hImage,
+ hAllocation->GetMemory(),
+ 0); //memoryOffset
+ break;
+ case VmaAllocation_T::ALLOCATION_TYPE_BLOCK: {
+ VmaDeviceMemoryBlock *pBlock = hAllocation->GetBlock();
+ VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block. Is the allocation lost?");
+ res = pBlock->BindImageMemory(this, hAllocation, hImage);
+ break;
+ }
+ default:
+ VMA_ASSERT(0);
+ }
+ return res;
+}
+
+void VmaAllocator_T::FlushOrInvalidateAllocation(
+ VmaAllocation hAllocation,
+ VkDeviceSize offset, VkDeviceSize size,
+ VMA_CACHE_OPERATION op) {
+ const uint32_t memTypeIndex = hAllocation->GetMemoryTypeIndex();
+ if (size > 0 && IsMemoryTypeNonCoherent(memTypeIndex)) {
+ const VkDeviceSize allocationSize = hAllocation->GetSize();
+ VMA_ASSERT(offset <= allocationSize);
+
+ const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
+
+ VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
+ memRange.memory = hAllocation->GetMemory();
+
+ switch (hAllocation->GetType()) {
+ case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
+ memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
+ if (size == VK_WHOLE_SIZE) {
+ memRange.size = allocationSize - memRange.offset;
+ } else {
+ VMA_ASSERT(offset + size <= allocationSize);
+ memRange.size = VMA_MIN(
+ VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize),
+ allocationSize - memRange.offset);
+ }
+ break;
+
+ case VmaAllocation_T::ALLOCATION_TYPE_BLOCK: {
+ // 1. Still within this allocation.
+ memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
+ if (size == VK_WHOLE_SIZE) {
+ size = allocationSize - offset;
+ } else {
+ VMA_ASSERT(offset + size <= allocationSize);
+ }
+ memRange.size = VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize);
+
+ // 2. Adjust to whole block.
+ const VkDeviceSize allocationOffset = hAllocation->GetOffset();
+ VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0);
+ const VkDeviceSize blockSize = hAllocation->GetBlock()->m_pMetadata->GetSize();
+ memRange.offset += allocationOffset;
+ memRange.size = VMA_MIN(memRange.size, blockSize - memRange.offset);
+
+ break;
+ }
+
+ default:
+ VMA_ASSERT(0);
+ }
+
+ switch (op) {
+ case VMA_CACHE_FLUSH:
+ (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange);
+ break;
+ case VMA_CACHE_INVALIDATE:
+ (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, 1, &memRange);
+ break;
+ default:
+ VMA_ASSERT(0);
+ }
+ }
+ // else: Just ignore this call.
+}
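+
+/*
+Worked example of the rounding above (illustrative): with
+nonCoherentAtomSize = 64, offset = 100 and size = 200, the flushed range
+becomes memRange.offset = VmaAlignDown(100, 64) = 64 and
+memRange.size = VmaAlignUp(200 + (100 - 64), 64) = 256, covering all requested
+bytes while respecting VkPhysicalDeviceLimits::nonCoherentAtomSize.
+*/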
+
+void VmaAllocator_T::FreeDedicatedMemory(VmaAllocation allocation) {
+ VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
+
+ const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
+ {
+ VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
+ AllocationVectorType *const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
+ VMA_ASSERT(pDedicatedAllocations);
+ bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
+ VMA_ASSERT(success);
+ }
+
+ VkDeviceMemory hMemory = allocation->GetMemory();
+
+ /*
+ There is no need to call this, because the Vulkan spec allows skipping vkUnmapMemory
+ before vkFreeMemory.
+
+ if(allocation->GetMappedData() != VMA_NULL)
+ {
+ (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
+ }
+ */
+
+ FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
+
+ VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
+}
+
+uint32_t VmaAllocator_T::CalculateGpuDefragmentationMemoryTypeBits() const {
+ VkBufferCreateInfo dummyBufCreateInfo;
+ VmaFillGpuDefragmentationBufferCreateInfo(dummyBufCreateInfo);
+
+ uint32_t memoryTypeBits = 0;
+
+ // Create buffer.
+ VkBuffer buf = VMA_NULL;
+ VkResult res = (*GetVulkanFunctions().vkCreateBuffer)(
+ m_hDevice, &dummyBufCreateInfo, GetAllocationCallbacks(), &buf);
+ if (res == VK_SUCCESS) {
+ // Query for supported memory types.
+ VkMemoryRequirements memReq;
+ (*GetVulkanFunctions().vkGetBufferMemoryRequirements)(m_hDevice, buf, &memReq);
+ memoryTypeBits = memReq.memoryTypeBits;
+
+ // Destroy buffer.
+ (*GetVulkanFunctions().vkDestroyBuffer)(m_hDevice, buf, GetAllocationCallbacks());
+ }
+
+ return memoryTypeBits;
+}
+
+void VmaAllocator_T::FillAllocation(const VmaAllocation hAllocation, uint8_t pattern) {
+ if (VMA_DEBUG_INITIALIZE_ALLOCATIONS &&
+ !hAllocation->CanBecomeLost() &&
+ (m_MemProps.memoryTypes[hAllocation->GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0) {
+ void *pData = VMA_NULL;
+ VkResult res = Map(hAllocation, &pData);
+ if (res == VK_SUCCESS) {
+ memset(pData, (int)pattern, (size_t)hAllocation->GetSize());
+ FlushOrInvalidateAllocation(hAllocation, 0, VK_WHOLE_SIZE, VMA_CACHE_FLUSH);
+ Unmap(hAllocation);
+ } else {
+ VMA_ASSERT(0 && "VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled, but couldn't map memory to fill allocation.");
+ }
+ }
+}
+
+uint32_t VmaAllocator_T::GetGpuDefragmentationMemoryTypeBits() {
+ uint32_t memoryTypeBits = m_GpuDefragmentationMemoryTypeBits.load();
+ if (memoryTypeBits == UINT32_MAX) {
+ memoryTypeBits = CalculateGpuDefragmentationMemoryTypeBits();
+ m_GpuDefragmentationMemoryTypeBits.store(memoryTypeBits);
+ }
+ return memoryTypeBits;
+}
+
+#if VMA_STATS_STRING_ENABLED
+
+void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter &json) {
+ bool dedicatedAllocationsStarted = false;
+ for (uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex) {
+ VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
+ AllocationVectorType *const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
+ VMA_ASSERT(pDedicatedAllocVector);
+ if (pDedicatedAllocVector->empty() == false) {
+ if (dedicatedAllocationsStarted == false) {
+ dedicatedAllocationsStarted = true;
+ json.WriteString("DedicatedAllocations");
+ json.BeginObject();
+ }
+
+ json.BeginString("Type ");
+ json.ContinueString(memTypeIndex);
+ json.EndString();
+
+ json.BeginArray();
+
+ for (size_t i = 0; i < pDedicatedAllocVector->size(); ++i) {
+ json.BeginObject(true);
+ const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
+ hAlloc->PrintParameters(json);
+ json.EndObject();
+ }
+
+ json.EndArray();
+ }
+ }
+ if (dedicatedAllocationsStarted) {
+ json.EndObject();
+ }
+
+ {
+ bool allocationsStarted = false;
+ for (uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex) {
+ if (m_pBlockVectors[memTypeIndex]->IsEmpty() == false) {
+ if (allocationsStarted == false) {
+ allocationsStarted = true;
+ json.WriteString("DefaultPools");
+ json.BeginObject();
+ }
+
+ json.BeginString("Type ");
+ json.ContinueString(memTypeIndex);
+ json.EndString();
+
+ m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
+ }
+ }
+ if (allocationsStarted) {
+ json.EndObject();
+ }
+ }
+
+ // Custom pools
+ {
+ VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
+ const size_t poolCount = m_Pools.size();
+ if (poolCount > 0) {
+ json.WriteString("Pools");
+ json.BeginObject();
+ for (size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex) {
+ json.BeginString();
+ json.ContinueString(m_Pools[poolIndex]->GetId());
+ json.EndString();
+
+ m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
+ }
+ json.EndObject();
+ }
+ }
+}
+
+#endif // #if VMA_STATS_STRING_ENABLED
+
+////////////////////////////////////////////////////////////////////////////////
+// Public interface
+
+VkResult vmaCreateAllocator(
+ const VmaAllocatorCreateInfo *pCreateInfo,
+ VmaAllocator *pAllocator) {
+ VMA_ASSERT(pCreateInfo && pAllocator);
+ VMA_DEBUG_LOG("vmaCreateAllocator");
+ *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
+ return (*pAllocator)->Init(pCreateInfo);
+}
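+
+/*
+Usage sketch (illustrative; `physicalDevice` and `device` are the caller's
+Vulkan handles):
+
+    VmaAllocatorCreateInfo allocatorInfo = {};
+    allocatorInfo.physicalDevice = physicalDevice;
+    allocatorInfo.device = device;
+    VmaAllocator allocator;
+    VkResult res = vmaCreateAllocator(&allocatorInfo, &allocator);
+    // ... on shutdown:
+    // vmaDestroyAllocator(allocator);
+*/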
+
+void vmaDestroyAllocator(
+ VmaAllocator allocator) {
+ if (allocator != VK_NULL_HANDLE) {
+ VMA_DEBUG_LOG("vmaDestroyAllocator");
+ VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
+ vma_delete(&allocationCallbacks, allocator);
+ }
+}
+
+void vmaGetPhysicalDeviceProperties(
+ VmaAllocator allocator,
+ const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties) {
+ VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
+ *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
+}
+
+void vmaGetMemoryProperties(
+ VmaAllocator allocator,
+ const VkPhysicalDeviceMemoryProperties **ppPhysicalDeviceMemoryProperties) {
+ VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
+ *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
+}
+
+void vmaGetMemoryTypeProperties(
+ VmaAllocator allocator,
+ uint32_t memoryTypeIndex,
+ VkMemoryPropertyFlags *pFlags) {
+ VMA_ASSERT(allocator && pFlags);
+ VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
+ *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
+}
+
+void vmaSetCurrentFrameIndex(
+ VmaAllocator allocator,
+ uint32_t frameIndex) {
+ VMA_ASSERT(allocator);
+ VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
+
+ VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
+ allocator->SetCurrentFrameIndex(frameIndex);
+}
+
+void vmaCalculateStats(
+ VmaAllocator allocator,
+ VmaStats *pStats) {
+ VMA_ASSERT(allocator && pStats);
+ VMA_DEBUG_GLOBAL_MUTEX_LOCK
+ allocator->CalculateStats(pStats);
+}
+
+#if VMA_STATS_STRING_ENABLED
+
+void vmaBuildStatsString(
+ VmaAllocator allocator,
+ char **ppStatsString,
+ VkBool32 detailedMap) {
+ VMA_ASSERT(allocator && ppStatsString);
+ VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
+ VmaStringBuilder sb(allocator);
+ {
+ VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
+ json.BeginObject();
+
+ VmaStats stats;
+ allocator->CalculateStats(&stats);
+
+ json.WriteString("Total");
+ VmaPrintStatInfo(json, stats.total);
+
+ for (uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex) {
+ json.BeginString("Heap ");
+ json.ContinueString(heapIndex);
+ json.EndString();
+ json.BeginObject();
+
+ json.WriteString("Size");
+ json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);
+
+ json.WriteString("Flags");
+ json.BeginArray(true);
+ if ((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0) {
+ json.WriteString("DEVICE_LOCAL");
+ }
+ json.EndArray();
+
+ if (stats.memoryHeap[heapIndex].blockCount > 0) {
+ json.WriteString("Stats");
+ VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
+ }
+
+ for (uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex) {
+ if (allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex) {
+ json.BeginString("Type ");
+ json.ContinueString(typeIndex);
+ json.EndString();
+
+ json.BeginObject();
+
+ json.WriteString("Flags");
+ json.BeginArray(true);
+ VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
+ if ((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0) {
+ json.WriteString("DEVICE_LOCAL");
+ }
+ if ((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0) {
+ json.WriteString("HOST_VISIBLE");
+ }
+ if ((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0) {
+ json.WriteString("HOST_COHERENT");
+ }
+ if ((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0) {
+ json.WriteString("HOST_CACHED");
+ }
+ if ((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0) {
+ json.WriteString("LAZILY_ALLOCATED");
+ }
+ json.EndArray();
+
+ if (stats.memoryType[typeIndex].blockCount > 0) {
+ json.WriteString("Stats");
+ VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
+ }
+
+ json.EndObject();
+ }
+ }
+
+ json.EndObject();
+ }
+ if (detailedMap == VK_TRUE) {
+ allocator->PrintDetailedMap(json);
+ }
+
+ json.EndObject();
+ }
+
+ const size_t len = sb.GetLength();
+ char *const pChars = vma_new_array(allocator, char, len + 1);
+ if (len > 0) {
+ memcpy(pChars, sb.GetData(), len);
+ }
+ pChars[len] = '\0';
+ *ppStatsString = pChars;
+}
+
+void vmaFreeStatsString(
+ VmaAllocator allocator,
+ char *pStatsString) {
+ if (pStatsString != VMA_NULL) {
+ VMA_ASSERT(allocator);
+ size_t len = strlen(pStatsString);
+ vma_delete_array(allocator, pStatsString, len + 1);
+ }
+}
+
+#endif // #if VMA_STATS_STRING_ENABLED
+
+/*
+This function is not protected by any mutex because it just reads immutable data.
+*/
+VkResult vmaFindMemoryTypeIndex(
+ VmaAllocator allocator,
+ uint32_t memoryTypeBits,
+ const VmaAllocationCreateInfo *pAllocationCreateInfo,
+ uint32_t *pMemoryTypeIndex) {
+ VMA_ASSERT(allocator != VK_NULL_HANDLE);
+ VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
+ VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
+
+ if (pAllocationCreateInfo->memoryTypeBits != 0) {
+ memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
+ }
+
+ uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
+ uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
+
+ // Convert usage to requiredFlags and preferredFlags.
+ switch (pAllocationCreateInfo->usage) {
+ case VMA_MEMORY_USAGE_UNKNOWN:
+ break;
+ case VMA_MEMORY_USAGE_GPU_ONLY:
+ if (!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0) {
+ preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
+ }
+ break;
+ case VMA_MEMORY_USAGE_CPU_ONLY:
+ requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
+ break;
+ case VMA_MEMORY_USAGE_CPU_TO_GPU:
+ requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
+ if (!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0) {
+ preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
+ }
+ break;
+ case VMA_MEMORY_USAGE_GPU_TO_CPU:
+ requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
+ preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
+ break;
+ default:
+ break;
+ }
+
+ *pMemoryTypeIndex = UINT32_MAX;
+ uint32_t minCost = UINT32_MAX;
+ for (uint32_t memTypeIndex = 0, memTypeBit = 1;
+ memTypeIndex < allocator->GetMemoryTypeCount();
+ ++memTypeIndex, memTypeBit <<= 1) {
+ // This memory type is acceptable according to memoryTypeBits bitmask.
+ if ((memTypeBit & memoryTypeBits) != 0) {
+ const VkMemoryPropertyFlags currFlags =
+ allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
+ // This memory type contains requiredFlags.
+ if ((requiredFlags & ~currFlags) == 0) {
+ // Calculate cost as number of bits from preferredFlags not present in this memory type.
+ uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags);
+ // Remember memory type with lowest cost.
+ if (currCost < minCost) {
+ *pMemoryTypeIndex = memTypeIndex;
+ if (currCost == 0) {
+ return VK_SUCCESS;
+ }
+ minCost = currCost;
+ }
+ }
+ }
+ }
+ return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
+}
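+
+/*
+Usage sketch (illustrative; `memReq` would come from
+vkGetBufferMemoryRequirements() on the caller's buffer):
+
+    VmaAllocationCreateInfo createInfo = {};
+    createInfo.usage = VMA_MEMORY_USAGE_CPU_TO_GPU;
+    uint32_t memTypeIndex;
+    VkResult res = vmaFindMemoryTypeIndex(
+            allocator, memReq.memoryTypeBits, &createInfo, &memTypeIndex);
+    // Cost example: if preferredFlags has two bits set and a candidate type
+    // provides one of them, that type's cost is 1; a cost of 0 returns immediately.
+*/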
+
+VkResult vmaFindMemoryTypeIndexForBufferInfo(
+ VmaAllocator allocator,
+ const VkBufferCreateInfo *pBufferCreateInfo,
+ const VmaAllocationCreateInfo *pAllocationCreateInfo,
+ uint32_t *pMemoryTypeIndex) {
+ VMA_ASSERT(allocator != VK_NULL_HANDLE);
+ VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
+ VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
+ VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
+
+ const VkDevice hDev = allocator->m_hDevice;
+ VkBuffer hBuffer = VK_NULL_HANDLE;
+ VkResult res = allocator->GetVulkanFunctions().vkCreateBuffer(
+ hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
+ if (res == VK_SUCCESS) {
+ VkMemoryRequirements memReq = {};
+ allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements(
+ hDev, hBuffer, &memReq);
+
+ res = vmaFindMemoryTypeIndex(
+ allocator,
+ memReq.memoryTypeBits,
+ pAllocationCreateInfo,
+ pMemoryTypeIndex);
+
+ allocator->GetVulkanFunctions().vkDestroyBuffer(
+ hDev, hBuffer, allocator->GetAllocationCallbacks());
+ }
+ return res;
+}
+
+VkResult vmaFindMemoryTypeIndexForImageInfo(
+ VmaAllocator allocator,
+ const VkImageCreateInfo *pImageCreateInfo,
+ const VmaAllocationCreateInfo *pAllocationCreateInfo,
+ uint32_t *pMemoryTypeIndex) {
+ VMA_ASSERT(allocator != VK_NULL_HANDLE);
+ VMA_ASSERT(pImageCreateInfo != VMA_NULL);
+ VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
+ VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
+
+ const VkDevice hDev = allocator->m_hDevice;
+ VkImage hImage = VK_NULL_HANDLE;
+ VkResult res = allocator->GetVulkanFunctions().vkCreateImage(
+ hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
+ if (res == VK_SUCCESS) {
+ VkMemoryRequirements memReq = {};
+ allocator->GetVulkanFunctions().vkGetImageMemoryRequirements(
+ hDev, hImage, &memReq);
+
+ res = vmaFindMemoryTypeIndex(
+ allocator,
+ memReq.memoryTypeBits,
+ pAllocationCreateInfo,
+ pMemoryTypeIndex);
+
+ allocator->GetVulkanFunctions().vkDestroyImage(
+ hDev, hImage, allocator->GetAllocationCallbacks());
+ }
+ return res;
+}
+
+VkResult vmaCreatePool(
+ VmaAllocator allocator,
+ const VmaPoolCreateInfo *pCreateInfo,
+ VmaPool *pPool) {
+ VMA_ASSERT(allocator && pCreateInfo && pPool);
+
+ VMA_DEBUG_LOG("vmaCreatePool");
+
+ VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
+ VkResult res = allocator->CreatePool(pCreateInfo, pPool);
+
+#if VMA_RECORDING_ENABLED
+ if (allocator->GetRecorder() != VMA_NULL) {
+ allocator->GetRecorder()->RecordCreatePool(allocator->GetCurrentFrameIndex(), *pCreateInfo, *pPool);
+ }
+#endif
+
+ return res;
+}
+
+void vmaDestroyPool(
+ VmaAllocator allocator,
+ VmaPool pool) {
+ VMA_ASSERT(allocator);
+
+ if (pool == VK_NULL_HANDLE) {
+ return;
+ }
+
+ VMA_DEBUG_LOG("vmaDestroyPool");
+
+ VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
+#if VMA_RECORDING_ENABLED
+ if (allocator->GetRecorder() != VMA_NULL) {
+ allocator->GetRecorder()->RecordDestroyPool(allocator->GetCurrentFrameIndex(), pool);
+ }
+#endif
+
+ allocator->DestroyPool(pool);
+}
+
+void vmaGetPoolStats(
+ VmaAllocator allocator,
+ VmaPool pool,
+ VmaPoolStats *pPoolStats) {
+ VMA_ASSERT(allocator && pool && pPoolStats);
+
+ VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
+ allocator->GetPoolStats(pool, pPoolStats);
+}
+
+void vmaMakePoolAllocationsLost(
+ VmaAllocator allocator,
+ VmaPool pool,
+ size_t *pLostAllocationCount) {
+ VMA_ASSERT(allocator && pool);
+
+ VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
+#if VMA_RECORDING_ENABLED
+ if (allocator->GetRecorder() != VMA_NULL) {
+ allocator->GetRecorder()->RecordMakePoolAllocationsLost(allocator->GetCurrentFrameIndex(), pool);
+ }
+#endif
+
+ allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
+}
+
+VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool) {
+ VMA_ASSERT(allocator && pool);
+
+ VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
+ VMA_DEBUG_LOG("vmaCheckPoolCorruption");
+
+ return allocator->CheckPoolCorruption(pool);
+}
+
+VkResult vmaAllocateMemory(
+ VmaAllocator allocator,
+ const VkMemoryRequirements *pVkMemoryRequirements,
+ const VmaAllocationCreateInfo *pCreateInfo,
+ VmaAllocation *pAllocation,
+ VmaAllocationInfo *pAllocationInfo) {
+ VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
+
+ VMA_DEBUG_LOG("vmaAllocateMemory");
+
+ VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
+ VkResult result = allocator->AllocateMemory(
+ *pVkMemoryRequirements,
+ false, // requiresDedicatedAllocation
+ false, // prefersDedicatedAllocation
+ VK_NULL_HANDLE, // dedicatedBuffer
+ VK_NULL_HANDLE, // dedicatedImage
+ *pCreateInfo,
+ VMA_SUBALLOCATION_TYPE_UNKNOWN,
+ 1, // allocationCount
+ pAllocation);
+
+#if VMA_RECORDING_ENABLED
+ if (allocator->GetRecorder() != VMA_NULL) {
+ allocator->GetRecorder()->RecordAllocateMemory(
+ allocator->GetCurrentFrameIndex(),
+ *pVkMemoryRequirements,
+ *pCreateInfo,
+ *pAllocation);
+ }
+#endif
+
+ if (pAllocationInfo != VMA_NULL && result == VK_SUCCESS) {
+ allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
+ }
+
+ return result;
+}
+
+VkResult vmaAllocateMemoryPages(
+ VmaAllocator allocator,
+ const VkMemoryRequirements *pVkMemoryRequirements,
+ const VmaAllocationCreateInfo *pCreateInfo,
+ size_t allocationCount,
+ VmaAllocation *pAllocations,
+ VmaAllocationInfo *pAllocationInfo) {
+ if (allocationCount == 0) {
+ return VK_SUCCESS;
+ }
+
+ VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocations);
+
+ VMA_DEBUG_LOG("vmaAllocateMemoryPages");
+
+ VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
+ VkResult result = allocator->AllocateMemory(
+ *pVkMemoryRequirements,
+ false, // requiresDedicatedAllocation
+ false, // prefersDedicatedAllocation
+ VK_NULL_HANDLE, // dedicatedBuffer
+ VK_NULL_HANDLE, // dedicatedImage
+ *pCreateInfo,
+ VMA_SUBALLOCATION_TYPE_UNKNOWN,
+ allocationCount,
+ pAllocations);
+
+#if VMA_RECORDING_ENABLED
+ if (allocator->GetRecorder() != VMA_NULL) {
+ allocator->GetRecorder()->RecordAllocateMemoryPages(
+ allocator->GetCurrentFrameIndex(),
+ *pVkMemoryRequirements,
+ *pCreateInfo,
+ (uint64_t)allocationCount,
+ pAllocations);
+ }
+#endif
+
+ if (pAllocationInfo != VMA_NULL && result == VK_SUCCESS) {
+ for (size_t i = 0; i < allocationCount; ++i) {
+ allocator->GetAllocationInfo(pAllocations[i], pAllocationInfo + i);
+ }
+ }
+
+ return result;
+}
+
+VkResult vmaAllocateMemoryForBuffer(
+ VmaAllocator allocator,
+ VkBuffer buffer,
+ const VmaAllocationCreateInfo *pCreateInfo,
+ VmaAllocation *pAllocation,
+ VmaAllocationInfo *pAllocationInfo) {
+ VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
+
+ VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
+
+ VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
+ VkMemoryRequirements vkMemReq = {};
+ bool requiresDedicatedAllocation = false;
+ bool prefersDedicatedAllocation = false;
+ allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
+ requiresDedicatedAllocation,
+ prefersDedicatedAllocation);
+
+ VkResult result = allocator->AllocateMemory(
+ vkMemReq,
+ requiresDedicatedAllocation,
+ prefersDedicatedAllocation,
+ buffer, // dedicatedBuffer
+ VK_NULL_HANDLE, // dedicatedImage
+ *pCreateInfo,
+ VMA_SUBALLOCATION_TYPE_BUFFER,
+ 1, // allocationCount
+ pAllocation);
+
+#if VMA_RECORDING_ENABLED
+ if (allocator->GetRecorder() != VMA_NULL) {
+ allocator->GetRecorder()->RecordAllocateMemoryForBuffer(
+ allocator->GetCurrentFrameIndex(),
+ vkMemReq,
+ requiresDedicatedAllocation,
+ prefersDedicatedAllocation,
+ *pCreateInfo,
+ *pAllocation);
+ }
+#endif
+
+ if (pAllocationInfo && result == VK_SUCCESS) {
+ allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
+ }
+
+ return result;
+}
+
+VkResult vmaAllocateMemoryForImage(
+ VmaAllocator allocator,
+ VkImage image,
+ const VmaAllocationCreateInfo *pCreateInfo,
+ VmaAllocation *pAllocation,
+ VmaAllocationInfo *pAllocationInfo) {
+ VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
+
+ VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
+
+ VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
+ VkMemoryRequirements vkMemReq = {};
+ bool requiresDedicatedAllocation = false;
+ bool prefersDedicatedAllocation = false;
+ allocator->GetImageMemoryRequirements(image, vkMemReq,
+ requiresDedicatedAllocation, prefersDedicatedAllocation);
+
+ VkResult result = allocator->AllocateMemory(
+ vkMemReq,
+ requiresDedicatedAllocation,
+ prefersDedicatedAllocation,
+ VK_NULL_HANDLE, // dedicatedBuffer
+ image, // dedicatedImage
+ *pCreateInfo,
+ VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
+ 1, // allocationCount
+ pAllocation);
+
+#if VMA_RECORDING_ENABLED
+ if (allocator->GetRecorder() != VMA_NULL) {
+ allocator->GetRecorder()->RecordAllocateMemoryForImage(
+ allocator->GetCurrentFrameIndex(),
+ vkMemReq,
+ requiresDedicatedAllocation,
+ prefersDedicatedAllocation,
+ *pCreateInfo,
+ *pAllocation);
+ }
+#endif
+
+ if (pAllocationInfo && result == VK_SUCCESS) {
+ allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
+ }
+
+ return result;
+}
+
+void vmaFreeMemory(
+ VmaAllocator allocator,
+ VmaAllocation allocation) {
+ VMA_ASSERT(allocator);
+
+ if (allocation == VK_NULL_HANDLE) {
+ return;
+ }
+
+ VMA_DEBUG_LOG("vmaFreeMemory");
+
+ VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
+#if VMA_RECORDING_ENABLED
+ if (allocator->GetRecorder() != VMA_NULL) {
+ allocator->GetRecorder()->RecordFreeMemory(
+ allocator->GetCurrentFrameIndex(),
+ allocation);
+ }
+#endif
+
+ allocator->FreeMemory(
+ 1, // allocationCount
+ &allocation);
+}
+
+void vmaFreeMemoryPages(
+ VmaAllocator allocator,
+ size_t allocationCount,
+ VmaAllocation *pAllocations) {
+ if (allocationCount == 0) {
+ return;
+ }
+
+ VMA_ASSERT(allocator);
+
+ VMA_DEBUG_LOG("vmaFreeMemoryPages");
+
+ VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
+#if VMA_RECORDING_ENABLED
+ if (allocator->GetRecorder() != VMA_NULL) {
+ allocator->GetRecorder()->RecordFreeMemoryPages(
+ allocator->GetCurrentFrameIndex(),
+ (uint64_t)allocationCount,
+ pAllocations);
+ }
+#endif
+
+ allocator->FreeMemory(allocationCount, pAllocations);
+}
+
+VkResult vmaResizeAllocation(
+ VmaAllocator allocator,
+ VmaAllocation allocation,
+ VkDeviceSize newSize) {
+ VMA_ASSERT(allocator && allocation);
+
+ VMA_DEBUG_LOG("vmaResizeAllocation");
+
+ VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
+#if VMA_RECORDING_ENABLED
+ if (allocator->GetRecorder() != VMA_NULL) {
+ allocator->GetRecorder()->RecordResizeAllocation(
+ allocator->GetCurrentFrameIndex(),
+ allocation,
+ newSize);
+ }
+#endif
+
+ return allocator->ResizeAllocation(allocation, newSize);
+}
+
+void vmaGetAllocationInfo(
+ VmaAllocator allocator,
+ VmaAllocation allocation,
+ VmaAllocationInfo *pAllocationInfo) {
+ VMA_ASSERT(allocator && allocation && pAllocationInfo);
+
+ VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
+#if VMA_RECORDING_ENABLED
+ if (allocator->GetRecorder() != VMA_NULL) {
+ allocator->GetRecorder()->RecordGetAllocationInfo(
+ allocator->GetCurrentFrameIndex(),
+ allocation);
+ }
+#endif
+
+ allocator->GetAllocationInfo(allocation, pAllocationInfo);
+}
+
+VkBool32 vmaTouchAllocation(
+ VmaAllocator allocator,
+ VmaAllocation allocation) {
+ VMA_ASSERT(allocator && allocation);
+
+ VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
+#if VMA_RECORDING_ENABLED
+ if (allocator->GetRecorder() != VMA_NULL) {
+ allocator->GetRecorder()->RecordTouchAllocation(
+ allocator->GetCurrentFrameIndex(),
+ allocation);
+ }
+#endif
+
+ return allocator->TouchAllocation(allocation);
+}
+
+void vmaSetAllocationUserData(
+ VmaAllocator allocator,
+ VmaAllocation allocation,
+ void *pUserData) {
+ VMA_ASSERT(allocator && allocation);
+
+ VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
+ allocation->SetUserData(allocator, pUserData);
+
+#if VMA_RECORDING_ENABLED
+ if (allocator->GetRecorder() != VMA_NULL) {
+ allocator->GetRecorder()->RecordSetAllocationUserData(
+ allocator->GetCurrentFrameIndex(),
+ allocation,
+ pUserData);
+ }
+#endif
+}
+
+void vmaCreateLostAllocation(
+ VmaAllocator allocator,
+ VmaAllocation *pAllocation) {
+ VMA_ASSERT(allocator && pAllocation);
+
+ VMA_DEBUG_GLOBAL_MUTEX_LOCK;
+
+ allocator->CreateLostAllocation(pAllocation);
+
+#if VMA_RECORDING_ENABLED
+ if (allocator->GetRecorder() != VMA_NULL) {
+ allocator->GetRecorder()->RecordCreateLostAllocation(
+ allocator->GetCurrentFrameIndex(),
+ *pAllocation);
+ }
+#endif
+}
+
+VkResult vmaMapMemory(
+ VmaAllocator allocator,
+ VmaAllocation allocation,
+ void **ppData) {
+ VMA_ASSERT(allocator && allocation && ppData);
+
+ VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
+ VkResult res = allocator->Map(allocation, ppData);
+
+#if VMA_RECORDING_ENABLED
+ if (allocator->GetRecorder() != VMA_NULL) {
+ allocator->GetRecorder()->RecordMapMemory(
+ allocator->GetCurrentFrameIndex(),
+ allocation);
+ }
+#endif
+
+ return res;
+}
+
+void vmaUnmapMemory(
+ VmaAllocator allocator,
+ VmaAllocation allocation) {
+ VMA_ASSERT(allocator && allocation);
+
+ VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
+#if VMA_RECORDING_ENABLED
+ if (allocator->GetRecorder() != VMA_NULL) {
+ allocator->GetRecorder()->RecordUnmapMemory(
+ allocator->GetCurrentFrameIndex(),
+ allocation);
+ }
+#endif
+
+ allocator->Unmap(allocation);
+}
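+
+/*
+Usage sketch (illustrative; `srcData` and `srcSize` are the caller's): map,
+write, flush in case the memory type is non-coherent, then unmap. For memory
+that stays mapped for its whole lifetime, VMA_ALLOCATION_CREATE_MAPPED_BIT
+avoids the explicit Map/Unmap pair.
+
+    void *mapped = nullptr;
+    if (vmaMapMemory(allocator, allocation, &mapped) == VK_SUCCESS) {
+        memcpy(mapped, srcData, (size_t)srcSize);
+        vmaFlushAllocation(allocator, allocation, 0, VK_WHOLE_SIZE);
+        vmaUnmapMemory(allocator, allocation);
+    }
+*/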
+
+void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size) {
+ VMA_ASSERT(allocator && allocation);
+
+ VMA_DEBUG_LOG("vmaFlushAllocation");
+
+ VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
+ allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_FLUSH);
+
+#if VMA_RECORDING_ENABLED
+ if (allocator->GetRecorder() != VMA_NULL) {
+ allocator->GetRecorder()->RecordFlushAllocation(
+ allocator->GetCurrentFrameIndex(),
+ allocation, offset, size);
+ }
+#endif
+}
+
+void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size) {
+ VMA_ASSERT(allocator && allocation);
+
+ VMA_DEBUG_LOG("vmaInvalidateAllocation");
+
+ VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
+ allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_INVALIDATE);
+
+#if VMA_RECORDING_ENABLED
+ if (allocator->GetRecorder() != VMA_NULL) {
+ allocator->GetRecorder()->RecordInvalidateAllocation(
+ allocator->GetCurrentFrameIndex(),
+ allocation, offset, size);
+ }
+#endif
+}
+
+VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits) {
+ VMA_ASSERT(allocator);
+
+ VMA_DEBUG_LOG("vmaCheckCorruption");
+
+ VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
+ return allocator->CheckCorruption(memoryTypeBits);
+}
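+
+/*
+A sketch of how corruption checking is enabled (illustrative): margins and
+detection are compile-time options, so without them this call reports
+VK_ERROR_FEATURE_NOT_PRESENT.
+
+    // Before including the implementation:
+    // #define VMA_DEBUG_MARGIN 16
+    // #define VMA_DEBUG_DETECT_CORRUPTION 1
+
+    VkResult res = vmaCheckCorruption(allocator, UINT32_MAX); // check all memory types
+*/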
+
+VkResult vmaDefragment(
+ VmaAllocator allocator,
+ VmaAllocation *pAllocations,
+ size_t allocationCount,
+ VkBool32 *pAllocationsChanged,
+ const VmaDefragmentationInfo *pDefragmentationInfo,
+ VmaDefragmentationStats *pDefragmentationStats) {
+ // Deprecated interface, reimplemented using the new one.
+
+ VmaDefragmentationInfo2 info2 = {};
+ info2.allocationCount = (uint32_t)allocationCount;
+ info2.pAllocations = pAllocations;
+ info2.pAllocationsChanged = pAllocationsChanged;
+ if (pDefragmentationInfo != VMA_NULL) {
+ info2.maxCpuAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
+ info2.maxCpuBytesToMove = pDefragmentationInfo->maxBytesToMove;
+ } else {
+ info2.maxCpuAllocationsToMove = UINT32_MAX;
+ info2.maxCpuBytesToMove = VK_WHOLE_SIZE;
+ }
+ // info2.flags, maxGpuAllocationsToMove, maxGpuBytesToMove, commandBuffer deliberately left zero.
+
+ VmaDefragmentationContext ctx;
+ VkResult res = vmaDefragmentationBegin(allocator, &info2, pDefragmentationStats, &ctx);
+ if (res == VK_NOT_READY) {
+ res = vmaDefragmentationEnd(allocator, ctx);
+ }
+ return res;
+}
+
+VkResult vmaDefragmentationBegin(
+ VmaAllocator allocator,
+ const VmaDefragmentationInfo2 *pInfo,
+ VmaDefragmentationStats *pStats,
+ VmaDefragmentationContext *pContext) {
+ VMA_ASSERT(allocator && pInfo && pContext);
+
+ // Degenerate case: Nothing to defragment.
+ if (pInfo->allocationCount == 0 && pInfo->poolCount == 0) {
+ return VK_SUCCESS;
+ }
+
+ VMA_ASSERT(pInfo->allocationCount == 0 || pInfo->pAllocations != VMA_NULL);
+ VMA_ASSERT(pInfo->poolCount == 0 || pInfo->pPools != VMA_NULL);
+ VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->allocationCount, pInfo->pAllocations));
+ VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->poolCount, pInfo->pPools));
+
+ VMA_DEBUG_LOG("vmaDefragmentationBegin");
+
+ VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
+ VkResult res = allocator->DefragmentationBegin(*pInfo, pStats, pContext);
+
+#if VMA_RECORDING_ENABLED
+ if (allocator->GetRecorder() != VMA_NULL) {
+ allocator->GetRecorder()->RecordDefragmentationBegin(
+ allocator->GetCurrentFrameIndex(), *pInfo, *pContext);
+ }
+#endif
+
+ return res;
+}
+
+VkResult vmaDefragmentationEnd(
+ VmaAllocator allocator,
+ VmaDefragmentationContext context) {
+ VMA_ASSERT(allocator);
+
+ VMA_DEBUG_LOG("vmaDefragmentationEnd");
+
+ if (context != VK_NULL_HANDLE) {
+ VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
+#if VMA_RECORDING_ENABLED
+ if (allocator->GetRecorder() != VMA_NULL) {
+ allocator->GetRecorder()->RecordDefragmentationEnd(
+ allocator->GetCurrentFrameIndex(), context);
+ }
+#endif
+
+ return allocator->DefragmentationEnd(context);
+ } else {
+ return VK_SUCCESS;
+ }
+}
+
+VkResult vmaBindBufferMemory(
+ VmaAllocator allocator,
+ VmaAllocation allocation,
+ VkBuffer buffer) {
+ VMA_ASSERT(allocator && allocation && buffer);
+
+ VMA_DEBUG_LOG("vmaBindBufferMemory");
+
+ VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
+ return allocator->BindBufferMemory(allocation, buffer);
+}
+
+VkResult vmaBindImageMemory(
+ VmaAllocator allocator,
+ VmaAllocation allocation,
+ VkImage image) {
+ VMA_ASSERT(allocator && allocation && image);
+
+ VMA_DEBUG_LOG("vmaBindImageMemory");
+
+ VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
+ return allocator->BindImageMemory(allocation, image);
+}
+
+VkResult vmaCreateBuffer(
+ VmaAllocator allocator,
+ const VkBufferCreateInfo *pBufferCreateInfo,
+ const VmaAllocationCreateInfo *pAllocationCreateInfo,
+ VkBuffer *pBuffer,
+ VmaAllocation *pAllocation,
+ VmaAllocationInfo *pAllocationInfo) {
+ VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
+
+ if (pBufferCreateInfo->size == 0) {
+ return VK_ERROR_VALIDATION_FAILED_EXT;
+ }
+
+ VMA_DEBUG_LOG("vmaCreateBuffer");
+
+ VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
+ *pBuffer = VK_NULL_HANDLE;
+ *pAllocation = VK_NULL_HANDLE;
+
+ // 1. Create VkBuffer.
+ VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
+ allocator->m_hDevice,
+ pBufferCreateInfo,
+ allocator->GetAllocationCallbacks(),
+ pBuffer);
+ if (res >= 0) {
+ // 2. vkGetBufferMemoryRequirements.
+ VkMemoryRequirements vkMemReq = {};
+ bool requiresDedicatedAllocation = false;
+ bool prefersDedicatedAllocation = false;
+ allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
+ requiresDedicatedAllocation, prefersDedicatedAllocation);
+
+ // Make sure alignment requirements for specific buffer usages reported
+ // in Physical Device Properties are included in alignment reported by memory requirements.
+ if ((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) != 0) {
+ VMA_ASSERT(vkMemReq.alignment %
+ allocator->m_PhysicalDeviceProperties.limits.minTexelBufferOffsetAlignment ==
+ 0);
+ }
+ if ((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) != 0) {
+ VMA_ASSERT(vkMemReq.alignment %
+ allocator->m_PhysicalDeviceProperties.limits.minUniformBufferOffsetAlignment ==
+ 0);
+ }
+ if ((pBufferCreateInfo->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) != 0) {
+ VMA_ASSERT(vkMemReq.alignment %
+ allocator->m_PhysicalDeviceProperties.limits.minStorageBufferOffsetAlignment ==
+ 0);
+ }
+
+ // 3. Allocate memory using allocator.
+ res = allocator->AllocateMemory(
+ vkMemReq,
+ requiresDedicatedAllocation,
+ prefersDedicatedAllocation,
+ *pBuffer, // dedicatedBuffer
+ VK_NULL_HANDLE, // dedicatedImage
+ *pAllocationCreateInfo,
+ VMA_SUBALLOCATION_TYPE_BUFFER,
+ 1, // allocationCount
+ pAllocation);
+
+#if VMA_RECORDING_ENABLED
+ if (allocator->GetRecorder() != VMA_NULL) {
+ allocator->GetRecorder()->RecordCreateBuffer(
+ allocator->GetCurrentFrameIndex(),
+ *pBufferCreateInfo,
+ *pAllocationCreateInfo,
+ *pAllocation);
+ }
+#endif
+
+ if (res >= 0) {
+			// 4. Bind buffer with memory.
+ if ((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0) {
+ res = allocator->BindBufferMemory(*pAllocation, *pBuffer);
+ }
+ if (res >= 0) {
+// All steps succeeded.
+#if VMA_STATS_STRING_ENABLED
+ (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage);
+#endif
+ if (pAllocationInfo != VMA_NULL) {
+ allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
+ }
+
+ return VK_SUCCESS;
+ }
+ allocator->FreeMemory(
+ 1, // allocationCount
+ pAllocation);
+ *pAllocation = VK_NULL_HANDLE;
+ (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
+ *pBuffer = VK_NULL_HANDLE;
+ return res;
+ }
+ (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
+ *pBuffer = VK_NULL_HANDLE;
+ return res;
+ }
+ return res;
+}
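+
+// vmaCreateBuffer() is the convenience path that performs the create /
+// allocate / bind sequence above in one call. Typical use (sketch; sizes and
+// usage flags are placeholders):
+//
+//     VkBufferCreateInfo buffer_info = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
+//     buffer_info.size = 65536;
+//     buffer_info.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
+//
+//     VmaAllocationCreateInfo alloc_info = {};
+//     alloc_info.usage = VMA_MEMORY_USAGE_GPU_ONLY;
+//
+//     VkBuffer buffer;
+//     VmaAllocation allocation;
+//     vmaCreateBuffer(allocator, &buffer_info, &alloc_info, &buffer, &allocation, NULL);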
+
+void vmaDestroyBuffer(
+ VmaAllocator allocator,
+ VkBuffer buffer,
+ VmaAllocation allocation) {
+ VMA_ASSERT(allocator);
+
+ if (buffer == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE) {
+ return;
+ }
+
+ VMA_DEBUG_LOG("vmaDestroyBuffer");
+
+ VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
+#if VMA_RECORDING_ENABLED
+ if (allocator->GetRecorder() != VMA_NULL) {
+ allocator->GetRecorder()->RecordDestroyBuffer(
+ allocator->GetCurrentFrameIndex(),
+ allocation);
+ }
+#endif
+
+ if (buffer != VK_NULL_HANDLE) {
+ (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
+ }
+
+ if (allocation != VK_NULL_HANDLE) {
+ allocator->FreeMemory(
+ 1, // allocationCount
+ &allocation);
+ }
+}
+
+VkResult vmaCreateImage(
+ VmaAllocator allocator,
+ const VkImageCreateInfo *pImageCreateInfo,
+ const VmaAllocationCreateInfo *pAllocationCreateInfo,
+ VkImage *pImage,
+ VmaAllocation *pAllocation,
+ VmaAllocationInfo *pAllocationInfo) {
+ VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
+
+ if (pImageCreateInfo->extent.width == 0 ||
+ pImageCreateInfo->extent.height == 0 ||
+ pImageCreateInfo->extent.depth == 0 ||
+ pImageCreateInfo->mipLevels == 0 ||
+ pImageCreateInfo->arrayLayers == 0) {
+ return VK_ERROR_VALIDATION_FAILED_EXT;
+ }
+
+ VMA_DEBUG_LOG("vmaCreateImage");
+
+ VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
+ *pImage = VK_NULL_HANDLE;
+ *pAllocation = VK_NULL_HANDLE;
+
+ // 1. Create VkImage.
+ VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
+ allocator->m_hDevice,
+ pImageCreateInfo,
+ allocator->GetAllocationCallbacks(),
+ pImage);
+ if (res >= 0) {
+ VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
+ VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
+ VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
+
+ // 2. Allocate memory using allocator.
+ VkMemoryRequirements vkMemReq = {};
+ bool requiresDedicatedAllocation = false;
+ bool prefersDedicatedAllocation = false;
+ allocator->GetImageMemoryRequirements(*pImage, vkMemReq,
+ requiresDedicatedAllocation, prefersDedicatedAllocation);
+
+ res = allocator->AllocateMemory(
+ vkMemReq,
+ requiresDedicatedAllocation,
+ prefersDedicatedAllocation,
+ VK_NULL_HANDLE, // dedicatedBuffer
+ *pImage, // dedicatedImage
+ *pAllocationCreateInfo,
+ suballocType,
+ 1, // allocationCount
+ pAllocation);
+
+#if VMA_RECORDING_ENABLED
+ if (allocator->GetRecorder() != VMA_NULL) {
+ allocator->GetRecorder()->RecordCreateImage(
+ allocator->GetCurrentFrameIndex(),
+ *pImageCreateInfo,
+ *pAllocationCreateInfo,
+ *pAllocation);
+ }
+#endif
+
+ if (res >= 0) {
+ // 3. Bind image with memory.
+ if ((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0) {
+ res = allocator->BindImageMemory(*pAllocation, *pImage);
+ }
+ if (res >= 0) {
+// All steps succeeded.
+#if VMA_STATS_STRING_ENABLED
+ (*pAllocation)->InitBufferImageUsage(pImageCreateInfo->usage);
+#endif
+ if (pAllocationInfo != VMA_NULL) {
+ allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
+ }
+
+ return VK_SUCCESS;
+ }
+ allocator->FreeMemory(
+ 1, // allocationCount
+ pAllocation);
+ *pAllocation = VK_NULL_HANDLE;
+ (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
+ *pImage = VK_NULL_HANDLE;
+ return res;
+ }
+ (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
+ *pImage = VK_NULL_HANDLE;
+ return res;
+ }
+ return res;
+}
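+
+// vmaCreateImage() mirrors vmaCreateBuffer(); note above how the suballocation
+// type is chosen from the tiling, since linear and optimal images are kept
+// apart in memory (bufferImageGranularity). Sketch with placeholder values:
+//
+//     VkImageCreateInfo image_info = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
+//     image_info.imageType = VK_IMAGE_TYPE_2D;
+//     image_info.format = VK_FORMAT_R8G8B8A8_UNORM;
+//     image_info.extent = { 1024, 1024, 1 };
+//     image_info.mipLevels = 1;
+//     image_info.arrayLayers = 1;
+//     image_info.samples = VK_SAMPLE_COUNT_1_BIT;
+//     image_info.tiling = VK_IMAGE_TILING_OPTIMAL;
+//     image_info.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
+//
+//     VmaAllocationCreateInfo alloc_info = {};
+//     alloc_info.usage = VMA_MEMORY_USAGE_GPU_ONLY;
+//
+//     VkImage image;
+//     VmaAllocation allocation;
+//     vmaCreateImage(allocator, &image_info, &alloc_info, &image, &allocation, NULL);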
+
+void vmaDestroyImage(
+ VmaAllocator allocator,
+ VkImage image,
+ VmaAllocation allocation) {
+ VMA_ASSERT(allocator);
+
+ if (image == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE) {
+ return;
+ }
+
+ VMA_DEBUG_LOG("vmaDestroyImage");
+
+ VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
+#if VMA_RECORDING_ENABLED
+ if (allocator->GetRecorder() != VMA_NULL) {
+ allocator->GetRecorder()->RecordDestroyImage(
+ allocator->GetCurrentFrameIndex(),
+ allocation);
+ }
+#endif
+
+ if (image != VK_NULL_HANDLE) {
+ (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
+ }
+ if (allocation != VK_NULL_HANDLE) {
+ allocator->FreeMemory(
+ 1, // allocationCount
+ &allocation);
+ }
+}
+
+#endif // #ifdef VMA_IMPLEMENTATION
diff --git a/drivers/vulkan/vulkan_context.cpp b/drivers/vulkan/vulkan_context.cpp
new file mode 100644
index 0000000000..c71923ec6f
--- /dev/null
+++ b/drivers/vulkan/vulkan_context.cpp
@@ -0,0 +1,1314 @@
+#include "vulkan_context.h"
+#include "core/print_string.h"
+#include "core/project_settings.h"
+#include "core/version.h"
+#include "vk_enum_string_helper.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#define ARRAY_SIZE(a) (sizeof(a) / sizeof(a[0]))
+#define VULKAN_DEBUG(m_text) print_line(m_text)
+#define APP_SHORT_NAME "GodotEngine"
+
+VKAPI_ATTR VkBool32 VKAPI_CALL VulkanContext::_debug_messenger_callback(VkDebugUtilsMessageSeverityFlagBitsEXT messageSeverity,
+ VkDebugUtilsMessageTypeFlagsEXT messageType,
+ const VkDebugUtilsMessengerCallbackDataEXT *pCallbackData,
+ void *pUserData) {
+ char prefix[64] = "";
+ char *message = (char *)malloc(strlen(pCallbackData->pMessage) + 5000);
+ ERR_FAIL_COND_V(!message, false);
+
+ if (messageSeverity & VK_DEBUG_UTILS_MESSAGE_SEVERITY_VERBOSE_BIT_EXT) {
+ strcat(prefix, "VERBOSE : ");
+ } else if (messageSeverity & VK_DEBUG_UTILS_MESSAGE_SEVERITY_INFO_BIT_EXT) {
+ strcat(prefix, "INFO : ");
+ } else if (messageSeverity & VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT) {
+ strcat(prefix, "WARNING : ");
+ } else if (messageSeverity & VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT) {
+ strcat(prefix, "ERROR : ");
+ }
+
+ if (messageType & VK_DEBUG_UTILS_MESSAGE_TYPE_GENERAL_BIT_EXT) {
+ strcat(prefix, "GENERAL");
+ } else {
+ if (messageType & VK_DEBUG_UTILS_MESSAGE_TYPE_VALIDATION_BIT_EXT) {
+ strcat(prefix, "VALIDATION");
+ //validation_error = 1;
+ }
+ if (messageType & VK_DEBUG_UTILS_MESSAGE_TYPE_PERFORMANCE_BIT_EXT) {
+ if (messageType & VK_DEBUG_UTILS_MESSAGE_TYPE_VALIDATION_BIT_EXT) {
+ strcat(prefix, "|");
+ }
+ strcat(prefix, "PERFORMANCE");
+ }
+ }
+
+ sprintf(message, "%s - Message Id Number: %d | Message Id Name: %s\n\t%s\n", prefix, pCallbackData->messageIdNumber,
+ pCallbackData->pMessageIdName, pCallbackData->pMessage);
+
+ if (pCallbackData->objectCount > 0) {
+ char tmp_message[500];
+ sprintf(tmp_message, "\n\tObjects - %d\n", pCallbackData->objectCount);
+ strcat(message, tmp_message);
+ for (uint32_t object = 0; object < pCallbackData->objectCount; ++object) {
+ if (NULL != pCallbackData->pObjects[object].pObjectName && strlen(pCallbackData->pObjects[object].pObjectName) > 0) {
+ sprintf(tmp_message, "\t\tObject[%d] - %s, Handle %p, Name \"%s\"\n", object,
+ string_VkObjectType(pCallbackData->pObjects[object].objectType),
+ (void *)(pCallbackData->pObjects[object].objectHandle), pCallbackData->pObjects[object].pObjectName);
+ } else {
+ sprintf(tmp_message, "\t\tObject[%d] - %s, Handle %p\n", object,
+ string_VkObjectType(pCallbackData->pObjects[object].objectType),
+ (void *)(pCallbackData->pObjects[object].objectHandle));
+ }
+ strcat(message, tmp_message);
+ }
+ }
+ if (pCallbackData->cmdBufLabelCount > 0) {
+ char tmp_message[500];
+ sprintf(tmp_message, "\n\tCommand Buffer Labels - %d\n", pCallbackData->cmdBufLabelCount);
+ strcat(message, tmp_message);
+ for (uint32_t cmd_buf_label = 0; cmd_buf_label < pCallbackData->cmdBufLabelCount; ++cmd_buf_label) {
+ sprintf(tmp_message, "\t\tLabel[%d] - %s { %f, %f, %f, %f}\n", cmd_buf_label,
+ pCallbackData->pCmdBufLabels[cmd_buf_label].pLabelName, pCallbackData->pCmdBufLabels[cmd_buf_label].color[0],
+ pCallbackData->pCmdBufLabels[cmd_buf_label].color[1], pCallbackData->pCmdBufLabels[cmd_buf_label].color[2],
+ pCallbackData->pCmdBufLabels[cmd_buf_label].color[3]);
+ strcat(message, tmp_message);
+ }
+ }
+
+ ERR_PRINT(message);
+
+ free(message);
+
+ // Don't bail out, but keep going.
+ return false;
+}
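+
+// Given the format strings above, a typical validation message is rendered as,
+// for example:
+//
+//     ERROR : VALIDATION - Message Id Number: <id> | Message Id Name: <VUID>
+//         <message text>
+//
+//         Objects - 1
+//             Object[0] - VkDevice, Handle <ptr>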
+
+VkBool32 VulkanContext::_check_layers(uint32_t check_count, const char **check_names, uint32_t layer_count, VkLayerProperties *layers) {
+ for (uint32_t i = 0; i < check_count; i++) {
+ VkBool32 found = 0;
+ for (uint32_t j = 0; j < layer_count; j++) {
+ if (!strcmp(check_names[i], layers[j].layerName)) {
+ found = 1;
+ break;
+ }
+ }
+ if (!found) {
+			ERR_PRINT("Can't find layer: " + String(check_names[i]));
+ return 0;
+ }
+ }
+ return 1;
+}
+
+Error VulkanContext::_create_validation_layers() {
+
+ VkResult err;
+ uint32_t instance_layer_count = 0;
+ uint32_t validation_layer_count = 0;
+ const char *instance_validation_layers_alt1[] = { "VK_LAYER_LUNARG_standard_validation" };
+ const char *instance_validation_layers_alt2[] = { "VK_LAYER_GOOGLE_threading", "VK_LAYER_LUNARG_parameter_validation",
+ "VK_LAYER_LUNARG_object_tracker", "VK_LAYER_LUNARG_core_validation",
+ "VK_LAYER_GOOGLE_unique_objects" };
+ VkBool32 validation_found = 0;
+ err = vkEnumerateInstanceLayerProperties(&instance_layer_count, NULL);
+ ERR_FAIL_COND_V(err, ERR_CANT_CREATE);
+ const char **instance_validation_layers = instance_validation_layers_alt1;
+ if (instance_layer_count > 0) {
+ VkLayerProperties *instance_layers = (VkLayerProperties *)malloc(sizeof(VkLayerProperties) * instance_layer_count);
+ err = vkEnumerateInstanceLayerProperties(&instance_layer_count, instance_layers);
+ ERR_FAIL_COND_V(err, ERR_CANT_CREATE);
+
+ validation_found = _check_layers(ARRAY_SIZE(instance_validation_layers_alt1), instance_validation_layers,
+ instance_layer_count, instance_layers);
+ if (validation_found) {
+ enabled_layer_count = ARRAY_SIZE(instance_validation_layers_alt1);
+ enabled_layers[0] = "VK_LAYER_LUNARG_standard_validation";
+ validation_layer_count = 1;
+ } else {
+ // use alternative set of validation layers
+ instance_validation_layers = instance_validation_layers_alt2;
+ enabled_layer_count = ARRAY_SIZE(instance_validation_layers_alt2);
+ validation_found = _check_layers(ARRAY_SIZE(instance_validation_layers_alt2), instance_validation_layers,
+ instance_layer_count, instance_layers);
+ validation_layer_count = ARRAY_SIZE(instance_validation_layers_alt2);
+ for (uint32_t i = 0; i < validation_layer_count; i++) {
+ enabled_layers[i] = instance_validation_layers[i];
+ }
+ }
+ free(instance_layers);
+ }
+
+ if (!validation_found) {
+ return ERR_CANT_CREATE;
+ }
+
+ return OK;
+}
+
+Error VulkanContext::_initialize_extensions() {
+
+ VkResult err;
+ uint32_t instance_extension_count = 0;
+
+ enabled_extension_count = 0;
+ enabled_layer_count = 0;
+ /* Look for instance extensions */
+ VkBool32 surfaceExtFound = 0;
+ VkBool32 platformSurfaceExtFound = 0;
+ memset(extension_names, 0, sizeof(extension_names));
+
+ err = vkEnumerateInstanceExtensionProperties(NULL, &instance_extension_count, NULL);
+ ERR_FAIL_COND_V(err, ERR_CANT_CREATE);
+
+ if (instance_extension_count > 0) {
+ VkExtensionProperties *instance_extensions = (VkExtensionProperties *)malloc(sizeof(VkExtensionProperties) * instance_extension_count);
+ err = vkEnumerateInstanceExtensionProperties(NULL, &instance_extension_count, instance_extensions);
+ ERR_FAIL_COND_V(err, ERR_CANT_CREATE);
+ for (uint32_t i = 0; i < instance_extension_count; i++) {
+ if (!strcmp(VK_KHR_SURFACE_EXTENSION_NAME, instance_extensions[i].extensionName)) {
+ surfaceExtFound = 1;
+ extension_names[enabled_extension_count++] = VK_KHR_SURFACE_EXTENSION_NAME;
+ }
+
+ if (!strcmp(_get_platform_surface_extension(), instance_extensions[i].extensionName)) {
+ platformSurfaceExtFound = 1;
+ extension_names[enabled_extension_count++] = _get_platform_surface_extension();
+ }
+ if (!strcmp(VK_EXT_DEBUG_REPORT_EXTENSION_NAME, instance_extensions[i].extensionName)) {
+ if (use_validation_layers) {
+ extension_names[enabled_extension_count++] = VK_EXT_DEBUG_REPORT_EXTENSION_NAME;
+ }
+ }
+ if (!strcmp(VK_EXT_DEBUG_UTILS_EXTENSION_NAME, instance_extensions[i].extensionName)) {
+ if (use_validation_layers) {
+ extension_names[enabled_extension_count++] = VK_EXT_DEBUG_UTILS_EXTENSION_NAME;
+ }
+ }
+ ERR_FAIL_COND_V(enabled_extension_count >= MAX_EXTENSIONS, ERR_BUG); //??
+ }
+
+ free(instance_extensions);
+ }
+
+ ERR_FAIL_COND_V_MSG(!surfaceExtFound, ERR_CANT_CREATE, "No surface extension found, is a driver installed?");
+ ERR_FAIL_COND_V_MSG(!platformSurfaceExtFound, ERR_CANT_CREATE, "No platform surface extension found, is a driver installed?");
+
+ return OK;
+}
+
+Error VulkanContext::_create_physical_device() {
+
+	/* Look for validation layers */
+	if (use_validation_layers) {
+		if (_create_validation_layers() != OK) {
+			// Validation layers are optional; continue without them if not found.
+			use_validation_layers = false;
+		}
+	}
+
+ {
+ Error err = _initialize_extensions();
+ if (err != OK) {
+ return err;
+ }
+ }
+
+ CharString cs = ProjectSettings::get_singleton()->get("application/config/name").operator String().utf8();
+ String name = "GodotEngine " + String(VERSION_FULL_NAME);
+ CharString namecs = name.utf8();
+ const VkApplicationInfo app = {
+ .sType = VK_STRUCTURE_TYPE_APPLICATION_INFO,
+ .pNext = NULL,
+ .pApplicationName = cs.get_data(),
+ .applicationVersion = 0,
+ .pEngineName = namecs.get_data(),
+ .engineVersion = 0,
+ .apiVersion = VK_API_VERSION_1_0,
+ };
+ VkInstanceCreateInfo inst_info = {
+ .sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO,
+ .pNext = NULL,
+ .pApplicationInfo = &app,
+ .enabledLayerCount = enabled_layer_count,
+		.ppEnabledLayerNames = (const char *const *)enabled_layers,
+ .enabledExtensionCount = enabled_extension_count,
+ .ppEnabledExtensionNames = (const char *const *)extension_names,
+ };
+
+ /*
+ * This is info for a temp callback to use during CreateInstance.
+ * After the instance is created, we use the instance-based
+ * function to register the final callback.
+ */
+ VkDebugUtilsMessengerCreateInfoEXT dbg_messenger_create_info;
+ if (use_validation_layers) {
+ // VK_EXT_debug_utils style
+ dbg_messenger_create_info.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CREATE_INFO_EXT;
+ dbg_messenger_create_info.pNext = NULL;
+ dbg_messenger_create_info.flags = 0;
+ dbg_messenger_create_info.messageSeverity =
+ VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT | VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT;
+ dbg_messenger_create_info.messageType = VK_DEBUG_UTILS_MESSAGE_TYPE_GENERAL_BIT_EXT |
+ VK_DEBUG_UTILS_MESSAGE_TYPE_VALIDATION_BIT_EXT |
+ VK_DEBUG_UTILS_MESSAGE_TYPE_PERFORMANCE_BIT_EXT;
+ dbg_messenger_create_info.pfnUserCallback = _debug_messenger_callback;
+ dbg_messenger_create_info.pUserData = this;
+ inst_info.pNext = &dbg_messenger_create_info;
+ }
+
+ uint32_t gpu_count;
+
+ VkResult err = vkCreateInstance(&inst_info, NULL, &inst);
+ ERR_FAIL_COND_V_MSG(err == VK_ERROR_INCOMPATIBLE_DRIVER, ERR_CANT_CREATE,
+ "Cannot find a compatible Vulkan installable client driver (ICD).\n\n"
+ "vkCreateInstance Failure");
+ ERR_FAIL_COND_V_MSG(err == VK_ERROR_EXTENSION_NOT_PRESENT, ERR_CANT_CREATE,
+ "Cannot find a specified extension library.\n"
+ "Make sure your layers path is set appropriately.\n"
+ "vkCreateInstance Failure");
+ ERR_FAIL_COND_V_MSG(err, ERR_CANT_CREATE,
+ "vkCreateInstance failed.\n\n"
+ "Do you have a compatible Vulkan installable client driver (ICD) installed?\n"
+ "Please look at the Getting Started guide for additional information.\n"
+ "vkCreateInstance Failure");
+
+ /* Make initial call to query gpu_count, then second call for gpu info*/
+ err = vkEnumeratePhysicalDevices(inst, &gpu_count, NULL);
+ ERR_FAIL_COND_V(err, ERR_CANT_CREATE);
+
+ ERR_FAIL_COND_V_MSG(gpu_count == 0, ERR_CANT_CREATE,
+ "vkEnumeratePhysicalDevices reported zero accessible devices.\n\n"
+ "Do you have a compatible Vulkan installable client driver (ICD) installed?\n"
+ "vkEnumeratePhysicalDevices Failure");
+
+ VkPhysicalDevice *physical_devices = (VkPhysicalDevice *)malloc(sizeof(VkPhysicalDevice) * gpu_count);
+ err = vkEnumeratePhysicalDevices(inst, &gpu_count, physical_devices);
+ ERR_FAIL_COND_V(err, ERR_CANT_CREATE);
+ /* for now, just grab the first physical device */
+ gpu = physical_devices[0];
+ free(physical_devices);
+
+ /* Look for device extensions */
+ uint32_t device_extension_count = 0;
+ VkBool32 swapchainExtFound = 0;
+ enabled_extension_count = 0;
+ memset(extension_names, 0, sizeof(extension_names));
+
+ err = vkEnumerateDeviceExtensionProperties(gpu, NULL, &device_extension_count, NULL);
+ ERR_FAIL_COND_V(err, ERR_CANT_CREATE);
+
+ if (device_extension_count > 0) {
+ VkExtensionProperties *device_extensions = (VkExtensionProperties *)malloc(sizeof(VkExtensionProperties) * device_extension_count);
+ err = vkEnumerateDeviceExtensionProperties(gpu, NULL, &device_extension_count, device_extensions);
+ ERR_FAIL_COND_V(err, ERR_CANT_CREATE);
+
+ for (uint32_t i = 0; i < device_extension_count; i++) {
+ if (!strcmp(VK_KHR_SWAPCHAIN_EXTENSION_NAME, device_extensions[i].extensionName)) {
+ swapchainExtFound = 1;
+ extension_names[enabled_extension_count++] = VK_KHR_SWAPCHAIN_EXTENSION_NAME;
+ }
+ ERR_FAIL_COND_V(enabled_extension_count >= MAX_EXTENSIONS, ERR_BUG);
+ }
+
+ if (VK_KHR_incremental_present_enabled) {
+ // Even though the user "enabled" the extension via the command
+ // line, we must make sure that it's enumerated for use with the
+ // device. Therefore, disable it here, and re-enable it again if
+ // enumerated.
+ VK_KHR_incremental_present_enabled = false;
+ for (uint32_t i = 0; i < device_extension_count; i++) {
+ if (!strcmp(VK_KHR_INCREMENTAL_PRESENT_EXTENSION_NAME, device_extensions[i].extensionName)) {
+ extension_names[enabled_extension_count++] = VK_KHR_INCREMENTAL_PRESENT_EXTENSION_NAME;
+ VK_KHR_incremental_present_enabled = true;
+ VULKAN_DEBUG("VK_KHR_incremental_present extension enabled\n");
+ }
+ ERR_FAIL_COND_V(enabled_extension_count >= MAX_EXTENSIONS, ERR_BUG);
+ }
+ if (!VK_KHR_incremental_present_enabled) {
+ VULKAN_DEBUG("VK_KHR_incremental_present extension NOT AVAILABLE\n");
+ }
+ }
+
+ if (VK_GOOGLE_display_timing_enabled) {
+ // Even though the user "enabled" the extension via the command
+ // line, we must make sure that it's enumerated for use with the
+ // device. Therefore, disable it here, and re-enable it again if
+ // enumerated.
+ VK_GOOGLE_display_timing_enabled = false;
+ for (uint32_t i = 0; i < device_extension_count; i++) {
+ if (!strcmp(VK_GOOGLE_DISPLAY_TIMING_EXTENSION_NAME, device_extensions[i].extensionName)) {
+ extension_names[enabled_extension_count++] = VK_GOOGLE_DISPLAY_TIMING_EXTENSION_NAME;
+ VK_GOOGLE_display_timing_enabled = true;
+ VULKAN_DEBUG("VK_GOOGLE_display_timing extension enabled\n");
+ }
+ ERR_FAIL_COND_V(enabled_extension_count >= MAX_EXTENSIONS, ERR_BUG);
+ }
+ if (!VK_GOOGLE_display_timing_enabled) {
+ VULKAN_DEBUG("VK_GOOGLE_display_timing extension NOT AVAILABLE\n");
+ }
+ }
+
+ free(device_extensions);
+ }
+
+ ERR_FAIL_COND_V_MSG(!swapchainExtFound, ERR_CANT_CREATE,
+ "vkEnumerateDeviceExtensionProperties failed to find the " VK_KHR_SWAPCHAIN_EXTENSION_NAME
+ " extension.\n\nDo you have a compatible Vulkan installable client driver (ICD) installed?\n"
+ "vkCreateInstance Failure");
+
+	if (use_validation_layers) {
+		// Set up the VK_EXT_debug_utils function pointers (we use them for
+		// debug labels and names).
+ CreateDebugUtilsMessengerEXT =
+ (PFN_vkCreateDebugUtilsMessengerEXT)vkGetInstanceProcAddr(inst, "vkCreateDebugUtilsMessengerEXT");
+ DestroyDebugUtilsMessengerEXT =
+ (PFN_vkDestroyDebugUtilsMessengerEXT)vkGetInstanceProcAddr(inst, "vkDestroyDebugUtilsMessengerEXT");
+ SubmitDebugUtilsMessageEXT =
+ (PFN_vkSubmitDebugUtilsMessageEXT)vkGetInstanceProcAddr(inst, "vkSubmitDebugUtilsMessageEXT");
+ CmdBeginDebugUtilsLabelEXT =
+ (PFN_vkCmdBeginDebugUtilsLabelEXT)vkGetInstanceProcAddr(inst, "vkCmdBeginDebugUtilsLabelEXT");
+ CmdEndDebugUtilsLabelEXT =
+ (PFN_vkCmdEndDebugUtilsLabelEXT)vkGetInstanceProcAddr(inst, "vkCmdEndDebugUtilsLabelEXT");
+ CmdInsertDebugUtilsLabelEXT =
+ (PFN_vkCmdInsertDebugUtilsLabelEXT)vkGetInstanceProcAddr(inst, "vkCmdInsertDebugUtilsLabelEXT");
+ SetDebugUtilsObjectNameEXT =
+ (PFN_vkSetDebugUtilsObjectNameEXT)vkGetInstanceProcAddr(inst, "vkSetDebugUtilsObjectNameEXT");
+ if (NULL == CreateDebugUtilsMessengerEXT || NULL == DestroyDebugUtilsMessengerEXT ||
+ NULL == SubmitDebugUtilsMessageEXT || NULL == CmdBeginDebugUtilsLabelEXT ||
+ NULL == CmdEndDebugUtilsLabelEXT || NULL == CmdInsertDebugUtilsLabelEXT ||
+ NULL == SetDebugUtilsObjectNameEXT) {
+ ERR_FAIL_V_MSG(ERR_CANT_CREATE,
+ "GetProcAddr: Failed to init VK_EXT_debug_utils\n"
+ "GetProcAddr: Failure");
+ }
+
+ err = CreateDebugUtilsMessengerEXT(inst, &dbg_messenger_create_info, NULL, &dbg_messenger);
+ switch (err) {
+ case VK_SUCCESS:
+ break;
+ case VK_ERROR_OUT_OF_HOST_MEMORY:
+ ERR_FAIL_V_MSG(ERR_CANT_CREATE,
+ "CreateDebugUtilsMessengerEXT: out of host memory\n"
+ "CreateDebugUtilsMessengerEXT Failure");
+ break;
+			default:
+				ERR_FAIL_V_MSG(ERR_CANT_CREATE,
+						"CreateDebugUtilsMessengerEXT: unknown failure\n"
+						"CreateDebugUtilsMessengerEXT Failure");
+				break;
+ }
+ }
+ vkGetPhysicalDeviceProperties(gpu, &gpu_props);
+
+ /* Call with NULL data to get count */
+ vkGetPhysicalDeviceQueueFamilyProperties(gpu, &queue_family_count, NULL);
+ ERR_FAIL_COND_V(queue_family_count == 0, ERR_CANT_CREATE);
+
+ queue_props = (VkQueueFamilyProperties *)malloc(queue_family_count * sizeof(VkQueueFamilyProperties));
+ vkGetPhysicalDeviceQueueFamilyProperties(gpu, &queue_family_count, queue_props);
+
+ // Query fine-grained feature support for this device.
+ // If app has specific feature requirements it should check supported
+ // features based on this query
+ VkPhysicalDeviceFeatures physDevFeatures;
+ vkGetPhysicalDeviceFeatures(gpu, &physDevFeatures);
+
+#define GET_INSTANCE_PROC_ADDR(inst, entrypoint) \
+ { \
+ fp##entrypoint = (PFN_vk##entrypoint)vkGetInstanceProcAddr(inst, "vk" #entrypoint); \
+ ERR_FAIL_COND_V_MSG(fp##entrypoint == NULL, ERR_CANT_CREATE, \
+ "vkGetInstanceProcAddr failed to find vk" #entrypoint); \
+ }
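+
+// For reference, GET_INSTANCE_PROC_ADDR(inst, GetPhysicalDeviceSurfaceSupportKHR)
+// expands (modulo the error check) to:
+//
+//     fpGetPhysicalDeviceSurfaceSupportKHR = (PFN_vkGetPhysicalDeviceSurfaceSupportKHR)
+//             vkGetInstanceProcAddr(inst, "vkGetPhysicalDeviceSurfaceSupportKHR");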
+
+ GET_INSTANCE_PROC_ADDR(inst, GetPhysicalDeviceSurfaceSupportKHR);
+ GET_INSTANCE_PROC_ADDR(inst, GetPhysicalDeviceSurfaceCapabilitiesKHR);
+ GET_INSTANCE_PROC_ADDR(inst, GetPhysicalDeviceSurfaceFormatsKHR);
+ GET_INSTANCE_PROC_ADDR(inst, GetPhysicalDeviceSurfacePresentModesKHR);
+ GET_INSTANCE_PROC_ADDR(inst, GetSwapchainImagesKHR);
+
+ return OK;
+}
+
+Error VulkanContext::_create_device() {
+
+ VkResult err;
+ float queue_priorities[1] = { 0.0 };
+ VkDeviceQueueCreateInfo queues[2];
+ queues[0].sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO;
+ queues[0].pNext = NULL;
+ queues[0].queueFamilyIndex = graphics_queue_family_index;
+ queues[0].queueCount = 1;
+ queues[0].pQueuePriorities = queue_priorities;
+ queues[0].flags = 0;
+
+ VkDeviceCreateInfo sdevice = {
+ .sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO,
+ .pNext = NULL,
+ .queueCreateInfoCount = 1,
+ .pQueueCreateInfos = queues,
+ .enabledLayerCount = 0,
+ .ppEnabledLayerNames = NULL,
+ .enabledExtensionCount = enabled_extension_count,
+ .ppEnabledExtensionNames = (const char *const *)extension_names,
+ .pEnabledFeatures = NULL, // If specific features are required, pass them in here
+ };
+ if (separate_present_queue) {
+ queues[1].sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO;
+ queues[1].pNext = NULL;
+ queues[1].queueFamilyIndex = present_queue_family_index;
+ queues[1].queueCount = 1;
+ queues[1].pQueuePriorities = queue_priorities;
+ queues[1].flags = 0;
+ sdevice.queueCreateInfoCount = 2;
+ }
+ err = vkCreateDevice(gpu, &sdevice, NULL, &device);
+ ERR_FAIL_COND_V(err, ERR_CANT_CREATE);
+ return OK;
+}
+
+Error VulkanContext::_create_swap_chain() {
+
+ VkResult err = _create_surface(&surface, inst);
+ ERR_FAIL_COND_V(err, ERR_CANT_CREATE);
+
+ // Iterate over each queue to learn whether it supports presenting:
+ VkBool32 *supportsPresent = (VkBool32 *)malloc(queue_family_count * sizeof(VkBool32));
+ for (uint32_t i = 0; i < queue_family_count; i++) {
+ fpGetPhysicalDeviceSurfaceSupportKHR(gpu, i, surface, &supportsPresent[i]);
+ }
+
+ // Search for a graphics and a present queue in the array of queue
+ // families, try to find one that supports both
+ uint32_t graphicsQueueFamilyIndex = UINT32_MAX;
+ uint32_t presentQueueFamilyIndex = UINT32_MAX;
+ for (uint32_t i = 0; i < queue_family_count; i++) {
+ if ((queue_props[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) != 0) {
+ if (graphicsQueueFamilyIndex == UINT32_MAX) {
+ graphicsQueueFamilyIndex = i;
+ }
+
+ if (supportsPresent[i] == VK_TRUE) {
+ graphicsQueueFamilyIndex = i;
+ presentQueueFamilyIndex = i;
+ break;
+ }
+ }
+ }
+
+ if (presentQueueFamilyIndex == UINT32_MAX) {
+ // If didn't find a queue that supports both graphics and present, then
+ // find a separate present queue.
+ for (uint32_t i = 0; i < queue_family_count; ++i) {
+ if (supportsPresent[i] == VK_TRUE) {
+ presentQueueFamilyIndex = i;
+ break;
+ }
+ }
+ }
+
+ // Generate error if could not find both a graphics and a present queue
+ ERR_FAIL_COND_V_MSG(graphicsQueueFamilyIndex == UINT32_MAX || presentQueueFamilyIndex == UINT32_MAX, ERR_CANT_CREATE,
+ "Could not find both graphics and present queues\n");
+
+ graphics_queue_family_index = graphicsQueueFamilyIndex;
+ present_queue_family_index = presentQueueFamilyIndex;
+ separate_present_queue = (graphics_queue_family_index != present_queue_family_index);
+ free(supportsPresent);
+
+	Error device_err = _create_device();
+	if (device_err != OK) {
+		return device_err;
+	}
+
+ static PFN_vkGetDeviceProcAddr g_gdpa = NULL;
+#define GET_DEVICE_PROC_ADDR(dev, entrypoint) \
+ { \
+ if (!g_gdpa) g_gdpa = (PFN_vkGetDeviceProcAddr)vkGetInstanceProcAddr(inst, "vkGetDeviceProcAddr"); \
+ fp##entrypoint = (PFN_vk##entrypoint)g_gdpa(dev, "vk" #entrypoint); \
+ ERR_FAIL_COND_V_MSG(fp##entrypoint == NULL, ERR_CANT_CREATE, \
+ "vkGetDeviceProcAddr failed to find vk" #entrypoint); \
+ }
+
+ GET_DEVICE_PROC_ADDR(device, CreateSwapchainKHR);
+ GET_DEVICE_PROC_ADDR(device, DestroySwapchainKHR);
+ GET_DEVICE_PROC_ADDR(device, GetSwapchainImagesKHR);
+ GET_DEVICE_PROC_ADDR(device, AcquireNextImageKHR);
+ GET_DEVICE_PROC_ADDR(device, QueuePresentKHR);
+ if (VK_GOOGLE_display_timing_enabled) {
+ GET_DEVICE_PROC_ADDR(device, GetRefreshCycleDurationGOOGLE);
+ GET_DEVICE_PROC_ADDR(device, GetPastPresentationTimingGOOGLE);
+ }
+
+ vkGetDeviceQueue(device, graphics_queue_family_index, 0, &graphics_queue);
+
+ if (!separate_present_queue) {
+ present_queue = graphics_queue;
+ } else {
+ vkGetDeviceQueue(device, present_queue_family_index, 0, &present_queue);
+ }
+
+ // Get the list of VkFormat's that are supported:
+ uint32_t formatCount;
+ err = fpGetPhysicalDeviceSurfaceFormatsKHR(gpu, surface, &formatCount, NULL);
+ ERR_FAIL_COND_V(err, ERR_CANT_CREATE);
+ VkSurfaceFormatKHR *surfFormats = (VkSurfaceFormatKHR *)malloc(formatCount * sizeof(VkSurfaceFormatKHR));
+ err = fpGetPhysicalDeviceSurfaceFormatsKHR(gpu, surface, &formatCount, surfFormats);
+ ERR_FAIL_COND_V(err, ERR_CANT_CREATE);
+ // If the format list includes just one entry of VK_FORMAT_UNDEFINED,
+ // the surface has no preferred format. Otherwise, at least one
+ // supported format will be returned.
+	ERR_FAIL_COND_V(formatCount < 1, ERR_CANT_CREATE);
+	// Godot currently forces VK_FORMAT_B8G8R8A8_UNORM regardless of what the
+	// surface reports; the fallback branch below is dead code until proper
+	// format negotiation is restored.
+	if (true || (formatCount == 1 && surfFormats[0].format == VK_FORMAT_UNDEFINED)) {
+		format = VK_FORMAT_B8G8R8A8_UNORM;
+	} else {
+		format = surfFormats[0].format;
+	}
+	color_space = surfFormats[0].colorSpace;
+	free(surfFormats);
+	return OK;
+}
+
+Error VulkanContext::_create_semaphores() {
+ VkResult err;
+
+ // Create semaphores to synchronize acquiring presentable buffers before
+ // rendering and waiting for drawing to be complete before presenting
+ VkSemaphoreCreateInfo semaphoreCreateInfo = {
+ .sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO,
+ .pNext = NULL,
+ .flags = 0,
+ };
+
+ // Create fences that we can use to throttle if we get too far
+ // ahead of the image presents
+ VkFenceCreateInfo fence_ci = {
+ .sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO, .pNext = NULL, .flags = VK_FENCE_CREATE_SIGNALED_BIT
+ };
+ for (uint32_t i = 0; i < FRAME_LAG; i++) {
+ err = vkCreateFence(device, &fence_ci, NULL, &fences[i]);
+ ERR_FAIL_COND_V(err, ERR_CANT_CREATE);
+
+ err = vkCreateSemaphore(device, &semaphoreCreateInfo, NULL, &image_acquired_semaphores[i]);
+ ERR_FAIL_COND_V(err, ERR_CANT_CREATE);
+
+ err = vkCreateSemaphore(device, &semaphoreCreateInfo, NULL, &draw_complete_semaphores[i]);
+ ERR_FAIL_COND_V(err, ERR_CANT_CREATE);
+
+ if (separate_present_queue) {
+ err = vkCreateSemaphore(device, &semaphoreCreateInfo, NULL, &image_ownership_semaphores[i]);
+ ERR_FAIL_COND_V(err, ERR_CANT_CREATE);
+ }
+ }
+ frame_index = 0;
+
+ // Get Memory information and properties
+ vkGetPhysicalDeviceMemoryProperties(gpu, &memory_properties);
+
+ return OK;
+}
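+
+// With FRAME_LAG == 2, each in-flight frame owns one fence and one set of
+// semaphores, so the per-frame cycle in swap_buffers() below is, in outline:
+//
+//     vkWaitForFences(device, 1, &fences[frame_index], ...);  // throttle CPU
+//     fpAcquireNextImageKHR(..., image_acquired_semaphores[frame_index], ...);
+//     vkQueueSubmit(...);     // waits on image_acquired, signals draw_complete
+//     fpQueuePresentKHR(...); // waits on draw_complete (or image_ownership)
+//     frame_index = (frame_index + 1) % FRAME_LAG;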
+
+Error VulkanContext::_prepare_buffers() {
+ VkResult err;
+ VkSwapchainKHR oldSwapchain = swapchain;
+
+ // Check the surface capabilities and formats
+ VkSurfaceCapabilitiesKHR surfCapabilities;
+ err = fpGetPhysicalDeviceSurfaceCapabilitiesKHR(gpu, surface, &surfCapabilities);
+ ERR_FAIL_COND_V(err, ERR_CANT_CREATE);
+
+ uint32_t presentModeCount;
+ err = fpGetPhysicalDeviceSurfacePresentModesKHR(gpu, surface, &presentModeCount, NULL);
+ ERR_FAIL_COND_V(err, ERR_CANT_CREATE);
+ VkPresentModeKHR *presentModes = (VkPresentModeKHR *)malloc(presentModeCount * sizeof(VkPresentModeKHR));
+ ERR_FAIL_COND_V(!presentModes, ERR_CANT_CREATE);
+ err = fpGetPhysicalDeviceSurfacePresentModesKHR(gpu, surface, &presentModeCount, presentModes);
+ ERR_FAIL_COND_V(err, ERR_CANT_CREATE);
+
+ VkExtent2D swapchainExtent;
+ // width and height are either both 0xFFFFFFFF, or both not 0xFFFFFFFF.
+ if (surfCapabilities.currentExtent.width == 0xFFFFFFFF) {
+ // If the surface size is undefined, the size is set to the size
+ // of the images requested, which must fit within the minimum and
+ // maximum values.
+ swapchainExtent.width = width;
+ swapchainExtent.height = height;
+
+ if (swapchainExtent.width < surfCapabilities.minImageExtent.width) {
+ swapchainExtent.width = surfCapabilities.minImageExtent.width;
+ } else if (swapchainExtent.width > surfCapabilities.maxImageExtent.width) {
+ swapchainExtent.width = surfCapabilities.maxImageExtent.width;
+ }
+
+ if (swapchainExtent.height < surfCapabilities.minImageExtent.height) {
+ swapchainExtent.height = surfCapabilities.minImageExtent.height;
+ } else if (swapchainExtent.height > surfCapabilities.maxImageExtent.height) {
+ swapchainExtent.height = surfCapabilities.maxImageExtent.height;
+ }
+ } else {
+ // If the surface size is defined, the swap chain size must match
+ swapchainExtent = surfCapabilities.currentExtent;
+ width = surfCapabilities.currentExtent.width;
+ height = surfCapabilities.currentExtent.height;
+ }
+
+	if (width == 0 || height == 0) {
+		free(presentModes);
+		is_minimized = true;
+		return OK;
+	} else {
+		is_minimized = false;
+	}
+
+ // The FIFO present mode is guaranteed by the spec to be supported
+ // and to have no tearing. It's a great default present mode to use.
+ VkPresentModeKHR swapchainPresentMode = VK_PRESENT_MODE_FIFO_KHR;
+
+ // There are times when you may wish to use another present mode. The
+ // following code shows how to select them, and the comments provide some
+ // reasons you may wish to use them.
+ //
+ // It should be noted that Vulkan 1.0 doesn't provide a method for
+ // synchronizing rendering with the presentation engine's display. There
+ // is a method provided for throttling rendering with the display, but
+ // there are some presentation engines for which this method will not work.
+ // If an application doesn't throttle its rendering, and if it renders much
+ // faster than the refresh rate of the display, this can waste power on
+ // mobile devices. That is because power is being spent rendering images
+ // that may never be seen.
+
+ // VK_PRESENT_MODE_IMMEDIATE_KHR is for applications that don't care about
+ // tearing, or have some way of synchronizing their rendering with the
+ // display.
+ // VK_PRESENT_MODE_MAILBOX_KHR may be useful for applications that
+ // generally render a new presentable image every refresh cycle, but are
+ // occasionally early. In this case, the application wants the new image
+ // to be displayed instead of the previously-queued-for-presentation image
+ // that has not yet been displayed.
+ // VK_PRESENT_MODE_FIFO_RELAXED_KHR is for applications that generally
+ // render a new presentable image every refresh cycle, but are occasionally
+ // late. In this case (perhaps because of stuttering/latency concerns),
+ // the application wants the late image to be immediately displayed, even
+ // though that may mean some tearing.
+
+ if (presentMode != swapchainPresentMode) {
+ for (size_t i = 0; i < presentModeCount; ++i) {
+ if (presentModes[i] == presentMode) {
+ swapchainPresentMode = presentMode;
+ break;
+ }
+ }
+ }
+ ERR_FAIL_COND_V_MSG(swapchainPresentMode != presentMode, ERR_CANT_CREATE, "Present mode specified is not supported\n");
+
+ // Determine the number of VkImages to use in the swap chain.
+ // Application desires to acquire 3 images at a time for triple
+ // buffering
+ uint32_t desiredNumOfSwapchainImages = 3;
+ if (desiredNumOfSwapchainImages < surfCapabilities.minImageCount) {
+ desiredNumOfSwapchainImages = surfCapabilities.minImageCount;
+ }
+ // If maxImageCount is 0, we can ask for as many images as we want;
+ // otherwise we're limited to maxImageCount
+ if ((surfCapabilities.maxImageCount > 0) && (desiredNumOfSwapchainImages > surfCapabilities.maxImageCount)) {
+ // Application must settle for fewer images than desired:
+ desiredNumOfSwapchainImages = surfCapabilities.maxImageCount;
+ }
+
+ VkSurfaceTransformFlagsKHR preTransform;
+ if (surfCapabilities.supportedTransforms & VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR) {
+ preTransform = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
+ } else {
+ preTransform = surfCapabilities.currentTransform;
+ }
+
+ // Find a supported composite alpha mode - one of these is guaranteed to be set
+ VkCompositeAlphaFlagBitsKHR compositeAlpha = VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR;
+ VkCompositeAlphaFlagBitsKHR compositeAlphaFlags[4] = {
+ VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR,
+ VK_COMPOSITE_ALPHA_PRE_MULTIPLIED_BIT_KHR,
+ VK_COMPOSITE_ALPHA_POST_MULTIPLIED_BIT_KHR,
+ VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR,
+ };
+ for (uint32_t i = 0; i < ARRAY_SIZE(compositeAlphaFlags); i++) {
+ if (surfCapabilities.supportedCompositeAlpha & compositeAlphaFlags[i]) {
+ compositeAlpha = compositeAlphaFlags[i];
+ break;
+ }
+ }
+
+ VkSwapchainCreateInfoKHR swapchain_ci = {
+ .sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR,
+ .pNext = NULL,
+ .surface = surface,
+ .minImageCount = desiredNumOfSwapchainImages,
+ .imageFormat = format,
+ .imageColorSpace = color_space,
+ .imageExtent = {
+ .width = swapchainExtent.width,
+ .height = swapchainExtent.height,
+ },
+ .imageArrayLayers = 1,
+ .imageUsage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT,
+ .imageSharingMode = VK_SHARING_MODE_EXCLUSIVE,
+ .queueFamilyIndexCount = 0,
+ .pQueueFamilyIndices = NULL,
+ .preTransform = (VkSurfaceTransformFlagBitsKHR)preTransform,
+ .compositeAlpha = compositeAlpha,
+ .presentMode = swapchainPresentMode,
+ .clipped = true,
+ .oldSwapchain = oldSwapchain,
+ };
+ uint32_t i;
+ err = fpCreateSwapchainKHR(device, &swapchain_ci, NULL, &swapchain);
+ ERR_FAIL_COND_V(err, ERR_CANT_CREATE);
+
+ // If we just re-created an existing swapchain, we should destroy the old
+ // swapchain at this point.
+ // Note: destroying the swapchain also cleans up all its associated
+ // presentable images once the platform is done with them.
+ if (oldSwapchain != VK_NULL_HANDLE) {
+ fpDestroySwapchainKHR(device, oldSwapchain, NULL);
+ }
+
+ err = fpGetSwapchainImagesKHR(device, swapchain, &swapchainImageCount, NULL);
+ ERR_FAIL_COND_V(err, ERR_CANT_CREATE);
+
+ VkImage *swapchainImages = (VkImage *)malloc(swapchainImageCount * sizeof(VkImage));
+ ERR_FAIL_COND_V(!swapchainImages, ERR_CANT_CREATE);
+ err = fpGetSwapchainImagesKHR(device, swapchain, &swapchainImageCount, swapchainImages);
+ ERR_FAIL_COND_V(err, ERR_CANT_CREATE);
+
+ swapchain_image_resources =
+ (SwapchainImageResources *)malloc(sizeof(SwapchainImageResources) * swapchainImageCount);
+ ERR_FAIL_COND_V(!swapchain_image_resources, ERR_CANT_CREATE);
+
+ for (i = 0; i < swapchainImageCount; i++) {
+ VkImageViewCreateInfo color_image_view = {
+ .sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
+ .pNext = NULL,
+ .flags = 0,
+ .viewType = VK_IMAGE_VIEW_TYPE_2D,
+ .format = format,
+ .components = {
+ .r = VK_COMPONENT_SWIZZLE_R,
+ .g = VK_COMPONENT_SWIZZLE_G,
+ .b = VK_COMPONENT_SWIZZLE_B,
+ .a = VK_COMPONENT_SWIZZLE_A,
+ },
+ .subresourceRange = { .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT, .baseMipLevel = 0, .levelCount = 1, .baseArrayLayer = 0, .layerCount = 1 },
+ };
+
+ swapchain_image_resources[i].image = swapchainImages[i];
+
+ color_image_view.image = swapchain_image_resources[i].image;
+
+ err = vkCreateImageView(device, &color_image_view, NULL, &swapchain_image_resources[i].view);
+ ERR_FAIL_COND_V(err, ERR_CANT_CREATE);
+	}
+
+	free(swapchainImages);
+
+ if (VK_GOOGLE_display_timing_enabled) {
+ VkRefreshCycleDurationGOOGLE rc_dur;
+ err = fpGetRefreshCycleDurationGOOGLE(device, swapchain, &rc_dur);
+ ERR_FAIL_COND_V(err, ERR_CANT_CREATE);
+ refresh_duration = rc_dur.refreshDuration;
+
+ syncd_with_actual_presents = false;
+ // Initially target 1X the refresh duration:
+ target_IPD = refresh_duration;
+ refresh_duration_multiplier = 1;
+ prev_desired_present_time = 0;
+ next_present_id = 1;
+ }
+
+ if (NULL != presentModes) {
+ free(presentModes);
+ }
+
+ return OK;
+}
+
+Error VulkanContext::_prepare_framebuffers() {
+
+ //for this, we only need color (no depth), since Godot does not render to the main
+ //render buffer
+
+ const VkAttachmentDescription attachment = {
+
+ .flags = 0,
+ .format = format,
+ .samples = VK_SAMPLE_COUNT_1_BIT,
+ .loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR,
+ .storeOp = VK_ATTACHMENT_STORE_OP_STORE,
+ .stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE,
+ .stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE,
+ .initialLayout = VK_IMAGE_LAYOUT_UNDEFINED,
+ .finalLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR,
+
+ };
+ const VkAttachmentReference color_reference = {
+ .attachment = 0,
+ .layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
+ };
+
+ const VkSubpassDescription subpass = {
+ .flags = 0,
+ .pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS,
+ .inputAttachmentCount = 0,
+ .pInputAttachments = NULL,
+ .colorAttachmentCount = 1,
+ .pColorAttachments = &color_reference,
+ .pResolveAttachments = NULL,
+ .pDepthStencilAttachment = NULL,
+ .preserveAttachmentCount = 0,
+ .pPreserveAttachments = NULL,
+ };
+ const VkRenderPassCreateInfo rp_info = {
+ .sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO,
+ .pNext = NULL,
+ .flags = 0,
+ .attachmentCount = 1,
+ .pAttachments = &attachment,
+ .subpassCount = 1,
+ .pSubpasses = &subpass,
+ .dependencyCount = 0,
+ .pDependencies = NULL,
+ };
+ VkResult err;
+
+ err = vkCreateRenderPass(device, &rp_info, NULL, &render_pass);
+ ERR_FAIL_COND_V(err, ERR_CANT_CREATE);
+
+ for (uint32_t i = 0; i < swapchainImageCount; i++) {
+ const VkFramebufferCreateInfo fb_info = {
+ .sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO,
+ .pNext = NULL,
+ .renderPass = render_pass,
+ .attachmentCount = 1,
+ .pAttachments = &swapchain_image_resources[i].view,
+ .width = width,
+ .height = height,
+ .layers = 1,
+ };
+
+ err = vkCreateFramebuffer(device, &fb_info, NULL, &swapchain_image_resources[i].framebuffer);
+ ERR_FAIL_COND_V(err, ERR_CANT_CREATE);
+ }
+
+ return OK;
+}
+
+Error VulkanContext::_create_buffers() {
+
+ Error error = _prepare_buffers();
+ if (error != OK) {
+ return error;
+ }
+
+	if (is_minimized) {
+ prepared = false;
+ return OK;
+ }
+
+	error = _prepare_framebuffers();
+	if (error != OK) {
+		return error;
+	}
+
+ if (separate_present_queue) {
+ const VkCommandPoolCreateInfo present_cmd_pool_info = {
+ .sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO,
+ .pNext = NULL,
+ .flags = 0,
+ .queueFamilyIndex = present_queue_family_index,
+ };
+ VkResult err = vkCreateCommandPool(device, &present_cmd_pool_info, NULL, &present_cmd_pool);
+ ERR_FAIL_COND_V(err, ERR_CANT_CREATE);
+ const VkCommandBufferAllocateInfo present_cmd_info = {
+ .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,
+ .pNext = NULL,
+ .commandPool = present_cmd_pool,
+ .level = VK_COMMAND_BUFFER_LEVEL_PRIMARY,
+ .commandBufferCount = 1,
+ };
+ for (uint32_t i = 0; i < swapchainImageCount; i++) {
+ err = vkAllocateCommandBuffers(device, &present_cmd_info,
+ &swapchain_image_resources[i].graphics_to_present_cmd);
+ ERR_FAIL_COND_V(err, ERR_CANT_CREATE);
+
+ const VkCommandBufferBeginInfo cmd_buf_info = {
+ .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,
+ .pNext = NULL,
+ .flags = VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT,
+ .pInheritanceInfo = NULL,
+ };
+ err = vkBeginCommandBuffer(swapchain_image_resources[i].graphics_to_present_cmd, &cmd_buf_info);
+ ERR_FAIL_COND_V(err, ERR_CANT_CREATE);
+
+ VkImageMemoryBarrier image_ownership_barrier = { .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
+ .pNext = NULL,
+ .srcAccessMask = 0,
+ .dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
+ .oldLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR,
+ .newLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR,
+ .srcQueueFamilyIndex = graphics_queue_family_index,
+ .dstQueueFamilyIndex = present_queue_family_index,
+ .image = swapchain_image_resources[i].image,
+ .subresourceRange = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1 } };
+
+ vkCmdPipelineBarrier(swapchain_image_resources[i].graphics_to_present_cmd, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
+ VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, 0, 0, NULL, 0, NULL, 1, &image_ownership_barrier);
+ err = vkEndCommandBuffer(swapchain_image_resources[i].graphics_to_present_cmd);
+ ERR_FAIL_COND_V(err, ERR_CANT_CREATE);
+ }
+ }
+
+ current_buffer = 0;
+ prepared = true;
+
+ return OK;
+}
+
+Error VulkanContext::initialize(int p_width, int p_height, bool p_minimized) {
+
+	width = p_width;
+	height = p_height;
+	is_minimized = p_minimized;
+
+ Error err = _create_physical_device();
+ if (err) {
+ return err;
+ }
+
+ err = _create_swap_chain();
+ if (err) {
+ return err;
+ }
+
+ err = _create_semaphores();
+ if (err) {
+ return err;
+ }
+
+ err = _create_buffers();
+ if (err) {
+ return err;
+ }
+
+ print_line("Vulkan context creation success o_O");
+ return OK;
+}
+
+void VulkanContext::set_setup_buffer(const VkCommandBuffer &pCommandBuffer) {
+ command_buffer_queue.write[0] = pCommandBuffer;
+}
+
+void VulkanContext::append_command_buffer(const VkCommandBuffer &pCommandBuffer) {
+
+ if (command_buffer_queue.size() <= command_buffer_count) {
+ command_buffer_queue.resize(command_buffer_count + 1);
+ }
+
+ command_buffer_queue.write[command_buffer_count] = pCommandBuffer;
+ command_buffer_count++;
+}
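+
+// Convention used by the two functions above: slot 0 of command_buffer_queue is
+// reserved for the setup command buffer and everything past it is a frame
+// command buffer, so a typical caller (sketch) does:
+//
+//     context->set_setup_buffer(setup_cmd);     // transfers, layout changes
+//     context->append_command_buffer(draw_cmd); // one or more draw lists
+//     context->swap_buffers();                  // submits and presents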
+
+void VulkanContext::flush(bool p_flush_setup, bool p_flush_pending) {
+
+ // ensure everything else pending is executed
+ for (int i = 0; i < FRAME_LAG; i++) {
+ int to_fence = (frame_index + i) % FRAME_LAG;
+ vkWaitForFences(device, 1, &fences[to_fence], VK_TRUE, UINT64_MAX);
+ }
+
+ //flush the pending setup buffer
+
+ if (p_flush_setup && command_buffer_queue[0]) {
+
+ //use a fence to wait for everything done
+
+ vkResetFences(device, 1, &fences[frame_index]);
+
+ VkSubmitInfo submit_info;
+ submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
+ submit_info.pNext = NULL;
+ submit_info.pWaitDstStageMask = NULL;
+ submit_info.waitSemaphoreCount = 0;
+ submit_info.pWaitSemaphores = NULL;
+ submit_info.commandBufferCount = 1;
+ submit_info.pCommandBuffers = command_buffer_queue.ptr();
+ submit_info.signalSemaphoreCount = 0;
+ submit_info.pSignalSemaphores = NULL;
+ VkResult err = vkQueueSubmit(graphics_queue, 1, &submit_info, fences[frame_index]);
+ command_buffer_queue.write[0] = NULL;
+ ERR_FAIL_COND(err);
+ vkWaitForFences(device, 1, &fences[frame_index], VK_TRUE, UINT64_MAX);
+ }
+
+ if (p_flush_pending && command_buffer_count > 1) {
+
+ //use a fence to wait for everything done
+
+ vkResetFences(device, 1, &fences[frame_index]);
+
+ VkSubmitInfo submit_info;
+ submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
+ submit_info.pNext = NULL;
+ submit_info.pWaitDstStageMask = NULL;
+ submit_info.waitSemaphoreCount = 0;
+ submit_info.pWaitSemaphores = NULL;
+ submit_info.commandBufferCount = command_buffer_count - 1;
+ submit_info.pCommandBuffers = command_buffer_queue.ptr() + 1;
+ submit_info.signalSemaphoreCount = 0;
+ submit_info.pSignalSemaphores = NULL;
+		VkResult err = vkQueueSubmit(graphics_queue, 1, &submit_info, fences[frame_index]);
+		ERR_FAIL_COND(err);
+ vkWaitForFences(device, 1, &fences[frame_index], VK_TRUE, UINT64_MAX);
+
+ command_buffer_count = 1;
+ }
+}
+
+Error VulkanContext::swap_buffers() {
+
+ // print_line("swapbuffers?");
+ VkResult err;
+
+ // Ensure no more than FRAME_LAG renderings are outstanding
+ vkWaitForFences(device, 1, &fences[frame_index], VK_TRUE, UINT64_MAX);
+ vkResetFences(device, 1, &fences[frame_index]);
+
+ do {
+ // Get the index of the next available swapchain image:
+ err =
+ fpAcquireNextImageKHR(device, swapchain, UINT64_MAX,
+ image_acquired_semaphores[frame_index], VK_NULL_HANDLE, &current_buffer);
+
+ if (err == VK_ERROR_OUT_OF_DATE_KHR) {
+ // swapchain is out of date (e.g. the window was resized) and
+ // must be recreated:
+			print_line("early out of date");
+ resize_notify();
+ } else if (err == VK_SUBOPTIMAL_KHR) {
+ print_line("early suboptimal");
+ // swapchain is not as optimal as it could be, but the platform's
+ // presentation engine will still present the image correctly.
+ break;
+ } else {
+ ERR_FAIL_COND_V(err, ERR_CANT_CREATE);
+ }
+ } while (err != VK_SUCCESS);
+
+#if 0
+ if (VK_GOOGLE_display_timing_enabled) {
+ // Look at what happened to previous presents, and make appropriate
+ // adjustments in timing:
+ DemoUpdateTargetIPD(demo);
+
+		// Note: a real application would position its geometry so that it's in
+		// the correct location for when the next image is presented. It might
+ // also wait, so that there's less latency between any input and when
+ // the next image is rendered/presented. This demo program is so
+ // simple that it doesn't do either of those.
+ }
+#endif
+ // Wait for the image acquired semaphore to be signaled to ensure
+ // that the image won't be rendered to until the presentation
+ // engine has fully released ownership to the application, and it is
+ // okay to render to the image.
+
+ const VkCommandBuffer *commands_ptr = NULL;
+ uint32_t commands_to_submit = 0;
+
+ if (command_buffer_queue[0] == NULL) {
+ //no setup command, but commands to submit, submit from the first and skip command
+ if (command_buffer_count > 1) {
+ commands_ptr = command_buffer_queue.ptr() + 1;
+ commands_to_submit = command_buffer_count - 1;
+ }
+ } else {
+ commands_ptr = command_buffer_queue.ptr();
+ commands_to_submit = command_buffer_count;
+ }
+
+ VkPipelineStageFlags pipe_stage_flags;
+ VkSubmitInfo submit_info;
+ submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
+ submit_info.pNext = NULL;
+ submit_info.pWaitDstStageMask = &pipe_stage_flags;
+ pipe_stage_flags = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
+ submit_info.waitSemaphoreCount = 1;
+ submit_info.pWaitSemaphores = &image_acquired_semaphores[frame_index];
+ submit_info.commandBufferCount = commands_to_submit;
+ submit_info.pCommandBuffers = commands_ptr;
+ submit_info.signalSemaphoreCount = 1;
+ submit_info.pSignalSemaphores = &draw_complete_semaphores[frame_index];
+ err = vkQueueSubmit(graphics_queue, 1, &submit_info, fences[frame_index]);
+ ERR_FAIL_COND_V(err, ERR_CANT_CREATE);
+
+ command_buffer_queue.write[0] = NULL;
+ command_buffer_count = 1;
+
+ if (separate_present_queue) {
+ // If we are using separate queues, change image ownership to the
+ // present queue before presenting, waiting for the draw complete
+ // semaphore and signalling the ownership released semaphore when finished
+ VkFence nullFence = VK_NULL_HANDLE;
+ pipe_stage_flags = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
+ submit_info.waitSemaphoreCount = 1;
+ submit_info.pWaitSemaphores = &draw_complete_semaphores[frame_index];
+ submit_info.commandBufferCount = 1;
+ submit_info.pCommandBuffers = &swapchain_image_resources[current_buffer].graphics_to_present_cmd;
+ submit_info.signalSemaphoreCount = 1;
+ submit_info.pSignalSemaphores = &image_ownership_semaphores[frame_index];
+ err = vkQueueSubmit(present_queue, 1, &submit_info, nullFence);
+ ERR_FAIL_COND_V(err, ERR_CANT_CREATE);
+ }
+
+ // If we are using separate queues we have to wait for image ownership,
+ // otherwise wait for draw complete
+ VkPresentInfoKHR present = {
+ .sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR,
+ .pNext = NULL,
+ .waitSemaphoreCount = 1,
+ .pWaitSemaphores = (separate_present_queue) ? &image_ownership_semaphores[frame_index] : &draw_complete_semaphores[frame_index],
+ .swapchainCount = 1,
+ .pSwapchains = &swapchain,
+ .pImageIndices = &current_buffer,
+ };
+#if 0
+ if (VK_KHR_incremental_present_enabled) {
+ // If using VK_KHR_incremental_present, we provide a hint of the region
+ // that contains changed content relative to the previously-presented
+ // image. The implementation can use this hint in order to save
+ // work/power (by only copying the region in the hint). The
+ // implementation is free to ignore the hint though, and so we must
+ // ensure that the entire image has the correctly-drawn content.
+ uint32_t eighthOfWidth = width / 8;
+ uint32_t eighthOfHeight = height / 8;
+ VkRectLayerKHR rect = {
+ .offset.x = eighthOfWidth,
+ .offset.y = eighthOfHeight,
+ .extent.width = eighthOfWidth * 6,
+ .extent.height = eighthOfHeight * 6,
+ .layer = 0,
+ };
+ VkPresentRegionKHR region = {
+ .rectangleCount = 1,
+ .pRectangles = &rect,
+ };
+ VkPresentRegionsKHR regions = {
+ .sType = VK_STRUCTURE_TYPE_PRESENT_REGIONS_KHR,
+ .pNext = present.pNext,
+ .swapchainCount = present.swapchainCount,
+ .pRegions = &region,
+ };
+ present.pNext = &regions;
+ }
+#endif
+
+#if 0
+ if (VK_GOOGLE_display_timing_enabled) {
+ VkPresentTimeGOOGLE ptime;
+ if (prev_desired_present_time == 0) {
+ // This must be the first present for this swapchain.
+ //
+ // We don't know where we are relative to the presentation engine's
+ // display's refresh cycle. We also don't know how long rendering
+ // takes. Let's make a grossly-simplified assumption that the
+ // desiredPresentTime should be half way between now and
+ // now+target_IPD. We will adjust over time.
+ uint64_t curtime = getTimeInNanoseconds();
+ if (curtime == 0) {
+ // Since we didn't find out the current time, don't give a
+ // desiredPresentTime:
+ ptime.desiredPresentTime = 0;
+ } else {
+ ptime.desiredPresentTime = curtime + (target_IPD >> 1);
+ }
+ } else {
+ ptime.desiredPresentTime = (prev_desired_present_time + target_IPD);
+ }
+ ptime.presentID = next_present_id++;
+ prev_desired_present_time = ptime.desiredPresentTime;
+
+ VkPresentTimesInfoGOOGLE present_time = {
+ .sType = VK_STRUCTURE_TYPE_PRESENT_TIMES_INFO_GOOGLE,
+ .pNext = present.pNext,
+ .swapchainCount = present.swapchainCount,
+ .pTimes = &ptime,
+ };
+ if (VK_GOOGLE_display_timing_enabled) {
+ present.pNext = &present_time;
+ }
+ }
+#endif
+ static int total_frames = 0;
+ total_frames++;
+ // print_line("current buffer: " + itos(current_buffer));
+ err = fpQueuePresentKHR(present_queue, &present);
+
+ frame_index += 1;
+ frame_index %= FRAME_LAG;
+
+ if (err == VK_ERROR_OUT_OF_DATE_KHR) {
+ // swapchain is out of date (e.g. the window was resized) and
+ // must be recreated:
+ print_line("out of date");
+ resize_notify();
+ } else if (err == VK_SUBOPTIMAL_KHR) {
+ // swapchain is not as optimal as it could be, but the platform's
+ // presentation engine will still present the image correctly.
+ print_line("suboptimal");
+ } else {
+ ERR_FAIL_COND_V(err, ERR_CANT_CREATE);
+ }
+
+ return OK;
+}
+
+void VulkanContext::resize_notify() {
+}
+
+VkDevice VulkanContext::get_device() {
+ return device;
+}
+
+VkPhysicalDevice VulkanContext::get_physical_device() {
+ return gpu;
+}
+int VulkanContext::get_frame_count() const {
+ return swapchainImageCount;
+}
+uint32_t VulkanContext::get_graphics_queue() const {
+ return graphics_queue_family_index;
+}
+
+int VulkanContext::get_screen_width(int p_screen) {
+ return width;
+}
+
+int VulkanContext::get_screen_height(int p_screen) {
+ return height;
+}
+
+VkFramebuffer VulkanContext::get_frame_framebuffer(int p_frame) {
+ return swapchain_image_resources[p_frame].framebuffer;
+}
+VkFormat VulkanContext::get_screen_format() const {
+ return format;
+}
+
+VkRenderPass VulkanContext::get_render_pass() {
+ return render_pass;
+}
+
+VkPhysicalDeviceLimits VulkanContext::get_device_limits() const {
+ return gpu_props.limits;
+}
+
+VulkanContext::VulkanContext() {
+	presentMode = VK_PRESENT_MODE_FIFO_KHR;
+	instance_validation_layers = NULL;
+	use_validation_layers = true;
+	VK_KHR_incremental_present_enabled = true;
+	VK_GOOGLE_display_timing_enabled = true;
+	swapchain = VK_NULL_HANDLE;
+	prepared = false;
+
+	command_buffer_queue.resize(1); // First slot is always reserved for the setup command buffer.
+	command_buffer_queue.write[0] = NULL;
+	command_buffer_count = 1;
+}
diff --git a/drivers/vulkan/vulkan_context.h b/drivers/vulkan/vulkan_context.h
new file mode 100644
index 0000000000..7a62ef51e2
--- /dev/null
+++ b/drivers/vulkan/vulkan_context.h
@@ -0,0 +1,158 @@
+#ifndef VULKAN_CONTEXT_H
+#define VULKAN_CONTEXT_H
+
+#include "core/error_list.h"
+#include "core/ustring.h"
+#include <vulkan/vulkan.h>
+
+class VulkanContext {
+
+	enum {
+		MAX_EXTENSIONS = 128,
+		MAX_LAYERS = 64,
+		FRAME_LAG = 2 // Maximum number of frames in flight.
+	};
+
+ bool use_validation_layers;
+
+ VkInstance inst;
+ VkSurfaceKHR surface;
+ VkPhysicalDevice gpu;
+ VkPhysicalDeviceProperties gpu_props;
+ uint32_t queue_family_count;
+ VkQueueFamilyProperties *queue_props;
+ VkDevice device;
+
+ //present
+ uint32_t graphics_queue_family_index;
+ uint32_t present_queue_family_index;
+ bool separate_present_queue;
+ VkQueue graphics_queue;
+ VkQueue present_queue;
+ VkColorSpaceKHR color_space;
+ VkFormat format;
+ VkSemaphore image_acquired_semaphores[FRAME_LAG];
+ VkSemaphore draw_complete_semaphores[FRAME_LAG];
+ VkSemaphore image_ownership_semaphores[FRAME_LAG];
+ int frame_index;
+ VkFence fences[FRAME_LAG];
+ VkPhysicalDeviceMemoryProperties memory_properties;
+
+ typedef struct {
+ VkImage image;
+ VkCommandBuffer cmd;
+ VkCommandBuffer graphics_to_present_cmd;
+ VkImageView view;
+ VkBuffer uniform_buffer;
+ VkDeviceMemory uniform_memory;
+ VkFramebuffer framebuffer;
+ VkDescriptorSet descriptor_set;
+ } SwapchainImageResources;
+
+ VkSwapchainKHR swapchain;
+ SwapchainImageResources *swapchain_image_resources;
+ VkPresentModeKHR presentMode;
+ uint32_t swapchainImageCount;
+ uint64_t refresh_duration;
+ bool syncd_with_actual_presents;
+ uint64_t refresh_duration_multiplier;
+ uint64_t target_IPD; // image present duration (inverse of frame rate)
+ uint64_t prev_desired_present_time;
+ uint32_t next_present_id;
+ uint32_t last_early_id; // 0 if no early images
+ uint32_t last_late_id; // 0 if no late images
+ bool is_minimized;
+ uint32_t current_buffer;
+
+ //commands
+ VkRenderPass render_pass;
+ VkCommandPool present_cmd_pool; //for separate present queue
+
+ bool prepared;
+ int width, height;
+
+ //extensions
+ bool VK_KHR_incremental_present_enabled;
+ bool VK_GOOGLE_display_timing_enabled;
+ const char **instance_validation_layers;
+ uint32_t enabled_extension_count;
+ uint32_t enabled_layer_count;
+ const char *extension_names[MAX_EXTENSIONS];
+ const char *enabled_layers[MAX_LAYERS];
+
+ PFN_vkCreateDebugUtilsMessengerEXT CreateDebugUtilsMessengerEXT;
+ PFN_vkDestroyDebugUtilsMessengerEXT DestroyDebugUtilsMessengerEXT;
+ PFN_vkSubmitDebugUtilsMessageEXT SubmitDebugUtilsMessageEXT;
+ PFN_vkCmdBeginDebugUtilsLabelEXT CmdBeginDebugUtilsLabelEXT;
+ PFN_vkCmdEndDebugUtilsLabelEXT CmdEndDebugUtilsLabelEXT;
+ PFN_vkCmdInsertDebugUtilsLabelEXT CmdInsertDebugUtilsLabelEXT;
+ PFN_vkSetDebugUtilsObjectNameEXT SetDebugUtilsObjectNameEXT;
+ PFN_vkGetPhysicalDeviceSurfaceSupportKHR fpGetPhysicalDeviceSurfaceSupportKHR;
+ PFN_vkGetPhysicalDeviceSurfaceCapabilitiesKHR fpGetPhysicalDeviceSurfaceCapabilitiesKHR;
+ PFN_vkGetPhysicalDeviceSurfaceFormatsKHR fpGetPhysicalDeviceSurfaceFormatsKHR;
+ PFN_vkGetPhysicalDeviceSurfacePresentModesKHR fpGetPhysicalDeviceSurfacePresentModesKHR;
+ PFN_vkCreateSwapchainKHR fpCreateSwapchainKHR;
+ PFN_vkDestroySwapchainKHR fpDestroySwapchainKHR;
+ PFN_vkGetSwapchainImagesKHR fpGetSwapchainImagesKHR;
+ PFN_vkAcquireNextImageKHR fpAcquireNextImageKHR;
+ PFN_vkQueuePresentKHR fpQueuePresentKHR;
+ PFN_vkGetRefreshCycleDurationGOOGLE fpGetRefreshCycleDurationGOOGLE;
+ PFN_vkGetPastPresentationTimingGOOGLE fpGetPastPresentationTimingGOOGLE;
+
+ VkDebugUtilsMessengerEXT dbg_messenger;
+
+ Error _create_validation_layers();
+ Error _initialize_extensions();
+
+ VkBool32 _check_layers(uint32_t check_count, const char **check_names, uint32_t layer_count, VkLayerProperties *layers);
+ static VKAPI_ATTR VkBool32 VKAPI_CALL _debug_messenger_callback(VkDebugUtilsMessageSeverityFlagBitsEXT messageSeverity,
+ VkDebugUtilsMessageTypeFlagsEXT messageType,
+ const VkDebugUtilsMessengerCallbackDataEXT *pCallbackData,
+ void *pUserData);
+
+ Error _create_physical_device();
+ Error _create_device();
+ Error _create_swap_chain();
+ Error _create_semaphores();
+
+ Error _prepare_buffers();
+ Error _prepare_framebuffers();
+ Error _create_buffers();
+
+ int screen_width;
+ int screen_height;
+ bool minimized;
+
+ Vector<VkCommandBuffer> command_buffer_queue;
+ int command_buffer_count;
+
+protected:
+ virtual const char *_get_platform_surface_extension() const = 0;
+ virtual VkResult _create_surface(VkSurfaceKHR *surface, VkInstance p_instance) = 0;
+
+ VkSurfaceKHR &get_surface() { return surface; }
+
+public:
+ VkDevice get_device();
+ VkPhysicalDevice get_physical_device();
+ int get_frame_count() const;
+ uint32_t get_graphics_queue() const;
+
+ int get_screen_width(int p_screen = 0);
+ int get_screen_height(int p_screen = 0);
+
+ VkFramebuffer get_frame_framebuffer(int p_frame);
+ VkRenderPass get_render_pass();
+ VkFormat get_screen_format() const;
+ VkPhysicalDeviceLimits get_device_limits() const;
+
+ void set_setup_buffer(const VkCommandBuffer &pCommandBuffer);
+ void append_command_buffer(const VkCommandBuffer &pCommandBuffer);
+ void resize_notify();
+ void flush(bool p_flush_setup = false, bool p_flush_pending = false);
+ Error swap_buffers();
+ Error initialize(int p_width, int p_height, bool p_minimized);
+ VulkanContext();
+};
+
+#endif // VULKAN_CONTEXT_H
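FRAME_LAG above sizes the per-frame synchronization objects (semaphores and fences) so the CPU can record frame N while the GPU still works on frame N-1. A minimal sketch of that rotation, with the Vulkan calls replaced by hypothetical stand-ins (wait_and_reset() and record_and_submit() are not part of this class):

enum { FRAME_LAG = 2 };

struct Fence { bool signaled; };

// Stand-ins for vkWaitForFences/vkResetFences and queue submission.
static void wait_and_reset(Fence &f) { f.signaled = false; }
static void record_and_submit(Fence &f) { f.signaled = true; }

int main() {
	Fence fences[FRAME_LAG] = {};
	int frame_index = 0;
	for (int frame = 0; frame < 100; frame++) {
		// Block until the GPU finished the frame submitted FRAME_LAG
		// frames ago, then reuse its fence and semaphores.
		wait_and_reset(fences[frame_index]);
		record_and_submit(fences[frame_index]);
		frame_index = (frame_index + 1) % FRAME_LAG; // same wrap as swap_buffers()
	}
	return 0;
}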
diff --git a/platform/x11/SCsub b/platform/x11/SCsub
index 3d5aa15208..09e283b634 100644
--- a/platform/x11/SCsub
+++ b/platform/x11/SCsub
@@ -6,7 +6,8 @@ from platform_methods import run_in_subprocess
import platform_x11_builders
common_x11 = [
- "context_gl_x11.cpp",
+ #"context_gl_x11.cpp",
+ "vulkan_context_x11.cpp",
"crash_handler_x11.cpp",
"os_x11.cpp",
"key_mapping_x11.cpp",
diff --git a/platform/x11/detect.py b/platform/x11/detect.py
index bd5e5e0812..5f43d19151 100644
--- a/platform/x11/detect.py
+++ b/platform/x11/detect.py
@@ -318,7 +318,9 @@ def configure(env):
env.ParseConfig('pkg-config zlib --cflags --libs')
env.Prepend(CPPPATH=['#platform/x11'])
- env.Append(CPPDEFINES=['X11_ENABLED', 'UNIX_ENABLED', 'OPENGL_ENABLED', 'GLES_ENABLED'])
+ env.Append(CPPDEFINES=['X11_ENABLED', 'UNIX_ENABLED'])
+ env.Append(CPPDEFINES=['VULKAN_ENABLED'])
+ env.Append(LIBS=['vulkan'])
env.Append(LIBS=['GL', 'pthread'])
if (platform.system() == "Linux"):
diff --git a/platform/x11/os_x11.cpp b/platform/x11/os_x11.cpp
index 1cd763853c..fd06112b40 100644
--- a/platform/x11/os_x11.cpp
+++ b/platform/x11/os_x11.cpp
@@ -33,8 +33,9 @@
#include "core/os/dir_access.h"
#include "core/print_string.h"
-#include "drivers/gles2/rasterizer_gles2.h"
-#include "drivers/gles3/rasterizer_gles3.h"
+//#include "drivers/gles2/rasterizer_gles2.h"
+//#include "drivers/gles3/rasterizer_gles3.h"
+#include "drivers/dummy/rasterizer_dummy.h"
#include "errno.h"
#include "key_mapping_x11.h"
#include "servers/visual/visual_server_raster.h"
@@ -103,6 +104,14 @@ int OS_X11::get_current_video_driver() const {
return video_driver_index;
}
+static RenderingDevice::ID test_pipeline = RenderingDevice::INVALID_ID;
+static RenderingDevice::ID test_index_array = RenderingDevice::INVALID_ID;
+static RenderingDevice::ID test_vertex_array = RenderingDevice::INVALID_ID;
+static RenderingDevice::ID test_uniform_set = RenderingDevice::INVALID_ID;
+static RenderingDevice::ID test_framebuffer_pipeline = RenderingDevice::INVALID_ID;
+static RenderingDevice::ID test_framebuffer_uniform_set = RenderingDevice::INVALID_ID;
+static RenderingDevice::ID test_framebuffer = RenderingDevice::INVALID_ID;
+
Error OS_X11::initialize(const VideoMode &p_desired, int p_video_driver, int p_audio_driver) {
long im_event_mask = 0;
@@ -361,6 +370,286 @@ Error OS_X11::initialize(const VideoMode &p_desired, int p_video_driver, int p_a
context_gl->set_use_vsync(current_videomode.use_vsync);
+#else
+ long visualMask = VisualScreenMask;
+ int numberOfVisuals;
+ XVisualInfo vInfoTemplate = {};
+ vInfoTemplate.screen = DefaultScreen(x11_display);
+ XVisualInfo *visualInfo = XGetVisualInfo(x11_display, visualMask, &vInfoTemplate, &numberOfVisuals);
+
+ Colormap colormap = XCreateColormap(x11_display, RootWindow(x11_display, vInfoTemplate.screen), visualInfo->visual, AllocNone);
+
+ XSetWindowAttributes windowAttributes = {};
+ windowAttributes.colormap = colormap;
+ windowAttributes.background_pixel = 0xFFFFFFFF;
+ windowAttributes.border_pixel = 0;
+ windowAttributes.event_mask = KeyPressMask | KeyReleaseMask | StructureNotifyMask | ExposureMask;
+ /*
+ window = XCreateWindow(demo->display, RootWindow(display, vInfoTemplate.screen), 0, 0, demo->width,
+ demo->height, 0, visualInfo->depth, InputOutput, visualInfo->visual,
+ CWBackPixel | CWBorderPixel | CWEventMask | CWColormap, &windowAttributes);
+ */
+ unsigned long valuemask = CWBorderPixel | CWColormap | CWEventMask;
+ x11_window = XCreateWindow(x11_display, RootWindow(x11_display, visualInfo->screen), 0, 0, OS::get_singleton()->get_video_mode().width, OS::get_singleton()->get_video_mode().height, 0, visualInfo->depth, InputOutput, visualInfo->visual, valuemask, &windowAttributes);
+
+ //set_class_hint(x11_display, x11_window);
+ XMapWindow(x11_display, x11_window);
+ XFlush(x11_display);
+
+ XSync(x11_display, False);
+ //XSetErrorHandler(oldHandler);
+
+ XFree(visualInfo);
+
+ context_vulkan = memnew(VulkanContextX11(x11_window, x11_display));
+ context_vulkan->initialize(OS::get_singleton()->get_video_mode().width, OS::get_singleton()->get_video_mode().height, false);
+ //temporary
+ rendering_device = memnew(RenderingDeviceVulkan);
+ rendering_device->initialize(context_vulkan);
+ RasterizerDummy::make_current();
+
+ // test shader
+
+ RenderingDevice::ID shader;
+ {
+ RenderingDevice::ShaderStageSource vert;
+ vert.shader_stage = RenderingDevice::SHADER_STAGE_VERTEX;
+ vert.shader_source = "#version 450\n"
+ "layout(location = 0) in vec4 vertex_pos;\n"
+ "layout(location = 1) in vec2 uv_pos;\n"
+ "layout(location = 0) out vec2 uv_interp;\n"
+ "void main() { gl_Position = vertex_pos; uv_interp=uv_pos;\n }";
+ //"void main() { if (gl_VertexIndex==0) gl_Position=vec4(-0.8,-0.8,0.0,1.0); if (gl_VertexIndex==1) gl_Position=vec4(-0.8,-0.2,0.0,1.0); if (gl_VertexIndex==2) gl_Position=vec4(-0.2,-0.2,0.0,1.0); if (gl_VertexIndex==3) gl_Position=vec4(-0.2,-0.8,0.0,1.0);\n }";
+
+ RenderingDevice::ShaderStageSource frag;
+ frag.shader_stage = RenderingDevice::SHADER_STAGE_FRAGMENT;
+ frag.shader_source = "#version 450\n"
+ "layout (location = 0) in vec2 uv_interp;\n"
+ "layout (location = 0) out vec4 uFragColor;\n"
+ "layout (binding = 0) uniform sampler2D t;\n"
+ "void main() { uFragColor=texture(t,uv_interp); }\n";
+
+ Vector<RenderingDevice::ShaderStageSource> source;
+ source.push_back(vert);
+ source.push_back(frag);
+ String error;
+ shader = rendering_device->shader_create_from_source(source, &error);
+ if (shader == RenderingDevice::INVALID_ID) {
+ print_line("failed compilation: " + error);
+ } else {
+ print_line("compilation success");
+ }
+ }
+
+ RenderingDevice::ID vertex_desc;
+ {
+
+ PoolVector<uint8_t> pv;
+ pv.resize(24 * 4);
+ {
+ PoolVector<uint8_t>::Write w = pv.write();
+ float *p32 = (float *)w.ptr();
+ p32[0] = -0.8;
+ p32[1] = -0.8;
+ p32[2] = 0.0;
+ p32[3] = 1.0;
+
+ p32[4] = 0.0;
+ p32[5] = 0.0;
+
+ p32[6] = -0.8;
+ p32[7] = -0.2;
+ p32[8] = 0.0;
+ p32[9] = 1.0;
+
+ p32[10] = 0.0;
+ p32[11] = 1.0;
+
+ p32[12] = -0.2;
+ p32[13] = -0.2;
+ p32[14] = 0.0;
+ p32[15] = 1.0;
+
+ p32[16] = 1.0;
+ p32[17] = 1.0;
+
+ p32[18] = -0.2;
+ p32[19] = -0.8;
+ p32[20] = 0.0;
+ p32[21] = 1.0;
+
+ p32[22] = 1.0;
+ p32[23] = 0.0;
+ }
+
+ RenderingDevice::ID vertex_buffer = rendering_device->vertex_buffer_create(pv.size(), pv);
+ Vector<RenderingDevice::VertexDescription> vdarr;
+ RenderingDevice::VertexDescription vd;
+ vd.format = RenderingDevice::DATA_FORMAT_R32G32B32A32_SFLOAT;
+ vd.stride = 4 * 6; //vertex/uv
+ vd.offset = 0;
+ vd.location = 0;
+ vdarr.push_back(vd);
+ vd.format = RenderingDevice::DATA_FORMAT_R32G32_SFLOAT;
+ vd.stride = 4 * 6; //vertex/uv
+ vd.offset = 4 * 4; //offset to UV
+ vd.location = 1;
+ vdarr.push_back(vd);
+
+ vertex_desc = rendering_device->vertex_description_create(vdarr);
+
+ Vector<RenderingDevice::ID> buffers;
+ buffers.push_back(vertex_buffer);
+ buffers.push_back(vertex_buffer);
+
+ test_vertex_array = rendering_device->vertex_array_create(4, vertex_desc, buffers);
+ }
+
+ RenderingDevice::ID test_framebuffer_tex_id;
+
+ {
+ RenderingDevice::TextureFormat tex_format;
+ tex_format.format = RenderingDevice::DATA_FORMAT_R8G8B8A8_UNORM; //RenderingDevice::DATA_FORMAT_A8B8G8R8_UNORM_PACK32;
+ tex_format.width = 256;
+ tex_format.height = 256;
+ tex_format.mipmaps = 1;
+ tex_format.type = RenderingDevice::TEXTURE_TYPE_2D;
+ tex_format.usage_bits = RenderingDevice::TEXTURE_USAGE_SAMPLING_BIT | RenderingDevice::TEXTURE_USAGE_COLOR_ATTACHMENT_BIT;
+
+ test_framebuffer_tex_id = rendering_device->texture_create(tex_format, RenderingDevice::TextureView());
+
+ Vector<RenderingDevice::ID> ids;
+ ids.push_back(test_framebuffer_tex_id);
+
+ test_framebuffer = rendering_device->framebuffer_create(ids);
+ }
+
+ test_pipeline = rendering_device->render_pipeline_create(shader, rendering_device->framebuffer_get_format(test_framebuffer), vertex_desc, RenderingDevice::RENDER_PRIMITIVE_TRIANGLES, RenderingDevice::PipelineRasterizationState(), RenderingDevice::PipelineMultisampleState(), RenderingDevice::PipelineDepthStencilState(), RenderingDevice::PipelineColorBlendState::create_disabled());
+
+ {
+
+ Ref<Image> img;
+ img.instance();
+ Error terr = img->load("../logo.png");
+ if (terr != OK) {
+ print_line("Cant load logo?");
+ }
+
+ img->convert(Image::FORMAT_RGBA8);
+
+ RenderingDevice::TextureFormat tex_format;
+ tex_format.format = RenderingDevice::DATA_FORMAT_R8G8B8A8_UNORM; //RenderingDevice::DATA_FORMAT_A8B8G8R8_UNORM_PACK32;
+ tex_format.width = img->get_width();
+ tex_format.height = img->get_height();
+ print_line("imgsize: " + Vector2(img->get_width(), img->get_height()));
+ tex_format.mipmaps = 1;
+ tex_format.type = RenderingDevice::TEXTURE_TYPE_2D;
+ tex_format.usage_bits = RenderingDevice::TEXTURE_USAGE_SAMPLING_BIT | RenderingDevice::TEXTURE_USAGE_CAN_UPDATE_BIT;
+
+ Vector<PoolVector<uint8_t> > initial_data;
+ initial_data.push_back(img->get_data());
+
+ RenderingDevice::ID tex_id = rendering_device->texture_create(tex_format, RenderingDevice::TextureView(), initial_data);
+ RenderingDevice::ID sampler = rendering_device->sampler_create(RenderingDevice::SamplerState());
+
+ Vector<RenderingDevice::Uniform> uniform_description;
+
+ RenderingDevice::Uniform u;
+ u.type = RenderingDevice::UNIFORM_TYPE_SAMPLER_WITH_TEXTURE;
+ u.binding = 0;
+ u.ids.push_back(sampler);
+ u.ids.push_back(tex_id);
+
+ uniform_description.push_back(u);
+
+ test_uniform_set = rendering_device->uniform_set_create(uniform_description, shader, 0);
+ }
+
+ {
+ PoolVector<uint8_t> pv;
+ pv.resize(6 * 4);
+ {
+ PoolVector<uint8_t>::Write w = pv.write();
+ uint32_t *p32 = (uint32_t *)w.ptr(); //indices are INDEX_BUFFER_FORMAT_UINT32
+ p32[0] = 0;
+ p32[1] = 1;
+ p32[2] = 2;
+ p32[3] = 0;
+ p32[4] = 2;
+ p32[5] = 3;
+ }
+ RenderingDevice::ID index_buffer = rendering_device->index_buffer_create(6, RenderingDevice::INDEX_BUFFER_FORMAT_UINT32, pv);
+ test_index_array = rendering_device->index_array_create(index_buffer, 0, 6);
+ }
+
+ {
+
+ RenderingDevice::ID sampler = rendering_device->sampler_create(RenderingDevice::SamplerState());
+
+ Vector<RenderingDevice::Uniform> uniform_description;
+
+ RenderingDevice::Uniform u;
+ u.type = RenderingDevice::UNIFORM_TYPE_SAMPLER_WITH_TEXTURE;
+ u.binding = 0;
+ u.ids.push_back(sampler);
+ u.ids.push_back(test_framebuffer_tex_id);
+
+ uniform_description.push_back(u);
+
+ test_framebuffer_uniform_set = rendering_device->uniform_set_create(uniform_description, shader, 0);
+ test_framebuffer_pipeline = rendering_device->render_pipeline_create(shader, rendering_device->screen_get_framebuffer_format(), vertex_desc, RenderingDevice::RENDER_PRIMITIVE_TRIANGLES, RenderingDevice::PipelineRasterizationState(), RenderingDevice::PipelineMultisampleState(), RenderingDevice::PipelineDepthStencilState(), RenderingDevice::PipelineColorBlendState::create_disabled());
+ }
+
+#if 0
+ Vector<RenderingDevice::ShaderStageSource> source;
+ RenderingDevice::ShaderStageSource frag;
+ frag.shader_stage = RenderingDevice::SHADER_STAGE_FRAGMENT;
+ frag.shader_source = ""
+ "#version 450\n"
+ "#extension GL_ARB_separate_shader_objects : enable\n"
+ "#extension GL_ARB_shading_language_420pack : enable\n"
+ "layout (set =2, binding = 3) uniform sampler2D sampie;\n"
+ "layout (set =2, binding = 4) uniform texture2D texie;\n"
+ "layout (set =2, binding = 5) uniform sampler sampieonly;\n"
+ "layout (set =2, binding = 6) uniform sampler2D sampiearr[2];\n"
+ "layout (set =2, binding = 7) uniform texture2D texiearr[2];\n"
+ "layout (set =2, binding = 8) uniform sampler sampieonlyarr[2];\n"
+ "layout (set =2, binding = 9) uniform samplerBuffer sabufsa;\n"
+ "layout (set =2, binding = 9) uniform textureBuffer texbufsa;\n"
+ "layout (set=3,binding=1,rgba32f) uniform image2D img1;\n"
+ "layout(std140, set=1,binding = 0) uniform buf {\n"
+ " mat4 MVP;\n"
+ " vec4 position[12*3];\n"
+ " vec4 attr[12*3];\n"
+ "} ubuf;\n"
+ "layout(std140, set=1,binding = 1) buffer popis {\n"
+ " int popitos;\n"
+ "} popibuf;\n"
+ "layout (location = 0) out vec4 uFragColor;\n"
+ " \n"
+ "const vec3 lightDir= vec3(0.424, 0.566, 0.707);\n"
+ "\n"
+ "void main() {\n"
+ " uFragColor = texture(sampie, vec2(ubuf.attr[0].x));\n"
+ " uFragColor+= texture(sampler2D(texie,sampieonly), vec2(ubuf.attr[0].x));\n"
+ " uFragColor+= texture(sampiearr[1], vec2(ubuf.attr[0].x));\n"
+ " uFragColor+= texture(sampler2D(texiearr[1],sampieonlyarr[1]), vec2(ubuf.attr[0].x));\n"
+ " uFragColor+= texelFetch(sabufsa,0);\n"
+ " uFragColor+= texelFetch(samplerBuffer(texbufsa,sampieonly),0);\n"
+ " uFragColor+= texelFetch(texbufsa,0);\n"
+ " uFragColor.xy+= imageSize(img1);\n"
+ " uFragColor.x+= float(popibuf.popitos);\n"
+ "}\n";
+ source.push_back(frag);
+ String error;
+ RenderingDevice::ID shader = rendering_device->shader_create_from_source(source, &error);
+ if (shader == RenderingDevice::INVALID_ID) {
+ print_line("failed compilation: " + error);
+ } else {
+ print_line("compilation success");
+ }
+#endif
+
#endif
visual_server = memnew(VisualServerRaster);
@@ -3124,6 +3413,27 @@ void OS_X11::swap_buffers() {
#if defined(OPENGL_ENABLED)
context_gl->swap_buffers();
#endif
+
+ Vector<Color> clear;
+ clear.push_back(Color(0.5, 0.8, 0.2));
+ RenderingDevice::ID cmd_list = rendering_device->draw_list_begin(test_framebuffer, RenderingDevice::INITIAL_ACTION_CLEAR, RenderingDevice::FINAL_ACTION_READ_COLOR_DISCARD_DEPTH, clear);
+ rendering_device->draw_list_bind_render_pipeline(cmd_list, test_pipeline);
+ rendering_device->draw_list_bind_index_array(cmd_list, test_index_array);
+ rendering_device->draw_list_bind_vertex_array(cmd_list, test_vertex_array);
+ rendering_device->draw_list_bind_uniform_set(cmd_list, test_uniform_set, 0);
+ rendering_device->draw_list_draw(cmd_list, true);
+ rendering_device->draw_list_end();
+
+ cmd_list = rendering_device->draw_list_begin_for_screen();
+ rendering_device->draw_list_bind_render_pipeline(cmd_list, test_framebuffer_pipeline);
+ rendering_device->draw_list_bind_index_array(cmd_list, test_index_array);
+ rendering_device->draw_list_bind_vertex_array(cmd_list, test_vertex_array);
+ rendering_device->draw_list_bind_uniform_set(cmd_list, test_framebuffer_uniform_set, 0);
+ rendering_device->draw_list_draw(cmd_list, true);
+ rendering_device->draw_list_end();
+ rendering_device->finalize_frame();
+ context_vulkan->swap_buffers();
+ rendering_device->advance_frame();
}
void OS_X11::alert(const String &p_alert, const String &p_title) {
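The test geometry above interleaves position and UV in one vertex buffer, which is why both VertexDescriptions use a 24-byte stride and the UV attribute starts at byte 16. A standalone sketch of that layout (the struct name is illustrative):

#include <cstddef>

// One interleaved vertex as written by the test code:
// 4 floats of position followed by 2 floats of UV = 24 bytes.
struct TestVertex {
	float pos[4]; // location 0, DATA_FORMAT_R32G32B32A32_SFLOAT, offset 0
	float uv[2]; // location 1, DATA_FORMAT_R32G32_SFLOAT, offset 16
};

static_assert(sizeof(TestVertex) == 24, "stride shared by both attributes");
static_assert(offsetof(TestVertex, uv) == 16, "offset of the UV attribute");

int main() { return 0; }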
diff --git a/platform/x11/os_x11.h b/platform/x11/os_x11.h
index 25b406743b..5999e2840e 100644
--- a/platform/x11/os_x11.h
+++ b/platform/x11/os_x11.h
@@ -31,7 +31,7 @@
#ifndef OS_X11_H
#define OS_X11_H
-#include "context_gl_x11.h"
+//#include "context_gl_x11.h"
#include "core/os/input.h"
#include "crash_handler_x11.h"
#include "drivers/alsa/audio_driver_alsa.h"
@@ -45,6 +45,8 @@
#include "servers/visual/rasterizer.h"
#include "servers/visual_server.h"
//#include "servers/visual/visual_server_wrap_mt.h"
+#include "drivers/vulkan/rendering_device_vulkan.h"
+#include "platform/x11/vulkan_context_x11.h"
#include <X11/Xcursor/Xcursor.h>
#include <X11/Xlib.h>
@@ -94,6 +96,11 @@ class OS_X11 : public OS_Unix {
#if defined(OPENGL_ENABLED)
ContextGL_X11 *context_gl;
#endif
+#if defined(VULKAN_ENABLED)
+ VulkanContextX11 *context_vulkan;
+ RenderingDeviceVulkan *rendering_device;
+#endif
+
//Rasterizer *rasterizer;
VisualServer *visual_server;
VideoMode current_videomode;
diff --git a/platform/x11/vulkan_context_x11.cpp b/platform/x11/vulkan_context_x11.cpp
new file mode 100644
index 0000000000..eab7c3935b
--- /dev/null
+++ b/platform/x11/vulkan_context_x11.cpp
@@ -0,0 +1,22 @@
+#include "vulkan_context_x11.h"
+#include <vulkan/vulkan_xlib.h>
+const char *VulkanContextX11::_get_platform_surface_extension() const {
+ return VK_KHR_XLIB_SURFACE_EXTENSION_NAME;
+}
+
+VkResult VulkanContextX11::_create_surface(VkSurfaceKHR *surface, VkInstance p_instance) {
+
+ VkXlibSurfaceCreateInfoKHR createInfo;
+ createInfo.sType = VK_STRUCTURE_TYPE_XLIB_SURFACE_CREATE_INFO_KHR;
+ createInfo.pNext = NULL;
+ createInfo.flags = 0;
+ createInfo.dpy = display;
+ createInfo.window = window;
+
+ return vkCreateXlibSurfaceKHR(p_instance, &createInfo, NULL, surface);
+}
+
+VulkanContextX11::VulkanContextX11(Window p_window, Display *p_display) {
+ window = p_window;
+ display = p_display;
+}
diff --git a/platform/x11/vulkan_context_x11.h b/platform/x11/vulkan_context_x11.h
new file mode 100644
index 0000000000..b91f63bd27
--- /dev/null
+++ b/platform/x11/vulkan_context_x11.h
@@ -0,0 +1,18 @@
+#ifndef VULKAN_CONTEXT_X11_H
+#define VULKAN_CONTEXT_X11_H
+
+#include "drivers/vulkan/vulkan_context.h"
+#include <X11/Xlib.h>
+
+class VulkanContextX11 : public VulkanContext {
+ Window window;
+ Display *display;
+
+ virtual const char *_get_platform_surface_extension() const;
+ virtual VkResult _create_surface(VkSurfaceKHR *surface, VkInstance p_instance);
+
+public:
+ VulkanContextX11(Window p_window, Display *p_display);
+};
+
+#endif // VULKAN_CONTEXT_X11_H
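Other platforms would hook in the same way: subclass VulkanContext, report the platform surface extension, and create the matching VkSurfaceKHR. A hypothetical Win32 analog (not part of this patch) might look roughly like:

#include "drivers/vulkan/vulkan_context.h"

#include <windows.h>
#include <vulkan/vulkan_win32.h>

class VulkanContextWin32 : public VulkanContext {
	HWND hwnd;
	HINSTANCE hinstance;

	virtual const char *_get_platform_surface_extension() const {
		return VK_KHR_WIN32_SURFACE_EXTENSION_NAME;
	}

	virtual VkResult _create_surface(VkSurfaceKHR *surface, VkInstance p_instance) {
		VkWin32SurfaceCreateInfoKHR createInfo = {};
		createInfo.sType = VK_STRUCTURE_TYPE_WIN32_SURFACE_CREATE_INFO_KHR;
		createInfo.hinstance = hinstance;
		createInfo.hwnd = hwnd;
		return vkCreateWin32SurfaceKHR(p_instance, &createInfo, NULL, surface);
	}

public:
	VulkanContextWin32(HWND p_hwnd, HINSTANCE p_hinstance) {
		hwnd = p_hwnd;
		hinstance = p_hinstance;
	}
};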
diff --git a/servers/visual/rendering_device.cpp b/servers/visual/rendering_device.cpp
new file mode 100644
index 0000000000..d17efce690
--- /dev/null
+++ b/servers/visual/rendering_device.cpp
@@ -0,0 +1,6 @@
+#include "rendering_device.h"
+
+RenderingDevice::RenderingDevice()
+{
+
+}
diff --git a/servers/visual/rendering_device.h b/servers/visual/rendering_device.h
new file mode 100644
index 0000000000..19fbd431ca
--- /dev/null
+++ b/servers/visual/rendering_device.h
@@ -0,0 +1,849 @@
+#ifndef RENDERING_DEVICE_H
+#define RENDERING_DEVICE_H
+
+#include "core/object.h"
+
+class RenderingDevice : public Object {
+ GDCLASS(RenderingDevice, Object)
+public:
+ //base numeric ID for all types
+ enum {
+ INVALID_ID = -1
+ };
+
+ typedef int64_t ID;
+
+ /*****************/
+ /**** GENERIC ****/
+ /*****************/
+
+ enum CompareOperator {
+ COMPARE_OP_NEVER,
+ COMPARE_OP_LESS,
+ COMPARE_OP_EQUAL,
+ COMPARE_OP_LESS_OR_EQUAL,
+ COMPARE_OP_GREATER,
+ COMPARE_OP_NOT_EQUAL,
+ COMPARE_OP_GREATER_OR_EQUAL,
+ COMPARE_OP_ALWAYS,
+ COMPARE_OP_MAX //not an actual operator, just the amount of operators :D
+ };
+
+ enum DataFormat {
+ DATA_FORMAT_R4G4_UNORM_PACK8,
+ DATA_FORMAT_R4G4B4A4_UNORM_PACK16,
+ DATA_FORMAT_B4G4R4A4_UNORM_PACK16,
+ DATA_FORMAT_R5G6B5_UNORM_PACK16,
+ DATA_FORMAT_B5G6R5_UNORM_PACK16,
+ DATA_FORMAT_R5G5B5A1_UNORM_PACK16,
+ DATA_FORMAT_B5G5R5A1_UNORM_PACK16,
+ DATA_FORMAT_A1R5G5B5_UNORM_PACK16,
+ DATA_FORMAT_R8_UNORM,
+ DATA_FORMAT_R8_SNORM,
+ DATA_FORMAT_R8_USCALED,
+ DATA_FORMAT_R8_SSCALED,
+ DATA_FORMAT_R8_UINT,
+ DATA_FORMAT_R8_SINT,
+ DATA_FORMAT_R8_SRGB,
+ DATA_FORMAT_R8G8_UNORM,
+ DATA_FORMAT_R8G8_SNORM,
+ DATA_FORMAT_R8G8_USCALED,
+ DATA_FORMAT_R8G8_SSCALED,
+ DATA_FORMAT_R8G8_UINT,
+ DATA_FORMAT_R8G8_SINT,
+ DATA_FORMAT_R8G8_SRGB,
+ DATA_FORMAT_R8G8B8_UNORM,
+ DATA_FORMAT_R8G8B8_SNORM,
+ DATA_FORMAT_R8G8B8_USCALED,
+ DATA_FORMAT_R8G8B8_SSCALED,
+ DATA_FORMAT_R8G8B8_UINT,
+ DATA_FORMAT_R8G8B8_SINT,
+ DATA_FORMAT_R8G8B8_SRGB,
+ DATA_FORMAT_B8G8R8_UNORM,
+ DATA_FORMAT_B8G8R8_SNORM,
+ DATA_FORMAT_B8G8R8_USCALED,
+ DATA_FORMAT_B8G8R8_SSCALED,
+ DATA_FORMAT_B8G8R8_UINT,
+ DATA_FORMAT_B8G8R8_SINT,
+ DATA_FORMAT_B8G8R8_SRGB,
+ DATA_FORMAT_R8G8B8A8_UNORM,
+ DATA_FORMAT_R8G8B8A8_SNORM,
+ DATA_FORMAT_R8G8B8A8_USCALED,
+ DATA_FORMAT_R8G8B8A8_SSCALED,
+ DATA_FORMAT_R8G8B8A8_UINT,
+ DATA_FORMAT_R8G8B8A8_SINT,
+ DATA_FORMAT_R8G8B8A8_SRGB,
+ DATA_FORMAT_B8G8R8A8_UNORM,
+ DATA_FORMAT_B8G8R8A8_SNORM,
+ DATA_FORMAT_B8G8R8A8_USCALED,
+ DATA_FORMAT_B8G8R8A8_SSCALED,
+ DATA_FORMAT_B8G8R8A8_UINT,
+ DATA_FORMAT_B8G8R8A8_SINT,
+ DATA_FORMAT_B8G8R8A8_SRGB,
+ DATA_FORMAT_A8B8G8R8_UNORM_PACK32,
+ DATA_FORMAT_A8B8G8R8_SNORM_PACK32,
+ DATA_FORMAT_A8B8G8R8_USCALED_PACK32,
+ DATA_FORMAT_A8B8G8R8_SSCALED_PACK32,
+ DATA_FORMAT_A8B8G8R8_UINT_PACK32,
+ DATA_FORMAT_A8B8G8R8_SINT_PACK32,
+ DATA_FORMAT_A8B8G8R8_SRGB_PACK32,
+ DATA_FORMAT_A2R10G10B10_UNORM_PACK32,
+ DATA_FORMAT_A2R10G10B10_SNORM_PACK32,
+ DATA_FORMAT_A2R10G10B10_USCALED_PACK32,
+ DATA_FORMAT_A2R10G10B10_SSCALED_PACK32,
+ DATA_FORMAT_A2R10G10B10_UINT_PACK32,
+ DATA_FORMAT_A2R10G10B10_SINT_PACK32,
+ DATA_FORMAT_A2B10G10R10_UNORM_PACK32,
+ DATA_FORMAT_A2B10G10R10_SNORM_PACK32,
+ DATA_FORMAT_A2B10G10R10_USCALED_PACK32,
+ DATA_FORMAT_A2B10G10R10_SSCALED_PACK32,
+ DATA_FORMAT_A2B10G10R10_UINT_PACK32,
+ DATA_FORMAT_A2B10G10R10_SINT_PACK32,
+ DATA_FORMAT_R16_UNORM,
+ DATA_FORMAT_R16_SNORM,
+ DATA_FORMAT_R16_USCALED,
+ DATA_FORMAT_R16_SSCALED,
+ DATA_FORMAT_R16_UINT,
+ DATA_FORMAT_R16_SINT,
+ DATA_FORMAT_R16_SFLOAT,
+ DATA_FORMAT_R16G16_UNORM,
+ DATA_FORMAT_R16G16_SNORM,
+ DATA_FORMAT_R16G16_USCALED,
+ DATA_FORMAT_R16G16_SSCALED,
+ DATA_FORMAT_R16G16_UINT,
+ DATA_FORMAT_R16G16_SINT,
+ DATA_FORMAT_R16G16_SFLOAT,
+ DATA_FORMAT_R16G16B16_UNORM,
+ DATA_FORMAT_R16G16B16_SNORM,
+ DATA_FORMAT_R16G16B16_USCALED,
+ DATA_FORMAT_R16G16B16_SSCALED,
+ DATA_FORMAT_R16G16B16_UINT,
+ DATA_FORMAT_R16G16B16_SINT,
+ DATA_FORMAT_R16G16B16_SFLOAT,
+ DATA_FORMAT_R16G16B16A16_UNORM,
+ DATA_FORMAT_R16G16B16A16_SNORM,
+ DATA_FORMAT_R16G16B16A16_USCALED,
+ DATA_FORMAT_R16G16B16A16_SSCALED,
+ DATA_FORMAT_R16G16B16A16_UINT,
+ DATA_FORMAT_R16G16B16A16_SINT,
+ DATA_FORMAT_R16G16B16A16_SFLOAT,
+ DATA_FORMAT_R32_UINT,
+ DATA_FORMAT_R32_SINT,
+ DATA_FORMAT_R32_SFLOAT,
+ DATA_FORMAT_R32G32_UINT,
+ DATA_FORMAT_R32G32_SINT,
+ DATA_FORMAT_R32G32_SFLOAT,
+ DATA_FORMAT_R32G32B32_UINT,
+ DATA_FORMAT_R32G32B32_SINT,
+ DATA_FORMAT_R32G32B32_SFLOAT,
+ DATA_FORMAT_R32G32B32A32_UINT,
+ DATA_FORMAT_R32G32B32A32_SINT,
+ DATA_FORMAT_R32G32B32A32_SFLOAT,
+ DATA_FORMAT_R64_UINT,
+ DATA_FORMAT_R64_SINT,
+ DATA_FORMAT_R64_SFLOAT,
+ DATA_FORMAT_R64G64_UINT,
+ DATA_FORMAT_R64G64_SINT,
+ DATA_FORMAT_R64G64_SFLOAT,
+ DATA_FORMAT_R64G64B64_UINT,
+ DATA_FORMAT_R64G64B64_SINT,
+ DATA_FORMAT_R64G64B64_SFLOAT,
+ DATA_FORMAT_R64G64B64A64_UINT,
+ DATA_FORMAT_R64G64B64A64_SINT,
+ DATA_FORMAT_R64G64B64A64_SFLOAT,
+ DATA_FORMAT_B10G11R11_UFLOAT_PACK32,
+ DATA_FORMAT_E5B9G9R9_UFLOAT_PACK32,
+ DATA_FORMAT_D16_UNORM,
+ DATA_FORMAT_X8_D24_UNORM_PACK32,
+ DATA_FORMAT_D32_SFLOAT,
+ DATA_FORMAT_S8_UINT,
+ DATA_FORMAT_D16_UNORM_S8_UINT,
+ DATA_FORMAT_D24_UNORM_S8_UINT,
+ DATA_FORMAT_D32_SFLOAT_S8_UINT,
+ DATA_FORMAT_BC1_RGB_UNORM_BLOCK,
+ DATA_FORMAT_BC1_RGB_SRGB_BLOCK,
+ DATA_FORMAT_BC1_RGBA_UNORM_BLOCK,
+ DATA_FORMAT_BC1_RGBA_SRGB_BLOCK,
+ DATA_FORMAT_BC2_UNORM_BLOCK,
+ DATA_FORMAT_BC2_SRGB_BLOCK,
+ DATA_FORMAT_BC3_UNORM_BLOCK,
+ DATA_FORMAT_BC3_SRGB_BLOCK,
+ DATA_FORMAT_BC4_UNORM_BLOCK,
+ DATA_FORMAT_BC4_SNORM_BLOCK,
+ DATA_FORMAT_BC5_UNORM_BLOCK,
+ DATA_FORMAT_BC5_SNORM_BLOCK,
+ DATA_FORMAT_BC6H_UFLOAT_BLOCK,
+ DATA_FORMAT_BC6H_SFLOAT_BLOCK,
+ DATA_FORMAT_BC7_UNORM_BLOCK,
+ DATA_FORMAT_BC7_SRGB_BLOCK,
+ DATA_FORMAT_ETC2_R8G8B8_UNORM_BLOCK,
+ DATA_FORMAT_ETC2_R8G8B8_SRGB_BLOCK,
+ DATA_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK,
+ DATA_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK,
+ DATA_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK,
+ DATA_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK,
+ DATA_FORMAT_EAC_R11_UNORM_BLOCK,
+ DATA_FORMAT_EAC_R11_SNORM_BLOCK,
+ DATA_FORMAT_EAC_R11G11_UNORM_BLOCK,
+ DATA_FORMAT_EAC_R11G11_SNORM_BLOCK,
+ DATA_FORMAT_ASTC_4x4_UNORM_BLOCK,
+ DATA_FORMAT_ASTC_4x4_SRGB_BLOCK,
+ DATA_FORMAT_ASTC_5x4_UNORM_BLOCK,
+ DATA_FORMAT_ASTC_5x4_SRGB_BLOCK,
+ DATA_FORMAT_ASTC_5x5_UNORM_BLOCK,
+ DATA_FORMAT_ASTC_5x5_SRGB_BLOCK,
+ DATA_FORMAT_ASTC_6x5_UNORM_BLOCK,
+ DATA_FORMAT_ASTC_6x5_SRGB_BLOCK,
+ DATA_FORMAT_ASTC_6x6_UNORM_BLOCK,
+ DATA_FORMAT_ASTC_6x6_SRGB_BLOCK,
+ DATA_FORMAT_ASTC_8x5_UNORM_BLOCK,
+ DATA_FORMAT_ASTC_8x5_SRGB_BLOCK,
+ DATA_FORMAT_ASTC_8x6_UNORM_BLOCK,
+ DATA_FORMAT_ASTC_8x6_SRGB_BLOCK,
+ DATA_FORMAT_ASTC_8x8_UNORM_BLOCK,
+ DATA_FORMAT_ASTC_8x8_SRGB_BLOCK,
+ DATA_FORMAT_ASTC_10x5_UNORM_BLOCK,
+ DATA_FORMAT_ASTC_10x5_SRGB_BLOCK,
+ DATA_FORMAT_ASTC_10x6_UNORM_BLOCK,
+ DATA_FORMAT_ASTC_10x6_SRGB_BLOCK,
+ DATA_FORMAT_ASTC_10x8_UNORM_BLOCK,
+ DATA_FORMAT_ASTC_10x8_SRGB_BLOCK,
+ DATA_FORMAT_ASTC_10x10_UNORM_BLOCK,
+ DATA_FORMAT_ASTC_10x10_SRGB_BLOCK,
+ DATA_FORMAT_ASTC_12x10_UNORM_BLOCK,
+ DATA_FORMAT_ASTC_12x10_SRGB_BLOCK,
+ DATA_FORMAT_ASTC_12x12_UNORM_BLOCK,
+ DATA_FORMAT_ASTC_12x12_SRGB_BLOCK,
+ DATA_FORMAT_G8B8G8R8_422_UNORM,
+ DATA_FORMAT_B8G8R8G8_422_UNORM,
+ DATA_FORMAT_G8_B8_R8_3PLANE_420_UNORM,
+ DATA_FORMAT_G8_B8R8_2PLANE_420_UNORM,
+ DATA_FORMAT_G8_B8_R8_3PLANE_422_UNORM,
+ DATA_FORMAT_G8_B8R8_2PLANE_422_UNORM,
+ DATA_FORMAT_G8_B8_R8_3PLANE_444_UNORM,
+ DATA_FORMAT_R10X6_UNORM_PACK16,
+ DATA_FORMAT_R10X6G10X6_UNORM_2PACK16,
+ DATA_FORMAT_R10X6G10X6B10X6A10X6_UNORM_4PACK16,
+ DATA_FORMAT_G10X6B10X6G10X6R10X6_422_UNORM_4PACK16,
+ DATA_FORMAT_B10X6G10X6R10X6G10X6_422_UNORM_4PACK16,
+ DATA_FORMAT_G10X6_B10X6_R10X6_3PLANE_420_UNORM_3PACK16,
+ DATA_FORMAT_G10X6_B10X6R10X6_2PLANE_420_UNORM_3PACK16,
+ DATA_FORMAT_G10X6_B10X6_R10X6_3PLANE_422_UNORM_3PACK16,
+ DATA_FORMAT_G10X6_B10X6R10X6_2PLANE_422_UNORM_3PACK16,
+ DATA_FORMAT_G10X6_B10X6_R10X6_3PLANE_444_UNORM_3PACK16,
+ DATA_FORMAT_R12X4_UNORM_PACK16,
+ DATA_FORMAT_R12X4G12X4_UNORM_2PACK16,
+ DATA_FORMAT_R12X4G12X4B12X4A12X4_UNORM_4PACK16,
+ DATA_FORMAT_G12X4B12X4G12X4R12X4_422_UNORM_4PACK16,
+ DATA_FORMAT_B12X4G12X4R12X4G12X4_422_UNORM_4PACK16,
+ DATA_FORMAT_G12X4_B12X4_R12X4_3PLANE_420_UNORM_3PACK16,
+ DATA_FORMAT_G12X4_B12X4R12X4_2PLANE_420_UNORM_3PACK16,
+ DATA_FORMAT_G12X4_B12X4_R12X4_3PLANE_422_UNORM_3PACK16,
+ DATA_FORMAT_G12X4_B12X4R12X4_2PLANE_422_UNORM_3PACK16,
+ DATA_FORMAT_G12X4_B12X4_R12X4_3PLANE_444_UNORM_3PACK16,
+ DATA_FORMAT_G16B16G16R16_422_UNORM,
+ DATA_FORMAT_B16G16R16G16_422_UNORM,
+ DATA_FORMAT_G16_B16_R16_3PLANE_420_UNORM,
+ DATA_FORMAT_G16_B16R16_2PLANE_420_UNORM,
+ DATA_FORMAT_G16_B16_R16_3PLANE_422_UNORM,
+ DATA_FORMAT_G16_B16R16_2PLANE_422_UNORM,
+ DATA_FORMAT_G16_B16_R16_3PLANE_444_UNORM,
+ DATA_FORMAT_PVRTC1_2BPP_UNORM_BLOCK_IMG,
+ DATA_FORMAT_PVRTC1_4BPP_UNORM_BLOCK_IMG,
+ DATA_FORMAT_PVRTC2_2BPP_UNORM_BLOCK_IMG,
+ DATA_FORMAT_PVRTC2_4BPP_UNORM_BLOCK_IMG,
+ DATA_FORMAT_PVRTC1_2BPP_SRGB_BLOCK_IMG,
+ DATA_FORMAT_PVRTC1_4BPP_SRGB_BLOCK_IMG,
+ DATA_FORMAT_PVRTC2_2BPP_SRGB_BLOCK_IMG,
+ DATA_FORMAT_PVRTC2_4BPP_SRGB_BLOCK_IMG,
+ DATA_FORMAT_MAX
+ };
+
+ /*****************/
+ /**** TEXTURE ****/
+ /*****************/
+
+
+ enum TextureType {
+ TEXTURE_TYPE_1D,
+ TEXTURE_TYPE_2D,
+ TEXTURE_TYPE_3D,
+ TEXTURE_TYPE_CUBE,
+ TEXTURE_TYPE_1D_ARRAY,
+ TEXTURE_TYPE_2D_ARRAY,
+ TEXTURE_TYPE_CUBE_ARRAY,
+ TEXTURE_TYPE_MAX
+ };
+
+ enum TextureSamples {
+ TEXTURE_SAMPLES_1,
+ TEXTURE_SAMPLES_2,
+ TEXTURE_SAMPLES_4,
+ TEXTURE_SAMPLES_8,
+ TEXTURE_SAMPLES_16,
+ TEXTURE_SAMPLES_32,
+ TEXTURE_SAMPLES_64,
+ TEXTURE_SAMPLES_MAX
+ };
+
+ enum TextureUsageBits {
+ TEXTURE_USAGE_SAMPLING_BIT = (1 << 0),
+ TEXTURE_USAGE_COLOR_ATTACHMENT_BIT = (1 << 1),
+ TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT = (1 << 2),
+ TEXTURE_USAGE_STORAGE_BIT = (1 << 3),
+ TEXTURE_USAGE_STORAGE_ATOMIC_BIT = (1 << 4),
+ TEXTURE_USAGE_CPU_READ_BIT = (1 << 5),
+ TEXTURE_USAGE_CAN_UPDATE_BIT = (1 << 6),
+ TEXTURE_USAGE_RESOLVE_ATTACHMENT_BIT = (1 << 7),
+ };
+
+ enum TextureSwizzle {
+ TEXTURE_SWIZZLE_IDENTITY,
+ TEXTURE_SWIZZLE_ZERO,
+ TEXTURE_SWIZZLE_ONE,
+ TEXTURE_SWIZZLE_R,
+ TEXTURE_SWIZZLE_G,
+ TEXTURE_SWIZZLE_B,
+ TEXTURE_SWIZZLE_A,
+ TEXTURE_SWIZZLE_MAX
+ };
+
+ struct TextureFormat {
+ DataFormat format;
+ uint32_t width;
+ uint32_t height;
+ uint32_t depth;
+ uint32_t array_layers;
+ uint32_t mipmaps;
+ TextureType type;
+ TextureSamples samples;
+ uint32_t usage_bits;
+
+
+ TextureFormat() {
+ format = DATA_FORMAT_R8_UNORM;
+ width = 1;
+ height = 1;
+ depth = 1;
+ array_layers = 1;
+ mipmaps = 1;
+ type = TEXTURE_TYPE_1D;
+ samples = TEXTURE_SAMPLES_1;
+ usage_bits = 0;
+ }
+ };
+
+ struct TextureView {
+ DataFormat format_override;
+ TextureSwizzle swizzle_r;
+ TextureSwizzle swizzle_g;
+ TextureSwizzle swizzle_b;
+ TextureSwizzle swizzle_a;
+
+ TextureView() {
+ format_override = DATA_FORMAT_MAX; //DATA_FORMAT_MAX means: use the texture's own format
+ swizzle_r = TEXTURE_SWIZZLE_R;
+ swizzle_g = TEXTURE_SWIZZLE_G;
+ swizzle_b = TEXTURE_SWIZZLE_B;
+ swizzle_a = TEXTURE_SWIZZLE_A;
+ }
+ };
+
+ virtual ID texture_create(const TextureFormat &p_format,const TextureView& p_view, const Vector<PoolVector<uint8_t> >&p_data = Vector<PoolVector<uint8_t> >()) = 0;
+ virtual ID texture_create_shared(const TextureView& p_view, ID p_with_texture) = 0;
+ virtual Error texture_update(ID p_texture, uint32_t p_mipmap, uint32_t p_layer, const PoolVector<uint8_t> &p_data, bool p_sync_with_draw = false) = 0; //can be called from any thread; takes effect at the beginning of the frame, unless p_sync_with_draw is true, which mixes the update in with the draw calls
+
+ virtual bool texture_is_format_supported_for_usage(DataFormat p_format,TextureUsageBits p_usage) const = 0;
+
+
+ /*********************/
+ /**** FRAMEBUFFER ****/
+ /*********************/
+
+ struct AttachmentFormat {
+ DataFormat format;
+ TextureSamples samples;
+ uint32_t usage_flags;
+ };
+
+ // This ID is guaranteed to be unique for identical formats and does not need to be freed
+ virtual ID framebuffer_format_create(const Vector<AttachmentFormat>& p_format) =0;
+
+ virtual ID framebuffer_create(const Vector<ID> &p_texture_attachments,ID p_format_check=INVALID_ID) = 0;
+
+ virtual ID framebuffer_get_format(ID p_framebuffer) = 0;
+
+ /*****************/
+ /**** SAMPLER ****/
+ /*****************/
+
+ enum SamplerFilter {
+ SAMPLER_FILTER_NEAREST,
+ SAMPLER_FILTER_LINEAR,
+ };
+
+ enum SamplerRepeatMode {
+ SAMPLER_REPEAT_MODE_REPEAT,
+ SAMPLER_REPEAT_MODE_MIRRORED_REPEAT,
+ SAMPLER_REPEAT_MODE_CLAMP_TO_EDGE,
+ SAMPLER_REPEAT_MODE_CLAMP_TO_BORDER,
+ SAMPLER_REPEAT_MODE_MIRROR_CLAMP_TO_EDGE,
+ SAMPLER_REPEAT_MODE_MAX
+ };
+
+ enum SamplerBorderColor {
+ SAMPLER_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK,
+ SAMPLER_BORDER_COLOR_INT_TRANSPARENT_BLACK,
+ SAMPLER_BORDER_COLOR_FLOAT_OPAQUE_BLACK,
+ SAMPLER_BORDER_COLOR_INT_OPAQUE_BLACK,
+ SAMPLER_BORDER_COLOR_FLOAT_OPAQUE_WHITE,
+ SAMPLER_BORDER_COLOR_INT_OPAQUE_WHITE,
+ SAMPLER_BORDER_COLOR_MAX
+ };
+
+ struct SamplerState {
+ SamplerFilter mag_filter;
+ SamplerFilter min_filter;
+ SamplerFilter mip_filter;
+ SamplerRepeatMode repeat_u;
+ SamplerRepeatMode repeat_v;
+ SamplerRepeatMode repeat_w;
+ float lod_bias;
+ bool use_anisotropy;
+ float anisotropy_max;
+ bool enable_compare;
+ CompareOperator compare_op;
+ float min_lod;
+ float max_lod;
+ SamplerBorderColor border_color;
+ bool unnormalized_uvw;
+
+ SamplerState() {
+ mag_filter=SAMPLER_FILTER_NEAREST;
+ min_filter=SAMPLER_FILTER_NEAREST;
+ mip_filter=SAMPLER_FILTER_NEAREST;
+ repeat_u=SAMPLER_REPEAT_MODE_CLAMP_TO_EDGE;
+ repeat_v=SAMPLER_REPEAT_MODE_CLAMP_TO_EDGE;
+ repeat_w=SAMPLER_REPEAT_MODE_CLAMP_TO_EDGE;
+ lod_bias=0;
+ use_anisotropy=false;
+ anisotropy_max=1.0;
+ enable_compare=false;
+ compare_op=COMPARE_OP_ALWAYS;
+ min_lod=0;
+ max_lod=1e20; //something very large should do
+ border_color=SAMPLER_BORDER_COLOR_FLOAT_OPAQUE_BLACK;
+ unnormalized_uvw=false;
+ }
+ };
+
+ virtual ID sampler_create(const SamplerState &p_state) = 0;
+
+ /**********************/
+ /**** VERTEX ARRAY ****/
+ /**********************/
+
+ enum VertexFrequency {
+ VERTEX_FREQUENCY_VERTEX,
+ VERTEX_FREQUENCY_INSTANCE,
+ };
+
+ struct VertexDescription {
+ uint32_t location; //shader location
+ uint32_t offset;
+ DataFormat format;
+ uint32_t stride;
+ VertexFrequency frequency;
+ VertexDescription() {
+ location=0;
+ offset=0;
+ stride=0;
+ format=DATA_FORMAT_MAX;
+ frequency=VERTEX_FREQUENCY_VERTEX;
+ }
+ };
+ virtual ID vertex_buffer_create(uint32_t p_size_bytes, const PoolVector<uint8_t> &p_data = PoolVector<uint8_t>()) = 0;
+
+ // This ID is guaranteed to be unique for identical formats and does not need to be freed
+ virtual ID vertex_description_create(const Vector<VertexDescription> &p_vertex_descriptions) = 0;
+ virtual ID vertex_array_create(uint32_t p_vertex_count, ID p_vertex_description,const Vector<ID>& p_src_buffers) = 0;
+
+ enum IndexBufferFormat {
+ INDEX_BUFFER_FORMAT_UINT16,
+ INDEX_BUFFER_FORMAT_UINT32,
+ };
+
+ virtual ID index_buffer_create(uint32_t p_size_indices, IndexBufferFormat p_format, const PoolVector<uint8_t> &p_data = PoolVector<uint8_t>(),bool p_use_restart_indices=false) = 0;
+ virtual ID index_array_create(ID p_index_buffer, uint32_t p_index_offset, uint32_t p_index_count) =0;
+
+ /****************/
+ /**** SHADER ****/
+ /****************/
+
+ enum ShaderStage {
+ SHADER_STAGE_VERTEX,
+ SHADER_STAGE_FRAGMENT,
+ SHADER_STAGE_TESSELATION_CONTROL,
+ SHADER_STAGE_TESSELATION_EVALUATION,
+ SHADER_STAGE_COMPUTE,
+ SHADER_STAGE_MAX,
+ SHADER_STAGE_VERTEX_BIT = (1 << SHADER_STAGE_VERTEX),
+ SHADER_STAGE_FRAGMENT_BIT = (1 << SHADER_STAGE_FRAGMENT),
+ SHADER_STAGE_TESSELATION_CONTROL_BIT = (1 << SHADER_STAGE_TESSELATION_CONTROL),
+ SHADER_STAGE_TESSELATION_EVALUATION_BIT = (1 << SHADER_STAGE_TESSELATION_EVALUATION),
+ SHADER_STAGE_COMPUTE_BIT = (1 << SHADER_STAGE_COMPUTE),
+ };
+
+ struct ShaderStageSource {
+ ShaderStage shader_stage;
+ String shader_source;
+ ShaderStageSource() {
+ shader_stage=SHADER_STAGE_VERTEX;
+ }
+ };
+
+ virtual ID shader_create_from_source(const Vector<ShaderStageSource> &p_stages,String *r_error=NULL,bool p_allow_cache=true) = 0;
+
+
+ /******************/
+ /**** UNIFORMS ****/
+ /******************/
+
+ enum UniformType {
+ UNIFORM_TYPE_SAMPLER, //for sampling only (sampler GLSL type)
+ UNIFORM_TYPE_SAMPLER_WITH_TEXTURE, //for sampling only, but includes a texture (samplerXX GLSL type)
+ UNIFORM_TYPE_TEXTURE, //texture only (textureXX GLSL type)
+ UNIFORM_TYPE_IMAGE, // storage image (imageXX GLSL type), for compute mostly
+ UNIFORM_TYPE_TEXTURE_BUFFER, // buffer texture (or TBO, textureBuffer type)
+ UNIFORM_TYPE_SAMPLER_WITH_TEXTURE_BUFFER, // buffer texture with a sampler (or TBO, samplerBuffer type)
+ UNIFORM_TYPE_IMAGE_BUFFER, //texel buffer (imageBuffer type), for compute mostly
+ UNIFORM_TYPE_UNIFORM_BUFFER, //regular uniform buffer (or UBO)
+ UNIFORM_TYPE_STORAGE_BUFFER, //storage buffer ("buffer" qualifier), like a UBO but supports storage writes, for compute mostly
+ UNIFORM_TYPE_INPUT_ATTACHMENT, //used for sub-pass read/write, only valid inside a render pass
+ UNIFORM_TYPE_MAX
+ };
+
+ virtual ID uniform_buffer_create(uint32_t p_size_bytes,const PoolVector<uint8_t>& p_data=PoolVector<uint8_t>()) =0;
+ virtual ID storage_buffer_create(uint32_t p_size,const PoolVector<uint8_t>& p_data=PoolVector<uint8_t>()) =0;
+ virtual ID texture_buffer_create(uint32_t p_size_elements,DataFormat p_format,const PoolVector<uint8_t>& p_data=PoolVector<uint8_t>()) =0;
+
+ struct Uniform {
+ UniformType type;
+ int binding; //binding index as specified in shader
+
+ //for single items, provide one ID, for
+ //multiple items (declared as arrays in shader),
+ //provide more
+ //for sampler with texture, supply two IDs for each.
+ //accepted IDs are: Sampler, Texture, Uniform Buffer and Texture Buffer
+ Vector<ID> ids;
+
+ Uniform() {
+ type=UNIFORM_TYPE_IMAGE;
+ binding=0;
+ }
+ };
+
+ virtual ID uniform_set_create(const Vector<Uniform>& p_uniforms,ID p_shader,uint32_t p_shader_set) = 0;
+
+ virtual Error buffer_update(ID p_buffer, uint32_t p_offset, uint32_t p_size, void *p_data, bool p_sync_with_draw = false) = 0; //can be called from any thread; takes effect at the beginning of the frame, unless p_sync_with_draw is true, which mixes the update in with the draw calls
+
+ /*************************/
+ /**** RENDER PIPELINE ****/
+ /*************************/
+
+ enum RenderPrimitive {
+ RENDER_PRIMITIVE_POINTS,
+ RENDER_PRIMITIVE_LINES,
+ RENDER_PRIMITIVE_LINES_WITH_ADJACENCY,
+ RENDER_PRIMITIVE_LINESTRIPS,
+ RENDER_PRIMITIVE_LINESTRIPS_WITH_ADJACENCY,
+ RENDER_PRIMITIVE_TRIANGLES,
+ RENDER_PRIMITIVE_TRIANGLES_WITH_ADJACENCY,
+ RENDER_PRIMITIVE_TRIANGLE_STRIPS,
+ RENDER_PRIMITIVE_TRIANGLE_STRIPS_WITH_AJACENCY,
+ RENDER_PRIMITIVE_TRIANGLE_STRIPS_WITH_RESTART_INDEX,
+ RENDER_PRIMITIVE_TESSELATION_PATCH,
+ RENDER_PRIMITIVE_MAX
+ };
+
+ //disable optimization, tessellate control points
+
+ enum PolygonCullMode {
+ POLYGON_CULL_DISABLED,
+ POLYGON_CULL_FRONT,
+ POLYGON_CULL_BACK,
+ };
+
+ enum PolygonFrontFace {
+ POLYGON_FRONT_FACE_CLOCKWISE,
+ POLYGON_FRONT_FACE_COUNTER_CLOCKWISE,
+ };
+
+ enum StencilOperation {
+ STENCIL_OP_KEEP,
+ STENCIL_OP_ZERO,
+ STENCIL_OP_REPLACE,
+ STENCIL_OP_INCREMENT_AND_CLAMP,
+ STENCIL_OP_DECREMENT_AND_CLAMP,
+ STENCIL_OP_INVERT,
+ STENCIL_OP_INCREMENT_AND_WRAP,
+ STENCIL_OP_DECREMENT_AND_WRAP,
+ STENCIL_OP_MAX //not an actual operator, just the amount of operators :D
+ };
+
+ enum LogicOperation {
+ LOGIC_OP_CLEAR,
+ LOGIC_OP_AND,
+ LOGIC_OP_AND_REVERSE,
+ LOGIC_OP_COPY,
+ LOGIC_OP_AND_INVERTED,
+ LOGIC_OP_NO_OP,
+ LOGIC_OP_XOR,
+ LOGIC_OP_OR,
+ LOGIC_OP_NOR,
+ LOGIC_OP_EQUIVALENT,
+ LOGIC_OP_INVERT,
+ LOGIC_OP_OR_REVERSE,
+ LOGIC_OP_COPY_INVERTED,
+ LOGIC_OP_OR_INVERTED,
+ LOGIC_OP_NAND,
+ LOGIC_OP_SET,
+ LOGIC_OP_MAX //not an actual operator, just the amount of operators :D
+ };
+
+ enum BlendFactor {
+ BLEND_FACTOR_ZERO,
+ BLEND_FACTOR_ONE,
+ BLEND_FACTOR_SRC_COLOR,
+ BLEND_FACTOR_ONE_MINUS_SRC_COLOR,
+ BLEND_FACTOR_DST_COLOR,
+ BLEND_FACTOR_ONE_MINUS_DST_COLOR,
+ BLEND_FACTOR_SRC_ALPHA,
+ BLEND_FACTOR_ONE_MINUS_SRC_ALPHA,
+ BLEND_FACTOR_DST_ALPHA,
+ BLEND_FACTOR_ONE_MINUS_DST_ALPHA,
+ BLEND_FACTOR_CONSTANT_COLOR,
+ BLEND_FACTOR_ONE_MINUS_CONSTANT_COLOR,
+ BLEND_FACTOR_CONSTANT_ALPHA,
+ BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA,
+ BLEND_FACTOR_SRC_ALPHA_SATURATE,
+ BLEND_FACTOR_SRC1_COLOR,
+ BLEND_FACTOR_ONE_MINUS_SRC1_COLOR,
+ BLEND_FACTOR_SRC1_ALPHA,
+ BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA,
+ BLEND_FACTOR_MAX
+ };
+
+ enum BlendOperation {
+ BLEND_OP_ADD,
+ BLEND_OP_SUBTRACT,
+ BLEND_OP_REVERSE_SUBTRACT,
+ BLEND_OP_MINIMUM,
+ BLEND_OP_MAXIMUM, //yes this one is an actual operator
+ BLEND_OP_MAX //not an actual operator, just the amount of operators :D
+ };
+
+ struct PipelineRasterizationState {
+ bool enable_depth_clamp;
+ bool discard_primitives;
+ bool wireframe;
+ PolygonCullMode cull_mode;
+ PolygonFrontFace front_face;
+ bool depth_bias_enable;
+ float depth_bias_constant_factor;
+ float depth_bias_clamp;
+ float depth_bias_slope_factor;
+ float line_width;
+ uint32_t patch_control_points;
+ PipelineRasterizationState() {
+ enable_depth_clamp = false;
+ discard_primitives = false;
+ wireframe = false;
+ cull_mode = POLYGON_CULL_DISABLED;
+ front_face = POLYGON_FRONT_FACE_CLOCKWISE;
+ depth_bias_enable = false;
+ depth_bias_constant_factor = 0;
+ depth_bias_clamp = 0;
+ depth_bias_slope_factor = 0;
+ line_width = 1.0;
+ patch_control_points=1;
+ }
+ };
+
+ struct PipelineMultisampleState {
+ TextureSamples sample_count;
+ bool enable_sample_shading;
+ float min_sample_shading;
+ Vector<uint32_t> sample_mask;
+ bool enable_alpha_to_coverage;
+ bool enable_alpha_to_one;
+
+ PipelineMultisampleState() {
+ sample_count = TEXTURE_SAMPLES_1;
+ enable_sample_shading = false;
+ min_sample_shading = 0;
+ enable_alpha_to_coverage = false;
+ enable_alpha_to_one = false;
+ }
+ };
+
+ struct PipelineDepthStencilState {
+
+ bool enable_depth_test;
+ bool enable_depth_write;
+ CompareOperator depth_compare_operator;
+ bool enable_depth_range;
+ float depth_range_min;
+ float depth_range_max;
+ bool enable_stencil;
+
+ struct StencilOperationState {
+ StencilOperation fail;
+ StencilOperation pass;
+ StencilOperation depth_fail;
+ CompareOperator compare;
+ uint32_t compare_mask;
+ uint32_t write_mask;
+ uint32_t reference;
+
+ StencilOperationState() {
+ fail = STENCIL_OP_ZERO;
+ pass = STENCIL_OP_ZERO;
+ depth_fail = STENCIL_OP_ZERO;
+ compare = COMPARE_OP_ALWAYS;
+ compare_mask = 0;
+ write_mask = 0;
+ reference = 0;
+ }
+ };
+
+ StencilOperationState stencil_operation_front;
+ StencilOperationState stencil_operation_back;
+
+ PipelineDepthStencilState() {
+ enable_depth_test = false;
+ enable_depth_write = false;
+ depth_compare_operator = COMPARE_OP_ALWAYS;
+ enable_depth_range = false;
+ depth_range_min = 0;
+ depth_range_max = 0;
+ enable_stencil = false;
+ }
+ };
+
+ struct PipelineColorBlendState {
+
+ bool enable_logic_op;
+ LogicOperation logic_op;
+ struct Attachment {
+ bool enable_blend;
+ BlendFactor src_color_blend_factor;
+ BlendFactor dst_color_blend_factor;
+ BlendOperation color_blend_op;
+ BlendFactor src_alpha_blend_factor;
+ BlendFactor dst_alpha_blend_factor;
+ BlendOperation alpha_blend_op;
+ bool write_r;
+ bool write_g;
+ bool write_b;
+ bool write_a;
+ Attachment() {
+ enable_blend = false;
+ src_color_blend_factor = BLEND_FACTOR_ZERO;
+ dst_color_blend_factor = BLEND_FACTOR_ZERO;
+ color_blend_op = BLEND_OP_ADD;
+ src_alpha_blend_factor = BLEND_FACTOR_ZERO;
+ dst_alpha_blend_factor = BLEND_FACTOR_ZERO;
+ alpha_blend_op = BLEND_OP_ADD;
+ write_r = true;
+ write_g = true;
+ write_b = true;
+ write_a = true;
+ }
+ };
+
+ static PipelineColorBlendState create_disabled(int p_attachments=1) {
+ PipelineColorBlendState bs;
+ for(int i=0;i<p_attachments;i++) {
+ bs.attachments.push_back(Attachment());
+ }
+ return bs;
+ }
+
+ Vector<Attachment> attachments; //one per render target texture
+ Color blend_constant;
+
+ PipelineColorBlendState() {
+ enable_logic_op = false;
+ logic_op = LOGIC_OP_CLEAR;
+ }
+ };
+
+ enum PipelineDynamicStateFlags {
+ DYNAMIC_STATE_LINE_WIDTH = (1 << 0),
+ DYNAMIC_STATE_DEPTH_BIAS = (1 << 1),
+ DYNAMIC_STATE_BLEND_CONSTANTS = (1 << 2),
+ DYNAMIC_STATE_DEPTH_BOUNDS = (1 << 3),
+ DYNAMIC_STATE_STENCIL_COMPARE_MASK = (1 << 4),
+ DYNAMIC_STATE_STENCIL_WRITE_MASK = (1 << 5),
+ DYNAMIC_STATE_STENCIL_REFERENCE = (1 << 6),
+ };
+
+ virtual ID render_pipeline_create(ID p_shader, ID p_framebuffer_format, ID p_vertex_description,RenderPrimitive p_render_primitive, const PipelineRasterizationState &p_rasterization_state, const PipelineMultisampleState &p_multisample_state, const PipelineDepthStencilState &p_depth_stencil_state, const PipelineColorBlendState &p_blend_state, int p_dynamic_state_flags=0) = 0;
+
+ /****************/
+ /**** SCREEN ****/
+ /****************/
+
+ virtual int screen_get_width(int p_screen = 0) const = 0;
+ virtual int screen_get_height(int p_screen = 0) const = 0;
+ virtual ID screen_get_framebuffer_format() const = 0;
+
+ /********************/
+ /**** DRAW LISTS ****/
+ /********************/
+
+
+ enum InitialAction {
+ INITIAL_ACTION_CLEAR, //start rendering and clear the framebuffer (supply params)
+ INITIAL_ACTION_KEEP_COLOR, //start rendering, but keep attached color texture contents (depth will be cleared)
+ INITIAL_ACTION_KEEP_COLOR_AND_DEPTH, //start rendering, but keep attached color and depth texture contents
+ INITIAL_ACTION_CONTINUE, //continue rendering (framebuffer must have been left in "continue" state as the final action previously)
+ INITIAL_ACTION_MAX
+ };
+
+ enum FinalAction {
+ FINAL_ACTION_READ_COLOR_AND_DEPTH, //will no longer render to it, allows attached color and depth textures to be read again
+ FINAL_ACTION_READ_COLOR_DISCARD_DEPTH, //will no longer render to it, allows attached color textures to be read again, but depth buffer contents will be dropped (can't be read from)
+ FINAL_ACTION_DISCARD, // discard contents after rendering
+ FINAL_ACTION_CONTINUE, //will continue rendering later, attached textures can't be read until re-bound with "finish"
+ FINAL_ACTION_MAX
+ };
+
+
+ virtual ID draw_list_begin_for_screen(int p_screen = 0, const Color &p_clear_color = Color()) =0;
+ virtual ID draw_list_begin(ID p_framebuffer, InitialAction p_initial_action, FinalAction p_final_action, const Vector<Color> &p_clear_color_values = Vector<Color>(),const Rect2& p_region=Rect2()) =0;
+ virtual Error draw_list_begin_split(ID p_framebuffer, uint32_t p_splits,ID *r_split_ids, InitialAction p_initial_action, FinalAction p_final_action, const Vector<Color> &p_clear_color_values = Vector<Color>(),const Rect2& p_region=Rect2()) =0;
+
+ virtual void draw_list_bind_render_pipeline(ID p_list, ID p_render_pipeline) = 0;
+ virtual void draw_list_bind_uniform_set(ID p_list, ID p_uniform_set, uint32_t p_index) =0;
+ virtual void draw_list_bind_vertex_array(ID p_list, ID p_vertex_array) = 0;
+ virtual void draw_list_bind_index_array(ID p_list, ID p_index_array) = 0;
+
+ virtual void draw_list_draw(ID p_list, bool p_use_indices, uint32_t p_instances=1) = 0;
+
+ virtual void draw_list_enable_scissor(ID p_list, const Rect2& p_rect) = 0;
+ virtual void draw_list_disable_scissor(ID p_list) = 0;
+
+ virtual void draw_list_end() =0;
+
+ /***************/
+ /**** FREE! ****/
+ /***************/
+
+ virtual void free(ID p_id) =0;
+ RenderingDevice();
+};
+
+#endif // RENDERING_DEVICE_H
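Two conventions in the uniform API above are easy to miss: UNIFORM_TYPE_SAMPLER_WITH_TEXTURE takes its ids in (sampler, texture) pairs, and buffer contents change through buffer_update() rather than re-creation. A hedged sketch against this interface (rd, shader, and the resource IDs are assumed to come from the creation functions declared above):

// Sketch only: rd is a live RenderingDevice backend; the IDs were
// returned by sampler_create(), texture_create() and uniform_buffer_create().
RenderingDevice::ID make_material_set(RenderingDevice *rd, RenderingDevice::ID shader,
		RenderingDevice::ID sampler, RenderingDevice::ID texture, RenderingDevice::ID ubo) {

	Vector<RenderingDevice::Uniform> uniforms;

	RenderingDevice::Uniform u;
	u.type = RenderingDevice::UNIFORM_TYPE_SAMPLER_WITH_TEXTURE;
	u.binding = 0;
	u.ids.push_back(sampler); //sampler first,
	u.ids.push_back(texture); //then the texture it samples
	uniforms.push_back(u);

	RenderingDevice::Uniform b;
	b.type = RenderingDevice::UNIFORM_TYPE_UNIFORM_BUFFER;
	b.binding = 1;
	b.ids.push_back(ubo);
	uniforms.push_back(b);

	// Set 0 of the given shader; bindings must match the GLSL layout.
	return rd->uniform_set_create(uniforms, shader, 0);
}

Refreshing the UBO later is then a plain rd->buffer_update(ubo, 0, size, data) at the start of a frame, or the same call with p_sync_with_draw = true to mix the update in with draw calls.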
diff --git a/servers/visual/visual_server_viewport.cpp b/servers/visual/visual_server_viewport.cpp
index df9cef20f9..b9cdceead0 100644
--- a/servers/visual/visual_server_viewport.cpp
+++ b/servers/visual/visual_server_viewport.cpp
@@ -290,7 +290,10 @@ void VisualServerViewport::draw_viewports() {
if (vp->update_mode == VS::VIEWPORT_UPDATE_DISABLED)
continue;
- ERR_CONTINUE(!vp->render_target.is_valid());
+ if (!vp->render_target.is_valid()) {
+ continue;
+ }
+ //ERR_CONTINUE(!vp->render_target.is_valid());
bool visible = vp->viewport_to_screen_rect != Rect2() || vp->update_mode == VS::VIEWPORT_UPDATE_ALWAYS || vp->update_mode == VS::VIEWPORT_UPDATE_ONCE || (vp->update_mode == VS::VIEWPORT_UPDATE_WHEN_VISIBLE && VSG::storage->render_target_was_used(vp->render_target));
visible = visible && vp->size.x > 1 && vp->size.y > 1;
diff --git a/thirdparty/glslang/OGLCompilersDLL/InitializeDll.cpp b/thirdparty/glslang/OGLCompilersDLL/InitializeDll.cpp
new file mode 100644
index 0000000000..abea9108b1
--- /dev/null
+++ b/thirdparty/glslang/OGLCompilersDLL/InitializeDll.cpp
@@ -0,0 +1,165 @@
+//
+// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+#define SH_EXPORTING
+
+#include <cassert>
+
+#include "InitializeDll.h"
+#include "../glslang/Include/InitializeGlobals.h"
+#include "../glslang/Public/ShaderLang.h"
+#include "../glslang/Include/PoolAlloc.h"
+
+namespace glslang {
+
+OS_TLSIndex ThreadInitializeIndex = OS_INVALID_TLS_INDEX;
+
+// Per-process initialization.
+// Needs to be called at least once before parsing, etc. is done.
+// Will also do thread initialization for the calling thread; other
+// threads will need to do that explicitly.
+bool InitProcess()
+{
+ glslang::GetGlobalLock();
+
+ if (ThreadInitializeIndex != OS_INVALID_TLS_INDEX) {
+ //
+ // Function is re-entrant.
+ //
+
+ glslang::ReleaseGlobalLock();
+ return true;
+ }
+
+ ThreadInitializeIndex = OS_AllocTLSIndex();
+
+ if (ThreadInitializeIndex == OS_INVALID_TLS_INDEX) {
+ assert(0 && "InitProcess(): Failed to allocate TLS area for init flag");
+
+ glslang::ReleaseGlobalLock();
+ return false;
+ }
+
+ if (! InitializePoolIndex()) {
+ assert(0 && "InitProcess(): Failed to initialize global pool");
+
+ glslang::ReleaseGlobalLock();
+ return false;
+ }
+
+ if (! InitThread()) {
+ assert(0 && "InitProcess(): Failed to initialize thread");
+
+ glslang::ReleaseGlobalLock();
+ return false;
+ }
+
+ glslang::ReleaseGlobalLock();
+ return true;
+}
+
+// Per-thread scoped initialization.
+// Must be called at least once by each new thread sharing the
+// symbol tables, etc., needed to parse.
+bool InitThread()
+{
+ //
+ // This function is re-entrant
+ //
+ if (ThreadInitializeIndex == OS_INVALID_TLS_INDEX) {
+ assert(0 && "InitThread(): Process hasn't been initalised.");
+ return false;
+ }
+
+ if (OS_GetTLSValue(ThreadInitializeIndex) != 0)
+ return true;
+
+ if (! OS_SetTLSValue(ThreadInitializeIndex, (void *)1)) {
+ assert(0 && "InitThread(): Unable to set init flag.");
+ return false;
+ }
+
+ glslang::SetThreadPoolAllocator(nullptr);
+
+ return true;
+}
+
+// Not necessary to call this: InitThread() is reentrant, and the need
+// to do per thread tear down has been removed.
+//
+// This is kept, with memory management removed, to satisfy any existing
+// calls to it that rely on it.
+bool DetachThread()
+{
+ bool success = true;
+
+ if (ThreadInitializeIndex == OS_INVALID_TLS_INDEX)
+ return true;
+
+ //
+ // Function is re-entrant and this thread may not have been initialized.
+ //
+ if (OS_GetTLSValue(ThreadInitializeIndex) != 0) {
+ if (!OS_SetTLSValue(ThreadInitializeIndex, (void *)0)) {
+ assert(0 && "DetachThread(): Unable to clear init flag.");
+ success = false;
+ }
+ }
+
+ return success;
+}
+
+// Not necessary to call this: InitProcess() is reentrant.
+//
+// This is kept, with memory management removed, to satisfy any existing
+// calls to it that rely on it.
+//
+// Users of glslang should call shFinalize() or glslang::FinalizeProcess() for
+// process-scoped memory tear down.
+bool DetachProcess()
+{
+ bool success = true;
+
+ if (ThreadInitializeIndex == OS_INVALID_TLS_INDEX)
+ return true;
+
+ success = DetachThread();
+
+ OS_FreeTLSIndex(ThreadInitializeIndex);
+ ThreadInitializeIndex = OS_INVALID_TLS_INDEX;
+
+ return success;
+}
+
+} // end namespace glslang
diff --git a/thirdparty/glslang/OGLCompilersDLL/InitializeDll.h b/thirdparty/glslang/OGLCompilersDLL/InitializeDll.h
new file mode 100644
index 0000000000..661cee4d24
--- /dev/null
+++ b/thirdparty/glslang/OGLCompilersDLL/InitializeDll.h
@@ -0,0 +1,49 @@
+//
+// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+#ifndef __INITIALIZEDLL_H
+#define __INITIALIZEDLL_H
+
+#include "../glslang/OSDependent/osinclude.h"
+
+namespace glslang {
+
+bool InitProcess();
+bool InitThread();
+bool DetachThread(); // not called from standalone, perhaps other tools rely on parts of it
+bool DetachProcess(); // not called from standalone, perhaps other tools rely on parts of it
+
+} // end namespace glslang
+
+#endif // __INITIALIZEDLL_H
+
diff --git a/thirdparty/glslang/SPIRV/GLSL.ext.AMD.h b/thirdparty/glslang/SPIRV/GLSL.ext.AMD.h
new file mode 100644
index 0000000000..009d2f1cf0
--- /dev/null
+++ b/thirdparty/glslang/SPIRV/GLSL.ext.AMD.h
@@ -0,0 +1,108 @@
+/*
+** Copyright (c) 2014-2016 The Khronos Group Inc.
+**
+** Permission is hereby granted, free of charge, to any person obtaining a copy
+** of this software and/or associated documentation files (the "Materials"),
+** to deal in the Materials without restriction, including without limitation
+** the rights to use, copy, modify, merge, publish, distribute, sublicense,
+** and/or sell copies of the Materials, and to permit persons to whom the
+** Materials are furnished to do so, subject to the following conditions:
+**
+** The above copyright notice and this permission notice shall be included in
+** all copies or substantial portions of the Materials.
+**
+** MODIFICATIONS TO THIS FILE MAY MEAN IT NO LONGER ACCURATELY REFLECTS KHRONOS
+** STANDARDS. THE UNMODIFIED, NORMATIVE VERSIONS OF KHRONOS SPECIFICATIONS AND
+** HEADER INFORMATION ARE LOCATED AT https://www.khronos.org/registry/
+**
+** THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+** OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+** THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+** FROM,OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS
+** IN THE MATERIALS.
+*/
+
+#ifndef GLSLextAMD_H
+#define GLSLextAMD_H
+
+static const int GLSLextAMDVersion = 100;
+static const int GLSLextAMDRevision = 7;
+
+// SPV_AMD_shader_ballot
+static const char* const E_SPV_AMD_shader_ballot = "SPV_AMD_shader_ballot";
+
+enum ShaderBallotAMD {
+ ShaderBallotBadAMD = 0, // Don't use
+
+ SwizzleInvocationsAMD = 1,
+ SwizzleInvocationsMaskedAMD = 2,
+ WriteInvocationAMD = 3,
+ MbcntAMD = 4,
+
+ ShaderBallotCountAMD
+};
+
+// SPV_AMD_shader_trinary_minmax
+static const char* const E_SPV_AMD_shader_trinary_minmax = "SPV_AMD_shader_trinary_minmax";
+
+enum ShaderTrinaryMinMaxAMD {
+ ShaderTrinaryMinMaxBadAMD = 0, // Don't use
+
+ FMin3AMD = 1,
+ UMin3AMD = 2,
+ SMin3AMD = 3,
+ FMax3AMD = 4,
+ UMax3AMD = 5,
+ SMax3AMD = 6,
+ FMid3AMD = 7,
+ UMid3AMD = 8,
+ SMid3AMD = 9,
+
+ ShaderTrinaryMinMaxCountAMD
+};
+
+// SPV_AMD_shader_explicit_vertex_parameter
+static const char* const E_SPV_AMD_shader_explicit_vertex_parameter = "SPV_AMD_shader_explicit_vertex_parameter";
+
+enum ShaderExplicitVertexParameterAMD {
+ ShaderExplicitVertexParameterBadAMD = 0, // Don't use
+
+ InterpolateAtVertexAMD = 1,
+
+ ShaderExplicitVertexParameterCountAMD
+};
+
+// SPV_AMD_gcn_shader
+static const char* const E_SPV_AMD_gcn_shader = "SPV_AMD_gcn_shader";
+
+enum GcnShaderAMD {
+ GcnShaderBadAMD = 0, // Don't use
+
+ CubeFaceIndexAMD = 1,
+ CubeFaceCoordAMD = 2,
+ TimeAMD = 3,
+
+ GcnShaderCountAMD
+};
+
+// SPV_AMD_gpu_shader_half_float
+static const char* const E_SPV_AMD_gpu_shader_half_float = "SPV_AMD_gpu_shader_half_float";
+
+// SPV_AMD_texture_gather_bias_lod
+static const char* const E_SPV_AMD_texture_gather_bias_lod = "SPV_AMD_texture_gather_bias_lod";
+
+// SPV_AMD_gpu_shader_int16
+static const char* const E_SPV_AMD_gpu_shader_int16 = "SPV_AMD_gpu_shader_int16";
+
+// SPV_AMD_shader_image_load_store_lod
+static const char* const E_SPV_AMD_shader_image_load_store_lod = "SPV_AMD_shader_image_load_store_lod";
+
+// SPV_AMD_shader_fragment_mask
+static const char* const E_SPV_AMD_shader_fragment_mask = "SPV_AMD_shader_fragment_mask";
+
+// SPV_AMD_gpu_shader_half_float_fetch
+static const char* const E_SPV_AMD_gpu_shader_half_float_fetch = "SPV_AMD_gpu_shader_half_float_fetch";
+
+#endif // #ifndef GLSLextAMD_H
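+
+// Illustrative sketch, not part of the upstream header: the strings above
+// name SPIR-V extended instruction sets, and the enum values are the opcodes
+// passed to OpExtInst. With glslang's spv::Builder this looks roughly like:
+//
+//     spv::Id set = builder.import(E_SPV_AMD_shader_trinary_minmax);
+//     spv::Id id  = builder.createBuiltinCall(typeId, set, FMin3AMD, args);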
diff --git a/thirdparty/glslang/SPIRV/GLSL.ext.EXT.h b/thirdparty/glslang/SPIRV/GLSL.ext.EXT.h
new file mode 100644
index 0000000000..e29c055b9a
--- /dev/null
+++ b/thirdparty/glslang/SPIRV/GLSL.ext.EXT.h
@@ -0,0 +1,38 @@
+/*
+** Copyright (c) 2014-2016 The Khronos Group Inc.
+**
+** Permission is hereby granted, free of charge, to any person obtaining a copy
+** of this software and/or associated documentation files (the "Materials"),
+** to deal in the Materials without restriction, including without limitation
+** the rights to use, copy, modify, merge, publish, distribute, sublicense,
+** and/or sell copies of the Materials, and to permit persons to whom the
+** Materials are furnished to do so, subject to the following conditions:
+**
+** The above copyright notice and this permission notice shall be included in
+** all copies or substantial portions of the Materials.
+**
+** MODIFICATIONS TO THIS FILE MAY MEAN IT NO LONGER ACCURATELY REFLECTS KHRONOS
+** STANDARDS. THE UNMODIFIED, NORMATIVE VERSIONS OF KHRONOS SPECIFICATIONS AND
+** HEADER INFORMATION ARE LOCATED AT https://www.khronos.org/registry/
+**
+** THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+** OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+** THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+** FROM,OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS
+** IN THE MATERIALS.
+*/
+
+#ifndef GLSLextEXT_H
+#define GLSLextEXT_H
+
+static const int GLSLextEXTVersion = 100;
+static const int GLSLextEXTRevision = 2;
+
+static const char* const E_SPV_EXT_shader_stencil_export = "SPV_EXT_shader_stencil_export";
+static const char* const E_SPV_EXT_shader_viewport_index_layer = "SPV_EXT_shader_viewport_index_layer";
+static const char* const E_SPV_EXT_fragment_fully_covered = "SPV_EXT_fragment_fully_covered";
+static const char* const E_SPV_EXT_fragment_invocation_density = "SPV_EXT_fragment_invocation_density";
+
+#endif // #ifndef GLSLextEXT_H
diff --git a/thirdparty/glslang/SPIRV/GLSL.ext.KHR.h b/thirdparty/glslang/SPIRV/GLSL.ext.KHR.h
new file mode 100644
index 0000000000..333442bb3e
--- /dev/null
+++ b/thirdparty/glslang/SPIRV/GLSL.ext.KHR.h
@@ -0,0 +1,45 @@
+/*
+** Copyright (c) 2014-2016 The Khronos Group Inc.
+**
+** Permission is hereby granted, free of charge, to any person obtaining a copy
+** of this software and/or associated documentation files (the "Materials"),
+** to deal in the Materials without restriction, including without limitation
+** the rights to use, copy, modify, merge, publish, distribute, sublicense,
+** and/or sell copies of the Materials, and to permit persons to whom the
+** Materials are furnished to do so, subject to the following conditions:
+**
+** The above copyright notice and this permission notice shall be included in
+** all copies or substantial portions of the Materials.
+**
+** MODIFICATIONS TO THIS FILE MAY MEAN IT NO LONGER ACCURATELY REFLECTS KHRONOS
+** STANDARDS. THE UNMODIFIED, NORMATIVE VERSIONS OF KHRONOS SPECIFICATIONS AND
+** HEADER INFORMATION ARE LOCATED AT https://www.khronos.org/registry/
+**
+** THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+** OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+** THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+** FROM,OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS
+** IN THE MATERIALS.
+*/
+
+#ifndef GLSLextKHR_H
+#define GLSLextKHR_H
+
+static const int GLSLextKHRVersion = 100;
+static const int GLSLextKHRRevision = 2;
+
+static const char* const E_SPV_KHR_shader_ballot = "SPV_KHR_shader_ballot";
+static const char* const E_SPV_KHR_subgroup_vote = "SPV_KHR_subgroup_vote";
+static const char* const E_SPV_KHR_device_group = "SPV_KHR_device_group";
+static const char* const E_SPV_KHR_multiview = "SPV_KHR_multiview";
+static const char* const E_SPV_KHR_shader_draw_parameters = "SPV_KHR_shader_draw_parameters";
+static const char* const E_SPV_KHR_16bit_storage = "SPV_KHR_16bit_storage";
+static const char* const E_SPV_KHR_8bit_storage = "SPV_KHR_8bit_storage";
+static const char* const E_SPV_KHR_storage_buffer_storage_class = "SPV_KHR_storage_buffer_storage_class";
+static const char* const E_SPV_KHR_post_depth_coverage = "SPV_KHR_post_depth_coverage";
+static const char* const E_SPV_KHR_vulkan_memory_model = "SPV_KHR_vulkan_memory_model";
+static const char* const E_SPV_EXT_physical_storage_buffer = "SPV_EXT_physical_storage_buffer";
+
+#endif // #ifndef GLSLextKHR_H
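+
+// Illustrative sketch, not part of the upstream header: these names are
+// emitted as OpExtension when the corresponding feature is used, typically
+// paired with a capability:
+//
+//     builder.addExtension(E_SPV_KHR_shader_ballot);
+//     builder.addCapability(spv::CapabilitySubgroupBallotKHR);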
diff --git a/thirdparty/glslang/SPIRV/GLSL.ext.NV.h b/thirdparty/glslang/SPIRV/GLSL.ext.NV.h
new file mode 100644
index 0000000000..ede2c570eb
--- /dev/null
+++ b/thirdparty/glslang/SPIRV/GLSL.ext.NV.h
@@ -0,0 +1,78 @@
+/*
+** Copyright (c) 2014-2017 The Khronos Group Inc.
+**
+** Permission is hereby granted, free of charge, to any person obtaining a copy
+** of this software and/or associated documentation files (the "Materials"),
+** to deal in the Materials without restriction, including without limitation
+** the rights to use, copy, modify, merge, publish, distribute, sublicense,
+** and/or sell copies of the Materials, and to permit persons to whom the
+** Materials are furnished to do so, subject to the following conditions:
+**
+** The above copyright notice and this permission notice shall be included in
+** all copies or substantial portions of the Materials.
+**
+** MODIFICATIONS TO THIS FILE MAY MEAN IT NO LONGER ACCURATELY REFLECTS KHRONOS
+** STANDARDS. THE UNMODIFIED, NORMATIVE VERSIONS OF KHRONOS SPECIFICATIONS AND
+** HEADER INFORMATION ARE LOCATED AT https://www.khronos.org/registry/
+**
+** THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+** OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+** THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+** FROM,OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS
+** IN THE MATERIALS.
+*/
+
+#ifndef GLSLextNV_H
+#define GLSLextNV_H
+
+enum BuiltIn;
+enum Decoration;
+enum Op;
+enum Capability;
+
+static const int GLSLextNVVersion = 100;
+static const int GLSLextNVRevision = 11;
+
+//SPV_NV_sample_mask_override_coverage
+const char* const E_SPV_NV_sample_mask_override_coverage = "SPV_NV_sample_mask_override_coverage";
+
+//SPV_NV_geometry_shader_passthrough
+const char* const E_SPV_NV_geometry_shader_passthrough = "SPV_NV_geometry_shader_passthrough";
+
+//SPV_NV_viewport_array2
+const char* const E_SPV_NV_viewport_array2 = "SPV_NV_viewport_array2";
+const char* const E_ARB_shader_viewport_layer_array = "SPV_ARB_shader_viewport_layer_array";
+
+//SPV_NV_stereo_view_rendering
+const char* const E_SPV_NV_stereo_view_rendering = "SPV_NV_stereo_view_rendering";
+
+//SPV_NVX_multiview_per_view_attributes
+const char* const E_SPV_NVX_multiview_per_view_attributes = "SPV_NVX_multiview_per_view_attributes";
+
+//SPV_NV_shader_subgroup_partitioned
+const char* const E_SPV_NV_shader_subgroup_partitioned = "SPV_NV_shader_subgroup_partitioned";
+
+//SPV_NV_fragment_shader_barycentric
+const char* const E_SPV_NV_fragment_shader_barycentric = "SPV_NV_fragment_shader_barycentric";
+
+//SPV_NV_compute_shader_derivatives
+const char* const E_SPV_NV_compute_shader_derivatives = "SPV_NV_compute_shader_derivatives";
+
+//SPV_NV_shader_image_footprint
+const char* const E_SPV_NV_shader_image_footprint = "SPV_NV_shader_image_footprint";
+
+//SPV_NV_mesh_shader
+const char* const E_SPV_NV_mesh_shader = "SPV_NV_mesh_shader";
+
+//SPV_NV_raytracing
+const char* const E_SPV_NV_ray_tracing = "SPV_NV_ray_tracing";
+
+//SPV_NV_shading_rate
+const char* const E_SPV_NV_shading_rate = "SPV_NV_shading_rate";
+
+//SPV_NV_cooperative_matrix
+const char* const E_SPV_NV_cooperative_matrix = "SPV_NV_cooperative_matrix";
+
+#endif // #ifndef GLSLextNV_H
diff --git a/thirdparty/glslang/SPIRV/GLSL.std.450.h b/thirdparty/glslang/SPIRV/GLSL.std.450.h
new file mode 100644
index 0000000000..df31092bec
--- /dev/null
+++ b/thirdparty/glslang/SPIRV/GLSL.std.450.h
@@ -0,0 +1,131 @@
+/*
+** Copyright (c) 2014-2016 The Khronos Group Inc.
+**
+** Permission is hereby granted, free of charge, to any person obtaining a copy
+** of this software and/or associated documentation files (the "Materials"),
+** to deal in the Materials without restriction, including without limitation
+** the rights to use, copy, modify, merge, publish, distribute, sublicense,
+** and/or sell copies of the Materials, and to permit persons to whom the
+** Materials are furnished to do so, subject to the following conditions:
+**
+** The above copyright notice and this permission notice shall be included in
+** all copies or substantial portions of the Materials.
+**
+** MODIFICATIONS TO THIS FILE MAY MEAN IT NO LONGER ACCURATELY REFLECTS KHRONOS
+** STANDARDS. THE UNMODIFIED, NORMATIVE VERSIONS OF KHRONOS SPECIFICATIONS AND
+** HEADER INFORMATION ARE LOCATED AT https://www.khronos.org/registry/
+**
+** THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+** OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+** THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+** FROM,OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS
+** IN THE MATERIALS.
+*/
+
+#ifndef GLSLstd450_H
+#define GLSLstd450_H
+
+static const int GLSLstd450Version = 100;
+static const int GLSLstd450Revision = 1;
+
+enum GLSLstd450 {
+ GLSLstd450Bad = 0, // Don't use
+
+ GLSLstd450Round = 1,
+ GLSLstd450RoundEven = 2,
+ GLSLstd450Trunc = 3,
+ GLSLstd450FAbs = 4,
+ GLSLstd450SAbs = 5,
+ GLSLstd450FSign = 6,
+ GLSLstd450SSign = 7,
+ GLSLstd450Floor = 8,
+ GLSLstd450Ceil = 9,
+ GLSLstd450Fract = 10,
+
+ GLSLstd450Radians = 11,
+ GLSLstd450Degrees = 12,
+ GLSLstd450Sin = 13,
+ GLSLstd450Cos = 14,
+ GLSLstd450Tan = 15,
+ GLSLstd450Asin = 16,
+ GLSLstd450Acos = 17,
+ GLSLstd450Atan = 18,
+ GLSLstd450Sinh = 19,
+ GLSLstd450Cosh = 20,
+ GLSLstd450Tanh = 21,
+ GLSLstd450Asinh = 22,
+ GLSLstd450Acosh = 23,
+ GLSLstd450Atanh = 24,
+ GLSLstd450Atan2 = 25,
+
+ GLSLstd450Pow = 26,
+ GLSLstd450Exp = 27,
+ GLSLstd450Log = 28,
+ GLSLstd450Exp2 = 29,
+ GLSLstd450Log2 = 30,
+ GLSLstd450Sqrt = 31,
+ GLSLstd450InverseSqrt = 32,
+
+ GLSLstd450Determinant = 33,
+ GLSLstd450MatrixInverse = 34,
+
+ GLSLstd450Modf = 35, // second operand needs an OpVariable to write to
+ GLSLstd450ModfStruct = 36, // no OpVariable operand
+ GLSLstd450FMin = 37,
+ GLSLstd450UMin = 38,
+ GLSLstd450SMin = 39,
+ GLSLstd450FMax = 40,
+ GLSLstd450UMax = 41,
+ GLSLstd450SMax = 42,
+ GLSLstd450FClamp = 43,
+ GLSLstd450UClamp = 44,
+ GLSLstd450SClamp = 45,
+ GLSLstd450FMix = 46,
+ GLSLstd450IMix = 47, // Reserved
+ GLSLstd450Step = 48,
+ GLSLstd450SmoothStep = 49,
+
+ GLSLstd450Fma = 50,
+ GLSLstd450Frexp = 51, // second operand needs an OpVariable to write to
+ GLSLstd450FrexpStruct = 52, // no OpVariable operand
+ GLSLstd450Ldexp = 53,
+
+ GLSLstd450PackSnorm4x8 = 54,
+ GLSLstd450PackUnorm4x8 = 55,
+ GLSLstd450PackSnorm2x16 = 56,
+ GLSLstd450PackUnorm2x16 = 57,
+ GLSLstd450PackHalf2x16 = 58,
+ GLSLstd450PackDouble2x32 = 59,
+ GLSLstd450UnpackSnorm2x16 = 60,
+ GLSLstd450UnpackUnorm2x16 = 61,
+ GLSLstd450UnpackHalf2x16 = 62,
+ GLSLstd450UnpackSnorm4x8 = 63,
+ GLSLstd450UnpackUnorm4x8 = 64,
+ GLSLstd450UnpackDouble2x32 = 65,
+
+ GLSLstd450Length = 66,
+ GLSLstd450Distance = 67,
+ GLSLstd450Cross = 68,
+ GLSLstd450Normalize = 69,
+ GLSLstd450FaceForward = 70,
+ GLSLstd450Reflect = 71,
+ GLSLstd450Refract = 72,
+
+ GLSLstd450FindILsb = 73,
+ GLSLstd450FindSMsb = 74,
+ GLSLstd450FindUMsb = 75,
+
+ GLSLstd450InterpolateAtCentroid = 76,
+ GLSLstd450InterpolateAtSample = 77,
+ GLSLstd450InterpolateAtOffset = 78,
+
+ GLSLstd450NMin = 79,
+ GLSLstd450NMax = 80,
+ GLSLstd450NClamp = 81,
+
+ GLSLstd450Count
+};
+
+#endif // #ifndef GLSLstd450_H
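+
+// Illustrative sketch, not part of the upstream header: a module imports
+// this instruction set once, and each enum value above is then an OpExtInst
+// opcode. In SPIR-V assembly:
+//
+//     %glsl = OpExtInstImport "GLSL.std.450"
+//     %r    = OpExtInst %float %glsl Sqrt %x    ; GLSLstd450Sqrt = 31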
diff --git a/thirdparty/glslang/SPIRV/GlslangToSpv.cpp b/thirdparty/glslang/SPIRV/GlslangToSpv.cpp
new file mode 100644
index 0000000000..4ef6cd7fc1
--- /dev/null
+++ b/thirdparty/glslang/SPIRV/GlslangToSpv.cpp
@@ -0,0 +1,8066 @@
+//
+// Copyright (C) 2014-2016 LunarG, Inc.
+// Copyright (C) 2015-2018 Google, Inc.
+// Copyright (C) 2017 ARM Limited.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+
+//
+// Visit the nodes in the glslang intermediate tree representation to
+// translate them to SPIR-V.
+//
+
+#include "spirv.hpp"
+#include "GlslangToSpv.h"
+#include "SpvBuilder.h"
+namespace spv {
+ #include "GLSL.std.450.h"
+ #include "GLSL.ext.KHR.h"
+ #include "GLSL.ext.EXT.h"
+#ifdef AMD_EXTENSIONS
+ #include "GLSL.ext.AMD.h"
+#endif
+ #include "GLSL.ext.NV.h"
+}
+
+// Glslang includes
+#include "../glslang/MachineIndependent/localintermediate.h"
+#include "../glslang/MachineIndependent/SymbolTable.h"
+#include "../glslang/Include/Common.h"
+#include "../glslang/Include/revision.h"
+
+#include <fstream>
+#include <iomanip>
+#include <list>
+#include <map>
+#include <stack>
+#include <string>
+#include <vector>
+
+namespace {
+
+namespace {
+class SpecConstantOpModeGuard {
+public:
+ SpecConstantOpModeGuard(spv::Builder* builder)
+ : builder_(builder) {
+ previous_flag_ = builder->isInSpecConstCodeGenMode();
+ }
+ ~SpecConstantOpModeGuard() {
+ previous_flag_ ? builder_->setToSpecConstCodeGenMode()
+ : builder_->setToNormalCodeGenMode();
+ }
+ void turnOnSpecConstantOpMode() {
+ builder_->setToSpecConstCodeGenMode();
+ }
+
+private:
+ spv::Builder* builder_;
+ bool previous_flag_;
+};
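+
+// Illustrative usage sketch (not upstream code): the guard above is RAII;
+// its destructor restores whatever mode the builder was in at construction:
+//
+//     {
+//         SpecConstantOpModeGuard guard(&builder);
+//         guard.turnOnSpecConstantOpMode();
+//         // ... generate code foldable into OpSpecConstantOp ...
+//     }   // previous mode restored here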
+
+struct OpDecorations {
+ spv::Decoration precision;
+ spv::Decoration noContraction;
+ spv::Decoration nonUniform;
+};
+
+} // namespace
+
+//
+// The main holder of information for translating glslang to SPIR-V.
+//
+// Derives from the AST walking base class.
+//
+class TGlslangToSpvTraverser : public glslang::TIntermTraverser {
+public:
+ TGlslangToSpvTraverser(unsigned int spvVersion, const glslang::TIntermediate*, spv::SpvBuildLogger* logger,
+ glslang::SpvOptions& options);
+ virtual ~TGlslangToSpvTraverser() { }
+
+ bool visitAggregate(glslang::TVisit, glslang::TIntermAggregate*);
+ bool visitBinary(glslang::TVisit, glslang::TIntermBinary*);
+ void visitConstantUnion(glslang::TIntermConstantUnion*);
+ bool visitSelection(glslang::TVisit, glslang::TIntermSelection*);
+ bool visitSwitch(glslang::TVisit, glslang::TIntermSwitch*);
+ void visitSymbol(glslang::TIntermSymbol* symbol);
+ bool visitUnary(glslang::TVisit, glslang::TIntermUnary*);
+ bool visitLoop(glslang::TVisit, glslang::TIntermLoop*);
+ bool visitBranch(glslang::TVisit visit, glslang::TIntermBranch*);
+
+ void finishSpv();
+ void dumpSpv(std::vector<unsigned int>& out);
+
+protected:
+ TGlslangToSpvTraverser(TGlslangToSpvTraverser&);
+ TGlslangToSpvTraverser& operator=(TGlslangToSpvTraverser&);
+
+ spv::Decoration TranslateInterpolationDecoration(const glslang::TQualifier& qualifier);
+ spv::Decoration TranslateAuxiliaryStorageDecoration(const glslang::TQualifier& qualifier);
+ spv::Decoration TranslateNonUniformDecoration(const glslang::TQualifier& qualifier);
+ spv::Builder::AccessChain::CoherentFlags TranslateCoherent(const glslang::TType& type);
+ spv::MemoryAccessMask TranslateMemoryAccess(const spv::Builder::AccessChain::CoherentFlags &coherentFlags);
+ spv::ImageOperandsMask TranslateImageOperands(const spv::Builder::AccessChain::CoherentFlags &coherentFlags);
+ spv::Scope TranslateMemoryScope(const spv::Builder::AccessChain::CoherentFlags &coherentFlags);
+ spv::BuiltIn TranslateBuiltInDecoration(glslang::TBuiltInVariable, bool memberDeclaration);
+ spv::ImageFormat TranslateImageFormat(const glslang::TType& type);
+ spv::SelectionControlMask TranslateSelectionControl(const glslang::TIntermSelection&) const;
+ spv::SelectionControlMask TranslateSwitchControl(const glslang::TIntermSwitch&) const;
+ spv::LoopControlMask TranslateLoopControl(const glslang::TIntermLoop&, std::vector<unsigned int>& operands) const;
+ spv::StorageClass TranslateStorageClass(const glslang::TType&);
+ void addIndirectionIndexCapabilities(const glslang::TType& baseType, const glslang::TType& indexType);
+ spv::Id createSpvVariable(const glslang::TIntermSymbol*);
+ spv::Id getSampledType(const glslang::TSampler&);
+ spv::Id getInvertedSwizzleType(const glslang::TIntermTyped&);
+ spv::Id createInvertedSwizzle(spv::Decoration precision, const glslang::TIntermTyped&, spv::Id parentResult);
+ void convertSwizzle(const glslang::TIntermAggregate&, std::vector<unsigned>& swizzle);
+ spv::Id convertGlslangToSpvType(const glslang::TType& type, bool forwardReferenceOnly = false);
+ spv::Id convertGlslangToSpvType(const glslang::TType& type, glslang::TLayoutPacking, const glslang::TQualifier&,
+ bool lastBufferBlockMember, bool forwardReferenceOnly = false);
+ bool filterMember(const glslang::TType& member);
+ spv::Id convertGlslangStructToSpvType(const glslang::TType&, const glslang::TTypeList* glslangStruct,
+ glslang::TLayoutPacking, const glslang::TQualifier&);
+ void decorateStructType(const glslang::TType&, const glslang::TTypeList* glslangStruct, glslang::TLayoutPacking,
+ const glslang::TQualifier&, spv::Id);
+ spv::Id makeArraySizeId(const glslang::TArraySizes&, int dim);
+ spv::Id accessChainLoad(const glslang::TType& type);
+ void accessChainStore(const glslang::TType& type, spv::Id rvalue);
+ void multiTypeStore(const glslang::TType&, spv::Id rValue);
+ glslang::TLayoutPacking getExplicitLayout(const glslang::TType& type) const;
+ int getArrayStride(const glslang::TType& arrayType, glslang::TLayoutPacking, glslang::TLayoutMatrix);
+ int getMatrixStride(const glslang::TType& matrixType, glslang::TLayoutPacking, glslang::TLayoutMatrix);
+ void updateMemberOffset(const glslang::TType& structType, const glslang::TType& memberType, int& currentOffset,
+ int& nextOffset, glslang::TLayoutPacking, glslang::TLayoutMatrix);
+ void declareUseOfStructMember(const glslang::TTypeList& members, int glslangMember);
+
+ bool isShaderEntryPoint(const glslang::TIntermAggregate* node);
+ bool writableParam(glslang::TStorageQualifier) const;
+ bool originalParam(glslang::TStorageQualifier, const glslang::TType&, bool implicitThisParam);
+ void makeFunctions(const glslang::TIntermSequence&);
+ void makeGlobalInitializers(const glslang::TIntermSequence&);
+ void visitFunctions(const glslang::TIntermSequence&);
+ void handleFunctionEntry(const glslang::TIntermAggregate* node);
+ void translateArguments(const glslang::TIntermAggregate& node, std::vector<spv::Id>& arguments);
+ void translateArguments(glslang::TIntermUnary& node, std::vector<spv::Id>& arguments);
+ spv::Id createImageTextureFunctionCall(glslang::TIntermOperator* node);
+ spv::Id handleUserFunctionCall(const glslang::TIntermAggregate*);
+
+ spv::Id createBinaryOperation(glslang::TOperator op, OpDecorations&, spv::Id typeId, spv::Id left, spv::Id right,
+ glslang::TBasicType typeProxy, bool reduceComparison = true);
+ spv::Id createBinaryMatrixOperation(spv::Op, OpDecorations&, spv::Id typeId, spv::Id left, spv::Id right);
+ spv::Id createUnaryOperation(glslang::TOperator op, OpDecorations&, spv::Id typeId, spv::Id operand,
+ glslang::TBasicType typeProxy);
+ spv::Id createUnaryMatrixOperation(spv::Op op, OpDecorations&, spv::Id typeId, spv::Id operand,
+ glslang::TBasicType typeProxy);
+ spv::Id createConversion(glslang::TOperator op, OpDecorations&, spv::Id destTypeId, spv::Id operand,
+ glslang::TBasicType typeProxy);
+ spv::Id createIntWidthConversion(glslang::TOperator op, spv::Id operand, int vectorSize);
+ spv::Id makeSmearedConstant(spv::Id constant, int vectorSize);
+ spv::Id createAtomicOperation(glslang::TOperator op, spv::Decoration precision, spv::Id typeId, std::vector<spv::Id>& operands, glslang::TBasicType typeProxy);
+ spv::Id createInvocationsOperation(glslang::TOperator op, spv::Id typeId, std::vector<spv::Id>& operands, glslang::TBasicType typeProxy);
+ spv::Id CreateInvocationsVectorOperation(spv::Op op, spv::GroupOperation groupOperation, spv::Id typeId, std::vector<spv::Id>& operands);
+ spv::Id createSubgroupOperation(glslang::TOperator op, spv::Id typeId, std::vector<spv::Id>& operands, glslang::TBasicType typeProxy);
+ spv::Id createMiscOperation(glslang::TOperator op, spv::Decoration precision, spv::Id typeId, std::vector<spv::Id>& operands, glslang::TBasicType typeProxy);
+ spv::Id createNoArgOperation(glslang::TOperator op, spv::Decoration precision, spv::Id typeId);
+ spv::Id getSymbolId(const glslang::TIntermSymbol* node);
+#ifdef NV_EXTENSIONS
+ void addMeshNVDecoration(spv::Id id, int member, const glslang::TQualifier & qualifier);
+#endif
+ spv::Id createSpvConstant(const glslang::TIntermTyped&);
+ spv::Id createSpvConstantFromConstUnionArray(const glslang::TType& type, const glslang::TConstUnionArray&, int& nextConst, bool specConstant);
+ bool isTrivialLeaf(const glslang::TIntermTyped* node);
+ bool isTrivial(const glslang::TIntermTyped* node);
+ spv::Id createShortCircuit(glslang::TOperator, glslang::TIntermTyped& left, glslang::TIntermTyped& right);
+#ifdef AMD_EXTENSIONS
+ spv::Id getExtBuiltins(const char* name);
+#endif
+ void addPre13Extension(const char* ext)
+ {
+ if (builder.getSpvVersion() < glslang::EShTargetSpv_1_3)
+ builder.addExtension(ext);
+ }
+
+ glslang::SpvOptions& options;
+ spv::Function* shaderEntry;
+ spv::Function* currentFunction;
+ spv::Instruction* entryPoint;
+ int sequenceDepth;
+
+ spv::SpvBuildLogger* logger;
+
+ // There is a 1:1 mapping between a spv builder and a module; this is thread safe
+ spv::Builder builder;
+ bool inEntryPoint;
+ bool entryPointTerminated;
+ bool linkageOnly; // true when visiting the set of objects in the AST present only for establishing interface, whether or not they were statically used
+ std::set<spv::Id> iOSet; // all input/output variables from either static use or declaration of interface
+ const glslang::TIntermediate* glslangIntermediate;
+ spv::Id stdBuiltins;
+ std::unordered_map<const char*, spv::Id> extBuiltinMap;
+
+ std::unordered_map<int, spv::Id> symbolValues;
+ std::unordered_set<int> rValueParameters; // set of formal function parameters passed as rValues, rather than a pointer
+ std::unordered_map<std::string, spv::Function*> functionMap;
+ std::unordered_map<const glslang::TTypeList*, spv::Id> structMap[glslang::ElpCount][glslang::ElmCount];
+ // for mapping glslang block indices to spv indices (e.g., due to hidden members):
+ std::unordered_map<const glslang::TTypeList*, std::vector<int> > memberRemapper;
+ std::stack<bool> breakForLoop; // false means break for switch
+ std::unordered_map<std::string, const glslang::TIntermSymbol*> counterOriginator;
+ // Map pointee types for EbtReference to their forward pointers
+ std::map<const glslang::TType *, spv::Id> forwardPointers;
+};
+
+//
+// Helper functions for translating glslang representations to SPIR-V enumerants.
+//
+
+// Translate glslang profile to SPIR-V source language.
+spv::SourceLanguage TranslateSourceLanguage(glslang::EShSource source, EProfile profile)
+{
+ switch (source) {
+ case glslang::EShSourceGlsl:
+ switch (profile) {
+ case ENoProfile:
+ case ECoreProfile:
+ case ECompatibilityProfile:
+ return spv::SourceLanguageGLSL;
+ case EEsProfile:
+ return spv::SourceLanguageESSL;
+ default:
+ return spv::SourceLanguageUnknown;
+ }
+ case glslang::EShSourceHlsl:
+ return spv::SourceLanguageHLSL;
+ default:
+ return spv::SourceLanguageUnknown;
+ }
+}
+
+// Translate glslang language (stage) to SPIR-V execution model.
+spv::ExecutionModel TranslateExecutionModel(EShLanguage stage)
+{
+ switch (stage) {
+ case EShLangVertex: return spv::ExecutionModelVertex;
+ case EShLangTessControl: return spv::ExecutionModelTessellationControl;
+ case EShLangTessEvaluation: return spv::ExecutionModelTessellationEvaluation;
+ case EShLangGeometry: return spv::ExecutionModelGeometry;
+ case EShLangFragment: return spv::ExecutionModelFragment;
+ case EShLangCompute: return spv::ExecutionModelGLCompute;
+#ifdef NV_EXTENSIONS
+ case EShLangRayGenNV: return spv::ExecutionModelRayGenerationNV;
+ case EShLangIntersectNV: return spv::ExecutionModelIntersectionNV;
+ case EShLangAnyHitNV: return spv::ExecutionModelAnyHitNV;
+ case EShLangClosestHitNV: return spv::ExecutionModelClosestHitNV;
+ case EShLangMissNV: return spv::ExecutionModelMissNV;
+ case EShLangCallableNV: return spv::ExecutionModelCallableNV;
+ case EShLangTaskNV: return spv::ExecutionModelTaskNV;
+ case EShLangMeshNV: return spv::ExecutionModelMeshNV;
+#endif
+ default:
+ assert(0);
+ return spv::ExecutionModelFragment;
+ }
+}
+
+// Translate glslang sampler type to SPIR-V dimensionality.
+spv::Dim TranslateDimensionality(const glslang::TSampler& sampler)
+{
+ switch (sampler.dim) {
+ case glslang::Esd1D: return spv::Dim1D;
+ case glslang::Esd2D: return spv::Dim2D;
+ case glslang::Esd3D: return spv::Dim3D;
+ case glslang::EsdCube: return spv::DimCube;
+ case glslang::EsdRect: return spv::DimRect;
+ case glslang::EsdBuffer: return spv::DimBuffer;
+ case glslang::EsdSubpass: return spv::DimSubpassData;
+ default:
+ assert(0);
+ return spv::Dim2D;
+ }
+}
+
+// Translate glslang precision to SPIR-V precision decorations.
+spv::Decoration TranslatePrecisionDecoration(glslang::TPrecisionQualifier glslangPrecision)
+{
+ switch (glslangPrecision) {
+ case glslang::EpqLow: return spv::DecorationRelaxedPrecision;
+ case glslang::EpqMedium: return spv::DecorationRelaxedPrecision;
+ default:
+ return spv::NoPrecision;
+ }
+}
+
+// Translate glslang type to SPIR-V precision decorations.
+spv::Decoration TranslatePrecisionDecoration(const glslang::TType& type)
+{
+ return TranslatePrecisionDecoration(type.getQualifier().precision);
+}
+
+// Translate glslang type to SPIR-V block decorations.
+spv::Decoration TranslateBlockDecoration(const glslang::TType& type, bool useStorageBuffer)
+{
+ if (type.getBasicType() == glslang::EbtBlock) {
+ switch (type.getQualifier().storage) {
+ case glslang::EvqUniform: return spv::DecorationBlock;
+ case glslang::EvqBuffer: return useStorageBuffer ? spv::DecorationBlock : spv::DecorationBufferBlock;
+ case glslang::EvqVaryingIn: return spv::DecorationBlock;
+ case glslang::EvqVaryingOut: return spv::DecorationBlock;
+#ifdef NV_EXTENSIONS
+ case glslang::EvqPayloadNV: return spv::DecorationBlock;
+ case glslang::EvqPayloadInNV: return spv::DecorationBlock;
+ case glslang::EvqHitAttrNV: return spv::DecorationBlock;
+ case glslang::EvqCallableDataNV: return spv::DecorationBlock;
+ case glslang::EvqCallableDataInNV: return spv::DecorationBlock;
+#endif
+ default:
+ assert(0);
+ break;
+ }
+ }
+
+ return spv::DecorationMax;
+}
+
+// Translate glslang type to SPIR-V memory decorations.
+void TranslateMemoryDecoration(const glslang::TQualifier& qualifier, std::vector<spv::Decoration>& memory, bool useVulkanMemoryModel)
+{
+ if (!useVulkanMemoryModel) {
+ if (qualifier.coherent)
+ memory.push_back(spv::DecorationCoherent);
+ if (qualifier.volatil) {
+ memory.push_back(spv::DecorationVolatile);
+ memory.push_back(spv::DecorationCoherent);
+ }
+ }
+ if (qualifier.restrict)
+ memory.push_back(spv::DecorationRestrict);
+ if (qualifier.readonly)
+ memory.push_back(spv::DecorationNonWritable);
+ if (qualifier.writeonly)
+ memory.push_back(spv::DecorationNonReadable);
+}
+
+// Translate glslang type to SPIR-V layout decorations.
+spv::Decoration TranslateLayoutDecoration(const glslang::TType& type, glslang::TLayoutMatrix matrixLayout)
+{
+ if (type.isMatrix()) {
+ switch (matrixLayout) {
+ case glslang::ElmRowMajor:
+ return spv::DecorationRowMajor;
+ case glslang::ElmColumnMajor:
+ return spv::DecorationColMajor;
+ default:
+ // opaque layouts don't need a majorness
+ return spv::DecorationMax;
+ }
+ } else {
+ switch (type.getBasicType()) {
+ default:
+ return spv::DecorationMax;
+ break;
+ case glslang::EbtBlock:
+ switch (type.getQualifier().storage) {
+ case glslang::EvqUniform:
+ case glslang::EvqBuffer:
+ switch (type.getQualifier().layoutPacking) {
+ case glslang::ElpShared: return spv::DecorationGLSLShared;
+ case glslang::ElpPacked: return spv::DecorationGLSLPacked;
+ default:
+ return spv::DecorationMax;
+ }
+ case glslang::EvqVaryingIn:
+ case glslang::EvqVaryingOut:
+ if (type.getQualifier().isTaskMemory()) {
+ switch (type.getQualifier().layoutPacking) {
+ case glslang::ElpShared: return spv::DecorationGLSLShared;
+ case glslang::ElpPacked: return spv::DecorationGLSLPacked;
+ default: break;
+ }
+ } else {
+ assert(type.getQualifier().layoutPacking == glslang::ElpNone);
+ }
+ return spv::DecorationMax;
+#ifdef NV_EXTENSIONS
+ case glslang::EvqPayloadNV:
+ case glslang::EvqPayloadInNV:
+ case glslang::EvqHitAttrNV:
+ case glslang::EvqCallableDataNV:
+ case glslang::EvqCallableDataInNV:
+ return spv::DecorationMax;
+#endif
+ default:
+ assert(0);
+ return spv::DecorationMax;
+ }
+ }
+ }
+}
+
+// Translate glslang type to SPIR-V interpolation decorations.
+// Returns spv::DecorationMax when no decoration
+// should be applied.
+spv::Decoration TGlslangToSpvTraverser::TranslateInterpolationDecoration(const glslang::TQualifier& qualifier)
+{
+ if (qualifier.smooth)
+ // Smooth decoration doesn't exist in SPIR-V 1.0
+ return spv::DecorationMax;
+ else if (qualifier.nopersp)
+ return spv::DecorationNoPerspective;
+ else if (qualifier.flat)
+ return spv::DecorationFlat;
+#ifdef AMD_EXTENSIONS
+ else if (qualifier.explicitInterp) {
+ builder.addExtension(spv::E_SPV_AMD_shader_explicit_vertex_parameter);
+ return spv::DecorationExplicitInterpAMD;
+ }
+#endif
+ else
+ return spv::DecorationMax;
+}
+
+// Translate glslang type to SPIR-V auxiliary storage decorations.
+// Returns spv::DecorationMax when no decoration
+// should be applied.
+spv::Decoration TGlslangToSpvTraverser::TranslateAuxiliaryStorageDecoration(const glslang::TQualifier& qualifier)
+{
+ if (qualifier.patch)
+ return spv::DecorationPatch;
+ else if (qualifier.centroid)
+ return spv::DecorationCentroid;
+ else if (qualifier.sample) {
+ builder.addCapability(spv::CapabilitySampleRateShading);
+ return spv::DecorationSample;
+ } else
+ return spv::DecorationMax;
+}
+
+// If glslang type is invariant, return SPIR-V invariant decoration.
+spv::Decoration TranslateInvariantDecoration(const glslang::TQualifier& qualifier)
+{
+ if (qualifier.invariant)
+ return spv::DecorationInvariant;
+ else
+ return spv::DecorationMax;
+}
+
+// If glslang type is noContraction, return SPIR-V NoContraction decoration.
+spv::Decoration TranslateNoContractionDecoration(const glslang::TQualifier& qualifier)
+{
+ if (qualifier.noContraction)
+ return spv::DecorationNoContraction;
+ else
+ return spv::DecorationMax;
+}
+
+// If glslang type is nonUniform, return SPIR-V NonUniform decoration.
+spv::Decoration TGlslangToSpvTraverser::TranslateNonUniformDecoration(const glslang::TQualifier& qualifier)
+{
+ if (qualifier.isNonUniform()) {
+ builder.addExtension("SPV_EXT_descriptor_indexing");
+ builder.addCapability(spv::CapabilityShaderNonUniformEXT);
+ return spv::DecorationNonUniformEXT;
+ } else
+ return spv::DecorationMax;
+}
+
+spv::MemoryAccessMask TGlslangToSpvTraverser::TranslateMemoryAccess(const spv::Builder::AccessChain::CoherentFlags &coherentFlags)
+{
+ if (!glslangIntermediate->usingVulkanMemoryModel() || coherentFlags.isImage) {
+ return spv::MemoryAccessMaskNone;
+ }
+ spv::MemoryAccessMask mask = spv::MemoryAccessMaskNone;
+ if (coherentFlags.volatil ||
+ coherentFlags.coherent ||
+ coherentFlags.devicecoherent ||
+ coherentFlags.queuefamilycoherent ||
+ coherentFlags.workgroupcoherent ||
+ coherentFlags.subgroupcoherent) {
+ mask = mask | spv::MemoryAccessMakePointerAvailableKHRMask |
+ spv::MemoryAccessMakePointerVisibleKHRMask;
+ }
+ if (coherentFlags.nonprivate) {
+ mask = mask | spv::MemoryAccessNonPrivatePointerKHRMask;
+ }
+ if (coherentFlags.volatil) {
+ mask = mask | spv::MemoryAccessVolatileMask;
+ }
+ if (mask != spv::MemoryAccessMaskNone) {
+ builder.addCapability(spv::CapabilityVulkanMemoryModelKHR);
+ }
+ return mask;
+}
+
+spv::ImageOperandsMask TGlslangToSpvTraverser::TranslateImageOperands(const spv::Builder::AccessChain::CoherentFlags &coherentFlags)
+{
+ if (!glslangIntermediate->usingVulkanMemoryModel()) {
+ return spv::ImageOperandsMaskNone;
+ }
+ spv::ImageOperandsMask mask = spv::ImageOperandsMaskNone;
+ if (coherentFlags.volatil ||
+ coherentFlags.coherent ||
+ coherentFlags.devicecoherent ||
+ coherentFlags.queuefamilycoherent ||
+ coherentFlags.workgroupcoherent ||
+ coherentFlags.subgroupcoherent) {
+ mask = mask | spv::ImageOperandsMakeTexelAvailableKHRMask |
+ spv::ImageOperandsMakeTexelVisibleKHRMask;
+ }
+ if (coherentFlags.nonprivate) {
+ mask = mask | spv::ImageOperandsNonPrivateTexelKHRMask;
+ }
+ if (coherentFlags.volatil) {
+ mask = mask | spv::ImageOperandsVolatileTexelKHRMask;
+ }
+ if (mask != spv::ImageOperandsMaskNone) {
+ builder.addCapability(spv::CapabilityVulkanMemoryModelKHR);
+ }
+ return mask;
+}
+
+spv::Builder::AccessChain::CoherentFlags TGlslangToSpvTraverser::TranslateCoherent(const glslang::TType& type)
+{
+ spv::Builder::AccessChain::CoherentFlags flags;
+ flags.coherent = type.getQualifier().coherent;
+ flags.devicecoherent = type.getQualifier().devicecoherent;
+ flags.queuefamilycoherent = type.getQualifier().queuefamilycoherent;
+ // shared variables are implicitly workgroupcoherent in GLSL.
+ flags.workgroupcoherent = type.getQualifier().workgroupcoherent ||
+ type.getQualifier().storage == glslang::EvqShared;
+ flags.subgroupcoherent = type.getQualifier().subgroupcoherent;
+ flags.volatil = type.getQualifier().volatil;
+ // *coherent variables are implicitly nonprivate in GLSL
+ flags.nonprivate = type.getQualifier().nonprivate ||
+ flags.subgroupcoherent ||
+ flags.workgroupcoherent ||
+ flags.queuefamilycoherent ||
+ flags.devicecoherent ||
+ flags.coherent ||
+ flags.volatil;
+ flags.isImage = type.getBasicType() == glslang::EbtSampler;
+ return flags;
+}
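+
+// Illustrative sketch (not upstream code): the two implicit promotions above
+// mean that, in GLSL,
+//
+//     shared float s;                    // implicitly workgroupcoherent
+//     coherent buffer B { float x; };    // implicitly nonprivate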
+
+spv::Scope TGlslangToSpvTraverser::TranslateMemoryScope(const spv::Builder::AccessChain::CoherentFlags &coherentFlags)
+{
+ spv::Scope scope;
+ if (coherentFlags.volatil || coherentFlags.coherent) {
+ // coherent defaults to Device scope in the old model, QueueFamilyKHR scope in the new model
+ scope = glslangIntermediate->usingVulkanMemoryModel() ? spv::ScopeQueueFamilyKHR : spv::ScopeDevice;
+ } else if (coherentFlags.devicecoherent) {
+ scope = spv::ScopeDevice;
+ } else if (coherentFlags.queuefamilycoherent) {
+ scope = spv::ScopeQueueFamilyKHR;
+ } else if (coherentFlags.workgroupcoherent) {
+ scope = spv::ScopeWorkgroup;
+ } else if (coherentFlags.subgroupcoherent) {
+ scope = spv::ScopeSubgroup;
+ } else {
+ scope = spv::ScopeMax;
+ }
+ if (glslangIntermediate->usingVulkanMemoryModel() && scope == spv::ScopeDevice) {
+ builder.addCapability(spv::CapabilityVulkanMemoryModelDeviceScopeKHR);
+ }
+ return scope;
+}
+
+// Translate a glslang built-in variable to a SPIR-V built in decoration. Also generate
+// associated capabilities when required. For some built-in variables, a capability
+// is generated only when using the variable in an executable instruction, but not when
+// just declaring a struct member variable with it. This is true for PointSize,
+// ClipDistance, and CullDistance.
+spv::BuiltIn TGlslangToSpvTraverser::TranslateBuiltInDecoration(glslang::TBuiltInVariable builtIn, bool memberDeclaration)
+{
+ switch (builtIn) {
+ case glslang::EbvPointSize:
+ // Defer adding the capability until the built-in is actually used.
+ if (! memberDeclaration) {
+ switch (glslangIntermediate->getStage()) {
+ case EShLangGeometry:
+ builder.addCapability(spv::CapabilityGeometryPointSize);
+ break;
+ case EShLangTessControl:
+ case EShLangTessEvaluation:
+ builder.addCapability(spv::CapabilityTessellationPointSize);
+ break;
+ default:
+ break;
+ }
+ }
+ return spv::BuiltInPointSize;
+
+ // These *Distance capabilities logically belong here, but if the member is declared and
+ // then never used, consumers of SPIR-V prefer the capability not be declared.
+ // They are now generated when used, rather than here when declared.
+ // Potentially, the specification should be clearer about the minimum
+ // use needed to trigger the capability.
+ //
+ case glslang::EbvClipDistance:
+ if (!memberDeclaration)
+ builder.addCapability(spv::CapabilityClipDistance);
+ return spv::BuiltInClipDistance;
+
+ case glslang::EbvCullDistance:
+ if (!memberDeclaration)
+ builder.addCapability(spv::CapabilityCullDistance);
+ return spv::BuiltInCullDistance;
+
+ case glslang::EbvViewportIndex:
+ builder.addCapability(spv::CapabilityMultiViewport);
+ if (glslangIntermediate->getStage() == EShLangVertex ||
+ glslangIntermediate->getStage() == EShLangTessControl ||
+ glslangIntermediate->getStage() == EShLangTessEvaluation) {
+
+ builder.addExtension(spv::E_SPV_EXT_shader_viewport_index_layer);
+ builder.addCapability(spv::CapabilityShaderViewportIndexLayerEXT);
+ }
+ return spv::BuiltInViewportIndex;
+
+ case glslang::EbvSampleId:
+ builder.addCapability(spv::CapabilitySampleRateShading);
+ return spv::BuiltInSampleId;
+
+ case glslang::EbvSamplePosition:
+ builder.addCapability(spv::CapabilitySampleRateShading);
+ return spv::BuiltInSamplePosition;
+
+ case glslang::EbvSampleMask:
+ return spv::BuiltInSampleMask;
+
+ case glslang::EbvLayer:
+#ifdef NV_EXTENSIONS
+ if (glslangIntermediate->getStage() == EShLangMeshNV) {
+ return spv::BuiltInLayer;
+ }
+#endif
+ builder.addCapability(spv::CapabilityGeometry);
+ if (glslangIntermediate->getStage() == EShLangVertex ||
+ glslangIntermediate->getStage() == EShLangTessControl ||
+ glslangIntermediate->getStage() == EShLangTessEvaluation) {
+
+ builder.addExtension(spv::E_SPV_EXT_shader_viewport_index_layer);
+ builder.addCapability(spv::CapabilityShaderViewportIndexLayerEXT);
+ }
+ return spv::BuiltInLayer;
+
+ case glslang::EbvPosition: return spv::BuiltInPosition;
+ case glslang::EbvVertexId: return spv::BuiltInVertexId;
+ case glslang::EbvInstanceId: return spv::BuiltInInstanceId;
+ case glslang::EbvVertexIndex: return spv::BuiltInVertexIndex;
+ case glslang::EbvInstanceIndex: return spv::BuiltInInstanceIndex;
+
+ case glslang::EbvBaseVertex:
+ addPre13Extension(spv::E_SPV_KHR_shader_draw_parameters);
+ builder.addCapability(spv::CapabilityDrawParameters);
+ return spv::BuiltInBaseVertex;
+
+ case glslang::EbvBaseInstance:
+ addPre13Extension(spv::E_SPV_KHR_shader_draw_parameters);
+ builder.addCapability(spv::CapabilityDrawParameters);
+ return spv::BuiltInBaseInstance;
+
+ case glslang::EbvDrawId:
+ addPre13Extension(spv::E_SPV_KHR_shader_draw_parameters);
+ builder.addCapability(spv::CapabilityDrawParameters);
+ return spv::BuiltInDrawIndex;
+
+ case glslang::EbvPrimitiveId:
+ if (glslangIntermediate->getStage() == EShLangFragment)
+ builder.addCapability(spv::CapabilityGeometry);
+ return spv::BuiltInPrimitiveId;
+
+ case glslang::EbvFragStencilRef:
+ builder.addExtension(spv::E_SPV_EXT_shader_stencil_export);
+ builder.addCapability(spv::CapabilityStencilExportEXT);
+ return spv::BuiltInFragStencilRefEXT;
+
+ case glslang::EbvInvocationId: return spv::BuiltInInvocationId;
+ case glslang::EbvTessLevelInner: return spv::BuiltInTessLevelInner;
+ case glslang::EbvTessLevelOuter: return spv::BuiltInTessLevelOuter;
+ case glslang::EbvTessCoord: return spv::BuiltInTessCoord;
+ case glslang::EbvPatchVertices: return spv::BuiltInPatchVertices;
+ case glslang::EbvFragCoord: return spv::BuiltInFragCoord;
+ case glslang::EbvPointCoord: return spv::BuiltInPointCoord;
+ case glslang::EbvFace: return spv::BuiltInFrontFacing;
+ case glslang::EbvFragDepth: return spv::BuiltInFragDepth;
+ case glslang::EbvHelperInvocation: return spv::BuiltInHelperInvocation;
+ case glslang::EbvNumWorkGroups: return spv::BuiltInNumWorkgroups;
+ case glslang::EbvWorkGroupSize: return spv::BuiltInWorkgroupSize;
+ case glslang::EbvWorkGroupId: return spv::BuiltInWorkgroupId;
+ case glslang::EbvLocalInvocationId: return spv::BuiltInLocalInvocationId;
+ case glslang::EbvLocalInvocationIndex: return spv::BuiltInLocalInvocationIndex;
+ case glslang::EbvGlobalInvocationId: return spv::BuiltInGlobalInvocationId;
+
+ case glslang::EbvSubGroupSize:
+ builder.addExtension(spv::E_SPV_KHR_shader_ballot);
+ builder.addCapability(spv::CapabilitySubgroupBallotKHR);
+ return spv::BuiltInSubgroupSize;
+
+ case glslang::EbvSubGroupInvocation:
+ builder.addExtension(spv::E_SPV_KHR_shader_ballot);
+ builder.addCapability(spv::CapabilitySubgroupBallotKHR);
+ return spv::BuiltInSubgroupLocalInvocationId;
+
+ case glslang::EbvSubGroupEqMask:
+ builder.addExtension(spv::E_SPV_KHR_shader_ballot);
+ builder.addCapability(spv::CapabilitySubgroupBallotKHR);
+ return spv::BuiltInSubgroupEqMaskKHR;
+
+ case glslang::EbvSubGroupGeMask:
+ builder.addExtension(spv::E_SPV_KHR_shader_ballot);
+ builder.addCapability(spv::CapabilitySubgroupBallotKHR);
+ return spv::BuiltInSubgroupGeMaskKHR;
+
+ case glslang::EbvSubGroupGtMask:
+ builder.addExtension(spv::E_SPV_KHR_shader_ballot);
+ builder.addCapability(spv::CapabilitySubgroupBallotKHR);
+ return spv::BuiltInSubgroupGtMaskKHR;
+
+ case glslang::EbvSubGroupLeMask:
+ builder.addExtension(spv::E_SPV_KHR_shader_ballot);
+ builder.addCapability(spv::CapabilitySubgroupBallotKHR);
+ return spv::BuiltInSubgroupLeMaskKHR;
+
+ case glslang::EbvSubGroupLtMask:
+ builder.addExtension(spv::E_SPV_KHR_shader_ballot);
+ builder.addCapability(spv::CapabilitySubgroupBallotKHR);
+ return spv::BuiltInSubgroupLtMaskKHR;
+
+ case glslang::EbvNumSubgroups:
+ builder.addCapability(spv::CapabilityGroupNonUniform);
+ return spv::BuiltInNumSubgroups;
+
+ case glslang::EbvSubgroupID:
+ builder.addCapability(spv::CapabilityGroupNonUniform);
+ return spv::BuiltInSubgroupId;
+
+ case glslang::EbvSubgroupSize2:
+ builder.addCapability(spv::CapabilityGroupNonUniform);
+ return spv::BuiltInSubgroupSize;
+
+ case glslang::EbvSubgroupInvocation2:
+ builder.addCapability(spv::CapabilityGroupNonUniform);
+ return spv::BuiltInSubgroupLocalInvocationId;
+
+ case glslang::EbvSubgroupEqMask2:
+ builder.addCapability(spv::CapabilityGroupNonUniform);
+ builder.addCapability(spv::CapabilityGroupNonUniformBallot);
+ return spv::BuiltInSubgroupEqMask;
+
+ case glslang::EbvSubgroupGeMask2:
+ builder.addCapability(spv::CapabilityGroupNonUniform);
+ builder.addCapability(spv::CapabilityGroupNonUniformBallot);
+ return spv::BuiltInSubgroupGeMask;
+
+ case glslang::EbvSubgroupGtMask2:
+ builder.addCapability(spv::CapabilityGroupNonUniform);
+ builder.addCapability(spv::CapabilityGroupNonUniformBallot);
+ return spv::BuiltInSubgroupGtMask;
+
+ case glslang::EbvSubgroupLeMask2:
+ builder.addCapability(spv::CapabilityGroupNonUniform);
+ builder.addCapability(spv::CapabilityGroupNonUniformBallot);
+ return spv::BuiltInSubgroupLeMask;
+
+ case glslang::EbvSubgroupLtMask2:
+ builder.addCapability(spv::CapabilityGroupNonUniform);
+ builder.addCapability(spv::CapabilityGroupNonUniformBallot);
+ return spv::BuiltInSubgroupLtMask;
+#ifdef AMD_EXTENSIONS
+ case glslang::EbvBaryCoordNoPersp:
+ builder.addExtension(spv::E_SPV_AMD_shader_explicit_vertex_parameter);
+ return spv::BuiltInBaryCoordNoPerspAMD;
+
+ case glslang::EbvBaryCoordNoPerspCentroid:
+ builder.addExtension(spv::E_SPV_AMD_shader_explicit_vertex_parameter);
+ return spv::BuiltInBaryCoordNoPerspCentroidAMD;
+
+ case glslang::EbvBaryCoordNoPerspSample:
+ builder.addExtension(spv::E_SPV_AMD_shader_explicit_vertex_parameter);
+ return spv::BuiltInBaryCoordNoPerspSampleAMD;
+
+ case glslang::EbvBaryCoordSmooth:
+ builder.addExtension(spv::E_SPV_AMD_shader_explicit_vertex_parameter);
+ return spv::BuiltInBaryCoordSmoothAMD;
+
+ case glslang::EbvBaryCoordSmoothCentroid:
+ builder.addExtension(spv::E_SPV_AMD_shader_explicit_vertex_parameter);
+ return spv::BuiltInBaryCoordSmoothCentroidAMD;
+
+ case glslang::EbvBaryCoordSmoothSample:
+ builder.addExtension(spv::E_SPV_AMD_shader_explicit_vertex_parameter);
+ return spv::BuiltInBaryCoordSmoothSampleAMD;
+
+ case glslang::EbvBaryCoordPullModel:
+ builder.addExtension(spv::E_SPV_AMD_shader_explicit_vertex_parameter);
+ return spv::BuiltInBaryCoordPullModelAMD;
+#endif
+
+ case glslang::EbvDeviceIndex:
+ addPre13Extension(spv::E_SPV_KHR_device_group);
+ builder.addCapability(spv::CapabilityDeviceGroup);
+ return spv::BuiltInDeviceIndex;
+
+ case glslang::EbvViewIndex:
+ addPre13Extension(spv::E_SPV_KHR_multiview);
+ builder.addCapability(spv::CapabilityMultiView);
+ return spv::BuiltInViewIndex;
+
+ case glslang::EbvFragSizeEXT:
+ builder.addExtension(spv::E_SPV_EXT_fragment_invocation_density);
+ builder.addCapability(spv::CapabilityFragmentDensityEXT);
+ return spv::BuiltInFragSizeEXT;
+
+ case glslang::EbvFragInvocationCountEXT:
+ builder.addExtension(spv::E_SPV_EXT_fragment_invocation_density);
+ builder.addCapability(spv::CapabilityFragmentDensityEXT);
+ return spv::BuiltInFragInvocationCountEXT;
+
+#ifdef NV_EXTENSIONS
+ case glslang::EbvViewportMaskNV:
+ if (!memberDeclaration) {
+ builder.addExtension(spv::E_SPV_NV_viewport_array2);
+ builder.addCapability(spv::CapabilityShaderViewportMaskNV);
+ }
+ return spv::BuiltInViewportMaskNV;
+ case glslang::EbvSecondaryPositionNV:
+ if (!memberDeclaration) {
+ builder.addExtension(spv::E_SPV_NV_stereo_view_rendering);
+ builder.addCapability(spv::CapabilityShaderStereoViewNV);
+ }
+ return spv::BuiltInSecondaryPositionNV;
+ case glslang::EbvSecondaryViewportMaskNV:
+ if (!memberDeclaration) {
+ builder.addExtension(spv::E_SPV_NV_stereo_view_rendering);
+ builder.addCapability(spv::CapabilityShaderStereoViewNV);
+ }
+ return spv::BuiltInSecondaryViewportMaskNV;
+ case glslang::EbvPositionPerViewNV:
+ if (!memberDeclaration) {
+ builder.addExtension(spv::E_SPV_NVX_multiview_per_view_attributes);
+ builder.addCapability(spv::CapabilityPerViewAttributesNV);
+ }
+ return spv::BuiltInPositionPerViewNV;
+ case glslang::EbvViewportMaskPerViewNV:
+ if (!memberDeclaration) {
+ builder.addExtension(spv::E_SPV_NVX_multiview_per_view_attributes);
+ builder.addCapability(spv::CapabilityPerViewAttributesNV);
+ }
+ return spv::BuiltInViewportMaskPerViewNV;
+ case glslang::EbvFragFullyCoveredNV:
+ builder.addExtension(spv::E_SPV_EXT_fragment_fully_covered);
+ builder.addCapability(spv::CapabilityFragmentFullyCoveredEXT);
+ return spv::BuiltInFullyCoveredEXT;
+ case glslang::EbvFragmentSizeNV:
+ builder.addExtension(spv::E_SPV_NV_shading_rate);
+ builder.addCapability(spv::CapabilityShadingRateNV);
+ return spv::BuiltInFragmentSizeNV;
+ case glslang::EbvInvocationsPerPixelNV:
+ builder.addExtension(spv::E_SPV_NV_shading_rate);
+ builder.addCapability(spv::CapabilityShadingRateNV);
+ return spv::BuiltInInvocationsPerPixelNV;
+
+ // raytracing
+ case glslang::EbvLaunchIdNV:
+ return spv::BuiltInLaunchIdNV;
+ case glslang::EbvLaunchSizeNV:
+ return spv::BuiltInLaunchSizeNV;
+ case glslang::EbvWorldRayOriginNV:
+ return spv::BuiltInWorldRayOriginNV;
+ case glslang::EbvWorldRayDirectionNV:
+ return spv::BuiltInWorldRayDirectionNV;
+ case glslang::EbvObjectRayOriginNV:
+ return spv::BuiltInObjectRayOriginNV;
+ case glslang::EbvObjectRayDirectionNV:
+ return spv::BuiltInObjectRayDirectionNV;
+ case glslang::EbvRayTminNV:
+ return spv::BuiltInRayTminNV;
+ case glslang::EbvRayTmaxNV:
+ return spv::BuiltInRayTmaxNV;
+ case glslang::EbvInstanceCustomIndexNV:
+ return spv::BuiltInInstanceCustomIndexNV;
+ case glslang::EbvHitTNV:
+ return spv::BuiltInHitTNV;
+ case glslang::EbvHitKindNV:
+ return spv::BuiltInHitKindNV;
+ case glslang::EbvObjectToWorldNV:
+ return spv::BuiltInObjectToWorldNV;
+ case glslang::EbvWorldToObjectNV:
+ return spv::BuiltInWorldToObjectNV;
+ case glslang::EbvIncomingRayFlagsNV:
+ return spv::BuiltInIncomingRayFlagsNV;
+ case glslang::EbvBaryCoordNV:
+ builder.addExtension(spv::E_SPV_NV_fragment_shader_barycentric);
+ builder.addCapability(spv::CapabilityFragmentBarycentricNV);
+ return spv::BuiltInBaryCoordNV;
+ case glslang::EbvBaryCoordNoPerspNV:
+ builder.addExtension(spv::E_SPV_NV_fragment_shader_barycentric);
+ builder.addCapability(spv::CapabilityFragmentBarycentricNV);
+ return spv::BuiltInBaryCoordNoPerspNV;
+ case glslang::EbvTaskCountNV:
+ return spv::BuiltInTaskCountNV;
+ case glslang::EbvPrimitiveCountNV:
+ return spv::BuiltInPrimitiveCountNV;
+ case glslang::EbvPrimitiveIndicesNV:
+ return spv::BuiltInPrimitiveIndicesNV;
+ case glslang::EbvClipDistancePerViewNV:
+ return spv::BuiltInClipDistancePerViewNV;
+ case glslang::EbvCullDistancePerViewNV:
+ return spv::BuiltInCullDistancePerViewNV;
+ case glslang::EbvLayerPerViewNV:
+ return spv::BuiltInLayerPerViewNV;
+ case glslang::EbvMeshViewCountNV:
+ return spv::BuiltInMeshViewCountNV;
+ case glslang::EbvMeshViewIndicesNV:
+ return spv::BuiltInMeshViewIndicesNV;
+#endif
+ default:
+ return spv::BuiltInMax;
+ }
+}
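+
+// Illustrative sketch (not upstream code): the memberDeclaration parameter
+// above defers capabilities for PointSize/ClipDistance/CullDistance, so a
+// declaration alone adds nothing while a use does. E.g. in a tessellation
+// stage:
+//
+//     out gl_PerVertex { vec4 gl_Position; float gl_PointSize; };  // no capability
+//     void main() { gl_PointSize = 1.0; }  // adds CapabilityTessellationPointSize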
+
+// Translate glslang image layout format to SPIR-V image format.
+spv::ImageFormat TGlslangToSpvTraverser::TranslateImageFormat(const glslang::TType& type)
+{
+ assert(type.getBasicType() == glslang::EbtSampler);
+
+ // Check for capabilities
+ switch (type.getQualifier().layoutFormat) {
+ case glslang::ElfRg32f:
+ case glslang::ElfRg16f:
+ case glslang::ElfR11fG11fB10f:
+ case glslang::ElfR16f:
+ case glslang::ElfRgba16:
+ case glslang::ElfRgb10A2:
+ case glslang::ElfRg16:
+ case glslang::ElfRg8:
+ case glslang::ElfR16:
+ case glslang::ElfR8:
+ case glslang::ElfRgba16Snorm:
+ case glslang::ElfRg16Snorm:
+ case glslang::ElfRg8Snorm:
+ case glslang::ElfR16Snorm:
+ case glslang::ElfR8Snorm:
+
+ case glslang::ElfRg32i:
+ case glslang::ElfRg16i:
+ case glslang::ElfRg8i:
+ case glslang::ElfR16i:
+ case glslang::ElfR8i:
+
+ case glslang::ElfRgb10a2ui:
+ case glslang::ElfRg32ui:
+ case glslang::ElfRg16ui:
+ case glslang::ElfRg8ui:
+ case glslang::ElfR16ui:
+ case glslang::ElfR8ui:
+ builder.addCapability(spv::CapabilityStorageImageExtendedFormats);
+ break;
+
+ default:
+ break;
+ }
+
+ // do the translation
+ switch (type.getQualifier().layoutFormat) {
+ case glslang::ElfNone: return spv::ImageFormatUnknown;
+ case glslang::ElfRgba32f: return spv::ImageFormatRgba32f;
+ case glslang::ElfRgba16f: return spv::ImageFormatRgba16f;
+ case glslang::ElfR32f: return spv::ImageFormatR32f;
+ case glslang::ElfRgba8: return spv::ImageFormatRgba8;
+ case glslang::ElfRgba8Snorm: return spv::ImageFormatRgba8Snorm;
+ case glslang::ElfRg32f: return spv::ImageFormatRg32f;
+ case glslang::ElfRg16f: return spv::ImageFormatRg16f;
+ case glslang::ElfR11fG11fB10f: return spv::ImageFormatR11fG11fB10f;
+ case glslang::ElfR16f: return spv::ImageFormatR16f;
+ case glslang::ElfRgba16: return spv::ImageFormatRgba16;
+ case glslang::ElfRgb10A2: return spv::ImageFormatRgb10A2;
+ case glslang::ElfRg16: return spv::ImageFormatRg16;
+ case glslang::ElfRg8: return spv::ImageFormatRg8;
+ case glslang::ElfR16: return spv::ImageFormatR16;
+ case glslang::ElfR8: return spv::ImageFormatR8;
+ case glslang::ElfRgba16Snorm: return spv::ImageFormatRgba16Snorm;
+ case glslang::ElfRg16Snorm: return spv::ImageFormatRg16Snorm;
+ case glslang::ElfRg8Snorm: return spv::ImageFormatRg8Snorm;
+ case glslang::ElfR16Snorm: return spv::ImageFormatR16Snorm;
+ case glslang::ElfR8Snorm: return spv::ImageFormatR8Snorm;
+ case glslang::ElfRgba32i: return spv::ImageFormatRgba32i;
+ case glslang::ElfRgba16i: return spv::ImageFormatRgba16i;
+ case glslang::ElfRgba8i: return spv::ImageFormatRgba8i;
+ case glslang::ElfR32i: return spv::ImageFormatR32i;
+ case glslang::ElfRg32i: return spv::ImageFormatRg32i;
+ case glslang::ElfRg16i: return spv::ImageFormatRg16i;
+ case glslang::ElfRg8i: return spv::ImageFormatRg8i;
+ case glslang::ElfR16i: return spv::ImageFormatR16i;
+ case glslang::ElfR8i: return spv::ImageFormatR8i;
+ case glslang::ElfRgba32ui: return spv::ImageFormatRgba32ui;
+ case glslang::ElfRgba16ui: return spv::ImageFormatRgba16ui;
+ case glslang::ElfRgba8ui: return spv::ImageFormatRgba8ui;
+ case glslang::ElfR32ui: return spv::ImageFormatR32ui;
+ case glslang::ElfRg32ui: return spv::ImageFormatRg32ui;
+ case glslang::ElfRg16ui: return spv::ImageFormatRg16ui;
+ case glslang::ElfRgb10a2ui: return spv::ImageFormatRgb10a2ui;
+ case glslang::ElfRg8ui: return spv::ImageFormatRg8ui;
+ case glslang::ElfR16ui: return spv::ImageFormatR16ui;
+ case glslang::ElfR8ui: return spv::ImageFormatR8ui;
+ default: return spv::ImageFormatMax;
+ }
+}
+
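+// Translate glslang selection hints to a SPIR-V selection control mask.
+// Illustrative (assuming GL_EXT_control_flow_attributes): "[[flatten]] if (...)"
+// sets getFlatten(), and "[[dont_flatten]] if (...)" sets getDontFlatten().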
+spv::SelectionControlMask TGlslangToSpvTraverser::TranslateSelectionControl(const glslang::TIntermSelection& selectionNode) const
+{
+ if (selectionNode.getFlatten())
+ return spv::SelectionControlFlattenMask;
+ if (selectionNode.getDontFlatten())
+ return spv::SelectionControlDontFlattenMask;
+ return spv::SelectionControlMaskNone;
+}
+
+spv::SelectionControlMask TGlslangToSpvTraverser::TranslateSwitchControl(const glslang::TIntermSwitch& switchNode) const
+{
+ if (switchNode.getFlatten())
+ return spv::SelectionControlFlattenMask;
+ if (switchNode.getDontFlatten())
+ return spv::SelectionControlDontFlattenMask;
+ return spv::SelectionControlMaskNone;
+}
+
+// Translate glslang loop hints to a SPIR-V loop control mask, appending any
+// required literal values (e.g., a dependency length) to 'operands'.
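+// Illustrative (assuming GL_EXT_control_flow_attributes):
+// [[unroll]] -> LoopControlUnrollMask
+// [[dont_unroll]] -> LoopControlDontUnrollMask
+// [[dependency_infinite]] -> LoopControlDependencyInfiniteMask
+// [[dependency_length(4)]] -> LoopControlDependencyLengthMask, operand 4
+// The SPIR-V 1.4 hints below (min/max iterations, peel/partial count, etc.)
+// append their literal values to 'operands' in the same way.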
+spv::LoopControlMask TGlslangToSpvTraverser::TranslateLoopControl(const glslang::TIntermLoop& loopNode,
+ std::vector<unsigned int>& operands) const
+{
+ spv::LoopControlMask control = spv::LoopControlMaskNone;
+
+ if (loopNode.getDontUnroll())
+ control = control | spv::LoopControlDontUnrollMask;
+ if (loopNode.getUnroll())
+ control = control | spv::LoopControlUnrollMask;
+ if (unsigned(loopNode.getLoopDependency()) == glslang::TIntermLoop::dependencyInfinite)
+ control = control | spv::LoopControlDependencyInfiniteMask;
+ else if (loopNode.getLoopDependency() > 0) {
+ control = control | spv::LoopControlDependencyLengthMask;
+ operands.push_back((unsigned int)loopNode.getLoopDependency());
+ }
+ if (glslangIntermediate->getSpv().spv >= glslang::EShTargetSpv_1_4) {
+ if (loopNode.getMinIterations() > 0) {
+ control = control | spv::LoopControlMinIterationsMask;
+ operands.push_back(loopNode.getMinIterations());
+ }
+ if (loopNode.getMaxIterations() < glslang::TIntermLoop::iterationsInfinite) {
+ control = control | spv::LoopControlMaxIterationsMask;
+ operands.push_back(loopNode.getMaxIterations());
+ }
+ if (loopNode.getIterationMultiple() > 1) {
+ control = control | spv::LoopControlIterationMultipleMask;
+ operands.push_back(loopNode.getIterationMultiple());
+ }
+ if (loopNode.getPeelCount() > 0) {
+ control = control | spv::LoopControlPeelCountMask;
+ operands.push_back(loopNode.getPeelCount());
+ }
+ if (loopNode.getPartialCount() > 0) {
+ control = control | spv::LoopControlPartialCountMask;
+ operands.push_back(loopNode.getPartialCount());
+ }
+ }
+
+ return control;
+}
+
+// Translate glslang type to SPIR-V storage class.
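+// Illustrative mappings for common GLSL declarations:
+// in vec4 v; -> StorageClassInput
+// uniform sampler2D s; -> StorageClassUniformConstant
+// uniform UBO { ... } u; -> StorageClassUniform
+// buffer SSBO { ... } b; -> StorageClassStorageBuffer (SPIR-V 1.3+/storage_buffer ext)
+// shared float f; -> StorageClassWorkgroup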
+spv::StorageClass TGlslangToSpvTraverser::TranslateStorageClass(const glslang::TType& type)
+{
+ if (type.getQualifier().isPipeInput())
+ return spv::StorageClassInput;
+ if (type.getQualifier().isPipeOutput())
+ return spv::StorageClassOutput;
+
+ if (glslangIntermediate->getSource() != glslang::EShSourceHlsl ||
+ type.getQualifier().storage == glslang::EvqUniform) {
+ if (type.getBasicType() == glslang::EbtAtomicUint)
+ return spv::StorageClassAtomicCounter;
+ if (type.containsOpaque())
+ return spv::StorageClassUniformConstant;
+ }
+
+#ifdef NV_EXTENSIONS
+ if (type.getQualifier().isUniformOrBuffer() &&
+ type.getQualifier().layoutShaderRecordNV) {
+ return spv::StorageClassShaderRecordBufferNV;
+ }
+#endif
+
+ if (glslangIntermediate->usingStorageBuffer() && type.getQualifier().storage == glslang::EvqBuffer) {
+ addPre13Extension(spv::E_SPV_KHR_storage_buffer_storage_class);
+ return spv::StorageClassStorageBuffer;
+ }
+
+ if (type.getQualifier().isUniformOrBuffer()) {
+ if (type.getQualifier().layoutPushConstant)
+ return spv::StorageClassPushConstant;
+ if (type.getBasicType() == glslang::EbtBlock)
+ return spv::StorageClassUniform;
+ return spv::StorageClassUniformConstant;
+ }
+
+ switch (type.getQualifier().storage) {
+ case glslang::EvqShared: return spv::StorageClassWorkgroup;
+ case glslang::EvqGlobal: return spv::StorageClassPrivate;
+ case glslang::EvqConstReadOnly: return spv::StorageClassFunction;
+ case glslang::EvqTemporary: return spv::StorageClassFunction;
+#ifdef NV_EXTENSIONS
+ case glslang::EvqPayloadNV: return spv::StorageClassRayPayloadNV;
+ case glslang::EvqPayloadInNV: return spv::StorageClassIncomingRayPayloadNV;
+ case glslang::EvqHitAttrNV: return spv::StorageClassHitAttributeNV;
+ case glslang::EvqCallableDataNV: return spv::StorageClassCallableDataNV;
+ case glslang::EvqCallableDataInNV: return spv::StorageClassIncomingCallableDataNV;
+#endif
+ default:
+ assert(0);
+ break;
+ }
+
+ return spv::StorageClassFunction;
+}
+
+// Add capabilities pertaining to how an array is indexed.
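+// Illustrative (assuming GL_EXT_nonuniform_qualifier): indexing a sampler
+// array as "texture(texArr[nonuniformEXT(i)], uv)" asserts a non-uniform
+// index and needs CapabilitySampledImageArrayNonUniformIndexingEXT, while a
+// dynamically uniform "texArr[i]" needs no extra capability here.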
+void TGlslangToSpvTraverser::addIndirectionIndexCapabilities(const glslang::TType& baseType,
+ const glslang::TType& indexType)
+{
+ if (indexType.getQualifier().isNonUniform()) {
+ // deal with an asserted non-uniform index
+ // SPV_EXT_descriptor_indexing already added in TranslateNonUniformDecoration
+ if (baseType.getBasicType() == glslang::EbtSampler) {
+ if (baseType.getQualifier().hasAttachment())
+ builder.addCapability(spv::CapabilityInputAttachmentArrayNonUniformIndexingEXT);
+ else if (baseType.isImage() && baseType.getSampler().dim == glslang::EsdBuffer)
+ builder.addCapability(spv::CapabilityStorageTexelBufferArrayNonUniformIndexingEXT);
+ else if (baseType.isTexture() && baseType.getSampler().dim == glslang::EsdBuffer)
+ builder.addCapability(spv::CapabilityUniformTexelBufferArrayNonUniformIndexingEXT);
+ else if (baseType.isImage())
+ builder.addCapability(spv::CapabilityStorageImageArrayNonUniformIndexingEXT);
+ else if (baseType.isTexture())
+ builder.addCapability(spv::CapabilitySampledImageArrayNonUniformIndexingEXT);
+ } else if (baseType.getBasicType() == glslang::EbtBlock) {
+ if (baseType.getQualifier().storage == glslang::EvqBuffer)
+ builder.addCapability(spv::CapabilityStorageBufferArrayNonUniformIndexingEXT);
+ else if (baseType.getQualifier().storage == glslang::EvqUniform)
+ builder.addCapability(spv::CapabilityUniformBufferArrayNonUniformIndexingEXT);
+ }
+ } else {
+ // assume a dynamically uniform index
+ if (baseType.getBasicType() == glslang::EbtSampler) {
+ if (baseType.getQualifier().hasAttachment()) {
+ builder.addExtension("SPV_EXT_descriptor_indexing");
+ builder.addCapability(spv::CapabilityInputAttachmentArrayDynamicIndexingEXT);
+ } else if (baseType.isImage() && baseType.getSampler().dim == glslang::EsdBuffer) {
+ builder.addExtension("SPV_EXT_descriptor_indexing");
+ builder.addCapability(spv::CapabilityStorageTexelBufferArrayDynamicIndexingEXT);
+ } else if (baseType.isTexture() && baseType.getSampler().dim == glslang::EsdBuffer) {
+ builder.addExtension("SPV_EXT_descriptor_indexing");
+ builder.addCapability(spv::CapabilityUniformTexelBufferArrayDynamicIndexingEXT);
+ }
+ }
+ }
+}
+
+// Return whether or not the given type is something that should be tied to a
+// descriptor set.
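+// Illustrative: "layout(push_constant) uniform Push { ... }" is not a
+// descriptor resource, while "uniform sampler2D s" and uniform/buffer
+// blocks are.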
+bool IsDescriptorResource(const glslang::TType& type)
+{
+ // uniform and buffer blocks are included, unless it is a push_constant
+ if (type.getBasicType() == glslang::EbtBlock)
+ return type.getQualifier().isUniformOrBuffer() &&
+#ifdef NV_EXTENSIONS
+ ! type.getQualifier().layoutShaderRecordNV &&
+#endif
+ ! type.getQualifier().layoutPushConstant;
+
+ // Non-block case:
+ // basically samplerXXX/subpass/sampler/texture are all included
+ // if they are in the global-scope class, not the function-parameter
+ // (or local, if they ever exist) class.
+ if (type.getBasicType() == glslang::EbtSampler)
+ return type.getQualifier().isUniformOrBuffer();
+
+ // None of the above.
+ return false;
+}
+
+void InheritQualifiers(glslang::TQualifier& child, const glslang::TQualifier& parent)
+{
+ if (child.layoutMatrix == glslang::ElmNone)
+ child.layoutMatrix = parent.layoutMatrix;
+
+ if (parent.invariant)
+ child.invariant = true;
+ if (parent.nopersp)
+ child.nopersp = true;
+#ifdef AMD_EXTENSIONS
+ if (parent.explicitInterp)
+ child.explicitInterp = true;
+#endif
+ if (parent.flat)
+ child.flat = true;
+ if (parent.centroid)
+ child.centroid = true;
+ if (parent.patch)
+ child.patch = true;
+ if (parent.sample)
+ child.sample = true;
+ if (parent.coherent)
+ child.coherent = true;
+ if (parent.devicecoherent)
+ child.devicecoherent = true;
+ if (parent.queuefamilycoherent)
+ child.queuefamilycoherent = true;
+ if (parent.workgroupcoherent)
+ child.workgroupcoherent = true;
+ if (parent.subgroupcoherent)
+ child.subgroupcoherent = true;
+ if (parent.nonprivate)
+ child.nonprivate = true;
+ if (parent.volatil)
+ child.volatil = true;
+ if (parent.restrict)
+ child.restrict = true;
+ if (parent.readonly)
+ child.readonly = true;
+ if (parent.writeonly)
+ child.writeonly = true;
+#ifdef NV_EXTENSIONS
+ if (parent.perPrimitiveNV)
+ child.perPrimitiveNV = true;
+ if (parent.perViewNV)
+ child.perViewNV = true;
+ if (parent.perTaskNV)
+ child.perTaskNV = true;
+#endif
+}
+
+bool HasNonLayoutQualifiers(const glslang::TType& type, const glslang::TQualifier& qualifier)
+{
+ // This should list qualifiers that simultaneously satisfy:
+ // - struct members might inherit from a struct declaration
+ // (note that non-block structs don't explicitly inherit,
+ // only implicitly, meaning no decoration involved)
+ // - affect decorations on the struct members
+ // (note smooth does not, and expecting something like volatile
+ // to affect the whole object)
+ // - are not part of the offset/std430/etc or row/column-major layout
+ return qualifier.invariant || (qualifier.hasLocation() && type.getBasicType() == glslang::EbtBlock);
+}
+
+//
+// Implement the TGlslangToSpvTraverser class.
+//
+
+TGlslangToSpvTraverser::TGlslangToSpvTraverser(unsigned int spvVersion, const glslang::TIntermediate* glslangIntermediate,
+ spv::SpvBuildLogger* buildLogger, glslang::SpvOptions& options)
+ : TIntermTraverser(true, false, true),
+ options(options),
+ shaderEntry(nullptr), currentFunction(nullptr),
+ sequenceDepth(0), logger(buildLogger),
+ builder(spvVersion, (glslang::GetKhronosToolId() << 16) | glslang::GetSpirvGeneratorVersion(), logger),
+ inEntryPoint(false), entryPointTerminated(false), linkageOnly(false),
+ glslangIntermediate(glslangIntermediate)
+{
+ spv::ExecutionModel executionModel = TranslateExecutionModel(glslangIntermediate->getStage());
+
+ builder.clearAccessChain();
+ builder.setSource(TranslateSourceLanguage(glslangIntermediate->getSource(), glslangIntermediate->getProfile()),
+ glslangIntermediate->getVersion());
+
+ if (options.generateDebugInfo) {
+ builder.setEmitOpLines();
+ builder.setSourceFile(glslangIntermediate->getSourceFile());
+
+ // Set the source shader's text. For SPIR-V 1.0, include a preamble
+ // in comments stating the OpModuleProcessed instructions; for
+ // SPIR-V 1.1 and later, emit those as actual instructions.
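+ // Illustrative: a process string such as "entry-point main" would be
+ // recorded as "// OpModuleProcessed entry-point main" in the preamble.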
+ std::string text;
+ const std::vector<std::string>& processes = glslangIntermediate->getProcesses();
+ for (int p = 0; p < (int)processes.size(); ++p) {
+ if (glslangIntermediate->getSpv().spv < glslang::EShTargetSpv_1_1) {
+ text.append("// OpModuleProcessed ");
+ text.append(processes[p]);
+ text.append("\n");
+ } else
+ builder.addModuleProcessed(processes[p]);
+ }
+ if (glslangIntermediate->getSpv().spv < glslang::EShTargetSpv_1_1 && (int)processes.size() > 0)
+ text.append("#line 1\n");
+ text.append(glslangIntermediate->getSourceText());
+ builder.setSourceText(text);
+ // Pass name and text for all included files
+ const std::map<std::string, std::string>& include_txt = glslangIntermediate->getIncludeText();
+ for (auto iItr = include_txt.begin(); iItr != include_txt.end(); ++iItr)
+ builder.addInclude(iItr->first, iItr->second);
+ }
+ stdBuiltins = builder.import("GLSL.std.450");
+
+ spv::AddressingModel addressingModel = spv::AddressingModelLogical;
+ spv::MemoryModel memoryModel = spv::MemoryModelGLSL450;
+
+ if (glslangIntermediate->usingPhysicalStorageBuffer()) {
+ addressingModel = spv::AddressingModelPhysicalStorageBuffer64EXT;
+ builder.addExtension(spv::E_SPV_EXT_physical_storage_buffer);
+ builder.addCapability(spv::CapabilityPhysicalStorageBufferAddressesEXT);
+ }
+ if (glslangIntermediate->usingVulkanMemoryModel()) {
+ memoryModel = spv::MemoryModelVulkanKHR;
+ builder.addCapability(spv::CapabilityVulkanMemoryModelKHR);
+ builder.addExtension(spv::E_SPV_KHR_vulkan_memory_model);
+ }
+ builder.setMemoryModel(addressingModel, memoryModel);
+
+ if (glslangIntermediate->usingVariablePointers()) {
+ builder.addCapability(spv::CapabilityVariablePointers);
+ }
+
+ shaderEntry = builder.makeEntryPoint(glslangIntermediate->getEntryPointName().c_str());
+ entryPoint = builder.addEntryPoint(executionModel, shaderEntry, glslangIntermediate->getEntryPointName().c_str());
+
+ // Add the source extensions
+ const auto& sourceExtensions = glslangIntermediate->getRequestedExtensions();
+ for (auto it = sourceExtensions.begin(); it != sourceExtensions.end(); ++it)
+ builder.addSourceExtension(it->c_str());
+
+ // Add the top-level modes for this shader.
+
+ if (glslangIntermediate->getXfbMode()) {
+ builder.addCapability(spv::CapabilityTransformFeedback);
+ builder.addExecutionMode(shaderEntry, spv::ExecutionModeXfb);
+ }
+
+ unsigned int mode;
+ switch (glslangIntermediate->getStage()) {
+ case EShLangVertex:
+ builder.addCapability(spv::CapabilityShader);
+ break;
+
+ case EShLangTessEvaluation:
+ case EShLangTessControl:
+ builder.addCapability(spv::CapabilityTessellation);
+
+ glslang::TLayoutGeometry primitive;
+
+ if (glslangIntermediate->getStage() == EShLangTessControl) {
+ builder.addExecutionMode(shaderEntry, spv::ExecutionModeOutputVertices, glslangIntermediate->getVertices());
+ primitive = glslangIntermediate->getOutputPrimitive();
+ } else {
+ primitive = glslangIntermediate->getInputPrimitive();
+ }
+
+ switch (primitive) {
+ case glslang::ElgTriangles: mode = spv::ExecutionModeTriangles; break;
+ case glslang::ElgQuads: mode = spv::ExecutionModeQuads; break;
+ case glslang::ElgIsolines: mode = spv::ExecutionModeIsolines; break;
+ default: mode = spv::ExecutionModeMax; break;
+ }
+ if (mode != spv::ExecutionModeMax)
+ builder.addExecutionMode(shaderEntry, (spv::ExecutionMode)mode);
+
+ switch (glslangIntermediate->getVertexSpacing()) {
+ case glslang::EvsEqual: mode = spv::ExecutionModeSpacingEqual; break;
+ case glslang::EvsFractionalEven: mode = spv::ExecutionModeSpacingFractionalEven; break;
+ case glslang::EvsFractionalOdd: mode = spv::ExecutionModeSpacingFractionalOdd; break;
+ default: mode = spv::ExecutionModeMax; break;
+ }
+ if (mode != spv::ExecutionModeMax)
+ builder.addExecutionMode(shaderEntry, (spv::ExecutionMode)mode);
+
+ switch (glslangIntermediate->getVertexOrder()) {
+ case glslang::EvoCw: mode = spv::ExecutionModeVertexOrderCw; break;
+ case glslang::EvoCcw: mode = spv::ExecutionModeVertexOrderCcw; break;
+ default: mode = spv::ExecutionModeMax; break;
+ }
+ if (mode != spv::ExecutionModeMax)
+ builder.addExecutionMode(shaderEntry, (spv::ExecutionMode)mode);
+
+ if (glslangIntermediate->getPointMode())
+ builder.addExecutionMode(shaderEntry, spv::ExecutionModePointMode);
+ break;
+
+ case EShLangGeometry:
+ builder.addCapability(spv::CapabilityGeometry);
+ switch (glslangIntermediate->getInputPrimitive()) {
+ case glslang::ElgPoints: mode = spv::ExecutionModeInputPoints; break;
+ case glslang::ElgLines: mode = spv::ExecutionModeInputLines; break;
+ case glslang::ElgLinesAdjacency: mode = spv::ExecutionModeInputLinesAdjacency; break;
+ case glslang::ElgTriangles: mode = spv::ExecutionModeTriangles; break;
+ case glslang::ElgTrianglesAdjacency: mode = spv::ExecutionModeInputTrianglesAdjacency; break;
+ default: mode = spv::ExecutionModeMax; break;
+ }
+ if (mode != spv::ExecutionModeMax)
+ builder.addExecutionMode(shaderEntry, (spv::ExecutionMode)mode);
+
+ builder.addExecutionMode(shaderEntry, spv::ExecutionModeInvocations, glslangIntermediate->getInvocations());
+
+ switch (glslangIntermediate->getOutputPrimitive()) {
+ case glslang::ElgPoints: mode = spv::ExecutionModeOutputPoints; break;
+ case glslang::ElgLineStrip: mode = spv::ExecutionModeOutputLineStrip; break;
+ case glslang::ElgTriangleStrip: mode = spv::ExecutionModeOutputTriangleStrip; break;
+ default: mode = spv::ExecutionModeMax; break;
+ }
+ if (mode != spv::ExecutionModeMax)
+ builder.addExecutionMode(shaderEntry, (spv::ExecutionMode)mode);
+ builder.addExecutionMode(shaderEntry, spv::ExecutionModeOutputVertices, glslangIntermediate->getVertices());
+ break;
+
+ case EShLangFragment:
+ builder.addCapability(spv::CapabilityShader);
+ if (glslangIntermediate->getPixelCenterInteger())
+ builder.addExecutionMode(shaderEntry, spv::ExecutionModePixelCenterInteger);
+
+ if (glslangIntermediate->getOriginUpperLeft())
+ builder.addExecutionMode(shaderEntry, spv::ExecutionModeOriginUpperLeft);
+ else
+ builder.addExecutionMode(shaderEntry, spv::ExecutionModeOriginLowerLeft);
+
+ if (glslangIntermediate->getEarlyFragmentTests())
+ builder.addExecutionMode(shaderEntry, spv::ExecutionModeEarlyFragmentTests);
+
+ if (glslangIntermediate->getPostDepthCoverage()) {
+ builder.addCapability(spv::CapabilitySampleMaskPostDepthCoverage);
+ builder.addExecutionMode(shaderEntry, spv::ExecutionModePostDepthCoverage);
+ builder.addExtension(spv::E_SPV_KHR_post_depth_coverage);
+ }
+
+ switch(glslangIntermediate->getDepth()) {
+ case glslang::EldGreater: mode = spv::ExecutionModeDepthGreater; break;
+ case glslang::EldLess: mode = spv::ExecutionModeDepthLess; break;
+ default: mode = spv::ExecutionModeMax; break;
+ }
+ if (mode != spv::ExecutionModeMax)
+ builder.addExecutionMode(shaderEntry, (spv::ExecutionMode)mode);
+
+ if (glslangIntermediate->getDepth() != glslang::EldUnchanged && glslangIntermediate->isDepthReplacing())
+ builder.addExecutionMode(shaderEntry, spv::ExecutionModeDepthReplacing);
+ break;
+
+ case EShLangCompute:
+ builder.addCapability(spv::CapabilityShader);
+ builder.addExecutionMode(shaderEntry, spv::ExecutionModeLocalSize, glslangIntermediate->getLocalSize(0),
+ glslangIntermediate->getLocalSize(1),
+ glslangIntermediate->getLocalSize(2));
+#ifdef NV_EXTENSIONS
+ if (glslangIntermediate->getLayoutDerivativeModeNone() == glslang::LayoutDerivativeGroupQuads) {
+ builder.addCapability(spv::CapabilityComputeDerivativeGroupQuadsNV);
+ builder.addExecutionMode(shaderEntry, spv::ExecutionModeDerivativeGroupQuadsNV);
+ builder.addExtension(spv::E_SPV_NV_compute_shader_derivatives);
+ } else if (glslangIntermediate->getLayoutDerivativeModeNone() == glslang::LayoutDerivativeGroupLinear) {
+ builder.addCapability(spv::CapabilityComputeDerivativeGroupLinearNV);
+ builder.addExecutionMode(shaderEntry, spv::ExecutionModeDerivativeGroupLinearNV);
+ builder.addExtension(spv::E_SPV_NV_compute_shader_derivatives);
+ }
+#endif
+ break;
+
+#ifdef NV_EXTENSIONS
+ case EShLangRayGenNV:
+ case EShLangIntersectNV:
+ case EShLangAnyHitNV:
+ case EShLangClosestHitNV:
+ case EShLangMissNV:
+ case EShLangCallableNV:
+ builder.addCapability(spv::CapabilityRayTracingNV);
+ builder.addExtension("SPV_NV_ray_tracing");
+ break;
+ case EShLangTaskNV:
+ case EShLangMeshNV:
+ builder.addCapability(spv::CapabilityMeshShadingNV);
+ builder.addExtension(spv::E_SPV_NV_mesh_shader);
+ builder.addExecutionMode(shaderEntry, spv::ExecutionModeLocalSize, glslangIntermediate->getLocalSize(0),
+ glslangIntermediate->getLocalSize(1),
+ glslangIntermediate->getLocalSize(2));
+ if (glslangIntermediate->getStage() == EShLangMeshNV) {
+ builder.addExecutionMode(shaderEntry, spv::ExecutionModeOutputVertices, glslangIntermediate->getVertices());
+ builder.addExecutionMode(shaderEntry, spv::ExecutionModeOutputPrimitivesNV, glslangIntermediate->getPrimitives());
+
+ switch (glslangIntermediate->getOutputPrimitive()) {
+ case glslang::ElgPoints: mode = spv::ExecutionModeOutputPoints; break;
+ case glslang::ElgLines: mode = spv::ExecutionModeOutputLinesNV; break;
+ case glslang::ElgTriangles: mode = spv::ExecutionModeOutputTrianglesNV; break;
+ default: mode = spv::ExecutionModeMax; break;
+ }
+ if (mode != spv::ExecutionModeMax)
+ builder.addExecutionMode(shaderEntry, (spv::ExecutionMode)mode);
+ }
+ break;
+#endif
+
+ default:
+ break;
+ }
+}
+
+// Finish creating SPV, after the traversal is complete.
+void TGlslangToSpvTraverser::finishSpv()
+{
+ // Finish the entry point function
+ if (! entryPointTerminated) {
+ builder.setBuildPoint(shaderEntry->getLastBlock());
+ builder.leaveFunction();
+ }
+
+ // finish off the entry-point SPV instruction by adding the Input/Output <id>
+ for (auto it = iOSet.cbegin(); it != iOSet.cend(); ++it)
+ entryPoint->addIdOperand(*it);
+
+ // Add capabilities, extensions, remove unneeded decorations, etc.,
+ // based on the resulting SPIR-V.
+ builder.postProcess();
+}
+
+// Write the SPV into 'out'.
+void TGlslangToSpvTraverser::dumpSpv(std::vector<unsigned int>& out)
+{
+ builder.dump(out);
+}
+
+//
+// Implement the traversal functions.
+//
+// Return true from interior nodes to have the external traversal
+// continue on to children. Return false if children were
+// already processed.
+//
+
+//
+// Symbols can turn into
+// - uniform/input reads
+// - output writes
+// - complex lvalue base setups: foo.bar[3]..., where we see foo and start up an access chain
+// - something simple that degenerates into the last bullet
+//
+void TGlslangToSpvTraverser::visitSymbol(glslang::TIntermSymbol* symbol)
+{
+ SpecConstantOpModeGuard spec_constant_op_mode_setter(&builder);
+ if (symbol->getType().getQualifier().isSpecConstant())
+ spec_constant_op_mode_setter.turnOnSpecConstantOpMode();
+
+ // getSymbolId() will set up all the IO decorations on the first call.
+ // Formal function parameters were mapped during makeFunctions().
+ spv::Id id = getSymbolId(symbol);
+
+ // Include all "static use" and "linkage only" interface variables on the OpEntryPoint instruction
+ if (builder.isPointer(id)) {
+ // Consider adding to the OpEntryPoint interface list.
+ // Only looking at structures if they have at least one member.
+ if (!symbol->getType().isStruct() || symbol->getType().getStruct()->size() > 0) {
+ spv::StorageClass sc = builder.getStorageClass(id);
+ // Before SPIR-V 1.4, we only want to include Input and Output.
+ // Starting with SPIR-V 1.4, we want all globals.
+ if ((glslangIntermediate->getSpv().spv >= glslang::EShTargetSpv_1_4 && sc != spv::StorageClassFunction) ||
+ (sc == spv::StorageClassInput || sc == spv::StorageClassOutput)) {
+ iOSet.insert(id);
+ }
+ }
+ }
+
+ // Only process non-linkage-only nodes for generating actual static uses
+ if (! linkageOnly || symbol->getQualifier().isSpecConstant()) {
+ // Prepare to generate code for the access
+
+ // L-value chains will be computed left to right. We're on the symbol now,
+ // which is the left-most part of the access chain, so now is "clear" time,
+ // followed by setting the base.
+ builder.clearAccessChain();
+
+ // For now, we consider all user variables as being in memory, so they are pointers,
+ // except for
+ // A) R-Value arguments to a function, which are an intermediate object.
+ // See comments in handleUserFunctionCall().
+ // B) Specialization constants (normal constants don't even come in as a variable),
+ // These are also pure R-values.
+ glslang::TQualifier qualifier = symbol->getQualifier();
+ if (qualifier.isSpecConstant() || rValueParameters.find(symbol->getId()) != rValueParameters.end())
+ builder.setAccessChainRValue(id);
+ else
+ builder.setAccessChainLValue(id);
+ }
+
+ // Process linkage-only nodes for any special additional interface work.
+ if (linkageOnly) {
+ if (glslangIntermediate->getHlslFunctionality1()) {
+ // Map implicit counter buffers to their originating buffers, which should have been
+ // seen by now, given earlier pruning of unused counters, and preservation of order
+ // of declaration.
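+ // Illustrative (HLSL): for a RWStructuredBuffer "buf" with an implicit
+ // counter, "buf" is recorded here first; when its counter buffer is seen,
+ // "buf" is decorated with DecorationHlslCounterBufferGOOGLE pointing at
+ // the counter variable.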
+ if (symbol->getType().getQualifier().isUniformOrBuffer()) {
+ if (!glslangIntermediate->hasCounterBufferName(symbol->getName())) {
+ // Save possible originating buffers for counter buffers, keyed by
+ // making the potential counter-buffer name.
+ std::string keyName = symbol->getName().c_str();
+ keyName = glslangIntermediate->addCounterBufferName(keyName);
+ counterOriginator[keyName] = symbol;
+ } else {
+ // Handle a counter buffer, by finding the saved originating buffer.
+ std::string keyName = symbol->getName().c_str();
+ auto it = counterOriginator.find(keyName);
+ if (it != counterOriginator.end()) {
+ id = getSymbolId(it->second);
+ if (id != spv::NoResult) {
+ spv::Id counterId = getSymbolId(symbol);
+ if (counterId != spv::NoResult) {
+ builder.addExtension("SPV_GOOGLE_hlsl_functionality1");
+ builder.addDecorationId(id, spv::DecorationHlslCounterBufferGOOGLE, counterId);
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+bool TGlslangToSpvTraverser::visitBinary(glslang::TVisit /* visit */, glslang::TIntermBinary* node)
+{
+ builder.setLine(node->getLoc().line, node->getLoc().getFilename());
+
+ SpecConstantOpModeGuard spec_constant_op_mode_setter(&builder);
+ if (node->getType().getQualifier().isSpecConstant())
+ spec_constant_op_mode_setter.turnOnSpecConstantOpMode();
+
+ // First, handle special cases
+ switch (node->getOp()) {
+ case glslang::EOpAssign:
+ case glslang::EOpAddAssign:
+ case glslang::EOpSubAssign:
+ case glslang::EOpMulAssign:
+ case glslang::EOpVectorTimesMatrixAssign:
+ case glslang::EOpVectorTimesScalarAssign:
+ case glslang::EOpMatrixTimesScalarAssign:
+ case glslang::EOpMatrixTimesMatrixAssign:
+ case glslang::EOpDivAssign:
+ case glslang::EOpModAssign:
+ case glslang::EOpAndAssign:
+ case glslang::EOpInclusiveOrAssign:
+ case glslang::EOpExclusiveOrAssign:
+ case glslang::EOpLeftShiftAssign:
+ case glslang::EOpRightShiftAssign:
+ // A bin-op assign "a += b" means the same thing as "a = a + b"
+ // where a is evaluated before b. For a simple assignment, GLSL
+ // says to evaluate the left before the right. So, always, left
+ // node then right node.
+ {
+ // get the left l-value, save it away
+ builder.clearAccessChain();
+ node->getLeft()->traverse(this);
+ spv::Builder::AccessChain lValue = builder.getAccessChain();
+
+ // evaluate the right
+ builder.clearAccessChain();
+ node->getRight()->traverse(this);
+ spv::Id rValue = accessChainLoad(node->getRight()->getType());
+
+ if (node->getOp() != glslang::EOpAssign) {
+ // the left is also an r-value
+ builder.setAccessChain(lValue);
+ spv::Id leftRValue = accessChainLoad(node->getLeft()->getType());
+
+ // do the operation
+ OpDecorations decorations = { TranslatePrecisionDecoration(node->getOperationPrecision()),
+ TranslateNoContractionDecoration(node->getType().getQualifier()),
+ TranslateNonUniformDecoration(node->getType().getQualifier()) };
+ rValue = createBinaryOperation(node->getOp(), decorations,
+ convertGlslangToSpvType(node->getType()), leftRValue, rValue,
+ node->getType().getBasicType());
+
+ // these all need their counterparts in createBinaryOperation()
+ assert(rValue != spv::NoResult);
+ }
+
+ // store the result
+ builder.setAccessChain(lValue);
+ multiTypeStore(node->getLeft()->getType(), rValue);
+
+ // assignments are expressions having an rValue after they are evaluated...
+ builder.clearAccessChain();
+ builder.setAccessChainRValue(rValue);
+ }
+ return false;
+ case glslang::EOpIndexDirect:
+ case glslang::EOpIndexDirectStruct:
+ {
+ // Structure, array, matrix, or vector indirection with statically known index.
+ // Get the left part of the access chain.
+ node->getLeft()->traverse(this);
+
+ // Add the next element in the chain
+
+ const int glslangIndex = node->getRight()->getAsConstantUnion()->getConstArray()[0].getIConst();
+ if (! node->getLeft()->getType().isArray() &&
+ node->getLeft()->getType().isVector() &&
+ node->getOp() == glslang::EOpIndexDirect) {
+ // This is essentially a hard-coded vector swizzle of size 1,
+ // so short circuit the access-chain stuff with a swizzle.
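+ // Illustrative: "v[1]" (or equivalently "v.y") on a vec4 becomes the
+ // size-1 swizzle {1} instead of an access-chain index.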
+ std::vector<unsigned> swizzle;
+ swizzle.push_back(glslangIndex);
+ int dummySize;
+ builder.accessChainPushSwizzle(swizzle, convertGlslangToSpvType(node->getLeft()->getType()),
+ TranslateCoherent(node->getLeft()->getType()),
+ glslangIntermediate->getBaseAlignmentScalar(node->getLeft()->getType(), dummySize));
+ } else {
+
+ // Load through a block reference is performed with a dot operator that
+ // is mapped to EOpIndexDirectStruct. When we get to the actual reference,
+ // do a load and reset the access chain.
+ if (node->getLeft()->getBasicType() == glslang::EbtReference &&
+ !node->getLeft()->getType().isArray() &&
+ node->getOp() == glslang::EOpIndexDirectStruct)
+ {
+ spv::Id left = accessChainLoad(node->getLeft()->getType());
+ builder.clearAccessChain();
+ builder.setAccessChainLValue(left);
+ }
+
+ int spvIndex = glslangIndex;
+ if (node->getLeft()->getBasicType() == glslang::EbtBlock &&
+ node->getOp() == glslang::EOpIndexDirectStruct)
+ {
+ // This may be, e.g., an anonymous block-member selection, which generally
+ // needs index remapping due to hidden members in anonymous blocks.
+ std::vector<int>& remapper = memberRemapper[node->getLeft()->getType().getStruct()];
+ assert(remapper.size() > 0);
+ spvIndex = remapper[glslangIndex];
+ }
+
+ // normal case for indexing array or structure or block
+ builder.accessChainPush(builder.makeIntConstant(spvIndex), TranslateCoherent(node->getLeft()->getType()), node->getLeft()->getType().getBufferReferenceAlignment());
+
+ // Add capabilities here for accessing PointSize and clip/cull distance.
+ // We have deferred generation of associated capabilities until now.
+ if (node->getLeft()->getType().isStruct() && ! node->getLeft()->getType().isArray())
+ declareUseOfStructMember(*(node->getLeft()->getType().getStruct()), glslangIndex);
+ }
+ }
+ return false;
+ case glslang::EOpIndexIndirect:
+ {
+ // Array, matrix, or vector indirection with variable index.
+ // Will use native SPIR-V access-chain for an array indirection;
+ // matrices are arrays of vectors, so will also work for a matrix.
+ // Will use the access chain's 'component' for variable index into a vector.
+
+ // This adapter is building access chains left to right.
+ // Set up the access chain to the left.
+ node->getLeft()->traverse(this);
+
+ // save it so that computing the right side doesn't trash it
+ spv::Builder::AccessChain partial = builder.getAccessChain();
+
+ // compute the next index in the chain
+ builder.clearAccessChain();
+ node->getRight()->traverse(this);
+ spv::Id index = accessChainLoad(node->getRight()->getType());
+
+ addIndirectionIndexCapabilities(node->getLeft()->getType(), node->getRight()->getType());
+
+ // restore the saved access chain
+ builder.setAccessChain(partial);
+
+ if (! node->getLeft()->getType().isArray() && node->getLeft()->getType().isVector()) {
+ int dummySize;
+ builder.accessChainPushComponent(index, convertGlslangToSpvType(node->getLeft()->getType()),
+ TranslateCoherent(node->getLeft()->getType()),
+ glslangIntermediate->getBaseAlignmentScalar(node->getLeft()->getType(), dummySize));
+ } else
+ builder.accessChainPush(index, TranslateCoherent(node->getLeft()->getType()), node->getLeft()->getType().getBufferReferenceAlignment());
+ }
+ return false;
+ case glslang::EOpVectorSwizzle:
+ {
+ node->getLeft()->traverse(this);
+ std::vector<unsigned> swizzle;
+ convertSwizzle(*node->getRight()->getAsAggregate(), swizzle);
+ int dummySize;
+ builder.accessChainPushSwizzle(swizzle, convertGlslangToSpvType(node->getLeft()->getType()),
+ TranslateCoherent(node->getLeft()->getType()),
+ glslangIntermediate->getBaseAlignmentScalar(node->getLeft()->getType(), dummySize));
+ }
+ return false;
+ case glslang::EOpMatrixSwizzle:
+ logger->missingFunctionality("matrix swizzle");
+ return true;
+ case glslang::EOpLogicalOr:
+ case glslang::EOpLogicalAnd:
+ {
+ // These may require short circuiting, but can sometimes be done as straight
+ // binary operations. The right operand must be short circuited if it has
+ // side effects, and should probably be if it is complex.
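+ // Illustrative: "a && b" with a trivial right operand lowers to a plain
+ // binary op below, while "a && f(b)" (where f() may have side effects)
+ // lowers to a conditional branch that evaluates f(b) only when 'a' is true.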
+ if (isTrivial(node->getRight()->getAsTyped()))
+ break; // handle below as a normal binary operation
+ // otherwise, we need to do dynamic short circuiting on the right operand
+ spv::Id result = createShortCircuit(node->getOp(), *node->getLeft()->getAsTyped(), *node->getRight()->getAsTyped());
+ builder.clearAccessChain();
+ builder.setAccessChainRValue(result);
+ }
+ return false;
+ default:
+ break;
+ }
+
+ // Assume generic binary op...
+
+ // get left operand
+ builder.clearAccessChain();
+ node->getLeft()->traverse(this);
+ spv::Id left = accessChainLoad(node->getLeft()->getType());
+
+ // get right operand
+ builder.clearAccessChain();
+ node->getRight()->traverse(this);
+ spv::Id right = accessChainLoad(node->getRight()->getType());
+
+ // get result
+ OpDecorations decorations = { TranslatePrecisionDecoration(node->getOperationPrecision()),
+ TranslateNoContractionDecoration(node->getType().getQualifier()),
+ TranslateNonUniformDecoration(node->getType().getQualifier()) };
+ spv::Id result = createBinaryOperation(node->getOp(), decorations,
+ convertGlslangToSpvType(node->getType()), left, right,
+ node->getLeft()->getType().getBasicType());
+
+ builder.clearAccessChain();
+ if (! result) {
+ logger->missingFunctionality("unknown glslang binary operation");
+ return true; // pick up a child as the place-holder result
+ } else {
+ builder.setAccessChainRValue(result);
+ return false;
+ }
+}
+
+bool TGlslangToSpvTraverser::visitUnary(glslang::TVisit /* visit */, glslang::TIntermUnary* node)
+{
+ builder.setLine(node->getLoc().line, node->getLoc().getFilename());
+
+ SpecConstantOpModeGuard spec_constant_op_mode_setter(&builder);
+ if (node->getType().getQualifier().isSpecConstant())
+ spec_constant_op_mode_setter.turnOnSpecConstantOpMode();
+
+ spv::Id result = spv::NoResult;
+
+ // try texturing first
+ result = createImageTextureFunctionCall(node);
+ if (result != spv::NoResult) {
+ builder.clearAccessChain();
+ builder.setAccessChainRValue(result);
+
+ return false; // done with this node
+ }
+
+ // Non-texturing.
+
+ if (node->getOp() == glslang::EOpArrayLength) {
+ // Quite special; won't want to evaluate the operand.
+
+ // Currently, the front-end does not allow .length() on an array until it is sized,
+ // except for the last block member of an SSBO.
+ // TODO: If this changes, link-time sized arrays might show up here, and need their
+ // size extracted.
+
+ // Normal .length() would have been constant folded by the front-end.
+ // So, this has to be block.lastMember.length().
+ // SPV wants "block" and member number as the operands, go get them.
+
+ spv::Id length;
+ if (node->getOperand()->getType().isCoopMat()) {
+ spec_constant_op_mode_setter.turnOnSpecConstantOpMode();
+
+ spv::Id typeId = convertGlslangToSpvType(node->getOperand()->getType());
+ assert(builder.isCooperativeMatrixType(typeId));
+
+ length = builder.createCooperativeMatrixLength(typeId);
+ } else {
+ glslang::TIntermTyped* block = node->getOperand()->getAsBinaryNode()->getLeft();
+ block->traverse(this);
+ unsigned int member = node->getOperand()->getAsBinaryNode()->getRight()->getAsConstantUnion()->getConstArray()[0].getUConst();
+ length = builder.createArrayLength(builder.accessChainGetLValue(), member);
+ }
+
+ // GLSL semantics say the result of .length() is an int, while SPIR-V says
+ // signedness must be 0. So, convert from SPIR-V unsigned back to GLSL's
+ // AST expectation of a signed result.
+ if (glslangIntermediate->getSource() == glslang::EShSourceGlsl) {
+ if (builder.isInSpecConstCodeGenMode()) {
+ length = builder.createBinOp(spv::OpIAdd, builder.makeIntType(32), length, builder.makeIntConstant(0));
+ } else {
+ length = builder.createUnaryOp(spv::OpBitcast, builder.makeIntType(32), length);
+ }
+ }
+
+ builder.clearAccessChain();
+ builder.setAccessChainRValue(length);
+
+ return false;
+ }
+
+ // Start by evaluating the operand
+
+ // Does it need a swizzle inversion? If so, evaluation is inverted;
+ // operate first on the swizzle base, then apply the swizzle.
+ spv::Id invertedType = spv::NoType;
+ auto resultType = [&invertedType, &node, this](){ return invertedType != spv::NoType ? invertedType : convertGlslangToSpvType(node->getType()); };
+ if (node->getOp() == glslang::EOpInterpolateAtCentroid)
+ invertedType = getInvertedSwizzleType(*node->getOperand());
+
+ builder.clearAccessChain();
+ if (invertedType != spv::NoType)
+ node->getOperand()->getAsBinaryNode()->getLeft()->traverse(this);
+ else
+ node->getOperand()->traverse(this);
+
+ spv::Id operand = spv::NoResult;
+
+ if (node->getOp() == glslang::EOpAtomicCounterIncrement ||
+ node->getOp() == glslang::EOpAtomicCounterDecrement ||
+ node->getOp() == glslang::EOpAtomicCounter ||
+ node->getOp() == glslang::EOpInterpolateAtCentroid)
+ operand = builder.accessChainGetLValue(); // Special case l-value operands
+ else
+ operand = accessChainLoad(node->getOperand()->getType());
+
+ OpDecorations decorations = { TranslatePrecisionDecoration(node->getOperationPrecision()),
+ TranslateNoContractionDecoration(node->getType().getQualifier()),
+ TranslateNonUniformDecoration(node->getType().getQualifier()) };
+
+ // it could be a conversion
+ if (! result)
+ result = createConversion(node->getOp(), decorations, resultType(), operand, node->getOperand()->getBasicType());
+
+ // if not, then possibly an operation
+ if (! result)
+ result = createUnaryOperation(node->getOp(), decorations, resultType(), operand, node->getOperand()->getBasicType());
+
+ if (result) {
+ if (invertedType) {
+ result = createInvertedSwizzle(decorations.precision, *node->getOperand(), result);
+ builder.addDecoration(result, decorations.nonUniform);
+ }
+
+ builder.clearAccessChain();
+ builder.setAccessChainRValue(result);
+
+ return false; // done with this node
+ }
+
+ // it must be a special case, check...
+ switch (node->getOp()) {
+ case glslang::EOpPostIncrement:
+ case glslang::EOpPostDecrement:
+ case glslang::EOpPreIncrement:
+ case glslang::EOpPreDecrement:
+ {
+ // we need the integer value "1" or the floating point "1.0" to add/subtract
+ spv::Id one = 0;
+ if (node->getBasicType() == glslang::EbtFloat)
+ one = builder.makeFloatConstant(1.0F);
+ else if (node->getBasicType() == glslang::EbtDouble)
+ one = builder.makeDoubleConstant(1.0);
+ else if (node->getBasicType() == glslang::EbtFloat16)
+ one = builder.makeFloat16Constant(1.0F);
+ else if (node->getBasicType() == glslang::EbtInt8 || node->getBasicType() == glslang::EbtUint8)
+ one = builder.makeInt8Constant(1);
+ else if (node->getBasicType() == glslang::EbtInt16 || node->getBasicType() == glslang::EbtUint16)
+ one = builder.makeInt16Constant(1);
+ else if (node->getBasicType() == glslang::EbtInt64 || node->getBasicType() == glslang::EbtUint64)
+ one = builder.makeInt64Constant(1);
+ else
+ one = builder.makeIntConstant(1);
+ glslang::TOperator op;
+ if (node->getOp() == glslang::EOpPreIncrement ||
+ node->getOp() == glslang::EOpPostIncrement)
+ op = glslang::EOpAdd;
+ else
+ op = glslang::EOpSub;
+
+ spv::Id result = createBinaryOperation(op, decorations,
+ convertGlslangToSpvType(node->getType()), operand, one,
+ node->getType().getBasicType());
+ assert(result != spv::NoResult);
+
+ // The result of the operation is always stored; which value the expression
+ // consumes depends on the operator: pre-increment/decrement consumes the
+ // new value, while post-increment/decrement consumes the original operand.
+ // The consumed result is always an r-value.
+ builder.accessChainStore(result);
+ builder.clearAccessChain();
+ if (node->getOp() == glslang::EOpPreIncrement ||
+ node->getOp() == glslang::EOpPreDecrement)
+ builder.setAccessChainRValue(result);
+ else
+ builder.setAccessChainRValue(operand);
+ }
+
+ return false;
+
+ case glslang::EOpEmitStreamVertex:
+ builder.createNoResultOp(spv::OpEmitStreamVertex, operand);
+ return false;
+ case glslang::EOpEndStreamPrimitive:
+ builder.createNoResultOp(spv::OpEndStreamPrimitive, operand);
+ return false;
+
+ default:
+ logger->missingFunctionality("unknown glslang unary");
+ return true; // pick up operand as placeholder result
+ }
+}
+
+bool TGlslangToSpvTraverser::visitAggregate(glslang::TVisit visit, glslang::TIntermAggregate* node)
+{
+ SpecConstantOpModeGuard spec_constant_op_mode_setter(&builder);
+ if (node->getType().getQualifier().isSpecConstant())
+ spec_constant_op_mode_setter.turnOnSpecConstantOpMode();
+
+ spv::Id result = spv::NoResult;
+ spv::Id invertedType = spv::NoType; // to use to override the natural type of the node
+ auto resultType = [&invertedType, &node, this](){ return invertedType != spv::NoType ? invertedType : convertGlslangToSpvType(node->getType()); };
+
+ // try texturing
+ result = createImageTextureFunctionCall(node);
+ if (result != spv::NoResult) {
+ builder.clearAccessChain();
+ builder.setAccessChainRValue(result);
+
+ return false;
+ } else if (node->getOp() == glslang::EOpImageStore ||
+#ifdef AMD_EXTENSIONS
+ node->getOp() == glslang::EOpImageStoreLod ||
+#endif
+ node->getOp() == glslang::EOpImageAtomicStore) {
+ // "imageStore" is a special case, which has no result
+ return false;
+ }
+
+ glslang::TOperator binOp = glslang::EOpNull;
+ bool reduceComparison = true;
+ bool isMatrix = false;
+ bool noReturnValue = false;
+ bool atomic = false;
+
+ assert(node->getOp());
+
+ spv::Decoration precision = TranslatePrecisionDecoration(node->getOperationPrecision());
+
+ switch (node->getOp()) {
+ case glslang::EOpSequence:
+ {
+ if (preVisit)
+ ++sequenceDepth;
+ else
+ --sequenceDepth;
+
+ if (sequenceDepth == 1) {
+ // If this is the parent node of all the functions, we want to see them
+ // early, so all call points have actual SPIR-V functions to reference.
+ // In all cases, still let the traverser visit the children for us.
+ makeFunctions(node->getAsAggregate()->getSequence());
+
+ // Also, we want all global initializers to go into the beginning of the entry point, before
+ // anything else gets there, so visit out of order, doing them all now.
+ makeGlobalInitializers(node->getAsAggregate()->getSequence());
+
+ // Initializers are done, don't want to visit again, but functions and link objects need to be processed,
+ // so do them manually.
+ visitFunctions(node->getAsAggregate()->getSequence());
+
+ return false;
+ }
+
+ return true;
+ }
+ case glslang::EOpLinkerObjects:
+ {
+ if (visit == glslang::EvPreVisit)
+ linkageOnly = true;
+ else
+ linkageOnly = false;
+
+ return true;
+ }
+ case glslang::EOpComma:
+ {
+ // Processing from left to right naturally leaves the right-most
+ // operand's result lying around in the access chain.
+ glslang::TIntermSequence& glslangOperands = node->getSequence();
+ for (int i = 0; i < (int)glslangOperands.size(); ++i)
+ glslangOperands[i]->traverse(this);
+
+ return false;
+ }
+ case glslang::EOpFunction:
+ if (visit == glslang::EvPreVisit) {
+ if (isShaderEntryPoint(node)) {
+ inEntryPoint = true;
+ builder.setBuildPoint(shaderEntry->getLastBlock());
+ currentFunction = shaderEntry;
+ } else {
+ handleFunctionEntry(node);
+ }
+ } else {
+ if (inEntryPoint)
+ entryPointTerminated = true;
+ builder.leaveFunction();
+ inEntryPoint = false;
+ }
+
+ return true;
+ case glslang::EOpParameters:
+ // Parameters will have been consumed by EOpFunction processing, but not
+ // the body, so we still visited the function node's children, making this
+ // child redundant.
+ return false;
+ case glslang::EOpFunctionCall:
+ {
+ builder.setLine(node->getLoc().line, node->getLoc().getFilename());
+ if (node->isUserDefined())
+ result = handleUserFunctionCall(node);
+ // assert(result); // this can happen for bad shaders because the call graph completeness checking is not yet done
+ if (result) {
+ builder.clearAccessChain();
+ builder.setAccessChainRValue(result);
+ } else
+ logger->missingFunctionality("missing user function; linker needs to catch that");
+
+ return false;
+ }
+ case glslang::EOpConstructMat2x2:
+ case glslang::EOpConstructMat2x3:
+ case glslang::EOpConstructMat2x4:
+ case glslang::EOpConstructMat3x2:
+ case glslang::EOpConstructMat3x3:
+ case glslang::EOpConstructMat3x4:
+ case glslang::EOpConstructMat4x2:
+ case glslang::EOpConstructMat4x3:
+ case glslang::EOpConstructMat4x4:
+ case glslang::EOpConstructDMat2x2:
+ case glslang::EOpConstructDMat2x3:
+ case glslang::EOpConstructDMat2x4:
+ case glslang::EOpConstructDMat3x2:
+ case glslang::EOpConstructDMat3x3:
+ case glslang::EOpConstructDMat3x4:
+ case glslang::EOpConstructDMat4x2:
+ case glslang::EOpConstructDMat4x3:
+ case glslang::EOpConstructDMat4x4:
+ case glslang::EOpConstructIMat2x2:
+ case glslang::EOpConstructIMat2x3:
+ case glslang::EOpConstructIMat2x4:
+ case glslang::EOpConstructIMat3x2:
+ case glslang::EOpConstructIMat3x3:
+ case glslang::EOpConstructIMat3x4:
+ case glslang::EOpConstructIMat4x2:
+ case glslang::EOpConstructIMat4x3:
+ case glslang::EOpConstructIMat4x4:
+ case glslang::EOpConstructUMat2x2:
+ case glslang::EOpConstructUMat2x3:
+ case glslang::EOpConstructUMat2x4:
+ case glslang::EOpConstructUMat3x2:
+ case glslang::EOpConstructUMat3x3:
+ case glslang::EOpConstructUMat3x4:
+ case glslang::EOpConstructUMat4x2:
+ case glslang::EOpConstructUMat4x3:
+ case glslang::EOpConstructUMat4x4:
+ case glslang::EOpConstructBMat2x2:
+ case glslang::EOpConstructBMat2x3:
+ case glslang::EOpConstructBMat2x4:
+ case glslang::EOpConstructBMat3x2:
+ case glslang::EOpConstructBMat3x3:
+ case glslang::EOpConstructBMat3x4:
+ case glslang::EOpConstructBMat4x2:
+ case glslang::EOpConstructBMat4x3:
+ case glslang::EOpConstructBMat4x4:
+ case glslang::EOpConstructF16Mat2x2:
+ case glslang::EOpConstructF16Mat2x3:
+ case glslang::EOpConstructF16Mat2x4:
+ case glslang::EOpConstructF16Mat3x2:
+ case glslang::EOpConstructF16Mat3x3:
+ case glslang::EOpConstructF16Mat3x4:
+ case glslang::EOpConstructF16Mat4x2:
+ case glslang::EOpConstructF16Mat4x3:
+ case glslang::EOpConstructF16Mat4x4:
+ isMatrix = true;
+ // fall through
+ case glslang::EOpConstructFloat:
+ case glslang::EOpConstructVec2:
+ case glslang::EOpConstructVec3:
+ case glslang::EOpConstructVec4:
+ case glslang::EOpConstructDouble:
+ case glslang::EOpConstructDVec2:
+ case glslang::EOpConstructDVec3:
+ case glslang::EOpConstructDVec4:
+ case glslang::EOpConstructFloat16:
+ case glslang::EOpConstructF16Vec2:
+ case glslang::EOpConstructF16Vec3:
+ case glslang::EOpConstructF16Vec4:
+ case glslang::EOpConstructBool:
+ case glslang::EOpConstructBVec2:
+ case glslang::EOpConstructBVec3:
+ case glslang::EOpConstructBVec4:
+ case glslang::EOpConstructInt8:
+ case glslang::EOpConstructI8Vec2:
+ case glslang::EOpConstructI8Vec3:
+ case glslang::EOpConstructI8Vec4:
+ case glslang::EOpConstructUint8:
+ case glslang::EOpConstructU8Vec2:
+ case glslang::EOpConstructU8Vec3:
+ case glslang::EOpConstructU8Vec4:
+ case glslang::EOpConstructInt16:
+ case glslang::EOpConstructI16Vec2:
+ case glslang::EOpConstructI16Vec3:
+ case glslang::EOpConstructI16Vec4:
+ case glslang::EOpConstructUint16:
+ case glslang::EOpConstructU16Vec2:
+ case glslang::EOpConstructU16Vec3:
+ case glslang::EOpConstructU16Vec4:
+ case glslang::EOpConstructInt:
+ case glslang::EOpConstructIVec2:
+ case glslang::EOpConstructIVec3:
+ case glslang::EOpConstructIVec4:
+ case glslang::EOpConstructUint:
+ case glslang::EOpConstructUVec2:
+ case glslang::EOpConstructUVec3:
+ case glslang::EOpConstructUVec4:
+ case glslang::EOpConstructInt64:
+ case glslang::EOpConstructI64Vec2:
+ case glslang::EOpConstructI64Vec3:
+ case glslang::EOpConstructI64Vec4:
+ case glslang::EOpConstructUint64:
+ case glslang::EOpConstructU64Vec2:
+ case glslang::EOpConstructU64Vec3:
+ case glslang::EOpConstructU64Vec4:
+ case glslang::EOpConstructStruct:
+ case glslang::EOpConstructTextureSampler:
+ case glslang::EOpConstructReference:
+ case glslang::EOpConstructCooperativeMatrix:
+ {
+ builder.setLine(node->getLoc().line, node->getLoc().getFilename());
+ std::vector<spv::Id> arguments;
+ translateArguments(*node, arguments);
+ spv::Id constructed;
+ if (node->getOp() == glslang::EOpConstructTextureSampler)
+ constructed = builder.createOp(spv::OpSampledImage, resultType(), arguments);
+ else if (node->getOp() == glslang::EOpConstructStruct ||
+ node->getOp() == glslang::EOpConstructCooperativeMatrix ||
+ node->getType().isArray()) {
+ std::vector<spv::Id> constituents;
+ for (int c = 0; c < (int)arguments.size(); ++c)
+ constituents.push_back(arguments[c]);
+ constructed = builder.createCompositeConstruct(resultType(), constituents);
+ } else if (isMatrix)
+ constructed = builder.createMatrixConstructor(precision, arguments, resultType());
+ else
+ constructed = builder.createConstructor(precision, arguments, resultType());
+
+ builder.clearAccessChain();
+ builder.setAccessChainRValue(constructed);
+
+ return false;
+ }
+
+ // These six are component-wise compares with component-wise results.
+ // Forward on to createBinaryOperation(), requesting a vector result.
+ case glslang::EOpLessThan:
+ case glslang::EOpGreaterThan:
+ case glslang::EOpLessThanEqual:
+ case glslang::EOpGreaterThanEqual:
+ case glslang::EOpVectorEqual:
+ case glslang::EOpVectorNotEqual:
+ {
+ // Map the operation to a binary op, requesting a component-wise result;
+ // each of these cases maps directly to itself.
+ binOp = node->getOp();
+ reduceComparison = false;
+
+ break;
+ }
+ case glslang::EOpMul:
+ // component-wise matrix multiply
+ binOp = glslang::EOpMul;
+ break;
+ case glslang::EOpOuterProduct:
+ // two vectors multiplied to make a matrix
+ binOp = glslang::EOpOuterProduct;
+ break;
+ case glslang::EOpDot:
+ {
+ // for scalar dot product, use multiply
+ glslang::TIntermSequence& glslangOperands = node->getSequence();
+ if (glslangOperands[0]->getAsTyped()->getVectorSize() == 1)
+ binOp = glslang::EOpMul;
+ break;
+ }
+ case glslang::EOpMod:
+ // when an aggregate, this is the floating-point mod built-in function,
+ // which can be emitted by the one in createBinaryOperation()
+ binOp = glslang::EOpMod;
+ break;
+ case glslang::EOpEmitVertex:
+ case glslang::EOpEndPrimitive:
+ case glslang::EOpBarrier:
+ case glslang::EOpMemoryBarrier:
+ case glslang::EOpMemoryBarrierAtomicCounter:
+ case glslang::EOpMemoryBarrierBuffer:
+ case glslang::EOpMemoryBarrierImage:
+ case glslang::EOpMemoryBarrierShared:
+ case glslang::EOpGroupMemoryBarrier:
+ case glslang::EOpDeviceMemoryBarrier:
+ case glslang::EOpAllMemoryBarrierWithGroupSync:
+ case glslang::EOpDeviceMemoryBarrierWithGroupSync:
+ case glslang::EOpWorkgroupMemoryBarrier:
+ case glslang::EOpWorkgroupMemoryBarrierWithGroupSync:
+ case glslang::EOpSubgroupBarrier:
+ case glslang::EOpSubgroupMemoryBarrier:
+ case glslang::EOpSubgroupMemoryBarrierBuffer:
+ case glslang::EOpSubgroupMemoryBarrierImage:
+ case glslang::EOpSubgroupMemoryBarrierShared:
+ noReturnValue = true;
+ // These all have 0 operands and will naturally finish up in the 0-operand code below
+ break;
+
+ case glslang::EOpAtomicStore:
+ noReturnValue = true;
+ // fallthrough
+ case glslang::EOpAtomicLoad:
+ case glslang::EOpAtomicAdd:
+ case glslang::EOpAtomicMin:
+ case glslang::EOpAtomicMax:
+ case glslang::EOpAtomicAnd:
+ case glslang::EOpAtomicOr:
+ case glslang::EOpAtomicXor:
+ case glslang::EOpAtomicExchange:
+ case glslang::EOpAtomicCompSwap:
+ atomic = true;
+ break;
+
+ case glslang::EOpAtomicCounterAdd:
+ case glslang::EOpAtomicCounterSubtract:
+ case glslang::EOpAtomicCounterMin:
+ case glslang::EOpAtomicCounterMax:
+ case glslang::EOpAtomicCounterAnd:
+ case glslang::EOpAtomicCounterOr:
+ case glslang::EOpAtomicCounterXor:
+ case glslang::EOpAtomicCounterExchange:
+ case glslang::EOpAtomicCounterCompSwap:
+ builder.addExtension("SPV_KHR_shader_atomic_counter_ops");
+ builder.addCapability(spv::CapabilityAtomicStorageOps);
+ atomic = true;
+ break;
+
+#ifdef NV_EXTENSIONS
+ case glslang::EOpIgnoreIntersectionNV:
+ case glslang::EOpTerminateRayNV:
+ case glslang::EOpTraceNV:
+ case glslang::EOpExecuteCallableNV:
+ case glslang::EOpWritePackedPrimitiveIndices4x8NV:
+ noReturnValue = true;
+ break;
+#endif
+ case glslang::EOpCooperativeMatrixLoad:
+ case glslang::EOpCooperativeMatrixStore:
+ noReturnValue = true;
+ break;
+
+ default:
+ break;
+ }
+
+ //
+ // See if it maps to a regular operation.
+ //
+ if (binOp != glslang::EOpNull) {
+ glslang::TIntermTyped* left = node->getSequence()[0]->getAsTyped();
+ glslang::TIntermTyped* right = node->getSequence()[1]->getAsTyped();
+ assert(left && right);
+
+ builder.clearAccessChain();
+ left->traverse(this);
+ spv::Id leftId = accessChainLoad(left->getType());
+
+ builder.clearAccessChain();
+ right->traverse(this);
+ spv::Id rightId = accessChainLoad(right->getType());
+
+ builder.setLine(node->getLoc().line, node->getLoc().getFilename());
+ OpDecorations decorations = { precision,
+ TranslateNoContractionDecoration(node->getType().getQualifier()),
+ TranslateNonUniformDecoration(node->getType().getQualifier()) };
+ result = createBinaryOperation(binOp, decorations,
+ resultType(), leftId, rightId,
+ left->getType().getBasicType(), reduceComparison);
+
+ // the code above should only produce a binOp that createBinaryOperation() handles
+ assert(result != spv::NoResult);
+ builder.clearAccessChain();
+ builder.setAccessChainRValue(result);
+
+ return false;
+ }
+
+ //
+ // Create the list of operands.
+ //
+ glslang::TIntermSequence& glslangOperands = node->getSequence();
+ std::vector<spv::Id> operands;
+ std::vector<spv::IdImmediate> memoryAccessOperands;
+ for (int arg = 0; arg < (int)glslangOperands.size(); ++arg) {
+ // special case l-value operands; there are just a few
+ bool lvalue = false;
+ switch (node->getOp()) {
+ case glslang::EOpFrexp:
+ case glslang::EOpModf:
+ if (arg == 1)
+ lvalue = true;
+ break;
+ case glslang::EOpInterpolateAtSample:
+ case glslang::EOpInterpolateAtOffset:
+#ifdef AMD_EXTENSIONS
+ case glslang::EOpInterpolateAtVertex:
+#endif
+ if (arg == 0) {
+ lvalue = true;
+
+ // Does it need a swizzle inversion? If so, evaluation is inverted;
+ // operate first on the swizzle base, then apply the swizzle.
+ if (glslangOperands[0]->getAsOperator() &&
+ glslangOperands[0]->getAsOperator()->getOp() == glslang::EOpVectorSwizzle)
+ invertedType = convertGlslangToSpvType(glslangOperands[0]->getAsBinaryNode()->getLeft()->getType());
+ }
+ break;
+ case glslang::EOpAtomicAdd:
+ case glslang::EOpAtomicMin:
+ case glslang::EOpAtomicMax:
+ case glslang::EOpAtomicAnd:
+ case glslang::EOpAtomicOr:
+ case glslang::EOpAtomicXor:
+ case glslang::EOpAtomicExchange:
+ case glslang::EOpAtomicCompSwap:
+ case glslang::EOpAtomicLoad:
+ case glslang::EOpAtomicStore:
+ case glslang::EOpAtomicCounterAdd:
+ case glslang::EOpAtomicCounterSubtract:
+ case glslang::EOpAtomicCounterMin:
+ case glslang::EOpAtomicCounterMax:
+ case glslang::EOpAtomicCounterAnd:
+ case glslang::EOpAtomicCounterOr:
+ case glslang::EOpAtomicCounterXor:
+ case glslang::EOpAtomicCounterExchange:
+ case glslang::EOpAtomicCounterCompSwap:
+ if (arg == 0)
+ lvalue = true;
+ break;
+ case glslang::EOpAddCarry:
+ case glslang::EOpSubBorrow:
+ if (arg == 2)
+ lvalue = true;
+ break;
+ case glslang::EOpUMulExtended:
+ case glslang::EOpIMulExtended:
+ if (arg >= 2)
+ lvalue = true;
+ break;
+ case glslang::EOpCooperativeMatrixLoad:
+ if (arg == 0 || arg == 1)
+ lvalue = true;
+ break;
+ case glslang::EOpCooperativeMatrixStore:
+ if (arg == 1)
+ lvalue = true;
+ break;
+ default:
+ break;
+ }
+ builder.clearAccessChain();
+ if (invertedType != spv::NoType && arg == 0)
+ glslangOperands[0]->getAsBinaryNode()->getLeft()->traverse(this);
+ else
+ glslangOperands[arg]->traverse(this);
+
+ if (node->getOp() == glslang::EOpCooperativeMatrixLoad ||
+ node->getOp() == glslang::EOpCooperativeMatrixStore) {
+
+ if (arg == 1) {
+ // fold "element" parameter into the access chain
+ spv::Builder::AccessChain save = builder.getAccessChain();
+ builder.clearAccessChain();
+ glslangOperands[2]->traverse(this);
+
+ spv::Id elementId = accessChainLoad(glslangOperands[2]->getAsTyped()->getType());
+
+ builder.setAccessChain(save);
+
+ // Point to the first element of the array.
+ builder.accessChainPush(elementId, TranslateCoherent(glslangOperands[arg]->getAsTyped()->getType()),
+ glslangOperands[arg]->getAsTyped()->getType().getBufferReferenceAlignment());
+
+ spv::Builder::AccessChain::CoherentFlags coherentFlags = builder.getAccessChain().coherentFlags;
+ unsigned int alignment = builder.getAccessChain().alignment;
+
+ int memoryAccess = TranslateMemoryAccess(coherentFlags);
+ if (node->getOp() == glslang::EOpCooperativeMatrixLoad)
+ memoryAccess &= ~spv::MemoryAccessMakePointerAvailableKHRMask;
+ if (node->getOp() == glslang::EOpCooperativeMatrixStore)
+ memoryAccess &= ~spv::MemoryAccessMakePointerVisibleKHRMask;
+ if (builder.getStorageClass(builder.getAccessChain().base) == spv::StorageClassPhysicalStorageBufferEXT) {
+ memoryAccess = (spv::MemoryAccessMask)(memoryAccess | spv::MemoryAccessAlignedMask);
+ }
+
+ memoryAccessOperands.push_back(spv::IdImmediate(false, memoryAccess));
+
+ if (memoryAccess & spv::MemoryAccessAlignedMask) {
+ memoryAccessOperands.push_back(spv::IdImmediate(false, alignment));
+ }
+
+ if (memoryAccess & (spv::MemoryAccessMakePointerAvailableKHRMask | spv::MemoryAccessMakePointerVisibleKHRMask)) {
+ memoryAccessOperands.push_back(spv::IdImmediate(true, builder.makeUintConstant(TranslateMemoryScope(coherentFlags))));
+ }
+ } else if (arg == 2) {
+ continue;
+ }
+ }
+
+ if (lvalue)
+ operands.push_back(builder.accessChainGetLValue());
+ else {
+ builder.setLine(node->getLoc().line, node->getLoc().getFilename());
+ operands.push_back(accessChainLoad(glslangOperands[arg]->getAsTyped()->getType()));
+ }
+ }
+
+ builder.setLine(node->getLoc().line, node->getLoc().getFilename());
+ if (node->getOp() == glslang::EOpCooperativeMatrixLoad) {
+ std::vector<spv::IdImmediate> idImmOps;
+
+ idImmOps.push_back(spv::IdImmediate(true, operands[1])); // buf
+ idImmOps.push_back(spv::IdImmediate(true, operands[2])); // stride
+ idImmOps.push_back(spv::IdImmediate(true, operands[3])); // colMajor
+ idImmOps.insert(idImmOps.end(), memoryAccessOperands.begin(), memoryAccessOperands.end());
+ // get the pointee type
+ spv::Id typeId = builder.getContainedTypeId(builder.getTypeId(operands[0]));
+ assert(builder.isCooperativeMatrixType(typeId));
+ // do the op
+ spv::Id result = builder.createOp(spv::OpCooperativeMatrixLoadNV, typeId, idImmOps);
+ // store the result to the pointer (out param 'm')
+ builder.createStore(result, operands[0]);
+ result = 0;
+ } else if (node->getOp() == glslang::EOpCooperativeMatrixStore) {
+ std::vector<spv::IdImmediate> idImmOps;
+
+ idImmOps.push_back(spv::IdImmediate(true, operands[1])); // buf
+ idImmOps.push_back(spv::IdImmediate(true, operands[0])); // object
+ idImmOps.push_back(spv::IdImmediate(true, operands[2])); // stride
+ idImmOps.push_back(spv::IdImmediate(true, operands[3])); // colMajor
+ idImmOps.insert(idImmOps.end(), memoryAccessOperands.begin(), memoryAccessOperands.end());
+
+ builder.createNoResultOp(spv::OpCooperativeMatrixStoreNV, idImmOps);
+ result = 0;
+ } else if (atomic) {
+ // Handle all atomics
+ result = createAtomicOperation(node->getOp(), precision, resultType(), operands, node->getBasicType());
+ } else {
+ // Pass through to generic operations.
+ switch (glslangOperands.size()) {
+ case 0:
+ result = createNoArgOperation(node->getOp(), precision, resultType());
+ break;
+ case 1:
+ {
+ OpDecorations decorations = { precision,
+ TranslateNoContractionDecoration(node->getType().getQualifier()),
+ TranslateNonUniformDecoration(node->getType().getQualifier()) };
+ result = createUnaryOperation(
+ node->getOp(), decorations,
+ resultType(), operands.front(),
+ glslangOperands[0]->getAsTyped()->getBasicType());
+ }
+ break;
+ default:
+ result = createMiscOperation(node->getOp(), precision, resultType(), operands, node->getBasicType());
+ break;
+ }
+ if (invertedType)
+ result = createInvertedSwizzle(precision, *glslangOperands[0]->getAsBinaryNode(), result);
+ }
+
+ if (noReturnValue)
+ return false;
+
+ if (! result) {
+ logger->missingFunctionality("unknown glslang aggregate");
+ return true; // pick up a child as a placeholder operand
+ } else {
+ builder.clearAccessChain();
+ builder.setAccessChainRValue(result);
+ return false;
+ }
+}
+
+// This path handles both if-then-else and ?:
+// The if-then-else has a node type of void, while
+// ?: has either a void or a non-void node type
+//
+// Leaving the result, when not void:
+// GLSL only has r-values as the result of a ?:, but
+// if we have an l-value, that can be more efficient if it will
+// become the base of a complex r-value expression, because the
+// next layer copies r-values into memory to use the access-chain mechanism
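+//
+// Illustrative sketch (assuming scalar float operands and side-effect-free
+// symbols a and b): for
+//     float r = c ? a : b;
+// both sides can be evaluated and a single instruction emitted:
+//     %r = OpSelect %float %c %a %b
+// whereas "c ? f() : g()" needs real control flow (a conditional branch
+// into then/else blocks that store to a temporary), since only one side
+// may execute.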
+bool TGlslangToSpvTraverser::visitSelection(glslang::TVisit /* visit */, glslang::TIntermSelection* node)
+{
+ // see if OpSelect can handle it
+ const auto isOpSelectable = [&]() {
+ if (node->getBasicType() == glslang::EbtVoid)
+ return false;
+ // OpSelect can do all other types starting with SPV 1.4
+ if (glslangIntermediate->getSpv().spv < glslang::EShTargetSpv_1_4) {
+ // pre-1.4, only scalars and vectors can be handled
+ if ((!node->getType().isScalar() && !node->getType().isVector()))
+ return false;
+ }
+ return true;
+ };
+
+ // See if it is simple and safe, or required, to execute both sides.
+ // Crucially, side effects must be either semantically required or avoided,
+ // and there are performance trade-offs.
+ // Return true if required or a good idea (and safe) to execute both sides,
+ // false otherwise.
+ const auto bothSidesPolicy = [&]() -> bool {
+ // do we have both sides?
+ if (node->getTrueBlock() == nullptr ||
+ node->getFalseBlock() == nullptr)
+ return false;
+
+ // required? (unless we write additional code to look for side effects
+ // and make performance trade-offs if none are present)
+ if (!node->getShortCircuit())
+ return true;
+
+ // if not required to execute both, decide based on performance/practicality...
+
+ if (!isOpSelectable())
+ return false;
+
+ assert(node->getType() == node->getTrueBlock() ->getAsTyped()->getType() &&
+ node->getType() == node->getFalseBlock()->getAsTyped()->getType());
+
+ // return true if a single operand to ? : is okay for OpSelect
+ const auto operandOkay = [](glslang::TIntermTyped* node) {
+ return node->getAsSymbolNode() || node->getType().getQualifier().isConstant();
+ };
+
+ return operandOkay(node->getTrueBlock() ->getAsTyped()) &&
+ operandOkay(node->getFalseBlock()->getAsTyped());
+ };
+
+ spv::Id result = spv::NoResult; // upcoming result selecting between trueValue and falseValue
+ // emit the condition before doing anything with selection
+ node->getCondition()->traverse(this);
+ spv::Id condition = accessChainLoad(node->getCondition()->getType());
+
+ // Find a way of executing both sides and selecting the right result.
+ const auto executeBothSides = [&]() -> void {
+ // execute both sides
+ node->getTrueBlock()->traverse(this);
+ spv::Id trueValue = accessChainLoad(node->getTrueBlock()->getAsTyped()->getType());
+ node->getFalseBlock()->traverse(this);
+ spv::Id falseValue = accessChainLoad(node->getFalseBlock()->getAsTyped()->getType());
+
+ builder.setLine(node->getLoc().line, node->getLoc().getFilename());
+
+ // done if void
+ if (node->getBasicType() == glslang::EbtVoid)
+ return;
+
+ // emit code to select between trueValue and falseValue
+
+ // see if OpSelect can handle it
+ if (isOpSelectable()) {
+ // Emit OpSelect for this selection.
+
+ // smear condition to vector, if necessary (AST is always scalar)
+ // Before 1.4, smear like for mix(), starting with 1.4, keep it scalar
+ if (glslangIntermediate->getSpv().spv < glslang::EShTargetSpv_1_4 && builder.isVector(trueValue)) {
+ condition = builder.smearScalar(spv::NoPrecision, condition,
+ builder.makeVectorType(builder.makeBoolType(),
+ builder.getNumComponents(trueValue)));
+ }
+
+ // OpSelect
+ result = builder.createTriOp(spv::OpSelect,
+ convertGlslangToSpvType(node->getType()), condition,
+ trueValue, falseValue);
+
+ builder.clearAccessChain();
+ builder.setAccessChainRValue(result);
+ } else {
+ // We need control flow to select the result.
+ // TODO: Once SPIR-V OpSelect allows arbitrary types, eliminate this path.
+ result = builder.createVariable(spv::StorageClassFunction, convertGlslangToSpvType(node->getType()));
+
+ // Selection control:
+ const spv::SelectionControlMask control = TranslateSelectionControl(*node);
+
+ // make an "if" based on the value created by the condition
+ spv::Builder::If ifBuilder(condition, control, builder);
+
+ // emit the "then" statement
+ builder.createStore(trueValue, result);
+ ifBuilder.makeBeginElse();
+ // emit the "else" statement
+ builder.createStore(falseValue, result);
+
+ // finish off the control flow
+ ifBuilder.makeEndIf();
+
+ builder.clearAccessChain();
+ builder.setAccessChainLValue(result);
+ }
+ };
+
+ // Execute the one side needed, as per the condition
+ const auto executeOneSide = [&]() {
+ // Always emit control flow.
+ if (node->getBasicType() != glslang::EbtVoid)
+ result = builder.createVariable(spv::StorageClassFunction, convertGlslangToSpvType(node->getType()));
+
+ // Selection control:
+ const spv::SelectionControlMask control = TranslateSelectionControl(*node);
+
+ // make an "if" based on the value created by the condition
+ spv::Builder::If ifBuilder(condition, control, builder);
+
+ // emit the "then" statement
+ if (node->getTrueBlock() != nullptr) {
+ node->getTrueBlock()->traverse(this);
+ if (result != spv::NoResult)
+ builder.createStore(accessChainLoad(node->getTrueBlock()->getAsTyped()->getType()), result);
+ }
+
+ if (node->getFalseBlock() != nullptr) {
+ ifBuilder.makeBeginElse();
+ // emit the "else" statement
+ node->getFalseBlock()->traverse(this);
+ if (result != spv::NoResult)
+ builder.createStore(accessChainLoad(node->getFalseBlock()->getAsTyped()->getType()), result);
+ }
+
+ // finish off the control flow
+ ifBuilder.makeEndIf();
+
+ if (result != spv::NoResult) {
+ builder.clearAccessChain();
+ builder.setAccessChainLValue(result);
+ }
+ };
+
+ // Try for OpSelect (or a requirement to execute both sides)
+ if (bothSidesPolicy()) {
+ SpecConstantOpModeGuard spec_constant_op_mode_setter(&builder);
+ if (node->getType().getQualifier().isSpecConstant())
+ spec_constant_op_mode_setter.turnOnSpecConstantOpMode();
+ executeBothSides();
+ } else
+ executeOneSide();
+
+ return false;
+}
+
+bool TGlslangToSpvTraverser::visitSwitch(glslang::TVisit /* visit */, glslang::TIntermSwitch* node)
+{
+ // emit and get the condition before doing anything with switch
+ node->getCondition()->traverse(this);
+ spv::Id selector = accessChainLoad(node->getCondition()->getAsTyped()->getType());
+
+ // Selection control:
+ const spv::SelectionControlMask control = TranslateSwitchControl(*node);
+
+ // browse the children to sort out code segments
+ int defaultSegment = -1;
+ std::vector<TIntermNode*> codeSegments;
+ glslang::TIntermSequence& sequence = node->getBody()->getSequence();
+ std::vector<int> caseValues;
+ std::vector<int> valueIndexToSegment(sequence.size()); // note: probably not all are used, it is an overestimate
+ for (glslang::TIntermSequence::iterator c = sequence.begin(); c != sequence.end(); ++c) {
+ TIntermNode* child = *c;
+ if (child->getAsBranchNode() && child->getAsBranchNode()->getFlowOp() == glslang::EOpDefault)
+ defaultSegment = (int)codeSegments.size();
+ else if (child->getAsBranchNode() && child->getAsBranchNode()->getFlowOp() == glslang::EOpCase) {
+ valueIndexToSegment[caseValues.size()] = (int)codeSegments.size();
+ caseValues.push_back(child->getAsBranchNode()->getExpression()->getAsConstantUnion()->getConstArray()[0].getIConst());
+ } else
+ codeSegments.push_back(child);
+ }
+
+ // handle the case where the last code segment is missing, due to no code
+ // statements between the last case and the end of the switch statement
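+ // For example (illustrative):
+ //     switch (x) { case 0: foo(); break; case 1: }
+ // leaves "case 1" labeling a segment with no statements, so a null
+ // segment is appended and later emitted as a plain switch break.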
+ if ((caseValues.size() && (int)codeSegments.size() == valueIndexToSegment[caseValues.size() - 1]) ||
+ (int)codeSegments.size() == defaultSegment)
+ codeSegments.push_back(nullptr);
+
+ // make the switch statement
+ std::vector<spv::Block*> segmentBlocks; // returned, as the blocks allocated in the call
+ builder.makeSwitch(selector, control, (int)codeSegments.size(), caseValues, valueIndexToSegment, defaultSegment, segmentBlocks);
+
+ // emit all the code in the segments
+ breakForLoop.push(false);
+ for (unsigned int s = 0; s < codeSegments.size(); ++s) {
+ builder.nextSwitchSegment(segmentBlocks, s);
+ if (codeSegments[s])
+ codeSegments[s]->traverse(this);
+ else
+ builder.addSwitchBreak();
+ }
+ breakForLoop.pop();
+
+ builder.endSwitch(segmentBlocks);
+
+ return false;
+}
+
+void TGlslangToSpvTraverser::visitConstantUnion(glslang::TIntermConstantUnion* node)
+{
+ int nextConst = 0;
+ spv::Id constant = createSpvConstantFromConstUnionArray(node->getType(), node->getConstArray(), nextConst, false);
+
+ builder.clearAccessChain();
+ builder.setAccessChainRValue(constant);
+}
+
+bool TGlslangToSpvTraverser::visitLoop(glslang::TVisit /* visit */, glslang::TIntermLoop* node)
+{
+ auto blocks = builder.makeNewLoop();
+ builder.createBranch(&blocks.head);
+
+ // Loop control:
+ std::vector<unsigned int> operands;
+ const spv::LoopControlMask control = TranslateLoopControl(*node, operands);
+
+ // Spec requires back edges to target header blocks, and every header block
+ // must dominate its merge block. Make a header block first to ensure these
+ // conditions are met. By definition, it will contain OpLoopMerge, followed
+ // by a block-ending branch. But we don't want to put any other body/test
+ // instructions in it, since the body/test may have arbitrary instructions,
+ // including merges of its own.
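+ //
+ // Sketch of the block structure built below for a test-first loop
+ // (e.g. "while"), for illustration only:
+ //     head:     OpLoopMerge %merge %continue ... ; branch to test
+ //     test:     conditional branch to body or merge
+ //     body:     loop body ; branch to continue
+ //     continue: terminal expression ; back edge to head
+ //     merge:    loop exit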
+ builder.setLine(node->getLoc().line, node->getLoc().getFilename());
+ builder.setBuildPoint(&blocks.head);
+ builder.createLoopMerge(&blocks.merge, &blocks.continue_target, control, operands);
+ if (node->testFirst() && node->getTest()) {
+ spv::Block& test = builder.makeNewBlock();
+ builder.createBranch(&test);
+
+ builder.setBuildPoint(&test);
+ node->getTest()->traverse(this);
+ spv::Id condition = accessChainLoad(node->getTest()->getType());
+ builder.createConditionalBranch(condition, &blocks.body, &blocks.merge);
+
+ builder.setBuildPoint(&blocks.body);
+ breakForLoop.push(true);
+ if (node->getBody())
+ node->getBody()->traverse(this);
+ builder.createBranch(&blocks.continue_target);
+ breakForLoop.pop();
+
+ builder.setBuildPoint(&blocks.continue_target);
+ if (node->getTerminal())
+ node->getTerminal()->traverse(this);
+ builder.createBranch(&blocks.head);
+ } else {
+ builder.setLine(node->getLoc().line, node->getLoc().getFilename());
+ builder.createBranch(&blocks.body);
+
+ breakForLoop.push(true);
+ builder.setBuildPoint(&blocks.body);
+ if (node->getBody())
+ node->getBody()->traverse(this);
+ builder.createBranch(&blocks.continue_target);
+ breakForLoop.pop();
+
+ builder.setBuildPoint(&blocks.continue_target);
+ if (node->getTerminal())
+ node->getTerminal()->traverse(this);
+ if (node->getTest()) {
+ node->getTest()->traverse(this);
+ spv::Id condition =
+ accessChainLoad(node->getTest()->getType());
+ builder.createConditionalBranch(condition, &blocks.head, &blocks.merge);
+ } else {
+ // TODO: unless there was a break/return/discard instruction
+ // somewhere in the body, this is an infinite loop, so we should
+ // issue a warning.
+ builder.createBranch(&blocks.head);
+ }
+ }
+ builder.setBuildPoint(&blocks.merge);
+ builder.closeLoop();
+ return false;
+}
+
+bool TGlslangToSpvTraverser::visitBranch(glslang::TVisit /* visit */, glslang::TIntermBranch* node)
+{
+ if (node->getExpression())
+ node->getExpression()->traverse(this);
+
+ builder.setLine(node->getLoc().line, node->getLoc().getFilename());
+
+ switch (node->getFlowOp()) {
+ case glslang::EOpKill:
+ builder.makeDiscard();
+ break;
+ case glslang::EOpBreak:
+ if (breakForLoop.top())
+ builder.createLoopExit();
+ else
+ builder.addSwitchBreak();
+ break;
+ case glslang::EOpContinue:
+ builder.createLoopContinue();
+ break;
+ case glslang::EOpReturn:
+ if (node->getExpression()) {
+ const glslang::TType& glslangReturnType = node->getExpression()->getType();
+ spv::Id returnId = accessChainLoad(glslangReturnType);
+ if (builder.getTypeId(returnId) != currentFunction->getReturnType()) {
+ builder.clearAccessChain();
+ spv::Id copyId = builder.createVariable(spv::StorageClassFunction, currentFunction->getReturnType());
+ builder.setAccessChainLValue(copyId);
+ multiTypeStore(glslangReturnType, returnId);
+ returnId = builder.createLoad(copyId);
+ }
+ builder.makeReturn(false, returnId);
+ } else
+ builder.makeReturn(false);
+
+ builder.clearAccessChain();
+ break;
+
+ default:
+ assert(0);
+ break;
+ }
+
+ return false;
+}
+
+spv::Id TGlslangToSpvTraverser::createSpvVariable(const glslang::TIntermSymbol* node)
+{
+ // First, steer off constants, which are not SPIR-V variables, but
+ // can still have a mapping to a SPIR-V Id.
+ // This includes specialization constants.
+ if (node->getQualifier().isConstant()) {
+ spv::Id result = createSpvConstant(*node);
+ if (result != spv::NoResult)
+ return result;
+ }
+
+ // Now, handle actual variables
+ spv::StorageClass storageClass = TranslateStorageClass(node->getType());
+ spv::Id spvType = convertGlslangToSpvType(node->getType());
+
+ const bool contains16BitType = node->getType().containsBasicType(glslang::EbtFloat16) ||
+ node->getType().containsBasicType(glslang::EbtInt16) ||
+ node->getType().containsBasicType(glslang::EbtUint16);
+ if (contains16BitType) {
+ switch (storageClass) {
+ case spv::StorageClassInput:
+ case spv::StorageClassOutput:
+ addPre13Extension(spv::E_SPV_KHR_16bit_storage);
+ builder.addCapability(spv::CapabilityStorageInputOutput16);
+ break;
+ case spv::StorageClassPushConstant:
+ addPre13Extension(spv::E_SPV_KHR_16bit_storage);
+ builder.addCapability(spv::CapabilityStoragePushConstant16);
+ break;
+ case spv::StorageClassUniform:
+ addPre13Extension(spv::E_SPV_KHR_16bit_storage);
+ if (node->getType().getQualifier().storage == glslang::EvqBuffer)
+ builder.addCapability(spv::CapabilityStorageUniformBufferBlock16);
+ else
+ builder.addCapability(spv::CapabilityStorageUniform16);
+ break;
+ case spv::StorageClassStorageBuffer:
+ case spv::StorageClassPhysicalStorageBufferEXT:
+ addPre13Extension(spv::E_SPV_KHR_16bit_storage);
+ builder.addCapability(spv::CapabilityStorageUniformBufferBlock16);
+ break;
+ default:
+ break;
+ }
+ }
+
+ const bool contains8BitType = node->getType().containsBasicType(glslang::EbtInt8) ||
+ node->getType().containsBasicType(glslang::EbtUint8);
+ if (contains8BitType) {
+ if (storageClass == spv::StorageClassPushConstant) {
+ builder.addExtension(spv::E_SPV_KHR_8bit_storage);
+ builder.addCapability(spv::CapabilityStoragePushConstant8);
+ } else if (storageClass == spv::StorageClassUniform) {
+ builder.addExtension(spv::E_SPV_KHR_8bit_storage);
+ builder.addCapability(spv::CapabilityUniformAndStorageBuffer8BitAccess);
+ } else if (storageClass == spv::StorageClassStorageBuffer) {
+ builder.addExtension(spv::E_SPV_KHR_8bit_storage);
+ builder.addCapability(spv::CapabilityStorageBuffer8BitAccess);
+ }
+ }
+
+ const char* name = node->getName().c_str();
+ if (glslang::IsAnonymous(name))
+ name = "";
+
+ return builder.createVariable(storageClass, spvType, name);
+}
+
+// Return type Id of the sampled type.
+spv::Id TGlslangToSpvTraverser::getSampledType(const glslang::TSampler& sampler)
+{
+ switch (sampler.type) {
+ case glslang::EbtFloat: return builder.makeFloatType(32);
+#ifdef AMD_EXTENSIONS
+ case glslang::EbtFloat16:
+ builder.addExtension(spv::E_SPV_AMD_gpu_shader_half_float_fetch);
+ builder.addCapability(spv::CapabilityFloat16ImageAMD);
+ return builder.makeFloatType(16);
+#endif
+ case glslang::EbtInt: return builder.makeIntType(32);
+ case glslang::EbtUint: return builder.makeUintType(32);
+ default:
+ assert(0);
+ return builder.makeFloatType(32);
+ }
+}
+
+// If node is a swizzle operation, return the type that should be used if
+// the swizzle base is first consumed by another operation, before the swizzle
+// is applied.
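+//
+// For example (illustrative): in "interpolateAtOffset(v.zy, off)" the
+// interpolation must be applied to the whole variable v first, and the
+// .zy swizzle applied to that result, so this returns the type of v.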
+spv::Id TGlslangToSpvTraverser::getInvertedSwizzleType(const glslang::TIntermTyped& node)
+{
+ if (node.getAsOperator() &&
+ node.getAsOperator()->getOp() == glslang::EOpVectorSwizzle)
+ return convertGlslangToSpvType(node.getAsBinaryNode()->getLeft()->getType());
+ else
+ return spv::NoType;
+}
+
+// When inverting a swizzle with a parent op, this function
+// will apply the swizzle operation to a completed parent operation.
+spv::Id TGlslangToSpvTraverser::createInvertedSwizzle(spv::Decoration precision, const glslang::TIntermTyped& node, spv::Id parentResult)
+{
+ std::vector<unsigned> swizzle;
+ convertSwizzle(*node.getAsBinaryNode()->getRight()->getAsAggregate(), swizzle);
+ return builder.createRvalueSwizzle(precision, convertGlslangToSpvType(node.getType()), parentResult, swizzle);
+}
+
+// Convert a glslang AST swizzle node to a swizzle vector for building SPIR-V.
+void TGlslangToSpvTraverser::convertSwizzle(const glslang::TIntermAggregate& node, std::vector<unsigned>& swizzle)
+{
+ const glslang::TIntermSequence& swizzleSequence = node.getSequence();
+ for (int i = 0; i < (int)swizzleSequence.size(); ++i)
+ swizzle.push_back(swizzleSequence[i]->getAsConstantUnion()->getConstArray()[0].getIConst());
+}
+
+// Convert from a glslang type to an SPV type, by calling into a
+// recursive version of this function. This establishes the inherited
+// layout state rooted from the top-level type.
+spv::Id TGlslangToSpvTraverser::convertGlslangToSpvType(const glslang::TType& type, bool forwardReferenceOnly)
+{
+ return convertGlslangToSpvType(type, getExplicitLayout(type), type.getQualifier(), false, forwardReferenceOnly);
+}
+
+// Do full recursive conversion of an arbitrary glslang type to a SPIR-V Id.
+// explicitLayout can be kept the same throughout the hierarchical recursive walk.
+// Mutually recursive with convertGlslangStructToSpvType().
+spv::Id TGlslangToSpvTraverser::convertGlslangToSpvType(const glslang::TType& type,
+ glslang::TLayoutPacking explicitLayout, const glslang::TQualifier& qualifier,
+ bool lastBufferBlockMember, bool forwardReferenceOnly)
+{
+ spv::Id spvType = spv::NoResult;
+
+ switch (type.getBasicType()) {
+ case glslang::EbtVoid:
+ spvType = builder.makeVoidType();
+ assert (! type.isArray());
+ break;
+ case glslang::EbtFloat:
+ spvType = builder.makeFloatType(32);
+ break;
+ case glslang::EbtDouble:
+ spvType = builder.makeFloatType(64);
+ break;
+ case glslang::EbtFloat16:
+ spvType = builder.makeFloatType(16);
+ break;
+ case glslang::EbtBool:
+ // "transparent" bool doesn't exist in SPIR-V. The GLSL convention is
+ // a 32-bit int where non-0 means true.
+ if (explicitLayout != glslang::ElpNone)
+ spvType = builder.makeUintType(32);
+ else
+ spvType = builder.makeBoolType();
+ break;
+ case glslang::EbtInt8:
+ spvType = builder.makeIntType(8);
+ break;
+ case glslang::EbtUint8:
+ spvType = builder.makeUintType(8);
+ break;
+ case glslang::EbtInt16:
+ spvType = builder.makeIntType(16);
+ break;
+ case glslang::EbtUint16:
+ spvType = builder.makeUintType(16);
+ break;
+ case glslang::EbtInt:
+ spvType = builder.makeIntType(32);
+ break;
+ case glslang::EbtUint:
+ spvType = builder.makeUintType(32);
+ break;
+ case glslang::EbtInt64:
+ spvType = builder.makeIntType(64);
+ break;
+ case glslang::EbtUint64:
+ spvType = builder.makeUintType(64);
+ break;
+ case glslang::EbtAtomicUint:
+ builder.addCapability(spv::CapabilityAtomicStorage);
+ spvType = builder.makeUintType(32);
+ break;
+#ifdef NV_EXTENSIONS
+ case glslang::EbtAccStructNV:
+ spvType = builder.makeAccelerationStructureNVType();
+ break;
+#endif
+ case glslang::EbtSampler:
+ {
+ const glslang::TSampler& sampler = type.getSampler();
+ if (sampler.sampler) {
+ // pure sampler
+ spvType = builder.makeSamplerType();
+ } else {
+ // an image is present, make its type
+ spvType = builder.makeImageType(getSampledType(sampler), TranslateDimensionality(sampler), sampler.shadow, sampler.arrayed, sampler.ms,
+ sampler.image ? 2 : 1, TranslateImageFormat(type));
+ if (sampler.combined) {
+ // already has both image and sampler, make the combined type
+ spvType = builder.makeSampledImageType(spvType);
+ }
+ }
+ }
+ break;
+ case glslang::EbtStruct:
+ case glslang::EbtBlock:
+ {
+ // If we've seen this struct type, return it
+ const glslang::TTypeList* glslangMembers = type.getStruct();
+
+ // Try to share structs for different layouts, but not yet for other
+ // kinds of qualification (primarily not yet including interpolant qualification).
+ if (! HasNonLayoutQualifiers(type, qualifier))
+ spvType = structMap[explicitLayout][qualifier.layoutMatrix][glslangMembers];
+ if (spvType != spv::NoResult)
+ break;
+
+ // else, we haven't seen it...
+ if (type.getBasicType() == glslang::EbtBlock)
+ memberRemapper[glslangMembers].resize(glslangMembers->size());
+ spvType = convertGlslangStructToSpvType(type, glslangMembers, explicitLayout, qualifier);
+ }
+ break;
+ case glslang::EbtReference:
+ {
+ // Make the forward pointer, then recurse to convert the structure type, then
+ // patch up the forward pointer with a real pointer type.
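+ // For example (illustrative), a self-referential buffer reference such as
+ //     layout(buffer_reference) buffer Node { Node next; };
+ // can only be declared via an OpTypeForwardPointer that the real
+ // pointer type later resolves.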
+ if (forwardPointers.find(type.getReferentType()) == forwardPointers.end()) {
+ spv::Id forwardId = builder.makeForwardPointer(spv::StorageClassPhysicalStorageBufferEXT);
+ forwardPointers[type.getReferentType()] = forwardId;
+ }
+ spvType = forwardPointers[type.getReferentType()];
+ if (!forwardReferenceOnly) {
+ spv::Id referentType = convertGlslangToSpvType(*type.getReferentType());
+ builder.makePointerFromForwardPointer(spv::StorageClassPhysicalStorageBufferEXT,
+ forwardPointers[type.getReferentType()],
+ referentType);
+ }
+ }
+ break;
+ default:
+ assert(0);
+ break;
+ }
+
+ if (type.isMatrix())
+ spvType = builder.makeMatrixType(spvType, type.getMatrixCols(), type.getMatrixRows());
+ else {
+ // If this variable has a vector element count greater than 1, create a SPIR-V vector
+ if (type.getVectorSize() > 1)
+ spvType = builder.makeVectorType(spvType, type.getVectorSize());
+ }
+
+ if (type.isCoopMat()) {
+ builder.addCapability(spv::CapabilityCooperativeMatrixNV);
+ builder.addExtension(spv::E_SPV_NV_cooperative_matrix);
+ if (type.getBasicType() == glslang::EbtFloat16)
+ builder.addCapability(spv::CapabilityFloat16);
+
+ spv::Id scope = makeArraySizeId(*type.getTypeParameters(), 1);
+ spv::Id rows = makeArraySizeId(*type.getTypeParameters(), 2);
+ spv::Id cols = makeArraySizeId(*type.getTypeParameters(), 3);
+
+ spvType = builder.makeCooperativeMatrixType(spvType, scope, rows, cols);
+ }
+
+ if (type.isArray()) {
+ int stride = 0; // keep this 0 unless doing an explicit layout; 0 will mean no decoration, no stride
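+ // For example (illustrative): a "float a[4]" member gets ArrayStride 16
+ // under std140 (elements rounded up to vec4 alignment) but 4 under std430.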
+
+ // Do all but the outer dimension
+ if (type.getArraySizes()->getNumDims() > 1) {
+ // We need to decorate array strides for types needing explicit layout, except blocks.
+ if (explicitLayout != glslang::ElpNone && type.getBasicType() != glslang::EbtBlock) {
+ // Use a dummy glslang type for querying internal strides of
+ // arrays of arrays, but using just a one-dimensional array.
+ glslang::TType simpleArrayType(type, 0); // dereferenced type of the array
+ while (simpleArrayType.getArraySizes()->getNumDims() > 1)
+ simpleArrayType.getArraySizes()->dereference();
+
+ // Will compute the higher-order strides here, rather than making a whole
+ // pile of types and doing repetitive recursion on their contents.
+ stride = getArrayStride(simpleArrayType, explicitLayout, qualifier.layoutMatrix);
+ }
+
+ // make the arrays
+ for (int dim = type.getArraySizes()->getNumDims() - 1; dim > 0; --dim) {
+ spvType = builder.makeArrayType(spvType, makeArraySizeId(*type.getArraySizes(), dim), stride);
+ if (stride > 0)
+ builder.addDecoration(spvType, spv::DecorationArrayStride, stride);
+ stride *= type.getArraySizes()->getDimSize(dim);
+ }
+ } else {
+ // single-dimensional array, and don't yet have stride
+
+ // We need to decorate array strides for types needing explicit layout, except blocks.
+ if (explicitLayout != glslang::ElpNone && type.getBasicType() != glslang::EbtBlock)
+ stride = getArrayStride(type, explicitLayout, qualifier.layoutMatrix);
+ }
+
+ // Do the outer dimension, which might not be known for a runtime-sized array.
+ // (Unsized arrays that survive through linking will be runtime-sized arrays)
+ if (type.isSizedArray())
+ spvType = builder.makeArrayType(spvType, makeArraySizeId(*type.getArraySizes(), 0), stride);
+ else {
+ if (!lastBufferBlockMember) {
+ builder.addExtension("SPV_EXT_descriptor_indexing");
+ builder.addCapability(spv::CapabilityRuntimeDescriptorArrayEXT);
+ }
+ spvType = builder.makeRuntimeArray(spvType);
+ }
+ if (stride > 0)
+ builder.addDecoration(spvType, spv::DecorationArrayStride, stride);
+ }
+
+ return spvType;
+}
+
+// TODO: this functionality should exist at a higher level, in creating the AST
+//
+// Identify interface members that don't have their required extension turned on.
+//
+bool TGlslangToSpvTraverser::filterMember(const glslang::TType& member)
+{
+#ifdef NV_EXTENSIONS
+ auto& extensions = glslangIntermediate->getRequestedExtensions();
+
+ if (member.getFieldName() == "gl_SecondaryViewportMaskNV" &&
+ extensions.find("GL_NV_stereo_view_rendering") == extensions.end())
+ return true;
+ if (member.getFieldName() == "gl_SecondaryPositionNV" &&
+ extensions.find("GL_NV_stereo_view_rendering") == extensions.end())
+ return true;
+
+ if (glslangIntermediate->getStage() != EShLangMeshNV) {
+ if (member.getFieldName() == "gl_ViewportMask" &&
+ extensions.find("GL_NV_viewport_array2") == extensions.end())
+ return true;
+ if (member.getFieldName() == "gl_PositionPerViewNV" &&
+ extensions.find("GL_NVX_multiview_per_view_attributes") == extensions.end())
+ return true;
+ if (member.getFieldName() == "gl_ViewportMaskPerViewNV" &&
+ extensions.find("GL_NVX_multiview_per_view_attributes") == extensions.end())
+ return true;
+ }
+#endif
+
+ return false;
+}
+
+// Do full recursive conversion of a glslang structure (or block) type to a SPIR-V Id.
+// explicitLayout can be kept the same throughout the hierarchical recursive walk.
+// Mutually recursive with convertGlslangToSpvType().
+spv::Id TGlslangToSpvTraverser::convertGlslangStructToSpvType(const glslang::TType& type,
+ const glslang::TTypeList* glslangMembers,
+ glslang::TLayoutPacking explicitLayout,
+ const glslang::TQualifier& qualifier)
+{
+ // Create a vector of struct types for SPIR-V to consume
+ std::vector<spv::Id> spvMembers;
+ int memberDelta = 0; // how much the member's index changes from glslang to SPIR-V, normally 0, except sometimes for blocks
+ std::vector<std::pair<glslang::TType*, glslang::TQualifier> > deferredForwardPointers;
+ for (int i = 0; i < (int)glslangMembers->size(); i++) {
+ glslang::TType& glslangMember = *(*glslangMembers)[i].type;
+ if (glslangMember.hiddenMember()) {
+ ++memberDelta;
+ if (type.getBasicType() == glslang::EbtBlock)
+ memberRemapper[glslangMembers][i] = -1;
+ } else {
+ if (type.getBasicType() == glslang::EbtBlock) {
+ memberRemapper[glslangMembers][i] = i - memberDelta;
+ if (filterMember(glslangMember))
+ continue;
+ }
+ // modify just this child's view of the qualifier
+ glslang::TQualifier memberQualifier = glslangMember.getQualifier();
+ InheritQualifiers(memberQualifier, qualifier);
+
+ // manually inherit location
+ if (! memberQualifier.hasLocation() && qualifier.hasLocation())
+ memberQualifier.layoutLocation = qualifier.layoutLocation;
+
+ // recurse
+ bool lastBufferBlockMember = qualifier.storage == glslang::EvqBuffer &&
+ i == (int)glslangMembers->size() - 1;
+
+ // Make forward pointers for any pointer members, and create a list of members to
+ // convert to spirv types after creating the struct.
+ if (glslangMember.getBasicType() == glslang::EbtReference) {
+ if (forwardPointers.find(glslangMember.getReferentType()) == forwardPointers.end()) {
+ deferredForwardPointers.push_back(std::make_pair(&glslangMember, memberQualifier));
+ }
+ spvMembers.push_back(
+ convertGlslangToSpvType(glslangMember, explicitLayout, memberQualifier, lastBufferBlockMember, true));
+ } else {
+ spvMembers.push_back(
+ convertGlslangToSpvType(glslangMember, explicitLayout, memberQualifier, lastBufferBlockMember, false));
+ }
+ }
+ }
+
+ // Make the SPIR-V type
+ spv::Id spvType = builder.makeStructType(spvMembers, type.getTypeName().c_str());
+ if (! HasNonLayoutQualifiers(type, qualifier))
+ structMap[explicitLayout][qualifier.layoutMatrix][glslangMembers] = spvType;
+
+ // Decorate it
+ decorateStructType(type, glslangMembers, explicitLayout, qualifier, spvType);
+
+ for (int i = 0; i < (int)deferredForwardPointers.size(); ++i) {
+ auto it = deferredForwardPointers[i];
+ convertGlslangToSpvType(*it.first, explicitLayout, it.second, false);
+ }
+
+ return spvType;
+}
+
+void TGlslangToSpvTraverser::decorateStructType(const glslang::TType& type,
+ const glslang::TTypeList* glslangMembers,
+ glslang::TLayoutPacking explicitLayout,
+ const glslang::TQualifier& qualifier,
+ spv::Id spvType)
+{
+ // Name and decorate the non-hidden members
+ int offset = -1;
+ int locationOffset = 0; // for use within the members of this struct
+ for (int i = 0; i < (int)glslangMembers->size(); i++) {
+ glslang::TType& glslangMember = *(*glslangMembers)[i].type;
+ int member = i;
+ if (type.getBasicType() == glslang::EbtBlock) {
+ member = memberRemapper[glslangMembers][i];
+ if (filterMember(glslangMember))
+ continue;
+ }
+
+ // modify just this child's view of the qualifier
+ glslang::TQualifier memberQualifier = glslangMember.getQualifier();
+ InheritQualifiers(memberQualifier, qualifier);
+
+ // using -1 above to indicate a hidden member
+ if (member < 0)
+ continue;
+
+ builder.addMemberName(spvType, member, glslangMember.getFieldName().c_str());
+ builder.addMemberDecoration(spvType, member,
+ TranslateLayoutDecoration(glslangMember, memberQualifier.layoutMatrix));
+ builder.addMemberDecoration(spvType, member, TranslatePrecisionDecoration(glslangMember));
+ // Add interpolation and auxiliary storage decorations only to
+ // top-level members of Input and Output storage classes
+ if (type.getQualifier().storage == glslang::EvqVaryingIn ||
+ type.getQualifier().storage == glslang::EvqVaryingOut) {
+ if (type.getBasicType() == glslang::EbtBlock ||
+ glslangIntermediate->getSource() == glslang::EShSourceHlsl) {
+ builder.addMemberDecoration(spvType, member, TranslateInterpolationDecoration(memberQualifier));
+ builder.addMemberDecoration(spvType, member, TranslateAuxiliaryStorageDecoration(memberQualifier));
+#ifdef NV_EXTENSIONS
+ addMeshNVDecoration(spvType, member, memberQualifier);
+#endif
+ }
+ }
+ builder.addMemberDecoration(spvType, member, TranslateInvariantDecoration(memberQualifier));
+
+ if (type.getBasicType() == glslang::EbtBlock &&
+ qualifier.storage == glslang::EvqBuffer) {
+ // Add memory decorations only to top-level members of shader storage block
+ std::vector<spv::Decoration> memory;
+ TranslateMemoryDecoration(memberQualifier, memory, glslangIntermediate->usingVulkanMemoryModel());
+ for (unsigned int i = 0; i < memory.size(); ++i)
+ builder.addMemberDecoration(spvType, member, memory[i]);
+ }
+
+ // Location assignment was already completed correctly by the front end,
+ // just track whether a member needs to be decorated.
+ // Ignore member locations if the container is an array, as that's
+ // ill-specified and decisions have been made to not allow this.
+ if (! type.isArray() && memberQualifier.hasLocation())
+ builder.addMemberDecoration(spvType, member, spv::DecorationLocation, memberQualifier.layoutLocation);
+
+ if (qualifier.hasLocation()) // track for upcoming inheritance
+ locationOffset += glslangIntermediate->computeTypeLocationSize(
+ glslangMember, glslangIntermediate->getStage());
+
+ // component, XFB, others
+ if (glslangMember.getQualifier().hasComponent())
+ builder.addMemberDecoration(spvType, member, spv::DecorationComponent,
+ glslangMember.getQualifier().layoutComponent);
+ if (glslangMember.getQualifier().hasXfbOffset())
+ builder.addMemberDecoration(spvType, member, spv::DecorationOffset,
+ glslangMember.getQualifier().layoutXfbOffset);
+ else if (explicitLayout != glslang::ElpNone) {
+ // figure out what to do with offset, which is accumulating
+ int nextOffset;
+ updateMemberOffset(type, glslangMember, offset, nextOffset, explicitLayout, memberQualifier.layoutMatrix);
+ if (offset >= 0)
+ builder.addMemberDecoration(spvType, member, spv::DecorationOffset, offset);
+ offset = nextOffset;
+ }
+
+ if (glslangMember.isMatrix() && explicitLayout != glslang::ElpNone)
+ builder.addMemberDecoration(spvType, member, spv::DecorationMatrixStride,
+ getMatrixStride(glslangMember, explicitLayout, memberQualifier.layoutMatrix));
+
+ // built-in variable decorations
+ spv::BuiltIn builtIn = TranslateBuiltInDecoration(glslangMember.getQualifier().builtIn, true);
+ if (builtIn != spv::BuiltInMax)
+ builder.addMemberDecoration(spvType, member, spv::DecorationBuiltIn, (int)builtIn);
+
+ // nonuniform
+ builder.addMemberDecoration(spvType, member, TranslateNonUniformDecoration(glslangMember.getQualifier()));
+
+ if (glslangIntermediate->getHlslFunctionality1() && memberQualifier.semanticName != nullptr) {
+ builder.addExtension("SPV_GOOGLE_hlsl_functionality1");
+ builder.addMemberDecoration(spvType, member, (spv::Decoration)spv::DecorationHlslSemanticGOOGLE,
+ memberQualifier.semanticName);
+ }
+
+#ifdef NV_EXTENSIONS
+ if (builtIn == spv::BuiltInLayer) {
+ // SPV_NV_viewport_array2 extension
+ if (glslangMember.getQualifier().layoutViewportRelative){
+ builder.addMemberDecoration(spvType, member, (spv::Decoration)spv::DecorationViewportRelativeNV);
+ builder.addCapability(spv::CapabilityShaderViewportMaskNV);
+ builder.addExtension(spv::E_SPV_NV_viewport_array2);
+ }
+ if (glslangMember.getQualifier().layoutSecondaryViewportRelativeOffset != -2048){
+ builder.addMemberDecoration(spvType, member,
+ (spv::Decoration)spv::DecorationSecondaryViewportRelativeNV,
+ glslangMember.getQualifier().layoutSecondaryViewportRelativeOffset);
+ builder.addCapability(spv::CapabilityShaderStereoViewNV);
+ builder.addExtension(spv::E_SPV_NV_stereo_view_rendering);
+ }
+ }
+ if (glslangMember.getQualifier().layoutPassthrough) {
+ builder.addMemberDecoration(spvType, member, (spv::Decoration)spv::DecorationPassthroughNV);
+ builder.addCapability(spv::CapabilityGeometryShaderPassthroughNV);
+ builder.addExtension(spv::E_SPV_NV_geometry_shader_passthrough);
+ }
+#endif
+ }
+
+ // Decorate the structure
+ builder.addDecoration(spvType, TranslateLayoutDecoration(type, qualifier.layoutMatrix));
+ builder.addDecoration(spvType, TranslateBlockDecoration(type, glslangIntermediate->usingStorageBuffer()));
+}
+
+// Turn the expression forming the array size into an id.
+// This is not quite trivial, because of specialization constants.
+// Sometimes, a raw constant is turned into an Id, and sometimes
+// a specialization constant expression is.
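+//
+// For example (illustrative): given
+//     layout(constant_id = 7) const int N = 4;
+//     float a[N];
+// the dimension is sized by a node, so the traversal below yields the id
+// of the specialization-constant expression rather than a plain
+// front-end constant.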
+spv::Id TGlslangToSpvTraverser::makeArraySizeId(const glslang::TArraySizes& arraySizes, int dim)
+{
+ // First, see if this is sized with a node, meaning a specialization constant:
+ glslang::TIntermTyped* specNode = arraySizes.getDimNode(dim);
+ if (specNode != nullptr) {
+ builder.clearAccessChain();
+ specNode->traverse(this);
+ return accessChainLoad(specNode->getAsTyped()->getType());
+ }
+
+ // Otherwise, need a compile-time (front end) size, get it:
+ int size = arraySizes.getDimSize(dim);
+ assert(size > 0);
+ return builder.makeUintConstant(size);
+}
+
+// Wrap the builder's accessChainLoad to:
+// - localize handling of RelaxedPrecision
+// - use the SPIR-V inferred type instead of another conversion of the glslang type
+// (avoids unnecessary work and possible type punning for structures)
+// - do conversion of concrete to abstract type
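+//
+// For example (illustrative): a "bool b" member of a std140 uniform block
+// is laid out as a 32-bit uint, so the load yields the uint and the
+// abstract bool is recovered below with
+//     %b = OpINotEqual %bool %loaded %uint_0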
+spv::Id TGlslangToSpvTraverser::accessChainLoad(const glslang::TType& type)
+{
+ spv::Id nominalTypeId = builder.accessChainGetInferredType();
+
+ spv::Builder::AccessChain::CoherentFlags coherentFlags = builder.getAccessChain().coherentFlags;
+ coherentFlags |= TranslateCoherent(type);
+
+ unsigned int alignment = builder.getAccessChain().alignment;
+ alignment |= type.getBufferReferenceAlignment();
+
+ spv::Id loadedId = builder.accessChainLoad(TranslatePrecisionDecoration(type),
+ TranslateNonUniformDecoration(type.getQualifier()),
+ nominalTypeId,
+ spv::MemoryAccessMask(TranslateMemoryAccess(coherentFlags) & ~spv::MemoryAccessMakePointerAvailableKHRMask),
+ TranslateMemoryScope(coherentFlags),
+ alignment);
+
+ // Need to convert to abstract types when necessary
+ if (type.getBasicType() == glslang::EbtBool) {
+ if (builder.isScalarType(nominalTypeId)) {
+ // Conversion for bool
+ spv::Id boolType = builder.makeBoolType();
+ if (nominalTypeId != boolType)
+ loadedId = builder.createBinOp(spv::OpINotEqual, boolType, loadedId, builder.makeUintConstant(0));
+ } else if (builder.isVectorType(nominalTypeId)) {
+ // Conversion for bvec
+ int vecSize = builder.getNumTypeComponents(nominalTypeId);
+ spv::Id bvecType = builder.makeVectorType(builder.makeBoolType(), vecSize);
+ if (nominalTypeId != bvecType)
+ loadedId = builder.createBinOp(spv::OpINotEqual, bvecType, loadedId, makeSmearedConstant(builder.makeUintConstant(0), vecSize));
+ }
+ }
+
+ return loadedId;
+}
+
+// Wrap the builder's accessChainStore to:
+// - do conversion of concrete to abstract type
+//
+// Implicitly uses the existing builder.accessChain as the storage target.
+void TGlslangToSpvTraverser::accessChainStore(const glslang::TType& type, spv::Id rvalue)
+{
+ // Need to convert to abstract types when necessary
+ if (type.getBasicType() == glslang::EbtBool) {
+ spv::Id nominalTypeId = builder.accessChainGetInferredType();
+
+ if (builder.isScalarType(nominalTypeId)) {
+ // Conversion for bool
+ spv::Id boolType = builder.makeBoolType();
+ if (nominalTypeId != boolType) {
+ // keep these outside arguments, for deterministic order-of-evaluation
+ spv::Id one = builder.makeUintConstant(1);
+ spv::Id zero = builder.makeUintConstant(0);
+ rvalue = builder.createTriOp(spv::OpSelect, nominalTypeId, rvalue, one, zero);
+ } else if (builder.getTypeId(rvalue) != boolType)
+ rvalue = builder.createBinOp(spv::OpINotEqual, boolType, rvalue, builder.makeUintConstant(0));
+ } else if (builder.isVectorType(nominalTypeId)) {
+ // Conversion for bvec
+ int vecSize = builder.getNumTypeComponents(nominalTypeId);
+ spv::Id bvecType = builder.makeVectorType(builder.makeBoolType(), vecSize);
+ if (nominalTypeId != bvecType) {
+ // keep these outside arguments, for deterministic order-of-evaluation
+ spv::Id one = makeSmearedConstant(builder.makeUintConstant(1), vecSize);
+ spv::Id zero = makeSmearedConstant(builder.makeUintConstant(0), vecSize);
+ rvalue = builder.createTriOp(spv::OpSelect, nominalTypeId, rvalue, one, zero);
+ } else if (builder.getTypeId(rvalue) != bvecType)
+ rvalue = builder.createBinOp(spv::OpINotEqual, bvecType, rvalue,
+ makeSmearedConstant(builder.makeUintConstant(0), vecSize));
+ }
+ }
+
+ spv::Builder::AccessChain::CoherentFlags coherentFlags = builder.getAccessChain().coherentFlags;
+ coherentFlags |= TranslateCoherent(type);
+
+ unsigned int alignment = builder.getAccessChain().alignment;
+ alignment |= type.getBufferReferenceAlignment();
+
+ builder.accessChainStore(rvalue,
+ spv::MemoryAccessMask(TranslateMemoryAccess(coherentFlags) & ~spv::MemoryAccessMakePointerVisibleKHRMask),
+ TranslateMemoryScope(coherentFlags), alignment);
+}
+
+// For storing when types match at the glslang level, but might not match at the
+// SPIR-V level.
+//
+// This especially happens when a single glslang type expands to multiple
+// SPIR-V types, like a struct that is used in a member-undecorated way as well
+// as in a member-decorated way.
+//
+// NOTE: This function can handle any store request; if it's not special it
+// simplifies to a simple OpStore.
+//
+// Implicitly uses the existing builder.accessChain as the storage target.
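+//
+// For example (illustrative): "struct S { bool b; }" used both inside a
+// std140 block (b laid out as a uint, members offset-decorated) and as a
+// local variable (b is a true OpTypeBool) produces two distinct SPIR-V
+// struct types; storing one into the other has to go member by member.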
+void TGlslangToSpvTraverser::multiTypeStore(const glslang::TType& type, spv::Id rValue)
+{
+ // we only do the complex path here if it's an aggregate
+ if (! type.isStruct() && ! type.isArray()) {
+ accessChainStore(type, rValue);
+ return;
+ }
+
+ // and, it has to be a case of type aliasing
+ spv::Id rType = builder.getTypeId(rValue);
+ spv::Id lValue = builder.accessChainGetLValue();
+ spv::Id lType = builder.getContainedTypeId(builder.getTypeId(lValue));
+ if (lType == rType) {
+ accessChainStore(type, rValue);
+ return;
+ }
+
+ // Recursively (as needed) copy an aggregate type to a different aggregate type,
+ // where the two types were the same type in GLSL. This requires member
+ // by member copy, recursively.
+
+ // SPIR-V 1.4 added an instruction to help do this.
+ if (glslangIntermediate->getSpv().spv >= glslang::EShTargetSpv_1_4) {
+ // However, bool in uniform space is changed to int, so
+ // OpCopyLogical does not work for that.
+ // TODO: It would be more robust to do a full recursive verification of the types satisfying SPIR-V rules.
+ bool rBool = builder.containsType(builder.getTypeId(rValue), spv::OpTypeBool, 0);
+ bool lBool = builder.containsType(lType, spv::OpTypeBool, 0);
+ if (lBool == rBool) {
+ spv::Id logicalCopy = builder.createUnaryOp(spv::OpCopyLogical, lType, rValue);
+ accessChainStore(type, logicalCopy);
+ return;
+ }
+ }
+
+ // If an array, copy element by element.
+ if (type.isArray()) {
+ glslang::TType glslangElementType(type, 0);
+ spv::Id elementRType = builder.getContainedTypeId(rType);
+ for (int index = 0; index < type.getOuterArraySize(); ++index) {
+ // get the source member
+ spv::Id elementRValue = builder.createCompositeExtract(rValue, elementRType, index);
+
+ // set up the target storage
+ builder.clearAccessChain();
+ builder.setAccessChainLValue(lValue);
+ builder.accessChainPush(builder.makeIntConstant(index), TranslateCoherent(type), type.getBufferReferenceAlignment());
+
+ // store the member
+ multiTypeStore(glslangElementType, elementRValue);
+ }
+ } else {
+ assert(type.isStruct());
+
+ // loop over structure members
+ const glslang::TTypeList& members = *type.getStruct();
+ for (int m = 0; m < (int)members.size(); ++m) {
+ const glslang::TType& glslangMemberType = *members[m].type;
+
+ // get the source member
+ spv::Id memberRType = builder.getContainedTypeId(rType, m);
+ spv::Id memberRValue = builder.createCompositeExtract(rValue, memberRType, m);
+
+ // set up the target storage
+ builder.clearAccessChain();
+ builder.setAccessChainLValue(lValue);
+ builder.accessChainPush(builder.makeIntConstant(m), TranslateCoherent(type), type.getBufferReferenceAlignment());
+
+ // store the member
+ multiTypeStore(glslangMemberType, memberRValue);
+ }
+ }
+}
+
+// Decide whether or not this type should be
+// decorated with offsets and strides, and if so
+// whether std140 or std430 rules should be applied.
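+//
+// For example (illustrative): "layout(std140) uniform U { ... }" yields
+// ElpStd140, while a plain non-block struct yields ElpNone.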
+glslang::TLayoutPacking TGlslangToSpvTraverser::getExplicitLayout(const glslang::TType& type) const
+{
+ // has to be a block
+ if (type.getBasicType() != glslang::EbtBlock)
+ return glslang::ElpNone;
+
+ // has to be a uniform or buffer block or task in/out blocks
+ if (type.getQualifier().storage != glslang::EvqUniform &&
+ type.getQualifier().storage != glslang::EvqBuffer &&
+ !type.getQualifier().isTaskMemory())
+ return glslang::ElpNone;
+
+ // return the layout to use
+ switch (type.getQualifier().layoutPacking) {
+ case glslang::ElpStd140:
+ case glslang::ElpStd430:
+ case glslang::ElpScalar:
+ return type.getQualifier().layoutPacking;
+ default:
+ return glslang::ElpNone;
+ }
+}
+
+// Given an array type, returns the integer stride required for that array
+int TGlslangToSpvTraverser::getArrayStride(const glslang::TType& arrayType, glslang::TLayoutPacking explicitLayout, glslang::TLayoutMatrix matrixLayout)
+{
+ int size;
+ int stride;
+ glslangIntermediate->getMemberAlignment(arrayType, size, stride, explicitLayout, matrixLayout == glslang::ElmRowMajor);
+
+ return stride;
+}
+
+// Given a matrix type, or array (of array) of matrixes type, returns the integer stride required for that matrix
+// when used as a member of an interface block
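+//
+// For example (illustrative): a column-major mat3 under std140 has a
+// matrix stride of 16, each vec3 column being padded to vec4 alignment.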
+int TGlslangToSpvTraverser::getMatrixStride(const glslang::TType& matrixType, glslang::TLayoutPacking explicitLayout, glslang::TLayoutMatrix matrixLayout)
+{
+ glslang::TType elementType;
+ elementType.shallowCopy(matrixType);
+ elementType.clearArraySizes();
+
+ int size;
+ int stride;
+ glslangIntermediate->getMemberAlignment(elementType, size, stride, explicitLayout, matrixLayout == glslang::ElmRowMajor);
+
+ return stride;
+}
+
+// Given a member type of a struct, realign the current offset for it, and compute
+// the next (not yet aligned) offset for the next member, which will get aligned
+// on the next call.
+// 'currentOffset' should be passed in already initialized, ready to modify, and reflecting
+// the migration of data from nextOffset -> currentOffset. It should be -1 on the first call.
+// -1 means a non-forced member offset (no decoration needed).
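+//
+// Worked example (illustrative), std140 members "float f; vec3 v;":
+//   f: currentOffset -1 -> 0, memberSize 4, nextOffset 4
+//   v: member alignment 16, so 4 rounds up to 16; memberSize 12,
+//      nextOffset 28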
+void TGlslangToSpvTraverser::updateMemberOffset(const glslang::TType& structType, const glslang::TType& memberType, int& currentOffset, int& nextOffset,
+ glslang::TLayoutPacking explicitLayout, glslang::TLayoutMatrix matrixLayout)
+{
+ // this will get a positive value when deemed necessary
+ nextOffset = -1;
+
+ // override anything in currentOffset with user-set offset
+ if (memberType.getQualifier().hasOffset())
+ currentOffset = memberType.getQualifier().layoutOffset;
+
+ // It could be that current linker usage in glslang updated all the layoutOffset,
+ // in which case the following code does not matter. But, that's not quite right
+ // once cross-compilation unit GLSL validation is done, as the original user
+ // settings are needed in layoutOffset, and then the following will come into play.
+
+ if (explicitLayout == glslang::ElpNone) {
+ if (! memberType.getQualifier().hasOffset())
+ currentOffset = -1;
+
+ return;
+ }
+
+ // Getting this far means we need explicit offsets
+ if (currentOffset < 0)
+ currentOffset = 0;
+
+ // Now, currentOffset is valid (either 0, or from a previous nextOffset),
+ // but possibly not yet correctly aligned.
+
+ int memberSize;
+ int dummyStride;
+ int memberAlignment = glslangIntermediate->getMemberAlignment(memberType, memberSize, dummyStride, explicitLayout, matrixLayout == glslang::ElmRowMajor);
+
+ // Adjust alignment for HLSL rules
+ // TODO: make this consistent in early phases of code:
+ // adjusting this late means inconsistencies with earlier code, which for reflection is an issue
+ // Until reflection is brought in sync with these adjustments, don't apply to $Global,
+ // which is the most likely to rely on reflection, and least likely to rely on implicit layouts
+ if (glslangIntermediate->usingHlslOffsets() &&
+ ! memberType.isArray() && memberType.isVector() && structType.getTypeName().compare("$Global") != 0) {
+ int dummySize;
+ int componentAlignment = glslangIntermediate->getBaseAlignmentScalar(memberType, dummySize);
+ if (componentAlignment <= 4)
+ memberAlignment = componentAlignment;
+ }
+
+ // Bump up to member alignment
+ glslang::RoundToPow2(currentOffset, memberAlignment);
+
+ // Bump up to vec4 if there is a bad straddle
+ if (explicitLayout != glslang::ElpScalar && glslangIntermediate->improperStraddle(memberType, memberSize, currentOffset))
+ glslang::RoundToPow2(currentOffset, 16);
+
+ nextOffset = currentOffset + memberSize;
+}
+
+void TGlslangToSpvTraverser::declareUseOfStructMember(const glslang::TTypeList& members, int glslangMember)
+{
+ const glslang::TBuiltInVariable glslangBuiltIn = members[glslangMember].type->getQualifier().builtIn;
+ switch (glslangBuiltIn)
+ {
+ case glslang::EbvClipDistance:
+ case glslang::EbvCullDistance:
+ case glslang::EbvPointSize:
+#ifdef NV_EXTENSIONS
+ case glslang::EbvViewportMaskNV:
+ case glslang::EbvSecondaryPositionNV:
+ case glslang::EbvSecondaryViewportMaskNV:
+ case glslang::EbvPositionPerViewNV:
+ case glslang::EbvViewportMaskPerViewNV:
+ case glslang::EbvTaskCountNV:
+ case glslang::EbvPrimitiveCountNV:
+ case glslang::EbvPrimitiveIndicesNV:
+ case glslang::EbvClipDistancePerViewNV:
+ case glslang::EbvCullDistancePerViewNV:
+ case glslang::EbvLayerPerViewNV:
+ case glslang::EbvMeshViewCountNV:
+ case glslang::EbvMeshViewIndicesNV:
+#endif
+ // Generate the associated capability. Delegate to TranslateBuiltInDecoration.
+ // Alternately, we could just call this for any glslang built-in, since the
+ // capability already guards against duplicates.
+ TranslateBuiltInDecoration(glslangBuiltIn, false);
+ break;
+ default:
+ // Capabilities were already generated when the struct was declared.
+ break;
+ }
+}
+
+bool TGlslangToSpvTraverser::isShaderEntryPoint(const glslang::TIntermAggregate* node)
+{
+ return node->getName().compare(glslangIntermediate->getEntryPointMangledName().c_str()) == 0;
+}
+
+// Does parameter need a place to keep writes, separate from the original?
+// Assumes called after originalParam(), which filters out block/buffer/opaque-based
+// qualifiers such that we should have only in/out/inout/constreadonly here.
+bool TGlslangToSpvTraverser::writableParam(glslang::TStorageQualifier qualifier) const
+{
+ assert(qualifier == glslang::EvqIn ||
+ qualifier == glslang::EvqOut ||
+ qualifier == glslang::EvqInOut ||
+ qualifier == glslang::EvqConstReadOnly);
+ return qualifier != glslang::EvqConstReadOnly;
+}
+
+// Is parameter pass-by-original?
+bool TGlslangToSpvTraverser::originalParam(glslang::TStorageQualifier qualifier, const glslang::TType& paramType,
+ bool implicitThisParam)
+{
+ if (implicitThisParam) // implicit this
+ return true;
+ if (glslangIntermediate->getSource() == glslang::EShSourceHlsl)
+ return paramType.getBasicType() == glslang::EbtBlock;
+ return paramType.containsOpaque() || // sampler, etc.
+ (paramType.getBasicType() == glslang::EbtBlock && qualifier == glslang::EvqBuffer); // SSBO
+}
+
+// Make all the functions, skeletally, without actually visiting their bodies.
+void TGlslangToSpvTraverser::makeFunctions(const glslang::TIntermSequence& glslFunctions)
+{
+ const auto getParamDecorations = [&](std::vector<spv::Decoration>& decorations, const glslang::TType& type, bool useVulkanMemoryModel) {
+ spv::Decoration paramPrecision = TranslatePrecisionDecoration(type);
+ if (paramPrecision != spv::NoPrecision)
+ decorations.push_back(paramPrecision);
+ TranslateMemoryDecoration(type.getQualifier(), decorations, useVulkanMemoryModel);
+ if (type.getBasicType() == glslang::EbtReference) {
+ // Original and non-writable params pass the pointer directly and
+ // use restrict/aliased, others are stored to a pointer in Function
+ // memory and use RestrictPointer/AliasedPointer.
+ if (originalParam(type.getQualifier().storage, type, false) ||
+ !writableParam(type.getQualifier().storage)) {
+ decorations.push_back(type.getQualifier().restrict ? spv::DecorationRestrict : spv::DecorationAliased);
+ } else {
+ decorations.push_back(type.getQualifier().restrict ? spv::DecorationRestrictPointerEXT : spv::DecorationAliasedPointerEXT);
+ }
+ }
+ };
+
+ for (int f = 0; f < (int)glslFunctions.size(); ++f) {
+ glslang::TIntermAggregate* glslFunction = glslFunctions[f]->getAsAggregate();
+ if (! glslFunction || glslFunction->getOp() != glslang::EOpFunction || isShaderEntryPoint(glslFunction))
+ continue;
+
+ // We're on a user function. Set up the basic interface for the function now,
+ // so that it's available to call. Translating the body will happen later.
+ //
+ // Typically (except for a "const in" parameter), an address will be passed to the
+ // function. What it is an address of varies:
+ //
+ // - "in" parameters not marked as "const" can be written to without modifying the calling
+ // argument, so the write needs to go to a copy; hence the address of a copy works.
+ //
+ // - "const in" parameters can just be the r-value, as no writes need occur.
+ //
+ // - "out" and "inout" arguments can't be done as pointers to the calling argument, because
+ // GLSL has copy-in/copy-out semantics. They can be handled though with a pointer to a copy.
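+ //
+ // As an illustrative sketch (an assumption, not quoted from the source), a function
+ //     void f(const in float a, inout float b);
+ // is built here so that the SPIR-V parameter for a is a plain float r-value, while
+ // the parameter for b is a pointer into Function storage; the caller later copies
+ // b in before the call and back out after it.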
+
+ std::vector<spv::Id> paramTypes;
+ std::vector<std::vector<spv::Decoration>> paramDecorations; // list of decorations per parameter
+ glslang::TIntermSequence& parameters = glslFunction->getSequence()[0]->getAsAggregate()->getSequence();
+
+ bool implicitThis = (int)parameters.size() > 0 && parameters[0]->getAsSymbolNode()->getName() ==
+ glslangIntermediate->implicitThisName;
+
+ paramDecorations.resize(parameters.size());
+ for (int p = 0; p < (int)parameters.size(); ++p) {
+ const glslang::TType& paramType = parameters[p]->getAsTyped()->getType();
+ spv::Id typeId = convertGlslangToSpvType(paramType);
+ if (originalParam(paramType.getQualifier().storage, paramType, implicitThis && p == 0))
+ typeId = builder.makePointer(TranslateStorageClass(paramType), typeId);
+ else if (writableParam(paramType.getQualifier().storage))
+ typeId = builder.makePointer(spv::StorageClassFunction, typeId);
+ else
+ rValueParameters.insert(parameters[p]->getAsSymbolNode()->getId());
+ getParamDecorations(paramDecorations[p], paramType, glslangIntermediate->usingVulkanMemoryModel());
+ paramTypes.push_back(typeId);
+ }
+
+ spv::Block* functionBlock;
+ spv::Function *function = builder.makeFunctionEntry(TranslatePrecisionDecoration(glslFunction->getType()),
+ convertGlslangToSpvType(glslFunction->getType()),
+ glslFunction->getName().c_str(), paramTypes,
+ paramDecorations, &functionBlock);
+ if (implicitThis)
+ function->setImplicitThis();
+
+ // Track function to emit/call later
+ functionMap[glslFunction->getName().c_str()] = function;
+
+ // Set the parameter ids
+ for (int p = 0; p < (int)parameters.size(); ++p) {
+ symbolValues[parameters[p]->getAsSymbolNode()->getId()] = function->getParamId(p);
+ // give a name too
+ builder.addName(function->getParamId(p), parameters[p]->getAsSymbolNode()->getName().c_str());
+ }
+ }
+}
+
+// Process all the initializers, while skipping the functions and link objects
+void TGlslangToSpvTraverser::makeGlobalInitializers(const glslang::TIntermSequence& initializers)
+{
+ builder.setBuildPoint(shaderEntry->getLastBlock());
+ for (int i = 0; i < (int)initializers.size(); ++i) {
+ glslang::TIntermAggregate* initializer = initializers[i]->getAsAggregate();
+ if (initializer && initializer->getOp() != glslang::EOpFunction && initializer->getOp() != glslang::EOpLinkerObjects) {
+
+ // We're on a top-level node that's not a function. Treat as an initializer, whose
+ // code goes into the beginning of the entry point.
+ initializer->traverse(this);
+ }
+ }
+}
+
+// Process all the functions, while skipping initializers.
+void TGlslangToSpvTraverser::visitFunctions(const glslang::TIntermSequence& glslFunctions)
+{
+ for (int f = 0; f < (int)glslFunctions.size(); ++f) {
+ glslang::TIntermAggregate* node = glslFunctions[f]->getAsAggregate();
+ if (node && (node->getOp() == glslang::EOpFunction || node->getOp() == glslang::EOpLinkerObjects))
+ node->traverse(this);
+ }
+}
+
+void TGlslangToSpvTraverser::handleFunctionEntry(const glslang::TIntermAggregate* node)
+{
+ // SPIR-V functions should already be in the functionMap from the prepass
+ // that called makeFunctions().
+ currentFunction = functionMap[node->getName().c_str()];
+ spv::Block* functionBlock = currentFunction->getEntryBlock();
+ builder.setBuildPoint(functionBlock);
+}
+
+void TGlslangToSpvTraverser::translateArguments(const glslang::TIntermAggregate& node, std::vector<spv::Id>& arguments)
+{
+ const glslang::TIntermSequence& glslangArguments = node.getSequence();
+
+ glslang::TSampler sampler = {};
+ bool cubeCompare = false;
+#ifdef AMD_EXTENSIONS
+ bool f16ShadowCompare = false;
+#endif
+ if (node.isTexture() || node.isImage()) {
+ sampler = glslangArguments[0]->getAsTyped()->getType().getSampler();
+ cubeCompare = sampler.dim == glslang::EsdCube && sampler.arrayed && sampler.shadow;
+#ifdef AMD_EXTENSIONS
+ f16ShadowCompare = sampler.shadow && glslangArguments[1]->getAsTyped()->getType().getBasicType() == glslang::EbtFloat16;
+#endif
+ }
+
+ for (int i = 0; i < (int)glslangArguments.size(); ++i) {
+ builder.clearAccessChain();
+ glslangArguments[i]->traverse(this);
+
+ // Special case l-value operands
+ bool lvalue = false;
+ switch (node.getOp()) {
+ case glslang::EOpImageAtomicAdd:
+ case glslang::EOpImageAtomicMin:
+ case glslang::EOpImageAtomicMax:
+ case glslang::EOpImageAtomicAnd:
+ case glslang::EOpImageAtomicOr:
+ case glslang::EOpImageAtomicXor:
+ case glslang::EOpImageAtomicExchange:
+ case glslang::EOpImageAtomicCompSwap:
+ case glslang::EOpImageAtomicLoad:
+ case glslang::EOpImageAtomicStore:
+ if (i == 0)
+ lvalue = true;
+ break;
+ case glslang::EOpSparseImageLoad:
+ if ((sampler.ms && i == 3) || (! sampler.ms && i == 2))
+ lvalue = true;
+ break;
+#ifdef AMD_EXTENSIONS
+ case glslang::EOpSparseTexture:
+ if (((cubeCompare || f16ShadowCompare) && i == 3) || (! (cubeCompare || f16ShadowCompare) && i == 2))
+ lvalue = true;
+ break;
+ case glslang::EOpSparseTextureClamp:
+ if (((cubeCompare || f16ShadowCompare) && i == 4) || (! (cubeCompare || f16ShadowCompare) && i == 3))
+ lvalue = true;
+ break;
+ case glslang::EOpSparseTextureLod:
+ case glslang::EOpSparseTextureOffset:
+ if ((f16ShadowCompare && i == 4) || (! f16ShadowCompare && i == 3))
+ lvalue = true;
+ break;
+#else
+ case glslang::EOpSparseTexture:
+ if ((cubeCompare && i == 3) || (! cubeCompare && i == 2))
+ lvalue = true;
+ break;
+ case glslang::EOpSparseTextureClamp:
+ if ((cubeCompare && i == 4) || (! cubeCompare && i == 3))
+ lvalue = true;
+ break;
+ case glslang::EOpSparseTextureLod:
+ case glslang::EOpSparseTextureOffset:
+ if (i == 3)
+ lvalue = true;
+ break;
+#endif
+ case glslang::EOpSparseTextureFetch:
+ if ((sampler.dim != glslang::EsdRect && i == 3) || (sampler.dim == glslang::EsdRect && i == 2))
+ lvalue = true;
+ break;
+ case glslang::EOpSparseTextureFetchOffset:
+ if ((sampler.dim != glslang::EsdRect && i == 4) || (sampler.dim == glslang::EsdRect && i == 3))
+ lvalue = true;
+ break;
+#ifdef AMD_EXTENSIONS
+ case glslang::EOpSparseTextureLodOffset:
+ case glslang::EOpSparseTextureGrad:
+ case glslang::EOpSparseTextureOffsetClamp:
+ if ((f16ShadowCompare && i == 5) || (! f16ShadowCompare && i == 4))
+ lvalue = true;
+ break;
+ case glslang::EOpSparseTextureGradOffset:
+ case glslang::EOpSparseTextureGradClamp:
+ if ((f16ShadowCompare && i == 6) || (! f16ShadowCompare && i == 5))
+ lvalue = true;
+ break;
+ case glslang::EOpSparseTextureGradOffsetClamp:
+ if ((f16ShadowCompare && i == 7) || (! f16ShadowCompare && i == 6))
+ lvalue = true;
+ break;
+#else
+ case glslang::EOpSparseTextureLodOffset:
+ case glslang::EOpSparseTextureGrad:
+ case glslang::EOpSparseTextureOffsetClamp:
+ if (i == 4)
+ lvalue = true;
+ break;
+ case glslang::EOpSparseTextureGradOffset:
+ case glslang::EOpSparseTextureGradClamp:
+ if (i == 5)
+ lvalue = true;
+ break;
+ case glslang::EOpSparseTextureGradOffsetClamp:
+ if (i == 6)
+ lvalue = true;
+ break;
+#endif
+ case glslang::EOpSparseTextureGather:
+ if ((sampler.shadow && i == 3) || (! sampler.shadow && i == 2))
+ lvalue = true;
+ break;
+ case glslang::EOpSparseTextureGatherOffset:
+ case glslang::EOpSparseTextureGatherOffsets:
+ if ((sampler.shadow && i == 4) || (! sampler.shadow && i == 3))
+ lvalue = true;
+ break;
+#ifdef AMD_EXTENSIONS
+ case glslang::EOpSparseTextureGatherLod:
+ if (i == 3)
+ lvalue = true;
+ break;
+ case glslang::EOpSparseTextureGatherLodOffset:
+ case glslang::EOpSparseTextureGatherLodOffsets:
+ if (i == 4)
+ lvalue = true;
+ break;
+ case glslang::EOpSparseImageLoadLod:
+ if (i == 3)
+ lvalue = true;
+ break;
+#endif
+#ifdef NV_EXTENSIONS
+ case glslang::EOpImageSampleFootprintNV:
+ if (i == 4)
+ lvalue = true;
+ break;
+ case glslang::EOpImageSampleFootprintClampNV:
+ case glslang::EOpImageSampleFootprintLodNV:
+ if (i == 5)
+ lvalue = true;
+ break;
+ case glslang::EOpImageSampleFootprintGradNV:
+ if (i == 6)
+ lvalue = true;
+ break;
+ case glslang::EOpImageSampleFootprintGradClampNV:
+ if (i == 7)
+ lvalue = true;
+ break;
+#endif
+ default:
+ break;
+ }
+
+ if (lvalue)
+ arguments.push_back(builder.accessChainGetLValue());
+ else
+ arguments.push_back(accessChainLoad(glslangArguments[i]->getAsTyped()->getType()));
+ }
+}
+
+void TGlslangToSpvTraverser::translateArguments(glslang::TIntermUnary& node, std::vector<spv::Id>& arguments)
+{
+ builder.clearAccessChain();
+ node.getOperand()->traverse(this);
+ arguments.push_back(accessChainLoad(node.getOperand()->getType()));
+}
+
+spv::Id TGlslangToSpvTraverser::createImageTextureFunctionCall(glslang::TIntermOperator* node)
+{
+ if (! node->isImage() && ! node->isTexture())
+ return spv::NoResult;
+
+ builder.setLine(node->getLoc().line, node->getLoc().getFilename());
+
+ // Process a GLSL texturing op (will be SPV image)
+
+ const glslang::TType &imageType = node->getAsAggregate()
+ ? node->getAsAggregate()->getSequence()[0]->getAsTyped()->getType()
+ : node->getAsUnaryNode()->getOperand()->getAsTyped()->getType();
+ const glslang::TSampler sampler = imageType.getSampler();
+#ifdef AMD_EXTENSIONS
+ bool f16ShadowCompare = (sampler.shadow && node->getAsAggregate())
+ ? node->getAsAggregate()->getSequence()[1]->getAsTyped()->getType().getBasicType() == glslang::EbtFloat16
+ : false;
+#endif
+
+ const auto signExtensionMask = [&]() {
+ if (builder.getSpvVersion() >= spv::Spv_1_4) {
+ if (sampler.type == glslang::EbtUint)
+ return spv::ImageOperandsZeroExtendMask;
+ else if (sampler.type == glslang::EbtInt)
+ return spv::ImageOperandsSignExtendMask;
+ }
+ return spv::ImageOperandsMaskNone;
+ };
+
+ std::vector<spv::Id> arguments;
+ if (node->getAsAggregate())
+ translateArguments(*node->getAsAggregate(), arguments);
+ else
+ translateArguments(*node->getAsUnaryNode(), arguments);
+ spv::Decoration precision = TranslatePrecisionDecoration(node->getOperationPrecision());
+
+ spv::Builder::TextureParameters params = { };
+ params.sampler = arguments[0];
+
+ glslang::TCrackedTextureOp cracked;
+ node->crackTexture(sampler, cracked);
+
+ const bool isUnsignedResult = node->getType().getBasicType() == glslang::EbtUint;
+
+ // Check for queries
+ if (cracked.query) {
+ // OpImageQueryLod works on a sampled image; for other queries the image has to be extracted first
+ if (node->getOp() != glslang::EOpTextureQueryLod && builder.isSampledImage(params.sampler))
+ params.sampler = builder.createUnaryOp(spv::OpImage, builder.getImageType(params.sampler), params.sampler);
+
+ switch (node->getOp()) {
+ case glslang::EOpImageQuerySize:
+ case glslang::EOpTextureQuerySize:
+ if (arguments.size() > 1) {
+ params.lod = arguments[1];
+ return builder.createTextureQueryCall(spv::OpImageQuerySizeLod, params, isUnsignedResult);
+ } else
+ return builder.createTextureQueryCall(spv::OpImageQuerySize, params, isUnsignedResult);
+ case glslang::EOpImageQuerySamples:
+ case glslang::EOpTextureQuerySamples:
+ return builder.createTextureQueryCall(spv::OpImageQuerySamples, params, isUnsignedResult);
+ case glslang::EOpTextureQueryLod:
+ params.coords = arguments[1];
+ return builder.createTextureQueryCall(spv::OpImageQueryLod, params, isUnsignedResult);
+ case glslang::EOpTextureQueryLevels:
+ return builder.createTextureQueryCall(spv::OpImageQueryLevels, params, isUnsignedResult);
+ case glslang::EOpSparseTexelsResident:
+ return builder.createUnaryOp(spv::OpImageSparseTexelsResident, builder.makeBoolType(), arguments[0]);
+ default:
+ assert(0);
+ break;
+ }
+ }
+
+ int components = node->getType().getVectorSize();
+
+ if (node->getOp() == glslang::EOpTextureFetch) {
+ // These must produce 4 components, per SPIR-V spec. We'll add a conversion constructor if needed.
+ // This will only happen through the HLSL path for operator[], so we do not have to handle e.g.
+ // the EOpTexture/Proj/Lod/etc family. It would be harmless to do so, but would need more logic
+ // here around e.g. which ones return scalars or other types.
+ components = 4;
+ }
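+ // As an example (illustrative, not upstream text): an HLSL operator[] read of a
+ // single float still fetches a 4-component vector here; the conversion constructor
+ // near the end of this function narrows the result back to the declared size.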
+
+ glslang::TType returnType(node->getType().getBasicType(), glslang::EvqTemporary, components);
+
+ auto resultType = [&returnType,this]{ return convertGlslangToSpvType(returnType); };
+
+ // Check for image functions other than queries
+ if (node->isImage()) {
+ std::vector<spv::IdImmediate> operands;
+ auto opIt = arguments.begin();
+ spv::IdImmediate image = { true, *(opIt++) };
+ operands.push_back(image);
+
+ // Handle subpass operations
+ // TODO: GLSL should change to have the "MS" only on the type rather than the
+ // built-in function.
+ if (cracked.subpass) {
+ // add on the (0,0) coordinate
+ spv::Id zero = builder.makeIntConstant(0);
+ std::vector<spv::Id> comps;
+ comps.push_back(zero);
+ comps.push_back(zero);
+ spv::IdImmediate coord = { true,
+ builder.makeCompositeConstant(builder.makeVectorType(builder.makeIntType(32), 2), comps) };
+ operands.push_back(coord);
+ spv::IdImmediate imageOperands = { false, spv::ImageOperandsMaskNone };
+ imageOperands.word = imageOperands.word | signExtensionMask();
+ if (sampler.ms) {
+ imageOperands.word = imageOperands.word | spv::ImageOperandsSampleMask;
+ }
+ if (imageOperands.word != spv::ImageOperandsMaskNone) {
+ operands.push_back(imageOperands);
+ if (sampler.ms) {
+ spv::IdImmediate imageOperand = { true, *(opIt++) };
+ operands.push_back(imageOperand);
+ }
+ }
+ spv::Id result = builder.createOp(spv::OpImageRead, resultType(), operands);
+ builder.setPrecision(result, precision);
+ return result;
+ }
+
+ spv::IdImmediate coord = { true, *(opIt++) };
+ operands.push_back(coord);
+#ifdef AMD_EXTENSIONS
+ if (node->getOp() == glslang::EOpImageLoad || node->getOp() == glslang::EOpImageLoadLod) {
+#else
+ if (node->getOp() == glslang::EOpImageLoad) {
+#endif
+ spv::ImageOperandsMask mask = spv::ImageOperandsMaskNone;
+ if (sampler.ms) {
+ mask = mask | spv::ImageOperandsSampleMask;
+ }
+#ifdef AMD_EXTENSIONS
+ if (cracked.lod) {
+ builder.addExtension(spv::E_SPV_AMD_shader_image_load_store_lod);
+ builder.addCapability(spv::CapabilityImageReadWriteLodAMD);
+ mask = mask | spv::ImageOperandsLodMask;
+ }
+#endif
+ mask = mask | TranslateImageOperands(TranslateCoherent(imageType));
+ mask = (spv::ImageOperandsMask)(mask & ~spv::ImageOperandsMakeTexelAvailableKHRMask);
+ mask = mask | signExtensionMask();
+ if (mask != spv::ImageOperandsMaskNone) {
+ spv::IdImmediate imageOperands = { false, (unsigned int)mask };
+ operands.push_back(imageOperands);
+ }
+ if (mask & spv::ImageOperandsSampleMask) {
+ spv::IdImmediate imageOperand = { true, *opIt++ };
+ operands.push_back(imageOperand);
+ }
+#ifdef AMD_EXTENSIONS
+ if (mask & spv::ImageOperandsLodMask) {
+ spv::IdImmediate imageOperand = { true, *opIt++ };
+ operands.push_back(imageOperand);
+ }
+#endif
+ if (mask & spv::ImageOperandsMakeTexelVisibleKHRMask) {
+ spv::IdImmediate imageOperand = { true,
+ builder.makeUintConstant(TranslateMemoryScope(TranslateCoherent(imageType))) };
+ operands.push_back(imageOperand);
+ }
+
+ if (builder.getImageTypeFormat(builder.getImageType(operands.front().word)) == spv::ImageFormatUnknown)
+ builder.addCapability(spv::CapabilityStorageImageReadWithoutFormat);
+
+ std::vector<spv::Id> result(1, builder.createOp(spv::OpImageRead, resultType(), operands));
+ builder.setPrecision(result[0], precision);
+
+ // If needed, add a conversion constructor to the proper size.
+ if (components != node->getType().getVectorSize())
+ result[0] = builder.createConstructor(precision, result, convertGlslangToSpvType(node->getType()));
+
+ return result[0];
+#ifdef AMD_EXTENSIONS
+ } else if (node->getOp() == glslang::EOpImageStore || node->getOp() == glslang::EOpImageStoreLod) {
+#else
+ } else if (node->getOp() == glslang::EOpImageStore) {
+#endif
+
+ // Push the texel value before the operands
+#ifdef AMD_EXTENSIONS
+ if (sampler.ms || cracked.lod) {
+#else
+ if (sampler.ms) {
+#endif
+ spv::IdImmediate texel = { true, *(opIt + 1) };
+ operands.push_back(texel);
+ } else {
+ spv::IdImmediate texel = { true, *opIt };
+ operands.push_back(texel);
+ }
+
+ spv::ImageOperandsMask mask = spv::ImageOperandsMaskNone;
+ if (sampler.ms) {
+ mask = mask | spv::ImageOperandsSampleMask;
+ }
+#ifdef AMD_EXTENSIONS
+ if (cracked.lod) {
+ builder.addExtension(spv::E_SPV_AMD_shader_image_load_store_lod);
+ builder.addCapability(spv::CapabilityImageReadWriteLodAMD);
+ mask = mask | spv::ImageOperandsLodMask;
+ }
+#endif
+ mask = mask | TranslateImageOperands(TranslateCoherent(imageType));
+ mask = (spv::ImageOperandsMask)(mask & ~spv::ImageOperandsMakeTexelVisibleKHRMask);
+ mask = mask | signExtensionMask();
+ if (mask != spv::ImageOperandsMaskNone) {
+ spv::IdImmediate imageOperands = { false, (unsigned int)mask };
+ operands.push_back(imageOperands);
+ }
+ if (mask & spv::ImageOperandsSampleMask) {
+ spv::IdImmediate imageOperand = { true, *opIt++ };
+ operands.push_back(imageOperand);
+ }
+#ifdef AMD_EXTENSIONS
+ if (mask & spv::ImageOperandsLodMask) {
+ spv::IdImmediate imageOperand = { true, *opIt++ };
+ operands.push_back(imageOperand);
+ }
+#endif
+ if (mask & spv::ImageOperandsMakeTexelAvailableKHRMask) {
+ spv::IdImmediate imageOperand = { true,
+ builder.makeUintConstant(TranslateMemoryScope(TranslateCoherent(imageType))) };
+ operands.push_back(imageOperand);
+ }
+
+ builder.createNoResultOp(spv::OpImageWrite, operands);
+ if (builder.getImageTypeFormat(builder.getImageType(operands.front().word)) == spv::ImageFormatUnknown)
+ builder.addCapability(spv::CapabilityStorageImageWriteWithoutFormat);
+ return spv::NoResult;
+#ifdef AMD_EXTENSIONS
+ } else if (node->getOp() == glslang::EOpSparseImageLoad ||
+ node->getOp() == glslang::EOpSparseImageLoadLod) {
+#else
+ } else if (node->getOp() == glslang::EOpSparseImageLoad) {
+#endif
+ builder.addCapability(spv::CapabilitySparseResidency);
+ if (builder.getImageTypeFormat(builder.getImageType(operands.front().word)) == spv::ImageFormatUnknown)
+ builder.addCapability(spv::CapabilityStorageImageReadWithoutFormat);
+
+ spv::ImageOperandsMask mask = spv::ImageOperandsMaskNone;
+ if (sampler.ms) {
+ mask = mask | spv::ImageOperandsSampleMask;
+ }
+#ifdef AMD_EXTENSIONS
+ if (cracked.lod) {
+ builder.addExtension(spv::E_SPV_AMD_shader_image_load_store_lod);
+ builder.addCapability(spv::CapabilityImageReadWriteLodAMD);
+
+ mask = mask | spv::ImageOperandsLodMask;
+ }
+#endif
+ mask = mask | TranslateImageOperands(TranslateCoherent(imageType));
+ mask = (spv::ImageOperandsMask)(mask & ~spv::ImageOperandsMakeTexelAvailableKHRMask);
+ mask = mask | signExtensionMask();
+ if (mask != spv::ImageOperandsMaskNone) {
+ spv::IdImmediate imageOperands = { false, (unsigned int)mask };
+ operands.push_back(imageOperands);
+ }
+ if (mask & spv::ImageOperandsSampleMask) {
+ spv::IdImmediate imageOperand = { true, *opIt++ };
+ operands.push_back(imageOperand);
+ }
+#ifdef AMD_EXTENSIONS
+ if (mask & spv::ImageOperandsLodMask) {
+ spv::IdImmediate imageOperand = { true, *opIt++ };
+ operands.push_back(imageOperand);
+ }
+#endif
+ if (mask & spv::ImageOperandsMakeTexelVisibleKHRMask) {
+ spv::IdImmediate imageOperand = { true, builder.makeUintConstant(TranslateMemoryScope(TranslateCoherent(imageType))) };
+ operands.push_back(imageOperand);
+ }
+
+ // Create the return type, a special structure (residency code plus texel)
+ spv::Id texelOut = *opIt;
+ spv::Id typeId0 = resultType();
+ spv::Id typeId1 = builder.getDerefTypeId(texelOut);
+ spv::Id resultTypeId = builder.makeStructResultType(typeId0, typeId1);
+
+ spv::Id resultId = builder.createOp(spv::OpImageSparseRead, resultTypeId, operands);
+
+ // Decode the return type
+ builder.createStore(builder.createCompositeExtract(resultId, typeId1, 1), texelOut);
+ return builder.createCompositeExtract(resultId, typeId0, 0);
+ } else {
+ // Process image atomic operations
+
+ // GLSL "IMAGE_PARAMS" will involve in constructing an image texel pointer and this pointer,
+ // as the first source operand, is required by SPIR-V atomic operations.
+ // For non-MS, the sample value should be 0
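+ // Sketch (an assumed example): imageAtomicAdd(img, coord, data) becomes an
+ // OpImageTexelPointer on (img, coord, sample = 0), and createAtomicOperation()
+ // below then issues OpAtomicIAdd on that pointer with data as its operand.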
+ spv::IdImmediate sample = { true, sampler.ms ? *(opIt++) : builder.makeUintConstant(0) };
+ operands.push_back(sample);
+
+ spv::Id resultTypeId;
+ // imageAtomicStore has a void return type so base the pointer type on
+ // the type of the value operand.
+ if (node->getOp() == glslang::EOpImageAtomicStore) {
+ resultTypeId = builder.makePointer(spv::StorageClassImage, builder.getTypeId(operands[2].word));
+ } else {
+ resultTypeId = builder.makePointer(spv::StorageClassImage, resultType());
+ }
+ spv::Id pointer = builder.createOp(spv::OpImageTexelPointer, resultTypeId, operands);
+
+ std::vector<spv::Id> operands;
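+ // This declaration shadows the outer IdImmediate operand list above;
+ // atomic operands are plain ids.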
+ operands.push_back(pointer);
+ for (; opIt != arguments.end(); ++opIt)
+ operands.push_back(*opIt);
+
+ return createAtomicOperation(node->getOp(), precision, resultType(), operands, node->getBasicType());
+ }
+ }
+
+#ifdef AMD_EXTENSIONS
+ // Check for fragment mask functions other than queries
+ if (cracked.fragMask) {
+ assert(sampler.ms);
+
+ auto opIt = arguments.begin();
+ std::vector<spv::Id> operands;
+
+ // Extract the image if necessary
+ if (builder.isSampledImage(params.sampler))
+ params.sampler = builder.createUnaryOp(spv::OpImage, builder.getImageType(params.sampler), params.sampler);
+
+ operands.push_back(params.sampler);
+ ++opIt;
+
+ if (sampler.isSubpass()) {
+ // add on the (0,0) coordinate
+ spv::Id zero = builder.makeIntConstant(0);
+ std::vector<spv::Id> comps;
+ comps.push_back(zero);
+ comps.push_back(zero);
+ operands.push_back(builder.makeCompositeConstant(builder.makeVectorType(builder.makeIntType(32), 2), comps));
+ }
+
+ for (; opIt != arguments.end(); ++opIt)
+ operands.push_back(*opIt);
+
+ spv::Op fragMaskOp = spv::OpNop;
+ if (node->getOp() == glslang::EOpFragmentMaskFetch)
+ fragMaskOp = spv::OpFragmentMaskFetchAMD;
+ else if (node->getOp() == glslang::EOpFragmentFetch)
+ fragMaskOp = spv::OpFragmentFetchAMD;
+
+ builder.addExtension(spv::E_SPV_AMD_shader_fragment_mask);
+ builder.addCapability(spv::CapabilityFragmentMaskAMD);
+ return builder.createOp(fragMaskOp, resultType(), operands);
+ }
+#endif
+
+ // Check for texture functions other than queries
+ bool sparse = node->isSparseTexture();
+#ifdef NV_EXTENSIONS
+ bool imageFootprint = node->isImageFootprint();
+#endif
+
+ bool cubeCompare = sampler.dim == glslang::EsdCube && sampler.arrayed && sampler.shadow;
+
+ // check for bias argument
+ bool bias = false;
+#ifdef AMD_EXTENSIONS
+ if (! cracked.lod && ! cracked.grad && ! cracked.fetch && ! cubeCompare) {
+#else
+ if (! cracked.lod && ! cracked.gather && ! cracked.grad && ! cracked.fetch && ! cubeCompare) {
+#endif
+ int nonBiasArgCount = 2;
+#ifdef AMD_EXTENSIONS
+ if (cracked.gather)
+ ++nonBiasArgCount; // comp argument should be present when bias argument is present
+
+ if (f16ShadowCompare)
+ ++nonBiasArgCount;
+#endif
+ if (cracked.offset)
+ ++nonBiasArgCount;
+#ifdef AMD_EXTENSIONS
+ else if (cracked.offsets)
+ ++nonBiasArgCount;
+#endif
+ if (cracked.grad)
+ nonBiasArgCount += 2;
+ if (cracked.lodClamp)
+ ++nonBiasArgCount;
+ if (sparse)
+ ++nonBiasArgCount;
+#ifdef NV_EXTENSIONS
+ if (imageFootprint)
+ // Three extra arguments follow:
+ //   int granularity, bool coarse, out gl_TextureFootprint2DNV footprint
+ nonBiasArgCount += 3;
+#endif
+ if ((int)arguments.size() > nonBiasArgCount)
+ bias = true;
+ }
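+ // For instance (illustrative only): texture(sampler2D, vec2 P, float bias) reaches
+ // this point with 3 arguments while nonBiasArgCount is 2, so the extra trailing
+ // argument is treated as the bias.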
+
+ // See if the sampler param should really be just the SPV image part
+ if (cracked.fetch) {
+ // a fetch needs to have the image extracted first
+ if (builder.isSampledImage(params.sampler))
+ params.sampler = builder.createUnaryOp(spv::OpImage, builder.getImageType(params.sampler), params.sampler);
+ }
+
+#ifdef AMD_EXTENSIONS
+ if (cracked.gather) {
+ const auto& sourceExtensions = glslangIntermediate->getRequestedExtensions();
+ if (bias || cracked.lod ||
+ sourceExtensions.find(glslang::E_GL_AMD_texture_gather_bias_lod) != sourceExtensions.end()) {
+ builder.addExtension(spv::E_SPV_AMD_texture_gather_bias_lod);
+ builder.addCapability(spv::CapabilityImageGatherBiasLodAMD);
+ }
+ }
+#endif
+
+ // set the rest of the arguments
+
+ params.coords = arguments[1];
+ int extraArgs = 0;
+ bool noImplicitLod = false;
+
+ // sort out where Dref is coming from
+#ifdef AMD_EXTENSIONS
+ if (cubeCompare || f16ShadowCompare) {
+#else
+ if (cubeCompare) {
+#endif
+ params.Dref = arguments[2];
+ ++extraArgs;
+ } else if (sampler.shadow && cracked.gather) {
+ params.Dref = arguments[2];
+ ++extraArgs;
+ } else if (sampler.shadow) {
+ std::vector<spv::Id> indexes;
+ int dRefComp;
+ if (cracked.proj)
+ dRefComp = 2; // "The resulting 3rd component of P in the shadow forms is used as Dref"
+ else
+ dRefComp = builder.getNumComponents(params.coords) - 1;
+ indexes.push_back(dRefComp);
+ params.Dref = builder.createCompositeExtract(params.coords, builder.getScalarTypeId(builder.getTypeId(params.coords)), indexes);
+ }
+
+ // lod
+ if (cracked.lod) {
+ params.lod = arguments[2 + extraArgs];
+ ++extraArgs;
+ } else if (glslangIntermediate->getStage() != EShLangFragment
+#ifdef NV_EXTENSIONS
+ // NV_compute_shader_derivatives layout qualifiers allow for implicit LODs
+ && !(glslangIntermediate->getStage() == EShLangCompute &&
+ (glslangIntermediate->getLayoutDerivativeModeNone() != glslang::LayoutDerivativeNone))
+#endif
+ ) {
+ // we need to invent the default lod for an explicit lod instruction for a non-fragment stage
+ noImplicitLod = true;
+ }
+
+ // multisample
+ if (sampler.ms) {
+ params.sample = arguments[2 + extraArgs]; // For MS, "sample" should be specified
+ ++extraArgs;
+ }
+
+ // gradient
+ if (cracked.grad) {
+ params.gradX = arguments[2 + extraArgs];
+ params.gradY = arguments[3 + extraArgs];
+ extraArgs += 2;
+ }
+
+ // offset and offsets
+ if (cracked.offset) {
+ params.offset = arguments[2 + extraArgs];
+ ++extraArgs;
+ } else if (cracked.offsets) {
+ params.offsets = arguments[2 + extraArgs];
+ ++extraArgs;
+ }
+
+ // lod clamp
+ if (cracked.lodClamp) {
+ params.lodClamp = arguments[2 + extraArgs];
+ ++extraArgs;
+ }
+ // sparse
+ if (sparse) {
+ params.texelOut = arguments[2 + extraArgs];
+ ++extraArgs;
+ }
+
+ // gather component
+ if (cracked.gather && ! sampler.shadow) {
+ // the component defaults to 0 if missing; otherwise it is an argument
+ if (2 + extraArgs < (int)arguments.size()) {
+ params.component = arguments[2 + extraArgs];
+ ++extraArgs;
+ } else
+ params.component = builder.makeIntConstant(0);
+ }
+#ifdef NV_EXTENSIONS
+ spv::Id resultStruct = spv::NoResult;
+ if (imageFootprint) {
+ // Three extra arguments follow:
+ //   int granularity, bool coarse, out gl_TextureFootprint2DNV footprint
+ params.granularity = arguments[2 + extraArgs];
+ params.coarse = arguments[3 + extraArgs];
+ resultStruct = arguments[4 + extraArgs];
+ extraArgs += 3;
+ }
+#endif
+ // bias
+ if (bias) {
+ params.bias = arguments[2 + extraArgs];
+ ++extraArgs;
+ }
+
+#ifdef NV_EXTENSIONS
+ if (imageFootprint) {
+ builder.addExtension(spv::E_SPV_NV_shader_image_footprint);
+ builder.addCapability(spv::CapabilityImageFootprintNV);
+
+
+ // resultStructType (OpenGL type) contains 5 elements:
+ // struct gl_TextureFootprint2DNV {
+ //     uvec2 anchor;
+ //     uvec2 offset;
+ //     uvec2 mask;
+ //     uint  lod;
+ //     uint  granularity;
+ // };
+ // or
+ // struct gl_TextureFootprint3DNV {
+ //     uvec3 anchor;
+ //     uvec3 offset;
+ //     uvec2 mask;
+ //     uint  lod;
+ //     uint  granularity;
+ // };
+ spv::Id resultStructType = builder.getContainedTypeId(builder.getTypeId(resultStruct));
+ assert(builder.isStructType(resultStructType));
+
+ // resType (SPIR-V type) contains 6 elements:
+ // Member 0 must be a Boolean type scalar (LOD),
+ // Member 1 must be a vector of integer type, whose Signedness operand is 0 (anchor),
+ // Member 2 must be a vector of integer type, whose Signedness operand is 0 (offset),
+ // Member 3 must be a vector of integer type, whose Signedness operand is 0 (mask),
+ // Member 4 must be a scalar of integer type, whose Signedness operand is 0 (lod),
+ // Member 5 must be a scalar of integer type, whose Signedness operand is 0 (granularity).
+ std::vector<spv::Id> members;
+ members.push_back(resultType());
+ for (int i = 0; i < 5; i++) {
+ members.push_back(builder.getContainedTypeId(resultStructType, i));
+ }
+ spv::Id resType = builder.makeStructType(members, "ResType");
+
+ // call ImageFootprintNV
+ spv::Id res = builder.createTextureCall(precision, resType, sparse, cracked.fetch, cracked.proj,
+ cracked.gather, noImplicitLod, params, signExtensionMask());
+
+ // copy resType (SPIR-V type) to resultStructType (OpenGL type)
+ for (int i = 0; i < 5; i++) {
+ builder.clearAccessChain();
+ builder.setAccessChainLValue(resultStruct);
+
+ // Accessing a struct we created; no coherent flag is set
+ spv::Builder::AccessChain::CoherentFlags flags;
+ flags.clear();
+
+ builder.accessChainPush(builder.makeIntConstant(i), flags, 0);
+ builder.accessChainStore(builder.createCompositeExtract(res, builder.getContainedTypeId(resType, i+1), i+1));
+ }
+ return builder.createCompositeExtract(res, resultType(), 0);
+ }
+#endif
+
+ // projective component (might need to move)
+ // GLSL: "The texture coordinates consumed from P, not including the last component of P,
+ // are divided by the last component of P."
+ // SPIR-V: "... (u [, v] [, w], q)... It may be a vector larger than needed, but all
+ // unused components will appear after all used components."
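+ // Hypothetical example (not from the source): textureProj(sampler2D, vec4 P)
+ // carries q in P.w (component 3), but the SPIR-V 2D form wants q in component 2,
+ // so the code below copies component 3 over component 2 before the texture call.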
+ if (cracked.proj) {
+ int projSourceComp = builder.getNumComponents(params.coords) - 1;
+ int projTargetComp;
+ switch (sampler.dim) {
+ case glslang::Esd1D: projTargetComp = 1; break;
+ case glslang::Esd2D: projTargetComp = 2; break;
+ case glslang::EsdRect: projTargetComp = 2; break;
+ default: projTargetComp = projSourceComp; break;
+ }
+ // copy the projective coordinate if we have to
+ if (projTargetComp != projSourceComp) {
+ spv::Id projComp = builder.createCompositeExtract(params.coords,
+ builder.getScalarTypeId(builder.getTypeId(params.coords)),
+ projSourceComp);
+ params.coords = builder.createCompositeInsert(projComp, params.coords,
+ builder.getTypeId(params.coords), projTargetComp);
+ }
+ }
+
+ // nonprivate
+ if (imageType.getQualifier().nonprivate) {
+ params.nonprivate = true;
+ }
+
+ // volatile
+ if (imageType.getQualifier().volatil) {
+ params.volatil = true;
+ }
+
+ std::vector<spv::Id> result( 1,
+ builder.createTextureCall(precision, resultType(), sparse, cracked.fetch, cracked.proj, cracked.gather,
+ noImplicitLod, params, signExtensionMask())
+ );
+
+ if (components != node->getType().getVectorSize())
+ result[0] = builder.createConstructor(precision, result, convertGlslangToSpvType(node->getType()));
+
+ return result[0];
+}
+
+spv::Id TGlslangToSpvTraverser::handleUserFunctionCall(const glslang::TIntermAggregate* node)
+{
+ // Grab the function's pointer from the previously created function
+ spv::Function* function = functionMap[node->getName().c_str()];
+ if (! function)
+ return 0;
+
+ const glslang::TIntermSequence& glslangArgs = node->getSequence();
+ const glslang::TQualifierList& qualifiers = node->getQualifierList();
+
+ // See comments in makeFunctions() for details about the semantics for parameter passing.
+ //
+ // These imply we need a four step process:
+ // 1. Evaluate the arguments
+ // 2. Allocate and make copies of in, out, and inout arguments
+ // 3. Make the call
+ // 4. Copy back the results
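+ //
+ // Concrete sketch (assumed, not quoted): for a call f(x) to "void f(inout float p)",
+ // step 2 creates a Function-storage variable "param" and stores x into it, step 3
+ // emits OpFunctionCall with the address of "param", and step 4 loads "param" and
+ // stores the value back into x.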
+
+ // 1. Evaluate the arguments and their types
+ std::vector<spv::Builder::AccessChain> lValues;
+ std::vector<spv::Id> rValues;
+ std::vector<const glslang::TType*> argTypes;
+ for (int a = 0; a < (int)glslangArgs.size(); ++a) {
+ argTypes.push_back(&glslangArgs[a]->getAsTyped()->getType());
+ // build l-value
+ builder.clearAccessChain();
+ glslangArgs[a]->traverse(this);
+ // keep outputs and pass-by-originals as l-values, evaluate others as r-values
+ if (originalParam(qualifiers[a], *argTypes[a], function->hasImplicitThis() && a == 0) ||
+ writableParam(qualifiers[a])) {
+ // save l-value
+ lValues.push_back(builder.getAccessChain());
+ } else {
+ // process r-value
+ rValues.push_back(accessChainLoad(*argTypes.back()));
+ }
+ }
+
+ // 2. Allocate space for anything needing a copy, and if it's "in" or "inout"
+ // copy the original into that space.
+ //
+ // Also, build up the list of actual arguments to pass in for the call
+ int lValueCount = 0;
+ int rValueCount = 0;
+ std::vector<spv::Id> spvArgs;
+ for (int a = 0; a < (int)glslangArgs.size(); ++a) {
+ spv::Id arg;
+ if (originalParam(qualifiers[a], *argTypes[a], function->hasImplicitThis() && a == 0)) {
+ builder.setAccessChain(lValues[lValueCount]);
+ arg = builder.accessChainGetLValue();
+ ++lValueCount;
+ } else if (writableParam(qualifiers[a])) {
+ // need space to hold the copy
+ arg = builder.createVariable(spv::StorageClassFunction, builder.getContainedTypeId(function->getParamType(a)), "param");
+ if (qualifiers[a] == glslang::EvqIn || qualifiers[a] == glslang::EvqInOut) {
+ // need to copy the input into output space
+ builder.setAccessChain(lValues[lValueCount]);
+ spv::Id copy = accessChainLoad(*argTypes[a]);
+ builder.clearAccessChain();
+ builder.setAccessChainLValue(arg);
+ multiTypeStore(*argTypes[a], copy);
+ }
+ ++lValueCount;
+ } else {
+ // process r-value, which involves a copy for a type mismatch
+ if (function->getParamType(a) != convertGlslangToSpvType(*argTypes[a])) {
+ spv::Id argCopy = builder.createVariable(spv::StorageClassFunction, function->getParamType(a), "arg");
+ builder.clearAccessChain();
+ builder.setAccessChainLValue(argCopy);
+ multiTypeStore(*argTypes[a], rValues[rValueCount]);
+ arg = builder.createLoad(argCopy);
+ } else
+ arg = rValues[rValueCount];
+ ++rValueCount;
+ }
+ spvArgs.push_back(arg);
+ }
+
+ // 3. Make the call.
+ spv::Id result = builder.createFunctionCall(function, spvArgs);
+ builder.setPrecision(result, TranslatePrecisionDecoration(node->getType()));
+
+ // 4. Copy back out any "out" arguments.
+ lValueCount = 0;
+ for (int a = 0; a < (int)glslangArgs.size(); ++a) {
+ if (originalParam(qualifiers[a], *argTypes[a], function->hasImplicitThis() && a == 0))
+ ++lValueCount;
+ else if (writableParam(qualifiers[a])) {
+ if (qualifiers[a] == glslang::EvqOut || qualifiers[a] == glslang::EvqInOut) {
+ spv::Id copy = builder.createLoad(spvArgs[a]);
+ builder.setAccessChain(lValues[lValueCount]);
+ multiTypeStore(*argTypes[a], copy);
+ }
+ ++lValueCount;
+ }
+ }
+
+ return result;
+}
+
+// Translate AST operation to SPV operation, already having SPV-based operands/types.
+spv::Id TGlslangToSpvTraverser::createBinaryOperation(glslang::TOperator op, OpDecorations& decorations,
+ spv::Id typeId, spv::Id left, spv::Id right,
+ glslang::TBasicType typeProxy, bool reduceComparison)
+{
+ bool isUnsigned = isTypeUnsignedInt(typeProxy);
+ bool isFloat = isTypeFloat(typeProxy);
+ bool isBool = typeProxy == glslang::EbtBool;
+
+ spv::Op binOp = spv::OpNop;
+ bool needMatchingVectors = true; // for non-matrix ops, would a scalar need to smear to match a vector?
+ bool comparison = false;
+
+ switch (op) {
+ case glslang::EOpAdd:
+ case glslang::EOpAddAssign:
+ if (isFloat)
+ binOp = spv::OpFAdd;
+ else
+ binOp = spv::OpIAdd;
+ break;
+ case glslang::EOpSub:
+ case glslang::EOpSubAssign:
+ if (isFloat)
+ binOp = spv::OpFSub;
+ else
+ binOp = spv::OpISub;
+ break;
+ case glslang::EOpMul:
+ case glslang::EOpMulAssign:
+ if (isFloat)
+ binOp = spv::OpFMul;
+ else
+ binOp = spv::OpIMul;
+ break;
+ case glslang::EOpVectorTimesScalar:
+ case glslang::EOpVectorTimesScalarAssign:
+ if (isFloat && (builder.isVector(left) || builder.isVector(right))) {
+ if (builder.isVector(right))
+ std::swap(left, right);
+ assert(builder.isScalar(right));
+ needMatchingVectors = false;
+ binOp = spv::OpVectorTimesScalar;
+ } else if (isFloat)
+ binOp = spv::OpFMul;
+ else
+ binOp = spv::OpIMul;
+ break;
+ case glslang::EOpVectorTimesMatrix:
+ case glslang::EOpVectorTimesMatrixAssign:
+ binOp = spv::OpVectorTimesMatrix;
+ break;
+ case glslang::EOpMatrixTimesVector:
+ binOp = spv::OpMatrixTimesVector;
+ break;
+ case glslang::EOpMatrixTimesScalar:
+ case glslang::EOpMatrixTimesScalarAssign:
+ binOp = spv::OpMatrixTimesScalar;
+ break;
+ case glslang::EOpMatrixTimesMatrix:
+ case glslang::EOpMatrixTimesMatrixAssign:
+ binOp = spv::OpMatrixTimesMatrix;
+ break;
+ case glslang::EOpOuterProduct:
+ binOp = spv::OpOuterProduct;
+ needMatchingVectors = false;
+ break;
+
+ case glslang::EOpDiv:
+ case glslang::EOpDivAssign:
+ if (isFloat)
+ binOp = spv::OpFDiv;
+ else if (isUnsigned)
+ binOp = spv::OpUDiv;
+ else
+ binOp = spv::OpSDiv;
+ break;
+ case glslang::EOpMod:
+ case glslang::EOpModAssign:
+ if (isFloat)
+ binOp = spv::OpFMod;
+ else if (isUnsigned)
+ binOp = spv::OpUMod;
+ else
+ binOp = spv::OpSMod;
+ break;
+ case glslang::EOpRightShift:
+ case glslang::EOpRightShiftAssign:
+ if (isUnsigned)
+ binOp = spv::OpShiftRightLogical;
+ else
+ binOp = spv::OpShiftRightArithmetic;
+ break;
+ case glslang::EOpLeftShift:
+ case glslang::EOpLeftShiftAssign:
+ binOp = spv::OpShiftLeftLogical;
+ break;
+ case glslang::EOpAnd:
+ case glslang::EOpAndAssign:
+ binOp = spv::OpBitwiseAnd;
+ break;
+ case glslang::EOpLogicalAnd:
+ needMatchingVectors = false;
+ binOp = spv::OpLogicalAnd;
+ break;
+ case glslang::EOpInclusiveOr:
+ case glslang::EOpInclusiveOrAssign:
+ binOp = spv::OpBitwiseOr;
+ break;
+ case glslang::EOpLogicalOr:
+ needMatchingVectors = false;
+ binOp = spv::OpLogicalOr;
+ break;
+ case glslang::EOpExclusiveOr:
+ case glslang::EOpExclusiveOrAssign:
+ binOp = spv::OpBitwiseXor;
+ break;
+ case glslang::EOpLogicalXor:
+ needMatchingVectors = false;
+ binOp = spv::OpLogicalNotEqual;
+ break;
+
+ case glslang::EOpLessThan:
+ case glslang::EOpGreaterThan:
+ case glslang::EOpLessThanEqual:
+ case glslang::EOpGreaterThanEqual:
+ case glslang::EOpEqual:
+ case glslang::EOpNotEqual:
+ case glslang::EOpVectorEqual:
+ case glslang::EOpVectorNotEqual:
+ comparison = true;
+ break;
+ default:
+ break;
+ }
+
+ // handle mapped binary operations (should be non-comparison)
+ if (binOp != spv::OpNop) {
+ assert(comparison == false);
+ if (builder.isMatrix(left) || builder.isMatrix(right) ||
+ builder.isCooperativeMatrix(left) || builder.isCooperativeMatrix(right))
+ return createBinaryMatrixOperation(binOp, decorations, typeId, left, right);
+
+ // No matrix involved; make both operands be the same number of components, if needed
+ if (needMatchingVectors)
+ builder.promoteScalar(decorations.precision, left, right);
+
+ spv::Id result = builder.createBinOp(binOp, typeId, left, right);
+ builder.addDecoration(result, decorations.noContraction);
+ builder.addDecoration(result, decorations.nonUniform);
+ return builder.setPrecision(result, decorations.precision);
+ }
+
+ if (! comparison)
+ return 0;
+
+ // Handle comparison instructions
+
+ if (reduceComparison && (op == glslang::EOpEqual || op == glslang::EOpNotEqual)
+ && (builder.isVector(left) || builder.isMatrix(left) || builder.isAggregate(left))) {
+ spv::Id result = builder.createCompositeCompare(decorations.precision, left, right, op == glslang::EOpEqual);
+ builder.addDecoration(result, decorations.nonUniform);
+ return result;
+ }
+
+ switch (op) {
+ case glslang::EOpLessThan:
+ if (isFloat)
+ binOp = spv::OpFOrdLessThan;
+ else if (isUnsigned)
+ binOp = spv::OpULessThan;
+ else
+ binOp = spv::OpSLessThan;
+ break;
+ case glslang::EOpGreaterThan:
+ if (isFloat)
+ binOp = spv::OpFOrdGreaterThan;
+ else if (isUnsigned)
+ binOp = spv::OpUGreaterThan;
+ else
+ binOp = spv::OpSGreaterThan;
+ break;
+ case glslang::EOpLessThanEqual:
+ if (isFloat)
+ binOp = spv::OpFOrdLessThanEqual;
+ else if (isUnsigned)
+ binOp = spv::OpULessThanEqual;
+ else
+ binOp = spv::OpSLessThanEqual;
+ break;
+ case glslang::EOpGreaterThanEqual:
+ if (isFloat)
+ binOp = spv::OpFOrdGreaterThanEqual;
+ else if (isUnsigned)
+ binOp = spv::OpUGreaterThanEqual;
+ else
+ binOp = spv::OpSGreaterThanEqual;
+ break;
+ case glslang::EOpEqual:
+ case glslang::EOpVectorEqual:
+ if (isFloat)
+ binOp = spv::OpFOrdEqual;
+ else if (isBool)
+ binOp = spv::OpLogicalEqual;
+ else
+ binOp = spv::OpIEqual;
+ break;
+ case glslang::EOpNotEqual:
+ case glslang::EOpVectorNotEqual:
+ if (isFloat)
+ binOp = spv::OpFOrdNotEqual;
+ else if (isBool)
+ binOp = spv::OpLogicalNotEqual;
+ else
+ binOp = spv::OpINotEqual;
+ break;
+ default:
+ break;
+ }
+
+ if (binOp != spv::OpNop) {
+ spv::Id result = builder.createBinOp(binOp, typeId, left, right);
+ builder.addDecoration(result, decorations.noContraction);
+ builder.addDecoration(result, decorations.nonUniform);
+ return builder.setPrecision(result, decorations.precision);
+ }
+
+ return 0;
+}
+
+//
+// Translate AST matrix operation to SPV operation, already having SPV-based operands/types.
+// These can be any of:
+//
+// matrix * scalar
+// scalar * matrix
+// matrix * matrix linear algebraic
+// matrix * vector
+// vector * matrix
+// matrix * matrix componentwise
+// matrix op matrix op in {+, -, /}
+// matrix op scalar op in {+, -, /}
+// scalar op matrix op in {+, -, /}
+//
+spv::Id TGlslangToSpvTraverser::createBinaryMatrixOperation(spv::Op op, OpDecorations& decorations, spv::Id typeId,
+ spv::Id left, spv::Id right)
+{
+ bool firstClass = true;
+
+ // First, handle first-class matrix operations (* and matrix/scalar)
+ switch (op) {
+ case spv::OpFDiv:
+ if (builder.isMatrix(left) && builder.isScalar(right)) {
+ // turn matrix / scalar into a multiply...
+ spv::Id resultType = builder.getTypeId(right);
+ right = builder.createBinOp(spv::OpFDiv, resultType, builder.makeFpConstant(resultType, 1.0), right);
+ op = spv::OpMatrixTimesScalar;
+ } else
+ firstClass = false;
+ break;
+ case spv::OpMatrixTimesScalar:
+ if (builder.isMatrix(right) || builder.isCooperativeMatrix(right))
+ std::swap(left, right);
+ assert(builder.isScalar(right));
+ break;
+ case spv::OpVectorTimesMatrix:
+ assert(builder.isVector(left));
+ assert(builder.isMatrix(right));
+ break;
+ case spv::OpMatrixTimesVector:
+ assert(builder.isMatrix(left));
+ assert(builder.isVector(right));
+ break;
+ case spv::OpMatrixTimesMatrix:
+ assert(builder.isMatrix(left));
+ assert(builder.isMatrix(right));
+ break;
+ default:
+ firstClass = false;
+ break;
+ }
+
+ if (builder.isCooperativeMatrix(left) || builder.isCooperativeMatrix(right))
+ firstClass = true;
+
+ if (firstClass) {
+ spv::Id result = builder.createBinOp(op, typeId, left, right);
+ builder.addDecoration(result, decorations.noContraction);
+ builder.addDecoration(result, decorations.nonUniform);
+ return builder.setPrecision(result, decorations.precision);
+ }
+
+ // Handle component-wise +, -, *, %, and / for all combinations of type.
+ // The result type of all of them is the same as the type of the matrix operand
+ // (whichever operand is the matrix).
+ // The algorithm is to:
+ // - break the matrix(es) into vectors
+ // - smear any scalar to a vector
+ // - do vector operations
+ // - make a matrix out of the vector results
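+ //
+ // E.g. (illustrative): for "mat2 M + float s", this extracts M[0] and M[1], smears
+ // s into a vec2, emits two vector OpFAdd instructions, and reassembles the result
+ // with OpCompositeConstruct.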
+ switch (op) {
+ case spv::OpFAdd:
+ case spv::OpFSub:
+ case spv::OpFDiv:
+ case spv::OpFMod:
+ case spv::OpFMul:
+ {
+ // one-time setup...
+ bool leftMat = builder.isMatrix(left);
+ bool rightMat = builder.isMatrix(right);
+ unsigned int numCols = leftMat ? builder.getNumColumns(left) : builder.getNumColumns(right);
+ int numRows = leftMat ? builder.getNumRows(left) : builder.getNumRows(right);
+ spv::Id scalarType = builder.getScalarTypeId(typeId);
+ spv::Id vecType = builder.makeVectorType(scalarType, numRows);
+ std::vector<spv::Id> results;
+ spv::Id smearVec = spv::NoResult;
+ if (builder.isScalar(left))
+ smearVec = builder.smearScalar(decorations.precision, left, vecType);
+ else if (builder.isScalar(right))
+ smearVec = builder.smearScalar(decorations.precision, right, vecType);
+
+ // do each vector op
+ for (unsigned int c = 0; c < numCols; ++c) {
+ std::vector<unsigned int> indexes;
+ indexes.push_back(c);
+ spv::Id leftVec = leftMat ? builder.createCompositeExtract( left, vecType, indexes) : smearVec;
+ spv::Id rightVec = rightMat ? builder.createCompositeExtract(right, vecType, indexes) : smearVec;
+ spv::Id result = builder.createBinOp(op, vecType, leftVec, rightVec);
+ builder.addDecoration(result, decorations.noContraction);
+ builder.addDecoration(result, decorations.nonUniform);
+ results.push_back(builder.setPrecision(result, decorations.precision));
+ }
+
+ // put the pieces together
+ spv::Id result = builder.setPrecision(builder.createCompositeConstruct(typeId, results), decorations.precision);
+ builder.addDecoration(result, decorations.nonUniform);
+ return result;
+ }
+ default:
+ assert(0);
+ return spv::NoResult;
+ }
+}
+
+spv::Id TGlslangToSpvTraverser::createUnaryOperation(glslang::TOperator op, OpDecorations& decorations, spv::Id typeId,
+ spv::Id operand, glslang::TBasicType typeProxy)
+{
+ spv::Op unaryOp = spv::OpNop;
+ int extBuiltins = -1;
+ int libCall = -1;
+ bool isUnsigned = isTypeUnsignedInt(typeProxy);
+ bool isFloat = isTypeFloat(typeProxy);
+
+ switch (op) {
+ case glslang::EOpNegative:
+ if (isFloat) {
+ unaryOp = spv::OpFNegate;
+ if (builder.isMatrixType(typeId))
+ return createUnaryMatrixOperation(unaryOp, decorations, typeId, operand, typeProxy);
+ } else
+ unaryOp = spv::OpSNegate;
+ break;
+
+ case glslang::EOpLogicalNot:
+ case glslang::EOpVectorLogicalNot:
+ unaryOp = spv::OpLogicalNot;
+ break;
+ case glslang::EOpBitwiseNot:
+ unaryOp = spv::OpNot;
+ break;
+
+ case glslang::EOpDeterminant:
+ libCall = spv::GLSLstd450Determinant;
+ break;
+ case glslang::EOpMatrixInverse:
+ libCall = spv::GLSLstd450MatrixInverse;
+ break;
+ case glslang::EOpTranspose:
+ unaryOp = spv::OpTranspose;
+ break;
+
+ case glslang::EOpRadians:
+ libCall = spv::GLSLstd450Radians;
+ break;
+ case glslang::EOpDegrees:
+ libCall = spv::GLSLstd450Degrees;
+ break;
+ case glslang::EOpSin:
+ libCall = spv::GLSLstd450Sin;
+ break;
+ case glslang::EOpCos:
+ libCall = spv::GLSLstd450Cos;
+ break;
+ case glslang::EOpTan:
+ libCall = spv::GLSLstd450Tan;
+ break;
+ case glslang::EOpAcos:
+ libCall = spv::GLSLstd450Acos;
+ break;
+ case glslang::EOpAsin:
+ libCall = spv::GLSLstd450Asin;
+ break;
+ case glslang::EOpAtan:
+ libCall = spv::GLSLstd450Atan;
+ break;
+
+ case glslang::EOpAcosh:
+ libCall = spv::GLSLstd450Acosh;
+ break;
+ case glslang::EOpAsinh:
+ libCall = spv::GLSLstd450Asinh;
+ break;
+ case glslang::EOpAtanh:
+ libCall = spv::GLSLstd450Atanh;
+ break;
+ case glslang::EOpTanh:
+ libCall = spv::GLSLstd450Tanh;
+ break;
+ case glslang::EOpCosh:
+ libCall = spv::GLSLstd450Cosh;
+ break;
+ case glslang::EOpSinh:
+ libCall = spv::GLSLstd450Sinh;
+ break;
+
+ case glslang::EOpLength:
+ libCall = spv::GLSLstd450Length;
+ break;
+ case glslang::EOpNormalize:
+ libCall = spv::GLSLstd450Normalize;
+ break;
+
+ case glslang::EOpExp:
+ libCall = spv::GLSLstd450Exp;
+ break;
+ case glslang::EOpLog:
+ libCall = spv::GLSLstd450Log;
+ break;
+ case glslang::EOpExp2:
+ libCall = spv::GLSLstd450Exp2;
+ break;
+ case glslang::EOpLog2:
+ libCall = spv::GLSLstd450Log2;
+ break;
+ case glslang::EOpSqrt:
+ libCall = spv::GLSLstd450Sqrt;
+ break;
+ case glslang::EOpInverseSqrt:
+ libCall = spv::GLSLstd450InverseSqrt;
+ break;
+
+ case glslang::EOpFloor:
+ libCall = spv::GLSLstd450Floor;
+ break;
+ case glslang::EOpTrunc:
+ libCall = spv::GLSLstd450Trunc;
+ break;
+ case glslang::EOpRound:
+ libCall = spv::GLSLstd450Round;
+ break;
+ case glslang::EOpRoundEven:
+ libCall = spv::GLSLstd450RoundEven;
+ break;
+ case glslang::EOpCeil:
+ libCall = spv::GLSLstd450Ceil;
+ break;
+ case glslang::EOpFract:
+ libCall = spv::GLSLstd450Fract;
+ break;
+
+ case glslang::EOpIsNan:
+ unaryOp = spv::OpIsNan;
+ break;
+ case glslang::EOpIsInf:
+ unaryOp = spv::OpIsInf;
+ break;
+ case glslang::EOpIsFinite:
+ unaryOp = spv::OpIsFinite;
+ break;
+
+ case glslang::EOpFloatBitsToInt:
+ case glslang::EOpFloatBitsToUint:
+ case glslang::EOpIntBitsToFloat:
+ case glslang::EOpUintBitsToFloat:
+ case glslang::EOpDoubleBitsToInt64:
+ case glslang::EOpDoubleBitsToUint64:
+ case glslang::EOpInt64BitsToDouble:
+ case glslang::EOpUint64BitsToDouble:
+ case glslang::EOpFloat16BitsToInt16:
+ case glslang::EOpFloat16BitsToUint16:
+ case glslang::EOpInt16BitsToFloat16:
+ case glslang::EOpUint16BitsToFloat16:
+ unaryOp = spv::OpBitcast;
+ break;
+
+ case glslang::EOpPackSnorm2x16:
+ libCall = spv::GLSLstd450PackSnorm2x16;
+ break;
+ case glslang::EOpUnpackSnorm2x16:
+ libCall = spv::GLSLstd450UnpackSnorm2x16;
+ break;
+ case glslang::EOpPackUnorm2x16:
+ libCall = spv::GLSLstd450PackUnorm2x16;
+ break;
+ case glslang::EOpUnpackUnorm2x16:
+ libCall = spv::GLSLstd450UnpackUnorm2x16;
+ break;
+ case glslang::EOpPackHalf2x16:
+ libCall = spv::GLSLstd450PackHalf2x16;
+ break;
+ case glslang::EOpUnpackHalf2x16:
+ libCall = spv::GLSLstd450UnpackHalf2x16;
+ break;
+ case glslang::EOpPackSnorm4x8:
+ libCall = spv::GLSLstd450PackSnorm4x8;
+ break;
+ case glslang::EOpUnpackSnorm4x8:
+ libCall = spv::GLSLstd450UnpackSnorm4x8;
+ break;
+ case glslang::EOpPackUnorm4x8:
+ libCall = spv::GLSLstd450PackUnorm4x8;
+ break;
+ case glslang::EOpUnpackUnorm4x8:
+ libCall = spv::GLSLstd450UnpackUnorm4x8;
+ break;
+ case glslang::EOpPackDouble2x32:
+ libCall = spv::GLSLstd450PackDouble2x32;
+ break;
+ case glslang::EOpUnpackDouble2x32:
+ libCall = spv::GLSLstd450UnpackDouble2x32;
+ break;
+
+ case glslang::EOpPackInt2x32:
+ case glslang::EOpUnpackInt2x32:
+ case glslang::EOpPackUint2x32:
+ case glslang::EOpUnpackUint2x32:
+ case glslang::EOpPack16:
+ case glslang::EOpPack32:
+ case glslang::EOpPack64:
+ case glslang::EOpUnpack32:
+ case glslang::EOpUnpack16:
+ case glslang::EOpUnpack8:
+ case glslang::EOpPackInt2x16:
+ case glslang::EOpUnpackInt2x16:
+ case glslang::EOpPackUint2x16:
+ case glslang::EOpUnpackUint2x16:
+ case glslang::EOpPackInt4x16:
+ case glslang::EOpUnpackInt4x16:
+ case glslang::EOpPackUint4x16:
+ case glslang::EOpUnpackUint4x16:
+ case glslang::EOpPackFloat2x16:
+ case glslang::EOpUnpackFloat2x16:
+ unaryOp = spv::OpBitcast;
+ break;
+
+ case glslang::EOpDPdx:
+ unaryOp = spv::OpDPdx;
+ break;
+ case glslang::EOpDPdy:
+ unaryOp = spv::OpDPdy;
+ break;
+ case glslang::EOpFwidth:
+ unaryOp = spv::OpFwidth;
+ break;
+ case glslang::EOpDPdxFine:
+ unaryOp = spv::OpDPdxFine;
+ break;
+ case glslang::EOpDPdyFine:
+ unaryOp = spv::OpDPdyFine;
+ break;
+ case glslang::EOpFwidthFine:
+ unaryOp = spv::OpFwidthFine;
+ break;
+ case glslang::EOpDPdxCoarse:
+ unaryOp = spv::OpDPdxCoarse;
+ break;
+ case glslang::EOpDPdyCoarse:
+ unaryOp = spv::OpDPdyCoarse;
+ break;
+ case glslang::EOpFwidthCoarse:
+ unaryOp = spv::OpFwidthCoarse;
+ break;
+ case glslang::EOpInterpolateAtCentroid:
+#ifdef AMD_EXTENSIONS
+ if (typeProxy == glslang::EbtFloat16)
+ builder.addExtension(spv::E_SPV_AMD_gpu_shader_half_float);
+#endif
+ libCall = spv::GLSLstd450InterpolateAtCentroid;
+ break;
+ case glslang::EOpAny:
+ unaryOp = spv::OpAny;
+ break;
+ case glslang::EOpAll:
+ unaryOp = spv::OpAll;
+ break;
+
+ case glslang::EOpAbs:
+ if (isFloat)
+ libCall = spv::GLSLstd450FAbs;
+ else
+ libCall = spv::GLSLstd450SAbs;
+ break;
+ case glslang::EOpSign:
+ if (isFloat)
+ libCall = spv::GLSLstd450FSign;
+ else
+ libCall = spv::GLSLstd450SSign;
+ break;
+
+ case glslang::EOpAtomicCounterIncrement:
+ case glslang::EOpAtomicCounterDecrement:
+ case glslang::EOpAtomicCounter:
+ {
+ // Handle all of the atomics in one place, in createAtomicOperation()
+ std::vector<spv::Id> operands;
+ operands.push_back(operand);
+ return createAtomicOperation(op, decorations.precision, typeId, operands, typeProxy);
+ }
+
+ case glslang::EOpBitFieldReverse:
+ unaryOp = spv::OpBitReverse;
+ break;
+ case glslang::EOpBitCount:
+ unaryOp = spv::OpBitCount;
+ break;
+ case glslang::EOpFindLSB:
+ libCall = spv::GLSLstd450FindILsb;
+ break;
+ case glslang::EOpFindMSB:
+ if (isUnsigned)
+ libCall = spv::GLSLstd450FindUMsb;
+ else
+ libCall = spv::GLSLstd450FindSMsb;
+ break;
+
+ case glslang::EOpBallot:
+ case glslang::EOpReadFirstInvocation:
+ case glslang::EOpAnyInvocation:
+ case glslang::EOpAllInvocations:
+ case glslang::EOpAllInvocationsEqual:
+#ifdef AMD_EXTENSIONS
+ case glslang::EOpMinInvocations:
+ case glslang::EOpMaxInvocations:
+ case glslang::EOpAddInvocations:
+ case glslang::EOpMinInvocationsNonUniform:
+ case glslang::EOpMaxInvocationsNonUniform:
+ case glslang::EOpAddInvocationsNonUniform:
+ case glslang::EOpMinInvocationsInclusiveScan:
+ case glslang::EOpMaxInvocationsInclusiveScan:
+ case glslang::EOpAddInvocationsInclusiveScan:
+ case glslang::EOpMinInvocationsInclusiveScanNonUniform:
+ case glslang::EOpMaxInvocationsInclusiveScanNonUniform:
+ case glslang::EOpAddInvocationsInclusiveScanNonUniform:
+ case glslang::EOpMinInvocationsExclusiveScan:
+ case glslang::EOpMaxInvocationsExclusiveScan:
+ case glslang::EOpAddInvocationsExclusiveScan:
+ case glslang::EOpMinInvocationsExclusiveScanNonUniform:
+ case glslang::EOpMaxInvocationsExclusiveScanNonUniform:
+ case glslang::EOpAddInvocationsExclusiveScanNonUniform:
+#endif
+ {
+ std::vector<spv::Id> operands;
+ operands.push_back(operand);
+ return createInvocationsOperation(op, typeId, operands, typeProxy);
+ }
+ case glslang::EOpSubgroupAll:
+ case glslang::EOpSubgroupAny:
+ case glslang::EOpSubgroupAllEqual:
+ case glslang::EOpSubgroupBroadcastFirst:
+ case glslang::EOpSubgroupBallot:
+ case glslang::EOpSubgroupInverseBallot:
+ case glslang::EOpSubgroupBallotBitCount:
+ case glslang::EOpSubgroupBallotInclusiveBitCount:
+ case glslang::EOpSubgroupBallotExclusiveBitCount:
+ case glslang::EOpSubgroupBallotFindLSB:
+ case glslang::EOpSubgroupBallotFindMSB:
+ case glslang::EOpSubgroupAdd:
+ case glslang::EOpSubgroupMul:
+ case glslang::EOpSubgroupMin:
+ case glslang::EOpSubgroupMax:
+ case glslang::EOpSubgroupAnd:
+ case glslang::EOpSubgroupOr:
+ case glslang::EOpSubgroupXor:
+ case glslang::EOpSubgroupInclusiveAdd:
+ case glslang::EOpSubgroupInclusiveMul:
+ case glslang::EOpSubgroupInclusiveMin:
+ case glslang::EOpSubgroupInclusiveMax:
+ case glslang::EOpSubgroupInclusiveAnd:
+ case glslang::EOpSubgroupInclusiveOr:
+ case glslang::EOpSubgroupInclusiveXor:
+ case glslang::EOpSubgroupExclusiveAdd:
+ case glslang::EOpSubgroupExclusiveMul:
+ case glslang::EOpSubgroupExclusiveMin:
+ case glslang::EOpSubgroupExclusiveMax:
+ case glslang::EOpSubgroupExclusiveAnd:
+ case glslang::EOpSubgroupExclusiveOr:
+ case glslang::EOpSubgroupExclusiveXor:
+ case glslang::EOpSubgroupQuadSwapHorizontal:
+ case glslang::EOpSubgroupQuadSwapVertical:
+ case glslang::EOpSubgroupQuadSwapDiagonal: {
+ std::vector<spv::Id> operands;
+ operands.push_back(operand);
+ return createSubgroupOperation(op, typeId, operands, typeProxy);
+ }
+#ifdef AMD_EXTENSIONS
+ case glslang::EOpMbcnt:
+ extBuiltins = getExtBuiltins(spv::E_SPV_AMD_shader_ballot);
+ libCall = spv::MbcntAMD;
+ break;
+
+ case glslang::EOpCubeFaceIndex:
+ extBuiltins = getExtBuiltins(spv::E_SPV_AMD_gcn_shader);
+ libCall = spv::CubeFaceIndexAMD;
+ break;
+
+ case glslang::EOpCubeFaceCoord:
+ extBuiltins = getExtBuiltins(spv::E_SPV_AMD_gcn_shader);
+ libCall = spv::CubeFaceCoordAMD;
+ break;
+#endif
+#ifdef NV_EXTENSIONS
+ case glslang::EOpSubgroupPartition:
+ unaryOp = spv::OpGroupNonUniformPartitionNV;
+ break;
+#endif
+ case glslang::EOpConstructReference:
+ unaryOp = spv::OpBitcast;
+ break;
+
+ case glslang::EOpCopyObject:
+ unaryOp = spv::OpCopyObject;
+ break;
+
+ default:
+ return 0;
+ }
+
+ spv::Id id;
+ if (libCall >= 0) {
+ std::vector<spv::Id> args;
+ args.push_back(operand);
+ id = builder.createBuiltinCall(typeId, extBuiltins >= 0 ? extBuiltins : stdBuiltins, libCall, args);
+ } else {
+ id = builder.createUnaryOp(unaryOp, typeId, operand);
+ }
+
+ builder.addDecoration(id, decorations.noContraction);
+ builder.addDecoration(id, decorations.nonUniform);
+ return builder.setPrecision(id, decorations.precision);
+}
+
+// Create a unary operation on a matrix
+spv::Id TGlslangToSpvTraverser::createUnaryMatrixOperation(spv::Op op, OpDecorations& decorations, spv::Id typeId,
+ spv::Id operand, glslang::TBasicType /* typeProxy */)
+{
+ // Handle unary operations vector by vector.
+ // The result type is the same type as the original type.
+ // The algorithm is to:
+ // - break the matrix into vectors
+ // - apply the operation to each vector
+ // - make a matrix out of the vector results
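+ // (Illustrative: negating a mat3 extracts its three columns, applies OpFNegate to
+ // each, and rebuilds the matrix with OpCompositeConstruct.)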
+
+ // get the types sorted out
+ int numCols = builder.getNumColumns(operand);
+ int numRows = builder.getNumRows(operand);
+ spv::Id srcVecType = builder.makeVectorType(builder.getScalarTypeId(builder.getTypeId(operand)), numRows);
+ spv::Id destVecType = builder.makeVectorType(builder.getScalarTypeId(typeId), numRows);
+ std::vector<spv::Id> results;
+
+ // do each vector op
+ for (int c = 0; c < numCols; ++c) {
+ std::vector<unsigned int> indexes;
+ indexes.push_back(c);
+ spv::Id srcVec = builder.createCompositeExtract(operand, srcVecType, indexes);
+ spv::Id destVec = builder.createUnaryOp(op, destVecType, srcVec);
+ builder.addDecoration(destVec, decorations.noContraction);
+ builder.addDecoration(destVec, decorations.nonUniform);
+ results.push_back(builder.setPrecision(destVec, decorations.precision));
+ }
+
+ // put the pieces together
+ spv::Id result = builder.setPrecision(builder.createCompositeConstruct(typeId, results), decorations.precision);
+ builder.addDecoration(result, decorations.nonUniform);
+ return result;
+}
+
+// For integer conversions where both the bit width and the signedness could
+// change; only the width change is done here. The caller is still responsible
+// for the signedness conversion.
+spv::Id TGlslangToSpvTraverser::createIntWidthConversion(glslang::TOperator op, spv::Id operand, int vectorSize)
+{
+ // Get the result type width, based on the type to convert to.
+ int width = 32;
+ switch(op) {
+ case glslang::EOpConvInt16ToUint8:
+ case glslang::EOpConvIntToUint8:
+ case glslang::EOpConvInt64ToUint8:
+ case glslang::EOpConvUint16ToInt8:
+ case glslang::EOpConvUintToInt8:
+ case glslang::EOpConvUint64ToInt8:
+ width = 8;
+ break;
+ case glslang::EOpConvInt8ToUint16:
+ case glslang::EOpConvIntToUint16:
+ case glslang::EOpConvInt64ToUint16:
+ case glslang::EOpConvUint8ToInt16:
+ case glslang::EOpConvUintToInt16:
+ case glslang::EOpConvUint64ToInt16:
+ width = 16;
+ break;
+ case glslang::EOpConvInt8ToUint:
+ case glslang::EOpConvInt16ToUint:
+ case glslang::EOpConvInt64ToUint:
+ case glslang::EOpConvUint8ToInt:
+ case glslang::EOpConvUint16ToInt:
+ case glslang::EOpConvUint64ToInt:
+ width = 32;
+ break;
+ case glslang::EOpConvInt8ToUint64:
+ case glslang::EOpConvInt16ToUint64:
+ case glslang::EOpConvIntToUint64:
+ case glslang::EOpConvUint8ToInt64:
+ case glslang::EOpConvUint16ToInt64:
+ case glslang::EOpConvUintToInt64:
+ width = 64;
+ break;
+
+ default:
+ assert(false && "Default missing");
+ break;
+ }
+
+    // Get the conversion operation and result type,
+    // based on the target width but the source signedness.
+ spv::Id type = spv::NoType;
+ spv::Op convOp = spv::OpNop;
+ switch(op) {
+ case glslang::EOpConvInt8ToUint16:
+ case glslang::EOpConvInt8ToUint:
+ case glslang::EOpConvInt8ToUint64:
+ case glslang::EOpConvInt16ToUint8:
+ case glslang::EOpConvInt16ToUint:
+ case glslang::EOpConvInt16ToUint64:
+ case glslang::EOpConvIntToUint8:
+ case glslang::EOpConvIntToUint16:
+ case glslang::EOpConvIntToUint64:
+ case glslang::EOpConvInt64ToUint8:
+ case glslang::EOpConvInt64ToUint16:
+ case glslang::EOpConvInt64ToUint:
+ convOp = spv::OpSConvert;
+ type = builder.makeIntType(width);
+ break;
+ default:
+ convOp = spv::OpUConvert;
+ type = builder.makeUintType(width);
+ break;
+ }
+
+ if (vectorSize > 0)
+ type = builder.makeVectorType(type, vectorSize);
+
+ return builder.createUnaryOp(convOp, type, operand);
+}
+
+spv::Id TGlslangToSpvTraverser::createConversion(glslang::TOperator op, OpDecorations& decorations, spv::Id destType,
+ spv::Id operand, glslang::TBasicType typeProxy)
+{
+ spv::Op convOp = spv::OpNop;
+ spv::Id zero = 0;
+ spv::Id one = 0;
+
+ int vectorSize = builder.isVectorType(destType) ? builder.getNumTypeComponents(destType) : 0;
+
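+    // Conversions to bool compare the operand against zero; conversions from bool select between one and zero.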
+ switch (op) {
+ case glslang::EOpConvInt8ToBool:
+ case glslang::EOpConvUint8ToBool:
+ zero = builder.makeUint8Constant(0);
+ zero = makeSmearedConstant(zero, vectorSize);
+ return builder.createBinOp(spv::OpINotEqual, destType, operand, zero);
+ case glslang::EOpConvInt16ToBool:
+ case glslang::EOpConvUint16ToBool:
+ zero = builder.makeUint16Constant(0);
+ zero = makeSmearedConstant(zero, vectorSize);
+ return builder.createBinOp(spv::OpINotEqual, destType, operand, zero);
+ case glslang::EOpConvIntToBool:
+ case glslang::EOpConvUintToBool:
+ zero = builder.makeUintConstant(0);
+ zero = makeSmearedConstant(zero, vectorSize);
+ return builder.createBinOp(spv::OpINotEqual, destType, operand, zero);
+ case glslang::EOpConvInt64ToBool:
+ case glslang::EOpConvUint64ToBool:
+ zero = builder.makeUint64Constant(0);
+ zero = makeSmearedConstant(zero, vectorSize);
+ return builder.createBinOp(spv::OpINotEqual, destType, operand, zero);
+
+ case glslang::EOpConvFloatToBool:
+ zero = builder.makeFloatConstant(0.0F);
+ zero = makeSmearedConstant(zero, vectorSize);
+ return builder.createBinOp(spv::OpFOrdNotEqual, destType, operand, zero);
+
+ case glslang::EOpConvDoubleToBool:
+ zero = builder.makeDoubleConstant(0.0);
+ zero = makeSmearedConstant(zero, vectorSize);
+ return builder.createBinOp(spv::OpFOrdNotEqual, destType, operand, zero);
+
+ case glslang::EOpConvFloat16ToBool:
+ zero = builder.makeFloat16Constant(0.0F);
+ zero = makeSmearedConstant(zero, vectorSize);
+ return builder.createBinOp(spv::OpFOrdNotEqual, destType, operand, zero);
+
+ case glslang::EOpConvBoolToFloat:
+ convOp = spv::OpSelect;
+ zero = builder.makeFloatConstant(0.0F);
+ one = builder.makeFloatConstant(1.0F);
+ break;
+
+ case glslang::EOpConvBoolToDouble:
+ convOp = spv::OpSelect;
+ zero = builder.makeDoubleConstant(0.0);
+ one = builder.makeDoubleConstant(1.0);
+ break;
+
+ case glslang::EOpConvBoolToFloat16:
+ convOp = spv::OpSelect;
+ zero = builder.makeFloat16Constant(0.0F);
+ one = builder.makeFloat16Constant(1.0F);
+ break;
+
+ case glslang::EOpConvBoolToInt8:
+ zero = builder.makeInt8Constant(0);
+ one = builder.makeInt8Constant(1);
+ convOp = spv::OpSelect;
+ break;
+
+ case glslang::EOpConvBoolToUint8:
+ zero = builder.makeUint8Constant(0);
+ one = builder.makeUint8Constant(1);
+ convOp = spv::OpSelect;
+ break;
+
+ case glslang::EOpConvBoolToInt16:
+ zero = builder.makeInt16Constant(0);
+ one = builder.makeInt16Constant(1);
+ convOp = spv::OpSelect;
+ break;
+
+ case glslang::EOpConvBoolToUint16:
+ zero = builder.makeUint16Constant(0);
+ one = builder.makeUint16Constant(1);
+ convOp = spv::OpSelect;
+ break;
+
+ case glslang::EOpConvBoolToInt:
+ case glslang::EOpConvBoolToInt64:
+ if (op == glslang::EOpConvBoolToInt64)
+ zero = builder.makeInt64Constant(0);
+ else
+ zero = builder.makeIntConstant(0);
+
+ if (op == glslang::EOpConvBoolToInt64)
+ one = builder.makeInt64Constant(1);
+ else
+ one = builder.makeIntConstant(1);
+
+ convOp = spv::OpSelect;
+ break;
+
+ case glslang::EOpConvBoolToUint:
+ case glslang::EOpConvBoolToUint64:
+ if (op == glslang::EOpConvBoolToUint64)
+ zero = builder.makeUint64Constant(0);
+ else
+ zero = builder.makeUintConstant(0);
+
+ if (op == glslang::EOpConvBoolToUint64)
+ one = builder.makeUint64Constant(1);
+ else
+ one = builder.makeUintConstant(1);
+
+ convOp = spv::OpSelect;
+ break;
+
+ case glslang::EOpConvInt8ToFloat16:
+ case glslang::EOpConvInt8ToFloat:
+ case glslang::EOpConvInt8ToDouble:
+ case glslang::EOpConvInt16ToFloat16:
+ case glslang::EOpConvInt16ToFloat:
+ case glslang::EOpConvInt16ToDouble:
+ case glslang::EOpConvIntToFloat16:
+ case glslang::EOpConvIntToFloat:
+ case glslang::EOpConvIntToDouble:
+ case glslang::EOpConvInt64ToFloat:
+ case glslang::EOpConvInt64ToDouble:
+ case glslang::EOpConvInt64ToFloat16:
+ convOp = spv::OpConvertSToF;
+ break;
+
+ case glslang::EOpConvUint8ToFloat16:
+ case glslang::EOpConvUint8ToFloat:
+ case glslang::EOpConvUint8ToDouble:
+ case glslang::EOpConvUint16ToFloat16:
+ case glslang::EOpConvUint16ToFloat:
+ case glslang::EOpConvUint16ToDouble:
+ case glslang::EOpConvUintToFloat16:
+ case glslang::EOpConvUintToFloat:
+ case glslang::EOpConvUintToDouble:
+ case glslang::EOpConvUint64ToFloat:
+ case glslang::EOpConvUint64ToDouble:
+ case glslang::EOpConvUint64ToFloat16:
+ convOp = spv::OpConvertUToF;
+ break;
+
+ case glslang::EOpConvDoubleToFloat:
+ case glslang::EOpConvFloatToDouble:
+ case glslang::EOpConvDoubleToFloat16:
+ case glslang::EOpConvFloat16ToDouble:
+ case glslang::EOpConvFloatToFloat16:
+ case glslang::EOpConvFloat16ToFloat:
+ convOp = spv::OpFConvert;
+ if (builder.isMatrixType(destType))
+ return createUnaryMatrixOperation(convOp, decorations, destType, operand, typeProxy);
+ break;
+
+ case glslang::EOpConvFloat16ToInt8:
+ case glslang::EOpConvFloatToInt8:
+ case glslang::EOpConvDoubleToInt8:
+ case glslang::EOpConvFloat16ToInt16:
+ case glslang::EOpConvFloatToInt16:
+ case glslang::EOpConvDoubleToInt16:
+ case glslang::EOpConvFloat16ToInt:
+ case glslang::EOpConvFloatToInt:
+ case glslang::EOpConvDoubleToInt:
+ case glslang::EOpConvFloat16ToInt64:
+ case glslang::EOpConvFloatToInt64:
+ case glslang::EOpConvDoubleToInt64:
+ convOp = spv::OpConvertFToS;
+ break;
+
+ case glslang::EOpConvUint8ToInt8:
+ case glslang::EOpConvInt8ToUint8:
+ case glslang::EOpConvUint16ToInt16:
+ case glslang::EOpConvInt16ToUint16:
+ case glslang::EOpConvUintToInt:
+ case glslang::EOpConvIntToUint:
+ case glslang::EOpConvUint64ToInt64:
+ case glslang::EOpConvInt64ToUint64:
+ if (builder.isInSpecConstCodeGenMode()) {
+ // Build zero scalar or vector for OpIAdd.
+ if(op == glslang::EOpConvUint8ToInt8 || op == glslang::EOpConvInt8ToUint8) {
+ zero = builder.makeUint8Constant(0);
+ } else if (op == glslang::EOpConvUint16ToInt16 || op == glslang::EOpConvInt16ToUint16) {
+ zero = builder.makeUint16Constant(0);
+ } else if (op == glslang::EOpConvUint64ToInt64 || op == glslang::EOpConvInt64ToUint64) {
+ zero = builder.makeUint64Constant(0);
+ } else {
+ zero = builder.makeUintConstant(0);
+ }
+ zero = makeSmearedConstant(zero, vectorSize);
+            // Use OpIAdd instead of OpBitcast to do the conversion when
+            // generating an OpSpecConstantOp instruction.
+ return builder.createBinOp(spv::OpIAdd, destType, operand, zero);
+ }
+        // For a normal run-time conversion instruction, use OpBitcast.
+ convOp = spv::OpBitcast;
+ break;
+
+ case glslang::EOpConvFloat16ToUint8:
+ case glslang::EOpConvFloatToUint8:
+ case glslang::EOpConvDoubleToUint8:
+ case glslang::EOpConvFloat16ToUint16:
+ case glslang::EOpConvFloatToUint16:
+ case glslang::EOpConvDoubleToUint16:
+ case glslang::EOpConvFloat16ToUint:
+ case glslang::EOpConvFloatToUint:
+ case glslang::EOpConvDoubleToUint:
+ case glslang::EOpConvFloatToUint64:
+ case glslang::EOpConvDoubleToUint64:
+ case glslang::EOpConvFloat16ToUint64:
+ convOp = spv::OpConvertFToU;
+ break;
+
+ case glslang::EOpConvInt8ToInt16:
+ case glslang::EOpConvInt8ToInt:
+ case glslang::EOpConvInt8ToInt64:
+ case glslang::EOpConvInt16ToInt8:
+ case glslang::EOpConvInt16ToInt:
+ case glslang::EOpConvInt16ToInt64:
+ case glslang::EOpConvIntToInt8:
+ case glslang::EOpConvIntToInt16:
+ case glslang::EOpConvIntToInt64:
+ case glslang::EOpConvInt64ToInt8:
+ case glslang::EOpConvInt64ToInt16:
+ case glslang::EOpConvInt64ToInt:
+ convOp = spv::OpSConvert;
+ break;
+
+ case glslang::EOpConvUint8ToUint16:
+ case glslang::EOpConvUint8ToUint:
+ case glslang::EOpConvUint8ToUint64:
+ case glslang::EOpConvUint16ToUint8:
+ case glslang::EOpConvUint16ToUint:
+ case glslang::EOpConvUint16ToUint64:
+ case glslang::EOpConvUintToUint8:
+ case glslang::EOpConvUintToUint16:
+ case glslang::EOpConvUintToUint64:
+ case glslang::EOpConvUint64ToUint8:
+ case glslang::EOpConvUint64ToUint16:
+ case glslang::EOpConvUint64ToUint:
+ convOp = spv::OpUConvert;
+ break;
+
+ case glslang::EOpConvInt8ToUint16:
+ case glslang::EOpConvInt8ToUint:
+ case glslang::EOpConvInt8ToUint64:
+ case glslang::EOpConvInt16ToUint8:
+ case glslang::EOpConvInt16ToUint:
+ case glslang::EOpConvInt16ToUint64:
+ case glslang::EOpConvIntToUint8:
+ case glslang::EOpConvIntToUint16:
+ case glslang::EOpConvIntToUint64:
+ case glslang::EOpConvInt64ToUint8:
+ case glslang::EOpConvInt64ToUint16:
+ case glslang::EOpConvInt64ToUint:
+ case glslang::EOpConvUint8ToInt16:
+ case glslang::EOpConvUint8ToInt:
+ case glslang::EOpConvUint8ToInt64:
+ case glslang::EOpConvUint16ToInt8:
+ case glslang::EOpConvUint16ToInt:
+ case glslang::EOpConvUint16ToInt64:
+ case glslang::EOpConvUintToInt8:
+ case glslang::EOpConvUintToInt16:
+ case glslang::EOpConvUintToInt64:
+ case glslang::EOpConvUint64ToInt8:
+ case glslang::EOpConvUint64ToInt16:
+ case glslang::EOpConvUint64ToInt:
+        // OpSConvert/OpUConvert + OpBitcast
+ operand = createIntWidthConversion(op, operand, vectorSize);
+
+ if (builder.isInSpecConstCodeGenMode()) {
+ // Build zero scalar or vector for OpIAdd.
+ switch(op) {
+ case glslang::EOpConvInt16ToUint8:
+ case glslang::EOpConvIntToUint8:
+ case glslang::EOpConvInt64ToUint8:
+ case glslang::EOpConvUint16ToInt8:
+ case glslang::EOpConvUintToInt8:
+ case glslang::EOpConvUint64ToInt8:
+ zero = builder.makeUint8Constant(0);
+ break;
+ case glslang::EOpConvInt8ToUint16:
+ case glslang::EOpConvIntToUint16:
+ case glslang::EOpConvInt64ToUint16:
+ case glslang::EOpConvUint8ToInt16:
+ case glslang::EOpConvUintToInt16:
+ case glslang::EOpConvUint64ToInt16:
+ zero = builder.makeUint16Constant(0);
+ break;
+ case glslang::EOpConvInt8ToUint:
+ case glslang::EOpConvInt16ToUint:
+ case glslang::EOpConvInt64ToUint:
+ case glslang::EOpConvUint8ToInt:
+ case glslang::EOpConvUint16ToInt:
+ case glslang::EOpConvUint64ToInt:
+ zero = builder.makeUintConstant(0);
+ break;
+ case glslang::EOpConvInt8ToUint64:
+ case glslang::EOpConvInt16ToUint64:
+ case glslang::EOpConvIntToUint64:
+ case glslang::EOpConvUint8ToInt64:
+ case glslang::EOpConvUint16ToInt64:
+ case glslang::EOpConvUintToInt64:
+ zero = builder.makeUint64Constant(0);
+ break;
+ default:
+ assert(false && "Default missing");
+ break;
+ }
+ zero = makeSmearedConstant(zero, vectorSize);
+            // Use OpIAdd instead of OpBitcast to do the conversion when
+            // generating an OpSpecConstantOp instruction.
+ return builder.createBinOp(spv::OpIAdd, destType, operand, zero);
+ }
+        // For a normal run-time conversion instruction, use OpBitcast.
+ convOp = spv::OpBitcast;
+ break;
+ case glslang::EOpConvUint64ToPtr:
+ convOp = spv::OpConvertUToPtr;
+ break;
+ case glslang::EOpConvPtrToUint64:
+ convOp = spv::OpConvertPtrToU;
+ break;
+ default:
+ break;
+ }
+
+ spv::Id result = 0;
+ if (convOp == spv::OpNop)
+ return result;
+
+ if (convOp == spv::OpSelect) {
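+        // Bool-to-numeric conversion: select between the (smeared) one and zero constants.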
+ zero = makeSmearedConstant(zero, vectorSize);
+ one = makeSmearedConstant(one, vectorSize);
+ result = builder.createTriOp(convOp, destType, operand, one, zero);
+ } else
+ result = builder.createUnaryOp(convOp, destType, operand);
+
+ result = builder.setPrecision(result, decorations.precision);
+ builder.addDecoration(result, decorations.nonUniform);
+ return result;
+}
+
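+// Smear a scalar constant across a vector of the given size; a vectorSize of 0 means scalar, so the constant is returned unchanged.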
+spv::Id TGlslangToSpvTraverser::makeSmearedConstant(spv::Id constant, int vectorSize)
+{
+ if (vectorSize == 0)
+ return constant;
+
+ spv::Id vectorTypeId = builder.makeVectorType(builder.getTypeId(constant), vectorSize);
+ std::vector<spv::Id> components;
+ for (int c = 0; c < vectorSize; ++c)
+ components.push_back(constant);
+ return builder.makeCompositeConstant(vectorTypeId, components);
+}
+
+// For glslang ops that map to SPV atomic opCodes
+spv::Id TGlslangToSpvTraverser::createAtomicOperation(glslang::TOperator op, spv::Decoration /*precision*/, spv::Id typeId, std::vector<spv::Id>& operands, glslang::TBasicType typeProxy)
+{
+ spv::Op opCode = spv::OpNop;
+
+ switch (op) {
+ case glslang::EOpAtomicAdd:
+ case glslang::EOpImageAtomicAdd:
+ case glslang::EOpAtomicCounterAdd:
+ opCode = spv::OpAtomicIAdd;
+ break;
+ case glslang::EOpAtomicCounterSubtract:
+ opCode = spv::OpAtomicISub;
+ break;
+ case glslang::EOpAtomicMin:
+ case glslang::EOpImageAtomicMin:
+ case glslang::EOpAtomicCounterMin:
+ opCode = (typeProxy == glslang::EbtUint || typeProxy == glslang::EbtUint64) ? spv::OpAtomicUMin : spv::OpAtomicSMin;
+ break;
+ case glslang::EOpAtomicMax:
+ case glslang::EOpImageAtomicMax:
+ case glslang::EOpAtomicCounterMax:
+ opCode = (typeProxy == glslang::EbtUint || typeProxy == glslang::EbtUint64) ? spv::OpAtomicUMax : spv::OpAtomicSMax;
+ break;
+ case glslang::EOpAtomicAnd:
+ case glslang::EOpImageAtomicAnd:
+ case glslang::EOpAtomicCounterAnd:
+ opCode = spv::OpAtomicAnd;
+ break;
+ case glslang::EOpAtomicOr:
+ case glslang::EOpImageAtomicOr:
+ case glslang::EOpAtomicCounterOr:
+ opCode = spv::OpAtomicOr;
+ break;
+ case glslang::EOpAtomicXor:
+ case glslang::EOpImageAtomicXor:
+ case glslang::EOpAtomicCounterXor:
+ opCode = spv::OpAtomicXor;
+ break;
+ case glslang::EOpAtomicExchange:
+ case glslang::EOpImageAtomicExchange:
+ case glslang::EOpAtomicCounterExchange:
+ opCode = spv::OpAtomicExchange;
+ break;
+ case glslang::EOpAtomicCompSwap:
+ case glslang::EOpImageAtomicCompSwap:
+ case glslang::EOpAtomicCounterCompSwap:
+ opCode = spv::OpAtomicCompareExchange;
+ break;
+ case glslang::EOpAtomicCounterIncrement:
+ opCode = spv::OpAtomicIIncrement;
+ break;
+ case glslang::EOpAtomicCounterDecrement:
+ opCode = spv::OpAtomicIDecrement;
+ break;
+ case glslang::EOpAtomicCounter:
+ case glslang::EOpImageAtomicLoad:
+ case glslang::EOpAtomicLoad:
+ opCode = spv::OpAtomicLoad;
+ break;
+ case glslang::EOpAtomicStore:
+ case glslang::EOpImageAtomicStore:
+ opCode = spv::OpAtomicStore;
+ break;
+ default:
+ assert(0);
+ break;
+ }
+
+ if (typeProxy == glslang::EbtInt64 || typeProxy == glslang::EbtUint64)
+ builder.addCapability(spv::CapabilityInt64Atomics);
+
+ // Sort out the operands
+ // - mapping from glslang -> SPV
+ // - there are extra SPV operands that are optional in glslang
+ // - compare-exchange swaps the value and comparator
+ // - compare-exchange has an extra memory semantics
+    // - EOpAtomicCounterDecrement needs a post-decrement
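+    // For example, atomicCompSwap(mem, compare, data) arrives here as { pointer, compare, value },
+    // while OpAtomicCompareExchange takes (pointer, scope, equal semantics, unequal semantics, value, comparator).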
+ spv::Id pointerId = 0, compareId = 0, valueId = 0;
+ // scope defaults to Device in the old model, QueueFamilyKHR in the new model
+ spv::Id scopeId;
+ if (glslangIntermediate->usingVulkanMemoryModel()) {
+ scopeId = builder.makeUintConstant(spv::ScopeQueueFamilyKHR);
+ } else {
+ scopeId = builder.makeUintConstant(spv::ScopeDevice);
+ }
+ // semantics default to relaxed
+ spv::Id semanticsId = builder.makeUintConstant(spv::MemorySemanticsMaskNone);
+ spv::Id semanticsId2 = semanticsId;
+
+ pointerId = operands[0];
+ if (opCode == spv::OpAtomicIIncrement || opCode == spv::OpAtomicIDecrement) {
+ // no additional operands
+ } else if (opCode == spv::OpAtomicCompareExchange) {
+ compareId = operands[1];
+ valueId = operands[2];
+ if (operands.size() > 3) {
+ scopeId = operands[3];
+ semanticsId = builder.makeUintConstant(builder.getConstantScalar(operands[4]) | builder.getConstantScalar(operands[5]));
+ semanticsId2 = builder.makeUintConstant(builder.getConstantScalar(operands[6]) | builder.getConstantScalar(operands[7]));
+ }
+ } else if (opCode == spv::OpAtomicLoad) {
+ if (operands.size() > 1) {
+ scopeId = operands[1];
+ semanticsId = builder.makeUintConstant(builder.getConstantScalar(operands[2]) | builder.getConstantScalar(operands[3]));
+ }
+ } else {
+ // atomic store or RMW
+ valueId = operands[1];
+ if (operands.size() > 2) {
+ scopeId = operands[2];
+ semanticsId = builder.makeUintConstant(builder.getConstantScalar(operands[3]) | builder.getConstantScalar(operands[4]));
+ }
+ }
+
+ // Check for capabilities
+ unsigned semanticsImmediate = builder.getConstantScalar(semanticsId) | builder.getConstantScalar(semanticsId2);
+ if (semanticsImmediate & (spv::MemorySemanticsMakeAvailableKHRMask | spv::MemorySemanticsMakeVisibleKHRMask | spv::MemorySemanticsOutputMemoryKHRMask)) {
+ builder.addCapability(spv::CapabilityVulkanMemoryModelKHR);
+ }
+
+ if (glslangIntermediate->usingVulkanMemoryModel() && builder.getConstantScalar(scopeId) == spv::ScopeDevice) {
+ builder.addCapability(spv::CapabilityVulkanMemoryModelDeviceScopeKHR);
+ }
+
+ std::vector<spv::Id> spvAtomicOperands; // hold the spv operands
+ spvAtomicOperands.push_back(pointerId);
+ spvAtomicOperands.push_back(scopeId);
+ spvAtomicOperands.push_back(semanticsId);
+ if (opCode == spv::OpAtomicCompareExchange) {
+ spvAtomicOperands.push_back(semanticsId2);
+ spvAtomicOperands.push_back(valueId);
+ spvAtomicOperands.push_back(compareId);
+ } else if (opCode != spv::OpAtomicLoad && opCode != spv::OpAtomicIIncrement && opCode != spv::OpAtomicIDecrement) {
+ spvAtomicOperands.push_back(valueId);
+ }
+
+ if (opCode == spv::OpAtomicStore) {
+ builder.createNoResultOp(opCode, spvAtomicOperands);
+ return 0;
+ } else {
+ spv::Id resultId = builder.createOp(opCode, typeId, spvAtomicOperands);
+
+        // GLSL and HLSL atomic-counter decrement returns the post-decrement value,
+        // while SPIR-V returns the pre-decrement value. Translate between these semantics.
+ if (op == glslang::EOpAtomicCounterDecrement)
+ resultId = builder.createBinOp(spv::OpISub, typeId, resultId, builder.makeIntConstant(1));
+
+ return resultId;
+ }
+}
+
+// Create group invocation operations.
+spv::Id TGlslangToSpvTraverser::createInvocationsOperation(glslang::TOperator op, spv::Id typeId, std::vector<spv::Id>& operands, glslang::TBasicType typeProxy)
+{
+#ifdef AMD_EXTENSIONS
+ bool isUnsigned = isTypeUnsignedInt(typeProxy);
+ bool isFloat = isTypeFloat(typeProxy);
+#endif
+
+ spv::Op opCode = spv::OpNop;
+ std::vector<spv::IdImmediate> spvGroupOperands;
+ spv::GroupOperation groupOperation = spv::GroupOperationMax;
+
+ if (op == glslang::EOpBallot || op == glslang::EOpReadFirstInvocation ||
+ op == glslang::EOpReadInvocation) {
+ builder.addExtension(spv::E_SPV_KHR_shader_ballot);
+ builder.addCapability(spv::CapabilitySubgroupBallotKHR);
+ } else if (op == glslang::EOpAnyInvocation ||
+ op == glslang::EOpAllInvocations ||
+ op == glslang::EOpAllInvocationsEqual) {
+ builder.addExtension(spv::E_SPV_KHR_subgroup_vote);
+ builder.addCapability(spv::CapabilitySubgroupVoteKHR);
+ } else {
+ builder.addCapability(spv::CapabilityGroups);
+#ifdef AMD_EXTENSIONS
+ if (op == glslang::EOpMinInvocationsNonUniform ||
+ op == glslang::EOpMaxInvocationsNonUniform ||
+ op == glslang::EOpAddInvocationsNonUniform ||
+ op == glslang::EOpMinInvocationsInclusiveScanNonUniform ||
+ op == glslang::EOpMaxInvocationsInclusiveScanNonUniform ||
+ op == glslang::EOpAddInvocationsInclusiveScanNonUniform ||
+ op == glslang::EOpMinInvocationsExclusiveScanNonUniform ||
+ op == glslang::EOpMaxInvocationsExclusiveScanNonUniform ||
+ op == glslang::EOpAddInvocationsExclusiveScanNonUniform)
+ builder.addExtension(spv::E_SPV_AMD_shader_ballot);
+#endif
+
+#ifdef AMD_EXTENSIONS
+ switch (op) {
+ case glslang::EOpMinInvocations:
+ case glslang::EOpMaxInvocations:
+ case glslang::EOpAddInvocations:
+ case glslang::EOpMinInvocationsNonUniform:
+ case glslang::EOpMaxInvocationsNonUniform:
+ case glslang::EOpAddInvocationsNonUniform:
+ groupOperation = spv::GroupOperationReduce;
+ break;
+ case glslang::EOpMinInvocationsInclusiveScan:
+ case glslang::EOpMaxInvocationsInclusiveScan:
+ case glslang::EOpAddInvocationsInclusiveScan:
+ case glslang::EOpMinInvocationsInclusiveScanNonUniform:
+ case glslang::EOpMaxInvocationsInclusiveScanNonUniform:
+ case glslang::EOpAddInvocationsInclusiveScanNonUniform:
+ groupOperation = spv::GroupOperationInclusiveScan;
+ break;
+ case glslang::EOpMinInvocationsExclusiveScan:
+ case glslang::EOpMaxInvocationsExclusiveScan:
+ case glslang::EOpAddInvocationsExclusiveScan:
+ case glslang::EOpMinInvocationsExclusiveScanNonUniform:
+ case glslang::EOpMaxInvocationsExclusiveScanNonUniform:
+ case glslang::EOpAddInvocationsExclusiveScanNonUniform:
+ groupOperation = spv::GroupOperationExclusiveScan;
+ break;
+ default:
+ break;
+ }
+ spv::IdImmediate scope = { true, builder.makeUintConstant(spv::ScopeSubgroup) };
+ spvGroupOperands.push_back(scope);
+ if (groupOperation != spv::GroupOperationMax) {
+ spv::IdImmediate groupOp = { false, (unsigned)groupOperation };
+ spvGroupOperands.push_back(groupOp);
+ }
+#endif
+ }
+
+ for (auto opIt = operands.begin(); opIt != operands.end(); ++opIt) {
+ spv::IdImmediate op = { true, *opIt };
+ spvGroupOperands.push_back(op);
+ }
+
+ switch (op) {
+ case glslang::EOpAnyInvocation:
+ opCode = spv::OpSubgroupAnyKHR;
+ break;
+ case glslang::EOpAllInvocations:
+ opCode = spv::OpSubgroupAllKHR;
+ break;
+ case glslang::EOpAllInvocationsEqual:
+ opCode = spv::OpSubgroupAllEqualKHR;
+ break;
+ case glslang::EOpReadInvocation:
+ opCode = spv::OpSubgroupReadInvocationKHR;
+ if (builder.isVectorType(typeId))
+ return CreateInvocationsVectorOperation(opCode, groupOperation, typeId, operands);
+ break;
+ case glslang::EOpReadFirstInvocation:
+ opCode = spv::OpSubgroupFirstInvocationKHR;
+ break;
+ case glslang::EOpBallot:
+ {
+        // NOTE: According to the spec, the result type of "OpSubgroupBallotKHR" must be a 4-component vector of
+        // 32-bit integers. The GLSL built-in function "ballotARB()" assumes the maximum number of invocations in
+        // a subgroup is 64. Thus, we have to convert uvec4.xy to uint64_t as follows:
+ //
+ // result = Bitcast(SubgroupBallotKHR(Predicate).xy)
+ //
+ spv::Id uintType = builder.makeUintType(32);
+ spv::Id uvec4Type = builder.makeVectorType(uintType, 4);
+ spv::Id result = builder.createOp(spv::OpSubgroupBallotKHR, uvec4Type, spvGroupOperands);
+
+ std::vector<spv::Id> components;
+ components.push_back(builder.createCompositeExtract(result, uintType, 0));
+ components.push_back(builder.createCompositeExtract(result, uintType, 1));
+
+ spv::Id uvec2Type = builder.makeVectorType(uintType, 2);
+ return builder.createUnaryOp(spv::OpBitcast, typeId,
+ builder.createCompositeConstruct(uvec2Type, components));
+ }
+
+#ifdef AMD_EXTENSIONS
+ case glslang::EOpMinInvocations:
+ case glslang::EOpMaxInvocations:
+ case glslang::EOpAddInvocations:
+ case glslang::EOpMinInvocationsInclusiveScan:
+ case glslang::EOpMaxInvocationsInclusiveScan:
+ case glslang::EOpAddInvocationsInclusiveScan:
+ case glslang::EOpMinInvocationsExclusiveScan:
+ case glslang::EOpMaxInvocationsExclusiveScan:
+ case glslang::EOpAddInvocationsExclusiveScan:
+ if (op == glslang::EOpMinInvocations ||
+ op == glslang::EOpMinInvocationsInclusiveScan ||
+ op == glslang::EOpMinInvocationsExclusiveScan) {
+ if (isFloat)
+ opCode = spv::OpGroupFMin;
+ else {
+ if (isUnsigned)
+ opCode = spv::OpGroupUMin;
+ else
+ opCode = spv::OpGroupSMin;
+ }
+ } else if (op == glslang::EOpMaxInvocations ||
+ op == glslang::EOpMaxInvocationsInclusiveScan ||
+ op == glslang::EOpMaxInvocationsExclusiveScan) {
+ if (isFloat)
+ opCode = spv::OpGroupFMax;
+ else {
+ if (isUnsigned)
+ opCode = spv::OpGroupUMax;
+ else
+ opCode = spv::OpGroupSMax;
+ }
+ } else {
+ if (isFloat)
+ opCode = spv::OpGroupFAdd;
+ else
+ opCode = spv::OpGroupIAdd;
+ }
+
+ if (builder.isVectorType(typeId))
+ return CreateInvocationsVectorOperation(opCode, groupOperation, typeId, operands);
+
+ break;
+ case glslang::EOpMinInvocationsNonUniform:
+ case glslang::EOpMaxInvocationsNonUniform:
+ case glslang::EOpAddInvocationsNonUniform:
+ case glslang::EOpMinInvocationsInclusiveScanNonUniform:
+ case glslang::EOpMaxInvocationsInclusiveScanNonUniform:
+ case glslang::EOpAddInvocationsInclusiveScanNonUniform:
+ case glslang::EOpMinInvocationsExclusiveScanNonUniform:
+ case glslang::EOpMaxInvocationsExclusiveScanNonUniform:
+ case glslang::EOpAddInvocationsExclusiveScanNonUniform:
+ if (op == glslang::EOpMinInvocationsNonUniform ||
+ op == glslang::EOpMinInvocationsInclusiveScanNonUniform ||
+ op == glslang::EOpMinInvocationsExclusiveScanNonUniform) {
+ if (isFloat)
+ opCode = spv::OpGroupFMinNonUniformAMD;
+ else {
+ if (isUnsigned)
+ opCode = spv::OpGroupUMinNonUniformAMD;
+ else
+ opCode = spv::OpGroupSMinNonUniformAMD;
+ }
+ }
+ else if (op == glslang::EOpMaxInvocationsNonUniform ||
+ op == glslang::EOpMaxInvocationsInclusiveScanNonUniform ||
+ op == glslang::EOpMaxInvocationsExclusiveScanNonUniform) {
+ if (isFloat)
+ opCode = spv::OpGroupFMaxNonUniformAMD;
+ else {
+ if (isUnsigned)
+ opCode = spv::OpGroupUMaxNonUniformAMD;
+ else
+ opCode = spv::OpGroupSMaxNonUniformAMD;
+ }
+ }
+ else {
+ if (isFloat)
+ opCode = spv::OpGroupFAddNonUniformAMD;
+ else
+ opCode = spv::OpGroupIAddNonUniformAMD;
+ }
+
+ if (builder.isVectorType(typeId))
+ return CreateInvocationsVectorOperation(opCode, groupOperation, typeId, operands);
+
+ break;
+#endif
+ default:
+ logger->missingFunctionality("invocation operation");
+ return spv::NoResult;
+ }
+
+ assert(opCode != spv::OpNop);
+ return builder.createOp(opCode, typeId, spvGroupOperands);
+}
+
+// Create group invocation operations on a vector
+spv::Id TGlslangToSpvTraverser::CreateInvocationsVectorOperation(spv::Op op, spv::GroupOperation groupOperation,
+ spv::Id typeId, std::vector<spv::Id>& operands)
+{
+#ifdef AMD_EXTENSIONS
+ assert(op == spv::OpGroupFMin || op == spv::OpGroupUMin || op == spv::OpGroupSMin ||
+ op == spv::OpGroupFMax || op == spv::OpGroupUMax || op == spv::OpGroupSMax ||
+ op == spv::OpGroupFAdd || op == spv::OpGroupIAdd || op == spv::OpGroupBroadcast ||
+ op == spv::OpSubgroupReadInvocationKHR ||
+ op == spv::OpGroupFMinNonUniformAMD || op == spv::OpGroupUMinNonUniformAMD || op == spv::OpGroupSMinNonUniformAMD ||
+ op == spv::OpGroupFMaxNonUniformAMD || op == spv::OpGroupUMaxNonUniformAMD || op == spv::OpGroupSMaxNonUniformAMD ||
+ op == spv::OpGroupFAddNonUniformAMD || op == spv::OpGroupIAddNonUniformAMD);
+#else
+ assert(op == spv::OpGroupFMin || op == spv::OpGroupUMin || op == spv::OpGroupSMin ||
+ op == spv::OpGroupFMax || op == spv::OpGroupUMax || op == spv::OpGroupSMax ||
+ op == spv::OpGroupFAdd || op == spv::OpGroupIAdd || op == spv::OpGroupBroadcast ||
+ op == spv::OpSubgroupReadInvocationKHR);
+#endif
+
+ // Handle group invocation operations scalar by scalar.
+ // The result type is the same type as the original type.
+ // The algorithm is to:
+ // - break the vector into scalars
+ // - apply the operation to each scalar
+    // - make a vector out of the scalar results
+
+ // get the types sorted out
+ int numComponents = builder.getNumComponents(operands[0]);
+ spv::Id scalarType = builder.getScalarTypeId(builder.getTypeId(operands[0]));
+ std::vector<spv::Id> results;
+
+ // do each scalar op
+ for (int comp = 0; comp < numComponents; ++comp) {
+ std::vector<unsigned int> indexes;
+ indexes.push_back(comp);
+ spv::IdImmediate scalar = { true, builder.createCompositeExtract(operands[0], scalarType, indexes) };
+ std::vector<spv::IdImmediate> spvGroupOperands;
+ if (op == spv::OpSubgroupReadInvocationKHR) {
+ spvGroupOperands.push_back(scalar);
+ spv::IdImmediate operand = { true, operands[1] };
+ spvGroupOperands.push_back(operand);
+ } else if (op == spv::OpGroupBroadcast) {
+ spv::IdImmediate scope = { true, builder.makeUintConstant(spv::ScopeSubgroup) };
+ spvGroupOperands.push_back(scope);
+ spvGroupOperands.push_back(scalar);
+ spv::IdImmediate operand = { true, operands[1] };
+ spvGroupOperands.push_back(operand);
+ } else {
+ spv::IdImmediate scope = { true, builder.makeUintConstant(spv::ScopeSubgroup) };
+ spvGroupOperands.push_back(scope);
+ spv::IdImmediate groupOp = { false, (unsigned)groupOperation };
+ spvGroupOperands.push_back(groupOp);
+ spvGroupOperands.push_back(scalar);
+ }
+
+ results.push_back(builder.createOp(op, scalarType, spvGroupOperands));
+ }
+
+ // put the pieces together
+ return builder.createCompositeConstruct(typeId, results);
+}
+
+// Create subgroup invocation operations.
+spv::Id TGlslangToSpvTraverser::createSubgroupOperation(glslang::TOperator op, spv::Id typeId,
+ std::vector<spv::Id>& operands, glslang::TBasicType typeProxy)
+{
+ // Add the required capabilities.
+ switch (op) {
+ case glslang::EOpSubgroupElect:
+ builder.addCapability(spv::CapabilityGroupNonUniform);
+ break;
+ case glslang::EOpSubgroupAll:
+ case glslang::EOpSubgroupAny:
+ case glslang::EOpSubgroupAllEqual:
+ builder.addCapability(spv::CapabilityGroupNonUniform);
+ builder.addCapability(spv::CapabilityGroupNonUniformVote);
+ break;
+ case glslang::EOpSubgroupBroadcast:
+ case glslang::EOpSubgroupBroadcastFirst:
+ case glslang::EOpSubgroupBallot:
+ case glslang::EOpSubgroupInverseBallot:
+ case glslang::EOpSubgroupBallotBitExtract:
+ case glslang::EOpSubgroupBallotBitCount:
+ case glslang::EOpSubgroupBallotInclusiveBitCount:
+ case glslang::EOpSubgroupBallotExclusiveBitCount:
+ case glslang::EOpSubgroupBallotFindLSB:
+ case glslang::EOpSubgroupBallotFindMSB:
+ builder.addCapability(spv::CapabilityGroupNonUniform);
+ builder.addCapability(spv::CapabilityGroupNonUniformBallot);
+ break;
+ case glslang::EOpSubgroupShuffle:
+ case glslang::EOpSubgroupShuffleXor:
+ builder.addCapability(spv::CapabilityGroupNonUniform);
+ builder.addCapability(spv::CapabilityGroupNonUniformShuffle);
+ break;
+ case glslang::EOpSubgroupShuffleUp:
+ case glslang::EOpSubgroupShuffleDown:
+ builder.addCapability(spv::CapabilityGroupNonUniform);
+ builder.addCapability(spv::CapabilityGroupNonUniformShuffleRelative);
+ break;
+ case glslang::EOpSubgroupAdd:
+ case glslang::EOpSubgroupMul:
+ case glslang::EOpSubgroupMin:
+ case glslang::EOpSubgroupMax:
+ case glslang::EOpSubgroupAnd:
+ case glslang::EOpSubgroupOr:
+ case glslang::EOpSubgroupXor:
+ case glslang::EOpSubgroupInclusiveAdd:
+ case glslang::EOpSubgroupInclusiveMul:
+ case glslang::EOpSubgroupInclusiveMin:
+ case glslang::EOpSubgroupInclusiveMax:
+ case glslang::EOpSubgroupInclusiveAnd:
+ case glslang::EOpSubgroupInclusiveOr:
+ case glslang::EOpSubgroupInclusiveXor:
+ case glslang::EOpSubgroupExclusiveAdd:
+ case glslang::EOpSubgroupExclusiveMul:
+ case glslang::EOpSubgroupExclusiveMin:
+ case glslang::EOpSubgroupExclusiveMax:
+ case glslang::EOpSubgroupExclusiveAnd:
+ case glslang::EOpSubgroupExclusiveOr:
+ case glslang::EOpSubgroupExclusiveXor:
+ builder.addCapability(spv::CapabilityGroupNonUniform);
+ builder.addCapability(spv::CapabilityGroupNonUniformArithmetic);
+ break;
+ case glslang::EOpSubgroupClusteredAdd:
+ case glslang::EOpSubgroupClusteredMul:
+ case glslang::EOpSubgroupClusteredMin:
+ case glslang::EOpSubgroupClusteredMax:
+ case glslang::EOpSubgroupClusteredAnd:
+ case glslang::EOpSubgroupClusteredOr:
+ case glslang::EOpSubgroupClusteredXor:
+ builder.addCapability(spv::CapabilityGroupNonUniform);
+ builder.addCapability(spv::CapabilityGroupNonUniformClustered);
+ break;
+ case glslang::EOpSubgroupQuadBroadcast:
+ case glslang::EOpSubgroupQuadSwapHorizontal:
+ case glslang::EOpSubgroupQuadSwapVertical:
+ case glslang::EOpSubgroupQuadSwapDiagonal:
+ builder.addCapability(spv::CapabilityGroupNonUniform);
+ builder.addCapability(spv::CapabilityGroupNonUniformQuad);
+ break;
+#ifdef NV_EXTENSIONS
+ case glslang::EOpSubgroupPartitionedAdd:
+ case glslang::EOpSubgroupPartitionedMul:
+ case glslang::EOpSubgroupPartitionedMin:
+ case glslang::EOpSubgroupPartitionedMax:
+ case glslang::EOpSubgroupPartitionedAnd:
+ case glslang::EOpSubgroupPartitionedOr:
+ case glslang::EOpSubgroupPartitionedXor:
+ case glslang::EOpSubgroupPartitionedInclusiveAdd:
+ case glslang::EOpSubgroupPartitionedInclusiveMul:
+ case glslang::EOpSubgroupPartitionedInclusiveMin:
+ case glslang::EOpSubgroupPartitionedInclusiveMax:
+ case glslang::EOpSubgroupPartitionedInclusiveAnd:
+ case glslang::EOpSubgroupPartitionedInclusiveOr:
+ case glslang::EOpSubgroupPartitionedInclusiveXor:
+ case glslang::EOpSubgroupPartitionedExclusiveAdd:
+ case glslang::EOpSubgroupPartitionedExclusiveMul:
+ case glslang::EOpSubgroupPartitionedExclusiveMin:
+ case glslang::EOpSubgroupPartitionedExclusiveMax:
+ case glslang::EOpSubgroupPartitionedExclusiveAnd:
+ case glslang::EOpSubgroupPartitionedExclusiveOr:
+ case glslang::EOpSubgroupPartitionedExclusiveXor:
+ builder.addExtension(spv::E_SPV_NV_shader_subgroup_partitioned);
+ builder.addCapability(spv::CapabilityGroupNonUniformPartitionedNV);
+ break;
+#endif
+ default: assert(0 && "Unhandled subgroup operation!");
+ }
+
+ const bool isUnsigned = typeProxy == glslang::EbtUint || typeProxy == glslang::EbtUint64;
+ const bool isFloat = typeProxy == glslang::EbtFloat || typeProxy == glslang::EbtDouble;
+ const bool isBool = typeProxy == glslang::EbtBool;
+
+ spv::Op opCode = spv::OpNop;
+
+ // Figure out which opcode to use.
+ switch (op) {
+ case glslang::EOpSubgroupElect: opCode = spv::OpGroupNonUniformElect; break;
+ case glslang::EOpSubgroupAll: opCode = spv::OpGroupNonUniformAll; break;
+ case glslang::EOpSubgroupAny: opCode = spv::OpGroupNonUniformAny; break;
+ case glslang::EOpSubgroupAllEqual: opCode = spv::OpGroupNonUniformAllEqual; break;
+ case glslang::EOpSubgroupBroadcast: opCode = spv::OpGroupNonUniformBroadcast; break;
+ case glslang::EOpSubgroupBroadcastFirst: opCode = spv::OpGroupNonUniformBroadcastFirst; break;
+ case glslang::EOpSubgroupBallot: opCode = spv::OpGroupNonUniformBallot; break;
+ case glslang::EOpSubgroupInverseBallot: opCode = spv::OpGroupNonUniformInverseBallot; break;
+ case glslang::EOpSubgroupBallotBitExtract: opCode = spv::OpGroupNonUniformBallotBitExtract; break;
+ case glslang::EOpSubgroupBallotBitCount:
+ case glslang::EOpSubgroupBallotInclusiveBitCount:
+ case glslang::EOpSubgroupBallotExclusiveBitCount: opCode = spv::OpGroupNonUniformBallotBitCount; break;
+ case glslang::EOpSubgroupBallotFindLSB: opCode = spv::OpGroupNonUniformBallotFindLSB; break;
+ case glslang::EOpSubgroupBallotFindMSB: opCode = spv::OpGroupNonUniformBallotFindMSB; break;
+ case glslang::EOpSubgroupShuffle: opCode = spv::OpGroupNonUniformShuffle; break;
+ case glslang::EOpSubgroupShuffleXor: opCode = spv::OpGroupNonUniformShuffleXor; break;
+ case glslang::EOpSubgroupShuffleUp: opCode = spv::OpGroupNonUniformShuffleUp; break;
+ case glslang::EOpSubgroupShuffleDown: opCode = spv::OpGroupNonUniformShuffleDown; break;
+ case glslang::EOpSubgroupAdd:
+ case glslang::EOpSubgroupInclusiveAdd:
+ case glslang::EOpSubgroupExclusiveAdd:
+ case glslang::EOpSubgroupClusteredAdd:
+#ifdef NV_EXTENSIONS
+ case glslang::EOpSubgroupPartitionedAdd:
+ case glslang::EOpSubgroupPartitionedInclusiveAdd:
+ case glslang::EOpSubgroupPartitionedExclusiveAdd:
+#endif
+ if (isFloat) {
+ opCode = spv::OpGroupNonUniformFAdd;
+ } else {
+ opCode = spv::OpGroupNonUniformIAdd;
+ }
+ break;
+ case glslang::EOpSubgroupMul:
+ case glslang::EOpSubgroupInclusiveMul:
+ case glslang::EOpSubgroupExclusiveMul:
+ case glslang::EOpSubgroupClusteredMul:
+#ifdef NV_EXTENSIONS
+ case glslang::EOpSubgroupPartitionedMul:
+ case glslang::EOpSubgroupPartitionedInclusiveMul:
+ case glslang::EOpSubgroupPartitionedExclusiveMul:
+#endif
+ if (isFloat) {
+ opCode = spv::OpGroupNonUniformFMul;
+ } else {
+ opCode = spv::OpGroupNonUniformIMul;
+ }
+ break;
+ case glslang::EOpSubgroupMin:
+ case glslang::EOpSubgroupInclusiveMin:
+ case glslang::EOpSubgroupExclusiveMin:
+ case glslang::EOpSubgroupClusteredMin:
+#ifdef NV_EXTENSIONS
+ case glslang::EOpSubgroupPartitionedMin:
+ case glslang::EOpSubgroupPartitionedInclusiveMin:
+ case glslang::EOpSubgroupPartitionedExclusiveMin:
+#endif
+ if (isFloat) {
+ opCode = spv::OpGroupNonUniformFMin;
+ } else if (isUnsigned) {
+ opCode = spv::OpGroupNonUniformUMin;
+ } else {
+ opCode = spv::OpGroupNonUniformSMin;
+ }
+ break;
+ case glslang::EOpSubgroupMax:
+ case glslang::EOpSubgroupInclusiveMax:
+ case glslang::EOpSubgroupExclusiveMax:
+ case glslang::EOpSubgroupClusteredMax:
+#ifdef NV_EXTENSIONS
+ case glslang::EOpSubgroupPartitionedMax:
+ case glslang::EOpSubgroupPartitionedInclusiveMax:
+ case glslang::EOpSubgroupPartitionedExclusiveMax:
+#endif
+ if (isFloat) {
+ opCode = spv::OpGroupNonUniformFMax;
+ } else if (isUnsigned) {
+ opCode = spv::OpGroupNonUniformUMax;
+ } else {
+ opCode = spv::OpGroupNonUniformSMax;
+ }
+ break;
+ case glslang::EOpSubgroupAnd:
+ case glslang::EOpSubgroupInclusiveAnd:
+ case glslang::EOpSubgroupExclusiveAnd:
+ case glslang::EOpSubgroupClusteredAnd:
+#ifdef NV_EXTENSIONS
+ case glslang::EOpSubgroupPartitionedAnd:
+ case glslang::EOpSubgroupPartitionedInclusiveAnd:
+ case glslang::EOpSubgroupPartitionedExclusiveAnd:
+#endif
+ if (isBool) {
+ opCode = spv::OpGroupNonUniformLogicalAnd;
+ } else {
+ opCode = spv::OpGroupNonUniformBitwiseAnd;
+ }
+ break;
+ case glslang::EOpSubgroupOr:
+ case glslang::EOpSubgroupInclusiveOr:
+ case glslang::EOpSubgroupExclusiveOr:
+ case glslang::EOpSubgroupClusteredOr:
+#ifdef NV_EXTENSIONS
+ case glslang::EOpSubgroupPartitionedOr:
+ case glslang::EOpSubgroupPartitionedInclusiveOr:
+ case glslang::EOpSubgroupPartitionedExclusiveOr:
+#endif
+ if (isBool) {
+ opCode = spv::OpGroupNonUniformLogicalOr;
+ } else {
+ opCode = spv::OpGroupNonUniformBitwiseOr;
+ }
+ break;
+ case glslang::EOpSubgroupXor:
+ case glslang::EOpSubgroupInclusiveXor:
+ case glslang::EOpSubgroupExclusiveXor:
+ case glslang::EOpSubgroupClusteredXor:
+#ifdef NV_EXTENSIONS
+ case glslang::EOpSubgroupPartitionedXor:
+ case glslang::EOpSubgroupPartitionedInclusiveXor:
+ case glslang::EOpSubgroupPartitionedExclusiveXor:
+#endif
+ if (isBool) {
+ opCode = spv::OpGroupNonUniformLogicalXor;
+ } else {
+ opCode = spv::OpGroupNonUniformBitwiseXor;
+ }
+ break;
+ case glslang::EOpSubgroupQuadBroadcast: opCode = spv::OpGroupNonUniformQuadBroadcast; break;
+ case glslang::EOpSubgroupQuadSwapHorizontal:
+ case glslang::EOpSubgroupQuadSwapVertical:
+ case glslang::EOpSubgroupQuadSwapDiagonal: opCode = spv::OpGroupNonUniformQuadSwap; break;
+ default: assert(0 && "Unhandled subgroup operation!");
+ }
+
+ // get the right Group Operation
+ spv::GroupOperation groupOperation = spv::GroupOperationMax;
+ switch (op) {
+ default:
+ break;
+ case glslang::EOpSubgroupBallotBitCount:
+ case glslang::EOpSubgroupAdd:
+ case glslang::EOpSubgroupMul:
+ case glslang::EOpSubgroupMin:
+ case glslang::EOpSubgroupMax:
+ case glslang::EOpSubgroupAnd:
+ case glslang::EOpSubgroupOr:
+ case glslang::EOpSubgroupXor:
+ groupOperation = spv::GroupOperationReduce;
+ break;
+ case glslang::EOpSubgroupBallotInclusiveBitCount:
+ case glslang::EOpSubgroupInclusiveAdd:
+ case glslang::EOpSubgroupInclusiveMul:
+ case glslang::EOpSubgroupInclusiveMin:
+ case glslang::EOpSubgroupInclusiveMax:
+ case glslang::EOpSubgroupInclusiveAnd:
+ case glslang::EOpSubgroupInclusiveOr:
+ case glslang::EOpSubgroupInclusiveXor:
+ groupOperation = spv::GroupOperationInclusiveScan;
+ break;
+ case glslang::EOpSubgroupBallotExclusiveBitCount:
+ case glslang::EOpSubgroupExclusiveAdd:
+ case glslang::EOpSubgroupExclusiveMul:
+ case glslang::EOpSubgroupExclusiveMin:
+ case glslang::EOpSubgroupExclusiveMax:
+ case glslang::EOpSubgroupExclusiveAnd:
+ case glslang::EOpSubgroupExclusiveOr:
+ case glslang::EOpSubgroupExclusiveXor:
+ groupOperation = spv::GroupOperationExclusiveScan;
+ break;
+ case glslang::EOpSubgroupClusteredAdd:
+ case glslang::EOpSubgroupClusteredMul:
+ case glslang::EOpSubgroupClusteredMin:
+ case glslang::EOpSubgroupClusteredMax:
+ case glslang::EOpSubgroupClusteredAnd:
+ case glslang::EOpSubgroupClusteredOr:
+ case glslang::EOpSubgroupClusteredXor:
+ groupOperation = spv::GroupOperationClusteredReduce;
+ break;
+#ifdef NV_EXTENSIONS
+ case glslang::EOpSubgroupPartitionedAdd:
+ case glslang::EOpSubgroupPartitionedMul:
+ case glslang::EOpSubgroupPartitionedMin:
+ case glslang::EOpSubgroupPartitionedMax:
+ case glslang::EOpSubgroupPartitionedAnd:
+ case glslang::EOpSubgroupPartitionedOr:
+ case glslang::EOpSubgroupPartitionedXor:
+ groupOperation = spv::GroupOperationPartitionedReduceNV;
+ break;
+ case glslang::EOpSubgroupPartitionedInclusiveAdd:
+ case glslang::EOpSubgroupPartitionedInclusiveMul:
+ case glslang::EOpSubgroupPartitionedInclusiveMin:
+ case glslang::EOpSubgroupPartitionedInclusiveMax:
+ case glslang::EOpSubgroupPartitionedInclusiveAnd:
+ case glslang::EOpSubgroupPartitionedInclusiveOr:
+ case glslang::EOpSubgroupPartitionedInclusiveXor:
+ groupOperation = spv::GroupOperationPartitionedInclusiveScanNV;
+ break;
+ case glslang::EOpSubgroupPartitionedExclusiveAdd:
+ case glslang::EOpSubgroupPartitionedExclusiveMul:
+ case glslang::EOpSubgroupPartitionedExclusiveMin:
+ case glslang::EOpSubgroupPartitionedExclusiveMax:
+ case glslang::EOpSubgroupPartitionedExclusiveAnd:
+ case glslang::EOpSubgroupPartitionedExclusiveOr:
+ case glslang::EOpSubgroupPartitionedExclusiveXor:
+ groupOperation = spv::GroupOperationPartitionedExclusiveScanNV;
+ break;
+#endif
+ }
+
+ // build the instruction
+ std::vector<spv::IdImmediate> spvGroupOperands;
+
+ // Every operation begins with the Execution Scope operand.
+ spv::IdImmediate executionScope = { true, builder.makeUintConstant(spv::ScopeSubgroup) };
+ spvGroupOperands.push_back(executionScope);
+
+ // Next, for all operations that use a Group Operation, push that as an operand.
+ if (groupOperation != spv::GroupOperationMax) {
+ spv::IdImmediate groupOperand = { false, (unsigned)groupOperation };
+ spvGroupOperands.push_back(groupOperand);
+ }
+
+ // Push back the operands next.
+ for (auto opIt = operands.cbegin(); opIt != operands.cend(); ++opIt) {
+ spv::IdImmediate operand = { true, *opIt };
+ spvGroupOperands.push_back(operand);
+ }
+
+ // Some opcodes have additional operands.
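+    // The quad swap opcodes take a constant direction operand: 0 = horizontal, 1 = vertical, 2 = diagonal.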
+ spv::Id directionId = spv::NoResult;
+ switch (op) {
+ default: break;
+ case glslang::EOpSubgroupQuadSwapHorizontal: directionId = builder.makeUintConstant(0); break;
+ case glslang::EOpSubgroupQuadSwapVertical: directionId = builder.makeUintConstant(1); break;
+ case glslang::EOpSubgroupQuadSwapDiagonal: directionId = builder.makeUintConstant(2); break;
+ }
+ if (directionId != spv::NoResult) {
+ spv::IdImmediate direction = { true, directionId };
+ spvGroupOperands.push_back(direction);
+ }
+
+ return builder.createOp(opCode, typeId, spvGroupOperands);
+}
+
+spv::Id TGlslangToSpvTraverser::createMiscOperation(glslang::TOperator op, spv::Decoration precision, spv::Id typeId, std::vector<spv::Id>& operands, glslang::TBasicType typeProxy)
+{
+ bool isUnsigned = isTypeUnsignedInt(typeProxy);
+ bool isFloat = isTypeFloat(typeProxy);
+
+ spv::Op opCode = spv::OpNop;
+ int extBuiltins = -1;
+ int libCall = -1;
+ size_t consumedOperands = operands.size();
+ spv::Id typeId0 = 0;
+ if (consumedOperands > 0)
+ typeId0 = builder.getTypeId(operands[0]);
+ spv::Id typeId1 = 0;
+ if (consumedOperands > 1)
+ typeId1 = builder.getTypeId(operands[1]);
+ spv::Id frexpIntType = 0;
+
+ switch (op) {
+ case glslang::EOpMin:
+ if (isFloat)
+ libCall = spv::GLSLstd450FMin;
+ else if (isUnsigned)
+ libCall = spv::GLSLstd450UMin;
+ else
+ libCall = spv::GLSLstd450SMin;
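+        // Smear a scalar argument up to the other argument's vector type, if needed (e.g. min(vec, scalar)).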
+ builder.promoteScalar(precision, operands.front(), operands.back());
+ break;
+ case glslang::EOpModf:
+ libCall = spv::GLSLstd450Modf;
+ break;
+ case glslang::EOpMax:
+ if (isFloat)
+ libCall = spv::GLSLstd450FMax;
+ else if (isUnsigned)
+ libCall = spv::GLSLstd450UMax;
+ else
+ libCall = spv::GLSLstd450SMax;
+ builder.promoteScalar(precision, operands.front(), operands.back());
+ break;
+ case glslang::EOpPow:
+ libCall = spv::GLSLstd450Pow;
+ break;
+ case glslang::EOpDot:
+ opCode = spv::OpDot;
+ break;
+ case glslang::EOpAtan:
+ libCall = spv::GLSLstd450Atan2;
+ break;
+
+ case glslang::EOpClamp:
+ if (isFloat)
+ libCall = spv::GLSLstd450FClamp;
+ else if (isUnsigned)
+ libCall = spv::GLSLstd450UClamp;
+ else
+ libCall = spv::GLSLstd450SClamp;
+ builder.promoteScalar(precision, operands.front(), operands[1]);
+ builder.promoteScalar(precision, operands.front(), operands[2]);
+ break;
+ case glslang::EOpMix:
+ if (! builder.isBoolType(builder.getScalarTypeId(builder.getTypeId(operands.back())))) {
+ assert(isFloat);
+ libCall = spv::GLSLstd450FMix;
+ } else {
+ opCode = spv::OpSelect;
+ std::swap(operands.front(), operands.back());
+ }
+ builder.promoteScalar(precision, operands.front(), operands.back());
+ break;
+ case glslang::EOpStep:
+ libCall = spv::GLSLstd450Step;
+ builder.promoteScalar(precision, operands.front(), operands.back());
+ break;
+ case glslang::EOpSmoothStep:
+ libCall = spv::GLSLstd450SmoothStep;
+ builder.promoteScalar(precision, operands[0], operands[2]);
+ builder.promoteScalar(precision, operands[1], operands[2]);
+ break;
+
+ case glslang::EOpDistance:
+ libCall = spv::GLSLstd450Distance;
+ break;
+ case glslang::EOpCross:
+ libCall = spv::GLSLstd450Cross;
+ break;
+ case glslang::EOpFaceForward:
+ libCall = spv::GLSLstd450FaceForward;
+ break;
+ case glslang::EOpReflect:
+ libCall = spv::GLSLstd450Reflect;
+ break;
+ case glslang::EOpRefract:
+ libCall = spv::GLSLstd450Refract;
+ break;
+ case glslang::EOpInterpolateAtSample:
+#ifdef AMD_EXTENSIONS
+ if (typeProxy == glslang::EbtFloat16)
+ builder.addExtension(spv::E_SPV_AMD_gpu_shader_half_float);
+#endif
+ libCall = spv::GLSLstd450InterpolateAtSample;
+ break;
+ case glslang::EOpInterpolateAtOffset:
+#ifdef AMD_EXTENSIONS
+ if (typeProxy == glslang::EbtFloat16)
+ builder.addExtension(spv::E_SPV_AMD_gpu_shader_half_float);
+#endif
+ libCall = spv::GLSLstd450InterpolateAtOffset;
+ break;
+ case glslang::EOpAddCarry:
+ opCode = spv::OpIAddCarry;
+ typeId = builder.makeStructResultType(typeId0, typeId0);
+ consumedOperands = 2;
+ break;
+ case glslang::EOpSubBorrow:
+ opCode = spv::OpISubBorrow;
+ typeId = builder.makeStructResultType(typeId0, typeId0);
+ consumedOperands = 2;
+ break;
+ case glslang::EOpUMulExtended:
+ opCode = spv::OpUMulExtended;
+ typeId = builder.makeStructResultType(typeId0, typeId0);
+ consumedOperands = 2;
+ break;
+ case glslang::EOpIMulExtended:
+ opCode = spv::OpSMulExtended;
+ typeId = builder.makeStructResultType(typeId0, typeId0);
+ consumedOperands = 2;
+ break;
+ case glslang::EOpBitfieldExtract:
+ if (isUnsigned)
+ opCode = spv::OpBitFieldUExtract;
+ else
+ opCode = spv::OpBitFieldSExtract;
+ break;
+ case glslang::EOpBitfieldInsert:
+ opCode = spv::OpBitFieldInsert;
+ break;
+
+ case glslang::EOpFma:
+ libCall = spv::GLSLstd450Fma;
+ break;
+ case glslang::EOpFrexp:
+ {
+ libCall = spv::GLSLstd450FrexpStruct;
+ assert(builder.isPointerType(typeId1));
+ typeId1 = builder.getContainedTypeId(typeId1);
+ int width = builder.getScalarTypeWidth(typeId1);
+#ifdef AMD_EXTENSIONS
+ if (width == 16)
+            // A 16-bit "exp" operand requires the SPV_AMD_gpu_shader_int16 extension
+ builder.addExtension(spv::E_SPV_AMD_gpu_shader_int16);
+#endif
+ if (builder.getNumComponents(operands[0]) == 1)
+ frexpIntType = builder.makeIntegerType(width, true);
+ else
+ frexpIntType = builder.makeVectorType(builder.makeIntegerType(width, true), builder.getNumComponents(operands[0]));
+ typeId = builder.makeStructResultType(typeId0, frexpIntType);
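+            // The call returns a struct { significand, exponent }; the exponent member is stored to the out-parameter below.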
+ consumedOperands = 1;
+ }
+ break;
+ case glslang::EOpLdexp:
+ libCall = spv::GLSLstd450Ldexp;
+ break;
+
+ case glslang::EOpReadInvocation:
+ return createInvocationsOperation(op, typeId, operands, typeProxy);
+
+ case glslang::EOpSubgroupBroadcast:
+ case glslang::EOpSubgroupBallotBitExtract:
+ case glslang::EOpSubgroupShuffle:
+ case glslang::EOpSubgroupShuffleXor:
+ case glslang::EOpSubgroupShuffleUp:
+ case glslang::EOpSubgroupShuffleDown:
+ case glslang::EOpSubgroupClusteredAdd:
+ case glslang::EOpSubgroupClusteredMul:
+ case glslang::EOpSubgroupClusteredMin:
+ case glslang::EOpSubgroupClusteredMax:
+ case glslang::EOpSubgroupClusteredAnd:
+ case glslang::EOpSubgroupClusteredOr:
+ case glslang::EOpSubgroupClusteredXor:
+ case glslang::EOpSubgroupQuadBroadcast:
+#ifdef NV_EXTENSIONS
+ case glslang::EOpSubgroupPartitionedAdd:
+ case glslang::EOpSubgroupPartitionedMul:
+ case glslang::EOpSubgroupPartitionedMin:
+ case glslang::EOpSubgroupPartitionedMax:
+ case glslang::EOpSubgroupPartitionedAnd:
+ case glslang::EOpSubgroupPartitionedOr:
+ case glslang::EOpSubgroupPartitionedXor:
+ case glslang::EOpSubgroupPartitionedInclusiveAdd:
+ case glslang::EOpSubgroupPartitionedInclusiveMul:
+ case glslang::EOpSubgroupPartitionedInclusiveMin:
+ case glslang::EOpSubgroupPartitionedInclusiveMax:
+ case glslang::EOpSubgroupPartitionedInclusiveAnd:
+ case glslang::EOpSubgroupPartitionedInclusiveOr:
+ case glslang::EOpSubgroupPartitionedInclusiveXor:
+ case glslang::EOpSubgroupPartitionedExclusiveAdd:
+ case glslang::EOpSubgroupPartitionedExclusiveMul:
+ case glslang::EOpSubgroupPartitionedExclusiveMin:
+ case glslang::EOpSubgroupPartitionedExclusiveMax:
+ case glslang::EOpSubgroupPartitionedExclusiveAnd:
+ case glslang::EOpSubgroupPartitionedExclusiveOr:
+ case glslang::EOpSubgroupPartitionedExclusiveXor:
+#endif
+ return createSubgroupOperation(op, typeId, operands, typeProxy);
+
+#ifdef AMD_EXTENSIONS
+ case glslang::EOpSwizzleInvocations:
+ extBuiltins = getExtBuiltins(spv::E_SPV_AMD_shader_ballot);
+ libCall = spv::SwizzleInvocationsAMD;
+ break;
+ case glslang::EOpSwizzleInvocationsMasked:
+ extBuiltins = getExtBuiltins(spv::E_SPV_AMD_shader_ballot);
+ libCall = spv::SwizzleInvocationsMaskedAMD;
+ break;
+ case glslang::EOpWriteInvocation:
+ extBuiltins = getExtBuiltins(spv::E_SPV_AMD_shader_ballot);
+ libCall = spv::WriteInvocationAMD;
+ break;
+
+ case glslang::EOpMin3:
+ extBuiltins = getExtBuiltins(spv::E_SPV_AMD_shader_trinary_minmax);
+ if (isFloat)
+ libCall = spv::FMin3AMD;
+ else {
+ if (isUnsigned)
+ libCall = spv::UMin3AMD;
+ else
+ libCall = spv::SMin3AMD;
+ }
+ break;
+ case glslang::EOpMax3:
+ extBuiltins = getExtBuiltins(spv::E_SPV_AMD_shader_trinary_minmax);
+ if (isFloat)
+ libCall = spv::FMax3AMD;
+ else {
+ if (isUnsigned)
+ libCall = spv::UMax3AMD;
+ else
+ libCall = spv::SMax3AMD;
+ }
+ break;
+ case glslang::EOpMid3:
+ extBuiltins = getExtBuiltins(spv::E_SPV_AMD_shader_trinary_minmax);
+ if (isFloat)
+ libCall = spv::FMid3AMD;
+ else {
+ if (isUnsigned)
+ libCall = spv::UMid3AMD;
+ else
+ libCall = spv::SMid3AMD;
+ }
+ break;
+
+ case glslang::EOpInterpolateAtVertex:
+ if (typeProxy == glslang::EbtFloat16)
+ builder.addExtension(spv::E_SPV_AMD_gpu_shader_half_float);
+ extBuiltins = getExtBuiltins(spv::E_SPV_AMD_shader_explicit_vertex_parameter);
+ libCall = spv::InterpolateAtVertexAMD;
+ break;
+#endif
+ case glslang::EOpBarrier:
+ {
+ // This is for the extended controlBarrier function, with four operands.
+ // The unextended barrier() goes through createNoArgOperation.
+ assert(operands.size() == 4);
+ unsigned int executionScope = builder.getConstantScalar(operands[0]);
+ unsigned int memoryScope = builder.getConstantScalar(operands[1]);
+ unsigned int semantics = builder.getConstantScalar(operands[2]) | builder.getConstantScalar(operands[3]);
+ builder.createControlBarrier((spv::Scope)executionScope, (spv::Scope)memoryScope, (spv::MemorySemanticsMask)semantics);
+ if (semantics & (spv::MemorySemanticsMakeAvailableKHRMask | spv::MemorySemanticsMakeVisibleKHRMask | spv::MemorySemanticsOutputMemoryKHRMask)) {
+ builder.addCapability(spv::CapabilityVulkanMemoryModelKHR);
+ }
+ if (glslangIntermediate->usingVulkanMemoryModel() && (executionScope == spv::ScopeDevice || memoryScope == spv::ScopeDevice)) {
+ builder.addCapability(spv::CapabilityVulkanMemoryModelDeviceScopeKHR);
+ }
+ return 0;
+ }
+ break;
+ case glslang::EOpMemoryBarrier:
+ {
+ // This is for the extended memoryBarrier function, with three operands.
+ // The unextended memoryBarrier() goes through createNoArgOperation.
+ assert(operands.size() == 3);
+ unsigned int memoryScope = builder.getConstantScalar(operands[0]);
+ unsigned int semantics = builder.getConstantScalar(operands[1]) | builder.getConstantScalar(operands[2]);
+ builder.createMemoryBarrier((spv::Scope)memoryScope, (spv::MemorySemanticsMask)semantics);
+ if (semantics & (spv::MemorySemanticsMakeAvailableKHRMask | spv::MemorySemanticsMakeVisibleKHRMask | spv::MemorySemanticsOutputMemoryKHRMask)) {
+ builder.addCapability(spv::CapabilityVulkanMemoryModelKHR);
+ }
+ if (glslangIntermediate->usingVulkanMemoryModel() && memoryScope == spv::ScopeDevice) {
+ builder.addCapability(spv::CapabilityVulkanMemoryModelDeviceScopeKHR);
+ }
+ return 0;
+ }
+ break;
+
+#ifdef NV_EXTENSIONS
+ case glslang::EOpReportIntersectionNV:
+ {
+ typeId = builder.makeBoolType();
+ opCode = spv::OpReportIntersectionNV;
+ }
+ break;
+ case glslang::EOpTraceNV:
+ {
+ builder.createNoResultOp(spv::OpTraceNV, operands);
+ return 0;
+ }
+ break;
+ case glslang::EOpExecuteCallableNV:
+ {
+ builder.createNoResultOp(spv::OpExecuteCallableNV, operands);
+ return 0;
+ }
+ break;
+ case glslang::EOpWritePackedPrimitiveIndices4x8NV:
+ builder.createNoResultOp(spv::OpWritePackedPrimitiveIndices4x8NV, operands);
+ return 0;
+#endif
+ case glslang::EOpCooperativeMatrixMulAdd:
+ opCode = spv::OpCooperativeMatrixMulAddNV;
+ break;
+
+ default:
+ return 0;
+ }
+
+ spv::Id id = 0;
+ if (libCall >= 0) {
+ // Use an extended instruction from the standard library.
+ // Construct the call arguments, without modifying the original operands vector.
+ // We might need the remaining arguments, e.g. in the EOpFrexp case.
+ std::vector<spv::Id> callArguments(operands.begin(), operands.begin() + consumedOperands);
+ id = builder.createBuiltinCall(typeId, extBuiltins >= 0 ? extBuiltins : stdBuiltins, libCall, callArguments);
+ } else if (opCode == spv::OpDot && !isFloat) {
+ // int dot(int, int)
+        // NOTE: never called for a scalar or 1-component vector; those are turned into a simple multiply before this can be reached
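+        // OpDot is only defined for floating-point vectors, so an integer dot product is expanded into a component-wise multiply followed by adds.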
+ const int componentCount = builder.getNumComponents(operands[0]);
+ spv::Id mulOp = builder.createBinOp(spv::OpIMul, builder.getTypeId(operands[0]), operands[0], operands[1]);
+ builder.setPrecision(mulOp, precision);
+ id = builder.createCompositeExtract(mulOp, typeId, 0);
+ for (int i = 1; i < componentCount; ++i) {
+ builder.setPrecision(id, precision);
+            id = builder.createBinOp(spv::OpIAdd, typeId, id, builder.createCompositeExtract(mulOp, typeId, i));
+ }
+ } else {
+ switch (consumedOperands) {
+ case 0:
+ // should all be handled by visitAggregate and createNoArgOperation
+ assert(0);
+ return 0;
+ case 1:
+ // should all be handled by createUnaryOperation
+ assert(0);
+ return 0;
+ case 2:
+ id = builder.createBinOp(opCode, typeId, operands[0], operands[1]);
+ break;
+ default:
+ // anything 3 or over doesn't have l-value operands, so all should be consumed
+ assert(consumedOperands == operands.size());
+ id = builder.createOp(opCode, typeId, operands);
+ break;
+ }
+ }
+
+ // Decode the return types that were structures
+ switch (op) {
+ case glslang::EOpAddCarry:
+ case glslang::EOpSubBorrow:
+ builder.createStore(builder.createCompositeExtract(id, typeId0, 1), operands[2]);
+ id = builder.createCompositeExtract(id, typeId0, 0);
+ break;
+ case glslang::EOpUMulExtended:
+ case glslang::EOpIMulExtended:
+ builder.createStore(builder.createCompositeExtract(id, typeId0, 0), operands[3]);
+ builder.createStore(builder.createCompositeExtract(id, typeId0, 1), operands[2]);
+ break;
+ case glslang::EOpFrexp:
+ {
+ assert(operands.size() == 2);
+ if (builder.isFloatType(builder.getScalarTypeId(typeId1))) {
+ // "exp" is floating-point type (from HLSL intrinsic)
+ spv::Id member1 = builder.createCompositeExtract(id, frexpIntType, 1);
+ member1 = builder.createUnaryOp(spv::OpConvertSToF, typeId1, member1);
+ builder.createStore(member1, operands[1]);
+ } else
+ // "exp" is integer type (from GLSL built-in function)
+ builder.createStore(builder.createCompositeExtract(id, frexpIntType, 1), operands[1]);
+ id = builder.createCompositeExtract(id, typeId0, 0);
+ }
+ break;
+ default:
+ break;
+ }
+
+ return builder.setPrecision(id, precision);
+}
+
+// Intrinsics with no arguments (or no return value, and no precision).
+spv::Id TGlslangToSpvTraverser::createNoArgOperation(glslang::TOperator op, spv::Decoration precision, spv::Id typeId)
+{
+ // GLSL memory barriers use queue family scope in the new model, device scope in the old model
+ spv::Scope memoryBarrierScope = glslangIntermediate->usingVulkanMemoryModel() ? spv::ScopeQueueFamilyKHR : spv::ScopeDevice;
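+ // Editorial note: with the Vulkan memory model, a plain memoryBarrier() therefore lowers to
+ //   OpMemoryBarrier QueueFamilyKHR (AcquireRelease | UniformMemory | WorkgroupMemory |
+ //                                   AtomicCounterMemory | ImageMemory)
+ // whereas the old model uses Device scope, as handled in the cases below.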
+
+ switch (op) {
+ case glslang::EOpEmitVertex:
+ builder.createNoResultOp(spv::OpEmitVertex);
+ return 0;
+ case glslang::EOpEndPrimitive:
+ builder.createNoResultOp(spv::OpEndPrimitive);
+ return 0;
+ case glslang::EOpBarrier:
+ if (glslangIntermediate->getStage() == EShLangTessControl) {
+ if (glslangIntermediate->usingVulkanMemoryModel()) {
+ builder.createControlBarrier(spv::ScopeWorkgroup, spv::ScopeWorkgroup,
+ spv::MemorySemanticsOutputMemoryKHRMask |
+ spv::MemorySemanticsAcquireReleaseMask);
+ builder.addCapability(spv::CapabilityVulkanMemoryModelKHR);
+ } else {
+ builder.createControlBarrier(spv::ScopeWorkgroup, spv::ScopeInvocation, spv::MemorySemanticsMaskNone);
+ }
+ } else {
+ builder.createControlBarrier(spv::ScopeWorkgroup, spv::ScopeWorkgroup,
+ spv::MemorySemanticsWorkgroupMemoryMask |
+ spv::MemorySemanticsAcquireReleaseMask);
+ }
+ return 0;
+ case glslang::EOpMemoryBarrier:
+ builder.createMemoryBarrier(memoryBarrierScope, spv::MemorySemanticsAllMemory |
+ spv::MemorySemanticsAcquireReleaseMask);
+ return 0;
+ case glslang::EOpMemoryBarrierAtomicCounter:
+ builder.createMemoryBarrier(memoryBarrierScope, spv::MemorySemanticsAtomicCounterMemoryMask |
+ spv::MemorySemanticsAcquireReleaseMask);
+ return 0;
+ case glslang::EOpMemoryBarrierBuffer:
+ builder.createMemoryBarrier(memoryBarrierScope, spv::MemorySemanticsUniformMemoryMask |
+ spv::MemorySemanticsAcquireReleaseMask);
+ return 0;
+ case glslang::EOpMemoryBarrierImage:
+ builder.createMemoryBarrier(memoryBarrierScope, spv::MemorySemanticsImageMemoryMask |
+ spv::MemorySemanticsAcquireReleaseMask);
+ return 0;
+ case glslang::EOpMemoryBarrierShared:
+ builder.createMemoryBarrier(memoryBarrierScope, spv::MemorySemanticsWorkgroupMemoryMask |
+ spv::MemorySemanticsAcquireReleaseMask);
+ return 0;
+ case glslang::EOpGroupMemoryBarrier:
+ builder.createMemoryBarrier(spv::ScopeWorkgroup, spv::MemorySemanticsAllMemory |
+ spv::MemorySemanticsAcquireReleaseMask);
+ return 0;
+ case glslang::EOpAllMemoryBarrierWithGroupSync:
+ builder.createControlBarrier(spv::ScopeWorkgroup, spv::ScopeDevice,
+ spv::MemorySemanticsAllMemory |
+ spv::MemorySemanticsAcquireReleaseMask);
+ return 0;
+ case glslang::EOpDeviceMemoryBarrier:
+ builder.createMemoryBarrier(spv::ScopeDevice, spv::MemorySemanticsUniformMemoryMask |
+ spv::MemorySemanticsImageMemoryMask |
+ spv::MemorySemanticsAcquireReleaseMask);
+ return 0;
+ case glslang::EOpDeviceMemoryBarrierWithGroupSync:
+ builder.createControlBarrier(spv::ScopeWorkgroup, spv::ScopeDevice, spv::MemorySemanticsUniformMemoryMask |
+ spv::MemorySemanticsImageMemoryMask |
+ spv::MemorySemanticsAcquireReleaseMask);
+ return 0;
+ case glslang::EOpWorkgroupMemoryBarrier:
+ builder.createMemoryBarrier(spv::ScopeWorkgroup, spv::MemorySemanticsWorkgroupMemoryMask |
+ spv::MemorySemanticsAcquireReleaseMask);
+ return 0;
+ case glslang::EOpWorkgroupMemoryBarrierWithGroupSync:
+ builder.createControlBarrier(spv::ScopeWorkgroup, spv::ScopeWorkgroup,
+ spv::MemorySemanticsWorkgroupMemoryMask |
+ spv::MemorySemanticsAcquireReleaseMask);
+ return 0;
+ case glslang::EOpSubgroupBarrier:
+ builder.createControlBarrier(spv::ScopeSubgroup, spv::ScopeSubgroup, spv::MemorySemanticsAllMemory |
+ spv::MemorySemanticsAcquireReleaseMask);
+ return spv::NoResult;
+ case glslang::EOpSubgroupMemoryBarrier:
+ builder.createMemoryBarrier(spv::ScopeSubgroup, spv::MemorySemanticsAllMemory |
+ spv::MemorySemanticsAcquireReleaseMask);
+ return spv::NoResult;
+ case glslang::EOpSubgroupMemoryBarrierBuffer:
+ builder.createMemoryBarrier(spv::ScopeSubgroup, spv::MemorySemanticsUniformMemoryMask |
+ spv::MemorySemanticsAcquireReleaseMask);
+ return spv::NoResult;
+ case glslang::EOpSubgroupMemoryBarrierImage:
+ builder.createMemoryBarrier(spv::ScopeSubgroup, spv::MemorySemanticsImageMemoryMask |
+ spv::MemorySemanticsAcquireReleaseMask);
+ return spv::NoResult;
+ case glslang::EOpSubgroupMemoryBarrierShared:
+ builder.createMemoryBarrier(spv::ScopeSubgroup, spv::MemorySemanticsWorkgroupMemoryMask |
+ spv::MemorySemanticsAcquireReleaseMask);
+ return spv::NoResult;
+ case glslang::EOpSubgroupElect: {
+ std::vector<spv::Id> operands;
+ return createSubgroupOperation(op, typeId, operands, glslang::EbtVoid);
+ }
+#ifdef AMD_EXTENSIONS
+ case glslang::EOpTime:
+ {
+ std::vector<spv::Id> args; // Dummy arguments
+ spv::Id id = builder.createBuiltinCall(typeId, getExtBuiltins(spv::E_SPV_AMD_gcn_shader), spv::TimeAMD, args);
+ return builder.setPrecision(id, precision);
+ }
+#endif
+#ifdef NV_EXTENSIONS
+ case glslang::EOpIgnoreIntersectionNV:
+ builder.createNoResultOp(spv::OpIgnoreIntersectionNV);
+ return 0;
+ case glslang::EOpTerminateRayNV:
+ builder.createNoResultOp(spv::OpTerminateRayNV);
+ return 0;
+#endif
+ default:
+ logger->missingFunctionality("unknown operation with no arguments");
+ return 0;
+ }
+}
+
+spv::Id TGlslangToSpvTraverser::getSymbolId(const glslang::TIntermSymbol* symbol)
+{
+ auto iter = symbolValues.find(symbol->getId());
+ spv::Id id;
+ if (symbolValues.end() != iter) {
+ id = iter->second;
+ return id;
+ }
+
+ // it was not found, create it
+ id = createSpvVariable(symbol);
+ symbolValues[symbol->getId()] = id;
+
+ if (symbol->getBasicType() != glslang::EbtBlock) {
+ builder.addDecoration(id, TranslatePrecisionDecoration(symbol->getType()));
+ builder.addDecoration(id, TranslateInterpolationDecoration(symbol->getType().getQualifier()));
+ builder.addDecoration(id, TranslateAuxiliaryStorageDecoration(symbol->getType().getQualifier()));
+#ifdef NV_EXTENSIONS
+ addMeshNVDecoration(id, /*member*/ -1, symbol->getType().getQualifier());
+#endif
+ if (symbol->getType().getQualifier().hasSpecConstantId())
+ builder.addDecoration(id, spv::DecorationSpecId, symbol->getType().getQualifier().layoutSpecConstantId);
+ if (symbol->getQualifier().hasIndex())
+ builder.addDecoration(id, spv::DecorationIndex, symbol->getQualifier().layoutIndex);
+ if (symbol->getQualifier().hasComponent())
+ builder.addDecoration(id, spv::DecorationComponent, symbol->getQualifier().layoutComponent);
+ // atomic counters use this:
+ if (symbol->getQualifier().hasOffset())
+ builder.addDecoration(id, spv::DecorationOffset, symbol->getQualifier().layoutOffset);
+ }
+
+ if (symbol->getQualifier().hasLocation())
+ builder.addDecoration(id, spv::DecorationLocation, symbol->getQualifier().layoutLocation);
+ builder.addDecoration(id, TranslateInvariantDecoration(symbol->getType().getQualifier()));
+ if (symbol->getQualifier().hasStream() && glslangIntermediate->isMultiStream()) {
+ builder.addCapability(spv::CapabilityGeometryStreams);
+ builder.addDecoration(id, spv::DecorationStream, symbol->getQualifier().layoutStream);
+ }
+ if (symbol->getQualifier().hasSet())
+ builder.addDecoration(id, spv::DecorationDescriptorSet, symbol->getQualifier().layoutSet);
+ else if (IsDescriptorResource(symbol->getType())) {
+ // default to 0
+ builder.addDecoration(id, spv::DecorationDescriptorSet, 0);
+ }
+ if (symbol->getQualifier().hasBinding())
+ builder.addDecoration(id, spv::DecorationBinding, symbol->getQualifier().layoutBinding);
+ else if (IsDescriptorResource(symbol->getType())) {
+ // default to 0
+ builder.addDecoration(id, spv::DecorationBinding, 0);
+ }
+ if (symbol->getQualifier().hasAttachment())
+ builder.addDecoration(id, spv::DecorationInputAttachmentIndex, symbol->getQualifier().layoutAttachment);
+ if (glslangIntermediate->getXfbMode()) {
+ builder.addCapability(spv::CapabilityTransformFeedback);
+ if (symbol->getQualifier().hasXfbBuffer()) {
+ builder.addDecoration(id, spv::DecorationXfbBuffer, symbol->getQualifier().layoutXfbBuffer);
+ unsigned stride = glslangIntermediate->getXfbStride(symbol->getQualifier().layoutXfbBuffer);
+ if (stride != glslang::TQualifier::layoutXfbStrideEnd)
+ builder.addDecoration(id, spv::DecorationXfbStride, stride);
+ }
+ if (symbol->getQualifier().hasXfbOffset())
+ builder.addDecoration(id, spv::DecorationOffset, symbol->getQualifier().layoutXfbOffset);
+ }
+
+ if (symbol->getType().isImage()) {
+ std::vector<spv::Decoration> memory;
+ TranslateMemoryDecoration(symbol->getType().getQualifier(), memory, glslangIntermediate->usingVulkanMemoryModel());
+ for (unsigned int i = 0; i < memory.size(); ++i)
+ builder.addDecoration(id, memory[i]);
+ }
+
+ // built-in variable decorations
+ spv::BuiltIn builtIn = TranslateBuiltInDecoration(symbol->getQualifier().builtIn, false);
+ if (builtIn != spv::BuiltInMax)
+ builder.addDecoration(id, spv::DecorationBuiltIn, (int)builtIn);
+
+ // nonuniform
+ builder.addDecoration(id, TranslateNonUniformDecoration(symbol->getType().getQualifier()));
+
+#ifdef NV_EXTENSIONS
+ if (builtIn == spv::BuiltInSampleMask) {
+ spv::Decoration decoration;
+ // GL_NV_sample_mask_override_coverage extension
+ if (glslangIntermediate->getLayoutOverrideCoverage())
+ decoration = (spv::Decoration)spv::DecorationOverrideCoverageNV;
+ else
+ decoration = (spv::Decoration)spv::DecorationMax;
+ builder.addDecoration(id, decoration);
+ if (decoration != spv::DecorationMax) {
+ builder.addExtension(spv::E_SPV_NV_sample_mask_override_coverage);
+ }
+ }
+ else if (builtIn == spv::BuiltInLayer) {
+ // SPV_NV_viewport_array2 extension
+ if (symbol->getQualifier().layoutViewportRelative) {
+ builder.addDecoration(id, (spv::Decoration)spv::DecorationViewportRelativeNV);
+ builder.addCapability(spv::CapabilityShaderViewportMaskNV);
+ builder.addExtension(spv::E_SPV_NV_viewport_array2);
+ }
+ if (symbol->getQualifier().layoutSecondaryViewportRelativeOffset != -2048) {
+ builder.addDecoration(id, (spv::Decoration)spv::DecorationSecondaryViewportRelativeNV,
+ symbol->getQualifier().layoutSecondaryViewportRelativeOffset);
+ builder.addCapability(spv::CapabilityShaderStereoViewNV);
+ builder.addExtension(spv::E_SPV_NV_stereo_view_rendering);
+ }
+ }
+
+ if (symbol->getQualifier().layoutPassthrough) {
+ builder.addDecoration(id, spv::DecorationPassthroughNV);
+ builder.addCapability(spv::CapabilityGeometryShaderPassthroughNV);
+ builder.addExtension(spv::E_SPV_NV_geometry_shader_passthrough);
+ }
+ if (symbol->getQualifier().pervertexNV) {
+ builder.addDecoration(id, spv::DecorationPerVertexNV);
+ builder.addCapability(spv::CapabilityFragmentBarycentricNV);
+ builder.addExtension(spv::E_SPV_NV_fragment_shader_barycentric);
+ }
+#endif
+
+ if (glslangIntermediate->getHlslFunctionality1() && symbol->getType().getQualifier().semanticName != nullptr) {
+ builder.addExtension("SPV_GOOGLE_hlsl_functionality1");
+ builder.addDecoration(id, (spv::Decoration)spv::DecorationHlslSemanticGOOGLE,
+ symbol->getType().getQualifier().semanticName);
+ }
+
+ if (symbol->getBasicType() == glslang::EbtReference) {
+ builder.addDecoration(id, symbol->getType().getQualifier().restrict ? spv::DecorationRestrictPointerEXT : spv::DecorationAliasedPointerEXT);
+ }
+
+ return id;
+}
+
+#ifdef NV_EXTENSIONS
+// add per-primitive, per-view, per-task decorations to a struct member (member >= 0) or an object
+void TGlslangToSpvTraverser::addMeshNVDecoration(spv::Id id, int member, const glslang::TQualifier& qualifier)
+{
+ if (member >= 0) {
+ if (qualifier.perPrimitiveNV) {
+ // Need to add capability/extension for fragment shader.
+ // Mesh shader already adds this by default.
+ if (glslangIntermediate->getStage() == EShLangFragment) {
+ builder.addCapability(spv::CapabilityMeshShadingNV);
+ builder.addExtension(spv::E_SPV_NV_mesh_shader);
+ }
+ builder.addMemberDecoration(id, (unsigned)member, spv::DecorationPerPrimitiveNV);
+ }
+ if (qualifier.perViewNV)
+ builder.addMemberDecoration(id, (unsigned)member, spv::DecorationPerViewNV);
+ if (qualifier.perTaskNV)
+ builder.addMemberDecoration(id, (unsigned)member, spv::DecorationPerTaskNV);
+ } else {
+ if (qualifier.perPrimitiveNV) {
+ // Need to add capability/extension for fragment shader.
+ // Mesh shader already adds this by default.
+ if (glslangIntermediate->getStage() == EShLangFragment) {
+ builder.addCapability(spv::CapabilityMeshShadingNV);
+ builder.addExtension(spv::E_SPV_NV_mesh_shader);
+ }
+ builder.addDecoration(id, spv::DecorationPerPrimitiveNV);
+ }
+ if (qualifier.perViewNV)
+ builder.addDecoration(id, spv::DecorationPerViewNV);
+ if (qualifier.perTaskNV)
+ builder.addDecoration(id, spv::DecorationPerTaskNV);
+ }
+}
+#endif
+
+// Make a full tree of instructions to build a SPIR-V specialization constant,
+// or regular constant if possible.
+//
+// TBD: this is not yet done, nor verified to be the best design; it does handle the leaf symbols, though
+//
+// Recursively walk the nodes. The nodes form a tree whose leaves are
+// regular constants, which themselves are trees that createSpvConstant()
+// recursively walks. So, this function walks the "top" of the tree:
+// - emit specialization constant-building instructions for specConstant
+// - when running into a non-spec-constant, switch to createSpvConstant()
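+//
+// Editorial example (not part of the upstream comment): given
+//     layout(constant_id = 7) const int N = 3;
+//     const int M = N * 2;
+// N becomes an OpSpecConstant decorated with SpecId 7, and M's initializer is the constant
+// subtree traversed below, yielding an OpSpecConstantOp IMul.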
+spv::Id TGlslangToSpvTraverser::createSpvConstant(const glslang::TIntermTyped& node)
+{
+ assert(node.getQualifier().isConstant());
+
+ // Handle front-end constants first (non-specialization constants).
+ if (! node.getQualifier().specConstant) {
+ // hand off to the non-spec-constant path
+ assert(node.getAsConstantUnion() != nullptr || node.getAsSymbolNode() != nullptr);
+ int nextConst = 0;
+ return createSpvConstantFromConstUnionArray(node.getType(), node.getAsConstantUnion() ? node.getAsConstantUnion()->getConstArray() : node.getAsSymbolNode()->getConstArray(),
+ nextConst, false);
+ }
+
+ // We now know we have a specialization constant to build
+
+ // gl_WorkGroupSize is a special case until the front-end handles hierarchical specialization constants;
+ // even then, its specialization ids are handled by special-case syntax in GLSL: layout(local_size_x = ...
+ if (node.getType().getQualifier().builtIn == glslang::EbvWorkGroupSize) {
+ std::vector<spv::Id> dimConstId;
+ for (int dim = 0; dim < 3; ++dim) {
+ bool specConst = (glslangIntermediate->getLocalSizeSpecId(dim) != glslang::TQualifier::layoutNotSet);
+ dimConstId.push_back(builder.makeUintConstant(glslangIntermediate->getLocalSize(dim), specConst));
+ if (specConst) {
+ builder.addDecoration(dimConstId.back(), spv::DecorationSpecId,
+ glslangIntermediate->getLocalSizeSpecId(dim));
+ }
+ }
+ return builder.makeCompositeConstant(builder.makeVectorType(builder.makeUintType(32), 3), dimConstId, true);
+ }
+
+ // An AST node labelled as specialization constant should be a symbol node.
+ // Its initializer should either be a sub tree with constant nodes, or a constant union array.
+ if (auto* sn = node.getAsSymbolNode()) {
+ spv::Id result;
+ if (auto* sub_tree = sn->getConstSubtree()) {
+ // Traverse the constant constructor sub tree like generating normal run-time instructions.
+ // During the AST traversal, if the node is marked as 'specConstant', SpecConstantOpModeGuard
+ // will set the builder into spec constant op instruction generating mode.
+ sub_tree->traverse(this);
+ result = accessChainLoad(sub_tree->getType());
+ } else if (auto* const_union_array = &sn->getConstArray()) {
+ int nextConst = 0;
+ result = createSpvConstantFromConstUnionArray(sn->getType(), *const_union_array, nextConst, true);
+ } else {
+ logger->missingFunctionality("Invalid initializer for spec onstant.");
+ return spv::NoResult;
+ }
+ builder.addName(result, sn->getName().c_str());
+ return result;
+ }
+
+ // Neither a front-end constant node, nor a specialization constant node with constant union array or
+ // constant sub tree as initializer.
+ logger->missingFunctionality("Neither a front-end constant nor a spec constant.");
+ return spv::NoResult;
+}
+
+// Use 'consts' as the flattened glslang source of scalar constants to recursively
+// build the aggregate SPIR-V constant.
+//
+// If there are not enough elements present in 'consts', 0 will be substituted;
+// an empty 'consts' can be used to create a fully zeroed SPIR-V constant.
+//
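+// Editorial illustration (not an upstream comment): for a glslang type of vec2 and an
+// empty 'consts', the recursion below produces
+//     OpConstantComposite %v2float %float_0 %float_0
+// since each scalar read past the end of 'consts' is substituted with zero.
+//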
+spv::Id TGlslangToSpvTraverser::createSpvConstantFromConstUnionArray(const glslang::TType& glslangType, const glslang::TConstUnionArray& consts, int& nextConst, bool specConstant)
+{
+ // vector of constants for SPIR-V
+ std::vector<spv::Id> spvConsts;
+
+ // Type is used for struct and array constants
+ spv::Id typeId = convertGlslangToSpvType(glslangType);
+
+ if (glslangType.isArray()) {
+ glslang::TType elementType(glslangType, 0);
+ for (int i = 0; i < glslangType.getOuterArraySize(); ++i)
+ spvConsts.push_back(createSpvConstantFromConstUnionArray(elementType, consts, nextConst, false));
+ } else if (glslangType.isMatrix()) {
+ glslang::TType vectorType(glslangType, 0);
+ for (int col = 0; col < glslangType.getMatrixCols(); ++col)
+ spvConsts.push_back(createSpvConstantFromConstUnionArray(vectorType, consts, nextConst, false));
+ } else if (glslangType.isCoopMat()) {
+ glslang::TType componentType(glslangType.getBasicType());
+ spvConsts.push_back(createSpvConstantFromConstUnionArray(componentType, consts, nextConst, false));
+ } else if (glslangType.isStruct()) {
+ glslang::TVector<glslang::TTypeLoc>::const_iterator iter;
+ for (iter = glslangType.getStruct()->begin(); iter != glslangType.getStruct()->end(); ++iter)
+ spvConsts.push_back(createSpvConstantFromConstUnionArray(*iter->type, consts, nextConst, false));
+ } else if (glslangType.getVectorSize() > 1) {
+ for (unsigned int i = 0; i < (unsigned int)glslangType.getVectorSize(); ++i) {
+ bool zero = nextConst >= consts.size();
+ switch (glslangType.getBasicType()) {
+ case glslang::EbtInt8:
+ spvConsts.push_back(builder.makeInt8Constant(zero ? 0 : consts[nextConst].getI8Const()));
+ break;
+ case glslang::EbtUint8:
+ spvConsts.push_back(builder.makeUint8Constant(zero ? 0 : consts[nextConst].getU8Const()));
+ break;
+ case glslang::EbtInt16:
+ spvConsts.push_back(builder.makeInt16Constant(zero ? 0 : consts[nextConst].getI16Const()));
+ break;
+ case glslang::EbtUint16:
+ spvConsts.push_back(builder.makeUint16Constant(zero ? 0 : consts[nextConst].getU16Const()));
+ break;
+ case glslang::EbtInt:
+ spvConsts.push_back(builder.makeIntConstant(zero ? 0 : consts[nextConst].getIConst()));
+ break;
+ case glslang::EbtUint:
+ spvConsts.push_back(builder.makeUintConstant(zero ? 0 : consts[nextConst].getUConst()));
+ break;
+ case glslang::EbtInt64:
+ spvConsts.push_back(builder.makeInt64Constant(zero ? 0 : consts[nextConst].getI64Const()));
+ break;
+ case glslang::EbtUint64:
+ spvConsts.push_back(builder.makeUint64Constant(zero ? 0 : consts[nextConst].getU64Const()));
+ break;
+ case glslang::EbtFloat:
+ spvConsts.push_back(builder.makeFloatConstant(zero ? 0.0F : (float)consts[nextConst].getDConst()));
+ break;
+ case glslang::EbtDouble:
+ spvConsts.push_back(builder.makeDoubleConstant(zero ? 0.0 : consts[nextConst].getDConst()));
+ break;
+ case glslang::EbtFloat16:
+ spvConsts.push_back(builder.makeFloat16Constant(zero ? 0.0F : (float)consts[nextConst].getDConst()));
+ break;
+ case glslang::EbtBool:
+ spvConsts.push_back(builder.makeBoolConstant(zero ? false : consts[nextConst].getBConst()));
+ break;
+ default:
+ assert(0);
+ break;
+ }
+ ++nextConst;
+ }
+ } else {
+ // we have a non-aggregate (scalar) constant
+ bool zero = nextConst >= consts.size();
+ spv::Id scalar = 0;
+ switch (glslangType.getBasicType()) {
+ case glslang::EbtInt8:
+ scalar = builder.makeInt8Constant(zero ? 0 : consts[nextConst].getI8Const(), specConstant);
+ break;
+ case glslang::EbtUint8:
+ scalar = builder.makeUint8Constant(zero ? 0 : consts[nextConst].getU8Const(), specConstant);
+ break;
+ case glslang::EbtInt16:
+ scalar = builder.makeInt16Constant(zero ? 0 : consts[nextConst].getI16Const(), specConstant);
+ break;
+ case glslang::EbtUint16:
+ scalar = builder.makeUint16Constant(zero ? 0 : consts[nextConst].getU16Const(), specConstant);
+ break;
+ case glslang::EbtInt:
+ scalar = builder.makeIntConstant(zero ? 0 : consts[nextConst].getIConst(), specConstant);
+ break;
+ case glslang::EbtUint:
+ scalar = builder.makeUintConstant(zero ? 0 : consts[nextConst].getUConst(), specConstant);
+ break;
+ case glslang::EbtInt64:
+ scalar = builder.makeInt64Constant(zero ? 0 : consts[nextConst].getI64Const(), specConstant);
+ break;
+ case glslang::EbtUint64:
+ scalar = builder.makeUint64Constant(zero ? 0 : consts[nextConst].getU64Const(), specConstant);
+ break;
+ case glslang::EbtFloat:
+ scalar = builder.makeFloatConstant(zero ? 0.0F : (float)consts[nextConst].getDConst(), specConstant);
+ break;
+ case glslang::EbtDouble:
+ scalar = builder.makeDoubleConstant(zero ? 0.0 : consts[nextConst].getDConst(), specConstant);
+ break;
+ case glslang::EbtFloat16:
+ scalar = builder.makeFloat16Constant(zero ? 0.0F : (float)consts[nextConst].getDConst(), specConstant);
+ break;
+ case glslang::EbtBool:
+ scalar = builder.makeBoolConstant(zero ? false : consts[nextConst].getBConst(), specConstant);
+ break;
+ case glslang::EbtReference:
+ scalar = builder.makeUint64Constant(zero ? 0 : consts[nextConst].getU64Const(), specConstant);
+ scalar = builder.createUnaryOp(spv::OpBitcast, typeId, scalar);
+ break;
+ default:
+ assert(0);
+ break;
+ }
+ ++nextConst;
+ return scalar;
+ }
+
+ return builder.makeCompositeConstant(typeId, spvConsts);
+}
+
+// Return true if the node is a constant or symbol whose reading has no
+// non-trivial observable cost or effect.
+bool TGlslangToSpvTraverser::isTrivialLeaf(const glslang::TIntermTyped* node)
+{
+ // don't know what this is
+ if (node == nullptr)
+ return false;
+
+ // a constant is safe
+ if (node->getAsConstantUnion() != nullptr)
+ return true;
+
+ // not a symbol means non-trivial
+ if (node->getAsSymbolNode() == nullptr)
+ return false;
+
+ // a symbol, depends on what's being read
+ switch (node->getType().getQualifier().storage) {
+ case glslang::EvqTemporary:
+ case glslang::EvqGlobal:
+ case glslang::EvqIn:
+ case glslang::EvqInOut:
+ case glslang::EvqConst:
+ case glslang::EvqConstReadOnly:
+ case glslang::EvqUniform:
+ return true;
+ default:
+ return false;
+ }
+}
+
+// A node is trivial if it is a single operation with no side effects.
+// Anything from HLSL (and any non-scalar) is always treated as trivial, as it does not short circuit.
+// Otherwise, err on the side of saying non-trivial.
+// Return true if trivial.
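+// Editorial example (usage elsewhere in this file, stated here for orientation): for
+// `x && (a == b)` with symbol leaves, the right side is trivial, so the caller may emit a
+// plain OpLogicalAnd; for `x && f(a)` it is not, and createShortCircuit() below is used instead.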
+bool TGlslangToSpvTraverser::isTrivial(const glslang::TIntermTyped* node)
+{
+ if (node == nullptr)
+ return false;
+
+ // count non scalars as trivial, as well as anything coming from HLSL
+ if (! node->getType().isScalarOrVec1() || glslangIntermediate->getSource() == glslang::EShSourceHlsl)
+ return true;
+
+ // symbols and constants are trivial
+ if (isTrivialLeaf(node))
+ return true;
+
+ // otherwise, it needs to be a simple operation or one or two leaf nodes
+
+ // not a simple operation
+ const glslang::TIntermBinary* binaryNode = node->getAsBinaryNode();
+ const glslang::TIntermUnary* unaryNode = node->getAsUnaryNode();
+ if (binaryNode == nullptr && unaryNode == nullptr)
+ return false;
+
+ // not on leaf nodes
+ if (binaryNode && (! isTrivialLeaf(binaryNode->getLeft()) || ! isTrivialLeaf(binaryNode->getRight())))
+ return false;
+
+ if (unaryNode && ! isTrivialLeaf(unaryNode->getOperand())) {
+ return false;
+ }
+
+ switch (node->getAsOperator()->getOp()) {
+ case glslang::EOpLogicalNot:
+ case glslang::EOpConvIntToBool:
+ case glslang::EOpConvUintToBool:
+ case glslang::EOpConvFloatToBool:
+ case glslang::EOpConvDoubleToBool:
+ case glslang::EOpEqual:
+ case glslang::EOpNotEqual:
+ case glslang::EOpLessThan:
+ case glslang::EOpGreaterThan:
+ case glslang::EOpLessThanEqual:
+ case glslang::EOpGreaterThanEqual:
+ case glslang::EOpIndexDirect:
+ case glslang::EOpIndexDirectStruct:
+ case glslang::EOpLogicalXor:
+ case glslang::EOpAny:
+ case glslang::EOpAll:
+ return true;
+ default:
+ return false;
+ }
+}
+
+// Emit short-circuiting code, where 'right' is never evaluated unless
+// the left side is true (for &&) or false (for ||).
+spv::Id TGlslangToSpvTraverser::createShortCircuit(glslang::TOperator op, glslang::TIntermTyped& left, glslang::TIntermTyped& right)
+{
+ spv::Id boolTypeId = builder.makeBoolType();
+
+ // emit left operand
+ builder.clearAccessChain();
+ left.traverse(this);
+ spv::Id leftId = accessChainLoad(left.getType());
+
+ // Vector to accumulate the OpPhi operands
+ std::vector<spv::Id> phiOperands;
+ // accumulate left operand's phi information
+ phiOperands.push_back(leftId);
+ phiOperands.push_back(builder.getBuildPoint()->getId());
+
+ // Make the two kinds of operation symmetric with a "!"
+ // || => emit "if (! left) result = right"
+ // && => emit "if ( left) result = right"
+ //
+ // TODO: this runtime "not" for || could be avoided by adding functionality
+ // to 'builder' to have an "else" without an "then"
+ if (op == glslang::EOpLogicalOr)
+ leftId = builder.createUnaryOp(spv::OpLogicalNot, boolTypeId, leftId);
+
+ // make an "if" based on the left value
+ spv::Builder::If ifBuilder(leftId, spv::SelectionControlMaskNone, builder);
+
+ // emit right operand as the "then" part of the "if"
+ builder.clearAccessChain();
+ right.traverse(this);
+ spv::Id rightId = accessChainLoad(right.getType());
+
+ // accumulate right operand's phi information
+ phiOperands.push_back(rightId);
+ phiOperands.push_back(builder.getBuildPoint()->getId());
+
+ // finish the "if"
+ ifBuilder.makeEndIf();
+
+ // phi together the two results
+ return builder.createOp(spv::OpPhi, boolTypeId, phiOperands);
+}
+
+#ifdef AMD_EXTENSIONS
+// Return the Id of the imported set of extended instructions that corresponds to the name.
+// Import this set if it has not been imported yet.
+spv::Id TGlslangToSpvTraverser::getExtBuiltins(const char* name)
+{
+ if (extBuiltinMap.find(name) != extBuiltinMap.end())
+ return extBuiltinMap[name];
+ else {
+ builder.addExtension(name);
+ spv::Id extBuiltins = builder.import(name);
+ extBuiltinMap[name] = extBuiltins;
+ return extBuiltins;
+ }
+}
+#endif
+
+}; // end anonymous namespace
+
+namespace glslang {
+
+void GetSpirvVersion(std::string& version)
+{
+ const int bufSize = 100;
+ char buf[bufSize];
+ snprintf(buf, bufSize, "0x%08x, Revision %d", spv::Version, spv::Revision);
+ version = buf;
+}
+
+// For low-order part of the generator's magic number. Bump up
+// when there is a change in the style (e.g., if SSA form changes,
+// or a different instruction sequence to do something gets used).
+int GetSpirvGeneratorVersion()
+{
+ // return 1; // start
+ // return 2; // EOpAtomicCounterDecrement gets a post decrement, to map between GLSL -> SPIR-V
+ // return 3; // change/correct barrier-instruction operands, to match memory model group decisions
+ // return 4; // some deeper access chains: for dynamic vector component, and local Boolean component
+ // return 5; // make OpArrayLength result type be an int with signedness of 0
+ // return 6; // revert version 5 change, which makes a different (new) kind of incorrect code,
+ // versions 4 and 6 each generate OpArrayLength as it has long been done
+ return 7; // GLSL volatile keyword maps to both SPIR-V decorations Volatile and Coherent
+}
+
+// Write SPIR-V out to a binary file
+void OutputSpvBin(const std::vector<unsigned int>& spirv, const char* baseName)
+{
+ std::ofstream out;
+ out.open(baseName, std::ios::binary | std::ios::out);
+ if (out.fail())
+ printf("ERROR: Failed to open file: %s\n", baseName);
+ for (int i = 0; i < (int)spirv.size(); ++i) {
+ unsigned int word = spirv[i];
+ out.write((const char*)&word, 4);
+ }
+ out.close();
+}
+
+// Write SPIR-V out to a text file with 32-bit hexadecimal words
+void OutputSpvHex(const std::vector<unsigned int>& spirv, const char* baseName, const char* varName)
+{
+ std::ofstream out;
+ out.open(baseName, std::ios::binary | std::ios::out);
+ if (out.fail())
+ printf("ERROR: Failed to open file: %s\n", baseName);
+ out << "\t// " <<
+ GetSpirvGeneratorVersion() << "." << GLSLANG_MINOR_VERSION << "." << GLSLANG_PATCH_LEVEL <<
+ std::endl;
+ if (varName != nullptr) {
+ out << "\t #pragma once" << std::endl;
+ out << "const uint32_t " << varName << "[] = {" << std::endl;
+ }
+ const int WORDS_PER_LINE = 8;
+ for (int i = 0; i < (int)spirv.size(); i += WORDS_PER_LINE) {
+ out << "\t";
+ for (int j = 0; j < WORDS_PER_LINE && i + j < (int)spirv.size(); ++j) {
+ const unsigned int word = spirv[i + j];
+ out << "0x" << std::hex << std::setw(8) << std::setfill('0') << word;
+ if (i + j + 1 < (int)spirv.size()) {
+ out << ",";
+ }
+ }
+ out << std::endl;
+ }
+ if (varName != nullptr) {
+ out << "};";
+ }
+ out.close();
+}
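+// Editorial illustration of the emitted text (version numbers and "spv_data" are hypothetical):
+//     // 7.13.3496
+//      #pragma once
+//     const uint32_t spv_data[] = {
+//     0x07230203,0x00010000,0x00080007,0x0000002a,0x00000000,0x00020011,0x00000001,0x0006000b,
+//     ...
+//     };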
+
+//
+// Set up the glslang traversal
+//
+void GlslangToSpv(const TIntermediate& intermediate, std::vector<unsigned int>& spirv, SpvOptions* options)
+{
+ spv::SpvBuildLogger logger;
+ GlslangToSpv(intermediate, spirv, &logger, options);
+}
+
+void GlslangToSpv(const TIntermediate& intermediate, std::vector<unsigned int>& spirv,
+ spv::SpvBuildLogger* logger, SpvOptions* options)
+{
+ TIntermNode* root = intermediate.getTreeRoot();
+
+ if (root == 0)
+ return;
+
+ SpvOptions defaultOptions;
+ if (options == nullptr)
+ options = &defaultOptions;
+
+ GetThreadPoolAllocator().push();
+
+ TGlslangToSpvTraverser it(intermediate.getSpv().spv, &intermediate, logger, *options);
+ root->traverse(&it);
+ it.finishSpv();
+ it.dumpSpv(spirv);
+
+#if ENABLE_OPT
+ // If from HLSL, run spirv-opt to "legalize" the SPIR-V for Vulkan
+ // eg. forward and remove memory writes of opaque types.
+ if ((intermediate.getSource() == EShSourceHlsl || options->optimizeSize) && !options->disableOptimizer)
+ SpirvToolsLegalize(intermediate, spirv, logger, options);
+
+ if (options->validate)
+ SpirvToolsValidate(intermediate, spirv, logger);
+
+ if (options->disassemble)
+ SpirvToolsDisassemble(std::cout, spirv);
+
+#endif
+
+ GetThreadPoolAllocator().pop();
+}
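+
+// Minimal usage sketch (editorial, not part of glslang; assumes a successfully
+// parsed and linked TProgram named 'program'):
+//
+//     glslang::SpvOptions opts;
+//     opts.validate = true;
+//     spv::SpvBuildLogger logger;
+//     std::vector<unsigned int> words;
+//     glslang::GlslangToSpv(*program.getIntermediate(EShLangFragment), words, &logger, &opts);
+//     puts(logger.getAllMessages().c_str());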
+
+}; // end namespace glslang
diff --git a/thirdparty/glslang/SPIRV/GlslangToSpv.h b/thirdparty/glslang/SPIRV/GlslangToSpv.h
new file mode 100644
index 0000000000..86e1c23bf6
--- /dev/null
+++ b/thirdparty/glslang/SPIRV/GlslangToSpv.h
@@ -0,0 +1,61 @@
+//
+// Copyright (C) 2014 LunarG, Inc.
+// Copyright (C) 2015-2018 Google, Inc.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+
+#pragma once
+
+#if defined(_MSC_VER) && _MSC_VER >= 1900
+ #pragma warning(disable : 4464) // relative include path contains '..'
+#endif
+
+#include "SpvTools.h"
+#include "../glslang/Include/intermediate.h"
+
+#include <string>
+#include <vector>
+
+#include "Logger.h"
+
+namespace glslang {
+
+void GetSpirvVersion(std::string&);
+int GetSpirvGeneratorVersion();
+void GlslangToSpv(const glslang::TIntermediate& intermediate, std::vector<unsigned int>& spirv,
+ SpvOptions* options = nullptr);
+void GlslangToSpv(const glslang::TIntermediate& intermediate, std::vector<unsigned int>& spirv,
+ spv::SpvBuildLogger* logger, SpvOptions* options = nullptr);
+void OutputSpvBin(const std::vector<unsigned int>& spirv, const char* baseName);
+void OutputSpvHex(const std::vector<unsigned int>& spirv, const char* baseName, const char* varName);
+
+}
diff --git a/thirdparty/glslang/SPIRV/InReadableOrder.cpp b/thirdparty/glslang/SPIRV/InReadableOrder.cpp
new file mode 100644
index 0000000000..52b29613a4
--- /dev/null
+++ b/thirdparty/glslang/SPIRV/InReadableOrder.cpp
@@ -0,0 +1,113 @@
+//
+// Copyright (C) 2016 Google, Inc.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+
+// The SPIR-V spec requires code blocks to appear in an order satisfying the
+// dominator-tree direction (i.e., dominator before the dominated). This is,
+// actually, easy to achieve: any pre-order CFG traversal algorithm will do it.
+// Because such algorithms visit a block only after traversing some path to it
+// from the root, they necessarily visit the block's idom first.
+//
+// But not every graph-traversal algorithm outputs blocks in an order that
+// appears logical to human readers. The problem is that unrelated branches may
+// be interspersed with each other, and merge blocks may come before some of the
+// branches being merged.
+//
+// A good, human-readable order of blocks may be achieved by performing
+// depth-first search but delaying merge nodes until after all their branches
+// have been visited. This is implemented below by the inReadableOrder()
+// function.
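+//
+// Editorial example: for `if (c) { A } else { B }` merging at M, a naive pre-order
+// traversal could emit entry, A, M, B; delaying the merge block yields entry, A, B, M.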
+
+#include "spvIR.h"
+
+#include <cassert>
+#include <unordered_set>
+
+using spv::Block;
+using spv::Id;
+
+namespace {
+// Traverses CFG in a readable order, invoking a pre-set callback on each block.
+// Use by calling visit() on the root block.
+class ReadableOrderTraverser {
+public:
+ explicit ReadableOrderTraverser(std::function<void(Block*)> callback) : callback_(callback) {}
+ // Visits the block if it hasn't been visited already and isn't currently
+ // being delayed. Invokes callback(block), then descends into its
+ // successors. Delays merge-block and continue-block processing until all
+ // the branches have been completed.
+ void visit(Block* block)
+ {
+ assert(block);
+ if (visited_.count(block) || delayed_.count(block))
+ return;
+ callback_(block);
+ visited_.insert(block);
+ Block* mergeBlock = nullptr;
+ Block* continueBlock = nullptr;
+ auto mergeInst = block->getMergeInstruction();
+ if (mergeInst) {
+ Id mergeId = mergeInst->getIdOperand(0);
+ mergeBlock = block->getParent().getParent().getInstruction(mergeId)->getBlock();
+ delayed_.insert(mergeBlock);
+ if (mergeInst->getOpCode() == spv::OpLoopMerge) {
+ Id continueId = mergeInst->getIdOperand(1);
+ continueBlock =
+ block->getParent().getParent().getInstruction(continueId)->getBlock();
+ delayed_.insert(continueBlock);
+ }
+ }
+ const auto successors = block->getSuccessors();
+ for (auto it = successors.cbegin(); it != successors.cend(); ++it)
+ visit(*it);
+ if (continueBlock) {
+ delayed_.erase(continueBlock);
+ visit(continueBlock);
+ }
+ if (mergeBlock) {
+ delayed_.erase(mergeBlock);
+ visit(mergeBlock);
+ }
+ }
+
+private:
+ std::function<void(Block*)> callback_;
+ // Whether a block has already been visited or is being delayed.
+ std::unordered_set<Block *> visited_, delayed_;
+};
+}
+
+void spv::inReadableOrder(Block* root, std::function<void(Block*)> callback)
+{
+ ReadableOrderTraverser(callback).visit(root);
+}
diff --git a/thirdparty/glslang/SPIRV/Logger.cpp b/thirdparty/glslang/SPIRV/Logger.cpp
new file mode 100644
index 0000000000..48bd4e3ade
--- /dev/null
+++ b/thirdparty/glslang/SPIRV/Logger.cpp
@@ -0,0 +1,68 @@
+//
+// Copyright (C) 2016 Google, Inc.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+
+#include "Logger.h"
+
+#include <algorithm>
+#include <iterator>
+#include <sstream>
+
+namespace spv {
+
+void SpvBuildLogger::tbdFunctionality(const std::string& f)
+{
+ if (std::find(std::begin(tbdFeatures), std::end(tbdFeatures), f) == std::end(tbdFeatures))
+ tbdFeatures.push_back(f);
+}
+
+void SpvBuildLogger::missingFunctionality(const std::string& f)
+{
+ if (std::find(std::begin(missingFeatures), std::end(missingFeatures), f) == std::end(missingFeatures))
+ missingFeatures.push_back(f);
+}
+
+std::string SpvBuildLogger::getAllMessages() const {
+ std::ostringstream messages;
+ for (auto it = tbdFeatures.cbegin(); it != tbdFeatures.cend(); ++it)
+ messages << "TBD functionality: " << *it << "\n";
+ for (auto it = missingFeatures.cbegin(); it != missingFeatures.cend(); ++it)
+ messages << "Missing functionality: " << *it << "\n";
+ for (auto it = warnings.cbegin(); it != warnings.cend(); ++it)
+ messages << "warning: " << *it << "\n";
+ for (auto it = errors.cbegin(); it != errors.cend(); ++it)
+ messages << "error: " << *it << "\n";
+ return messages.str();
+}
+
+} // end spv namespace
diff --git a/thirdparty/glslang/SPIRV/Logger.h b/thirdparty/glslang/SPIRV/Logger.h
new file mode 100644
index 0000000000..2e4ddaf517
--- /dev/null
+++ b/thirdparty/glslang/SPIRV/Logger.h
@@ -0,0 +1,74 @@
+//
+// Copyright (C) 2016 Google, Inc.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef GLSLANG_SPIRV_LOGGER_H
+#define GLSLANG_SPIRV_LOGGER_H
+
+#include <string>
+#include <vector>
+
+namespace spv {
+
+// A class for holding all SPIR-V build status messages, including
+// missing/TBD functionalities, warnings, and errors.
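+//
+// Editorial usage sketch: one logger typically accompanies each SPIR-V build, and its
+// accumulated text is surfaced afterwards:
+//     spv::SpvBuildLogger logger;
+//     glslang::GlslangToSpv(intermediate, spirv, &logger);   // declared in GlslangToSpv.h
+//     std::string messages = logger.getAllMessages();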
+class SpvBuildLogger {
+public:
+ SpvBuildLogger() {}
+
+ // Registers a TBD functionality.
+ void tbdFunctionality(const std::string& f);
+ // Registers a missing functionality.
+ void missingFunctionality(const std::string& f);
+
+ // Logs a warning.
+ void warning(const std::string& w) { warnings.push_back(w); }
+ // Logs an error.
+ void error(const std::string& e) { errors.push_back(e); }
+
+ // Returns all messages accumulated in the order of:
+ // TBD functionalities, missing functionalities, warnings, errors.
+ std::string getAllMessages() const;
+
+private:
+ SpvBuildLogger(const SpvBuildLogger&);
+
+ std::vector<std::string> tbdFeatures;
+ std::vector<std::string> missingFeatures;
+ std::vector<std::string> warnings;
+ std::vector<std::string> errors;
+};
+
+} // end spv namespace
+
+#endif // GLSLANG_SPIRV_LOGGER_H
diff --git a/thirdparty/glslang/SPIRV/SPVRemapper.cpp b/thirdparty/glslang/SPIRV/SPVRemapper.cpp
new file mode 100644
index 0000000000..fd0bb8950c
--- /dev/null
+++ b/thirdparty/glslang/SPIRV/SPVRemapper.cpp
@@ -0,0 +1,1487 @@
+//
+// Copyright (C) 2015 LunarG, Inc.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+#include "SPVRemapper.h"
+#include "doc.h"
+
+#if !defined (use_cpp11)
+// ... not supported before C++11
+#else // defined (use_cpp11)
+
+#include <algorithm>
+#include <cassert>
+#include "../glslang/Include/Common.h"
+
+namespace spv {
+
+ // By default, just abort on error. Can be overridden via RegisterErrorHandler
+ spirvbin_t::errorfn_t spirvbin_t::errorHandler = [](const std::string&) { exit(5); };
+ // By default, eat log messages. Can be overridden via RegisterLogHandler
+ spirvbin_t::logfn_t spirvbin_t::logHandler = [](const std::string&) { };
+
+ // This can be overridden to provide other message behavior if needed
+ void spirvbin_t::msg(int minVerbosity, int indent, const std::string& txt) const
+ {
+ if (verbose >= minVerbosity)
+ logHandler(std::string(indent, ' ') + txt);
+ }
+
+ // hash opcode, with special handling for OpExtInst
+ std::uint32_t spirvbin_t::asOpCodeHash(unsigned word)
+ {
+ const spv::Op opCode = asOpCode(word);
+
+ std::uint32_t offset = 0;
+
+ switch (opCode) {
+ case spv::OpExtInst:
+ offset += asId(word + 4); break;
+ default:
+ break;
+ }
+
+ return opCode * 19 + offset; // 19 = small prime
+ }
+
+ spirvbin_t::range_t spirvbin_t::literalRange(spv::Op opCode) const
+ {
+ static const int maxCount = 1<<30;
+
+ switch (opCode) {
+ case spv::OpTypeFloat: // fall through...
+ case spv::OpTypePointer: return range_t(2, 3);
+ case spv::OpTypeInt: return range_t(2, 4);
+ // TODO: case spv::OpTypeImage:
+ // TODO: case spv::OpTypeSampledImage:
+ case spv::OpTypeSampler: return range_t(3, 8);
+ case spv::OpTypeVector: // fall through
+ case spv::OpTypeMatrix: // ...
+ case spv::OpTypePipe: return range_t(3, 4);
+ case spv::OpConstant: return range_t(3, maxCount);
+ default: return range_t(0, 0);
+ }
+ }
+
+ spirvbin_t::range_t spirvbin_t::typeRange(spv::Op opCode) const
+ {
+ static const int maxCount = 1<<30;
+
+ if (isConstOp(opCode))
+ return range_t(1, 2);
+
+ switch (opCode) {
+ case spv::OpTypeVector: // fall through
+ case spv::OpTypeMatrix: // ...
+ case spv::OpTypeSampler: // ...
+ case spv::OpTypeArray: // ...
+ case spv::OpTypeRuntimeArray: // ...
+ case spv::OpTypePipe: return range_t(2, 3);
+ case spv::OpTypeStruct: // fall through
+ case spv::OpTypeFunction: return range_t(2, maxCount);
+ case spv::OpTypePointer: return range_t(3, 4);
+ default: return range_t(0, 0);
+ }
+ }
+
+ spirvbin_t::range_t spirvbin_t::constRange(spv::Op opCode) const
+ {
+ static const int maxCount = 1<<30;
+
+ switch (opCode) {
+ case spv::OpTypeArray: // fall through...
+ case spv::OpTypeRuntimeArray: return range_t(3, 4);
+ case spv::OpConstantComposite: return range_t(3, maxCount);
+ default: return range_t(0, 0);
+ }
+ }
+
+ // Return the size of a type in 32-bit words. This currently only
+ // handles ints and floats, and is only invoked by queries which must be
+ // integer types. If ever needed, it can be generalized.
+ unsigned spirvbin_t::typeSizeInWords(spv::Id id) const
+ {
+ const unsigned typeStart = idPos(id);
+ const spv::Op opCode = asOpCode(typeStart);
+
+ if (errorLatch)
+ return 0;
+
+ switch (opCode) {
+ case spv::OpTypeInt: // fall through...
+ case spv::OpTypeFloat: return (spv[typeStart+2]+31)/32;
+ default:
+ return 0;
+ }
+ }
+
+ // Looks up the type of a given const or variable ID, and
+ // returns its size in 32-bit words.
+ unsigned spirvbin_t::idTypeSizeInWords(spv::Id id) const
+ {
+ const auto tid_it = idTypeSizeMap.find(id);
+ if (tid_it == idTypeSizeMap.end()) {
+ error("type size for ID not found");
+ return 0;
+ }
+
+ return tid_it->second;
+ }
+
+ // Is this an opcode we should remove when using --strip?
+ bool spirvbin_t::isStripOp(spv::Op opCode) const
+ {
+ switch (opCode) {
+ case spv::OpSource:
+ case spv::OpSourceExtension:
+ case spv::OpName:
+ case spv::OpMemberName:
+ case spv::OpLine: return true;
+ default: return false;
+ }
+ }
+
+ // Return true if this opcode is flow control
+ bool spirvbin_t::isFlowCtrl(spv::Op opCode) const
+ {
+ switch (opCode) {
+ case spv::OpBranchConditional:
+ case spv::OpBranch:
+ case spv::OpSwitch:
+ case spv::OpLoopMerge:
+ case spv::OpSelectionMerge:
+ case spv::OpLabel:
+ case spv::OpFunction:
+ case spv::OpFunctionEnd: return true;
+ default: return false;
+ }
+ }
+
+ // Return true if this opcode defines a type
+ bool spirvbin_t::isTypeOp(spv::Op opCode) const
+ {
+ switch (opCode) {
+ case spv::OpTypeVoid:
+ case spv::OpTypeBool:
+ case spv::OpTypeInt:
+ case spv::OpTypeFloat:
+ case spv::OpTypeVector:
+ case spv::OpTypeMatrix:
+ case spv::OpTypeImage:
+ case spv::OpTypeSampler:
+ case spv::OpTypeArray:
+ case spv::OpTypeRuntimeArray:
+ case spv::OpTypeStruct:
+ case spv::OpTypeOpaque:
+ case spv::OpTypePointer:
+ case spv::OpTypeFunction:
+ case spv::OpTypeEvent:
+ case spv::OpTypeDeviceEvent:
+ case spv::OpTypeReserveId:
+ case spv::OpTypeQueue:
+ case spv::OpTypeSampledImage:
+ case spv::OpTypePipe: return true;
+ default: return false;
+ }
+ }
+
+ // Return true if this opcode defines a constant
+ bool spirvbin_t::isConstOp(spv::Op opCode) const
+ {
+ switch (opCode) {
+ case spv::OpConstantSampler:
+ error("unimplemented constant type");
+ return true;
+
+ case spv::OpConstantNull:
+ case spv::OpConstantTrue:
+ case spv::OpConstantFalse:
+ case spv::OpConstantComposite:
+ case spv::OpConstant:
+ return true;
+
+ default:
+ return false;
+ }
+ }
+
+ const auto inst_fn_nop = [](spv::Op, unsigned) { return false; };
+ const auto op_fn_nop = [](spv::Id&) { };
+
+ // g++ doesn't like these defined in the class proper in an anonymous namespace.
+ // Dunno why. Also MSVC doesn't like the constexpr keyword. Also dunno why.
+ // Defining them externally seems to please both compilers, so, here they are.
+ const spv::Id spirvbin_t::unmapped = spv::Id(-10000);
+ const spv::Id spirvbin_t::unused = spv::Id(-10001);
+ const int spirvbin_t::header_size = 5;
+
+ spv::Id spirvbin_t::nextUnusedId(spv::Id id)
+ {
+ while (isNewIdMapped(id)) // search for an unused ID
+ ++id;
+
+ return id;
+ }
+
+ spv::Id spirvbin_t::localId(spv::Id id, spv::Id newId)
+ {
+ //assert(id != spv::NoResult && newId != spv::NoResult);
+
+ if (id > bound()) {
+ error(std::string("ID out of range: ") + std::to_string(id));
+ return spirvbin_t::unused;
+ }
+
+ if (id >= idMapL.size())
+ idMapL.resize(id+1, unused);
+
+ if (newId != unmapped && newId != unused) {
+ if (isOldIdUnused(id)) {
+ error(std::string("ID unused in module: ") + std::to_string(id));
+ return spirvbin_t::unused;
+ }
+
+ if (!isOldIdUnmapped(id)) {
+ error(std::string("ID already mapped: ") + std::to_string(id) + " -> "
+ + std::to_string(localId(id)));
+
+ return spirvbin_t::unused;
+ }
+
+ if (isNewIdMapped(newId)) {
+ error(std::string("ID already used in module: ") + std::to_string(newId));
+ return spirvbin_t::unused;
+ }
+
+ msg(4, 4, std::string("map: ") + std::to_string(id) + " -> " + std::to_string(newId));
+ setMapped(newId);
+ largestNewId = std::max(largestNewId, newId);
+ }
+
+ return idMapL[id] = newId;
+ }
+
+ // Parse a literal string from the SPIR binary and return it as an std::string
+ // Due to C++11 RValue references, this doesn't copy the result string.
+ std::string spirvbin_t::literalString(unsigned word) const
+ {
+ std::string literal;
+
+ literal.reserve(16);
+
+ const char* bytes = reinterpret_cast<const char*>(spv.data() + word);
+
+ while (bytes && *bytes)
+ literal += *bytes++;
+
+ return literal;
+ }
+
+ void spirvbin_t::applyMap()
+ {
+ msg(3, 2, std::string("Applying map: "));
+
+ // Map local IDs through the ID map
+ process(inst_fn_nop, // ignore instructions
+ [this](spv::Id& id) {
+ id = localId(id);
+
+ if (errorLatch)
+ return;
+
+ assert(id != unused && id != unmapped);
+ }
+ );
+ }
+
+ // Find free IDs for anything we haven't mapped
+ void spirvbin_t::mapRemainder()
+ {
+ msg(3, 2, std::string("Remapping remainder: "));
+
+ spv::Id unusedId = 1; // can't use 0: that's NoResult
+ spirword_t maxBound = 0;
+
+ for (spv::Id id = 0; id < idMapL.size(); ++id) {
+ if (isOldIdUnused(id))
+ continue;
+
+ // Find a new mapping for any used but unmapped IDs
+ if (isOldIdUnmapped(id)) {
+ localId(id, unusedId = nextUnusedId(unusedId));
+ if (errorLatch)
+ return;
+ }
+
+ if (isOldIdUnmapped(id)) {
+ error(std::string("old ID not mapped: ") + std::to_string(id));
+ return;
+ }
+
+ // Track max bound
+ maxBound = std::max(maxBound, localId(id) + 1);
+
+ if (errorLatch)
+ return;
+ }
+
+ bound(maxBound); // reset header ID bound to as big as it now needs to be
+ }
+
+ // Mark debug instructions for stripping
+ void spirvbin_t::stripDebug()
+ {
+ // Strip instructions in the stripOp set: debug info.
+ process(
+ [&](spv::Op opCode, unsigned start) {
+ // remember opcodes we want to strip later
+ if (isStripOp(opCode))
+ stripInst(start);
+ return true;
+ },
+ op_fn_nop);
+ }
+
+ // Mark instructions that refer to now-removed IDs for stripping
+ void spirvbin_t::stripDeadRefs()
+ {
+ process(
+ [&](spv::Op opCode, unsigned start) {
+ // strip opcodes pointing to removed data
+ switch (opCode) {
+ case spv::OpName:
+ case spv::OpMemberName:
+ case spv::OpDecorate:
+ case spv::OpMemberDecorate:
+ if (idPosR.find(asId(start+1)) == idPosR.end())
+ stripInst(start);
+ break;
+ default:
+ break; // leave it alone
+ }
+
+ return true;
+ },
+ op_fn_nop);
+
+ strip();
+ }
+
+ // Update local maps of ID, type, etc positions
+ void spirvbin_t::buildLocalMaps()
+ {
+ msg(2, 2, std::string("build local maps: "));
+
+ mapped.clear();
+ idMapL.clear();
+// preserve nameMap, so we don't clear that.
+ fnPos.clear();
+ fnCalls.clear();
+ typeConstPos.clear();
+ idPosR.clear();
+ entryPoint = spv::NoResult;
+ largestNewId = 0;
+
+ idMapL.resize(bound(), unused);
+
+ int fnStart = 0;
+ spv::Id fnRes = spv::NoResult;
+
+ // build local Id and name maps
+ process(
+ [&](spv::Op opCode, unsigned start) {
+ unsigned word = start+1;
+ spv::Id typeId = spv::NoResult;
+
+ if (spv::InstructionDesc[opCode].hasType())
+ typeId = asId(word++);
+
+ // If there's a result ID, remember the size of its type
+ if (spv::InstructionDesc[opCode].hasResult()) {
+ const spv::Id resultId = asId(word++);
+ idPosR[resultId] = start;
+
+ if (typeId != spv::NoResult) {
+ const unsigned idTypeSize = typeSizeInWords(typeId);
+
+ if (errorLatch)
+ return false;
+
+ if (idTypeSize != 0)
+ idTypeSizeMap[resultId] = idTypeSize;
+ }
+ }
+
+ if (opCode == spv::Op::OpName) {
+ const spv::Id target = asId(start+1);
+ const std::string name = literalString(start+2);
+ nameMap[name] = target;
+
+ } else if (opCode == spv::Op::OpFunctionCall) {
+ ++fnCalls[asId(start + 3)];
+ } else if (opCode == spv::Op::OpEntryPoint) {
+ entryPoint = asId(start + 2);
+ } else if (opCode == spv::Op::OpFunction) {
+ if (fnStart != 0) {
+ error("nested function found");
+ return false;
+ }
+
+ fnStart = start;
+ fnRes = asId(start + 2);
+ } else if (opCode == spv::Op::OpFunctionEnd) {
+ assert(fnRes != spv::NoResult);
+ if (fnStart == 0) {
+ error("function end without function start");
+ return false;
+ }
+
+ fnPos[fnRes] = range_t(fnStart, start + asWordCount(start));
+ fnStart = 0;
+ } else if (isConstOp(opCode)) {
+ if (errorLatch)
+ return false;
+
+ assert(asId(start + 2) != spv::NoResult);
+ typeConstPos.insert(start);
+ } else if (isTypeOp(opCode)) {
+ assert(asId(start + 1) != spv::NoResult);
+ typeConstPos.insert(start);
+ }
+
+ return false;
+ },
+
+ [this](spv::Id& id) { localId(id, unmapped); }
+ );
+ }
+
+ // Validate the SPIR header
+ void spirvbin_t::validate() const
+ {
+ msg(2, 2, std::string("validating: "));
+
+ if (spv.size() < header_size) {
+ error("file too short: ");
+ return;
+ }
+
+ if (magic() != spv::MagicNumber) {
+ error("bad magic number");
+ return;
+ }
+
+ // field 1 = version
+ // field 2 = generator magic
+ // field 3 = result <id> bound
+
+ if (schemaNum() != 0) {
+ error("bad schema, must be 0");
+ return;
+ }
+ }
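+
+ // For reference, the five-word SPIR-V header checked here (and indexed by
+ // the magic()/genmagic()/bound()/schemaNum() accessors in SPVRemapper.h):
+ //
+ //     word 0: magic number (spv::MagicNumber, 0x07230203)
+ //     word 1: version number
+ //     word 2: generator magic number
+ //     word 3: result <id> bound (every ID is smaller than this)
+ //     word 4: schema, which must be 0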
+
+ int spirvbin_t::processInstruction(unsigned word, instfn_t instFn, idfn_t idFn)
+ {
+ const auto instructionStart = word;
+ const unsigned wordCount = asWordCount(instructionStart);
+ const int nextInst = word++ + wordCount;
+ spv::Op opCode = asOpCode(instructionStart);
+
+ if (nextInst > int(spv.size())) {
+ error("spir instruction terminated too early");
+ return -1;
+ }
+
+ // Base for computing number of operands; will be updated as more is learned
+ unsigned numOperands = wordCount - 1;
+
+ if (instFn(opCode, instructionStart))
+ return nextInst;
+
+ // Read type and result ID from instruction desc table
+ if (spv::InstructionDesc[opCode].hasType()) {
+ idFn(asId(word++));
+ --numOperands;
+ }
+
+ if (spv::InstructionDesc[opCode].hasResult()) {
+ idFn(asId(word++));
+ --numOperands;
+ }
+
+ // Extended instructions: currently, assume everything is an ID.
+ // TODO: add whatever data we need for exceptions to that
+ if (opCode == spv::OpExtInst) {
+ word += 2; // instruction set, and instruction from set
+ numOperands -= 2;
+
+ for (unsigned op=0; op < numOperands; ++op)
+ idFn(asId(word++)); // ID
+
+ return nextInst;
+ }
+
+ // Circular buffer so we can look back at previous unmapped values during the mapping pass.
+ static const unsigned idBufferSize = 4;
+ spv::Id idBuffer[idBufferSize];
+ unsigned idBufferPos = 0;
+
+ // Store IDs from instruction in our map
+ for (int op = 0; numOperands > 0; ++op, --numOperands) {
+ // SpecConstantOp is special: it includes the operands of another opcode which is
+ // given as a literal in the 3rd word. We will switch over to pretending that the
+ // opcode being processed is the literal opcode value of the SpecConstantOp. See the
+ // SPIRV spec for details. This way we will handle IDs and literals as appropriate for
+ // the embedded op.
+ if (opCode == spv::OpSpecConstantOp) {
+ if (op == 0) {
+ opCode = asOpCode(word++); // this is the opcode embedded in the SpecConstantOp.
+ --numOperands;
+ }
+ }
+
+ switch (spv::InstructionDesc[opCode].operands.getClass(op)) {
+ case spv::OperandId:
+ case spv::OperandScope:
+ case spv::OperandMemorySemantics:
+ idBuffer[idBufferPos] = asId(word);
+ idBufferPos = (idBufferPos + 1) % idBufferSize;
+ idFn(asId(word++));
+ break;
+
+ case spv::OperandVariableIds:
+ for (unsigned i = 0; i < numOperands; ++i)
+ idFn(asId(word++));
+ return nextInst;
+
+ case spv::OperandVariableLiterals:
+ // for clarity
+ // if (opCode == spv::OpDecorate && asDecoration(word - 1) == spv::DecorationBuiltIn) {
+ // ++word;
+ // --numOperands;
+ // }
+ // word += numOperands;
+ return nextInst;
+
+ case spv::OperandVariableLiteralId: {
+ if (opCode == OpSwitch) {
+ // word-2 is the position of the selector ID. OpSwitch Literals match its type.
+ // In case the IDs are currently being remapped, we get the word[-2] ID from
+ // the circular idBuffer.
+ const unsigned literalSizePos = (idBufferPos+idBufferSize-2) % idBufferSize;
+ const unsigned literalSize = idTypeSizeInWords(idBuffer[literalSizePos]);
+ const unsigned numLiteralIdPairs = (nextInst-word) / (1+literalSize);
+
+ if (errorLatch)
+ return -1;
+
+ for (unsigned arg=0; arg<numLiteralIdPairs; ++arg) {
+ word += literalSize; // literal
+ idFn(asId(word++)); // label
+ }
+ } else {
+ assert(0); // currently, only OpSwitch uses OperandVariableLiteralId
+ }
+
+ return nextInst;
+ }
+
+ case spv::OperandLiteralString: {
+ const int stringWordCount = literalStringWords(literalString(word));
+ word += stringWordCount;
+ numOperands -= (stringWordCount-1); // -1 because for() header post-decrements
+ break;
+ }
+
+ // Execution mode might have extra literal operands. Skip them.
+ case spv::OperandExecutionMode:
+ return nextInst;
+
+ // Single word operands we simply ignore, as they hold no IDs
+ case spv::OperandLiteralNumber:
+ case spv::OperandSource:
+ case spv::OperandExecutionModel:
+ case spv::OperandAddressing:
+ case spv::OperandMemory:
+ case spv::OperandStorage:
+ case spv::OperandDimensionality:
+ case spv::OperandSamplerAddressingMode:
+ case spv::OperandSamplerFilterMode:
+ case spv::OperandSamplerImageFormat:
+ case spv::OperandImageChannelOrder:
+ case spv::OperandImageChannelDataType:
+ case spv::OperandImageOperands:
+ case spv::OperandFPFastMath:
+ case spv::OperandFPRoundingMode:
+ case spv::OperandLinkageType:
+ case spv::OperandAccessQualifier:
+ case spv::OperandFuncParamAttr:
+ case spv::OperandDecoration:
+ case spv::OperandBuiltIn:
+ case spv::OperandSelect:
+ case spv::OperandLoop:
+ case spv::OperandFunction:
+ case spv::OperandMemoryAccess:
+ case spv::OperandGroupOperation:
+ case spv::OperandKernelEnqueueFlags:
+ case spv::OperandKernelProfilingInfo:
+ case spv::OperandCapability:
+ ++word;
+ break;
+
+ default:
+ assert(0 && "Unhandled Operand Class");
+ break;
+ }
+ }
+
+ return nextInst;
+ }
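+
+ // A sketch of the encoding walked above: the first word of every SPIR-V
+ // instruction packs the total word count into its high 16 bits and the
+ // opcode into its low 16 bits, which is what opWordCount()/opOpCode() in
+ // SPVRemapper.h unpack:
+ //
+ //     const spirword_t first = spv[start];
+ //     const unsigned wordCount = first >> spv::WordCountShift;  // >> 16
+ //     const spv::Op opCode = spv::Op(first & spv::OpCodeMask);  // & 0xffff
+ //     // operands occupy words [start+1, start+wordCount)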
+
+ // Make a pass over all the instructions and process them given appropriate functions
+ spirvbin_t& spirvbin_t::process(instfn_t instFn, idfn_t idFn, unsigned begin, unsigned end)
+ {
+ // For efficiency, reserve name map space. It can grow if needed.
+ nameMap.reserve(32);
+
+ // If begin or end == 0, use defaults
+ begin = (begin == 0 ? header_size : begin);
+ end = (end == 0 ? unsigned(spv.size()) : end);
+
+ // basic parsing and InstructionDesc table borrowed from SpvDisassemble.cpp...
+ unsigned nextInst = unsigned(spv.size());
+
+ for (unsigned word = begin; word < end; word = nextInst) {
+ nextInst = processInstruction(word, instFn, idFn);
+
+ if (errorLatch)
+ return *this;
+ }
+
+ return *this;
+ }
+
+ // Apply global name mapping to a single module
+ void spirvbin_t::mapNames()
+ {
+ static const std::uint32_t softTypeIdLimit = 3011; // small prime. TODO: get from options
+ static const std::uint32_t firstMappedID = 3019; // offset into ID space
+
+ for (const auto& name : nameMap) {
+ std::uint32_t hashval = 1911;
+ for (const char c : name.first)
+ hashval = hashval * 1009 + c;
+
+ if (isOldIdUnmapped(name.second)) {
+ localId(name.second, nextUnusedId(hashval % softTypeIdLimit + firstMappedID));
+ if (errorLatch)
+ return;
+ }
+ }
+ }
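+
+ // A worked sketch of the hash above, using a hypothetical name "main":
+ //
+ //     std::uint32_t hashval = 1911;
+ //     for (const char c : std::string("main"))
+ //         hashval = hashval * 1009 + c;              // wraps mod 2^32
+ //     spv::Id candidate = hashval % softTypeIdLimit  // into [0, 3011)
+ //                       + firstMappedID;             // offset to 3019+
+ //
+ // nextUnusedId() then walks forward from 'candidate' if that ID is taken,
+ // so identically named IDs across modules tend to land on the same value.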
+
+ // Map fn contents to IDs of similar functions in other modules
+ void spirvbin_t::mapFnBodies()
+ {
+ static const std::uint32_t softTypeIdLimit = 19071; // small prime. TODO: get from options
+ static const std::uint32_t firstMappedID = 6203; // offset into ID space
+
+ // Initial approach: go through some high priority opcodes first and assign them
+ // hash values.
+
+ spv::Id fnId = spv::NoResult;
+ std::vector<unsigned> instPos;
+ instPos.reserve(unsigned(spv.size()) / 16); // initial estimate; can grow if needed.
+
+ // Build local table of instruction start positions
+ process(
+ [&](spv::Op, unsigned start) { instPos.push_back(start); return true; },
+ op_fn_nop);
+
+ if (errorLatch)
+ return;
+
+ // Window size for context-sensitive canonicalization values
+ // Empirical best size from a single data set. TODO: Would be a good tunable.
+ // We essentially perform a little convolution around each instruction,
+ // to capture the flavor of nearby code, to hopefully match to similar
+ // code in other modules.
+ static const unsigned windowSize = 2;
+
+ for (unsigned entry = 0; entry < unsigned(instPos.size()); ++entry) {
+ const unsigned start = instPos[entry];
+ const spv::Op opCode = asOpCode(start);
+
+ if (opCode == spv::OpFunction)
+ fnId = asId(start + 2);
+
+ if (opCode == spv::OpFunctionEnd)
+ fnId = spv::NoResult;
+
+ if (fnId != spv::NoResult) { // if inside a function
+ if (spv::InstructionDesc[opCode].hasResult()) {
+ const unsigned word = start + (spv::InstructionDesc[opCode].hasType() ? 2 : 1);
+ const spv::Id resId = asId(word);
+ std::uint32_t hashval = fnId * 17; // small prime
+
+ for (unsigned i = entry-1; i >= entry-windowSize; --i) {
+ if (asOpCode(instPos[i]) == spv::OpFunction)
+ break;
+ hashval = hashval * 30103 + asOpCodeHash(instPos[i]); // 30103 = semiarbitrary prime
+ }
+
+ for (unsigned i = entry; i <= entry + windowSize; ++i) {
+ if (asOpCode(instPos[i]) == spv::OpFunctionEnd)
+ break;
+ hashval = hashval * 30103 + asOpCodeHash(instPos[i]); // 30103 = semiarbitrary prime
+ }
+
+ if (isOldIdUnmapped(resId)) {
+ localId(resId, nextUnusedId(hashval % softTypeIdLimit + firstMappedID));
+ if (errorLatch)
+ return;
+ }
+
+ }
+ }
+ }
+
+ spv::Op thisOpCode(spv::OpNop);
+ std::unordered_map<int, int> opCounter;
+ int idCounter(0);
+ fnId = spv::NoResult;
+
+ process(
+ [&](spv::Op opCode, unsigned start) {
+ switch (opCode) {
+ case spv::OpFunction:
+ // Reset counters at each function
+ idCounter = 0;
+ opCounter.clear();
+ fnId = asId(start + 2);
+ break;
+
+ case spv::OpImageSampleImplicitLod:
+ case spv::OpImageSampleExplicitLod:
+ case spv::OpImageSampleDrefImplicitLod:
+ case spv::OpImageSampleDrefExplicitLod:
+ case spv::OpImageSampleProjImplicitLod:
+ case spv::OpImageSampleProjExplicitLod:
+ case spv::OpImageSampleProjDrefImplicitLod:
+ case spv::OpImageSampleProjDrefExplicitLod:
+ case spv::OpDot:
+ case spv::OpCompositeExtract:
+ case spv::OpCompositeInsert:
+ case spv::OpVectorShuffle:
+ case spv::OpLabel:
+ case spv::OpVariable:
+
+ case spv::OpAccessChain:
+ case spv::OpLoad:
+ case spv::OpStore:
+ case spv::OpCompositeConstruct:
+ case spv::OpFunctionCall:
+ ++opCounter[opCode];
+ idCounter = 0;
+ thisOpCode = opCode;
+ break;
+ default:
+ thisOpCode = spv::OpNop;
+ }
+
+ return false;
+ },
+
+ [&](spv::Id& id) {
+ if (thisOpCode != spv::OpNop) {
+ ++idCounter;
+ const std::uint32_t hashval = opCounter[thisOpCode] * thisOpCode * 50047 + idCounter + fnId * 117;
+
+ if (isOldIdUnmapped(id))
+ localId(id, nextUnusedId(hashval % softTypeIdLimit + firstMappedID));
+ }
+ });
+ }
+
+ // EXPERIMENTAL: forward IO and uniform load/stores into operands
+ // This produces invalid Schema-0 SPIRV
+ void spirvbin_t::forwardLoadStores()
+ {
+ idset_t fnLocalVars; // set of function local vars
+ idmap_t idMap; // Map of load result IDs to what they load
+
+ // EXPERIMENTAL: Forward input and access chain loads into consumptions
+ process(
+ [&](spv::Op opCode, unsigned start) {
+ // Add inputs and uniforms to the map
+ if ((opCode == spv::OpVariable && asWordCount(start) == 4) &&
+ (spv[start+3] == spv::StorageClassUniform ||
+ spv[start+3] == spv::StorageClassUniformConstant ||
+ spv[start+3] == spv::StorageClassInput))
+ fnLocalVars.insert(asId(start+2));
+
+ if (opCode == spv::OpAccessChain && fnLocalVars.count(asId(start+3)) > 0)
+ fnLocalVars.insert(asId(start+2));
+
+ if (opCode == spv::OpLoad && fnLocalVars.count(asId(start+3)) > 0) {
+ idMap[asId(start+2)] = asId(start+3);
+ stripInst(start);
+ }
+
+ return false;
+ },
+
+ [&](spv::Id& id) { if (idMap.find(id) != idMap.end()) id = idMap[id]; }
+ );
+
+ if (errorLatch)
+ return;
+
+ // EXPERIMENTAL: Implicit output stores
+ fnLocalVars.clear();
+ idMap.clear();
+
+ process(
+ [&](spv::Op opCode, unsigned start) {
+ // Add inputs and uniforms to the map
+ if ((opCode == spv::OpVariable && asWordCount(start) == 4) &&
+ (spv[start+3] == spv::StorageClassOutput))
+ fnLocalVars.insert(asId(start+2));
+
+ if (opCode == spv::OpStore && fnLocalVars.count(asId(start+1)) > 0) {
+ idMap[asId(start+2)] = asId(start+1);
+ stripInst(start);
+ }
+
+ return false;
+ },
+ op_fn_nop);
+
+ if (errorLatch)
+ return;
+
+ process(
+ inst_fn_nop,
+ [&](spv::Id& id) { if (idMap.find(id) != idMap.end()) id = idMap[id]; }
+ );
+
+ if (errorLatch)
+ return;
+
+ strip(); // strip out data we decided to eliminate
+ }
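+
+ // A sketch of the forwarding in hypothetical SPIR-V assembly:
+ //
+ //     before:  %x = OpLoad %float %inVar
+ //              %y = OpFAdd %float %x %c
+ //     after:   %y = OpFAdd %float %inVar %c   ; load stripped, %x forwarded
+ //
+ // Consuming a pointer where a value is expected is why the result is
+ // invalid schema-0 SPIR-V, as flagged above.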
+
+ // optimize loads and stores
+ void spirvbin_t::optLoadStore()
+ {
+ idset_t fnLocalVars; // candidates for removal (only locals)
+ idmap_t idMap; // Map of load result IDs to what they load
+ blockmap_t blockMap; // Map of IDs to blocks they first appear in
+ int blockNum = 0; // block count, to avoid crossing flow control
+
+ // Find all the function local pointers stored at most once, and not via access chains
+ process(
+ [&](spv::Op opCode, unsigned start) {
+ const int wordCount = asWordCount(start);
+
+ // Count blocks, so we can avoid crossing flow control
+ if (isFlowCtrl(opCode))
+ ++blockNum;
+
+ // Add local variables to the map
+ if ((opCode == spv::OpVariable && spv[start+3] == spv::StorageClassFunction && asWordCount(start) == 4)) {
+ fnLocalVars.insert(asId(start+2));
+ return true;
+ }
+
+ // Drop vars referenced via access chain: they are no longer candidates
+ if ((opCode == spv::OpAccessChain || opCode == spv::OpInBoundsAccessChain) && fnLocalVars.count(asId(start+3)) > 0) {
+ fnLocalVars.erase(asId(start+3));
+ idMap.erase(asId(start+3));
+ return true;
+ }
+
+ if (opCode == spv::OpLoad && fnLocalVars.count(asId(start+3)) > 0) {
+ const spv::Id varId = asId(start+3);
+
+ // Avoid loads before stores
+ if (idMap.find(varId) == idMap.end()) {
+ fnLocalVars.erase(varId);
+ idMap.erase(varId);
+ }
+
+ // don't do for volatile references
+ if (wordCount > 4 && (spv[start+4] & spv::MemoryAccessVolatileMask)) {
+ fnLocalVars.erase(varId);
+ idMap.erase(varId);
+ }
+
+ // Handle flow control
+ if (blockMap.find(varId) == blockMap.end()) {
+ blockMap[varId] = blockNum; // track block we found it in.
+ } else if (blockMap[varId] != blockNum) {
+ fnLocalVars.erase(varId); // Ignore if crosses flow control
+ idMap.erase(varId);
+ }
+
+ return true;
+ }
+
+ if (opCode == spv::OpStore && fnLocalVars.count(asId(start+1)) > 0) {
+ const spv::Id varId = asId(start+1);
+
+ if (idMap.find(varId) == idMap.end()) {
+ idMap[varId] = asId(start+2);
+ } else {
+ // Remove if it has more than one store to the same pointer
+ fnLocalVars.erase(varId);
+ idMap.erase(varId);
+ }
+
+ // don't do for volatile references
+ if (wordCount > 3 && (spv[start+3] & spv::MemoryAccessVolatileMask)) {
+ fnLocalVars.erase(varId);
+ idMap.erase(varId);
+ }
+
+ // Handle flow control
+ if (blockMap.find(varId) == blockMap.end()) {
+ blockMap[varId] = blockNum; // track block we found it in.
+ } else if (blockMap[varId] != blockNum) {
+ fnLocalVars.erase(varId); // Ignore if crosses flow control
+ idMap.erase(varId);
+ }
+
+ return true;
+ }
+
+ return false;
+ },
+
+ // If local var id used anywhere else, don't eliminate
+ [&](spv::Id& id) {
+ if (fnLocalVars.count(id) > 0) {
+ fnLocalVars.erase(id);
+ idMap.erase(id);
+ }
+ }
+ );
+
+ if (errorLatch)
+ return;
+
+ process(
+ [&](spv::Op opCode, unsigned start) {
+ if (opCode == spv::OpLoad && fnLocalVars.count(asId(start+3)) > 0)
+ idMap[asId(start+2)] = idMap[asId(start+3)];
+ return false;
+ },
+ op_fn_nop);
+
+ if (errorLatch)
+ return;
+
+ // Chase replacements to their origins, in case there is a chain such as:
+ // 2 = store 1
+ // 3 = load 2
+ // 4 = store 3
+ // 5 = load 4
+ // We want to replace uses of 5 with 1.
+ for (const auto& idPair : idMap) {
+ spv::Id id = idPair.first;
+ while (idMap.find(id) != idMap.end()) // Chase to end of chain
+ id = idMap[id];
+
+ idMap[idPair.first] = id; // replace with final result
+ }
+
+ // Remove the load/store/variables for the ones we've discovered
+ process(
+ [&](spv::Op opCode, unsigned start) {
+ if ((opCode == spv::OpLoad && fnLocalVars.count(asId(start+3)) > 0) ||
+ (opCode == spv::OpStore && fnLocalVars.count(asId(start+1)) > 0) ||
+ (opCode == spv::OpVariable && fnLocalVars.count(asId(start+2)) > 0)) {
+
+ stripInst(start);
+ return true;
+ }
+
+ return false;
+ },
+
+ [&](spv::Id& id) {
+ if (idMap.find(id) != idMap.end()) id = idMap[id];
+ }
+ );
+
+ if (errorLatch)
+ return;
+
+ strip(); // strip out data we decided to eliminate
+ }
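+
+ // A sketch of the rewrite in hypothetical SPIR-V assembly: a function-local
+ // pointer with one store, loads in the same block, and no other uses
+ // collapses to direct use of the stored value:
+ //
+ //     before:  %v = OpVariable %_ptr_Function_float Function
+ //                   OpStore %v %val
+ //              %x = OpLoad %float %v
+ //              %y = OpFMul %float %x %x
+ //     after:   %y = OpFMul %float %val %val
+ //
+ // with %v, the OpStore, and the OpLoad all stripped.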
+
+ // remove bodies of uncalled functions
+ void spirvbin_t::dceFuncs()
+ {
+ msg(3, 2, std::string("Removing Dead Functions: "));
+
+ // TODO: There are more efficient ways to do this.
+ bool changed = true;
+
+ while (changed) {
+ changed = false;
+
+ for (auto fn = fnPos.begin(); fn != fnPos.end(); ) {
+ if (fn->first == entryPoint) { // don't DCE away the entry point!
+ ++fn;
+ continue;
+ }
+
+ const auto call_it = fnCalls.find(fn->first);
+
+ if (call_it == fnCalls.end() || call_it->second == 0) {
+ changed = true;
+ stripRange.push_back(fn->second);
+
+ // decrease counts of called functions
+ process(
+ [&](spv::Op opCode, unsigned start) {
+ if (opCode == spv::Op::OpFunctionCall) {
+ const auto call_it = fnCalls.find(asId(start + 3));
+ if (call_it != fnCalls.end()) {
+ if (--call_it->second <= 0)
+ fnCalls.erase(call_it);
+ }
+ }
+
+ return true;
+ },
+ op_fn_nop,
+ fn->second.first,
+ fn->second.second);
+
+ if (errorLatch)
+ return;
+
+ fn = fnPos.erase(fn);
+ } else ++fn;
+ }
+ }
+ }
+
+ // remove unused function variables + decorations
+ void spirvbin_t::dceVars()
+ {
+ msg(3, 2, std::string("DCE Vars: "));
+
+ std::unordered_map<spv::Id, int> varUseCount;
+
+ // Count function variable use
+ process(
+ [&](spv::Op opCode, unsigned start) {
+ if (opCode == spv::OpVariable) {
+ ++varUseCount[asId(start+2)];
+ return true;
+ } else if (opCode == spv::OpEntryPoint) {
+ const int wordCount = asWordCount(start);
+ for (int i = 4; i < wordCount; i++) {
+ ++varUseCount[asId(start+i)];
+ }
+ return true;
+ } else
+ return false;
+ },
+
+ [&](spv::Id& id) { if (varUseCount[id]) ++varUseCount[id]; }
+ );
+
+ if (errorLatch)
+ return;
+
+ // Remove single-use function variables + associated decorations and names
+ process(
+ [&](spv::Op opCode, unsigned start) {
+ spv::Id id = spv::NoResult;
+ if (opCode == spv::OpVariable)
+ id = asId(start+2);
+ if (opCode == spv::OpDecorate || opCode == spv::OpName)
+ id = asId(start+1);
+
+ if (id != spv::NoResult && varUseCount[id] == 1)
+ stripInst(start);
+
+ return true;
+ },
+ op_fn_nop);
+ }
+
+ // remove unused types
+ void spirvbin_t::dceTypes()
+ {
+ std::vector<bool> isType(bound(), false);
+
+ // for speed, make O(1) way to get to type query (map is log(n))
+ for (const auto typeStart : typeConstPos)
+ isType[asTypeConstId(typeStart)] = true;
+
+ std::unordered_map<spv::Id, int> typeUseCount;
+
+ // This is not the most efficient algorithm, but this is an offline tool, and
+ // it's easy to write this way. Can be improved opportunistically if needed.
+ bool changed = true;
+ while (changed) {
+ changed = false;
+ strip();
+ typeUseCount.clear();
+
+ // Count total type usage
+ process(inst_fn_nop,
+ [&](spv::Id& id) { if (isType[id]) ++typeUseCount[id]; }
+ );
+
+ if (errorLatch)
+ return;
+
+ // Remove single reference types
+ for (const auto typeStart : typeConstPos) {
+ const spv::Id typeId = asTypeConstId(typeStart);
+ if (typeUseCount[typeId] == 1) {
+ changed = true;
+ --typeUseCount[typeId];
+ stripInst(typeStart);
+ }
+ }
+
+ if (errorLatch)
+ return;
+ }
+ }
+
+#ifdef NOTDEF
+ bool spirvbin_t::matchType(const spirvbin_t::globaltypes_t& globalTypes, spv::Id lt, spv::Id gt) const
+ {
+ // Find the local type id "lt" and global type id "gt"
+ const auto lt_it = typeConstPosR.find(lt);
+ if (lt_it == typeConstPosR.end())
+ return false;
+
+ const auto typeStart = lt_it->second;
+
+ // Search for entry in global table
+ const auto gtype = globalTypes.find(gt);
+ if (gtype == globalTypes.end())
+ return false;
+
+ const auto& gdata = gtype->second;
+
+ // local wordcount and opcode
+ const int wordCount = asWordCount(typeStart);
+ const spv::Op opCode = asOpCode(typeStart);
+
+ // no type match if opcodes don't match, or operand count doesn't match
+ if (opCode != opOpCode(gdata[0]) || wordCount != opWordCount(gdata[0]))
+ return false;
+
+ const unsigned numOperands = wordCount - 2; // all types have a result
+
+ const auto cmpIdRange = [&](range_t range) {
+ for (int x=range.first; x<std::min(range.second, wordCount); ++x)
+ if (!matchType(globalTypes, asId(typeStart+x), gdata[x]))
+ return false;
+ return true;
+ };
+
+ const auto cmpConst = [&]() { return cmpIdRange(constRange(opCode)); };
+ const auto cmpSubType = [&]() { return cmpIdRange(typeRange(opCode)); };
+
+ // Compare literals in range [start,end)
+ const auto cmpLiteral = [&]() {
+ const auto range = literalRange(opCode);
+ return std::equal(spir.begin() + typeStart + range.first,
+ spir.begin() + typeStart + std::min(range.second, wordCount),
+ gdata.begin() + range.first);
+ };
+
+ assert(isTypeOp(opCode) || isConstOp(opCode));
+
+ switch (opCode) {
+ case spv::OpTypeOpaque: // TODO: disable until we compare the literal strings.
+ case spv::OpTypeQueue: return false;
+ case spv::OpTypeEvent: // fall through...
+ case spv::OpTypeDeviceEvent: // ...
+ case spv::OpTypeReserveId: return false;
+ // for samplers, we don't handle the optional parameters yet
+ case spv::OpTypeSampler: return cmpLiteral() && cmpConst() && cmpSubType() && wordCount == 8;
+ default: return cmpLiteral() && cmpConst() && cmpSubType();
+ }
+ }
+
+ // Look for an equivalent type in the globalTypes map
+ spv::Id spirvbin_t::findType(const spirvbin_t::globaltypes_t& globalTypes, spv::Id lt) const
+ {
+ // Try a recursive type match on each in turn, and return a match if we find one
+ for (const auto& gt : globalTypes)
+ if (matchType(globalTypes, lt, gt.first))
+ return gt.first;
+
+ return spv::NoType;
+ }
+#endif // NOTDEF
+
+ // Return start position in SPV of given Id. error if not found.
+ unsigned spirvbin_t::idPos(spv::Id id) const
+ {
+ const auto tid_it = idPosR.find(id);
+ if (tid_it == idPosR.end()) {
+ error("ID not found");
+ return 0;
+ }
+
+ return tid_it->second;
+ }
+
+ // Hash types to canonical values. Collisions are possible (with a hash of
+ // this kind they're unavoidable); it's up to the caller to handle them gracefully.
+ std::uint32_t spirvbin_t::hashType(unsigned typeStart) const
+ {
+ const unsigned wordCount = asWordCount(typeStart);
+ const spv::Op opCode = asOpCode(typeStart);
+
+ switch (opCode) {
+ case spv::OpTypeVoid: return 0;
+ case spv::OpTypeBool: return 1;
+ case spv::OpTypeInt: return 3 + (spv[typeStart+3]);
+ case spv::OpTypeFloat: return 5;
+ case spv::OpTypeVector:
+ return 6 + hashType(idPos(spv[typeStart+2])) * (spv[typeStart+3] - 1);
+ case spv::OpTypeMatrix:
+ return 30 + hashType(idPos(spv[typeStart+2])) * (spv[typeStart+3] - 1);
+ case spv::OpTypeImage:
+ return 120 + hashType(idPos(spv[typeStart+2])) +
+ spv[typeStart+3] + // dimensionality
+ spv[typeStart+4] * 8 * 16 + // depth
+ spv[typeStart+5] * 4 * 16 + // arrayed
+ spv[typeStart+6] * 2 * 16 + // multisampled
+ spv[typeStart+7] * 1 * 16; // format
+ case spv::OpTypeSampler:
+ return 500;
+ case spv::OpTypeSampledImage:
+ return 502;
+ case spv::OpTypeArray:
+ return 501 + hashType(idPos(spv[typeStart+2])) * spv[typeStart+3];
+ case spv::OpTypeRuntimeArray:
+ return 5000 + hashType(idPos(spv[typeStart+2]));
+ case spv::OpTypeStruct:
+ {
+ std::uint32_t hash = 10000;
+ for (unsigned w=2; w < wordCount; ++w)
+ hash += w * hashType(idPos(spv[typeStart+w]));
+ return hash;
+ }
+
+ case spv::OpTypeOpaque: return 6000 + spv[typeStart+2];
+ case spv::OpTypePointer: return 100000 + hashType(idPos(spv[typeStart+3]));
+ case spv::OpTypeFunction:
+ {
+ std::uint32_t hash = 200000;
+ for (unsigned w=2; w < wordCount; ++w)
+ hash += w * hashType(idPos(spv[typeStart+w]));
+ return hash;
+ }
+
+ case spv::OpTypeEvent: return 300000;
+ case spv::OpTypeDeviceEvent: return 300001;
+ case spv::OpTypeReserveId: return 300002;
+ case spv::OpTypeQueue: return 300003;
+ case spv::OpTypePipe: return 300004;
+ case spv::OpConstantTrue: return 300007;
+ case spv::OpConstantFalse: return 300008;
+ case spv::OpConstantComposite:
+ {
+ std::uint32_t hash = 300011 + hashType(idPos(spv[typeStart+1]));
+ for (unsigned w=3; w < wordCount; ++w)
+ hash += w * hashType(idPos(spv[typeStart+w]));
+ return hash;
+ }
+ case spv::OpConstant:
+ {
+ std::uint32_t hash = 400011 + hashType(idPos(spv[typeStart+1]));
+ for (unsigned w=3; w < wordCount; ++w)
+ hash += w * spv[typeStart+w];
+ return hash;
+ }
+ case spv::OpConstantNull:
+ {
+ std::uint32_t hash = 500009 + hashType(idPos(spv[typeStart+1]));
+ return hash;
+ }
+ case spv::OpConstantSampler:
+ {
+ std::uint32_t hash = 600011 + hashType(idPos(spv[typeStart+1]));
+ for (unsigned w=3; w < wordCount; ++w)
+ hash += w * spv[typeStart+w];
+ return hash;
+ }
+
+ default:
+ error("unknown type opcode");
+ return 0;
+ }
+ }
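+
+ // A worked example of the hashing above: a 4-component vector of float
+ // hashes, in any module, to
+ //
+ //     hashType(OpTypeFloat)  = 5
+ //     hashType(OpTypeVector) = 6 + 5 * (4 - 1) = 21
+ //
+ // so equivalent types tend to map to equal IDs across modules; collisions
+ // are resolved via nextUnusedId() in mapTypeConst() below.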
+
+ void spirvbin_t::mapTypeConst()
+ {
+ globaltypes_t globalTypeMap;
+
+ msg(3, 2, std::string("Remapping Consts & Types: "));
+
+ static const std::uint32_t softTypeIdLimit = 3011; // small prime. TODO: get from options
+ static const std::uint32_t firstMappedID = 8; // offset into ID space
+
+ for (auto& typeStart : typeConstPos) {
+ const spv::Id resId = asTypeConstId(typeStart);
+ const std::uint32_t hashval = hashType(typeStart);
+
+ if (errorLatch)
+ return;
+
+ if (isOldIdUnmapped(resId)) {
+ localId(resId, nextUnusedId(hashval % softTypeIdLimit + firstMappedID));
+ if (errorLatch)
+ return;
+ }
+ }
+ }
+
+ // Strip a single binary by removing ranges given in stripRange
+ void spirvbin_t::strip()
+ {
+ if (stripRange.empty()) // nothing to do
+ return;
+
+ // Sort strip ranges in order of traversal
+ std::sort(stripRange.begin(), stripRange.end());
+
+ // Compact the binary in place, skipping over the ranges to be stripped
+ // We'll step this iterator through the strip ranges as we go through the binary
+ auto strip_it = stripRange.begin();
+
+ int strippedPos = 0;
+ for (unsigned word = 0; word < unsigned(spv.size()); ++word) {
+ while (strip_it != stripRange.end() && word >= strip_it->second)
+ ++strip_it;
+
+ if (strip_it == stripRange.end() || word < strip_it->first || word >= strip_it->second)
+ spv[strippedPos++] = spv[word];
+ }
+
+ spv.resize(strippedPos);
+ stripRange.clear();
+
+ buildLocalMaps();
+ }
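+
+ // A small worked example of the compaction above: with spv = [A B C D E]
+ // and stripRange = { [1,3) }, the loop keeps words 0, 3, and 4, leaving
+ // [A D E] after the resize, and buildLocalMaps() recomputes the positions.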
+
+ // Remap a single binary, applying the requested remapping options
+ void spirvbin_t::remap(std::uint32_t opts)
+ {
+ options = opts;
+
+ // Set up opcode tables from SpvDoc
+ spv::Parameterize();
+
+ validate(); // validate header
+ buildLocalMaps(); // build ID maps
+
+ msg(3, 4, std::string("ID bound: ") + std::to_string(bound()));
+
+ if (options & STRIP) stripDebug();
+ if (errorLatch) return;
+
+ strip(); // strip out data we decided to eliminate
+ if (errorLatch) return;
+
+ if (options & OPT_LOADSTORE) optLoadStore();
+ if (errorLatch) return;
+
+ if (options & OPT_FWD_LS) forwardLoadStores();
+ if (errorLatch) return;
+
+ if (options & DCE_FUNCS) dceFuncs();
+ if (errorLatch) return;
+
+ if (options & DCE_VARS) dceVars();
+ if (errorLatch) return;
+
+ if (options & DCE_TYPES) dceTypes();
+ if (errorLatch) return;
+
+ strip(); // strip out data we decided to eliminate
+ if (errorLatch) return;
+
+ stripDeadRefs(); // remove references to things we DCEed
+ if (errorLatch) return;
+
+ // after the last strip, we must clean any debug info referring to now-deleted data
+
+ if (options & MAP_TYPES) mapTypeConst();
+ if (errorLatch) return;
+
+ if (options & MAP_NAMES) mapNames();
+ if (errorLatch) return;
+
+ if (options & MAP_FUNCS) mapFnBodies();
+ if (errorLatch) return;
+
+ if (options & MAP_ALL) {
+ mapRemainder(); // map any unmapped IDs
+ if (errorLatch) return;
+
+ applyMap(); // Now remap each shader to the new IDs we've come up with
+ if (errorLatch) return;
+ }
+ }
+
+ // remap from a memory image
+ void spirvbin_t::remap(std::vector<std::uint32_t>& in_spv, std::uint32_t opts)
+ {
+ spv.swap(in_spv);
+ remap(opts);
+ spv.swap(in_spv);
+ }
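+
+ // A sketch of typical use of this entry point ('words' being a caller-
+ // supplied SPIR-V module loaded as 32-bit words):
+ //
+ //     spv::spirvbin_t remapper;
+ //     spv::spirvbin_t::registerErrorHandler(
+ //         [](const std::string& msg) { std::cerr << msg << std::endl; });
+ //     remapper.remap(words, spv::spirvbin_t::DO_EVERYTHING);
+ //     // 'words' now holds the remapped module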
+
+} // namespace spv
+
+#endif // defined (use_cpp11)
+
diff --git a/thirdparty/glslang/SPIRV/SPVRemapper.h b/thirdparty/glslang/SPIRV/SPVRemapper.h
new file mode 100644
index 0000000000..fa61bb94d8
--- /dev/null
+++ b/thirdparty/glslang/SPIRV/SPVRemapper.h
@@ -0,0 +1,304 @@
+//
+// Copyright (C) 2015 LunarG, Inc.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+#ifndef SPIRVREMAPPER_H
+#define SPIRVREMAPPER_H
+
+#include <string>
+#include <vector>
+#include <cstdlib>
+#include <exception>
+
+namespace spv {
+
+// MSVC defines __cplusplus as an older value, even when it supports almost all of C++11.
+// We handle that here by making our own symbol.
+#if __cplusplus >= 201103L || (defined(_MSC_VER) && _MSC_VER >= 1700)
+# define use_cpp11 1
+#endif
+
+class spirvbin_base_t
+{
+public:
+ enum Options {
+ NONE = 0,
+ STRIP = (1<<0),
+ MAP_TYPES = (1<<1),
+ MAP_NAMES = (1<<2),
+ MAP_FUNCS = (1<<3),
+ DCE_FUNCS = (1<<4),
+ DCE_VARS = (1<<5),
+ DCE_TYPES = (1<<6),
+ OPT_LOADSTORE = (1<<7),
+ OPT_FWD_LS = (1<<8), // EXPERIMENTAL: PRODUCES INVALID SCHEMA-0 SPIRV
+ MAP_ALL = (MAP_TYPES | MAP_NAMES | MAP_FUNCS),
+ DCE_ALL = (DCE_FUNCS | DCE_VARS | DCE_TYPES),
+ OPT_ALL = (OPT_LOADSTORE),
+
+ ALL_BUT_STRIP = (MAP_ALL | DCE_ALL | OPT_ALL),
+ DO_EVERYTHING = (STRIP | ALL_BUT_STRIP)
+ };
+};
+
+} // namespace spv
+
+#if !defined (use_cpp11)
+#include <cstdio>
+#include <cstdint>
+
+namespace spv {
+class spirvbin_t : public spirvbin_base_t
+{
+public:
+ spirvbin_t(int /*verbose = 0*/) { }
+
+ void remap(std::vector<std::uint32_t>& /*spv*/, unsigned int /*opts = 0*/)
+ {
+ printf("Tool not compiled for C++11, which is required for SPIR-V remapping.\n");
+ exit(5);
+ }
+};
+
+} // namespace spv
+
+#else // defined (use_cpp11)
+
+#include <functional>
+#include <cstdint>
+#include <unordered_map>
+#include <unordered_set>
+#include <map>
+#include <set>
+#include <cassert>
+
+#include "spirv.hpp"
+#include "spvIR.h"
+
+namespace spv {
+
+// class to hold SPIR-V binary data for remapping, DCE, and debug stripping
+class spirvbin_t : public spirvbin_base_t
+{
+public:
+ spirvbin_t(int verbose = 0) : entryPoint(spv::NoResult), largestNewId(0), verbose(verbose), errorLatch(false)
+ { }
+
+ virtual ~spirvbin_t() { }
+
+ // remap on an existing binary in memory
+ void remap(std::vector<std::uint32_t>& spv, std::uint32_t opts = DO_EVERYTHING);
+
+ // Type for error/log handler functions
+ typedef std::function<void(const std::string&)> errorfn_t;
+ typedef std::function<void(const std::string&)> logfn_t;
+
+ // Register error/log handling functions (can be lambda fn / functor / etc)
+ static void registerErrorHandler(errorfn_t handler) { errorHandler = handler; }
+ static void registerLogHandler(logfn_t handler) { logHandler = handler; }
+
+protected:
+ // This can be overridden to provide other message behavior if needed
+ virtual void msg(int minVerbosity, int indent, const std::string& txt) const;
+
+private:
+ // Local to global, or global to local ID map
+ typedef std::unordered_map<spv::Id, spv::Id> idmap_t;
+ typedef std::unordered_set<spv::Id> idset_t;
+ typedef std::unordered_map<spv::Id, int> blockmap_t;
+
+ void remap(std::uint32_t opts = DO_EVERYTHING);
+
+ // Map of names to IDs
+ typedef std::unordered_map<std::string, spv::Id> namemap_t;
+
+ typedef std::uint32_t spirword_t;
+
+ typedef std::pair<unsigned, unsigned> range_t;
+ typedef std::function<void(spv::Id&)> idfn_t;
+ typedef std::function<bool(spv::Op, unsigned start)> instfn_t;
+
+ // Special Values for ID map:
+ static const spv::Id unmapped; // unchanged from default value
+ static const spv::Id unused; // unused ID
+ static const int header_size; // SPIR header = 5 words
+
+ class id_iterator_t;
+
+ // For mapping type entries between different shaders
+ typedef std::vector<spirword_t> typeentry_t;
+ typedef std::map<spv::Id, typeentry_t> globaltypes_t;
+
+ // A set that preserves position order, and a reverse map
+ typedef std::set<int> posmap_t;
+ typedef std::unordered_map<spv::Id, int> posmap_rev_t;
+
+ // Maps an ID to the size of its base type, if known.
+ typedef std::unordered_map<spv::Id, unsigned> typesize_map_t;
+
+ // handle error
+ void error(const std::string& txt) const { errorLatch = true; errorHandler(txt); }
+
+ bool isConstOp(spv::Op opCode) const;
+ bool isTypeOp(spv::Op opCode) const;
+ bool isStripOp(spv::Op opCode) const;
+ bool isFlowCtrl(spv::Op opCode) const;
+ range_t literalRange(spv::Op opCode) const;
+ range_t typeRange(spv::Op opCode) const;
+ range_t constRange(spv::Op opCode) const;
+ unsigned typeSizeInWords(spv::Id id) const;
+ unsigned idTypeSizeInWords(spv::Id id) const;
+
+ spv::Id& asId(unsigned word) { return spv[word]; }
+ const spv::Id& asId(unsigned word) const { return spv[word]; }
+ spv::Op asOpCode(unsigned word) const { return opOpCode(spv[word]); }
+ std::uint32_t asOpCodeHash(unsigned word);
+ spv::Decoration asDecoration(unsigned word) const { return spv::Decoration(spv[word]); }
+ unsigned asWordCount(unsigned word) const { return opWordCount(spv[word]); }
+ spv::Id asTypeConstId(unsigned word) const { return asId(word + (isTypeOp(asOpCode(word)) ? 1 : 2)); }
+ unsigned idPos(spv::Id id) const;
+
+ static unsigned opWordCount(spirword_t data) { return data >> spv::WordCountShift; }
+ static spv::Op opOpCode(spirword_t data) { return spv::Op(data & spv::OpCodeMask); }
+
+ // Header access & set methods
+ spirword_t magic() const { return spv[0]; } // return magic number
+ spirword_t bound() const { return spv[3]; } // return Id bound from header
+ spirword_t bound(spirword_t b) { return spv[3] = b; }
+ spirword_t genmagic() const { return spv[2]; } // generator magic
+ spirword_t genmagic(spirword_t m) { return spv[2] = m; }
+ spirword_t schemaNum() const { return spv[4]; } // schema number from header
+
+ // Mapping fns: get
+ spv::Id localId(spv::Id id) const { return idMapL[id]; }
+
+ // Mapping fns: set
+ inline spv::Id localId(spv::Id id, spv::Id newId);
+ void countIds(spv::Id id);
+
+ // Return next unused new local ID.
+ // NOTE: boost::dynamic_bitset would be more efficient due to find_next(),
+ // which std::vector<bool> doesn't have.
+ inline spv::Id nextUnusedId(spv::Id id);
+
+ void buildLocalMaps();
+ std::string literalString(unsigned word) const; // Return literal as a std::string
+ int literalStringWords(const std::string& str) const { return (int(str.size())+4)/4; }
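+ // e.g. "main" needs (4+4)/4 = 2 words: one full word of characters plus
+ // a word holding the nul terminator and padding.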
+
+ bool isNewIdMapped(spv::Id newId) const { return isMapped(newId); }
+ bool isOldIdUnmapped(spv::Id oldId) const { return localId(oldId) == unmapped; }
+ bool isOldIdUnused(spv::Id oldId) const { return localId(oldId) == unused; }
+ bool isOldIdMapped(spv::Id oldId) const { return !isOldIdUnused(oldId) && !isOldIdUnmapped(oldId); }
+ bool isFunction(spv::Id oldId) const { return fnPos.find(oldId) != fnPos.end(); }
+
+ // bool matchType(const globaltypes_t& globalTypes, spv::Id lt, spv::Id gt) const;
+ // spv::Id findType(const globaltypes_t& globalTypes, spv::Id lt) const;
+ std::uint32_t hashType(unsigned typeStart) const;
+
+ spirvbin_t& process(instfn_t, idfn_t, unsigned begin = 0, unsigned end = 0);
+ int processInstruction(unsigned word, instfn_t, idfn_t);
+
+ void validate() const;
+ void mapTypeConst();
+ void mapFnBodies();
+ void optLoadStore();
+ void dceFuncs();
+ void dceVars();
+ void dceTypes();
+ void mapNames();
+ void foldIds(); // fold IDs to smallest space
+ void forwardLoadStores(); // load store forwarding (EXPERIMENTAL)
+ void offsetIds(); // create relative offset IDs
+
+ void applyMap(); // remap per local name map
+ void mapRemainder(); // map any IDs we haven't touched yet
+ void stripDebug(); // strip all debug info
+ void stripDeadRefs(); // strips debug info for now-dead references after DCE
+ void strip(); // remove the ranges accumulated in stripRange
+
+ std::vector<spirword_t> spv; // SPIR words
+
+ namemap_t nameMap; // ID names from OpName
+
+ // Since we also want to do binary ops, we can't use std::vector<bool>. We could use
+ // boost::dynamic_bitset, but we're trying to avoid a boost dependency.
+ typedef std::uint64_t bits_t;
+ std::vector<bits_t> mapped; // which new IDs have been mapped
+ static const int mBits = sizeof(bits_t) * 4;
+
+ bool isMapped(spv::Id id) const { return id < maxMappedId() && ((mapped[id/mBits] & (1LL<<(id%mBits))) != 0); }
+ void setMapped(spv::Id id) { resizeMapped(id); mapped[id/mBits] |= (1LL<<(id%mBits)); }
+ void resizeMapped(spv::Id id) { if (id >= maxMappedId()) mapped.resize(id/mBits+1, 0); }
+ size_t maxMappedId() const { return mapped.size() * mBits; }
+
+ // Add a strip range for a given instruction starting at 'start'
+ // Note: avoiding brace initializers to please older versions of MSVC.
+ void stripInst(unsigned start) { stripRange.push_back(range_t(start, start + asWordCount(start))); }
+
+ // Function start and end. use unordered_map because we'll have
+ // many fewer functions than IDs.
+ std::unordered_map<spv::Id, range_t> fnPos;
+
+ // Which functions are called, anywhere in the module, with a call count
+ std::unordered_map<spv::Id, int> fnCalls;
+
+ posmap_t typeConstPos; // word positions that define types & consts (ordered)
+ posmap_rev_t idPosR; // reverse map from IDs to positions
+ typesize_map_t idTypeSizeMap; // maps each ID to its type size, if known.
+
+ std::vector<spv::Id> idMapL; // ID {M}ap from {L}ocal to {G}lobal IDs
+
+ spv::Id entryPoint; // module entry point
+ spv::Id largestNewId; // biggest new ID we have mapped anything to
+
+ // Sections of the binary to strip, given as [begin,end)
+ std::vector<range_t> stripRange;
+
+ // processing options:
+ std::uint32_t options;
+ int verbose; // verbosity level
+
+ // Error latch: this is set if the error handler is ever executed. It would be better to
+ // use a try/catch block and throw, but that's not desired for certain environments, so
+ // this is the alternative.
+ mutable bool errorLatch;
+
+ static errorfn_t errorHandler;
+ static logfn_t logHandler;
+};
+
+} // namespace spv
+
+#endif // defined (use_cpp11)
+#endif // SPIRVREMAPPER_H
diff --git a/thirdparty/glslang/SPIRV/SpvBuilder.cpp b/thirdparty/glslang/SPIRV/SpvBuilder.cpp
new file mode 100644
index 0000000000..4ef7e5fe7f
--- /dev/null
+++ b/thirdparty/glslang/SPIRV/SpvBuilder.cpp
@@ -0,0 +1,3058 @@
+//
+// Copyright (C) 2014-2015 LunarG, Inc.
+// Copyright (C) 2015-2018 Google, Inc.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+
+//
+// Helper for making SPIR-V IR. Generally, this is documented in the header
+// SpvBuilder.h.
+//
+
+#include <cassert>
+#include <cstdlib>
+
+#include <unordered_set>
+#include <algorithm>
+
+#include "SpvBuilder.h"
+
+#include "hex_float.h"
+
+#ifndef _WIN32
+ #include <cstdio>
+#endif
+
+namespace spv {
+
+Builder::Builder(unsigned int spvVersion, unsigned int magicNumber, SpvBuildLogger* buildLogger) :
+ spvVersion(spvVersion),
+ source(SourceLanguageUnknown),
+ sourceVersion(0),
+ sourceFileStringId(NoResult),
+ currentLine(0),
+ currentFile(nullptr),
+ emitOpLines(false),
+ addressModel(AddressingModelLogical),
+ memoryModel(MemoryModelGLSL450),
+ builderNumber(magicNumber),
+ buildPoint(0),
+ uniqueId(0),
+ entryPointFunction(0),
+ generatingOpCodeForSpecConst(false),
+ logger(buildLogger)
+{
+ clearAccessChain();
+}
+
+Builder::~Builder()
+{
+}
+
+Id Builder::import(const char* name)
+{
+ Instruction* import = new Instruction(getUniqueId(), NoType, OpExtInstImport);
+ import->addStringOperand(name);
+ module.mapInstruction(import);
+
+ imports.push_back(std::unique_ptr<Instruction>(import));
+ return import->getResultId();
+}
+
+// Emit instruction for non-filename-based #line directives (i.e. no filename
+// seen yet): emit an OpLine if we've been asked to emit OpLines and the line
+// number has changed since the last time, and is a valid line number.
+void Builder::setLine(int lineNum)
+{
+ if (lineNum != 0 && lineNum != currentLine) {
+ currentLine = lineNum;
+ if (emitOpLines)
+ addLine(sourceFileStringId, currentLine, 0);
+ }
+}
+
+// If no filename, do non-filename-based #line emit. Else do filename-based emit.
+// Emit OpLine if we've been asked to emit OpLines and the line number or filename
+// has changed since the last time, and line number is valid.
+void Builder::setLine(int lineNum, const char* filename)
+{
+ if (filename == nullptr) {
+ setLine(lineNum);
+ return;
+ }
+ if ((lineNum != 0 && lineNum != currentLine) || currentFile == nullptr ||
+ strncmp(filename, currentFile, strlen(currentFile) + 1) != 0) {
+ currentLine = lineNum;
+ currentFile = filename;
+ if (emitOpLines) {
+ spv::Id strId = getStringId(filename);
+ addLine(strId, currentLine, 0);
+ }
+ }
+}
+
+void Builder::addLine(Id fileName, int lineNum, int column)
+{
+ Instruction* line = new Instruction(OpLine);
+ line->addIdOperand(fileName);
+ line->addImmediateOperand(lineNum);
+ line->addImmediateOperand(column);
+ buildPoint->addInstruction(std::unique_ptr<Instruction>(line));
+}
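+
+// The emitted instruction has the form (hypothetical disassembly):
+//
+//     OpLine %fileNameId lineNum column
+//
+// and attaches source-location info to the instructions that follow it.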
+
+// For creating new groupedTypes (will return old type if the requested one was already made).
+Id Builder::makeVoidType()
+{
+ Instruction* type;
+ if (groupedTypes[OpTypeVoid].size() == 0) {
+ type = new Instruction(getUniqueId(), NoType, OpTypeVoid);
+ groupedTypes[OpTypeVoid].push_back(type);
+ constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(type));
+ module.mapInstruction(type);
+ } else
+ type = groupedTypes[OpTypeVoid].back();
+
+ return type->getResultId();
+}
+
+Id Builder::makeBoolType()
+{
+ Instruction* type;
+ if (groupedTypes[OpTypeBool].size() == 0) {
+ type = new Instruction(getUniqueId(), NoType, OpTypeBool);
+ groupedTypes[OpTypeBool].push_back(type);
+ constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(type));
+ module.mapInstruction(type);
+ } else
+ type = groupedTypes[OpTypeBool].back();
+
+ return type->getResultId();
+}
+
+Id Builder::makeSamplerType()
+{
+ Instruction* type;
+ if (groupedTypes[OpTypeSampler].size() == 0) {
+ type = new Instruction(getUniqueId(), NoType, OpTypeSampler);
+ groupedTypes[OpTypeSampler].push_back(type);
+ constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(type));
+ module.mapInstruction(type);
+ } else
+ type = groupedTypes[OpTypeSampler].back();
+
+ return type->getResultId();
+}
+
+Id Builder::makePointer(StorageClass storageClass, Id pointee)
+{
+ // try to find it
+ Instruction* type;
+ for (int t = 0; t < (int)groupedTypes[OpTypePointer].size(); ++t) {
+ type = groupedTypes[OpTypePointer][t];
+ if (type->getImmediateOperand(0) == (unsigned)storageClass &&
+ type->getIdOperand(1) == pointee)
+ return type->getResultId();
+ }
+
+ // not found, make it
+ type = new Instruction(getUniqueId(), NoType, OpTypePointer);
+ type->addImmediateOperand(storageClass);
+ type->addIdOperand(pointee);
+ groupedTypes[OpTypePointer].push_back(type);
+ constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(type));
+ module.mapInstruction(type);
+
+ return type->getResultId();
+}
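+
+// Most makeXxxType() builders in this file follow the same find-or-create
+// pattern seen above: scan groupedTypes[opcode] for a structurally identical
+// type and return its existing <id>, else append a fresh Instruction to both
+// groupedTypes and constantsTypesGlobals and map it into the module. In
+// sketch form (operandsMatch being a hypothetical per-type predicate):
+//
+//     for (Instruction* t : groupedTypes[op])
+//         if (operandsMatch(t))
+//             return t->getResultId();
+//     // ...otherwise build, register, and return a new type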
+
+Id Builder::makeForwardPointer(StorageClass storageClass)
+{
+ // Caching/uniquifying doesn't work here, because we don't know the
+ // pointee type and there can be multiple forward pointers of the same
+ // storage type. Somebody higher up in the stack must keep track.
+ Instruction* type = new Instruction(getUniqueId(), NoType, OpTypeForwardPointer);
+ type->addImmediateOperand(storageClass);
+ constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(type));
+ module.mapInstruction(type);
+
+ return type->getResultId();
+}
+
+Id Builder::makePointerFromForwardPointer(StorageClass storageClass, Id forwardPointerType, Id pointee)
+{
+ // try to find it
+ Instruction* type;
+ for (int t = 0; t < (int)groupedTypes[OpTypePointer].size(); ++t) {
+ type = groupedTypes[OpTypePointer][t];
+ if (type->getImmediateOperand(0) == (unsigned)storageClass &&
+ type->getIdOperand(1) == pointee)
+ return type->getResultId();
+ }
+
+ type = new Instruction(forwardPointerType, NoType, OpTypePointer);
+ type->addImmediateOperand(storageClass);
+ type->addIdOperand(pointee);
+ groupedTypes[OpTypePointer].push_back(type);
+ constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(type));
+ module.mapInstruction(type);
+
+ return type->getResultId();
+}
+
+Id Builder::makeIntegerType(int width, bool hasSign)
+{
+ // try to find it
+ Instruction* type;
+ for (int t = 0; t < (int)groupedTypes[OpTypeInt].size(); ++t) {
+ type = groupedTypes[OpTypeInt][t];
+ if (type->getImmediateOperand(0) == (unsigned)width &&
+ type->getImmediateOperand(1) == (hasSign ? 1u : 0u))
+ return type->getResultId();
+ }
+
+ // not found, make it
+ type = new Instruction(getUniqueId(), NoType, OpTypeInt);
+ type->addImmediateOperand(width);
+ type->addImmediateOperand(hasSign ? 1 : 0);
+ groupedTypes[OpTypeInt].push_back(type);
+ constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(type));
+ module.mapInstruction(type);
+
+ // deal with capabilities
+ switch (width) {
+ case 8:
+ case 16:
+ // these are currently handled by storage-type declarations and post processing
+ break;
+ case 64:
+ addCapability(CapabilityInt64);
+ break;
+ default:
+ break;
+ }
+
+ return type->getResultId();
+}
+
+Id Builder::makeFloatType(int width)
+{
+ // try to find it
+ Instruction* type;
+ for (int t = 0; t < (int)groupedTypes[OpTypeFloat].size(); ++t) {
+ type = groupedTypes[OpTypeFloat][t];
+ if (type->getImmediateOperand(0) == (unsigned)width)
+ return type->getResultId();
+ }
+
+ // not found, make it
+ type = new Instruction(getUniqueId(), NoType, OpTypeFloat);
+ type->addImmediateOperand(width);
+ groupedTypes[OpTypeFloat].push_back(type);
+ constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(type));
+ module.mapInstruction(type);
+
+ // deal with capabilities
+ switch (width) {
+ case 16:
+ // currently handled by storage-type declarations and post processing
+ break;
+ case 64:
+ addCapability(CapabilityFloat64);
+ break;
+ default:
+ break;
+ }
+
+ return type->getResultId();
+}
+
+// Make a struct without checking for duplication.
+// See makeStructResultType() for the non-decorated structs
+// needed as the result of some instructions; that variant
+// does check for duplicates.
+Id Builder::makeStructType(const std::vector<Id>& members, const char* name)
+{
+ // Don't look for previous one, because in the general case,
+ // structs can be duplicated except for decorations.
+
+ // not found, make it
+ Instruction* type = new Instruction(getUniqueId(), NoType, OpTypeStruct);
+ for (int op = 0; op < (int)members.size(); ++op)
+ type->addIdOperand(members[op]);
+ groupedTypes[OpTypeStruct].push_back(type);
+ constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(type));
+ module.mapInstruction(type);
+ addName(type->getResultId(), name);
+
+ return type->getResultId();
+}
+
+// Make a struct for the simple results of several instructions,
+// checking for duplication.
+Id Builder::makeStructResultType(Id type0, Id type1)
+{
+ // try to find it
+ Instruction* type;
+ for (int t = 0; t < (int)groupedTypes[OpTypeStruct].size(); ++t) {
+ type = groupedTypes[OpTypeStruct][t];
+ if (type->getNumOperands() != 2)
+ continue;
+ if (type->getIdOperand(0) != type0 ||
+ type->getIdOperand(1) != type1)
+ continue;
+ return type->getResultId();
+ }
+
+ // not found, make it
+ std::vector<spv::Id> members;
+ members.push_back(type0);
+ members.push_back(type1);
+
+ return makeStructType(members, "ResType");
+}
+
+Id Builder::makeVectorType(Id component, int size)
+{
+ // try to find it
+ Instruction* type;
+ for (int t = 0; t < (int)groupedTypes[OpTypeVector].size(); ++t) {
+ type = groupedTypes[OpTypeVector][t];
+ if (type->getIdOperand(0) == component &&
+ type->getImmediateOperand(1) == (unsigned)size)
+ return type->getResultId();
+ }
+
+ // not found, make it
+ type = new Instruction(getUniqueId(), NoType, OpTypeVector);
+ type->addIdOperand(component);
+ type->addImmediateOperand(size);
+ groupedTypes[OpTypeVector].push_back(type);
+ constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(type));
+ module.mapInstruction(type);
+
+ return type->getResultId();
+}
+
+Id Builder::makeMatrixType(Id component, int cols, int rows)
+{
+ assert(cols <= maxMatrixSize && rows <= maxMatrixSize);
+
+ Id column = makeVectorType(component, rows);
+
+ // try to find it
+ Instruction* type;
+ for (int t = 0; t < (int)groupedTypes[OpTypeMatrix].size(); ++t) {
+ type = groupedTypes[OpTypeMatrix][t];
+ if (type->getIdOperand(0) == column &&
+ type->getImmediateOperand(1) == (unsigned)cols)
+ return type->getResultId();
+ }
+
+ // not found, make it
+ type = new Instruction(getUniqueId(), NoType, OpTypeMatrix);
+ type->addIdOperand(column);
+ type->addImmediateOperand(cols);
+ groupedTypes[OpTypeMatrix].push_back(type);
+ constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(type));
+ module.mapInstruction(type);
+
+ return type->getResultId();
+}
+
+Id Builder::makeCooperativeMatrixType(Id component, Id scope, Id rows, Id cols)
+{
+ // try to find it
+ Instruction* type;
+ for (int t = 0; t < (int)groupedTypes[OpTypeCooperativeMatrixNV].size(); ++t) {
+ type = groupedTypes[OpTypeCooperativeMatrixNV][t];
+ if (type->getIdOperand(0) == component &&
+ type->getIdOperand(1) == scope &&
+ type->getIdOperand(2) == rows &&
+ type->getIdOperand(3) == cols)
+ return type->getResultId();
+ }
+
+ // not found, make it
+ type = new Instruction(getUniqueId(), NoType, OpTypeCooperativeMatrixNV);
+ type->addIdOperand(component);
+ type->addIdOperand(scope);
+ type->addIdOperand(rows);
+ type->addIdOperand(cols);
+ groupedTypes[OpTypeCooperativeMatrixNV].push_back(type);
+ constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(type));
+ module.mapInstruction(type);
+
+ return type->getResultId();
+}
+
+
+// TODO: performance: track arrays per stride
+// If a stride is supplied (non-zero), always make a new array type.
+// If no stride (0), reuse a previously made array type where possible.
+// 'size' is an Id of a constant or specialization constant of the array size
+Id Builder::makeArrayType(Id element, Id sizeId, int stride)
+{
+ Instruction* type;
+ if (stride == 0) {
+ // try to find existing type
+ for (int t = 0; t < (int)groupedTypes[OpTypeArray].size(); ++t) {
+ type = groupedTypes[OpTypeArray][t];
+ if (type->getIdOperand(0) == element &&
+ type->getIdOperand(1) == sizeId)
+ return type->getResultId();
+ }
+ }
+
+ // not found, make it
+ type = new Instruction(getUniqueId(), NoType, OpTypeArray);
+ type->addIdOperand(element);
+ type->addIdOperand(sizeId);
+ groupedTypes[OpTypeArray].push_back(type);
+ constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(type));
+ module.mapInstruction(type);
+
+ return type->getResultId();
+}
+
+Id Builder::makeRuntimeArray(Id element)
+{
+ Instruction* type = new Instruction(getUniqueId(), NoType, OpTypeRuntimeArray);
+ type->addIdOperand(element);
+ constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(type));
+ module.mapInstruction(type);
+
+ return type->getResultId();
+}
+
+Id Builder::makeFunctionType(Id returnType, const std::vector<Id>& paramTypes)
+{
+ // try to find it
+ Instruction* type;
+ for (int t = 0; t < (int)groupedTypes[OpTypeFunction].size(); ++t) {
+ type = groupedTypes[OpTypeFunction][t];
+ if (type->getIdOperand(0) != returnType || (int)paramTypes.size() != type->getNumOperands() - 1)
+ continue;
+ bool mismatch = false;
+ for (int p = 0; p < (int)paramTypes.size(); ++p) {
+ if (paramTypes[p] != type->getIdOperand(p + 1)) {
+ mismatch = true;
+ break;
+ }
+ }
+ if (! mismatch)
+ return type->getResultId();
+ }
+
+ // not found, make it
+ type = new Instruction(getUniqueId(), NoType, OpTypeFunction);
+ type->addIdOperand(returnType);
+ for (int p = 0; p < (int)paramTypes.size(); ++p)
+ type->addIdOperand(paramTypes[p]);
+ groupedTypes[OpTypeFunction].push_back(type);
+ constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(type));
+ module.mapInstruction(type);
+
+ return type->getResultId();
+}
+
+Id Builder::makeImageType(Id sampledType, Dim dim, bool depth, bool arrayed, bool ms, unsigned sampled, ImageFormat format)
+{
+ assert(sampled == 1 || sampled == 2);
+
+ // try to find it
+ Instruction* type;
+ for (int t = 0; t < (int)groupedTypes[OpTypeImage].size(); ++t) {
+ type = groupedTypes[OpTypeImage][t];
+ if (type->getIdOperand(0) == sampledType &&
+ type->getImmediateOperand(1) == (unsigned int)dim &&
+ type->getImmediateOperand(2) == ( depth ? 1u : 0u) &&
+ type->getImmediateOperand(3) == (arrayed ? 1u : 0u) &&
+ type->getImmediateOperand(4) == ( ms ? 1u : 0u) &&
+ type->getImmediateOperand(5) == sampled &&
+ type->getImmediateOperand(6) == (unsigned int)format)
+ return type->getResultId();
+ }
+
+ // not found, make it
+ type = new Instruction(getUniqueId(), NoType, OpTypeImage);
+ type->addIdOperand(sampledType);
+ type->addImmediateOperand( dim);
+ type->addImmediateOperand( depth ? 1 : 0);
+ type->addImmediateOperand(arrayed ? 1 : 0);
+ type->addImmediateOperand( ms ? 1 : 0);
+ type->addImmediateOperand(sampled);
+ type->addImmediateOperand((unsigned int)format);
+
+ groupedTypes[OpTypeImage].push_back(type);
+ constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(type));
+ module.mapInstruction(type);
+
+ // deal with capabilities
+ switch (dim) {
+ case DimBuffer:
+ if (sampled == 1)
+ addCapability(CapabilitySampledBuffer);
+ else
+ addCapability(CapabilityImageBuffer);
+ break;
+ case Dim1D:
+ if (sampled == 1)
+ addCapability(CapabilitySampled1D);
+ else
+ addCapability(CapabilityImage1D);
+ break;
+ case DimCube:
+ if (arrayed) {
+ if (sampled == 1)
+ addCapability(CapabilitySampledCubeArray);
+ else
+ addCapability(CapabilityImageCubeArray);
+ }
+ break;
+ case DimRect:
+ if (sampled == 1)
+ addCapability(CapabilitySampledRect);
+ else
+ addCapability(CapabilityImageRect);
+ break;
+ case DimSubpassData:
+ addCapability(CapabilityInputAttachment);
+ break;
+ default:
+ break;
+ }
+
+ if (ms) {
+ if (sampled == 2) {
+ // Images used with subpass data are not storage
+ // images, so don't require the capability for them.
+ if (dim != Dim::DimSubpassData)
+ addCapability(CapabilityStorageImageMultisample);
+ if (arrayed)
+ addCapability(CapabilityImageMSArray);
+ }
+ }
+
+ return type->getResultId();
+}
+
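+// Illustrative usage, not part of upstream glslang: a sampled (sampled == 1)
+// 2D float image, as used by an ordinary GLSL texture; Dim2D needs no extra capability:
+// Id imgT = makeImageType(makeFloatType(32), Dim2D, false /*depth*/,
+//                         false /*arrayed*/, false /*ms*/, 1 /*sampled*/, ImageFormatUnknown);
+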
+Id Builder::makeSampledImageType(Id imageType)
+{
+ // try to find it
+ Instruction* type;
+ for (int t = 0; t < (int)groupedTypes[OpTypeSampledImage].size(); ++t) {
+ type = groupedTypes[OpTypeSampledImage][t];
+ if (type->getIdOperand(0) == imageType)
+ return type->getResultId();
+ }
+
+ // not found, make it
+ type = new Instruction(getUniqueId(), NoType, OpTypeSampledImage);
+ type->addIdOperand(imageType);
+
+ groupedTypes[OpTypeSampledImage].push_back(type);
+ constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(type));
+ module.mapInstruction(type);
+
+ return type->getResultId();
+}
+
+#ifdef NV_EXTENSIONS
+Id Builder::makeAccelerationStructureNVType()
+{
+ Instruction *type;
+ if (groupedTypes[OpTypeAccelerationStructureNV].size() == 0) {
+ type = new Instruction(getUniqueId(), NoType, OpTypeAccelerationStructureNV);
+ groupedTypes[OpTypeAccelerationStructureNV].push_back(type);
+ constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(type));
+ module.mapInstruction(type);
+ } else {
+ type = groupedTypes[OpTypeAccelerationStructureNV].back();
+ }
+
+ return type->getResultId();
+}
+#endif
+
+Id Builder::getDerefTypeId(Id resultId) const
+{
+ Id typeId = getTypeId(resultId);
+ assert(isPointerType(typeId));
+
+ return module.getInstruction(typeId)->getIdOperand(1);
+}
+
+Op Builder::getMostBasicTypeClass(Id typeId) const
+{
+ Instruction* instr = module.getInstruction(typeId);
+
+ Op typeClass = instr->getOpCode();
+ switch (typeClass)
+ {
+ case OpTypeVector:
+ case OpTypeMatrix:
+ case OpTypeArray:
+ case OpTypeRuntimeArray:
+ return getMostBasicTypeClass(instr->getIdOperand(0));
+ case OpTypePointer:
+ return getMostBasicTypeClass(instr->getIdOperand(1));
+ default:
+ return typeClass;
+ }
+}
+
+int Builder::getNumTypeConstituents(Id typeId) const
+{
+ Instruction* instr = module.getInstruction(typeId);
+
+ switch (instr->getOpCode())
+ {
+ case OpTypeBool:
+ case OpTypeInt:
+ case OpTypeFloat:
+ case OpTypePointer:
+ return 1;
+ case OpTypeVector:
+ case OpTypeMatrix:
+ return instr->getImmediateOperand(1);
+ case OpTypeArray:
+ {
+ Id lengthId = instr->getIdOperand(1);
+ return module.getInstruction(lengthId)->getImmediateOperand(0);
+ }
+ case OpTypeStruct:
+ return instr->getNumOperands();
+ case OpTypeCooperativeMatrixNV:
+ // has only one constituent when used with OpCompositeConstruct.
+ return 1;
+ default:
+ assert(0);
+ return 1;
+ }
+}
+
+// Return the lowest-level type of scalar that a homogeneous composite is made out of.
+// Typically, this is just to find out if something is made out of ints or floats.
+// However, it can also return a structure, if, say, the composite is an array of structures.
+Id Builder::getScalarTypeId(Id typeId) const
+{
+ Instruction* instr = module.getInstruction(typeId);
+
+ Op typeClass = instr->getOpCode();
+ switch (typeClass)
+ {
+ case OpTypeVoid:
+ case OpTypeBool:
+ case OpTypeInt:
+ case OpTypeFloat:
+ case OpTypeStruct:
+ return instr->getResultId();
+ case OpTypeVector:
+ case OpTypeMatrix:
+ case OpTypeArray:
+ case OpTypeRuntimeArray:
+ case OpTypePointer:
+ return getScalarTypeId(getContainedTypeId(typeId));
+ default:
+ assert(0);
+ return NoResult;
+ }
+}
+
+// Return the type of 'member' of a composite.
+Id Builder::getContainedTypeId(Id typeId, int member) const
+{
+ Instruction* instr = module.getInstruction(typeId);
+
+ Op typeClass = instr->getOpCode();
+ switch (typeClass)
+ {
+ case OpTypeVector:
+ case OpTypeMatrix:
+ case OpTypeArray:
+ case OpTypeRuntimeArray:
+ case OpTypeCooperativeMatrixNV:
+ return instr->getIdOperand(0);
+ case OpTypePointer:
+ return instr->getIdOperand(1);
+ case OpTypeStruct:
+ return instr->getIdOperand(member);
+ default:
+ assert(0);
+ return NoResult;
+ }
+}
+
+// Return the immediately contained type of a given composite type.
+Id Builder::getContainedTypeId(Id typeId) const
+{
+ return getContainedTypeId(typeId, 0);
+}
+
+// Returns true if 'typeId' is or contains a scalar type declared with 'typeOp'
+// of width 'width'. The 'width' is only consumed for int and float types.
+// Returns false otherwise.
+bool Builder::containsType(Id typeId, spv::Op typeOp, unsigned int width) const
+{
+ const Instruction& instr = *module.getInstruction(typeId);
+
+ Op typeClass = instr.getOpCode();
+ switch (typeClass)
+ {
+ case OpTypeInt:
+ case OpTypeFloat:
+ return typeClass == typeOp && instr.getImmediateOperand(0) == width;
+ case OpTypeStruct:
+ for (int m = 0; m < instr.getNumOperands(); ++m) {
+ if (containsType(instr.getIdOperand(m), typeOp, width))
+ return true;
+ }
+ return false;
+ case OpTypePointer:
+ return false;
+ case OpTypeVector:
+ case OpTypeMatrix:
+ case OpTypeArray:
+ case OpTypeRuntimeArray:
+ return containsType(getContainedTypeId(typeId), typeOp, width);
+ default:
+ return typeClass == typeOp;
+ }
+}
+
+// Return true if the type is a pointer to PhysicalStorageBufferEXT or an
+// array of such pointers. These require restrict/aliased decorations.
+bool Builder::containsPhysicalStorageBufferOrArray(Id typeId) const
+{
+ const Instruction& instr = *module.getInstruction(typeId);
+
+ Op typeClass = instr.getOpCode();
+ switch (typeClass)
+ {
+ case OpTypePointer:
+ return getTypeStorageClass(typeId) == StorageClassPhysicalStorageBufferEXT;
+ case OpTypeArray:
+ return containsPhysicalStorageBufferOrArray(getContainedTypeId(typeId));
+ default:
+ return false;
+ }
+}
+
+// See if a scalar constant of this type has already been created, so it
+// can be reused rather than duplicated. (Required by the specification).
+Id Builder::findScalarConstant(Op typeClass, Op opcode, Id typeId, unsigned value)
+{
+ Instruction* constant;
+ for (int i = 0; i < (int)groupedConstants[typeClass].size(); ++i) {
+ constant = groupedConstants[typeClass][i];
+ if (constant->getOpCode() == opcode &&
+ constant->getTypeId() == typeId &&
+ constant->getImmediateOperand(0) == value)
+ return constant->getResultId();
+ }
+
+ return 0;
+}
+
+// Version of findScalarConstant (see above) for scalars that take two operands (e.g. a 'double' or 'int64').
+Id Builder::findScalarConstant(Op typeClass, Op opcode, Id typeId, unsigned v1, unsigned v2)
+{
+ Instruction* constant;
+ for (int i = 0; i < (int)groupedConstants[typeClass].size(); ++i) {
+ constant = groupedConstants[typeClass][i];
+ if (constant->getOpCode() == opcode &&
+ constant->getTypeId() == typeId &&
+ constant->getImmediateOperand(0) == v1 &&
+ constant->getImmediateOperand(1) == v2)
+ return constant->getResultId();
+ }
+
+ return 0;
+}
+
+// Return true if consuming 'opcode' means consuming a constant.
+// "constant" here means that, after the final transform to executable code,
+// the value consumed will be a constant, so this includes specialization constants.
+bool Builder::isConstantOpCode(Op opcode) const
+{
+ switch (opcode) {
+ case OpUndef:
+ case OpConstantTrue:
+ case OpConstantFalse:
+ case OpConstant:
+ case OpConstantComposite:
+ case OpConstantSampler:
+ case OpConstantNull:
+ case OpSpecConstantTrue:
+ case OpSpecConstantFalse:
+ case OpSpecConstant:
+ case OpSpecConstantComposite:
+ case OpSpecConstantOp:
+ return true;
+ default:
+ return false;
+ }
+}
+
+// Return true if consuming 'opcode' means consuming a specialization constant.
+bool Builder::isSpecConstantOpCode(Op opcode) const
+{
+ switch (opcode) {
+ case OpSpecConstantTrue:
+ case OpSpecConstantFalse:
+ case OpSpecConstant:
+ case OpSpecConstantComposite:
+ case OpSpecConstantOp:
+ return true;
+ default:
+ return false;
+ }
+}
+
+Id Builder::makeBoolConstant(bool b, bool specConstant)
+{
+ Id typeId = makeBoolType();
+ Instruction* constant;
+ Op opcode = specConstant ? (b ? OpSpecConstantTrue : OpSpecConstantFalse) : (b ? OpConstantTrue : OpConstantFalse);
+
+ // See if we already made it. Applies only to regular constants, because specialization constants
+ // must remain distinct for the purpose of applying a SpecId decoration.
+ if (! specConstant) {
+ Id existing = 0;
+ for (int i = 0; i < (int)groupedConstants[OpTypeBool].size(); ++i) {
+ constant = groupedConstants[OpTypeBool][i];
+ if (constant->getTypeId() == typeId && constant->getOpCode() == opcode)
+ existing = constant->getResultId();
+ }
+
+ if (existing)
+ return existing;
+ }
+
+ // Make it
+ Instruction* c = new Instruction(getUniqueId(), typeId, opcode);
+ constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(c));
+ groupedConstants[OpTypeBool].push_back(c);
+ module.mapInstruction(c);
+
+ return c->getResultId();
+}
+
+Id Builder::makeIntConstant(Id typeId, unsigned value, bool specConstant)
+{
+ Op opcode = specConstant ? OpSpecConstant : OpConstant;
+
+ // See if we already made it. Applies only to regular constants, because specialization constants
+ // must remain distinct for the purpose of applying a SpecId decoration.
+ if (! specConstant) {
+ Id existing = findScalarConstant(OpTypeInt, opcode, typeId, value);
+ if (existing)
+ return existing;
+ }
+
+ Instruction* c = new Instruction(getUniqueId(), typeId, opcode);
+ c->addImmediateOperand(value);
+ constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(c));
+ groupedConstants[OpTypeInt].push_back(c);
+ module.mapInstruction(c);
+
+ return c->getResultId();
+}
+
+Id Builder::makeInt64Constant(Id typeId, unsigned long long value, bool specConstant)
+{
+ Op opcode = specConstant ? OpSpecConstant : OpConstant;
+
+ unsigned op1 = value & 0xFFFFFFFF;
+ unsigned op2 = value >> 32;
+
+ // See if we already made it. Applies only to regular constants, because specialization constants
+ // must remain distinct for the purpose of applying a SpecId decoration.
+ if (! specConstant) {
+ Id existing = findScalarConstant(OpTypeInt, opcode, typeId, op1, op2);
+ if (existing)
+ return existing;
+ }
+
+ Instruction* c = new Instruction(getUniqueId(), typeId, opcode);
+ c->addImmediateOperand(op1);
+ c->addImmediateOperand(op2);
+ constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(c));
+ groupedConstants[OpTypeInt].push_back(c);
+ module.mapInstruction(c);
+
+ return c->getResultId();
+}
+
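+// Worked example, not part of upstream glslang: the 64-bit literal 0x100000002
+// is split into two 32-bit words before being stored as immediate operands:
+// op1 = 0x100000002 & 0xFFFFFFFF = 0x00000002 (low word)
+// op2 = 0x100000002 >> 32        = 0x00000001 (high word)
+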
+Id Builder::makeFloatConstant(float f, bool specConstant)
+{
+ Op opcode = specConstant ? OpSpecConstant : OpConstant;
+ Id typeId = makeFloatType(32);
+ union { float fl; unsigned int ui; } u;
+ u.fl = f;
+ unsigned value = u.ui;
+
+ // See if we already made it. Applies only to regular constants, because specialization constants
+ // must remain distinct for the purpose of applying a SpecId decoration.
+ if (! specConstant) {
+ Id existing = findScalarConstant(OpTypeFloat, opcode, typeId, value);
+ if (existing)
+ return existing;
+ }
+
+ Instruction* c = new Instruction(getUniqueId(), typeId, opcode);
+ c->addImmediateOperand(value);
+ constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(c));
+ groupedConstants[OpTypeFloat].push_back(c);
+ module.mapInstruction(c);
+
+ return c->getResultId();
+}
+
+Id Builder::makeDoubleConstant(double d, bool specConstant)
+{
+ Op opcode = specConstant ? OpSpecConstant : OpConstant;
+ Id typeId = makeFloatType(64);
+ union { double db; unsigned long long ull; } u;
+ u.db = d;
+ unsigned long long value = u.ull;
+ unsigned op1 = value & 0xFFFFFFFF;
+ unsigned op2 = value >> 32;
+
+ // See if we already made it. Applies only to regular constants, because specialization constants
+ // must remain distinct for the purpose of applying a SpecId decoration.
+ if (! specConstant) {
+ Id existing = findScalarConstant(OpTypeFloat, opcode, typeId, op1, op2);
+ if (existing)
+ return existing;
+ }
+
+ Instruction* c = new Instruction(getUniqueId(), typeId, opcode);
+ c->addImmediateOperand(op1);
+ c->addImmediateOperand(op2);
+ constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(c));
+ groupedConstants[OpTypeFloat].push_back(c);
+ module.mapInstruction(c);
+
+ return c->getResultId();
+}
+
+Id Builder::makeFloat16Constant(float f16, bool specConstant)
+{
+ Op opcode = specConstant ? OpSpecConstant : OpConstant;
+ Id typeId = makeFloatType(16);
+
+ spvutils::HexFloat<spvutils::FloatProxy<float>> fVal(f16);
+ spvutils::HexFloat<spvutils::FloatProxy<spvutils::Float16>> f16Val(0);
+ fVal.castTo(f16Val, spvutils::kRoundToZero);
+
+ unsigned value = f16Val.value().getAsFloat().get_value();
+
+ // See if we already made it. Applies only to regular constants, because specialization constants
+ // must remain distinct for the purpose of applying a SpecId decoration.
+ if (!specConstant) {
+ Id existing = findScalarConstant(OpTypeFloat, opcode, typeId, value);
+ if (existing)
+ return existing;
+ }
+
+ Instruction* c = new Instruction(getUniqueId(), typeId, opcode);
+ c->addImmediateOperand(value);
+ constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(c));
+ groupedConstants[OpTypeFloat].push_back(c);
+ module.mapInstruction(c);
+
+ return c->getResultId();
+}
+
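+// Worked example, not part of upstream glslang: makeFloat16Constant(1.0f)
+// rounds the 32-bit float toward zero and stores the IEEE-754 half bits, so
+// the immediate operand becomes 0x3C00 (sign 0, exponent 15, mantissa 0).
+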
+Id Builder::makeFpConstant(Id type, double d, bool specConstant)
+{
+ assert(isFloatType(type));
+
+ switch (getScalarTypeWidth(type)) {
+ case 16:
+ return makeFloat16Constant((float)d, specConstant);
+ case 32:
+ return makeFloatConstant((float)d, specConstant);
+ case 64:
+ return makeDoubleConstant(d, specConstant);
+ default:
+ break;
+ }
+
+ assert(false);
+ return NoResult;
+}
+
+Id Builder::findCompositeConstant(Op typeClass, Id typeId, const std::vector<Id>& comps)
+{
+ Instruction* constant = 0;
+ bool found = false;
+ for (int i = 0; i < (int)groupedConstants[typeClass].size(); ++i) {
+ constant = groupedConstants[typeClass][i];
+
+ if (constant->getTypeId() != typeId)
+ continue;
+
+ // same contents?
+ bool mismatch = false;
+ for (int op = 0; op < constant->getNumOperands(); ++op) {
+ if (constant->getIdOperand(op) != comps[op]) {
+ mismatch = true;
+ break;
+ }
+ }
+ if (! mismatch) {
+ found = true;
+ break;
+ }
+ }
+
+ return found ? constant->getResultId() : NoResult;
+}
+
+Id Builder::findStructConstant(Id typeId, const std::vector<Id>& comps)
+{
+ Instruction* constant = 0;
+ bool found = false;
+ for (int i = 0; i < (int)groupedStructConstants[typeId].size(); ++i) {
+ constant = groupedStructConstants[typeId][i];
+
+ // same contents?
+ bool mismatch = false;
+ for (int op = 0; op < constant->getNumOperands(); ++op) {
+ if (constant->getIdOperand(op) != comps[op]) {
+ mismatch = true;
+ break;
+ }
+ }
+ if (! mismatch) {
+ found = true;
+ break;
+ }
+ }
+
+ return found ? constant->getResultId() : NoResult;
+}
+
+// Comments in header
+Id Builder::makeCompositeConstant(Id typeId, const std::vector<Id>& members, bool specConstant)
+{
+ Op opcode = specConstant ? OpSpecConstantComposite : OpConstantComposite;
+ assert(typeId);
+ Op typeClass = getTypeClass(typeId);
+
+ switch (typeClass) {
+ case OpTypeVector:
+ case OpTypeArray:
+ case OpTypeMatrix:
+ case OpTypeCooperativeMatrixNV:
+ if (! specConstant) {
+ Id existing = findCompositeConstant(typeClass, typeId, members);
+ if (existing)
+ return existing;
+ }
+ break;
+ case OpTypeStruct:
+ if (! specConstant) {
+ Id existing = findStructConstant(typeId, members);
+ if (existing)
+ return existing;
+ }
+ break;
+ default:
+ assert(0);
+ return makeFloatConstant(0.0);
+ }
+
+ Instruction* c = new Instruction(getUniqueId(), typeId, opcode);
+ for (int op = 0; op < (int)members.size(); ++op)
+ c->addIdOperand(members[op]);
+ constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(c));
+ if (typeClass == OpTypeStruct)
+ groupedStructConstants[typeId].push_back(c);
+ else
+ groupedConstants[typeClass].push_back(c);
+ module.mapInstruction(c);
+
+ return c->getResultId();
+}
+
+Instruction* Builder::addEntryPoint(ExecutionModel model, Function* function, const char* name)
+{
+ Instruction* entryPoint = new Instruction(OpEntryPoint);
+ entryPoint->addImmediateOperand(model);
+ entryPoint->addIdOperand(function->getId());
+ entryPoint->addStringOperand(name);
+
+ entryPoints.push_back(std::unique_ptr<Instruction>(entryPoint));
+
+ return entryPoint;
+}
+
+// Currently relying on the fact that all 'value' arguments of interest are small non-negative values.
+void Builder::addExecutionMode(Function* entryPoint, ExecutionMode mode, int value1, int value2, int value3)
+{
+ Instruction* instr = new Instruction(OpExecutionMode);
+ instr->addIdOperand(entryPoint->getId());
+ instr->addImmediateOperand(mode);
+ if (value1 >= 0)
+ instr->addImmediateOperand(value1);
+ if (value2 >= 0)
+ instr->addImmediateOperand(value2);
+ if (value3 >= 0)
+ instr->addImmediateOperand(value3);
+
+ executionModes.push_back(std::unique_ptr<Instruction>(instr));
+}
+
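+// Illustrative usage, not part of upstream glslang: declaring a compute
+// shader's workgroup size; unused trailing values are passed as -1 and dropped:
+// addExecutionMode(entryPointFunction, ExecutionModeLocalSize, 8, 8, 1);
+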
+void Builder::addName(Id id, const char* string)
+{
+ Instruction* name = new Instruction(OpName);
+ name->addIdOperand(id);
+ name->addStringOperand(string);
+
+ names.push_back(std::unique_ptr<Instruction>(name));
+}
+
+void Builder::addMemberName(Id id, int memberNumber, const char* string)
+{
+ Instruction* name = new Instruction(OpMemberName);
+ name->addIdOperand(id);
+ name->addImmediateOperand(memberNumber);
+ name->addStringOperand(string);
+
+ names.push_back(std::unique_ptr<Instruction>(name));
+}
+
+void Builder::addDecoration(Id id, Decoration decoration, int num)
+{
+ if (decoration == spv::DecorationMax)
+ return;
+
+ Instruction* dec = new Instruction(OpDecorate);
+ dec->addIdOperand(id);
+ dec->addImmediateOperand(decoration);
+ if (num >= 0)
+ dec->addImmediateOperand(num);
+
+ decorations.push_back(std::unique_ptr<Instruction>(dec));
+}
+
+void Builder::addDecoration(Id id, Decoration decoration, const char* s)
+{
+ if (decoration == spv::DecorationMax)
+ return;
+
+ Instruction* dec = new Instruction(OpDecorateStringGOOGLE);
+ dec->addIdOperand(id);
+ dec->addImmediateOperand(decoration);
+ dec->addStringOperand(s);
+
+ decorations.push_back(std::unique_ptr<Instruction>(dec));
+}
+
+void Builder::addDecorationId(Id id, Decoration decoration, Id idDecoration)
+{
+ if (decoration == spv::DecorationMax)
+ return;
+
+ Instruction* dec = new Instruction(OpDecorateId);
+ dec->addIdOperand(id);
+ dec->addImmediateOperand(decoration);
+ dec->addIdOperand(idDecoration);
+
+ decorations.push_back(std::unique_ptr<Instruction>(dec));
+}
+
+void Builder::addMemberDecoration(Id id, unsigned int member, Decoration decoration, int num)
+{
+ if (decoration == spv::DecorationMax)
+ return;
+
+ Instruction* dec = new Instruction(OpMemberDecorate);
+ dec->addIdOperand(id);
+ dec->addImmediateOperand(member);
+ dec->addImmediateOperand(decoration);
+ if (num >= 0)
+ dec->addImmediateOperand(num);
+
+ decorations.push_back(std::unique_ptr<Instruction>(dec));
+}
+
+void Builder::addMemberDecoration(Id id, unsigned int member, Decoration decoration, const char *s)
+{
+ if (decoration == spv::DecorationMax)
+ return;
+
+ Instruction* dec = new Instruction(OpMemberDecorateStringGOOGLE);
+ dec->addIdOperand(id);
+ dec->addImmediateOperand(member);
+ dec->addImmediateOperand(decoration);
+ dec->addStringOperand(s);
+
+ decorations.push_back(std::unique_ptr<Instruction>(dec));
+}
+
+// Comments in header
+Function* Builder::makeEntryPoint(const char* entryPoint)
+{
+ assert(! entryPointFunction);
+
+ Block* entry;
+ std::vector<Id> params;
+ std::vector<std::vector<Decoration>> decorations;
+
+ entryPointFunction = makeFunctionEntry(NoPrecision, makeVoidType(), entryPoint, params, decorations, &entry);
+
+ return entryPointFunction;
+}
+
+// Comments in header
+Function* Builder::makeFunctionEntry(Decoration precision, Id returnType, const char* name,
+ const std::vector<Id>& paramTypes, const std::vector<std::vector<Decoration>>& decorations, Block **entry)
+{
+ // Make the function and initial instructions in it
+ Id typeId = makeFunctionType(returnType, paramTypes);
+ Id firstParamId = paramTypes.size() == 0 ? 0 : getUniqueIds((int)paramTypes.size());
+ Function* function = new Function(getUniqueId(), returnType, typeId, firstParamId, module);
+
+ // Set up the precisions
+ setPrecision(function->getId(), precision);
+ for (unsigned p = 0; p < (unsigned)decorations.size(); ++p) {
+ for (int d = 0; d < (int)decorations[p].size(); ++d)
+ addDecoration(firstParamId + p, decorations[p][d]);
+ }
+
+ // CFG
+ if (entry) {
+ *entry = new Block(getUniqueId(), *function);
+ function->addBlock(*entry);
+ setBuildPoint(*entry);
+ }
+
+ if (name)
+ addName(function->getId(), name);
+
+ functions.push_back(std::unique_ptr<Function>(function));
+
+ return function;
+}
+
+// Comments in header
+void Builder::makeReturn(bool implicit, Id retVal)
+{
+ if (retVal) {
+ Instruction* inst = new Instruction(NoResult, NoType, OpReturnValue);
+ inst->addIdOperand(retVal);
+ buildPoint->addInstruction(std::unique_ptr<Instruction>(inst));
+ } else
+ buildPoint->addInstruction(std::unique_ptr<Instruction>(new Instruction(NoResult, NoType, OpReturn)));
+
+ if (! implicit)
+ createAndSetNoPredecessorBlock("post-return");
+}
+
+// Comments in header
+void Builder::leaveFunction()
+{
+ Block* block = buildPoint;
+ Function& function = buildPoint->getParent();
+ assert(block);
+
+ // If our function did not contain a return, add a return void now.
+ if (! block->isTerminated()) {
+ if (function.getReturnType() == makeVoidType())
+ makeReturn(true);
+ else {
+ makeReturn(true, createUndefined(function.getReturnType()));
+ }
+ }
+}
+
+// Comments in header
+void Builder::makeDiscard()
+{
+ buildPoint->addInstruction(std::unique_ptr<Instruction>(new Instruction(OpKill)));
+ createAndSetNoPredecessorBlock("post-discard");
+}
+
+// Comments in header
+Id Builder::createVariable(StorageClass storageClass, Id type, const char* name, Id initializer)
+{
+ Id pointerType = makePointer(storageClass, type);
+ Instruction* inst = new Instruction(getUniqueId(), pointerType, OpVariable);
+ inst->addImmediateOperand(storageClass);
+ if (initializer != NoResult)
+ inst->addIdOperand(initializer);
+
+ switch (storageClass) {
+ case StorageClassFunction:
+ // Validation rules require the declaration in the entry block
+ buildPoint->getParent().addLocalVariable(std::unique_ptr<Instruction>(inst));
+ break;
+
+ default:
+ constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(inst));
+ module.mapInstruction(inst);
+ break;
+ }
+
+ if (name)
+ addName(inst->getResultId(), name);
+
+ return inst->getResultId();
+}
+
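+// Illustrative usage, not part of upstream glslang: a function-local float,
+// which lands in the entry block of the current function per the validation rules:
+// Id tmp = createVariable(StorageClassFunction, makeFloatType(32), "tmp", NoResult);
+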
+// Comments in header
+Id Builder::createUndefined(Id type)
+{
+ Instruction* inst = new Instruction(getUniqueId(), type, OpUndef);
+ buildPoint->addInstruction(std::unique_ptr<Instruction>(inst));
+ return inst->getResultId();
+}
+
+// av/vis/nonprivate are unnecessary and illegal for some storage classes.
+spv::MemoryAccessMask Builder::sanitizeMemoryAccessForStorageClass(spv::MemoryAccessMask memoryAccess, StorageClass sc) const
+{
+ switch (sc) {
+ case spv::StorageClassUniform:
+ case spv::StorageClassWorkgroup:
+ case spv::StorageClassStorageBuffer:
+ case spv::StorageClassPhysicalStorageBufferEXT:
+ break;
+ default:
+ memoryAccess = spv::MemoryAccessMask(memoryAccess &
+ ~(spv::MemoryAccessMakePointerAvailableKHRMask |
+ spv::MemoryAccessMakePointerVisibleKHRMask |
+ spv::MemoryAccessNonPrivatePointerKHRMask));
+ break;
+ }
+ return memoryAccess;
+}
+
+// Comments in header
+void Builder::createStore(Id rValue, Id lValue, spv::MemoryAccessMask memoryAccess, spv::Scope scope, unsigned int alignment)
+{
+ Instruction* store = new Instruction(OpStore);
+ store->addIdOperand(lValue);
+ store->addIdOperand(rValue);
+
+ memoryAccess = sanitizeMemoryAccessForStorageClass(memoryAccess, getStorageClass(lValue));
+
+ if (memoryAccess != MemoryAccessMaskNone) {
+ store->addImmediateOperand(memoryAccess);
+ if (memoryAccess & spv::MemoryAccessAlignedMask) {
+ store->addImmediateOperand(alignment);
+ }
+ if (memoryAccess & spv::MemoryAccessMakePointerAvailableKHRMask) {
+ store->addIdOperand(makeUintConstant(scope));
+ }
+ }
+
+ buildPoint->addInstruction(std::unique_ptr<Instruction>(store));
+}
+
+// Comments in header
+Id Builder::createLoad(Id lValue, spv::MemoryAccessMask memoryAccess, spv::Scope scope, unsigned int alignment)
+{
+ Instruction* load = new Instruction(getUniqueId(), getDerefTypeId(lValue), OpLoad);
+ load->addIdOperand(lValue);
+
+ memoryAccess = sanitizeMemoryAccessForStorageClass(memoryAccess, getStorageClass(lValue));
+
+ if (memoryAccess != MemoryAccessMaskNone) {
+ load->addImmediateOperand(memoryAccess);
+ if (memoryAccess & spv::MemoryAccessAlignedMask) {
+ load->addImmediateOperand(alignment);
+ }
+ if (memoryAccess & spv::MemoryAccessMakePointerVisibleKHRMask) {
+ load->addIdOperand(makeUintConstant(scope));
+ }
+ }
+
+ buildPoint->addInstruction(std::unique_ptr<Instruction>(load));
+
+ return load->getResultId();
+}
+
+// Comments in header
+Id Builder::createAccessChain(StorageClass storageClass, Id base, const std::vector<Id>& offsets)
+{
+ // Figure out the final resulting type.
+ spv::Id typeId = getTypeId(base);
+ assert(isPointerType(typeId) && offsets.size() > 0);
+ typeId = getContainedTypeId(typeId);
+ for (int i = 0; i < (int)offsets.size(); ++i) {
+ if (isStructType(typeId)) {
+ assert(isConstantScalar(offsets[i]));
+ typeId = getContainedTypeId(typeId, getConstantScalar(offsets[i]));
+ } else
+ typeId = getContainedTypeId(typeId, offsets[i]);
+ }
+ typeId = makePointer(storageClass, typeId);
+
+ // Make the instruction
+ Instruction* chain = new Instruction(getUniqueId(), typeId, OpAccessChain);
+ chain->addIdOperand(base);
+ for (int i = 0; i < (int)offsets.size(); ++i)
+ chain->addIdOperand(offsets[i]);
+ buildPoint->addInstruction(std::unique_ptr<Instruction>(chain));
+
+ return chain->getResultId();
+}
+
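+// Illustrative usage, not part of upstream glslang ('basePtr' and 'i' are
+// placeholder Ids): addressing member 1 of a uniform-struct pointer, then
+// element 'i' of that member; struct indexes must be constant scalars:
+// std::vector<Id> offsets = { makeIntConstant(makeIntType(32), 1, false), i };
+// Id elemPtr = createAccessChain(StorageClassUniform, basePtr, offsets);
+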
+Id Builder::createArrayLength(Id base, unsigned int member)
+{
+ spv::Id intType = makeUintType(32);
+ Instruction* length = new Instruction(getUniqueId(), intType, OpArrayLength);
+ length->addIdOperand(base);
+ length->addImmediateOperand(member);
+ buildPoint->addInstruction(std::unique_ptr<Instruction>(length));
+
+ return length->getResultId();
+}
+
+Id Builder::createCooperativeMatrixLength(Id type)
+{
+ spv::Id intType = makeUintType(32);
+
+ // Generate code for spec constants if in spec constant operation
+ // generation mode.
+ if (generatingOpCodeForSpecConst) {
+ return createSpecConstantOp(OpCooperativeMatrixLengthNV, intType, std::vector<Id>(1, type), std::vector<Id>());
+ }
+
+ Instruction* length = new Instruction(getUniqueId(), intType, OpCooperativeMatrixLengthNV);
+ length->addIdOperand(type);
+ buildPoint->addInstruction(std::unique_ptr<Instruction>(length));
+
+ return length->getResultId();
+}
+
+Id Builder::createCompositeExtract(Id composite, Id typeId, unsigned index)
+{
+ // Generate code for spec constants if in spec constant operation
+ // generation mode.
+ if (generatingOpCodeForSpecConst) {
+ return createSpecConstantOp(OpCompositeExtract, typeId, std::vector<Id>(1, composite), std::vector<Id>(1, index));
+ }
+ Instruction* extract = new Instruction(getUniqueId(), typeId, OpCompositeExtract);
+ extract->addIdOperand(composite);
+ extract->addImmediateOperand(index);
+ buildPoint->addInstruction(std::unique_ptr<Instruction>(extract));
+
+ return extract->getResultId();
+}
+
+Id Builder::createCompositeExtract(Id composite, Id typeId, const std::vector<unsigned>& indexes)
+{
+ // Generate code for spec constants if in spec constant operation
+ // generation mode.
+ if (generatingOpCodeForSpecConst) {
+ return createSpecConstantOp(OpCompositeExtract, typeId, std::vector<Id>(1, composite), indexes);
+ }
+ Instruction* extract = new Instruction(getUniqueId(), typeId, OpCompositeExtract);
+ extract->addIdOperand(composite);
+ for (int i = 0; i < (int)indexes.size(); ++i)
+ extract->addImmediateOperand(indexes[i]);
+ buildPoint->addInstruction(std::unique_ptr<Instruction>(extract));
+
+ return extract->getResultId();
+}
+
+Id Builder::createCompositeInsert(Id object, Id composite, Id typeId, unsigned index)
+{
+ Instruction* insert = new Instruction(getUniqueId(), typeId, OpCompositeInsert);
+ insert->addIdOperand(object);
+ insert->addIdOperand(composite);
+ insert->addImmediateOperand(index);
+ buildPoint->addInstruction(std::unique_ptr<Instruction>(insert));
+
+ return insert->getResultId();
+}
+
+Id Builder::createCompositeInsert(Id object, Id composite, Id typeId, const std::vector<unsigned>& indexes)
+{
+ Instruction* insert = new Instruction(getUniqueId(), typeId, OpCompositeInsert);
+ insert->addIdOperand(object);
+ insert->addIdOperand(composite);
+ for (int i = 0; i < (int)indexes.size(); ++i)
+ insert->addImmediateOperand(indexes[i]);
+ buildPoint->addInstruction(std::unique_ptr<Instruction>(insert));
+
+ return insert->getResultId();
+}
+
+Id Builder::createVectorExtractDynamic(Id vector, Id typeId, Id componentIndex)
+{
+ Instruction* extract = new Instruction(getUniqueId(), typeId, OpVectorExtractDynamic);
+ extract->addIdOperand(vector);
+ extract->addIdOperand(componentIndex);
+ buildPoint->addInstruction(std::unique_ptr<Instruction>(extract));
+
+ return extract->getResultId();
+}
+
+Id Builder::createVectorInsertDynamic(Id vector, Id typeId, Id component, Id componentIndex)
+{
+ Instruction* insert = new Instruction(getUniqueId(), typeId, OpVectorInsertDynamic);
+ insert->addIdOperand(vector);
+ insert->addIdOperand(component);
+ insert->addIdOperand(componentIndex);
+ buildPoint->addInstruction(std::unique_ptr<Instruction>(insert));
+
+ return insert->getResultId();
+}
+
+// An opcode that has no operands, no result id, and no type
+void Builder::createNoResultOp(Op opCode)
+{
+ Instruction* op = new Instruction(opCode);
+ buildPoint->addInstruction(std::unique_ptr<Instruction>(op));
+}
+
+// An opcode that has one id operand, no result id, and no type
+void Builder::createNoResultOp(Op opCode, Id operand)
+{
+ Instruction* op = new Instruction(opCode);
+ op->addIdOperand(operand);
+ buildPoint->addInstruction(std::unique_ptr<Instruction>(op));
+}
+
+// An opcode that has one or more operands, no result id, and no type
+void Builder::createNoResultOp(Op opCode, const std::vector<Id>& operands)
+{
+ Instruction* op = new Instruction(opCode);
+ for (auto it = operands.cbegin(); it != operands.cend(); ++it) {
+ op->addIdOperand(*it);
+ }
+ buildPoint->addInstruction(std::unique_ptr<Instruction>(op));
+}
+
+// An opcode that has multiple operands, no result id, and no type
+void Builder::createNoResultOp(Op opCode, const std::vector<IdImmediate>& operands)
+{
+ Instruction* op = new Instruction(opCode);
+ for (auto it = operands.cbegin(); it != operands.cend(); ++it) {
+ if (it->isId)
+ op->addIdOperand(it->word);
+ else
+ op->addImmediateOperand(it->word);
+ }
+ buildPoint->addInstruction(std::unique_ptr<Instruction>(op));
+}
+
+void Builder::createControlBarrier(Scope execution, Scope memory, MemorySemanticsMask semantics)
+{
+ Instruction* op = new Instruction(OpControlBarrier);
+ op->addIdOperand(makeUintConstant(execution));
+ op->addIdOperand(makeUintConstant(memory));
+ op->addIdOperand(makeUintConstant(semantics));
+ buildPoint->addInstruction(std::unique_ptr<Instruction>(op));
+}
+
+void Builder::createMemoryBarrier(unsigned executionScope, unsigned memorySemantics)
+{
+ Instruction* op = new Instruction(OpMemoryBarrier);
+ op->addIdOperand(makeUintConstant(executionScope));
+ op->addIdOperand(makeUintConstant(memorySemantics));
+ buildPoint->addInstruction(std::unique_ptr<Instruction>(op));
+}
+
+// An opcode that has one operand, a result id, and a type
+Id Builder::createUnaryOp(Op opCode, Id typeId, Id operand)
+{
+ // Generate code for spec constants if in spec constant operation
+ // generation mode.
+ if (generatingOpCodeForSpecConst) {
+ return createSpecConstantOp(opCode, typeId, std::vector<Id>(1, operand), std::vector<Id>());
+ }
+ Instruction* op = new Instruction(getUniqueId(), typeId, opCode);
+ op->addIdOperand(operand);
+ buildPoint->addInstruction(std::unique_ptr<Instruction>(op));
+
+ return op->getResultId();
+}
+
+Id Builder::createBinOp(Op opCode, Id typeId, Id left, Id right)
+{
+ // Generate code for spec constants if in spec constant operation
+ // generation mode.
+ if (generatingOpCodeForSpecConst) {
+ std::vector<Id> operands(2);
+ operands[0] = left; operands[1] = right;
+ return createSpecConstantOp(opCode, typeId, operands, std::vector<Id>());
+ }
+ Instruction* op = new Instruction(getUniqueId(), typeId, opCode);
+ op->addIdOperand(left);
+ op->addIdOperand(right);
+ buildPoint->addInstruction(std::unique_ptr<Instruction>(op));
+
+ return op->getResultId();
+}
+
+Id Builder::createTriOp(Op opCode, Id typeId, Id op1, Id op2, Id op3)
+{
+ // Generate code for spec constants if in spec constant operation
+ // generation mode.
+ if (generatingOpCodeForSpecConst) {
+ std::vector<Id> operands(3);
+ operands[0] = op1;
+ operands[1] = op2;
+ operands[2] = op3;
+ return createSpecConstantOp(
+ opCode, typeId, operands, std::vector<Id>());
+ }
+ Instruction* op = new Instruction(getUniqueId(), typeId, opCode);
+ op->addIdOperand(op1);
+ op->addIdOperand(op2);
+ op->addIdOperand(op3);
+ buildPoint->addInstruction(std::unique_ptr<Instruction>(op));
+
+ return op->getResultId();
+}
+
+Id Builder::createOp(Op opCode, Id typeId, const std::vector<Id>& operands)
+{
+ Instruction* op = new Instruction(getUniqueId(), typeId, opCode);
+ for (auto it = operands.cbegin(); it != operands.cend(); ++it)
+ op->addIdOperand(*it);
+ buildPoint->addInstruction(std::unique_ptr<Instruction>(op));
+
+ return op->getResultId();
+}
+
+Id Builder::createOp(Op opCode, Id typeId, const std::vector<IdImmediate>& operands)
+{
+ Instruction* op = new Instruction(getUniqueId(), typeId, opCode);
+ for (auto it = operands.cbegin(); it != operands.cend(); ++it) {
+ if (it->isId)
+ op->addIdOperand(it->word);
+ else
+ op->addImmediateOperand(it->word);
+ }
+ buildPoint->addInstruction(std::unique_ptr<Instruction>(op));
+
+ return op->getResultId();
+}
+
+Id Builder::createSpecConstantOp(Op opCode, Id typeId, const std::vector<Id>& operands, const std::vector<unsigned>& literals)
+{
+ Instruction* op = new Instruction(getUniqueId(), typeId, OpSpecConstantOp);
+ op->addImmediateOperand((unsigned) opCode);
+ for (auto it = operands.cbegin(); it != operands.cend(); ++it)
+ op->addIdOperand(*it);
+ for (auto it = literals.cbegin(); it != literals.cend(); ++it)
+ op->addImmediateOperand(*it);
+ module.mapInstruction(op);
+ constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(op));
+
+ return op->getResultId();
+}
+
+Id Builder::createFunctionCall(spv::Function* function, const std::vector<spv::Id>& args)
+{
+ Instruction* op = new Instruction(getUniqueId(), function->getReturnType(), OpFunctionCall);
+ op->addIdOperand(function->getId());
+ for (int a = 0; a < (int)args.size(); ++a)
+ op->addIdOperand(args[a]);
+ buildPoint->addInstruction(std::unique_ptr<Instruction>(op));
+
+ return op->getResultId();
+}
+
+// Comments in header
+Id Builder::createRvalueSwizzle(Decoration precision, Id typeId, Id source, const std::vector<unsigned>& channels)
+{
+ if (channels.size() == 1)
+ return setPrecision(createCompositeExtract(source, typeId, channels.front()), precision);
+
+ if (generatingOpCodeForSpecConst) {
+ std::vector<Id> operands(2);
+ operands[0] = operands[1] = source;
+ return setPrecision(createSpecConstantOp(OpVectorShuffle, typeId, operands, channels), precision);
+ }
+ Instruction* swizzle = new Instruction(getUniqueId(), typeId, OpVectorShuffle);
+ assert(isVector(source));
+ swizzle->addIdOperand(source);
+ swizzle->addIdOperand(source);
+ for (int i = 0; i < (int)channels.size(); ++i)
+ swizzle->addImmediateOperand(channels[i]);
+ buildPoint->addInstruction(std::unique_ptr<Instruction>(swizzle));
+
+ return setPrecision(swizzle->getResultId(), precision);
+}
+
+// Comments in header
+Id Builder::createLvalueSwizzle(Id typeId, Id target, Id source, const std::vector<unsigned>& channels)
+{
+ if (channels.size() == 1 && getNumComponents(source) == 1)
+ return createCompositeInsert(source, target, typeId, channels.front());
+
+ Instruction* swizzle = new Instruction(getUniqueId(), typeId, OpVectorShuffle);
+
+ assert(isVector(target));
+ swizzle->addIdOperand(target);
+
+ assert(getNumComponents(source) == (int)channels.size());
+ assert(isVector(source));
+ swizzle->addIdOperand(source);
+
+ // Set up an identity shuffle from the base value to the result value
+ unsigned int components[4];
+ int numTargetComponents = getNumComponents(target);
+ for (int i = 0; i < numTargetComponents; ++i)
+ components[i] = i;
+
+ // Punch in the l-value swizzle
+ for (int i = 0; i < (int)channels.size(); ++i)
+ components[channels[i]] = numTargetComponents + i;
+
+ // finish the instruction with these component selectors
+ for (int i = 0; i < numTargetComponents; ++i)
+ swizzle->addImmediateOperand(components[i]);
+ buildPoint->addInstruction(std::unique_ptr<Instruction>(swizzle));
+
+ return swizzle->getResultId();
+}
+
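+// Worked example, not part of upstream glslang: for a GLSL "v.zx = u" with a
+// vec4 target and vec2 source, channels = {2, 0}. The identity selectors
+// {0, 1, 2, 3} become {5, 1, 4, 3}: z takes source component 0 (selector
+// 4 + 0) and x takes source component 1 (selector 4 + 1).
+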
+// Comments in header
+void Builder::promoteScalar(Decoration precision, Id& left, Id& right)
+{
+ int direction = getNumComponents(right) - getNumComponents(left);
+
+ if (direction > 0)
+ left = smearScalar(precision, left, makeVectorType(getTypeId(left), getNumComponents(right)));
+ else if (direction < 0)
+ right = smearScalar(precision, right, makeVectorType(getTypeId(right), getNumComponents(left)));
+
+ return;
+}
+
+// Comments in header
+Id Builder::smearScalar(Decoration precision, Id scalar, Id vectorType)
+{
+ assert(getNumComponents(scalar) == 1);
+ assert(getTypeId(scalar) == getScalarTypeId(vectorType));
+
+ int numComponents = getNumTypeComponents(vectorType);
+ if (numComponents == 1)
+ return scalar;
+
+ Instruction* smear = nullptr;
+ if (generatingOpCodeForSpecConst) {
+ auto members = std::vector<spv::Id>(numComponents, scalar);
+ // Sometimes, even in spec-constant-op mode, the temporary vector created by
+ // promoting a scalar might not be a spec constant; whether it is depends on
+ // the scalar.
+ // e.g.:
+ // const vec2 spec_const_result = a_spec_const_vec2 + a_front_end_const_scalar;
+ // In such cases, the temporary vector created from a_front_end_const_scalar
+ // is not a spec constant vector, even though the binary operation node is marked
+ // as 'specConstant' and we are in spec-constant-op mode.
+ auto result_id = makeCompositeConstant(vectorType, members, isSpecConstant(scalar));
+ smear = module.getInstruction(result_id);
+ } else {
+ smear = new Instruction(getUniqueId(), vectorType, OpCompositeConstruct);
+ for (int c = 0; c < numComponents; ++c)
+ smear->addIdOperand(scalar);
+ buildPoint->addInstruction(std::unique_ptr<Instruction>(smear));
+ }
+
+ return setPrecision(smear->getResultId(), precision);
+}
+
+// Comments in header
+Id Builder::createBuiltinCall(Id resultType, Id builtins, int entryPoint, const std::vector<Id>& args)
+{
+ Instruction* inst = new Instruction(getUniqueId(), resultType, OpExtInst);
+ inst->addIdOperand(builtins);
+ inst->addImmediateOperand(entryPoint);
+ for (int arg = 0; arg < (int)args.size(); ++arg)
+ inst->addIdOperand(args[arg]);
+
+ buildPoint->addInstruction(std::unique_ptr<Instruction>(inst));
+
+ return inst->getResultId();
+}
+
+// Accept all parameters needed to create a texture instruction.
+// Create the correct instruction based on the inputs, and make the call.
+Id Builder::createTextureCall(Decoration precision, Id resultType, bool sparse, bool fetch, bool proj, bool gather,
+ bool noImplicitLod, const TextureParameters& parameters, ImageOperandsMask signExtensionMask)
+{
+ static const int maxTextureArgs = 10;
+ Id texArgs[maxTextureArgs] = {};
+
+ //
+ // Set up the fixed arguments
+ //
+ int numArgs = 0;
+ bool explicitLod = false;
+ texArgs[numArgs++] = parameters.sampler;
+ texArgs[numArgs++] = parameters.coords;
+ if (parameters.Dref != NoResult)
+ texArgs[numArgs++] = parameters.Dref;
+ if (parameters.component != NoResult)
+ texArgs[numArgs++] = parameters.component;
+
+#ifdef NV_EXTENSIONS
+ if (parameters.granularity != NoResult)
+ texArgs[numArgs++] = parameters.granularity;
+ if (parameters.coarse != NoResult)
+ texArgs[numArgs++] = parameters.coarse;
+#endif
+
+ //
+ // Set up the optional arguments
+ //
+ int optArgNum = numArgs; // track which operand, if it exists, is the mask of optional arguments
+ ++numArgs; // speculatively make room for the mask operand
+ ImageOperandsMask mask = ImageOperandsMaskNone; // the mask operand
+ if (parameters.bias) {
+ mask = (ImageOperandsMask)(mask | ImageOperandsBiasMask);
+ texArgs[numArgs++] = parameters.bias;
+ }
+ if (parameters.lod) {
+ mask = (ImageOperandsMask)(mask | ImageOperandsLodMask);
+ texArgs[numArgs++] = parameters.lod;
+ explicitLod = true;
+ } else if (parameters.gradX) {
+ mask = (ImageOperandsMask)(mask | ImageOperandsGradMask);
+ texArgs[numArgs++] = parameters.gradX;
+ texArgs[numArgs++] = parameters.gradY;
+ explicitLod = true;
+ } else if (noImplicitLod && ! fetch && ! gather) {
+ // have to explicitly use lod of 0 if not allowed to have them be implicit, and
+ // we would otherwise be about to issue an implicit instruction
+ mask = (ImageOperandsMask)(mask | ImageOperandsLodMask);
+ texArgs[numArgs++] = makeFloatConstant(0.0);
+ explicitLod = true;
+ }
+ if (parameters.offset) {
+ if (isConstant(parameters.offset))
+ mask = (ImageOperandsMask)(mask | ImageOperandsConstOffsetMask);
+ else {
+ addCapability(CapabilityImageGatherExtended);
+ mask = (ImageOperandsMask)(mask | ImageOperandsOffsetMask);
+ }
+ texArgs[numArgs++] = parameters.offset;
+ }
+ if (parameters.offsets) {
+ addCapability(CapabilityImageGatherExtended);
+ mask = (ImageOperandsMask)(mask | ImageOperandsConstOffsetsMask);
+ texArgs[numArgs++] = parameters.offsets;
+ }
+ if (parameters.sample) {
+ mask = (ImageOperandsMask)(mask | ImageOperandsSampleMask);
+ texArgs[numArgs++] = parameters.sample;
+ }
+ if (parameters.lodClamp) {
+ // capability if this bit is used
+ addCapability(CapabilityMinLod);
+
+ mask = (ImageOperandsMask)(mask | ImageOperandsMinLodMask);
+ texArgs[numArgs++] = parameters.lodClamp;
+ }
+ if (parameters.nonprivate) {
+ mask = mask | ImageOperandsNonPrivateTexelKHRMask;
+ }
+ if (parameters.volatil) {
+ mask = mask | ImageOperandsVolatileTexelKHRMask;
+ }
+ mask = mask | signExtensionMask;
+ if (mask == ImageOperandsMaskNone)
+ --numArgs; // undo speculative reservation for the mask argument
+ else
+ texArgs[optArgNum] = mask;
+
+ //
+ // Set up the instruction
+ //
+ Op opCode = OpNop; // All paths below need to set this
+ if (fetch) {
+ if (sparse)
+ opCode = OpImageSparseFetch;
+ else
+ opCode = OpImageFetch;
+#ifdef NV_EXTENSIONS
+ } else if (parameters.granularity && parameters.coarse) {
+ opCode = OpImageSampleFootprintNV;
+#endif
+ } else if (gather) {
+ if (parameters.Dref)
+ if (sparse)
+ opCode = OpImageSparseDrefGather;
+ else
+ opCode = OpImageDrefGather;
+ else
+ if (sparse)
+ opCode = OpImageSparseGather;
+ else
+ opCode = OpImageGather;
+ } else if (explicitLod) {
+ if (parameters.Dref) {
+ if (proj)
+ if (sparse)
+ opCode = OpImageSparseSampleProjDrefExplicitLod;
+ else
+ opCode = OpImageSampleProjDrefExplicitLod;
+ else
+ if (sparse)
+ opCode = OpImageSparseSampleDrefExplicitLod;
+ else
+ opCode = OpImageSampleDrefExplicitLod;
+ } else {
+ if (proj)
+ if (sparse)
+ opCode = OpImageSparseSampleProjExplicitLod;
+ else
+ opCode = OpImageSampleProjExplicitLod;
+ else
+ if (sparse)
+ opCode = OpImageSparseSampleExplicitLod;
+ else
+ opCode = OpImageSampleExplicitLod;
+ }
+ } else {
+ if (parameters.Dref) {
+ if (proj)
+ if (sparse)
+ opCode = OpImageSparseSampleProjDrefImplicitLod;
+ else
+ opCode = OpImageSampleProjDrefImplicitLod;
+ else
+ if (sparse)
+ opCode = OpImageSparseSampleDrefImplicitLod;
+ else
+ opCode = OpImageSampleDrefImplicitLod;
+ } else {
+ if (proj)
+ if (sparse)
+ opCode = OpImageSparseSampleProjImplicitLod;
+ else
+ opCode = OpImageSampleProjImplicitLod;
+ else
+ if (sparse)
+ opCode = OpImageSparseSampleImplicitLod;
+ else
+ opCode = OpImageSampleImplicitLod;
+ }
+ }
+
+ // See if the result type is expecting a smeared result.
+ // This happens when a legacy shadow*() call is made, which
+ // gets a vec4 back instead of a float.
+ Id smearedType = resultType;
+ if (! isScalarType(resultType)) {
+ switch (opCode) {
+ case OpImageSampleDrefImplicitLod:
+ case OpImageSampleDrefExplicitLod:
+ case OpImageSampleProjDrefImplicitLod:
+ case OpImageSampleProjDrefExplicitLod:
+ resultType = getScalarTypeId(resultType);
+ break;
+ default:
+ break;
+ }
+ }
+
+ Id typeId0 = 0;
+ Id typeId1 = 0;
+
+ if (sparse) {
+ typeId0 = resultType;
+ typeId1 = getDerefTypeId(parameters.texelOut);
+ resultType = makeStructResultType(typeId0, typeId1);
+ }
+
+ // Build the SPIR-V instruction
+ Instruction* textureInst = new Instruction(getUniqueId(), resultType, opCode);
+ for (int op = 0; op < optArgNum; ++op)
+ textureInst->addIdOperand(texArgs[op]);
+ if (optArgNum < numArgs)
+ textureInst->addImmediateOperand(texArgs[optArgNum]);
+ for (int op = optArgNum + 1; op < numArgs; ++op)
+ textureInst->addIdOperand(texArgs[op]);
+ setPrecision(textureInst->getResultId(), precision);
+ buildPoint->addInstruction(std::unique_ptr<Instruction>(textureInst));
+
+ Id resultId = textureInst->getResultId();
+
+ if (sparse) {
+ // set capability
+ addCapability(CapabilitySparseResidency);
+
+ // Decode the return type that was a special structure
+ createStore(createCompositeExtract(resultId, typeId1, 1), parameters.texelOut);
+ resultId = createCompositeExtract(resultId, typeId0, 0);
+ setPrecision(resultId, precision);
+ } else {
+ // When a smear is needed, do it, as per what was computed
+ // above when resultType was changed to a scalar type.
+ if (resultType != smearedType)
+ resultId = smearScalar(precision, resultId, smearedType);
+ }
+
+ return resultId;
+}
+
+// Comments in header
+Id Builder::createTextureQueryCall(Op opCode, const TextureParameters& parameters, bool isUnsignedResult)
+{
+ // Figure out the result type
+ Id resultType = 0;
+ switch (opCode) {
+ case OpImageQuerySize:
+ case OpImageQuerySizeLod:
+ {
+ int numComponents = 0;
+ switch (getTypeDimensionality(getImageType(parameters.sampler))) {
+ case Dim1D:
+ case DimBuffer:
+ numComponents = 1;
+ break;
+ case Dim2D:
+ case DimCube:
+ case DimRect:
+ case DimSubpassData:
+ numComponents = 2;
+ break;
+ case Dim3D:
+ numComponents = 3;
+ break;
+
+ default:
+ assert(0);
+ break;
+ }
+ if (isArrayedImageType(getImageType(parameters.sampler)))
+ ++numComponents;
+
+ Id intType = isUnsignedResult ? makeUintType(32) : makeIntType(32);
+ if (numComponents == 1)
+ resultType = intType;
+ else
+ resultType = makeVectorType(intType, numComponents);
+
+ break;
+ }
+ case OpImageQueryLod:
+#ifdef AMD_EXTENSIONS
+ resultType = makeVectorType(getScalarTypeId(getTypeId(parameters.coords)), 2);
+#else
+ resultType = makeVectorType(makeFloatType(32), 2);
+#endif
+ break;
+ case OpImageQueryLevels:
+ case OpImageQuerySamples:
+ resultType = isUnsignedResult ? makeUintType(32) : makeIntType(32);
+ break;
+ default:
+ assert(0);
+ break;
+ }
+
+ Instruction* query = new Instruction(getUniqueId(), resultType, opCode);
+ query->addIdOperand(parameters.sampler);
+ if (parameters.coords)
+ query->addIdOperand(parameters.coords);
+ if (parameters.lod)
+ query->addIdOperand(parameters.lod);
+ buildPoint->addInstruction(std::unique_ptr<Instruction>(query));
+
+ return query->getResultId();
+}
+
+// External comments in header.
+// Operates recursively to visit the composite's hierarchy.
+Id Builder::createCompositeCompare(Decoration precision, Id value1, Id value2, bool equal)
+{
+ Id boolType = makeBoolType();
+ Id valueType = getTypeId(value1);
+
+ Id resultId = NoResult;
+
+ int numConstituents = getNumTypeConstituents(valueType);
+
+ // Scalars and Vectors
+
+ if (isScalarType(valueType) || isVectorType(valueType)) {
+ assert(valueType == getTypeId(value2));
+ // These just need a single comparison, just have
+ // to figure out what it is.
+ Op op;
+ switch (getMostBasicTypeClass(valueType)) {
+ case OpTypeFloat:
+ op = equal ? OpFOrdEqual : OpFOrdNotEqual;
+ break;
+ case OpTypeInt:
+ default:
+ op = equal ? OpIEqual : OpINotEqual;
+ break;
+ case OpTypeBool:
+ op = equal ? OpLogicalEqual : OpLogicalNotEqual;
+ precision = NoPrecision;
+ break;
+ }
+
+ if (isScalarType(valueType)) {
+ // scalar
+ resultId = createBinOp(op, boolType, value1, value2);
+ } else {
+ // vector
+ resultId = createBinOp(op, makeVectorType(boolType, numConstituents), value1, value2);
+ setPrecision(resultId, precision);
+ // reduce vector compares...
+ resultId = createUnaryOp(equal ? OpAll : OpAny, boolType, resultId);
+ }
+
+ return setPrecision(resultId, precision);
+ }
+
+ // Only structs, arrays, and matrices should be left.
+ // They share in common the reduction operation across their constituents.
+ assert(isAggregateType(valueType) || isMatrixType(valueType));
+
+ // Compare each pair of constituents
+ for (int constituent = 0; constituent < numConstituents; ++constituent) {
+ std::vector<unsigned> indexes(1, constituent);
+ Id constituentType1 = getContainedTypeId(getTypeId(value1), constituent);
+ Id constituentType2 = getContainedTypeId(getTypeId(value2), constituent);
+ Id constituent1 = createCompositeExtract(value1, constituentType1, indexes);
+ Id constituent2 = createCompositeExtract(value2, constituentType2, indexes);
+
+ Id subResultId = createCompositeCompare(precision, constituent1, constituent2, equal);
+
+ if (constituent == 0)
+ resultId = subResultId;
+ else
+ resultId = setPrecision(createBinOp(equal ? OpLogicalAnd : OpLogicalOr, boolType, resultId, subResultId), precision);
+ }
+
+ return resultId;
+}
+
+// OpCompositeConstruct
+Id Builder::createCompositeConstruct(Id typeId, const std::vector<Id>& constituents)
+{
+ assert(isAggregateType(typeId) || (getNumTypeConstituents(typeId) > 1 && getNumTypeConstituents(typeId) == (int)constituents.size()));
+
+ if (generatingOpCodeForSpecConst) {
+ // Sometimes, even in spec-constant-op mode, the constant composite to be
+ // constructed may not be a specialization constant.
+ // e.g.:
+ // const mat2 m2 = mat2(a_spec_const, a_front_end_const, another_front_end_const, third_front_end_const);
+ // The first column vector should be a spec constant one, as a_spec_const is a spec constant.
+ // The second column vector should NOT be spec constant, as it does not contain any spec constants.
+ // To handle such cases, we check the constituents of the constant vector to determine whether this
+ // vector should be created as a spec constant.
+ return makeCompositeConstant(typeId, constituents,
+ std::any_of(constituents.begin(), constituents.end(),
+ [&](spv::Id id) { return isSpecConstant(id); }));
+ }
+
+ Instruction* op = new Instruction(getUniqueId(), typeId, OpCompositeConstruct);
+ for (int c = 0; c < (int)constituents.size(); ++c)
+ op->addIdOperand(constituents[c]);
+ buildPoint->addInstruction(std::unique_ptr<Instruction>(op));
+
+ return op->getResultId();
+}
+
+// Vector or scalar constructor
+Id Builder::createConstructor(Decoration precision, const std::vector<Id>& sources, Id resultTypeId)
+{
+ Id result = NoResult;
+ unsigned int numTargetComponents = getNumTypeComponents(resultTypeId);
+ unsigned int targetComponent = 0;
+
+ // Special case: when calling a vector constructor with a single scalar
+ // argument, smear the scalar
+ if (sources.size() == 1 && isScalar(sources[0]) && numTargetComponents > 1)
+ return smearScalar(precision, sources[0], resultTypeId);
+
+ // accumulate the arguments for OpCompositeConstruct
+ std::vector<Id> constituents;
+ Id scalarTypeId = getScalarTypeId(resultTypeId);
+
+ // lambda to store the result of visiting an argument component
+ const auto latchResult = [&](Id comp) {
+ if (numTargetComponents > 1)
+ constituents.push_back(comp);
+ else
+ result = comp;
+ ++targetComponent;
+ };
+
+ // lambda to visit a vector argument's components
+ const auto accumulateVectorConstituents = [&](Id sourceArg) {
+ unsigned int sourceSize = getNumComponents(sourceArg);
+ unsigned int sourcesToUse = sourceSize;
+ if (sourcesToUse + targetComponent > numTargetComponents)
+ sourcesToUse = numTargetComponents - targetComponent;
+
+ for (unsigned int s = 0; s < sourcesToUse; ++s) {
+ std::vector<unsigned> swiz;
+ swiz.push_back(s);
+ latchResult(createRvalueSwizzle(precision, scalarTypeId, sourceArg, swiz));
+ }
+ };
+
+ // lambda to visit a matrix argument's components
+ const auto accumulateMatrixConstituents = [&](Id sourceArg) {
+ unsigned int sourceSize = getNumColumns(sourceArg) * getNumRows(sourceArg);
+ unsigned int sourcesToUse = sourceSize;
+ if (sourcesToUse + targetComponent > numTargetComponents)
+ sourcesToUse = numTargetComponents - targetComponent;
+
+ int col = 0;
+ int row = 0;
+ for (unsigned int s = 0; s < sourcesToUse; ++s) {
+ if (row >= getNumRows(sourceArg)) {
+ row = 0;
+ col++;
+ }
+ std::vector<Id> indexes;
+ indexes.push_back(col);
+ indexes.push_back(row);
+ latchResult(createCompositeExtract(sourceArg, scalarTypeId, indexes));
+ row++;
+ }
+ };
+
+ // Go through the source arguments, each one could have either
+ // a single or multiple components to contribute.
+ for (unsigned int i = 0; i < sources.size(); ++i) {
+
+ if (isScalar(sources[i]) || isPointer(sources[i]))
+ latchResult(sources[i]);
+ else if (isVector(sources[i]))
+ accumulateVectorConstituents(sources[i]);
+ else if (isMatrix(sources[i]))
+ accumulateMatrixConstituents(sources[i]);
+ else
+ assert(0);
+
+ if (targetComponent >= numTargetComponents)
+ break;
+ }
+
+ // If the result is a vector, make it from the gathered constituents.
+ if (constituents.size() > 0)
+ result = createCompositeConstruct(resultTypeId, constituents);
+
+ return setPrecision(result, precision);
+}
+
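+// Illustrative usage, not part of upstream glslang ('v2', 'f1', 'f2' are
+// placeholder Ids for a vec2 and two floats): a GLSL "vec4(v2, f1, f2)"
+// extracts both components of v2, then latches f1 and f2:
+// std::vector<Id> sources = { v2, f1, f2 };
+// Id v4 = createConstructor(NoPrecision, sources, makeVectorType(makeFloatType(32), 4));
+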
+// Comments in header
+Id Builder::createMatrixConstructor(Decoration precision, const std::vector<Id>& sources, Id resultTypeId)
+{
+ Id componentTypeId = getScalarTypeId(resultTypeId);
+ int numCols = getTypeNumColumns(resultTypeId);
+ int numRows = getTypeNumRows(resultTypeId);
+
+ Instruction* instr = module.getInstruction(componentTypeId);
+ unsigned bitCount = instr->getImmediateOperand(0);
+
+ // Optimize matrix constructed from a bigger matrix
+ if (isMatrix(sources[0]) && getNumColumns(sources[0]) >= numCols && getNumRows(sources[0]) >= numRows) {
+ // To truncate the matrix to a smaller number of rows/columns, we need to:
+ // 1. For each column, extract the column and truncate it to the required size using shuffle
+ // 2. Assemble the resulting matrix from all columns
+ Id matrix = sources[0];
+ Id columnTypeId = getContainedTypeId(resultTypeId);
+ Id sourceColumnTypeId = getContainedTypeId(getTypeId(matrix));
+
+ std::vector<unsigned> channels;
+ for (int row = 0; row < numRows; ++row)
+ channels.push_back(row);
+
+ std::vector<Id> matrixColumns;
+ for (int col = 0; col < numCols; ++col) {
+ std::vector<unsigned> indexes;
+ indexes.push_back(col);
+ Id colv = createCompositeExtract(matrix, sourceColumnTypeId, indexes);
+ setPrecision(colv, precision);
+
+ if (numRows != getNumRows(matrix)) {
+ matrixColumns.push_back(createRvalueSwizzle(precision, columnTypeId, colv, channels));
+ } else {
+ matrixColumns.push_back(colv);
+ }
+ }
+
+ return setPrecision(createCompositeConstruct(resultTypeId, matrixColumns), precision);
+ }
+
+ // Otherwise, use a two-step process:
+ // 1. make a compile-time 2D array of values
+ // 2. construct a matrix from that array
+
+ // Step 1.
+
+ // initialize the array to the identity matrix
+ Id ids[maxMatrixSize][maxMatrixSize];
+ Id one = (bitCount == 64 ? makeDoubleConstant(1.0) : makeFloatConstant(1.0));
+ Id zero = (bitCount == 64 ? makeDoubleConstant(0.0) : makeFloatConstant(0.0));
+ for (int col = 0; col < 4; ++col) {
+ for (int row = 0; row < 4; ++row) {
+ if (col == row)
+ ids[col][row] = one;
+ else
+ ids[col][row] = zero;
+ }
+ }
+
+ // modify components as dictated by the arguments
+ if (sources.size() == 1 && isScalar(sources[0])) {
+ // a single scalar; resets the diagonals
+ for (int col = 0; col < 4; ++col)
+ ids[col][col] = sources[0];
+ } else if (isMatrix(sources[0])) {
+ // constructing from another matrix; copy over the parts that exist in both the argument and constructee
+ Id matrix = sources[0];
+ int minCols = std::min(numCols, getNumColumns(matrix));
+ int minRows = std::min(numRows, getNumRows(matrix));
+ for (int col = 0; col < minCols; ++col) {
+ std::vector<unsigned> indexes;
+ indexes.push_back(col);
+ for (int row = 0; row < minRows; ++row) {
+ indexes.push_back(row);
+ ids[col][row] = createCompositeExtract(matrix, componentTypeId, indexes);
+ indexes.pop_back();
+ setPrecision(ids[col][row], precision);
+ }
+ }
+ } else {
+ // fill in the matrix in column-major order with whatever argument components are available
+ int row = 0;
+ int col = 0;
+
+ for (int arg = 0; arg < (int)sources.size(); ++arg) {
+ Id argComp = sources[arg];
+ for (int comp = 0; comp < getNumComponents(sources[arg]); ++comp) {
+ if (getNumComponents(sources[arg]) > 1) {
+ argComp = createCompositeExtract(sources[arg], componentTypeId, comp);
+ setPrecision(argComp, precision);
+ }
+ ids[col][row++] = argComp;
+ if (row == numRows) {
+ row = 0;
+ col++;
+ }
+ }
+ }
+ }
+
+ // Step 2: Construct a matrix from that array.
+ // First make the column vectors, then make the matrix.
+
+ // make the column vectors
+ Id columnTypeId = getContainedTypeId(resultTypeId);
+ std::vector<Id> matrixColumns;
+ for (int col = 0; col < numCols; ++col) {
+ std::vector<Id> vectorComponents;
+ for (int row = 0; row < numRows; ++row)
+ vectorComponents.push_back(ids[col][row]);
+ Id column = createCompositeConstruct(columnTypeId, vectorComponents);
+ setPrecision(column, precision);
+ matrixColumns.push_back(column);
+ }
+
+ // make the matrix
+ return setPrecision(createCompositeConstruct(resultTypeId, matrixColumns), precision);
+}
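+
+// For illustration (a reading of the code above): GLSL 'mat3(m4)' takes the
+// truncation path (extract three columns, shuffle each down to three rows,
+// reassemble), while 'mat4(2.0)' takes the two-step path: ids[][] starts as
+// the identity, the diagonal is overwritten with the scalar, and the columns
+// are then built with OpCompositeConstruct.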
+
+// Comments in header
+Builder::If::If(Id cond, unsigned int ctrl, Builder& gb) :
+ builder(gb),
+ condition(cond),
+ control(ctrl),
+ elseBlock(0)
+{
+ function = &builder.getBuildPoint()->getParent();
+
+ // make the blocks, but only put the then-block into the function,
+ // the else-block and merge-block will be added later, in order, after
+ // earlier code is emitted
+ thenBlock = new Block(builder.getUniqueId(), *function);
+ mergeBlock = new Block(builder.getUniqueId(), *function);
+
+ // Save the current block, so that we can add in the flow control split when
+ // makeEndIf is called.
+ headerBlock = builder.getBuildPoint();
+
+ function->addBlock(thenBlock);
+ builder.setBuildPoint(thenBlock);
+}
+
+// Comments in header
+void Builder::If::makeBeginElse()
+{
+ // Close out the "then" by having it jump to the mergeBlock
+ builder.createBranch(mergeBlock);
+
+ // Make the first else block and add it to the function
+ elseBlock = new Block(builder.getUniqueId(), *function);
+ function->addBlock(elseBlock);
+
+ // Start building the else block
+ builder.setBuildPoint(elseBlock);
+}
+
+// Comments in header
+void Builder::If::makeEndIf()
+{
+ // jump to the merge block
+ builder.createBranch(mergeBlock);
+
+ // Go back to the headerBlock and make the flow control split
+ builder.setBuildPoint(headerBlock);
+ builder.createSelectionMerge(mergeBlock, control);
+ if (elseBlock)
+ builder.createConditionalBranch(condition, thenBlock, elseBlock);
+ else
+ builder.createConditionalBranch(condition, thenBlock, mergeBlock);
+
+ // add the merge block to the function
+ function->addBlock(mergeBlock);
+ builder.setBuildPoint(mergeBlock);
+}
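+
+// Typical use of the If helper (a sketch of the expected calling pattern):
+//     spv::Builder::If ifBuilder(condition, spv::SelectionControlMaskNone, builder);
+//     ... emit "then" code ...
+//     ifBuilder.makeBeginElse();          // only if there is an else clause
+//     ... emit "else" code ...
+//     ifBuilder.makeEndIf();
+// The header block's OpSelectionMerge and OpBranchConditional are written only
+// in makeEndIf(), once it is known whether an else block exists.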
+
+// Comments in header
+void Builder::makeSwitch(Id selector, unsigned int control, int numSegments, const std::vector<int>& caseValues,
+ const std::vector<int>& valueIndexToSegment, int defaultSegment,
+ std::vector<Block*>& segmentBlocks)
+{
+ Function& function = buildPoint->getParent();
+
+ // make all the blocks
+ for (int s = 0; s < numSegments; ++s)
+ segmentBlocks.push_back(new Block(getUniqueId(), function));
+
+ Block* mergeBlock = new Block(getUniqueId(), function);
+
+ // make and insert the switch's selection-merge instruction
+ createSelectionMerge(mergeBlock, control);
+
+ // make the switch instruction
+ Instruction* switchInst = new Instruction(NoResult, NoType, OpSwitch);
+ switchInst->addIdOperand(selector);
+ auto defaultOrMerge = (defaultSegment >= 0) ? segmentBlocks[defaultSegment] : mergeBlock;
+ switchInst->addIdOperand(defaultOrMerge->getId());
+ defaultOrMerge->addPredecessor(buildPoint);
+ for (int i = 0; i < (int)caseValues.size(); ++i) {
+ switchInst->addImmediateOperand(caseValues[i]);
+ switchInst->addIdOperand(segmentBlocks[valueIndexToSegment[i]]->getId());
+ segmentBlocks[valueIndexToSegment[i]]->addPredecessor(buildPoint);
+ }
+ buildPoint->addInstruction(std::unique_ptr<Instruction>(switchInst));
+
+ // push the merge block
+ switchMerges.push(mergeBlock);
+}
+
+// Comments in header
+void Builder::addSwitchBreak()
+{
+ // branch to the top of the merge block stack
+ createBranch(switchMerges.top());
+ createAndSetNoPredecessorBlock("post-switch-break");
+}
+
+// Comments in header
+void Builder::nextSwitchSegment(std::vector<Block*>& segmentBlock, int nextSegment)
+{
+ int lastSegment = nextSegment - 1;
+ if (lastSegment >= 0) {
+ // Close out previous segment by jumping, if necessary, to next segment
+ if (! buildPoint->isTerminated())
+ createBranch(segmentBlock[nextSegment]);
+ }
+ Block* block = segmentBlock[nextSegment];
+ block->getParent().addBlock(block);
+ setBuildPoint(block);
+}
+
+// Comments in header
+void Builder::endSwitch(std::vector<Block*>& /*segmentBlock*/)
+{
+ // Close out previous segment by jumping, if necessary, to next segment
+ if (! buildPoint->isTerminated())
+ addSwitchBreak();
+
+ switchMerges.top()->getParent().addBlock(switchMerges.top());
+ setBuildPoint(switchMerges.top());
+
+ switchMerges.pop();
+}
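+
+// Sketch of the expected call sequence for 'switch (s) { case 0: ...; default: ... }'
+// (two segments, segment 1 being the default):
+//     std::vector<spv::Block*> segments;
+//     builder.makeSwitch(s, control, 2, {0}, {0}, 1, segments);
+//     builder.nextSwitchSegment(segments, 0);   // case 0 body
+//     builder.addSwitchBreak();
+//     builder.nextSwitchSegment(segments, 1);   // default body
+//     builder.endSwitch(segments);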
+
+Block& Builder::makeNewBlock()
+{
+ Function& function = buildPoint->getParent();
+ auto block = new Block(getUniqueId(), function);
+ function.addBlock(block);
+ return *block;
+}
+
+Builder::LoopBlocks& Builder::makeNewLoop()
+{
+ // This verbosity is needed to simultaneously get the same behavior
+ // everywhere (id's in the same order), have a syntax that works
+ // across lots of versions of C++, have no warnings from pedantic
+ // compilation modes, and leave the rest of the code alone.
+ Block& head = makeNewBlock();
+ Block& body = makeNewBlock();
+ Block& merge = makeNewBlock();
+ Block& continue_target = makeNewBlock();
+ LoopBlocks blocks(head, body, merge, continue_target);
+ loops.push(blocks);
+ return loops.top();
+}
+
+void Builder::createLoopContinue()
+{
+ createBranch(&loops.top().continue_target);
+ // Set up a block for dead code.
+ createAndSetNoPredecessorBlock("post-loop-continue");
+}
+
+void Builder::createLoopExit()
+{
+ createBranch(&loops.top().merge);
+ // Set up a block for dead code.
+ createAndSetNoPredecessorBlock("post-loop-break");
+}
+
+void Builder::closeLoop()
+{
+ loops.pop();
+}
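+
+// Sketch of driving the loop helpers for a 'while (cond) { ... }' shape: branch
+// to blocks.head and emit OpLoopMerge(merge, continue_target) there, then
+// conditionally branch to blocks.body or blocks.merge; end the body by branching
+// to blocks.continue_target, which branches back to head; finally set the build
+// point to blocks.merge and call closeLoop(). createLoopContinue() and
+// createLoopExit() implement 'continue' and 'break' inside the body.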
+
+void Builder::clearAccessChain()
+{
+ accessChain.base = NoResult;
+ accessChain.indexChain.clear();
+ accessChain.instr = NoResult;
+ accessChain.swizzle.clear();
+ accessChain.component = NoResult;
+ accessChain.preSwizzleBaseType = NoType;
+ accessChain.isRValue = false;
+ accessChain.coherentFlags.clear();
+ accessChain.alignment = 0;
+}
+
+// Comments in header
+void Builder::accessChainPushSwizzle(std::vector<unsigned>& swizzle, Id preSwizzleBaseType, AccessChain::CoherentFlags coherentFlags, unsigned int alignment)
+{
+ accessChain.coherentFlags |= coherentFlags;
+ accessChain.alignment |= alignment;
+
+ // swizzles can be stacked in GLSL, but simplified to a single
+ // one here; the base type doesn't change
+ if (accessChain.preSwizzleBaseType == NoType)
+ accessChain.preSwizzleBaseType = preSwizzleBaseType;
+
+ // if needed, propagate the swizzle for the current access chain
+ if (accessChain.swizzle.size() > 0) {
+ std::vector<unsigned> oldSwizzle = accessChain.swizzle;
+ accessChain.swizzle.resize(0);
+ for (unsigned int i = 0; i < swizzle.size(); ++i) {
+ assert(swizzle[i] < oldSwizzle.size());
+ accessChain.swizzle.push_back(oldSwizzle[swizzle[i]]);
+ }
+ } else
+ accessChain.swizzle = swizzle;
+
+ // determine if we need to track this swizzle anymore
+ simplifyAccessChainSwizzle();
+}
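+
+// For illustration: stacked GLSL swizzles such as 'v.zwxy.yx' compose here into
+// the single swizzle {3, 2} (oldSwizzle {2,3,0,1} indexed by the new {1,0}),
+// so at most one OpVectorShuffle is ever needed per access chain.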
+
+// Comments in header
+void Builder::accessChainStore(Id rvalue, spv::MemoryAccessMask memoryAccess, spv::Scope scope, unsigned int alignment)
+{
+ assert(accessChain.isRValue == false);
+
+ transferAccessChainSwizzle(true);
+ Id base = collapseAccessChain();
+ Id source = rvalue;
+
+ // dynamic component should be gone
+ assert(accessChain.component == NoResult);
+
+ // If a swizzle still exists, it is either out-of-order or not full, so we must load the
+ // target vector and extract/insert elements to perform the writeMask and/or swizzle.
+ if (accessChain.swizzle.size() > 0) {
+ Id tempBaseId = createLoad(base);
+ source = createLvalueSwizzle(getTypeId(tempBaseId), tempBaseId, source, accessChain.swizzle);
+ }
+
+ // reduce alignment to its least-significant set bit
+ alignment = alignment & ~(alignment & (alignment-1));
+ if (getStorageClass(base) == StorageClassPhysicalStorageBufferEXT) {
+ memoryAccess = (spv::MemoryAccessMask)(memoryAccess | spv::MemoryAccessAlignedMask);
+ }
+
+ createStore(source, base, memoryAccess, scope, alignment);
+}
+
+// Comments in header
+Id Builder::accessChainLoad(Decoration precision, Decoration nonUniform, Id resultType, spv::MemoryAccessMask memoryAccess, spv::Scope scope, unsigned int alignment)
+{
+ Id id;
+
+ if (accessChain.isRValue) {
+ // transfer access chain, but try to stay in registers
+ transferAccessChainSwizzle(false);
+ if (accessChain.indexChain.size() > 0) {
+ Id swizzleBase = accessChain.preSwizzleBaseType != NoType ? accessChain.preSwizzleBaseType : resultType;
+
+ // if all the accesses are constants, we can use OpCompositeExtract
+ std::vector<unsigned> indexes;
+ bool constant = true;
+ for (int i = 0; i < (int)accessChain.indexChain.size(); ++i) {
+ if (isConstantScalar(accessChain.indexChain[i]))
+ indexes.push_back(getConstantScalar(accessChain.indexChain[i]));
+ else {
+ constant = false;
+ break;
+ }
+ }
+
+ if (constant) {
+ id = createCompositeExtract(accessChain.base, swizzleBase, indexes);
+ } else {
+ Id lValue = NoResult;
+ if (spvVersion >= Spv_1_4) {
+ // make a new function variable for this r-value, using an initializer,
+ // and mark it as NonWritable so that downstream it can be detected as a lookup
+ // table
+ lValue = createVariable(StorageClassFunction, getTypeId(accessChain.base), "indexable",
+ accessChain.base);
+ addDecoration(lValue, DecorationNonWritable);
+ } else {
+ lValue = createVariable(StorageClassFunction, getTypeId(accessChain.base), "indexable");
+ // store into it
+ createStore(accessChain.base, lValue);
+ }
+ // move base to the new variable
+ accessChain.base = lValue;
+ accessChain.isRValue = false;
+
+ // load through the access chain
+ id = createLoad(collapseAccessChain());
+ }
+ setPrecision(id, precision);
+ } else
+ id = accessChain.base; // no precision, it was set when this was defined
+ } else {
+ transferAccessChainSwizzle(true);
+
+ // reduce alignment to its least-significant set bit
+ alignment = alignment & ~(alignment & (alignment-1));
+ if (getStorageClass(accessChain.base) == StorageClassPhysicalStorageBufferEXT) {
+ memoryAccess = (spv::MemoryAccessMask)(memoryAccess | spv::MemoryAccessAlignedMask);
+ }
+
+ // load through the access chain
+ id = createLoad(collapseAccessChain(), memoryAccess, scope, alignment);
+ setPrecision(id, precision);
+ addDecoration(id, nonUniform);
+ }
+
+ // Done, unless there are swizzles to do
+ if (accessChain.swizzle.size() == 0 && accessChain.component == NoResult)
+ return id;
+
+ // Do remaining swizzling
+
+ // Do the basic swizzle
+ if (accessChain.swizzle.size() > 0) {
+ Id swizzledType = getScalarTypeId(getTypeId(id));
+ if (accessChain.swizzle.size() > 1)
+ swizzledType = makeVectorType(swizzledType, (int)accessChain.swizzle.size());
+ id = createRvalueSwizzle(precision, swizzledType, id, accessChain.swizzle);
+ }
+
+ // Do the dynamic component
+ if (accessChain.component != NoResult)
+ id = setPrecision(createVectorExtractDynamic(id, resultType, accessChain.component), precision);
+
+ addDecoration(id, nonUniform);
+ return id;
+}
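+
+// For illustration: with an r-value base, a constant index (e.g. 'lut[2]' on a
+// constant-indexed composite) collapses to OpCompositeExtract; a variable index
+// instead forces the r-value into a temporary 'indexable' function variable
+// (marked NonWritable on SPIR-V 1.4+) so OpAccessChain/OpLoad can be used.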
+
+Id Builder::accessChainGetLValue()
+{
+ assert(accessChain.isRValue == false);
+
+ transferAccessChainSwizzle(true);
+ Id lvalue = collapseAccessChain();
+
+ // If a swizzle still existed here, it would be out-of-order or not full, requiring
+ // a load of the target vector and extract/insert of elements to perform the
+ // writeMask and/or swizzle; that does not go with getting a direct l-value pointer.
+ assert(accessChain.swizzle.size() == 0);
+ assert(accessChain.component == NoResult);
+
+ return lvalue;
+}
+
+// comment in header
+Id Builder::accessChainGetInferredType()
+{
+ // anything to operate on?
+ if (accessChain.base == NoResult)
+ return NoType;
+ Id type = getTypeId(accessChain.base);
+
+ // do initial dereference
+ if (! accessChain.isRValue)
+ type = getContainedTypeId(type);
+
+ // dereference each index
+ for (auto it = accessChain.indexChain.cbegin(); it != accessChain.indexChain.cend(); ++it) {
+ if (isStructType(type))
+ type = getContainedTypeId(type, getConstantScalar(*it));
+ else
+ type = getContainedTypeId(type);
+ }
+
+ // dereference swizzle
+ if (accessChain.swizzle.size() == 1)
+ type = getContainedTypeId(type);
+ else if (accessChain.swizzle.size() > 1)
+ type = makeVectorType(getContainedTypeId(type), (int)accessChain.swizzle.size());
+
+ // dereference component selection
+ if (accessChain.component)
+ type = getContainedTypeId(type);
+
+ return type;
+}
+
+void Builder::dump(std::vector<unsigned int>& out) const
+{
+ // Header, before first instructions:
+ out.push_back(MagicNumber);
+ out.push_back(spvVersion);
+ out.push_back(builderNumber);
+ out.push_back(uniqueId + 1);
+ out.push_back(0);
+
+ // Capabilities
+ for (auto it = capabilities.cbegin(); it != capabilities.cend(); ++it) {
+ Instruction capInst(0, 0, OpCapability);
+ capInst.addImmediateOperand(*it);
+ capInst.dump(out);
+ }
+
+ for (auto it = extensions.cbegin(); it != extensions.cend(); ++it) {
+ Instruction extInst(0, 0, OpExtension);
+ extInst.addStringOperand(it->c_str());
+ extInst.dump(out);
+ }
+
+ dumpInstructions(out, imports);
+ Instruction memInst(0, 0, OpMemoryModel);
+ memInst.addImmediateOperand(addressModel);
+ memInst.addImmediateOperand(memoryModel);
+ memInst.dump(out);
+
+ // Instructions saved up while building:
+ dumpInstructions(out, entryPoints);
+ dumpInstructions(out, executionModes);
+
+ // Debug instructions
+ dumpInstructions(out, strings);
+ dumpSourceInstructions(out);
+ for (int e = 0; e < (int)sourceExtensions.size(); ++e) {
+ Instruction sourceExtInst(0, 0, OpSourceExtension);
+ sourceExtInst.addStringOperand(sourceExtensions[e]);
+ sourceExtInst.dump(out);
+ }
+ dumpInstructions(out, names);
+ dumpModuleProcesses(out);
+
+ // Annotation instructions
+ dumpInstructions(out, decorations);
+
+ dumpInstructions(out, constantsTypesGlobals);
+ dumpInstructions(out, externals);
+
+ // The functions
+ module.dump(out);
+}
+
+//
+// Protected methods.
+//
+
+// Turn the described access chain in 'accessChain' into an instruction(s)
+// computing its address. This *cannot* include complex swizzles, which must
+// be handled after this is called.
+//
+// Can generate code.
+Id Builder::collapseAccessChain()
+{
+ assert(accessChain.isRValue == false);
+
+ // did we already emit an access chain for this?
+ if (accessChain.instr != NoResult)
+ return accessChain.instr;
+
+ // If we have a dynamic component, we can still transfer
+ // that into a final operand to the access chain. We need to remap the
+ // dynamic component through the swizzle to get a new dynamic component to
+ // update.
+ //
+ // This was not done in transferAccessChainSwizzle() because it might
+ // generate code.
+ remapDynamicSwizzle();
+ if (accessChain.component != NoResult) {
+ // transfer the dynamic component to the access chain
+ accessChain.indexChain.push_back(accessChain.component);
+ accessChain.component = NoResult;
+ }
+
+ // note that non-trivial swizzling is left pending
+
+ // do we have an access chain?
+ if (accessChain.indexChain.size() == 0)
+ return accessChain.base;
+
+ // emit the access chain
+ StorageClass storageClass = (StorageClass)module.getStorageClass(getTypeId(accessChain.base));
+ accessChain.instr = createAccessChain(storageClass, accessChain.base, accessChain.indexChain);
+
+ return accessChain.instr;
+}
+
+// For a dynamic component selection of a swizzle.
+//
+// Turn the swizzle and dynamic component into just a dynamic component.
+//
+// Generates code.
+void Builder::remapDynamicSwizzle()
+{
+ // do we have a swizzle to remap a dynamic component through?
+ if (accessChain.component != NoResult && accessChain.swizzle.size() > 1) {
+ // build a vector of the swizzle for the component to map into
+ std::vector<Id> components;
+ for (int c = 0; c < (int)accessChain.swizzle.size(); ++c)
+ components.push_back(makeUintConstant(accessChain.swizzle[c]));
+ Id mapType = makeVectorType(makeUintType(32), (int)accessChain.swizzle.size());
+ Id map = makeCompositeConstant(mapType, components);
+
+ // use it
+ accessChain.component = createVectorExtractDynamic(map, makeUintType(32), accessChain.component);
+ accessChain.swizzle.clear();
+ }
+}
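+
+// For illustration: for 'v.yzw[i]' the map vector is (1, 2, 3); the dynamic
+// component becomes map[i] via OpVectorExtractDynamic, and the swizzle is
+// dropped, leaving a plain dynamic component selection on v.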
+
+// Clear out the swizzle if it is redundant, that is, if it reselects the same
+// components that would be present without the swizzle.
+void Builder::simplifyAccessChainSwizzle()
+{
+ // If the swizzle has fewer components than the vector, it is subsetting, and must stay
+ // to preserve that fact.
+ if (getNumTypeComponents(accessChain.preSwizzleBaseType) > (int)accessChain.swizzle.size())
+ return;
+
+ // if components are out of order, it is a swizzle
+ for (unsigned int i = 0; i < accessChain.swizzle.size(); ++i) {
+ if (i != accessChain.swizzle[i])
+ return;
+ }
+
+ // otherwise, there is no need to track this swizzle
+ accessChain.swizzle.clear();
+ if (accessChain.component == NoResult)
+ accessChain.preSwizzleBaseType = NoType;
+}
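+
+// For illustration: on a vec4, '.xyzw' reselects every component in order and
+// is dropped here, while '.xyz' (a subset) or '.yxzw' (out of order) must stay.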
+
+// To the extent any swizzling can become part of the chain
+// of accesses instead of a post operation, make it so.
+// If 'dynamic' is true, include transferring the dynamic component,
+// otherwise, leave it pending.
+//
+// Does not generate code; it just updates the access chain.
+void Builder::transferAccessChainSwizzle(bool dynamic)
+{
+ // non-existent?
+ if (accessChain.swizzle.size() == 0 && accessChain.component == NoResult)
+ return;
+
+ // too complex?
+ // (this requires either a swizzle, or generating code for a dynamic component)
+ if (accessChain.swizzle.size() > 1)
+ return;
+
+ // single component, either in the swizzle and/or dynamic component
+ if (accessChain.swizzle.size() == 1) {
+ assert(accessChain.component == NoResult);
+ // handle static component selection
+ accessChain.indexChain.push_back(makeUintConstant(accessChain.swizzle.front()));
+ accessChain.swizzle.clear();
+ accessChain.preSwizzleBaseType = NoType;
+ } else if (dynamic && accessChain.component != NoResult) {
+ assert(accessChain.swizzle.size() == 0);
+ // handle dynamic component
+ accessChain.indexChain.push_back(accessChain.component);
+ accessChain.preSwizzleBaseType = NoType;
+ accessChain.component = NoResult;
+ }
+}
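+
+// For illustration: 'v.y' becomes a constant index 1 appended to indexChain
+// (so the eventual OpAccessChain points directly at that component), while
+// 'v.yx' stays pending as a post-load/post-store swizzle.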
+
+// Utility method for creating a new block and setting the insert point to
+// be in it. This is useful for flow-control operations that need a "dummy"
+// block following them (e.g., to hold the instructions after a discard).
+void Builder::createAndSetNoPredecessorBlock(const char* /*name*/)
+{
+ Block* block = new Block(getUniqueId(), buildPoint->getParent());
+ block->setUnreachable();
+ buildPoint->getParent().addBlock(block);
+ setBuildPoint(block);
+
+ // if (name)
+ // addName(block->getId(), name);
+}
+
+// Comments in header
+void Builder::createBranch(Block* block)
+{
+ Instruction* branch = new Instruction(OpBranch);
+ branch->addIdOperand(block->getId());
+ buildPoint->addInstruction(std::unique_ptr<Instruction>(branch));
+ block->addPredecessor(buildPoint);
+}
+
+void Builder::createSelectionMerge(Block* mergeBlock, unsigned int control)
+{
+ Instruction* merge = new Instruction(OpSelectionMerge);
+ merge->addIdOperand(mergeBlock->getId());
+ merge->addImmediateOperand(control);
+ buildPoint->addInstruction(std::unique_ptr<Instruction>(merge));
+}
+
+void Builder::createLoopMerge(Block* mergeBlock, Block* continueBlock, unsigned int control,
+ const std::vector<unsigned int>& operands)
+{
+ Instruction* merge = new Instruction(OpLoopMerge);
+ merge->addIdOperand(mergeBlock->getId());
+ merge->addIdOperand(continueBlock->getId());
+ merge->addImmediateOperand(control);
+ for (int op = 0; op < (int)operands.size(); ++op)
+ merge->addImmediateOperand(operands[op]);
+ buildPoint->addInstruction(std::unique_ptr<Instruction>(merge));
+}
+
+void Builder::createConditionalBranch(Id condition, Block* thenBlock, Block* elseBlock)
+{
+ Instruction* branch = new Instruction(OpBranchConditional);
+ branch->addIdOperand(condition);
+ branch->addIdOperand(thenBlock->getId());
+ branch->addIdOperand(elseBlock->getId());
+ buildPoint->addInstruction(std::unique_ptr<Instruction>(branch));
+ thenBlock->addPredecessor(buildPoint);
+ elseBlock->addPredecessor(buildPoint);
+}
+
+// OpSource
+// [OpSourceContinued]
+// ...
+void Builder::dumpSourceInstructions(const spv::Id fileId, const std::string& text,
+ std::vector<unsigned int>& out) const
+{
+ const int maxWordCount = 0xFFFF;
+ const int opSourceWordCount = 4;
+ const int nonNullBytesPerInstruction = 4 * (maxWordCount - opSourceWordCount) - 1;
+
+ if (source != SourceLanguageUnknown) {
+ // OpSource Language Version File Source
+ Instruction sourceInst(NoResult, NoType, OpSource);
+ sourceInst.addImmediateOperand(source);
+ sourceInst.addImmediateOperand(sourceVersion);
+ // File operand
+ if (fileId != NoResult) {
+ sourceInst.addIdOperand(fileId);
+ // Source operand
+ if (text.size() > 0) {
+ int nextByte = 0;
+ std::string subString;
+ while ((int)text.size() - nextByte > 0) {
+ subString = text.substr(nextByte, nonNullBytesPerInstruction);
+ if (nextByte == 0) {
+ // OpSource
+ sourceInst.addStringOperand(subString.c_str());
+ sourceInst.dump(out);
+ } else {
+ // OpSourceContinued
+ Instruction sourceContinuedInst(OpSourceContinued);
+ sourceContinuedInst.addStringOperand(subString.c_str());
+ sourceContinuedInst.dump(out);
+ }
+ nextByte += nonNullBytesPerInstruction;
+ }
+ } else
+ sourceInst.dump(out);
+ } else
+ sourceInst.dump(out);
+ }
+}
+
+// Dump an OpSource[Continued] sequence for the source and every include file
+void Builder::dumpSourceInstructions(std::vector<unsigned int>& out) const
+{
+ dumpSourceInstructions(sourceFileStringId, sourceText, out);
+ for (auto iItr = includeFiles.begin(); iItr != includeFiles.end(); ++iItr)
+ dumpSourceInstructions(iItr->first, *iItr->second, out);
+}
+
+void Builder::dumpInstructions(std::vector<unsigned int>& out, const std::vector<std::unique_ptr<Instruction> >& instructions) const
+{
+ for (int i = 0; i < (int)instructions.size(); ++i) {
+ instructions[i]->dump(out);
+ }
+}
+
+void Builder::dumpModuleProcesses(std::vector<unsigned int>& out) const
+{
+ for (int i = 0; i < (int)moduleProcesses.size(); ++i) {
+ Instruction moduleProcessed(OpModuleProcessed);
+ moduleProcessed.addStringOperand(moduleProcesses[i]);
+ moduleProcessed.dump(out);
+ }
+}
+
+}; // end spv namespace
diff --git a/thirdparty/glslang/SPIRV/SpvBuilder.h b/thirdparty/glslang/SPIRV/SpvBuilder.h
new file mode 100644
index 0000000000..faed8e8230
--- /dev/null
+++ b/thirdparty/glslang/SPIRV/SpvBuilder.h
@@ -0,0 +1,758 @@
+//
+// Copyright (C) 2014-2015 LunarG, Inc.
+// Copyright (C) 2015-2018 Google, Inc.
+// Copyright (C) 2017 ARM Limited.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+
+//
+// "Builder" is an interface to fully build SPIR-V IR. Allocate one of
+// these to build a (thread-safe) internal SPIR-V representation (IR),
+// and then dump it as a binary stream according to the SPIR-V specification.
+//
+// A Builder has a 1:1 relationship with a SPIR-V module.
+//
+
+#pragma once
+#ifndef SpvBuilder_H
+#define SpvBuilder_H
+
+#include "Logger.h"
+#include "spirv.hpp"
+#include "spvIR.h"
+
+#include <algorithm>
+#include <map>
+#include <memory>
+#include <set>
+#include <sstream>
+#include <stack>
+#include <unordered_map>
+
+namespace spv {
+
+typedef enum {
+ Spv_1_0 = (1 << 16),
+ Spv_1_1 = (1 << 16) | (1 << 8),
+ Spv_1_2 = (1 << 16) | (2 << 8),
+ Spv_1_3 = (1 << 16) | (3 << 8),
+ Spv_1_4 = (1 << 16) | (4 << 8),
+} SpvVersion;
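+
+// The version word is laid out as 0x00MMmm00 (major in bits 16..23, minor in
+// bits 8..15), so, e.g., Spv_1_4 == 0x00010400.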
+
+class Builder {
+public:
+ Builder(unsigned int spvVersion, unsigned int userNumber, SpvBuildLogger* logger);
+ virtual ~Builder();
+
+ static const int maxMatrixSize = 4;
+
+ unsigned int getSpvVersion() const { return spvVersion; }
+
+ void setSource(spv::SourceLanguage lang, int version)
+ {
+ source = lang;
+ sourceVersion = version;
+ }
+ spv::Id getStringId(const std::string& str)
+ {
+ auto sItr = stringIds.find(str);
+ if (sItr != stringIds.end())
+ return sItr->second;
+ spv::Id strId = getUniqueId();
+ Instruction* fileString = new Instruction(strId, NoType, OpString);
+ const char* file_c_str = str.c_str();
+ fileString->addStringOperand(file_c_str);
+ strings.push_back(std::unique_ptr<Instruction>(fileString));
+ stringIds[file_c_str] = strId;
+ return strId;
+ }
+ void setSourceFile(const std::string& file)
+ {
+ sourceFileStringId = getStringId(file);
+ }
+ void setSourceText(const std::string& text) { sourceText = text; }
+ void addSourceExtension(const char* ext) { sourceExtensions.push_back(ext); }
+ void addModuleProcessed(const std::string& p) { moduleProcesses.push_back(p.c_str()); }
+ void setEmitOpLines() { emitOpLines = true; }
+ void addExtension(const char* ext) { extensions.insert(ext); }
+ void addInclude(const std::string& name, const std::string& text)
+ {
+ spv::Id incId = getStringId(name);
+ includeFiles[incId] = &text;
+ }
+ Id import(const char*);
+ void setMemoryModel(spv::AddressingModel addr, spv::MemoryModel mem)
+ {
+ addressModel = addr;
+ memoryModel = mem;
+ }
+
+ void addCapability(spv::Capability cap) { capabilities.insert(cap); }
+
+ // To get a new <id> for anything needing a new one.
+ Id getUniqueId() { return ++uniqueId; }
+
+ // To get a set of new <id>s, e.g., for a set of function parameters
+ Id getUniqueIds(int numIds)
+ {
+ Id id = uniqueId + 1;
+ uniqueId += numIds;
+ return id;
+ }
+
+ // Generate OpLine for non-filename-based #line directives (i.e., no filename
+ // seen yet): Log the current line, and if different than the last one,
+ // issue a new OpLine using the new line and current source file name.
+ void setLine(int line);
+
+ // If filename null, generate OpLine for non-filename-based line directives,
+ // else do filename-based: Log the current line and file, and if different
+ // than the last one, issue a new OpLine using the new line and file
+ // name.
+ void setLine(int line, const char* filename);
+ // Low-level OpLine. See setLine() for a layered helper.
+ void addLine(Id fileName, int line, int column);
+
+ // For creating new types (will return old type if the requested one was already made).
+ Id makeVoidType();
+ Id makeBoolType();
+ Id makePointer(StorageClass, Id pointee);
+ Id makeForwardPointer(StorageClass);
+ Id makePointerFromForwardPointer(StorageClass, Id forwardPointerType, Id pointee);
+ Id makeIntegerType(int width, bool hasSign); // generic
+ Id makeIntType(int width) { return makeIntegerType(width, true); }
+ Id makeUintType(int width) { return makeIntegerType(width, false); }
+ Id makeFloatType(int width);
+ Id makeStructType(const std::vector<Id>& members, const char*);
+ Id makeStructResultType(Id type0, Id type1);
+ Id makeVectorType(Id component, int size);
+ Id makeMatrixType(Id component, int cols, int rows);
+ Id makeArrayType(Id element, Id sizeId, int stride); // 0 stride means no stride decoration
+ Id makeRuntimeArray(Id element);
+ Id makeFunctionType(Id returnType, const std::vector<Id>& paramTypes);
+ Id makeImageType(Id sampledType, Dim, bool depth, bool arrayed, bool ms, unsigned sampled, ImageFormat format);
+ Id makeSamplerType();
+ Id makeSampledImageType(Id imageType);
+ Id makeCooperativeMatrixType(Id component, Id scope, Id rows, Id cols);
+
+ // accelerationStructureNV type
+ Id makeAccelerationStructureNVType();
+
+ // For querying about types.
+ Id getTypeId(Id resultId) const { return module.getTypeId(resultId); }
+ Id getDerefTypeId(Id resultId) const;
+ Op getOpCode(Id id) const { return module.getInstruction(id)->getOpCode(); }
+ Op getTypeClass(Id typeId) const { return getOpCode(typeId); }
+ Op getMostBasicTypeClass(Id typeId) const;
+ int getNumComponents(Id resultId) const { return getNumTypeComponents(getTypeId(resultId)); }
+ int getNumTypeConstituents(Id typeId) const;
+ int getNumTypeComponents(Id typeId) const { return getNumTypeConstituents(typeId); }
+ Id getScalarTypeId(Id typeId) const;
+ Id getContainedTypeId(Id typeId) const;
+ Id getContainedTypeId(Id typeId, int) const;
+ StorageClass getTypeStorageClass(Id typeId) const { return module.getStorageClass(typeId); }
+ ImageFormat getImageTypeFormat(Id typeId) const { return (ImageFormat)module.getInstruction(typeId)->getImmediateOperand(6); }
+
+ bool isPointer(Id resultId) const { return isPointerType(getTypeId(resultId)); }
+ bool isScalar(Id resultId) const { return isScalarType(getTypeId(resultId)); }
+ bool isVector(Id resultId) const { return isVectorType(getTypeId(resultId)); }
+ bool isMatrix(Id resultId) const { return isMatrixType(getTypeId(resultId)); }
+ bool isCooperativeMatrix(Id resultId)const { return isCooperativeMatrixType(getTypeId(resultId)); }
+ bool isAggregate(Id resultId) const { return isAggregateType(getTypeId(resultId)); }
+ bool isSampledImage(Id resultId) const { return isSampledImageType(getTypeId(resultId)); }
+
+ bool isBoolType(Id typeId) { return groupedTypes[OpTypeBool].size() > 0 && typeId == groupedTypes[OpTypeBool].back()->getResultId(); }
+ bool isIntType(Id typeId) const { return getTypeClass(typeId) == OpTypeInt && module.getInstruction(typeId)->getImmediateOperand(1) != 0; }
+ bool isUintType(Id typeId) const { return getTypeClass(typeId) == OpTypeInt && module.getInstruction(typeId)->getImmediateOperand(1) == 0; }
+ bool isFloatType(Id typeId) const { return getTypeClass(typeId) == OpTypeFloat; }
+ bool isPointerType(Id typeId) const { return getTypeClass(typeId) == OpTypePointer; }
+ bool isScalarType(Id typeId) const { return getTypeClass(typeId) == OpTypeFloat || getTypeClass(typeId) == OpTypeInt || getTypeClass(typeId) == OpTypeBool; }
+ bool isVectorType(Id typeId) const { return getTypeClass(typeId) == OpTypeVector; }
+ bool isMatrixType(Id typeId) const { return getTypeClass(typeId) == OpTypeMatrix; }
+ bool isStructType(Id typeId) const { return getTypeClass(typeId) == OpTypeStruct; }
+ bool isArrayType(Id typeId) const { return getTypeClass(typeId) == OpTypeArray; }
+ bool isCooperativeMatrixType(Id typeId)const { return getTypeClass(typeId) == OpTypeCooperativeMatrixNV; }
+ bool isAggregateType(Id typeId) const { return isArrayType(typeId) || isStructType(typeId) || isCooperativeMatrixType(typeId); }
+ bool isImageType(Id typeId) const { return getTypeClass(typeId) == OpTypeImage; }
+ bool isSamplerType(Id typeId) const { return getTypeClass(typeId) == OpTypeSampler; }
+ bool isSampledImageType(Id typeId) const { return getTypeClass(typeId) == OpTypeSampledImage; }
+ bool containsType(Id typeId, Op typeOp, unsigned int width) const;
+ bool containsPhysicalStorageBufferOrArray(Id typeId) const;
+
+ bool isConstantOpCode(Op opcode) const;
+ bool isSpecConstantOpCode(Op opcode) const;
+ bool isConstant(Id resultId) const { return isConstantOpCode(getOpCode(resultId)); }
+ bool isConstantScalar(Id resultId) const { return getOpCode(resultId) == OpConstant; }
+ bool isSpecConstant(Id resultId) const { return isSpecConstantOpCode(getOpCode(resultId)); }
+ unsigned int getConstantScalar(Id resultId) const { return module.getInstruction(resultId)->getImmediateOperand(0); }
+ StorageClass getStorageClass(Id resultId) const { return getTypeStorageClass(getTypeId(resultId)); }
+
+ int getScalarTypeWidth(Id typeId) const
+ {
+ Id scalarTypeId = getScalarTypeId(typeId);
+ assert(getTypeClass(scalarTypeId) == OpTypeInt || getTypeClass(scalarTypeId) == OpTypeFloat);
+ return module.getInstruction(scalarTypeId)->getImmediateOperand(0);
+ }
+
+ int getTypeNumColumns(Id typeId) const
+ {
+ assert(isMatrixType(typeId));
+ return getNumTypeConstituents(typeId);
+ }
+ int getNumColumns(Id resultId) const { return getTypeNumColumns(getTypeId(resultId)); }
+ int getTypeNumRows(Id typeId) const
+ {
+ assert(isMatrixType(typeId));
+ return getNumTypeComponents(getContainedTypeId(typeId));
+ }
+ int getNumRows(Id resultId) const { return getTypeNumRows(getTypeId(resultId)); }
+
+ Dim getTypeDimensionality(Id typeId) const
+ {
+ assert(isImageType(typeId));
+ return (Dim)module.getInstruction(typeId)->getImmediateOperand(1);
+ }
+ Id getImageType(Id resultId) const
+ {
+ Id typeId = getTypeId(resultId);
+ assert(isImageType(typeId) || isSampledImageType(typeId));
+ return isSampledImageType(typeId) ? module.getInstruction(typeId)->getIdOperand(0) : typeId;
+ }
+ bool isArrayedImageType(Id typeId) const
+ {
+ assert(isImageType(typeId));
+ return module.getInstruction(typeId)->getImmediateOperand(3) != 0;
+ }
+
+ // For making new constants (will return old constant if the requested one was already made).
+ Id makeBoolConstant(bool b, bool specConstant = false);
+ Id makeInt8Constant(int i, bool specConstant = false) { return makeIntConstant(makeIntType(8), (unsigned)i, specConstant); }
+ Id makeUint8Constant(unsigned u, bool specConstant = false) { return makeIntConstant(makeUintType(8), u, specConstant); }
+ Id makeInt16Constant(int i, bool specConstant = false) { return makeIntConstant(makeIntType(16), (unsigned)i, specConstant); }
+ Id makeUint16Constant(unsigned u, bool specConstant = false) { return makeIntConstant(makeUintType(16), u, specConstant); }
+ Id makeIntConstant(int i, bool specConstant = false) { return makeIntConstant(makeIntType(32), (unsigned)i, specConstant); }
+ Id makeUintConstant(unsigned u, bool specConstant = false) { return makeIntConstant(makeUintType(32), u, specConstant); }
+ Id makeInt64Constant(long long i, bool specConstant = false) { return makeInt64Constant(makeIntType(64), (unsigned long long)i, specConstant); }
+ Id makeUint64Constant(unsigned long long u, bool specConstant = false) { return makeInt64Constant(makeUintType(64), u, specConstant); }
+ Id makeFloatConstant(float f, bool specConstant = false);
+ Id makeDoubleConstant(double d, bool specConstant = false);
+ Id makeFloat16Constant(float f16, bool specConstant = false);
+ Id makeFpConstant(Id type, double d, bool specConstant = false);
+
+ // Turn the array of constants into a proper spv constant of the requested type.
+ Id makeCompositeConstant(Id type, const std::vector<Id>& comps, bool specConst = false);
+
+ // Methods for adding information outside the CFG.
+ Instruction* addEntryPoint(ExecutionModel, Function*, const char* name);
+ void addExecutionMode(Function*, ExecutionMode mode, int value1 = -1, int value2 = -1, int value3 = -1);
+ void addName(Id, const char* name);
+ void addMemberName(Id, int member, const char* name);
+ void addDecoration(Id, Decoration, int num = -1);
+ void addDecoration(Id, Decoration, const char*);
+ void addDecorationId(Id id, Decoration, Id idDecoration);
+ void addMemberDecoration(Id, unsigned int member, Decoration, int num = -1);
+ void addMemberDecoration(Id, unsigned int member, Decoration, const char*);
+
+ // At the end of what block do the next create*() instructions go?
+ void setBuildPoint(Block* bp) { buildPoint = bp; }
+ Block* getBuildPoint() const { return buildPoint; }
+
+ // Make the entry-point function. The returned pointer is only valid
+ // for the lifetime of this builder.
+ Function* makeEntryPoint(const char*);
+
+ // Make a shader-style function, and create its entry block if entry is non-zero.
+ // Return the function, pass back the entry.
+ // The returned pointer is only valid for the lifetime of this builder.
+ Function* makeFunctionEntry(Decoration precision, Id returnType, const char* name, const std::vector<Id>& paramTypes,
+ const std::vector<std::vector<Decoration>>& precisions, Block **entry = 0);
+
+ // Create a return. An 'implicit' return is one not appearing in the source
+ // code. In the case of an implicit return, no post-return block is inserted.
+ void makeReturn(bool implicit, Id retVal = 0);
+
+ // Generate all the code needed to finish up a function.
+ void leaveFunction();
+
+ // Create a discard.
+ void makeDiscard();
+
+ // Create a global or function local or IO variable.
+ Id createVariable(StorageClass, Id type, const char* name = 0, Id initializer = NoResult);
+
+ // Create an intermediate with an undefined value.
+ Id createUndefined(Id type);
+
+ // Store rValue through the lValue pointer.
+ void createStore(Id rValue, Id lValue, spv::MemoryAccessMask memoryAccess = spv::MemoryAccessMaskNone, spv::Scope scope = spv::ScopeMax, unsigned int alignment = 0);
+
+ // Load from an Id and return it
+ Id createLoad(Id lValue, spv::MemoryAccessMask memoryAccess = spv::MemoryAccessMaskNone, spv::Scope scope = spv::ScopeMax, unsigned int alignment = 0);
+
+ // Create an OpAccessChain instruction
+ Id createAccessChain(StorageClass, Id base, const std::vector<Id>& offsets);
+
+ // Create an OpArrayLength instruction
+ Id createArrayLength(Id base, unsigned int member);
+
+ // Create an OpCooperativeMatrixLengthNV instruction
+ Id createCooperativeMatrixLength(Id type);
+
+ // Create an OpCompositeExtract instruction
+ Id createCompositeExtract(Id composite, Id typeId, unsigned index);
+ Id createCompositeExtract(Id composite, Id typeId, const std::vector<unsigned>& indexes);
+ Id createCompositeInsert(Id object, Id composite, Id typeId, unsigned index);
+ Id createCompositeInsert(Id object, Id composite, Id typeId, const std::vector<unsigned>& indexes);
+
+ Id createVectorExtractDynamic(Id vector, Id typeId, Id componentIndex);
+ Id createVectorInsertDynamic(Id vector, Id typeId, Id component, Id componentIndex);
+
+ void createNoResultOp(Op);
+ void createNoResultOp(Op, Id operand);
+ void createNoResultOp(Op, const std::vector<Id>& operands);
+ void createNoResultOp(Op, const std::vector<IdImmediate>& operands);
+ void createControlBarrier(Scope execution, Scope memory, MemorySemanticsMask);
+ void createMemoryBarrier(unsigned executionScope, unsigned memorySemantics);
+ Id createUnaryOp(Op, Id typeId, Id operand);
+ Id createBinOp(Op, Id typeId, Id operand1, Id operand2);
+ Id createTriOp(Op, Id typeId, Id operand1, Id operand2, Id operand3);
+ Id createOp(Op, Id typeId, const std::vector<Id>& operands);
+ Id createOp(Op, Id typeId, const std::vector<IdImmediate>& operands);
+ Id createFunctionCall(spv::Function*, const std::vector<spv::Id>&);
+ Id createSpecConstantOp(Op, Id typeId, const std::vector<spv::Id>& operands, const std::vector<unsigned>& literals);
+
+ // Take an rvalue (source) and a set of channels to extract from it to
+ // make a new rvalue, which is returned.
+ Id createRvalueSwizzle(Decoration precision, Id typeId, Id source, const std::vector<unsigned>& channels);
+
+ // Take a copy of an lvalue (target) and a source of components, and set the
+ // source components into the lvalue where the 'channels' say to put them.
+ // An updated version of the target is returned.
+ // (No true lvalue or stores are used.)
+ Id createLvalueSwizzle(Id typeId, Id target, Id source, const std::vector<unsigned>& channels);
+
+ // If both the id and precision are valid, the id
+ // gets tagged with the requested precision.
+ // The passed in id is always the returned id, to simplify use patterns.
+ Id setPrecision(Id id, Decoration precision)
+ {
+ if (precision != NoPrecision && id != NoResult)
+ addDecoration(id, precision);
+
+ return id;
+ }
+
+ // Can smear a scalar to a vector for the following forms:
+ // - promoteScalar(scalar, vector) // smear scalar to width of vector
+ // - promoteScalar(vector, scalar) // smear scalar to width of vector
+ // - promoteScalar(pointer, scalar) // smear scalar to width of what pointer points to
+ // - promoteScalar(scalar, scalar) // do nothing
+ // Other forms are not allowed.
+ //
+ // Generally, the type of 'scalar' does not need to be the same type as the components in 'vector'.
+ // The type of the created vector is a vector of components of the same type as the scalar.
+ //
+ // Note: One of the arguments will change, with the result coming back that way rather than
+ // through the return value.
+ void promoteScalar(Decoration precision, Id& left, Id& right);
+
+ // Make a value by smearing the scalar to fill the type.
+ // vectorType should be the correct type for making a vector of scalarVal.
+ // (No conversions are done.)
+ Id smearScalar(Decoration precision, Id scalarVal, Id vectorType);
+
+ // Create a call to a built-in function.
+ Id createBuiltinCall(Id resultType, Id builtins, int entryPoint, const std::vector<Id>& args);
+
+ // List of parameters used to create a texture operation
+ struct TextureParameters {
+ Id sampler;
+ Id coords;
+ Id bias;
+ Id lod;
+ Id Dref;
+ Id offset;
+ Id offsets;
+ Id gradX;
+ Id gradY;
+ Id sample;
+ Id component;
+ Id texelOut;
+ Id lodClamp;
+ Id granularity;
+ Id coarse;
+ bool nonprivate;
+ bool volatil;
+ };
+
+ // Select the correct texture operation based on all inputs, and emit the correct instruction
+ Id createTextureCall(Decoration precision, Id resultType, bool sparse, bool fetch, bool proj, bool gather,
+ bool noImplicit, const TextureParameters&, ImageOperandsMask);
+
+ // Emit the OpTextureQuery* instruction that was passed in.
+ // Figure out the right return value and type, and return it.
+ Id createTextureQueryCall(Op, const TextureParameters&, bool isUnsignedResult);
+
+ Id createSamplePositionCall(Decoration precision, Id, Id);
+
+ Id createBitFieldExtractCall(Decoration precision, Id, Id, Id, bool isSigned);
+ Id createBitFieldInsertCall(Decoration precision, Id, Id, Id, Id);
+
+ // Reduction comparison for composites: For equal and not-equal resulting in a scalar.
+ Id createCompositeCompare(Decoration precision, Id, Id, bool /* true if for equal, false if for not-equal */);
+
+ // OpCompositeConstruct
+ Id createCompositeConstruct(Id typeId, const std::vector<Id>& constituents);
+
+ // vector or scalar constructor
+ Id createConstructor(Decoration precision, const std::vector<Id>& sources, Id resultTypeId);
+
+ // matrix constructor
+ Id createMatrixConstructor(Decoration precision, const std::vector<Id>& sources, Id constructee);
+
+ // Helper to use for building nested control flow with if-then-else.
+ class If {
+ public:
+ If(Id condition, unsigned int ctrl, Builder& builder);
+ ~If() {}
+
+ void makeBeginElse();
+ void makeEndIf();
+
+ private:
+ If(const If&);
+ If& operator=(If&);
+
+ Builder& builder;
+ Id condition;
+ unsigned int control;
+ Function* function;
+ Block* headerBlock;
+ Block* thenBlock;
+ Block* elseBlock;
+ Block* mergeBlock;
+ };
+
+ // Make a switch statement. A switch has 'numSegments' pieces of code, not containing
+ // any case/default labels, all separated by one or more case/default labels. Each case
+ // value caseValues[i] jumps to the segment valueToSegment[i]. The defaultSegment is
+ // also in this segment number space. The value switched on is given by 'condition',
+ // as in switch(condition).
+ //
+ // The SPIR-V Builder will maintain the stack of post-switch merge blocks for nested switches.
+ //
+ // Use a defaultSegment < 0 if there is no default segment (to branch to post switch).
+ //
+ // Returns the right set of basic blocks to start each code segment with, so that the caller's
+ // recursion stack can hold the memory for it.
+ //
+ void makeSwitch(Id condition, unsigned int control, int numSegments, const std::vector<int>& caseValues,
+ const std::vector<int>& valueToSegment, int defaultSegment, std::vector<Block*>& segmentBB); // return argument
+
+ // Add a branch to the innermost switch's merge block.
+ void addSwitchBreak();
+
+ // Move to the next code segment, passing in the return argument in makeSwitch()
+ void nextSwitchSegment(std::vector<Block*>& segmentBB, int segment);
+
+ // Finish off the innermost switch.
+ void endSwitch(std::vector<Block*>& segmentBB);
+
+ struct LoopBlocks {
+ LoopBlocks(Block& head, Block& body, Block& merge, Block& continue_target) :
+ head(head), body(body), merge(merge), continue_target(continue_target) { }
+ Block &head, &body, &merge, &continue_target;
+ private:
+ LoopBlocks();
+ LoopBlocks& operator=(const LoopBlocks&);
+ };
+
+ // Start a new loop and prepare the builder to generate code for it. Until
+ // closeLoop() is called for this loop, createLoopContinue() and
+ // createLoopExit() will target its corresponding blocks.
+ LoopBlocks& makeNewLoop();
+
+ // Create a new block in the function containing the build point. Memory is
+ // owned by the function object.
+ Block& makeNewBlock();
+
+ // Add a branch to the continue_target of the current (innermost) loop.
+ void createLoopContinue();
+
+ // Add an exit (e.g. "break") from the innermost loop that we're currently
+ // in.
+ void createLoopExit();
+
+ // Close the innermost loop that you're in
+ void closeLoop();
+
+ //
+ // Access chain design for an R-Value vs. L-Value:
+ //
+ // There is a single access chain the builder is building at
+ // any particular time. Such a chain can be used for either a load or
+ // a store, when desired.
+ //
+ // Expressions can be r-values, l-values, or both:
+ // a[b.c].d = .... // l-value
+ // ... = a[b.c].d; // r-value, that also looks like an l-value
+ // ++a[b.c].d; // r-value and l-value
+ // (x + y)[2]; // r-value only, can't possibly be l-value
+ //
+ // Computing an r-value means generating code. Hence,
+ // r-values should only be computed when they are needed, not speculatively.
+ //
+ // Computing an l-value means saving away information for later use in the compiler,
+ // no code is generated until the l-value is later dereferenced. It is okay
+ // to speculatively generate an l-value, just not okay to speculatively dereference it.
+ //
+ // The base of the access chain (the left-most variable or expression
+ // from which everything is based) can be set either as an l-value
+ // or as an r-value. Most efficient would be to set an l-value if one
+ // is available. If an expression was evaluated, the resulting r-value
+ // can be set as the chain base.
+ //
+ // The users of this single access chain can save and restore if they
+ // want to nest or manage multiple chains.
+ //
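+ // For illustration, a store like 'a[i].d = e' is typically driven as
+ // (the member index of 'd' shown as a placeholder):
+ //     clearAccessChain();
+ //     setAccessChainLValue(a);                       // 'a' is an l-value pointer
+ //     accessChainPush(i, flags, 0);                  // dynamic array index
+ //     accessChainPush(<constant index of d>, flags, 0);
+ //     accessChainStore(e);
+ //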
+
+ struct AccessChain {
+ Id base; // for l-values, pointer to the base object, for r-values, the base object
+ std::vector<Id> indexChain;
+ Id instr; // cache the instruction that generates this access chain
+ std::vector<unsigned> swizzle; // each std::vector element selects the next GLSL component number
+ Id component; // a dynamic component index, can coexist with a swizzle, done after the swizzle, NoResult if not present
+ Id preSwizzleBaseType; // dereferenced type, before swizzle or component is applied; NoType unless a swizzle or component is present
+ bool isRValue; // true if 'base' is an r-value, otherwise, base is an l-value
+ unsigned int alignment; // bitwise OR of alignment values passed in. Accumulates worst alignment. Only tracks base and (optional) component selection alignment.
+
+ // Accumulate whether anything in the chain of structures has coherent decorations.
+ struct CoherentFlags {
+ unsigned coherent : 1;
+ unsigned devicecoherent : 1;
+ unsigned queuefamilycoherent : 1;
+ unsigned workgroupcoherent : 1;
+ unsigned subgroupcoherent : 1;
+ unsigned nonprivate : 1;
+ unsigned volatil : 1;
+ unsigned isImage : 1;
+
+ void clear() {
+ coherent = 0;
+ devicecoherent = 0;
+ queuefamilycoherent = 0;
+ workgroupcoherent = 0;
+ subgroupcoherent = 0;
+ nonprivate = 0;
+ volatil = 0;
+ isImage = 0;
+ }
+
+ CoherentFlags() { clear(); }
+ CoherentFlags operator |=(const CoherentFlags &other) {
+ coherent |= other.coherent;
+ devicecoherent |= other.devicecoherent;
+ queuefamilycoherent |= other.queuefamilycoherent;
+ workgroupcoherent |= other.workgroupcoherent;
+ subgroupcoherent |= other.subgroupcoherent;
+ nonprivate |= other.nonprivate;
+ volatil |= other.volatil;
+ isImage |= other.isImage;
+ return *this;
+ }
+ };
+ CoherentFlags coherentFlags;
+ };
+
+ //
+ // the SPIR-V builder maintains a single active chain that
+ // the following methods operate on
+ //
+
+ // for external save and restore
+ AccessChain getAccessChain() { return accessChain; }
+ void setAccessChain(AccessChain newChain) { accessChain = newChain; }
+
+ // clear accessChain
+ void clearAccessChain();
+
+ // set new base as an l-value base
+ void setAccessChainLValue(Id lValue)
+ {
+ assert(isPointer(lValue));
+ accessChain.base = lValue;
+ }
+
+ // set new base value as an r-value
+ void setAccessChainRValue(Id rValue)
+ {
+ accessChain.isRValue = true;
+ accessChain.base = rValue;
+ }
+
+ // push offset onto the end of the chain
+ void accessChainPush(Id offset, AccessChain::CoherentFlags coherentFlags, unsigned int alignment)
+ {
+ accessChain.indexChain.push_back(offset);
+ accessChain.coherentFlags |= coherentFlags;
+ accessChain.alignment |= alignment;
+ }
+
+ // push new swizzle onto the end of any existing swizzle, merging into a single swizzle
+ void accessChainPushSwizzle(std::vector<unsigned>& swizzle, Id preSwizzleBaseType, AccessChain::CoherentFlags coherentFlags, unsigned int alignment);
+
+ // push a dynamic component selection onto the access chain, only applicable with a
+ // non-trivial swizzle or no swizzle
+ void accessChainPushComponent(Id component, Id preSwizzleBaseType, AccessChain::CoherentFlags coherentFlags, unsigned int alignment)
+ {
+ if (accessChain.swizzle.size() != 1) {
+ accessChain.component = component;
+ if (accessChain.preSwizzleBaseType == NoType)
+ accessChain.preSwizzleBaseType = preSwizzleBaseType;
+ }
+ accessChain.coherentFlags |= coherentFlags;
+ accessChain.alignment |= alignment;
+ }
+
+ // use accessChain and swizzle to store value
+ void accessChainStore(Id rvalue, spv::MemoryAccessMask memoryAccess = spv::MemoryAccessMaskNone, spv::Scope scope = spv::ScopeMax, unsigned int alignment = 0);
+
+ // use accessChain and swizzle to load an r-value
+ Id accessChainLoad(Decoration precision, Decoration nonUniform, Id resultType, spv::MemoryAccessMask memoryAccess = spv::MemoryAccessMaskNone, spv::Scope scope = spv::ScopeMax, unsigned int alignment = 0);
+
+ // get the direct pointer for an l-value
+ Id accessChainGetLValue();
+
+ // Get the inferred SPIR-V type of the result of the current access chain,
+ // based on the type of the base and the chain of dereferences.
+ Id accessChainGetInferredType();
+
+ // Add capabilities, extensions, remove unneeded decorations, etc.,
+ // based on the resulting SPIR-V.
+ void postProcess();
+
+ // Hook to visit each instruction in a block in a function
+ void postProcess(Instruction&);
+ // Hook to visit each instruction in a reachable block in a function.
+ void postProcessReachable(const Instruction&);
+ // Hook to visit each non-32-bit sized float/int operation in a block.
+ void postProcessType(const Instruction&, spv::Id typeId);
+
+ void dump(std::vector<unsigned int>&) const;
+
+ void createBranch(Block* block);
+ void createConditionalBranch(Id condition, Block* thenBlock, Block* elseBlock);
+ void createLoopMerge(Block* mergeBlock, Block* continueBlock, unsigned int control, const std::vector<unsigned int>& operands);
+
+ // Sets to generate opcode for specialization constants.
+ void setToSpecConstCodeGenMode() { generatingOpCodeForSpecConst = true; }
+ // Sets to generate opcode for non-specialization constants (normal mode).
+ void setToNormalCodeGenMode() { generatingOpCodeForSpecConst = false; }
+ // Check if the builder is generating code for spec constants.
+ bool isInSpecConstCodeGenMode() { return generatingOpCodeForSpecConst; }
+
+ protected:
+ Id makeIntConstant(Id typeId, unsigned value, bool specConstant);
+ Id makeInt64Constant(Id typeId, unsigned long long value, bool specConstant);
+ Id findScalarConstant(Op typeClass, Op opcode, Id typeId, unsigned value);
+ Id findScalarConstant(Op typeClass, Op opcode, Id typeId, unsigned v1, unsigned v2);
+ Id findCompositeConstant(Op typeClass, Id typeId, const std::vector<Id>& comps);
+ Id findStructConstant(Id typeId, const std::vector<Id>& comps);
+ Id collapseAccessChain();
+ void remapDynamicSwizzle();
+ void transferAccessChainSwizzle(bool dynamic);
+ void simplifyAccessChainSwizzle();
+ void createAndSetNoPredecessorBlock(const char*);
+ void createSelectionMerge(Block* mergeBlock, unsigned int control);
+ void dumpSourceInstructions(std::vector<unsigned int>&) const;
+ void dumpSourceInstructions(const spv::Id fileId, const std::string& text, std::vector<unsigned int>&) const;
+ void dumpInstructions(std::vector<unsigned int>&, const std::vector<std::unique_ptr<Instruction> >&) const;
+ void dumpModuleProcesses(std::vector<unsigned int>&) const;
+ spv::MemoryAccessMask sanitizeMemoryAccessForStorageClass(spv::MemoryAccessMask memoryAccess, StorageClass sc) const;
+
+ unsigned int spvVersion; // the version of SPIR-V to emit in the header
+ SourceLanguage source;
+ int sourceVersion;
+ spv::Id sourceFileStringId;
+ std::string sourceText;
+ int currentLine;
+ const char* currentFile;
+ bool emitOpLines;
+ std::set<std::string> extensions;
+ std::vector<const char*> sourceExtensions;
+ std::vector<const char*> moduleProcesses;
+ AddressingModel addressModel;
+ MemoryModel memoryModel;
+ std::set<spv::Capability> capabilities;
+ int builderNumber;
+ Module module;
+ Block* buildPoint;
+ Id uniqueId;
+ Function* entryPointFunction;
+ bool generatingOpCodeForSpecConst;
+ AccessChain accessChain;
+
+ // special blocks of instructions for output
+ std::vector<std::unique_ptr<Instruction> > strings;
+ std::vector<std::unique_ptr<Instruction> > imports;
+ std::vector<std::unique_ptr<Instruction> > entryPoints;
+ std::vector<std::unique_ptr<Instruction> > executionModes;
+ std::vector<std::unique_ptr<Instruction> > names;
+ std::vector<std::unique_ptr<Instruction> > decorations;
+ std::vector<std::unique_ptr<Instruction> > constantsTypesGlobals;
+ std::vector<std::unique_ptr<Instruction> > externals;
+ std::vector<std::unique_ptr<Function> > functions;
+
+ // not output, internally used for quick & dirty canonical (unique) creation
+ std::unordered_map<unsigned int, std::vector<Instruction*>> groupedConstants; // map type opcodes to constant inst.
+ std::unordered_map<unsigned int, std::vector<Instruction*>> groupedStructConstants; // map struct-id to constant instructions
+ std::unordered_map<unsigned int, std::vector<Instruction*>> groupedTypes; // map type opcodes to type instructions
+
+ // stack of switches
+ std::stack<Block*> switchMerges;
+
+ // Our loop stack.
+ std::stack<LoopBlocks> loops;
+
+ // map from strings to their string ids
+ std::unordered_map<std::string, spv::Id> stringIds;
+
+ // map from include file name ids to their contents
+ std::map<spv::Id, const std::string*> includeFiles;
+
+ // The stream for outputting warnings and errors.
+ SpvBuildLogger* logger;
+}; // end Builder class
+
+}; // end spv namespace
+
+#endif // SpvBuilder_H
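The spec-constant mode toggle above brackets code generation so that expressions feeding specialization constants come out as OpSpecConstantOp forms instead of being folded or emitted as ordinary instructions. A minimal sketch of the intended usage, assuming an already-constructed spv::Builder and its generic createBinOp emitter (not glslang's actual call site):

    // Hedged sketch: force an add to be emitted in spec-constant form,
    // then restore normal code generation.
    spv::Id emitSpecConstAdd(spv::Builder& builder, spv::Id typeId, spv::Id a, spv::Id b) {
        builder.setToSpecConstCodeGenMode();
        spv::Id sum = builder.createBinOp(spv::OpIAdd, typeId, a, b);
        builder.setToNormalCodeGenMode();
        return sum;
    }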
diff --git a/thirdparty/glslang/SPIRV/SpvPostProcess.cpp b/thirdparty/glslang/SPIRV/SpvPostProcess.cpp
new file mode 100644
index 0000000000..6e1f7cf61f
--- /dev/null
+++ b/thirdparty/glslang/SPIRV/SpvPostProcess.cpp
@@ -0,0 +1,426 @@
+//
+// Copyright (C) 2018 Google, Inc.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+
+//
+// Post-processing for SPIR-V IR, in internal form, not standard binary form.
+//
+
+#include <cassert>
+#include <cstdlib>
+
+#include <unordered_set>
+#include <algorithm>
+
+#include "SpvBuilder.h"
+
+#include "spirv.hpp"
+#include "GlslangToSpv.h"
+#include "SpvBuilder.h"
+namespace spv {
+ #include "GLSL.std.450.h"
+ #include "GLSL.ext.KHR.h"
+ #include "GLSL.ext.EXT.h"
+#ifdef AMD_EXTENSIONS
+ #include "GLSL.ext.AMD.h"
+#endif
+#ifdef NV_EXTENSIONS
+ #include "GLSL.ext.NV.h"
+#endif
+}
+
+namespace spv {
+
+// Hook to visit each operand type and result type of an instruction.
+// Will be called multiple times for one instruction, once for each typed
+// operand and the result.
+void Builder::postProcessType(const Instruction& inst, Id typeId)
+{
+    // Characterize the type in question
+ Id basicTypeOp = getMostBasicTypeClass(typeId);
+ int width = 0;
+ if (basicTypeOp == OpTypeFloat || basicTypeOp == OpTypeInt)
+ width = getScalarTypeWidth(typeId);
+
+ // Do opcode-specific checks
+ switch (inst.getOpCode()) {
+ case OpLoad:
+ case OpStore:
+ if (basicTypeOp == OpTypeStruct) {
+ if (containsType(typeId, OpTypeInt, 8))
+ addCapability(CapabilityInt8);
+ if (containsType(typeId, OpTypeInt, 16))
+ addCapability(CapabilityInt16);
+ if (containsType(typeId, OpTypeFloat, 16))
+ addCapability(CapabilityFloat16);
+ } else {
+ StorageClass storageClass = getStorageClass(inst.getIdOperand(0));
+ if (width == 8) {
+ switch (storageClass) {
+ case StorageClassPhysicalStorageBufferEXT:
+ case StorageClassUniform:
+ case StorageClassStorageBuffer:
+ case StorageClassPushConstant:
+ break;
+ default:
+ addCapability(CapabilityInt8);
+ break;
+ }
+ } else if (width == 16) {
+ switch (storageClass) {
+ case StorageClassPhysicalStorageBufferEXT:
+ case StorageClassUniform:
+ case StorageClassStorageBuffer:
+ case StorageClassPushConstant:
+ case StorageClassInput:
+ case StorageClassOutput:
+ break;
+ default:
+ if (basicTypeOp == OpTypeInt)
+ addCapability(CapabilityInt16);
+ if (basicTypeOp == OpTypeFloat)
+ addCapability(CapabilityFloat16);
+ break;
+ }
+ }
+ }
+ break;
+ case OpAccessChain:
+ case OpPtrAccessChain:
+ case OpCopyObject:
+ break;
+ case OpFConvert:
+ case OpSConvert:
+ case OpUConvert:
+ // Look for any 8/16-bit storage capabilities. If there are none, assume that
+ // the convert instruction requires the Float16/Int8/16 capability.
+ if (containsType(typeId, OpTypeFloat, 16) || containsType(typeId, OpTypeInt, 16)) {
+ bool foundStorage = false;
+ for (auto it = capabilities.begin(); it != capabilities.end(); ++it) {
+ spv::Capability cap = *it;
+ if (cap == spv::CapabilityStorageInputOutput16 ||
+ cap == spv::CapabilityStoragePushConstant16 ||
+ cap == spv::CapabilityStorageUniformBufferBlock16 ||
+ cap == spv::CapabilityStorageUniform16) {
+ foundStorage = true;
+ break;
+ }
+ }
+ if (!foundStorage) {
+ if (containsType(typeId, OpTypeFloat, 16))
+ addCapability(CapabilityFloat16);
+ if (containsType(typeId, OpTypeInt, 16))
+ addCapability(CapabilityInt16);
+ }
+ }
+ if (containsType(typeId, OpTypeInt, 8)) {
+ bool foundStorage = false;
+ for (auto it = capabilities.begin(); it != capabilities.end(); ++it) {
+ spv::Capability cap = *it;
+ if (cap == spv::CapabilityStoragePushConstant8 ||
+ cap == spv::CapabilityUniformAndStorageBuffer8BitAccess ||
+ cap == spv::CapabilityStorageBuffer8BitAccess) {
+ foundStorage = true;
+ break;
+ }
+ }
+ if (!foundStorage) {
+ addCapability(CapabilityInt8);
+ }
+ }
+ break;
+ case OpExtInst:
+#ifdef AMD_EXTENSIONS
+ switch (inst.getImmediateOperand(1)) {
+ case GLSLstd450Frexp:
+ case GLSLstd450FrexpStruct:
+ if (getSpvVersion() < glslang::EShTargetSpv_1_3 && containsType(typeId, OpTypeInt, 16))
+ addExtension(spv::E_SPV_AMD_gpu_shader_int16);
+ break;
+ case GLSLstd450InterpolateAtCentroid:
+ case GLSLstd450InterpolateAtSample:
+ case GLSLstd450InterpolateAtOffset:
+ if (getSpvVersion() < glslang::EShTargetSpv_1_3 && containsType(typeId, OpTypeFloat, 16))
+ addExtension(spv::E_SPV_AMD_gpu_shader_half_float);
+ break;
+ default:
+ break;
+ }
+#endif
+ break;
+ default:
+ if (basicTypeOp == OpTypeFloat && width == 16)
+ addCapability(CapabilityFloat16);
+ if (basicTypeOp == OpTypeInt && width == 16)
+ addCapability(CapabilityInt16);
+ if (basicTypeOp == OpTypeInt && width == 8)
+ addCapability(CapabilityInt8);
+ break;
+ }
+}
+
+// Called for each instruction that resides in a block.
+void Builder::postProcess(Instruction& inst)
+{
+ // Add capabilities based simply on the opcode.
+ switch (inst.getOpCode()) {
+ case OpExtInst:
+ switch (inst.getImmediateOperand(1)) {
+ case GLSLstd450InterpolateAtCentroid:
+ case GLSLstd450InterpolateAtSample:
+ case GLSLstd450InterpolateAtOffset:
+ addCapability(CapabilityInterpolationFunction);
+ break;
+ default:
+ break;
+ }
+ break;
+ case OpDPdxFine:
+ case OpDPdyFine:
+ case OpFwidthFine:
+ case OpDPdxCoarse:
+ case OpDPdyCoarse:
+ case OpFwidthCoarse:
+ addCapability(CapabilityDerivativeControl);
+ break;
+
+ case OpImageQueryLod:
+ case OpImageQuerySize:
+ case OpImageQuerySizeLod:
+ case OpImageQuerySamples:
+ case OpImageQueryLevels:
+ addCapability(CapabilityImageQuery);
+ break;
+
+#ifdef NV_EXTENSIONS
+ case OpGroupNonUniformPartitionNV:
+ addExtension(E_SPV_NV_shader_subgroup_partitioned);
+ addCapability(CapabilityGroupNonUniformPartitionedNV);
+ break;
+#endif
+
+ case OpLoad:
+ case OpStore:
+ {
+            // For any load/store to a PhysicalStorageBufferEXT, walk the access
+            // chain index list to compute the misalignment. The pre-existing alignment
+            // value (set via Builder::AccessChain::alignment) only accounts for the base
+            // of the reference type and any scalar component selection in the access
+            // chain, and this function computes the rest from the SPIR-V Offset decorations.
+ Instruction *accessChain = module.getInstruction(inst.getIdOperand(0));
+ if (accessChain->getOpCode() == OpAccessChain) {
+ Instruction *base = module.getInstruction(accessChain->getIdOperand(0));
+ // Get the type of the base of the access chain. It must be a pointer type.
+ Id typeId = base->getTypeId();
+ Instruction *type = module.getInstruction(typeId);
+ assert(type->getOpCode() == OpTypePointer);
+ if (type->getImmediateOperand(0) != StorageClassPhysicalStorageBufferEXT) {
+ break;
+ }
+ // Get the pointee type.
+ typeId = type->getIdOperand(1);
+ type = module.getInstruction(typeId);
+ // Walk the index list for the access chain. For each index, find any
+ // misalignment that can apply when accessing the member/element via
+ // Offset/ArrayStride/MatrixStride decorations, and bitwise OR them all
+ // together.
+ int alignment = 0;
+ for (int i = 1; i < accessChain->getNumOperands(); ++i) {
+ Instruction *idx = module.getInstruction(accessChain->getIdOperand(i));
+ if (type->getOpCode() == OpTypeStruct) {
+ assert(idx->getOpCode() == OpConstant);
+ unsigned int c = idx->getImmediateOperand(0);
+
+ const auto function = [&](const std::unique_ptr<Instruction>& decoration) {
+ if (decoration.get()->getOpCode() == OpMemberDecorate &&
+ decoration.get()->getIdOperand(0) == typeId &&
+ decoration.get()->getImmediateOperand(1) == c &&
+ (decoration.get()->getImmediateOperand(2) == DecorationOffset ||
+ decoration.get()->getImmediateOperand(2) == DecorationMatrixStride)) {
+ alignment |= decoration.get()->getImmediateOperand(3);
+ }
+ };
+ std::for_each(decorations.begin(), decorations.end(), function);
+ // get the next member type
+ typeId = type->getIdOperand(c);
+ type = module.getInstruction(typeId);
+ } else if (type->getOpCode() == OpTypeArray ||
+ type->getOpCode() == OpTypeRuntimeArray) {
+ const auto function = [&](const std::unique_ptr<Instruction>& decoration) {
+ if (decoration.get()->getOpCode() == OpDecorate &&
+ decoration.get()->getIdOperand(0) == typeId &&
+ decoration.get()->getImmediateOperand(1) == DecorationArrayStride) {
+ alignment |= decoration.get()->getImmediateOperand(2);
+ }
+ };
+ std::for_each(decorations.begin(), decorations.end(), function);
+ // Get the element type
+ typeId = type->getIdOperand(0);
+ type = module.getInstruction(typeId);
+ } else {
+ // Once we get to any non-aggregate type, we're done.
+ break;
+ }
+ }
+ assert(inst.getNumOperands() >= 3);
+ unsigned int memoryAccess = inst.getImmediateOperand((inst.getOpCode() == OpStore) ? 2 : 1);
+ assert(memoryAccess & MemoryAccessAlignedMask);
+ static_cast<void>(memoryAccess);
+ // Compute the index of the alignment operand.
+ int alignmentIdx = 2;
+ if (inst.getOpCode() == OpStore)
+ alignmentIdx++;
+ // Merge new and old (mis)alignment
+ alignment |= inst.getImmediateOperand(alignmentIdx);
+ // Pick the LSB
+ alignment = alignment & ~(alignment & (alignment-1));
+ // update the Aligned operand
+ inst.setImmediateOperand(alignmentIdx, alignment);
+ }
+ break;
+ }
+
+ default:
+ break;
+ }
+
+ // Checks based on type
+ if (inst.getTypeId() != NoType)
+ postProcessType(inst, inst.getTypeId());
+ for (int op = 0; op < inst.getNumOperands(); ++op) {
+ if (inst.isIdOperand(op)) {
+ // In blocks, these are always result ids, but we are relying on
+ // getTypeId() to return NoType for things like OpLabel.
+ if (getTypeId(inst.getIdOperand(op)) != NoType)
+ postProcessType(inst, getTypeId(inst.getIdOperand(op)));
+ }
+ }
+}
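The alignment merge at the end of the OpLoad/OpStore case above relies on a bit trick: OR together every candidate offset and stride, then keep only the lowest set bit. That bit is the largest power of two dividing all the candidates, i.e. the strongest alignment every access is guaranteed to satisfy. A self-contained illustration with a hypothetical helper:

    // Standalone version of the misalignment merge (illustrative only).
    unsigned guaranteedAlignment(const unsigned* offsets, int count, unsigned base) {
        unsigned alignment = base;
        for (int i = 0; i < count; ++i)
            alignment |= offsets[i];                        // collect all misalignments
        return alignment & ~(alignment & (alignment - 1));  // keep the lowest set bit
    }
    // e.g. base 32 with offsets {16, 4}: OR = 0b110100, lowest bit = 4, so the
    // Aligned memory operand can safely claim at most 4-byte alignment.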
+
+// Called for each instruction in a reachable block.
+void Builder::postProcessReachable(const Instruction&)
+{
+    // Previously had code here, but acting on reachable instructions is questionable
+    // without also deleting the unreachable ones.
+}
+
+// See the comment in the header.
+void Builder::postProcess()
+{
+ std::unordered_set<const Block*> reachableBlocks;
+ std::unordered_set<Id> unreachableDefinitions;
+ // Collect IDs defined in unreachable blocks. For each function, label the
+ // reachable blocks first. Then for each unreachable block, collect the
+ // result IDs of the instructions in it.
+ for (auto fi = module.getFunctions().cbegin(); fi != module.getFunctions().cend(); fi++) {
+ Function* f = *fi;
+ Block* entry = f->getEntryBlock();
+ inReadableOrder(entry, [&reachableBlocks](const Block* b) { reachableBlocks.insert(b); });
+ for (auto bi = f->getBlocks().cbegin(); bi != f->getBlocks().cend(); bi++) {
+ Block* b = *bi;
+ if (reachableBlocks.count(b) == 0) {
+ for (auto ii = b->getInstructions().cbegin(); ii != b->getInstructions().cend(); ii++)
+ unreachableDefinitions.insert(ii->get()->getResultId());
+ }
+ }
+ }
+
+ // Remove unneeded decorations, for unreachable instructions
+ decorations.erase(std::remove_if(decorations.begin(), decorations.end(),
+ [&unreachableDefinitions](std::unique_ptr<Instruction>& I) -> bool {
+ Id decoration_id = I.get()->getIdOperand(0);
+ return unreachableDefinitions.count(decoration_id) != 0;
+ }),
+ decorations.end());
+
+    // Add per-instruction capabilities, extensions, etc.
+
+ // Look for any 8/16 bit type in physical storage buffer class, and set the
+ // appropriate capability. This happens in createSpvVariable for other storage
+ // classes, but there isn't always a variable for physical storage buffer.
+ for (int t = 0; t < (int)groupedTypes[OpTypePointer].size(); ++t) {
+ Instruction* type = groupedTypes[OpTypePointer][t];
+ if (type->getImmediateOperand(0) == (unsigned)StorageClassPhysicalStorageBufferEXT) {
+ if (containsType(type->getIdOperand(1), OpTypeInt, 8)) {
+ addExtension(spv::E_SPV_KHR_8bit_storage);
+ addCapability(spv::CapabilityStorageBuffer8BitAccess);
+ }
+ if (containsType(type->getIdOperand(1), OpTypeInt, 16) ||
+ containsType(type->getIdOperand(1), OpTypeFloat, 16)) {
+ addExtension(spv::E_SPV_KHR_16bit_storage);
+ addCapability(spv::CapabilityStorageBuffer16BitAccess);
+ }
+ }
+ }
+
+ // process all reachable instructions...
+ for (auto bi = reachableBlocks.cbegin(); bi != reachableBlocks.cend(); ++bi) {
+ const Block* block = *bi;
+ const auto function = [this](const std::unique_ptr<Instruction>& inst) { postProcessReachable(*inst.get()); };
+ std::for_each(block->getInstructions().begin(), block->getInstructions().end(), function);
+ }
+
+ // process all block-contained instructions
+ for (auto fi = module.getFunctions().cbegin(); fi != module.getFunctions().cend(); fi++) {
+ Function* f = *fi;
+ for (auto bi = f->getBlocks().cbegin(); bi != f->getBlocks().cend(); bi++) {
+ Block* b = *bi;
+ for (auto ii = b->getInstructions().cbegin(); ii != b->getInstructions().cend(); ii++)
+ postProcess(*ii->get());
+
+ // For all local variables that contain pointers to PhysicalStorageBufferEXT, check whether
+ // there is an existing restrict/aliased decoration. If we don't find one, add Aliased as the
+ // default.
+ for (auto vi = b->getLocalVariables().cbegin(); vi != b->getLocalVariables().cend(); vi++) {
+ const Instruction& inst = *vi->get();
+ Id resultId = inst.getResultId();
+ if (containsPhysicalStorageBufferOrArray(getDerefTypeId(resultId))) {
+ bool foundDecoration = false;
+ const auto function = [&](const std::unique_ptr<Instruction>& decoration) {
+ if (decoration.get()->getIdOperand(0) == resultId &&
+ decoration.get()->getOpCode() == OpDecorate &&
+ (decoration.get()->getImmediateOperand(1) == spv::DecorationAliasedPointerEXT ||
+ decoration.get()->getImmediateOperand(1) == spv::DecorationRestrictPointerEXT)) {
+ foundDecoration = true;
+ }
+ };
+ std::for_each(decorations.begin(), decorations.end(), function);
+ if (!foundDecoration) {
+ addDecoration(resultId, spv::DecorationAliasedPointerEXT);
+ }
+ }
+ }
+ }
+ }
+}
+
+}; // end spv namespace
diff --git a/thirdparty/glslang/SPIRV/SpvTools.cpp b/thirdparty/glslang/SPIRV/SpvTools.cpp
new file mode 100644
index 0000000000..db26d59089
--- /dev/null
+++ b/thirdparty/glslang/SPIRV/SpvTools.cpp
@@ -0,0 +1,214 @@
+//
+// Copyright (C) 2014-2016 LunarG, Inc.
+// Copyright (C) 2018 Google, Inc.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+
+//
+// Call into SPIRV-Tools to disassemble, validate, and optimize.
+//
+
+#if ENABLE_OPT
+
+#include <cstdio>
+#include <iostream>
+
+#include "SpvTools.h"
+#include "spirv-tools/optimizer.hpp"
+#include "spirv-tools/libspirv.h"
+
+namespace glslang {
+
+// Translate glslang's view of target versioning to what SPIRV-Tools uses.
+spv_target_env MapToSpirvToolsEnv(const SpvVersion& spvVersion, spv::SpvBuildLogger* logger)
+{
+ switch (spvVersion.vulkan) {
+ case glslang::EShTargetVulkan_1_0:
+ return spv_target_env::SPV_ENV_VULKAN_1_0;
+ case glslang::EShTargetVulkan_1_1:
+ switch (spvVersion.spv) {
+ case EShTargetSpv_1_0:
+ case EShTargetSpv_1_1:
+ case EShTargetSpv_1_2:
+ case EShTargetSpv_1_3:
+ return spv_target_env::SPV_ENV_VULKAN_1_1;
+ case EShTargetSpv_1_4:
+ return spv_target_env::SPV_ENV_VULKAN_1_1_SPIRV_1_4;
+ default:
+ logger->missingFunctionality("Target version for SPIRV-Tools validator");
+ return spv_target_env::SPV_ENV_VULKAN_1_1;
+ }
+ default:
+ break;
+ }
+
+ if (spvVersion.openGl > 0)
+ return spv_target_env::SPV_ENV_OPENGL_4_5;
+
+ logger->missingFunctionality("Target version for SPIRV-Tools validator");
+ return spv_target_env::SPV_ENV_UNIVERSAL_1_0;
+}
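As a concrete instance of the mapping, a Vulkan 1.1 target paired with SPIR-V 1.4 selects the dedicated SPIRV-1.4 validation environment. A hedged check, assuming the SpvVersion fields are filled the way glslang's front end fills them:

    glslang::SpvVersion v;
    v.vulkan = glslang::EShTargetVulkan_1_1;
    v.spv    = glslang::EShTargetSpv_1_4;
    spv::SpvBuildLogger logger;
    assert(MapToSpirvToolsEnv(v, &logger) == spv_target_env::SPV_ENV_VULKAN_1_1_SPIRV_1_4);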
+
+
+// Use the SPIRV-Tools disassembler to print SPIR-V.
+void SpirvToolsDisassemble(std::ostream& out, const std::vector<unsigned int>& spirv)
+{
+ // disassemble
+ spv_context context = spvContextCreate(SPV_ENV_UNIVERSAL_1_3);
+ spv_text text;
+ spv_diagnostic diagnostic = nullptr;
+ spvBinaryToText(context, spirv.data(), spirv.size(),
+ SPV_BINARY_TO_TEXT_OPTION_FRIENDLY_NAMES | SPV_BINARY_TO_TEXT_OPTION_INDENT,
+ &text, &diagnostic);
+
+ // dump
+ if (diagnostic == nullptr)
+ out << text->str;
+ else
+ spvDiagnosticPrint(diagnostic);
+
+ // teardown
+ spvDiagnosticDestroy(diagnostic);
+ spvContextDestroy(context);
+}
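A minimal usage sketch, assuming an ENABLE_OPT build and a complete SPIR-V word stream:

    #include <iostream>
    // Dump a compiled module as SPIR-V assembly text.
    void dumpModule(const std::vector<unsigned int>& spirv) {
        glslang::SpirvToolsDisassemble(std::cout, spirv);
    }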
+
+// Apply the SPIRV-Tools validator to generated SPIR-V.
+void SpirvToolsValidate(const glslang::TIntermediate& intermediate, std::vector<unsigned int>& spirv,
+ spv::SpvBuildLogger* logger)
+{
+ // validate
+ spv_context context = spvContextCreate(MapToSpirvToolsEnv(intermediate.getSpv(), logger));
+ spv_const_binary_t binary = { spirv.data(), spirv.size() };
+ spv_diagnostic diagnostic = nullptr;
+ spv_validator_options options = spvValidatorOptionsCreate();
+ spvValidatorOptionsSetRelaxBlockLayout(options, intermediate.usingHlslOffsets());
+ spvValidateWithOptions(context, options, &binary, &diagnostic);
+
+ // report
+ if (diagnostic != nullptr) {
+ logger->error("SPIRV-Tools Validation Errors");
+ logger->error(diagnostic->error);
+ }
+
+ // tear down
+ spvValidatorOptionsDestroy(options);
+ spvDiagnosticDestroy(diagnostic);
+ spvContextDestroy(context);
+}
+
+// Apply the SPIRV-Tools optimizer to generated SPIR-V, for the purpose of
+// legalizing HLSL SPIR-V.
+void SpirvToolsLegalize(const glslang::TIntermediate&, std::vector<unsigned int>& spirv,
+ spv::SpvBuildLogger*, const SpvOptions* options)
+{
+ spv_target_env target_env = SPV_ENV_UNIVERSAL_1_2;
+
+ spvtools::Optimizer optimizer(target_env);
+ optimizer.SetMessageConsumer(
+ [](spv_message_level_t level, const char *source, const spv_position_t &position, const char *message) {
+ auto &out = std::cerr;
+ switch (level)
+ {
+ case SPV_MSG_FATAL:
+ case SPV_MSG_INTERNAL_ERROR:
+ case SPV_MSG_ERROR:
+ out << "error: ";
+ break;
+ case SPV_MSG_WARNING:
+ out << "warning: ";
+ break;
+ case SPV_MSG_INFO:
+ case SPV_MSG_DEBUG:
+ out << "info: ";
+ break;
+ default:
+ break;
+ }
+ if (source)
+ {
+ out << source << ":";
+ }
+ out << position.line << ":" << position.column << ":" << position.index << ":";
+ if (message)
+ {
+ out << " " << message;
+ }
+ out << std::endl;
+ });
+
+ // If debug (specifically source line info) is being generated, propagate
+ // line information into all SPIR-V instructions. This avoids loss of
+ // information when instructions are deleted or moved. Later, remove
+    // redundant information to minimize final SPIR-V size.
+ if (options->generateDebugInfo) {
+ optimizer.RegisterPass(spvtools::CreatePropagateLineInfoPass());
+ }
+ optimizer.RegisterPass(spvtools::CreateDeadBranchElimPass());
+ optimizer.RegisterPass(spvtools::CreateMergeReturnPass());
+ optimizer.RegisterPass(spvtools::CreateInlineExhaustivePass());
+ optimizer.RegisterPass(spvtools::CreateEliminateDeadFunctionsPass());
+ optimizer.RegisterPass(spvtools::CreateScalarReplacementPass());
+ optimizer.RegisterPass(spvtools::CreateLocalAccessChainConvertPass());
+ optimizer.RegisterPass(spvtools::CreateLocalSingleBlockLoadStoreElimPass());
+ optimizer.RegisterPass(spvtools::CreateLocalSingleStoreElimPass());
+ optimizer.RegisterPass(spvtools::CreateSimplificationPass());
+ optimizer.RegisterPass(spvtools::CreateAggressiveDCEPass());
+ optimizer.RegisterPass(spvtools::CreateVectorDCEPass());
+ optimizer.RegisterPass(spvtools::CreateDeadInsertElimPass());
+ optimizer.RegisterPass(spvtools::CreateAggressiveDCEPass());
+ optimizer.RegisterPass(spvtools::CreateDeadBranchElimPass());
+ optimizer.RegisterPass(spvtools::CreateBlockMergePass());
+ optimizer.RegisterPass(spvtools::CreateLocalMultiStoreElimPass());
+ optimizer.RegisterPass(spvtools::CreateIfConversionPass());
+ optimizer.RegisterPass(spvtools::CreateSimplificationPass());
+ optimizer.RegisterPass(spvtools::CreateAggressiveDCEPass());
+ optimizer.RegisterPass(spvtools::CreateVectorDCEPass());
+ optimizer.RegisterPass(spvtools::CreateDeadInsertElimPass());
+ if (options->optimizeSize) {
+ optimizer.RegisterPass(spvtools::CreateRedundancyEliminationPass());
+ // TODO(greg-lunarg): Add this when AMD driver issues are resolved
+ // optimizer.RegisterPass(CreateCommonUniformElimPass());
+ }
+ optimizer.RegisterPass(spvtools::CreateAggressiveDCEPass());
+ optimizer.RegisterPass(spvtools::CreateCFGCleanupPass());
+ if (options->generateDebugInfo) {
+ optimizer.RegisterPass(spvtools::CreateRedundantLineInfoElimPass());
+ }
+
+ spvtools::OptimizerOptions spvOptOptions;
+    spvOptOptions.set_run_validator(false); // The validator may run as a separate step later on
+ optimizer.Run(spirv.data(), spirv.size(), &spirv, spvOptOptions);
+}
+
+}; // end namespace glslang
+
+#endif
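To show how the two option flags gate the pass recipe above, a hedged caller sketch; intermediate, spirv, and logger are assumed to exist from the surrounding glslang-to-SPIR-V flow:

    glslang::SpvOptions opts;
    opts.generateDebugInfo = true;   // adds the line-info propagate/eliminate passes
    opts.optimizeSize      = false;  // skips the extra redundancy-elimination pass
    glslang::SpirvToolsLegalize(intermediate, spirv, &logger, &opts);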
diff --git a/thirdparty/glslang/SPIRV/SpvTools.h b/thirdparty/glslang/SPIRV/SpvTools.h
new file mode 100644
index 0000000000..7e49ae0b30
--- /dev/null
+++ b/thirdparty/glslang/SPIRV/SpvTools.h
@@ -0,0 +1,80 @@
+//
+// Copyright (C) 2014-2016 LunarG, Inc.
+// Copyright (C) 2018 Google, Inc.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+
+//
+// Call into SPIRV-Tools to disassemble, validate, and optimize.
+//
+
+#pragma once
+#ifndef GLSLANG_SPV_TOOLS_H
+#define GLSLANG_SPV_TOOLS_H
+
+#include <vector>
+#include <ostream>
+
+#include "../glslang/MachineIndependent/localintermediate.h"
+#include "Logger.h"
+
+namespace glslang {
+
+struct SpvOptions {
+ SpvOptions() : generateDebugInfo(false), disableOptimizer(true),
+ optimizeSize(false), disassemble(false), validate(false) { }
+ bool generateDebugInfo;
+ bool disableOptimizer;
+ bool optimizeSize;
+ bool disassemble;
+ bool validate;
+};
+
+#if ENABLE_OPT
+
+// Use the SPIRV-Tools disassembler to print SPIR-V.
+void SpirvToolsDisassemble(std::ostream& out, const std::vector<unsigned int>& spirv);
+
+// Apply the SPIRV-Tools validator to generated SPIR-V.
+void SpirvToolsValidate(const glslang::TIntermediate& intermediate, std::vector<unsigned int>& spirv,
+ spv::SpvBuildLogger*);
+
+// Apply the SPIRV-Tools optimizer to generated SPIR-V, for the purpose of
+// legalizing HLSL SPIR-V.
+void SpirvToolsLegalize(const glslang::TIntermediate& intermediate, std::vector<unsigned int>& spirv,
+ spv::SpvBuildLogger*, const SpvOptions*);
+
+#endif
+
+} // end namespace glslang
+
+#endif // GLSLANG_SPV_TOOLS_H
diff --git a/thirdparty/glslang/SPIRV/bitutils.h b/thirdparty/glslang/SPIRV/bitutils.h
new file mode 100644
index 0000000000..22e44cec26
--- /dev/null
+++ b/thirdparty/glslang/SPIRV/bitutils.h
@@ -0,0 +1,81 @@
+// Copyright (c) 2015-2016 The Khronos Group Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef LIBSPIRV_UTIL_BITUTILS_H_
+#define LIBSPIRV_UTIL_BITUTILS_H_
+
+#include <cstdint>
+#include <cstring>
+
+namespace spvutils {
+
+// Performs a bitwise copy of source to the destination type Dest.
+template <typename Dest, typename Src>
+Dest BitwiseCast(Src source) {
+ Dest dest;
+ static_assert(sizeof(source) == sizeof(dest),
+ "BitwiseCast: Source and destination must have the same size");
+ std::memcpy(static_cast<void*>(&dest), &source, sizeof(dest));
+ return dest;
+}
+
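For instance, the classic float-to-bits reinterpretation, done without the undefined behavior of casting through pointers:

    #include <cstdint>
    // 1.0f has the IEEE-754 bit pattern 0x3F800000.
    uint32_t floatBits(float f) {
        return spvutils::BitwiseCast<uint32_t>(f);
    }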
+// SetBits<T, First, Num> returns an integer of type <T> with bits set
+// for position <First> through <First + Num - 1>, counting from the least
+// significant bit. In particular when Num == 0, no positions are set to 1.
+// A static assert will be triggered if First + Num > sizeof(T) * 8, that is,
+// a bit that will not fit in the underlying type is set.
+template <typename T, size_t First = 0, size_t Num = 0>
+struct SetBits {
+ static_assert(First < sizeof(T) * 8,
+ "Tried to set a bit that is shifted too far.");
+ const static T get = (T(1) << First) | SetBits<T, First + 1, Num - 1>::get;
+};
+
+template <typename T, size_t Last>
+struct SetBits<T, Last, 0> {
+ const static T get = T(0);
+};
+
+// This is all compile-time so we can put our tests right here.
+static_assert(SetBits<uint32_t, 0, 0>::get == uint32_t(0x00000000),
+ "SetBits failed");
+static_assert(SetBits<uint32_t, 0, 1>::get == uint32_t(0x00000001),
+ "SetBits failed");
+static_assert(SetBits<uint32_t, 31, 1>::get == uint32_t(0x80000000),
+ "SetBits failed");
+static_assert(SetBits<uint32_t, 1, 2>::get == uint32_t(0x00000006),
+ "SetBits failed");
+static_assert(SetBits<uint32_t, 30, 2>::get == uint32_t(0xc0000000),
+ "SetBits failed");
+static_assert(SetBits<uint32_t, 0, 31>::get == uint32_t(0x7FFFFFFF),
+ "SetBits failed");
+static_assert(SetBits<uint32_t, 0, 32>::get == uint32_t(0xFFFFFFFF),
+ "SetBits failed");
+static_assert(SetBits<uint32_t, 16, 16>::get == uint32_t(0xFFFF0000),
+ "SetBits failed");
+
+static_assert(SetBits<uint64_t, 0, 1>::get == uint64_t(0x0000000000000001LL),
+ "SetBits failed");
+static_assert(SetBits<uint64_t, 63, 1>::get == uint64_t(0x8000000000000000LL),
+ "SetBits failed");
+static_assert(SetBits<uint64_t, 62, 2>::get == uint64_t(0xc000000000000000LL),
+ "SetBits failed");
+static_assert(SetBits<uint64_t, 31, 1>::get == uint64_t(0x0000000080000000LL),
+ "SetBits failed");
+static_assert(SetBits<uint64_t, 16, 16>::get == uint64_t(0x00000000FFFF0000LL),
+ "SetBits failed");
+
+} // namespace spvutils
+
+#endif // LIBSPIRV_UTIL_BITUTILS_H_
diff --git a/thirdparty/glslang/SPIRV/disassemble.cpp b/thirdparty/glslang/SPIRV/disassemble.cpp
new file mode 100644
index 0000000000..631173c0ec
--- /dev/null
+++ b/thirdparty/glslang/SPIRV/disassemble.cpp
@@ -0,0 +1,759 @@
+//
+// Copyright (C) 2014-2015 LunarG, Inc.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+
+//
+// Disassembler for SPIR-V.
+//
+
+#include <cstdlib>
+#include <cstring>
+#include <cassert>
+#include <iomanip>
+#include <stack>
+#include <sstream>
+#include <cstring>
+
+#include "disassemble.h"
+#include "doc.h"
+#include "SpvTools.h"
+
+namespace spv {
+ extern "C" {
+ // Include C-based headers that don't have a namespace
+ #include "GLSL.std.450.h"
+#ifdef AMD_EXTENSIONS
+ #include "GLSL.ext.AMD.h"
+#endif
+
+#ifdef NV_EXTENSIONS
+ #include "GLSL.ext.NV.h"
+#endif
+ }
+}
+const char* GlslStd450DebugNames[spv::GLSLstd450Count];
+
+namespace spv {
+
+#ifdef AMD_EXTENSIONS
+static const char* GLSLextAMDGetDebugNames(const char*, unsigned);
+#endif
+
+#ifdef NV_EXTENSIONS
+static const char* GLSLextNVGetDebugNames(const char*, unsigned);
+#endif
+
+static void Kill(std::ostream& out, const char* message)
+{
+ out << std::endl << "Disassembly failed: " << message << std::endl;
+ exit(1);
+}
+
+// used to identify the extended instruction library imported when printing
+enum ExtInstSet {
+ GLSL450Inst,
+
+#ifdef AMD_EXTENSIONS
+ GLSLextAMDInst,
+#endif
+
+#ifdef NV_EXTENSIONS
+ GLSLextNVInst,
+#endif
+
+ OpenCLExtInst,
+};
+
+// Container class for a single instance of a SPIR-V stream, with methods for disassembly.
+class SpirvStream {
+public:
+ SpirvStream(std::ostream& out, const std::vector<unsigned int>& stream) : out(out), stream(stream), word(0), nextNestedControl(0) { }
+ virtual ~SpirvStream() { }
+
+ void validate();
+ void processInstructions();
+
+protected:
+ SpirvStream(const SpirvStream&);
+ SpirvStream& operator=(const SpirvStream&);
+ Op getOpCode(int id) const { return idInstruction[id] ? (Op)(stream[idInstruction[id]] & OpCodeMask) : OpNop; }
+
+ // Output methods
+ void outputIndent();
+ void formatId(Id id, std::stringstream&);
+ void outputResultId(Id id);
+ void outputTypeId(Id id);
+ void outputId(Id id);
+ void outputMask(OperandClass operandClass, unsigned mask);
+ void disassembleImmediates(int numOperands);
+ void disassembleIds(int numOperands);
+ int disassembleString();
+ void disassembleInstruction(Id resultId, Id typeId, Op opCode, int numOperands);
+
+ // Data
+ std::ostream& out; // where to write the disassembly
+ const std::vector<unsigned int>& stream; // the actual word stream
+ int size; // the size of the word stream
+ int word; // the next word of the stream to read
+
+ // map each <id> to the instruction that created it
+ Id bound;
+ std::vector<unsigned int> idInstruction; // the word offset into the stream where the instruction for result [id] starts; 0 if not yet seen (forward reference or function parameter)
+
+ std::vector<std::string> idDescriptor; // the best text string known for explaining the <id>
+
+ // schema
+ unsigned int schema;
+
+ // stack of structured-merge points
+ std::stack<Id> nestedControl;
+ Id nextNestedControl; // need a slight delay for when we are nested
+};
+
+void SpirvStream::validate()
+{
+ size = (int)stream.size();
+ if (size < 4)
+ Kill(out, "stream is too short");
+
+ // Magic number
+ if (stream[word++] != MagicNumber) {
+ out << "Bad magic number";
+ return;
+ }
+
+ // Version
+ out << "// Module Version " << std::hex << stream[word++] << std::endl;
+
+ // Generator's magic number
+ out << "// Generated by (magic number): " << std::hex << stream[word++] << std::dec << std::endl;
+
+ // Result <id> bound
+ bound = stream[word++];
+ idInstruction.resize(bound);
+ idDescriptor.resize(bound);
+ out << "// Id's are bound by " << bound << std::endl;
+ out << std::endl;
+
+ // Reserved schema, must be 0 for now
+ schema = stream[word++];
+ if (schema != 0)
+ Kill(out, "bad schema, must be 0");
+}
+
+// Loop over all the instructions, in order, processing each.
+// Boilerplate for each is handled here directly; the rest is dispatched.
+void SpirvStream::processInstructions()
+{
+ // Instructions
+ while (word < size) {
+ int instructionStart = word;
+
+ // Instruction wordCount and opcode
+ unsigned int firstWord = stream[word];
+ unsigned wordCount = firstWord >> WordCountShift;
+ Op opCode = (Op)(firstWord & OpCodeMask);
+ int nextInst = word + wordCount;
+ ++word;
+
+ // Presence of full instruction
+ if (nextInst > size)
+ Kill(out, "stream instruction terminated too early");
+
+ // Base for computing number of operands; will be updated as more is learned
+ unsigned numOperands = wordCount - 1;
+
+ // Type <id>
+ Id typeId = 0;
+ if (InstructionDesc[opCode].hasType()) {
+ typeId = stream[word++];
+ --numOperands;
+ }
+
+ // Result <id>
+ Id resultId = 0;
+ if (InstructionDesc[opCode].hasResult()) {
+ resultId = stream[word++];
+ --numOperands;
+
+ // save instruction for future reference
+ idInstruction[resultId] = instructionStart;
+ }
+
+ outputResultId(resultId);
+ outputTypeId(typeId);
+ outputIndent();
+
+ // Hand off the Op and all its operands
+ disassembleInstruction(resultId, typeId, opCode, numOperands);
+ if (word != nextInst) {
+ out << " ERROR, incorrect number of operands consumed. At " << word << " instead of " << nextInst << " instruction start was " << instructionStart;
+ word = nextInst;
+ }
+ out << std::endl;
+ }
+}
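The first-word decode at the top of the loop follows SPIR-V's physical layout: the word count sits in the high 16 bits and the opcode in the low 16. A worked example:

    // 0x00020011 decodes to word count 2, opcode 17, i.e. an OpCapability
    // instruction with one operand word (such as "OpCapability Shader").
    unsigned int firstWord = 0x00020011;
    unsigned wordCount = firstWord >> spv::WordCountShift;    // 2
    spv::Op opCode = (spv::Op)(firstWord & spv::OpCodeMask);  // OpCapability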
+
+void SpirvStream::outputIndent()
+{
+ for (int i = 0; i < (int)nestedControl.size(); ++i)
+ out << " ";
+}
+
+void SpirvStream::formatId(Id id, std::stringstream& idStream)
+{
+ if (id != 0) {
+ // On instructions with no IDs, this is called with "0", which does not
+ // have to be within ID bounds on null shaders.
+ if (id >= bound)
+ Kill(out, "Bad <id>");
+
+ idStream << id;
+ if (idDescriptor[id].size() > 0)
+ idStream << "(" << idDescriptor[id] << ")";
+ }
+}
+
+void SpirvStream::outputResultId(Id id)
+{
+ const int width = 16;
+ std::stringstream idStream;
+ formatId(id, idStream);
+ out << std::setw(width) << std::right << idStream.str();
+ if (id != 0)
+ out << ":";
+ else
+ out << " ";
+
+ if (nestedControl.size() && id == nestedControl.top())
+ nestedControl.pop();
+}
+
+void SpirvStream::outputTypeId(Id id)
+{
+ const int width = 12;
+ std::stringstream idStream;
+ formatId(id, idStream);
+ out << std::setw(width) << std::right << idStream.str() << " ";
+}
+
+void SpirvStream::outputId(Id id)
+{
+ if (id >= bound)
+ Kill(out, "Bad <id>");
+
+ out << id;
+ if (idDescriptor[id].size() > 0)
+ out << "(" << idDescriptor[id] << ")";
+}
+
+void SpirvStream::outputMask(OperandClass operandClass, unsigned mask)
+{
+ if (mask == 0)
+ out << "None";
+ else {
+ for (int m = 0; m < OperandClassParams[operandClass].ceiling; ++m) {
+ if (mask & (1 << m))
+ out << OperandClassParams[operandClass].getName(m) << " ";
+ }
+ }
+}
+
+void SpirvStream::disassembleImmediates(int numOperands)
+{
+ for (int i = 0; i < numOperands; ++i) {
+ out << stream[word++];
+ if (i < numOperands - 1)
+ out << " ";
+ }
+}
+
+void SpirvStream::disassembleIds(int numOperands)
+{
+ for (int i = 0; i < numOperands; ++i) {
+ outputId(stream[word++]);
+ if (i < numOperands - 1)
+ out << " ";
+ }
+}
+
+// return the number of operands consumed by the string
+int SpirvStream::disassembleString()
+{
+ int startWord = word;
+
+ out << " \"";
+
+ const char* wordString;
+ bool done = false;
+ do {
+ unsigned int content = stream[word];
+ wordString = (const char*)&content;
+ for (int charCount = 0; charCount < 4; ++charCount) {
+ if (*wordString == 0) {
+ done = true;
+ break;
+ }
+ out << *(wordString++);
+ }
+ ++word;
+ } while (! done);
+
+ out << "\"";
+
+ return word - startWord;
+}
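The loop above walks SPIR-V's literal-string layout: four UTF-8 bytes per word, low byte first, including the NUL terminator, so a string whose length is a multiple of four spills into an extra all-zero word. A hypothetical encoder for the same layout (assuming a little-endian host, the same assumption the char* cast above makes):

    #include <cstring>
    #include <string>
    #include <vector>
    std::vector<unsigned int> packLiteralString(const std::string& s) {
        // s.size() + 1 bytes including the NUL, rounded up to whole words.
        std::vector<unsigned int> words((s.size() + 4) / 4, 0u);
        std::memcpy(words.data(), s.c_str(), s.size() + 1);
        return words;  // "abc" -> {0x00636261}; "abcd" -> {0x64636261, 0}
    }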
+
+void SpirvStream::disassembleInstruction(Id resultId, Id /*typeId*/, Op opCode, int numOperands)
+{
+ // Process the opcode
+
+ out << (OpcodeString(opCode) + 2); // leave out the "Op"
+
+ if (opCode == OpLoopMerge || opCode == OpSelectionMerge)
+ nextNestedControl = stream[word];
+ else if (opCode == OpBranchConditional || opCode == OpSwitch) {
+ if (nextNestedControl) {
+ nestedControl.push(nextNestedControl);
+ nextNestedControl = 0;
+ }
+ } else if (opCode == OpExtInstImport) {
+ idDescriptor[resultId] = (const char*)(&stream[word]);
+ }
+ else {
+ if (resultId != 0 && idDescriptor[resultId].size() == 0) {
+ switch (opCode) {
+ case OpTypeInt:
+ switch (stream[word]) {
+ case 8: idDescriptor[resultId] = "int8_t"; break;
+ case 16: idDescriptor[resultId] = "int16_t"; break;
+ default: assert(0); // fallthrough
+ case 32: idDescriptor[resultId] = "int"; break;
+ case 64: idDescriptor[resultId] = "int64_t"; break;
+ }
+ break;
+ case OpTypeFloat:
+ switch (stream[word]) {
+ case 16: idDescriptor[resultId] = "float16_t"; break;
+ default: assert(0); // fallthrough
+ case 32: idDescriptor[resultId] = "float"; break;
+ case 64: idDescriptor[resultId] = "float64_t"; break;
+ }
+ break;
+ case OpTypeBool:
+ idDescriptor[resultId] = "bool";
+ break;
+ case OpTypeStruct:
+ idDescriptor[resultId] = "struct";
+ break;
+ case OpTypePointer:
+ idDescriptor[resultId] = "ptr";
+ break;
+ case OpTypeVector:
+ if (idDescriptor[stream[word]].size() > 0) {
+ idDescriptor[resultId].append(idDescriptor[stream[word]].begin(), idDescriptor[stream[word]].begin() + 1);
+ if (strstr(idDescriptor[stream[word]].c_str(), "8")) {
+ idDescriptor[resultId].append("8");
+ }
+ if (strstr(idDescriptor[stream[word]].c_str(), "16")) {
+ idDescriptor[resultId].append("16");
+ }
+ if (strstr(idDescriptor[stream[word]].c_str(), "64")) {
+ idDescriptor[resultId].append("64");
+ }
+ }
+ idDescriptor[resultId].append("vec");
+ switch (stream[word + 1]) {
+ case 2: idDescriptor[resultId].append("2"); break;
+ case 3: idDescriptor[resultId].append("3"); break;
+ case 4: idDescriptor[resultId].append("4"); break;
+ case 8: idDescriptor[resultId].append("8"); break;
+ case 16: idDescriptor[resultId].append("16"); break;
+ case 32: idDescriptor[resultId].append("32"); break;
+ default: break;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+ }
+
+ // Process the operands. Note, a new context-dependent set could be
+ // swapped in mid-traversal.
+
+    // Handle images specially, so we can put out helpful strings.
+ if (opCode == OpTypeImage) {
+ out << " ";
+ disassembleIds(1);
+ out << " " << DimensionString((Dim)stream[word++]);
+ out << (stream[word++] != 0 ? " depth" : "");
+ out << (stream[word++] != 0 ? " array" : "");
+ out << (stream[word++] != 0 ? " multi-sampled" : "");
+ switch (stream[word++]) {
+ case 0: out << " runtime"; break;
+ case 1: out << " sampled"; break;
+ case 2: out << " nonsampled"; break;
+ }
+ out << " format:" << ImageFormatString((ImageFormat)stream[word++]);
+
+ if (numOperands == 8) {
+ out << " " << AccessQualifierString(stream[word++]);
+ }
+ return;
+ }
+
+ // Handle all the parameterized operands
+ for (int op = 0; op < InstructionDesc[opCode].operands.getNum() && numOperands > 0; ++op) {
+ out << " ";
+ OperandClass operandClass = InstructionDesc[opCode].operands.getClass(op);
+ switch (operandClass) {
+ case OperandId:
+ case OperandScope:
+ case OperandMemorySemantics:
+ disassembleIds(1);
+ --numOperands;
+ // Get names for printing "(XXX)" for readability, *after* this id
+ if (opCode == OpName)
+ idDescriptor[stream[word - 1]] = (const char*)(&stream[word]);
+ break;
+ case OperandVariableIds:
+ disassembleIds(numOperands);
+ return;
+ case OperandImageOperands:
+ outputMask(OperandImageOperands, stream[word++]);
+ --numOperands;
+ disassembleIds(numOperands);
+ return;
+ case OperandOptionalLiteral:
+ case OperandVariableLiterals:
+ if ((opCode == OpDecorate && stream[word - 1] == DecorationBuiltIn) ||
+ (opCode == OpMemberDecorate && stream[word - 1] == DecorationBuiltIn)) {
+ out << BuiltInString(stream[word++]);
+ --numOperands;
+ ++op;
+ }
+ disassembleImmediates(numOperands);
+ return;
+ case OperandVariableIdLiteral:
+ while (numOperands > 0) {
+ out << std::endl;
+ outputResultId(0);
+ outputTypeId(0);
+ outputIndent();
+ out << " Type ";
+ disassembleIds(1);
+ out << ", member ";
+ disassembleImmediates(1);
+ numOperands -= 2;
+ }
+ return;
+ case OperandVariableLiteralId:
+ while (numOperands > 0) {
+ out << std::endl;
+ outputResultId(0);
+ outputTypeId(0);
+ outputIndent();
+ out << " case ";
+ disassembleImmediates(1);
+ out << ": ";
+ disassembleIds(1);
+ numOperands -= 2;
+ }
+ return;
+ case OperandLiteralNumber:
+ disassembleImmediates(1);
+ --numOperands;
+ if (opCode == OpExtInst) {
+ ExtInstSet extInstSet = GLSL450Inst;
+ const char* name = idDescriptor[stream[word - 2]].c_str();
+ if (0 == memcmp("OpenCL", name, 6)) {
+ extInstSet = OpenCLExtInst;
+#ifdef AMD_EXTENSIONS
+ } else if (strcmp(spv::E_SPV_AMD_shader_ballot, name) == 0 ||
+ strcmp(spv::E_SPV_AMD_shader_trinary_minmax, name) == 0 ||
+ strcmp(spv::E_SPV_AMD_shader_explicit_vertex_parameter, name) == 0 ||
+ strcmp(spv::E_SPV_AMD_gcn_shader, name) == 0) {
+ extInstSet = GLSLextAMDInst;
+#endif
+#ifdef NV_EXTENSIONS
+            } else if (strcmp(spv::E_SPV_NV_sample_mask_override_coverage, name) == 0 ||
+ strcmp(spv::E_SPV_NV_geometry_shader_passthrough, name) == 0 ||
+ strcmp(spv::E_SPV_NV_viewport_array2, name) == 0 ||
+ strcmp(spv::E_SPV_NVX_multiview_per_view_attributes, name) == 0 ||
+ strcmp(spv::E_SPV_NV_fragment_shader_barycentric, name) == 0 ||
+ strcmp(spv::E_SPV_NV_mesh_shader, name) == 0) {
+ extInstSet = GLSLextNVInst;
+#endif
+ }
+ unsigned entrypoint = stream[word - 1];
+ if (extInstSet == GLSL450Inst) {
+ if (entrypoint < GLSLstd450Count) {
+ out << "(" << GlslStd450DebugNames[entrypoint] << ")";
+ }
+#ifdef AMD_EXTENSIONS
+ } else if (extInstSet == GLSLextAMDInst) {
+ out << "(" << GLSLextAMDGetDebugNames(name, entrypoint) << ")";
+#endif
+#ifdef NV_EXTENSIONS
+            } else if (extInstSet == GLSLextNVInst) {
+ out << "(" << GLSLextNVGetDebugNames(name, entrypoint) << ")";
+#endif
+ }
+ }
+ break;
+ case OperandOptionalLiteralString:
+ case OperandLiteralString:
+ numOperands -= disassembleString();
+ break;
+ case OperandMemoryAccess:
+ outputMask(OperandMemoryAccess, stream[word++]);
+ --numOperands;
+ // Aligned is the only memory access operand that uses an immediate
+ // value, and it is also the first operand that uses a value at all.
+ if (stream[word-1] & MemoryAccessAlignedMask) {
+ disassembleImmediates(1);
+ numOperands--;
+ if (numOperands)
+ out << " ";
+ }
+ disassembleIds(numOperands);
+ return;
+ default:
+ assert(operandClass >= OperandSource && operandClass < OperandOpcode);
+
+ if (OperandClassParams[operandClass].bitmask)
+ outputMask(operandClass, stream[word++]);
+ else
+ out << OperandClassParams[operandClass].getName(stream[word++]);
+ --numOperands;
+
+ break;
+ }
+ }
+
+ return;
+}
+
+static void GLSLstd450GetDebugNames(const char** names)
+{
+ for (int i = 0; i < GLSLstd450Count; ++i)
+ names[i] = "Unknown";
+
+ names[GLSLstd450Round] = "Round";
+ names[GLSLstd450RoundEven] = "RoundEven";
+ names[GLSLstd450Trunc] = "Trunc";
+ names[GLSLstd450FAbs] = "FAbs";
+ names[GLSLstd450SAbs] = "SAbs";
+ names[GLSLstd450FSign] = "FSign";
+ names[GLSLstd450SSign] = "SSign";
+ names[GLSLstd450Floor] = "Floor";
+ names[GLSLstd450Ceil] = "Ceil";
+ names[GLSLstd450Fract] = "Fract";
+ names[GLSLstd450Radians] = "Radians";
+ names[GLSLstd450Degrees] = "Degrees";
+ names[GLSLstd450Sin] = "Sin";
+ names[GLSLstd450Cos] = "Cos";
+ names[GLSLstd450Tan] = "Tan";
+ names[GLSLstd450Asin] = "Asin";
+ names[GLSLstd450Acos] = "Acos";
+ names[GLSLstd450Atan] = "Atan";
+ names[GLSLstd450Sinh] = "Sinh";
+ names[GLSLstd450Cosh] = "Cosh";
+ names[GLSLstd450Tanh] = "Tanh";
+ names[GLSLstd450Asinh] = "Asinh";
+ names[GLSLstd450Acosh] = "Acosh";
+ names[GLSLstd450Atanh] = "Atanh";
+ names[GLSLstd450Atan2] = "Atan2";
+ names[GLSLstd450Pow] = "Pow";
+ names[GLSLstd450Exp] = "Exp";
+ names[GLSLstd450Log] = "Log";
+ names[GLSLstd450Exp2] = "Exp2";
+ names[GLSLstd450Log2] = "Log2";
+ names[GLSLstd450Sqrt] = "Sqrt";
+ names[GLSLstd450InverseSqrt] = "InverseSqrt";
+ names[GLSLstd450Determinant] = "Determinant";
+ names[GLSLstd450MatrixInverse] = "MatrixInverse";
+ names[GLSLstd450Modf] = "Modf";
+ names[GLSLstd450ModfStruct] = "ModfStruct";
+ names[GLSLstd450FMin] = "FMin";
+ names[GLSLstd450SMin] = "SMin";
+ names[GLSLstd450UMin] = "UMin";
+ names[GLSLstd450FMax] = "FMax";
+ names[GLSLstd450SMax] = "SMax";
+ names[GLSLstd450UMax] = "UMax";
+ names[GLSLstd450FClamp] = "FClamp";
+ names[GLSLstd450SClamp] = "SClamp";
+ names[GLSLstd450UClamp] = "UClamp";
+ names[GLSLstd450FMix] = "FMix";
+ names[GLSLstd450Step] = "Step";
+ names[GLSLstd450SmoothStep] = "SmoothStep";
+ names[GLSLstd450Fma] = "Fma";
+ names[GLSLstd450Frexp] = "Frexp";
+ names[GLSLstd450FrexpStruct] = "FrexpStruct";
+ names[GLSLstd450Ldexp] = "Ldexp";
+ names[GLSLstd450PackSnorm4x8] = "PackSnorm4x8";
+ names[GLSLstd450PackUnorm4x8] = "PackUnorm4x8";
+ names[GLSLstd450PackSnorm2x16] = "PackSnorm2x16";
+ names[GLSLstd450PackUnorm2x16] = "PackUnorm2x16";
+ names[GLSLstd450PackHalf2x16] = "PackHalf2x16";
+ names[GLSLstd450PackDouble2x32] = "PackDouble2x32";
+ names[GLSLstd450UnpackSnorm2x16] = "UnpackSnorm2x16";
+ names[GLSLstd450UnpackUnorm2x16] = "UnpackUnorm2x16";
+ names[GLSLstd450UnpackHalf2x16] = "UnpackHalf2x16";
+ names[GLSLstd450UnpackSnorm4x8] = "UnpackSnorm4x8";
+ names[GLSLstd450UnpackUnorm4x8] = "UnpackUnorm4x8";
+ names[GLSLstd450UnpackDouble2x32] = "UnpackDouble2x32";
+ names[GLSLstd450Length] = "Length";
+ names[GLSLstd450Distance] = "Distance";
+ names[GLSLstd450Cross] = "Cross";
+ names[GLSLstd450Normalize] = "Normalize";
+ names[GLSLstd450FaceForward] = "FaceForward";
+ names[GLSLstd450Reflect] = "Reflect";
+ names[GLSLstd450Refract] = "Refract";
+ names[GLSLstd450FindILsb] = "FindILsb";
+ names[GLSLstd450FindSMsb] = "FindSMsb";
+ names[GLSLstd450FindUMsb] = "FindUMsb";
+ names[GLSLstd450InterpolateAtCentroid] = "InterpolateAtCentroid";
+ names[GLSLstd450InterpolateAtSample] = "InterpolateAtSample";
+ names[GLSLstd450InterpolateAtOffset] = "InterpolateAtOffset";
+}
+
+#ifdef AMD_EXTENSIONS
+static const char* GLSLextAMDGetDebugNames(const char* name, unsigned entrypoint)
+{
+ if (strcmp(name, spv::E_SPV_AMD_shader_ballot) == 0) {
+ switch (entrypoint) {
+ case SwizzleInvocationsAMD: return "SwizzleInvocationsAMD";
+ case SwizzleInvocationsMaskedAMD: return "SwizzleInvocationsMaskedAMD";
+ case WriteInvocationAMD: return "WriteInvocationAMD";
+ case MbcntAMD: return "MbcntAMD";
+ default: return "Bad";
+ }
+ } else if (strcmp(name, spv::E_SPV_AMD_shader_trinary_minmax) == 0) {
+ switch (entrypoint) {
+ case FMin3AMD: return "FMin3AMD";
+ case UMin3AMD: return "UMin3AMD";
+ case SMin3AMD: return "SMin3AMD";
+ case FMax3AMD: return "FMax3AMD";
+ case UMax3AMD: return "UMax3AMD";
+ case SMax3AMD: return "SMax3AMD";
+ case FMid3AMD: return "FMid3AMD";
+ case UMid3AMD: return "UMid3AMD";
+ case SMid3AMD: return "SMid3AMD";
+ default: return "Bad";
+ }
+ } else if (strcmp(name, spv::E_SPV_AMD_shader_explicit_vertex_parameter) == 0) {
+ switch (entrypoint) {
+ case InterpolateAtVertexAMD: return "InterpolateAtVertexAMD";
+ default: return "Bad";
+ }
+    } else if (strcmp(name, spv::E_SPV_AMD_gcn_shader) == 0) {
+ switch (entrypoint) {
+ case CubeFaceIndexAMD: return "CubeFaceIndexAMD";
+ case CubeFaceCoordAMD: return "CubeFaceCoordAMD";
+ case TimeAMD: return "TimeAMD";
+ default:
+ break;
+ }
+ }
+
+ return "Bad";
+}
+#endif
+
+#ifdef NV_EXTENSIONS
+static const char* GLSLextNVGetDebugNames(const char* name, unsigned entrypoint)
+{
+ if (strcmp(name, spv::E_SPV_NV_sample_mask_override_coverage) == 0 ||
+ strcmp(name, spv::E_SPV_NV_geometry_shader_passthrough) == 0 ||
+ strcmp(name, spv::E_ARB_shader_viewport_layer_array) == 0 ||
+ strcmp(name, spv::E_SPV_NV_viewport_array2) == 0 ||
+ strcmp(spv::E_SPV_NVX_multiview_per_view_attributes, name) == 0 ||
+ strcmp(spv::E_SPV_NV_fragment_shader_barycentric, name) == 0 ||
+ strcmp(name, spv::E_SPV_NV_mesh_shader) == 0) {
+ switch (entrypoint) {
+ // NV builtins
+ case BuiltInViewportMaskNV: return "ViewportMaskNV";
+ case BuiltInSecondaryPositionNV: return "SecondaryPositionNV";
+ case BuiltInSecondaryViewportMaskNV: return "SecondaryViewportMaskNV";
+ case BuiltInPositionPerViewNV: return "PositionPerViewNV";
+ case BuiltInViewportMaskPerViewNV: return "ViewportMaskPerViewNV";
+ case BuiltInBaryCoordNV: return "BaryCoordNV";
+ case BuiltInBaryCoordNoPerspNV: return "BaryCoordNoPerspNV";
+ case BuiltInTaskCountNV: return "TaskCountNV";
+ case BuiltInPrimitiveCountNV: return "PrimitiveCountNV";
+ case BuiltInPrimitiveIndicesNV: return "PrimitiveIndicesNV";
+ case BuiltInClipDistancePerViewNV: return "ClipDistancePerViewNV";
+ case BuiltInCullDistancePerViewNV: return "CullDistancePerViewNV";
+ case BuiltInLayerPerViewNV: return "LayerPerViewNV";
+ case BuiltInMeshViewCountNV: return "MeshViewCountNV";
+ case BuiltInMeshViewIndicesNV: return "MeshViewIndicesNV";
+
+ // NV Capabilities
+ case CapabilityGeometryShaderPassthroughNV: return "GeometryShaderPassthroughNV";
+ case CapabilityShaderViewportMaskNV: return "ShaderViewportMaskNV";
+ case CapabilityShaderStereoViewNV: return "ShaderStereoViewNV";
+ case CapabilityPerViewAttributesNV: return "PerViewAttributesNV";
+ case CapabilityFragmentBarycentricNV: return "FragmentBarycentricNV";
+ case CapabilityMeshShadingNV: return "MeshShadingNV";
+
+ // NV Decorations
+ case DecorationOverrideCoverageNV: return "OverrideCoverageNV";
+ case DecorationPassthroughNV: return "PassthroughNV";
+ case DecorationViewportRelativeNV: return "ViewportRelativeNV";
+ case DecorationSecondaryViewportRelativeNV: return "SecondaryViewportRelativeNV";
+ case DecorationPerVertexNV: return "PerVertexNV";
+ case DecorationPerPrimitiveNV: return "PerPrimitiveNV";
+ case DecorationPerViewNV: return "PerViewNV";
+ case DecorationPerTaskNV: return "PerTaskNV";
+
+ default: return "Bad";
+ }
+ }
+ return "Bad";
+}
+#endif
+
+void Disassemble(std::ostream& out, const std::vector<unsigned int>& stream)
+{
+    SpirvStream spirvStream(out, stream);
+    spv::Parameterize();
+    GLSLstd450GetDebugNames(GlslStd450DebugNames);
+    spirvStream.validate();
+    spirvStream.processInstructions();
+}
+
+}; // end namespace spv
diff --git a/thirdparty/glslang/SPIRV/disassemble.h b/thirdparty/glslang/SPIRV/disassemble.h
new file mode 100644
index 0000000000..b6a4635775
--- /dev/null
+++ b/thirdparty/glslang/SPIRV/disassemble.h
@@ -0,0 +1,53 @@
+//
+// Copyright (C) 2014-2015 LunarG, Inc.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+
+//
+// Disassembler for SPIR-V.
+//
+
+#pragma once
+#ifndef disassembler_H
+#define disassembler_H
+
+#include <iostream>
+#include <vector>
+
+namespace spv {
+
+    // disassemble with the glslang custom disassembler
+ void Disassemble(std::ostream& out, const std::vector<unsigned int>&);
+
+} // end namespace spv
+
+#endif // disassembler_H
diff --git a/thirdparty/glslang/SPIRV/doc.cpp b/thirdparty/glslang/SPIRV/doc.cpp
new file mode 100644
index 0000000000..3b85767216
--- /dev/null
+++ b/thirdparty/glslang/SPIRV/doc.cpp
@@ -0,0 +1,2767 @@
+//
+// Copyright (C) 2014-2015 LunarG, Inc.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+
+//
+// 1) Programmatically fill in instruction/operand information.
+// This can be used for disassembly, printing documentation, etc.
+//
+// 2) Print documentation from this parameterization.
+//
+
+#include "doc.h"
+
+#include <cstdio>
+#include <cstring>
+#include <algorithm>
+
+namespace spv {
+ extern "C" {
+ // Include C-based headers that don't have a namespace
+ #include "GLSL.ext.KHR.h"
+ #include "GLSL.ext.EXT.h"
+#ifdef AMD_EXTENSIONS
+ #include "GLSL.ext.AMD.h"
+#endif
+#ifdef NV_EXTENSIONS
+ #include "GLSL.ext.NV.h"
+#endif
+ }
+}
+
+namespace spv {
+
+//
+// Whole set of functions that translate enumerants to their text strings for
+// the specification (or their sanitized versions for auto-generating the
+// SPIR-V headers).
+//
+// The ceilings are declared next to these functions, to help keep them in sync.
+// A ceiling should be
+// - one more than the maximum value an enumerant takes on, for non-mask enumerants
+// (for non-sparse enums, this is the number of enumerants)
+// - the number of bits consumed by the set of masks, for mask enumerants
+// (for non-sparse mask enums, this is the number of enumerants)
+//
+
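+// For example, under this convention SelectControlCeiling below is 2 because
+// SelectionControl has exactly the enumerants Flatten (0) and DontFlatten (1),
+// while ImageOperandsCeiling is 14 because the ImageOperands masks occupy bit
+// positions 0 through 13.
+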
+const char* SourceString(int source)
+{
+ switch (source) {
+ case 0: return "Unknown";
+ case 1: return "ESSL";
+ case 2: return "GLSL";
+ case 3: return "OpenCL_C";
+ case 4: return "OpenCL_CPP";
+ case 5: return "HLSL";
+
+ default: return "Bad";
+ }
+}
+
+const char* ExecutionModelString(int model)
+{
+ switch (model) {
+ case 0: return "Vertex";
+ case 1: return "TessellationControl";
+ case 2: return "TessellationEvaluation";
+ case 3: return "Geometry";
+ case 4: return "Fragment";
+ case 5: return "GLCompute";
+ case 6: return "Kernel";
+#ifdef NV_EXTENSIONS
+ case ExecutionModelTaskNV: return "TaskNV";
+ case ExecutionModelMeshNV: return "MeshNV";
+ case ExecutionModelRayGenerationNV: return "RayGenerationNV";
+ case ExecutionModelIntersectionNV: return "IntersectionNV";
+ case ExecutionModelAnyHitNV: return "AnyHitNV";
+ case ExecutionModelClosestHitNV: return "ClosestHitNV";
+ case ExecutionModelMissNV: return "MissNV";
+ case ExecutionModelCallableNV: return "CallableNV";
+#endif
+
+ default: return "Bad";
+ }
+}
+
+const char* AddressingString(int addr)
+{
+ switch (addr) {
+ case 0: return "Logical";
+ case 1: return "Physical32";
+ case 2: return "Physical64";
+
+ case AddressingModelPhysicalStorageBuffer64EXT: return "PhysicalStorageBuffer64EXT";
+
+ default: return "Bad";
+ }
+}
+
+const char* MemoryString(int mem)
+{
+ switch (mem) {
+ case MemoryModelSimple: return "Simple";
+ case MemoryModelGLSL450: return "GLSL450";
+ case MemoryModelOpenCL: return "OpenCL";
+ case MemoryModelVulkanKHR: return "VulkanKHR";
+
+ default: return "Bad";
+ }
+}
+
+const int ExecutionModeCeiling = 33;
+
+const char* ExecutionModeString(int mode)
+{
+ switch (mode) {
+ case 0: return "Invocations";
+ case 1: return "SpacingEqual";
+ case 2: return "SpacingFractionalEven";
+ case 3: return "SpacingFractionalOdd";
+ case 4: return "VertexOrderCw";
+ case 5: return "VertexOrderCcw";
+ case 6: return "PixelCenterInteger";
+ case 7: return "OriginUpperLeft";
+ case 8: return "OriginLowerLeft";
+ case 9: return "EarlyFragmentTests";
+ case 10: return "PointMode";
+ case 11: return "Xfb";
+ case 12: return "DepthReplacing";
+ case 13: return "Bad";
+ case 14: return "DepthGreater";
+ case 15: return "DepthLess";
+ case 16: return "DepthUnchanged";
+ case 17: return "LocalSize";
+ case 18: return "LocalSizeHint";
+ case 19: return "InputPoints";
+ case 20: return "InputLines";
+ case 21: return "InputLinesAdjacency";
+ case 22: return "Triangles";
+ case 23: return "InputTrianglesAdjacency";
+ case 24: return "Quads";
+ case 25: return "Isolines";
+ case 26: return "OutputVertices";
+ case 27: return "OutputPoints";
+ case 28: return "OutputLineStrip";
+ case 29: return "OutputTriangleStrip";
+ case 30: return "VecTypeHint";
+ case 31: return "ContractionOff";
+ case 32: return "Bad";
+
+ case 4446: return "PostDepthCoverage";
+
+#ifdef NV_EXTENSIONS
+ case ExecutionModeOutputLinesNV: return "OutputLinesNV";
+ case ExecutionModeOutputPrimitivesNV: return "OutputPrimitivesNV";
+ case ExecutionModeOutputTrianglesNV: return "OutputTrianglesNV";
+ case ExecutionModeDerivativeGroupQuadsNV: return "DerivativeGroupQuadsNV";
+ case ExecutionModeDerivativeGroupLinearNV: return "DerivativeGroupLinearNV";
+#endif
+
+ case ExecutionModeCeiling:
+ default: return "Bad";
+ }
+}
+
+const char* StorageClassString(int storageClass)
+{
+ switch (storageClass) {
+ case 0: return "UniformConstant";
+ case 1: return "Input";
+ case 2: return "Uniform";
+ case 3: return "Output";
+ case 4: return "Workgroup";
+ case 5: return "CrossWorkgroup";
+ case 6: return "Private";
+ case 7: return "Function";
+ case 8: return "Generic";
+ case 9: return "PushConstant";
+ case 10: return "AtomicCounter";
+ case 11: return "Image";
+ case 12: return "StorageBuffer";
+
+#ifdef NV_EXTENSIONS
+ case StorageClassRayPayloadNV: return "RayPayloadNV";
+ case StorageClassHitAttributeNV: return "HitAttributeNV";
+ case StorageClassIncomingRayPayloadNV: return "IncomingRayPayloadNV";
+ case StorageClassShaderRecordBufferNV: return "ShaderRecordBufferNV";
+ case StorageClassCallableDataNV: return "CallableDataNV";
+ case StorageClassIncomingCallableDataNV: return "IncomingCallableDataNV";
+#endif
+
+ case StorageClassPhysicalStorageBufferEXT: return "PhysicalStorageBufferEXT";
+
+ default: return "Bad";
+ }
+}
+
+const int DecorationCeiling = 45;
+
+const char* DecorationString(int decoration)
+{
+ switch (decoration) {
+ case 0: return "RelaxedPrecision";
+ case 1: return "SpecId";
+ case 2: return "Block";
+ case 3: return "BufferBlock";
+ case 4: return "RowMajor";
+ case 5: return "ColMajor";
+ case 6: return "ArrayStride";
+ case 7: return "MatrixStride";
+ case 8: return "GLSLShared";
+ case 9: return "GLSLPacked";
+ case 10: return "CPacked";
+ case 11: return "BuiltIn";
+ case 12: return "Bad";
+ case 13: return "NoPerspective";
+ case 14: return "Flat";
+ case 15: return "Patch";
+ case 16: return "Centroid";
+ case 17: return "Sample";
+ case 18: return "Invariant";
+ case 19: return "Restrict";
+ case 20: return "Aliased";
+ case 21: return "Volatile";
+ case 22: return "Constant";
+ case 23: return "Coherent";
+ case 24: return "NonWritable";
+ case 25: return "NonReadable";
+ case 26: return "Uniform";
+ case 27: return "Bad";
+ case 28: return "SaturatedConversion";
+ case 29: return "Stream";
+ case 30: return "Location";
+ case 31: return "Component";
+ case 32: return "Index";
+ case 33: return "Binding";
+ case 34: return "DescriptorSet";
+ case 35: return "Offset";
+ case 36: return "XfbBuffer";
+ case 37: return "XfbStride";
+ case 38: return "FuncParamAttr";
+ case 39: return "FP Rounding Mode";
+ case 40: return "FP Fast Math Mode";
+ case 41: return "Linkage Attributes";
+ case 42: return "NoContraction";
+ case 43: return "InputAttachmentIndex";
+ case 44: return "Alignment";
+
+#ifdef AMD_EXTENSIONS
+ case DecorationExplicitInterpAMD: return "ExplicitInterpAMD";
+#endif
+#ifdef NV_EXTENSIONS
+ case DecorationOverrideCoverageNV: return "OverrideCoverageNV";
+ case DecorationPassthroughNV: return "PassthroughNV";
+ case DecorationViewportRelativeNV: return "ViewportRelativeNV";
+ case DecorationSecondaryViewportRelativeNV: return "SecondaryViewportRelativeNV";
+ case DecorationPerPrimitiveNV: return "PerPrimitiveNV";
+ case DecorationPerViewNV: return "PerViewNV";
+ case DecorationPerTaskNV: return "PerTaskNV";
+ case DecorationPerVertexNV: return "PerVertexNV";
+#endif
+
+ case DecorationNonUniformEXT: return "DecorationNonUniformEXT";
+ case DecorationHlslCounterBufferGOOGLE: return "DecorationHlslCounterBufferGOOGLE";
+ case DecorationHlslSemanticGOOGLE: return "DecorationHlslSemanticGOOGLE";
+ case DecorationRestrictPointerEXT: return "DecorationRestrictPointerEXT";
+ case DecorationAliasedPointerEXT: return "DecorationAliasedPointerEXT";
+
+ case DecorationCeiling:
+ default: return "Bad";
+ }
+}
+
+const char* BuiltInString(int builtIn)
+{
+ switch (builtIn) {
+ case 0: return "Position";
+ case 1: return "PointSize";
+ case 2: return "Bad";
+ case 3: return "ClipDistance";
+ case 4: return "CullDistance";
+ case 5: return "VertexId";
+ case 6: return "InstanceId";
+ case 7: return "PrimitiveId";
+ case 8: return "InvocationId";
+ case 9: return "Layer";
+ case 10: return "ViewportIndex";
+ case 11: return "TessLevelOuter";
+ case 12: return "TessLevelInner";
+ case 13: return "TessCoord";
+ case 14: return "PatchVertices";
+ case 15: return "FragCoord";
+ case 16: return "PointCoord";
+ case 17: return "FrontFacing";
+ case 18: return "SampleId";
+ case 19: return "SamplePosition";
+ case 20: return "SampleMask";
+ case 21: return "Bad";
+ case 22: return "FragDepth";
+ case 23: return "HelperInvocation";
+ case 24: return "NumWorkgroups";
+ case 25: return "WorkgroupSize";
+ case 26: return "WorkgroupId";
+ case 27: return "LocalInvocationId";
+ case 28: return "GlobalInvocationId";
+ case 29: return "LocalInvocationIndex";
+ case 30: return "WorkDim";
+ case 31: return "GlobalSize";
+ case 32: return "EnqueuedWorkgroupSize";
+ case 33: return "GlobalOffset";
+ case 34: return "GlobalLinearId";
+ case 35: return "Bad";
+ case 36: return "SubgroupSize";
+ case 37: return "SubgroupMaxSize";
+ case 38: return "NumSubgroups";
+ case 39: return "NumEnqueuedSubgroups";
+ case 40: return "SubgroupId";
+ case 41: return "SubgroupLocalInvocationId";
+ case 42: return "VertexIndex"; // TBD: put next to VertexId?
+ case 43: return "InstanceIndex"; // TBD: put next to InstanceId?
+
+ case 4416: return "SubgroupEqMaskKHR";
+ case 4417: return "SubgroupGeMaskKHR";
+ case 4418: return "SubgroupGtMaskKHR";
+ case 4419: return "SubgroupLeMaskKHR";
+ case 4420: return "SubgroupLtMaskKHR";
+ case 4438: return "DeviceIndex";
+ case 4440: return "ViewIndex";
+ case 4424: return "BaseVertex";
+ case 4425: return "BaseInstance";
+ case 4426: return "DrawIndex";
+ case 5014: return "FragStencilRefEXT";
+
+#ifdef AMD_EXTENSIONS
+ case 4992: return "BaryCoordNoPerspAMD";
+ case 4993: return "BaryCoordNoPerspCentroidAMD";
+ case 4994: return "BaryCoordNoPerspSampleAMD";
+ case 4995: return "BaryCoordSmoothAMD";
+ case 4996: return "BaryCoordSmoothCentroidAMD";
+ case 4997: return "BaryCoordSmoothSampleAMD";
+ case 4998: return "BaryCoordPullModelAMD";
+#endif
+
+#ifdef NV_EXTENSIONS
+ case BuiltInLaunchIdNV: return "LaunchIdNV";
+ case BuiltInLaunchSizeNV: return "LaunchSizeNV";
+ case BuiltInWorldRayOriginNV: return "WorldRayOriginNV";
+ case BuiltInWorldRayDirectionNV: return "WorldRayDirectionNV";
+ case BuiltInObjectRayOriginNV: return "ObjectRayOriginNV";
+ case BuiltInObjectRayDirectionNV: return "ObjectRayDirectionNV";
+ case BuiltInRayTminNV: return "RayTminNV";
+ case BuiltInRayTmaxNV: return "RayTmaxNV";
+ case BuiltInInstanceCustomIndexNV: return "InstanceCustomIndexNV";
+ case BuiltInObjectToWorldNV: return "ObjectToWorldNV";
+ case BuiltInWorldToObjectNV: return "WorldToObjectNV";
+ case BuiltInHitTNV: return "HitTNV";
+ case BuiltInHitKindNV: return "HitKindNV";
+ case BuiltInIncomingRayFlagsNV: return "IncomingRayFlagsNV";
+ case BuiltInViewportMaskNV: return "ViewportMaskNV";
+ case BuiltInSecondaryPositionNV: return "SecondaryPositionNV";
+ case BuiltInSecondaryViewportMaskNV: return "SecondaryViewportMaskNV";
+ case BuiltInPositionPerViewNV: return "PositionPerViewNV";
+ case BuiltInViewportMaskPerViewNV: return "ViewportMaskPerViewNV";
+// case BuiltInFragmentSizeNV: return "FragmentSizeNV"; // superseded by BuiltInFragSizeEXT
+// case BuiltInInvocationsPerPixelNV: return "InvocationsPerPixelNV"; // superseded by BuiltInFragInvocationCountEXT
+ case BuiltInBaryCoordNV: return "BaryCoordNV";
+ case BuiltInBaryCoordNoPerspNV: return "BaryCoordNoPerspNV";
+#endif
+
+ case BuiltInFragSizeEXT: return "FragSizeEXT";
+ case BuiltInFragInvocationCountEXT: return "FragInvocationCountEXT";
+
+ case 5264: return "FullyCoveredEXT";
+
+#ifdef NV_EXTENSIONS
+ case BuiltInTaskCountNV: return "TaskCountNV";
+ case BuiltInPrimitiveCountNV: return "PrimitiveCountNV";
+ case BuiltInPrimitiveIndicesNV: return "PrimitiveIndicesNV";
+ case BuiltInClipDistancePerViewNV: return "ClipDistancePerViewNV";
+ case BuiltInCullDistancePerViewNV: return "CullDistancePerViewNV";
+ case BuiltInLayerPerViewNV: return "LayerPerViewNV";
+ case BuiltInMeshViewCountNV: return "MeshViewCountNV";
+ case BuiltInMeshViewIndicesNV: return "MeshViewIndicesNV";
+#endif
+
+ default: return "Bad";
+ }
+}
+
+const char* DimensionString(int dim)
+{
+ switch (dim) {
+ case 0: return "1D";
+ case 1: return "2D";
+ case 2: return "3D";
+ case 3: return "Cube";
+ case 4: return "Rect";
+ case 5: return "Buffer";
+ case 6: return "SubpassData";
+
+ default: return "Bad";
+ }
+}
+
+const char* SamplerAddressingModeString(int mode)
+{
+ switch (mode) {
+ case 0: return "None";
+ case 1: return "ClampToEdge";
+ case 2: return "Clamp";
+ case 3: return "Repeat";
+ case 4: return "RepeatMirrored";
+
+ default: return "Bad";
+ }
+}
+
+const char* SamplerFilterModeString(int mode)
+{
+ switch (mode) {
+ case 0: return "Nearest";
+ case 1: return "Linear";
+
+ default: return "Bad";
+ }
+}
+
+const char* ImageFormatString(int format)
+{
+ switch (format) {
+ case 0: return "Unknown";
+
+ // ES/Desktop float
+ case 1: return "Rgba32f";
+ case 2: return "Rgba16f";
+ case 3: return "R32f";
+ case 4: return "Rgba8";
+ case 5: return "Rgba8Snorm";
+
+ // Desktop float
+ case 6: return "Rg32f";
+ case 7: return "Rg16f";
+ case 8: return "R11fG11fB10f";
+ case 9: return "R16f";
+ case 10: return "Rgba16";
+ case 11: return "Rgb10A2";
+ case 12: return "Rg16";
+ case 13: return "Rg8";
+ case 14: return "R16";
+ case 15: return "R8";
+ case 16: return "Rgba16Snorm";
+ case 17: return "Rg16Snorm";
+ case 18: return "Rg8Snorm";
+ case 19: return "R16Snorm";
+ case 20: return "R8Snorm";
+
+ // ES/Desktop int
+ case 21: return "Rgba32i";
+ case 22: return "Rgba16i";
+ case 23: return "Rgba8i";
+ case 24: return "R32i";
+
+ // Desktop int
+ case 25: return "Rg32i";
+ case 26: return "Rg16i";
+ case 27: return "Rg8i";
+ case 28: return "R16i";
+ case 29: return "R8i";
+
+ // ES/Desktop uint
+ case 30: return "Rgba32ui";
+ case 31: return "Rgba16ui";
+ case 32: return "Rgba8ui";
+ case 33: return "R32ui";
+
+ // Desktop uint
+ case 34: return "Rgb10a2ui";
+ case 35: return "Rg32ui";
+ case 36: return "Rg16ui";
+ case 37: return "Rg8ui";
+ case 38: return "R16ui";
+ case 39: return "R8ui";
+
+ default:
+ return "Bad";
+ }
+}
+
+const char* ImageChannelOrderString(int format)
+{
+ switch (format) {
+ case 0: return "R";
+ case 1: return "A";
+ case 2: return "RG";
+ case 3: return "RA";
+ case 4: return "RGB";
+ case 5: return "RGBA";
+ case 6: return "BGRA";
+ case 7: return "ARGB";
+ case 8: return "Intensity";
+ case 9: return "Luminance";
+ case 10: return "Rx";
+ case 11: return "RGx";
+ case 12: return "RGBx";
+ case 13: return "Depth";
+ case 14: return "DepthStencil";
+ case 15: return "sRGB";
+ case 16: return "sRGBx";
+ case 17: return "sRGBA";
+ case 18: return "sBGRA";
+
+ default:
+ return "Bad";
+ }
+}
+
+const char* ImageChannelDataTypeString(int type)
+{
+ switch (type)
+ {
+ case 0: return "SnormInt8";
+ case 1: return "SnormInt16";
+ case 2: return "UnormInt8";
+ case 3: return "UnormInt16";
+ case 4: return "UnormShort565";
+ case 5: return "UnormShort555";
+ case 6: return "UnormInt101010";
+ case 7: return "SignedInt8";
+ case 8: return "SignedInt16";
+ case 9: return "SignedInt32";
+ case 10: return "UnsignedInt8";
+ case 11: return "UnsignedInt16";
+ case 12: return "UnsignedInt32";
+ case 13: return "HalfFloat";
+ case 14: return "Float";
+ case 15: return "UnormInt24";
+ case 16: return "UnormInt101010_2";
+
+ default:
+ return "Bad";
+ }
+}
+
+const int ImageOperandsCeiling = 14;
+
+const char* ImageOperandsString(int format)
+{
+ switch (format) {
+ case ImageOperandsBiasShift: return "Bias";
+ case ImageOperandsLodShift: return "Lod";
+ case ImageOperandsGradShift: return "Grad";
+ case ImageOperandsConstOffsetShift: return "ConstOffset";
+ case ImageOperandsOffsetShift: return "Offset";
+ case ImageOperandsConstOffsetsShift: return "ConstOffsets";
+ case ImageOperandsSampleShift: return "Sample";
+ case ImageOperandsMinLodShift: return "MinLod";
+ case ImageOperandsMakeTexelAvailableKHRShift: return "MakeTexelAvailableKHR";
+ case ImageOperandsMakeTexelVisibleKHRShift: return "MakeTexelVisibleKHR";
+ case ImageOperandsNonPrivateTexelKHRShift: return "NonPrivateTexelKHR";
+ case ImageOperandsVolatileTexelKHRShift: return "VolatileTexelKHR";
+ case ImageOperandsSignExtendShift: return "SignExtend";
+ case ImageOperandsZeroExtendShift: return "ZeroExtend";
+
+ case ImageOperandsCeiling:
+ default:
+ return "Bad";
+ }
+}
+
+const char* FPFastMathString(int mode)
+{
+ switch (mode) {
+ case 0: return "NotNaN";
+ case 1: return "NotInf";
+ case 2: return "NSZ";
+ case 3: return "AllowRecip";
+ case 4: return "Fast";
+
+ default: return "Bad";
+ }
+}
+
+const char* FPRoundingModeString(int mode)
+{
+ switch (mode) {
+ case 0: return "RTE";
+ case 1: return "RTZ";
+ case 2: return "RTP";
+ case 3: return "RTN";
+
+ default: return "Bad";
+ }
+}
+
+const char* LinkageTypeString(int type)
+{
+ switch (type) {
+ case 0: return "Export";
+ case 1: return "Import";
+
+ default: return "Bad";
+ }
+}
+
+const char* FuncParamAttrString(int attr)
+{
+ switch (attr) {
+ case 0: return "Zext";
+ case 1: return "Sext";
+ case 2: return "ByVal";
+ case 3: return "Sret";
+ case 4: return "NoAlias";
+ case 5: return "NoCapture";
+ case 6: return "NoWrite";
+ case 7: return "NoReadWrite";
+
+ default: return "Bad";
+ }
+}
+
+const char* AccessQualifierString(int attr)
+{
+ switch (attr) {
+ case 0: return "ReadOnly";
+ case 1: return "WriteOnly";
+ case 2: return "ReadWrite";
+
+ default: return "Bad";
+ }
+}
+
+const int SelectControlCeiling = 2;
+
+const char* SelectControlString(int cont)
+{
+ switch (cont) {
+ case 0: return "Flatten";
+ case 1: return "DontFlatten";
+
+ case SelectControlCeiling:
+ default: return "Bad";
+ }
+}
+
+const int LoopControlCeiling = LoopControlPartialCountShift + 1;
+
+const char* LoopControlString(int cont)
+{
+ switch (cont) {
+ case LoopControlUnrollShift: return "Unroll";
+ case LoopControlDontUnrollShift: return "DontUnroll";
+ case LoopControlDependencyInfiniteShift: return "DependencyInfinite";
+ case LoopControlDependencyLengthShift: return "DependencyLength";
+ case LoopControlMinIterationsShift: return "MinIterations";
+ case LoopControlMaxIterationsShift: return "MaxIterations";
+ case LoopControlIterationMultipleShift: return "IterationMultiple";
+ case LoopControlPeelCountShift: return "PeelCount";
+ case LoopControlPartialCountShift: return "PartialCount";
+
+ case LoopControlCeiling:
+ default: return "Bad";
+ }
+}
+
+const int FunctionControlCeiling = 4;
+
+const char* FunctionControlString(int cont)
+{
+ switch (cont) {
+ case 0: return "Inline";
+ case 1: return "DontInline";
+ case 2: return "Pure";
+ case 3: return "Const";
+
+ case FunctionControlCeiling:
+ default: return "Bad";
+ }
+}
+
+const char* MemorySemanticsString(int mem)
+{
+ // Note: No bits set (None) means "Relaxed"
+ switch (mem) {
+ case 0: return "Bad"; // Note: this is a placeholder for 'Consume'
+ case 1: return "Acquire";
+ case 2: return "Release";
+ case 3: return "AcquireRelease";
+ case 4: return "SequentiallyConsistent";
+ case 5: return "Bad"; // Note: reserved for future expansion
+ case 6: return "UniformMemory";
+ case 7: return "SubgroupMemory";
+ case 8: return "WorkgroupMemory";
+ case 9: return "CrossWorkgroupMemory";
+ case 10: return "AtomicCounterMemory";
+ case 11: return "ImageMemory";
+
+ default: return "Bad";
+ }
+}
+
+const int MemoryAccessCeiling = 6;
+
+const char* MemoryAccessString(int mem)
+{
+ switch (mem) {
+ case MemoryAccessVolatileShift: return "Volatile";
+ case MemoryAccessAlignedShift: return "Aligned";
+ case MemoryAccessNontemporalShift: return "Nontemporal";
+ case MemoryAccessMakePointerAvailableKHRShift: return "MakePointerAvailableKHR";
+ case MemoryAccessMakePointerVisibleKHRShift: return "MakePointerVisibleKHR";
+ case MemoryAccessNonPrivatePointerKHRShift: return "NonPrivatePointerKHR";
+
+ default: return "Bad";
+ }
+}
+
+const char* ScopeString(int mem)
+{
+ switch (mem) {
+ case 0: return "CrossDevice";
+ case 1: return "Device";
+ case 2: return "Workgroup";
+ case 3: return "Subgroup";
+ case 4: return "Invocation";
+
+ default: return "Bad";
+ }
+}
+
+const char* GroupOperationString(int gop)
+{
+ switch (gop)
+ {
+ case GroupOperationReduce: return "Reduce";
+ case GroupOperationInclusiveScan: return "InclusiveScan";
+ case GroupOperationExclusiveScan: return "ExclusiveScan";
+ case GroupOperationClusteredReduce: return "ClusteredReduce";
+#ifdef NV_EXTENSIONS
+ case GroupOperationPartitionedReduceNV: return "PartitionedReduceNV";
+ case GroupOperationPartitionedInclusiveScanNV: return "PartitionedInclusiveScanNV";
+ case GroupOperationPartitionedExclusiveScanNV: return "PartitionedExclusiveScanNV";
+#endif
+
+ default: return "Bad";
+ }
+}
+
+const char* KernelEnqueueFlagsString(int flag)
+{
+ switch (flag)
+ {
+ case 0: return "NoWait";
+ case 1: return "WaitKernel";
+ case 2: return "WaitWorkGroup";
+
+ default: return "Bad";
+ }
+}
+
+const char* KernelProfilingInfoString(int info)
+{
+ switch (info)
+ {
+ case 0: return "CmdExecTime";
+
+ default: return "Bad";
+ }
+}
+
+const char* CapabilityString(int info)
+{
+ switch (info)
+ {
+ case 0: return "Matrix";
+ case 1: return "Shader";
+ case 2: return "Geometry";
+ case 3: return "Tessellation";
+ case 4: return "Addresses";
+ case 5: return "Linkage";
+ case 6: return "Kernel";
+ case 7: return "Vector16";
+ case 8: return "Float16Buffer";
+ case 9: return "Float16";
+ case 10: return "Float64";
+ case 11: return "Int64";
+ case 12: return "Int64Atomics";
+ case 13: return "ImageBasic";
+ case 14: return "ImageReadWrite";
+ case 15: return "ImageMipmap";
+ case 16: return "Bad";
+ case 17: return "Pipes";
+ case 18: return "Groups";
+ case 19: return "DeviceEnqueue";
+ case 20: return "LiteralSampler";
+ case 21: return "AtomicStorage";
+ case 22: return "Int16";
+ case 23: return "TessellationPointSize";
+ case 24: return "GeometryPointSize";
+ case 25: return "ImageGatherExtended";
+ case 26: return "Bad";
+ case 27: return "StorageImageMultisample";
+ case 28: return "UniformBufferArrayDynamicIndexing";
+ case 29: return "SampledImageArrayDynamicIndexing";
+ case 30: return "StorageBufferArrayDynamicIndexing";
+ case 31: return "StorageImageArrayDynamicIndexing";
+ case 32: return "ClipDistance";
+ case 33: return "CullDistance";
+ case 34: return "ImageCubeArray";
+ case 35: return "SampleRateShading";
+ case 36: return "ImageRect";
+ case 37: return "SampledRect";
+ case 38: return "GenericPointer";
+ case 39: return "Int8";
+ case 40: return "InputAttachment";
+ case 41: return "SparseResidency";
+ case 42: return "MinLod";
+ case 43: return "Sampled1D";
+ case 44: return "Image1D";
+ case 45: return "SampledCubeArray";
+ case 46: return "SampledBuffer";
+ case 47: return "ImageBuffer";
+ case 48: return "ImageMSArray";
+ case 49: return "StorageImageExtendedFormats";
+ case 50: return "ImageQuery";
+ case 51: return "DerivativeControl";
+ case 52: return "InterpolationFunction";
+ case 53: return "TransformFeedback";
+ case 54: return "GeometryStreams";
+ case 55: return "StorageImageReadWithoutFormat";
+ case 56: return "StorageImageWriteWithoutFormat";
+ case 57: return "MultiViewport";
+ case 61: return "GroupNonUniform";
+ case 62: return "GroupNonUniformVote";
+ case 63: return "GroupNonUniformArithmetic";
+ case 64: return "GroupNonUniformBallot";
+ case 65: return "GroupNonUniformShuffle";
+ case 66: return "GroupNonUniformShuffleRelative";
+ case 67: return "GroupNonUniformClustered";
+ case 68: return "GroupNonUniformQuad";
+
+ case CapabilitySubgroupBallotKHR: return "SubgroupBallotKHR";
+ case CapabilityDrawParameters: return "DrawParameters";
+ case CapabilitySubgroupVoteKHR: return "SubgroupVoteKHR";
+
+ case CapabilityStorageUniformBufferBlock16: return "StorageUniformBufferBlock16";
+ case CapabilityStorageUniform16: return "StorageUniform16";
+ case CapabilityStoragePushConstant16: return "StoragePushConstant16";
+ case CapabilityStorageInputOutput16: return "StorageInputOutput16";
+
+ case CapabilityStorageBuffer8BitAccess: return "CapabilityStorageBuffer8BitAccess";
+ case CapabilityUniformAndStorageBuffer8BitAccess: return "CapabilityUniformAndStorageBuffer8BitAccess";
+ case CapabilityStoragePushConstant8: return "CapabilityStoragePushConstant8";
+
+ case CapabilityDeviceGroup: return "DeviceGroup";
+ case CapabilityMultiView: return "MultiView";
+
+ case CapabilityStencilExportEXT: return "StencilExportEXT";
+
+#ifdef AMD_EXTENSIONS
+ case CapabilityFloat16ImageAMD: return "Float16ImageAMD";
+ case CapabilityImageGatherBiasLodAMD: return "ImageGatherBiasLodAMD";
+ case CapabilityFragmentMaskAMD: return "FragmentMaskAMD";
+ case CapabilityImageReadWriteLodAMD: return "ImageReadWriteLodAMD";
+#endif
+
+ case CapabilityAtomicStorageOps: return "AtomicStorageOps";
+
+ case CapabilitySampleMaskPostDepthCoverage: return "SampleMaskPostDepthCoverage";
+#ifdef NV_EXTENSIONS
+ case CapabilityGeometryShaderPassthroughNV: return "GeometryShaderPassthroughNV";
+ case CapabilityShaderViewportIndexLayerNV: return "ShaderViewportIndexLayerNV";
+ case CapabilityShaderViewportMaskNV: return "ShaderViewportMaskNV";
+ case CapabilityShaderStereoViewNV: return "ShaderStereoViewNV";
+ case CapabilityPerViewAttributesNV: return "PerViewAttributesNV";
+ case CapabilityGroupNonUniformPartitionedNV: return "GroupNonUniformPartitionedNV";
+ case CapabilityRayTracingNV: return "RayTracingNV";
+ case CapabilityComputeDerivativeGroupQuadsNV: return "ComputeDerivativeGroupQuadsNV";
+ case CapabilityComputeDerivativeGroupLinearNV: return "ComputeDerivativeGroupLinearNV";
+ case CapabilityFragmentBarycentricNV: return "FragmentBarycentricNV";
+ case CapabilityMeshShadingNV: return "MeshShadingNV";
+// case CapabilityShadingRateNV: return "ShadingRateNV"; // superseded by CapabilityFragmentDensityEXT
+#endif
+ case CapabilityFragmentDensityEXT: return "FragmentDensityEXT";
+
+ case CapabilityFragmentFullyCoveredEXT: return "FragmentFullyCoveredEXT";
+
+ case CapabilityShaderNonUniformEXT: return "CapabilityShaderNonUniformEXT";
+ case CapabilityRuntimeDescriptorArrayEXT: return "CapabilityRuntimeDescriptorArrayEXT";
+ case CapabilityInputAttachmentArrayDynamicIndexingEXT: return "CapabilityInputAttachmentArrayDynamicIndexingEXT";
+ case CapabilityUniformTexelBufferArrayDynamicIndexingEXT: return "CapabilityUniformTexelBufferArrayDynamicIndexingEXT";
+ case CapabilityStorageTexelBufferArrayDynamicIndexingEXT: return "CapabilityStorageTexelBufferArrayDynamicIndexingEXT";
+ case CapabilityUniformBufferArrayNonUniformIndexingEXT: return "CapabilityUniformBufferArrayNonUniformIndexingEXT";
+ case CapabilitySampledImageArrayNonUniformIndexingEXT: return "CapabilitySampledImageArrayNonUniformIndexingEXT";
+ case CapabilityStorageBufferArrayNonUniformIndexingEXT: return "CapabilityStorageBufferArrayNonUniformIndexingEXT";
+ case CapabilityStorageImageArrayNonUniformIndexingEXT: return "CapabilityStorageImageArrayNonUniformIndexingEXT";
+ case CapabilityInputAttachmentArrayNonUniformIndexingEXT: return "CapabilityInputAttachmentArrayNonUniformIndexingEXT";
+ case CapabilityUniformTexelBufferArrayNonUniformIndexingEXT: return "CapabilityUniformTexelBufferArrayNonUniformIndexingEXT";
+ case CapabilityStorageTexelBufferArrayNonUniformIndexingEXT: return "CapabilityStorageTexelBufferArrayNonUniformIndexingEXT";
+
+ case CapabilityVulkanMemoryModelKHR: return "CapabilityVulkanMemoryModelKHR";
+ case CapabilityVulkanMemoryModelDeviceScopeKHR: return "CapabilityVulkanMemoryModelDeviceScopeKHR";
+
+ case CapabilityPhysicalStorageBufferAddressesEXT: return "CapabilityPhysicalStorageBufferAddressesEXT";
+
+ case CapabilityVariablePointers: return "CapabilityVariablePointers";
+
+ case CapabilityCooperativeMatrixNV: return "CapabilityCooperativeMatrixNV";
+
+ default: return "Bad";
+ }
+}
+
+const char* OpcodeString(int op)
+{
+ switch (op) {
+ case 0: return "OpNop";
+ case 1: return "OpUndef";
+ case 2: return "OpSourceContinued";
+ case 3: return "OpSource";
+ case 4: return "OpSourceExtension";
+ case 5: return "OpName";
+ case 6: return "OpMemberName";
+ case 7: return "OpString";
+ case 8: return "OpLine";
+ case 9: return "Bad";
+ case 10: return "OpExtension";
+ case 11: return "OpExtInstImport";
+ case 12: return "OpExtInst";
+ case 13: return "Bad";
+ case 14: return "OpMemoryModel";
+ case 15: return "OpEntryPoint";
+ case 16: return "OpExecutionMode";
+ case 17: return "OpCapability";
+ case 18: return "Bad";
+ case 19: return "OpTypeVoid";
+ case 20: return "OpTypeBool";
+ case 21: return "OpTypeInt";
+ case 22: return "OpTypeFloat";
+ case 23: return "OpTypeVector";
+ case 24: return "OpTypeMatrix";
+ case 25: return "OpTypeImage";
+ case 26: return "OpTypeSampler";
+ case 27: return "OpTypeSampledImage";
+ case 28: return "OpTypeArray";
+ case 29: return "OpTypeRuntimeArray";
+ case 30: return "OpTypeStruct";
+ case 31: return "OpTypeOpaque";
+ case 32: return "OpTypePointer";
+ case 33: return "OpTypeFunction";
+ case 34: return "OpTypeEvent";
+ case 35: return "OpTypeDeviceEvent";
+ case 36: return "OpTypeReserveId";
+ case 37: return "OpTypeQueue";
+ case 38: return "OpTypePipe";
+ case 39: return "OpTypeForwardPointer";
+ case 40: return "Bad";
+ case 41: return "OpConstantTrue";
+ case 42: return "OpConstantFalse";
+ case 43: return "OpConstant";
+ case 44: return "OpConstantComposite";
+ case 45: return "OpConstantSampler";
+ case 46: return "OpConstantNull";
+ case 47: return "Bad";
+ case 48: return "OpSpecConstantTrue";
+ case 49: return "OpSpecConstantFalse";
+ case 50: return "OpSpecConstant";
+ case 51: return "OpSpecConstantComposite";
+ case 52: return "OpSpecConstantOp";
+ case 53: return "Bad";
+ case 54: return "OpFunction";
+ case 55: return "OpFunctionParameter";
+ case 56: return "OpFunctionEnd";
+ case 57: return "OpFunctionCall";
+ case 58: return "Bad";
+ case 59: return "OpVariable";
+ case 60: return "OpImageTexelPointer";
+ case 61: return "OpLoad";
+ case 62: return "OpStore";
+ case 63: return "OpCopyMemory";
+ case 64: return "OpCopyMemorySized";
+ case 65: return "OpAccessChain";
+ case 66: return "OpInBoundsAccessChain";
+ case 67: return "OpPtrAccessChain";
+ case 68: return "OpArrayLength";
+ case 69: return "OpGenericPtrMemSemantics";
+ case 70: return "OpInBoundsPtrAccessChain";
+ case 71: return "OpDecorate";
+ case 72: return "OpMemberDecorate";
+ case 73: return "OpDecorationGroup";
+ case 74: return "OpGroupDecorate";
+ case 75: return "OpGroupMemberDecorate";
+ case 76: return "Bad";
+ case 77: return "OpVectorExtractDynamic";
+ case 78: return "OpVectorInsertDynamic";
+ case 79: return "OpVectorShuffle";
+ case 80: return "OpCompositeConstruct";
+ case 81: return "OpCompositeExtract";
+ case 82: return "OpCompositeInsert";
+ case 83: return "OpCopyObject";
+ case 84: return "OpTranspose";
+ case 85: return "Bad";
+ case OpCopyLogical: return "OpCopyLogical";
+ case 86: return "OpSampledImage";
+ case 87: return "OpImageSampleImplicitLod";
+ case 88: return "OpImageSampleExplicitLod";
+ case 89: return "OpImageSampleDrefImplicitLod";
+ case 90: return "OpImageSampleDrefExplicitLod";
+ case 91: return "OpImageSampleProjImplicitLod";
+ case 92: return "OpImageSampleProjExplicitLod";
+ case 93: return "OpImageSampleProjDrefImplicitLod";
+ case 94: return "OpImageSampleProjDrefExplicitLod";
+ case 95: return "OpImageFetch";
+ case 96: return "OpImageGather";
+ case 97: return "OpImageDrefGather";
+ case 98: return "OpImageRead";
+ case 99: return "OpImageWrite";
+ case 100: return "OpImage";
+ case 101: return "OpImageQueryFormat";
+ case 102: return "OpImageQueryOrder";
+ case 103: return "OpImageQuerySizeLod";
+ case 104: return "OpImageQuerySize";
+ case 105: return "OpImageQueryLod";
+ case 106: return "OpImageQueryLevels";
+ case 107: return "OpImageQuerySamples";
+ case 108: return "Bad";
+ case 109: return "OpConvertFToU";
+ case 110: return "OpConvertFToS";
+ case 111: return "OpConvertSToF";
+ case 112: return "OpConvertUToF";
+ case 113: return "OpUConvert";
+ case 114: return "OpSConvert";
+ case 115: return "OpFConvert";
+ case 116: return "OpQuantizeToF16";
+ case 117: return "OpConvertPtrToU";
+ case 118: return "OpSatConvertSToU";
+ case 119: return "OpSatConvertUToS";
+ case 120: return "OpConvertUToPtr";
+ case 121: return "OpPtrCastToGeneric";
+ case 122: return "OpGenericCastToPtr";
+ case 123: return "OpGenericCastToPtrExplicit";
+ case 124: return "OpBitcast";
+ case 125: return "Bad";
+ case 126: return "OpSNegate";
+ case 127: return "OpFNegate";
+ case 128: return "OpIAdd";
+ case 129: return "OpFAdd";
+ case 130: return "OpISub";
+ case 131: return "OpFSub";
+ case 132: return "OpIMul";
+ case 133: return "OpFMul";
+ case 134: return "OpUDiv";
+ case 135: return "OpSDiv";
+ case 136: return "OpFDiv";
+ case 137: return "OpUMod";
+ case 138: return "OpSRem";
+ case 139: return "OpSMod";
+ case 140: return "OpFRem";
+ case 141: return "OpFMod";
+ case 142: return "OpVectorTimesScalar";
+ case 143: return "OpMatrixTimesScalar";
+ case 144: return "OpVectorTimesMatrix";
+ case 145: return "OpMatrixTimesVector";
+ case 146: return "OpMatrixTimesMatrix";
+ case 147: return "OpOuterProduct";
+ case 148: return "OpDot";
+ case 149: return "OpIAddCarry";
+ case 150: return "OpISubBorrow";
+ case 151: return "OpUMulExtended";
+ case 152: return "OpSMulExtended";
+ case 153: return "Bad";
+ case 154: return "OpAny";
+ case 155: return "OpAll";
+ case 156: return "OpIsNan";
+ case 157: return "OpIsInf";
+ case 158: return "OpIsFinite";
+ case 159: return "OpIsNormal";
+ case 160: return "OpSignBitSet";
+ case 161: return "OpLessOrGreater";
+ case 162: return "OpOrdered";
+ case 163: return "OpUnordered";
+ case 164: return "OpLogicalEqual";
+ case 165: return "OpLogicalNotEqual";
+ case 166: return "OpLogicalOr";
+ case 167: return "OpLogicalAnd";
+ case 168: return "OpLogicalNot";
+ case 169: return "OpSelect";
+ case 170: return "OpIEqual";
+ case 171: return "OpINotEqual";
+ case 172: return "OpUGreaterThan";
+ case 173: return "OpSGreaterThan";
+ case 174: return "OpUGreaterThanEqual";
+ case 175: return "OpSGreaterThanEqual";
+ case 176: return "OpULessThan";
+ case 177: return "OpSLessThan";
+ case 178: return "OpULessThanEqual";
+ case 179: return "OpSLessThanEqual";
+ case 180: return "OpFOrdEqual";
+ case 181: return "OpFUnordEqual";
+ case 182: return "OpFOrdNotEqual";
+ case 183: return "OpFUnordNotEqual";
+ case 184: return "OpFOrdLessThan";
+ case 185: return "OpFUnordLessThan";
+ case 186: return "OpFOrdGreaterThan";
+ case 187: return "OpFUnordGreaterThan";
+ case 188: return "OpFOrdLessThanEqual";
+ case 189: return "OpFUnordLessThanEqual";
+ case 190: return "OpFOrdGreaterThanEqual";
+ case 191: return "OpFUnordGreaterThanEqual";
+ case 192: return "Bad";
+ case 193: return "Bad";
+ case 194: return "OpShiftRightLogical";
+ case 195: return "OpShiftRightArithmetic";
+ case 196: return "OpShiftLeftLogical";
+ case 197: return "OpBitwiseOr";
+ case 198: return "OpBitwiseXor";
+ case 199: return "OpBitwiseAnd";
+ case 200: return "OpNot";
+ case 201: return "OpBitFieldInsert";
+ case 202: return "OpBitFieldSExtract";
+ case 203: return "OpBitFieldUExtract";
+ case 204: return "OpBitReverse";
+ case 205: return "OpBitCount";
+ case 206: return "Bad";
+ case 207: return "OpDPdx";
+ case 208: return "OpDPdy";
+ case 209: return "OpFwidth";
+ case 210: return "OpDPdxFine";
+ case 211: return "OpDPdyFine";
+ case 212: return "OpFwidthFine";
+ case 213: return "OpDPdxCoarse";
+ case 214: return "OpDPdyCoarse";
+ case 215: return "OpFwidthCoarse";
+ case 216: return "Bad";
+ case 217: return "Bad";
+ case 218: return "OpEmitVertex";
+ case 219: return "OpEndPrimitive";
+ case 220: return "OpEmitStreamVertex";
+ case 221: return "OpEndStreamPrimitive";
+ case 222: return "Bad";
+ case 223: return "Bad";
+ case 224: return "OpControlBarrier";
+ case 225: return "OpMemoryBarrier";
+ case 226: return "Bad";
+ case 227: return "OpAtomicLoad";
+ case 228: return "OpAtomicStore";
+ case 229: return "OpAtomicExchange";
+ case 230: return "OpAtomicCompareExchange";
+ case 231: return "OpAtomicCompareExchangeWeak";
+ case 232: return "OpAtomicIIncrement";
+ case 233: return "OpAtomicIDecrement";
+ case 234: return "OpAtomicIAdd";
+ case 235: return "OpAtomicISub";
+ case 236: return "OpAtomicSMin";
+ case 237: return "OpAtomicUMin";
+ case 238: return "OpAtomicSMax";
+ case 239: return "OpAtomicUMax";
+ case 240: return "OpAtomicAnd";
+ case 241: return "OpAtomicOr";
+ case 242: return "OpAtomicXor";
+ case 243: return "Bad";
+ case 244: return "Bad";
+ case 245: return "OpPhi";
+ case 246: return "OpLoopMerge";
+ case 247: return "OpSelectionMerge";
+ case 248: return "OpLabel";
+ case 249: return "OpBranch";
+ case 250: return "OpBranchConditional";
+ case 251: return "OpSwitch";
+ case 252: return "OpKill";
+ case 253: return "OpReturn";
+ case 254: return "OpReturnValue";
+ case 255: return "OpUnreachable";
+ case 256: return "OpLifetimeStart";
+ case 257: return "OpLifetimeStop";
+ case 258: return "Bad";
+ case 259: return "OpGroupAsyncCopy";
+ case 260: return "OpGroupWaitEvents";
+ case 261: return "OpGroupAll";
+ case 262: return "OpGroupAny";
+ case 263: return "OpGroupBroadcast";
+ case 264: return "OpGroupIAdd";
+ case 265: return "OpGroupFAdd";
+ case 266: return "OpGroupFMin";
+ case 267: return "OpGroupUMin";
+ case 268: return "OpGroupSMin";
+ case 269: return "OpGroupFMax";
+ case 270: return "OpGroupUMax";
+ case 271: return "OpGroupSMax";
+ case 272: return "Bad";
+ case 273: return "Bad";
+ case 274: return "OpReadPipe";
+ case 275: return "OpWritePipe";
+ case 276: return "OpReservedReadPipe";
+ case 277: return "OpReservedWritePipe";
+ case 278: return "OpReserveReadPipePackets";
+ case 279: return "OpReserveWritePipePackets";
+ case 280: return "OpCommitReadPipe";
+ case 281: return "OpCommitWritePipe";
+ case 282: return "OpIsValidReserveId";
+ case 283: return "OpGetNumPipePackets";
+ case 284: return "OpGetMaxPipePackets";
+ case 285: return "OpGroupReserveReadPipePackets";
+ case 286: return "OpGroupReserveWritePipePackets";
+ case 287: return "OpGroupCommitReadPipe";
+ case 288: return "OpGroupCommitWritePipe";
+ case 289: return "Bad";
+ case 290: return "Bad";
+ case 291: return "OpEnqueueMarker";
+ case 292: return "OpEnqueueKernel";
+ case 293: return "OpGetKernelNDrangeSubGroupCount";
+ case 294: return "OpGetKernelNDrangeMaxSubGroupSize";
+ case 295: return "OpGetKernelWorkGroupSize";
+ case 296: return "OpGetKernelPreferredWorkGroupSizeMultiple";
+ case 297: return "OpRetainEvent";
+ case 298: return "OpReleaseEvent";
+ case 299: return "OpCreateUserEvent";
+ case 300: return "OpIsValidEvent";
+ case 301: return "OpSetUserEventStatus";
+ case 302: return "OpCaptureEventProfilingInfo";
+ case 303: return "OpGetDefaultQueue";
+ case 304: return "OpBuildNDRange";
+ case 305: return "OpImageSparseSampleImplicitLod";
+ case 306: return "OpImageSparseSampleExplicitLod";
+ case 307: return "OpImageSparseSampleDrefImplicitLod";
+ case 308: return "OpImageSparseSampleDrefExplicitLod";
+ case 309: return "OpImageSparseSampleProjImplicitLod";
+ case 310: return "OpImageSparseSampleProjExplicitLod";
+ case 311: return "OpImageSparseSampleProjDrefImplicitLod";
+ case 312: return "OpImageSparseSampleProjDrefExplicitLod";
+ case 313: return "OpImageSparseFetch";
+ case 314: return "OpImageSparseGather";
+ case 315: return "OpImageSparseDrefGather";
+ case 316: return "OpImageSparseTexelsResident";
+ case 317: return "OpNoLine";
+ case 318: return "OpAtomicFlagTestAndSet";
+ case 319: return "OpAtomicFlagClear";
+ case 320: return "OpImageSparseRead";
+
+ case OpModuleProcessed: return "OpModuleProcessed";
+ case OpDecorateId: return "OpDecorateId";
+
+ case 333: return "OpGroupNonUniformElect";
+ case 334: return "OpGroupNonUniformAll";
+ case 335: return "OpGroupNonUniformAny";
+ case 336: return "OpGroupNonUniformAllEqual";
+ case 337: return "OpGroupNonUniformBroadcast";
+ case 338: return "OpGroupNonUniformBroadcastFirst";
+ case 339: return "OpGroupNonUniformBallot";
+ case 340: return "OpGroupNonUniformInverseBallot";
+ case 341: return "OpGroupNonUniformBallotBitExtract";
+ case 342: return "OpGroupNonUniformBallotBitCount";
+ case 343: return "OpGroupNonUniformBallotFindLSB";
+ case 344: return "OpGroupNonUniformBallotFindMSB";
+ case 345: return "OpGroupNonUniformShuffle";
+ case 346: return "OpGroupNonUniformShuffleXor";
+ case 347: return "OpGroupNonUniformShuffleUp";
+ case 348: return "OpGroupNonUniformShuffleDown";
+ case 349: return "OpGroupNonUniformIAdd";
+ case 350: return "OpGroupNonUniformFAdd";
+ case 351: return "OpGroupNonUniformIMul";
+ case 352: return "OpGroupNonUniformFMul";
+ case 353: return "OpGroupNonUniformSMin";
+ case 354: return "OpGroupNonUniformUMin";
+ case 355: return "OpGroupNonUniformFMin";
+ case 356: return "OpGroupNonUniformSMax";
+ case 357: return "OpGroupNonUniformUMax";
+ case 358: return "OpGroupNonUniformFMax";
+ case 359: return "OpGroupNonUniformBitwiseAnd";
+ case 360: return "OpGroupNonUniformBitwiseOr";
+ case 361: return "OpGroupNonUniformBitwiseXor";
+ case 362: return "OpGroupNonUniformLogicalAnd";
+ case 363: return "OpGroupNonUniformLogicalOr";
+ case 364: return "OpGroupNonUniformLogicalXor";
+ case 365: return "OpGroupNonUniformQuadBroadcast";
+ case 366: return "OpGroupNonUniformQuadSwap";
+
+ case 4421: return "OpSubgroupBallotKHR";
+ case 4422: return "OpSubgroupFirstInvocationKHR";
+ case 4428: return "OpSubgroupAllKHR";
+ case 4429: return "OpSubgroupAnyKHR";
+ case 4430: return "OpSubgroupAllEqualKHR";
+ case 4432: return "OpSubgroupReadInvocationKHR";
+
+#ifdef AMD_EXTENSIONS
+ case 5000: return "OpGroupIAddNonUniformAMD";
+ case 5001: return "OpGroupFAddNonUniformAMD";
+ case 5002: return "OpGroupFMinNonUniformAMD";
+ case 5003: return "OpGroupUMinNonUniformAMD";
+ case 5004: return "OpGroupSMinNonUniformAMD";
+ case 5005: return "OpGroupFMaxNonUniformAMD";
+ case 5006: return "OpGroupUMaxNonUniformAMD";
+ case 5007: return "OpGroupSMaxNonUniformAMD";
+
+ case 5011: return "OpFragmentMaskFetchAMD";
+ case 5012: return "OpFragmentFetchAMD";
+#endif
+
+ case OpDecorateStringGOOGLE: return "OpDecorateStringGOOGLE";
+ case OpMemberDecorateStringGOOGLE: return "OpMemberDecorateStringGOOGLE";
+
+#ifdef NV_EXTENSIONS
+ case OpGroupNonUniformPartitionNV: return "OpGroupNonUniformPartitionNV";
+ case OpReportIntersectionNV: return "OpReportIntersectionNV";
+ case OpIgnoreIntersectionNV: return "OpIgnoreIntersectionNV";
+ case OpTerminateRayNV: return "OpTerminateRayNV";
+ case OpTraceNV: return "OpTraceNV";
+ case OpTypeAccelerationStructureNV: return "OpTypeAccelerationStructureNV";
+ case OpExecuteCallableNV: return "OpExecuteCallableNV";
+ case OpImageSampleFootprintNV: return "OpImageSampleFootprintNV";
+ case OpWritePackedPrimitiveIndices4x8NV: return "OpWritePackedPrimitiveIndices4x8NV";
+#endif
+
+ case OpTypeCooperativeMatrixNV: return "OpTypeCooperativeMatrixNV";
+ case OpCooperativeMatrixLoadNV: return "OpCooperativeMatrixLoadNV";
+ case OpCooperativeMatrixStoreNV: return "OpCooperativeMatrixStoreNV";
+ case OpCooperativeMatrixMulAddNV: return "OpCooperativeMatrixMulAddNV";
+ case OpCooperativeMatrixLengthNV: return "OpCooperativeMatrixLengthNV";
+
+ default:
+ return "Bad";
+ }
+}
+
+// The set of objects that hold all the instruction/operand
+// parameterization information.
+InstructionParameters InstructionDesc[OpCodeMask + 1];
+OperandParameters ExecutionModeOperands[ExecutionModeCeiling];
+OperandParameters DecorationOperands[DecorationCeiling];
+
+EnumDefinition OperandClassParams[OperandCount];
+EnumParameters ExecutionModeParams[ExecutionModeCeiling];
+EnumParameters ImageOperandsParams[ImageOperandsCeiling];
+EnumParameters DecorationParams[DecorationCeiling];
+EnumParameters LoopControlParams[LoopControlCeiling];
+EnumParameters SelectionControlParams[SelectControlCeiling];
+EnumParameters FunctionControlParams[FunctionControlCeiling];
+EnumParameters MemoryAccessParams[MemoryAccessCeiling];
+
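+// These tables drive both the disassembler and the documentation printer.
+// For instance, once Parameterize() has run, a printer can check whether an
+// opcode produces a result <id> (a sketch, assuming the hasResult()/hasType()
+// accessors declared in doc.h):
+//
+//     if (spv::InstructionDesc[opCode].hasResult())
+//         out << "%" << resultId << " = ";
+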
+// Set up all the parameterizing descriptions of the opcodes, operands, etc.
+void Parameterize()
+{
+ // only do this once.
+ static bool initialized = false;
+ if (initialized)
+ return;
+ initialized = true;
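+ // Note: this guard is unsynchronized; it assumes the first call to
+ // Parameterize() does not race with another thread (concurrent first use
+ // would need e.g. std::call_once).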
+
+ // Exceptions to having a result <id> and a resulting type <id>.
+ // (Everything is initialized to have both).
+
+ InstructionDesc[OpNop].setResultAndType(false, false);
+ InstructionDesc[OpSource].setResultAndType(false, false);
+ InstructionDesc[OpSourceContinued].setResultAndType(false, false);
+ InstructionDesc[OpSourceExtension].setResultAndType(false, false);
+ InstructionDesc[OpExtension].setResultAndType(false, false);
+ InstructionDesc[OpExtInstImport].setResultAndType(true, false);
+ InstructionDesc[OpCapability].setResultAndType(false, false);
+ InstructionDesc[OpMemoryModel].setResultAndType(false, false);
+ InstructionDesc[OpEntryPoint].setResultAndType(false, false);
+ InstructionDesc[OpExecutionMode].setResultAndType(false, false);
+ InstructionDesc[OpTypeVoid].setResultAndType(true, false);
+ InstructionDesc[OpTypeBool].setResultAndType(true, false);
+ InstructionDesc[OpTypeInt].setResultAndType(true, false);
+ InstructionDesc[OpTypeFloat].setResultAndType(true, false);
+ InstructionDesc[OpTypeVector].setResultAndType(true, false);
+ InstructionDesc[OpTypeMatrix].setResultAndType(true, false);
+ InstructionDesc[OpTypeImage].setResultAndType(true, false);
+ InstructionDesc[OpTypeSampler].setResultAndType(true, false);
+ InstructionDesc[OpTypeSampledImage].setResultAndType(true, false);
+ InstructionDesc[OpTypeArray].setResultAndType(true, false);
+ InstructionDesc[OpTypeRuntimeArray].setResultAndType(true, false);
+ InstructionDesc[OpTypeStruct].setResultAndType(true, false);
+ InstructionDesc[OpTypeOpaque].setResultAndType(true, false);
+ InstructionDesc[OpTypePointer].setResultAndType(true, false);
+ InstructionDesc[OpTypeForwardPointer].setResultAndType(false, false);
+ InstructionDesc[OpTypeFunction].setResultAndType(true, false);
+ InstructionDesc[OpTypeEvent].setResultAndType(true, false);
+ InstructionDesc[OpTypeDeviceEvent].setResultAndType(true, false);
+ InstructionDesc[OpTypeReserveId].setResultAndType(true, false);
+ InstructionDesc[OpTypeQueue].setResultAndType(true, false);
+ InstructionDesc[OpTypePipe].setResultAndType(true, false);
+ InstructionDesc[OpFunctionEnd].setResultAndType(false, false);
+ InstructionDesc[OpStore].setResultAndType(false, false);
+ InstructionDesc[OpImageWrite].setResultAndType(false, false);
+ InstructionDesc[OpDecorationGroup].setResultAndType(true, false);
+ InstructionDesc[OpDecorate].setResultAndType(false, false);
+ InstructionDesc[OpDecorateId].setResultAndType(false, false);
+ InstructionDesc[OpDecorateStringGOOGLE].setResultAndType(false, false);
+ InstructionDesc[OpMemberDecorate].setResultAndType(false, false);
+ InstructionDesc[OpMemberDecorateStringGOOGLE].setResultAndType(false, false);
+ InstructionDesc[OpGroupDecorate].setResultAndType(false, false);
+ InstructionDesc[OpGroupMemberDecorate].setResultAndType(false, false);
+ InstructionDesc[OpName].setResultAndType(false, false);
+ InstructionDesc[OpMemberName].setResultAndType(false, false);
+ InstructionDesc[OpString].setResultAndType(true, false);
+ InstructionDesc[OpLine].setResultAndType(false, false);
+ InstructionDesc[OpNoLine].setResultAndType(false, false);
+ InstructionDesc[OpCopyMemory].setResultAndType(false, false);
+ InstructionDesc[OpCopyMemorySized].setResultAndType(false, false);
+ InstructionDesc[OpEmitVertex].setResultAndType(false, false);
+ InstructionDesc[OpEndPrimitive].setResultAndType(false, false);
+ InstructionDesc[OpEmitStreamVertex].setResultAndType(false, false);
+ InstructionDesc[OpEndStreamPrimitive].setResultAndType(false, false);
+ InstructionDesc[OpControlBarrier].setResultAndType(false, false);
+ InstructionDesc[OpMemoryBarrier].setResultAndType(false, false);
+ InstructionDesc[OpAtomicStore].setResultAndType(false, false);
+ InstructionDesc[OpLoopMerge].setResultAndType(false, false);
+ InstructionDesc[OpSelectionMerge].setResultAndType(false, false);
+ InstructionDesc[OpLabel].setResultAndType(true, false);
+ InstructionDesc[OpBranch].setResultAndType(false, false);
+ InstructionDesc[OpBranchConditional].setResultAndType(false, false);
+ InstructionDesc[OpSwitch].setResultAndType(false, false);
+ InstructionDesc[OpKill].setResultAndType(false, false);
+ InstructionDesc[OpReturn].setResultAndType(false, false);
+ InstructionDesc[OpReturnValue].setResultAndType(false, false);
+ InstructionDesc[OpUnreachable].setResultAndType(false, false);
+ InstructionDesc[OpLifetimeStart].setResultAndType(false, false);
+ InstructionDesc[OpLifetimeStop].setResultAndType(false, false);
+ InstructionDesc[OpCommitReadPipe].setResultAndType(false, false);
+ InstructionDesc[OpCommitWritePipe].setResultAndType(false, false);
+ InstructionDesc[OpGroupCommitWritePipe].setResultAndType(false, false);
+ InstructionDesc[OpGroupCommitReadPipe].setResultAndType(false, false);
+ InstructionDesc[OpCaptureEventProfilingInfo].setResultAndType(false, false);
+ InstructionDesc[OpSetUserEventStatus].setResultAndType(false, false);
+ InstructionDesc[OpRetainEvent].setResultAndType(false, false);
+ InstructionDesc[OpReleaseEvent].setResultAndType(false, false);
+ InstructionDesc[OpGroupWaitEvents].setResultAndType(false, false);
+ InstructionDesc[OpAtomicFlagClear].setResultAndType(false, false);
+ InstructionDesc[OpModuleProcessed].setResultAndType(false, false);
+ InstructionDesc[OpTypeCooperativeMatrixNV].setResultAndType(true, false);
+ InstructionDesc[OpCooperativeMatrixStoreNV].setResultAndType(false, false);
+
+ // Specific additional context-dependent operands
+
+ ExecutionModeOperands[ExecutionModeInvocations].push(OperandLiteralNumber, "'Number of <<Invocation,invocations>>'");
+
+ ExecutionModeOperands[ExecutionModeLocalSize].push(OperandLiteralNumber, "'x size'");
+ ExecutionModeOperands[ExecutionModeLocalSize].push(OperandLiteralNumber, "'y size'");
+ ExecutionModeOperands[ExecutionModeLocalSize].push(OperandLiteralNumber, "'z size'");
+
+ ExecutionModeOperands[ExecutionModeLocalSizeHint].push(OperandLiteralNumber, "'x size'");
+ ExecutionModeOperands[ExecutionModeLocalSizeHint].push(OperandLiteralNumber, "'y size'");
+ ExecutionModeOperands[ExecutionModeLocalSizeHint].push(OperandLiteralNumber, "'z size'");
+
+ ExecutionModeOperands[ExecutionModeOutputVertices].push(OperandLiteralNumber, "'Vertex count'");
+ ExecutionModeOperands[ExecutionModeVecTypeHint].push(OperandLiteralNumber, "'Vector type'");
+
+ DecorationOperands[DecorationStream].push(OperandLiteralNumber, "'Stream Number'");
+ DecorationOperands[DecorationLocation].push(OperandLiteralNumber, "'Location'");
+ DecorationOperands[DecorationComponent].push(OperandLiteralNumber, "'Component'");
+ DecorationOperands[DecorationIndex].push(OperandLiteralNumber, "'Index'");
+ DecorationOperands[DecorationBinding].push(OperandLiteralNumber, "'Binding Point'");
+ DecorationOperands[DecorationDescriptorSet].push(OperandLiteralNumber, "'Descriptor Set'");
+ DecorationOperands[DecorationOffset].push(OperandLiteralNumber, "'Byte Offset'");
+ DecorationOperands[DecorationXfbBuffer].push(OperandLiteralNumber, "'XFB Buffer Number'");
+ DecorationOperands[DecorationXfbStride].push(OperandLiteralNumber, "'XFB Stride'");
+ DecorationOperands[DecorationArrayStride].push(OperandLiteralNumber, "'Array Stride'");
+ DecorationOperands[DecorationMatrixStride].push(OperandLiteralNumber, "'Matrix Stride'");
+ DecorationOperands[DecorationBuiltIn].push(OperandLiteralNumber, "See <<BuiltIn,*BuiltIn*>>");
+ DecorationOperands[DecorationFPRoundingMode].push(OperandFPRoundingMode, "'Floating-Point Rounding Mode'");
+ DecorationOperands[DecorationFPFastMathMode].push(OperandFPFastMath, "'Fast-Math Mode'");
+ DecorationOperands[DecorationLinkageAttributes].push(OperandLiteralString, "'Name'");
+ DecorationOperands[DecorationLinkageAttributes].push(OperandLinkageType, "'Linkage Type'");
+ DecorationOperands[DecorationFuncParamAttr].push(OperandFuncParamAttr, "'Function Parameter Attribute'");
+ DecorationOperands[DecorationSpecId].push(OperandLiteralNumber, "'Specialization Constant ID'");
+ DecorationOperands[DecorationInputAttachmentIndex].push(OperandLiteralNumber, "'Attachment Index'");
+ DecorationOperands[DecorationAlignment].push(OperandLiteralNumber, "'Alignment'");
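+
+ // For example, "OpDecorate %var Location 0" carries one extra literal
+ // operand, matching the single OperandLiteralNumber pushed for
+ // DecorationLocation above.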
+
+ OperandClassParams[OperandSource].set(0, SourceString, nullptr);
+ OperandClassParams[OperandExecutionModel].set(0, ExecutionModelString, nullptr);
+ OperandClassParams[OperandAddressing].set(0, AddressingString, nullptr);
+ OperandClassParams[OperandMemory].set(0, MemoryString, nullptr);
+ OperandClassParams[OperandExecutionMode].set(ExecutionModeCeiling, ExecutionModeString, ExecutionModeParams);
+ OperandClassParams[OperandExecutionMode].setOperands(ExecutionModeOperands);
+ OperandClassParams[OperandStorage].set(0, StorageClassString, nullptr);
+ OperandClassParams[OperandDimensionality].set(0, DimensionString, nullptr);
+ OperandClassParams[OperandSamplerAddressingMode].set(0, SamplerAddressingModeString, nullptr);
+ OperandClassParams[OperandSamplerFilterMode].set(0, SamplerFilterModeString, nullptr);
+ OperandClassParams[OperandSamplerImageFormat].set(0, ImageFormatString, nullptr);
+ OperandClassParams[OperandImageChannelOrder].set(0, ImageChannelOrderString, nullptr);
+ OperandClassParams[OperandImageChannelDataType].set(0, ImageChannelDataTypeString, nullptr);
+ OperandClassParams[OperandImageOperands].set(ImageOperandsCeiling, ImageOperandsString, ImageOperandsParams, true);
+ OperandClassParams[OperandFPFastMath].set(0, FPFastMathString, nullptr, true);
+ OperandClassParams[OperandFPRoundingMode].set(0, FPRoundingModeString, nullptr);
+ OperandClassParams[OperandLinkageType].set(0, LinkageTypeString, nullptr);
+ OperandClassParams[OperandFuncParamAttr].set(0, FuncParamAttrString, nullptr);
+ OperandClassParams[OperandAccessQualifier].set(0, AccessQualifierString, nullptr);
+ OperandClassParams[OperandDecoration].set(DecorationCeiling, DecorationString, DecorationParams);
+ OperandClassParams[OperandDecoration].setOperands(DecorationOperands);
+ OperandClassParams[OperandBuiltIn].set(0, BuiltInString, nullptr);
+ OperandClassParams[OperandSelect].set(SelectControlCeiling, SelectControlString, SelectionControlParams, true);
+ OperandClassParams[OperandLoop].set(LoopControlCeiling, LoopControlString, LoopControlParams, true);
+ OperandClassParams[OperandFunction].set(FunctionControlCeiling, FunctionControlString, FunctionControlParams, true);
+ OperandClassParams[OperandMemorySemantics].set(0, MemorySemanticsString, nullptr, true);
+ OperandClassParams[OperandMemoryAccess].set(MemoryAccessCeiling, MemoryAccessString, MemoryAccessParams, true);
+ OperandClassParams[OperandScope].set(0, ScopeString, nullptr);
+ OperandClassParams[OperandGroupOperation].set(0, GroupOperationString, nullptr);
+ OperandClassParams[OperandKernelEnqueueFlags].set(0, KernelEnqueueFlagsString, nullptr);
+ OperandClassParams[OperandKernelProfilingInfo].set(0, KernelProfilingInfoString, nullptr, true);
+ OperandClassParams[OperandCapability].set(0, CapabilityString, nullptr);
+ OperandClassParams[OperandOpcode].set(OpCodeMask + 1, OpcodeString, nullptr);
+
+ // Describe each instruction's operands in order: push one entry per operand,
+ // giving its class and display name; a trailing 'true' marks it optional.
+
+ InstructionDesc[OpSource].operands.push(OperandSource, "");
+ InstructionDesc[OpSource].operands.push(OperandLiteralNumber, "'Version'");
+ InstructionDesc[OpSource].operands.push(OperandId, "'File'", true);
+ InstructionDesc[OpSource].operands.push(OperandLiteralString, "'Source'", true);
+
+ InstructionDesc[OpSourceContinued].operands.push(OperandLiteralString, "'Continued Source'");
+
+ InstructionDesc[OpSourceExtension].operands.push(OperandLiteralString, "'Extension'");
+
+ InstructionDesc[OpName].operands.push(OperandId, "'Target'");
+ InstructionDesc[OpName].operands.push(OperandLiteralString, "'Name'");
+
+ InstructionDesc[OpMemberName].operands.push(OperandId, "'Type'");
+ InstructionDesc[OpMemberName].operands.push(OperandLiteralNumber, "'Member'");
+ InstructionDesc[OpMemberName].operands.push(OperandLiteralString, "'Name'");
+
+ InstructionDesc[OpString].operands.push(OperandLiteralString, "'String'");
+
+ InstructionDesc[OpLine].operands.push(OperandId, "'File'");
+ InstructionDesc[OpLine].operands.push(OperandLiteralNumber, "'Line'");
+ InstructionDesc[OpLine].operands.push(OperandLiteralNumber, "'Column'");
+
+ InstructionDesc[OpExtension].operands.push(OperandLiteralString, "'Name'");
+
+ InstructionDesc[OpExtInstImport].operands.push(OperandLiteralString, "'Name'");
+
+ InstructionDesc[OpCapability].operands.push(OperandCapability, "'Capability'");
+
+ InstructionDesc[OpMemoryModel].operands.push(OperandAddressing, "");
+ InstructionDesc[OpMemoryModel].operands.push(OperandMemory, "");
+
+ InstructionDesc[OpEntryPoint].operands.push(OperandExecutionModel, "");
+ InstructionDesc[OpEntryPoint].operands.push(OperandId, "'Entry Point'");
+ InstructionDesc[OpEntryPoint].operands.push(OperandLiteralString, "'Name'");
+ InstructionDesc[OpEntryPoint].operands.push(OperandVariableIds, "'Interface'");
+
+ InstructionDesc[OpExecutionMode].operands.push(OperandId, "'Entry Point'");
+ InstructionDesc[OpExecutionMode].operands.push(OperandExecutionMode, "'Mode'");
+ InstructionDesc[OpExecutionMode].operands.push(OperandOptionalLiteral, "See <<Execution_Mode,Execution Mode>>");
+
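+ // Type declarations: literal operands (width, component count, image flags)
+ // are encoded directly; <id> operands reference previously declared types or
+ // constants (e.g. OpTypeArray's 'Length').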
+ InstructionDesc[OpTypeInt].operands.push(OperandLiteralNumber, "'Width'");
+ InstructionDesc[OpTypeInt].operands.push(OperandLiteralNumber, "'Signedness'");
+
+ InstructionDesc[OpTypeFloat].operands.push(OperandLiteralNumber, "'Width'");
+
+ InstructionDesc[OpTypeVector].operands.push(OperandId, "'Component Type'");
+ InstructionDesc[OpTypeVector].operands.push(OperandLiteralNumber, "'Component Count'");
+
+ InstructionDesc[OpTypeMatrix].operands.push(OperandId, "'Column Type'");
+ InstructionDesc[OpTypeMatrix].operands.push(OperandLiteralNumber, "'Column Count'");
+
+ InstructionDesc[OpTypeImage].operands.push(OperandId, "'Sampled Type'");
+ InstructionDesc[OpTypeImage].operands.push(OperandDimensionality, "");
+ InstructionDesc[OpTypeImage].operands.push(OperandLiteralNumber, "'Depth'");
+ InstructionDesc[OpTypeImage].operands.push(OperandLiteralNumber, "'Arrayed'");
+ InstructionDesc[OpTypeImage].operands.push(OperandLiteralNumber, "'MS'");
+ InstructionDesc[OpTypeImage].operands.push(OperandLiteralNumber, "'Sampled'");
+ InstructionDesc[OpTypeImage].operands.push(OperandSamplerImageFormat, "");
+ InstructionDesc[OpTypeImage].operands.push(OperandAccessQualifier, "", true);
+
+ InstructionDesc[OpTypeSampledImage].operands.push(OperandId, "'Image Type'");
+
+ InstructionDesc[OpTypeArray].operands.push(OperandId, "'Element Type'");
+ InstructionDesc[OpTypeArray].operands.push(OperandId, "'Length'");
+
+ InstructionDesc[OpTypeRuntimeArray].operands.push(OperandId, "'Element Type'");
+
+ InstructionDesc[OpTypeStruct].operands.push(OperandVariableIds, "'Member 0 type', +\n'member 1 type', +\n...");
+
+ InstructionDesc[OpTypeOpaque].operands.push(OperandLiteralString, "The name of the opaque type.");
+
+ InstructionDesc[OpTypePointer].operands.push(OperandStorage, "");
+ InstructionDesc[OpTypePointer].operands.push(OperandId, "'Type'");
+
+ InstructionDesc[OpTypeForwardPointer].operands.push(OperandId, "'Pointer Type'");
+ InstructionDesc[OpTypeForwardPointer].operands.push(OperandStorage, "");
+
+ InstructionDesc[OpTypePipe].operands.push(OperandAccessQualifier, "'Qualifier'");
+
+ InstructionDesc[OpTypeFunction].operands.push(OperandId, "'Return Type'");
+ InstructionDesc[OpTypeFunction].operands.push(OperandVariableIds, "'Parameter 0 Type', +\n'Parameter 1 Type', +\n...");
+
+ InstructionDesc[OpConstant].operands.push(OperandVariableLiterals, "'Value'");
+
+ InstructionDesc[OpConstantComposite].operands.push(OperandVariableIds, "'Constituents'");
+
+ InstructionDesc[OpConstantSampler].operands.push(OperandSamplerAddressingMode, "");
+ InstructionDesc[OpConstantSampler].operands.push(OperandLiteralNumber, "'Param'");
+ InstructionDesc[OpConstantSampler].operands.push(OperandSamplerFilterMode, "");
+
+ InstructionDesc[OpSpecConstant].operands.push(OperandVariableLiterals, "'Value'");
+
+ InstructionDesc[OpSpecConstantComposite].operands.push(OperandVariableIds, "'Constituents'");
+
+ InstructionDesc[OpSpecConstantOp].operands.push(OperandLiteralNumber, "'Opcode'");
+ InstructionDesc[OpSpecConstantOp].operands.push(OperandVariableIds, "'Operands'");
+
+ InstructionDesc[OpVariable].operands.push(OperandStorage, "");
+ InstructionDesc[OpVariable].operands.push(OperandId, "'Initializer'", true);
+
+ InstructionDesc[OpFunction].operands.push(OperandFunction, "");
+ InstructionDesc[OpFunction].operands.push(OperandId, "'Function Type'");
+
+ InstructionDesc[OpFunctionCall].operands.push(OperandId, "'Function'");
+ InstructionDesc[OpFunctionCall].operands.push(OperandVariableIds, "'Argument 0', +\n'Argument 1', +\n...");
+
+ InstructionDesc[OpExtInst].operands.push(OperandId, "'Set'");
+ InstructionDesc[OpExtInst].operands.push(OperandLiteralNumber, "'Instruction'");
+ InstructionDesc[OpExtInst].operands.push(OperandVariableIds, "'Operand 1', +\n'Operand 2', +\n...");
+
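+ // Loads and stores take an optional Memory Access mask; when the mask is
+ // present, it may be followed by an alignment literal and a scope <id>
+ // (used by the Vulkan memory model's Make*Available/Visible bits).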
+ InstructionDesc[OpLoad].operands.push(OperandId, "'Pointer'");
+ InstructionDesc[OpLoad].operands.push(OperandMemoryAccess, "", true);
+ InstructionDesc[OpLoad].operands.push(OperandLiteralNumber, "", true);
+ InstructionDesc[OpLoad].operands.push(OperandId, "", true);
+
+ InstructionDesc[OpStore].operands.push(OperandId, "'Pointer'");
+ InstructionDesc[OpStore].operands.push(OperandId, "'Object'");
+ InstructionDesc[OpStore].operands.push(OperandMemoryAccess, "", true);
+ InstructionDesc[OpStore].operands.push(OperandLiteralNumber, "", true);
+ InstructionDesc[OpStore].operands.push(OperandId, "", true);
+
+ InstructionDesc[OpPhi].operands.push(OperandVariableIds, "'Variable, Parent, ...'");
+
+ InstructionDesc[OpDecorate].operands.push(OperandId, "'Target'");
+ InstructionDesc[OpDecorate].operands.push(OperandDecoration, "");
+ InstructionDesc[OpDecorate].operands.push(OperandVariableLiterals, "See <<Decoration,'Decoration'>>.");
+
+ InstructionDesc[OpDecorateId].operands.push(OperandId, "'Target'");
+ InstructionDesc[OpDecorateId].operands.push(OperandDecoration, "");
+ InstructionDesc[OpDecorateId].operands.push(OperandVariableIds, "See <<Decoration,'Decoration'>>.");
+
+ InstructionDesc[OpDecorateStringGOOGLE].operands.push(OperandId, "'Target'");
+ InstructionDesc[OpDecorateStringGOOGLE].operands.push(OperandDecoration, "");
+ InstructionDesc[OpDecorateStringGOOGLE].operands.push(OperandLiteralString, "'Literal String'");
+
+ InstructionDesc[OpMemberDecorate].operands.push(OperandId, "'Structure Type'");
+ InstructionDesc[OpMemberDecorate].operands.push(OperandLiteralNumber, "'Member'");
+ InstructionDesc[OpMemberDecorate].operands.push(OperandDecoration, "");
+ InstructionDesc[OpMemberDecorate].operands.push(OperandVariableLiterals, "See <<Decoration,'Decoration'>>.");
+
+ InstructionDesc[OpMemberDecorateStringGOOGLE].operands.push(OperandId, "'Structure Type'");
+ InstructionDesc[OpMemberDecorateStringGOOGLE].operands.push(OperandLiteralNumber, "'Member'");
+ InstructionDesc[OpMemberDecorateStringGOOGLE].operands.push(OperandDecoration, "");
+ InstructionDesc[OpMemberDecorateStringGOOGLE].operands.push(OperandLiteralString, "'Literal String'");
+
+ InstructionDesc[OpGroupDecorate].operands.push(OperandId, "'Decoration Group'");
+ InstructionDesc[OpGroupDecorate].operands.push(OperandVariableIds, "'Targets'");
+
+ InstructionDesc[OpGroupMemberDecorate].operands.push(OperandId, "'Decoration Group'");
+ InstructionDesc[OpGroupMemberDecorate].operands.push(OperandVariableIdLiteral, "'Targets'");
+
+ InstructionDesc[OpVectorExtractDynamic].operands.push(OperandId, "'Vector'");
+ InstructionDesc[OpVectorExtractDynamic].operands.push(OperandId, "'Index'");
+
+ InstructionDesc[OpVectorInsertDynamic].operands.push(OperandId, "'Vector'");
+ InstructionDesc[OpVectorInsertDynamic].operands.push(OperandId, "'Component'");
+ InstructionDesc[OpVectorInsertDynamic].operands.push(OperandId, "'Index'");
+
+ InstructionDesc[OpVectorShuffle].operands.push(OperandId, "'Vector 1'");
+ InstructionDesc[OpVectorShuffle].operands.push(OperandId, "'Vector 2'");
+ InstructionDesc[OpVectorShuffle].operands.push(OperandVariableLiterals, "'Components'");
+
+ InstructionDesc[OpCompositeConstruct].operands.push(OperandVariableIds, "'Constituents'");
+
+ InstructionDesc[OpCompositeExtract].operands.push(OperandId, "'Composite'");
+ InstructionDesc[OpCompositeExtract].operands.push(OperandVariableLiterals, "'Indexes'");
+
+ InstructionDesc[OpCompositeInsert].operands.push(OperandId, "'Object'");
+ InstructionDesc[OpCompositeInsert].operands.push(OperandId, "'Composite'");
+ InstructionDesc[OpCompositeInsert].operands.push(OperandVariableLiterals, "'Indexes'");
+
+ InstructionDesc[OpCopyObject].operands.push(OperandId, "'Operand'");
+
+ InstructionDesc[OpCopyMemory].operands.push(OperandId, "'Target'");
+ InstructionDesc[OpCopyMemory].operands.push(OperandId, "'Source'");
+ InstructionDesc[OpCopyMemory].operands.push(OperandMemoryAccess, "", true);
+
+ InstructionDesc[OpCopyMemorySized].operands.push(OperandId, "'Target'");
+ InstructionDesc[OpCopyMemorySized].operands.push(OperandId, "'Source'");
+ InstructionDesc[OpCopyMemorySized].operands.push(OperandId, "'Size'");
+ InstructionDesc[OpCopyMemorySized].operands.push(OperandMemoryAccess, "", true);
+
+ InstructionDesc[OpSampledImage].operands.push(OperandId, "'Image'");
+ InstructionDesc[OpSampledImage].operands.push(OperandId, "'Sampler'");
+
+ InstructionDesc[OpImage].operands.push(OperandId, "'Sampled Image'");
+
+ InstructionDesc[OpImageRead].operands.push(OperandId, "'Image'");
+ InstructionDesc[OpImageRead].operands.push(OperandId, "'Coordinate'");
+ InstructionDesc[OpImageRead].operands.push(OperandImageOperands, "", true);
+ InstructionDesc[OpImageRead].operands.push(OperandVariableIds, "", true);
+
+ InstructionDesc[OpImageWrite].operands.push(OperandId, "'Image'");
+ InstructionDesc[OpImageWrite].operands.push(OperandId, "'Coordinate'");
+ InstructionDesc[OpImageWrite].operands.push(OperandId, "'Texel'");
+ InstructionDesc[OpImageWrite].operands.push(OperandImageOperands, "", true);
+ InstructionDesc[OpImageWrite].operands.push(OperandVariableIds, "", true);
+
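+ // All sampling instructions share one shape: a sampled image, a coordinate,
+ // then an optional Image Operands mask plus the extra <id>s it implies
+ // (the Dref variants insert the depth-comparison reference before the mask).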
+ InstructionDesc[OpImageSampleImplicitLod].operands.push(OperandId, "'Sampled Image'");
+ InstructionDesc[OpImageSampleImplicitLod].operands.push(OperandId, "'Coordinate'");
+ InstructionDesc[OpImageSampleImplicitLod].operands.push(OperandImageOperands, "", true);
+ InstructionDesc[OpImageSampleImplicitLod].operands.push(OperandVariableIds, "", true);
+
+ InstructionDesc[OpImageSampleExplicitLod].operands.push(OperandId, "'Sampled Image'");
+ InstructionDesc[OpImageSampleExplicitLod].operands.push(OperandId, "'Coordinate'");
+ InstructionDesc[OpImageSampleExplicitLod].operands.push(OperandImageOperands, "", true);
+ InstructionDesc[OpImageSampleExplicitLod].operands.push(OperandVariableIds, "", true);
+
+ InstructionDesc[OpImageSampleDrefImplicitLod].operands.push(OperandId, "'Sampled Image'");
+ InstructionDesc[OpImageSampleDrefImplicitLod].operands.push(OperandId, "'Coordinate'");
+ InstructionDesc[OpImageSampleDrefImplicitLod].operands.push(OperandId, "'D~ref~'");
+ InstructionDesc[OpImageSampleDrefImplicitLod].operands.push(OperandImageOperands, "", true);
+ InstructionDesc[OpImageSampleDrefImplicitLod].operands.push(OperandVariableIds, "", true);
+
+ InstructionDesc[OpImageSampleDrefExplicitLod].operands.push(OperandId, "'Sampled Image'");
+ InstructionDesc[OpImageSampleDrefExplicitLod].operands.push(OperandId, "'Coordinate'");
+ InstructionDesc[OpImageSampleDrefExplicitLod].operands.push(OperandId, "'D~ref~'");
+ InstructionDesc[OpImageSampleDrefExplicitLod].operands.push(OperandImageOperands, "", true);
+ InstructionDesc[OpImageSampleDrefExplicitLod].operands.push(OperandVariableIds, "", true);
+
+ InstructionDesc[OpImageSampleProjImplicitLod].operands.push(OperandId, "'Sampled Image'");
+ InstructionDesc[OpImageSampleProjImplicitLod].operands.push(OperandId, "'Coordinate'");
+ InstructionDesc[OpImageSampleProjImplicitLod].operands.push(OperandImageOperands, "", true);
+ InstructionDesc[OpImageSampleProjImplicitLod].operands.push(OperandVariableIds, "", true);
+
+ InstructionDesc[OpImageSampleProjExplicitLod].operands.push(OperandId, "'Sampled Image'");
+ InstructionDesc[OpImageSampleProjExplicitLod].operands.push(OperandId, "'Coordinate'");
+ InstructionDesc[OpImageSampleProjExplicitLod].operands.push(OperandImageOperands, "", true);
+ InstructionDesc[OpImageSampleProjExplicitLod].operands.push(OperandVariableIds, "", true);
+
+ InstructionDesc[OpImageSampleProjDrefImplicitLod].operands.push(OperandId, "'Sampled Image'");
+ InstructionDesc[OpImageSampleProjDrefImplicitLod].operands.push(OperandId, "'Coordinate'");
+ InstructionDesc[OpImageSampleProjDrefImplicitLod].operands.push(OperandId, "'D~ref~'");
+ InstructionDesc[OpImageSampleProjDrefImplicitLod].operands.push(OperandImageOperands, "", true);
+ InstructionDesc[OpImageSampleProjDrefImplicitLod].operands.push(OperandVariableIds, "", true);
+
+ InstructionDesc[OpImageSampleProjDrefExplicitLod].operands.push(OperandId, "'Sampled Image'");
+ InstructionDesc[OpImageSampleProjDrefExplicitLod].operands.push(OperandId, "'Coordinate'");
+ InstructionDesc[OpImageSampleProjDrefExplicitLod].operands.push(OperandId, "'D~ref~'");
+ InstructionDesc[OpImageSampleProjDrefExplicitLod].operands.push(OperandImageOperands, "", true);
+ InstructionDesc[OpImageSampleProjDrefExplicitLod].operands.push(OperandVariableIds, "", true);
+
+ InstructionDesc[OpImageFetch].operands.push(OperandId, "'Image'");
+ InstructionDesc[OpImageFetch].operands.push(OperandId, "'Coordinate'");
+ InstructionDesc[OpImageFetch].operands.push(OperandImageOperands, "", true);
+ InstructionDesc[OpImageFetch].operands.push(OperandVariableIds, "", true);
+
+ InstructionDesc[OpImageGather].operands.push(OperandId, "'Sampled Image'");
+ InstructionDesc[OpImageGather].operands.push(OperandId, "'Coordinate'");
+ InstructionDesc[OpImageGather].operands.push(OperandId, "'Component'");
+ InstructionDesc[OpImageGather].operands.push(OperandImageOperands, "", true);
+ InstructionDesc[OpImageGather].operands.push(OperandVariableIds, "", true);
+
+ InstructionDesc[OpImageDrefGather].operands.push(OperandId, "'Sampled Image'");
+ InstructionDesc[OpImageDrefGather].operands.push(OperandId, "'Coordinate'");
+ InstructionDesc[OpImageDrefGather].operands.push(OperandId, "'D~ref~'");
+ InstructionDesc[OpImageDrefGather].operands.push(OperandImageOperands, "", true);
+ InstructionDesc[OpImageDrefGather].operands.push(OperandVariableIds, "", true);
+
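+ // The sparse variants mirror the instructions above but additionally return
+ // a residency code, which OpImageSparseTexelsResident can test.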
+ InstructionDesc[OpImageSparseSampleImplicitLod].operands.push(OperandId, "'Sampled Image'");
+ InstructionDesc[OpImageSparseSampleImplicitLod].operands.push(OperandId, "'Coordinate'");
+ InstructionDesc[OpImageSparseSampleImplicitLod].operands.push(OperandImageOperands, "", true);
+ InstructionDesc[OpImageSparseSampleImplicitLod].operands.push(OperandVariableIds, "", true);
+
+ InstructionDesc[OpImageSparseSampleExplicitLod].operands.push(OperandId, "'Sampled Image'");
+ InstructionDesc[OpImageSparseSampleExplicitLod].operands.push(OperandId, "'Coordinate'");
+ InstructionDesc[OpImageSparseSampleExplicitLod].operands.push(OperandImageOperands, "", true);
+ InstructionDesc[OpImageSparseSampleExplicitLod].operands.push(OperandVariableIds, "", true);
+
+ InstructionDesc[OpImageSparseSampleDrefImplicitLod].operands.push(OperandId, "'Sampled Image'");
+ InstructionDesc[OpImageSparseSampleDrefImplicitLod].operands.push(OperandId, "'Coordinate'");
+ InstructionDesc[OpImageSparseSampleDrefImplicitLod].operands.push(OperandId, "'D~ref~'");
+ InstructionDesc[OpImageSparseSampleDrefImplicitLod].operands.push(OperandImageOperands, "", true);
+ InstructionDesc[OpImageSparseSampleDrefImplicitLod].operands.push(OperandVariableIds, "", true);
+
+ InstructionDesc[OpImageSparseSampleDrefExplicitLod].operands.push(OperandId, "'Sampled Image'");
+ InstructionDesc[OpImageSparseSampleDrefExplicitLod].operands.push(OperandId, "'Coordinate'");
+ InstructionDesc[OpImageSparseSampleDrefExplicitLod].operands.push(OperandId, "'D~ref~'");
+ InstructionDesc[OpImageSparseSampleDrefExplicitLod].operands.push(OperandImageOperands, "", true);
+ InstructionDesc[OpImageSparseSampleDrefExplicitLod].operands.push(OperandVariableIds, "", true);
+
+ InstructionDesc[OpImageSparseSampleProjImplicitLod].operands.push(OperandId, "'Sampled Image'");
+ InstructionDesc[OpImageSparseSampleProjImplicitLod].operands.push(OperandId, "'Coordinate'");
+ InstructionDesc[OpImageSparseSampleProjImplicitLod].operands.push(OperandImageOperands, "", true);
+ InstructionDesc[OpImageSparseSampleProjImplicitLod].operands.push(OperandVariableIds, "", true);
+
+ InstructionDesc[OpImageSparseSampleProjExplicitLod].operands.push(OperandId, "'Sampled Image'");
+ InstructionDesc[OpImageSparseSampleProjExplicitLod].operands.push(OperandId, "'Coordinate'");
+ InstructionDesc[OpImageSparseSampleProjExplicitLod].operands.push(OperandImageOperands, "", true);
+ InstructionDesc[OpImageSparseSampleProjExplicitLod].operands.push(OperandVariableIds, "", true);
+
+ InstructionDesc[OpImageSparseSampleProjDrefImplicitLod].operands.push(OperandId, "'Sampled Image'");
+ InstructionDesc[OpImageSparseSampleProjDrefImplicitLod].operands.push(OperandId, "'Coordinate'");
+ InstructionDesc[OpImageSparseSampleProjDrefImplicitLod].operands.push(OperandId, "'D~ref~'");
+ InstructionDesc[OpImageSparseSampleProjDrefImplicitLod].operands.push(OperandImageOperands, "", true);
+ InstructionDesc[OpImageSparseSampleProjDrefImplicitLod].operands.push(OperandVariableIds, "", true);
+
+ InstructionDesc[OpImageSparseSampleProjDrefExplicitLod].operands.push(OperandId, "'Sampled Image'");
+ InstructionDesc[OpImageSparseSampleProjDrefExplicitLod].operands.push(OperandId, "'Coordinate'");
+ InstructionDesc[OpImageSparseSampleProjDrefExplicitLod].operands.push(OperandId, "'D~ref~'");
+ InstructionDesc[OpImageSparseSampleProjDrefExplicitLod].operands.push(OperandImageOperands, "", true);
+ InstructionDesc[OpImageSparseSampleProjDrefExplicitLod].operands.push(OperandVariableIds, "", true);
+
+ InstructionDesc[OpImageSparseFetch].operands.push(OperandId, "'Image'");
+ InstructionDesc[OpImageSparseFetch].operands.push(OperandId, "'Coordinate'");
+ InstructionDesc[OpImageSparseFetch].operands.push(OperandImageOperands, "", true);
+ InstructionDesc[OpImageSparseFetch].operands.push(OperandVariableIds, "", true);
+
+ InstructionDesc[OpImageSparseGather].operands.push(OperandId, "'Sampled Image'");
+ InstructionDesc[OpImageSparseGather].operands.push(OperandId, "'Coordinate'");
+ InstructionDesc[OpImageSparseGather].operands.push(OperandId, "'Component'");
+ InstructionDesc[OpImageSparseGather].operands.push(OperandImageOperands, "", true);
+ InstructionDesc[OpImageSparseGather].operands.push(OperandVariableIds, "", true);
+
+ InstructionDesc[OpImageSparseDrefGather].operands.push(OperandId, "'Sampled Image'");
+ InstructionDesc[OpImageSparseDrefGather].operands.push(OperandId, "'Coordinate'");
+ InstructionDesc[OpImageSparseDrefGather].operands.push(OperandId, "'D~ref~'");
+ InstructionDesc[OpImageSparseDrefGather].operands.push(OperandImageOperands, "", true);
+ InstructionDesc[OpImageSparseDrefGather].operands.push(OperandVariableIds, "", true);
+
+ InstructionDesc[OpImageSparseRead].operands.push(OperandId, "'Image'");
+ InstructionDesc[OpImageSparseRead].operands.push(OperandId, "'Coordinate'");
+ InstructionDesc[OpImageSparseRead].operands.push(OperandImageOperands, "", true);
+ InstructionDesc[OpImageSparseRead].operands.push(OperandVariableIds, "", true);
+
+ InstructionDesc[OpImageSparseTexelsResident].operands.push(OperandId, "'Resident Code'");
+
+ InstructionDesc[OpImageQuerySizeLod].operands.push(OperandId, "'Image'");
+ InstructionDesc[OpImageQuerySizeLod].operands.push(OperandId, "'Level of Detail'");
+
+ InstructionDesc[OpImageQuerySize].operands.push(OperandId, "'Image'");
+
+ InstructionDesc[OpImageQueryLod].operands.push(OperandId, "'Image'");
+ InstructionDesc[OpImageQueryLod].operands.push(OperandId, "'Coordinate'");
+
+ InstructionDesc[OpImageQueryLevels].operands.push(OperandId, "'Image'");
+
+ InstructionDesc[OpImageQuerySamples].operands.push(OperandId, "'Image'");
+
+ InstructionDesc[OpImageQueryFormat].operands.push(OperandId, "'Image'");
+
+ InstructionDesc[OpImageQueryOrder].operands.push(OperandId, "'Image'");
+
+ InstructionDesc[OpAccessChain].operands.push(OperandId, "'Base'");
+ InstructionDesc[OpAccessChain].operands.push(OperandVariableIds, "'Indexes'");
+
+ InstructionDesc[OpInBoundsAccessChain].operands.push(OperandId, "'Base'");
+ InstructionDesc[OpInBoundsAccessChain].operands.push(OperandVariableIds, "'Indexes'");
+
+ InstructionDesc[OpPtrAccessChain].operands.push(OperandId, "'Base'");
+ InstructionDesc[OpPtrAccessChain].operands.push(OperandId, "'Element'");
+ InstructionDesc[OpPtrAccessChain].operands.push(OperandVariableIds, "'Indexes'");
+
+ InstructionDesc[OpInBoundsPtrAccessChain].operands.push(OperandId, "'Base'");
+ InstructionDesc[OpInBoundsPtrAccessChain].operands.push(OperandId, "'Element'");
+ InstructionDesc[OpInBoundsPtrAccessChain].operands.push(OperandVariableIds, "'Indexes'");
+
+ InstructionDesc[OpSNegate].operands.push(OperandId, "'Operand'");
+
+ InstructionDesc[OpFNegate].operands.push(OperandId, "'Operand'");
+
+ InstructionDesc[OpNot].operands.push(OperandId, "'Operand'");
+
+ InstructionDesc[OpAny].operands.push(OperandId, "'Vector'");
+
+ InstructionDesc[OpAll].operands.push(OperandId, "'Vector'");
+
+ InstructionDesc[OpConvertFToU].operands.push(OperandId, "'Float Value'");
+
+ InstructionDesc[OpConvertFToS].operands.push(OperandId, "'Float Value'");
+
+ InstructionDesc[OpConvertSToF].operands.push(OperandId, "'Signed Value'");
+
+ InstructionDesc[OpConvertUToF].operands.push(OperandId, "'Unsigned Value'");
+
+ InstructionDesc[OpUConvert].operands.push(OperandId, "'Unsigned Value'");
+
+ InstructionDesc[OpSConvert].operands.push(OperandId, "'Signed Value'");
+
+ InstructionDesc[OpFConvert].operands.push(OperandId, "'Float Value'");
+
+ InstructionDesc[OpSatConvertSToU].operands.push(OperandId, "'Signed Value'");
+
+ InstructionDesc[OpSatConvertUToS].operands.push(OperandId, "'Unsigned Value'");
+
+ InstructionDesc[OpConvertPtrToU].operands.push(OperandId, "'Pointer'");
+
+ InstructionDesc[OpConvertUToPtr].operands.push(OperandId, "'Integer Value'");
+
+ InstructionDesc[OpPtrCastToGeneric].operands.push(OperandId, "'Pointer'");
+
+ InstructionDesc[OpGenericCastToPtr].operands.push(OperandId, "'Pointer'");
+
+ InstructionDesc[OpGenericCastToPtrExplicit].operands.push(OperandId, "'Pointer'");
+ InstructionDesc[OpGenericCastToPtrExplicit].operands.push(OperandStorage, "'Storage'");
+
+ InstructionDesc[OpGenericPtrMemSemantics].operands.push(OperandId, "'Pointer'");
+
+ InstructionDesc[OpBitcast].operands.push(OperandId, "'Operand'");
+
+ InstructionDesc[OpQuantizeToF16].operands.push(OperandId, "'Value'");
+
+ InstructionDesc[OpTranspose].operands.push(OperandId, "'Matrix'");
+
+ InstructionDesc[OpCopyLogical].operands.push(OperandId, "'Operand'");
+
+ InstructionDesc[OpIsNan].operands.push(OperandId, "'x'");
+
+ InstructionDesc[OpIsInf].operands.push(OperandId, "'x'");
+
+ InstructionDesc[OpIsFinite].operands.push(OperandId, "'x'");
+
+ InstructionDesc[OpIsNormal].operands.push(OperandId, "'x'");
+
+ InstructionDesc[OpSignBitSet].operands.push(OperandId, "'x'");
+
+ InstructionDesc[OpLessOrGreater].operands.push(OperandId, "'x'");
+ InstructionDesc[OpLessOrGreater].operands.push(OperandId, "'y'");
+
+ InstructionDesc[OpOrdered].operands.push(OperandId, "'x'");
+ InstructionDesc[OpOrdered].operands.push(OperandId, "'y'");
+
+ InstructionDesc[OpUnordered].operands.push(OperandId, "'x'");
+ InstructionDesc[OpUnordered].operands.push(OperandId, "'y'");
+
+ InstructionDesc[OpArrayLength].operands.push(OperandId, "'Structure'");
+ InstructionDesc[OpArrayLength].operands.push(OperandLiteralNumber, "'Array member'");
+
+ InstructionDesc[OpIAdd].operands.push(OperandId, "'Operand 1'");
+ InstructionDesc[OpIAdd].operands.push(OperandId, "'Operand 2'");
+
+ InstructionDesc[OpFAdd].operands.push(OperandId, "'Operand 1'");
+ InstructionDesc[OpFAdd].operands.push(OperandId, "'Operand 2'");
+
+ InstructionDesc[OpISub].operands.push(OperandId, "'Operand 1'");
+ InstructionDesc[OpISub].operands.push(OperandId, "'Operand 2'");
+
+ InstructionDesc[OpFSub].operands.push(OperandId, "'Operand 1'");
+ InstructionDesc[OpFSub].operands.push(OperandId, "'Operand 2'");
+
+ InstructionDesc[OpIMul].operands.push(OperandId, "'Operand 1'");
+ InstructionDesc[OpIMul].operands.push(OperandId, "'Operand 2'");
+
+ InstructionDesc[OpFMul].operands.push(OperandId, "'Operand 1'");
+ InstructionDesc[OpFMul].operands.push(OperandId, "'Operand 2'");
+
+ InstructionDesc[OpUDiv].operands.push(OperandId, "'Operand 1'");
+ InstructionDesc[OpUDiv].operands.push(OperandId, "'Operand 2'");
+
+ InstructionDesc[OpSDiv].operands.push(OperandId, "'Operand 1'");
+ InstructionDesc[OpSDiv].operands.push(OperandId, "'Operand 2'");
+
+ InstructionDesc[OpFDiv].operands.push(OperandId, "'Operand 1'");
+ InstructionDesc[OpFDiv].operands.push(OperandId, "'Operand 2'");
+
+ InstructionDesc[OpUMod].operands.push(OperandId, "'Operand 1'");
+ InstructionDesc[OpUMod].operands.push(OperandId, "'Operand 2'");
+
+ InstructionDesc[OpSRem].operands.push(OperandId, "'Operand 1'");
+ InstructionDesc[OpSRem].operands.push(OperandId, "'Operand 2'");
+
+ InstructionDesc[OpSMod].operands.push(OperandId, "'Operand 1'");
+ InstructionDesc[OpSMod].operands.push(OperandId, "'Operand 2'");
+
+ InstructionDesc[OpFRem].operands.push(OperandId, "'Operand 1'");
+ InstructionDesc[OpFRem].operands.push(OperandId, "'Operand 2'");
+
+ InstructionDesc[OpFMod].operands.push(OperandId, "'Operand 1'");
+ InstructionDesc[OpFMod].operands.push(OperandId, "'Operand 2'");
+
+ InstructionDesc[OpVectorTimesScalar].operands.push(OperandId, "'Vector'");
+ InstructionDesc[OpVectorTimesScalar].operands.push(OperandId, "'Scalar'");
+
+ InstructionDesc[OpMatrixTimesScalar].operands.push(OperandId, "'Matrix'");
+ InstructionDesc[OpMatrixTimesScalar].operands.push(OperandId, "'Scalar'");
+
+ InstructionDesc[OpVectorTimesMatrix].operands.push(OperandId, "'Vector'");
+ InstructionDesc[OpVectorTimesMatrix].operands.push(OperandId, "'Matrix'");
+
+ InstructionDesc[OpMatrixTimesVector].operands.push(OperandId, "'Matrix'");
+ InstructionDesc[OpMatrixTimesVector].operands.push(OperandId, "'Vector'");
+
+ InstructionDesc[OpMatrixTimesMatrix].operands.push(OperandId, "'LeftMatrix'");
+ InstructionDesc[OpMatrixTimesMatrix].operands.push(OperandId, "'RightMatrix'");
+
+ InstructionDesc[OpOuterProduct].operands.push(OperandId, "'Vector 1'");
+ InstructionDesc[OpOuterProduct].operands.push(OperandId, "'Vector 2'");
+
+ InstructionDesc[OpDot].operands.push(OperandId, "'Vector 1'");
+ InstructionDesc[OpDot].operands.push(OperandId, "'Vector 2'");
+
+ InstructionDesc[OpIAddCarry].operands.push(OperandId, "'Operand 1'");
+ InstructionDesc[OpIAddCarry].operands.push(OperandId, "'Operand 2'");
+
+ InstructionDesc[OpISubBorrow].operands.push(OperandId, "'Operand 1'");
+ InstructionDesc[OpISubBorrow].operands.push(OperandId, "'Operand 2'");
+
+ InstructionDesc[OpUMulExtended].operands.push(OperandId, "'Operand 1'");
+ InstructionDesc[OpUMulExtended].operands.push(OperandId, "'Operand 2'");
+
+ InstructionDesc[OpSMulExtended].operands.push(OperandId, "'Operand 1'");
+ InstructionDesc[OpSMulExtended].operands.push(OperandId, "'Operand 2'");
+
+ InstructionDesc[OpShiftRightLogical].operands.push(OperandId, "'Base'");
+ InstructionDesc[OpShiftRightLogical].operands.push(OperandId, "'Shift'");
+
+ InstructionDesc[OpShiftRightArithmetic].operands.push(OperandId, "'Base'");
+ InstructionDesc[OpShiftRightArithmetic].operands.push(OperandId, "'Shift'");
+
+ InstructionDesc[OpShiftLeftLogical].operands.push(OperandId, "'Base'");
+ InstructionDesc[OpShiftLeftLogical].operands.push(OperandId, "'Shift'");
+
+ InstructionDesc[OpLogicalOr].operands.push(OperandId, "'Operand 1'");
+ InstructionDesc[OpLogicalOr].operands.push(OperandId, "'Operand 2'");
+
+ InstructionDesc[OpLogicalAnd].operands.push(OperandId, "'Operand 1'");
+ InstructionDesc[OpLogicalAnd].operands.push(OperandId, "'Operand 2'");
+
+ InstructionDesc[OpLogicalEqual].operands.push(OperandId, "'Operand 1'");
+ InstructionDesc[OpLogicalEqual].operands.push(OperandId, "'Operand 2'");
+
+ InstructionDesc[OpLogicalNotEqual].operands.push(OperandId, "'Operand 1'");
+ InstructionDesc[OpLogicalNotEqual].operands.push(OperandId, "'Operand 2'");
+
+ InstructionDesc[OpLogicalNot].operands.push(OperandId, "'Operand'");
+
+ InstructionDesc[OpBitwiseOr].operands.push(OperandId, "'Operand 1'");
+ InstructionDesc[OpBitwiseOr].operands.push(OperandId, "'Operand 2'");
+
+ InstructionDesc[OpBitwiseXor].operands.push(OperandId, "'Operand 1'");
+ InstructionDesc[OpBitwiseXor].operands.push(OperandId, "'Operand 2'");
+
+ InstructionDesc[OpBitwiseAnd].operands.push(OperandId, "'Operand 1'");
+ InstructionDesc[OpBitwiseAnd].operands.push(OperandId, "'Operand 2'");
+
+ InstructionDesc[OpBitFieldInsert].operands.push(OperandId, "'Base'");
+ InstructionDesc[OpBitFieldInsert].operands.push(OperandId, "'Insert'");
+ InstructionDesc[OpBitFieldInsert].operands.push(OperandId, "'Offset'");
+ InstructionDesc[OpBitFieldInsert].operands.push(OperandId, "'Count'");
+
+ InstructionDesc[OpBitFieldSExtract].operands.push(OperandId, "'Base'");
+ InstructionDesc[OpBitFieldSExtract].operands.push(OperandId, "'Offset'");
+ InstructionDesc[OpBitFieldSExtract].operands.push(OperandId, "'Count'");
+
+ InstructionDesc[OpBitFieldUExtract].operands.push(OperandId, "'Base'");
+ InstructionDesc[OpBitFieldUExtract].operands.push(OperandId, "'Offset'");
+ InstructionDesc[OpBitFieldUExtract].operands.push(OperandId, "'Count'");
+
+ InstructionDesc[OpBitReverse].operands.push(OperandId, "'Base'");
+
+ InstructionDesc[OpBitCount].operands.push(OperandId, "'Base'");
+
+ InstructionDesc[OpSelect].operands.push(OperandId, "'Condition'");
+ InstructionDesc[OpSelect].operands.push(OperandId, "'Object 1'");
+ InstructionDesc[OpSelect].operands.push(OperandId, "'Object 2'");
+
+ InstructionDesc[OpIEqual].operands.push(OperandId, "'Operand 1'");
+ InstructionDesc[OpIEqual].operands.push(OperandId, "'Operand 2'");
+
+ InstructionDesc[OpFOrdEqual].operands.push(OperandId, "'Operand 1'");
+ InstructionDesc[OpFOrdEqual].operands.push(OperandId, "'Operand 2'");
+
+ InstructionDesc[OpFUnordEqual].operands.push(OperandId, "'Operand 1'");
+ InstructionDesc[OpFUnordEqual].operands.push(OperandId, "'Operand 2'");
+
+ InstructionDesc[OpINotEqual].operands.push(OperandId, "'Operand 1'");
+ InstructionDesc[OpINotEqual].operands.push(OperandId, "'Operand 2'");
+
+ InstructionDesc[OpFOrdNotEqual].operands.push(OperandId, "'Operand 1'");
+ InstructionDesc[OpFOrdNotEqual].operands.push(OperandId, "'Operand 2'");
+
+ InstructionDesc[OpFUnordNotEqual].operands.push(OperandId, "'Operand 1'");
+ InstructionDesc[OpFUnordNotEqual].operands.push(OperandId, "'Operand 2'");
+
+ InstructionDesc[OpULessThan].operands.push(OperandId, "'Operand 1'");
+ InstructionDesc[OpULessThan].operands.push(OperandId, "'Operand 2'");
+
+ InstructionDesc[OpSLessThan].operands.push(OperandId, "'Operand 1'");
+ InstructionDesc[OpSLessThan].operands.push(OperandId, "'Operand 2'");
+
+ InstructionDesc[OpFOrdLessThan].operands.push(OperandId, "'Operand 1'");
+ InstructionDesc[OpFOrdLessThan].operands.push(OperandId, "'Operand 2'");
+
+ InstructionDesc[OpFUnordLessThan].operands.push(OperandId, "'Operand 1'");
+ InstructionDesc[OpFUnordLessThan].operands.push(OperandId, "'Operand 2'");
+
+ InstructionDesc[OpUGreaterThan].operands.push(OperandId, "'Operand 1'");
+ InstructionDesc[OpUGreaterThan].operands.push(OperandId, "'Operand 2'");
+
+ InstructionDesc[OpSGreaterThan].operands.push(OperandId, "'Operand 1'");
+ InstructionDesc[OpSGreaterThan].operands.push(OperandId, "'Operand 2'");
+
+ InstructionDesc[OpFOrdGreaterThan].operands.push(OperandId, "'Operand 1'");
+ InstructionDesc[OpFOrdGreaterThan].operands.push(OperandId, "'Operand 2'");
+
+ InstructionDesc[OpFUnordGreaterThan].operands.push(OperandId, "'Operand 1'");
+ InstructionDesc[OpFUnordGreaterThan].operands.push(OperandId, "'Operand 2'");
+
+ InstructionDesc[OpULessThanEqual].operands.push(OperandId, "'Operand 1'");
+ InstructionDesc[OpULessThanEqual].operands.push(OperandId, "'Operand 2'");
+
+ InstructionDesc[OpSLessThanEqual].operands.push(OperandId, "'Operand 1'");
+ InstructionDesc[OpSLessThanEqual].operands.push(OperandId, "'Operand 2'");
+
+ InstructionDesc[OpFOrdLessThanEqual].operands.push(OperandId, "'Operand 1'");
+ InstructionDesc[OpFOrdLessThanEqual].operands.push(OperandId, "'Operand 2'");
+
+ InstructionDesc[OpFUnordLessThanEqual].operands.push(OperandId, "'Operand 1'");
+ InstructionDesc[OpFUnordLessThanEqual].operands.push(OperandId, "'Operand 2'");
+
+ InstructionDesc[OpUGreaterThanEqual].operands.push(OperandId, "'Operand 1'");
+ InstructionDesc[OpUGreaterThanEqual].operands.push(OperandId, "'Operand 2'");
+
+ InstructionDesc[OpSGreaterThanEqual].operands.push(OperandId, "'Operand 1'");
+ InstructionDesc[OpSGreaterThanEqual].operands.push(OperandId, "'Operand 2'");
+
+ InstructionDesc[OpFOrdGreaterThanEqual].operands.push(OperandId, "'Operand 1'");
+ InstructionDesc[OpFOrdGreaterThanEqual].operands.push(OperandId, "'Operand 2'");
+
+ InstructionDesc[OpFUnordGreaterThanEqual].operands.push(OperandId, "'Operand 1'");
+ InstructionDesc[OpFUnordGreaterThanEqual].operands.push(OperandId, "'Operand 2'");
+
+ InstructionDesc[OpDPdx].operands.push(OperandId, "'P'");
+
+ InstructionDesc[OpDPdy].operands.push(OperandId, "'P'");
+
+ InstructionDesc[OpFwidth].operands.push(OperandId, "'P'");
+
+ InstructionDesc[OpDPdxFine].operands.push(OperandId, "'P'");
+
+ InstructionDesc[OpDPdyFine].operands.push(OperandId, "'P'");
+
+ InstructionDesc[OpFwidthFine].operands.push(OperandId, "'P'");
+
+ InstructionDesc[OpDPdxCoarse].operands.push(OperandId, "'P'");
+
+ InstructionDesc[OpDPdyCoarse].operands.push(OperandId, "'P'");
+
+ InstructionDesc[OpFwidthCoarse].operands.push(OperandId, "'P'");
+
+ InstructionDesc[OpEmitStreamVertex].operands.push(OperandId, "'Stream'");
+
+ InstructionDesc[OpEndStreamPrimitive].operands.push(OperandId, "'Stream'");
+
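+ // OpControlBarrier takes both an execution scope and a memory scope plus a
+ // memory-semantics mask; OpMemoryBarrier takes only the latter two.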
+ InstructionDesc[OpControlBarrier].operands.push(OperandScope, "'Execution'");
+ InstructionDesc[OpControlBarrier].operands.push(OperandScope, "'Memory'");
+ InstructionDesc[OpControlBarrier].operands.push(OperandMemorySemantics, "'Semantics'");
+
+ InstructionDesc[OpMemoryBarrier].operands.push(OperandScope, "'Memory'");
+ InstructionDesc[OpMemoryBarrier].operands.push(OperandMemorySemantics, "'Semantics'");
+
+ InstructionDesc[OpImageTexelPointer].operands.push(OperandId, "'Image'");
+ InstructionDesc[OpImageTexelPointer].operands.push(OperandId, "'Coordinate'");
+ InstructionDesc[OpImageTexelPointer].operands.push(OperandId, "'Sample'");
+
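+ // Atomics share a leading 'Pointer', 'Scope', and memory-semantics operand(s);
+ // read-modify-write forms append 'Value', and the compare-exchange pair adds
+ // a 'Comparator' along with separate 'Equal'/'Unequal' semantics.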
+ InstructionDesc[OpAtomicLoad].operands.push(OperandId, "'Pointer'");
+ InstructionDesc[OpAtomicLoad].operands.push(OperandScope, "'Scope'");
+ InstructionDesc[OpAtomicLoad].operands.push(OperandMemorySemantics, "'Semantics'");
+
+ InstructionDesc[OpAtomicStore].operands.push(OperandId, "'Pointer'");
+ InstructionDesc[OpAtomicStore].operands.push(OperandScope, "'Scope'");
+ InstructionDesc[OpAtomicStore].operands.push(OperandMemorySemantics, "'Semantics'");
+ InstructionDesc[OpAtomicStore].operands.push(OperandId, "'Value'");
+
+ InstructionDesc[OpAtomicExchange].operands.push(OperandId, "'Pointer'");
+ InstructionDesc[OpAtomicExchange].operands.push(OperandScope, "'Scope'");
+ InstructionDesc[OpAtomicExchange].operands.push(OperandMemorySemantics, "'Semantics'");
+ InstructionDesc[OpAtomicExchange].operands.push(OperandId, "'Value'");
+
+ InstructionDesc[OpAtomicCompareExchange].operands.push(OperandId, "'Pointer'");
+ InstructionDesc[OpAtomicCompareExchange].operands.push(OperandScope, "'Scope'");
+ InstructionDesc[OpAtomicCompareExchange].operands.push(OperandMemorySemantics, "'Equal'");
+ InstructionDesc[OpAtomicCompareExchange].operands.push(OperandMemorySemantics, "'Unequal'");
+ InstructionDesc[OpAtomicCompareExchange].operands.push(OperandId, "'Value'");
+ InstructionDesc[OpAtomicCompareExchange].operands.push(OperandId, "'Comparator'");
+
+ InstructionDesc[OpAtomicCompareExchangeWeak].operands.push(OperandId, "'Pointer'");
+ InstructionDesc[OpAtomicCompareExchangeWeak].operands.push(OperandScope, "'Scope'");
+ InstructionDesc[OpAtomicCompareExchangeWeak].operands.push(OperandMemorySemantics, "'Equal'");
+ InstructionDesc[OpAtomicCompareExchangeWeak].operands.push(OperandMemorySemantics, "'Unequal'");
+ InstructionDesc[OpAtomicCompareExchangeWeak].operands.push(OperandId, "'Value'");
+ InstructionDesc[OpAtomicCompareExchangeWeak].operands.push(OperandId, "'Comparator'");
+
+ InstructionDesc[OpAtomicIIncrement].operands.push(OperandId, "'Pointer'");
+ InstructionDesc[OpAtomicIIncrement].operands.push(OperandScope, "'Scope'");
+ InstructionDesc[OpAtomicIIncrement].operands.push(OperandMemorySemantics, "'Semantics'");
+
+ InstructionDesc[OpAtomicIDecrement].operands.push(OperandId, "'Pointer'");
+ InstructionDesc[OpAtomicIDecrement].operands.push(OperandScope, "'Scope'");
+ InstructionDesc[OpAtomicIDecrement].operands.push(OperandMemorySemantics, "'Semantics'");
+
+ InstructionDesc[OpAtomicIAdd].operands.push(OperandId, "'Pointer'");
+ InstructionDesc[OpAtomicIAdd].operands.push(OperandScope, "'Scope'");
+ InstructionDesc[OpAtomicIAdd].operands.push(OperandMemorySemantics, "'Semantics'");
+ InstructionDesc[OpAtomicIAdd].operands.push(OperandId, "'Value'");
+
+ InstructionDesc[OpAtomicISub].operands.push(OperandId, "'Pointer'");
+ InstructionDesc[OpAtomicISub].operands.push(OperandScope, "'Scope'");
+ InstructionDesc[OpAtomicISub].operands.push(OperandMemorySemantics, "'Semantics'");
+ InstructionDesc[OpAtomicISub].operands.push(OperandId, "'Value'");
+
+ InstructionDesc[OpAtomicUMin].operands.push(OperandId, "'Pointer'");
+ InstructionDesc[OpAtomicUMin].operands.push(OperandScope, "'Scope'");
+ InstructionDesc[OpAtomicUMin].operands.push(OperandMemorySemantics, "'Semantics'");
+ InstructionDesc[OpAtomicUMin].operands.push(OperandId, "'Value'");
+
+ InstructionDesc[OpAtomicUMax].operands.push(OperandId, "'Pointer'");
+ InstructionDesc[OpAtomicUMax].operands.push(OperandScope, "'Scope'");
+ InstructionDesc[OpAtomicUMax].operands.push(OperandMemorySemantics, "'Semantics'");
+ InstructionDesc[OpAtomicUMax].operands.push(OperandId, "'Value'");
+
+ InstructionDesc[OpAtomicSMin].operands.push(OperandId, "'Pointer'");
+ InstructionDesc[OpAtomicSMin].operands.push(OperandScope, "'Scope'");
+ InstructionDesc[OpAtomicSMin].operands.push(OperandMemorySemantics, "'Semantics'");
+ InstructionDesc[OpAtomicSMin].operands.push(OperandId, "'Value'");
+
+ InstructionDesc[OpAtomicSMax].operands.push(OperandId, "'Pointer'");
+ InstructionDesc[OpAtomicSMax].operands.push(OperandScope, "'Scope'");
+ InstructionDesc[OpAtomicSMax].operands.push(OperandMemorySemantics, "'Semantics'");
+ InstructionDesc[OpAtomicSMax].operands.push(OperandId, "'Value'");
+
+ InstructionDesc[OpAtomicAnd].operands.push(OperandId, "'Pointer'");
+ InstructionDesc[OpAtomicAnd].operands.push(OperandScope, "'Scope'");
+ InstructionDesc[OpAtomicAnd].operands.push(OperandMemorySemantics, "'Semantics'");
+ InstructionDesc[OpAtomicAnd].operands.push(OperandId, "'Value'");
+
+ InstructionDesc[OpAtomicOr].operands.push(OperandId, "'Pointer'");
+ InstructionDesc[OpAtomicOr].operands.push(OperandScope, "'Scope'");
+ InstructionDesc[OpAtomicOr].operands.push(OperandMemorySemantics, "'Semantics'");
+ InstructionDesc[OpAtomicOr].operands.push(OperandId, "'Value'");
+
+ InstructionDesc[OpAtomicXor].operands.push(OperandId, "'Pointer'");
+ InstructionDesc[OpAtomicXor].operands.push(OperandScope, "'Scope'");
+ InstructionDesc[OpAtomicXor].operands.push(OperandMemorySemantics, "'Semantics'");
+ InstructionDesc[OpAtomicXor].operands.push(OperandId, "'Value'");
+
+ InstructionDesc[OpAtomicFlagTestAndSet].operands.push(OperandId, "'Pointer'");
+ InstructionDesc[OpAtomicFlagTestAndSet].operands.push(OperandScope, "'Scope'");
+ InstructionDesc[OpAtomicFlagTestAndSet].operands.push(OperandMemorySemantics, "'Semantics'");
+
+ InstructionDesc[OpAtomicFlagClear].operands.push(OperandId, "'Pointer'");
+ InstructionDesc[OpAtomicFlagClear].operands.push(OperandScope, "'Scope'");
+ InstructionDesc[OpAtomicFlagClear].operands.push(OperandMemorySemantics, "'Semantics'");
+
+ InstructionDesc[OpLoopMerge].operands.push(OperandId, "'Merge Block'");
+ InstructionDesc[OpLoopMerge].operands.push(OperandId, "'Continue Target'");
+ InstructionDesc[OpLoopMerge].operands.push(OperandLoop, "");
+ InstructionDesc[OpLoopMerge].operands.push(OperandOptionalLiteral, "");
+
+ InstructionDesc[OpSelectionMerge].operands.push(OperandId, "'Merge Block'");
+ InstructionDesc[OpSelectionMerge].operands.push(OperandSelect, "");
+
+ InstructionDesc[OpBranch].operands.push(OperandId, "'Target Label'");
+
+ InstructionDesc[OpBranchConditional].operands.push(OperandId, "'Condition'");
+ InstructionDesc[OpBranchConditional].operands.push(OperandId, "'True Label'");
+ InstructionDesc[OpBranchConditional].operands.push(OperandId, "'False Label'");
+ InstructionDesc[OpBranchConditional].operands.push(OperandVariableLiterals, "'Branch weights'");
+
+ InstructionDesc[OpSwitch].operands.push(OperandId, "'Selector'");
+ InstructionDesc[OpSwitch].operands.push(OperandId, "'Default'");
+ InstructionDesc[OpSwitch].operands.push(OperandVariableLiteralId, "'Target'");
+
+ InstructionDesc[OpReturnValue].operands.push(OperandId, "'Value'");
+
+ InstructionDesc[OpLifetimeStart].operands.push(OperandId, "'Pointer'");
+ InstructionDesc[OpLifetimeStart].operands.push(OperandLiteralNumber, "'Size'");
+
+ InstructionDesc[OpLifetimeStop].operands.push(OperandId, "'Pointer'");
+ InstructionDesc[OpLifetimeStop].operands.push(OperandLiteralNumber, "'Size'");
+
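+ // OpenCL group collectives: each leads with an 'Execution' scope; the
+ // arithmetic forms add a group 'Operation' (reduce/scan) before their operand.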
+ InstructionDesc[OpGroupAsyncCopy].operands.push(OperandScope, "'Execution'");
+ InstructionDesc[OpGroupAsyncCopy].operands.push(OperandId, "'Destination'");
+ InstructionDesc[OpGroupAsyncCopy].operands.push(OperandId, "'Source'");
+ InstructionDesc[OpGroupAsyncCopy].operands.push(OperandId, "'Num Elements'");
+ InstructionDesc[OpGroupAsyncCopy].operands.push(OperandId, "'Stride'");
+ InstructionDesc[OpGroupAsyncCopy].operands.push(OperandId, "'Event'");
+
+ InstructionDesc[OpGroupWaitEvents].operands.push(OperandScope, "'Execution'");
+ InstructionDesc[OpGroupWaitEvents].operands.push(OperandId, "'Num Events'");
+ InstructionDesc[OpGroupWaitEvents].operands.push(OperandId, "'Events List'");
+
+ InstructionDesc[OpGroupAll].operands.push(OperandScope, "'Execution'");
+ InstructionDesc[OpGroupAll].operands.push(OperandId, "'Predicate'");
+
+ InstructionDesc[OpGroupAny].operands.push(OperandScope, "'Execution'");
+ InstructionDesc[OpGroupAny].operands.push(OperandId, "'Predicate'");
+
+ InstructionDesc[OpGroupBroadcast].operands.push(OperandScope, "'Execution'");
+ InstructionDesc[OpGroupBroadcast].operands.push(OperandId, "'Value'");
+ InstructionDesc[OpGroupBroadcast].operands.push(OperandId, "'LocalId'");
+
+ InstructionDesc[OpGroupIAdd].operands.push(OperandScope, "'Execution'");
+ InstructionDesc[OpGroupIAdd].operands.push(OperandGroupOperation, "'Operation'");
+ InstructionDesc[OpGroupIAdd].operands.push(OperandId, "'X'");
+
+ InstructionDesc[OpGroupFAdd].operands.push(OperandScope, "'Execution'");
+ InstructionDesc[OpGroupFAdd].operands.push(OperandGroupOperation, "'Operation'");
+ InstructionDesc[OpGroupFAdd].operands.push(OperandId, "'X'");
+
+ InstructionDesc[OpGroupUMin].operands.push(OperandScope, "'Execution'");
+ InstructionDesc[OpGroupUMin].operands.push(OperandGroupOperation, "'Operation'");
+ InstructionDesc[OpGroupUMin].operands.push(OperandId, "'X'");
+
+ InstructionDesc[OpGroupSMin].operands.push(OperandScope, "'Execution'");
+ InstructionDesc[OpGroupSMin].operands.push(OperandGroupOperation, "'Operation'");
+ InstructionDesc[OpGroupSMin].operands.push(OperandId, "'X'");
+
+ InstructionDesc[OpGroupFMin].operands.push(OperandScope, "'Execution'");
+ InstructionDesc[OpGroupFMin].operands.push(OperandGroupOperation, "'Operation'");
+ InstructionDesc[OpGroupFMin].operands.push(OperandId, "'X'");
+
+ InstructionDesc[OpGroupUMax].operands.push(OperandScope, "'Execution'");
+ InstructionDesc[OpGroupUMax].operands.push(OperandGroupOperation, "'Operation'");
+ InstructionDesc[OpGroupUMax].operands.push(OperandId, "'X'");
+
+ InstructionDesc[OpGroupSMax].operands.push(OperandScope, "'Execution'");
+ InstructionDesc[OpGroupSMax].operands.push(OperandGroupOperation, "'Operation'");
+ InstructionDesc[OpGroupSMax].operands.push(OperandId, "'X'");
+
+ InstructionDesc[OpGroupFMax].operands.push(OperandScope, "'Execution'");
+ InstructionDesc[OpGroupFMax].operands.push(OperandGroupOperation, "'Operation'");
+ InstructionDesc[OpGroupFMax].operands.push(OperandId, "'X'");
+
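+ // OpenCL pipe instructions: every form passes 'Packet Size' and
+ // 'Packet Alignment' as explicit <id> operands.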
+ InstructionDesc[OpReadPipe].operands.push(OperandId, "'Pipe'");
+ InstructionDesc[OpReadPipe].operands.push(OperandId, "'Pointer'");
+ InstructionDesc[OpReadPipe].operands.push(OperandId, "'Packet Size'");
+ InstructionDesc[OpReadPipe].operands.push(OperandId, "'Packet Alignment'");
+
+ InstructionDesc[OpWritePipe].operands.push(OperandId, "'Pipe'");
+ InstructionDesc[OpWritePipe].operands.push(OperandId, "'Pointer'");
+ InstructionDesc[OpWritePipe].operands.push(OperandId, "'Packet Size'");
+ InstructionDesc[OpWritePipe].operands.push(OperandId, "'Packet Alignment'");
+
+ InstructionDesc[OpReservedReadPipe].operands.push(OperandId, "'Pipe'");
+ InstructionDesc[OpReservedReadPipe].operands.push(OperandId, "'Reserve Id'");
+ InstructionDesc[OpReservedReadPipe].operands.push(OperandId, "'Index'");
+ InstructionDesc[OpReservedReadPipe].operands.push(OperandId, "'Pointer'");
+ InstructionDesc[OpReservedReadPipe].operands.push(OperandId, "'Packet Size'");
+ InstructionDesc[OpReservedReadPipe].operands.push(OperandId, "'Packet Alignment'");
+
+ InstructionDesc[OpReservedWritePipe].operands.push(OperandId, "'Pipe'");
+ InstructionDesc[OpReservedWritePipe].operands.push(OperandId, "'Reserve Id'");
+ InstructionDesc[OpReservedWritePipe].operands.push(OperandId, "'Index'");
+ InstructionDesc[OpReservedWritePipe].operands.push(OperandId, "'Pointer'");
+ InstructionDesc[OpReservedWritePipe].operands.push(OperandId, "'Packet Size'");
+ InstructionDesc[OpReservedWritePipe].operands.push(OperandId, "'Packet Alignment'");
+
+ InstructionDesc[OpReserveReadPipePackets].operands.push(OperandId, "'Pipe'");
+ InstructionDesc[OpReserveReadPipePackets].operands.push(OperandId, "'Num Packets'");
+ InstructionDesc[OpReserveReadPipePackets].operands.push(OperandId, "'Packet Size'");
+ InstructionDesc[OpReserveReadPipePackets].operands.push(OperandId, "'Packet Alignment'");
+
+ InstructionDesc[OpReserveWritePipePackets].operands.push(OperandId, "'Pipe'");
+ InstructionDesc[OpReserveWritePipePackets].operands.push(OperandId, "'Num Packets'");
+ InstructionDesc[OpReserveWritePipePackets].operands.push(OperandId, "'Packet Size'");
+ InstructionDesc[OpReserveWritePipePackets].operands.push(OperandId, "'Packet Alignment'");
+
+ InstructionDesc[OpCommitReadPipe].operands.push(OperandId, "'Pipe'");
+ InstructionDesc[OpCommitReadPipe].operands.push(OperandId, "'Reserve Id'");
+ InstructionDesc[OpCommitReadPipe].operands.push(OperandId, "'Packet Size'");
+ InstructionDesc[OpCommitReadPipe].operands.push(OperandId, "'Packet Alignment'");
+
+ InstructionDesc[OpCommitWritePipe].operands.push(OperandId, "'Pipe'");
+ InstructionDesc[OpCommitWritePipe].operands.push(OperandId, "'Reserve Id'");
+ InstructionDesc[OpCommitWritePipe].operands.push(OperandId, "'Packet Size'");
+ InstructionDesc[OpCommitWritePipe].operands.push(OperandId, "'Packet Alignment'");
+
+ InstructionDesc[OpIsValidReserveId].operands.push(OperandId, "'Reserve Id'");
+
+ InstructionDesc[OpGetNumPipePackets].operands.push(OperandId, "'Pipe'");
+ InstructionDesc[OpGetNumPipePackets].operands.push(OperandId, "'Packet Size'");
+ InstructionDesc[OpGetNumPipePackets].operands.push(OperandId, "'Packet Alignment'");
+
+ InstructionDesc[OpGetMaxPipePackets].operands.push(OperandId, "'Pipe'");
+ InstructionDesc[OpGetMaxPipePackets].operands.push(OperandId, "'Packet Size'");
+ InstructionDesc[OpGetMaxPipePackets].operands.push(OperandId, "'Packet Alignment'");
+
+ InstructionDesc[OpGroupReserveReadPipePackets].operands.push(OperandScope, "'Execution'");
+ InstructionDesc[OpGroupReserveReadPipePackets].operands.push(OperandId, "'Pipe'");
+ InstructionDesc[OpGroupReserveReadPipePackets].operands.push(OperandId, "'Num Packets'");
+ InstructionDesc[OpGroupReserveReadPipePackets].operands.push(OperandId, "'Packet Size'");
+ InstructionDesc[OpGroupReserveReadPipePackets].operands.push(OperandId, "'Packet Alignment'");
+
+ InstructionDesc[OpGroupReserveWritePipePackets].operands.push(OperandScope, "'Execution'");
+ InstructionDesc[OpGroupReserveWritePipePackets].operands.push(OperandId, "'Pipe'");
+ InstructionDesc[OpGroupReserveWritePipePackets].operands.push(OperandId, "'Num Packets'");
+ InstructionDesc[OpGroupReserveWritePipePackets].operands.push(OperandId, "'Packet Size'");
+ InstructionDesc[OpGroupReserveWritePipePackets].operands.push(OperandId, "'Packet Alignment'");
+
+ InstructionDesc[OpGroupCommitReadPipe].operands.push(OperandScope, "'Execution'");
+ InstructionDesc[OpGroupCommitReadPipe].operands.push(OperandId, "'Pipe'");
+ InstructionDesc[OpGroupCommitReadPipe].operands.push(OperandId, "'Reserve Id'");
+ InstructionDesc[OpGroupCommitReadPipe].operands.push(OperandId, "'Packet Size'");
+ InstructionDesc[OpGroupCommitReadPipe].operands.push(OperandId, "'Packet Alignment'");
+
+ InstructionDesc[OpGroupCommitWritePipe].operands.push(OperandScope, "'Execution'");
+ InstructionDesc[OpGroupCommitWritePipe].operands.push(OperandId, "'Pipe'");
+ InstructionDesc[OpGroupCommitWritePipe].operands.push(OperandId, "'Reserve Id'");
+ InstructionDesc[OpGroupCommitWritePipe].operands.push(OperandId, "'Packet Size'");
+ InstructionDesc[OpGroupCommitWritePipe].operands.push(OperandId, "'Packet Alignment'");
+
+ InstructionDesc[OpBuildNDRange].operands.push(OperandId, "'GlobalWorkSize'");
+ InstructionDesc[OpBuildNDRange].operands.push(OperandId, "'LocalWorkSize'");
+ InstructionDesc[OpBuildNDRange].operands.push(OperandId, "'GlobalWorkOffset'");
+
+ InstructionDesc[OpCaptureEventProfilingInfo].operands.push(OperandId, "'Event'");
+ InstructionDesc[OpCaptureEventProfilingInfo].operands.push(OperandId, "'Profiling Info'");
+ InstructionDesc[OpCaptureEventProfilingInfo].operands.push(OperandId, "'Value'");
+
+ InstructionDesc[OpSetUserEventStatus].operands.push(OperandId, "'Event'");
+ InstructionDesc[OpSetUserEventStatus].operands.push(OperandId, "'Status'");
+
+ InstructionDesc[OpIsValidEvent].operands.push(OperandId, "'Event'");
+
+ InstructionDesc[OpRetainEvent].operands.push(OperandId, "'Event'");
+
+ InstructionDesc[OpReleaseEvent].operands.push(OperandId, "'Event'");
+
+ InstructionDesc[OpGetKernelWorkGroupSize].operands.push(OperandId, "'Invoke'");
+ InstructionDesc[OpGetKernelWorkGroupSize].operands.push(OperandId, "'Param'");
+ InstructionDesc[OpGetKernelWorkGroupSize].operands.push(OperandId, "'Param Size'");
+ InstructionDesc[OpGetKernelWorkGroupSize].operands.push(OperandId, "'Param Align'");
+
+ InstructionDesc[OpGetKernelPreferredWorkGroupSizeMultiple].operands.push(OperandId, "'Invoke'");
+ InstructionDesc[OpGetKernelPreferredWorkGroupSizeMultiple].operands.push(OperandId, "'Param'");
+ InstructionDesc[OpGetKernelPreferredWorkGroupSizeMultiple].operands.push(OperandId, "'Param Size'");
+ InstructionDesc[OpGetKernelPreferredWorkGroupSizeMultiple].operands.push(OperandId, "'Param Align'");
+
+ InstructionDesc[OpGetKernelNDrangeSubGroupCount].operands.push(OperandId, "'ND Range'");
+ InstructionDesc[OpGetKernelNDrangeSubGroupCount].operands.push(OperandId, "'Invoke'");
+ InstructionDesc[OpGetKernelNDrangeSubGroupCount].operands.push(OperandId, "'Param'");
+ InstructionDesc[OpGetKernelNDrangeSubGroupCount].operands.push(OperandId, "'Param Size'");
+ InstructionDesc[OpGetKernelNDrangeSubGroupCount].operands.push(OperandId, "'Param Align'");
+
+ InstructionDesc[OpGetKernelNDrangeMaxSubGroupSize].operands.push(OperandId, "'ND Range'");
+ InstructionDesc[OpGetKernelNDrangeMaxSubGroupSize].operands.push(OperandId, "'Invoke'");
+ InstructionDesc[OpGetKernelNDrangeMaxSubGroupSize].operands.push(OperandId, "'Param'");
+ InstructionDesc[OpGetKernelNDrangeMaxSubGroupSize].operands.push(OperandId, "'Param Size'");
+ InstructionDesc[OpGetKernelNDrangeMaxSubGroupSize].operands.push(OperandId, "'Param Align'");
+
+ InstructionDesc[OpEnqueueKernel].operands.push(OperandId, "'Queue'");
+ InstructionDesc[OpEnqueueKernel].operands.push(OperandId, "'Flags'");
+ InstructionDesc[OpEnqueueKernel].operands.push(OperandId, "'ND Range'");
+ InstructionDesc[OpEnqueueKernel].operands.push(OperandId, "'Num Events'");
+ InstructionDesc[OpEnqueueKernel].operands.push(OperandId, "'Wait Events'");
+ InstructionDesc[OpEnqueueKernel].operands.push(OperandId, "'Ret Event'");
+ InstructionDesc[OpEnqueueKernel].operands.push(OperandId, "'Invoke'");
+ InstructionDesc[OpEnqueueKernel].operands.push(OperandId, "'Param'");
+ InstructionDesc[OpEnqueueKernel].operands.push(OperandId, "'Param Size'");
+ InstructionDesc[OpEnqueueKernel].operands.push(OperandId, "'Param Align'");
+ InstructionDesc[OpEnqueueKernel].operands.push(OperandVariableIds, "'Local Size'");
+
+ InstructionDesc[OpEnqueueMarker].operands.push(OperandId, "'Queue'");
+ InstructionDesc[OpEnqueueMarker].operands.push(OperandId, "'Num Events'");
+ InstructionDesc[OpEnqueueMarker].operands.push(OperandId, "'Wait Events'");
+ InstructionDesc[OpEnqueueMarker].operands.push(OperandId, "'Ret Event'");
+
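+ // OpGroupNonUniform* (SPIR-V 1.3 subgroup operations) all lead with an
+ // 'Execution' scope; the arithmetic reductions take an optional 'ClusterSize'
+ // <id>, meaningful when 'Operation' is ClusteredReduce.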
+ InstructionDesc[OpGroupNonUniformElect].operands.push(OperandScope, "'Execution'");
+
+ InstructionDesc[OpGroupNonUniformAll].operands.push(OperandScope, "'Execution'");
+ InstructionDesc[OpGroupNonUniformAll].operands.push(OperandId, "'X'");
+
+ InstructionDesc[OpGroupNonUniformAny].operands.push(OperandScope, "'Execution'");
+ InstructionDesc[OpGroupNonUniformAny].operands.push(OperandId, "'X'");
+
+ InstructionDesc[OpGroupNonUniformAllEqual].operands.push(OperandScope, "'Execution'");
+ InstructionDesc[OpGroupNonUniformAllEqual].operands.push(OperandId, "'X'");
+
+ InstructionDesc[OpGroupNonUniformBroadcast].operands.push(OperandScope, "'Execution'");
+ InstructionDesc[OpGroupNonUniformBroadcast].operands.push(OperandId, "X");
+ InstructionDesc[OpGroupNonUniformBroadcast].operands.push(OperandId, "ID");
+
+ InstructionDesc[OpGroupNonUniformBroadcastFirst].operands.push(OperandScope, "'Execution'");
+ InstructionDesc[OpGroupNonUniformBroadcastFirst].operands.push(OperandId, "'X'");
+
+ InstructionDesc[OpGroupNonUniformBallot].operands.push(OperandScope, "'Execution'");
+ InstructionDesc[OpGroupNonUniformBallot].operands.push(OperandId, "'X'");
+
+ InstructionDesc[OpGroupNonUniformInverseBallot].operands.push(OperandScope, "'Execution'");
+ InstructionDesc[OpGroupNonUniformInverseBallot].operands.push(OperandId, "'X'");
+
+ InstructionDesc[OpGroupNonUniformBallotBitExtract].operands.push(OperandScope, "'Execution'");
+ InstructionDesc[OpGroupNonUniformBallotBitExtract].operands.push(OperandId, "X");
+ InstructionDesc[OpGroupNonUniformBallotBitExtract].operands.push(OperandId, "Bit");
+
+ InstructionDesc[OpGroupNonUniformBallotBitCount].operands.push(OperandScope, "'Execution'");
+ InstructionDesc[OpGroupNonUniformBallotBitCount].operands.push(OperandGroupOperation, "'Operation'");
+ InstructionDesc[OpGroupNonUniformBallotBitCount].operands.push(OperandId, "X");
+
+ InstructionDesc[OpGroupNonUniformBallotFindLSB].operands.push(OperandScope, "'Execution'");
+ InstructionDesc[OpGroupNonUniformBallotFindLSB].operands.push(OperandId, "X");
+
+ InstructionDesc[OpGroupNonUniformBallotFindMSB].operands.push(OperandScope, "'Execution'");
+ InstructionDesc[OpGroupNonUniformBallotFindMSB].operands.push(OperandId, "X");
+
+ InstructionDesc[OpGroupNonUniformShuffle].operands.push(OperandScope, "'Execution'");
+ InstructionDesc[OpGroupNonUniformShuffle].operands.push(OperandId, "X");
+ InstructionDesc[OpGroupNonUniformShuffle].operands.push(OperandId, "'Id'");
+
+ InstructionDesc[OpGroupNonUniformShuffleXor].operands.push(OperandScope, "'Execution'");
+ InstructionDesc[OpGroupNonUniformShuffleXor].operands.push(OperandId, "X");
+ InstructionDesc[OpGroupNonUniformShuffleXor].operands.push(OperandId, "Mask");
+
+ InstructionDesc[OpGroupNonUniformShuffleUp].operands.push(OperandScope, "'Execution'");
+ InstructionDesc[OpGroupNonUniformShuffleUp].operands.push(OperandId, "X");
+ InstructionDesc[OpGroupNonUniformShuffleUp].operands.push(OperandId, "Offset");
+
+ InstructionDesc[OpGroupNonUniformShuffleDown].operands.push(OperandScope, "'Execution'");
+ InstructionDesc[OpGroupNonUniformShuffleDown].operands.push(OperandId, "X");
+ InstructionDesc[OpGroupNonUniformShuffleDown].operands.push(OperandId, "Offset");
+
+ InstructionDesc[OpGroupNonUniformIAdd].operands.push(OperandScope, "'Execution'");
+ InstructionDesc[OpGroupNonUniformIAdd].operands.push(OperandGroupOperation, "'Operation'");
+ InstructionDesc[OpGroupNonUniformIAdd].operands.push(OperandId, "X");
+ InstructionDesc[OpGroupNonUniformIAdd].operands.push(OperandId, "'ClusterSize'", true);
+
+ InstructionDesc[OpGroupNonUniformFAdd].operands.push(OperandScope, "'Execution'");
+ InstructionDesc[OpGroupNonUniformFAdd].operands.push(OperandGroupOperation, "'Operation'");
+ InstructionDesc[OpGroupNonUniformFAdd].operands.push(OperandId, "X");
+ InstructionDesc[OpGroupNonUniformFAdd].operands.push(OperandId, "'ClusterSize'", true);
+
+ InstructionDesc[OpGroupNonUniformIMul].operands.push(OperandScope, "'Execution'");
+ InstructionDesc[OpGroupNonUniformIMul].operands.push(OperandGroupOperation, "'Operation'");
+ InstructionDesc[OpGroupNonUniformIMul].operands.push(OperandId, "X");
+ InstructionDesc[OpGroupNonUniformIMul].operands.push(OperandId, "'ClusterSize'", true);
+
+ InstructionDesc[OpGroupNonUniformFMul].operands.push(OperandScope, "'Execution'");
+ InstructionDesc[OpGroupNonUniformFMul].operands.push(OperandGroupOperation, "'Operation'");
+ InstructionDesc[OpGroupNonUniformFMul].operands.push(OperandId, "X");
+ InstructionDesc[OpGroupNonUniformFMul].operands.push(OperandId, "'ClusterSize'", true);
+
+ InstructionDesc[OpGroupNonUniformSMin].operands.push(OperandScope, "'Execution'");
+ InstructionDesc[OpGroupNonUniformSMin].operands.push(OperandGroupOperation, "'Operation'");
+ InstructionDesc[OpGroupNonUniformSMin].operands.push(OperandId, "X");
+ InstructionDesc[OpGroupNonUniformSMin].operands.push(OperandId, "'ClusterSize'", true);
+
+ InstructionDesc[OpGroupNonUniformUMin].operands.push(OperandScope, "'Execution'");
+ InstructionDesc[OpGroupNonUniformUMin].operands.push(OperandGroupOperation, "'Operation'");
+ InstructionDesc[OpGroupNonUniformUMin].operands.push(OperandId, "X");
+ InstructionDesc[OpGroupNonUniformUMin].operands.push(OperandId, "'ClusterSize'", true);
+
+ InstructionDesc[OpGroupNonUniformFMin].operands.push(OperandScope, "'Execution'");
+ InstructionDesc[OpGroupNonUniformFMin].operands.push(OperandGroupOperation, "'Operation'");
+ InstructionDesc[OpGroupNonUniformFMin].operands.push(OperandId, "X");
+ InstructionDesc[OpGroupNonUniformFMin].operands.push(OperandId, "'ClusterSize'", true);
+
+ InstructionDesc[OpGroupNonUniformSMax].operands.push(OperandScope, "'Execution'");
+ InstructionDesc[OpGroupNonUniformSMax].operands.push(OperandGroupOperation, "'Operation'");
+ InstructionDesc[OpGroupNonUniformSMax].operands.push(OperandId, "X");
+ InstructionDesc[OpGroupNonUniformSMax].operands.push(OperandId, "'ClusterSize'", true);
+
+ InstructionDesc[OpGroupNonUniformUMax].operands.push(OperandScope, "'Execution'");
+ InstructionDesc[OpGroupNonUniformUMax].operands.push(OperandGroupOperation, "'Operation'");
+ InstructionDesc[OpGroupNonUniformUMax].operands.push(OperandId, "X");
+ InstructionDesc[OpGroupNonUniformUMax].operands.push(OperandId, "'ClusterSize'", true);
+
+ InstructionDesc[OpGroupNonUniformFMax].operands.push(OperandScope, "'Execution'");
+ InstructionDesc[OpGroupNonUniformFMax].operands.push(OperandGroupOperation, "'Operation'");
+ InstructionDesc[OpGroupNonUniformFMax].operands.push(OperandId, "X");
+ InstructionDesc[OpGroupNonUniformFMax].operands.push(OperandId, "'ClusterSize'", true);
+
+ InstructionDesc[OpGroupNonUniformBitwiseAnd].operands.push(OperandScope, "'Execution'");
+ InstructionDesc[OpGroupNonUniformBitwiseAnd].operands.push(OperandGroupOperation, "'Operation'");
+ InstructionDesc[OpGroupNonUniformBitwiseAnd].operands.push(OperandId, "X");
+ InstructionDesc[OpGroupNonUniformBitwiseAnd].operands.push(OperandId, "'ClusterSize'", true);
+
+ InstructionDesc[OpGroupNonUniformBitwiseOr].operands.push(OperandScope, "'Execution'");
+ InstructionDesc[OpGroupNonUniformBitwiseOr].operands.push(OperandGroupOperation, "'Operation'");
+ InstructionDesc[OpGroupNonUniformBitwiseOr].operands.push(OperandId, "X");
+ InstructionDesc[OpGroupNonUniformBitwiseOr].operands.push(OperandId, "'ClusterSize'", true);
+
+ InstructionDesc[OpGroupNonUniformBitwiseXor].operands.push(OperandScope, "'Execution'");
+ InstructionDesc[OpGroupNonUniformBitwiseXor].operands.push(OperandGroupOperation, "'Operation'");
+ InstructionDesc[OpGroupNonUniformBitwiseXor].operands.push(OperandId, "X");
+ InstructionDesc[OpGroupNonUniformBitwiseXor].operands.push(OperandId, "'ClusterSize'", true);
+
+ InstructionDesc[OpGroupNonUniformLogicalAnd].operands.push(OperandScope, "'Execution'");
+ InstructionDesc[OpGroupNonUniformLogicalAnd].operands.push(OperandGroupOperation, "'Operation'");
+ InstructionDesc[OpGroupNonUniformLogicalAnd].operands.push(OperandId, "X");
+ InstructionDesc[OpGroupNonUniformLogicalAnd].operands.push(OperandId, "'ClusterSize'", true);
+
+ InstructionDesc[OpGroupNonUniformLogicalOr].operands.push(OperandScope, "'Execution'");
+ InstructionDesc[OpGroupNonUniformLogicalOr].operands.push(OperandGroupOperation, "'Operation'");
+ InstructionDesc[OpGroupNonUniformLogicalOr].operands.push(OperandId, "X");
+ InstructionDesc[OpGroupNonUniformLogicalOr].operands.push(OperandId, "'ClusterSize'", true);
+
+ InstructionDesc[OpGroupNonUniformLogicalXor].operands.push(OperandScope, "'Execution'");
+ InstructionDesc[OpGroupNonUniformLogicalXor].operands.push(OperandGroupOperation, "'Operation'");
+ InstructionDesc[OpGroupNonUniformLogicalXor].operands.push(OperandId, "X");
+ InstructionDesc[OpGroupNonUniformLogicalXor].operands.push(OperandId, "'ClusterSize'", true);
+
+ InstructionDesc[OpGroupNonUniformQuadBroadcast].operands.push(OperandScope, "'Execution'");
+ InstructionDesc[OpGroupNonUniformQuadBroadcast].operands.push(OperandId, "X");
+ InstructionDesc[OpGroupNonUniformQuadBroadcast].operands.push(OperandId, "'Id'");
+
+ InstructionDesc[OpGroupNonUniformQuadSwap].operands.push(OperandScope, "'Execution'");
+ InstructionDesc[OpGroupNonUniformQuadSwap].operands.push(OperandId, "X");
+ InstructionDesc[OpGroupNonUniformQuadSwap].operands.push(OperandLiteralNumber, "'Direction'");
+
+ InstructionDesc[OpSubgroupBallotKHR].operands.push(OperandId, "'Predicate'");
+
+ InstructionDesc[OpSubgroupFirstInvocationKHR].operands.push(OperandId, "'Value'");
+
+ InstructionDesc[OpSubgroupAnyKHR].operands.push(OperandScope, "'Execution'");
+ InstructionDesc[OpSubgroupAnyKHR].operands.push(OperandId, "'Predicate'");
+
+ InstructionDesc[OpSubgroupAllKHR].operands.push(OperandScope, "'Execution'");
+ InstructionDesc[OpSubgroupAllKHR].operands.push(OperandId, "'Predicate'");
+
+ InstructionDesc[OpSubgroupAllEqualKHR].operands.push(OperandScope, "'Execution'");
+ InstructionDesc[OpSubgroupAllEqualKHR].operands.push(OperandId, "'Predicate'");
+
+ InstructionDesc[OpSubgroupReadInvocationKHR].operands.push(OperandId, "'Value'");
+ InstructionDesc[OpSubgroupReadInvocationKHR].operands.push(OperandId, "'Index'");
+
+ InstructionDesc[OpModuleProcessed].operands.push(OperandLiteralString, "'process'");
+
+#ifdef AMD_EXTENSIONS
+ InstructionDesc[OpGroupIAddNonUniformAMD].operands.push(OperandScope, "'Execution'");
+ InstructionDesc[OpGroupIAddNonUniformAMD].operands.push(OperandGroupOperation, "'Operation'");
+ InstructionDesc[OpGroupIAddNonUniformAMD].operands.push(OperandId, "'X'");
+
+ InstructionDesc[OpGroupFAddNonUniformAMD].operands.push(OperandScope, "'Execution'");
+ InstructionDesc[OpGroupFAddNonUniformAMD].operands.push(OperandGroupOperation, "'Operation'");
+ InstructionDesc[OpGroupFAddNonUniformAMD].operands.push(OperandId, "'X'");
+
+ InstructionDesc[OpGroupUMinNonUniformAMD].operands.push(OperandScope, "'Execution'");
+ InstructionDesc[OpGroupUMinNonUniformAMD].operands.push(OperandGroupOperation, "'Operation'");
+ InstructionDesc[OpGroupUMinNonUniformAMD].operands.push(OperandId, "'X'");
+
+ InstructionDesc[OpGroupSMinNonUniformAMD].operands.push(OperandScope, "'Execution'");
+ InstructionDesc[OpGroupSMinNonUniformAMD].operands.push(OperandGroupOperation, "'Operation'");
+ InstructionDesc[OpGroupSMinNonUniformAMD].operands.push(OperandId, "'X'");
+
+ InstructionDesc[OpGroupFMinNonUniformAMD].operands.push(OperandScope, "'Execution'");
+ InstructionDesc[OpGroupFMinNonUniformAMD].operands.push(OperandGroupOperation, "'Operation'");
+ InstructionDesc[OpGroupFMinNonUniformAMD].operands.push(OperandId, "'X'");
+
+ InstructionDesc[OpGroupUMaxNonUniformAMD].operands.push(OperandScope, "'Execution'");
+ InstructionDesc[OpGroupUMaxNonUniformAMD].operands.push(OperandGroupOperation, "'Operation'");
+ InstructionDesc[OpGroupUMaxNonUniformAMD].operands.push(OperandId, "'X'");
+
+ InstructionDesc[OpGroupSMaxNonUniformAMD].operands.push(OperandScope, "'Execution'");
+ InstructionDesc[OpGroupSMaxNonUniformAMD].operands.push(OperandGroupOperation, "'Operation'");
+ InstructionDesc[OpGroupSMaxNonUniformAMD].operands.push(OperandId, "'X'");
+
+ InstructionDesc[OpGroupFMaxNonUniformAMD].operands.push(OperandScope, "'Execution'");
+ InstructionDesc[OpGroupFMaxNonUniformAMD].operands.push(OperandGroupOperation, "'Operation'");
+ InstructionDesc[OpGroupFMaxNonUniformAMD].operands.push(OperandId, "'X'");
+
+ InstructionDesc[OpFragmentMaskFetchAMD].operands.push(OperandId, "'Image'");
+ InstructionDesc[OpFragmentMaskFetchAMD].operands.push(OperandId, "'Coordinate'");
+
+ InstructionDesc[OpFragmentFetchAMD].operands.push(OperandId, "'Image'");
+ InstructionDesc[OpFragmentFetchAMD].operands.push(OperandId, "'Coordinate'");
+ InstructionDesc[OpFragmentFetchAMD].operands.push(OperandId, "'Fragment Index'");
+#endif
+
+#ifdef NV_EXTENSIONS
+ InstructionDesc[OpGroupNonUniformPartitionNV].operands.push(OperandId, "X");
+
+ InstructionDesc[OpTypeAccelerationStructureNV].setResultAndType(true, false);
+
+ InstructionDesc[OpTraceNV].operands.push(OperandId, "'NV Acceleration Structure'");
+ InstructionDesc[OpTraceNV].operands.push(OperandId, "'Ray Flags'");
+ InstructionDesc[OpTraceNV].operands.push(OperandId, "'Cull Mask'");
+ InstructionDesc[OpTraceNV].operands.push(OperandId, "'SBT Record Offset'");
+ InstructionDesc[OpTraceNV].operands.push(OperandId, "'SBT Record Stride'");
+ InstructionDesc[OpTraceNV].operands.push(OperandId, "'Miss Index'");
+ InstructionDesc[OpTraceNV].operands.push(OperandId, "'Ray Origin'");
+ InstructionDesc[OpTraceNV].operands.push(OperandId, "'TMin'");
+ InstructionDesc[OpTraceNV].operands.push(OperandId, "'Ray Direction'");
+ InstructionDesc[OpTraceNV].operands.push(OperandId, "'TMax'");
+ InstructionDesc[OpTraceNV].operands.push(OperandId, "'Payload'");
+ InstructionDesc[OpTraceNV].setResultAndType(false, false);
+
+ InstructionDesc[OpReportIntersectionNV].operands.push(OperandId, "'Hit Parameter'");
+ InstructionDesc[OpReportIntersectionNV].operands.push(OperandId, "'Hit Kind'");
+
+ InstructionDesc[OpIgnoreIntersectionNV].setResultAndType(false, false);
+
+ InstructionDesc[OpTerminateRayNV].setResultAndType(false, false);
+
+ InstructionDesc[OpExecuteCallableNV].operands.push(OperandId, "'SBT Record Index'");
+ InstructionDesc[OpExecuteCallableNV].operands.push(OperandId, "'CallableData ID'");
+ InstructionDesc[OpExecuteCallableNV].setResultAndType(false, false);
+
+ InstructionDesc[OpImageSampleFootprintNV].operands.push(OperandId, "'Sampled Image'");
+ InstructionDesc[OpImageSampleFootprintNV].operands.push(OperandId, "'Coordinate'");
+ InstructionDesc[OpImageSampleFootprintNV].operands.push(OperandId, "'Granularity'");
+ InstructionDesc[OpImageSampleFootprintNV].operands.push(OperandId, "'Coarse'");
+ InstructionDesc[OpImageSampleFootprintNV].operands.push(OperandImageOperands, "", true);
+ InstructionDesc[OpImageSampleFootprintNV].operands.push(OperandVariableIds, "", true);
+
+ InstructionDesc[OpWritePackedPrimitiveIndices4x8NV].operands.push(OperandId, "'Index Offset'");
+ InstructionDesc[OpWritePackedPrimitiveIndices4x8NV].operands.push(OperandId, "'Packed Indices'");
+#endif
+
+ InstructionDesc[OpTypeCooperativeMatrixNV].operands.push(OperandId, "'Component Type'");
+ InstructionDesc[OpTypeCooperativeMatrixNV].operands.push(OperandId, "'Scope'");
+ InstructionDesc[OpTypeCooperativeMatrixNV].operands.push(OperandId, "'Rows'");
+ InstructionDesc[OpTypeCooperativeMatrixNV].operands.push(OperandId, "'Columns'");
+
+ InstructionDesc[OpCooperativeMatrixLoadNV].operands.push(OperandId, "'Pointer'");
+ InstructionDesc[OpCooperativeMatrixLoadNV].operands.push(OperandId, "'Stride'");
+ InstructionDesc[OpCooperativeMatrixLoadNV].operands.push(OperandId, "'Column Major'");
+ InstructionDesc[OpCooperativeMatrixLoadNV].operands.push(OperandMemoryAccess, "'Memory Access'");
+ InstructionDesc[OpCooperativeMatrixLoadNV].operands.push(OperandLiteralNumber, "", true);
+ InstructionDesc[OpCooperativeMatrixLoadNV].operands.push(OperandId, "", true);
+
+ InstructionDesc[OpCooperativeMatrixStoreNV].operands.push(OperandId, "'Pointer'");
+ InstructionDesc[OpCooperativeMatrixStoreNV].operands.push(OperandId, "'Object'");
+ InstructionDesc[OpCooperativeMatrixStoreNV].operands.push(OperandId, "'Stride'");
+ InstructionDesc[OpCooperativeMatrixStoreNV].operands.push(OperandId, "'Column Major'");
+ InstructionDesc[OpCooperativeMatrixStoreNV].operands.push(OperandMemoryAccess, "'Memory Access'");
+ InstructionDesc[OpCooperativeMatrixStoreNV].operands.push(OperandLiteralNumber, "", true);
+ InstructionDesc[OpCooperativeMatrixStoreNV].operands.push(OperandId, "", true);
+
+ InstructionDesc[OpCooperativeMatrixMulAddNV].operands.push(OperandId, "'A'");
+ InstructionDesc[OpCooperativeMatrixMulAddNV].operands.push(OperandId, "'B'");
+ InstructionDesc[OpCooperativeMatrixMulAddNV].operands.push(OperandId, "'C'");
+
+ InstructionDesc[OpCooperativeMatrixLengthNV].operands.push(OperandId, "'Type'");
+}
+
+}; // end spv namespace
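
Reviewer note: a minimal sketch (the helper name is hypothetical) of how these tables are consumed once spv::Parameterize() has filled them in; InstructionDesc, OpcodeString() and the OperandParameters accessors are all declared in doc.h below.

    #include "doc.h"
    #include <cstdio>

    // Prints the declared operands of one opcode, assuming
    // spv::Parameterize() has already been called.
    static void dumpOperands(spv::Op opcode) {
        const spv::OperandParameters& ops = spv::InstructionDesc[opcode].operands;
        std::printf("%s:\n", spv::OpcodeString(opcode));
        for (int i = 0; i < ops.getNum(); ++i)
            std::printf("  %s%s\n", ops.getDesc(i),
                        ops.isOptional(i) ? " (optional)" : "");
    }

For example, dumpOperands(spv::OpEnqueueKernel) would list 'Queue', 'Flags', 'ND Range' and so on, with no "(optional)" suffix, since none of its operands is pushed with the optional flag above.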
diff --git a/thirdparty/glslang/SPIRV/doc.h b/thirdparty/glslang/SPIRV/doc.h
new file mode 100644
index 0000000000..293256a2c6
--- /dev/null
+++ b/thirdparty/glslang/SPIRV/doc.h
@@ -0,0 +1,258 @@
+//
+// Copyright (C) 2014-2015 LunarG, Inc.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+
+//
+// Parameterize the SPIR-V enumerants.
+//
+
+#pragma once
+
+#include "spirv.hpp"
+
+#include <vector>
+
+namespace spv {
+
+// Fill in all the parameters
+void Parameterize();
+
+// Return the English names of all the enums.
+const char* SourceString(int);
+const char* AddressingString(int);
+const char* MemoryString(int);
+const char* ExecutionModelString(int);
+const char* ExecutionModeString(int);
+const char* StorageClassString(int);
+const char* DecorationString(int);
+const char* BuiltInString(int);
+const char* DimensionString(int);
+const char* SelectControlString(int);
+const char* LoopControlString(int);
+const char* FunctionControlString(int);
+const char* SamplerAddressingModeString(int);
+const char* SamplerFilterModeString(int);
+const char* ImageFormatString(int);
+const char* ImageChannelOrderString(int);
+const char* ImageChannelTypeString(int);
+const char* ImageChannelDataTypeString(int type);
+const char* ImageOperandsString(int format);
+const char* ImageOperands(int);
+const char* FPFastMathString(int);
+const char* FPRoundingModeString(int);
+const char* LinkageTypeString(int);
+const char* FuncParamAttrString(int);
+const char* AccessQualifierString(int);
+const char* MemorySemanticsString(int);
+const char* MemoryAccessString(int);
+const char* ExecutionScopeString(int);
+const char* GroupOperationString(int);
+const char* KernelEnqueueFlagsString(int);
+const char* KernelProfilingInfoString(int);
+const char* CapabilityString(int);
+const char* OpcodeString(int);
+const char* ScopeString(int mem);
+
+// For grouping opcodes into subsections
+enum OpcodeClass {
+ OpClassMisc,
+ OpClassDebug,
+ OpClassAnnotate,
+ OpClassExtension,
+ OpClassMode,
+ OpClassType,
+ OpClassConstant,
+ OpClassMemory,
+ OpClassFunction,
+ OpClassImage,
+ OpClassConvert,
+ OpClassComposite,
+ OpClassArithmetic,
+ OpClassBit,
+ OpClassRelationalLogical,
+ OpClassDerivative,
+ OpClassFlowControl,
+ OpClassAtomic,
+ OpClassPrimitive,
+ OpClassBarrier,
+ OpClassGroup,
+ OpClassDeviceSideEnqueue,
+ OpClassPipe,
+
+ OpClassCount,
+ OpClassMissing // all instructions start out as missing
+};
+
+// For parameterizing operands.
+enum OperandClass {
+ OperandNone,
+ OperandId,
+ OperandVariableIds,
+ OperandOptionalLiteral,
+ OperandOptionalLiteralString,
+ OperandVariableLiterals,
+ OperandVariableIdLiteral,
+ OperandVariableLiteralId,
+ OperandLiteralNumber,
+ OperandLiteralString,
+ OperandSource,
+ OperandExecutionModel,
+ OperandAddressing,
+ OperandMemory,
+ OperandExecutionMode,
+ OperandStorage,
+ OperandDimensionality,
+ OperandSamplerAddressingMode,
+ OperandSamplerFilterMode,
+ OperandSamplerImageFormat,
+ OperandImageChannelOrder,
+ OperandImageChannelDataType,
+ OperandImageOperands,
+ OperandFPFastMath,
+ OperandFPRoundingMode,
+ OperandLinkageType,
+ OperandAccessQualifier,
+ OperandFuncParamAttr,
+ OperandDecoration,
+ OperandBuiltIn,
+ OperandSelect,
+ OperandLoop,
+ OperandFunction,
+ OperandMemorySemantics,
+ OperandMemoryAccess,
+ OperandScope,
+ OperandGroupOperation,
+ OperandKernelEnqueueFlags,
+ OperandKernelProfilingInfo,
+ OperandCapability,
+
+ OperandOpcode,
+
+ OperandCount
+};
+
+// Any specific enum can have a set of capabilities that allow it:
+typedef std::vector<Capability> EnumCaps;
+
+// Parameterize a set of operands with their OperandClass(es) and descriptions.
+class OperandParameters {
+public:
+ OperandParameters() { }
+ void push(OperandClass oc, const char* d, bool opt = false)
+ {
+ opClass.push_back(oc);
+ desc.push_back(d);
+ optional.push_back(opt);
+ }
+ void setOptional();
+ OperandClass getClass(int op) const { return opClass[op]; }
+ const char* getDesc(int op) const { return desc[op]; }
+ bool isOptional(int op) const { return optional[op]; }
+ int getNum() const { return (int)opClass.size(); }
+
+protected:
+ std::vector<OperandClass> opClass;
+ std::vector<const char*> desc;
+ std::vector<bool> optional;
+};
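
Reviewer note: a tiny hypothetical sketch of how an entry is built with this class, mirroring the Parameterize() calls in doc.cpp above; the third argument marks trailing operands that may be omitted in the binary.

    spv::OperandParameters ops;
    ops.push(spv::OperandId, "'Base'");         // required operand
    ops.push(spv::OperandId, "'Offset'", true); // optional trailing operand
    // ops.getNum() == 2 and ops.isOptional(1) == true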
+
+// Parameterize an enumerant
+class EnumParameters {
+public:
+ EnumParameters() : desc(0) { }
+ const char* desc;
+};
+
+// Parameterize a set of enumerants that form an enum
+class EnumDefinition : public EnumParameters {
+public:
+ EnumDefinition() :
+ ceiling(0), bitmask(false), getName(0), enumParams(0), operandParams(0) { }
+ void set(int ceil, const char* (*name)(int), EnumParameters* ep, bool mask = false)
+ {
+ ceiling = ceil;
+ getName = name;
+ bitmask = mask;
+ enumParams = ep;
+ }
+ void setOperands(OperandParameters* op) { operandParams = op; }
+ int ceiling; // ceiling of enumerants
+ bool bitmask; // true if these enumerants combine into a bitmask
+ const char* (*getName)(int); // a function that returns the name for each enumerant value (or shift)
+ EnumParameters* enumParams; // parameters for each individual enumerant
+ OperandParameters* operandParams; // sets of operands
+};
+
+// Parameterize an instruction's logical format, including its known set of operands,
+// per OperandParameters above.
+class InstructionParameters {
+public:
+ InstructionParameters() :
+ opDesc("TBD"),
+ opClass(OpClassMissing),
+ typePresent(true), // the common case; only exceptions have to be spelled out
+ resultPresent(true) // the common case; only exceptions have to be spelled out
+ { }
+
+ void setResultAndType(bool r, bool t)
+ {
+ resultPresent = r;
+ typePresent = t;
+ }
+
+ bool hasResult() const { return resultPresent != 0; }
+ bool hasType() const { return typePresent != 0; }
+
+ const char* opDesc;
+ OpcodeClass opClass;
+ OperandParameters operands;
+
+protected:
+ int typePresent : 1;
+ int resultPresent : 1;
+};
+
+// The set of objects that hold all the instruction/operand
+// parameterization information.
+extern InstructionParameters InstructionDesc[];
+
+// These hold definitions of the enumerants used for operands
+extern EnumDefinition OperandClassParams[];
+
+const char* GetOperandDesc(OperandClass operand);
+void PrintImmediateRow(int imm, const char* name, const EnumParameters* enumParams, bool caps, bool hex = false);
+const char* AccessQualifierString(int attr);
+
+void PrintOperands(const OperandParameters& operands, int reservedOperands);
+
+} // end namespace spv
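
Reviewer note: a short sketch of the result/type defaults declared above; most SPIR-V instructions produce a result id with a result type, so Parameterize() only calls setResultAndType() for the exceptions (e.g. OpTraceNV in doc.cpp above).

    spv::InstructionParameters desc;     // defaults: result and type present
    // desc.hasResult() == true, desc.hasType() == true
    desc.setResultAndType(false, false); // a pure statement, like OpTraceNV
    // desc.hasResult() == false, desc.hasType() == false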
diff --git a/thirdparty/glslang/SPIRV/hex_float.h b/thirdparty/glslang/SPIRV/hex_float.h
new file mode 100644
index 0000000000..905b21a45a
--- /dev/null
+++ b/thirdparty/glslang/SPIRV/hex_float.h
@@ -0,0 +1,1078 @@
+// Copyright (c) 2015-2016 The Khronos Group Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef LIBSPIRV_UTIL_HEX_FLOAT_H_
+#define LIBSPIRV_UTIL_HEX_FLOAT_H_
+
+#include <cassert>
+#include <cctype>
+#include <cmath>
+#include <cstdint>
+#include <cstring> // for strchr(), used by get_nibble_from_character()
+#include <iomanip>
+#include <limits>
+#include <sstream>
+
+#if defined(_MSC_VER) && _MSC_VER < 1800
+namespace std {
+bool isnan(double f)
+{
+ return ::_isnan(f) != 0;
+}
+bool isinf(double f)
+{
+ return ::_finite(f) == 0;
+}
+}
+#endif
+
+#include "bitutils.h"
+
+namespace spvutils {
+
+class Float16 {
+ public:
+ Float16(uint16_t v) : val(v) {}
+ Float16() {}
+ static bool isNan(const Float16& val) {
+ return ((val.val & 0x7C00) == 0x7C00) && ((val.val & 0x3FF) != 0);
+ }
+ // Returns true if the given value is any kind of infinity.
+ static bool isInfinity(const Float16& val) {
+ return ((val.val & 0x7C00) == 0x7C00) && ((val.val & 0x3FF) == 0);
+ }
+ Float16(const Float16& other) { val = other.val; }
+ uint16_t get_value() const { return val; }
+
+ // Returns the maximum normal value.
+ static Float16 max() { return Float16(0x7bff); }
+ // Returns the lowest normal value.
+ static Float16 lowest() { return Float16(0xfbff); }
+
+ private:
+ uint16_t val;
+};
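
Reviewer note: the 0x7C00/0x3FF masks above are the IEEE-754 binary16 exponent and fraction fields; a few concrete bit patterns, for illustration only:

    spvutils::Float16 inf(0x7C00);  // all-ones exponent, zero fraction
    spvutils::Float16 nan(0x7E00);  // all-ones exponent, non-zero fraction
    // Float16::isInfinity(inf) == true, Float16::isNan(inf) == false
    // Float16::isNan(nan) == true
    // Float16::max().get_value() == 0x7BFF, the largest finite half (65504.0)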
+
+// To specialize this type, you must override uint_type to define
+// an unsigned integer that can fit your floating point type.
+// You must also add an isNan function that returns true if
+// the value is NaN.
+template <typename T>
+struct FloatProxyTraits {
+ typedef void uint_type;
+};
+
+template <>
+struct FloatProxyTraits<float> {
+ typedef uint32_t uint_type;
+ static bool isNan(float f) { return std::isnan(f); }
+ // Returns true if the given value is any kind of infinity.
+ static bool isInfinity(float f) { return std::isinf(f); }
+ // Returns the maximum normal value.
+ static float max() { return std::numeric_limits<float>::max(); }
+ // Returns the lowest normal value.
+ static float lowest() { return std::numeric_limits<float>::lowest(); }
+};
+
+template <>
+struct FloatProxyTraits<double> {
+ typedef uint64_t uint_type;
+ static bool isNan(double f) { return std::isnan(f); }
+ // Returns true if the given value is any kind of infinity.
+ static bool isInfinity(double f) { return std::isinf(f); }
+ // Returns the maximum normal value.
+ static double max() { return std::numeric_limits<double>::max(); }
+ // Returns the lowest normal value.
+ static double lowest() { return std::numeric_limits<double>::lowest(); }
+};
+
+template <>
+struct FloatProxyTraits<Float16> {
+ typedef uint16_t uint_type;
+ static bool isNan(Float16 f) { return Float16::isNan(f); }
+ // Returns true if the given value is any kind of infinity.
+ static bool isInfinity(Float16 f) { return Float16::isInfinity(f); }
+ // Returns the maximum normal value.
+ static Float16 max() { return Float16::max(); }
+ // Returns the lowest normal value.
+ static Float16 lowest() { return Float16::lowest(); }
+};
+
+// Since copying a floating point number (especially if it is NaN)
+// does not guarantee that bits are preserved, this class lets us
+// store the type and use it as a float when necessary.
+template <typename T>
+class FloatProxy {
+ public:
+ typedef typename FloatProxyTraits<T>::uint_type uint_type;
+
+ // Since this is to act similar to the normal floats,
+ // do not initialize the data by default.
+ FloatProxy() {}
+
+ // Intentionally non-explicit. This is a proxy type so
+ // implicit conversions allow us to use it more transparently.
+ FloatProxy(T val) { data_ = BitwiseCast<uint_type>(val); }
+
+ // Intentionally non-explicit. This is a proxy type so
+ // implicit conversions allow us to use it more transparently.
+ FloatProxy(uint_type val) { data_ = val; }
+
+ // This is helpful to have and is guaranteed not to stomp bits.
+ FloatProxy<T> operator-() const {
+ return static_cast<uint_type>(data_ ^
+ (uint_type(0x1) << (sizeof(T) * 8 - 1)));
+ }
+
+ // Returns the data as a floating point value.
+ T getAsFloat() const { return BitwiseCast<T>(data_); }
+
+ // Returns the raw data.
+ uint_type data() const { return data_; }
+
+ // Returns true if the value represents any type of NaN.
+ bool isNan() { return FloatProxyTraits<T>::isNan(getAsFloat()); }
+ // Returns true if the value represents any type of infinity.
+ bool isInfinity() { return FloatProxyTraits<T>::isInfinity(getAsFloat()); }
+
+ // Returns the maximum normal value.
+ static FloatProxy<T> max() {
+ return FloatProxy<T>(FloatProxyTraits<T>::max());
+ }
+ // Returns the lowest normal value.
+ static FloatProxy<T> lowest() {
+ return FloatProxy<T>(FloatProxyTraits<T>::lowest());
+ }
+
+ private:
+ uint_type data_;
+};
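
Reviewer note: the point of the proxy is bit-exact storage, since copying a NaN through a plain float may quiet it or drop payload bits; a minimal sketch, with an illustrative bit pattern:

    spvutils::FloatProxy<float> p(0x7F800001u); // a signalling-NaN pattern
    // p.data() == 0x7F800001  -- the bits are preserved exactly
    // p.isNan() == true
    spvutils::FloatProxy<float> n = -p;         // flips only the sign bit
    // n.data() == 0xFF800001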
+
+template <typename T>
+bool operator==(const FloatProxy<T>& first, const FloatProxy<T>& second) {
+ return first.data() == second.data();
+}
+
+// Reads a FloatProxy value as a normal float from a stream.
+template <typename T>
+std::istream& operator>>(std::istream& is, FloatProxy<T>& value) {
+ T float_val;
+ is >> float_val;
+ value = FloatProxy<T>(float_val);
+ return is;
+}
+
+// This is an example traits class. It is not meant to be used in practice,
+// but it serves as the default for any non-specialized type.
+template <typename T>
+struct HexFloatTraits {
+ // Integer type that can store this hex-float.
+ typedef void uint_type;
+ // Signed integer type that can store this hex-float.
+ typedef void int_type;
+ // The numerical type that this HexFloat represents.
+ typedef void underlying_type;
+ // The type needed to construct the underlying type.
+ typedef void native_type;
+ // The number of bits that are actually relevant in the uint_type.
+ // This allows us to deal with, for example, 24-bit values in a 32-bit
+ // integer.
+ static const uint32_t num_used_bits = 0;
+ // Number of bits that represent the exponent.
+ static const uint32_t num_exponent_bits = 0;
+ // Number of bits that represent the fractional part.
+ static const uint32_t num_fraction_bits = 0;
+ // The bias of the exponent. (How much we need to subtract from the stored
+ // value to get the correct value.)
+ static const uint32_t exponent_bias = 0;
+};
+
+// Traits for IEEE float.
+// 1 sign bit, 8 exponent bits, 23 fractional bits.
+template <>
+struct HexFloatTraits<FloatProxy<float>> {
+ typedef uint32_t uint_type;
+ typedef int32_t int_type;
+ typedef FloatProxy<float> underlying_type;
+ typedef float native_type;
+ static const uint_type num_used_bits = 32;
+ static const uint_type num_exponent_bits = 8;
+ static const uint_type num_fraction_bits = 23;
+ static const uint_type exponent_bias = 127;
+};
+
+// Traits for IEEE double.
+// 1 sign bit, 11 exponent bits, 52 fractional bits.
+template <>
+struct HexFloatTraits<FloatProxy<double>> {
+ typedef uint64_t uint_type;
+ typedef int64_t int_type;
+ typedef FloatProxy<double> underlying_type;
+ typedef double native_type;
+ static const uint_type num_used_bits = 64;
+ static const uint_type num_exponent_bits = 11;
+ static const uint_type num_fraction_bits = 52;
+ static const uint_type exponent_bias = 1023;
+};
+
+// Traits for IEEE half.
+// 1 sign bit, 5 exponent bits, 10 fractional bits.
+template <>
+struct HexFloatTraits<FloatProxy<Float16>> {
+ typedef uint16_t uint_type;
+ typedef int16_t int_type;
+ typedef uint16_t underlying_type;
+ typedef uint16_t native_type;
+ static const uint_type num_used_bits = 16;
+ static const uint_type num_exponent_bits = 5;
+ static const uint_type num_fraction_bits = 10;
+ static const uint_type exponent_bias = 15;
+};
+
+enum round_direction {
+ kRoundToZero,
+ kRoundToNearestEven,
+ kRoundToPositiveInfinity,
+ kRoundToNegativeInfinity
+};
+
+// Template class that houses a floating point number.
+// It exposes a number of constants based on the provided traits to
+// assist in interpreting the bits of the value.
+template <typename T, typename Traits = HexFloatTraits<T>>
+class HexFloat {
+ public:
+ typedef typename Traits::uint_type uint_type;
+ typedef typename Traits::int_type int_type;
+ typedef typename Traits::underlying_type underlying_type;
+ typedef typename Traits::native_type native_type;
+
+ explicit HexFloat(T f) : value_(f) {}
+
+ T value() const { return value_; }
+ void set_value(T f) { value_ = f; }
+
+ // These are all written like this because it is convenient to have
+ // compile-time constants for all of these values.
+
+ // Pass-through values to save typing.
+ static const uint32_t num_used_bits = Traits::num_used_bits;
+ static const uint32_t exponent_bias = Traits::exponent_bias;
+ static const uint32_t num_exponent_bits = Traits::num_exponent_bits;
+ static const uint32_t num_fraction_bits = Traits::num_fraction_bits;
+
+ // Number of bits to shift left to set the highest relevant bit.
+ static const uint32_t top_bit_left_shift = num_used_bits - 1;
+ // How many nibbles (hex characters) the fractional part takes up.
+ static const uint32_t fraction_nibbles = (num_fraction_bits + 3) / 4;
+ // If the fractional part does not fit evenly into a hex character (4-bits)
+ // then we have to left-shift to get rid of leading 0s. This is the amount
+ // we have to shift (might be 0).
+ static const uint32_t num_overflow_bits =
+ fraction_nibbles * 4 - num_fraction_bits;
+
+ // The representation of the fraction, not the actual bits. This
+ // includes the leading bit that is usually implicit.
+ static const uint_type fraction_represent_mask =
+ spvutils::SetBits<uint_type, 0,
+ num_fraction_bits + num_overflow_bits>::get;
+
+ // The topmost bit in the nibble-aligned fraction.
+ static const uint_type fraction_top_bit =
+ uint_type(1) << (num_fraction_bits + num_overflow_bits - 1);
+
+ // The least significant bit in the exponent, which is also the bit
+ // immediately to the left of the significand.
+ static const uint_type first_exponent_bit = uint_type(1)
+ << (num_fraction_bits);
+
+ // The mask for the encoded fraction. It does not include the
+ // implicit bit.
+ static const uint_type fraction_encode_mask =
+ spvutils::SetBits<uint_type, 0, num_fraction_bits>::get;
+
+ // The bit that is used as a sign.
+ static const uint_type sign_mask = uint_type(1) << top_bit_left_shift;
+
+ // The bits that represent the exponent.
+ static const uint_type exponent_mask =
+ spvutils::SetBits<uint_type, num_fraction_bits, num_exponent_bits>::get;
+
+ // How far left the exponent is shifted.
+ static const uint32_t exponent_left_shift = num_fraction_bits;
+
+ // How far from the right edge the fraction is shifted.
+ static const uint32_t fraction_right_shift =
+ static_cast<uint32_t>(sizeof(uint_type) * 8) - num_fraction_bits;
+
+ // The maximum representable unbiased exponent.
+ static const int_type max_exponent =
+ (exponent_mask >> num_fraction_bits) - exponent_bias;
+ // The minimum representable exponent for normalized numbers.
+ static const int_type min_exponent = -static_cast<int_type>(exponent_bias);
+
+ // Returns the bits associated with the value.
+ uint_type getBits() const { return spvutils::BitwiseCast<uint_type>(value_); }
+
+ // Returns the bits associated with the value, without the leading sign bit.
+ uint_type getUnsignedBits() const {
+ return static_cast<uint_type>(spvutils::BitwiseCast<uint_type>(value_) &
+ ~sign_mask);
+ }
+
+ // Returns the bits associated with the exponent, shifted to start at the
+ // lsb of the type.
+ const uint_type getExponentBits() const {
+ return static_cast<uint_type>((getBits() & exponent_mask) >>
+ num_fraction_bits);
+ }
+
+ // Returns the exponent in unbiased form. This is the exponent in the
+ // human-friendly form.
+ const int_type getUnbiasedExponent() const {
+ return static_cast<int_type>(getExponentBits() - exponent_bias);
+ }
+
+ // Returns just the significand bits from the value.
+ const uint_type getSignificandBits() const {
+ return getBits() & fraction_encode_mask;
+ }
+
+ // If the number was normalized, returns the unbiased exponent.
+ // If the number was denormal, normalize the exponent first.
+ const int_type getUnbiasedNormalizedExponent() const {
+ if ((getBits() & ~sign_mask) == 0) { // special case if everything is 0
+ return 0;
+ }
+ int_type exp = getUnbiasedExponent();
+ if (exp == min_exponent) { // We are in denorm land.
+ uint_type significand_bits = getSignificandBits();
+ while ((significand_bits & (first_exponent_bit >> 1)) == 0) {
+ significand_bits = static_cast<uint_type>(significand_bits << 1);
+ exp = static_cast<int_type>(exp - 1);
+ }
+ significand_bits &= fraction_encode_mask;
+ }
+ return exp;
+ }
+
+ // Returns the significand after it has been normalized.
+ const uint_type getNormalizedSignificand() const {
+ int_type unbiased_exponent = getUnbiasedNormalizedExponent();
+ uint_type significand = getSignificandBits();
+ for (int_type i = unbiased_exponent; i <= min_exponent; ++i) {
+ significand = static_cast<uint_type>(significand << 1);
+ }
+ significand &= fraction_encode_mask;
+ return significand;
+ }
+
+ // Returns true if this number represents a negative value.
+ bool isNegative() const { return (getBits() & sign_mask) != 0; }
+
+ // Sets this HexFloat from the individual components.
+ // Note this assumes EVERY significand is normalized, and has an implicit
+ // leading one. This means that the only way this method can produce 0
+ // is if you pass a number so denormalized that it underflows.
+ // Do not use this method with raw bits extracted from a subnormal number,
+ // since subnormals do not have an implicit leading 1 in the significand.
+ // The significand is also expected to be in the
+ // low-order num_fraction_bits of the uint_type.
+ // The exponent is expected to be unbiased, meaning an exponent of
+ // 0 actually means 0.
+ // If round_denorm_up is set, then a non-zero number that would otherwise
+ // underflow is rounded up to the smallest denorm instead of flushing to 0.
+ void setFromSignUnbiasedExponentAndNormalizedSignificand(
+ bool negative, int_type exponent, uint_type significand,
+ bool round_denorm_up) {
+ bool significand_is_zero = significand == 0;
+
+ if (exponent <= min_exponent) {
+ // The number is (or is becoming) denormal: shift the implicit
+ // leading 1 into the explicit significand, which is therefore
+ // no longer zero.
+ significand_is_zero = false;
+ significand |= first_exponent_bit;
+ significand = static_cast<uint_type>(significand >> 1);
+ }
+
+ while (exponent < min_exponent) {
+ significand = static_cast<uint_type>(significand >> 1);
+ ++exponent;
+ }
+
+ if (exponent == min_exponent) {
+ if (significand == 0 && !significand_is_zero && round_denorm_up) {
+ significand = static_cast<uint_type>(0x1);
+ }
+ }
+
+ uint_type new_value = 0;
+ if (negative) {
+ new_value = static_cast<uint_type>(new_value | sign_mask);
+ }
+ exponent = static_cast<int_type>(exponent + exponent_bias);
+ assert(exponent >= 0);
+
+ // put it all together
+ exponent = static_cast<uint_type>((exponent << exponent_left_shift) &
+ exponent_mask);
+ significand = static_cast<uint_type>(significand & fraction_encode_mask);
+ new_value = static_cast<uint_type>(new_value | (exponent | significand));
+ value_ = BitwiseCast<T>(new_value);
+ }
+
+ // Increments the significand of this number by the given amount.
+ // If this would spill the significand into the implicit bit,
+ // carry is set to true and the significand is shifted to fit into
+ // the correct location, otherwise carry is set to false.
+ // All significands and to_increment are assumed to be within the bounds
+ // for a valid significand.
+ static uint_type incrementSignificand(uint_type significand,
+ uint_type to_increment, bool* carry) {
+ significand = static_cast<uint_type>(significand + to_increment);
+ *carry = false;
+ if (significand & first_exponent_bit) {
+ *carry = true;
+ // The implicit 1-bit will have carried, so we should zero-out the
+ // top bit and shift back.
+ significand = static_cast<uint_type>(significand & ~first_exponent_bit);
+ significand = static_cast<uint_type>(significand >> 1);
+ }
+ return significand;
+ }
+
+ // These exist because MSVC warns about negative shift amounts even
+ // when the shift is never executed, e.g.:
+ //   constant_number < 0 ? 0 : constant_number
+ // The helpers convert a negative shift in one direction into a
+ // positive shift in the opposite direction.
+
+ template <typename int_type>
+ uint_type negatable_left_shift(int_type N, uint_type val)
+ {
+ if(N >= 0)
+ return val << N;
+
+ return val >> -N;
+ }
+
+ template <typename int_type>
+ uint_type negatable_right_shift(int_type N, uint_type val)
+ {
+ if(N >= 0)
+ return val >> N;
+
+ return val << -N;
+ }
+
+ // Returns the significand, rounded to fit in a significand in
+ // other_T. This is shifted so that the most significant
+ // bit of the rounded number lines up with the most significant bit
+ // of the returned significand.
+ template <typename other_T>
+ typename other_T::uint_type getRoundedNormalizedSignificand(
+ round_direction dir, bool* carry_bit) {
+ typedef typename other_T::uint_type other_uint_type;
+ static const int_type num_throwaway_bits =
+ static_cast<int_type>(num_fraction_bits) -
+ static_cast<int_type>(other_T::num_fraction_bits);
+
+ static const uint_type last_significant_bit =
+ (num_throwaway_bits < 0)
+ ? 0
+ : negatable_left_shift(num_throwaway_bits, 1u);
+ static const uint_type first_rounded_bit =
+ (num_throwaway_bits < 1)
+ ? 0
+ : negatable_left_shift(num_throwaway_bits - 1, 1u);
+
+ static const uint_type throwaway_mask_bits =
+ num_throwaway_bits > 0 ? num_throwaway_bits : 0;
+ static const uint_type throwaway_mask =
+ spvutils::SetBits<uint_type, 0, throwaway_mask_bits>::get;
+
+ *carry_bit = false;
+ other_uint_type out_val = 0;
+ uint_type significand = getNormalizedSignificand();
+ // If we are up-casting, then we just have to shift to the right location.
+ if (num_throwaway_bits <= 0) {
+ out_val = static_cast<other_uint_type>(significand);
+ uint_type shift_amount = static_cast<uint_type>(-num_throwaway_bits);
+ out_val = static_cast<other_uint_type>(out_val << shift_amount);
+ return out_val;
+ }
+
+ // If every non-representable bit is 0, then we don't have any casting to
+ // do.
+ if ((significand & throwaway_mask) == 0) {
+ return static_cast<other_uint_type>(
+ negatable_right_shift(num_throwaway_bits, significand));
+ }
+
+ bool round_away_from_zero = false;
+ // We actually have to narrow the significand here, so we have to follow the
+ // rounding rules.
+ switch (dir) {
+ case kRoundToZero:
+ break;
+ case kRoundToPositiveInfinity:
+ round_away_from_zero = !isNegative();
+ break;
+ case kRoundToNegativeInfinity:
+ round_away_from_zero = isNegative();
+ break;
+ case kRoundToNearestEven:
+ // Have to round down, round bit is 0
+ if ((first_rounded_bit & significand) == 0) {
+ break;
+ }
+ if (((significand & throwaway_mask) & ~first_rounded_bit) != 0) {
+ // If any subsequent bit of the rounded portion is non-0 then we round
+ // up.
+ round_away_from_zero = true;
+ break;
+ }
+ // We are exactly half-way between 2 numbers, pick even.
+ if ((significand & last_significant_bit) != 0) {
+ // 1 for our last bit, round up.
+ round_away_from_zero = true;
+ break;
+ }
+ break;
+ }
+
+ if (round_away_from_zero) {
+ return static_cast<other_uint_type>(
+ negatable_right_shift(num_throwaway_bits, incrementSignificand(
+ significand, last_significant_bit, carry_bit)));
+ } else {
+ return static_cast<other_uint_type>(
+ negatable_right_shift(num_throwaway_bits, significand));
+ }
+ }
+
+ // Casts this value to another HexFloat. If the cast is widening,
+ // then round_dir is ignored. If the cast is narrowing, then
+ // the result is rounded in the direction specified.
+ // This number will retain NaN and Inf values.
+ // It will also saturate to Inf if the number overflows, and
+ // underflow to (0 or min depending on rounding) if the number underflows.
+ template <typename other_T>
+ void castTo(other_T& other, round_direction round_dir) {
+ other = other_T(static_cast<typename other_T::native_type>(0));
+ bool negate = isNegative();
+ if (getUnsignedBits() == 0) {
+ if (negate) {
+ other.set_value(-other.value());
+ }
+ return;
+ }
+ uint_type significand = getSignificandBits();
+ bool carried = false;
+ typename other_T::uint_type rounded_significand =
+ getRoundedNormalizedSignificand<other_T>(round_dir, &carried);
+
+ int_type exponent = getUnbiasedExponent();
+ if (exponent == min_exponent) {
+ // If we are denormal, normalize the exponent, so that we can encode
+ // easily.
+ exponent = static_cast<int_type>(exponent + 1);
+ for (uint_type check_bit = first_exponent_bit >> 1; check_bit != 0;
+ check_bit = static_cast<uint_type>(check_bit >> 1)) {
+ exponent = static_cast<int_type>(exponent - 1);
+ if (check_bit & significand) break;
+ }
+ }
+
+ bool is_nan =
+ (getBits() & exponent_mask) == exponent_mask && significand != 0;
+ bool is_inf =
+ !is_nan &&
+ ((exponent + carried) > static_cast<int_type>(other_T::exponent_bias) ||
+ (significand == 0 && (getBits() & exponent_mask) == exponent_mask));
+
+ // If we are Nan or Inf we should pass that through.
+ if (is_inf) {
+ other.set_value(BitwiseCast<typename other_T::underlying_type>(
+ static_cast<typename other_T::uint_type>(
+ (negate ? other_T::sign_mask : 0) | other_T::exponent_mask)));
+ return;
+ }
+ if (is_nan) {
+ typename other_T::uint_type shifted_significand;
+ shifted_significand = static_cast<typename other_T::uint_type>(
+ negatable_left_shift(
+ static_cast<int_type>(other_T::num_fraction_bits) -
+ static_cast<int_type>(num_fraction_bits), significand));
+
+ // We are some sort of NaN. We try to keep the bit-pattern of the NaN
+ // as close as possible. If shifting dropped every significand bit,
+ // set the last bit so the result remains a NaN.
+ other.set_value(BitwiseCast<typename other_T::underlying_type>(
+ static_cast<typename other_T::uint_type>(
+ (negate ? other_T::sign_mask : 0) | other_T::exponent_mask |
+ (shifted_significand == 0 ? 0x1 : shifted_significand))));
+ return;
+ }
+
+ bool round_underflow_up =
+ isNegative() ? round_dir == kRoundToNegativeInfinity
+ : round_dir == kRoundToPositiveInfinity;
+ typedef typename other_T::int_type other_int_type;
+ // setFromSignUnbiasedExponentAndNormalizedSignificand will
+ // zero out any underflowing value (but retain the sign).
+ other.setFromSignUnbiasedExponentAndNormalizedSignificand(
+ negate, static_cast<other_int_type>(exponent), rounded_significand,
+ round_underflow_up);
+ return;
+ }
+
+ private:
+ T value_;
+
+ static_assert(num_used_bits ==
+ Traits::num_exponent_bits + Traits::num_fraction_bits + 1,
+ "The number of bits do not fit");
+ static_assert(sizeof(T) == sizeof(uint_type), "The type sizes do not match");
+};
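
Reviewer note: a minimal sketch of castTo() narrowing a float to a half with round-toward-zero; the values are standard IEEE encodings, the variable names are illustrative only.

    using namespace spvutils;
    HexFloat<FloatProxy<float>> f(1.0f);
    HexFloat<FloatProxy<Float16>> h(FloatProxy<Float16>(uint16_t(0)));
    f.castTo(h, kRoundToZero);
    // h.value().data() == 0x3C00, the binary16 encoding of 1.0
    // Values too large for binary16 saturate to infinity; values too
    // small flush to zero (or the smallest denorm, per the direction).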
+
+// Returns 4 bits represented by the hex character.
+inline uint8_t get_nibble_from_character(int character) {
+ const char* dec = "0123456789";
+ const char* lower = "abcdef";
+ const char* upper = "ABCDEF";
+ const char* p = nullptr;
+ if ((p = strchr(dec, character))) {
+ return static_cast<uint8_t>(p - dec);
+ } else if ((p = strchr(lower, character))) {
+ return static_cast<uint8_t>(p - lower + 0xa);
+ } else if ((p = strchr(upper, character))) {
+ return static_cast<uint8_t>(p - upper + 0xa);
+ }
+
+ assert(false && "This was called with a non-hex character");
+ return 0;
+}
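
Reviewer note: the lookup is case-insensitive over [0-9a-fA-F]; for illustration:

    assert(spvutils::get_nibble_from_character('7') == 0x7);
    assert(spvutils::get_nibble_from_character('a') == 0xA);
    assert(spvutils::get_nibble_from_character('F') == 0xF);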
+
+// Outputs the given HexFloat to the stream.
+template <typename T, typename Traits>
+std::ostream& operator<<(std::ostream& os, const HexFloat<T, Traits>& value) {
+ typedef HexFloat<T, Traits> HF;
+ typedef typename HF::uint_type uint_type;
+ typedef typename HF::int_type int_type;
+
+ static_assert(HF::num_used_bits != 0,
+ "num_used_bits must be non-zero for a valid float");
+ static_assert(HF::num_exponent_bits != 0,
+ "num_exponent_bits must be non-zero for a valid float");
+ static_assert(HF::num_fraction_bits != 0,
+ "num_fractin_bits must be non-zero for a valid float");
+
+ const uint_type bits = spvutils::BitwiseCast<uint_type>(value.value());
+ const char* const sign = (bits & HF::sign_mask) ? "-" : "";
+ const uint_type exponent = static_cast<uint_type>(
+ (bits & HF::exponent_mask) >> HF::num_fraction_bits);
+
+ uint_type fraction = static_cast<uint_type>((bits & HF::fraction_encode_mask)
+ << HF::num_overflow_bits);
+
+ const bool is_zero = exponent == 0 && fraction == 0;
+ const bool is_denorm = exponent == 0 && !is_zero;
+
+ // exponent contains the biased exponent; we have to convert it back
+ // into the unbiased range.
+ int_type int_exponent = static_cast<int_type>(exponent - HF::exponent_bias);
+ // If the number is all zeros, then we actually have to NOT shift the
+ // exponent.
+ int_exponent = is_zero ? 0 : int_exponent;
+
+ // If we are denorm, then start shifting, and decreasing the exponent until
+ // our leading bit is 1.
+
+ if (is_denorm) {
+ while ((fraction & HF::fraction_top_bit) == 0) {
+ fraction = static_cast<uint_type>(fraction << 1);
+ int_exponent = static_cast<int_type>(int_exponent - 1);
+ }
+ // Since this is denormalized, we have to consume the leading 1 since it
+ // will end up being implicit.
+ fraction = static_cast<uint_type>(fraction << 1); // eat the leading 1
+ fraction &= HF::fraction_represent_mask;
+ }
+
+ uint_type fraction_nibbles = HF::fraction_nibbles;
+ // We do not have to display any trailing 0s, since this represents the
+ // fractional part.
+ while (fraction_nibbles > 0 && (fraction & 0xF) == 0) {
+ // Shift off any trailing zero nibbles.
+ fraction = static_cast<uint_type>(fraction >> 4);
+ --fraction_nibbles;
+ }
+
+ const auto saved_flags = os.flags();
+ const auto saved_fill = os.fill();
+
+ os << sign << "0x" << (is_zero ? '0' : '1');
+ if (fraction_nibbles) {
+ // Make sure to keep the leading 0s in place, since this is the fractional
+ // part.
+ os << "." << std::setw(static_cast<int>(fraction_nibbles))
+ << std::setfill('0') << std::hex << fraction;
+ }
+ os << "p" << std::dec << (int_exponent >= 0 ? "+" : "") << int_exponent;
+
+ os.flags(saved_flags);
+ os.fill(saved_fill);
+
+ return os;
+}
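
Reviewer note: two worked outputs under the formatting rules above; trailing zero nibbles of the fraction are dropped, so halves and quarters stay short:

    std::ostringstream out;
    out << spvutils::HexFloat<spvutils::FloatProxy<float>>(0.5f);
    // out.str() == "0x1p-1"
    out.str("");
    out << spvutils::HexFloat<spvutils::FloatProxy<float>>(1.5f);
    // out.str() == "0x1.8p+0"  (fraction 0.5 -> one nibble, 0x8)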
+
+// Returns true if negate_value is true and the next character on the
+// input stream is a plus or minus sign. In that case we also set the fail bit
+// on the stream and set the value to the zero value for its type.
+template <typename T, typename Traits>
+inline bool RejectParseDueToLeadingSign(std::istream& is, bool negate_value,
+ HexFloat<T, Traits>& value) {
+ if (negate_value) {
+ auto next_char = is.peek();
+ if (next_char == '-' || next_char == '+') {
+ // Fail the parse. Emulate standard behaviour by setting the value to
+ // the zero value, and set the fail bit on the stream.
+ value = HexFloat<T, Traits>(typename HexFloat<T, Traits>::uint_type(0));
+ is.setstate(std::ios_base::failbit);
+ return true;
+ }
+ }
+ return false;
+}
+
+// Parses a floating point number from the given stream and stores it into the
+// value parameter.
+// If negate_value is true then the number may not have a leading minus or
+// plus, and if it successfully parses, then the number is negated before
+// being stored into the value parameter.
+// If the value cannot be correctly parsed or overflows the target floating
+// point type, then set the fail bit on the stream.
+// TODO(dneto): Promise C++11 standard behavior in how the value is set in
+// the error case, but only after all target platforms implement it correctly.
+// In particular, the Microsoft C++ runtime appears to be out of spec.
+template <typename T, typename Traits>
+inline std::istream& ParseNormalFloat(std::istream& is, bool negate_value,
+ HexFloat<T, Traits>& value) {
+ if (RejectParseDueToLeadingSign(is, negate_value, value)) {
+ return is;
+ }
+ T val;
+ is >> val;
+ if (negate_value) {
+ val = -val;
+ }
+ value.set_value(val);
+ // In the failure case, map -0.0 to 0.0.
+ if (is.fail() && value.getUnsignedBits() == 0u) {
+ value = HexFloat<T, Traits>(typename HexFloat<T, Traits>::uint_type(0));
+ }
+ if (val.isInfinity()) {
+ // Fail the parse. Emulate standard behaviour by setting the value to
+ // the closest normal value, and set the fail bit on the stream.
+ value.set_value((value.isNegative() | negate_value) ? T::lowest()
+ : T::max());
+ is.setstate(std::ios_base::failbit);
+ }
+ return is;
+}
+
+// Specialization of ParseNormalFloat for FloatProxy<Float16> values.
+// This will parse the float as if it were a 32-bit floating point number,
+// and then round it down to fit into a Float16 value.
+// The number is rounded towards zero.
+// If negate_value is true then the number may not have a leading minus or
+// plus, and if it successfully parses, then the number is negated before
+// being stored into the value parameter.
+// If the value cannot be correctly parsed or overflows the target floating
+// point type, then set the fail bit on the stream.
+// TODO(dneto): Promise C++11 standard behavior in how the value is set in
+// the error case, but only after all target platforms implement it correctly.
+// In particular, the Microsoft C++ runtime appears to be out of spec.
+template <>
+inline std::istream&
+ParseNormalFloat<FloatProxy<Float16>, HexFloatTraits<FloatProxy<Float16>>>(
+ std::istream& is, bool negate_value,
+ HexFloat<FloatProxy<Float16>, HexFloatTraits<FloatProxy<Float16>>>& value) {
+ // First parse as a 32-bit float.
+ HexFloat<FloatProxy<float>> float_val(0.0f);
+ ParseNormalFloat(is, negate_value, float_val);
+
+ // Then convert to 16-bit float, saturating at infinities, and
+ // rounding toward zero.
+ float_val.castTo(value, kRoundToZero);
+
+ // Overflow on 16-bit behaves the same as for 32- and 64-bit: set the
+ // fail bit and set the lowest or highest value.
+ if (Float16::isInfinity(value.value().getAsFloat())) {
+ value.set_value(value.isNegative() ? Float16::lowest() : Float16::max());
+ is.setstate(std::ios_base::failbit);
+ }
+ return is;
+}
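
Reviewer note: a sketch of the saturating behaviour described above; 70000 overflows binary16 (whose largest finite value is 65504), so the parse stores Float16::max() and sets the fail bit:

    spvutils::HexFloat<spvutils::FloatProxy<spvutils::Float16>>
        h(spvutils::FloatProxy<spvutils::Float16>(uint16_t(0)));
    std::istringstream in("70000");
    in >> h; // parsed as a 32-bit float, then narrowed toward zero
    // in.fail() == true, h.value().data() == 0x7BFF (Float16::max())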
+
+// Reads a HexFloat from the given stream.
+// If the float is not encoded as a hex-float then it will be parsed
+// as a regular float.
+// This may fail if your stream does not support at least one unget.
+// NaN values can be encoded as "0x1.<non-zero fraction>p+(exponent_bias+1)"
+// (p+128 for IEEE 32-bit floats, matching the examples below). This sets
+// the maximum biased exponent with a non-zero fraction, which is exactly
+// the encoding of a NaN, so it is parsed as the correct NaN rather than
+// rounding to infinity. To encode inf, use a zero fraction with that same
+// exponent, or any larger exponent; values past the maximum saturate to inf.
+// Examples using IEEE 32-bit float encoding.
+// 0x1.0p+128 (+inf)
+// -0x1.0p-128 (-inf)
+//
+// 0x1.1p+128 (+Nan)
+// -0x1.1p+128 (-Nan)
+//
+// 0x1p+129 (+inf)
+// -0x1p+129 (-inf)
+template <typename T, typename Traits>
+std::istream& operator>>(std::istream& is, HexFloat<T, Traits>& value) {
+ using HF = HexFloat<T, Traits>;
+ using uint_type = typename HF::uint_type;
+ using int_type = typename HF::int_type;
+
+ value.set_value(static_cast<typename HF::native_type>(0.f));
+
+ if (is.flags() & std::ios::skipws) {
+ // If the user wants to skip whitespace, then we should obey that.
+ while (std::isspace(is.peek())) {
+ is.get();
+ }
+ }
+
+ auto next_char = is.peek();
+ bool negate_value = false;
+
+ if (next_char != '-' && next_char != '0') {
+ return ParseNormalFloat(is, negate_value, value);
+ }
+
+ if (next_char == '-') {
+ negate_value = true;
+ is.get();
+ next_char = is.peek();
+ }
+
+ if (next_char == '0') {
+ is.get(); // We may have to unget this.
+ auto maybe_hex_start = is.peek();
+ if (maybe_hex_start != 'x' && maybe_hex_start != 'X') {
+ is.unget();
+ return ParseNormalFloat(is, negate_value, value);
+ } else {
+ is.get(); // Throw away the 'x'.
+ }
+ } else {
+ return ParseNormalFloat(is, negate_value, value);
+ }
+
+ // This "looks" like a hex-float so treat it as one.
+ bool seen_p = false;
+ bool seen_dot = false;
+ uint_type fraction_index = 0;
+
+ uint_type fraction = 0;
+ int_type exponent = HF::exponent_bias;
+
+ // Strip off leading zeros so we don't have to special-case them later.
+ while ((next_char = is.peek()) == '0') {
+ is.get();
+ }
+
+ bool is_denorm =
+ true; // Assume a denorm-style representation until we hear otherwise.
+ // NB: This does not mean the value is actually denormal;
+ // it just means that its integer part was written as 0.
+ bool bits_written = false; // Stays false until we write a bit.
+ while (!seen_p && !seen_dot) {
+ // Handle characters that are left of the fractional part.
+ if (next_char == '.') {
+ seen_dot = true;
+ } else if (next_char == 'p') {
+ seen_p = true;
+ } else if (::isxdigit(next_char)) {
+ // We know this is not denormalized since we have stripped all leading
+ // zeroes and we are not a ".".
+ is_denorm = false;
+ int number = get_nibble_from_character(next_char);
+ for (int i = 0; i < 4; ++i, number <<= 1) {
+ uint_type write_bit = (number & 0x8) ? 0x1 : 0x0;
+ if (bits_written) {
+ // If we are here the bits represented belong in the fractional
+ // part of the float, and we have to adjust the exponent accordingly.
+ fraction = static_cast<uint_type>(
+ fraction |
+ static_cast<uint_type>(
+ write_bit << (HF::top_bit_left_shift - fraction_index++)));
+ exponent = static_cast<int_type>(exponent + 1);
+ }
+ bits_written |= write_bit != 0;
+ }
+ } else {
+ // We have not found our exponent yet, so we have to fail.
+ is.setstate(std::ios::failbit);
+ return is;
+ }
+ is.get();
+ next_char = is.peek();
+ }
+ bits_written = false;
+ while (seen_dot && !seen_p) {
+ // Handle only fractional parts now.
+ if (next_char == 'p') {
+ seen_p = true;
+ } else if (::isxdigit(next_char)) {
+ int number = get_nibble_from_character(next_char);
+ for (int i = 0; i < 4; ++i, number <<= 1) {
+ uint_type write_bit = (number & 0x8) ? 0x01 : 0x00;
+ bits_written |= write_bit != 0;
+ if (is_denorm && !bits_written) {
+ // Handle modifying the exponent here; this way we can handle
+ // an arbitrary number of hex digits without overflowing our
+ // integer.
+ exponent = static_cast<int_type>(exponent - 1);
+ } else {
+ fraction = static_cast<uint_type>(
+ fraction |
+ static_cast<uint_type>(
+ write_bit << (HF::top_bit_left_shift - fraction_index++)));
+ }
+ }
+ } else {
+ // We still have not found our 'p' exponent, so this is not a valid
+ // hex-float.
+ is.setstate(std::ios::failbit);
+ return is;
+ }
+ is.get();
+ next_char = is.peek();
+ }
+
+ bool seen_sign = false;
+ int8_t exponent_sign = 1;
+ int_type written_exponent = 0;
+ while (true) {
+ if ((next_char == '-' || next_char == '+')) {
+ if (seen_sign) {
+ is.setstate(std::ios::failbit);
+ return is;
+ }
+ seen_sign = true;
+ exponent_sign = (next_char == '-') ? -1 : 1;
+ } else if (::isdigit(next_char)) {
+ // Hex-floats express their exponent as decimal.
+ written_exponent = static_cast<int_type>(written_exponent * 10);
+ written_exponent =
+ static_cast<int_type>(written_exponent + (next_char - '0'));
+ } else {
+ break;
+ }
+ is.get();
+ next_char = is.peek();
+ }
+
+ written_exponent = static_cast<int_type>(written_exponent * exponent_sign);
+ exponent = static_cast<int_type>(exponent + written_exponent);
+
+ bool is_zero = is_denorm && (fraction == 0);
+ if (is_denorm && !is_zero) {
+ fraction = static_cast<uint_type>(fraction << 1);
+ exponent = static_cast<int_type>(exponent - 1);
+ } else if (is_zero) {
+ exponent = 0;
+ }
+
+ if (exponent <= 0 && !is_zero) {
+ fraction = static_cast<uint_type>(fraction >> 1);
+ fraction |= static_cast<uint_type>(1) << HF::top_bit_left_shift;
+ }
+
+ fraction = (fraction >> HF::fraction_right_shift) & HF::fraction_encode_mask;
+
+ const int_type max_exponent =
+ SetBits<uint_type, 0, HF::num_exponent_bits>::get;
+
+ // Handle actual denorm numbers
+ while (exponent < 0 && !is_zero) {
+ fraction = static_cast<uint_type>(fraction >> 1);
+ exponent = static_cast<int_type>(exponent + 1);
+
+ fraction &= HF::fraction_encode_mask;
+ if (fraction == 0) {
+ // We have underflowed our fraction. We should clamp to zero.
+ is_zero = true;
+ exponent = 0;
+ }
+ }
+
+ // We have overflowed, so we should be inf/-inf.
+ if (exponent > max_exponent) {
+ exponent = max_exponent;
+ fraction = 0;
+ }
+
+ uint_type output_bits = static_cast<uint_type>(
+ static_cast<uint_type>(negate_value ? 1 : 0) << HF::top_bit_left_shift);
+ output_bits |= fraction;
+
+ uint_type shifted_exponent = static_cast<uint_type>(
+ static_cast<uint_type>(exponent << HF::exponent_left_shift) &
+ HF::exponent_mask);
+ output_bits |= shifted_exponent;
+
+ T output_float = spvutils::BitwiseCast<T>(output_bits);
+ value.set_value(output_float);
+
+ return is;
+}
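+
+// Illustrative usage of the stream extractor above (a minimal sketch, not
+// part of the upstream header): parsing a hex-float literal into a 32-bit
+// float through the HexFloat/FloatProxy types declared earlier in this file.
+//
+//   std::istringstream input("0x1.8p+1");                  // 1.5 * 2^1 == 3.0
+//   spvutils::HexFloat<spvutils::FloatProxy<float>> hf(0.f);
+//   input >> hf;                                           // uses operator>> above
+//   float parsed = hf.value().getAsFloat();                // parsed == 3.0f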
+
+// Writes a FloatProxy value to a stream.
+// Zero and normal numbers are printed in the usual notation, but with
+// enough digits to fully reproduce the value. Other values (subnormal,
+// NaN, and infinity) are printed as a hex float.
+template <typename T>
+std::ostream& operator<<(std::ostream& os, const FloatProxy<T>& value) {
+ auto float_val = value.getAsFloat();
+ switch (std::fpclassify(float_val)) {
+ case FP_ZERO:
+ case FP_NORMAL: {
+ auto saved_precision = os.precision();
+ os.precision(std::numeric_limits<T>::digits10);
+ os << float_val;
+ os.precision(saved_precision);
+ } break;
+ default:
+ os << HexFloat<FloatProxy<T>>(value);
+ break;
+ }
+ return os;
+}
+
+template <>
+inline std::ostream& operator<<<Float16>(std::ostream& os,
+ const FloatProxy<Float16>& value) {
+ os << HexFloat<FloatProxy<Float16>>(value);
+ return os;
+}
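+
+// Illustrative output behaviour (a sketch based on the operators above, not
+// part of the upstream header): normal and zero values print in ordinary
+// decimal notation, subnormal/NaN/infinite values fall back to hex-float
+// form, and Float16 always prints as a hex float.
+//
+//   std::cout << spvutils::FloatProxy<float>(3.0f);        // prints "3"
+//   std::cout << spvutils::FloatProxy<float>(
+//       std::numeric_limits<float>::denorm_min());         // prints a hex float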
+}
+
+#endif // LIBSPIRV_UTIL_HEX_FLOAT_H_
diff --git a/thirdparty/glslang/SPIRV/spirv.hpp b/thirdparty/glslang/SPIRV/spirv.hpp
new file mode 100644
index 0000000000..5297fd3902
--- /dev/null
+++ b/thirdparty/glslang/SPIRV/spirv.hpp
@@ -0,0 +1,1881 @@
+// Copyright (c) 2014-2019 The Khronos Group Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and/or associated documentation files (the "Materials"),
+// to deal in the Materials without restriction, including without limitation
+// the rights to use, copy, modify, merge, publish, distribute, sublicense,
+// and/or sell copies of the Materials, and to permit persons to whom the
+// Materials are furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Materials.
+//
+// MODIFICATIONS TO THIS FILE MAY MEAN IT NO LONGER ACCURATELY REFLECTS KHRONOS
+// STANDARDS. THE UNMODIFIED, NORMATIVE VERSIONS OF KHRONOS SPECIFICATIONS AND
+// HEADER INFORMATION ARE LOCATED AT https://www.khronos.org/registry/
+//
+// THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS
+// IN THE MATERIALS.
+
+// This header is automatically generated by the same tool that creates
+// the Binary Section of the SPIR-V specification.
+
+// Enumeration tokens for SPIR-V, in various styles:
+// C, C++, C++11, JSON, Lua, Python, C#, D
+//
+// - C will have tokens with a "Spv" prefix, e.g.: SpvSourceLanguageGLSL
+// - C++ will have tokens in the "spv" name space, e.g.: spv::SourceLanguageGLSL
+// - C++11 will use enum classes in the spv namespace, e.g.: spv::SourceLanguage::GLSL
+// - Lua will use tables, e.g.: spv.SourceLanguage.GLSL
+// - Python will use dictionaries, e.g.: spv['SourceLanguage']['GLSL']
+// - C# will use enum classes in the Specification class located in the "Spv" namespace,
+// e.g.: Spv.Specification.SourceLanguage.GLSL
+// - D will have tokens under the "spv" module, e.g: spv.SourceLanguage.GLSL
+//
+// Some tokens act like mask values, which can be OR'd together,
+// while others are mutually exclusive. The mask-like ones have
+// "Mask" in their name, and a parallel enum that has the shift
+// amount (1 << x) for each corresponding enumerant.
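+//
+// For example (an illustrative sketch using the enums declared below),
+// combining two image-operand bits and testing one of them through its
+// parallel shift enum:
+//
+//   unsigned int operands = spv::ImageOperandsBiasMask | spv::ImageOperandsLodMask;
+//   bool has_lod = (operands & (1u << spv::ImageOperandsLodShift)) != 0;  // true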
+
+#ifndef spirv_HPP
+#define spirv_HPP
+
+namespace spv {
+
+typedef unsigned int Id;
+
+#define SPV_VERSION 0x10400
+#define SPV_REVISION 1
+
+static const unsigned int MagicNumber = 0x07230203;
+static const unsigned int Version = 0x00010400;
+static const unsigned int Revision = 1;
+static const unsigned int OpCodeMask = 0xffff;
+static const unsigned int WordCountShift = 16;
+
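+// Illustrative decoding sketch (not part of the generated header): the first
+// word of every SPIR-V instruction packs the word count in its high 16 bits
+// and the opcode in its low 16 bits, which is what OpCodeMask and
+// WordCountShift are for:
+//
+//   unsigned int first_word = words[0];                   // hypothetical instruction stream
+//   Op opcode = static_cast<Op>(first_word & OpCodeMask);
+//   unsigned int word_count = first_word >> WordCountShift;
+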
+enum SourceLanguage {
+ SourceLanguageUnknown = 0,
+ SourceLanguageESSL = 1,
+ SourceLanguageGLSL = 2,
+ SourceLanguageOpenCL_C = 3,
+ SourceLanguageOpenCL_CPP = 4,
+ SourceLanguageHLSL = 5,
+ SourceLanguageMax = 0x7fffffff,
+};
+
+enum ExecutionModel {
+ ExecutionModelVertex = 0,
+ ExecutionModelTessellationControl = 1,
+ ExecutionModelTessellationEvaluation = 2,
+ ExecutionModelGeometry = 3,
+ ExecutionModelFragment = 4,
+ ExecutionModelGLCompute = 5,
+ ExecutionModelKernel = 6,
+ ExecutionModelTaskNV = 5267,
+ ExecutionModelMeshNV = 5268,
+ ExecutionModelRayGenerationNV = 5313,
+ ExecutionModelIntersectionNV = 5314,
+ ExecutionModelAnyHitNV = 5315,
+ ExecutionModelClosestHitNV = 5316,
+ ExecutionModelMissNV = 5317,
+ ExecutionModelCallableNV = 5318,
+ ExecutionModelMax = 0x7fffffff,
+};
+
+enum AddressingModel {
+ AddressingModelLogical = 0,
+ AddressingModelPhysical32 = 1,
+ AddressingModelPhysical64 = 2,
+ AddressingModelPhysicalStorageBuffer64EXT = 5348,
+ AddressingModelMax = 0x7fffffff,
+};
+
+enum MemoryModel {
+ MemoryModelSimple = 0,
+ MemoryModelGLSL450 = 1,
+ MemoryModelOpenCL = 2,
+ MemoryModelVulkanKHR = 3,
+ MemoryModelMax = 0x7fffffff,
+};
+
+enum ExecutionMode {
+ ExecutionModeInvocations = 0,
+ ExecutionModeSpacingEqual = 1,
+ ExecutionModeSpacingFractionalEven = 2,
+ ExecutionModeSpacingFractionalOdd = 3,
+ ExecutionModeVertexOrderCw = 4,
+ ExecutionModeVertexOrderCcw = 5,
+ ExecutionModePixelCenterInteger = 6,
+ ExecutionModeOriginUpperLeft = 7,
+ ExecutionModeOriginLowerLeft = 8,
+ ExecutionModeEarlyFragmentTests = 9,
+ ExecutionModePointMode = 10,
+ ExecutionModeXfb = 11,
+ ExecutionModeDepthReplacing = 12,
+ ExecutionModeDepthGreater = 14,
+ ExecutionModeDepthLess = 15,
+ ExecutionModeDepthUnchanged = 16,
+ ExecutionModeLocalSize = 17,
+ ExecutionModeLocalSizeHint = 18,
+ ExecutionModeInputPoints = 19,
+ ExecutionModeInputLines = 20,
+ ExecutionModeInputLinesAdjacency = 21,
+ ExecutionModeTriangles = 22,
+ ExecutionModeInputTrianglesAdjacency = 23,
+ ExecutionModeQuads = 24,
+ ExecutionModeIsolines = 25,
+ ExecutionModeOutputVertices = 26,
+ ExecutionModeOutputPoints = 27,
+ ExecutionModeOutputLineStrip = 28,
+ ExecutionModeOutputTriangleStrip = 29,
+ ExecutionModeVecTypeHint = 30,
+ ExecutionModeContractionOff = 31,
+ ExecutionModeInitializer = 33,
+ ExecutionModeFinalizer = 34,
+ ExecutionModeSubgroupSize = 35,
+ ExecutionModeSubgroupsPerWorkgroup = 36,
+ ExecutionModeSubgroupsPerWorkgroupId = 37,
+ ExecutionModeLocalSizeId = 38,
+ ExecutionModeLocalSizeHintId = 39,
+ ExecutionModePostDepthCoverage = 4446,
+ ExecutionModeDenormPreserve = 4459,
+ ExecutionModeDenormFlushToZero = 4460,
+ ExecutionModeSignedZeroInfNanPreserve = 4461,
+ ExecutionModeRoundingModeRTE = 4462,
+ ExecutionModeRoundingModeRTZ = 4463,
+ ExecutionModeStencilRefReplacingEXT = 5027,
+ ExecutionModeOutputLinesNV = 5269,
+ ExecutionModeOutputPrimitivesNV = 5270,
+ ExecutionModeDerivativeGroupQuadsNV = 5289,
+ ExecutionModeDerivativeGroupLinearNV = 5290,
+ ExecutionModeOutputTrianglesNV = 5298,
+ ExecutionModeMax = 0x7fffffff,
+};
+
+enum StorageClass {
+ StorageClassUniformConstant = 0,
+ StorageClassInput = 1,
+ StorageClassUniform = 2,
+ StorageClassOutput = 3,
+ StorageClassWorkgroup = 4,
+ StorageClassCrossWorkgroup = 5,
+ StorageClassPrivate = 6,
+ StorageClassFunction = 7,
+ StorageClassGeneric = 8,
+ StorageClassPushConstant = 9,
+ StorageClassAtomicCounter = 10,
+ StorageClassImage = 11,
+ StorageClassStorageBuffer = 12,
+ StorageClassCallableDataNV = 5328,
+ StorageClassIncomingCallableDataNV = 5329,
+ StorageClassRayPayloadNV = 5338,
+ StorageClassHitAttributeNV = 5339,
+ StorageClassIncomingRayPayloadNV = 5342,
+ StorageClassShaderRecordBufferNV = 5343,
+ StorageClassPhysicalStorageBufferEXT = 5349,
+ StorageClassMax = 0x7fffffff,
+};
+
+enum Dim {
+ Dim1D = 0,
+ Dim2D = 1,
+ Dim3D = 2,
+ DimCube = 3,
+ DimRect = 4,
+ DimBuffer = 5,
+ DimSubpassData = 6,
+ DimMax = 0x7fffffff,
+};
+
+enum SamplerAddressingMode {
+ SamplerAddressingModeNone = 0,
+ SamplerAddressingModeClampToEdge = 1,
+ SamplerAddressingModeClamp = 2,
+ SamplerAddressingModeRepeat = 3,
+ SamplerAddressingModeRepeatMirrored = 4,
+ SamplerAddressingModeMax = 0x7fffffff,
+};
+
+enum SamplerFilterMode {
+ SamplerFilterModeNearest = 0,
+ SamplerFilterModeLinear = 1,
+ SamplerFilterModeMax = 0x7fffffff,
+};
+
+enum ImageFormat {
+ ImageFormatUnknown = 0,
+ ImageFormatRgba32f = 1,
+ ImageFormatRgba16f = 2,
+ ImageFormatR32f = 3,
+ ImageFormatRgba8 = 4,
+ ImageFormatRgba8Snorm = 5,
+ ImageFormatRg32f = 6,
+ ImageFormatRg16f = 7,
+ ImageFormatR11fG11fB10f = 8,
+ ImageFormatR16f = 9,
+ ImageFormatRgba16 = 10,
+ ImageFormatRgb10A2 = 11,
+ ImageFormatRg16 = 12,
+ ImageFormatRg8 = 13,
+ ImageFormatR16 = 14,
+ ImageFormatR8 = 15,
+ ImageFormatRgba16Snorm = 16,
+ ImageFormatRg16Snorm = 17,
+ ImageFormatRg8Snorm = 18,
+ ImageFormatR16Snorm = 19,
+ ImageFormatR8Snorm = 20,
+ ImageFormatRgba32i = 21,
+ ImageFormatRgba16i = 22,
+ ImageFormatRgba8i = 23,
+ ImageFormatR32i = 24,
+ ImageFormatRg32i = 25,
+ ImageFormatRg16i = 26,
+ ImageFormatRg8i = 27,
+ ImageFormatR16i = 28,
+ ImageFormatR8i = 29,
+ ImageFormatRgba32ui = 30,
+ ImageFormatRgba16ui = 31,
+ ImageFormatRgba8ui = 32,
+ ImageFormatR32ui = 33,
+ ImageFormatRgb10a2ui = 34,
+ ImageFormatRg32ui = 35,
+ ImageFormatRg16ui = 36,
+ ImageFormatRg8ui = 37,
+ ImageFormatR16ui = 38,
+ ImageFormatR8ui = 39,
+ ImageFormatMax = 0x7fffffff,
+};
+
+enum ImageChannelOrder {
+ ImageChannelOrderR = 0,
+ ImageChannelOrderA = 1,
+ ImageChannelOrderRG = 2,
+ ImageChannelOrderRA = 3,
+ ImageChannelOrderRGB = 4,
+ ImageChannelOrderRGBA = 5,
+ ImageChannelOrderBGRA = 6,
+ ImageChannelOrderARGB = 7,
+ ImageChannelOrderIntensity = 8,
+ ImageChannelOrderLuminance = 9,
+ ImageChannelOrderRx = 10,
+ ImageChannelOrderRGx = 11,
+ ImageChannelOrderRGBx = 12,
+ ImageChannelOrderDepth = 13,
+ ImageChannelOrderDepthStencil = 14,
+ ImageChannelOrdersRGB = 15,
+ ImageChannelOrdersRGBx = 16,
+ ImageChannelOrdersRGBA = 17,
+ ImageChannelOrdersBGRA = 18,
+ ImageChannelOrderABGR = 19,
+ ImageChannelOrderMax = 0x7fffffff,
+};
+
+enum ImageChannelDataType {
+ ImageChannelDataTypeSnormInt8 = 0,
+ ImageChannelDataTypeSnormInt16 = 1,
+ ImageChannelDataTypeUnormInt8 = 2,
+ ImageChannelDataTypeUnormInt16 = 3,
+ ImageChannelDataTypeUnormShort565 = 4,
+ ImageChannelDataTypeUnormShort555 = 5,
+ ImageChannelDataTypeUnormInt101010 = 6,
+ ImageChannelDataTypeSignedInt8 = 7,
+ ImageChannelDataTypeSignedInt16 = 8,
+ ImageChannelDataTypeSignedInt32 = 9,
+ ImageChannelDataTypeUnsignedInt8 = 10,
+ ImageChannelDataTypeUnsignedInt16 = 11,
+ ImageChannelDataTypeUnsignedInt32 = 12,
+ ImageChannelDataTypeHalfFloat = 13,
+ ImageChannelDataTypeFloat = 14,
+ ImageChannelDataTypeUnormInt24 = 15,
+ ImageChannelDataTypeUnormInt101010_2 = 16,
+ ImageChannelDataTypeMax = 0x7fffffff,
+};
+
+enum ImageOperandsShift {
+ ImageOperandsBiasShift = 0,
+ ImageOperandsLodShift = 1,
+ ImageOperandsGradShift = 2,
+ ImageOperandsConstOffsetShift = 3,
+ ImageOperandsOffsetShift = 4,
+ ImageOperandsConstOffsetsShift = 5,
+ ImageOperandsSampleShift = 6,
+ ImageOperandsMinLodShift = 7,
+ ImageOperandsMakeTexelAvailableKHRShift = 8,
+ ImageOperandsMakeTexelVisibleKHRShift = 9,
+ ImageOperandsNonPrivateTexelKHRShift = 10,
+ ImageOperandsVolatileTexelKHRShift = 11,
+ ImageOperandsSignExtendShift = 12,
+ ImageOperandsZeroExtendShift = 13,
+ ImageOperandsMax = 0x7fffffff,
+};
+
+enum ImageOperandsMask {
+ ImageOperandsMaskNone = 0,
+ ImageOperandsBiasMask = 0x00000001,
+ ImageOperandsLodMask = 0x00000002,
+ ImageOperandsGradMask = 0x00000004,
+ ImageOperandsConstOffsetMask = 0x00000008,
+ ImageOperandsOffsetMask = 0x00000010,
+ ImageOperandsConstOffsetsMask = 0x00000020,
+ ImageOperandsSampleMask = 0x00000040,
+ ImageOperandsMinLodMask = 0x00000080,
+ ImageOperandsMakeTexelAvailableKHRMask = 0x00000100,
+ ImageOperandsMakeTexelVisibleKHRMask = 0x00000200,
+ ImageOperandsNonPrivateTexelKHRMask = 0x00000400,
+ ImageOperandsVolatileTexelKHRMask = 0x00000800,
+ ImageOperandsSignExtendMask = 0x00001000,
+ ImageOperandsZeroExtendMask = 0x00002000,
+};
+
+enum FPFastMathModeShift {
+ FPFastMathModeNotNaNShift = 0,
+ FPFastMathModeNotInfShift = 1,
+ FPFastMathModeNSZShift = 2,
+ FPFastMathModeAllowRecipShift = 3,
+ FPFastMathModeFastShift = 4,
+ FPFastMathModeMax = 0x7fffffff,
+};
+
+enum FPFastMathModeMask {
+ FPFastMathModeMaskNone = 0,
+ FPFastMathModeNotNaNMask = 0x00000001,
+ FPFastMathModeNotInfMask = 0x00000002,
+ FPFastMathModeNSZMask = 0x00000004,
+ FPFastMathModeAllowRecipMask = 0x00000008,
+ FPFastMathModeFastMask = 0x00000010,
+};
+
+enum FPRoundingMode {
+ FPRoundingModeRTE = 0,
+ FPRoundingModeRTZ = 1,
+ FPRoundingModeRTP = 2,
+ FPRoundingModeRTN = 3,
+ FPRoundingModeMax = 0x7fffffff,
+};
+
+enum LinkageType {
+ LinkageTypeExport = 0,
+ LinkageTypeImport = 1,
+ LinkageTypeMax = 0x7fffffff,
+};
+
+enum AccessQualifier {
+ AccessQualifierReadOnly = 0,
+ AccessQualifierWriteOnly = 1,
+ AccessQualifierReadWrite = 2,
+ AccessQualifierMax = 0x7fffffff,
+};
+
+enum FunctionParameterAttribute {
+ FunctionParameterAttributeZext = 0,
+ FunctionParameterAttributeSext = 1,
+ FunctionParameterAttributeByVal = 2,
+ FunctionParameterAttributeSret = 3,
+ FunctionParameterAttributeNoAlias = 4,
+ FunctionParameterAttributeNoCapture = 5,
+ FunctionParameterAttributeNoWrite = 6,
+ FunctionParameterAttributeNoReadWrite = 7,
+ FunctionParameterAttributeMax = 0x7fffffff,
+};
+
+enum Decoration {
+ DecorationRelaxedPrecision = 0,
+ DecorationSpecId = 1,
+ DecorationBlock = 2,
+ DecorationBufferBlock = 3,
+ DecorationRowMajor = 4,
+ DecorationColMajor = 5,
+ DecorationArrayStride = 6,
+ DecorationMatrixStride = 7,
+ DecorationGLSLShared = 8,
+ DecorationGLSLPacked = 9,
+ DecorationCPacked = 10,
+ DecorationBuiltIn = 11,
+ DecorationNoPerspective = 13,
+ DecorationFlat = 14,
+ DecorationPatch = 15,
+ DecorationCentroid = 16,
+ DecorationSample = 17,
+ DecorationInvariant = 18,
+ DecorationRestrict = 19,
+ DecorationAliased = 20,
+ DecorationVolatile = 21,
+ DecorationConstant = 22,
+ DecorationCoherent = 23,
+ DecorationNonWritable = 24,
+ DecorationNonReadable = 25,
+ DecorationUniform = 26,
+ DecorationUniformId = 27,
+ DecorationSaturatedConversion = 28,
+ DecorationStream = 29,
+ DecorationLocation = 30,
+ DecorationComponent = 31,
+ DecorationIndex = 32,
+ DecorationBinding = 33,
+ DecorationDescriptorSet = 34,
+ DecorationOffset = 35,
+ DecorationXfbBuffer = 36,
+ DecorationXfbStride = 37,
+ DecorationFuncParamAttr = 38,
+ DecorationFPRoundingMode = 39,
+ DecorationFPFastMathMode = 40,
+ DecorationLinkageAttributes = 41,
+ DecorationNoContraction = 42,
+ DecorationInputAttachmentIndex = 43,
+ DecorationAlignment = 44,
+ DecorationMaxByteOffset = 45,
+ DecorationAlignmentId = 46,
+ DecorationMaxByteOffsetId = 47,
+ DecorationNoSignedWrap = 4469,
+ DecorationNoUnsignedWrap = 4470,
+ DecorationExplicitInterpAMD = 4999,
+ DecorationOverrideCoverageNV = 5248,
+ DecorationPassthroughNV = 5250,
+ DecorationViewportRelativeNV = 5252,
+ DecorationSecondaryViewportRelativeNV = 5256,
+ DecorationPerPrimitiveNV = 5271,
+ DecorationPerViewNV = 5272,
+ DecorationPerTaskNV = 5273,
+ DecorationPerVertexNV = 5285,
+ DecorationNonUniformEXT = 5300,
+ DecorationRestrictPointerEXT = 5355,
+ DecorationAliasedPointerEXT = 5356,
+ DecorationCounterBuffer = 5634,
+ DecorationHlslCounterBufferGOOGLE = 5634,
+ DecorationHlslSemanticGOOGLE = 5635,
+ DecorationUserSemantic = 5635,
+ DecorationMax = 0x7fffffff,
+};
+
+enum BuiltIn {
+ BuiltInPosition = 0,
+ BuiltInPointSize = 1,
+ BuiltInClipDistance = 3,
+ BuiltInCullDistance = 4,
+ BuiltInVertexId = 5,
+ BuiltInInstanceId = 6,
+ BuiltInPrimitiveId = 7,
+ BuiltInInvocationId = 8,
+ BuiltInLayer = 9,
+ BuiltInViewportIndex = 10,
+ BuiltInTessLevelOuter = 11,
+ BuiltInTessLevelInner = 12,
+ BuiltInTessCoord = 13,
+ BuiltInPatchVertices = 14,
+ BuiltInFragCoord = 15,
+ BuiltInPointCoord = 16,
+ BuiltInFrontFacing = 17,
+ BuiltInSampleId = 18,
+ BuiltInSamplePosition = 19,
+ BuiltInSampleMask = 20,
+ BuiltInFragDepth = 22,
+ BuiltInHelperInvocation = 23,
+ BuiltInNumWorkgroups = 24,
+ BuiltInWorkgroupSize = 25,
+ BuiltInWorkgroupId = 26,
+ BuiltInLocalInvocationId = 27,
+ BuiltInGlobalInvocationId = 28,
+ BuiltInLocalInvocationIndex = 29,
+ BuiltInWorkDim = 30,
+ BuiltInGlobalSize = 31,
+ BuiltInEnqueuedWorkgroupSize = 32,
+ BuiltInGlobalOffset = 33,
+ BuiltInGlobalLinearId = 34,
+ BuiltInSubgroupSize = 36,
+ BuiltInSubgroupMaxSize = 37,
+ BuiltInNumSubgroups = 38,
+ BuiltInNumEnqueuedSubgroups = 39,
+ BuiltInSubgroupId = 40,
+ BuiltInSubgroupLocalInvocationId = 41,
+ BuiltInVertexIndex = 42,
+ BuiltInInstanceIndex = 43,
+ BuiltInSubgroupEqMask = 4416,
+ BuiltInSubgroupEqMaskKHR = 4416,
+ BuiltInSubgroupGeMask = 4417,
+ BuiltInSubgroupGeMaskKHR = 4417,
+ BuiltInSubgroupGtMask = 4418,
+ BuiltInSubgroupGtMaskKHR = 4418,
+ BuiltInSubgroupLeMask = 4419,
+ BuiltInSubgroupLeMaskKHR = 4419,
+ BuiltInSubgroupLtMask = 4420,
+ BuiltInSubgroupLtMaskKHR = 4420,
+ BuiltInBaseVertex = 4424,
+ BuiltInBaseInstance = 4425,
+ BuiltInDrawIndex = 4426,
+ BuiltInDeviceIndex = 4438,
+ BuiltInViewIndex = 4440,
+ BuiltInBaryCoordNoPerspAMD = 4992,
+ BuiltInBaryCoordNoPerspCentroidAMD = 4993,
+ BuiltInBaryCoordNoPerspSampleAMD = 4994,
+ BuiltInBaryCoordSmoothAMD = 4995,
+ BuiltInBaryCoordSmoothCentroidAMD = 4996,
+ BuiltInBaryCoordSmoothSampleAMD = 4997,
+ BuiltInBaryCoordPullModelAMD = 4998,
+ BuiltInFragStencilRefEXT = 5014,
+ BuiltInViewportMaskNV = 5253,
+ BuiltInSecondaryPositionNV = 5257,
+ BuiltInSecondaryViewportMaskNV = 5258,
+ BuiltInPositionPerViewNV = 5261,
+ BuiltInViewportMaskPerViewNV = 5262,
+ BuiltInFullyCoveredEXT = 5264,
+ BuiltInTaskCountNV = 5274,
+ BuiltInPrimitiveCountNV = 5275,
+ BuiltInPrimitiveIndicesNV = 5276,
+ BuiltInClipDistancePerViewNV = 5277,
+ BuiltInCullDistancePerViewNV = 5278,
+ BuiltInLayerPerViewNV = 5279,
+ BuiltInMeshViewCountNV = 5280,
+ BuiltInMeshViewIndicesNV = 5281,
+ BuiltInBaryCoordNV = 5286,
+ BuiltInBaryCoordNoPerspNV = 5287,
+ BuiltInFragSizeEXT = 5292,
+ BuiltInFragmentSizeNV = 5292,
+ BuiltInFragInvocationCountEXT = 5293,
+ BuiltInInvocationsPerPixelNV = 5293,
+ BuiltInLaunchIdNV = 5319,
+ BuiltInLaunchSizeNV = 5320,
+ BuiltInWorldRayOriginNV = 5321,
+ BuiltInWorldRayDirectionNV = 5322,
+ BuiltInObjectRayOriginNV = 5323,
+ BuiltInObjectRayDirectionNV = 5324,
+ BuiltInRayTminNV = 5325,
+ BuiltInRayTmaxNV = 5326,
+ BuiltInInstanceCustomIndexNV = 5327,
+ BuiltInObjectToWorldNV = 5330,
+ BuiltInWorldToObjectNV = 5331,
+ BuiltInHitTNV = 5332,
+ BuiltInHitKindNV = 5333,
+ BuiltInIncomingRayFlagsNV = 5351,
+ BuiltInMax = 0x7fffffff,
+};
+
+enum SelectionControlShift {
+ SelectionControlFlattenShift = 0,
+ SelectionControlDontFlattenShift = 1,
+ SelectionControlMax = 0x7fffffff,
+};
+
+enum SelectionControlMask {
+ SelectionControlMaskNone = 0,
+ SelectionControlFlattenMask = 0x00000001,
+ SelectionControlDontFlattenMask = 0x00000002,
+};
+
+enum LoopControlShift {
+ LoopControlUnrollShift = 0,
+ LoopControlDontUnrollShift = 1,
+ LoopControlDependencyInfiniteShift = 2,
+ LoopControlDependencyLengthShift = 3,
+ LoopControlMinIterationsShift = 4,
+ LoopControlMaxIterationsShift = 5,
+ LoopControlIterationMultipleShift = 6,
+ LoopControlPeelCountShift = 7,
+ LoopControlPartialCountShift = 8,
+ LoopControlMax = 0x7fffffff,
+};
+
+enum LoopControlMask {
+ LoopControlMaskNone = 0,
+ LoopControlUnrollMask = 0x00000001,
+ LoopControlDontUnrollMask = 0x00000002,
+ LoopControlDependencyInfiniteMask = 0x00000004,
+ LoopControlDependencyLengthMask = 0x00000008,
+ LoopControlMinIterationsMask = 0x00000010,
+ LoopControlMaxIterationsMask = 0x00000020,
+ LoopControlIterationMultipleMask = 0x00000040,
+ LoopControlPeelCountMask = 0x00000080,
+ LoopControlPartialCountMask = 0x00000100,
+};
+
+enum FunctionControlShift {
+ FunctionControlInlineShift = 0,
+ FunctionControlDontInlineShift = 1,
+ FunctionControlPureShift = 2,
+ FunctionControlConstShift = 3,
+ FunctionControlMax = 0x7fffffff,
+};
+
+enum FunctionControlMask {
+ FunctionControlMaskNone = 0,
+ FunctionControlInlineMask = 0x00000001,
+ FunctionControlDontInlineMask = 0x00000002,
+ FunctionControlPureMask = 0x00000004,
+ FunctionControlConstMask = 0x00000008,
+};
+
+enum MemorySemanticsShift {
+ MemorySemanticsAcquireShift = 1,
+ MemorySemanticsReleaseShift = 2,
+ MemorySemanticsAcquireReleaseShift = 3,
+ MemorySemanticsSequentiallyConsistentShift = 4,
+ MemorySemanticsUniformMemoryShift = 6,
+ MemorySemanticsSubgroupMemoryShift = 7,
+ MemorySemanticsWorkgroupMemoryShift = 8,
+ MemorySemanticsCrossWorkgroupMemoryShift = 9,
+ MemorySemanticsAtomicCounterMemoryShift = 10,
+ MemorySemanticsImageMemoryShift = 11,
+ MemorySemanticsOutputMemoryKHRShift = 12,
+ MemorySemanticsMakeAvailableKHRShift = 13,
+ MemorySemanticsMakeVisibleKHRShift = 14,
+ MemorySemanticsMax = 0x7fffffff,
+};
+
+enum MemorySemanticsMask {
+ MemorySemanticsMaskNone = 0,
+ MemorySemanticsAcquireMask = 0x00000002,
+ MemorySemanticsReleaseMask = 0x00000004,
+ MemorySemanticsAcquireReleaseMask = 0x00000008,
+ MemorySemanticsSequentiallyConsistentMask = 0x00000010,
+ MemorySemanticsUniformMemoryMask = 0x00000040,
+ MemorySemanticsSubgroupMemoryMask = 0x00000080,
+ MemorySemanticsWorkgroupMemoryMask = 0x00000100,
+ MemorySemanticsCrossWorkgroupMemoryMask = 0x00000200,
+ MemorySemanticsAtomicCounterMemoryMask = 0x00000400,
+ MemorySemanticsImageMemoryMask = 0x00000800,
+ MemorySemanticsOutputMemoryKHRMask = 0x00001000,
+ MemorySemanticsMakeAvailableKHRMask = 0x00002000,
+ MemorySemanticsMakeVisibleKHRMask = 0x00004000,
+};
+
+enum MemoryAccessShift {
+ MemoryAccessVolatileShift = 0,
+ MemoryAccessAlignedShift = 1,
+ MemoryAccessNontemporalShift = 2,
+ MemoryAccessMakePointerAvailableKHRShift = 3,
+ MemoryAccessMakePointerVisibleKHRShift = 4,
+ MemoryAccessNonPrivatePointerKHRShift = 5,
+ MemoryAccessMax = 0x7fffffff,
+};
+
+enum MemoryAccessMask {
+ MemoryAccessMaskNone = 0,
+ MemoryAccessVolatileMask = 0x00000001,
+ MemoryAccessAlignedMask = 0x00000002,
+ MemoryAccessNontemporalMask = 0x00000004,
+ MemoryAccessMakePointerAvailableKHRMask = 0x00000008,
+ MemoryAccessMakePointerVisibleKHRMask = 0x00000010,
+ MemoryAccessNonPrivatePointerKHRMask = 0x00000020,
+};
+
+enum Scope {
+ ScopeCrossDevice = 0,
+ ScopeDevice = 1,
+ ScopeWorkgroup = 2,
+ ScopeSubgroup = 3,
+ ScopeInvocation = 4,
+ ScopeQueueFamilyKHR = 5,
+ ScopeMax = 0x7fffffff,
+};
+
+enum GroupOperation {
+ GroupOperationReduce = 0,
+ GroupOperationInclusiveScan = 1,
+ GroupOperationExclusiveScan = 2,
+ GroupOperationClusteredReduce = 3,
+ GroupOperationPartitionedReduceNV = 6,
+ GroupOperationPartitionedInclusiveScanNV = 7,
+ GroupOperationPartitionedExclusiveScanNV = 8,
+ GroupOperationMax = 0x7fffffff,
+};
+
+enum KernelEnqueueFlags {
+ KernelEnqueueFlagsNoWait = 0,
+ KernelEnqueueFlagsWaitKernel = 1,
+ KernelEnqueueFlagsWaitWorkGroup = 2,
+ KernelEnqueueFlagsMax = 0x7fffffff,
+};
+
+enum KernelProfilingInfoShift {
+ KernelProfilingInfoCmdExecTimeShift = 0,
+ KernelProfilingInfoMax = 0x7fffffff,
+};
+
+enum KernelProfilingInfoMask {
+ KernelProfilingInfoMaskNone = 0,
+ KernelProfilingInfoCmdExecTimeMask = 0x00000001,
+};
+
+enum Capability {
+ CapabilityMatrix = 0,
+ CapabilityShader = 1,
+ CapabilityGeometry = 2,
+ CapabilityTessellation = 3,
+ CapabilityAddresses = 4,
+ CapabilityLinkage = 5,
+ CapabilityKernel = 6,
+ CapabilityVector16 = 7,
+ CapabilityFloat16Buffer = 8,
+ CapabilityFloat16 = 9,
+ CapabilityFloat64 = 10,
+ CapabilityInt64 = 11,
+ CapabilityInt64Atomics = 12,
+ CapabilityImageBasic = 13,
+ CapabilityImageReadWrite = 14,
+ CapabilityImageMipmap = 15,
+ CapabilityPipes = 17,
+ CapabilityGroups = 18,
+ CapabilityDeviceEnqueue = 19,
+ CapabilityLiteralSampler = 20,
+ CapabilityAtomicStorage = 21,
+ CapabilityInt16 = 22,
+ CapabilityTessellationPointSize = 23,
+ CapabilityGeometryPointSize = 24,
+ CapabilityImageGatherExtended = 25,
+ CapabilityStorageImageMultisample = 27,
+ CapabilityUniformBufferArrayDynamicIndexing = 28,
+ CapabilitySampledImageArrayDynamicIndexing = 29,
+ CapabilityStorageBufferArrayDynamicIndexing = 30,
+ CapabilityStorageImageArrayDynamicIndexing = 31,
+ CapabilityClipDistance = 32,
+ CapabilityCullDistance = 33,
+ CapabilityImageCubeArray = 34,
+ CapabilitySampleRateShading = 35,
+ CapabilityImageRect = 36,
+ CapabilitySampledRect = 37,
+ CapabilityGenericPointer = 38,
+ CapabilityInt8 = 39,
+ CapabilityInputAttachment = 40,
+ CapabilitySparseResidency = 41,
+ CapabilityMinLod = 42,
+ CapabilitySampled1D = 43,
+ CapabilityImage1D = 44,
+ CapabilitySampledCubeArray = 45,
+ CapabilitySampledBuffer = 46,
+ CapabilityImageBuffer = 47,
+ CapabilityImageMSArray = 48,
+ CapabilityStorageImageExtendedFormats = 49,
+ CapabilityImageQuery = 50,
+ CapabilityDerivativeControl = 51,
+ CapabilityInterpolationFunction = 52,
+ CapabilityTransformFeedback = 53,
+ CapabilityGeometryStreams = 54,
+ CapabilityStorageImageReadWithoutFormat = 55,
+ CapabilityStorageImageWriteWithoutFormat = 56,
+ CapabilityMultiViewport = 57,
+ CapabilitySubgroupDispatch = 58,
+ CapabilityNamedBarrier = 59,
+ CapabilityPipeStorage = 60,
+ CapabilityGroupNonUniform = 61,
+ CapabilityGroupNonUniformVote = 62,
+ CapabilityGroupNonUniformArithmetic = 63,
+ CapabilityGroupNonUniformBallot = 64,
+ CapabilityGroupNonUniformShuffle = 65,
+ CapabilityGroupNonUniformShuffleRelative = 66,
+ CapabilityGroupNonUniformClustered = 67,
+ CapabilityGroupNonUniformQuad = 68,
+ CapabilitySubgroupBallotKHR = 4423,
+ CapabilityDrawParameters = 4427,
+ CapabilitySubgroupVoteKHR = 4431,
+ CapabilityStorageBuffer16BitAccess = 4433,
+ CapabilityStorageUniformBufferBlock16 = 4433,
+ CapabilityStorageUniform16 = 4434,
+ CapabilityUniformAndStorageBuffer16BitAccess = 4434,
+ CapabilityStoragePushConstant16 = 4435,
+ CapabilityStorageInputOutput16 = 4436,
+ CapabilityDeviceGroup = 4437,
+ CapabilityMultiView = 4439,
+ CapabilityVariablePointersStorageBuffer = 4441,
+ CapabilityVariablePointers = 4442,
+ CapabilityAtomicStorageOps = 4445,
+ CapabilitySampleMaskPostDepthCoverage = 4447,
+ CapabilityStorageBuffer8BitAccess = 4448,
+ CapabilityUniformAndStorageBuffer8BitAccess = 4449,
+ CapabilityStoragePushConstant8 = 4450,
+ CapabilityDenormPreserve = 4464,
+ CapabilityDenormFlushToZero = 4465,
+ CapabilitySignedZeroInfNanPreserve = 4466,
+ CapabilityRoundingModeRTE = 4467,
+ CapabilityRoundingModeRTZ = 4468,
+ CapabilityFloat16ImageAMD = 5008,
+ CapabilityImageGatherBiasLodAMD = 5009,
+ CapabilityFragmentMaskAMD = 5010,
+ CapabilityStencilExportEXT = 5013,
+ CapabilityImageReadWriteLodAMD = 5015,
+ CapabilitySampleMaskOverrideCoverageNV = 5249,
+ CapabilityGeometryShaderPassthroughNV = 5251,
+ CapabilityShaderViewportIndexLayerEXT = 5254,
+ CapabilityShaderViewportIndexLayerNV = 5254,
+ CapabilityShaderViewportMaskNV = 5255,
+ CapabilityShaderStereoViewNV = 5259,
+ CapabilityPerViewAttributesNV = 5260,
+ CapabilityFragmentFullyCoveredEXT = 5265,
+ CapabilityMeshShadingNV = 5266,
+ CapabilityImageFootprintNV = 5282,
+ CapabilityFragmentBarycentricNV = 5284,
+ CapabilityComputeDerivativeGroupQuadsNV = 5288,
+ CapabilityFragmentDensityEXT = 5291,
+ CapabilityShadingRateNV = 5291,
+ CapabilityGroupNonUniformPartitionedNV = 5297,
+ CapabilityShaderNonUniformEXT = 5301,
+ CapabilityRuntimeDescriptorArrayEXT = 5302,
+ CapabilityInputAttachmentArrayDynamicIndexingEXT = 5303,
+ CapabilityUniformTexelBufferArrayDynamicIndexingEXT = 5304,
+ CapabilityStorageTexelBufferArrayDynamicIndexingEXT = 5305,
+ CapabilityUniformBufferArrayNonUniformIndexingEXT = 5306,
+ CapabilitySampledImageArrayNonUniformIndexingEXT = 5307,
+ CapabilityStorageBufferArrayNonUniformIndexingEXT = 5308,
+ CapabilityStorageImageArrayNonUniformIndexingEXT = 5309,
+ CapabilityInputAttachmentArrayNonUniformIndexingEXT = 5310,
+ CapabilityUniformTexelBufferArrayNonUniformIndexingEXT = 5311,
+ CapabilityStorageTexelBufferArrayNonUniformIndexingEXT = 5312,
+ CapabilityRayTracingNV = 5340,
+ CapabilityVulkanMemoryModelKHR = 5345,
+ CapabilityVulkanMemoryModelDeviceScopeKHR = 5346,
+ CapabilityPhysicalStorageBufferAddressesEXT = 5347,
+ CapabilityComputeDerivativeGroupLinearNV = 5350,
+ CapabilityCooperativeMatrixNV = 5357,
+ CapabilitySubgroupShuffleINTEL = 5568,
+ CapabilitySubgroupBufferBlockIOINTEL = 5569,
+ CapabilitySubgroupImageBlockIOINTEL = 5570,
+ CapabilitySubgroupImageMediaBlockIOINTEL = 5579,
+ CapabilitySubgroupAvcMotionEstimationINTEL = 5696,
+ CapabilitySubgroupAvcMotionEstimationIntraINTEL = 5697,
+ CapabilitySubgroupAvcMotionEstimationChromaINTEL = 5698,
+ CapabilityMax = 0x7fffffff,
+};
+
+enum Op {
+ OpNop = 0,
+ OpUndef = 1,
+ OpSourceContinued = 2,
+ OpSource = 3,
+ OpSourceExtension = 4,
+ OpName = 5,
+ OpMemberName = 6,
+ OpString = 7,
+ OpLine = 8,
+ OpExtension = 10,
+ OpExtInstImport = 11,
+ OpExtInst = 12,
+ OpMemoryModel = 14,
+ OpEntryPoint = 15,
+ OpExecutionMode = 16,
+ OpCapability = 17,
+ OpTypeVoid = 19,
+ OpTypeBool = 20,
+ OpTypeInt = 21,
+ OpTypeFloat = 22,
+ OpTypeVector = 23,
+ OpTypeMatrix = 24,
+ OpTypeImage = 25,
+ OpTypeSampler = 26,
+ OpTypeSampledImage = 27,
+ OpTypeArray = 28,
+ OpTypeRuntimeArray = 29,
+ OpTypeStruct = 30,
+ OpTypeOpaque = 31,
+ OpTypePointer = 32,
+ OpTypeFunction = 33,
+ OpTypeEvent = 34,
+ OpTypeDeviceEvent = 35,
+ OpTypeReserveId = 36,
+ OpTypeQueue = 37,
+ OpTypePipe = 38,
+ OpTypeForwardPointer = 39,
+ OpConstantTrue = 41,
+ OpConstantFalse = 42,
+ OpConstant = 43,
+ OpConstantComposite = 44,
+ OpConstantSampler = 45,
+ OpConstantNull = 46,
+ OpSpecConstantTrue = 48,
+ OpSpecConstantFalse = 49,
+ OpSpecConstant = 50,
+ OpSpecConstantComposite = 51,
+ OpSpecConstantOp = 52,
+ OpFunction = 54,
+ OpFunctionParameter = 55,
+ OpFunctionEnd = 56,
+ OpFunctionCall = 57,
+ OpVariable = 59,
+ OpImageTexelPointer = 60,
+ OpLoad = 61,
+ OpStore = 62,
+ OpCopyMemory = 63,
+ OpCopyMemorySized = 64,
+ OpAccessChain = 65,
+ OpInBoundsAccessChain = 66,
+ OpPtrAccessChain = 67,
+ OpArrayLength = 68,
+ OpGenericPtrMemSemantics = 69,
+ OpInBoundsPtrAccessChain = 70,
+ OpDecorate = 71,
+ OpMemberDecorate = 72,
+ OpDecorationGroup = 73,
+ OpGroupDecorate = 74,
+ OpGroupMemberDecorate = 75,
+ OpVectorExtractDynamic = 77,
+ OpVectorInsertDynamic = 78,
+ OpVectorShuffle = 79,
+ OpCompositeConstruct = 80,
+ OpCompositeExtract = 81,
+ OpCompositeInsert = 82,
+ OpCopyObject = 83,
+ OpTranspose = 84,
+ OpSampledImage = 86,
+ OpImageSampleImplicitLod = 87,
+ OpImageSampleExplicitLod = 88,
+ OpImageSampleDrefImplicitLod = 89,
+ OpImageSampleDrefExplicitLod = 90,
+ OpImageSampleProjImplicitLod = 91,
+ OpImageSampleProjExplicitLod = 92,
+ OpImageSampleProjDrefImplicitLod = 93,
+ OpImageSampleProjDrefExplicitLod = 94,
+ OpImageFetch = 95,
+ OpImageGather = 96,
+ OpImageDrefGather = 97,
+ OpImageRead = 98,
+ OpImageWrite = 99,
+ OpImage = 100,
+ OpImageQueryFormat = 101,
+ OpImageQueryOrder = 102,
+ OpImageQuerySizeLod = 103,
+ OpImageQuerySize = 104,
+ OpImageQueryLod = 105,
+ OpImageQueryLevels = 106,
+ OpImageQuerySamples = 107,
+ OpConvertFToU = 109,
+ OpConvertFToS = 110,
+ OpConvertSToF = 111,
+ OpConvertUToF = 112,
+ OpUConvert = 113,
+ OpSConvert = 114,
+ OpFConvert = 115,
+ OpQuantizeToF16 = 116,
+ OpConvertPtrToU = 117,
+ OpSatConvertSToU = 118,
+ OpSatConvertUToS = 119,
+ OpConvertUToPtr = 120,
+ OpPtrCastToGeneric = 121,
+ OpGenericCastToPtr = 122,
+ OpGenericCastToPtrExplicit = 123,
+ OpBitcast = 124,
+ OpSNegate = 126,
+ OpFNegate = 127,
+ OpIAdd = 128,
+ OpFAdd = 129,
+ OpISub = 130,
+ OpFSub = 131,
+ OpIMul = 132,
+ OpFMul = 133,
+ OpUDiv = 134,
+ OpSDiv = 135,
+ OpFDiv = 136,
+ OpUMod = 137,
+ OpSRem = 138,
+ OpSMod = 139,
+ OpFRem = 140,
+ OpFMod = 141,
+ OpVectorTimesScalar = 142,
+ OpMatrixTimesScalar = 143,
+ OpVectorTimesMatrix = 144,
+ OpMatrixTimesVector = 145,
+ OpMatrixTimesMatrix = 146,
+ OpOuterProduct = 147,
+ OpDot = 148,
+ OpIAddCarry = 149,
+ OpISubBorrow = 150,
+ OpUMulExtended = 151,
+ OpSMulExtended = 152,
+ OpAny = 154,
+ OpAll = 155,
+ OpIsNan = 156,
+ OpIsInf = 157,
+ OpIsFinite = 158,
+ OpIsNormal = 159,
+ OpSignBitSet = 160,
+ OpLessOrGreater = 161,
+ OpOrdered = 162,
+ OpUnordered = 163,
+ OpLogicalEqual = 164,
+ OpLogicalNotEqual = 165,
+ OpLogicalOr = 166,
+ OpLogicalAnd = 167,
+ OpLogicalNot = 168,
+ OpSelect = 169,
+ OpIEqual = 170,
+ OpINotEqual = 171,
+ OpUGreaterThan = 172,
+ OpSGreaterThan = 173,
+ OpUGreaterThanEqual = 174,
+ OpSGreaterThanEqual = 175,
+ OpULessThan = 176,
+ OpSLessThan = 177,
+ OpULessThanEqual = 178,
+ OpSLessThanEqual = 179,
+ OpFOrdEqual = 180,
+ OpFUnordEqual = 181,
+ OpFOrdNotEqual = 182,
+ OpFUnordNotEqual = 183,
+ OpFOrdLessThan = 184,
+ OpFUnordLessThan = 185,
+ OpFOrdGreaterThan = 186,
+ OpFUnordGreaterThan = 187,
+ OpFOrdLessThanEqual = 188,
+ OpFUnordLessThanEqual = 189,
+ OpFOrdGreaterThanEqual = 190,
+ OpFUnordGreaterThanEqual = 191,
+ OpShiftRightLogical = 194,
+ OpShiftRightArithmetic = 195,
+ OpShiftLeftLogical = 196,
+ OpBitwiseOr = 197,
+ OpBitwiseXor = 198,
+ OpBitwiseAnd = 199,
+ OpNot = 200,
+ OpBitFieldInsert = 201,
+ OpBitFieldSExtract = 202,
+ OpBitFieldUExtract = 203,
+ OpBitReverse = 204,
+ OpBitCount = 205,
+ OpDPdx = 207,
+ OpDPdy = 208,
+ OpFwidth = 209,
+ OpDPdxFine = 210,
+ OpDPdyFine = 211,
+ OpFwidthFine = 212,
+ OpDPdxCoarse = 213,
+ OpDPdyCoarse = 214,
+ OpFwidthCoarse = 215,
+ OpEmitVertex = 218,
+ OpEndPrimitive = 219,
+ OpEmitStreamVertex = 220,
+ OpEndStreamPrimitive = 221,
+ OpControlBarrier = 224,
+ OpMemoryBarrier = 225,
+ OpAtomicLoad = 227,
+ OpAtomicStore = 228,
+ OpAtomicExchange = 229,
+ OpAtomicCompareExchange = 230,
+ OpAtomicCompareExchangeWeak = 231,
+ OpAtomicIIncrement = 232,
+ OpAtomicIDecrement = 233,
+ OpAtomicIAdd = 234,
+ OpAtomicISub = 235,
+ OpAtomicSMin = 236,
+ OpAtomicUMin = 237,
+ OpAtomicSMax = 238,
+ OpAtomicUMax = 239,
+ OpAtomicAnd = 240,
+ OpAtomicOr = 241,
+ OpAtomicXor = 242,
+ OpPhi = 245,
+ OpLoopMerge = 246,
+ OpSelectionMerge = 247,
+ OpLabel = 248,
+ OpBranch = 249,
+ OpBranchConditional = 250,
+ OpSwitch = 251,
+ OpKill = 252,
+ OpReturn = 253,
+ OpReturnValue = 254,
+ OpUnreachable = 255,
+ OpLifetimeStart = 256,
+ OpLifetimeStop = 257,
+ OpGroupAsyncCopy = 259,
+ OpGroupWaitEvents = 260,
+ OpGroupAll = 261,
+ OpGroupAny = 262,
+ OpGroupBroadcast = 263,
+ OpGroupIAdd = 264,
+ OpGroupFAdd = 265,
+ OpGroupFMin = 266,
+ OpGroupUMin = 267,
+ OpGroupSMin = 268,
+ OpGroupFMax = 269,
+ OpGroupUMax = 270,
+ OpGroupSMax = 271,
+ OpReadPipe = 274,
+ OpWritePipe = 275,
+ OpReservedReadPipe = 276,
+ OpReservedWritePipe = 277,
+ OpReserveReadPipePackets = 278,
+ OpReserveWritePipePackets = 279,
+ OpCommitReadPipe = 280,
+ OpCommitWritePipe = 281,
+ OpIsValidReserveId = 282,
+ OpGetNumPipePackets = 283,
+ OpGetMaxPipePackets = 284,
+ OpGroupReserveReadPipePackets = 285,
+ OpGroupReserveWritePipePackets = 286,
+ OpGroupCommitReadPipe = 287,
+ OpGroupCommitWritePipe = 288,
+ OpEnqueueMarker = 291,
+ OpEnqueueKernel = 292,
+ OpGetKernelNDrangeSubGroupCount = 293,
+ OpGetKernelNDrangeMaxSubGroupSize = 294,
+ OpGetKernelWorkGroupSize = 295,
+ OpGetKernelPreferredWorkGroupSizeMultiple = 296,
+ OpRetainEvent = 297,
+ OpReleaseEvent = 298,
+ OpCreateUserEvent = 299,
+ OpIsValidEvent = 300,
+ OpSetUserEventStatus = 301,
+ OpCaptureEventProfilingInfo = 302,
+ OpGetDefaultQueue = 303,
+ OpBuildNDRange = 304,
+ OpImageSparseSampleImplicitLod = 305,
+ OpImageSparseSampleExplicitLod = 306,
+ OpImageSparseSampleDrefImplicitLod = 307,
+ OpImageSparseSampleDrefExplicitLod = 308,
+ OpImageSparseSampleProjImplicitLod = 309,
+ OpImageSparseSampleProjExplicitLod = 310,
+ OpImageSparseSampleProjDrefImplicitLod = 311,
+ OpImageSparseSampleProjDrefExplicitLod = 312,
+ OpImageSparseFetch = 313,
+ OpImageSparseGather = 314,
+ OpImageSparseDrefGather = 315,
+ OpImageSparseTexelsResident = 316,
+ OpNoLine = 317,
+ OpAtomicFlagTestAndSet = 318,
+ OpAtomicFlagClear = 319,
+ OpImageSparseRead = 320,
+ OpSizeOf = 321,
+ OpTypePipeStorage = 322,
+ OpConstantPipeStorage = 323,
+ OpCreatePipeFromPipeStorage = 324,
+ OpGetKernelLocalSizeForSubgroupCount = 325,
+ OpGetKernelMaxNumSubgroups = 326,
+ OpTypeNamedBarrier = 327,
+ OpNamedBarrierInitialize = 328,
+ OpMemoryNamedBarrier = 329,
+ OpModuleProcessed = 330,
+ OpExecutionModeId = 331,
+ OpDecorateId = 332,
+ OpGroupNonUniformElect = 333,
+ OpGroupNonUniformAll = 334,
+ OpGroupNonUniformAny = 335,
+ OpGroupNonUniformAllEqual = 336,
+ OpGroupNonUniformBroadcast = 337,
+ OpGroupNonUniformBroadcastFirst = 338,
+ OpGroupNonUniformBallot = 339,
+ OpGroupNonUniformInverseBallot = 340,
+ OpGroupNonUniformBallotBitExtract = 341,
+ OpGroupNonUniformBallotBitCount = 342,
+ OpGroupNonUniformBallotFindLSB = 343,
+ OpGroupNonUniformBallotFindMSB = 344,
+ OpGroupNonUniformShuffle = 345,
+ OpGroupNonUniformShuffleXor = 346,
+ OpGroupNonUniformShuffleUp = 347,
+ OpGroupNonUniformShuffleDown = 348,
+ OpGroupNonUniformIAdd = 349,
+ OpGroupNonUniformFAdd = 350,
+ OpGroupNonUniformIMul = 351,
+ OpGroupNonUniformFMul = 352,
+ OpGroupNonUniformSMin = 353,
+ OpGroupNonUniformUMin = 354,
+ OpGroupNonUniformFMin = 355,
+ OpGroupNonUniformSMax = 356,
+ OpGroupNonUniformUMax = 357,
+ OpGroupNonUniformFMax = 358,
+ OpGroupNonUniformBitwiseAnd = 359,
+ OpGroupNonUniformBitwiseOr = 360,
+ OpGroupNonUniformBitwiseXor = 361,
+ OpGroupNonUniformLogicalAnd = 362,
+ OpGroupNonUniformLogicalOr = 363,
+ OpGroupNonUniformLogicalXor = 364,
+ OpGroupNonUniformQuadBroadcast = 365,
+ OpGroupNonUniformQuadSwap = 366,
+ OpCopyLogical = 400,
+ OpPtrEqual = 401,
+ OpPtrNotEqual = 402,
+ OpPtrDiff = 403,
+ OpSubgroupBallotKHR = 4421,
+ OpSubgroupFirstInvocationKHR = 4422,
+ OpSubgroupAllKHR = 4428,
+ OpSubgroupAnyKHR = 4429,
+ OpSubgroupAllEqualKHR = 4430,
+ OpSubgroupReadInvocationKHR = 4432,
+ OpGroupIAddNonUniformAMD = 5000,
+ OpGroupFAddNonUniformAMD = 5001,
+ OpGroupFMinNonUniformAMD = 5002,
+ OpGroupUMinNonUniformAMD = 5003,
+ OpGroupSMinNonUniformAMD = 5004,
+ OpGroupFMaxNonUniformAMD = 5005,
+ OpGroupUMaxNonUniformAMD = 5006,
+ OpGroupSMaxNonUniformAMD = 5007,
+ OpFragmentMaskFetchAMD = 5011,
+ OpFragmentFetchAMD = 5012,
+ OpImageSampleFootprintNV = 5283,
+ OpGroupNonUniformPartitionNV = 5296,
+ OpWritePackedPrimitiveIndices4x8NV = 5299,
+ OpReportIntersectionNV = 5334,
+ OpIgnoreIntersectionNV = 5335,
+ OpTerminateRayNV = 5336,
+ OpTraceNV = 5337,
+ OpTypeAccelerationStructureNV = 5341,
+ OpExecuteCallableNV = 5344,
+ OpTypeCooperativeMatrixNV = 5358,
+ OpCooperativeMatrixLoadNV = 5359,
+ OpCooperativeMatrixStoreNV = 5360,
+ OpCooperativeMatrixMulAddNV = 5361,
+ OpCooperativeMatrixLengthNV = 5362,
+ OpSubgroupShuffleINTEL = 5571,
+ OpSubgroupShuffleDownINTEL = 5572,
+ OpSubgroupShuffleUpINTEL = 5573,
+ OpSubgroupShuffleXorINTEL = 5574,
+ OpSubgroupBlockReadINTEL = 5575,
+ OpSubgroupBlockWriteINTEL = 5576,
+ OpSubgroupImageBlockReadINTEL = 5577,
+ OpSubgroupImageBlockWriteINTEL = 5578,
+ OpSubgroupImageMediaBlockReadINTEL = 5580,
+ OpSubgroupImageMediaBlockWriteINTEL = 5581,
+ OpDecorateString = 5632,
+ OpDecorateStringGOOGLE = 5632,
+ OpMemberDecorateString = 5633,
+ OpMemberDecorateStringGOOGLE = 5633,
+ OpVmeImageINTEL = 5699,
+ OpTypeVmeImageINTEL = 5700,
+ OpTypeAvcImePayloadINTEL = 5701,
+ OpTypeAvcRefPayloadINTEL = 5702,
+ OpTypeAvcSicPayloadINTEL = 5703,
+ OpTypeAvcMcePayloadINTEL = 5704,
+ OpTypeAvcMceResultINTEL = 5705,
+ OpTypeAvcImeResultINTEL = 5706,
+ OpTypeAvcImeResultSingleReferenceStreamoutINTEL = 5707,
+ OpTypeAvcImeResultDualReferenceStreamoutINTEL = 5708,
+ OpTypeAvcImeSingleReferenceStreaminINTEL = 5709,
+ OpTypeAvcImeDualReferenceStreaminINTEL = 5710,
+ OpTypeAvcRefResultINTEL = 5711,
+ OpTypeAvcSicResultINTEL = 5712,
+ OpSubgroupAvcMceGetDefaultInterBaseMultiReferencePenaltyINTEL = 5713,
+ OpSubgroupAvcMceSetInterBaseMultiReferencePenaltyINTEL = 5714,
+ OpSubgroupAvcMceGetDefaultInterShapePenaltyINTEL = 5715,
+ OpSubgroupAvcMceSetInterShapePenaltyINTEL = 5716,
+ OpSubgroupAvcMceGetDefaultInterDirectionPenaltyINTEL = 5717,
+ OpSubgroupAvcMceSetInterDirectionPenaltyINTEL = 5718,
+ OpSubgroupAvcMceGetDefaultIntraLumaShapePenaltyINTEL = 5719,
+ OpSubgroupAvcMceGetDefaultInterMotionVectorCostTableINTEL = 5720,
+ OpSubgroupAvcMceGetDefaultHighPenaltyCostTableINTEL = 5721,
+ OpSubgroupAvcMceGetDefaultMediumPenaltyCostTableINTEL = 5722,
+ OpSubgroupAvcMceGetDefaultLowPenaltyCostTableINTEL = 5723,
+ OpSubgroupAvcMceSetMotionVectorCostFunctionINTEL = 5724,
+ OpSubgroupAvcMceGetDefaultIntraLumaModePenaltyINTEL = 5725,
+ OpSubgroupAvcMceGetDefaultNonDcLumaIntraPenaltyINTEL = 5726,
+ OpSubgroupAvcMceGetDefaultIntraChromaModeBasePenaltyINTEL = 5727,
+ OpSubgroupAvcMceSetAcOnlyHaarINTEL = 5728,
+ OpSubgroupAvcMceSetSourceInterlacedFieldPolarityINTEL = 5729,
+ OpSubgroupAvcMceSetSingleReferenceInterlacedFieldPolarityINTEL = 5730,
+ OpSubgroupAvcMceSetDualReferenceInterlacedFieldPolaritiesINTEL = 5731,
+ OpSubgroupAvcMceConvertToImePayloadINTEL = 5732,
+ OpSubgroupAvcMceConvertToImeResultINTEL = 5733,
+ OpSubgroupAvcMceConvertToRefPayloadINTEL = 5734,
+ OpSubgroupAvcMceConvertToRefResultINTEL = 5735,
+ OpSubgroupAvcMceConvertToSicPayloadINTEL = 5736,
+ OpSubgroupAvcMceConvertToSicResultINTEL = 5737,
+ OpSubgroupAvcMceGetMotionVectorsINTEL = 5738,
+ OpSubgroupAvcMceGetInterDistortionsINTEL = 5739,
+ OpSubgroupAvcMceGetBestInterDistortionsINTEL = 5740,
+ OpSubgroupAvcMceGetInterMajorShapeINTEL = 5741,
+ OpSubgroupAvcMceGetInterMinorShapeINTEL = 5742,
+ OpSubgroupAvcMceGetInterDirectionsINTEL = 5743,
+ OpSubgroupAvcMceGetInterMotionVectorCountINTEL = 5744,
+ OpSubgroupAvcMceGetInterReferenceIdsINTEL = 5745,
+ OpSubgroupAvcMceGetInterReferenceInterlacedFieldPolaritiesINTEL = 5746,
+ OpSubgroupAvcImeInitializeINTEL = 5747,
+ OpSubgroupAvcImeSetSingleReferenceINTEL = 5748,
+ OpSubgroupAvcImeSetDualReferenceINTEL = 5749,
+ OpSubgroupAvcImeRefWindowSizeINTEL = 5750,
+ OpSubgroupAvcImeAdjustRefOffsetINTEL = 5751,
+ OpSubgroupAvcImeConvertToMcePayloadINTEL = 5752,
+ OpSubgroupAvcImeSetMaxMotionVectorCountINTEL = 5753,
+ OpSubgroupAvcImeSetUnidirectionalMixDisableINTEL = 5754,
+ OpSubgroupAvcImeSetEarlySearchTerminationThresholdINTEL = 5755,
+ OpSubgroupAvcImeSetWeightedSadINTEL = 5756,
+ OpSubgroupAvcImeEvaluateWithSingleReferenceINTEL = 5757,
+ OpSubgroupAvcImeEvaluateWithDualReferenceINTEL = 5758,
+ OpSubgroupAvcImeEvaluateWithSingleReferenceStreaminINTEL = 5759,
+ OpSubgroupAvcImeEvaluateWithDualReferenceStreaminINTEL = 5760,
+ OpSubgroupAvcImeEvaluateWithSingleReferenceStreamoutINTEL = 5761,
+ OpSubgroupAvcImeEvaluateWithDualReferenceStreamoutINTEL = 5762,
+ OpSubgroupAvcImeEvaluateWithSingleReferenceStreaminoutINTEL = 5763,
+ OpSubgroupAvcImeEvaluateWithDualReferenceStreaminoutINTEL = 5764,
+ OpSubgroupAvcImeConvertToMceResultINTEL = 5765,
+ OpSubgroupAvcImeGetSingleReferenceStreaminINTEL = 5766,
+ OpSubgroupAvcImeGetDualReferenceStreaminINTEL = 5767,
+ OpSubgroupAvcImeStripSingleReferenceStreamoutINTEL = 5768,
+ OpSubgroupAvcImeStripDualReferenceStreamoutINTEL = 5769,
+ OpSubgroupAvcImeGetStreamoutSingleReferenceMajorShapeMotionVectorsINTEL = 5770,
+ OpSubgroupAvcImeGetStreamoutSingleReferenceMajorShapeDistortionsINTEL = 5771,
+ OpSubgroupAvcImeGetStreamoutSingleReferenceMajorShapeReferenceIdsINTEL = 5772,
+ OpSubgroupAvcImeGetStreamoutDualReferenceMajorShapeMotionVectorsINTEL = 5773,
+ OpSubgroupAvcImeGetStreamoutDualReferenceMajorShapeDistortionsINTEL = 5774,
+ OpSubgroupAvcImeGetStreamoutDualReferenceMajorShapeReferenceIdsINTEL = 5775,
+ OpSubgroupAvcImeGetBorderReachedINTEL = 5776,
+ OpSubgroupAvcImeGetTruncatedSearchIndicationINTEL = 5777,
+ OpSubgroupAvcImeGetUnidirectionalEarlySearchTerminationINTEL = 5778,
+ OpSubgroupAvcImeGetWeightingPatternMinimumMotionVectorINTEL = 5779,
+ OpSubgroupAvcImeGetWeightingPatternMinimumDistortionINTEL = 5780,
+ OpSubgroupAvcFmeInitializeINTEL = 5781,
+ OpSubgroupAvcBmeInitializeINTEL = 5782,
+ OpSubgroupAvcRefConvertToMcePayloadINTEL = 5783,
+ OpSubgroupAvcRefSetBidirectionalMixDisableINTEL = 5784,
+ OpSubgroupAvcRefSetBilinearFilterEnableINTEL = 5785,
+ OpSubgroupAvcRefEvaluateWithSingleReferenceINTEL = 5786,
+ OpSubgroupAvcRefEvaluateWithDualReferenceINTEL = 5787,
+ OpSubgroupAvcRefEvaluateWithMultiReferenceINTEL = 5788,
+ OpSubgroupAvcRefEvaluateWithMultiReferenceInterlacedINTEL = 5789,
+ OpSubgroupAvcRefConvertToMceResultINTEL = 5790,
+ OpSubgroupAvcSicInitializeINTEL = 5791,
+ OpSubgroupAvcSicConfigureSkcINTEL = 5792,
+ OpSubgroupAvcSicConfigureIpeLumaINTEL = 5793,
+ OpSubgroupAvcSicConfigureIpeLumaChromaINTEL = 5794,
+ OpSubgroupAvcSicGetMotionVectorMaskINTEL = 5795,
+ OpSubgroupAvcSicConvertToMcePayloadINTEL = 5796,
+ OpSubgroupAvcSicSetIntraLumaShapePenaltyINTEL = 5797,
+ OpSubgroupAvcSicSetIntraLumaModeCostFunctionINTEL = 5798,
+ OpSubgroupAvcSicSetIntraChromaModeCostFunctionINTEL = 5799,
+ OpSubgroupAvcSicSetBilinearFilterEnableINTEL = 5800,
+ OpSubgroupAvcSicSetSkcForwardTransformEnableINTEL = 5801,
+ OpSubgroupAvcSicSetBlockBasedRawSkipSadINTEL = 5802,
+ OpSubgroupAvcSicEvaluateIpeINTEL = 5803,
+ OpSubgroupAvcSicEvaluateWithSingleReferenceINTEL = 5804,
+ OpSubgroupAvcSicEvaluateWithDualReferenceINTEL = 5805,
+ OpSubgroupAvcSicEvaluateWithMultiReferenceINTEL = 5806,
+ OpSubgroupAvcSicEvaluateWithMultiReferenceInterlacedINTEL = 5807,
+ OpSubgroupAvcSicConvertToMceResultINTEL = 5808,
+ OpSubgroupAvcSicGetIpeLumaShapeINTEL = 5809,
+ OpSubgroupAvcSicGetBestIpeLumaDistortionINTEL = 5810,
+ OpSubgroupAvcSicGetBestIpeChromaDistortionINTEL = 5811,
+ OpSubgroupAvcSicGetPackedIpeLumaModesINTEL = 5812,
+ OpSubgroupAvcSicGetIpeChromaModeINTEL = 5813,
+ OpSubgroupAvcSicGetPackedSkcLumaCountThresholdINTEL = 5814,
+ OpSubgroupAvcSicGetPackedSkcLumaSumThresholdINTEL = 5815,
+ OpSubgroupAvcSicGetInterRawSadsINTEL = 5816,
+ OpMax = 0x7fffffff,
+};
+
+#ifdef SPV_ENABLE_UTILITY_CODE
+inline void HasResultAndType(Op opcode, bool *hasResult, bool *hasResultType) {
+ *hasResult = *hasResultType = false;
+ switch (opcode) {
+ default: /* unknown opcode */ break;
+ case OpNop: *hasResult = false; *hasResultType = false; break;
+ case OpUndef: *hasResult = true; *hasResultType = true; break;
+ case OpSourceContinued: *hasResult = false; *hasResultType = false; break;
+ case OpSource: *hasResult = false; *hasResultType = false; break;
+ case OpSourceExtension: *hasResult = false; *hasResultType = false; break;
+ case OpName: *hasResult = false; *hasResultType = false; break;
+ case OpMemberName: *hasResult = false; *hasResultType = false; break;
+ case OpString: *hasResult = true; *hasResultType = false; break;
+ case OpLine: *hasResult = false; *hasResultType = false; break;
+ case OpExtension: *hasResult = false; *hasResultType = false; break;
+ case OpExtInstImport: *hasResult = true; *hasResultType = false; break;
+ case OpExtInst: *hasResult = true; *hasResultType = true; break;
+ case OpMemoryModel: *hasResult = false; *hasResultType = false; break;
+ case OpEntryPoint: *hasResult = false; *hasResultType = false; break;
+ case OpExecutionMode: *hasResult = false; *hasResultType = false; break;
+ case OpCapability: *hasResult = false; *hasResultType = false; break;
+ case OpTypeVoid: *hasResult = true; *hasResultType = false; break;
+ case OpTypeBool: *hasResult = true; *hasResultType = false; break;
+ case OpTypeInt: *hasResult = true; *hasResultType = false; break;
+ case OpTypeFloat: *hasResult = true; *hasResultType = false; break;
+ case OpTypeVector: *hasResult = true; *hasResultType = false; break;
+ case OpTypeMatrix: *hasResult = true; *hasResultType = false; break;
+ case OpTypeImage: *hasResult = true; *hasResultType = false; break;
+ case OpTypeSampler: *hasResult = true; *hasResultType = false; break;
+ case OpTypeSampledImage: *hasResult = true; *hasResultType = false; break;
+ case OpTypeArray: *hasResult = true; *hasResultType = false; break;
+ case OpTypeRuntimeArray: *hasResult = true; *hasResultType = false; break;
+ case OpTypeStruct: *hasResult = true; *hasResultType = false; break;
+ case OpTypeOpaque: *hasResult = true; *hasResultType = false; break;
+ case OpTypePointer: *hasResult = true; *hasResultType = false; break;
+ case OpTypeFunction: *hasResult = true; *hasResultType = false; break;
+ case OpTypeEvent: *hasResult = true; *hasResultType = false; break;
+ case OpTypeDeviceEvent: *hasResult = true; *hasResultType = false; break;
+ case OpTypeReserveId: *hasResult = true; *hasResultType = false; break;
+ case OpTypeQueue: *hasResult = true; *hasResultType = false; break;
+ case OpTypePipe: *hasResult = true; *hasResultType = false; break;
+ case OpTypeForwardPointer: *hasResult = false; *hasResultType = false; break;
+ case OpConstantTrue: *hasResult = true; *hasResultType = true; break;
+ case OpConstantFalse: *hasResult = true; *hasResultType = true; break;
+ case OpConstant: *hasResult = true; *hasResultType = true; break;
+ case OpConstantComposite: *hasResult = true; *hasResultType = true; break;
+ case OpConstantSampler: *hasResult = true; *hasResultType = true; break;
+ case OpConstantNull: *hasResult = true; *hasResultType = true; break;
+ case OpSpecConstantTrue: *hasResult = true; *hasResultType = true; break;
+ case OpSpecConstantFalse: *hasResult = true; *hasResultType = true; break;
+ case OpSpecConstant: *hasResult = true; *hasResultType = true; break;
+ case OpSpecConstantComposite: *hasResult = true; *hasResultType = true; break;
+ case OpSpecConstantOp: *hasResult = true; *hasResultType = true; break;
+ case OpFunction: *hasResult = true; *hasResultType = true; break;
+ case OpFunctionParameter: *hasResult = true; *hasResultType = true; break;
+ case OpFunctionEnd: *hasResult = false; *hasResultType = false; break;
+ case OpFunctionCall: *hasResult = true; *hasResultType = true; break;
+ case OpVariable: *hasResult = true; *hasResultType = true; break;
+ case OpImageTexelPointer: *hasResult = true; *hasResultType = true; break;
+ case OpLoad: *hasResult = true; *hasResultType = true; break;
+ case OpStore: *hasResult = false; *hasResultType = false; break;
+ case OpCopyMemory: *hasResult = false; *hasResultType = false; break;
+ case OpCopyMemorySized: *hasResult = false; *hasResultType = false; break;
+ case OpAccessChain: *hasResult = true; *hasResultType = true; break;
+ case OpInBoundsAccessChain: *hasResult = true; *hasResultType = true; break;
+ case OpPtrAccessChain: *hasResult = true; *hasResultType = true; break;
+ case OpArrayLength: *hasResult = true; *hasResultType = true; break;
+ case OpGenericPtrMemSemantics: *hasResult = true; *hasResultType = true; break;
+ case OpInBoundsPtrAccessChain: *hasResult = true; *hasResultType = true; break;
+ case OpDecorate: *hasResult = false; *hasResultType = false; break;
+ case OpMemberDecorate: *hasResult = false; *hasResultType = false; break;
+ case OpDecorationGroup: *hasResult = true; *hasResultType = false; break;
+ case OpGroupDecorate: *hasResult = false; *hasResultType = false; break;
+ case OpGroupMemberDecorate: *hasResult = false; *hasResultType = false; break;
+ case OpVectorExtractDynamic: *hasResult = true; *hasResultType = true; break;
+ case OpVectorInsertDynamic: *hasResult = true; *hasResultType = true; break;
+ case OpVectorShuffle: *hasResult = true; *hasResultType = true; break;
+ case OpCompositeConstruct: *hasResult = true; *hasResultType = true; break;
+ case OpCompositeExtract: *hasResult = true; *hasResultType = true; break;
+ case OpCompositeInsert: *hasResult = true; *hasResultType = true; break;
+ case OpCopyObject: *hasResult = true; *hasResultType = true; break;
+ case OpTranspose: *hasResult = true; *hasResultType = true; break;
+ case OpSampledImage: *hasResult = true; *hasResultType = true; break;
+ case OpImageSampleImplicitLod: *hasResult = true; *hasResultType = true; break;
+ case OpImageSampleExplicitLod: *hasResult = true; *hasResultType = true; break;
+ case OpImageSampleDrefImplicitLod: *hasResult = true; *hasResultType = true; break;
+ case OpImageSampleDrefExplicitLod: *hasResult = true; *hasResultType = true; break;
+ case OpImageSampleProjImplicitLod: *hasResult = true; *hasResultType = true; break;
+ case OpImageSampleProjExplicitLod: *hasResult = true; *hasResultType = true; break;
+ case OpImageSampleProjDrefImplicitLod: *hasResult = true; *hasResultType = true; break;
+ case OpImageSampleProjDrefExplicitLod: *hasResult = true; *hasResultType = true; break;
+ case OpImageFetch: *hasResult = true; *hasResultType = true; break;
+ case OpImageGather: *hasResult = true; *hasResultType = true; break;
+ case OpImageDrefGather: *hasResult = true; *hasResultType = true; break;
+ case OpImageRead: *hasResult = true; *hasResultType = true; break;
+ case OpImageWrite: *hasResult = false; *hasResultType = false; break;
+ case OpImage: *hasResult = true; *hasResultType = true; break;
+ case OpImageQueryFormat: *hasResult = true; *hasResultType = true; break;
+ case OpImageQueryOrder: *hasResult = true; *hasResultType = true; break;
+ case OpImageQuerySizeLod: *hasResult = true; *hasResultType = true; break;
+ case OpImageQuerySize: *hasResult = true; *hasResultType = true; break;
+ case OpImageQueryLod: *hasResult = true; *hasResultType = true; break;
+ case OpImageQueryLevels: *hasResult = true; *hasResultType = true; break;
+ case OpImageQuerySamples: *hasResult = true; *hasResultType = true; break;
+ case OpConvertFToU: *hasResult = true; *hasResultType = true; break;
+ case OpConvertFToS: *hasResult = true; *hasResultType = true; break;
+ case OpConvertSToF: *hasResult = true; *hasResultType = true; break;
+ case OpConvertUToF: *hasResult = true; *hasResultType = true; break;
+ case OpUConvert: *hasResult = true; *hasResultType = true; break;
+ case OpSConvert: *hasResult = true; *hasResultType = true; break;
+ case OpFConvert: *hasResult = true; *hasResultType = true; break;
+ case OpQuantizeToF16: *hasResult = true; *hasResultType = true; break;
+ case OpConvertPtrToU: *hasResult = true; *hasResultType = true; break;
+ case OpSatConvertSToU: *hasResult = true; *hasResultType = true; break;
+ case OpSatConvertUToS: *hasResult = true; *hasResultType = true; break;
+ case OpConvertUToPtr: *hasResult = true; *hasResultType = true; break;
+ case OpPtrCastToGeneric: *hasResult = true; *hasResultType = true; break;
+ case OpGenericCastToPtr: *hasResult = true; *hasResultType = true; break;
+ case OpGenericCastToPtrExplicit: *hasResult = true; *hasResultType = true; break;
+ case OpBitcast: *hasResult = true; *hasResultType = true; break;
+ case OpSNegate: *hasResult = true; *hasResultType = true; break;
+ case OpFNegate: *hasResult = true; *hasResultType = true; break;
+ case OpIAdd: *hasResult = true; *hasResultType = true; break;
+ case OpFAdd: *hasResult = true; *hasResultType = true; break;
+ case OpISub: *hasResult = true; *hasResultType = true; break;
+ case OpFSub: *hasResult = true; *hasResultType = true; break;
+ case OpIMul: *hasResult = true; *hasResultType = true; break;
+ case OpFMul: *hasResult = true; *hasResultType = true; break;
+ case OpUDiv: *hasResult = true; *hasResultType = true; break;
+ case OpSDiv: *hasResult = true; *hasResultType = true; break;
+ case OpFDiv: *hasResult = true; *hasResultType = true; break;
+ case OpUMod: *hasResult = true; *hasResultType = true; break;
+ case OpSRem: *hasResult = true; *hasResultType = true; break;
+ case OpSMod: *hasResult = true; *hasResultType = true; break;
+ case OpFRem: *hasResult = true; *hasResultType = true; break;
+ case OpFMod: *hasResult = true; *hasResultType = true; break;
+ case OpVectorTimesScalar: *hasResult = true; *hasResultType = true; break;
+ case OpMatrixTimesScalar: *hasResult = true; *hasResultType = true; break;
+ case OpVectorTimesMatrix: *hasResult = true; *hasResultType = true; break;
+ case OpMatrixTimesVector: *hasResult = true; *hasResultType = true; break;
+ case OpMatrixTimesMatrix: *hasResult = true; *hasResultType = true; break;
+ case OpOuterProduct: *hasResult = true; *hasResultType = true; break;
+ case OpDot: *hasResult = true; *hasResultType = true; break;
+ case OpIAddCarry: *hasResult = true; *hasResultType = true; break;
+ case OpISubBorrow: *hasResult = true; *hasResultType = true; break;
+ case OpUMulExtended: *hasResult = true; *hasResultType = true; break;
+ case OpSMulExtended: *hasResult = true; *hasResultType = true; break;
+ case OpAny: *hasResult = true; *hasResultType = true; break;
+ case OpAll: *hasResult = true; *hasResultType = true; break;
+ case OpIsNan: *hasResult = true; *hasResultType = true; break;
+ case OpIsInf: *hasResult = true; *hasResultType = true; break;
+ case OpIsFinite: *hasResult = true; *hasResultType = true; break;
+ case OpIsNormal: *hasResult = true; *hasResultType = true; break;
+ case OpSignBitSet: *hasResult = true; *hasResultType = true; break;
+ case OpLessOrGreater: *hasResult = true; *hasResultType = true; break;
+ case OpOrdered: *hasResult = true; *hasResultType = true; break;
+ case OpUnordered: *hasResult = true; *hasResultType = true; break;
+ case OpLogicalEqual: *hasResult = true; *hasResultType = true; break;
+ case OpLogicalNotEqual: *hasResult = true; *hasResultType = true; break;
+ case OpLogicalOr: *hasResult = true; *hasResultType = true; break;
+ case OpLogicalAnd: *hasResult = true; *hasResultType = true; break;
+ case OpLogicalNot: *hasResult = true; *hasResultType = true; break;
+ case OpSelect: *hasResult = true; *hasResultType = true; break;
+ case OpIEqual: *hasResult = true; *hasResultType = true; break;
+ case OpINotEqual: *hasResult = true; *hasResultType = true; break;
+ case OpUGreaterThan: *hasResult = true; *hasResultType = true; break;
+ case OpSGreaterThan: *hasResult = true; *hasResultType = true; break;
+ case OpUGreaterThanEqual: *hasResult = true; *hasResultType = true; break;
+ case OpSGreaterThanEqual: *hasResult = true; *hasResultType = true; break;
+ case OpULessThan: *hasResult = true; *hasResultType = true; break;
+ case OpSLessThan: *hasResult = true; *hasResultType = true; break;
+ case OpULessThanEqual: *hasResult = true; *hasResultType = true; break;
+ case OpSLessThanEqual: *hasResult = true; *hasResultType = true; break;
+ case OpFOrdEqual: *hasResult = true; *hasResultType = true; break;
+ case OpFUnordEqual: *hasResult = true; *hasResultType = true; break;
+ case OpFOrdNotEqual: *hasResult = true; *hasResultType = true; break;
+ case OpFUnordNotEqual: *hasResult = true; *hasResultType = true; break;
+ case OpFOrdLessThan: *hasResult = true; *hasResultType = true; break;
+ case OpFUnordLessThan: *hasResult = true; *hasResultType = true; break;
+ case OpFOrdGreaterThan: *hasResult = true; *hasResultType = true; break;
+ case OpFUnordGreaterThan: *hasResult = true; *hasResultType = true; break;
+ case OpFOrdLessThanEqual: *hasResult = true; *hasResultType = true; break;
+ case OpFUnordLessThanEqual: *hasResult = true; *hasResultType = true; break;
+ case OpFOrdGreaterThanEqual: *hasResult = true; *hasResultType = true; break;
+ case OpFUnordGreaterThanEqual: *hasResult = true; *hasResultType = true; break;
+ case OpShiftRightLogical: *hasResult = true; *hasResultType = true; break;
+ case OpShiftRightArithmetic: *hasResult = true; *hasResultType = true; break;
+ case OpShiftLeftLogical: *hasResult = true; *hasResultType = true; break;
+ case OpBitwiseOr: *hasResult = true; *hasResultType = true; break;
+ case OpBitwiseXor: *hasResult = true; *hasResultType = true; break;
+ case OpBitwiseAnd: *hasResult = true; *hasResultType = true; break;
+ case OpNot: *hasResult = true; *hasResultType = true; break;
+ case OpBitFieldInsert: *hasResult = true; *hasResultType = true; break;
+ case OpBitFieldSExtract: *hasResult = true; *hasResultType = true; break;
+ case OpBitFieldUExtract: *hasResult = true; *hasResultType = true; break;
+ case OpBitReverse: *hasResult = true; *hasResultType = true; break;
+ case OpBitCount: *hasResult = true; *hasResultType = true; break;
+ case OpDPdx: *hasResult = true; *hasResultType = true; break;
+ case OpDPdy: *hasResult = true; *hasResultType = true; break;
+ case OpFwidth: *hasResult = true; *hasResultType = true; break;
+ case OpDPdxFine: *hasResult = true; *hasResultType = true; break;
+ case OpDPdyFine: *hasResult = true; *hasResultType = true; break;
+ case OpFwidthFine: *hasResult = true; *hasResultType = true; break;
+ case OpDPdxCoarse: *hasResult = true; *hasResultType = true; break;
+ case OpDPdyCoarse: *hasResult = true; *hasResultType = true; break;
+ case OpFwidthCoarse: *hasResult = true; *hasResultType = true; break;
+ case OpEmitVertex: *hasResult = false; *hasResultType = false; break;
+ case OpEndPrimitive: *hasResult = false; *hasResultType = false; break;
+ case OpEmitStreamVertex: *hasResult = false; *hasResultType = false; break;
+ case OpEndStreamPrimitive: *hasResult = false; *hasResultType = false; break;
+ case OpControlBarrier: *hasResult = false; *hasResultType = false; break;
+ case OpMemoryBarrier: *hasResult = false; *hasResultType = false; break;
+ case OpAtomicLoad: *hasResult = true; *hasResultType = true; break;
+ case OpAtomicStore: *hasResult = false; *hasResultType = false; break;
+ case OpAtomicExchange: *hasResult = true; *hasResultType = true; break;
+ case OpAtomicCompareExchange: *hasResult = true; *hasResultType = true; break;
+ case OpAtomicCompareExchangeWeak: *hasResult = true; *hasResultType = true; break;
+ case OpAtomicIIncrement: *hasResult = true; *hasResultType = true; break;
+ case OpAtomicIDecrement: *hasResult = true; *hasResultType = true; break;
+ case OpAtomicIAdd: *hasResult = true; *hasResultType = true; break;
+ case OpAtomicISub: *hasResult = true; *hasResultType = true; break;
+ case OpAtomicSMin: *hasResult = true; *hasResultType = true; break;
+ case OpAtomicUMin: *hasResult = true; *hasResultType = true; break;
+ case OpAtomicSMax: *hasResult = true; *hasResultType = true; break;
+ case OpAtomicUMax: *hasResult = true; *hasResultType = true; break;
+ case OpAtomicAnd: *hasResult = true; *hasResultType = true; break;
+ case OpAtomicOr: *hasResult = true; *hasResultType = true; break;
+ case OpAtomicXor: *hasResult = true; *hasResultType = true; break;
+ case OpPhi: *hasResult = true; *hasResultType = true; break;
+ case OpLoopMerge: *hasResult = false; *hasResultType = false; break;
+ case OpSelectionMerge: *hasResult = false; *hasResultType = false; break;
+ case OpLabel: *hasResult = true; *hasResultType = false; break;
+ case OpBranch: *hasResult = false; *hasResultType = false; break;
+ case OpBranchConditional: *hasResult = false; *hasResultType = false; break;
+ case OpSwitch: *hasResult = false; *hasResultType = false; break;
+ case OpKill: *hasResult = false; *hasResultType = false; break;
+ case OpReturn: *hasResult = false; *hasResultType = false; break;
+ case OpReturnValue: *hasResult = false; *hasResultType = false; break;
+ case OpUnreachable: *hasResult = false; *hasResultType = false; break;
+ case OpLifetimeStart: *hasResult = false; *hasResultType = false; break;
+ case OpLifetimeStop: *hasResult = false; *hasResultType = false; break;
+ case OpGroupAsyncCopy: *hasResult = true; *hasResultType = true; break;
+ case OpGroupWaitEvents: *hasResult = false; *hasResultType = false; break;
+ case OpGroupAll: *hasResult = true; *hasResultType = true; break;
+ case OpGroupAny: *hasResult = true; *hasResultType = true; break;
+ case OpGroupBroadcast: *hasResult = true; *hasResultType = true; break;
+ case OpGroupIAdd: *hasResult = true; *hasResultType = true; break;
+ case OpGroupFAdd: *hasResult = true; *hasResultType = true; break;
+ case OpGroupFMin: *hasResult = true; *hasResultType = true; break;
+ case OpGroupUMin: *hasResult = true; *hasResultType = true; break;
+ case OpGroupSMin: *hasResult = true; *hasResultType = true; break;
+ case OpGroupFMax: *hasResult = true; *hasResultType = true; break;
+ case OpGroupUMax: *hasResult = true; *hasResultType = true; break;
+ case OpGroupSMax: *hasResult = true; *hasResultType = true; break;
+ case OpReadPipe: *hasResult = true; *hasResultType = true; break;
+ case OpWritePipe: *hasResult = true; *hasResultType = true; break;
+ case OpReservedReadPipe: *hasResult = true; *hasResultType = true; break;
+ case OpReservedWritePipe: *hasResult = true; *hasResultType = true; break;
+ case OpReserveReadPipePackets: *hasResult = true; *hasResultType = true; break;
+ case OpReserveWritePipePackets: *hasResult = true; *hasResultType = true; break;
+ case OpCommitReadPipe: *hasResult = false; *hasResultType = false; break;
+ case OpCommitWritePipe: *hasResult = false; *hasResultType = false; break;
+ case OpIsValidReserveId: *hasResult = true; *hasResultType = true; break;
+ case OpGetNumPipePackets: *hasResult = true; *hasResultType = true; break;
+ case OpGetMaxPipePackets: *hasResult = true; *hasResultType = true; break;
+ case OpGroupReserveReadPipePackets: *hasResult = true; *hasResultType = true; break;
+ case OpGroupReserveWritePipePackets: *hasResult = true; *hasResultType = true; break;
+ case OpGroupCommitReadPipe: *hasResult = false; *hasResultType = false; break;
+ case OpGroupCommitWritePipe: *hasResult = false; *hasResultType = false; break;
+ case OpEnqueueMarker: *hasResult = true; *hasResultType = true; break;
+ case OpEnqueueKernel: *hasResult = true; *hasResultType = true; break;
+ case OpGetKernelNDrangeSubGroupCount: *hasResult = true; *hasResultType = true; break;
+ case OpGetKernelNDrangeMaxSubGroupSize: *hasResult = true; *hasResultType = true; break;
+ case OpGetKernelWorkGroupSize: *hasResult = true; *hasResultType = true; break;
+ case OpGetKernelPreferredWorkGroupSizeMultiple: *hasResult = true; *hasResultType = true; break;
+ case OpRetainEvent: *hasResult = false; *hasResultType = false; break;
+ case OpReleaseEvent: *hasResult = false; *hasResultType = false; break;
+ case OpCreateUserEvent: *hasResult = true; *hasResultType = true; break;
+ case OpIsValidEvent: *hasResult = true; *hasResultType = true; break;
+ case OpSetUserEventStatus: *hasResult = false; *hasResultType = false; break;
+ case OpCaptureEventProfilingInfo: *hasResult = false; *hasResultType = false; break;
+ case OpGetDefaultQueue: *hasResult = true; *hasResultType = true; break;
+ case OpBuildNDRange: *hasResult = true; *hasResultType = true; break;
+ case OpImageSparseSampleImplicitLod: *hasResult = true; *hasResultType = true; break;
+ case OpImageSparseSampleExplicitLod: *hasResult = true; *hasResultType = true; break;
+ case OpImageSparseSampleDrefImplicitLod: *hasResult = true; *hasResultType = true; break;
+ case OpImageSparseSampleDrefExplicitLod: *hasResult = true; *hasResultType = true; break;
+ case OpImageSparseSampleProjImplicitLod: *hasResult = true; *hasResultType = true; break;
+ case OpImageSparseSampleProjExplicitLod: *hasResult = true; *hasResultType = true; break;
+ case OpImageSparseSampleProjDrefImplicitLod: *hasResult = true; *hasResultType = true; break;
+ case OpImageSparseSampleProjDrefExplicitLod: *hasResult = true; *hasResultType = true; break;
+ case OpImageSparseFetch: *hasResult = true; *hasResultType = true; break;
+ case OpImageSparseGather: *hasResult = true; *hasResultType = true; break;
+ case OpImageSparseDrefGather: *hasResult = true; *hasResultType = true; break;
+ case OpImageSparseTexelsResident: *hasResult = true; *hasResultType = true; break;
+ case OpNoLine: *hasResult = false; *hasResultType = false; break;
+ case OpAtomicFlagTestAndSet: *hasResult = true; *hasResultType = true; break;
+ case OpAtomicFlagClear: *hasResult = false; *hasResultType = false; break;
+ case OpImageSparseRead: *hasResult = true; *hasResultType = true; break;
+ case OpSizeOf: *hasResult = true; *hasResultType = true; break;
+ case OpTypePipeStorage: *hasResult = true; *hasResultType = false; break;
+ case OpConstantPipeStorage: *hasResult = true; *hasResultType = true; break;
+ case OpCreatePipeFromPipeStorage: *hasResult = true; *hasResultType = true; break;
+ case OpGetKernelLocalSizeForSubgroupCount: *hasResult = true; *hasResultType = true; break;
+ case OpGetKernelMaxNumSubgroups: *hasResult = true; *hasResultType = true; break;
+ case OpTypeNamedBarrier: *hasResult = true; *hasResultType = false; break;
+ case OpNamedBarrierInitialize: *hasResult = true; *hasResultType = true; break;
+ case OpMemoryNamedBarrier: *hasResult = false; *hasResultType = false; break;
+ case OpModuleProcessed: *hasResult = false; *hasResultType = false; break;
+ case OpExecutionModeId: *hasResult = false; *hasResultType = false; break;
+ case OpDecorateId: *hasResult = false; *hasResultType = false; break;
+ case OpGroupNonUniformElect: *hasResult = true; *hasResultType = true; break;
+ case OpGroupNonUniformAll: *hasResult = true; *hasResultType = true; break;
+ case OpGroupNonUniformAny: *hasResult = true; *hasResultType = true; break;
+ case OpGroupNonUniformAllEqual: *hasResult = true; *hasResultType = true; break;
+ case OpGroupNonUniformBroadcast: *hasResult = true; *hasResultType = true; break;
+ case OpGroupNonUniformBroadcastFirst: *hasResult = true; *hasResultType = true; break;
+ case OpGroupNonUniformBallot: *hasResult = true; *hasResultType = true; break;
+ case OpGroupNonUniformInverseBallot: *hasResult = true; *hasResultType = true; break;
+ case OpGroupNonUniformBallotBitExtract: *hasResult = true; *hasResultType = true; break;
+ case OpGroupNonUniformBallotBitCount: *hasResult = true; *hasResultType = true; break;
+ case OpGroupNonUniformBallotFindLSB: *hasResult = true; *hasResultType = true; break;
+ case OpGroupNonUniformBallotFindMSB: *hasResult = true; *hasResultType = true; break;
+ case OpGroupNonUniformShuffle: *hasResult = true; *hasResultType = true; break;
+ case OpGroupNonUniformShuffleXor: *hasResult = true; *hasResultType = true; break;
+ case OpGroupNonUniformShuffleUp: *hasResult = true; *hasResultType = true; break;
+ case OpGroupNonUniformShuffleDown: *hasResult = true; *hasResultType = true; break;
+ case OpGroupNonUniformIAdd: *hasResult = true; *hasResultType = true; break;
+ case OpGroupNonUniformFAdd: *hasResult = true; *hasResultType = true; break;
+ case OpGroupNonUniformIMul: *hasResult = true; *hasResultType = true; break;
+ case OpGroupNonUniformFMul: *hasResult = true; *hasResultType = true; break;
+ case OpGroupNonUniformSMin: *hasResult = true; *hasResultType = true; break;
+ case OpGroupNonUniformUMin: *hasResult = true; *hasResultType = true; break;
+ case OpGroupNonUniformFMin: *hasResult = true; *hasResultType = true; break;
+ case OpGroupNonUniformSMax: *hasResult = true; *hasResultType = true; break;
+ case OpGroupNonUniformUMax: *hasResult = true; *hasResultType = true; break;
+ case OpGroupNonUniformFMax: *hasResult = true; *hasResultType = true; break;
+ case OpGroupNonUniformBitwiseAnd: *hasResult = true; *hasResultType = true; break;
+ case OpGroupNonUniformBitwiseOr: *hasResult = true; *hasResultType = true; break;
+ case OpGroupNonUniformBitwiseXor: *hasResult = true; *hasResultType = true; break;
+ case OpGroupNonUniformLogicalAnd: *hasResult = true; *hasResultType = true; break;
+ case OpGroupNonUniformLogicalOr: *hasResult = true; *hasResultType = true; break;
+ case OpGroupNonUniformLogicalXor: *hasResult = true; *hasResultType = true; break;
+ case OpGroupNonUniformQuadBroadcast: *hasResult = true; *hasResultType = true; break;
+ case OpGroupNonUniformQuadSwap: *hasResult = true; *hasResultType = true; break;
+ case OpCopyLogical: *hasResult = true; *hasResultType = true; break;
+ case OpPtrEqual: *hasResult = true; *hasResultType = true; break;
+ case OpPtrNotEqual: *hasResult = true; *hasResultType = true; break;
+ case OpPtrDiff: *hasResult = true; *hasResultType = true; break;
+ case OpSubgroupBallotKHR: *hasResult = true; *hasResultType = true; break;
+ case OpSubgroupFirstInvocationKHR: *hasResult = true; *hasResultType = true; break;
+ case OpSubgroupAllKHR: *hasResult = true; *hasResultType = true; break;
+ case OpSubgroupAnyKHR: *hasResult = true; *hasResultType = true; break;
+ case OpSubgroupAllEqualKHR: *hasResult = true; *hasResultType = true; break;
+ case OpSubgroupReadInvocationKHR: *hasResult = true; *hasResultType = true; break;
+ case OpGroupIAddNonUniformAMD: *hasResult = true; *hasResultType = true; break;
+ case OpGroupFAddNonUniformAMD: *hasResult = true; *hasResultType = true; break;
+ case OpGroupFMinNonUniformAMD: *hasResult = true; *hasResultType = true; break;
+ case OpGroupUMinNonUniformAMD: *hasResult = true; *hasResultType = true; break;
+ case OpGroupSMinNonUniformAMD: *hasResult = true; *hasResultType = true; break;
+ case OpGroupFMaxNonUniformAMD: *hasResult = true; *hasResultType = true; break;
+ case OpGroupUMaxNonUniformAMD: *hasResult = true; *hasResultType = true; break;
+ case OpGroupSMaxNonUniformAMD: *hasResult = true; *hasResultType = true; break;
+ case OpFragmentMaskFetchAMD: *hasResult = true; *hasResultType = true; break;
+ case OpFragmentFetchAMD: *hasResult = true; *hasResultType = true; break;
+ case OpImageSampleFootprintNV: *hasResult = true; *hasResultType = true; break;
+ case OpGroupNonUniformPartitionNV: *hasResult = true; *hasResultType = true; break;
+ case OpWritePackedPrimitiveIndices4x8NV: *hasResult = false; *hasResultType = false; break;
+ case OpReportIntersectionNV: *hasResult = true; *hasResultType = true; break;
+ case OpIgnoreIntersectionNV: *hasResult = false; *hasResultType = false; break;
+ case OpTerminateRayNV: *hasResult = false; *hasResultType = false; break;
+ case OpTraceNV: *hasResult = false; *hasResultType = false; break;
+ case OpTypeAccelerationStructureNV: *hasResult = true; *hasResultType = false; break;
+ case OpExecuteCallableNV: *hasResult = false; *hasResultType = false; break;
+ case OpTypeCooperativeMatrixNV: *hasResult = true; *hasResultType = false; break;
+ case OpCooperativeMatrixLoadNV: *hasResult = true; *hasResultType = true; break;
+ case OpCooperativeMatrixStoreNV: *hasResult = false; *hasResultType = false; break;
+ case OpCooperativeMatrixMulAddNV: *hasResult = true; *hasResultType = true; break;
+ case OpCooperativeMatrixLengthNV: *hasResult = true; *hasResultType = true; break;
+ case OpSubgroupShuffleINTEL: *hasResult = true; *hasResultType = true; break;
+ case OpSubgroupShuffleDownINTEL: *hasResult = true; *hasResultType = true; break;
+ case OpSubgroupShuffleUpINTEL: *hasResult = true; *hasResultType = true; break;
+ case OpSubgroupShuffleXorINTEL: *hasResult = true; *hasResultType = true; break;
+ case OpSubgroupBlockReadINTEL: *hasResult = true; *hasResultType = true; break;
+ case OpSubgroupBlockWriteINTEL: *hasResult = false; *hasResultType = false; break;
+ case OpSubgroupImageBlockReadINTEL: *hasResult = true; *hasResultType = true; break;
+ case OpSubgroupImageBlockWriteINTEL: *hasResult = false; *hasResultType = false; break;
+ case OpSubgroupImageMediaBlockReadINTEL: *hasResult = true; *hasResultType = true; break;
+ case OpSubgroupImageMediaBlockWriteINTEL: *hasResult = false; *hasResultType = false; break;
+ case OpDecorateString: *hasResult = false; *hasResultType = false; break;
+ case OpDecorateStringGOOGLE: *hasResult = false; *hasResultType = false; break;
+ case OpMemberDecorateString: *hasResult = false; *hasResultType = false; break;
+ case OpMemberDecorateStringGOOGLE: *hasResult = false; *hasResultType = false; break;
+ case OpVmeImageINTEL: *hasResult = true; *hasResultType = true; break;
+ case OpTypeVmeImageINTEL: *hasResult = true; *hasResultType = false; break;
+ case OpTypeAvcImePayloadINTEL: *hasResult = true; *hasResultType = false; break;
+ case OpTypeAvcRefPayloadINTEL: *hasResult = true; *hasResultType = false; break;
+ case OpTypeAvcSicPayloadINTEL: *hasResult = true; *hasResultType = false; break;
+ case OpTypeAvcMcePayloadINTEL: *hasResult = true; *hasResultType = false; break;
+ case OpTypeAvcMceResultINTEL: *hasResult = true; *hasResultType = false; break;
+ case OpTypeAvcImeResultINTEL: *hasResult = true; *hasResultType = false; break;
+ case OpTypeAvcImeResultSingleReferenceStreamoutINTEL: *hasResult = true; *hasResultType = false; break;
+ case OpTypeAvcImeResultDualReferenceStreamoutINTEL: *hasResult = true; *hasResultType = false; break;
+ case OpTypeAvcImeSingleReferenceStreaminINTEL: *hasResult = true; *hasResultType = false; break;
+ case OpTypeAvcImeDualReferenceStreaminINTEL: *hasResult = true; *hasResultType = false; break;
+ case OpTypeAvcRefResultINTEL: *hasResult = true; *hasResultType = false; break;
+ case OpTypeAvcSicResultINTEL: *hasResult = true; *hasResultType = false; break;
+ case OpSubgroupAvcMceGetDefaultInterBaseMultiReferencePenaltyINTEL: *hasResult = true; *hasResultType = true; break;
+ case OpSubgroupAvcMceSetInterBaseMultiReferencePenaltyINTEL: *hasResult = true; *hasResultType = true; break;
+ case OpSubgroupAvcMceGetDefaultInterShapePenaltyINTEL: *hasResult = true; *hasResultType = true; break;
+ case OpSubgroupAvcMceSetInterShapePenaltyINTEL: *hasResult = true; *hasResultType = true; break;
+ case OpSubgroupAvcMceGetDefaultInterDirectionPenaltyINTEL: *hasResult = true; *hasResultType = true; break;
+ case OpSubgroupAvcMceSetInterDirectionPenaltyINTEL: *hasResult = true; *hasResultType = true; break;
+ case OpSubgroupAvcMceGetDefaultIntraLumaShapePenaltyINTEL: *hasResult = true; *hasResultType = true; break;
+ case OpSubgroupAvcMceGetDefaultInterMotionVectorCostTableINTEL: *hasResult = true; *hasResultType = true; break;
+ case OpSubgroupAvcMceGetDefaultHighPenaltyCostTableINTEL: *hasResult = true; *hasResultType = true; break;
+ case OpSubgroupAvcMceGetDefaultMediumPenaltyCostTableINTEL: *hasResult = true; *hasResultType = true; break;
+ case OpSubgroupAvcMceGetDefaultLowPenaltyCostTableINTEL: *hasResult = true; *hasResultType = true; break;
+ case OpSubgroupAvcMceSetMotionVectorCostFunctionINTEL: *hasResult = true; *hasResultType = true; break;
+ case OpSubgroupAvcMceGetDefaultIntraLumaModePenaltyINTEL: *hasResult = true; *hasResultType = true; break;
+ case OpSubgroupAvcMceGetDefaultNonDcLumaIntraPenaltyINTEL: *hasResult = true; *hasResultType = true; break;
+ case OpSubgroupAvcMceGetDefaultIntraChromaModeBasePenaltyINTEL: *hasResult = true; *hasResultType = true; break;
+ case OpSubgroupAvcMceSetAcOnlyHaarINTEL: *hasResult = true; *hasResultType = true; break;
+ case OpSubgroupAvcMceSetSourceInterlacedFieldPolarityINTEL: *hasResult = true; *hasResultType = true; break;
+ case OpSubgroupAvcMceSetSingleReferenceInterlacedFieldPolarityINTEL: *hasResult = true; *hasResultType = true; break;
+ case OpSubgroupAvcMceSetDualReferenceInterlacedFieldPolaritiesINTEL: *hasResult = true; *hasResultType = true; break;
+ case OpSubgroupAvcMceConvertToImePayloadINTEL: *hasResult = true; *hasResultType = true; break;
+ case OpSubgroupAvcMceConvertToImeResultINTEL: *hasResult = true; *hasResultType = true; break;
+ case OpSubgroupAvcMceConvertToRefPayloadINTEL: *hasResult = true; *hasResultType = true; break;
+ case OpSubgroupAvcMceConvertToRefResultINTEL: *hasResult = true; *hasResultType = true; break;
+ case OpSubgroupAvcMceConvertToSicPayloadINTEL: *hasResult = true; *hasResultType = true; break;
+ case OpSubgroupAvcMceConvertToSicResultINTEL: *hasResult = true; *hasResultType = true; break;
+ case OpSubgroupAvcMceGetMotionVectorsINTEL: *hasResult = true; *hasResultType = true; break;
+ case OpSubgroupAvcMceGetInterDistortionsINTEL: *hasResult = true; *hasResultType = true; break;
+ case OpSubgroupAvcMceGetBestInterDistortionsINTEL: *hasResult = true; *hasResultType = true; break;
+ case OpSubgroupAvcMceGetInterMajorShapeINTEL: *hasResult = true; *hasResultType = true; break;
+ case OpSubgroupAvcMceGetInterMinorShapeINTEL: *hasResult = true; *hasResultType = true; break;
+ case OpSubgroupAvcMceGetInterDirectionsINTEL: *hasResult = true; *hasResultType = true; break;
+ case OpSubgroupAvcMceGetInterMotionVectorCountINTEL: *hasResult = true; *hasResultType = true; break;
+ case OpSubgroupAvcMceGetInterReferenceIdsINTEL: *hasResult = true; *hasResultType = true; break;
+ case OpSubgroupAvcMceGetInterReferenceInterlacedFieldPolaritiesINTEL: *hasResult = true; *hasResultType = true; break;
+ case OpSubgroupAvcImeInitializeINTEL: *hasResult = true; *hasResultType = true; break;
+ case OpSubgroupAvcImeSetSingleReferenceINTEL: *hasResult = true; *hasResultType = true; break;
+ case OpSubgroupAvcImeSetDualReferenceINTEL: *hasResult = true; *hasResultType = true; break;
+ case OpSubgroupAvcImeRefWindowSizeINTEL: *hasResult = true; *hasResultType = true; break;
+ case OpSubgroupAvcImeAdjustRefOffsetINTEL: *hasResult = true; *hasResultType = true; break;
+ case OpSubgroupAvcImeConvertToMcePayloadINTEL: *hasResult = true; *hasResultType = true; break;
+ case OpSubgroupAvcImeSetMaxMotionVectorCountINTEL: *hasResult = true; *hasResultType = true; break;
+ case OpSubgroupAvcImeSetUnidirectionalMixDisableINTEL: *hasResult = true; *hasResultType = true; break;
+ case OpSubgroupAvcImeSetEarlySearchTerminationThresholdINTEL: *hasResult = true; *hasResultType = true; break;
+ case OpSubgroupAvcImeSetWeightedSadINTEL: *hasResult = true; *hasResultType = true; break;
+ case OpSubgroupAvcImeEvaluateWithSingleReferenceINTEL: *hasResult = true; *hasResultType = true; break;
+ case OpSubgroupAvcImeEvaluateWithDualReferenceINTEL: *hasResult = true; *hasResultType = true; break;
+ case OpSubgroupAvcImeEvaluateWithSingleReferenceStreaminINTEL: *hasResult = true; *hasResultType = true; break;
+ case OpSubgroupAvcImeEvaluateWithDualReferenceStreaminINTEL: *hasResult = true; *hasResultType = true; break;
+ case OpSubgroupAvcImeEvaluateWithSingleReferenceStreamoutINTEL: *hasResult = true; *hasResultType = true; break;
+ case OpSubgroupAvcImeEvaluateWithDualReferenceStreamoutINTEL: *hasResult = true; *hasResultType = true; break;
+ case OpSubgroupAvcImeEvaluateWithSingleReferenceStreaminoutINTEL: *hasResult = true; *hasResultType = true; break;
+ case OpSubgroupAvcImeEvaluateWithDualReferenceStreaminoutINTEL: *hasResult = true; *hasResultType = true; break;
+ case OpSubgroupAvcImeConvertToMceResultINTEL: *hasResult = true; *hasResultType = true; break;
+ case OpSubgroupAvcImeGetSingleReferenceStreaminINTEL: *hasResult = true; *hasResultType = true; break;
+ case OpSubgroupAvcImeGetDualReferenceStreaminINTEL: *hasResult = true; *hasResultType = true; break;
+ case OpSubgroupAvcImeStripSingleReferenceStreamoutINTEL: *hasResult = true; *hasResultType = true; break;
+ case OpSubgroupAvcImeStripDualReferenceStreamoutINTEL: *hasResult = true; *hasResultType = true; break;
+ case OpSubgroupAvcImeGetStreamoutSingleReferenceMajorShapeMotionVectorsINTEL: *hasResult = true; *hasResultType = true; break;
+ case OpSubgroupAvcImeGetStreamoutSingleReferenceMajorShapeDistortionsINTEL: *hasResult = true; *hasResultType = true; break;
+ case OpSubgroupAvcImeGetStreamoutSingleReferenceMajorShapeReferenceIdsINTEL: *hasResult = true; *hasResultType = true; break;
+ case OpSubgroupAvcImeGetStreamoutDualReferenceMajorShapeMotionVectorsINTEL: *hasResult = true; *hasResultType = true; break;
+ case OpSubgroupAvcImeGetStreamoutDualReferenceMajorShapeDistortionsINTEL: *hasResult = true; *hasResultType = true; break;
+ case OpSubgroupAvcImeGetStreamoutDualReferenceMajorShapeReferenceIdsINTEL: *hasResult = true; *hasResultType = true; break;
+ case OpSubgroupAvcImeGetBorderReachedINTEL: *hasResult = true; *hasResultType = true; break;
+ case OpSubgroupAvcImeGetTruncatedSearchIndicationINTEL: *hasResult = true; *hasResultType = true; break;
+ case OpSubgroupAvcImeGetUnidirectionalEarlySearchTerminationINTEL: *hasResult = true; *hasResultType = true; break;
+ case OpSubgroupAvcImeGetWeightingPatternMinimumMotionVectorINTEL: *hasResult = true; *hasResultType = true; break;
+ case OpSubgroupAvcImeGetWeightingPatternMinimumDistortionINTEL: *hasResult = true; *hasResultType = true; break;
+ case OpSubgroupAvcFmeInitializeINTEL: *hasResult = true; *hasResultType = true; break;
+ case OpSubgroupAvcBmeInitializeINTEL: *hasResult = true; *hasResultType = true; break;
+ case OpSubgroupAvcRefConvertToMcePayloadINTEL: *hasResult = true; *hasResultType = true; break;
+ case OpSubgroupAvcRefSetBidirectionalMixDisableINTEL: *hasResult = true; *hasResultType = true; break;
+ case OpSubgroupAvcRefSetBilinearFilterEnableINTEL: *hasResult = true; *hasResultType = true; break;
+ case OpSubgroupAvcRefEvaluateWithSingleReferenceINTEL: *hasResult = true; *hasResultType = true; break;
+ case OpSubgroupAvcRefEvaluateWithDualReferenceINTEL: *hasResult = true; *hasResultType = true; break;
+ case OpSubgroupAvcRefEvaluateWithMultiReferenceINTEL: *hasResult = true; *hasResultType = true; break;
+ case OpSubgroupAvcRefEvaluateWithMultiReferenceInterlacedINTEL: *hasResult = true; *hasResultType = true; break;
+ case OpSubgroupAvcRefConvertToMceResultINTEL: *hasResult = true; *hasResultType = true; break;
+ case OpSubgroupAvcSicInitializeINTEL: *hasResult = true; *hasResultType = true; break;
+ case OpSubgroupAvcSicConfigureSkcINTEL: *hasResult = true; *hasResultType = true; break;
+ case OpSubgroupAvcSicConfigureIpeLumaINTEL: *hasResult = true; *hasResultType = true; break;
+ case OpSubgroupAvcSicConfigureIpeLumaChromaINTEL: *hasResult = true; *hasResultType = true; break;
+ case OpSubgroupAvcSicGetMotionVectorMaskINTEL: *hasResult = true; *hasResultType = true; break;
+ case OpSubgroupAvcSicConvertToMcePayloadINTEL: *hasResult = true; *hasResultType = true; break;
+ case OpSubgroupAvcSicSetIntraLumaShapePenaltyINTEL: *hasResult = true; *hasResultType = true; break;
+ case OpSubgroupAvcSicSetIntraLumaModeCostFunctionINTEL: *hasResult = true; *hasResultType = true; break;
+ case OpSubgroupAvcSicSetIntraChromaModeCostFunctionINTEL: *hasResult = true; *hasResultType = true; break;
+ case OpSubgroupAvcSicSetBilinearFilterEnableINTEL: *hasResult = true; *hasResultType = true; break;
+ case OpSubgroupAvcSicSetSkcForwardTransformEnableINTEL: *hasResult = true; *hasResultType = true; break;
+ case OpSubgroupAvcSicSetBlockBasedRawSkipSadINTEL: *hasResult = true; *hasResultType = true; break;
+ case OpSubgroupAvcSicEvaluateIpeINTEL: *hasResult = true; *hasResultType = true; break;
+ case OpSubgroupAvcSicEvaluateWithSingleReferenceINTEL: *hasResult = true; *hasResultType = true; break;
+ case OpSubgroupAvcSicEvaluateWithDualReferenceINTEL: *hasResult = true; *hasResultType = true; break;
+ case OpSubgroupAvcSicEvaluateWithMultiReferenceINTEL: *hasResult = true; *hasResultType = true; break;
+ case OpSubgroupAvcSicEvaluateWithMultiReferenceInterlacedINTEL: *hasResult = true; *hasResultType = true; break;
+ case OpSubgroupAvcSicConvertToMceResultINTEL: *hasResult = true; *hasResultType = true; break;
+ case OpSubgroupAvcSicGetIpeLumaShapeINTEL: *hasResult = true; *hasResultType = true; break;
+ case OpSubgroupAvcSicGetBestIpeLumaDistortionINTEL: *hasResult = true; *hasResultType = true; break;
+ case OpSubgroupAvcSicGetBestIpeChromaDistortionINTEL: *hasResult = true; *hasResultType = true; break;
+ case OpSubgroupAvcSicGetPackedIpeLumaModesINTEL: *hasResult = true; *hasResultType = true; break;
+ case OpSubgroupAvcSicGetIpeChromaModeINTEL: *hasResult = true; *hasResultType = true; break;
+ case OpSubgroupAvcSicGetPackedSkcLumaCountThresholdINTEL: *hasResult = true; *hasResultType = true; break;
+ case OpSubgroupAvcSicGetPackedSkcLumaSumThresholdINTEL: *hasResult = true; *hasResultType = true; break;
+ case OpSubgroupAvcSicGetInterRawSadsINTEL: *hasResult = true; *hasResultType = true; break;
+ }
+}
+#endif /* SPV_ENABLE_UTILITY_CODE */
+
+// Overload operator| for mask bit combining
+
+inline ImageOperandsMask operator|(ImageOperandsMask a, ImageOperandsMask b) { return ImageOperandsMask(unsigned(a) | unsigned(b)); }
+inline FPFastMathModeMask operator|(FPFastMathModeMask a, FPFastMathModeMask b) { return FPFastMathModeMask(unsigned(a) | unsigned(b)); }
+inline SelectionControlMask operator|(SelectionControlMask a, SelectionControlMask b) { return SelectionControlMask(unsigned(a) | unsigned(b)); }
+inline LoopControlMask operator|(LoopControlMask a, LoopControlMask b) { return LoopControlMask(unsigned(a) | unsigned(b)); }
+inline FunctionControlMask operator|(FunctionControlMask a, FunctionControlMask b) { return FunctionControlMask(unsigned(a) | unsigned(b)); }
+inline MemorySemanticsMask operator|(MemorySemanticsMask a, MemorySemanticsMask b) { return MemorySemanticsMask(unsigned(a) | unsigned(b)); }
+inline MemoryAccessMask operator|(MemoryAccessMask a, MemoryAccessMask b) { return MemoryAccessMask(unsigned(a) | unsigned(b)); }
+inline KernelProfilingInfoMask operator|(KernelProfilingInfoMask a, KernelProfilingInfoMask b) { return KernelProfilingInfoMask(unsigned(a) | unsigned(b)); }
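+// e.g., ImageOperandsMask m = ImageOperandsBiasMask | ImageOperandsLodMask;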
+
+} // end namespace spv
+
+#endif // #ifndef spirv_HPP
+
diff --git a/thirdparty/glslang/SPIRV/spvIR.h b/thirdparty/glslang/SPIRV/spvIR.h
new file mode 100644
index 0000000000..b3cd0b0613
--- /dev/null
+++ b/thirdparty/glslang/SPIRV/spvIR.h
@@ -0,0 +1,441 @@
+//
+// Copyright (C) 2014 LunarG, Inc.
+// Copyright (C) 2015-2018 Google, Inc.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+
+// SPIR-V IR
+//
+// Simple in-memory representation (IR) of SPIR-V. Just for holding
+// each function's CFG of blocks. Has this hierarchy:
+// - Module, which is a list of
+// - Function, which is a list of
+// - Block, which is a list of
+// - Instruction
+//
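+//
+// A minimal construction sketch (illustrative only; ids are placeholders,
+// and SpvBuilder.cpp is the real driver of these classes):
+//   Module m;
+//   Function* f = new Function(fnId, retTypeId, fnTypeId, firstParamId, m);
+//   Block* b = new Block(labelId, *f);
+//   f->addBlock(b);
+//   b->addInstruction(std::unique_ptr<Instruction>(new Instruction(OpReturn)));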
+
+#pragma once
+#ifndef spvIR_H
+#define spvIR_H
+
+#include "spirv.hpp"
+
+#include <algorithm>
+#include <cassert>
+#include <functional>
+#include <iostream>
+#include <memory>
+#include <vector>
+
+namespace spv {
+
+class Block;
+class Function;
+class Module;
+
+const Id NoResult = 0;
+const Id NoType = 0;
+
+const Decoration NoPrecision = DecorationMax;
+
+#ifdef __GNUC__
+# define POTENTIALLY_UNUSED __attribute__((unused))
+#else
+# define POTENTIALLY_UNUSED
+#endif
+
+POTENTIALLY_UNUSED
+const MemorySemanticsMask MemorySemanticsAllMemory =
+ (MemorySemanticsMask)(MemorySemanticsUniformMemoryMask |
+ MemorySemanticsWorkgroupMemoryMask |
+ MemorySemanticsAtomicCounterMemoryMask |
+ MemorySemanticsImageMemoryMask);
+
+struct IdImmediate {
+ bool isId; // true if word is an Id, false if word is an immediate
+ unsigned word;
+ IdImmediate(bool i, unsigned w) : isId(i), word(w) {}
+};
+
+//
+// SPIR-V IR instruction.
+//
+
+class Instruction {
+public:
+ Instruction(Id resultId, Id typeId, Op opCode) : resultId(resultId), typeId(typeId), opCode(opCode), block(nullptr) { }
+ explicit Instruction(Op opCode) : resultId(NoResult), typeId(NoType), opCode(opCode), block(nullptr) { }
+ virtual ~Instruction() {}
+ void addIdOperand(Id id) {
+ operands.push_back(id);
+ idOperand.push_back(true);
+ }
+ void addImmediateOperand(unsigned int immediate) {
+ operands.push_back(immediate);
+ idOperand.push_back(false);
+ }
+ void setImmediateOperand(unsigned idx, unsigned int immediate) {
+ assert(!idOperand[idx]);
+ operands[idx] = immediate;
+ }
+
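+    // Pack a null-terminated string into successive 32-bit words, four bytes
+    // per word, zero-padding the final word (the terminating null is always
+    // included, matching SPIR-V's literal-string encoding).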
+ void addStringOperand(const char* str)
+ {
+ unsigned int word;
+ char* wordString = (char*)&word;
+ char* wordPtr = wordString;
+ int charCount = 0;
+ char c;
+ do {
+ c = *(str++);
+ *(wordPtr++) = c;
+ ++charCount;
+ if (charCount == 4) {
+ addImmediateOperand(word);
+ wordPtr = wordString;
+ charCount = 0;
+ }
+ } while (c != 0);
+
+ // deal with partial last word
+ if (charCount > 0) {
+ // pad with 0s
+ for (; charCount < 4; ++charCount)
+ *(wordPtr++) = 0;
+ addImmediateOperand(word);
+ }
+ }
+ bool isIdOperand(int op) const { return idOperand[op]; }
+ void setBlock(Block* b) { block = b; }
+ Block* getBlock() const { return block; }
+ Op getOpCode() const { return opCode; }
+ int getNumOperands() const
+ {
+ assert(operands.size() == idOperand.size());
+ return (int)operands.size();
+ }
+ Id getResultId() const { return resultId; }
+ Id getTypeId() const { return typeId; }
+ Id getIdOperand(int op) const {
+ assert(idOperand[op]);
+ return operands[op];
+ }
+ unsigned int getImmediateOperand(int op) const {
+ assert(!idOperand[op]);
+ return operands[op];
+ }
+
+ // Write out the binary form.
+ void dump(std::vector<unsigned int>& out) const
+ {
+ // Compute the wordCount
+ unsigned int wordCount = 1;
+ if (typeId)
+ ++wordCount;
+ if (resultId)
+ ++wordCount;
+ wordCount += (unsigned int)operands.size();
+
+ // Write out the beginning of the instruction
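+        // (first word: word count in the high 16 bits, opcode in the low 16)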
+ out.push_back(((wordCount) << WordCountShift) | opCode);
+ if (typeId)
+ out.push_back(typeId);
+ if (resultId)
+ out.push_back(resultId);
+
+ // Write out the operands
+ for (int op = 0; op < (int)operands.size(); ++op)
+ out.push_back(operands[op]);
+ }
+
+protected:
+ Instruction(const Instruction&);
+ Id resultId;
+ Id typeId;
+ Op opCode;
+ std::vector<Id> operands; // operands, both <id> and immediates (both are unsigned int)
+ std::vector<bool> idOperand; // true for operands that are <id>, false for immediates
+ Block* block;
+};
+
+//
+// SPIR-V IR block.
+//
+
+class Block {
+public:
+ Block(Id id, Function& parent);
+ virtual ~Block()
+ {
+ }
+
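+    // A block's id is the result id of its leading OpLabel (see the constructor).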
+ Id getId() { return instructions.front()->getResultId(); }
+
+ Function& getParent() const { return parent; }
+ void addInstruction(std::unique_ptr<Instruction> inst);
+ void addPredecessor(Block* pred) { predecessors.push_back(pred); pred->successors.push_back(this);}
+ void addLocalVariable(std::unique_ptr<Instruction> inst) { localVariables.push_back(std::move(inst)); }
+ const std::vector<Block*>& getPredecessors() const { return predecessors; }
+ const std::vector<Block*>& getSuccessors() const { return successors; }
+ const std::vector<std::unique_ptr<Instruction> >& getInstructions() const {
+ return instructions;
+ }
+ const std::vector<std::unique_ptr<Instruction> >& getLocalVariables() const { return localVariables; }
+ void setUnreachable() { unreachable = true; }
+ bool isUnreachable() const { return unreachable; }
+ // Returns the block's merge instruction, if one exists (otherwise null).
+ const Instruction* getMergeInstruction() const {
+ if (instructions.size() < 2) return nullptr;
+ const Instruction* nextToLast = (instructions.cend() - 2)->get();
+ switch (nextToLast->getOpCode()) {
+ case OpSelectionMerge:
+ case OpLoopMerge:
+ return nextToLast;
+ default:
+ return nullptr;
+ }
+ return nullptr;
+ }
+
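+    // True if the last instruction is one of the branch/return terminators below.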
+ bool isTerminated() const
+ {
+ switch (instructions.back()->getOpCode()) {
+ case OpBranch:
+ case OpBranchConditional:
+ case OpSwitch:
+ case OpKill:
+ case OpReturn:
+ case OpReturnValue:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ void dump(std::vector<unsigned int>& out) const
+ {
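+        // Emit the leading OpLabel, then the local variables, then the rest.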
+ instructions[0]->dump(out);
+ for (int i = 0; i < (int)localVariables.size(); ++i)
+ localVariables[i]->dump(out);
+ for (int i = 1; i < (int)instructions.size(); ++i)
+ instructions[i]->dump(out);
+ }
+
+protected:
+ Block(const Block&);
+ Block& operator=(Block&);
+
+ // To enforce keeping parent and ownership in sync:
+ friend Function;
+
+ std::vector<std::unique_ptr<Instruction> > instructions;
+ std::vector<Block*> predecessors, successors;
+ std::vector<std::unique_ptr<Instruction> > localVariables;
+ Function& parent;
+
+    // track whether this block is known to be unreachable (not necessarily
+ // true for all unreachable blocks, but should be set at least
+ // for the extraneous ones introduced by the builder).
+ bool unreachable;
+};
+
+// Traverses the control-flow graph rooted at root in an order suited for
+// readable code generation. Invokes callback at every node in the traversal
+// order.
+void inReadableOrder(Block* root, std::function<void(Block*)> callback);
+
+//
+// SPIR-V IR Function.
+//
+
+class Function {
+public:
+ Function(Id id, Id resultType, Id functionType, Id firstParam, Module& parent);
+ virtual ~Function()
+ {
+ for (int i = 0; i < (int)parameterInstructions.size(); ++i)
+ delete parameterInstructions[i];
+
+ for (int i = 0; i < (int)blocks.size(); ++i)
+ delete blocks[i];
+ }
+ Id getId() const { return functionInstruction.getResultId(); }
+ Id getParamId(int p) const { return parameterInstructions[p]->getResultId(); }
+ Id getParamType(int p) const { return parameterInstructions[p]->getTypeId(); }
+
+ void addBlock(Block* block) { blocks.push_back(block); }
+ void removeBlock(Block* block)
+ {
+ auto found = find(blocks.begin(), blocks.end(), block);
+ assert(found != blocks.end());
+ blocks.erase(found);
+ delete block;
+ }
+
+ Module& getParent() const { return parent; }
+ Block* getEntryBlock() const { return blocks.front(); }
+ Block* getLastBlock() const { return blocks.back(); }
+ const std::vector<Block*>& getBlocks() const { return blocks; }
+ void addLocalVariable(std::unique_ptr<Instruction> inst);
+ Id getReturnType() const { return functionInstruction.getTypeId(); }
+
+ void setImplicitThis() { implicitThis = true; }
+ bool hasImplicitThis() const { return implicitThis; }
+
+ void dump(std::vector<unsigned int>& out) const
+ {
+ // OpFunction
+ functionInstruction.dump(out);
+
+ // OpFunctionParameter
+ for (int p = 0; p < (int)parameterInstructions.size(); ++p)
+ parameterInstructions[p]->dump(out);
+
+ // Blocks
+ inReadableOrder(blocks[0], [&out](const Block* b) { b->dump(out); });
+ Instruction end(0, 0, OpFunctionEnd);
+ end.dump(out);
+ }
+
+protected:
+ Function(const Function&);
+ Function& operator=(Function&);
+
+ Module& parent;
+ Instruction functionInstruction;
+ std::vector<Instruction*> parameterInstructions;
+ std::vector<Block*> blocks;
+ bool implicitThis; // true if this is a member function expecting to be passed a 'this' as the first argument
+};
+
+//
+// SPIR-V IR Module.
+//
+
+class Module {
+public:
+ Module() {}
+ virtual ~Module()
+ {
+ // TODO delete things
+ }
+
+ void addFunction(Function *fun) { functions.push_back(fun); }
+
+ void mapInstruction(Instruction *instruction)
+ {
+ spv::Id resultId = instruction->getResultId();
+ // map the instruction's result id
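+        // (grow with some headroom so the table isn't resized on every new id)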
+ if (resultId >= idToInstruction.size())
+ idToInstruction.resize(resultId + 16);
+ idToInstruction[resultId] = instruction;
+ }
+
+ Instruction* getInstruction(Id id) const { return idToInstruction[id]; }
+ const std::vector<Function*>& getFunctions() const { return functions; }
+ spv::Id getTypeId(Id resultId) const {
+ return idToInstruction[resultId] == nullptr ? NoType : idToInstruction[resultId]->getTypeId();
+ }
+ StorageClass getStorageClass(Id typeId) const
+ {
+ assert(idToInstruction[typeId]->getOpCode() == spv::OpTypePointer);
+ return (StorageClass)idToInstruction[typeId]->getImmediateOperand(0);
+ }
+
+ void dump(std::vector<unsigned int>& out) const
+ {
+ for (int f = 0; f < (int)functions.size(); ++f)
+ functions[f]->dump(out);
+ }
+
+protected:
+ Module(const Module&);
+ std::vector<Function*> functions;
+
+ // map from result id to instruction having that result id
+ std::vector<Instruction*> idToInstruction;
+
+ // map from a result id to its type id
+};
+
+//
+// Implementation (it's here due to circular type definitions).
+//
+
+// Add both
+// - the OpFunction instruction
+// - all the OpFunctionParameter instructions
+__inline Function::Function(Id id, Id resultType, Id functionType, Id firstParamId, Module& parent)
+ : parent(parent), functionInstruction(id, resultType, OpFunction), implicitThis(false)
+{
+ // OpFunction
+ functionInstruction.addImmediateOperand(FunctionControlMaskNone);
+ functionInstruction.addIdOperand(functionType);
+ parent.mapInstruction(&functionInstruction);
+ parent.addFunction(this);
+
+ // OpFunctionParameter
+ Instruction* typeInst = parent.getInstruction(functionType);
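+    // OpTypeFunction's first operand is the return type; the remaining
+    // operands are the parameter types.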
+ int numParams = typeInst->getNumOperands() - 1;
+ for (int p = 0; p < numParams; ++p) {
+ Instruction* param = new Instruction(firstParamId + p, typeInst->getIdOperand(p + 1), OpFunctionParameter);
+ parent.mapInstruction(param);
+ parameterInstructions.push_back(param);
+ }
+}
+
+__inline void Function::addLocalVariable(std::unique_ptr<Instruction> inst)
+{
+ Instruction* raw_instruction = inst.get();
+ blocks[0]->addLocalVariable(std::move(inst));
+ parent.mapInstruction(raw_instruction);
+}
+
+__inline Block::Block(Id id, Function& parent) : parent(parent), unreachable(false)
+{
+ instructions.push_back(std::unique_ptr<Instruction>(new Instruction(id, NoType, OpLabel)));
+ instructions.back()->setBlock(this);
+ parent.getParent().mapInstruction(instructions.back().get());
+}
+
+__inline void Block::addInstruction(std::unique_ptr<Instruction> inst)
+{
+ Instruction* raw_instruction = inst.get();
+ instructions.push_back(std::move(inst));
+ raw_instruction->setBlock(this);
+ if (raw_instruction->getResultId())
+ parent.getParent().mapInstruction(raw_instruction);
+}
+
+}; // end spv namespace
+
+#endif // spvIR_H
diff --git a/thirdparty/glslang/glslang/GenericCodeGen/CodeGen.cpp b/thirdparty/glslang/glslang/GenericCodeGen/CodeGen.cpp
new file mode 100644
index 0000000000..b3c7226dfa
--- /dev/null
+++ b/thirdparty/glslang/glslang/GenericCodeGen/CodeGen.cpp
@@ -0,0 +1,76 @@
+//
+// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+#include "../Include/Common.h"
+#include "../Include/ShHandle.h"
+#include "../MachineIndependent/Versions.h"
+
+//
+// Here is where real machine-specific high-level data would be defined.
+//
+class TGenericCompiler : public TCompiler {
+public:
+ TGenericCompiler(EShLanguage l, int dOptions) : TCompiler(l, infoSink), debugOptions(dOptions) { }
+ virtual bool compile(TIntermNode* root, int version = 0, EProfile profile = ENoProfile);
+ TInfoSink infoSink;
+ int debugOptions;
+};
+
+//
+// This function must be provided to create the actual
+// compile object used by higher level code. It returns
+// a subclass of TCompiler.
+//
+TCompiler* ConstructCompiler(EShLanguage language, int debugOptions)
+{
+ return new TGenericCompiler(language, debugOptions);
+}
+
+//
+// Delete the compiler made by ConstructCompiler
+//
+void DeleteCompiler(TCompiler* compiler)
+{
+ delete compiler;
+}
+
+//
+// Generate code from the given parse tree
+//
+bool TGenericCompiler::compile(TIntermNode* /*root*/, int /*version*/, EProfile /*profile*/)
+{
+ haveValidObjectCode = true;
+
+ return haveValidObjectCode;
+}
diff --git a/thirdparty/glslang/glslang/GenericCodeGen/Link.cpp b/thirdparty/glslang/glslang/GenericCodeGen/Link.cpp
new file mode 100644
index 0000000000..c38db0f69f
--- /dev/null
+++ b/thirdparty/glslang/glslang/GenericCodeGen/Link.cpp
@@ -0,0 +1,91 @@
+//
+// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+//
+// The top level algorithms for linking multiple
+// shaders together.
+//
+#include "../Include/Common.h"
+#include "../Include/ShHandle.h"
+
+//
+// Actual link object, derived from the shader handle base classes.
+//
+class TGenericLinker : public TLinker {
+public:
+ TGenericLinker(EShExecutable e, int dOptions) : TLinker(e, infoSink), debugOptions(dOptions) { }
+ bool link(TCompilerList&, TUniformMap*) { return true; }
+ void getAttributeBindings(ShBindingTable const **) const { }
+ TInfoSink infoSink;
+ int debugOptions;
+};
+
+//
+// The internal view of a uniform/float object exchanged with the driver.
+//
+class TUniformLinkedMap : public TUniformMap {
+public:
+ TUniformLinkedMap() { }
+ virtual int getLocation(const char*) { return 0; }
+};
+
+TShHandleBase* ConstructLinker(EShExecutable executable, int debugOptions)
+{
+ return new TGenericLinker(executable, debugOptions);
+}
+
+void DeleteLinker(TShHandleBase* linker)
+{
+ delete linker;
+}
+
+TUniformMap* ConstructUniformMap()
+{
+ return new TUniformLinkedMap();
+}
+
+void DeleteUniformMap(TUniformMap* map)
+{
+ delete map;
+}
+
+TShHandleBase* ConstructBindings()
+{
+ return 0;
+}
+
+void DeleteBindingList(TShHandleBase* bindingList)
+{
+ delete bindingList;
+}
diff --git a/thirdparty/glslang/glslang/Include/BaseTypes.h b/thirdparty/glslang/glslang/Include/BaseTypes.h
new file mode 100644
index 0000000000..1827c49653
--- /dev/null
+++ b/thirdparty/glslang/glslang/Include/BaseTypes.h
@@ -0,0 +1,545 @@
+//
+// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+// Copyright (C) 2012-2013 LunarG, Inc.
+// Copyright (C) 2017 ARM Limited.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+#ifndef _BASICTYPES_INCLUDED_
+#define _BASICTYPES_INCLUDED_
+
+namespace glslang {
+
+//
+// Basic type. Arrays, vectors, sampler details, etc., are orthogonal to this.
+//
+enum TBasicType {
+ EbtVoid,
+ EbtFloat,
+ EbtDouble,
+ EbtFloat16,
+ EbtInt8,
+ EbtUint8,
+ EbtInt16,
+ EbtUint16,
+ EbtInt,
+ EbtUint,
+ EbtInt64,
+ EbtUint64,
+ EbtBool,
+ EbtAtomicUint,
+ EbtSampler,
+ EbtStruct,
+ EbtBlock,
+
+#ifdef NV_EXTENSIONS
+ EbtAccStructNV,
+#endif
+
+ EbtReference,
+
+ // HLSL types that live only temporarily.
+ EbtString,
+
+ EbtNumTypes
+};
+
+//
+// Storage qualifiers. Should align with the different kinds of storage or
+// resource, or with a GLSL storage qualifier. Expanding this enum is deprecated.
+//
+// N.B.: You probably DON'T want to add anything here, but rather just add it
+// to the built-in variables. See the comment above TBuiltInVariable.
+//
+// A new built-in variable will normally use an existing qualifier, like 'in', 'out', etc.
+// DO NOT follow the design pattern of, say, EvqInstanceId, etc.
+//
+enum TStorageQualifier {
+ EvqTemporary, // For temporaries (within a function), read/write
+ EvqGlobal, // For globals read/write
+ EvqConst, // User-defined constant values, will be semantically constant and constant folded
+ EvqVaryingIn, // pipeline input, read only, also supercategory for all built-ins not included in this enum (see TBuiltInVariable)
+ EvqVaryingOut, // pipeline output, read/write, also supercategory for all built-ins not included in this enum (see TBuiltInVariable)
+ EvqUniform, // read only, shared with app
+ EvqBuffer, // read/write, shared with app
+ EvqShared, // compute shader's read/write 'shared' qualifier
+
+#ifdef NV_EXTENSIONS
+ EvqPayloadNV,
+ EvqPayloadInNV,
+ EvqHitAttrNV,
+ EvqCallableDataNV,
+ EvqCallableDataInNV,
+#endif
+
+ // parameters
+ EvqIn, // also, for 'in' in the grammar before we know if it's a pipeline input or an 'in' parameter
+ EvqOut, // also, for 'out' in the grammar before we know if it's a pipeline output or an 'out' parameter
+ EvqInOut,
+ EvqConstReadOnly, // input; also other read-only types having neither a constant value nor constant-value semantics
+
+ // built-ins read by vertex shader
+ EvqVertexId,
+ EvqInstanceId,
+
+ // built-ins written by vertex shader
+ EvqPosition,
+ EvqPointSize,
+ EvqClipVertex,
+
+ // built-ins read by fragment shader
+ EvqFace,
+ EvqFragCoord,
+ EvqPointCoord,
+
+ // built-ins written by fragment shader
+ EvqFragColor,
+ EvqFragDepth,
+
+ // end of list
+ EvqLast
+};
+
+//
+// Subcategories of the TStorageQualifier, simply to give a direct mapping
+// between built-in variable names and a numerical value (the enum).
+//
+// For backward compatibility, there is some redundancy between the
+// TStorageQualifier and these. Existing members should both be maintained accurately.
+// However, any new built-in variable (and any existing non-redundant one)
+// must follow the pattern that the specific built-in is here, and only its
+// general qualifier is in TStorageQualifier.
+//
+// Something like gl_Position, which is sometimes 'in' and sometimes 'out',
+// shows up as two different built-in variables in a single stage, but
+// only has a single enum in TBuiltInVariable, so both the
+// TStorageQualifier and the TBuiltInVariable are needed to distinguish
+// between them.
+//
+enum TBuiltInVariable {
+ EbvNone,
+ EbvNumWorkGroups,
+ EbvWorkGroupSize,
+ EbvWorkGroupId,
+ EbvLocalInvocationId,
+ EbvGlobalInvocationId,
+ EbvLocalInvocationIndex,
+ EbvNumSubgroups,
+ EbvSubgroupID,
+ EbvSubGroupSize,
+ EbvSubGroupInvocation,
+ EbvSubGroupEqMask,
+ EbvSubGroupGeMask,
+ EbvSubGroupGtMask,
+ EbvSubGroupLeMask,
+ EbvSubGroupLtMask,
+ EbvSubgroupSize2,
+ EbvSubgroupInvocation2,
+ EbvSubgroupEqMask2,
+ EbvSubgroupGeMask2,
+ EbvSubgroupGtMask2,
+ EbvSubgroupLeMask2,
+ EbvSubgroupLtMask2,
+ EbvVertexId,
+ EbvInstanceId,
+ EbvVertexIndex,
+ EbvInstanceIndex,
+ EbvBaseVertex,
+ EbvBaseInstance,
+ EbvDrawId,
+ EbvPosition,
+ EbvPointSize,
+ EbvClipVertex,
+ EbvClipDistance,
+ EbvCullDistance,
+ EbvNormal,
+ EbvVertex,
+ EbvMultiTexCoord0,
+ EbvMultiTexCoord1,
+ EbvMultiTexCoord2,
+ EbvMultiTexCoord3,
+ EbvMultiTexCoord4,
+ EbvMultiTexCoord5,
+ EbvMultiTexCoord6,
+ EbvMultiTexCoord7,
+ EbvFrontColor,
+ EbvBackColor,
+ EbvFrontSecondaryColor,
+ EbvBackSecondaryColor,
+ EbvTexCoord,
+ EbvFogFragCoord,
+ EbvInvocationId,
+ EbvPrimitiveId,
+ EbvLayer,
+ EbvViewportIndex,
+ EbvPatchVertices,
+ EbvTessLevelOuter,
+ EbvTessLevelInner,
+ EbvBoundingBox,
+ EbvTessCoord,
+ EbvColor,
+ EbvSecondaryColor,
+ EbvFace,
+ EbvFragCoord,
+ EbvPointCoord,
+ EbvFragColor,
+ EbvFragData,
+ EbvFragDepth,
+ EbvFragStencilRef,
+ EbvSampleId,
+ EbvSamplePosition,
+ EbvSampleMask,
+ EbvHelperInvocation,
+
+#ifdef AMD_EXTENSIONS
+ EbvBaryCoordNoPersp,
+ EbvBaryCoordNoPerspCentroid,
+ EbvBaryCoordNoPerspSample,
+ EbvBaryCoordSmooth,
+ EbvBaryCoordSmoothCentroid,
+ EbvBaryCoordSmoothSample,
+ EbvBaryCoordPullModel,
+#endif
+
+ EbvViewIndex,
+ EbvDeviceIndex,
+
+ EbvFragSizeEXT,
+ EbvFragInvocationCountEXT,
+
+#ifdef NV_EXTENSIONS
+ EbvViewportMaskNV,
+ EbvSecondaryPositionNV,
+ EbvSecondaryViewportMaskNV,
+ EbvPositionPerViewNV,
+ EbvViewportMaskPerViewNV,
+ EbvFragFullyCoveredNV,
+ EbvFragmentSizeNV,
+ EbvInvocationsPerPixelNV,
+ // raytracing
+ EbvLaunchIdNV,
+ EbvLaunchSizeNV,
+ EbvInstanceCustomIndexNV,
+ EbvWorldRayOriginNV,
+ EbvWorldRayDirectionNV,
+ EbvObjectRayOriginNV,
+ EbvObjectRayDirectionNV,
+ EbvRayTminNV,
+ EbvRayTmaxNV,
+ EbvHitTNV,
+ EbvHitKindNV,
+ EbvObjectToWorldNV,
+ EbvWorldToObjectNV,
+ EbvIncomingRayFlagsNV,
+ EbvBaryCoordNV,
+ EbvBaryCoordNoPerspNV,
+ EbvTaskCountNV,
+ EbvPrimitiveCountNV,
+ EbvPrimitiveIndicesNV,
+ EbvClipDistancePerViewNV,
+ EbvCullDistancePerViewNV,
+ EbvLayerPerViewNV,
+ EbvMeshViewCountNV,
+ EbvMeshViewIndicesNV,
+#endif
+
+ // HLSL built-ins that live only temporarily, until they get remapped
+ // to one of the above.
+ EbvFragDepthGreater,
+ EbvFragDepthLesser,
+ EbvGsOutputStream,
+ EbvOutputPatch,
+ EbvInputPatch,
+
+ // structbuffer types
+ EbvAppendConsume, // no need to differentiate append and consume
+ EbvRWStructuredBuffer,
+ EbvStructuredBuffer,
+ EbvByteAddressBuffer,
+ EbvRWByteAddressBuffer,
+
+ EbvLast
+};
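+
+// Illustrative note (not part of glslang): per the comment above this enum,
+// a built-in like gl_Position inside a geometry shader is represented by the
+// single value EbvPosition paired with either EvqVaryingIn (the input copy)
+// or EvqVaryingOut (the output copy); the pair, not either enum alone,
+// identifies the variable.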
+
+// These will show up in error messages
+__inline const char* GetStorageQualifierString(TStorageQualifier q)
+{
+ switch (q) {
+ case EvqTemporary: return "temp"; break;
+ case EvqGlobal: return "global"; break;
+ case EvqConst: return "const"; break;
+ case EvqConstReadOnly: return "const (read only)"; break;
+ case EvqVaryingIn: return "in"; break;
+ case EvqVaryingOut: return "out"; break;
+ case EvqUniform: return "uniform"; break;
+ case EvqBuffer: return "buffer"; break;
+ case EvqShared: return "shared"; break;
+ case EvqIn: return "in"; break;
+ case EvqOut: return "out"; break;
+ case EvqInOut: return "inout"; break;
+ case EvqVertexId: return "gl_VertexId"; break;
+ case EvqInstanceId: return "gl_InstanceId"; break;
+ case EvqPosition: return "gl_Position"; break;
+ case EvqPointSize: return "gl_PointSize"; break;
+ case EvqClipVertex: return "gl_ClipVertex"; break;
+ case EvqFace: return "gl_FrontFacing"; break;
+ case EvqFragCoord: return "gl_FragCoord"; break;
+ case EvqPointCoord: return "gl_PointCoord"; break;
+ case EvqFragColor: return "fragColor"; break;
+ case EvqFragDepth: return "gl_FragDepth"; break;
+#ifdef NV_EXTENSIONS
+ case EvqPayloadNV: return "rayPayloadNV"; break;
+ case EvqPayloadInNV: return "rayPayloadInNV"; break;
+ case EvqHitAttrNV: return "hitAttributeNV"; break;
+ case EvqCallableDataNV: return "callableDataNV"; break;
+ case EvqCallableDataInNV: return "callableDataInNV"; break;
+#endif
+ default: return "unknown qualifier";
+ }
+}
+
+__inline const char* GetBuiltInVariableString(TBuiltInVariable v)
+{
+ switch (v) {
+ case EbvNone: return "";
+ case EbvNumWorkGroups: return "NumWorkGroups";
+ case EbvWorkGroupSize: return "WorkGroupSize";
+ case EbvWorkGroupId: return "WorkGroupID";
+ case EbvLocalInvocationId: return "LocalInvocationID";
+ case EbvGlobalInvocationId: return "GlobalInvocationID";
+ case EbvLocalInvocationIndex: return "LocalInvocationIndex";
+ case EbvSubGroupSize: return "SubGroupSize";
+ case EbvSubGroupInvocation: return "SubGroupInvocation";
+ case EbvSubGroupEqMask: return "SubGroupEqMask";
+ case EbvSubGroupGeMask: return "SubGroupGeMask";
+ case EbvSubGroupGtMask: return "SubGroupGtMask";
+ case EbvSubGroupLeMask: return "SubGroupLeMask";
+ case EbvSubGroupLtMask: return "SubGroupLtMask";
+ case EbvVertexId: return "VertexId";
+ case EbvInstanceId: return "InstanceId";
+ case EbvVertexIndex: return "VertexIndex";
+ case EbvInstanceIndex: return "InstanceIndex";
+ case EbvBaseVertex: return "BaseVertex";
+ case EbvBaseInstance: return "BaseInstance";
+ case EbvDrawId: return "DrawId";
+ case EbvPosition: return "Position";
+ case EbvPointSize: return "PointSize";
+ case EbvClipVertex: return "ClipVertex";
+ case EbvClipDistance: return "ClipDistance";
+ case EbvCullDistance: return "CullDistance";
+ case EbvNormal: return "Normal";
+ case EbvVertex: return "Vertex";
+ case EbvMultiTexCoord0: return "MultiTexCoord0";
+ case EbvMultiTexCoord1: return "MultiTexCoord1";
+ case EbvMultiTexCoord2: return "MultiTexCoord2";
+ case EbvMultiTexCoord3: return "MultiTexCoord3";
+ case EbvMultiTexCoord4: return "MultiTexCoord4";
+ case EbvMultiTexCoord5: return "MultiTexCoord5";
+ case EbvMultiTexCoord6: return "MultiTexCoord6";
+ case EbvMultiTexCoord7: return "MultiTexCoord7";
+ case EbvFrontColor: return "FrontColor";
+ case EbvBackColor: return "BackColor";
+ case EbvFrontSecondaryColor: return "FrontSecondaryColor";
+ case EbvBackSecondaryColor: return "BackSecondaryColor";
+ case EbvTexCoord: return "TexCoord";
+ case EbvFogFragCoord: return "FogFragCoord";
+ case EbvInvocationId: return "InvocationID";
+ case EbvPrimitiveId: return "PrimitiveID";
+ case EbvLayer: return "Layer";
+ case EbvViewportIndex: return "ViewportIndex";
+ case EbvPatchVertices: return "PatchVertices";
+ case EbvTessLevelOuter: return "TessLevelOuter";
+ case EbvTessLevelInner: return "TessLevelInner";
+ case EbvBoundingBox: return "BoundingBox";
+ case EbvTessCoord: return "TessCoord";
+ case EbvColor: return "Color";
+ case EbvSecondaryColor: return "SecondaryColor";
+ case EbvFace: return "Face";
+ case EbvFragCoord: return "FragCoord";
+ case EbvPointCoord: return "PointCoord";
+ case EbvFragColor: return "FragColor";
+ case EbvFragData: return "FragData";
+ case EbvFragDepth: return "FragDepth";
+ case EbvFragStencilRef: return "FragStencilRef";
+ case EbvSampleId: return "SampleId";
+ case EbvSamplePosition: return "SamplePosition";
+ case EbvSampleMask: return "SampleMaskIn";
+ case EbvHelperInvocation: return "HelperInvocation";
+
+#ifdef AMD_EXTENSIONS
+ case EbvBaryCoordNoPersp: return "BaryCoordNoPersp";
+ case EbvBaryCoordNoPerspCentroid: return "BaryCoordNoPerspCentroid";
+ case EbvBaryCoordNoPerspSample: return "BaryCoordNoPerspSample";
+ case EbvBaryCoordSmooth: return "BaryCoordSmooth";
+ case EbvBaryCoordSmoothCentroid: return "BaryCoordSmoothCentroid";
+ case EbvBaryCoordSmoothSample: return "BaryCoordSmoothSample";
+ case EbvBaryCoordPullModel: return "BaryCoordPullModel";
+#endif
+
+ case EbvViewIndex: return "ViewIndex";
+ case EbvDeviceIndex: return "DeviceIndex";
+
+ case EbvFragSizeEXT: return "FragSizeEXT";
+ case EbvFragInvocationCountEXT: return "FragInvocationCountEXT";
+
+#ifdef NV_EXTENSIONS
+ case EbvViewportMaskNV: return "ViewportMaskNV";
+ case EbvSecondaryPositionNV: return "SecondaryPositionNV";
+ case EbvSecondaryViewportMaskNV: return "SecondaryViewportMaskNV";
+ case EbvPositionPerViewNV: return "PositionPerViewNV";
+ case EbvViewportMaskPerViewNV: return "ViewportMaskPerViewNV";
+ case EbvFragFullyCoveredNV: return "FragFullyCoveredNV";
+ case EbvFragmentSizeNV: return "FragmentSizeNV";
+ case EbvInvocationsPerPixelNV: return "InvocationsPerPixelNV";
+ case EbvLaunchIdNV: return "LaunchIdNV";
+ case EbvLaunchSizeNV: return "LaunchSizeNV";
+ case EbvInstanceCustomIndexNV: return "InstanceCustomIndexNV";
+ case EbvWorldRayOriginNV: return "WorldRayOriginNV";
+ case EbvWorldRayDirectionNV: return "WorldRayDirectionNV";
+ case EbvObjectRayOriginNV: return "ObjectRayOriginNV";
+ case EbvObjectRayDirectionNV: return "ObjectRayDirectionNV";
+ case EbvRayTminNV: return "ObjectRayTminNV";
+ case EbvRayTmaxNV: return "ObjectRayTmaxNV";
+ case EbvHitTNV: return "HitTNV";
+ case EbvHitKindNV: return "HitKindNV";
+ case EbvIncomingRayFlagsNV: return "IncomingRayFlagsNV";
+ case EbvObjectToWorldNV: return "ObjectToWorldNV";
+ case EbvWorldToObjectNV: return "WorldToObjectNV";
+
+ case EbvBaryCoordNV: return "BaryCoordNV";
+ case EbvBaryCoordNoPerspNV: return "BaryCoordNoPerspNV";
+ case EbvTaskCountNV: return "TaskCountNV";
+ case EbvPrimitiveCountNV: return "PrimitiveCountNV";
+ case EbvPrimitiveIndicesNV: return "PrimitiveIndicesNV";
+ case EbvClipDistancePerViewNV: return "ClipDistancePerViewNV";
+ case EbvCullDistancePerViewNV: return "CullDistancePerViewNV";
+ case EbvLayerPerViewNV: return "LayerPerViewNV";
+ case EbvMeshViewCountNV: return "MeshViewCountNV";
+ case EbvMeshViewIndicesNV: return "MeshViewIndicesNV";
+#endif
+ default: return "unknown built-in variable";
+ }
+}
+
+// In this enum, order matters; users can assume higher precision is a bigger value
+// and EpqNone is 0.
+enum TPrecisionQualifier {
+ EpqNone = 0,
+ EpqLow,
+ EpqMedium,
+ EpqHigh
+};
+
+__inline const char* GetPrecisionQualifierString(TPrecisionQualifier p)
+{
+ switch (p) {
+ case EpqNone: return ""; break;
+ case EpqLow: return "lowp"; break;
+ case EpqMedium: return "mediump"; break;
+ case EpqHigh: return "highp"; break;
+ default: return "unknown precision qualifier";
+ }
+}
+
+__inline bool isTypeSignedInt(TBasicType type)
+{
+ switch (type) {
+ case EbtInt8:
+ case EbtInt16:
+ case EbtInt:
+ case EbtInt64:
+ return true;
+ default:
+ return false;
+ }
+}
+
+__inline bool isTypeUnsignedInt(TBasicType type)
+{
+ switch (type) {
+ case EbtUint8:
+ case EbtUint16:
+ case EbtUint:
+ case EbtUint64:
+ return true;
+ default:
+ return false;
+ }
+}
+
+__inline bool isTypeInt(TBasicType type)
+{
+ return isTypeSignedInt(type) || isTypeUnsignedInt(type);
+}
+
+__inline bool isTypeFloat(TBasicType type)
+{
+ switch (type) {
+ case EbtFloat:
+ case EbtDouble:
+ case EbtFloat16:
+ return true;
+ default:
+ return false;
+ }
+}
+
+__inline int getTypeRank(TBasicType type) {
+ int res = -1;
+ switch(type) {
+ case EbtInt8:
+ case EbtUint8:
+ res = 0;
+ break;
+ case EbtInt16:
+ case EbtUint16:
+ res = 1;
+ break;
+ case EbtInt:
+ case EbtUint:
+ res = 2;
+ break;
+ case EbtInt64:
+ case EbtUint64:
+ res = 3;
+ break;
+ default:
+ assert(false);
+ break;
+ }
+ return res;
+}
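+
+// Illustrative note (not part of glslang): the rank orders integer widths
+// (8-bit -> 0, 16 -> 1, 32 -> 2, 64 -> 3), so a caller can choose the wider
+// of two integer types by comparing ranks, e.g.
+// getTypeRank(EbtInt64) > getTypeRank(EbtInt).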
+
+} // end namespace glslang
+
+#endif // _BASICTYPES_INCLUDED_
diff --git a/thirdparty/glslang/glslang/Include/Common.h b/thirdparty/glslang/glslang/Include/Common.h
new file mode 100644
index 0000000000..733a790cfd
--- /dev/null
+++ b/thirdparty/glslang/glslang/Include/Common.h
@@ -0,0 +1,292 @@
+//
+// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+// Copyright (C) 2012-2013 LunarG, Inc.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+#ifndef _COMMON_INCLUDED_
+#define _COMMON_INCLUDED_
+
+
+#if defined(__ANDROID__) || (defined(_MSC_VER) && _MSC_VER < 1700)
+#include <sstream>
+namespace std {
+template<typename T>
+std::string to_string(const T& val) {
+ std::ostringstream os;
+ os << val;
+ return os.str();
+}
+}
+#endif
+
+#if (defined(_MSC_VER) && _MSC_VER < 1900 /*vs2015*/) || defined MINGW_HAS_SECURE_API
+ #include <basetsd.h>
+ #ifndef snprintf
+ #define snprintf sprintf_s
+ #endif
+ #define safe_vsprintf(buf,max,format,args) vsnprintf_s((buf), (max), (max), (format), (args))
+#elif defined (solaris)
+ #define safe_vsprintf(buf,max,format,args) vsnprintf((buf), (max), (format), (args))
+ #include <sys/int_types.h>
+ #define UINT_PTR uintptr_t
+#else
+ #define safe_vsprintf(buf,max,format,args) vsnprintf((buf), (max), (format), (args))
+ #include <stdint.h>
+ #define UINT_PTR uintptr_t
+#endif
+
+#if defined(_MSC_VER) && _MSC_VER < 1800
+ #include <stdlib.h>
+ inline long long int strtoll (const char* str, char** endptr, int base)
+ {
+ return _strtoi64(str, endptr, base);
+ }
+ inline unsigned long long int strtoull (const char* str, char** endptr, int base)
+ {
+ return _strtoui64(str, endptr, base);
+ }
+ inline long long int atoll (const char* str)
+ {
+ return strtoll(str, NULL, 10);
+ }
+#endif
+
+#if defined(_MSC_VER)
+#define strdup _strdup
+#endif
+
+/* windows only pragma */
+#ifdef _MSC_VER
+ #pragma warning(disable : 4786) // Don't warn about too long identifiers
+ #pragma warning(disable : 4514) // unused inline method
+ #pragma warning(disable : 4201) // nameless union
+#endif
+
+#include <set>
+#include <unordered_set>
+#include <vector>
+#include <map>
+#include <unordered_map>
+#include <list>
+#include <algorithm>
+#include <string>
+#include <cstdio>
+#include <cstdlib>
+#include <cassert>
+
+#include "PoolAlloc.h"
+
+//
+// Put POOL_ALLOCATOR_NEW_DELETE in base classes to make them use this scheme.
+//
+#define POOL_ALLOCATOR_NEW_DELETE(A) \
+ void* operator new(size_t s) { return (A).allocate(s); } \
+ void* operator new(size_t, void *_Where) { return (_Where); } \
+ void operator delete(void*) { } \
+ void operator delete(void *, void *) { } \
+ void* operator new[](size_t s) { return (A).allocate(s); } \
+ void* operator new[](size_t, void *_Where) { return (_Where); } \
+ void operator delete[](void*) { } \
+ void operator delete[](void *, void *) { }
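+
+// Illustrative sketch (not part of glslang; TExample is hypothetical): a
+// class opts into the pool scheme by expanding the macro with the allocator
+// to draw from. `new` then carves memory out of the thread's pool and
+// `delete` is deliberately a no-op; everything is reclaimed at once when the
+// pool is popped.
+//
+// class TExample {
+// public:
+// POOL_ALLOCATOR_NEW_DELETE(GetThreadPoolAllocator())
+// int value;
+// };
+//
+// TExample* e = new TExample; // allocated from the thread pool
+// delete e; // no per-object free; pool release reclaims it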
+
+namespace glslang {
+
+ //
+ // Pool version of string.
+ //
+ typedef pool_allocator<char> TStringAllocator;
+ typedef std::basic_string <char, std::char_traits<char>, TStringAllocator> TString;
+
+} // end namespace glslang
+
+// Repackage the std::hash for use by unordered map/set with a TString key.
+namespace std {
+
+ template<> struct hash<glslang::TString> {
+ std::size_t operator()(const glslang::TString& s) const
+ {
+ const unsigned _FNV_offset_basis = 2166136261U;
+ const unsigned _FNV_prime = 16777619U;
+ unsigned _Val = _FNV_offset_basis;
+ size_t _Count = s.size();
+ const char* _First = s.c_str();
+ for (size_t _Next = 0; _Next < _Count; ++_Next)
+ {
+ _Val ^= (unsigned)_First[_Next];
+ _Val *= _FNV_prime;
+ }
+
+ return _Val;
+ }
+ };
+}
+
+namespace glslang {
+
+inline TString* NewPoolTString(const char* s)
+{
+ void* memory = GetThreadPoolAllocator().allocate(sizeof(TString));
+ return new(memory) TString(s);
+}
+
+template<class T> inline T* NewPoolObject(T*)
+{
+ return new(GetThreadPoolAllocator().allocate(sizeof(T))) T;
+}
+
+template<class T> inline T* NewPoolObject(T, int instances)
+{
+ return new(GetThreadPoolAllocator().allocate(instances * sizeof(T))) T[instances];
+}
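+
+// Illustrative sketch (not part of glslang): pool strings are built with
+// placement new into pool memory, assuming the thread's pool allocator has
+// been initialized; they must not be deleted individually.
+//
+// TString* name = NewPoolTString("main"); // freed when the pool is popped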
+
+//
+// Pool allocator versions of vectors, lists, and maps
+//
+template <class T> class TVector : public std::vector<T, pool_allocator<T> > {
+public:
+ POOL_ALLOCATOR_NEW_DELETE(GetThreadPoolAllocator())
+
+ typedef typename std::vector<T, pool_allocator<T> >::size_type size_type;
+ TVector() : std::vector<T, pool_allocator<T> >() {}
+ TVector(const pool_allocator<T>& a) : std::vector<T, pool_allocator<T> >(a) {}
+ TVector(size_type i) : std::vector<T, pool_allocator<T> >(i) {}
+ TVector(size_type i, const T& val) : std::vector<T, pool_allocator<T> >(i, val) {}
+};
+
+template <class T> class TList : public std::list<T, pool_allocator<T> > {
+};
+
+template <class K, class D, class CMP = std::less<K> >
+class TMap : public std::map<K, D, CMP, pool_allocator<std::pair<K const, D> > > {
+};
+
+template <class K, class D, class HASH = std::hash<K>, class PRED = std::equal_to<K> >
+class TUnorderedMap : public std::unordered_map<K, D, HASH, PRED, pool_allocator<std::pair<K const, D> > > {
+};
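+
+// Illustrative sketch (not part of glslang): the std::hash<TString>
+// specialization above is what lets TString serve as a key here; nodes are
+// pool-allocated, again assuming an initialized thread pool.
+//
+// TUnorderedMap<TString, int> symbolIds;
+// symbolIds[TString("gl_Position")] = 0; // hashed with the FNV-1a above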
+
+//
+// Persistent string memory. Should only be used for strings that survive
+// across compiles/links.
+//
+typedef std::basic_string<char> TPersistString;
+
+//
+// templatized min and max functions.
+//
+template <class T> T Min(const T a, const T b) { return a < b ? a : b; }
+template <class T> T Max(const T a, const T b) { return a > b ? a : b; }
+
+//
+// Create a TString object from an integer.
+//
+#if defined _MSC_VER || defined MINGW_HAS_SECURE_API
+inline const TString String(const int i, const int base = 10)
+{
+ char text[16]; // 32 bit ints are at most 10 digits in base 10
+ _itoa_s(i, text, sizeof(text), base);
+ return text;
+}
+#else
+inline const TString String(const int i, const int /*base*/ = 10)
+{
+ char text[16]; // 32 bit ints are at most 10 digits in base 10
+
+ // we assume base 10 for all cases
+ snprintf(text, sizeof(text), "%d", i);
+
+ return text;
+}
+#endif
+
+struct TSourceLoc {
+ void init()
+ {
+ name = nullptr; string = 0; line = 0; column = 0;
+ }
+ void init(int stringNum) { init(); string = stringNum; }
+ // Returns the name if it exists. Otherwise, returns the string number.
+ std::string getStringNameOrNum(bool quoteStringName = true) const
+ {
+ if (name != nullptr) {
+ TString qstr = quoteStringName ? ("\"" + *name + "\"") : *name;
+ std::string ret_str(qstr.c_str());
+ return ret_str;
+ }
+ return std::to_string((long long)string);
+ }
+ const char* getFilename() const
+ {
+ if (name == nullptr)
+ return nullptr;
+ return name->c_str();
+ }
+ const char* getFilenameStr() const { return name == nullptr ? "" : name->c_str(); }
+ TString* name; // descriptive name for this string, when a textual name is available, otherwise nullptr
+ int string;
+ int line;
+ int column;
+};
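+
+// Illustrative sketch (not part of glslang): a location with no descriptive
+// name falls back to its string number, keeping diagnostics usable for
+// anonymous source strings.
+//
+// TSourceLoc loc;
+// loc.init(2); // string #2, no name
+// loc.line = 10;
+// loc.getStringNameOrNum(); // yields "2"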
+
+class TPragmaTable : public TMap<TString, TString> {
+public:
+ POOL_ALLOCATOR_NEW_DELETE(GetThreadPoolAllocator())
+};
+
+const int MaxTokenLength = 1024;
+
+template <class T> bool IsPow2(T powerOf2)
+{
+ if (powerOf2 <= 0)
+ return false;
+
+ return (powerOf2 & (powerOf2 - 1)) == 0;
+}
+
+// Round number up to a multiple of the given powerOf2, which is not an
+// exponent, just a value that must itself be a power of 2.
+template <class T> void RoundToPow2(T& number, int powerOf2)
+{
+ assert(IsPow2(powerOf2));
+ number = (number + powerOf2 - 1) & ~(powerOf2 - 1);
+}
+
+template <class T> bool IsMultipleOfPow2(T number, int powerOf2)
+{
+ assert(IsPow2(powerOf2));
+ return ! (number & (powerOf2 - 1));
+}
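+
+// Illustrative worked example (not part of glslang): for powerOf2 == 8 the
+// mask ~(8 - 1) clears the low three bits, so RoundToPow2 maps 13 to
+// (13 + 7) & ~7 == 16, and IsMultipleOfPow2(16, 8) holds since 16 & 7 == 0.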
+
+} // end namespace glslang
+
+#endif // _COMMON_INCLUDED_
diff --git a/thirdparty/glslang/glslang/Include/ConstantUnion.h b/thirdparty/glslang/glslang/Include/ConstantUnion.h
new file mode 100644
index 0000000000..3e93340151
--- /dev/null
+++ b/thirdparty/glslang/glslang/Include/ConstantUnion.h
@@ -0,0 +1,938 @@
+//
+// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+// Copyright (C) 2013 LunarG, Inc.
+// Copyright (C) 2017 ARM Limited.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+#ifndef _CONSTANT_UNION_INCLUDED_
+#define _CONSTANT_UNION_INCLUDED_
+
+#include "../Include/Common.h"
+#include "../Include/BaseTypes.h"
+
+namespace glslang {
+
+class TConstUnion {
+public:
+ POOL_ALLOCATOR_NEW_DELETE(GetThreadPoolAllocator())
+
+ TConstUnion() : iConst(0), type(EbtInt) { }
+
+ void setI8Const(signed char i)
+ {
+ i8Const = i;
+ type = EbtInt8;
+ }
+
+ void setU8Const(unsigned char u)
+ {
+ u8Const = u;
+ type = EbtUint8;
+ }
+
+ void setI16Const(signed short i)
+ {
+ i16Const = i;
+ type = EbtInt16;
+ }
+
+ void setU16Const(unsigned short u)
+ {
+ u16Const = u;
+ type = EbtUint16;
+ }
+
+ void setIConst(int i)
+ {
+ iConst = i;
+ type = EbtInt;
+ }
+
+ void setUConst(unsigned int u)
+ {
+ uConst = u;
+ type = EbtUint;
+ }
+
+ void setI64Const(long long i64)
+ {
+ i64Const = i64;
+ type = EbtInt64;
+ }
+
+ void setU64Const(unsigned long long u64)
+ {
+ u64Const = u64;
+ type = EbtUint64;
+ }
+
+ void setDConst(double d)
+ {
+ dConst = d;
+ type = EbtDouble;
+ }
+
+ void setBConst(bool b)
+ {
+ bConst = b;
+ type = EbtBool;
+ }
+
+ void setSConst(const TString* s)
+ {
+ sConst = s;
+ type = EbtString;
+ }
+
+ signed char getI8Const() const { return i8Const; }
+ unsigned char getU8Const() const { return u8Const; }
+ signed short getI16Const() const { return i16Const; }
+ unsigned short getU16Const() const { return u16Const; }
+ int getIConst() const { return iConst; }
+ unsigned int getUConst() const { return uConst; }
+ long long getI64Const() const { return i64Const; }
+ unsigned long long getU64Const() const { return u64Const; }
+ double getDConst() const { return dConst; }
+ bool getBConst() const { return bConst; }
+ const TString* getSConst() const { return sConst; }
+
+ bool operator==(const signed char i) const
+ {
+ if (i == i8Const)
+ return true;
+
+ return false;
+ }
+
+ bool operator==(const unsigned char u) const
+ {
+ if (u == u8Const)
+ return true;
+
+ return false;
+ }
+
+ bool operator==(const signed short i) const
+ {
+ if (i == i16Const)
+ return true;
+
+ return false;
+ }
+
+ bool operator==(const unsigned short u) const
+ {
+ if (u == u16Const)
+ return true;
+
+ return false;
+ }
+
+ bool operator==(const int i) const
+ {
+ if (i == iConst)
+ return true;
+
+ return false;
+ }
+
+ bool operator==(const unsigned int u) const
+ {
+ if (u == uConst)
+ return true;
+
+ return false;
+ }
+
+ bool operator==(const long long i64) const
+ {
+ if (i64 == i64Const)
+ return true;
+
+ return false;
+ }
+
+ bool operator==(const unsigned long long u64) const
+ {
+ if (u64 == u64Const)
+ return true;
+
+ return false;
+ }
+
+ bool operator==(const double d) const
+ {
+ if (d == dConst)
+ return true;
+
+ return false;
+ }
+
+ bool operator==(const bool b) const
+ {
+ if (b == bConst)
+ return true;
+
+ return false;
+ }
+
+ bool operator==(const TConstUnion& constant) const
+ {
+ if (constant.type != type)
+ return false;
+
+ switch (type) {
+ case EbtInt16:
+ if (constant.i16Const == i16Const)
+ return true;
+
+ break;
+ case EbtUint16:
+ if (constant.u16Const == u16Const)
+ return true;
+
+ break;
+ case EbtInt8:
+ if (constant.i8Const == i8Const)
+ return true;
+
+ break;
+ case EbtUint8:
+ if (constant.u8Const == u8Const)
+ return true;
+
+ break;
+ case EbtInt:
+ if (constant.iConst == iConst)
+ return true;
+
+ break;
+ case EbtUint:
+ if (constant.uConst == uConst)
+ return true;
+
+ break;
+ case EbtInt64:
+ if (constant.i64Const == i64Const)
+ return true;
+
+ break;
+ case EbtUint64:
+ if (constant.u64Const == u64Const)
+ return true;
+
+ break;
+ case EbtDouble:
+ if (constant.dConst == dConst)
+ return true;
+
+ break;
+ case EbtBool:
+ if (constant.bConst == bConst)
+ return true;
+
+ break;
+ default:
+ assert(false && "Default missing");
+ }
+
+ return false;
+ }
+
+ bool operator!=(const signed char i) const
+ {
+ return !operator==(i);
+ }
+
+ bool operator!=(const unsigned char u) const
+ {
+ return !operator==(u);
+ }
+
+ bool operator!=(const signed short i) const
+ {
+ return !operator==(i);
+ }
+
+ bool operator!=(const unsigned short u) const
+ {
+ return !operator==(u);
+ }
+
+ bool operator!=(const int i) const
+ {
+ return !operator==(i);
+ }
+
+ bool operator!=(const unsigned int u) const
+ {
+ return !operator==(u);
+ }
+
+ bool operator!=(const long long i) const
+ {
+ return !operator==(i);
+ }
+
+ bool operator!=(const unsigned long long u) const
+ {
+ return !operator==(u);
+ }
+
+ bool operator!=(const float f) const
+ {
+ return !operator==(f);
+ }
+
+ bool operator!=(const bool b) const
+ {
+ return !operator==(b);
+ }
+
+ bool operator!=(const TConstUnion& constant) const
+ {
+ return !operator==(constant);
+ }
+
+ bool operator>(const TConstUnion& constant) const
+ {
+ assert(type == constant.type);
+ switch (type) {
+ case EbtInt8:
+ if (i8Const > constant.i8Const)
+ return true;
+
+ return false;
+ case EbtUint8:
+ if (u8Const > constant.u8Const)
+ return true;
+
+ return false;
+ case EbtInt16:
+ if (i16Const > constant.i16Const)
+ return true;
+
+ return false;
+ case EbtUint16:
+ if (u16Const > constant.u16Const)
+ return true;
+
+ return false;
+ case EbtInt:
+ if (iConst > constant.iConst)
+ return true;
+
+ return false;
+ case EbtUint:
+ if (uConst > constant.uConst)
+ return true;
+
+ return false;
+ case EbtInt64:
+ if (i64Const > constant.i64Const)
+ return true;
+
+ return false;
+ case EbtUint64:
+ if (u64Const > constant.u64Const)
+ return true;
+
+ return false;
+ case EbtDouble:
+ if (dConst > constant.dConst)
+ return true;
+
+ return false;
+ default:
+ assert(false && "Default missing");
+ return false;
+ }
+ }
+
+ bool operator<(const TConstUnion& constant) const
+ {
+ assert(type == constant.type);
+ switch (type) {
+ case EbtInt8:
+ if (i8Const < constant.i8Const)
+ return true;
+
+ return false;
+ case EbtUint8:
+ if (u8Const < constant.u8Const)
+ return true;
+
+ return false;
+ case EbtInt16:
+ if (i16Const < constant.i16Const)
+ return true;
+
+ return false;
+ case EbtUint16:
+ if (u16Const < constant.u16Const)
+ return true;
+
+ return false;
+ case EbtInt:
+ if (iConst < constant.iConst)
+ return true;
+
+ return false;
+ case EbtUint:
+ if (uConst < constant.uConst)
+ return true;
+
+ return false;
+ case EbtInt64:
+ if (i64Const < constant.i64Const)
+ return true;
+
+ return false;
+ case EbtUint64:
+ if (u64Const < constant.u64Const)
+ return true;
+
+ return false;
+ case EbtDouble:
+ if (dConst < constant.dConst)
+ return true;
+
+ return false;
+ default:
+ assert(false && "Default missing");
+ return false;
+ }
+ }
+
+ TConstUnion operator+(const TConstUnion& constant) const
+ {
+ TConstUnion returnValue;
+ assert(type == constant.type);
+ switch (type) {
+ case EbtInt8: returnValue.setI8Const(i8Const + constant.i8Const); break;
+ case EbtInt16: returnValue.setI16Const(i16Const + constant.i16Const); break;
+ case EbtInt: returnValue.setIConst(iConst + constant.iConst); break;
+ case EbtInt64: returnValue.setI64Const(i64Const + constant.i64Const); break;
+ case EbtUint8: returnValue.setU8Const(u8Const + constant.u8Const); break;
+ case EbtUint16: returnValue.setU16Const(u16Const + constant.u16Const); break;
+ case EbtUint: returnValue.setUConst(uConst + constant.uConst); break;
+ case EbtUint64: returnValue.setU64Const(u64Const + constant.u64Const); break;
+ case EbtDouble: returnValue.setDConst(dConst + constant.dConst); break;
+ default: assert(false && "Default missing");
+ }
+
+ return returnValue;
+ }
+
+ TConstUnion operator-(const TConstUnion& constant) const
+ {
+ TConstUnion returnValue;
+ assert(type == constant.type);
+ switch (type) {
+ case EbtInt8: returnValue.setI8Const(i8Const - constant.i8Const); break;
+ case EbtInt16: returnValue.setI16Const(i16Const - constant.i16Const); break;
+ case EbtInt: returnValue.setIConst(iConst - constant.iConst); break;
+ case EbtInt64: returnValue.setI64Const(i64Const - constant.i64Const); break;
+ case EbtUint8: returnValue.setU8Const(u8Const - constant.u8Const); break;
+ case EbtUint16: returnValue.setU16Const(u16Const - constant.u16Const); break;
+ case EbtUint: returnValue.setUConst(uConst - constant.uConst); break;
+ case EbtUint64: returnValue.setU64Const(u64Const - constant.u64Const); break;
+ case EbtDouble: returnValue.setDConst(dConst - constant.dConst); break;
+ default: assert(false && "Default missing");
+ }
+
+ return returnValue;
+ }
+
+ TConstUnion operator*(const TConstUnion& constant) const
+ {
+ TConstUnion returnValue;
+ assert(type == constant.type);
+ switch (type) {
+ case EbtInt8: returnValue.setI8Const(i8Const * constant.i8Const); break;
+ case EbtInt16: returnValue.setI16Const(i16Const * constant.i16Const); break;
+ case EbtInt: returnValue.setIConst(iConst * constant.iConst); break;
+ case EbtInt64: returnValue.setI64Const(i64Const * constant.i64Const); break;
+ case EbtUint8: returnValue.setU8Const(u8Const * constant.u8Const); break;
+ case EbtUint16: returnValue.setU16Const(u16Const * constant.u16Const); break;
+ case EbtUint: returnValue.setUConst(uConst * constant.uConst); break;
+ case EbtUint64: returnValue.setU64Const(u64Const * constant.u64Const); break;
+ case EbtDouble: returnValue.setDConst(dConst * constant.dConst); break;
+ default: assert(false && "Default missing");
+ }
+
+ return returnValue;
+ }
+
+ TConstUnion operator%(const TConstUnion& constant) const
+ {
+ TConstUnion returnValue;
+ assert(type == constant.type);
+ switch (type) {
+ case EbtInt8: returnValue.setI8Const(i8Const % constant.i8Const); break;
+ case EbtInt16: returnValue.setI16Const(i16Const % constant.i16Const); break;
+ case EbtInt: returnValue.setIConst(iConst % constant.iConst); break;
+ case EbtInt64: returnValue.setI64Const(i64Const % constant.i64Const); break;
+ case EbtUint8: returnValue.setU8Const(u8Const % constant.u8Const); break;
+ case EbtUint16: returnValue.setU16Const(u16Const % constant.u16Const); break;
+ case EbtUint: returnValue.setUConst(uConst % constant.uConst); break;
+ case EbtUint64: returnValue.setU64Const(u64Const % constant.u64Const); break;
+ default: assert(false && "Default missing");
+ }
+
+ return returnValue;
+ }
+
+ TConstUnion operator>>(const TConstUnion& constant) const
+ {
+ TConstUnion returnValue;
+ switch (type) {
+ case EbtInt8:
+ switch (constant.type) {
+ case EbtInt8: returnValue.setI8Const(i8Const >> constant.i8Const); break;
+ case EbtUint8: returnValue.setI8Const(i8Const >> constant.u8Const); break;
+ case EbtInt16: returnValue.setI8Const(i8Const >> constant.i16Const); break;
+ case EbtUint16: returnValue.setI8Const(i8Const >> constant.u16Const); break;
+ case EbtInt: returnValue.setI8Const(i8Const >> constant.iConst); break;
+ case EbtUint: returnValue.setI8Const(i8Const >> constant.uConst); break;
+ case EbtInt64: returnValue.setI8Const(i8Const >> constant.i64Const); break;
+ case EbtUint64: returnValue.setI8Const(i8Const >> constant.u64Const); break;
+ default: assert(false && "Default missing");
+ }
+ break;
+ case EbtUint8:
+ switch (constant.type) {
+ case EbtInt8: returnValue.setU8Const(u8Const >> constant.i8Const); break;
+ case EbtUint8: returnValue.setU8Const(u8Const >> constant.u8Const); break;
+ case EbtInt16: returnValue.setU8Const(u8Const >> constant.i16Const); break;
+ case EbtUint16: returnValue.setU8Const(u8Const >> constant.u16Const); break;
+ case EbtInt: returnValue.setU8Const(u8Const >> constant.iConst); break;
+ case EbtUint: returnValue.setU8Const(u8Const >> constant.uConst); break;
+ case EbtInt64: returnValue.setU8Const(u8Const >> constant.i64Const); break;
+ case EbtUint64: returnValue.setU8Const(u8Const >> constant.u64Const); break;
+ default: assert(false && "Default missing");
+ }
+ break;
+ case EbtInt16:
+ switch (constant.type) {
+ case EbtInt8: returnValue.setI16Const(i16Const >> constant.i8Const); break;
+ case EbtUint8: returnValue.setI16Const(i16Const >> constant.u8Const); break;
+ case EbtInt16: returnValue.setI16Const(i16Const >> constant.i16Const); break;
+ case EbtUint16: returnValue.setI16Const(i16Const >> constant.u16Const); break;
+ case EbtInt: returnValue.setI16Const(i16Const >> constant.iConst); break;
+ case EbtUint: returnValue.setI16Const(i16Const >> constant.uConst); break;
+ case EbtInt64: returnValue.setI16Const(i16Const >> constant.i64Const); break;
+ case EbtUint64: returnValue.setI16Const(i16Const >> constant.u64Const); break;
+ default: assert(false && "Default missing");
+ }
+ break;
+ case EbtUint16:
+ switch (constant.type) {
+ case EbtInt8: returnValue.setU16Const(u16Const >> constant.i8Const); break;
+ case EbtUint8: returnValue.setU16Const(u16Const >> constant.u8Const); break;
+ case EbtInt16: returnValue.setU16Const(u16Const >> constant.i16Const); break;
+ case EbtUint16: returnValue.setU16Const(u16Const >> constant.u16Const); break;
+ case EbtInt: returnValue.setU16Const(u16Const >> constant.iConst); break;
+ case EbtUint: returnValue.setU16Const(u16Const >> constant.uConst); break;
+ case EbtInt64: returnValue.setU16Const(u16Const >> constant.i64Const); break;
+ case EbtUint64: returnValue.setU16Const(u16Const >> constant.u64Const); break;
+ default: assert(false && "Default missing");
+ }
+ break;
+ case EbtInt:
+ switch (constant.type) {
+ case EbtInt8: returnValue.setIConst(iConst >> constant.i8Const); break;
+ case EbtUint8: returnValue.setIConst(iConst >> constant.u8Const); break;
+ case EbtInt16: returnValue.setIConst(iConst >> constant.i16Const); break;
+ case EbtUint16: returnValue.setIConst(iConst >> constant.u16Const); break;
+ case EbtInt: returnValue.setIConst(iConst >> constant.iConst); break;
+ case EbtUint: returnValue.setIConst(iConst >> constant.uConst); break;
+ case EbtInt64: returnValue.setIConst(iConst >> constant.i64Const); break;
+ case EbtUint64: returnValue.setIConst(iConst >> constant.u64Const); break;
+ default: assert(false && "Default missing");
+ }
+ break;
+ case EbtUint:
+ switch (constant.type) {
+ case EbtInt8: returnValue.setUConst(uConst >> constant.i8Const); break;
+ case EbtUint8: returnValue.setUConst(uConst >> constant.u8Const); break;
+ case EbtInt16: returnValue.setUConst(uConst >> constant.i16Const); break;
+ case EbtUint16: returnValue.setUConst(uConst >> constant.u16Const); break;
+ case EbtInt: returnValue.setUConst(uConst >> constant.iConst); break;
+ case EbtUint: returnValue.setUConst(uConst >> constant.uConst); break;
+ case EbtInt64: returnValue.setUConst(uConst >> constant.i64Const); break;
+ case EbtUint64: returnValue.setUConst(uConst >> constant.u64Const); break;
+ default: assert(false && "Default missing");
+ }
+ break;
+ case EbtInt64:
+ switch (constant.type) {
+ case EbtInt8: returnValue.setI64Const(i64Const >> constant.i8Const); break;
+ case EbtUint8: returnValue.setI64Const(i64Const >> constant.u8Const); break;
+ case EbtInt16: returnValue.setI64Const(i64Const >> constant.i16Const); break;
+ case EbtUint16: returnValue.setI64Const(i64Const >> constant.u16Const); break;
+ case EbtInt: returnValue.setI64Const(i64Const >> constant.iConst); break;
+ case EbtUint: returnValue.setI64Const(i64Const >> constant.uConst); break;
+ case EbtInt64: returnValue.setI64Const(i64Const >> constant.i64Const); break;
+ case EbtUint64: returnValue.setI64Const(i64Const >> constant.u64Const); break;
+ default: assert(false && "Default missing");
+ }
+ break;
+ case EbtUint64:
+ switch (constant.type) {
+ case EbtInt8: returnValue.setU64Const(u64Const >> constant.i8Const); break;
+ case EbtUint8: returnValue.setU64Const(u64Const >> constant.u8Const); break;
+ case EbtInt16: returnValue.setU64Const(u64Const >> constant.i16Const); break;
+ case EbtUint16: returnValue.setU64Const(u64Const >> constant.u16Const); break;
+ case EbtInt: returnValue.setU64Const(u64Const >> constant.iConst); break;
+ case EbtUint: returnValue.setU64Const(u64Const >> constant.uConst); break;
+ case EbtInt64: returnValue.setU64Const(u64Const >> constant.i64Const); break;
+ case EbtUint64: returnValue.setU64Const(u64Const >> constant.u64Const); break;
+ default: assert(false && "Default missing");
+ }
+ break;
+ default: assert(false && "Default missing");
+ }
+
+ return returnValue;
+ }
+
+ TConstUnion operator<<(const TConstUnion& constant) const
+ {
+ TConstUnion returnValue;
+ switch (type) {
+ case EbtInt8:
+ switch (constant.type) {
+ case EbtInt8: returnValue.setI8Const(i8Const << constant.i8Const); break;
+ case EbtUint8: returnValue.setI8Const(i8Const << constant.u8Const); break;
+ case EbtInt16: returnValue.setI8Const(i8Const << constant.i16Const); break;
+ case EbtUint16: returnValue.setI8Const(i8Const << constant.u16Const); break;
+ case EbtInt: returnValue.setI8Const(i8Const << constant.iConst); break;
+ case EbtUint: returnValue.setI8Const(i8Const << constant.uConst); break;
+ case EbtInt64: returnValue.setI8Const(i8Const << constant.i64Const); break;
+ case EbtUint64: returnValue.setI8Const(i8Const << constant.u64Const); break;
+ default: assert(false && "Default missing");
+ }
+ break;
+ case EbtUint8:
+ switch (constant.type) {
+ case EbtInt8: returnValue.setU8Const(u8Const << constant.i8Const); break;
+ case EbtUint8: returnValue.setU8Const(u8Const << constant.u8Const); break;
+ case EbtInt16: returnValue.setU8Const(u8Const << constant.i16Const); break;
+ case EbtUint16: returnValue.setU8Const(u8Const << constant.u16Const); break;
+ case EbtInt: returnValue.setU8Const(u8Const << constant.iConst); break;
+ case EbtUint: returnValue.setU8Const(u8Const << constant.uConst); break;
+ case EbtInt64: returnValue.setU8Const(u8Const << constant.i64Const); break;
+ case EbtUint64: returnValue.setU8Const(u8Const << constant.u64Const); break;
+ default: assert(false && "Default missing");
+ }
+ break;
+ case EbtInt16:
+ switch (constant.type) {
+ case EbtInt8: returnValue.setI16Const(i16Const << constant.i8Const); break;
+ case EbtUint8: returnValue.setI16Const(i16Const << constant.u8Const); break;
+ case EbtInt16: returnValue.setI16Const(i16Const << constant.i16Const); break;
+ case EbtUint16: returnValue.setI16Const(i16Const << constant.u16Const); break;
+ case EbtInt: returnValue.setI16Const(i16Const << constant.iConst); break;
+ case EbtUint: returnValue.setI16Const(i16Const << constant.uConst); break;
+ case EbtInt64: returnValue.setI16Const(i16Const << constant.i64Const); break;
+ case EbtUint64: returnValue.setI16Const(i16Const << constant.u64Const); break;
+ default: assert(false && "Default missing");
+ }
+ break;
+ case EbtUint16:
+ switch (constant.type) {
+ case EbtInt8: returnValue.setU16Const(u16Const << constant.i8Const); break;
+ case EbtUint8: returnValue.setU16Const(u16Const << constant.u8Const); break;
+ case EbtInt16: returnValue.setU16Const(u16Const << constant.i16Const); break;
+ case EbtUint16: returnValue.setU16Const(u16Const << constant.u16Const); break;
+ case EbtInt: returnValue.setU16Const(u16Const << constant.iConst); break;
+ case EbtUint: returnValue.setU16Const(u16Const << constant.uConst); break;
+ case EbtInt64: returnValue.setU16Const(u16Const << constant.i64Const); break;
+ case EbtUint64: returnValue.setU16Const(u16Const << constant.u64Const); break;
+ default: assert(false && "Default missing");
+ }
+ break;
+ case EbtInt:
+ switch (constant.type) {
+ case EbtInt8: returnValue.setIConst(iConst << constant.i8Const); break;
+ case EbtUint8: returnValue.setIConst(iConst << constant.u8Const); break;
+ case EbtInt16: returnValue.setIConst(iConst << constant.i16Const); break;
+ case EbtUint16: returnValue.setIConst(iConst << constant.u16Const); break;
+ case EbtInt: returnValue.setIConst(iConst << constant.iConst); break;
+ case EbtUint: returnValue.setIConst(iConst << constant.uConst); break;
+ case EbtInt64: returnValue.setIConst(iConst << constant.i64Const); break;
+ case EbtUint64: returnValue.setIConst(iConst << constant.u64Const); break;
+ default: assert(false && "Default missing");
+ }
+ break;
+ case EbtUint:
+ switch (constant.type) {
+ case EbtInt8: returnValue.setUConst(uConst << constant.i8Const); break;
+ case EbtUint8: returnValue.setUConst(uConst << constant.u8Const); break;
+ case EbtInt16: returnValue.setUConst(uConst << constant.i16Const); break;
+ case EbtUint16: returnValue.setUConst(uConst << constant.u16Const); break;
+ case EbtInt: returnValue.setUConst(uConst << constant.iConst); break;
+ case EbtUint: returnValue.setUConst(uConst << constant.uConst); break;
+ case EbtInt64: returnValue.setUConst(uConst << constant.i64Const); break;
+ case EbtUint64: returnValue.setUConst(uConst << constant.u64Const); break;
+ default: assert(false && "Default missing");
+ }
+ break;
+ case EbtInt64:
+ switch (constant.type) {
+ case EbtInt8: returnValue.setI64Const(i64Const << constant.i8Const); break;
+ case EbtUint8: returnValue.setI64Const(i64Const << constant.u8Const); break;
+ case EbtInt16: returnValue.setI64Const(i64Const << constant.i16Const); break;
+ case EbtUint16: returnValue.setI64Const(i64Const << constant.u16Const); break;
+ case EbtInt: returnValue.setI64Const(i64Const << constant.iConst); break;
+ case EbtUint: returnValue.setI64Const(i64Const << constant.uConst); break;
+ case EbtInt64: returnValue.setI64Const(i64Const << constant.i64Const); break;
+ case EbtUint64: returnValue.setI64Const(i64Const << constant.u64Const); break;
+ default: assert(false && "Default missing");
+ }
+ break;
+ case EbtUint64:
+ switch (constant.type) {
+ case EbtInt8: returnValue.setU64Const(u64Const << constant.i8Const); break;
+ case EbtUint8: returnValue.setU64Const(u64Const << constant.u8Const); break;
+ case EbtInt16: returnValue.setU64Const(u64Const << constant.i16Const); break;
+ case EbtUint16: returnValue.setU64Const(u64Const << constant.u16Const); break;
+ case EbtInt: returnValue.setU64Const(u64Const << constant.iConst); break;
+ case EbtUint: returnValue.setU64Const(u64Const << constant.uConst); break;
+ case EbtInt64: returnValue.setU64Const(u64Const << constant.i64Const); break;
+ case EbtUint64: returnValue.setU64Const(u64Const << constant.u64Const); break;
+ default: assert(false && "Default missing");
+ }
+ break;
+ default: assert(false && "Default missing");
+ }
+
+ return returnValue;
+ }
+
+ TConstUnion operator&(const TConstUnion& constant) const
+ {
+ TConstUnion returnValue;
+ assert(type == constant.type);
+ switch (type) {
+ case EbtInt8: returnValue.setI8Const(i8Const & constant.i8Const); break;
+ case EbtUint8: returnValue.setU8Const(u8Const & constant.u8Const); break;
+ case EbtInt16: returnValue.setI16Const(i16Const & constant.i16Const); break;
+ case EbtUint16: returnValue.setU16Const(u16Const & constant.u16Const); break;
+ case EbtInt: returnValue.setIConst(iConst & constant.iConst); break;
+ case EbtUint: returnValue.setUConst(uConst & constant.uConst); break;
+ case EbtInt64: returnValue.setI64Const(i64Const & constant.i64Const); break;
+ case EbtUint64: returnValue.setU64Const(u64Const & constant.u64Const); break;
+ default: assert(false && "Default missing");
+ }
+
+ return returnValue;
+ }
+
+ TConstUnion operator|(const TConstUnion& constant) const
+ {
+ TConstUnion returnValue;
+ assert(type == constant.type);
+ switch (type) {
+ case EbtInt8: returnValue.setI8Const(i8Const | constant.i8Const); break;
+ case EbtUint8: returnValue.setU8Const(u8Const | constant.u8Const); break;
+ case EbtInt16: returnValue.setI16Const(i16Const | constant.i16Const); break;
+ case EbtUint16: returnValue.setU16Const(u16Const | constant.u16Const); break;
+ case EbtInt: returnValue.setIConst(iConst | constant.iConst); break;
+ case EbtUint: returnValue.setUConst(uConst | constant.uConst); break;
+ case EbtInt64: returnValue.setI64Const(i64Const | constant.i64Const); break;
+ case EbtUint64: returnValue.setU64Const(u64Const | constant.u64Const); break;
+ default: assert(false && "Default missing");
+ }
+
+ return returnValue;
+ }
+
+ TConstUnion operator^(const TConstUnion& constant) const
+ {
+ TConstUnion returnValue;
+ assert(type == constant.type);
+ switch (type) {
+ case EbtInt8: returnValue.setI8Const(i8Const ^ constant.i8Const); break;
+ case EbtUint8: returnValue.setU8Const(u8Const ^ constant.u8Const); break;
+ case EbtInt16: returnValue.setI16Const(i16Const ^ constant.i16Const); break;
+ case EbtUint16: returnValue.setU16Const(u16Const ^ constant.u16Const); break;
+ case EbtInt: returnValue.setIConst(iConst ^ constant.iConst); break;
+ case EbtUint: returnValue.setUConst(uConst ^ constant.uConst); break;
+ case EbtInt64: returnValue.setI64Const(i64Const ^ constant.i64Const); break;
+ case EbtUint64: returnValue.setU64Const(u64Const ^ constant.u64Const); break;
+ default: assert(false && "Default missing");
+ }
+
+ return returnValue;
+ }
+
+ TConstUnion operator~() const
+ {
+ TConstUnion returnValue;
+ switch (type) {
+ case EbtInt8: returnValue.setI8Const(~i8Const); break;
+ case EbtUint8: returnValue.setU8Const(~u8Const); break;
+ case EbtInt16: returnValue.setI16Const(~i16Const); break;
+ case EbtUint16: returnValue.setU16Const(~u16Const); break;
+ case EbtInt: returnValue.setIConst(~iConst); break;
+ case EbtUint: returnValue.setUConst(~uConst); break;
+ case EbtInt64: returnValue.setI64Const(~i64Const); break;
+ case EbtUint64: returnValue.setU64Const(~u64Const); break;
+ default: assert(false && "Default missing");
+ }
+
+ return returnValue;
+ }
+
+ TConstUnion operator&&(const TConstUnion& constant) const
+ {
+ TConstUnion returnValue;
+ assert(type == constant.type);
+ switch (type) {
+ case EbtBool: returnValue.setBConst(bConst && constant.bConst); break;
+ default: assert(false && "Default missing");
+ }
+
+ return returnValue;
+ }
+
+ TConstUnion operator||(const TConstUnion& constant) const
+ {
+ TConstUnion returnValue;
+ assert(type == constant.type);
+ switch (type) {
+ case EbtBool: returnValue.setBConst(bConst || constant.bConst); break;
+ default: assert(false && "Default missing");
+ }
+
+ return returnValue;
+ }
+
+ TBasicType getType() const { return type; }
+
+private:
+ union {
+ signed char i8Const; // used for i8vec, scalar int8s
+ unsigned char u8Const; // used for u8vec, scalar uint8s
+ signed short i16Const; // used for i16vec, scalar int16s
+ unsigned short u16Const; // used for u16vec, scalar uint16s
+ int iConst; // used for ivec, scalar ints
+ unsigned int uConst; // used for uvec, scalar uints
+ long long i64Const; // used for i64vec, scalar int64s
+ unsigned long long u64Const; // used for u64vec, scalar uint64s
+ bool bConst; // used for bvec, scalar bools
+ double dConst; // used for vec, dvec, mat, dmat, scalar floats and doubles
+ const TString* sConst; // string constant
+ };
+
+ TBasicType type;
+};
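+
+// Illustrative sketch (not part of glslang): TConstUnion is the scalar cell
+// used during constant folding. Setters keep the type tag and payload in
+// sync, and the arithmetic operators assert both operands carry the same tag.
+//
+// TConstUnion a, b;
+// a.setIConst(6);
+// b.setIConst(7);
+// TConstUnion c = a * b; // c.getIConst() == 42, c.getType() == EbtInt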
+
+// Encapsulate having a pointer to an array of TConstUnion,
+// which only needs to be allocated if its size is going to be
+// bigger than 0.
+//
+// One convenience is being able to use [] to index into the array directly,
+// instead of first dereferencing the pointer to the underlying vector.
+//
+// General usage is that the size is known up front, and it is
+// created once with the proper size.
+//
+class TConstUnionArray {
+public:
+ POOL_ALLOCATOR_NEW_DELETE(GetThreadPoolAllocator())
+
+ TConstUnionArray() : unionArray(nullptr) { }
+ virtual ~TConstUnionArray() { }
+
+ explicit TConstUnionArray(int size)
+ {
+ if (size == 0)
+ unionArray = nullptr;
+ else
+ unionArray = new TConstUnionVector(size);
+ }
+ TConstUnionArray(const TConstUnionArray& a) : unionArray(a.unionArray) { }
+ TConstUnionArray(const TConstUnionArray& a, int start, int size)
+ {
+ unionArray = new TConstUnionVector(size);
+ for (int i = 0; i < size; ++i)
+ (*unionArray)[i] = a[start + i];
+ }
+
+ // Use this constructor for a smear operation
+ TConstUnionArray(int size, const TConstUnion& val)
+ {
+ unionArray = new TConstUnionVector(size, val);
+ }
+
+ int size() const { return unionArray ? (int)unionArray->size() : 0; }
+ TConstUnion& operator[](size_t index) { return (*unionArray)[index]; }
+ const TConstUnion& operator[](size_t index) const { return (*unionArray)[index]; }
+ bool operator==(const TConstUnionArray& rhs) const
+ {
+ // this includes the case that both are unallocated
+ if (unionArray == rhs.unionArray)
+ return true;
+
+ if (! unionArray || ! rhs.unionArray)
+ return false;
+
+ return *unionArray == *rhs.unionArray;
+ }
+ bool operator!=(const TConstUnionArray& rhs) const { return ! operator==(rhs); }
+
+ double dot(const TConstUnionArray& rhs)
+ {
+ assert(rhs.unionArray->size() == unionArray->size());
+ double sum = 0.0;
+
+ for (size_t comp = 0; comp < unionArray->size(); ++comp)
+ sum += (*this)[comp].getDConst() * rhs[comp].getDConst();
+
+ return sum;
+ }
+
+ bool empty() const { return unionArray == nullptr; }
+
+protected:
+ typedef TVector<TConstUnion> TConstUnionVector;
+ TConstUnionVector* unionArray;
+};
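+
+// Illustrative sketch (not part of glslang): the "smear" constructor
+// replicates one scalar across every component, and dot() folds a
+// component-wise product, mirroring a vector dot product at compile time.
+//
+// TConstUnion half;
+// half.setDConst(0.5);
+// TConstUnionArray v(3, half); // (0.5, 0.5, 0.5)
+// double d = v.dot(v); // 0.75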
+
+} // end namespace glslang
+
+#endif // _CONSTANT_UNION_INCLUDED_
diff --git a/thirdparty/glslang/glslang/Include/InfoSink.h b/thirdparty/glslang/glslang/Include/InfoSink.h
new file mode 100644
index 0000000000..dceb603cff
--- /dev/null
+++ b/thirdparty/glslang/glslang/Include/InfoSink.h
@@ -0,0 +1,144 @@
+//
+// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+#ifndef _INFOSINK_INCLUDED_
+#define _INFOSINK_INCLUDED_
+
+#include "../Include/Common.h"
+#include <cmath>
+
+namespace glslang {
+
+//
+// TPrefixType is used to centralize how info log messages start.
+// See below.
+//
+enum TPrefixType {
+ EPrefixNone,
+ EPrefixWarning,
+ EPrefixError,
+ EPrefixInternalError,
+ EPrefixUnimplemented,
+ EPrefixNote
+};
+
+enum TOutputStream {
+ ENull = 0,
+ EDebugger = 0x01,
+ EStdOut = 0x02,
+ EString = 0x04,
+};
+//
+// Encapsulate info logs for all objects that have them.
+//
+// The methods are a general set of tools for getting a variety of
+// messages and types inserted into the log.
+//
+class TInfoSinkBase {
+public:
+ TInfoSinkBase() : outputStream(4) {} // 4 == EString
+ void erase() { sink.erase(); }
+ TInfoSinkBase& operator<<(const TPersistString& t) { append(t); return *this; }
+ TInfoSinkBase& operator<<(char c) { append(1, c); return *this; }
+ TInfoSinkBase& operator<<(const char* s) { append(s); return *this; }
+ TInfoSinkBase& operator<<(int n) { append(String(n)); return *this; }
+ TInfoSinkBase& operator<<(unsigned int n) { append(String(n)); return *this; }
+ TInfoSinkBase& operator<<(float n) {
+ const int size = 40;
+ char buf[size];
+ snprintf(buf, size, (fabs(n) > 1e-8 && fabs(n) < 1e8) || n == 0.0f ? "%f" : "%g", n);
+ append(buf);
+ return *this;
+ }
+ TInfoSinkBase& operator+(const TPersistString& t) { append(t); return *this; }
+ TInfoSinkBase& operator+(const TString& t) { append(t); return *this; }
+ TInfoSinkBase& operator<<(const TString& t) { append(t); return *this; }
+ TInfoSinkBase& operator+(const char* s) { append(s); return *this; }
+ const char* c_str() const { return sink.c_str(); }
+ void prefix(TPrefixType message) {
+ switch(message) {
+ case EPrefixNone: break;
+ case EPrefixWarning: append("WARNING: "); break;
+ case EPrefixError: append("ERROR: "); break;
+ case EPrefixInternalError: append("INTERNAL ERROR: "); break;
+ case EPrefixUnimplemented: append("UNIMPLEMENTED: "); break;
+ case EPrefixNote: append("NOTE: "); break;
+ default: append("UNKNOWN ERROR: "); break;
+ }
+ }
+ void location(const TSourceLoc& loc) {
+ const int maxSize = 24;
+ char locText[maxSize];
+ snprintf(locText, maxSize, ":%d", loc.line);
+ append(loc.getStringNameOrNum(false).c_str());
+ append(locText);
+ append(": ");
+ }
+ void message(TPrefixType message, const char* s) {
+ prefix(message);
+ append(s);
+ append("\n");
+ }
+ void message(TPrefixType message, const char* s, const TSourceLoc& loc) {
+ prefix(message);
+ location(loc);
+ append(s);
+ append("\n");
+ }
+
+ void setOutputStream(int output = 4) // default 4 == EString
+ {
+ outputStream = output;
+ }
+
+protected:
+ void append(const char* s);
+
+ void append(int count, char c);
+ void append(const TPersistString& t);
+ void append(const TString& t);
+
+ void checkMem(size_t growth) {
+ if (sink.capacity() < sink.size() + growth + 2)
+ sink.reserve(sink.capacity() + sink.capacity() / 2);
+ }
+ void appendToStream(const char* s);
+ TPersistString sink;
+ int outputStream;
+};
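+
+// Usage sketch (illustrative):
+//
+// TInfoSinkBase sink;
+// sink.message(EPrefixWarning, "unused variable 'x'", loc);
+// // sink.c_str() -> "WARNING: shader.vert:12: unused variable 'x'\n"
+// // (assuming 'loc' is a TSourceLoc naming "shader.vert", line 12)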
+
+} // end namespace glslang
+
+class TInfoSink {
+public:
+ glslang::TInfoSinkBase info;
+ glslang::TInfoSinkBase debug;
+};
+
+#endif // _INFOSINK_INCLUDED_
diff --git a/thirdparty/glslang/glslang/Include/InitializeGlobals.h b/thirdparty/glslang/glslang/Include/InitializeGlobals.h
new file mode 100644
index 0000000000..95d0a40e99
--- /dev/null
+++ b/thirdparty/glslang/glslang/Include/InitializeGlobals.h
@@ -0,0 +1,44 @@
+//
+// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+#ifndef __INITIALIZE_GLOBALS_INCLUDED_
+#define __INITIALIZE_GLOBALS_INCLUDED_
+
+namespace glslang {
+
+bool InitializePoolIndex();
+
+} // end namespace glslang
+
+#endif // __INITIALIZE_GLOBALS_INCLUDED_
diff --git a/thirdparty/glslang/glslang/Include/PoolAlloc.h b/thirdparty/glslang/glslang/Include/PoolAlloc.h
new file mode 100644
index 0000000000..0e237a6a2c
--- /dev/null
+++ b/thirdparty/glslang/glslang/Include/PoolAlloc.h
@@ -0,0 +1,317 @@
+//
+// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+// Copyright (C) 2012-2013 LunarG, Inc.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+#ifndef _POOLALLOC_INCLUDED_
+#define _POOLALLOC_INCLUDED_
+
+#ifdef _DEBUG
+# define GUARD_BLOCKS // define to enable guard block sanity checking
+#endif
+
+//
+// This header defines an allocator that can be used to efficiently
+// allocate a large number of small requests for heap memory, with the
+// intention that they are not individually deallocated, but rather
+// collectively deallocated at one time.
+//
+// This simultaneously
+//
+// * Makes each individual allocation much more efficient; the
+// typical allocation is trivial.
+// * Completely avoids the cost of doing individual deallocation.
+// * Saves the trouble of tracking down and plugging a large class of leaks.
+//
+// Individual classes can use this allocator by supplying their own
+// new and delete methods.
+//
+// STL containers can use this allocator by using the pool_allocator
+// class as the allocator (second) template argument.
+//
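+// For example (illustrative sketch):
+//
+// glslang::TPoolAllocator& pool = glslang::GetThreadPoolAllocator();
+// pool.push(); // open a new allocation scope
+// std::vector<int, glslang::pool_allocator<int>> v;
+// v.push_back(42); // storage comes from the pool
+// pool.pop(); // releases everything allocated since push()
+//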
+
+#include <cstddef>
+#include <cstring>
+#include <vector>
+
+namespace glslang {
+
+// If we are using guard blocks, we must track each individual
+// allocation. If we aren't using guard blocks, these
+// never get instantiated, so they have no impact.
+//
+
+class TAllocation {
+public:
+ TAllocation(size_t size, unsigned char* mem, TAllocation* prev = 0) :
+ size(size), mem(mem), prevAlloc(prev) {
+ // Allocations are bracketed:
+ // [allocationHeader][initialGuardBlock][userData][finalGuardBlock]
+ // This would be cleaner with if (guardBlockSize)..., but that
+ // makes the compiler print warnings about 0 length memsets,
+ // even with the if() protecting them.
+# ifdef GUARD_BLOCKS
+ memset(preGuard(), guardBlockBeginVal, guardBlockSize);
+ memset(data(), userDataFill, size);
+ memset(postGuard(), guardBlockEndVal, guardBlockSize);
+# endif
+ }
+
+ void check() const {
+ checkGuardBlock(preGuard(), guardBlockBeginVal, "before");
+ checkGuardBlock(postGuard(), guardBlockEndVal, "after");
+ }
+
+ void checkAllocList() const;
+
+ // Return total size needed to accommodate user buffer of 'size',
+ // plus our tracking data.
+ inline static size_t allocationSize(size_t size) {
+ return size + 2 * guardBlockSize + headerSize();
+ }
+
+ // Offset from surrounding buffer to get to user data buffer.
+ inline static unsigned char* offsetAllocation(unsigned char* m) {
+ return m + guardBlockSize + headerSize();
+ }
+
+private:
+ void checkGuardBlock(unsigned char* blockMem, unsigned char val, const char* locText) const;
+
+ // Find offsets to pre and post guard blocks, and user data buffer
+ unsigned char* preGuard() const { return mem + headerSize(); }
+ unsigned char* data() const { return preGuard() + guardBlockSize; }
+ unsigned char* postGuard() const { return data() + size; }
+
+ size_t size; // size of the user data area
+ unsigned char* mem; // beginning of our allocation (pts to header)
+ TAllocation* prevAlloc; // prior allocation in the chain
+
+ const static unsigned char guardBlockBeginVal;
+ const static unsigned char guardBlockEndVal;
+ const static unsigned char userDataFill;
+
+ const static size_t guardBlockSize;
+# ifdef GUARD_BLOCKS
+ inline static size_t headerSize() { return sizeof(TAllocation); }
+# else
+ inline static size_t headerSize() { return 0; }
+# endif
+};
+
+//
+// There are several stacks. One tracks the user's pushes and pops,
+// and is not yet implemented. The others are simply repositories
+// of free pages or used pages.
+//
+// Page stacks are linked together with a simple header at the beginning
+// of each allocation obtained from the underlying OS. Multi-page allocations
+// are returned to the OS. Individual page allocations are kept for future
+// re-use.
+//
+// The "page size" used is not, nor must it match, the underlying OS
+// page size. But, having it be about that size or equal to a set of
+// pages is likely most optimal.
+//
+class TPoolAllocator {
+public:
+ TPoolAllocator(int growthIncrement = 8*1024, int allocationAlignment = 16);
+
+ //
+ // Don't call the destructor just to free up the memory, call pop()
+ //
+ ~TPoolAllocator();
+
+ //
+// Call push() to establish a new place to pop memory to. Does not
+ // have to be called to get things started.
+ //
+ void push();
+
+ //
+// Call pop() to free all memory allocated since the last call to push(),
+// or, if push() was never called, all memory allocated since the first allocation.
+ //
+ void pop();
+
+ //
+ // Call popAll() to free all memory allocated.
+ //
+ void popAll();
+
+ //
+ // Call allocate() to actually acquire memory. Returns 0 if no memory
+ // available, otherwise a properly aligned pointer to 'numBytes' of memory.
+ //
+ void* allocate(size_t numBytes);
+
+ //
+ // There is no deallocate. The point of this class is that
+ // deallocation can be skipped by the user of it, as the model
+ // of use is to simultaneously deallocate everything at once
+ // by calling pop(), and to not have to solve memory leak problems.
+ //
+
+protected:
+ friend struct tHeader;
+
+ struct tHeader {
+ tHeader(tHeader* nextPage, size_t pageCount) :
+#ifdef GUARD_BLOCKS
+ lastAllocation(0),
+#endif
+ nextPage(nextPage), pageCount(pageCount) { }
+
+ ~tHeader() {
+#ifdef GUARD_BLOCKS
+ if (lastAllocation)
+ lastAllocation->checkAllocList();
+#endif
+ }
+
+#ifdef GUARD_BLOCKS
+ TAllocation* lastAllocation;
+#endif
+ tHeader* nextPage;
+ size_t pageCount;
+ };
+
+ struct tAllocState {
+ size_t offset;
+ tHeader* page;
+ };
+ typedef std::vector<tAllocState> tAllocStack;
+
+ // Track allocations if and only if we're using guard blocks
+#ifndef GUARD_BLOCKS
+ void* initializeAllocation(tHeader*, unsigned char* memory, size_t) {
+#else
+ void* initializeAllocation(tHeader* block, unsigned char* memory, size_t numBytes) {
+ new(memory) TAllocation(numBytes, memory, block->lastAllocation);
+ block->lastAllocation = reinterpret_cast<TAllocation*>(memory);
+#endif
+
+ // This is optimized entirely away if GUARD_BLOCKS is not defined.
+ return TAllocation::offsetAllocation(memory);
+ }
+
+ size_t pageSize; // granularity of allocation from the OS
+ size_t alignment; // all returned allocations will be aligned at
+ // this granularity, which will be a power of 2
+ size_t alignmentMask;
+ size_t headerSkip; // amount of memory to skip to make room for the
+ // header (basically, size of header, rounded
+ // up to make it aligned)
+ size_t currentPageOffset; // next offset in top of inUseList to allocate from
+ tHeader* freeList; // list of popped memory
+ tHeader* inUseList; // list of all memory currently being used
+ tAllocStack stack; // stack of where to allocate from, to partition pool
+
+ int numCalls; // just an interesting statistic
+ size_t totalBytes; // just an interesting statistic
+private:
+ TPoolAllocator& operator=(const TPoolAllocator&); // don't allow assignment operator
+ TPoolAllocator(const TPoolAllocator&); // don't allow default copy constructor
+};
+
+//
+// There could potentially be many pools with pops happening at
+// different times. But a simple use is to have a global pop
+// with everyone using the same global allocator.
+//
+extern TPoolAllocator& GetThreadPoolAllocator();
+void SetThreadPoolAllocator(TPoolAllocator* poolAllocator);
+
+//
+// This STL compatible allocator is intended to be used as the allocator
+// parameter to templatized STL containers, like vector and map.
+//
+// It will use the pools for allocation, and not
+// do any deallocation, but will still do destruction.
+//
+template<class T>
+class pool_allocator {
+public:
+ typedef size_t size_type;
+ typedef ptrdiff_t difference_type;
+ typedef T *pointer;
+ typedef const T *const_pointer;
+ typedef T& reference;
+ typedef const T& const_reference;
+ typedef T value_type;
+ template<class Other>
+ struct rebind {
+ typedef pool_allocator<Other> other;
+ };
+ pointer address(reference x) const { return &x; }
+ const_pointer address(const_reference x) const { return &x; }
+
+ pool_allocator() : allocator(GetThreadPoolAllocator()) { }
+ pool_allocator(TPoolAllocator& a) : allocator(a) { }
+ pool_allocator(const pool_allocator<T>& p) : allocator(p.allocator) { }
+
+ template<class Other>
+ pool_allocator(const pool_allocator<Other>& p) : allocator(p.getAllocator()) { }
+
+ pointer allocate(size_type n) {
+ return reinterpret_cast<pointer>(getAllocator().allocate(n * sizeof(T))); }
+ pointer allocate(size_type n, const void*) {
+ return reinterpret_cast<pointer>(getAllocator().allocate(n * sizeof(T))); }
+
+ void deallocate(void*, size_type) { }
+ void deallocate(pointer, size_type) { }
+
+ pointer _Charalloc(size_t n) {
+ return reinterpret_cast<pointer>(getAllocator().allocate(n)); }
+
+ void construct(pointer p, const T& val) { new ((void *)p) T(val); }
+ void destroy(pointer p) { p->T::~T(); }
+
+ bool operator==(const pool_allocator& rhs) const { return &getAllocator() == &rhs.getAllocator(); }
+ bool operator!=(const pool_allocator& rhs) const { return &getAllocator() != &rhs.getAllocator(); }
+
+ size_type max_size() const { return static_cast<size_type>(-1) / sizeof(T); }
+ size_type max_size(int size) const { return static_cast<size_type>(-1) / size; }
+
+ void setAllocator(TPoolAllocator* a) { allocator = *a; }
+ TPoolAllocator& getAllocator() const { return allocator; }
+
+protected:
+ pool_allocator& operator=(const pool_allocator&) { return *this; }
+ TPoolAllocator& allocator;
+};
+
+} // end namespace glslang
+
+#endif // _POOLALLOC_INCLUDED_
diff --git a/thirdparty/glslang/glslang/Include/ResourceLimits.h b/thirdparty/glslang/glslang/Include/ResourceLimits.h
new file mode 100644
index 0000000000..106b21d9ca
--- /dev/null
+++ b/thirdparty/glslang/glslang/Include/ResourceLimits.h
@@ -0,0 +1,149 @@
+//
+// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+// Copyright (C) 2013 LunarG, Inc.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+#ifndef _RESOURCE_LIMITS_INCLUDED_
+#define _RESOURCE_LIMITS_INCLUDED_
+
+struct TLimits {
+ bool nonInductiveForLoops;
+ bool whileLoops;
+ bool doWhileLoops;
+ bool generalUniformIndexing;
+ bool generalAttributeMatrixVectorIndexing;
+ bool generalVaryingIndexing;
+ bool generalSamplerIndexing;
+ bool generalVariableIndexing;
+ bool generalConstantMatrixVectorIndexing;
+};
+
+struct TBuiltInResource {
+ int maxLights;
+ int maxClipPlanes;
+ int maxTextureUnits;
+ int maxTextureCoords;
+ int maxVertexAttribs;
+ int maxVertexUniformComponents;
+ int maxVaryingFloats;
+ int maxVertexTextureImageUnits;
+ int maxCombinedTextureImageUnits;
+ int maxTextureImageUnits;
+ int maxFragmentUniformComponents;
+ int maxDrawBuffers;
+ int maxVertexUniformVectors;
+ int maxVaryingVectors;
+ int maxFragmentUniformVectors;
+ int maxVertexOutputVectors;
+ int maxFragmentInputVectors;
+ int minProgramTexelOffset;
+ int maxProgramTexelOffset;
+ int maxClipDistances;
+ int maxComputeWorkGroupCountX;
+ int maxComputeWorkGroupCountY;
+ int maxComputeWorkGroupCountZ;
+ int maxComputeWorkGroupSizeX;
+ int maxComputeWorkGroupSizeY;
+ int maxComputeWorkGroupSizeZ;
+ int maxComputeUniformComponents;
+ int maxComputeTextureImageUnits;
+ int maxComputeImageUniforms;
+ int maxComputeAtomicCounters;
+ int maxComputeAtomicCounterBuffers;
+ int maxVaryingComponents;
+ int maxVertexOutputComponents;
+ int maxGeometryInputComponents;
+ int maxGeometryOutputComponents;
+ int maxFragmentInputComponents;
+ int maxImageUnits;
+ int maxCombinedImageUnitsAndFragmentOutputs;
+ int maxCombinedShaderOutputResources;
+ int maxImageSamples;
+ int maxVertexImageUniforms;
+ int maxTessControlImageUniforms;
+ int maxTessEvaluationImageUniforms;
+ int maxGeometryImageUniforms;
+ int maxFragmentImageUniforms;
+ int maxCombinedImageUniforms;
+ int maxGeometryTextureImageUnits;
+ int maxGeometryOutputVertices;
+ int maxGeometryTotalOutputComponents;
+ int maxGeometryUniformComponents;
+ int maxGeometryVaryingComponents;
+ int maxTessControlInputComponents;
+ int maxTessControlOutputComponents;
+ int maxTessControlTextureImageUnits;
+ int maxTessControlUniformComponents;
+ int maxTessControlTotalOutputComponents;
+ int maxTessEvaluationInputComponents;
+ int maxTessEvaluationOutputComponents;
+ int maxTessEvaluationTextureImageUnits;
+ int maxTessEvaluationUniformComponents;
+ int maxTessPatchComponents;
+ int maxPatchVertices;
+ int maxTessGenLevel;
+ int maxViewports;
+ int maxVertexAtomicCounters;
+ int maxTessControlAtomicCounters;
+ int maxTessEvaluationAtomicCounters;
+ int maxGeometryAtomicCounters;
+ int maxFragmentAtomicCounters;
+ int maxCombinedAtomicCounters;
+ int maxAtomicCounterBindings;
+ int maxVertexAtomicCounterBuffers;
+ int maxTessControlAtomicCounterBuffers;
+ int maxTessEvaluationAtomicCounterBuffers;
+ int maxGeometryAtomicCounterBuffers;
+ int maxFragmentAtomicCounterBuffers;
+ int maxCombinedAtomicCounterBuffers;
+ int maxAtomicCounterBufferSize;
+ int maxTransformFeedbackBuffers;
+ int maxTransformFeedbackInterleavedComponents;
+ int maxCullDistances;
+ int maxCombinedClipAndCullDistances;
+ int maxSamples;
+ int maxMeshOutputVerticesNV;
+ int maxMeshOutputPrimitivesNV;
+ int maxMeshWorkGroupSizeX_NV;
+ int maxMeshWorkGroupSizeY_NV;
+ int maxMeshWorkGroupSizeZ_NV;
+ int maxTaskWorkGroupSizeX_NV;
+ int maxTaskWorkGroupSizeY_NV;
+ int maxTaskWorkGroupSizeZ_NV;
+ int maxMeshViewCountNV;
+
+ TLimits limits;
+};
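+
+// Usage sketch (illustrative): a client fills in its implementation's limits
+// before handing the struct to the compiler, e.g.:
+//
+// TBuiltInResource resources = {};
+// resources.maxDrawBuffers = 8;
+// resources.maxComputeWorkGroupSizeX = 1024;
+// resources.limits.whileLoops = true;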
+
+#endif // _RESOURCE_LIMITS_INCLUDED_
diff --git a/thirdparty/glslang/glslang/Include/ShHandle.h b/thirdparty/glslang/glslang/Include/ShHandle.h
new file mode 100644
index 0000000000..df07bd8eda
--- /dev/null
+++ b/thirdparty/glslang/glslang/Include/ShHandle.h
@@ -0,0 +1,176 @@
+//
+// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+#ifndef _SHHANDLE_INCLUDED_
+#define _SHHANDLE_INCLUDED_
+
+//
+// The machine-independent part of the compiler's private objects,
+// sent as ShHandle to the driver.
+//
+// This should not be included by driver code.
+//
+
+#define SH_EXPORTING
+#include "../Public/ShaderLang.h"
+#include "../MachineIndependent/Versions.h"
+#include "InfoSink.h"
+
+class TCompiler;
+class TLinker;
+class TUniformMap;
+
+//
+// The base class used to back handles returned to the driver.
+//
+class TShHandleBase {
+public:
+ TShHandleBase() { pool = new glslang::TPoolAllocator; }
+ virtual ~TShHandleBase() { delete pool; }
+ virtual TCompiler* getAsCompiler() { return 0; }
+ virtual TLinker* getAsLinker() { return 0; }
+ virtual TUniformMap* getAsUniformMap() { return 0; }
+ virtual glslang::TPoolAllocator* getPool() const { return pool; }
+private:
+ glslang::TPoolAllocator* pool;
+};
+
+//
+// The base class for the machine dependent linker to derive from
+// for managing where uniforms live.
+//
+class TUniformMap : public TShHandleBase {
+public:
+ TUniformMap() { }
+ virtual ~TUniformMap() { }
+ virtual TUniformMap* getAsUniformMap() { return this; }
+ virtual int getLocation(const char* name) = 0;
+ virtual TInfoSink& getInfoSink() { return infoSink; }
+ TInfoSink infoSink;
+};
+
+class TIntermNode;
+
+//
+// The base class for the machine dependent compiler to derive from
+// for managing object code from the compile.
+//
+class TCompiler : public TShHandleBase {
+public:
+ TCompiler(EShLanguage l, TInfoSink& sink) : infoSink(sink), language(l), haveValidObjectCode(false) { }
+ virtual ~TCompiler() { }
+ EShLanguage getLanguage() { return language; }
+ virtual TInfoSink& getInfoSink() { return infoSink; }
+
+ virtual bool compile(TIntermNode* root, int version = 0, EProfile profile = ENoProfile) = 0;
+
+ virtual TCompiler* getAsCompiler() { return this; }
+ virtual bool linkable() { return haveValidObjectCode; }
+
+ TInfoSink& infoSink;
+protected:
+ TCompiler& operator=(TCompiler&);
+
+ EShLanguage language;
+ bool haveValidObjectCode;
+};
+
+//
+// Link operations are based on a list of compile results...
+//
+typedef glslang::TVector<TCompiler*> TCompilerList;
+typedef glslang::TVector<TShHandleBase*> THandleList;
+
+//
+// The base class for the machine dependent linker to derive from
+// to manage the resulting executable.
+//
+
+class TLinker : public TShHandleBase {
+public:
+ TLinker(EShExecutable e, TInfoSink& iSink) :
+ infoSink(iSink),
+ executable(e),
+ haveReturnableObjectCode(false),
+ appAttributeBindings(0),
+ fixedAttributeBindings(0),
+ excludedAttributes(0),
+ excludedCount(0),
+ uniformBindings(0) { }
+ virtual TLinker* getAsLinker() { return this; }
+ virtual ~TLinker() { }
+ virtual bool link(TCompilerList&, TUniformMap*) = 0;
+ virtual bool link(THandleList&) { return false; }
+ virtual void setAppAttributeBindings(const ShBindingTable* t) { appAttributeBindings = t; }
+ virtual void setFixedAttributeBindings(const ShBindingTable* t) { fixedAttributeBindings = t; }
+ virtual void getAttributeBindings(ShBindingTable const **t) const = 0;
+ virtual void setExcludedAttributes(const int* attributes, int count) { excludedAttributes = attributes; excludedCount = count; }
+ virtual ShBindingTable* getUniformBindings() const { return uniformBindings; }
+ virtual const void* getObjectCode() const { return 0; } // a real compiler would be returning object code here
+ virtual TInfoSink& getInfoSink() { return infoSink; }
+ TInfoSink& infoSink;
+protected:
+ TLinker& operator=(TLinker&);
+ EShExecutable executable;
+ bool haveReturnableObjectCode; // true when objectCode is acceptable to send to driver
+
+ const ShBindingTable* appAttributeBindings;
+ const ShBindingTable* fixedAttributeBindings;
+ const int* excludedAttributes;
+ int excludedCount;
+ ShBindingTable* uniformBindings; // created by the linker
+};
+
+//
+// This is the interface between the machine independent code
+// and the machine dependent code.
+//
+// The machine dependent code should derive from the classes
+// above. Then Construct*() and Delete*() will create and
+// destroy the machine dependent objects, which contain the
+// above machine independent information.
+//
+TCompiler* ConstructCompiler(EShLanguage, int);
+
+TShHandleBase* ConstructLinker(EShExecutable, int);
+TShHandleBase* ConstructBindings();
+void DeleteLinker(TShHandleBase*);
+void DeleteBindingList(TShHandleBase* bindingList);
+
+TUniformMap* ConstructUniformMap();
+void DeleteCompiler(TCompiler*);
+
+void DeleteUniformMap(TUniformMap*);
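+
+// Usage sketch (illustrative; the int argument is assumed here to carry
+// driver debug options, and 'root' is an intermediate tree built elsewhere):
+//
+// TCompiler* compiler = ConstructCompiler(EShLangVertex, 0);
+// bool ok = compiler->compile(root);
+// DeleteCompiler(compiler);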
+
+#endif // _SHHANDLE_INCLUDED_
diff --git a/thirdparty/glslang/glslang/Include/Types.h b/thirdparty/glslang/glslang/Include/Types.h
new file mode 100644
index 0000000000..90341dcb27
--- /dev/null
+++ b/thirdparty/glslang/glslang/Include/Types.h
@@ -0,0 +1,2276 @@
+//
+// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+// Copyright (C) 2012-2016 LunarG, Inc.
+// Copyright (C) 2015-2016 Google, Inc.
+// Copyright (C) 2017 ARM Limited.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+#ifndef _TYPES_INCLUDED
+#define _TYPES_INCLUDED
+
+#include "../Include/Common.h"
+#include "../Include/BaseTypes.h"
+#include "../Public/ShaderLang.h"
+#include "arrays.h"
+
+#include <algorithm>
+
+namespace glslang {
+
+const int GlslangMaxTypeLength = 200; // TODO: need to print block/struct one member per line, so this can stay bounded
+
+const char* const AnonymousPrefix = "anon@"; // for something like a block whose members can be directly accessed
+inline bool IsAnonymous(const TString& name)
+{
+ return name.compare(0, 5, AnonymousPrefix) == 0; // 5 == strlen("anon@")
+}
+
+//
+// Details within a sampler type
+//
+enum TSamplerDim {
+ EsdNone,
+ Esd1D,
+ Esd2D,
+ Esd3D,
+ EsdCube,
+ EsdRect,
+ EsdBuffer,
+ EsdSubpass, // goes only with non-sampled image (image is true)
+ EsdNumDims
+};
+
+struct TSampler { // misnomer now; includes images, textures without sampler, and textures with sampler
+ TBasicType type : 8; // type returned by sampler
+ TSamplerDim dim : 8;
+ bool arrayed : 1;
+ bool shadow : 1;
+ bool ms : 1;
+ bool image : 1; // image, combined should be false
+ bool combined : 1; // true means texture is combined with a sampler, false means texture with no sampler
+ bool sampler : 1; // true means a pure sampler, other fields should be clear()
+ bool external : 1; // GL_OES_EGL_image_external
+ bool yuv : 1; // GL_EXT_YUV_target
+ unsigned int vectorSize : 3; // vector return type size.
+
+ // Some languages support structures as sample results. Storing the whole structure in the
+ // TSampler is too large, so there is an index to a separate table.
+ static const unsigned structReturnIndexBits = 4; // number of index bits to use.
+ static const unsigned structReturnSlots = (1<<structReturnIndexBits)-1; // number of valid values
+ static const unsigned noReturnStruct = structReturnSlots; // value if no return struct type.
+
+ // Index into a language specific table of texture return structures.
+ unsigned int structReturnIndex : structReturnIndexBits;
+
+ // Encapsulate getting members' vector sizes packed into the vectorSize bitfield.
+ unsigned int getVectorSize() const { return vectorSize; }
+
+ bool isImage() const { return image && dim != EsdSubpass; }
+ bool isSubpass() const { return dim == EsdSubpass; }
+ bool isCombined() const { return combined; }
+ bool isPureSampler() const { return sampler; }
+ bool isTexture() const { return !sampler && !image; }
+ bool isShadow() const { return shadow; }
+ bool isArrayed() const { return arrayed; }
+ bool isMultiSample() const { return ms; }
+ bool hasReturnStruct() const { return structReturnIndex != noReturnStruct; }
+
+ void clear()
+ {
+ type = EbtVoid;
+ dim = EsdNone;
+ arrayed = false;
+ shadow = false;
+ ms = false;
+ image = false;
+ combined = false;
+ sampler = false;
+ external = false;
+ yuv = false;
+ structReturnIndex = noReturnStruct;
+
+ // by default, returns a single vec4;
+ vectorSize = 4;
+ }
+
+ // make a combined sampler and texture
+ void set(TBasicType t, TSamplerDim d, bool a = false, bool s = false, bool m = false)
+ {
+ clear();
+ type = t;
+ dim = d;
+ arrayed = a;
+ shadow = s;
+ ms = m;
+ combined = true;
+ }
+
+ // make an image
+ void setImage(TBasicType t, TSamplerDim d, bool a = false, bool s = false, bool m = false)
+ {
+ clear();
+ type = t;
+ dim = d;
+ arrayed = a;
+ shadow = s;
+ ms = m;
+ image = true;
+ }
+
+ // make a texture with no sampler
+ void setTexture(TBasicType t, TSamplerDim d, bool a = false, bool s = false, bool m = false)
+ {
+ clear();
+ type = t;
+ dim = d;
+ arrayed = a;
+ shadow = s;
+ ms = m;
+ }
+
+ // make a subpass input attachment
+ void setSubpass(TBasicType t, bool m = false)
+ {
+ clear();
+ type = t;
+ image = true;
+ dim = EsdSubpass;
+ ms = m;
+ }
+
+ // make a pure sampler, no texture, no image, nothing combined, the 'sampler' keyword
+ void setPureSampler(bool s)
+ {
+ clear();
+ sampler = true;
+ shadow = s;
+ }
+
+ bool operator==(const TSampler& right) const
+ {
+ return type == right.type &&
+ dim == right.dim &&
+ arrayed == right.arrayed &&
+ shadow == right.shadow &&
+ ms == right.ms &&
+ image == right.image &&
+ combined == right.combined &&
+ sampler == right.sampler &&
+ external == right.external &&
+ yuv == right.yuv &&
+ vectorSize == right.vectorSize &&
+ structReturnIndex == right.structReturnIndex;
+ }
+
+ bool operator!=(const TSampler& right) const
+ {
+ return ! operator==(right);
+ }
+
+ TString getString() const
+ {
+ TString s;
+
+ if (sampler) {
+ s.append("sampler");
+ return s;
+ }
+
+ switch (type) {
+ case EbtFloat: break;
+#ifdef AMD_EXTENSIONS
+ case EbtFloat16: s.append("f16"); break;
+#endif
+ case EbtInt8: s.append("i8"); break;
+ case EbtUint8: s.append("u8"); break;
+ case EbtInt16: s.append("i16"); break;
+ case EbtUint16: s.append("u16"); break;
+ case EbtInt: s.append("i"); break;
+ case EbtUint: s.append("u"); break;
+ case EbtInt64: s.append("i64"); break;
+ case EbtUint64: s.append("u64"); break;
+ default: break; // some compilers want this
+ }
+ if (image) {
+ if (dim == EsdSubpass)
+ s.append("subpass");
+ else
+ s.append("image");
+ } else if (combined) {
+ s.append("sampler");
+ } else {
+ s.append("texture");
+ }
+ if (external) {
+ s.append("ExternalOES");
+ return s;
+ }
+ if (yuv) {
+ return "__" + s + "External2DY2YEXT";
+ }
+ switch (dim) {
+ case Esd1D: s.append("1D"); break;
+ case Esd2D: s.append("2D"); break;
+ case Esd3D: s.append("3D"); break;
+ case EsdCube: s.append("Cube"); break;
+ case EsdRect: s.append("2DRect"); break;
+ case EsdBuffer: s.append("Buffer"); break;
+ case EsdSubpass: s.append("Input"); break;
+ default: break; // some compilers want this
+ }
+ if (ms)
+ s.append("MS");
+ if (arrayed)
+ s.append("Array");
+ if (shadow)
+ s.append("Shadow");
+
+ return s;
+ }
+};
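+
+// Usage sketch (illustrative): set() builds a combined sampler whose GLSL
+// name getString() reconstructs:
+//
+// TSampler samp;
+// samp.set(EbtFloat, Esd2D, /*arrayed*/ true, /*shadow*/ true);
+// // samp.getString() -> "sampler2DArrayShadow"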
+
+//
+// Need to associate source locations with types in a list when building structs.
+//
+class TType;
+struct TTypeLoc {
+ TType* type;
+ TSourceLoc loc;
+};
+typedef TVector<TTypeLoc> TTypeList;
+
+typedef TVector<TString*> TIdentifierList;
+
+//
+// Following are a series of helper enums for managing layouts and qualifiers,
+// used for TPublicType, TType, others.
+//
+
+enum TLayoutPacking {
+ ElpNone,
+ ElpShared, // default, but different than saying nothing
+ ElpStd140,
+ ElpStd430,
+ ElpPacked,
+ ElpScalar,
+ ElpCount // If expanding, see bitfield width below
+};
+
+enum TLayoutMatrix {
+ ElmNone,
+ ElmRowMajor,
+ ElmColumnMajor, // default, but different than saying nothing
+ ElmCount // If expanding, see bitfield width below
+};
+
+// Union of geometry shader and tessellation shader geometry types.
+// They don't go into TType, but rather have current state per shader or
+// active parser type (TPublicType).
+enum TLayoutGeometry {
+ ElgNone,
+ ElgPoints,
+ ElgLines,
+ ElgLinesAdjacency,
+ ElgLineStrip,
+ ElgTriangles,
+ ElgTrianglesAdjacency,
+ ElgTriangleStrip,
+ ElgQuads,
+ ElgIsolines,
+};
+
+enum TVertexSpacing {
+ EvsNone,
+ EvsEqual,
+ EvsFractionalEven,
+ EvsFractionalOdd
+};
+
+enum TVertexOrder {
+ EvoNone,
+ EvoCw,
+ EvoCcw
+};
+
+// Note: order matters, as the category of a format is determined by comparing against the guard values below.
+enum TLayoutFormat {
+ ElfNone,
+
+ // Float image
+ ElfRgba32f,
+ ElfRgba16f,
+ ElfR32f,
+ ElfRgba8,
+ ElfRgba8Snorm,
+
+ ElfEsFloatGuard, // to help with comparisons
+
+ ElfRg32f,
+ ElfRg16f,
+ ElfR11fG11fB10f,
+ ElfR16f,
+ ElfRgba16,
+ ElfRgb10A2,
+ ElfRg16,
+ ElfRg8,
+ ElfR16,
+ ElfR8,
+ ElfRgba16Snorm,
+ ElfRg16Snorm,
+ ElfRg8Snorm,
+ ElfR16Snorm,
+ ElfR8Snorm,
+
+ ElfFloatGuard, // to help with comparisons
+
+ // Int image
+ ElfRgba32i,
+ ElfRgba16i,
+ ElfRgba8i,
+ ElfR32i,
+
+ ElfEsIntGuard, // to help with comparisons
+
+ ElfRg32i,
+ ElfRg16i,
+ ElfRg8i,
+ ElfR16i,
+ ElfR8i,
+
+ ElfIntGuard, // to help with comparisons
+
+ // Uint image
+ ElfRgba32ui,
+ ElfRgba16ui,
+ ElfRgba8ui,
+ ElfR32ui,
+
+ ElfEsUintGuard, // to help with comparisons
+
+ ElfRg32ui,
+ ElfRg16ui,
+ ElfRgb10a2ui,
+ ElfRg8ui,
+ ElfR16ui,
+ ElfR8ui,
+
+ ElfCount
+};
+
+enum TLayoutDepth {
+ EldNone,
+ EldAny,
+ EldGreater,
+ EldLess,
+ EldUnchanged,
+
+ EldCount
+};
+
+enum TBlendEquationShift {
+ // No 'EBlendNone':
+ // These are used as bit-shift amounts. A mask of such shifts will have type 'int',
+ // and in that space, 0 means no bits set, or none. In this enum, 0 means (1 << 0), a bit is set.
+ EBlendMultiply,
+ EBlendScreen,
+ EBlendOverlay,
+ EBlendDarken,
+ EBlendLighten,
+ EBlendColordodge,
+ EBlendColorburn,
+ EBlendHardlight,
+ EBlendSoftlight,
+ EBlendDifference,
+ EBlendExclusion,
+ EBlendHslHue,
+ EBlendHslSaturation,
+ EBlendHslColor,
+ EBlendHslLuminosity,
+ EBlendAllEquations,
+
+ EBlendCount
+};
+
+class TQualifier {
+public:
+ static const int layoutNotSet = -1;
+
+ void clear()
+ {
+ precision = EpqNone;
+ invariant = false;
+ noContraction = false;
+ makeTemporary();
+ declaredBuiltIn = EbvNone;
+ }
+
+ // drop qualifiers that don't belong in a temporary variable
+ void makeTemporary()
+ {
+ semanticName = nullptr;
+ storage = EvqTemporary;
+ builtIn = EbvNone;
+ clearInterstage();
+ clearMemory();
+ specConstant = false;
+ nonUniform = false;
+ clearLayout();
+ }
+
+ void clearInterstage()
+ {
+ clearInterpolation();
+ patch = false;
+ sample = false;
+ }
+
+ void clearInterpolation()
+ {
+ centroid = false;
+ smooth = false;
+ flat = false;
+ nopersp = false;
+#ifdef AMD_EXTENSIONS
+ explicitInterp = false;
+#endif
+#ifdef NV_EXTENSIONS
+ pervertexNV = false;
+ perPrimitiveNV = false;
+ perViewNV = false;
+ perTaskNV = false;
+#endif
+ }
+
+ void clearMemory()
+ {
+ coherent = false;
+ devicecoherent = false;
+ queuefamilycoherent = false;
+ workgroupcoherent = false;
+ subgroupcoherent = false;
+ nonprivate = false;
+ volatil = false;
+ restrict = false;
+ readonly = false;
+ writeonly = false;
+ }
+
+ // Drop just the storage qualification, which perhaps should
+ // never be done, as it is fundamentally inconsistent, but we need to
+ // explore what downstream consumers need.
+ // E.g., in a dereference, it is an inconsistency between:
+ // A) a partially dereferenced resource is still in the storage class it started in
+ // B) a partially dereferenced resource is a new temporary object
+ // If A, nothing should change; if B, everything should change; this is halfway between.
+ void makePartialTemporary()
+ {
+ storage = EvqTemporary;
+ specConstant = false;
+ nonUniform = false;
+ }
+
+ const char* semanticName;
+ TStorageQualifier storage : 6;
+ TBuiltInVariable builtIn : 8;
+ TBuiltInVariable declaredBuiltIn : 8;
+ TPrecisionQualifier precision : 3;
+ bool invariant : 1; // require canonical treatment for cross-shader invariance
+ bool noContraction: 1; // prevent contraction and reassociation, e.g., for 'precise' keyword, and expressions it affects
+ bool centroid : 1;
+ bool smooth : 1;
+ bool flat : 1;
+ bool nopersp : 1;
+#ifdef AMD_EXTENSIONS
+ bool explicitInterp : 1;
+#endif
+#ifdef NV_EXTENSIONS
+ bool pervertexNV : 1;
+ bool perPrimitiveNV : 1;
+ bool perViewNV : 1;
+ bool perTaskNV : 1;
+#endif
+ bool patch : 1;
+ bool sample : 1;
+ bool coherent : 1;
+ bool devicecoherent : 1;
+ bool queuefamilycoherent : 1;
+ bool workgroupcoherent : 1;
+ bool subgroupcoherent : 1;
+ bool nonprivate : 1;
+ bool volatil : 1;
+ bool restrict : 1;
+ bool readonly : 1;
+ bool writeonly : 1;
+ bool specConstant : 1; // having a constant_id is not sufficient: expressions have no id, but are still specConstant
+ bool nonUniform : 1;
+
+ bool isMemory() const
+ {
+ return subgroupcoherent || workgroupcoherent || queuefamilycoherent || devicecoherent || coherent || volatil || restrict || readonly || writeonly || nonprivate;
+ }
+ bool isMemoryQualifierImageAndSSBOOnly() const
+ {
+ return subgroupcoherent || workgroupcoherent || queuefamilycoherent || devicecoherent || coherent || volatil || restrict || readonly || writeonly;
+ }
+ bool bufferReferenceNeedsVulkanMemoryModel() const
+ {
+ // include qualifiers that map to load/store availability/visibility/nonprivate memory access operands
+ return subgroupcoherent || workgroupcoherent || queuefamilycoherent || devicecoherent || coherent || nonprivate;
+ }
+
+ bool isInterpolation() const
+ {
+#ifdef AMD_EXTENSIONS
+ return flat || smooth || nopersp || explicitInterp;
+#else
+ return flat || smooth || nopersp;
+#endif
+ }
+
+#ifdef AMD_EXTENSIONS
+ bool isExplicitInterpolation() const
+ {
+ return explicitInterp;
+ }
+#endif
+
+ bool isAuxiliary() const
+ {
+#ifdef NV_EXTENSIONS
+ return centroid || patch || sample || pervertexNV;
+#else
+ return centroid || patch || sample;
+#endif
+ }
+
+ bool isPipeInput() const
+ {
+ switch (storage) {
+ case EvqVaryingIn:
+ case EvqFragCoord:
+ case EvqPointCoord:
+ case EvqFace:
+ case EvqVertexId:
+ case EvqInstanceId:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ bool isPipeOutput() const
+ {
+ switch (storage) {
+ case EvqPosition:
+ case EvqPointSize:
+ case EvqClipVertex:
+ case EvqVaryingOut:
+ case EvqFragColor:
+ case EvqFragDepth:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ bool isParamInput() const
+ {
+ switch (storage) {
+ case EvqIn:
+ case EvqInOut:
+ case EvqConstReadOnly:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ bool isParamOutput() const
+ {
+ switch (storage) {
+ case EvqOut:
+ case EvqInOut:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ bool isUniformOrBuffer() const
+ {
+ switch (storage) {
+ case EvqUniform:
+ case EvqBuffer:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ bool isPerPrimitive() const
+ {
+#ifdef NV_EXTENSIONS
+ return perPrimitiveNV;
+#else
+ return false;
+#endif
+ }
+
+ bool isPerView() const
+ {
+#ifdef NV_EXTENSIONS
+ return perViewNV;
+#else
+ return false;
+#endif
+ }
+
+ bool isTaskMemory() const
+ {
+#ifdef NV_EXTENSIONS
+ return perTaskNV;
+#else
+ return false;
+#endif
+ }
+
+ bool isIo() const
+ {
+ switch (storage) {
+ case EvqUniform:
+ case EvqBuffer:
+ case EvqVaryingIn:
+ case EvqFragCoord:
+ case EvqPointCoord:
+ case EvqFace:
+ case EvqVertexId:
+ case EvqInstanceId:
+ case EvqPosition:
+ case EvqPointSize:
+ case EvqClipVertex:
+ case EvqVaryingOut:
+ case EvqFragColor:
+ case EvqFragDepth:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ // non-built-in symbols that might link between compilation units
+ bool isLinkable() const
+ {
+ switch (storage) {
+ case EvqGlobal:
+ case EvqVaryingIn:
+ case EvqVaryingOut:
+ case EvqUniform:
+ case EvqBuffer:
+ case EvqShared:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ // True if this type of IO is supposed to be arrayed with an extra level for per-vertex data
+ bool isArrayedIo(EShLanguage language) const
+ {
+ switch (language) {
+ case EShLangGeometry:
+ return isPipeInput();
+ case EShLangTessControl:
+ return ! patch && (isPipeInput() || isPipeOutput());
+ case EShLangTessEvaluation:
+ return ! patch && isPipeInput();
+#ifdef NV_EXTENSIONS
+ case EShLangFragment:
+ return pervertexNV && isPipeInput();
+ case EShLangMeshNV:
+ return ! perTaskNV && isPipeOutput();
+#endif
+
+ default:
+ return false;
+ }
+ }
+
+ // Implementing an embedded layout-qualifier class here, since C++ can't have a real class bitfield
+ void clearLayout() // all layout
+ {
+ clearUniformLayout();
+
+ layoutPushConstant = false;
+ layoutBufferReference = false;
+#ifdef NV_EXTENSIONS
+ layoutPassthrough = false;
+ layoutViewportRelative = false;
+ // -2048 as the default value indicating layoutSecondaryViewportRelative is not set
+ layoutSecondaryViewportRelativeOffset = -2048;
+ layoutShaderRecordNV = false;
+#endif
+
+ layoutBufferReferenceAlign = layoutBufferReferenceAlignEnd;
+
+ clearInterstageLayout();
+
+ layoutSpecConstantId = layoutSpecConstantIdEnd;
+
+ layoutFormat = ElfNone;
+ }
+ void clearInterstageLayout()
+ {
+ layoutLocation = layoutLocationEnd;
+ layoutComponent = layoutComponentEnd;
+ layoutIndex = layoutIndexEnd;
+ clearStreamLayout();
+ clearXfbLayout();
+ }
+ void clearStreamLayout()
+ {
+ layoutStream = layoutStreamEnd;
+ }
+ void clearXfbLayout()
+ {
+ layoutXfbBuffer = layoutXfbBufferEnd;
+ layoutXfbStride = layoutXfbStrideEnd;
+ layoutXfbOffset = layoutXfbOffsetEnd;
+ }
+
+ bool hasNonXfbLayout() const
+ {
+ return hasUniformLayout() ||
+ hasAnyLocation() ||
+ hasStream() ||
+ hasFormat() ||
+#ifdef NV_EXTENSIONS
+ layoutShaderRecordNV ||
+#endif
+ layoutPushConstant ||
+ layoutBufferReference;
+ }
+ bool hasLayout() const
+ {
+ return hasNonXfbLayout() ||
+ hasXfb();
+ }
+ TLayoutMatrix layoutMatrix : 3;
+ TLayoutPacking layoutPacking : 4;
+ int layoutOffset;
+ int layoutAlign;
+
+ unsigned int layoutLocation : 12;
+ static const unsigned int layoutLocationEnd = 0xFFF;
+
+ unsigned int layoutComponent : 3;
+ static const unsigned int layoutComponentEnd = 4;
+
+ unsigned int layoutSet : 7;
+ static const unsigned int layoutSetEnd = 0x3F;
+
+ unsigned int layoutBinding : 16;
+ static const unsigned int layoutBindingEnd = 0xFFFF;
+
+ unsigned int layoutIndex : 8;
+ static const unsigned int layoutIndexEnd = 0xFF;
+
+ unsigned int layoutStream : 8;
+ static const unsigned int layoutStreamEnd = 0xFF;
+
+ unsigned int layoutXfbBuffer : 4;
+ static const unsigned int layoutXfbBufferEnd = 0xF;
+
+ unsigned int layoutXfbStride : 14;
+ static const unsigned int layoutXfbStrideEnd = 0x3FFF;
+
+ unsigned int layoutXfbOffset : 13;
+ static const unsigned int layoutXfbOffsetEnd = 0x1FFF;
+
+ unsigned int layoutAttachment : 8; // for input_attachment_index
+ static const unsigned int layoutAttachmentEnd = 0xFF;
+
+ unsigned int layoutSpecConstantId : 11;
+ static const unsigned int layoutSpecConstantIdEnd = 0x7FF;
+
+ // stored as log2 of the actual alignment value
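+ // (e.g., a stored value of 4 means a 1 << 4 = 16-byte alignment)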
+ unsigned int layoutBufferReferenceAlign : 6;
+ static const unsigned int layoutBufferReferenceAlignEnd = 0x3F;
+
+ TLayoutFormat layoutFormat : 8;
+
+ bool layoutPushConstant;
+ bool layoutBufferReference;
+
+#ifdef NV_EXTENSIONS
+ bool layoutPassthrough;
+ bool layoutViewportRelative;
+ int layoutSecondaryViewportRelativeOffset;
+ bool layoutShaderRecordNV;
+#endif
+
+ bool hasUniformLayout() const
+ {
+ return hasMatrix() ||
+ hasPacking() ||
+ hasOffset() ||
+ hasBinding() ||
+ hasSet() ||
+ hasAlign();
+ }
+ void clearUniformLayout() // only uniform specific
+ {
+ layoutMatrix = ElmNone;
+ layoutPacking = ElpNone;
+ layoutOffset = layoutNotSet;
+ layoutAlign = layoutNotSet;
+
+ layoutSet = layoutSetEnd;
+ layoutBinding = layoutBindingEnd;
+ layoutAttachment = layoutAttachmentEnd;
+ }
+
+ bool hasMatrix() const
+ {
+ return layoutMatrix != ElmNone;
+ }
+ bool hasPacking() const
+ {
+ return layoutPacking != ElpNone;
+ }
+ bool hasOffset() const
+ {
+ return layoutOffset != layoutNotSet;
+ }
+ bool hasAlign() const
+ {
+ return layoutAlign != layoutNotSet;
+ }
+ bool hasAnyLocation() const
+ {
+ return hasLocation() ||
+ hasComponent() ||
+ hasIndex();
+ }
+ bool hasLocation() const
+ {
+ return layoutLocation != layoutLocationEnd;
+ }
+ bool hasComponent() const
+ {
+ return layoutComponent != layoutComponentEnd;
+ }
+ bool hasIndex() const
+ {
+ return layoutIndex != layoutIndexEnd;
+ }
+ bool hasSet() const
+ {
+ return layoutSet != layoutSetEnd;
+ }
+ bool hasBinding() const
+ {
+ return layoutBinding != layoutBindingEnd;
+ }
+ bool hasStream() const
+ {
+ return layoutStream != layoutStreamEnd;
+ }
+ bool hasFormat() const
+ {
+ return layoutFormat != ElfNone;
+ }
+ bool hasXfb() const
+ {
+ return hasXfbBuffer() ||
+ hasXfbStride() ||
+ hasXfbOffset();
+ }
+ bool hasXfbBuffer() const
+ {
+ return layoutXfbBuffer != layoutXfbBufferEnd;
+ }
+ bool hasXfbStride() const
+ {
+ return layoutXfbStride != layoutXfbStrideEnd;
+ }
+ bool hasXfbOffset() const
+ {
+ return layoutXfbOffset != layoutXfbOffsetEnd;
+ }
+ bool hasAttachment() const
+ {
+ return layoutAttachment != layoutAttachmentEnd;
+ }
+ bool hasSpecConstantId() const
+ {
+ // Not the same thing as being a specialization constant, this
+ // is just whether or not it was declared with an ID.
+ return layoutSpecConstantId != layoutSpecConstantIdEnd;
+ }
+ bool hasBufferReferenceAlign() const
+ {
+ return layoutBufferReferenceAlign != layoutBufferReferenceAlignEnd;
+ }
+ bool isSpecConstant() const
+ {
+ // True if type is a specialization constant, whether or not it
+ // had a specialization-constant ID, and false if it is not a
+ // true front-end constant.
+ return specConstant;
+ }
+ bool isNonUniform() const
+ {
+ return nonUniform;
+ }
+ bool isFrontEndConstant() const
+ {
+ // True if the front-end knows the final constant value.
+ // This allows front-end constant folding.
+ return storage == EvqConst && ! specConstant;
+ }
+ bool isConstant() const
+ {
+ // True if is either kind of constant; specialization or regular.
+ return isFrontEndConstant() || isSpecConstant();
+ }
+ void makeSpecConstant()
+ {
+ storage = EvqConst;
+ specConstant = true;
+ }
+ static const char* getLayoutPackingString(TLayoutPacking packing)
+ {
+ switch (packing) {
+ case ElpPacked: return "packed";
+ case ElpShared: return "shared";
+ case ElpStd140: return "std140";
+ case ElpStd430: return "std430";
+ case ElpScalar: return "scalar";
+ default: return "none";
+ }
+ }
+ static const char* getLayoutMatrixString(TLayoutMatrix m)
+ {
+ switch (m) {
+ case ElmColumnMajor: return "column_major";
+ case ElmRowMajor: return "row_major";
+ default: return "none";
+ }
+ }
+ static const char* getLayoutFormatString(TLayoutFormat f)
+ {
+ switch (f) {
+ case ElfRgba32f: return "rgba32f";
+ case ElfRgba16f: return "rgba16f";
+ case ElfRg32f: return "rg32f";
+ case ElfRg16f: return "rg16f";
+ case ElfR11fG11fB10f: return "r11f_g11f_b10f";
+ case ElfR32f: return "r32f";
+ case ElfR16f: return "r16f";
+ case ElfRgba16: return "rgba16";
+ case ElfRgb10A2: return "rgb10_a2";
+ case ElfRgba8: return "rgba8";
+ case ElfRg16: return "rg16";
+ case ElfRg8: return "rg8";
+ case ElfR16: return "r16";
+ case ElfR8: return "r8";
+ case ElfRgba16Snorm: return "rgba16_snorm";
+ case ElfRgba8Snorm: return "rgba8_snorm";
+ case ElfRg16Snorm: return "rg16_snorm";
+ case ElfRg8Snorm: return "rg8_snorm";
+ case ElfR16Snorm: return "r16_snorm";
+ case ElfR8Snorm: return "r8_snorm";
+
+ case ElfRgba32i: return "rgba32i";
+ case ElfRgba16i: return "rgba16i";
+ case ElfRgba8i: return "rgba8i";
+ case ElfRg32i: return "rg32i";
+ case ElfRg16i: return "rg16i";
+ case ElfRg8i: return "rg8i";
+ case ElfR32i: return "r32i";
+ case ElfR16i: return "r16i";
+ case ElfR8i: return "r8i";
+
+ case ElfRgba32ui: return "rgba32ui";
+ case ElfRgba16ui: return "rgba16ui";
+ case ElfRgba8ui: return "rgba8ui";
+ case ElfRg32ui: return "rg32ui";
+ case ElfRg16ui: return "rg16ui";
+ case ElfRgb10a2ui: return "rgb10_a2ui";
+ case ElfRg8ui: return "rg8ui";
+ case ElfR32ui: return "r32ui";
+ case ElfR16ui: return "r16ui";
+ case ElfR8ui: return "r8ui";
+ default: return "none";
+ }
+ }
+ static const char* getLayoutDepthString(TLayoutDepth d)
+ {
+ switch (d) {
+ case EldAny: return "depth_any";
+ case EldGreater: return "depth_greater";
+ case EldLess: return "depth_less";
+ case EldUnchanged: return "depth_unchanged";
+ default: return "none";
+ }
+ }
+ static const char* getBlendEquationString(TBlendEquationShift e)
+ {
+ switch (e) {
+ case EBlendMultiply: return "blend_support_multiply";
+ case EBlendScreen: return "blend_support_screen";
+ case EBlendOverlay: return "blend_support_overlay";
+ case EBlendDarken: return "blend_support_darken";
+ case EBlendLighten: return "blend_support_lighten";
+ case EBlendColordodge: return "blend_support_colordodge";
+ case EBlendColorburn: return "blend_support_colorburn";
+ case EBlendHardlight: return "blend_support_hardlight";
+ case EBlendSoftlight: return "blend_support_softlight";
+ case EBlendDifference: return "blend_support_difference";
+ case EBlendExclusion: return "blend_support_exclusion";
+ case EBlendHslHue: return "blend_support_hsl_hue";
+ case EBlendHslSaturation: return "blend_support_hsl_saturation";
+ case EBlendHslColor: return "blend_support_hsl_color";
+ case EBlendHslLuminosity: return "blend_support_hsl_luminosity";
+ case EBlendAllEquations: return "blend_support_all_equations";
+ default: return "unknown";
+ }
+ }
+ static const char* getGeometryString(TLayoutGeometry geometry)
+ {
+ switch (geometry) {
+ case ElgPoints: return "points";
+ case ElgLines: return "lines";
+ case ElgLinesAdjacency: return "lines_adjacency";
+ case ElgLineStrip: return "line_strip";
+ case ElgTriangles: return "triangles";
+ case ElgTrianglesAdjacency: return "triangles_adjacency";
+ case ElgTriangleStrip: return "triangle_strip";
+ case ElgQuads: return "quads";
+ case ElgIsolines: return "isolines";
+ default: return "none";
+ }
+ }
+ static const char* getVertexSpacingString(TVertexSpacing spacing)
+ {
+ switch (spacing) {
+ case EvsEqual: return "equal_spacing";
+ case EvsFractionalEven: return "fractional_even_spacing";
+ case EvsFractionalOdd: return "fractional_odd_spacing";
+ default: return "none";
+ }
+ }
+ static const char* getVertexOrderString(TVertexOrder order)
+ {
+ switch (order) {
+ case EvoCw: return "cw";
+ case EvoCcw: return "ccw";
+ default: return "none";
+ }
+ }
+ static int mapGeometryToSize(TLayoutGeometry geometry)
+ {
+ switch (geometry) {
+ case ElgPoints: return 1;
+ case ElgLines: return 2;
+ case ElgLinesAdjacency: return 4;
+ case ElgTriangles: return 3;
+ case ElgTrianglesAdjacency: return 6;
+ default: return 0;
+ }
+ }
+};
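+
+// Usage sketch (illustrative): a specialization constant is a constant, but
+// not a front-end constant, so the front end cannot fold it:
+//
+// TQualifier q;
+// q.clear();
+// q.makeSpecConstant();
+// // q.isConstant() == true, q.isFrontEndConstant() == false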
+
+// Qualifiers that don't need to be kept per object. They have shader scope, not object scope.
+// So, they will not be part of TType, TQualifier, etc.
+struct TShaderQualifiers {
+ TLayoutGeometry geometry; // geometry/tessellation shader in/out primitives
+ bool pixelCenterInteger; // fragment shader
+ bool originUpperLeft; // fragment shader
+ int invocations;
+ int vertices; // for tessellation "vertices", geometry & mesh "max_vertices"
+ TVertexSpacing spacing;
+ TVertexOrder order;
+ bool pointMode;
+ int localSize[3]; // compute shader
+ int localSizeSpecId[3]; // compute shader specialization id for gl_WorkGroupSize
+ bool earlyFragmentTests; // fragment input
+ bool postDepthCoverage; // fragment input
+ TLayoutDepth layoutDepth;
+ bool blendEquation; // true if any blend equation was specified
+ int numViews; // multiview extensions
+
+#ifdef NV_EXTENSIONS
+ bool layoutOverrideCoverage; // true if layout override_coverage set
+ bool layoutDerivativeGroupQuads; // true if layout derivative_group_quadsNV set
+ bool layoutDerivativeGroupLinear; // true if layout derivative_group_linearNV set
+ int primitives; // mesh shader "max_primitives"
+#endif
+
+ void init()
+ {
+ geometry = ElgNone;
+ originUpperLeft = false;
+ pixelCenterInteger = false;
+ invocations = TQualifier::layoutNotSet;
+ vertices = TQualifier::layoutNotSet;
+ spacing = EvsNone;
+ order = EvoNone;
+ pointMode = false;
+ localSize[0] = 1;
+ localSize[1] = 1;
+ localSize[2] = 1;
+ localSizeSpecId[0] = TQualifier::layoutNotSet;
+ localSizeSpecId[1] = TQualifier::layoutNotSet;
+ localSizeSpecId[2] = TQualifier::layoutNotSet;
+ earlyFragmentTests = false;
+ postDepthCoverage = false;
+ layoutDepth = EldNone;
+ blendEquation = false;
+ numViews = TQualifier::layoutNotSet;
+#ifdef NV_EXTENSIONS
+ layoutOverrideCoverage = false;
+ layoutDerivativeGroupQuads = false;
+ layoutDerivativeGroupLinear = false;
+ primitives = TQualifier::layoutNotSet;
+#endif
+ }
+
+ // Merge in characteristics from the 'src' qualifier. They can override when
+ // set, but never erase when not set.
+ void merge(const TShaderQualifiers& src)
+ {
+ if (src.geometry != ElgNone)
+ geometry = src.geometry;
+ if (src.pixelCenterInteger)
+ pixelCenterInteger = src.pixelCenterInteger;
+ if (src.originUpperLeft)
+ originUpperLeft = src.originUpperLeft;
+ if (src.invocations != TQualifier::layoutNotSet)
+ invocations = src.invocations;
+ if (src.vertices != TQualifier::layoutNotSet)
+ vertices = src.vertices;
+ if (src.spacing != EvsNone)
+ spacing = src.spacing;
+ if (src.order != EvoNone)
+ order = src.order;
+ if (src.pointMode)
+ pointMode = true;
+ for (int i = 0; i < 3; ++i) {
+ if (src.localSize[i] > 1)
+ localSize[i] = src.localSize[i];
+ }
+ for (int i = 0; i < 3; ++i) {
+ if (src.localSizeSpecId[i] != TQualifier::layoutNotSet)
+ localSizeSpecId[i] = src.localSizeSpecId[i];
+ }
+ if (src.earlyFragmentTests)
+ earlyFragmentTests = true;
+ if (src.postDepthCoverage)
+ postDepthCoverage = true;
+ if (src.layoutDepth)
+ layoutDepth = src.layoutDepth;
+ if (src.blendEquation)
+ blendEquation = src.blendEquation;
+ if (src.numViews != TQualifier::layoutNotSet)
+ numViews = src.numViews;
+#ifdef NV_EXTENSIONS
+ if (src.layoutOverrideCoverage)
+ layoutOverrideCoverage = src.layoutOverrideCoverage;
+ if (src.layoutDerivativeGroupQuads)
+ layoutDerivativeGroupQuads = src.layoutDerivativeGroupQuads;
+ if (src.layoutDerivativeGroupLinear)
+ layoutDerivativeGroupLinear = src.layoutDerivativeGroupLinear;
+ if (src.primitives != TQualifier::layoutNotSet)
+ primitives = src.primitives;
+#endif
+ }
+};
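+
+// Editorial sketch (hypothetical values, not upstream glslang code) of the
+// merge() semantics above: fields set in 'src' override, unset fields never erase.
+//
+//     TShaderQualifiers a, b;
+//     a.init(); b.init();
+//     a.geometry = ElgTriangles; // set only in 'a'
+//     b.invocations = 2;         // set only in 'b'
+//     a.merge(b);                // a.geometry stays ElgTriangles,
+//                                // a.invocations becomes 2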
+
+//
+// TPublicType is used only temporarily while parsing, and is not quite the
+// same information as what is kept per node in TType. Due to the bison stack, it
+// can't have types that it thinks have non-trivial constructors. It should
+// just be used while recognizing the grammar, not anything else.
+// Once enough is known about the situation, the proper information is
+// moved into a TType, or the parse context, etc.
+//
+class TPublicType {
+public:
+ TBasicType basicType;
+ TSampler sampler;
+ TQualifier qualifier;
+ TShaderQualifiers shaderQualifiers;
+ int vectorSize : 4;
+ int matrixCols : 4;
+ int matrixRows : 4;
+ bool coopmat : 1;
+ TArraySizes* arraySizes;
+ const TType* userDef;
+ TSourceLoc loc;
+ TArraySizes* typeParameters;
+
+ void initType(const TSourceLoc& l)
+ {
+ basicType = EbtVoid;
+ vectorSize = 1;
+ matrixRows = 0;
+ matrixCols = 0;
+ arraySizes = nullptr;
+ userDef = nullptr;
+ loc = l;
+ typeParameters = nullptr;
+ coopmat = false;
+ }
+
+ void initQualifiers(bool global = false)
+ {
+ qualifier.clear();
+ if (global)
+ qualifier.storage = EvqGlobal;
+ }
+
+ void init(const TSourceLoc& l, bool global = false)
+ {
+ initType(l);
+ sampler.clear();
+ initQualifiers(global);
+ shaderQualifiers.init();
+ }
+
+ void setVector(int s)
+ {
+ matrixRows = 0;
+ matrixCols = 0;
+ vectorSize = s;
+ }
+
+ void setMatrix(int c, int r)
+ {
+ matrixRows = r;
+ matrixCols = c;
+ vectorSize = 0;
+ }
+
+ bool isScalar() const
+ {
+ return matrixCols == 0 && vectorSize == 1 && arraySizes == nullptr && userDef == nullptr;
+ }
+
+ // "Image" is a superset of "Subpass"
+ bool isImage() const { return basicType == EbtSampler && sampler.isImage(); }
+ bool isSubpass() const { return basicType == EbtSampler && sampler.isSubpass(); }
+};
+
+//
+// Base class for things that have a type.
+//
+class TType {
+public:
+ POOL_ALLOCATOR_NEW_DELETE(GetThreadPoolAllocator())
+
+ // for "empty" type (no args) or simple scalar/vector/matrix
+ explicit TType(TBasicType t = EbtVoid, TStorageQualifier q = EvqTemporary, int vs = 1, int mc = 0, int mr = 0,
+ bool isVector = false) :
+ basicType(t), vectorSize(vs), matrixCols(mc), matrixRows(mr), vector1(isVector && vs == 1), coopmat(false),
+ arraySizes(nullptr), structure(nullptr), fieldName(nullptr), typeName(nullptr), typeParameters(nullptr)
+ {
+ sampler.clear();
+ qualifier.clear();
+ qualifier.storage = q;
+ assert(!(isMatrix() && vectorSize != 0)); // prevent vectorSize != 0 on matrices
+ }
+ // for explicit precision qualifier
+ TType(TBasicType t, TStorageQualifier q, TPrecisionQualifier p, int vs = 1, int mc = 0, int mr = 0,
+ bool isVector = false) :
+ basicType(t), vectorSize(vs), matrixCols(mc), matrixRows(mr), vector1(isVector && vs == 1), coopmat(false),
+ arraySizes(nullptr), structure(nullptr), fieldName(nullptr), typeName(nullptr), typeParameters(nullptr)
+ {
+ sampler.clear();
+ qualifier.clear();
+ qualifier.storage = q;
+ qualifier.precision = p;
+ assert(p >= EpqNone && p <= EpqHigh);
+ assert(!(isMatrix() && vectorSize != 0)); // prevent vectorSize != 0 on matrices
+ }
+ // for turning a TPublicType into a TType, using a shallow copy
+ explicit TType(const TPublicType& p) :
+ basicType(p.basicType),
+ vectorSize(p.vectorSize), matrixCols(p.matrixCols), matrixRows(p.matrixRows), vector1(false), coopmat(p.coopmat),
+ arraySizes(p.arraySizes), structure(nullptr), fieldName(nullptr), typeName(nullptr), typeParameters(p.typeParameters)
+ {
+ if (basicType == EbtSampler)
+ sampler = p.sampler;
+ else
+ sampler.clear();
+ qualifier = p.qualifier;
+ if (p.userDef) {
+ if (p.userDef->basicType == EbtReference) {
+ basicType = EbtReference;
+ referentType = p.userDef->referentType;
+ } else {
+ structure = p.userDef->getWritableStruct(); // public type is short-lived; there are no sharing issues
+ }
+ typeName = NewPoolTString(p.userDef->getTypeName().c_str());
+ }
+ if (p.coopmat && p.basicType == EbtFloat &&
+ p.typeParameters && p.typeParameters->getNumDims() > 0 &&
+ p.typeParameters->getDimSize(0) == 16) {
+ basicType = EbtFloat16;
+ qualifier.precision = EpqNone;
+ }
+ }
+ // for construction of sampler types
+ TType(const TSampler& sampler, TStorageQualifier q = EvqUniform, TArraySizes* as = nullptr) :
+ basicType(EbtSampler), vectorSize(1), matrixCols(0), matrixRows(0), vector1(false), coopmat(false),
+ arraySizes(as), structure(nullptr), fieldName(nullptr), typeName(nullptr),
+ sampler(sampler), typeParameters(nullptr)
+ {
+ qualifier.clear();
+ qualifier.storage = q;
+ }
+ // to efficiently make a dereferenced type
+ // without ever duplicating the outer structure that will be thrown away
+ // and using only shallow copy
+ TType(const TType& type, int derefIndex, bool rowMajor = false)
+ {
+ if (type.isArray()) {
+ shallowCopy(type);
+ if (type.getArraySizes()->getNumDims() == 1) {
+ arraySizes = nullptr;
+ } else {
+ // want our own copy of the array, so we can edit it
+ arraySizes = new TArraySizes;
+ arraySizes->copyDereferenced(*type.arraySizes);
+ }
+ } else if (type.basicType == EbtStruct || type.basicType == EbtBlock) {
+ // do a structure dereference
+ const TTypeList& memberList = *type.getStruct();
+ shallowCopy(*memberList[derefIndex].type);
+ return;
+ } else {
+ // do a vector/matrix dereference
+ shallowCopy(type);
+ if (matrixCols > 0) {
+ // dereference from matrix to vector
+ if (rowMajor)
+ vectorSize = matrixCols;
+ else
+ vectorSize = matrixRows;
+ matrixCols = 0;
+ matrixRows = 0;
+ if (vectorSize == 1)
+ vector1 = true;
+ } else if (isVector()) {
+ // dereference from vector to scalar
+ vectorSize = 1;
+ vector1 = false;
+ } else if (isCoopMat()) {
+ coopmat = false;
+ typeParameters = nullptr;
+ }
+ }
+ }
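+ // Editorial sketch (assumed usage, not upstream code) of the dereferencing
+ // constructor above: a mat4 (4 columns x 4 rows, vectorSize 0) dereferences
+ // to a 4-component vector, which in turn dereferences to a scalar:
+ //     TType mat(EbtFloat, EvqTemporary, 0, 4, 4);
+ //     TType column(mat, 0);     // isVector(), getVectorSize() == 4
+ //     TType element(column, 0); // isScalar()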
+ // for making structures, ...
+ TType(TTypeList* userDef, const TString& n) :
+ basicType(EbtStruct), vectorSize(1), matrixCols(0), matrixRows(0), vector1(false), coopmat(false),
+ arraySizes(nullptr), structure(userDef), fieldName(nullptr), typeParameters(nullptr)
+ {
+ sampler.clear();
+ qualifier.clear();
+ typeName = NewPoolTString(n.c_str());
+ }
+ // For interface blocks
+ TType(TTypeList* userDef, const TString& n, const TQualifier& q) :
+ basicType(EbtBlock), vectorSize(1), matrixCols(0), matrixRows(0), vector1(false), coopmat(false),
+ qualifier(q), arraySizes(nullptr), structure(userDef), fieldName(nullptr), typeParameters(nullptr)
+ {
+ sampler.clear();
+ typeName = NewPoolTString(n.c_str());
+ }
+ // for block reference (first parameter must be EbtReference)
+ explicit TType(TBasicType t, const TType &p, const TString& n) :
+ basicType(t), vectorSize(1), matrixCols(0), matrixRows(0), vector1(false),
+ arraySizes(nullptr), structure(nullptr), fieldName(nullptr), typeName(nullptr)
+ {
+ assert(t == EbtReference);
+ typeName = NewPoolTString(n.c_str());
+ qualifier.clear();
+ qualifier.storage = p.qualifier.storage;
+ referentType = p.clone();
+ }
+ virtual ~TType() {}
+
+ // Not for use across pool pops; it will cause multiple instances of TType to point to the same information.
+ // This only works if that information (like a structure's list of types) does not change and
+ // the instances are sharing the same pool.
+ void shallowCopy(const TType& copyOf)
+ {
+ basicType = copyOf.basicType;
+ sampler = copyOf.sampler;
+ qualifier = copyOf.qualifier;
+ vectorSize = copyOf.vectorSize;
+ matrixCols = copyOf.matrixCols;
+ matrixRows = copyOf.matrixRows;
+ vector1 = copyOf.vector1;
+ arraySizes = copyOf.arraySizes; // copying the pointer only, not the contents
+ fieldName = copyOf.fieldName;
+ typeName = copyOf.typeName;
+ if (isStruct()) {
+ structure = copyOf.structure;
+ } else {
+ referentType = copyOf.referentType;
+ }
+ typeParameters = copyOf.typeParameters;
+ coopmat = copyOf.coopmat;
+ }
+
+ // Make complete copy of the whole type graph rooted at 'copyOf'.
+ void deepCopy(const TType& copyOf)
+ {
+ TMap<TTypeList*,TTypeList*> copied; // to enable copying a type graph as a graph, not a tree
+ deepCopy(copyOf, copied);
+ }
+
+ // Recursively make temporary
+ void makeTemporary()
+ {
+ getQualifier().makeTemporary();
+
+ if (isStruct())
+ for (unsigned int i = 0; i < structure->size(); ++i)
+ (*structure)[i].type->makeTemporary();
+ }
+
+ TType* clone() const
+ {
+ TType *newType = new TType();
+ newType->deepCopy(*this);
+
+ return newType;
+ }
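+
+ // Editorial note (assumed usage): shallowCopy() shares the pointed-to pieces
+ // (array sizes, struct member list), while deepCopy()/clone() duplicate the
+ // whole type graph:
+ //     TType a; a.shallowCopy(original); // 'a' shares original's arraySizes pointer
+ //     TType* b = original.clone();      // 'b' gets fresh copies of them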
+
+ void makeVector() { vector1 = true; }
+
+ virtual void hideMember() { basicType = EbtVoid; vectorSize = 1; }
+ virtual bool hiddenMember() const { return basicType == EbtVoid; }
+
+ virtual void setFieldName(const TString& n) { fieldName = NewPoolTString(n.c_str()); }
+ virtual const TString& getTypeName() const
+ {
+ assert(typeName);
+ return *typeName;
+ }
+
+ virtual const TString& getFieldName() const
+ {
+ assert(fieldName);
+ return *fieldName;
+ }
+
+ virtual TBasicType getBasicType() const { return basicType; }
+ virtual const TSampler& getSampler() const { return sampler; }
+ virtual TSampler& getSampler() { return sampler; }
+
+ virtual TQualifier& getQualifier() { return qualifier; }
+ virtual const TQualifier& getQualifier() const { return qualifier; }
+
+ virtual int getVectorSize() const { return vectorSize; } // returns 1 for either scalar or vector of size 1, valid for both
+ virtual int getMatrixCols() const { return matrixCols; }
+ virtual int getMatrixRows() const { return matrixRows; }
+ virtual int getOuterArraySize() const { return arraySizes->getOuterSize(); }
+ virtual TIntermTyped* getOuterArrayNode() const { return arraySizes->getOuterNode(); }
+ virtual int getCumulativeArraySize() const { return arraySizes->getCumulativeSize(); }
+ virtual bool isArrayOfArrays() const { return arraySizes != nullptr && arraySizes->getNumDims() > 1; }
+ virtual int getImplicitArraySize() const { return arraySizes->getImplicitSize(); }
+ virtual const TArraySizes* getArraySizes() const { return arraySizes; }
+ virtual TArraySizes* getArraySizes() { return arraySizes; }
+ virtual TType* getReferentType() const { return referentType; }
+ virtual const TArraySizes* getTypeParameters() const { return typeParameters; }
+ virtual TArraySizes* getTypeParameters() { return typeParameters; }
+
+ virtual bool isScalar() const { return ! isVector() && ! isMatrix() && ! isStruct() && ! isArray(); }
+ virtual bool isScalarOrVec1() const { return isScalar() || vector1; }
+ virtual bool isVector() const { return vectorSize > 1 || vector1; }
+ virtual bool isMatrix() const { return matrixCols ? true : false; }
+ virtual bool isArray() const { return arraySizes != nullptr; }
+ virtual bool isSizedArray() const { return isArray() && arraySizes->isSized(); }
+ virtual bool isUnsizedArray() const { return isArray() && !arraySizes->isSized(); }
+ virtual bool isArrayVariablyIndexed() const { assert(isArray()); return arraySizes->isVariablyIndexed(); }
+ virtual void setArrayVariablyIndexed() { assert(isArray()); arraySizes->setVariablyIndexed(); }
+ virtual void updateImplicitArraySize(int size) { assert(isArray()); arraySizes->updateImplicitSize(size); }
+ virtual bool isStruct() const { return basicType == EbtStruct || basicType == EbtBlock; }
+ virtual bool isFloatingDomain() const { return basicType == EbtFloat || basicType == EbtDouble || basicType == EbtFloat16; }
+ virtual bool isIntegerDomain() const
+ {
+ switch (basicType) {
+ case EbtInt8:
+ case EbtUint8:
+ case EbtInt16:
+ case EbtUint16:
+ case EbtInt:
+ case EbtUint:
+ case EbtInt64:
+ case EbtUint64:
+ case EbtAtomicUint:
+ return true;
+ default:
+ break;
+ }
+ return false;
+ }
+ virtual bool isOpaque() const { return basicType == EbtSampler || basicType == EbtAtomicUint
+#ifdef NV_EXTENSIONS
+ || basicType == EbtAccStructNV
+#endif
+ ; }
+ virtual bool isBuiltIn() const { return getQualifier().builtIn != EbvNone; }
+
+ // "Image" is a superset of "Subpass"
+ virtual bool isImage() const { return basicType == EbtSampler && getSampler().isImage(); }
+ virtual bool isSubpass() const { return basicType == EbtSampler && getSampler().isSubpass(); }
+ virtual bool isTexture() const { return basicType == EbtSampler && getSampler().isTexture(); }
+ virtual bool isParameterized() const { return typeParameters != nullptr; }
+ virtual bool isCoopMat() const { return coopmat; }
+
+ // return true if this type contains any subtype which satisfies the given predicate.
+ template <typename P>
+ bool contains(P predicate) const
+ {
+ if (predicate(this))
+ return true;
+
+ const auto hasa = [predicate](const TTypeLoc& tl) { return tl.type->contains(predicate); };
+
+ return isStruct() && std::any_of(structure->begin(), structure->end(), hasa);
+ }
+
+ // Recursively checks if the type contains the given basic type
+ virtual bool containsBasicType(TBasicType checkType) const
+ {
+ return contains([checkType](const TType* t) { return t->basicType == checkType; } );
+ }
+
+ // Recursively check the structure for any arrays, needed for some error checks
+ virtual bool containsArray() const
+ {
+ return contains([](const TType* t) { return t->isArray(); } );
+ }
+
+ // Check the structure for any structures, needed for some error checks
+ virtual bool containsStructure() const
+ {
+ return contains([this](const TType* t) { return t != this && t->isStruct(); } );
+ }
+
+ // Recursively check the structure for any unsized arrays, needed for triggering a copyUp().
+ virtual bool containsUnsizedArray() const
+ {
+ return contains([](const TType* t) { return t->isUnsizedArray(); } );
+ }
+
+ virtual bool containsOpaque() const
+ {
+ return contains([](const TType* t) { return t->isOpaque(); } );
+ }
+
+ // Recursively checks if the type contains a built-in variable
+ virtual bool containsBuiltIn() const
+ {
+ return contains([](const TType* t) { return t->isBuiltIn(); } );
+ }
+
+ virtual bool containsNonOpaque() const
+ {
+ const auto nonOpaque = [](const TType* t) {
+ switch (t->basicType) {
+ case EbtVoid:
+ case EbtFloat:
+ case EbtDouble:
+ case EbtFloat16:
+ case EbtInt8:
+ case EbtUint8:
+ case EbtInt16:
+ case EbtUint16:
+ case EbtInt:
+ case EbtUint:
+ case EbtInt64:
+ case EbtUint64:
+ case EbtBool:
+ case EbtReference:
+ return true;
+ default:
+ return false;
+ }
+ };
+
+ return contains(nonOpaque);
+ }
+
+ virtual bool containsSpecializationSize() const
+ {
+ return contains([](const TType* t) { return t->isArray() && t->arraySizes->isOuterSpecialization(); } );
+ }
+
+ virtual bool contains16BitInt() const
+ {
+ return containsBasicType(EbtInt16) || containsBasicType(EbtUint16);
+ }
+
+ virtual bool contains8BitInt() const
+ {
+ return containsBasicType(EbtInt8) || containsBasicType(EbtUint8);
+ }
+
+ virtual bool containsCoopMat() const
+ {
+ return contains([](const TType* t) { return t->coopmat; } );
+ }
+
+ // Array editing methods. Array descriptors can be shared across
+ // type instances. This allows all uses of the same array
+ // to be updated at once. E.g., all nodes can be explicitly sized
+ // by tracking and correcting one implicit size. Or, all nodes
+ // can get the explicit size on a redeclaration that gives size.
+ //
+ // N.B.: Don't share with the shared symbol tables (symbols are
+ // marked as isReadOnly()). Such symbols with arrays that will be
+ // edited need to copyUp() on first use, so that
+ // A) the edits don't affect the shared symbol table, and
+ // B) the edits are shared across all users.
+ void updateArraySizes(const TType& type)
+ {
+ // For when we may already be sharing existing array descriptors,
+ // keeping the pointers the same, just updating the contents.
+ assert(arraySizes != nullptr);
+ assert(type.arraySizes != nullptr);
+ *arraySizes = *type.arraySizes;
+ }
+ void copyArraySizes(const TArraySizes& s)
+ {
+ // For setting a fresh new set of array sizes, not yet worrying about sharing.
+ arraySizes = new TArraySizes;
+ *arraySizes = s;
+ }
+ void transferArraySizes(TArraySizes* s)
+ {
+ // For setting an already allocated set of sizes that this type can use
+ // (no copy made).
+ arraySizes = s;
+ }
+ void clearArraySizes()
+ {
+ arraySizes = nullptr;
+ }
+
+ // Add inner array sizes, to any existing sizes, via copy; the
+ // sizes passed in can still be reused for other purposes.
+ void copyArrayInnerSizes(const TArraySizes* s)
+ {
+ if (s != nullptr) {
+ if (arraySizes == nullptr)
+ copyArraySizes(*s);
+ else
+ arraySizes->addInnerSizes(*s);
+ }
+ }
+ void changeOuterArraySize(int s) { arraySizes->changeOuterSize(s); }
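+
+ // Editorial sketch (assumed usage, with t1 and t2 standing for two TType
+ // objects): because descriptors can be shared, sizing one use sizes them all:
+ //     TArraySizes* s = new TArraySizes;
+ //     s->addInnerSize();          // one unsized (outer) dimension
+ //     t1.transferArraySizes(s);
+ //     t2.transferArraySizes(s);   // t1 and t2 now share 's'
+ //     t1.changeOuterArraySize(8); // t2.getOuterArraySize() == 8 as well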
+
+ // Recursively make the implicit array size the explicit array size.
+ // Explicit arrays are compile-time or link-time sized, never run-time sized.
+ // Sometimes, policy calls for an array to be run-time sized even if it was
+ // never variably indexed: Don't turn a 'skipNonvariablyIndexed' array into
+ // an explicit array.
+ void adoptImplicitArraySizes(bool skipNonvariablyIndexed)
+ {
+ if (isUnsizedArray() && !(skipNonvariablyIndexed || isArrayVariablyIndexed()))
+ changeOuterArraySize(getImplicitArraySize());
+#ifdef NV_EXTENSIONS
+ // For multi-dim per-view arrays, set unsized inner dimension size to 1
+ if (qualifier.isPerView() && arraySizes && arraySizes->isInnerUnsized())
+ arraySizes->clearInnerUnsized();
+#endif
+ if (isStruct() && structure->size() > 0) {
+ int lastMember = (int)structure->size() - 1;
+ for (int i = 0; i < lastMember; ++i)
+ (*structure)[i].type->adoptImplicitArraySizes(false);
+ // implement the "last member of an SSBO" policy
+ (*structure)[lastMember].type->adoptImplicitArraySizes(getQualifier().storage == EvqBuffer);
+ }
+ }
+
+
+ void updateTypeParameters(const TType& type)
+ {
+ // For when we may already be sharing existing array descriptors,
+ // keeping the pointers the same, just updating the contents.
+ assert(typeParameters != nullptr);
+ assert(type.typeParameters != nullptr);
+ *typeParameters = *type.typeParameters;
+ }
+ void copyTypeParameters(const TArraySizes& s)
+ {
+ // For setting a fresh new set of type parameters, not yet worrying about sharing.
+ typeParameters = new TArraySizes;
+ *typeParameters = s;
+ }
+ void transferTypeParameters(TArraySizes* s)
+ {
+ // For setting an already allocated set of sizes that this type can use
+ // (no copy made).
+ typeParameters = s;
+ }
+ void clearTypeParameters()
+ {
+ typeParameters = nullptr;
+ }
+
+ // Add inner array sizes, to any existing sizes, via copy; the
+ // sizes passed in can still be reused for other purposes.
+ void copyTypeParametersInnerSizes(const TArraySizes* s)
+ {
+ if (s != nullptr) {
+ if (typeParameters == nullptr)
+ copyTypeParameters(*s);
+ else
+ typeParameters->addInnerSizes(*s);
+ }
+ }
+
+
+
+ const char* getBasicString() const
+ {
+ return TType::getBasicString(basicType);
+ }
+
+ static const char* getBasicString(TBasicType t)
+ {
+ switch (t) {
+ case EbtVoid: return "void";
+ case EbtFloat: return "float";
+ case EbtDouble: return "double";
+ case EbtFloat16: return "float16_t";
+ case EbtInt8: return "int8_t";
+ case EbtUint8: return "uint8_t";
+ case EbtInt16: return "int16_t";
+ case EbtUint16: return "uint16_t";
+ case EbtInt: return "int";
+ case EbtUint: return "uint";
+ case EbtInt64: return "int64_t";
+ case EbtUint64: return "uint64_t";
+ case EbtBool: return "bool";
+ case EbtAtomicUint: return "atomic_uint";
+ case EbtSampler: return "sampler/image";
+ case EbtStruct: return "structure";
+ case EbtBlock: return "block";
+#ifdef NV_EXTENSIONS
+ case EbtAccStructNV: return "accelerationStructureNV";
+#endif
+ case EbtReference: return "reference";
+ default: return "unknown type";
+ }
+ }
+
+ TString getCompleteString() const
+ {
+ TString typeString;
+
+ const auto appendStr = [&](const char* s) { typeString.append(s); };
+ const auto appendUint = [&](unsigned int u) { typeString.append(std::to_string(u).c_str()); };
+ const auto appendInt = [&](int i) { typeString.append(std::to_string(i).c_str()); };
+
+ if (qualifier.hasLayout()) {
+ // To reduce noise, skip this if the only layout is an xfb_buffer
+ // with no triggering xfb_offset.
+ TQualifier noXfbBuffer = qualifier;
+ noXfbBuffer.layoutXfbBuffer = TQualifier::layoutXfbBufferEnd;
+ if (noXfbBuffer.hasLayout()) {
+ appendStr("layout(");
+ if (qualifier.hasAnyLocation()) {
+ appendStr(" location=");
+ appendUint(qualifier.layoutLocation);
+ if (qualifier.hasComponent()) {
+ appendStr(" component=");
+ appendUint(qualifier.layoutComponent);
+ }
+ if (qualifier.hasIndex()) {
+ appendStr(" index=");
+ appendUint(qualifier.layoutIndex);
+ }
+ }
+ if (qualifier.hasSet()) {
+ appendStr(" set=");
+ appendUint(qualifier.layoutSet);
+ }
+ if (qualifier.hasBinding()) {
+ appendStr(" binding=");
+ appendUint(qualifier.layoutBinding);
+ }
+ if (qualifier.hasStream()) {
+ appendStr(" stream=");
+ appendUint(qualifier.layoutStream);
+ }
+ if (qualifier.hasMatrix()) {
+ appendStr(" ");
+ appendStr(TQualifier::getLayoutMatrixString(qualifier.layoutMatrix));
+ }
+ if (qualifier.hasPacking()) {
+ appendStr(" ");
+ appendStr(TQualifier::getLayoutPackingString(qualifier.layoutPacking));
+ }
+ if (qualifier.hasOffset()) {
+ appendStr(" offset=");
+ appendInt(qualifier.layoutOffset);
+ }
+ if (qualifier.hasAlign()) {
+ appendStr(" align=");
+ appendInt(qualifier.layoutAlign);
+ }
+ if (qualifier.hasFormat()) {
+ appendStr(" ");
+ appendStr(TQualifier::getLayoutFormatString(qualifier.layoutFormat));
+ }
+ if (qualifier.hasXfbBuffer() && qualifier.hasXfbOffset()) {
+ appendStr(" xfb_buffer=");
+ appendUint(qualifier.layoutXfbBuffer);
+ }
+ if (qualifier.hasXfbOffset()) {
+ appendStr(" xfb_offset=");
+ appendUint(qualifier.layoutXfbOffset);
+ }
+ if (qualifier.hasXfbStride()) {
+ appendStr(" xfb_stride=");
+ appendUint(qualifier.layoutXfbStride);
+ }
+ if (qualifier.hasAttachment()) {
+ appendStr(" input_attachment_index=");
+ appendUint(qualifier.layoutAttachment);
+ }
+ if (qualifier.hasSpecConstantId()) {
+ appendStr(" constant_id=");
+ appendUint(qualifier.layoutSpecConstantId);
+ }
+ if (qualifier.layoutPushConstant)
+ appendStr(" push_constant");
+ if (qualifier.layoutBufferReference)
+ appendStr(" buffer_reference");
+ if (qualifier.hasBufferReferenceAlign()) {
+ appendStr(" buffer_reference_align=");
+ appendUint(1u << qualifier.layoutBufferReferenceAlign);
+ }
+
+#ifdef NV_EXTENSIONS
+ if (qualifier.layoutPassthrough)
+ appendStr(" passthrough");
+ if (qualifier.layoutViewportRelative)
+ appendStr(" layoutViewportRelative");
+ if (qualifier.layoutSecondaryViewportRelativeOffset != -2048) {
+ appendStr(" layoutSecondaryViewportRelativeOffset=");
+ appendInt(qualifier.layoutSecondaryViewportRelativeOffset);
+ }
+ if (qualifier.layoutShaderRecordNV)
+ appendStr(" shaderRecordNV");
+#endif
+
+ appendStr(")");
+ }
+ }
+
+ if (qualifier.invariant)
+ appendStr(" invariant");
+ if (qualifier.noContraction)
+ appendStr(" noContraction");
+ if (qualifier.centroid)
+ appendStr(" centroid");
+ if (qualifier.smooth)
+ appendStr(" smooth");
+ if (qualifier.flat)
+ appendStr(" flat");
+ if (qualifier.nopersp)
+ appendStr(" noperspective");
+#ifdef AMD_EXTENSIONS
+ if (qualifier.explicitInterp)
+ appendStr(" __explicitInterpAMD");
+#endif
+#ifdef NV_EXTENSIONS
+ if (qualifier.pervertexNV)
+ appendStr(" pervertexNV");
+ if (qualifier.perPrimitiveNV)
+ appendStr(" perprimitiveNV");
+ if (qualifier.perViewNV)
+ appendStr(" perviewNV");
+ if (qualifier.perTaskNV)
+ appendStr(" taskNV");
+#endif
+ if (qualifier.patch)
+ appendStr(" patch");
+ if (qualifier.sample)
+ appendStr(" sample");
+ if (qualifier.coherent)
+ appendStr(" coherent");
+ if (qualifier.devicecoherent)
+ appendStr(" devicecoherent");
+ if (qualifier.queuefamilycoherent)
+ appendStr(" queuefamilycoherent");
+ if (qualifier.workgroupcoherent)
+ appendStr(" workgroupcoherent");
+ if (qualifier.subgroupcoherent)
+ appendStr(" subgroupcoherent");
+ if (qualifier.nonprivate)
+ appendStr(" nonprivate");
+ if (qualifier.volatil)
+ appendStr(" volatile");
+ if (qualifier.restrict)
+ appendStr(" restrict");
+ if (qualifier.readonly)
+ appendStr(" readonly");
+ if (qualifier.writeonly)
+ appendStr(" writeonly");
+ if (qualifier.specConstant)
+ appendStr(" specialization-constant");
+ if (qualifier.nonUniform)
+ appendStr(" nonuniform");
+ appendStr(" ");
+ appendStr(getStorageQualifierString());
+ if (isArray()) {
+ for(int i = 0; i < (int)arraySizes->getNumDims(); ++i) {
+ int size = arraySizes->getDimSize(i);
+ if (size == UnsizedArraySize && i == 0 && arraySizes->isVariablyIndexed())
+ appendStr(" runtime-sized array of");
+ else {
+ if (size == UnsizedArraySize) {
+ appendStr(" unsized");
+ if (i == 0) {
+ appendStr(" ");
+ appendInt(arraySizes->getImplicitSize());
+ }
+ } else {
+ appendStr(" ");
+ appendInt(arraySizes->getDimSize(i));
+ }
+ appendStr("-element array of");
+ }
+ }
+ }
+ if (isParameterized()) {
+ appendStr("<");
+ for(int i = 0; i < (int)typeParameters->getNumDims(); ++i) {
+ appendInt(typeParameters->getDimSize(i));
+ if (i != (int)typeParameters->getNumDims() - 1)
+ appendStr(", ");
+ }
+ appendStr(">");
+ }
+ if (qualifier.precision != EpqNone) {
+ appendStr(" ");
+ appendStr(getPrecisionQualifierString());
+ }
+ if (isMatrix()) {
+ appendStr(" ");
+ appendInt(matrixCols);
+ appendStr("X");
+ appendInt(matrixRows);
+ appendStr(" matrix of");
+ } else if (isVector()) {
+ appendStr(" ");
+ appendInt(vectorSize);
+ appendStr("-component vector of");
+ }
+
+ appendStr(" ");
+ typeString.append(getBasicTypeString());
+
+ if (qualifier.builtIn != EbvNone) {
+ appendStr(" ");
+ appendStr(getBuiltInVariableString());
+ }
+
+ // Add struct/block members
+ if (isStruct() && structure) {
+ appendStr("{");
+ for (size_t i = 0; i < structure->size(); ++i) {
+ if (! (*structure)[i].type->hiddenMember()) {
+ typeString.append((*structure)[i].type->getCompleteString());
+ typeString.append(" ");
+ typeString.append((*structure)[i].type->getFieldName());
+ if (i < structure->size() - 1)
+ appendStr(", ");
+ }
+ }
+ appendStr("}");
+ }
+
+ return typeString;
+ }
+
+ TString getBasicTypeString() const
+ {
+ if (basicType == EbtSampler)
+ return sampler.getString();
+ else
+ return getBasicString();
+ }
+
+ const char* getStorageQualifierString() const { return GetStorageQualifierString(qualifier.storage); }
+ const char* getBuiltInVariableString() const { return GetBuiltInVariableString(qualifier.builtIn); }
+ const char* getPrecisionQualifierString() const { return GetPrecisionQualifierString(qualifier.precision); }
+ const TTypeList* getStruct() const { assert(isStruct()); return structure; }
+ void setStruct(TTypeList* s) { assert(isStruct()); structure = s; }
+ TTypeList* getWritableStruct() const { assert(isStruct()); return structure; } // This should only be used when it is known not to be shared with other threads
+
+ int computeNumComponents() const
+ {
+ int components = 0;
+
+ if (getBasicType() == EbtStruct || getBasicType() == EbtBlock) {
+ for (TTypeList::const_iterator tl = getStruct()->begin(); tl != getStruct()->end(); tl++)
+ components += ((*tl).type)->computeNumComponents();
+ } else if (matrixCols)
+ components = matrixCols * matrixRows;
+ else
+ components = vectorSize;
+
+ if (arraySizes != nullptr) {
+ components *= arraySizes->getCumulativeSize();
+ }
+
+ return components;
+ }
+
+ // append this type's mangled name to the passed in 'name'
+ void appendMangledName(TString& name) const
+ {
+ buildMangledName(name);
+ name += ';' ;
+ }
+
+ // Do two structure types match? They could be declared independently,
+ // in different places, but still might satisfy the definition of matching.
+ // From the spec:
+ //
+ // "Structures must have the same name, sequence of type names, and
+ // type definitions, and member names to be considered the same type.
+ // This rule applies recursively for nested or embedded types."
+ //
+ bool sameStructType(const TType& right) const
+ {
+ // Most commonly, they are both nullptr, or the same pointer to the same actual structure
+ if ((!isStruct() && !right.isStruct()) ||
+ (isStruct() && right.isStruct() && structure == right.structure))
+ return true;
+
+ // Both being nullptr was caught above, now they both have to be structures of the same number of elements
+ if (!isStruct() || !right.isStruct() ||
+ structure->size() != right.structure->size())
+ return false;
+
+ // Structure names have to match
+ if (*typeName != *right.typeName)
+ return false;
+
+ // Compare the names and types of all the members, which have to match
+ for (unsigned int i = 0; i < structure->size(); ++i) {
+ if ((*structure)[i].type->getFieldName() != (*right.structure)[i].type->getFieldName())
+ return false;
+
+ if (*(*structure)[i].type != *(*right.structure)[i].type)
+ return false;
+ }
+
+ return true;
+ }
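+
+ // Editorial example of the matching rule above: 'struct S { vec3 p; };'
+ // declared in two places matches only if the type name 'S', the member name
+ // 'p', and the member's type all agree; 'struct S { vec3 q; };' would not match.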
+
+ bool sameReferenceType(const TType& right) const
+ {
+ if ((basicType == EbtReference) != (right.basicType == EbtReference))
+ return false;
+
+ if ((basicType != EbtReference) && (right.basicType != EbtReference))
+ return true;
+
+ assert(referentType != nullptr);
+ assert(right.referentType != nullptr);
+
+ if (referentType == right.referentType)
+ return true;
+
+ return *referentType == *right.referentType;
+ }
+
+ // See if two types match, in all aspects except arrayness
+ bool sameElementType(const TType& right) const
+ {
+ return basicType == right.basicType && sameElementShape(right);
+ }
+
+ // See if two types' arrayness matches
+ bool sameArrayness(const TType& right) const
+ {
+ return ((arraySizes == nullptr && right.arraySizes == nullptr) ||
+ (arraySizes != nullptr && right.arraySizes != nullptr && *arraySizes == *right.arraySizes));
+ }
+
+ // See if two types' arrayness matches in everything except their outer dimension
+ bool sameInnerArrayness(const TType& right) const
+ {
+ assert(arraySizes != nullptr && right.arraySizes != nullptr);
+ return arraySizes->sameInnerArrayness(*right.arraySizes);
+ }
+
+ // See if two types' type parameters match
+ bool sameTypeParameters(const TType& right) const
+ {
+ return ((typeParameters == nullptr && right.typeParameters == nullptr) ||
+ (typeParameters != nullptr && right.typeParameters != nullptr && *typeParameters == *right.typeParameters));
+ }
+
+ // See if two types' elements match in all ways except basic type
+ bool sameElementShape(const TType& right) const
+ {
+ return sampler == right.sampler &&
+ vectorSize == right.vectorSize &&
+ matrixCols == right.matrixCols &&
+ matrixRows == right.matrixRows &&
+ vector1 == right.vector1 &&
+ coopmat == right.coopmat &&
+ sameStructType(right) &&
+ sameReferenceType(right);
+ }
+
+ // See if a cooperative matrix type with unspecified type parameters is
+ // acceptable as a function parameter
+ bool coopMatParameterOK(const TType& right) const
+ {
+ return coopmat && right.coopmat &&
+ typeParameters == nullptr && right.typeParameters != nullptr;
+ }
+
+ // See if two types match in all ways (just the actual type, not qualification)
+ bool operator==(const TType& right) const
+ {
+ return sameElementType(right) && sameArrayness(right) && sameTypeParameters(right);
+ }
+
+ bool operator!=(const TType& right) const
+ {
+ return ! operator==(right);
+ }
+
+ unsigned int getBufferReferenceAlignment() const
+ {
+ if (getBasicType() == glslang::EbtReference) {
+ return getReferentType()->getQualifier().hasBufferReferenceAlign() ?
+ (1u << getReferentType()->getQualifier().layoutBufferReferenceAlign) : 16u;
+ } else {
+ return 0;
+ }
+ }
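+
+ // Editorial note: layoutBufferReferenceAlign stores the log2 of the alignment,
+ // so layout(buffer_reference_align = 32) yields layoutBufferReferenceAlign == 5
+ // and a return value of 1u << 5 == 32; without an explicit align, 16 is returned.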
+
+protected:
+ // Require consumer to pick between deep copy and shallow copy.
+ TType(const TType& type);
+ TType& operator=(const TType& type);
+
+ // Recursively copy a type graph, while preserving the graph-like
+ // quality. That is, don't make more than one copy of a structure that
+ // gets reused multiple times in the type graph.
+ void deepCopy(const TType& copyOf, TMap<TTypeList*,TTypeList*>& copiedMap)
+ {
+ shallowCopy(copyOf);
+
+ if (copyOf.arraySizes) {
+ arraySizes = new TArraySizes;
+ *arraySizes = *copyOf.arraySizes;
+ }
+
+ if (copyOf.typeParameters) {
+ typeParameters = new TArraySizes;
+ *typeParameters = *copyOf.typeParameters;
+ }
+
+ if (copyOf.isStruct() && copyOf.structure) {
+ auto prevCopy = copiedMap.find(copyOf.structure);
+ if (prevCopy != copiedMap.end())
+ structure = prevCopy->second;
+ else {
+ structure = new TTypeList;
+ copiedMap[copyOf.structure] = structure;
+ for (unsigned int i = 0; i < copyOf.structure->size(); ++i) {
+ TTypeLoc typeLoc;
+ typeLoc.loc = (*copyOf.structure)[i].loc;
+ typeLoc.type = new TType();
+ typeLoc.type->deepCopy(*(*copyOf.structure)[i].type, copiedMap);
+ structure->push_back(typeLoc);
+ }
+ }
+ }
+
+ if (copyOf.fieldName)
+ fieldName = NewPoolTString(copyOf.fieldName->c_str());
+ if (copyOf.typeName)
+ typeName = NewPoolTString(copyOf.typeName->c_str());
+ }
+
+
+ void buildMangledName(TString&) const;
+
+ TBasicType basicType : 8;
+ int vectorSize : 4; // 1 means either scalar or 1-component vector; see vector1 to disambiguate.
+ int matrixCols : 4;
+ int matrixRows : 4;
+ bool vector1 : 1; // Backward-compatible tracking of a 1-component vector distinguished from a scalar.
+ // GLSL 4.5 never has a 1-component vector, so this will always be false until such
+ // functionality is added.
+ // HLSL does have 1-component vectors, so this will be true to disambiguate
+ // from a scalar.
+ bool coopmat : 1;
+ TQualifier qualifier;
+
+ TArraySizes* arraySizes; // nullptr unless an array; can be shared across types
+ // A type can't be both a structure (EbtStruct/EbtBlock) and a reference (EbtReference), so
+ // conserve space by making these a union
+ union {
+ TTypeList* structure; // invalid unless this is a struct; can be shared across types
+ TType *referentType; // invalid unless this is an EbtReference
+ };
+ TString *fieldName; // for structure field names
+ TString *typeName; // for structure type name
+ TSampler sampler;
+ TArraySizes* typeParameters;// nullptr unless a parameterized type; can be shared across types
+};
+
+} // end namespace glslang
+
+#endif // _TYPES_INCLUDED_
diff --git a/thirdparty/glslang/glslang/Include/arrays.h b/thirdparty/glslang/glslang/Include/arrays.h
new file mode 100644
index 0000000000..7f047d9fb1
--- /dev/null
+++ b/thirdparty/glslang/glslang/Include/arrays.h
@@ -0,0 +1,341 @@
+//
+// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+// Copyright (C) 2012-2013 LunarG, Inc.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+//
+// Implement types for tracking GLSL arrays, arrays of arrays, etc.
+//
+
+#ifndef _ARRAYS_INCLUDED
+#define _ARRAYS_INCLUDED
+
+#include <algorithm>
+
+namespace glslang {
+
+// This is used to mean there is no size yet (unsized); it is waiting to get a size from somewhere else.
+const int UnsizedArraySize = 0;
+
+class TIntermTyped;
+extern bool SameSpecializationConstants(TIntermTyped*, TIntermTyped*);
+
+// Specialization constants need both a nominal size and a node that defines
+// the specialization constant being used. Array types are the same when their
+// size and specialization constant nodes are the same.
+struct TArraySize {
+ unsigned int size;
+ TIntermTyped* node; // nullptr means no specialization constant node
+ bool operator==(const TArraySize& rhs) const
+ {
+ if (size != rhs.size)
+ return false;
+ if (node == nullptr || rhs.node == nullptr)
+ return node == rhs.node;
+
+ return SameSpecializationConstants(node, rhs.node);
+ }
+};
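+
+// Editorial example: two arrays sized by the same specialization-constant node
+// compare equal; an array sized by the literal 4 and one sized by a spec
+// constant whose value is 4 do not (one node is nullptr, the other is not).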
+
+//
+// TSmallArrayVector is used as the container for the set of sizes in TArraySizes.
+// It has generic-container semantics, while TArraySizes has array-of-array semantics.
+// That is, TSmallArrayVector should be more focused on mechanism and TArraySizes on policy.
+//
+struct TSmallArrayVector {
+ //
+ // TODO: memory: TSmallArrayVector is intended to be smaller.
+ // Almost all arrays could be handled by two sizes each fitting
+ // in 16 bits, needing a real vector only in the cases where there
+ // are more than 3 sizes or a size needing more than 16 bits.
+ //
+ POOL_ALLOCATOR_NEW_DELETE(GetThreadPoolAllocator())
+
+ TSmallArrayVector() : sizes(nullptr) { }
+ virtual ~TSmallArrayVector() { dealloc(); }
+
+ // For breaking into two non-shared copies, independently modifiable.
+ TSmallArrayVector& operator=(const TSmallArrayVector& from)
+ {
+ if (from.sizes == nullptr)
+ sizes = nullptr;
+ else {
+ alloc();
+ *sizes = *from.sizes;
+ }
+
+ return *this;
+ }
+
+ int size() const
+ {
+ if (sizes == nullptr)
+ return 0;
+ return (int)sizes->size();
+ }
+
+ unsigned int frontSize() const
+ {
+ assert(sizes != nullptr && sizes->size() > 0);
+ return sizes->front().size;
+ }
+
+ TIntermTyped* frontNode() const
+ {
+ assert(sizes != nullptr && sizes->size() > 0);
+ return sizes->front().node;
+ }
+
+ void changeFront(unsigned int s)
+ {
+ assert(sizes != nullptr);
+ // this should only happen for implicitly sized arrays, not specialization constants
+ assert(sizes->front().node == nullptr);
+ sizes->front().size = s;
+ }
+
+ void push_back(unsigned int e, TIntermTyped* n)
+ {
+ alloc();
+ TArraySize pair = { e, n };
+ sizes->push_back(pair);
+ }
+
+ void push_back(const TSmallArrayVector& newDims)
+ {
+ alloc();
+ sizes->insert(sizes->end(), newDims.sizes->begin(), newDims.sizes->end());
+ }
+
+ void pop_front()
+ {
+ assert(sizes != nullptr && sizes->size() > 0);
+ if (sizes->size() == 1)
+ dealloc();
+ else
+ sizes->erase(sizes->begin());
+ }
+
+ // 'this' should currently not be holding anything, and copyNonFront
+ // will make it hold a copy of all but the first element of rhs.
+ // (This would be useful for making a type that is dereferenced by
+ // one dimension.)
+ void copyNonFront(const TSmallArrayVector& rhs)
+ {
+ assert(sizes == nullptr);
+ if (rhs.size() > 1) {
+ alloc();
+ sizes->insert(sizes->begin(), rhs.sizes->begin() + 1, rhs.sizes->end());
+ }
+ }
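+ // e.g. (editorial): if rhs holds sizes [3][5], 'this' ends up holding [5].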
+
+ unsigned int getDimSize(int i) const
+ {
+ assert(sizes != nullptr && (int)sizes->size() > i);
+ return (*sizes)[i].size;
+ }
+
+ void setDimSize(int i, unsigned int size) const
+ {
+ assert(sizes != nullptr && (int)sizes->size() > i);
+ assert((*sizes)[i].node == nullptr);
+ (*sizes)[i].size = size;
+ }
+
+ TIntermTyped* getDimNode(int i) const
+ {
+ assert(sizes != nullptr && (int)sizes->size() > i);
+ return (*sizes)[i].node;
+ }
+
+ bool operator==(const TSmallArrayVector& rhs) const
+ {
+ if (sizes == nullptr && rhs.sizes == nullptr)
+ return true;
+ if (sizes == nullptr || rhs.sizes == nullptr)
+ return false;
+ return *sizes == *rhs.sizes;
+ }
+ bool operator!=(const TSmallArrayVector& rhs) const { return ! operator==(rhs); }
+
+protected:
+ TSmallArrayVector(const TSmallArrayVector&);
+
+ void alloc()
+ {
+ if (sizes == nullptr)
+ sizes = new TVector<TArraySize>;
+ }
+ void dealloc()
+ {
+ delete sizes;
+ sizes = nullptr;
+ }
+
+ TVector<TArraySize>* sizes; // will either hold such a pointer, or in the future, hold the two array sizes
+};
+
+//
+// Represent an array, or array of arrays, to arbitrary depth. This is not
+// done through a hierarchy of types in a type tree, rather all contiguous arrayness
+// in the type hierarchy is localized into this single cumulative object.
+//
+// The arrayness in TType is a pointer, so that it can be non-allocated and zero
+// for the vast majority of types that are non-array types.
+//
+// Order Policy: these are all identical:
+// - left to right order within a contiguous set of ...[..][..][..]... in the source language
+// - index order 0, 1, 2, ... within the 'sizes' member below
+// - outer-most to inner-most
+//
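+// For example (editorial): for the GLSL declaration 'float a[3][5]',
+// getNumDims() == 2, getDimSize(0) == 3 (outer-most), getDimSize(1) == 5,
+// and getCumulativeSize() == 15.
+//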
+struct TArraySizes {
+ POOL_ALLOCATOR_NEW_DELETE(GetThreadPoolAllocator())
+
+ TArraySizes() : implicitArraySize(1), variablyIndexed(false) { }
+
+ // For breaking into two non-shared copies, independently modifiable.
+ TArraySizes& operator=(const TArraySizes& from)
+ {
+ implicitArraySize = from.implicitArraySize;
+ variablyIndexed = from.variablyIndexed;
+ sizes = from.sizes;
+
+ return *this;
+ }
+
+ // translate from array-of-array semantics to container semantics
+ int getNumDims() const { return sizes.size(); }
+ int getDimSize(int dim) const { return sizes.getDimSize(dim); }
+ TIntermTyped* getDimNode(int dim) const { return sizes.getDimNode(dim); }
+ void setDimSize(int dim, int size) { sizes.setDimSize(dim, size); }
+ int getOuterSize() const { return sizes.frontSize(); }
+ TIntermTyped* getOuterNode() const { return sizes.frontNode(); }
+ int getCumulativeSize() const
+ {
+ int size = 1;
+ for (int d = 0; d < sizes.size(); ++d) {
+ // this only makes sense in paths that have a known array size
+ assert(sizes.getDimSize(d) != UnsizedArraySize);
+ size *= sizes.getDimSize(d);
+ }
+ return size;
+ }
+ void addInnerSize() { addInnerSize((unsigned)UnsizedArraySize); }
+ void addInnerSize(int s) { addInnerSize((unsigned)s, nullptr); }
+ void addInnerSize(int s, TIntermTyped* n) { sizes.push_back((unsigned)s, n); }
+ void addInnerSize(TArraySize pair) {
+ sizes.push_back(pair.size, pair.node);
+ }
+ void addInnerSizes(const TArraySizes& s) { sizes.push_back(s.sizes); }
+ void changeOuterSize(int s) { sizes.changeFront((unsigned)s); }
+ int getImplicitSize() const { return implicitArraySize; }
+ void updateImplicitSize(int s) { implicitArraySize = std::max(implicitArraySize, s); }
+ bool isInnerUnsized() const
+ {
+ for (int d = 1; d < sizes.size(); ++d) {
+ if (sizes.getDimSize(d) == (unsigned)UnsizedArraySize)
+ return true;
+ }
+
+ return false;
+ }
+ bool clearInnerUnsized()
+ {
+ for (int d = 1; d < sizes.size(); ++d) {
+ if (sizes.getDimSize(d) == (unsigned)UnsizedArraySize)
+ setDimSize(d, 1);
+ }
+
+ return false;
+ }
+ bool isInnerSpecialization() const
+ {
+ for (int d = 1; d < sizes.size(); ++d) {
+ if (sizes.getDimNode(d) != nullptr)
+ return true;
+ }
+
+ return false;
+ }
+ bool isOuterSpecialization()
+ {
+ return sizes.getDimNode(0) != nullptr;
+ }
+
+ bool hasUnsized() const { return getOuterSize() == UnsizedArraySize || isInnerUnsized(); }
+ bool isSized() const { return getOuterSize() != UnsizedArraySize; }
+ void dereference() { sizes.pop_front(); }
+ void copyDereferenced(const TArraySizes& rhs)
+ {
+ assert(sizes.size() == 0);
+ if (rhs.sizes.size() > 1)
+ sizes.copyNonFront(rhs.sizes);
+ }
+
+ bool sameInnerArrayness(const TArraySizes& rhs) const
+ {
+ if (sizes.size() != rhs.sizes.size())
+ return false;
+
+ for (int d = 1; d < sizes.size(); ++d) {
+ if (sizes.getDimSize(d) != rhs.sizes.getDimSize(d) ||
+ sizes.getDimNode(d) != rhs.sizes.getDimNode(d))
+ return false;
+ }
+
+ return true;
+ }
+
+ void setVariablyIndexed() { variablyIndexed = true; }
+ bool isVariablyIndexed() const { return variablyIndexed; }
+
+ bool operator==(const TArraySizes& rhs) const { return sizes == rhs.sizes; }
+ bool operator!=(const TArraySizes& rhs) const { return sizes != rhs.sizes; }
+
+protected:
+ TSmallArrayVector sizes;
+
+ TArraySizes(const TArraySizes&);
+
+ // For tracking maximum referenced compile-time constant index.
+ // Applies only to the outer-most dimension. Potentially becomes
+ // the implicit size of the array, if not variably indexed and
+ // otherwise legal.
+ int implicitArraySize;
+ bool variablyIndexed; // true if array is indexed with a non compile-time constant
+};
+
+} // end namespace glslang
+
+#endif // _ARRAYS_INCLUDED_
diff --git a/thirdparty/glslang/glslang/Include/intermediate.h b/thirdparty/glslang/glslang/Include/intermediate.h
new file mode 100644
index 0000000000..89d1954959
--- /dev/null
+++ b/thirdparty/glslang/glslang/Include/intermediate.h
@@ -0,0 +1,1764 @@
+//
+// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+// Copyright (C) 2012-2016 LunarG, Inc.
+// Copyright (C) 2017 ARM Limited.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+//
+// Definition of the in-memory high-level intermediate representation
+// of shaders. This is a tree that the parser creates.
+//
+// Nodes in the tree are defined as a hierarchy of classes derived from
+// TIntermNode. Each is a node in a tree. There is no preset branching factor;
+// each node can have its own type of list of children.
+//
+
+#ifndef __INTERMEDIATE_H
+#define __INTERMEDIATE_H
+
+#if defined(_MSC_VER) && _MSC_VER >= 1900
+ #pragma warning(disable : 4464) // relative include path contains '..'
+ #pragma warning(disable : 5026) // 'glslang::TIntermUnary': move constructor was implicitly defined as deleted
+#endif
+
+#include "../Include/Common.h"
+#include "../Include/Types.h"
+#include "../Include/ConstantUnion.h"
+
+namespace glslang {
+
+class TIntermediate;
+
+//
+// Operators used by the high-level (parse tree) representation.
+//
+enum TOperator {
+ EOpNull, // if in a node, should only mean a node is still being built
+ EOpSequence, // denotes a list of statements, or parameters, etc.
+ EOpLinkerObjects, // for the aggregate node of objects the linker may need, if not referenced by the rest of the AST
+ EOpFunctionCall,
+ EOpFunction, // For function definition
+ EOpParameters, // an aggregate listing the parameters to a function
+
+ //
+ // Unary operators
+ //
+
+ EOpNegative,
+ EOpLogicalNot,
+ EOpVectorLogicalNot,
+ EOpBitwiseNot,
+
+ EOpPostIncrement,
+ EOpPostDecrement,
+ EOpPreIncrement,
+ EOpPreDecrement,
+
+ EOpCopyObject,
+
+ // (u)int* -> bool
+ EOpConvInt8ToBool,
+ EOpConvUint8ToBool,
+ EOpConvInt16ToBool,
+ EOpConvUint16ToBool,
+ EOpConvIntToBool,
+ EOpConvUintToBool,
+ EOpConvInt64ToBool,
+ EOpConvUint64ToBool,
+
+ // float* -> bool
+ EOpConvFloat16ToBool,
+ EOpConvFloatToBool,
+ EOpConvDoubleToBool,
+
+ // bool -> (u)int*
+ EOpConvBoolToInt8,
+ EOpConvBoolToUint8,
+ EOpConvBoolToInt16,
+ EOpConvBoolToUint16,
+ EOpConvBoolToInt,
+ EOpConvBoolToUint,
+ EOpConvBoolToInt64,
+ EOpConvBoolToUint64,
+
+ // bool -> float*
+ EOpConvBoolToFloat16,
+ EOpConvBoolToFloat,
+ EOpConvBoolToDouble,
+
+ // int8_t -> (u)int*
+ EOpConvInt8ToInt16,
+ EOpConvInt8ToInt,
+ EOpConvInt8ToInt64,
+ EOpConvInt8ToUint8,
+ EOpConvInt8ToUint16,
+ EOpConvInt8ToUint,
+ EOpConvInt8ToUint64,
+
+ // uint8_t -> (u)int*
+ EOpConvUint8ToInt8,
+ EOpConvUint8ToInt16,
+ EOpConvUint8ToInt,
+ EOpConvUint8ToInt64,
+ EOpConvUint8ToUint16,
+ EOpConvUint8ToUint,
+ EOpConvUint8ToUint64,
+
+ // int8_t -> float*
+ EOpConvInt8ToFloat16,
+ EOpConvInt8ToFloat,
+ EOpConvInt8ToDouble,
+
+ // uint8_t -> float*
+ EOpConvUint8ToFloat16,
+ EOpConvUint8ToFloat,
+ EOpConvUint8ToDouble,
+
+ // int16_t -> (u)int*
+ EOpConvInt16ToInt8,
+ EOpConvInt16ToInt,
+ EOpConvInt16ToInt64,
+ EOpConvInt16ToUint8,
+ EOpConvInt16ToUint16,
+ EOpConvInt16ToUint,
+ EOpConvInt16ToUint64,
+
+ // uint16_t -> (u)int*
+ EOpConvUint16ToInt8,
+ EOpConvUint16ToInt16,
+ EOpConvUint16ToInt,
+ EOpConvUint16ToInt64,
+ EOpConvUint16ToUint8,
+ EOpConvUint16ToUint,
+ EOpConvUint16ToUint64,
+
+ // int16_t -> float*
+ EOpConvInt16ToFloat16,
+ EOpConvInt16ToFloat,
+ EOpConvInt16ToDouble,
+
+ // uint16_t -> float*
+ EOpConvUint16ToFloat16,
+ EOpConvUint16ToFloat,
+ EOpConvUint16ToDouble,
+
+ // int32_t -> (u)int*
+ EOpConvIntToInt8,
+ EOpConvIntToInt16,
+ EOpConvIntToInt64,
+ EOpConvIntToUint8,
+ EOpConvIntToUint16,
+ EOpConvIntToUint,
+ EOpConvIntToUint64,
+
+ // uint32_t -> (u)int*
+ EOpConvUintToInt8,
+ EOpConvUintToInt16,
+ EOpConvUintToInt,
+ EOpConvUintToInt64,
+ EOpConvUintToUint8,
+ EOpConvUintToUint16,
+ EOpConvUintToUint64,
+
+ // int32_t -> float*
+ EOpConvIntToFloat16,
+ EOpConvIntToFloat,
+ EOpConvIntToDouble,
+
+ // uint32_t -> float*
+ EOpConvUintToFloat16,
+ EOpConvUintToFloat,
+ EOpConvUintToDouble,
+
+ // int64_t -> (u)int*
+ EOpConvInt64ToInt8,
+ EOpConvInt64ToInt16,
+ EOpConvInt64ToInt,
+ EOpConvInt64ToUint8,
+ EOpConvInt64ToUint16,
+ EOpConvInt64ToUint,
+ EOpConvInt64ToUint64,
+
+ // uint64_t -> (u)int*
+ EOpConvUint64ToInt8,
+ EOpConvUint64ToInt16,
+ EOpConvUint64ToInt,
+ EOpConvUint64ToInt64,
+ EOpConvUint64ToUint8,
+ EOpConvUint64ToUint16,
+ EOpConvUint64ToUint,
+
+ // int64_t -> float*
+ EOpConvInt64ToFloat16,
+ EOpConvInt64ToFloat,
+ EOpConvInt64ToDouble,
+
+ // uint64_t -> float*
+ EOpConvUint64ToFloat16,
+ EOpConvUint64ToFloat,
+ EOpConvUint64ToDouble,
+
+ // float16_t -> (u)int*
+ EOpConvFloat16ToInt8,
+ EOpConvFloat16ToInt16,
+ EOpConvFloat16ToInt,
+ EOpConvFloat16ToInt64,
+ EOpConvFloat16ToUint8,
+ EOpConvFloat16ToUint16,
+ EOpConvFloat16ToUint,
+ EOpConvFloat16ToUint64,
+
+ // float16_t -> float*
+ EOpConvFloat16ToFloat,
+ EOpConvFloat16ToDouble,
+
+ // float -> (u)int*
+ EOpConvFloatToInt8,
+ EOpConvFloatToInt16,
+ EOpConvFloatToInt,
+ EOpConvFloatToInt64,
+ EOpConvFloatToUint8,
+ EOpConvFloatToUint16,
+ EOpConvFloatToUint,
+ EOpConvFloatToUint64,
+
+ // float -> float*
+ EOpConvFloatToFloat16,
+ EOpConvFloatToDouble,
+
+ // float64_t -> (u)int*
+ EOpConvDoubleToInt8,
+ EOpConvDoubleToInt16,
+ EOpConvDoubleToInt,
+ EOpConvDoubleToInt64,
+ EOpConvDoubleToUint8,
+ EOpConvDoubleToUint16,
+ EOpConvDoubleToUint,
+ EOpConvDoubleToUint64,
+
+ // float64_t -> float*
+ EOpConvDoubleToFloat16,
+ EOpConvDoubleToFloat,
+
+ // uint64_t <-> pointer
+ EOpConvUint64ToPtr,
+ EOpConvPtrToUint64,
+
+ //
+ // binary operations
+ //
+
+ EOpAdd,
+ EOpSub,
+ EOpMul,
+ EOpDiv,
+ EOpMod,
+ EOpRightShift,
+ EOpLeftShift,
+ EOpAnd,
+ EOpInclusiveOr,
+ EOpExclusiveOr,
+ EOpEqual,
+ EOpNotEqual,
+ EOpVectorEqual,
+ EOpVectorNotEqual,
+ EOpLessThan,
+ EOpGreaterThan,
+ EOpLessThanEqual,
+ EOpGreaterThanEqual,
+ EOpComma,
+
+ EOpVectorTimesScalar,
+ EOpVectorTimesMatrix,
+ EOpMatrixTimesVector,
+ EOpMatrixTimesScalar,
+
+ EOpLogicalOr,
+ EOpLogicalXor,
+ EOpLogicalAnd,
+
+ EOpIndexDirect,
+ EOpIndexIndirect,
+ EOpIndexDirectStruct,
+
+ EOpVectorSwizzle,
+
+ EOpMethod,
+ EOpScoping,
+
+ //
+ // Built-in functions mapped to operators
+ //
+
+ EOpRadians,
+ EOpDegrees,
+ EOpSin,
+ EOpCos,
+ EOpTan,
+ EOpAsin,
+ EOpAcos,
+ EOpAtan,
+ EOpSinh,
+ EOpCosh,
+ EOpTanh,
+ EOpAsinh,
+ EOpAcosh,
+ EOpAtanh,
+
+ EOpPow,
+ EOpExp,
+ EOpLog,
+ EOpExp2,
+ EOpLog2,
+ EOpSqrt,
+ EOpInverseSqrt,
+
+ EOpAbs,
+ EOpSign,
+ EOpFloor,
+ EOpTrunc,
+ EOpRound,
+ EOpRoundEven,
+ EOpCeil,
+ EOpFract,
+ EOpModf,
+ EOpMin,
+ EOpMax,
+ EOpClamp,
+ EOpMix,
+ EOpStep,
+ EOpSmoothStep,
+
+ EOpIsNan,
+ EOpIsInf,
+
+ EOpFma,
+
+ EOpFrexp,
+ EOpLdexp,
+
+ EOpFloatBitsToInt,
+ EOpFloatBitsToUint,
+ EOpIntBitsToFloat,
+ EOpUintBitsToFloat,
+ EOpDoubleBitsToInt64,
+ EOpDoubleBitsToUint64,
+ EOpInt64BitsToDouble,
+ EOpUint64BitsToDouble,
+ EOpFloat16BitsToInt16,
+ EOpFloat16BitsToUint16,
+ EOpInt16BitsToFloat16,
+ EOpUint16BitsToFloat16,
+ EOpPackSnorm2x16,
+ EOpUnpackSnorm2x16,
+ EOpPackUnorm2x16,
+ EOpUnpackUnorm2x16,
+ EOpPackSnorm4x8,
+ EOpUnpackSnorm4x8,
+ EOpPackUnorm4x8,
+ EOpUnpackUnorm4x8,
+ EOpPackHalf2x16,
+ EOpUnpackHalf2x16,
+ EOpPackDouble2x32,
+ EOpUnpackDouble2x32,
+ EOpPackInt2x32,
+ EOpUnpackInt2x32,
+ EOpPackUint2x32,
+ EOpUnpackUint2x32,
+ EOpPackFloat2x16,
+ EOpUnpackFloat2x16,
+ EOpPackInt2x16,
+ EOpUnpackInt2x16,
+ EOpPackUint2x16,
+ EOpUnpackUint2x16,
+ EOpPackInt4x16,
+ EOpUnpackInt4x16,
+ EOpPackUint4x16,
+ EOpUnpackUint4x16,
+ EOpPack16,
+ EOpPack32,
+ EOpPack64,
+ EOpUnpack32,
+ EOpUnpack16,
+ EOpUnpack8,
+
+ EOpLength,
+ EOpDistance,
+ EOpDot,
+ EOpCross,
+ EOpNormalize,
+ EOpFaceForward,
+ EOpReflect,
+ EOpRefract,
+
+#ifdef AMD_EXTENSIONS
+ EOpMin3,
+ EOpMax3,
+ EOpMid3,
+#endif
+
+ EOpDPdx, // Fragment only
+ EOpDPdy, // Fragment only
+ EOpFwidth, // Fragment only
+ EOpDPdxFine, // Fragment only
+ EOpDPdyFine, // Fragment only
+ EOpFwidthFine, // Fragment only
+ EOpDPdxCoarse, // Fragment only
+ EOpDPdyCoarse, // Fragment only
+ EOpFwidthCoarse, // Fragment only
+
+ EOpInterpolateAtCentroid, // Fragment only
+ EOpInterpolateAtSample, // Fragment only
+ EOpInterpolateAtOffset, // Fragment only
+
+#ifdef AMD_EXTENSIONS
+ EOpInterpolateAtVertex,
+#endif
+
+ EOpMatrixTimesMatrix,
+ EOpOuterProduct,
+ EOpDeterminant,
+ EOpMatrixInverse,
+ EOpTranspose,
+
+ EOpFtransform,
+
+ EOpNoise,
+
+ EOpEmitVertex, // geometry only
+ EOpEndPrimitive, // geometry only
+ EOpEmitStreamVertex, // geometry only
+ EOpEndStreamPrimitive, // geometry only
+
+ EOpBarrier,
+ EOpMemoryBarrier,
+ EOpMemoryBarrierAtomicCounter,
+ EOpMemoryBarrierBuffer,
+ EOpMemoryBarrierImage,
+ EOpMemoryBarrierShared, // compute only
+ EOpGroupMemoryBarrier, // compute only
+
+ EOpBallot,
+ EOpReadInvocation,
+ EOpReadFirstInvocation,
+
+ EOpAnyInvocation,
+ EOpAllInvocations,
+ EOpAllInvocationsEqual,
+
+ EOpSubgroupGuardStart,
+ EOpSubgroupBarrier,
+ EOpSubgroupMemoryBarrier,
+ EOpSubgroupMemoryBarrierBuffer,
+ EOpSubgroupMemoryBarrierImage,
+ EOpSubgroupMemoryBarrierShared, // compute only
+ EOpSubgroupElect,
+ EOpSubgroupAll,
+ EOpSubgroupAny,
+ EOpSubgroupAllEqual,
+ EOpSubgroupBroadcast,
+ EOpSubgroupBroadcastFirst,
+ EOpSubgroupBallot,
+ EOpSubgroupInverseBallot,
+ EOpSubgroupBallotBitExtract,
+ EOpSubgroupBallotBitCount,
+ EOpSubgroupBallotInclusiveBitCount,
+ EOpSubgroupBallotExclusiveBitCount,
+ EOpSubgroupBallotFindLSB,
+ EOpSubgroupBallotFindMSB,
+ EOpSubgroupShuffle,
+ EOpSubgroupShuffleXor,
+ EOpSubgroupShuffleUp,
+ EOpSubgroupShuffleDown,
+ EOpSubgroupAdd,
+ EOpSubgroupMul,
+ EOpSubgroupMin,
+ EOpSubgroupMax,
+ EOpSubgroupAnd,
+ EOpSubgroupOr,
+ EOpSubgroupXor,
+ EOpSubgroupInclusiveAdd,
+ EOpSubgroupInclusiveMul,
+ EOpSubgroupInclusiveMin,
+ EOpSubgroupInclusiveMax,
+ EOpSubgroupInclusiveAnd,
+ EOpSubgroupInclusiveOr,
+ EOpSubgroupInclusiveXor,
+ EOpSubgroupExclusiveAdd,
+ EOpSubgroupExclusiveMul,
+ EOpSubgroupExclusiveMin,
+ EOpSubgroupExclusiveMax,
+ EOpSubgroupExclusiveAnd,
+ EOpSubgroupExclusiveOr,
+ EOpSubgroupExclusiveXor,
+ EOpSubgroupClusteredAdd,
+ EOpSubgroupClusteredMul,
+ EOpSubgroupClusteredMin,
+ EOpSubgroupClusteredMax,
+ EOpSubgroupClusteredAnd,
+ EOpSubgroupClusteredOr,
+ EOpSubgroupClusteredXor,
+ EOpSubgroupQuadBroadcast,
+ EOpSubgroupQuadSwapHorizontal,
+ EOpSubgroupQuadSwapVertical,
+ EOpSubgroupQuadSwapDiagonal,
+
+#ifdef NV_EXTENSIONS
+ EOpSubgroupPartition,
+ EOpSubgroupPartitionedAdd,
+ EOpSubgroupPartitionedMul,
+ EOpSubgroupPartitionedMin,
+ EOpSubgroupPartitionedMax,
+ EOpSubgroupPartitionedAnd,
+ EOpSubgroupPartitionedOr,
+ EOpSubgroupPartitionedXor,
+ EOpSubgroupPartitionedInclusiveAdd,
+ EOpSubgroupPartitionedInclusiveMul,
+ EOpSubgroupPartitionedInclusiveMin,
+ EOpSubgroupPartitionedInclusiveMax,
+ EOpSubgroupPartitionedInclusiveAnd,
+ EOpSubgroupPartitionedInclusiveOr,
+ EOpSubgroupPartitionedInclusiveXor,
+ EOpSubgroupPartitionedExclusiveAdd,
+ EOpSubgroupPartitionedExclusiveMul,
+ EOpSubgroupPartitionedExclusiveMin,
+ EOpSubgroupPartitionedExclusiveMax,
+ EOpSubgroupPartitionedExclusiveAnd,
+ EOpSubgroupPartitionedExclusiveOr,
+ EOpSubgroupPartitionedExclusiveXor,
+#endif
+
+ EOpSubgroupGuardStop,
+
+#ifdef AMD_EXTENSIONS
+ EOpMinInvocations,
+ EOpMaxInvocations,
+ EOpAddInvocations,
+ EOpMinInvocationsNonUniform,
+ EOpMaxInvocationsNonUniform,
+ EOpAddInvocationsNonUniform,
+ EOpMinInvocationsInclusiveScan,
+ EOpMaxInvocationsInclusiveScan,
+ EOpAddInvocationsInclusiveScan,
+ EOpMinInvocationsInclusiveScanNonUniform,
+ EOpMaxInvocationsInclusiveScanNonUniform,
+ EOpAddInvocationsInclusiveScanNonUniform,
+ EOpMinInvocationsExclusiveScan,
+ EOpMaxInvocationsExclusiveScan,
+ EOpAddInvocationsExclusiveScan,
+ EOpMinInvocationsExclusiveScanNonUniform,
+ EOpMaxInvocationsExclusiveScanNonUniform,
+ EOpAddInvocationsExclusiveScanNonUniform,
+ EOpSwizzleInvocations,
+ EOpSwizzleInvocationsMasked,
+ EOpWriteInvocation,
+ EOpMbcnt,
+
+ EOpCubeFaceIndex,
+ EOpCubeFaceCoord,
+ EOpTime,
+#endif
+
+ EOpAtomicAdd,
+ EOpAtomicMin,
+ EOpAtomicMax,
+ EOpAtomicAnd,
+ EOpAtomicOr,
+ EOpAtomicXor,
+ EOpAtomicExchange,
+ EOpAtomicCompSwap,
+ EOpAtomicLoad,
+ EOpAtomicStore,
+
+ EOpAtomicCounterIncrement, // results in pre-increment value
+ EOpAtomicCounterDecrement, // results in post-decrement value
+ EOpAtomicCounter,
+ EOpAtomicCounterAdd,
+ EOpAtomicCounterSubtract,
+ EOpAtomicCounterMin,
+ EOpAtomicCounterMax,
+ EOpAtomicCounterAnd,
+ EOpAtomicCounterOr,
+ EOpAtomicCounterXor,
+ EOpAtomicCounterExchange,
+ EOpAtomicCounterCompSwap,
+
+ EOpAny,
+ EOpAll,
+
+ EOpCooperativeMatrixLoad,
+ EOpCooperativeMatrixStore,
+ EOpCooperativeMatrixMulAdd,
+
+ //
+ // Branch
+ //
+
+ EOpKill, // Fragment only
+ EOpReturn,
+ EOpBreak,
+ EOpContinue,
+ EOpCase,
+ EOpDefault,
+
+ //
+ // Constructors
+ //
+
+ EOpConstructGuardStart,
+ EOpConstructInt, // these first scalar forms also identify what implicit conversion is needed
+ EOpConstructUint,
+ EOpConstructInt8,
+ EOpConstructUint8,
+ EOpConstructInt16,
+ EOpConstructUint16,
+ EOpConstructInt64,
+ EOpConstructUint64,
+ EOpConstructBool,
+ EOpConstructFloat,
+ EOpConstructDouble,
+ EOpConstructVec2,
+ EOpConstructVec3,
+ EOpConstructVec4,
+ EOpConstructDVec2,
+ EOpConstructDVec3,
+ EOpConstructDVec4,
+ EOpConstructBVec2,
+ EOpConstructBVec3,
+ EOpConstructBVec4,
+ EOpConstructI8Vec2,
+ EOpConstructI8Vec3,
+ EOpConstructI8Vec4,
+ EOpConstructU8Vec2,
+ EOpConstructU8Vec3,
+ EOpConstructU8Vec4,
+ EOpConstructI16Vec2,
+ EOpConstructI16Vec3,
+ EOpConstructI16Vec4,
+ EOpConstructU16Vec2,
+ EOpConstructU16Vec3,
+ EOpConstructU16Vec4,
+ EOpConstructIVec2,
+ EOpConstructIVec3,
+ EOpConstructIVec4,
+ EOpConstructUVec2,
+ EOpConstructUVec3,
+ EOpConstructUVec4,
+ EOpConstructI64Vec2,
+ EOpConstructI64Vec3,
+ EOpConstructI64Vec4,
+ EOpConstructU64Vec2,
+ EOpConstructU64Vec3,
+ EOpConstructU64Vec4,
+ EOpConstructMat2x2,
+ EOpConstructMat2x3,
+ EOpConstructMat2x4,
+ EOpConstructMat3x2,
+ EOpConstructMat3x3,
+ EOpConstructMat3x4,
+ EOpConstructMat4x2,
+ EOpConstructMat4x3,
+ EOpConstructMat4x4,
+ EOpConstructDMat2x2,
+ EOpConstructDMat2x3,
+ EOpConstructDMat2x4,
+ EOpConstructDMat3x2,
+ EOpConstructDMat3x3,
+ EOpConstructDMat3x4,
+ EOpConstructDMat4x2,
+ EOpConstructDMat4x3,
+ EOpConstructDMat4x4,
+ EOpConstructIMat2x2,
+ EOpConstructIMat2x3,
+ EOpConstructIMat2x4,
+ EOpConstructIMat3x2,
+ EOpConstructIMat3x3,
+ EOpConstructIMat3x4,
+ EOpConstructIMat4x2,
+ EOpConstructIMat4x3,
+ EOpConstructIMat4x4,
+ EOpConstructUMat2x2,
+ EOpConstructUMat2x3,
+ EOpConstructUMat2x4,
+ EOpConstructUMat3x2,
+ EOpConstructUMat3x3,
+ EOpConstructUMat3x4,
+ EOpConstructUMat4x2,
+ EOpConstructUMat4x3,
+ EOpConstructUMat4x4,
+ EOpConstructBMat2x2,
+ EOpConstructBMat2x3,
+ EOpConstructBMat2x4,
+ EOpConstructBMat3x2,
+ EOpConstructBMat3x3,
+ EOpConstructBMat3x4,
+ EOpConstructBMat4x2,
+ EOpConstructBMat4x3,
+ EOpConstructBMat4x4,
+ EOpConstructFloat16,
+ EOpConstructF16Vec2,
+ EOpConstructF16Vec3,
+ EOpConstructF16Vec4,
+ EOpConstructF16Mat2x2,
+ EOpConstructF16Mat2x3,
+ EOpConstructF16Mat2x4,
+ EOpConstructF16Mat3x2,
+ EOpConstructF16Mat3x3,
+ EOpConstructF16Mat3x4,
+ EOpConstructF16Mat4x2,
+ EOpConstructF16Mat4x3,
+ EOpConstructF16Mat4x4,
+ EOpConstructStruct,
+ EOpConstructTextureSampler,
+ EOpConstructNonuniform, // expected to be transformed away, not present in final AST
+ EOpConstructReference,
+ EOpConstructCooperativeMatrix,
+ EOpConstructGuardEnd,
+
+ //
+ // moves
+ //
+
+ EOpAssign,
+ EOpAddAssign,
+ EOpSubAssign,
+ EOpMulAssign,
+ EOpVectorTimesMatrixAssign,
+ EOpVectorTimesScalarAssign,
+ EOpMatrixTimesScalarAssign,
+ EOpMatrixTimesMatrixAssign,
+ EOpDivAssign,
+ EOpModAssign,
+ EOpAndAssign,
+ EOpInclusiveOrAssign,
+ EOpExclusiveOrAssign,
+ EOpLeftShiftAssign,
+ EOpRightShiftAssign,
+
+ //
+ // Array operators
+ //
+
+ // Can apply to arrays, vectors, or matrices.
+ // Can be decomposed to a constant at compile time, but this does not always happen,
+ // due to link-time effects. So, consumer can expect either a link-time sized or
+ // run-time sized array.
+ EOpArrayLength,
+
+ //
+ // Image operations
+ //
+
+ EOpImageGuardBegin,
+
+ EOpImageQuerySize,
+ EOpImageQuerySamples,
+ EOpImageLoad,
+ EOpImageStore,
+#ifdef AMD_EXTENSIONS
+ EOpImageLoadLod,
+ EOpImageStoreLod,
+#endif
+ EOpImageAtomicAdd,
+ EOpImageAtomicMin,
+ EOpImageAtomicMax,
+ EOpImageAtomicAnd,
+ EOpImageAtomicOr,
+ EOpImageAtomicXor,
+ EOpImageAtomicExchange,
+ EOpImageAtomicCompSwap,
+ EOpImageAtomicLoad,
+ EOpImageAtomicStore,
+
+ EOpSubpassLoad,
+ EOpSubpassLoadMS,
+ EOpSparseImageLoad,
+#ifdef AMD_EXTENSIONS
+ EOpSparseImageLoadLod,
+#endif
+
+ EOpImageGuardEnd,
+
+ //
+ // Texture operations
+ //
+
+ EOpTextureGuardBegin,
+
+ EOpTextureQuerySize,
+ EOpTextureQueryLod,
+ EOpTextureQueryLevels,
+ EOpTextureQuerySamples,
+
+ EOpSamplingGuardBegin,
+
+ EOpTexture,
+ EOpTextureProj,
+ EOpTextureLod,
+ EOpTextureOffset,
+ EOpTextureFetch,
+ EOpTextureFetchOffset,
+ EOpTextureProjOffset,
+ EOpTextureLodOffset,
+ EOpTextureProjLod,
+ EOpTextureProjLodOffset,
+ EOpTextureGrad,
+ EOpTextureGradOffset,
+ EOpTextureProjGrad,
+ EOpTextureProjGradOffset,
+ EOpTextureGather,
+ EOpTextureGatherOffset,
+ EOpTextureGatherOffsets,
+ EOpTextureClamp,
+ EOpTextureOffsetClamp,
+ EOpTextureGradClamp,
+ EOpTextureGradOffsetClamp,
+#ifdef AMD_EXTENSIONS
+ EOpTextureGatherLod,
+ EOpTextureGatherLodOffset,
+ EOpTextureGatherLodOffsets,
+ EOpFragmentMaskFetch,
+ EOpFragmentFetch,
+#endif
+
+ EOpSparseTextureGuardBegin,
+
+ EOpSparseTexture,
+ EOpSparseTextureLod,
+ EOpSparseTextureOffset,
+ EOpSparseTextureFetch,
+ EOpSparseTextureFetchOffset,
+ EOpSparseTextureLodOffset,
+ EOpSparseTextureGrad,
+ EOpSparseTextureGradOffset,
+ EOpSparseTextureGather,
+ EOpSparseTextureGatherOffset,
+ EOpSparseTextureGatherOffsets,
+ EOpSparseTexelsResident,
+ EOpSparseTextureClamp,
+ EOpSparseTextureOffsetClamp,
+ EOpSparseTextureGradClamp,
+ EOpSparseTextureGradOffsetClamp,
+#ifdef AMD_EXTENSIONS
+ EOpSparseTextureGatherLod,
+ EOpSparseTextureGatherLodOffset,
+ EOpSparseTextureGatherLodOffsets,
+#endif
+
+ EOpSparseTextureGuardEnd,
+
+#ifdef NV_EXTENSIONS
+ EOpImageFootprintGuardBegin,
+ EOpImageSampleFootprintNV,
+ EOpImageSampleFootprintClampNV,
+ EOpImageSampleFootprintLodNV,
+ EOpImageSampleFootprintGradNV,
+ EOpImageSampleFootprintGradClampNV,
+ EOpImageFootprintGuardEnd,
+#endif
+ EOpSamplingGuardEnd,
+ EOpTextureGuardEnd,
+
+ //
+ // Integer operations
+ //
+
+ EOpAddCarry,
+ EOpSubBorrow,
+ EOpUMulExtended,
+ EOpIMulExtended,
+ EOpBitfieldExtract,
+ EOpBitfieldInsert,
+ EOpBitFieldReverse,
+ EOpBitCount,
+ EOpFindLSB,
+ EOpFindMSB,
+
+#ifdef NV_EXTENSIONS
+ EOpTraceNV,
+ EOpReportIntersectionNV,
+ EOpIgnoreIntersectionNV,
+ EOpTerminateRayNV,
+ EOpExecuteCallableNV,
+ EOpWritePackedPrimitiveIndices4x8NV,
+#endif
+ //
+ // HLSL operations
+ //
+
+ EOpClip, // discard if input value < 0
+ EOpIsFinite,
+ EOpLog10, // base 10 log
+ EOpRcp, // 1/x
+ EOpSaturate, // clamp from 0 to 1
+ EOpSinCos, // sin and cos in out parameters
+ EOpGenMul, // mul(x,y) on any of mat/vec/scalars
+ EOpDst, // x = 1, y=src0.y * src1.y, z=src0.z, w=src1.w
+ EOpInterlockedAdd, // atomic ops, but uses [optional] out arg instead of return
+ EOpInterlockedAnd, // ...
+ EOpInterlockedCompareExchange, // ...
+ EOpInterlockedCompareStore, // ...
+ EOpInterlockedExchange, // ...
+ EOpInterlockedMax, // ...
+ EOpInterlockedMin, // ...
+ EOpInterlockedOr, // ...
+ EOpInterlockedXor, // ...
+ EOpAllMemoryBarrierWithGroupSync, // memory barriers without non-hlsl AST equivalents
+ EOpDeviceMemoryBarrier, // ...
+ EOpDeviceMemoryBarrierWithGroupSync, // ...
+ EOpWorkgroupMemoryBarrier, // ...
+ EOpWorkgroupMemoryBarrierWithGroupSync, // ...
+ EOpEvaluateAttributeSnapped, // InterpolateAtOffset with int position on 16x16 grid
+ EOpF32tof16, // HLSL conversion: half of a PackHalf2x16
+ EOpF16tof32, // HLSL conversion: half of an UnpackHalf2x16
+ EOpLit, // HLSL lighting coefficient vector
+ EOpTextureBias, // HLSL texture bias: will be lowered to EOpTexture
+ EOpAsDouble, // slightly different from EOpUint64BitsToDouble
+ EOpD3DCOLORtoUBYTE4, // convert and swizzle 4-component color to UBYTE4 range
+
+ EOpMethodSample, // Texture object methods. These are translated to existing
+ EOpMethodSampleBias, // AST methods, and exist to represent HLSL semantics until that
+ EOpMethodSampleCmp, // translation is performed. See HlslParseContext::decomposeSampleMethods().
+ EOpMethodSampleCmpLevelZero, // ...
+ EOpMethodSampleGrad, // ...
+ EOpMethodSampleLevel, // ...
+ EOpMethodLoad, // ...
+ EOpMethodGetDimensions, // ...
+ EOpMethodGetSamplePosition, // ...
+ EOpMethodGather, // ...
+ EOpMethodCalculateLevelOfDetail, // ...
+ EOpMethodCalculateLevelOfDetailUnclamped, // ...
+
+ // Load already defined above for textures
+ EOpMethodLoad2, // Structure buffer object methods. These are translated to existing
+ EOpMethodLoad3, // AST methods, and exist to represent HLSL semantics until that
+ EOpMethodLoad4, // translation is performed. See HlslParseContext::decomposeSampleMethods().
+ EOpMethodStore, // ...
+ EOpMethodStore2, // ...
+ EOpMethodStore3, // ...
+ EOpMethodStore4, // ...
+ EOpMethodIncrementCounter, // ...
+ EOpMethodDecrementCounter, // ...
+ // EOpMethodAppend is defined for geo shaders below
+ EOpMethodConsume,
+
+ // SM5 texture methods
+ EOpMethodGatherRed, // These are covered under the above EOpMethodSample comment about
+ EOpMethodGatherGreen, // translation to existing AST opcodes. They exist temporarily
+ EOpMethodGatherBlue, // because HLSL arguments are slightly different.
+ EOpMethodGatherAlpha, // ...
+ EOpMethodGatherCmp, // ...
+ EOpMethodGatherCmpRed, // ...
+ EOpMethodGatherCmpGreen, // ...
+ EOpMethodGatherCmpBlue, // ...
+ EOpMethodGatherCmpAlpha, // ...
+
+ // geometry methods
+ EOpMethodAppend, // Geometry shader methods
+ EOpMethodRestartStrip, // ...
+
+ // matrix
+ EOpMatrixSwizzle, // select multiple matrix components (non-column)
+
+ // SM6 wave ops
+ EOpWaveGetLaneCount, // Will decompose to gl_SubgroupSize.
+ EOpWaveGetLaneIndex, // Will decompose to gl_SubgroupInvocationID.
+ EOpWaveActiveCountBits, // Will decompose to subgroupBallotBitCount(subgroupBallot()).
+ EOpWavePrefixCountBits, // Will decompose to subgroupBallotInclusiveBitCount(subgroupBallot()).
+};
+
+class TIntermTraverser;
+class TIntermOperator;
+class TIntermAggregate;
+class TIntermUnary;
+class TIntermBinary;
+class TIntermConstantUnion;
+class TIntermSelection;
+class TIntermSwitch;
+class TIntermBranch;
+class TIntermTyped;
+class TIntermMethod;
+class TIntermSymbol;
+class TIntermLoop;
+
+} // end namespace glslang
+
+//
+// Base class for the tree nodes
+//
+// (Put outside the glslang namespace, as it's used as part of the external interface.)
+//
+class TIntermNode {
+public:
+ POOL_ALLOCATOR_NEW_DELETE(glslang::GetThreadPoolAllocator())
+
+ TIntermNode() { loc.init(); }
+ virtual const glslang::TSourceLoc& getLoc() const { return loc; }
+ virtual void setLoc(const glslang::TSourceLoc& l) { loc = l; }
+ virtual void traverse(glslang::TIntermTraverser*) = 0;
+ virtual glslang::TIntermTyped* getAsTyped() { return 0; }
+ virtual glslang::TIntermOperator* getAsOperator() { return 0; }
+ virtual glslang::TIntermConstantUnion* getAsConstantUnion() { return 0; }
+ virtual glslang::TIntermAggregate* getAsAggregate() { return 0; }
+ virtual glslang::TIntermUnary* getAsUnaryNode() { return 0; }
+ virtual glslang::TIntermBinary* getAsBinaryNode() { return 0; }
+ virtual glslang::TIntermSelection* getAsSelectionNode() { return 0; }
+ virtual glslang::TIntermSwitch* getAsSwitchNode() { return 0; }
+ virtual glslang::TIntermMethod* getAsMethodNode() { return 0; }
+ virtual glslang::TIntermSymbol* getAsSymbolNode() { return 0; }
+ virtual glslang::TIntermBranch* getAsBranchNode() { return 0; }
+ virtual glslang::TIntermLoop* getAsLoopNode() { return 0; }
+
+ virtual const glslang::TIntermTyped* getAsTyped() const { return 0; }
+ virtual const glslang::TIntermOperator* getAsOperator() const { return 0; }
+ virtual const glslang::TIntermConstantUnion* getAsConstantUnion() const { return 0; }
+ virtual const glslang::TIntermAggregate* getAsAggregate() const { return 0; }
+ virtual const glslang::TIntermUnary* getAsUnaryNode() const { return 0; }
+ virtual const glslang::TIntermBinary* getAsBinaryNode() const { return 0; }
+ virtual const glslang::TIntermSelection* getAsSelectionNode() const { return 0; }
+ virtual const glslang::TIntermSwitch* getAsSwitchNode() const { return 0; }
+ virtual const glslang::TIntermMethod* getAsMethodNode() const { return 0; }
+ virtual const glslang::TIntermSymbol* getAsSymbolNode() const { return 0; }
+ virtual const glslang::TIntermBranch* getAsBranchNode() const { return 0; }
+ virtual const glslang::TIntermLoop* getAsLoopNode() const { return 0; }
+ virtual ~TIntermNode() { }
+
+protected:
+ TIntermNode(const TIntermNode&);
+ TIntermNode& operator=(const TIntermNode&);
+ glslang::TSourceLoc loc;
+};
+
+namespace glslang {
+
+//
+// This is just to help yacc.
+//
+struct TIntermNodePair {
+ TIntermNode* node1;
+ TIntermNode* node2;
+};
+
+//
+// Intermediate class for nodes that have a type.
+//
+class TIntermTyped : public TIntermNode {
+public:
+ TIntermTyped(const TType& t) { type.shallowCopy(t); }
+ TIntermTyped(TBasicType basicType) { TType bt(basicType); type.shallowCopy(bt); }
+ virtual TIntermTyped* getAsTyped() { return this; }
+ virtual const TIntermTyped* getAsTyped() const { return this; }
+ virtual void setType(const TType& t) { type.shallowCopy(t); }
+ virtual const TType& getType() const { return type; }
+ virtual TType& getWritableType() { return type; }
+
+ virtual TBasicType getBasicType() const { return type.getBasicType(); }
+ virtual TQualifier& getQualifier() { return type.getQualifier(); }
+ virtual const TQualifier& getQualifier() const { return type.getQualifier(); }
+ virtual void propagatePrecision(TPrecisionQualifier);
+ virtual int getVectorSize() const { return type.getVectorSize(); }
+ virtual int getMatrixCols() const { return type.getMatrixCols(); }
+ virtual int getMatrixRows() const { return type.getMatrixRows(); }
+ virtual bool isMatrix() const { return type.isMatrix(); }
+ virtual bool isArray() const { return type.isArray(); }
+ virtual bool isVector() const { return type.isVector(); }
+ virtual bool isScalar() const { return type.isScalar(); }
+ virtual bool isStruct() const { return type.isStruct(); }
+ virtual bool isFloatingDomain() const { return type.isFloatingDomain(); }
+ virtual bool isIntegerDomain() const { return type.isIntegerDomain(); }
+ TString getCompleteString() const { return type.getCompleteString(); }
+
+protected:
+ TIntermTyped& operator=(const TIntermTyped&);
+ TType type;
+};
+
+//
+// Handle for, do-while, and while loops.
+//
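+// For example, 'while (test) body;' arrives with testFirst == true,
+// 'do body; while (test);' with testFirst == false, and a for-loop's increment
+// expression is carried in 'terminal'.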
+class TIntermLoop : public TIntermNode {
+public:
+ TIntermLoop(TIntermNode* aBody, TIntermTyped* aTest, TIntermTyped* aTerminal, bool testFirst) :
+ body(aBody),
+ test(aTest),
+ terminal(aTerminal),
+ first(testFirst),
+ unroll(false),
+ dontUnroll(false),
+ dependency(0),
+ minIterations(0),
+ maxIterations(iterationsInfinite),
+ iterationMultiple(1),
+ peelCount(0),
+ partialCount(0)
+ { }
+
+ virtual TIntermLoop* getAsLoopNode() { return this; }
+ virtual const TIntermLoop* getAsLoopNode() const { return this; }
+ virtual void traverse(TIntermTraverser*);
+ TIntermNode* getBody() const { return body; }
+ TIntermTyped* getTest() const { return test; }
+ TIntermTyped* getTerminal() const { return terminal; }
+ bool testFirst() const { return first; }
+
+ void setUnroll() { unroll = true; }
+ void setDontUnroll() {
+ dontUnroll = true;
+ peelCount = 0;
+ partialCount = 0;
+ }
+ bool getUnroll() const { return unroll; }
+ bool getDontUnroll() const { return dontUnroll; }
+
+ static const unsigned int dependencyInfinite = 0xFFFFFFFF;
+ static const unsigned int iterationsInfinite = 0xFFFFFFFF;
+ void setLoopDependency(int d) { dependency = d; }
+ int getLoopDependency() const { return dependency; }
+
+ void setMinIterations(unsigned int v) { minIterations = v; }
+ unsigned int getMinIterations() const { return minIterations; }
+ void setMaxIterations(unsigned int v) { maxIterations = v; }
+ unsigned int getMaxIterations() const { return maxIterations; }
+ void setIterationMultiple(unsigned int v) { iterationMultiple = v; }
+ unsigned int getIterationMultiple() const { return iterationMultiple; }
+ void setPeelCount(unsigned int v) {
+ peelCount = v;
+ dontUnroll = false;
+ }
+ unsigned int getPeelCount() const { return peelCount; }
+ void setPartialCount(unsigned int v) {
+ partialCount = v;
+ dontUnroll = false;
+ }
+ unsigned int getPartialCount() const { return partialCount; }
+
+protected:
+ TIntermNode* body; // code to loop over
+ TIntermTyped* test; // exit condition associated with loop, could be 0 for 'for' loops
+ TIntermTyped* terminal; // exists for for-loops
+ bool first; // true for while and for, not for do-while
+ bool unroll; // true if unroll requested
+ bool dontUnroll; // true if request to not unroll
+ unsigned int dependency; // loop dependency hint; 0 means not set or unknown
+ unsigned int minIterations; // as per the SPIR-V specification
+ unsigned int maxIterations; // as per the SPIR-V specification
+ unsigned int iterationMultiple; // as per the SPIR-V specification
+ unsigned int peelCount; // as per the SPIR-V specification
+ unsigned int partialCount; // as per the SPIR-V specification
+};
+
+//
+// Handle case, break, continue, return, and kill.
+//
+class TIntermBranch : public TIntermNode {
+public:
+ TIntermBranch(TOperator op, TIntermTyped* e) :
+ flowOp(op),
+ expression(e) { }
+ virtual TIntermBranch* getAsBranchNode() { return this; }
+ virtual const TIntermBranch* getAsBranchNode() const { return this; }
+ virtual void traverse(TIntermTraverser*);
+ TOperator getFlowOp() const { return flowOp; }
+ TIntermTyped* getExpression() const { return expression; }
+protected:
+ TOperator flowOp;
+ TIntermTyped* expression;
+};
+
+//
+// Represent method names before seeing their calling signature
+// or resolving them to operations. Just an expression as the base object
+// and a textual name.
+//
+class TIntermMethod : public TIntermTyped {
+public:
+ TIntermMethod(TIntermTyped* o, const TType& t, const TString& m) : TIntermTyped(t), object(o), method(m) { }
+ virtual TIntermMethod* getAsMethodNode() { return this; }
+ virtual const TIntermMethod* getAsMethodNode() const { return this; }
+ virtual const TString& getMethodName() const { return method; }
+ virtual TIntermTyped* getObject() const { return object; }
+ virtual void traverse(TIntermTraverser*);
+protected:
+ TIntermTyped* object;
+ TString method;
+};
+
+//
+// Nodes that correspond to symbols or constants in the source code.
+//
+class TIntermSymbol : public TIntermTyped {
+public:
+    // If 'symbol' is initialized as symbol(sym), the memory comes from the pool
+    // allocator of sym. If sym comes from the per-process threadPoolAllocator,
+    // then it causes increased memory usage per compile, so it is essential to
+    // use "symbol = sym" to assign to symbol instead.
+ TIntermSymbol(int i, const TString& n, const TType& t)
+ : TIntermTyped(t), id(i),
+#ifdef ENABLE_HLSL
+ flattenSubset(-1),
+#endif
+ constSubtree(nullptr)
+ { name = n; }
+ virtual int getId() const { return id; }
+ virtual void changeId(int i) { id = i; }
+ virtual const TString& getName() const { return name; }
+ virtual void traverse(TIntermTraverser*);
+ virtual TIntermSymbol* getAsSymbolNode() { return this; }
+ virtual const TIntermSymbol* getAsSymbolNode() const { return this; }
+ void setConstArray(const TConstUnionArray& c) { constArray = c; }
+ const TConstUnionArray& getConstArray() const { return constArray; }
+ void setConstSubtree(TIntermTyped* subtree) { constSubtree = subtree; }
+ TIntermTyped* getConstSubtree() const { return constSubtree; }
+#ifdef ENABLE_HLSL
+ void setFlattenSubset(int subset) { flattenSubset = subset; }
+ int getFlattenSubset() const { return flattenSubset; } // -1 means full object
+#endif
+
+ // This is meant for cases where a node has already been constructed, and
+ // later on, it becomes necessary to switch to a different symbol.
+ virtual void switchId(int newId) { id = newId; }
+
+protected:
+ int id; // the unique id of the symbol this node represents
+#ifdef ENABLE_HLSL
+ int flattenSubset; // how deeply the flattened object rooted at id has been dereferenced
+#endif
+ TString name; // the name of the symbol this node represents
+ TConstUnionArray constArray; // if the symbol is a front-end compile-time constant, this is its value
+ TIntermTyped* constSubtree;
+};
+
+class TIntermConstantUnion : public TIntermTyped {
+public:
+ TIntermConstantUnion(const TConstUnionArray& ua, const TType& t) : TIntermTyped(t), constArray(ua), literal(false) { }
+ const TConstUnionArray& getConstArray() const { return constArray; }
+ virtual TIntermConstantUnion* getAsConstantUnion() { return this; }
+ virtual const TIntermConstantUnion* getAsConstantUnion() const { return this; }
+ virtual void traverse(TIntermTraverser*);
+ virtual TIntermTyped* fold(TOperator, const TIntermTyped*) const;
+ virtual TIntermTyped* fold(TOperator, const TType&) const;
+ void setLiteral() { literal = true; }
+ void setExpression() { literal = false; }
+ bool isLiteral() const { return literal; }
+
+protected:
+ TIntermConstantUnion& operator=(const TIntermConstantUnion&);
+
+ const TConstUnionArray constArray;
+ bool literal; // true if node represents a literal in the source code
+};
+
+// Represent the independent aspects of a texturing TOperator
+struct TCrackedTextureOp {
+ bool query;
+ bool proj;
+ bool lod;
+ bool fetch;
+ bool offset;
+ bool offsets;
+ bool gather;
+ bool grad;
+ bool subpass;
+ bool lodClamp;
+#ifdef AMD_EXTENSIONS
+ bool fragMask;
+#endif
+};
+
+//
+// Intermediate class for node types that hold operators.
+//
+class TIntermOperator : public TIntermTyped {
+public:
+ virtual TIntermOperator* getAsOperator() { return this; }
+ virtual const TIntermOperator* getAsOperator() const { return this; }
+ TOperator getOp() const { return op; }
+ void setOp(TOperator newOp) { op = newOp; }
+ bool modifiesState() const;
+ bool isConstructor() const;
+ bool isTexture() const { return op > EOpTextureGuardBegin && op < EOpTextureGuardEnd; }
+ bool isSampling() const { return op > EOpSamplingGuardBegin && op < EOpSamplingGuardEnd; }
+ bool isImage() const { return op > EOpImageGuardBegin && op < EOpImageGuardEnd; }
+ bool isSparseTexture() const { return op > EOpSparseTextureGuardBegin && op < EOpSparseTextureGuardEnd; }
+#ifdef NV_EXTENSIONS
+ bool isImageFootprint() const { return op > EOpImageFootprintGuardBegin && op < EOpImageFootprintGuardEnd; }
+#endif
+ bool isSparseImage() const { return op == EOpSparseImageLoad; }
+
+ void setOperationPrecision(TPrecisionQualifier p) { operationPrecision = p; }
+ TPrecisionQualifier getOperationPrecision() const { return operationPrecision != EpqNone ?
+ operationPrecision :
+ type.getQualifier().precision; }
+ TString getCompleteString() const
+ {
+ TString cs = type.getCompleteString();
+ if (getOperationPrecision() != type.getQualifier().precision) {
+ cs += ", operation at ";
+ cs += GetPrecisionQualifierString(getOperationPrecision());
+ }
+
+ return cs;
+ }
+
+ // Crack the op into the individual dimensions of texturing operation.
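+    // For example, EOpTextureProjLodOffset sets cracked.proj, cracked.lod, and
+    // cracked.offset, while the query ops set only cracked.query.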
+ void crackTexture(TSampler sampler, TCrackedTextureOp& cracked) const
+ {
+ cracked.query = false;
+ cracked.proj = false;
+ cracked.lod = false;
+ cracked.fetch = false;
+ cracked.offset = false;
+ cracked.offsets = false;
+ cracked.gather = false;
+ cracked.grad = false;
+ cracked.subpass = false;
+ cracked.lodClamp = false;
+#ifdef AMD_EXTENSIONS
+ cracked.fragMask = false;
+#endif
+
+ switch (op) {
+ case EOpImageQuerySize:
+ case EOpImageQuerySamples:
+ case EOpTextureQuerySize:
+ case EOpTextureQueryLod:
+ case EOpTextureQueryLevels:
+ case EOpTextureQuerySamples:
+ case EOpSparseTexelsResident:
+ cracked.query = true;
+ break;
+ case EOpTexture:
+ case EOpSparseTexture:
+ break;
+ case EOpTextureClamp:
+ case EOpSparseTextureClamp:
+ cracked.lodClamp = true;
+ break;
+ case EOpTextureProj:
+ cracked.proj = true;
+ break;
+ case EOpTextureLod:
+ case EOpSparseTextureLod:
+ cracked.lod = true;
+ break;
+ case EOpTextureOffset:
+ case EOpSparseTextureOffset:
+ cracked.offset = true;
+ break;
+ case EOpTextureOffsetClamp:
+ case EOpSparseTextureOffsetClamp:
+ cracked.offset = true;
+ cracked.lodClamp = true;
+ break;
+ case EOpTextureFetch:
+ case EOpSparseTextureFetch:
+ cracked.fetch = true;
+ if (sampler.dim == Esd1D || (sampler.dim == Esd2D && ! sampler.ms) || sampler.dim == Esd3D)
+ cracked.lod = true;
+ break;
+ case EOpTextureFetchOffset:
+ case EOpSparseTextureFetchOffset:
+ cracked.fetch = true;
+ cracked.offset = true;
+ if (sampler.dim == Esd1D || (sampler.dim == Esd2D && ! sampler.ms) || sampler.dim == Esd3D)
+ cracked.lod = true;
+ break;
+ case EOpTextureProjOffset:
+ cracked.offset = true;
+ cracked.proj = true;
+ break;
+ case EOpTextureLodOffset:
+ case EOpSparseTextureLodOffset:
+ cracked.offset = true;
+ cracked.lod = true;
+ break;
+ case EOpTextureProjLod:
+ cracked.lod = true;
+ cracked.proj = true;
+ break;
+ case EOpTextureProjLodOffset:
+ cracked.offset = true;
+ cracked.lod = true;
+ cracked.proj = true;
+ break;
+ case EOpTextureGrad:
+ case EOpSparseTextureGrad:
+ cracked.grad = true;
+ break;
+ case EOpTextureGradClamp:
+ case EOpSparseTextureGradClamp:
+ cracked.grad = true;
+ cracked.lodClamp = true;
+ break;
+ case EOpTextureGradOffset:
+ case EOpSparseTextureGradOffset:
+ cracked.grad = true;
+ cracked.offset = true;
+ break;
+ case EOpTextureProjGrad:
+ cracked.grad = true;
+ cracked.proj = true;
+ break;
+ case EOpTextureProjGradOffset:
+ cracked.grad = true;
+ cracked.offset = true;
+ cracked.proj = true;
+ break;
+ case EOpTextureGradOffsetClamp:
+ case EOpSparseTextureGradOffsetClamp:
+ cracked.grad = true;
+ cracked.offset = true;
+ cracked.lodClamp = true;
+ break;
+ case EOpTextureGather:
+ case EOpSparseTextureGather:
+ cracked.gather = true;
+ break;
+ case EOpTextureGatherOffset:
+ case EOpSparseTextureGatherOffset:
+ cracked.gather = true;
+ cracked.offset = true;
+ break;
+ case EOpTextureGatherOffsets:
+ case EOpSparseTextureGatherOffsets:
+ cracked.gather = true;
+ cracked.offsets = true;
+ break;
+#ifdef AMD_EXTENSIONS
+ case EOpTextureGatherLod:
+ case EOpSparseTextureGatherLod:
+ cracked.gather = true;
+ cracked.lod = true;
+ break;
+ case EOpTextureGatherLodOffset:
+ case EOpSparseTextureGatherLodOffset:
+ cracked.gather = true;
+ cracked.offset = true;
+ cracked.lod = true;
+ break;
+ case EOpTextureGatherLodOffsets:
+ case EOpSparseTextureGatherLodOffsets:
+ cracked.gather = true;
+ cracked.offsets = true;
+ cracked.lod = true;
+ break;
+ case EOpImageLoadLod:
+ case EOpImageStoreLod:
+ case EOpSparseImageLoadLod:
+ cracked.lod = true;
+ break;
+ case EOpFragmentMaskFetch:
+ cracked.subpass = sampler.dim == EsdSubpass;
+ cracked.fragMask = true;
+ break;
+ case EOpFragmentFetch:
+ cracked.subpass = sampler.dim == EsdSubpass;
+ cracked.fragMask = true;
+ break;
+#endif
+#ifdef NV_EXTENSIONS
+ case EOpImageSampleFootprintNV:
+ break;
+ case EOpImageSampleFootprintClampNV:
+ cracked.lodClamp = true;
+ break;
+ case EOpImageSampleFootprintLodNV:
+ cracked.lod = true;
+ break;
+ case EOpImageSampleFootprintGradNV:
+ cracked.grad = true;
+ break;
+ case EOpImageSampleFootprintGradClampNV:
+ cracked.lodClamp = true;
+ cracked.grad = true;
+ break;
+#endif
+ case EOpSubpassLoad:
+ case EOpSubpassLoadMS:
+ cracked.subpass = true;
+ break;
+ default:
+ break;
+ }
+ }
+
+protected:
+ TIntermOperator(TOperator o) : TIntermTyped(EbtFloat), op(o), operationPrecision(EpqNone) {}
+ TIntermOperator(TOperator o, TType& t) : TIntermTyped(t), op(o), operationPrecision(EpqNone) {}
+ TOperator op;
+ // The result precision is in the inherited TType, and is usually meant to be both
+ // the operation precision and the result precision. However, some more complex things,
+ // like built-in function calls, distinguish between the two, in which case non-EqpNone
+ // 'operationPrecision' overrides the result precision as far as operation precision
+ // is concerned.
+ TPrecisionQualifier operationPrecision;
+};
+
+//
+// Nodes for all the basic binary math operators.
+//
+class TIntermBinary : public TIntermOperator {
+public:
+ TIntermBinary(TOperator o) : TIntermOperator(o) {}
+ virtual void traverse(TIntermTraverser*);
+ virtual void setLeft(TIntermTyped* n) { left = n; }
+ virtual void setRight(TIntermTyped* n) { right = n; }
+ virtual TIntermTyped* getLeft() const { return left; }
+ virtual TIntermTyped* getRight() const { return right; }
+ virtual TIntermBinary* getAsBinaryNode() { return this; }
+ virtual const TIntermBinary* getAsBinaryNode() const { return this; }
+ virtual void updatePrecision();
+protected:
+ TIntermTyped* left;
+ TIntermTyped* right;
+};
+
+//
+// Nodes for unary math operators.
+//
+class TIntermUnary : public TIntermOperator {
+public:
+ TIntermUnary(TOperator o, TType& t) : TIntermOperator(o, t), operand(0) {}
+ TIntermUnary(TOperator o) : TIntermOperator(o), operand(0) {}
+ virtual void traverse(TIntermTraverser*);
+ virtual void setOperand(TIntermTyped* o) { operand = o; }
+ virtual TIntermTyped* getOperand() { return operand; }
+ virtual const TIntermTyped* getOperand() const { return operand; }
+ virtual TIntermUnary* getAsUnaryNode() { return this; }
+ virtual const TIntermUnary* getAsUnaryNode() const { return this; }
+ virtual void updatePrecision();
+protected:
+ TIntermTyped* operand;
+};
+
+typedef TVector<TIntermNode*> TIntermSequence;
+typedef TVector<TStorageQualifier> TQualifierList;
+//
+// Nodes that operate on an arbitrary sized set of children.
+//
+class TIntermAggregate : public TIntermOperator {
+public:
+ TIntermAggregate() : TIntermOperator(EOpNull), userDefined(false), pragmaTable(nullptr) { }
+ TIntermAggregate(TOperator o) : TIntermOperator(o), pragmaTable(nullptr) { }
+ ~TIntermAggregate() { delete pragmaTable; }
+ virtual TIntermAggregate* getAsAggregate() { return this; }
+ virtual const TIntermAggregate* getAsAggregate() const { return this; }
+ virtual void setOperator(TOperator o) { op = o; }
+ virtual TIntermSequence& getSequence() { return sequence; }
+ virtual const TIntermSequence& getSequence() const { return sequence; }
+ virtual void setName(const TString& n) { name = n; }
+ virtual const TString& getName() const { return name; }
+ virtual void traverse(TIntermTraverser*);
+ virtual void setUserDefined() { userDefined = true; }
+ virtual bool isUserDefined() { return userDefined; }
+ virtual TQualifierList& getQualifierList() { return qualifier; }
+ virtual const TQualifierList& getQualifierList() const { return qualifier; }
+ void setOptimize(bool o) { optimize = o; }
+ void setDebug(bool d) { debug = d; }
+ bool getOptimize() const { return optimize; }
+ bool getDebug() const { return debug; }
+ void setPragmaTable(const TPragmaTable& pTable);
+ const TPragmaTable& getPragmaTable() const { return *pragmaTable; }
+protected:
+ TIntermAggregate(const TIntermAggregate&); // disallow copy constructor
+ TIntermAggregate& operator=(const TIntermAggregate&); // disallow assignment operator
+ TIntermSequence sequence;
+ TQualifierList qualifier;
+ TString name;
+ bool userDefined; // used for user defined function names
+ bool optimize;
+ bool debug;
+ TPragmaTable* pragmaTable;
+};
+
+//
+// For if tests.
+//
+class TIntermSelection : public TIntermTyped {
+public:
+ TIntermSelection(TIntermTyped* cond, TIntermNode* trueB, TIntermNode* falseB) :
+ TIntermTyped(EbtVoid), condition(cond), trueBlock(trueB), falseBlock(falseB),
+ shortCircuit(true),
+ flatten(false), dontFlatten(false) {}
+ TIntermSelection(TIntermTyped* cond, TIntermNode* trueB, TIntermNode* falseB, const TType& type) :
+ TIntermTyped(type), condition(cond), trueBlock(trueB), falseBlock(falseB),
+ shortCircuit(true),
+ flatten(false), dontFlatten(false) {}
+ virtual void traverse(TIntermTraverser*);
+ virtual TIntermTyped* getCondition() const { return condition; }
+ virtual TIntermNode* getTrueBlock() const { return trueBlock; }
+ virtual TIntermNode* getFalseBlock() const { return falseBlock; }
+ virtual TIntermSelection* getAsSelectionNode() { return this; }
+ virtual const TIntermSelection* getAsSelectionNode() const { return this; }
+
+ void setNoShortCircuit() { shortCircuit = false; }
+ bool getShortCircuit() const { return shortCircuit; }
+
+ void setFlatten() { flatten = true; }
+ void setDontFlatten() { dontFlatten = true; }
+ bool getFlatten() const { return flatten; }
+ bool getDontFlatten() const { return dontFlatten; }
+
+protected:
+ TIntermTyped* condition;
+ TIntermNode* trueBlock;
+ TIntermNode* falseBlock;
+ bool shortCircuit; // normally all if-then-else and all GLSL ?: short-circuit, but HLSL ?: does not
+ bool flatten; // true if flatten requested
+ bool dontFlatten; // true if requested to not flatten
+};
+
+//
+// For switch statements. The designed use is that a switch will have a sequence of nodes
+// that are either case/default nodes or a *single* node that represents all the code
+// in between consecutive case/defaults (if any). So, a traversal need only deal with
+// 0 or 1 nodes per case/default statement.
+//
+class TIntermSwitch : public TIntermNode {
+public:
+ TIntermSwitch(TIntermTyped* cond, TIntermAggregate* b) : condition(cond), body(b),
+ flatten(false), dontFlatten(false) {}
+ virtual void traverse(TIntermTraverser*);
+ virtual TIntermNode* getCondition() const { return condition; }
+ virtual TIntermAggregate* getBody() const { return body; }
+ virtual TIntermSwitch* getAsSwitchNode() { return this; }
+ virtual const TIntermSwitch* getAsSwitchNode() const { return this; }
+
+ void setFlatten() { flatten = true; }
+ void setDontFlatten() { dontFlatten = true; }
+ bool getFlatten() const { return flatten; }
+ bool getDontFlatten() const { return dontFlatten; }
+
+protected:
+ TIntermTyped* condition;
+ TIntermAggregate* body;
+ bool flatten; // true if flatten requested
+ bool dontFlatten; // true if requested to not flatten
+};
+
+enum TVisit
+{
+ EvPreVisit,
+ EvInVisit,
+ EvPostVisit
+};
+
+//
+// For traversing the tree. User should derive from this,
+// put their traversal specific data in it, and then pass
+// it to a Traverse method.
+//
+// When using this, just fill in the methods for nodes you want visited.
+// Return false from a pre-visit to skip visiting that node's subtree.
+//
+// Explicitly set postVisit to true if you want post visiting, otherwise,
+// filled in methods will only be called at pre-visit time (before processing
+// the subtree). Similarly for inVisit for in-order visiting of nodes with
+// multiple children.
+//
+// If you only want post-visits, explicitly turn off preVisit (and inVisit)
+// and turn on postVisit.
+//
+// In general, for the visit*() methods, return true from interior nodes
+// to have the traversal continue on to children.
+//
+// If you process children yourself, or don't want them processed, return false.
+//
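+// As a minimal sketch: a pass that counts symbol references can derive from
+// TIntermTraverser, override visitSymbol() to bump a counter, and be passed to
+// the root node's traverse() method.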
+class TIntermTraverser {
+public:
+ POOL_ALLOCATOR_NEW_DELETE(glslang::GetThreadPoolAllocator())
+ TIntermTraverser(bool preVisit = true, bool inVisit = false, bool postVisit = false, bool rightToLeft = false) :
+ preVisit(preVisit),
+ inVisit(inVisit),
+ postVisit(postVisit),
+ rightToLeft(rightToLeft),
+ depth(0),
+ maxDepth(0) { }
+ virtual ~TIntermTraverser() { }
+
+ virtual void visitSymbol(TIntermSymbol*) { }
+ virtual void visitConstantUnion(TIntermConstantUnion*) { }
+ virtual bool visitBinary(TVisit, TIntermBinary*) { return true; }
+ virtual bool visitUnary(TVisit, TIntermUnary*) { return true; }
+ virtual bool visitSelection(TVisit, TIntermSelection*) { return true; }
+ virtual bool visitAggregate(TVisit, TIntermAggregate*) { return true; }
+ virtual bool visitLoop(TVisit, TIntermLoop*) { return true; }
+ virtual bool visitBranch(TVisit, TIntermBranch*) { return true; }
+ virtual bool visitSwitch(TVisit, TIntermSwitch*) { return true; }
+
+ int getMaxDepth() const { return maxDepth; }
+
+ void incrementDepth(TIntermNode *current)
+ {
+ depth++;
+ maxDepth = (std::max)(maxDepth, depth);
+ path.push_back(current);
+ }
+
+ void decrementDepth()
+ {
+ depth--;
+ path.pop_back();
+ }
+
+ TIntermNode *getParentNode()
+ {
+ return path.size() == 0 ? NULL : path.back();
+ }
+
+ const bool preVisit;
+ const bool inVisit;
+ const bool postVisit;
+ const bool rightToLeft;
+
+protected:
+ TIntermTraverser& operator=(TIntermTraverser&);
+
+ int depth;
+ int maxDepth;
+
+ // All the nodes from root to the current node's parent during traversing.
+ TVector<TIntermNode *> path;
+};
+
+// KHR_vulkan_glsl says "Two arrays sized with specialization constants are the same type only if
+// sized with the same symbol, involving no operations"
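+// For example, two arrays both sized by the bare symbol N are the same type,
+// while one sized by (N + 1) involves an operation and so is not.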
+inline bool SameSpecializationConstants(TIntermTyped* node1, TIntermTyped* node2)
+{
+ return node1->getAsSymbolNode() && node2->getAsSymbolNode() &&
+ node1->getAsSymbolNode()->getId() == node2->getAsSymbolNode()->getId();
+}
+
+} // end namespace glslang
+
+#endif // __INTERMEDIATE_H
diff --git a/thirdparty/glslang/glslang/Include/revision.h b/thirdparty/glslang/glslang/Include/revision.h
new file mode 100644
index 0000000000..dd6c8da04f
--- /dev/null
+++ b/thirdparty/glslang/glslang/Include/revision.h
@@ -0,0 +1,3 @@
+// This header is generated by the make-revision script.
+
+#define GLSLANG_PATCH_LEVEL 3226
diff --git a/thirdparty/glslang/glslang/Include/revision.template b/thirdparty/glslang/glslang/Include/revision.template
new file mode 100644
index 0000000000..6c13630b27
--- /dev/null
+++ b/thirdparty/glslang/glslang/Include/revision.template
@@ -0,0 +1,13 @@
+// The file revision.h should be updated to the latest version, somehow, on
+// check-in, if glslang has changed.
+//
+// revision.template is the source for revision.h when using SubWCRev as the
+// method of updating revision.h. You don't have to do it this way, the
+// requirement is only that revision.h gets updated.
+//
+// revision.h is under source control so that not all consumers of glslang
+// source have to figure out how to create revision.h just to get a build
+// going. However, if it is not updated, it can be a version behind.
+
+#define GLSLANG_REVISION "$WCREV$"
+#define GLSLANG_DATE "$WCDATE$"
diff --git a/thirdparty/glslang/glslang/MachineIndependent/Constant.cpp b/thirdparty/glslang/glslang/MachineIndependent/Constant.cpp
new file mode 100644
index 0000000000..b75e3efb00
--- /dev/null
+++ b/thirdparty/glslang/glslang/MachineIndependent/Constant.cpp
@@ -0,0 +1,1405 @@
+//
+// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+// Copyright (C) 2012-2013 LunarG, Inc.
+// Copyright (C) 2017 ARM Limited.
+// Copyright (C) 2018 Google, Inc.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+#include "localintermediate.h"
+#include <cmath>
+#include <cfloat>
+#include <cstdlib>
+#include <climits>
+
+namespace {
+
+using namespace glslang;
+
+typedef union {
+ double d;
+ int i[2];
+} DoubleIntUnion;
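+// Viewing the double through i[] assumes a little-endian layout, where i[1]
+// holds the sign, exponent, and high mantissa bits of the IEEE-754 value.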
+
+// Some helper functions
+
+bool isNan(double x)
+{
+ DoubleIntUnion u;
+    // it is tough to find a platform-independent library function, so do it directly
+ u.d = x;
+ int bitPatternL = u.i[0];
+ int bitPatternH = u.i[1];
+ return (bitPatternH & 0x7ff80000) == 0x7ff80000 &&
+ ((bitPatternH & 0xFFFFF) != 0 || bitPatternL != 0);
+}
+
+bool isInf(double x)
+{
+ DoubleIntUnion u;
+    // it is tough to find a platform-independent library function, so do it directly
+ u.d = x;
+ int bitPatternL = u.i[0];
+ int bitPatternH = u.i[1];
+ return (bitPatternH & 0x7ff00000) == 0x7ff00000 &&
+ (bitPatternH & 0xFFFFF) == 0 && bitPatternL == 0;
+}
+
+const double pi = 3.1415926535897932384626433832795;
+
+} // end anonymous namespace
+
+
+namespace glslang {
+
+//
+// The fold functions see if an operation on a constant can be done in place,
+// without generating run-time code.
+//
+// Returns the node to keep using, which may or may not be the node passed in.
+//
+// Note: As of version 1.2, all constant operations must be folded. It is
+// not opportunistic, but rather a semantic requirement.
+//
+
+//
+// Do folding between a pair of nodes.
+// 'this' is the left-hand operand and 'rightConstantNode' is the right-hand operand.
+//
+// Returns a new node representing the result.
+//
+TIntermTyped* TIntermConstantUnion::fold(TOperator op, const TIntermTyped* rightConstantNode) const
+{
+    // For most cases, the return type matches the argument type, so set that
+    // up here and handle only the exceptions below.
+ TType returnType;
+ returnType.shallowCopy(getType());
+
+ //
+ // A pair of nodes is to be folded together
+ //
+
+ const TIntermConstantUnion *rightNode = rightConstantNode->getAsConstantUnion();
+ TConstUnionArray leftUnionArray = getConstArray();
+ TConstUnionArray rightUnionArray = rightNode->getConstArray();
+
+ // Figure out the size of the result
+ int newComps;
+ int constComps;
+ switch(op) {
+ case EOpMatrixTimesMatrix:
+ newComps = rightNode->getMatrixCols() * getMatrixRows();
+ break;
+ case EOpMatrixTimesVector:
+ newComps = getMatrixRows();
+ break;
+ case EOpVectorTimesMatrix:
+ newComps = rightNode->getMatrixCols();
+ break;
+ default:
+ newComps = getType().computeNumComponents();
+ constComps = rightConstantNode->getType().computeNumComponents();
+ if (constComps == 1 && newComps > 1) {
+ // for a case like vec4 f = vec4(2,3,4,5) + 1.2;
+ TConstUnionArray smearedArray(newComps, rightNode->getConstArray()[0]);
+ rightUnionArray = smearedArray;
+ } else if (constComps > 1 && newComps == 1) {
+ // for a case like vec4 f = 1.2 + vec4(2,3,4,5);
+ newComps = constComps;
+ rightUnionArray = rightNode->getConstArray();
+ TConstUnionArray smearedArray(newComps, getConstArray()[0]);
+ leftUnionArray = smearedArray;
+ returnType.shallowCopy(rightNode->getType());
+ }
+ break;
+ }
+
+ TConstUnionArray newConstArray(newComps);
+ TType constBool(EbtBool, EvqConst);
+
+ switch(op) {
+ case EOpAdd:
+ for (int i = 0; i < newComps; i++)
+ newConstArray[i] = leftUnionArray[i] + rightUnionArray[i];
+ break;
+ case EOpSub:
+ for (int i = 0; i < newComps; i++)
+ newConstArray[i] = leftUnionArray[i] - rightUnionArray[i];
+ break;
+
+ case EOpMul:
+ case EOpVectorTimesScalar:
+ case EOpMatrixTimesScalar:
+ for (int i = 0; i < newComps; i++)
+ newConstArray[i] = leftUnionArray[i] * rightUnionArray[i];
+ break;
+ case EOpMatrixTimesMatrix:
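+        // Matrix constants are stored column-major, so element (row, col) is
+        // expected at index [col * rows + row] by the loops below.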
+ for (int row = 0; row < getMatrixRows(); row++) {
+ for (int column = 0; column < rightNode->getMatrixCols(); column++) {
+                double sum = 0.0;
+ for (int i = 0; i < rightNode->getMatrixRows(); i++)
+ sum += leftUnionArray[i * getMatrixRows() + row].getDConst() * rightUnionArray[column * rightNode->getMatrixRows() + i].getDConst();
+ newConstArray[column * getMatrixRows() + row].setDConst(sum);
+ }
+ }
+ returnType.shallowCopy(TType(getType().getBasicType(), EvqConst, 0, rightNode->getMatrixCols(), getMatrixRows()));
+ break;
+ case EOpDiv:
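+        // Constant folding must not execute undefined behavior: floating-point
+        // division by zero folds to +/-INFINITY or NAN, and integer division by
+        // zero (or of the minimum value by -1) folds to the type's extreme value.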
+ for (int i = 0; i < newComps; i++) {
+ switch (getType().getBasicType()) {
+ case EbtDouble:
+ case EbtFloat:
+ case EbtFloat16:
+ if (rightUnionArray[i].getDConst() != 0.0)
+ newConstArray[i].setDConst(leftUnionArray[i].getDConst() / rightUnionArray[i].getDConst());
+ else if (leftUnionArray[i].getDConst() > 0.0)
+ newConstArray[i].setDConst((double)INFINITY);
+ else if (leftUnionArray[i].getDConst() < 0.0)
+ newConstArray[i].setDConst(-(double)INFINITY);
+ else
+ newConstArray[i].setDConst((double)NAN);
+ break;
+ case EbtInt8:
+ if (rightUnionArray[i] == (signed char)0)
+ newConstArray[i].setI8Const((signed char)0x7F);
+ else if (rightUnionArray[i].getI8Const() == (signed char)-1 && leftUnionArray[i].getI8Const() == (signed char)-0x80)
+ newConstArray[i].setI8Const((signed char)-0x80);
+ else
+ newConstArray[i].setI8Const(leftUnionArray[i].getI8Const() / rightUnionArray[i].getI8Const());
+ break;
+
+ case EbtUint8:
+ if (rightUnionArray[i] == (unsigned char)0u)
+ newConstArray[i].setU8Const((unsigned char)0xFFu);
+ else
+ newConstArray[i].setU8Const(leftUnionArray[i].getU8Const() / rightUnionArray[i].getU8Const());
+ break;
+
+ case EbtInt16:
+ if (rightUnionArray[i] == (signed short)0)
+ newConstArray[i].setI16Const((signed short)0x7FFF);
+ else if (rightUnionArray[i].getI16Const() == (signed short)-1 && leftUnionArray[i].getI16Const() == (signed short)-0x8000)
+ newConstArray[i].setI16Const((signed short)-0x8000);
+ else
+ newConstArray[i].setI16Const(leftUnionArray[i].getI16Const() / rightUnionArray[i].getI16Const());
+ break;
+
+ case EbtUint16:
+ if (rightUnionArray[i] == (unsigned short)0u)
+ newConstArray[i].setU16Const((unsigned short)0xFFFFu);
+ else
+ newConstArray[i].setU16Const(leftUnionArray[i].getU16Const() / rightUnionArray[i].getU16Const());
+ break;
+
+ case EbtInt:
+ if (rightUnionArray[i] == 0)
+ newConstArray[i].setIConst(0x7FFFFFFF);
+ else if (rightUnionArray[i].getIConst() == -1 && leftUnionArray[i].getIConst() == (int)-0x80000000ll)
+ newConstArray[i].setIConst((int)-0x80000000ll);
+ else
+ newConstArray[i].setIConst(leftUnionArray[i].getIConst() / rightUnionArray[i].getIConst());
+ break;
+
+ case EbtUint:
+ if (rightUnionArray[i] == 0u)
+ newConstArray[i].setUConst(0xFFFFFFFFu);
+ else
+ newConstArray[i].setUConst(leftUnionArray[i].getUConst() / rightUnionArray[i].getUConst());
+ break;
+
+ case EbtInt64:
+ if (rightUnionArray[i] == 0ll)
+ newConstArray[i].setI64Const(0x7FFFFFFFFFFFFFFFll);
+ else if (rightUnionArray[i].getI64Const() == -1 && leftUnionArray[i].getI64Const() == (long long)-0x8000000000000000ll)
+ newConstArray[i].setI64Const((long long)-0x8000000000000000ll);
+ else
+ newConstArray[i].setI64Const(leftUnionArray[i].getI64Const() / rightUnionArray[i].getI64Const());
+ break;
+
+ case EbtUint64:
+ if (rightUnionArray[i] == 0ull)
+ newConstArray[i].setU64Const(0xFFFFFFFFFFFFFFFFull);
+ else
+ newConstArray[i].setU64Const(leftUnionArray[i].getU64Const() / rightUnionArray[i].getU64Const());
+ break;
+ default:
+ return 0;
+ }
+ }
+ break;
+
+ case EOpMatrixTimesVector:
+ for (int i = 0; i < getMatrixRows(); i++) {
+            double sum = 0.0;
+ for (int j = 0; j < rightNode->getVectorSize(); j++) {
+ sum += leftUnionArray[j*getMatrixRows() + i].getDConst() * rightUnionArray[j].getDConst();
+ }
+ newConstArray[i].setDConst(sum);
+ }
+
+ returnType.shallowCopy(TType(getBasicType(), EvqConst, getMatrixRows()));
+ break;
+
+ case EOpVectorTimesMatrix:
+ for (int i = 0; i < rightNode->getMatrixCols(); i++) {
+            double sum = 0.0;
+ for (int j = 0; j < getVectorSize(); j++)
+ sum += leftUnionArray[j].getDConst() * rightUnionArray[i*rightNode->getMatrixRows() + j].getDConst();
+ newConstArray[i].setDConst(sum);
+ }
+
+ returnType.shallowCopy(TType(getBasicType(), EvqConst, rightNode->getMatrixCols()));
+ break;
+
+ case EOpMod:
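+        // x % 0 folds to x, and INT_MIN % -1 (which can trap at run time in C++)
+        // is special-cased to fold to 0.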
+ for (int i = 0; i < newComps; i++) {
+ if (rightUnionArray[i] == 0)
+ newConstArray[i] = leftUnionArray[i];
+ else {
+ switch (getType().getBasicType()) {
+ case EbtInt:
+ if (rightUnionArray[i].getIConst() == -1 && leftUnionArray[i].getIConst() == INT_MIN) {
+ newConstArray[i].setIConst(0);
+ break;
+ } else goto modulo_default;
+
+ case EbtInt64:
+ if (rightUnionArray[i].getI64Const() == -1 && leftUnionArray[i].getI64Const() == LLONG_MIN) {
+ newConstArray[i].setI64Const(0);
+ break;
+ } else goto modulo_default;
+#ifdef AMD_EXTENSIONS
+ case EbtInt16:
+ if (rightUnionArray[i].getIConst() == -1 && leftUnionArray[i].getIConst() == SHRT_MIN) {
+ newConstArray[i].setIConst(0);
+ break;
+ } else goto modulo_default;
+#endif
+ default:
+ modulo_default:
+ newConstArray[i] = leftUnionArray[i] % rightUnionArray[i];
+ }
+ }
+ }
+ break;
+
+ case EOpRightShift:
+ for (int i = 0; i < newComps; i++)
+ newConstArray[i] = leftUnionArray[i] >> rightUnionArray[i];
+ break;
+
+ case EOpLeftShift:
+ for (int i = 0; i < newComps; i++)
+ newConstArray[i] = leftUnionArray[i] << rightUnionArray[i];
+ break;
+
+ case EOpAnd:
+ for (int i = 0; i < newComps; i++)
+ newConstArray[i] = leftUnionArray[i] & rightUnionArray[i];
+ break;
+ case EOpInclusiveOr:
+ for (int i = 0; i < newComps; i++)
+ newConstArray[i] = leftUnionArray[i] | rightUnionArray[i];
+ break;
+ case EOpExclusiveOr:
+ for (int i = 0; i < newComps; i++)
+ newConstArray[i] = leftUnionArray[i] ^ rightUnionArray[i];
+ break;
+
+    case EOpLogicalAnd: // this code is written for possible future use; it will not get executed currently
+ for (int i = 0; i < newComps; i++)
+ newConstArray[i] = leftUnionArray[i] && rightUnionArray[i];
+ break;
+
+    case EOpLogicalOr:  // this code is written for possible future use; it will not get executed currently
+ for (int i = 0; i < newComps; i++)
+ newConstArray[i] = leftUnionArray[i] || rightUnionArray[i];
+ break;
+
+ case EOpLogicalXor:
+ for (int i = 0; i < newComps; i++) {
+ switch (getType().getBasicType()) {
+            case EbtBool: newConstArray[i].setBConst(! (leftUnionArray[i] == rightUnionArray[i])); break;
+ default: assert(false && "Default missing");
+ }
+ }
+ break;
+
+ case EOpLessThan:
+ newConstArray[0].setBConst(leftUnionArray[0] < rightUnionArray[0]);
+ returnType.shallowCopy(constBool);
+ break;
+ case EOpGreaterThan:
+ newConstArray[0].setBConst(leftUnionArray[0] > rightUnionArray[0]);
+ returnType.shallowCopy(constBool);
+ break;
+ case EOpLessThanEqual:
+ newConstArray[0].setBConst(! (leftUnionArray[0] > rightUnionArray[0]));
+ returnType.shallowCopy(constBool);
+ break;
+ case EOpGreaterThanEqual:
+ newConstArray[0].setBConst(! (leftUnionArray[0] < rightUnionArray[0]));
+ returnType.shallowCopy(constBool);
+ break;
+ case EOpEqual:
+ newConstArray[0].setBConst(rightNode->getConstArray() == leftUnionArray);
+ returnType.shallowCopy(constBool);
+ break;
+ case EOpNotEqual:
+ newConstArray[0].setBConst(rightNode->getConstArray() != leftUnionArray);
+ returnType.shallowCopy(constBool);
+ break;
+
+ default:
+ return 0;
+ }
+
+ TIntermConstantUnion *newNode = new TIntermConstantUnion(newConstArray, returnType);
+ newNode->setLoc(getLoc());
+
+ return newNode;
+}
+
+//
+// Do single unary node folding
+//
+// Returns a new node representing the result.
+//
+TIntermTyped* TIntermConstantUnion::fold(TOperator op, const TType& returnType) const
+{
+ // First, size the result, which is mostly the same as the argument's size,
+ // but not always, and classify what is componentwise.
+ // Also, eliminate cases that can't be compile-time constant.
+ int resultSize;
+ bool componentWise = true;
+
+ int objectSize = getType().computeNumComponents();
+ switch (op) {
+ case EOpDeterminant:
+ case EOpAny:
+ case EOpAll:
+ case EOpLength:
+ componentWise = false;
+ resultSize = 1;
+ break;
+
+ case EOpEmitStreamVertex:
+ case EOpEndStreamPrimitive:
+ // These don't actually fold
+ return 0;
+
+ case EOpPackSnorm2x16:
+ case EOpPackUnorm2x16:
+ case EOpPackHalf2x16:
+ componentWise = false;
+ resultSize = 1;
+ break;
+
+ case EOpUnpackSnorm2x16:
+ case EOpUnpackUnorm2x16:
+ case EOpUnpackHalf2x16:
+ componentWise = false;
+ resultSize = 2;
+ break;
+
+ case EOpPack16:
+ case EOpPack32:
+ case EOpPack64:
+ case EOpUnpack32:
+ case EOpUnpack16:
+ case EOpUnpack8:
+ case EOpNormalize:
+ componentWise = false;
+ resultSize = objectSize;
+ break;
+
+ default:
+ resultSize = objectSize;
+ break;
+ }
+
+ // Set up for processing
+ TConstUnionArray newConstArray(resultSize);
+ const TConstUnionArray& unionArray = getConstArray();
+
+ // Process non-component-wise operations
+ switch (op) {
+ case EOpLength:
+ case EOpNormalize:
+ {
+ double sum = 0;
+ for (int i = 0; i < objectSize; i++)
+ sum += unionArray[i].getDConst() * unionArray[i].getDConst();
+ double length = sqrt(sum);
+ if (op == EOpLength)
+ newConstArray[0].setDConst(length);
+ else {
+ for (int i = 0; i < objectSize; i++)
+ newConstArray[i].setDConst(unionArray[i].getDConst() / length);
+ }
+ break;
+ }
+
+ case EOpAny:
+ {
+ bool result = false;
+ for (int i = 0; i < objectSize; i++) {
+ if (unionArray[i].getBConst())
+ result = true;
+ }
+ newConstArray[0].setBConst(result);
+ break;
+ }
+ case EOpAll:
+ {
+ bool result = true;
+ for (int i = 0; i < objectSize; i++) {
+ if (! unionArray[i].getBConst())
+ result = false;
+ }
+ newConstArray[0].setBConst(result);
+ break;
+ }
+
+ // TODO: 3.0 Functionality: unary constant folding: the rest of the ops have to be fleshed out
+
+ case EOpPackSnorm2x16:
+ case EOpPackUnorm2x16:
+ case EOpPackHalf2x16:
+ case EOpPack16:
+ case EOpPack32:
+ case EOpPack64:
+ case EOpUnpack32:
+ case EOpUnpack16:
+ case EOpUnpack8:
+
+ case EOpUnpackSnorm2x16:
+ case EOpUnpackUnorm2x16:
+ case EOpUnpackHalf2x16:
+
+ case EOpDeterminant:
+ case EOpMatrixInverse:
+ case EOpTranspose:
+ return 0;
+
+ default:
+ assert(componentWise);
+ break;
+ }
+
+    // Turn off the componentwise loop: the non-componentwise ops were fully
+    // handled above, so setting objectSize to 0 skips the loop below.
+ if (! componentWise)
+ objectSize = 0;
+
+ // Process component-wise operations
+ for (int i = 0; i < objectSize; i++) {
+ switch (op) {
+ case EOpNegative:
+ switch (getType().getBasicType()) {
+ case EbtDouble:
+ case EbtFloat16:
+ case EbtFloat: newConstArray[i].setDConst(-unionArray[i].getDConst()); break;
+ case EbtInt8: newConstArray[i].setI8Const(-unionArray[i].getI8Const()); break;
+ case EbtUint8: newConstArray[i].setU8Const(static_cast<unsigned int>(-static_cast<signed int>(unionArray[i].getU8Const()))); break;
+ case EbtInt16: newConstArray[i].setI16Const(-unionArray[i].getI16Const()); break;
+            case EbtUint16: newConstArray[i].setU16Const(static_cast<unsigned int>(-static_cast<signed int>(unionArray[i].getU16Const()))); break;
+ case EbtInt: newConstArray[i].setIConst(-unionArray[i].getIConst()); break;
+ case EbtUint: newConstArray[i].setUConst(static_cast<unsigned int>(-static_cast<int>(unionArray[i].getUConst()))); break;
+ case EbtInt64: newConstArray[i].setI64Const(-unionArray[i].getI64Const()); break;
+ case EbtUint64: newConstArray[i].setU64Const(static_cast<unsigned long long>(-static_cast<long long>(unionArray[i].getU64Const()))); break;
+ default:
+ return 0;
+ }
+ break;
+ case EOpLogicalNot:
+ case EOpVectorLogicalNot:
+ switch (getType().getBasicType()) {
+ case EbtBool: newConstArray[i].setBConst(!unionArray[i].getBConst()); break;
+ default:
+ return 0;
+ }
+ break;
+ case EOpBitwiseNot:
+ newConstArray[i] = ~unionArray[i];
+ break;
+ case EOpRadians:
+ newConstArray[i].setDConst(unionArray[i].getDConst() * pi / 180.0);
+ break;
+ case EOpDegrees:
+ newConstArray[i].setDConst(unionArray[i].getDConst() * 180.0 / pi);
+ break;
+ case EOpSin:
+ newConstArray[i].setDConst(sin(unionArray[i].getDConst()));
+ break;
+ case EOpCos:
+ newConstArray[i].setDConst(cos(unionArray[i].getDConst()));
+ break;
+ case EOpTan:
+ newConstArray[i].setDConst(tan(unionArray[i].getDConst()));
+ break;
+ case EOpAsin:
+ newConstArray[i].setDConst(asin(unionArray[i].getDConst()));
+ break;
+ case EOpAcos:
+ newConstArray[i].setDConst(acos(unionArray[i].getDConst()));
+ break;
+ case EOpAtan:
+ newConstArray[i].setDConst(atan(unionArray[i].getDConst()));
+ break;
+
+ case EOpDPdx:
+ case EOpDPdy:
+ case EOpFwidth:
+ case EOpDPdxFine:
+ case EOpDPdyFine:
+ case EOpFwidthFine:
+ case EOpDPdxCoarse:
+ case EOpDPdyCoarse:
+ case EOpFwidthCoarse:
+ // The derivatives are all mandated to create a constant 0.
+ newConstArray[i].setDConst(0.0);
+ break;
+
+ case EOpExp:
+ newConstArray[i].setDConst(exp(unionArray[i].getDConst()));
+ break;
+ case EOpLog:
+ newConstArray[i].setDConst(log(unionArray[i].getDConst()));
+ break;
+ case EOpExp2:
+ {
+ const double inv_log2_e = 0.69314718055994530941723212145818;
+ newConstArray[i].setDConst(exp(unionArray[i].getDConst() * inv_log2_e));
+ break;
+ }
+ case EOpLog2:
+ {
+ const double log2_e = 1.4426950408889634073599246810019;
+ newConstArray[i].setDConst(log2_e * log(unionArray[i].getDConst()));
+ break;
+ }
+ case EOpSqrt:
+ newConstArray[i].setDConst(sqrt(unionArray[i].getDConst()));
+ break;
+ case EOpInverseSqrt:
+ newConstArray[i].setDConst(1.0 / sqrt(unionArray[i].getDConst()));
+ break;
+
+ case EOpAbs:
+ if (unionArray[i].getType() == EbtDouble)
+ newConstArray[i].setDConst(fabs(unionArray[i].getDConst()));
+ else if (unionArray[i].getType() == EbtInt)
+ newConstArray[i].setIConst(abs(unionArray[i].getIConst()));
+ else
+ newConstArray[i] = unionArray[i];
+ break;
+ case EOpSign:
+            #define SIGN(X) ((X) == 0 ? 0 : ((X) < 0 ? -1 : 1))
+ if (unionArray[i].getType() == EbtDouble)
+ newConstArray[i].setDConst(SIGN(unionArray[i].getDConst()));
+ else
+ newConstArray[i].setIConst(SIGN(unionArray[i].getIConst()));
+ break;
+ case EOpFloor:
+ newConstArray[i].setDConst(floor(unionArray[i].getDConst()));
+ break;
+ case EOpTrunc:
+ if (unionArray[i].getDConst() > 0)
+ newConstArray[i].setDConst(floor(unionArray[i].getDConst()));
+ else
+ newConstArray[i].setDConst(ceil(unionArray[i].getDConst()));
+ break;
+ case EOpRound:
+ newConstArray[i].setDConst(floor(0.5 + unionArray[i].getDConst()));
+ break;
+ case EOpRoundEven:
+ {
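+            // Round half to even: exact .5 fractions go to the nearest even
+            // integer, e.g. roundEven(0.5) -> 0.0, roundEven(1.5) -> 2.0,
+            // roundEven(2.5) -> 2.0.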
+ double flr = floor(unionArray[i].getDConst());
+ bool even = flr / 2.0 == floor(flr / 2.0);
+ double rounded = even ? ceil(unionArray[i].getDConst() - 0.5) : floor(unionArray[i].getDConst() + 0.5);
+ newConstArray[i].setDConst(rounded);
+ break;
+ }
+ case EOpCeil:
+ newConstArray[i].setDConst(ceil(unionArray[i].getDConst()));
+ break;
+ case EOpFract:
+ {
+ double x = unionArray[i].getDConst();
+ newConstArray[i].setDConst(x - floor(x));
+ break;
+ }
+
+ case EOpIsNan:
+ {
+ newConstArray[i].setBConst(isNan(unionArray[i].getDConst()));
+ break;
+ }
+ case EOpIsInf:
+ {
+ newConstArray[i].setBConst(isInf(unionArray[i].getDConst()));
+ break;
+ }
+
+ case EOpConvInt8ToBool:
+ newConstArray[i].setBConst(unionArray[i].getI8Const() != 0); break;
+ case EOpConvUint8ToBool:
+ newConstArray[i].setBConst(unionArray[i].getU8Const() != 0); break;
+ case EOpConvInt16ToBool:
+ newConstArray[i].setBConst(unionArray[i].getI16Const() != 0); break;
+ case EOpConvUint16ToBool:
+ newConstArray[i].setBConst(unionArray[i].getU16Const() != 0); break;
+ case EOpConvIntToBool:
+ newConstArray[i].setBConst(unionArray[i].getIConst() != 0); break;
+ case EOpConvUintToBool:
+ newConstArray[i].setBConst(unionArray[i].getUConst() != 0); break;
+ case EOpConvInt64ToBool:
+ newConstArray[i].setBConst(unionArray[i].getI64Const() != 0); break;
+ case EOpConvUint64ToBool:
+            newConstArray[i].setBConst(unionArray[i].getU64Const() != 0); break;
+ case EOpConvFloat16ToBool:
+ newConstArray[i].setBConst(unionArray[i].getDConst() != 0); break;
+ case EOpConvFloatToBool:
+ newConstArray[i].setBConst(unionArray[i].getDConst() != 0); break;
+ case EOpConvDoubleToBool:
+ newConstArray[i].setBConst(unionArray[i].getDConst() != 0); break;
+
+ case EOpConvBoolToInt8:
+ newConstArray[i].setI8Const(unionArray[i].getBConst()); break;
+ case EOpConvBoolToUint8:
+ newConstArray[i].setU8Const(unionArray[i].getBConst()); break;
+ case EOpConvBoolToInt16:
+ newConstArray[i].setI16Const(unionArray[i].getBConst()); break;
+ case EOpConvBoolToUint16:
+ newConstArray[i].setU16Const(unionArray[i].getBConst()); break;
+ case EOpConvBoolToInt:
+ newConstArray[i].setIConst(unionArray[i].getBConst()); break;
+ case EOpConvBoolToUint:
+ newConstArray[i].setUConst(unionArray[i].getBConst()); break;
+ case EOpConvBoolToInt64:
+ newConstArray[i].setI64Const(unionArray[i].getBConst()); break;
+ case EOpConvBoolToUint64:
+ newConstArray[i].setU64Const(unionArray[i].getBConst()); break;
+ case EOpConvBoolToFloat16:
+ newConstArray[i].setDConst(unionArray[i].getBConst()); break;
+ case EOpConvBoolToFloat:
+ newConstArray[i].setDConst(unionArray[i].getBConst()); break;
+ case EOpConvBoolToDouble:
+ newConstArray[i].setDConst(unionArray[i].getBConst()); break;
+
+ case EOpConvInt8ToInt16:
+ newConstArray[i].setI16Const(unionArray[i].getI8Const()); break;
+ case EOpConvInt8ToInt:
+ newConstArray[i].setIConst(unionArray[i].getI8Const()); break;
+ case EOpConvInt8ToInt64:
+ newConstArray[i].setI64Const(unionArray[i].getI8Const()); break;
+ case EOpConvInt8ToUint8:
+ newConstArray[i].setU8Const(unionArray[i].getI8Const()); break;
+ case EOpConvInt8ToUint16:
+ newConstArray[i].setU16Const(unionArray[i].getI8Const()); break;
+ case EOpConvInt8ToUint:
+ newConstArray[i].setUConst(unionArray[i].getI8Const()); break;
+ case EOpConvInt8ToUint64:
+ newConstArray[i].setU64Const(unionArray[i].getI8Const()); break;
+ case EOpConvUint8ToInt8:
+ newConstArray[i].setI8Const(unionArray[i].getU8Const()); break;
+ case EOpConvUint8ToInt16:
+ newConstArray[i].setI16Const(unionArray[i].getU8Const()); break;
+ case EOpConvUint8ToInt:
+ newConstArray[i].setIConst(unionArray[i].getU8Const()); break;
+ case EOpConvUint8ToInt64:
+ newConstArray[i].setI64Const(unionArray[i].getU8Const()); break;
+ case EOpConvUint8ToUint16:
+ newConstArray[i].setU16Const(unionArray[i].getU8Const()); break;
+ case EOpConvUint8ToUint:
+ newConstArray[i].setUConst(unionArray[i].getU8Const()); break;
+ case EOpConvUint8ToUint64:
+ newConstArray[i].setU64Const(unionArray[i].getU8Const()); break;
+ case EOpConvInt8ToFloat16:
+ newConstArray[i].setDConst(unionArray[i].getI8Const()); break;
+ case EOpConvInt8ToFloat:
+ newConstArray[i].setDConst(unionArray[i].getI8Const()); break;
+ case EOpConvInt8ToDouble:
+ newConstArray[i].setDConst(unionArray[i].getI8Const()); break;
+ case EOpConvUint8ToFloat16:
+ newConstArray[i].setDConst(unionArray[i].getU8Const()); break;
+ case EOpConvUint8ToFloat:
+ newConstArray[i].setDConst(unionArray[i].getU8Const()); break;
+ case EOpConvUint8ToDouble:
+ newConstArray[i].setDConst(unionArray[i].getU8Const()); break;
+
+ case EOpConvInt16ToInt8:
+ newConstArray[i].setI8Const(static_cast<signed char>(unionArray[i].getI16Const())); break;
+ case EOpConvInt16ToInt:
+ newConstArray[i].setIConst(unionArray[i].getI16Const()); break;
+ case EOpConvInt16ToInt64:
+ newConstArray[i].setI64Const(unionArray[i].getI16Const()); break;
+ case EOpConvInt16ToUint8:
+ newConstArray[i].setU8Const(static_cast<unsigned char>(unionArray[i].getI16Const())); break;
+ case EOpConvInt16ToUint16:
+ newConstArray[i].setU16Const(unionArray[i].getI16Const()); break;
+ case EOpConvInt16ToUint:
+ newConstArray[i].setUConst(unionArray[i].getI16Const()); break;
+ case EOpConvInt16ToUint64:
+ newConstArray[i].setU64Const(unionArray[i].getI16Const()); break;
+ case EOpConvUint16ToInt8:
+ newConstArray[i].setI8Const(static_cast<signed char>(unionArray[i].getU16Const())); break;
+ case EOpConvUint16ToInt16:
+ newConstArray[i].setI16Const(unionArray[i].getU16Const()); break;
+ case EOpConvUint16ToInt:
+ newConstArray[i].setIConst(unionArray[i].getU16Const()); break;
+ case EOpConvUint16ToInt64:
+ newConstArray[i].setI64Const(unionArray[i].getU16Const()); break;
+ case EOpConvUint16ToUint8:
+ newConstArray[i].setU8Const(static_cast<unsigned char>(unionArray[i].getU16Const())); break;
+
+ case EOpConvUint16ToUint:
+ newConstArray[i].setUConst(unionArray[i].getU16Const()); break;
+ case EOpConvUint16ToUint64:
+ newConstArray[i].setU64Const(unionArray[i].getU16Const()); break;
+ case EOpConvInt16ToFloat16:
+ newConstArray[i].setDConst(unionArray[i].getI16Const()); break;
+ case EOpConvInt16ToFloat:
+ newConstArray[i].setDConst(unionArray[i].getI16Const()); break;
+ case EOpConvInt16ToDouble:
+ newConstArray[i].setDConst(unionArray[i].getI16Const()); break;
+ case EOpConvUint16ToFloat16:
+ newConstArray[i].setDConst(unionArray[i].getU16Const()); break;
+ case EOpConvUint16ToFloat:
+ newConstArray[i].setDConst(unionArray[i].getU16Const()); break;
+ case EOpConvUint16ToDouble:
+ newConstArray[i].setDConst(unionArray[i].getU16Const()); break;
+
+ case EOpConvIntToInt8:
+ newConstArray[i].setI8Const((signed char)unionArray[i].getIConst()); break;
+ case EOpConvIntToInt16:
+ newConstArray[i].setI16Const((signed short)unionArray[i].getIConst()); break;
+ case EOpConvIntToInt64:
+ newConstArray[i].setI64Const(unionArray[i].getIConst()); break;
+ case EOpConvIntToUint8:
+ newConstArray[i].setU8Const((unsigned char)unionArray[i].getIConst()); break;
+ case EOpConvIntToUint16:
+            newConstArray[i].setU16Const((unsigned short)unionArray[i].getIConst()); break;
+ case EOpConvIntToUint:
+ newConstArray[i].setUConst(unionArray[i].getIConst()); break;
+ case EOpConvIntToUint64:
+ newConstArray[i].setU64Const(unionArray[i].getIConst()); break;
+
+ case EOpConvUintToInt8:
+ newConstArray[i].setI8Const((signed char)unionArray[i].getUConst()); break;
+ case EOpConvUintToInt16:
+ newConstArray[i].setI16Const((signed short)unionArray[i].getUConst()); break;
+ case EOpConvUintToInt:
+ newConstArray[i].setIConst(unionArray[i].getUConst()); break;
+ case EOpConvUintToInt64:
+ newConstArray[i].setI64Const(unionArray[i].getUConst()); break;
+ case EOpConvUintToUint8:
+ newConstArray[i].setU8Const((unsigned char)unionArray[i].getUConst()); break;
+ case EOpConvUintToUint16:
+ newConstArray[i].setU16Const((unsigned short)unionArray[i].getUConst()); break;
+ case EOpConvUintToUint64:
+ newConstArray[i].setU64Const(unionArray[i].getUConst()); break;
+ case EOpConvIntToFloat16:
+ newConstArray[i].setDConst(unionArray[i].getIConst()); break;
+ case EOpConvIntToFloat:
+ newConstArray[i].setDConst(unionArray[i].getIConst()); break;
+ case EOpConvIntToDouble:
+ newConstArray[i].setDConst(unionArray[i].getIConst()); break;
+ case EOpConvUintToFloat16:
+ newConstArray[i].setDConst(unionArray[i].getUConst()); break;
+ case EOpConvUintToFloat:
+ newConstArray[i].setDConst(unionArray[i].getUConst()); break;
+ case EOpConvUintToDouble:
+ newConstArray[i].setDConst(unionArray[i].getUConst()); break;
+ case EOpConvInt64ToInt8:
+ newConstArray[i].setI8Const(static_cast<signed char>(unionArray[i].getI64Const())); break;
+ case EOpConvInt64ToInt16:
+ newConstArray[i].setI16Const(static_cast<signed short>(unionArray[i].getI64Const())); break;
+ case EOpConvInt64ToInt:
+ newConstArray[i].setIConst(static_cast<int>(unionArray[i].getI64Const())); break;
+ case EOpConvInt64ToUint8:
+ newConstArray[i].setU8Const(static_cast<unsigned char>(unionArray[i].getI64Const())); break;
+ case EOpConvInt64ToUint16:
+ newConstArray[i].setU16Const(static_cast<unsigned short>(unionArray[i].getI64Const())); break;
+ case EOpConvInt64ToUint:
+ newConstArray[i].setUConst(static_cast<unsigned int>(unionArray[i].getI64Const())); break;
+ case EOpConvInt64ToUint64:
+ newConstArray[i].setU64Const(unionArray[i].getI64Const()); break;
+ case EOpConvUint64ToInt8:
+ newConstArray[i].setI8Const(static_cast<signed char>(unionArray[i].getU64Const())); break;
+ case EOpConvUint64ToInt16:
+ newConstArray[i].setI16Const(static_cast<signed short>(unionArray[i].getU64Const())); break;
+ case EOpConvUint64ToInt:
+ newConstArray[i].setIConst(static_cast<int>(unionArray[i].getU64Const())); break;
+ case EOpConvUint64ToInt64:
+ newConstArray[i].setI64Const(unionArray[i].getU64Const()); break;
+ case EOpConvUint64ToUint8:
+ newConstArray[i].setU8Const(static_cast<unsigned char>(unionArray[i].getU64Const())); break;
+ case EOpConvUint64ToUint16:
+ newConstArray[i].setU16Const(static_cast<unsigned short>(unionArray[i].getU64Const())); break;
+ case EOpConvUint64ToUint:
+ newConstArray[i].setUConst(static_cast<unsigned int>(unionArray[i].getU64Const())); break;
+ case EOpConvInt64ToFloat16:
+ newConstArray[i].setDConst(static_cast<double>(unionArray[i].getI64Const())); break;
+ case EOpConvInt64ToFloat:
+ newConstArray[i].setDConst(static_cast<double>(unionArray[i].getI64Const())); break;
+ case EOpConvInt64ToDouble:
+ newConstArray[i].setDConst(static_cast<double>(unionArray[i].getI64Const())); break;
+ case EOpConvUint64ToFloat16:
+ newConstArray[i].setDConst(static_cast<double>(unionArray[i].getU64Const())); break;
+ case EOpConvUint64ToFloat:
+ newConstArray[i].setDConst(static_cast<double>(unionArray[i].getU64Const())); break;
+ case EOpConvUint64ToDouble:
+ newConstArray[i].setDConst(static_cast<double>(unionArray[i].getU64Const())); break;
+ case EOpConvFloat16ToInt8:
+ newConstArray[i].setI8Const(static_cast<signed char>(unionArray[i].getDConst())); break;
+ case EOpConvFloat16ToInt16:
+ newConstArray[i].setI16Const(static_cast<signed short>(unionArray[i].getDConst())); break;
+ case EOpConvFloat16ToInt:
+ newConstArray[i].setIConst(static_cast<int>(unionArray[i].getDConst())); break;
+ case EOpConvFloat16ToInt64:
+ newConstArray[i].setI64Const(static_cast<long long>(unionArray[i].getDConst())); break;
+ case EOpConvFloat16ToUint8:
+ newConstArray[i].setU8Const(static_cast<unsigned char>(unionArray[i].getDConst())); break;
+ case EOpConvFloat16ToUint16:
+ newConstArray[i].setU16Const(static_cast<unsigned short>(unionArray[i].getDConst())); break;
+ case EOpConvFloat16ToUint:
+ newConstArray[i].setUConst(static_cast<unsigned int>(unionArray[i].getDConst())); break;
+ case EOpConvFloat16ToUint64:
+ newConstArray[i].setU64Const(static_cast<unsigned long long>(unionArray[i].getDConst())); break;
+ case EOpConvFloat16ToFloat:
+ newConstArray[i].setDConst(unionArray[i].getDConst()); break;
+ case EOpConvFloat16ToDouble:
+ newConstArray[i].setDConst(unionArray[i].getDConst()); break;
+ case EOpConvFloatToInt8:
+ newConstArray[i].setI8Const(static_cast<signed char>(unionArray[i].getDConst())); break;
+ case EOpConvFloatToInt16:
+ newConstArray[i].setI16Const(static_cast<signed short>(unionArray[i].getDConst())); break;
+ case EOpConvFloatToInt:
+ newConstArray[i].setIConst(static_cast<int>(unionArray[i].getDConst())); break;
+ case EOpConvFloatToInt64:
+ newConstArray[i].setI64Const(static_cast<long long>(unionArray[i].getDConst())); break;
+ case EOpConvFloatToUint8:
+ newConstArray[i].setU8Const(static_cast<unsigned char>(unionArray[i].getDConst())); break;
+ case EOpConvFloatToUint16:
+ newConstArray[i].setU16Const(static_cast<unsigned short>(unionArray[i].getDConst())); break;
+ case EOpConvFloatToUint:
+ newConstArray[i].setUConst(static_cast<unsigned int>(unionArray[i].getDConst())); break;
+ case EOpConvFloatToUint64:
+ newConstArray[i].setU64Const(static_cast<unsigned long long>(unionArray[i].getDConst())); break;
+ case EOpConvFloatToFloat16:
+ newConstArray[i].setDConst(unionArray[i].getDConst()); break;
+ case EOpConvFloatToDouble:
+ newConstArray[i].setDConst(unionArray[i].getDConst()); break;
+ case EOpConvDoubleToInt8:
+ newConstArray[i].setI8Const(static_cast<signed char>(unionArray[i].getDConst())); break;
+ case EOpConvDoubleToInt16:
+ newConstArray[i].setI16Const(static_cast<signed short>(unionArray[i].getDConst())); break;
+ case EOpConvDoubleToInt:
+ newConstArray[i].setIConst(static_cast<int>(unionArray[i].getDConst())); break;
+ case EOpConvDoubleToInt64:
+ newConstArray[i].setI64Const(static_cast<long long>(unionArray[i].getDConst())); break;
+ case EOpConvDoubleToUint8:
+ newConstArray[i].setU8Const(static_cast<unsigned char>(unionArray[i].getDConst())); break;
+ case EOpConvDoubleToUint16:
+ newConstArray[i].setU16Const(static_cast<unsigned short>(unionArray[i].getDConst())); break;
+ case EOpConvDoubleToUint:
+ newConstArray[i].setUConst(static_cast<unsigned int>(unionArray[i].getDConst())); break;
+ case EOpConvDoubleToUint64:
+ newConstArray[i].setU64Const(static_cast<unsigned long long>(unionArray[i].getDConst())); break;
+ case EOpConvDoubleToFloat16:
+ newConstArray[i].setDConst(unionArray[i].getDConst()); break;
+ case EOpConvDoubleToFloat:
+ newConstArray[i].setDConst(unionArray[i].getDConst()); break;
+ case EOpConvPtrToUint64:
+ case EOpConvUint64ToPtr:
+ case EOpConstructReference:
+ newConstArray[i].setU64Const(unionArray[i].getU64Const()); break;
+
+ // TODO: 3.0 Functionality: unary constant folding: the rest of the ops have to be fleshed out
+
+ case EOpSinh:
+ case EOpCosh:
+ case EOpTanh:
+ case EOpAsinh:
+ case EOpAcosh:
+ case EOpAtanh:
+
+ case EOpFloatBitsToInt:
+ case EOpFloatBitsToUint:
+ case EOpIntBitsToFloat:
+ case EOpUintBitsToFloat:
+ case EOpDoubleBitsToInt64:
+ case EOpDoubleBitsToUint64:
+ case EOpInt64BitsToDouble:
+ case EOpUint64BitsToDouble:
+ case EOpFloat16BitsToInt16:
+ case EOpFloat16BitsToUint16:
+ case EOpInt16BitsToFloat16:
+ case EOpUint16BitsToFloat16:
+ default:
+ return 0;
+ }
+ }
+
+ TIntermConstantUnion *newNode = new TIntermConstantUnion(newConstArray, returnType);
+ newNode->getWritableType().getQualifier().storage = EvqConst;
+ newNode->setLoc(getLoc());
+
+ return newNode;
+}
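+// Illustrative sketch of using the unary fold above (only fold() itself is
+// glslang API; "constNode" is a hypothetical constant node for vec2(3.0, 4.0)):
+//
+//     TIntermTyped* folded = constNode->fold(EOpNormalize, constNode->getType());
+//
+// This returns a new TIntermConstantUnion holding (0.6, 0.8), since
+// length(vec2(3.0, 4.0)) is 5.0 and EOpNormalize divides each component by it.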
+
+//
+// Do constant folding for an aggregate node that has all its children
+// as constants and an operator that requires constant folding.
+//
+TIntermTyped* TIntermediate::fold(TIntermAggregate* aggrNode)
+{
+ if (aggrNode == nullptr)
+ return aggrNode;
+
+ if (! areAllChildConst(aggrNode))
+ return aggrNode;
+
+ if (aggrNode->isConstructor())
+ return foldConstructor(aggrNode);
+
+ TIntermSequence& children = aggrNode->getSequence();
+
+ // First, see if this is an operation to constant fold, kick out if not,
+ // see what size the result is if so.
+
+    bool componentwise = false;  // also set for ops where a scalar argument is repeated (smeared) to make per-component results
+ int objectSize;
+ switch (aggrNode->getOp()) {
+ case EOpAtan:
+ case EOpPow:
+ case EOpMin:
+ case EOpMax:
+ case EOpMix:
+ case EOpClamp:
+ case EOpLessThan:
+ case EOpGreaterThan:
+ case EOpLessThanEqual:
+ case EOpGreaterThanEqual:
+ case EOpVectorEqual:
+ case EOpVectorNotEqual:
+ componentwise = true;
+ objectSize = children[0]->getAsConstantUnion()->getType().computeNumComponents();
+ break;
+ case EOpCross:
+ case EOpReflect:
+ case EOpRefract:
+ case EOpFaceForward:
+ objectSize = children[0]->getAsConstantUnion()->getType().computeNumComponents();
+ break;
+ case EOpDistance:
+ case EOpDot:
+ objectSize = 1;
+ break;
+ case EOpOuterProduct:
+ objectSize = children[0]->getAsTyped()->getType().getVectorSize() *
+ children[1]->getAsTyped()->getType().getVectorSize();
+ break;
+ case EOpStep:
+ componentwise = true;
+ objectSize = std::max(children[0]->getAsTyped()->getType().getVectorSize(),
+ children[1]->getAsTyped()->getType().getVectorSize());
+ break;
+ case EOpSmoothStep:
+ componentwise = true;
+ objectSize = std::max(children[0]->getAsTyped()->getType().getVectorSize(),
+ children[2]->getAsTyped()->getType().getVectorSize());
+ break;
+ default:
+ return aggrNode;
+ }
+ TConstUnionArray newConstArray(objectSize);
+
+ TVector<TConstUnionArray> childConstUnions;
+ for (unsigned int arg = 0; arg < children.size(); ++arg)
+ childConstUnions.push_back(children[arg]->getAsConstantUnion()->getConstArray());
+
+ if (componentwise) {
+ for (int comp = 0; comp < objectSize; comp++) {
+
+ // some arguments are scalars instead of matching vectors; simulate a smear
+ int arg0comp = std::min(comp, children[0]->getAsTyped()->getType().getVectorSize() - 1);
+ int arg1comp = 0;
+ if (children.size() > 1)
+ arg1comp = std::min(comp, children[1]->getAsTyped()->getType().getVectorSize() - 1);
+ int arg2comp = 0;
+ if (children.size() > 2)
+ arg2comp = std::min(comp, children[2]->getAsTyped()->getType().getVectorSize() - 1);
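+            // e.g. for max(vec3 v, float s), comp runs 0..2 while the scalar's
+            // vector size is 1, so arg1comp stays clamped to 0 and the scalar
+            // is smeared across every result component.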
+
+ switch (aggrNode->getOp()) {
+ case EOpAtan:
+ newConstArray[comp].setDConst(atan2(childConstUnions[0][arg0comp].getDConst(), childConstUnions[1][arg1comp].getDConst()));
+ break;
+ case EOpPow:
+ newConstArray[comp].setDConst(pow(childConstUnions[0][arg0comp].getDConst(), childConstUnions[1][arg1comp].getDConst()));
+ break;
+ case EOpMin:
+ switch(children[0]->getAsTyped()->getBasicType()) {
+ case EbtFloat16:
+ case EbtFloat:
+ case EbtDouble:
+ newConstArray[comp].setDConst(std::min(childConstUnions[0][arg0comp].getDConst(), childConstUnions[1][arg1comp].getDConst()));
+ break;
+ case EbtInt8:
+ newConstArray[comp].setI8Const(std::min(childConstUnions[0][arg0comp].getI8Const(), childConstUnions[1][arg1comp].getI8Const()));
+ break;
+ case EbtUint8:
+ newConstArray[comp].setU8Const(std::min(childConstUnions[0][arg0comp].getU8Const(), childConstUnions[1][arg1comp].getU8Const()));
+ break;
+ case EbtInt16:
+ newConstArray[comp].setI16Const(std::min(childConstUnions[0][arg0comp].getI16Const(), childConstUnions[1][arg1comp].getI16Const()));
+ break;
+ case EbtUint16:
+ newConstArray[comp].setU16Const(std::min(childConstUnions[0][arg0comp].getU16Const(), childConstUnions[1][arg1comp].getU16Const()));
+ break;
+ case EbtInt:
+ newConstArray[comp].setIConst(std::min(childConstUnions[0][arg0comp].getIConst(), childConstUnions[1][arg1comp].getIConst()));
+ break;
+ case EbtUint:
+ newConstArray[comp].setUConst(std::min(childConstUnions[0][arg0comp].getUConst(), childConstUnions[1][arg1comp].getUConst()));
+ break;
+ case EbtInt64:
+ newConstArray[comp].setI64Const(std::min(childConstUnions[0][arg0comp].getI64Const(), childConstUnions[1][arg1comp].getI64Const()));
+ break;
+ case EbtUint64:
+ newConstArray[comp].setU64Const(std::min(childConstUnions[0][arg0comp].getU64Const(), childConstUnions[1][arg1comp].getU64Const()));
+ break;
+ default: assert(false && "Default missing");
+ }
+ break;
+ case EOpMax:
+ switch(children[0]->getAsTyped()->getBasicType()) {
+ case EbtFloat16:
+ case EbtFloat:
+ case EbtDouble:
+ newConstArray[comp].setDConst(std::max(childConstUnions[0][arg0comp].getDConst(), childConstUnions[1][arg1comp].getDConst()));
+ break;
+ case EbtInt8:
+ newConstArray[comp].setI8Const(std::max(childConstUnions[0][arg0comp].getI8Const(), childConstUnions[1][arg1comp].getI8Const()));
+ break;
+ case EbtUint8:
+ newConstArray[comp].setU8Const(std::max(childConstUnions[0][arg0comp].getU8Const(), childConstUnions[1][arg1comp].getU8Const()));
+ break;
+ case EbtInt16:
+ newConstArray[comp].setI16Const(std::max(childConstUnions[0][arg0comp].getI16Const(), childConstUnions[1][arg1comp].getI16Const()));
+ break;
+ case EbtUint16:
+ newConstArray[comp].setU16Const(std::max(childConstUnions[0][arg0comp].getU16Const(), childConstUnions[1][arg1comp].getU16Const()));
+ break;
+ case EbtInt:
+ newConstArray[comp].setIConst(std::max(childConstUnions[0][arg0comp].getIConst(), childConstUnions[1][arg1comp].getIConst()));
+ break;
+ case EbtUint:
+ newConstArray[comp].setUConst(std::max(childConstUnions[0][arg0comp].getUConst(), childConstUnions[1][arg1comp].getUConst()));
+ break;
+ case EbtInt64:
+ newConstArray[comp].setI64Const(std::max(childConstUnions[0][arg0comp].getI64Const(), childConstUnions[1][arg1comp].getI64Const()));
+ break;
+ case EbtUint64:
+ newConstArray[comp].setU64Const(std::max(childConstUnions[0][arg0comp].getU64Const(), childConstUnions[1][arg1comp].getU64Const()));
+ break;
+ default: assert(false && "Default missing");
+ }
+ break;
+ case EOpClamp:
+ switch(children[0]->getAsTyped()->getBasicType()) {
+ case EbtFloat16:
+ case EbtFloat:
+ case EbtDouble:
+ newConstArray[comp].setDConst(std::min(std::max(childConstUnions[0][arg0comp].getDConst(), childConstUnions[1][arg1comp].getDConst()),
+ childConstUnions[2][arg2comp].getDConst()));
+ break;
+ case EbtInt8:
+ newConstArray[comp].setI8Const(std::min(std::max(childConstUnions[0][arg0comp].getI8Const(), childConstUnions[1][arg1comp].getI8Const()),
+ childConstUnions[2][arg2comp].getI8Const()));
+ break;
+ case EbtUint8:
+ newConstArray[comp].setU8Const(std::min(std::max(childConstUnions[0][arg0comp].getU8Const(), childConstUnions[1][arg1comp].getU8Const()),
+ childConstUnions[2][arg2comp].getU8Const()));
+ break;
+ case EbtInt16:
+ newConstArray[comp].setI16Const(std::min(std::max(childConstUnions[0][arg0comp].getI16Const(), childConstUnions[1][arg1comp].getI16Const()),
+ childConstUnions[2][arg2comp].getI16Const()));
+ break;
+ case EbtUint16:
+ newConstArray[comp].setU16Const(std::min(std::max(childConstUnions[0][arg0comp].getU16Const(), childConstUnions[1][arg1comp].getU16Const()),
+ childConstUnions[2][arg2comp].getU16Const()));
+ break;
+ case EbtInt:
+ newConstArray[comp].setIConst(std::min(std::max(childConstUnions[0][arg0comp].getIConst(), childConstUnions[1][arg1comp].getIConst()),
+ childConstUnions[2][arg2comp].getIConst()));
+ break;
+ case EbtUint:
+ newConstArray[comp].setUConst(std::min(std::max(childConstUnions[0][arg0comp].getUConst(), childConstUnions[1][arg1comp].getUConst()),
+ childConstUnions[2][arg2comp].getUConst()));
+ break;
+ case EbtInt64:
+ newConstArray[comp].setI64Const(std::min(std::max(childConstUnions[0][arg0comp].getI64Const(), childConstUnions[1][arg1comp].getI64Const()),
+ childConstUnions[2][arg2comp].getI64Const()));
+ break;
+ case EbtUint64:
+ newConstArray[comp].setU64Const(std::min(std::max(childConstUnions[0][arg0comp].getU64Const(), childConstUnions[1][arg1comp].getU64Const()),
+ childConstUnions[2][arg2comp].getU64Const()));
+ break;
+ default: assert(false && "Default missing");
+ }
+ break;
+ case EOpLessThan:
+ newConstArray[comp].setBConst(childConstUnions[0][arg0comp] < childConstUnions[1][arg1comp]);
+ break;
+ case EOpGreaterThan:
+ newConstArray[comp].setBConst(childConstUnions[0][arg0comp] > childConstUnions[1][arg1comp]);
+ break;
+ case EOpLessThanEqual:
+ newConstArray[comp].setBConst(! (childConstUnions[0][arg0comp] > childConstUnions[1][arg1comp]));
+ break;
+ case EOpGreaterThanEqual:
+ newConstArray[comp].setBConst(! (childConstUnions[0][arg0comp] < childConstUnions[1][arg1comp]));
+ break;
+ case EOpVectorEqual:
+ newConstArray[comp].setBConst(childConstUnions[0][arg0comp] == childConstUnions[1][arg1comp]);
+ break;
+ case EOpVectorNotEqual:
+ newConstArray[comp].setBConst(childConstUnions[0][arg0comp] != childConstUnions[1][arg1comp]);
+ break;
+ case EOpMix:
+ if (children[2]->getAsTyped()->getBasicType() == EbtBool)
+ newConstArray[comp].setDConst(childConstUnions[2][arg2comp].getBConst() ? childConstUnions[1][arg1comp].getDConst() :
+ childConstUnions[0][arg0comp].getDConst());
+ else
+ newConstArray[comp].setDConst(childConstUnions[0][arg0comp].getDConst() * (1.0 - childConstUnions[2][arg2comp].getDConst()) +
+ childConstUnions[1][arg1comp].getDConst() * childConstUnions[2][arg2comp].getDConst());
+ break;
+ case EOpStep:
+ newConstArray[comp].setDConst(childConstUnions[1][arg1comp].getDConst() < childConstUnions[0][arg0comp].getDConst() ? 0.0 : 1.0);
+ break;
+ case EOpSmoothStep:
+ {
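+                // Hermite interpolation, e.g. smoothstep(0.0, 1.0, 0.5) gives
+                // t = 0.5 and a result of 0.5 * 0.5 * (3.0 - 2.0 * 0.5) = 0.5.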
+ double t = (childConstUnions[2][arg2comp].getDConst() - childConstUnions[0][arg0comp].getDConst()) /
+ (childConstUnions[1][arg1comp].getDConst() - childConstUnions[0][arg0comp].getDConst());
+ if (t < 0.0)
+ t = 0.0;
+ if (t > 1.0)
+ t = 1.0;
+ newConstArray[comp].setDConst(t * t * (3.0 - 2.0 * t));
+ break;
+ }
+ default:
+ return aggrNode;
+ }
+ }
+ } else {
+ // Non-componentwise...
+
+ int numComps = children[0]->getAsConstantUnion()->getType().computeNumComponents();
+ double dot;
+
+ switch (aggrNode->getOp()) {
+ case EOpDistance:
+ {
+ double sum = 0.0;
+ for (int comp = 0; comp < numComps; ++comp) {
+ double diff = childConstUnions[1][comp].getDConst() - childConstUnions[0][comp].getDConst();
+ sum += diff * diff;
+ }
+ newConstArray[0].setDConst(sqrt(sum));
+ break;
+ }
+ case EOpDot:
+ newConstArray[0].setDConst(childConstUnions[0].dot(childConstUnions[1]));
+ break;
+ case EOpCross:
+ newConstArray[0] = childConstUnions[0][1] * childConstUnions[1][2] - childConstUnions[0][2] * childConstUnions[1][1];
+ newConstArray[1] = childConstUnions[0][2] * childConstUnions[1][0] - childConstUnions[0][0] * childConstUnions[1][2];
+ newConstArray[2] = childConstUnions[0][0] * childConstUnions[1][1] - childConstUnions[0][1] * childConstUnions[1][0];
+ break;
+ case EOpFaceForward:
+ // If dot(Nref, I) < 0 return N, otherwise return -N: Arguments are (N, I, Nref).
+ dot = childConstUnions[1].dot(childConstUnions[2]);
+ for (int comp = 0; comp < numComps; ++comp) {
+ if (dot < 0.0)
+ newConstArray[comp] = childConstUnions[0][comp];
+ else
+ newConstArray[comp].setDConst(-childConstUnions[0][comp].getDConst());
+ }
+ break;
+ case EOpReflect:
+ // I - 2 * dot(N, I) * N: Arguments are (I, N).
+ dot = childConstUnions[0].dot(childConstUnions[1]);
+ dot *= 2.0;
+ for (int comp = 0; comp < numComps; ++comp)
+ newConstArray[comp].setDConst(childConstUnions[0][comp].getDConst() - dot * childConstUnions[1][comp].getDConst());
+ break;
+ case EOpRefract:
+ {
+ // Arguments are (I, N, eta).
+ // k = 1.0 - eta * eta * (1.0 - dot(N, I) * dot(N, I))
+ // if (k < 0.0)
+ // return dvec(0.0)
+ // else
+ // return eta * I - (eta * dot(N, I) + sqrt(k)) * N
+ dot = childConstUnions[0].dot(childConstUnions[1]);
+ double eta = childConstUnions[2][0].getDConst();
+ double k = 1.0 - eta * eta * (1.0 - dot * dot);
+ if (k < 0.0) {
+ for (int comp = 0; comp < numComps; ++comp)
+ newConstArray[comp].setDConst(0.0);
+ } else {
+ for (int comp = 0; comp < numComps; ++comp)
+ newConstArray[comp].setDConst(eta * childConstUnions[0][comp].getDConst() - (eta * dot + sqrt(k)) * childConstUnions[1][comp].getDConst());
+ }
+ break;
+ }
+ case EOpOuterProduct:
+ {
+ int numRows = numComps;
+ int numCols = children[1]->getAsConstantUnion()->getType().computeNumComponents();
+ for (int row = 0; row < numRows; ++row)
+ for (int col = 0; col < numCols; ++col)
+ newConstArray[col * numRows + row] = childConstUnions[0][row] * childConstUnions[1][col];
+ break;
+ }
+ default:
+ return aggrNode;
+ }
+ }
+
+ TIntermConstantUnion *newNode = new TIntermConstantUnion(newConstArray, aggrNode->getType());
+ newNode->getWritableType().getQualifier().storage = EvqConst;
+ newNode->setLoc(aggrNode->getLoc());
+
+ return newNode;
+}
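+// Illustrative example of the aggregate fold above (the values are
+// hypothetical): an aggregate node for clamp(ivec2(5, -3), 0, 4) whose
+// children are all constant folds to a constant ivec2(4, 0), with the scalar
+// min/max arguments smeared per component by the componentwise loop.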
+
+bool TIntermediate::areAllChildConst(TIntermAggregate* aggrNode)
+{
+    // check if all the child nodes are constants so that they can be inserted into
+    // the parent node
+    if (aggrNode) {
+        TIntermSequence& childSequenceVector = aggrNode->getSequence();
+        for (TIntermSequence::iterator p = childSequenceVector.begin();
+                                       p != childSequenceVector.end(); p++) {
+            if (!(*p)->getAsTyped()->getAsConstantUnion())
+                return false;
+        }
+    }
+
+    return true;
+}
+
+TIntermTyped* TIntermediate::foldConstructor(TIntermAggregate* aggrNode)
+{
+ bool error = false;
+
+ TConstUnionArray unionArray(aggrNode->getType().computeNumComponents());
+ if (aggrNode->getSequence().size() == 1)
+ error = parseConstTree(aggrNode, unionArray, aggrNode->getOp(), aggrNode->getType(), true);
+ else
+ error = parseConstTree(aggrNode, unionArray, aggrNode->getOp(), aggrNode->getType());
+
+ if (error)
+ return aggrNode;
+
+ return addConstantUnion(unionArray, aggrNode->getType(), aggrNode->getLoc());
+}
+
+//
+// Constant folding of a bracket (array-style) dereference or struct-like dot
+// dereference. Can handle anything except a multi-character swizzle, though
+// all swizzles may go to foldSwizzle().
+//
+TIntermTyped* TIntermediate::foldDereference(TIntermTyped* node, int index, const TSourceLoc& loc)
+{
+ TType dereferencedType(node->getType(), index);
+ dereferencedType.getQualifier().storage = EvqConst;
+ TIntermTyped* result = 0;
+ int size = dereferencedType.computeNumComponents();
+
+ // arrays, vectors, matrices, all use simple multiplicative math
+ // while structures need to add up heterogeneous members
+ int start;
+ if (node->getType().isCoopMat())
+ start = 0;
+ else if (node->isArray() || ! node->isStruct())
+ start = size * index;
+ else {
+ // it is a structure
+ assert(node->isStruct());
+ start = 0;
+ for (int i = 0; i < index; ++i)
+ start += (*node->getType().getStruct())[i].type->computeNumComponents();
+ }
+
+ result = addConstantUnion(TConstUnionArray(node->getAsConstantUnion()->getConstArray(), start, size), node->getType(), loc);
+
+ if (result == 0)
+ result = node;
+ else
+ result->setType(dereferencedType);
+
+ return result;
+}
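+// For example, folding member 1 of a constant struct { vec3 a; float b; }
+// computes start = 3 (the components of 'a') and size = 1, so the result is
+// the single constant stored for 'b'.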
+
+//
+// Make a constant vector node or constant scalar node, representing a given
+// constant vector and constant swizzle into it.
+//
+TIntermTyped* TIntermediate::foldSwizzle(TIntermTyped* node, TSwizzleSelectors<TVectorSelector>& selectors, const TSourceLoc& loc)
+{
+ const TConstUnionArray& unionArray = node->getAsConstantUnion()->getConstArray();
+ TConstUnionArray constArray(selectors.size());
+
+ for (int i = 0; i < selectors.size(); i++)
+ constArray[i] = unionArray[selectors[i]];
+
+ TIntermTyped* result = addConstantUnion(constArray, node->getType(), loc);
+
+ if (result == 0)
+ result = node;
+ else
+ result->setType(TType(node->getBasicType(), EvqConst, selectors.size()));
+
+ return result;
+}
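+// For example, with a constant vec4(1.0, 2.0, 3.0, 4.0) and selectors {3, 2}
+// (the swizzle ".wz"), the folded result is a constant vec2(4.0, 3.0).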
+
+} // end namespace glslang
diff --git a/thirdparty/glslang/glslang/MachineIndependent/InfoSink.cpp b/thirdparty/glslang/glslang/MachineIndependent/InfoSink.cpp
new file mode 100644
index 0000000000..d00c422566
--- /dev/null
+++ b/thirdparty/glslang/glslang/MachineIndependent/InfoSink.cpp
@@ -0,0 +1,113 @@
+//
+// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+#include "../Include/InfoSink.h"
+
+#include <cstring>
+
+namespace glslang {
+
+void TInfoSinkBase::append(const char* s)
+{
+ if (outputStream & EString) {
+ if (s == nullptr)
+ sink.append("(null)");
+ else {
+ checkMem(strlen(s));
+ sink.append(s);
+ }
+ }
+
+//#ifdef _WIN32
+// if (outputStream & EDebugger)
+// OutputDebugString(s);
+//#endif
+
+ if (outputStream & EStdOut)
+ fprintf(stdout, "%s", s);
+}
+
+void TInfoSinkBase::append(int count, char c)
+{
+ if (outputStream & EString) {
+ checkMem(count);
+ sink.append(count, c);
+ }
+
+//#ifdef _WIN32
+// if (outputStream & EDebugger) {
+// char str[2];
+// str[0] = c;
+// str[1] = '\0';
+// OutputDebugString(str);
+// }
+//#endif
+
+ if (outputStream & EStdOut)
+ fprintf(stdout, "%c", c);
+}
+
+void TInfoSinkBase::append(const TPersistString& t)
+{
+ if (outputStream & EString) {
+ checkMem(t.size());
+ sink.append(t);
+ }
+
+//#ifdef _WIN32
+// if (outputStream & EDebugger)
+// OutputDebugString(t.c_str());
+//#endif
+
+ if (outputStream & EStdOut)
+ fprintf(stdout, "%s", t.c_str());
+}
+
+void TInfoSinkBase::append(const TString& t)
+{
+ if (outputStream & EString) {
+ checkMem(t.size());
+ sink.append(t.c_str());
+ }
+
+//#ifdef _WIN32
+// if (outputStream & EDebugger)
+// OutputDebugString(t.c_str());
+//#endif
+
+ if (outputStream & EStdOut)
+ fprintf(stdout, "%s", t.c_str());
+}
+
+} // end namespace glslang
diff --git a/thirdparty/glslang/glslang/MachineIndependent/Initialize.cpp b/thirdparty/glslang/glslang/MachineIndependent/Initialize.cpp
new file mode 100644
index 0000000000..0498b4871a
--- /dev/null
+++ b/thirdparty/glslang/glslang/MachineIndependent/Initialize.cpp
@@ -0,0 +1,9634 @@
+//
+// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+// Copyright (C) 2012-2016 LunarG, Inc.
+// Copyright (C) 2015-2018 Google, Inc.
+// Copyright (C) 2017 ARM Limited.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+//
+// Create strings that declare built-in definitions, add built-ins programmatically
+// that cannot be expressed in the strings, and establish mappings between
+// built-in functions and operators.
+//
+// Where to put a built-in:
+// TBuiltIns::initialize(version,profile) context-independent textual built-ins; add them to the right string
+// TBuiltIns::initialize(resources,...) context-dependent textual built-ins; add them to the right string
+// TBuiltIns::identifyBuiltIns(...,symbolTable) context-independent programmatic additions/mappings to the symbol table,
+// including identifying what extensions are needed if a version does not allow a symbol
+// TBuiltIns::identifyBuiltIns(...,symbolTable, resources) context-dependent programmatic additions/mappings to the symbol table,
+// including identifying what extensions are needed if a version does not allow a symbol
+//
+
+#include "../Include/intermediate.h"
+#include "Initialize.h"
+
+namespace glslang {
+
+// TODO: ARB_compatibility: do full extension support
+const bool ARBCompatibility = true;
+
+const bool ForwardCompatibility = false;
+
+// Change this back to false if you depend on the textual spellings of
+// texturing calls when consuming the AST.
+// Using PureOperatorBuiltins=false is deprecated.
+bool PureOperatorBuiltins = true;
+
+inline bool IncludeLegacy(int version, EProfile profile, const SpvVersion& spvVersion)
+{
+ return profile != EEsProfile && (version <= 130 || (spvVersion.spv == 0 && ARBCompatibility) || profile == ECompatibilityProfile);
+}
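+// For example, desktop "#version 120" always includes the legacy built-ins,
+// while a SPIR-V compile (spvVersion.spv != 0) of "#version 450" core does not.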
+
+// Construct TBuiltInParseables base class. This can be used for language-common constructs.
+TBuiltInParseables::TBuiltInParseables()
+{
+}
+
+// Destroy TBuiltInParseables.
+TBuiltInParseables::~TBuiltInParseables()
+{
+}
+
+TBuiltIns::TBuiltIns()
+{
+ // Set up textual representations for making all the permutations
+ // of texturing/imaging functions.
+ prefixes[EbtFloat] = "";
+#ifdef AMD_EXTENSIONS
+ prefixes[EbtFloat16] = "f16";
+#endif
+ prefixes[EbtInt8] = "i8";
+ prefixes[EbtUint8] = "u8";
+ prefixes[EbtInt16] = "i16";
+ prefixes[EbtUint16] = "u16";
+ prefixes[EbtInt] = "i";
+ prefixes[EbtUint] = "u";
+ postfixes[2] = "2";
+ postfixes[3] = "3";
+ postfixes[4] = "4";
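+    // e.g. prefixes[EbtUint] ("u") composed with postfixes[4] ("4") yields
+    // spellings like "uvec4" when the texturing/imaging prototypes are built.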
+
+ // Map from symbolic class of texturing dimension to numeric dimensions.
+ dimMap[Esd1D] = 1;
+ dimMap[Esd2D] = 2;
+ dimMap[EsdRect] = 2;
+ dimMap[Esd3D] = 3;
+ dimMap[EsdCube] = 3;
+ dimMap[EsdBuffer] = 1;
+    dimMap[EsdSubpass] = 2;  // potentially unused for now
+}
+
+TBuiltIns::~TBuiltIns()
+{
+}
+
+
+//
+// Add all context-independent built-in functions and variables that are present
+// for the given version and profile. Share common ones across stages, otherwise
+// make stage-specific entries.
+//
+// Most built-ins variables can be added as simple text strings. Some need to
+// be added programmatically, which is done later in IdentifyBuiltIns() below.
+//
+void TBuiltIns::initialize(int version, EProfile profile, const SpvVersion& spvVersion)
+{
+ //============================================================================
+ //
+    // Prototypes for built-in functions used repeatedly by different shaders
+ //
+ //============================================================================
+
+ //
+ // Derivatives Functions.
+ //
+ TString derivatives (
+ "float dFdx(float p);"
+ "vec2 dFdx(vec2 p);"
+ "vec3 dFdx(vec3 p);"
+ "vec4 dFdx(vec4 p);"
+
+ "float dFdy(float p);"
+ "vec2 dFdy(vec2 p);"
+ "vec3 dFdy(vec3 p);"
+ "vec4 dFdy(vec4 p);"
+
+ "float fwidth(float p);"
+ "vec2 fwidth(vec2 p);"
+ "vec3 fwidth(vec3 p);"
+ "vec4 fwidth(vec4 p);"
+ );
+
+ TString derivativeControls (
+ "float dFdxFine(float p);"
+ "vec2 dFdxFine(vec2 p);"
+ "vec3 dFdxFine(vec3 p);"
+ "vec4 dFdxFine(vec4 p);"
+
+ "float dFdyFine(float p);"
+ "vec2 dFdyFine(vec2 p);"
+ "vec3 dFdyFine(vec3 p);"
+ "vec4 dFdyFine(vec4 p);"
+
+ "float fwidthFine(float p);"
+ "vec2 fwidthFine(vec2 p);"
+ "vec3 fwidthFine(vec3 p);"
+ "vec4 fwidthFine(vec4 p);"
+
+ "float dFdxCoarse(float p);"
+ "vec2 dFdxCoarse(vec2 p);"
+ "vec3 dFdxCoarse(vec3 p);"
+ "vec4 dFdxCoarse(vec4 p);"
+
+ "float dFdyCoarse(float p);"
+ "vec2 dFdyCoarse(vec2 p);"
+ "vec3 dFdyCoarse(vec3 p);"
+ "vec4 dFdyCoarse(vec4 p);"
+
+ "float fwidthCoarse(float p);"
+ "vec2 fwidthCoarse(vec2 p);"
+ "vec3 fwidthCoarse(vec3 p);"
+ "vec4 fwidthCoarse(vec4 p);"
+ );
+
+ TString derivativesAndControl16bits (
+ "float16_t dFdx(float16_t);"
+ "f16vec2 dFdx(f16vec2);"
+ "f16vec3 dFdx(f16vec3);"
+ "f16vec4 dFdx(f16vec4);"
+
+ "float16_t dFdy(float16_t);"
+ "f16vec2 dFdy(f16vec2);"
+ "f16vec3 dFdy(f16vec3);"
+ "f16vec4 dFdy(f16vec4);"
+
+ "float16_t dFdxFine(float16_t);"
+ "f16vec2 dFdxFine(f16vec2);"
+ "f16vec3 dFdxFine(f16vec3);"
+ "f16vec4 dFdxFine(f16vec4);"
+
+ "float16_t dFdyFine(float16_t);"
+ "f16vec2 dFdyFine(f16vec2);"
+ "f16vec3 dFdyFine(f16vec3);"
+ "f16vec4 dFdyFine(f16vec4);"
+
+ "float16_t dFdxCoarse(float16_t);"
+ "f16vec2 dFdxCoarse(f16vec2);"
+ "f16vec3 dFdxCoarse(f16vec3);"
+ "f16vec4 dFdxCoarse(f16vec4);"
+
+ "float16_t dFdyCoarse(float16_t);"
+ "f16vec2 dFdyCoarse(f16vec2);"
+ "f16vec3 dFdyCoarse(f16vec3);"
+ "f16vec4 dFdyCoarse(f16vec4);"
+
+ "float16_t fwidth(float16_t);"
+ "f16vec2 fwidth(f16vec2);"
+ "f16vec3 fwidth(f16vec3);"
+ "f16vec4 fwidth(f16vec4);"
+
+ "float16_t fwidthFine(float16_t);"
+ "f16vec2 fwidthFine(f16vec2);"
+ "f16vec3 fwidthFine(f16vec3);"
+ "f16vec4 fwidthFine(f16vec4);"
+
+ "float16_t fwidthCoarse(float16_t);"
+ "f16vec2 fwidthCoarse(f16vec2);"
+ "f16vec3 fwidthCoarse(f16vec3);"
+ "f16vec4 fwidthCoarse(f16vec4);"
+ );
+
+ TString derivativesAndControl64bits (
+ "float64_t dFdx(float64_t);"
+ "f64vec2 dFdx(f64vec2);"
+ "f64vec3 dFdx(f64vec3);"
+ "f64vec4 dFdx(f64vec4);"
+
+ "float64_t dFdy(float64_t);"
+ "f64vec2 dFdy(f64vec2);"
+ "f64vec3 dFdy(f64vec3);"
+ "f64vec4 dFdy(f64vec4);"
+
+ "float64_t dFdxFine(float64_t);"
+ "f64vec2 dFdxFine(f64vec2);"
+ "f64vec3 dFdxFine(f64vec3);"
+ "f64vec4 dFdxFine(f64vec4);"
+
+ "float64_t dFdyFine(float64_t);"
+ "f64vec2 dFdyFine(f64vec2);"
+ "f64vec3 dFdyFine(f64vec3);"
+ "f64vec4 dFdyFine(f64vec4);"
+
+ "float64_t dFdxCoarse(float64_t);"
+ "f64vec2 dFdxCoarse(f64vec2);"
+ "f64vec3 dFdxCoarse(f64vec3);"
+ "f64vec4 dFdxCoarse(f64vec4);"
+
+ "float64_t dFdyCoarse(float64_t);"
+ "f64vec2 dFdyCoarse(f64vec2);"
+ "f64vec3 dFdyCoarse(f64vec3);"
+ "f64vec4 dFdyCoarse(f64vec4);"
+
+ "float64_t fwidth(float64_t);"
+ "f64vec2 fwidth(f64vec2);"
+ "f64vec3 fwidth(f64vec3);"
+ "f64vec4 fwidth(f64vec4);"
+
+ "float64_t fwidthFine(float64_t);"
+ "f64vec2 fwidthFine(f64vec2);"
+ "f64vec3 fwidthFine(f64vec3);"
+ "f64vec4 fwidthFine(f64vec4);"
+
+ "float64_t fwidthCoarse(float64_t);"
+ "f64vec2 fwidthCoarse(f64vec2);"
+ "f64vec3 fwidthCoarse(f64vec3);"
+ "f64vec4 fwidthCoarse(f64vec4);"
+ );
+
+ //============================================================================
+ //
+ // Prototypes for built-in functions seen by both vertex and fragment shaders.
+ //
+ //============================================================================
+
+ //
+ // Angle and Trigonometric Functions.
+ //
+ commonBuiltins.append(
+ "float radians(float degrees);"
+ "vec2 radians(vec2 degrees);"
+ "vec3 radians(vec3 degrees);"
+ "vec4 radians(vec4 degrees);"
+
+ "float degrees(float radians);"
+ "vec2 degrees(vec2 radians);"
+ "vec3 degrees(vec3 radians);"
+ "vec4 degrees(vec4 radians);"
+
+ "float sin(float angle);"
+ "vec2 sin(vec2 angle);"
+ "vec3 sin(vec3 angle);"
+ "vec4 sin(vec4 angle);"
+
+ "float cos(float angle);"
+ "vec2 cos(vec2 angle);"
+ "vec3 cos(vec3 angle);"
+ "vec4 cos(vec4 angle);"
+
+ "float tan(float angle);"
+ "vec2 tan(vec2 angle);"
+ "vec3 tan(vec3 angle);"
+ "vec4 tan(vec4 angle);"
+
+ "float asin(float x);"
+ "vec2 asin(vec2 x);"
+ "vec3 asin(vec3 x);"
+ "vec4 asin(vec4 x);"
+
+ "float acos(float x);"
+ "vec2 acos(vec2 x);"
+ "vec3 acos(vec3 x);"
+ "vec4 acos(vec4 x);"
+
+ "float atan(float y, float x);"
+ "vec2 atan(vec2 y, vec2 x);"
+ "vec3 atan(vec3 y, vec3 x);"
+ "vec4 atan(vec4 y, vec4 x);"
+
+ "float atan(float y_over_x);"
+ "vec2 atan(vec2 y_over_x);"
+ "vec3 atan(vec3 y_over_x);"
+ "vec4 atan(vec4 y_over_x);"
+
+ "\n");
+
+ if (version >= 130) {
+ commonBuiltins.append(
+ "float sinh(float angle);"
+ "vec2 sinh(vec2 angle);"
+ "vec3 sinh(vec3 angle);"
+ "vec4 sinh(vec4 angle);"
+
+ "float cosh(float angle);"
+ "vec2 cosh(vec2 angle);"
+ "vec3 cosh(vec3 angle);"
+ "vec4 cosh(vec4 angle);"
+
+ "float tanh(float angle);"
+ "vec2 tanh(vec2 angle);"
+ "vec3 tanh(vec3 angle);"
+ "vec4 tanh(vec4 angle);"
+
+ "float asinh(float x);"
+ "vec2 asinh(vec2 x);"
+ "vec3 asinh(vec3 x);"
+ "vec4 asinh(vec4 x);"
+
+ "float acosh(float x);"
+ "vec2 acosh(vec2 x);"
+ "vec3 acosh(vec3 x);"
+ "vec4 acosh(vec4 x);"
+
+ "float atanh(float y_over_x);"
+ "vec2 atanh(vec2 y_over_x);"
+ "vec3 atanh(vec3 y_over_x);"
+ "vec4 atanh(vec4 y_over_x);"
+
+ "\n");
+ }
+
+ //
+ // Exponential Functions.
+ //
+ commonBuiltins.append(
+ "float pow(float x, float y);"
+ "vec2 pow(vec2 x, vec2 y);"
+ "vec3 pow(vec3 x, vec3 y);"
+ "vec4 pow(vec4 x, vec4 y);"
+
+ "float exp(float x);"
+ "vec2 exp(vec2 x);"
+ "vec3 exp(vec3 x);"
+ "vec4 exp(vec4 x);"
+
+ "float log(float x);"
+ "vec2 log(vec2 x);"
+ "vec3 log(vec3 x);"
+ "vec4 log(vec4 x);"
+
+ "float exp2(float x);"
+ "vec2 exp2(vec2 x);"
+ "vec3 exp2(vec3 x);"
+ "vec4 exp2(vec4 x);"
+
+ "float log2(float x);"
+ "vec2 log2(vec2 x);"
+ "vec3 log2(vec3 x);"
+ "vec4 log2(vec4 x);"
+
+ "float sqrt(float x);"
+ "vec2 sqrt(vec2 x);"
+ "vec3 sqrt(vec3 x);"
+ "vec4 sqrt(vec4 x);"
+
+ "float inversesqrt(float x);"
+ "vec2 inversesqrt(vec2 x);"
+ "vec3 inversesqrt(vec3 x);"
+ "vec4 inversesqrt(vec4 x);"
+
+ "\n");
+
+ //
+ // Common Functions.
+ //
+ commonBuiltins.append(
+ "float abs(float x);"
+ "vec2 abs(vec2 x);"
+ "vec3 abs(vec3 x);"
+ "vec4 abs(vec4 x);"
+
+ "float sign(float x);"
+ "vec2 sign(vec2 x);"
+ "vec3 sign(vec3 x);"
+ "vec4 sign(vec4 x);"
+
+ "float floor(float x);"
+ "vec2 floor(vec2 x);"
+ "vec3 floor(vec3 x);"
+ "vec4 floor(vec4 x);"
+
+ "float ceil(float x);"
+ "vec2 ceil(vec2 x);"
+ "vec3 ceil(vec3 x);"
+ "vec4 ceil(vec4 x);"
+
+ "float fract(float x);"
+ "vec2 fract(vec2 x);"
+ "vec3 fract(vec3 x);"
+ "vec4 fract(vec4 x);"
+
+ "float mod(float x, float y);"
+ "vec2 mod(vec2 x, float y);"
+ "vec3 mod(vec3 x, float y);"
+ "vec4 mod(vec4 x, float y);"
+ "vec2 mod(vec2 x, vec2 y);"
+ "vec3 mod(vec3 x, vec3 y);"
+ "vec4 mod(vec4 x, vec4 y);"
+
+ "float min(float x, float y);"
+ "vec2 min(vec2 x, float y);"
+ "vec3 min(vec3 x, float y);"
+ "vec4 min(vec4 x, float y);"
+ "vec2 min(vec2 x, vec2 y);"
+ "vec3 min(vec3 x, vec3 y);"
+ "vec4 min(vec4 x, vec4 y);"
+
+ "float max(float x, float y);"
+ "vec2 max(vec2 x, float y);"
+ "vec3 max(vec3 x, float y);"
+ "vec4 max(vec4 x, float y);"
+ "vec2 max(vec2 x, vec2 y);"
+ "vec3 max(vec3 x, vec3 y);"
+ "vec4 max(vec4 x, vec4 y);"
+
+ "float clamp(float x, float minVal, float maxVal);"
+ "vec2 clamp(vec2 x, float minVal, float maxVal);"
+ "vec3 clamp(vec3 x, float minVal, float maxVal);"
+ "vec4 clamp(vec4 x, float minVal, float maxVal);"
+ "vec2 clamp(vec2 x, vec2 minVal, vec2 maxVal);"
+ "vec3 clamp(vec3 x, vec3 minVal, vec3 maxVal);"
+ "vec4 clamp(vec4 x, vec4 minVal, vec4 maxVal);"
+
+ "float mix(float x, float y, float a);"
+ "vec2 mix(vec2 x, vec2 y, float a);"
+ "vec3 mix(vec3 x, vec3 y, float a);"
+ "vec4 mix(vec4 x, vec4 y, float a);"
+ "vec2 mix(vec2 x, vec2 y, vec2 a);"
+ "vec3 mix(vec3 x, vec3 y, vec3 a);"
+ "vec4 mix(vec4 x, vec4 y, vec4 a);"
+
+ "float step(float edge, float x);"
+ "vec2 step(vec2 edge, vec2 x);"
+ "vec3 step(vec3 edge, vec3 x);"
+ "vec4 step(vec4 edge, vec4 x);"
+ "vec2 step(float edge, vec2 x);"
+ "vec3 step(float edge, vec3 x);"
+ "vec4 step(float edge, vec4 x);"
+
+ "float smoothstep(float edge0, float edge1, float x);"
+ "vec2 smoothstep(vec2 edge0, vec2 edge1, vec2 x);"
+ "vec3 smoothstep(vec3 edge0, vec3 edge1, vec3 x);"
+ "vec4 smoothstep(vec4 edge0, vec4 edge1, vec4 x);"
+ "vec2 smoothstep(float edge0, float edge1, vec2 x);"
+ "vec3 smoothstep(float edge0, float edge1, vec3 x);"
+ "vec4 smoothstep(float edge0, float edge1, vec4 x);"
+
+ "\n");
+
+ if (version >= 130) {
+ commonBuiltins.append(
+ " int abs( int x);"
+ "ivec2 abs(ivec2 x);"
+ "ivec3 abs(ivec3 x);"
+ "ivec4 abs(ivec4 x);"
+
+ " int sign( int x);"
+ "ivec2 sign(ivec2 x);"
+ "ivec3 sign(ivec3 x);"
+ "ivec4 sign(ivec4 x);"
+
+ "float trunc(float x);"
+ "vec2 trunc(vec2 x);"
+ "vec3 trunc(vec3 x);"
+ "vec4 trunc(vec4 x);"
+
+ "float round(float x);"
+ "vec2 round(vec2 x);"
+ "vec3 round(vec3 x);"
+ "vec4 round(vec4 x);"
+
+ "float roundEven(float x);"
+ "vec2 roundEven(vec2 x);"
+ "vec3 roundEven(vec3 x);"
+ "vec4 roundEven(vec4 x);"
+
+ "float modf(float, out float);"
+ "vec2 modf(vec2, out vec2 );"
+ "vec3 modf(vec3, out vec3 );"
+ "vec4 modf(vec4, out vec4 );"
+
+ " int min(int x, int y);"
+ "ivec2 min(ivec2 x, int y);"
+ "ivec3 min(ivec3 x, int y);"
+ "ivec4 min(ivec4 x, int y);"
+ "ivec2 min(ivec2 x, ivec2 y);"
+ "ivec3 min(ivec3 x, ivec3 y);"
+ "ivec4 min(ivec4 x, ivec4 y);"
+
+ " uint min(uint x, uint y);"
+ "uvec2 min(uvec2 x, uint y);"
+ "uvec3 min(uvec3 x, uint y);"
+ "uvec4 min(uvec4 x, uint y);"
+ "uvec2 min(uvec2 x, uvec2 y);"
+ "uvec3 min(uvec3 x, uvec3 y);"
+ "uvec4 min(uvec4 x, uvec4 y);"
+
+ " int max(int x, int y);"
+ "ivec2 max(ivec2 x, int y);"
+ "ivec3 max(ivec3 x, int y);"
+ "ivec4 max(ivec4 x, int y);"
+ "ivec2 max(ivec2 x, ivec2 y);"
+ "ivec3 max(ivec3 x, ivec3 y);"
+ "ivec4 max(ivec4 x, ivec4 y);"
+
+ " uint max(uint x, uint y);"
+ "uvec2 max(uvec2 x, uint y);"
+ "uvec3 max(uvec3 x, uint y);"
+ "uvec4 max(uvec4 x, uint y);"
+ "uvec2 max(uvec2 x, uvec2 y);"
+ "uvec3 max(uvec3 x, uvec3 y);"
+ "uvec4 max(uvec4 x, uvec4 y);"
+
+ "int clamp(int x, int minVal, int maxVal);"
+ "ivec2 clamp(ivec2 x, int minVal, int maxVal);"
+ "ivec3 clamp(ivec3 x, int minVal, int maxVal);"
+ "ivec4 clamp(ivec4 x, int minVal, int maxVal);"
+ "ivec2 clamp(ivec2 x, ivec2 minVal, ivec2 maxVal);"
+ "ivec3 clamp(ivec3 x, ivec3 minVal, ivec3 maxVal);"
+ "ivec4 clamp(ivec4 x, ivec4 minVal, ivec4 maxVal);"
+
+ "uint clamp(uint x, uint minVal, uint maxVal);"
+ "uvec2 clamp(uvec2 x, uint minVal, uint maxVal);"
+ "uvec3 clamp(uvec3 x, uint minVal, uint maxVal);"
+ "uvec4 clamp(uvec4 x, uint minVal, uint maxVal);"
+ "uvec2 clamp(uvec2 x, uvec2 minVal, uvec2 maxVal);"
+ "uvec3 clamp(uvec3 x, uvec3 minVal, uvec3 maxVal);"
+ "uvec4 clamp(uvec4 x, uvec4 minVal, uvec4 maxVal);"
+
+ "float mix(float x, float y, bool a);"
+ "vec2 mix(vec2 x, vec2 y, bvec2 a);"
+ "vec3 mix(vec3 x, vec3 y, bvec3 a);"
+ "vec4 mix(vec4 x, vec4 y, bvec4 a);"
+
+ "bool isnan(float x);"
+ "bvec2 isnan(vec2 x);"
+ "bvec3 isnan(vec3 x);"
+ "bvec4 isnan(vec4 x);"
+
+ "bool isinf(float x);"
+ "bvec2 isinf(vec2 x);"
+ "bvec3 isinf(vec3 x);"
+ "bvec4 isinf(vec4 x);"
+
+ "\n");
+ }
+
+ //
+ // double functions added to desktop 4.00, but not fma, frexp, ldexp, or pack/unpack
+ //
+ if (profile != EEsProfile && version >= 400) {
+ commonBuiltins.append(
+
+ "double sqrt(double);"
+ "dvec2 sqrt(dvec2);"
+ "dvec3 sqrt(dvec3);"
+ "dvec4 sqrt(dvec4);"
+
+ "double inversesqrt(double);"
+ "dvec2 inversesqrt(dvec2);"
+ "dvec3 inversesqrt(dvec3);"
+ "dvec4 inversesqrt(dvec4);"
+
+ "double abs(double);"
+ "dvec2 abs(dvec2);"
+ "dvec3 abs(dvec3);"
+ "dvec4 abs(dvec4);"
+
+ "double sign(double);"
+ "dvec2 sign(dvec2);"
+ "dvec3 sign(dvec3);"
+ "dvec4 sign(dvec4);"
+
+ "double floor(double);"
+ "dvec2 floor(dvec2);"
+ "dvec3 floor(dvec3);"
+ "dvec4 floor(dvec4);"
+
+ "double trunc(double);"
+ "dvec2 trunc(dvec2);"
+ "dvec3 trunc(dvec3);"
+ "dvec4 trunc(dvec4);"
+
+ "double round(double);"
+ "dvec2 round(dvec2);"
+ "dvec3 round(dvec3);"
+ "dvec4 round(dvec4);"
+
+ "double roundEven(double);"
+ "dvec2 roundEven(dvec2);"
+ "dvec3 roundEven(dvec3);"
+ "dvec4 roundEven(dvec4);"
+
+ "double ceil(double);"
+ "dvec2 ceil(dvec2);"
+ "dvec3 ceil(dvec3);"
+ "dvec4 ceil(dvec4);"
+
+ "double fract(double);"
+ "dvec2 fract(dvec2);"
+ "dvec3 fract(dvec3);"
+ "dvec4 fract(dvec4);"
+
+ "double mod(double, double);"
+ "dvec2 mod(dvec2 , double);"
+ "dvec3 mod(dvec3 , double);"
+ "dvec4 mod(dvec4 , double);"
+ "dvec2 mod(dvec2 , dvec2);"
+ "dvec3 mod(dvec3 , dvec3);"
+ "dvec4 mod(dvec4 , dvec4);"
+
+ "double modf(double, out double);"
+ "dvec2 modf(dvec2, out dvec2);"
+ "dvec3 modf(dvec3, out dvec3);"
+ "dvec4 modf(dvec4, out dvec4);"
+
+ "double min(double, double);"
+ "dvec2 min(dvec2, double);"
+ "dvec3 min(dvec3, double);"
+ "dvec4 min(dvec4, double);"
+ "dvec2 min(dvec2, dvec2);"
+ "dvec3 min(dvec3, dvec3);"
+ "dvec4 min(dvec4, dvec4);"
+
+ "double max(double, double);"
+ "dvec2 max(dvec2 , double);"
+ "dvec3 max(dvec3 , double);"
+ "dvec4 max(dvec4 , double);"
+ "dvec2 max(dvec2 , dvec2);"
+ "dvec3 max(dvec3 , dvec3);"
+ "dvec4 max(dvec4 , dvec4);"
+
+ "double clamp(double, double, double);"
+ "dvec2 clamp(dvec2 , double, double);"
+ "dvec3 clamp(dvec3 , double, double);"
+ "dvec4 clamp(dvec4 , double, double);"
+ "dvec2 clamp(dvec2 , dvec2 , dvec2);"
+ "dvec3 clamp(dvec3 , dvec3 , dvec3);"
+ "dvec4 clamp(dvec4 , dvec4 , dvec4);"
+
+ "double mix(double, double, double);"
+ "dvec2 mix(dvec2, dvec2, double);"
+ "dvec3 mix(dvec3, dvec3, double);"
+ "dvec4 mix(dvec4, dvec4, double);"
+ "dvec2 mix(dvec2, dvec2, dvec2);"
+ "dvec3 mix(dvec3, dvec3, dvec3);"
+ "dvec4 mix(dvec4, dvec4, dvec4);"
+ "double mix(double, double, bool);"
+ "dvec2 mix(dvec2, dvec2, bvec2);"
+ "dvec3 mix(dvec3, dvec3, bvec3);"
+ "dvec4 mix(dvec4, dvec4, bvec4);"
+
+ "double step(double, double);"
+ "dvec2 step(dvec2 , dvec2);"
+ "dvec3 step(dvec3 , dvec3);"
+ "dvec4 step(dvec4 , dvec4);"
+ "dvec2 step(double, dvec2);"
+ "dvec3 step(double, dvec3);"
+ "dvec4 step(double, dvec4);"
+
+ "double smoothstep(double, double, double);"
+ "dvec2 smoothstep(dvec2 , dvec2 , dvec2);"
+ "dvec3 smoothstep(dvec3 , dvec3 , dvec3);"
+ "dvec4 smoothstep(dvec4 , dvec4 , dvec4);"
+ "dvec2 smoothstep(double, double, dvec2);"
+ "dvec3 smoothstep(double, double, dvec3);"
+ "dvec4 smoothstep(double, double, dvec4);"
+
+ "bool isnan(double);"
+ "bvec2 isnan(dvec2);"
+ "bvec3 isnan(dvec3);"
+ "bvec4 isnan(dvec4);"
+
+ "bool isinf(double);"
+ "bvec2 isinf(dvec2);"
+ "bvec3 isinf(dvec3);"
+ "bvec4 isinf(dvec4);"
+
+ "double length(double);"
+ "double length(dvec2);"
+ "double length(dvec3);"
+ "double length(dvec4);"
+
+ "double distance(double, double);"
+ "double distance(dvec2 , dvec2);"
+ "double distance(dvec3 , dvec3);"
+ "double distance(dvec4 , dvec4);"
+
+ "double dot(double, double);"
+ "double dot(dvec2 , dvec2);"
+ "double dot(dvec3 , dvec3);"
+ "double dot(dvec4 , dvec4);"
+
+ "dvec3 cross(dvec3, dvec3);"
+
+ "double normalize(double);"
+ "dvec2 normalize(dvec2);"
+ "dvec3 normalize(dvec3);"
+ "dvec4 normalize(dvec4);"
+
+ "double faceforward(double, double, double);"
+ "dvec2 faceforward(dvec2, dvec2, dvec2);"
+ "dvec3 faceforward(dvec3, dvec3, dvec3);"
+ "dvec4 faceforward(dvec4, dvec4, dvec4);"
+
+ "double reflect(double, double);"
+ "dvec2 reflect(dvec2 , dvec2 );"
+ "dvec3 reflect(dvec3 , dvec3 );"
+ "dvec4 reflect(dvec4 , dvec4 );"
+
+ "double refract(double, double, double);"
+ "dvec2 refract(dvec2 , dvec2 , double);"
+ "dvec3 refract(dvec3 , dvec3 , double);"
+ "dvec4 refract(dvec4 , dvec4 , double);"
+
+ "dmat2 matrixCompMult(dmat2, dmat2);"
+ "dmat3 matrixCompMult(dmat3, dmat3);"
+ "dmat4 matrixCompMult(dmat4, dmat4);"
+ "dmat2x3 matrixCompMult(dmat2x3, dmat2x3);"
+ "dmat2x4 matrixCompMult(dmat2x4, dmat2x4);"
+ "dmat3x2 matrixCompMult(dmat3x2, dmat3x2);"
+ "dmat3x4 matrixCompMult(dmat3x4, dmat3x4);"
+ "dmat4x2 matrixCompMult(dmat4x2, dmat4x2);"
+ "dmat4x3 matrixCompMult(dmat4x3, dmat4x3);"
+
+ "dmat2 outerProduct(dvec2, dvec2);"
+ "dmat3 outerProduct(dvec3, dvec3);"
+ "dmat4 outerProduct(dvec4, dvec4);"
+ "dmat2x3 outerProduct(dvec3, dvec2);"
+ "dmat3x2 outerProduct(dvec2, dvec3);"
+ "dmat2x4 outerProduct(dvec4, dvec2);"
+ "dmat4x2 outerProduct(dvec2, dvec4);"
+ "dmat3x4 outerProduct(dvec4, dvec3);"
+ "dmat4x3 outerProduct(dvec3, dvec4);"
+
+ "dmat2 transpose(dmat2);"
+ "dmat3 transpose(dmat3);"
+ "dmat4 transpose(dmat4);"
+ "dmat2x3 transpose(dmat3x2);"
+ "dmat3x2 transpose(dmat2x3);"
+ "dmat2x4 transpose(dmat4x2);"
+ "dmat4x2 transpose(dmat2x4);"
+ "dmat3x4 transpose(dmat4x3);"
+ "dmat4x3 transpose(dmat3x4);"
+
+ "double determinant(dmat2);"
+ "double determinant(dmat3);"
+ "double determinant(dmat4);"
+
+ "dmat2 inverse(dmat2);"
+ "dmat3 inverse(dmat3);"
+ "dmat4 inverse(dmat4);"
+
+ "bvec2 lessThan(dvec2, dvec2);"
+ "bvec3 lessThan(dvec3, dvec3);"
+ "bvec4 lessThan(dvec4, dvec4);"
+
+ "bvec2 lessThanEqual(dvec2, dvec2);"
+ "bvec3 lessThanEqual(dvec3, dvec3);"
+ "bvec4 lessThanEqual(dvec4, dvec4);"
+
+ "bvec2 greaterThan(dvec2, dvec2);"
+ "bvec3 greaterThan(dvec3, dvec3);"
+ "bvec4 greaterThan(dvec4, dvec4);"
+
+ "bvec2 greaterThanEqual(dvec2, dvec2);"
+ "bvec3 greaterThanEqual(dvec3, dvec3);"
+ "bvec4 greaterThanEqual(dvec4, dvec4);"
+
+ "bvec2 equal(dvec2, dvec2);"
+ "bvec3 equal(dvec3, dvec3);"
+ "bvec4 equal(dvec4, dvec4);"
+
+ "bvec2 notEqual(dvec2, dvec2);"
+ "bvec3 notEqual(dvec3, dvec3);"
+ "bvec4 notEqual(dvec4, dvec4);"
+
+ "\n");
+ }
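+
+ // Usage sketch: the double-precision overloads mirror their float
+ // counterparts, e.g.
+ //     double d = dot(dvec3(a), dvec3(b));
+ //     dvec3 n = normalize(dvec3(v));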
+
+ if (profile != EEsProfile && version >= 450) {
+ commonBuiltins.append(
+
+ "int64_t abs(int64_t);"
+ "i64vec2 abs(i64vec2);"
+ "i64vec3 abs(i64vec3);"
+ "i64vec4 abs(i64vec4);"
+
+ "int64_t sign(int64_t);"
+ "i64vec2 sign(i64vec2);"
+ "i64vec3 sign(i64vec3);"
+ "i64vec4 sign(i64vec4);"
+
+ "int64_t min(int64_t, int64_t);"
+ "i64vec2 min(i64vec2, int64_t);"
+ "i64vec3 min(i64vec3, int64_t);"
+ "i64vec4 min(i64vec4, int64_t);"
+ "i64vec2 min(i64vec2, i64vec2);"
+ "i64vec3 min(i64vec3, i64vec3);"
+ "i64vec4 min(i64vec4, i64vec4);"
+ "uint64_t min(uint64_t, uint64_t);"
+ "u64vec2 min(u64vec2, uint64_t);"
+ "u64vec3 min(u64vec3, uint64_t);"
+ "u64vec4 min(u64vec4, uint64_t);"
+ "u64vec2 min(u64vec2, u64vec2);"
+ "u64vec3 min(u64vec3, u64vec3);"
+ "u64vec4 min(u64vec4, u64vec4);"
+
+ "int64_t max(int64_t, int64_t);"
+ "i64vec2 max(i64vec2, int64_t);"
+ "i64vec3 max(i64vec3, int64_t);"
+ "i64vec4 max(i64vec4, int64_t);"
+ "i64vec2 max(i64vec2, i64vec2);"
+ "i64vec3 max(i64vec3, i64vec3);"
+ "i64vec4 max(i64vec4, i64vec4);"
+ "uint64_t max(uint64_t, uint64_t);"
+ "u64vec2 max(u64vec2, uint64_t);"
+ "u64vec3 max(u64vec3, uint64_t);"
+ "u64vec4 max(u64vec4, uint64_t);"
+ "u64vec2 max(u64vec2, u64vec2);"
+ "u64vec3 max(u64vec3, u64vec3);"
+ "u64vec4 max(u64vec4, u64vec4);"
+
+ "int64_t clamp(int64_t, int64_t, int64_t);"
+ "i64vec2 clamp(i64vec2, int64_t, int64_t);"
+ "i64vec3 clamp(i64vec3, int64_t, int64_t);"
+ "i64vec4 clamp(i64vec4, int64_t, int64_t);"
+ "i64vec2 clamp(i64vec2, i64vec2, i64vec2);"
+ "i64vec3 clamp(i64vec3, i64vec3, i64vec3);"
+ "i64vec4 clamp(i64vec4, i64vec4, i64vec4);"
+ "uint64_t clamp(uint64_t, uint64_t, uint64_t);"
+ "u64vec2 clamp(u64vec2, uint64_t, uint64_t);"
+ "u64vec3 clamp(u64vec3, uint64_t, uint64_t);"
+ "u64vec4 clamp(u64vec4, uint64_t, uint64_t);"
+ "u64vec2 clamp(u64vec2, u64vec2, u64vec2);"
+ "u64vec3 clamp(u64vec3, u64vec3, u64vec3);"
+ "u64vec4 clamp(u64vec4, u64vec4, u64vec4);"
+
+ "int64_t mix(int64_t, int64_t, bool);"
+ "i64vec2 mix(i64vec2, i64vec2, bvec2);"
+ "i64vec3 mix(i64vec3, i64vec3, bvec3);"
+ "i64vec4 mix(i64vec4, i64vec4, bvec4);"
+ "uint64_t mix(uint64_t, uint64_t, bool);"
+ "u64vec2 mix(u64vec2, u64vec2, bvec2);"
+ "u64vec3 mix(u64vec3, u64vec3, bvec3);"
+ "u64vec4 mix(u64vec4, u64vec4, bvec4);"
+
+ "int64_t doubleBitsToInt64(double);"
+ "i64vec2 doubleBitsToInt64(dvec2);"
+ "i64vec3 doubleBitsToInt64(dvec3);"
+ "i64vec4 doubleBitsToInt64(dvec4);"
+
+ "uint64_t doubleBitsToUint64(double);"
+ "u64vec2 doubleBitsToUint64(dvec2);"
+ "u64vec3 doubleBitsToUint64(dvec3);"
+ "u64vec4 doubleBitsToUint64(dvec4);"
+
+ "double int64BitsToDouble(int64_t);"
+ "dvec2 int64BitsToDouble(i64vec2);"
+ "dvec3 int64BitsToDouble(i64vec3);"
+ "dvec4 int64BitsToDouble(i64vec4);"
+
+ "double uint64BitsToDouble(uint64_t);"
+ "dvec2 uint64BitsToDouble(u64vec2);"
+ "dvec3 uint64BitsToDouble(u64vec3);"
+ "dvec4 uint64BitsToDouble(u64vec4);"
+
+ "int64_t packInt2x32(ivec2);"
+ "uint64_t packUint2x32(uvec2);"
+ "ivec2 unpackInt2x32(int64_t);"
+ "uvec2 unpackUint2x32(uint64_t);"
+
+ "bvec2 lessThan(i64vec2, i64vec2);"
+ "bvec3 lessThan(i64vec3, i64vec3);"
+ "bvec4 lessThan(i64vec4, i64vec4);"
+ "bvec2 lessThan(u64vec2, u64vec2);"
+ "bvec3 lessThan(u64vec3, u64vec3);"
+ "bvec4 lessThan(u64vec4, u64vec4);"
+
+ "bvec2 lessThanEqual(i64vec2, i64vec2);"
+ "bvec3 lessThanEqual(i64vec3, i64vec3);"
+ "bvec4 lessThanEqual(i64vec4, i64vec4);"
+ "bvec2 lessThanEqual(u64vec2, u64vec2);"
+ "bvec3 lessThanEqual(u64vec3, u64vec3);"
+ "bvec4 lessThanEqual(u64vec4, u64vec4);"
+
+ "bvec2 greaterThan(i64vec2, i64vec2);"
+ "bvec3 greaterThan(i64vec3, i64vec3);"
+ "bvec4 greaterThan(i64vec4, i64vec4);"
+ "bvec2 greaterThan(u64vec2, u64vec2);"
+ "bvec3 greaterThan(u64vec3, u64vec3);"
+ "bvec4 greaterThan(u64vec4, u64vec4);"
+
+ "bvec2 greaterThanEqual(i64vec2, i64vec2);"
+ "bvec3 greaterThanEqual(i64vec3, i64vec3);"
+ "bvec4 greaterThanEqual(i64vec4, i64vec4);"
+ "bvec2 greaterThanEqual(u64vec2, u64vec2);"
+ "bvec3 greaterThanEqual(u64vec3, u64vec3);"
+ "bvec4 greaterThanEqual(u64vec4, u64vec4);"
+
+ "bvec2 equal(i64vec2, i64vec2);"
+ "bvec3 equal(i64vec3, i64vec3);"
+ "bvec4 equal(i64vec4, i64vec4);"
+ "bvec2 equal(u64vec2, u64vec2);"
+ "bvec3 equal(u64vec3, u64vec3);"
+ "bvec4 equal(u64vec4, u64vec4);"
+
+ "bvec2 notEqual(i64vec2, i64vec2);"
+ "bvec3 notEqual(i64vec3, i64vec3);"
+ "bvec4 notEqual(i64vec4, i64vec4);"
+ "bvec2 notEqual(u64vec2, u64vec2);"
+ "bvec3 notEqual(u64vec3, u64vec3);"
+ "bvec4 notEqual(u64vec4, u64vec4);"
+
+ "int findLSB(int64_t);"
+ "ivec2 findLSB(i64vec2);"
+ "ivec3 findLSB(i64vec3);"
+ "ivec4 findLSB(i64vec4);"
+
+ "int findLSB(uint64_t);"
+ "ivec2 findLSB(u64vec2);"
+ "ivec3 findLSB(u64vec3);"
+ "ivec4 findLSB(u64vec4);"
+
+ "int findMSB(int64_t);"
+ "ivec2 findMSB(i64vec2);"
+ "ivec3 findMSB(i64vec3);"
+ "ivec4 findMSB(i64vec4);"
+
+ "int findMSB(uint64_t);"
+ "ivec2 findMSB(u64vec2);"
+ "ivec3 findMSB(u64vec3);"
+ "ivec4 findMSB(u64vec4);"
+
+ "\n"
+ );
+ }
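+
+ // Usage sketch for the 64-bit pack/unpack pair: the first vector component
+ // holds the 32 least significant bits, so
+ //     int64_t packed = packInt2x32(ivec2(lo, hi));
+ //     ivec2 parts = unpackInt2x32(packed); // parts == ivec2(lo, hi)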
+
+#ifdef AMD_EXTENSIONS
+ // GL_AMD_shader_trinary_minmax
+ if (profile != EEsProfile && version >= 430) {
+ commonBuiltins.append(
+ "float min3(float, float, float);"
+ "vec2 min3(vec2, vec2, vec2);"
+ "vec3 min3(vec3, vec3, vec3);"
+ "vec4 min3(vec4, vec4, vec4);"
+
+ "int min3(int, int, int);"
+ "ivec2 min3(ivec2, ivec2, ivec2);"
+ "ivec3 min3(ivec3, ivec3, ivec3);"
+ "ivec4 min3(ivec4, ivec4, ivec4);"
+
+ "uint min3(uint, uint, uint);"
+ "uvec2 min3(uvec2, uvec2, uvec2);"
+ "uvec3 min3(uvec3, uvec3, uvec3);"
+ "uvec4 min3(uvec4, uvec4, uvec4);"
+
+ "float max3(float, float, float);"
+ "vec2 max3(vec2, vec2, vec2);"
+ "vec3 max3(vec3, vec3, vec3);"
+ "vec4 max3(vec4, vec4, vec4);"
+
+ "int max3(int, int, int);"
+ "ivec2 max3(ivec2, ivec2, ivec2);"
+ "ivec3 max3(ivec3, ivec3, ivec3);"
+ "ivec4 max3(ivec4, ivec4, ivec4);"
+
+ "uint max3(uint, uint, uint);"
+ "uvec2 max3(uvec2, uvec2, uvec2);"
+ "uvec3 max3(uvec3, uvec3, uvec3);"
+ "uvec4 max3(uvec4, uvec4, uvec4);"
+
+ "float mid3(float, float, float);"
+ "vec2 mid3(vec2, vec2, vec2);"
+ "vec3 mid3(vec3, vec3, vec3);"
+ "vec4 mid3(vec4, vec4, vec4);"
+
+ "int mid3(int, int, int);"
+ "ivec2 mid3(ivec2, ivec2, ivec2);"
+ "ivec3 mid3(ivec3, ivec3, ivec3);"
+ "ivec4 mid3(ivec4, ivec4, ivec4);"
+
+ "uint mid3(uint, uint, uint);"
+ "uvec2 mid3(uvec2, uvec2, uvec2);"
+ "uvec3 mid3(uvec3, uvec3, uvec3);"
+ "uvec4 mid3(uvec4, uvec4, uvec4);"
+
+ "float16_t min3(float16_t, float16_t, float16_t);"
+ "f16vec2 min3(f16vec2, f16vec2, f16vec2);"
+ "f16vec3 min3(f16vec3, f16vec3, f16vec3);"
+ "f16vec4 min3(f16vec4, f16vec4, f16vec4);"
+
+ "float16_t max3(float16_t, float16_t, float16_t);"
+ "f16vec2 max3(f16vec2, f16vec2, f16vec2);"
+ "f16vec3 max3(f16vec3, f16vec3, f16vec3);"
+ "f16vec4 max3(f16vec4, f16vec4, f16vec4);"
+
+ "float16_t mid3(float16_t, float16_t, float16_t);"
+ "f16vec2 mid3(f16vec2, f16vec2, f16vec2);"
+ "f16vec3 mid3(f16vec3, f16vec3, f16vec3);"
+ "f16vec4 mid3(f16vec4, f16vec4, f16vec4);"
+
+ "int16_t min3(int16_t, int16_t, int16_t);"
+ "i16vec2 min3(i16vec2, i16vec2, i16vec2);"
+ "i16vec3 min3(i16vec3, i16vec3, i16vec3);"
+ "i16vec4 min3(i16vec4, i16vec4, i16vec4);"
+
+ "int16_t max3(int16_t, int16_t, int16_t);"
+ "i16vec2 max3(i16vec2, i16vec2, i16vec2);"
+ "i16vec3 max3(i16vec3, i16vec3, i16vec3);"
+ "i16vec4 max3(i16vec4, i16vec4, i16vec4);"
+
+ "int16_t mid3(int16_t, int16_t, int16_t);"
+ "i16vec2 mid3(i16vec2, i16vec2, i16vec2);"
+ "i16vec3 mid3(i16vec3, i16vec3, i16vec3);"
+ "i16vec4 mid3(i16vec4, i16vec4, i16vec4);"
+
+ "uint16_t min3(uint16_t, uint16_t, uint16_t);"
+ "u16vec2 min3(u16vec2, u16vec2, u16vec2);"
+ "u16vec3 min3(u16vec3, u16vec3, u16vec3);"
+ "u16vec4 min3(u16vec4, u16vec4, u16vec4);"
+
+ "uint16_t max3(uint16_t, uint16_t, uint16_t);"
+ "u16vec2 max3(u16vec2, u16vec2, u16vec2);"
+ "u16vec3 max3(u16vec3, u16vec3, u16vec3);"
+ "u16vec4 max3(u16vec4, u16vec4, u16vec4);"
+
+ "uint16_t mid3(uint16_t, uint16_t, uint16_t);"
+ "u16vec2 mid3(u16vec2, u16vec2, u16vec2);"
+ "u16vec3 mid3(u16vec3, u16vec3, u16vec3);"
+ "u16vec4 mid3(u16vec4, u16vec4, u16vec4);"
+
+ "\n"
+ );
+ }
+#endif
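+
+ // Usage sketch (GL_AMD_shader_trinary_minmax): mid3() returns the median of
+ // its three operands, which gives a per-channel 3-tap median filter:
+ //     vec3 median = mid3(c0, c1, c2);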
+
+ if ((profile == EEsProfile && version >= 310) ||
+ (profile != EEsProfile && version >= 430)) {
+ commonBuiltins.append(
+ "uint atomicAdd(coherent volatile inout uint, uint);"
+ " int atomicAdd(coherent volatile inout int, int);"
+ "uint atomicAdd(coherent volatile inout uint, uint, int, int, int);"
+ " int atomicAdd(coherent volatile inout int, int, int, int, int);"
+
+ "uint atomicMin(coherent volatile inout uint, uint);"
+ " int atomicMin(coherent volatile inout int, int);"
+ "uint atomicMin(coherent volatile inout uint, uint, int, int, int);"
+ " int atomicMin(coherent volatile inout int, int, int, int, int);"
+
+ "uint atomicMax(coherent volatile inout uint, uint);"
+ " int atomicMax(coherent volatile inout int, int);"
+ "uint atomicMax(coherent volatile inout uint, uint, int, int, int);"
+ " int atomicMax(coherent volatile inout int, int, int, int, int);"
+
+ "uint atomicAnd(coherent volatile inout uint, uint);"
+ " int atomicAnd(coherent volatile inout int, int);"
+ "uint atomicAnd(coherent volatile inout uint, uint, int, int, int);"
+ " int atomicAnd(coherent volatile inout int, int, int, int, int);"
+
+ "uint atomicOr (coherent volatile inout uint, uint);"
+ " int atomicOr (coherent volatile inout int, int);"
+ "uint atomicOr (coherent volatile inout uint, uint, int, int, int);"
+ " int atomicOr (coherent volatile inout int, int, int, int, int);"
+
+ "uint atomicXor(coherent volatile inout uint, uint);"
+ " int atomicXor(coherent volatile inout int, int);"
+ "uint atomicXor(coherent volatile inout uint, uint, int, int, int);"
+ " int atomicXor(coherent volatile inout int, int, int, int, int);"
+
+ "uint atomicExchange(coherent volatile inout uint, uint);"
+ " int atomicExchange(coherent volatile inout int, int);"
+ "uint atomicExchange(coherent volatile inout uint, uint, int, int, int);"
+ " int atomicExchange(coherent volatile inout int, int, int, int, int);"
+
+ "uint atomicCompSwap(coherent volatile inout uint, uint, uint);"
+ " int atomicCompSwap(coherent volatile inout int, int, int);"
+ "uint atomicCompSwap(coherent volatile inout uint, uint, uint, int, int, int, int, int);"
+ " int atomicCompSwap(coherent volatile inout int, int, int, int, int, int, int, int);"
+
+ "uint atomicLoad(coherent volatile in uint, int, int, int);"
+ " int atomicLoad(coherent volatile in int, int, int, int);"
+
+ "void atomicStore(coherent volatile out uint, uint, int, int, int);"
+ "void atomicStore(coherent volatile out int, int, int, int, int);"
+
+ "\n");
+ }
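+
+ // Usage sketch: the short forms are the classic memory atomics and return
+ // the value held before the operation; the overloads with three trailing
+ // ints take the scope/storage/semantics arguments of
+ // GL_KHR_memory_scope_semantics. For example:
+ //     shared uint histogram[256];
+ //     uint prev = atomicAdd(histogram[bin], 1u);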
+
+ if (profile != EEsProfile && version >= 440) {
+ commonBuiltins.append(
+ "uint64_t atomicMin(coherent volatile inout uint64_t, uint64_t);"
+ " int64_t atomicMin(coherent volatile inout int64_t, int64_t);"
+ "uint64_t atomicMin(coherent volatile inout uint64_t, uint64_t, int, int, int);"
+ " int64_t atomicMin(coherent volatile inout int64_t, int64_t, int, int, int);"
+
+ "uint64_t atomicMax(coherent volatile inout uint64_t, uint64_t);"
+ " int64_t atomicMax(coherent volatile inout int64_t, int64_t);"
+ "uint64_t atomicMax(coherent volatile inout uint64_t, uint64_t, int, int, int);"
+ " int64_t atomicMax(coherent volatile inout int64_t, int64_t, int, int, int);"
+
+ "uint64_t atomicAnd(coherent volatile inout uint64_t, uint64_t);"
+ " int64_t atomicAnd(coherent volatile inout int64_t, int64_t);"
+ "uint64_t atomicAnd(coherent volatile inout uint64_t, uint64_t, int, int, int);"
+ " int64_t atomicAnd(coherent volatile inout int64_t, int64_t, int, int, int);"
+
+ "uint64_t atomicOr (coherent volatile inout uint64_t, uint64_t);"
+ " int64_t atomicOr (coherent volatile inout int64_t, int64_t);"
+ "uint64_t atomicOr (coherent volatile inout uint64_t, uint64_t, int, int, int);"
+ " int64_t atomicOr (coherent volatile inout int64_t, int64_t, int, int, int);"
+
+ "uint64_t atomicXor(coherent volatile inout uint64_t, uint64_t);"
+ " int64_t atomicXor(coherent volatile inout int64_t, int64_t);"
+ "uint64_t atomicXor(coherent volatile inout uint64_t, uint64_t, int, int, int);"
+ " int64_t atomicXor(coherent volatile inout int64_t, int64_t, int, int, int);"
+
+ "uint64_t atomicAdd(coherent volatile inout uint64_t, uint64_t);"
+ " int64_t atomicAdd(coherent volatile inout int64_t, int64_t);"
+ "uint64_t atomicAdd(coherent volatile inout uint64_t, uint64_t, int, int, int);"
+ " int64_t atomicAdd(coherent volatile inout int64_t, int64_t, int, int, int);"
+
+ "uint64_t atomicExchange(coherent volatile inout uint64_t, uint64_t);"
+ " int64_t atomicExchange(coherent volatile inout int64_t, int64_t);"
+ "uint64_t atomicExchange(coherent volatile inout uint64_t, uint64_t, int, int, int);"
+ " int64_t atomicExchange(coherent volatile inout int64_t, int64_t, int, int, int);"
+
+ "uint64_t atomicCompSwap(coherent volatile inout uint64_t, uint64_t, uint64_t);"
+ " int64_t atomicCompSwap(coherent volatile inout int64_t, int64_t, int64_t);"
+ "uint64_t atomicCompSwap(coherent volatile inout uint64_t, uint64_t, uint64_t, int, int, int, int, int);"
+ " int64_t atomicCompSwap(coherent volatile inout int64_t, int64_t, int64_t, int, int, int, int, int);"
+
+ "uint64_t atomicLoad(coherent volatile in uint64_t, int, int, int);"
+ " int64_t atomicLoad(coherent volatile in int64_t, int, int, int);"
+
+ "void atomicStore(coherent volatile out uint64_t, uint64_t, int, int, int);"
+ "void atomicStore(coherent volatile out int64_t, int64_t, int, int, int);"
+ "\n");
+ }
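+
+ // Usage sketch: a compare-and-swap loop with the 64-bit forms (assuming a
+ // buffer member "counter" of type uint64_t):
+ //     uint64_t old = counter, seen;
+ //     do { seen = old; old = atomicCompSwap(counter, seen, seen + 1ul); }
+ //     while (old != seen);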
+
+ if ((profile == EEsProfile && version >= 310) ||
+ (profile != EEsProfile && version >= 450)) {
+ commonBuiltins.append(
+ "int mix(int x, int y, bool a);"
+ "ivec2 mix(ivec2 x, ivec2 y, bvec2 a);"
+ "ivec3 mix(ivec3 x, ivec3 y, bvec3 a);"
+ "ivec4 mix(ivec4 x, ivec4 y, bvec4 a);"
+
+ "uint mix(uint x, uint y, bool a);"
+ "uvec2 mix(uvec2 x, uvec2 y, bvec2 a);"
+ "uvec3 mix(uvec3 x, uvec3 y, bvec3 a);"
+ "uvec4 mix(uvec4 x, uvec4 y, bvec4 a);"
+
+ "bool mix(bool x, bool y, bool a);"
+ "bvec2 mix(bvec2 x, bvec2 y, bvec2 a);"
+ "bvec3 mix(bvec3 x, bvec3 y, bvec3 a);"
+ "bvec4 mix(bvec4 x, bvec4 y, bvec4 a);"
+
+ "\n");
+ }
+
+ if ((profile == EEsProfile && version >= 300) ||
+ (profile != EEsProfile && version >= 330)) {
+ commonBuiltins.append(
+ "int floatBitsToInt(highp float value);"
+ "ivec2 floatBitsToInt(highp vec2 value);"
+ "ivec3 floatBitsToInt(highp vec3 value);"
+ "ivec4 floatBitsToInt(highp vec4 value);"
+
+ "uint floatBitsToUint(highp float value);"
+ "uvec2 floatBitsToUint(highp vec2 value);"
+ "uvec3 floatBitsToUint(highp vec3 value);"
+ "uvec4 floatBitsToUint(highp vec4 value);"
+
+ "float intBitsToFloat(highp int value);"
+ "vec2 intBitsToFloat(highp ivec2 value);"
+ "vec3 intBitsToFloat(highp ivec3 value);"
+ "vec4 intBitsToFloat(highp ivec4 value);"
+
+ "float uintBitsToFloat(highp uint value);"
+ "vec2 uintBitsToFloat(highp uvec2 value);"
+ "vec3 uintBitsToFloat(highp uvec3 value);"
+ "vec4 uintBitsToFloat(highp uvec4 value);"
+
+ "\n");
+ }
+
+ if ((profile != EEsProfile && version >= 400) ||
+ (profile == EEsProfile && version >= 310)) { // GL_OES_gpu_shader5
+
+ commonBuiltins.append(
+ "float fma(float, float, float );"
+ "vec2 fma(vec2, vec2, vec2 );"
+ "vec3 fma(vec3, vec3, vec3 );"
+ "vec4 fma(vec4, vec4, vec4 );"
+ "\n");
+
+ if (profile != EEsProfile) {
+ commonBuiltins.append(
+ "double fma(double, double, double);"
+ "dvec2 fma(dvec2, dvec2, dvec2 );"
+ "dvec3 fma(dvec3, dvec3, dvec3 );"
+ "dvec4 fma(dvec4, dvec4, dvec4 );"
+ "\n");
+ }
+ }
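+
+ // fma(a, b, c) computes a * b + c; when consumed by a 'precise'
+ // declaration it must be a single fused multiply-add (one rounding):
+ //     precise float r = fma(a, b, c);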
+
+ if ((profile == EEsProfile && version >= 310) ||
+ (profile != EEsProfile && version >= 400)) {
+ commonBuiltins.append(
+ "float frexp(highp float, out highp int);"
+ "vec2 frexp(highp vec2, out highp ivec2);"
+ "vec3 frexp(highp vec3, out highp ivec3);"
+ "vec4 frexp(highp vec4, out highp ivec4);"
+
+ "float ldexp(highp float, highp int);"
+ "vec2 ldexp(highp vec2, highp ivec2);"
+ "vec3 ldexp(highp vec3, highp ivec3);"
+ "vec4 ldexp(highp vec4, highp ivec4);"
+
+ "\n");
+ }
+
+ if (profile != EEsProfile && version >= 400) {
+ commonBuiltins.append(
+ "double frexp(double, out int);"
+ "dvec2 frexp( dvec2, out ivec2);"
+ "dvec3 frexp( dvec3, out ivec3);"
+ "dvec4 frexp( dvec4, out ivec4);"
+
+ "double ldexp(double, int);"
+ "dvec2 ldexp( dvec2, ivec2);"
+ "dvec3 ldexp( dvec3, ivec3);"
+ "dvec4 ldexp( dvec4, ivec4);"
+
+ "double packDouble2x32(uvec2);"
+ "uvec2 unpackDouble2x32(double);"
+
+ "\n");
+ }
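+
+ // frexp and ldexp round-trip: for finite non-zero x,
+ //     int e;
+ //     float m = frexp(x, e); // x == m * 2^e, with 0.5 <= |m| < 1.0
+ //     float y = ldexp(m, e); // y == x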
+
+ if ((profile == EEsProfile && version >= 300) ||
+ (profile != EEsProfile && version >= 400)) {
+ commonBuiltins.append(
+ "highp uint packUnorm2x16(vec2);"
+ "vec2 unpackUnorm2x16(highp uint);"
+ "\n");
+ }
+
+ if ((profile == EEsProfile && version >= 300) ||
+ (profile != EEsProfile && version >= 420)) {
+ commonBuiltins.append(
+ "highp uint packSnorm2x16(vec2);"
+ " vec2 unpackSnorm2x16(highp uint);"
+ "highp uint packHalf2x16(vec2);"
+ "\n");
+ }
+
+ if (profile == EEsProfile && version >= 300) {
+ commonBuiltins.append(
+ "mediump vec2 unpackHalf2x16(highp uint);"
+ "\n");
+ } else if (profile != EEsProfile && version >= 420) {
+ commonBuiltins.append(
+ " vec2 unpackHalf2x16(highp uint);"
+ "\n");
+ }
+
+ if ((profile == EEsProfile && version >= 310) ||
+ (profile != EEsProfile && version >= 400)) {
+ commonBuiltins.append(
+ "highp uint packSnorm4x8(vec4);"
+ "highp uint packUnorm4x8(vec4);"
+ "\n");
+ }
+
+ if (profile == EEsProfile && version >= 310) {
+ commonBuiltins.append(
+ "mediump vec4 unpackSnorm4x8(highp uint);"
+ "mediump vec4 unpackUnorm4x8(highp uint);"
+ "\n");
+ } else if (profile != EEsProfile && version >= 400) {
+ commonBuiltins.append(
+ "vec4 unpackSnorm4x8(highp uint);"
+ "vec4 unpackUnorm4x8(highp uint);"
+ "\n");
+ }
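+
+ // Usage sketch: the normalized pack/unpack pairs clamp each component into
+ // range and quantize it to 16 (or 8) bits, so the round trip is lossy but
+ // close:
+ //     highp uint bits = packUnorm2x16(vec2(0.25, 1.0));
+ //     vec2 v = unpackUnorm2x16(bits); // approximately vec2(0.25, 1.0)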
+
+ //
+ // Geometric Functions.
+ //
+ commonBuiltins.append(
+ "float length(float x);"
+ "float length(vec2 x);"
+ "float length(vec3 x);"
+ "float length(vec4 x);"
+
+ "float distance(float p0, float p1);"
+ "float distance(vec2 p0, vec2 p1);"
+ "float distance(vec3 p0, vec3 p1);"
+ "float distance(vec4 p0, vec4 p1);"
+
+ "float dot(float x, float y);"
+ "float dot(vec2 x, vec2 y);"
+ "float dot(vec3 x, vec3 y);"
+ "float dot(vec4 x, vec4 y);"
+
+ "vec3 cross(vec3 x, vec3 y);"
+ "float normalize(float x);"
+ "vec2 normalize(vec2 x);"
+ "vec3 normalize(vec3 x);"
+ "vec4 normalize(vec4 x);"
+
+ "float faceforward(float N, float I, float Nref);"
+ "vec2 faceforward(vec2 N, vec2 I, vec2 Nref);"
+ "vec3 faceforward(vec3 N, vec3 I, vec3 Nref);"
+ "vec4 faceforward(vec4 N, vec4 I, vec4 Nref);"
+
+ "float reflect(float I, float N);"
+ "vec2 reflect(vec2 I, vec2 N);"
+ "vec3 reflect(vec3 I, vec3 N);"
+ "vec4 reflect(vec4 I, vec4 N);"
+
+ "float refract(float I, float N, float eta);"
+ "vec2 refract(vec2 I, vec2 N, float eta);"
+ "vec3 refract(vec3 I, vec3 N, float eta);"
+ "vec4 refract(vec4 I, vec4 N, float eta);"
+
+ "\n");
+
+ //
+ // Matrix Functions.
+ //
+ commonBuiltins.append(
+ "mat2 matrixCompMult(mat2 x, mat2 y);"
+ "mat3 matrixCompMult(mat3 x, mat3 y);"
+ "mat4 matrixCompMult(mat4 x, mat4 y);"
+
+ "\n");
+
+ // A guard of version >= 120 is correct for both ES and desktop here
+ if (version >= 120) {
+ commonBuiltins.append(
+ "mat2 outerProduct(vec2 c, vec2 r);"
+ "mat3 outerProduct(vec3 c, vec3 r);"
+ "mat4 outerProduct(vec4 c, vec4 r);"
+ "mat2x3 outerProduct(vec3 c, vec2 r);"
+ "mat3x2 outerProduct(vec2 c, vec3 r);"
+ "mat2x4 outerProduct(vec4 c, vec2 r);"
+ "mat4x2 outerProduct(vec2 c, vec4 r);"
+ "mat3x4 outerProduct(vec4 c, vec3 r);"
+ "mat4x3 outerProduct(vec3 c, vec4 r);"
+
+ "mat2 transpose(mat2 m);"
+ "mat3 transpose(mat3 m);"
+ "mat4 transpose(mat4 m);"
+ "mat2x3 transpose(mat3x2 m);"
+ "mat3x2 transpose(mat2x3 m);"
+ "mat2x4 transpose(mat4x2 m);"
+ "mat4x2 transpose(mat2x4 m);"
+ "mat3x4 transpose(mat4x3 m);"
+ "mat4x3 transpose(mat3x4 m);"
+
+ "mat2x3 matrixCompMult(mat2x3, mat2x3);"
+ "mat2x4 matrixCompMult(mat2x4, mat2x4);"
+ "mat3x2 matrixCompMult(mat3x2, mat3x2);"
+ "mat3x4 matrixCompMult(mat3x4, mat3x4);"
+ "mat4x2 matrixCompMult(mat4x2, mat4x2);"
+ "mat4x3 matrixCompMult(mat4x3, mat4x3);"
+
+ "\n");
+
+ // A guard of version >= 150 is correct for both ES and desktop here
+ if (version >= 150) {
+ commonBuiltins.append(
+ "float determinant(mat2 m);"
+ "float determinant(mat3 m);"
+ "float determinant(mat4 m);"
+
+ "mat2 inverse(mat2 m);"
+ "mat3 inverse(mat3 m);"
+ "mat4 inverse(mat4 m);"
+
+ "\n");
+ }
+ }
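+
+ // Usage sketch: outerProduct(c, r) treats c as a column and r as a row;
+ // column j of the result is c scaled by r[j]:
+ //     mat3 m = outerProduct(c, r); // m[j] == c * r[j]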
+
+ //
+ // Vector relational functions.
+ //
+ commonBuiltins.append(
+ "bvec2 lessThan(vec2 x, vec2 y);"
+ "bvec3 lessThan(vec3 x, vec3 y);"
+ "bvec4 lessThan(vec4 x, vec4 y);"
+
+ "bvec2 lessThan(ivec2 x, ivec2 y);"
+ "bvec3 lessThan(ivec3 x, ivec3 y);"
+ "bvec4 lessThan(ivec4 x, ivec4 y);"
+
+ "bvec2 lessThanEqual(vec2 x, vec2 y);"
+ "bvec3 lessThanEqual(vec3 x, vec3 y);"
+ "bvec4 lessThanEqual(vec4 x, vec4 y);"
+
+ "bvec2 lessThanEqual(ivec2 x, ivec2 y);"
+ "bvec3 lessThanEqual(ivec3 x, ivec3 y);"
+ "bvec4 lessThanEqual(ivec4 x, ivec4 y);"
+
+ "bvec2 greaterThan(vec2 x, vec2 y);"
+ "bvec3 greaterThan(vec3 x, vec3 y);"
+ "bvec4 greaterThan(vec4 x, vec4 y);"
+
+ "bvec2 greaterThan(ivec2 x, ivec2 y);"
+ "bvec3 greaterThan(ivec3 x, ivec3 y);"
+ "bvec4 greaterThan(ivec4 x, ivec4 y);"
+
+ "bvec2 greaterThanEqual(vec2 x, vec2 y);"
+ "bvec3 greaterThanEqual(vec3 x, vec3 y);"
+ "bvec4 greaterThanEqual(vec4 x, vec4 y);"
+
+ "bvec2 greaterThanEqual(ivec2 x, ivec2 y);"
+ "bvec3 greaterThanEqual(ivec3 x, ivec3 y);"
+ "bvec4 greaterThanEqual(ivec4 x, ivec4 y);"
+
+ "bvec2 equal(vec2 x, vec2 y);"
+ "bvec3 equal(vec3 x, vec3 y);"
+ "bvec4 equal(vec4 x, vec4 y);"
+
+ "bvec2 equal(ivec2 x, ivec2 y);"
+ "bvec3 equal(ivec3 x, ivec3 y);"
+ "bvec4 equal(ivec4 x, ivec4 y);"
+
+ "bvec2 equal(bvec2 x, bvec2 y);"
+ "bvec3 equal(bvec3 x, bvec3 y);"
+ "bvec4 equal(bvec4 x, bvec4 y);"
+
+ "bvec2 notEqual(vec2 x, vec2 y);"
+ "bvec3 notEqual(vec3 x, vec3 y);"
+ "bvec4 notEqual(vec4 x, vec4 y);"
+
+ "bvec2 notEqual(ivec2 x, ivec2 y);"
+ "bvec3 notEqual(ivec3 x, ivec3 y);"
+ "bvec4 notEqual(ivec4 x, ivec4 y);"
+
+ "bvec2 notEqual(bvec2 x, bvec2 y);"
+ "bvec3 notEqual(bvec3 x, bvec3 y);"
+ "bvec4 notEqual(bvec4 x, bvec4 y);"
+
+ "bool any(bvec2 x);"
+ "bool any(bvec3 x);"
+ "bool any(bvec4 x);"
+
+ "bool all(bvec2 x);"
+ "bool all(bvec3 x);"
+ "bool all(bvec4 x);"
+
+ "bvec2 not(bvec2 x);"
+ "bvec3 not(bvec3 x);"
+ "bvec4 not(bvec4 x);"
+
+ "\n");
+
+ if (version >= 130) {
+ commonBuiltins.append(
+ "bvec2 lessThan(uvec2 x, uvec2 y);"
+ "bvec3 lessThan(uvec3 x, uvec3 y);"
+ "bvec4 lessThan(uvec4 x, uvec4 y);"
+
+ "bvec2 lessThanEqual(uvec2 x, uvec2 y);"
+ "bvec3 lessThanEqual(uvec3 x, uvec3 y);"
+ "bvec4 lessThanEqual(uvec4 x, uvec4 y);"
+
+ "bvec2 greaterThan(uvec2 x, uvec2 y);"
+ "bvec3 greaterThan(uvec3 x, uvec3 y);"
+ "bvec4 greaterThan(uvec4 x, uvec4 y);"
+
+ "bvec2 greaterThanEqual(uvec2 x, uvec2 y);"
+ "bvec3 greaterThanEqual(uvec3 x, uvec3 y);"
+ "bvec4 greaterThanEqual(uvec4 x, uvec4 y);"
+
+ "bvec2 equal(uvec2 x, uvec2 y);"
+ "bvec3 equal(uvec3 x, uvec3 y);"
+ "bvec4 equal(uvec4 x, uvec4 y);"
+
+ "bvec2 notEqual(uvec2 x, uvec2 y);"
+ "bvec3 notEqual(uvec3 x, uvec3 y);"
+ "bvec4 notEqual(uvec4 x, uvec4 y);"
+
+ "\n");
+ }
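+
+ // The relational built-ins are component-wise and combine with any()/all():
+ //     if (all(lessThan(uv, vec2(1.0)))) { } // every component in range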
+
+ //
+ // Original-style texture functions existing in all stages.
+ // (Per-stage functions below.)
+ //
+ if ((profile == EEsProfile && version == 100) ||
+ profile == ECompatibilityProfile ||
+ (profile == ECoreProfile && version < 420) ||
+ profile == ENoProfile) {
+ if (spvVersion.spv == 0) {
+ commonBuiltins.append(
+ "vec4 texture2D(sampler2D, vec2);"
+
+ "vec4 texture2DProj(sampler2D, vec3);"
+ "vec4 texture2DProj(sampler2D, vec4);"
+
+ "vec4 texture3D(sampler3D, vec3);" // OES_texture_3D, but caught by keyword check
+ "vec4 texture3DProj(sampler3D, vec4);" // OES_texture_3D, but caught by keyword check
+
+ "vec4 textureCube(samplerCube, vec3);"
+
+ "\n");
+ }
+ }
+
+ if ( profile == ECompatibilityProfile ||
+ (profile == ECoreProfile && version < 420) ||
+ profile == ENoProfile) {
+ if (spvVersion.spv == 0) {
+ commonBuiltins.append(
+ "vec4 texture1D(sampler1D, float);"
+
+ "vec4 texture1DProj(sampler1D, vec2);"
+ "vec4 texture1DProj(sampler1D, vec4);"
+
+ "vec4 shadow1D(sampler1DShadow, vec3);"
+ "vec4 shadow2D(sampler2DShadow, vec3);"
+ "vec4 shadow1DProj(sampler1DShadow, vec4);"
+ "vec4 shadow2DProj(sampler2DShadow, vec4);"
+
+ "vec4 texture2DRect(sampler2DRect, vec2);" // GL_ARB_texture_rectangle, caught by keyword check
+ "vec4 texture2DRectProj(sampler2DRect, vec3);" // GL_ARB_texture_rectangle, caught by keyword check
+ "vec4 texture2DRectProj(sampler2DRect, vec4);" // GL_ARB_texture_rectangle, caught by keyword check
+ "vec4 shadow2DRect(sampler2DRectShadow, vec3);" // GL_ARB_texture_rectangle, caught by keyword check
+ "vec4 shadow2DRectProj(sampler2DRectShadow, vec4);" // GL_ARB_texture_rectangle, caught by keyword check
+
+ "\n");
+ }
+ }
+
+ if (profile == EEsProfile) {
+ if (spvVersion.spv == 0) {
+ if (version < 300) {
+ commonBuiltins.append(
+ "vec4 texture2D(samplerExternalOES, vec2 coord);" // GL_OES_EGL_image_external
+ "vec4 texture2DProj(samplerExternalOES, vec3);" // GL_OES_EGL_image_external
+ "vec4 texture2DProj(samplerExternalOES, vec4);" // GL_OES_EGL_image_external
+ "\n");
+ } else {
+ commonBuiltins.append(
+ "highp ivec2 textureSize(samplerExternalOES, int lod);" // GL_OES_EGL_image_external_essl3
+ "vec4 texture(samplerExternalOES, vec2);" // GL_OES_EGL_image_external_essl3
+ "vec4 texture(samplerExternalOES, vec2, float bias);" // GL_OES_EGL_image_external_essl3
+ "vec4 textureProj(samplerExternalOES, vec3);" // GL_OES_EGL_image_external_essl3
+ "vec4 textureProj(samplerExternalOES, vec3, float bias);" // GL_OES_EGL_image_external_essl3
+ "vec4 textureProj(samplerExternalOES, vec4);" // GL_OES_EGL_image_external_essl3
+ "vec4 textureProj(samplerExternalOES, vec4, float bias);" // GL_OES_EGL_image_external_essl3
+ "vec4 texelFetch(samplerExternalOES, ivec2, int lod);" // GL_OES_EGL_image_external_essl3
+ "\n");
+ }
+ commonBuiltins.append(
+ "highp ivec2 textureSize(__samplerExternal2DY2YEXT, int lod);" // GL_EXT_YUV_target
+ "vec4 texture(__samplerExternal2DY2YEXT, vec2);" // GL_EXT_YUV_target
+ "vec4 texture(__samplerExternal2DY2YEXT, vec2, float bias);" // GL_EXT_YUV_target
+ "vec4 textureProj(__samplerExternal2DY2YEXT, vec3);" // GL_EXT_YUV_target
+ "vec4 textureProj(__samplerExternal2DY2YEXT, vec3, float bias);" // GL_EXT_YUV_target
+ "vec4 textureProj(__samplerExternal2DY2YEXT, vec4);" // GL_EXT_YUV_target
+ "vec4 textureProj(__samplerExternal2DY2YEXT, vec4, float bias);" // GL_EXT_YUV_target
+ "vec4 texelFetch(__samplerExternal2DY2YEXT sampler, ivec2, int lod);" // GL_EXT_YUV_target
+ "\n");
+ commonBuiltins.append(
+ "vec4 texture2DGradEXT(sampler2D, vec2, vec2, vec2);" // GL_EXT_shader_texture_lod
+ "vec4 texture2DProjGradEXT(sampler2D, vec3, vec2, vec2);" // GL_EXT_shader_texture_lod
+ "vec4 texture2DProjGradEXT(sampler2D, vec4, vec2, vec2);" // GL_EXT_shader_texture_lod
+ "vec4 textureCubeGradEXT(samplerCube, vec3, vec3, vec3);" // GL_EXT_shader_texture_lod
+
+ "float shadow2DEXT(sampler2DShadow, vec3);" // GL_EXT_shadow_samplers
+ "float shadow2DProjEXT(sampler2DShadow, vec4);" // GL_EXT_shadow_samplers
+
+ "\n");
+ }
+ }
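+
+ // Usage sketch (GL_OES_EGL_image_external): once declared, an external
+ // sampler is used like a regular 2D texture:
+ //     uniform samplerExternalOES tex;
+ //     vec4 c = texture2D(tex, uv); // ESSL 100; texture(tex, uv) from ESSL 300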
+
+ //
+ // Noise functions.
+ //
+ if (spvVersion.spv == 0 && profile != EEsProfile) {
+ commonBuiltins.append(
+ "float noise1(float x);"
+ "float noise1(vec2 x);"
+ "float noise1(vec3 x);"
+ "float noise1(vec4 x);"
+
+ "vec2 noise2(float x);"
+ "vec2 noise2(vec2 x);"
+ "vec2 noise2(vec3 x);"
+ "vec2 noise2(vec4 x);"
+
+ "vec3 noise3(float x);"
+ "vec3 noise3(vec2 x);"
+ "vec3 noise3(vec3 x);"
+ "vec3 noise3(vec4 x);"
+
+ "vec4 noise4(float x);"
+ "vec4 noise4(vec2 x);"
+ "vec4 noise4(vec3 x);"
+ "vec4 noise4(vec4 x);"
+
+ "\n");
+ }
+
+ if (spvVersion.vulkan == 0) {
+ //
+ // Atomic counter functions.
+ //
+ if ((profile != EEsProfile && version >= 300) ||
+ (profile == EEsProfile && version >= 310)) {
+ commonBuiltins.append(
+ "uint atomicCounterIncrement(atomic_uint);"
+ "uint atomicCounterDecrement(atomic_uint);"
+ "uint atomicCounter(atomic_uint);"
+
+ "\n");
+ }
+ if (profile != EEsProfile && version >= 460) {
+ commonBuiltins.append(
+ "uint atomicCounterAdd(atomic_uint, uint);"
+ "uint atomicCounterSubtract(atomic_uint, uint);"
+ "uint atomicCounterMin(atomic_uint, uint);"
+ "uint atomicCounterMax(atomic_uint, uint);"
+ "uint atomicCounterAnd(atomic_uint, uint);"
+ "uint atomicCounterOr(atomic_uint, uint);"
+ "uint atomicCounterXor(atomic_uint, uint);"
+ "uint atomicCounterExchange(atomic_uint, uint);"
+ "uint atomicCounterCompSwap(atomic_uint, uint, uint);"
+
+ "\n");
+ }
+ }
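+
+ // Usage sketch: atomic counters live in dedicated buffer bindings, and
+ // atomicCounterIncrement returns the pre-increment value:
+ //     layout(binding = 0, offset = 0) uniform atomic_uint drawCount;
+ //     uint slot = atomicCounterIncrement(drawCount);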
+
+ // Bitfield
+ if ((profile == EEsProfile && version >= 310) ||
+ (profile != EEsProfile && version >= 400)) {
+ commonBuiltins.append(
+ " int bitfieldExtract( int, int, int);"
+ "ivec2 bitfieldExtract(ivec2, int, int);"
+ "ivec3 bitfieldExtract(ivec3, int, int);"
+ "ivec4 bitfieldExtract(ivec4, int, int);"
+
+ " uint bitfieldExtract( uint, int, int);"
+ "uvec2 bitfieldExtract(uvec2, int, int);"
+ "uvec3 bitfieldExtract(uvec3, int, int);"
+ "uvec4 bitfieldExtract(uvec4, int, int);"
+
+ " int bitfieldInsert( int base, int, int, int);"
+ "ivec2 bitfieldInsert(ivec2 base, ivec2, int, int);"
+ "ivec3 bitfieldInsert(ivec3 base, ivec3, int, int);"
+ "ivec4 bitfieldInsert(ivec4 base, ivec4, int, int);"
+
+ " uint bitfieldInsert( uint base, uint, int, int);"
+ "uvec2 bitfieldInsert(uvec2 base, uvec2, int, int);"
+ "uvec3 bitfieldInsert(uvec3 base, uvec3, int, int);"
+ "uvec4 bitfieldInsert(uvec4 base, uvec4, int, int);"
+
+ "\n");
+ }
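+
+ // bitfieldExtract(value, offset, bits) sign-extends the signed forms and
+ // zero-extends the unsigned ones:
+ //     int i = bitfieldExtract(0xF0, 4, 4); // == -1 (top extracted bit set)
+ //     uint u = bitfieldExtract(0xF0u, 4, 4); // == 15u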
+
+ if (profile != EEsProfile && version >= 400) {
+ commonBuiltins.append(
+ " int findLSB( int);"
+ "ivec2 findLSB(ivec2);"
+ "ivec3 findLSB(ivec3);"
+ "ivec4 findLSB(ivec4);"
+
+ " int findLSB( uint);"
+ "ivec2 findLSB(uvec2);"
+ "ivec3 findLSB(uvec3);"
+ "ivec4 findLSB(uvec4);"
+
+ "\n");
+ } else if (profile == EEsProfile && version >= 310) {
+ commonBuiltins.append(
+ "lowp int findLSB( int);"
+ "lowp ivec2 findLSB(ivec2);"
+ "lowp ivec3 findLSB(ivec3);"
+ "lowp ivec4 findLSB(ivec4);"
+
+ "lowp int findLSB( uint);"
+ "lowp ivec2 findLSB(uvec2);"
+ "lowp ivec3 findLSB(uvec3);"
+ "lowp ivec4 findLSB(uvec4);"
+
+ "\n");
+ }
+
+ if (profile != EEsProfile && version >= 400) {
+ commonBuiltins.append(
+ " int bitCount( int);"
+ "ivec2 bitCount(ivec2);"
+ "ivec3 bitCount(ivec3);"
+ "ivec4 bitCount(ivec4);"
+
+ " int bitCount( uint);"
+ "ivec2 bitCount(uvec2);"
+ "ivec3 bitCount(uvec3);"
+ "ivec4 bitCount(uvec4);"
+
+ " int findMSB(highp int);"
+ "ivec2 findMSB(highp ivec2);"
+ "ivec3 findMSB(highp ivec3);"
+ "ivec4 findMSB(highp ivec4);"
+
+ " int findMSB(highp uint);"
+ "ivec2 findMSB(highp uvec2);"
+ "ivec3 findMSB(highp uvec3);"
+ "ivec4 findMSB(highp uvec4);"
+
+ "\n");
+ }
+
+ if ((profile == EEsProfile && version >= 310) ||
+ (profile != EEsProfile && version >= 400)) {
+ commonBuiltins.append(
+ " uint uaddCarry(highp uint, highp uint, out lowp uint carry);"
+ "uvec2 uaddCarry(highp uvec2, highp uvec2, out lowp uvec2 carry);"
+ "uvec3 uaddCarry(highp uvec3, highp uvec3, out lowp uvec3 carry);"
+ "uvec4 uaddCarry(highp uvec4, highp uvec4, out lowp uvec4 carry);"
+
+ " uint usubBorrow(highp uint, highp uint, out lowp uint borrow);"
+ "uvec2 usubBorrow(highp uvec2, highp uvec2, out lowp uvec2 borrow);"
+ "uvec3 usubBorrow(highp uvec3, highp uvec3, out lowp uvec3 borrow);"
+ "uvec4 usubBorrow(highp uvec4, highp uvec4, out lowp uvec4 borrow);"
+
+ "void umulExtended(highp uint, highp uint, out highp uint, out highp uint lsb);"
+ "void umulExtended(highp uvec2, highp uvec2, out highp uvec2, out highp uvec2 lsb);"
+ "void umulExtended(highp uvec3, highp uvec3, out highp uvec3, out highp uvec3 lsb);"
+ "void umulExtended(highp uvec4, highp uvec4, out highp uvec4, out highp uvec4 lsb);"
+
+ "void imulExtended(highp int, highp int, out highp int, out highp int lsb);"
+ "void imulExtended(highp ivec2, highp ivec2, out highp ivec2, out highp ivec2 lsb);"
+ "void imulExtended(highp ivec3, highp ivec3, out highp ivec3, out highp ivec3 lsb);"
+ "void imulExtended(highp ivec4, highp ivec4, out highp ivec4, out highp ivec4 lsb);"
+
+ " int bitfieldReverse(highp int);"
+ "ivec2 bitfieldReverse(highp ivec2);"
+ "ivec3 bitfieldReverse(highp ivec3);"
+ "ivec4 bitfieldReverse(highp ivec4);"
+
+ " uint bitfieldReverse(highp uint);"
+ "uvec2 bitfieldReverse(highp uvec2);"
+ "uvec3 bitfieldReverse(highp uvec3);"
+ "uvec4 bitfieldReverse(highp uvec4);"
+
+ "\n");
+ }
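+
+ // umulExtended splits the full 64-bit product of two 32-bit operands:
+ //     uint msb, lsb;
+ //     umulExtended(a, b, msb, lsb); // (uint64) a * b == (msb << 32) | lsb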
+
+ if (profile == EEsProfile && version >= 310) {
+ commonBuiltins.append(
+ "lowp int bitCount( int);"
+ "lowp ivec2 bitCount(ivec2);"
+ "lowp ivec3 bitCount(ivec3);"
+ "lowp ivec4 bitCount(ivec4);"
+
+ "lowp int bitCount( uint);"
+ "lowp ivec2 bitCount(uvec2);"
+ "lowp ivec3 bitCount(uvec3);"
+ "lowp ivec4 bitCount(uvec4);"
+
+ "lowp int findMSB(highp int);"
+ "lowp ivec2 findMSB(highp ivec2);"
+ "lowp ivec3 findMSB(highp ivec3);"
+ "lowp ivec4 findMSB(highp ivec4);"
+
+ "lowp int findMSB(highp uint);"
+ "lowp ivec2 findMSB(highp uvec2);"
+ "lowp ivec3 findMSB(highp uvec3);"
+ "lowp ivec4 findMSB(highp uvec4);"
+
+ "\n");
+ }
+
+ // GL_ARB_shader_ballot
+ if (profile != EEsProfile && version >= 450) {
+ commonBuiltins.append(
+ "uint64_t ballotARB(bool);"
+
+ "float readInvocationARB(float, uint);"
+ "vec2 readInvocationARB(vec2, uint);"
+ "vec3 readInvocationARB(vec3, uint);"
+ "vec4 readInvocationARB(vec4, uint);"
+
+ "int readInvocationARB(int, uint);"
+ "ivec2 readInvocationARB(ivec2, uint);"
+ "ivec3 readInvocationARB(ivec3, uint);"
+ "ivec4 readInvocationARB(ivec4, uint);"
+
+ "uint readInvocationARB(uint, uint);"
+ "uvec2 readInvocationARB(uvec2, uint);"
+ "uvec3 readInvocationARB(uvec3, uint);"
+ "uvec4 readInvocationARB(uvec4, uint);"
+
+ "float readFirstInvocationARB(float);"
+ "vec2 readFirstInvocationARB(vec2);"
+ "vec3 readFirstInvocationARB(vec3);"
+ "vec4 readFirstInvocationARB(vec4);"
+
+ "int readFirstInvocationARB(int);"
+ "ivec2 readFirstInvocationARB(ivec2);"
+ "ivec3 readFirstInvocationARB(ivec3);"
+ "ivec4 readFirstInvocationARB(ivec4);"
+
+ "uint readFirstInvocationARB(uint);"
+ "uvec2 readFirstInvocationARB(uvec2);"
+ "uvec3 readFirstInvocationARB(uvec3);"
+ "uvec4 readFirstInvocationARB(uvec4);"
+
+ "\n");
+ }
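+
+ // Usage sketch (GL_ARB_shader_ballot): ballotARB packs one bool per
+ // invocation into a 64-bit mask, and readFirstInvocationARB returns a
+ // dynamically uniform copy from the lowest active invocation:
+ //     uint64_t liveMask = ballotARB(alive);
+ //     float u = readFirstInvocationARB(value);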
+
+ // GL_ARB_shader_group_vote
+ if (profile != EEsProfile && version >= 430) {
+ commonBuiltins.append(
+ "bool anyInvocationARB(bool);"
+ "bool allInvocationsARB(bool);"
+ "bool allInvocationsEqualARB(bool);"
+
+ "\n");
+ }
+
+ // GL_KHR_shader_subgroup
+ if ((profile == EEsProfile && version >= 310) ||
+ (profile != EEsProfile && version >= 140)) {
+ commonBuiltins.append(
+ "void subgroupBarrier();"
+ "void subgroupMemoryBarrier();"
+ "void subgroupMemoryBarrierBuffer();"
+ "void subgroupMemoryBarrierImage();"
+ "bool subgroupElect();"
+
+ "bool subgroupAll(bool);\n"
+ "bool subgroupAny(bool);\n"
+
+ "bool subgroupAllEqual(float);\n"
+ "bool subgroupAllEqual(vec2);\n"
+ "bool subgroupAllEqual(vec3);\n"
+ "bool subgroupAllEqual(vec4);\n"
+ "bool subgroupAllEqual(int);\n"
+ "bool subgroupAllEqual(ivec2);\n"
+ "bool subgroupAllEqual(ivec3);\n"
+ "bool subgroupAllEqual(ivec4);\n"
+ "bool subgroupAllEqual(uint);\n"
+ "bool subgroupAllEqual(uvec2);\n"
+ "bool subgroupAllEqual(uvec3);\n"
+ "bool subgroupAllEqual(uvec4);\n"
+ "bool subgroupAllEqual(bool);\n"
+ "bool subgroupAllEqual(bvec2);\n"
+ "bool subgroupAllEqual(bvec3);\n"
+ "bool subgroupAllEqual(bvec4);\n"
+
+ "float subgroupBroadcast(float, uint);\n"
+ "vec2 subgroupBroadcast(vec2, uint);\n"
+ "vec3 subgroupBroadcast(vec3, uint);\n"
+ "vec4 subgroupBroadcast(vec4, uint);\n"
+ "int subgroupBroadcast(int, uint);\n"
+ "ivec2 subgroupBroadcast(ivec2, uint);\n"
+ "ivec3 subgroupBroadcast(ivec3, uint);\n"
+ "ivec4 subgroupBroadcast(ivec4, uint);\n"
+ "uint subgroupBroadcast(uint, uint);\n"
+ "uvec2 subgroupBroadcast(uvec2, uint);\n"
+ "uvec3 subgroupBroadcast(uvec3, uint);\n"
+ "uvec4 subgroupBroadcast(uvec4, uint);\n"
+ "bool subgroupBroadcast(bool, uint);\n"
+ "bvec2 subgroupBroadcast(bvec2, uint);\n"
+ "bvec3 subgroupBroadcast(bvec3, uint);\n"
+ "bvec4 subgroupBroadcast(bvec4, uint);\n"
+
+ "float subgroupBroadcastFirst(float);\n"
+ "vec2 subgroupBroadcastFirst(vec2);\n"
+ "vec3 subgroupBroadcastFirst(vec3);\n"
+ "vec4 subgroupBroadcastFirst(vec4);\n"
+ "int subgroupBroadcastFirst(int);\n"
+ "ivec2 subgroupBroadcastFirst(ivec2);\n"
+ "ivec3 subgroupBroadcastFirst(ivec3);\n"
+ "ivec4 subgroupBroadcastFirst(ivec4);\n"
+ "uint subgroupBroadcastFirst(uint);\n"
+ "uvec2 subgroupBroadcastFirst(uvec2);\n"
+ "uvec3 subgroupBroadcastFirst(uvec3);\n"
+ "uvec4 subgroupBroadcastFirst(uvec4);\n"
+ "bool subgroupBroadcastFirst(bool);\n"
+ "bvec2 subgroupBroadcastFirst(bvec2);\n"
+ "bvec3 subgroupBroadcastFirst(bvec3);\n"
+ "bvec4 subgroupBroadcastFirst(bvec4);\n"
+
+ "uvec4 subgroupBallot(bool);\n"
+ "bool subgroupInverseBallot(uvec4);\n"
+ "bool subgroupBallotBitExtract(uvec4, uint);\n"
+ "uint subgroupBallotBitCount(uvec4);\n"
+ "uint subgroupBallotInclusiveBitCount(uvec4);\n"
+ "uint subgroupBallotExclusiveBitCount(uvec4);\n"
+ "uint subgroupBallotFindLSB(uvec4);\n"
+ "uint subgroupBallotFindMSB(uvec4);\n"
+
+ "float subgroupShuffle(float, uint);\n"
+ "vec2 subgroupShuffle(vec2, uint);\n"
+ "vec3 subgroupShuffle(vec3, uint);\n"
+ "vec4 subgroupShuffle(vec4, uint);\n"
+ "int subgroupShuffle(int, uint);\n"
+ "ivec2 subgroupShuffle(ivec2, uint);\n"
+ "ivec3 subgroupShuffle(ivec3, uint);\n"
+ "ivec4 subgroupShuffle(ivec4, uint);\n"
+ "uint subgroupShuffle(uint, uint);\n"
+ "uvec2 subgroupShuffle(uvec2, uint);\n"
+ "uvec3 subgroupShuffle(uvec3, uint);\n"
+ "uvec4 subgroupShuffle(uvec4, uint);\n"
+ "bool subgroupShuffle(bool, uint);\n"
+ "bvec2 subgroupShuffle(bvec2, uint);\n"
+ "bvec3 subgroupShuffle(bvec3, uint);\n"
+ "bvec4 subgroupShuffle(bvec4, uint);\n"
+
+ "float subgroupShuffleXor(float, uint);\n"
+ "vec2 subgroupShuffleXor(vec2, uint);\n"
+ "vec3 subgroupShuffleXor(vec3, uint);\n"
+ "vec4 subgroupShuffleXor(vec4, uint);\n"
+ "int subgroupShuffleXor(int, uint);\n"
+ "ivec2 subgroupShuffleXor(ivec2, uint);\n"
+ "ivec3 subgroupShuffleXor(ivec3, uint);\n"
+ "ivec4 subgroupShuffleXor(ivec4, uint);\n"
+ "uint subgroupShuffleXor(uint, uint);\n"
+ "uvec2 subgroupShuffleXor(uvec2, uint);\n"
+ "uvec3 subgroupShuffleXor(uvec3, uint);\n"
+ "uvec4 subgroupShuffleXor(uvec4, uint);\n"
+ "bool subgroupShuffleXor(bool, uint);\n"
+ "bvec2 subgroupShuffleXor(bvec2, uint);\n"
+ "bvec3 subgroupShuffleXor(bvec3, uint);\n"
+ "bvec4 subgroupShuffleXor(bvec4, uint);\n"
+
+ "float subgroupShuffleUp(float, uint delta);\n"
+ "vec2 subgroupShuffleUp(vec2, uint delta);\n"
+ "vec3 subgroupShuffleUp(vec3, uint delta);\n"
+ "vec4 subgroupShuffleUp(vec4, uint delta);\n"
+ "int subgroupShuffleUp(int, uint delta);\n"
+ "ivec2 subgroupShuffleUp(ivec2, uint delta);\n"
+ "ivec3 subgroupShuffleUp(ivec3, uint delta);\n"
+ "ivec4 subgroupShuffleUp(ivec4, uint delta);\n"
+ "uint subgroupShuffleUp(uint, uint delta);\n"
+ "uvec2 subgroupShuffleUp(uvec2, uint delta);\n"
+ "uvec3 subgroupShuffleUp(uvec3, uint delta);\n"
+ "uvec4 subgroupShuffleUp(uvec4, uint delta);\n"
+ "bool subgroupShuffleUp(bool, uint delta);\n"
+ "bvec2 subgroupShuffleUp(bvec2, uint delta);\n"
+ "bvec3 subgroupShuffleUp(bvec3, uint delta);\n"
+ "bvec4 subgroupShuffleUp(bvec4, uint delta);\n"
+
+ "float subgroupShuffleDown(float, uint delta);\n"
+ "vec2 subgroupShuffleDown(vec2, uint delta);\n"
+ "vec3 subgroupShuffleDown(vec3, uint delta);\n"
+ "vec4 subgroupShuffleDown(vec4, uint delta);\n"
+ "int subgroupShuffleDown(int, uint delta);\n"
+ "ivec2 subgroupShuffleDown(ivec2, uint delta);\n"
+ "ivec3 subgroupShuffleDown(ivec3, uint delta);\n"
+ "ivec4 subgroupShuffleDown(ivec4, uint delta);\n"
+ "uint subgroupShuffleDown(uint, uint delta);\n"
+ "uvec2 subgroupShuffleDown(uvec2, uint delta);\n"
+ "uvec3 subgroupShuffleDown(uvec3, uint delta);\n"
+ "uvec4 subgroupShuffleDown(uvec4, uint delta);\n"
+ "bool subgroupShuffleDown(bool, uint delta);\n"
+ "bvec2 subgroupShuffleDown(bvec2, uint delta);\n"
+ "bvec3 subgroupShuffleDown(bvec3, uint delta);\n"
+ "bvec4 subgroupShuffleDown(bvec4, uint delta);\n"
+
+ "float subgroupAdd(float);\n"
+ "vec2 subgroupAdd(vec2);\n"
+ "vec3 subgroupAdd(vec3);\n"
+ "vec4 subgroupAdd(vec4);\n"
+ "int subgroupAdd(int);\n"
+ "ivec2 subgroupAdd(ivec2);\n"
+ "ivec3 subgroupAdd(ivec3);\n"
+ "ivec4 subgroupAdd(ivec4);\n"
+ "uint subgroupAdd(uint);\n"
+ "uvec2 subgroupAdd(uvec2);\n"
+ "uvec3 subgroupAdd(uvec3);\n"
+ "uvec4 subgroupAdd(uvec4);\n"
+
+ "float subgroupMul(float);\n"
+ "vec2 subgroupMul(vec2);\n"
+ "vec3 subgroupMul(vec3);\n"
+ "vec4 subgroupMul(vec4);\n"
+ "int subgroupMul(int);\n"
+ "ivec2 subgroupMul(ivec2);\n"
+ "ivec3 subgroupMul(ivec3);\n"
+ "ivec4 subgroupMul(ivec4);\n"
+ "uint subgroupMul(uint);\n"
+ "uvec2 subgroupMul(uvec2);\n"
+ "uvec3 subgroupMul(uvec3);\n"
+ "uvec4 subgroupMul(uvec4);\n"
+
+ "float subgroupMin(float);\n"
+ "vec2 subgroupMin(vec2);\n"
+ "vec3 subgroupMin(vec3);\n"
+ "vec4 subgroupMin(vec4);\n"
+ "int subgroupMin(int);\n"
+ "ivec2 subgroupMin(ivec2);\n"
+ "ivec3 subgroupMin(ivec3);\n"
+ "ivec4 subgroupMin(ivec4);\n"
+ "uint subgroupMin(uint);\n"
+ "uvec2 subgroupMin(uvec2);\n"
+ "uvec3 subgroupMin(uvec3);\n"
+ "uvec4 subgroupMin(uvec4);\n"
+
+ "float subgroupMax(float);\n"
+ "vec2 subgroupMax(vec2);\n"
+ "vec3 subgroupMax(vec3);\n"
+ "vec4 subgroupMax(vec4);\n"
+ "int subgroupMax(int);\n"
+ "ivec2 subgroupMax(ivec2);\n"
+ "ivec3 subgroupMax(ivec3);\n"
+ "ivec4 subgroupMax(ivec4);\n"
+ "uint subgroupMax(uint);\n"
+ "uvec2 subgroupMax(uvec2);\n"
+ "uvec3 subgroupMax(uvec3);\n"
+ "uvec4 subgroupMax(uvec4);\n"
+
+ "int subgroupAnd(int);\n"
+ "ivec2 subgroupAnd(ivec2);\n"
+ "ivec3 subgroupAnd(ivec3);\n"
+ "ivec4 subgroupAnd(ivec4);\n"
+ "uint subgroupAnd(uint);\n"
+ "uvec2 subgroupAnd(uvec2);\n"
+ "uvec3 subgroupAnd(uvec3);\n"
+ "uvec4 subgroupAnd(uvec4);\n"
+ "bool subgroupAnd(bool);\n"
+ "bvec2 subgroupAnd(bvec2);\n"
+ "bvec3 subgroupAnd(bvec3);\n"
+ "bvec4 subgroupAnd(bvec4);\n"
+
+ "int subgroupOr(int);\n"
+ "ivec2 subgroupOr(ivec2);\n"
+ "ivec3 subgroupOr(ivec3);\n"
+ "ivec4 subgroupOr(ivec4);\n"
+ "uint subgroupOr(uint);\n"
+ "uvec2 subgroupOr(uvec2);\n"
+ "uvec3 subgroupOr(uvec3);\n"
+ "uvec4 subgroupOr(uvec4);\n"
+ "bool subgroupOr(bool);\n"
+ "bvec2 subgroupOr(bvec2);\n"
+ "bvec3 subgroupOr(bvec3);\n"
+ "bvec4 subgroupOr(bvec4);\n"
+
+ "int subgroupXor(int);\n"
+ "ivec2 subgroupXor(ivec2);\n"
+ "ivec3 subgroupXor(ivec3);\n"
+ "ivec4 subgroupXor(ivec4);\n"
+ "uint subgroupXor(uint);\n"
+ "uvec2 subgroupXor(uvec2);\n"
+ "uvec3 subgroupXor(uvec3);\n"
+ "uvec4 subgroupXor(uvec4);\n"
+ "bool subgroupXor(bool);\n"
+ "bvec2 subgroupXor(bvec2);\n"
+ "bvec3 subgroupXor(bvec3);\n"
+ "bvec4 subgroupXor(bvec4);\n"
+
+ "float subgroupInclusiveAdd(float);\n"
+ "vec2 subgroupInclusiveAdd(vec2);\n"
+ "vec3 subgroupInclusiveAdd(vec3);\n"
+ "vec4 subgroupInclusiveAdd(vec4);\n"
+ "int subgroupInclusiveAdd(int);\n"
+ "ivec2 subgroupInclusiveAdd(ivec2);\n"
+ "ivec3 subgroupInclusiveAdd(ivec3);\n"
+ "ivec4 subgroupInclusiveAdd(ivec4);\n"
+ "uint subgroupInclusiveAdd(uint);\n"
+ "uvec2 subgroupInclusiveAdd(uvec2);\n"
+ "uvec3 subgroupInclusiveAdd(uvec3);\n"
+ "uvec4 subgroupInclusiveAdd(uvec4);\n"
+
+ "float subgroupInclusiveMul(float);\n"
+ "vec2 subgroupInclusiveMul(vec2);\n"
+ "vec3 subgroupInclusiveMul(vec3);\n"
+ "vec4 subgroupInclusiveMul(vec4);\n"
+ "int subgroupInclusiveMul(int);\n"
+ "ivec2 subgroupInclusiveMul(ivec2);\n"
+ "ivec3 subgroupInclusiveMul(ivec3);\n"
+ "ivec4 subgroupInclusiveMul(ivec4);\n"
+ "uint subgroupInclusiveMul(uint);\n"
+ "uvec2 subgroupInclusiveMul(uvec2);\n"
+ "uvec3 subgroupInclusiveMul(uvec3);\n"
+ "uvec4 subgroupInclusiveMul(uvec4);\n"
+
+ "float subgroupInclusiveMin(float);\n"
+ "vec2 subgroupInclusiveMin(vec2);\n"
+ "vec3 subgroupInclusiveMin(vec3);\n"
+ "vec4 subgroupInclusiveMin(vec4);\n"
+ "int subgroupInclusiveMin(int);\n"
+ "ivec2 subgroupInclusiveMin(ivec2);\n"
+ "ivec3 subgroupInclusiveMin(ivec3);\n"
+ "ivec4 subgroupInclusiveMin(ivec4);\n"
+ "uint subgroupInclusiveMin(uint);\n"
+ "uvec2 subgroupInclusiveMin(uvec2);\n"
+ "uvec3 subgroupInclusiveMin(uvec3);\n"
+ "uvec4 subgroupInclusiveMin(uvec4);\n"
+
+ "float subgroupInclusiveMax(float);\n"
+ "vec2 subgroupInclusiveMax(vec2);\n"
+ "vec3 subgroupInclusiveMax(vec3);\n"
+ "vec4 subgroupInclusiveMax(vec4);\n"
+ "int subgroupInclusiveMax(int);\n"
+ "ivec2 subgroupInclusiveMax(ivec2);\n"
+ "ivec3 subgroupInclusiveMax(ivec3);\n"
+ "ivec4 subgroupInclusiveMax(ivec4);\n"
+ "uint subgroupInclusiveMax(uint);\n"
+ "uvec2 subgroupInclusiveMax(uvec2);\n"
+ "uvec3 subgroupInclusiveMax(uvec3);\n"
+ "uvec4 subgroupInclusiveMax(uvec4);\n"
+
+ "int subgroupInclusiveAnd(int);\n"
+ "ivec2 subgroupInclusiveAnd(ivec2);\n"
+ "ivec3 subgroupInclusiveAnd(ivec3);\n"
+ "ivec4 subgroupInclusiveAnd(ivec4);\n"
+ "uint subgroupInclusiveAnd(uint);\n"
+ "uvec2 subgroupInclusiveAnd(uvec2);\n"
+ "uvec3 subgroupInclusiveAnd(uvec3);\n"
+ "uvec4 subgroupInclusiveAnd(uvec4);\n"
+ "bool subgroupInclusiveAnd(bool);\n"
+ "bvec2 subgroupInclusiveAnd(bvec2);\n"
+ "bvec3 subgroupInclusiveAnd(bvec3);\n"
+ "bvec4 subgroupInclusiveAnd(bvec4);\n"
+
+ "int subgroupInclusiveOr(int);\n"
+ "ivec2 subgroupInclusiveOr(ivec2);\n"
+ "ivec3 subgroupInclusiveOr(ivec3);\n"
+ "ivec4 subgroupInclusiveOr(ivec4);\n"
+ "uint subgroupInclusiveOr(uint);\n"
+ "uvec2 subgroupInclusiveOr(uvec2);\n"
+ "uvec3 subgroupInclusiveOr(uvec3);\n"
+ "uvec4 subgroupInclusiveOr(uvec4);\n"
+ "bool subgroupInclusiveOr(bool);\n"
+ "bvec2 subgroupInclusiveOr(bvec2);\n"
+ "bvec3 subgroupInclusiveOr(bvec3);\n"
+ "bvec4 subgroupInclusiveOr(bvec4);\n"
+
+ "int subgroupInclusiveXor(int);\n"
+ "ivec2 subgroupInclusiveXor(ivec2);\n"
+ "ivec3 subgroupInclusiveXor(ivec3);\n"
+ "ivec4 subgroupInclusiveXor(ivec4);\n"
+ "uint subgroupInclusiveXor(uint);\n"
+ "uvec2 subgroupInclusiveXor(uvec2);\n"
+ "uvec3 subgroupInclusiveXor(uvec3);\n"
+ "uvec4 subgroupInclusiveXor(uvec4);\n"
+ "bool subgroupInclusiveXor(bool);\n"
+ "bvec2 subgroupInclusiveXor(bvec2);\n"
+ "bvec3 subgroupInclusiveXor(bvec3);\n"
+ "bvec4 subgroupInclusiveXor(bvec4);\n"
+
+ "float subgroupExclusiveAdd(float);\n"
+ "vec2 subgroupExclusiveAdd(vec2);\n"
+ "vec3 subgroupExclusiveAdd(vec3);\n"
+ "vec4 subgroupExclusiveAdd(vec4);\n"
+ "int subgroupExclusiveAdd(int);\n"
+ "ivec2 subgroupExclusiveAdd(ivec2);\n"
+ "ivec3 subgroupExclusiveAdd(ivec3);\n"
+ "ivec4 subgroupExclusiveAdd(ivec4);\n"
+ "uint subgroupExclusiveAdd(uint);\n"
+ "uvec2 subgroupExclusiveAdd(uvec2);\n"
+ "uvec3 subgroupExclusiveAdd(uvec3);\n"
+ "uvec4 subgroupExclusiveAdd(uvec4);\n"
+
+ "float subgroupExclusiveMul(float);\n"
+ "vec2 subgroupExclusiveMul(vec2);\n"
+ "vec3 subgroupExclusiveMul(vec3);\n"
+ "vec4 subgroupExclusiveMul(vec4);\n"
+ "int subgroupExclusiveMul(int);\n"
+ "ivec2 subgroupExclusiveMul(ivec2);\n"
+ "ivec3 subgroupExclusiveMul(ivec3);\n"
+ "ivec4 subgroupExclusiveMul(ivec4);\n"
+ "uint subgroupExclusiveMul(uint);\n"
+ "uvec2 subgroupExclusiveMul(uvec2);\n"
+ "uvec3 subgroupExclusiveMul(uvec3);\n"
+ "uvec4 subgroupExclusiveMul(uvec4);\n"
+
+ "float subgroupExclusiveMin(float);\n"
+ "vec2 subgroupExclusiveMin(vec2);\n"
+ "vec3 subgroupExclusiveMin(vec3);\n"
+ "vec4 subgroupExclusiveMin(vec4);\n"
+ "int subgroupExclusiveMin(int);\n"
+ "ivec2 subgroupExclusiveMin(ivec2);\n"
+ "ivec3 subgroupExclusiveMin(ivec3);\n"
+ "ivec4 subgroupExclusiveMin(ivec4);\n"
+ "uint subgroupExclusiveMin(uint);\n"
+ "uvec2 subgroupExclusiveMin(uvec2);\n"
+ "uvec3 subgroupExclusiveMin(uvec3);\n"
+ "uvec4 subgroupExclusiveMin(uvec4);\n"
+
+ "float subgroupExclusiveMax(float);\n"
+ "vec2 subgroupExclusiveMax(vec2);\n"
+ "vec3 subgroupExclusiveMax(vec3);\n"
+ "vec4 subgroupExclusiveMax(vec4);\n"
+ "int subgroupExclusiveMax(int);\n"
+ "ivec2 subgroupExclusiveMax(ivec2);\n"
+ "ivec3 subgroupExclusiveMax(ivec3);\n"
+ "ivec4 subgroupExclusiveMax(ivec4);\n"
+ "uint subgroupExclusiveMax(uint);\n"
+ "uvec2 subgroupExclusiveMax(uvec2);\n"
+ "uvec3 subgroupExclusiveMax(uvec3);\n"
+ "uvec4 subgroupExclusiveMax(uvec4);\n"
+
+ "int subgroupExclusiveAnd(int);\n"
+ "ivec2 subgroupExclusiveAnd(ivec2);\n"
+ "ivec3 subgroupExclusiveAnd(ivec3);\n"
+ "ivec4 subgroupExclusiveAnd(ivec4);\n"
+ "uint subgroupExclusiveAnd(uint);\n"
+ "uvec2 subgroupExclusiveAnd(uvec2);\n"
+ "uvec3 subgroupExclusiveAnd(uvec3);\n"
+ "uvec4 subgroupExclusiveAnd(uvec4);\n"
+ "bool subgroupExclusiveAnd(bool);\n"
+ "bvec2 subgroupExclusiveAnd(bvec2);\n"
+ "bvec3 subgroupExclusiveAnd(bvec3);\n"
+ "bvec4 subgroupExclusiveAnd(bvec4);\n"
+
+ "int subgroupExclusiveOr(int);\n"
+ "ivec2 subgroupExclusiveOr(ivec2);\n"
+ "ivec3 subgroupExclusiveOr(ivec3);\n"
+ "ivec4 subgroupExclusiveOr(ivec4);\n"
+ "uint subgroupExclusiveOr(uint);\n"
+ "uvec2 subgroupExclusiveOr(uvec2);\n"
+ "uvec3 subgroupExclusiveOr(uvec3);\n"
+ "uvec4 subgroupExclusiveOr(uvec4);\n"
+ "bool subgroupExclusiveOr(bool);\n"
+ "bvec2 subgroupExclusiveOr(bvec2);\n"
+ "bvec3 subgroupExclusiveOr(bvec3);\n"
+ "bvec4 subgroupExclusiveOr(bvec4);\n"
+
+ "int subgroupExclusiveXor(int);\n"
+ "ivec2 subgroupExclusiveXor(ivec2);\n"
+ "ivec3 subgroupExclusiveXor(ivec3);\n"
+ "ivec4 subgroupExclusiveXor(ivec4);\n"
+ "uint subgroupExclusiveXor(uint);\n"
+ "uvec2 subgroupExclusiveXor(uvec2);\n"
+ "uvec3 subgroupExclusiveXor(uvec3);\n"
+ "uvec4 subgroupExclusiveXor(uvec4);\n"
+ "bool subgroupExclusiveXor(bool);\n"
+ "bvec2 subgroupExclusiveXor(bvec2);\n"
+ "bvec3 subgroupExclusiveXor(bvec3);\n"
+ "bvec4 subgroupExclusiveXor(bvec4);\n"
+
+ "float subgroupClusteredAdd(float, uint);\n"
+ "vec2 subgroupClusteredAdd(vec2, uint);\n"
+ "vec3 subgroupClusteredAdd(vec3, uint);\n"
+ "vec4 subgroupClusteredAdd(vec4, uint);\n"
+ "int subgroupClusteredAdd(int, uint);\n"
+ "ivec2 subgroupClusteredAdd(ivec2, uint);\n"
+ "ivec3 subgroupClusteredAdd(ivec3, uint);\n"
+ "ivec4 subgroupClusteredAdd(ivec4, uint);\n"
+ "uint subgroupClusteredAdd(uint, uint);\n"
+ "uvec2 subgroupClusteredAdd(uvec2, uint);\n"
+ "uvec3 subgroupClusteredAdd(uvec3, uint);\n"
+ "uvec4 subgroupClusteredAdd(uvec4, uint);\n"
+
+ "float subgroupClusteredMul(float, uint);\n"
+ "vec2 subgroupClusteredMul(vec2, uint);\n"
+ "vec3 subgroupClusteredMul(vec3, uint);\n"
+ "vec4 subgroupClusteredMul(vec4, uint);\n"
+ "int subgroupClusteredMul(int, uint);\n"
+ "ivec2 subgroupClusteredMul(ivec2, uint);\n"
+ "ivec3 subgroupClusteredMul(ivec3, uint);\n"
+ "ivec4 subgroupClusteredMul(ivec4, uint);\n"
+ "uint subgroupClusteredMul(uint, uint);\n"
+ "uvec2 subgroupClusteredMul(uvec2, uint);\n"
+ "uvec3 subgroupClusteredMul(uvec3, uint);\n"
+ "uvec4 subgroupClusteredMul(uvec4, uint);\n"
+
+ "float subgroupClusteredMin(float, uint);\n"
+ "vec2 subgroupClusteredMin(vec2, uint);\n"
+ "vec3 subgroupClusteredMin(vec3, uint);\n"
+ "vec4 subgroupClusteredMin(vec4, uint);\n"
+ "int subgroupClusteredMin(int, uint);\n"
+ "ivec2 subgroupClusteredMin(ivec2, uint);\n"
+ "ivec3 subgroupClusteredMin(ivec3, uint);\n"
+ "ivec4 subgroupClusteredMin(ivec4, uint);\n"
+ "uint subgroupClusteredMin(uint, uint);\n"
+ "uvec2 subgroupClusteredMin(uvec2, uint);\n"
+ "uvec3 subgroupClusteredMin(uvec3, uint);\n"
+ "uvec4 subgroupClusteredMin(uvec4, uint);\n"
+
+ "float subgroupClusteredMax(float, uint);\n"
+ "vec2 subgroupClusteredMax(vec2, uint);\n"
+ "vec3 subgroupClusteredMax(vec3, uint);\n"
+ "vec4 subgroupClusteredMax(vec4, uint);\n"
+ "int subgroupClusteredMax(int, uint);\n"
+ "ivec2 subgroupClusteredMax(ivec2, uint);\n"
+ "ivec3 subgroupClusteredMax(ivec3, uint);\n"
+ "ivec4 subgroupClusteredMax(ivec4, uint);\n"
+ "uint subgroupClusteredMax(uint, uint);\n"
+ "uvec2 subgroupClusteredMax(uvec2, uint);\n"
+ "uvec3 subgroupClusteredMax(uvec3, uint);\n"
+ "uvec4 subgroupClusteredMax(uvec4, uint);\n"
+
+ "int subgroupClusteredAnd(int, uint);\n"
+ "ivec2 subgroupClusteredAnd(ivec2, uint);\n"
+ "ivec3 subgroupClusteredAnd(ivec3, uint);\n"
+ "ivec4 subgroupClusteredAnd(ivec4, uint);\n"
+ "uint subgroupClusteredAnd(uint, uint);\n"
+ "uvec2 subgroupClusteredAnd(uvec2, uint);\n"
+ "uvec3 subgroupClusteredAnd(uvec3, uint);\n"
+ "uvec4 subgroupClusteredAnd(uvec4, uint);\n"
+ "bool subgroupClusteredAnd(bool, uint);\n"
+ "bvec2 subgroupClusteredAnd(bvec2, uint);\n"
+ "bvec3 subgroupClusteredAnd(bvec3, uint);\n"
+ "bvec4 subgroupClusteredAnd(bvec4, uint);\n"
+
+ "int subgroupClusteredOr(int, uint);\n"
+ "ivec2 subgroupClusteredOr(ivec2, uint);\n"
+ "ivec3 subgroupClusteredOr(ivec3, uint);\n"
+ "ivec4 subgroupClusteredOr(ivec4, uint);\n"
+ "uint subgroupClusteredOr(uint, uint);\n"
+ "uvec2 subgroupClusteredOr(uvec2, uint);\n"
+ "uvec3 subgroupClusteredOr(uvec3, uint);\n"
+ "uvec4 subgroupClusteredOr(uvec4, uint);\n"
+ "bool subgroupClusteredOr(bool, uint);\n"
+ "bvec2 subgroupClusteredOr(bvec2, uint);\n"
+ "bvec3 subgroupClusteredOr(bvec3, uint);\n"
+ "bvec4 subgroupClusteredOr(bvec4, uint);\n"
+
+ "int subgroupClusteredXor(int, uint);\n"
+ "ivec2 subgroupClusteredXor(ivec2, uint);\n"
+ "ivec3 subgroupClusteredXor(ivec3, uint);\n"
+ "ivec4 subgroupClusteredXor(ivec4, uint);\n"
+ "uint subgroupClusteredXor(uint, uint);\n"
+ "uvec2 subgroupClusteredXor(uvec2, uint);\n"
+ "uvec3 subgroupClusteredXor(uvec3, uint);\n"
+ "uvec4 subgroupClusteredXor(uvec4, uint);\n"
+ "bool subgroupClusteredXor(bool, uint);\n"
+ "bvec2 subgroupClusteredXor(bvec2, uint);\n"
+ "bvec3 subgroupClusteredXor(bvec3, uint);\n"
+ "bvec4 subgroupClusteredXor(bvec4, uint);\n"
+
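+ // GL_KHR_shader_subgroup_quad: the quad swap variants exchange values
+ // between the two invocations paired horizontally, vertically, or
+ // diagonally within each quad of four invocations.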
+ "float subgroupQuadBroadcast(float, uint);\n"
+ "vec2 subgroupQuadBroadcast(vec2, uint);\n"
+ "vec3 subgroupQuadBroadcast(vec3, uint);\n"
+ "vec4 subgroupQuadBroadcast(vec4, uint);\n"
+ "int subgroupQuadBroadcast(int, uint);\n"
+ "ivec2 subgroupQuadBroadcast(ivec2, uint);\n"
+ "ivec3 subgroupQuadBroadcast(ivec3, uint);\n"
+ "ivec4 subgroupQuadBroadcast(ivec4, uint);\n"
+ "uint subgroupQuadBroadcast(uint, uint);\n"
+ "uvec2 subgroupQuadBroadcast(uvec2, uint);\n"
+ "uvec3 subgroupQuadBroadcast(uvec3, uint);\n"
+ "uvec4 subgroupQuadBroadcast(uvec4, uint);\n"
+ "bool subgroupQuadBroadcast(bool, uint);\n"
+ "bvec2 subgroupQuadBroadcast(bvec2, uint);\n"
+ "bvec3 subgroupQuadBroadcast(bvec3, uint);\n"
+ "bvec4 subgroupQuadBroadcast(bvec4, uint);\n"
+
+ "float subgroupQuadSwapHorizontal(float);\n"
+ "vec2 subgroupQuadSwapHorizontal(vec2);\n"
+ "vec3 subgroupQuadSwapHorizontal(vec3);\n"
+ "vec4 subgroupQuadSwapHorizontal(vec4);\n"
+ "int subgroupQuadSwapHorizontal(int);\n"
+ "ivec2 subgroupQuadSwapHorizontal(ivec2);\n"
+ "ivec3 subgroupQuadSwapHorizontal(ivec3);\n"
+ "ivec4 subgroupQuadSwapHorizontal(ivec4);\n"
+ "uint subgroupQuadSwapHorizontal(uint);\n"
+ "uvec2 subgroupQuadSwapHorizontal(uvec2);\n"
+ "uvec3 subgroupQuadSwapHorizontal(uvec3);\n"
+ "uvec4 subgroupQuadSwapHorizontal(uvec4);\n"
+ "bool subgroupQuadSwapHorizontal(bool);\n"
+ "bvec2 subgroupQuadSwapHorizontal(bvec2);\n"
+ "bvec3 subgroupQuadSwapHorizontal(bvec3);\n"
+ "bvec4 subgroupQuadSwapHorizontal(bvec4);\n"
+
+ "float subgroupQuadSwapVertical(float);\n"
+ "vec2 subgroupQuadSwapVertical(vec2);\n"
+ "vec3 subgroupQuadSwapVertical(vec3);\n"
+ "vec4 subgroupQuadSwapVertical(vec4);\n"
+ "int subgroupQuadSwapVertical(int);\n"
+ "ivec2 subgroupQuadSwapVertical(ivec2);\n"
+ "ivec3 subgroupQuadSwapVertical(ivec3);\n"
+ "ivec4 subgroupQuadSwapVertical(ivec4);\n"
+ "uint subgroupQuadSwapVertical(uint);\n"
+ "uvec2 subgroupQuadSwapVertical(uvec2);\n"
+ "uvec3 subgroupQuadSwapVertical(uvec3);\n"
+ "uvec4 subgroupQuadSwapVertical(uvec4);\n"
+ "bool subgroupQuadSwapVertical(bool);\n"
+ "bvec2 subgroupQuadSwapVertical(bvec2);\n"
+ "bvec3 subgroupQuadSwapVertical(bvec3);\n"
+ "bvec4 subgroupQuadSwapVertical(bvec4);\n"
+
+ "float subgroupQuadSwapDiagonal(float);\n"
+ "vec2 subgroupQuadSwapDiagonal(vec2);\n"
+ "vec3 subgroupQuadSwapDiagonal(vec3);\n"
+ "vec4 subgroupQuadSwapDiagonal(vec4);\n"
+ "int subgroupQuadSwapDiagonal(int);\n"
+ "ivec2 subgroupQuadSwapDiagonal(ivec2);\n"
+ "ivec3 subgroupQuadSwapDiagonal(ivec3);\n"
+ "ivec4 subgroupQuadSwapDiagonal(ivec4);\n"
+ "uint subgroupQuadSwapDiagonal(uint);\n"
+ "uvec2 subgroupQuadSwapDiagonal(uvec2);\n"
+ "uvec3 subgroupQuadSwapDiagonal(uvec3);\n"
+ "uvec4 subgroupQuadSwapDiagonal(uvec4);\n"
+ "bool subgroupQuadSwapDiagonal(bool);\n"
+ "bvec2 subgroupQuadSwapDiagonal(bvec2);\n"
+ "bvec3 subgroupQuadSwapDiagonal(bvec3);\n"
+ "bvec4 subgroupQuadSwapDiagonal(bvec4);\n"
+
+#ifdef NV_EXTENSIONS
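+ // GL_NV_shader_subgroup_partitioned: subgroupPartitionNV() returns a
+ // ballot identifying the invocations whose argument compares equal, and
+ // the subgroupPartitioned*NV() operations reduce or scan within each
+ // partition described by that ballot.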
+ "uvec4 subgroupPartitionNV(float);\n"
+ "uvec4 subgroupPartitionNV(vec2);\n"
+ "uvec4 subgroupPartitionNV(vec3);\n"
+ "uvec4 subgroupPartitionNV(vec4);\n"
+ "uvec4 subgroupPartitionNV(int);\n"
+ "uvec4 subgroupPartitionNV(ivec2);\n"
+ "uvec4 subgroupPartitionNV(ivec3);\n"
+ "uvec4 subgroupPartitionNV(ivec4);\n"
+ "uvec4 subgroupPartitionNV(uint);\n"
+ "uvec4 subgroupPartitionNV(uvec2);\n"
+ "uvec4 subgroupPartitionNV(uvec3);\n"
+ "uvec4 subgroupPartitionNV(uvec4);\n"
+ "uvec4 subgroupPartitionNV(bool);\n"
+ "uvec4 subgroupPartitionNV(bvec2);\n"
+ "uvec4 subgroupPartitionNV(bvec3);\n"
+ "uvec4 subgroupPartitionNV(bvec4);\n"
+
+ "float subgroupPartitionedAddNV(float, uvec4 ballot);\n"
+ "vec2 subgroupPartitionedAddNV(vec2, uvec4 ballot);\n"
+ "vec3 subgroupPartitionedAddNV(vec3, uvec4 ballot);\n"
+ "vec4 subgroupPartitionedAddNV(vec4, uvec4 ballot);\n"
+ "int subgroupPartitionedAddNV(int, uvec4 ballot);\n"
+ "ivec2 subgroupPartitionedAddNV(ivec2, uvec4 ballot);\n"
+ "ivec3 subgroupPartitionedAddNV(ivec3, uvec4 ballot);\n"
+ "ivec4 subgroupPartitionedAddNV(ivec4, uvec4 ballot);\n"
+ "uint subgroupPartitionedAddNV(uint, uvec4 ballot);\n"
+ "uvec2 subgroupPartitionedAddNV(uvec2, uvec4 ballot);\n"
+ "uvec3 subgroupPartitionedAddNV(uvec3, uvec4 ballot);\n"
+ "uvec4 subgroupPartitionedAddNV(uvec4, uvec4 ballot);\n"
+
+ "float subgroupPartitionedMulNV(float, uvec4 ballot);\n"
+ "vec2 subgroupPartitionedMulNV(vec2, uvec4 ballot);\n"
+ "vec3 subgroupPartitionedMulNV(vec3, uvec4 ballot);\n"
+ "vec4 subgroupPartitionedMulNV(vec4, uvec4 ballot);\n"
+ "int subgroupPartitionedMulNV(int, uvec4 ballot);\n"
+ "ivec2 subgroupPartitionedMulNV(ivec2, uvec4 ballot);\n"
+ "ivec3 subgroupPartitionedMulNV(ivec3, uvec4 ballot);\n"
+ "ivec4 subgroupPartitionedMulNV(ivec4, uvec4 ballot);\n"
+ "uint subgroupPartitionedMulNV(uint, uvec4 ballot);\n"
+ "uvec2 subgroupPartitionedMulNV(uvec2, uvec4 ballot);\n"
+ "uvec3 subgroupPartitionedMulNV(uvec3, uvec4 ballot);\n"
+ "uvec4 subgroupPartitionedMulNV(uvec4, uvec4 ballot);\n"
+
+ "float subgroupPartitionedMinNV(float, uvec4 ballot);\n"
+ "vec2 subgroupPartitionedMinNV(vec2, uvec4 ballot);\n"
+ "vec3 subgroupPartitionedMinNV(vec3, uvec4 ballot);\n"
+ "vec4 subgroupPartitionedMinNV(vec4, uvec4 ballot);\n"
+ "int subgroupPartitionedMinNV(int, uvec4 ballot);\n"
+ "ivec2 subgroupPartitionedMinNV(ivec2, uvec4 ballot);\n"
+ "ivec3 subgroupPartitionedMinNV(ivec3, uvec4 ballot);\n"
+ "ivec4 subgroupPartitionedMinNV(ivec4, uvec4 ballot);\n"
+ "uint subgroupPartitionedMinNV(uint, uvec4 ballot);\n"
+ "uvec2 subgroupPartitionedMinNV(uvec2, uvec4 ballot);\n"
+ "uvec3 subgroupPartitionedMinNV(uvec3, uvec4 ballot);\n"
+ "uvec4 subgroupPartitionedMinNV(uvec4, uvec4 ballot);\n"
+
+ "float subgroupPartitionedMaxNV(float, uvec4 ballot);\n"
+ "vec2 subgroupPartitionedMaxNV(vec2, uvec4 ballot);\n"
+ "vec3 subgroupPartitionedMaxNV(vec3, uvec4 ballot);\n"
+ "vec4 subgroupPartitionedMaxNV(vec4, uvec4 ballot);\n"
+ "int subgroupPartitionedMaxNV(int, uvec4 ballot);\n"
+ "ivec2 subgroupPartitionedMaxNV(ivec2, uvec4 ballot);\n"
+ "ivec3 subgroupPartitionedMaxNV(ivec3, uvec4 ballot);\n"
+ "ivec4 subgroupPartitionedMaxNV(ivec4, uvec4 ballot);\n"
+ "uint subgroupPartitionedMaxNV(uint, uvec4 ballot);\n"
+ "uvec2 subgroupPartitionedMaxNV(uvec2, uvec4 ballot);\n"
+ "uvec3 subgroupPartitionedMaxNV(uvec3, uvec4 ballot);\n"
+ "uvec4 subgroupPartitionedMaxNV(uvec4, uvec4 ballot);\n"
+
+ "int subgroupPartitionedAndNV(int, uvec4 ballot);\n"
+ "ivec2 subgroupPartitionedAndNV(ivec2, uvec4 ballot);\n"
+ "ivec3 subgroupPartitionedAndNV(ivec3, uvec4 ballot);\n"
+ "ivec4 subgroupPartitionedAndNV(ivec4, uvec4 ballot);\n"
+ "uint subgroupPartitionedAndNV(uint, uvec4 ballot);\n"
+ "uvec2 subgroupPartitionedAndNV(uvec2, uvec4 ballot);\n"
+ "uvec3 subgroupPartitionedAndNV(uvec3, uvec4 ballot);\n"
+ "uvec4 subgroupPartitionedAndNV(uvec4, uvec4 ballot);\n"
+ "bool subgroupPartitionedAndNV(bool, uvec4 ballot);\n"
+ "bvec2 subgroupPartitionedAndNV(bvec2, uvec4 ballot);\n"
+ "bvec3 subgroupPartitionedAndNV(bvec3, uvec4 ballot);\n"
+ "bvec4 subgroupPartitionedAndNV(bvec4, uvec4 ballot);\n"
+
+ "int subgroupPartitionedOrNV(int, uvec4 ballot);\n"
+ "ivec2 subgroupPartitionedOrNV(ivec2, uvec4 ballot);\n"
+ "ivec3 subgroupPartitionedOrNV(ivec3, uvec4 ballot);\n"
+ "ivec4 subgroupPartitionedOrNV(ivec4, uvec4 ballot);\n"
+ "uint subgroupPartitionedOrNV(uint, uvec4 ballot);\n"
+ "uvec2 subgroupPartitionedOrNV(uvec2, uvec4 ballot);\n"
+ "uvec3 subgroupPartitionedOrNV(uvec3, uvec4 ballot);\n"
+ "uvec4 subgroupPartitionedOrNV(uvec4, uvec4 ballot);\n"
+ "bool subgroupPartitionedOrNV(bool, uvec4 ballot);\n"
+ "bvec2 subgroupPartitionedOrNV(bvec2, uvec4 ballot);\n"
+ "bvec3 subgroupPartitionedOrNV(bvec3, uvec4 ballot);\n"
+ "bvec4 subgroupPartitionedOrNV(bvec4, uvec4 ballot);\n"
+
+ "int subgroupPartitionedXorNV(int, uvec4 ballot);\n"
+ "ivec2 subgroupPartitionedXorNV(ivec2, uvec4 ballot);\n"
+ "ivec3 subgroupPartitionedXorNV(ivec3, uvec4 ballot);\n"
+ "ivec4 subgroupPartitionedXorNV(ivec4, uvec4 ballot);\n"
+ "uint subgroupPartitionedXorNV(uint, uvec4 ballot);\n"
+ "uvec2 subgroupPartitionedXorNV(uvec2, uvec4 ballot);\n"
+ "uvec3 subgroupPartitionedXorNV(uvec3, uvec4 ballot);\n"
+ "uvec4 subgroupPartitionedXorNV(uvec4, uvec4 ballot);\n"
+ "bool subgroupPartitionedXorNV(bool, uvec4 ballot);\n"
+ "bvec2 subgroupPartitionedXorNV(bvec2, uvec4 ballot);\n"
+ "bvec3 subgroupPartitionedXorNV(bvec3, uvec4 ballot);\n"
+ "bvec4 subgroupPartitionedXorNV(bvec4, uvec4 ballot);\n"
+
+ "float subgroupPartitionedInclusiveAddNV(float, uvec4 ballot);\n"
+ "vec2 subgroupPartitionedInclusiveAddNV(vec2, uvec4 ballot);\n"
+ "vec3 subgroupPartitionedInclusiveAddNV(vec3, uvec4 ballot);\n"
+ "vec4 subgroupPartitionedInclusiveAddNV(vec4, uvec4 ballot);\n"
+ "int subgroupPartitionedInclusiveAddNV(int, uvec4 ballot);\n"
+ "ivec2 subgroupPartitionedInclusiveAddNV(ivec2, uvec4 ballot);\n"
+ "ivec3 subgroupPartitionedInclusiveAddNV(ivec3, uvec4 ballot);\n"
+ "ivec4 subgroupPartitionedInclusiveAddNV(ivec4, uvec4 ballot);\n"
+ "uint subgroupPartitionedInclusiveAddNV(uint, uvec4 ballot);\n"
+ "uvec2 subgroupPartitionedInclusiveAddNV(uvec2, uvec4 ballot);\n"
+ "uvec3 subgroupPartitionedInclusiveAddNV(uvec3, uvec4 ballot);\n"
+ "uvec4 subgroupPartitionedInclusiveAddNV(uvec4, uvec4 ballot);\n"
+
+ "float subgroupPartitionedInclusiveMulNV(float, uvec4 ballot);\n"
+ "vec2 subgroupPartitionedInclusiveMulNV(vec2, uvec4 ballot);\n"
+ "vec3 subgroupPartitionedInclusiveMulNV(vec3, uvec4 ballot);\n"
+ "vec4 subgroupPartitionedInclusiveMulNV(vec4, uvec4 ballot);\n"
+ "int subgroupPartitionedInclusiveMulNV(int, uvec4 ballot);\n"
+ "ivec2 subgroupPartitionedInclusiveMulNV(ivec2, uvec4 ballot);\n"
+ "ivec3 subgroupPartitionedInclusiveMulNV(ivec3, uvec4 ballot);\n"
+ "ivec4 subgroupPartitionedInclusiveMulNV(ivec4, uvec4 ballot);\n"
+ "uint subgroupPartitionedInclusiveMulNV(uint, uvec4 ballot);\n"
+ "uvec2 subgroupPartitionedInclusiveMulNV(uvec2, uvec4 ballot);\n"
+ "uvec3 subgroupPartitionedInclusiveMulNV(uvec3, uvec4 ballot);\n"
+ "uvec4 subgroupPartitionedInclusiveMulNV(uvec4, uvec4 ballot);\n"
+
+ "float subgroupPartitionedInclusiveMinNV(float, uvec4 ballot);\n"
+ "vec2 subgroupPartitionedInclusiveMinNV(vec2, uvec4 ballot);\n"
+ "vec3 subgroupPartitionedInclusiveMinNV(vec3, uvec4 ballot);\n"
+ "vec4 subgroupPartitionedInclusiveMinNV(vec4, uvec4 ballot);\n"
+ "int subgroupPartitionedInclusiveMinNV(int, uvec4 ballot);\n"
+ "ivec2 subgroupPartitionedInclusiveMinNV(ivec2, uvec4 ballot);\n"
+ "ivec3 subgroupPartitionedInclusiveMinNV(ivec3, uvec4 ballot);\n"
+ "ivec4 subgroupPartitionedInclusiveMinNV(ivec4, uvec4 ballot);\n"
+ "uint subgroupPartitionedInclusiveMinNV(uint, uvec4 ballot);\n"
+ "uvec2 subgroupPartitionedInclusiveMinNV(uvec2, uvec4 ballot);\n"
+ "uvec3 subgroupPartitionedInclusiveMinNV(uvec3, uvec4 ballot);\n"
+ "uvec4 subgroupPartitionedInclusiveMinNV(uvec4, uvec4 ballot);\n"
+
+ "float subgroupPartitionedInclusiveMaxNV(float, uvec4 ballot);\n"
+ "vec2 subgroupPartitionedInclusiveMaxNV(vec2, uvec4 ballot);\n"
+ "vec3 subgroupPartitionedInclusiveMaxNV(vec3, uvec4 ballot);\n"
+ "vec4 subgroupPartitionedInclusiveMaxNV(vec4, uvec4 ballot);\n"
+ "int subgroupPartitionedInclusiveMaxNV(int, uvec4 ballot);\n"
+ "ivec2 subgroupPartitionedInclusiveMaxNV(ivec2, uvec4 ballot);\n"
+ "ivec3 subgroupPartitionedInclusiveMaxNV(ivec3, uvec4 ballot);\n"
+ "ivec4 subgroupPartitionedInclusiveMaxNV(ivec4, uvec4 ballot);\n"
+ "uint subgroupPartitionedInclusiveMaxNV(uint, uvec4 ballot);\n"
+ "uvec2 subgroupPartitionedInclusiveMaxNV(uvec2, uvec4 ballot);\n"
+ "uvec3 subgroupPartitionedInclusiveMaxNV(uvec3, uvec4 ballot);\n"
+ "uvec4 subgroupPartitionedInclusiveMaxNV(uvec4, uvec4 ballot);\n"
+
+ "int subgroupPartitionedInclusiveAndNV(int, uvec4 ballot);\n"
+ "ivec2 subgroupPartitionedInclusiveAndNV(ivec2, uvec4 ballot);\n"
+ "ivec3 subgroupPartitionedInclusiveAndNV(ivec3, uvec4 ballot);\n"
+ "ivec4 subgroupPartitionedInclusiveAndNV(ivec4, uvec4 ballot);\n"
+ "uint subgroupPartitionedInclusiveAndNV(uint, uvec4 ballot);\n"
+ "uvec2 subgroupPartitionedInclusiveAndNV(uvec2, uvec4 ballot);\n"
+ "uvec3 subgroupPartitionedInclusiveAndNV(uvec3, uvec4 ballot);\n"
+ "uvec4 subgroupPartitionedInclusiveAndNV(uvec4, uvec4 ballot);\n"
+ "bool subgroupPartitionedInclusiveAndNV(bool, uvec4 ballot);\n"
+ "bvec2 subgroupPartitionedInclusiveAndNV(bvec2, uvec4 ballot);\n"
+ "bvec3 subgroupPartitionedInclusiveAndNV(bvec3, uvec4 ballot);\n"
+ "bvec4 subgroupPartitionedInclusiveAndNV(bvec4, uvec4 ballot);\n"
+
+ "int subgroupPartitionedInclusiveOrNV(int, uvec4 ballot);\n"
+ "ivec2 subgroupPartitionedInclusiveOrNV(ivec2, uvec4 ballot);\n"
+ "ivec3 subgroupPartitionedInclusiveOrNV(ivec3, uvec4 ballot);\n"
+ "ivec4 subgroupPartitionedInclusiveOrNV(ivec4, uvec4 ballot);\n"
+ "uint subgroupPartitionedInclusiveOrNV(uint, uvec4 ballot);\n"
+ "uvec2 subgroupPartitionedInclusiveOrNV(uvec2, uvec4 ballot);\n"
+ "uvec3 subgroupPartitionedInclusiveOrNV(uvec3, uvec4 ballot);\n"
+ "uvec4 subgroupPartitionedInclusiveOrNV(uvec4, uvec4 ballot);\n"
+ "bool subgroupPartitionedInclusiveOrNV(bool, uvec4 ballot);\n"
+ "bvec2 subgroupPartitionedInclusiveOrNV(bvec2, uvec4 ballot);\n"
+ "bvec3 subgroupPartitionedInclusiveOrNV(bvec3, uvec4 ballot);\n"
+ "bvec4 subgroupPartitionedInclusiveOrNV(bvec4, uvec4 ballot);\n"
+
+ "int subgroupPartitionedInclusiveXorNV(int, uvec4 ballot);\n"
+ "ivec2 subgroupPartitionedInclusiveXorNV(ivec2, uvec4 ballot);\n"
+ "ivec3 subgroupPartitionedInclusiveXorNV(ivec3, uvec4 ballot);\n"
+ "ivec4 subgroupPartitionedInclusiveXorNV(ivec4, uvec4 ballot);\n"
+ "uint subgroupPartitionedInclusiveXorNV(uint, uvec4 ballot);\n"
+ "uvec2 subgroupPartitionedInclusiveXorNV(uvec2, uvec4 ballot);\n"
+ "uvec3 subgroupPartitionedInclusiveXorNV(uvec3, uvec4 ballot);\n"
+ "uvec4 subgroupPartitionedInclusiveXorNV(uvec4, uvec4 ballot);\n"
+ "bool subgroupPartitionedInclusiveXorNV(bool, uvec4 ballot);\n"
+ "bvec2 subgroupPartitionedInclusiveXorNV(bvec2, uvec4 ballot);\n"
+ "bvec3 subgroupPartitionedInclusiveXorNV(bvec3, uvec4 ballot);\n"
+ "bvec4 subgroupPartitionedInclusiveXorNV(bvec4, uvec4 ballot);\n"
+
+ "float subgroupPartitionedExclusiveAddNV(float, uvec4 ballot);\n"
+ "vec2 subgroupPartitionedExclusiveAddNV(vec2, uvec4 ballot);\n"
+ "vec3 subgroupPartitionedExclusiveAddNV(vec3, uvec4 ballot);\n"
+ "vec4 subgroupPartitionedExclusiveAddNV(vec4, uvec4 ballot);\n"
+ "int subgroupPartitionedExclusiveAddNV(int, uvec4 ballot);\n"
+ "ivec2 subgroupPartitionedExclusiveAddNV(ivec2, uvec4 ballot);\n"
+ "ivec3 subgroupPartitionedExclusiveAddNV(ivec3, uvec4 ballot);\n"
+ "ivec4 subgroupPartitionedExclusiveAddNV(ivec4, uvec4 ballot);\n"
+ "uint subgroupPartitionedExclusiveAddNV(uint, uvec4 ballot);\n"
+ "uvec2 subgroupPartitionedExclusiveAddNV(uvec2, uvec4 ballot);\n"
+ "uvec3 subgroupPartitionedExclusiveAddNV(uvec3, uvec4 ballot);\n"
+ "uvec4 subgroupPartitionedExclusiveAddNV(uvec4, uvec4 ballot);\n"
+
+ "float subgroupPartitionedExclusiveMulNV(float, uvec4 ballot);\n"
+ "vec2 subgroupPartitionedExclusiveMulNV(vec2, uvec4 ballot);\n"
+ "vec3 subgroupPartitionedExclusiveMulNV(vec3, uvec4 ballot);\n"
+ "vec4 subgroupPartitionedExclusiveMulNV(vec4, uvec4 ballot);\n"
+ "int subgroupPartitionedExclusiveMulNV(int, uvec4 ballot);\n"
+ "ivec2 subgroupPartitionedExclusiveMulNV(ivec2, uvec4 ballot);\n"
+ "ivec3 subgroupPartitionedExclusiveMulNV(ivec3, uvec4 ballot);\n"
+ "ivec4 subgroupPartitionedExclusiveMulNV(ivec4, uvec4 ballot);\n"
+ "uint subgroupPartitionedExclusiveMulNV(uint, uvec4 ballot);\n"
+ "uvec2 subgroupPartitionedExclusiveMulNV(uvec2, uvec4 ballot);\n"
+ "uvec3 subgroupPartitionedExclusiveMulNV(uvec3, uvec4 ballot);\n"
+ "uvec4 subgroupPartitionedExclusiveMulNV(uvec4, uvec4 ballot);\n"
+
+ "float subgroupPartitionedExclusiveMinNV(float, uvec4 ballot);\n"
+ "vec2 subgroupPartitionedExclusiveMinNV(vec2, uvec4 ballot);\n"
+ "vec3 subgroupPartitionedExclusiveMinNV(vec3, uvec4 ballot);\n"
+ "vec4 subgroupPartitionedExclusiveMinNV(vec4, uvec4 ballot);\n"
+ "int subgroupPartitionedExclusiveMinNV(int, uvec4 ballot);\n"
+ "ivec2 subgroupPartitionedExclusiveMinNV(ivec2, uvec4 ballot);\n"
+ "ivec3 subgroupPartitionedExclusiveMinNV(ivec3, uvec4 ballot);\n"
+ "ivec4 subgroupPartitionedExclusiveMinNV(ivec4, uvec4 ballot);\n"
+ "uint subgroupPartitionedExclusiveMinNV(uint, uvec4 ballot);\n"
+ "uvec2 subgroupPartitionedExclusiveMinNV(uvec2, uvec4 ballot);\n"
+ "uvec3 subgroupPartitionedExclusiveMinNV(uvec3, uvec4 ballot);\n"
+ "uvec4 subgroupPartitionedExclusiveMinNV(uvec4, uvec4 ballot);\n"
+
+ "float subgroupPartitionedExclusiveMaxNV(float, uvec4 ballot);\n"
+ "vec2 subgroupPartitionedExclusiveMaxNV(vec2, uvec4 ballot);\n"
+ "vec3 subgroupPartitionedExclusiveMaxNV(vec3, uvec4 ballot);\n"
+ "vec4 subgroupPartitionedExclusiveMaxNV(vec4, uvec4 ballot);\n"
+ "int subgroupPartitionedExclusiveMaxNV(int, uvec4 ballot);\n"
+ "ivec2 subgroupPartitionedExclusiveMaxNV(ivec2, uvec4 ballot);\n"
+ "ivec3 subgroupPartitionedExclusiveMaxNV(ivec3, uvec4 ballot);\n"
+ "ivec4 subgroupPartitionedExclusiveMaxNV(ivec4, uvec4 ballot);\n"
+ "uint subgroupPartitionedExclusiveMaxNV(uint, uvec4 ballot);\n"
+ "uvec2 subgroupPartitionedExclusiveMaxNV(uvec2, uvec4 ballot);\n"
+ "uvec3 subgroupPartitionedExclusiveMaxNV(uvec3, uvec4 ballot);\n"
+ "uvec4 subgroupPartitionedExclusiveMaxNV(uvec4, uvec4 ballot);\n"
+
+ "int subgroupPartitionedExclusiveAndNV(int, uvec4 ballot);\n"
+ "ivec2 subgroupPartitionedExclusiveAndNV(ivec2, uvec4 ballot);\n"
+ "ivec3 subgroupPartitionedExclusiveAndNV(ivec3, uvec4 ballot);\n"
+ "ivec4 subgroupPartitionedExclusiveAndNV(ivec4, uvec4 ballot);\n"
+ "uint subgroupPartitionedExclusiveAndNV(uint, uvec4 ballot);\n"
+ "uvec2 subgroupPartitionedExclusiveAndNV(uvec2, uvec4 ballot);\n"
+ "uvec3 subgroupPartitionedExclusiveAndNV(uvec3, uvec4 ballot);\n"
+ "uvec4 subgroupPartitionedExclusiveAndNV(uvec4, uvec4 ballot);\n"
+ "bool subgroupPartitionedExclusiveAndNV(bool, uvec4 ballot);\n"
+ "bvec2 subgroupPartitionedExclusiveAndNV(bvec2, uvec4 ballot);\n"
+ "bvec3 subgroupPartitionedExclusiveAndNV(bvec3, uvec4 ballot);\n"
+ "bvec4 subgroupPartitionedExclusiveAndNV(bvec4, uvec4 ballot);\n"
+
+ "int subgroupPartitionedExclusiveOrNV(int, uvec4 ballot);\n"
+ "ivec2 subgroupPartitionedExclusiveOrNV(ivec2, uvec4 ballot);\n"
+ "ivec3 subgroupPartitionedExclusiveOrNV(ivec3, uvec4 ballot);\n"
+ "ivec4 subgroupPartitionedExclusiveOrNV(ivec4, uvec4 ballot);\n"
+ "uint subgroupPartitionedExclusiveOrNV(uint, uvec4 ballot);\n"
+ "uvec2 subgroupPartitionedExclusiveOrNV(uvec2, uvec4 ballot);\n"
+ "uvec3 subgroupPartitionedExclusiveOrNV(uvec3, uvec4 ballot);\n"
+ "uvec4 subgroupPartitionedExclusiveOrNV(uvec4, uvec4 ballot);\n"
+ "bool subgroupPartitionedExclusiveOrNV(bool, uvec4 ballot);\n"
+ "bvec2 subgroupPartitionedExclusiveOrNV(bvec2, uvec4 ballot);\n"
+ "bvec3 subgroupPartitionedExclusiveOrNV(bvec3, uvec4 ballot);\n"
+ "bvec4 subgroupPartitionedExclusiveOrNV(bvec4, uvec4 ballot);\n"
+
+ "int subgroupPartitionedExclusiveXorNV(int, uvec4 ballot);\n"
+ "ivec2 subgroupPartitionedExclusiveXorNV(ivec2, uvec4 ballot);\n"
+ "ivec3 subgroupPartitionedExclusiveXorNV(ivec3, uvec4 ballot);\n"
+ "ivec4 subgroupPartitionedExclusiveXorNV(ivec4, uvec4 ballot);\n"
+ "uint subgroupPartitionedExclusiveXorNV(uint, uvec4 ballot);\n"
+ "uvec2 subgroupPartitionedExclusiveXorNV(uvec2, uvec4 ballot);\n"
+ "uvec3 subgroupPartitionedExclusiveXorNV(uvec3, uvec4 ballot);\n"
+ "uvec4 subgroupPartitionedExclusiveXorNV(uvec4, uvec4 ballot);\n"
+ "bool subgroupPartitionedExclusiveXorNV(bool, uvec4 ballot);\n"
+ "bvec2 subgroupPartitionedExclusiveXorNV(bvec2, uvec4 ballot);\n"
+ "bvec3 subgroupPartitionedExclusiveXorNV(bvec3, uvec4 ballot);\n"
+ "bvec4 subgroupPartitionedExclusiveXorNV(bvec4, uvec4 ballot);\n"
+#endif
+
+ "\n");
+
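+ // Double-precision overloads of the subgroup built-ins are only declared
+ // for desktop profiles at GLSL 4.00 and up, where fp64 types exist.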
+ if (profile != EEsProfile && version >= 400) {
+ commonBuiltins.append(
+ "bool subgroupAllEqual(double);\n"
+ "bool subgroupAllEqual(dvec2);\n"
+ "bool subgroupAllEqual(dvec3);\n"
+ "bool subgroupAllEqual(dvec4);\n"
+
+ "double subgroupBroadcast(double, uint);\n"
+ "dvec2 subgroupBroadcast(dvec2, uint);\n"
+ "dvec3 subgroupBroadcast(dvec3, uint);\n"
+ "dvec4 subgroupBroadcast(dvec4, uint);\n"
+
+ "double subgroupBroadcastFirst(double);\n"
+ "dvec2 subgroupBroadcastFirst(dvec2);\n"
+ "dvec3 subgroupBroadcastFirst(dvec3);\n"
+ "dvec4 subgroupBroadcastFirst(dvec4);\n"
+
+ "double subgroupShuffle(double, uint);\n"
+ "dvec2 subgroupShuffle(dvec2, uint);\n"
+ "dvec3 subgroupShuffle(dvec3, uint);\n"
+ "dvec4 subgroupShuffle(dvec4, uint);\n"
+
+ "double subgroupShuffleXor(double, uint);\n"
+ "dvec2 subgroupShuffleXor(dvec2, uint);\n"
+ "dvec3 subgroupShuffleXor(dvec3, uint);\n"
+ "dvec4 subgroupShuffleXor(dvec4, uint);\n"
+
+ "double subgroupShuffleUp(double, uint delta);\n"
+ "dvec2 subgroupShuffleUp(dvec2, uint delta);\n"
+ "dvec3 subgroupShuffleUp(dvec3, uint delta);\n"
+ "dvec4 subgroupShuffleUp(dvec4, uint delta);\n"
+
+ "double subgroupShuffleDown(double, uint delta);\n"
+ "dvec2 subgroupShuffleDown(dvec2, uint delta);\n"
+ "dvec3 subgroupShuffleDown(dvec3, uint delta);\n"
+ "dvec4 subgroupShuffleDown(dvec4, uint delta);\n"
+
+ "double subgroupAdd(double);\n"
+ "dvec2 subgroupAdd(dvec2);\n"
+ "dvec3 subgroupAdd(dvec3);\n"
+ "dvec4 subgroupAdd(dvec4);\n"
+
+ "double subgroupMul(double);\n"
+ "dvec2 subgroupMul(dvec2);\n"
+ "dvec3 subgroupMul(dvec3);\n"
+ "dvec4 subgroupMul(dvec4);\n"
+
+ "double subgroupMin(double);\n"
+ "dvec2 subgroupMin(dvec2);\n"
+ "dvec3 subgroupMin(dvec3);\n"
+ "dvec4 subgroupMin(dvec4);\n"
+
+ "double subgroupMax(double);\n"
+ "dvec2 subgroupMax(dvec2);\n"
+ "dvec3 subgroupMax(dvec3);\n"
+ "dvec4 subgroupMax(dvec4);\n"
+
+ "double subgroupInclusiveAdd(double);\n"
+ "dvec2 subgroupInclusiveAdd(dvec2);\n"
+ "dvec3 subgroupInclusiveAdd(dvec3);\n"
+ "dvec4 subgroupInclusiveAdd(dvec4);\n"
+
+ "double subgroupInclusiveMul(double);\n"
+ "dvec2 subgroupInclusiveMul(dvec2);\n"
+ "dvec3 subgroupInclusiveMul(dvec3);\n"
+ "dvec4 subgroupInclusiveMul(dvec4);\n"
+
+ "double subgroupInclusiveMin(double);\n"
+ "dvec2 subgroupInclusiveMin(dvec2);\n"
+ "dvec3 subgroupInclusiveMin(dvec3);\n"
+ "dvec4 subgroupInclusiveMin(dvec4);\n"
+
+ "double subgroupInclusiveMax(double);\n"
+ "dvec2 subgroupInclusiveMax(dvec2);\n"
+ "dvec3 subgroupInclusiveMax(dvec3);\n"
+ "dvec4 subgroupInclusiveMax(dvec4);\n"
+
+ "double subgroupExclusiveAdd(double);\n"
+ "dvec2 subgroupExclusiveAdd(dvec2);\n"
+ "dvec3 subgroupExclusiveAdd(dvec3);\n"
+ "dvec4 subgroupExclusiveAdd(dvec4);\n"
+
+ "double subgroupExclusiveMul(double);\n"
+ "dvec2 subgroupExclusiveMul(dvec2);\n"
+ "dvec3 subgroupExclusiveMul(dvec3);\n"
+ "dvec4 subgroupExclusiveMul(dvec4);\n"
+
+ "double subgroupExclusiveMin(double);\n"
+ "dvec2 subgroupExclusiveMin(dvec2);\n"
+ "dvec3 subgroupExclusiveMin(dvec3);\n"
+ "dvec4 subgroupExclusiveMin(dvec4);\n"
+
+ "double subgroupExclusiveMax(double);\n"
+ "dvec2 subgroupExclusiveMax(dvec2);\n"
+ "dvec3 subgroupExclusiveMax(dvec3);\n"
+ "dvec4 subgroupExclusiveMax(dvec4);\n"
+
+ "double subgroupClusteredAdd(double, uint);\n"
+ "dvec2 subgroupClusteredAdd(dvec2, uint);\n"
+ "dvec3 subgroupClusteredAdd(dvec3, uint);\n"
+ "dvec4 subgroupClusteredAdd(dvec4, uint);\n"
+
+ "double subgroupClusteredMul(double, uint);\n"
+ "dvec2 subgroupClusteredMul(dvec2, uint);\n"
+ "dvec3 subgroupClusteredMul(dvec3, uint);\n"
+ "dvec4 subgroupClusteredMul(dvec4, uint);\n"
+
+ "double subgroupClusteredMin(double, uint);\n"
+ "dvec2 subgroupClusteredMin(dvec2, uint);\n"
+ "dvec3 subgroupClusteredMin(dvec3, uint);\n"
+ "dvec4 subgroupClusteredMin(dvec4, uint);\n"
+
+ "double subgroupClusteredMax(double, uint);\n"
+ "dvec2 subgroupClusteredMax(dvec2, uint);\n"
+ "dvec3 subgroupClusteredMax(dvec3, uint);\n"
+ "dvec4 subgroupClusteredMax(dvec4, uint);\n"
+
+ "double subgroupQuadBroadcast(double, uint);\n"
+ "dvec2 subgroupQuadBroadcast(dvec2, uint);\n"
+ "dvec3 subgroupQuadBroadcast(dvec3, uint);\n"
+ "dvec4 subgroupQuadBroadcast(dvec4, uint);\n"
+
+ "double subgroupQuadSwapHorizontal(double);\n"
+ "dvec2 subgroupQuadSwapHorizontal(dvec2);\n"
+ "dvec3 subgroupQuadSwapHorizontal(dvec3);\n"
+ "dvec4 subgroupQuadSwapHorizontal(dvec4);\n"
+
+ "double subgroupQuadSwapVertical(double);\n"
+ "dvec2 subgroupQuadSwapVertical(dvec2);\n"
+ "dvec3 subgroupQuadSwapVertical(dvec3);\n"
+ "dvec4 subgroupQuadSwapVertical(dvec4);\n"
+
+ "double subgroupQuadSwapDiagonal(double);\n"
+ "dvec2 subgroupQuadSwapDiagonal(dvec2);\n"
+ "dvec3 subgroupQuadSwapDiagonal(dvec3);\n"
+ "dvec4 subgroupQuadSwapDiagonal(dvec4);\n"
+
+#ifdef NV_EXTENSIONS
+ "uvec4 subgroupPartitionNV(double);\n"
+ "uvec4 subgroupPartitionNV(dvec2);\n"
+ "uvec4 subgroupPartitionNV(dvec3);\n"
+ "uvec4 subgroupPartitionNV(dvec4);\n"
+
+ "double subgroupPartitionedAddNV(double, uvec4 ballot);\n"
+ "dvec2 subgroupPartitionedAddNV(dvec2, uvec4 ballot);\n"
+ "dvec3 subgroupPartitionedAddNV(dvec3, uvec4 ballot);\n"
+ "dvec4 subgroupPartitionedAddNV(dvec4, uvec4 ballot);\n"
+
+ "double subgroupPartitionedMulNV(double, uvec4 ballot);\n"
+ "dvec2 subgroupPartitionedMulNV(dvec2, uvec4 ballot);\n"
+ "dvec3 subgroupPartitionedMulNV(dvec3, uvec4 ballot);\n"
+ "dvec4 subgroupPartitionedMulNV(dvec4, uvec4 ballot);\n"
+
+ "double subgroupPartitionedMinNV(double, uvec4 ballot);\n"
+ "dvec2 subgroupPartitionedMinNV(dvec2, uvec4 ballot);\n"
+ "dvec3 subgroupPartitionedMinNV(dvec3, uvec4 ballot);\n"
+ "dvec4 subgroupPartitionedMinNV(dvec4, uvec4 ballot);\n"
+
+ "double subgroupPartitionedMaxNV(double, uvec4 ballot);\n"
+ "dvec2 subgroupPartitionedMaxNV(dvec2, uvec4 ballot);\n"
+ "dvec3 subgroupPartitionedMaxNV(dvec3, uvec4 ballot);\n"
+ "dvec4 subgroupPartitionedMaxNV(dvec4, uvec4 ballot);\n"
+
+ "double subgroupPartitionedInclusiveAddNV(double, uvec4 ballot);\n"
+ "dvec2 subgroupPartitionedInclusiveAddNV(dvec2, uvec4 ballot);\n"
+ "dvec3 subgroupPartitionedInclusiveAddNV(dvec3, uvec4 ballot);\n"
+ "dvec4 subgroupPartitionedInclusiveAddNV(dvec4, uvec4 ballot);\n"
+
+ "double subgroupPartitionedInclusiveMulNV(double, uvec4 ballot);\n"
+ "dvec2 subgroupPartitionedInclusiveMulNV(dvec2, uvec4 ballot);\n"
+ "dvec3 subgroupPartitionedInclusiveMulNV(dvec3, uvec4 ballot);\n"
+ "dvec4 subgroupPartitionedInclusiveMulNV(dvec4, uvec4 ballot);\n"
+
+ "double subgroupPartitionedInclusiveMinNV(double, uvec4 ballot);\n"
+ "dvec2 subgroupPartitionedInclusiveMinNV(dvec2, uvec4 ballot);\n"
+ "dvec3 subgroupPartitionedInclusiveMinNV(dvec3, uvec4 ballot);\n"
+ "dvec4 subgroupPartitionedInclusiveMinNV(dvec4, uvec4 ballot);\n"
+
+ "double subgroupPartitionedInclusiveMaxNV(double, uvec4 ballot);\n"
+ "dvec2 subgroupPartitionedInclusiveMaxNV(dvec2, uvec4 ballot);\n"
+ "dvec3 subgroupPartitionedInclusiveMaxNV(dvec3, uvec4 ballot);\n"
+ "dvec4 subgroupPartitionedInclusiveMaxNV(dvec4, uvec4 ballot);\n"
+
+ "double subgroupPartitionedExclusiveAddNV(double, uvec4 ballot);\n"
+ "dvec2 subgroupPartitionedExclusiveAddNV(dvec2, uvec4 ballot);\n"
+ "dvec3 subgroupPartitionedExclusiveAddNV(dvec3, uvec4 ballot);\n"
+ "dvec4 subgroupPartitionedExclusiveAddNV(dvec4, uvec4 ballot);\n"
+
+ "double subgroupPartitionedExclusiveMulNV(double, uvec4 ballot);\n"
+ "dvec2 subgroupPartitionedExclusiveMulNV(dvec2, uvec4 ballot);\n"
+ "dvec3 subgroupPartitionedExclusiveMulNV(dvec3, uvec4 ballot);\n"
+ "dvec4 subgroupPartitionedExclusiveMulNV(dvec4, uvec4 ballot);\n"
+
+ "double subgroupPartitionedExclusiveMinNV(double, uvec4 ballot);\n"
+ "dvec2 subgroupPartitionedExclusiveMinNV(dvec2, uvec4 ballot);\n"
+ "dvec3 subgroupPartitionedExclusiveMinNV(dvec3, uvec4 ballot);\n"
+ "dvec4 subgroupPartitionedExclusiveMinNV(dvec4, uvec4 ballot);\n"
+
+ "double subgroupPartitionedExclusiveMaxNV(double, uvec4 ballot);\n"
+ "dvec2 subgroupPartitionedExclusiveMaxNV(dvec2, uvec4 ballot);\n"
+ "dvec3 subgroupPartitionedExclusiveMaxNV(dvec3, uvec4 ballot);\n"
+ "dvec4 subgroupPartitionedExclusiveMaxNV(dvec4, uvec4 ballot);\n"
+#endif
+
+ "\n");
+ }
+
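+ // subgroupMemoryBarrierShared() is only declared for stages that have
+ // shared memory: compute, plus mesh/task when NV_EXTENSIONS is enabled.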
+ stageBuiltins[EShLangCompute].append(
+ "void subgroupMemoryBarrierShared();"
+
+ "\n"
+ );
+#ifdef NV_EXTENSIONS
+ stageBuiltins[EShLangMeshNV].append(
+ "void subgroupMemoryBarrierShared();"
+ "\n"
+ );
+ stageBuiltins[EShLangTaskNV].append(
+ "void subgroupMemoryBarrierShared();"
+ "\n"
+ );
+#endif
+ }
+
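+ // Core GLSL 4.60 invocation-vote built-ins (promoted from
+ // GL_ARB_shader_group_vote).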
+ if (profile != EEsProfile && version >= 460) {
+ commonBuiltins.append(
+ "bool anyInvocation(bool);"
+ "bool allInvocations(bool);"
+ "bool allInvocationsEqual(bool);"
+
+ "\n");
+ }
+
+#ifdef AMD_EXTENSIONS
+ // GL_AMD_shader_ballot
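+ // Each reduction (min/max/add) is declared for nine scalar/vector base
+ // types and also has InclusiveScan, ExclusiveScan, and NonUniform
+ // variants; the NonUniform forms are intended for non-uniform control flow.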
+ if (profile != EEsProfile && version >= 450) {
+ commonBuiltins.append(
+ "float minInvocationsAMD(float);"
+ "vec2 minInvocationsAMD(vec2);"
+ "vec3 minInvocationsAMD(vec3);"
+ "vec4 minInvocationsAMD(vec4);"
+
+ "int minInvocationsAMD(int);"
+ "ivec2 minInvocationsAMD(ivec2);"
+ "ivec3 minInvocationsAMD(ivec3);"
+ "ivec4 minInvocationsAMD(ivec4);"
+
+ "uint minInvocationsAMD(uint);"
+ "uvec2 minInvocationsAMD(uvec2);"
+ "uvec3 minInvocationsAMD(uvec3);"
+ "uvec4 minInvocationsAMD(uvec4);"
+
+ "double minInvocationsAMD(double);"
+ "dvec2 minInvocationsAMD(dvec2);"
+ "dvec3 minInvocationsAMD(dvec3);"
+ "dvec4 minInvocationsAMD(dvec4);"
+
+ "int64_t minInvocationsAMD(int64_t);"
+ "i64vec2 minInvocationsAMD(i64vec2);"
+ "i64vec3 minInvocationsAMD(i64vec3);"
+ "i64vec4 minInvocationsAMD(i64vec4);"
+
+ "uint64_t minInvocationsAMD(uint64_t);"
+ "u64vec2 minInvocationsAMD(u64vec2);"
+ "u64vec3 minInvocationsAMD(u64vec3);"
+ "u64vec4 minInvocationsAMD(u64vec4);"
+
+ "float16_t minInvocationsAMD(float16_t);"
+ "f16vec2 minInvocationsAMD(f16vec2);"
+ "f16vec3 minInvocationsAMD(f16vec3);"
+ "f16vec4 minInvocationsAMD(f16vec4);"
+
+ "int16_t minInvocationsAMD(int16_t);"
+ "i16vec2 minInvocationsAMD(i16vec2);"
+ "i16vec3 minInvocationsAMD(i16vec3);"
+ "i16vec4 minInvocationsAMD(i16vec4);"
+
+ "uint16_t minInvocationsAMD(uint16_t);"
+ "u16vec2 minInvocationsAMD(u16vec2);"
+ "u16vec3 minInvocationsAMD(u16vec3);"
+ "u16vec4 minInvocationsAMD(u16vec4);"
+
+ "float minInvocationsInclusiveScanAMD(float);"
+ "vec2 minInvocationsInclusiveScanAMD(vec2);"
+ "vec3 minInvocationsInclusiveScanAMD(vec3);"
+ "vec4 minInvocationsInclusiveScanAMD(vec4);"
+
+ "int minInvocationsInclusiveScanAMD(int);"
+ "ivec2 minInvocationsInclusiveScanAMD(ivec2);"
+ "ivec3 minInvocationsInclusiveScanAMD(ivec3);"
+ "ivec4 minInvocationsInclusiveScanAMD(ivec4);"
+
+ "uint minInvocationsInclusiveScanAMD(uint);"
+ "uvec2 minInvocationsInclusiveScanAMD(uvec2);"
+ "uvec3 minInvocationsInclusiveScanAMD(uvec3);"
+ "uvec4 minInvocationsInclusiveScanAMD(uvec4);"
+
+ "double minInvocationsInclusiveScanAMD(double);"
+ "dvec2 minInvocationsInclusiveScanAMD(dvec2);"
+ "dvec3 minInvocationsInclusiveScanAMD(dvec3);"
+ "dvec4 minInvocationsInclusiveScanAMD(dvec4);"
+
+ "int64_t minInvocationsInclusiveScanAMD(int64_t);"
+ "i64vec2 minInvocationsInclusiveScanAMD(i64vec2);"
+ "i64vec3 minInvocationsInclusiveScanAMD(i64vec3);"
+ "i64vec4 minInvocationsInclusiveScanAMD(i64vec4);"
+
+ "uint64_t minInvocationsInclusiveScanAMD(uint64_t);"
+ "u64vec2 minInvocationsInclusiveScanAMD(u64vec2);"
+ "u64vec3 minInvocationsInclusiveScanAMD(u64vec3);"
+ "u64vec4 minInvocationsInclusiveScanAMD(u64vec4);"
+
+ "float16_t minInvocationsInclusiveScanAMD(float16_t);"
+ "f16vec2 minInvocationsInclusiveScanAMD(f16vec2);"
+ "f16vec3 minInvocationsInclusiveScanAMD(f16vec3);"
+ "f16vec4 minInvocationsInclusiveScanAMD(f16vec4);"
+
+ "int16_t minInvocationsInclusiveScanAMD(int16_t);"
+ "i16vec2 minInvocationsInclusiveScanAMD(i16vec2);"
+ "i16vec3 minInvocationsInclusiveScanAMD(i16vec3);"
+ "i16vec4 minInvocationsInclusiveScanAMD(i16vec4);"
+
+ "uint16_t minInvocationsInclusiveScanAMD(uint16_t);"
+ "u16vec2 minInvocationsInclusiveScanAMD(u16vec2);"
+ "u16vec3 minInvocationsInclusiveScanAMD(u16vec3);"
+ "u16vec4 minInvocationsInclusiveScanAMD(u16vec4);"
+
+ "float minInvocationsExclusiveScanAMD(float);"
+ "vec2 minInvocationsExclusiveScanAMD(vec2);"
+ "vec3 minInvocationsExclusiveScanAMD(vec3);"
+ "vec4 minInvocationsExclusiveScanAMD(vec4);"
+
+ "int minInvocationsExclusiveScanAMD(int);"
+ "ivec2 minInvocationsExclusiveScanAMD(ivec2);"
+ "ivec3 minInvocationsExclusiveScanAMD(ivec3);"
+ "ivec4 minInvocationsExclusiveScanAMD(ivec4);"
+
+ "uint minInvocationsExclusiveScanAMD(uint);"
+ "uvec2 minInvocationsExclusiveScanAMD(uvec2);"
+ "uvec3 minInvocationsExclusiveScanAMD(uvec3);"
+ "uvec4 minInvocationsExclusiveScanAMD(uvec4);"
+
+ "double minInvocationsExclusiveScanAMD(double);"
+ "dvec2 minInvocationsExclusiveScanAMD(dvec2);"
+ "dvec3 minInvocationsExclusiveScanAMD(dvec3);"
+ "dvec4 minInvocationsExclusiveScanAMD(dvec4);"
+
+ "int64_t minInvocationsExclusiveScanAMD(int64_t);"
+ "i64vec2 minInvocationsExclusiveScanAMD(i64vec2);"
+ "i64vec3 minInvocationsExclusiveScanAMD(i64vec3);"
+ "i64vec4 minInvocationsExclusiveScanAMD(i64vec4);"
+
+ "uint64_t minInvocationsExclusiveScanAMD(uint64_t);"
+ "u64vec2 minInvocationsExclusiveScanAMD(u64vec2);"
+ "u64vec3 minInvocationsExclusiveScanAMD(u64vec3);"
+ "u64vec4 minInvocationsExclusiveScanAMD(u64vec4);"
+
+ "float16_t minInvocationsExclusiveScanAMD(float16_t);"
+ "f16vec2 minInvocationsExclusiveScanAMD(f16vec2);"
+ "f16vec3 minInvocationsExclusiveScanAMD(f16vec3);"
+ "f16vec4 minInvocationsExclusiveScanAMD(f16vec4);"
+
+ "int16_t minInvocationsExclusiveScanAMD(int16_t);"
+ "i16vec2 minInvocationsExclusiveScanAMD(i16vec2);"
+ "i16vec3 minInvocationsExclusiveScanAMD(i16vec3);"
+ "i16vec4 minInvocationsExclusiveScanAMD(i16vec4);"
+
+ "uint16_t minInvocationsExclusiveScanAMD(uint16_t);"
+ "u16vec2 minInvocationsExclusiveScanAMD(u16vec2);"
+ "u16vec3 minInvocationsExclusiveScanAMD(u16vec3);"
+ "u16vec4 minInvocationsExclusiveScanAMD(u16vec4);"
+
+ "float maxInvocationsAMD(float);"
+ "vec2 maxInvocationsAMD(vec2);"
+ "vec3 maxInvocationsAMD(vec3);"
+ "vec4 maxInvocationsAMD(vec4);"
+
+ "int maxInvocationsAMD(int);"
+ "ivec2 maxInvocationsAMD(ivec2);"
+ "ivec3 maxInvocationsAMD(ivec3);"
+ "ivec4 maxInvocationsAMD(ivec4);"
+
+ "uint maxInvocationsAMD(uint);"
+ "uvec2 maxInvocationsAMD(uvec2);"
+ "uvec3 maxInvocationsAMD(uvec3);"
+ "uvec4 maxInvocationsAMD(uvec4);"
+
+ "double maxInvocationsAMD(double);"
+ "dvec2 maxInvocationsAMD(dvec2);"
+ "dvec3 maxInvocationsAMD(dvec3);"
+ "dvec4 maxInvocationsAMD(dvec4);"
+
+ "int64_t maxInvocationsAMD(int64_t);"
+ "i64vec2 maxInvocationsAMD(i64vec2);"
+ "i64vec3 maxInvocationsAMD(i64vec3);"
+ "i64vec4 maxInvocationsAMD(i64vec4);"
+
+ "uint64_t maxInvocationsAMD(uint64_t);"
+ "u64vec2 maxInvocationsAMD(u64vec2);"
+ "u64vec3 maxInvocationsAMD(u64vec3);"
+ "u64vec4 maxInvocationsAMD(u64vec4);"
+
+ "float16_t maxInvocationsAMD(float16_t);"
+ "f16vec2 maxInvocationsAMD(f16vec2);"
+ "f16vec3 maxInvocationsAMD(f16vec3);"
+ "f16vec4 maxInvocationsAMD(f16vec4);"
+
+ "int16_t maxInvocationsAMD(int16_t);"
+ "i16vec2 maxInvocationsAMD(i16vec2);"
+ "i16vec3 maxInvocationsAMD(i16vec3);"
+ "i16vec4 maxInvocationsAMD(i16vec4);"
+
+ "uint16_t maxInvocationsAMD(uint16_t);"
+ "u16vec2 maxInvocationsAMD(u16vec2);"
+ "u16vec3 maxInvocationsAMD(u16vec3);"
+ "u16vec4 maxInvocationsAMD(u16vec4);"
+
+ "float maxInvocationsInclusiveScanAMD(float);"
+ "vec2 maxInvocationsInclusiveScanAMD(vec2);"
+ "vec3 maxInvocationsInclusiveScanAMD(vec3);"
+ "vec4 maxInvocationsInclusiveScanAMD(vec4);"
+
+ "int maxInvocationsInclusiveScanAMD(int);"
+ "ivec2 maxInvocationsInclusiveScanAMD(ivec2);"
+ "ivec3 maxInvocationsInclusiveScanAMD(ivec3);"
+ "ivec4 maxInvocationsInclusiveScanAMD(ivec4);"
+
+ "uint maxInvocationsInclusiveScanAMD(uint);"
+ "uvec2 maxInvocationsInclusiveScanAMD(uvec2);"
+ "uvec3 maxInvocationsInclusiveScanAMD(uvec3);"
+ "uvec4 maxInvocationsInclusiveScanAMD(uvec4);"
+
+ "double maxInvocationsInclusiveScanAMD(double);"
+ "dvec2 maxInvocationsInclusiveScanAMD(dvec2);"
+ "dvec3 maxInvocationsInclusiveScanAMD(dvec3);"
+ "dvec4 maxInvocationsInclusiveScanAMD(dvec4);"
+
+ "int64_t maxInvocationsInclusiveScanAMD(int64_t);"
+ "i64vec2 maxInvocationsInclusiveScanAMD(i64vec2);"
+ "i64vec3 maxInvocationsInclusiveScanAMD(i64vec3);"
+ "i64vec4 maxInvocationsInclusiveScanAMD(i64vec4);"
+
+ "uint64_t maxInvocationsInclusiveScanAMD(uint64_t);"
+ "u64vec2 maxInvocationsInclusiveScanAMD(u64vec2);"
+ "u64vec3 maxInvocationsInclusiveScanAMD(u64vec3);"
+ "u64vec4 maxInvocationsInclusiveScanAMD(u64vec4);"
+
+ "float16_t maxInvocationsInclusiveScanAMD(float16_t);"
+ "f16vec2 maxInvocationsInclusiveScanAMD(f16vec2);"
+ "f16vec3 maxInvocationsInclusiveScanAMD(f16vec3);"
+ "f16vec4 maxInvocationsInclusiveScanAMD(f16vec4);"
+
+ "int16_t maxInvocationsInclusiveScanAMD(int16_t);"
+ "i16vec2 maxInvocationsInclusiveScanAMD(i16vec2);"
+ "i16vec3 maxInvocationsInclusiveScanAMD(i16vec3);"
+ "i16vec4 maxInvocationsInclusiveScanAMD(i16vec4);"
+
+ "uint16_t maxInvocationsInclusiveScanAMD(uint16_t);"
+ "u16vec2 maxInvocationsInclusiveScanAMD(u16vec2);"
+ "u16vec3 maxInvocationsInclusiveScanAMD(u16vec3);"
+ "u16vec4 maxInvocationsInclusiveScanAMD(u16vec4);"
+
+ "float maxInvocationsExclusiveScanAMD(float);"
+ "vec2 maxInvocationsExclusiveScanAMD(vec2);"
+ "vec3 maxInvocationsExclusiveScanAMD(vec3);"
+ "vec4 maxInvocationsExclusiveScanAMD(vec4);"
+
+ "int maxInvocationsExclusiveScanAMD(int);"
+ "ivec2 maxInvocationsExclusiveScanAMD(ivec2);"
+ "ivec3 maxInvocationsExclusiveScanAMD(ivec3);"
+ "ivec4 maxInvocationsExclusiveScanAMD(ivec4);"
+
+ "uint maxInvocationsExclusiveScanAMD(uint);"
+ "uvec2 maxInvocationsExclusiveScanAMD(uvec2);"
+ "uvec3 maxInvocationsExclusiveScanAMD(uvec3);"
+ "uvec4 maxInvocationsExclusiveScanAMD(uvec4);"
+
+ "double maxInvocationsExclusiveScanAMD(double);"
+ "dvec2 maxInvocationsExclusiveScanAMD(dvec2);"
+ "dvec3 maxInvocationsExclusiveScanAMD(dvec3);"
+ "dvec4 maxInvocationsExclusiveScanAMD(dvec4);"
+
+ "int64_t maxInvocationsExclusiveScanAMD(int64_t);"
+ "i64vec2 maxInvocationsExclusiveScanAMD(i64vec2);"
+ "i64vec3 maxInvocationsExclusiveScanAMD(i64vec3);"
+ "i64vec4 maxInvocationsExclusiveScanAMD(i64vec4);"
+
+ "uint64_t maxInvocationsExclusiveScanAMD(uint64_t);"
+ "u64vec2 maxInvocationsExclusiveScanAMD(u64vec2);"
+ "u64vec3 maxInvocationsExclusiveScanAMD(u64vec3);"
+ "u64vec4 maxInvocationsExclusiveScanAMD(u64vec4);"
+
+ "float16_t maxInvocationsExclusiveScanAMD(float16_t);"
+ "f16vec2 maxInvocationsExclusiveScanAMD(f16vec2);"
+ "f16vec3 maxInvocationsExclusiveScanAMD(f16vec3);"
+ "f16vec4 maxInvocationsExclusiveScanAMD(f16vec4);"
+
+ "int16_t maxInvocationsExclusiveScanAMD(int16_t);"
+ "i16vec2 maxInvocationsExclusiveScanAMD(i16vec2);"
+ "i16vec3 maxInvocationsExclusiveScanAMD(i16vec3);"
+ "i16vec4 maxInvocationsExclusiveScanAMD(i16vec4);"
+
+ "uint16_t maxInvocationsExclusiveScanAMD(uint16_t);"
+ "u16vec2 maxInvocationsExclusiveScanAMD(u16vec2);"
+ "u16vec3 maxInvocationsExclusiveScanAMD(u16vec3);"
+ "u16vec4 maxInvocationsExclusiveScanAMD(u16vec4);"
+
+ "float addInvocationsAMD(float);"
+ "vec2 addInvocationsAMD(vec2);"
+ "vec3 addInvocationsAMD(vec3);"
+ "vec4 addInvocationsAMD(vec4);"
+
+ "int addInvocationsAMD(int);"
+ "ivec2 addInvocationsAMD(ivec2);"
+ "ivec3 addInvocationsAMD(ivec3);"
+ "ivec4 addInvocationsAMD(ivec4);"
+
+ "uint addInvocationsAMD(uint);"
+ "uvec2 addInvocationsAMD(uvec2);"
+ "uvec3 addInvocationsAMD(uvec3);"
+ "uvec4 addInvocationsAMD(uvec4);"
+
+ "double addInvocationsAMD(double);"
+ "dvec2 addInvocationsAMD(dvec2);"
+ "dvec3 addInvocationsAMD(dvec3);"
+ "dvec4 addInvocationsAMD(dvec4);"
+
+ "int64_t addInvocationsAMD(int64_t);"
+ "i64vec2 addInvocationsAMD(i64vec2);"
+ "i64vec3 addInvocationsAMD(i64vec3);"
+ "i64vec4 addInvocationsAMD(i64vec4);"
+
+ "uint64_t addInvocationsAMD(uint64_t);"
+ "u64vec2 addInvocationsAMD(u64vec2);"
+ "u64vec3 addInvocationsAMD(u64vec3);"
+ "u64vec4 addInvocationsAMD(u64vec4);"
+
+ "float16_t addInvocationsAMD(float16_t);"
+ "f16vec2 addInvocationsAMD(f16vec2);"
+ "f16vec3 addInvocationsAMD(f16vec3);"
+ "f16vec4 addInvocationsAMD(f16vec4);"
+
+ "int16_t addInvocationsAMD(int16_t);"
+ "i16vec2 addInvocationsAMD(i16vec2);"
+ "i16vec3 addInvocationsAMD(i16vec3);"
+ "i16vec4 addInvocationsAMD(i16vec4);"
+
+ "uint16_t addInvocationsAMD(uint16_t);"
+ "u16vec2 addInvocationsAMD(u16vec2);"
+ "u16vec3 addInvocationsAMD(u16vec3);"
+ "u16vec4 addInvocationsAMD(u16vec4);"
+
+ "float addInvocationsInclusiveScanAMD(float);"
+ "vec2 addInvocationsInclusiveScanAMD(vec2);"
+ "vec3 addInvocationsInclusiveScanAMD(vec3);"
+ "vec4 addInvocationsInclusiveScanAMD(vec4);"
+
+ "int addInvocationsInclusiveScanAMD(int);"
+ "ivec2 addInvocationsInclusiveScanAMD(ivec2);"
+ "ivec3 addInvocationsInclusiveScanAMD(ivec3);"
+ "ivec4 addInvocationsInclusiveScanAMD(ivec4);"
+
+ "uint addInvocationsInclusiveScanAMD(uint);"
+ "uvec2 addInvocationsInclusiveScanAMD(uvec2);"
+ "uvec3 addInvocationsInclusiveScanAMD(uvec3);"
+ "uvec4 addInvocationsInclusiveScanAMD(uvec4);"
+
+ "double addInvocationsInclusiveScanAMD(double);"
+ "dvec2 addInvocationsInclusiveScanAMD(dvec2);"
+ "dvec3 addInvocationsInclusiveScanAMD(dvec3);"
+ "dvec4 addInvocationsInclusiveScanAMD(dvec4);"
+
+ "int64_t addInvocationsInclusiveScanAMD(int64_t);"
+ "i64vec2 addInvocationsInclusiveScanAMD(i64vec2);"
+ "i64vec3 addInvocationsInclusiveScanAMD(i64vec3);"
+ "i64vec4 addInvocationsInclusiveScanAMD(i64vec4);"
+
+ "uint64_t addInvocationsInclusiveScanAMD(uint64_t);"
+ "u64vec2 addInvocationsInclusiveScanAMD(u64vec2);"
+ "u64vec3 addInvocationsInclusiveScanAMD(u64vec3);"
+ "u64vec4 addInvocationsInclusiveScanAMD(u64vec4);"
+
+ "float16_t addInvocationsInclusiveScanAMD(float16_t);"
+ "f16vec2 addInvocationsInclusiveScanAMD(f16vec2);"
+ "f16vec3 addInvocationsInclusiveScanAMD(f16vec3);"
+ "f16vec4 addInvocationsInclusiveScanAMD(f16vec4);"
+
+ "int16_t addInvocationsInclusiveScanAMD(int16_t);"
+ "i16vec2 addInvocationsInclusiveScanAMD(i16vec2);"
+ "i16vec3 addInvocationsInclusiveScanAMD(i16vec3);"
+ "i16vec4 addInvocationsInclusiveScanAMD(i16vec4);"
+
+ "uint16_t addInvocationsInclusiveScanAMD(uint16_t);"
+ "u16vec2 addInvocationsInclusiveScanAMD(u16vec2);"
+ "u16vec3 addInvocationsInclusiveScanAMD(u16vec3);"
+ "u16vec4 addInvocationsInclusiveScanAMD(u16vec4);"
+
+ "float addInvocationsExclusiveScanAMD(float);"
+ "vec2 addInvocationsExclusiveScanAMD(vec2);"
+ "vec3 addInvocationsExclusiveScanAMD(vec3);"
+ "vec4 addInvocationsExclusiveScanAMD(vec4);"
+
+ "int addInvocationsExclusiveScanAMD(int);"
+ "ivec2 addInvocationsExclusiveScanAMD(ivec2);"
+ "ivec3 addInvocationsExclusiveScanAMD(ivec3);"
+ "ivec4 addInvocationsExclusiveScanAMD(ivec4);"
+
+ "uint addInvocationsExclusiveScanAMD(uint);"
+ "uvec2 addInvocationsExclusiveScanAMD(uvec2);"
+ "uvec3 addInvocationsExclusiveScanAMD(uvec3);"
+ "uvec4 addInvocationsExclusiveScanAMD(uvec4);"
+
+ "double addInvocationsExclusiveScanAMD(double);"
+ "dvec2 addInvocationsExclusiveScanAMD(dvec2);"
+ "dvec3 addInvocationsExclusiveScanAMD(dvec3);"
+ "dvec4 addInvocationsExclusiveScanAMD(dvec4);"
+
+ "int64_t addInvocationsExclusiveScanAMD(int64_t);"
+ "i64vec2 addInvocationsExclusiveScanAMD(i64vec2);"
+ "i64vec3 addInvocationsExclusiveScanAMD(i64vec3);"
+ "i64vec4 addInvocationsExclusiveScanAMD(i64vec4);"
+
+ "uint64_t addInvocationsExclusiveScanAMD(uint64_t);"
+ "u64vec2 addInvocationsExclusiveScanAMD(u64vec2);"
+ "u64vec3 addInvocationsExclusiveScanAMD(u64vec3);"
+ "u64vec4 addInvocationsExclusiveScanAMD(u64vec4);"
+
+ "float16_t addInvocationsExclusiveScanAMD(float16_t);"
+ "f16vec2 addInvocationsExclusiveScanAMD(f16vec2);"
+ "f16vec3 addInvocationsExclusiveScanAMD(f16vec3);"
+ "f16vec4 addInvocationsExclusiveScanAMD(f16vec4);"
+
+ "int16_t addInvocationsExclusiveScanAMD(int16_t);"
+ "i16vec2 addInvocationsExclusiveScanAMD(i16vec2);"
+ "i16vec3 addInvocationsExclusiveScanAMD(i16vec3);"
+ "i16vec4 addInvocationsExclusiveScanAMD(i16vec4);"
+
+ "uint16_t addInvocationsExclusiveScanAMD(uint16_t);"
+ "u16vec2 addInvocationsExclusiveScanAMD(u16vec2);"
+ "u16vec3 addInvocationsExclusiveScanAMD(u16vec3);"
+ "u16vec4 addInvocationsExclusiveScanAMD(u16vec4);"
+
+ "float minInvocationsNonUniformAMD(float);"
+ "vec2 minInvocationsNonUniformAMD(vec2);"
+ "vec3 minInvocationsNonUniformAMD(vec3);"
+ "vec4 minInvocationsNonUniformAMD(vec4);"
+
+ "int minInvocationsNonUniformAMD(int);"
+ "ivec2 minInvocationsNonUniformAMD(ivec2);"
+ "ivec3 minInvocationsNonUniformAMD(ivec3);"
+ "ivec4 minInvocationsNonUniformAMD(ivec4);"
+
+ "uint minInvocationsNonUniformAMD(uint);"
+ "uvec2 minInvocationsNonUniformAMD(uvec2);"
+ "uvec3 minInvocationsNonUniformAMD(uvec3);"
+ "uvec4 minInvocationsNonUniformAMD(uvec4);"
+
+ "double minInvocationsNonUniformAMD(double);"
+ "dvec2 minInvocationsNonUniformAMD(dvec2);"
+ "dvec3 minInvocationsNonUniformAMD(dvec3);"
+ "dvec4 minInvocationsNonUniformAMD(dvec4);"
+
+ "int64_t minInvocationsNonUniformAMD(int64_t);"
+ "i64vec2 minInvocationsNonUniformAMD(i64vec2);"
+ "i64vec3 minInvocationsNonUniformAMD(i64vec3);"
+ "i64vec4 minInvocationsNonUniformAMD(i64vec4);"
+
+ "uint64_t minInvocationsNonUniformAMD(uint64_t);"
+ "u64vec2 minInvocationsNonUniformAMD(u64vec2);"
+ "u64vec3 minInvocationsNonUniformAMD(u64vec3);"
+ "u64vec4 minInvocationsNonUniformAMD(u64vec4);"
+
+ "float16_t minInvocationsNonUniformAMD(float16_t);"
+ "f16vec2 minInvocationsNonUniformAMD(f16vec2);"
+ "f16vec3 minInvocationsNonUniformAMD(f16vec3);"
+ "f16vec4 minInvocationsNonUniformAMD(f16vec4);"
+
+ "int16_t minInvocationsNonUniformAMD(int16_t);"
+ "i16vec2 minInvocationsNonUniformAMD(i16vec2);"
+ "i16vec3 minInvocationsNonUniformAMD(i16vec3);"
+ "i16vec4 minInvocationsNonUniformAMD(i16vec4);"
+
+ "uint16_t minInvocationsNonUniformAMD(uint16_t);"
+ "u16vec2 minInvocationsNonUniformAMD(u16vec2);"
+ "u16vec3 minInvocationsNonUniformAMD(u16vec3);"
+ "u16vec4 minInvocationsNonUniformAMD(u16vec4);"
+
+ "float minInvocationsInclusiveScanNonUniformAMD(float);"
+ "vec2 minInvocationsInclusiveScanNonUniformAMD(vec2);"
+ "vec3 minInvocationsInclusiveScanNonUniformAMD(vec3);"
+ "vec4 minInvocationsInclusiveScanNonUniformAMD(vec4);"
+
+ "int minInvocationsInclusiveScanNonUniformAMD(int);"
+ "ivec2 minInvocationsInclusiveScanNonUniformAMD(ivec2);"
+ "ivec3 minInvocationsInclusiveScanNonUniformAMD(ivec3);"
+ "ivec4 minInvocationsInclusiveScanNonUniformAMD(ivec4);"
+
+ "uint minInvocationsInclusiveScanNonUniformAMD(uint);"
+ "uvec2 minInvocationsInclusiveScanNonUniformAMD(uvec2);"
+ "uvec3 minInvocationsInclusiveScanNonUniformAMD(uvec3);"
+ "uvec4 minInvocationsInclusiveScanNonUniformAMD(uvec4);"
+
+ "double minInvocationsInclusiveScanNonUniformAMD(double);"
+ "dvec2 minInvocationsInclusiveScanNonUniformAMD(dvec2);"
+ "dvec3 minInvocationsInclusiveScanNonUniformAMD(dvec3);"
+ "dvec4 minInvocationsInclusiveScanNonUniformAMD(dvec4);"
+
+ "int64_t minInvocationsInclusiveScanNonUniformAMD(int64_t);"
+ "i64vec2 minInvocationsInclusiveScanNonUniformAMD(i64vec2);"
+ "i64vec3 minInvocationsInclusiveScanNonUniformAMD(i64vec3);"
+ "i64vec4 minInvocationsInclusiveScanNonUniformAMD(i64vec4);"
+
+ "uint64_t minInvocationsInclusiveScanNonUniformAMD(uint64_t);"
+ "u64vec2 minInvocationsInclusiveScanNonUniformAMD(u64vec2);"
+ "u64vec3 minInvocationsInclusiveScanNonUniformAMD(u64vec3);"
+ "u64vec4 minInvocationsInclusiveScanNonUniformAMD(u64vec4);"
+
+ "float16_t minInvocationsInclusiveScanNonUniformAMD(float16_t);"
+ "f16vec2 minInvocationsInclusiveScanNonUniformAMD(f16vec2);"
+ "f16vec3 minInvocationsInclusiveScanNonUniformAMD(f16vec3);"
+ "f16vec4 minInvocationsInclusiveScanNonUniformAMD(f16vec4);"
+
+ "int16_t minInvocationsInclusiveScanNonUniformAMD(int16_t);"
+ "i16vec2 minInvocationsInclusiveScanNonUniformAMD(i16vec2);"
+ "i16vec3 minInvocationsInclusiveScanNonUniformAMD(i16vec3);"
+ "i16vec4 minInvocationsInclusiveScanNonUniformAMD(i16vec4);"
+
+ "uint16_t minInvocationsInclusiveScanNonUniformAMD(uint16_t);"
+ "u16vec2 minInvocationsInclusiveScanNonUniformAMD(u16vec2);"
+ "u16vec3 minInvocationsInclusiveScanNonUniformAMD(u16vec3);"
+ "u16vec4 minInvocationsInclusiveScanNonUniformAMD(u16vec4);"
+
+ "float minInvocationsExclusiveScanNonUniformAMD(float);"
+ "vec2 minInvocationsExclusiveScanNonUniformAMD(vec2);"
+ "vec3 minInvocationsExclusiveScanNonUniformAMD(vec3);"
+ "vec4 minInvocationsExclusiveScanNonUniformAMD(vec4);"
+
+ "int minInvocationsExclusiveScanNonUniformAMD(int);"
+ "ivec2 minInvocationsExclusiveScanNonUniformAMD(ivec2);"
+ "ivec3 minInvocationsExclusiveScanNonUniformAMD(ivec3);"
+ "ivec4 minInvocationsExclusiveScanNonUniformAMD(ivec4);"
+
+ "uint minInvocationsExclusiveScanNonUniformAMD(uint);"
+ "uvec2 minInvocationsExclusiveScanNonUniformAMD(uvec2);"
+ "uvec3 minInvocationsExclusiveScanNonUniformAMD(uvec3);"
+ "uvec4 minInvocationsExclusiveScanNonUniformAMD(uvec4);"
+
+ "double minInvocationsExclusiveScanNonUniformAMD(double);"
+ "dvec2 minInvocationsExclusiveScanNonUniformAMD(dvec2);"
+ "dvec3 minInvocationsExclusiveScanNonUniformAMD(dvec3);"
+ "dvec4 minInvocationsExclusiveScanNonUniformAMD(dvec4);"
+
+ "int64_t minInvocationsExclusiveScanNonUniformAMD(int64_t);"
+ "i64vec2 minInvocationsExclusiveScanNonUniformAMD(i64vec2);"
+ "i64vec3 minInvocationsExclusiveScanNonUniformAMD(i64vec3);"
+ "i64vec4 minInvocationsExclusiveScanNonUniformAMD(i64vec4);"
+
+ "uint64_t minInvocationsExclusiveScanNonUniformAMD(uint64_t);"
+ "u64vec2 minInvocationsExclusiveScanNonUniformAMD(u64vec2);"
+ "u64vec3 minInvocationsExclusiveScanNonUniformAMD(u64vec3);"
+ "u64vec4 minInvocationsExclusiveScanNonUniformAMD(u64vec4);"
+
+ "float16_t minInvocationsExclusiveScanNonUniformAMD(float16_t);"
+ "f16vec2 minInvocationsExclusiveScanNonUniformAMD(f16vec2);"
+ "f16vec3 minInvocationsExclusiveScanNonUniformAMD(f16vec3);"
+ "f16vec4 minInvocationsExclusiveScanNonUniformAMD(f16vec4);"
+
+ "int16_t minInvocationsExclusiveScanNonUniformAMD(int16_t);"
+ "i16vec2 minInvocationsExclusiveScanNonUniformAMD(i16vec2);"
+ "i16vec3 minInvocationsExclusiveScanNonUniformAMD(i16vec3);"
+ "i16vec4 minInvocationsExclusiveScanNonUniformAMD(i16vec4);"
+
+ "uint16_t minInvocationsExclusiveScanNonUniformAMD(uint16_t);"
+ "u16vec2 minInvocationsExclusiveScanNonUniformAMD(u16vec2);"
+ "u16vec3 minInvocationsExclusiveScanNonUniformAMD(u16vec3);"
+ "u16vec4 minInvocationsExclusiveScanNonUniformAMD(u16vec4);"
+
+ "float maxInvocationsNonUniformAMD(float);"
+ "vec2 maxInvocationsNonUniformAMD(vec2);"
+ "vec3 maxInvocationsNonUniformAMD(vec3);"
+ "vec4 maxInvocationsNonUniformAMD(vec4);"
+
+ "int maxInvocationsNonUniformAMD(int);"
+ "ivec2 maxInvocationsNonUniformAMD(ivec2);"
+ "ivec3 maxInvocationsNonUniformAMD(ivec3);"
+ "ivec4 maxInvocationsNonUniformAMD(ivec4);"
+
+ "uint maxInvocationsNonUniformAMD(uint);"
+ "uvec2 maxInvocationsNonUniformAMD(uvec2);"
+ "uvec3 maxInvocationsNonUniformAMD(uvec3);"
+ "uvec4 maxInvocationsNonUniformAMD(uvec4);"
+
+ "double maxInvocationsNonUniformAMD(double);"
+ "dvec2 maxInvocationsNonUniformAMD(dvec2);"
+ "dvec3 maxInvocationsNonUniformAMD(dvec3);"
+ "dvec4 maxInvocationsNonUniformAMD(dvec4);"
+
+ "int64_t maxInvocationsNonUniformAMD(int64_t);"
+ "i64vec2 maxInvocationsNonUniformAMD(i64vec2);"
+ "i64vec3 maxInvocationsNonUniformAMD(i64vec3);"
+ "i64vec4 maxInvocationsNonUniformAMD(i64vec4);"
+
+ "uint64_t maxInvocationsNonUniformAMD(uint64_t);"
+ "u64vec2 maxInvocationsNonUniformAMD(u64vec2);"
+ "u64vec3 maxInvocationsNonUniformAMD(u64vec3);"
+ "u64vec4 maxInvocationsNonUniformAMD(u64vec4);"
+
+ "float16_t maxInvocationsNonUniformAMD(float16_t);"
+ "f16vec2 maxInvocationsNonUniformAMD(f16vec2);"
+ "f16vec3 maxInvocationsNonUniformAMD(f16vec3);"
+ "f16vec4 maxInvocationsNonUniformAMD(f16vec4);"
+
+ "int16_t maxInvocationsNonUniformAMD(int16_t);"
+ "i16vec2 maxInvocationsNonUniformAMD(i16vec2);"
+ "i16vec3 maxInvocationsNonUniformAMD(i16vec3);"
+ "i16vec4 maxInvocationsNonUniformAMD(i16vec4);"
+
+ "uint16_t maxInvocationsNonUniformAMD(uint16_t);"
+ "u16vec2 maxInvocationsNonUniformAMD(u16vec2);"
+ "u16vec3 maxInvocationsNonUniformAMD(u16vec3);"
+ "u16vec4 maxInvocationsNonUniformAMD(u16vec4);"
+
+ "float maxInvocationsInclusiveScanNonUniformAMD(float);"
+ "vec2 maxInvocationsInclusiveScanNonUniformAMD(vec2);"
+ "vec3 maxInvocationsInclusiveScanNonUniformAMD(vec3);"
+ "vec4 maxInvocationsInclusiveScanNonUniformAMD(vec4);"
+
+ "int maxInvocationsInclusiveScanNonUniformAMD(int);"
+ "ivec2 maxInvocationsInclusiveScanNonUniformAMD(ivec2);"
+ "ivec3 maxInvocationsInclusiveScanNonUniformAMD(ivec3);"
+ "ivec4 maxInvocationsInclusiveScanNonUniformAMD(ivec4);"
+
+ "uint maxInvocationsInclusiveScanNonUniformAMD(uint);"
+ "uvec2 maxInvocationsInclusiveScanNonUniformAMD(uvec2);"
+ "uvec3 maxInvocationsInclusiveScanNonUniformAMD(uvec3);"
+ "uvec4 maxInvocationsInclusiveScanNonUniformAMD(uvec4);"
+
+ "double maxInvocationsInclusiveScanNonUniformAMD(double);"
+ "dvec2 maxInvocationsInclusiveScanNonUniformAMD(dvec2);"
+ "dvec3 maxInvocationsInclusiveScanNonUniformAMD(dvec3);"
+ "dvec4 maxInvocationsInclusiveScanNonUniformAMD(dvec4);"
+
+ "int64_t maxInvocationsInclusiveScanNonUniformAMD(int64_t);"
+ "i64vec2 maxInvocationsInclusiveScanNonUniformAMD(i64vec2);"
+ "i64vec3 maxInvocationsInclusiveScanNonUniformAMD(i64vec3);"
+ "i64vec4 maxInvocationsInclusiveScanNonUniformAMD(i64vec4);"
+
+ "uint64_t maxInvocationsInclusiveScanNonUniformAMD(uint64_t);"
+ "u64vec2 maxInvocationsInclusiveScanNonUniformAMD(u64vec2);"
+ "u64vec3 maxInvocationsInclusiveScanNonUniformAMD(u64vec3);"
+ "u64vec4 maxInvocationsInclusiveScanNonUniformAMD(u64vec4);"
+
+ "float16_t maxInvocationsInclusiveScanNonUniformAMD(float16_t);"
+ "f16vec2 maxInvocationsInclusiveScanNonUniformAMD(f16vec2);"
+ "f16vec3 maxInvocationsInclusiveScanNonUniformAMD(f16vec3);"
+ "f16vec4 maxInvocationsInclusiveScanNonUniformAMD(f16vec4);"
+
+ "int16_t maxInvocationsInclusiveScanNonUniformAMD(int16_t);"
+ "i16vec2 maxInvocationsInclusiveScanNonUniformAMD(i16vec2);"
+ "i16vec3 maxInvocationsInclusiveScanNonUniformAMD(i16vec3);"
+ "i16vec4 maxInvocationsInclusiveScanNonUniformAMD(i16vec4);"
+
+ "uint16_t maxInvocationsInclusiveScanNonUniformAMD(uint16_t);"
+ "u16vec2 maxInvocationsInclusiveScanNonUniformAMD(u16vec2);"
+ "u16vec3 maxInvocationsInclusiveScanNonUniformAMD(u16vec3);"
+ "u16vec4 maxInvocationsInclusiveScanNonUniformAMD(u16vec4);"
+
+ "float maxInvocationsExclusiveScanNonUniformAMD(float);"
+ "vec2 maxInvocationsExclusiveScanNonUniformAMD(vec2);"
+ "vec3 maxInvocationsExclusiveScanNonUniformAMD(vec3);"
+ "vec4 maxInvocationsExclusiveScanNonUniformAMD(vec4);"
+
+ "int maxInvocationsExclusiveScanNonUniformAMD(int);"
+ "ivec2 maxInvocationsExclusiveScanNonUniformAMD(ivec2);"
+ "ivec3 maxInvocationsExclusiveScanNonUniformAMD(ivec3);"
+ "ivec4 maxInvocationsExclusiveScanNonUniformAMD(ivec4);"
+
+ "uint maxInvocationsExclusiveScanNonUniformAMD(uint);"
+ "uvec2 maxInvocationsExclusiveScanNonUniformAMD(uvec2);"
+ "uvec3 maxInvocationsExclusiveScanNonUniformAMD(uvec3);"
+ "uvec4 maxInvocationsExclusiveScanNonUniformAMD(uvec4);"
+
+ "double maxInvocationsExclusiveScanNonUniformAMD(double);"
+ "dvec2 maxInvocationsExclusiveScanNonUniformAMD(dvec2);"
+ "dvec3 maxInvocationsExclusiveScanNonUniformAMD(dvec3);"
+ "dvec4 maxInvocationsExclusiveScanNonUniformAMD(dvec4);"
+
+ "int64_t maxInvocationsExclusiveScanNonUniformAMD(int64_t);"
+ "i64vec2 maxInvocationsExclusiveScanNonUniformAMD(i64vec2);"
+ "i64vec3 maxInvocationsExclusiveScanNonUniformAMD(i64vec3);"
+ "i64vec4 maxInvocationsExclusiveScanNonUniformAMD(i64vec4);"
+
+ "uint64_t maxInvocationsExclusiveScanNonUniformAMD(uint64_t);"
+ "u64vec2 maxInvocationsExclusiveScanNonUniformAMD(u64vec2);"
+ "u64vec3 maxInvocationsExclusiveScanNonUniformAMD(u64vec3);"
+ "u64vec4 maxInvocationsExclusiveScanNonUniformAMD(u64vec4);"
+
+ "float16_t maxInvocationsExclusiveScanNonUniformAMD(float16_t);"
+ "f16vec2 maxInvocationsExclusiveScanNonUniformAMD(f16vec2);"
+ "f16vec3 maxInvocationsExclusiveScanNonUniformAMD(f16vec3);"
+ "f16vec4 maxInvocationsExclusiveScanNonUniformAMD(f16vec4);"
+
+ "int16_t maxInvocationsExclusiveScanNonUniformAMD(int16_t);"
+ "i16vec2 maxInvocationsExclusiveScanNonUniformAMD(i16vec2);"
+ "i16vec3 maxInvocationsExclusiveScanNonUniformAMD(i16vec3);"
+ "i16vec4 maxInvocationsExclusiveScanNonUniformAMD(i16vec4);"
+
+ "uint16_t maxInvocationsExclusiveScanNonUniformAMD(uint16_t);"
+ "u16vec2 maxInvocationsExclusiveScanNonUniformAMD(u16vec2);"
+ "u16vec3 maxInvocationsExclusiveScanNonUniformAMD(u16vec3);"
+ "u16vec4 maxInvocationsExclusiveScanNonUniformAMD(u16vec4);"
+
+ "float addInvocationsNonUniformAMD(float);"
+ "vec2 addInvocationsNonUniformAMD(vec2);"
+ "vec3 addInvocationsNonUniformAMD(vec3);"
+ "vec4 addInvocationsNonUniformAMD(vec4);"
+
+ "int addInvocationsNonUniformAMD(int);"
+ "ivec2 addInvocationsNonUniformAMD(ivec2);"
+ "ivec3 addInvocationsNonUniformAMD(ivec3);"
+ "ivec4 addInvocationsNonUniformAMD(ivec4);"
+
+ "uint addInvocationsNonUniformAMD(uint);"
+ "uvec2 addInvocationsNonUniformAMD(uvec2);"
+ "uvec3 addInvocationsNonUniformAMD(uvec3);"
+ "uvec4 addInvocationsNonUniformAMD(uvec4);"
+
+ "double addInvocationsNonUniformAMD(double);"
+ "dvec2 addInvocationsNonUniformAMD(dvec2);"
+ "dvec3 addInvocationsNonUniformAMD(dvec3);"
+ "dvec4 addInvocationsNonUniformAMD(dvec4);"
+
+ "int64_t addInvocationsNonUniformAMD(int64_t);"
+ "i64vec2 addInvocationsNonUniformAMD(i64vec2);"
+ "i64vec3 addInvocationsNonUniformAMD(i64vec3);"
+ "i64vec4 addInvocationsNonUniformAMD(i64vec4);"
+
+ "uint64_t addInvocationsNonUniformAMD(uint64_t);"
+ "u64vec2 addInvocationsNonUniformAMD(u64vec2);"
+ "u64vec3 addInvocationsNonUniformAMD(u64vec3);"
+ "u64vec4 addInvocationsNonUniformAMD(u64vec4);"
+
+ "float16_t addInvocationsNonUniformAMD(float16_t);"
+ "f16vec2 addInvocationsNonUniformAMD(f16vec2);"
+ "f16vec3 addInvocationsNonUniformAMD(f16vec3);"
+ "f16vec4 addInvocationsNonUniformAMD(f16vec4);"
+
+ "int16_t addInvocationsNonUniformAMD(int16_t);"
+ "i16vec2 addInvocationsNonUniformAMD(i16vec2);"
+ "i16vec3 addInvocationsNonUniformAMD(i16vec3);"
+ "i16vec4 addInvocationsNonUniformAMD(i16vec4);"
+
+ "uint16_t addInvocationsNonUniformAMD(uint16_t);"
+ "u16vec2 addInvocationsNonUniformAMD(u16vec2);"
+ "u16vec3 addInvocationsNonUniformAMD(u16vec3);"
+ "u16vec4 addInvocationsNonUniformAMD(u16vec4);"
+
+ "float addInvocationsInclusiveScanNonUniformAMD(float);"
+ "vec2 addInvocationsInclusiveScanNonUniformAMD(vec2);"
+ "vec3 addInvocationsInclusiveScanNonUniformAMD(vec3);"
+ "vec4 addInvocationsInclusiveScanNonUniformAMD(vec4);"
+
+ "int addInvocationsInclusiveScanNonUniformAMD(int);"
+ "ivec2 addInvocationsInclusiveScanNonUniformAMD(ivec2);"
+ "ivec3 addInvocationsInclusiveScanNonUniformAMD(ivec3);"
+ "ivec4 addInvocationsInclusiveScanNonUniformAMD(ivec4);"
+
+ "uint addInvocationsInclusiveScanNonUniformAMD(uint);"
+ "uvec2 addInvocationsInclusiveScanNonUniformAMD(uvec2);"
+ "uvec3 addInvocationsInclusiveScanNonUniformAMD(uvec3);"
+ "uvec4 addInvocationsInclusiveScanNonUniformAMD(uvec4);"
+
+ "double addInvocationsInclusiveScanNonUniformAMD(double);"
+ "dvec2 addInvocationsInclusiveScanNonUniformAMD(dvec2);"
+ "dvec3 addInvocationsInclusiveScanNonUniformAMD(dvec3);"
+ "dvec4 addInvocationsInclusiveScanNonUniformAMD(dvec4);"
+
+ "int64_t addInvocationsInclusiveScanNonUniformAMD(int64_t);"
+ "i64vec2 addInvocationsInclusiveScanNonUniformAMD(i64vec2);"
+ "i64vec3 addInvocationsInclusiveScanNonUniformAMD(i64vec3);"
+ "i64vec4 addInvocationsInclusiveScanNonUniformAMD(i64vec4);"
+
+ "uint64_t addInvocationsInclusiveScanNonUniformAMD(uint64_t);"
+ "u64vec2 addInvocationsInclusiveScanNonUniformAMD(u64vec2);"
+ "u64vec3 addInvocationsInclusiveScanNonUniformAMD(u64vec3);"
+ "u64vec4 addInvocationsInclusiveScanNonUniformAMD(u64vec4);"
+
+ "float16_t addInvocationsInclusiveScanNonUniformAMD(float16_t);"
+ "f16vec2 addInvocationsInclusiveScanNonUniformAMD(f16vec2);"
+ "f16vec3 addInvocationsInclusiveScanNonUniformAMD(f16vec3);"
+ "f16vec4 addInvocationsInclusiveScanNonUniformAMD(f16vec4);"
+
+ "int16_t addInvocationsInclusiveScanNonUniformAMD(int16_t);"
+ "i16vec2 addInvocationsInclusiveScanNonUniformAMD(i16vec2);"
+ "i16vec3 addInvocationsInclusiveScanNonUniformAMD(i16vec3);"
+ "i16vec4 addInvocationsInclusiveScanNonUniformAMD(i16vec4);"
+
+ "uint16_t addInvocationsInclusiveScanNonUniformAMD(uint16_t);"
+ "u16vec2 addInvocationsInclusiveScanNonUniformAMD(u16vec2);"
+ "u16vec3 addInvocationsInclusiveScanNonUniformAMD(u16vec3);"
+ "u16vec4 addInvocationsInclusiveScanNonUniformAMD(u16vec4);"
+
+ "float addInvocationsExclusiveScanNonUniformAMD(float);"
+ "vec2 addInvocationsExclusiveScanNonUniformAMD(vec2);"
+ "vec3 addInvocationsExclusiveScanNonUniformAMD(vec3);"
+ "vec4 addInvocationsExclusiveScanNonUniformAMD(vec4);"
+
+ "int addInvocationsExclusiveScanNonUniformAMD(int);"
+ "ivec2 addInvocationsExclusiveScanNonUniformAMD(ivec2);"
+ "ivec3 addInvocationsExclusiveScanNonUniformAMD(ivec3);"
+ "ivec4 addInvocationsExclusiveScanNonUniformAMD(ivec4);"
+
+ "uint addInvocationsExclusiveScanNonUniformAMD(uint);"
+ "uvec2 addInvocationsExclusiveScanNonUniformAMD(uvec2);"
+ "uvec3 addInvocationsExclusiveScanNonUniformAMD(uvec3);"
+ "uvec4 addInvocationsExclusiveScanNonUniformAMD(uvec4);"
+
+ "double addInvocationsExclusiveScanNonUniformAMD(double);"
+ "dvec2 addInvocationsExclusiveScanNonUniformAMD(dvec2);"
+ "dvec3 addInvocationsExclusiveScanNonUniformAMD(dvec3);"
+ "dvec4 addInvocationsExclusiveScanNonUniformAMD(dvec4);"
+
+ "int64_t addInvocationsExclusiveScanNonUniformAMD(int64_t);"
+ "i64vec2 addInvocationsExclusiveScanNonUniformAMD(i64vec2);"
+ "i64vec3 addInvocationsExclusiveScanNonUniformAMD(i64vec3);"
+ "i64vec4 addInvocationsExclusiveScanNonUniformAMD(i64vec4);"
+
+ "uint64_t addInvocationsExclusiveScanNonUniformAMD(uint64_t);"
+ "u64vec2 addInvocationsExclusiveScanNonUniformAMD(u64vec2);"
+ "u64vec3 addInvocationsExclusiveScanNonUniformAMD(u64vec3);"
+ "u64vec4 addInvocationsExclusiveScanNonUniformAMD(u64vec4);"
+
+ "float16_t addInvocationsExclusiveScanNonUniformAMD(float16_t);"
+ "f16vec2 addInvocationsExclusiveScanNonUniformAMD(f16vec2);"
+ "f16vec3 addInvocationsExclusiveScanNonUniformAMD(f16vec3);"
+ "f16vec4 addInvocationsExclusiveScanNonUniformAMD(f16vec4);"
+
+ "int16_t addInvocationsExclusiveScanNonUniformAMD(int16_t);"
+ "i16vec2 addInvocationsExclusiveScanNonUniformAMD(i16vec2);"
+ "i16vec3 addInvocationsExclusiveScanNonUniformAMD(i16vec3);"
+ "i16vec4 addInvocationsExclusiveScanNonUniformAMD(i16vec4);"
+
+ "uint16_t addInvocationsExclusiveScanNonUniformAMD(uint16_t);"
+ "u16vec2 addInvocationsExclusiveScanNonUniformAMD(u16vec2);"
+ "u16vec3 addInvocationsExclusiveScanNonUniformAMD(u16vec3);"
+ "u16vec4 addInvocationsExclusiveScanNonUniformAMD(u16vec4);"
+
+ "float swizzleInvocationsAMD(float, uvec4);"
+ "vec2 swizzleInvocationsAMD(vec2, uvec4);"
+ "vec3 swizzleInvocationsAMD(vec3, uvec4);"
+ "vec4 swizzleInvocationsAMD(vec4, uvec4);"
+
+ "int swizzleInvocationsAMD(int, uvec4);"
+ "ivec2 swizzleInvocationsAMD(ivec2, uvec4);"
+ "ivec3 swizzleInvocationsAMD(ivec3, uvec4);"
+ "ivec4 swizzleInvocationsAMD(ivec4, uvec4);"
+
+ "uint swizzleInvocationsAMD(uint, uvec4);"
+ "uvec2 swizzleInvocationsAMD(uvec2, uvec4);"
+ "uvec3 swizzleInvocationsAMD(uvec3, uvec4);"
+ "uvec4 swizzleInvocationsAMD(uvec4, uvec4);"
+
+ "float swizzleInvocationsMaskedAMD(float, uvec3);"
+ "vec2 swizzleInvocationsMaskedAMD(vec2, uvec3);"
+ "vec3 swizzleInvocationsMaskedAMD(vec3, uvec3);"
+ "vec4 swizzleInvocationsMaskedAMD(vec4, uvec3);"
+
+ "int swizzleInvocationsMaskedAMD(int, uvec3);"
+ "ivec2 swizzleInvocationsMaskedAMD(ivec2, uvec3);"
+ "ivec3 swizzleInvocationsMaskedAMD(ivec3, uvec3);"
+ "ivec4 swizzleInvocationsMaskedAMD(ivec4, uvec3);"
+
+ "uint swizzleInvocationsMaskedAMD(uint, uvec3);"
+ "uvec2 swizzleInvocationsMaskedAMD(uvec2, uvec3);"
+ "uvec3 swizzleInvocationsMaskedAMD(uvec3, uvec3);"
+ "uvec4 swizzleInvocationsMaskedAMD(uvec4, uvec3);"
+
+ "float writeInvocationAMD(float, float, uint);"
+ "vec2 writeInvocationAMD(vec2, vec2, uint);"
+ "vec3 writeInvocationAMD(vec3, vec3, uint);"
+ "vec4 writeInvocationAMD(vec4, vec4, uint);"
+
+ "int writeInvocationAMD(int, int, uint);"
+ "ivec2 writeInvocationAMD(ivec2, ivec2, uint);"
+ "ivec3 writeInvocationAMD(ivec3, ivec3, uint);"
+ "ivec4 writeInvocationAMD(ivec4, ivec4, uint);"
+
+ "uint writeInvocationAMD(uint, uint, uint);"
+ "uvec2 writeInvocationAMD(uvec2, uvec2, uint);"
+ "uvec3 writeInvocationAMD(uvec3, uvec3, uint);"
+ "uvec4 writeInvocationAMD(uvec4, uvec4, uint);"
+
+ "uint mbcntAMD(uint64_t);"
+
+ "\n");
+ }
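+
+    // The *InvocationsNonUniformAMD overloads above (GL_AMD_shader_ballot)
+    // reduce or prefix-scan a value across the active invocations of the
+    // SIMD group, and stay well-defined under non-uniform control flow.
+    // Sketch ("localContribution" is an assumed variable):
+    //
+    //     float total = addInvocationsNonUniformAMD(localContribution);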
+
+ // GL_AMD_gcn_shader
+ if (profile != EEsProfile && version >= 450) {
+ commonBuiltins.append(
+ "float cubeFaceIndexAMD(vec3);"
+ "vec2 cubeFaceCoordAMD(vec3);"
+ "uint64_t timeAMD();"
+
+ "\n");
+ }
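+
+    // Usage sketch for GL_AMD_gcn_shader ("dir" is an assumed direction
+    // vector): cubeFaceIndexAMD returns the cube face (0.0 .. 5.0) selected
+    // by dir, cubeFaceCoordAMD the 2D coordinate on that face, and timeAMD
+    // a 64-bit shader clock value:
+    //
+    //     float face = cubeFaceIndexAMD(dir);
+    //     vec2  fuv  = cubeFaceCoordAMD(dir);
+    //     uint64_t t = timeAMD();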
+
+ // GL_AMD_shader_fragment_mask
+ if (profile != EEsProfile && version >= 450) {
+ commonBuiltins.append(
+ "uint fragmentMaskFetchAMD(sampler2DMS, ivec2);"
+ "uint fragmentMaskFetchAMD(isampler2DMS, ivec2);"
+ "uint fragmentMaskFetchAMD(usampler2DMS, ivec2);"
+
+ "uint fragmentMaskFetchAMD(sampler2DMSArray, ivec3);"
+ "uint fragmentMaskFetchAMD(isampler2DMSArray, ivec3);"
+ "uint fragmentMaskFetchAMD(usampler2DMSArray, ivec3);"
+
+ "vec4 fragmentFetchAMD(sampler2DMS, ivec2, uint);"
+ "ivec4 fragmentFetchAMD(isampler2DMS, ivec2, uint);"
+ "uvec4 fragmentFetchAMD(usampler2DMS, ivec2, uint);"
+
+ "vec4 fragmentFetchAMD(sampler2DMSArray, ivec3, uint);"
+ "ivec4 fragmentFetchAMD(isampler2DMSArray, ivec3, uint);"
+ "uvec4 fragmentFetchAMD(usampler2DMSArray, ivec3, uint);"
+
+ "\n");
+ }
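+
+    // Sketch for GL_AMD_shader_fragment_mask ("msTex" and "p" are assumed):
+    // fragmentMaskFetchAMD returns the packed 4-bit-per-sample fragment mask
+    // of a multisampled texel; fragmentFetchAMD reads one fragment's color:
+    //
+    //     uint mask = fragmentMaskFetchAMD(msTex, p);
+    //     vec4 c    = fragmentFetchAMD(msTex, p, mask & 0xFu); // sample 0's fragment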
+
+#endif // AMD_EXTENSIONS
+
+
+#ifdef NV_EXTENSIONS
+ if ((profile != EEsProfile && version >= 450) ||
+ (profile == EEsProfile && version >= 320)) {
+ commonBuiltins.append(
+ "struct gl_TextureFootprint2DNV {"
+ "uvec2 anchor;"
+ "uvec2 offset;"
+ "uvec2 mask;"
+ "uint lod;"
+ "uint granularity;"
+ "};"
+
+ "struct gl_TextureFootprint3DNV {"
+ "uvec3 anchor;"
+ "uvec3 offset;"
+ "uvec2 mask;"
+ "uint lod;"
+ "uint granularity;"
+ "};"
+ "bool textureFootprintNV(sampler2D, vec2, int, bool, out gl_TextureFootprint2DNV);"
+ "bool textureFootprintNV(sampler3D, vec3, int, bool, out gl_TextureFootprint3DNV);"
+ "bool textureFootprintNV(sampler2D, vec2, int, bool, out gl_TextureFootprint2DNV, float);"
+ "bool textureFootprintNV(sampler3D, vec3, int, bool, out gl_TextureFootprint3DNV, float);"
+ "bool textureFootprintClampNV(sampler2D, vec2, float, int, bool, out gl_TextureFootprint2DNV);"
+ "bool textureFootprintClampNV(sampler3D, vec3, float, int, bool, out gl_TextureFootprint3DNV);"
+ "bool textureFootprintClampNV(sampler2D, vec2, float, int, bool, out gl_TextureFootprint2DNV, float);"
+ "bool textureFootprintClampNV(sampler3D, vec3, float, int, bool, out gl_TextureFootprint3DNV, float);"
+ "bool textureFootprintLodNV(sampler2D, vec2, float, int, bool, out gl_TextureFootprint2DNV);"
+ "bool textureFootprintLodNV(sampler3D, vec3, float, int, bool, out gl_TextureFootprint3DNV);"
+ "bool textureFootprintGradNV(sampler2D, vec2, vec2, vec2, int, bool, out gl_TextureFootprint2DNV);"
+ "bool textureFootprintGradClampNV(sampler2D, vec2, vec2, vec2, float, int, bool, out gl_TextureFootprint2DNV);"
+ "\n");
+ }
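+
+    // Sketch for GL_NV_shader_texture_footprint ("tex", "uv", "gran" and
+    // "coarse" are assumed): each query fills the gl_TextureFootprint*NV
+    // struct with the texel groups an equivalent texture() call would touch;
+    // the bool result reports whether one footprint covers the whole access:
+    //
+    //     gl_TextureFootprint2DNV fp;
+    //     bool single = textureFootprintNV(tex, uv, gran, coarse, fp);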
+
+#endif // NV_EXTENSIONS
+ // GL_AMD_gpu_shader_half_float/Explicit types
+ if (profile != EEsProfile && version >= 450) {
+ commonBuiltins.append(
+ "float16_t radians(float16_t);"
+ "f16vec2 radians(f16vec2);"
+ "f16vec3 radians(f16vec3);"
+ "f16vec4 radians(f16vec4);"
+
+ "float16_t degrees(float16_t);"
+ "f16vec2 degrees(f16vec2);"
+ "f16vec3 degrees(f16vec3);"
+ "f16vec4 degrees(f16vec4);"
+
+ "float16_t sin(float16_t);"
+ "f16vec2 sin(f16vec2);"
+ "f16vec3 sin(f16vec3);"
+ "f16vec4 sin(f16vec4);"
+
+ "float16_t cos(float16_t);"
+ "f16vec2 cos(f16vec2);"
+ "f16vec3 cos(f16vec3);"
+ "f16vec4 cos(f16vec4);"
+
+ "float16_t tan(float16_t);"
+ "f16vec2 tan(f16vec2);"
+ "f16vec3 tan(f16vec3);"
+ "f16vec4 tan(f16vec4);"
+
+ "float16_t asin(float16_t);"
+ "f16vec2 asin(f16vec2);"
+ "f16vec3 asin(f16vec3);"
+ "f16vec4 asin(f16vec4);"
+
+ "float16_t acos(float16_t);"
+ "f16vec2 acos(f16vec2);"
+ "f16vec3 acos(f16vec3);"
+ "f16vec4 acos(f16vec4);"
+
+ "float16_t atan(float16_t, float16_t);"
+ "f16vec2 atan(f16vec2, f16vec2);"
+ "f16vec3 atan(f16vec3, f16vec3);"
+ "f16vec4 atan(f16vec4, f16vec4);"
+
+ "float16_t atan(float16_t);"
+ "f16vec2 atan(f16vec2);"
+ "f16vec3 atan(f16vec3);"
+ "f16vec4 atan(f16vec4);"
+
+ "float16_t sinh(float16_t);"
+ "f16vec2 sinh(f16vec2);"
+ "f16vec3 sinh(f16vec3);"
+ "f16vec4 sinh(f16vec4);"
+
+ "float16_t cosh(float16_t);"
+ "f16vec2 cosh(f16vec2);"
+ "f16vec3 cosh(f16vec3);"
+ "f16vec4 cosh(f16vec4);"
+
+ "float16_t tanh(float16_t);"
+ "f16vec2 tanh(f16vec2);"
+ "f16vec3 tanh(f16vec3);"
+ "f16vec4 tanh(f16vec4);"
+
+ "float16_t asinh(float16_t);"
+ "f16vec2 asinh(f16vec2);"
+ "f16vec3 asinh(f16vec3);"
+ "f16vec4 asinh(f16vec4);"
+
+ "float16_t acosh(float16_t);"
+ "f16vec2 acosh(f16vec2);"
+ "f16vec3 acosh(f16vec3);"
+ "f16vec4 acosh(f16vec4);"
+
+ "float16_t atanh(float16_t);"
+ "f16vec2 atanh(f16vec2);"
+ "f16vec3 atanh(f16vec3);"
+ "f16vec4 atanh(f16vec4);"
+
+ "float16_t pow(float16_t, float16_t);"
+ "f16vec2 pow(f16vec2, f16vec2);"
+ "f16vec3 pow(f16vec3, f16vec3);"
+ "f16vec4 pow(f16vec4, f16vec4);"
+
+ "float16_t exp(float16_t);"
+ "f16vec2 exp(f16vec2);"
+ "f16vec3 exp(f16vec3);"
+ "f16vec4 exp(f16vec4);"
+
+ "float16_t log(float16_t);"
+ "f16vec2 log(f16vec2);"
+ "f16vec3 log(f16vec3);"
+ "f16vec4 log(f16vec4);"
+
+ "float16_t exp2(float16_t);"
+ "f16vec2 exp2(f16vec2);"
+ "f16vec3 exp2(f16vec3);"
+ "f16vec4 exp2(f16vec4);"
+
+ "float16_t log2(float16_t);"
+ "f16vec2 log2(f16vec2);"
+ "f16vec3 log2(f16vec3);"
+ "f16vec4 log2(f16vec4);"
+
+ "float16_t sqrt(float16_t);"
+ "f16vec2 sqrt(f16vec2);"
+ "f16vec3 sqrt(f16vec3);"
+ "f16vec4 sqrt(f16vec4);"
+
+ "float16_t inversesqrt(float16_t);"
+ "f16vec2 inversesqrt(f16vec2);"
+ "f16vec3 inversesqrt(f16vec3);"
+ "f16vec4 inversesqrt(f16vec4);"
+
+ "float16_t abs(float16_t);"
+ "f16vec2 abs(f16vec2);"
+ "f16vec3 abs(f16vec3);"
+ "f16vec4 abs(f16vec4);"
+
+ "float16_t sign(float16_t);"
+ "f16vec2 sign(f16vec2);"
+ "f16vec3 sign(f16vec3);"
+ "f16vec4 sign(f16vec4);"
+
+ "float16_t floor(float16_t);"
+ "f16vec2 floor(f16vec2);"
+ "f16vec3 floor(f16vec3);"
+ "f16vec4 floor(f16vec4);"
+
+ "float16_t trunc(float16_t);"
+ "f16vec2 trunc(f16vec2);"
+ "f16vec3 trunc(f16vec3);"
+ "f16vec4 trunc(f16vec4);"
+
+ "float16_t round(float16_t);"
+ "f16vec2 round(f16vec2);"
+ "f16vec3 round(f16vec3);"
+ "f16vec4 round(f16vec4);"
+
+ "float16_t roundEven(float16_t);"
+ "f16vec2 roundEven(f16vec2);"
+ "f16vec3 roundEven(f16vec3);"
+ "f16vec4 roundEven(f16vec4);"
+
+ "float16_t ceil(float16_t);"
+ "f16vec2 ceil(f16vec2);"
+ "f16vec3 ceil(f16vec3);"
+ "f16vec4 ceil(f16vec4);"
+
+ "float16_t fract(float16_t);"
+ "f16vec2 fract(f16vec2);"
+ "f16vec3 fract(f16vec3);"
+ "f16vec4 fract(f16vec4);"
+
+ "float16_t mod(float16_t, float16_t);"
+ "f16vec2 mod(f16vec2, float16_t);"
+ "f16vec3 mod(f16vec3, float16_t);"
+ "f16vec4 mod(f16vec4, float16_t);"
+ "f16vec2 mod(f16vec2, f16vec2);"
+ "f16vec3 mod(f16vec3, f16vec3);"
+ "f16vec4 mod(f16vec4, f16vec4);"
+
+ "float16_t modf(float16_t, out float16_t);"
+ "f16vec2 modf(f16vec2, out f16vec2);"
+ "f16vec3 modf(f16vec3, out f16vec3);"
+ "f16vec4 modf(f16vec4, out f16vec4);"
+
+ "float16_t min(float16_t, float16_t);"
+ "f16vec2 min(f16vec2, float16_t);"
+ "f16vec3 min(f16vec3, float16_t);"
+ "f16vec4 min(f16vec4, float16_t);"
+ "f16vec2 min(f16vec2, f16vec2);"
+ "f16vec3 min(f16vec3, f16vec3);"
+ "f16vec4 min(f16vec4, f16vec4);"
+
+ "float16_t max(float16_t, float16_t);"
+ "f16vec2 max(f16vec2, float16_t);"
+ "f16vec3 max(f16vec3, float16_t);"
+ "f16vec4 max(f16vec4, float16_t);"
+ "f16vec2 max(f16vec2, f16vec2);"
+ "f16vec3 max(f16vec3, f16vec3);"
+ "f16vec4 max(f16vec4, f16vec4);"
+
+ "float16_t clamp(float16_t, float16_t, float16_t);"
+ "f16vec2 clamp(f16vec2, float16_t, float16_t);"
+ "f16vec3 clamp(f16vec3, float16_t, float16_t);"
+ "f16vec4 clamp(f16vec4, float16_t, float16_t);"
+ "f16vec2 clamp(f16vec2, f16vec2, f16vec2);"
+ "f16vec3 clamp(f16vec3, f16vec3, f16vec3);"
+ "f16vec4 clamp(f16vec4, f16vec4, f16vec4);"
+
+ "float16_t mix(float16_t, float16_t, float16_t);"
+ "f16vec2 mix(f16vec2, f16vec2, float16_t);"
+ "f16vec3 mix(f16vec3, f16vec3, float16_t);"
+ "f16vec4 mix(f16vec4, f16vec4, float16_t);"
+ "f16vec2 mix(f16vec2, f16vec2, f16vec2);"
+ "f16vec3 mix(f16vec3, f16vec3, f16vec3);"
+ "f16vec4 mix(f16vec4, f16vec4, f16vec4);"
+ "float16_t mix(float16_t, float16_t, bool);"
+ "f16vec2 mix(f16vec2, f16vec2, bvec2);"
+ "f16vec3 mix(f16vec3, f16vec3, bvec3);"
+ "f16vec4 mix(f16vec4, f16vec4, bvec4);"
+
+ "float16_t step(float16_t, float16_t);"
+ "f16vec2 step(f16vec2, f16vec2);"
+ "f16vec3 step(f16vec3, f16vec3);"
+ "f16vec4 step(f16vec4, f16vec4);"
+ "f16vec2 step(float16_t, f16vec2);"
+ "f16vec3 step(float16_t, f16vec3);"
+ "f16vec4 step(float16_t, f16vec4);"
+
+ "float16_t smoothstep(float16_t, float16_t, float16_t);"
+ "f16vec2 smoothstep(f16vec2, f16vec2, f16vec2);"
+ "f16vec3 smoothstep(f16vec3, f16vec3, f16vec3);"
+ "f16vec4 smoothstep(f16vec4, f16vec4, f16vec4);"
+ "f16vec2 smoothstep(float16_t, float16_t, f16vec2);"
+ "f16vec3 smoothstep(float16_t, float16_t, f16vec3);"
+ "f16vec4 smoothstep(float16_t, float16_t, f16vec4);"
+
+ "bool isnan(float16_t);"
+ "bvec2 isnan(f16vec2);"
+ "bvec3 isnan(f16vec3);"
+ "bvec4 isnan(f16vec4);"
+
+ "bool isinf(float16_t);"
+ "bvec2 isinf(f16vec2);"
+ "bvec3 isinf(f16vec3);"
+ "bvec4 isinf(f16vec4);"
+
+ "float16_t fma(float16_t, float16_t, float16_t);"
+ "f16vec2 fma(f16vec2, f16vec2, f16vec2);"
+ "f16vec3 fma(f16vec3, f16vec3, f16vec3);"
+ "f16vec4 fma(f16vec4, f16vec4, f16vec4);"
+
+ "float16_t frexp(float16_t, out int);"
+ "f16vec2 frexp(f16vec2, out ivec2);"
+ "f16vec3 frexp(f16vec3, out ivec3);"
+ "f16vec4 frexp(f16vec4, out ivec4);"
+
+ "float16_t ldexp(float16_t, in int);"
+ "f16vec2 ldexp(f16vec2, in ivec2);"
+ "f16vec3 ldexp(f16vec3, in ivec3);"
+ "f16vec4 ldexp(f16vec4, in ivec4);"
+
+ "uint packFloat2x16(f16vec2);"
+ "f16vec2 unpackFloat2x16(uint);"
+
+ "float16_t length(float16_t);"
+ "float16_t length(f16vec2);"
+ "float16_t length(f16vec3);"
+ "float16_t length(f16vec4);"
+
+ "float16_t distance(float16_t, float16_t);"
+ "float16_t distance(f16vec2, f16vec2);"
+ "float16_t distance(f16vec3, f16vec3);"
+ "float16_t distance(f16vec4, f16vec4);"
+
+ "float16_t dot(float16_t, float16_t);"
+ "float16_t dot(f16vec2, f16vec2);"
+ "float16_t dot(f16vec3, f16vec3);"
+ "float16_t dot(f16vec4, f16vec4);"
+
+ "f16vec3 cross(f16vec3, f16vec3);"
+
+ "float16_t normalize(float16_t);"
+ "f16vec2 normalize(f16vec2);"
+ "f16vec3 normalize(f16vec3);"
+ "f16vec4 normalize(f16vec4);"
+
+ "float16_t faceforward(float16_t, float16_t, float16_t);"
+ "f16vec2 faceforward(f16vec2, f16vec2, f16vec2);"
+ "f16vec3 faceforward(f16vec3, f16vec3, f16vec3);"
+ "f16vec4 faceforward(f16vec4, f16vec4, f16vec4);"
+
+ "float16_t reflect(float16_t, float16_t);"
+ "f16vec2 reflect(f16vec2, f16vec2);"
+ "f16vec3 reflect(f16vec3, f16vec3);"
+ "f16vec4 reflect(f16vec4, f16vec4);"
+
+ "float16_t refract(float16_t, float16_t, float16_t);"
+ "f16vec2 refract(f16vec2, f16vec2, float16_t);"
+ "f16vec3 refract(f16vec3, f16vec3, float16_t);"
+ "f16vec4 refract(f16vec4, f16vec4, float16_t);"
+
+ "f16mat2 matrixCompMult(f16mat2, f16mat2);"
+ "f16mat3 matrixCompMult(f16mat3, f16mat3);"
+ "f16mat4 matrixCompMult(f16mat4, f16mat4);"
+ "f16mat2x3 matrixCompMult(f16mat2x3, f16mat2x3);"
+ "f16mat2x4 matrixCompMult(f16mat2x4, f16mat2x4);"
+ "f16mat3x2 matrixCompMult(f16mat3x2, f16mat3x2);"
+ "f16mat3x4 matrixCompMult(f16mat3x4, f16mat3x4);"
+ "f16mat4x2 matrixCompMult(f16mat4x2, f16mat4x2);"
+ "f16mat4x3 matrixCompMult(f16mat4x3, f16mat4x3);"
+
+ "f16mat2 outerProduct(f16vec2, f16vec2);"
+ "f16mat3 outerProduct(f16vec3, f16vec3);"
+ "f16mat4 outerProduct(f16vec4, f16vec4);"
+ "f16mat2x3 outerProduct(f16vec3, f16vec2);"
+ "f16mat3x2 outerProduct(f16vec2, f16vec3);"
+ "f16mat2x4 outerProduct(f16vec4, f16vec2);"
+ "f16mat4x2 outerProduct(f16vec2, f16vec4);"
+ "f16mat3x4 outerProduct(f16vec4, f16vec3);"
+ "f16mat4x3 outerProduct(f16vec3, f16vec4);"
+
+ "f16mat2 transpose(f16mat2);"
+ "f16mat3 transpose(f16mat3);"
+ "f16mat4 transpose(f16mat4);"
+ "f16mat2x3 transpose(f16mat3x2);"
+ "f16mat3x2 transpose(f16mat2x3);"
+ "f16mat2x4 transpose(f16mat4x2);"
+ "f16mat4x2 transpose(f16mat2x4);"
+ "f16mat3x4 transpose(f16mat4x3);"
+ "f16mat4x3 transpose(f16mat3x4);"
+
+ "float16_t determinant(f16mat2);"
+ "float16_t determinant(f16mat3);"
+ "float16_t determinant(f16mat4);"
+
+ "f16mat2 inverse(f16mat2);"
+ "f16mat3 inverse(f16mat3);"
+ "f16mat4 inverse(f16mat4);"
+
+ "bvec2 lessThan(f16vec2, f16vec2);"
+ "bvec3 lessThan(f16vec3, f16vec3);"
+ "bvec4 lessThan(f16vec4, f16vec4);"
+
+ "bvec2 lessThanEqual(f16vec2, f16vec2);"
+ "bvec3 lessThanEqual(f16vec3, f16vec3);"
+ "bvec4 lessThanEqual(f16vec4, f16vec4);"
+
+ "bvec2 greaterThan(f16vec2, f16vec2);"
+ "bvec3 greaterThan(f16vec3, f16vec3);"
+ "bvec4 greaterThan(f16vec4, f16vec4);"
+
+ "bvec2 greaterThanEqual(f16vec2, f16vec2);"
+ "bvec3 greaterThanEqual(f16vec3, f16vec3);"
+ "bvec4 greaterThanEqual(f16vec4, f16vec4);"
+
+ "bvec2 equal(f16vec2, f16vec2);"
+ "bvec3 equal(f16vec3, f16vec3);"
+ "bvec4 equal(f16vec4, f16vec4);"
+
+ "bvec2 notEqual(f16vec2, f16vec2);"
+ "bvec3 notEqual(f16vec3, f16vec3);"
+ "bvec4 notEqual(f16vec4, f16vec4);"
+
+ "\n");
+ }
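+
+    // The overloads above mirror the core 32-bit builtins at float16_t
+    // precision; usage is unchanged apart from the types ("v" and "l" are
+    // assumed):
+    //
+    //     f16vec3 n = normalize(f16vec3(v));
+    //     float16_t d = clamp(dot(n, f16vec3(l)), float16_t(0.0), float16_t(1.0));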
+
+ // Explicit types
+ if (profile != EEsProfile && version >= 450) {
+ commonBuiltins.append(
+ "int8_t abs(int8_t);"
+ "i8vec2 abs(i8vec2);"
+ "i8vec3 abs(i8vec3);"
+ "i8vec4 abs(i8vec4);"
+
+ "int8_t sign(int8_t);"
+ "i8vec2 sign(i8vec2);"
+ "i8vec3 sign(i8vec3);"
+ "i8vec4 sign(i8vec4);"
+
+ "int8_t min(int8_t x, int8_t y);"
+ "i8vec2 min(i8vec2 x, int8_t y);"
+ "i8vec3 min(i8vec3 x, int8_t y);"
+ "i8vec4 min(i8vec4 x, int8_t y);"
+ "i8vec2 min(i8vec2 x, i8vec2 y);"
+ "i8vec3 min(i8vec3 x, i8vec3 y);"
+ "i8vec4 min(i8vec4 x, i8vec4 y);"
+
+ "uint8_t min(uint8_t x, uint8_t y);"
+ "u8vec2 min(u8vec2 x, uint8_t y);"
+ "u8vec3 min(u8vec3 x, uint8_t y);"
+ "u8vec4 min(u8vec4 x, uint8_t y);"
+ "u8vec2 min(u8vec2 x, u8vec2 y);"
+ "u8vec3 min(u8vec3 x, u8vec3 y);"
+ "u8vec4 min(u8vec4 x, u8vec4 y);"
+
+ "int8_t max(int8_t x, int8_t y);"
+ "i8vec2 max(i8vec2 x, int8_t y);"
+ "i8vec3 max(i8vec3 x, int8_t y);"
+ "i8vec4 max(i8vec4 x, int8_t y);"
+ "i8vec2 max(i8vec2 x, i8vec2 y);"
+ "i8vec3 max(i8vec3 x, i8vec3 y);"
+ "i8vec4 max(i8vec4 x, i8vec4 y);"
+
+ "uint8_t max(uint8_t x, uint8_t y);"
+ "u8vec2 max(u8vec2 x, uint8_t y);"
+ "u8vec3 max(u8vec3 x, uint8_t y);"
+ "u8vec4 max(u8vec4 x, uint8_t y);"
+ "u8vec2 max(u8vec2 x, u8vec2 y);"
+ "u8vec3 max(u8vec3 x, u8vec3 y);"
+ "u8vec4 max(u8vec4 x, u8vec4 y);"
+
+ "int8_t clamp(int8_t x, int8_t minVal, int8_t maxVal);"
+ "i8vec2 clamp(i8vec2 x, int8_t minVal, int8_t maxVal);"
+ "i8vec3 clamp(i8vec3 x, int8_t minVal, int8_t maxVal);"
+ "i8vec4 clamp(i8vec4 x, int8_t minVal, int8_t maxVal);"
+ "i8vec2 clamp(i8vec2 x, i8vec2 minVal, i8vec2 maxVal);"
+ "i8vec3 clamp(i8vec3 x, i8vec3 minVal, i8vec3 maxVal);"
+ "i8vec4 clamp(i8vec4 x, i8vec4 minVal, i8vec4 maxVal);"
+
+ "uint8_t clamp(uint8_t x, uint8_t minVal, uint8_t maxVal);"
+ "u8vec2 clamp(u8vec2 x, uint8_t minVal, uint8_t maxVal);"
+ "u8vec3 clamp(u8vec3 x, uint8_t minVal, uint8_t maxVal);"
+ "u8vec4 clamp(u8vec4 x, uint8_t minVal, uint8_t maxVal);"
+ "u8vec2 clamp(u8vec2 x, u8vec2 minVal, u8vec2 maxVal);"
+ "u8vec3 clamp(u8vec3 x, u8vec3 minVal, u8vec3 maxVal);"
+ "u8vec4 clamp(u8vec4 x, u8vec4 minVal, u8vec4 maxVal);"
+
+ "int8_t mix(int8_t, int8_t, bool);"
+ "i8vec2 mix(i8vec2, i8vec2, bvec2);"
+ "i8vec3 mix(i8vec3, i8vec3, bvec3);"
+ "i8vec4 mix(i8vec4, i8vec4, bvec4);"
+ "uint8_t mix(uint8_t, uint8_t, bool);"
+ "u8vec2 mix(u8vec2, u8vec2, bvec2);"
+ "u8vec3 mix(u8vec3, u8vec3, bvec3);"
+ "u8vec4 mix(u8vec4, u8vec4, bvec4);"
+
+ "bvec2 lessThan(i8vec2, i8vec2);"
+ "bvec3 lessThan(i8vec3, i8vec3);"
+ "bvec4 lessThan(i8vec4, i8vec4);"
+ "bvec2 lessThan(u8vec2, u8vec2);"
+ "bvec3 lessThan(u8vec3, u8vec3);"
+ "bvec4 lessThan(u8vec4, u8vec4);"
+
+ "bvec2 lessThanEqual(i8vec2, i8vec2);"
+ "bvec3 lessThanEqual(i8vec3, i8vec3);"
+ "bvec4 lessThanEqual(i8vec4, i8vec4);"
+ "bvec2 lessThanEqual(u8vec2, u8vec2);"
+ "bvec3 lessThanEqual(u8vec3, u8vec3);"
+ "bvec4 lessThanEqual(u8vec4, u8vec4);"
+
+ "bvec2 greaterThan(i8vec2, i8vec2);"
+ "bvec3 greaterThan(i8vec3, i8vec3);"
+ "bvec4 greaterThan(i8vec4, i8vec4);"
+ "bvec2 greaterThan(u8vec2, u8vec2);"
+ "bvec3 greaterThan(u8vec3, u8vec3);"
+ "bvec4 greaterThan(u8vec4, u8vec4);"
+
+ "bvec2 greaterThanEqual(i8vec2, i8vec2);"
+ "bvec3 greaterThanEqual(i8vec3, i8vec3);"
+ "bvec4 greaterThanEqual(i8vec4, i8vec4);"
+ "bvec2 greaterThanEqual(u8vec2, u8vec2);"
+ "bvec3 greaterThanEqual(u8vec3, u8vec3);"
+ "bvec4 greaterThanEqual(u8vec4, u8vec4);"
+
+ "bvec2 equal(i8vec2, i8vec2);"
+ "bvec3 equal(i8vec3, i8vec3);"
+ "bvec4 equal(i8vec4, i8vec4);"
+ "bvec2 equal(u8vec2, u8vec2);"
+ "bvec3 equal(u8vec3, u8vec3);"
+ "bvec4 equal(u8vec4, u8vec4);"
+
+ "bvec2 notEqual(i8vec2, i8vec2);"
+ "bvec3 notEqual(i8vec3, i8vec3);"
+ "bvec4 notEqual(i8vec4, i8vec4);"
+ "bvec2 notEqual(u8vec2, u8vec2);"
+ "bvec3 notEqual(u8vec3, u8vec3);"
+ "bvec4 notEqual(u8vec4, u8vec4);"
+
+ " int8_t bitfieldExtract( int8_t, int8_t, int8_t);"
+ "i8vec2 bitfieldExtract(i8vec2, int8_t, int8_t);"
+ "i8vec3 bitfieldExtract(i8vec3, int8_t, int8_t);"
+ "i8vec4 bitfieldExtract(i8vec4, int8_t, int8_t);"
+
+ " uint8_t bitfieldExtract( uint8_t, int8_t, int8_t);"
+ "u8vec2 bitfieldExtract(u8vec2, int8_t, int8_t);"
+ "u8vec3 bitfieldExtract(u8vec3, int8_t, int8_t);"
+ "u8vec4 bitfieldExtract(u8vec4, int8_t, int8_t);"
+
+ " int8_t bitfieldInsert( int8_t base, int8_t, int8_t, int8_t);"
+ "i8vec2 bitfieldInsert(i8vec2 base, i8vec2, int8_t, int8_t);"
+ "i8vec3 bitfieldInsert(i8vec3 base, i8vec3, int8_t, int8_t);"
+ "i8vec4 bitfieldInsert(i8vec4 base, i8vec4, int8_t, int8_t);"
+
+ " uint8_t bitfieldInsert( uint8_t base, uint8_t, int8_t, int8_t);"
+ "u8vec2 bitfieldInsert(u8vec2 base, u8vec2, int8_t, int8_t);"
+ "u8vec3 bitfieldInsert(u8vec3 base, u8vec3, int8_t, int8_t);"
+ "u8vec4 bitfieldInsert(u8vec4 base, u8vec4, int8_t, int8_t);"
+
+ " int8_t bitCount( int8_t);"
+ "i8vec2 bitCount(i8vec2);"
+ "i8vec3 bitCount(i8vec3);"
+ "i8vec4 bitCount(i8vec4);"
+
+ " int8_t bitCount( uint8_t);"
+ "i8vec2 bitCount(u8vec2);"
+ "i8vec3 bitCount(u8vec3);"
+ "i8vec4 bitCount(u8vec4);"
+
+ " int8_t findLSB( int8_t);"
+ "i8vec2 findLSB(i8vec2);"
+ "i8vec3 findLSB(i8vec3);"
+ "i8vec4 findLSB(i8vec4);"
+
+ " int8_t findLSB( uint8_t);"
+ "i8vec2 findLSB(u8vec2);"
+ "i8vec3 findLSB(u8vec3);"
+ "i8vec4 findLSB(u8vec4);"
+
+ " int8_t findMSB( int8_t);"
+ "i8vec2 findMSB(i8vec2);"
+ "i8vec3 findMSB(i8vec3);"
+ "i8vec4 findMSB(i8vec4);"
+
+ " int8_t findMSB( uint8_t);"
+ "i8vec2 findMSB(u8vec2);"
+ "i8vec3 findMSB(u8vec3);"
+ "i8vec4 findMSB(u8vec4);"
+
+ "int16_t abs(int16_t);"
+ "i16vec2 abs(i16vec2);"
+ "i16vec3 abs(i16vec3);"
+ "i16vec4 abs(i16vec4);"
+
+ "int16_t sign(int16_t);"
+ "i16vec2 sign(i16vec2);"
+ "i16vec3 sign(i16vec3);"
+ "i16vec4 sign(i16vec4);"
+
+ "int16_t min(int16_t x, int16_t y);"
+ "i16vec2 min(i16vec2 x, int16_t y);"
+ "i16vec3 min(i16vec3 x, int16_t y);"
+ "i16vec4 min(i16vec4 x, int16_t y);"
+ "i16vec2 min(i16vec2 x, i16vec2 y);"
+ "i16vec3 min(i16vec3 x, i16vec3 y);"
+ "i16vec4 min(i16vec4 x, i16vec4 y);"
+
+ "uint16_t min(uint16_t x, uint16_t y);"
+ "u16vec2 min(u16vec2 x, uint16_t y);"
+ "u16vec3 min(u16vec3 x, uint16_t y);"
+ "u16vec4 min(u16vec4 x, uint16_t y);"
+ "u16vec2 min(u16vec2 x, u16vec2 y);"
+ "u16vec3 min(u16vec3 x, u16vec3 y);"
+ "u16vec4 min(u16vec4 x, u16vec4 y);"
+
+ "int16_t max(int16_t x, int16_t y);"
+ "i16vec2 max(i16vec2 x, int16_t y);"
+ "i16vec3 max(i16vec3 x, int16_t y);"
+ "i16vec4 max(i16vec4 x, int16_t y);"
+ "i16vec2 max(i16vec2 x, i16vec2 y);"
+ "i16vec3 max(i16vec3 x, i16vec3 y);"
+ "i16vec4 max(i16vec4 x, i16vec4 y);"
+
+ "uint16_t max(uint16_t x, uint16_t y);"
+ "u16vec2 max(u16vec2 x, uint16_t y);"
+ "u16vec3 max(u16vec3 x, uint16_t y);"
+ "u16vec4 max(u16vec4 x, uint16_t y);"
+ "u16vec2 max(u16vec2 x, u16vec2 y);"
+ "u16vec3 max(u16vec3 x, u16vec3 y);"
+ "u16vec4 max(u16vec4 x, u16vec4 y);"
+
+ "int16_t clamp(int16_t x, int16_t minVal, int16_t maxVal);"
+ "i16vec2 clamp(i16vec2 x, int16_t minVal, int16_t maxVal);"
+ "i16vec3 clamp(i16vec3 x, int16_t minVal, int16_t maxVal);"
+ "i16vec4 clamp(i16vec4 x, int16_t minVal, int16_t maxVal);"
+ "i16vec2 clamp(i16vec2 x, i16vec2 minVal, i16vec2 maxVal);"
+ "i16vec3 clamp(i16vec3 x, i16vec3 minVal, i16vec3 maxVal);"
+ "i16vec4 clamp(i16vec4 x, i16vec4 minVal, i16vec4 maxVal);"
+
+ "uint16_t clamp(uint16_t x, uint16_t minVal, uint16_t maxVal);"
+ "u16vec2 clamp(u16vec2 x, uint16_t minVal, uint16_t maxVal);"
+ "u16vec3 clamp(u16vec3 x, uint16_t minVal, uint16_t maxVal);"
+ "u16vec4 clamp(u16vec4 x, uint16_t minVal, uint16_t maxVal);"
+ "u16vec2 clamp(u16vec2 x, u16vec2 minVal, u16vec2 maxVal);"
+ "u16vec3 clamp(u16vec3 x, u16vec3 minVal, u16vec3 maxVal);"
+ "u16vec4 clamp(u16vec4 x, u16vec4 minVal, u16vec4 maxVal);"
+
+ "int16_t mix(int16_t, int16_t, bool);"
+ "i16vec2 mix(i16vec2, i16vec2, bvec2);"
+ "i16vec3 mix(i16vec3, i16vec3, bvec3);"
+ "i16vec4 mix(i16vec4, i16vec4, bvec4);"
+ "uint16_t mix(uint16_t, uint16_t, bool);"
+ "u16vec2 mix(u16vec2, u16vec2, bvec2);"
+ "u16vec3 mix(u16vec3, u16vec3, bvec3);"
+ "u16vec4 mix(u16vec4, u16vec4, bvec4);"
+
+ "float16_t frexp(float16_t, out int16_t);"
+ "f16vec2 frexp(f16vec2, out i16vec2);"
+ "f16vec3 frexp(f16vec3, out i16vec3);"
+ "f16vec4 frexp(f16vec4, out i16vec4);"
+
+ "float16_t ldexp(float16_t, int16_t);"
+ "f16vec2 ldexp(f16vec2, i16vec2);"
+ "f16vec3 ldexp(f16vec3, i16vec3);"
+ "f16vec4 ldexp(f16vec4, i16vec4);"
+
+ "int16_t halfBitsToInt16(float16_t);"
+ "i16vec2 halfBitsToInt16(f16vec2);"
+            "i16vec3 halfBitsToInt16(f16vec3);"
+ "i16vec4 halfBitsToInt16(f16vec4);"
+
+ "uint16_t halfBitsToUint16(float16_t);"
+ "u16vec2 halfBitsToUint16(f16vec2);"
+ "u16vec3 halfBitsToUint16(f16vec3);"
+ "u16vec4 halfBitsToUint16(f16vec4);"
+
+ "int16_t float16BitsToInt16(float16_t);"
+ "i16vec2 float16BitsToInt16(f16vec2);"
+ "i16vec3 float16BitsToInt16(f16vec3);"
+ "i16vec4 float16BitsToInt16(f16vec4);"
+
+ "uint16_t float16BitsToUint16(float16_t);"
+ "u16vec2 float16BitsToUint16(f16vec2);"
+ "u16vec3 float16BitsToUint16(f16vec3);"
+ "u16vec4 float16BitsToUint16(f16vec4);"
+
+ "float16_t int16BitsToFloat16(int16_t);"
+ "f16vec2 int16BitsToFloat16(i16vec2);"
+ "f16vec3 int16BitsToFloat16(i16vec3);"
+ "f16vec4 int16BitsToFloat16(i16vec4);"
+
+ "float16_t uint16BitsToFloat16(uint16_t);"
+ "f16vec2 uint16BitsToFloat16(u16vec2);"
+ "f16vec3 uint16BitsToFloat16(u16vec3);"
+ "f16vec4 uint16BitsToFloat16(u16vec4);"
+
+ "float16_t int16BitsToHalf(int16_t);"
+ "f16vec2 int16BitsToHalf(i16vec2);"
+ "f16vec3 int16BitsToHalf(i16vec3);"
+ "f16vec4 int16BitsToHalf(i16vec4);"
+
+ "float16_t uint16BitsToHalf(uint16_t);"
+ "f16vec2 uint16BitsToHalf(u16vec2);"
+ "f16vec3 uint16BitsToHalf(u16vec3);"
+ "f16vec4 uint16BitsToHalf(u16vec4);"
+
+ "int packInt2x16(i16vec2);"
+ "uint packUint2x16(u16vec2);"
+ "int64_t packInt4x16(i16vec4);"
+ "uint64_t packUint4x16(u16vec4);"
+ "i16vec2 unpackInt2x16(int);"
+ "u16vec2 unpackUint2x16(uint);"
+ "i16vec4 unpackInt4x16(int64_t);"
+ "u16vec4 unpackUint4x16(uint64_t);"
+
+ "bvec2 lessThan(i16vec2, i16vec2);"
+ "bvec3 lessThan(i16vec3, i16vec3);"
+ "bvec4 lessThan(i16vec4, i16vec4);"
+ "bvec2 lessThan(u16vec2, u16vec2);"
+ "bvec3 lessThan(u16vec3, u16vec3);"
+ "bvec4 lessThan(u16vec4, u16vec4);"
+
+ "bvec2 lessThanEqual(i16vec2, i16vec2);"
+ "bvec3 lessThanEqual(i16vec3, i16vec3);"
+ "bvec4 lessThanEqual(i16vec4, i16vec4);"
+ "bvec2 lessThanEqual(u16vec2, u16vec2);"
+ "bvec3 lessThanEqual(u16vec3, u16vec3);"
+ "bvec4 lessThanEqual(u16vec4, u16vec4);"
+
+ "bvec2 greaterThan(i16vec2, i16vec2);"
+ "bvec3 greaterThan(i16vec3, i16vec3);"
+ "bvec4 greaterThan(i16vec4, i16vec4);"
+ "bvec2 greaterThan(u16vec2, u16vec2);"
+ "bvec3 greaterThan(u16vec3, u16vec3);"
+ "bvec4 greaterThan(u16vec4, u16vec4);"
+
+ "bvec2 greaterThanEqual(i16vec2, i16vec2);"
+ "bvec3 greaterThanEqual(i16vec3, i16vec3);"
+ "bvec4 greaterThanEqual(i16vec4, i16vec4);"
+ "bvec2 greaterThanEqual(u16vec2, u16vec2);"
+ "bvec3 greaterThanEqual(u16vec3, u16vec3);"
+ "bvec4 greaterThanEqual(u16vec4, u16vec4);"
+
+ "bvec2 equal(i16vec2, i16vec2);"
+ "bvec3 equal(i16vec3, i16vec3);"
+ "bvec4 equal(i16vec4, i16vec4);"
+ "bvec2 equal(u16vec2, u16vec2);"
+ "bvec3 equal(u16vec3, u16vec3);"
+ "bvec4 equal(u16vec4, u16vec4);"
+
+ "bvec2 notEqual(i16vec2, i16vec2);"
+ "bvec3 notEqual(i16vec3, i16vec3);"
+ "bvec4 notEqual(i16vec4, i16vec4);"
+ "bvec2 notEqual(u16vec2, u16vec2);"
+ "bvec3 notEqual(u16vec3, u16vec3);"
+ "bvec4 notEqual(u16vec4, u16vec4);"
+
+ " int16_t bitfieldExtract( int16_t, int16_t, int16_t);"
+ "i16vec2 bitfieldExtract(i16vec2, int16_t, int16_t);"
+ "i16vec3 bitfieldExtract(i16vec3, int16_t, int16_t);"
+ "i16vec4 bitfieldExtract(i16vec4, int16_t, int16_t);"
+
+ " uint16_t bitfieldExtract( uint16_t, int16_t, int16_t);"
+ "u16vec2 bitfieldExtract(u16vec2, int16_t, int16_t);"
+ "u16vec3 bitfieldExtract(u16vec3, int16_t, int16_t);"
+ "u16vec4 bitfieldExtract(u16vec4, int16_t, int16_t);"
+
+ " int16_t bitfieldInsert( int16_t base, int16_t, int16_t, int16_t);"
+ "i16vec2 bitfieldInsert(i16vec2 base, i16vec2, int16_t, int16_t);"
+ "i16vec3 bitfieldInsert(i16vec3 base, i16vec3, int16_t, int16_t);"
+ "i16vec4 bitfieldInsert(i16vec4 base, i16vec4, int16_t, int16_t);"
+
+ " uint16_t bitfieldInsert( uint16_t base, uint16_t, int16_t, int16_t);"
+ "u16vec2 bitfieldInsert(u16vec2 base, u16vec2, int16_t, int16_t);"
+ "u16vec3 bitfieldInsert(u16vec3 base, u16vec3, int16_t, int16_t);"
+ "u16vec4 bitfieldInsert(u16vec4 base, u16vec4, int16_t, int16_t);"
+
+ " int16_t bitCount( int16_t);"
+ "i16vec2 bitCount(i16vec2);"
+ "i16vec3 bitCount(i16vec3);"
+ "i16vec4 bitCount(i16vec4);"
+
+ " int16_t bitCount( uint16_t);"
+ "i16vec2 bitCount(u16vec2);"
+ "i16vec3 bitCount(u16vec3);"
+ "i16vec4 bitCount(u16vec4);"
+
+ " int16_t findLSB( int16_t);"
+ "i16vec2 findLSB(i16vec2);"
+ "i16vec3 findLSB(i16vec3);"
+ "i16vec4 findLSB(i16vec4);"
+
+ " int16_t findLSB( uint16_t);"
+ "i16vec2 findLSB(u16vec2);"
+ "i16vec3 findLSB(u16vec3);"
+ "i16vec4 findLSB(u16vec4);"
+
+ " int16_t findMSB( int16_t);"
+ "i16vec2 findMSB(i16vec2);"
+ "i16vec3 findMSB(i16vec3);"
+ "i16vec4 findMSB(i16vec4);"
+
+ " int16_t findMSB( uint16_t);"
+ "i16vec2 findMSB(u16vec2);"
+ "i16vec3 findMSB(u16vec3);"
+ "i16vec4 findMSB(u16vec4);"
+
+ "int16_t pack16(i8vec2);"
+ "uint16_t pack16(u8vec2);"
+ "int32_t pack32(i8vec4);"
+ "uint32_t pack32(u8vec4);"
+ "int32_t pack32(i16vec2);"
+ "uint32_t pack32(u16vec2);"
+ "int64_t pack64(i16vec4);"
+ "uint64_t pack64(u16vec4);"
+ "int64_t pack64(i32vec2);"
+ "uint64_t pack64(u32vec2);"
+
+ "i8vec2 unpack8(int16_t);"
+ "u8vec2 unpack8(uint16_t);"
+ "i8vec4 unpack8(int32_t);"
+ "u8vec4 unpack8(uint32_t);"
+ "i16vec2 unpack16(int32_t);"
+ "u16vec2 unpack16(uint32_t);"
+ "i16vec4 unpack16(int64_t);"
+ "u16vec4 unpack16(uint64_t);"
+ "i32vec2 unpack32(int64_t);"
+ "u32vec2 unpack32(uint64_t);"
+
+ "float64_t radians(float64_t);"
+ "f64vec2 radians(f64vec2);"
+ "f64vec3 radians(f64vec3);"
+ "f64vec4 radians(f64vec4);"
+
+ "float64_t degrees(float64_t);"
+ "f64vec2 degrees(f64vec2);"
+ "f64vec3 degrees(f64vec3);"
+ "f64vec4 degrees(f64vec4);"
+
+ "float64_t sin(float64_t);"
+ "f64vec2 sin(f64vec2);"
+ "f64vec3 sin(f64vec3);"
+ "f64vec4 sin(f64vec4);"
+
+ "float64_t cos(float64_t);"
+ "f64vec2 cos(f64vec2);"
+ "f64vec3 cos(f64vec3);"
+ "f64vec4 cos(f64vec4);"
+
+ "float64_t tan(float64_t);"
+ "f64vec2 tan(f64vec2);"
+ "f64vec3 tan(f64vec3);"
+ "f64vec4 tan(f64vec4);"
+
+ "float64_t asin(float64_t);"
+ "f64vec2 asin(f64vec2);"
+ "f64vec3 asin(f64vec3);"
+ "f64vec4 asin(f64vec4);"
+
+ "float64_t acos(float64_t);"
+ "f64vec2 acos(f64vec2);"
+ "f64vec3 acos(f64vec3);"
+ "f64vec4 acos(f64vec4);"
+
+ "float64_t atan(float64_t, float64_t);"
+ "f64vec2 atan(f64vec2, f64vec2);"
+ "f64vec3 atan(f64vec3, f64vec3);"
+ "f64vec4 atan(f64vec4, f64vec4);"
+
+ "float64_t atan(float64_t);"
+ "f64vec2 atan(f64vec2);"
+ "f64vec3 atan(f64vec3);"
+ "f64vec4 atan(f64vec4);"
+
+ "float64_t sinh(float64_t);"
+ "f64vec2 sinh(f64vec2);"
+ "f64vec3 sinh(f64vec3);"
+ "f64vec4 sinh(f64vec4);"
+
+ "float64_t cosh(float64_t);"
+ "f64vec2 cosh(f64vec2);"
+ "f64vec3 cosh(f64vec3);"
+ "f64vec4 cosh(f64vec4);"
+
+ "float64_t tanh(float64_t);"
+ "f64vec2 tanh(f64vec2);"
+ "f64vec3 tanh(f64vec3);"
+ "f64vec4 tanh(f64vec4);"
+
+ "float64_t asinh(float64_t);"
+ "f64vec2 asinh(f64vec2);"
+ "f64vec3 asinh(f64vec3);"
+ "f64vec4 asinh(f64vec4);"
+
+ "float64_t acosh(float64_t);"
+ "f64vec2 acosh(f64vec2);"
+ "f64vec3 acosh(f64vec3);"
+ "f64vec4 acosh(f64vec4);"
+
+ "float64_t atanh(float64_t);"
+ "f64vec2 atanh(f64vec2);"
+ "f64vec3 atanh(f64vec3);"
+ "f64vec4 atanh(f64vec4);"
+
+ "float64_t pow(float64_t, float64_t);"
+ "f64vec2 pow(f64vec2, f64vec2);"
+ "f64vec3 pow(f64vec3, f64vec3);"
+ "f64vec4 pow(f64vec4, f64vec4);"
+
+ "float64_t exp(float64_t);"
+ "f64vec2 exp(f64vec2);"
+ "f64vec3 exp(f64vec3);"
+ "f64vec4 exp(f64vec4);"
+
+ "float64_t log(float64_t);"
+ "f64vec2 log(f64vec2);"
+ "f64vec3 log(f64vec3);"
+ "f64vec4 log(f64vec4);"
+
+ "float64_t exp2(float64_t);"
+ "f64vec2 exp2(f64vec2);"
+ "f64vec3 exp2(f64vec3);"
+ "f64vec4 exp2(f64vec4);"
+
+ "float64_t log2(float64_t);"
+ "f64vec2 log2(f64vec2);"
+ "f64vec3 log2(f64vec3);"
+ "f64vec4 log2(f64vec4);"
+ "\n");
+ }
+ if (profile != EEsProfile && version >= 450) {
+ stageBuiltins[EShLangFragment].append(derivativesAndControl64bits);
+ stageBuiltins[EShLangFragment].append(
+ "float64_t interpolateAtCentroid(float64_t);"
+ "f64vec2 interpolateAtCentroid(f64vec2);"
+ "f64vec3 interpolateAtCentroid(f64vec3);"
+ "f64vec4 interpolateAtCentroid(f64vec4);"
+
+ "float64_t interpolateAtSample(float64_t, int);"
+ "f64vec2 interpolateAtSample(f64vec2, int);"
+ "f64vec3 interpolateAtSample(f64vec3, int);"
+ "f64vec4 interpolateAtSample(f64vec4, int);"
+
+ "float64_t interpolateAtOffset(float64_t, f64vec2);"
+ "f64vec2 interpolateAtOffset(f64vec2, f64vec2);"
+ "f64vec3 interpolateAtOffset(f64vec3, f64vec2);"
+ "f64vec4 interpolateAtOffset(f64vec4, f64vec2);"
+
+ "\n");
+ }
+
+ //============================================================================
+ //
+ // Prototypes for built-in functions seen by vertex shaders only.
+    // (Except the legacy lod functions, which are vertex-only for ES 100
+    // and common to all stages otherwise; see the version check below.)
+ //
+ //============================================================================
+
+ //
+ // Geometric Functions.
+ //
+ if (IncludeLegacy(version, profile, spvVersion))
+ stageBuiltins[EShLangVertex].append("vec4 ftransform();");
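+
+    // ftransform() reproduces the fixed-function vertex transform
+    // (gl_ModelViewProjectionMatrix * gl_Vertex) with invariant results.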
+
+ //
+ // Original-style texture Functions with lod.
+ //
+ TString* s;
+ if (version == 100)
+ s = &stageBuiltins[EShLangVertex];
+ else
+ s = &commonBuiltins;
+ if ((profile == EEsProfile && version == 100) ||
+ profile == ECompatibilityProfile ||
+ (profile == ECoreProfile && version < 420) ||
+ profile == ENoProfile) {
+ if (spvVersion.spv == 0) {
+ s->append(
+ "vec4 texture2DLod(sampler2D, vec2, float);" // GL_ARB_shader_texture_lod
+ "vec4 texture2DProjLod(sampler2D, vec3, float);" // GL_ARB_shader_texture_lod
+ "vec4 texture2DProjLod(sampler2D, vec4, float);" // GL_ARB_shader_texture_lod
+ "vec4 texture3DLod(sampler3D, vec3, float);" // GL_ARB_shader_texture_lod // OES_texture_3D, but caught by keyword check
+ "vec4 texture3DProjLod(sampler3D, vec4, float);" // GL_ARB_shader_texture_lod // OES_texture_3D, but caught by keyword check
+ "vec4 textureCubeLod(samplerCube, vec3, float);" // GL_ARB_shader_texture_lod
+
+ "\n");
+ }
+ }
+ if ( profile == ECompatibilityProfile ||
+ (profile == ECoreProfile && version < 420) ||
+ profile == ENoProfile) {
+ if (spvVersion.spv == 0) {
+ s->append(
+ "vec4 texture1DLod(sampler1D, float, float);" // GL_ARB_shader_texture_lod
+ "vec4 texture1DProjLod(sampler1D, vec2, float);" // GL_ARB_shader_texture_lod
+ "vec4 texture1DProjLod(sampler1D, vec4, float);" // GL_ARB_shader_texture_lod
+ "vec4 shadow1DLod(sampler1DShadow, vec3, float);" // GL_ARB_shader_texture_lod
+ "vec4 shadow2DLod(sampler2DShadow, vec3, float);" // GL_ARB_shader_texture_lod
+ "vec4 shadow1DProjLod(sampler1DShadow, vec4, float);" // GL_ARB_shader_texture_lod
+ "vec4 shadow2DProjLod(sampler2DShadow, vec4, float);" // GL_ARB_shader_texture_lod
+
+ "vec4 texture1DGradARB(sampler1D, float, float, float);" // GL_ARB_shader_texture_lod
+ "vec4 texture1DProjGradARB(sampler1D, vec2, float, float);" // GL_ARB_shader_texture_lod
+ "vec4 texture1DProjGradARB(sampler1D, vec4, float, float);" // GL_ARB_shader_texture_lod
+ "vec4 texture2DGradARB(sampler2D, vec2, vec2, vec2);" // GL_ARB_shader_texture_lod
+ "vec4 texture2DProjGradARB(sampler2D, vec3, vec2, vec2);" // GL_ARB_shader_texture_lod
+ "vec4 texture2DProjGradARB(sampler2D, vec4, vec2, vec2);" // GL_ARB_shader_texture_lod
+ "vec4 texture3DGradARB(sampler3D, vec3, vec3, vec3);" // GL_ARB_shader_texture_lod
+ "vec4 texture3DProjGradARB(sampler3D, vec4, vec3, vec3);" // GL_ARB_shader_texture_lod
+ "vec4 textureCubeGradARB(samplerCube, vec3, vec3, vec3);" // GL_ARB_shader_texture_lod
+ "vec4 shadow1DGradARB(sampler1DShadow, vec3, float, float);" // GL_ARB_shader_texture_lod
+ "vec4 shadow1DProjGradARB( sampler1DShadow, vec4, float, float);" // GL_ARB_shader_texture_lod
+ "vec4 shadow2DGradARB(sampler2DShadow, vec3, vec2, vec2);" // GL_ARB_shader_texture_lod
+ "vec4 shadow2DProjGradARB( sampler2DShadow, vec4, vec2, vec2);" // GL_ARB_shader_texture_lod
+ "vec4 texture2DRectGradARB(sampler2DRect, vec2, vec2, vec2);" // GL_ARB_shader_texture_lod
+ "vec4 texture2DRectProjGradARB( sampler2DRect, vec3, vec2, vec2);" // GL_ARB_shader_texture_lod
+ "vec4 texture2DRectProjGradARB( sampler2DRect, vec4, vec2, vec2);" // GL_ARB_shader_texture_lod
+ "vec4 shadow2DRectGradARB( sampler2DRectShadow, vec3, vec2, vec2);" // GL_ARB_shader_texture_lod
+ "vec4 shadow2DRectProjGradARB(sampler2DRectShadow, vec4, vec2, vec2);" // GL_ARB_shader_texture_lod
+
+ "\n");
+ }
+ }
+
+ if ((profile != EEsProfile && version >= 150) ||
+ (profile == EEsProfile && version >= 310)) {
+ //============================================================================
+ //
+ // Prototypes for built-in functions seen by geometry shaders only.
+ //
+ //============================================================================
+
+ if (profile != EEsProfile && version >= 400) {
+ stageBuiltins[EShLangGeometry].append(
+ "void EmitStreamVertex(int);"
+ "void EndStreamPrimitive(int);"
+ );
+ }
+ stageBuiltins[EShLangGeometry].append(
+ "void EmitVertex();"
+ "void EndPrimitive();"
+ "\n");
+ }
+
+ //============================================================================
+ //
+ // Prototypes for all control functions.
+ //
+ //============================================================================
+ bool esBarrier = (profile == EEsProfile && version >= 310);
+ if ((profile != EEsProfile && version >= 150) || esBarrier)
+ stageBuiltins[EShLangTessControl].append(
+ "void barrier();"
+ );
+ if ((profile != EEsProfile && version >= 420) || esBarrier)
+ stageBuiltins[EShLangCompute].append(
+ "void barrier();"
+ );
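+
+    // barrier() synchronizes control flow: across the invocations of a patch
+    // in tessellation control shaders, and across a workgroup in compute
+    // (and, below, mesh/task) shaders.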
+#ifdef NV_EXTENSIONS
+ if ((profile != EEsProfile && version >= 450) || (profile == EEsProfile && version >= 320)) {
+ stageBuiltins[EShLangMeshNV].append(
+ "void barrier();"
+ );
+ stageBuiltins[EShLangTaskNV].append(
+ "void barrier();"
+ );
+ }
+#endif
+ if ((profile != EEsProfile && version >= 130) || esBarrier)
+ commonBuiltins.append(
+ "void memoryBarrier();"
+ );
+ if ((profile != EEsProfile && version >= 420) || esBarrier) {
+ commonBuiltins.append(
+ "void memoryBarrierAtomicCounter();"
+ "void memoryBarrierBuffer();"
+ "void memoryBarrierImage();"
+ );
+ stageBuiltins[EShLangCompute].append(
+ "void memoryBarrierShared();"
+ "void groupMemoryBarrier();"
+ );
+ }
+#ifdef NV_EXTENSIONS
+ if ((profile != EEsProfile && version >= 450) || (profile == EEsProfile && version >= 320)) {
+ stageBuiltins[EShLangMeshNV].append(
+ "void memoryBarrierShared();"
+ "void groupMemoryBarrier();"
+ );
+ stageBuiltins[EShLangTaskNV].append(
+ "void memoryBarrierShared();"
+ "void groupMemoryBarrier();"
+ );
+ }
+#endif
+
+ commonBuiltins.append("void controlBarrier(int, int, int, int);\n"
+ "void memoryBarrier(int, int, int);\n");
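+
+    // The scoped forms above follow GL_KHR_memory_scope_semantics; a typical
+    // call (sketch) looks like:
+    //
+    //     controlBarrier(gl_ScopeWorkgroup, gl_ScopeWorkgroup,
+    //                    gl_StorageSemanticsShared, gl_SemanticsAcquireRelease);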
+
+ if (profile != EEsProfile && version >= 450) {
+        // coopMatStoreNV perhaps ought to have "out" on the buf parameter, but
+        // adding it introduces undesirable tempArgs on the stack. What we want
+        // is closer to treating "buf" as a pointer value passed as an "in" parameter.
+ stageBuiltins[EShLangCompute].append(
+ "void coopMatLoadNV(out fcoopmatNV m, volatile coherent float16_t[] buf, uint element, uint stride, bool colMajor);\n"
+ "void coopMatLoadNV(out fcoopmatNV m, volatile coherent float[] buf, uint element, uint stride, bool colMajor);\n"
+ "void coopMatLoadNV(out fcoopmatNV m, volatile coherent uint8_t[] buf, uint element, uint stride, bool colMajor);\n"
+ "void coopMatLoadNV(out fcoopmatNV m, volatile coherent uint16_t[] buf, uint element, uint stride, bool colMajor);\n"
+ "void coopMatLoadNV(out fcoopmatNV m, volatile coherent uint[] buf, uint element, uint stride, bool colMajor);\n"
+ "void coopMatLoadNV(out fcoopmatNV m, volatile coherent uint64_t[] buf, uint element, uint stride, bool colMajor);\n"
+ "void coopMatLoadNV(out fcoopmatNV m, volatile coherent uvec2[] buf, uint element, uint stride, bool colMajor);\n"
+ "void coopMatLoadNV(out fcoopmatNV m, volatile coherent uvec4[] buf, uint element, uint stride, bool colMajor);\n"
+
+ "void coopMatStoreNV(fcoopmatNV m, volatile coherent float16_t[] buf, uint element, uint stride, bool colMajor);\n"
+ "void coopMatStoreNV(fcoopmatNV m, volatile coherent float[] buf, uint element, uint stride, bool colMajor);\n"
+ "void coopMatStoreNV(fcoopmatNV m, volatile coherent float64_t[] buf, uint element, uint stride, bool colMajor);\n"
+ "void coopMatStoreNV(fcoopmatNV m, volatile coherent uint8_t[] buf, uint element, uint stride, bool colMajor);\n"
+ "void coopMatStoreNV(fcoopmatNV m, volatile coherent uint16_t[] buf, uint element, uint stride, bool colMajor);\n"
+ "void coopMatStoreNV(fcoopmatNV m, volatile coherent uint[] buf, uint element, uint stride, bool colMajor);\n"
+ "void coopMatStoreNV(fcoopmatNV m, volatile coherent uint64_t[] buf, uint element, uint stride, bool colMajor);\n"
+ "void coopMatStoreNV(fcoopmatNV m, volatile coherent uvec2[] buf, uint element, uint stride, bool colMajor);\n"
+ "void coopMatStoreNV(fcoopmatNV m, volatile coherent uvec4[] buf, uint element, uint stride, bool colMajor);\n"
+
+ "fcoopmatNV coopMatMulAddNV(fcoopmatNV A, fcoopmatNV B, fcoopmatNV C);\n"
+ );
+ }
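+
+    // Cooperative-matrix usage sketch (assumes GL_NV_cooperative_matrix; the
+    // 16x16 half-float shape, "buf", and "stride" are illustrative):
+    //
+    //     fcoopmatNV<16, gl_ScopeSubgroup, 16, 16> A, B, C;
+    //     coopMatLoadNV(A, buf, 0u, stride, false);
+    //     C = coopMatMulAddNV(A, B, C);
+    //     coopMatStoreNV(C, buf, 0u, stride, false);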
+
+ //============================================================================
+ //
+ // Prototypes for built-in functions seen by fragment shaders only.
+ //
+ //============================================================================
+
+ //
+ // Original-style texture Functions with bias.
+ //
+ if (spvVersion.spv == 0 && (profile != EEsProfile || version == 100)) {
+ stageBuiltins[EShLangFragment].append(
+ "vec4 texture2D(sampler2D, vec2, float);"
+ "vec4 texture2DProj(sampler2D, vec3, float);"
+ "vec4 texture2DProj(sampler2D, vec4, float);"
+ "vec4 texture3D(sampler3D, vec3, float);" // OES_texture_3D
+ "vec4 texture3DProj(sampler3D, vec4, float);" // OES_texture_3D
+ "vec4 textureCube(samplerCube, vec3, float);"
+
+ "\n");
+ }
+ if (spvVersion.spv == 0 && (profile != EEsProfile && version > 100)) {
+ stageBuiltins[EShLangFragment].append(
+ "vec4 texture1D(sampler1D, float, float);"
+ "vec4 texture1DProj(sampler1D, vec2, float);"
+ "vec4 texture1DProj(sampler1D, vec4, float);"
+ "vec4 shadow1D(sampler1DShadow, vec3, float);"
+ "vec4 shadow2D(sampler2DShadow, vec3, float);"
+ "vec4 shadow1DProj(sampler1DShadow, vec4, float);"
+ "vec4 shadow2DProj(sampler2DShadow, vec4, float);"
+
+ "\n");
+ }
+ if (spvVersion.spv == 0 && profile == EEsProfile) {
+ stageBuiltins[EShLangFragment].append(
+ "vec4 texture2DLodEXT(sampler2D, vec2, float);" // GL_EXT_shader_texture_lod
+ "vec4 texture2DProjLodEXT(sampler2D, vec3, float);" // GL_EXT_shader_texture_lod
+ "vec4 texture2DProjLodEXT(sampler2D, vec4, float);" // GL_EXT_shader_texture_lod
+ "vec4 textureCubeLodEXT(samplerCube, vec3, float);" // GL_EXT_shader_texture_lod
+
+ "\n");
+ }
+
+ stageBuiltins[EShLangFragment].append(derivatives);
+ stageBuiltins[EShLangFragment].append("\n");
+
+ // GL_ARB_derivative_control
+ if (profile != EEsProfile && version >= 400) {
+ stageBuiltins[EShLangFragment].append(derivativeControls);
+ stageBuiltins[EShLangFragment].append("\n");
+ }
+
+ // GL_OES_shader_multisample_interpolation
+ if ((profile == EEsProfile && version >= 310) ||
+ (profile != EEsProfile && version >= 400)) {
+ stageBuiltins[EShLangFragment].append(
+ "float interpolateAtCentroid(float);"
+ "vec2 interpolateAtCentroid(vec2);"
+ "vec3 interpolateAtCentroid(vec3);"
+ "vec4 interpolateAtCentroid(vec4);"
+
+ "float interpolateAtSample(float, int);"
+ "vec2 interpolateAtSample(vec2, int);"
+ "vec3 interpolateAtSample(vec3, int);"
+ "vec4 interpolateAtSample(vec4, int);"
+
+ "float interpolateAtOffset(float, vec2);"
+ "vec2 interpolateAtOffset(vec2, vec2);"
+ "vec3 interpolateAtOffset(vec3, vec2);"
+ "vec4 interpolateAtOffset(vec4, vec2);"
+
+ "\n");
+ }
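+
+    // interpolateAtSample(v, n) re-evaluates interpolant "v" at sample n,
+    // and interpolateAtOffset shifts evaluation by a pixel-relative offset.
+    // Illustrative GLSL ("vColor" is an assumed input):
+    //
+    //     vec4 c = interpolateAtOffset(vColor, vec2(0.25, -0.25));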
+
+#ifdef AMD_EXTENSIONS
+ // GL_AMD_shader_explicit_vertex_parameter
+ if (profile != EEsProfile && version >= 450) {
+ stageBuiltins[EShLangFragment].append(
+ "float interpolateAtVertexAMD(float, uint);"
+ "vec2 interpolateAtVertexAMD(vec2, uint);"
+ "vec3 interpolateAtVertexAMD(vec3, uint);"
+ "vec4 interpolateAtVertexAMD(vec4, uint);"
+
+ "int interpolateAtVertexAMD(int, uint);"
+ "ivec2 interpolateAtVertexAMD(ivec2, uint);"
+ "ivec3 interpolateAtVertexAMD(ivec3, uint);"
+ "ivec4 interpolateAtVertexAMD(ivec4, uint);"
+
+ "uint interpolateAtVertexAMD(uint, uint);"
+ "uvec2 interpolateAtVertexAMD(uvec2, uint);"
+ "uvec3 interpolateAtVertexAMD(uvec3, uint);"
+ "uvec4 interpolateAtVertexAMD(uvec4, uint);"
+
+ "float16_t interpolateAtVertexAMD(float16_t, uint);"
+ "f16vec2 interpolateAtVertexAMD(f16vec2, uint);"
+ "f16vec3 interpolateAtVertexAMD(f16vec3, uint);"
+ "f16vec4 interpolateAtVertexAMD(f16vec4, uint);"
+
+ "\n");
+ }
+
+ // GL_AMD_gpu_shader_half_float
+ if (profile != EEsProfile && version >= 450) {
+ stageBuiltins[EShLangFragment].append(derivativesAndControl16bits);
+ stageBuiltins[EShLangFragment].append("\n");
+
+ stageBuiltins[EShLangFragment].append(
+ "float16_t interpolateAtCentroid(float16_t);"
+ "f16vec2 interpolateAtCentroid(f16vec2);"
+ "f16vec3 interpolateAtCentroid(f16vec3);"
+ "f16vec4 interpolateAtCentroid(f16vec4);"
+
+ "float16_t interpolateAtSample(float16_t, int);"
+ "f16vec2 interpolateAtSample(f16vec2, int);"
+ "f16vec3 interpolateAtSample(f16vec3, int);"
+ "f16vec4 interpolateAtSample(f16vec4, int);"
+
+ "float16_t interpolateAtOffset(float16_t, f16vec2);"
+ "f16vec2 interpolateAtOffset(f16vec2, f16vec2);"
+ "f16vec3 interpolateAtOffset(f16vec3, f16vec2);"
+ "f16vec4 interpolateAtOffset(f16vec4, f16vec2);"
+
+ "\n");
+ }
+
+ // GL_AMD_shader_fragment_mask
+ if (profile != EEsProfile && version >= 450 && spvVersion.vulkan > 0) {
+ stageBuiltins[EShLangFragment].append(
+ "uint fragmentMaskFetchAMD(subpassInputMS);"
+ "uint fragmentMaskFetchAMD(isubpassInputMS);"
+ "uint fragmentMaskFetchAMD(usubpassInputMS);"
+
+ "vec4 fragmentFetchAMD(subpassInputMS, uint);"
+ "ivec4 fragmentFetchAMD(isubpassInputMS, uint);"
+ "uvec4 fragmentFetchAMD(usubpassInputMS, uint);"
+
+ "\n");
+ }
+#endif
+
+#ifdef NV_EXTENSIONS
+
+ // Builtins for GL_NV_ray_tracing
+ if (profile != EEsProfile && version >= 460) {
+ stageBuiltins[EShLangRayGenNV].append(
+ "void traceNV(accelerationStructureNV,uint,uint,uint,uint,uint,vec3,float,vec3,float,int);"
+ "void executeCallableNV(uint, int);"
+ "\n");
+ stageBuiltins[EShLangIntersectNV].append(
+ "bool reportIntersectionNV(float, uint);"
+ "\n");
+ stageBuiltins[EShLangAnyHitNV].append(
+ "void ignoreIntersectionNV();"
+ "void terminateRayNV();"
+ "\n");
+ stageBuiltins[EShLangClosestHitNV].append(
+ "void traceNV(accelerationStructureNV,uint,uint,uint,uint,uint,vec3,float,vec3,float,int);"
+ "void executeCallableNV(uint, int);"
+ "\n");
+ stageBuiltins[EShLangMissNV].append(
+ "void traceNV(accelerationStructureNV,uint,uint,uint,uint,uint,vec3,float,vec3,float,int);"
+ "void executeCallableNV(uint, int);"
+ "\n");
+ stageBuiltins[EShLangCallableNV].append(
+ "void executeCallableNV(uint, int);"
+ "\n");
+ }
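+
+    // traceNV takes, in order (per GL_NV_ray_tracing): the acceleration
+    // structure, rayFlags, cullMask, sbtRecordOffset, sbtRecordStride,
+    // missIndex, origin, tMin, direction, tMax, and the payload location.
+    // Sketch ("topLevelAS", "origin", "dir" are assumed):
+    //
+    //     traceNV(topLevelAS, gl_RayFlagsOpaqueNV, 0xFF, 0u, 0u, 0u,
+    //             origin, 0.001, dir, 1e30, 0 /*payload*/);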
+
+    // E_SPV_NV_compute_shader_derivatives (GL_NV_compute_shader_derivatives)
+
+ stageBuiltins[EShLangCompute].append(derivatives);
+ stageBuiltins[EShLangCompute].append(derivativeControls);
+ stageBuiltins[EShLangCompute].append("\n");
+
+ if (profile != EEsProfile && version >= 450) {
+
+ stageBuiltins[EShLangCompute].append(derivativesAndControl16bits);
+ stageBuiltins[EShLangCompute].append(derivativesAndControl64bits);
+ stageBuiltins[EShLangCompute].append("\n");
+ }
+
+ // Builtins for GL_NV_mesh_shader
+ if ((profile != EEsProfile && version >= 450) || (profile == EEsProfile && version >= 320)) {
+ stageBuiltins[EShLangMeshNV].append(
+ "void writePackedPrimitiveIndices4x8NV(uint, uint);"
+ "\n");
+ }
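+
+    // writePackedPrimitiveIndices4x8NV(offset, packed) unpacks four 8-bit
+    // index values from "packed" into gl_PrimitiveIndicesNV starting at
+    // element "offset".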
+#endif
+
+ //============================================================================
+ //
+ // Standard Uniforms
+ //
+ //============================================================================
+
+ //
+ // Depth range in window coordinates, p. 33
+ //
+ if (spvVersion.spv == 0) {
+ commonBuiltins.append(
+ "struct gl_DepthRangeParameters {"
+ );
+ if (profile == EEsProfile) {
+ commonBuiltins.append(
+ "highp float near;" // n
+ "highp float far;" // f
+ "highp float diff;" // f - n
+ );
+ } else {
+ commonBuiltins.append(
+ "float near;" // n
+ "float far;" // f
+ "float diff;" // f - n
+ );
+ }
+
+ commonBuiltins.append(
+ "};"
+ "uniform gl_DepthRangeParameters gl_DepthRange;"
+ "\n");
+ }
+
+ if (spvVersion.spv == 0 && IncludeLegacy(version, profile, spvVersion)) {
+ //
+ // Matrix state. p. 31, 32, 37, 39, 40.
+ //
+ commonBuiltins.append(
+ "uniform mat4 gl_ModelViewMatrix;"
+ "uniform mat4 gl_ProjectionMatrix;"
+ "uniform mat4 gl_ModelViewProjectionMatrix;"
+
+ //
+ // Derived matrix state that provides inverse and transposed versions
+ // of the matrices above.
+ //
+ "uniform mat3 gl_NormalMatrix;"
+
+ "uniform mat4 gl_ModelViewMatrixInverse;"
+ "uniform mat4 gl_ProjectionMatrixInverse;"
+ "uniform mat4 gl_ModelViewProjectionMatrixInverse;"
+
+ "uniform mat4 gl_ModelViewMatrixTranspose;"
+ "uniform mat4 gl_ProjectionMatrixTranspose;"
+ "uniform mat4 gl_ModelViewProjectionMatrixTranspose;"
+
+ "uniform mat4 gl_ModelViewMatrixInverseTranspose;"
+ "uniform mat4 gl_ProjectionMatrixInverseTranspose;"
+ "uniform mat4 gl_ModelViewProjectionMatrixInverseTranspose;"
+
+ //
+ // Normal scaling p. 39.
+ //
+ "uniform float gl_NormalScale;"
+
+ //
+ // Point Size, p. 66, 67.
+ //
+ "struct gl_PointParameters {"
+ "float size;"
+ "float sizeMin;"
+ "float sizeMax;"
+ "float fadeThresholdSize;"
+ "float distanceConstantAttenuation;"
+ "float distanceLinearAttenuation;"
+ "float distanceQuadraticAttenuation;"
+ "};"
+
+ "uniform gl_PointParameters gl_Point;"
+
+ //
+ // Material State p. 50, 55.
+ //
+ "struct gl_MaterialParameters {"
+ "vec4 emission;" // Ecm
+ "vec4 ambient;" // Acm
+ "vec4 diffuse;" // Dcm
+ "vec4 specular;" // Scm
+ "float shininess;" // Srm
+ "};"
+ "uniform gl_MaterialParameters gl_FrontMaterial;"
+ "uniform gl_MaterialParameters gl_BackMaterial;"
+
+ //
+ // Light State p 50, 53, 55.
+ //
+ "struct gl_LightSourceParameters {"
+ "vec4 ambient;" // Acli
+ "vec4 diffuse;" // Dcli
+ "vec4 specular;" // Scli
+ "vec4 position;" // Ppli
+ "vec4 halfVector;" // Derived: Hi
+ "vec3 spotDirection;" // Sdli
+ "float spotExponent;" // Srli
+ "float spotCutoff;" // Crli
+ // (range: [0.0,90.0], 180.0)
+ "float spotCosCutoff;" // Derived: cos(Crli)
+ // (range: [1.0,0.0],-1.0)
+ "float constantAttenuation;" // K0
+ "float linearAttenuation;" // K1
+ "float quadraticAttenuation;"// K2
+ "};"
+
+ "struct gl_LightModelParameters {"
+ "vec4 ambient;" // Acs
+ "};"
+
+ "uniform gl_LightModelParameters gl_LightModel;"
+
+ //
+ // Derived state from products of light and material.
+ //
+ "struct gl_LightModelProducts {"
+ "vec4 sceneColor;" // Derived. Ecm + Acm * Acs
+ "};"
+
+ "uniform gl_LightModelProducts gl_FrontLightModelProduct;"
+ "uniform gl_LightModelProducts gl_BackLightModelProduct;"
+
+ "struct gl_LightProducts {"
+ "vec4 ambient;" // Acm * Acli
+ "vec4 diffuse;" // Dcm * Dcli
+ "vec4 specular;" // Scm * Scli
+ "};"
+
+ //
+ // Fog p. 161
+ //
+ "struct gl_FogParameters {"
+ "vec4 color;"
+ "float density;"
+ "float start;"
+ "float end;"
+ "float scale;" // 1 / (gl_FogEnd - gl_FogStart)
+ "};"
+
+ "uniform gl_FogParameters gl_Fog;"
+
+ "\n");
+ }
+
+ //============================================================================
+ //
+ // Define the interface to the compute shader.
+ //
+ //============================================================================
+
+ if ((profile != EEsProfile && version >= 420) ||
+ (profile == EEsProfile && version >= 310)) {
+ stageBuiltins[EShLangCompute].append(
+ "in highp uvec3 gl_NumWorkGroups;"
+ "const highp uvec3 gl_WorkGroupSize = uvec3(1,1,1);"
+
+ "in highp uvec3 gl_WorkGroupID;"
+ "in highp uvec3 gl_LocalInvocationID;"
+
+ "in highp uvec3 gl_GlobalInvocationID;"
+ "in highp uint gl_LocalInvocationIndex;"
+
+ "\n");
+ }
+
+ if ((profile != EEsProfile && version >= 140) ||
+ (profile == EEsProfile && version >= 310)) {
+ stageBuiltins[EShLangCompute].append(
+ "in highp int gl_DeviceIndex;" // GL_EXT_device_group
+ "\n");
+ }
+
+#ifdef NV_EXTENSIONS
+ //============================================================================
+ //
+ // Define the interface to the mesh/task shader.
+ //
+ //============================================================================
+
+ if ((profile != EEsProfile && version >= 450) || (profile == EEsProfile && version >= 320)) {
+ // per-vertex attributes
+ stageBuiltins[EShLangMeshNV].append(
+ "out gl_MeshPerVertexNV {"
+ "vec4 gl_Position;"
+ "float gl_PointSize;"
+ "float gl_ClipDistance[];"
+ "float gl_CullDistance[];"
+ "perviewNV vec4 gl_PositionPerViewNV[];"
+ "perviewNV float gl_ClipDistancePerViewNV[][];"
+ "perviewNV float gl_CullDistancePerViewNV[][];"
+ "} gl_MeshVerticesNV[];"
+ );
+
+ // per-primitive attributes
+ stageBuiltins[EShLangMeshNV].append(
+ "perprimitiveNV out gl_MeshPerPrimitiveNV {"
+ "int gl_PrimitiveID;"
+ "int gl_Layer;"
+ "int gl_ViewportIndex;"
+ "int gl_ViewportMask[];"
+ "perviewNV int gl_LayerPerViewNV[];"
+ "perviewNV int gl_ViewportMaskPerViewNV[][];"
+ "} gl_MeshPrimitivesNV[];"
+ );
+
+ stageBuiltins[EShLangMeshNV].append(
+ "out uint gl_PrimitiveCountNV;"
+ "out uint gl_PrimitiveIndicesNV[];"
+
+ "in uint gl_MeshViewCountNV;"
+ "in uint gl_MeshViewIndicesNV[4];"
+
+ "const highp uvec3 gl_WorkGroupSize = uvec3(1,1,1);"
+
+ "in highp uvec3 gl_WorkGroupID;"
+ "in highp uvec3 gl_LocalInvocationID;"
+
+ "in highp uvec3 gl_GlobalInvocationID;"
+ "in highp uint gl_LocalInvocationIndex;"
+
+ "\n");
+
+ stageBuiltins[EShLangTaskNV].append(
+ "out uint gl_TaskCountNV;"
+
+ "const highp uvec3 gl_WorkGroupSize = uvec3(1,1,1);"
+
+ "in highp uvec3 gl_WorkGroupID;"
+ "in highp uvec3 gl_LocalInvocationID;"
+
+ "in highp uvec3 gl_GlobalInvocationID;"
+ "in highp uint gl_LocalInvocationIndex;"
+
+ "in uint gl_MeshViewCountNV;"
+ "in uint gl_MeshViewIndicesNV[4];"
+
+ "\n");
+ }
+
+ if (profile != EEsProfile && version >= 450) {
+ stageBuiltins[EShLangMeshNV].append(
+ "in highp int gl_DeviceIndex;" // GL_EXT_device_group
+ "in int gl_DrawIDARB;" // GL_ARB_shader_draw_parameters
+ "\n");
+
+ stageBuiltins[EShLangTaskNV].append(
+ "in highp int gl_DeviceIndex;" // GL_EXT_device_group
+ "in int gl_DrawIDARB;" // GL_ARB_shader_draw_parameters
+ "\n");
+
+ if (version >= 460) {
+ stageBuiltins[EShLangMeshNV].append(
+ "in int gl_DrawID;"
+ "\n");
+
+ stageBuiltins[EShLangTaskNV].append(
+ "in int gl_DrawID;"
+ "\n");
+ }
+ }
+#endif
+
+ //============================================================================
+ //
+ // Define the interface to the vertex shader.
+ //
+ //============================================================================
+
+ if (profile != EEsProfile) {
+ if (version < 130) {
+ stageBuiltins[EShLangVertex].append(
+ "attribute vec4 gl_Color;"
+ "attribute vec4 gl_SecondaryColor;"
+ "attribute vec3 gl_Normal;"
+ "attribute vec4 gl_Vertex;"
+ "attribute vec4 gl_MultiTexCoord0;"
+ "attribute vec4 gl_MultiTexCoord1;"
+ "attribute vec4 gl_MultiTexCoord2;"
+ "attribute vec4 gl_MultiTexCoord3;"
+ "attribute vec4 gl_MultiTexCoord4;"
+ "attribute vec4 gl_MultiTexCoord5;"
+ "attribute vec4 gl_MultiTexCoord6;"
+ "attribute vec4 gl_MultiTexCoord7;"
+ "attribute float gl_FogCoord;"
+ "\n");
+ } else if (IncludeLegacy(version, profile, spvVersion)) {
+ stageBuiltins[EShLangVertex].append(
+ "in vec4 gl_Color;"
+ "in vec4 gl_SecondaryColor;"
+ "in vec3 gl_Normal;"
+ "in vec4 gl_Vertex;"
+ "in vec4 gl_MultiTexCoord0;"
+ "in vec4 gl_MultiTexCoord1;"
+ "in vec4 gl_MultiTexCoord2;"
+ "in vec4 gl_MultiTexCoord3;"
+ "in vec4 gl_MultiTexCoord4;"
+ "in vec4 gl_MultiTexCoord5;"
+ "in vec4 gl_MultiTexCoord6;"
+ "in vec4 gl_MultiTexCoord7;"
+ "in float gl_FogCoord;"
+ "\n");
+ }
+
+ if (version < 150) {
+ if (version < 130) {
+ stageBuiltins[EShLangVertex].append(
+ " vec4 gl_ClipVertex;" // needs qualifier fixed later
+ "varying vec4 gl_FrontColor;"
+ "varying vec4 gl_BackColor;"
+ "varying vec4 gl_FrontSecondaryColor;"
+ "varying vec4 gl_BackSecondaryColor;"
+ "varying vec4 gl_TexCoord[];"
+ "varying float gl_FogFragCoord;"
+ "\n");
+ } else if (IncludeLegacy(version, profile, spvVersion)) {
+ stageBuiltins[EShLangVertex].append(
+ " vec4 gl_ClipVertex;" // needs qualifier fixed later
+ "out vec4 gl_FrontColor;"
+ "out vec4 gl_BackColor;"
+ "out vec4 gl_FrontSecondaryColor;"
+ "out vec4 gl_BackSecondaryColor;"
+ "out vec4 gl_TexCoord[];"
+ "out float gl_FogFragCoord;"
+ "\n");
+ }
+ stageBuiltins[EShLangVertex].append(
+ "vec4 gl_Position;" // needs qualifier fixed later
+ "float gl_PointSize;" // needs qualifier fixed later
+ );
+
+ if (version == 130 || version == 140)
+ stageBuiltins[EShLangVertex].append(
+ "out float gl_ClipDistance[];"
+ );
+ } else {
+ // version >= 150
+ stageBuiltins[EShLangVertex].append(
+ "out gl_PerVertex {"
+ "vec4 gl_Position;" // needs qualifier fixed later
+ "float gl_PointSize;" // needs qualifier fixed later
+ "float gl_ClipDistance[];"
+ );
+ if (IncludeLegacy(version, profile, spvVersion))
+ stageBuiltins[EShLangVertex].append(
+ "vec4 gl_ClipVertex;" // needs qualifier fixed later
+ "vec4 gl_FrontColor;"
+ "vec4 gl_BackColor;"
+ "vec4 gl_FrontSecondaryColor;"
+ "vec4 gl_BackSecondaryColor;"
+ "vec4 gl_TexCoord[];"
+ "float gl_FogFragCoord;"
+ );
+ if (version >= 450)
+ stageBuiltins[EShLangVertex].append(
+ "float gl_CullDistance[];"
+ );
+ stageBuiltins[EShLangVertex].append(
+ "};"
+ "\n");
+ }
+ if (version >= 130 && spvVersion.vulkan == 0)
+ stageBuiltins[EShLangVertex].append(
+ "int gl_VertexID;" // needs qualifier fixed later
+ );
+ if (version >= 140 && spvVersion.vulkan == 0)
+ stageBuiltins[EShLangVertex].append(
+ "int gl_InstanceID;" // needs qualifier fixed later
+ );
+ if (spvVersion.vulkan > 0 && version >= 140)
+ stageBuiltins[EShLangVertex].append(
+ "in int gl_VertexIndex;"
+ "in int gl_InstanceIndex;"
+ );
+ if (version >= 440) {
+ stageBuiltins[EShLangVertex].append(
+ "in int gl_BaseVertexARB;"
+ "in int gl_BaseInstanceARB;"
+ "in int gl_DrawIDARB;"
+ );
+ }
+ if (version >= 410) {
+ stageBuiltins[EShLangVertex].append(
+ "out int gl_ViewportIndex;"
+ "out int gl_Layer;"
+ );
+ }
+ if (version >= 460) {
+ stageBuiltins[EShLangVertex].append(
+ "in int gl_BaseVertex;"
+ "in int gl_BaseInstance;"
+ "in int gl_DrawID;"
+ );
+ }
+
+#ifdef NV_EXTENSIONS
+ if (version >= 450)
+ stageBuiltins[EShLangVertex].append(
+ "out int gl_ViewportMask[];" // GL_NV_viewport_array2
+ "out int gl_SecondaryViewportMaskNV[];" // GL_NV_stereo_view_rendering
+ "out vec4 gl_SecondaryPositionNV;" // GL_NV_stereo_view_rendering
+ "out vec4 gl_PositionPerViewNV[];" // GL_NVX_multiview_per_view_attributes
+ "out int gl_ViewportMaskPerViewNV[];" // GL_NVX_multiview_per_view_attributes
+ );
+#endif
+
+ } else {
+ // ES profile
+ if (version == 100) {
+ stageBuiltins[EShLangVertex].append(
+ "highp vec4 gl_Position;" // needs qualifier fixed later
+ "mediump float gl_PointSize;" // needs qualifier fixed later
+ );
+ } else {
+ if (spvVersion.vulkan == 0)
+ stageBuiltins[EShLangVertex].append(
+ "in highp int gl_VertexID;" // needs qualifier fixed later
+ "in highp int gl_InstanceID;" // needs qualifier fixed later
+ );
+ if (spvVersion.vulkan > 0)
+ stageBuiltins[EShLangVertex].append(
+ "in highp int gl_VertexIndex;"
+ "in highp int gl_InstanceIndex;"
+ );
+ if (version < 310)
+ stageBuiltins[EShLangVertex].append(
+ "highp vec4 gl_Position;" // needs qualifier fixed later
+ "highp float gl_PointSize;" // needs qualifier fixed later
+ );
+ else
+ stageBuiltins[EShLangVertex].append(
+ "out gl_PerVertex {"
+ "highp vec4 gl_Position;" // needs qualifier fixed later
+ "highp float gl_PointSize;" // needs qualifier fixed later
+ "};"
+ );
+ }
+ }
+
+ if ((profile != EEsProfile && version >= 140) ||
+ (profile == EEsProfile && version >= 310)) {
+ stageBuiltins[EShLangVertex].append(
+ "in highp int gl_DeviceIndex;" // GL_EXT_device_group
+ "in highp int gl_ViewIndex;" // GL_EXT_multiview
+ "\n");
+ }
+
+ if (version >= 300 /* both ES and non-ES */) {
+ stageBuiltins[EShLangVertex].append(
+ "in highp uint gl_ViewID_OVR;" // GL_OVR_multiview, GL_OVR_multiview2
+ "\n");
+ }
+
+
+ //============================================================================
+ //
+ // Define the interface to the geometry shader.
+ //
+ //============================================================================
+
+ if (profile == ECoreProfile || profile == ECompatibilityProfile) {
+ stageBuiltins[EShLangGeometry].append(
+ "in gl_PerVertex {"
+ "vec4 gl_Position;"
+ "float gl_PointSize;"
+ "float gl_ClipDistance[];"
+ );
+ if (profile == ECompatibilityProfile)
+ stageBuiltins[EShLangGeometry].append(
+ "vec4 gl_ClipVertex;"
+ "vec4 gl_FrontColor;"
+ "vec4 gl_BackColor;"
+ "vec4 gl_FrontSecondaryColor;"
+ "vec4 gl_BackSecondaryColor;"
+ "vec4 gl_TexCoord[];"
+ "float gl_FogFragCoord;"
+ );
+ if (version >= 450)
+ stageBuiltins[EShLangGeometry].append(
+ "float gl_CullDistance[];"
+#ifdef NV_EXTENSIONS
+ "vec4 gl_SecondaryPositionNV;" // GL_NV_stereo_view_rendering
+ "vec4 gl_PositionPerViewNV[];" // GL_NVX_multiview_per_view_attributes
+#endif
+ );
+ stageBuiltins[EShLangGeometry].append(
+ "} gl_in[];"
+
+ "in int gl_PrimitiveIDIn;"
+ "out gl_PerVertex {"
+ "vec4 gl_Position;"
+ "float gl_PointSize;"
+ "float gl_ClipDistance[];"
+ "\n");
+ if (profile == ECompatibilityProfile && version >= 400)
+ stageBuiltins[EShLangGeometry].append(
+ "vec4 gl_ClipVertex;"
+ "vec4 gl_FrontColor;"
+ "vec4 gl_BackColor;"
+ "vec4 gl_FrontSecondaryColor;"
+ "vec4 gl_BackSecondaryColor;"
+ "vec4 gl_TexCoord[];"
+ "float gl_FogFragCoord;"
+ );
+ if (version >= 450)
+ stageBuiltins[EShLangGeometry].append(
+ "float gl_CullDistance[];"
+ );
+ stageBuiltins[EShLangGeometry].append(
+ "};"
+
+ "out int gl_PrimitiveID;"
+ "out int gl_Layer;");
+
+ if (version >= 150)
+ stageBuiltins[EShLangGeometry].append(
+ "out int gl_ViewportIndex;"
+ );
+
+ if (profile == ECompatibilityProfile && version < 400)
+ stageBuiltins[EShLangGeometry].append(
+ "out vec4 gl_ClipVertex;"
+ );
+
+ if (version >= 400)
+ stageBuiltins[EShLangGeometry].append(
+ "in int gl_InvocationID;"
+ );
+
+#ifdef NV_EXTENSIONS
+ if (version >= 450)
+ stageBuiltins[EShLangGeometry].append(
+ "out int gl_ViewportMask[];" // GL_NV_viewport_array2
+ "out int gl_SecondaryViewportMaskNV[];" // GL_NV_stereo_view_rendering
+ "out vec4 gl_SecondaryPositionNV;" // GL_NV_stereo_view_rendering
+ "out vec4 gl_PositionPerViewNV[];" // GL_NVX_multiview_per_view_attributes
+ "out int gl_ViewportMaskPerViewNV[];" // GL_NVX_multiview_per_view_attributes
+ );
+#endif
+
+ stageBuiltins[EShLangGeometry].append("\n");
+ } else if (profile == EEsProfile && version >= 310) {
+ stageBuiltins[EShLangGeometry].append(
+ "in gl_PerVertex {"
+ "highp vec4 gl_Position;"
+ "highp float gl_PointSize;"
+ "} gl_in[];"
+ "\n"
+ "in highp int gl_PrimitiveIDIn;"
+ "in highp int gl_InvocationID;"
+ "\n"
+ "out gl_PerVertex {"
+ "highp vec4 gl_Position;"
+ "highp float gl_PointSize;"
+ "};"
+ "\n"
+ "out highp int gl_PrimitiveID;"
+ "out highp int gl_Layer;"
+ "\n"
+ );
+ }
+
+ if ((profile != EEsProfile && version >= 140) ||
+ (profile == EEsProfile && version >= 310)) {
+ stageBuiltins[EShLangGeometry].append(
+ "in highp int gl_DeviceIndex;" // GL_EXT_device_group
+ "in highp int gl_ViewIndex;" // GL_EXT_multiview
+ "\n");
+ }
+
+ //============================================================================
+ //
+ // Define the interface to the tessellation control shader.
+ //
+ //============================================================================
+
+ if (profile != EEsProfile && version >= 150) {
+ // Note: "in gl_PerVertex {...} gl_in[gl_MaxPatchVertices];" is declared in initialize() below,
+ // as it depends on the resource sizing of gl_MaxPatchVertices.
+
+ stageBuiltins[EShLangTessControl].append(
+ "in int gl_PatchVerticesIn;"
+ "in int gl_PrimitiveID;"
+ "in int gl_InvocationID;"
+
+ "out gl_PerVertex {"
+ "vec4 gl_Position;"
+ "float gl_PointSize;"
+ "float gl_ClipDistance[];"
+ );
+ if (profile == ECompatibilityProfile)
+ stageBuiltins[EShLangTessControl].append(
+ "vec4 gl_ClipVertex;"
+ "vec4 gl_FrontColor;"
+ "vec4 gl_BackColor;"
+ "vec4 gl_FrontSecondaryColor;"
+ "vec4 gl_BackSecondaryColor;"
+ "vec4 gl_TexCoord[];"
+ "float gl_FogFragCoord;"
+ );
+ if (version >= 450)
+ stageBuiltins[EShLangTessControl].append(
+ "float gl_CullDistance[];"
+#ifdef NV_EXTENSIONS
+ "int gl_ViewportMask[];" // GL_NV_viewport_array2
+ "vec4 gl_SecondaryPositionNV;" // GL_NV_stereo_view_rendering
+ "int gl_SecondaryViewportMaskNV[];" // GL_NV_stereo_view_rendering
+ "vec4 gl_PositionPerViewNV[];" // GL_NVX_multiview_per_view_attributes
+ "int gl_ViewportMaskPerViewNV[];" // GL_NVX_multiview_per_view_attributes
+#endif
+ );
+ stageBuiltins[EShLangTessControl].append(
+ "} gl_out[];"
+
+ "patch out float gl_TessLevelOuter[4];"
+ "patch out float gl_TessLevelInner[2];"
+ "\n");
+
+ if (version >= 410)
+ stageBuiltins[EShLangTessControl].append(
+ "out int gl_ViewportIndex;"
+ "out int gl_Layer;"
+ "\n");
+
+ } else {
+ // Note: "in gl_PerVertex {...} gl_in[gl_MaxPatchVertices];" is declared in initialize() below,
+ // as it depends on the resource sizing of gl_MaxPatchVertices.
+
+ stageBuiltins[EShLangTessControl].append(
+ "in highp int gl_PatchVerticesIn;"
+ "in highp int gl_PrimitiveID;"
+ "in highp int gl_InvocationID;"
+
+ "out gl_PerVertex {"
+ "highp vec4 gl_Position;"
+ "highp float gl_PointSize;"
+ );
+ stageBuiltins[EShLangTessControl].append(
+ "} gl_out[];"
+
+ "patch out highp float gl_TessLevelOuter[4];"
+ "patch out highp float gl_TessLevelInner[2];"
+ "patch out highp vec4 gl_BoundingBoxOES[2];"
+ "patch out highp vec4 gl_BoundingBoxEXT[2];"
+ "\n");
+ if (profile == EEsProfile && version >= 320) {
+ stageBuiltins[EShLangTessControl].append(
+ "patch out highp vec4 gl_BoundingBox[2];"
+ "\n"
+ );
+ }
+ }
+
+ if ((profile != EEsProfile && version >= 140) ||
+ (profile == EEsProfile && version >= 310)) {
+ stageBuiltins[EShLangTessControl].append(
+ "in highp int gl_DeviceIndex;" // GL_EXT_device_group
+ "in highp int gl_ViewIndex;" // GL_EXT_multiview
+ "\n");
+ }
+
+ //============================================================================
+ //
+ // Define the interface to the tessellation evaluation shader.
+ //
+ //============================================================================
+
+ if (profile != EEsProfile && version >= 150) {
+ // Note: "in gl_PerVertex {...} gl_in[gl_MaxPatchVertices];" is declared in initialize() below,
+ // as it depends on the resource sizing of gl_MaxPatchVertices.
+
+ stageBuiltins[EShLangTessEvaluation].append(
+ "in int gl_PatchVerticesIn;"
+ "in int gl_PrimitiveID;"
+ "in vec3 gl_TessCoord;"
+
+ "patch in float gl_TessLevelOuter[4];"
+ "patch in float gl_TessLevelInner[2];"
+
+ "out gl_PerVertex {"
+ "vec4 gl_Position;"
+ "float gl_PointSize;"
+ "float gl_ClipDistance[];"
+ );
+ if (version >= 400 && profile == ECompatibilityProfile)
+ stageBuiltins[EShLangTessEvaluation].append(
+ "vec4 gl_ClipVertex;"
+ "vec4 gl_FrontColor;"
+ "vec4 gl_BackColor;"
+ "vec4 gl_FrontSecondaryColor;"
+ "vec4 gl_BackSecondaryColor;"
+ "vec4 gl_TexCoord[];"
+ "float gl_FogFragCoord;"
+ );
+ if (version >= 450)
+ stageBuiltins[EShLangTessEvaluation].append(
+ "float gl_CullDistance[];"
+ );
+ stageBuiltins[EShLangTessEvaluation].append(
+ "};"
+ "\n");
+
+ if (version >= 410)
+ stageBuiltins[EShLangTessEvaluation].append(
+ "out int gl_ViewportIndex;"
+ "out int gl_Layer;"
+ "\n");
+
+#ifdef NV_EXTENSIONS
+ if (version >= 450)
+ stageBuiltins[EShLangTessEvaluation].append(
+ "out int gl_ViewportMask[];" // GL_NV_viewport_array2
+ "out vec4 gl_SecondaryPositionNV;" // GL_NV_stereo_view_rendering
+ "out int gl_SecondaryViewportMaskNV[];" // GL_NV_stereo_view_rendering
+ "out vec4 gl_PositionPerViewNV[];" // GL_NVX_multiview_per_view_attributes
+ "out int gl_ViewportMaskPerViewNV[];" // GL_NVX_multiview_per_view_attributes
+ );
+#endif
+
+ } else if (profile == EEsProfile && version >= 310) {
+ // Note: "in gl_PerVertex {...} gl_in[gl_MaxPatchVertices];" is declared in initialize() below,
+ // as it depends on the resource sizing of gl_MaxPatchVertices.
+
+ stageBuiltins[EShLangTessEvaluation].append(
+ "in highp int gl_PatchVerticesIn;"
+ "in highp int gl_PrimitiveID;"
+ "in highp vec3 gl_TessCoord;"
+
+ "patch in highp float gl_TessLevelOuter[4];"
+ "patch in highp float gl_TessLevelInner[2];"
+
+ "out gl_PerVertex {"
+ "highp vec4 gl_Position;"
+ "highp float gl_PointSize;"
+ );
+ stageBuiltins[EShLangTessEvaluation].append(
+ "};"
+ "\n");
+ }
+
+ if ((profile != EEsProfile && version >= 140) ||
+ (profile == EEsProfile && version >= 310)) {
+ stageBuiltins[EShLangTessEvaluation].append(
+ "in highp int gl_DeviceIndex;" // GL_EXT_device_group
+ "in highp int gl_ViewIndex;" // GL_EXT_multiview
+ "\n");
+ }
+
+ //============================================================================
+ //
+ // Define the interface to the fragment shader.
+ //
+ //============================================================================
+
+ if (profile != EEsProfile) {
+
+ stageBuiltins[EShLangFragment].append(
+ "vec4 gl_FragCoord;" // needs qualifier fixed later
+ "bool gl_FrontFacing;" // needs qualifier fixed later
+ "float gl_FragDepth;" // needs qualifier fixed later
+ );
+ if (version >= 120)
+ stageBuiltins[EShLangFragment].append(
+ "vec2 gl_PointCoord;" // needs qualifier fixed later
+ );
+ if (version >= 140)
+ stageBuiltins[EShLangFragment].append(
+ "out int gl_FragStencilRefARB;"
+ );
+ if (IncludeLegacy(version, profile, spvVersion) || (! ForwardCompatibility && version < 420))
+ stageBuiltins[EShLangFragment].append(
+ "vec4 gl_FragColor;" // needs qualifier fixed later
+ );
+
+ if (version < 130) {
+ stageBuiltins[EShLangFragment].append(
+ "varying vec4 gl_Color;"
+ "varying vec4 gl_SecondaryColor;"
+ "varying vec4 gl_TexCoord[];"
+ "varying float gl_FogFragCoord;"
+ );
+ } else {
+ stageBuiltins[EShLangFragment].append(
+ "in float gl_ClipDistance[];"
+ );
+
+ if (IncludeLegacy(version, profile, spvVersion)) {
+ if (version < 150)
+ stageBuiltins[EShLangFragment].append(
+ "in float gl_FogFragCoord;"
+ "in vec4 gl_TexCoord[];"
+ "in vec4 gl_Color;"
+ "in vec4 gl_SecondaryColor;"
+ );
+ else
+ stageBuiltins[EShLangFragment].append(
+ "in gl_PerFragment {"
+ "in float gl_FogFragCoord;"
+ "in vec4 gl_TexCoord[];"
+ "in vec4 gl_Color;"
+ "in vec4 gl_SecondaryColor;"
+ "};"
+ );
+ }
+ }
+
+ if (version >= 150)
+ stageBuiltins[EShLangFragment].append(
+ "flat in int gl_PrimitiveID;"
+ );
+
+ if (version >= 400) {
+ stageBuiltins[EShLangFragment].append(
+ "flat in int gl_SampleID;"
+ " in vec2 gl_SamplePosition;"
+ "flat in int gl_SampleMaskIn[];"
+ " out int gl_SampleMask[];"
+ );
+ if (spvVersion.spv == 0)
+ stageBuiltins[EShLangFragment].append(
+ "uniform int gl_NumSamples;"
+ );
+ }
+
+ if (version >= 430)
+ stageBuiltins[EShLangFragment].append(
+ "flat in int gl_Layer;"
+ "flat in int gl_ViewportIndex;"
+ );
+
+ if (version >= 450)
+ stageBuiltins[EShLangFragment].append(
+ "in float gl_CullDistance[];"
+ "bool gl_HelperInvocation;" // needs qualifier fixed later
+ );
+
+ if (version >= 450)
+ stageBuiltins[EShLangFragment].append( // GL_EXT_fragment_invocation_density
+ "flat in ivec2 gl_FragSizeEXT;"
+ "flat in int gl_FragInvocationCountEXT;"
+ );
+
+#ifdef AMD_EXTENSIONS
+ if (version >= 450)
+ stageBuiltins[EShLangFragment].append(
+ "in vec2 gl_BaryCoordNoPerspAMD;"
+ "in vec2 gl_BaryCoordNoPerspCentroidAMD;"
+ "in vec2 gl_BaryCoordNoPerspSampleAMD;"
+ "in vec2 gl_BaryCoordSmoothAMD;"
+ "in vec2 gl_BaryCoordSmoothCentroidAMD;"
+ "in vec2 gl_BaryCoordSmoothSampleAMD;"
+ "in vec3 gl_BaryCoordPullModelAMD;"
+ );
+#endif
+
+#ifdef NV_EXTENSIONS
+ if (version >= 430)
+ stageBuiltins[EShLangFragment].append(
+ "in bool gl_FragFullyCoveredNV;"
+ );
+ if (version >= 450)
+ stageBuiltins[EShLangFragment].append(
+ "flat in ivec2 gl_FragmentSizeNV;" // GL_NV_shading_rate_image
+ "flat in int gl_InvocationsPerPixelNV;"
+ "in vec3 gl_BaryCoordNV;" // GL_NV_fragment_shader_barycentric
+ "in vec3 gl_BaryCoordNoPerspNV;"
+ );
+
+#endif
+ } else {
+ // ES profile
+
+ if (version == 100) {
+ stageBuiltins[EShLangFragment].append(
+ "mediump vec4 gl_FragCoord;" // needs qualifier fixed later
+ " bool gl_FrontFacing;" // needs qualifier fixed later
+ "mediump vec4 gl_FragColor;" // needs qualifier fixed later
+ "mediump vec2 gl_PointCoord;" // needs qualifier fixed later
+ );
+ }
+ if (version >= 300) {
+ stageBuiltins[EShLangFragment].append(
+ "highp vec4 gl_FragCoord;" // needs qualifier fixed later
+ " bool gl_FrontFacing;" // needs qualifier fixed later
+ "mediump vec2 gl_PointCoord;" // needs qualifier fixed later
+ "highp float gl_FragDepth;" // needs qualifier fixed later
+ );
+ }
+ if (version >= 310) {
+ stageBuiltins[EShLangFragment].append(
+ "bool gl_HelperInvocation;" // needs qualifier fixed later
+ "flat in highp int gl_PrimitiveID;" // needs qualifier fixed later
+ "flat in highp int gl_Layer;" // needs qualifier fixed later
+ );
+
+ stageBuiltins[EShLangFragment].append( // GL_OES_sample_variables
+ "flat in lowp int gl_SampleID;"
+ " in mediump vec2 gl_SamplePosition;"
+ "flat in highp int gl_SampleMaskIn[];"
+ " out highp int gl_SampleMask[];"
+ );
+ if (spvVersion.spv == 0)
+ stageBuiltins[EShLangFragment].append( // GL_OES_sample_variables
+ "uniform lowp int gl_NumSamples;"
+ );
+ }
+ stageBuiltins[EShLangFragment].append(
+ "highp float gl_FragDepthEXT;" // GL_EXT_frag_depth
+ );
+
+ if (version >= 310)
+ stageBuiltins[EShLangFragment].append( // GL_EXT_fragment_invocation_density
+ "flat in ivec2 gl_FragSizeEXT;"
+ "flat in int gl_FragInvocationCountEXT;"
+ );
+#ifdef NV_EXTENSIONS
+ if (version >= 320)
+ stageBuiltins[EShLangFragment].append( // GL_NV_shading_rate_image
+ "flat in ivec2 gl_FragmentSizeNV;"
+ "flat in int gl_InvocationsPerPixelNV;"
+ );
+ if (version >= 320)
+ stageBuiltins[EShLangFragment].append(
+ "in vec3 gl_BaryCoordNV;"
+ "in vec3 gl_BaryCoordNoPerspNV;"
+ );
+#endif
+
+ }
+ stageBuiltins[EShLangFragment].append("\n");
+
+ if (version >= 130)
+ add2ndGenerationSamplingImaging(version, profile, spvVersion);
+
+ // GL_ARB_shader_ballot
+ if (profile != EEsProfile && version >= 450) {
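+        // The fragment-stage copies below add "flat", since integer and 64-bit
+        // fragment inputs cannot be interpolated.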
+ const char* ballotDecls =
+ "uniform uint gl_SubGroupSizeARB;"
+ "in uint gl_SubGroupInvocationARB;"
+ "in uint64_t gl_SubGroupEqMaskARB;"
+ "in uint64_t gl_SubGroupGeMaskARB;"
+ "in uint64_t gl_SubGroupGtMaskARB;"
+ "in uint64_t gl_SubGroupLeMaskARB;"
+ "in uint64_t gl_SubGroupLtMaskARB;"
+ "\n";
+ const char* fragmentBallotDecls =
+ "uniform uint gl_SubGroupSizeARB;"
+ "flat in uint gl_SubGroupInvocationARB;"
+ "flat in uint64_t gl_SubGroupEqMaskARB;"
+ "flat in uint64_t gl_SubGroupGeMaskARB;"
+ "flat in uint64_t gl_SubGroupGtMaskARB;"
+ "flat in uint64_t gl_SubGroupLeMaskARB;"
+ "flat in uint64_t gl_SubGroupLtMaskARB;"
+ "\n";
+ stageBuiltins[EShLangVertex] .append(ballotDecls);
+ stageBuiltins[EShLangTessControl] .append(ballotDecls);
+ stageBuiltins[EShLangTessEvaluation].append(ballotDecls);
+ stageBuiltins[EShLangGeometry] .append(ballotDecls);
+ stageBuiltins[EShLangCompute] .append(ballotDecls);
+ stageBuiltins[EShLangFragment] .append(fragmentBallotDecls);
+#ifdef NV_EXTENSIONS
+ stageBuiltins[EShLangMeshNV] .append(ballotDecls);
+ stageBuiltins[EShLangTaskNV] .append(ballotDecls);
+#endif
+ }
+
+ if ((profile != EEsProfile && version >= 140) ||
+ (profile == EEsProfile && version >= 310)) {
+ stageBuiltins[EShLangFragment].append(
+ "flat in highp int gl_DeviceIndex;" // GL_EXT_device_group
+ "flat in highp int gl_ViewIndex;" // GL_EXT_multiview
+ "\n");
+ }
+
+ // GL_KHR_shader_subgroup
+ if ((profile == EEsProfile && version >= 310) ||
+ (profile != EEsProfile && version >= 140)) {
+ const char* ballotDecls =
+ "in mediump uint gl_SubgroupSize;"
+ "in mediump uint gl_SubgroupInvocationID;"
+ "in highp uvec4 gl_SubgroupEqMask;"
+ "in highp uvec4 gl_SubgroupGeMask;"
+ "in highp uvec4 gl_SubgroupGtMask;"
+ "in highp uvec4 gl_SubgroupLeMask;"
+ "in highp uvec4 gl_SubgroupLtMask;"
+ "\n";
+ const char* fragmentBallotDecls =
+ "flat in mediump uint gl_SubgroupSize;"
+ "flat in mediump uint gl_SubgroupInvocationID;"
+ "flat in highp uvec4 gl_SubgroupEqMask;"
+ "flat in highp uvec4 gl_SubgroupGeMask;"
+ "flat in highp uvec4 gl_SubgroupGtMask;"
+ "flat in highp uvec4 gl_SubgroupLeMask;"
+ "flat in highp uvec4 gl_SubgroupLtMask;"
+ "\n";
+ stageBuiltins[EShLangVertex] .append(ballotDecls);
+ stageBuiltins[EShLangTessControl] .append(ballotDecls);
+ stageBuiltins[EShLangTessEvaluation].append(ballotDecls);
+ stageBuiltins[EShLangGeometry] .append(ballotDecls);
+ stageBuiltins[EShLangCompute] .append(ballotDecls);
+ stageBuiltins[EShLangFragment] .append(fragmentBallotDecls);
+#ifdef NV_EXTENSIONS
+ stageBuiltins[EShLangMeshNV] .append(ballotDecls);
+ stageBuiltins[EShLangTaskNV] .append(ballotDecls);
+#endif
+
+ stageBuiltins[EShLangCompute].append(
+ "highp in uint gl_NumSubgroups;"
+ "highp in uint gl_SubgroupID;"
+ "\n");
+#ifdef NV_EXTENSIONS
+ stageBuiltins[EShLangMeshNV].append(
+ "highp in uint gl_NumSubgroups;"
+ "highp in uint gl_SubgroupID;"
+ "\n");
+ stageBuiltins[EShLangTaskNV].append(
+ "highp in uint gl_NumSubgroups;"
+ "highp in uint gl_SubgroupID;"
+ "\n");
+#endif
+ }
+
+#ifdef NV_EXTENSIONS
+ // GL_NV_ray_tracing
+ if (profile != EEsProfile && version >= 460) {
+
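+        // These ray-flag constants mirror the SPIR-V RayFlags bit values
+        // consumed by traceNV() (GL_NV_ray_tracing).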
+ const char *constRayFlags =
+ "const uint gl_RayFlagsNoneNV = 0U;"
+ "const uint gl_RayFlagsOpaqueNV = 1U;"
+ "const uint gl_RayFlagsNoOpaqueNV = 2U;"
+ "const uint gl_RayFlagsTerminateOnFirstHitNV = 4U;"
+ "const uint gl_RayFlagsSkipClosestHitShaderNV = 8U;"
+ "const uint gl_RayFlagsCullBackFacingTrianglesNV = 16U;"
+ "const uint gl_RayFlagsCullFrontFacingTrianglesNV = 32U;"
+ "const uint gl_RayFlagsCullOpaqueNV = 64U;"
+ "const uint gl_RayFlagsCullNoOpaqueNV = 128U;"
+ "\n";
+ const char *rayGenDecls =
+ "in uvec3 gl_LaunchIDNV;"
+ "in uvec3 gl_LaunchSizeNV;"
+ "\n";
+ const char *intersectDecls =
+ "in uvec3 gl_LaunchIDNV;"
+ "in uvec3 gl_LaunchSizeNV;"
+ "in int gl_PrimitiveID;"
+ "in int gl_InstanceID;"
+ "in int gl_InstanceCustomIndexNV;"
+ "in vec3 gl_WorldRayOriginNV;"
+ "in vec3 gl_WorldRayDirectionNV;"
+ "in vec3 gl_ObjectRayOriginNV;"
+ "in vec3 gl_ObjectRayDirectionNV;"
+ "in float gl_RayTminNV;"
+ "in float gl_RayTmaxNV;"
+ "in mat4x3 gl_ObjectToWorldNV;"
+ "in mat4x3 gl_WorldToObjectNV;"
+ "in uint gl_IncomingRayFlagsNV;"
+ "\n";
+ const char *hitDecls =
+ "in uvec3 gl_LaunchIDNV;"
+ "in uvec3 gl_LaunchSizeNV;"
+ "in int gl_PrimitiveID;"
+ "in int gl_InstanceID;"
+ "in int gl_InstanceCustomIndexNV;"
+ "in vec3 gl_WorldRayOriginNV;"
+ "in vec3 gl_WorldRayDirectionNV;"
+ "in vec3 gl_ObjectRayOriginNV;"
+ "in vec3 gl_ObjectRayDirectionNV;"
+ "in float gl_RayTminNV;"
+ "in float gl_RayTmaxNV;"
+ "in float gl_HitTNV;"
+ "in uint gl_HitKindNV;"
+ "in mat4x3 gl_ObjectToWorldNV;"
+ "in mat4x3 gl_WorldToObjectNV;"
+ "in uint gl_IncomingRayFlagsNV;"
+ "\n";
+ const char *missDecls =
+ "in uvec3 gl_LaunchIDNV;"
+ "in uvec3 gl_LaunchSizeNV;"
+ "in vec3 gl_WorldRayOriginNV;"
+ "in vec3 gl_WorldRayDirectionNV;"
+ "in vec3 gl_ObjectRayOriginNV;"
+ "in vec3 gl_ObjectRayDirectionNV;"
+ "in float gl_RayTminNV;"
+ "in float gl_RayTmaxNV;"
+ "in uint gl_IncomingRayFlagsNV;"
+ "\n";
+
+ const char *callableDecls =
+ "in uvec3 gl_LaunchIDNV;"
+ "in uvec3 gl_LaunchSizeNV;"
+ "\n";
+
+ stageBuiltins[EShLangRayGenNV].append(rayGenDecls);
+ stageBuiltins[EShLangRayGenNV].append(constRayFlags);
+
+ stageBuiltins[EShLangIntersectNV].append(intersectDecls);
+ stageBuiltins[EShLangIntersectNV].append(constRayFlags);
+
+ stageBuiltins[EShLangAnyHitNV].append(hitDecls);
+ stageBuiltins[EShLangAnyHitNV].append(constRayFlags);
+
+ stageBuiltins[EShLangClosestHitNV].append(hitDecls);
+ stageBuiltins[EShLangClosestHitNV].append(constRayFlags);
+
+ stageBuiltins[EShLangMissNV].append(missDecls);
+ stageBuiltins[EShLangMissNV].append(constRayFlags);
+
+ stageBuiltins[EShLangCallableNV].append(callableDecls);
+ stageBuiltins[EShLangCallableNV].append(constRayFlags);
+
+ }
+    if (profile != EEsProfile && version >= 140) {
+ const char *deviceIndex =
+ "in highp int gl_DeviceIndex;" // GL_EXT_device_group
+ "\n";
+
+ stageBuiltins[EShLangRayGenNV].append(deviceIndex);
+ stageBuiltins[EShLangIntersectNV].append(deviceIndex);
+ stageBuiltins[EShLangAnyHitNV].append(deviceIndex);
+ stageBuiltins[EShLangClosestHitNV].append(deviceIndex);
+ stageBuiltins[EShLangMissNV].append(deviceIndex);
+ }
+#endif
+
+ if (version >= 300 /* both ES and non-ES */) {
+ stageBuiltins[EShLangFragment].append(
+ "flat in highp uint gl_ViewID_OVR;" // GL_OVR_multiview, GL_OVR_multiview2
+ "\n");
+ }
+
+ if ((profile != EEsProfile && version >= 420) ||
+ (profile == EEsProfile && version >= 310)) {
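+        // GL_KHR_memory_scope_semantics: these constants mirror the SPIR-V
+        // Scope and MemorySemantics enumerant values.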
+ commonBuiltins.append("const int gl_ScopeDevice = 1;\n");
+ commonBuiltins.append("const int gl_ScopeWorkgroup = 2;\n");
+ commonBuiltins.append("const int gl_ScopeSubgroup = 3;\n");
+ commonBuiltins.append("const int gl_ScopeInvocation = 4;\n");
+ commonBuiltins.append("const int gl_ScopeQueueFamily = 5;\n");
+
+ commonBuiltins.append("const int gl_SemanticsRelaxed = 0x0;\n");
+ commonBuiltins.append("const int gl_SemanticsAcquire = 0x2;\n");
+ commonBuiltins.append("const int gl_SemanticsRelease = 0x4;\n");
+ commonBuiltins.append("const int gl_SemanticsAcquireRelease = 0x8;\n");
+ commonBuiltins.append("const int gl_SemanticsMakeAvailable = 0x2000;\n");
+ commonBuiltins.append("const int gl_SemanticsMakeVisible = 0x4000;\n");
+
+ commonBuiltins.append("const int gl_StorageSemanticsNone = 0x0;\n");
+ commonBuiltins.append("const int gl_StorageSemanticsBuffer = 0x40;\n");
+ commonBuiltins.append("const int gl_StorageSemanticsShared = 0x100;\n");
+ commonBuiltins.append("const int gl_StorageSemanticsImage = 0x800;\n");
+ commonBuiltins.append("const int gl_StorageSemanticsOutput = 0x1000;\n");
+ }
+
+ // printf("%s\n", commonBuiltins.c_str());
+ // printf("%s\n", stageBuiltins[EShLangFragment].c_str());
+}
+
+//
+// Helper function for initialize(), to add the second set of names for texturing,
+// when adding context-independent built-in functions.
+//
+void TBuiltIns::add2ndGenerationSamplingImaging(int version, EProfile profile, const SpvVersion& spvVersion)
+{
+ //
+    // This function proper enumerates the types, then calls the next set of
+    // functions to enumerate all the uses for each type.
+ //
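+    // For a float, non-shadow, non-arrayed 2D sampler, for example, the loops
+    // below visit "sampler2D" and the helpers then emit prototypes such as
+    // "vec4 texture(sampler2D,vec2);" and "ivec2 textureSize(sampler2D,int);"
+    // (illustrative only; the exact set depends on version and profile).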
+#ifdef AMD_EXTENSIONS
+ TBasicType bTypes[4] = { EbtFloat, EbtFloat16, EbtInt, EbtUint };
+#else
+ TBasicType bTypes[3] = { EbtFloat, EbtInt, EbtUint };
+#endif
+ bool skipBuffer = (profile == EEsProfile && version < 310) || (profile != EEsProfile && version < 140);
+ bool skipCubeArrayed = (profile == EEsProfile && version < 310) || (profile != EEsProfile && version < 130);
+
+ // enumerate all the types
+ for (int image = 0; image <= 1; ++image) { // loop over "bool" image vs sampler
+
+ for (int shadow = 0; shadow <= 1; ++shadow) { // loop over "bool" shadow or not
+            for (int ms = 0; ms <= 1; ++ms) {
+ if ((ms || image) && shadow)
+ continue;
+ if (ms && profile != EEsProfile && version < 150)
+ continue;
+ if (ms && image && profile == EEsProfile)
+ continue;
+ if (ms && profile == EEsProfile && version < 310)
+ continue;
+
+ for (int arrayed = 0; arrayed <= 1; ++arrayed) { // loop over "bool" arrayed or not
+ for (int dim = Esd1D; dim < EsdNumDims; ++dim) { // 1D, 2D, ..., buffer
+ if (dim == EsdSubpass && spvVersion.vulkan == 0)
+ continue;
+ if (dim == EsdSubpass && (image || shadow || arrayed))
+ continue;
+ if ((dim == Esd1D || dim == EsdRect) && profile == EEsProfile)
+ continue;
+ if (dim != Esd2D && dim != EsdSubpass && ms)
+ continue;
+ if ((dim == Esd3D || dim == EsdRect) && arrayed)
+ continue;
+ if (dim == Esd3D && shadow)
+ continue;
+ if (dim == EsdCube && arrayed && skipCubeArrayed)
+ continue;
+ if (dim == EsdBuffer && skipBuffer)
+ continue;
+ if (dim == EsdBuffer && (shadow || arrayed || ms))
+ continue;
+ if (ms && arrayed && profile == EEsProfile && version < 310)
+ continue;
+#ifdef AMD_EXTENSIONS
+ for (int bType = 0; bType < 4; ++bType) { // float, float16, int, uint results
+
+ if (shadow && bType > 1)
+ continue;
+
+                        if (bTypes[bType] == EbtFloat16 && (profile == EEsProfile || version < 450))
+ continue;
+#else
+ for (int bType = 0; bType < 3; ++bType) { // float, int, uint results
+
+ if (shadow && bType > 0)
+ continue;
+#endif
+ if (dim == EsdRect && version < 140 && bType > 0)
+ continue;
+
+ //
+ // Now, make all the function prototypes for the type we just built...
+ //
+
+ TSampler sampler;
+ if (dim == EsdSubpass) {
+ sampler.setSubpass(bTypes[bType], ms ? true : false);
+ } else if (image) {
+ sampler.setImage(bTypes[bType], (TSamplerDim)dim, arrayed ? true : false,
+ shadow ? true : false,
+ ms ? true : false);
+ } else {
+ sampler.set(bTypes[bType], (TSamplerDim)dim, arrayed ? true : false,
+ shadow ? true : false,
+ ms ? true : false);
+ }
+
+ TString typeName = sampler.getString();
+
+ if (dim == EsdSubpass) {
+ addSubpassSampling(sampler, typeName, version, profile);
+ continue;
+ }
+
+ addQueryFunctions(sampler, typeName, version, profile);
+
+ if (image)
+ addImageFunctions(sampler, typeName, version, profile);
+ else {
+ addSamplingFunctions(sampler, typeName, version, profile);
+ addGatherFunctions(sampler, typeName, version, profile);
+
+ if (spvVersion.vulkan > 0 && sampler.isCombined() && !sampler.shadow) {
+ // Base Vulkan allows texelFetch() for
+ // textureBuffer (i.e. without sampler).
+ //
+ // GL_EXT_samplerless_texture_functions
+ // allows texelFetch() and query functions
+ // (other than textureQueryLod()) for all
+ // texture types.
+ sampler.setTexture(sampler.type, sampler.dim, sampler.arrayed, sampler.shadow,
+ sampler.ms);
+ TString textureTypeName = sampler.getString();
+ addSamplingFunctions(sampler, textureTypeName, version, profile);
+ addQueryFunctions(sampler, textureTypeName, version, profile);
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+
+ //
+ // sparseTexelsResidentARB()
+ //
+
+ if (profile != EEsProfile && version >= 450) {
+ commonBuiltins.append("bool sparseTexelsResidentARB(int code);\n");
+ }
+}
+
+//
+// Helper function for add2ndGenerationSamplingImaging(),
+// when adding context-independent built-in functions.
+//
+// Add all the query functions for the given type.
+//
+void TBuiltIns::addQueryFunctions(TSampler sampler, const TString& typeName, int version, EProfile profile)
+{
+ if (sampler.image && ((profile == EEsProfile && version < 310) || (profile != EEsProfile && version < 430)))
+ return;
+
+ //
+ // textureSize() and imageSize()
+ //
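+    // e.g. for sampler2D this appends "ivec2 textureSize(sampler2D,int);", and
+    // for image2D "ivec2 imageSize(readonly writeonly volatile coherent image2D);"
+    // (illustrative; the exact signature follows the dimensionality logic below).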
+
+ int sizeDims = dimMap[sampler.dim] + (sampler.arrayed ? 1 : 0) - (sampler.dim == EsdCube ? 1 : 0);
+ if (profile == EEsProfile)
+ commonBuiltins.append("highp ");
+ if (sizeDims == 1)
+ commonBuiltins.append("int");
+ else {
+ commonBuiltins.append("ivec");
+ commonBuiltins.append(postfixes[sizeDims]);
+ }
+ if (sampler.image)
+ commonBuiltins.append(" imageSize(readonly writeonly volatile coherent ");
+ else
+ commonBuiltins.append(" textureSize(");
+ commonBuiltins.append(typeName);
+ if (! sampler.image && sampler.dim != EsdRect && sampler.dim != EsdBuffer && ! sampler.ms)
+ commonBuiltins.append(",int);\n");
+ else
+ commonBuiltins.append(");\n");
+
+ //
+ // textureSamples() and imageSamples()
+ //
+
+ // GL_ARB_shader_texture_image_samples
+ // TODO: spec issue? there are no memory qualifiers; how to query a writeonly/readonly image, etc?
+ if (profile != EEsProfile && version >= 430 && sampler.ms) {
+ commonBuiltins.append("int ");
+ if (sampler.image)
+ commonBuiltins.append("imageSamples(readonly writeonly volatile coherent ");
+ else
+ commonBuiltins.append("textureSamples(");
+ commonBuiltins.append(typeName);
+ commonBuiltins.append(");\n");
+ }
+
+ //
+    // textureQueryLod(), fragment stage only (also added for compute under NV_EXTENSIONS)
+ //
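+    // e.g. "vec2 textureQueryLod(sampler2D, vec2);" (illustrative)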
+
+ if (profile != EEsProfile && version >= 400 && sampler.combined && sampler.dim != EsdRect && ! sampler.ms && sampler.dim != EsdBuffer) {
+#ifdef AMD_EXTENSIONS
+ for (int f16TexAddr = 0; f16TexAddr < 2; ++f16TexAddr) {
+ if (f16TexAddr && sampler.type != EbtFloat16)
+ continue;
+#endif
+ stageBuiltins[EShLangFragment].append("vec2 textureQueryLod(");
+ stageBuiltins[EShLangFragment].append(typeName);
+ if (dimMap[sampler.dim] == 1)
+#ifdef AMD_EXTENSIONS
+ if (f16TexAddr)
+ stageBuiltins[EShLangFragment].append(", float16_t");
+ else
+ stageBuiltins[EShLangFragment].append(", float");
+#else
+ stageBuiltins[EShLangFragment].append(", float");
+#endif
+ else {
+#ifdef AMD_EXTENSIONS
+ if (f16TexAddr)
+ stageBuiltins[EShLangFragment].append(", f16vec");
+ else
+ stageBuiltins[EShLangFragment].append(", vec");
+#else
+ stageBuiltins[EShLangFragment].append(", vec");
+#endif
+ stageBuiltins[EShLangFragment].append(postfixes[dimMap[sampler.dim]]);
+ }
+ stageBuiltins[EShLangFragment].append(");\n");
+#ifdef AMD_EXTENSIONS
+ }
+#endif
+
+#ifdef NV_EXTENSIONS
+ stageBuiltins[EShLangCompute].append("vec2 textureQueryLod(");
+ stageBuiltins[EShLangCompute].append(typeName);
+ if (dimMap[sampler.dim] == 1)
+ stageBuiltins[EShLangCompute].append(", float");
+ else {
+ stageBuiltins[EShLangCompute].append(", vec");
+ stageBuiltins[EShLangCompute].append(postfixes[dimMap[sampler.dim]]);
+ }
+ stageBuiltins[EShLangCompute].append(");\n");
+#endif
+ }
+
+ //
+ // textureQueryLevels()
+ //
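+    // e.g. "int textureQueryLevels(sampler2D);" (illustrative)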
+
+ if (profile != EEsProfile && version >= 430 && ! sampler.image && sampler.dim != EsdRect && ! sampler.ms && sampler.dim != EsdBuffer) {
+ commonBuiltins.append("int textureQueryLevels(");
+ commonBuiltins.append(typeName);
+ commonBuiltins.append(");\n");
+ }
+}
+
+//
+// Helper function for add2ndGenerationSamplingImaging(),
+// when adding context-independent built-in functions.
+//
+// Add all the image access functions for the given type.
+//
+void TBuiltIns::addImageFunctions(TSampler sampler, const TString& typeName, int version, EProfile profile)
+{
+ int dims = dimMap[sampler.dim];
+ // most things with an array add a dimension, except for cubemaps
+ if (sampler.arrayed && sampler.dim != EsdCube)
+ ++dims;
+
+ TString imageParams = typeName;
+ if (dims == 1)
+ imageParams.append(", int");
+ else {
+ imageParams.append(", ivec");
+ imageParams.append(postfixes[dims]);
+ }
+ if (sampler.ms)
+ imageParams.append(", int");
+
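+    // e.g. for image2D, imageParams is "image2D, ivec2", so the code below
+    // produces "vec4 imageLoad(readonly volatile coherent image2D, ivec2);" and
+    // "void imageStore(writeonly volatile coherent image2D, ivec2, vec4);".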
+ if (profile == EEsProfile)
+ commonBuiltins.append("highp ");
+ commonBuiltins.append(prefixes[sampler.type]);
+ commonBuiltins.append("vec4 imageLoad(readonly volatile coherent ");
+ commonBuiltins.append(imageParams);
+ commonBuiltins.append(");\n");
+
+ commonBuiltins.append("void imageStore(writeonly volatile coherent ");
+ commonBuiltins.append(imageParams);
+ commonBuiltins.append(", ");
+ commonBuiltins.append(prefixes[sampler.type]);
+ commonBuiltins.append("vec4);\n");
+
+ if (sampler.dim != Esd1D && sampler.dim != EsdBuffer && profile != EEsProfile && version >= 450) {
+ commonBuiltins.append("int sparseImageLoadARB(readonly volatile coherent ");
+ commonBuiltins.append(imageParams);
+ commonBuiltins.append(", out ");
+ commonBuiltins.append(prefixes[sampler.type]);
+ commonBuiltins.append("vec4");
+ commonBuiltins.append(");\n");
+ }
+
+ if ( profile != EEsProfile ||
+ (profile == EEsProfile && version >= 310)) {
+ if (sampler.type == EbtInt || sampler.type == EbtUint) {
+ const char* dataType = sampler.type == EbtInt ? "highp int" : "highp uint";
+
+ const int numBuiltins = 7;
+
+ static const char* atomicFunc[numBuiltins] = {
+ " imageAtomicAdd(volatile coherent ",
+ " imageAtomicMin(volatile coherent ",
+ " imageAtomicMax(volatile coherent ",
+ " imageAtomicAnd(volatile coherent ",
+ " imageAtomicOr(volatile coherent ",
+ " imageAtomicXor(volatile coherent ",
+ " imageAtomicExchange(volatile coherent "
+ };
+
+ // Loop twice to add prototypes with/without scope/semantics
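+            // e.g. "highp int imageAtomicAdd(volatile coherent iimage2D, ivec2, highp int);"
+            // on the first pass; the second pass appends ", int, int, int" (scope,
+            // storage semantics, semantics) before the closing ");".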
+ for (int j = 0; j < 2; ++j) {
+                for (int i = 0; i < numBuiltins; ++i) {
+ commonBuiltins.append(dataType);
+ commonBuiltins.append(atomicFunc[i]);
+ commonBuiltins.append(imageParams);
+ commonBuiltins.append(", ");
+ commonBuiltins.append(dataType);
+ if (j == 1) {
+ commonBuiltins.append(", int, int, int");
+ }
+ commonBuiltins.append(");\n");
+ }
+
+ commonBuiltins.append(dataType);
+ commonBuiltins.append(" imageAtomicCompSwap(volatile coherent ");
+ commonBuiltins.append(imageParams);
+ commonBuiltins.append(", ");
+ commonBuiltins.append(dataType);
+ commonBuiltins.append(", ");
+ commonBuiltins.append(dataType);
+ if (j == 1) {
+ commonBuiltins.append(", int, int, int, int, int");
+ }
+ commonBuiltins.append(");\n");
+ }
+
+ commonBuiltins.append(dataType);
+ commonBuiltins.append(" imageAtomicLoad(volatile coherent ");
+ commonBuiltins.append(imageParams);
+ commonBuiltins.append(", int, int, int);\n");
+
+ commonBuiltins.append("void imageAtomicStore(volatile coherent ");
+ commonBuiltins.append(imageParams);
+ commonBuiltins.append(", ");
+ commonBuiltins.append(dataType);
+ commonBuiltins.append(", int, int, int);\n");
+
+ } else {
+ // not int or uint
+ // GL_ARB_ES3_1_compatibility
+ // TODO: spec issue: are there restrictions on the kind of layout() that can be used? what about dropping memory qualifiers?
+ if ((profile != EEsProfile && version >= 450) ||
+ (profile == EEsProfile && version >= 310)) {
+ commonBuiltins.append("float imageAtomicExchange(volatile coherent ");
+ commonBuiltins.append(imageParams);
+ commonBuiltins.append(", float);\n");
+ }
+ }
+ }
+
+#ifdef AMD_EXTENSIONS
+ if (sampler.dim == EsdRect || sampler.dim == EsdBuffer || sampler.shadow || sampler.ms)
+ return;
+
+ if (profile == EEsProfile || version < 450)
+ return;
+
+ TString imageLodParams = typeName;
+ if (dims == 1)
+ imageLodParams.append(", int");
+ else {
+ imageLodParams.append(", ivec");
+ imageLodParams.append(postfixes[dims]);
+ }
+ imageLodParams.append(", int");
+
+ commonBuiltins.append(prefixes[sampler.type]);
+ commonBuiltins.append("vec4 imageLoadLodAMD(readonly volatile coherent ");
+ commonBuiltins.append(imageLodParams);
+ commonBuiltins.append(");\n");
+
+ commonBuiltins.append("void imageStoreLodAMD(writeonly volatile coherent ");
+ commonBuiltins.append(imageLodParams);
+ commonBuiltins.append(", ");
+ commonBuiltins.append(prefixes[sampler.type]);
+ commonBuiltins.append("vec4);\n");
+
+ if (sampler.dim != Esd1D) {
+ commonBuiltins.append("int sparseImageLoadLodAMD(readonly volatile coherent ");
+ commonBuiltins.append(imageLodParams);
+ commonBuiltins.append(", out ");
+ commonBuiltins.append(prefixes[sampler.type]);
+ commonBuiltins.append("vec4");
+ commonBuiltins.append(");\n");
+ }
+#endif
+}
+
+//
+// Helper function for add2ndGenerationSamplingImaging(),
+// when adding context-independent built-in functions.
+//
+// Add all the subpass access functions for the given type.
+//
+void TBuiltIns::addSubpassSampling(TSampler sampler, const TString& typeName, int /*version*/, EProfile /*profile*/)
+{
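+    // Emits e.g. "vec4 subpassLoad(subpassInput);" or, for a multisampled
+    // input, "vec4 subpassLoad(subpassInputMS, int);" (illustrative).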
+ stageBuiltins[EShLangFragment].append(prefixes[sampler.type]);
+ stageBuiltins[EShLangFragment].append("vec4 subpassLoad");
+ stageBuiltins[EShLangFragment].append("(");
+ stageBuiltins[EShLangFragment].append(typeName.c_str());
+ if (sampler.ms)
+ stageBuiltins[EShLangFragment].append(", int");
+ stageBuiltins[EShLangFragment].append(");\n");
+}
+
+//
+// Helper function for add2ndGenerationSamplingImaging(),
+// when adding context-independent built-in functions.
+//
+// Add all the texture lookup functions for the given type.
+//
+void TBuiltIns::addSamplingFunctions(TSampler sampler, const TString& typeName, int version, EProfile profile)
+{
+ //
+ // texturing
+ //
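+    // The nested loops below assemble names from the enabled features (texture,
+    // textureProj, textureLod, texelFetch, textureGrad, plus Offset, Clamp, and
+    // sparse "ARB" variants); for sampler2D one such result is
+    // "vec4 textureProjLodOffset(sampler2D,vec3,float,ivec2);" (illustrative).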
+ for (int proj = 0; proj <= 1; ++proj) { // loop over "bool" projective or not
+
+ if (proj && (sampler.dim == EsdCube || sampler.dim == EsdBuffer || sampler.arrayed || sampler.ms || !sampler.combined))
+ continue;
+
+ for (int lod = 0; lod <= 1; ++lod) {
+
+ if (lod && (sampler.dim == EsdBuffer || sampler.dim == EsdRect || sampler.ms || !sampler.combined))
+ continue;
+ if (lod && sampler.dim == Esd2D && sampler.arrayed && sampler.shadow)
+ continue;
+ if (lod && sampler.dim == EsdCube && sampler.shadow)
+ continue;
+
+ for (int bias = 0; bias <= 1; ++bias) {
+
+ if (bias && (lod || sampler.ms || !sampler.combined))
+ continue;
+ if (bias && (sampler.dim == Esd2D || sampler.dim == EsdCube) && sampler.shadow && sampler.arrayed)
+ continue;
+ if (bias && (sampler.dim == EsdRect || sampler.dim == EsdBuffer))
+ continue;
+
+ for (int offset = 0; offset <= 1; ++offset) { // loop over "bool" offset or not
+
+ if (proj + offset + bias + lod > 3)
+ continue;
+ if (offset && (sampler.dim == EsdCube || sampler.dim == EsdBuffer || sampler.ms))
+ continue;
+
+ for (int fetch = 0; fetch <= 1; ++fetch) { // loop over "bool" fetch or not
+
+ if (proj + offset + fetch + bias + lod > 3)
+ continue;
+ if (fetch && (lod || bias))
+ continue;
+ if (fetch && (sampler.shadow || sampler.dim == EsdCube))
+ continue;
+ if (fetch == 0 && (sampler.ms || sampler.dim == EsdBuffer || !sampler.combined))
+ continue;
+
+ for (int grad = 0; grad <= 1; ++grad) { // loop over "bool" grad or not
+
+ if (grad && (lod || bias || sampler.ms || !sampler.combined))
+ continue;
+ if (grad && sampler.dim == EsdBuffer)
+ continue;
+ if (proj + offset + fetch + grad + bias + lod > 3)
+ continue;
+
+ for (int extraProj = 0; extraProj <= 1; ++extraProj) {
+ bool compare = false;
+ int totalDims = dimMap[sampler.dim] + (sampler.arrayed ? 1 : 0);
+ // skip dummy unused second component for 1D non-array shadows
+ if (sampler.shadow && totalDims < 2)
+ totalDims = 2;
+ totalDims += (sampler.shadow ? 1 : 0) + proj;
+ if (totalDims > 4 && sampler.shadow) {
+ compare = true;
+ totalDims = 4;
+ }
+ assert(totalDims <= 4);
+
+ if (extraProj && ! proj)
+ continue;
+ if (extraProj && (sampler.dim == Esd3D || sampler.shadow || !sampler.combined))
+ continue;
+#ifdef AMD_EXTENSIONS
+ for (int f16TexAddr = 0; f16TexAddr <= 1; ++f16TexAddr) { // loop over 16-bit floating-point texel addressing
+
+ if (f16TexAddr && sampler.type != EbtFloat16)
+ continue;
+ if (f16TexAddr && sampler.shadow && ! compare) {
+ compare = true; // compare argument is always present
+ totalDims--;
+ }
+#endif
+                                for (int lodClamp = 0; lodClamp <= 1; ++lodClamp) { // loop over "bool" lod clamp
+
+ if (lodClamp && (profile == EEsProfile || version < 450))
+ continue;
+ if (lodClamp && (proj || lod || fetch))
+ continue;
+
+ for (int sparse = 0; sparse <= 1; ++sparse) { // loop over "bool" sparse or not
+
+ if (sparse && (profile == EEsProfile || version < 450))
+ continue;
+                                        // Sparse sampling is not supported for 1D/1D-array textures, buffer textures, or projective texturing
+ if (sparse && (sampler.dim == Esd1D || sampler.dim == EsdBuffer || proj))
+ continue;
+
+ TString s;
+
+ // return type
+ if (sparse)
+ s.append("int ");
+ else {
+ if (sampler.shadow)
+#ifdef AMD_EXTENSIONS
+ if (sampler.type == EbtFloat16)
+ s.append("float16_t ");
+ else
+ s.append("float ");
+#else
+ s.append("float ");
+#endif
+ else {
+ s.append(prefixes[sampler.type]);
+ s.append("vec4 ");
+ }
+ }
+
+ // name
+ if (sparse) {
+ if (fetch)
+ s.append("sparseTexel");
+ else
+ s.append("sparseTexture");
+ }
+ else {
+ if (fetch)
+ s.append("texel");
+ else
+ s.append("texture");
+ }
+ if (proj)
+ s.append("Proj");
+ if (lod)
+ s.append("Lod");
+ if (grad)
+ s.append("Grad");
+ if (fetch)
+ s.append("Fetch");
+ if (offset)
+ s.append("Offset");
+ if (lodClamp)
+ s.append("Clamp");
+ if (lodClamp || sparse)
+ s.append("ARB");
+ s.append("(");
+
+ // sampler type
+ s.append(typeName);
+#ifdef AMD_EXTENSIONS
+ // P coordinate
+ if (extraProj) {
+ if (f16TexAddr)
+ s.append(",f16vec4");
+ else
+ s.append(",vec4");
+ } else {
+ s.append(",");
+ TBasicType t = fetch ? EbtInt : (f16TexAddr ? EbtFloat16 : EbtFloat);
+ if (totalDims == 1)
+ s.append(TType::getBasicString(t));
+ else {
+ s.append(prefixes[t]);
+ s.append("vec");
+ s.append(postfixes[totalDims]);
+ }
+ }
+#else
+ // P coordinate
+ if (extraProj)
+ s.append(",vec4");
+ else {
+ s.append(",");
+ TBasicType t = fetch ? EbtInt : EbtFloat;
+ if (totalDims == 1)
+ s.append(TType::getBasicString(t));
+ else {
+ s.append(prefixes[t]);
+ s.append("vec");
+ s.append(postfixes[totalDims]);
+ }
+ }
+#endif
+ // non-optional compare
+ if (compare)
+ s.append(",float");
+
+                                    // non-optional lod argument (an lod not driven by the lod loop) or sample index
+ if ((fetch && sampler.dim != EsdBuffer && sampler.dim != EsdRect && !sampler.ms) ||
+ (sampler.ms && fetch))
+ s.append(",int");
+#ifdef AMD_EXTENSIONS
+ // non-optional lod
+ if (lod) {
+ if (f16TexAddr)
+ s.append(",float16_t");
+ else
+ s.append(",float");
+ }
+
+ // gradient arguments
+ if (grad) {
+ if (dimMap[sampler.dim] == 1) {
+ if (f16TexAddr)
+ s.append(",float16_t,float16_t");
+ else
+ s.append(",float,float");
+ } else {
+ if (f16TexAddr)
+ s.append(",f16vec");
+ else
+ s.append(",vec");
+ s.append(postfixes[dimMap[sampler.dim]]);
+ if (f16TexAddr)
+ s.append(",f16vec");
+ else
+ s.append(",vec");
+ s.append(postfixes[dimMap[sampler.dim]]);
+ }
+ }
+#else
+ // non-optional lod
+ if (lod)
+ s.append(",float");
+
+ // gradient arguments
+ if (grad) {
+ if (dimMap[sampler.dim] == 1)
+ s.append(",float,float");
+ else {
+ s.append(",vec");
+ s.append(postfixes[dimMap[sampler.dim]]);
+ s.append(",vec");
+ s.append(postfixes[dimMap[sampler.dim]]);
+ }
+ }
+#endif
+ // offset
+ if (offset) {
+ if (dimMap[sampler.dim] == 1)
+ s.append(",int");
+ else {
+ s.append(",ivec");
+ s.append(postfixes[dimMap[sampler.dim]]);
+ }
+ }
+
+#ifdef AMD_EXTENSIONS
+ // lod clamp
+ if (lodClamp) {
+ if (f16TexAddr)
+ s.append(",float16_t");
+ else
+ s.append(",float");
+ }
+#else
+ // lod clamp
+ if (lodClamp)
+ s.append(",float");
+#endif
+ // texel out (for sparse texture)
+ if (sparse) {
+ s.append(",out ");
+ if (sampler.shadow)
+#ifdef AMD_EXTENSIONS
+ if (sampler.type == EbtFloat16)
+ s.append("float16_t");
+ else
+ s.append("float");
+#else
+ s.append("float");
+#endif
+ else {
+ s.append(prefixes[sampler.type]);
+ s.append("vec4");
+ }
+ }
+#ifdef AMD_EXTENSIONS
+ // optional bias
+ if (bias) {
+ if (f16TexAddr)
+ s.append(",float16_t");
+ else
+ s.append(",float");
+ }
+#else
+ // optional bias
+ if (bias)
+ s.append(",float");
+#endif
+ s.append(");\n");
+
+ // Add to the per-language set of built-ins
+ if (bias || lodClamp) {
+ stageBuiltins[EShLangFragment].append(s);
+#ifdef NV_EXTENSIONS
+ stageBuiltins[EShLangCompute].append(s);
+#endif
+ } else
+ commonBuiltins.append(s);
+
+ }
+ }
+#ifdef AMD_EXTENSIONS
+ }
+#endif
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+//
+// Helper function for add2ndGenerationSamplingImaging(),
+// when adding context-independent built-in functions.
+//
+// Add all the texture gather functions for the given type.
+//
+void TBuiltIns::addGatherFunctions(TSampler sampler, const TString& typeName, int version, EProfile profile)
+{
+ switch (sampler.dim) {
+ case Esd2D:
+ case EsdRect:
+ case EsdCube:
+ break;
+ default:
+ return;
+ }
+
+ if (sampler.ms)
+ return;
+
+ if (version < 140 && sampler.dim == EsdRect && sampler.type != EbtFloat)
+ return;
+
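+    // e.g. for sampler2D the loops below emit "vec4 textureGather(sampler2D,vec2);"
+    // along with Offset/Offsets, comp, and sparse "ARB" variants (illustrative).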
+#ifdef AMD_EXTENSIONS
+ for (int f16TexAddr = 0; f16TexAddr <= 1; ++f16TexAddr) { // loop over 16-bit floating-point texel addressing
+
+ if (f16TexAddr && sampler.type != EbtFloat16)
+ continue;
+#endif
+ for (int offset = 0; offset < 3; ++offset) { // loop over three forms of offset in the call name: none, Offset, and Offsets
+
+ for (int comp = 0; comp < 2; ++comp) { // loop over presence of comp argument
+
+ if (comp > 0 && sampler.shadow)
+ continue;
+
+ if (offset > 0 && sampler.dim == EsdCube)
+ continue;
+
+ for (int sparse = 0; sparse <= 1; ++sparse) { // loop over "bool" sparse or not
+ if (sparse && (profile == EEsProfile || version < 450))
+ continue;
+
+ TString s;
+
+ // return type
+ if (sparse)
+ s.append("int ");
+ else {
+ s.append(prefixes[sampler.type]);
+ s.append("vec4 ");
+ }
+
+ // name
+ if (sparse)
+ s.append("sparseTextureGather");
+ else
+ s.append("textureGather");
+ switch (offset) {
+ case 1:
+ s.append("Offset");
+ break;
+ case 2:
+ s.append("Offsets");
+ break;
+ default:
+ break;
+ }
+ if (sparse)
+ s.append("ARB");
+ s.append("(");
+
+ // sampler type argument
+ s.append(typeName);
+
+ // P coordinate argument
+#ifdef AMD_EXTENSIONS
+ if (f16TexAddr)
+ s.append(",f16vec");
+ else
+ s.append(",vec");
+#else
+ s.append(",vec");
+#endif
+ int totalDims = dimMap[sampler.dim] + (sampler.arrayed ? 1 : 0);
+ s.append(postfixes[totalDims]);
+
+ // refZ argument
+ if (sampler.shadow)
+ s.append(",float");
+
+ // offset argument
+ if (offset > 0) {
+ s.append(",ivec2");
+ if (offset == 2)
+ s.append("[4]");
+ }
+
+ // texel out (for sparse texture)
+ if (sparse) {
+ s.append(",out ");
+ s.append(prefixes[sampler.type]);
+ s.append("vec4 ");
+ }
+
+ // comp argument
+ if (comp)
+ s.append(",int");
+
+ s.append(");\n");
+ commonBuiltins.append(s);
+#ifdef AMD_EXTENSIONS
+ }
+#endif
+ }
+ }
+ }
+
+#ifdef AMD_EXTENSIONS
+ if (sampler.dim == EsdRect || sampler.shadow)
+ return;
+
+ if (profile == EEsProfile || version < 450)
+ return;
+
+ for (int bias = 0; bias < 2; ++bias) { // loop over presence of bias argument
+
+ for (int lod = 0; lod < 2; ++lod) { // loop over presence of lod argument
+
+ if ((lod && bias) || (lod == 0 && bias == 0))
+ continue;
+
+ for (int f16TexAddr = 0; f16TexAddr <= 1; ++f16TexAddr) { // loop over 16-bit floating-point texel addressing
+
+ if (f16TexAddr && sampler.type != EbtFloat16)
+ continue;
+
+ for (int offset = 0; offset < 3; ++offset) { // loop over three forms of offset in the call name: none, Offset, and Offsets
+
+ for (int comp = 0; comp < 2; ++comp) { // loop over presence of comp argument
+
+ if (comp == 0 && bias)
+ continue;
+
+ if (offset > 0 && sampler.dim == EsdCube)
+ continue;
+
+ for (int sparse = 0; sparse <= 1; ++sparse) { // loop over "bool" sparse or not
+ if (sparse && (profile == EEsProfile || version < 450))
+ continue;
+
+ TString s;
+
+ // return type
+ if (sparse)
+ s.append("int ");
+ else {
+ s.append(prefixes[sampler.type]);
+ s.append("vec4 ");
+ }
+
+ // name
+ if (sparse)
+ s.append("sparseTextureGather");
+ else
+ s.append("textureGather");
+
+ if (lod)
+ s.append("Lod");
+
+ switch (offset) {
+ case 1:
+ s.append("Offset");
+ break;
+ case 2:
+ s.append("Offsets");
+ break;
+ default:
+ break;
+ }
+
+ if (lod)
+ s.append("AMD");
+ else if (sparse)
+ s.append("ARB");
+
+ s.append("(");
+
+ // sampler type argument
+ s.append(typeName);
+
+ // P coordinate argument
+ if (f16TexAddr)
+ s.append(",f16vec");
+ else
+ s.append(",vec");
+ int totalDims = dimMap[sampler.dim] + (sampler.arrayed ? 1 : 0);
+ s.append(postfixes[totalDims]);
+
+ // lod argument
+ if (lod) {
+ if (f16TexAddr)
+ s.append(",float16_t");
+ else
+ s.append(",float");
+ }
+
+ // offset argument
+ if (offset > 0) {
+ s.append(",ivec2");
+ if (offset == 2)
+ s.append("[4]");
+ }
+
+ // texel out (for sparse texture)
+ if (sparse) {
+ s.append(",out ");
+ s.append(prefixes[sampler.type]);
+ s.append("vec4 ");
+ }
+
+ // comp argument
+ if (comp)
+ s.append(",int");
+
+ // bias argument
+ if (bias) {
+ if (f16TexAddr)
+ s.append(",float16_t");
+ else
+ s.append(",float");
+ }
+
+ s.append(");\n");
+ if (bias)
+ stageBuiltins[EShLangFragment].append(s);
+ else
+ commonBuiltins.append(s);
+ }
+ }
+ }
+ }
+ }
+ }
+#endif
+}
+
+//
+// Add context-dependent built-in functions and variables that are present
+// for the given version and profile. All results go into commonBuiltins,
+// because this is called for one specific stage, so stage-specific entries
+// are added to commonBuiltins only when that stage was requested.
+//
+void TBuiltIns::initialize(const TBuiltInResource &resources, int version, EProfile profile, const SpvVersion& spvVersion, EShLanguage language)
+{
+ //
+ // Initialize the context-dependent (resource-dependent) built-in strings for parsing.
+ //
+
+ //============================================================================
+ //
+ // Standard Uniforms
+ //
+ //============================================================================
+
+ TString& s = commonBuiltins;
+ const int maxSize = 80;
+ char builtInConstant[maxSize];
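+ // Scratch buffer, reused for each constant declaration below; 80 characters is
+ // assumed to be enough for the longest built-in name plus a formatted value.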
+
+ //
+ // Build string of implementation dependent constants.
+ //
+
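+ // For example (illustrative), resources.maxVertexAttribs == 16 yields
+ // "const mediump int gl_MaxVertexAttribs = 16;" in the ES path below.
+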
+ if (profile == EEsProfile) {
+ snprintf(builtInConstant, maxSize, "const mediump int gl_MaxVertexAttribs = %d;", resources.maxVertexAttribs);
+ s.append(builtInConstant);
+
+ snprintf(builtInConstant, maxSize, "const mediump int gl_MaxVertexUniformVectors = %d;", resources.maxVertexUniformVectors);
+ s.append(builtInConstant);
+
+ snprintf(builtInConstant, maxSize, "const mediump int gl_MaxVertexTextureImageUnits = %d;", resources.maxVertexTextureImageUnits);
+ s.append(builtInConstant);
+
+ snprintf(builtInConstant, maxSize, "const mediump int gl_MaxCombinedTextureImageUnits = %d;", resources.maxCombinedTextureImageUnits);
+ s.append(builtInConstant);
+
+ snprintf(builtInConstant, maxSize, "const mediump int gl_MaxTextureImageUnits = %d;", resources.maxTextureImageUnits);
+ s.append(builtInConstant);
+
+ snprintf(builtInConstant, maxSize, "const mediump int gl_MaxFragmentUniformVectors = %d;", resources.maxFragmentUniformVectors);
+ s.append(builtInConstant);
+
+ snprintf(builtInConstant, maxSize, "const mediump int gl_MaxDrawBuffers = %d;", resources.maxDrawBuffers);
+ s.append(builtInConstant);
+
+ if (version == 100) {
+ snprintf(builtInConstant, maxSize, "const mediump int gl_MaxVaryingVectors = %d;", resources.maxVaryingVectors);
+ s.append(builtInConstant);
+ } else {
+ snprintf(builtInConstant, maxSize, "const mediump int gl_MaxVertexOutputVectors = %d;", resources.maxVertexOutputVectors);
+ s.append(builtInConstant);
+
+ snprintf(builtInConstant, maxSize, "const mediump int gl_MaxFragmentInputVectors = %d;", resources.maxFragmentInputVectors);
+ s.append(builtInConstant);
+
+ snprintf(builtInConstant, maxSize, "const mediump int gl_MinProgramTexelOffset = %d;", resources.minProgramTexelOffset);
+ s.append(builtInConstant);
+
+ snprintf(builtInConstant, maxSize, "const mediump int gl_MaxProgramTexelOffset = %d;", resources.maxProgramTexelOffset);
+ s.append(builtInConstant);
+ }
+
+ if (version >= 310) {
+ // geometry
+
+ snprintf(builtInConstant, maxSize, "const int gl_MaxGeometryInputComponents = %d;", resources.maxGeometryInputComponents);
+ s.append(builtInConstant);
+ snprintf(builtInConstant, maxSize, "const int gl_MaxGeometryOutputComponents = %d;", resources.maxGeometryOutputComponents);
+ s.append(builtInConstant);
+ snprintf(builtInConstant, maxSize, "const int gl_MaxGeometryImageUniforms = %d;", resources.maxGeometryImageUniforms);
+ s.append(builtInConstant);
+ snprintf(builtInConstant, maxSize, "const int gl_MaxGeometryTextureImageUnits = %d;", resources.maxGeometryTextureImageUnits);
+ s.append(builtInConstant);
+ snprintf(builtInConstant, maxSize, "const int gl_MaxGeometryOutputVertices = %d;", resources.maxGeometryOutputVertices);
+ s.append(builtInConstant);
+ snprintf(builtInConstant, maxSize, "const int gl_MaxGeometryTotalOutputComponents = %d;", resources.maxGeometryTotalOutputComponents);
+ s.append(builtInConstant);
+ snprintf(builtInConstant, maxSize, "const int gl_MaxGeometryUniformComponents = %d;", resources.maxGeometryUniformComponents);
+ s.append(builtInConstant);
+ snprintf(builtInConstant, maxSize, "const int gl_MaxGeometryAtomicCounters = %d;", resources.maxGeometryAtomicCounters);
+ s.append(builtInConstant);
+ snprintf(builtInConstant, maxSize, "const int gl_MaxGeometryAtomicCounterBuffers = %d;", resources.maxGeometryAtomicCounterBuffers);
+ s.append(builtInConstant);
+
+ // tessellation
+
+ snprintf(builtInConstant, maxSize, "const int gl_MaxTessControlInputComponents = %d;", resources.maxTessControlInputComponents);
+ s.append(builtInConstant);
+ snprintf(builtInConstant, maxSize, "const int gl_MaxTessControlOutputComponents = %d;", resources.maxTessControlOutputComponents);
+ s.append(builtInConstant);
+ snprintf(builtInConstant, maxSize, "const int gl_MaxTessControlTextureImageUnits = %d;", resources.maxTessControlTextureImageUnits);
+ s.append(builtInConstant);
+ snprintf(builtInConstant, maxSize, "const int gl_MaxTessControlUniformComponents = %d;", resources.maxTessControlUniformComponents);
+ s.append(builtInConstant);
+ snprintf(builtInConstant, maxSize, "const int gl_MaxTessControlTotalOutputComponents = %d;", resources.maxTessControlTotalOutputComponents);
+ s.append(builtInConstant);
+
+ snprintf(builtInConstant, maxSize, "const int gl_MaxTessEvaluationInputComponents = %d;", resources.maxTessEvaluationInputComponents);
+ s.append(builtInConstant);
+ snprintf(builtInConstant, maxSize, "const int gl_MaxTessEvaluationOutputComponents = %d;", resources.maxTessEvaluationOutputComponents);
+ s.append(builtInConstant);
+ snprintf(builtInConstant, maxSize, "const int gl_MaxTessEvaluationTextureImageUnits = %d;", resources.maxTessEvaluationTextureImageUnits);
+ s.append(builtInConstant);
+ snprintf(builtInConstant, maxSize, "const int gl_MaxTessEvaluationUniformComponents = %d;", resources.maxTessEvaluationUniformComponents);
+ s.append(builtInConstant);
+
+ snprintf(builtInConstant, maxSize, "const int gl_MaxTessPatchComponents = %d;", resources.maxTessPatchComponents);
+ s.append(builtInConstant);
+
+ snprintf(builtInConstant, maxSize, "const int gl_MaxPatchVertices = %d;", resources.maxPatchVertices);
+ s.append(builtInConstant);
+ snprintf(builtInConstant, maxSize, "const int gl_MaxTessGenLevel = %d;", resources.maxTessGenLevel);
+ s.append(builtInConstant);
+
+ // this is here instead of with the others in initialize(version, profile) due to the dependence on gl_MaxPatchVertices
+ if (language == EShLangTessControl || language == EShLangTessEvaluation) {
+ s.append(
+ "in gl_PerVertex {"
+ "highp vec4 gl_Position;"
+ "highp float gl_PointSize;"
+#ifdef NV_EXTENSIONS
+ "highp vec4 gl_SecondaryPositionNV;" // GL_NV_stereo_view_rendering
+ "highp vec4 gl_PositionPerViewNV[];" // GL_NVX_multiview_per_view_attributes
+#endif
+ "} gl_in[gl_MaxPatchVertices];"
+ "\n");
+ }
+ }
+
+ } else {
+ // non-ES profile
+
+ snprintf(builtInConstant, maxSize, "const int gl_MaxVertexAttribs = %d;", resources.maxVertexAttribs);
+ s.append(builtInConstant);
+
+ snprintf(builtInConstant, maxSize, "const int gl_MaxVertexTextureImageUnits = %d;", resources.maxVertexTextureImageUnits);
+ s.append(builtInConstant);
+
+ snprintf(builtInConstant, maxSize, "const int gl_MaxCombinedTextureImageUnits = %d;", resources.maxCombinedTextureImageUnits);
+ s.append(builtInConstant);
+
+ snprintf(builtInConstant, maxSize, "const int gl_MaxTextureImageUnits = %d;", resources.maxTextureImageUnits);
+ s.append(builtInConstant);
+
+ snprintf(builtInConstant, maxSize, "const int gl_MaxDrawBuffers = %d;", resources.maxDrawBuffers);
+ s.append(builtInConstant);
+
+ snprintf(builtInConstant, maxSize, "const int gl_MaxLights = %d;", resources.maxLights);
+ s.append(builtInConstant);
+
+ snprintf(builtInConstant, maxSize, "const int gl_MaxClipPlanes = %d;", resources.maxClipPlanes);
+ s.append(builtInConstant);
+
+ snprintf(builtInConstant, maxSize, "const int gl_MaxTextureUnits = %d;", resources.maxTextureUnits);
+ s.append(builtInConstant);
+
+ snprintf(builtInConstant, maxSize, "const int gl_MaxTextureCoords = %d;", resources.maxTextureCoords);
+ s.append(builtInConstant);
+
+ snprintf(builtInConstant, maxSize, "const int gl_MaxVertexUniformComponents = %d;", resources.maxVertexUniformComponents);
+ s.append(builtInConstant);
+
+ if (version < 150 || ARBCompatibility) {
+ snprintf(builtInConstant, maxSize, "const int gl_MaxVaryingFloats = %d;", resources.maxVaryingFloats);
+ s.append(builtInConstant);
+ }
+
+ snprintf(builtInConstant, maxSize, "const int gl_MaxFragmentUniformComponents = %d;", resources.maxFragmentUniformComponents);
+ s.append(builtInConstant);
+
+ if (spvVersion.spv == 0 && IncludeLegacy(version, profile, spvVersion)) {
+ //
+ // OpenGL 'uniform' state. Page numbers are in reference to version
+ // 1.4 of the OpenGL specification.
+ //
+
+ //
+ // Matrix state. p. 31, 32, 37, 39, 40.
+ //
+ s.append("uniform mat4 gl_TextureMatrix[gl_MaxTextureCoords];"
+
+ //
+ // Derived matrix state that provides inverse and transposed versions
+ // of the matrices above.
+ //
+ "uniform mat4 gl_TextureMatrixInverse[gl_MaxTextureCoords];"
+
+ "uniform mat4 gl_TextureMatrixTranspose[gl_MaxTextureCoords];"
+
+ "uniform mat4 gl_TextureMatrixInverseTranspose[gl_MaxTextureCoords];"
+
+ //
+ // Clip planes p. 42.
+ //
+ "uniform vec4 gl_ClipPlane[gl_MaxClipPlanes];"
+
+ //
+ // Light State p 50, 53, 55.
+ //
+ "uniform gl_LightSourceParameters gl_LightSource[gl_MaxLights];"
+
+ //
+ // Derived state from products of light.
+ //
+ "uniform gl_LightProducts gl_FrontLightProduct[gl_MaxLights];"
+ "uniform gl_LightProducts gl_BackLightProduct[gl_MaxLights];"
+
+ //
+ // Texture Environment and Generation, p. 152, p. 40-42.
+ //
+ "uniform vec4 gl_TextureEnvColor[gl_MaxTextureImageUnits];"
+ "uniform vec4 gl_EyePlaneS[gl_MaxTextureCoords];"
+ "uniform vec4 gl_EyePlaneT[gl_MaxTextureCoords];"
+ "uniform vec4 gl_EyePlaneR[gl_MaxTextureCoords];"
+ "uniform vec4 gl_EyePlaneQ[gl_MaxTextureCoords];"
+ "uniform vec4 gl_ObjectPlaneS[gl_MaxTextureCoords];"
+ "uniform vec4 gl_ObjectPlaneT[gl_MaxTextureCoords];"
+ "uniform vec4 gl_ObjectPlaneR[gl_MaxTextureCoords];"
+ "uniform vec4 gl_ObjectPlaneQ[gl_MaxTextureCoords];");
+ }
+
+ if (version >= 130) {
+ snprintf(builtInConstant, maxSize, "const int gl_MaxClipDistances = %d;", resources.maxClipDistances);
+ s.append(builtInConstant);
+ snprintf(builtInConstant, maxSize, "const int gl_MaxVaryingComponents = %d;", resources.maxVaryingComponents);
+ s.append(builtInConstant);
+
+ // GL_ARB_shading_language_420pack
+ snprintf(builtInConstant, maxSize, "const mediump int gl_MinProgramTexelOffset = %d;", resources.minProgramTexelOffset);
+ s.append(builtInConstant);
+ snprintf(builtInConstant, maxSize, "const mediump int gl_MaxProgramTexelOffset = %d;", resources.maxProgramTexelOffset);
+ s.append(builtInConstant);
+ }
+
+ // geometry
+ if (version >= 150) {
+ snprintf(builtInConstant, maxSize, "const int gl_MaxGeometryInputComponents = %d;", resources.maxGeometryInputComponents);
+ s.append(builtInConstant);
+ snprintf(builtInConstant, maxSize, "const int gl_MaxGeometryOutputComponents = %d;", resources.maxGeometryOutputComponents);
+ s.append(builtInConstant);
+ snprintf(builtInConstant, maxSize, "const int gl_MaxGeometryTextureImageUnits = %d;", resources.maxGeometryTextureImageUnits);
+ s.append(builtInConstant);
+ snprintf(builtInConstant, maxSize, "const int gl_MaxGeometryOutputVertices = %d;", resources.maxGeometryOutputVertices);
+ s.append(builtInConstant);
+ snprintf(builtInConstant, maxSize, "const int gl_MaxGeometryTotalOutputComponents = %d;", resources.maxGeometryTotalOutputComponents);
+ s.append(builtInConstant);
+ snprintf(builtInConstant, maxSize, "const int gl_MaxGeometryUniformComponents = %d;", resources.maxGeometryUniformComponents);
+ s.append(builtInConstant);
+ snprintf(builtInConstant, maxSize, "const int gl_MaxGeometryVaryingComponents = %d;", resources.maxGeometryVaryingComponents);
+ s.append(builtInConstant);
+ }
+
+ if (version >= 150) {
+ snprintf(builtInConstant, maxSize, "const int gl_MaxVertexOutputComponents = %d;", resources.maxVertexOutputComponents);
+ s.append(builtInConstant);
+ snprintf(builtInConstant, maxSize, "const int gl_MaxFragmentInputComponents = %d;", resources.maxFragmentInputComponents);
+ s.append(builtInConstant);
+ }
+
+ // tessellation
+ if (version >= 150) {
+ snprintf(builtInConstant, maxSize, "const int gl_MaxTessControlInputComponents = %d;", resources.maxTessControlInputComponents);
+ s.append(builtInConstant);
+ snprintf(builtInConstant, maxSize, "const int gl_MaxTessControlOutputComponents = %d;", resources.maxTessControlOutputComponents);
+ s.append(builtInConstant);
+ snprintf(builtInConstant, maxSize, "const int gl_MaxTessControlTextureImageUnits = %d;", resources.maxTessControlTextureImageUnits);
+ s.append(builtInConstant);
+ snprintf(builtInConstant, maxSize, "const int gl_MaxTessControlUniformComponents = %d;", resources.maxTessControlUniformComponents);
+ s.append(builtInConstant);
+ snprintf(builtInConstant, maxSize, "const int gl_MaxTessControlTotalOutputComponents = %d;", resources.maxTessControlTotalOutputComponents);
+ s.append(builtInConstant);
+
+ snprintf(builtInConstant, maxSize, "const int gl_MaxTessEvaluationInputComponents = %d;", resources.maxTessEvaluationInputComponents);
+ s.append(builtInConstant);
+ snprintf(builtInConstant, maxSize, "const int gl_MaxTessEvaluationOutputComponents = %d;", resources.maxTessEvaluationOutputComponents);
+ s.append(builtInConstant);
+ snprintf(builtInConstant, maxSize, "const int gl_MaxTessEvaluationTextureImageUnits = %d;", resources.maxTessEvaluationTextureImageUnits);
+ s.append(builtInConstant);
+ snprintf(builtInConstant, maxSize, "const int gl_MaxTessEvaluationUniformComponents = %d;", resources.maxTessEvaluationUniformComponents);
+ s.append(builtInConstant);
+
+ snprintf(builtInConstant, maxSize, "const int gl_MaxTessPatchComponents = %d;", resources.maxTessPatchComponents);
+ s.append(builtInConstant);
+ snprintf(builtInConstant, maxSize, "const int gl_MaxTessGenLevel = %d;", resources.maxTessGenLevel);
+ s.append(builtInConstant);
+ snprintf(builtInConstant, maxSize, "const int gl_MaxPatchVertices = %d;", resources.maxPatchVertices);
+ s.append(builtInConstant);
+
+ // this is here instead of with the others in initialize(version, profile) due to the dependence on gl_MaxPatchVertices
+ if (language == EShLangTessControl || language == EShLangTessEvaluation) {
+ s.append(
+ "in gl_PerVertex {"
+ "vec4 gl_Position;"
+ "float gl_PointSize;"
+ "float gl_ClipDistance[];"
+ );
+ if (profile == ECompatibilityProfile)
+ s.append(
+ "vec4 gl_ClipVertex;"
+ "vec4 gl_FrontColor;"
+ "vec4 gl_BackColor;"
+ "vec4 gl_FrontSecondaryColor;"
+ "vec4 gl_BackSecondaryColor;"
+ "vec4 gl_TexCoord[];"
+ "float gl_FogFragCoord;"
+ );
+ if (profile != EEsProfile && version >= 450)
+ s.append(
+ "float gl_CullDistance[];"
+#ifdef NV_EXTENSIONS
+ "vec4 gl_SecondaryPositionNV;" // GL_NV_stereo_view_rendering
+ "vec4 gl_PositionPerViewNV[];" // GL_NVX_multiview_per_view_attributes
+#endif
+ );
+ s.append(
+ "} gl_in[gl_MaxPatchVertices];"
+ "\n");
+ }
+ }
+
+ if (version >= 150) {
+ snprintf(builtInConstant, maxSize, "const int gl_MaxViewports = %d;", resources.maxViewports);
+ s.append(builtInConstant);
+ }
+
+ // images
+ if (version >= 130) {
+ snprintf(builtInConstant, maxSize, "const int gl_MaxCombinedImageUnitsAndFragmentOutputs = %d;", resources.maxCombinedImageUnitsAndFragmentOutputs);
+ s.append(builtInConstant);
+ snprintf(builtInConstant, maxSize, "const int gl_MaxImageSamples = %d;", resources.maxImageSamples);
+ s.append(builtInConstant);
+ snprintf(builtInConstant, maxSize, "const int gl_MaxTessControlImageUniforms = %d;", resources.maxTessControlImageUniforms);
+ s.append(builtInConstant);
+ snprintf(builtInConstant, maxSize, "const int gl_MaxTessEvaluationImageUniforms = %d;", resources.maxTessEvaluationImageUniforms);
+ s.append(builtInConstant);
+ snprintf(builtInConstant, maxSize, "const int gl_MaxGeometryImageUniforms = %d;", resources.maxGeometryImageUniforms);
+ s.append(builtInConstant);
+ }
+
+ // enhanced layouts
+ if (version >= 430) {
+ snprintf(builtInConstant, maxSize, "const int gl_MaxTransformFeedbackBuffers = %d;", resources.maxTransformFeedbackBuffers);
+ s.append(builtInConstant);
+ snprintf(builtInConstant, maxSize, "const int gl_MaxTransformFeedbackInterleavedComponents = %d;", resources.maxTransformFeedbackInterleavedComponents);
+ s.append(builtInConstant);
+ }
+ }
+
+ // images (some in compute below)
+ if ((profile == EEsProfile && version >= 310) ||
+ (profile != EEsProfile && version >= 130)) {
+ snprintf(builtInConstant, maxSize, "const int gl_MaxImageUnits = %d;", resources.maxImageUnits);
+ s.append(builtInConstant);
+ snprintf(builtInConstant, maxSize, "const int gl_MaxCombinedShaderOutputResources = %d;", resources.maxCombinedShaderOutputResources);
+ s.append(builtInConstant);
+ snprintf(builtInConstant, maxSize, "const int gl_MaxVertexImageUniforms = %d;", resources.maxVertexImageUniforms);
+ s.append(builtInConstant);
+ snprintf(builtInConstant, maxSize, "const int gl_MaxFragmentImageUniforms = %d;", resources.maxFragmentImageUniforms);
+ s.append(builtInConstant);
+ snprintf(builtInConstant, maxSize, "const int gl_MaxCombinedImageUniforms = %d;", resources.maxCombinedImageUniforms);
+ s.append(builtInConstant);
+ }
+
+ // atomic counters (some in compute below)
+ if ((profile == EEsProfile && version >= 310) ||
+ (profile != EEsProfile && version >= 420)) {
+ snprintf(builtInConstant, maxSize, "const int gl_MaxVertexAtomicCounters = %d;", resources. maxVertexAtomicCounters);
+ s.append(builtInConstant);
+ snprintf(builtInConstant, maxSize, "const int gl_MaxFragmentAtomicCounters = %d;", resources. maxFragmentAtomicCounters);
+ s.append(builtInConstant);
+ snprintf(builtInConstant, maxSize, "const int gl_MaxCombinedAtomicCounters = %d;", resources. maxCombinedAtomicCounters);
+ s.append(builtInConstant);
+ snprintf(builtInConstant, maxSize, "const int gl_MaxAtomicCounterBindings = %d;", resources. maxAtomicCounterBindings);
+ s.append(builtInConstant);
+ snprintf(builtInConstant, maxSize, "const int gl_MaxVertexAtomicCounterBuffers = %d;", resources. maxVertexAtomicCounterBuffers);
+ s.append(builtInConstant);
+ snprintf(builtInConstant, maxSize, "const int gl_MaxFragmentAtomicCounterBuffers = %d;", resources. maxFragmentAtomicCounterBuffers);
+ s.append(builtInConstant);
+ snprintf(builtInConstant, maxSize, "const int gl_MaxCombinedAtomicCounterBuffers = %d;", resources. maxCombinedAtomicCounterBuffers);
+ s.append(builtInConstant);
+ snprintf(builtInConstant, maxSize, "const int gl_MaxAtomicCounterBufferSize = %d;", resources. maxAtomicCounterBufferSize);
+ s.append(builtInConstant);
+ }
+ if (profile != EEsProfile && version >= 420) {
+ snprintf(builtInConstant, maxSize, "const int gl_MaxTessControlAtomicCounters = %d;", resources. maxTessControlAtomicCounters);
+ s.append(builtInConstant);
+ snprintf(builtInConstant, maxSize, "const int gl_MaxTessEvaluationAtomicCounters = %d;", resources. maxTessEvaluationAtomicCounters);
+ s.append(builtInConstant);
+ snprintf(builtInConstant, maxSize, "const int gl_MaxGeometryAtomicCounters = %d;", resources. maxGeometryAtomicCounters);
+ s.append(builtInConstant);
+ snprintf(builtInConstant, maxSize, "const int gl_MaxTessControlAtomicCounterBuffers = %d;", resources. maxTessControlAtomicCounterBuffers);
+ s.append(builtInConstant);
+ snprintf(builtInConstant, maxSize, "const int gl_MaxTessEvaluationAtomicCounterBuffers = %d;", resources. maxTessEvaluationAtomicCounterBuffers);
+ s.append(builtInConstant);
+ snprintf(builtInConstant, maxSize, "const int gl_MaxGeometryAtomicCounterBuffers = %d;", resources. maxGeometryAtomicCounterBuffers);
+ s.append(builtInConstant);
+
+ s.append("\n");
+ }
+
+ // compute
+ if ((profile == EEsProfile && version >= 310) || (profile != EEsProfile && version >= 420)) {
+ snprintf(builtInConstant, maxSize, "const ivec3 gl_MaxComputeWorkGroupCount = ivec3(%d,%d,%d);", resources.maxComputeWorkGroupCountX,
+ resources.maxComputeWorkGroupCountY,
+ resources.maxComputeWorkGroupCountZ);
+ s.append(builtInConstant);
+ snprintf(builtInConstant, maxSize, "const ivec3 gl_MaxComputeWorkGroupSize = ivec3(%d,%d,%d);", resources.maxComputeWorkGroupSizeX,
+ resources.maxComputeWorkGroupSizeY,
+ resources.maxComputeWorkGroupSizeZ);
+ s.append(builtInConstant);
+
+ snprintf(builtInConstant, maxSize, "const int gl_MaxComputeUniformComponents = %d;", resources.maxComputeUniformComponents);
+ s.append(builtInConstant);
+ snprintf(builtInConstant, maxSize, "const int gl_MaxComputeTextureImageUnits = %d;", resources.maxComputeTextureImageUnits);
+ s.append(builtInConstant);
+ snprintf(builtInConstant, maxSize, "const int gl_MaxComputeImageUniforms = %d;", resources.maxComputeImageUniforms);
+ s.append(builtInConstant);
+ snprintf(builtInConstant, maxSize, "const int gl_MaxComputeAtomicCounters = %d;", resources.maxComputeAtomicCounters);
+ s.append(builtInConstant);
+ snprintf(builtInConstant, maxSize, "const int gl_MaxComputeAtomicCounterBuffers = %d;", resources.maxComputeAtomicCounterBuffers);
+ s.append(builtInConstant);
+
+ s.append("\n");
+ }
+
+ // GL_ARB_cull_distance
+ if (profile != EEsProfile && version >= 450) {
+ snprintf(builtInConstant, maxSize, "const int gl_MaxCullDistances = %d;", resources.maxCullDistances);
+ s.append(builtInConstant);
+ snprintf(builtInConstant, maxSize, "const int gl_MaxCombinedClipAndCullDistances = %d;", resources.maxCombinedClipAndCullDistances);
+ s.append(builtInConstant);
+ }
+
+ // GL_ARB_ES3_1_compatibility
+ if ((profile != EEsProfile && version >= 450) ||
+ (profile == EEsProfile && version >= 310)) {
+ snprintf(builtInConstant, maxSize, "const int gl_MaxSamples = %d;", resources.maxSamples);
+ s.append(builtInConstant);
+ }
+
+#ifdef AMD_EXTENSIONS
+ // GL_AMD_gcn_shader
+ if (profile != EEsProfile && version >= 450) {
+ snprintf(builtInConstant, maxSize, "const int gl_SIMDGroupSizeAMD = 64;");
+ s.append(builtInConstant);
+ }
+#endif
+
+#ifdef NV_EXTENSIONS
+ // SPV_NV_mesh_shader
+ if ((profile != EEsProfile && version >= 450) || (profile == EEsProfile && version >= 320)) {
+ snprintf(builtInConstant, maxSize, "const int gl_MaxMeshOutputVerticesNV = %d;", resources.maxMeshOutputVerticesNV);
+ s.append(builtInConstant);
+
+ snprintf(builtInConstant, maxSize, "const int gl_MaxMeshOutputPrimitivesNV = %d;", resources.maxMeshOutputPrimitivesNV);
+ s.append(builtInConstant);
+
+ snprintf(builtInConstant, maxSize, "const ivec3 gl_MaxMeshWorkGroupSizeNV = ivec3(%d,%d,%d);", resources.maxMeshWorkGroupSizeX_NV,
+ resources.maxMeshWorkGroupSizeY_NV,
+ resources.maxMeshWorkGroupSizeZ_NV);
+ s.append(builtInConstant);
+ snprintf(builtInConstant, maxSize, "const ivec3 gl_MaxTaskWorkGroupSizeNV = ivec3(%d,%d,%d);", resources.maxTaskWorkGroupSizeX_NV,
+ resources.maxTaskWorkGroupSizeY_NV,
+ resources.maxTaskWorkGroupSizeZ_NV);
+ s.append(builtInConstant);
+
+ snprintf(builtInConstant, maxSize, "const int gl_MaxMeshViewCountNV = %d;", resources.maxMeshViewCountNV);
+ s.append(builtInConstant);
+
+ s.append("\n");
+ }
+#endif
+
+ s.append("\n");
+}
+
+//
+// To support built-ins whose special qualifier cannot be declared textually
+// in a shader, like gl_Position.
+//
+// This lets the type of the built-in be declared textually, and then have just its qualifier be
+// updated afterward.
+//
+// Safe to call even if name is not present.
+//
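+// Example use (as called from identifyBuiltIns() below):
+//     SpecialQualifier("gl_FragCoord", EvqFragCoord, EbvFragCoord, symbolTable);
+//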
+// Only use this for built-in variables that have a special qualifier in TStorageQualifier.
+// New built-in variables should use a generic (textually declarable) qualifier in
+// TStorageQualifier and only call BuiltInVariable().
+//
+static void SpecialQualifier(const char* name, TStorageQualifier qualifier, TBuiltInVariable builtIn, TSymbolTable& symbolTable)
+{
+ TSymbol* symbol = symbolTable.find(name);
+ if (symbol == nullptr)
+ return;
+
+ TQualifier& symQualifier = symbol->getWritableType().getQualifier();
+ symQualifier.storage = qualifier;
+ symQualifier.builtIn = builtIn;
+}
+
+//
+// To tag built-in variables with their TBuiltInVariable enum. Use this when the
+// normal declaration text already gets the qualifier right, and all that's needed
+// is setting the builtIn field. This should be the normal way for all new
+// built-in variables.
+//
+// If SpecialQualifier() was called, this does not need to be called.
+//
+// Safe to call even if name is not present.
+//
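+// Example use (as called from identifyBuiltIns() below):
+//     BuiltInVariable("gl_PrimitiveID", EbvPrimitiveId, symbolTable);
+//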
+static void BuiltInVariable(const char* name, TBuiltInVariable builtIn, TSymbolTable& symbolTable)
+{
+ TSymbol* symbol = symbolTable.find(name);
+ if (symbol == nullptr)
+ return;
+
+ TQualifier& symQualifier = symbol->getWritableType().getQualifier();
+ symQualifier.builtIn = builtIn;
+}
+
+//
+// For built-in variables inside a named block.
+// SpecialQualifier() never reaches inside a block; a block member's qualifier comes
+// from the qualification of the block itself.
+//
+// See comments above for other detail.
+//
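+// Example use (as called from identifyBuiltIns() below):
+//     BuiltInVariable("gl_in", "gl_Position", EbvPosition, symbolTable);
+//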
+static void BuiltInVariable(const char* blockName, const char* name, TBuiltInVariable builtIn, TSymbolTable& symbolTable)
+{
+ TSymbol* symbol = symbolTable.find(blockName);
+ if (symbol == nullptr)
+ return;
+
+ TTypeList& structure = *symbol->getWritableType().getWritableStruct();
+ for (int i = 0; i < (int)structure.size(); ++i) {
+ if (structure[i].type->getFieldName().compare(name) == 0) {
+ structure[i].type->getQualifier().builtIn = builtIn;
+ return;
+ }
+ }
+}
+
+//
+// Finish adding/processing context-independent built-in symbols.
+// 1) Programmatically add symbols that could not be added by simple text strings above.
+// 2) Map built-in functions to operators, for those that will turn into an operation node
+// instead of remaining a function call.
+// 3) Tag extension-related symbols added to their base version with their extensions, so
+// that if an early version has the extension turned off, there is an error reported on use.
+//
+void TBuiltIns::identifyBuiltIns(int version, EProfile profile, const SpvVersion& spvVersion, EShLanguage language, TSymbolTable& symbolTable)
+{
+ //
+ // Tag built-in variables and functions with additional qualifier and extension information
+ // that cannot be declared with the text strings.
+ //
+
+ // N.B.: a symbol should only be tagged once, and this function is called multiple times, once
+ // per stage that is used for this profile. So:
+ //  - generally, put common ones in the fragment stage to ensure they are tagged exactly once;
+ //  - for ES, which has different precisions for different stages, the coarsest-grained tagging
+ //    for a built-in used in many stages needs to happen once for the fragment stage and once
+ //    for the vertex stage.
+
+ switch(language) {
+ case EShLangVertex:
+ if (profile != EEsProfile) {
+ if (version >= 440) {
+ symbolTable.setVariableExtensions("gl_BaseVertexARB", 1, &E_GL_ARB_shader_draw_parameters);
+ symbolTable.setVariableExtensions("gl_BaseInstanceARB", 1, &E_GL_ARB_shader_draw_parameters);
+ symbolTable.setVariableExtensions("gl_DrawIDARB", 1, &E_GL_ARB_shader_draw_parameters);
+ BuiltInVariable("gl_BaseVertexARB", EbvBaseVertex, symbolTable);
+ BuiltInVariable("gl_BaseInstanceARB", EbvBaseInstance, symbolTable);
+ BuiltInVariable("gl_DrawIDARB", EbvDrawId, symbolTable);
+ }
+ if (version >= 460) {
+ BuiltInVariable("gl_BaseVertex", EbvBaseVertex, symbolTable);
+ BuiltInVariable("gl_BaseInstance", EbvBaseInstance, symbolTable);
+ BuiltInVariable("gl_DrawID", EbvDrawId, symbolTable);
+ }
+ symbolTable.setVariableExtensions("gl_SubGroupSizeARB", 1, &E_GL_ARB_shader_ballot);
+ symbolTable.setVariableExtensions("gl_SubGroupInvocationARB", 1, &E_GL_ARB_shader_ballot);
+ symbolTable.setVariableExtensions("gl_SubGroupEqMaskARB", 1, &E_GL_ARB_shader_ballot);
+ symbolTable.setVariableExtensions("gl_SubGroupGeMaskARB", 1, &E_GL_ARB_shader_ballot);
+ symbolTable.setVariableExtensions("gl_SubGroupGtMaskARB", 1, &E_GL_ARB_shader_ballot);
+ symbolTable.setVariableExtensions("gl_SubGroupLeMaskARB", 1, &E_GL_ARB_shader_ballot);
+ symbolTable.setVariableExtensions("gl_SubGroupLtMaskARB", 1, &E_GL_ARB_shader_ballot);
+
+ symbolTable.setFunctionExtensions("ballotARB", 1, &E_GL_ARB_shader_ballot);
+ symbolTable.setFunctionExtensions("readInvocationARB", 1, &E_GL_ARB_shader_ballot);
+ symbolTable.setFunctionExtensions("readFirstInvocationARB", 1, &E_GL_ARB_shader_ballot);
+
+ BuiltInVariable("gl_SubGroupInvocationARB", EbvSubGroupInvocation, symbolTable);
+ BuiltInVariable("gl_SubGroupEqMaskARB", EbvSubGroupEqMask, symbolTable);
+ BuiltInVariable("gl_SubGroupGeMaskARB", EbvSubGroupGeMask, symbolTable);
+ BuiltInVariable("gl_SubGroupGtMaskARB", EbvSubGroupGtMask, symbolTable);
+ BuiltInVariable("gl_SubGroupLeMaskARB", EbvSubGroupLeMask, symbolTable);
+ BuiltInVariable("gl_SubGroupLtMaskARB", EbvSubGroupLtMask, symbolTable);
+
+ if (spvVersion.vulkan > 0)
+ // Treat "gl_SubGroupSizeARB" as shader input instead of uniform for Vulkan
+ SpecialQualifier("gl_SubGroupSizeARB", EvqVaryingIn, EbvSubGroupSize, symbolTable);
+ else
+ BuiltInVariable("gl_SubGroupSizeARB", EbvSubGroupSize, symbolTable);
+
+ if (version >= 430) {
+ symbolTable.setFunctionExtensions("anyInvocationARB", 1, &E_GL_ARB_shader_group_vote);
+ symbolTable.setFunctionExtensions("allInvocationsARB", 1, &E_GL_ARB_shader_group_vote);
+ symbolTable.setFunctionExtensions("allInvocationsEqualARB", 1, &E_GL_ARB_shader_group_vote);
+ }
+ }
+
+#ifdef AMD_EXTENSIONS
+ if (profile != EEsProfile) {
+ symbolTable.setFunctionExtensions("minInvocationsAMD", 1, &E_GL_AMD_shader_ballot);
+ symbolTable.setFunctionExtensions("maxInvocationsAMD", 1, &E_GL_AMD_shader_ballot);
+ symbolTable.setFunctionExtensions("addInvocationsAMD", 1, &E_GL_AMD_shader_ballot);
+ symbolTable.setFunctionExtensions("minInvocationsNonUniformAMD", 1, &E_GL_AMD_shader_ballot);
+ symbolTable.setFunctionExtensions("maxInvocationsNonUniformAMD", 1, &E_GL_AMD_shader_ballot);
+ symbolTable.setFunctionExtensions("addInvocationsNonUniformAMD", 1, &E_GL_AMD_shader_ballot);
+ symbolTable.setFunctionExtensions("swizzleInvocationsAMD", 1, &E_GL_AMD_shader_ballot);
+ symbolTable.setFunctionExtensions("swizzleInvocationsWithPatternAMD", 1, &E_GL_AMD_shader_ballot);
+ symbolTable.setFunctionExtensions("writeInvocationAMD", 1, &E_GL_AMD_shader_ballot);
+ symbolTable.setFunctionExtensions("mbcntAMD", 1, &E_GL_AMD_shader_ballot);
+
+ symbolTable.setFunctionExtensions("minInvocationsInclusiveScanAMD", 1, &E_GL_AMD_shader_ballot);
+ symbolTable.setFunctionExtensions("maxInvocationsInclusiveScanAMD", 1, &E_GL_AMD_shader_ballot);
+ symbolTable.setFunctionExtensions("addInvocationsInclusiveScanAMD", 1, &E_GL_AMD_shader_ballot);
+ symbolTable.setFunctionExtensions("minInvocationsInclusiveScanNonUniformAMD", 1, &E_GL_AMD_shader_ballot);
+ symbolTable.setFunctionExtensions("maxInvocationsInclusiveScanNonUniformAMD", 1, &E_GL_AMD_shader_ballot);
+ symbolTable.setFunctionExtensions("addInvocationsInclusiveScanNonUniformAMD", 1, &E_GL_AMD_shader_ballot);
+ symbolTable.setFunctionExtensions("minInvocationsExclusiveScanAMD", 1, &E_GL_AMD_shader_ballot);
+ symbolTable.setFunctionExtensions("maxInvocationsExclusiveScanAMD", 1, &E_GL_AMD_shader_ballot);
+ symbolTable.setFunctionExtensions("addInvocationsExclusiveScanAMD", 1, &E_GL_AMD_shader_ballot);
+ symbolTable.setFunctionExtensions("minInvocationsExclusiveScanNonUniformAMD", 1, &E_GL_AMD_shader_ballot);
+ symbolTable.setFunctionExtensions("maxInvocationsExclusiveScanNonUniformAMD", 1, &E_GL_AMD_shader_ballot);
+ symbolTable.setFunctionExtensions("addInvocationsExclusiveScanNonUniformAMD", 1, &E_GL_AMD_shader_ballot);
+ }
+
+ if (profile != EEsProfile) {
+ symbolTable.setFunctionExtensions("min3", 1, &E_GL_AMD_shader_trinary_minmax);
+ symbolTable.setFunctionExtensions("max3", 1, &E_GL_AMD_shader_trinary_minmax);
+ symbolTable.setFunctionExtensions("mid3", 1, &E_GL_AMD_shader_trinary_minmax);
+ }
+
+ if (profile != EEsProfile) {
+ symbolTable.setFunctionExtensions("cubeFaceIndexAMD", 1, &E_GL_AMD_gcn_shader);
+ symbolTable.setFunctionExtensions("cubeFaceCoordAMD", 1, &E_GL_AMD_gcn_shader);
+ symbolTable.setFunctionExtensions("timeAMD", 1, &E_GL_AMD_gcn_shader);
+ }
+
+ if (profile != EEsProfile) {
+ symbolTable.setFunctionExtensions("fragmentMaskFetchAMD", 1, &E_GL_AMD_shader_fragment_mask);
+ symbolTable.setFunctionExtensions("fragmentFetchAMD", 1, &E_GL_AMD_shader_fragment_mask);
+ }
+#endif
+
+#ifdef NV_EXTENSIONS
+ symbolTable.setFunctionExtensions("textureFootprintNV", 1, &E_GL_NV_shader_texture_footprint);
+ symbolTable.setFunctionExtensions("textureFootprintClampNV", 1, &E_GL_NV_shader_texture_footprint);
+ symbolTable.setFunctionExtensions("textureFootprintLodNV", 1, &E_GL_NV_shader_texture_footprint);
+ symbolTable.setFunctionExtensions("textureFootprintGradNV", 1, &E_GL_NV_shader_texture_footprint);
+ symbolTable.setFunctionExtensions("textureFootprintGradClampNV", 1, &E_GL_NV_shader_texture_footprint);
+#endif
+ // Compatibility variables, vertex only
+ if (spvVersion.spv == 0) {
+ BuiltInVariable("gl_Color", EbvColor, symbolTable);
+ BuiltInVariable("gl_SecondaryColor", EbvSecondaryColor, symbolTable);
+ BuiltInVariable("gl_Normal", EbvNormal, symbolTable);
+ BuiltInVariable("gl_Vertex", EbvVertex, symbolTable);
+ BuiltInVariable("gl_MultiTexCoord0", EbvMultiTexCoord0, symbolTable);
+ BuiltInVariable("gl_MultiTexCoord1", EbvMultiTexCoord1, symbolTable);
+ BuiltInVariable("gl_MultiTexCoord2", EbvMultiTexCoord2, symbolTable);
+ BuiltInVariable("gl_MultiTexCoord3", EbvMultiTexCoord3, symbolTable);
+ BuiltInVariable("gl_MultiTexCoord4", EbvMultiTexCoord4, symbolTable);
+ BuiltInVariable("gl_MultiTexCoord5", EbvMultiTexCoord5, symbolTable);
+ BuiltInVariable("gl_MultiTexCoord6", EbvMultiTexCoord6, symbolTable);
+ BuiltInVariable("gl_MultiTexCoord7", EbvMultiTexCoord7, symbolTable);
+ BuiltInVariable("gl_FogCoord", EbvFogFragCoord, symbolTable);
+ }
+
+ if (profile == EEsProfile) {
+ if (spvVersion.spv == 0) {
+ symbolTable.setFunctionExtensions("texture2DGradEXT", 1, &E_GL_EXT_shader_texture_lod);
+ symbolTable.setFunctionExtensions("texture2DProjGradEXT", 1, &E_GL_EXT_shader_texture_lod);
+ symbolTable.setFunctionExtensions("textureCubeGradEXT", 1, &E_GL_EXT_shader_texture_lod);
+ if (version == 310)
+ symbolTable.setFunctionExtensions("textureGatherOffsets", Num_AEP_gpu_shader5, AEP_gpu_shader5);
+ }
+ if (version == 310)
+ symbolTable.setFunctionExtensions("fma", Num_AEP_gpu_shader5, AEP_gpu_shader5);
+ }
+
+ if (profile == EEsProfile && version < 320) {
+ symbolTable.setFunctionExtensions("imageAtomicAdd", 1, &E_GL_OES_shader_image_atomic);
+ symbolTable.setFunctionExtensions("imageAtomicMin", 1, &E_GL_OES_shader_image_atomic);
+ symbolTable.setFunctionExtensions("imageAtomicMax", 1, &E_GL_OES_shader_image_atomic);
+ symbolTable.setFunctionExtensions("imageAtomicAnd", 1, &E_GL_OES_shader_image_atomic);
+ symbolTable.setFunctionExtensions("imageAtomicOr", 1, &E_GL_OES_shader_image_atomic);
+ symbolTable.setFunctionExtensions("imageAtomicXor", 1, &E_GL_OES_shader_image_atomic);
+ symbolTable.setFunctionExtensions("imageAtomicExchange", 1, &E_GL_OES_shader_image_atomic);
+ symbolTable.setFunctionExtensions("imageAtomicCompSwap", 1, &E_GL_OES_shader_image_atomic);
+ }
+
+ if (spvVersion.vulkan == 0) {
+ SpecialQualifier("gl_VertexID", EvqVertexId, EbvVertexId, symbolTable);
+ SpecialQualifier("gl_InstanceID", EvqInstanceId, EbvInstanceId, symbolTable);
+ }
+
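+ // Vulkan exposes gl_VertexIndex/gl_InstanceIndex in place of gl_VertexID/gl_InstanceID.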
+ if (spvVersion.vulkan > 0) {
+ BuiltInVariable("gl_VertexIndex", EbvVertexIndex, symbolTable);
+ BuiltInVariable("gl_InstanceIndex", EbvInstanceIndex, symbolTable);
+ }
+
+ if (version >= 300 /* both ES and non-ES */) {
+ symbolTable.setVariableExtensions("gl_ViewID_OVR", Num_OVR_multiview_EXTs, OVR_multiview_EXTs);
+ BuiltInVariable("gl_ViewID_OVR", EbvViewIndex, symbolTable);
+ }
+
+ if (profile == EEsProfile) {
+ symbolTable.setFunctionExtensions("shadow2DEXT", 1, &E_GL_EXT_shadow_samplers);
+ symbolTable.setFunctionExtensions("shadow2DProjEXT", 1, &E_GL_EXT_shadow_samplers);
+ }
+
+ // Fall through
+
+ case EShLangTessControl:
+ if (profile == EEsProfile && version >= 310) {
+ BuiltInVariable("gl_BoundingBoxEXT", EbvBoundingBox, symbolTable);
+ symbolTable.setVariableExtensions("gl_BoundingBoxEXT", 1,
+ &E_GL_EXT_primitive_bounding_box);
+ BuiltInVariable("gl_BoundingBoxOES", EbvBoundingBox, symbolTable);
+ symbolTable.setVariableExtensions("gl_BoundingBoxOES", 1,
+ &E_GL_OES_primitive_bounding_box);
+
+ if (version >= 320) {
+ BuiltInVariable("gl_BoundingBox", EbvBoundingBox, symbolTable);
+ }
+ }
+
+ // Fall through
+
+ case EShLangTessEvaluation:
+ case EShLangGeometry:
+ SpecialQualifier("gl_Position", EvqPosition, EbvPosition, symbolTable);
+ SpecialQualifier("gl_PointSize", EvqPointSize, EbvPointSize, symbolTable);
+ SpecialQualifier("gl_ClipVertex", EvqClipVertex, EbvClipVertex, symbolTable);
+
+ BuiltInVariable("gl_in", "gl_Position", EbvPosition, symbolTable);
+ BuiltInVariable("gl_in", "gl_PointSize", EbvPointSize, symbolTable);
+ BuiltInVariable("gl_in", "gl_ClipDistance", EbvClipDistance, symbolTable);
+ BuiltInVariable("gl_in", "gl_CullDistance", EbvCullDistance, symbolTable);
+
+ BuiltInVariable("gl_out", "gl_Position", EbvPosition, symbolTable);
+ BuiltInVariable("gl_out", "gl_PointSize", EbvPointSize, symbolTable);
+ BuiltInVariable("gl_out", "gl_ClipDistance", EbvClipDistance, symbolTable);
+ BuiltInVariable("gl_out", "gl_CullDistance", EbvCullDistance, symbolTable);
+
+ BuiltInVariable("gl_ClipDistance", EbvClipDistance, symbolTable);
+ BuiltInVariable("gl_CullDistance", EbvCullDistance, symbolTable);
+ BuiltInVariable("gl_PrimitiveIDIn", EbvPrimitiveId, symbolTable);
+ BuiltInVariable("gl_PrimitiveID", EbvPrimitiveId, symbolTable);
+ BuiltInVariable("gl_InvocationID", EbvInvocationId, symbolTable);
+ BuiltInVariable("gl_Layer", EbvLayer, symbolTable);
+ BuiltInVariable("gl_ViewportIndex", EbvViewportIndex, symbolTable);
+
+#ifdef NV_EXTENSIONS
+ if (language != EShLangGeometry) {
+ symbolTable.setVariableExtensions("gl_Layer", Num_viewportEXTs, viewportEXTs);
+ symbolTable.setVariableExtensions("gl_ViewportIndex", Num_viewportEXTs, viewportEXTs);
+ }
+#else
+ if (language != EShLangGeometry && version >= 410) {
+ symbolTable.setVariableExtensions("gl_Layer", 1, &E_GL_ARB_shader_viewport_layer_array);
+ symbolTable.setVariableExtensions("gl_ViewportIndex", 1, &E_GL_ARB_shader_viewport_layer_array);
+ }
+#endif
+
+#ifdef NV_EXTENSIONS
+ symbolTable.setVariableExtensions("gl_ViewportMask", 1, &E_GL_NV_viewport_array2);
+ symbolTable.setVariableExtensions("gl_SecondaryPositionNV", 1, &E_GL_NV_stereo_view_rendering);
+ symbolTable.setVariableExtensions("gl_SecondaryViewportMaskNV", 1, &E_GL_NV_stereo_view_rendering);
+ symbolTable.setVariableExtensions("gl_PositionPerViewNV", 1, &E_GL_NVX_multiview_per_view_attributes);
+ symbolTable.setVariableExtensions("gl_ViewportMaskPerViewNV", 1, &E_GL_NVX_multiview_per_view_attributes);
+
+ BuiltInVariable("gl_ViewportMask", EbvViewportMaskNV, symbolTable);
+ BuiltInVariable("gl_SecondaryPositionNV", EbvSecondaryPositionNV, symbolTable);
+ BuiltInVariable("gl_SecondaryViewportMaskNV", EbvSecondaryViewportMaskNV, symbolTable);
+ BuiltInVariable("gl_PositionPerViewNV", EbvPositionPerViewNV, symbolTable);
+ BuiltInVariable("gl_ViewportMaskPerViewNV", EbvViewportMaskPerViewNV, symbolTable);
+
+ if (language != EShLangVertex) {
+ symbolTable.setVariableExtensions("gl_in", "gl_SecondaryPositionNV", 1, &E_GL_NV_stereo_view_rendering);
+ symbolTable.setVariableExtensions("gl_in", "gl_PositionPerViewNV", 1, &E_GL_NVX_multiview_per_view_attributes);
+
+ BuiltInVariable("gl_in", "gl_SecondaryPositionNV", EbvSecondaryPositionNV, symbolTable);
+ BuiltInVariable("gl_in", "gl_PositionPerViewNV", EbvPositionPerViewNV, symbolTable);
+ }
+ symbolTable.setVariableExtensions("gl_out", "gl_ViewportMask", 1, &E_GL_NV_viewport_array2);
+ symbolTable.setVariableExtensions("gl_out", "gl_SecondaryPositionNV", 1, &E_GL_NV_stereo_view_rendering);
+ symbolTable.setVariableExtensions("gl_out", "gl_SecondaryViewportMaskNV", 1, &E_GL_NV_stereo_view_rendering);
+ symbolTable.setVariableExtensions("gl_out", "gl_PositionPerViewNV", 1, &E_GL_NVX_multiview_per_view_attributes);
+ symbolTable.setVariableExtensions("gl_out", "gl_ViewportMaskPerViewNV", 1, &E_GL_NVX_multiview_per_view_attributes);
+
+ BuiltInVariable("gl_out", "gl_ViewportMask", EbvViewportMaskNV, symbolTable);
+ BuiltInVariable("gl_out", "gl_SecondaryPositionNV", EbvSecondaryPositionNV, symbolTable);
+ BuiltInVariable("gl_out", "gl_SecondaryViewportMaskNV", EbvSecondaryViewportMaskNV, symbolTable);
+ BuiltInVariable("gl_out", "gl_PositionPerViewNV", EbvPositionPerViewNV, symbolTable);
+ BuiltInVariable("gl_out", "gl_ViewportMaskPerViewNV", EbvViewportMaskPerViewNV, symbolTable);
+#endif
+
+ BuiltInVariable("gl_PatchVerticesIn", EbvPatchVertices, symbolTable);
+ BuiltInVariable("gl_TessLevelOuter", EbvTessLevelOuter, symbolTable);
+ BuiltInVariable("gl_TessLevelInner", EbvTessLevelInner, symbolTable);
+ BuiltInVariable("gl_TessCoord", EbvTessCoord, symbolTable);
+
+ if (version < 410)
+ symbolTable.setVariableExtensions("gl_ViewportIndex", 1, &E_GL_ARB_viewport_array);
+
+ // Compatibility variables
+
+ BuiltInVariable("gl_in", "gl_ClipVertex", EbvClipVertex, symbolTable);
+ BuiltInVariable("gl_in", "gl_FrontColor", EbvFrontColor, symbolTable);
+ BuiltInVariable("gl_in", "gl_BackColor", EbvBackColor, symbolTable);
+ BuiltInVariable("gl_in", "gl_FrontSecondaryColor", EbvFrontSecondaryColor, symbolTable);
+ BuiltInVariable("gl_in", "gl_BackSecondaryColor", EbvBackSecondaryColor, symbolTable);
+ BuiltInVariable("gl_in", "gl_TexCoord", EbvTexCoord, symbolTable);
+ BuiltInVariable("gl_in", "gl_FogFragCoord", EbvFogFragCoord, symbolTable);
+
+ BuiltInVariable("gl_out", "gl_ClipVertex", EbvClipVertex, symbolTable);
+ BuiltInVariable("gl_out", "gl_FrontColor", EbvFrontColor, symbolTable);
+ BuiltInVariable("gl_out", "gl_BackColor", EbvBackColor, symbolTable);
+ BuiltInVariable("gl_out", "gl_FrontSecondaryColor", EbvFrontSecondaryColor, symbolTable);
+ BuiltInVariable("gl_out", "gl_BackSecondaryColor", EbvBackSecondaryColor, symbolTable);
+ BuiltInVariable("gl_out", "gl_TexCoord", EbvTexCoord, symbolTable);
+ BuiltInVariable("gl_out", "gl_FogFragCoord", EbvFogFragCoord, symbolTable);
+
+ BuiltInVariable("gl_ClipVertex", EbvClipVertex, symbolTable);
+ BuiltInVariable("gl_FrontColor", EbvFrontColor, symbolTable);
+ BuiltInVariable("gl_BackColor", EbvBackColor, symbolTable);
+ BuiltInVariable("gl_FrontSecondaryColor", EbvFrontSecondaryColor, symbolTable);
+ BuiltInVariable("gl_BackSecondaryColor", EbvBackSecondaryColor, symbolTable);
+ BuiltInVariable("gl_TexCoord", EbvTexCoord, symbolTable);
+ BuiltInVariable("gl_FogFragCoord", EbvFogFragCoord, symbolTable);
+
+ // gl_PointSize, when it needs to be tied to an extension, is always a member of a block.
+ // (Sometimes with an instance name, sometimes anonymous).
+ if (profile == EEsProfile) {
+ if (language == EShLangGeometry) {
+ symbolTable.setVariableExtensions("gl_PointSize", Num_AEP_geometry_point_size, AEP_geometry_point_size);
+ symbolTable.setVariableExtensions("gl_in", "gl_PointSize", Num_AEP_geometry_point_size, AEP_geometry_point_size);
+ } else if (language == EShLangTessEvaluation || language == EShLangTessControl) {
+ // the gl_in tessellation declarations of gl_PointSize are handled in the context-dependent paths
+ symbolTable.setVariableExtensions("gl_PointSize", Num_AEP_tessellation_point_size, AEP_tessellation_point_size);
+ symbolTable.setVariableExtensions("gl_out", "gl_PointSize", Num_AEP_tessellation_point_size, AEP_tessellation_point_size);
+ }
+ }
+
+ if ((profile != EEsProfile && version >= 140) ||
+ (profile == EEsProfile && version >= 310)) {
+ symbolTable.setVariableExtensions("gl_DeviceIndex", 1, &E_GL_EXT_device_group);
+ BuiltInVariable("gl_DeviceIndex", EbvDeviceIndex, symbolTable);
+ symbolTable.setVariableExtensions("gl_ViewIndex", 1, &E_GL_EXT_multiview);
+ BuiltInVariable("gl_ViewIndex", EbvViewIndex, symbolTable);
+ }
+
+ // GL_KHR_shader_subgroup
+ if ((profile == EEsProfile && version >= 310) ||
+ (profile != EEsProfile && version >= 140)) {
+ symbolTable.setVariableExtensions("gl_SubgroupSize", 1, &E_GL_KHR_shader_subgroup_basic);
+ symbolTable.setVariableExtensions("gl_SubgroupInvocationID", 1, &E_GL_KHR_shader_subgroup_basic);
+ symbolTable.setVariableExtensions("gl_SubgroupEqMask", 1, &E_GL_KHR_shader_subgroup_ballot);
+ symbolTable.setVariableExtensions("gl_SubgroupGeMask", 1, &E_GL_KHR_shader_subgroup_ballot);
+ symbolTable.setVariableExtensions("gl_SubgroupGtMask", 1, &E_GL_KHR_shader_subgroup_ballot);
+ symbolTable.setVariableExtensions("gl_SubgroupLeMask", 1, &E_GL_KHR_shader_subgroup_ballot);
+ symbolTable.setVariableExtensions("gl_SubgroupLtMask", 1, &E_GL_KHR_shader_subgroup_ballot);
+
+ BuiltInVariable("gl_SubgroupSize", EbvSubgroupSize2, symbolTable);
+ BuiltInVariable("gl_SubgroupInvocationID", EbvSubgroupInvocation2, symbolTable);
+ BuiltInVariable("gl_SubgroupEqMask", EbvSubgroupEqMask2, symbolTable);
+ BuiltInVariable("gl_SubgroupGeMask", EbvSubgroupGeMask2, symbolTable);
+ BuiltInVariable("gl_SubgroupGtMask", EbvSubgroupGtMask2, symbolTable);
+ BuiltInVariable("gl_SubgroupLeMask", EbvSubgroupLeMask2, symbolTable);
+ BuiltInVariable("gl_SubgroupLtMask", EbvSubgroupLtMask2, symbolTable);
+ }
+
+ break;
+
+ case EShLangFragment:
+ SpecialQualifier("gl_FrontFacing", EvqFace, EbvFace, symbolTable);
+ SpecialQualifier("gl_FragCoord", EvqFragCoord, EbvFragCoord, symbolTable);
+ SpecialQualifier("gl_PointCoord", EvqPointCoord, EbvPointCoord, symbolTable);
+ if (spvVersion.spv == 0)
+ SpecialQualifier("gl_FragColor", EvqFragColor, EbvFragColor, symbolTable);
+ else {
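+ // Under SPIR-V, gl_FragColor becomes an ordinary fragment output at location 0.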
+ TSymbol* symbol = symbolTable.find("gl_FragColor");
+ if (symbol) {
+ symbol->getWritableType().getQualifier().storage = EvqVaryingOut;
+ symbol->getWritableType().getQualifier().layoutLocation = 0;
+ }
+ }
+ SpecialQualifier("gl_FragDepth", EvqFragDepth, EbvFragDepth, symbolTable);
+ SpecialQualifier("gl_FragDepthEXT", EvqFragDepth, EbvFragDepth, symbolTable);
+ SpecialQualifier("gl_HelperInvocation", EvqVaryingIn, EbvHelperInvocation, symbolTable);
+
+ BuiltInVariable("gl_ClipDistance", EbvClipDistance, symbolTable);
+ BuiltInVariable("gl_CullDistance", EbvCullDistance, symbolTable);
+ BuiltInVariable("gl_PrimitiveID", EbvPrimitiveId, symbolTable);
+
+ if (profile != EEsProfile && version >= 140) {
+ symbolTable.setVariableExtensions("gl_FragStencilRefARB", 1, &E_GL_ARB_shader_stencil_export);
+ BuiltInVariable("gl_FragStencilRefARB", EbvFragStencilRef, symbolTable);
+ }
+
+ if ((profile != EEsProfile && version >= 400) ||
+ (profile == EEsProfile && version >= 310)) {
+ BuiltInVariable("gl_SampleID", EbvSampleId, symbolTable);
+ BuiltInVariable("gl_SamplePosition", EbvSamplePosition, symbolTable);
+ BuiltInVariable("gl_SampleMaskIn", EbvSampleMask, symbolTable);
+ BuiltInVariable("gl_SampleMask", EbvSampleMask, symbolTable);
+ if (profile == EEsProfile && version < 320) {
+ symbolTable.setVariableExtensions("gl_SampleID", 1, &E_GL_OES_sample_variables);
+ symbolTable.setVariableExtensions("gl_SamplePosition", 1, &E_GL_OES_sample_variables);
+ symbolTable.setVariableExtensions("gl_SampleMaskIn", 1, &E_GL_OES_sample_variables);
+ symbolTable.setVariableExtensions("gl_SampleMask", 1, &E_GL_OES_sample_variables);
+ symbolTable.setVariableExtensions("gl_NumSamples", 1, &E_GL_OES_sample_variables);
+ }
+ }
+
+ BuiltInVariable("gl_Layer", EbvLayer, symbolTable);
+ BuiltInVariable("gl_ViewportIndex", EbvViewportIndex, symbolTable);
+
+ // Compatibility variables
+
+ BuiltInVariable("gl_in", "gl_FogFragCoord", EbvFogFragCoord, symbolTable);
+ BuiltInVariable("gl_in", "gl_TexCoord", EbvTexCoord, symbolTable);
+ BuiltInVariable("gl_in", "gl_Color", EbvColor, symbolTable);
+ BuiltInVariable("gl_in", "gl_SecondaryColor", EbvSecondaryColor, symbolTable);
+
+ BuiltInVariable("gl_FogFragCoord", EbvFogFragCoord, symbolTable);
+ BuiltInVariable("gl_TexCoord", EbvTexCoord, symbolTable);
+ BuiltInVariable("gl_Color", EbvColor, symbolTable);
+ BuiltInVariable("gl_SecondaryColor", EbvSecondaryColor, symbolTable);
+
+ // built-in functions
+
+ if (profile == EEsProfile) {
+ if (spvVersion.spv == 0) {
+ symbolTable.setFunctionExtensions("texture2DLodEXT", 1, &E_GL_EXT_shader_texture_lod);
+ symbolTable.setFunctionExtensions("texture2DProjLodEXT", 1, &E_GL_EXT_shader_texture_lod);
+ symbolTable.setFunctionExtensions("textureCubeLodEXT", 1, &E_GL_EXT_shader_texture_lod);
+ symbolTable.setFunctionExtensions("texture2DGradEXT", 1, &E_GL_EXT_shader_texture_lod);
+ symbolTable.setFunctionExtensions("texture2DProjGradEXT", 1, &E_GL_EXT_shader_texture_lod);
+ symbolTable.setFunctionExtensions("textureCubeGradEXT", 1, &E_GL_EXT_shader_texture_lod);
+ if (version < 320)
+ symbolTable.setFunctionExtensions("textureGatherOffsets", Num_AEP_gpu_shader5, AEP_gpu_shader5);
+ }
+ if (version == 100) {
+ symbolTable.setFunctionExtensions("dFdx", 1, &E_GL_OES_standard_derivatives);
+ symbolTable.setFunctionExtensions("dFdy", 1, &E_GL_OES_standard_derivatives);
+ symbolTable.setFunctionExtensions("fwidth", 1, &E_GL_OES_standard_derivatives);
+ }
+ if (version == 310) {
+ symbolTable.setFunctionExtensions("fma", Num_AEP_gpu_shader5, AEP_gpu_shader5);
+ symbolTable.setFunctionExtensions("interpolateAtCentroid", 1, &E_GL_OES_shader_multisample_interpolation);
+ symbolTable.setFunctionExtensions("interpolateAtSample", 1, &E_GL_OES_shader_multisample_interpolation);
+ symbolTable.setFunctionExtensions("interpolateAtOffset", 1, &E_GL_OES_shader_multisample_interpolation);
+ }
+ } else if (version < 130) {
+ if (spvVersion.spv == 0) {
+ symbolTable.setFunctionExtensions("texture1DLod", 1, &E_GL_ARB_shader_texture_lod);
+ symbolTable.setFunctionExtensions("texture2DLod", 1, &E_GL_ARB_shader_texture_lod);
+ symbolTable.setFunctionExtensions("texture3DLod", 1, &E_GL_ARB_shader_texture_lod);
+ symbolTable.setFunctionExtensions("textureCubeLod", 1, &E_GL_ARB_shader_texture_lod);
+ symbolTable.setFunctionExtensions("texture1DProjLod", 1, &E_GL_ARB_shader_texture_lod);
+ symbolTable.setFunctionExtensions("texture2DProjLod", 1, &E_GL_ARB_shader_texture_lod);
+ symbolTable.setFunctionExtensions("texture3DProjLod", 1, &E_GL_ARB_shader_texture_lod);
+ symbolTable.setFunctionExtensions("shadow1DLod", 1, &E_GL_ARB_shader_texture_lod);
+ symbolTable.setFunctionExtensions("shadow2DLod", 1, &E_GL_ARB_shader_texture_lod);
+ symbolTable.setFunctionExtensions("shadow1DProjLod", 1, &E_GL_ARB_shader_texture_lod);
+ symbolTable.setFunctionExtensions("shadow2DProjLod", 1, &E_GL_ARB_shader_texture_lod);
+ }
+ }
+
+ // E_GL_ARB_shader_texture_lod functions usable only with the extension enabled
+ if (profile != EEsProfile && spvVersion.spv == 0) {
+ symbolTable.setFunctionExtensions("texture1DGradARB", 1, &E_GL_ARB_shader_texture_lod);
+ symbolTable.setFunctionExtensions("texture1DProjGradARB", 1, &E_GL_ARB_shader_texture_lod);
+ symbolTable.setFunctionExtensions("texture2DGradARB", 1, &E_GL_ARB_shader_texture_lod);
+ symbolTable.setFunctionExtensions("texture2DProjGradARB", 1, &E_GL_ARB_shader_texture_lod);
+ symbolTable.setFunctionExtensions("texture3DGradARB", 1, &E_GL_ARB_shader_texture_lod);
+ symbolTable.setFunctionExtensions("texture3DProjGradARB", 1, &E_GL_ARB_shader_texture_lod);
+ symbolTable.setFunctionExtensions("textureCubeGradARB", 1, &E_GL_ARB_shader_texture_lod);
+ symbolTable.setFunctionExtensions("shadow1DGradARB", 1, &E_GL_ARB_shader_texture_lod);
+ symbolTable.setFunctionExtensions("shadow1DProjGradARB", 1, &E_GL_ARB_shader_texture_lod);
+ symbolTable.setFunctionExtensions("shadow2DGradARB", 1, &E_GL_ARB_shader_texture_lod);
+ symbolTable.setFunctionExtensions("shadow2DProjGradARB", 1, &E_GL_ARB_shader_texture_lod);
+ symbolTable.setFunctionExtensions("texture2DRectGradARB", 1, &E_GL_ARB_shader_texture_lod);
+ symbolTable.setFunctionExtensions("texture2DRectProjGradARB", 1, &E_GL_ARB_shader_texture_lod);
+ symbolTable.setFunctionExtensions("shadow2DRectGradARB", 1, &E_GL_ARB_shader_texture_lod);
+ symbolTable.setFunctionExtensions("shadow2DRectProjGradARB", 1, &E_GL_ARB_shader_texture_lod);
+ }
+
+ // E_GL_ARB_shader_image_load_store
+ if (profile != EEsProfile && version < 420)
+ symbolTable.setFunctionExtensions("memoryBarrier", 1, &E_GL_ARB_shader_image_load_store);
+ // All the image access functions are protected by checks on the type of the first argument.
+
+ // E_GL_ARB_shader_atomic_counters
+ if (profile != EEsProfile && version < 420) {
+ symbolTable.setFunctionExtensions("atomicCounterIncrement", 1, &E_GL_ARB_shader_atomic_counters);
+ symbolTable.setFunctionExtensions("atomicCounterDecrement", 1, &E_GL_ARB_shader_atomic_counters);
+ symbolTable.setFunctionExtensions("atomicCounter" , 1, &E_GL_ARB_shader_atomic_counters);
+ }
+
+ // E_GL_ARB_derivative_control
+ if (profile != EEsProfile && version < 450) {
+ symbolTable.setFunctionExtensions("dFdxFine", 1, &E_GL_ARB_derivative_control);
+ symbolTable.setFunctionExtensions("dFdyFine", 1, &E_GL_ARB_derivative_control);
+ symbolTable.setFunctionExtensions("fwidthFine", 1, &E_GL_ARB_derivative_control);
+ symbolTable.setFunctionExtensions("dFdxCoarse", 1, &E_GL_ARB_derivative_control);
+ symbolTable.setFunctionExtensions("dFdyCoarse", 1, &E_GL_ARB_derivative_control);
+ symbolTable.setFunctionExtensions("fwidthCoarse", 1, &E_GL_ARB_derivative_control);
+ }
+
+ // E_GL_ARB_sparse_texture2
+ if (profile != EEsProfile)
+ {
+ symbolTable.setFunctionExtensions("sparseTextureARB", 1, &E_GL_ARB_sparse_texture2);
+ symbolTable.setFunctionExtensions("sparseTextureLodARB", 1, &E_GL_ARB_sparse_texture2);
+ symbolTable.setFunctionExtensions("sparseTextureOffsetARB", 1, &E_GL_ARB_sparse_texture2);
+ symbolTable.setFunctionExtensions("sparseTexelFetchARB", 1, &E_GL_ARB_sparse_texture2);
+ symbolTable.setFunctionExtensions("sparseTexelFetchOffsetARB", 1, &E_GL_ARB_sparse_texture2);
+ symbolTable.setFunctionExtensions("sparseTextureLodOffsetARB", 1, &E_GL_ARB_sparse_texture2);
+ symbolTable.setFunctionExtensions("sparseTextureGradARB", 1, &E_GL_ARB_sparse_texture2);
+ symbolTable.setFunctionExtensions("sparseTextureGradOffsetARB", 1, &E_GL_ARB_sparse_texture2);
+ symbolTable.setFunctionExtensions("sparseTextureGatherARB", 1, &E_GL_ARB_sparse_texture2);
+ symbolTable.setFunctionExtensions("sparseTextureGatherOffsetARB", 1, &E_GL_ARB_sparse_texture2);
+ symbolTable.setFunctionExtensions("sparseTextureGatherOffsetsARB", 1, &E_GL_ARB_sparse_texture2);
+ symbolTable.setFunctionExtensions("sparseImageLoadARB", 1, &E_GL_ARB_sparse_texture2);
+ symbolTable.setFunctionExtensions("sparseTexelsResident", 1, &E_GL_ARB_sparse_texture2);
+ }
+
+ // E_GL_ARB_sparse_texture_clamp
+ if (profile != EEsProfile)
+ {
+ symbolTable.setFunctionExtensions("sparseTextureClampARB", 1, &E_GL_ARB_sparse_texture_clamp);
+ symbolTable.setFunctionExtensions("sparseTextureOffsetClampARB", 1, &E_GL_ARB_sparse_texture_clamp);
+ symbolTable.setFunctionExtensions("sparseTextureGradClampARB", 1, &E_GL_ARB_sparse_texture_clamp);
+ symbolTable.setFunctionExtensions("sparseTextureGradOffsetClampARB", 1, &E_GL_ARB_sparse_texture_clamp);
+ symbolTable.setFunctionExtensions("textureClampARB", 1, &E_GL_ARB_sparse_texture_clamp);
+ symbolTable.setFunctionExtensions("textureOffsetClampARB", 1, &E_GL_ARB_sparse_texture_clamp);
+ symbolTable.setFunctionExtensions("textureGradClampARB", 1, &E_GL_ARB_sparse_texture_clamp);
+ symbolTable.setFunctionExtensions("textureGradOffsetClampARB", 1, &E_GL_ARB_sparse_texture_clamp);
+ }
+
+#ifdef AMD_EXTENSIONS
+ // E_GL_AMD_shader_explicit_vertex_parameter
+ if (profile != EEsProfile) {
+ symbolTable.setVariableExtensions("gl_BaryCoordNoPerspAMD", 1, &E_GL_AMD_shader_explicit_vertex_parameter);
+ symbolTable.setVariableExtensions("gl_BaryCoordNoPerspCentroidAMD", 1, &E_GL_AMD_shader_explicit_vertex_parameter);
+ symbolTable.setVariableExtensions("gl_BaryCoordNoPerspSampleAMD", 1, &E_GL_AMD_shader_explicit_vertex_parameter);
+ symbolTable.setVariableExtensions("gl_BaryCoordSmoothAMD", 1, &E_GL_AMD_shader_explicit_vertex_parameter);
+ symbolTable.setVariableExtensions("gl_BaryCoordSmoothCentroidAMD", 1, &E_GL_AMD_shader_explicit_vertex_parameter);
+ symbolTable.setVariableExtensions("gl_BaryCoordSmoothSampleAMD", 1, &E_GL_AMD_shader_explicit_vertex_parameter);
+ symbolTable.setVariableExtensions("gl_BaryCoordPullModelAMD", 1, &E_GL_AMD_shader_explicit_vertex_parameter);
+
+ symbolTable.setFunctionExtensions("interpolateAtVertexAMD", 1, &E_GL_AMD_shader_explicit_vertex_parameter);
+
+ BuiltInVariable("gl_BaryCoordNoPerspAMD", EbvBaryCoordNoPersp, symbolTable);
+ BuiltInVariable("gl_BaryCoordNoPerspCentroidAMD", EbvBaryCoordNoPerspCentroid, symbolTable);
+ BuiltInVariable("gl_BaryCoordNoPerspSampleAMD", EbvBaryCoordNoPerspSample, symbolTable);
+ BuiltInVariable("gl_BaryCoordSmoothAMD", EbvBaryCoordSmooth, symbolTable);
+ BuiltInVariable("gl_BaryCoordSmoothCentroidAMD", EbvBaryCoordSmoothCentroid, symbolTable);
+ BuiltInVariable("gl_BaryCoordSmoothSampleAMD", EbvBaryCoordSmoothSample, symbolTable);
+ BuiltInVariable("gl_BaryCoordPullModelAMD", EbvBaryCoordPullModel, symbolTable);
+ }
+
+ // E_GL_AMD_texture_gather_bias_lod
+ if (profile != EEsProfile) {
+ symbolTable.setFunctionExtensions("textureGatherLodAMD", 1, &E_GL_AMD_texture_gather_bias_lod);
+ symbolTable.setFunctionExtensions("textureGatherLodOffsetAMD", 1, &E_GL_AMD_texture_gather_bias_lod);
+ symbolTable.setFunctionExtensions("textureGatherLodOffsetsAMD", 1, &E_GL_AMD_texture_gather_bias_lod);
+ symbolTable.setFunctionExtensions("sparseTextureGatherLodAMD", 1, &E_GL_AMD_texture_gather_bias_lod);
+ symbolTable.setFunctionExtensions("sparseTextureGatherLodOffsetAMD", 1, &E_GL_AMD_texture_gather_bias_lod);
+ symbolTable.setFunctionExtensions("sparseTextureGatherLodOffsetsAMD", 1, &E_GL_AMD_texture_gather_bias_lod);
+ }
+
+ // E_GL_AMD_shader_image_load_store_lod
+ if (profile != EEsProfile) {
+ symbolTable.setFunctionExtensions("imageLoadLodAMD", 1, &E_GL_AMD_shader_image_load_store_lod);
+ symbolTable.setFunctionExtensions("imageStoreLodAMD", 1, &E_GL_AMD_shader_image_load_store_lod);
+ symbolTable.setFunctionExtensions("sparseImageLoadLodAMD", 1, &E_GL_AMD_shader_image_load_store_lod);
+ }
+#endif
+
+#ifdef NV_EXTENSIONS
+ if (profile != EEsProfile && version >= 450) {
+ symbolTable.setVariableExtensions("gl_FragFullyCoveredNV", 1, &E_GL_NV_conservative_raster_underestimation);
+ BuiltInVariable("gl_FragFullyCoveredNV", EbvFragFullyCoveredNV, symbolTable);
+ }
+ if ((profile != EEsProfile && version >= 450) ||
+ (profile == EEsProfile && version >= 320)) {
+ symbolTable.setVariableExtensions("gl_FragmentSizeNV", 1, &E_GL_NV_shading_rate_image);
+ symbolTable.setVariableExtensions("gl_InvocationsPerPixelNV", 1, &E_GL_NV_shading_rate_image);
+ BuiltInVariable("gl_FragmentSizeNV", EbvFragmentSizeNV, symbolTable);
+ BuiltInVariable("gl_InvocationsPerPixelNV", EbvInvocationsPerPixelNV, symbolTable);
+ symbolTable.setVariableExtensions("gl_BaryCoordNV", 1, &E_GL_NV_fragment_shader_barycentric);
+ symbolTable.setVariableExtensions("gl_BaryCoordNoPerspNV", 1, &E_GL_NV_fragment_shader_barycentric);
+ BuiltInVariable("gl_BaryCoordNV", EbvBaryCoordNV, symbolTable);
+ BuiltInVariable("gl_BaryCoordNoPerspNV", EbvBaryCoordNoPerspNV, symbolTable);
+ }
+ if (((profile != EEsProfile && version >= 450) ||
+ (profile == EEsProfile && version >= 320)) &&
+ language == EShLangCompute) {
+ symbolTable.setFunctionExtensions("dFdx", 1, &E_GL_NV_compute_shader_derivatives);
+ symbolTable.setFunctionExtensions("dFdy", 1, &E_GL_NV_compute_shader_derivatives);
+ symbolTable.setFunctionExtensions("fwidth", 1, &E_GL_NV_compute_shader_derivatives);
+ symbolTable.setFunctionExtensions("dFdxFine", 1, &E_GL_NV_compute_shader_derivatives);
+ symbolTable.setFunctionExtensions("dFdyFine", 1, &E_GL_NV_compute_shader_derivatives);
+ symbolTable.setFunctionExtensions("fwidthFine", 1, &E_GL_NV_compute_shader_derivatives);
+ symbolTable.setFunctionExtensions("dFdxCoarse", 1, &E_GL_NV_compute_shader_derivatives);
+ symbolTable.setFunctionExtensions("dFdyCoarse", 1, &E_GL_NV_compute_shader_derivatives);
+ symbolTable.setFunctionExtensions("fwidthCoarse", 1, &E_GL_NV_compute_shader_derivatives);
+ }
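+
+ // Descriptive note: GL_NV_compute_shader_derivatives makes these derivative
+ // built-ins legal in compute shaders, provided the shader declares a
+ // derivative group layout, e.g. (illustrative GLSL):
+ //     layout(derivative_group_quadsNV) in;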
+#endif
+
+ if ((profile != EEsProfile && version >= 450) ||
+ (profile == EEsProfile && version >= 310)) {
+ symbolTable.setVariableExtensions("gl_FragSizeEXT", 1, &E_GL_EXT_fragment_invocation_density);
+ symbolTable.setVariableExtensions("gl_FragInvocationCountEXT", 1, &E_GL_EXT_fragment_invocation_density);
+ BuiltInVariable("gl_FragSizeEXT", EbvFragSizeEXT, symbolTable);
+ BuiltInVariable("gl_FragInvocationCountEXT", EbvFragInvocationCountEXT, symbolTable);
+ }
+
+ symbolTable.setVariableExtensions("gl_FragDepthEXT", 1, &E_GL_EXT_frag_depth);
+
+ if (profile == EEsProfile && version < 320) {
+ symbolTable.setVariableExtensions("gl_PrimitiveID", Num_AEP_geometry_shader, AEP_geometry_shader);
+ symbolTable.setVariableExtensions("gl_Layer", Num_AEP_geometry_shader, AEP_geometry_shader);
+ }
+
+ if (profile == EEsProfile && version < 320) {
+ symbolTable.setFunctionExtensions("imageAtomicAdd", 1, &E_GL_OES_shader_image_atomic);
+ symbolTable.setFunctionExtensions("imageAtomicMin", 1, &E_GL_OES_shader_image_atomic);
+ symbolTable.setFunctionExtensions("imageAtomicMax", 1, &E_GL_OES_shader_image_atomic);
+ symbolTable.setFunctionExtensions("imageAtomicAnd", 1, &E_GL_OES_shader_image_atomic);
+ symbolTable.setFunctionExtensions("imageAtomicOr", 1, &E_GL_OES_shader_image_atomic);
+ symbolTable.setFunctionExtensions("imageAtomicXor", 1, &E_GL_OES_shader_image_atomic);
+ symbolTable.setFunctionExtensions("imageAtomicExchange", 1, &E_GL_OES_shader_image_atomic);
+ symbolTable.setFunctionExtensions("imageAtomicCompSwap", 1, &E_GL_OES_shader_image_atomic);
+ }
+
+ symbolTable.setVariableExtensions("gl_DeviceIndex", 1, &E_GL_EXT_device_group);
+ BuiltInVariable("gl_DeviceIndex", EbvDeviceIndex, symbolTable);
+ symbolTable.setVariableExtensions("gl_ViewIndex", 1, &E_GL_EXT_multiview);
+ BuiltInVariable("gl_ViewIndex", EbvViewIndex, symbolTable);
+ if (version >= 300 /* both ES and non-ES */) {
+ symbolTable.setVariableExtensions("gl_ViewID_OVR", Num_OVR_multiview_EXTs, OVR_multiview_EXTs);
+ BuiltInVariable("gl_ViewID_OVR", EbvViewIndex, symbolTable);
+ }
+
+ // GL_ARB_shader_ballot
+ if (profile != EEsProfile) {
+ symbolTable.setVariableExtensions("gl_SubGroupSizeARB", 1, &E_GL_ARB_shader_ballot);
+ symbolTable.setVariableExtensions("gl_SubGroupInvocationARB", 1, &E_GL_ARB_shader_ballot);
+ symbolTable.setVariableExtensions("gl_SubGroupEqMaskARB", 1, &E_GL_ARB_shader_ballot);
+ symbolTable.setVariableExtensions("gl_SubGroupGeMaskARB", 1, &E_GL_ARB_shader_ballot);
+ symbolTable.setVariableExtensions("gl_SubGroupGtMaskARB", 1, &E_GL_ARB_shader_ballot);
+ symbolTable.setVariableExtensions("gl_SubGroupLeMaskARB", 1, &E_GL_ARB_shader_ballot);
+ symbolTable.setVariableExtensions("gl_SubGroupLtMaskARB", 1, &E_GL_ARB_shader_ballot);
+
+ BuiltInVariable("gl_SubGroupInvocationARB", EbvSubGroupInvocation, symbolTable);
+ BuiltInVariable("gl_SubGroupEqMaskARB", EbvSubGroupEqMask, symbolTable);
+ BuiltInVariable("gl_SubGroupGeMaskARB", EbvSubGroupGeMask, symbolTable);
+ BuiltInVariable("gl_SubGroupGtMaskARB", EbvSubGroupGtMask, symbolTable);
+ BuiltInVariable("gl_SubGroupLeMaskARB", EbvSubGroupLeMask, symbolTable);
+ BuiltInVariable("gl_SubGroupLtMaskARB", EbvSubGroupLtMask, symbolTable);
+
+ if (spvVersion.vulkan > 0)
+ // Treat "gl_SubGroupSizeARB" as shader input instead of uniform for Vulkan
+ SpecialQualifier("gl_SubGroupSizeARB", EvqVaryingIn, EbvSubGroupSize, symbolTable);
+ else
+ BuiltInVariable("gl_SubGroupSizeARB", EbvSubGroupSize, symbolTable);
+ }
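+
+ // Descriptive note: Vulkan GLSL has no default uniform block for loose
+ // uniforms, so gl_SubGroupSizeARB is registered above as a pipeline input
+ // (EvqVaryingIn) rather than as a built-in uniform; for plain OpenGL it
+ // stays a uniform-qualified built-in.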
+
+ // GL_KHR_shader_subgroup
+ if ((profile == EEsProfile && version >= 310) ||
+ (profile != EEsProfile && version >= 140)) {
+ symbolTable.setVariableExtensions("gl_SubgroupSize", 1, &E_GL_KHR_shader_subgroup_basic);
+ symbolTable.setVariableExtensions("gl_SubgroupInvocationID", 1, &E_GL_KHR_shader_subgroup_basic);
+ symbolTable.setVariableExtensions("gl_SubgroupEqMask", 1, &E_GL_KHR_shader_subgroup_ballot);
+ symbolTable.setVariableExtensions("gl_SubgroupGeMask", 1, &E_GL_KHR_shader_subgroup_ballot);
+ symbolTable.setVariableExtensions("gl_SubgroupGtMask", 1, &E_GL_KHR_shader_subgroup_ballot);
+ symbolTable.setVariableExtensions("gl_SubgroupLeMask", 1, &E_GL_KHR_shader_subgroup_ballot);
+ symbolTable.setVariableExtensions("gl_SubgroupLtMask", 1, &E_GL_KHR_shader_subgroup_ballot);
+
+ BuiltInVariable("gl_SubgroupSize", EbvSubgroupSize2, symbolTable);
+ BuiltInVariable("gl_SubgroupInvocationID", EbvSubgroupInvocation2, symbolTable);
+ BuiltInVariable("gl_SubgroupEqMask", EbvSubgroupEqMask2, symbolTable);
+ BuiltInVariable("gl_SubgroupGeMask", EbvSubgroupGeMask2, symbolTable);
+ BuiltInVariable("gl_SubgroupGtMask", EbvSubgroupGtMask2, symbolTable);
+ BuiltInVariable("gl_SubgroupLeMask", EbvSubgroupLeMask2, symbolTable);
+ BuiltInVariable("gl_SubgroupLtMask", EbvSubgroupLtMask2, symbolTable);
+
+ symbolTable.setFunctionExtensions("subgroupBarrier", 1, &E_GL_KHR_shader_subgroup_basic);
+ symbolTable.setFunctionExtensions("subgroupMemoryBarrier", 1, &E_GL_KHR_shader_subgroup_basic);
+ symbolTable.setFunctionExtensions("subgroupMemoryBarrierBuffer", 1, &E_GL_KHR_shader_subgroup_basic);
+ symbolTable.setFunctionExtensions("subgroupMemoryBarrierImage", 1, &E_GL_KHR_shader_subgroup_basic);
+ symbolTable.setFunctionExtensions("subgroupElect", 1, &E_GL_KHR_shader_subgroup_basic);
+ symbolTable.setFunctionExtensions("subgroupAll", 1, &E_GL_KHR_shader_subgroup_vote);
+ symbolTable.setFunctionExtensions("subgroupAny", 1, &E_GL_KHR_shader_subgroup_vote);
+ symbolTable.setFunctionExtensions("subgroupAllEqual", 1, &E_GL_KHR_shader_subgroup_vote);
+ symbolTable.setFunctionExtensions("subgroupBroadcast", 1, &E_GL_KHR_shader_subgroup_ballot);
+ symbolTable.setFunctionExtensions("subgroupBroadcastFirst", 1, &E_GL_KHR_shader_subgroup_ballot);
+ symbolTable.setFunctionExtensions("subgroupBallot", 1, &E_GL_KHR_shader_subgroup_ballot);
+ symbolTable.setFunctionExtensions("subgroupInverseBallot", 1, &E_GL_KHR_shader_subgroup_ballot);
+ symbolTable.setFunctionExtensions("subgroupBallotBitExtract", 1, &E_GL_KHR_shader_subgroup_ballot);
+ symbolTable.setFunctionExtensions("subgroupBallotBitCount", 1, &E_GL_KHR_shader_subgroup_ballot);
+ symbolTable.setFunctionExtensions("subgroupBallotInclusiveBitCount", 1, &E_GL_KHR_shader_subgroup_ballot);
+ symbolTable.setFunctionExtensions("subgroupBallotExclusiveBitCount", 1, &E_GL_KHR_shader_subgroup_ballot);
+ symbolTable.setFunctionExtensions("subgroupBallotFindLSB", 1, &E_GL_KHR_shader_subgroup_ballot);
+ symbolTable.setFunctionExtensions("subgroupBallotFindMSB", 1, &E_GL_KHR_shader_subgroup_ballot);
+ symbolTable.setFunctionExtensions("subgroupShuffle", 1, &E_GL_KHR_shader_subgroup_shuffle);
+ symbolTable.setFunctionExtensions("subgroupShuffleXor", 1, &E_GL_KHR_shader_subgroup_shuffle);
+ symbolTable.setFunctionExtensions("subgroupShuffleUp", 1, &E_GL_KHR_shader_subgroup_shuffle_relative);
+ symbolTable.setFunctionExtensions("subgroupShuffleDown", 1, &E_GL_KHR_shader_subgroup_shuffle_relative);
+ symbolTable.setFunctionExtensions("subgroupAdd", 1, &E_GL_KHR_shader_subgroup_arithmetic);
+ symbolTable.setFunctionExtensions("subgroupMul", 1, &E_GL_KHR_shader_subgroup_arithmetic);
+ symbolTable.setFunctionExtensions("subgroupMin", 1, &E_GL_KHR_shader_subgroup_arithmetic);
+ symbolTable.setFunctionExtensions("subgroupMax", 1, &E_GL_KHR_shader_subgroup_arithmetic);
+ symbolTable.setFunctionExtensions("subgroupAnd", 1, &E_GL_KHR_shader_subgroup_arithmetic);
+ symbolTable.setFunctionExtensions("subgroupOr", 1, &E_GL_KHR_shader_subgroup_arithmetic);
+ symbolTable.setFunctionExtensions("subgroupXor", 1, &E_GL_KHR_shader_subgroup_arithmetic);
+ symbolTable.setFunctionExtensions("subgroupInclusiveAdd", 1, &E_GL_KHR_shader_subgroup_arithmetic);
+ symbolTable.setFunctionExtensions("subgroupInclusiveMul", 1, &E_GL_KHR_shader_subgroup_arithmetic);
+ symbolTable.setFunctionExtensions("subgroupInclusiveMin", 1, &E_GL_KHR_shader_subgroup_arithmetic);
+ symbolTable.setFunctionExtensions("subgroupInclusiveMax", 1, &E_GL_KHR_shader_subgroup_arithmetic);
+ symbolTable.setFunctionExtensions("subgroupInclusiveAnd", 1, &E_GL_KHR_shader_subgroup_arithmetic);
+ symbolTable.setFunctionExtensions("subgroupInclusiveOr", 1, &E_GL_KHR_shader_subgroup_arithmetic);
+ symbolTable.setFunctionExtensions("subgroupInclusiveXor", 1, &E_GL_KHR_shader_subgroup_arithmetic);
+ symbolTable.setFunctionExtensions("subgroupExclusiveAdd", 1, &E_GL_KHR_shader_subgroup_arithmetic);
+ symbolTable.setFunctionExtensions("subgroupExclusiveMul", 1, &E_GL_KHR_shader_subgroup_arithmetic);
+ symbolTable.setFunctionExtensions("subgroupExclusiveMin", 1, &E_GL_KHR_shader_subgroup_arithmetic);
+ symbolTable.setFunctionExtensions("subgroupExclusiveMax", 1, &E_GL_KHR_shader_subgroup_arithmetic);
+ symbolTable.setFunctionExtensions("subgroupExclusiveAnd", 1, &E_GL_KHR_shader_subgroup_arithmetic);
+ symbolTable.setFunctionExtensions("subgroupExclusiveOr", 1, &E_GL_KHR_shader_subgroup_arithmetic);
+ symbolTable.setFunctionExtensions("subgroupExclusiveXor", 1, &E_GL_KHR_shader_subgroup_arithmetic);
+ symbolTable.setFunctionExtensions("subgroupClusteredAdd", 1, &E_GL_KHR_shader_subgroup_clustered);
+ symbolTable.setFunctionExtensions("subgroupClusteredMul", 1, &E_GL_KHR_shader_subgroup_clustered);
+ symbolTable.setFunctionExtensions("subgroupClusteredMin", 1, &E_GL_KHR_shader_subgroup_clustered);
+ symbolTable.setFunctionExtensions("subgroupClusteredMax", 1, &E_GL_KHR_shader_subgroup_clustered);
+ symbolTable.setFunctionExtensions("subgroupClusteredAnd", 1, &E_GL_KHR_shader_subgroup_clustered);
+ symbolTable.setFunctionExtensions("subgroupClusteredOr", 1, &E_GL_KHR_shader_subgroup_clustered);
+ symbolTable.setFunctionExtensions("subgroupClusteredXor", 1, &E_GL_KHR_shader_subgroup_clustered);
+ symbolTable.setFunctionExtensions("subgroupQuadBroadcast", 1, &E_GL_KHR_shader_subgroup_quad);
+ symbolTable.setFunctionExtensions("subgroupQuadSwapHorizontal", 1, &E_GL_KHR_shader_subgroup_quad);
+ symbolTable.setFunctionExtensions("subgroupQuadSwapVertical", 1, &E_GL_KHR_shader_subgroup_quad);
+ symbolTable.setFunctionExtensions("subgroupQuadSwapDiagonal", 1, &E_GL_KHR_shader_subgroup_quad);
+
+#ifdef NV_EXTENSIONS
+ symbolTable.setFunctionExtensions("subgroupPartitionNV", 1, &E_GL_NV_shader_subgroup_partitioned);
+ symbolTable.setFunctionExtensions("subgroupPartitionedAddNV", 1, &E_GL_NV_shader_subgroup_partitioned);
+ symbolTable.setFunctionExtensions("subgroupPartitionedMulNV", 1, &E_GL_NV_shader_subgroup_partitioned);
+ symbolTable.setFunctionExtensions("subgroupPartitionedMinNV", 1, &E_GL_NV_shader_subgroup_partitioned);
+ symbolTable.setFunctionExtensions("subgroupPartitionedMaxNV", 1, &E_GL_NV_shader_subgroup_partitioned);
+ symbolTable.setFunctionExtensions("subgroupPartitionedAndNV", 1, &E_GL_NV_shader_subgroup_partitioned);
+ symbolTable.setFunctionExtensions("subgroupPartitionedOrNV", 1, &E_GL_NV_shader_subgroup_partitioned);
+ symbolTable.setFunctionExtensions("subgroupPartitionedXorNV", 1, &E_GL_NV_shader_subgroup_partitioned);
+ symbolTable.setFunctionExtensions("subgroupPartitionedInclusiveAddNV", 1, &E_GL_NV_shader_subgroup_partitioned);
+ symbolTable.setFunctionExtensions("subgroupPartitionedInclusiveMulNV", 1, &E_GL_NV_shader_subgroup_partitioned);
+ symbolTable.setFunctionExtensions("subgroupPartitionedInclusiveMinNV", 1, &E_GL_NV_shader_subgroup_partitioned);
+ symbolTable.setFunctionExtensions("subgroupPartitionedInclusiveMaxNV", 1, &E_GL_NV_shader_subgroup_partitioned);
+ symbolTable.setFunctionExtensions("subgroupPartitionedInclusiveAndNV", 1, &E_GL_NV_shader_subgroup_partitioned);
+ symbolTable.setFunctionExtensions("subgroupPartitionedInclusiveOrNV", 1, &E_GL_NV_shader_subgroup_partitioned);
+ symbolTable.setFunctionExtensions("subgroupPartitionedInclusiveXorNV", 1, &E_GL_NV_shader_subgroup_partitioned);
+ symbolTable.setFunctionExtensions("subgroupPartitionedExclusiveAddNV", 1, &E_GL_NV_shader_subgroup_partitioned);
+ symbolTable.setFunctionExtensions("subgroupPartitionedExclusiveMulNV", 1, &E_GL_NV_shader_subgroup_partitioned);
+ symbolTable.setFunctionExtensions("subgroupPartitionedExclusiveMinNV", 1, &E_GL_NV_shader_subgroup_partitioned);
+ symbolTable.setFunctionExtensions("subgroupPartitionedExclusiveMaxNV", 1, &E_GL_NV_shader_subgroup_partitioned);
+ symbolTable.setFunctionExtensions("subgroupPartitionedExclusiveAndNV", 1, &E_GL_NV_shader_subgroup_partitioned);
+ symbolTable.setFunctionExtensions("subgroupPartitionedExclusiveOrNV", 1, &E_GL_NV_shader_subgroup_partitioned);
+ symbolTable.setFunctionExtensions("subgroupPartitionedExclusiveXorNV", 1, &E_GL_NV_shader_subgroup_partitioned);
+#endif
+
+ }
+
+ if (profile == EEsProfile) {
+ symbolTable.setFunctionExtensions("shadow2DEXT", 1, &E_GL_EXT_shadow_samplers);
+ symbolTable.setFunctionExtensions("shadow2DProjEXT", 1, &E_GL_EXT_shadow_samplers);
+ }
+
+ if (spvVersion.vulkan > 0) {
+ symbolTable.setVariableExtensions("gl_ScopeDevice", 1, &E_GL_KHR_memory_scope_semantics);
+ symbolTable.setVariableExtensions("gl_ScopeWorkgroup", 1, &E_GL_KHR_memory_scope_semantics);
+ symbolTable.setVariableExtensions("gl_ScopeSubgroup", 1, &E_GL_KHR_memory_scope_semantics);
+ symbolTable.setVariableExtensions("gl_ScopeInvocation", 1, &E_GL_KHR_memory_scope_semantics);
+
+ symbolTable.setVariableExtensions("gl_SemanticsRelaxed", 1, &E_GL_KHR_memory_scope_semantics);
+ symbolTable.setVariableExtensions("gl_SemanticsAcquire", 1, &E_GL_KHR_memory_scope_semantics);
+ symbolTable.setVariableExtensions("gl_SemanticsRelease", 1, &E_GL_KHR_memory_scope_semantics);
+ symbolTable.setVariableExtensions("gl_SemanticsAcquireRelease", 1, &E_GL_KHR_memory_scope_semantics);
+ symbolTable.setVariableExtensions("gl_SemanticsMakeAvailable", 1, &E_GL_KHR_memory_scope_semantics);
+ symbolTable.setVariableExtensions("gl_SemanticsMakeVisible", 1, &E_GL_KHR_memory_scope_semantics);
+
+ symbolTable.setVariableExtensions("gl_StorageSemanticsNone", 1, &E_GL_KHR_memory_scope_semantics);
+ symbolTable.setVariableExtensions("gl_StorageSemanticsBuffer", 1, &E_GL_KHR_memory_scope_semantics);
+ symbolTable.setVariableExtensions("gl_StorageSemanticsShared", 1, &E_GL_KHR_memory_scope_semantics);
+ symbolTable.setVariableExtensions("gl_StorageSemanticsImage", 1, &E_GL_KHR_memory_scope_semantics);
+ symbolTable.setVariableExtensions("gl_StorageSemanticsOutput", 1, &E_GL_KHR_memory_scope_semantics);
+ }
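+
+ // Descriptive note: these gl_Scope*/gl_Semantics*/gl_StorageSemantics* names
+ // are the integer constants consumed by the scoped atomic/barrier built-ins
+ // that GL_KHR_memory_scope_semantics adds, e.g. (illustrative GLSL):
+ //     atomicStore(counter, 1u, gl_ScopeDevice,
+ //                 gl_StorageSemanticsBuffer, gl_SemanticsRelease);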
+ break;
+
+ case EShLangCompute:
+ BuiltInVariable("gl_NumWorkGroups", EbvNumWorkGroups, symbolTable);
+ BuiltInVariable("gl_WorkGroupSize", EbvWorkGroupSize, symbolTable);
+ BuiltInVariable("gl_WorkGroupID", EbvWorkGroupId, symbolTable);
+ BuiltInVariable("gl_LocalInvocationID", EbvLocalInvocationId, symbolTable);
+ BuiltInVariable("gl_GlobalInvocationID", EbvGlobalInvocationId, symbolTable);
+ BuiltInVariable("gl_LocalInvocationIndex", EbvLocalInvocationIndex, symbolTable);
+
+ if (profile != EEsProfile && version < 430) {
+ symbolTable.setVariableExtensions("gl_NumWorkGroups", 1, &E_GL_ARB_compute_shader);
+ symbolTable.setVariableExtensions("gl_WorkGroupSize", 1, &E_GL_ARB_compute_shader);
+ symbolTable.setVariableExtensions("gl_WorkGroupID", 1, &E_GL_ARB_compute_shader);
+ symbolTable.setVariableExtensions("gl_LocalInvocationID", 1, &E_GL_ARB_compute_shader);
+ symbolTable.setVariableExtensions("gl_GlobalInvocationID", 1, &E_GL_ARB_compute_shader);
+ symbolTable.setVariableExtensions("gl_LocalInvocationIndex", 1, &E_GL_ARB_compute_shader);
+
+ symbolTable.setVariableExtensions("gl_MaxComputeWorkGroupCount", 1, &E_GL_ARB_compute_shader);
+ symbolTable.setVariableExtensions("gl_MaxComputeWorkGroupSize", 1, &E_GL_ARB_compute_shader);
+ symbolTable.setVariableExtensions("gl_MaxComputeUniformComponents", 1, &E_GL_ARB_compute_shader);
+ symbolTable.setVariableExtensions("gl_MaxComputeTextureImageUnits", 1, &E_GL_ARB_compute_shader);
+ symbolTable.setVariableExtensions("gl_MaxComputeImageUniforms", 1, &E_GL_ARB_compute_shader);
+ symbolTable.setVariableExtensions("gl_MaxComputeAtomicCounters", 1, &E_GL_ARB_compute_shader);
+ symbolTable.setVariableExtensions("gl_MaxComputeAtomicCounterBuffers", 1, &E_GL_ARB_compute_shader);
+
+ symbolTable.setFunctionExtensions("barrier", 1, &E_GL_ARB_compute_shader);
+ symbolTable.setFunctionExtensions("memoryBarrierAtomicCounter", 1, &E_GL_ARB_compute_shader);
+ symbolTable.setFunctionExtensions("memoryBarrierBuffer", 1, &E_GL_ARB_compute_shader);
+ symbolTable.setFunctionExtensions("memoryBarrierImage", 1, &E_GL_ARB_compute_shader);
+ symbolTable.setFunctionExtensions("memoryBarrierShared", 1, &E_GL_ARB_compute_shader);
+ symbolTable.setFunctionExtensions("groupMemoryBarrier", 1, &E_GL_ARB_compute_shader);
+ }
+
+ symbolTable.setFunctionExtensions("controlBarrier", 1, &E_GL_KHR_memory_scope_semantics);
+
+ // GL_ARB_shader_ballot
+ if (profile != EEsProfile) {
+ symbolTable.setVariableExtensions("gl_SubGroupSizeARB", 1, &E_GL_ARB_shader_ballot);
+ symbolTable.setVariableExtensions("gl_SubGroupInvocationARB", 1, &E_GL_ARB_shader_ballot);
+ symbolTable.setVariableExtensions("gl_SubGroupEqMaskARB", 1, &E_GL_ARB_shader_ballot);
+ symbolTable.setVariableExtensions("gl_SubGroupGeMaskARB", 1, &E_GL_ARB_shader_ballot);
+ symbolTable.setVariableExtensions("gl_SubGroupGtMaskARB", 1, &E_GL_ARB_shader_ballot);
+ symbolTable.setVariableExtensions("gl_SubGroupLeMaskARB", 1, &E_GL_ARB_shader_ballot);
+ symbolTable.setVariableExtensions("gl_SubGroupLtMaskARB", 1, &E_GL_ARB_shader_ballot);
+
+ BuiltInVariable("gl_SubGroupInvocationARB", EbvSubGroupInvocation, symbolTable);
+ BuiltInVariable("gl_SubGroupEqMaskARB", EbvSubGroupEqMask, symbolTable);
+ BuiltInVariable("gl_SubGroupGeMaskARB", EbvSubGroupGeMask, symbolTable);
+ BuiltInVariable("gl_SubGroupGtMaskARB", EbvSubGroupGtMask, symbolTable);
+ BuiltInVariable("gl_SubGroupLeMaskARB", EbvSubGroupLeMask, symbolTable);
+ BuiltInVariable("gl_SubGroupLtMaskARB", EbvSubGroupLtMask, symbolTable);
+
+ if (spvVersion.vulkan > 0)
+ // Treat "gl_SubGroupSizeARB" as shader input instead of uniform for Vulkan
+ SpecialQualifier("gl_SubGroupSizeARB", EvqVaryingIn, EbvSubGroupSize, symbolTable);
+ else
+ BuiltInVariable("gl_SubGroupSizeARB", EbvSubGroupSize, symbolTable);
+ }
+
+ // GL_KHR_shader_subgroup
+ if ((profile == EEsProfile && version >= 310) ||
+ (profile != EEsProfile && version >= 140)) {
+ symbolTable.setVariableExtensions("gl_SubgroupSize", 1, &E_GL_KHR_shader_subgroup_basic);
+ symbolTable.setVariableExtensions("gl_SubgroupInvocationID", 1, &E_GL_KHR_shader_subgroup_basic);
+ symbolTable.setVariableExtensions("gl_SubgroupEqMask", 1, &E_GL_KHR_shader_subgroup_ballot);
+ symbolTable.setVariableExtensions("gl_SubgroupGeMask", 1, &E_GL_KHR_shader_subgroup_ballot);
+ symbolTable.setVariableExtensions("gl_SubgroupGtMask", 1, &E_GL_KHR_shader_subgroup_ballot);
+ symbolTable.setVariableExtensions("gl_SubgroupLeMask", 1, &E_GL_KHR_shader_subgroup_ballot);
+ symbolTable.setVariableExtensions("gl_SubgroupLtMask", 1, &E_GL_KHR_shader_subgroup_ballot);
+
+ BuiltInVariable("gl_SubgroupSize", EbvSubgroupSize2, symbolTable);
+ BuiltInVariable("gl_SubgroupInvocationID", EbvSubgroupInvocation2, symbolTable);
+ BuiltInVariable("gl_SubgroupEqMask", EbvSubgroupEqMask2, symbolTable);
+ BuiltInVariable("gl_SubgroupGeMask", EbvSubgroupGeMask2, symbolTable);
+ BuiltInVariable("gl_SubgroupGtMask", EbvSubgroupGtMask2, symbolTable);
+ BuiltInVariable("gl_SubgroupLeMask", EbvSubgroupLeMask2, symbolTable);
+ BuiltInVariable("gl_SubgroupLtMask", EbvSubgroupLtMask2, symbolTable);
+ }
+
+ if ((profile != EEsProfile && version >= 140) ||
+ (profile == EEsProfile && version >= 310)) {
+ symbolTable.setVariableExtensions("gl_DeviceIndex", 1, &E_GL_EXT_device_group);
+ BuiltInVariable("gl_DeviceIndex", EbvDeviceIndex, symbolTable);
+ symbolTable.setVariableExtensions("gl_ViewIndex", 1, &E_GL_EXT_multiview);
+ BuiltInVariable("gl_ViewIndex", EbvViewIndex, symbolTable);
+ }
+
+ // GL_KHR_shader_subgroup
+ if ((profile == EEsProfile && version >= 310) ||
+ (profile != EEsProfile && version >= 140)) {
+ symbolTable.setVariableExtensions("gl_NumSubgroups", 1, &E_GL_KHR_shader_subgroup_basic);
+ symbolTable.setVariableExtensions("gl_SubgroupID", 1, &E_GL_KHR_shader_subgroup_basic);
+
+ BuiltInVariable("gl_NumSubgroups", EbvNumSubgroups, symbolTable);
+ BuiltInVariable("gl_SubgroupID", EbvSubgroupID, symbolTable);
+
+ symbolTable.setFunctionExtensions("subgroupMemoryBarrierShared", 1, &E_GL_KHR_shader_subgroup_basic);
+ }
+
+ symbolTable.setFunctionExtensions("coopMatLoadNV", 1, &E_GL_NV_cooperative_matrix);
+ symbolTable.setFunctionExtensions("coopMatStoreNV", 1, &E_GL_NV_cooperative_matrix);
+ symbolTable.setFunctionExtensions("coopMatMulAddNV", 1, &E_GL_NV_cooperative_matrix);
+
+ break;
+#ifdef NV_EXTENSIONS
+ case EShLangRayGenNV:
+ case EShLangIntersectNV:
+ case EShLangAnyHitNV:
+ case EShLangClosestHitNV:
+ case EShLangMissNV:
+ case EShLangCallableNV:
+ if (profile != EEsProfile && version >= 460) {
+ symbolTable.setVariableExtensions("gl_LaunchIDNV", 1, &E_GL_NV_ray_tracing);
+ symbolTable.setVariableExtensions("gl_LaunchSizeNV", 1, &E_GL_NV_ray_tracing);
+ symbolTable.setVariableExtensions("gl_PrimitiveID", 1, &E_GL_NV_ray_tracing);
+ symbolTable.setVariableExtensions("gl_InstanceID", 1, &E_GL_NV_ray_tracing);
+ symbolTable.setVariableExtensions("gl_InstanceCustomIndexNV", 1, &E_GL_NV_ray_tracing);
+ symbolTable.setVariableExtensions("gl_WorldRayOriginNV", 1, &E_GL_NV_ray_tracing);
+ symbolTable.setVariableExtensions("gl_WorldRayDirectionNV", 1, &E_GL_NV_ray_tracing);
+ symbolTable.setVariableExtensions("gl_ObjectRayOriginNV", 1, &E_GL_NV_ray_tracing);
+ symbolTable.setVariableExtensions("gl_ObjectRayDirectionNV", 1, &E_GL_NV_ray_tracing);
+ symbolTable.setVariableExtensions("gl_RayTminNV", 1, &E_GL_NV_ray_tracing);
+ symbolTable.setVariableExtensions("gl_RayTmaxNV", 1, &E_GL_NV_ray_tracing);
+ symbolTable.setVariableExtensions("gl_HitTNV", 1, &E_GL_NV_ray_tracing);
+ symbolTable.setVariableExtensions("gl_HitKindNV", 1, &E_GL_NV_ray_tracing);
+ symbolTable.setVariableExtensions("gl_ObjectToWorldNV", 1, &E_GL_NV_ray_tracing);
+ symbolTable.setVariableExtensions("gl_WorldToObjectNV", 1, &E_GL_NV_ray_tracing);
+ symbolTable.setVariableExtensions("gl_IncomingRayFlagsNV", 1, &E_GL_NV_ray_tracing);
+
+ symbolTable.setVariableExtensions("gl_DeviceIndex", 1, &E_GL_EXT_device_group);
+
+ BuiltInVariable("gl_LaunchIDNV", EbvLaunchIdNV, symbolTable);
+ BuiltInVariable("gl_LaunchSizeNV", EbvLaunchSizeNV, symbolTable);
+ BuiltInVariable("gl_PrimitiveID", EbvPrimitiveId, symbolTable);
+ BuiltInVariable("gl_InstanceID", EbvInstanceId, symbolTable);
+ BuiltInVariable("gl_InstanceCustomIndexNV", EbvInstanceCustomIndexNV,symbolTable);
+ BuiltInVariable("gl_WorldRayOriginNV", EbvWorldRayOriginNV, symbolTable);
+ BuiltInVariable("gl_WorldRayDirectionNV", EbvWorldRayDirectionNV, symbolTable);
+ BuiltInVariable("gl_ObjectRayOriginNV", EbvObjectRayOriginNV, symbolTable);
+ BuiltInVariable("gl_ObjectRayDirectionNV", EbvObjectRayDirectionNV, symbolTable);
+ BuiltInVariable("gl_RayTminNV", EbvRayTminNV, symbolTable);
+ BuiltInVariable("gl_RayTmaxNV", EbvRayTmaxNV, symbolTable);
+ BuiltInVariable("gl_HitTNV", EbvHitTNV, symbolTable);
+ BuiltInVariable("gl_HitKindNV", EbvHitKindNV, symbolTable);
+ BuiltInVariable("gl_ObjectToWorldNV", EbvObjectToWorldNV, symbolTable);
+ BuiltInVariable("gl_WorldToObjectNV", EbvWorldToObjectNV, symbolTable);
+ BuiltInVariable("gl_IncomingRayFlagsNV", EbvIncomingRayFlagsNV, symbolTable);
+ BuiltInVariable("gl_DeviceIndex", EbvDeviceIndex, symbolTable);
+ }
+ break;
+ case EShLangMeshNV:
+ if ((profile != EEsProfile && version >= 450) || (profile == EEsProfile && version >= 320)) {
+ // per-vertex builtins
+ symbolTable.setVariableExtensions("gl_MeshVerticesNV", "gl_Position", 1, &E_GL_NV_mesh_shader);
+ symbolTable.setVariableExtensions("gl_MeshVerticesNV", "gl_PointSize", 1, &E_GL_NV_mesh_shader);
+ symbolTable.setVariableExtensions("gl_MeshVerticesNV", "gl_ClipDistance", 1, &E_GL_NV_mesh_shader);
+ symbolTable.setVariableExtensions("gl_MeshVerticesNV", "gl_CullDistance", 1, &E_GL_NV_mesh_shader);
+
+ BuiltInVariable("gl_MeshVerticesNV", "gl_Position", EbvPosition, symbolTable);
+ BuiltInVariable("gl_MeshVerticesNV", "gl_PointSize", EbvPointSize, symbolTable);
+ BuiltInVariable("gl_MeshVerticesNV", "gl_ClipDistance", EbvClipDistance, symbolTable);
+ BuiltInVariable("gl_MeshVerticesNV", "gl_CullDistance", EbvCullDistance, symbolTable);
+
+ symbolTable.setVariableExtensions("gl_MeshVerticesNV", "gl_PositionPerViewNV", 1, &E_GL_NV_mesh_shader);
+ symbolTable.setVariableExtensions("gl_MeshVerticesNV", "gl_ClipDistancePerViewNV", 1, &E_GL_NV_mesh_shader);
+ symbolTable.setVariableExtensions("gl_MeshVerticesNV", "gl_CullDistancePerViewNV", 1, &E_GL_NV_mesh_shader);
+
+ BuiltInVariable("gl_MeshVerticesNV", "gl_PositionPerViewNV", EbvPositionPerViewNV, symbolTable);
+ BuiltInVariable("gl_MeshVerticesNV", "gl_ClipDistancePerViewNV", EbvClipDistancePerViewNV, symbolTable);
+ BuiltInVariable("gl_MeshVerticesNV", "gl_CullDistancePerViewNV", EbvCullDistancePerViewNV, symbolTable);
+
+ // per-primitive builtins
+ symbolTable.setVariableExtensions("gl_MeshPrimitivesNV", "gl_PrimitiveID", 1, &E_GL_NV_mesh_shader);
+ symbolTable.setVariableExtensions("gl_MeshPrimitivesNV", "gl_Layer", 1, &E_GL_NV_mesh_shader);
+ symbolTable.setVariableExtensions("gl_MeshPrimitivesNV", "gl_ViewportIndex", 1, &E_GL_NV_mesh_shader);
+ symbolTable.setVariableExtensions("gl_MeshPrimitivesNV", "gl_ViewportMask", 1, &E_GL_NV_mesh_shader);
+
+ BuiltInVariable("gl_MeshPrimitivesNV", "gl_PrimitiveID", EbvPrimitiveId, symbolTable);
+ BuiltInVariable("gl_MeshPrimitivesNV", "gl_Layer", EbvLayer, symbolTable);
+ BuiltInVariable("gl_MeshPrimitivesNV", "gl_ViewportIndex", EbvViewportIndex, symbolTable);
+ BuiltInVariable("gl_MeshPrimitivesNV", "gl_ViewportMask", EbvViewportMaskNV, symbolTable);
+
+ // per-view per-primitive builtins
+ symbolTable.setVariableExtensions("gl_MeshPrimitivesNV", "gl_LayerPerViewNV", 1, &E_GL_NV_mesh_shader);
+ symbolTable.setVariableExtensions("gl_MeshPrimitivesNV", "gl_ViewportMaskPerViewNV", 1, &E_GL_NV_mesh_shader);
+
+ BuiltInVariable("gl_MeshPrimitivesNV", "gl_LayerPerViewNV", EbvLayerPerViewNV, symbolTable);
+ BuiltInVariable("gl_MeshPrimitivesNV", "gl_ViewportMaskPerViewNV", EbvViewportMaskPerViewNV, symbolTable);
+
+ // other builtins
+ symbolTable.setVariableExtensions("gl_PrimitiveCountNV", 1, &E_GL_NV_mesh_shader);
+ symbolTable.setVariableExtensions("gl_PrimitiveIndicesNV", 1, &E_GL_NV_mesh_shader);
+ symbolTable.setVariableExtensions("gl_MeshViewCountNV", 1, &E_GL_NV_mesh_shader);
+ symbolTable.setVariableExtensions("gl_MeshViewIndicesNV", 1, &E_GL_NV_mesh_shader);
+ symbolTable.setVariableExtensions("gl_WorkGroupSize", 1, &E_GL_NV_mesh_shader);
+ symbolTable.setVariableExtensions("gl_WorkGroupID", 1, &E_GL_NV_mesh_shader);
+ symbolTable.setVariableExtensions("gl_LocalInvocationID", 1, &E_GL_NV_mesh_shader);
+ symbolTable.setVariableExtensions("gl_GlobalInvocationID", 1, &E_GL_NV_mesh_shader);
+ symbolTable.setVariableExtensions("gl_LocalInvocationIndex", 1, &E_GL_NV_mesh_shader);
+
+ BuiltInVariable("gl_PrimitiveCountNV", EbvPrimitiveCountNV, symbolTable);
+ BuiltInVariable("gl_PrimitiveIndicesNV", EbvPrimitiveIndicesNV, symbolTable);
+ BuiltInVariable("gl_MeshViewCountNV", EbvMeshViewCountNV, symbolTable);
+ BuiltInVariable("gl_MeshViewIndicesNV", EbvMeshViewIndicesNV, symbolTable);
+ BuiltInVariable("gl_WorkGroupSize", EbvWorkGroupSize, symbolTable);
+ BuiltInVariable("gl_WorkGroupID", EbvWorkGroupId, symbolTable);
+ BuiltInVariable("gl_LocalInvocationID", EbvLocalInvocationId, symbolTable);
+ BuiltInVariable("gl_GlobalInvocationID", EbvGlobalInvocationId, symbolTable);
+ BuiltInVariable("gl_LocalInvocationIndex", EbvLocalInvocationIndex, symbolTable);
+
+ // builtin constants
+ symbolTable.setVariableExtensions("gl_MaxMeshOutputVerticesNV", 1, &E_GL_NV_mesh_shader);
+ symbolTable.setVariableExtensions("gl_MaxMeshOutputPrimitivesNV", 1, &E_GL_NV_mesh_shader);
+ symbolTable.setVariableExtensions("gl_MaxMeshWorkGroupSizeNV", 1, &E_GL_NV_mesh_shader);
+ symbolTable.setVariableExtensions("gl_MaxMeshViewCountNV", 1, &E_GL_NV_mesh_shader);
+
+ // builtin functions
+ symbolTable.setFunctionExtensions("barrier", 1, &E_GL_NV_mesh_shader);
+ symbolTable.setFunctionExtensions("memoryBarrierShared", 1, &E_GL_NV_mesh_shader);
+ symbolTable.setFunctionExtensions("groupMemoryBarrier", 1, &E_GL_NV_mesh_shader);
+ }
+
+ if (profile != EEsProfile && version >= 450) {
+ // GL_EXT_device_group
+ symbolTable.setVariableExtensions("gl_DeviceIndex", 1, &E_GL_EXT_device_group);
+ BuiltInVariable("gl_DeviceIndex", EbvDeviceIndex, symbolTable);
+
+ // GL_ARB_shader_draw_parameters
+ symbolTable.setVariableExtensions("gl_DrawIDARB", 1, &E_GL_ARB_shader_draw_parameters);
+ BuiltInVariable("gl_DrawIDARB", EbvDrawId, symbolTable);
+ if (version >= 460) {
+ BuiltInVariable("gl_DrawID", EbvDrawId, symbolTable);
+ }
+
+ // GL_ARB_shader_ballot
+ symbolTable.setVariableExtensions("gl_SubGroupSizeARB", 1, &E_GL_ARB_shader_ballot);
+ symbolTable.setVariableExtensions("gl_SubGroupInvocationARB", 1, &E_GL_ARB_shader_ballot);
+ symbolTable.setVariableExtensions("gl_SubGroupEqMaskARB", 1, &E_GL_ARB_shader_ballot);
+ symbolTable.setVariableExtensions("gl_SubGroupGeMaskARB", 1, &E_GL_ARB_shader_ballot);
+ symbolTable.setVariableExtensions("gl_SubGroupGtMaskARB", 1, &E_GL_ARB_shader_ballot);
+ symbolTable.setVariableExtensions("gl_SubGroupLeMaskARB", 1, &E_GL_ARB_shader_ballot);
+ symbolTable.setVariableExtensions("gl_SubGroupLtMaskARB", 1, &E_GL_ARB_shader_ballot);
+
+ BuiltInVariable("gl_SubGroupInvocationARB", EbvSubGroupInvocation, symbolTable);
+ BuiltInVariable("gl_SubGroupEqMaskARB", EbvSubGroupEqMask, symbolTable);
+ BuiltInVariable("gl_SubGroupGeMaskARB", EbvSubGroupGeMask, symbolTable);
+ BuiltInVariable("gl_SubGroupGtMaskARB", EbvSubGroupGtMask, symbolTable);
+ BuiltInVariable("gl_SubGroupLeMaskARB", EbvSubGroupLeMask, symbolTable);
+ BuiltInVariable("gl_SubGroupLtMaskARB", EbvSubGroupLtMask, symbolTable);
+
+ if (spvVersion.vulkan > 0)
+ // Treat "gl_SubGroupSizeARB" as shader input instead of uniform for Vulkan
+ SpecialQualifier("gl_SubGroupSizeARB", EvqVaryingIn, EbvSubGroupSize, symbolTable);
+ else
+ BuiltInVariable("gl_SubGroupSizeARB", EbvSubGroupSize, symbolTable);
+ }
+
+ // GL_KHR_shader_subgroup
+ if ((profile == EEsProfile && version >= 310) ||
+ (profile != EEsProfile && version >= 140)) {
+ symbolTable.setVariableExtensions("gl_NumSubgroups", 1, &E_GL_KHR_shader_subgroup_basic);
+ symbolTable.setVariableExtensions("gl_SubgroupID", 1, &E_GL_KHR_shader_subgroup_basic);
+ symbolTable.setVariableExtensions("gl_SubgroupSize", 1, &E_GL_KHR_shader_subgroup_basic);
+ symbolTable.setVariableExtensions("gl_SubgroupInvocationID", 1, &E_GL_KHR_shader_subgroup_basic);
+ symbolTable.setVariableExtensions("gl_SubgroupEqMask", 1, &E_GL_KHR_shader_subgroup_ballot);
+ symbolTable.setVariableExtensions("gl_SubgroupGeMask", 1, &E_GL_KHR_shader_subgroup_ballot);
+ symbolTable.setVariableExtensions("gl_SubgroupGtMask", 1, &E_GL_KHR_shader_subgroup_ballot);
+ symbolTable.setVariableExtensions("gl_SubgroupLeMask", 1, &E_GL_KHR_shader_subgroup_ballot);
+ symbolTable.setVariableExtensions("gl_SubgroupLtMask", 1, &E_GL_KHR_shader_subgroup_ballot);
+
+ BuiltInVariable("gl_NumSubgroups", EbvNumSubgroups, symbolTable);
+ BuiltInVariable("gl_SubgroupID", EbvSubgroupID, symbolTable);
+ BuiltInVariable("gl_SubgroupSize", EbvSubgroupSize2, symbolTable);
+ BuiltInVariable("gl_SubgroupInvocationID", EbvSubgroupInvocation2, symbolTable);
+ BuiltInVariable("gl_SubgroupEqMask", EbvSubgroupEqMask2, symbolTable);
+ BuiltInVariable("gl_SubgroupGeMask", EbvSubgroupGeMask2, symbolTable);
+ BuiltInVariable("gl_SubgroupGtMask", EbvSubgroupGtMask2, symbolTable);
+ BuiltInVariable("gl_SubgroupLeMask", EbvSubgroupLeMask2, symbolTable);
+ BuiltInVariable("gl_SubgroupLtMask", EbvSubgroupLtMask2, symbolTable);
+
+ symbolTable.setFunctionExtensions("subgroupMemoryBarrierShared", 1, &E_GL_KHR_shader_subgroup_basic);
+ }
+ break;
+
+ case EShLangTaskNV:
+ if ((profile != EEsProfile && version >= 450) || (profile == EEsProfile && version >= 320)) {
+ symbolTable.setVariableExtensions("gl_TaskCountNV", 1, &E_GL_NV_mesh_shader);
+ symbolTable.setVariableExtensions("gl_WorkGroupSize", 1, &E_GL_NV_mesh_shader);
+ symbolTable.setVariableExtensions("gl_WorkGroupID", 1, &E_GL_NV_mesh_shader);
+ symbolTable.setVariableExtensions("gl_LocalInvocationID", 1, &E_GL_NV_mesh_shader);
+ symbolTable.setVariableExtensions("gl_GlobalInvocationID", 1, &E_GL_NV_mesh_shader);
+ symbolTable.setVariableExtensions("gl_LocalInvocationIndex", 1, &E_GL_NV_mesh_shader);
+ symbolTable.setVariableExtensions("gl_MeshViewCountNV", 1, &E_GL_NV_mesh_shader);
+ symbolTable.setVariableExtensions("gl_MeshViewIndicesNV", 1, &E_GL_NV_mesh_shader);
+
+ BuiltInVariable("gl_TaskCountNV", EbvTaskCountNV, symbolTable);
+ BuiltInVariable("gl_WorkGroupSize", EbvWorkGroupSize, symbolTable);
+ BuiltInVariable("gl_WorkGroupID", EbvWorkGroupId, symbolTable);
+ BuiltInVariable("gl_LocalInvocationID", EbvLocalInvocationId, symbolTable);
+ BuiltInVariable("gl_GlobalInvocationID", EbvGlobalInvocationId, symbolTable);
+ BuiltInVariable("gl_LocalInvocationIndex", EbvLocalInvocationIndex, symbolTable);
+ BuiltInVariable("gl_MeshViewCountNV", EbvMeshViewCountNV, symbolTable);
+ BuiltInVariable("gl_MeshViewIndicesNV", EbvMeshViewIndicesNV, symbolTable);
+
+ symbolTable.setVariableExtensions("gl_MaxTaskWorkGroupSizeNV", 1, &E_GL_NV_mesh_shader);
+ symbolTable.setVariableExtensions("gl_MaxMeshViewCountNV", 1, &E_GL_NV_mesh_shader);
+
+ symbolTable.setFunctionExtensions("barrier", 1, &E_GL_NV_mesh_shader);
+ symbolTable.setFunctionExtensions("memoryBarrierShared", 1, &E_GL_NV_mesh_shader);
+ symbolTable.setFunctionExtensions("groupMemoryBarrier", 1, &E_GL_NV_mesh_shader);
+ }
+
+ if (profile != EEsProfile && version >= 450) {
+ // GL_EXT_device_group
+ symbolTable.setVariableExtensions("gl_DeviceIndex", 1, &E_GL_EXT_device_group);
+ BuiltInVariable("gl_DeviceIndex", EbvDeviceIndex, symbolTable);
+
+ // GL_ARB_shader_draw_parameters
+ symbolTable.setVariableExtensions("gl_DrawIDARB", 1, &E_GL_ARB_shader_draw_parameters);
+ BuiltInVariable("gl_DrawIDARB", EbvDrawId, symbolTable);
+ if (version >= 460) {
+ BuiltInVariable("gl_DrawID", EbvDrawId, symbolTable);
+ }
+
+ // GL_ARB_shader_ballot
+ symbolTable.setVariableExtensions("gl_SubGroupSizeARB", 1, &E_GL_ARB_shader_ballot);
+ symbolTable.setVariableExtensions("gl_SubGroupInvocationARB", 1, &E_GL_ARB_shader_ballot);
+ symbolTable.setVariableExtensions("gl_SubGroupEqMaskARB", 1, &E_GL_ARB_shader_ballot);
+ symbolTable.setVariableExtensions("gl_SubGroupGeMaskARB", 1, &E_GL_ARB_shader_ballot);
+ symbolTable.setVariableExtensions("gl_SubGroupGtMaskARB", 1, &E_GL_ARB_shader_ballot);
+ symbolTable.setVariableExtensions("gl_SubGroupLeMaskARB", 1, &E_GL_ARB_shader_ballot);
+ symbolTable.setVariableExtensions("gl_SubGroupLtMaskARB", 1, &E_GL_ARB_shader_ballot);
+
+ BuiltInVariable("gl_SubGroupInvocationARB", EbvSubGroupInvocation, symbolTable);
+ BuiltInVariable("gl_SubGroupEqMaskARB", EbvSubGroupEqMask, symbolTable);
+ BuiltInVariable("gl_SubGroupGeMaskARB", EbvSubGroupGeMask, symbolTable);
+ BuiltInVariable("gl_SubGroupGtMaskARB", EbvSubGroupGtMask, symbolTable);
+ BuiltInVariable("gl_SubGroupLeMaskARB", EbvSubGroupLeMask, symbolTable);
+ BuiltInVariable("gl_SubGroupLtMaskARB", EbvSubGroupLtMask, symbolTable);
+
+ if (spvVersion.vulkan > 0)
+ // Treat "gl_SubGroupSizeARB" as shader input instead of uniform for Vulkan
+ SpecialQualifier("gl_SubGroupSizeARB", EvqVaryingIn, EbvSubGroupSize, symbolTable);
+ else
+ BuiltInVariable("gl_SubGroupSizeARB", EbvSubGroupSize, symbolTable);
+ }
+
+ // GL_KHR_shader_subgroup
+ if ((profile == EEsProfile && version >= 310) ||
+ (profile != EEsProfile && version >= 140)) {
+ symbolTable.setVariableExtensions("gl_NumSubgroups", 1, &E_GL_KHR_shader_subgroup_basic);
+ symbolTable.setVariableExtensions("gl_SubgroupID", 1, &E_GL_KHR_shader_subgroup_basic);
+ symbolTable.setVariableExtensions("gl_SubgroupSize", 1, &E_GL_KHR_shader_subgroup_basic);
+ symbolTable.setVariableExtensions("gl_SubgroupInvocationID", 1, &E_GL_KHR_shader_subgroup_basic);
+ symbolTable.setVariableExtensions("gl_SubgroupEqMask", 1, &E_GL_KHR_shader_subgroup_ballot);
+ symbolTable.setVariableExtensions("gl_SubgroupGeMask", 1, &E_GL_KHR_shader_subgroup_ballot);
+ symbolTable.setVariableExtensions("gl_SubgroupGtMask", 1, &E_GL_KHR_shader_subgroup_ballot);
+ symbolTable.setVariableExtensions("gl_SubgroupLeMask", 1, &E_GL_KHR_shader_subgroup_ballot);
+ symbolTable.setVariableExtensions("gl_SubgroupLtMask", 1, &E_GL_KHR_shader_subgroup_ballot);
+
+ BuiltInVariable("gl_NumSubgroups", EbvNumSubgroups, symbolTable);
+ BuiltInVariable("gl_SubgroupID", EbvSubgroupID, symbolTable);
+ BuiltInVariable("gl_SubgroupSize", EbvSubgroupSize2, symbolTable);
+ BuiltInVariable("gl_SubgroupInvocationID", EbvSubgroupInvocation2, symbolTable);
+ BuiltInVariable("gl_SubgroupEqMask", EbvSubgroupEqMask2, symbolTable);
+ BuiltInVariable("gl_SubgroupGeMask", EbvSubgroupGeMask2, symbolTable);
+ BuiltInVariable("gl_SubgroupGtMask", EbvSubgroupGtMask2, symbolTable);
+ BuiltInVariable("gl_SubgroupLeMask", EbvSubgroupLeMask2, symbolTable);
+ BuiltInVariable("gl_SubgroupLtMask", EbvSubgroupLtMask2, symbolTable);
+
+ symbolTable.setFunctionExtensions("subgroupMemoryBarrierShared", 1, &E_GL_KHR_shader_subgroup_basic);
+ }
+ break;
+#endif
+
+ default:
+ assert(false && "Language not supported");
+ break;
+ }
+
+ //
+ // Next, identify which built-ins have a mapping to an operator.
+ // If PureOperatorBuiltins is false, built-ins that are not identified as
+ // operators here are expected to be resolved through a library of
+ // functions rather than as operations.
+ //
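+ // For example, once "not" is related to EOpVectorLogicalNot below, a GLSL
+ // call such as
+ //     bvec2 b = not(bvec2(true, false));
+ // is lowered directly to an EOpVectorLogicalNot node in the AST instead of
+ // remaining an unresolved user-level function call (illustrative note).
+ //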
+ symbolTable.relateToOperator("not", EOpVectorLogicalNot);
+
+ symbolTable.relateToOperator("matrixCompMult", EOpMul);
+ // 120 and 150 are correct for both ES and desktop
+ if (version >= 120) {
+ symbolTable.relateToOperator("outerProduct", EOpOuterProduct);
+ symbolTable.relateToOperator("transpose", EOpTranspose);
+ if (version >= 150) {
+ symbolTable.relateToOperator("determinant", EOpDeterminant);
+ symbolTable.relateToOperator("inverse", EOpMatrixInverse);
+ }
+ }
+
+ symbolTable.relateToOperator("mod", EOpMod);
+ symbolTable.relateToOperator("modf", EOpModf);
+
+ symbolTable.relateToOperator("equal", EOpVectorEqual);
+ symbolTable.relateToOperator("notEqual", EOpVectorNotEqual);
+ symbolTable.relateToOperator("lessThan", EOpLessThan);
+ symbolTable.relateToOperator("greaterThan", EOpGreaterThan);
+ symbolTable.relateToOperator("lessThanEqual", EOpLessThanEqual);
+ symbolTable.relateToOperator("greaterThanEqual", EOpGreaterThanEqual);
+
+ symbolTable.relateToOperator("radians", EOpRadians);
+ symbolTable.relateToOperator("degrees", EOpDegrees);
+ symbolTable.relateToOperator("sin", EOpSin);
+ symbolTable.relateToOperator("cos", EOpCos);
+ symbolTable.relateToOperator("tan", EOpTan);
+ symbolTable.relateToOperator("asin", EOpAsin);
+ symbolTable.relateToOperator("acos", EOpAcos);
+ symbolTable.relateToOperator("atan", EOpAtan);
+ symbolTable.relateToOperator("sinh", EOpSinh);
+ symbolTable.relateToOperator("cosh", EOpCosh);
+ symbolTable.relateToOperator("tanh", EOpTanh);
+ symbolTable.relateToOperator("asinh", EOpAsinh);
+ symbolTable.relateToOperator("acosh", EOpAcosh);
+ symbolTable.relateToOperator("atanh", EOpAtanh);
+
+ symbolTable.relateToOperator("pow", EOpPow);
+ symbolTable.relateToOperator("exp2", EOpExp2);
+ symbolTable.relateToOperator("log", EOpLog);
+ symbolTable.relateToOperator("exp", EOpExp);
+ symbolTable.relateToOperator("log2", EOpLog2);
+ symbolTable.relateToOperator("sqrt", EOpSqrt);
+ symbolTable.relateToOperator("inversesqrt", EOpInverseSqrt);
+
+ symbolTable.relateToOperator("abs", EOpAbs);
+ symbolTable.relateToOperator("sign", EOpSign);
+ symbolTable.relateToOperator("floor", EOpFloor);
+ symbolTable.relateToOperator("trunc", EOpTrunc);
+ symbolTable.relateToOperator("round", EOpRound);
+ symbolTable.relateToOperator("roundEven", EOpRoundEven);
+ symbolTable.relateToOperator("ceil", EOpCeil);
+ symbolTable.relateToOperator("fract", EOpFract);
+ symbolTable.relateToOperator("min", EOpMin);
+ symbolTable.relateToOperator("max", EOpMax);
+ symbolTable.relateToOperator("clamp", EOpClamp);
+ symbolTable.relateToOperator("mix", EOpMix);
+ symbolTable.relateToOperator("step", EOpStep);
+ symbolTable.relateToOperator("smoothstep", EOpSmoothStep);
+
+ symbolTable.relateToOperator("isnan", EOpIsNan);
+ symbolTable.relateToOperator("isinf", EOpIsInf);
+
+ symbolTable.relateToOperator("floatBitsToInt", EOpFloatBitsToInt);
+ symbolTable.relateToOperator("floatBitsToUint", EOpFloatBitsToUint);
+ symbolTable.relateToOperator("intBitsToFloat", EOpIntBitsToFloat);
+ symbolTable.relateToOperator("uintBitsToFloat", EOpUintBitsToFloat);
+ symbolTable.relateToOperator("doubleBitsToInt64", EOpDoubleBitsToInt64);
+ symbolTable.relateToOperator("doubleBitsToUint64", EOpDoubleBitsToUint64);
+ symbolTable.relateToOperator("int64BitsToDouble", EOpInt64BitsToDouble);
+ symbolTable.relateToOperator("uint64BitsToDouble", EOpUint64BitsToDouble);
+ symbolTable.relateToOperator("halfBitsToInt16", EOpFloat16BitsToInt16);
+ symbolTable.relateToOperator("halfBitsToUint16", EOpFloat16BitsToUint16);
+ symbolTable.relateToOperator("float16BitsToInt16", EOpFloat16BitsToInt16);
+ symbolTable.relateToOperator("float16BitsToUint16", EOpFloat16BitsToUint16);
+ symbolTable.relateToOperator("int16BitsToFloat16", EOpInt16BitsToFloat16);
+ symbolTable.relateToOperator("uint16BitsToFloat16", EOpUint16BitsToFloat16);
+
+ symbolTable.relateToOperator("int16BitsToHalf", EOpInt16BitsToFloat16);
+ symbolTable.relateToOperator("uint16BitsToHalf", EOpUint16BitsToFloat16);
+
+ symbolTable.relateToOperator("packSnorm2x16", EOpPackSnorm2x16);
+ symbolTable.relateToOperator("unpackSnorm2x16", EOpUnpackSnorm2x16);
+ symbolTable.relateToOperator("packUnorm2x16", EOpPackUnorm2x16);
+ symbolTable.relateToOperator("unpackUnorm2x16", EOpUnpackUnorm2x16);
+
+ symbolTable.relateToOperator("packSnorm4x8", EOpPackSnorm4x8);
+ symbolTable.relateToOperator("unpackSnorm4x8", EOpUnpackSnorm4x8);
+ symbolTable.relateToOperator("packUnorm4x8", EOpPackUnorm4x8);
+ symbolTable.relateToOperator("unpackUnorm4x8", EOpUnpackUnorm4x8);
+
+ symbolTable.relateToOperator("packDouble2x32", EOpPackDouble2x32);
+ symbolTable.relateToOperator("unpackDouble2x32", EOpUnpackDouble2x32);
+
+ symbolTable.relateToOperator("packHalf2x16", EOpPackHalf2x16);
+ symbolTable.relateToOperator("unpackHalf2x16", EOpUnpackHalf2x16);
+
+ symbolTable.relateToOperator("packInt2x32", EOpPackInt2x32);
+ symbolTable.relateToOperator("unpackInt2x32", EOpUnpackInt2x32);
+ symbolTable.relateToOperator("packUint2x32", EOpPackUint2x32);
+ symbolTable.relateToOperator("unpackUint2x32", EOpUnpackUint2x32);
+
+ symbolTable.relateToOperator("packInt2x16", EOpPackInt2x16);
+ symbolTable.relateToOperator("unpackInt2x16", EOpUnpackInt2x16);
+ symbolTable.relateToOperator("packUint2x16", EOpPackUint2x16);
+ symbolTable.relateToOperator("unpackUint2x16", EOpUnpackUint2x16);
+
+ symbolTable.relateToOperator("packInt4x16", EOpPackInt4x16);
+ symbolTable.relateToOperator("unpackInt4x16", EOpUnpackInt4x16);
+ symbolTable.relateToOperator("packUint4x16", EOpPackUint4x16);
+ symbolTable.relateToOperator("unpackUint4x16", EOpUnpackUint4x16);
+ symbolTable.relateToOperator("packFloat2x16", EOpPackFloat2x16);
+ symbolTable.relateToOperator("unpackFloat2x16", EOpUnpackFloat2x16);
+
+ symbolTable.relateToOperator("pack16", EOpPack16);
+ symbolTable.relateToOperator("pack32", EOpPack32);
+ symbolTable.relateToOperator("pack64", EOpPack64);
+
+ symbolTable.relateToOperator("unpack32", EOpUnpack32);
+ symbolTable.relateToOperator("unpack16", EOpUnpack16);
+ symbolTable.relateToOperator("unpack8", EOpUnpack8);
+
+ symbolTable.relateToOperator("length", EOpLength);
+ symbolTable.relateToOperator("distance", EOpDistance);
+ symbolTable.relateToOperator("dot", EOpDot);
+ symbolTable.relateToOperator("cross", EOpCross);
+ symbolTable.relateToOperator("normalize", EOpNormalize);
+ symbolTable.relateToOperator("faceforward", EOpFaceForward);
+ symbolTable.relateToOperator("reflect", EOpReflect);
+ symbolTable.relateToOperator("refract", EOpRefract);
+
+ symbolTable.relateToOperator("any", EOpAny);
+ symbolTable.relateToOperator("all", EOpAll);
+
+ symbolTable.relateToOperator("barrier", EOpBarrier);
+ symbolTable.relateToOperator("controlBarrier", EOpBarrier);
+ symbolTable.relateToOperator("memoryBarrier", EOpMemoryBarrier);
+ symbolTable.relateToOperator("memoryBarrierAtomicCounter", EOpMemoryBarrierAtomicCounter);
+ symbolTable.relateToOperator("memoryBarrierBuffer", EOpMemoryBarrierBuffer);
+ symbolTable.relateToOperator("memoryBarrierImage", EOpMemoryBarrierImage);
+
+ symbolTable.relateToOperator("atomicAdd", EOpAtomicAdd);
+ symbolTable.relateToOperator("atomicMin", EOpAtomicMin);
+ symbolTable.relateToOperator("atomicMax", EOpAtomicMax);
+ symbolTable.relateToOperator("atomicAnd", EOpAtomicAnd);
+ symbolTable.relateToOperator("atomicOr", EOpAtomicOr);
+ symbolTable.relateToOperator("atomicXor", EOpAtomicXor);
+ symbolTable.relateToOperator("atomicExchange", EOpAtomicExchange);
+ symbolTable.relateToOperator("atomicCompSwap", EOpAtomicCompSwap);
+ symbolTable.relateToOperator("atomicLoad", EOpAtomicLoad);
+ symbolTable.relateToOperator("atomicStore", EOpAtomicStore);
+
+ symbolTable.relateToOperator("atomicCounterIncrement", EOpAtomicCounterIncrement);
+ symbolTable.relateToOperator("atomicCounterDecrement", EOpAtomicCounterDecrement);
+ symbolTable.relateToOperator("atomicCounter", EOpAtomicCounter);
+
+ if (profile != EEsProfile && version >= 460) {
+ symbolTable.relateToOperator("atomicCounterAdd", EOpAtomicCounterAdd);
+ symbolTable.relateToOperator("atomicCounterSubtract", EOpAtomicCounterSubtract);
+ symbolTable.relateToOperator("atomicCounterMin", EOpAtomicCounterMin);
+ symbolTable.relateToOperator("atomicCounterMax", EOpAtomicCounterMax);
+ symbolTable.relateToOperator("atomicCounterAnd", EOpAtomicCounterAnd);
+ symbolTable.relateToOperator("atomicCounterOr", EOpAtomicCounterOr);
+ symbolTable.relateToOperator("atomicCounterXor", EOpAtomicCounterXor);
+ symbolTable.relateToOperator("atomicCounterExchange", EOpAtomicCounterExchange);
+ symbolTable.relateToOperator("atomicCounterCompSwap", EOpAtomicCounterCompSwap);
+ }
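+
+ // Descriptive note: the atomicCounter* operations above were introduced by
+ // GL_ARB_shader_atomic_counter_ops and promoted to core in GLSL 4.60, which
+ // is why they are related to operators only for desktop version >= 460.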
+
+ symbolTable.relateToOperator("fma", EOpFma);
+ symbolTable.relateToOperator("frexp", EOpFrexp);
+ symbolTable.relateToOperator("ldexp", EOpLdexp);
+ symbolTable.relateToOperator("uaddCarry", EOpAddCarry);
+ symbolTable.relateToOperator("usubBorrow", EOpSubBorrow);
+ symbolTable.relateToOperator("umulExtended", EOpUMulExtended);
+ symbolTable.relateToOperator("imulExtended", EOpIMulExtended);
+ symbolTable.relateToOperator("bitfieldExtract", EOpBitfieldExtract);
+ symbolTable.relateToOperator("bitfieldInsert", EOpBitfieldInsert);
+ symbolTable.relateToOperator("bitfieldReverse", EOpBitFieldReverse);
+ symbolTable.relateToOperator("bitCount", EOpBitCount);
+ symbolTable.relateToOperator("findLSB", EOpFindLSB);
+ symbolTable.relateToOperator("findMSB", EOpFindMSB);
+
+ if (PureOperatorBuiltins) {
+ symbolTable.relateToOperator("imageSize", EOpImageQuerySize);
+ symbolTable.relateToOperator("imageSamples", EOpImageQuerySamples);
+ symbolTable.relateToOperator("imageLoad", EOpImageLoad);
+ symbolTable.relateToOperator("imageStore", EOpImageStore);
+ symbolTable.relateToOperator("imageAtomicAdd", EOpImageAtomicAdd);
+ symbolTable.relateToOperator("imageAtomicMin", EOpImageAtomicMin);
+ symbolTable.relateToOperator("imageAtomicMax", EOpImageAtomicMax);
+ symbolTable.relateToOperator("imageAtomicAnd", EOpImageAtomicAnd);
+ symbolTable.relateToOperator("imageAtomicOr", EOpImageAtomicOr);
+ symbolTable.relateToOperator("imageAtomicXor", EOpImageAtomicXor);
+ symbolTable.relateToOperator("imageAtomicExchange", EOpImageAtomicExchange);
+ symbolTable.relateToOperator("imageAtomicCompSwap", EOpImageAtomicCompSwap);
+ symbolTable.relateToOperator("imageAtomicLoad", EOpImageAtomicLoad);
+ symbolTable.relateToOperator("imageAtomicStore", EOpImageAtomicStore);
+
+ symbolTable.relateToOperator("subpassLoad", EOpSubpassLoad);
+ symbolTable.relateToOperator("subpassLoadMS", EOpSubpassLoadMS);
+
+ symbolTable.relateToOperator("textureSize", EOpTextureQuerySize);
+ symbolTable.relateToOperator("textureQueryLod", EOpTextureQueryLod);
+ symbolTable.relateToOperator("textureQueryLevels", EOpTextureQueryLevels);
+ symbolTable.relateToOperator("textureSamples", EOpTextureQuerySamples);
+ symbolTable.relateToOperator("texture", EOpTexture);
+ symbolTable.relateToOperator("textureProj", EOpTextureProj);
+ symbolTable.relateToOperator("textureLod", EOpTextureLod);
+ symbolTable.relateToOperator("textureOffset", EOpTextureOffset);
+ symbolTable.relateToOperator("texelFetch", EOpTextureFetch);
+ symbolTable.relateToOperator("texelFetchOffset", EOpTextureFetchOffset);
+ symbolTable.relateToOperator("textureProjOffset", EOpTextureProjOffset);
+ symbolTable.relateToOperator("textureLodOffset", EOpTextureLodOffset);
+ symbolTable.relateToOperator("textureProjLod", EOpTextureProjLod);
+ symbolTable.relateToOperator("textureProjLodOffset", EOpTextureProjLodOffset);
+ symbolTable.relateToOperator("textureGrad", EOpTextureGrad);
+ symbolTable.relateToOperator("textureGradOffset", EOpTextureGradOffset);
+ symbolTable.relateToOperator("textureProjGrad", EOpTextureProjGrad);
+ symbolTable.relateToOperator("textureProjGradOffset", EOpTextureProjGradOffset);
+ symbolTable.relateToOperator("textureGather", EOpTextureGather);
+ symbolTable.relateToOperator("textureGatherOffset", EOpTextureGatherOffset);
+ symbolTable.relateToOperator("textureGatherOffsets", EOpTextureGatherOffsets);
+
+ symbolTable.relateToOperator("noise1", EOpNoise);
+ symbolTable.relateToOperator("noise2", EOpNoise);
+ symbolTable.relateToOperator("noise3", EOpNoise);
+ symbolTable.relateToOperator("noise4", EOpNoise);
+
+#ifdef NV_EXTENSIONS
+ symbolTable.relateToOperator("textureFootprintNV", EOpImageSampleFootprintNV);
+ symbolTable.relateToOperator("textureFootprintClampNV", EOpImageSampleFootprintClampNV);
+ symbolTable.relateToOperator("textureFootprintLodNV", EOpImageSampleFootprintLodNV);
+ symbolTable.relateToOperator("textureFootprintGradNV", EOpImageSampleFootprintGradNV);
+ symbolTable.relateToOperator("textureFootprintGradClampNV", EOpImageSampleFootprintGradClampNV);
+#endif
+
+ if (spvVersion.spv == 0 && (IncludeLegacy(version, profile, spvVersion) ||
+ (profile == EEsProfile && version == 100))) {
+ symbolTable.relateToOperator("ftransform", EOpFtransform);
+
+ symbolTable.relateToOperator("texture1D", EOpTexture);
+ symbolTable.relateToOperator("texture1DGradARB", EOpTextureGrad);
+ symbolTable.relateToOperator("texture1DProj", EOpTextureProj);
+ symbolTable.relateToOperator("texture1DProjGradARB", EOpTextureProjGrad);
+ symbolTable.relateToOperator("texture1DLod", EOpTextureLod);
+ symbolTable.relateToOperator("texture1DProjLod", EOpTextureProjLod);
+
+ symbolTable.relateToOperator("texture2DRect", EOpTexture);
+ symbolTable.relateToOperator("texture2DRectProj", EOpTextureProj);
+ symbolTable.relateToOperator("texture2DRectGradARB", EOpTextureGrad);
+ symbolTable.relateToOperator("texture2DRectProjGradARB", EOpTextureProjGrad);
+ symbolTable.relateToOperator("shadow2DRect", EOpTexture);
+ symbolTable.relateToOperator("shadow2DRectProj", EOpTextureProj);
+ symbolTable.relateToOperator("shadow2DRectGradARB", EOpTextureGrad);
+ symbolTable.relateToOperator("shadow2DRectProjGradARB", EOpTextureProjGrad);
+
+ symbolTable.relateToOperator("texture2D", EOpTexture);
+ symbolTable.relateToOperator("texture2DProj", EOpTextureProj);
+ symbolTable.relateToOperator("texture2DGradEXT", EOpTextureGrad);
+ symbolTable.relateToOperator("texture2DGradARB", EOpTextureGrad);
+ symbolTable.relateToOperator("texture2DProjGradEXT", EOpTextureProjGrad);
+ symbolTable.relateToOperator("texture2DProjGradARB", EOpTextureProjGrad);
+ symbolTable.relateToOperator("texture2DLod", EOpTextureLod);
+ symbolTable.relateToOperator("texture2DLodEXT", EOpTextureLod);
+ symbolTable.relateToOperator("texture2DProjLod", EOpTextureProjLod);
+ symbolTable.relateToOperator("texture2DProjLodEXT", EOpTextureProjLod);
+
+ symbolTable.relateToOperator("texture3D", EOpTexture);
+ symbolTable.relateToOperator("texture3DGradARB", EOpTextureGrad);
+ symbolTable.relateToOperator("texture3DProj", EOpTextureProj);
+ symbolTable.relateToOperator("texture3DProjGradARB", EOpTextureProjGrad);
+ symbolTable.relateToOperator("texture3DLod", EOpTextureLod);
+ symbolTable.relateToOperator("texture3DProjLod", EOpTextureProjLod);
+ symbolTable.relateToOperator("textureCube", EOpTexture);
+ symbolTable.relateToOperator("textureCubeGradEXT", EOpTextureGrad);
+ symbolTable.relateToOperator("textureCubeGradARB", EOpTextureGrad);
+ symbolTable.relateToOperator("textureCubeLod", EOpTextureLod);
+ symbolTable.relateToOperator("textureCubeLodEXT", EOpTextureLod);
+ symbolTable.relateToOperator("shadow1D", EOpTexture);
+ symbolTable.relateToOperator("shadow1DGradARB", EOpTextureGrad);
+ symbolTable.relateToOperator("shadow2D", EOpTexture);
+ symbolTable.relateToOperator("shadow2DGradARB", EOpTextureGrad);
+ symbolTable.relateToOperator("shadow1DProj", EOpTextureProj);
+ symbolTable.relateToOperator("shadow2DProj", EOpTextureProj);
+ symbolTable.relateToOperator("shadow1DProjGradARB", EOpTextureProjGrad);
+ symbolTable.relateToOperator("shadow2DProjGradARB", EOpTextureProjGrad);
+ symbolTable.relateToOperator("shadow1DLod", EOpTextureLod);
+ symbolTable.relateToOperator("shadow2DLod", EOpTextureLod);
+ symbolTable.relateToOperator("shadow1DProjLod", EOpTextureProjLod);
+ symbolTable.relateToOperator("shadow2DProjLod", EOpTextureProjLod);
+ }
+
+ if (profile != EEsProfile) {
+ symbolTable.relateToOperator("sparseTextureARB", EOpSparseTexture);
+ symbolTable.relateToOperator("sparseTextureLodARB", EOpSparseTextureLod);
+ symbolTable.relateToOperator("sparseTextureOffsetARB", EOpSparseTextureOffset);
+ symbolTable.relateToOperator("sparseTexelFetchARB", EOpSparseTextureFetch);
+ symbolTable.relateToOperator("sparseTexelFetchOffsetARB", EOpSparseTextureFetchOffset);
+ symbolTable.relateToOperator("sparseTextureLodOffsetARB", EOpSparseTextureLodOffset);
+ symbolTable.relateToOperator("sparseTextureGradARB", EOpSparseTextureGrad);
+ symbolTable.relateToOperator("sparseTextureGradOffsetARB", EOpSparseTextureGradOffset);
+ symbolTable.relateToOperator("sparseTextureGatherARB", EOpSparseTextureGather);
+ symbolTable.relateToOperator("sparseTextureGatherOffsetARB", EOpSparseTextureGatherOffset);
+ symbolTable.relateToOperator("sparseTextureGatherOffsetsARB", EOpSparseTextureGatherOffsets);
+ symbolTable.relateToOperator("sparseImageLoadARB", EOpSparseImageLoad);
+ symbolTable.relateToOperator("sparseTexelsResidentARB", EOpSparseTexelsResident);
+
+ symbolTable.relateToOperator("sparseTextureClampARB", EOpSparseTextureClamp);
+ symbolTable.relateToOperator("sparseTextureOffsetClampARB", EOpSparseTextureOffsetClamp);
+ symbolTable.relateToOperator("sparseTextureGradClampARB", EOpSparseTextureGradClamp);
+ symbolTable.relateToOperator("sparseTextureGradOffsetClampARB", EOpSparseTextureGradOffsetClamp);
+ symbolTable.relateToOperator("textureClampARB", EOpTextureClamp);
+ symbolTable.relateToOperator("textureOffsetClampARB", EOpTextureOffsetClamp);
+ symbolTable.relateToOperator("textureGradClampARB", EOpTextureGradClamp);
+ symbolTable.relateToOperator("textureGradOffsetClampARB", EOpTextureGradOffsetClamp);
+
+ symbolTable.relateToOperator("ballotARB", EOpBallot);
+ symbolTable.relateToOperator("readInvocationARB", EOpReadInvocation);
+ symbolTable.relateToOperator("readFirstInvocationARB", EOpReadFirstInvocation);
+
+ if (version >= 430) {
+ symbolTable.relateToOperator("anyInvocationARB", EOpAnyInvocation);
+ symbolTable.relateToOperator("allInvocationsARB", EOpAllInvocations);
+ symbolTable.relateToOperator("allInvocationsEqualARB", EOpAllInvocationsEqual);
+ }
+ if (version >= 460) {
+ symbolTable.relateToOperator("anyInvocation", EOpAnyInvocation);
+ symbolTable.relateToOperator("allInvocations", EOpAllInvocations);
+ symbolTable.relateToOperator("allInvocationsEqual", EOpAllInvocationsEqual);
+ }
+#ifdef AMD_EXTENSIONS
+ symbolTable.relateToOperator("minInvocationsAMD", EOpMinInvocations);
+ symbolTable.relateToOperator("maxInvocationsAMD", EOpMaxInvocations);
+ symbolTable.relateToOperator("addInvocationsAMD", EOpAddInvocations);
+ symbolTable.relateToOperator("minInvocationsNonUniformAMD", EOpMinInvocationsNonUniform);
+ symbolTable.relateToOperator("maxInvocationsNonUniformAMD", EOpMaxInvocationsNonUniform);
+ symbolTable.relateToOperator("addInvocationsNonUniformAMD", EOpAddInvocationsNonUniform);
+ symbolTable.relateToOperator("minInvocationsInclusiveScanAMD", EOpMinInvocationsInclusiveScan);
+ symbolTable.relateToOperator("maxInvocationsInclusiveScanAMD", EOpMaxInvocationsInclusiveScan);
+ symbolTable.relateToOperator("addInvocationsInclusiveScanAMD", EOpAddInvocationsInclusiveScan);
+ symbolTable.relateToOperator("minInvocationsInclusiveScanNonUniformAMD", EOpMinInvocationsInclusiveScanNonUniform);
+ symbolTable.relateToOperator("maxInvocationsInclusiveScanNonUniformAMD", EOpMaxInvocationsInclusiveScanNonUniform);
+ symbolTable.relateToOperator("addInvocationsInclusiveScanNonUniformAMD", EOpAddInvocationsInclusiveScanNonUniform);
+ symbolTable.relateToOperator("minInvocationsExclusiveScanAMD", EOpMinInvocationsExclusiveScan);
+ symbolTable.relateToOperator("maxInvocationsExclusiveScanAMD", EOpMaxInvocationsExclusiveScan);
+ symbolTable.relateToOperator("addInvocationsExclusiveScanAMD", EOpAddInvocationsExclusiveScan);
+ symbolTable.relateToOperator("minInvocationsExclusiveScanNonUniformAMD", EOpMinInvocationsExclusiveScanNonUniform);
+ symbolTable.relateToOperator("maxInvocationsExclusiveScanNonUniformAMD", EOpMaxInvocationsExclusiveScanNonUniform);
+ symbolTable.relateToOperator("addInvocationsExclusiveScanNonUniformAMD", EOpAddInvocationsExclusiveScanNonUniform);
+ symbolTable.relateToOperator("swizzleInvocationsAMD", EOpSwizzleInvocations);
+ symbolTable.relateToOperator("swizzleInvocationsMaskedAMD", EOpSwizzleInvocationsMasked);
+ symbolTable.relateToOperator("writeInvocationAMD", EOpWriteInvocation);
+ symbolTable.relateToOperator("mbcntAMD", EOpMbcnt);
+
+ symbolTable.relateToOperator("min3", EOpMin3);
+ symbolTable.relateToOperator("max3", EOpMax3);
+ symbolTable.relateToOperator("mid3", EOpMid3);
+
+ symbolTable.relateToOperator("cubeFaceIndexAMD", EOpCubeFaceIndex);
+ symbolTable.relateToOperator("cubeFaceCoordAMD", EOpCubeFaceCoord);
+ symbolTable.relateToOperator("timeAMD", EOpTime);
+
+ symbolTable.relateToOperator("textureGatherLodAMD", EOpTextureGatherLod);
+ symbolTable.relateToOperator("textureGatherLodOffsetAMD", EOpTextureGatherLodOffset);
+ symbolTable.relateToOperator("textureGatherLodOffsetsAMD", EOpTextureGatherLodOffsets);
+ symbolTable.relateToOperator("sparseTextureGatherLodAMD", EOpSparseTextureGatherLod);
+ symbolTable.relateToOperator("sparseTextureGatherLodOffsetAMD", EOpSparseTextureGatherLodOffset);
+ symbolTable.relateToOperator("sparseTextureGatherLodOffsetsAMD", EOpSparseTextureGatherLodOffsets);
+
+ symbolTable.relateToOperator("imageLoadLodAMD", EOpImageLoadLod);
+ symbolTable.relateToOperator("imageStoreLodAMD", EOpImageStoreLod);
+ symbolTable.relateToOperator("sparseImageLoadLodAMD", EOpSparseImageLoadLod);
+
+ symbolTable.relateToOperator("fragmentMaskFetchAMD", EOpFragmentMaskFetch);
+ symbolTable.relateToOperator("fragmentFetchAMD", EOpFragmentFetch);
+#endif
+ }
+
+ // GL_KHR_shader_subgroup
+ if ((profile == EEsProfile && version >= 310) ||
+ (profile != EEsProfile && version >= 140)) {
+ symbolTable.relateToOperator("subgroupBarrier", EOpSubgroupBarrier);
+ symbolTable.relateToOperator("subgroupMemoryBarrier", EOpSubgroupMemoryBarrier);
+ symbolTable.relateToOperator("subgroupMemoryBarrierBuffer", EOpSubgroupMemoryBarrierBuffer);
+ symbolTable.relateToOperator("subgroupMemoryBarrierImage", EOpSubgroupMemoryBarrierImage);
+ symbolTable.relateToOperator("subgroupElect", EOpSubgroupElect);
+ symbolTable.relateToOperator("subgroupAll", EOpSubgroupAll);
+ symbolTable.relateToOperator("subgroupAny", EOpSubgroupAny);
+ symbolTable.relateToOperator("subgroupAllEqual", EOpSubgroupAllEqual);
+ symbolTable.relateToOperator("subgroupBroadcast", EOpSubgroupBroadcast);
+ symbolTable.relateToOperator("subgroupBroadcastFirst", EOpSubgroupBroadcastFirst);
+ symbolTable.relateToOperator("subgroupBallot", EOpSubgroupBallot);
+ symbolTable.relateToOperator("subgroupInverseBallot", EOpSubgroupInverseBallot);
+ symbolTable.relateToOperator("subgroupBallotBitExtract", EOpSubgroupBallotBitExtract);
+ symbolTable.relateToOperator("subgroupBallotBitCount", EOpSubgroupBallotBitCount);
+ symbolTable.relateToOperator("subgroupBallotInclusiveBitCount", EOpSubgroupBallotInclusiveBitCount);
+ symbolTable.relateToOperator("subgroupBallotExclusiveBitCount", EOpSubgroupBallotExclusiveBitCount);
+ symbolTable.relateToOperator("subgroupBallotFindLSB", EOpSubgroupBallotFindLSB);
+ symbolTable.relateToOperator("subgroupBallotFindMSB", EOpSubgroupBallotFindMSB);
+ symbolTable.relateToOperator("subgroupShuffle", EOpSubgroupShuffle);
+ symbolTable.relateToOperator("subgroupShuffleXor", EOpSubgroupShuffleXor);
+ symbolTable.relateToOperator("subgroupShuffleUp", EOpSubgroupShuffleUp);
+ symbolTable.relateToOperator("subgroupShuffleDown", EOpSubgroupShuffleDown);
+ symbolTable.relateToOperator("subgroupAdd", EOpSubgroupAdd);
+ symbolTable.relateToOperator("subgroupMul", EOpSubgroupMul);
+ symbolTable.relateToOperator("subgroupMin", EOpSubgroupMin);
+ symbolTable.relateToOperator("subgroupMax", EOpSubgroupMax);
+ symbolTable.relateToOperator("subgroupAnd", EOpSubgroupAnd);
+ symbolTable.relateToOperator("subgroupOr", EOpSubgroupOr);
+ symbolTable.relateToOperator("subgroupXor", EOpSubgroupXor);
+ symbolTable.relateToOperator("subgroupInclusiveAdd", EOpSubgroupInclusiveAdd);
+ symbolTable.relateToOperator("subgroupInclusiveMul", EOpSubgroupInclusiveMul);
+ symbolTable.relateToOperator("subgroupInclusiveMin", EOpSubgroupInclusiveMin);
+ symbolTable.relateToOperator("subgroupInclusiveMax", EOpSubgroupInclusiveMax);
+ symbolTable.relateToOperator("subgroupInclusiveAnd", EOpSubgroupInclusiveAnd);
+ symbolTable.relateToOperator("subgroupInclusiveOr", EOpSubgroupInclusiveOr);
+ symbolTable.relateToOperator("subgroupInclusiveXor", EOpSubgroupInclusiveXor);
+ symbolTable.relateToOperator("subgroupExclusiveAdd", EOpSubgroupExclusiveAdd);
+ symbolTable.relateToOperator("subgroupExclusiveMul", EOpSubgroupExclusiveMul);
+ symbolTable.relateToOperator("subgroupExclusiveMin", EOpSubgroupExclusiveMin);
+ symbolTable.relateToOperator("subgroupExclusiveMax", EOpSubgroupExclusiveMax);
+ symbolTable.relateToOperator("subgroupExclusiveAnd", EOpSubgroupExclusiveAnd);
+ symbolTable.relateToOperator("subgroupExclusiveOr", EOpSubgroupExclusiveOr);
+ symbolTable.relateToOperator("subgroupExclusiveXor", EOpSubgroupExclusiveXor);
+ symbolTable.relateToOperator("subgroupClusteredAdd", EOpSubgroupClusteredAdd);
+ symbolTable.relateToOperator("subgroupClusteredMul", EOpSubgroupClusteredMul);
+ symbolTable.relateToOperator("subgroupClusteredMin", EOpSubgroupClusteredMin);
+ symbolTable.relateToOperator("subgroupClusteredMax", EOpSubgroupClusteredMax);
+ symbolTable.relateToOperator("subgroupClusteredAnd", EOpSubgroupClusteredAnd);
+ symbolTable.relateToOperator("subgroupClusteredOr", EOpSubgroupClusteredOr);
+ symbolTable.relateToOperator("subgroupClusteredXor", EOpSubgroupClusteredXor);
+ symbolTable.relateToOperator("subgroupQuadBroadcast", EOpSubgroupQuadBroadcast);
+ symbolTable.relateToOperator("subgroupQuadSwapHorizontal", EOpSubgroupQuadSwapHorizontal);
+ symbolTable.relateToOperator("subgroupQuadSwapVertical", EOpSubgroupQuadSwapVertical);
+ symbolTable.relateToOperator("subgroupQuadSwapDiagonal", EOpSubgroupQuadSwapDiagonal);
+
+#ifdef NV_EXTENSIONS
+ symbolTable.relateToOperator("subgroupPartitionNV", EOpSubgroupPartition);
+ symbolTable.relateToOperator("subgroupPartitionedAddNV", EOpSubgroupPartitionedAdd);
+ symbolTable.relateToOperator("subgroupPartitionedMulNV", EOpSubgroupPartitionedMul);
+ symbolTable.relateToOperator("subgroupPartitionedMinNV", EOpSubgroupPartitionedMin);
+ symbolTable.relateToOperator("subgroupPartitionedMaxNV", EOpSubgroupPartitionedMax);
+ symbolTable.relateToOperator("subgroupPartitionedAndNV", EOpSubgroupPartitionedAnd);
+ symbolTable.relateToOperator("subgroupPartitionedOrNV", EOpSubgroupPartitionedOr);
+ symbolTable.relateToOperator("subgroupPartitionedXorNV", EOpSubgroupPartitionedXor);
+ symbolTable.relateToOperator("subgroupPartitionedInclusiveAddNV", EOpSubgroupPartitionedInclusiveAdd);
+ symbolTable.relateToOperator("subgroupPartitionedInclusiveMulNV", EOpSubgroupPartitionedInclusiveMul);
+ symbolTable.relateToOperator("subgroupPartitionedInclusiveMinNV", EOpSubgroupPartitionedInclusiveMin);
+ symbolTable.relateToOperator("subgroupPartitionedInclusiveMaxNV", EOpSubgroupPartitionedInclusiveMax);
+ symbolTable.relateToOperator("subgroupPartitionedInclusiveAndNV", EOpSubgroupPartitionedInclusiveAnd);
+ symbolTable.relateToOperator("subgroupPartitionedInclusiveOrNV", EOpSubgroupPartitionedInclusiveOr);
+ symbolTable.relateToOperator("subgroupPartitionedInclusiveXorNV", EOpSubgroupPartitionedInclusiveXor);
+ symbolTable.relateToOperator("subgroupPartitionedExclusiveAddNV", EOpSubgroupPartitionedExclusiveAdd);
+ symbolTable.relateToOperator("subgroupPartitionedExclusiveMulNV", EOpSubgroupPartitionedExclusiveMul);
+ symbolTable.relateToOperator("subgroupPartitionedExclusiveMinNV", EOpSubgroupPartitionedExclusiveMin);
+ symbolTable.relateToOperator("subgroupPartitionedExclusiveMaxNV", EOpSubgroupPartitionedExclusiveMax);
+ symbolTable.relateToOperator("subgroupPartitionedExclusiveAndNV", EOpSubgroupPartitionedExclusiveAnd);
+ symbolTable.relateToOperator("subgroupPartitionedExclusiveOrNV", EOpSubgroupPartitionedExclusiveOr);
+ symbolTable.relateToOperator("subgroupPartitionedExclusiveXorNV", EOpSubgroupPartitionedExclusiveXor);
+#endif
+ }
+
+ if (profile == EEsProfile) {
+ symbolTable.relateToOperator("shadow2DEXT", EOpTexture);
+ symbolTable.relateToOperator("shadow2DProjEXT", EOpTextureProj);
+ }
+ }
+
+ switch(language) {
+ case EShLangVertex:
+ break;
+
+ case EShLangTessControl:
+ case EShLangTessEvaluation:
+ break;
+
+ case EShLangGeometry:
+ symbolTable.relateToOperator("EmitStreamVertex", EOpEmitStreamVertex);
+ symbolTable.relateToOperator("EndStreamPrimitive", EOpEndStreamPrimitive);
+ symbolTable.relateToOperator("EmitVertex", EOpEmitVertex);
+ symbolTable.relateToOperator("EndPrimitive", EOpEndPrimitive);
+ break;
+
+ case EShLangFragment:
+ symbolTable.relateToOperator("dFdx", EOpDPdx);
+ symbolTable.relateToOperator("dFdy", EOpDPdy);
+ symbolTable.relateToOperator("fwidth", EOpFwidth);
+ if (profile != EEsProfile && version >= 400) {
+ symbolTable.relateToOperator("dFdxFine", EOpDPdxFine);
+ symbolTable.relateToOperator("dFdyFine", EOpDPdyFine);
+ symbolTable.relateToOperator("fwidthFine", EOpFwidthFine);
+ symbolTable.relateToOperator("dFdxCoarse", EOpDPdxCoarse);
+ symbolTable.relateToOperator("dFdyCoarse", EOpDPdyCoarse);
+ symbolTable.relateToOperator("fwidthCoarse", EOpFwidthCoarse);
+ }
+ symbolTable.relateToOperator("interpolateAtCentroid", EOpInterpolateAtCentroid);
+ symbolTable.relateToOperator("interpolateAtSample", EOpInterpolateAtSample);
+ symbolTable.relateToOperator("interpolateAtOffset", EOpInterpolateAtOffset);
+
+#ifdef AMD_EXTENSIONS
+ if (profile != EEsProfile)
+ symbolTable.relateToOperator("interpolateAtVertexAMD", EOpInterpolateAtVertex);
+#endif
+ break;
+
+ case EShLangCompute:
+ symbolTable.relateToOperator("memoryBarrierShared", EOpMemoryBarrierShared);
+ symbolTable.relateToOperator("groupMemoryBarrier", EOpGroupMemoryBarrier);
+ symbolTable.relateToOperator("subgroupMemoryBarrierShared", EOpSubgroupMemoryBarrierShared);
+#ifdef NV_EXTENSIONS
+ if ((profile != EEsProfile && version >= 450) ||
+ (profile == EEsProfile && version >= 320)) {
+ symbolTable.relateToOperator("dFdx", EOpDPdx);
+ symbolTable.relateToOperator("dFdy", EOpDPdy);
+ symbolTable.relateToOperator("fwidth", EOpFwidth);
+ symbolTable.relateToOperator("dFdxFine", EOpDPdxFine);
+ symbolTable.relateToOperator("dFdyFine", EOpDPdyFine);
+ symbolTable.relateToOperator("fwidthFine", EOpFwidthFine);
+ symbolTable.relateToOperator("dFdxCoarse", EOpDPdxCoarse);
+ symbolTable.relateToOperator("dFdyCoarse", EOpDPdyCoarse);
+ symbolTable.relateToOperator("fwidthCoarse",EOpFwidthCoarse);
+ }
+#endif
+ symbolTable.relateToOperator("coopMatLoadNV", EOpCooperativeMatrixLoad);
+ symbolTable.relateToOperator("coopMatStoreNV", EOpCooperativeMatrixStore);
+ symbolTable.relateToOperator("coopMatMulAddNV", EOpCooperativeMatrixMulAdd);
+ break;
+
+#ifdef NV_EXTENSIONS
+ case EShLangRayGenNV:
+ case EShLangClosestHitNV:
+ case EShLangMissNV:
+ if (profile != EEsProfile && version >= 460) {
+ symbolTable.relateToOperator("traceNV", EOpTraceNV);
+ symbolTable.relateToOperator("executeCallableNV", EOpExecuteCallableNV);
+ }
+ break;
+ case EShLangIntersectNV:
+ if (profile != EEsProfile && version >= 460)
+ symbolTable.relateToOperator("reportIntersectionNV", EOpReportIntersectionNV);
+ break;
+ case EShLangAnyHitNV:
+ if (profile != EEsProfile && version >= 460) {
+ symbolTable.relateToOperator("ignoreIntersectionNV", EOpIgnoreIntersectionNV);
+ symbolTable.relateToOperator("terminateRayNV", EOpTerminateRayNV);
+ }
+ break;
+ case EShLangCallableNV:
+ if (profile != EEsProfile && version >= 460) {
+ symbolTable.relateToOperator("executeCallableNV", EOpExecuteCallableNV);
+ }
+ break;
+ case EShLangMeshNV:
+ if ((profile != EEsProfile && version >= 450) || (profile == EEsProfile && version >= 320)) {
+ symbolTable.relateToOperator("writePackedPrimitiveIndices4x8NV", EOpWritePackedPrimitiveIndices4x8NV);
+ }
+ // fall through
+ case EShLangTaskNV:
+ if ((profile != EEsProfile && version >= 450) || (profile == EEsProfile && version >= 320)) {
+ symbolTable.relateToOperator("memoryBarrierShared", EOpMemoryBarrierShared);
+ symbolTable.relateToOperator("groupMemoryBarrier", EOpGroupMemoryBarrier);
+ }
+ break;
+#endif
+
+ default:
+ assert(false && "Language not supported");
+ }
+}
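+
+// Illustrative note (not part of upstream glslang): relateToOperator() is what
+// lets the front end later turn a built-in function *call* into a direct
+// operation node. E.g., once "texture2D" maps to EOpTexture, a parsed call
+// aggregate can be retargeted roughly like this (names assumed for illustration):
+//
+//   TIntermAggregate* call = /* aggregate holding the call's arguments */;
+//   call->setOperator(EOpTexture);   // now an operation node, not a function call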
+
+//
+// Add context-dependent (resource-specific) built-ins not handled by the above. These
+// are the ones that must be added programmatically because they cannot be
+// expressed as simple text strings. For these, also
+// 1) Map built-in functions to operators, for those that will turn into an operation node
+// instead of remaining a function call.
+// 2) Tag extension-related symbols added to their base version with their extensions, so
+// that if an early version has the extension turned off, there is an error reported on use.
+//
+void TBuiltIns::identifyBuiltIns(int version, EProfile profile, const SpvVersion& spvVersion, EShLanguage language, TSymbolTable& symbolTable, const TBuiltInResource &resources)
+{
+ if (profile != EEsProfile && version >= 430 && version < 440) {
+ symbolTable.setVariableExtensions("gl_MaxTransformFeedbackBuffers", 1, &E_GL_ARB_enhanced_layouts);
+ symbolTable.setVariableExtensions("gl_MaxTransformFeedbackInterleavedComponents", 1, &E_GL_ARB_enhanced_layouts);
+ }
+ if (profile != EEsProfile && version >= 130 && version < 420) {
+ symbolTable.setVariableExtensions("gl_MinProgramTexelOffset", 1, &E_GL_ARB_shading_language_420pack);
+ symbolTable.setVariableExtensions("gl_MaxProgramTexelOffset", 1, &E_GL_ARB_shading_language_420pack);
+ }
+ if (profile != EEsProfile && version >= 150 && version < 410)
+ symbolTable.setVariableExtensions("gl_MaxViewports", 1, &E_GL_ARB_viewport_array);
+
+ switch(language) {
+ case EShLangFragment:
+ // Set up gl_FragData based on current array size.
+ if (version == 100 || IncludeLegacy(version, profile, spvVersion) || (! ForwardCompatibility && profile != EEsProfile && version < 420)) {
+ TPrecisionQualifier pq = profile == EEsProfile ? EpqMedium : EpqNone;
+ TType fragData(EbtFloat, EvqFragColor, pq, 4);
+ TArraySizes* arraySizes = new TArraySizes;
+ arraySizes->addInnerSize(resources.maxDrawBuffers);
+ fragData.transferArraySizes(arraySizes);
+ symbolTable.insert(*new TVariable(NewPoolTString("gl_FragData"), fragData));
+ SpecialQualifier("gl_FragData", EvqFragColor, EbvFragData, symbolTable);
+ }
+ break;
+
+ case EShLangTessControl:
+ case EShLangTessEvaluation:
+ // Because of the context-dependent array size (gl_MaxPatchVertices),
+ // these variables were added later than the others and need to be mapped now.
+
+ // standard members
+ BuiltInVariable("gl_in", "gl_Position", EbvPosition, symbolTable);
+ BuiltInVariable("gl_in", "gl_PointSize", EbvPointSize, symbolTable);
+ BuiltInVariable("gl_in", "gl_ClipDistance", EbvClipDistance, symbolTable);
+ BuiltInVariable("gl_in", "gl_CullDistance", EbvCullDistance, symbolTable);
+
+ // compatibility members
+ BuiltInVariable("gl_in", "gl_ClipVertex", EbvClipVertex, symbolTable);
+ BuiltInVariable("gl_in", "gl_FrontColor", EbvFrontColor, symbolTable);
+ BuiltInVariable("gl_in", "gl_BackColor", EbvBackColor, symbolTable);
+ BuiltInVariable("gl_in", "gl_FrontSecondaryColor", EbvFrontSecondaryColor, symbolTable);
+ BuiltInVariable("gl_in", "gl_BackSecondaryColor", EbvBackSecondaryColor, symbolTable);
+ BuiltInVariable("gl_in", "gl_TexCoord", EbvTexCoord, symbolTable);
+ BuiltInVariable("gl_in", "gl_FogFragCoord", EbvFogFragCoord, symbolTable);
+
+ // extension requirements
+ if (profile == EEsProfile) {
+ symbolTable.setVariableExtensions("gl_in", "gl_PointSize", Num_AEP_tessellation_point_size, AEP_tessellation_point_size);
+ }
+
+ break;
+
+ default:
+ break;
+ }
+}
+
+} // end namespace glslang
diff --git a/thirdparty/glslang/glslang/MachineIndependent/Initialize.h b/thirdparty/glslang/glslang/MachineIndependent/Initialize.h
new file mode 100644
index 0000000000..b5de324233
--- /dev/null
+++ b/thirdparty/glslang/glslang/MachineIndependent/Initialize.h
@@ -0,0 +1,110 @@
+//
+// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+// Copyright (C) 2013-2016 LunarG, Inc.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+#ifndef _INITIALIZE_INCLUDED_
+#define _INITIALIZE_INCLUDED_
+
+#include "../Include/ResourceLimits.h"
+#include "../Include/Common.h"
+#include "../Include/ShHandle.h"
+#include "SymbolTable.h"
+#include "Versions.h"
+
+namespace glslang {
+
+//
+// This is made to hold parseable strings for almost all the built-in
+// functions and variables for one specific combination of version
+// and profile. (Some still need to be added programmatically.)
+// This is a base class for language-specific derivations; the base class
+// itself can be used for language-independent built-ins.
+//
+// The strings are organized by
+// commonBuiltins: intersection of all stages' built-ins, processed just once
+// stageBuiltins[]: anything a stage needs that's not in commonBuiltins
+//
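+// For example (illustrative, not the actual strings): commonBuiltins could
+// carry "float radians(float degrees);" for every stage, while
+// stageBuiltins[EShLangFragment] would add fragment-only declarations such
+// as "float dFdx(float p);".
+//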
+class TBuiltInParseables {
+public:
+ POOL_ALLOCATOR_NEW_DELETE(GetThreadPoolAllocator())
+ TBuiltInParseables();
+ virtual ~TBuiltInParseables();
+ virtual void initialize(int version, EProfile, const SpvVersion& spvVersion) = 0;
+ virtual void initialize(const TBuiltInResource& resources, int version, EProfile, const SpvVersion& spvVersion, EShLanguage) = 0;
+ virtual const TString& getCommonString() const { return commonBuiltins; }
+ virtual const TString& getStageString(EShLanguage language) const { return stageBuiltins[language]; }
+
+ virtual void identifyBuiltIns(int version, EProfile profile, const SpvVersion& spvVersion, EShLanguage language, TSymbolTable& symbolTable) = 0;
+ virtual void identifyBuiltIns(int version, EProfile profile, const SpvVersion& spvVersion, EShLanguage language, TSymbolTable& symbolTable, const TBuiltInResource &resources) = 0;
+
+protected:
+ TString commonBuiltins;
+ TString stageBuiltins[EShLangCount];
+};
+
+//
+// This is a GLSL specific derivation of TBuiltInParseables. To present a stable
+// interface and match other similar code, it is called TBuiltIns, rather
+// than TBuiltInParseablesGlsl.
+//
+class TBuiltIns : public TBuiltInParseables {
+public:
+ POOL_ALLOCATOR_NEW_DELETE(GetThreadPoolAllocator())
+ TBuiltIns();
+ virtual ~TBuiltIns();
+ void initialize(int version, EProfile, const SpvVersion& spvVersion);
+ void initialize(const TBuiltInResource& resources, int version, EProfile, const SpvVersion& spvVersion, EShLanguage);
+
+ void identifyBuiltIns(int version, EProfile profile, const SpvVersion& spvVersion, EShLanguage language, TSymbolTable& symbolTable);
+ void identifyBuiltIns(int version, EProfile profile, const SpvVersion& spvVersion, EShLanguage language, TSymbolTable& symbolTable, const TBuiltInResource &resources);
+
+protected:
+ void add2ndGenerationSamplingImaging(int version, EProfile profile, const SpvVersion& spvVersion);
+ void addSubpassSampling(TSampler, const TString& typeName, int version, EProfile profile);
+ void addQueryFunctions(TSampler, const TString& typeName, int version, EProfile profile);
+ void addImageFunctions(TSampler, const TString& typeName, int version, EProfile profile);
+ void addSamplingFunctions(TSampler, const TString& typeName, int version, EProfile profile);
+ void addGatherFunctions(TSampler, const TString& typeName, int version, EProfile profile);
+
+ // Helpers for making textual representations of the permutations
+ // of texturing/imaging functions.
+ const char* postfixes[5];
+ const char* prefixes[EbtNumTypes];
+ int dimMap[EsdNumDims];
+};
+
+} // end namespace glslang
+
+#endif // _INITIALIZE_INCLUDED_
diff --git a/thirdparty/glslang/glslang/MachineIndependent/IntermTraverse.cpp b/thirdparty/glslang/glslang/MachineIndependent/IntermTraverse.cpp
new file mode 100644
index 0000000000..f46010b712
--- /dev/null
+++ b/thirdparty/glslang/glslang/MachineIndependent/IntermTraverse.cpp
@@ -0,0 +1,302 @@
+//
+// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+// Copyright (C) 2013 LunarG, Inc.
+// Copyright (c) 2002-2010 The ANGLE Project Authors.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+#include "../Include/intermediate.h"
+
+namespace glslang {
+
+//
+// Traverse the intermediate representation tree, and
+// call a node type specific function for each node.
+// Done recursively through the member function Traverse().
+// Node types can be skipped if their function to call is 0,
+// but their subtree will still be traversed.
+// Nodes with children can have their whole subtree skipped
+// if preVisit is turned on and the type specific function
+// returns false.
+//
+// preVisit, postVisit, and rightToLeft control what order
+// nodes are visited in.
+//
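+//
+// A minimal sketch of a custom traverser (illustrative only; it assumes the
+// TIntermTraverser interface from ../Include/intermediate.h, whose hooks the
+// traverse() methods below invoke):
+//
+//   class SymbolCounter : public TIntermTraverser {
+//   public:
+//       SymbolCounter() : TIntermTraverser(/* preVisit */ true) { }
+//       void visitSymbol(TIntermSymbol*) override { ++count; }
+//       bool visitBinary(TVisit, TIntermBinary*) override {
+//           return true;  // returning false on pre-visit skips the subtree
+//       }
+//       int count = 0;
+//   };
+//
+//   // usage: SymbolCounter counter; root->traverse(&counter);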
+
+//
+// Traversal functions for terminals are straightforward.
+//
+void TIntermMethod::traverse(TIntermTraverser*)
+{
+    // The tree should have resolved all methods into non-method nodes by now.
+}
+
+void TIntermSymbol::traverse(TIntermTraverser *it)
+{
+ it->visitSymbol(this);
+}
+
+void TIntermConstantUnion::traverse(TIntermTraverser *it)
+{
+ it->visitConstantUnion(this);
+}
+
+//
+// Traverse a binary node.
+//
+void TIntermBinary::traverse(TIntermTraverser *it)
+{
+ bool visit = true;
+
+ //
+ // visit the node before children if pre-visiting.
+ //
+ if (it->preVisit)
+ visit = it->visitBinary(EvPreVisit, this);
+
+ //
+ // Visit the children, in the right order.
+ //
+ if (visit) {
+ it->incrementDepth(this);
+
+ if (it->rightToLeft) {
+ if (right)
+ right->traverse(it);
+
+ if (it->inVisit)
+ visit = it->visitBinary(EvInVisit, this);
+
+ if (visit && left)
+ left->traverse(it);
+ } else {
+ if (left)
+ left->traverse(it);
+
+ if (it->inVisit)
+ visit = it->visitBinary(EvInVisit, this);
+
+ if (visit && right)
+ right->traverse(it);
+ }
+
+ it->decrementDepth();
+ }
+
+ //
+ // Visit the node after the children, if requested and the traversal
+ // hasn't been canceled yet.
+ //
+ if (visit && it->postVisit)
+ it->visitBinary(EvPostVisit, this);
+}
+
+//
+// Traverse a unary node. The same comments as for the binary node apply here.
+//
+void TIntermUnary::traverse(TIntermTraverser *it)
+{
+ bool visit = true;
+
+ if (it->preVisit)
+ visit = it->visitUnary(EvPreVisit, this);
+
+ if (visit) {
+ it->incrementDepth(this);
+ operand->traverse(it);
+ it->decrementDepth();
+ }
+
+ if (visit && it->postVisit)
+ it->visitUnary(EvPostVisit, this);
+}
+
+//
+// Traverse an aggregate node. The same comments as for the binary node apply here.
+//
+void TIntermAggregate::traverse(TIntermTraverser *it)
+{
+ bool visit = true;
+
+ if (it->preVisit)
+ visit = it->visitAggregate(EvPreVisit, this);
+
+ if (visit) {
+ it->incrementDepth(this);
+
+ if (it->rightToLeft) {
+ for (TIntermSequence::reverse_iterator sit = sequence.rbegin(); sit != sequence.rend(); sit++) {
+ (*sit)->traverse(it);
+
+ if (visit && it->inVisit) {
+ if (*sit != sequence.front())
+ visit = it->visitAggregate(EvInVisit, this);
+ }
+ }
+ } else {
+ for (TIntermSequence::iterator sit = sequence.begin(); sit != sequence.end(); sit++) {
+ (*sit)->traverse(it);
+
+ if (visit && it->inVisit) {
+ if (*sit != sequence.back())
+ visit = it->visitAggregate(EvInVisit, this);
+ }
+ }
+ }
+
+ it->decrementDepth();
+ }
+
+ if (visit && it->postVisit)
+ it->visitAggregate(EvPostVisit, this);
+}
+
+//
+// Traverse a selection node. The same comments as for the binary node apply here.
+//
+void TIntermSelection::traverse(TIntermTraverser *it)
+{
+ bool visit = true;
+
+ if (it->preVisit)
+ visit = it->visitSelection(EvPreVisit, this);
+
+ if (visit) {
+ it->incrementDepth(this);
+ if (it->rightToLeft) {
+ if (falseBlock)
+ falseBlock->traverse(it);
+ if (trueBlock)
+ trueBlock->traverse(it);
+ condition->traverse(it);
+ } else {
+ condition->traverse(it);
+ if (trueBlock)
+ trueBlock->traverse(it);
+ if (falseBlock)
+ falseBlock->traverse(it);
+ }
+ it->decrementDepth();
+ }
+
+ if (visit && it->postVisit)
+ it->visitSelection(EvPostVisit, this);
+}
+
+//
+// Traverse a loop node. The same comments as for the binary node apply here.
+//
+void TIntermLoop::traverse(TIntermTraverser *it)
+{
+ bool visit = true;
+
+ if (it->preVisit)
+ visit = it->visitLoop(EvPreVisit, this);
+
+ if (visit) {
+ it->incrementDepth(this);
+
+ if (it->rightToLeft) {
+ if (terminal)
+ terminal->traverse(it);
+
+ if (body)
+ body->traverse(it);
+
+ if (test)
+ test->traverse(it);
+ } else {
+ if (test)
+ test->traverse(it);
+
+ if (body)
+ body->traverse(it);
+
+ if (terminal)
+ terminal->traverse(it);
+ }
+
+ it->decrementDepth();
+ }
+
+ if (visit && it->postVisit)
+ it->visitLoop(EvPostVisit, this);
+}
+
+//
+// Traverse a branch node. The same comments as for the binary node apply here.
+//
+void TIntermBranch::traverse(TIntermTraverser *it)
+{
+ bool visit = true;
+
+ if (it->preVisit)
+ visit = it->visitBranch(EvPreVisit, this);
+
+ if (visit && expression) {
+ it->incrementDepth(this);
+ expression->traverse(it);
+ it->decrementDepth();
+ }
+
+ if (visit && it->postVisit)
+ it->visitBranch(EvPostVisit, this);
+}
+
+//
+// Traverse a switch node.
+//
+void TIntermSwitch::traverse(TIntermTraverser* it)
+{
+ bool visit = true;
+
+ if (it->preVisit)
+ visit = it->visitSwitch(EvPreVisit, this);
+
+ if (visit) {
+ it->incrementDepth(this);
+ if (it->rightToLeft) {
+ body->traverse(it);
+ condition->traverse(it);
+ } else {
+ condition->traverse(it);
+ body->traverse(it);
+ }
+ it->decrementDepth();
+ }
+
+ if (visit && it->postVisit)
+ it->visitSwitch(EvPostVisit, this);
+}
+
+} // end namespace glslang
diff --git a/thirdparty/glslang/glslang/MachineIndependent/Intermediate.cpp b/thirdparty/glslang/glslang/MachineIndependent/Intermediate.cpp
new file mode 100644
index 0000000000..584d880501
--- /dev/null
+++ b/thirdparty/glslang/glslang/MachineIndependent/Intermediate.cpp
@@ -0,0 +1,4095 @@
+//
+// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+// Copyright (C) 2012-2015 LunarG, Inc.
+// Copyright (C) 2015-2018 Google, Inc.
+// Copyright (C) 2017 ARM Limited.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+//
+// Build the intermediate representation.
+//
+
+#include "localintermediate.h"
+#include "RemoveTree.h"
+#include "SymbolTable.h"
+#include "propagateNoContraction.h"
+
+#include <cfloat>
+#include <utility>
+#include <tuple>
+
+namespace glslang {
+
+////////////////////////////////////////////////////////////////////////////
+//
+// First set of functions are to help build the intermediate representation.
+// These functions are not member functions of the nodes.
+// They are called from parser productions.
+//
+/////////////////////////////////////////////////////////////////////////////
+
+//
+// Add a terminal node for an identifier in an expression.
+//
+// Returns the added node.
+//
+
+TIntermSymbol* TIntermediate::addSymbol(int id, const TString& name, const TType& type, const TConstUnionArray& constArray,
+ TIntermTyped* constSubtree, const TSourceLoc& loc)
+{
+ TIntermSymbol* node = new TIntermSymbol(id, name, type);
+ node->setLoc(loc);
+ node->setConstArray(constArray);
+ node->setConstSubtree(constSubtree);
+
+ return node;
+}
+
+TIntermSymbol* TIntermediate::addSymbol(const TIntermSymbol& intermSymbol)
+{
+ return addSymbol(intermSymbol.getId(),
+ intermSymbol.getName(),
+ intermSymbol.getType(),
+ intermSymbol.getConstArray(),
+ intermSymbol.getConstSubtree(),
+ intermSymbol.getLoc());
+}
+
+TIntermSymbol* TIntermediate::addSymbol(const TVariable& variable)
+{
+ glslang::TSourceLoc loc; // just a null location
+ loc.init();
+
+ return addSymbol(variable, loc);
+}
+
+TIntermSymbol* TIntermediate::addSymbol(const TVariable& variable, const TSourceLoc& loc)
+{
+ return addSymbol(variable.getUniqueId(), variable.getName(), variable.getType(), variable.getConstArray(), variable.getConstSubtree(), loc);
+}
+
+TIntermSymbol* TIntermediate::addSymbol(const TType& type, const TSourceLoc& loc)
+{
+ TConstUnionArray unionArray; // just a null constant
+
+ return addSymbol(0, "", type, unionArray, nullptr, loc);
+}
+
+//
+// Connect two nodes with a new parent that does a binary operation on the nodes.
+//
+// Returns the added node.
+//
+// Returns nullptr if workable conversions and promotions could not be found.
+//
+TIntermTyped* TIntermediate::addBinaryMath(TOperator op, TIntermTyped* left, TIntermTyped* right, TSourceLoc loc)
+{
+ // No operations work on blocks
+ if (left->getType().getBasicType() == EbtBlock || right->getType().getBasicType() == EbtBlock)
+ return nullptr;
+
+ // Convert "reference +/- int" and "reference - reference" to integer math
+ if ((op == EOpAdd || op == EOpSub) && extensionRequested(E_GL_EXT_buffer_reference2)) {
+
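+        // For example (GLSL level, illustrative): with GL_EXT_buffer_reference2,
+        // "ref + 3" is lowered to uint64(ref) + 3 * (size of the referent),
+        // converted back to the reference type, as built out below.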
+ // No addressing math on struct with unsized array.
+ if ((left->getBasicType() == EbtReference && left->getType().getReferentType()->containsUnsizedArray()) ||
+ (right->getBasicType() == EbtReference && right->getType().getReferentType()->containsUnsizedArray())) {
+ return nullptr;
+ }
+
+ if (left->getBasicType() == EbtReference && isTypeInt(right->getBasicType())) {
+ const TType& referenceType = left->getType();
+ TIntermConstantUnion* size = addConstantUnion((unsigned long long)computeBufferReferenceTypeSize(left->getType()), loc, true);
+ left = addBuiltInFunctionCall(loc, EOpConvPtrToUint64, true, left, TType(EbtUint64));
+
+ right = createConversion(EbtInt64, right);
+ right = addBinaryMath(EOpMul, right, size, loc);
+
+ TIntermTyped *node = addBinaryMath(op, left, right, loc);
+ node = addBuiltInFunctionCall(loc, EOpConvUint64ToPtr, true, node, referenceType);
+ return node;
+ }
+
+ if (op == EOpAdd && right->getBasicType() == EbtReference && isTypeInt(left->getBasicType())) {
+ const TType& referenceType = right->getType();
+ TIntermConstantUnion* size = addConstantUnion((unsigned long long)computeBufferReferenceTypeSize(right->getType()), loc, true);
+ right = addBuiltInFunctionCall(loc, EOpConvPtrToUint64, true, right, TType(EbtUint64));
+
+ left = createConversion(EbtInt64, left);
+ left = addBinaryMath(EOpMul, left, size, loc);
+
+ TIntermTyped *node = addBinaryMath(op, left, right, loc);
+ node = addBuiltInFunctionCall(loc, EOpConvUint64ToPtr, true, node, referenceType);
+ return node;
+ }
+
+ if (op == EOpSub && left->getBasicType() == EbtReference && right->getBasicType() == EbtReference) {
+ TIntermConstantUnion* size = addConstantUnion((long long)computeBufferReferenceTypeSize(left->getType()), loc, true);
+
+ left = addBuiltInFunctionCall(loc, EOpConvPtrToUint64, true, left, TType(EbtUint64));
+ right = addBuiltInFunctionCall(loc, EOpConvPtrToUint64, true, right, TType(EbtUint64));
+
+ left = addBuiltInFunctionCall(loc, EOpConvUint64ToInt64, true, left, TType(EbtInt64));
+ right = addBuiltInFunctionCall(loc, EOpConvUint64ToInt64, true, right, TType(EbtInt64));
+
+ left = addBinaryMath(EOpSub, left, right, loc);
+
+ TIntermTyped *node = addBinaryMath(EOpDiv, left, size, loc);
+ return node;
+ }
+
+ // No other math operators supported on references
+ if (left->getBasicType() == EbtReference || right->getBasicType() == EbtReference) {
+ return nullptr;
+ }
+ }
+
+ // Try converting the children's base types to compatible types.
+ auto children = addConversion(op, left, right);
+ left = std::get<0>(children);
+ right = std::get<1>(children);
+
+ if (left == nullptr || right == nullptr)
+ return nullptr;
+
+ // Convert the children's type shape to be compatible.
+ addBiShapeConversion(op, left, right);
+ if (left == nullptr || right == nullptr)
+ return nullptr;
+
+ //
+ // Need a new node holding things together. Make
+ // one and promote it to the right type.
+ //
+ TIntermBinary* node = addBinaryNode(op, left, right, loc);
+ if (! promote(node))
+ return nullptr;
+
+ node->updatePrecision();
+
+ //
+ // If they are both (non-specialization) constants, they must be folded.
+ // (Unless it's the sequence (comma) operator, but that's handled in addComma().)
+ //
+ TIntermConstantUnion *leftTempConstant = node->getLeft()->getAsConstantUnion();
+ TIntermConstantUnion *rightTempConstant = node->getRight()->getAsConstantUnion();
+ if (leftTempConstant && rightTempConstant) {
+ TIntermTyped* folded = leftTempConstant->fold(node->getOp(), rightTempConstant);
+ if (folded)
+ return folded;
+ }
+
+    // If spec-constantness can propagate and the operation is an allowed
+    // specialization-constant operation, make the result a spec-constant.
+ if (specConstantPropagates(*node->getLeft(), *node->getRight()) && isSpecializationOperation(*node))
+ node->getWritableType().getQualifier().makeSpecConstant();
+
+    // If nonuniform-ness must propagate, mark the result nonuniform.
+ if ((node->getLeft()->getQualifier().nonUniform || node->getRight()->getQualifier().nonUniform) &&
+ isNonuniformPropagating(node->getOp()))
+ node->getWritableType().getQualifier().nonUniform = true;
+
+ return node;
+}
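+
+// Illustrative usage of addBinaryMath() (hypothetical caller, not upstream code):
+//
+//   TIntermTyped* sum = intermediate.addBinaryMath(EOpAdd, left, right, loc);
+//   if (sum == nullptr)
+//       /* no usable conversion/promotion existed; the caller reports an error */;
+//
+// If both operands were constants, 'sum' is already the folded constant node.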
+
+//
+// Low level: add binary node (no promotions or other argument modifications)
+//
+TIntermBinary* TIntermediate::addBinaryNode(TOperator op, TIntermTyped* left, TIntermTyped* right, TSourceLoc loc) const
+{
+ // build the node
+ TIntermBinary* node = new TIntermBinary(op);
+ if (loc.line == 0)
+ loc = left->getLoc();
+ node->setLoc(loc);
+ node->setLeft(left);
+ node->setRight(right);
+
+ return node;
+}
+
+//
+// like non-type form, but sets node's type.
+//
+TIntermBinary* TIntermediate::addBinaryNode(TOperator op, TIntermTyped* left, TIntermTyped* right, TSourceLoc loc, const TType& type) const
+{
+ TIntermBinary* node = addBinaryNode(op, left, right, loc);
+ node->setType(type);
+ return node;
+}
+
+//
+// Low level: add unary node (no promotions or other argument modifications)
+//
+TIntermUnary* TIntermediate::addUnaryNode(TOperator op, TIntermTyped* child, TSourceLoc loc) const
+{
+ TIntermUnary* node = new TIntermUnary(op);
+ if (loc.line == 0)
+ loc = child->getLoc();
+ node->setLoc(loc);
+ node->setOperand(child);
+
+ return node;
+}
+
+//
+// like non-type form, but sets node's type.
+//
+TIntermUnary* TIntermediate::addUnaryNode(TOperator op, TIntermTyped* child, TSourceLoc loc, const TType& type) const
+{
+ TIntermUnary* node = addUnaryNode(op, child, loc);
+ node->setType(type);
+ return node;
+}
+
+//
+// Connect two nodes through an assignment.
+//
+// Returns the added node.
+//
+// Returns nullptr if the 'right' type could not be converted to match the 'left' type,
+// or the resulting operation cannot be properly promoted.
+//
+TIntermTyped* TIntermediate::addAssign(TOperator op, TIntermTyped* left, TIntermTyped* right, TSourceLoc loc)
+{
+ // No block assignment
+ if (left->getType().getBasicType() == EbtBlock || right->getType().getBasicType() == EbtBlock)
+ return nullptr;
+
+ // Convert "reference += int" to "reference = reference + int". We need this because the
+ // "reference + int" calculation involves a cast back to the original type, which makes it
+ // not an lvalue.
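+    // E.g. (illustrative): "p += 4" is rebuilt as "p = p + 4", where the addition
+    // goes through addBinaryMath()'s uint64 round trip above.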
+ if ((op == EOpAddAssign || op == EOpSubAssign) && left->getBasicType() == EbtReference &&
+ extensionRequested(E_GL_EXT_buffer_reference2)) {
+
+ if (!(right->getType().isScalar() && right->getType().isIntegerDomain()))
+ return nullptr;
+
+ TIntermTyped* node = addBinaryMath(op == EOpAddAssign ? EOpAdd : EOpSub, left, right, loc);
+ if (!node)
+ return nullptr;
+
+ TIntermSymbol* symbol = left->getAsSymbolNode();
+ left = addSymbol(*symbol);
+
+ node = addAssign(EOpAssign, left, node, loc);
+ return node;
+ }
+
+ //
+ // Like adding binary math, except the conversion can only go
+ // from right to left.
+ //
+
+ // convert base types, nullptr return means not possible
+ right = addConversion(op, left->getType(), right);
+ if (right == nullptr)
+ return nullptr;
+
+ // convert shape
+ right = addUniShapeConversion(op, left->getType(), right);
+
+ // build the node
+ TIntermBinary* node = addBinaryNode(op, left, right, loc);
+
+ if (! promote(node))
+ return nullptr;
+
+ node->updatePrecision();
+
+ return node;
+}
+
+//
+// Connect two nodes through an index operator, where the left node is the base
+// of an array or struct, and the right node is a direct or indirect offset.
+//
+// Returns the added node.
+// The caller should set the type of the returned node.
+//
+TIntermTyped* TIntermediate::addIndex(TOperator op, TIntermTyped* base, TIntermTyped* index, TSourceLoc loc)
+{
+ // caller should set the type
+ return addBinaryNode(op, base, index, loc);
+}
+
+//
+// Add one node as the parent of another that it operates on.
+//
+// Returns the added node.
+//
+TIntermTyped* TIntermediate::addUnaryMath(TOperator op, TIntermTyped* child, TSourceLoc loc)
+{
+    if (child == nullptr)
+ return nullptr;
+
+ if (child->getType().getBasicType() == EbtBlock)
+ return nullptr;
+
+ switch (op) {
+ case EOpLogicalNot:
+ if (source == EShSourceHlsl) {
+ break; // HLSL can promote logical not
+ }
+
+ if (child->getType().getBasicType() != EbtBool || child->getType().isMatrix() || child->getType().isArray() || child->getType().isVector()) {
+ return nullptr;
+ }
+ break;
+
+ case EOpPostIncrement:
+ case EOpPreIncrement:
+ case EOpPostDecrement:
+ case EOpPreDecrement:
+ case EOpNegative:
+ if (child->getType().getBasicType() == EbtStruct || child->getType().isArray())
+ return nullptr;
+ default: break; // some compilers want this
+ }
+
+ //
+ // Do we need to promote the operand?
+ //
+ TBasicType newType = EbtVoid;
+ switch (op) {
+ case EOpConstructInt8: newType = EbtInt8; break;
+ case EOpConstructUint8: newType = EbtUint8; break;
+ case EOpConstructInt16: newType = EbtInt16; break;
+ case EOpConstructUint16: newType = EbtUint16; break;
+ case EOpConstructInt: newType = EbtInt; break;
+ case EOpConstructUint: newType = EbtUint; break;
+ case EOpConstructInt64: newType = EbtInt64; break;
+ case EOpConstructUint64: newType = EbtUint64; break;
+ case EOpConstructBool: newType = EbtBool; break;
+ case EOpConstructFloat: newType = EbtFloat; break;
+ case EOpConstructDouble: newType = EbtDouble; break;
+ case EOpConstructFloat16: newType = EbtFloat16; break;
+ default: break; // some compilers want this
+ }
+
+ if (newType != EbtVoid) {
+ child = addConversion(op, TType(newType, EvqTemporary, child->getVectorSize(),
+ child->getMatrixCols(),
+ child->getMatrixRows(),
+ child->isVector()),
+ child);
+ if (child == nullptr)
+ return nullptr;
+ }
+
+ //
+    // For constructors we are now done; it was all in the conversion.
+ // TODO: but, did this bypass constant folding?
+ //
+ switch (op) {
+ case EOpConstructInt8:
+ case EOpConstructUint8:
+ case EOpConstructInt16:
+ case EOpConstructUint16:
+ case EOpConstructInt:
+ case EOpConstructUint:
+ case EOpConstructInt64:
+ case EOpConstructUint64:
+ case EOpConstructBool:
+ case EOpConstructFloat:
+ case EOpConstructDouble:
+ case EOpConstructFloat16:
+ return child;
+ default: break; // some compilers want this
+ }
+
+ //
+ // Make a new node for the operator.
+ //
+ TIntermUnary* node = addUnaryNode(op, child, loc);
+
+ if (! promote(node))
+ return nullptr;
+
+ node->updatePrecision();
+
+ // If it's a (non-specialization) constant, it must be folded.
+ if (node->getOperand()->getAsConstantUnion())
+ return node->getOperand()->getAsConstantUnion()->fold(op, node->getType());
+
+ // If it's a specialization constant, the result is too,
+ // if the operation is allowed for specialization constants.
+ if (node->getOperand()->getType().getQualifier().isSpecConstant() && isSpecializationOperation(*node))
+ node->getWritableType().getQualifier().makeSpecConstant();
+
+    // If nonuniform-ness must propagate, mark the result nonuniform.
+ if (node->getOperand()->getQualifier().nonUniform && isNonuniformPropagating(node->getOp()))
+ node->getWritableType().getQualifier().nonUniform = true;
+
+ return node;
+}
+
+TIntermTyped* TIntermediate::addBuiltInFunctionCall(const TSourceLoc& loc, TOperator op, bool unary,
+ TIntermNode* childNode, const TType& returnType)
+{
+ if (unary) {
+ //
+ // Treat it like a unary operator.
+        // addUnaryMath() should get the type correct on its own,
+ // including constness (which would differ from the prototype).
+ //
+ TIntermTyped* child = childNode->getAsTyped();
+ if (child == nullptr)
+ return nullptr;
+
+ if (child->getAsConstantUnion()) {
+ TIntermTyped* folded = child->getAsConstantUnion()->fold(op, returnType);
+ if (folded)
+ return folded;
+ }
+
+ return addUnaryNode(op, child, child->getLoc(), returnType);
+ } else {
+        // setAggregateOperator() calls fold() for constant folding
+ TIntermTyped* node = setAggregateOperator(childNode, op, returnType, loc);
+
+ return node;
+ }
+}
+
+//
+// This is the safe way to change the operator on an aggregate, as it
+// does lots of error checking and fixing, especially for establishing
+// a function call's operation on its set of parameters. Sequences
+// of instructions are also aggregates, but they just directly set
+// their operator to EOpSequence.
+//
+// Returns an aggregate node, which could be the one passed in if
+// it was already an aggregate.
+//
+TIntermTyped* TIntermediate::setAggregateOperator(TIntermNode* node, TOperator op, const TType& type, TSourceLoc loc)
+{
+ TIntermAggregate* aggNode;
+
+ //
+    // Make sure we have an aggregate. If not, turn it into one.
+ //
+ if (node != nullptr) {
+ aggNode = node->getAsAggregate();
+ if (aggNode == nullptr || aggNode->getOp() != EOpNull) {
+ //
+ // Make an aggregate containing this node.
+ //
+ aggNode = new TIntermAggregate();
+ aggNode->getSequence().push_back(node);
+ if (loc.line == 0)
+ loc = node->getLoc();
+ }
+ } else
+ aggNode = new TIntermAggregate();
+
+ //
+ // Set the operator.
+ //
+ aggNode->setOperator(op);
+ if (loc.line != 0)
+ aggNode->setLoc(loc);
+
+ aggNode->setType(type);
+
+ return fold(aggNode);
+}
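+
+// Illustrative usage (hypothetical caller, not upstream code): turning a parsed
+// argument list into a built-in operation node, with constant folding applied
+// on the way out by the fold() call above:
+//
+//   // 'args' is the node (possibly an aggregate) holding the call's parameters
+//   TIntermTyped* call = intermediate.setAggregateOperator(args, op, returnType, loc);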
+
+bool TIntermediate::isConversionAllowed(TOperator op, TIntermTyped* node) const
+{
+ //
+ // Does the base type even allow the operation?
+ //
+ switch (node->getBasicType()) {
+ case EbtVoid:
+ return false;
+ case EbtAtomicUint:
+ case EbtSampler:
+#ifdef NV_EXTENSIONS
+ case EbtAccStructNV:
+#endif
+ // opaque types can be passed to functions
+ if (op == EOpFunction)
+ break;
+
+ // HLSL can assign samplers directly (no constructor)
+ if (source == EShSourceHlsl && node->getBasicType() == EbtSampler)
+ break;
+
+ // samplers can get assigned via a sampler constructor
+ // (well, not yet, but code in the rest of this function is ready for it)
+ if (node->getBasicType() == EbtSampler && op == EOpAssign &&
+ node->getAsOperator() != nullptr && node->getAsOperator()->getOp() == EOpConstructTextureSampler)
+ break;
+
+ // otherwise, opaque types can't even be operated on, let alone converted
+ return false;
+ default:
+ break;
+ }
+
+ return true;
+}
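+
+// (Illustrative: with the rules above, passing a sampler as a function argument
+// is allowed via the op == EOpFunction case, while, say, EOpAdd on a sampler
+// falls through to "return false".)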
+
+// This is the 'mechanism' here: it does whatever conversion it is told to do.
+// It is about basic type, not about shape.
+// The policy comes from the shader or the calling code.
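+// (For example, illustrative: createConversion(EbtFloat, intNode) produces a
+// unary EOpConvIntToFloat node, while it returns nullptr when no conversion
+// exists for the pair, or when a required arithmetic-types extension is absent.)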
+TIntermTyped* TIntermediate::createConversion(TBasicType convertTo, TIntermTyped* node) const
+{
+ //
+    // Add a new unary node, 'newNode', for the conversion.
+ //
+ TIntermUnary* newNode = nullptr;
+
+ TOperator newOp = EOpNull;
+
+ // Certain explicit conversions are allowed conditionally
+    bool arithmeticInt8Enabled = extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types) ||
+ extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types_int8);
+#ifdef AMD_EXTENSIONS
+    bool arithmeticInt16Enabled = extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types) ||
+ extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types_int16) ||
+ extensionRequested(E_GL_AMD_gpu_shader_int16);
+
+    bool arithmeticFloat16Enabled = extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types) ||
+ extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types_float16) ||
+ extensionRequested(E_GL_AMD_gpu_shader_half_float);
+#else
+    bool arithmeticInt16Enabled = extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types) ||
+ extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types_int16);
+
+    bool arithmeticFloat16Enabled = extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types) ||
+ extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types_float16);
+#endif
+ bool convertToIntTypes = (convertTo == EbtInt8 || convertTo == EbtUint8 ||
+ convertTo == EbtInt16 || convertTo == EbtUint16 ||
+ convertTo == EbtInt || convertTo == EbtUint ||
+ convertTo == EbtInt64 || convertTo == EbtUint64);
+
+ bool convertFromIntTypes = (node->getBasicType() == EbtInt8 || node->getBasicType() == EbtUint8 ||
+ node->getBasicType() == EbtInt16 || node->getBasicType() == EbtUint16 ||
+ node->getBasicType() == EbtInt || node->getBasicType() == EbtUint ||
+ node->getBasicType() == EbtInt64 || node->getBasicType() == EbtUint64);
+
+ bool convertToFloatTypes = (convertTo == EbtFloat16 || convertTo == EbtFloat || convertTo == EbtDouble);
+
+ bool convertFromFloatTypes = (node->getBasicType() == EbtFloat16 ||
+ node->getBasicType() == EbtFloat ||
+ node->getBasicType() == EbtDouble);
+
+    if (! arithmeticInt8Enabled) {
+ if (((convertTo == EbtInt8 || convertTo == EbtUint8) && ! convertFromIntTypes) ||
+ ((node->getBasicType() == EbtInt8 || node->getBasicType() == EbtUint8) && ! convertToIntTypes))
+ return nullptr;
+ }
+
+    if (! arithmeticInt16Enabled) {
+ if (((convertTo == EbtInt16 || convertTo == EbtUint16) && ! convertFromIntTypes) ||
+ ((node->getBasicType() == EbtInt16 || node->getBasicType() == EbtUint16) && ! convertToIntTypes))
+ return nullptr;
+ }
+
+    if (! arithmeticFloat16Enabled) {
+ if ((convertTo == EbtFloat16 && ! convertFromFloatTypes) ||
+ (node->getBasicType() == EbtFloat16 && ! convertToFloatTypes))
+ return nullptr;
+ }
+
+ switch (convertTo) {
+ case EbtDouble:
+ switch (node->getBasicType()) {
+ case EbtInt8: newOp = EOpConvInt8ToDouble; break;
+ case EbtUint8: newOp = EOpConvUint8ToDouble; break;
+ case EbtInt16: newOp = EOpConvInt16ToDouble; break;
+ case EbtUint16: newOp = EOpConvUint16ToDouble; break;
+ case EbtInt: newOp = EOpConvIntToDouble; break;
+ case EbtUint: newOp = EOpConvUintToDouble; break;
+ case EbtBool: newOp = EOpConvBoolToDouble; break;
+ case EbtFloat: newOp = EOpConvFloatToDouble; break;
+ case EbtFloat16: newOp = EOpConvFloat16ToDouble; break;
+ case EbtInt64: newOp = EOpConvInt64ToDouble; break;
+ case EbtUint64: newOp = EOpConvUint64ToDouble; break;
+ default:
+ return nullptr;
+ }
+ break;
+ case EbtFloat:
+ switch (node->getBasicType()) {
+ case EbtInt8: newOp = EOpConvInt8ToFloat; break;
+ case EbtUint8: newOp = EOpConvUint8ToFloat; break;
+ case EbtInt16: newOp = EOpConvInt16ToFloat; break;
+ case EbtUint16: newOp = EOpConvUint16ToFloat; break;
+ case EbtInt: newOp = EOpConvIntToFloat; break;
+ case EbtUint: newOp = EOpConvUintToFloat; break;
+ case EbtBool: newOp = EOpConvBoolToFloat; break;
+ case EbtDouble: newOp = EOpConvDoubleToFloat; break;
+ case EbtFloat16: newOp = EOpConvFloat16ToFloat; break;
+ case EbtInt64: newOp = EOpConvInt64ToFloat; break;
+ case EbtUint64: newOp = EOpConvUint64ToFloat; break;
+ default:
+ return nullptr;
+ }
+ break;
+ case EbtFloat16:
+ switch (node->getBasicType()) {
+ case EbtInt8: newOp = EOpConvInt8ToFloat16; break;
+ case EbtUint8: newOp = EOpConvUint8ToFloat16; break;
+ case EbtInt16: newOp = EOpConvInt16ToFloat16; break;
+ case EbtUint16: newOp = EOpConvUint16ToFloat16; break;
+ case EbtInt: newOp = EOpConvIntToFloat16; break;
+ case EbtUint: newOp = EOpConvUintToFloat16; break;
+ case EbtBool: newOp = EOpConvBoolToFloat16; break;
+ case EbtFloat: newOp = EOpConvFloatToFloat16; break;
+ case EbtDouble: newOp = EOpConvDoubleToFloat16; break;
+ case EbtInt64: newOp = EOpConvInt64ToFloat16; break;
+ case EbtUint64: newOp = EOpConvUint64ToFloat16; break;
+ default:
+ return nullptr;
+ }
+ break;
+ case EbtBool:
+ switch (node->getBasicType()) {
+ case EbtInt8: newOp = EOpConvInt8ToBool; break;
+ case EbtUint8: newOp = EOpConvUint8ToBool; break;
+ case EbtInt16: newOp = EOpConvInt16ToBool; break;
+ case EbtUint16: newOp = EOpConvUint16ToBool; break;
+ case EbtInt: newOp = EOpConvIntToBool; break;
+ case EbtUint: newOp = EOpConvUintToBool; break;
+ case EbtFloat: newOp = EOpConvFloatToBool; break;
+ case EbtDouble: newOp = EOpConvDoubleToBool; break;
+ case EbtFloat16: newOp = EOpConvFloat16ToBool; break;
+ case EbtInt64: newOp = EOpConvInt64ToBool; break;
+ case EbtUint64: newOp = EOpConvUint64ToBool; break;
+ default:
+ return nullptr;
+ }
+ break;
+ case EbtInt8:
+ switch (node->getBasicType()) {
+ case EbtUint8: newOp = EOpConvUint8ToInt8; break;
+ case EbtInt16: newOp = EOpConvInt16ToInt8; break;
+ case EbtUint16: newOp = EOpConvUint16ToInt8; break;
+ case EbtInt: newOp = EOpConvIntToInt8; break;
+ case EbtUint: newOp = EOpConvUintToInt8; break;
+ case EbtInt64: newOp = EOpConvInt64ToInt8; break;
+ case EbtUint64: newOp = EOpConvUint64ToInt8; break;
+ case EbtBool: newOp = EOpConvBoolToInt8; break;
+ case EbtFloat: newOp = EOpConvFloatToInt8; break;
+ case EbtDouble: newOp = EOpConvDoubleToInt8; break;
+ case EbtFloat16: newOp = EOpConvFloat16ToInt8; break;
+ default:
+ return nullptr;
+ }
+ break;
+ case EbtUint8:
+ switch (node->getBasicType()) {
+ case EbtInt8: newOp = EOpConvInt8ToUint8; break;
+ case EbtInt16: newOp = EOpConvInt16ToUint8; break;
+ case EbtUint16: newOp = EOpConvUint16ToUint8; break;
+ case EbtInt: newOp = EOpConvIntToUint8; break;
+ case EbtUint: newOp = EOpConvUintToUint8; break;
+ case EbtInt64: newOp = EOpConvInt64ToUint8; break;
+ case EbtUint64: newOp = EOpConvUint64ToUint8; break;
+ case EbtBool: newOp = EOpConvBoolToUint8; break;
+ case EbtFloat: newOp = EOpConvFloatToUint8; break;
+ case EbtDouble: newOp = EOpConvDoubleToUint8; break;
+ case EbtFloat16: newOp = EOpConvFloat16ToUint8; break;
+ default:
+ return nullptr;
+ }
+ break;
+
+ case EbtInt16:
+ switch (node->getBasicType()) {
+ case EbtUint8: newOp = EOpConvUint8ToInt16; break;
+ case EbtInt8: newOp = EOpConvInt8ToInt16; break;
+ case EbtUint16: newOp = EOpConvUint16ToInt16; break;
+ case EbtInt: newOp = EOpConvIntToInt16; break;
+ case EbtUint: newOp = EOpConvUintToInt16; break;
+ case EbtInt64: newOp = EOpConvInt64ToInt16; break;
+ case EbtUint64: newOp = EOpConvUint64ToInt16; break;
+ case EbtBool: newOp = EOpConvBoolToInt16; break;
+ case EbtFloat: newOp = EOpConvFloatToInt16; break;
+ case EbtDouble: newOp = EOpConvDoubleToInt16; break;
+ case EbtFloat16: newOp = EOpConvFloat16ToInt16; break;
+ default:
+ return nullptr;
+ }
+ break;
+ case EbtUint16:
+ switch (node->getBasicType()) {
+ case EbtInt8: newOp = EOpConvInt8ToUint16; break;
+ case EbtUint8: newOp = EOpConvUint8ToUint16; break;
+ case EbtInt16: newOp = EOpConvInt16ToUint16; break;
+ case EbtInt: newOp = EOpConvIntToUint16; break;
+ case EbtUint: newOp = EOpConvUintToUint16; break;
+ case EbtInt64: newOp = EOpConvInt64ToUint16; break;
+ case EbtUint64: newOp = EOpConvUint64ToUint16; break;
+ case EbtBool: newOp = EOpConvBoolToUint16; break;
+ case EbtFloat: newOp = EOpConvFloatToUint16; break;
+ case EbtDouble: newOp = EOpConvDoubleToUint16; break;
+ case EbtFloat16: newOp = EOpConvFloat16ToUint16; break;
+ default:
+ return nullptr;
+ }
+ break;
+
+ case EbtInt:
+ switch (node->getBasicType()) {
+ case EbtInt8: newOp = EOpConvInt8ToInt; break;
+ case EbtUint8: newOp = EOpConvUint8ToInt; break;
+ case EbtInt16: newOp = EOpConvInt16ToInt; break;
+ case EbtUint16: newOp = EOpConvUint16ToInt; break;
+ case EbtUint: newOp = EOpConvUintToInt; break;
+ case EbtBool: newOp = EOpConvBoolToInt; break;
+ case EbtFloat: newOp = EOpConvFloatToInt; break;
+ case EbtDouble: newOp = EOpConvDoubleToInt; break;
+ case EbtFloat16: newOp = EOpConvFloat16ToInt; break;
+ case EbtInt64: newOp = EOpConvInt64ToInt; break;
+ case EbtUint64: newOp = EOpConvUint64ToInt; break;
+ default:
+ return nullptr;
+ }
+ break;
+ case EbtUint:
+ switch (node->getBasicType()) {
+ case EbtInt8: newOp = EOpConvInt8ToUint; break;
+ case EbtUint8: newOp = EOpConvUint8ToUint; break;
+ case EbtInt16: newOp = EOpConvInt16ToUint; break;
+ case EbtUint16: newOp = EOpConvUint16ToUint; break;
+ case EbtInt: newOp = EOpConvIntToUint; break;
+ case EbtBool: newOp = EOpConvBoolToUint; break;
+ case EbtFloat: newOp = EOpConvFloatToUint; break;
+ case EbtDouble: newOp = EOpConvDoubleToUint; break;
+ case EbtFloat16: newOp = EOpConvFloat16ToUint; break;
+ case EbtInt64: newOp = EOpConvInt64ToUint; break;
+ case EbtUint64: newOp = EOpConvUint64ToUint; break;
+ default:
+ return nullptr;
+ }
+ break;
+ case EbtInt64:
+ switch (node->getBasicType()) {
+ case EbtInt8: newOp = EOpConvInt8ToInt64; break;
+ case EbtUint8: newOp = EOpConvUint8ToInt64; break;
+ case EbtInt16: newOp = EOpConvInt16ToInt64; break;
+ case EbtUint16: newOp = EOpConvUint16ToInt64; break;
+ case EbtInt: newOp = EOpConvIntToInt64; break;
+ case EbtUint: newOp = EOpConvUintToInt64; break;
+ case EbtBool: newOp = EOpConvBoolToInt64; break;
+ case EbtFloat: newOp = EOpConvFloatToInt64; break;
+ case EbtDouble: newOp = EOpConvDoubleToInt64; break;
+ case EbtFloat16: newOp = EOpConvFloat16ToInt64; break;
+ case EbtUint64: newOp = EOpConvUint64ToInt64; break;
+ default:
+ return nullptr;
+ }
+ break;
+ case EbtUint64:
+ switch (node->getBasicType()) {
+ case EbtInt8: newOp = EOpConvInt8ToUint64; break;
+ case EbtUint8: newOp = EOpConvUint8ToUint64; break;
+ case EbtInt16: newOp = EOpConvInt16ToUint64; break;
+ case EbtUint16: newOp = EOpConvUint16ToUint64; break;
+ case EbtInt: newOp = EOpConvIntToUint64; break;
+ case EbtUint: newOp = EOpConvUintToUint64; break;
+ case EbtBool: newOp = EOpConvBoolToUint64; break;
+ case EbtFloat: newOp = EOpConvFloatToUint64; break;
+ case EbtDouble: newOp = EOpConvDoubleToUint64; break;
+ case EbtFloat16: newOp = EOpConvFloat16ToUint64; break;
+ case EbtInt64: newOp = EOpConvInt64ToUint64; break;
+ default:
+ return nullptr;
+ }
+ break;
+ default:
+ return nullptr;
+ }
+
+ TType newType(convertTo, EvqTemporary, node->getVectorSize(), node->getMatrixCols(), node->getMatrixRows());
+ newNode = addUnaryNode(newOp, node, node->getLoc(), newType);
+
+ if (node->getAsConstantUnion()) {
+ TIntermTyped* folded = node->getAsConstantUnion()->fold(newOp, newType);
+ if (folded)
+ return folded;
+ }
+
+ // Propagate specialization-constant-ness, if allowed
+ if (node->getType().getQualifier().isSpecConstant() && isSpecializationOperation(*newNode))
+ newNode->getWritableType().getQualifier().makeSpecConstant();
+
+ return newNode;
+}
+
+TIntermTyped* TIntermediate::addConversion(TBasicType convertTo, TIntermTyped* node) const
+{
+ return createConversion(convertTo, node);
+}
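+
+// Illustrative sketch (not part of the original source; 'ir' is an assumed
+// TIntermediate and 'n' an assumed EbtInt-typed node): the table above picks
+// EOpConvIntToFloat and wraps 'n' in a unary conversion node:
+//
+//     TIntermTyped* f = ir.addConversion(EbtFloat, n);
+//     // on success: f->getAsUnaryNode()->getOp() == EOpConvIntToFloat
+//     //             f->getBasicType() == EbtFloat
+//     // if n is a constant union, f is the folded constant instead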
+
+// For converting the two operands of a binary operation to types compatible
+// with each other, relative to the operation in 'op'.
+// This does not cover assignment operations, which are asymmetric in that the
+// left type is not changeable.
+// See addConversion(op, type, node) for assignments and unary operation
+// conversions.
+//
+// Generally, this is focused on basic type conversion, not shape conversion.
+// See addShapeConversion() for shape conversions.
+//
+// Returns the converted pair of nodes.
+// Returns <nullptr, nullptr> when there is no conversion.
+std::tuple<TIntermTyped*, TIntermTyped*>
+TIntermediate::addConversion(TOperator op, TIntermTyped* node0, TIntermTyped* node1)
+{
+ if (!isConversionAllowed(op, node0) || !isConversionAllowed(op, node1))
+ return std::make_tuple(nullptr, nullptr);
+
+ if (node0->getType() != node1->getType()) {
+ // If differing structure, then no conversions.
+ if (node0->isStruct() || node1->isStruct())
+ return std::make_tuple(nullptr, nullptr);
+
+ // If differing arrays, then no conversions.
+ if (node0->getType().isArray() || node1->getType().isArray())
+ return std::make_tuple(nullptr, nullptr);
+
+ // No implicit conversions for operations involving cooperative matrices
+ if (node0->getType().isCoopMat() || node1->getType().isCoopMat())
+ return std::make_tuple(node0, node1);
+ }
+
+ auto promoteTo = std::make_tuple(EbtNumTypes, EbtNumTypes);
+
+ switch (op) {
+ //
+ // List all the binary ops that can implicitly convert one operand to the other's type;
+ // This implements the 'policy' for implicit type conversion.
+ //
+ case EOpLessThan:
+ case EOpGreaterThan:
+ case EOpLessThanEqual:
+ case EOpGreaterThanEqual:
+ case EOpEqual:
+ case EOpNotEqual:
+
+ case EOpAdd:
+ case EOpSub:
+ case EOpMul:
+ case EOpDiv:
+ case EOpMod:
+
+ case EOpVectorTimesScalar:
+ case EOpVectorTimesMatrix:
+ case EOpMatrixTimesVector:
+ case EOpMatrixTimesScalar:
+
+ case EOpAnd:
+ case EOpInclusiveOr:
+ case EOpExclusiveOr:
+
+ case EOpSequence: // used by ?:
+
+ if (node0->getBasicType() == node1->getBasicType())
+ return std::make_tuple(node0, node1);
+
+ promoteTo = getConversionDestinatonType(node0->getBasicType(), node1->getBasicType(), op);
+ if (std::get<0>(promoteTo) == EbtNumTypes || std::get<1>(promoteTo) == EbtNumTypes)
+ return std::make_tuple(nullptr, nullptr);
+
+ break;
+
+ case EOpLogicalAnd:
+ case EOpLogicalOr:
+ case EOpLogicalXor:
+ if (source == EShSourceHlsl)
+ promoteTo = std::make_tuple(EbtBool, EbtBool);
+ else
+ return std::make_tuple(node0, node1);
+ break;
+
+ // There are no conversions needed for GLSL; the shift amount just needs to be an
+ // integer type, as does the base.
+ // HLSL can promote bools to ints to make this work.
+ case EOpLeftShift:
+ case EOpRightShift:
+ if (source == EShSourceHlsl) {
+ TBasicType node0BasicType = node0->getBasicType();
+ if (node0BasicType == EbtBool)
+ node0BasicType = EbtInt;
+ if (node1->getBasicType() == EbtBool)
+ promoteTo = std::make_tuple(node0BasicType, EbtInt);
+ else
+ promoteTo = std::make_tuple(node0BasicType, node1->getBasicType());
+ } else {
+ if (isTypeInt(node0->getBasicType()) && isTypeInt(node1->getBasicType()))
+ return std::make_tuple(node0, node1);
+ else
+ return std::make_tuple(nullptr, nullptr);
+ }
+ break;
+
+ default:
+ if (node0->getType() == node1->getType())
+ return std::make_tuple(node0, node1);
+
+ return std::make_tuple(nullptr, nullptr);
+ }
+
+ TIntermTyped* newNode0;
+ TIntermTyped* newNode1;
+
+ if (std::get<0>(promoteTo) != node0->getType().getBasicType()) {
+ if (node0->getAsConstantUnion())
+ newNode0 = promoteConstantUnion(std::get<0>(promoteTo), node0->getAsConstantUnion());
+ else
+ newNode0 = createConversion(std::get<0>(promoteTo), node0);
+ } else
+ newNode0 = node0;
+
+ if (std::get<1>(promoteTo) != node1->getType().getBasicType()) {
+ if (node1->getAsConstantUnion())
+ newNode1 = promoteConstantUnion(std::get<1>(promoteTo), node1->getAsConstantUnion());
+ else
+ newNode1 = createConversion(std::get<1>(promoteTo), node1);
+ } else
+ newNode1 = node1;
+
+ return std::make_tuple(newNode0, newNode1);
+}
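+
+// Illustrative sketch (assumed names 'ir', 'n0', 'n1'): for GLSL, adding an
+// int node and a float node converts the int side so both types match:
+//
+//     // n0 : EbtInt, n1 : EbtFloat
+//     TIntermTyped *c0, *c1;
+//     std::tie(c0, c1) = ir.addConversion(EOpAdd, n0, n1);
+//     // c0 is a new EOpConvIntToFloat node (or a folded constant),
+//     // c1 == n1, and both now have basic type EbtFloat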
+
+//
+// Convert the node's type to the given type, as allowed by the operation involved: 'op'.
+// For implicit conversions, 'op' is not the requested conversion; it is the
+// explicit operation requiring the implicit conversion.
+//
+// Binary operation conversions should be handled by addConversion(op, node, node), not here.
+//
+// Returns a node representing the conversion, which could be the same
+// node passed in if no conversion was needed.
+//
+// Generally, this is focused on basic type conversion, not shape conversion.
+// See addShapeConversion() for shape conversions.
+//
+// Return nullptr if a conversion can't be done.
+//
+TIntermTyped* TIntermediate::addConversion(TOperator op, const TType& type, TIntermTyped* node)
+{
+ if (!isConversionAllowed(op, node))
+ return nullptr;
+
+ // Otherwise, if types are identical, no problem
+ if (type == node->getType())
+ return node;
+
+ // If one's a structure, then no conversions.
+ if (type.isStruct() || node->isStruct())
+ return nullptr;
+
+ // If one's an array, then no conversions.
+ if (type.isArray() || node->getType().isArray())
+ return nullptr;
+
+ // Note: callers are responsible for other aspects of shape,
+ // like vector and matrix sizes.
+
+ TBasicType promoteTo;
+ // GL_EXT_shader_16bit_storage can't do OpConstantComposite with
+ // 16-bit types, so disable promotion for those types.
+ bool canPromoteConstant = true;
+
+ switch (op) {
+ //
+ // Explicit conversions (unary operations)
+ //
+ case EOpConstructBool:
+ promoteTo = EbtBool;
+ break;
+ case EOpConstructFloat:
+ promoteTo = EbtFloat;
+ break;
+ case EOpConstructDouble:
+ promoteTo = EbtDouble;
+ break;
+ case EOpConstructFloat16:
+ promoteTo = EbtFloat16;
+ canPromoteConstant = extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types) ||
+ extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types_float16);
+ break;
+ case EOpConstructInt8:
+ promoteTo = EbtInt8;
+ canPromoteConstant = extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types) ||
+ extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types_int8);
+ break;
+ case EOpConstructUint8:
+ promoteTo = EbtUint8;
+ canPromoteConstant = extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types) ||
+ extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types_int8);
+ break;
+ case EOpConstructInt16:
+ promoteTo = EbtInt16;
+ canPromoteConstant = extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types) ||
+ extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types_int16);
+ break;
+ case EOpConstructUint16:
+ promoteTo = EbtUint16;
+ canPromoteConstant = extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types) ||
+ extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types_int16);
+ break;
+ case EOpConstructInt:
+ promoteTo = EbtInt;
+ break;
+ case EOpConstructUint:
+ promoteTo = EbtUint;
+ break;
+ case EOpConstructInt64:
+ promoteTo = EbtInt64;
+ break;
+ case EOpConstructUint64:
+ promoteTo = EbtUint64;
+ break;
+
+ case EOpLogicalNot:
+
+ case EOpFunctionCall:
+
+ case EOpReturn:
+ case EOpAssign:
+ case EOpAddAssign:
+ case EOpSubAssign:
+ case EOpMulAssign:
+ case EOpVectorTimesScalarAssign:
+ case EOpMatrixTimesScalarAssign:
+ case EOpDivAssign:
+ case EOpModAssign:
+ case EOpAndAssign:
+ case EOpInclusiveOrAssign:
+ case EOpExclusiveOrAssign:
+
+ case EOpAtan:
+ case EOpClamp:
+ case EOpCross:
+ case EOpDistance:
+ case EOpDot:
+ case EOpDst:
+ case EOpFaceForward:
+ case EOpFma:
+ case EOpFrexp:
+ case EOpLdexp:
+ case EOpMix:
+ case EOpLit:
+ case EOpMax:
+ case EOpMin:
+ case EOpModf:
+ case EOpPow:
+ case EOpReflect:
+ case EOpRefract:
+ case EOpSmoothStep:
+ case EOpStep:
+
+ case EOpSequence:
+ case EOpConstructStruct:
+ case EOpConstructCooperativeMatrix:
+
+ if (type.getBasicType() == EbtReference || node->getType().getBasicType() == EbtReference) {
+ // types must match to assign a reference
+ if (type == node->getType())
+ return node;
+ else
+ return nullptr;
+ }
+
+ if (type.getBasicType() == node->getType().getBasicType())
+ return node;
+
+ if (canImplicitlyPromote(node->getBasicType(), type.getBasicType(), op))
+ promoteTo = type.getBasicType();
+ else
+ return nullptr;
+ break;
+
+ // For GLSL, there are no conversions needed; the shift amount just needs to be an
+ // integer type, as do the base/result.
+ // HLSL can convert the shift from a bool to an int.
+ case EOpLeftShiftAssign:
+ case EOpRightShiftAssign:
+ {
+ if (source == EShSourceHlsl && node->getType().getBasicType() == EbtBool)
+ promoteTo = type.getBasicType();
+ else {
+ if (isTypeInt(type.getBasicType()) && isTypeInt(node->getBasicType()))
+ return node;
+ else
+ return nullptr;
+ }
+ break;
+ }
+
+ default:
+ // default is to require a match; all exceptions should have case statements above
+
+ if (type.getBasicType() == node->getType().getBasicType())
+ return node;
+ else
+ return nullptr;
+ }
+
+ if (canPromoteConstant && node->getAsConstantUnion())
+ return promoteConstantUnion(promoteTo, node->getAsConstantUnion());
+
+ //
+ // Add a new newNode for the conversion.
+ //
+ TIntermTyped* newNode = createConversion(promoteTo, node);
+
+ return newNode;
+}
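+
+// Illustrative sketch (assumed names): implicit conversion on assignment.
+// Assigning an int-typed expression to a float-typed target goes through
+// EOpAssign:
+//
+//     // floatType : TType(EbtFloat), rhs : EbtInt expression
+//     TIntermTyped* converted = ir.addConversion(EOpAssign, floatType, rhs);
+//     // converted wraps rhs in EOpConvIntToFloat (or promotes the constant);
+//     // it is nullptr when the implicit promotion is not allowed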
+
+// Convert the node's shape to that of the given type, as allowed by the
+// operation involved: 'op'. This is for situations where there is only one
+// direction to consider doing the shape conversion.
+//
+// This implements the policy; it calls addShapeConversion() for the mechanism.
+//
+// Generally, the AST represents allowed GLSL shapes, so this isn't needed
+// for GLSL. Bad shapes are caught in conversion or promotion.
+//
+// Return 'node' if no conversion was done. Promotion handles final shape
+// checking.
+//
+TIntermTyped* TIntermediate::addUniShapeConversion(TOperator op, const TType& type, TIntermTyped* node)
+{
+ // some source languages don't do this
+ switch (source) {
+ case EShSourceHlsl:
+ break;
+ case EShSourceGlsl:
+ default:
+ return node;
+ }
+
+ // some operations don't do this
+ switch (op) {
+ case EOpFunctionCall:
+ case EOpReturn:
+ break;
+
+ case EOpMulAssign:
+        // We want to keep vector *= scalar as a native op in the AST and lower it
+        // later, rather than smearing the scalar; similarly for matrix *= scalar, etc.
+
+ case EOpAddAssign:
+ case EOpSubAssign:
+ case EOpDivAssign:
+ case EOpAndAssign:
+ case EOpInclusiveOrAssign:
+ case EOpExclusiveOrAssign:
+ case EOpRightShiftAssign:
+ case EOpLeftShiftAssign:
+ if (node->getVectorSize() == 1)
+ return node;
+ break;
+
+ case EOpAssign:
+ break;
+
+ case EOpMix:
+ break;
+
+ default:
+ return node;
+ }
+
+ return addShapeConversion(type, node);
+}
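+
+// Illustrative sketch (assumed names; HLSL source only): passing a scalar
+// where a vec4 parameter is expected smears the scalar via a constructor:
+//
+//     // vec4Type : TType(EbtFloat, EvqTemporary, 4), s : scalar float node
+//     TIntermTyped* arg = ir.addUniShapeConversion(EOpFunctionCall, vec4Type, s);
+//     // arg is an EOpConstructVec4 aggregate built from s;
+//     // for GLSL source the node is returned unchanged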
+
+// Convert the nodes' shapes to be compatible for the operation 'op'.
+//
+// This implements the policy; it calls addShapeConversion() for the mechanism.
+//
+// Generally, the AST represents allowed GLSL shapes, so this isn't needed
+// for GLSL. Bad shapes are caught in conversion or promotion.
+//
+void TIntermediate::addBiShapeConversion(TOperator op, TIntermTyped*& lhsNode, TIntermTyped*& rhsNode)
+{
+ // some source languages don't do this
+ switch (source) {
+ case EShSourceHlsl:
+ break;
+ case EShSourceGlsl:
+ default:
+ return;
+ }
+
+ // some operations don't do this
+ // 'break' will mean attempt bidirectional conversion
+ switch (op) {
+ case EOpMulAssign:
+ case EOpAssign:
+ case EOpAddAssign:
+ case EOpSubAssign:
+ case EOpDivAssign:
+ case EOpAndAssign:
+ case EOpInclusiveOrAssign:
+ case EOpExclusiveOrAssign:
+ case EOpRightShiftAssign:
+ case EOpLeftShiftAssign:
+ // switch to unidirectional conversion (the lhs can't change)
+ rhsNode = addUniShapeConversion(op, lhsNode->getType(), rhsNode);
+ return;
+
+ case EOpMul:
+ // matrix multiply does not change shapes
+ if (lhsNode->isMatrix() && rhsNode->isMatrix())
+            return;
+        // fall through
+    case EOpAdd:
+ case EOpSub:
+ case EOpDiv:
+        // We want to keep vector * scalar as a native op in the AST and lower it
+        // later, rather than smearing the scalar; similarly for matrix * vector, etc.
+ if (lhsNode->getVectorSize() == 1 || rhsNode->getVectorSize() == 1)
+ return;
+ break;
+
+ case EOpRightShift:
+ case EOpLeftShift:
+ // can natively support the right operand being a scalar and the left a vector,
+ // but not the reverse
+ if (rhsNode->getVectorSize() == 1)
+ return;
+ break;
+
+ case EOpLessThan:
+ case EOpGreaterThan:
+ case EOpLessThanEqual:
+ case EOpGreaterThanEqual:
+
+ case EOpEqual:
+ case EOpNotEqual:
+
+ case EOpLogicalAnd:
+ case EOpLogicalOr:
+ case EOpLogicalXor:
+
+ case EOpAnd:
+ case EOpInclusiveOr:
+ case EOpExclusiveOr:
+
+ case EOpMix:
+ break;
+
+ default:
+ return;
+ }
+
+ // Do bidirectional conversions
+ if (lhsNode->getType().isScalarOrVec1() || rhsNode->getType().isScalarOrVec1()) {
+ if (lhsNode->getType().isScalarOrVec1())
+ lhsNode = addShapeConversion(rhsNode->getType(), lhsNode);
+ else
+ rhsNode = addShapeConversion(lhsNode->getType(), rhsNode);
+ }
+ lhsNode = addShapeConversion(rhsNode->getType(), lhsNode);
+ rhsNode = addShapeConversion(lhsNode->getType(), rhsNode);
+}
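+
+// Illustrative sketch (assumed names; HLSL source only): comparing a scalar
+// with a vec3 converts the scalar side so the shapes agree:
+//
+//     // lhs : scalar float, rhs : 3-component float vector
+//     ir.addBiShapeConversion(EOpLessThan, lhs, rhs);
+//     // lhs is replaced by an EOpConstructVec3 aggregate smearing the
+//     // scalar; rhs is unchanged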
+
+// Convert the node's shape to that of the given type, as allowed by the
+// operation involved: 'op'.
+//
+// Generally, the AST represents allowed GLSL shapes, so this isn't needed
+// for GLSL. Bad shapes are caught in conversion or promotion.
+//
+// Return 'node' if no conversion was done. Promotion handles final shape
+// checking.
+//
+TIntermTyped* TIntermediate::addShapeConversion(const TType& type, TIntermTyped* node)
+{
+ // no conversion needed
+ if (node->getType() == type)
+ return node;
+
+ // structures and arrays don't change shape, either to or from
+ if (node->getType().isStruct() || node->getType().isArray() ||
+ type.isStruct() || type.isArray())
+ return node;
+
+    // The constructor operator for the new node that handles the conversion
+    TOperator constructorOp = mapTypeToConstructorOp(type);
+
+ if (source == EShSourceHlsl) {
+        // HLSL rules for scalar, vector and matrix conversions:
+        // 1) a scalar can become anything, initializing every component with its value
+        // 2) a vector or matrix can become a scalar; the first element is used (warning: truncation)
+        // 3) a matrix can become a matrix with fewer rows and/or columns (warning: truncation)
+        // 4) a vector can become a vector of smaller size (warning: truncation)
+        // 5a) a 4-component vector can become a 2x2 matrix (special case; same packing layout, it's a reinterpret)
+        // 5b) a 2x2 matrix can become a 4-component vector (special case; same packing layout, it's a reinterpret)
+
+ const TType &sourceType = node->getType();
+
+ // rule 1 for scalar to matrix is special
+ if (sourceType.isScalarOrVec1() && type.isMatrix()) {
+
+ // HLSL semantics: the scalar (or vec1) is replicated to every component of the matrix. Left to its
+ // own devices, the constructor from a scalar would populate the diagonal. This forces replication
+ // to every matrix element.
+
+            // Note that if the node is complex (e.g., a function call), we don't want to duplicate it here
+ // repeatedly, so we copy it to a temp, then use the temp.
+ const int matSize = type.computeNumComponents();
+ TIntermAggregate* rhsAggregate = new TIntermAggregate();
+
+ const bool isSimple = (node->getAsSymbolNode() != nullptr) || (node->getAsConstantUnion() != nullptr);
+
+ if (!isSimple) {
+ assert(0); // TODO: use node replicator service when available.
+ }
+
+ for (int x = 0; x < matSize; ++x)
+ rhsAggregate->getSequence().push_back(node);
+
+ return setAggregateOperator(rhsAggregate, constructorOp, type, node->getLoc());
+ }
+
+ // rule 1 and 2
+ if ((sourceType.isScalar() && !type.isScalar()) || (!sourceType.isScalar() && type.isScalar()))
+ return setAggregateOperator(makeAggregate(node), constructorOp, type, node->getLoc());
+
+ // rule 3 and 5b
+ if (sourceType.isMatrix()) {
+ // rule 3
+ if (type.isMatrix()) {
+ if ((sourceType.getMatrixCols() != type.getMatrixCols() || sourceType.getMatrixRows() != type.getMatrixRows()) &&
+ sourceType.getMatrixCols() >= type.getMatrixCols() && sourceType.getMatrixRows() >= type.getMatrixRows())
+ return setAggregateOperator(makeAggregate(node), constructorOp, type, node->getLoc());
+ // rule 5b
+ } else if (type.isVector()) {
+ if (type.getVectorSize() == 4 && sourceType.getMatrixCols() == 2 && sourceType.getMatrixRows() == 2)
+ return setAggregateOperator(makeAggregate(node), constructorOp, type, node->getLoc());
+ }
+ }
+
+ // rule 4 and 5a
+ if (sourceType.isVector()) {
+ // rule 4
+            if (type.isVector()) {
+ if (sourceType.getVectorSize() > type.getVectorSize())
+ return setAggregateOperator(makeAggregate(node), constructorOp, type, node->getLoc());
+ // rule 5a
+ } else if (type.isMatrix()) {
+ if (sourceType.getVectorSize() == 4 && type.getMatrixCols() == 2 && type.getMatrixRows() == 2)
+ return setAggregateOperator(makeAggregate(node), constructorOp, type, node->getLoc());
+ }
+ }
+ }
+
+ // scalar -> vector or vec1 -> vector or
+ // vector -> scalar or
+ // bigger vector -> smaller vector
+ if ((node->getType().isScalarOrVec1() && type.isVector()) ||
+ (node->getType().isVector() && type.isScalar()) ||
+ (node->isVector() && type.isVector() && node->getVectorSize() > type.getVectorSize()))
+ return setAggregateOperator(makeAggregate(node), constructorOp, type, node->getLoc());
+
+ return node;
+}
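+
+// Illustrative sketch (assumed names): HLSL rule 5a above reinterprets a
+// 4-component vector as a 2x2 matrix:
+//
+//     // mat2Type : roughly TType(EbtFloat, EvqTemporary, 1, 2, 2), v4 : float4
+//     TIntermTyped* m = ir.addShapeConversion(mat2Type, v4);
+//     // m is an EOpConstructMat2x2 aggregate around v4; the packing layout
+//     // is the same, so this is a reinterpretation, not a data shuffle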
+
+bool TIntermediate::isIntegralPromotion(TBasicType from, TBasicType to) const
+{
+ // integral promotions
+ if (to == EbtInt) {
+ switch(from) {
+ case EbtInt8:
+ case EbtInt16:
+ case EbtUint8:
+ case EbtUint16:
+ return true;
+ default:
+ break;
+ }
+ }
+ return false;
+}
+
+bool TIntermediate::isFPPromotion(TBasicType from, TBasicType to) const
+{
+ // floating-point promotions
+ if (to == EbtDouble) {
+ switch(from) {
+ case EbtFloat16:
+ case EbtFloat:
+ return true;
+ default:
+ break;
+ }
+ }
+ return false;
+}
+
+bool TIntermediate::isIntegralConversion(TBasicType from, TBasicType to) const
+{
+ switch (from) {
+ case EbtInt8:
+ switch (to) {
+ case EbtUint8:
+ case EbtInt16:
+ case EbtUint16:
+ case EbtUint:
+ case EbtInt64:
+ case EbtUint64:
+ return true;
+ default:
+ break;
+ }
+ break;
+ case EbtUint8:
+ switch (to) {
+ case EbtInt16:
+ case EbtUint16:
+ case EbtUint:
+ case EbtInt64:
+ case EbtUint64:
+ return true;
+ default:
+ break;
+ }
+ break;
+ case EbtInt16:
+ switch(to) {
+ case EbtUint16:
+ case EbtUint:
+ case EbtInt64:
+ case EbtUint64:
+ return true;
+ default:
+ break;
+ }
+ break;
+ case EbtUint16:
+ switch(to) {
+ case EbtUint:
+ case EbtInt64:
+ case EbtUint64:
+ return true;
+ default:
+ break;
+ }
+ break;
+ case EbtInt:
+ switch(to) {
+ case EbtUint:
+ return version >= 400 || (source == EShSourceHlsl);
+ case EbtInt64:
+ case EbtUint64:
+ return true;
+ default:
+ break;
+ }
+ break;
+ case EbtUint:
+ switch(to) {
+ case EbtInt64:
+ case EbtUint64:
+ return true;
+ default:
+ break;
+ }
+ break;
+ case EbtInt64:
+ if (to == EbtUint64) {
+ return true;
+ }
+ break;
+ default:
+ break;
+ }
+ return false;
+}
+
+bool TIntermediate::isFPConversion(TBasicType from, TBasicType to) const
+{
+    return to == EbtFloat && from == EbtFloat16;
+}
+
+bool TIntermediate::isFPIntegralConversion(TBasicType from, TBasicType to) const
+{
+ switch (from) {
+ case EbtInt8:
+ case EbtUint8:
+ case EbtInt16:
+ case EbtUint16:
+ switch (to) {
+ case EbtFloat16:
+ case EbtFloat:
+ case EbtDouble:
+ return true;
+ default:
+ break;
+ }
+ break;
+ case EbtInt:
+ case EbtUint:
+ switch(to) {
+ case EbtFloat:
+ case EbtDouble:
+ return true;
+ default:
+ break;
+ }
+ break;
+ case EbtInt64:
+ case EbtUint64:
+ if (to == EbtDouble) {
+ return true;
+ }
+ break;
+
+ default:
+ break;
+ }
+ return false;
+}
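+
+// Illustrative sketch: how the predicates above classify a few sample pairs,
+// named after the C++ promotion/conversion categories they are modeled on:
+//
+//     isIntegralPromotion(EbtInt16, EbtInt)      -> true  (widen to int)
+//     isIntegralConversion(EbtInt, EbtUint)      -> version >= 400 (or HLSL)
+//     isFPPromotion(EbtFloat, EbtDouble)         -> true
+//     isFPConversion(EbtFloat16, EbtFloat)       -> true
+//     isFPIntegralConversion(EbtInt, EbtFloat)   -> true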
+
+//
+// See if the 'from' type is allowed to be implicitly converted to the
+// 'to' type. This is not about vector/array/struct, only about basic type.
+//
+bool TIntermediate::canImplicitlyPromote(TBasicType from, TBasicType to, TOperator op) const
+{
+ if (profile == EEsProfile || version == 110)
+ return false;
+
+ if (from == to)
+ return true;
+
+ // TODO: Move more policies into language-specific handlers.
+ // Some languages allow more general (or potentially, more specific) conversions under some conditions.
+ if (source == EShSourceHlsl) {
+ const bool fromConvertable = (from == EbtFloat || from == EbtDouble || from == EbtInt || from == EbtUint || from == EbtBool);
+ const bool toConvertable = (to == EbtFloat || to == EbtDouble || to == EbtInt || to == EbtUint || to == EbtBool);
+
+ if (fromConvertable && toConvertable) {
+ switch (op) {
+ case EOpAndAssign: // assignments can perform arbitrary conversions
+ case EOpInclusiveOrAssign: // ...
+ case EOpExclusiveOrAssign: // ...
+ case EOpAssign: // ...
+ case EOpAddAssign: // ...
+ case EOpSubAssign: // ...
+ case EOpMulAssign: // ...
+ case EOpVectorTimesScalarAssign: // ...
+ case EOpMatrixTimesScalarAssign: // ...
+ case EOpDivAssign: // ...
+ case EOpModAssign: // ...
+ case EOpReturn: // function returns can also perform arbitrary conversions
+ case EOpFunctionCall: // conversion of a calling parameter
+ case EOpLogicalNot:
+ case EOpLogicalAnd:
+ case EOpLogicalOr:
+ case EOpLogicalXor:
+ case EOpConstructStruct:
+ return true;
+ default:
+ break;
+ }
+ }
+ }
+
+ bool explicitTypesEnabled = extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types) ||
+ extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types_int8) ||
+ extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types_int16) ||
+ extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types_int32) ||
+ extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types_int64) ||
+ extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types_float16) ||
+ extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types_float32) ||
+ extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types_float64);
+
+ if (explicitTypesEnabled) {
+ // integral promotions
+ if (isIntegralPromotion(from, to)) {
+ return true;
+ }
+
+ // floating-point promotions
+ if (isFPPromotion(from, to)) {
+ return true;
+ }
+
+ // integral conversions
+ if (isIntegralConversion(from, to)) {
+ return true;
+ }
+
+ // floating-point conversions
+ if (isFPConversion(from, to)) {
+ return true;
+ }
+
+ // floating-integral conversions
+ if (isFPIntegralConversion(from, to)) {
+ return true;
+ }
+
+ // hlsl supported conversions
+ if (source == EShSourceHlsl) {
+ if (from == EbtBool && (to == EbtInt || to == EbtUint || to == EbtFloat))
+ return true;
+ }
+ } else {
+ switch (to) {
+ case EbtDouble:
+ switch (from) {
+ case EbtInt:
+ case EbtUint:
+ case EbtInt64:
+ case EbtUint64:
+ case EbtFloat:
+ case EbtDouble:
+ return true;
+#ifdef AMD_EXTENSIONS
+ case EbtInt16:
+ case EbtUint16:
+ return extensionRequested(E_GL_AMD_gpu_shader_int16);
+ case EbtFloat16:
+ return extensionRequested(E_GL_AMD_gpu_shader_half_float);
+#endif
+ default:
+ return false;
+ }
+ case EbtFloat:
+ switch (from) {
+ case EbtInt:
+ case EbtUint:
+ case EbtFloat:
+ return true;
+ case EbtBool:
+ return (source == EShSourceHlsl);
+#ifdef AMD_EXTENSIONS
+ case EbtInt16:
+ case EbtUint16:
+ return extensionRequested(E_GL_AMD_gpu_shader_int16);
+#endif
+ case EbtFloat16:
+ return
+#ifdef AMD_EXTENSIONS
+ extensionRequested(E_GL_AMD_gpu_shader_half_float) ||
+#endif
+ (source == EShSourceHlsl);
+ default:
+ return false;
+ }
+ case EbtUint:
+ switch (from) {
+ case EbtInt:
+ return version >= 400 || (source == EShSourceHlsl);
+ case EbtUint:
+ return true;
+ case EbtBool:
+ return (source == EShSourceHlsl);
+#ifdef AMD_EXTENSIONS
+ case EbtInt16:
+ case EbtUint16:
+ return extensionRequested(E_GL_AMD_gpu_shader_int16);
+#endif
+ default:
+ return false;
+ }
+ case EbtInt:
+ switch (from) {
+ case EbtInt:
+ return true;
+ case EbtBool:
+ return (source == EShSourceHlsl);
+#ifdef AMD_EXTENSIONS
+ case EbtInt16:
+ return extensionRequested(E_GL_AMD_gpu_shader_int16);
+#endif
+ default:
+ return false;
+ }
+ case EbtUint64:
+ switch (from) {
+ case EbtInt:
+ case EbtUint:
+ case EbtInt64:
+ case EbtUint64:
+ return true;
+#ifdef AMD_EXTENSIONS
+ case EbtInt16:
+ case EbtUint16:
+ return extensionRequested(E_GL_AMD_gpu_shader_int16);
+#endif
+ default:
+ return false;
+ }
+ case EbtInt64:
+ switch (from) {
+ case EbtInt:
+ case EbtInt64:
+ return true;
+#ifdef AMD_EXTENSIONS
+ case EbtInt16:
+ return extensionRequested(E_GL_AMD_gpu_shader_int16);
+#endif
+ default:
+ return false;
+ }
+ case EbtFloat16:
+#ifdef AMD_EXTENSIONS
+ switch (from) {
+ case EbtInt16:
+ case EbtUint16:
+ return extensionRequested(E_GL_AMD_gpu_shader_int16);
+ case EbtFloat16:
+ return extensionRequested(E_GL_AMD_gpu_shader_half_float);
+ default:
+ break;
+ }
+#endif
+ return false;
+ case EbtUint16:
+#ifdef AMD_EXTENSIONS
+ switch (from) {
+ case EbtInt16:
+ case EbtUint16:
+ return extensionRequested(E_GL_AMD_gpu_shader_int16);
+ default:
+ break;
+ }
+#endif
+ return false;
+ default:
+ return false;
+ }
+ }
+
+ return false;
+}
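+
+// Illustrative sketch: without the explicit-arithmetic-types extensions, the
+// fixed table above applies; for desktop GLSL, for example:
+//
+//     canImplicitlyPromote(EbtInt, EbtFloat, EOpNull)   -> true
+//     canImplicitlyPromote(EbtInt, EbtUint, EOpNull)    -> true only for version >= 400
+//     canImplicitlyPromote(EbtFloat, EbtInt, EOpNull)   -> false (would be lossy)
+//     canImplicitlyPromote(EbtBool, EbtFloat, EOpNull)  -> false for GLSL, true for HLSL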
+
+static bool canSignedIntTypeRepresentAllUnsignedValues(TBasicType sintType, TBasicType uintType) {
+ switch(sintType) {
+ case EbtInt8:
+ switch(uintType) {
+ case EbtUint8:
+ case EbtUint16:
+ case EbtUint:
+ case EbtUint64:
+ return false;
+ default:
+ assert(false);
+ return false;
+ }
+ break;
+ case EbtInt16:
+ switch(uintType) {
+ case EbtUint8:
+ return true;
+ case EbtUint16:
+ case EbtUint:
+ case EbtUint64:
+ return false;
+ default:
+ assert(false);
+ return false;
+ }
+ break;
+ case EbtInt:
+ switch(uintType) {
+ case EbtUint8:
+ case EbtUint16:
+ return true;
+ case EbtUint:
+ return false;
+ default:
+ assert(false);
+ return false;
+ }
+ break;
+ case EbtInt64:
+ switch(uintType) {
+ case EbtUint8:
+ case EbtUint16:
+ case EbtUint:
+ return true;
+ case EbtUint64:
+ return false;
+ default:
+ assert(false);
+ return false;
+ }
+ break;
+ default:
+ assert(false);
+ return false;
+ }
+}
+
+
+static TBasicType getCorrespondingUnsignedType(TBasicType type) {
+ switch(type) {
+ case EbtInt8:
+ return EbtUint8;
+ case EbtInt16:
+ return EbtUint16;
+ case EbtInt:
+ return EbtUint;
+ case EbtInt64:
+ return EbtUint64;
+ default:
+ assert(false);
+ return EbtNumTypes;
+ }
+}
+
+// Implements the following rules
+// - If either operand has type float64_t or derived from float64_t,
+// the other shall be converted to float64_t or derived type.
+// - Otherwise, if either operand has type float32_t or derived from
+// float32_t, the other shall be converted to float32_t or derived type.
+// - Otherwise, if either operand has type float16_t or derived from
+// float16_t, the other shall be converted to float16_t or derived type.
+//    - Otherwise, if both operands have integer types, the following rules
+// shall be applied to the operands:
+// - If both operands have the same type, no further conversion
+// is needed.
+// - Otherwise, if both operands have signed integer types or both
+// have unsigned integer types, the operand with the type of lesser
+// integer conversion rank shall be converted to the type of the
+// operand with greater rank.
+// - Otherwise, if the operand that has unsigned integer type has rank
+// greater than or equal to the rank of the type of the other
+// operand, the operand with signed integer type shall be converted
+// to the type of the operand with unsigned integer type.
+// - Otherwise, if the type of the operand with signed integer type can
+// represent all of the values of the type of the operand with
+// unsigned integer type, the operand with unsigned integer type
+// shall be converted to the type of the operand with signed
+// integer type.
+// - Otherwise, both operands shall be converted to the unsigned
+// integer type corresponding to the type of the operand with signed
+// integer type.
+
+std::tuple<TBasicType, TBasicType> TIntermediate::getConversionDestinatonType(TBasicType type0, TBasicType type1, TOperator op) const
+{
+ TBasicType res0 = EbtNumTypes;
+ TBasicType res1 = EbtNumTypes;
+
+ if (profile == EEsProfile || version == 110)
+        return std::make_tuple(res0, res1);
+
+ if (source == EShSourceHlsl) {
+ if (canImplicitlyPromote(type1, type0, op)) {
+ res0 = type0;
+ res1 = type0;
+ } else if (canImplicitlyPromote(type0, type1, op)) {
+ res0 = type1;
+ res1 = type1;
+ }
+ return std::make_tuple(res0, res1);
+ }
+
+ if ((type0 == EbtDouble && canImplicitlyPromote(type1, EbtDouble, op)) ||
+ (type1 == EbtDouble && canImplicitlyPromote(type0, EbtDouble, op)) ) {
+ res0 = EbtDouble;
+ res1 = EbtDouble;
+ } else if ((type0 == EbtFloat && canImplicitlyPromote(type1, EbtFloat, op)) ||
+ (type1 == EbtFloat && canImplicitlyPromote(type0, EbtFloat, op)) ) {
+ res0 = EbtFloat;
+ res1 = EbtFloat;
+ } else if ((type0 == EbtFloat16 && canImplicitlyPromote(type1, EbtFloat16, op)) ||
+ (type1 == EbtFloat16 && canImplicitlyPromote(type0, EbtFloat16, op)) ) {
+ res0 = EbtFloat16;
+ res1 = EbtFloat16;
+ } else if (isTypeInt(type0) && isTypeInt(type1) &&
+ (canImplicitlyPromote(type0, type1, op) || canImplicitlyPromote(type1, type0, op))) {
+ if ((isTypeSignedInt(type0) && isTypeSignedInt(type1)) ||
+ (isTypeUnsignedInt(type0) && isTypeUnsignedInt(type1))) {
+ if (getTypeRank(type0) < getTypeRank(type1)) {
+ res0 = type1;
+ res1 = type1;
+ } else {
+ res0 = type0;
+ res1 = type0;
+ }
+ } else if (isTypeUnsignedInt(type0) && (getTypeRank(type0) > getTypeRank(type1))) {
+ res0 = type0;
+ res1 = type0;
+ } else if (isTypeUnsignedInt(type1) && (getTypeRank(type1) > getTypeRank(type0))) {
+ res0 = type1;
+ res1 = type1;
+ } else if (isTypeSignedInt(type0)) {
+ if (canSignedIntTypeRepresentAllUnsignedValues(type0, type1)) {
+ res0 = type0;
+ res1 = type0;
+ } else {
+ res0 = getCorrespondingUnsignedType(type0);
+ res1 = getCorrespondingUnsignedType(type0);
+ }
+ } else if (isTypeSignedInt(type1)) {
+ if (canSignedIntTypeRepresentAllUnsignedValues(type1, type0)) {
+ res0 = type1;
+ res1 = type1;
+ } else {
+ res0 = getCorrespondingUnsignedType(type1);
+ res1 = getCorrespondingUnsignedType(type1);
+ }
+ }
+ }
+
+ return std::make_tuple(res0, res1);
+}
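+
+// Illustrative sketch: two worked cases of the rules above (GLSL source,
+// version >= 400; the second also assumes the explicit int64 arithmetic
+// types extension is enabled):
+//
+//     getConversionDestinatonType(EbtInt, EbtUint, EOpAdd)
+//         -> (EbtUint, EbtUint)    // equal rank, unsigned side wins
+//     getConversionDestinatonType(EbtInt64, EbtUint, EOpAdd)
+//         -> (EbtInt64, EbtInt64)  // int64_t holds every uint value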
+
+//
+// Given a type, find what operation would fully construct it.
+//
+TOperator TIntermediate::mapTypeToConstructorOp(const TType& type) const
+{
+ TOperator op = EOpNull;
+
+ if (type.getQualifier().nonUniform)
+ return EOpConstructNonuniform;
+
+ if (type.isCoopMat())
+ return EOpConstructCooperativeMatrix;
+
+ switch (type.getBasicType()) {
+ case EbtStruct:
+ op = EOpConstructStruct;
+ break;
+ case EbtSampler:
+ if (type.getSampler().combined)
+ op = EOpConstructTextureSampler;
+ break;
+ case EbtFloat:
+ if (type.isMatrix()) {
+ switch (type.getMatrixCols()) {
+ case 2:
+ switch (type.getMatrixRows()) {
+ case 2: op = EOpConstructMat2x2; break;
+ case 3: op = EOpConstructMat2x3; break;
+ case 4: op = EOpConstructMat2x4; break;
+ default: break; // some compilers want this
+ }
+ break;
+ case 3:
+ switch (type.getMatrixRows()) {
+ case 2: op = EOpConstructMat3x2; break;
+ case 3: op = EOpConstructMat3x3; break;
+ case 4: op = EOpConstructMat3x4; break;
+ default: break; // some compilers want this
+ }
+ break;
+ case 4:
+ switch (type.getMatrixRows()) {
+ case 2: op = EOpConstructMat4x2; break;
+ case 3: op = EOpConstructMat4x3; break;
+ case 4: op = EOpConstructMat4x4; break;
+ default: break; // some compilers want this
+ }
+ break;
+ default: break; // some compilers want this
+ }
+ } else {
+ switch(type.getVectorSize()) {
+ case 1: op = EOpConstructFloat; break;
+ case 2: op = EOpConstructVec2; break;
+ case 3: op = EOpConstructVec3; break;
+ case 4: op = EOpConstructVec4; break;
+ default: break; // some compilers want this
+ }
+ }
+ break;
+ case EbtDouble:
+ if (type.getMatrixCols()) {
+ switch (type.getMatrixCols()) {
+ case 2:
+ switch (type.getMatrixRows()) {
+ case 2: op = EOpConstructDMat2x2; break;
+ case 3: op = EOpConstructDMat2x3; break;
+ case 4: op = EOpConstructDMat2x4; break;
+ default: break; // some compilers want this
+ }
+ break;
+ case 3:
+ switch (type.getMatrixRows()) {
+ case 2: op = EOpConstructDMat3x2; break;
+ case 3: op = EOpConstructDMat3x3; break;
+ case 4: op = EOpConstructDMat3x4; break;
+ default: break; // some compilers want this
+ }
+ break;
+ case 4:
+ switch (type.getMatrixRows()) {
+ case 2: op = EOpConstructDMat4x2; break;
+ case 3: op = EOpConstructDMat4x3; break;
+ case 4: op = EOpConstructDMat4x4; break;
+ default: break; // some compilers want this
+ }
+ break;
+ }
+ } else {
+ switch(type.getVectorSize()) {
+ case 1: op = EOpConstructDouble; break;
+ case 2: op = EOpConstructDVec2; break;
+ case 3: op = EOpConstructDVec3; break;
+ case 4: op = EOpConstructDVec4; break;
+ default: break; // some compilers want this
+ }
+ }
+ break;
+ case EbtFloat16:
+ if (type.getMatrixCols()) {
+ switch (type.getMatrixCols()) {
+ case 2:
+ switch (type.getMatrixRows()) {
+ case 2: op = EOpConstructF16Mat2x2; break;
+ case 3: op = EOpConstructF16Mat2x3; break;
+ case 4: op = EOpConstructF16Mat2x4; break;
+ default: break; // some compilers want this
+ }
+ break;
+ case 3:
+ switch (type.getMatrixRows()) {
+ case 2: op = EOpConstructF16Mat3x2; break;
+ case 3: op = EOpConstructF16Mat3x3; break;
+ case 4: op = EOpConstructF16Mat3x4; break;
+ default: break; // some compilers want this
+ }
+ break;
+ case 4:
+ switch (type.getMatrixRows()) {
+ case 2: op = EOpConstructF16Mat4x2; break;
+ case 3: op = EOpConstructF16Mat4x3; break;
+ case 4: op = EOpConstructF16Mat4x4; break;
+ default: break; // some compilers want this
+ }
+ break;
+ }
+        } else {
+ switch (type.getVectorSize()) {
+ case 1: op = EOpConstructFloat16; break;
+ case 2: op = EOpConstructF16Vec2; break;
+ case 3: op = EOpConstructF16Vec3; break;
+ case 4: op = EOpConstructF16Vec4; break;
+ default: break; // some compilers want this
+ }
+ }
+ break;
+ case EbtInt8:
+ switch(type.getVectorSize()) {
+ case 1: op = EOpConstructInt8; break;
+ case 2: op = EOpConstructI8Vec2; break;
+ case 3: op = EOpConstructI8Vec3; break;
+ case 4: op = EOpConstructI8Vec4; break;
+ default: break; // some compilers want this
+ }
+ break;
+ case EbtUint8:
+ switch(type.getVectorSize()) {
+ case 1: op = EOpConstructUint8; break;
+ case 2: op = EOpConstructU8Vec2; break;
+ case 3: op = EOpConstructU8Vec3; break;
+ case 4: op = EOpConstructU8Vec4; break;
+ default: break; // some compilers want this
+ }
+ break;
+ case EbtInt16:
+ switch(type.getVectorSize()) {
+ case 1: op = EOpConstructInt16; break;
+ case 2: op = EOpConstructI16Vec2; break;
+ case 3: op = EOpConstructI16Vec3; break;
+ case 4: op = EOpConstructI16Vec4; break;
+ default: break; // some compilers want this
+ }
+ break;
+ case EbtUint16:
+ switch(type.getVectorSize()) {
+ case 1: op = EOpConstructUint16; break;
+ case 2: op = EOpConstructU16Vec2; break;
+ case 3: op = EOpConstructU16Vec3; break;
+ case 4: op = EOpConstructU16Vec4; break;
+ default: break; // some compilers want this
+ }
+ break;
+ case EbtInt:
+ if (type.getMatrixCols()) {
+ switch (type.getMatrixCols()) {
+ case 2:
+ switch (type.getMatrixRows()) {
+ case 2: op = EOpConstructIMat2x2; break;
+ case 3: op = EOpConstructIMat2x3; break;
+ case 4: op = EOpConstructIMat2x4; break;
+ default: break; // some compilers want this
+ }
+ break;
+ case 3:
+ switch (type.getMatrixRows()) {
+ case 2: op = EOpConstructIMat3x2; break;
+ case 3: op = EOpConstructIMat3x3; break;
+ case 4: op = EOpConstructIMat3x4; break;
+ default: break; // some compilers want this
+ }
+ break;
+ case 4:
+ switch (type.getMatrixRows()) {
+ case 2: op = EOpConstructIMat4x2; break;
+ case 3: op = EOpConstructIMat4x3; break;
+ case 4: op = EOpConstructIMat4x4; break;
+ default: break; // some compilers want this
+ }
+ break;
+ }
+ } else {
+ switch(type.getVectorSize()) {
+ case 1: op = EOpConstructInt; break;
+ case 2: op = EOpConstructIVec2; break;
+ case 3: op = EOpConstructIVec3; break;
+ case 4: op = EOpConstructIVec4; break;
+ default: break; // some compilers want this
+ }
+ }
+ break;
+ case EbtUint:
+ if (type.getMatrixCols()) {
+ switch (type.getMatrixCols()) {
+ case 2:
+ switch (type.getMatrixRows()) {
+ case 2: op = EOpConstructUMat2x2; break;
+ case 3: op = EOpConstructUMat2x3; break;
+ case 4: op = EOpConstructUMat2x4; break;
+ default: break; // some compilers want this
+ }
+ break;
+ case 3:
+ switch (type.getMatrixRows()) {
+ case 2: op = EOpConstructUMat3x2; break;
+ case 3: op = EOpConstructUMat3x3; break;
+ case 4: op = EOpConstructUMat3x4; break;
+ default: break; // some compilers want this
+ }
+ break;
+ case 4:
+ switch (type.getMatrixRows()) {
+ case 2: op = EOpConstructUMat4x2; break;
+ case 3: op = EOpConstructUMat4x3; break;
+ case 4: op = EOpConstructUMat4x4; break;
+ default: break; // some compilers want this
+ }
+ break;
+ }
+ } else {
+ switch(type.getVectorSize()) {
+ case 1: op = EOpConstructUint; break;
+ case 2: op = EOpConstructUVec2; break;
+ case 3: op = EOpConstructUVec3; break;
+ case 4: op = EOpConstructUVec4; break;
+ default: break; // some compilers want this
+ }
+ }
+ break;
+ case EbtInt64:
+ switch(type.getVectorSize()) {
+ case 1: op = EOpConstructInt64; break;
+ case 2: op = EOpConstructI64Vec2; break;
+ case 3: op = EOpConstructI64Vec3; break;
+ case 4: op = EOpConstructI64Vec4; break;
+ default: break; // some compilers want this
+ }
+ break;
+ case EbtUint64:
+ switch(type.getVectorSize()) {
+ case 1: op = EOpConstructUint64; break;
+ case 2: op = EOpConstructU64Vec2; break;
+ case 3: op = EOpConstructU64Vec3; break;
+ case 4: op = EOpConstructU64Vec4; break;
+ default: break; // some compilers want this
+ }
+ break;
+ case EbtBool:
+ if (type.getMatrixCols()) {
+ switch (type.getMatrixCols()) {
+ case 2:
+ switch (type.getMatrixRows()) {
+ case 2: op = EOpConstructBMat2x2; break;
+ case 3: op = EOpConstructBMat2x3; break;
+ case 4: op = EOpConstructBMat2x4; break;
+ default: break; // some compilers want this
+ }
+ break;
+ case 3:
+ switch (type.getMatrixRows()) {
+ case 2: op = EOpConstructBMat3x2; break;
+ case 3: op = EOpConstructBMat3x3; break;
+ case 4: op = EOpConstructBMat3x4; break;
+ default: break; // some compilers want this
+ }
+ break;
+ case 4:
+ switch (type.getMatrixRows()) {
+ case 2: op = EOpConstructBMat4x2; break;
+ case 3: op = EOpConstructBMat4x3; break;
+ case 4: op = EOpConstructBMat4x4; break;
+ default: break; // some compilers want this
+ }
+ break;
+ }
+ } else {
+ switch(type.getVectorSize()) {
+ case 1: op = EOpConstructBool; break;
+ case 2: op = EOpConstructBVec2; break;
+ case 3: op = EOpConstructBVec3; break;
+ case 4: op = EOpConstructBVec4; break;
+ default: break; // some compilers want this
+ }
+ }
+ break;
+ case EbtReference:
+ op = EOpConstructReference;
+ break;
+ default:
+ break;
+ }
+
+ return op;
+}
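+
+// Illustrative sketch: a few mappings produced by the table above:
+//
+//     TType(EbtFloat, EvqTemporary, 3)           -> EOpConstructVec3
+//     TType(EbtFloat, EvqTemporary, 1, 4, 4)     -> EOpConstructMat4x4
+//     TType(EbtBool, EvqTemporary, 2)            -> EOpConstructBVec2
+//     any struct type                            -> EOpConstructStruct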
+
+//
+// Safe way to combine two nodes into an aggregate. Works with null pointers,
+// a node that's not an aggregate yet, etc.
+//
+// Returns the resulting aggregate, unless nullptr was passed in for
+// both existing nodes.
+//
+TIntermAggregate* TIntermediate::growAggregate(TIntermNode* left, TIntermNode* right)
+{
+ if (left == nullptr && right == nullptr)
+ return nullptr;
+
+ TIntermAggregate* aggNode = nullptr;
+ if (left != nullptr)
+ aggNode = left->getAsAggregate();
+ if (aggNode == nullptr || aggNode->getOp() != EOpNull) {
+ aggNode = new TIntermAggregate;
+ if (left != nullptr)
+ aggNode->getSequence().push_back(left);
+ }
+
+ if (right != nullptr)
+ aggNode->getSequence().push_back(right);
+
+ return aggNode;
+}
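+
+// Illustrative sketch (assumed names): building a statement list; the second
+// call reuses the aggregate created by the first (its op is still EOpNull):
+//
+//     TIntermAggregate* seq = ir.growAggregate(nullptr, stmt0); // new aggregate
+//     seq = ir.growAggregate(seq, stmt1);   // appended; same node comes back
+//     seq->setOperator(EOpSequence);        // finish it off as a sequence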
+
+TIntermAggregate* TIntermediate::growAggregate(TIntermNode* left, TIntermNode* right, const TSourceLoc& loc)
+{
+ TIntermAggregate* aggNode = growAggregate(left, right);
+ if (aggNode)
+ aggNode->setLoc(loc);
+
+ return aggNode;
+}
+
+//
+// Turn an existing node into an aggregate.
+//
+// Returns an aggregate, unless nullptr was passed in for the existing node.
+//
+TIntermAggregate* TIntermediate::makeAggregate(TIntermNode* node)
+{
+ if (node == nullptr)
+ return nullptr;
+
+ TIntermAggregate* aggNode = new TIntermAggregate;
+ aggNode->getSequence().push_back(node);
+ aggNode->setLoc(node->getLoc());
+
+ return aggNode;
+}
+
+TIntermAggregate* TIntermediate::makeAggregate(TIntermNode* node, const TSourceLoc& loc)
+{
+ if (node == nullptr)
+ return nullptr;
+
+ TIntermAggregate* aggNode = new TIntermAggregate;
+ aggNode->getSequence().push_back(node);
+ aggNode->setLoc(loc);
+
+ return aggNode;
+}
+
+//
+// Make an aggregate with an empty sequence.
+//
+TIntermAggregate* TIntermediate::makeAggregate(const TSourceLoc& loc)
+{
+ TIntermAggregate* aggNode = new TIntermAggregate;
+ aggNode->setLoc(loc);
+
+ return aggNode;
+}
+
+//
+// For "if" test nodes. There are three children; a condition,
+// a true path, and a false path. The two paths are in the
+// nodePair.
+//
+// Returns the selection node created.
+//
+TIntermSelection* TIntermediate::addSelection(TIntermTyped* cond, TIntermNodePair nodePair, const TSourceLoc& loc)
+{
+ //
+ // Don't prune the false path for compile-time constants; it's needed
+ // for static access analysis.
+ //
+
+ TIntermSelection* node = new TIntermSelection(cond, nodePair.node1, nodePair.node2);
+ node->setLoc(loc);
+
+ return node;
+}
+
+TIntermTyped* TIntermediate::addComma(TIntermTyped* left, TIntermTyped* right, const TSourceLoc& loc)
+{
+ // However, the lowest precedence operators of the sequence operator ( , ) and the assignment operators
+ // ... are not included in the operators that can create a constant expression.
+ //
+ // if (left->getType().getQualifier().storage == EvqConst &&
+ // right->getType().getQualifier().storage == EvqConst) {
+
+ // return right;
+ //}
+
+ TIntermTyped *commaAggregate = growAggregate(left, right, loc);
+ commaAggregate->getAsAggregate()->setOperator(EOpComma);
+ commaAggregate->setType(right->getType());
+ commaAggregate->getWritableType().getQualifier().makeTemporary();
+
+ return commaAggregate;
+}
+
+TIntermTyped* TIntermediate::addMethod(TIntermTyped* object, const TType& type, const TString* name, const TSourceLoc& loc)
+{
+ TIntermMethod* method = new TIntermMethod(object, type, *name);
+ method->setLoc(loc);
+
+ return method;
+}
+
+//
+// For "?:" test nodes. There are three children; a condition,
+// a true path, and a false path. The two paths are specified
+// as separate parameters. For vector 'cond', the true and false
+// are not paths, but vectors to mix.
+//
+// Specialization constant operations include
+// - The ternary operator ( ? : )
+//
+// Returns the selection node created, or nullptr if one could not be.
+//
+TIntermTyped* TIntermediate::addSelection(TIntermTyped* cond, TIntermTyped* trueBlock, TIntermTyped* falseBlock,
+ const TSourceLoc& loc)
+{
+ // If it's void, go to the if-then-else selection()
+ if (trueBlock->getBasicType() == EbtVoid && falseBlock->getBasicType() == EbtVoid) {
+ TIntermNodePair pair = { trueBlock, falseBlock };
+ TIntermSelection* selection = addSelection(cond, pair, loc);
+ if (getSource() == EShSourceHlsl)
+ selection->setNoShortCircuit();
+
+ return selection;
+ }
+
+ //
+ // Get compatible types.
+ //
+ auto children = addConversion(EOpSequence, trueBlock, falseBlock);
+ trueBlock = std::get<0>(children);
+ falseBlock = std::get<1>(children);
+
+ if (trueBlock == nullptr || falseBlock == nullptr)
+ return nullptr;
+
+ // Handle a vector condition as a mix
+ if (!cond->getType().isScalarOrVec1()) {
+ TType targetVectorType(trueBlock->getType().getBasicType(), EvqTemporary,
+ cond->getType().getVectorSize());
+ // smear true/false operands as needed
+ trueBlock = addUniShapeConversion(EOpMix, targetVectorType, trueBlock);
+ falseBlock = addUniShapeConversion(EOpMix, targetVectorType, falseBlock);
+
+ // After conversion, types have to match.
+ if (falseBlock->getType() != trueBlock->getType())
+ return nullptr;
+
+ // make the mix operation
+ TIntermAggregate* mix = makeAggregate(loc);
+ mix = growAggregate(mix, falseBlock);
+ mix = growAggregate(mix, trueBlock);
+ mix = growAggregate(mix, cond);
+ mix->setType(targetVectorType);
+ mix->setOp(EOpMix);
+
+ return mix;
+ }
+
+ // Now have a scalar condition...
+
+ // Convert true and false expressions to matching types
+ addBiShapeConversion(EOpMix, trueBlock, falseBlock);
+
+ // After conversion, types have to match.
+ if (falseBlock->getType() != trueBlock->getType())
+ return nullptr;
+
+ // Eliminate the selection when the condition is a scalar and all operands are constant.
+ if (cond->getAsConstantUnion() && trueBlock->getAsConstantUnion() && falseBlock->getAsConstantUnion()) {
+ if (cond->getAsConstantUnion()->getConstArray()[0].getBConst())
+ return trueBlock;
+ else
+ return falseBlock;
+ }
+
+ //
+ // Make a selection node.
+ //
+ TIntermSelection* node = new TIntermSelection(cond, trueBlock, falseBlock, trueBlock->getType());
+ node->setLoc(loc);
+ node->getQualifier().precision = std::max(trueBlock->getQualifier().precision, falseBlock->getQualifier().precision);
+
+ if ((cond->getQualifier().isConstant() && specConstantPropagates(*trueBlock, *falseBlock)) ||
+ (cond->getQualifier().isSpecConstant() && trueBlock->getQualifier().isConstant() &&
+ falseBlock->getQualifier().isConstant()))
+ node->getQualifier().makeSpecConstant();
+ else
+ node->getQualifier().makeTemporary();
+
+ if (getSource() == EShSourceHlsl)
+ node->setNoShortCircuit();
+
+ return node;
+}
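+
+// Illustrative sketch (assumed names): when the condition and both arms are
+// constants, the selection above folds away entirely:
+//
+//     // cond : constant true, t : constant 1.0, f : constant 2.0
+//     TIntermTyped* r = ir.addSelection(cond, t, f, loc);
+//     // r == t; no TIntermSelection node is created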
+
+//
+// Constant terminal nodes. Each has a union that contains bool, float, or int constants.
+//
+// Returns the constant union node created.
+//
+
+TIntermConstantUnion* TIntermediate::addConstantUnion(const TConstUnionArray& unionArray, const TType& t, const TSourceLoc& loc, bool literal) const
+{
+ TIntermConstantUnion* node = new TIntermConstantUnion(unionArray, t);
+ node->getQualifier().storage = EvqConst;
+ node->setLoc(loc);
+ if (literal)
+ node->setLiteral();
+
+ return node;
+}
+
+TIntermConstantUnion* TIntermediate::addConstantUnion(signed char i8, const TSourceLoc& loc, bool literal) const
+{
+ TConstUnionArray unionArray(1);
+ unionArray[0].setI8Const(i8);
+
+ return addConstantUnion(unionArray, TType(EbtInt8, EvqConst), loc, literal);
+}
+
+TIntermConstantUnion* TIntermediate::addConstantUnion(unsigned char u8, const TSourceLoc& loc, bool literal) const
+{
+ TConstUnionArray unionArray(1);
+    unionArray[0].setU8Const(u8);
+
+ return addConstantUnion(unionArray, TType(EbtUint8, EvqConst), loc, literal);
+}
+
+TIntermConstantUnion* TIntermediate::addConstantUnion(signed short i16, const TSourceLoc& loc, bool literal) const
+{
+ TConstUnionArray unionArray(1);
+ unionArray[0].setI16Const(i16);
+
+ return addConstantUnion(unionArray, TType(EbtInt16, EvqConst), loc, literal);
+}
+
+TIntermConstantUnion* TIntermediate::addConstantUnion(unsigned short u16, const TSourceLoc& loc, bool literal) const
+{
+ TConstUnionArray unionArray(1);
+ unionArray[0].setU16Const(u16);
+
+ return addConstantUnion(unionArray, TType(EbtUint16, EvqConst), loc, literal);
+}
+
+TIntermConstantUnion* TIntermediate::addConstantUnion(int i, const TSourceLoc& loc, bool literal) const
+{
+ TConstUnionArray unionArray(1);
+ unionArray[0].setIConst(i);
+
+ return addConstantUnion(unionArray, TType(EbtInt, EvqConst), loc, literal);
+}
+
+TIntermConstantUnion* TIntermediate::addConstantUnion(unsigned int u, const TSourceLoc& loc, bool literal) const
+{
+ TConstUnionArray unionArray(1);
+ unionArray[0].setUConst(u);
+
+ return addConstantUnion(unionArray, TType(EbtUint, EvqConst), loc, literal);
+}
+
+TIntermConstantUnion* TIntermediate::addConstantUnion(long long i64, const TSourceLoc& loc, bool literal) const
+{
+ TConstUnionArray unionArray(1);
+ unionArray[0].setI64Const(i64);
+
+ return addConstantUnion(unionArray, TType(EbtInt64, EvqConst), loc, literal);
+}
+
+TIntermConstantUnion* TIntermediate::addConstantUnion(unsigned long long u64, const TSourceLoc& loc, bool literal) const
+{
+ TConstUnionArray unionArray(1);
+ unionArray[0].setU64Const(u64);
+
+ return addConstantUnion(unionArray, TType(EbtUint64, EvqConst), loc, literal);
+}
+
+TIntermConstantUnion* TIntermediate::addConstantUnion(bool b, const TSourceLoc& loc, bool literal) const
+{
+ TConstUnionArray unionArray(1);
+ unionArray[0].setBConst(b);
+
+ return addConstantUnion(unionArray, TType(EbtBool, EvqConst), loc, literal);
+}
+
+TIntermConstantUnion* TIntermediate::addConstantUnion(double d, TBasicType baseType, const TSourceLoc& loc, bool literal) const
+{
+ assert(baseType == EbtFloat || baseType == EbtDouble || baseType == EbtFloat16);
+
+ TConstUnionArray unionArray(1);
+ unionArray[0].setDConst(d);
+
+ return addConstantUnion(unionArray, TType(baseType, EvqConst), loc, literal);
+}
+
+TIntermConstantUnion* TIntermediate::addConstantUnion(const TString* s, const TSourceLoc& loc, bool literal) const
+{
+ TConstUnionArray unionArray(1);
+ unionArray[0].setSConst(s);
+
+ return addConstantUnion(unionArray, TType(EbtString, EvqConst), loc, literal);
+}
+
+// Put vector swizzle selectors onto the given sequence
+void TIntermediate::pushSelector(TIntermSequence& sequence, const TVectorSelector& selector, const TSourceLoc& loc)
+{
+ TIntermConstantUnion* constIntNode = addConstantUnion(selector, loc);
+ sequence.push_back(constIntNode);
+}
+
+// Put matrix swizzle selectors onto the given sequence
+void TIntermediate::pushSelector(TIntermSequence& sequence, const TMatrixSelector& selector, const TSourceLoc& loc)
+{
+ TIntermConstantUnion* constIntNode = addConstantUnion(selector.coord1, loc);
+ sequence.push_back(constIntNode);
+ constIntNode = addConstantUnion(selector.coord2, loc);
+ sequence.push_back(constIntNode);
+}
+
+// Make an aggregate node that has a sequence of all selectors.
+template TIntermTyped* TIntermediate::addSwizzle<TVectorSelector>(TSwizzleSelectors<TVectorSelector>& selector, const TSourceLoc& loc);
+template TIntermTyped* TIntermediate::addSwizzle<TMatrixSelector>(TSwizzleSelectors<TMatrixSelector>& selector, const TSourceLoc& loc);
+template<typename selectorType>
+TIntermTyped* TIntermediate::addSwizzle(TSwizzleSelectors<selectorType>& selector, const TSourceLoc& loc)
+{
+ TIntermAggregate* node = new TIntermAggregate(EOpSequence);
+
+ node->setLoc(loc);
+ TIntermSequence &sequenceVector = node->getSequence();
+
+ for (int i = 0; i < selector.size(); i++)
+ pushSelector(sequenceVector, selector[i], loc);
+
+ return node;
+}
+
+//
+// Follow the left branches down to the root of an l-value
+// expression (just "." and []).
+//
+// Return the base of the l-value (where following indexing quits working).
+// Return nullptr if a chain following dereferences cannot be followed.
+//
+// 'swizzleOkay' says whether or not it is okay to consider a swizzle
+// a valid part of the dereference chain.
+//
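+// For example (illustration only), for "s.v[i].x" the walk is
+// swizzle -> s.v[i] -> s.v -> s, returning the symbol node for "s";
+// with 'swizzleOkay' false, the swizzle link instead yields nullptr.
+//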
+const TIntermTyped* TIntermediate::findLValueBase(const TIntermTyped* node, bool swizzleOkay)
+{
+ do {
+ const TIntermBinary* binary = node->getAsBinaryNode();
+ if (binary == nullptr)
+ return node;
+ TOperator op = binary->getOp();
+ if (op != EOpIndexDirect && op != EOpIndexIndirect && op != EOpIndexDirectStruct && op != EOpVectorSwizzle && op != EOpMatrixSwizzle)
+ return nullptr;
+ if (! swizzleOkay) {
+ if (op == EOpVectorSwizzle || op == EOpMatrixSwizzle)
+ return nullptr;
+ if ((op == EOpIndexDirect || op == EOpIndexIndirect) &&
+ (binary->getLeft()->getType().isVector() || binary->getLeft()->getType().isScalar()) &&
+ ! binary->getLeft()->getType().isArray())
+ return nullptr;
+ }
+ node = node->getAsBinaryNode()->getLeft();
+ } while (true);
+}
+
+//
+// Create while and do-while loop nodes.
+//
+TIntermLoop* TIntermediate::addLoop(TIntermNode* body, TIntermTyped* test, TIntermTyped* terminal, bool testFirst,
+ const TSourceLoc& loc)
+{
+ TIntermLoop* node = new TIntermLoop(body, test, terminal, testFirst);
+ node->setLoc(loc);
+
+ return node;
+}
+
+//
+// Create a for-loop sequence.
+//
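+// A sketch of the shape built for "for (init; test; terminal) body":
+//
+//     EOpSequence
+//       <init>
+//       TIntermLoop(test, terminal, body)
+//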
+TIntermAggregate* TIntermediate::addForLoop(TIntermNode* body, TIntermNode* initializer, TIntermTyped* test,
+ TIntermTyped* terminal, bool testFirst, const TSourceLoc& loc, TIntermLoop*& node)
+{
+ node = new TIntermLoop(body, test, terminal, testFirst);
+ node->setLoc(loc);
+
+ // make a sequence of the initializer and statement, but try to reuse the
+ // aggregate already created for whatever is in the initializer, if there is one
+ TIntermAggregate* loopSequence = (initializer == nullptr ||
+ initializer->getAsAggregate() == nullptr) ? makeAggregate(initializer, loc)
+ : initializer->getAsAggregate();
+ if (loopSequence != nullptr && loopSequence->getOp() == EOpSequence)
+ loopSequence->setOp(EOpNull);
+ loopSequence = growAggregate(loopSequence, node);
+ loopSequence->setOperator(EOpSequence);
+
+ return loopSequence;
+}
+
+//
+// Add branches.
+//
+TIntermBranch* TIntermediate::addBranch(TOperator branchOp, const TSourceLoc& loc)
+{
+ return addBranch(branchOp, nullptr, loc);
+}
+
+TIntermBranch* TIntermediate::addBranch(TOperator branchOp, TIntermTyped* expression, const TSourceLoc& loc)
+{
+ TIntermBranch* node = new TIntermBranch(branchOp, expression);
+ node->setLoc(loc);
+
+ return node;
+}
+
+//
+// This is to be executed after the final root is put on top by the parsing
+// process.
+//
+bool TIntermediate::postProcess(TIntermNode* root, EShLanguage /*language*/)
+{
+ if (root == nullptr)
+ return true;
+
+ // Finish off the top-level sequence
+ TIntermAggregate* aggRoot = root->getAsAggregate();
+ if (aggRoot && aggRoot->getOp() == EOpNull)
+ aggRoot->setOperator(EOpSequence);
+
+    // Propagate 'noContraction' label backward from 'precise' variables.
+ glslang::PropagateNoContraction(*this);
+
+ switch (textureSamplerTransformMode) {
+ case EShTexSampTransKeep:
+ break;
+ case EShTexSampTransUpgradeTextureRemoveSampler:
+ performTextureUpgradeAndSamplerRemovalTransformation(root);
+ break;
+ }
+
+ return true;
+}
+
+void TIntermediate::addSymbolLinkageNodes(TIntermAggregate*& linkage, EShLanguage language, TSymbolTable& symbolTable)
+{
+    // Add top-level nodes for declarations that must be checked across
+    // compilation units by a linker, yet might not have been referenced
+ // by the AST.
+ //
+ // Almost entirely, translation of symbols is driven by what's present
+ // in the AST traversal, not by translating the symbol table.
+ //
+ // However, there are some special cases:
+ // - From the specification: "Special built-in inputs gl_VertexID and
+ // gl_InstanceID are also considered active vertex attributes."
+ // - Linker-based type mismatch error reporting needs to see all
+ // uniforms/ins/outs variables and blocks.
+ // - ftransform() can make gl_Vertex and gl_ModelViewProjectionMatrix active.
+ //
+
+ // if (ftransformUsed) {
+ // TODO: 1.1 lowering functionality: track ftransform() usage
+ // addSymbolLinkageNode(root, symbolTable, "gl_Vertex");
+ // addSymbolLinkageNode(root, symbolTable, "gl_ModelViewProjectionMatrix");
+ //}
+
+ if (language == EShLangVertex) {
+ // the names won't be found in the symbol table unless the versions are right,
+ // so version logic does not need to be repeated here
+ addSymbolLinkageNode(linkage, symbolTable, "gl_VertexID");
+ addSymbolLinkageNode(linkage, symbolTable, "gl_InstanceID");
+ }
+
+ // Add a child to the root node for the linker objects
+ linkage->setOperator(EOpLinkerObjects);
+ treeRoot = growAggregate(treeRoot, linkage);
+}
+
+//
+// Add the given name or symbol to the list of nodes at the end of the tree used
+// for link-time checking and external linkage.
+//
+
+void TIntermediate::addSymbolLinkageNode(TIntermAggregate*& linkage, TSymbolTable& symbolTable, const TString& name)
+{
+ TSymbol* symbol = symbolTable.find(name);
+ if (symbol)
+ addSymbolLinkageNode(linkage, *symbol->getAsVariable());
+}
+
+void TIntermediate::addSymbolLinkageNode(TIntermAggregate*& linkage, const TSymbol& symbol)
+{
+ const TVariable* variable = symbol.getAsVariable();
+ if (! variable) {
+ // This must be a member of an anonymous block, and we need to add the whole block
+ const TAnonMember* anon = symbol.getAsAnonMember();
+ variable = &anon->getAnonContainer();
+ }
+ TIntermSymbol* node = addSymbol(*variable);
+ linkage = growAggregate(linkage, node);
+}
+
+//
+// Add a caller->callee relationship to the call graph.
+// Assumes the strings are unique per signature.
+//
+void TIntermediate::addToCallGraph(TInfoSink& /*infoSink*/, const TString& caller, const TString& callee)
+{
+    // Duplicates are okay, but it is faster not to keep them; they come grouped by caller,
+    // as long as new ones are pushed on the same end we check for duplicates
+ for (TGraph::const_iterator call = callGraph.begin(); call != callGraph.end(); ++call) {
+ if (call->caller != caller)
+ break;
+ if (call->callee == callee)
+ return;
+ }
+
+ callGraph.push_front(TCall(caller, callee));
+}
+
+//
+// This deletes the tree.
+//
+void TIntermediate::removeTree()
+{
+ if (treeRoot)
+ RemoveAllTreeNodes(treeRoot);
+}
+
+//
+// Implement the part of KHR_vulkan_glsl that lists the set of operations
+// that can result in a specialization constant operation.
+//
+// "5.x Specialization Constant Operations"
+//
+// Only some operations discussed in this section may be applied to a
+// specialization constant and still yield a result that is a
+// specialization constant. The operations allowed are listed below.
+// When a specialization constant is operated on with one of these
+// operators and with another constant or specialization constant, the
+// result is implicitly a specialization constant.
+//
+// - int(), uint(), and bool() constructors for type conversions
+// from any of the following types to any of the following types:
+// * int
+// * uint
+// * bool
+// - vector versions of the above conversion constructors
+// - allowed implicit conversions of the above
+// - swizzles (e.g., foo.yx)
+// - The following when applied to integer or unsigned integer types:
+// * unary negative ( - )
+// * binary operations ( + , - , * , / , % )
+// * shift ( <<, >> )
+// * bitwise operations ( & , | , ^ )
+// - The following when applied to integer or unsigned integer scalar types:
+// * comparison ( == , != , > , >= , < , <= )
+// - The following when applied to the Boolean scalar type:
+// * not ( ! )
+// * logical operations ( && , || , ^^ )
+// * comparison ( == , != )"
+//
+// This function just handles binary and unary nodes. Construction
+// rules are handled in construction paths that are not covered by the unary
+// and binary paths, while required conversions will still show up here
+// as unary converters stemming from a construction operator.
+//
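+// A GLSL sketch of the rule quoted above (names are illustrative):
+//
+//     layout(constant_id = 0) const int N = 8;
+//     const int M = 2 * N;       // M is implicitly a specialization constant
+//     const float f = float(N);  // float() is not in the list; f is not
+//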
+bool TIntermediate::isSpecializationOperation(const TIntermOperator& node) const
+{
+    // The operations resulting in floating point are quite limited.
+    // (However, some floating-point operations result in bool, like ">",
+    // so they are handled later.)
+ if (node.getType().isFloatingDomain()) {
+ switch (node.getOp()) {
+ case EOpIndexDirect:
+ case EOpIndexIndirect:
+ case EOpIndexDirectStruct:
+ case EOpVectorSwizzle:
+ case EOpConvFloatToDouble:
+ case EOpConvDoubleToFloat:
+ case EOpConvFloat16ToFloat:
+ case EOpConvFloatToFloat16:
+ case EOpConvFloat16ToDouble:
+ case EOpConvDoubleToFloat16:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ // Check for floating-point arguments
+ if (const TIntermBinary* bin = node.getAsBinaryNode())
+ if (bin->getLeft() ->getType().isFloatingDomain() ||
+ bin->getRight()->getType().isFloatingDomain())
+ return false;
+
+ // So, for now, we can assume everything left is non-floating-point...
+
+ // Now check for integer/bool-based operations
+ switch (node.getOp()) {
+
+ // dereference/swizzle
+ case EOpIndexDirect:
+ case EOpIndexIndirect:
+ case EOpIndexDirectStruct:
+ case EOpVectorSwizzle:
+
+ // (u)int* -> bool
+ case EOpConvInt8ToBool:
+ case EOpConvInt16ToBool:
+ case EOpConvIntToBool:
+ case EOpConvInt64ToBool:
+ case EOpConvUint8ToBool:
+ case EOpConvUint16ToBool:
+ case EOpConvUintToBool:
+ case EOpConvUint64ToBool:
+
+ // bool -> (u)int*
+ case EOpConvBoolToInt8:
+ case EOpConvBoolToInt16:
+ case EOpConvBoolToInt:
+ case EOpConvBoolToInt64:
+ case EOpConvBoolToUint8:
+ case EOpConvBoolToUint16:
+ case EOpConvBoolToUint:
+ case EOpConvBoolToUint64:
+
+ // int8_t -> (u)int*
+ case EOpConvInt8ToInt16:
+ case EOpConvInt8ToInt:
+ case EOpConvInt8ToInt64:
+ case EOpConvInt8ToUint8:
+ case EOpConvInt8ToUint16:
+ case EOpConvInt8ToUint:
+ case EOpConvInt8ToUint64:
+
+ // int16_t -> (u)int*
+ case EOpConvInt16ToInt8:
+ case EOpConvInt16ToInt:
+ case EOpConvInt16ToInt64:
+ case EOpConvInt16ToUint8:
+ case EOpConvInt16ToUint16:
+ case EOpConvInt16ToUint:
+ case EOpConvInt16ToUint64:
+
+ // int32_t -> (u)int*
+ case EOpConvIntToInt8:
+ case EOpConvIntToInt16:
+ case EOpConvIntToInt64:
+ case EOpConvIntToUint8:
+ case EOpConvIntToUint16:
+ case EOpConvIntToUint:
+ case EOpConvIntToUint64:
+
+ // int64_t -> (u)int*
+ case EOpConvInt64ToInt8:
+ case EOpConvInt64ToInt16:
+ case EOpConvInt64ToInt:
+ case EOpConvInt64ToUint8:
+ case EOpConvInt64ToUint16:
+ case EOpConvInt64ToUint:
+ case EOpConvInt64ToUint64:
+
+ // uint8_t -> (u)int*
+ case EOpConvUint8ToInt8:
+ case EOpConvUint8ToInt16:
+ case EOpConvUint8ToInt:
+ case EOpConvUint8ToInt64:
+ case EOpConvUint8ToUint16:
+ case EOpConvUint8ToUint:
+ case EOpConvUint8ToUint64:
+
+ // uint16_t -> (u)int*
+ case EOpConvUint16ToInt8:
+ case EOpConvUint16ToInt16:
+ case EOpConvUint16ToInt:
+ case EOpConvUint16ToInt64:
+ case EOpConvUint16ToUint8:
+ case EOpConvUint16ToUint:
+ case EOpConvUint16ToUint64:
+
+ // uint32_t -> (u)int*
+ case EOpConvUintToInt8:
+ case EOpConvUintToInt16:
+ case EOpConvUintToInt:
+ case EOpConvUintToInt64:
+ case EOpConvUintToUint8:
+ case EOpConvUintToUint16:
+ case EOpConvUintToUint64:
+
+ // uint64_t -> (u)int*
+ case EOpConvUint64ToInt8:
+ case EOpConvUint64ToInt16:
+ case EOpConvUint64ToInt:
+ case EOpConvUint64ToInt64:
+ case EOpConvUint64ToUint8:
+ case EOpConvUint64ToUint16:
+ case EOpConvUint64ToUint:
+
+ // unary operations
+ case EOpNegative:
+ case EOpLogicalNot:
+ case EOpBitwiseNot:
+
+ // binary operations
+ case EOpAdd:
+ case EOpSub:
+ case EOpMul:
+ case EOpVectorTimesScalar:
+ case EOpDiv:
+ case EOpMod:
+ case EOpRightShift:
+ case EOpLeftShift:
+ case EOpAnd:
+ case EOpInclusiveOr:
+ case EOpExclusiveOr:
+ case EOpLogicalOr:
+ case EOpLogicalXor:
+ case EOpLogicalAnd:
+ case EOpEqual:
+ case EOpNotEqual:
+ case EOpLessThan:
+ case EOpGreaterThan:
+ case EOpLessThanEqual:
+ case EOpGreaterThanEqual:
+ return true;
+ default:
+ return false;
+ }
+}
+
+// Is the operation one that must propagate nonuniform?
+bool TIntermediate::isNonuniformPropagating(TOperator op) const
+{
+ // "* All Operators in Section 5.1 (Operators), except for assignment,
+ // arithmetic assignment, and sequence
+ // * Component selection in Section 5.5
+ // * Matrix components in Section 5.6
+ // * Structure and Array Operations in Section 5.7, except for the length
+ // method."
+ switch (op) {
+ case EOpPostIncrement:
+ case EOpPostDecrement:
+ case EOpPreIncrement:
+ case EOpPreDecrement:
+
+ case EOpNegative:
+ case EOpLogicalNot:
+ case EOpVectorLogicalNot:
+ case EOpBitwiseNot:
+
+ case EOpAdd:
+ case EOpSub:
+ case EOpMul:
+ case EOpDiv:
+ case EOpMod:
+ case EOpRightShift:
+ case EOpLeftShift:
+ case EOpAnd:
+ case EOpInclusiveOr:
+ case EOpExclusiveOr:
+ case EOpEqual:
+ case EOpNotEqual:
+ case EOpLessThan:
+ case EOpGreaterThan:
+ case EOpLessThanEqual:
+ case EOpGreaterThanEqual:
+ case EOpVectorTimesScalar:
+ case EOpVectorTimesMatrix:
+ case EOpMatrixTimesVector:
+ case EOpMatrixTimesScalar:
+
+ case EOpLogicalOr:
+ case EOpLogicalXor:
+ case EOpLogicalAnd:
+
+ case EOpIndexDirect:
+ case EOpIndexIndirect:
+ case EOpIndexDirectStruct:
+ case EOpVectorSwizzle:
+ return true;
+
+ default:
+ break;
+ }
+
+ return false;
+}
+
+////////////////////////////////////////////////////////////////
+//
+// Member functions of the nodes used for building the tree.
+//
+////////////////////////////////////////////////////////////////
+
+//
+// Say whether or not an operation node changes the value of a variable.
+//
+// Returns true if state is modified.
+//
+bool TIntermOperator::modifiesState() const
+{
+ switch (op) {
+ case EOpPostIncrement:
+ case EOpPostDecrement:
+ case EOpPreIncrement:
+ case EOpPreDecrement:
+ case EOpAssign:
+ case EOpAddAssign:
+ case EOpSubAssign:
+ case EOpMulAssign:
+ case EOpVectorTimesMatrixAssign:
+ case EOpVectorTimesScalarAssign:
+ case EOpMatrixTimesScalarAssign:
+ case EOpMatrixTimesMatrixAssign:
+ case EOpDivAssign:
+ case EOpModAssign:
+ case EOpAndAssign:
+ case EOpInclusiveOrAssign:
+ case EOpExclusiveOrAssign:
+ case EOpLeftShiftAssign:
+ case EOpRightShiftAssign:
+ return true;
+ default:
+ return false;
+ }
+}
+
+//
+// returns true if the operator is for one of the constructors
+//
+bool TIntermOperator::isConstructor() const
+{
+ return op > EOpConstructGuardStart && op < EOpConstructGuardEnd;
+}
+
+//
+// Make sure the type of an operator is appropriate for its
+// combination of operation and operand type. This will invoke
+// promoteUnary, promoteBinary, etc as needed.
+//
+// Returns false if nothing makes sense.
+//
+bool TIntermediate::promote(TIntermOperator* node)
+{
+ if (node == nullptr)
+ return false;
+
+ if (node->getAsUnaryNode())
+ return promoteUnary(*node->getAsUnaryNode());
+
+ if (node->getAsBinaryNode())
+ return promoteBinary(*node->getAsBinaryNode());
+
+ if (node->getAsAggregate())
+ return promoteAggregate(*node->getAsAggregate());
+
+ return false;
+}
+
+//
+// See TIntermediate::promote
+//
+bool TIntermediate::promoteUnary(TIntermUnary& node)
+{
+ const TOperator op = node.getOp();
+ TIntermTyped* operand = node.getOperand();
+
+ switch (op) {
+ case EOpLogicalNot:
+ // Convert operand to a boolean type
+ if (operand->getBasicType() != EbtBool) {
+ // Add constructor to boolean type. If that fails, we can't do it, so return false.
+ TIntermTyped* converted = addConversion(op, TType(EbtBool), operand);
+ if (converted == nullptr)
+ return false;
+
+ // Use the result of converting the node to a bool.
+ node.setOperand(operand = converted); // also updates stack variable
+ }
+ break;
+ case EOpBitwiseNot:
+ if (!isTypeInt(operand->getBasicType()))
+ return false;
+ break;
+ case EOpNegative:
+ case EOpPostIncrement:
+ case EOpPostDecrement:
+ case EOpPreIncrement:
+ case EOpPreDecrement:
+ if (!isTypeInt(operand->getBasicType()) &&
+ operand->getBasicType() != EbtFloat &&
+ operand->getBasicType() != EbtFloat16 &&
+            operand->getBasicType() != EbtDouble)
+            return false;
+ break;
+
+ default:
+        if (operand->getBasicType() != EbtFloat)
+            return false;
+ }
+
+ node.setType(operand->getType());
+ node.getWritableType().getQualifier().makeTemporary();
+
+ return true;
+}
+
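+// (Illustration, assuming a typical case: for "-x" with "mediump float x",
+// the unary node starts with no precision and adopts mediump from its
+// operand below.)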
+void TIntermUnary::updatePrecision()
+{
+ if (getBasicType() == EbtInt || getBasicType() == EbtUint || getBasicType() == EbtFloat || getBasicType() == EbtFloat16) {
+ if (operand->getQualifier().precision > getQualifier().precision)
+ getQualifier().precision = operand->getQualifier().precision;
+ }
+}
+
+//
+// See TIntermediate::promote
+//
+bool TIntermediate::promoteBinary(TIntermBinary& node)
+{
+ TOperator op = node.getOp();
+ TIntermTyped* left = node.getLeft();
+ TIntermTyped* right = node.getRight();
+
+ // Arrays and structures have to be exact matches.
+ if ((left->isArray() || right->isArray() || left->getBasicType() == EbtStruct || right->getBasicType() == EbtStruct)
+ && left->getType() != right->getType())
+ return false;
+
+ // Base assumption: just make the type the same as the left
+ // operand. Only deviations from this will be coded.
+ node.setType(left->getType());
+ node.getWritableType().getQualifier().clear();
+
+    // Composite and opaque types don't have pending operator changes, e.g.,
+    // arrays, structures, and samplers. Just establish the final type and correctness.
+ if (left->isArray() || left->getBasicType() == EbtStruct || left->getBasicType() == EbtSampler) {
+ switch (op) {
+ case EOpEqual:
+ case EOpNotEqual:
+ if (left->getBasicType() == EbtSampler) {
+ // can't compare samplers
+ return false;
+ } else {
+ // Promote to conditional
+ node.setType(TType(EbtBool));
+ }
+
+ return true;
+
+ case EOpAssign:
+ // Keep type from above
+
+ return true;
+
+ default:
+ return false;
+ }
+ }
+
+ //
+ // We now have only scalars, vectors, and matrices to worry about.
+ //
+
+ // HLSL implicitly promotes bool -> int for numeric operations.
+ // (Implicit conversions to make the operands match each other's types were already done.)
+ if (getSource() == EShSourceHlsl &&
+ (left->getBasicType() == EbtBool || right->getBasicType() == EbtBool)) {
+ switch (op) {
+ case EOpLessThan:
+ case EOpGreaterThan:
+ case EOpLessThanEqual:
+ case EOpGreaterThanEqual:
+
+ case EOpRightShift:
+ case EOpLeftShift:
+
+ case EOpMod:
+
+ case EOpAnd:
+ case EOpInclusiveOr:
+ case EOpExclusiveOr:
+
+ case EOpAdd:
+ case EOpSub:
+ case EOpDiv:
+ case EOpMul:
+ if (left->getBasicType() == EbtBool)
+ left = createConversion(EbtInt, left);
+ if (right->getBasicType() == EbtBool)
+ right = createConversion(EbtInt, right);
+ if (left == nullptr || right == nullptr)
+ return false;
+ node.setLeft(left);
+ node.setRight(right);
+
+            // Update the original base assumption on the result type.
+ node.setType(left->getType());
+ node.getWritableType().getQualifier().clear();
+
+ break;
+
+ default:
+ break;
+ }
+ }
+
+ // Do general type checks against individual operands (comparing left and right is coming up, checking mixed shapes after that)
+ switch (op) {
+ case EOpLessThan:
+ case EOpGreaterThan:
+ case EOpLessThanEqual:
+ case EOpGreaterThanEqual:
+ // Relational comparisons need numeric types and will promote to scalar Boolean.
+ if (left->getBasicType() == EbtBool)
+ return false;
+
+ node.setType(TType(EbtBool, EvqTemporary, left->getVectorSize()));
+ break;
+
+ case EOpEqual:
+ case EOpNotEqual:
+ if (getSource() == EShSourceHlsl) {
+ const int resultWidth = std::max(left->getVectorSize(), right->getVectorSize());
+
+ // In HLSL, == or != on vectors means component-wise comparison.
+ if (resultWidth > 1) {
+ op = (op == EOpEqual) ? EOpVectorEqual : EOpVectorNotEqual;
+ node.setOp(op);
+ }
+
+ node.setType(TType(EbtBool, EvqTemporary, resultWidth));
+ } else {
+ // All the above comparisons result in a bool (but not the vector compares)
+ node.setType(TType(EbtBool));
+ }
+ break;
+
+ case EOpLogicalAnd:
+ case EOpLogicalOr:
+ case EOpLogicalXor:
+ // logical ops operate only on Booleans or vectors of Booleans.
+ if (left->getBasicType() != EbtBool || left->isMatrix())
+ return false;
+
+ if (getSource() == EShSourceGlsl) {
+ // logical ops operate only on scalar Booleans and will promote to scalar Boolean.
+ if (left->isVector())
+ return false;
+ }
+
+ node.setType(TType(EbtBool, EvqTemporary, left->getVectorSize()));
+ break;
+
+ case EOpRightShift:
+ case EOpLeftShift:
+ case EOpRightShiftAssign:
+ case EOpLeftShiftAssign:
+
+ case EOpMod:
+ case EOpModAssign:
+
+ case EOpAnd:
+ case EOpInclusiveOr:
+ case EOpExclusiveOr:
+ case EOpAndAssign:
+ case EOpInclusiveOrAssign:
+ case EOpExclusiveOrAssign:
+ if (getSource() == EShSourceHlsl)
+ break;
+
+ // Check for integer-only operands.
+ if (!isTypeInt(left->getBasicType()) && !isTypeInt(right->getBasicType()))
+ return false;
+ if (left->isMatrix() || right->isMatrix())
+ return false;
+
+ break;
+
+ case EOpAdd:
+ case EOpSub:
+ case EOpDiv:
+ case EOpMul:
+ case EOpAddAssign:
+ case EOpSubAssign:
+ case EOpMulAssign:
+ case EOpDivAssign:
+ // check for non-Boolean operands
+ if (left->getBasicType() == EbtBool || right->getBasicType() == EbtBool)
+ return false;
+
+ default:
+ break;
+ }
+
+ // Compare left and right, and finish with the cases where the operand types must match
+ switch (op) {
+ case EOpLessThan:
+ case EOpGreaterThan:
+ case EOpLessThanEqual:
+ case EOpGreaterThanEqual:
+
+ case EOpEqual:
+ case EOpNotEqual:
+ case EOpVectorEqual:
+ case EOpVectorNotEqual:
+
+ case EOpLogicalAnd:
+ case EOpLogicalOr:
+ case EOpLogicalXor:
+ return left->getType() == right->getType();
+
+ case EOpMod:
+ case EOpModAssign:
+
+ case EOpAnd:
+ case EOpInclusiveOr:
+ case EOpExclusiveOr:
+ case EOpAndAssign:
+ case EOpInclusiveOrAssign:
+ case EOpExclusiveOrAssign:
+
+ case EOpAdd:
+ case EOpSub:
+ case EOpDiv:
+
+ case EOpAddAssign:
+ case EOpSubAssign:
+ case EOpDivAssign:
+ // Quick out in case the types do match
+ if (left->getType() == right->getType())
+ return true;
+
+ // Fall through
+
+ case EOpMul:
+ case EOpMulAssign:
+ // At least the basic type has to match
+ if (left->getBasicType() != right->getBasicType())
+ return false;
+
+ default:
+ break;
+ }
+
+ if (left->getType().isCoopMat() || right->getType().isCoopMat()) {
+ if (left->getType().isCoopMat() && right->getType().isCoopMat() &&
+ *left->getType().getTypeParameters() != *right->getType().getTypeParameters()) {
+ return false;
+ }
+ switch (op) {
+ case EOpMul:
+ case EOpMulAssign:
+ if (left->getType().isCoopMat() && right->getType().isCoopMat()) {
+ return false;
+ }
+ if (op == EOpMulAssign && right->getType().isCoopMat()) {
+ return false;
+ }
+ node.setOp(op == EOpMulAssign ? EOpMatrixTimesScalarAssign : EOpMatrixTimesScalar);
+ if (right->getType().isCoopMat()) {
+ node.setType(right->getType());
+ }
+ return true;
+ case EOpAdd:
+ case EOpSub:
+ case EOpDiv:
+ case EOpAssign:
+ // These require both to be cooperative matrices
+ if (!left->getType().isCoopMat() || !right->getType().isCoopMat()) {
+ return false;
+ }
+ return true;
+ default:
+ break;
+ }
+ return false;
+ }
+
+ // Finish handling the case, for all ops, where both operands are scalars.
+ if (left->isScalar() && right->isScalar())
+ return true;
+
+ // Finish handling the case, for all ops, where there are two vectors of different sizes
+ if (left->isVector() && right->isVector() && left->getVectorSize() != right->getVectorSize() && right->getVectorSize() > 1)
+ return false;
+
+ //
+ // We now have a mix of scalars, vectors, or matrices, for non-relational operations.
+ //
+
+    // Can these two operands be combined, and what is the resulting type?
+ TBasicType basicType = left->getBasicType();
+ switch (op) {
+ case EOpMul:
+ if (!left->isMatrix() && right->isMatrix()) {
+ if (left->isVector()) {
+ if (left->getVectorSize() != right->getMatrixRows())
+ return false;
+ node.setOp(op = EOpVectorTimesMatrix);
+ node.setType(TType(basicType, EvqTemporary, right->getMatrixCols()));
+ } else {
+ node.setOp(op = EOpMatrixTimesScalar);
+ node.setType(TType(basicType, EvqTemporary, 0, right->getMatrixCols(), right->getMatrixRows()));
+ }
+ } else if (left->isMatrix() && !right->isMatrix()) {
+ if (right->isVector()) {
+ if (left->getMatrixCols() != right->getVectorSize())
+ return false;
+ node.setOp(op = EOpMatrixTimesVector);
+ node.setType(TType(basicType, EvqTemporary, left->getMatrixRows()));
+ } else {
+ node.setOp(op = EOpMatrixTimesScalar);
+ }
+ } else if (left->isMatrix() && right->isMatrix()) {
+ if (left->getMatrixCols() != right->getMatrixRows())
+ return false;
+ node.setOp(op = EOpMatrixTimesMatrix);
+ node.setType(TType(basicType, EvqTemporary, 0, right->getMatrixCols(), left->getMatrixRows()));
+ } else if (! left->isMatrix() && ! right->isMatrix()) {
+ if (left->isVector() && right->isVector()) {
+ ; // leave as component product
+ } else if (left->isVector() || right->isVector()) {
+ node.setOp(op = EOpVectorTimesScalar);
+ if (right->isVector())
+ node.setType(TType(basicType, EvqTemporary, right->getVectorSize()));
+ }
+ } else {
+ return false;
+ }
+ break;
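+
+        // Shape sketch for the EOpMul cases above (GLSL matCxR notation, so
+        // mat3x4 has 3 columns and 4 rows; purely illustrative):
+        //   vec4 * mat3x4 -> EOpVectorTimesMatrix, result vec3 (matrix columns)
+        //   mat3x4 * vec3 -> EOpMatrixTimesVector, result vec4 (matrix rows)
+        //   vec3 * 2.0    -> EOpVectorTimesScalar, result vec3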
+ case EOpMulAssign:
+ if (! left->isMatrix() && right->isMatrix()) {
+ if (left->isVector()) {
+ if (left->getVectorSize() != right->getMatrixRows() || left->getVectorSize() != right->getMatrixCols())
+ return false;
+ node.setOp(op = EOpVectorTimesMatrixAssign);
+ } else {
+ return false;
+ }
+ } else if (left->isMatrix() && !right->isMatrix()) {
+ if (right->isVector()) {
+ return false;
+ } else {
+ node.setOp(op = EOpMatrixTimesScalarAssign);
+ }
+ } else if (left->isMatrix() && right->isMatrix()) {
+ if (left->getMatrixCols() != right->getMatrixCols() || left->getMatrixCols() != right->getMatrixRows())
+ return false;
+ node.setOp(op = EOpMatrixTimesMatrixAssign);
+ } else if (!left->isMatrix() && !right->isMatrix()) {
+ if (left->isVector() && right->isVector()) {
+ // leave as component product
+ } else if (left->isVector() || right->isVector()) {
+ if (! left->isVector())
+ return false;
+ node.setOp(op = EOpVectorTimesScalarAssign);
+ }
+ } else {
+ return false;
+ }
+ break;
+
+ case EOpRightShift:
+ case EOpLeftShift:
+ case EOpRightShiftAssign:
+ case EOpLeftShiftAssign:
+ if (right->isVector() && (! left->isVector() || right->getVectorSize() != left->getVectorSize()))
+ return false;
+ break;
+
+ case EOpAssign:
+ if (left->getVectorSize() != right->getVectorSize() || left->getMatrixCols() != right->getMatrixCols() || left->getMatrixRows() != right->getMatrixRows())
+ return false;
+ // fall through
+
+ case EOpAdd:
+ case EOpSub:
+ case EOpDiv:
+ case EOpMod:
+ case EOpAnd:
+ case EOpInclusiveOr:
+ case EOpExclusiveOr:
+ case EOpAddAssign:
+ case EOpSubAssign:
+ case EOpDivAssign:
+ case EOpModAssign:
+ case EOpAndAssign:
+ case EOpInclusiveOrAssign:
+ case EOpExclusiveOrAssign:
+
+ if ((left->isMatrix() && right->isVector()) ||
+ (left->isVector() && right->isMatrix()) ||
+ left->getBasicType() != right->getBasicType())
+ return false;
+ if (left->isMatrix() && right->isMatrix() && (left->getMatrixCols() != right->getMatrixCols() || left->getMatrixRows() != right->getMatrixRows()))
+ return false;
+ if (left->isVector() && right->isVector() && left->getVectorSize() != right->getVectorSize())
+ return false;
+ if (right->isVector() || right->isMatrix()) {
+ node.getWritableType().shallowCopy(right->getType());
+ node.getWritableType().getQualifier().makeTemporary();
+ }
+ break;
+
+ default:
+ return false;
+ }
+
+ //
+ // One more check for assignment.
+ //
+ switch (op) {
+ // The resulting type has to match the left operand.
+ case EOpAssign:
+ case EOpAddAssign:
+ case EOpSubAssign:
+ case EOpMulAssign:
+ case EOpDivAssign:
+ case EOpModAssign:
+ case EOpAndAssign:
+ case EOpInclusiveOrAssign:
+ case EOpExclusiveOrAssign:
+ case EOpLeftShiftAssign:
+ case EOpRightShiftAssign:
+ if (node.getType() != left->getType())
+ return false;
+ break;
+ default:
+ break;
+ }
+
+ return true;
+}
+
+//
+// See TIntermediate::promote
+//
+bool TIntermediate::promoteAggregate(TIntermAggregate& node)
+{
+ TOperator op = node.getOp();
+ TIntermSequence& args = node.getSequence();
+ const int numArgs = static_cast<int>(args.size());
+
+ // Presently, only hlsl does intrinsic promotions.
+ if (getSource() != EShSourceHlsl)
+ return true;
+
+ // set of opcodes that can be promoted in this manner.
+ switch (op) {
+ case EOpAtan:
+ case EOpClamp:
+ case EOpCross:
+ case EOpDistance:
+ case EOpDot:
+ case EOpDst:
+ case EOpFaceForward:
+ // case EOpFindMSB: TODO:
+ // case EOpFindLSB: TODO:
+ case EOpFma:
+ case EOpMod:
+ case EOpFrexp:
+ case EOpLdexp:
+ case EOpMix:
+ case EOpLit:
+ case EOpMax:
+ case EOpMin:
+ case EOpModf:
+ // case EOpGenMul: TODO:
+ case EOpPow:
+ case EOpReflect:
+ case EOpRefract:
+ // case EOpSinCos: TODO:
+ case EOpSmoothStep:
+ case EOpStep:
+ break;
+ default:
+ return true;
+ }
+
+ // TODO: array and struct behavior
+
+ // Try converting all nodes to the given node's type
+ TIntermSequence convertedArgs(numArgs, nullptr);
+
+ // Try to convert all types to the nonConvArg type.
+ for (int nonConvArg = 0; nonConvArg < numArgs; ++nonConvArg) {
+ // Try converting all args to this arg's type
+ for (int convArg = 0; convArg < numArgs; ++convArg) {
+ convertedArgs[convArg] = addConversion(op, args[nonConvArg]->getAsTyped()->getType(),
+ args[convArg]->getAsTyped());
+ }
+
+ // If we successfully converted all the args, use the result.
+ if (std::all_of(convertedArgs.begin(), convertedArgs.end(),
+ [](const TIntermNode* node) { return node != nullptr; })) {
+
+ std::swap(args, convertedArgs);
+ return true;
+ }
+ }
+
+ return false;
+}
+
+void TIntermBinary::updatePrecision()
+{
+ if (getBasicType() == EbtInt || getBasicType() == EbtUint || getBasicType() == EbtFloat || getBasicType() == EbtFloat16) {
+ getQualifier().precision = std::max(right->getQualifier().precision, left->getQualifier().precision);
+ if (getQualifier().precision != EpqNone) {
+ left->propagatePrecision(getQualifier().precision);
+ right->propagatePrecision(getQualifier().precision);
+ }
+ }
+}
+
+void TIntermTyped::propagatePrecision(TPrecisionQualifier newPrecision)
+{
+ if (getQualifier().precision != EpqNone || (getBasicType() != EbtInt && getBasicType() != EbtUint && getBasicType() != EbtFloat && getBasicType() != EbtFloat16))
+ return;
+
+ getQualifier().precision = newPrecision;
+
+ TIntermBinary* binaryNode = getAsBinaryNode();
+ if (binaryNode) {
+ binaryNode->getLeft()->propagatePrecision(newPrecision);
+ binaryNode->getRight()->propagatePrecision(newPrecision);
+
+ return;
+ }
+
+ TIntermUnary* unaryNode = getAsUnaryNode();
+ if (unaryNode) {
+ unaryNode->getOperand()->propagatePrecision(newPrecision);
+
+ return;
+ }
+
+ TIntermAggregate* aggregateNode = getAsAggregate();
+ if (aggregateNode) {
+ TIntermSequence operands = aggregateNode->getSequence();
+ for (unsigned int i = 0; i < operands.size(); ++i) {
+ TIntermTyped* typedNode = operands[i]->getAsTyped();
+ if (! typedNode)
+ break;
+ typedNode->propagatePrecision(newPrecision);
+ }
+
+ return;
+ }
+
+ TIntermSelection* selectionNode = getAsSelectionNode();
+ if (selectionNode) {
+ TIntermTyped* typedNode = selectionNode->getTrueBlock()->getAsTyped();
+ if (typedNode) {
+ typedNode->propagatePrecision(newPrecision);
+ typedNode = selectionNode->getFalseBlock()->getAsTyped();
+ if (typedNode)
+ typedNode->propagatePrecision(newPrecision);
+ }
+
+ return;
+ }
+}
+
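+// Fold a constant-union node to a new basic type. For example (a sketch):
+// promoting the constant ivec2(3, -1) to EbtFloat yields a new constant
+// holding 3.0 and -1.0, typed as a vec2 with the original storage qualifier.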
+TIntermTyped* TIntermediate::promoteConstantUnion(TBasicType promoteTo, TIntermConstantUnion* node) const
+{
+ const TConstUnionArray& rightUnionArray = node->getConstArray();
+ int size = node->getType().computeNumComponents();
+
+ TConstUnionArray leftUnionArray(size);
+
+ for (int i=0; i < size; i++) {
+ switch (promoteTo) {
+ case EbtFloat:
+ switch (node->getType().getBasicType()) {
+ case EbtInt:
+ leftUnionArray[i].setDConst(static_cast<double>(rightUnionArray[i].getIConst()));
+ break;
+ case EbtUint:
+ leftUnionArray[i].setDConst(static_cast<double>(rightUnionArray[i].getUConst()));
+ break;
+ case EbtInt64:
+ leftUnionArray[i].setDConst(static_cast<double>(rightUnionArray[i].getI64Const()));
+ break;
+ case EbtUint64:
+ leftUnionArray[i].setDConst(static_cast<double>(rightUnionArray[i].getU64Const()));
+ break;
+ case EbtBool:
+ leftUnionArray[i].setDConst(static_cast<double>(rightUnionArray[i].getBConst()));
+ break;
+ case EbtFloat:
+ case EbtDouble:
+ case EbtFloat16:
+ leftUnionArray[i] = rightUnionArray[i];
+ break;
+ default:
+ return node;
+ }
+ break;
+ case EbtDouble:
+ switch (node->getType().getBasicType()) {
+ case EbtInt:
+ leftUnionArray[i].setDConst(static_cast<double>(rightUnionArray[i].getIConst()));
+ break;
+ case EbtUint:
+ leftUnionArray[i].setDConst(static_cast<double>(rightUnionArray[i].getUConst()));
+ break;
+ case EbtInt64:
+ leftUnionArray[i].setDConst(static_cast<double>(rightUnionArray[i].getI64Const()));
+ break;
+ case EbtUint64:
+ leftUnionArray[i].setDConst(static_cast<double>(rightUnionArray[i].getU64Const()));
+ break;
+ case EbtBool:
+ leftUnionArray[i].setDConst(static_cast<double>(rightUnionArray[i].getBConst()));
+ break;
+ case EbtFloat:
+ case EbtDouble:
+ case EbtFloat16:
+ leftUnionArray[i] = rightUnionArray[i];
+ break;
+ default:
+ return node;
+ }
+ break;
+ case EbtFloat16:
+ switch (node->getType().getBasicType()) {
+ case EbtInt:
+ leftUnionArray[i].setDConst(static_cast<double>(rightUnionArray[i].getIConst()));
+ break;
+ case EbtUint:
+ leftUnionArray[i].setDConst(static_cast<double>(rightUnionArray[i].getUConst()));
+ break;
+ case EbtInt64:
+ leftUnionArray[i].setDConst(static_cast<double>(rightUnionArray[i].getI64Const()));
+ break;
+ case EbtUint64:
+ leftUnionArray[i].setDConst(static_cast<double>(rightUnionArray[i].getU64Const()));
+ break;
+ case EbtBool:
+ leftUnionArray[i].setDConst(static_cast<double>(rightUnionArray[i].getBConst()));
+ break;
+ case EbtFloat:
+ case EbtDouble:
+ case EbtFloat16:
+ leftUnionArray[i] = rightUnionArray[i];
+ break;
+ default:
+ return node;
+ }
+ break;
+ case EbtInt:
+ switch (node->getType().getBasicType()) {
+ case EbtInt:
+ leftUnionArray[i] = rightUnionArray[i];
+ break;
+ case EbtUint:
+ leftUnionArray[i].setIConst(static_cast<int>(rightUnionArray[i].getUConst()));
+ break;
+ case EbtInt64:
+ leftUnionArray[i].setIConst(static_cast<int>(rightUnionArray[i].getI64Const()));
+ break;
+ case EbtUint64:
+ leftUnionArray[i].setIConst(static_cast<int>(rightUnionArray[i].getU64Const()));
+ break;
+ case EbtBool:
+ leftUnionArray[i].setIConst(static_cast<int>(rightUnionArray[i].getBConst()));
+ break;
+ case EbtFloat:
+ case EbtDouble:
+ case EbtFloat16:
+ leftUnionArray[i].setIConst(static_cast<int>(rightUnionArray[i].getDConst()));
+ break;
+ default:
+ return node;
+ }
+ break;
+ case EbtUint:
+ switch (node->getType().getBasicType()) {
+ case EbtInt:
+ leftUnionArray[i].setUConst(static_cast<unsigned int>(rightUnionArray[i].getIConst()));
+ break;
+ case EbtUint:
+ leftUnionArray[i] = rightUnionArray[i];
+ break;
+ case EbtInt64:
+ leftUnionArray[i].setUConst(static_cast<unsigned int>(rightUnionArray[i].getI64Const()));
+ break;
+ case EbtUint64:
+ leftUnionArray[i].setUConst(static_cast<unsigned int>(rightUnionArray[i].getU64Const()));
+ break;
+ case EbtBool:
+ leftUnionArray[i].setUConst(static_cast<unsigned int>(rightUnionArray[i].getBConst()));
+ break;
+ case EbtFloat:
+ case EbtDouble:
+ case EbtFloat16:
+ leftUnionArray[i].setUConst(static_cast<unsigned int>(rightUnionArray[i].getDConst()));
+ break;
+ default:
+ return node;
+ }
+ break;
+ case EbtBool:
+ switch (node->getType().getBasicType()) {
+ case EbtInt:
+ leftUnionArray[i].setBConst(rightUnionArray[i].getIConst() != 0);
+ break;
+ case EbtUint:
+ leftUnionArray[i].setBConst(rightUnionArray[i].getUConst() != 0);
+ break;
+ case EbtInt64:
+ leftUnionArray[i].setBConst(rightUnionArray[i].getI64Const() != 0);
+ break;
+ case EbtUint64:
+ leftUnionArray[i].setBConst(rightUnionArray[i].getU64Const() != 0);
+ break;
+ case EbtBool:
+ leftUnionArray[i] = rightUnionArray[i];
+ break;
+ case EbtFloat:
+ case EbtDouble:
+ case EbtFloat16:
+ leftUnionArray[i].setBConst(rightUnionArray[i].getDConst() != 0.0);
+ break;
+ default:
+ return node;
+ }
+ break;
+ case EbtInt64:
+ switch (node->getType().getBasicType()) {
+ case EbtInt:
+ leftUnionArray[i].setI64Const(static_cast<long long>(rightUnionArray[i].getIConst()));
+ break;
+ case EbtUint:
+ leftUnionArray[i].setI64Const(static_cast<long long>(rightUnionArray[i].getUConst()));
+ break;
+ case EbtInt64:
+ leftUnionArray[i] = rightUnionArray[i];
+ break;
+ case EbtUint64:
+ leftUnionArray[i].setI64Const(static_cast<long long>(rightUnionArray[i].getU64Const()));
+ break;
+ case EbtBool:
+ leftUnionArray[i].setI64Const(static_cast<long long>(rightUnionArray[i].getBConst()));
+ break;
+ case EbtFloat:
+ case EbtDouble:
+ case EbtFloat16:
+ leftUnionArray[i].setI64Const(static_cast<long long>(rightUnionArray[i].getDConst()));
+ break;
+ default:
+ return node;
+ }
+ break;
+ case EbtUint64:
+ switch (node->getType().getBasicType()) {
+ case EbtInt:
+ leftUnionArray[i].setU64Const(static_cast<unsigned long long>(rightUnionArray[i].getIConst()));
+ break;
+ case EbtUint:
+ leftUnionArray[i].setU64Const(static_cast<unsigned long long>(rightUnionArray[i].getUConst()));
+ break;
+ case EbtInt64:
+ leftUnionArray[i].setU64Const(static_cast<unsigned long long>(rightUnionArray[i].getI64Const()));
+ break;
+ case EbtUint64:
+ leftUnionArray[i] = rightUnionArray[i];
+ break;
+ case EbtBool:
+ leftUnionArray[i].setU64Const(static_cast<unsigned long long>(rightUnionArray[i].getBConst()));
+ break;
+ case EbtFloat:
+ case EbtDouble:
+ case EbtFloat16:
+ leftUnionArray[i].setU64Const(static_cast<unsigned long long>(rightUnionArray[i].getDConst()));
+ break;
+ default:
+ return node;
+ }
+ break;
+ default:
+ return node;
+ }
+ }
+
+ const TType& t = node->getType();
+
+ return addConstantUnion(leftUnionArray, TType(promoteTo, t.getQualifier().storage, t.getVectorSize(), t.getMatrixCols(), t.getMatrixRows()),
+ node->getLoc());
+}
+
+void TIntermAggregate::setPragmaTable(const TPragmaTable& pTable)
+{
+ assert(pragmaTable == nullptr);
+ pragmaTable = new TPragmaTable;
+ *pragmaTable = pTable;
+}
+
+// If either node is a specialization constant, and the other is
+// a constant (or specialization constant), the result is still
+// a specialization constant.
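+// For example, with "layout(constant_id = 0) const int N" and a plain
+// "const int C", the result of "N + C" propagates as a specialization
+// constant, while "N + x" for a non-constant "x" does not (illustrative).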
+bool TIntermediate::specConstantPropagates(const TIntermTyped& node1, const TIntermTyped& node2)
+{
+ return (node1.getType().getQualifier().isSpecConstant() && node2.getType().getQualifier().isConstant()) ||
+ (node2.getType().getQualifier().isSpecConstant() && node1.getType().getQualifier().isConstant());
+}
+
+struct TextureUpgradeAndSamplerRemovalTransform : public TIntermTraverser {
+ void visitSymbol(TIntermSymbol* symbol) override {
+ if (symbol->getBasicType() == EbtSampler && symbol->getType().getSampler().isTexture()) {
+ symbol->getWritableType().getSampler().combined = true;
+ }
+ }
+ bool visitAggregate(TVisit, TIntermAggregate* ag) override {
+ using namespace std;
+ TIntermSequence& seq = ag->getSequence();
+ TQualifierList& qual = ag->getQualifierList();
+
+ // qual and seq are indexed using the same indices, so we have to modify both in lock-step
+ assert(seq.size() == qual.size() || qual.empty());
+
+ size_t write = 0;
+ for (size_t i = 0; i < seq.size(); ++i) {
+ TIntermSymbol* symbol = seq[i]->getAsSymbolNode();
+ if (symbol && symbol->getBasicType() == EbtSampler && symbol->getType().getSampler().isPureSampler()) {
+ // remove pure sampler variables
+ continue;
+ }
+
+ TIntermNode* result = seq[i];
+
+ // replace constructors with sampler/textures
+ TIntermAggregate *constructor = seq[i]->getAsAggregate();
+ if (constructor && constructor->getOp() == EOpConstructTextureSampler) {
+ if (!constructor->getSequence().empty())
+ result = constructor->getSequence()[0];
+ }
+
+ // write new node & qualifier
+ seq[write] = result;
+ if (!qual.empty())
+ qual[write] = qual[i];
+ write++;
+ }
+
+ seq.resize(write);
+ if (!qual.empty())
+ qual.resize(write);
+
+ return true;
+ }
+};
+
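+// Sketch of the transformation's effect (names illustrative): a "texture2D t;"
+// symbol becomes a combined "sampler2D", standalone "sampler s;" linker
+// objects are dropped, and "sampler2D(t, s)" constructor nodes are replaced
+// by their texture argument.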
+void TIntermediate::performTextureUpgradeAndSamplerRemovalTransformation(TIntermNode* root)
+{
+ TextureUpgradeAndSamplerRemovalTransform transform;
+ root->traverse(&transform);
+}
+
+const char* TIntermediate::getResourceName(TResourceType res)
+{
+ switch (res) {
+ case EResSampler: return "shift-sampler-binding";
+ case EResTexture: return "shift-texture-binding";
+ case EResImage: return "shift-image-binding";
+ case EResUbo: return "shift-UBO-binding";
+ case EResSsbo: return "shift-ssbo-binding";
+ case EResUav: return "shift-uav-binding";
+ default:
+ assert(0); // internal error: should only be called with valid resource types.
+ return nullptr;
+ }
+}
+
+
+} // end namespace glslang
diff --git a/thirdparty/glslang/glslang/MachineIndependent/LiveTraverser.h b/thirdparty/glslang/glslang/MachineIndependent/LiveTraverser.h
new file mode 100644
index 0000000000..7333bc964e
--- /dev/null
+++ b/thirdparty/glslang/glslang/MachineIndependent/LiveTraverser.h
@@ -0,0 +1,138 @@
+//
+// Copyright (C) 2016 LunarG, Inc.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+#pragma once
+
+#include "../Include/Common.h"
+#include "reflection.h"
+#include "localintermediate.h"
+
+#include "gl_types.h"
+
+#include <list>
+#include <unordered_set>
+
+namespace glslang {
+
+//
+// The traverser: mostly pass through, except
+// - processing function-call nodes to push live functions onto the stack of functions to process
+// - processing selection nodes to trim semantically dead code
+//
+// This is in the glslang namespace directly so it can be a friend of TReflection.
+// This can be derived from to implement reflection database traversers or
+// binding mappers: anything that wants to traverse the live subset of the tree.
+//
+
+class TLiveTraverser : public TIntermTraverser {
+public:
+ TLiveTraverser(const TIntermediate& i, bool traverseAll = false,
+ bool preVisit = true, bool inVisit = false, bool postVisit = false) :
+ TIntermTraverser(preVisit, inVisit, postVisit),
+ intermediate(i), traverseAll(traverseAll)
+ { }
+
+ //
+ // Given a function name, find its subroot in the tree, and push it onto the stack of
+ // functions left to process.
+ //
+ void pushFunction(const TString& name)
+ {
+ TIntermSequence& globals = intermediate.getTreeRoot()->getAsAggregate()->getSequence();
+ for (unsigned int f = 0; f < globals.size(); ++f) {
+ TIntermAggregate* candidate = globals[f]->getAsAggregate();
+ if (candidate && candidate->getOp() == EOpFunction && candidate->getName() == name) {
+ functions.push_back(candidate);
+ break;
+ }
+ }
+ }
+
+ typedef std::list<TIntermAggregate*> TFunctionStack;
+ TFunctionStack functions;
+
+protected:
+ // To catch which function calls are not dead, and hence which functions must be visited.
+ virtual bool visitAggregate(TVisit, TIntermAggregate* node)
+ {
+ if (!traverseAll)
+ if (node->getOp() == EOpFunctionCall)
+ addFunctionCall(node);
+
+ return true; // traverse this subtree
+ }
+
+ // To prune semantically dead paths.
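+    // For example, given "if (true) f(); else g();", only the true block is
+    // traversed below, so g() never becomes live (illustration only).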
+ virtual bool visitSelection(TVisit /* visit */, TIntermSelection* node)
+ {
+ if (traverseAll)
+ return true; // traverse all code
+
+ TIntermConstantUnion* constant = node->getCondition()->getAsConstantUnion();
+ if (constant) {
+ // cull the path that is dead
+ if (constant->getConstArray()[0].getBConst() == true && node->getTrueBlock())
+ node->getTrueBlock()->traverse(this);
+ if (constant->getConstArray()[0].getBConst() == false && node->getFalseBlock())
+ node->getFalseBlock()->traverse(this);
+
+ return false; // don't traverse any more, we did it all above
+ } else
+ return true; // traverse the whole subtree
+ }
+
+ // Track live functions as well as uniforms, so that we don't visit dead functions
+ // and only visit each function once.
+ void addFunctionCall(TIntermAggregate* call)
+ {
+        // Just use the map to ensure we process each function at most once.
+ if (liveFunctions.find(call->getName()) == liveFunctions.end()) {
+ liveFunctions.insert(call->getName());
+ pushFunction(call->getName());
+ }
+ }
+
+ const TIntermediate& intermediate;
+ typedef std::unordered_set<TString> TLiveFunctions;
+ TLiveFunctions liveFunctions;
+ bool traverseAll;
+
+private:
+ // prevent copy & copy construct
+ TLiveTraverser(TLiveTraverser&);
+ TLiveTraverser& operator=(TLiveTraverser&);
+};
+
+} // namespace glslang
diff --git a/thirdparty/glslang/glslang/MachineIndependent/ParseContextBase.cpp b/thirdparty/glslang/glslang/MachineIndependent/ParseContextBase.cpp
new file mode 100644
index 0000000000..c9ddaeadb0
--- /dev/null
+++ b/thirdparty/glslang/glslang/MachineIndependent/ParseContextBase.cpp
@@ -0,0 +1,628 @@
+//
+// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+// Copyright (C) 2016 Google, Inc.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+// Implement the TParseContextBase class.
+
+#include <cstdarg>
+
+#include "ParseHelper.h"
+
+extern int yyparse(glslang::TParseContext*);
+
+namespace glslang {
+
+//
+// Used to output syntax, parsing, and semantic errors.
+//
+
+void TParseContextBase::outputMessage(const TSourceLoc& loc, const char* szReason,
+ const char* szToken,
+ const char* szExtraInfoFormat,
+ TPrefixType prefix, va_list args)
+{
+ const int maxSize = MaxTokenLength + 200;
+ char szExtraInfo[maxSize];
+
+ safe_vsprintf(szExtraInfo, maxSize, szExtraInfoFormat, args);
+
+ infoSink.info.prefix(prefix);
+ infoSink.info.location(loc);
+ infoSink.info << "'" << szToken << "' : " << szReason << " " << szExtraInfo << "\n";
+
+ if (prefix == EPrefixError) {
+ ++numErrors;
+ }
+}
+
+void C_DECL TParseContextBase::error(const TSourceLoc& loc, const char* szReason, const char* szToken,
+ const char* szExtraInfoFormat, ...)
+{
+ if (messages & EShMsgOnlyPreprocessor)
+ return;
+ va_list args;
+ va_start(args, szExtraInfoFormat);
+ outputMessage(loc, szReason, szToken, szExtraInfoFormat, EPrefixError, args);
+ va_end(args);
+
+ if ((messages & EShMsgCascadingErrors) == 0)
+ currentScanner->setEndOfInput();
+}
+
+void C_DECL TParseContextBase::warn(const TSourceLoc& loc, const char* szReason, const char* szToken,
+ const char* szExtraInfoFormat, ...)
+{
+ if (suppressWarnings())
+ return;
+ va_list args;
+ va_start(args, szExtraInfoFormat);
+ outputMessage(loc, szReason, szToken, szExtraInfoFormat, EPrefixWarning, args);
+ va_end(args);
+}
+
+void C_DECL TParseContextBase::ppError(const TSourceLoc& loc, const char* szReason, const char* szToken,
+ const char* szExtraInfoFormat, ...)
+{
+ va_list args;
+ va_start(args, szExtraInfoFormat);
+ outputMessage(loc, szReason, szToken, szExtraInfoFormat, EPrefixError, args);
+ va_end(args);
+
+ if ((messages & EShMsgCascadingErrors) == 0)
+ currentScanner->setEndOfInput();
+}
+
+void C_DECL TParseContextBase::ppWarn(const TSourceLoc& loc, const char* szReason, const char* szToken,
+ const char* szExtraInfoFormat, ...)
+{
+ va_list args;
+ va_start(args, szExtraInfoFormat);
+ outputMessage(loc, szReason, szToken, szExtraInfoFormat, EPrefixWarning, args);
+ va_end(args);
+}
+
+//
+// Both test, and if necessary spit out an error, to see whether the node is
+// really an l-value that can be operated on this way.
+//
+// Returns true if there was an error.
+//
+bool TParseContextBase::lValueErrorCheck(const TSourceLoc& loc, const char* op, TIntermTyped* node)
+{
+ TIntermBinary* binaryNode = node->getAsBinaryNode();
+
+ if (binaryNode) {
+ switch(binaryNode->getOp()) {
+ case EOpIndexDirect:
+ case EOpIndexIndirect: // fall through
+ case EOpIndexDirectStruct: // fall through
+ case EOpVectorSwizzle:
+ case EOpMatrixSwizzle:
+ return lValueErrorCheck(loc, op, binaryNode->getLeft());
+ default:
+ break;
+ }
+ error(loc, " l-value required", op, "", "");
+
+ return true;
+ }
+
+ const char* symbol = nullptr;
+ TIntermSymbol* symNode = node->getAsSymbolNode();
+ if (symNode != nullptr)
+ symbol = symNode->getName().c_str();
+
+ const char* message = nullptr;
+ switch (node->getQualifier().storage) {
+ case EvqConst: message = "can't modify a const"; break;
+ case EvqConstReadOnly: message = "can't modify a const"; break;
+ case EvqUniform: message = "can't modify a uniform"; break;
+ case EvqBuffer:
+ if (node->getQualifier().readonly)
+ message = "can't modify a readonly buffer";
+#ifdef NV_EXTENSIONS
+ if (node->getQualifier().layoutShaderRecordNV)
+ message = "can't modify a shaderrecordnv qualified buffer";
+#endif
+ break;
+#ifdef NV_EXTENSIONS
+ case EvqHitAttrNV:
+ if (language != EShLangIntersectNV)
+ message = "cannot modify hitAttributeNV in this stage";
+ break;
+#endif
+
+ default:
+ //
+ // Type that can't be written to?
+ //
+ switch (node->getBasicType()) {
+ case EbtSampler:
+ message = "can't modify a sampler";
+ break;
+ case EbtAtomicUint:
+ message = "can't modify an atomic_uint";
+ break;
+ case EbtVoid:
+ message = "can't modify void";
+ break;
+#ifdef NV_EXTENSIONS
+ case EbtAccStructNV:
+ message = "can't modify accelerationStructureNV";
+ break;
+#endif
+ default:
+ break;
+ }
+ }
+
+ if (message == nullptr && binaryNode == nullptr && symNode == nullptr) {
+ error(loc, " l-value required", op, "", "");
+
+ return true;
+ }
+
+ //
+ // Everything else is okay, no error.
+ //
+ if (message == nullptr)
+ return false;
+
+ //
+ // If we get here, we have an error and a message.
+ //
+ if (symNode)
+ error(loc, " l-value required", op, "\"%s\" (%s)", symbol, message);
+ else
+ error(loc, " l-value required", op, "(%s)", message);
+
+ return true;
+}
+
+// Test for and give an error if the node can't be read from.
+void TParseContextBase::rValueErrorCheck(const TSourceLoc& loc, const char* op, TIntermTyped* node)
+{
+ if (! node)
+ return;
+
+ TIntermBinary* binaryNode = node->getAsBinaryNode();
+ if (binaryNode) {
+ switch(binaryNode->getOp()) {
+ case EOpIndexDirect:
+ case EOpIndexIndirect:
+ case EOpIndexDirectStruct:
+ case EOpVectorSwizzle:
+ case EOpMatrixSwizzle:
+ rValueErrorCheck(loc, op, binaryNode->getLeft());
+ default:
+ break;
+ }
+
+ return;
+ }
+
+ TIntermSymbol* symNode = node->getAsSymbolNode();
+ if (symNode && symNode->getQualifier().writeonly)
+ error(loc, "can't read from writeonly object: ", op, symNode->getName().c_str());
+}
+
+// Add 'symbol' to the list of deferred linkage symbols, which
+// are later processed in finish(), at which point the symbol
+// must still be valid.
+// It is okay if the symbol's type will be subsequently edited;
+// the modifications will be tracked.
+// Order is preserved, to avoid creating novel forward references.
+void TParseContextBase::trackLinkage(TSymbol& symbol)
+{
+ if (!parsingBuiltins)
+ linkageSymbols.push_back(&symbol);
+}
+
+// Ensure the index is in bounds; if not, give an error and clamp it.
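+// (e.g., for "vec3 v;" an access "v[5]" is reported out of range and the
+// index is clamped to 2; an illustration, not extra logic.)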
+void TParseContextBase::checkIndex(const TSourceLoc& loc, const TType& type, int& index)
+{
+ if (index < 0) {
+ error(loc, "", "[", "index out of range '%d'", index);
+ index = 0;
+ } else if (type.isArray()) {
+ if (type.isSizedArray() && index >= type.getOuterArraySize()) {
+ error(loc, "", "[", "array index out of range '%d'", index);
+ index = type.getOuterArraySize() - 1;
+ }
+ } else if (type.isVector()) {
+ if (index >= type.getVectorSize()) {
+ error(loc, "", "[", "vector index out of range '%d'", index);
+ index = type.getVectorSize() - 1;
+ }
+ } else if (type.isMatrix()) {
+ if (index >= type.getMatrixCols()) {
+ error(loc, "", "[", "matrix index out of range '%d'", index);
+ index = type.getMatrixCols() - 1;
+ }
+ }
+}
+
+// Make a shared symbol have a non-shared version that can be edited by the current
+// compile, such that editing its type will not change the shared version and will
+// affect all nodes already sharing it (non-shallow type),
+// or adopting its full type after being edited (shallow type).
+void TParseContextBase::makeEditable(TSymbol*& symbol)
+{
+ // copyUp() does a deep copy of the type.
+ symbol = symbolTable.copyUp(symbol);
+
+ // Save it (deferred, so it can be edited first) in the AST for linker use.
+ if (symbol)
+ trackLinkage(*symbol);
+}
+
+// Return a writable version of the variable 'name'.
+//
+// Return nullptr if 'name' is not found. This should mean
+// something is seriously wrong (e.g., the compiler asking itself for a
+// built-in that doesn't exist).
+TVariable* TParseContextBase::getEditableVariable(const char* name)
+{
+ bool builtIn;
+ TSymbol* symbol = symbolTable.find(name, &builtIn);
+
+ assert(symbol != nullptr);
+ if (symbol == nullptr)
+ return nullptr;
+
+ if (builtIn)
+ makeEditable(symbol);
+
+ return symbol->getAsVariable();
+}
+
+// Select the best matching function for 'call' from 'candidateList'.
+//
+// Assumptions
+//
+// There is no exact match, so a selection algorithm needs to run. That is, the
+// language-specific handler should check for exact match first, to
+// decide what to do, before calling this selector.
+//
+// Input
+//
+// * list of candidate signatures to select from
+// * the call
+// * a predicate function convertible(from, to) that says whether or not type
+// 'from' can implicitly convert to type 'to' (it includes the case of what
+// the calling language would consider a matching type with no conversion
+// needed)
+// * a predicate function better(from1, from2, to1, to2) that says whether or
+// not a conversion from <-> to2 is considered better than a conversion
+// from <-> to1 (both in and out directions need testing, as declared by the
+// formal parameter)
+//
+// Output
+//
+// * best matching candidate (or none, if no viable candidates found)
+// * whether there was a tie for the best match (ambiguous overload selection,
+// caller's choice for how to report)
+//
+const TFunction* TParseContextBase::selectFunction(
+ const TVector<const TFunction*> candidateList,
+ const TFunction& call,
+ std::function<bool(const TType& from, const TType& to, TOperator op, int arg)> convertible,
+ std::function<bool(const TType& from, const TType& to1, const TType& to2)> better,
+ /* output */ bool& tie)
+{
+//
+// Operation
+//
+// 1. Prune the input list of candidates down to a list of viable candidates,
+// where each viable candidate has
+//
+// * at least as many parameters as there are calling arguments, with any
+// remaining parameters being optional or having default values
+// * each parameter is true under convertible(A, B), where A is the calling
+// type for in and B is the formal type, and in addition, for out B is the
+// calling type and A is the formal type
+//
+// 2. If there are no viable candidates, return with no match.
+//
+// 3. If there is only one viable candidate, it is the best match.
+//
+// 4. If there are multiple viable candidates, select the first viable candidate
+// as the incumbent. Compare the incumbent to the next viable candidate, and if
+// that candidate is better (bullets below), make it the incumbent. Repeat, with
+// a linear walk through the viable candidate list. The final incumbent will be
+// returned as the best match. A viable candidate is better than the incumbent if
+//
+// * it has a function argument with a better(...) conversion than the incumbent,
+// for all directions needed by in and out
+// * the incumbent has no argument with a better(...) conversion than the
+// candidate, for either in or out (as needed)
+//
+// 5. Check for ambiguity by comparing the best match against all other viable
+// candidates. If any other viable candidate has a function argument with a
+// better(...) conversion than the best candidate (for either in or out
+// directions), return that there was a tie for best.
+//
+
+ tie = false;
+
+ // 1. prune to viable...
+ TVector<const TFunction*> viableCandidates;
+ for (auto it = candidateList.begin(); it != candidateList.end(); ++it) {
+ const TFunction& candidate = *(*it);
+
+ // to even be a potential match, the number of arguments must be >= the number of
+ // fixed (non-default) parameters, and <= the total (including parameters with defaults).
+ if (call.getParamCount() < candidate.getFixedParamCount() ||
+ call.getParamCount() > candidate.getParamCount())
+ continue;
+
+ // see if arguments are convertible
+ bool viable = true;
+
+ // The call can have fewer parameters than the candidate, if some have defaults.
+ const int paramCount = std::min(call.getParamCount(), candidate.getParamCount());
+ for (int param = 0; param < paramCount; ++param) {
+ if (candidate[param].type->getQualifier().isParamInput()) {
+ if (! convertible(*call[param].type, *candidate[param].type, candidate.getBuiltInOp(), param)) {
+ viable = false;
+ break;
+ }
+ }
+ if (candidate[param].type->getQualifier().isParamOutput()) {
+ if (! convertible(*candidate[param].type, *call[param].type, candidate.getBuiltInOp(), param)) {
+ viable = false;
+ break;
+ }
+ }
+ }
+
+ if (viable)
+ viableCandidates.push_back(&candidate);
+ }
+
+ // 2. none viable...
+ if (viableCandidates.size() == 0)
+ return nullptr;
+
+ // 3. only one viable...
+ if (viableCandidates.size() == 1)
+ return viableCandidates.front();
+
+ // 4. find best...
+ const auto betterParam = [&call, &better](const TFunction& can1, const TFunction& can2) -> bool {
+ // is call -> can2 better than call -> can1 for any parameter
+ bool hasBetterParam = false;
+ for (int param = 0; param < call.getParamCount(); ++param) {
+ if (better(*call[param].type, *can1[param].type, *can2[param].type)) {
+ hasBetterParam = true;
+ break;
+ }
+ }
+ return hasBetterParam;
+ };
+
+ const auto equivalentParams = [&call, &better](const TFunction& can1, const TFunction& can2) -> bool {
+ // is call -> can2 equivalent to call -> can1 for all the call parameters?
+ for (int param = 0; param < call.getParamCount(); ++param) {
+ if (better(*call[param].type, *can1[param].type, *can2[param].type) ||
+ better(*call[param].type, *can2[param].type, *can1[param].type))
+ return false;
+ }
+ return true;
+ };
+
+ const TFunction* incumbent = viableCandidates.front();
+ for (auto it = viableCandidates.begin() + 1; it != viableCandidates.end(); ++it) {
+ const TFunction& candidate = *(*it);
+ if (betterParam(*incumbent, candidate) && ! betterParam(candidate, *incumbent))
+ incumbent = &candidate;
+ }
+
+ // 5. ambiguity...
+ for (auto it = viableCandidates.begin(); it != viableCandidates.end(); ++it) {
+ if (incumbent == *it)
+ continue;
+ const TFunction& candidate = *(*it);
+
+ // In the case of default parameters, it may have an identical initial set, which is
+ // also ambiguous
+ if (betterParam(*incumbent, candidate) || equivalentParams(*incumbent, candidate))
+ tie = true;
+ }
+
+ return incumbent;
+}
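+
+// Illustrative call-site sketch (assumed, not upstream code): a language-
+// specific handler supplies the two predicates as lambdas and uses the tie
+// flag to report ambiguous overloads.
+//
+//     bool tie = false;
+//     const TFunction* best = selectFunction(candidates, call,
+//         [this](const TType& from, const TType& to, TOperator, int) {
+//             return intermediate.canImplicitlyPromote(from.getBasicType(),
+//                                                      to.getBasicType());
+//         },
+//         [](const TType& from, const TType& to1, const TType& to2) {
+//             // a conversion to 'to2' is better if it is exact and 'to1' is not
+//             return from == to2 && !(from == to1);
+//         },
+//         tie);
+//     if (best == nullptr)
+//         error(loc, "no matching overloaded function found", call.getName().c_str(), "");
+//     else if (tie)
+//         error(loc, "ambiguous best function under implicit conversions", call.getName().c_str(), "");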
+
+//
+// Look at a '.' field selector string and change it into numerical selectors
+// for a vector or scalar.
+//
+// Always return some form of swizzle, so the result is always usable.
+//
+void TParseContextBase::parseSwizzleSelector(const TSourceLoc& loc, const TString& compString, int vecSize,
+ TSwizzleSelectors<TVectorSelector>& selector)
+{
+ // Too long?
+ if (compString.size() > MaxSwizzleSelectors)
+ error(loc, "vector swizzle too long", compString.c_str(), "");
+
+ // Use this to test that all swizzle characters are from the same swizzle-namespace-set
+ enum {
+ exyzw,
+ ergba,
+ estpq,
+ } fieldSet[MaxSwizzleSelectors];
+
+ // Decode the swizzle string.
+ int size = std::min(MaxSwizzleSelectors, (int)compString.size());
+ for (int i = 0; i < size; ++i) {
+ switch (compString[i]) {
+ case 'x':
+ selector.push_back(0);
+ fieldSet[i] = exyzw;
+ break;
+ case 'r':
+ selector.push_back(0);
+ fieldSet[i] = ergba;
+ break;
+ case 's':
+ selector.push_back(0);
+ fieldSet[i] = estpq;
+ break;
+
+ case 'y':
+ selector.push_back(1);
+ fieldSet[i] = exyzw;
+ break;
+ case 'g':
+ selector.push_back(1);
+ fieldSet[i] = ergba;
+ break;
+ case 't':
+ selector.push_back(1);
+ fieldSet[i] = estpq;
+ break;
+
+ case 'z':
+ selector.push_back(2);
+ fieldSet[i] = exyzw;
+ break;
+ case 'b':
+ selector.push_back(2);
+ fieldSet[i] = ergba;
+ break;
+ case 'p':
+ selector.push_back(2);
+ fieldSet[i] = estpq;
+ break;
+
+ case 'w':
+ selector.push_back(3);
+ fieldSet[i] = exyzw;
+ break;
+ case 'a':
+ selector.push_back(3);
+ fieldSet[i] = ergba;
+ break;
+ case 'q':
+ selector.push_back(3);
+ fieldSet[i] = estpq;
+ break;
+
+ default:
+ error(loc, "unknown swizzle selection", compString.c_str(), "");
+ break;
+ }
+ }
+
+ // Additional error checking.
+ for (int i = 0; i < selector.size(); ++i) {
+ if (selector[i] >= vecSize) {
+ error(loc, "vector swizzle selection out of range", compString.c_str(), "");
+ selector.resize(i);
+ break;
+ }
+
+ if (i > 0 && fieldSet[i] != fieldSet[i-1]) {
+ error(loc, "vector swizzle selectors not from the same set", compString.c_str(), "");
+ selector.resize(i);
+ break;
+ }
+ }
+
+ // Ensure it is valid.
+ if (selector.size() == 0)
+ selector.push_back(0);
+}
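+
+// Illustrative sketch (not upstream code): decoding "yzx" on a vec3 yields
+// the selectors {1, 2, 0}; a mixed-set string such as "xg", or a component
+// beyond the vector size, reports an error and truncates the selectors.
+//
+//     TSwizzleSelectors<TVectorSelector> selectors;
+//     parseSwizzleSelector(loc, TString("yzx"), 3, selectors);
+//     // selectors.size() == 3, holding 1, 2, 0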
+
+//
+// Make the passed-in variable information become a member of the
+// global uniform block. If this doesn't exist yet, make it.
+//
+void TParseContextBase::growGlobalUniformBlock(const TSourceLoc& loc, TType& memberType, const TString& memberName, TTypeList* typeList)
+{
+ // Make the global block, if not yet made.
+ if (globalUniformBlock == nullptr) {
+ TQualifier blockQualifier;
+ blockQualifier.clear();
+ blockQualifier.storage = EvqUniform;
+ TType blockType(new TTypeList, *NewPoolTString(getGlobalUniformBlockName()), blockQualifier);
+ setUniformBlockDefaults(blockType);
+ globalUniformBlock = new TVariable(NewPoolTString(""), blockType, true);
+ firstNewMember = 0;
+ }
+
+ // Update with binding and set
+ globalUniformBlock->getWritableType().getQualifier().layoutBinding = globalUniformBinding;
+ globalUniformBlock->getWritableType().getQualifier().layoutSet = globalUniformSet;
+
+ // Add the requested member as a member to the global block.
+ TType* type = new TType;
+ type->shallowCopy(memberType);
+ type->setFieldName(memberName);
+ if (typeList)
+ type->setStruct(typeList);
+ TTypeLoc typeLoc = {type, loc};
+ globalUniformBlock->getType().getWritableStruct()->push_back(typeLoc);
+
+ // Insert into the symbol table.
+ if (firstNewMember == 0) {
+ // This is the first request; we need a normal symbol table insert
+ if (symbolTable.insert(*globalUniformBlock))
+ trackLinkage(*globalUniformBlock);
+ else
+ error(loc, "failed to insert the global constant buffer", "uniform", "");
+ } else {
+ // This is a follow-on request; we need to amend the first insert
+ symbolTable.amend(*globalUniformBlock, firstNewMember);
+ }
+
+ ++firstNewMember;
+}
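+
+// Illustrative use (sketch; 'scalarType' and 'vecType' are hypothetical):
+// front ends call this to fold loose uniforms into one implicit block. The
+// first call creates and inserts the block; later calls amend the same
+// symbol-table entry.
+//
+//     growGlobalUniformBlock(loc, scalarType, TString("myScale"), nullptr);
+//     growGlobalUniformBlock(loc, vecType, TString("myColor"), nullptr);  // amends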
+
+void TParseContextBase::finish()
+{
+ if (parsingBuiltins)
+ return;
+
+ // Transfer the linkage symbols to AST nodes, preserving order.
+ TIntermAggregate* linkage = new TIntermAggregate;
+ for (auto i = linkageSymbols.begin(); i != linkageSymbols.end(); ++i)
+ intermediate.addSymbolLinkageNode(linkage, **i);
+ intermediate.addSymbolLinkageNodes(linkage, getLanguage(), symbolTable);
+}
+
+} // end namespace glslang
diff --git a/thirdparty/glslang/glslang/MachineIndependent/ParseHelper.cpp b/thirdparty/glslang/glslang/MachineIndependent/ParseHelper.cpp
new file mode 100644
index 0000000000..6a8d379b09
--- /dev/null
+++ b/thirdparty/glslang/glslang/MachineIndependent/ParseHelper.cpp
@@ -0,0 +1,8062 @@
+//
+// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+// Copyright (C) 2012-2015 LunarG, Inc.
+// Copyright (C) 2015-2018 Google, Inc.
+// Copyright (C) 2017 ARM Limited.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+#include "ParseHelper.h"
+#include "Scan.h"
+
+#include "../OSDependent/osinclude.h"
+#include <algorithm>
+
+#include "preprocessor/PpContext.h"
+
+extern int yyparse(glslang::TParseContext*);
+
+namespace glslang {
+
+TParseContext::TParseContext(TSymbolTable& symbolTable, TIntermediate& interm, bool parsingBuiltins,
+ int version, EProfile profile, const SpvVersion& spvVersion, EShLanguage language,
+ TInfoSink& infoSink, bool forwardCompatible, EShMessages messages,
+ const TString* entryPoint) :
+ TParseContextBase(symbolTable, interm, parsingBuiltins, version, profile, spvVersion, language,
+ infoSink, forwardCompatible, messages, entryPoint),
+ inMain(false),
+ blockName(nullptr),
+ limits(resources.limits),
+ atomicUintOffsets(nullptr), anyIndexLimits(false)
+{
+ // decide whether precision qualifiers should be ignored or respected
+ if (profile == EEsProfile || spvVersion.vulkan > 0) {
+ precisionManager.respectPrecisionQualifiers();
+ if (! parsingBuiltins && language == EShLangFragment && profile != EEsProfile && spvVersion.vulkan > 0)
+ precisionManager.warnAboutDefaults();
+ }
+
+ setPrecisionDefaults();
+
+ globalUniformDefaults.clear();
+ globalUniformDefaults.layoutMatrix = ElmColumnMajor;
+ globalUniformDefaults.layoutPacking = spvVersion.spv != 0 ? ElpStd140 : ElpShared;
+
+ globalBufferDefaults.clear();
+ globalBufferDefaults.layoutMatrix = ElmColumnMajor;
+ globalBufferDefaults.layoutPacking = spvVersion.spv != 0 ? ElpStd430 : ElpShared;
+
+ // use storage buffer on SPIR-V 1.3 and up
+ if (spvVersion.spv >= EShTargetSpv_1_3)
+ intermediate.setUseStorageBuffer();
+
+ globalInputDefaults.clear();
+ globalOutputDefaults.clear();
+
+ // "Shaders in the transform
+ // feedback capturing mode have an initial global default of
+ // layout(xfb_buffer = 0) out;"
+ if (language == EShLangVertex ||
+ language == EShLangTessControl ||
+ language == EShLangTessEvaluation ||
+ language == EShLangGeometry)
+ globalOutputDefaults.layoutXfbBuffer = 0;
+
+ if (language == EShLangGeometry)
+ globalOutputDefaults.layoutStream = 0;
+
+ if (entryPoint != nullptr && entryPoint->size() > 0 && *entryPoint != "main")
+ infoSink.info.message(EPrefixError, "Source entry point must be \"main\"");
+}
+
+TParseContext::~TParseContext()
+{
+ delete [] atomicUintOffsets;
+}
+
+// Set up all default precisions as needed by the current environment.
+// Intended just as a TParseContext constructor helper.
+void TParseContext::setPrecisionDefaults()
+{
+ // Set all precision defaults to EpqNone, which is correct for all types
+ // when not obeying precision qualifiers, and correct for types that don't
+ // have defaults (thus getting an error on use) when obeying precision
+ // qualifiers.
+
+ for (int type = 0; type < EbtNumTypes; ++type)
+ defaultPrecision[type] = EpqNone;
+
+ for (int type = 0; type < maxSamplerIndex; ++type)
+ defaultSamplerPrecision[type] = EpqNone;
+
+ // replace with real precision defaults for those that have them
+ if (obeyPrecisionQualifiers()) {
+ if (profile == EEsProfile) {
+ // Most don't have defaults; a few default to lowp.
+ TSampler sampler;
+ sampler.set(EbtFloat, Esd2D);
+ defaultSamplerPrecision[computeSamplerTypeIndex(sampler)] = EpqLow;
+ sampler.set(EbtFloat, EsdCube);
+ defaultSamplerPrecision[computeSamplerTypeIndex(sampler)] = EpqLow;
+ sampler.set(EbtFloat, Esd2D);
+ sampler.external = true;
+ defaultSamplerPrecision[computeSamplerTypeIndex(sampler)] = EpqLow;
+ }
+
+ // If we are parsing built-in computational variables/functions, it is meaningful to record
+ // whether the built-in has no precision qualifier, as that ambiguity
+ // is used to resolve the precision from the supplied arguments/operands instead.
+ // So, we don't actually want to replace EpqNone with a default precision for built-ins.
+ if (! parsingBuiltins) {
+ if (profile == EEsProfile && language == EShLangFragment) {
+ defaultPrecision[EbtInt] = EpqMedium;
+ defaultPrecision[EbtUint] = EpqMedium;
+ } else {
+ defaultPrecision[EbtInt] = EpqHigh;
+ defaultPrecision[EbtUint] = EpqHigh;
+ defaultPrecision[EbtFloat] = EpqHigh;
+ }
+
+ if (profile != EEsProfile) {
+ // Non-ES profile
+ // All sampler precisions default to highp.
+ for (int type = 0; type < maxSamplerIndex; ++type)
+ defaultSamplerPrecision[type] = EpqHigh;
+ }
+ }
+
+ defaultPrecision[EbtSampler] = EpqLow;
+ defaultPrecision[EbtAtomicUint] = EpqHigh;
+ }
+}
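+
+// Illustrative consequence (sketch): after construction for an ES fragment
+// shader, integer types get a medium default while float has none, so a
+// float declaration without an explicit or 'precision'-statement qualifier
+// is an error.
+//
+//     // defaultPrecision[EbtInt]   == EpqMedium
+//     // defaultPrecision[EbtUint]  == EpqMedium
+//     // defaultPrecision[EbtFloat] == EpqNone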
+
+void TParseContext::setLimits(const TBuiltInResource& r)
+{
+ resources = r;
+
+ anyIndexLimits = ! limits.generalAttributeMatrixVectorIndexing ||
+ ! limits.generalConstantMatrixVectorIndexing ||
+ ! limits.generalSamplerIndexing ||
+ ! limits.generalUniformIndexing ||
+ ! limits.generalVariableIndexing ||
+ ! limits.generalVaryingIndexing;
+
+ intermediate.setLimits(resources);
+
+ // "Each binding point tracks its own current default offset for
+ // inheritance of subsequent variables using the same binding. The initial state of compilation is that all
+ // binding points have an offset of 0."
+ atomicUintOffsets = new int[resources.maxAtomicCounterBindings];
+ for (int b = 0; b < resources.maxAtomicCounterBindings; ++b)
+ atomicUintOffsets[b] = 0;
+}
+
+//
+// Parse an array of strings using yyparse, going through the
+// preprocessor to tokenize the shader strings, then through
+// the GLSL scanner.
+//
+// Returns true for successful acceptance of the shader, false if any errors.
+//
+bool TParseContext::parseShaderStrings(TPpContext& ppContext, TInputScanner& input, bool versionWillBeError)
+{
+ currentScanner = &input;
+ ppContext.setInput(input, versionWillBeError);
+ yyparse(this);
+
+ finish();
+
+ return numErrors == 0;
+}
+
+// This is called from bison when it has a parse (syntax) error.
+// Note though that to stop cascading errors, we set EOF, which
+// will usually cause a syntax error; so, to be more accurate, report
+// that compilation is terminating.
+void TParseContext::parserError(const char* s)
+{
+ if (! getScanner()->atEndOfInput() || numErrors == 0)
+ error(getCurrentLoc(), "", "", s, "");
+ else
+ error(getCurrentLoc(), "compilation terminated", "", "");
+}
+
+void TParseContext::handlePragma(const TSourceLoc& loc, const TVector<TString>& tokens)
+{
+ if (pragmaCallback)
+ pragmaCallback(loc.line, tokens);
+
+ if (tokens.size() == 0)
+ return;
+
+ if (tokens[0].compare("optimize") == 0) {
+ if (tokens.size() != 4) {
+ error(loc, "optimize pragma syntax is incorrect", "#pragma", "");
+ return;
+ }
+
+ if (tokens[1].compare("(") != 0) {
+ error(loc, "\"(\" expected after 'optimize' keyword", "#pragma", "");
+ return;
+ }
+
+ if (tokens[2].compare("on") == 0)
+ contextPragma.optimize = true;
+ else if (tokens[2].compare("off") == 0)
+ contextPragma.optimize = false;
+ else {
+ error(loc, "\"on\" or \"off\" expected after '(' for 'optimize' pragma", "#pragma", "");
+ return;
+ }
+
+ if (tokens[3].compare(")") != 0) {
+ error(loc, "\")\" expected to end 'optimize' pragma", "#pragma", "");
+ return;
+ }
+ } else if (tokens[0].compare("debug") == 0) {
+ if (tokens.size() != 4) {
+ error(loc, "debug pragma syntax is incorrect", "#pragma", "");
+ return;
+ }
+
+ if (tokens[1].compare("(") != 0) {
+ error(loc, "\"(\" expected after 'debug' keyword", "#pragma", "");
+ return;
+ }
+
+ if (tokens[2].compare("on") == 0)
+ contextPragma.debug = true;
+ else if (tokens[2].compare("off") == 0)
+ contextPragma.debug = false;
+ else {
+ error(loc, "\"on\" or \"off\" expected after '(' for 'debug' pragma", "#pragma", "");
+ return;
+ }
+
+ if (tokens[3].compare(")") != 0) {
+ error(loc, "\")\" expected to end 'debug' pragma", "#pragma", "");
+ return;
+ }
+ } else if (spvVersion.spv > 0 && tokens[0].compare("use_storage_buffer") == 0) {
+ if (tokens.size() != 1)
+ error(loc, "extra tokens", "#pragma", "");
+ intermediate.setUseStorageBuffer();
+ } else if (spvVersion.spv > 0 && tokens[0].compare("use_vulkan_memory_model") == 0) {
+ if (tokens.size() != 1)
+ error(loc, "extra tokens", "#pragma", "");
+ intermediate.setUseVulkanMemoryModel();
+ } else if (spvVersion.spv > 0 && tokens[0].compare("use_variable_pointers") == 0) {
+ if (tokens.size() != 1)
+ error(loc, "extra tokens", "#pragma", "");
+ if (spvVersion.spv < glslang::EShTargetSpv_1_3)
+ error(loc, "requires SPIR-V 1.3", "#pragma use_variable_pointers", "");
+ intermediate.setUseVariablePointers();
+ } else if (tokens[0].compare("once") == 0) {
+ warn(loc, "not implemented", "#pragma once", "");
+ } else if (tokens[0].compare("glslang_binary_double_output") == 0)
+ intermediate.setBinaryDoubleOutput();
+}
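+
+// Illustrative inputs (sketch): the preprocessor delivers pragma tokens
+// pre-split, so "#pragma optimize(off)" arrives here as the token vector
+// { "optimize", "(", "off", ")" }. Pragmas recognized above include:
+//
+//     #pragma optimize(on)          // contextPragma.optimize = true
+//     #pragma debug(off)            // contextPragma.debug = false
+//     #pragma use_storage_buffer    // SPIR-V compilation only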
+
+//
+// Handle seeing a variable identifier in the grammar.
+//
+TIntermTyped* TParseContext::handleVariable(const TSourceLoc& loc, TSymbol* symbol, const TString* string)
+{
+ TIntermTyped* node = nullptr;
+
+ // Error check for requiring specific extensions present.
+ if (symbol && symbol->getNumExtensions())
+ requireExtensions(loc, symbol->getNumExtensions(), symbol->getExtensions(), symbol->getName().c_str());
+
+ if (symbol && symbol->isReadOnly()) {
+ // All shared things containing an unsized array must be copied up
+ // on first use, so that all future references will share its array structure,
+ // so that editing the implicit size will affect all nodes consuming it,
+ // and so that editing the implicit size won't change the shared one.
+ //
+ // If this is a variable or a block, check it and all it contains, but if this
+ // is a member of an anonymous block, check the whole block, as the whole block
+ // will need to be copied up if it contains an unsized array.
+ if (symbol->getType().containsUnsizedArray() ||
+ (symbol->getAsAnonMember() &&
+ symbol->getAsAnonMember()->getAnonContainer().getType().containsUnsizedArray()))
+ makeEditable(symbol);
+ }
+
+ const TVariable* variable;
+ const TAnonMember* anon = symbol ? symbol->getAsAnonMember() : nullptr;
+ if (anon) {
+ // It was a member of an anonymous container.
+
+ // Create a subtree for its dereference.
+ variable = anon->getAnonContainer().getAsVariable();
+ TIntermTyped* container = intermediate.addSymbol(*variable, loc);
+ TIntermTyped* constNode = intermediate.addConstantUnion(anon->getMemberNumber(), loc);
+ node = intermediate.addIndex(EOpIndexDirectStruct, container, constNode, loc);
+
+ node->setType(*(*variable->getType().getStruct())[anon->getMemberNumber()].type);
+ if (node->getType().hiddenMember())
+ error(loc, "member of nameless block was not redeclared", string->c_str(), "");
+ } else {
+ // Not a member of an anonymous container.
+
+ // The symbol table search was done in the lexical phase.
+ // See if it was a variable.
+ variable = symbol ? symbol->getAsVariable() : nullptr;
+ if (variable) {
+ if ((variable->getType().getBasicType() == EbtBlock ||
+ variable->getType().getBasicType() == EbtStruct) && variable->getType().getStruct() == nullptr) {
+ error(loc, "cannot be used (maybe an instance name is needed)", string->c_str(), "");
+ variable = nullptr;
+ }
+ } else {
+ if (symbol)
+ error(loc, "variable name expected", string->c_str(), "");
+ }
+
+ // Recovery, if it wasn't found or was not a variable.
+ if (! variable)
+ variable = new TVariable(string, TType(EbtVoid));
+
+ if (variable->getType().getQualifier().isFrontEndConstant())
+ node = intermediate.addConstantUnion(variable->getConstArray(), variable->getType(), loc);
+ else
+ node = intermediate.addSymbol(*variable, loc);
+ }
+
+ if (variable->getType().getQualifier().isIo())
+ intermediate.addIoAccessed(*string);
+
+ if (variable->getType().getBasicType() == EbtReference &&
+ variable->getType().getQualifier().bufferReferenceNeedsVulkanMemoryModel()) {
+ intermediate.setUseVulkanMemoryModel();
+ }
+
+ return node;
+}
+
+//
+// Handle seeing a base[index] dereference in the grammar.
+//
+TIntermTyped* TParseContext::handleBracketDereference(const TSourceLoc& loc, TIntermTyped* base, TIntermTyped* index)
+{
+ int indexValue = 0;
+ if (index->getQualifier().isFrontEndConstant())
+ indexValue = index->getAsConstantUnion()->getConstArray()[0].getIConst();
+
+ // basic type checks...
+ variableCheck(base);
+
+ if (! base->isArray() && ! base->isMatrix() && ! base->isVector() && ! base->getType().isCoopMat() &&
+ base->getBasicType() != EbtReference) {
+ if (base->getAsSymbolNode())
+ error(loc, " left of '[' is not of type array, matrix, or vector ", base->getAsSymbolNode()->getName().c_str(), "");
+ else
+ error(loc, " left of '[' is not of type array, matrix, or vector ", "expression", "");
+
+ // Insert dummy error-recovery result
+ return intermediate.addConstantUnion(0.0, EbtFloat, loc);
+ }
+
+ if (!base->isArray() && base->isVector()) {
+ if (base->getType().containsBasicType(EbtFloat16))
+ requireFloat16Arithmetic(loc, "[", "does not operate on types containing float16");
+ if (base->getType().contains16BitInt())
+ requireInt16Arithmetic(loc, "[", "does not operate on types containing (u)int16");
+ if (base->getType().contains8BitInt())
+ requireInt8Arithmetic(loc, "[", "does not operate on types containing (u)int8");
+ }
+
+ // check for constant folding
+ if (base->getType().getQualifier().isFrontEndConstant() && index->getQualifier().isFrontEndConstant()) {
+ // both base and index are front-end constants
+ checkIndex(loc, base->getType(), indexValue);
+ return intermediate.foldDereference(base, indexValue, loc);
+ }
+
+ // at least one of base and index is not a front-end constant variable...
+ TIntermTyped* result = nullptr;
+
+ if (base->getBasicType() == EbtReference && ! base->isArray()) {
+ requireExtensions(loc, 1, &E_GL_EXT_buffer_reference2, "buffer reference indexing");
+ result = intermediate.addBinaryMath(EOpAdd, base, index, loc);
+ result->setType(base->getType());
+ return result;
+ }
+
+ if (index->getQualifier().isFrontEndConstant())
+ checkIndex(loc, base->getType(), indexValue);
+
+ if (base->getAsSymbolNode() && isIoResizeArray(base->getType()))
+ handleIoResizeArrayAccess(loc, base);
+
+ if (index->getQualifier().isFrontEndConstant()) {
+ if (base->getType().isUnsizedArray()) {
+ base->getWritableType().updateImplicitArraySize(indexValue + 1);
+#ifdef NV_EXTENSIONS
+ // For 2D per-view builtin arrays, update the inner dimension size in parent type
+ if (base->getQualifier().isPerView() && base->getQualifier().builtIn != EbvNone) {
+ TIntermBinary* binaryNode = base->getAsBinaryNode();
+ if (binaryNode) {
+ TType& leftType = binaryNode->getLeft()->getWritableType();
+ TArraySizes& arraySizes = *leftType.getArraySizes();
+ assert(arraySizes.getNumDims() == 2);
+ arraySizes.setDimSize(1, std::max(arraySizes.getDimSize(1), indexValue + 1));
+ }
+ }
+#endif
+ } else
+ checkIndex(loc, base->getType(), indexValue);
+ result = intermediate.addIndex(EOpIndexDirect, base, index, loc);
+ } else {
+ if (base->getType().isUnsizedArray()) {
+ // we have a variable index into an unsized array, which is okay,
+ // depending on the situation
+ if (base->getAsSymbolNode() && isIoResizeArray(base->getType()))
+ error(loc, "", "[", "array must be sized by a redeclaration or layout qualifier before being indexed with a variable");
+ else {
+ // it is okay for a run-time sized array
+ checkRuntimeSizable(loc, *base);
+ }
+ base->getWritableType().setArrayVariablyIndexed();
+ }
+ if (base->getBasicType() == EbtBlock) {
+ if (base->getQualifier().storage == EvqBuffer)
+ requireProfile(base->getLoc(), ~EEsProfile, "variable indexing buffer block array");
+ else if (base->getQualifier().storage == EvqUniform)
+ profileRequires(base->getLoc(), EEsProfile, 320, Num_AEP_gpu_shader5, AEP_gpu_shader5,
+ "variable indexing uniform block array");
+ else {
+ // input/output blocks either don't exist or can be variable indexed
+ }
+ } else if (language == EShLangFragment && base->getQualifier().isPipeOutput())
+ requireProfile(base->getLoc(), ~EEsProfile, "variable indexing fragment shader output array");
+ else if (base->getBasicType() == EbtSampler && version >= 130) {
+ const char* explanation = "variable indexing sampler array";
+ requireProfile(base->getLoc(), EEsProfile | ECoreProfile | ECompatibilityProfile, explanation);
+ profileRequires(base->getLoc(), EEsProfile, 320, Num_AEP_gpu_shader5, AEP_gpu_shader5, explanation);
+ profileRequires(base->getLoc(), ECoreProfile | ECompatibilityProfile, 400, nullptr, explanation);
+ }
+
+ result = intermediate.addIndex(EOpIndexIndirect, base, index, loc);
+ }
+
+ // Insert valid dereferenced result
+ TType newType(base->getType(), 0); // dereferenced type
+ if (base->getType().getQualifier().isConstant() && index->getQualifier().isConstant()) {
+ newType.getQualifier().storage = EvqConst;
+ // If base or index is a specialization constant, the result should also be a specialization constant.
+ if (base->getType().getQualifier().isSpecConstant() || index->getQualifier().isSpecConstant()) {
+ newType.getQualifier().makeSpecConstant();
+ }
+ } else {
+ newType.getQualifier().makePartialTemporary();
+ }
+ result->setType(newType);
+
+ // Propagate nonuniform
+ if (base->getQualifier().isNonUniform() || index->getQualifier().isNonUniform())
+ result->getWritableType().getQualifier().nonUniform = true;
+
+ if (anyIndexLimits)
+ handleIndexLimits(loc, base, index);
+
+ return result;
+}
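+
+// Illustrative behavior (sketch, with hypothetical shader variables v and i):
+// when both base and index are front-end constants the dereference is folded
+// immediately; otherwise an EOpIndexDirect (constant index) or
+// EOpIndexIndirect (variable index) node is built, and constness and
+// spec-constness propagate into the result type.
+//
+//     const vec3 c = vec3(1.0, 2.0, 3.0);
+//     float x = c[1];   // folded by foldDereference() to the constant 2.0
+//     float y = v[i];   // EOpIndexIndirect; result is a partial temporary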
+
+// for ES 2.0 (version 100) limitations for almost all index operations except vertex-shader uniforms
+void TParseContext::handleIndexLimits(const TSourceLoc& /*loc*/, TIntermTyped* base, TIntermTyped* index)
+{
+ if ((! limits.generalSamplerIndexing && base->getBasicType() == EbtSampler) ||
+ (! limits.generalUniformIndexing && base->getQualifier().isUniformOrBuffer() && language != EShLangVertex) ||
+ (! limits.generalAttributeMatrixVectorIndexing && base->getQualifier().isPipeInput() && language == EShLangVertex && (base->getType().isMatrix() || base->getType().isVector())) ||
+ (! limits.generalConstantMatrixVectorIndexing && base->getAsConstantUnion()) ||
+ (! limits.generalVariableIndexing && ! base->getType().getQualifier().isUniformOrBuffer() &&
+ ! base->getType().getQualifier().isPipeInput() &&
+ ! base->getType().getQualifier().isPipeOutput() &&
+ ! base->getType().getQualifier().isConstant()) ||
+ (! limits.generalVaryingIndexing && (base->getType().getQualifier().isPipeInput() ||
+ base->getType().getQualifier().isPipeOutput()))) {
+ // it's too early to know what the inductive variables are, save it for post processing
+ needsIndexLimitationChecking.push_back(index);
+ }
+}
+
+// Make a shared symbol have a non-shared version that can be edited by the current
+// compile, such that editing its type will not change the shared version and will
+// affect all nodes sharing it.
+void TParseContext::makeEditable(TSymbol*& symbol)
+{
+ TParseContextBase::makeEditable(symbol);
+
+ // See if it's tied to IO resizing
+ if (isIoResizeArray(symbol->getType()))
+ ioArraySymbolResizeList.push_back(symbol);
+}
+
+// Return true if this is a geometry shader input array or tessellation control output array
+// or mesh shader output array.
+bool TParseContext::isIoResizeArray(const TType& type) const
+{
+ return type.isArray() &&
+ ((language == EShLangGeometry && type.getQualifier().storage == EvqVaryingIn) ||
+ (language == EShLangTessControl && type.getQualifier().storage == EvqVaryingOut && ! type.getQualifier().patch)
+#ifdef NV_EXTENSIONS
+ ||
+ (language == EShLangFragment && type.getQualifier().storage == EvqVaryingIn && type.getQualifier().pervertexNV) ||
+ (language == EShLangMeshNV && type.getQualifier().storage == EvqVaryingOut && !type.getQualifier().perTaskNV)
+
+#endif
+ );
+}
+
+// If an array is not isIoResizeArray() but is an io array, make sure it has the right size
+void TParseContext::fixIoArraySize(const TSourceLoc& loc, TType& type)
+{
+ if (! type.isArray() || type.getQualifier().patch || symbolTable.atBuiltInLevel())
+ return;
+
+ assert(! isIoResizeArray(type));
+
+ if (type.getQualifier().storage != EvqVaryingIn || type.getQualifier().patch)
+ return;
+
+ if (language == EShLangTessControl || language == EShLangTessEvaluation) {
+ if (type.getOuterArraySize() != resources.maxPatchVertices) {
+ if (type.isSizedArray())
+ error(loc, "tessellation input array size must be gl_MaxPatchVertices or implicitly sized", "[]", "");
+ type.changeOuterArraySize(resources.maxPatchVertices);
+ }
+ }
+}
+
+// Issue any errors if the non-array object is missing arrayness WRT
+// shader I/O that has array requirements.
+// All arrayness checking is handled in array paths; this handles the
+// case where a non-array was declared where shader I/O requires an array.
+void TParseContext::ioArrayCheck(const TSourceLoc& loc, const TType& type, const TString& identifier)
+{
+ if (! type.isArray() && ! symbolTable.atBuiltInLevel()) {
+ if (type.getQualifier().isArrayedIo(language)
+#ifdef NV_EXTENSIONS
+ && !type.getQualifier().layoutPassthrough
+#endif
+ )
+ error(loc, "type must be an array:", type.getStorageQualifierString(), identifier.c_str());
+ }
+}
+
+// Handle a dereference of a geometry shader input array or tessellation control output array.
+// See ioArraySymbolResizeList comment in ParseHelper.h.
+//
+void TParseContext::handleIoResizeArrayAccess(const TSourceLoc& /*loc*/, TIntermTyped* base)
+{
+ TIntermSymbol* symbolNode = base->getAsSymbolNode();
+ assert(symbolNode);
+ if (! symbolNode)
+ return;
+
+ // fix array size, if it can be fixed and needs to be fixed (will allow variable indexing)
+ if (symbolNode->getType().isUnsizedArray()) {
+ int newSize = getIoArrayImplicitSize(symbolNode->getType().getQualifier());
+ if (newSize > 0)
+ symbolNode->getWritableType().changeOuterArraySize(newSize);
+ }
+}
+
+// If there has been an input primitive declaration (geometry shader) or an output
+// number of vertices declaration(tessellation shader), make sure all input array types
+// match it in size. Types come either from nodes in the AST or symbols in the
+// symbol table.
+//
+// Types without an array size will be given one.
+// Types already having a size that is wrong will get an error.
+//
+void TParseContext::checkIoArraysConsistency(const TSourceLoc &loc, bool tailOnly)
+{
+ int requiredSize = 0;
+ TString featureString;
+ size_t listSize = ioArraySymbolResizeList.size();
+ size_t i = 0;
+
+ // If tailOnly = true, only check the last array symbol in the list.
+ if (tailOnly) {
+ i = listSize - 1;
+ }
+ for (bool firstIteration = true; i < listSize; ++i) {
+ TType &type = ioArraySymbolResizeList[i]->getWritableType();
+
+ // As I/O array sizes don't change, fetch requiredSize only once,
+ // except for mesh shaders which could have different I/O array sizes based on type qualifiers.
+ if (firstIteration
+#ifdef NV_EXTENSIONS
+ || (language == EShLangMeshNV)
+#endif
+ )
+ {
+ requiredSize = getIoArrayImplicitSize(type.getQualifier(), &featureString);
+ if (requiredSize == 0)
+ break;
+ firstIteration = false;
+ }
+
+ checkIoArrayConsistency(loc, requiredSize, featureString.c_str(), type,
+ ioArraySymbolResizeList[i]->getName());
+ }
+}
+
+int TParseContext::getIoArrayImplicitSize(const TQualifier &qualifier, TString *featureString) const
+{
+ int expectedSize = 0;
+ TString str = "unknown";
+ unsigned int maxVertices = intermediate.getVertices() != TQualifier::layoutNotSet ? intermediate.getVertices() : 0;
+
+ if (language == EShLangGeometry) {
+ expectedSize = TQualifier::mapGeometryToSize(intermediate.getInputPrimitive());
+ str = TQualifier::getGeometryString(intermediate.getInputPrimitive());
+ }
+ else if (language == EShLangTessControl) {
+ expectedSize = maxVertices;
+ str = "vertices";
+ }
+#ifdef NV_EXTENSIONS
+ else if (language == EShLangFragment) {
+ // Number of vertices for Fragment shader is always three.
+ expectedSize = 3;
+ str = "vertices";
+ }
+ else if (language == EShLangMeshNV) {
+ unsigned int maxPrimitives =
+ intermediate.getPrimitives() != TQualifier::layoutNotSet ? intermediate.getPrimitives() : 0;
+ if (qualifier.builtIn == EbvPrimitiveIndicesNV) {
+ expectedSize = maxPrimitives * TQualifier::mapGeometryToSize(intermediate.getOutputPrimitive());
+ str = "max_primitives*";
+ str += TQualifier::getGeometryString(intermediate.getOutputPrimitive());
+ }
+ else if (qualifier.isPerPrimitive()) {
+ expectedSize = maxPrimitives;
+ str = "max_primitives";
+ }
+ else {
+ expectedSize = maxVertices;
+ str = "max_vertices";
+ }
+ }
+#endif
+ if (featureString)
+ *featureString = str;
+ return expectedSize;
+}
+
+void TParseContext::checkIoArrayConsistency(const TSourceLoc& loc, int requiredSize, const char* feature, TType& type, const TString& name)
+{
+ if (type.isUnsizedArray())
+ type.changeOuterArraySize(requiredSize);
+ else if (type.getOuterArraySize() != requiredSize) {
+ if (language == EShLangGeometry)
+ error(loc, "inconsistent input primitive for array size of", feature, name.c_str());
+ else if (language == EShLangTessControl)
+ error(loc, "inconsistent output number of vertices for array size of", feature, name.c_str());
+#ifdef NV_EXTENSIONS
+ else if (language == EShLangFragment) {
+ if (type.getOuterArraySize() > requiredSize)
+ error(loc, " cannot be greater than 3 for pervertexNV", feature, name.c_str());
+ }
+ else if (language == EShLangMeshNV)
+ error(loc, "inconsistent output array size of", feature, name.c_str());
+#endif
+ else
+ assert(0);
+ }
+}
+
+// Handle seeing a binary node with a math operation.
+// Returns nullptr if not semantically allowed.
+TIntermTyped* TParseContext::handleBinaryMath(const TSourceLoc& loc, const char* str, TOperator op, TIntermTyped* left, TIntermTyped* right)
+{
+ rValueErrorCheck(loc, str, left->getAsTyped());
+ rValueErrorCheck(loc, str, right->getAsTyped());
+
+ bool allowed = true;
+ switch (op) {
+ // TODO: Bring more source language-specific checks up from intermediate.cpp
+ // to the specific parse helpers for that source language.
+ case EOpLessThan:
+ case EOpGreaterThan:
+ case EOpLessThanEqual:
+ case EOpGreaterThanEqual:
+ if (! left->isScalar() || ! right->isScalar())
+ allowed = false;
+ break;
+ default:
+ break;
+ }
+
+ if (((left->getType().containsBasicType(EbtFloat16) || right->getType().containsBasicType(EbtFloat16)) && !float16Arithmetic()) ||
+ ((left->getType().contains16BitInt() || right->getType().contains16BitInt()) && !int16Arithmetic()) ||
+ ((left->getType().contains8BitInt() || right->getType().contains8BitInt()) && !int8Arithmetic())) {
+ allowed = false;
+ }
+
+ TIntermTyped* result = nullptr;
+ if (allowed)
+ result = intermediate.addBinaryMath(op, left, right, loc);
+
+ if (result == nullptr)
+ binaryOpError(loc, str, left->getCompleteString(), right->getCompleteString());
+
+ return result;
+}
+
+// Handle seeing a unary node with a math operation.
+TIntermTyped* TParseContext::handleUnaryMath(const TSourceLoc& loc, const char* str, TOperator op, TIntermTyped* childNode)
+{
+ rValueErrorCheck(loc, str, childNode);
+
+ bool allowed = true;
+ if ((childNode->getType().containsBasicType(EbtFloat16) && !float16Arithmetic()) ||
+ (childNode->getType().contains16BitInt() && !int16Arithmetic()) ||
+ (childNode->getType().contains8BitInt() && !int8Arithmetic())) {
+ allowed = false;
+ }
+
+ TIntermTyped* result = nullptr;
+
+ if (allowed)
+ result = intermediate.addUnaryMath(op, childNode, loc);
+
+ if (result)
+ return result;
+ else
+ unaryOpError(loc, str, childNode->getCompleteString());
+
+ return childNode;
+}
+
+//
+// Handle seeing a base.field dereference in the grammar.
+//
+TIntermTyped* TParseContext::handleDotDereference(const TSourceLoc& loc, TIntermTyped* base, const TString& field)
+{
+ variableCheck(base);
+
+ //
+ // .length() can't be resolved until we later see the function-calling syntax.
+ // Save away the name in the AST for now. Processing is completed in
+ // handleLengthMethod().
+ //
+ if (field == "length") {
+ if (base->isArray()) {
+ profileRequires(loc, ENoProfile, 120, E_GL_3DL_array_objects, ".length");
+ profileRequires(loc, EEsProfile, 300, nullptr, ".length");
+ } else if (base->isVector() || base->isMatrix()) {
+ const char* feature = ".length() on vectors and matrices";
+ requireProfile(loc, ~EEsProfile, feature);
+ profileRequires(loc, ~EEsProfile, 420, E_GL_ARB_shading_language_420pack, feature);
+ } else if (!base->getType().isCoopMat()) {
+ error(loc, "does not operate on this type:", field.c_str(), base->getType().getCompleteString().c_str());
+
+ return base;
+ }
+
+ return intermediate.addMethod(base, TType(EbtInt), &field, loc);
+ }
+
+ // It's not .length() if we get to here.
+
+ if (base->isArray()) {
+ error(loc, "cannot apply to an array:", ".", field.c_str());
+
+ return base;
+ }
+
+ if (base->getType().isCoopMat()) {
+ error(loc, "cannot apply to a cooperative matrix type:", ".", field.c_str());
+ return base;
+ }
+
+ // It's neither an array nor .length() if we get here,
+ // leaving swizzles and struct/block dereferences.
+
+ TIntermTyped* result = base;
+ if ((base->isVector() || base->isScalar()) &&
+ (base->isFloatingDomain() || base->isIntegerDomain() || base->getBasicType() == EbtBool)) {
+ if (base->isScalar()) {
+ const char* dotFeature = "scalar swizzle";
+ requireProfile(loc, ~EEsProfile, dotFeature);
+ profileRequires(loc, ~EEsProfile, 420, E_GL_ARB_shading_language_420pack, dotFeature);
+ }
+
+ TSwizzleSelectors<TVectorSelector> selectors;
+ parseSwizzleSelector(loc, field, base->getVectorSize(), selectors);
+
+ if (base->isVector() && selectors.size() != 1 && base->getType().containsBasicType(EbtFloat16))
+ requireFloat16Arithmetic(loc, ".", "can't swizzle types containing float16");
+ if (base->isVector() && selectors.size() != 1 && base->getType().contains16BitInt())
+ requireInt16Arithmetic(loc, ".", "can't swizzle types containing (u)int16");
+ if (base->isVector() && selectors.size() != 1 && base->getType().contains8BitInt())
+ requireInt8Arithmetic(loc, ".", "can't swizzle types containing (u)int8");
+
+ if (base->isScalar()) {
+ if (selectors.size() == 1)
+ return result;
+ else {
+ TType type(base->getBasicType(), EvqTemporary, selectors.size());
+ // Swizzle operations propagate specialization-constantness
+ if (base->getQualifier().isSpecConstant())
+ type.getQualifier().makeSpecConstant();
+ return addConstructor(loc, base, type);
+ }
+ }
+
+ if (base->getType().getQualifier().isFrontEndConstant())
+ result = intermediate.foldSwizzle(base, selectors, loc);
+ else {
+ if (selectors.size() == 1) {
+ TIntermTyped* index = intermediate.addConstantUnion(selectors[0], loc);
+ result = intermediate.addIndex(EOpIndexDirect, base, index, loc);
+ result->setType(TType(base->getBasicType(), EvqTemporary, base->getType().getQualifier().precision));
+ } else {
+ TIntermTyped* index = intermediate.addSwizzle(selectors, loc);
+ result = intermediate.addIndex(EOpVectorSwizzle, base, index, loc);
+ result->setType(TType(base->getBasicType(), EvqTemporary, base->getType().getQualifier().precision, selectors.size()));
+ }
+ // Swizzle operations propagate specialization-constantness
+ if (base->getType().getQualifier().isSpecConstant())
+ result->getWritableType().getQualifier().makeSpecConstant();
+ }
+ } else if (base->getBasicType() == EbtStruct ||
+ base->getBasicType() == EbtBlock ||
+ base->getBasicType() == EbtReference) {
+ const TTypeList* fields = base->getBasicType() == EbtReference ?
+ base->getType().getReferentType()->getStruct() :
+ base->getType().getStruct();
+ bool fieldFound = false;
+ int member;
+ for (member = 0; member < (int)fields->size(); ++member) {
+ if ((*fields)[member].type->getFieldName() == field) {
+ fieldFound = true;
+ break;
+ }
+ }
+ if (fieldFound) {
+ if (base->getType().getQualifier().isFrontEndConstant())
+ result = intermediate.foldDereference(base, member, loc);
+ else {
+ blockMemberExtensionCheck(loc, base, member, field);
+ TIntermTyped* index = intermediate.addConstantUnion(member, loc);
+ result = intermediate.addIndex(EOpIndexDirectStruct, base, index, loc);
+ result->setType(*(*fields)[member].type);
+ if ((*fields)[member].type->getQualifier().isIo())
+ intermediate.addIoAccessed(field);
+ }
+ } else
+ error(loc, "no such field in structure", field.c_str(), "");
+ } else
+ error(loc, "does not apply to this type:", field.c_str(), base->getType().getCompleteString().c_str());
+
+ // Propagate noContraction up the dereference chain
+ if (base->getQualifier().noContraction)
+ result->getWritableType().getQualifier().noContraction = true;
+
+ // Propagate nonuniform
+ if (base->getQualifier().isNonUniform())
+ result->getWritableType().getQualifier().nonUniform = true;
+
+ return result;
+}
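+
+// Illustrative behavior (sketch): "v.yx" builds an EOpVectorSwizzle node (or
+// folds to a constant if v is a front-end constant), "s.member" builds an
+// EOpIndexDirectStruct node holding the member's index, and "a.length" is
+// saved as a method node to be completed later by handleLengthMethod().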
+
+void TParseContext::blockMemberExtensionCheck(const TSourceLoc& loc, const TIntermTyped* base, int member, const TString& memberName)
+{
+ // a block that needs extension checking is either 'base', or if arrayed,
+ // one level removed to the left
+ const TIntermSymbol* baseSymbol = nullptr;
+ if (base->getAsBinaryNode() == nullptr)
+ baseSymbol = base->getAsSymbolNode();
+ else
+ baseSymbol = base->getAsBinaryNode()->getLeft()->getAsSymbolNode();
+ if (baseSymbol == nullptr)
+ return;
+ const TSymbol* symbol = symbolTable.find(baseSymbol->getName());
+ if (symbol == nullptr)
+ return;
+ const TVariable* variable = symbol->getAsVariable();
+ if (variable == nullptr)
+ return;
+ if (!variable->hasMemberExtensions())
+ return;
+
+ // We now have a variable that is the base of a dot reference
+ // with members that need extension checking.
+ if (variable->getNumMemberExtensions(member) > 0)
+ requireExtensions(loc, variable->getNumMemberExtensions(member), variable->getMemberExtensions(member), memberName.c_str());
+}
+
+//
+// Handle seeing a function declarator in the grammar. This is the precursor
+// to recognizing a function prototype or function definition.
+//
+TFunction* TParseContext::handleFunctionDeclarator(const TSourceLoc& loc, TFunction& function, bool prototype)
+{
+ // ES can't declare prototypes inside functions
+ if (! symbolTable.atGlobalLevel())
+ requireProfile(loc, ~EEsProfile, "local function declaration");
+
+ //
+ // Multiple declarations of the same function name are allowed.
+ //
+ // If this is a definition, the definition production code will check for redefinitions
+ // (we don't know at this point if it's a definition or not).
+ //
+ // Redeclarations (full signature match) are allowed, but return types and parameter qualifiers must also match.
+ // - except ES 100, which only allows a single prototype
+ //
+ // ES 100 does not allow redefining, but does allow overloading of built-in functions.
+ // ES 300 does not allow redefining or overloading of built-in functions.
+ //
+ bool builtIn;
+ TSymbol* symbol = symbolTable.find(function.getMangledName(), &builtIn);
+ if (symbol && symbol->getAsFunction() && builtIn)
+ requireProfile(loc, ~EEsProfile, "redefinition of built-in function");
+ const TFunction* prevDec = symbol ? symbol->getAsFunction() : 0;
+ if (prevDec) {
+ if (prevDec->isPrototyped() && prototype)
+ profileRequires(loc, EEsProfile, 300, nullptr, "multiple prototypes for same function");
+ if (prevDec->getType() != function.getType())
+ error(loc, "overloaded functions must have the same return type", function.getName().c_str(), "");
+ for (int i = 0; i < prevDec->getParamCount(); ++i) {
+ if ((*prevDec)[i].type->getQualifier().storage != function[i].type->getQualifier().storage)
+ error(loc, "overloaded functions must have the same parameter storage qualifiers for argument", function[i].type->getStorageQualifierString(), "%d", i+1);
+
+ if ((*prevDec)[i].type->getQualifier().precision != function[i].type->getQualifier().precision)
+ error(loc, "overloaded functions must have the same parameter precision qualifiers for argument", function[i].type->getPrecisionQualifierString(), "%d", i+1);
+ }
+ }
+
+ arrayObjectCheck(loc, function.getType(), "array in function return type");
+
+ if (prototype) {
+ // All built-in functions are defined, even though they don't have a body.
+ // Count their prototype as a definition instead.
+ if (symbolTable.atBuiltInLevel())
+ function.setDefined();
+ else {
+ if (prevDec && ! builtIn)
+ symbol->getAsFunction()->setPrototyped(); // need a writable one, but like having prevDec as a const
+ function.setPrototyped();
+ }
+ }
+
+ // This insert won't actually insert it if it's a duplicate signature, but it will still check for
+ // other forms of name collisions.
+ if (! symbolTable.insert(function))
+ error(loc, "function name is redeclaration of existing name", function.getName().c_str(), "");
+
+ //
+ // If this is a redeclaration, it could also be a definition,
+ // in which case, we need to use the parameter names from this one, and not the one that's
+ // being redeclared. So, pass back this declaration, not the one in the symbol table.
+ //
+ return &function;
+}
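+
+// Illustrative cases (sketch) for the rules above:
+//
+//     float f(int x);        // prototype
+//     float f(int x);        // OK, except ES 100 allows only a single prototype
+//     int   f(int x);        // error: must have the same return type
+//     float f(out int x);    // error: parameter storage qualifiers must match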
+
+//
+// Handle seeing the function prototype in front of a function definition in the grammar.
+// The body is handled after this function returns.
+//
+TIntermAggregate* TParseContext::handleFunctionDefinition(const TSourceLoc& loc, TFunction& function)
+{
+ currentCaller = function.getMangledName();
+ TSymbol* symbol = symbolTable.find(function.getMangledName());
+ TFunction* prevDec = symbol ? symbol->getAsFunction() : nullptr;
+
+ if (! prevDec)
+ error(loc, "can't find function", function.getName().c_str(), "");
+ // Note: 'prevDec' could be 'function' if this is the first time we've seen function
+ // as it would have just been put in the symbol table. Otherwise, we're looking up
+ // an earlier occurrence.
+
+ if (prevDec && prevDec->isDefined()) {
+ // Then this function already has a body.
+ error(loc, "function already has a body", function.getName().c_str(), "");
+ }
+ if (prevDec && ! prevDec->isDefined()) {
+ prevDec->setDefined();
+
+ // Remember the return type for later checking for RETURN statements.
+ currentFunctionType = &(prevDec->getType());
+ } else
+ currentFunctionType = new TType(EbtVoid);
+ functionReturnsValue = false;
+
+ // Check for entry point
+ if (function.getName().compare(intermediate.getEntryPointName().c_str()) == 0) {
+ intermediate.setEntryPointMangledName(function.getMangledName().c_str());
+ intermediate.incrementEntryPointCount();
+ inMain = true;
+ } else
+ inMain = false;
+
+ //
+ // Raise error message if main function takes any parameters or returns anything other than void
+ //
+ if (inMain) {
+ if (function.getParamCount() > 0)
+ error(loc, "function cannot take any parameter(s)", function.getName().c_str(), "");
+ if (function.getType().getBasicType() != EbtVoid)
+ error(loc, "", function.getType().getBasicTypeString().c_str(), "entry point cannot return a value");
+ }
+
+ //
+ // New symbol table scope for body of function plus its arguments
+ //
+ symbolTable.push();
+
+ //
+ // Insert parameters into the symbol table.
+ // If the parameter has no name, it's not an error, just don't insert it
+ // (could be used for unused args).
+ //
+ // Also, accumulate the list of parameters into the HIL, so lower level code
+ // knows where to find parameters.
+ //
+ TIntermAggregate* paramNodes = new TIntermAggregate;
+ for (int i = 0; i < function.getParamCount(); i++) {
+ TParameter& param = function[i];
+ if (param.name != nullptr) {
+ TVariable *variable = new TVariable(param.name, *param.type);
+
+ // Insert the parameters with name in the symbol table.
+ if (! symbolTable.insert(*variable))
+ error(loc, "redefinition", variable->getName().c_str(), "");
+ else {
+ // Transfer ownership of name pointer to symbol table.
+ param.name = nullptr;
+
+ // Add the parameter to the HIL
+ paramNodes = intermediate.growAggregate(paramNodes,
+ intermediate.addSymbol(*variable, loc),
+ loc);
+ }
+ } else
+ paramNodes = intermediate.growAggregate(paramNodes, intermediate.addSymbol(*param.type, loc), loc);
+ }
+ intermediate.setAggregateOperator(paramNodes, EOpParameters, TType(EbtVoid), loc);
+ loopNestingLevel = 0;
+ statementNestingLevel = 0;
+ controlFlowNestingLevel = 0;
+ postEntryPointReturn = false;
+
+ return paramNodes;
+}
+
+//
+// Handle seeing function call syntax in the grammar, which could be any of
+// - .length() method
+// - constructor
+// - a call to a built-in function mapped to an operator
+// - a call to a built-in function that will remain a function call (e.g., texturing)
+// - user function
+// - subroutine call (not implemented yet)
+//
+TIntermTyped* TParseContext::handleFunctionCall(const TSourceLoc& loc, TFunction* function, TIntermNode* arguments)
+{
+ TIntermTyped* result = nullptr;
+
+ if (function->getBuiltInOp() == EOpArrayLength)
+ result = handleLengthMethod(loc, function, arguments);
+ else if (function->getBuiltInOp() != EOpNull) {
+ //
+ // Then this should be a constructor.
+ // Don't go through the symbol table for constructors.
+ // Their parameters will be verified algorithmically.
+ //
+ TType type(EbtVoid); // use this to get the type back
+ if (! constructorError(loc, arguments, *function, function->getBuiltInOp(), type)) {
+ //
+ // It's a constructor, of type 'type'.
+ //
+ result = addConstructor(loc, arguments, type);
+ if (result == nullptr)
+ error(loc, "cannot construct with these arguments", type.getCompleteString().c_str(), "");
+ }
+ } else {
+ //
+ // Find it in the symbol table.
+ //
+ const TFunction* fnCandidate;
+ bool builtIn;
+ fnCandidate = findFunction(loc, *function, builtIn);
+ if (fnCandidate) {
+ // This is a declared function that might map to
+ // - a built-in operator,
+ // - a built-in function not mapped to an operator, or
+ // - a user function.
+
+ // Error check for a function requiring specific extensions present.
+ if (builtIn && fnCandidate->getNumExtensions())
+ requireExtensions(loc, fnCandidate->getNumExtensions(), fnCandidate->getExtensions(), fnCandidate->getName().c_str());
+
+ if (builtIn && fnCandidate->getType().containsBasicType(EbtFloat16))
+ requireFloat16Arithmetic(loc, "built-in function", "float16 types can only be in uniform block or buffer storage");
+ if (builtIn && fnCandidate->getType().contains16BitInt())
+ requireInt16Arithmetic(loc, "built-in function", "(u)int16 types can only be in uniform block or buffer storage");
+ if (builtIn && fnCandidate->getType().contains8BitInt())
+ requireInt8Arithmetic(loc, "built-in function", "(u)int8 types can only be in uniform block or buffer storage");
+
+ if (arguments != nullptr) {
+ // Make sure qualifications work for these arguments.
+ TIntermAggregate* aggregate = arguments->getAsAggregate();
+ for (int i = 0; i < fnCandidate->getParamCount(); ++i) {
+ // At this early point there is a slight ambiguity between whether an aggregate 'arguments'
+ // is the single argument itself or whether its children are the arguments. With only one
+ // argument, take 'arguments' itself as that argument.
+ TIntermNode* arg = fnCandidate->getParamCount() == 1 ? arguments : (aggregate ? aggregate->getSequence()[i] : arguments);
+ TQualifier& formalQualifier = (*fnCandidate)[i].type->getQualifier();
+ if (formalQualifier.isParamOutput()) {
+ if (lValueErrorCheck(arguments->getLoc(), "assign", arg->getAsTyped()))
+ error(arguments->getLoc(), "Non-L-value cannot be passed for 'out' or 'inout' parameters.", "out", "");
+ }
+ TQualifier& argQualifier = arg->getAsTyped()->getQualifier();
+ if (argQualifier.isMemory()) {
+ const char* message = "argument cannot drop memory qualifier when passed to formal parameter";
+ if (argQualifier.volatil && ! formalQualifier.volatil)
+ error(arguments->getLoc(), message, "volatile", "");
+ if (argQualifier.coherent && ! (formalQualifier.devicecoherent || formalQualifier.coherent))
+ error(arguments->getLoc(), message, "coherent", "");
+ if (argQualifier.devicecoherent && ! (formalQualifier.devicecoherent || formalQualifier.coherent))
+ error(arguments->getLoc(), message, "devicecoherent", "");
+ if (argQualifier.queuefamilycoherent && ! (formalQualifier.queuefamilycoherent || formalQualifier.devicecoherent || formalQualifier.coherent))
+ error(arguments->getLoc(), message, "queuefamilycoherent", "");
+ if (argQualifier.workgroupcoherent && ! (formalQualifier.workgroupcoherent || formalQualifier.queuefamilycoherent || formalQualifier.devicecoherent || formalQualifier.coherent))
+ error(arguments->getLoc(), message, "workgroupcoherent", "");
+ if (argQualifier.subgroupcoherent && ! (formalQualifier.subgroupcoherent || formalQualifier.workgroupcoherent || formalQualifier.queuefamilycoherent || formalQualifier.devicecoherent || formalQualifier.coherent))
+ error(arguments->getLoc(), message, "subgroupcoherent", "");
+ if (argQualifier.readonly && ! formalQualifier.readonly)
+ error(arguments->getLoc(), message, "readonly", "");
+ if (argQualifier.writeonly && ! formalQualifier.writeonly)
+ error(arguments->getLoc(), message, "writeonly", "");
+ if (!builtIn && argQualifier.restrict && ! formalQualifier.restrict)
+ error(arguments->getLoc(), message, "restrict", "");
+ }
+ if (!builtIn && argQualifier.layoutFormat != formalQualifier.layoutFormat) {
+ // we have mismatched formats, which should only be allowed if writeonly
+ // and at least one format is unknown
+ if (!formalQualifier.writeonly || (formalQualifier.layoutFormat != ElfNone &&
+ argQualifier.layoutFormat != ElfNone))
+ error(arguments->getLoc(), "image formats must match", "format", "");
+ }
+
+ if (builtIn && arg->getAsTyped()->getType().containsBasicType(EbtFloat16))
+ requireFloat16Arithmetic(arguments->getLoc(), "built-in function", "float16 types can only be in uniform block or buffer storage");
+ if (builtIn && arg->getAsTyped()->getType().contains16BitInt())
+ requireInt16Arithmetic(arguments->getLoc(), "built-in function", "(u)int16 types can only be in uniform block or buffer storage");
+ if (builtIn && arg->getAsTyped()->getType().contains8BitInt())
+ requireInt8Arithmetic(arguments->getLoc(), "built-in function", "(u)int8 types can only be in uniform block or buffer storage");
+
+ // TODO 4.5 functionality: A shader will fail to compile
+ // if the value passed to the mem argument of an atomic memory function does not correspond to a buffer or
+ // shared variable. It is acceptable to pass an element of an array or a single component of a vector to the
+ // mem argument of an atomic memory function, as long as the underlying array or vector is a buffer or
+ // shared variable.
+ }
+
+ // Convert 'in' arguments
+ addInputArgumentConversions(*fnCandidate, arguments); // arguments may be modified if it's just a single argument node
+ }
+
+ if (builtIn && fnCandidate->getBuiltInOp() != EOpNull) {
+ // A function call mapped to a built-in operation.
+ result = handleBuiltInFunctionCall(loc, arguments, *fnCandidate);
+ } else {
+ // This is a function call not mapped to built-in operator.
+ // It could still be a built-in function, but only if PureOperatorBuiltins == false.
+ result = intermediate.setAggregateOperator(arguments, EOpFunctionCall, fnCandidate->getType(), loc);
+ TIntermAggregate* call = result->getAsAggregate();
+ call->setName(fnCandidate->getMangledName());
+
+ // this is how we know whether the given function is a built-in function or a user-defined function:
+ // if builtIn == false, it's user-defined (though it could also overload a built-in function)
+ // if builtIn == true, it's definitely a built-in function with EOpNull
+ if (! builtIn) {
+ call->setUserDefined();
+ if (symbolTable.atGlobalLevel()) {
+ requireProfile(loc, ~EEsProfile, "calling user function from global scope");
+ intermediate.addToCallGraph(infoSink, "main(", fnCandidate->getMangledName());
+ } else
+ intermediate.addToCallGraph(infoSink, currentCaller, fnCandidate->getMangledName());
+ }
+
+ if (builtIn)
+ nonOpBuiltInCheck(loc, *fnCandidate, *call);
+ else
+ userFunctionCallCheck(loc, *call);
+ }
+
+ // Convert 'out' arguments. If it was a constant folded built-in, it won't be an aggregate anymore.
+ // Built-ins with a single argument aren't called with an aggregate, but they also don't have an output.
+ // Also, build the qualifier list for user function calls, which are always called with an aggregate.
+ if (result->getAsAggregate()) {
+ TQualifierList& qualifierList = result->getAsAggregate()->getQualifierList();
+ for (int i = 0; i < fnCandidate->getParamCount(); ++i) {
+ TStorageQualifier qual = (*fnCandidate)[i].type->getQualifier().storage;
+ qualifierList.push_back(qual);
+ }
+ result = addOutputArgumentConversions(*fnCandidate, *result->getAsAggregate());
+ }
+
+ if (result->getAsTyped()->getType().isCoopMat() &&
+ !result->getAsTyped()->getType().isParameterized()) {
+ assert(fnCandidate->getBuiltInOp() == EOpCooperativeMatrixMulAdd);
+
+ result->setType(result->getAsAggregate()->getSequence()[2]->getAsTyped()->getType());
+ }
+ }
+ }
+
+ // generic error recovery
+ // TODO: simplification: localize all the error recoveries that look like this, and take type into account to reduce cascades
+ if (result == nullptr)
+ result = intermediate.addConstantUnion(0.0, EbtFloat, loc);
+
+ return result;
+}
+
+TIntermTyped* TParseContext::handleBuiltInFunctionCall(TSourceLoc loc, TIntermNode* arguments,
+ const TFunction& function)
+{
+ checkLocation(loc, function.getBuiltInOp());
+ TIntermTyped *result = intermediate.addBuiltInFunctionCall(loc, function.getBuiltInOp(),
+ function.getParamCount() == 1,
+ arguments, function.getType());
+ // guard against a null result before dereferencing it for precision computation
+ if (result != nullptr && obeyPrecisionQualifiers())
+ computeBuiltinPrecisions(*result, function);
+
+ if (result == nullptr) {
+ if (arguments == nullptr)
+ error(loc, " wrong operand type", "Internal Error",
+ "built in unary operator function. Type: %s", "");
+ else
+ error(arguments->getLoc(), " wrong operand type", "Internal Error",
+ "built in unary operator function. Type: %s",
+ static_cast<TIntermTyped*>(arguments)->getCompleteString().c_str());
+ } else if (result->getAsOperator())
+ builtInOpCheck(loc, function, *result->getAsOperator());
+
+ return result;
+}
+
+// "The operation of a built-in function can have a different precision
+// qualification than the precision qualification of the resulting value.
+// These two precision qualifications are established as follows.
+//
+// The precision qualification of the operation of a built-in function is
+// based on the precision qualification of its input arguments and formal
+// parameters: When a formal parameter specifies a precision qualifier,
+// that is used, otherwise, the precision qualification of the calling
+// argument is used. The highest precision of these will be the precision
+// qualification of the operation of the built-in function. Generally,
+// this is applied across all arguments to a built-in function, with the
+// exceptions being:
+// - bitfieldExtract and bitfieldInsert ignore the 'offset' and 'bits'
+// arguments.
+// - interpolateAt* functions only look at the 'interpolant' argument.
+//
+// The precision qualification of the result of a built-in function is
+// determined in one of the following ways:
+//
+// - For the texture sampling, image load, and image store functions,
+// the precision of the return type matches the precision of the
+// sampler type
+//
+// Otherwise:
+//
+// - For prototypes that do not specify a resulting precision qualifier,
+// the precision will be the same as the precision of the operation.
+//
+// - For prototypes that do specify a resulting precision qualifier,
+// the specified precision qualifier is the precision qualification of
+// the result."
+//
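+// For example (illustrative, not part of the quoted spec text):
+//     mediump float a; highp float b;
+//     float c = clamp(a, 0.0, b);   // operation precision: highp, the max over the arguments;
+//                                   // result precision: highp too, since the prototype gives none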
+void TParseContext::computeBuiltinPrecisions(TIntermTyped& node, const TFunction& function)
+{
+ TPrecisionQualifier operationPrecision = EpqNone;
+ TPrecisionQualifier resultPrecision = EpqNone;
+
+ TIntermOperator* opNode = node.getAsOperator();
+ if (opNode == nullptr)
+ return;
+
+ if (TIntermUnary* unaryNode = node.getAsUnaryNode()) {
+ operationPrecision = std::max(function[0].type->getQualifier().precision,
+ unaryNode->getOperand()->getType().getQualifier().precision);
+ if (function.getType().getBasicType() != EbtBool)
+ resultPrecision = function.getType().getQualifier().precision == EpqNone ?
+ operationPrecision :
+ function.getType().getQualifier().precision;
+ } else if (TIntermAggregate* agg = node.getAsAggregate()) {
+ TIntermSequence& sequence = agg->getSequence();
+ unsigned int numArgs = (unsigned int)sequence.size();
+ switch (agg->getOp()) {
+ case EOpBitfieldExtract:
+ numArgs = 1;
+ break;
+ case EOpBitfieldInsert:
+ numArgs = 2;
+ break;
+ case EOpInterpolateAtCentroid:
+ case EOpInterpolateAtOffset:
+ case EOpInterpolateAtSample:
+ numArgs = 1;
+ break;
+ default:
+ break;
+ }
+ // find the maximum precision from the arguments and parameters
+ for (unsigned int arg = 0; arg < numArgs; ++arg) {
+ operationPrecision = std::max(operationPrecision, sequence[arg]->getAsTyped()->getQualifier().precision);
+ operationPrecision = std::max(operationPrecision, function[arg].type->getQualifier().precision);
+ }
+ // compute the result precision
+#ifdef AMD_EXTENSIONS
+ if (agg->isSampling() ||
+ agg->getOp() == EOpImageLoad || agg->getOp() == EOpImageStore ||
+ agg->getOp() == EOpImageLoadLod || agg->getOp() == EOpImageStoreLod)
+#else
+ if (agg->isSampling() || agg->getOp() == EOpImageLoad || agg->getOp() == EOpImageStore)
+#endif
+ resultPrecision = sequence[0]->getAsTyped()->getQualifier().precision;
+ else if (function.getType().getBasicType() != EbtBool)
+ resultPrecision = function.getType().getQualifier().precision == EpqNone ?
+ operationPrecision :
+ function.getType().getQualifier().precision;
+ }
+
+ // Propagate precision through this node and its children. That algorithm stops
+ // when a precision is found, so start by clearing this subroot precision
+ opNode->getQualifier().precision = EpqNone;
+ if (operationPrecision != EpqNone) {
+ opNode->propagatePrecision(operationPrecision);
+ opNode->setOperationPrecision(operationPrecision);
+ }
+ // Now, set the result precision, which might not match
+ opNode->getQualifier().precision = resultPrecision;
+}
+
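+// e.g. (illustrative, desktop profile): in 'float f() { return 1; }' the int literal is
+// converted to float, with a portability warning before version 420; 'return 1;' in a
+// void function is an error.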
+TIntermNode* TParseContext::handleReturnValue(const TSourceLoc& loc, TIntermTyped* value)
+{
+ storage16BitAssignmentCheck(loc, value->getType(), "return");
+
+ functionReturnsValue = true;
+ if (currentFunctionType->getBasicType() == EbtVoid) {
+ error(loc, "void function cannot return a value", "return", "");
+ return intermediate.addBranch(EOpReturn, loc);
+ } else if (*currentFunctionType != value->getType()) {
+ TIntermTyped* converted = intermediate.addConversion(EOpReturn, *currentFunctionType, value);
+ if (converted) {
+ if (*currentFunctionType != converted->getType())
+ error(loc, "cannot convert return value to function return type", "return", "");
+ if (version < 420)
+ warn(loc, "type conversion on return values was not explicitly allowed until version 420", "return", "");
+ return intermediate.addBranch(EOpReturn, converted, loc);
+ } else {
+ error(loc, "type does not match, or is not convertible to, the function's return type", "return", "");
+ return intermediate.addBranch(EOpReturn, value, loc);
+ }
+ } else
+ return intermediate.addBranch(EOpReturn, value, loc);
+}
+
+// See if the operation is being done in an illegal location.
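+// e.g. (illustrative): in a tessellation-control shader,
+//     void main() { if (gl_InvocationID == 0) barrier(); }   // error: barrier() inside flow control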
+void TParseContext::checkLocation(const TSourceLoc& loc, TOperator op)
+{
+ switch (op) {
+ case EOpBarrier:
+ if (language == EShLangTessControl) {
+ if (controlFlowNestingLevel > 0)
+ error(loc, "tessellation control barrier() cannot be placed within flow control", "", "");
+ if (! inMain)
+ error(loc, "tessellation control barrier() must be in main()", "", "");
+ else if (postEntryPointReturn)
+ error(loc, "tessellation control barrier() cannot be placed after a return from main()", "", "");
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+// Finish processing object.length(). This started earlier in handleDotDereference(), where
+// the ".length" part was recognized and semantically checked; it finishes here, where the
+// function syntax "()" is recognized.
+//
+// Return resulting tree node.
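+//
+// e.g. (illustrative):
+//     vec3 v;                      // v.length()   -> constant 3
+//     mat2x4 m;                    // m.length()   -> constant 2 (number of columns)
+//     buffer B { float a[]; } b;   // b.a.length() -> EOpArrayLength node, evaluated at run time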
+TIntermTyped* TParseContext::handleLengthMethod(const TSourceLoc& loc, TFunction* function, TIntermNode* intermNode)
+{
+ int length = 0;
+
+ if (function->getParamCount() > 0)
+ error(loc, "method does not accept any arguments", function->getName().c_str(), "");
+ else {
+ const TType& type = intermNode->getAsTyped()->getType();
+ if (type.isArray()) {
+ if (type.isUnsizedArray()) {
+ if (intermNode->getAsSymbolNode() && isIoResizeArray(type)) {
+ // We could be between a layout declaration that gives a built-in io array implicit size and
+ // a user redeclaration of that array, meaning we have to substitute its implicit size here
+ // without actually redeclaring the array. (It is an error to use a member before the
+ // redeclaration, but not an error to use the array name itself.)
+ const TString& name = intermNode->getAsSymbolNode()->getName();
+ if (name == "gl_in" || name == "gl_out"
+#ifdef NV_EXTENSIONS
+ || name == "gl_MeshVerticesNV"
+ || name == "gl_MeshPrimitivesNV"
+#endif
+ )
+ {
+ length = getIoArrayImplicitSize(type.getQualifier());
+ }
+ }
+ if (length == 0) {
+ if (intermNode->getAsSymbolNode() && isIoResizeArray(type))
+ error(loc, "", function->getName().c_str(), "array must first be sized by a redeclaration or layout qualifier");
+ else if (isRuntimeLength(*intermNode->getAsTyped())) {
+ // Create a unary op and let the back end handle it
+ return intermediate.addBuiltInFunctionCall(loc, EOpArrayLength, true, intermNode, TType(EbtInt));
+ } else
+ error(loc, "", function->getName().c_str(), "array must be declared with a size before using this method");
+ }
+ } else if (type.getOuterArrayNode()) {
+ // If the array's outer size is specified by an intermediate node, it means the array's length
+ // was specified by a specialization constant. In such a case, we should return the node of the
+ // specialization constants to represent the length.
+ return type.getOuterArrayNode();
+ } else
+ length = type.getOuterArraySize();
+ } else if (type.isMatrix())
+ length = type.getMatrixCols();
+ else if (type.isVector())
+ length = type.getVectorSize();
+ else if (type.isCoopMat())
+ return intermediate.addBuiltInFunctionCall(loc, EOpArrayLength, true, intermNode, TType(EbtInt));
+ else {
+ // we should not get here, because earlier semantic checking should have prevented this path
+ error(loc, ".length()", "unexpected use of .length()", "");
+ }
+ }
+
+ if (length == 0)
+ length = 1;
+
+ return intermediate.addConstantUnion(length, loc);
+}
+
+//
+// Add any needed implicit conversions for function-call arguments to input parameters.
+//
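+// e.g. (illustrative): calling 'void f(float x)' as 'f(1)' adds a conversion node above the
+// int literal so the argument arrives as the float the formal parameter expects.
+//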
+void TParseContext::addInputArgumentConversions(const TFunction& function, TIntermNode*& arguments) const
+{
+ TIntermAggregate* aggregate = arguments->getAsAggregate();
+
+ // Process each argument's conversion
+ for (int i = 0; i < function.getParamCount(); ++i) {
+ // At this early point there is a slight ambiguity between whether an aggregate 'arguments'
+ // is the single argument itself or its children are the arguments. Only one argument
+ // means take 'arguments' itself as the one argument.
+ TIntermTyped* arg = function.getParamCount() == 1 ? arguments->getAsTyped() : (aggregate ? aggregate->getSequence()[i]->getAsTyped() : arguments->getAsTyped());
+ if (*function[i].type != arg->getType()) {
+ if (function[i].type->getQualifier().isParamInput() &&
+ !function[i].type->isCoopMat()) {
+ // In-qualified arguments just need an extra node added above the argument to
+ // convert to the correct type.
+ arg = intermediate.addConversion(EOpFunctionCall, *function[i].type, arg);
+ if (arg) {
+ if (function.getParamCount() == 1)
+ arguments = arg;
+ else {
+ if (aggregate)
+ aggregate->getSequence()[i] = arg;
+ else
+ arguments = arg;
+ }
+ }
+ }
+ }
+ }
+}
+
+//
+// Add any needed implicit output conversions for function-call arguments. This
+// can require a new tree topology, complicated further by whether the function
+// has a return value.
+//
+// Returns a node of a subtree that evaluates to the return value of the function.
+//
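+// e.g. (illustrative): with 'void f(out int x)' and 'float g;', the call f(g) becomes
+// (f(tempArg), g = tempArg) for an int temporary; the assignment performs the int -> float
+// conversion that the argument binding itself could not.
+//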
+TIntermTyped* TParseContext::addOutputArgumentConversions(const TFunction& function, TIntermAggregate& intermNode) const
+{
+ TIntermSequence& arguments = intermNode.getSequence();
+
+ // Will there be any output conversions?
+ bool outputConversions = false;
+ for (int i = 0; i < function.getParamCount(); ++i) {
+ if (*function[i].type != arguments[i]->getAsTyped()->getType() && function[i].type->getQualifier().isParamOutput()) {
+ outputConversions = true;
+ break;
+ }
+ }
+
+ if (! outputConversions)
+ return &intermNode;
+
+ // Setup for the new tree, if needed:
+ //
+ // Output conversions need a different tree topology.
+ // Out-qualified arguments need a temporary of the correct type, with the call
+ // followed by an assignment of the temporary to the original argument:
+ // void: function(arg, ...) -> ( function(tempArg, ...), arg = tempArg, ...)
+ // ret = function(arg, ...) -> ret = (tempRet = function(tempArg, ...), arg = tempArg, ..., tempRet)
+ // Where the "tempArg" type needs no conversion as an argument, but will convert on assignment.
+ TIntermTyped* conversionTree = nullptr;
+ TVariable* tempRet = nullptr;
+ if (intermNode.getBasicType() != EbtVoid) {
+ // do the "tempRet = function(...), " bit from above
+ tempRet = makeInternalVariable("tempReturn", intermNode.getType());
+ TIntermSymbol* tempRetNode = intermediate.addSymbol(*tempRet, intermNode.getLoc());
+ conversionTree = intermediate.addAssign(EOpAssign, tempRetNode, &intermNode, intermNode.getLoc());
+ } else
+ conversionTree = &intermNode;
+
+ conversionTree = intermediate.makeAggregate(conversionTree);
+
+ // Process each argument's conversion
+ for (int i = 0; i < function.getParamCount(); ++i) {
+ if (*function[i].type != arguments[i]->getAsTyped()->getType()) {
+ if (function[i].type->getQualifier().isParamOutput()) {
+ // Out-qualified arguments need to use the topology set up above.
+ // do the " ...(tempArg, ...), arg = tempArg" bit from above
+ TType paramType;
+ paramType.shallowCopy(*function[i].type);
+ if (arguments[i]->getAsTyped()->getType().isParameterized() &&
+ !paramType.isParameterized()) {
+ paramType.shallowCopy(arguments[i]->getAsTyped()->getType());
+ paramType.copyTypeParameters(*arguments[i]->getAsTyped()->getType().getTypeParameters());
+ }
+ TVariable* tempArg = makeInternalVariable("tempArg", paramType);
+ tempArg->getWritableType().getQualifier().makeTemporary();
+ TIntermSymbol* tempArgNode = intermediate.addSymbol(*tempArg, intermNode.getLoc());
+ TIntermTyped* tempAssign = intermediate.addAssign(EOpAssign, arguments[i]->getAsTyped(), tempArgNode, arguments[i]->getLoc());
+ conversionTree = intermediate.growAggregate(conversionTree, tempAssign, arguments[i]->getLoc());
+ // replace the argument with another node for the same tempArg variable
+ arguments[i] = intermediate.addSymbol(*tempArg, intermNode.getLoc());
+ }
+ }
+ }
+
+ // Finalize the tree topology (see bigger comment above).
+ if (tempRet) {
+ // do the "..., tempRet" bit from above
+ TIntermSymbol* tempRetNode = intermediate.addSymbol(*tempRet, intermNode.getLoc());
+ conversionTree = intermediate.growAggregate(conversionTree, tempRetNode, intermNode.getLoc());
+ }
+ conversionTree = intermediate.setAggregateOperator(conversionTree, EOpComma, intermNode.getType(), intermNode.getLoc());
+
+ return conversionTree;
+}
+
+void TParseContext::memorySemanticsCheck(const TSourceLoc& loc, const TFunction& fnCandidate, const TIntermOperator& callNode)
+{
+ const TIntermSequence* argp = &callNode.getAsAggregate()->getSequence();
+
+ //const int gl_SemanticsRelaxed = 0x0;
+ const int gl_SemanticsAcquire = 0x2;
+ const int gl_SemanticsRelease = 0x4;
+ const int gl_SemanticsAcquireRelease = 0x8;
+ const int gl_SemanticsMakeAvailable = 0x2000;
+ const int gl_SemanticsMakeVisible = 0x4000;
+
+ //const int gl_StorageSemanticsNone = 0x0;
+ const int gl_StorageSemanticsBuffer = 0x40;
+ const int gl_StorageSemanticsShared = 0x100;
+ const int gl_StorageSemanticsImage = 0x800;
+ const int gl_StorageSemanticsOutput = 0x1000;
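+ // Illustrative (not from the original source), assuming GL_KHR_memory_scope_semantics:
+ //     atomicStore(b.x, 1, gl_ScopeDevice, gl_StorageSemanticsBuffer, gl_SemanticsRelease);  // OK
+ //     atomicStore(b.x, 1, gl_ScopeDevice, gl_StorageSemanticsBuffer, gl_SemanticsAcquire);  // error below: acquire on a store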
+
+ unsigned int semantics = 0, storageClassSemantics = 0;
+ unsigned int semantics2 = 0, storageClassSemantics2 = 0;
+
+ // Grab the semantics and storage class semantics from the operands, based on opcode
+ switch (callNode.getOp()) {
+ case EOpAtomicAdd:
+ case EOpAtomicMin:
+ case EOpAtomicMax:
+ case EOpAtomicAnd:
+ case EOpAtomicOr:
+ case EOpAtomicXor:
+ case EOpAtomicExchange:
+ case EOpAtomicStore:
+ storageClassSemantics = (*argp)[3]->getAsConstantUnion()->getConstArray()[0].getIConst();
+ semantics = (*argp)[4]->getAsConstantUnion()->getConstArray()[0].getIConst();
+ break;
+ case EOpAtomicLoad:
+ storageClassSemantics = (*argp)[2]->getAsConstantUnion()->getConstArray()[0].getIConst();
+ semantics = (*argp)[3]->getAsConstantUnion()->getConstArray()[0].getIConst();
+ break;
+ case EOpAtomicCompSwap:
+ storageClassSemantics = (*argp)[4]->getAsConstantUnion()->getConstArray()[0].getIConst();
+ semantics = (*argp)[5]->getAsConstantUnion()->getConstArray()[0].getIConst();
+ storageClassSemantics2 = (*argp)[6]->getAsConstantUnion()->getConstArray()[0].getIConst();
+ semantics2 = (*argp)[7]->getAsConstantUnion()->getConstArray()[0].getIConst();
+ break;
+
+ case EOpImageAtomicAdd:
+ case EOpImageAtomicMin:
+ case EOpImageAtomicMax:
+ case EOpImageAtomicAnd:
+ case EOpImageAtomicOr:
+ case EOpImageAtomicXor:
+ case EOpImageAtomicExchange:
+ case EOpImageAtomicStore:
+ storageClassSemantics = (*argp)[4]->getAsConstantUnion()->getConstArray()[0].getIConst();
+ semantics = (*argp)[5]->getAsConstantUnion()->getConstArray()[0].getIConst();
+ break;
+ case EOpImageAtomicLoad:
+ storageClassSemantics = (*argp)[3]->getAsConstantUnion()->getConstArray()[0].getIConst();
+ semantics = (*argp)[4]->getAsConstantUnion()->getConstArray()[0].getIConst();
+ break;
+ case EOpImageAtomicCompSwap:
+ storageClassSemantics = (*argp)[5]->getAsConstantUnion()->getConstArray()[0].getIConst();
+ semantics = (*argp)[6]->getAsConstantUnion()->getConstArray()[0].getIConst();
+ storageClassSemantics2 = (*argp)[7]->getAsConstantUnion()->getConstArray()[0].getIConst();
+ semantics2 = (*argp)[8]->getAsConstantUnion()->getConstArray()[0].getIConst();
+ break;
+
+ case EOpBarrier:
+ storageClassSemantics = (*argp)[2]->getAsConstantUnion()->getConstArray()[0].getIConst();
+ semantics = (*argp)[3]->getAsConstantUnion()->getConstArray()[0].getIConst();
+ break;
+ case EOpMemoryBarrier:
+ storageClassSemantics = (*argp)[1]->getAsConstantUnion()->getConstArray()[0].getIConst();
+ semantics = (*argp)[2]->getAsConstantUnion()->getConstArray()[0].getIConst();
+ break;
+ default:
+ break;
+ }
+
+ if ((semantics & gl_SemanticsAcquire) &&
+ (callNode.getOp() == EOpAtomicStore || callNode.getOp() == EOpImageAtomicStore)) {
+ error(loc, "gl_SemanticsAcquire must not be used with (image) atomic store",
+ fnCandidate.getName().c_str(), "");
+ }
+ if ((semantics & gl_SemanticsRelease) &&
+ (callNode.getOp() == EOpAtomicLoad || callNode.getOp() == EOpImageAtomicLoad)) {
+ error(loc, "gl_SemanticsRelease must not be used with (image) atomic load",
+ fnCandidate.getName().c_str(), "");
+ }
+ if ((semantics & gl_SemanticsAcquireRelease) &&
+ (callNode.getOp() == EOpAtomicStore || callNode.getOp() == EOpImageAtomicStore ||
+ callNode.getOp() == EOpAtomicLoad || callNode.getOp() == EOpImageAtomicLoad)) {
+ error(loc, "gl_SemanticsAcquireRelease must not be used with (image) atomic load/store",
+ fnCandidate.getName().c_str(), "");
+ }
+ if (((semantics | semantics2) & ~(gl_SemanticsAcquire |
+ gl_SemanticsRelease |
+ gl_SemanticsAcquireRelease |
+ gl_SemanticsMakeAvailable |
+ gl_SemanticsMakeVisible))) {
+ error(loc, "Invalid semantics value", fnCandidate.getName().c_str(), "");
+ }
+ if (((storageClassSemantics | storageClassSemantics2) & ~(gl_StorageSemanticsBuffer |
+ gl_StorageSemanticsShared |
+ gl_StorageSemanticsImage |
+ gl_StorageSemanticsOutput))) {
+ error(loc, "Invalid storage class semantics value", fnCandidate.getName().c_str(), "");
+ }
+
+ if (callNode.getOp() == EOpMemoryBarrier) {
+ if (!IsPow2(semantics & (gl_SemanticsAcquire | gl_SemanticsRelease | gl_SemanticsAcquireRelease))) {
+ error(loc, "Semantics must include exactly one of gl_SemanticsRelease, gl_SemanticsAcquire, or "
+ "gl_SemanticsAcquireRelease", fnCandidate.getName().c_str(), "");
+ }
+ } else {
+ if (semantics & (gl_SemanticsAcquire | gl_SemanticsRelease | gl_SemanticsAcquireRelease)) {
+ if (!IsPow2(semantics & (gl_SemanticsAcquire | gl_SemanticsRelease | gl_SemanticsAcquireRelease))) {
+ error(loc, "Semantics must not include multiple of gl_SemanticsRelease, gl_SemanticsAcquire, or "
+ "gl_SemanticsAcquireRelease", fnCandidate.getName().c_str(), "");
+ }
+ }
+ if (semantics2 & (gl_SemanticsAcquire | gl_SemanticsRelease | gl_SemanticsAcquireRelease)) {
+ if (!IsPow2(semantics2 & (gl_SemanticsAcquire | gl_SemanticsRelease | gl_SemanticsAcquireRelease))) {
+ error(loc, "semUnequal must not include multiple of gl_SemanticsRelease, gl_SemanticsAcquire, or "
+ "gl_SemanticsAcquireRelease", fnCandidate.getName().c_str(), "");
+ }
+ }
+ }
+ if (callNode.getOp() == EOpMemoryBarrier) {
+ if (storageClassSemantics == 0) {
+ error(loc, "Storage class semantics must not be zero", fnCandidate.getName().c_str(), "");
+ }
+ }
+ if (callNode.getOp() == EOpBarrier && semantics != 0 && storageClassSemantics == 0) {
+ error(loc, "Storage class semantics must not be zero", fnCandidate.getName().c_str(), "");
+ }
+ if ((callNode.getOp() == EOpAtomicCompSwap || callNode.getOp() == EOpImageAtomicCompSwap) &&
+ (semantics2 & (gl_SemanticsRelease | gl_SemanticsAcquireRelease))) {
+ error(loc, "semUnequal must not be gl_SemanticsRelease or gl_SemanticsAcquireRelease",
+ fnCandidate.getName().c_str(), "");
+ }
+ if ((semantics & gl_SemanticsMakeAvailable) &&
+ !(semantics & (gl_SemanticsRelease | gl_SemanticsAcquireRelease))) {
+ error(loc, "gl_SemanticsMakeAvailable requires gl_SemanticsRelease or gl_SemanticsAcquireRelease",
+ fnCandidate.getName().c_str(), "");
+ }
+ if ((semantics & gl_SemanticsMakeVisible) &&
+ !(semantics & (gl_SemanticsAcquire | gl_SemanticsAcquireRelease))) {
+ error(loc, "gl_SemanticsMakeVisible requires gl_SemanticsAcquire or gl_SemanticsAcquireRelease",
+ fnCandidate.getName().c_str(), "");
+ }
+}
+
+//
+// Do additional checking of built-in function calls that is not caught
+// by normal semantic checks on argument type, extension tagging, etc.
+//
+// Assumes there has been a semantically correct match to a built-in function prototype.
+//
+void TParseContext::builtInOpCheck(const TSourceLoc& loc, const TFunction& fnCandidate, TIntermOperator& callNode)
+{
+ // Set up convenience accessors to the argument(s). There are almost always
+ // multiple arguments for the cases below, but when there might be only one,
+ // check unaryArg first.
+ const TIntermSequence* argp = nullptr; // confusing to use [] syntax on a pointer, so this is to help get a reference
+ const TIntermTyped* unaryArg = nullptr;
+ const TIntermTyped* arg0 = nullptr;
+ if (callNode.getAsAggregate()) {
+ argp = &callNode.getAsAggregate()->getSequence();
+ if (argp->size() > 0)
+ arg0 = (*argp)[0]->getAsTyped();
+ } else {
+ assert(callNode.getAsUnaryNode());
+ unaryArg = callNode.getAsUnaryNode()->getOperand();
+ arg0 = unaryArg;
+ }
+
+ TString featureString;
+ const char* feature = nullptr;
+ switch (callNode.getOp()) {
+ case EOpTextureGather:
+ case EOpTextureGatherOffset:
+ case EOpTextureGatherOffsets:
+ {
+ // Figure out which variants are allowed by what extensions,
+ // and what arguments must be constant for which situations.
+
+ featureString = fnCandidate.getName();
+ featureString += "(...)";
+ feature = featureString.c_str();
+ profileRequires(loc, EEsProfile, 310, nullptr, feature);
+ int compArg = -1; // track which argument, if any, is the constant component argument
+ switch (callNode.getOp()) {
+ case EOpTextureGather:
+ // More than two arguments, or a rectangular or shadow sampler, needs gpu_shader5;
+ // otherwise, GL_ARB_texture_gather suffices.
+ if (fnCandidate.getParamCount() > 2 || fnCandidate[0].type->getSampler().dim == EsdRect || fnCandidate[0].type->getSampler().shadow) {
+ profileRequires(loc, ~EEsProfile, 400, E_GL_ARB_gpu_shader5, feature);
+ if (! fnCandidate[0].type->getSampler().shadow)
+ compArg = 2;
+ } else
+ profileRequires(loc, ~EEsProfile, 400, E_GL_ARB_texture_gather, feature);
+ break;
+ case EOpTextureGatherOffset:
+ // GL_ARB_texture_gather is good enough for 2D non-shadow textures with no component argument
+ if (fnCandidate[0].type->getSampler().dim == Esd2D && ! fnCandidate[0].type->getSampler().shadow && fnCandidate.getParamCount() == 3)
+ profileRequires(loc, ~EEsProfile, 400, E_GL_ARB_texture_gather, feature);
+ else
+ profileRequires(loc, ~EEsProfile, 400, E_GL_ARB_gpu_shader5, feature);
+ if (! (*argp)[fnCandidate[0].type->getSampler().shadow ? 3 : 2]->getAsConstantUnion())
+ profileRequires(loc, EEsProfile, 320, Num_AEP_gpu_shader5, AEP_gpu_shader5,
+ "non-constant offset argument");
+ if (! fnCandidate[0].type->getSampler().shadow)
+ compArg = 3;
+ break;
+ case EOpTextureGatherOffsets:
+ profileRequires(loc, ~EEsProfile, 400, E_GL_ARB_gpu_shader5, feature);
+ if (! fnCandidate[0].type->getSampler().shadow)
+ compArg = 3;
+ // check for constant offsets
+ if (! (*argp)[fnCandidate[0].type->getSampler().shadow ? 3 : 2]->getAsConstantUnion())
+ error(loc, "must be a compile-time constant:", feature, "offsets argument");
+ break;
+ default:
+ break;
+ }
+
+ if (compArg > 0 && compArg < fnCandidate.getParamCount()) {
+ if ((*argp)[compArg]->getAsConstantUnion()) {
+ int value = (*argp)[compArg]->getAsConstantUnion()->getConstArray()[0].getIConst();
+ if (value < 0 || value > 3)
+ error(loc, "must be 0, 1, 2, or 3:", feature, "component argument");
+ } else
+ error(loc, "must be a compile-time constant:", feature, "component argument");
+ }
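+ // e.g. (illustrative): textureGather(s, uv, 2) selects the blue channel; a non-constant
+ // component, or one outside [0, 3], is rejected by the checks above.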
+
+#ifdef AMD_EXTENSIONS
+ bool bias = false;
+ if (callNode.getOp() == EOpTextureGather)
+ bias = fnCandidate.getParamCount() > 3;
+ else if (callNode.getOp() == EOpTextureGatherOffset ||
+ callNode.getOp() == EOpTextureGatherOffsets)
+ bias = fnCandidate.getParamCount() > 4;
+
+ if (bias) {
+ featureString = fnCandidate.getName();
+ featureString += "with bias argument";
+ feature = featureString.c_str();
+ profileRequires(loc, ~EEsProfile, 450, nullptr, feature);
+ requireExtensions(loc, 1, &E_GL_AMD_texture_gather_bias_lod, feature);
+ }
+#endif
+
+ break;
+ }
+
+#ifdef AMD_EXTENSIONS
+ case EOpSparseTextureGather:
+ case EOpSparseTextureGatherOffset:
+ case EOpSparseTextureGatherOffsets:
+ {
+ bool bias = false;
+ if (callNode.getOp() == EOpSparseTextureGather)
+ bias = fnCandidate.getParamCount() > 4;
+ else if (callNode.getOp() == EOpSparseTextureGatherOffset ||
+ callNode.getOp() == EOpSparseTextureGatherOffsets)
+ bias = fnCandidate.getParamCount() > 5;
+
+ if (bias) {
+ featureString = fnCandidate.getName();
+ featureString += "with bias argument";
+ feature = featureString.c_str();
+ profileRequires(loc, ~EEsProfile, 450, nullptr, feature);
+ requireExtensions(loc, 1, &E_GL_AMD_texture_gather_bias_lod, feature);
+ }
+
+ break;
+ }
+
+ case EOpSparseTextureGatherLod:
+ case EOpSparseTextureGatherLodOffset:
+ case EOpSparseTextureGatherLodOffsets:
+ {
+ requireExtensions(loc, 1, &E_GL_ARB_sparse_texture2, fnCandidate.getName().c_str());
+ break;
+ }
+
+ case EOpSwizzleInvocations:
+ {
+ if (! (*argp)[1]->getAsConstantUnion())
+ error(loc, "argument must be compile-time constant", "offset", "");
+ else {
+ unsigned offset[4] = {};
+ offset[0] = (*argp)[1]->getAsConstantUnion()->getConstArray()[0].getUConst();
+ offset[1] = (*argp)[1]->getAsConstantUnion()->getConstArray()[1].getUConst();
+ offset[2] = (*argp)[1]->getAsConstantUnion()->getConstArray()[2].getUConst();
+ offset[3] = (*argp)[1]->getAsConstantUnion()->getConstArray()[3].getUConst();
+ if (offset[0] > 3 || offset[1] > 3 || offset[2] > 3 || offset[3] > 3)
+ error(loc, "components must be in the range [0, 3]", "offset", "");
+ }
+
+ break;
+ }
+
+ case EOpSwizzleInvocationsMasked:
+ {
+ if (! (*argp)[1]->getAsConstantUnion())
+ error(loc, "argument must be compile-time constant", "mask", "");
+ else {
+ unsigned mask[3] = {};
+ mask[0] = (*argp)[1]->getAsConstantUnion()->getConstArray()[0].getUConst();
+ mask[1] = (*argp)[1]->getAsConstantUnion()->getConstArray()[1].getUConst();
+ mask[2] = (*argp)[1]->getAsConstantUnion()->getConstArray()[2].getUConst();
+ if (mask[0] > 31 || mask[1] > 31 || mask[2] > 31)
+ error(loc, "components must be in the range [0, 31]", "mask", "");
+ }
+
+ break;
+ }
+#endif
+
+ case EOpTextureOffset:
+ case EOpTextureFetchOffset:
+ case EOpTextureProjOffset:
+ case EOpTextureLodOffset:
+ case EOpTextureProjLodOffset:
+ case EOpTextureGradOffset:
+ case EOpTextureProjGradOffset:
+ {
+ // Handle texture-offset limits checking
+ // Pick which argument has to hold constant offsets
+ int arg = -1;
+ switch (callNode.getOp()) {
+ case EOpTextureOffset: arg = 2; break;
+ case EOpTextureFetchOffset: arg = (arg0->getType().getSampler().dim != EsdRect) ? 3 : 2; break;
+ case EOpTextureProjOffset: arg = 2; break;
+ case EOpTextureLodOffset: arg = 3; break;
+ case EOpTextureProjLodOffset: arg = 3; break;
+ case EOpTextureGradOffset: arg = 4; break;
+ case EOpTextureProjGradOffset: arg = 4; break;
+ default:
+ assert(0);
+ break;
+ }
+
+ if (arg > 0) {
+
+#ifdef AMD_EXTENSIONS
+ bool f16ShadowCompare = (*argp)[1]->getAsTyped()->getBasicType() == EbtFloat16 && arg0->getType().getSampler().shadow;
+ if (f16ShadowCompare)
+ ++arg;
+#endif
+ if (! (*argp)[arg]->getAsConstantUnion())
+ error(loc, "argument must be compile-time constant", "texel offset", "");
+ else {
+ const TType& type = (*argp)[arg]->getAsTyped()->getType();
+ for (int c = 0; c < type.getVectorSize(); ++c) {
+ int offset = (*argp)[arg]->getAsConstantUnion()->getConstArray()[c].getIConst();
+ if (offset > resources.maxProgramTexelOffset || offset < resources.minProgramTexelOffset)
+ error(loc, "value is out of range:", "texel offset", "[gl_MinProgramTexelOffset, gl_MaxProgramTexelOffset]");
+ }
+ }
+ }
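+ // e.g. (illustrative): textureOffset(s, uv, ivec2(1, -1)) passes when both components lie in
+ // [gl_MinProgramTexelOffset, gl_MaxProgramTexelOffset]; a non-constant offset is an error above.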
+
+ break;
+ }
+
+#ifdef NV_EXTENSIONS
+ case EOpTraceNV:
+ if (!(*argp)[10]->getAsConstantUnion())
+ error(loc, "argument must be compile-time constant", "payload number", "");
+ break;
+ case EOpExecuteCallableNV:
+ if (!(*argp)[1]->getAsConstantUnion())
+ error(loc, "argument must be compile-time constant", "callable data number", "");
+ break;
+#endif
+
+ case EOpTextureQuerySamples:
+ case EOpImageQuerySamples:
+ // GL_ARB_shader_texture_image_samples
+ profileRequires(loc, ~EEsProfile, 450, E_GL_ARB_shader_texture_image_samples, "textureSamples and imageSamples");
+ break;
+
+ case EOpImageAtomicAdd:
+ case EOpImageAtomicMin:
+ case EOpImageAtomicMax:
+ case EOpImageAtomicAnd:
+ case EOpImageAtomicOr:
+ case EOpImageAtomicXor:
+ case EOpImageAtomicExchange:
+ case EOpImageAtomicCompSwap:
+ case EOpImageAtomicLoad:
+ case EOpImageAtomicStore:
+ {
+ // Make sure the image types have the correct layout() format and correct argument types
+ const TType& imageType = arg0->getType();
+ if (imageType.getSampler().type == EbtInt || imageType.getSampler().type == EbtUint) {
+ if (imageType.getQualifier().layoutFormat != ElfR32i && imageType.getQualifier().layoutFormat != ElfR32ui)
+ error(loc, "only supported on image with format r32i or r32ui", fnCandidate.getName().c_str(), "");
+ } else {
+ if (fnCandidate.getName().compare(0, 19, "imageAtomicExchange") != 0)
+ error(loc, "only supported on integer images", fnCandidate.getName().c_str(), "");
+ else if (imageType.getQualifier().layoutFormat != ElfR32f && profile == EEsProfile)
+ error(loc, "only supported on image with format r32f", fnCandidate.getName().c_str(), "");
+ }
+
+ const size_t maxArgs = imageType.getSampler().isMultiSample() ? 5 : 4;
+ if (argp->size() > maxArgs) {
+ requireExtensions(loc, 1, &E_GL_KHR_memory_scope_semantics, fnCandidate.getName().c_str());
+ memorySemanticsCheck(loc, fnCandidate, callNode);
+ }
+
+ break;
+ }
+
+ case EOpAtomicAdd:
+ case EOpAtomicMin:
+ case EOpAtomicMax:
+ case EOpAtomicAnd:
+ case EOpAtomicOr:
+ case EOpAtomicXor:
+ case EOpAtomicExchange:
+ case EOpAtomicCompSwap:
+ case EOpAtomicLoad:
+ case EOpAtomicStore:
+ {
+ if (argp->size() > 3) {
+ requireExtensions(loc, 1, &E_GL_KHR_memory_scope_semantics, fnCandidate.getName().c_str());
+ memorySemanticsCheck(loc, fnCandidate, callNode);
+ } else if (arg0->getType().getBasicType() == EbtInt64 || arg0->getType().getBasicType() == EbtUint64) {
+#ifdef NV_EXTENSIONS
+ const char* const extensions[2] = { E_GL_NV_shader_atomic_int64,
+ E_GL_EXT_shader_atomic_int64 };
+ requireExtensions(loc, 2, extensions, fnCandidate.getName().c_str());
+#else
+ requireExtensions(loc, 1, &E_GL_EXT_shader_atomic_int64, fnCandidate.getName().c_str());
+#endif
+ }
+ break;
+ }
+
+ case EOpInterpolateAtCentroid:
+ case EOpInterpolateAtSample:
+ case EOpInterpolateAtOffset:
+#ifdef AMD_EXTENSIONS
+ case EOpInterpolateAtVertex:
+#endif
+ // Make sure the first argument is an interpolant, or an array element of an interpolant
+ if (arg0->getType().getQualifier().storage != EvqVaryingIn) {
+ // It might still be an array element.
+ //
+ // We could check more, but the semantics of the first argument are already met; the
+ // only way to turn an array into a float/vec* is array dereference and swizzle.
+ //
+ // ES and desktop 4.3 and earlier: swizzles may not be used
+ // desktop 4.4 and later: swizzles may be used
+ bool swizzleOkay = (profile != EEsProfile) && (version >= 440);
+ const TIntermTyped* base = TIntermediate::findLValueBase(arg0, swizzleOkay);
+ if (base == nullptr || base->getType().getQualifier().storage != EvqVaryingIn)
+ error(loc, "first argument must be an interpolant, or interpolant-array element", fnCandidate.getName().c_str(), "");
+ }
+
+#ifdef AMD_EXTENSIONS
+ if (callNode.getOp() == EOpInterpolateAtVertex) {
+ if (!arg0->getType().getQualifier().isExplicitInterpolation())
+ error(loc, "argument must be qualified as __explicitInterpAMD in", "interpolant", "");
+ else {
+ if (! (*argp)[1]->getAsConstantUnion())
+ error(loc, "argument must be compile-time constant", "vertex index", "");
+ else {
+ unsigned vertexIdx = (*argp)[1]->getAsConstantUnion()->getConstArray()[0].getUConst();
+ if (vertexIdx > 2)
+ error(loc, "must be in the range [0, 2]", "vertex index", "");
+ }
+ }
+ }
+#endif
+
+ break;
+
+ case EOpEmitStreamVertex:
+ case EOpEndStreamPrimitive:
+ intermediate.setMultiStream();
+ break;
+
+ case EOpSubgroupClusteredAdd:
+ case EOpSubgroupClusteredMul:
+ case EOpSubgroupClusteredMin:
+ case EOpSubgroupClusteredMax:
+ case EOpSubgroupClusteredAnd:
+ case EOpSubgroupClusteredOr:
+ case EOpSubgroupClusteredXor:
+ // The <clusterSize> as used in the subgroupClustered<op>() operations must be:
+ // - An integral constant expression.
+ // - At least 1.
+ // - A power of 2.
+ if ((*argp)[1]->getAsConstantUnion() == nullptr)
+ error(loc, "argument must be compile-time constant", "cluster size", "");
+ else {
+ int size = (*argp)[1]->getAsConstantUnion()->getConstArray()[0].getIConst();
+ if (size < 1)
+ error(loc, "argument must be at least 1", "cluster size", "");
+ else if (!IsPow2(size))
+ error(loc, "argument must be a power of 2", "cluster size", "");
+ }
+ break;
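+ // e.g. (illustrative): subgroupClusteredAdd(x, 4u) is accepted; a cluster size of 3
+ // (not a power of 2), of 0, or given by a non-constant expression is rejected above.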
+
+ case EOpSubgroupBroadcast:
+ // <id> must be an integral constant expression.
+ if ((*argp)[1]->getAsConstantUnion() == nullptr)
+ error(loc, "argument must be compile-time constant", "id", "");
+ break;
+
+ case EOpBarrier:
+ case EOpMemoryBarrier:
+ if (argp->size() > 0) {
+ requireExtensions(loc, 1, &E_GL_KHR_memory_scope_semantics, fnCandidate.getName().c_str());
+ memorySemanticsCheck(loc, fnCandidate, callNode);
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ // Texture operations on texture objects (aside from texelFetch on a
+ // textureBuffer) require EXT_samplerless_texture_functions.
+ switch (callNode.getOp()) {
+ case EOpTextureQuerySize:
+ case EOpTextureQueryLevels:
+ case EOpTextureQuerySamples:
+ case EOpTextureFetch:
+ case EOpTextureFetchOffset:
+ {
+ const TSampler& sampler = fnCandidate[0].type->getSampler();
+
+ const bool isTexture = sampler.isTexture() && !sampler.isCombined();
+ const bool isBuffer = sampler.dim == EsdBuffer;
+ const bool isFetch = callNode.getOp() == EOpTextureFetch || callNode.getOp() == EOpTextureFetchOffset;
+
+ if (isTexture && (!isBuffer || !isFetch))
+ requireExtensions(loc, 1, &E_GL_EXT_samplerless_texture_functions, fnCandidate.getName().c_str());
+
+ break;
+ }
+
+ default:
+ break;
+ }
+
+ if (callNode.getOp() > EOpSubgroupGuardStart && callNode.getOp() < EOpSubgroupGuardStop) {
+ // these require SPIR-V 1.3
+ if (spvVersion.spv > 0 && spvVersion.spv < EShTargetSpv_1_3)
+ error(loc, "requires SPIR-V 1.3", "subgroup op", "");
+ }
+}
+
+extern bool PureOperatorBuiltins;
+
+// Deprecated! Use PureOperatorBuiltins == true instead, in which case this
+// functionality is handled in builtInOpCheck() instead of here.
+//
+// Do additional checking of built-in function calls that were not mapped
+// to built-in operations (e.g., texturing functions).
+//
+// Assumes there has been a semantically correct match to a built-in function.
+//
+void TParseContext::nonOpBuiltInCheck(const TSourceLoc& loc, const TFunction& fnCandidate, TIntermAggregate& callNode)
+{
+ // Further maintenance of this function is deprecated, because the "correct"
+ // future-oriented design is to not have to do string compares on function names.
+
+ // If PureOperatorBuiltins == true, then all built-ins should be mapped
+ // to a TOperator, and this function would then never get called.
+
+ assert(PureOperatorBuiltins == false);
+
+ // built-in texturing functions get their return value precision from the precision of the sampler
+ if (fnCandidate.getType().getQualifier().precision == EpqNone &&
+ fnCandidate.getParamCount() > 0 && fnCandidate[0].type->getBasicType() == EbtSampler)
+ callNode.getQualifier().precision = callNode.getSequence()[0]->getAsTyped()->getQualifier().precision;
+
+ if (fnCandidate.getName().compare(0, 7, "texture") == 0) {
+ if (fnCandidate.getName().compare(0, 13, "textureGather") == 0) {
+ TString featureString = fnCandidate.getName() + "(...)";
+ const char* feature = featureString.c_str();
+ profileRequires(loc, EEsProfile, 310, nullptr, feature);
+
+ int compArg = -1; // track which argument, if any, is the constant component argument
+ if (fnCandidate.getName().compare("textureGatherOffset") == 0) {
+ // GL_ARB_texture_gather is good enough for 2D non-shadow textures with no component argument
+ if (fnCandidate[0].type->getSampler().dim == Esd2D && ! fnCandidate[0].type->getSampler().shadow && fnCandidate.getParamCount() == 3)
+ profileRequires(loc, ~EEsProfile, 400, E_GL_ARB_texture_gather, feature);
+ else
+ profileRequires(loc, ~EEsProfile, 400, E_GL_ARB_gpu_shader5, feature);
+ int offsetArg = fnCandidate[0].type->getSampler().shadow ? 3 : 2;
+ if (! callNode.getSequence()[offsetArg]->getAsConstantUnion())
+ profileRequires(loc, EEsProfile, 320, Num_AEP_gpu_shader5, AEP_gpu_shader5,
+ "non-constant offset argument");
+ if (! fnCandidate[0].type->getSampler().shadow)
+ compArg = 3;
+ } else if (fnCandidate.getName().compare("textureGatherOffsets") == 0) {
+ profileRequires(loc, ~EEsProfile, 400, E_GL_ARB_gpu_shader5, feature);
+ if (! fnCandidate[0].type->getSampler().shadow)
+ compArg = 3;
+ // check for constant offsets
+ int offsetArg = fnCandidate[0].type->getSampler().shadow ? 3 : 2;
+ if (! callNode.getSequence()[offsetArg]->getAsConstantUnion())
+ error(loc, "must be a compile-time constant:", feature, "offsets argument");
+ } else if (fnCandidate.getName().compare("textureGather") == 0) {
+ // More than two arguments, or a rectangular or shadow sampler, needs gpu_shader5;
+ // otherwise, GL_ARB_texture_gather suffices.
+ if (fnCandidate.getParamCount() > 2 || fnCandidate[0].type->getSampler().dim == EsdRect || fnCandidate[0].type->getSampler().shadow) {
+ profileRequires(loc, ~EEsProfile, 400, E_GL_ARB_gpu_shader5, feature);
+ if (! fnCandidate[0].type->getSampler().shadow)
+ compArg = 2;
+ } else
+ profileRequires(loc, ~EEsProfile, 400, E_GL_ARB_texture_gather, feature);
+ }
+
+ if (compArg > 0 && compArg < fnCandidate.getParamCount()) {
+ if (callNode.getSequence()[compArg]->getAsConstantUnion()) {
+ int value = callNode.getSequence()[compArg]->getAsConstantUnion()->getConstArray()[0].getIConst();
+ if (value < 0 || value > 3)
+ error(loc, "must be 0, 1, 2, or 3:", feature, "component argument");
+ } else
+ error(loc, "must be a compile-time constant:", feature, "component argument");
+ }
+ } else {
+ // this is only for functions not starting with "textureGather"...
+ if (fnCandidate.getName().find("Offset") != TString::npos) {
+
+ // Handle texture-offset limits checking
+ int arg = -1;
+ if (fnCandidate.getName().compare("textureOffset") == 0)
+ arg = 2;
+ else if (fnCandidate.getName().compare("texelFetchOffset") == 0)
+ arg = 3;
+ else if (fnCandidate.getName().compare("textureProjOffset") == 0)
+ arg = 2;
+ else if (fnCandidate.getName().compare("textureLodOffset") == 0)
+ arg = 3;
+ else if (fnCandidate.getName().compare("textureProjLodOffset") == 0)
+ arg = 3;
+ else if (fnCandidate.getName().compare("textureGradOffset") == 0)
+ arg = 4;
+ else if (fnCandidate.getName().compare("textureProjGradOffset") == 0)
+ arg = 4;
+
+ if (arg > 0) {
+ if (! callNode.getSequence()[arg]->getAsConstantUnion())
+ error(loc, "argument must be compile-time constant", "texel offset", "");
+ else {
+ const TType& type = callNode.getSequence()[arg]->getAsTyped()->getType();
+ for (int c = 0; c < type.getVectorSize(); ++c) {
+ int offset = callNode.getSequence()[arg]->getAsConstantUnion()->getConstArray()[c].getIConst();
+ if (offset > resources.maxProgramTexelOffset || offset < resources.minProgramTexelOffset)
+ error(loc, "value is out of range:", "texel offset", "[gl_MinProgramTexelOffset, gl_MaxProgramTexelOffset]");
+ }
+ }
+ }
+ }
+ }
+ }
+
+ // GL_ARB_shader_texture_image_samples
+ if (fnCandidate.getName().compare(0, 14, "textureSamples") == 0 || fnCandidate.getName().compare(0, 12, "imageSamples") == 0)
+ profileRequires(loc, ~EEsProfile, 450, E_GL_ARB_shader_texture_image_samples, "textureSamples and imageSamples");
+
+ if (fnCandidate.getName().compare(0, 11, "imageAtomic") == 0) {
+ const TType& imageType = callNode.getSequence()[0]->getAsTyped()->getType();
+ if (imageType.getSampler().type == EbtInt || imageType.getSampler().type == EbtUint) {
+ if (imageType.getQualifier().layoutFormat != ElfR32i && imageType.getQualifier().layoutFormat != ElfR32ui)
+ error(loc, "only supported on image with format r32i or r32ui", fnCandidate.getName().c_str(), "");
+ } else {
+ if (fnCandidate.getName().compare(0, 19, "imageAtomicExchange") != 0)
+ error(loc, "only supported on integer images", fnCandidate.getName().c_str(), "");
+ else if (imageType.getQualifier().layoutFormat != ElfR32f && profile == EEsProfile)
+ error(loc, "only supported on image with format r32f", fnCandidate.getName().c_str(), "");
+ }
+ }
+}
+
+//
+// Do any extra checking for a user function call.
+//
+void TParseContext::userFunctionCallCheck(const TSourceLoc& loc, TIntermAggregate& callNode)
+{
+ TIntermSequence& arguments = callNode.getSequence();
+
+ for (int i = 0; i < (int)arguments.size(); ++i)
+ samplerConstructorLocationCheck(loc, "call argument", arguments[i]);
+}
+
+//
+// Emit an error if this is a sampler constructor
+//
+void TParseContext::samplerConstructorLocationCheck(const TSourceLoc& loc, const char* token, TIntermNode* node)
+{
+ if (node->getAsOperator() && node->getAsOperator()->getOp() == EOpConstructTextureSampler)
+ error(loc, "sampler constructor must appear at point of use", token, "");
+}
+
+//
+// Handle seeing a built-in constructor in a grammar production.
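+// e.g. (illustrative): 'vec3' in 'vec3(1.0)' arrives here and maps to EOpConstructVec3;
+// a type with no constructor op is reported and recovered as a float constructor.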
+//
+TFunction* TParseContext::handleConstructorCall(const TSourceLoc& loc, const TPublicType& publicType)
+{
+ TType type(publicType);
+ type.getQualifier().precision = EpqNone;
+
+ if (type.isArray()) {
+ profileRequires(loc, ENoProfile, 120, E_GL_3DL_array_objects, "arrayed constructor");
+ profileRequires(loc, EEsProfile, 300, nullptr, "arrayed constructor");
+ }
+
+ TOperator op = intermediate.mapTypeToConstructorOp(type);
+
+ if (op == EOpNull) {
+ error(loc, "cannot construct this type", type.getBasicString(), "");
+ op = EOpConstructFloat;
+ TType errorType(EbtFloat);
+ type.shallowCopy(errorType);
+ }
+
+ TString empty("");
+
+ return new TFunction(&empty, type, op);
+}
+
+// Handle seeing a precision qualifier in the grammar.
+void TParseContext::handlePrecisionQualifier(const TSourceLoc& /*loc*/, TQualifier& qualifier, TPrecisionQualifier precision)
+{
+ if (obeyPrecisionQualifiers())
+ qualifier.precision = precision;
+}
+
+// Check for messages to give on seeing a precision qualifier used in a
+// declaration in the grammar.
+void TParseContext::checkPrecisionQualifier(const TSourceLoc& loc, TPrecisionQualifier)
+{
+ if (precisionManager.shouldWarnAboutDefaults()) {
+ warn(loc, "all default precisions are highp; use precision statements to quiet warning, e.g.:\n"
+ " \"precision mediump int; precision highp float;\"", "", "");
+ precisionManager.defaultWarningGiven();
+ }
+}
+
+//
+// Same error message for all places assignments don't work.
+//
+void TParseContext::assignError(const TSourceLoc& loc, const char* op, TString left, TString right)
+{
+ error(loc, "", op, "cannot convert from '%s' to '%s'",
+ right.c_str(), left.c_str());
+}
+
+//
+// Same error message for all places unary operations don't work.
+//
+void TParseContext::unaryOpError(const TSourceLoc& loc, const char* op, TString operand)
+{
+ error(loc, " wrong operand type", op,
+ "no operation '%s' exists that takes an operand of type %s (or there is no acceptable conversion)",
+ op, operand.c_str());
+}
+
+//
+// Same error message for all places binary operations don't work.
+//
+void TParseContext::binaryOpError(const TSourceLoc& loc, const char* op, TString left, TString right)
+{
+ error(loc, " wrong operand types:", op,
+ "no operation '%s' exists that takes a left-hand operand of type '%s' and "
+ "a right operand of type '%s' (or there is no acceptable conversion)",
+ op, left.c_str(), right.c_str());
+}
+
+//
+// A basic type of EbtVoid is a sign that the name string was seen in the source, but
+// it was not found as a variable in the symbol table. If so, give the error
+// message and insert a dummy variable in the symbol table to prevent future errors.
+//
+void TParseContext::variableCheck(TIntermTyped*& nodePtr)
+{
+ TIntermSymbol* symbol = nodePtr->getAsSymbolNode();
+ if (! symbol)
+ return;
+
+ if (symbol->getType().getBasicType() == EbtVoid) {
+ const char *extraInfoFormat = "";
+ if (spvVersion.vulkan != 0 && symbol->getName() == "gl_VertexID") {
+ extraInfoFormat = "(Did you mean gl_VertexIndex?)";
+ } else if (spvVersion.vulkan != 0 && symbol->getName() == "gl_InstanceID") {
+ extraInfoFormat = "(Did you mean gl_InstanceIndex?)";
+ }
+ error(symbol->getLoc(), "undeclared identifier", symbol->getName().c_str(), extraInfoFormat);
+
+ // Add to symbol table to prevent future error messages on the same name
+ if (symbol->getName().size() > 0) {
+ TVariable* fakeVariable = new TVariable(&symbol->getName(), TType(EbtFloat));
+ symbolTable.insert(*fakeVariable);
+
+ // substitute a symbol node for this new variable
+ nodePtr = intermediate.addSymbol(*fakeVariable, symbol->getLoc());
+ }
+ } else {
+ switch (symbol->getQualifier().storage) {
+ case EvqPointCoord:
+ profileRequires(symbol->getLoc(), ENoProfile, 120, nullptr, "gl_PointCoord");
+ break;
+ default: break; // some compilers want this
+ }
+ }
+}
+
+//
+// Both test, and if necessary spit out an error, to see if the node is really
+// an l-value that can be operated on this way.
+//
+// Returns true if there was an error.
+//
+bool TParseContext::lValueErrorCheck(const TSourceLoc& loc, const char* op, TIntermTyped* node)
+{
+ TIntermBinary* binaryNode = node->getAsBinaryNode();
+
+ if (binaryNode) {
+ bool errorReturn = false;
+
+ switch(binaryNode->getOp()) {
+ case EOpIndexDirect:
+ case EOpIndexIndirect:
+ // ... tessellation control shader ...
+ // If a per-vertex output variable is used as an l-value, it is a
+ // compile-time or link-time error if the expression indicating the
+ // vertex index is not the identifier gl_InvocationID.
+ if (language == EShLangTessControl) {
+ const TType& leftType = binaryNode->getLeft()->getType();
+ if (leftType.getQualifier().storage == EvqVaryingOut && ! leftType.getQualifier().patch && binaryNode->getLeft()->getAsSymbolNode()) {
+ // we have a per-vertex output
+ const TIntermSymbol* rightSymbol = binaryNode->getRight()->getAsSymbolNode();
+ if (! rightSymbol || rightSymbol->getQualifier().builtIn != EbvInvocationId)
+ error(loc, "tessellation-control per-vertex output l-value must be indexed with gl_InvocationID", "[]", "");
+ }
+ }
+
+ break; // left node is checked by base class
+ case EOpIndexDirectStruct:
+ break; // left node is checked by base class
+ case EOpVectorSwizzle:
+ errorReturn = lValueErrorCheck(loc, op, binaryNode->getLeft());
+ if (!errorReturn) {
+ int offset[4] = {0,0,0,0};
+
+ TIntermTyped* rightNode = binaryNode->getRight();
+ TIntermAggregate *aggrNode = rightNode->getAsAggregate();
+
+ for (TIntermSequence::iterator p = aggrNode->getSequence().begin();
+ p != aggrNode->getSequence().end(); p++) {
+ int value = (*p)->getAsTyped()->getAsConstantUnion()->getConstArray()[0].getIConst();
+ offset[value]++;
+ if (offset[value] > 1) {
+ error(loc, " l-value of swizzle cannot have duplicate components", op, "", "");
+
+ return true;
+ }
+ }
+ }
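+ // e.g. (illustrative): 'v.xy = u;' is a valid swizzled l-value, while 'v.xx = u;' is
+ // rejected above because component 'x' appears twice.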
+
+ return errorReturn;
+ default:
+ break;
+ }
+
+ if (errorReturn) {
+ error(loc, " l-value required", op, "", "");
+ return true;
+ }
+ }
+
+ if (binaryNode && binaryNode->getOp() == EOpIndexDirectStruct &&
+ binaryNode->getLeft()->getBasicType() == EbtReference)
+ return false;
+
+ // Let the base class check errors
+ if (TParseContextBase::lValueErrorCheck(loc, op, node))
+ return true;
+
+ const char* symbol = nullptr;
+ TIntermSymbol* symNode = node->getAsSymbolNode();
+ if (symNode != nullptr)
+ symbol = symNode->getName().c_str();
+
+ const char* message = nullptr;
+ switch (node->getQualifier().storage) {
+ case EvqVaryingIn: message = "can't modify shader input"; break;
+ case EvqInstanceId: message = "can't modify gl_InstanceID"; break;
+ case EvqVertexId: message = "can't modify gl_VertexID"; break;
+ case EvqFace: message = "can't modify gl_FrontFace"; break;
+ case EvqFragCoord: message = "can't modify gl_FragCoord"; break;
+ case EvqPointCoord: message = "can't modify gl_PointCoord"; break;
+ case EvqFragDepth:
+ intermediate.setDepthReplacing();
+ // "In addition, it is an error to statically write to gl_FragDepth in the fragment shader."
+ if (profile == EEsProfile && intermediate.getEarlyFragmentTests())
+ message = "can't modify gl_FragDepth if using early_fragment_tests";
+ break;
+
+ default:
+ break;
+ }
+
+ if (message == nullptr && binaryNode == nullptr && symNode == nullptr) {
+ error(loc, " l-value required", op, "", "");
+
+ return true;
+ }
+
+ //
+ // Everything else is okay, no error.
+ //
+ if (message == nullptr)
+ return false;
+
+ //
+ // If we get here, we have an error and a message.
+ //
+ if (symNode)
+ error(loc, " l-value required", op, "\"%s\" (%s)", symbol, message);
+ else
+ error(loc, " l-value required", op, "(%s)", message);
+
+ return true;
+}
+
+// Test for and give an error if the node can't be read from.
+void TParseContext::rValueErrorCheck(const TSourceLoc& loc, const char* op, TIntermTyped* node)
+{
+ // Let the base class check errors
+ TParseContextBase::rValueErrorCheck(loc, op, node);
+
+#ifdef AMD_EXTENSIONS
+ TIntermSymbol* symNode = node->getAsSymbolNode();
+ if (!(symNode && symNode->getQualifier().writeonly)) // base class checks
+ if (symNode && symNode->getQualifier().explicitInterp)
+ error(loc, "can't read from explicitly-interpolated object: ", op, symNode->getName().c_str());
+#endif
+}
+
+//
+// Both test, and if necessary spit out an error, to see if the node is really
+// a constant.
+//
+void TParseContext::constantValueCheck(TIntermTyped* node, const char* token)
+{
+ if (! node->getQualifier().isConstant())
+ error(node->getLoc(), "constant expression required", token, "");
+}
+
+//
+// Both test, and if necessary spit out an error, to see if the node is really
+// an integer.
+//
+void TParseContext::integerCheck(const TIntermTyped* node, const char* token)
+{
+ if ((node->getBasicType() == EbtInt || node->getBasicType() == EbtUint) && node->isScalar())
+ return;
+
+ error(node->getLoc(), "scalar integer expression required", token, "");
+}
+
+//
+// Both test, and if necessary spit out an error, to see if we are currently
+// globally scoped.
+//
+void TParseContext::globalCheck(const TSourceLoc& loc, const char* token)
+{
+ if (! symbolTable.atGlobalLevel())
+ error(loc, "not allowed in nested scope", token, "");
+}
+
+//
+// Reserved errors for GLSL.
+//
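+// e.g. (illustrative): declaring 'float gl_Custom;' is an error; 'float a__b;' is an error
+// on ES <= 300 and otherwise a warning, per the rules below.
+//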
+void TParseContext::reservedErrorCheck(const TSourceLoc& loc, const TString& identifier)
+{
+ // "Identifiers starting with "gl_" are reserved for use by OpenGL, and may not be
+ // declared in a shader; this results in a compile-time error."
+ if (! symbolTable.atBuiltInLevel()) {
+ if (builtInName(identifier))
+ error(loc, "identifiers starting with \"gl_\" are reserved", identifier.c_str(), "");
+
+ // "__" are not supposed to be an error. ES 310 (and desktop) added the clarification:
+ // "In addition, all identifiers containing two consecutive underscores (__) are
+ // reserved; using such a name does not itself result in an error, but may result
+ // in undefined behavior."
+ // however, before that, ES tests required an error.
+ if (identifier.find("__") != TString::npos) {
+ if (profile == EEsProfile && version <= 300)
+ error(loc, "identifiers containing consecutive underscores (\"__\") are reserved, and an error if version <= 300", identifier.c_str(), "");
+ else
+ warn(loc, "identifiers containing consecutive underscores (\"__\") are reserved", identifier.c_str(), "");
+ }
+ }
+}
+
+//
+// Reserved errors for the preprocessor.
+//
+void TParseContext::reservedPpErrorCheck(const TSourceLoc& loc, const char* identifier, const char* op)
+{
+ // "__" are not supposed to be an error. ES 310 (and desktop) added the clarification:
+ // "All macro names containing two consecutive underscores ( __ ) are reserved;
+ // defining such a name does not itself result in an error, but may result in
+ // undefined behavior. All macro names prefixed with "GL_" ("GL" followed by a
+ // single underscore) are also reserved, and defining such a name results in a
+ // compile-time error."
+ // However, before that, ES tests required an error.
+ if (strncmp(identifier, "GL_", 3) == 0)
+ ppError(loc, "names beginning with \"GL_\" can't be (un)defined:", op, identifier);
+ else if (strncmp(identifier, "defined", 8) == 0)
+ ppError(loc, "\"defined\" can't be (un)defined:", op, identifier);
+ else if (strstr(identifier, "__") != 0) {
+ if (profile == EEsProfile && version >= 300 &&
+ (strcmp(identifier, "__LINE__") == 0 ||
+ strcmp(identifier, "__FILE__") == 0 ||
+ strcmp(identifier, "__VERSION__") == 0))
+ ppError(loc, "predefined names can't be (un)defined:", op, identifier);
+ else {
+ if (profile == EEsProfile && version <= 300)
+ ppError(loc, "names containing consecutive underscores are reserved, and an error if version <= 300:", op, identifier);
+ else
+ ppWarn(loc, "names containing consecutive underscores are reserved:", op, identifier);
+ }
+ }
+}
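+
+// Illustrative examples (editor's note, not upstream glslang code) of source
+// that exercises the two reserved-name checks above, per their error strings:
+//
+//     float gl_myVar;        // error: identifiers starting with "gl_" are reserved
+//     float my__var;         // ES <= 300: error; otherwise: warning ("__" is reserved)
+//     #define GL_MY_MACRO 1  // error: names beginning with "GL_" can't be (un)defined
+//     #undef __LINE__        // ES >= 300: error: predefined names can't be (un)defined
+//     #define MY__MACRO 1    // ES <= 300: error; otherwise: warning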
+
+//
+// See if this version/profile allows use of the line-continuation character '\'.
+//
+// Returns true if a line continuation should be done.
+//
+bool TParseContext::lineContinuationCheck(const TSourceLoc& loc, bool endOfComment)
+{
+ const char* message = "line continuation";
+
+ bool lineContinuationAllowed = (profile == EEsProfile && version >= 300) ||
+ (profile != EEsProfile && (version >= 420 || extensionTurnedOn(E_GL_ARB_shading_language_420pack)));
+
+ if (endOfComment) {
+ if (lineContinuationAllowed)
+ warn(loc, "used at end of comment; the following line is still part of the comment", message, "");
+ else
+ warn(loc, "used at end of comment, but this version does not provide line continuation", message, "");
+
+ return lineContinuationAllowed;
+ }
+
+ if (relaxedErrors()) {
+ if (! lineContinuationAllowed)
+ warn(loc, "not allowed in this version", message, "");
+ return true;
+ } else {
+ profileRequires(loc, EEsProfile, 300, nullptr, message);
+ profileRequires(loc, ~EEsProfile, 420, E_GL_ARB_shading_language_420pack, message);
+ }
+
+ return lineContinuationAllowed;
+}
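+
+// Illustrative example (editor's note): a backslash-newline, as in the macro
+// below, is a line continuation only on ES >= 300, desktop >= 420, or with
+// GL_ARB_shading_language_420pack; otherwise profileRequires() reports it:
+//
+//     #define ADD(a, b) \
+//         ((a) + (b))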
+
+bool TParseContext::builtInName(const TString& identifier)
+{
+ return identifier.compare(0, 3, "gl_") == 0;
+}
+
+//
+// Make sure there is enough data and not too many arguments provided to the
+// constructor to build something of the type of the constructor. Also returns
+// the type of the constructor.
+//
+// Part of establishing type is establishing specialization-constness.
+// We don't yet know "top down" whether type is a specialization constant,
+// but a const constructor can become a specialization constant if any of
+// its children are, subject to KHR_vulkan_glsl rules:
+//
+// - int(), uint(), and bool() constructors for type conversions
+// from any of the following types to any of the following types:
+// * int
+// * uint
+// * bool
+// - vector versions of the above conversion constructors
+//
+// Returns true if there was an error in construction.
+//
+bool TParseContext::constructorError(const TSourceLoc& loc, TIntermNode* node, TFunction& function, TOperator op, TType& type)
+{
+ type.shallowCopy(function.getType());
+
+ bool constructingMatrix = false;
+ switch(op) {
+ case EOpConstructTextureSampler:
+ return constructorTextureSamplerError(loc, function);
+ case EOpConstructMat2x2:
+ case EOpConstructMat2x3:
+ case EOpConstructMat2x4:
+ case EOpConstructMat3x2:
+ case EOpConstructMat3x3:
+ case EOpConstructMat3x4:
+ case EOpConstructMat4x2:
+ case EOpConstructMat4x3:
+ case EOpConstructMat4x4:
+ case EOpConstructDMat2x2:
+ case EOpConstructDMat2x3:
+ case EOpConstructDMat2x4:
+ case EOpConstructDMat3x2:
+ case EOpConstructDMat3x3:
+ case EOpConstructDMat3x4:
+ case EOpConstructDMat4x2:
+ case EOpConstructDMat4x3:
+ case EOpConstructDMat4x4:
+ case EOpConstructF16Mat2x2:
+ case EOpConstructF16Mat2x3:
+ case EOpConstructF16Mat2x4:
+ case EOpConstructF16Mat3x2:
+ case EOpConstructF16Mat3x3:
+ case EOpConstructF16Mat3x4:
+ case EOpConstructF16Mat4x2:
+ case EOpConstructF16Mat4x3:
+ case EOpConstructF16Mat4x4:
+ constructingMatrix = true;
+ break;
+ default:
+ break;
+ }
+
+ //
+ // Walk the arguments for first-pass checks and collection of information.
+ //
+
+ int size = 0;
+ bool constType = true;
+ bool specConstType = false; // value is only valid if constType is true
+ bool full = false;
+ bool overFull = false;
+ bool matrixInMatrix = false;
+ bool arrayArg = false;
+ bool floatArgument = false;
+ for (int arg = 0; arg < function.getParamCount(); ++arg) {
+ if (function[arg].type->isArray()) {
+ if (function[arg].type->isUnsizedArray()) {
+ // Can't construct from an unsized array.
+ error(loc, "array argument must be sized", "constructor", "");
+ return true;
+ }
+ arrayArg = true;
+ }
+ if (constructingMatrix && function[arg].type->isMatrix())
+ matrixInMatrix = true;
+
+ // 'full' will go to true when enough args have been seen. If we loop
+ // again, there is an extra argument.
+ if (full) {
+ // For vectors and matrices, it's okay to have too many components
+ // available, but not okay to have unused arguments.
+ overFull = true;
+ }
+
+ size += function[arg].type->computeNumComponents();
+ if (op != EOpConstructStruct && ! type.isArray() && size >= type.computeNumComponents())
+ full = true;
+
+ if (! function[arg].type->getQualifier().isConstant())
+ constType = false;
+ if (function[arg].type->getQualifier().isSpecConstant())
+ specConstType = true;
+ if (function[arg].type->isFloatingDomain())
+ floatArgument = true;
+ if (type.isStruct()) {
+ if (function[arg].type->containsBasicType(EbtFloat16)) {
+ requireFloat16Arithmetic(loc, "constructor", "can't construct structure containing 16-bit type");
+ }
+ if (function[arg].type->containsBasicType(EbtUint16) ||
+ function[arg].type->containsBasicType(EbtInt16)) {
+ requireInt16Arithmetic(loc, "constructor", "can't construct structure containing 16-bit type");
+ }
+ if (function[arg].type->containsBasicType(EbtUint8) ||
+ function[arg].type->containsBasicType(EbtInt8)) {
+ requireInt8Arithmetic(loc, "constructor", "can't construct structure containing 8-bit type");
+ }
+ }
+ }
+
+ switch (op) {
+ case EOpConstructFloat16:
+ case EOpConstructF16Vec2:
+ case EOpConstructF16Vec3:
+ case EOpConstructF16Vec4:
+ if (type.isArray())
+ requireFloat16Arithmetic(loc, "constructor", "16-bit arrays not supported");
+ if (type.isVector() && function.getParamCount() != 1)
+ requireFloat16Arithmetic(loc, "constructor", "16-bit vectors only take vector types");
+ break;
+ case EOpConstructUint16:
+ case EOpConstructU16Vec2:
+ case EOpConstructU16Vec3:
+ case EOpConstructU16Vec4:
+ case EOpConstructInt16:
+ case EOpConstructI16Vec2:
+ case EOpConstructI16Vec3:
+ case EOpConstructI16Vec4:
+ if (type.isArray())
+ requireInt16Arithmetic(loc, "constructor", "16-bit arrays not supported");
+ if (type.isVector() && function.getParamCount() != 1)
+ requireInt16Arithmetic(loc, "constructor", "16-bit vectors only take vector types");
+ break;
+ case EOpConstructUint8:
+ case EOpConstructU8Vec2:
+ case EOpConstructU8Vec3:
+ case EOpConstructU8Vec4:
+ case EOpConstructInt8:
+ case EOpConstructI8Vec2:
+ case EOpConstructI8Vec3:
+ case EOpConstructI8Vec4:
+ if (type.isArray())
+ requireInt8Arithmetic(loc, "constructor", "8-bit arrays not supported");
+ if (type.isVector() && function.getParamCount() != 1)
+ requireInt8Arithmetic(loc, "constructor", "8-bit vectors only take vector types");
+ break;
+ default:
+ break;
+ }
+
+ // inherit constness from children
+ if (constType) {
+ bool makeSpecConst;
+ // Finish pinning down spec-const semantics
+ if (specConstType) {
+ switch (op) {
+ case EOpConstructInt8:
+ case EOpConstructUint8:
+ case EOpConstructInt16:
+ case EOpConstructUint16:
+ case EOpConstructInt:
+ case EOpConstructUint:
+ case EOpConstructInt64:
+ case EOpConstructUint64:
+ case EOpConstructBool:
+ case EOpConstructBVec2:
+ case EOpConstructBVec3:
+ case EOpConstructBVec4:
+ case EOpConstructI8Vec2:
+ case EOpConstructI8Vec3:
+ case EOpConstructI8Vec4:
+ case EOpConstructU8Vec2:
+ case EOpConstructU8Vec3:
+ case EOpConstructU8Vec4:
+ case EOpConstructI16Vec2:
+ case EOpConstructI16Vec3:
+ case EOpConstructI16Vec4:
+ case EOpConstructU16Vec2:
+ case EOpConstructU16Vec3:
+ case EOpConstructU16Vec4:
+ case EOpConstructIVec2:
+ case EOpConstructIVec3:
+ case EOpConstructIVec4:
+ case EOpConstructUVec2:
+ case EOpConstructUVec3:
+ case EOpConstructUVec4:
+ case EOpConstructI64Vec2:
+ case EOpConstructI64Vec3:
+ case EOpConstructI64Vec4:
+ case EOpConstructU64Vec2:
+ case EOpConstructU64Vec3:
+ case EOpConstructU64Vec4:
+                // This is the list of valid conversions, provided they aren't
+                // converting from float and aren't making an array.
+ makeSpecConst = ! floatArgument && ! type.isArray();
+ break;
+ default:
+ // anything else wasn't white-listed in the spec as a conversion
+ makeSpecConst = false;
+ break;
+ }
+ } else
+ makeSpecConst = false;
+
+ if (makeSpecConst)
+ type.getQualifier().makeSpecConstant();
+ else if (specConstType)
+ type.getQualifier().makeTemporary();
+ else
+ type.getQualifier().storage = EvqConst;
+ }
+
+ if (type.isArray()) {
+ if (function.getParamCount() == 0) {
+ error(loc, "array constructor must have at least one argument", "constructor", "");
+ return true;
+ }
+
+ if (type.isUnsizedArray()) {
+ // auto adapt the constructor type to the number of arguments
+ type.changeOuterArraySize(function.getParamCount());
+ } else if (type.getOuterArraySize() != function.getParamCount()) {
+ error(loc, "array constructor needs one argument per array element", "constructor", "");
+ return true;
+ }
+
+ if (type.isArrayOfArrays()) {
+ // Types have to match, but we're still making the type.
+ // Finish making the type, and the comparison is done later
+ // when checking for conversion.
+ TArraySizes& arraySizes = *type.getArraySizes();
+
+ // At least the dimensionalities have to match.
+ if (! function[0].type->isArray() ||
+ arraySizes.getNumDims() != function[0].type->getArraySizes()->getNumDims() + 1) {
+ error(loc, "array constructor argument not correct type to construct array element", "constructor", "");
+ return true;
+ }
+
+ if (arraySizes.isInnerUnsized()) {
+ // "Arrays of arrays ..., and the size for any dimension is optional"
+ // That means we need to adopt (from the first argument) the other array sizes into the type.
+ for (int d = 1; d < arraySizes.getNumDims(); ++d) {
+ if (arraySizes.getDimSize(d) == UnsizedArraySize) {
+ arraySizes.setDimSize(d, function[0].type->getArraySizes()->getDimSize(d - 1));
+ }
+ }
+ }
+ }
+ }
+
+ if (arrayArg && op != EOpConstructStruct && ! type.isArrayOfArrays()) {
+ error(loc, "constructing non-array constituent from array argument", "constructor", "");
+ return true;
+ }
+
+ if (matrixInMatrix && ! type.isArray()) {
+ profileRequires(loc, ENoProfile, 120, nullptr, "constructing matrix from matrix");
+
+ // "If a matrix argument is given to a matrix constructor,
+ // it is a compile-time error to have any other arguments."
+ if (function.getParamCount() != 1)
+ error(loc, "matrix constructed from matrix can only have one argument", "constructor", "");
+ return false;
+ }
+
+ if (overFull) {
+ error(loc, "too many arguments", "constructor", "");
+ return true;
+ }
+
+ if (op == EOpConstructStruct && ! type.isArray() && (int)type.getStruct()->size() != function.getParamCount()) {
+ error(loc, "Number of constructor parameters does not match the number of structure fields", "constructor", "");
+ return true;
+ }
+
+ if ((op != EOpConstructStruct && size != 1 && size < type.computeNumComponents()) ||
+ (op == EOpConstructStruct && size < type.computeNumComponents())) {
+ error(loc, "not enough data provided for construction", "constructor", "");
+ return true;
+ }
+
+ if (type.isCoopMat() && function.getParamCount() != 1) {
+ error(loc, "wrong number of arguments", "constructor", "");
+ return true;
+ }
+ if (type.isCoopMat() &&
+ !(function[0].type->isScalar() || function[0].type->isCoopMat())) {
+ error(loc, "Cooperative matrix constructor argument must be scalar or cooperative matrix", "constructor", "");
+ return true;
+ }
+
+ TIntermTyped* typed = node->getAsTyped();
+ if (typed == nullptr) {
+ error(loc, "constructor argument does not have a type", "constructor", "");
+ return true;
+ }
+ if (op != EOpConstructStruct && typed->getBasicType() == EbtSampler) {
+ error(loc, "cannot convert a sampler", "constructor", "");
+ return true;
+ }
+ if (op != EOpConstructStruct && typed->getBasicType() == EbtAtomicUint) {
+ error(loc, "cannot convert an atomic_uint", "constructor", "");
+ return true;
+ }
+ if (typed->getBasicType() == EbtVoid) {
+ error(loc, "cannot convert a void", "constructor", "");
+ return true;
+ }
+
+ return false;
+}
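+
+// Illustrative constructor calls (editor's note; placeholder names) mapped to
+// the checks above:
+//
+//     vec4(1.0, 2.0, 3.0);   // error: not enough data provided for construction
+//     vec2(1.0, 2.0, 3.0);   // error: too many arguments (an arg remains after 'full')
+//     mat2(mat3(1.0), 1.0);  // error: matrix constructed from matrix can only have one argument
+//     float[](1.0, 2.0);     // OK: unsized array adapts to two elements
+//     uint(mySpecConstInt);  // stays a specialization constant (whitelisted conversion)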
+
+// Verify all the correct semantics for constructing a combined texture/sampler.
+// Return true if the semantics are incorrect.
+bool TParseContext::constructorTextureSamplerError(const TSourceLoc& loc, const TFunction& function)
+{
+ TString constructorName = function.getType().getBasicTypeString(); // TODO: performance: should not be making copy; interface needs to change
+ const char* token = constructorName.c_str();
+
+ // exactly two arguments needed
+ if (function.getParamCount() != 2) {
+ error(loc, "sampler-constructor requires two arguments", token, "");
+ return true;
+ }
+
+    // For now, arrayed constructors are not allowed; the rest of this function
+    // is set up to allow them, if this test is removed:
+ if (function.getType().isArray()) {
+ error(loc, "sampler-constructor cannot make an array of samplers", token, "");
+ return true;
+ }
+
+ // first argument
+ // * the constructor's first argument must be a texture type
+ // * the dimensionality (1D, 2D, 3D, Cube, Rect, Buffer, MS, and Array)
+ // of the texture type must match that of the constructed sampler type
+ // (that is, the suffixes of the type of the first argument and the
+ // type of the constructor will be spelled the same way)
+ if (function[0].type->getBasicType() != EbtSampler ||
+ ! function[0].type->getSampler().isTexture() ||
+ function[0].type->isArray()) {
+ error(loc, "sampler-constructor first argument must be a scalar textureXXX type", token, "");
+ return true;
+ }
+ // simulate the first argument's impact on the result type, so it can be compared with the encapsulated operator!=()
+ TSampler texture = function.getType().getSampler();
+ texture.combined = false;
+ texture.shadow = false;
+ if (texture != function[0].type->getSampler()) {
+ error(loc, "sampler-constructor first argument must match type and dimensionality of constructor type", token, "");
+ return true;
+ }
+
+ // second argument
+ // * the constructor's second argument must be a scalar of type
+ // *sampler* or *samplerShadow*
+ if ( function[1].type->getBasicType() != EbtSampler ||
+ ! function[1].type->getSampler().isPureSampler() ||
+ function[1].type->isArray()) {
+ error(loc, "sampler-constructor second argument must be a scalar type 'sampler'", token, "");
+ return true;
+ }
+
+ return false;
+}
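+
+// Illustrative KHR_vulkan_glsl usage (editor's note; placeholder names):
+//
+//     uniform texture2D t;     // first argument must be a texture* type
+//     uniform sampler s;       // second argument must be a pure sampler
+//     ... sampler2D(t, s) ...  // OK: dimensionalities match ("2D" and "2D")
+//     ... sampler3D(t, s) ...  // error: first argument must match dimensionality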
+
+// Checks to see if a void variable has been declared and raises an error message for such a case
+//
+// returns true in case of an error
+//
+bool TParseContext::voidErrorCheck(const TSourceLoc& loc, const TString& identifier, const TBasicType basicType)
+{
+ if (basicType == EbtVoid) {
+ error(loc, "illegal use of type 'void'", identifier.c_str(), "");
+ return true;
+ }
+
+ return false;
+}
+
+// Checks to see if the node (for the expression) contains a scalar boolean expression or not
+void TParseContext::boolCheck(const TSourceLoc& loc, const TIntermTyped* type)
+{
+ if (type->getBasicType() != EbtBool || type->isArray() || type->isMatrix() || type->isVector())
+ error(loc, "boolean expression expected", "", "");
+}
+
+// This function checks to see if the node (for the expression) contains a scalar boolean expression or not
+void TParseContext::boolCheck(const TSourceLoc& loc, const TPublicType& pType)
+{
+ if (pType.basicType != EbtBool || pType.arraySizes || pType.matrixCols > 1 || (pType.vectorSize > 1))
+ error(loc, "boolean expression expected", "", "");
+}
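+
+// Illustrative conditions (editor's note): both overloads require a scalar bool:
+//
+//     if (1) ...                  // error: boolean expression expected
+//     if (bvec2(true, true)) ...  // error: boolean expression expected (vector)
+//     if (a == b) ...             // OK: operator== yields a scalar bool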
+
+void TParseContext::samplerCheck(const TSourceLoc& loc, const TType& type, const TString& identifier, TIntermTyped* /*initializer*/)
+{
+ // Check that the appropriate extension is enabled if external sampler is used.
+ // There are two extensions. The correct one must be used based on GLSL version.
+ if (type.getBasicType() == EbtSampler && type.getSampler().external) {
+ if (version < 300) {
+ requireExtensions(loc, 1, &E_GL_OES_EGL_image_external, "samplerExternalOES");
+ } else {
+ requireExtensions(loc, 1, &E_GL_OES_EGL_image_external_essl3, "samplerExternalOES");
+ }
+ }
+ if (type.getSampler().yuv) {
+ requireExtensions(loc, 1, &E_GL_EXT_YUV_target, "__samplerExternal2DY2YEXT");
+ }
+
+ if (type.getQualifier().storage == EvqUniform)
+ return;
+
+ if (type.getBasicType() == EbtStruct && containsFieldWithBasicType(type, EbtSampler))
+ error(loc, "non-uniform struct contains a sampler or image:", type.getBasicTypeString().c_str(), identifier.c_str());
+ else if (type.getBasicType() == EbtSampler && type.getQualifier().storage != EvqUniform) {
+ // non-uniform sampler
+ // not yet: okay if it has an initializer
+ // if (! initializer)
+ error(loc, "sampler/image types can only be used in uniform variables or function parameters:", type.getBasicTypeString().c_str(), identifier.c_str());
+ }
+}
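+
+// Illustrative declarations (editor's note):
+//
+//     uniform sampler2D s;  // OK: samplers must be uniform (or function parameters)
+//     sampler2D s;          // error: sampler/image types can only be used in
+//                           //        uniform variables or function parameters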
+
+void TParseContext::atomicUintCheck(const TSourceLoc& loc, const TType& type, const TString& identifier)
+{
+ if (type.getQualifier().storage == EvqUniform)
+ return;
+
+ if (type.getBasicType() == EbtStruct && containsFieldWithBasicType(type, EbtAtomicUint))
+ error(loc, "non-uniform struct contains an atomic_uint:", type.getBasicTypeString().c_str(), identifier.c_str());
+ else if (type.getBasicType() == EbtAtomicUint && type.getQualifier().storage != EvqUniform)
+ error(loc, "atomic_uints can only be used in uniform variables or function parameters:", type.getBasicTypeString().c_str(), identifier.c_str());
+}
+#ifdef NV_EXTENSIONS
+void TParseContext::accStructNVCheck(const TSourceLoc& loc, const TType& type, const TString& identifier)
+{
+ if (type.getQualifier().storage == EvqUniform)
+ return;
+
+ if (type.getBasicType() == EbtStruct && containsFieldWithBasicType(type, EbtAccStructNV))
+ error(loc, "non-uniform struct contains an accelerationStructureNV:", type.getBasicTypeString().c_str(), identifier.c_str());
+ else if (type.getBasicType() == EbtAccStructNV && type.getQualifier().storage != EvqUniform)
+ error(loc, "accelerationStructureNV can only be used in uniform variables or function parameters:",
+ type.getBasicTypeString().c_str(), identifier.c_str());
+
+}
+#endif
+
+void TParseContext::transparentOpaqueCheck(const TSourceLoc& loc, const TType& type, const TString& identifier)
+{
+ if (parsingBuiltins)
+ return;
+
+ if (type.getQualifier().storage != EvqUniform)
+ return;
+
+ if (type.containsNonOpaque()) {
+ // Vulkan doesn't allow transparent uniforms outside of blocks
+ if (spvVersion.vulkan > 0)
+ vulkanRemoved(loc, "non-opaque uniforms outside a block");
+ // OpenGL wants locations on these (unless they are getting automapped)
+ if (spvVersion.openGl > 0 && !type.getQualifier().hasLocation() && !intermediate.getAutoMapLocations())
+ error(loc, "non-opaque uniform variables need a layout(location=L)", identifier.c_str(), "");
+ }
+}
+
+//
+// Qualifier checks knowing the qualifier and that it is a member of a struct/block.
+//
+void TParseContext::memberQualifierCheck(glslang::TPublicType& publicType)
+{
+ globalQualifierFixCheck(publicType.loc, publicType.qualifier);
+ checkNoShaderLayouts(publicType.loc, publicType.shaderQualifiers);
+ if (publicType.qualifier.isNonUniform()) {
+ error(publicType.loc, "not allowed on block or structure members", "nonuniformEXT", "");
+ publicType.qualifier.nonUniform = false;
+ }
+}
+
+//
+// Check/fix just a full qualifier (no variables or types yet, but qualifier is complete) at global level.
+//
+void TParseContext::globalQualifierFixCheck(const TSourceLoc& loc, TQualifier& qualifier)
+{
+ bool nonuniformOkay = false;
+
+ // move from parameter/unknown qualifiers to pipeline in/out qualifiers
+ switch (qualifier.storage) {
+ case EvqIn:
+ profileRequires(loc, ENoProfile, 130, nullptr, "in for stage inputs");
+ profileRequires(loc, EEsProfile, 300, nullptr, "in for stage inputs");
+ qualifier.storage = EvqVaryingIn;
+ nonuniformOkay = true;
+ break;
+ case EvqOut:
+ profileRequires(loc, ENoProfile, 130, nullptr, "out for stage outputs");
+ profileRequires(loc, EEsProfile, 300, nullptr, "out for stage outputs");
+ qualifier.storage = EvqVaryingOut;
+ break;
+ case EvqInOut:
+ qualifier.storage = EvqVaryingIn;
+ error(loc, "cannot use 'inout' at global scope", "", "");
+ break;
+ case EvqGlobal:
+ case EvqTemporary:
+ nonuniformOkay = true;
+ break;
+ default:
+ break;
+ }
+
+ if (!nonuniformOkay && qualifier.nonUniform)
+ error(loc, "for non-parameter, can only apply to 'in' or no storage qualifier", "nonuniformEXT", "");
+
+ invariantCheck(loc, qualifier);
+}
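+
+// Illustrative global declarations (editor's note):
+//
+//     in vec4 position;         // 'in' is moved to the pipeline input EvqVaryingIn
+//     inout float x;            // error: cannot use 'inout' at global scope
+//     out nonuniformEXT int i;  // error: nonuniformEXT needs 'in' or no storage qualifier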
+
+//
+// Check a full qualifier and type (no variable yet) at global level.
+//
+void TParseContext::globalQualifierTypeCheck(const TSourceLoc& loc, const TQualifier& qualifier, const TPublicType& publicType)
+{
+ if (! symbolTable.atGlobalLevel())
+ return;
+
+ if (!(publicType.userDef && publicType.userDef->getBasicType() == EbtReference)) {
+ if (qualifier.isMemoryQualifierImageAndSSBOOnly() && ! publicType.isImage() && publicType.qualifier.storage != EvqBuffer) {
+ error(loc, "memory qualifiers cannot be used on this type", "", "");
+ } else if (qualifier.isMemory() && (publicType.basicType != EbtSampler) && !publicType.qualifier.isUniformOrBuffer()) {
+ error(loc, "memory qualifiers cannot be used on this type", "", "");
+ }
+ }
+
+ if (qualifier.storage == EvqBuffer &&
+ publicType.basicType != EbtBlock &&
+ !qualifier.layoutBufferReference)
+ error(loc, "buffers can be declared only as blocks", "buffer", "");
+
+ if (qualifier.storage != EvqVaryingIn && qualifier.storage != EvqVaryingOut)
+ return;
+
+ if (publicType.shaderQualifiers.blendEquation)
+ error(loc, "can only be applied to a standalone 'out'", "blend equation", "");
+
+ // now, knowing it is a shader in/out, do all the in/out semantic checks
+
+ if (publicType.basicType == EbtBool && !parsingBuiltins) {
+ error(loc, "cannot be bool", GetStorageQualifierString(qualifier.storage), "");
+ return;
+ }
+
+ if (isTypeInt(publicType.basicType) || publicType.basicType == EbtDouble)
+ profileRequires(loc, EEsProfile, 300, nullptr, "shader input/output");
+
+ if (!qualifier.flat
+#ifdef AMD_EXTENSIONS
+ && !qualifier.explicitInterp
+#endif
+#ifdef NV_EXTENSIONS
+ && !qualifier.pervertexNV
+#endif
+ ) {
+ if (isTypeInt(publicType.basicType) ||
+ publicType.basicType == EbtDouble ||
+ (publicType.userDef && (publicType.userDef->containsBasicType(EbtInt8) ||
+ publicType.userDef->containsBasicType(EbtUint8) ||
+ publicType.userDef->containsBasicType(EbtInt16) ||
+ publicType.userDef->containsBasicType(EbtUint16) ||
+ publicType.userDef->containsBasicType(EbtInt) ||
+ publicType.userDef->containsBasicType(EbtUint) ||
+ publicType.userDef->containsBasicType(EbtInt64) ||
+ publicType.userDef->containsBasicType(EbtUint64) ||
+ publicType.userDef->containsBasicType(EbtDouble)))) {
+ if (qualifier.storage == EvqVaryingIn && language == EShLangFragment)
+ error(loc, "must be qualified as flat", TType::getBasicString(publicType.basicType), GetStorageQualifierString(qualifier.storage));
+ else if (qualifier.storage == EvqVaryingOut && language == EShLangVertex && version == 300)
+ error(loc, "must be qualified as flat", TType::getBasicString(publicType.basicType), GetStorageQualifierString(qualifier.storage));
+ }
+ }
+
+ if (qualifier.patch && qualifier.isInterpolation())
+ error(loc, "cannot use interpolation qualifiers with patch", "patch", "");
+
+#ifdef NV_EXTENSIONS
+ if (qualifier.perTaskNV && publicType.basicType != EbtBlock)
+ error(loc, "taskNV variables can be declared only as blocks", "taskNV", "");
+#endif
+
+ if (qualifier.storage == EvqVaryingIn) {
+ switch (language) {
+ case EShLangVertex:
+ if (publicType.basicType == EbtStruct) {
+ error(loc, "cannot be a structure or array", GetStorageQualifierString(qualifier.storage), "");
+ return;
+ }
+ if (publicType.arraySizes) {
+ requireProfile(loc, ~EEsProfile, "vertex input arrays");
+ profileRequires(loc, ENoProfile, 150, nullptr, "vertex input arrays");
+ }
+ if (publicType.basicType == EbtDouble)
+ profileRequires(loc, ~EEsProfile, 410, nullptr, "vertex-shader `double` type input");
+ if (qualifier.isAuxiliary() || qualifier.isInterpolation() || qualifier.isMemory() || qualifier.invariant)
+ error(loc, "vertex input cannot be further qualified", "", "");
+ break;
+
+ case EShLangTessControl:
+ if (qualifier.patch)
+ error(loc, "can only use on output in tessellation-control shader", "patch", "");
+ break;
+
+ case EShLangTessEvaluation:
+ break;
+
+ case EShLangGeometry:
+ break;
+
+ case EShLangFragment:
+ if (publicType.userDef) {
+ profileRequires(loc, EEsProfile, 300, nullptr, "fragment-shader struct input");
+ profileRequires(loc, ~EEsProfile, 150, nullptr, "fragment-shader struct input");
+ if (publicType.userDef->containsStructure())
+ requireProfile(loc, ~EEsProfile, "fragment-shader struct input containing structure");
+ if (publicType.userDef->containsArray())
+ requireProfile(loc, ~EEsProfile, "fragment-shader struct input containing an array");
+ }
+ break;
+
+ case EShLangCompute:
+ if (! symbolTable.atBuiltInLevel())
+ error(loc, "global storage input qualifier cannot be used in a compute shader", "in", "");
+ break;
+
+ default:
+ break;
+ }
+ } else {
+ // qualifier.storage == EvqVaryingOut
+ switch (language) {
+ case EShLangVertex:
+ if (publicType.userDef) {
+ profileRequires(loc, EEsProfile, 300, nullptr, "vertex-shader struct output");
+ profileRequires(loc, ~EEsProfile, 150, nullptr, "vertex-shader struct output");
+ if (publicType.userDef->containsStructure())
+ requireProfile(loc, ~EEsProfile, "vertex-shader struct output containing structure");
+ if (publicType.userDef->containsArray())
+ requireProfile(loc, ~EEsProfile, "vertex-shader struct output containing an array");
+ }
+
+ break;
+
+ case EShLangTessControl:
+ break;
+
+ case EShLangTessEvaluation:
+ if (qualifier.patch)
+ error(loc, "can only use on input in tessellation-evaluation shader", "patch", "");
+ break;
+
+ case EShLangGeometry:
+ break;
+
+ case EShLangFragment:
+ profileRequires(loc, EEsProfile, 300, nullptr, "fragment shader output");
+ if (publicType.basicType == EbtStruct) {
+ error(loc, "cannot be a structure", GetStorageQualifierString(qualifier.storage), "");
+ return;
+ }
+ if (publicType.matrixRows > 0) {
+ error(loc, "cannot be a matrix", GetStorageQualifierString(qualifier.storage), "");
+ return;
+ }
+ if (qualifier.isAuxiliary())
+ error(loc, "can't use auxiliary qualifier on a fragment output", "centroid/sample/patch", "");
+ if (qualifier.isInterpolation())
+ error(loc, "can't use interpolation qualifier on a fragment output", "flat/smooth/noperspective", "");
+ if (publicType.basicType == EbtDouble || publicType.basicType == EbtInt64 || publicType.basicType == EbtUint64)
+ error(loc, "cannot contain a double, int64, or uint64", GetStorageQualifierString(qualifier.storage), "");
+ break;
+
+ case EShLangCompute:
+ error(loc, "global storage output qualifier cannot be used in a compute shader", "out", "");
+ break;
+
+ default:
+ break;
+ }
+ }
+}
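+
+// Illustrative fragment-shader in/out declarations (editor's note):
+//
+//     in int i;       // error: must be qualified as flat
+//     flat in int i;  // OK
+//     out bool b;     // error: cannot be bool
+//     out mat4 m;     // error: a fragment output cannot be a matrix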
+
+//
+// Merge characteristics of the 'src' qualifier into the 'dst'.
+// If there is duplication, issue error messages, unless 'force'
+// is specified, which means to just override default settings.
+//
+// Also, when force is false, it will be assumed that 'src' follows
+// 'dst', for the purpose of error checking order for versions
+// that require specific orderings of qualifiers.
+//
+void TParseContext::mergeQualifiers(const TSourceLoc& loc, TQualifier& dst, const TQualifier& src, bool force)
+{
+ // Multiple auxiliary qualifiers (mostly done later by 'individual qualifiers')
+ if (src.isAuxiliary() && dst.isAuxiliary())
+ error(loc, "can only have one auxiliary qualifier (centroid, patch, and sample)", "", "");
+
+ // Multiple interpolation qualifiers (mostly done later by 'individual qualifiers')
+ if (src.isInterpolation() && dst.isInterpolation())
+#ifdef AMD_EXTENSIONS
+ error(loc, "can only have one interpolation qualifier (flat, smooth, noperspective, __explicitInterpAMD)", "", "");
+#else
+ error(loc, "can only have one interpolation qualifier (flat, smooth, noperspective)", "", "");
+#endif
+
+ // Ordering
+ if (! force && ((profile != EEsProfile && version < 420) ||
+ (profile == EEsProfile && version < 310))
+ && ! extensionTurnedOn(E_GL_ARB_shading_language_420pack)) {
+ // non-function parameters
+ if (src.noContraction && (dst.invariant || dst.isInterpolation() || dst.isAuxiliary() || dst.storage != EvqTemporary || dst.precision != EpqNone))
+ error(loc, "precise qualifier must appear first", "", "");
+ if (src.invariant && (dst.isInterpolation() || dst.isAuxiliary() || dst.storage != EvqTemporary || dst.precision != EpqNone))
+ error(loc, "invariant qualifier must appear before interpolation, storage, and precision qualifiers ", "", "");
+ else if (src.isInterpolation() && (dst.isAuxiliary() || dst.storage != EvqTemporary || dst.precision != EpqNone))
+ error(loc, "interpolation qualifiers must appear before storage and precision qualifiers", "", "");
+ else if (src.isAuxiliary() && (dst.storage != EvqTemporary || dst.precision != EpqNone))
+ error(loc, "Auxiliary qualifiers (centroid, patch, and sample) must appear before storage and precision qualifiers", "", "");
+ else if (src.storage != EvqTemporary && (dst.precision != EpqNone))
+ error(loc, "precision qualifier must appear as last qualifier", "", "");
+
+ // function parameters
+ if (src.noContraction && (dst.storage == EvqConst || dst.storage == EvqIn || dst.storage == EvqOut))
+ error(loc, "precise qualifier must appear first", "", "");
+ if (src.storage == EvqConst && (dst.storage == EvqIn || dst.storage == EvqOut))
+ error(loc, "in/out must appear before const", "", "");
+ }
+
+ // Storage qualification
+ if (dst.storage == EvqTemporary || dst.storage == EvqGlobal)
+ dst.storage = src.storage;
+ else if ((dst.storage == EvqIn && src.storage == EvqOut) ||
+ (dst.storage == EvqOut && src.storage == EvqIn))
+ dst.storage = EvqInOut;
+ else if ((dst.storage == EvqIn && src.storage == EvqConst) ||
+ (dst.storage == EvqConst && src.storage == EvqIn))
+ dst.storage = EvqConstReadOnly;
+ else if (src.storage != EvqTemporary &&
+ src.storage != EvqGlobal)
+ error(loc, "too many storage qualifiers", GetStorageQualifierString(src.storage), "");
+
+ // Precision qualifiers
+ if (! force && src.precision != EpqNone && dst.precision != EpqNone)
+ error(loc, "only one precision qualifier allowed", GetPrecisionQualifierString(src.precision), "");
+ if (dst.precision == EpqNone || (force && src.precision != EpqNone))
+ dst.precision = src.precision;
+
+ if (!force && ((src.coherent && (dst.devicecoherent || dst.queuefamilycoherent || dst.workgroupcoherent || dst.subgroupcoherent)) ||
+ (src.devicecoherent && (dst.coherent || dst.queuefamilycoherent || dst.workgroupcoherent || dst.subgroupcoherent)) ||
+ (src.queuefamilycoherent && (dst.coherent || dst.devicecoherent || dst.workgroupcoherent || dst.subgroupcoherent)) ||
+ (src.workgroupcoherent && (dst.coherent || dst.devicecoherent || dst.queuefamilycoherent || dst.subgroupcoherent)) ||
+ (src.subgroupcoherent && (dst.coherent || dst.devicecoherent || dst.queuefamilycoherent || dst.workgroupcoherent)))) {
+ error(loc, "only one coherent/devicecoherent/queuefamilycoherent/workgroupcoherent/subgroupcoherent qualifier allowed", GetPrecisionQualifierString(src.precision), "");
+ }
+ // Layout qualifiers
+ mergeObjectLayoutQualifiers(dst, src, false);
+
+ // individual qualifiers
+ bool repeated = false;
+ #define MERGE_SINGLETON(field) repeated |= dst.field && src.field; dst.field |= src.field;
+ MERGE_SINGLETON(invariant);
+ MERGE_SINGLETON(noContraction);
+ MERGE_SINGLETON(centroid);
+ MERGE_SINGLETON(smooth);
+ MERGE_SINGLETON(flat);
+ MERGE_SINGLETON(nopersp);
+#ifdef AMD_EXTENSIONS
+ MERGE_SINGLETON(explicitInterp);
+#endif
+#ifdef NV_EXTENSIONS
+ MERGE_SINGLETON(perPrimitiveNV);
+ MERGE_SINGLETON(perViewNV);
+ MERGE_SINGLETON(perTaskNV);
+#endif
+ MERGE_SINGLETON(patch);
+ MERGE_SINGLETON(sample);
+ MERGE_SINGLETON(coherent);
+ MERGE_SINGLETON(devicecoherent);
+ MERGE_SINGLETON(queuefamilycoherent);
+ MERGE_SINGLETON(workgroupcoherent);
+ MERGE_SINGLETON(subgroupcoherent);
+ MERGE_SINGLETON(nonprivate);
+ MERGE_SINGLETON(volatil);
+ MERGE_SINGLETON(restrict);
+ MERGE_SINGLETON(readonly);
+ MERGE_SINGLETON(writeonly);
+ MERGE_SINGLETON(specConstant);
+ MERGE_SINGLETON(nonUniform);
+
+ if (repeated)
+ error(loc, "replicated qualifiers", "", "");
+}
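+
+// Illustrative qualifier sequences (editor's note); ordering is enforced only
+// before desktop 420 / ES 310 without GL_ARB_shading_language_420pack:
+//
+//     centroid in float a;   // OK: auxiliary appears before storage
+//     in centroid float b;   // error: auxiliary must appear before storage
+//     flat flat in float c;  // error: replicated qualifiers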
+
+void TParseContext::setDefaultPrecision(const TSourceLoc& loc, TPublicType& publicType, TPrecisionQualifier qualifier)
+{
+ TBasicType basicType = publicType.basicType;
+
+ if (basicType == EbtSampler) {
+ defaultSamplerPrecision[computeSamplerTypeIndex(publicType.sampler)] = qualifier;
+
+ return; // all is well
+ }
+
+ if (basicType == EbtInt || basicType == EbtFloat) {
+ if (publicType.isScalar()) {
+ defaultPrecision[basicType] = qualifier;
+ if (basicType == EbtInt) {
+ defaultPrecision[EbtUint] = qualifier;
+ precisionManager.explicitIntDefaultSeen();
+ } else
+ precisionManager.explicitFloatDefaultSeen();
+
+ return; // all is well
+ }
+ }
+
+ if (basicType == EbtAtomicUint) {
+ if (qualifier != EpqHigh)
+ error(loc, "can only apply highp to atomic_uint", "precision", "");
+
+ return;
+ }
+
+ error(loc, "cannot apply precision statement to this type; use 'float', 'int' or a sampler type", TType::getBasicString(basicType), "");
+}
+
+// Used to flatten the sampler type space into a single dimension;
+// correlates with the declaration of defaultSamplerPrecision[].
+int TParseContext::computeSamplerTypeIndex(TSampler& sampler)
+{
+ int arrayIndex = sampler.arrayed ? 1 : 0;
+ int shadowIndex = sampler.shadow ? 1 : 0;
+    int externalIndex = sampler.external ? 1 : 0;
+ int imageIndex = sampler.image ? 1 : 0;
+ int msIndex = sampler.ms ? 1 : 0;
+
+ int flattened = EsdNumDims * (EbtNumTypes * (2 * (2 * (2 * (2 * arrayIndex + msIndex) + imageIndex) + shadowIndex) +
+ externalIndex) + sampler.type) + sampler.dim;
+ assert(flattened < maxSamplerIndex);
+
+ return flattened;
+}
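+
+// Worked example (editor's note): for a plain, non-arrayed, non-shadow,
+// non-external, non-image, non-MS sampler all five flag bits above are 0,
+// so the expression collapses to
+//
+//     flattened = EsdNumDims * sampler.type + sampler.dim
+//
+// i.e. the (type, dim) pair indexed row-major, with the flag bits selecting
+// among parallel copies of that table.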
+
+TPrecisionQualifier TParseContext::getDefaultPrecision(TPublicType& publicType)
+{
+ if (publicType.basicType == EbtSampler)
+ return defaultSamplerPrecision[computeSamplerTypeIndex(publicType.sampler)];
+ else
+ return defaultPrecision[publicType.basicType];
+}
+
+void TParseContext::precisionQualifierCheck(const TSourceLoc& loc, TBasicType baseType, TQualifier& qualifier)
+{
+ // Built-in symbols are allowed some ambiguous precisions, to be pinned down
+ // later by context.
+ if (! obeyPrecisionQualifiers() || parsingBuiltins)
+ return;
+
+ if (baseType == EbtAtomicUint && qualifier.precision != EpqNone && qualifier.precision != EpqHigh)
+ error(loc, "atomic counters can only be highp", "atomic_uint", "");
+
+ if (baseType == EbtFloat || baseType == EbtUint || baseType == EbtInt || baseType == EbtSampler || baseType == EbtAtomicUint) {
+ if (qualifier.precision == EpqNone) {
+ if (relaxedErrors())
+ warn(loc, "type requires declaration of default precision qualifier", TType::getBasicString(baseType), "substituting 'mediump'");
+ else
+ error(loc, "type requires declaration of default precision qualifier", TType::getBasicString(baseType), "");
+ qualifier.precision = EpqMedium;
+ defaultPrecision[baseType] = EpqMedium;
+ }
+ } else if (qualifier.precision != EpqNone)
+ error(loc, "type cannot have precision qualifier", TType::getBasicString(baseType), "");
+}
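+
+// Illustrative ES fragment-shader behavior (editor's note):
+//
+//     float x;                  // error: type requires declaration of default
+//                               //        precision qualifier ('mediump' substituted)
+//     precision mediump float;  // declares the default, after which 'float x;' is fine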
+
+void TParseContext::parameterTypeCheck(const TSourceLoc& loc, TStorageQualifier qualifier, const TType& type)
+{
+ if ((qualifier == EvqOut || qualifier == EvqInOut) && type.isOpaque())
+ error(loc, "samplers and atomic_uints cannot be output parameters", type.getBasicTypeString().c_str(), "");
+
+ if (!parsingBuiltins && type.containsBasicType(EbtFloat16))
+ requireFloat16Arithmetic(loc, type.getBasicTypeString().c_str(), "float16 types can only be in uniform block or buffer storage");
+ if (!parsingBuiltins && type.contains16BitInt())
+ requireInt16Arithmetic(loc, type.getBasicTypeString().c_str(), "(u)int16 types can only be in uniform block or buffer storage");
+ if (!parsingBuiltins && type.contains8BitInt())
+ requireInt8Arithmetic(loc, type.getBasicTypeString().c_str(), "(u)int8 types can only be in uniform block or buffer storage");
+}
+
+bool TParseContext::containsFieldWithBasicType(const TType& type, TBasicType basicType)
+{
+ if (type.getBasicType() == basicType)
+ return true;
+
+ if (type.getBasicType() == EbtStruct) {
+ const TTypeList& structure = *type.getStruct();
+ for (unsigned int i = 0; i < structure.size(); ++i) {
+ if (containsFieldWithBasicType(*structure[i].type, basicType))
+ return true;
+ }
+ }
+
+ return false;
+}
+
+//
+// Do size checking for an array type's size.
+//
+void TParseContext::arraySizeCheck(const TSourceLoc& loc, TIntermTyped* expr, TArraySize& sizePair, const char *sizeType)
+{
+ bool isConst = false;
+ sizePair.node = nullptr;
+
+ int size = 1;
+
+ TIntermConstantUnion* constant = expr->getAsConstantUnion();
+ if (constant) {
+ // handle true (non-specialization) constant
+ size = constant->getConstArray()[0].getIConst();
+ isConst = true;
+ } else {
+ // see if it's a specialization constant instead
+ if (expr->getQualifier().isSpecConstant()) {
+ isConst = true;
+ sizePair.node = expr;
+ TIntermSymbol* symbol = expr->getAsSymbolNode();
+ if (symbol && symbol->getConstArray().size() > 0)
+ size = symbol->getConstArray()[0].getIConst();
+ } else if (expr->getAsUnaryNode() &&
+ expr->getAsUnaryNode()->getOp() == glslang::EOpArrayLength &&
+ expr->getAsUnaryNode()->getOperand()->getType().isCoopMat()) {
+ isConst = true;
+ size = 1;
+ sizePair.node = expr->getAsUnaryNode();
+ }
+ }
+
+ sizePair.size = size;
+
+ if (! isConst || (expr->getBasicType() != EbtInt && expr->getBasicType() != EbtUint)) {
+ error(loc, sizeType, "", "must be a constant integer expression");
+ return;
+ }
+
+ if (size <= 0) {
+ error(loc, sizeType, "", "must be a positive integer");
+ return;
+ }
+}
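+
+// Illustrative array sizes (editor's note; 'n' is a placeholder variable):
+//
+//     float a[4];  // OK: true compile-time constant
+//     float b[n];  // error unless n is a (specialization) constant:
+//                  //   "must be a constant integer expression"
+//     float c[0];  // error: must be a positive integer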
+
+//
+// See if this qualifier can be an array.
+//
+// Returns true if there is an error.
+//
+bool TParseContext::arrayQualifierError(const TSourceLoc& loc, const TQualifier& qualifier)
+{
+ if (qualifier.storage == EvqConst) {
+ profileRequires(loc, ENoProfile, 120, E_GL_3DL_array_objects, "const array");
+ profileRequires(loc, EEsProfile, 300, nullptr, "const array");
+ }
+
+ if (qualifier.storage == EvqVaryingIn && language == EShLangVertex) {
+ requireProfile(loc, ~EEsProfile, "vertex input arrays");
+ profileRequires(loc, ENoProfile, 150, nullptr, "vertex input arrays");
+ }
+
+ return false;
+}
+
+//
+// See if this qualifier and type combination can be an array.
+// Assumes arrayQualifierError() was also called to catch the type-invariant tests.
+//
+// Returns true if there is an error.
+//
+bool TParseContext::arrayError(const TSourceLoc& loc, const TType& type)
+{
+ if (type.getQualifier().storage == EvqVaryingOut && language == EShLangVertex) {
+ if (type.isArrayOfArrays())
+ requireProfile(loc, ~EEsProfile, "vertex-shader array-of-array output");
+ else if (type.isStruct())
+ requireProfile(loc, ~EEsProfile, "vertex-shader array-of-struct output");
+ }
+ if (type.getQualifier().storage == EvqVaryingIn && language == EShLangFragment) {
+ if (type.isArrayOfArrays())
+ requireProfile(loc, ~EEsProfile, "fragment-shader array-of-array input");
+ else if (type.isStruct())
+ requireProfile(loc, ~EEsProfile, "fragment-shader array-of-struct input");
+ }
+ if (type.getQualifier().storage == EvqVaryingOut && language == EShLangFragment) {
+ if (type.isArrayOfArrays())
+ requireProfile(loc, ~EEsProfile, "fragment-shader array-of-array output");
+ }
+
+ return false;
+}
+
+//
+// Require array to be completely sized
+//
+void TParseContext::arraySizeRequiredCheck(const TSourceLoc& loc, const TArraySizes& arraySizes)
+{
+ if (!parsingBuiltins && arraySizes.hasUnsized())
+ error(loc, "array size required", "", "");
+}
+
+void TParseContext::structArrayCheck(const TSourceLoc& /*loc*/, const TType& type)
+{
+ const TTypeList& structure = *type.getStruct();
+ for (int m = 0; m < (int)structure.size(); ++m) {
+ const TType& member = *structure[m].type;
+ if (member.isArray())
+ arraySizeRequiredCheck(structure[m].loc, *member.getArraySizes());
+ }
+}
+
+void TParseContext::arraySizesCheck(const TSourceLoc& loc, const TQualifier& qualifier, TArraySizes* arraySizes,
+ const TIntermTyped* initializer, bool lastMember)
+{
+ assert(arraySizes);
+
+ // always allow special built-in ins/outs sized to topologies
+ if (parsingBuiltins)
+ return;
+
+ // initializer must be a sized array, in which case
+ // allow the initializer to set any unknown array sizes
+ if (initializer != nullptr) {
+ if (initializer->getType().isUnsizedArray())
+ error(loc, "array initializer must be sized", "[]", "");
+ return;
+ }
+
+ // No environment allows any non-outer-dimension to be implicitly sized
+ if (arraySizes->isInnerUnsized()) {
+ error(loc, "only outermost dimension of an array of arrays can be implicitly sized", "[]", "");
+ arraySizes->clearInnerUnsized();
+ }
+
+ if (arraySizes->isInnerSpecialization() &&
+ (qualifier.storage != EvqTemporary && qualifier.storage != EvqGlobal && qualifier.storage != EvqShared && qualifier.storage != EvqConst))
+ error(loc, "only outermost dimension of an array of arrays can be a specialization constant", "[]", "");
+
+    // desktop always allows outer-dimension-unsized variable arrays.
+ if (profile != EEsProfile)
+ return;
+
+ // for ES, if size isn't coming from an initializer, it has to be explicitly declared now,
+ // with very few exceptions
+
+ // last member of ssbo block exception:
+ if (qualifier.storage == EvqBuffer && lastMember)
+ return;
+
+ // implicitly-sized io exceptions:
+ switch (language) {
+ case EShLangGeometry:
+ if (qualifier.storage == EvqVaryingIn)
+ if ((profile == EEsProfile && version >= 320) ||
+ extensionsTurnedOn(Num_AEP_geometry_shader, AEP_geometry_shader))
+ return;
+ break;
+ case EShLangTessControl:
+ if ( qualifier.storage == EvqVaryingIn ||
+ (qualifier.storage == EvqVaryingOut && ! qualifier.patch))
+ if ((profile == EEsProfile && version >= 320) ||
+ extensionsTurnedOn(Num_AEP_tessellation_shader, AEP_tessellation_shader))
+ return;
+ break;
+ case EShLangTessEvaluation:
+ if ((qualifier.storage == EvqVaryingIn && ! qualifier.patch) ||
+ qualifier.storage == EvqVaryingOut)
+ if ((profile == EEsProfile && version >= 320) ||
+ extensionsTurnedOn(Num_AEP_tessellation_shader, AEP_tessellation_shader))
+ return;
+ break;
+#ifdef NV_EXTENSIONS
+ case EShLangMeshNV:
+ if (qualifier.storage == EvqVaryingOut)
+ if ((profile == EEsProfile && version >= 320) ||
+ extensionTurnedOn(E_GL_NV_mesh_shader))
+ return;
+ break;
+#endif
+ default:
+ break;
+ }
+
+ arraySizeRequiredCheck(loc, *arraySizes);
+}
+
+void TParseContext::arrayOfArrayVersionCheck(const TSourceLoc& loc, const TArraySizes* sizes)
+{
+ if (sizes == nullptr || sizes->getNumDims() == 1)
+ return;
+
+ const char* feature = "arrays of arrays";
+
+ requireProfile(loc, EEsProfile | ECoreProfile | ECompatibilityProfile, feature);
+ profileRequires(loc, EEsProfile, 310, nullptr, feature);
+ profileRequires(loc, ECoreProfile | ECompatibilityProfile, 430, nullptr, feature);
+}
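+
+// Illustrative example (editor's note): "float a[2][3];" needs ES 310 or
+// desktop 430, while a single-dimension "float a[2];" passes through
+// untouched since getNumDims() == 1.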
+
+//
+// Do all the semantic checking for declaring or redeclaring an array, with and
+// without a size, and make the right changes to the symbol table.
+//
+void TParseContext::declareArray(const TSourceLoc& loc, const TString& identifier, const TType& type, TSymbol*& symbol)
+{
+ if (symbol == nullptr) {
+ bool currentScope;
+ symbol = symbolTable.find(identifier, nullptr, &currentScope);
+
+ if (symbol && builtInName(identifier) && ! symbolTable.atBuiltInLevel()) {
+ // bad shader (errors already reported) trying to redeclare a built-in name as an array
+ symbol = nullptr;
+ return;
+ }
+ if (symbol == nullptr || ! currentScope) {
+ //
+ // Successfully process a new definition.
+ // (Redeclarations have to take place at the same scope; otherwise they are hiding declarations)
+ //
+ symbol = new TVariable(&identifier, type);
+ symbolTable.insert(*symbol);
+ if (symbolTable.atGlobalLevel())
+ trackLinkage(*symbol);
+
+ if (! symbolTable.atBuiltInLevel()) {
+ if (isIoResizeArray(type)) {
+ ioArraySymbolResizeList.push_back(symbol);
+ checkIoArraysConsistency(loc, true);
+ } else
+ fixIoArraySize(loc, symbol->getWritableType());
+ }
+
+ return;
+ }
+ if (symbol->getAsAnonMember()) {
+ error(loc, "cannot redeclare a user-block member array", identifier.c_str(), "");
+ symbol = nullptr;
+ return;
+ }
+ }
+
+ //
+ // Process a redeclaration.
+ //
+
+ if (symbol == nullptr) {
+ error(loc, "array variable name expected", identifier.c_str(), "");
+ return;
+ }
+
+ // redeclareBuiltinVariable() should have already done the copyUp()
+ TType& existingType = symbol->getWritableType();
+
+ if (! existingType.isArray()) {
+ error(loc, "redeclaring non-array as array", identifier.c_str(), "");
+ return;
+ }
+
+ if (! existingType.sameElementType(type)) {
+ error(loc, "redeclaration of array with a different element type", identifier.c_str(), "");
+ return;
+ }
+
+ if (! existingType.sameInnerArrayness(type)) {
+ error(loc, "redeclaration of array with a different array dimensions or sizes", identifier.c_str(), "");
+ return;
+ }
+
+ if (existingType.isSizedArray()) {
+        // be more lenient for input arrays to geometry shaders and tessellation control outputs, where the redeclaration is the same size
+ if (! (isIoResizeArray(type) && existingType.getOuterArraySize() == type.getOuterArraySize()))
+ error(loc, "redeclaration of array with size", identifier.c_str(), "");
+ return;
+ }
+
+ arrayLimitCheck(loc, identifier, type.getOuterArraySize());
+
+ existingType.updateArraySizes(type);
+
+ if (isIoResizeArray(type))
+ checkIoArraysConsistency(loc);
+}
+
+// Policy and error check for needing a runtime sized array.
+void TParseContext::checkRuntimeSizable(const TSourceLoc& loc, const TIntermTyped& base)
+{
+ // runtime length implies runtime sizeable, so no problem
+ if (isRuntimeLength(base))
+ return;
+
+    // Check for the last member of a buffer reference type, which is runtime
+    // sizeable but doesn't support runtime length
+ if (base.getType().getQualifier().storage == EvqBuffer) {
+ const TIntermBinary* binary = base.getAsBinaryNode();
+ if (binary != nullptr &&
+ binary->getOp() == EOpIndexDirectStruct &&
+ binary->getLeft()->getBasicType() == EbtReference) {
+
+ const int index = binary->getRight()->getAsConstantUnion()->getConstArray()[0].getIConst();
+ const int memberCount = (int)binary->getLeft()->getType().getReferentType()->getStruct()->size();
+ if (index == memberCount - 1)
+ return;
+ }
+ }
+
+ // check for additional things allowed by GL_EXT_nonuniform_qualifier
+ if (base.getBasicType() == EbtSampler ||
+ (base.getBasicType() == EbtBlock && base.getType().getQualifier().isUniformOrBuffer()))
+ requireExtensions(loc, 1, &E_GL_EXT_nonuniform_qualifier, "variable index");
+ else
+ error(loc, "", "[", "array must be redeclared with a size before being indexed with a variable");
+}
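+
+// Illustrative usage (editor's note; placeholder names):
+//
+//     buffer B { float data[]; } b;  // unsized last member is runtime-sizeable
+//     b.data[i]                      // OK: runtime length implies runtime sizeable
+//     uniform sampler2D s[];         // variable index s[i] needs GL_EXT_nonuniform_qualifier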
+
+// Policy decision for whether a run-time .length() is allowed.
+bool TParseContext::isRuntimeLength(const TIntermTyped& base) const
+{
+ if (base.getType().getQualifier().storage == EvqBuffer) {
+ // in a buffer block
+ const TIntermBinary* binary = base.getAsBinaryNode();
+ if (binary != nullptr && binary->getOp() == EOpIndexDirectStruct) {
+ // is it the last member?
+ const int index = binary->getRight()->getAsConstantUnion()->getConstArray()[0].getIConst();
+
+ if (binary->getLeft()->getBasicType() == EbtReference)
+ return false;
+
+ const int memberCount = (int)binary->getLeft()->getType().getStruct()->size();
+ if (index == memberCount - 1)
+ return true;
+ }
+ }
+
+ return false;
+}
+
+#ifdef NV_EXTENSIONS
+// Fix mesh view output array dimension
+void TParseContext::resizeMeshViewDimension(const TSourceLoc& loc, TType& type)
+{
+ // see if member is a per-view attribute
+ if (type.getQualifier().isPerView()) {
+ // since we don't have the maxMeshViewCountNV set during parsing builtins, we hardcode the value
+ int maxViewCount = parsingBuiltins ? 4 : resources.maxMeshViewCountNV;
+
+ if (! type.isArray()) {
+            error(loc, "requires a view array dimension", "perviewNV", "");
+ }
+ else if (!type.isUnsizedArray() && type.getOuterArraySize() != maxViewCount) {
+ error(loc, "mesh view output array size must be gl_MaxMeshViewCountNV or implicitly sized", "[]", "");
+ }
+ else if (type.isUnsizedArray()) {
+ type.changeOuterArraySize(maxViewCount);
+ }
+ }
+}
+#endif
+
+// Returns true if the first argument to the #line directive is the line number for the next line.
+//
+// Desktop, pre-version 3.30: "After processing this directive
+// (including its new-line), the implementation will behave as if it is compiling at line number line+1 and
+// source string number source-string-number."
+//
+// Desktop, version 3.30 and later, and ES: "After processing this directive
+// (including its new-line), the implementation will behave as if it is compiling at line number line and
+// source string number source-string-number."
+bool TParseContext::lineDirectiveShouldSetNextLine() const
+{
+ return profile == EEsProfile || version >= 330;
+}
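+
+// Illustrative behavior of "#line 10" (editor's note):
+//   desktop before 3.30: the next line is numbered 11 (line + 1)
+//   ES, or desktop 3.30 and later: the next line is numbered 10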
+
+//
+// Enforce non-initializer type/qualifier rules.
+//
+void TParseContext::nonInitConstCheck(const TSourceLoc& loc, TString& identifier, TType& type)
+{
+ //
+ // Make the qualifier make sense, given that there is not an initializer.
+ //
+ if (type.getQualifier().storage == EvqConst ||
+ type.getQualifier().storage == EvqConstReadOnly) {
+ type.getQualifier().makeTemporary();
+ error(loc, "variables with qualifier 'const' must be initialized", identifier.c_str(), "");
+ }
+}
+
+//
+// See if the identifier is a built-in symbol that can be redeclared, and if so,
+// copy the symbol table's read-only built-in variable to the current
+// global level, where it can be modified based on the passed in type.
+//
+// Returns nullptr if no redeclaration took place; meaning a normal declaration still
+// needs to occur for it, not necessarily an error.
+//
+// Returns a redeclared and type-modified variable if a redeclaration occurred.
+//
+TSymbol* TParseContext::redeclareBuiltinVariable(const TSourceLoc& loc, const TString& identifier,
+ const TQualifier& qualifier, const TShaderQualifiers& publicType)
+{
+ if (! builtInName(identifier) || symbolTable.atBuiltInLevel() || ! symbolTable.atGlobalLevel())
+ return nullptr;
+
+ bool nonEsRedecls = (profile != EEsProfile && (version >= 130 || identifier == "gl_TexCoord"));
+ bool esRedecls = (profile == EEsProfile &&
+ (version >= 320 || extensionsTurnedOn(Num_AEP_shader_io_blocks, AEP_shader_io_blocks)));
+ if (! esRedecls && ! nonEsRedecls)
+ return nullptr;
+
+ // Special case when using GL_ARB_separate_shader_objects
+ bool ssoPre150 = false; // means the only reason this variable is redeclared is due to this combination
+ if (profile != EEsProfile && version <= 140 && extensionTurnedOn(E_GL_ARB_separate_shader_objects)) {
+ if (identifier == "gl_Position" ||
+ identifier == "gl_PointSize" ||
+ identifier == "gl_ClipVertex" ||
+ identifier == "gl_FogFragCoord")
+ ssoPre150 = true;
+ }
+
+ // Potentially redeclaring a built-in variable...
+
+ if (ssoPre150 ||
+ (identifier == "gl_FragDepth" && ((nonEsRedecls && version >= 420) || esRedecls)) ||
+ (identifier == "gl_FragCoord" && ((nonEsRedecls && version >= 150) || esRedecls)) ||
+ identifier == "gl_ClipDistance" ||
+ identifier == "gl_CullDistance" ||
+ identifier == "gl_FrontColor" ||
+ identifier == "gl_BackColor" ||
+ identifier == "gl_FrontSecondaryColor" ||
+ identifier == "gl_BackSecondaryColor" ||
+ identifier == "gl_SecondaryColor" ||
+ (identifier == "gl_Color" && language == EShLangFragment) ||
+ (identifier == "gl_FragStencilRefARB" && (nonEsRedecls && version >= 140)
+ && language == EShLangFragment) ||
+#ifdef NV_EXTENSIONS
+ identifier == "gl_SampleMask" ||
+ identifier == "gl_Layer" ||
+ identifier == "gl_PrimitiveIndicesNV" ||
+#endif
+ identifier == "gl_TexCoord") {
+
+ // Find the existing symbol, if any.
+ bool builtIn;
+ TSymbol* symbol = symbolTable.find(identifier, &builtIn);
+
+ // If the symbol was not found, this must be a version/profile/stage
+ // that doesn't have it.
+ if (! symbol)
+ return nullptr;
+
+ // If it wasn't at a built-in level, then it's already been redeclared;
+ // that is, this is a redeclaration of a redeclaration; reuse that initial
+ // redeclaration. Otherwise, make the new one.
+ if (builtIn)
+ makeEditable(symbol);
+
+ // Now, modify the type of the copy, as per the type of the current redeclaration.
+
+ TQualifier& symbolQualifier = symbol->getWritableType().getQualifier();
+ if (ssoPre150) {
+ if (intermediate.inIoAccessed(identifier))
+ error(loc, "cannot redeclare after use", identifier.c_str(), "");
+ if (qualifier.hasLayout())
+ error(loc, "cannot apply layout qualifier to", "redeclaration", symbol->getName().c_str());
+ if (qualifier.isMemory() || qualifier.isAuxiliary() || (language == EShLangVertex && qualifier.storage != EvqVaryingOut) ||
+ (language == EShLangFragment && qualifier.storage != EvqVaryingIn))
+ error(loc, "cannot change storage, memory, or auxiliary qualification of", "redeclaration", symbol->getName().c_str());
+ if (! qualifier.smooth)
+ error(loc, "cannot change interpolation qualification of", "redeclaration", symbol->getName().c_str());
+ } else if (identifier == "gl_FrontColor" ||
+ identifier == "gl_BackColor" ||
+ identifier == "gl_FrontSecondaryColor" ||
+ identifier == "gl_BackSecondaryColor" ||
+ identifier == "gl_SecondaryColor" ||
+ identifier == "gl_Color") {
+ symbolQualifier.flat = qualifier.flat;
+ symbolQualifier.smooth = qualifier.smooth;
+ symbolQualifier.nopersp = qualifier.nopersp;
+ if (qualifier.hasLayout())
+ error(loc, "cannot apply layout qualifier to", "redeclaration", symbol->getName().c_str());
+ if (qualifier.isMemory() || qualifier.isAuxiliary() || symbol->getType().getQualifier().storage != qualifier.storage)
+ error(loc, "cannot change storage, memory, or auxiliary qualification of", "redeclaration", symbol->getName().c_str());
+ } else if (identifier == "gl_TexCoord" ||
+ identifier == "gl_ClipDistance" ||
+ identifier == "gl_CullDistance") {
+ if (qualifier.hasLayout() || qualifier.isMemory() || qualifier.isAuxiliary() ||
+ qualifier.nopersp != symbolQualifier.nopersp || qualifier.flat != symbolQualifier.flat ||
+ symbolQualifier.storage != qualifier.storage)
+ error(loc, "cannot change qualification of", "redeclaration", symbol->getName().c_str());
+ } else if (identifier == "gl_FragCoord") {
+ if (intermediate.inIoAccessed("gl_FragCoord"))
+ error(loc, "cannot redeclare after use", "gl_FragCoord", "");
+ if (qualifier.nopersp != symbolQualifier.nopersp || qualifier.flat != symbolQualifier.flat ||
+ qualifier.isMemory() || qualifier.isAuxiliary())
+ error(loc, "can only change layout qualification of", "redeclaration", symbol->getName().c_str());
+ if (qualifier.storage != EvqVaryingIn)
+ error(loc, "cannot change input storage qualification of", "redeclaration", symbol->getName().c_str());
+ if (! builtIn && (publicType.pixelCenterInteger != intermediate.getPixelCenterInteger() ||
+ publicType.originUpperLeft != intermediate.getOriginUpperLeft()))
+ error(loc, "cannot redeclare with different qualification:", "redeclaration", symbol->getName().c_str());
+ if (publicType.pixelCenterInteger)
+ intermediate.setPixelCenterInteger();
+ if (publicType.originUpperLeft)
+ intermediate.setOriginUpperLeft();
+ } else if (identifier == "gl_FragDepth") {
+ if (qualifier.nopersp != symbolQualifier.nopersp || qualifier.flat != symbolQualifier.flat ||
+ qualifier.isMemory() || qualifier.isAuxiliary())
+ error(loc, "can only change layout qualification of", "redeclaration", symbol->getName().c_str());
+ if (qualifier.storage != EvqVaryingOut)
+ error(loc, "cannot change output storage qualification of", "redeclaration", symbol->getName().c_str());
+ if (publicType.layoutDepth != EldNone) {
+ if (intermediate.inIoAccessed("gl_FragDepth"))
+ error(loc, "cannot redeclare after use", "gl_FragDepth", "");
+ if (! intermediate.setDepth(publicType.layoutDepth))
+ error(loc, "all redeclarations must use the same depth layout on", "redeclaration", symbol->getName().c_str());
+ }
+ }
+ else if (
+#ifdef NV_EXTENSIONS
+ identifier == "gl_PrimitiveIndicesNV" ||
+#endif
+ identifier == "gl_FragStencilRefARB") {
+ if (qualifier.hasLayout())
+ error(loc, "cannot apply layout qualifier to", "redeclaration", symbol->getName().c_str());
+ if (qualifier.storage != EvqVaryingOut)
+ error(loc, "cannot change output storage qualification of", "redeclaration", symbol->getName().c_str());
+ }
+#ifdef NV_EXTENSIONS
+ else if (identifier == "gl_SampleMask") {
+ if (!publicType.layoutOverrideCoverage) {
+ error(loc, "redeclaration only allowed for override_coverage layout", "redeclaration", symbol->getName().c_str());
+ }
+ intermediate.setLayoutOverrideCoverage();
+ }
+ else if (identifier == "gl_Layer") {
+ if (!qualifier.layoutViewportRelative && qualifier.layoutSecondaryViewportRelativeOffset == -2048)
+ error(loc, "redeclaration only allowed for viewport_relative or secondary_view_offset layout", "redeclaration", symbol->getName().c_str());
+ symbolQualifier.layoutViewportRelative = qualifier.layoutViewportRelative;
+ symbolQualifier.layoutSecondaryViewportRelativeOffset = qualifier.layoutSecondaryViewportRelativeOffset;
+ }
+#endif
+
+ // TODO: semantics quality: separate smooth from nothing declared, then use IsInterpolation for several tests above
+
+ return symbol;
+ }
+
+ return nullptr;
+}
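+
+// For example (illustrative GLSL, not from this source), a fragment shader can
+// redeclare gl_FragCoord to change only its window-origin conventions:
+//
+//     layout(origin_upper_left, pixel_center_integer) in vec4 gl_FragCoord;
+//
+// while a redeclaration such as "flat in vec4 gl_FragCoord;" is rejected above
+// for changing interpolation qualification.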
+
+//
+// Either redeclare the requested block, or give an error message why it can't be done.
+//
+// TODO: functionality: explicitly sizing members of redeclared blocks is not giving them an explicit size
+void TParseContext::redeclareBuiltinBlock(const TSourceLoc& loc, TTypeList& newTypeList, const TString& blockName,
+ const TString* instanceName, TArraySizes* arraySizes)
+{
+ const char* feature = "built-in block redeclaration";
+ profileRequires(loc, EEsProfile, 320, Num_AEP_shader_io_blocks, AEP_shader_io_blocks, feature);
+ profileRequires(loc, ~EEsProfile, 410, E_GL_ARB_separate_shader_objects, feature);
+
+ if (blockName != "gl_PerVertex" && blockName != "gl_PerFragment"
+#ifdef NV_EXTENSIONS
+ && blockName != "gl_MeshPerVertexNV" && blockName != "gl_MeshPerPrimitiveNV"
+#endif
+ )
+ {
+ error(loc, "cannot redeclare block: ", "block declaration", blockName.c_str());
+ return;
+ }
+
+ // Redeclaring a built-in block...
+
+ if (instanceName && ! builtInName(*instanceName)) {
+ error(loc, "cannot redeclare a built-in block with a user name", instanceName->c_str(), "");
+ return;
+ }
+
+    // Blocks with instance names are easy to find: look up the instance name.
+    // Anonymous blocks need to be found via a member.
+ bool builtIn;
+ TSymbol* block;
+ if (instanceName)
+ block = symbolTable.find(*instanceName, &builtIn);
+ else
+ block = symbolTable.find(newTypeList.front().type->getFieldName(), &builtIn);
+
+ // If the block was not found, this must be a version/profile/stage
+ // that doesn't have it, or the instance name is wrong.
+ const char* errorName = instanceName ? instanceName->c_str() : newTypeList.front().type->getFieldName().c_str();
+ if (! block) {
+ error(loc, "no declaration found for redeclaration", errorName, "");
+ return;
+ }
+    // Built-in blocks cannot be redeclared more than once; if that had happened,
+    // we'd be finding the already redeclared one here, rather than the built-in one.
+ if (! builtIn) {
+ error(loc, "can only redeclare a built-in block once, and before any use", blockName.c_str(), "");
+ return;
+ }
+
+ // Copy the block to make a writable version, to insert into the block table after editing.
+ block = symbolTable.copyUpDeferredInsert(block);
+
+ if (block->getType().getBasicType() != EbtBlock) {
+ error(loc, "cannot redeclare a non block as a block", errorName, "");
+ return;
+ }
+
+ // Fix XFB stuff up, it applies to the order of the redeclaration, not
+ // the order of the original members.
+ if (currentBlockQualifier.storage == EvqVaryingOut && globalOutputDefaults.hasXfbBuffer()) {
+ if (!currentBlockQualifier.hasXfbBuffer())
+ currentBlockQualifier.layoutXfbBuffer = globalOutputDefaults.layoutXfbBuffer;
+ if (!currentBlockQualifier.hasStream())
+ currentBlockQualifier.layoutStream = globalOutputDefaults.layoutStream;
+ fixXfbOffsets(currentBlockQualifier, newTypeList);
+ }
+
+ // Edit and error check the container against the redeclaration
+ // - remove unused members
+ // - ensure remaining qualifiers/types match
+
+ TType& type = block->getWritableType();
+
+#ifdef NV_EXTENSIONS
+    // If gl_PerVertex is redeclared for the purpose of passing through "gl_Position",
+    // the redeclared block should have the same qualifiers as the current one.
+ if (currentBlockQualifier.layoutPassthrough) {
+ type.getQualifier().layoutPassthrough = currentBlockQualifier.layoutPassthrough;
+ type.getQualifier().storage = currentBlockQualifier.storage;
+ type.getQualifier().layoutStream = currentBlockQualifier.layoutStream;
+ type.getQualifier().layoutXfbBuffer = currentBlockQualifier.layoutXfbBuffer;
+ }
+#endif
+
+ TTypeList::iterator member = type.getWritableStruct()->begin();
+ size_t numOriginalMembersFound = 0;
+ while (member != type.getStruct()->end()) {
+ // look for match
+ bool found = false;
+ TTypeList::const_iterator newMember;
+ TSourceLoc memberLoc;
+ memberLoc.init();
+ for (newMember = newTypeList.begin(); newMember != newTypeList.end(); ++newMember) {
+ if (member->type->getFieldName() == newMember->type->getFieldName()) {
+ found = true;
+ memberLoc = newMember->loc;
+ break;
+ }
+ }
+
+ if (found) {
+ ++numOriginalMembersFound;
+ // - ensure match between redeclared members' types
+ // - check for things that can't be changed
+ // - update things that can be changed
+ TType& oldType = *member->type;
+ const TType& newType = *newMember->type;
+ if (! newType.sameElementType(oldType))
+ error(memberLoc, "cannot redeclare block member with a different type", member->type->getFieldName().c_str(), "");
+ if (oldType.isArray() != newType.isArray())
+ error(memberLoc, "cannot change arrayness of redeclared block member", member->type->getFieldName().c_str(), "");
+ else if (! oldType.getQualifier().isPerView() && ! oldType.sameArrayness(newType) && oldType.isSizedArray())
+ error(memberLoc, "cannot change array size of redeclared block member", member->type->getFieldName().c_str(), "");
+ else if (! oldType.getQualifier().isPerView() && newType.isArray())
+ arrayLimitCheck(loc, member->type->getFieldName(), newType.getOuterArraySize());
+#ifdef NV_EXTENSIONS
+ if (oldType.getQualifier().isPerView() && ! newType.getQualifier().isPerView())
+ error(memberLoc, "missing perviewNV qualifier to redeclared block member", member->type->getFieldName().c_str(), "");
+ else if (! oldType.getQualifier().isPerView() && newType.getQualifier().isPerView())
+ error(memberLoc, "cannot add perviewNV qualifier to redeclared block member", member->type->getFieldName().c_str(), "");
+ else if (newType.getQualifier().isPerView()) {
+ if (oldType.getArraySizes()->getNumDims() != newType.getArraySizes()->getNumDims())
+ error(memberLoc, "cannot change arrayness of redeclared block member", member->type->getFieldName().c_str(), "");
+ else if (! newType.isUnsizedArray() && newType.getOuterArraySize() != resources.maxMeshViewCountNV)
+ error(loc, "mesh view output array size must be gl_MaxMeshViewCountNV or implicitly sized", "[]", "");
+ else if (newType.getArraySizes()->getNumDims() == 2) {
+ int innerDimSize = newType.getArraySizes()->getDimSize(1);
+ arrayLimitCheck(memberLoc, member->type->getFieldName(), innerDimSize);
+ oldType.getArraySizes()->setDimSize(1, innerDimSize);
+ }
+ }
+ if (oldType.getQualifier().isPerPrimitive() && ! newType.getQualifier().isPerPrimitive())
+ error(memberLoc, "missing perprimitiveNV qualifier to redeclared block member", member->type->getFieldName().c_str(), "");
+ else if (! oldType.getQualifier().isPerPrimitive() && newType.getQualifier().isPerPrimitive())
+ error(memberLoc, "cannot add perprimitiveNV qualifier to redeclared block member", member->type->getFieldName().c_str(), "");
+#endif
+ if (newType.getQualifier().isMemory())
+ error(memberLoc, "cannot add memory qualifier to redeclared block member", member->type->getFieldName().c_str(), "");
+ if (newType.getQualifier().hasNonXfbLayout())
+ error(memberLoc, "cannot add non-XFB layout to redeclared block member", member->type->getFieldName().c_str(), "");
+ if (newType.getQualifier().patch)
+ error(memberLoc, "cannot add patch to redeclared block member", member->type->getFieldName().c_str(), "");
+ if (newType.getQualifier().hasXfbBuffer() &&
+ newType.getQualifier().layoutXfbBuffer != currentBlockQualifier.layoutXfbBuffer)
+ error(memberLoc, "member cannot contradict block (or what block inherited from global)", "xfb_buffer", "");
+ if (newType.getQualifier().hasStream() &&
+ newType.getQualifier().layoutStream != currentBlockQualifier.layoutStream)
+ error(memberLoc, "member cannot contradict block (or what block inherited from global)", "xfb_stream", "");
+ oldType.getQualifier().centroid = newType.getQualifier().centroid;
+ oldType.getQualifier().sample = newType.getQualifier().sample;
+ oldType.getQualifier().invariant = newType.getQualifier().invariant;
+ oldType.getQualifier().noContraction = newType.getQualifier().noContraction;
+ oldType.getQualifier().smooth = newType.getQualifier().smooth;
+ oldType.getQualifier().flat = newType.getQualifier().flat;
+ oldType.getQualifier().nopersp = newType.getQualifier().nopersp;
+ oldType.getQualifier().layoutXfbOffset = newType.getQualifier().layoutXfbOffset;
+ oldType.getQualifier().layoutXfbBuffer = newType.getQualifier().layoutXfbBuffer;
+ oldType.getQualifier().layoutXfbStride = newType.getQualifier().layoutXfbStride;
+                if (oldType.getQualifier().hasXfbOffset()) {
+                    // If any member has an xfb_offset, then the block's xfb_buffer inherits the current xfb_buffer,
+ // and for xfb processing, the member needs it as well, along with xfb_stride.
+ type.getQualifier().layoutXfbBuffer = currentBlockQualifier.layoutXfbBuffer;
+ oldType.getQualifier().layoutXfbBuffer = currentBlockQualifier.layoutXfbBuffer;
+ }
+ if (oldType.isUnsizedArray() && newType.isSizedArray())
+ oldType.changeOuterArraySize(newType.getOuterArraySize());
+
+ // check and process the member's type, which will include managing xfb information
+ layoutTypeCheck(loc, oldType);
+
+ // go to next member
+ ++member;
+ } else {
+ // For missing members of anonymous blocks that have been redeclared,
+ // hide the original (shared) declaration.
+ // Instance-named blocks can just have the member removed.
+ if (instanceName)
+ member = type.getWritableStruct()->erase(member);
+ else {
+ member->type->hideMember();
+ ++member;
+ }
+ }
+ }
+
+ if (spvVersion.vulkan > 0) {
+ // ...then streams apply to built-in blocks, instead of them being only on stream 0
+ type.getQualifier().layoutStream = currentBlockQualifier.layoutStream;
+ }
+
+ if (numOriginalMembersFound < newTypeList.size())
+ error(loc, "block redeclaration has extra members", blockName.c_str(), "");
+ if (type.isArray() != (arraySizes != nullptr) ||
+ (type.isArray() && arraySizes != nullptr && type.getArraySizes()->getNumDims() != arraySizes->getNumDims()))
+ error(loc, "cannot change arrayness of redeclared block", blockName.c_str(), "");
+ else if (type.isArray()) {
+ // At this point, we know both are arrays and both have the same number of dimensions.
+
+ // It is okay for a built-in block redeclaration to be unsized, and keep the size of the
+ // original block declaration.
+ if (!arraySizes->isSized() && type.isSizedArray())
+ arraySizes->changeOuterSize(type.getOuterArraySize());
+
+ // And, okay to be giving a size to the array, by the redeclaration
+ if (!type.isSizedArray() && arraySizes->isSized())
+ type.changeOuterArraySize(arraySizes->getOuterSize());
+
+ // Now, they must match in all dimensions.
+ if (type.isSizedArray() && *type.getArraySizes() != *arraySizes)
+ error(loc, "cannot change array size of redeclared block", blockName.c_str(), "");
+ }
+
+ symbolTable.insert(*block);
+
+ // Check for general layout qualifier errors
+ layoutObjectCheck(loc, *block);
+
+ // Tracking for implicit sizing of array
+ if (isIoResizeArray(block->getType())) {
+ ioArraySymbolResizeList.push_back(block);
+ checkIoArraysConsistency(loc, true);
+ } else if (block->getType().isArray())
+ fixIoArraySize(loc, block->getWritableType());
+
+ // Save it in the AST for linker use.
+ trackLinkage(*block);
+}
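+
+// For example (illustrative GLSL), a vertex shader can shrink the built-in
+// block down to just the members it uses:
+//
+//     out gl_PerVertex {
+//         vec4 gl_Position;
+//     };
+//
+// Members omitted from the redeclaration (gl_PointSize, gl_ClipDistance, ...)
+// are erased or hidden by the member loop above.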
+
+void TParseContext::paramCheckFixStorage(const TSourceLoc& loc, const TStorageQualifier& qualifier, TType& type)
+{
+ switch (qualifier) {
+ case EvqConst:
+ case EvqConstReadOnly:
+ type.getQualifier().storage = EvqConstReadOnly;
+ break;
+ case EvqIn:
+ case EvqOut:
+ case EvqInOut:
+ type.getQualifier().storage = qualifier;
+ break;
+ case EvqGlobal:
+ case EvqTemporary:
+ type.getQualifier().storage = EvqIn;
+ break;
+ default:
+ type.getQualifier().storage = EvqIn;
+ error(loc, "storage qualifier not allowed on function parameter", GetStorageQualifierString(qualifier), "");
+ break;
+ }
+}
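+
+// For instance, given the illustrative prototype
+//
+//     void f(const in float a, float b, inout float c);
+//
+// 'a' is stored as EvqConstReadOnly, 'b' (no storage qualifier) defaults to
+// EvqIn, and 'c' keeps EvqInOut.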
+
+void TParseContext::paramCheckFix(const TSourceLoc& loc, const TQualifier& qualifier, TType& type)
+{
+ if (qualifier.isMemory()) {
+ type.getQualifier().volatil = qualifier.volatil;
+ type.getQualifier().coherent = qualifier.coherent;
+        type.getQualifier().devicecoherent = qualifier.devicecoherent;
+ type.getQualifier().queuefamilycoherent = qualifier.queuefamilycoherent;
+ type.getQualifier().workgroupcoherent = qualifier.workgroupcoherent;
+ type.getQualifier().subgroupcoherent = qualifier.subgroupcoherent;
+ type.getQualifier().nonprivate = qualifier.nonprivate;
+ type.getQualifier().readonly = qualifier.readonly;
+ type.getQualifier().writeonly = qualifier.writeonly;
+ type.getQualifier().restrict = qualifier.restrict;
+ }
+
+ if (qualifier.isAuxiliary() ||
+ qualifier.isInterpolation())
+ error(loc, "cannot use auxiliary or interpolation qualifiers on a function parameter", "", "");
+ if (qualifier.hasLayout())
+ error(loc, "cannot use layout qualifiers on a function parameter", "", "");
+ if (qualifier.invariant)
+ error(loc, "cannot use invariant qualifier on a function parameter", "", "");
+ if (qualifier.noContraction) {
+ if (qualifier.isParamOutput())
+ type.getQualifier().noContraction = true;
+ else
+ warn(loc, "qualifier has no effect on non-output parameters", "precise", "");
+ }
+ if (qualifier.isNonUniform())
+ type.getQualifier().nonUniform = qualifier.nonUniform;
+
+ paramCheckFixStorage(loc, qualifier.storage, type);
+}
+
+void TParseContext::nestedBlockCheck(const TSourceLoc& loc)
+{
+ if (structNestingLevel > 0)
+ error(loc, "cannot nest a block definition inside a structure or block", "", "");
+ ++structNestingLevel;
+}
+
+void TParseContext::nestedStructCheck(const TSourceLoc& loc)
+{
+ if (structNestingLevel > 0)
+ error(loc, "cannot nest a structure definition inside a structure or block", "", "");
+ ++structNestingLevel;
+}
+
+void TParseContext::arrayObjectCheck(const TSourceLoc& loc, const TType& type, const char* op)
+{
+ // Some versions don't allow comparing arrays or structures containing arrays
+ if (type.containsArray()) {
+ profileRequires(loc, ENoProfile, 120, E_GL_3DL_array_objects, op);
+ profileRequires(loc, EEsProfile, 300, nullptr, op);
+ }
+}
+
+void TParseContext::opaqueCheck(const TSourceLoc& loc, const TType& type, const char* op)
+{
+ if (containsFieldWithBasicType(type, EbtSampler))
+ error(loc, "can't use with samplers or structs containing samplers", op, "");
+}
+
+void TParseContext::referenceCheck(const TSourceLoc& loc, const TType& type, const char* op)
+{
+ if (containsFieldWithBasicType(type, EbtReference))
+ error(loc, "can't use with reference types", op, "");
+}
+
+void TParseContext::storage16BitAssignmentCheck(const TSourceLoc& loc, const TType& type, const char* op)
+{
+ if (type.getBasicType() == EbtStruct && containsFieldWithBasicType(type, EbtFloat16))
+ requireFloat16Arithmetic(loc, op, "can't use with structs containing float16");
+
+ if (type.isArray() && type.getBasicType() == EbtFloat16)
+ requireFloat16Arithmetic(loc, op, "can't use with arrays containing float16");
+
+ if (type.getBasicType() == EbtStruct && containsFieldWithBasicType(type, EbtInt16))
+ requireInt16Arithmetic(loc, op, "can't use with structs containing int16");
+
+ if (type.isArray() && type.getBasicType() == EbtInt16)
+ requireInt16Arithmetic(loc, op, "can't use with arrays containing int16");
+
+ if (type.getBasicType() == EbtStruct && containsFieldWithBasicType(type, EbtUint16))
+ requireInt16Arithmetic(loc, op, "can't use with structs containing uint16");
+
+ if (type.isArray() && type.getBasicType() == EbtUint16)
+ requireInt16Arithmetic(loc, op, "can't use with arrays containing uint16");
+
+ if (type.getBasicType() == EbtStruct && containsFieldWithBasicType(type, EbtInt8))
+ requireInt8Arithmetic(loc, op, "can't use with structs containing int8");
+
+ if (type.isArray() && type.getBasicType() == EbtInt8)
+ requireInt8Arithmetic(loc, op, "can't use with arrays containing int8");
+
+ if (type.getBasicType() == EbtStruct && containsFieldWithBasicType(type, EbtUint8))
+ requireInt8Arithmetic(loc, op, "can't use with structs containing uint8");
+
+ if (type.isArray() && type.getBasicType() == EbtUint8)
+ requireInt8Arithmetic(loc, op, "can't use with arrays containing uint8");
+}
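+
+// For example (an illustrative sketch; names are placeholders), with only the
+// 16-bit storage extensions enabled, a whole-struct copy of a 16-bit type out
+// of a buffer is rejected by the checks above:
+//
+//     struct S { float16_t h; };
+//     // ...
+//     S tmp = someBuffer.s;   // struct copy triggers requireFloat16Arithmetic()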
+
+void TParseContext::specializationCheck(const TSourceLoc& loc, const TType& type, const char* op)
+{
+ if (type.containsSpecializationSize())
+ error(loc, "can't use with types containing arrays sized with a specialization constant", op, "");
+}
+
+void TParseContext::structTypeCheck(const TSourceLoc& /*loc*/, TPublicType& publicType)
+{
+ const TTypeList& typeList = *publicType.userDef->getStruct();
+
+ // fix and check for member storage qualifiers and types that don't belong within a structure
+ for (unsigned int member = 0; member < typeList.size(); ++member) {
+ TQualifier& memberQualifier = typeList[member].type->getQualifier();
+ const TSourceLoc& memberLoc = typeList[member].loc;
+ if (memberQualifier.isAuxiliary() ||
+ memberQualifier.isInterpolation() ||
+ (memberQualifier.storage != EvqTemporary && memberQualifier.storage != EvqGlobal))
+ error(memberLoc, "cannot use storage or interpolation qualifiers on structure members", typeList[member].type->getFieldName().c_str(), "");
+ if (memberQualifier.isMemory())
+ error(memberLoc, "cannot use memory qualifiers on structure members", typeList[member].type->getFieldName().c_str(), "");
+ if (memberQualifier.hasLayout()) {
+ error(memberLoc, "cannot use layout qualifiers on structure members", typeList[member].type->getFieldName().c_str(), "");
+ memberQualifier.clearLayout();
+ }
+ if (memberQualifier.invariant)
+ error(memberLoc, "cannot use invariant qualifier on structure members", typeList[member].type->getFieldName().c_str(), "");
+ }
+}
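+
+// For example, this illustrative structure would trigger the member checks above:
+//
+//     struct S {
+//         flat int a;                  // interpolation qualifier: error
+//         layout(location = 0) int b;  // layout qualifier: error, then cleared
+//     };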
+
+//
+// See if this loop satisfies the limitations for ES 2.0 (version 100) for loops in Appendix A:
+//
+// "The loop index has type int or float.
+//
+// "The for statement has the form:
+// for ( init-declaration ; condition ; expression )
+// init-declaration has the form: type-specifier identifier = constant-expression
+// condition has the form: loop-index relational_operator constant-expression
+// where relational_operator is one of: > >= < <= == or !=
+// expression [sic] has one of the following forms:
+// loop-index++
+// loop-index--
+// loop-index += constant-expression
+// loop-index -= constant-expression
+//
+// The body is handled in an AST traversal.
+//
+void TParseContext::inductiveLoopCheck(const TSourceLoc& loc, TIntermNode* init, TIntermLoop* loop)
+{
+ // loop index init must exist and be a declaration, which shows up in the AST as an aggregate of size 1 of the declaration
+ bool badInit = false;
+ if (! init || ! init->getAsAggregate() || init->getAsAggregate()->getSequence().size() != 1)
+ badInit = true;
+    TIntermBinary* binaryInit = nullptr;
+ if (! badInit) {
+ // get the declaration assignment
+ binaryInit = init->getAsAggregate()->getSequence()[0]->getAsBinaryNode();
+ if (! binaryInit)
+ badInit = true;
+ }
+ if (badInit) {
+ error(loc, "inductive-loop init-declaration requires the form \"type-specifier loop-index = constant-expression\"", "limitations", "");
+ return;
+ }
+
+ // loop index must be type int or float
+ if (! binaryInit->getType().isScalar() || (binaryInit->getBasicType() != EbtInt && binaryInit->getBasicType() != EbtFloat)) {
+ error(loc, "inductive loop requires a scalar 'int' or 'float' loop index", "limitations", "");
+ return;
+ }
+
+ // init is the form "loop-index = constant"
+ if (binaryInit->getOp() != EOpAssign || ! binaryInit->getLeft()->getAsSymbolNode() || ! binaryInit->getRight()->getAsConstantUnion()) {
+ error(loc, "inductive-loop init-declaration requires the form \"type-specifier loop-index = constant-expression\"", "limitations", "");
+ return;
+ }
+
+ // get the unique id of the loop index
+ int loopIndex = binaryInit->getLeft()->getAsSymbolNode()->getId();
+ inductiveLoopIds.insert(loopIndex);
+
+ // condition's form must be "loop-index relational-operator constant-expression"
+ bool badCond = ! loop->getTest();
+ if (! badCond) {
+ TIntermBinary* binaryCond = loop->getTest()->getAsBinaryNode();
+ badCond = ! binaryCond;
+ if (! badCond) {
+ switch (binaryCond->getOp()) {
+ case EOpGreaterThan:
+ case EOpGreaterThanEqual:
+ case EOpLessThan:
+ case EOpLessThanEqual:
+ case EOpEqual:
+ case EOpNotEqual:
+ break;
+ default:
+ badCond = true;
+ }
+ }
+ if (binaryCond && (! binaryCond->getLeft()->getAsSymbolNode() ||
+ binaryCond->getLeft()->getAsSymbolNode()->getId() != loopIndex ||
+ ! binaryCond->getRight()->getAsConstantUnion()))
+ badCond = true;
+ }
+ if (badCond) {
+ error(loc, "inductive-loop condition requires the form \"loop-index <comparison-op> constant-expression\"", "limitations", "");
+ return;
+ }
+
+ // loop-index++
+ // loop-index--
+ // loop-index += constant-expression
+ // loop-index -= constant-expression
+ bool badTerminal = ! loop->getTerminal();
+ if (! badTerminal) {
+ TIntermUnary* unaryTerminal = loop->getTerminal()->getAsUnaryNode();
+ TIntermBinary* binaryTerminal = loop->getTerminal()->getAsBinaryNode();
+ if (unaryTerminal || binaryTerminal) {
+ switch(loop->getTerminal()->getAsOperator()->getOp()) {
+ case EOpPostDecrement:
+ case EOpPostIncrement:
+ case EOpAddAssign:
+ case EOpSubAssign:
+ break;
+ default:
+ badTerminal = true;
+ }
+ } else
+ badTerminal = true;
+ if (binaryTerminal && (! binaryTerminal->getLeft()->getAsSymbolNode() ||
+ binaryTerminal->getLeft()->getAsSymbolNode()->getId() != loopIndex ||
+ ! binaryTerminal->getRight()->getAsConstantUnion()))
+ badTerminal = true;
+ if (unaryTerminal && (! unaryTerminal->getOperand()->getAsSymbolNode() ||
+ unaryTerminal->getOperand()->getAsSymbolNode()->getId() != loopIndex))
+ badTerminal = true;
+ }
+ if (badTerminal) {
+ error(loc, "inductive-loop termination requires the form \"loop-index++, loop-index--, loop-index += constant-expression, or loop-index -= constant-expression\"", "limitations", "");
+ return;
+ }
+
+ // the body
+ inductiveLoopBodyCheck(loop->getBody(), loopIndex, symbolTable);
+}
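+
+// An ES 2.0 (version 100) loop that passes all of the checks above looks like
+// (illustrative):
+//
+//     for (int i = 0; i < 4; i++) { ... }
+//
+// whereas "for (int i = 0; i < n; i++)" fails: the condition's right-hand side
+// is not a constant expression.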
+
+// Do limit checks for built-in arrays.
+void TParseContext::arrayLimitCheck(const TSourceLoc& loc, const TString& identifier, int size)
+{
+ if (identifier.compare("gl_TexCoord") == 0)
+ limitCheck(loc, size, "gl_MaxTextureCoords", "gl_TexCoord array size");
+ else if (identifier.compare("gl_ClipDistance") == 0)
+ limitCheck(loc, size, "gl_MaxClipDistances", "gl_ClipDistance array size");
+ else if (identifier.compare("gl_CullDistance") == 0)
+ limitCheck(loc, size, "gl_MaxCullDistances", "gl_CullDistance array size");
+#ifdef NV_EXTENSIONS
+ else if (identifier.compare("gl_ClipDistancePerViewNV") == 0)
+ limitCheck(loc, size, "gl_MaxClipDistances", "gl_ClipDistancePerViewNV array size");
+ else if (identifier.compare("gl_CullDistancePerViewNV") == 0)
+ limitCheck(loc, size, "gl_MaxCullDistances", "gl_CullDistancePerViewNV array size");
+#endif
+}
+
+// See if the provided value is less than or equal to the symbol indicated by limit,
+// which should be a constant in the symbol table.
+void TParseContext::limitCheck(const TSourceLoc& loc, int value, const char* limit, const char* feature)
+{
+ TSymbol* symbol = symbolTable.find(limit);
+ assert(symbol->getAsVariable());
+ const TConstUnionArray& constArray = symbol->getAsVariable()->getConstArray();
+ assert(! constArray.empty());
+ if (value > constArray[0].getIConst())
+ error(loc, "must be less than or equal to", feature, "%s (%d)", limit, constArray[0].getIConst());
+}
+
+//
+// Do any additional error checking, etc., once we know the parsing is done.
+//
+void TParseContext::finish()
+{
+ TParseContextBase::finish();
+
+ if (parsingBuiltins)
+ return;
+
+ // Check on array indexes for ES 2.0 (version 100) limitations.
+ for (size_t i = 0; i < needsIndexLimitationChecking.size(); ++i)
+ constantIndexExpressionCheck(needsIndexLimitationChecking[i]);
+
+ // Check for stages that are enabled by extension.
+    // Can't do this at the beginning; adding a stage by extension is a
+    // chicken-and-egg problem.
+    // Stage-specific features were correctly tested for already; this is just
+    // about the stage itself.
+ switch (language) {
+ case EShLangGeometry:
+ if (profile == EEsProfile && version == 310)
+ requireExtensions(getCurrentLoc(), Num_AEP_geometry_shader, AEP_geometry_shader, "geometry shaders");
+ break;
+ case EShLangTessControl:
+ case EShLangTessEvaluation:
+ if (profile == EEsProfile && version == 310)
+ requireExtensions(getCurrentLoc(), Num_AEP_tessellation_shader, AEP_tessellation_shader, "tessellation shaders");
+ else if (profile != EEsProfile && version < 400)
+ requireExtensions(getCurrentLoc(), 1, &E_GL_ARB_tessellation_shader, "tessellation shaders");
+ break;
+ case EShLangCompute:
+ if (profile != EEsProfile && version < 430)
+ requireExtensions(getCurrentLoc(), 1, &E_GL_ARB_compute_shader, "compute shaders");
+ break;
+#ifdef NV_EXTENSIONS
+ case EShLangTaskNV:
+ requireExtensions(getCurrentLoc(), 1, &E_GL_NV_mesh_shader, "task shaders");
+ break;
+ case EShLangMeshNV:
+ requireExtensions(getCurrentLoc(), 1, &E_GL_NV_mesh_shader, "mesh shaders");
+ break;
+#endif
+ default:
+ break;
+ }
+
+#ifdef NV_EXTENSIONS
+ // Set default outputs for GL_NV_geometry_shader_passthrough
+ if (language == EShLangGeometry && extensionTurnedOn(E_SPV_NV_geometry_shader_passthrough)) {
+ if (intermediate.getOutputPrimitive() == ElgNone) {
+ switch (intermediate.getInputPrimitive()) {
+ case ElgPoints: intermediate.setOutputPrimitive(ElgPoints); break;
+ case ElgLines: intermediate.setOutputPrimitive(ElgLineStrip); break;
+ case ElgTriangles: intermediate.setOutputPrimitive(ElgTriangleStrip); break;
+ default: break;
+ }
+ }
+ if (intermediate.getVertices() == TQualifier::layoutNotSet) {
+ switch (intermediate.getInputPrimitive()) {
+ case ElgPoints: intermediate.setVertices(1); break;
+ case ElgLines: intermediate.setVertices(2); break;
+ case ElgTriangles: intermediate.setVertices(3); break;
+ default: break;
+ }
+ }
+ }
+#endif
+}
+
+//
+// Layout qualifier stuff.
+//
+
+// Put the id's layout qualification into the public type, for qualifiers not having a number set.
+// This is before we know any type information for error checking.
+void TParseContext::setLayoutQualifier(const TSourceLoc& loc, TPublicType& publicType, TString& id)
+{
+ std::transform(id.begin(), id.end(), id.begin(), ::tolower);
+
+ if (id == TQualifier::getLayoutMatrixString(ElmColumnMajor)) {
+ publicType.qualifier.layoutMatrix = ElmColumnMajor;
+ return;
+ }
+ if (id == TQualifier::getLayoutMatrixString(ElmRowMajor)) {
+ publicType.qualifier.layoutMatrix = ElmRowMajor;
+ return;
+ }
+ if (id == TQualifier::getLayoutPackingString(ElpPacked)) {
+ if (spvVersion.spv != 0)
+ spvRemoved(loc, "packed");
+ publicType.qualifier.layoutPacking = ElpPacked;
+ return;
+ }
+ if (id == TQualifier::getLayoutPackingString(ElpShared)) {
+ if (spvVersion.spv != 0)
+ spvRemoved(loc, "shared");
+ publicType.qualifier.layoutPacking = ElpShared;
+ return;
+ }
+ if (id == TQualifier::getLayoutPackingString(ElpStd140)) {
+ publicType.qualifier.layoutPacking = ElpStd140;
+ return;
+ }
+ if (id == TQualifier::getLayoutPackingString(ElpStd430)) {
+ requireProfile(loc, EEsProfile | ECoreProfile | ECompatibilityProfile, "std430");
+ profileRequires(loc, ECoreProfile | ECompatibilityProfile, 430, nullptr, "std430");
+ profileRequires(loc, EEsProfile, 310, nullptr, "std430");
+ publicType.qualifier.layoutPacking = ElpStd430;
+ return;
+ }
+ if (id == TQualifier::getLayoutPackingString(ElpScalar)) {
+ requireVulkan(loc, "scalar");
+ requireExtensions(loc, 1, &E_GL_EXT_scalar_block_layout, "scalar block layout");
+ publicType.qualifier.layoutPacking = ElpScalar;
+ return;
+ }
+ // TODO: compile-time performance: may need to stop doing linear searches
+ for (TLayoutFormat format = (TLayoutFormat)(ElfNone + 1); format < ElfCount; format = (TLayoutFormat)(format + 1)) {
+ if (id == TQualifier::getLayoutFormatString(format)) {
+ if ((format > ElfEsFloatGuard && format < ElfFloatGuard) ||
+ (format > ElfEsIntGuard && format < ElfIntGuard) ||
+ (format > ElfEsUintGuard && format < ElfCount))
+ requireProfile(loc, ENoProfile | ECoreProfile | ECompatibilityProfile, "image load-store format");
+ profileRequires(loc, ENoProfile | ECoreProfile | ECompatibilityProfile, 420, E_GL_ARB_shader_image_load_store, "image load store");
+ profileRequires(loc, EEsProfile, 310, E_GL_ARB_shader_image_load_store, "image load store");
+ publicType.qualifier.layoutFormat = format;
+ return;
+ }
+ }
+ if (id == "push_constant") {
+ requireVulkan(loc, "push_constant");
+ publicType.qualifier.layoutPushConstant = true;
+ return;
+ }
+ if (id == "buffer_reference") {
+ requireVulkan(loc, "buffer_reference");
+ requireExtensions(loc, 1, &E_GL_EXT_buffer_reference, "buffer_reference");
+ publicType.qualifier.layoutBufferReference = true;
+ intermediate.setUseStorageBuffer();
+ intermediate.setUsePhysicalStorageBuffer();
+ return;
+ }
+ if (language == EShLangGeometry || language == EShLangTessEvaluation
+#ifdef NV_EXTENSIONS
+ || language == EShLangMeshNV
+#endif
+ ) {
+ if (id == TQualifier::getGeometryString(ElgTriangles)) {
+ publicType.shaderQualifiers.geometry = ElgTriangles;
+ return;
+ }
+ if (language == EShLangGeometry
+#ifdef NV_EXTENSIONS
+ || language == EShLangMeshNV
+#endif
+ ) {
+ if (id == TQualifier::getGeometryString(ElgPoints)) {
+ publicType.shaderQualifiers.geometry = ElgPoints;
+ return;
+ }
+ if (id == TQualifier::getGeometryString(ElgLines)) {
+ publicType.shaderQualifiers.geometry = ElgLines;
+ return;
+ }
+#ifdef NV_EXTENSIONS
+ if (language == EShLangGeometry)
+#endif
+ {
+ if (id == TQualifier::getGeometryString(ElgLineStrip)) {
+ publicType.shaderQualifiers.geometry = ElgLineStrip;
+ return;
+ }
+ if (id == TQualifier::getGeometryString(ElgLinesAdjacency)) {
+ publicType.shaderQualifiers.geometry = ElgLinesAdjacency;
+ return;
+ }
+ if (id == TQualifier::getGeometryString(ElgTrianglesAdjacency)) {
+ publicType.shaderQualifiers.geometry = ElgTrianglesAdjacency;
+ return;
+ }
+ if (id == TQualifier::getGeometryString(ElgTriangleStrip)) {
+ publicType.shaderQualifiers.geometry = ElgTriangleStrip;
+ return;
+ }
+#ifdef NV_EXTENSIONS
+ if (id == "passthrough") {
+ requireExtensions(loc, 1, &E_SPV_NV_geometry_shader_passthrough, "geometry shader passthrough");
+ publicType.qualifier.layoutPassthrough = true;
+ intermediate.setGeoPassthroughEXT();
+ return;
+ }
+#endif
+ }
+ } else {
+ assert(language == EShLangTessEvaluation);
+
+ // input primitive
+ if (id == TQualifier::getGeometryString(ElgTriangles)) {
+ publicType.shaderQualifiers.geometry = ElgTriangles;
+ return;
+ }
+ if (id == TQualifier::getGeometryString(ElgQuads)) {
+ publicType.shaderQualifiers.geometry = ElgQuads;
+ return;
+ }
+ if (id == TQualifier::getGeometryString(ElgIsolines)) {
+ publicType.shaderQualifiers.geometry = ElgIsolines;
+ return;
+ }
+
+ // vertex spacing
+ if (id == TQualifier::getVertexSpacingString(EvsEqual)) {
+ publicType.shaderQualifiers.spacing = EvsEqual;
+ return;
+ }
+ if (id == TQualifier::getVertexSpacingString(EvsFractionalEven)) {
+ publicType.shaderQualifiers.spacing = EvsFractionalEven;
+ return;
+ }
+ if (id == TQualifier::getVertexSpacingString(EvsFractionalOdd)) {
+ publicType.shaderQualifiers.spacing = EvsFractionalOdd;
+ return;
+ }
+
+ // triangle order
+ if (id == TQualifier::getVertexOrderString(EvoCw)) {
+ publicType.shaderQualifiers.order = EvoCw;
+ return;
+ }
+ if (id == TQualifier::getVertexOrderString(EvoCcw)) {
+ publicType.shaderQualifiers.order = EvoCcw;
+ return;
+ }
+
+ // point mode
+ if (id == "point_mode") {
+ publicType.shaderQualifiers.pointMode = true;
+ return;
+ }
+ }
+ }
+ if (language == EShLangFragment) {
+ if (id == "origin_upper_left") {
+ requireProfile(loc, ECoreProfile | ECompatibilityProfile, "origin_upper_left");
+ publicType.shaderQualifiers.originUpperLeft = true;
+ return;
+ }
+ if (id == "pixel_center_integer") {
+ requireProfile(loc, ECoreProfile | ECompatibilityProfile, "pixel_center_integer");
+ publicType.shaderQualifiers.pixelCenterInteger = true;
+ return;
+ }
+ if (id == "early_fragment_tests") {
+ profileRequires(loc, ENoProfile | ECoreProfile | ECompatibilityProfile, 420, E_GL_ARB_shader_image_load_store, "early_fragment_tests");
+ profileRequires(loc, EEsProfile, 310, nullptr, "early_fragment_tests");
+ publicType.shaderQualifiers.earlyFragmentTests = true;
+ return;
+ }
+ if (id == "post_depth_coverage") {
+ requireExtensions(loc, Num_post_depth_coverageEXTs, post_depth_coverageEXTs, "post depth coverage");
+ if (extensionTurnedOn(E_GL_ARB_post_depth_coverage)) {
+ publicType.shaderQualifiers.earlyFragmentTests = true;
+ }
+ publicType.shaderQualifiers.postDepthCoverage = true;
+ return;
+ }
+ for (TLayoutDepth depth = (TLayoutDepth)(EldNone + 1); depth < EldCount; depth = (TLayoutDepth)(depth+1)) {
+ if (id == TQualifier::getLayoutDepthString(depth)) {
+ requireProfile(loc, ECoreProfile | ECompatibilityProfile, "depth layout qualifier");
+ profileRequires(loc, ECoreProfile | ECompatibilityProfile, 420, nullptr, "depth layout qualifier");
+ publicType.shaderQualifiers.layoutDepth = depth;
+ return;
+ }
+ }
+ if (id.compare(0, 13, "blend_support") == 0) {
+ bool found = false;
+ for (TBlendEquationShift be = (TBlendEquationShift)0; be < EBlendCount; be = (TBlendEquationShift)(be + 1)) {
+ if (id == TQualifier::getBlendEquationString(be)) {
+ profileRequires(loc, EEsProfile, 320, E_GL_KHR_blend_equation_advanced, "blend equation");
+ profileRequires(loc, ~EEsProfile, 0, E_GL_KHR_blend_equation_advanced, "blend equation");
+ intermediate.addBlendEquation(be);
+ publicType.shaderQualifiers.blendEquation = true;
+ found = true;
+ break;
+ }
+ }
+ if (! found)
+ error(loc, "unknown blend equation", "blend_support", "");
+ return;
+ }
+#ifdef NV_EXTENSIONS
+ if (id == "override_coverage") {
+ requireExtensions(loc, 1, &E_GL_NV_sample_mask_override_coverage, "sample mask override coverage");
+ publicType.shaderQualifiers.layoutOverrideCoverage = true;
+ return;
+ }
+ }
+ if (language == EShLangVertex ||
+ language == EShLangTessControl ||
+ language == EShLangTessEvaluation ||
+ language == EShLangGeometry ) {
+ if (id == "viewport_relative") {
+ requireExtensions(loc, 1, &E_GL_NV_viewport_array2, "view port array2");
+ publicType.qualifier.layoutViewportRelative = true;
+ return;
+ }
+ } else {
+ if (language == EShLangRayGenNV || language == EShLangIntersectNV ||
+ language == EShLangAnyHitNV || language == EShLangClosestHitNV ||
+ language == EShLangMissNV || language == EShLangCallableNV) {
+ if (id == "shaderrecordnv") {
+ publicType.qualifier.layoutShaderRecordNV = true;
+ return;
+ }
+ }
+ }
+ if (language == EShLangCompute) {
+ if (id.compare(0, 17, "derivative_group_") == 0) {
+ requireExtensions(loc, 1, &E_GL_NV_compute_shader_derivatives, "compute shader derivatives");
+ if (id == "derivative_group_quadsnv") {
+ publicType.shaderQualifiers.layoutDerivativeGroupQuads = true;
+ return;
+ } else if (id == "derivative_group_linearnv") {
+ publicType.shaderQualifiers.layoutDerivativeGroupLinear = true;
+ return;
+ }
+ }
+ }
+#else
+ }
+#endif
+ error(loc, "unrecognized layout identifier, or qualifier requires assignment (e.g., binding = 4)", id.c_str(), "");
+}
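+
+// Examples of valueless layout qualifiers handled above (illustrative GLSL):
+//
+//     layout(std140) uniform U { mat4 m; };         // packing
+//     layout(triangles) in;                         // geometry/tessellation input
+//     layout(early_fragment_tests) in;              // fragment stage
+//     layout(push_constant) uniform P { int n; };   // Vulkan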
+
+// Put the id's layout qualifier value into the public type, for qualifiers having a number set.
+// This is before we know any type information for error checking.
+void TParseContext::setLayoutQualifier(const TSourceLoc& loc, TPublicType& publicType, TString& id, const TIntermTyped* node)
+{
+ const char* feature = "layout-id value";
+ const char* nonLiteralFeature = "non-literal layout-id value";
+
+ integerCheck(node, feature);
+ const TIntermConstantUnion* constUnion = node->getAsConstantUnion();
+ int value;
+ bool nonLiteral = false;
+ if (constUnion) {
+ value = constUnion->getConstArray()[0].getIConst();
+ if (! constUnion->isLiteral()) {
+ requireProfile(loc, ECoreProfile | ECompatibilityProfile, nonLiteralFeature);
+ profileRequires(loc, ECoreProfile | ECompatibilityProfile, 440, E_GL_ARB_enhanced_layouts, nonLiteralFeature);
+ }
+ } else {
+        // the grammar should already have given an error message
+ value = 0;
+ nonLiteral = true;
+ }
+
+ if (value < 0) {
+ error(loc, "cannot be negative", feature, "");
+ return;
+ }
+
+ std::transform(id.begin(), id.end(), id.begin(), ::tolower);
+
+ if (id == "offset") {
+ // "offset" can be for either
+ // - uniform offsets
+ // - atomic_uint offsets
+ const char* feature = "offset";
+ if (spvVersion.spv == 0) {
+ requireProfile(loc, EEsProfile | ECoreProfile | ECompatibilityProfile, feature);
+ const char* exts[2] = { E_GL_ARB_enhanced_layouts, E_GL_ARB_shader_atomic_counters };
+ profileRequires(loc, ECoreProfile | ECompatibilityProfile, 420, 2, exts, feature);
+ profileRequires(loc, EEsProfile, 310, nullptr, feature);
+ }
+ publicType.qualifier.layoutOffset = value;
+ if (nonLiteral)
+ error(loc, "needs a literal integer", "offset", "");
+ return;
+ } else if (id == "align") {
+ const char* feature = "uniform buffer-member align";
+ if (spvVersion.spv == 0) {
+ requireProfile(loc, ECoreProfile | ECompatibilityProfile, feature);
+ profileRequires(loc, ECoreProfile | ECompatibilityProfile, 440, E_GL_ARB_enhanced_layouts, feature);
+ }
+ // "The specified alignment must be a power of 2, or a compile-time error results."
+ if (! IsPow2(value))
+ error(loc, "must be a power of 2", "align", "");
+ else
+ publicType.qualifier.layoutAlign = value;
+ if (nonLiteral)
+ error(loc, "needs a literal integer", "align", "");
+ return;
+ } else if (id == "location") {
+ profileRequires(loc, EEsProfile, 300, nullptr, "location");
+ const char* exts[2] = { E_GL_ARB_separate_shader_objects, E_GL_ARB_explicit_attrib_location };
+ profileRequires(loc, ~EEsProfile, 330, 2, exts, "location");
+ if ((unsigned int)value >= TQualifier::layoutLocationEnd)
+ error(loc, "location is too large", id.c_str(), "");
+ else
+ publicType.qualifier.layoutLocation = value;
+ if (nonLiteral)
+ error(loc, "needs a literal integer", "location", "");
+ return;
+ } else if (id == "set") {
+ if ((unsigned int)value >= TQualifier::layoutSetEnd)
+ error(loc, "set is too large", id.c_str(), "");
+ else
+ publicType.qualifier.layoutSet = value;
+ if (value != 0)
+ requireVulkan(loc, "descriptor set");
+ if (nonLiteral)
+ error(loc, "needs a literal integer", "set", "");
+ return;
+ } else if (id == "binding") {
+ profileRequires(loc, ~EEsProfile, 420, E_GL_ARB_shading_language_420pack, "binding");
+ profileRequires(loc, EEsProfile, 310, nullptr, "binding");
+ if ((unsigned int)value >= TQualifier::layoutBindingEnd)
+ error(loc, "binding is too large", id.c_str(), "");
+ else
+ publicType.qualifier.layoutBinding = value;
+ if (nonLiteral)
+ error(loc, "needs a literal integer", "binding", "");
+ return;
+ } else if (id == "component") {
+ requireProfile(loc, ECoreProfile | ECompatibilityProfile, "component");
+ profileRequires(loc, ECoreProfile | ECompatibilityProfile, 440, E_GL_ARB_enhanced_layouts, "component");
+ if ((unsigned)value >= TQualifier::layoutComponentEnd)
+ error(loc, "component is too large", id.c_str(), "");
+ else
+ publicType.qualifier.layoutComponent = value;
+ if (nonLiteral)
+ error(loc, "needs a literal integer", "component", "");
+ return;
+ } else if (id.compare(0, 4, "xfb_") == 0) {
+ // "Any shader making any static use (after preprocessing) of any of these
+ // *xfb_* qualifiers will cause the shader to be in a transform feedback
+ // capturing mode and hence responsible for describing the transform feedback
+ // setup."
+ intermediate.setXfbMode();
+ const char* feature = "transform feedback qualifier";
+ requireStage(loc, (EShLanguageMask)(EShLangVertexMask | EShLangGeometryMask | EShLangTessControlMask | EShLangTessEvaluationMask), feature);
+ requireProfile(loc, ECoreProfile | ECompatibilityProfile, feature);
+ profileRequires(loc, ECoreProfile | ECompatibilityProfile, 440, E_GL_ARB_enhanced_layouts, feature);
+ if (id == "xfb_buffer") {
+ // "It is a compile-time error to specify an *xfb_buffer* that is greater than
+ // the implementation-dependent constant gl_MaxTransformFeedbackBuffers."
+ if (value >= resources.maxTransformFeedbackBuffers)
+ error(loc, "buffer is too large:", id.c_str(), "gl_MaxTransformFeedbackBuffers is %d", resources.maxTransformFeedbackBuffers);
+ if (value >= (int)TQualifier::layoutXfbBufferEnd)
+ error(loc, "buffer is too large:", id.c_str(), "internal max is %d", TQualifier::layoutXfbBufferEnd-1);
+ else
+ publicType.qualifier.layoutXfbBuffer = value;
+ if (nonLiteral)
+ error(loc, "needs a literal integer", "xfb_buffer", "");
+ return;
+ } else if (id == "xfb_offset") {
+ if (value >= (int)TQualifier::layoutXfbOffsetEnd)
+ error(loc, "offset is too large:", id.c_str(), "internal max is %d", TQualifier::layoutXfbOffsetEnd-1);
+ else
+ publicType.qualifier.layoutXfbOffset = value;
+ if (nonLiteral)
+ error(loc, "needs a literal integer", "xfb_offset", "");
+ return;
+ } else if (id == "xfb_stride") {
+ // "The resulting stride (implicit or explicit), when divided by 4, must be less than or equal to the
+ // implementation-dependent constant gl_MaxTransformFeedbackInterleavedComponents."
+ if (value > 4 * resources.maxTransformFeedbackInterleavedComponents) {
+ error(loc, "1/4 stride is too large:", id.c_str(), "gl_MaxTransformFeedbackInterleavedComponents is %d",
+ resources.maxTransformFeedbackInterleavedComponents);
+ }
+ if (value >= (int)TQualifier::layoutXfbStrideEnd)
+ error(loc, "stride is too large:", id.c_str(), "internal max is %d", TQualifier::layoutXfbStrideEnd-1);
+ else
+ publicType.qualifier.layoutXfbStride = value;
+ if (nonLiteral)
+ error(loc, "needs a literal integer", "xfb_stride", "");
+ return;
+ }
+ }
+
+ if (id == "input_attachment_index") {
+ requireVulkan(loc, "input_attachment_index");
+ if (value >= (int)TQualifier::layoutAttachmentEnd)
+ error(loc, "attachment index is too large", id.c_str(), "");
+ else
+ publicType.qualifier.layoutAttachment = value;
+ if (nonLiteral)
+ error(loc, "needs a literal integer", "input_attachment_index", "");
+ return;
+ }
+ if (id == "constant_id") {
+ requireSpv(loc, "constant_id");
+ if (value >= (int)TQualifier::layoutSpecConstantIdEnd) {
+ error(loc, "specialization-constant id is too large", id.c_str(), "");
+ } else {
+ publicType.qualifier.layoutSpecConstantId = value;
+ publicType.qualifier.specConstant = true;
+ if (! intermediate.addUsedConstantId(value))
+ error(loc, "specialization-constant id already used", id.c_str(), "");
+ }
+ if (nonLiteral)
+ error(loc, "needs a literal integer", "constant_id", "");
+ return;
+ }
+ if (id == "num_views") {
+ requireExtensions(loc, Num_OVR_multiview_EXTs, OVR_multiview_EXTs, "num_views");
+ publicType.shaderQualifiers.numViews = value;
+ if (nonLiteral)
+ error(loc, "needs a literal integer", "num_views", "");
+ return;
+ }
+
+#ifdef NV_EXTENSIONS
+ if (language == EShLangVertex ||
+ language == EShLangTessControl ||
+ language == EShLangTessEvaluation ||
+ language == EShLangGeometry) {
+ if (id == "secondary_view_offset") {
+ requireExtensions(loc, 1, &E_GL_NV_stereo_view_rendering, "stereo view rendering");
+ publicType.qualifier.layoutSecondaryViewportRelativeOffset = value;
+ if (nonLiteral)
+ error(loc, "needs a literal integer", "secondary_view_offset", "");
+ return;
+ }
+ }
+#endif
+
+ if (id == "buffer_reference_align") {
+ requireExtensions(loc, 1, &E_GL_EXT_buffer_reference, "buffer_reference_align");
+ if (! IsPow2(value))
+ error(loc, "must be a power of 2", "buffer_reference_align", "");
+ else
+ publicType.qualifier.layoutBufferReferenceAlign = (unsigned int)std::log2(value);
+ if (nonLiteral)
+ error(loc, "needs a literal integer", "buffer_reference_align", "");
+ return;
+ }
+
+ switch (language) {
+ case EShLangVertex:
+ break;
+
+ case EShLangTessControl:
+ if (id == "vertices") {
+ if (value == 0)
+ error(loc, "must be greater than 0", "vertices", "");
+ else
+ publicType.shaderQualifiers.vertices = value;
+ if (nonLiteral)
+ error(loc, "needs a literal integer", "vertices", "");
+ return;
+ }
+ break;
+
+ case EShLangTessEvaluation:
+ break;
+
+ case EShLangGeometry:
+ if (id == "invocations") {
+ profileRequires(loc, ECompatibilityProfile | ECoreProfile, 400, nullptr, "invocations");
+ if (value == 0)
+ error(loc, "must be at least 1", "invocations", "");
+ else
+ publicType.shaderQualifiers.invocations = value;
+ if (nonLiteral)
+ error(loc, "needs a literal integer", "invocations", "");
+ return;
+ }
+ if (id == "max_vertices") {
+ publicType.shaderQualifiers.vertices = value;
+ if (value > resources.maxGeometryOutputVertices)
+ error(loc, "too large, must be less than gl_MaxGeometryOutputVertices", "max_vertices", "");
+ if (nonLiteral)
+ error(loc, "needs a literal integer", "max_vertices", "");
+ return;
+ }
+ if (id == "stream") {
+ requireProfile(loc, ~EEsProfile, "selecting output stream");
+ publicType.qualifier.layoutStream = value;
+ if (value > 0)
+ intermediate.setMultiStream();
+ if (nonLiteral)
+ error(loc, "needs a literal integer", "stream", "");
+ return;
+ }
+ break;
+
+ case EShLangFragment:
+ if (id == "index") {
+ requireProfile(loc, ECompatibilityProfile | ECoreProfile, "index layout qualifier on fragment output");
+ const char* exts[2] = { E_GL_ARB_separate_shader_objects, E_GL_ARB_explicit_attrib_location };
+ profileRequires(loc, ECompatibilityProfile | ECoreProfile, 330, 2, exts, "index layout qualifier on fragment output");
+
+ // "It is also a compile-time error if a fragment shader sets a layout index to less than 0 or greater than 1."
+ if (value < 0 || value > 1) {
+ value = 0;
+ error(loc, "value must be 0 or 1", "index", "");
+ }
+
+ publicType.qualifier.layoutIndex = value;
+ if (nonLiteral)
+ error(loc, "needs a literal integer", "index", "");
+ return;
+ }
+ break;
+
+#ifdef NV_EXTENSIONS
+ case EShLangMeshNV:
+ if (id == "max_vertices") {
+ requireExtensions(loc, 1, &E_GL_NV_mesh_shader, "max_vertices");
+ publicType.shaderQualifiers.vertices = value;
+ if (value > resources.maxMeshOutputVerticesNV)
+ error(loc, "too large, must be less than gl_MaxMeshOutputVerticesNV", "max_vertices", "");
+ if (nonLiteral)
+ error(loc, "needs a literal integer", "max_vertices", "");
+ return;
+ }
+ if (id == "max_primitives") {
+ requireExtensions(loc, 1, &E_GL_NV_mesh_shader, "max_primitives");
+ publicType.shaderQualifiers.primitives = value;
+ if (value > resources.maxMeshOutputPrimitivesNV)
+ error(loc, "too large, must be less than gl_MaxMeshOutputPrimitivesNV", "max_primitives", "");
+ if (nonLiteral)
+ error(loc, "needs a literal integer", "max_primitives", "");
+ return;
+ }
+ // Fall through
+
+ case EShLangTaskNV:
+ // Fall through
+#endif
+ case EShLangCompute:
+ if (id.compare(0, 11, "local_size_") == 0) {
+#ifdef NV_EXTENSIONS
+ if (language == EShLangMeshNV || language == EShLangTaskNV) {
+ requireExtensions(loc, 1, &E_GL_NV_mesh_shader, "gl_WorkGroupSize");
+ }
+ else
+#endif
+ {
+            profileRequires(loc, EEsProfile, 310, nullptr, "gl_WorkGroupSize");
+ profileRequires(loc, ~EEsProfile, 430, E_GL_ARB_compute_shader, "gl_WorkGroupSize");
+ }
+ if (nonLiteral)
+ error(loc, "needs a literal integer", "local_size", "");
+ if (id.size() == 12 && value == 0) {
+ error(loc, "must be at least 1", id.c_str(), "");
+ return;
+ }
+ if (id == "local_size_x") {
+ publicType.shaderQualifiers.localSize[0] = value;
+ return;
+ }
+ if (id == "local_size_y") {
+ publicType.shaderQualifiers.localSize[1] = value;
+ return;
+ }
+ if (id == "local_size_z") {
+ publicType.shaderQualifiers.localSize[2] = value;
+ return;
+ }
+ if (spvVersion.spv != 0) {
+ if (id == "local_size_x_id") {
+ publicType.shaderQualifiers.localSizeSpecId[0] = value;
+ return;
+ }
+ if (id == "local_size_y_id") {
+ publicType.shaderQualifiers.localSizeSpecId[1] = value;
+ return;
+ }
+ if (id == "local_size_z_id") {
+ publicType.shaderQualifiers.localSizeSpecId[2] = value;
+ return;
+ }
+ }
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ error(loc, "there is no such layout identifier for this stage taking an assigned value", id.c_str(), "");
+}
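+
+// Examples of layout qualifiers taking an assigned value (illustrative GLSL):
+//
+//     layout(location = 2) in vec4 a;
+//     layout(set = 0, binding = 1) uniform sampler2D s;   // Vulkan
+//     layout(local_size_x = 8, local_size_y = 8) in;      // compute
+//     layout(constant_id = 3) const int N = 4;            // SPIR-V spec constant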
+
+// Merge any layout qualifier information from src into dst, leaving everything else in dst alone
+//
+// "More than one layout qualifier may appear in a single declaration.
+// Additionally, the same layout-qualifier-name can occur multiple times
+// within a layout qualifier or across multiple layout qualifiers in the
+// same declaration. When the same layout-qualifier-name occurs
+// multiple times, in a single declaration, the last occurrence overrides
+// the former occurrence(s). Further, if such a layout-qualifier-name
+// will effect subsequent declarations or other observable behavior, it
+// is only the last occurrence that will have any effect, behaving as if
+// the earlier occurrence(s) within the declaration are not present.
+// This is also true for overriding layout-qualifier-names, where one
+// overrides the other (e.g., row_major vs. column_major); only the last
+// occurrence has any effect."
+void TParseContext::mergeObjectLayoutQualifiers(TQualifier& dst, const TQualifier& src, bool inheritOnly)
+{
+ if (src.hasMatrix())
+ dst.layoutMatrix = src.layoutMatrix;
+ if (src.hasPacking())
+ dst.layoutPacking = src.layoutPacking;
+
+ if (src.hasStream())
+ dst.layoutStream = src.layoutStream;
+
+ if (src.hasFormat())
+ dst.layoutFormat = src.layoutFormat;
+
+ if (src.hasXfbBuffer())
+ dst.layoutXfbBuffer = src.layoutXfbBuffer;
+
+ if (src.hasAlign())
+ dst.layoutAlign = src.layoutAlign;
+
+ if (src.hasBufferReferenceAlign())
+ dst.layoutBufferReferenceAlign = src.layoutBufferReferenceAlign;
+
+ if (! inheritOnly) {
+ if (src.hasLocation())
+ dst.layoutLocation = src.layoutLocation;
+ if (src.hasComponent())
+ dst.layoutComponent = src.layoutComponent;
+ if (src.hasIndex())
+ dst.layoutIndex = src.layoutIndex;
+
+ if (src.hasOffset())
+ dst.layoutOffset = src.layoutOffset;
+
+ if (src.hasSet())
+ dst.layoutSet = src.layoutSet;
+ if (src.layoutBinding != TQualifier::layoutBindingEnd)
+ dst.layoutBinding = src.layoutBinding;
+
+ if (src.hasXfbStride())
+ dst.layoutXfbStride = src.layoutXfbStride;
+ if (src.hasXfbOffset())
+ dst.layoutXfbOffset = src.layoutXfbOffset;
+ if (src.hasAttachment())
+ dst.layoutAttachment = src.layoutAttachment;
+ if (src.hasSpecConstantId())
+ dst.layoutSpecConstantId = src.layoutSpecConstantId;
+
+ if (src.layoutPushConstant)
+ dst.layoutPushConstant = true;
+
+ if (src.layoutBufferReference)
+ dst.layoutBufferReference = true;
+
+#ifdef NV_EXTENSIONS
+ if (src.layoutPassthrough)
+ dst.layoutPassthrough = true;
+ if (src.layoutViewportRelative)
+ dst.layoutViewportRelative = true;
+ if (src.layoutSecondaryViewportRelativeOffset != -2048)
+ dst.layoutSecondaryViewportRelativeOffset = src.layoutSecondaryViewportRelativeOffset;
+ if (src.layoutShaderRecordNV)
+ dst.layoutShaderRecordNV = true;
+ if (src.pervertexNV)
+ dst.pervertexNV = true;
+#endif
+ }
+}
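+
+// Per the quoted rule, an illustrative declaration such as
+//
+//     layout(column_major) layout(row_major) uniform U { mat4 m; };
+//
+// ends up row_major: the later occurrence overwrites dst.layoutMatrix here.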
+
+// Do layout error checking given a full variable/block declaration.
+void TParseContext::layoutObjectCheck(const TSourceLoc& loc, const TSymbol& symbol)
+{
+ const TType& type = symbol.getType();
+ const TQualifier& qualifier = type.getQualifier();
+
+ // first, cross check WRT to just the type
+ layoutTypeCheck(loc, type);
+
+ // now, any remaining error checking based on the object itself
+
+ if (qualifier.hasAnyLocation()) {
+ switch (qualifier.storage) {
+ case EvqUniform:
+ case EvqBuffer:
+ if (symbol.getAsVariable() == nullptr)
+ error(loc, "can only be used on variable declaration", "location", "");
+ break;
+ default:
+ break;
+ }
+ }
+
+ // user-variable location check, which are required for SPIR-V in/out:
+ // - variables have it directly,
+ // - blocks have it on each member (already enforced), so check first one
+ if (spvVersion.spv > 0 && !parsingBuiltins && qualifier.builtIn == EbvNone &&
+ !qualifier.hasLocation() && !intermediate.getAutoMapLocations()) {
+
+ switch (qualifier.storage) {
+ case EvqVaryingIn:
+ case EvqVaryingOut:
+ if (!type.getQualifier().isTaskMemory() &&
+ (type.getBasicType() != EbtBlock ||
+ (!(*type.getStruct())[0].type->getQualifier().hasLocation() &&
+ (*type.getStruct())[0].type->getQualifier().builtIn == EbvNone)))
+ error(loc, "SPIR-V requires location for user input/output", "location", "");
+ break;
+ default:
+ break;
+ }
+ }
+
+ // Check packing and matrix
+ if (qualifier.hasUniformLayout()) {
+ switch (qualifier.storage) {
+ case EvqUniform:
+ case EvqBuffer:
+ if (type.getBasicType() != EbtBlock) {
+ if (qualifier.hasMatrix())
+ error(loc, "cannot specify matrix layout on a variable declaration", "layout", "");
+ if (qualifier.hasPacking())
+ error(loc, "cannot specify packing on a variable declaration", "layout", "");
+ // "The offset qualifier can only be used on block members of blocks..."
+ if (qualifier.hasOffset() && type.getBasicType() != EbtAtomicUint)
+ error(loc, "cannot specify on a variable declaration", "offset", "");
+ // "The align qualifier can only be used on blocks or block members..."
+ if (qualifier.hasAlign())
+ error(loc, "cannot specify on a variable declaration", "align", "");
+ if (qualifier.layoutPushConstant)
+ error(loc, "can only specify on a uniform block", "push_constant", "");
+#ifdef NV_EXTENSIONS
+ if (qualifier.layoutShaderRecordNV)
+ error(loc, "can only specify on a buffer block", "shaderRecordNV", "");
+#endif
+ }
+ break;
+ default:
+ // these were already filtered by layoutTypeCheck() (or its callees)
+ break;
+ }
+ }
+}
+
+// "For some blocks declared as arrays, the location can only be applied at the block level:
+// When a block is declared as an array where additional locations are needed for each member
+// for each block array element, it is a compile-time error to specify locations on the block
+// members. That is, when locations would be under specified by applying them on block members,
+// they are not allowed on block members. For arrayed interfaces (those generally having an
+// extra level of arrayness due to interface expansion), the outer array is stripped before
+// applying this rule."
+void TParseContext::layoutMemberLocationArrayCheck(const TSourceLoc& loc, bool memberWithLocation,
+ TArraySizes* arraySizes)
+{
+ if (memberWithLocation && arraySizes != nullptr) {
+ if (arraySizes->getNumDims() > (currentBlockQualifier.isArrayedIo(language) ? 1 : 0))
+ error(loc, "cannot use in a block array where new locations are needed for each block element",
+ "location", "");
+ }
+}
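+
+// For example (illustrative), this is rejected, since each of the 4 block
+// elements would need its own set of member locations:
+//
+//     out Block { layout(location = 0) vec4 v; } blocks[4];
+//
+// while "layout(location = 0) out Block { vec4 v; } blocks[4];" is accepted,
+// consuming locations 0 through 3.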
+
+// Do layout error checking with respect to a type.
+void TParseContext::layoutTypeCheck(const TSourceLoc& loc, const TType& type)
+{
+ const TQualifier& qualifier = type.getQualifier();
+
+ // first, intra-layout qualifier-only error checking
+ layoutQualifierCheck(loc, qualifier);
+
+ // now, error checking combining type and qualifier
+
+ if (qualifier.hasAnyLocation()) {
+ if (qualifier.hasLocation()) {
+ if (qualifier.storage == EvqVaryingOut && language == EShLangFragment) {
+ if (qualifier.layoutLocation >= (unsigned int)resources.maxDrawBuffers)
+ error(loc, "too large for fragment output", "location", "");
+ }
+ }
+ if (qualifier.hasComponent()) {
+ // "It is a compile-time error if this sequence of components gets larger than 3."
+ if (qualifier.layoutComponent + type.getVectorSize() * (type.getBasicType() == EbtDouble ? 2 : 1) > 4)
+ error(loc, "type overflows the available 4 components", "component", "");
+
+ // "It is a compile-time error to apply the component qualifier to a matrix, a structure, a block, or an array containing any of these."
+ if (type.isMatrix() || type.getBasicType() == EbtBlock || type.getBasicType() == EbtStruct)
+ error(loc, "cannot apply to a matrix, structure, or block", "component", "");
+
+ // " It is a compile-time error to use component 1 or 3 as the beginning of a double or dvec2."
+ if (type.getBasicType() == EbtDouble)
+ if (qualifier.layoutComponent & 1)
+ error(loc, "doubles cannot start on an odd-numbered component", "component", "");
+ }
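+        // For example (illustrative): a location holds at most 4 components, so
+        //     layout(location = 0, component = 2) in vec2 a;    // OK: uses components 2 and 3
+        //     layout(location = 0, component = 2) in vec3 b;    // error: would need components 2..4
+        //     layout(location = 0, component = 1) in double d;  // error: doubles start on even components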
+
+ switch (qualifier.storage) {
+ case EvqVaryingIn:
+ case EvqVaryingOut:
+ if (type.getBasicType() == EbtBlock)
+ profileRequires(loc, ECoreProfile | ECompatibilityProfile, 440, E_GL_ARB_enhanced_layouts, "location qualifier on in/out block");
+#ifdef NV_EXTENSIONS
+ if (type.getQualifier().isTaskMemory())
+ error(loc, "cannot apply to taskNV in/out blocks", "location", "");
+#endif
+ break;
+ case EvqUniform:
+ case EvqBuffer:
+ if (type.getBasicType() == EbtBlock)
+ error(loc, "cannot apply to uniform or buffer block", "location", "");
+ break;
+#ifdef NV_EXTENSIONS
+ case EvqPayloadNV:
+ case EvqPayloadInNV:
+ case EvqHitAttrNV:
+ case EvqCallableDataNV:
+ case EvqCallableDataInNV:
+ break;
+#endif
+ default:
+ error(loc, "can only apply to uniform, buffer, in, or out storage qualifiers", "location", "");
+ break;
+ }
+
+ bool typeCollision;
+ int repeated = intermediate.addUsedLocation(qualifier, type, typeCollision);
+ if (repeated >= 0 && ! typeCollision)
+ error(loc, "overlapping use of location", "location", "%d", repeated);
+ // "fragment-shader outputs ... if two variables are placed within the same
+ // location, they must have the same underlying type (floating-point or integer)"
+ if (typeCollision && language == EShLangFragment && qualifier.isPipeOutput())
+ error(loc, "fragment outputs sharing the same location must be the same basic type", "location", "%d", repeated);
+ }
+
+ if (qualifier.hasXfbOffset() && qualifier.hasXfbBuffer()) {
+ int repeated = intermediate.addXfbBufferOffset(type);
+ if (repeated >= 0)
+ error(loc, "overlapping offsets at", "xfb_offset", "offset %d in buffer %d", repeated, qualifier.layoutXfbBuffer);
+
+ // "The offset must be a multiple of the size of the first component of the first
+ // qualified variable or block member, or a compile-time error results. Further, if applied to an aggregate
+ // containing a double or 64-bit integer, the offset must also be a multiple of 8..."
+ if ((type.containsBasicType(EbtDouble) || type.containsBasicType(EbtInt64) || type.containsBasicType(EbtUint64)) &&
+ ! IsMultipleOfPow2(qualifier.layoutXfbOffset, 8))
+ error(loc, "type contains double or 64-bit integer; xfb_offset must be a multiple of 8", "xfb_offset", "");
+#ifdef AMD_EXTENSIONS
+ else if ((type.containsBasicType(EbtBool) || type.containsBasicType(EbtFloat) ||
+ type.containsBasicType(EbtInt) || type.containsBasicType(EbtUint)) &&
+ ! IsMultipleOfPow2(qualifier.layoutXfbOffset, 4))
+ error(loc, "must be a multiple of size of first component", "xfb_offset", "");
+ // ..., if applied to an aggregate containing a half float or 16-bit integer, the offset must also be a multiple of 2..."
+ else if ((type.containsBasicType(EbtFloat16) || type.containsBasicType(EbtInt16) || type.containsBasicType(EbtUint16)) &&
+ !IsMultipleOfPow2(qualifier.layoutXfbOffset, 2))
+ error(loc, "type contains half float or 16-bit integer; xfb_offset must be a multiple of 2", "xfb_offset", "");
+#else
+ else if (! IsMultipleOfPow2(qualifier.layoutXfbOffset, 4))
+ error(loc, "must be a multiple of size of first component", "xfb_offset", "");
+#endif
+ }
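+    // For example (illustrative): offsets must respect the component size, so
+    //     layout(xfb_buffer = 0, xfb_offset = 4) out double d1;  // error: not a multiple of 8
+    //     layout(xfb_buffer = 0, xfb_offset = 8) out double d2;  // OK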
+
+ if (qualifier.hasXfbStride() && qualifier.hasXfbBuffer()) {
+ if (! intermediate.setXfbBufferStride(qualifier.layoutXfbBuffer, qualifier.layoutXfbStride))
+ error(loc, "all stride settings must match for xfb buffer", "xfb_stride", "%d", qualifier.layoutXfbBuffer);
+ }
+
+ if (qualifier.hasBinding()) {
+ // Binding checking, from the spec:
+ //
+ // "If the binding point for any uniform or shader storage block instance is less than zero, or greater than or
+ // equal to the implementation-dependent maximum number of uniform buffer bindings, a compile-time
+ // error will occur. When the binding identifier is used with a uniform or shader storage block instanced as
+ // an array of size N, all elements of the array from binding through binding + N - 1 must be within this
+ // range."
+ //
+ if (! type.isOpaque() && type.getBasicType() != EbtBlock)
+ error(loc, "requires block, or sampler/image, or atomic-counter type", "binding", "");
+ if (type.getBasicType() == EbtSampler) {
+ int lastBinding = qualifier.layoutBinding;
+ if (type.isArray()) {
+ if (spvVersion.vulkan > 0)
+ lastBinding += 1;
+ else {
+ if (type.isSizedArray())
+ lastBinding += type.getCumulativeArraySize();
+ else {
+ lastBinding += 1;
+ if (spvVersion.vulkan == 0)
+ warn(loc, "assuming binding count of one for compile-time checking of binding numbers for unsized array", "[]", "");
+ }
+ }
+ }
+ if (spvVersion.vulkan == 0 && lastBinding >= resources.maxCombinedTextureImageUnits)
+ error(loc, "sampler binding not less than gl_MaxCombinedTextureImageUnits", "binding", type.isArray() ? "(using array)" : "");
+ }
+ if (type.getBasicType() == EbtAtomicUint) {
+ if (qualifier.layoutBinding >= (unsigned int)resources.maxAtomicCounterBindings) {
+ error(loc, "atomic_uint binding is too large; see gl_MaxAtomicCounterBindings", "binding", "");
+ return;
+ }
+ }
+ } else if (!intermediate.getAutoMapBindings()) {
+ // some types require bindings
+
+ // atomic_uint
+ if (type.getBasicType() == EbtAtomicUint)
+ error(loc, "layout(binding=X) is required", "atomic_uint", "");
+
+ // SPIR-V
+ if (spvVersion.spv > 0) {
+ if (qualifier.isUniformOrBuffer()) {
+ if (type.getBasicType() == EbtBlock && !qualifier.layoutPushConstant &&
+#ifdef NV_EXTENSIONS
+ !qualifier.layoutShaderRecordNV &&
+#endif
+ !qualifier.layoutAttachment &&
+ !qualifier.layoutBufferReference)
+ error(loc, "uniform/buffer blocks require layout(binding=X)", "binding", "");
+ else if (spvVersion.vulkan > 0 && type.getBasicType() == EbtSampler)
+ error(loc, "sampler/texture/image requires layout(binding=X)", "binding", "");
+ }
+ }
+ }
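+    // For example (illustrative): when generating SPIR-V for Vulkan without
+    // auto-mapped bindings,
+    //     uniform sampler2D s;                      // error: requires layout(binding=X)
+    //     layout(binding = 1) uniform sampler2D s;  // OK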
+
+ // some things can't have arrays of arrays
+ if (type.isArrayOfArrays()) {
+ if (spvVersion.vulkan > 0) {
+ if (type.isOpaque() || (type.getQualifier().isUniformOrBuffer() && type.getBasicType() == EbtBlock))
+ warn(loc, "Generating SPIR-V array-of-arrays, but Vulkan only supports single array level for this resource", "[][]", "");
+ }
+ }
+
+ // "The offset qualifier can only be used on block members of blocks..."
+ if (qualifier.hasOffset()) {
+ if (type.getBasicType() == EbtBlock)
+ error(loc, "only applies to block members, not blocks", "offset", "");
+ }
+
+ // Image format
+ if (qualifier.hasFormat()) {
+ if (! type.isImage())
+ error(loc, "only apply to images", TQualifier::getLayoutFormatString(qualifier.layoutFormat), "");
+ else {
+ if (type.getSampler().type == EbtFloat && qualifier.layoutFormat > ElfFloatGuard)
+ error(loc, "does not apply to floating point images", TQualifier::getLayoutFormatString(qualifier.layoutFormat), "");
+ if (type.getSampler().type == EbtInt && (qualifier.layoutFormat < ElfFloatGuard || qualifier.layoutFormat > ElfIntGuard))
+ error(loc, "does not apply to signed integer images", TQualifier::getLayoutFormatString(qualifier.layoutFormat), "");
+ if (type.getSampler().type == EbtUint && qualifier.layoutFormat < ElfIntGuard)
+ error(loc, "does not apply to unsigned integer images", TQualifier::getLayoutFormatString(qualifier.layoutFormat), "");
+
+ if (profile == EEsProfile) {
+ // "Except for image variables qualified with the format qualifiers r32f, r32i, and r32ui, image variables must
+ // specify either memory qualifier readonly or the memory qualifier writeonly."
+ if (! (qualifier.layoutFormat == ElfR32f || qualifier.layoutFormat == ElfR32i || qualifier.layoutFormat == ElfR32ui)) {
+ if (! qualifier.readonly && ! qualifier.writeonly)
+ error(loc, "format requires readonly or writeonly memory qualifier", TQualifier::getLayoutFormatString(qualifier.layoutFormat), "");
+ }
+ }
+ }
+ } else if (type.isImage() && ! qualifier.writeonly) {
+ const char *explanation = "image variables not declared 'writeonly' and without a format layout qualifier";
+ requireProfile(loc, ECoreProfile | ECompatibilityProfile, explanation);
+ profileRequires(loc, ECoreProfile | ECompatibilityProfile, 0, E_GL_EXT_shader_image_load_formatted, explanation);
+ }
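+    // For example (illustrative): the format must match the image's sampled type, so
+    //     layout(rgba32f, binding = 0) uniform image2D imgA;  // OK: float format, float image
+    //     layout(r32i, binding = 0) uniform image2D imgB;     // error: integer format on a float image
+    //     layout(r32i, binding = 0) uniform iimage2D imgC;    // OK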
+
+ if (qualifier.layoutPushConstant && type.getBasicType() != EbtBlock)
+ error(loc, "can only be used with a block", "push_constant", "");
+
+ if (qualifier.layoutBufferReference && type.getBasicType() != EbtBlock)
+ error(loc, "can only be used with a block", "buffer_reference", "");
+
+#ifdef NV_EXTENSIONS
+ if (qualifier.layoutShaderRecordNV && type.getBasicType() != EbtBlock)
+ error(loc, "can only be used with a block", "shaderRecordNV", "");
+#endif
+
+ // input attachment
+ if (type.isSubpass()) {
+ if (! qualifier.hasAttachment())
+ error(loc, "requires an input_attachment_index layout qualifier", "subpass", "");
+ } else {
+ if (qualifier.hasAttachment())
+ error(loc, "can only be used with a subpass", "input_attachment_index", "");
+ }
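+    // For example (illustrative, Vulkan):
+    //     layout(input_attachment_index = 0, set = 0, binding = 0) uniform subpassInput si;  // OK
+    //     layout(set = 0, binding = 0) uniform subpassInput si;  // error: missing input_attachment_index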
+
+ // specialization-constant id
+ if (qualifier.hasSpecConstantId()) {
+ if (type.getQualifier().storage != EvqConst)
+ error(loc, "can only be applied to 'const'-qualified scalar", "constant_id", "");
+ if (! type.isScalar())
+ error(loc, "can only be applied to a scalar", "constant_id", "");
+ switch (type.getBasicType())
+ {
+ case EbtInt8:
+ case EbtUint8:
+ case EbtInt16:
+ case EbtUint16:
+ case EbtInt:
+ case EbtUint:
+ case EbtInt64:
+ case EbtUint64:
+ case EbtBool:
+ case EbtFloat:
+ case EbtDouble:
+ case EbtFloat16:
+ break;
+ default:
+ error(loc, "cannot be applied to this type", "constant_id", "");
+ break;
+ }
+ }
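+    // For example (illustrative):
+    //     layout(constant_id = 7) const int N = 8;          // OK: const-qualified scalar
+    //     layout(constant_id = 8) const vec2 v = vec2(0.0); // error: not a scalar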
+}
+
+// Do layout error checking that can be done within a layout qualifier proper, not needing to know
+// if there are blocks, atomic counters, variables, etc.
+void TParseContext::layoutQualifierCheck(const TSourceLoc& loc, const TQualifier& qualifier)
+{
+ if (qualifier.storage == EvqShared && qualifier.hasLayout())
+ error(loc, "cannot apply layout qualifiers to a shared variable", "shared", "");
+
+ // "It is a compile-time error to use *component* without also specifying the location qualifier (order does not matter)."
+ if (qualifier.hasComponent() && ! qualifier.hasLocation())
+ error(loc, "must specify 'location' to use 'component'", "component", "");
+
+ if (qualifier.hasAnyLocation()) {
+
+ // "As with input layout qualifiers, all shaders except compute shaders
+ // allow *location* layout qualifiers on output variable declarations,
+ // output block declarations, and output block member declarations."
+
+ switch (qualifier.storage) {
+ case EvqVaryingIn:
+ {
+ const char* feature = "location qualifier on input";
+ if (profile == EEsProfile && version < 310)
+ requireStage(loc, EShLangVertex, feature);
+ else
+ requireStage(loc, (EShLanguageMask)~EShLangComputeMask, feature);
+ if (language == EShLangVertex) {
+ const char* exts[2] = { E_GL_ARB_separate_shader_objects, E_GL_ARB_explicit_attrib_location };
+ profileRequires(loc, ~EEsProfile, 330, 2, exts, feature);
+ profileRequires(loc, EEsProfile, 300, nullptr, feature);
+ } else {
+ profileRequires(loc, ~EEsProfile, 410, E_GL_ARB_separate_shader_objects, feature);
+ profileRequires(loc, EEsProfile, 310, nullptr, feature);
+ }
+ break;
+ }
+ case EvqVaryingOut:
+ {
+ const char* feature = "location qualifier on output";
+ if (profile == EEsProfile && version < 310)
+ requireStage(loc, EShLangFragment, feature);
+ else
+ requireStage(loc, (EShLanguageMask)~EShLangComputeMask, feature);
+ if (language == EShLangFragment) {
+ const char* exts[2] = { E_GL_ARB_separate_shader_objects, E_GL_ARB_explicit_attrib_location };
+ profileRequires(loc, ~EEsProfile, 330, 2, exts, feature);
+ profileRequires(loc, EEsProfile, 300, nullptr, feature);
+ } else {
+ profileRequires(loc, ~EEsProfile, 410, E_GL_ARB_separate_shader_objects, feature);
+ profileRequires(loc, EEsProfile, 310, nullptr, feature);
+ }
+ break;
+ }
+ case EvqUniform:
+ case EvqBuffer:
+ {
+ const char* feature = "location qualifier on uniform or buffer";
+ requireProfile(loc, EEsProfile | ECoreProfile | ECompatibilityProfile, feature);
+ profileRequires(loc, ECoreProfile | ECompatibilityProfile, 430, nullptr, feature);
+ profileRequires(loc, EEsProfile, 310, nullptr, feature);
+ break;
+ }
+ default:
+ break;
+ }
+ if (qualifier.hasIndex()) {
+ if (qualifier.storage != EvqVaryingOut)
+ error(loc, "can only be used on an output", "index", "");
+ if (! qualifier.hasLocation())
+ error(loc, "can only be used with an explicit location", "index", "");
+ }
+ }
+
+ if (qualifier.hasBinding()) {
+ if (! qualifier.isUniformOrBuffer() && !qualifier.isTaskMemory())
+ error(loc, "requires uniform or buffer storage qualifier", "binding", "");
+ }
+ if (qualifier.hasStream()) {
+ if (!qualifier.isPipeOutput())
+ error(loc, "can only be used on an output", "stream", "");
+ }
+ if (qualifier.hasXfb()) {
+ if (!qualifier.isPipeOutput())
+ error(loc, "can only be used on an output", "xfb layout qualifier", "");
+ }
+ if (qualifier.hasUniformLayout()) {
+ if (! qualifier.isUniformOrBuffer() && !qualifier.isTaskMemory()) {
+ if (qualifier.hasMatrix() || qualifier.hasPacking())
+ error(loc, "matrix or packing qualifiers can only be used on a uniform or buffer", "layout", "");
+ if (qualifier.hasOffset() || qualifier.hasAlign())
+ error(loc, "offset/align can only be used on a uniform or buffer", "layout", "");
+ }
+ }
+ if (qualifier.layoutPushConstant) {
+ if (qualifier.storage != EvqUniform)
+ error(loc, "can only be used with a uniform", "push_constant", "");
+ if (qualifier.hasSet())
+ error(loc, "cannot be used with push_constant", "set", "");
+ }
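+    // For example (illustrative, Vulkan):
+    //     layout(push_constant) uniform Push { mat4 mvp; } pc;           // OK
+    //     layout(push_constant, set = 0) uniform Push { mat4 mvp; } pc;  // error: 'set' not allowed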
+ if (qualifier.layoutBufferReference) {
+ if (qualifier.storage != EvqBuffer)
+ error(loc, "can only be used with buffer", "buffer_reference", "");
+ }
+#ifdef NV_EXTENSIONS
+ if (qualifier.layoutShaderRecordNV) {
+ if (qualifier.storage != EvqBuffer)
+ error(loc, "can only be used with a buffer", "shaderRecordNV", "");
+ if (qualifier.hasBinding())
+ error(loc, "cannot be used with shaderRecordNV", "binding", "");
+ if (qualifier.hasSet())
+ error(loc, "cannot be used with shaderRecordNV", "set", "");
+    }
+ if (qualifier.storage == EvqHitAttrNV && qualifier.hasLayout()) {
+ error(loc, "cannot apply layout qualifiers to hitAttributeNV variable", "hitAttributeNV", "");
+ }
+#endif
+}
+
+// For places that can't have shader-level layout qualifiers
+void TParseContext::checkNoShaderLayouts(const TSourceLoc& loc, const TShaderQualifiers& shaderQualifiers)
+{
+ const char* message = "can only apply to a standalone qualifier";
+
+ if (shaderQualifiers.geometry != ElgNone)
+ error(loc, message, TQualifier::getGeometryString(shaderQualifiers.geometry), "");
+ if (shaderQualifiers.spacing != EvsNone)
+ error(loc, message, TQualifier::getVertexSpacingString(shaderQualifiers.spacing), "");
+ if (shaderQualifiers.order != EvoNone)
+ error(loc, message, TQualifier::getVertexOrderString(shaderQualifiers.order), "");
+ if (shaderQualifiers.pointMode)
+ error(loc, message, "point_mode", "");
+ if (shaderQualifiers.invocations != TQualifier::layoutNotSet)
+ error(loc, message, "invocations", "");
+ if (shaderQualifiers.earlyFragmentTests)
+ error(loc, message, "early_fragment_tests", "");
+ if (shaderQualifiers.postDepthCoverage)
+ error(loc, message, "post_depth_coverage", "");
+ for (int i = 0; i < 3; ++i) {
+ if (shaderQualifiers.localSize[i] > 1)
+ error(loc, message, "local_size", "");
+ if (shaderQualifiers.localSizeSpecId[i] != TQualifier::layoutNotSet)
+ error(loc, message, "local_size id", "");
+ }
+ if (shaderQualifiers.vertices != TQualifier::layoutNotSet) {
+ if (language == EShLangGeometry
+#ifdef NV_EXTENSIONS
+ || language == EShLangMeshNV
+#endif
+ )
+ error(loc, message, "max_vertices", "");
+ else if (language == EShLangTessControl)
+ error(loc, message, "vertices", "");
+ else
+ assert(0);
+ }
+#ifdef NV_EXTENSIONS
+ if (shaderQualifiers.primitives != TQualifier::layoutNotSet) {
+ if (language == EShLangMeshNV)
+ error(loc, message, "max_primitives", "");
+ else
+ assert(0);
+ }
+#endif
+ if (shaderQualifiers.blendEquation)
+ error(loc, message, "blend equation", "");
+ if (shaderQualifiers.numViews != TQualifier::layoutNotSet)
+ error(loc, message, "num_views", "");
+}
+
+// Correct and/or advance an object's offset layout qualifier.
+void TParseContext::fixOffset(const TSourceLoc& loc, TSymbol& symbol)
+{
+ const TQualifier& qualifier = symbol.getType().getQualifier();
+ if (symbol.getType().getBasicType() == EbtAtomicUint) {
+ if (qualifier.hasBinding() && (int)qualifier.layoutBinding < resources.maxAtomicCounterBindings) {
+
+ // Set the offset
+ int offset;
+ if (qualifier.hasOffset())
+ offset = qualifier.layoutOffset;
+ else
+ offset = atomicUintOffsets[qualifier.layoutBinding];
+ symbol.getWritableType().getQualifier().layoutOffset = offset;
+
+ // Check for overlap
+ int numOffsets = 4;
+ if (symbol.getType().isArray()) {
+ if (symbol.getType().isSizedArray() && !symbol.getType().getArraySizes()->isInnerUnsized())
+ numOffsets *= symbol.getType().getCumulativeArraySize();
+ else {
+ // "It is a compile-time error to declare an unsized array of atomic_uint."
+ error(loc, "array must be explicitly sized", "atomic_uint", "");
+ }
+ }
+ int repeated = intermediate.addUsedOffsets(qualifier.layoutBinding, offset, numOffsets);
+ if (repeated >= 0)
+ error(loc, "atomic counters sharing the same offset:", "offset", "%d", repeated);
+
+ // Bump the default offset
+ atomicUintOffsets[qualifier.layoutBinding] = offset + numOffsets;
+ }
+ }
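+    // For example (illustrative): each atomic_uint consumes 4 bytes of its binding's
+    // running default offset:
+    //     layout(binding = 0) uniform atomic_uint a;              // gets offset 0
+    //     layout(binding = 0) uniform atomic_uint b;              // default advances, gets offset 4
+    //     layout(binding = 0, offset = 4) uniform atomic_uint c;  // error: overlaps b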
+}
+
+//
+// Look up a function name in the symbol table, and make sure it is a function.
+//
+// Return the function symbol if found, otherwise nullptr.
+//
+const TFunction* TParseContext::findFunction(const TSourceLoc& loc, const TFunction& call, bool& builtIn)
+{
+ const TFunction* function = nullptr;
+
+ if (symbolTable.isFunctionNameVariable(call.getName())) {
+ error(loc, "can't use function syntax on variable", call.getName().c_str(), "");
+ return nullptr;
+ }
+
+ bool explicitTypesEnabled = extensionTurnedOn(E_GL_EXT_shader_explicit_arithmetic_types) ||
+ extensionTurnedOn(E_GL_EXT_shader_explicit_arithmetic_types_int8) ||
+ extensionTurnedOn(E_GL_EXT_shader_explicit_arithmetic_types_int16) ||
+ extensionTurnedOn(E_GL_EXT_shader_explicit_arithmetic_types_int32) ||
+ extensionTurnedOn(E_GL_EXT_shader_explicit_arithmetic_types_int64) ||
+ extensionTurnedOn(E_GL_EXT_shader_explicit_arithmetic_types_float16) ||
+ extensionTurnedOn(E_GL_EXT_shader_explicit_arithmetic_types_float32) ||
+ extensionTurnedOn(E_GL_EXT_shader_explicit_arithmetic_types_float64);
+
+ if (profile == EEsProfile || version < 120)
+ function = findFunctionExact(loc, call, builtIn);
+ else if (version < 400)
+ function = findFunction120(loc, call, builtIn);
+ else if (explicitTypesEnabled)
+ function = findFunctionExplicitTypes(loc, call, builtIn);
+ else
+ function = findFunction400(loc, call, builtIn);
+
+ return function;
+}
+
+// Function finding algorithm for ES and desktop 110.
+const TFunction* TParseContext::findFunctionExact(const TSourceLoc& loc, const TFunction& call, bool& builtIn)
+{
+ TSymbol* symbol = symbolTable.find(call.getMangledName(), &builtIn);
+ if (symbol == nullptr) {
+ error(loc, "no matching overloaded function found", call.getName().c_str(), "");
+
+ return nullptr;
+ }
+
+ return symbol->getAsFunction();
+}
+
+// Function finding algorithm for desktop versions 120 through 330.
+const TFunction* TParseContext::findFunction120(const TSourceLoc& loc, const TFunction& call, bool& builtIn)
+{
+ // first, look for an exact match
+ TSymbol* symbol = symbolTable.find(call.getMangledName(), &builtIn);
+ if (symbol)
+ return symbol->getAsFunction();
+
+ // exact match not found, look through a list of overloaded functions of the same name
+
+ // "If no exact match is found, then [implicit conversions] will be applied to find a match. Mismatched types
+ // on input parameters (in or inout or default) must have a conversion from the calling argument type to the
+ // formal parameter type. Mismatched types on output parameters (out or inout) must have a conversion
+ // from the formal parameter type to the calling argument type. When argument conversions are used to find
+ // a match, it is a semantic error if there are multiple ways to apply these conversions to make the call match
+ // more than one function."
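+    //
+    // For example (illustrative, with fp64 available): given
+    //     void f(float x);
+    //     void f(double x);
+    // the call f(1) matches both via implicit conversion (int -> float and int -> double),
+    // so under these rules it is reported as ambiguous.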
+
+ const TFunction* candidate = nullptr;
+ TVector<const TFunction*> candidateList;
+ symbolTable.findFunctionNameList(call.getMangledName(), candidateList, builtIn);
+
+ for (auto it = candidateList.begin(); it != candidateList.end(); ++it) {
+ const TFunction& function = *(*it);
+
+ // to even be a potential match, number of arguments has to match
+ if (call.getParamCount() != function.getParamCount())
+ continue;
+
+ bool possibleMatch = true;
+ for (int i = 0; i < function.getParamCount(); ++i) {
+ // same types is easy
+ if (*function[i].type == *call[i].type)
+ continue;
+
+ // We have a mismatch in type, see if it is implicitly convertible
+
+ if (function[i].type->isArray() || call[i].type->isArray() ||
+ ! function[i].type->sameElementShape(*call[i].type))
+ possibleMatch = false;
+ else {
+ // do direction-specific checks for conversion of basic type
+ if (function[i].type->getQualifier().isParamInput()) {
+ if (! intermediate.canImplicitlyPromote(call[i].type->getBasicType(), function[i].type->getBasicType()))
+ possibleMatch = false;
+ }
+ if (function[i].type->getQualifier().isParamOutput()) {
+ if (! intermediate.canImplicitlyPromote(function[i].type->getBasicType(), call[i].type->getBasicType()))
+ possibleMatch = false;
+ }
+ }
+ if (! possibleMatch)
+ break;
+ }
+ if (possibleMatch) {
+ if (candidate) {
+ // our second match, meaning ambiguity
+ error(loc, "ambiguous function signature match: multiple signatures match under implicit type conversion", call.getName().c_str(), "");
+ } else
+ candidate = &function;
+ }
+ }
+
+ if (candidate == nullptr)
+ error(loc, "no matching overloaded function found", call.getName().c_str(), "");
+
+ return candidate;
+}
+
+// Function finding algorithm for desktop version 400 and above.
+//
+// "When function calls are resolved, an exact type match for all the arguments
+// is sought. If an exact match is found, all other functions are ignored, and
+// the exact match is used. If no exact match is found, then the implicit
+// conversions in section 4.1.10 Implicit Conversions will be applied to find
+// a match. Mismatched types on input parameters (in or inout or default) must
+// have a conversion from the calling argument type to the formal parameter type.
+// Mismatched types on output parameters (out or inout) must have a conversion
+// from the formal parameter type to the calling argument type.
+//
+// "If implicit conversions can be used to find more than one matching function,
+// a single best-matching function is sought. To determine a best match, the
+// conversions between calling argument and formal parameter types are compared
+// for each function argument and pair of matching functions. After these
+// comparisons are performed, each pair of matching functions are compared.
+// A function declaration A is considered a better match than function
+// declaration B if
+//
+// * for at least one function argument, the conversion for that argument in A
+// is better than the corresponding conversion in B; and
+// * there is no function argument for which the conversion in B is better than
+// the corresponding conversion in A.
+//
+// "If a single function declaration is considered a better match than every
+// other matching function declaration, it will be used. Otherwise, a
+// compile-time semantic error for an ambiguous overloaded function call occurs.
+//
+// "To determine whether the conversion for a single argument in one match is
+// better than that for another match, the following rules are applied, in order:
+//
+// 1. An exact match is better than a match involving any implicit conversion.
+// 2. A match involving an implicit conversion from float to double is better
+// than a match involving any other implicit conversion.
+// 3. A match involving an implicit conversion from either int or uint to float
+// is better than a match involving an implicit conversion from either int
+// or uint to double.
+//
+// "If none of the rules above apply to a particular pair of conversions, neither
+// conversion is considered better than the other."
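+//
+// For example (illustrative): given
+//     void g(float x);
+//     void g(double x);
+// the call g(1) resolves to g(float): by rule 3, int -> float beats int -> double.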
+//
+const TFunction* TParseContext::findFunction400(const TSourceLoc& loc, const TFunction& call, bool& builtIn)
+{
+ // first, look for an exact match
+ TSymbol* symbol = symbolTable.find(call.getMangledName(), &builtIn);
+ if (symbol)
+ return symbol->getAsFunction();
+
+ // no exact match, use the generic selector, parameterized by the GLSL rules
+
+ // create list of candidates to send
+ TVector<const TFunction*> candidateList;
+ symbolTable.findFunctionNameList(call.getMangledName(), candidateList, builtIn);
+
+ // can 'from' convert to 'to'?
+ const auto convertible = [this,builtIn](const TType& from, const TType& to, TOperator, int) -> bool {
+ if (from == to)
+ return true;
+ if (from.coopMatParameterOK(to))
+ return true;
+ // Allow a sized array to be passed through an unsized array parameter, for coopMatLoad/Store functions
+ if (builtIn && from.isArray() && to.isUnsizedArray()) {
+ TType fromElementType(from, 0);
+ TType toElementType(to, 0);
+ if (fromElementType == toElementType)
+ return true;
+ }
+ if (from.isArray() || to.isArray() || ! from.sameElementShape(to))
+ return false;
+ return intermediate.canImplicitlyPromote(from.getBasicType(), to.getBasicType());
+ };
+
+ // Is 'to2' a better conversion than 'to1'?
+ // Ties should not be considered as better.
+ // Assumes 'convertible' already said true.
+ const auto better = [](const TType& from, const TType& to1, const TType& to2) -> bool {
+ // 1. exact match
+ if (from == to2)
+ return from != to1;
+ if (from == to1)
+ return false;
+
+ // 2. float -> double is better
+ if (from.getBasicType() == EbtFloat) {
+ if (to2.getBasicType() == EbtDouble && to1.getBasicType() != EbtDouble)
+ return true;
+ }
+
+ // 3. -> float is better than -> double
+ return to2.getBasicType() == EbtFloat && to1.getBasicType() == EbtDouble;
+ };
+
+ // for ambiguity reporting
+ bool tie = false;
+
+ // send to the generic selector
+ const TFunction* bestMatch = selectFunction(candidateList, call, convertible, better, tie);
+
+ if (bestMatch == nullptr)
+ error(loc, "no matching overloaded function found", call.getName().c_str(), "");
+ else if (tie)
+ error(loc, "ambiguous best function under implicit type conversion", call.getName().c_str(), "");
+
+ return bestMatch;
+}
+
+// "To determine whether the conversion for a single argument in one match
+// is better than that for another match, the conversion is assigned one of the
+// three ranks ordered from best to worst:
+// 1. Exact match: no conversion.
+// 2. Promotion: integral or floating-point promotion.
+// 3. Conversion: integral conversion, floating-point conversion,
+// floating-integral conversion.
+// A conversion C1 is better than a conversion C2 if the rank of C1 is
+// better than the rank of C2."
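+//
+// For example (illustrative, with explicit arithmetic types enabled): given
+//     void h(int x);
+//     void h(float x);
+// the call h(int16_t(1)) resolves to h(int): int16_t -> int is a promotion,
+// while int16_t -> float is a conversion, and promotions rank better.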
+const TFunction* TParseContext::findFunctionExplicitTypes(const TSourceLoc& loc, const TFunction& call, bool& builtIn)
+{
+ // first, look for an exact match
+ TSymbol* symbol = symbolTable.find(call.getMangledName(), &builtIn);
+ if (symbol)
+ return symbol->getAsFunction();
+
+ // no exact match, use the generic selector, parameterized by the GLSL rules
+
+ // create list of candidates to send
+ TVector<const TFunction*> candidateList;
+ symbolTable.findFunctionNameList(call.getMangledName(), candidateList, builtIn);
+
+ // can 'from' convert to 'to'?
+ const auto convertible = [this,builtIn](const TType& from, const TType& to, TOperator, int) -> bool {
+ if (from == to)
+ return true;
+ if (from.coopMatParameterOK(to))
+ return true;
+ // Allow a sized array to be passed through an unsized array parameter, for coopMatLoad/Store functions
+ if (builtIn && from.isArray() && to.isUnsizedArray()) {
+ TType fromElementType(from, 0);
+ TType toElementType(to, 0);
+ if (fromElementType == toElementType)
+ return true;
+ }
+ if (from.isArray() || to.isArray() || ! from.sameElementShape(to))
+ return false;
+ return intermediate.canImplicitlyPromote(from.getBasicType(), to.getBasicType());
+ };
+
+ // Is 'to2' a better conversion than 'to1'?
+ // Ties should not be considered as better.
+ // Assumes 'convertible' already said true.
+ const auto better = [this](const TType& from, const TType& to1, const TType& to2) -> bool {
+ // 1. exact match
+ if (from == to2)
+ return from != to1;
+ if (from == to1)
+ return false;
+
+ // 2. Promotion (integral, floating-point) is better
+ TBasicType from_type = from.getBasicType();
+ TBasicType to1_type = to1.getBasicType();
+ TBasicType to2_type = to2.getBasicType();
+ bool isPromotion1 = (intermediate.isIntegralPromotion(from_type, to1_type) ||
+ intermediate.isFPPromotion(from_type, to1_type));
+ bool isPromotion2 = (intermediate.isIntegralPromotion(from_type, to2_type) ||
+ intermediate.isFPPromotion(from_type, to2_type));
+ if (isPromotion2)
+ return !isPromotion1;
+        if (isPromotion1)
+            return false;
+
+        // 3. Conversion (integral, floating-point, floating-integral)
+ bool isConversion1 = (intermediate.isIntegralConversion(from_type, to1_type) ||
+ intermediate.isFPConversion(from_type, to1_type) ||
+ intermediate.isFPIntegralConversion(from_type, to1_type));
+ bool isConversion2 = (intermediate.isIntegralConversion(from_type, to2_type) ||
+ intermediate.isFPConversion(from_type, to2_type) ||
+ intermediate.isFPIntegralConversion(from_type, to2_type));
+
+ return isConversion2 && !isConversion1;
+ };
+
+ // for ambiguity reporting
+ bool tie = false;
+
+ // send to the generic selector
+ const TFunction* bestMatch = selectFunction(candidateList, call, convertible, better, tie);
+
+ if (bestMatch == nullptr)
+ error(loc, "no matching overloaded function found", call.getName().c_str(), "");
+ else if (tie)
+ error(loc, "ambiguous best function under implicit type conversion", call.getName().c_str(), "");
+
+ return bestMatch;
+}
+
+// When a declaration includes a type, but not a variable name, it can be used
+// to establish defaults.
+void TParseContext::declareTypeDefaults(const TSourceLoc& loc, const TPublicType& publicType)
+{
+ if (publicType.basicType == EbtAtomicUint && publicType.qualifier.hasBinding() && publicType.qualifier.hasOffset()) {
+ if (publicType.qualifier.layoutBinding >= (unsigned int)resources.maxAtomicCounterBindings) {
+ error(loc, "atomic_uint binding is too large", "binding", "");
+ return;
+ }
+ atomicUintOffsets[publicType.qualifier.layoutBinding] = publicType.qualifier.layoutOffset;
+ return;
+ }
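+    // For example (illustrative): a typeless declaration establishes the default offset,
+    //     layout(binding = 0, offset = 8) uniform atomic_uint;  // binding 0 now defaults to offset 8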
+
+ if (publicType.qualifier.hasLayout() && !publicType.qualifier.layoutBufferReference)
+ warn(loc, "useless application of layout qualifier", "layout", "");
+}
+
+//
+// Do everything necessary to handle a variable (non-block) declaration.
+// Either redeclaring a variable, or making a new one, updating the symbol
+// table, and all error checking.
+//
+// Returns a subtree node that computes an initializer, if needed.
+// Returns nullptr if there is no code to execute for initialization.
+//
+// 'publicType' is the type part of the declaration (to the left)
+// 'arraySizes' is the arrayness tagged on the identifier (to the right)
+//
+TIntermNode* TParseContext::declareVariable(const TSourceLoc& loc, TString& identifier, const TPublicType& publicType,
+ TArraySizes* arraySizes, TIntermTyped* initializer)
+{
+ // Make a fresh type that combines the characteristics from the individual
+ // identifier syntax and the declaration-type syntax.
+ TType type(publicType);
+ type.transferArraySizes(arraySizes);
+ type.copyArrayInnerSizes(publicType.arraySizes);
+ arrayOfArrayVersionCheck(loc, type.getArraySizes());
+
+ if (type.isCoopMat()) {
+ intermediate.setUseVulkanMemoryModel();
+ intermediate.setUseStorageBuffer();
+
+ if (!publicType.typeParameters || publicType.typeParameters->getNumDims() != 4) {
+ error(loc, "expected four type parameters", identifier.c_str(), "");
+ }
+ if (publicType.typeParameters &&
+ publicType.typeParameters->getDimSize(0) != 16 &&
+ publicType.typeParameters->getDimSize(0) != 32 &&
+ publicType.typeParameters->getDimSize(0) != 64) {
+ error(loc, "expected 16, 32, or 64 bits for first type parameter", identifier.c_str(), "");
+ }
+ } else {
+ if (publicType.typeParameters && publicType.typeParameters->getNumDims() != 0) {
+ error(loc, "unexpected type parameters", identifier.c_str(), "");
+ }
+ }
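+    // For example (illustrative, GL_NV_cooperative_matrix):
+    //     fcoopmatNV<16, gl_ScopeSubgroup, 16, 8> m;  // four type parameters; first must be 16, 32, or 64 bits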
+
+ if (voidErrorCheck(loc, identifier, type.getBasicType()))
+ return nullptr;
+
+ if (initializer)
+ rValueErrorCheck(loc, "initializer", initializer);
+ else
+ nonInitConstCheck(loc, identifier, type);
+
+ samplerCheck(loc, type, identifier, initializer);
+ atomicUintCheck(loc, type, identifier);
+ transparentOpaqueCheck(loc, type, identifier);
+#ifdef NV_EXTENSIONS
+ accStructNVCheck(loc, type, identifier);
+#endif
+ if (type.getQualifier().storage == EvqConst && type.containsBasicType(EbtReference)) {
+ error(loc, "variables with reference type can't have qualifier 'const'", "qualifier", "");
+ }
+
+ if (type.getQualifier().storage != EvqUniform && type.getQualifier().storage != EvqBuffer) {
+ if (type.containsBasicType(EbtFloat16))
+ requireFloat16Arithmetic(loc, "qualifier", "float16 types can only be in uniform block or buffer storage");
+ if (type.contains16BitInt())
+ requireInt16Arithmetic(loc, "qualifier", "(u)int16 types can only be in uniform block or buffer storage");
+ if (type.contains8BitInt())
+ requireInt8Arithmetic(loc, "qualifier", "(u)int8 types can only be in uniform block or buffer storage");
+ }
+
+ if (type.getQualifier().storage == EvqShared &&
+ type.containsCoopMat())
+ error(loc, "qualifier", "Cooperative matrix types must not be used in shared memory", "");
+
+ if (identifier != "gl_FragCoord" && (publicType.shaderQualifiers.originUpperLeft || publicType.shaderQualifiers.pixelCenterInteger))
+ error(loc, "can only apply origin_upper_left and pixel_center_origin to gl_FragCoord", "layout qualifier", "");
+ if (identifier != "gl_FragDepth" && publicType.shaderQualifiers.layoutDepth != EldNone)
+ error(loc, "can only apply depth layout to gl_FragDepth", "layout qualifier", "");
+
+ // Check for redeclaration of built-ins and/or attempting to declare a reserved name
+ TSymbol* symbol = redeclareBuiltinVariable(loc, identifier, type.getQualifier(), publicType.shaderQualifiers);
+ if (symbol == nullptr)
+ reservedErrorCheck(loc, identifier);
+
+ inheritGlobalDefaults(type.getQualifier());
+
+ // Declare the variable
+ if (type.isArray()) {
+ // Check that implicit sizing is only where allowed.
+ arraySizesCheck(loc, type.getQualifier(), type.getArraySizes(), initializer, false);
+
+ if (! arrayQualifierError(loc, type.getQualifier()) && ! arrayError(loc, type))
+ declareArray(loc, identifier, type, symbol);
+
+ if (initializer) {
+ profileRequires(loc, ENoProfile, 120, E_GL_3DL_array_objects, "initializer");
+ profileRequires(loc, EEsProfile, 300, nullptr, "initializer");
+ }
+ } else {
+ // non-array case
+ if (symbol == nullptr)
+ symbol = declareNonArray(loc, identifier, type);
+ else if (type != symbol->getType())
+ error(loc, "cannot change the type of", "redeclaration", symbol->getName().c_str());
+ }
+
+ if (symbol == nullptr)
+ return nullptr;
+
+ // Deal with initializer
+ TIntermNode* initNode = nullptr;
+ if (symbol != nullptr && initializer) {
+ TVariable* variable = symbol->getAsVariable();
+ if (! variable) {
+ error(loc, "initializer requires a variable, not a member", identifier.c_str(), "");
+ return nullptr;
+ }
+ initNode = executeInitializer(loc, initializer, variable);
+ }
+
+ // look for errors in layout qualifier use
+ layoutObjectCheck(loc, *symbol);
+
+ // fix up
+ fixOffset(loc, *symbol);
+
+ return initNode;
+}
+
+// Pick up the applicable global defaults and copy them into dst.
+void TParseContext::inheritGlobalDefaults(TQualifier& dst) const
+{
+ if (dst.storage == EvqVaryingOut) {
+ if (! dst.hasStream() && language == EShLangGeometry)
+ dst.layoutStream = globalOutputDefaults.layoutStream;
+ if (! dst.hasXfbBuffer())
+ dst.layoutXfbBuffer = globalOutputDefaults.layoutXfbBuffer;
+ }
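+    // For example (illustrative, geometry shaders): after the standalone declaration
+    //     layout(stream = 1) out;
+    // a later 'out vec4 v;' with no stream qualifier inherits stream 1 here.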
+}
+
+//
+// Make an internal-only variable whose name is for debug purposes only
+// and won't be searched for. Callers will only use the return value to access
+// the variable, not the name to look it up. It is okay if the name
+// is the same as other names; there won't be any conflict.
+//
+TVariable* TParseContext::makeInternalVariable(const char* name, const TType& type) const
+{
+ TString* nameString = NewPoolTString(name);
+ TVariable* variable = new TVariable(nameString, type);
+ symbolTable.makeInternalVariable(*variable);
+
+ return variable;
+}
+
+//
+// Declare a non-array variable, the main point being there is no redeclaration
+// for resizing allowed.
+//
+// Return the successfully declared variable.
+//
+TVariable* TParseContext::declareNonArray(const TSourceLoc& loc, const TString& identifier, const TType& type)
+{
+ // make a new variable
+ TVariable* variable = new TVariable(&identifier, type);
+
+ ioArrayCheck(loc, type, identifier);
+
+ // add variable to symbol table
+ if (symbolTable.insert(*variable)) {
+ if (symbolTable.atGlobalLevel())
+ trackLinkage(*variable);
+ return variable;
+ }
+
+ error(loc, "redefinition", variable->getName().c_str(), "");
+ return nullptr;
+}
+
+//
+// Handle all types of initializers from the grammar.
+//
+// Returning nullptr just means there is no code to execute to handle the
+// initializer, which will, for example, be the case for constant initializers.
+//
+TIntermNode* TParseContext::executeInitializer(const TSourceLoc& loc, TIntermTyped* initializer, TVariable* variable)
+{
+ //
+ // Identifier must be of type constant, a global, or a temporary, and
+ // starting at version 120, desktop allows uniforms to have initializers.
+ //
+ TStorageQualifier qualifier = variable->getType().getQualifier().storage;
+ if (! (qualifier == EvqTemporary || qualifier == EvqGlobal || qualifier == EvqConst ||
+ (qualifier == EvqUniform && profile != EEsProfile && version >= 120))) {
+ error(loc, " cannot initialize this type of qualifier ", variable->getType().getStorageQualifierString(), "");
+ return nullptr;
+ }
+ arrayObjectCheck(loc, variable->getType(), "array initializer");
+
+ //
+ // If the initializer was from braces { ... }, we convert the whole subtree to a
+ // constructor-style subtree, allowing the rest of the code to operate
+ // identically for both kinds of initializers.
+ //
+ // Type can't be deduced from the initializer list, so a skeletal type to
+ // follow has to be passed in. Constness and specialization-constness
+ // should be deduced bottom up, not dictated by the skeletal type.
+ //
+ TType skeletalType;
+ skeletalType.shallowCopy(variable->getType());
+ skeletalType.getQualifier().makeTemporary();
+ initializer = convertInitializerList(loc, skeletalType, initializer);
+ if (! initializer) {
+ // error recovery; don't leave const without constant values
+ if (qualifier == EvqConst)
+ variable->getWritableType().getQualifier().makeTemporary();
+ return nullptr;
+ }
+
+ // Fix outer arrayness if variable is unsized, getting size from the initializer
+ if (initializer->getType().isSizedArray() && variable->getType().isUnsizedArray())
+ variable->getWritableType().changeOuterArraySize(initializer->getType().getOuterArraySize());
+
+ // Inner arrayness can also get set by an initializer
+ if (initializer->getType().isArrayOfArrays() && variable->getType().isArrayOfArrays() &&
+ initializer->getType().getArraySizes()->getNumDims() ==
+ variable->getType().getArraySizes()->getNumDims()) {
+ // adopt unsized sizes from the initializer's sizes
+ for (int d = 1; d < variable->getType().getArraySizes()->getNumDims(); ++d) {
+ if (variable->getType().getArraySizes()->getDimSize(d) == UnsizedArraySize) {
+ variable->getWritableType().getArraySizes()->setDimSize(d,
+ initializer->getType().getArraySizes()->getDimSize(d));
+ }
+ }
+ }
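+    // For example (illustrative):
+    //     float a[] = float[](1.0, 2.0, 3.0);  // 'a' was resized above to float[3]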
+
+ // Uniforms require a compile-time constant initializer
+ if (qualifier == EvqUniform && ! initializer->getType().getQualifier().isFrontEndConstant()) {
+ error(loc, "uniform initializers must be constant", "=", "'%s'", variable->getType().getCompleteString().c_str());
+ variable->getWritableType().getQualifier().makeTemporary();
+ return nullptr;
+ }
+ // Global consts require a constant initializer (specialization constant is okay)
+ if (qualifier == EvqConst && symbolTable.atGlobalLevel() && ! initializer->getType().getQualifier().isConstant()) {
+ error(loc, "global const initializers must be constant", "=", "'%s'", variable->getType().getCompleteString().c_str());
+ variable->getWritableType().getQualifier().makeTemporary();
+ return nullptr;
+ }
+
+ // Const variables require a constant initializer, depending on version
+ if (qualifier == EvqConst) {
+ if (! initializer->getType().getQualifier().isConstant()) {
+ const char* initFeature = "non-constant initializer";
+ requireProfile(loc, ~EEsProfile, initFeature);
+ profileRequires(loc, ~EEsProfile, 420, E_GL_ARB_shading_language_420pack, initFeature);
+ variable->getWritableType().getQualifier().storage = EvqConstReadOnly;
+ qualifier = EvqConstReadOnly;
+ }
+ } else {
+ // Non-const global variables in ES need a const initializer.
+ //
+ // "In declarations of global variables with no storage qualifier or with a const
+ // qualifier any initializer must be a constant expression."
+ if (symbolTable.atGlobalLevel() && ! initializer->getType().getQualifier().isConstant()) {
+ const char* initFeature = "non-constant global initializer (needs GL_EXT_shader_non_constant_global_initializers)";
+ if (profile == EEsProfile) {
+ if (relaxedErrors() && ! extensionTurnedOn(E_GL_EXT_shader_non_constant_global_initializers))
+ warn(loc, "not allowed in this version", initFeature, "");
+ else
+ profileRequires(loc, EEsProfile, 0, E_GL_EXT_shader_non_constant_global_initializers, initFeature);
+ }
+ }
+ }
+
+ if (qualifier == EvqConst || qualifier == EvqUniform) {
+ // Compile-time tagging of the variable with its constant value...
+
+ initializer = intermediate.addConversion(EOpAssign, variable->getType(), initializer);
+ if (! initializer || ! initializer->getType().getQualifier().isConstant() || variable->getType() != initializer->getType()) {
+ error(loc, "non-matching or non-convertible constant type for const initializer",
+ variable->getType().getStorageQualifierString(), "");
+ variable->getWritableType().getQualifier().makeTemporary();
+ return nullptr;
+ }
+
+ // We either have a folded constant in getAsConstantUnion, or we have to use
+ // the initializer's subtree in the AST to represent the computation of a
+ // specialization constant.
+ assert(initializer->getAsConstantUnion() || initializer->getType().getQualifier().isSpecConstant());
+ if (initializer->getAsConstantUnion())
+ variable->setConstArray(initializer->getAsConstantUnion()->getConstArray());
+ else {
+ // It's a specialization constant.
+ variable->getWritableType().getQualifier().makeSpecConstant();
+
+ // Keep the subtree that computes the specialization constant with the variable.
+ // Later, a symbol node will adopt the subtree from the variable.
+ variable->setConstSubtree(initializer);
+ }
+ } else {
+ // normal assigning of a value to a variable...
+ specializationCheck(loc, initializer->getType(), "initializer");
+ TIntermSymbol* intermSymbol = intermediate.addSymbol(*variable, loc);
+ TIntermTyped* initNode = intermediate.addAssign(EOpAssign, intermSymbol, initializer, loc);
+ if (! initNode)
+ assignError(loc, "=", intermSymbol->getCompleteString(), initializer->getCompleteString());
+
+ return initNode;
+ }
+
+ return nullptr;
+}
+
+//
+// Reprocess any initializer-list (the "{ ... }" syntax) parts of the
+// initializer.
+//
+// Need to hierarchically assign correct types and implicit
+// conversions. Will do this mimicking the same process used for
+// creating a constructor-style initializer, ensuring we get the
+// same form. However, it has to walk the 'type' passed in, in parallel,
+// as the type cannot be deduced from an initializer list.
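+//
+// For example (illustrative):
+//     mat2 m = { { 1.0, 0.0 }, { 0.0, 1.0 } };
+// is rebuilt here as the constructor-style
+//     mat2 m = mat2(vec2(1.0, 0.0), vec2(0.0, 1.0));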
+//
+TIntermTyped* TParseContext::convertInitializerList(const TSourceLoc& loc, const TType& type, TIntermTyped* initializer)
+{
+ // Will operate recursively. Once a subtree is found that is constructor style,
+ // everything below it is already good: Only the "top part" of the initializer
+ // can be an initializer list, where "top part" can extend for several (or all) levels.
+
+ // see if we have bottomed out in the tree within the initializer-list part
+ TIntermAggregate* initList = initializer->getAsAggregate();
+ if (! initList || initList->getOp() != EOpNull)
+ return initializer;
+
+ // Of the initializer-list set of nodes, need to process bottom up,
+ // so recurse deep, then process on the way up.
+
+ // Go down the tree here...
+ if (type.isArray()) {
+ // The type's array might be unsized, which could be okay, so base sizes on the size of the aggregate.
+ // Later on, initializer execution code will deal with array size logic.
+ TType arrayType;
+ arrayType.shallowCopy(type); // sharing struct stuff is fine
+ arrayType.copyArraySizes(*type.getArraySizes()); // but get a fresh copy of the array information, to edit below
+
+ // edit array sizes to fill in unsized dimensions
+ arrayType.changeOuterArraySize((int)initList->getSequence().size());
+ TIntermTyped* firstInit = initList->getSequence()[0]->getAsTyped();
+ if (arrayType.isArrayOfArrays() && firstInit->getType().isArray() &&
+ arrayType.getArraySizes()->getNumDims() == firstInit->getType().getArraySizes()->getNumDims() + 1) {
+ for (int d = 1; d < arrayType.getArraySizes()->getNumDims(); ++d) {
+ if (arrayType.getArraySizes()->getDimSize(d) == UnsizedArraySize)
+ arrayType.getArraySizes()->setDimSize(d, firstInit->getType().getArraySizes()->getDimSize(d - 1));
+ }
+ }
+
+ TType elementType(arrayType, 0); // dereferenced type
+ for (size_t i = 0; i < initList->getSequence().size(); ++i) {
+ initList->getSequence()[i] = convertInitializerList(loc, elementType, initList->getSequence()[i]->getAsTyped());
+ if (initList->getSequence()[i] == nullptr)
+ return nullptr;
+ }
+
+ return addConstructor(loc, initList, arrayType);
+ } else if (type.isStruct()) {
+ if (type.getStruct()->size() != initList->getSequence().size()) {
+ error(loc, "wrong number of structure members", "initializer list", "");
+ return nullptr;
+ }
+ for (size_t i = 0; i < type.getStruct()->size(); ++i) {
+ initList->getSequence()[i] = convertInitializerList(loc, *(*type.getStruct())[i].type, initList->getSequence()[i]->getAsTyped());
+ if (initList->getSequence()[i] == nullptr)
+ return nullptr;
+ }
+ } else if (type.isMatrix()) {
+ if (type.getMatrixCols() != (int)initList->getSequence().size()) {
+ error(loc, "wrong number of matrix columns:", "initializer list", type.getCompleteString().c_str());
+ return nullptr;
+ }
+ TType vectorType(type, 0); // dereferenced type
+ for (int i = 0; i < type.getMatrixCols(); ++i) {
+ initList->getSequence()[i] = convertInitializerList(loc, vectorType, initList->getSequence()[i]->getAsTyped());
+ if (initList->getSequence()[i] == nullptr)
+ return nullptr;
+ }
+ } else if (type.isVector()) {
+ if (type.getVectorSize() != (int)initList->getSequence().size()) {
+ error(loc, "wrong vector size (or rows in a matrix column):", "initializer list", type.getCompleteString().c_str());
+ return nullptr;
+ }
+ } else {
+ error(loc, "unexpected initializer-list type:", "initializer list", type.getCompleteString().c_str());
+ return nullptr;
+ }
+
+ // Now that the subtree is processed, process this node as if the
+ // initializer list is a set of arguments to a constructor.
+ TIntermNode* emulatedConstructorArguments;
+ if (initList->getSequence().size() == 1)
+ emulatedConstructorArguments = initList->getSequence()[0];
+ else
+ emulatedConstructorArguments = initList;
+ return addConstructor(loc, emulatedConstructorArguments, type);
+}
+
+//
+// Test for the correctness of the parameters passed to various constructor functions
+// and also convert them to the right data type, if allowed and required.
+//
+// 'node' is what to construct from.
+// 'type' is what type to construct.
+//
+// Returns nullptr for an error or the constructed node (aggregate or typed) for no error.
+//
+TIntermTyped* TParseContext::addConstructor(const TSourceLoc& loc, TIntermNode* node, const TType& type)
+{
+ if (node == nullptr || node->getAsTyped() == nullptr)
+ return nullptr;
+ rValueErrorCheck(loc, "constructor", node->getAsTyped());
+
+ TIntermAggregate* aggrNode = node->getAsAggregate();
+ TOperator op = intermediate.mapTypeToConstructorOp(type);
+
+ // Combined texture-sampler constructors are completely semantic checked
+ // in constructorTextureSamplerError()
+ if (op == EOpConstructTextureSampler) {
+ if (aggrNode->getSequence()[1]->getAsTyped()->getType().getSampler().shadow) {
+ // Transfer depth into the texture (SPIR-V image) type, as a hint
+ // for tools to know this texture/image is a depth image.
+ aggrNode->getSequence()[0]->getAsTyped()->getWritableType().getSampler().shadow = true;
+ }
+ return intermediate.setAggregateOperator(aggrNode, op, type, loc);
+ }
+
+ TTypeList::const_iterator memberTypes;
+ if (op == EOpConstructStruct)
+ memberTypes = type.getStruct()->begin();
+
+ TType elementType;
+ if (type.isArray()) {
+ TType dereferenced(type, 0);
+ elementType.shallowCopy(dereferenced);
+ } else
+ elementType.shallowCopy(type);
+
+ bool singleArg;
+ if (aggrNode) {
+ if (aggrNode->getOp() != EOpNull)
+ singleArg = true;
+ else
+ singleArg = false;
+ } else
+ singleArg = true;
+
+ TIntermTyped *newNode;
+ if (singleArg) {
+        // If a structure or array constructor is being called with only one
+        // parameter, constructAggregate() needs to be called just once.
+ if (type.isArray())
+ newNode = constructAggregate(node, elementType, 1, node->getLoc());
+ else if (op == EOpConstructStruct)
+ newNode = constructAggregate(node, *(*memberTypes).type, 1, node->getLoc());
+ else
+ newNode = constructBuiltIn(type, op, node->getAsTyped(), node->getLoc(), false);
+
+ if (newNode && (type.isArray() || op == EOpConstructStruct))
+ newNode = intermediate.setAggregateOperator(newNode, EOpConstructStruct, type, loc);
+
+ return newNode;
+ }
+
+ //
+ // Handle list of arguments.
+ //
+ TIntermSequence &sequenceVector = aggrNode->getSequence(); // Stores the information about the parameter to the constructor
+ // if the structure constructor contains more than one parameter, then construct
+ // each parameter
+
+ int paramCount = 0; // keeps track of the constructor parameter number being checked
+
+ // for each parameter to the constructor call, check to see if the right type is passed or convert them
+ // to the right type if possible (and allowed).
+ // for structure constructors, just check if the right type is passed, no conversion is allowed.
+ for (TIntermSequence::iterator p = sequenceVector.begin();
+ p != sequenceVector.end(); p++, paramCount++) {
+ if (type.isArray())
+ newNode = constructAggregate(*p, elementType, paramCount+1, node->getLoc());
+ else if (op == EOpConstructStruct)
+ newNode = constructAggregate(*p, *(memberTypes[paramCount]).type, paramCount+1, node->getLoc());
+ else
+ newNode = constructBuiltIn(type, op, (*p)->getAsTyped(), node->getLoc(), true);
+
+ if (newNode)
+ *p = newNode;
+ else
+ return nullptr;
+ }
+
+ return intermediate.setAggregateOperator(aggrNode, op, type, loc);
+}
+
+// Function for constructor implementation. Calls addUnaryMath with appropriate EOp value
+// for the parameter to the constructor (passed to this function). Essentially, it converts
+// the parameter types correctly. If a constructor expects an int (as ivec2 does) and is
+// passed a float, the float is converted to an int.
+//
+// Returns nullptr for an error or the constructed node.
+//
+TIntermTyped* TParseContext::constructBuiltIn(const TType& type, TOperator op, TIntermTyped* node, const TSourceLoc& loc,
+ bool subset)
+{
+ // If we are changing a matrix in both domain of basic type and to a non matrix,
+ // do the shape change first (by default, below, basic type is changed before shape).
+ // This avoids requesting a matrix of a new type that is going to be discarded anyway.
+ // TODO: This could be generalized to more type combinations, but that would require
+ // more extensive testing and full algorithm rework. For now, the need to do two changes makes
+    // the recursive call work, and avoids the most egregious case of creating integer matrices.
+ if (node->getType().isMatrix() && (type.isScalar() || type.isVector()) &&
+ type.isFloatingDomain() != node->getType().isFloatingDomain()) {
+ TType transitionType(node->getBasicType(), glslang::EvqTemporary, type.getVectorSize(), 0, 0, node->isVector());
+ TOperator transitionOp = intermediate.mapTypeToConstructorOp(transitionType);
+ node = constructBuiltIn(transitionType, transitionOp, node, loc, false);
+ }
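+    // For example (illustrative): for ivec2(m) with 'm' a float matrix, the shape
+    // change to vec2 happens in the recursive call above, then the basic-type
+    // change to int happens below.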
+
+ TIntermTyped* newNode;
+ TOperator basicOp;
+
+ //
+ // First, convert types as needed.
+ //
+ switch (op) {
+ case EOpConstructVec2:
+ case EOpConstructVec3:
+ case EOpConstructVec4:
+ case EOpConstructMat2x2:
+ case EOpConstructMat2x3:
+ case EOpConstructMat2x4:
+ case EOpConstructMat3x2:
+ case EOpConstructMat3x3:
+ case EOpConstructMat3x4:
+ case EOpConstructMat4x2:
+ case EOpConstructMat4x3:
+ case EOpConstructMat4x4:
+ case EOpConstructFloat:
+ basicOp = EOpConstructFloat;
+ break;
+
+ case EOpConstructDVec2:
+ case EOpConstructDVec3:
+ case EOpConstructDVec4:
+ case EOpConstructDMat2x2:
+ case EOpConstructDMat2x3:
+ case EOpConstructDMat2x4:
+ case EOpConstructDMat3x2:
+ case EOpConstructDMat3x3:
+ case EOpConstructDMat3x4:
+ case EOpConstructDMat4x2:
+ case EOpConstructDMat4x3:
+ case EOpConstructDMat4x4:
+ case EOpConstructDouble:
+ basicOp = EOpConstructDouble;
+ break;
+
+ case EOpConstructF16Vec2:
+ case EOpConstructF16Vec3:
+ case EOpConstructF16Vec4:
+ case EOpConstructF16Mat2x2:
+ case EOpConstructF16Mat2x3:
+ case EOpConstructF16Mat2x4:
+ case EOpConstructF16Mat3x2:
+ case EOpConstructF16Mat3x3:
+ case EOpConstructF16Mat3x4:
+ case EOpConstructF16Mat4x2:
+ case EOpConstructF16Mat4x3:
+ case EOpConstructF16Mat4x4:
+ case EOpConstructFloat16:
+ basicOp = EOpConstructFloat16;
+ break;
+
+ case EOpConstructI8Vec2:
+ case EOpConstructI8Vec3:
+ case EOpConstructI8Vec4:
+ case EOpConstructInt8:
+ basicOp = EOpConstructInt8;
+ break;
+
+ case EOpConstructU8Vec2:
+ case EOpConstructU8Vec3:
+ case EOpConstructU8Vec4:
+ case EOpConstructUint8:
+ basicOp = EOpConstructUint8;
+ break;
+
+ case EOpConstructI16Vec2:
+ case EOpConstructI16Vec3:
+ case EOpConstructI16Vec4:
+ case EOpConstructInt16:
+ basicOp = EOpConstructInt16;
+ break;
+
+ case EOpConstructU16Vec2:
+ case EOpConstructU16Vec3:
+ case EOpConstructU16Vec4:
+ case EOpConstructUint16:
+ basicOp = EOpConstructUint16;
+ break;
+
+ case EOpConstructIVec2:
+ case EOpConstructIVec3:
+ case EOpConstructIVec4:
+ case EOpConstructInt:
+ basicOp = EOpConstructInt;
+ break;
+
+ case EOpConstructUVec2:
+ case EOpConstructUVec3:
+ case EOpConstructUVec4:
+ case EOpConstructUint:
+ basicOp = EOpConstructUint;
+ break;
+
+ case EOpConstructI64Vec2:
+ case EOpConstructI64Vec3:
+ case EOpConstructI64Vec4:
+ case EOpConstructInt64:
+ basicOp = EOpConstructInt64;
+ break;
+
+ case EOpConstructUint64:
+ if (type.isScalar() && node->getType().getBasicType() == EbtReference) {
+ TIntermTyped* newNode = intermediate.addBuiltInFunctionCall(node->getLoc(), EOpConvPtrToUint64, true, node, type);
+ return newNode;
+ }
+ // fall through
+ case EOpConstructU64Vec2:
+ case EOpConstructU64Vec3:
+ case EOpConstructU64Vec4:
+ basicOp = EOpConstructUint64;
+ break;
+
+ case EOpConstructBVec2:
+ case EOpConstructBVec3:
+ case EOpConstructBVec4:
+ case EOpConstructBool:
+ basicOp = EOpConstructBool;
+ break;
+
+ case EOpConstructNonuniform:
+ // Make a nonuniform copy of node
+ newNode = intermediate.addBuiltInFunctionCall(node->getLoc(), EOpCopyObject, true, node, node->getType());
+ newNode->getWritableType().getQualifier().nonUniform = true;
+ return newNode;
+
+ case EOpConstructReference:
+ // construct reference from reference
+ if (node->getType().getBasicType() == EbtReference) {
+ newNode = intermediate.addBuiltInFunctionCall(node->getLoc(), EOpConstructReference, true, node, type);
+ return newNode;
+ // construct reference from uint64
+ } else if (node->getType().isScalar() && node->getType().getBasicType() == EbtUint64) {
+ TIntermTyped* newNode = intermediate.addBuiltInFunctionCall(node->getLoc(), EOpConvUint64ToPtr, true, node, type);
+ return newNode;
+ } else {
+ return nullptr;
+ }
+
+ case EOpConstructCooperativeMatrix:
+ if (!node->getType().isCoopMat()) {
+ if (type.getBasicType() != node->getType().getBasicType()) {
+ node = intermediate.addConversion(type.getBasicType(), node);
+ }
+ node = intermediate.setAggregateOperator(node, EOpConstructCooperativeMatrix, type, node->getLoc());
+ } else {
+ switch (type.getBasicType()) {
+ default:
+ assert(0);
+ break;
+ case EbtFloat:
+ assert(node->getType().getBasicType() == EbtFloat16);
+ node = intermediate.addUnaryNode(EOpConvFloat16ToFloat, node, node->getLoc(), type);
+ break;
+ case EbtFloat16:
+ assert(node->getType().getBasicType() == EbtFloat);
+ node = intermediate.addUnaryNode(EOpConvFloatToFloat16, node, node->getLoc(), type);
+ break;
+ }
+ // If it's a (non-specialization) constant, it must be folded.
+ if (node->getAsUnaryNode()->getOperand()->getAsConstantUnion())
+ return node->getAsUnaryNode()->getOperand()->getAsConstantUnion()->fold(op, node->getType());
+ }
+
+ return node;
+
+ default:
+ error(loc, "unsupported construction", "", "");
+
+ return nullptr;
+ }
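+    // basicOp now holds the scalar form of the constructor's conversion;
+    // addUnaryMath applies it componentwise (e.g., a "uvec4(someVec4)"
+    // constructor converts each of the four float components to uint).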
+ newNode = intermediate.addUnaryMath(basicOp, node, node->getLoc());
+ if (newNode == nullptr) {
+ error(loc, "can't convert", "constructor", "");
+ return nullptr;
+ }
+
+    //
+    // If the conversion was all that was needed (constructing a subset, or the
+    // converted node already has the full constructed type), skip out early.
+    //
+    if (subset || (newNode != node && newNode->getType() == type))
+ return newNode;
+
+ // setAggregateOperator will insert a new node for the constructor, as needed.
+ return intermediate.setAggregateOperator(newNode, op, type, loc);
+}
+
+// This function tests the type of a parameter passed to a structure or array
+// constructor, raising an error if it does not match the expected member/element type.
+//
+// Returns nullptr for an error or the input node itself if the expected and the given parameter types match.
+//
+TIntermTyped* TParseContext::constructAggregate(TIntermNode* node, const TType& type, int paramCount, const TSourceLoc& loc)
+{
+ TIntermTyped* converted = intermediate.addConversion(EOpConstructStruct, type, node->getAsTyped());
+ if (! converted || converted->getType() != type) {
+ error(loc, "", "constructor", "cannot convert parameter %d from '%s' to '%s'", paramCount,
+ node->getAsTyped()->getType().getCompleteString().c_str(), type.getCompleteString().c_str());
+
+ return nullptr;
+ }
+
+ return converted;
+}
+
+//
+// Do everything needed to add an interface block.
+//
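+// e.g. (illustrative only):
+//     layout(std140, binding = 0) uniform Params { mat4 mvp; } params;
+//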
+void TParseContext::declareBlock(const TSourceLoc& loc, TTypeList& typeList, const TString* instanceName,
+ TArraySizes* arraySizes)
+{
+ blockStageIoCheck(loc, currentBlockQualifier);
+ blockQualifierCheck(loc, currentBlockQualifier, instanceName != nullptr);
+ if (arraySizes != nullptr) {
+ arraySizesCheck(loc, currentBlockQualifier, arraySizes, nullptr, false);
+ arrayOfArrayVersionCheck(loc, arraySizes);
+ if (arraySizes->getNumDims() > 1)
+ requireProfile(loc, ~EEsProfile, "array-of-array of block");
+ }
+
+ // fix and check for member storage qualifiers and types that don't belong within a block
+ for (unsigned int member = 0; member < typeList.size(); ++member) {
+ TType& memberType = *typeList[member].type;
+ TQualifier& memberQualifier = memberType.getQualifier();
+ const TSourceLoc& memberLoc = typeList[member].loc;
+ globalQualifierFixCheck(memberLoc, memberQualifier);
+ if (memberQualifier.storage != EvqTemporary && memberQualifier.storage != EvqGlobal && memberQualifier.storage != currentBlockQualifier.storage)
+ error(memberLoc, "member storage qualifier cannot contradict block storage qualifier", memberType.getFieldName().c_str(), "");
+ memberQualifier.storage = currentBlockQualifier.storage;
+#ifdef NV_EXTENSIONS
+ if (currentBlockQualifier.perPrimitiveNV)
+ memberQualifier.perPrimitiveNV = currentBlockQualifier.perPrimitiveNV;
+ if (currentBlockQualifier.perViewNV)
+ memberQualifier.perViewNV = currentBlockQualifier.perViewNV;
+ if (currentBlockQualifier.perTaskNV)
+ memberQualifier.perTaskNV = currentBlockQualifier.perTaskNV;
+#endif
+ if ((currentBlockQualifier.storage == EvqUniform || currentBlockQualifier.storage == EvqBuffer) && (memberQualifier.isInterpolation() || memberQualifier.isAuxiliary()))
+ error(memberLoc, "member of uniform or buffer block cannot have an auxiliary or interpolation qualifier", memberType.getFieldName().c_str(), "");
+ if (memberType.isArray())
+ arraySizesCheck(memberLoc, currentBlockQualifier, memberType.getArraySizes(), nullptr, member == typeList.size() - 1);
+ if (memberQualifier.hasOffset()) {
+ if (spvVersion.spv == 0) {
+ requireProfile(memberLoc, ~EEsProfile, "offset on block member");
+ profileRequires(memberLoc, ~EEsProfile, 440, E_GL_ARB_enhanced_layouts, "offset on block member");
+ }
+ }
+
+ if (memberType.containsOpaque())
+ error(memberLoc, "member of block cannot be or contain a sampler, image, or atomic_uint type", typeList[member].type->getFieldName().c_str(), "");
+
+ if (memberType.containsCoopMat())
+ error(memberLoc, "member of block cannot be or contain a cooperative matrix type", typeList[member].type->getFieldName().c_str(), "");
+ }
+
+ // This might be a redeclaration of a built-in block. If so, redeclareBuiltinBlock() will
+ // do all the rest.
+ if (! symbolTable.atBuiltInLevel() && builtInName(*blockName)) {
+ redeclareBuiltinBlock(loc, typeList, *blockName, instanceName, arraySizes);
+ return;
+ }
+
+ // Not a redeclaration of a built-in; check that all names are user names.
+ reservedErrorCheck(loc, *blockName);
+ if (instanceName)
+ reservedErrorCheck(loc, *instanceName);
+ for (unsigned int member = 0; member < typeList.size(); ++member)
+ reservedErrorCheck(typeList[member].loc, typeList[member].type->getFieldName());
+
+ // Make default block qualification, and adjust the member qualifications
+
+ TQualifier defaultQualification;
+ switch (currentBlockQualifier.storage) {
+ case EvqUniform: defaultQualification = globalUniformDefaults; break;
+ case EvqBuffer: defaultQualification = globalBufferDefaults; break;
+ case EvqVaryingIn: defaultQualification = globalInputDefaults; break;
+ case EvqVaryingOut: defaultQualification = globalOutputDefaults; break;
+ default: defaultQualification.clear(); break;
+ }
+
+ // Special case for "push_constant uniform", which has a default of std430,
+ // contrary to normal uniform defaults, and can't have a default tracked for it.
+ if ((currentBlockQualifier.layoutPushConstant && !currentBlockQualifier.hasPacking())
+#ifdef NV_EXTENSIONS
+ || (currentBlockQualifier.layoutShaderRecordNV && !currentBlockQualifier.hasPacking())
+#endif
+ )
+ currentBlockQualifier.layoutPacking = ElpStd430;
+
+#ifdef NV_EXTENSIONS
+ // Special case for "taskNV in/out", which has a default of std430,
+ if (currentBlockQualifier.perTaskNV && !currentBlockQualifier.hasPacking())
+ currentBlockQualifier.layoutPacking = ElpStd430;
+#endif
+
+ // fix and check for member layout qualifiers
+
+ mergeObjectLayoutQualifiers(defaultQualification, currentBlockQualifier, true);
+
+ // "The align qualifier can only be used on blocks or block members, and only for blocks declared with std140 or std430 layouts."
+ if (currentBlockQualifier.hasAlign()) {
+ if (defaultQualification.layoutPacking != ElpStd140 &&
+ defaultQualification.layoutPacking != ElpStd430 &&
+ defaultQualification.layoutPacking != ElpScalar) {
+ error(loc, "can only be used with std140, std430, or scalar layout packing", "align", "");
+ defaultQualification.layoutAlign = -1;
+ }
+ }
+
+ bool memberWithLocation = false;
+ bool memberWithoutLocation = false;
+#ifdef NV_EXTENSIONS
+ bool memberWithPerViewQualifier = false;
+#endif
+ for (unsigned int member = 0; member < typeList.size(); ++member) {
+ TQualifier& memberQualifier = typeList[member].type->getQualifier();
+ const TSourceLoc& memberLoc = typeList[member].loc;
+ if (memberQualifier.hasStream()) {
+ if (defaultQualification.layoutStream != memberQualifier.layoutStream)
+ error(memberLoc, "member cannot contradict block", "stream", "");
+ }
+
+ // "This includes a block's inheritance of the
+ // current global default buffer, a block member's inheritance of the block's
+ // buffer, and the requirement that any *xfb_buffer* declared on a block
+ // member must match the buffer inherited from the block."
+ if (memberQualifier.hasXfbBuffer()) {
+ if (defaultQualification.layoutXfbBuffer != memberQualifier.layoutXfbBuffer)
+ error(memberLoc, "member cannot contradict block (or what block inherited from global)", "xfb_buffer", "");
+ }
+
+ if (memberQualifier.hasPacking())
+ error(memberLoc, "member of block cannot have a packing layout qualifier", typeList[member].type->getFieldName().c_str(), "");
+ if (memberQualifier.hasLocation()) {
+ const char* feature = "location on block member";
+ switch (currentBlockQualifier.storage) {
+ case EvqVaryingIn:
+ case EvqVaryingOut:
+ requireProfile(memberLoc, ECoreProfile | ECompatibilityProfile | EEsProfile, feature);
+ profileRequires(memberLoc, ECoreProfile | ECompatibilityProfile, 440, E_GL_ARB_enhanced_layouts, feature);
+ profileRequires(memberLoc, EEsProfile, 320, Num_AEP_shader_io_blocks, AEP_shader_io_blocks, feature);
+ memberWithLocation = true;
+ break;
+ default:
+ error(memberLoc, "can only use in an in/out block", feature, "");
+ break;
+ }
+ } else
+ memberWithoutLocation = true;
+
+ // "The offset qualifier can only be used on block members of blocks declared with std140 or std430 layouts."
+ // "The align qualifier can only be used on blocks or block members, and only for blocks declared with std140 or std430 layouts."
+ if (memberQualifier.hasAlign() || memberQualifier.hasOffset()) {
+ if (defaultQualification.layoutPacking != ElpStd140 &&
+ defaultQualification.layoutPacking != ElpStd430 &&
+ defaultQualification.layoutPacking != ElpScalar)
+ error(memberLoc, "can only be used with std140, std430, or scalar layout packing", "offset/align", "");
+ }
+
+#ifdef NV_EXTENSIONS
+ if (memberQualifier.isPerView()) {
+ memberWithPerViewQualifier = true;
+ }
+#endif
+
+ TQualifier newMemberQualification = defaultQualification;
+ mergeQualifiers(memberLoc, newMemberQualification, memberQualifier, false);
+ memberQualifier = newMemberQualification;
+ }
+
+ layoutMemberLocationArrayCheck(loc, memberWithLocation, arraySizes);
+
+    // Ensure that the block has an XfbBuffer assigned. This is needed
+    // because if the block has an XfbOffset assigned, it is assumed to
+    // have implicitly inherited the current global XfbBuffer, and its
+    // members need to be assigned an XfbOffset if they lack one.
+ if (currentBlockQualifier.storage == EvqVaryingOut && globalOutputDefaults.hasXfbBuffer()) {
+ if (!currentBlockQualifier.hasXfbBuffer() && currentBlockQualifier.hasXfbOffset())
+ currentBlockQualifier.layoutXfbBuffer = globalOutputDefaults.layoutXfbBuffer;
+ }
+
+ // Process the members
+ fixBlockLocations(loc, currentBlockQualifier, typeList, memberWithLocation, memberWithoutLocation);
+ fixXfbOffsets(currentBlockQualifier, typeList);
+ fixBlockUniformOffsets(currentBlockQualifier, typeList);
+ for (unsigned int member = 0; member < typeList.size(); ++member)
+ layoutTypeCheck(typeList[member].loc, *typeList[member].type);
+
+#ifdef NV_EXTENSIONS
+ if (memberWithPerViewQualifier) {
+ for (unsigned int member = 0; member < typeList.size(); ++member) {
+ resizeMeshViewDimension(typeList[member].loc, *typeList[member].type);
+ }
+ }
+#endif
+
+ // reverse merge, so that currentBlockQualifier now has all layout information
+ // (can't use defaultQualification directly, it's missing other non-layout-default-class qualifiers)
+ mergeObjectLayoutQualifiers(currentBlockQualifier, defaultQualification, true);
+
+ //
+ // Build and add the interface block as a new type named 'blockName'
+ //
+
+ TType blockType(&typeList, *blockName, currentBlockQualifier);
+ if (arraySizes != nullptr)
+ blockType.transferArraySizes(arraySizes);
+ else
+ ioArrayCheck(loc, blockType, instanceName ? *instanceName : *blockName);
+
+ if (currentBlockQualifier.layoutBufferReference) {
+
+ if (currentBlockQualifier.storage != EvqBuffer)
+ error(loc, "can only be used with buffer", "buffer_reference", "");
+
+ // Create the block reference type. If it was forward-declared, detect that
+ // as a referent struct type with no members. Replace the referent type with
+ // blockType.
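+        // e.g. (illustrative), with GL_EXT_buffer_reference:
+        //     layout(buffer_reference) buffer T;              // forward declaration, empty referent
+        //     layout(buffer_reference) buffer T { int x; };   // this declaration fills it in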
+ TType blockNameType(EbtReference, blockType, *blockName);
+ TVariable* blockNameVar = new TVariable(blockName, blockNameType, true);
+ if (! symbolTable.insert(*blockNameVar)) {
+ TSymbol* existingName = symbolTable.find(*blockName);
+ if (existingName->getType().getBasicType() == EbtReference &&
+ existingName->getType().getReferentType()->getStruct() &&
+ existingName->getType().getReferentType()->getStruct()->size() == 0 &&
+ existingName->getType().getQualifier().storage == blockType.getQualifier().storage) {
+ existingName->getType().getReferentType()->deepCopy(blockType);
+ } else {
+ error(loc, "block name cannot be redefined", blockName->c_str(), "");
+ }
+ }
+ if (!instanceName) {
+ return;
+ }
+ } else {
+ //
+ // Don't make a user-defined type out of block name; that will cause an error
+ // if the same block name gets reused in a different interface.
+ //
+ // "Block names have no other use within a shader
+ // beyond interface matching; it is a compile-time error to use a block name at global scope for anything
+ // other than as a block name (e.g., use of a block name for a global variable name or function name is
+ // currently reserved)."
+ //
+ // Use the symbol table to prevent normal reuse of the block's name, as a variable entry,
+ // whose type is EbtBlock, but without all the structure; that will come from the type
+ // the instances point to.
+ //
+ TType blockNameType(EbtBlock, blockType.getQualifier().storage);
+ TVariable* blockNameVar = new TVariable(blockName, blockNameType);
+ if (! symbolTable.insert(*blockNameVar)) {
+ TSymbol* existingName = symbolTable.find(*blockName);
+ if (existingName->getType().getBasicType() == EbtBlock) {
+ if (existingName->getType().getQualifier().storage == blockType.getQualifier().storage) {
+ error(loc, "Cannot reuse block name within the same interface:", blockName->c_str(), blockType.getStorageQualifierString());
+ return;
+ }
+ } else {
+ error(loc, "block name cannot redefine a non-block name", blockName->c_str(), "");
+ return;
+ }
+ }
+ }
+
+ // Add the variable, as anonymous or named instanceName.
+ // Make an anonymous variable if no name was provided.
+ if (! instanceName)
+ instanceName = NewPoolTString("");
+
+ TVariable& variable = *new TVariable(instanceName, blockType);
+ if (! symbolTable.insert(variable)) {
+ if (*instanceName == "")
+ error(loc, "nameless block contains a member that already has a name at global scope", blockName->c_str(), "");
+ else
+ error(loc, "block instance name redefinition", variable.getName().c_str(), "");
+
+ return;
+ }
+
+ // Check for general layout qualifier errors
+ layoutObjectCheck(loc, variable);
+
+ // fix up
+ if (isIoResizeArray(blockType)) {
+ ioArraySymbolResizeList.push_back(&variable);
+ checkIoArraysConsistency(loc, true);
+ } else
+ fixIoArraySize(loc, variable.getWritableType());
+
+ // Save it in the AST for linker use.
+ trackLinkage(variable);
+}
+
+// Do all block-declaration checking regarding the combination of in/out/uniform/buffer
+// with a particular stage.
+void TParseContext::blockStageIoCheck(const TSourceLoc& loc, const TQualifier& qualifier)
+{
+ switch (qualifier.storage) {
+ case EvqUniform:
+ profileRequires(loc, EEsProfile, 300, nullptr, "uniform block");
+ profileRequires(loc, ENoProfile, 140, nullptr, "uniform block");
+ if (currentBlockQualifier.layoutPacking == ElpStd430 && ! currentBlockQualifier.layoutPushConstant)
+ requireExtensions(loc, 1, &E_GL_EXT_scalar_block_layout, "std430 requires the buffer storage qualifier");
+ break;
+ case EvqBuffer:
+ requireProfile(loc, EEsProfile | ECoreProfile | ECompatibilityProfile, "buffer block");
+ profileRequires(loc, ECoreProfile | ECompatibilityProfile, 430, nullptr, "buffer block");
+ profileRequires(loc, EEsProfile, 310, nullptr, "buffer block");
+ break;
+ case EvqVaryingIn:
+ profileRequires(loc, ~EEsProfile, 150, E_GL_ARB_separate_shader_objects, "input block");
+ // It is a compile-time error to have an input block in a vertex shader or an output block in a fragment shader
+ // "Compute shaders do not permit user-defined input variables..."
+ requireStage(loc, (EShLanguageMask)(EShLangTessControlMask|EShLangTessEvaluationMask|EShLangGeometryMask|EShLangFragmentMask
+#ifdef NV_EXTENSIONS
+ |EShLangMeshNVMask
+#endif
+ ), "input block");
+ if (language == EShLangFragment) {
+ profileRequires(loc, EEsProfile, 320, Num_AEP_shader_io_blocks, AEP_shader_io_blocks, "fragment input block");
+ }
+#ifdef NV_EXTENSIONS
+ else if (language == EShLangMeshNV && ! qualifier.isTaskMemory()) {
+ error(loc, "input blocks cannot be used in a mesh shader", "out", "");
+ }
+#endif
+ break;
+ case EvqVaryingOut:
+ profileRequires(loc, ~EEsProfile, 150, E_GL_ARB_separate_shader_objects, "output block");
+ requireStage(loc, (EShLanguageMask)(EShLangVertexMask|EShLangTessControlMask|EShLangTessEvaluationMask|EShLangGeometryMask
+#ifdef NV_EXTENSIONS
+ |EShLangMeshNVMask|EShLangTaskNVMask
+#endif
+ ), "output block");
+ // ES 310 can have a block before shader_io is turned on, so skip this test for built-ins
+ if (language == EShLangVertex && ! parsingBuiltins) {
+ profileRequires(loc, EEsProfile, 320, Num_AEP_shader_io_blocks, AEP_shader_io_blocks, "vertex output block");
+ }
+#ifdef NV_EXTENSIONS
+ else if (language == EShLangMeshNV && qualifier.isTaskMemory()) {
+ error(loc, "can only use on input blocks in mesh shader", "taskNV", "");
+ }
+ else if (language == EShLangTaskNV && ! qualifier.isTaskMemory()) {
+ error(loc, "output blocks cannot be used in a task shader", "out", "");
+ }
+#endif
+ break;
+#ifdef NV_EXTENSIONS
+ case EvqPayloadNV:
+ profileRequires(loc, ~EEsProfile, 460, E_GL_NV_ray_tracing, "rayPayloadNV block");
+ requireStage(loc, (EShLanguageMask)(EShLangRayGenNVMask | EShLangAnyHitNVMask | EShLangClosestHitNVMask | EShLangMissNVMask),
+ "rayPayloadNV block");
+ break;
+ case EvqPayloadInNV:
+ profileRequires(loc, ~EEsProfile, 460, E_GL_NV_ray_tracing, "rayPayloadInNV block");
+ requireStage(loc, (EShLanguageMask)(EShLangAnyHitNVMask | EShLangClosestHitNVMask | EShLangMissNVMask),
+ "rayPayloadInNV block");
+ break;
+ case EvqHitAttrNV:
+ profileRequires(loc, ~EEsProfile, 460, E_GL_NV_ray_tracing, "hitAttributeNV block");
+ requireStage(loc, (EShLanguageMask)(EShLangIntersectNVMask | EShLangAnyHitNVMask | EShLangClosestHitNVMask), "hitAttributeNV block");
+ break;
+ case EvqCallableDataNV:
+ profileRequires(loc, ~EEsProfile, 460, E_GL_NV_ray_tracing, "callableDataNV block");
+ requireStage(loc, (EShLanguageMask)(EShLangRayGenNVMask | EShLangClosestHitNVMask | EShLangMissNVMask | EShLangCallableNVMask),
+ "callableDataNV block");
+ break;
+ case EvqCallableDataInNV:
+ profileRequires(loc, ~EEsProfile, 460, E_GL_NV_ray_tracing, "callableDataInNV block");
+ requireStage(loc, (EShLanguageMask)(EShLangCallableNVMask), "callableDataInNV block");
+ break;
+#endif
+ default:
+ error(loc, "only uniform, buffer, in, or out blocks are supported", blockName->c_str(), "");
+ break;
+ }
+}
+
+// Do all block-declaration checking regarding its qualifiers.
+void TParseContext::blockQualifierCheck(const TSourceLoc& loc, const TQualifier& qualifier, bool /*instanceName*/)
+{
+ // The 4.5 specification says:
+ //
+ // interface-block :
+ // layout-qualifieropt interface-qualifier block-name { member-list } instance-nameopt ;
+ //
+ // interface-qualifier :
+ // in
+ // out
+ // patch in
+ // patch out
+ // uniform
+ // buffer
+ //
+    // Note, however, that memory qualifiers aren't included, yet the specification also says
+ //
+ // "...memory qualifiers may also be used in the declaration of shader storage blocks..."
+
+ if (qualifier.isInterpolation())
+ error(loc, "cannot use interpolation qualifiers on an interface block", "flat/smooth/noperspective", "");
+ if (qualifier.centroid)
+ error(loc, "cannot use centroid qualifier on an interface block", "centroid", "");
+ if (qualifier.sample)
+ error(loc, "cannot use sample qualifier on an interface block", "sample", "");
+ if (qualifier.invariant)
+ error(loc, "cannot use invariant qualifier on an interface block", "invariant", "");
+ if (qualifier.layoutPushConstant)
+ intermediate.addPushConstantCount();
+#ifdef NV_EXTENSIONS
+ if (qualifier.layoutShaderRecordNV)
+ intermediate.addShaderRecordNVCount();
+ if (qualifier.perTaskNV)
+ intermediate.addTaskNVCount();
+#endif
+}
+
+//
+// "For a block, this process applies to the entire block, or until the first member
+// is reached that has a location layout qualifier. When a block member is declared with a location
+// qualifier, its location comes from that qualifier: The member's location qualifier overrides the block-level
+// declaration. Subsequent members are again assigned consecutive locations, based on the newest location,
+// until the next member declared with a location qualifier. The values used for locations do not have to be
+// declared in increasing order."
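+//
+// For example (illustrative only, not spec text):
+//     layout(location = 3) in Block {
+//         vec4 a;                       // assigned location 3
+//         layout(location = 7) vec4 b;  // explicit location 7
+//         vec4 c;                       // assigned location 8
+//     };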
+void TParseContext::fixBlockLocations(const TSourceLoc& loc, TQualifier& qualifier, TTypeList& typeList, bool memberWithLocation, bool memberWithoutLocation)
+{
+ // "If a block has no block-level location layout qualifier, it is required that either all or none of its members
+ // have a location layout qualifier, or a compile-time error results."
+ if (! qualifier.hasLocation() && memberWithLocation && memberWithoutLocation)
+ error(loc, "either the block needs a location, or all members need a location, or no members have a location", "location", "");
+ else {
+ if (memberWithLocation) {
+ // remove any block-level location and make it per *every* member
+ int nextLocation = 0; // by the rule above, initial value is not relevant
+ if (qualifier.hasAnyLocation()) {
+ nextLocation = qualifier.layoutLocation;
+ qualifier.layoutLocation = TQualifier::layoutLocationEnd;
+ if (qualifier.hasComponent()) {
+ // "It is a compile-time error to apply the *component* qualifier to a ... block"
+ error(loc, "cannot apply to a block", "component", "");
+ }
+ if (qualifier.hasIndex()) {
+ error(loc, "cannot apply to a block", "index", "");
+ }
+ }
+ for (unsigned int member = 0; member < typeList.size(); ++member) {
+ TQualifier& memberQualifier = typeList[member].type->getQualifier();
+ const TSourceLoc& memberLoc = typeList[member].loc;
+ if (! memberQualifier.hasLocation()) {
+ if (nextLocation >= (int)TQualifier::layoutLocationEnd)
+ error(memberLoc, "location is too large", "location", "");
+ memberQualifier.layoutLocation = nextLocation;
+ memberQualifier.layoutComponent = TQualifier::layoutComponentEnd;
+ }
+ nextLocation = memberQualifier.layoutLocation + intermediate.computeTypeLocationSize(
+ *typeList[member].type, language);
+ }
+ }
+ }
+}
+
+void TParseContext::fixXfbOffsets(TQualifier& qualifier, TTypeList& typeList)
+{
+ // "If a block is qualified with xfb_offset, all its
+ // members are assigned transform feedback buffer offsets. If a block is not qualified with xfb_offset, any
+ // members of that block not qualified with an xfb_offset will not be assigned transform feedback buffer
+ // offsets."
+
+ if (! qualifier.hasXfbBuffer() || ! qualifier.hasXfbOffset())
+ return;
+
+ int nextOffset = qualifier.layoutXfbOffset;
+ for (unsigned int member = 0; member < typeList.size(); ++member) {
+ TQualifier& memberQualifier = typeList[member].type->getQualifier();
+ bool contains64BitType = false;
+#ifdef AMD_EXTENSIONS
+ bool contains32BitType = false;
+ bool contains16BitType = false;
+ int memberSize = intermediate.computeTypeXfbSize(*typeList[member].type, contains64BitType, contains32BitType, contains16BitType);
+#else
+ int memberSize = intermediate.computeTypeXfbSize(*typeList[member].type, contains64BitType);
+#endif
+ // see if we need to auto-assign an offset to this member
+ if (! memberQualifier.hasXfbOffset()) {
+ // "if applied to an aggregate containing a double or 64-bit integer, the offset must also be a multiple of 8"
+ if (contains64BitType)
+ RoundToPow2(nextOffset, 8);
+#ifdef AMD_EXTENSIONS
+ else if (contains32BitType)
+ RoundToPow2(nextOffset, 4);
+ else if (contains16BitType)
+ RoundToPow2(nextOffset, 2);
+#endif
+ memberQualifier.layoutXfbOffset = nextOffset;
+ } else
+ nextOffset = memberQualifier.layoutXfbOffset;
+ nextOffset += memberSize;
+ }
+
+ // The above gave all block members an offset, so we can take it off the block now,
+ // which will avoid double counting the offset usage.
+ qualifier.layoutXfbOffset = TQualifier::layoutXfbOffsetEnd;
+}
+
+// Calculate and save the offset of each block member, using the recursively
+// defined block offset rules and the user-provided offset and align.
+//
+// Also, compute and save the total size of the block. For the block's size, arrayness
+// is not taken into account, as each element is backed by a separate buffer.
+//
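+// For example (illustrative only, std140 rules):
+//     layout(std140) uniform U {
+//         float f;  // offset 0
+//         vec3  v;  // base alignment 16, so offset rounds up to 16
+//     };
+//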
+void TParseContext::fixBlockUniformOffsets(TQualifier& qualifier, TTypeList& typeList)
+{
+ if (!qualifier.isUniformOrBuffer() && !qualifier.isTaskMemory())
+ return;
+ if (qualifier.layoutPacking != ElpStd140 && qualifier.layoutPacking != ElpStd430 && qualifier.layoutPacking != ElpScalar)
+ return;
+
+ int offset = 0;
+ int memberSize;
+ for (unsigned int member = 0; member < typeList.size(); ++member) {
+ TQualifier& memberQualifier = typeList[member].type->getQualifier();
+ const TSourceLoc& memberLoc = typeList[member].loc;
+
+ // "When align is applied to an array, it effects only the start of the array, not the array's internal stride."
+
+ // modify just the children's view of matrix layout, if there is one for this member
+ TLayoutMatrix subMatrixLayout = typeList[member].type->getQualifier().layoutMatrix;
+ int dummyStride;
+ int memberAlignment = intermediate.getMemberAlignment(*typeList[member].type, memberSize, dummyStride, qualifier.layoutPacking,
+ subMatrixLayout != ElmNone ? subMatrixLayout == ElmRowMajor : qualifier.layoutMatrix == ElmRowMajor);
+ if (memberQualifier.hasOffset()) {
+ // "The specified offset must be a multiple
+ // of the base alignment of the type of the block member it qualifies, or a compile-time error results."
+ if (! IsMultipleOfPow2(memberQualifier.layoutOffset, memberAlignment))
+ error(memberLoc, "must be a multiple of the member's alignment", "offset", "");
+
+ // GLSL: "It is a compile-time error to specify an offset that is smaller than the offset of the previous
+ // member in the block or that lies within the previous member of the block"
+ if (spvVersion.spv == 0) {
+ if (memberQualifier.layoutOffset < offset)
+ error(memberLoc, "cannot lie in previous members", "offset", "");
+
+ // "The offset qualifier forces the qualified member to start at or after the specified
+ // integral-constant expression, which will be its byte offset from the beginning of the buffer.
+ // "The actual offset of a member is computed as
+ // follows: If offset was declared, start with that offset, otherwise start with the next available offset."
+ offset = std::max(offset, memberQualifier.layoutOffset);
+ } else {
+ // TODO: Vulkan: "It is a compile-time error to have any offset, explicit or assigned,
+ // that lies within another member of the block."
+
+ offset = memberQualifier.layoutOffset;
+ }
+ }
+
+ // "The actual alignment of a member will be the greater of the specified align alignment and the standard
+ // (e.g., std140) base alignment for the member's type."
+ if (memberQualifier.hasAlign())
+ memberAlignment = std::max(memberAlignment, memberQualifier.layoutAlign);
+
+ // "If the resulting offset is not a multiple of the actual alignment,
+ // increase it to the first offset that is a multiple of
+ // the actual alignment."
+ RoundToPow2(offset, memberAlignment);
+ typeList[member].type->getQualifier().layoutOffset = offset;
+ offset += memberSize;
+ }
+}
+
+// For an identifier that is already declared, add more qualification to it.
+void TParseContext::addQualifierToExisting(const TSourceLoc& loc, TQualifier qualifier, const TString& identifier)
+{
+ TSymbol* symbol = symbolTable.find(identifier);
+
+ // A forward declaration of a block reference looks to the grammar like adding
+ // a qualifier to an existing symbol. Detect this and create the block reference
+ // type with an empty type list, which will be filled in later in
+ // TParseContext::declareBlock.
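+    // e.g. (illustrative): "layout(buffer_reference) buffer T;" arrives here as
+    // the qualifier "layout(buffer_reference) buffer" applied to the identifier "T".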
+ if (!symbol && qualifier.layoutBufferReference) {
+ TTypeList typeList;
+        TType blockType(&typeList, identifier, qualifier);
+ TType blockNameType(EbtReference, blockType, identifier);
+ TVariable* blockNameVar = new TVariable(&identifier, blockNameType, true);
+ if (! symbolTable.insert(*blockNameVar)) {
+ error(loc, "block name cannot redefine a non-block name", blockName->c_str(), "");
+ }
+ return;
+ }
+
+ if (! symbol) {
+ error(loc, "identifier not previously declared", identifier.c_str(), "");
+ return;
+ }
+ if (symbol->getAsFunction()) {
+ error(loc, "cannot re-qualify a function name", identifier.c_str(), "");
+ return;
+ }
+
+ if (qualifier.isAuxiliary() ||
+ qualifier.isMemory() ||
+ qualifier.isInterpolation() ||
+ qualifier.hasLayout() ||
+ qualifier.storage != EvqTemporary ||
+ qualifier.precision != EpqNone) {
+ error(loc, "cannot add storage, auxiliary, memory, interpolation, layout, or precision qualifier to an existing variable", identifier.c_str(), "");
+ return;
+ }
+
+ // For read-only built-ins, add a new symbol for holding the modified qualifier.
+ // This will bring up an entire block, if a block type has to be modified (e.g., gl_Position inside a block)
+ if (symbol->isReadOnly())
+ symbol = symbolTable.copyUp(symbol);
+
+ if (qualifier.invariant) {
+ if (intermediate.inIoAccessed(identifier))
+ error(loc, "cannot change qualification after use", "invariant", "");
+ symbol->getWritableType().getQualifier().invariant = true;
+ invariantCheck(loc, symbol->getType().getQualifier());
+ } else if (qualifier.noContraction) {
+ if (intermediate.inIoAccessed(identifier))
+ error(loc, "cannot change qualification after use", "precise", "");
+ symbol->getWritableType().getQualifier().noContraction = true;
+ } else if (qualifier.specConstant) {
+ symbol->getWritableType().getQualifier().makeSpecConstant();
+ if (qualifier.hasSpecConstantId())
+ symbol->getWritableType().getQualifier().layoutSpecConstantId = qualifier.layoutSpecConstantId;
+ } else
+ warn(loc, "unknown requalification", "", "");
+}
+
+void TParseContext::addQualifierToExisting(const TSourceLoc& loc, TQualifier qualifier, TIdentifierList& identifiers)
+{
+ for (unsigned int i = 0; i < identifiers.size(); ++i)
+ addQualifierToExisting(loc, qualifier, *identifiers[i]);
+}
+
+// Make sure 'invariant' isn't being applied to a non-allowed object.
+void TParseContext::invariantCheck(const TSourceLoc& loc, const TQualifier& qualifier)
+{
+ if (! qualifier.invariant)
+ return;
+
+ bool pipeOut = qualifier.isPipeOutput();
+ bool pipeIn = qualifier.isPipeInput();
+ if (version >= 300 || (profile != EEsProfile && version >= 420)) {
+ if (! pipeOut)
+ error(loc, "can only apply to an output", "invariant", "");
+ } else {
+ if ((language == EShLangVertex && pipeIn) || (! pipeOut && ! pipeIn))
+ error(loc, "can only apply to an output, or to an input in a non-vertex stage\n", "invariant", "");
+ }
+}
+
+//
+// Updating default qualifier for the case of a declaration with just a qualifier,
+// no type, block, or identifier.
+//
+void TParseContext::updateStandaloneQualifierDefaults(const TSourceLoc& loc, const TPublicType& publicType)
+{
+ if (publicType.shaderQualifiers.vertices != TQualifier::layoutNotSet) {
+#ifdef NV_EXTENSIONS
+ assert(language == EShLangTessControl || language == EShLangGeometry || language == EShLangMeshNV);
+#else
+ assert(language == EShLangTessControl || language == EShLangGeometry);
+#endif
+ const char* id = (language == EShLangTessControl) ? "vertices" : "max_vertices";
+
+ if (publicType.qualifier.storage != EvqVaryingOut)
+ error(loc, "can only apply to 'out'", id, "");
+ if (! intermediate.setVertices(publicType.shaderQualifiers.vertices))
+ error(loc, "cannot change previously set layout value", id, "");
+
+ if (language == EShLangTessControl)
+ checkIoArraysConsistency(loc);
+ }
+#ifdef NV_EXTENSIONS
+ if (publicType.shaderQualifiers.primitives != TQualifier::layoutNotSet) {
+ assert(language == EShLangMeshNV);
+ const char* id = "max_primitives";
+
+ if (publicType.qualifier.storage != EvqVaryingOut)
+ error(loc, "can only apply to 'out'", id, "");
+ if (! intermediate.setPrimitives(publicType.shaderQualifiers.primitives))
+ error(loc, "cannot change previously set layout value", id, "");
+ }
+#endif
+ if (publicType.shaderQualifiers.invocations != TQualifier::layoutNotSet) {
+ if (publicType.qualifier.storage != EvqVaryingIn)
+ error(loc, "can only apply to 'in'", "invocations", "");
+ if (! intermediate.setInvocations(publicType.shaderQualifiers.invocations))
+ error(loc, "cannot change previously set layout value", "invocations", "");
+ }
+ if (publicType.shaderQualifiers.geometry != ElgNone) {
+ if (publicType.qualifier.storage == EvqVaryingIn) {
+ switch (publicType.shaderQualifiers.geometry) {
+ case ElgPoints:
+ case ElgLines:
+ case ElgLinesAdjacency:
+ case ElgTriangles:
+ case ElgTrianglesAdjacency:
+ case ElgQuads:
+ case ElgIsolines:
+#ifdef NV_EXTENSIONS
+ if (language == EShLangMeshNV) {
+ error(loc, "cannot apply to input", TQualifier::getGeometryString(publicType.shaderQualifiers.geometry), "");
+ break;
+ }
+#endif
+ if (intermediate.setInputPrimitive(publicType.shaderQualifiers.geometry)) {
+ if (language == EShLangGeometry)
+ checkIoArraysConsistency(loc);
+ } else
+ error(loc, "cannot change previously set input primitive", TQualifier::getGeometryString(publicType.shaderQualifiers.geometry), "");
+ break;
+ default:
+ error(loc, "cannot apply to input", TQualifier::getGeometryString(publicType.shaderQualifiers.geometry), "");
+ }
+ } else if (publicType.qualifier.storage == EvqVaryingOut) {
+ switch (publicType.shaderQualifiers.geometry) {
+#ifdef NV_EXTENSIONS
+ case ElgLines:
+ case ElgTriangles:
+ if (language != EShLangMeshNV) {
+ error(loc, "cannot apply to 'out'", TQualifier::getGeometryString(publicType.shaderQualifiers.geometry), "");
+ break;
+ }
+#endif
+ // Fall through
+ case ElgPoints:
+ case ElgLineStrip:
+ case ElgTriangleStrip:
+ if (! intermediate.setOutputPrimitive(publicType.shaderQualifiers.geometry))
+ error(loc, "cannot change previously set output primitive", TQualifier::getGeometryString(publicType.shaderQualifiers.geometry), "");
+ break;
+ default:
+ error(loc, "cannot apply to 'out'", TQualifier::getGeometryString(publicType.shaderQualifiers.geometry), "");
+ }
+ } else
+ error(loc, "cannot apply to:", TQualifier::getGeometryString(publicType.shaderQualifiers.geometry), GetStorageQualifierString(publicType.qualifier.storage));
+ }
+ if (publicType.shaderQualifiers.spacing != EvsNone) {
+ if (publicType.qualifier.storage == EvqVaryingIn) {
+ if (! intermediate.setVertexSpacing(publicType.shaderQualifiers.spacing))
+ error(loc, "cannot change previously set vertex spacing", TQualifier::getVertexSpacingString(publicType.shaderQualifiers.spacing), "");
+ } else
+ error(loc, "can only apply to 'in'", TQualifier::getVertexSpacingString(publicType.shaderQualifiers.spacing), "");
+ }
+ if (publicType.shaderQualifiers.order != EvoNone) {
+ if (publicType.qualifier.storage == EvqVaryingIn) {
+ if (! intermediate.setVertexOrder(publicType.shaderQualifiers.order))
+ error(loc, "cannot change previously set vertex order", TQualifier::getVertexOrderString(publicType.shaderQualifiers.order), "");
+ } else
+ error(loc, "can only apply to 'in'", TQualifier::getVertexOrderString(publicType.shaderQualifiers.order), "");
+ }
+ if (publicType.shaderQualifiers.pointMode) {
+ if (publicType.qualifier.storage == EvqVaryingIn)
+ intermediate.setPointMode();
+ else
+ error(loc, "can only apply to 'in'", "point_mode", "");
+ }
+ for (int i = 0; i < 3; ++i) {
+ if (publicType.shaderQualifiers.localSize[i] > 1) {
+ if (publicType.qualifier.storage == EvqVaryingIn) {
+ if (! intermediate.setLocalSize(i, publicType.shaderQualifiers.localSize[i]))
+ error(loc, "cannot change previously set size", "local_size", "");
+ else {
+ int max = 0;
+ if (language == EShLangCompute) {
+ switch (i) {
+ case 0: max = resources.maxComputeWorkGroupSizeX; break;
+ case 1: max = resources.maxComputeWorkGroupSizeY; break;
+ case 2: max = resources.maxComputeWorkGroupSizeZ; break;
+ default: break;
+ }
+ if (intermediate.getLocalSize(i) > (unsigned int)max)
+ error(loc, "too large; see gl_MaxComputeWorkGroupSize", "local_size", "");
+ }
+#ifdef NV_EXTENSIONS
+ else if (language == EShLangMeshNV) {
+ switch (i) {
+ case 0: max = resources.maxMeshWorkGroupSizeX_NV; break;
+ case 1: max = resources.maxMeshWorkGroupSizeY_NV; break;
+ case 2: max = resources.maxMeshWorkGroupSizeZ_NV; break;
+ default: break;
+ }
+ if (intermediate.getLocalSize(i) > (unsigned int)max)
+ error(loc, "too large; see gl_MaxMeshWorkGroupSizeNV", "local_size", "");
+ }
+ else if (language == EShLangTaskNV) {
+ switch (i) {
+ case 0: max = resources.maxTaskWorkGroupSizeX_NV; break;
+ case 1: max = resources.maxTaskWorkGroupSizeY_NV; break;
+ case 2: max = resources.maxTaskWorkGroupSizeZ_NV; break;
+ default: break;
+ }
+ if (intermediate.getLocalSize(i) > (unsigned int)max)
+ error(loc, "too large; see gl_MaxTaskWorkGroupSizeNV", "local_size", "");
+ }
+#endif
+ else {
+ assert(0);
+ }
+
+ // Fix the existing constant gl_WorkGroupSize with this new information.
+ TVariable* workGroupSize = getEditableVariable("gl_WorkGroupSize");
+ if (workGroupSize != nullptr)
+ workGroupSize->getWritableConstArray()[i].setUConst(intermediate.getLocalSize(i));
+ }
+ } else
+ error(loc, "can only apply to 'in'", "local_size", "");
+ }
+ if (publicType.shaderQualifiers.localSizeSpecId[i] != TQualifier::layoutNotSet) {
+ if (publicType.qualifier.storage == EvqVaryingIn) {
+ if (! intermediate.setLocalSizeSpecId(i, publicType.shaderQualifiers.localSizeSpecId[i]))
+ error(loc, "cannot change previously set size", "local_size", "");
+ } else
+ error(loc, "can only apply to 'in'", "local_size id", "");
+ // Set the workgroup built-in variable as a specialization constant
+ TVariable* workGroupSize = getEditableVariable("gl_WorkGroupSize");
+ if (workGroupSize != nullptr)
+ workGroupSize->getWritableType().getQualifier().specConstant = true;
+ }
+ }
+ if (publicType.shaderQualifiers.earlyFragmentTests) {
+ if (publicType.qualifier.storage == EvqVaryingIn)
+ intermediate.setEarlyFragmentTests();
+ else
+ error(loc, "can only apply to 'in'", "early_fragment_tests", "");
+ }
+ if (publicType.shaderQualifiers.postDepthCoverage) {
+ if (publicType.qualifier.storage == EvqVaryingIn)
+ intermediate.setPostDepthCoverage();
+ else
+ error(loc, "can only apply to 'in'", "post_coverage_coverage", "");
+ }
+ if (publicType.shaderQualifiers.blendEquation) {
+ if (publicType.qualifier.storage != EvqVaryingOut)
+ error(loc, "can only apply to 'out'", "blend equation", "");
+ }
+
+#ifdef NV_EXTENSIONS
+ if (publicType.shaderQualifiers.layoutDerivativeGroupQuads &&
+ publicType.shaderQualifiers.layoutDerivativeGroupLinear) {
+ error(loc, "cannot be both specified", "derivative_group_quadsNV and derivative_group_linearNV", "");
+ }
+
+ if (publicType.shaderQualifiers.layoutDerivativeGroupQuads) {
+ if (publicType.qualifier.storage == EvqVaryingIn) {
+ if ((intermediate.getLocalSize(0) & 1) ||
+ (intermediate.getLocalSize(1) & 1))
+ error(loc, "requires local_size_x and local_size_y to be multiple of two", "derivative_group_quadsNV", "");
+ else
+ intermediate.setLayoutDerivativeMode(LayoutDerivativeGroupQuads);
+ }
+ else
+ error(loc, "can only apply to 'in'", "derivative_group_quadsNV", "");
+ }
+ if (publicType.shaderQualifiers.layoutDerivativeGroupLinear) {
+ if (publicType.qualifier.storage == EvqVaryingIn) {
+ if((intermediate.getLocalSize(0) *
+ intermediate.getLocalSize(1) *
+ intermediate.getLocalSize(2)) % 4 != 0)
+ error(loc, "requires total group size to be multiple of four", "derivative_group_linearNV", "");
+ else
+ intermediate.setLayoutDerivativeMode(LayoutDerivativeGroupLinear);
+ }
+ else
+ error(loc, "can only apply to 'in'", "derivative_group_linearNV", "");
+ }
+ // Check mesh out array sizes, once all the necessary out qualifiers are defined.
+ if ((language == EShLangMeshNV) &&
+ (intermediate.getVertices() != TQualifier::layoutNotSet) &&
+ (intermediate.getPrimitives() != TQualifier::layoutNotSet) &&
+ (intermediate.getOutputPrimitive() != ElgNone))
+ {
+ checkIoArraysConsistency(loc);
+ }
+#endif
+ const TQualifier& qualifier = publicType.qualifier;
+
+ if (qualifier.isAuxiliary() ||
+ qualifier.isMemory() ||
+ qualifier.isInterpolation() ||
+ qualifier.precision != EpqNone)
+ error(loc, "cannot use auxiliary, memory, interpolation, or precision qualifier in a default qualifier declaration (declaration with no type)", "qualifier", "");
+ // "The offset qualifier can only be used on block members of blocks..."
+ // "The align qualifier can only be used on blocks or block members..."
+ if (qualifier.hasOffset() ||
+ qualifier.hasAlign())
+ error(loc, "cannot use offset or align qualifiers in a default qualifier declaration (declaration with no type)", "layout qualifier", "");
+
+ layoutQualifierCheck(loc, qualifier);
+
+ switch (qualifier.storage) {
+ case EvqUniform:
+ if (qualifier.hasMatrix())
+ globalUniformDefaults.layoutMatrix = qualifier.layoutMatrix;
+ if (qualifier.hasPacking())
+ globalUniformDefaults.layoutPacking = qualifier.layoutPacking;
+ break;
+ case EvqBuffer:
+ if (qualifier.hasMatrix())
+ globalBufferDefaults.layoutMatrix = qualifier.layoutMatrix;
+ if (qualifier.hasPacking())
+ globalBufferDefaults.layoutPacking = qualifier.layoutPacking;
+ break;
+ case EvqVaryingIn:
+ break;
+ case EvqVaryingOut:
+ if (qualifier.hasStream())
+ globalOutputDefaults.layoutStream = qualifier.layoutStream;
+ if (qualifier.hasXfbBuffer())
+ globalOutputDefaults.layoutXfbBuffer = qualifier.layoutXfbBuffer;
+ if (globalOutputDefaults.hasXfbBuffer() && qualifier.hasXfbStride()) {
+ if (! intermediate.setXfbBufferStride(globalOutputDefaults.layoutXfbBuffer, qualifier.layoutXfbStride))
+ error(loc, "all stride settings must match for xfb buffer", "xfb_stride", "%d", qualifier.layoutXfbBuffer);
+ }
+ break;
+ default:
+ error(loc, "default qualifier requires 'uniform', 'buffer', 'in', or 'out' storage qualification", "", "");
+ return;
+ }
+
+ if (qualifier.hasBinding())
+ error(loc, "cannot declare a default, include a type or full declaration", "binding", "");
+ if (qualifier.hasAnyLocation())
+ error(loc, "cannot declare a default, use a full declaration", "location/component/index", "");
+ if (qualifier.hasXfbOffset())
+ error(loc, "cannot declare a default, use a full declaration", "xfb_offset", "");
+ if (qualifier.layoutPushConstant)
+ error(loc, "cannot declare a default, can only be used on a block", "push_constant", "");
+ if (qualifier.layoutBufferReference)
+ error(loc, "cannot declare a default, can only be used on a block", "buffer_reference", "");
+ if (qualifier.hasSpecConstantId())
+ error(loc, "cannot declare a default, can only be used on a scalar", "constant_id", "");
+#ifdef NV_EXTENSIONS
+ if (qualifier.layoutShaderRecordNV)
+ error(loc, "cannot declare a default, can only be used on a block", "shaderRecordNV", "");
+#endif
+}
+
+//
+// Take the sequence of statements that has been built up since the last case/default,
+// put it on the list of top-level nodes for the current (inner-most) switch statement,
+// and follow that by the case/default we are on now. (See switch topology comment on
+// TIntermSwitch.)
+//
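+// For example (illustrative), for
+//     switch (x) { case 0: a(); b(); case 1: c(); }
+// the sequence built up is: case-0 branch, { a(); b(); }, case-1 branch, { c(); }.
+//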
+void TParseContext::wrapupSwitchSubsequence(TIntermAggregate* statements, TIntermNode* branchNode)
+{
+ TIntermSequence* switchSequence = switchSequenceStack.back();
+
+ if (statements) {
+ if (switchSequence->size() == 0)
+ error(statements->getLoc(), "cannot have statements before first case/default label", "switch", "");
+ statements->setOperator(EOpSequence);
+ switchSequence->push_back(statements);
+ }
+ if (branchNode) {
+ // check all previous cases for the same label (or both are 'default')
+ for (unsigned int s = 0; s < switchSequence->size(); ++s) {
+ TIntermBranch* prevBranch = (*switchSequence)[s]->getAsBranchNode();
+ if (prevBranch) {
+ TIntermTyped* prevExpression = prevBranch->getExpression();
+ TIntermTyped* newExpression = branchNode->getAsBranchNode()->getExpression();
+ if (prevExpression == nullptr && newExpression == nullptr)
+ error(branchNode->getLoc(), "duplicate label", "default", "");
+ else if (prevExpression != nullptr &&
+ newExpression != nullptr &&
+ prevExpression->getAsConstantUnion() &&
+ newExpression->getAsConstantUnion() &&
+ prevExpression->getAsConstantUnion()->getConstArray()[0].getIConst() ==
+ newExpression->getAsConstantUnion()->getConstArray()[0].getIConst())
+ error(branchNode->getLoc(), "duplicated value", "case", "");
+ }
+ }
+ switchSequence->push_back(branchNode);
+ }
+}
+
+//
+// Turn the top-level node sequence built up by wrapupSwitchSubsequence()
+// into a switch node.
+//
+TIntermNode* TParseContext::addSwitch(const TSourceLoc& loc, TIntermTyped* expression, TIntermAggregate* lastStatements)
+{
+ profileRequires(loc, EEsProfile, 300, nullptr, "switch statements");
+ profileRequires(loc, ENoProfile, 130, nullptr, "switch statements");
+
+ wrapupSwitchSubsequence(lastStatements, nullptr);
+
+ if (expression == nullptr ||
+ (expression->getBasicType() != EbtInt && expression->getBasicType() != EbtUint) ||
+ expression->getType().isArray() || expression->getType().isMatrix() || expression->getType().isVector())
+ error(loc, "condition must be a scalar integer expression", "switch", "");
+
+ // If there is nothing to do, drop the switch but still execute the expression
+ TIntermSequence* switchSequence = switchSequenceStack.back();
+ if (switchSequence->size() == 0)
+ return expression;
+
+ if (lastStatements == nullptr) {
+        // This was originally an ERROR, because early versions of the specification said
+        // "it is an error to have no statement between a label and the end of the switch statement."
+        // The specifications were updated to remove this (it being ill-defined what a "statement" was),
+        // so this became a warning. However, 3.0 tests still check for the error.
+ if (profile == EEsProfile && version <= 300 && ! relaxedErrors())
+ error(loc, "last case/default label not followed by statements", "switch", "");
+ else
+ warn(loc, "last case/default label not followed by statements", "switch", "");
+
+ // emulate a break for error recovery
+ lastStatements = intermediate.makeAggregate(intermediate.addBranch(EOpBreak, loc));
+ lastStatements->setOperator(EOpSequence);
+ switchSequence->push_back(lastStatements);
+ }
+
+ TIntermAggregate* body = new TIntermAggregate(EOpSequence);
+ body->getSequence() = *switchSequenceStack.back();
+ body->setLoc(loc);
+
+ TIntermSwitch* switchNode = new TIntermSwitch(expression, body);
+ switchNode->setLoc(loc);
+
+ return switchNode;
+}
+
+} // end namespace glslang
+
diff --git a/thirdparty/glslang/glslang/MachineIndependent/ParseHelper.h b/thirdparty/glslang/glslang/MachineIndependent/ParseHelper.h
new file mode 100644
index 0000000000..a1ffe64dbf
--- /dev/null
+++ b/thirdparty/glslang/glslang/MachineIndependent/ParseHelper.h
@@ -0,0 +1,510 @@
+//
+// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+// Copyright (C) 2012-2013 LunarG, Inc.
+// Copyright (C) 2015-2018 Google, Inc.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+//
+// This header defines a two-level parse-helper hierarchy, derived from
+// TParseVersions:
+// - TParseContextBase: sharable across multiple parsers
+// - TParseContext: GLSL specific helper
+//
+
+#ifndef _PARSER_HELPER_INCLUDED_
+#define _PARSER_HELPER_INCLUDED_
+
+#include <cstdarg>
+#include <functional>
+
+#include "parseVersions.h"
+#include "../Include/ShHandle.h"
+#include "SymbolTable.h"
+#include "localintermediate.h"
+#include "Scan.h"
+#include "attribute.h"
+
+namespace glslang {
+
+struct TPragma {
+ TPragma(bool o, bool d) : optimize(o), debug(d) { }
+ bool optimize;
+ bool debug;
+ TPragmaTable pragmaTable;
+};
+
+class TScanContext;
+class TPpContext;
+
+typedef std::set<int> TIdSetType;
+
+//
+// Sharable code (as well as what's in TParseVersions) across
+// parse helpers.
+//
+class TParseContextBase : public TParseVersions {
+public:
+ TParseContextBase(TSymbolTable& symbolTable, TIntermediate& interm, bool parsingBuiltins, int version,
+ EProfile profile, const SpvVersion& spvVersion, EShLanguage language,
+ TInfoSink& infoSink, bool forwardCompatible, EShMessages messages,
+ const TString* entryPoint = nullptr)
+ : TParseVersions(interm, version, profile, spvVersion, language, infoSink, forwardCompatible, messages),
+ scopeMangler("::"),
+ symbolTable(symbolTable),
+ statementNestingLevel(0), loopNestingLevel(0), structNestingLevel(0), controlFlowNestingLevel(0),
+ postEntryPointReturn(false),
+ contextPragma(true, false),
+ parsingBuiltins(parsingBuiltins), scanContext(nullptr), ppContext(nullptr),
+ limits(resources.limits),
+ globalUniformBlock(nullptr),
+ globalUniformBinding(TQualifier::layoutBindingEnd),
+ globalUniformSet(TQualifier::layoutSetEnd)
+ {
+ if (entryPoint != nullptr)
+ sourceEntryPointName = *entryPoint;
+ }
+ virtual ~TParseContextBase() { }
+
+ virtual void C_DECL error(const TSourceLoc&, const char* szReason, const char* szToken,
+ const char* szExtraInfoFormat, ...);
+ virtual void C_DECL warn(const TSourceLoc&, const char* szReason, const char* szToken,
+ const char* szExtraInfoFormat, ...);
+ virtual void C_DECL ppError(const TSourceLoc&, const char* szReason, const char* szToken,
+ const char* szExtraInfoFormat, ...);
+ virtual void C_DECL ppWarn(const TSourceLoc&, const char* szReason, const char* szToken,
+ const char* szExtraInfoFormat, ...);
+
+ virtual void setLimits(const TBuiltInResource&) = 0;
+
+ void checkIndex(const TSourceLoc&, const TType&, int& index);
+
+ EShLanguage getLanguage() const { return language; }
+ void setScanContext(TScanContext* c) { scanContext = c; }
+ TScanContext* getScanContext() const { return scanContext; }
+ void setPpContext(TPpContext* c) { ppContext = c; }
+ TPpContext* getPpContext() const { return ppContext; }
+
+ virtual void setLineCallback(const std::function<void(int, int, bool, int, const char*)>& func) { lineCallback = func; }
+ virtual void setExtensionCallback(const std::function<void(int, const char*, const char*)>& func) { extensionCallback = func; }
+ virtual void setVersionCallback(const std::function<void(int, int, const char*)>& func) { versionCallback = func; }
+ virtual void setPragmaCallback(const std::function<void(int, const TVector<TString>&)>& func) { pragmaCallback = func; }
+ virtual void setErrorCallback(const std::function<void(int, const char*)>& func) { errorCallback = func; }
+
+ virtual void reservedPpErrorCheck(const TSourceLoc&, const char* name, const char* op) = 0;
+ virtual bool lineContinuationCheck(const TSourceLoc&, bool endOfComment) = 0;
+ virtual bool lineDirectiveShouldSetNextLine() const = 0;
+ virtual void handlePragma(const TSourceLoc&, const TVector<TString>&) = 0;
+
+ virtual bool parseShaderStrings(TPpContext&, TInputScanner& input, bool versionWillBeError = false) = 0;
+
+ virtual void notifyVersion(int line, int version, const char* type_string)
+ {
+ if (versionCallback)
+ versionCallback(line, version, type_string);
+ }
+ virtual void notifyErrorDirective(int line, const char* error_message)
+ {
+ if (errorCallback)
+ errorCallback(line, error_message);
+ }
+ virtual void notifyLineDirective(int curLineNo, int newLineNo, bool hasSource, int sourceNum, const char* sourceName)
+ {
+ if (lineCallback)
+ lineCallback(curLineNo, newLineNo, hasSource, sourceNum, sourceName);
+ }
+ virtual void notifyExtensionDirective(int line, const char* extension, const char* behavior)
+ {
+ if (extensionCallback)
+ extensionCallback(line, extension, behavior);
+ }
+
+ // Manage the global uniform block (default uniforms in GLSL, $Global in HLSL)
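+    // (e.g., illustratively, loose "uniform float t;" declarations can be gathered
+    // into this block as members)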
+ virtual void growGlobalUniformBlock(const TSourceLoc&, TType&, const TString& memberName, TTypeList* typeList = nullptr);
+
+ // Potentially rename shader entry point function
+ void renameShaderFunction(TString*& name) const
+ {
+ // Replace the entry point name given in the shader with the real entry point name,
+ // if there is a substitution.
+ if (name != nullptr && *name == sourceEntryPointName && intermediate.getEntryPointName().size() > 0)
+ name = NewPoolTString(intermediate.getEntryPointName().c_str());
+ }
+
+ virtual bool lValueErrorCheck(const TSourceLoc&, const char* op, TIntermTyped*);
+ virtual void rValueErrorCheck(const TSourceLoc&, const char* op, TIntermTyped*);
+
+ const char* const scopeMangler;
+
+ // Basic parsing state, easily accessible to the grammar
+
+ TSymbolTable& symbolTable; // symbol table that goes with the current language, version, and profile
+ int statementNestingLevel; // 0 if outside all flow control or compound statements
+ int loopNestingLevel; // 0 if outside all loops
+ int structNestingLevel; // 0 if outside blocks and structures
+ int controlFlowNestingLevel; // 0 if outside all flow control
+ const TType* currentFunctionType; // the return type of the function that's currently being parsed
+ bool functionReturnsValue; // true if a non-void function has a return
+ // if inside a function, true if the function is the entry point and this is after a return statement
+ bool postEntryPointReturn;
+ // case, node, case, case, node, ...; ensure only one node between cases; stack of them for nesting
+ TList<TIntermSequence*> switchSequenceStack;
+ // the statementNestingLevel the current switch statement is at, which must match the level of its case statements
+ TList<int> switchLevel;
+ struct TPragma contextPragma;
+
+protected:
+ TParseContextBase(TParseContextBase&);
+ TParseContextBase& operator=(TParseContextBase&);
+
+ const bool parsingBuiltins; // true if parsing built-in symbols/functions
+ TVector<TSymbol*> linkageSymbols; // will be transferred to 'linkage', after all editing is done, order preserving
+ TScanContext* scanContext;
+ TPpContext* ppContext;
+ TBuiltInResource resources;
+ TLimits& limits;
+ TString sourceEntryPointName;
+
+ // These, if set, will be called when a line, pragma ... is preprocessed.
+ // They will be called with any parameters to the original directive.
+ std::function<void(int, int, bool, int, const char*)> lineCallback;
+ std::function<void(int, const TVector<TString>&)> pragmaCallback;
+ std::function<void(int, int, const char*)> versionCallback;
+ std::function<void(int, const char*, const char*)> extensionCallback;
+ std::function<void(int, const char*)> errorCallback;
+
+ // see implementation for detail
+ const TFunction* selectFunction(const TVector<const TFunction*>, const TFunction&,
+ std::function<bool(const TType&, const TType&, TOperator, int arg)>,
+ std::function<bool(const TType&, const TType&, const TType&)>,
+ /* output */ bool& tie);
+
+ virtual void parseSwizzleSelector(const TSourceLoc&, const TString&, int size,
+ TSwizzleSelectors<TVectorSelector>&);
+
+ // Manage the global uniform block (default uniforms in GLSL, $Global in HLSL)
+ TVariable* globalUniformBlock; // the actual block, inserted into the symbol table
+ unsigned int globalUniformBinding; // the block's binding number
+ unsigned int globalUniformSet; // the block's set number
+ int firstNewMember; // the index of the first member not yet inserted into the symbol table
+ // override this to set the language-specific name
+ virtual const char* getGlobalUniformBlockName() const { return ""; }
+ virtual void setUniformBlockDefaults(TType&) const { }
+ virtual void finalizeGlobalUniformBlockLayout(TVariable&) { }
+ virtual void outputMessage(const TSourceLoc&, const char* szReason, const char* szToken,
+ const char* szExtraInfoFormat, TPrefixType prefix,
+ va_list args);
+ virtual void trackLinkage(TSymbol& symbol);
+ virtual void makeEditable(TSymbol*&);
+ virtual TVariable* getEditableVariable(const char* name);
+ virtual void finish();
+};
+
+//
+// Manage the state for when to respect precision qualifiers and when to warn about
+// the defaults being different than might be expected.
+//
+class TPrecisionManager {
+public:
+ TPrecisionManager() : obey(false), warn(false), explicitIntDefault(false), explicitFloatDefault(false) { }
+ virtual ~TPrecisionManager() {}
+
+ void respectPrecisionQualifiers() { obey = true; }
+ bool respectingPrecisionQualifiers() const { return obey; }
+ bool shouldWarnAboutDefaults() const { return warn; }
+ void defaultWarningGiven() { warn = false; }
+ void warnAboutDefaults() { warn = true; }
+ void explicitIntDefaultSeen()
+ {
+ explicitIntDefault = true;
+ if (explicitFloatDefault)
+ warn = false;
+ }
+ void explicitFloatDefaultSeen()
+ {
+ explicitFloatDefault = true;
+ if (explicitIntDefault)
+ warn = false;
+ }
+
+protected:
+ bool obey; // respect precision qualifiers
+ bool warn; // need to give a warning about the defaults
+ bool explicitIntDefault; // user set the default for int/uint
+ bool explicitFloatDefault; // user set the default for float
+};
+
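+// A minimal usage sketch (illustrative only; not part of the original header).
+// An ES-style front end turns on enforcement and warnings up front; once the
+// shader supplies explicit defaults for both int and float, the warning clears:
+//
+//     TPrecisionManager pm;
+//     pm.respectPrecisionQualifiers();
+//     pm.warnAboutDefaults();
+//     pm.explicitIntDefaultSeen();    // e.g., on "precision highp int;"
+//     pm.explicitFloatDefaultSeen();  // e.g., on "precision highp float;"
+//     // pm.shouldWarnAboutDefaults() is now false
+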
+//
+// GLSL-specific parse helper. Should have GLSL in the name, but that's
+// too big of a change for comparing branches at the moment, and perhaps
+// impacts downstream consumers as well.
+//
+class TParseContext : public TParseContextBase {
+public:
+ TParseContext(TSymbolTable&, TIntermediate&, bool parsingBuiltins, int version, EProfile, const SpvVersion& spvVersion, EShLanguage, TInfoSink&,
+ bool forwardCompatible = false, EShMessages messages = EShMsgDefault,
+ const TString* entryPoint = nullptr);
+ virtual ~TParseContext();
+
+ bool obeyPrecisionQualifiers() const { return precisionManager.respectingPrecisionQualifiers(); }
+ void setPrecisionDefaults();
+
+ void setLimits(const TBuiltInResource&) override;
+ bool parseShaderStrings(TPpContext&, TInputScanner& input, bool versionWillBeError = false) override;
+ void parserError(const char* s); // for bison's yyerror
+
+ void reservedErrorCheck(const TSourceLoc&, const TString&);
+ void reservedPpErrorCheck(const TSourceLoc&, const char* name, const char* op) override;
+ bool lineContinuationCheck(const TSourceLoc&, bool endOfComment) override;
+ bool lineDirectiveShouldSetNextLine() const override;
+ bool builtInName(const TString&);
+
+ void handlePragma(const TSourceLoc&, const TVector<TString>&) override;
+ TIntermTyped* handleVariable(const TSourceLoc&, TSymbol* symbol, const TString* string);
+ TIntermTyped* handleBracketDereference(const TSourceLoc&, TIntermTyped* base, TIntermTyped* index);
+ void handleIndexLimits(const TSourceLoc&, TIntermTyped* base, TIntermTyped* index);
+
+ void makeEditable(TSymbol*&) override;
+ bool isIoResizeArray(const TType&) const;
+ void fixIoArraySize(const TSourceLoc&, TType&);
+ void ioArrayCheck(const TSourceLoc&, const TType&, const TString& identifier);
+ void handleIoResizeArrayAccess(const TSourceLoc&, TIntermTyped* base);
+ void checkIoArraysConsistency(const TSourceLoc&, bool tailOnly = false);
+ int getIoArrayImplicitSize(const TQualifier&, TString* featureString = nullptr) const;
+ void checkIoArrayConsistency(const TSourceLoc&, int requiredSize, const char* feature, TType&, const TString&);
+
+ TIntermTyped* handleBinaryMath(const TSourceLoc&, const char* str, TOperator op, TIntermTyped* left, TIntermTyped* right);
+ TIntermTyped* handleUnaryMath(const TSourceLoc&, const char* str, TOperator op, TIntermTyped* childNode);
+ TIntermTyped* handleDotDereference(const TSourceLoc&, TIntermTyped* base, const TString& field);
+ void blockMemberExtensionCheck(const TSourceLoc&, const TIntermTyped* base, int member, const TString& memberName);
+ TFunction* handleFunctionDeclarator(const TSourceLoc&, TFunction& function, bool prototype);
+ TIntermAggregate* handleFunctionDefinition(const TSourceLoc&, TFunction&);
+ TIntermTyped* handleFunctionCall(const TSourceLoc&, TFunction*, TIntermNode*);
+ TIntermTyped* handleBuiltInFunctionCall(TSourceLoc, TIntermNode* arguments, const TFunction& function);
+ void computeBuiltinPrecisions(TIntermTyped&, const TFunction&);
+ TIntermNode* handleReturnValue(const TSourceLoc&, TIntermTyped*);
+ void checkLocation(const TSourceLoc&, TOperator);
+ TIntermTyped* handleLengthMethod(const TSourceLoc&, TFunction*, TIntermNode*);
+ void addInputArgumentConversions(const TFunction&, TIntermNode*&) const;
+ TIntermTyped* addOutputArgumentConversions(const TFunction&, TIntermAggregate&) const;
+ void builtInOpCheck(const TSourceLoc&, const TFunction&, TIntermOperator&);
+ void nonOpBuiltInCheck(const TSourceLoc&, const TFunction&, TIntermAggregate&);
+ void userFunctionCallCheck(const TSourceLoc&, TIntermAggregate&);
+ void samplerConstructorLocationCheck(const TSourceLoc&, const char* token, TIntermNode*);
+ TFunction* handleConstructorCall(const TSourceLoc&, const TPublicType&);
+ void handlePrecisionQualifier(const TSourceLoc&, TQualifier&, TPrecisionQualifier);
+ void checkPrecisionQualifier(const TSourceLoc&, TPrecisionQualifier);
+ void memorySemanticsCheck(const TSourceLoc&, const TFunction&, const TIntermOperator& callNode);
+
+ void assignError(const TSourceLoc&, const char* op, TString left, TString right);
+ void unaryOpError(const TSourceLoc&, const char* op, TString operand);
+ void binaryOpError(const TSourceLoc&, const char* op, TString left, TString right);
+ void variableCheck(TIntermTyped*& nodePtr);
+ bool lValueErrorCheck(const TSourceLoc&, const char* op, TIntermTyped*) override;
+ void rValueErrorCheck(const TSourceLoc&, const char* op, TIntermTyped*) override;
+ void constantValueCheck(TIntermTyped* node, const char* token);
+ void integerCheck(const TIntermTyped* node, const char* token);
+ void globalCheck(const TSourceLoc&, const char* token);
+ bool constructorError(const TSourceLoc&, TIntermNode*, TFunction&, TOperator, TType&);
+ bool constructorTextureSamplerError(const TSourceLoc&, const TFunction&);
+ void arraySizeCheck(const TSourceLoc&, TIntermTyped* expr, TArraySize&, const char *sizeType);
+ bool arrayQualifierError(const TSourceLoc&, const TQualifier&);
+ bool arrayError(const TSourceLoc&, const TType&);
+ void arraySizeRequiredCheck(const TSourceLoc&, const TArraySizes&);
+ void structArrayCheck(const TSourceLoc&, const TType& structure);
+ void arraySizesCheck(const TSourceLoc&, const TQualifier&, TArraySizes*, const TIntermTyped* initializer, bool lastMember);
+ void arrayOfArrayVersionCheck(const TSourceLoc&, const TArraySizes*);
+ bool voidErrorCheck(const TSourceLoc&, const TString&, TBasicType);
+ void boolCheck(const TSourceLoc&, const TIntermTyped*);
+ void boolCheck(const TSourceLoc&, const TPublicType&);
+ void samplerCheck(const TSourceLoc&, const TType&, const TString& identifier, TIntermTyped* initializer);
+ void atomicUintCheck(const TSourceLoc&, const TType&, const TString& identifier);
+ void accStructNVCheck(const TSourceLoc& loc, const TType& type, const TString& identifier);
+ void transparentOpaqueCheck(const TSourceLoc&, const TType&, const TString& identifier);
+ void memberQualifierCheck(glslang::TPublicType&);
+ void globalQualifierFixCheck(const TSourceLoc&, TQualifier&);
+ void globalQualifierTypeCheck(const TSourceLoc&, const TQualifier&, const TPublicType&);
+ bool structQualifierErrorCheck(const TSourceLoc&, const TPublicType& pType);
+ void mergeQualifiers(const TSourceLoc&, TQualifier& dst, const TQualifier& src, bool force);
+ void setDefaultPrecision(const TSourceLoc&, TPublicType&, TPrecisionQualifier);
+ int computeSamplerTypeIndex(TSampler&);
+ TPrecisionQualifier getDefaultPrecision(TPublicType&);
+ void precisionQualifierCheck(const TSourceLoc&, TBasicType, TQualifier&);
+ void parameterTypeCheck(const TSourceLoc&, TStorageQualifier qualifier, const TType& type);
+ bool containsFieldWithBasicType(const TType& type, TBasicType basicType);
+ TSymbol* redeclareBuiltinVariable(const TSourceLoc&, const TString&, const TQualifier&, const TShaderQualifiers&);
+ void redeclareBuiltinBlock(const TSourceLoc&, TTypeList& typeList, const TString& blockName, const TString* instanceName, TArraySizes* arraySizes);
+ void paramCheckFixStorage(const TSourceLoc&, const TStorageQualifier&, TType& type);
+ void paramCheckFix(const TSourceLoc&, const TQualifier&, TType& type);
+ void nestedBlockCheck(const TSourceLoc&);
+ void nestedStructCheck(const TSourceLoc&);
+ void arrayObjectCheck(const TSourceLoc&, const TType&, const char* op);
+ void opaqueCheck(const TSourceLoc&, const TType&, const char* op);
+ void referenceCheck(const TSourceLoc&, const TType&, const char* op);
+ void storage16BitAssignmentCheck(const TSourceLoc&, const TType&, const char* op);
+ void specializationCheck(const TSourceLoc&, const TType&, const char* op);
+ void structTypeCheck(const TSourceLoc&, TPublicType&);
+ void inductiveLoopCheck(const TSourceLoc&, TIntermNode* init, TIntermLoop* loop);
+ void arrayLimitCheck(const TSourceLoc&, const TString&, int size);
+ void limitCheck(const TSourceLoc&, int value, const char* limit, const char* feature);
+
+ void inductiveLoopBodyCheck(TIntermNode*, int loopIndexId, TSymbolTable&);
+ void constantIndexExpressionCheck(TIntermNode*);
+
+ void setLayoutQualifier(const TSourceLoc&, TPublicType&, TString&);
+ void setLayoutQualifier(const TSourceLoc&, TPublicType&, TString&, const TIntermTyped*);
+ void mergeObjectLayoutQualifiers(TQualifier& dest, const TQualifier& src, bool inheritOnly);
+ void layoutObjectCheck(const TSourceLoc&, const TSymbol&);
+ void layoutMemberLocationArrayCheck(const TSourceLoc&, bool memberWithLocation, TArraySizes* arraySizes);
+ void layoutTypeCheck(const TSourceLoc&, const TType&);
+ void layoutQualifierCheck(const TSourceLoc&, const TQualifier&);
+ void checkNoShaderLayouts(const TSourceLoc&, const TShaderQualifiers&);
+ void fixOffset(const TSourceLoc&, TSymbol&);
+
+ const TFunction* findFunction(const TSourceLoc& loc, const TFunction& call, bool& builtIn);
+ const TFunction* findFunctionExact(const TSourceLoc& loc, const TFunction& call, bool& builtIn);
+ const TFunction* findFunction120(const TSourceLoc& loc, const TFunction& call, bool& builtIn);
+ const TFunction* findFunction400(const TSourceLoc& loc, const TFunction& call, bool& builtIn);
+ const TFunction* findFunctionExplicitTypes(const TSourceLoc& loc, const TFunction& call, bool& builtIn);
+ void declareTypeDefaults(const TSourceLoc&, const TPublicType&);
+ TIntermNode* declareVariable(const TSourceLoc&, TString& identifier, const TPublicType&, TArraySizes* typeArray = 0, TIntermTyped* initializer = 0);
+ TIntermTyped* addConstructor(const TSourceLoc&, TIntermNode*, const TType&);
+ TIntermTyped* constructAggregate(TIntermNode*, const TType&, int, const TSourceLoc&);
+ TIntermTyped* constructBuiltIn(const TType&, TOperator, TIntermTyped*, const TSourceLoc&, bool subset);
+ void declareBlock(const TSourceLoc&, TTypeList& typeList, const TString* instanceName = 0, TArraySizes* arraySizes = 0);
+ void blockStageIoCheck(const TSourceLoc&, const TQualifier&);
+ void blockQualifierCheck(const TSourceLoc&, const TQualifier&, bool instanceName);
+ void fixBlockLocations(const TSourceLoc&, TQualifier&, TTypeList&, bool memberWithLocation, bool memberWithoutLocation);
+ void fixXfbOffsets(TQualifier&, TTypeList&);
+ void fixBlockUniformOffsets(TQualifier&, TTypeList&);
+ void addQualifierToExisting(const TSourceLoc&, TQualifier, const TString& identifier);
+ void addQualifierToExisting(const TSourceLoc&, TQualifier, TIdentifierList&);
+ void invariantCheck(const TSourceLoc&, const TQualifier&);
+ void updateStandaloneQualifierDefaults(const TSourceLoc&, const TPublicType&);
+ void wrapupSwitchSubsequence(TIntermAggregate* statements, TIntermNode* branchNode);
+ TIntermNode* addSwitch(const TSourceLoc&, TIntermTyped* expression, TIntermAggregate* body);
+
+ TAttributeType attributeFromName(const TString& name) const;
+ TAttributes* makeAttributes(const TString& identifier) const;
+ TAttributes* makeAttributes(const TString& identifier, TIntermNode* node) const;
+ TAttributes* mergeAttributes(TAttributes*, TAttributes*) const;
+
+ // Determine selection control from attributes
+ void handleSelectionAttributes(const TAttributes& attributes, TIntermNode*);
+ void handleSwitchAttributes(const TAttributes& attributes, TIntermNode*);
+
+ // Determine loop control from attributes
+ void handleLoopAttributes(const TAttributes& attributes, TIntermNode*);
+
+ void resizeMeshViewDimension(const TSourceLoc&, TType&);
+
+protected:
+ void nonInitConstCheck(const TSourceLoc&, TString& identifier, TType& type);
+ void inheritGlobalDefaults(TQualifier& dst) const;
+ TVariable* makeInternalVariable(const char* name, const TType&) const;
+ TVariable* declareNonArray(const TSourceLoc&, const TString& identifier, const TType&);
+ void declareArray(const TSourceLoc&, const TString& identifier, const TType&, TSymbol*&);
+ void checkRuntimeSizable(const TSourceLoc&, const TIntermTyped&);
+ bool isRuntimeLength(const TIntermTyped&) const;
+ TIntermNode* executeInitializer(const TSourceLoc&, TIntermTyped* initializer, TVariable* variable);
+ TIntermTyped* convertInitializerList(const TSourceLoc&, const TType&, TIntermTyped* initializer);
+ void finish() override;
+
+public:
+ //
+ // Generally, bison productions, the scanner, and the PP need read/write access to these; just give them direct access
+ //
+
+ // Current state of parsing
+ bool inMain; // if inside a function, true if the function is main
+ const TString* blockName;
+ TQualifier currentBlockQualifier;
+ TPrecisionQualifier defaultPrecision[EbtNumTypes];
+ TBuiltInResource resources;
+ TLimits& limits;
+
+protected:
+ TParseContext(TParseContext&);
+ TParseContext& operator=(TParseContext&);
+
+ static const int maxSamplerIndex = EsdNumDims * (EbtNumTypes * (2 * 2 * 2 * 2 * 2)); // see computeSamplerTypeIndex()
+ TPrecisionQualifier defaultSamplerPrecision[maxSamplerIndex];
+ TPrecisionManager precisionManager;
+ TQualifier globalBufferDefaults;
+ TQualifier globalUniformDefaults;
+ TQualifier globalInputDefaults;
+ TQualifier globalOutputDefaults;
+ int* atomicUintOffsets; // to become an array of the right size to hold an offset per binding point
+ TString currentCaller; // name of last function body entered (not valid when at global scope)
+ TIdSetType inductiveLoopIds;
+ bool anyIndexLimits;
+ TVector<TIntermTyped*> needsIndexLimitationChecking;
+
+ //
+ // Geometry shader input arrays:
+ // - array sizing is based on input primitive and/or explicit size
+ //
+ // Tessellation control output arrays:
+ // - array sizing is based on output layout(vertices=...) and/or explicit size
+ //
+ // Both:
+ // - array sizing is retroactive
+ // - built-in block redeclarations interact with this
+ //
+ // Design:
+ // - use a per-context "resize-list", a list of symbols whose array sizes
+ // can be fixed
+ //
+ // - the resize-list starts empty at beginning of user-shader compilation, it does
+ // not have built-ins in it
+ //
+ // - on built-in array use: copyUp() symbol and add it to the resize-list
+ //
+ // - on user array declaration: add it to the resize-list
+ //
+ // - on block redeclaration: copyUp() symbol and add it to the resize-list
+ //      * note that this appropriately gives an error if redeclaring a block
+ //        that was already used and hence already copied up
+ //
+ // - on seeing a layout declaration that sizes the array, fix everything in the
+ // resize-list, giving errors for mismatch
+ //
+ // - on seeing an array size declaration, give errors on mismatch between it and previous
+ // array-sizing declarations
+ //
+ TVector<TSymbol*> ioArraySymbolResizeList;
+};
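+
+// Illustrative GLSL for the resize-list design above (example only; not part
+// of this header). In a geometry shader an input array may be declared before
+// its size is known; a later layout declaration retroactively sizes every
+// symbol on the resize-list:
+//
+//     in vec4 color[];         // unsized: goes on ioArraySymbolResizeList
+//     layout(triangles) in;    // every listed array is now sized to 3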
+
+} // end namespace glslang
+
+#endif // _PARSER_HELPER_INCLUDED_
diff --git a/thirdparty/glslang/glslang/MachineIndependent/PoolAlloc.cpp b/thirdparty/glslang/glslang/MachineIndependent/PoolAlloc.cpp
new file mode 100644
index 0000000000..84c40f4e79
--- /dev/null
+++ b/thirdparty/glslang/glslang/MachineIndependent/PoolAlloc.cpp
@@ -0,0 +1,315 @@
+//
+// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+#include "../Include/Common.h"
+#include "../Include/PoolAlloc.h"
+
+#include "../Include/InitializeGlobals.h"
+#include "../OSDependent/osinclude.h"
+
+namespace glslang {
+
+// Process-wide TLS index
+OS_TLSIndex PoolIndex;
+
+// Return the thread-specific current pool.
+TPoolAllocator& GetThreadPoolAllocator()
+{
+ return *static_cast<TPoolAllocator*>(OS_GetTLSValue(PoolIndex));
+}
+
+// Set the thread-specific current pool.
+void SetThreadPoolAllocator(TPoolAllocator* poolAllocator)
+{
+ OS_SetTLSValue(PoolIndex, poolAllocator);
+}
+
+// Process-wide set up of the TLS pool storage.
+bool InitializePoolIndex()
+{
+ // Allocate a TLS index.
+ if ((PoolIndex = OS_AllocTLSIndex()) == OS_INVALID_TLS_INDEX)
+ return false;
+
+ return true;
+}
+
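+// Typical setup (illustrative only, using the functions defined above): the
+// process calls InitializePoolIndex() once, then each thread installs its own
+// pool before doing any pool-backed allocation:
+//
+//     TPoolAllocator threadPool;
+//     SetThreadPoolAllocator(&threadPool);
+//     TPoolAllocator& pool = GetThreadPoolAllocator();  // same object back
+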
+//
+// Implement the functionality of the TPoolAllocator class, which
+// is documented in PoolAlloc.h.
+//
+TPoolAllocator::TPoolAllocator(int growthIncrement, int allocationAlignment) :
+ pageSize(growthIncrement),
+ alignment(allocationAlignment),
+ freeList(nullptr),
+ inUseList(nullptr),
+ numCalls(0)
+{
+ //
+ // Don't allow page sizes we know are smaller than all common
+ // OS page sizes.
+ //
+ if (pageSize < 4*1024)
+ pageSize = 4*1024;
+
+ //
+ // A large currentPageOffset indicates a new page needs to
+ // be obtained to allocate memory.
+ //
+ currentPageOffset = pageSize;
+
+ //
+ // Adjust alignment to be at least pointer aligned and
+ // power of 2.
+ //
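+ // For example, on a 64-bit build (minAlign == 8), a requested alignment
+ // of 20 is masked down to 16 and kept at 16, while a requested alignment
+ // of 24 survives the mask and is then rounded up to the power of two 32.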
+ size_t minAlign = sizeof(void*);
+ alignment &= ~(minAlign - 1);
+ if (alignment < minAlign)
+ alignment = minAlign;
+ size_t a = 1;
+ while (a < alignment)
+ a <<= 1;
+ alignment = a;
+ alignmentMask = a - 1;
+
+ //
+ // Align header skip
+ //
+ headerSkip = minAlign;
+ if (headerSkip < sizeof(tHeader)) {
+ headerSkip = (sizeof(tHeader) + alignmentMask) & ~alignmentMask;
+ }
+
+ push();
+}
+
+TPoolAllocator::~TPoolAllocator()
+{
+ while (inUseList) {
+ tHeader* next = inUseList->nextPage;
+ inUseList->~tHeader();
+ delete [] reinterpret_cast<char*>(inUseList);
+ inUseList = next;
+ }
+
+ //
+ // Always delete the free-list memory; it cannot still be
+ // (correctly) referenced, whether the pool allocator was
+ // global or not. The guard blocks are not checked here,
+ // because that was already done when each block was
+ // placed into the free list.
+ //
+ while (freeList) {
+ tHeader* next = freeList->nextPage;
+ delete [] reinterpret_cast<char*>(freeList);
+ freeList = next;
+ }
+}
+
+const unsigned char TAllocation::guardBlockBeginVal = 0xfb;
+const unsigned char TAllocation::guardBlockEndVal = 0xfe;
+const unsigned char TAllocation::userDataFill = 0xcd;
+
+# ifdef GUARD_BLOCKS
+ const size_t TAllocation::guardBlockSize = 16;
+# else
+ const size_t TAllocation::guardBlockSize = 0;
+# endif
+
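+// Resulting layout of one guarded allocation (illustrative; applies only when
+// GUARD_BLOCKS is defined, where guardBlockSize is 16):
+//
+//     [ 16 bytes of 0xfb ][ user data, filled with 0xcd ][ 16 bytes of 0xfe ]
+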
+//
+// Check a single guard block for damage
+//
+#ifdef GUARD_BLOCKS
+void TAllocation::checkGuardBlock(unsigned char* blockMem, unsigned char val, const char* locText) const
+#else
+void TAllocation::checkGuardBlock(unsigned char*, unsigned char, const char*) const
+#endif
+{
+#ifdef GUARD_BLOCKS
+ for (size_t x = 0; x < guardBlockSize; x++) {
+ if (blockMem[x] != val) {
+ const int maxSize = 80;
+ char assertMsg[maxSize];
+
+ // We don't print the assert message. It's here just to be helpful.
+ snprintf(assertMsg, maxSize, "PoolAlloc: Damage %s %zu byte allocation at 0x%p\n",
+ locText, size, data());
+ assert(0 && "PoolAlloc: Damage in guard block");
+ }
+ }
+#else
+ assert(guardBlockSize == 0);
+#endif
+}
+
+void TPoolAllocator::push()
+{
+ tAllocState state = { currentPageOffset, inUseList };
+
+ stack.push_back(state);
+
+ //
+ // Indicate there is no current page to allocate from.
+ //
+ currentPageOffset = pageSize;
+}
+
+//
+// Do a mass-deallocation of all the individual allocations
+// that have occurred since the last push(), or since the
+// last pop(), or since the object's creation.
+//
+// The deallocated pages are saved for future allocations.
+//
+void TPoolAllocator::pop()
+{
+ if (stack.size() < 1)
+ return;
+
+ tHeader* page = stack.back().page;
+ currentPageOffset = stack.back().offset;
+
+ while (inUseList != page) {
+ tHeader* nextInUse = inUseList->nextPage;
+ size_t pageCount = inUseList->pageCount;
+
+ // This technically ends the lifetime of the header as a C++ object,
+ // but we will still control the memory and reuse it.
+ inUseList->~tHeader(); // currently, just a debug allocation checker
+
+ if (pageCount > 1) {
+ delete [] reinterpret_cast<char*>(inUseList);
+ } else {
+ inUseList->nextPage = freeList;
+ freeList = inUseList;
+ }
+ inUseList = nextInUse;
+ }
+
+ stack.pop_back();
+}
+
+//
+// Do a mass-deallocation of all the individual allocations
+// that have occurred.
+//
+void TPoolAllocator::popAll()
+{
+ while (stack.size() > 0)
+ pop();
+}
+
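+// Usage sketch (illustrative only; not part of the original file): everything
+// allocated between a push() and its matching pop() is mass-freed together,
+// and the reclaimed pages feed later allocations:
+//
+//     TPoolAllocator pool;
+//     pool.push();
+//     void* a = pool.allocate(64);   // served from the current page
+//     void* b = pool.allocate(128);
+//     pool.pop();                    // releases a and b in one shot
+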
+void* TPoolAllocator::allocate(size_t numBytes)
+{
+ // If we are using guard blocks, all allocations are bracketed by
+ // them: [guardblock][allocation][guardblock]. numBytes is how
+ // much memory the caller asked for. allocationSize is the total
+ // size including guard blocks. In release build,
+ // guardBlockSize=0 and this all gets optimized away.
+ size_t allocationSize = TAllocation::allocationSize(numBytes);
+
+ //
+ // Just keep some interesting statistics.
+ //
+ ++numCalls;
+ totalBytes += numBytes;
+
+ //
+ // Do the allocation, most likely case first, for efficiency.
+ // This step could be moved to be inline sometime.
+ //
+ if (currentPageOffset + allocationSize <= pageSize) {
+ //
+ // Safe to allocate from currentPageOffset.
+ //
+ unsigned char* memory = reinterpret_cast<unsigned char*>(inUseList) + currentPageOffset;
+ currentPageOffset += allocationSize;
+ currentPageOffset = (currentPageOffset + alignmentMask) & ~alignmentMask;
+
+ return initializeAllocation(inUseList, memory, numBytes);
+ }
+
+ if (allocationSize + headerSkip > pageSize) {
+ //
+ // Do a multi-page allocation. Don't mix these with the others.
+ // The OS is efficient at allocating and freeing multiple pages.
+ //
+ size_t numBytesToAlloc = allocationSize + headerSkip;
+ tHeader* memory = reinterpret_cast<tHeader*>(::new char[numBytesToAlloc]);
+ if (memory == 0)
+ return 0;
+
+ // Use placement-new to initialize header
+ new(memory) tHeader(inUseList, (numBytesToAlloc + pageSize - 1) / pageSize);
+ inUseList = memory;
+
+ currentPageOffset = pageSize; // make next allocation come from a new page
+
+ // No guard blocks for multi-page allocations (yet)
+ return reinterpret_cast<void*>(reinterpret_cast<UINT_PTR>(memory) + headerSkip);
+ }
+
+ //
+ // Need a simple page to allocate from.
+ //
+ tHeader* memory;
+ if (freeList) {
+ memory = freeList;
+ freeList = freeList->nextPage;
+ } else {
+ memory = reinterpret_cast<tHeader*>(::new char[pageSize]);
+ if (memory == 0)
+ return 0;
+ }
+
+ // Use placement-new to initialize header
+ new(memory) tHeader(inUseList, 1);
+ inUseList = memory;
+
+ unsigned char* ret = reinterpret_cast<unsigned char*>(inUseList) + headerSkip;
+ currentPageOffset = (headerSkip + allocationSize + alignmentMask) & ~alignmentMask;
+
+ return initializeAllocation(inUseList, ret, numBytes);
+}
+
+//
+// Check all allocations in a list for damage by calling check on each.
+//
+void TAllocation::checkAllocList() const
+{
+ for (const TAllocation* alloc = this; alloc != 0; alloc = alloc->prevAlloc)
+ alloc->check();
+}
+
+} // end namespace glslang
diff --git a/thirdparty/glslang/glslang/MachineIndependent/RemoveTree.cpp b/thirdparty/glslang/glslang/MachineIndependent/RemoveTree.cpp
new file mode 100644
index 0000000000..1d33bfd203
--- /dev/null
+++ b/thirdparty/glslang/glslang/MachineIndependent/RemoveTree.cpp
@@ -0,0 +1,118 @@
+//
+// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+// Copyright (C) 2013 LunarG, Inc.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+#include "../Include/intermediate.h"
+#include "RemoveTree.h"
+
+namespace glslang {
+
+//
+// Code to recursively delete the intermediate tree.
+//
+struct TRemoveTraverser : TIntermTraverser {
+ TRemoveTraverser() : TIntermTraverser(false, false, true, false) {}
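+ // Visits in post-order only (preVisit = false, inVisit = false, postVisit = true),
+ // so each node is deleted only after its children have been; nothing already
+ // freed is ever traversed again.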
+
+ virtual void visitSymbol(TIntermSymbol* node)
+ {
+ delete node;
+ }
+
+ virtual bool visitBinary(TVisit /* visit*/ , TIntermBinary* node)
+ {
+ delete node;
+
+ return true;
+ }
+
+ virtual bool visitUnary(TVisit /* visit */, TIntermUnary* node)
+ {
+ delete node;
+
+ return true;
+ }
+
+ virtual bool visitAggregate(TVisit /* visit*/ , TIntermAggregate* node)
+ {
+ delete node;
+
+ return true;
+ }
+
+ virtual bool visitSelection(TVisit /* visit*/ , TIntermSelection* node)
+ {
+ delete node;
+
+ return true;
+ }
+
+ virtual bool visitSwitch(TVisit /* visit*/ , TIntermSwitch* node)
+ {
+ delete node;
+
+ return true;
+ }
+
+ virtual void visitConstantUnion(TIntermConstantUnion* node)
+ {
+ delete node;
+ }
+
+ virtual bool visitLoop(TVisit /* visit*/ , TIntermLoop* node)
+ {
+ delete node;
+
+ return true;
+ }
+
+ virtual bool visitBranch(TVisit /* visit*/ , TIntermBranch* node)
+ {
+ delete node;
+
+ return true;
+ }
+};
+
+//
+// Entry point.
+//
+void RemoveAllTreeNodes(TIntermNode* root)
+{
+ TRemoveTraverser it;
+
+ root->traverse(&it);
+}
+
+} // end namespace glslang
diff --git a/thirdparty/glslang/glslang/MachineIndependent/RemoveTree.h b/thirdparty/glslang/glslang/MachineIndependent/RemoveTree.h
new file mode 100644
index 0000000000..1ed015626b
--- /dev/null
+++ b/thirdparty/glslang/glslang/MachineIndependent/RemoveTree.h
@@ -0,0 +1,41 @@
+//
+// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+#pragma once
+
+namespace glslang {
+
+void RemoveAllTreeNodes(TIntermNode*);
+
+} // end namespace glslang
diff --git a/thirdparty/glslang/glslang/MachineIndependent/Scan.cpp b/thirdparty/glslang/glslang/MachineIndependent/Scan.cpp
new file mode 100644
index 0000000000..482f6ba271
--- /dev/null
+++ b/thirdparty/glslang/glslang/MachineIndependent/Scan.cpp
@@ -0,0 +1,1793 @@
+//
+// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+// Copyright (C) 2013 LunarG, Inc.
+// Copyright (C) 2017 ARM Limited.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+//
+// GLSL scanning, leveraging the scanning done by the preprocessor.
+//
+
+#include <cstring>
+#include <unordered_map>
+#include <unordered_set>
+
+#include "../Include/Types.h"
+#include "SymbolTable.h"
+#include "ParseHelper.h"
+#include "attribute.h"
+#include "glslang_tab.cpp.h"
+#include "ScanContext.h"
+#include "Scan.h"
+
+// preprocessor includes
+#include "preprocessor/PpContext.h"
+#include "preprocessor/PpTokens.h"
+
+// Required to avoid missing prototype warnings for some compilers
+int yylex(YYSTYPE*, glslang::TParseContext&);
+
+namespace glslang {
+
+// read past any white space
+void TInputScanner::consumeWhiteSpace(bool& foundNonSpaceTab)
+{
+ int c = peek(); // don't accidentally consume anything other than whitespace
+ while (c == ' ' || c == '\t' || c == '\r' || c == '\n') {
+ if (c == '\r' || c == '\n')
+ foundNonSpaceTab = true;
+ get();
+ c = peek();
+ }
+}
+
+// return true if a comment was actually consumed
+bool TInputScanner::consumeComment()
+{
+ if (peek() != '/')
+ return false;
+
+ get(); // consume the '/'
+ int c = peek();
+ if (c == '/') {
+
+ // a '//' style comment
+ get(); // consume the second '/'
+ c = get();
+ do {
+ while (c != EndOfInput && c != '\\' && c != '\r' && c != '\n')
+ c = get();
+
+ if (c == EndOfInput || c == '\r' || c == '\n') {
+ while (c == '\r' || c == '\n')
+ c = get();
+
+ // we reached the end of the comment
+ break;
+ } else {
+ // it's a '\', so we need to keep going, after skipping what's escaped
+
+ // read the skipped character
+ c = get();
+
+ // if it's a two-character newline, skip both characters
+ if (c == '\r' && peek() == '\n')
+ get();
+ c = get();
+ }
+ } while (true);
+
+ // put back the last non-comment character
+ if (c != EndOfInput)
+ unget();
+
+ return true;
+ } else if (c == '*') {
+
+ // a '/*' style comment
+ get(); // consume the '*'
+ c = get();
+ do {
+ while (c != EndOfInput && c != '*')
+ c = get();
+ if (c == '*') {
+ c = get();
+ if (c == '/')
+ break; // end of comment
+ // not end of comment
+ } else // end of input
+ break;
+ } while (true);
+
+ return true;
+ } else {
+ // it's not a comment, put the '/' back
+ unget();
+
+ return false;
+ }
+}
+
+// skip whitespace, then skip a comment, rinse, repeat
+void TInputScanner::consumeWhitespaceComment(bool& foundNonSpaceTab)
+{
+ do {
+ consumeWhiteSpace(foundNonSpaceTab);
+
+ // if not starting a comment now, then done
+ int c = peek();
+ if (c != '/' || c == EndOfInput)
+ return;
+
+ // skip potential comment
+ foundNonSpaceTab = true;
+ if (! consumeComment())
+ return;
+
+ } while (true);
+}
+
+// Returns true if there was non-white space (e.g., a comment, newline) before the #version
+// or no #version was found; otherwise, returns false. There is no error case; it always
+// succeeds, but will leave version == 0 if no #version was found.
+//
+// Sets notFirstToken based on whether tokens (beyond white space and comments)
+// appeared before the #version.
+//
+// N.B. does not attempt to leave input in any particular known state. The assumption
+// is that scanning will start anew, following the rules for the chosen version/profile,
+// and with a corresponding parsing context.
+//
+bool TInputScanner::scanVersion(int& version, EProfile& profile, bool& notFirstToken)
+{
+ // This function doesn't have to get all the semantics correct,
+ // just find the #version if there is a correct one present.
+ // The preprocessor will have the responsibility of getting all the semantics right.
+
+ bool versionNotFirst = false; // means not first WRT comments and white space, nothing more
+ notFirstToken = false; // means not first WRT real tokens
+ version = 0; // means not found
+ profile = ENoProfile;
+
+ bool foundNonSpaceTab = false;
+ bool lookingInMiddle = false;
+ int c;
+ do {
+ if (lookingInMiddle) {
+ notFirstToken = true;
+ // make forward progress by finishing off the current line plus extra new lines
+ if (peek() == '\n' || peek() == '\r') {
+ while (peek() == '\n' || peek() == '\r')
+ get();
+ } else
+ do {
+ c = get();
+ } while (c != EndOfInput && c != '\n' && c != '\r');
+ while (peek() == '\n' || peek() == '\r')
+ get();
+ if (peek() == EndOfInput)
+ return true;
+ }
+ lookingInMiddle = true;
+
+ // Nominal start, skipping the comments and white space allowed on desktop,
+ // but tracking whether something else was found, for the ES case:
+ consumeWhitespaceComment(foundNonSpaceTab);
+ if (foundNonSpaceTab)
+ versionNotFirst = true;
+
+ // "#"
+ if (get() != '#') {
+ versionNotFirst = true;
+ continue;
+ }
+
+ // whitespace
+ do {
+ c = get();
+ } while (c == ' ' || c == '\t');
+
+ // "version"
+ if ( c != 'v' ||
+ get() != 'e' ||
+ get() != 'r' ||
+ get() != 's' ||
+ get() != 'i' ||
+ get() != 'o' ||
+ get() != 'n') {
+ versionNotFirst = true;
+ continue;
+ }
+
+ // whitespace
+ do {
+ c = get();
+ } while (c == ' ' || c == '\t');
+
+ // version number
+ while (c >= '0' && c <= '9') {
+ version = 10 * version + (c - '0');
+ c = get();
+ }
+ if (version == 0) {
+ versionNotFirst = true;
+ continue;
+ }
+
+ // whitespace
+ while (c == ' ' || c == '\t')
+ c = get();
+
+ // profile
+ const int maxProfileLength = 13; // not including any 0
+ char profileString[maxProfileLength];
+ int profileLength;
+ for (profileLength = 0; profileLength < maxProfileLength; ++profileLength) {
+ if (c == EndOfInput || c == ' ' || c == '\t' || c == '\n' || c == '\r')
+ break;
+ profileString[profileLength] = (char)c;
+ c = get();
+ }
+ if (c != EndOfInput && c != ' ' && c != '\t' && c != '\n' && c != '\r') {
+ versionNotFirst = true;
+ continue;
+ }
+
+ if (profileLength == 2 && strncmp(profileString, "es", profileLength) == 0)
+ profile = EEsProfile;
+ else if (profileLength == 4 && strncmp(profileString, "core", profileLength) == 0)
+ profile = ECoreProfile;
+ else if (profileLength == 13 && strncmp(profileString, "compatibility", profileLength) == 0)
+ profile = ECompatibilityProfile;
+
+ return versionNotFirst;
+ } while (true);
+}
+
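+// For example (illustrative): a string whose very first line is "#version 310 es"
+// yields version == 310, profile == EEsProfile, and a false return value, while
+// a string containing no #version directive returns true and leaves version == 0.
+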
+// Fill this in when doing glslang-level scanning, to hand back to the parser.
+class TParserToken {
+public:
+ explicit TParserToken(YYSTYPE& b) : sType(b) { }
+
+ YYSTYPE& sType;
+protected:
+ TParserToken(TParserToken&);
+ TParserToken& operator=(TParserToken&);
+};
+
+} // end namespace glslang
+
+// This is the function the glslang parser (i.e., bison) calls to get its next token
+int yylex(YYSTYPE* glslangTokenDesc, glslang::TParseContext& parseContext)
+{
+ glslang::TParserToken token(*glslangTokenDesc);
+
+ return parseContext.getScanContext()->tokenize(parseContext.getPpContext(), token);
+}
+
+namespace {
+
+struct str_eq
+{
+ bool operator()(const char* lhs, const char* rhs) const
+ {
+ return strcmp(lhs, rhs) == 0;
+ }
+};
+
+struct str_hash
+{
+ size_t operator()(const char* str) const
+ {
+ // djb2
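+ // equivalently hash = hash * 33 + c; (hash << 5) + hash is the multiply by 33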
+ unsigned long hash = 5381;
+ int c;
+
+ while ((c = *str++) != 0)
+ hash = ((hash << 5) + hash) + c;
+
+ return hash;
+ }
+};
+
+// A single global usable by all threads, by all versions, by all languages.
+// After a single process-level initialization, this is read only and thread safe.
+std::unordered_map<const char*, int, str_hash, str_eq>* KeywordMap = nullptr;
+std::unordered_set<const char*, str_hash, str_eq>* ReservedSet = nullptr;
+
+} // end anonymous namespace
+
+namespace glslang {
+
+void TScanContext::fillInKeywordMap()
+{
+ if (KeywordMap != nullptr) {
+ // this is really an error, as this should be called only once per process;
+ // the only risk is if two threads call it simultaneously
+ return;
+ }
+ KeywordMap = new std::unordered_map<const char*, int, str_hash, str_eq>;
+
+ (*KeywordMap)["const"] = CONST;
+ (*KeywordMap)["uniform"] = UNIFORM;
+ (*KeywordMap)["nonuniformEXT"] = NONUNIFORM;
+ (*KeywordMap)["in"] = IN;
+ (*KeywordMap)["out"] = OUT;
+ (*KeywordMap)["inout"] = INOUT;
+ (*KeywordMap)["struct"] = STRUCT;
+ (*KeywordMap)["break"] = BREAK;
+ (*KeywordMap)["continue"] = CONTINUE;
+ (*KeywordMap)["do"] = DO;
+ (*KeywordMap)["for"] = FOR;
+ (*KeywordMap)["while"] = WHILE;
+ (*KeywordMap)["switch"] = SWITCH;
+ (*KeywordMap)["case"] = CASE;
+ (*KeywordMap)["default"] = DEFAULT;
+ (*KeywordMap)["if"] = IF;
+ (*KeywordMap)["else"] = ELSE;
+ (*KeywordMap)["discard"] = DISCARD;
+ (*KeywordMap)["return"] = RETURN;
+ (*KeywordMap)["void"] = VOID;
+ (*KeywordMap)["bool"] = BOOL;
+ (*KeywordMap)["float"] = FLOAT;
+ (*KeywordMap)["int"] = INT;
+ (*KeywordMap)["bvec2"] = BVEC2;
+ (*KeywordMap)["bvec3"] = BVEC3;
+ (*KeywordMap)["bvec4"] = BVEC4;
+ (*KeywordMap)["vec2"] = VEC2;
+ (*KeywordMap)["vec3"] = VEC3;
+ (*KeywordMap)["vec4"] = VEC4;
+ (*KeywordMap)["ivec2"] = IVEC2;
+ (*KeywordMap)["ivec3"] = IVEC3;
+ (*KeywordMap)["ivec4"] = IVEC4;
+ (*KeywordMap)["mat2"] = MAT2;
+ (*KeywordMap)["mat3"] = MAT3;
+ (*KeywordMap)["mat4"] = MAT4;
+ (*KeywordMap)["true"] = BOOLCONSTANT;
+ (*KeywordMap)["false"] = BOOLCONSTANT;
+ (*KeywordMap)["attribute"] = ATTRIBUTE;
+ (*KeywordMap)["varying"] = VARYING;
+ (*KeywordMap)["buffer"] = BUFFER;
+ (*KeywordMap)["coherent"] = COHERENT;
+ (*KeywordMap)["devicecoherent"] = DEVICECOHERENT;
+ (*KeywordMap)["queuefamilycoherent"] = QUEUEFAMILYCOHERENT;
+ (*KeywordMap)["workgroupcoherent"] = WORKGROUPCOHERENT;
+ (*KeywordMap)["subgroupcoherent"] = SUBGROUPCOHERENT;
+ (*KeywordMap)["nonprivate"] = NONPRIVATE;
+ (*KeywordMap)["restrict"] = RESTRICT;
+ (*KeywordMap)["readonly"] = READONLY;
+ (*KeywordMap)["writeonly"] = WRITEONLY;
+ (*KeywordMap)["atomic_uint"] = ATOMIC_UINT;
+ (*KeywordMap)["volatile"] = VOLATILE;
+ (*KeywordMap)["layout"] = LAYOUT;
+ (*KeywordMap)["shared"] = SHARED;
+ (*KeywordMap)["patch"] = PATCH;
+ (*KeywordMap)["sample"] = SAMPLE;
+ (*KeywordMap)["subroutine"] = SUBROUTINE;
+ (*KeywordMap)["highp"] = HIGH_PRECISION;
+ (*KeywordMap)["mediump"] = MEDIUM_PRECISION;
+ (*KeywordMap)["lowp"] = LOW_PRECISION;
+ (*KeywordMap)["precision"] = PRECISION;
+ (*KeywordMap)["mat2x2"] = MAT2X2;
+ (*KeywordMap)["mat2x3"] = MAT2X3;
+ (*KeywordMap)["mat2x4"] = MAT2X4;
+ (*KeywordMap)["mat3x2"] = MAT3X2;
+ (*KeywordMap)["mat3x3"] = MAT3X3;
+ (*KeywordMap)["mat3x4"] = MAT3X4;
+ (*KeywordMap)["mat4x2"] = MAT4X2;
+ (*KeywordMap)["mat4x3"] = MAT4X3;
+ (*KeywordMap)["mat4x4"] = MAT4X4;
+ (*KeywordMap)["dmat2"] = DMAT2;
+ (*KeywordMap)["dmat3"] = DMAT3;
+ (*KeywordMap)["dmat4"] = DMAT4;
+ (*KeywordMap)["dmat2x2"] = DMAT2X2;
+ (*KeywordMap)["dmat2x3"] = DMAT2X3;
+ (*KeywordMap)["dmat2x4"] = DMAT2X4;
+ (*KeywordMap)["dmat3x2"] = DMAT3X2;
+ (*KeywordMap)["dmat3x3"] = DMAT3X3;
+ (*KeywordMap)["dmat3x4"] = DMAT3X4;
+ (*KeywordMap)["dmat4x2"] = DMAT4X2;
+ (*KeywordMap)["dmat4x3"] = DMAT4X3;
+ (*KeywordMap)["dmat4x4"] = DMAT4X4;
+ (*KeywordMap)["image1D"] = IMAGE1D;
+ (*KeywordMap)["iimage1D"] = IIMAGE1D;
+ (*KeywordMap)["uimage1D"] = UIMAGE1D;
+ (*KeywordMap)["image2D"] = IMAGE2D;
+ (*KeywordMap)["iimage2D"] = IIMAGE2D;
+ (*KeywordMap)["uimage2D"] = UIMAGE2D;
+ (*KeywordMap)["image3D"] = IMAGE3D;
+ (*KeywordMap)["iimage3D"] = IIMAGE3D;
+ (*KeywordMap)["uimage3D"] = UIMAGE3D;
+ (*KeywordMap)["image2DRect"] = IMAGE2DRECT;
+ (*KeywordMap)["iimage2DRect"] = IIMAGE2DRECT;
+ (*KeywordMap)["uimage2DRect"] = UIMAGE2DRECT;
+ (*KeywordMap)["imageCube"] = IMAGECUBE;
+ (*KeywordMap)["iimageCube"] = IIMAGECUBE;
+ (*KeywordMap)["uimageCube"] = UIMAGECUBE;
+ (*KeywordMap)["imageBuffer"] = IMAGEBUFFER;
+ (*KeywordMap)["iimageBuffer"] = IIMAGEBUFFER;
+ (*KeywordMap)["uimageBuffer"] = UIMAGEBUFFER;
+ (*KeywordMap)["image1DArray"] = IMAGE1DARRAY;
+ (*KeywordMap)["iimage1DArray"] = IIMAGE1DARRAY;
+ (*KeywordMap)["uimage1DArray"] = UIMAGE1DARRAY;
+ (*KeywordMap)["image2DArray"] = IMAGE2DARRAY;
+ (*KeywordMap)["iimage2DArray"] = IIMAGE2DARRAY;
+ (*KeywordMap)["uimage2DArray"] = UIMAGE2DARRAY;
+ (*KeywordMap)["imageCubeArray"] = IMAGECUBEARRAY;
+ (*KeywordMap)["iimageCubeArray"] = IIMAGECUBEARRAY;
+ (*KeywordMap)["uimageCubeArray"] = UIMAGECUBEARRAY;
+ (*KeywordMap)["image2DMS"] = IMAGE2DMS;
+ (*KeywordMap)["iimage2DMS"] = IIMAGE2DMS;
+ (*KeywordMap)["uimage2DMS"] = UIMAGE2DMS;
+ (*KeywordMap)["image2DMSArray"] = IMAGE2DMSARRAY;
+ (*KeywordMap)["iimage2DMSArray"] = IIMAGE2DMSARRAY;
+ (*KeywordMap)["uimage2DMSArray"] = UIMAGE2DMSARRAY;
+ (*KeywordMap)["double"] = DOUBLE;
+ (*KeywordMap)["dvec2"] = DVEC2;
+ (*KeywordMap)["dvec3"] = DVEC3;
+ (*KeywordMap)["dvec4"] = DVEC4;
+ (*KeywordMap)["uint"] = UINT;
+ (*KeywordMap)["uvec2"] = UVEC2;
+ (*KeywordMap)["uvec3"] = UVEC3;
+ (*KeywordMap)["uvec4"] = UVEC4;
+
+ (*KeywordMap)["int64_t"] = INT64_T;
+ (*KeywordMap)["uint64_t"] = UINT64_T;
+ (*KeywordMap)["i64vec2"] = I64VEC2;
+ (*KeywordMap)["i64vec3"] = I64VEC3;
+ (*KeywordMap)["i64vec4"] = I64VEC4;
+ (*KeywordMap)["u64vec2"] = U64VEC2;
+ (*KeywordMap)["u64vec3"] = U64VEC3;
+ (*KeywordMap)["u64vec4"] = U64VEC4;
+
+ // GL_EXT_shader_explicit_arithmetic_types
+ (*KeywordMap)["int8_t"] = INT8_T;
+ (*KeywordMap)["i8vec2"] = I8VEC2;
+ (*KeywordMap)["i8vec3"] = I8VEC3;
+ (*KeywordMap)["i8vec4"] = I8VEC4;
+ (*KeywordMap)["uint8_t"] = UINT8_T;
+ (*KeywordMap)["u8vec2"] = U8VEC2;
+ (*KeywordMap)["u8vec3"] = U8VEC3;
+ (*KeywordMap)["u8vec4"] = U8VEC4;
+
+ (*KeywordMap)["int16_t"] = INT16_T;
+ (*KeywordMap)["i16vec2"] = I16VEC2;
+ (*KeywordMap)["i16vec3"] = I16VEC3;
+ (*KeywordMap)["i16vec4"] = I16VEC4;
+ (*KeywordMap)["uint16_t"] = UINT16_T;
+ (*KeywordMap)["u16vec2"] = U16VEC2;
+ (*KeywordMap)["u16vec3"] = U16VEC3;
+ (*KeywordMap)["u16vec4"] = U16VEC4;
+
+ (*KeywordMap)["int32_t"] = INT32_T;
+ (*KeywordMap)["i32vec2"] = I32VEC2;
+ (*KeywordMap)["i32vec3"] = I32VEC3;
+ (*KeywordMap)["i32vec4"] = I32VEC4;
+ (*KeywordMap)["uint32_t"] = UINT32_T;
+ (*KeywordMap)["u32vec2"] = U32VEC2;
+ (*KeywordMap)["u32vec3"] = U32VEC3;
+ (*KeywordMap)["u32vec4"] = U32VEC4;
+
+ (*KeywordMap)["float16_t"] = FLOAT16_T;
+ (*KeywordMap)["f16vec2"] = F16VEC2;
+ (*KeywordMap)["f16vec3"] = F16VEC3;
+ (*KeywordMap)["f16vec4"] = F16VEC4;
+ (*KeywordMap)["f16mat2"] = F16MAT2;
+ (*KeywordMap)["f16mat3"] = F16MAT3;
+ (*KeywordMap)["f16mat4"] = F16MAT4;
+ (*KeywordMap)["f16mat2x2"] = F16MAT2X2;
+ (*KeywordMap)["f16mat2x3"] = F16MAT2X3;
+ (*KeywordMap)["f16mat2x4"] = F16MAT2X4;
+ (*KeywordMap)["f16mat3x2"] = F16MAT3X2;
+ (*KeywordMap)["f16mat3x3"] = F16MAT3X3;
+ (*KeywordMap)["f16mat3x4"] = F16MAT3X4;
+ (*KeywordMap)["f16mat4x2"] = F16MAT4X2;
+ (*KeywordMap)["f16mat4x3"] = F16MAT4X3;
+ (*KeywordMap)["f16mat4x4"] = F16MAT4X4;
+
+ (*KeywordMap)["float32_t"] = FLOAT32_T;
+ (*KeywordMap)["f32vec2"] = F32VEC2;
+ (*KeywordMap)["f32vec3"] = F32VEC3;
+ (*KeywordMap)["f32vec4"] = F32VEC4;
+ (*KeywordMap)["f32mat2"] = F32MAT2;
+ (*KeywordMap)["f32mat3"] = F32MAT3;
+ (*KeywordMap)["f32mat4"] = F32MAT4;
+ (*KeywordMap)["f32mat2x2"] = F32MAT2X2;
+ (*KeywordMap)["f32mat2x3"] = F32MAT2X3;
+ (*KeywordMap)["f32mat2x4"] = F32MAT2X4;
+ (*KeywordMap)["f32mat3x2"] = F32MAT3X2;
+ (*KeywordMap)["f32mat3x3"] = F32MAT3X3;
+ (*KeywordMap)["f32mat3x4"] = F32MAT3X4;
+ (*KeywordMap)["f32mat4x2"] = F32MAT4X2;
+ (*KeywordMap)["f32mat4x3"] = F32MAT4X3;
+ (*KeywordMap)["f32mat4x4"] = F32MAT4X4;
+ (*KeywordMap)["float64_t"] = FLOAT64_T;
+ (*KeywordMap)["f64vec2"] = F64VEC2;
+ (*KeywordMap)["f64vec3"] = F64VEC3;
+ (*KeywordMap)["f64vec4"] = F64VEC4;
+ (*KeywordMap)["f64mat2"] = F64MAT2;
+ (*KeywordMap)["f64mat3"] = F64MAT3;
+ (*KeywordMap)["f64mat4"] = F64MAT4;
+ (*KeywordMap)["f64mat2x2"] = F64MAT2X2;
+ (*KeywordMap)["f64mat2x3"] = F64MAT2X3;
+ (*KeywordMap)["f64mat2x4"] = F64MAT2X4;
+ (*KeywordMap)["f64mat3x2"] = F64MAT3X2;
+ (*KeywordMap)["f64mat3x3"] = F64MAT3X3;
+ (*KeywordMap)["f64mat3x4"] = F64MAT3X4;
+ (*KeywordMap)["f64mat4x2"] = F64MAT4X2;
+ (*KeywordMap)["f64mat4x3"] = F64MAT4X3;
+ (*KeywordMap)["f64mat4x4"] = F64MAT4X4;
+
+ (*KeywordMap)["sampler2D"] = SAMPLER2D;
+ (*KeywordMap)["samplerCube"] = SAMPLERCUBE;
+ (*KeywordMap)["samplerCubeArray"] = SAMPLERCUBEARRAY;
+ (*KeywordMap)["samplerCubeArrayShadow"] = SAMPLERCUBEARRAYSHADOW;
+ (*KeywordMap)["isamplerCubeArray"] = ISAMPLERCUBEARRAY;
+ (*KeywordMap)["usamplerCubeArray"] = USAMPLERCUBEARRAY;
+ (*KeywordMap)["sampler1DArrayShadow"] = SAMPLER1DARRAYSHADOW;
+ (*KeywordMap)["isampler1DArray"] = ISAMPLER1DARRAY;
+ (*KeywordMap)["usampler1D"] = USAMPLER1D;
+ (*KeywordMap)["isampler1D"] = ISAMPLER1D;
+ (*KeywordMap)["usampler1DArray"] = USAMPLER1DARRAY;
+ (*KeywordMap)["samplerBuffer"] = SAMPLERBUFFER;
+ (*KeywordMap)["samplerCubeShadow"] = SAMPLERCUBESHADOW;
+ (*KeywordMap)["sampler2DArray"] = SAMPLER2DARRAY;
+ (*KeywordMap)["sampler2DArrayShadow"] = SAMPLER2DARRAYSHADOW;
+ (*KeywordMap)["isampler2D"] = ISAMPLER2D;
+ (*KeywordMap)["isampler3D"] = ISAMPLER3D;
+ (*KeywordMap)["isamplerCube"] = ISAMPLERCUBE;
+ (*KeywordMap)["isampler2DArray"] = ISAMPLER2DARRAY;
+ (*KeywordMap)["usampler2D"] = USAMPLER2D;
+ (*KeywordMap)["usampler3D"] = USAMPLER3D;
+ (*KeywordMap)["usamplerCube"] = USAMPLERCUBE;
+ (*KeywordMap)["usampler2DArray"] = USAMPLER2DARRAY;
+ (*KeywordMap)["isampler2DRect"] = ISAMPLER2DRECT;
+ (*KeywordMap)["usampler2DRect"] = USAMPLER2DRECT;
+ (*KeywordMap)["isamplerBuffer"] = ISAMPLERBUFFER;
+ (*KeywordMap)["usamplerBuffer"] = USAMPLERBUFFER;
+ (*KeywordMap)["sampler2DMS"] = SAMPLER2DMS;
+ (*KeywordMap)["isampler2DMS"] = ISAMPLER2DMS;
+ (*KeywordMap)["usampler2DMS"] = USAMPLER2DMS;
+ (*KeywordMap)["sampler2DMSArray"] = SAMPLER2DMSARRAY;
+ (*KeywordMap)["isampler2DMSArray"] = ISAMPLER2DMSARRAY;
+ (*KeywordMap)["usampler2DMSArray"] = USAMPLER2DMSARRAY;
+ (*KeywordMap)["sampler1D"] = SAMPLER1D;
+ (*KeywordMap)["sampler1DShadow"] = SAMPLER1DSHADOW;
+ (*KeywordMap)["sampler3D"] = SAMPLER3D;
+ (*KeywordMap)["sampler2DShadow"] = SAMPLER2DSHADOW;
+ (*KeywordMap)["sampler2DRect"] = SAMPLER2DRECT;
+ (*KeywordMap)["sampler2DRectShadow"] = SAMPLER2DRECTSHADOW;
+ (*KeywordMap)["sampler1DArray"] = SAMPLER1DARRAY;
+
+ (*KeywordMap)["samplerExternalOES"] = SAMPLEREXTERNALOES; // GL_OES_EGL_image_external
+
+ (*KeywordMap)["__samplerExternal2DY2YEXT"] = SAMPLEREXTERNAL2DY2YEXT; // GL_EXT_YUV_target
+
+ (*KeywordMap)["sampler"] = SAMPLER;
+ (*KeywordMap)["samplerShadow"] = SAMPLERSHADOW;
+
+ (*KeywordMap)["texture2D"] = TEXTURE2D;
+ (*KeywordMap)["textureCube"] = TEXTURECUBE;
+ (*KeywordMap)["textureCubeArray"] = TEXTURECUBEARRAY;
+ (*KeywordMap)["itextureCubeArray"] = ITEXTURECUBEARRAY;
+ (*KeywordMap)["utextureCubeArray"] = UTEXTURECUBEARRAY;
+ (*KeywordMap)["itexture1DArray"] = ITEXTURE1DARRAY;
+ (*KeywordMap)["utexture1D"] = UTEXTURE1D;
+ (*KeywordMap)["itexture1D"] = ITEXTURE1D;
+ (*KeywordMap)["utexture1DArray"] = UTEXTURE1DARRAY;
+ (*KeywordMap)["textureBuffer"] = TEXTUREBUFFER;
+ (*KeywordMap)["texture2DArray"] = TEXTURE2DARRAY;
+ (*KeywordMap)["itexture2D"] = ITEXTURE2D;
+ (*KeywordMap)["itexture3D"] = ITEXTURE3D;
+ (*KeywordMap)["itextureCube"] = ITEXTURECUBE;
+ (*KeywordMap)["itexture2DArray"] = ITEXTURE2DARRAY;
+ (*KeywordMap)["utexture2D"] = UTEXTURE2D;
+ (*KeywordMap)["utexture3D"] = UTEXTURE3D;
+ (*KeywordMap)["utextureCube"] = UTEXTURECUBE;
+ (*KeywordMap)["utexture2DArray"] = UTEXTURE2DARRAY;
+ (*KeywordMap)["itexture2DRect"] = ITEXTURE2DRECT;
+ (*KeywordMap)["utexture2DRect"] = UTEXTURE2DRECT;
+ (*KeywordMap)["itextureBuffer"] = ITEXTUREBUFFER;
+ (*KeywordMap)["utextureBuffer"] = UTEXTUREBUFFER;
+ (*KeywordMap)["texture2DMS"] = TEXTURE2DMS;
+ (*KeywordMap)["itexture2DMS"] = ITEXTURE2DMS;
+ (*KeywordMap)["utexture2DMS"] = UTEXTURE2DMS;
+ (*KeywordMap)["texture2DMSArray"] = TEXTURE2DMSARRAY;
+ (*KeywordMap)["itexture2DMSArray"] = ITEXTURE2DMSARRAY;
+ (*KeywordMap)["utexture2DMSArray"] = UTEXTURE2DMSARRAY;
+ (*KeywordMap)["texture1D"] = TEXTURE1D;
+ (*KeywordMap)["texture3D"] = TEXTURE3D;
+ (*KeywordMap)["texture2DRect"] = TEXTURE2DRECT;
+ (*KeywordMap)["texture1DArray"] = TEXTURE1DARRAY;
+
+ (*KeywordMap)["subpassInput"] = SUBPASSINPUT;
+ (*KeywordMap)["subpassInputMS"] = SUBPASSINPUTMS;
+ (*KeywordMap)["isubpassInput"] = ISUBPASSINPUT;
+ (*KeywordMap)["isubpassInputMS"] = ISUBPASSINPUTMS;
+ (*KeywordMap)["usubpassInput"] = USUBPASSINPUT;
+ (*KeywordMap)["usubpassInputMS"] = USUBPASSINPUTMS;
+
+#ifdef AMD_EXTENSIONS
+ (*KeywordMap)["f16sampler1D"] = F16SAMPLER1D;
+ (*KeywordMap)["f16sampler2D"] = F16SAMPLER2D;
+ (*KeywordMap)["f16sampler3D"] = F16SAMPLER3D;
+ (*KeywordMap)["f16sampler2DRect"] = F16SAMPLER2DRECT;
+ (*KeywordMap)["f16samplerCube"] = F16SAMPLERCUBE;
+ (*KeywordMap)["f16sampler1DArray"] = F16SAMPLER1DARRAY;
+ (*KeywordMap)["f16sampler2DArray"] = F16SAMPLER2DARRAY;
+ (*KeywordMap)["f16samplerCubeArray"] = F16SAMPLERCUBEARRAY;
+ (*KeywordMap)["f16samplerBuffer"] = F16SAMPLERBUFFER;
+ (*KeywordMap)["f16sampler2DMS"] = F16SAMPLER2DMS;
+ (*KeywordMap)["f16sampler2DMSArray"] = F16SAMPLER2DMSARRAY;
+ (*KeywordMap)["f16sampler1DShadow"] = F16SAMPLER1DSHADOW;
+ (*KeywordMap)["f16sampler2DShadow"] = F16SAMPLER2DSHADOW;
+ (*KeywordMap)["f16sampler2DRectShadow"] = F16SAMPLER2DRECTSHADOW;
+ (*KeywordMap)["f16samplerCubeShadow"] = F16SAMPLERCUBESHADOW;
+ (*KeywordMap)["f16sampler1DArrayShadow"] = F16SAMPLER1DARRAYSHADOW;
+ (*KeywordMap)["f16sampler2DArrayShadow"] = F16SAMPLER2DARRAYSHADOW;
+ (*KeywordMap)["f16samplerCubeArrayShadow"] = F16SAMPLERCUBEARRAYSHADOW;
+
+ (*KeywordMap)["f16image1D"] = F16IMAGE1D;
+ (*KeywordMap)["f16image2D"] = F16IMAGE2D;
+ (*KeywordMap)["f16image3D"] = F16IMAGE3D;
+ (*KeywordMap)["f16image2DRect"] = F16IMAGE2DRECT;
+ (*KeywordMap)["f16imageCube"] = F16IMAGECUBE;
+ (*KeywordMap)["f16image1DArray"] = F16IMAGE1DARRAY;
+ (*KeywordMap)["f16image2DArray"] = F16IMAGE2DARRAY;
+ (*KeywordMap)["f16imageCubeArray"] = F16IMAGECUBEARRAY;
+ (*KeywordMap)["f16imageBuffer"] = F16IMAGEBUFFER;
+ (*KeywordMap)["f16image2DMS"] = F16IMAGE2DMS;
+ (*KeywordMap)["f16image2DMSArray"] = F16IMAGE2DMSARRAY;
+
+ (*KeywordMap)["f16texture1D"] = F16TEXTURE1D;
+ (*KeywordMap)["f16texture2D"] = F16TEXTURE2D;
+ (*KeywordMap)["f16texture3D"] = F16TEXTURE3D;
+ (*KeywordMap)["f16texture2DRect"] = F16TEXTURE2DRECT;
+ (*KeywordMap)["f16textureCube"] = F16TEXTURECUBE;
+ (*KeywordMap)["f16texture1DArray"] = F16TEXTURE1DARRAY;
+ (*KeywordMap)["f16texture2DArray"] = F16TEXTURE2DARRAY;
+ (*KeywordMap)["f16textureCubeArray"] = F16TEXTURECUBEARRAY;
+ (*KeywordMap)["f16textureBuffer"] = F16TEXTUREBUFFER;
+ (*KeywordMap)["f16texture2DMS"] = F16TEXTURE2DMS;
+ (*KeywordMap)["f16texture2DMSArray"] = F16TEXTURE2DMSARRAY;
+
+ (*KeywordMap)["f16subpassInput"] = F16SUBPASSINPUT;
+ (*KeywordMap)["f16subpassInputMS"] = F16SUBPASSINPUTMS;
+#endif
+
+ (*KeywordMap)["noperspective"] = NOPERSPECTIVE;
+ (*KeywordMap)["smooth"] = SMOOTH;
+ (*KeywordMap)["flat"] = FLAT;
+#ifdef AMD_EXTENSIONS
+ (*KeywordMap)["__explicitInterpAMD"] = EXPLICITINTERPAMD;
+#endif
+ (*KeywordMap)["centroid"] = CENTROID;
+#ifdef NV_EXTENSIONS
+ (*KeywordMap)["pervertexNV"] = PERVERTEXNV;
+#endif
+ (*KeywordMap)["precise"] = PRECISE;
+ (*KeywordMap)["invariant"] = INVARIANT;
+ (*KeywordMap)["packed"] = PACKED;
+ (*KeywordMap)["resource"] = RESOURCE;
+ (*KeywordMap)["superp"] = SUPERP;
+
+#ifdef NV_EXTENSIONS
+ (*KeywordMap)["rayPayloadNV"] = PAYLOADNV;
+ (*KeywordMap)["rayPayloadInNV"] = PAYLOADINNV;
+ (*KeywordMap)["hitAttributeNV"] = HITATTRNV;
+ (*KeywordMap)["callableDataNV"] = CALLDATANV;
+ (*KeywordMap)["callableDataInNV"] = CALLDATAINNV;
+ (*KeywordMap)["accelerationStructureNV"] = ACCSTRUCTNV;
+ (*KeywordMap)["perprimitiveNV"] = PERPRIMITIVENV;
+ (*KeywordMap)["perviewNV"] = PERVIEWNV;
+ (*KeywordMap)["taskNV"] = PERTASKNV;
+#endif
+
+ (*KeywordMap)["fcoopmatNV"] = FCOOPMATNV;
+
+ ReservedSet = new std::unordered_set<const char*, str_hash, str_eq>;
+
+ ReservedSet->insert("common");
+ ReservedSet->insert("partition");
+ ReservedSet->insert("active");
+ ReservedSet->insert("asm");
+ ReservedSet->insert("class");
+ ReservedSet->insert("union");
+ ReservedSet->insert("enum");
+ ReservedSet->insert("typedef");
+ ReservedSet->insert("template");
+ ReservedSet->insert("this");
+ ReservedSet->insert("goto");
+ ReservedSet->insert("inline");
+ ReservedSet->insert("noinline");
+ ReservedSet->insert("public");
+ ReservedSet->insert("static");
+ ReservedSet->insert("extern");
+ ReservedSet->insert("external");
+ ReservedSet->insert("interface");
+ ReservedSet->insert("long");
+ ReservedSet->insert("short");
+ ReservedSet->insert("half");
+ ReservedSet->insert("fixed");
+ ReservedSet->insert("unsigned");
+ ReservedSet->insert("input");
+ ReservedSet->insert("output");
+ ReservedSet->insert("hvec2");
+ ReservedSet->insert("hvec3");
+ ReservedSet->insert("hvec4");
+ ReservedSet->insert("fvec2");
+ ReservedSet->insert("fvec3");
+ ReservedSet->insert("fvec4");
+ ReservedSet->insert("sampler3DRect");
+ ReservedSet->insert("filter");
+ ReservedSet->insert("sizeof");
+ ReservedSet->insert("cast");
+ ReservedSet->insert("namespace");
+ ReservedSet->insert("using");
+}
+
+void TScanContext::deleteKeywordMap()
+{
+ delete KeywordMap;
+ KeywordMap = nullptr;
+ delete ReservedSet;
+ ReservedSet = nullptr;
+}
+
+// Called by yylex to get the next token.
+// Returning 0 implies end of input.
+int TScanContext::tokenize(TPpContext* pp, TParserToken& token)
+{
+ do {
+ parserToken = &token;
+ TPpToken ppToken;
+ int token = pp->tokenize(ppToken);
+ if (token == EndOfInput)
+ return 0;
+
+ tokenText = ppToken.name;
+ loc = ppToken.loc;
+ parserToken->sType.lex.loc = loc;
+ switch (token) {
+ case ';': afterType = false; afterBuffer = false; return SEMICOLON;
+ case ',': afterType = false; return COMMA;
+ case ':': return COLON;
+ case '=': afterType = false; return EQUAL;
+ case '(': afterType = false; return LEFT_PAREN;
+ case ')': afterType = false; return RIGHT_PAREN;
+ case '.': field = true; return DOT;
+ case '!': return BANG;
+ case '-': return DASH;
+ case '~': return TILDE;
+ case '+': return PLUS;
+ case '*': return STAR;
+ case '/': return SLASH;
+ case '%': return PERCENT;
+ case '<': return LEFT_ANGLE;
+ case '>': return RIGHT_ANGLE;
+ case '|': return VERTICAL_BAR;
+ case '^': return CARET;
+ case '&': return AMPERSAND;
+ case '?': return QUESTION;
+ case '[': return LEFT_BRACKET;
+ case ']': return RIGHT_BRACKET;
+ case '{': afterStruct = false; afterBuffer = false; return LEFT_BRACE;
+ case '}': return RIGHT_BRACE;
+ case '\\':
+ parseContext.error(loc, "illegal use of escape character", "\\", "");
+ break;
+
+ case PPAtomAddAssign: return ADD_ASSIGN;
+ case PPAtomSubAssign: return SUB_ASSIGN;
+ case PPAtomMulAssign: return MUL_ASSIGN;
+ case PPAtomDivAssign: return DIV_ASSIGN;
+ case PPAtomModAssign: return MOD_ASSIGN;
+
+ case PpAtomRight: return RIGHT_OP;
+ case PpAtomLeft: return LEFT_OP;
+
+ case PpAtomRightAssign: return RIGHT_ASSIGN;
+ case PpAtomLeftAssign: return LEFT_ASSIGN;
+ case PpAtomAndAssign: return AND_ASSIGN;
+ case PpAtomOrAssign: return OR_ASSIGN;
+ case PpAtomXorAssign: return XOR_ASSIGN;
+
+ case PpAtomAnd: return AND_OP;
+ case PpAtomOr: return OR_OP;
+ case PpAtomXor: return XOR_OP;
+
+ case PpAtomEQ: return EQ_OP;
+ case PpAtomGE: return GE_OP;
+ case PpAtomNE: return NE_OP;
+ case PpAtomLE: return LE_OP;
+
+ case PpAtomDecrement: return DEC_OP;
+ case PpAtomIncrement: return INC_OP;
+
+ case PpAtomColonColon:
+ parseContext.error(loc, "not supported", "::", "");
+ break;
+
+ case PpAtomConstInt: parserToken->sType.lex.i = ppToken.ival; return INTCONSTANT;
+ case PpAtomConstUint: parserToken->sType.lex.i = ppToken.ival; return UINTCONSTANT;
+ case PpAtomConstInt16: parserToken->sType.lex.i = ppToken.ival; return INT16CONSTANT;
+ case PpAtomConstUint16: parserToken->sType.lex.i = ppToken.ival; return UINT16CONSTANT;
+ case PpAtomConstInt64: parserToken->sType.lex.i64 = ppToken.i64val; return INT64CONSTANT;
+ case PpAtomConstUint64: parserToken->sType.lex.i64 = ppToken.i64val; return UINT64CONSTANT;
+ case PpAtomConstFloat: parserToken->sType.lex.d = ppToken.dval; return FLOATCONSTANT;
+ case PpAtomConstDouble: parserToken->sType.lex.d = ppToken.dval; return DOUBLECONSTANT;
+ case PpAtomConstFloat16: parserToken->sType.lex.d = ppToken.dval; return FLOAT16CONSTANT;
+ case PpAtomIdentifier:
+ {
+ int token = tokenizeIdentifier();
+ field = false;
+ return token;
+ }
+
+ case EndOfInput: return 0;
+
+ default:
+ char buf[2];
+ buf[0] = (char)token;
+ buf[1] = 0;
+ parseContext.error(loc, "unexpected token", buf, "");
+ break;
+ }
+ } while (true);
+}
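+
+// A minimal usage sketch (hypothetical driver, not part of glslang): the
+// parser layer calls tokenize() in a loop until it returns 0 at end of input.
+//
+//   TParserToken token;
+//   while (int t = scanContext.tokenize(&ppContext, token)) {
+//       feedParser(t, token); // hypothetical consumer of (token id, payload)
+//   }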
+
+int TScanContext::tokenizeIdentifier()
+{
+ if (ReservedSet->find(tokenText) != ReservedSet->end())
+ return reservedWord();
+
+ auto it = KeywordMap->find(tokenText);
+ if (it == KeywordMap->end()) {
+ // Should have an identifier of some sort
+ return identifierOrType();
+ }
+ keyword = it->second;
+
+ switch (keyword) {
+ case CONST:
+ case UNIFORM:
+ case IN:
+ case OUT:
+ case INOUT:
+ case BREAK:
+ case CONTINUE:
+ case DO:
+ case FOR:
+ case WHILE:
+ case IF:
+ case ELSE:
+ case DISCARD:
+ case RETURN:
+ case CASE:
+ return keyword;
+
+ case STRUCT:
+ afterStruct = true;
+ return keyword;
+
+ case NONUNIFORM:
+ if (parseContext.extensionTurnedOn(E_GL_EXT_nonuniform_qualifier))
+ return keyword;
+ else
+ return identifierOrType();
+
+ case SWITCH:
+ case DEFAULT:
+ if ((parseContext.profile == EEsProfile && parseContext.version < 300) ||
+ (parseContext.profile != EEsProfile && parseContext.version < 130))
+ reservedWord();
+ return keyword;
+
+ case VOID:
+ case BOOL:
+ case FLOAT:
+ case INT:
+ case BVEC2:
+ case BVEC3:
+ case BVEC4:
+ case VEC2:
+ case VEC3:
+ case VEC4:
+ case IVEC2:
+ case IVEC3:
+ case IVEC4:
+ case MAT2:
+ case MAT3:
+ case MAT4:
+ case SAMPLER2D:
+ case SAMPLERCUBE:
+ afterType = true;
+ return keyword;
+
+ case BOOLCONSTANT:
+ if (strcmp("true", tokenText) == 0)
+ parserToken->sType.lex.b = true;
+ else
+ parserToken->sType.lex.b = false;
+ return keyword;
+
+ case ATTRIBUTE:
+ case VARYING:
+ if (parseContext.profile == EEsProfile && parseContext.version >= 300)
+ reservedWord();
+ return keyword;
+
+ case BUFFER:
+ afterBuffer = true;
+ if ((parseContext.profile == EEsProfile && parseContext.version < 310) ||
+ (parseContext.profile != EEsProfile && parseContext.version < 430))
+ return identifierOrType();
+ return keyword;
+
+#ifdef NV_EXTENSIONS
+ case PAYLOADNV:
+ case PAYLOADINNV:
+ case HITATTRNV:
+ case CALLDATANV:
+ case CALLDATAINNV:
+ case ACCSTRUCTNV:
+ if (parseContext.symbolTable.atBuiltInLevel() ||
+ (parseContext.profile != EEsProfile && parseContext.version >= 460
+ && parseContext.extensionTurnedOn(E_GL_NV_ray_tracing)))
+ return keyword;
+ return identifierOrType();
+#endif
+
+ case ATOMIC_UINT:
+ if ((parseContext.profile == EEsProfile && parseContext.version >= 310) ||
+ parseContext.extensionTurnedOn(E_GL_ARB_shader_atomic_counters))
+ return keyword;
+ return es30ReservedFromGLSL(420);
+
+ case COHERENT:
+ case DEVICECOHERENT:
+ case QUEUEFAMILYCOHERENT:
+ case WORKGROUPCOHERENT:
+ case SUBGROUPCOHERENT:
+ case NONPRIVATE:
+ case RESTRICT:
+ case READONLY:
+ case WRITEONLY:
+ if (parseContext.profile == EEsProfile && parseContext.version >= 310)
+ return keyword;
+ return es30ReservedFromGLSL(parseContext.extensionTurnedOn(E_GL_ARB_shader_image_load_store) ? 130 : 420);
+
+ case VOLATILE:
+ if (parseContext.profile == EEsProfile && parseContext.version >= 310)
+ return keyword;
+ if (! parseContext.symbolTable.atBuiltInLevel() && (parseContext.profile == EEsProfile ||
+ (parseContext.version < 420 && ! parseContext.extensionTurnedOn(E_GL_ARB_shader_image_load_store))))
+ reservedWord();
+ return keyword;
+
+ case LAYOUT:
+ {
+ const int numLayoutExts = 2;
+ const char* layoutExts[numLayoutExts] = { E_GL_ARB_shading_language_420pack,
+ E_GL_ARB_explicit_attrib_location };
+ if ((parseContext.profile == EEsProfile && parseContext.version < 300) ||
+ (parseContext.profile != EEsProfile && parseContext.version < 140 &&
+ ! parseContext.extensionsTurnedOn(numLayoutExts, layoutExts)))
+ return identifierOrType();
+ return keyword;
+ }
+ case SHARED:
+ if ((parseContext.profile == EEsProfile && parseContext.version < 300) ||
+ (parseContext.profile != EEsProfile && parseContext.version < 140))
+ return identifierOrType();
+ return keyword;
+
+ case PATCH:
+ if (parseContext.symbolTable.atBuiltInLevel() ||
+ (parseContext.profile == EEsProfile &&
+ (parseContext.version >= 320 ||
+ parseContext.extensionsTurnedOn(Num_AEP_tessellation_shader, AEP_tessellation_shader))) ||
+ (parseContext.profile != EEsProfile && parseContext.extensionTurnedOn(E_GL_ARB_tessellation_shader)))
+ return keyword;
+
+ return es30ReservedFromGLSL(400);
+
+ case SAMPLE:
+ if ((parseContext.profile == EEsProfile && parseContext.version >= 320) ||
+ parseContext.extensionsTurnedOn(1, &E_GL_OES_shader_multisample_interpolation))
+ return keyword;
+ return es30ReservedFromGLSL(400);
+
+ case SUBROUTINE:
+ return es30ReservedFromGLSL(400);
+
+ case HIGH_PRECISION:
+ case MEDIUM_PRECISION:
+ case LOW_PRECISION:
+ case PRECISION:
+ return precisionKeyword();
+
+ case MAT2X2:
+ case MAT2X3:
+ case MAT2X4:
+ case MAT3X2:
+ case MAT3X3:
+ case MAT3X4:
+ case MAT4X2:
+ case MAT4X3:
+ case MAT4X4:
+ return matNxM();
+
+ case DMAT2:
+ case DMAT3:
+ case DMAT4:
+ case DMAT2X2:
+ case DMAT2X3:
+ case DMAT2X4:
+ case DMAT3X2:
+ case DMAT3X3:
+ case DMAT3X4:
+ case DMAT4X2:
+ case DMAT4X3:
+ case DMAT4X4:
+ return dMat();
+
+ case IMAGE1D:
+ case IIMAGE1D:
+ case UIMAGE1D:
+ case IMAGE1DARRAY:
+ case IIMAGE1DARRAY:
+ case UIMAGE1DARRAY:
+ case IMAGE2DRECT:
+ case IIMAGE2DRECT:
+ case UIMAGE2DRECT:
+ afterType = true;
+ return firstGenerationImage(false);
+
+ case IMAGEBUFFER:
+ case IIMAGEBUFFER:
+ case UIMAGEBUFFER:
+ afterType = true;
+ if ((parseContext.profile == EEsProfile && parseContext.version >= 320) ||
+ parseContext.extensionsTurnedOn(Num_AEP_texture_buffer, AEP_texture_buffer))
+ return keyword;
+ return firstGenerationImage(false);
+
+ case IMAGE2D:
+ case IIMAGE2D:
+ case UIMAGE2D:
+ case IMAGE3D:
+ case IIMAGE3D:
+ case UIMAGE3D:
+ case IMAGECUBE:
+ case IIMAGECUBE:
+ case UIMAGECUBE:
+ case IMAGE2DARRAY:
+ case IIMAGE2DARRAY:
+ case UIMAGE2DARRAY:
+ afterType = true;
+ return firstGenerationImage(true);
+
+ case IMAGECUBEARRAY:
+ case IIMAGECUBEARRAY:
+ case UIMAGECUBEARRAY:
+ afterType = true;
+ if ((parseContext.profile == EEsProfile && parseContext.version >= 320) ||
+ parseContext.extensionsTurnedOn(Num_AEP_texture_cube_map_array, AEP_texture_cube_map_array))
+ return keyword;
+ return secondGenerationImage();
+
+ case IMAGE2DMS:
+ case IIMAGE2DMS:
+ case UIMAGE2DMS:
+ case IMAGE2DMSARRAY:
+ case IIMAGE2DMSARRAY:
+ case UIMAGE2DMSARRAY:
+ afterType = true;
+ return secondGenerationImage();
+
+ case DOUBLE:
+ case DVEC2:
+ case DVEC3:
+ case DVEC4:
+ afterType = true;
+ if (parseContext.profile == EEsProfile || parseContext.version < 400)
+ reservedWord();
+ return keyword;
+
+ case INT64_T:
+ case UINT64_T:
+ case I64VEC2:
+ case I64VEC3:
+ case I64VEC4:
+ case U64VEC2:
+ case U64VEC3:
+ case U64VEC4:
+ afterType = true;
+ if (parseContext.symbolTable.atBuiltInLevel() ||
+ (parseContext.profile != EEsProfile && parseContext.version >= 450 &&
+ (parseContext.extensionTurnedOn(E_GL_ARB_gpu_shader_int64) ||
+ parseContext.extensionTurnedOn(E_GL_EXT_shader_explicit_arithmetic_types) ||
+ parseContext.extensionTurnedOn(E_GL_EXT_shader_explicit_arithmetic_types_int64))))
+ return keyword;
+ return identifierOrType();
+
+ case INT8_T:
+ case UINT8_T:
+ case I8VEC2:
+ case I8VEC3:
+ case I8VEC4:
+ case U8VEC2:
+ case U8VEC3:
+ case U8VEC4:
+ afterType = true;
+ if (parseContext.symbolTable.atBuiltInLevel() ||
+ ((parseContext.extensionTurnedOn(E_GL_EXT_shader_explicit_arithmetic_types) ||
+ parseContext.extensionTurnedOn(E_GL_EXT_shader_8bit_storage) ||
+ parseContext.extensionTurnedOn(E_GL_EXT_shader_explicit_arithmetic_types_int8)) &&
+ parseContext.profile != EEsProfile && parseContext.version >= 450))
+ return keyword;
+ return identifierOrType();
+
+ case INT16_T:
+ case UINT16_T:
+ case I16VEC2:
+ case I16VEC3:
+ case I16VEC4:
+ case U16VEC2:
+ case U16VEC3:
+ case U16VEC4:
+ afterType = true;
+ if (parseContext.symbolTable.atBuiltInLevel() ||
+ (parseContext.profile != EEsProfile && parseContext.version >= 450 &&
+ (
+#ifdef AMD_EXTENSIONS
+ parseContext.extensionTurnedOn(E_GL_AMD_gpu_shader_int16) ||
+#endif
+ parseContext.extensionTurnedOn(E_GL_EXT_shader_16bit_storage) ||
+ parseContext.extensionTurnedOn(E_GL_EXT_shader_explicit_arithmetic_types) ||
+ parseContext.extensionTurnedOn(E_GL_EXT_shader_explicit_arithmetic_types_int16))))
+ return keyword;
+ return identifierOrType();
+ case INT32_T:
+ case UINT32_T:
+ case I32VEC2:
+ case I32VEC3:
+ case I32VEC4:
+ case U32VEC2:
+ case U32VEC3:
+ case U32VEC4:
+ afterType = true;
+ if (parseContext.symbolTable.atBuiltInLevel() ||
+ ((parseContext.extensionTurnedOn(E_GL_EXT_shader_explicit_arithmetic_types) ||
+ parseContext.extensionTurnedOn(E_GL_EXT_shader_explicit_arithmetic_types_int32)) &&
+ parseContext.profile != EEsProfile && parseContext.version >= 450))
+ return keyword;
+ return identifierOrType();
+ case FLOAT32_T:
+ case F32VEC2:
+ case F32VEC3:
+ case F32VEC4:
+ case F32MAT2:
+ case F32MAT3:
+ case F32MAT4:
+ case F32MAT2X2:
+ case F32MAT2X3:
+ case F32MAT2X4:
+ case F32MAT3X2:
+ case F32MAT3X3:
+ case F32MAT3X4:
+ case F32MAT4X2:
+ case F32MAT4X3:
+ case F32MAT4X4:
+ afterType = true;
+ if (parseContext.symbolTable.atBuiltInLevel() ||
+ ((parseContext.extensionTurnedOn(E_GL_EXT_shader_explicit_arithmetic_types) ||
+ parseContext.extensionTurnedOn(E_GL_EXT_shader_explicit_arithmetic_types_float32)) &&
+ parseContext.profile != EEsProfile && parseContext.version >= 450))
+ return keyword;
+ return identifierOrType();
+
+ case FLOAT64_T:
+ case F64VEC2:
+ case F64VEC3:
+ case F64VEC4:
+ case F64MAT2:
+ case F64MAT3:
+ case F64MAT4:
+ case F64MAT2X2:
+ case F64MAT2X3:
+ case F64MAT2X4:
+ case F64MAT3X2:
+ case F64MAT3X3:
+ case F64MAT3X4:
+ case F64MAT4X2:
+ case F64MAT4X3:
+ case F64MAT4X4:
+ afterType = true;
+ if (parseContext.symbolTable.atBuiltInLevel() ||
+ ((parseContext.extensionTurnedOn(E_GL_EXT_shader_explicit_arithmetic_types) ||
+ parseContext.extensionTurnedOn(E_GL_EXT_shader_explicit_arithmetic_types_float64)) &&
+ parseContext.profile != EEsProfile && parseContext.version >= 450))
+ return keyword;
+ return identifierOrType();
+
+ case FLOAT16_T:
+ case F16VEC2:
+ case F16VEC3:
+ case F16VEC4:
+ afterType = true;
+ if (parseContext.symbolTable.atBuiltInLevel() ||
+ (parseContext.profile != EEsProfile && parseContext.version >= 450 &&
+ (
+#ifdef AMD_EXTENSIONS
+ parseContext.extensionTurnedOn(E_GL_AMD_gpu_shader_half_float) ||
+#endif
+ parseContext.extensionTurnedOn(E_GL_EXT_shader_16bit_storage) ||
+ parseContext.extensionTurnedOn(E_GL_EXT_shader_explicit_arithmetic_types) ||
+ parseContext.extensionTurnedOn(E_GL_EXT_shader_explicit_arithmetic_types_float16))))
+ return keyword;
+
+ return identifierOrType();
+
+ case F16MAT2:
+ case F16MAT3:
+ case F16MAT4:
+ case F16MAT2X2:
+ case F16MAT2X3:
+ case F16MAT2X4:
+ case F16MAT3X2:
+ case F16MAT3X3:
+ case F16MAT3X4:
+ case F16MAT4X2:
+ case F16MAT4X3:
+ case F16MAT4X4:
+ afterType = true;
+ if (parseContext.symbolTable.atBuiltInLevel() ||
+ (parseContext.profile != EEsProfile && parseContext.version >= 450 &&
+ (
+#ifdef AMD_EXTENSIONS
+ parseContext.extensionTurnedOn(E_GL_AMD_gpu_shader_half_float) ||
+#endif
+ parseContext.extensionTurnedOn(E_GL_EXT_shader_explicit_arithmetic_types) ||
+ parseContext.extensionTurnedOn(E_GL_EXT_shader_explicit_arithmetic_types_float16))))
+ return keyword;
+
+ return identifierOrType();
+
+ case SAMPLERCUBEARRAY:
+ case SAMPLERCUBEARRAYSHADOW:
+ case ISAMPLERCUBEARRAY:
+ case USAMPLERCUBEARRAY:
+ afterType = true;
+ if ((parseContext.profile == EEsProfile && parseContext.version >= 320) ||
+ parseContext.extensionsTurnedOn(Num_AEP_texture_cube_map_array, AEP_texture_cube_map_array))
+ return keyword;
+ if (parseContext.profile == EEsProfile || (parseContext.version < 400 && ! parseContext.extensionTurnedOn(E_GL_ARB_texture_cube_map_array)))
+ reservedWord();
+ return keyword;
+
+ case ISAMPLER1D:
+ case ISAMPLER1DARRAY:
+ case SAMPLER1DARRAYSHADOW:
+ case USAMPLER1D:
+ case USAMPLER1DARRAY:
+ afterType = true;
+ return es30ReservedFromGLSL(130);
+
+ case UINT:
+ case UVEC2:
+ case UVEC3:
+ case UVEC4:
+ case SAMPLERCUBESHADOW:
+ case SAMPLER2DARRAY:
+ case SAMPLER2DARRAYSHADOW:
+ case ISAMPLER2D:
+ case ISAMPLER3D:
+ case ISAMPLERCUBE:
+ case ISAMPLER2DARRAY:
+ case USAMPLER2D:
+ case USAMPLER3D:
+ case USAMPLERCUBE:
+ case USAMPLER2DARRAY:
+ afterType = true;
+ return nonreservedKeyword(300, 130);
+
+ case ISAMPLER2DRECT:
+ case USAMPLER2DRECT:
+ afterType = true;
+ return es30ReservedFromGLSL(140);
+
+ case SAMPLERBUFFER:
+ afterType = true;
+ if ((parseContext.profile == EEsProfile && parseContext.version >= 320) ||
+ parseContext.extensionsTurnedOn(Num_AEP_texture_buffer, AEP_texture_buffer))
+ return keyword;
+ return es30ReservedFromGLSL(130);
+
+ case ISAMPLERBUFFER:
+ case USAMPLERBUFFER:
+ afterType = true;
+ if ((parseContext.profile == EEsProfile && parseContext.version >= 320) ||
+ parseContext.extensionsTurnedOn(Num_AEP_texture_buffer, AEP_texture_buffer))
+ return keyword;
+ return es30ReservedFromGLSL(140);
+
+ case SAMPLER2DMS:
+ case ISAMPLER2DMS:
+ case USAMPLER2DMS:
+ afterType = true;
+ if (parseContext.profile == EEsProfile && parseContext.version >= 310)
+ return keyword;
+ return es30ReservedFromGLSL(150);
+
+ case SAMPLER2DMSARRAY:
+ case ISAMPLER2DMSARRAY:
+ case USAMPLER2DMSARRAY:
+ afterType = true;
+ if ((parseContext.profile == EEsProfile && parseContext.version >= 320) ||
+ parseContext.extensionsTurnedOn(1, &E_GL_OES_texture_storage_multisample_2d_array))
+ return keyword;
+ return es30ReservedFromGLSL(150);
+
+ case SAMPLER1D:
+ case SAMPLER1DSHADOW:
+ afterType = true;
+ if (parseContext.profile == EEsProfile)
+ reservedWord();
+ return keyword;
+
+ case SAMPLER3D:
+ afterType = true;
+ if (parseContext.profile == EEsProfile && parseContext.version < 300) {
+ if (!parseContext.extensionTurnedOn(E_GL_OES_texture_3D))
+ reservedWord();
+ }
+ return keyword;
+
+ case SAMPLER2DSHADOW:
+ afterType = true;
+ if (parseContext.profile == EEsProfile && parseContext.version < 300) {
+ if (!parseContext.extensionTurnedOn(E_GL_EXT_shadow_samplers))
+ reservedWord();
+ }
+ return keyword;
+
+ case SAMPLER2DRECT:
+ case SAMPLER2DRECTSHADOW:
+ afterType = true;
+ if (parseContext.profile == EEsProfile)
+ reservedWord();
+ else if (parseContext.version < 140 && ! parseContext.symbolTable.atBuiltInLevel() && ! parseContext.extensionTurnedOn(E_GL_ARB_texture_rectangle)) {
+ if (parseContext.relaxedErrors())
+ parseContext.requireExtensions(loc, 1, &E_GL_ARB_texture_rectangle, "texture-rectangle sampler keyword");
+ else
+ reservedWord();
+ }
+ return keyword;
+
+ case SAMPLER1DARRAY:
+ afterType = true;
+ if (parseContext.profile == EEsProfile && parseContext.version == 300)
+ reservedWord();
+ else if ((parseContext.profile == EEsProfile && parseContext.version < 300) ||
+ (parseContext.profile != EEsProfile && parseContext.version < 130))
+ return identifierOrType();
+ return keyword;
+
+ case SAMPLEREXTERNALOES:
+ afterType = true;
+ if (parseContext.symbolTable.atBuiltInLevel() ||
+ parseContext.extensionTurnedOn(E_GL_OES_EGL_image_external) ||
+ parseContext.extensionTurnedOn(E_GL_OES_EGL_image_external_essl3))
+ return keyword;
+ return identifierOrType();
+
+ case SAMPLEREXTERNAL2DY2YEXT:
+ afterType = true;
+ if (parseContext.symbolTable.atBuiltInLevel() ||
+ parseContext.extensionTurnedOn(E_GL_EXT_YUV_target))
+ return keyword;
+ return identifierOrType();
+
+ case TEXTURE2D:
+ case TEXTURECUBE:
+ case TEXTURECUBEARRAY:
+ case ITEXTURECUBEARRAY:
+ case UTEXTURECUBEARRAY:
+ case ITEXTURE1DARRAY:
+ case UTEXTURE1D:
+ case ITEXTURE1D:
+ case UTEXTURE1DARRAY:
+ case TEXTUREBUFFER:
+ case TEXTURE2DARRAY:
+ case ITEXTURE2D:
+ case ITEXTURE3D:
+ case ITEXTURECUBE:
+ case ITEXTURE2DARRAY:
+ case UTEXTURE2D:
+ case UTEXTURE3D:
+ case UTEXTURECUBE:
+ case UTEXTURE2DARRAY:
+ case ITEXTURE2DRECT:
+ case UTEXTURE2DRECT:
+ case ITEXTUREBUFFER:
+ case UTEXTUREBUFFER:
+ case TEXTURE2DMS:
+ case ITEXTURE2DMS:
+ case UTEXTURE2DMS:
+ case TEXTURE2DMSARRAY:
+ case ITEXTURE2DMSARRAY:
+ case UTEXTURE2DMSARRAY:
+ case TEXTURE1D:
+ case TEXTURE3D:
+ case TEXTURE2DRECT:
+ case TEXTURE1DARRAY:
+ case SAMPLER:
+ case SAMPLERSHADOW:
+ if (parseContext.spvVersion.vulkan > 0)
+ return keyword;
+ else
+ return identifierOrType();
+
+ case SUBPASSINPUT:
+ case SUBPASSINPUTMS:
+ case ISUBPASSINPUT:
+ case ISUBPASSINPUTMS:
+ case USUBPASSINPUT:
+ case USUBPASSINPUTMS:
+ if (parseContext.spvVersion.vulkan > 0)
+ return keyword;
+ else
+ return identifierOrType();
+
+#ifdef AMD_EXTENSIONS
+ case F16SAMPLER1D:
+ case F16SAMPLER2D:
+ case F16SAMPLER3D:
+ case F16SAMPLER2DRECT:
+ case F16SAMPLERCUBE:
+ case F16SAMPLER1DARRAY:
+ case F16SAMPLER2DARRAY:
+ case F16SAMPLERCUBEARRAY:
+ case F16SAMPLERBUFFER:
+ case F16SAMPLER2DMS:
+ case F16SAMPLER2DMSARRAY:
+ case F16SAMPLER1DSHADOW:
+ case F16SAMPLER2DSHADOW:
+ case F16SAMPLER1DARRAYSHADOW:
+ case F16SAMPLER2DARRAYSHADOW:
+ case F16SAMPLER2DRECTSHADOW:
+ case F16SAMPLERCUBESHADOW:
+ case F16SAMPLERCUBEARRAYSHADOW:
+
+ case F16IMAGE1D:
+ case F16IMAGE2D:
+ case F16IMAGE3D:
+ case F16IMAGE2DRECT:
+ case F16IMAGECUBE:
+ case F16IMAGE1DARRAY:
+ case F16IMAGE2DARRAY:
+ case F16IMAGECUBEARRAY:
+ case F16IMAGEBUFFER:
+ case F16IMAGE2DMS:
+ case F16IMAGE2DMSARRAY:
+
+ case F16TEXTURE1D:
+ case F16TEXTURE2D:
+ case F16TEXTURE3D:
+ case F16TEXTURE2DRECT:
+ case F16TEXTURECUBE:
+ case F16TEXTURE1DARRAY:
+ case F16TEXTURE2DARRAY:
+ case F16TEXTURECUBEARRAY:
+ case F16TEXTUREBUFFER:
+ case F16TEXTURE2DMS:
+ case F16TEXTURE2DMSARRAY:
+
+ case F16SUBPASSINPUT:
+ case F16SUBPASSINPUTMS:
+ afterType = true;
+ if (parseContext.symbolTable.atBuiltInLevel() ||
+ (parseContext.extensionTurnedOn(E_GL_AMD_gpu_shader_half_float_fetch) &&
+ parseContext.profile != EEsProfile && parseContext.version >= 450))
+ return keyword;
+ return identifierOrType();
+#endif
+
+ case NOPERSPECTIVE:
+#ifdef NV_EXTENSIONS
+ if (parseContext.profile == EEsProfile && parseContext.version >= 300 &&
+ parseContext.extensionTurnedOn(E_GL_NV_shader_noperspective_interpolation))
+ return keyword;
+#endif
+ return es30ReservedFromGLSL(130);
+
+ case SMOOTH:
+ if ((parseContext.profile == EEsProfile && parseContext.version < 300) ||
+ (parseContext.profile != EEsProfile && parseContext.version < 130))
+ return identifierOrType();
+ return keyword;
+
+#ifdef AMD_EXTENSIONS
+ case EXPLICITINTERPAMD:
+ if (parseContext.profile != EEsProfile && parseContext.version >= 450 &&
+ parseContext.extensionTurnedOn(E_GL_AMD_shader_explicit_vertex_parameter))
+ return keyword;
+ return identifierOrType();
+#endif
+
+#ifdef NV_EXTENSIONS
+ case PERVERTEXNV:
+ if (((parseContext.profile != EEsProfile && parseContext.version >= 450) ||
+ (parseContext.profile == EEsProfile && parseContext.version >= 320)) &&
+ parseContext.extensionTurnedOn(E_GL_NV_fragment_shader_barycentric))
+ return keyword;
+ return identifierOrType();
+#endif
+
+ case FLAT:
+ if (parseContext.profile == EEsProfile && parseContext.version < 300)
+ reservedWord();
+ else if (parseContext.profile != EEsProfile && parseContext.version < 130)
+ return identifierOrType();
+ return keyword;
+
+ case CENTROID:
+ if (parseContext.version < 120)
+ return identifierOrType();
+ return keyword;
+
+ case PRECISE:
+ if ((parseContext.profile == EEsProfile &&
+ (parseContext.version >= 320 || parseContext.extensionsTurnedOn(Num_AEP_gpu_shader5, AEP_gpu_shader5))) ||
+ (parseContext.profile != EEsProfile && parseContext.version >= 400))
+ return keyword;
+ if (parseContext.profile == EEsProfile && parseContext.version == 310) {
+ reservedWord();
+ return keyword;
+ }
+ return identifierOrType();
+
+ case INVARIANT:
+ if (parseContext.profile != EEsProfile && parseContext.version < 120)
+ return identifierOrType();
+ return keyword;
+
+ case PACKED:
+ if ((parseContext.profile == EEsProfile && parseContext.version < 300) ||
+ (parseContext.profile != EEsProfile && parseContext.version < 330))
+ return reservedWord();
+ return identifierOrType();
+
+ case RESOURCE:
+ {
+ bool reserved = (parseContext.profile == EEsProfile && parseContext.version >= 300) ||
+ (parseContext.profile != EEsProfile && parseContext.version >= 420);
+ return identifierOrReserved(reserved);
+ }
+ case SUPERP:
+ {
+ bool reserved = parseContext.profile == EEsProfile || parseContext.version >= 130;
+ return identifierOrReserved(reserved);
+ }
+
+#ifdef NV_EXTENSIONS
+ case PERPRIMITIVENV:
+ case PERVIEWNV:
+ case PERTASKNV:
+ if ((parseContext.profile != EEsProfile && parseContext.version >= 450) ||
+ (parseContext.profile == EEsProfile && parseContext.version >= 320) ||
+ parseContext.extensionTurnedOn(E_GL_NV_mesh_shader))
+ return keyword;
+ return identifierOrType();
+#endif
+
+ case FCOOPMATNV:
+ afterType = true;
+ if (parseContext.symbolTable.atBuiltInLevel() ||
+ parseContext.extensionTurnedOn(E_GL_NV_cooperative_matrix))
+ return keyword;
+ return identifierOrType();
+
+ default:
+ parseContext.infoSink.info.message(EPrefixInternalError, "Unknown glslang keyword", loc);
+ return 0;
+ }
+}
+
+int TScanContext::identifierOrType()
+{
+ parserToken->sType.lex.string = NewPoolTString(tokenText);
+ if (field)
+ return IDENTIFIER;
+
+ parserToken->sType.lex.symbol = parseContext.symbolTable.find(*parserToken->sType.lex.string);
+ if ((afterType == false && afterStruct == false) && parserToken->sType.lex.symbol != nullptr) {
+ if (const TVariable* variable = parserToken->sType.lex.symbol->getAsVariable()) {
+ if (variable->isUserType() &&
+ // treat redeclaration of forward-declared buffer/uniform reference as an identifier
+ !(variable->getType().getBasicType() == EbtReference && afterBuffer)) {
+ afterType = true;
+
+ return TYPE_NAME;
+ }
+ }
+ }
+
+ return IDENTIFIER;
+}
+
+// Give an error for use of a reserved symbol.
+// However, allow built-in declarations to use reserved words, to allow
+// extension support before the extension is enabled.
+int TScanContext::reservedWord()
+{
+ if (! parseContext.symbolTable.atBuiltInLevel())
+ parseContext.error(loc, "Reserved word.", tokenText, "", "");
+
+ return 0;
+}
+
+int TScanContext::identifierOrReserved(bool reserved)
+{
+ if (reserved) {
+ reservedWord();
+
+ return 0;
+ }
+
+ if (parseContext.forwardCompatible)
+ parseContext.warn(loc, "using future reserved keyword", tokenText, "");
+
+ return identifierOrType();
+}
+
+// For keywords that suddenly showed up on non-ES (not previously reserved)
+// but then got reserved by ES 3.0.
+int TScanContext::es30ReservedFromGLSL(int version)
+{
+ if (parseContext.symbolTable.atBuiltInLevel())
+ return keyword;
+
+ if ((parseContext.profile == EEsProfile && parseContext.version < 300) ||
+ (parseContext.profile != EEsProfile && parseContext.version < version)) {
+ if (parseContext.forwardCompatible)
+ parseContext.warn(loc, "future reserved word in ES 300 and keyword in GLSL", tokenText, "");
+
+ return identifierOrType();
+ } else if (parseContext.profile == EEsProfile && parseContext.version >= 300)
+ reservedWord();
+
+ return keyword;
+}
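+
+// Worked example (illustrative): SUBROUTINE routes here with version 400, so
+// "subroutine" scans as an identifier in ES < 300 and in desktop GLSL < 400
+// (with a warning under forwardCompatible), is a reserved-word error in
+// ES >= 300, and is the SUBROUTINE keyword in desktop GLSL >= 400.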
+
+// For a keyword that was never reserved, until it suddenly
+// showed up, both in an ES version and a non-ES version.
+int TScanContext::nonreservedKeyword(int esVersion, int nonEsVersion)
+{
+ if ((parseContext.profile == EEsProfile && parseContext.version < esVersion) ||
+ (parseContext.profile != EEsProfile && parseContext.version < nonEsVersion)) {
+ if (parseContext.forwardCompatible)
+ parseContext.warn(loc, "using future keyword", tokenText, "");
+
+ return identifierOrType();
+ }
+
+ return keyword;
+}
+
+int TScanContext::precisionKeyword()
+{
+ if (parseContext.profile == EEsProfile || parseContext.version >= 130)
+ return keyword;
+
+ if (parseContext.forwardCompatible)
+ parseContext.warn(loc, "using ES precision qualifier keyword", tokenText, "");
+
+ return identifierOrType();
+}
+
+int TScanContext::matNxM()
+{
+ afterType = true;
+
+ if (parseContext.version > 110)
+ return keyword;
+
+ if (parseContext.forwardCompatible)
+ parseContext.warn(loc, "using future non-square matrix type keyword", tokenText, "");
+
+ return identifierOrType();
+}
+
+int TScanContext::dMat()
+{
+ afterType = true;
+
+ if (parseContext.profile == EEsProfile && parseContext.version >= 300) {
+ reservedWord();
+
+ return keyword;
+ }
+
+ if (parseContext.profile != EEsProfile && parseContext.version >= 400)
+ return keyword;
+
+ if (parseContext.forwardCompatible)
+ parseContext.warn(loc, "using future type keyword", tokenText, "");
+
+ return identifierOrType();
+}
+
+int TScanContext::firstGenerationImage(bool inEs310)
+{
+ if (parseContext.symbolTable.atBuiltInLevel() ||
+ (parseContext.profile != EEsProfile && (parseContext.version >= 420 ||
+ parseContext.extensionTurnedOn(E_GL_ARB_shader_image_load_store))) ||
+ (inEs310 && parseContext.profile == EEsProfile && parseContext.version >= 310))
+ return keyword;
+
+ if ((parseContext.profile == EEsProfile && parseContext.version >= 300) ||
+ (parseContext.profile != EEsProfile && parseContext.version >= 130)) {
+ reservedWord();
+
+ return keyword;
+ }
+
+ if (parseContext.forwardCompatible)
+ parseContext.warn(loc, "using future type keyword", tokenText, "");
+
+ return identifierOrType();
+}
+
+int TScanContext::secondGenerationImage()
+{
+ if (parseContext.profile == EEsProfile && parseContext.version >= 310) {
+ reservedWord();
+ return keyword;
+ }
+
+ if (parseContext.symbolTable.atBuiltInLevel() ||
+ (parseContext.profile != EEsProfile &&
+ (parseContext.version >= 420 || parseContext.extensionTurnedOn(E_GL_ARB_shader_image_load_store))))
+ return keyword;
+
+ if (parseContext.forwardCompatible)
+ parseContext.warn(loc, "using future type keyword", tokenText, "");
+
+ return identifierOrType();
+}
+
+} // end namespace glslang
diff --git a/thirdparty/glslang/glslang/MachineIndependent/Scan.h b/thirdparty/glslang/glslang/MachineIndependent/Scan.h
new file mode 100644
index 0000000000..24b75cf7ca
--- /dev/null
+++ b/thirdparty/glslang/glslang/MachineIndependent/Scan.h
@@ -0,0 +1,276 @@
+//
+// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+// Copyright (C) 2013 LunarG, Inc.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+#ifndef _GLSLANG_SCAN_INCLUDED_
+#define _GLSLANG_SCAN_INCLUDED_
+
+#include "Versions.h"
+
+namespace glslang {
+
+// Use a global end-of-input character, so no translation is needed across
+// layers of encapsulation. Characters are all 8 bit, and positive, so there is
+// no aliasing of character 255 onto -1, for example.
+const int EndOfInput = -1;
+
+//
+// A character scanner that seamlessly, on read-only strings, reads across an
+// array of strings without assuming null termination.
+//
+class TInputScanner {
+public:
+ TInputScanner(int n, const char* const s[], size_t L[], const char* const* names = nullptr,
+ int b = 0, int f = 0, bool single = false) :
+ numSources(n),
+ // up to this point, common usage is "char*", but now we need positive 8-bit characters
+ sources(reinterpret_cast<const unsigned char* const *>(s)),
+ lengths(L), currentSource(0), currentChar(0), stringBias(b), finale(f), singleLogical(single),
+ endOfFileReached(false)
+ {
+ loc = new TSourceLoc[numSources];
+ for (int i = 0; i < numSources; ++i) {
+ loc[i].init(i - stringBias);
+ }
+ if (names != nullptr) {
+ for (int i = 0; i < numSources; ++i)
+ loc[i].name = names[i] != nullptr ? NewPoolTString(names[i]) : nullptr;
+ }
+ loc[currentSource].line = 1;
+ logicalSourceLoc.init(1);
+ logicalSourceLoc.name = loc[0].name;
+ }
+
+ virtual ~TInputScanner()
+ {
+ delete [] loc;
+ }
+
+ // retrieve the next character and advance one character
+ int get()
+ {
+ int ret = peek();
+ if (ret == EndOfInput)
+ return ret;
+ ++loc[currentSource].column;
+ ++logicalSourceLoc.column;
+ if (ret == '\n') {
+ ++loc[currentSource].line;
+ ++logicalSourceLoc.line;
+ logicalSourceLoc.column = 0;
+ loc[currentSource].column = 0;
+ }
+ advance();
+
+ return ret;
+ }
+
+ // retrieve the next character, no advance
+ int peek()
+ {
+ if (currentSource >= numSources) {
+ endOfFileReached = true;
+ return EndOfInput;
+ }
+ // Make sure we do not read off the end of a string.
+ // N.B. Sources can have a length of 0.
+ int sourceToRead = currentSource;
+ size_t charToRead = currentChar;
+ while (charToRead >= lengths[sourceToRead]) {
+ charToRead = 0;
+ sourceToRead += 1;
+ if (sourceToRead >= numSources) {
+ return EndOfInput;
+ }
+ }
+
+ // Here, we care about making negative valued characters positive
+ return sources[sourceToRead][charToRead];
+ }
+
+ // go back one character
+ void unget()
+ {
+ // Do not roll back once we've reached the end of the file.
+ if (endOfFileReached)
+ return;
+
+ if (currentChar > 0) {
+ --currentChar;
+ --loc[currentSource].column;
+ --logicalSourceLoc.column;
+ if (loc[currentSource].column < 0) {
+ // We've moved back past a new line. Find the
+ // previous newline (or start of the file) to compute
+ // the column count on the now current line.
+ size_t chIndex = currentChar;
+ while (chIndex > 0) {
+ if (sources[currentSource][chIndex] == '\n') {
+ break;
+ }
+ --chIndex;
+ }
+ logicalSourceLoc.column = (int)(currentChar - chIndex);
+ loc[currentSource].column = (int)(currentChar - chIndex);
+ }
+ } else {
+ do {
+ --currentSource;
+ } while (currentSource > 0 && lengths[currentSource] == 0);
+ if (lengths[currentSource] == 0) {
+ // set to 0 if we've backed up to the start of an empty string
+ currentChar = 0;
+ } else
+ currentChar = lengths[currentSource] - 1;
+ }
+ if (peek() == '\n') {
+ --loc[currentSource].line;
+ --logicalSourceLoc.line;
+ }
+ }
+
+ // for #line override
+ void setLine(int newLine)
+ {
+ logicalSourceLoc.line = newLine;
+ loc[getLastValidSourceIndex()].line = newLine;
+ }
+
+ // for #line override in filename based parsing
+ void setFile(const char* filename)
+ {
+ TString* fn_tstr = NewPoolTString(filename);
+ logicalSourceLoc.name = fn_tstr;
+ loc[getLastValidSourceIndex()].name = fn_tstr;
+ }
+
+ void setFile(const char* filename, int i)
+ {
+ TString* fn_tstr = NewPoolTString(filename);
+ if (i == getLastValidSourceIndex()) {
+ logicalSourceLoc.name = fn_tstr;
+ }
+ loc[i].name = fn_tstr;
+ }
+
+ void setString(int newString)
+ {
+ logicalSourceLoc.string = newString;
+ loc[getLastValidSourceIndex()].string = newString;
+ logicalSourceLoc.name = nullptr;
+ loc[getLastValidSourceIndex()].name = nullptr;
+ }
+
+ // for #include content indentation
+ void setColumn(int col)
+ {
+ logicalSourceLoc.column = col;
+ loc[getLastValidSourceIndex()].column = col;
+ }
+
+ void setEndOfInput()
+ {
+ endOfFileReached = true;
+ currentSource = numSources;
+ }
+
+ bool atEndOfInput() const { return endOfFileReached; }
+
+ const TSourceLoc& getSourceLoc() const
+ {
+ if (singleLogical) {
+ return logicalSourceLoc;
+ } else {
+ return loc[std::max(0, std::min(currentSource, numSources - finale - 1))];
+ }
+ }
+ // Returns the index (starting from 0) of the most recent valid source string we are reading from.
+ int getLastValidSourceIndex() const { return std::min(currentSource, numSources - 1); }
+
+ void consumeWhiteSpace(bool& foundNonSpaceTab);
+ bool consumeComment();
+ void consumeWhitespaceComment(bool& foundNonSpaceTab);
+ bool scanVersion(int& version, EProfile& profile, bool& notFirstToken);
+
+protected:
+
+ // advance one character
+ void advance()
+ {
+ ++currentChar;
+ if (currentChar >= lengths[currentSource]) {
+ ++currentSource;
+ if (currentSource < numSources) {
+ loc[currentSource].string = loc[currentSource - 1].string + 1;
+ loc[currentSource].line = 1;
+ loc[currentSource].column = 0;
+ }
+ while (currentSource < numSources && lengths[currentSource] == 0) {
+ ++currentSource;
+ if (currentSource < numSources) {
+ loc[currentSource].string = loc[currentSource - 1].string + 1;
+ loc[currentSource].line = 1;
+ loc[currentSource].column = 0;
+ }
+ }
+ currentChar = 0;
+ }
+ }
+
+ int numSources; // number of strings in source
+ const unsigned char* const *sources; // array of strings; must be converted to positive values on use, to avoid aliasing with -1 as EndOfInput
+ const size_t *lengths; // length of each string
+ int currentSource;
+ size_t currentChar;
+
+ // This is for reporting what string/line an error occurred on, and can be overridden by #line.
+ // It remembers the last state of each source string as it is left for the next one, so unget()
+ // can restore that state.
+ TSourceLoc* loc; // an array
+
+ int stringBias; // the first string that is the user's string number 0
+ int finale; // number of internal strings after user's last string
+
+ TSourceLoc logicalSourceLoc;
+ bool singleLogical; // treats the strings as a single logical string.
+ // locations will be reported from the first string.
+
+ // Set to true once peek() returns EndOfInput, so that we won't roll back
+ // once we've reached EndOfInput.
+ bool endOfFileReached;
+};
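+
+// Usage sketch (hypothetical, for illustration): two non-null-terminated
+// fragments scanned as one stream of positive 8-bit characters.
+//
+//   const char* strs[] = { "void main(", ") {}" };
+//   size_t lens[]      = { 10, 4 };
+//   TInputScanner in(2, strs, lens);
+//   for (int c = in.get(); c != EndOfInput; c = in.get())
+//       consume(c); // hypothetical sink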
+
+} // end namespace glslang
+
+#endif // _GLSLANG_SCAN_INCLUDED_
diff --git a/thirdparty/glslang/glslang/MachineIndependent/ScanContext.h b/thirdparty/glslang/glslang/MachineIndependent/ScanContext.h
new file mode 100644
index 0000000000..74b2b3c746
--- /dev/null
+++ b/thirdparty/glslang/glslang/MachineIndependent/ScanContext.h
@@ -0,0 +1,93 @@
+//
+// Copyright (C) 2013 LunarG, Inc.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+//
+// This holds context specific to the GLSL scanner, which
+// sits between the preprocessor scanner and parser.
+//
+
+#pragma once
+
+#include "ParseHelper.h"
+
+namespace glslang {
+
+class TPpContext;
+class TPpToken;
+class TParserToken;
+
+class TScanContext {
+public:
+ explicit TScanContext(TParseContextBase& pc) :
+ parseContext(pc),
+ afterType(false), afterStruct(false),
+ field(false), afterBuffer(false) { }
+ virtual ~TScanContext() { }
+
+ static void fillInKeywordMap();
+ static void deleteKeywordMap();
+
+ int tokenize(TPpContext*, TParserToken&);
+
+protected:
+ TScanContext(TScanContext&);
+ TScanContext& operator=(TScanContext&);
+
+ int tokenizeIdentifier();
+ int identifierOrType();
+ int reservedWord();
+ int identifierOrReserved(bool reserved);
+ int es30ReservedFromGLSL(int version);
+ int nonreservedKeyword(int esVersion, int nonEsVersion);
+ int precisionKeyword();
+ int matNxM();
+ int dMat();
+ int firstGenerationImage(bool inEs310);
+ int secondGenerationImage();
+
+ TParseContextBase& parseContext;
+ bool afterType; // true if we've recognized a type, so can only be looking for an identifier
+ bool afterStruct; // true if we've recognized the STRUCT keyword, so can only be looking for an identifier
+ bool field; // true if we're on a field, right after a '.'
+ bool afterBuffer; // true if we've recognized the BUFFER keyword
+ TSourceLoc loc;
+ TParserToken* parserToken;
+ TPpToken* ppToken;
+
+ const char* tokenText;
+ int keyword;
+};
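+
+// Typical wiring, as done in InitializeSymbolTable() in ShaderLang.cpp: one
+// scan context per parse context, registered before parsing begins.
+//
+//   TScanContext scanContext(*parseContext);
+//   parseContext->setScanContext(&scanContext);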
+
+} // end namespace glslang
diff --git a/thirdparty/glslang/glslang/MachineIndependent/ShaderLang.cpp b/thirdparty/glslang/glslang/MachineIndependent/ShaderLang.cpp
new file mode 100644
index 0000000000..6f9db0195c
--- /dev/null
+++ b/thirdparty/glslang/glslang/MachineIndependent/ShaderLang.cpp
@@ -0,0 +1,2056 @@
+//
+// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+// Copyright (C) 2013-2016 LunarG, Inc.
+// Copyright (C) 2015-2018 Google, Inc.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+//
+// Implement the top-level of interface to the compiler/linker,
+// as defined in ShaderLang.h
+// This is the platform independent interface between an OGL driver
+// and the shading language compiler/linker.
+//
+#include <cstring>
+#include <iostream>
+#include <sstream>
+#include <memory>
+#include "SymbolTable.h"
+#include "ParseHelper.h"
+#include "Scan.h"
+#include "ScanContext.h"
+
+#ifdef ENABLE_HLSL
+#include "../../hlsl/hlslParseHelper.h"
+#include "../../hlsl/hlslParseables.h"
+#include "../../hlsl/hlslScanContext.h"
+#endif
+
+#include "../Include/ShHandle.h"
+#include "../../OGLCompilersDLL/InitializeDll.h"
+
+#include "preprocessor/PpContext.h"
+
+#define SH_EXPORTING
+#include "../Public/ShaderLang.h"
+#include "reflection.h"
+#include "iomapper.h"
+#include "Initialize.h"
+
+// TODO: this really shouldn't be here, it is only because of the trial addition
+// of printing pre-processed tokens, which requires knowing the string literal
+// token to print ", but none of that seems appropriate for this file.
+#include "preprocessor/PpTokens.h"
+
+namespace { // anonymous namespace for file-local functions and symbols
+
+// Total number of successful initializers of glslang: a refcount
+// Shared global; access should be protected by a global mutex/critical section.
+int NumberOfClients = 0;
+
+using namespace glslang;
+
+// Create a language specific version of parseables.
+TBuiltInParseables* CreateBuiltInParseables(TInfoSink& infoSink, EShSource source)
+{
+ switch (source) {
+ case EShSourceGlsl: return new TBuiltIns(); // GLSL builtIns
+#ifdef ENABLE_HLSL
+ case EShSourceHlsl: return new TBuiltInParseablesHlsl(); // HLSL intrinsics
+#endif
+
+ default:
+ infoSink.info.message(EPrefixInternalError, "Unable to determine source language");
+ return nullptr;
+ }
+}
+
+// Create a language specific version of a parse context.
+TParseContextBase* CreateParseContext(TSymbolTable& symbolTable, TIntermediate& intermediate,
+ int version, EProfile profile, EShSource source,
+ EShLanguage language, TInfoSink& infoSink,
+ SpvVersion spvVersion, bool forwardCompatible, EShMessages messages,
+ bool parsingBuiltIns, std::string sourceEntryPointName = "")
+{
+ switch (source) {
+ case EShSourceGlsl: {
+ if (sourceEntryPointName.size() == 0)
+ intermediate.setEntryPointName("main");
+ TString entryPoint = sourceEntryPointName.c_str();
+ return new TParseContext(symbolTable, intermediate, parsingBuiltIns, version, profile, spvVersion,
+ language, infoSink, forwardCompatible, messages, &entryPoint);
+ }
+#ifdef ENABLE_HLSL
+ case EShSourceHlsl:
+ return new HlslParseContext(symbolTable, intermediate, parsingBuiltIns, version, profile, spvVersion,
+ language, infoSink, sourceEntryPointName.c_str(), forwardCompatible, messages);
+#endif
+ default:
+ infoSink.info.message(EPrefixInternalError, "Unable to determine source language");
+ return nullptr;
+ }
+}
+
+// Local mapping functions for making arrays of symbol tables....
+
+const int VersionCount = 17; // index range in MapVersionToIndex
+
+int MapVersionToIndex(int version)
+{
+ int index = 0;
+
+ switch (version) {
+ case 100: index = 0; break;
+ case 110: index = 1; break;
+ case 120: index = 2; break;
+ case 130: index = 3; break;
+ case 140: index = 4; break;
+ case 150: index = 5; break;
+ case 300: index = 6; break;
+ case 330: index = 7; break;
+ case 400: index = 8; break;
+ case 410: index = 9; break;
+ case 420: index = 10; break;
+ case 430: index = 11; break;
+ case 440: index = 12; break;
+ case 310: index = 13; break;
+ case 450: index = 14; break;
+ case 500: index = 0; break; // HLSL
+ case 320: index = 15; break;
+ case 460: index = 16; break;
+ default: assert(0); break;
+ }
+
+ assert(index < VersionCount);
+
+ return index;
+}
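+
+// Note the indices reflect the order versions were added, not numeric order:
+// e.g. MapVersionToIndex(310) == 13 while MapVersionToIndex(330) == 7, and
+// HLSL's 500 shares index 0 with ESSL 100.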
+
+const int SpvVersionCount = 3; // index range in MapSpvVersionToIndex
+
+int MapSpvVersionToIndex(const SpvVersion& spvVersion)
+{
+ int index = 0;
+
+ if (spvVersion.openGl > 0)
+ index = 1;
+ else if (spvVersion.vulkan > 0)
+ index = 2;
+
+ assert(index < SpvVersionCount);
+
+ return index;
+}
+
+const int ProfileCount = 4; // index range in MapProfileToIndex
+
+int MapProfileToIndex(EProfile profile)
+{
+ int index = 0;
+
+ switch (profile) {
+ case ENoProfile: index = 0; break;
+ case ECoreProfile: index = 1; break;
+ case ECompatibilityProfile: index = 2; break;
+ case EEsProfile: index = 3; break;
+ default: break;
+ }
+
+ assert(index < ProfileCount);
+
+ return index;
+}
+
+const int SourceCount = 2;
+
+int MapSourceToIndex(EShSource source)
+{
+ int index = 0;
+
+ switch (source) {
+ case EShSourceGlsl: index = 0; break;
+ case EShSourceHlsl: index = 1; break;
+ default: break;
+ }
+
+ assert(index < SourceCount);
+
+ return index;
+}
+
+// only one of these needed for non-ES; ES needs 2 for different precision defaults of built-ins
+enum EPrecisionClass {
+ EPcGeneral,
+ EPcFragment,
+ EPcCount
+};
+
+// A process-global symbol table per version per profile for built-ins common
+// to multiple stages (languages), and a process-global symbol table per version
+// per profile per stage for built-ins unique to each stage. They will be sparsely
+// populated, so they will only be generated as needed.
+//
+// Each has a different set of built-ins, and we want to preserve that from
+// compile to compile.
+//
+TSymbolTable* CommonSymbolTable[VersionCount][SpvVersionCount][ProfileCount][SourceCount][EPcCount] = {};
+TSymbolTable* SharedSymbolTables[VersionCount][SpvVersionCount][ProfileCount][SourceCount][EShLangCount] = {};
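+
+// Example lookup (illustrative): the shared per-stage table for GLSL 450
+// targeting Vulkan, core profile, vertex stage lives at
+//   SharedSymbolTables[MapVersionToIndex(450)][2 /* Vulkan */]
+//                     [MapProfileToIndex(ECoreProfile)]
+//                     [MapSourceToIndex(EShSourceGlsl)][EShLangVertex];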
+
+TPoolAllocator* PerProcessGPA = nullptr;
+
+//
+// Parse and add to the given symbol table the content of the given shader string.
+//
+bool InitializeSymbolTable(const TString& builtIns, int version, EProfile profile, const SpvVersion& spvVersion, EShLanguage language,
+ EShSource source, TInfoSink& infoSink, TSymbolTable& symbolTable)
+{
+ TIntermediate intermediate(language, version, profile);
+
+ intermediate.setSource(source);
+
+ std::unique_ptr<TParseContextBase> parseContext(CreateParseContext(symbolTable, intermediate, version, profile, source,
+ language, infoSink, spvVersion, true, EShMsgDefault,
+ true));
+
+ TShader::ForbidIncluder includer;
+ TPpContext ppContext(*parseContext, "", includer);
+ TScanContext scanContext(*parseContext);
+ parseContext->setScanContext(&scanContext);
+ parseContext->setPpContext(&ppContext);
+
+ //
+ // Push the symbol table to give it an initial scope. This
+ // push should not have a corresponding pop, so that built-ins
+ // are preserved, and the test for an empty table fails.
+ //
+
+ symbolTable.push();
+
+ const char* builtInShaders[2];
+ size_t builtInLengths[2];
+ builtInShaders[0] = builtIns.c_str();
+ builtInLengths[0] = builtIns.size();
+
+ if (builtInLengths[0] == 0)
+ return true;
+
+ TInputScanner input(1, builtInShaders, builtInLengths);
+ if (! parseContext->parseShaderStrings(ppContext, input)) {
+ infoSink.info.message(EPrefixInternalError, "Unable to parse built-ins");
+ printf("Unable to parse built-ins\n%s\n", infoSink.info.c_str());
+ printf("%s\n", builtInShaders[0]);
+
+ return false;
+ }
+
+ return true;
+}
+
+int CommonIndex(EProfile profile, EShLanguage language)
+{
+ return (profile == EEsProfile && language == EShLangFragment) ? EPcFragment : EPcGeneral;
+}
+
+//
+// To initialize per-stage shared tables, with the common table already complete.
+//
+void InitializeStageSymbolTable(TBuiltInParseables& builtInParseables, int version, EProfile profile, const SpvVersion& spvVersion,
+ EShLanguage language, EShSource source, TInfoSink& infoSink, TSymbolTable** commonTable,
+ TSymbolTable** symbolTables)
+{
+ (*symbolTables[language]).adoptLevels(*commonTable[CommonIndex(profile, language)]);
+ InitializeSymbolTable(builtInParseables.getStageString(language), version, profile, spvVersion, language, source,
+ infoSink, *symbolTables[language]);
+ builtInParseables.identifyBuiltIns(version, profile, spvVersion, language, *symbolTables[language]);
+ if (profile == EEsProfile && version >= 300)
+ (*symbolTables[language]).setNoBuiltInRedeclarations();
+ if (version == 110)
+ (*symbolTables[language]).setSeparateNameSpaces();
+}
+
+//
+// Initialize the full set of shareable symbol tables;
+// The common (cross-stage) and those shareable per-stage.
+//
+bool InitializeSymbolTables(TInfoSink& infoSink, TSymbolTable** commonTable, TSymbolTable** symbolTables, int version, EProfile profile, const SpvVersion& spvVersion, EShSource source)
+{
+ std::unique_ptr<TBuiltInParseables> builtInParseables(CreateBuiltInParseables(infoSink, source));
+
+ if (builtInParseables == nullptr)
+ return false;
+
+ builtInParseables->initialize(version, profile, spvVersion);
+
+ // do the common tables
+ InitializeSymbolTable(builtInParseables->getCommonString(), version, profile, spvVersion, EShLangVertex, source,
+ infoSink, *commonTable[EPcGeneral]);
+ if (profile == EEsProfile)
+ InitializeSymbolTable(builtInParseables->getCommonString(), version, profile, spvVersion, EShLangFragment, source,
+ infoSink, *commonTable[EPcFragment]);
+
+ // do the per-stage tables
+
+ // always have vertex and fragment
+ InitializeStageSymbolTable(*builtInParseables, version, profile, spvVersion, EShLangVertex, source,
+ infoSink, commonTable, symbolTables);
+ InitializeStageSymbolTable(*builtInParseables, version, profile, spvVersion, EShLangFragment, source,
+ infoSink, commonTable, symbolTables);
+
+ // check for tessellation
+ if ((profile != EEsProfile && version >= 150) ||
+ (profile == EEsProfile && version >= 310)) {
+ InitializeStageSymbolTable(*builtInParseables, version, profile, spvVersion, EShLangTessControl, source,
+ infoSink, commonTable, symbolTables);
+ InitializeStageSymbolTable(*builtInParseables, version, profile, spvVersion, EShLangTessEvaluation, source,
+ infoSink, commonTable, symbolTables);
+ }
+
+ // check for geometry
+ if ((profile != EEsProfile && version >= 150) ||
+ (profile == EEsProfile && version >= 310))
+ InitializeStageSymbolTable(*builtInParseables, version, profile, spvVersion, EShLangGeometry, source,
+ infoSink, commonTable, symbolTables);
+
+ // check for compute
+ if ((profile != EEsProfile && version >= 420) ||
+ (profile == EEsProfile && version >= 310))
+ InitializeStageSymbolTable(*builtInParseables, version, profile, spvVersion, EShLangCompute, source,
+ infoSink, commonTable, symbolTables);
+
+#ifdef NV_EXTENSIONS
+ // check for ray tracing stages
+ if (profile != EEsProfile && version >= 450) {
+ InitializeStageSymbolTable(*builtInParseables, version, profile, spvVersion, EShLangRayGenNV, source,
+ infoSink, commonTable, symbolTables);
+ InitializeStageSymbolTable(*builtInParseables, version, profile, spvVersion, EShLangIntersectNV, source,
+ infoSink, commonTable, symbolTables);
+ InitializeStageSymbolTable(*builtInParseables, version, profile, spvVersion, EShLangAnyHitNV, source,
+ infoSink, commonTable, symbolTables);
+ InitializeStageSymbolTable(*builtInParseables, version, profile, spvVersion, EShLangClosestHitNV, source,
+ infoSink, commonTable, symbolTables);
+ InitializeStageSymbolTable(*builtInParseables, version, profile, spvVersion, EShLangMissNV, source,
+ infoSink, commonTable, symbolTables);
+ InitializeStageSymbolTable(*builtInParseables, version, profile, spvVersion, EShLangCallableNV, source,
+ infoSink, commonTable, symbolTables);
+ }
+
+ // check for mesh
+ if ((profile != EEsProfile && version >= 450) ||
+ (profile == EEsProfile && version >= 320))
+ InitializeStageSymbolTable(*builtInParseables, version, profile, spvVersion, EShLangMeshNV, source,
+ infoSink, commonTable, symbolTables);
+
+ // check for task
+ if ((profile != EEsProfile && version >= 450) ||
+ (profile == EEsProfile && version >= 320))
+ InitializeStageSymbolTable(*builtInParseables, version, profile, spvVersion, EShLangTaskNV, source,
+ infoSink, commonTable, symbolTables);
+#endif
+
+ return true;
+}
+
+bool AddContextSpecificSymbols(const TBuiltInResource* resources, TInfoSink& infoSink, TSymbolTable& symbolTable, int version,
+ EProfile profile, const SpvVersion& spvVersion, EShLanguage language, EShSource source)
+{
+ std::unique_ptr<TBuiltInParseables> builtInParseables(CreateBuiltInParseables(infoSink, source));
+
+ if (builtInParseables == nullptr)
+ return false;
+
+ builtInParseables->initialize(*resources, version, profile, spvVersion, language);
+ InitializeSymbolTable(builtInParseables->getCommonString(), version, profile, spvVersion, language, source, infoSink, symbolTable);
+ builtInParseables->identifyBuiltIns(version, profile, spvVersion, language, symbolTable, *resources);
+
+ return true;
+}
+
+//
+// To do this on the fly, we want to leave the current state of our thread's
+// pool allocator intact, so:
+// - Switch to a new pool for parsing the built-ins
+// - Do the parsing, which builds the symbol table, using the new pool
+// - Switch to the process-global pool to save a copy of the resulting symbol table
+// - Free up the new pool used to parse the built-ins
+// - Switch back to the original thread's pool
+//
+// This only gets done the first time any thread needs a particular symbol table
+// (lazy evaluation).
+//
+void SetupBuiltinSymbolTable(int version, EProfile profile, const SpvVersion& spvVersion, EShSource source)
+{
+ TInfoSink infoSink;
+
+ // Make sure only one thread tries to do this at a time
+ glslang::GetGlobalLock();
+
+ // See if it's already been done for this version/profile combination
+ int versionIndex = MapVersionToIndex(version);
+ int spvVersionIndex = MapSpvVersionToIndex(spvVersion);
+ int profileIndex = MapProfileToIndex(profile);
+ int sourceIndex = MapSourceToIndex(source);
+ if (CommonSymbolTable[versionIndex][spvVersionIndex][profileIndex][sourceIndex][EPcGeneral]) {
+ glslang::ReleaseGlobalLock();
+
+ return;
+ }
+
+ // Switch to a new pool
+ TPoolAllocator& previousAllocator = GetThreadPoolAllocator();
+ TPoolAllocator* builtInPoolAllocator = new TPoolAllocator;
+ SetThreadPoolAllocator(builtInPoolAllocator);
+
+ // Dynamically allocate the local symbol tables so we can control when they are deallocated WRT when the pool is popped.
+ TSymbolTable* commonTable[EPcCount];
+ TSymbolTable* stageTables[EShLangCount];
+ for (int precClass = 0; precClass < EPcCount; ++precClass)
+ commonTable[precClass] = new TSymbolTable;
+ for (int stage = 0; stage < EShLangCount; ++stage)
+ stageTables[stage] = new TSymbolTable;
+
+ // Generate the local symbol tables using the new pool
+ InitializeSymbolTables(infoSink, commonTable, stageTables, version, profile, spvVersion, source);
+
+ // Switch to the process-global pool
+ SetThreadPoolAllocator(PerProcessGPA);
+
+ // Copy the local symbol tables from the new pool to the global tables using the process-global pool
+ for (int precClass = 0; precClass < EPcCount; ++precClass) {
+ if (! commonTable[precClass]->isEmpty()) {
+ CommonSymbolTable[versionIndex][spvVersionIndex][profileIndex][sourceIndex][precClass] = new TSymbolTable;
+ CommonSymbolTable[versionIndex][spvVersionIndex][profileIndex][sourceIndex][precClass]->copyTable(*commonTable[precClass]);
+ CommonSymbolTable[versionIndex][spvVersionIndex][profileIndex][sourceIndex][precClass]->readOnly();
+ }
+ }
+ for (int stage = 0; stage < EShLangCount; ++stage) {
+ if (! stageTables[stage]->isEmpty()) {
+ SharedSymbolTables[versionIndex][spvVersionIndex][profileIndex][sourceIndex][stage] = new TSymbolTable;
+ SharedSymbolTables[versionIndex][spvVersionIndex][profileIndex][sourceIndex][stage]->adoptLevels(*CommonSymbolTable
+ [versionIndex][spvVersionIndex][profileIndex][sourceIndex][CommonIndex(profile, (EShLanguage)stage)]);
+ SharedSymbolTables[versionIndex][spvVersionIndex][profileIndex][sourceIndex][stage]->copyTable(*stageTables[stage]);
+ SharedSymbolTables[versionIndex][spvVersionIndex][profileIndex][sourceIndex][stage]->readOnly();
+ }
+ }
+
+ // Clean up the local tables before deleting the pool they used.
+ for (int precClass = 0; precClass < EPcCount; ++precClass)
+ delete commonTable[precClass];
+ for (int stage = 0; stage < EShLangCount; ++stage)
+ delete stageTables[stage];
+
+ delete builtInPoolAllocator;
+ SetThreadPoolAllocator(&previousAllocator);
+
+ glslang::ReleaseGlobalLock();
+}
+
+// Function to print all builtins
+void DumpBuiltinSymbolTable(TInfoSink& infoSink, const TSymbolTable& symbolTable)
+{
+ infoSink.debug << "BuiltinSymbolTable {\n";
+
+ symbolTable.dump(infoSink, true);
+
+ infoSink.debug << "}\n";
+}
+
+// Return true if the shader was correctly specified for version/profile/stage.
+bool DeduceVersionProfile(TInfoSink& infoSink, EShLanguage stage, bool versionNotFirst, int defaultVersion,
+ EShSource source, int& version, EProfile& profile, const SpvVersion& spvVersion)
+{
+ const int FirstProfileVersion = 150;
+ bool correct = true;
+
+ if (source == EShSourceHlsl) {
+ version = 500; // shader model; currently a characteristic of glslang, not the input
+ profile = ECoreProfile; // allow doubles in prototype parsing
+ return correct;
+ }
+
+ // Get a version...
+ if (version == 0) {
+ version = defaultVersion;
+ // infoSink.info.message(EPrefixWarning, "#version: statement missing; use #version on first line of shader");
+ }
+
+ // Get a good profile...
+ if (profile == ENoProfile) {
+ if (version == 300 || version == 310 || version == 320) {
+ correct = false;
+ infoSink.info.message(EPrefixError, "#version: versions 300, 310, and 320 require specifying the 'es' profile");
+ profile = EEsProfile;
+ } else if (version == 100)
+ profile = EEsProfile;
+ else if (version >= FirstProfileVersion)
+ profile = ECoreProfile;
+ else
+ profile = ENoProfile;
+ } else {
+ // a profile was provided...
+ if (version < 150) {
+ correct = false;
+ infoSink.info.message(EPrefixError, "#version: versions before 150 do not allow a profile token");
+ if (version == 100)
+ profile = EEsProfile;
+ else
+ profile = ENoProfile;
+ } else if (version == 300 || version == 310 || version == 320) {
+ if (profile != EEsProfile) {
+ correct = false;
+ infoSink.info.message(EPrefixError, "#version: versions 300, 310, and 320 support only the es profile");
+ }
+ profile = EEsProfile;
+ } else {
+ if (profile == EEsProfile) {
+ correct = false;
+ infoSink.info.message(EPrefixError, "#version: only version 300, 310, and 320 support the es profile");
+ if (version >= FirstProfileVersion)
+ profile = ECoreProfile;
+ else
+ profile = ENoProfile;
+ }
+ // else: typical desktop case... e.g., "#version 410 core"
+ }
+ }
+
+ // Fix version...
+ switch (version) {
+ // ES versions
+ case 100: break;
+ case 300: break;
+ case 310: break;
+ case 320: break;
+
+ // desktop versions
+ case 110: break;
+ case 120: break;
+ case 130: break;
+ case 140: break;
+ case 150: break;
+ case 330: break;
+ case 400: break;
+ case 410: break;
+ case 420: break;
+ case 430: break;
+ case 440: break;
+ case 450: break;
+ case 460: break;
+
+ // unknown version
+ default:
+ correct = false;
+ infoSink.info.message(EPrefixError, "version not supported");
+ if (profile == EEsProfile)
+ version = 310;
+ else {
+ version = 450;
+ profile = ECoreProfile;
+ }
+ break;
+ }
+
+ // Correct for stage type...
+ switch (stage) {
+ case EShLangGeometry:
+ if ((profile == EEsProfile && version < 310) ||
+ (profile != EEsProfile && version < 150)) {
+ correct = false;
+ infoSink.info.message(EPrefixError, "#version: geometry shaders require es profile with version 310 or non-es profile with version 150 or above");
+ version = (profile == EEsProfile) ? 310 : 150;
+ if (profile == EEsProfile || profile == ENoProfile)
+ profile = ECoreProfile;
+ }
+ break;
+ case EShLangTessControl:
+ case EShLangTessEvaluation:
+ if ((profile == EEsProfile && version < 310) ||
+ (profile != EEsProfile && version < 150)) {
+ correct = false;
+ infoSink.info.message(EPrefixError, "#version: tessellation shaders require es profile with version 310 or non-es profile with version 150 or above");
+ version = (profile == EEsProfile) ? 310 : 400; // desktop 150 only supports tessellation via extension; correct to 400, where it is core
+ if (profile == EEsProfile || profile == ENoProfile)
+ profile = ECoreProfile;
+ }
+ break;
+ case EShLangCompute:
+ if ((profile == EEsProfile && version < 310) ||
+ (profile != EEsProfile && version < 420)) {
+ correct = false;
+ infoSink.info.message(EPrefixError, "#version: compute shaders require es profile with version 310 or above, or non-es profile with version 420 or above");
+ version = profile == EEsProfile ? 310 : 420;
+ }
+ break;
+#ifdef NV_EXTENSIONS
+ case EShLangRayGenNV:
+ case EShLangIntersectNV:
+ case EShLangAnyHitNV:
+ case EShLangClosestHitNV:
+ case EShLangMissNV:
+ case EShLangCallableNV:
+ if (profile == EEsProfile || version < 460) {
+ correct = false;
+ infoSink.info.message(EPrefixError, "#version: ray tracing shaders require non-es profile with version 460 or above");
+ version = 460;
+ }
+ break;
+ case EShLangMeshNV:
+ case EShLangTaskNV:
+ if ((profile == EEsProfile && version < 320) ||
+ (profile != EEsProfile && version < 450)) {
+ correct = false;
+ infoSink.info.message(EPrefixError, "#version: mesh/task shaders require es profile with version 320 or above, or non-es profile with version 450 or above");
+ version = profile == EEsProfile ? 320 : 450;
+ }
+ break;
+#endif
+ default:
+ break;
+ }
+
+ if (profile == EEsProfile && version >= 300 && versionNotFirst) {
+ correct = false;
+ infoSink.info.message(EPrefixError, "#version: statement must appear first in es-profile shader; before comments or newlines");
+ }
+
+ // Check for SPIR-V compatibility
+ if (spvVersion.spv != 0) {
+ switch (profile) {
+ case EEsProfile:
+ if (spvVersion.vulkan > 0 && version < 310) {
+ correct = false;
+ infoSink.info.message(EPrefixError, "#version: ES shaders for Vulkan SPIR-V require version 310 or higher");
+ version = 310;
+ }
+ if (spvVersion.openGl >= 100) {
+ correct = false;
+ infoSink.info.message(EPrefixError, "#version: ES shaders for OpenGL SPIR-V are not supported");
+ version = 310;
+ }
+ break;
+ case ECompatibilityProfile:
+ infoSink.info.message(EPrefixError, "#version: compilation for SPIR-V does not support the compatibility profile");
+ break;
+ default:
+ if (spvVersion.vulkan > 0 && version < 140) {
+ correct = false;
+ infoSink.info.message(EPrefixError, "#version: Desktop shaders for Vulkan SPIR-V require version 140 or higher");
+ version = 140;
+ }
+ if (spvVersion.openGl >= 100 && version < 330) {
+ correct = false;
+ infoSink.info.message(EPrefixError, "#version: Desktop shaders for OpenGL SPIR-V require version 330 or higher");
+ version = 330;
+ }
+ break;
+ }
+ }
+
+ return correct;
+}
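+
+// A worked example of the deduction above (illustration only; the values
+// follow the rules in this function):
+//
+//     // compute shader, no "#version" in the source, defaultVersion = 110:
+//     version = 110; profile = ENoProfile;  // from the defaulting rules
+//     version = 420;                        // stage correction; error reported,
+//                                           // so the function returns false
+//     // whereas a shader declaring "#version 310 es" passes through unchanged.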
+
+// There are multiple input paths for setting up the environment.
+// TEnvironment takes precedence for what it sets, so sort all this out.
+// Ideally, the internal code could be made to use TEnvironment, but for
+// now, translate it to the historically used parameters.
+void TranslateEnvironment(const TEnvironment* environment, EShMessages& messages, EShSource& source,
+ EShLanguage& stage, SpvVersion& spvVersion)
+{
+ // Set up environmental defaults, first ignoring 'environment'.
+ if (messages & EShMsgSpvRules)
+ spvVersion.spv = EShTargetSpv_1_0;
+ if (messages & EShMsgVulkanRules) {
+ spvVersion.vulkan = EShTargetVulkan_1_0;
+ spvVersion.vulkanGlsl = 100;
+ } else if (spvVersion.spv != 0)
+ spvVersion.openGl = 100;
+
+ // Now, override, based on any content set in 'environment'.
+ // 'environment' must be cleared to ESh*None settings when items
+ // are not being set.
+ if (environment != nullptr) {
+ // input language
+ if (environment->input.languageFamily != EShSourceNone) {
+ stage = environment->input.stage;
+ switch (environment->input.dialect) {
+ case EShClientNone:
+ break;
+ case EShClientVulkan:
+ spvVersion.vulkanGlsl = environment->input.dialectVersion;
+ break;
+ case EShClientOpenGL:
+ spvVersion.openGl = environment->input.dialectVersion;
+ break;
+ }
+ switch (environment->input.languageFamily) {
+ case EShSourceNone:
+ break;
+ case EShSourceGlsl:
+ source = EShSourceGlsl;
+ messages = static_cast<EShMessages>(messages & ~EShMsgReadHlsl);
+ break;
+ case EShSourceHlsl:
+ source = EShSourceHlsl;
+ messages = static_cast<EShMessages>(messages | EShMsgReadHlsl);
+ break;
+ }
+ }
+
+ // client
+ switch (environment->client.client) {
+ case EShClientVulkan:
+ spvVersion.vulkan = environment->client.version;
+ break;
+ default:
+ break;
+ }
+
+ // generated code
+ switch (environment->target.language) {
+ case EshTargetSpv:
+ spvVersion.spv = environment->target.version;
+ break;
+ default:
+ break;
+ }
+ }
+}
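+
+// For example, following the defaulting rules above: with only
+// EShMsgSpvRules | EShMsgVulkanRules set and no TEnvironment, this yields
+// spv = EShTargetSpv_1_0, vulkan = EShTargetVulkan_1_0 and vulkanGlsl = 100,
+// leaving openGl at 0; with EShMsgSpvRules alone, openGl becomes 100 instead.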
+
+// Most processes are recorded when set in the intermediate representation;
+// these are the few that are not.
+void RecordProcesses(TIntermediate& intermediate, EShMessages messages, const std::string& sourceEntryPointName)
+{
+ if ((messages & EShMsgRelaxedErrors) != 0)
+ intermediate.addProcess("relaxed-errors");
+ if ((messages & EShMsgSuppressWarnings) != 0)
+ intermediate.addProcess("suppress-warnings");
+ if ((messages & EShMsgKeepUncalled) != 0)
+ intermediate.addProcess("keep-uncalled");
+ if (sourceEntryPointName.size() > 0) {
+ intermediate.addProcess("source-entrypoint");
+ intermediate.addProcessArgument(sourceEntryPointName);
+ }
+}
+
+// This is the common setup and cleanup code for PreprocessDeferred and
+// CompileDeferred.
+// It takes any callable with a signature of
+// bool (TParseContextBase& parseContext, TPpContext& ppContext,
+// TInputScanner& input, bool versionWillBeError,
+// TSymbolTable& , TIntermediate& ,
+// EShOptimizationLevel , EShMessages );
+// Which returns false if a failure was detected and true otherwise.
+//
+template<typename ProcessingContext>
+bool ProcessDeferred(
+ TCompiler* compiler,
+ const char* const shaderStrings[],
+ const int numStrings,
+ const int* inputLengths,
+ const char* const stringNames[],
+ const char* customPreamble,
+ const EShOptimizationLevel optLevel,
+ const TBuiltInResource* resources,
+ int defaultVersion, // use 100 for ES environment, 110 for desktop; this is the GLSL version, not SPIR-V or Vulkan
+ EProfile defaultProfile,
+ // set version/profile to defaultVersion/defaultProfile regardless of the #version
+ // directive in the source code
+ bool forceDefaultVersionAndProfile,
+ bool forwardCompatible, // give errors for use of deprecated features
+ EShMessages messages, // warnings/errors/AST; things to print out
+ TIntermediate& intermediate, // returned tree, etc.
+ ProcessingContext& processingContext,
+ bool requireNonempty,
+ TShader::Includer& includer,
+ const std::string sourceEntryPointName = "",
+ const TEnvironment* environment = nullptr) // optional way of fully setting all versions, overriding the above
+{
+ // This must be undone (.pop()) by the caller, after it finishes consuming the created tree.
+ GetThreadPoolAllocator().push();
+
+ if (numStrings == 0)
+ return true;
+
+ // Move to length-based strings, rather than null-terminated strings.
+ // Also, add strings to include the preamble and to ensure the shader is not null,
+ // which lets the grammar accept what was a null (post preprocessing) shader.
+ //
+ // Shader will look like
+ // string 0: system preamble
+ // string 1: custom preamble
+ // string 2...numStrings+1: user's shader
+ // string numStrings+2: "\n int;"
+ const int numPre = 2;
+ const int numPost = requireNonempty? 1 : 0;
+ const int numTotal = numPre + numStrings + numPost;
+ std::unique_ptr<size_t[]> lengths(new size_t[numTotal]);
+ std::unique_ptr<const char*[]> strings(new const char*[numTotal]);
+ std::unique_ptr<const char*[]> names(new const char*[numTotal]);
+ for (int s = 0; s < numStrings; ++s) {
+ strings[s + numPre] = shaderStrings[s];
+ if (inputLengths == nullptr || inputLengths[s] < 0)
+ lengths[s + numPre] = strlen(shaderStrings[s]);
+ else
+ lengths[s + numPre] = inputLengths[s];
+ }
+ if (stringNames != nullptr) {
+ for (int s = 0; s < numStrings; ++s)
+ names[s + numPre] = stringNames[s];
+ } else {
+ for (int s = 0; s < numStrings; ++s)
+ names[s + numPre] = nullptr;
+ }
+
+ // Get all the stages, languages, clients, and other environment
+ // stuff sorted out.
+ EShSource source = (messages & EShMsgReadHlsl) != 0 ? EShSourceHlsl : EShSourceGlsl;
+ SpvVersion spvVersion;
+ EShLanguage stage = compiler->getLanguage();
+ TranslateEnvironment(environment, messages, source, stage, spvVersion);
+ if (environment != nullptr && environment->target.hlslFunctionality1)
+ intermediate.setHlslFunctionality1();
+
+ // First, without using the preprocessor or parser, find the #version, so we know what
+ // symbol tables, processing rules, etc. to set up. This does not need the extra strings
+ // outlined above, just the user shader, after the system and user preambles.
+ glslang::TInputScanner userInput(numStrings, &strings[numPre], &lengths[numPre]);
+ int version = 0;
+ EProfile profile = ENoProfile;
+ bool versionNotFirstToken = false;
+ bool versionNotFirst = (source == EShSourceHlsl)
+ ? true
+ : userInput.scanVersion(version, profile, versionNotFirstToken);
+ bool versionNotFound = version == 0;
+ if (forceDefaultVersionAndProfile && source == EShSourceGlsl) {
+ if (! (messages & EShMsgSuppressWarnings) && ! versionNotFound &&
+ (version != defaultVersion || profile != defaultProfile)) {
+ compiler->infoSink.info << "Warning, (version, profile) forced to be ("
+ << defaultVersion << ", " << ProfileName(defaultProfile)
+ << "), while in source code it is ("
+ << version << ", " << ProfileName(profile) << ")\n";
+ }
+
+ if (versionNotFound) {
+ versionNotFirstToken = false;
+ versionNotFirst = false;
+ versionNotFound = false;
+ }
+ version = defaultVersion;
+ profile = defaultProfile;
+ }
+
+ bool goodVersion = DeduceVersionProfile(compiler->infoSink, stage,
+ versionNotFirst, defaultVersion, source, version, profile, spvVersion);
+ bool versionWillBeError = (versionNotFound || (profile == EEsProfile && version >= 300 && versionNotFirst));
+ bool warnVersionNotFirst = false;
+ if (! versionWillBeError && versionNotFirstToken) {
+ if (messages & EShMsgRelaxedErrors)
+ warnVersionNotFirst = true;
+ else
+ versionWillBeError = true;
+ }
+
+ intermediate.setSource(source);
+ intermediate.setVersion(version);
+ intermediate.setProfile(profile);
+ intermediate.setSpv(spvVersion);
+ RecordProcesses(intermediate, messages, sourceEntryPointName);
+ if (spvVersion.vulkan > 0)
+ intermediate.setOriginUpperLeft();
+ if ((messages & EShMsgHlslOffsets) || source == EShSourceHlsl)
+ intermediate.setHlslOffsets();
+ if (messages & EShMsgDebugInfo) {
+ intermediate.setSourceFile(names[numPre]);
+ for (int s = 0; s < numStrings; ++s) {
+ // The string may not be null-terminated, so make sure we provide
+ // the length along with the string.
+ intermediate.addSourceText(strings[numPre + s], lengths[numPre + s]);
+ }
+ }
+ SetupBuiltinSymbolTable(version, profile, spvVersion, source);
+
+ TSymbolTable* cachedTable = SharedSymbolTables[MapVersionToIndex(version)]
+ [MapSpvVersionToIndex(spvVersion)]
+ [MapProfileToIndex(profile)]
+ [MapSourceToIndex(source)]
+ [stage];
+
+ // Dynamically allocate the symbol table so we can control when it is deallocated WRT the pool.
+ std::unique_ptr<TSymbolTable> symbolTable(new TSymbolTable);
+ if (cachedTable)
+ symbolTable->adoptLevels(*cachedTable);
+
+ // Add built-in symbols that are potentially context dependent;
+ // they get popped again further down.
+ if (! AddContextSpecificSymbols(resources, compiler->infoSink, *symbolTable, version, profile, spvVersion,
+ stage, source)) {
+ return false;
+ }
+
+ if (messages & EShMsgBuiltinSymbolTable)
+ DumpBuiltinSymbolTable(compiler->infoSink, *symbolTable);
+
+ //
+ // Now we can process the full shader under proper symbols and rules.
+ //
+
+ std::unique_ptr<TParseContextBase> parseContext(CreateParseContext(*symbolTable, intermediate, version, profile, source,
+ stage, compiler->infoSink,
+ spvVersion, forwardCompatible, messages, false, sourceEntryPointName));
+ TPpContext ppContext(*parseContext, names[numPre] ? names[numPre] : "", includer);
+
+ // only GLSL (bison triggered, really) needs an externally set scan context
+ glslang::TScanContext scanContext(*parseContext);
+ if (source == EShSourceGlsl)
+ parseContext->setScanContext(&scanContext);
+
+ parseContext->setPpContext(&ppContext);
+ parseContext->setLimits(*resources);
+ if (! goodVersion)
+ parseContext->addError();
+ if (warnVersionNotFirst) {
+ TSourceLoc loc;
+ loc.init();
+ parseContext->warn(loc, "Illegal to have non-comment, non-whitespace tokens before #version", "#version", "");
+ }
+
+ parseContext->initializeExtensionBehavior();
+
+ // Fill in the strings as outlined above.
+ std::string preamble;
+ parseContext->getPreamble(preamble);
+ strings[0] = preamble.c_str();
+ lengths[0] = strlen(strings[0]);
+ names[0] = nullptr;
+ strings[1] = customPreamble;
+ lengths[1] = strlen(strings[1]);
+ names[1] = nullptr;
+ assert(2 == numPre);
+ if (requireNonempty) {
+ const int postIndex = numStrings + numPre;
+ strings[postIndex] = "\n int;";
+ lengths[postIndex] = strlen(strings[postIndex]);
+ names[postIndex] = nullptr;
+ }
+ TInputScanner fullInput(numStrings + numPre + numPost, strings.get(), lengths.get(), names.get(), numPre, numPost);
+
+ // Push a new symbol allocation scope that will get used for the shader's globals.
+ symbolTable->push();
+
+ bool success = processingContext(*parseContext, ppContext, fullInput,
+ versionWillBeError, *symbolTable,
+ intermediate, optLevel, messages);
+ return success;
+}
+
+// Responsible for keeping track of the most recent source string and line in
+// the preprocessor and outputting newlines appropriately if the source string
+// or line changes.
+class SourceLineSynchronizer {
+public:
+ SourceLineSynchronizer(const std::function<int()>& lastSourceIndex,
+ std::string* output)
+ : getLastSourceIndex(lastSourceIndex), output(output), lastSource(-1), lastLine(0) {}
+// SourceLineSynchronizer(const SourceLineSynchronizer&) = delete;
+// SourceLineSynchronizer& operator=(const SourceLineSynchronizer&) = delete;
+
+ // Sets the internally tracked source string index to that of the most
+ // recently read token. If we switched to a new source string, returns
+ // true and inserts a newline. Otherwise, returns false and outputs nothing.
+ bool syncToMostRecentString() {
+ if (getLastSourceIndex() != lastSource) {
+ // After switching to a new source string, we need to reset lastLine
+ // because line number resets every time a new source string is
+ // used. We also need to output a newline to separate the output
+ // from the previous source string (if there is one).
+ if (lastSource != -1 || lastLine != 0)
+ *output += '\n';
+ lastSource = getLastSourceIndex();
+ lastLine = -1;
+ return true;
+ }
+ return false;
+ }
+
+ // Calls syncToMostRecentString() and then sets the internally tracked line
+ // number to tokenLine. If we switched to a new line, returns true and inserts
+ // newlines appropriately. Otherwise, returns false and outputs nothing.
+ bool syncToLine(int tokenLine) {
+ syncToMostRecentString();
+ const bool newLineStarted = lastLine < tokenLine;
+ for (; lastLine < tokenLine; ++lastLine) {
+ if (lastLine > 0) *output += '\n';
+ }
+ return newLineStarted;
+ }
+
+ // Sets the internally tracked line number to newLineNum.
+ void setLineNum(int newLineNum) { lastLine = newLineNum; }
+
+private:
+ SourceLineSynchronizer& operator=(const SourceLineSynchronizer&);
+
+ // A function for getting the index of the last valid source string we've
+ // read tokens from.
+ const std::function<int()> getLastSourceIndex;
+ // output string for newlines.
+ std::string* output;
+ // lastSource is the source string index (starting from 0) of the last token
+ // processed. It is tracked in order for newlines to be inserted when a new
+ // source string starts. -1 means we haven't started processing any source
+ // string.
+ int lastSource;
+ // lastLine is the line number (starting from 1) of the last token processed.
+ // It is tracked in order for newlines to be inserted when a token appears
+ // on a new line. 0 means we haven't started processing any line in the
+ // current source string.
+ int lastLine;
+};
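+
+// A small sketch of how the preprocessing loop below drives this class
+// (the lambda standing in for TInputScanner::getLastValidSourceIndex is an
+// assumption of the example):
+//
+//     std::string out;
+//     SourceLineSynchronizer lineSync([]() { return 0; }, &out);
+//     lineSync.syncToLine(1); // first token, line 1: nothing emitted yet
+//     lineSync.syncToLine(3); // token two lines later: appends "\n\n"
+//     lineSync.syncToLine(3); // same line again: appends nothing, returns false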
+
+// DoPreprocessing is a valid ProcessingContext template argument,
+// which only performs the preprocessing step of compilation.
+// It places the result in the "string" argument to its constructor.
+//
+// This is not an officially supported or fully working path.
+struct DoPreprocessing {
+ explicit DoPreprocessing(std::string* string): outputString(string) {}
+ bool operator()(TParseContextBase& parseContext, TPpContext& ppContext,
+ TInputScanner& input, bool versionWillBeError,
+ TSymbolTable&, TIntermediate&,
+ EShOptimizationLevel, EShMessages)
+ {
+ // This is a list of tokens that do not require a space before or after.
+ static const std::string unNeededSpaceTokens = ";()[]";
+ static const std::string noSpaceBeforeTokens = ",";
+ glslang::TPpToken ppToken;
+
+ parseContext.setScanner(&input);
+ ppContext.setInput(input, versionWillBeError);
+
+ std::string outputBuffer;
+ SourceLineSynchronizer lineSync(
+ std::bind(&TInputScanner::getLastValidSourceIndex, &input), &outputBuffer);
+
+ parseContext.setExtensionCallback([&lineSync, &outputBuffer](
+ int line, const char* extension, const char* behavior) {
+ lineSync.syncToLine(line);
+ outputBuffer += "#extension ";
+ outputBuffer += extension;
+ outputBuffer += " : ";
+ outputBuffer += behavior;
+ });
+
+ parseContext.setLineCallback([&lineSync, &outputBuffer, &parseContext](
+ int curLineNum, int newLineNum, bool hasSource, int sourceNum, const char* sourceName) {
+ // SourceNum is the number of the source-string that is being parsed.
+ lineSync.syncToLine(curLineNum);
+ outputBuffer += "#line ";
+ outputBuffer += std::to_string(newLineNum);
+ if (hasSource) {
+ outputBuffer += ' ';
+ if (sourceName != nullptr) {
+ outputBuffer += '\"';
+ outputBuffer += sourceName;
+ outputBuffer += '\"';
+ } else {
+ outputBuffer += std::to_string(sourceNum);
+ }
+ }
+ if (parseContext.lineDirectiveShouldSetNextLine()) {
+ // newLineNum is the new line number for the line following the #line
+ // directive. So the new line number for the current line is
+ newLineNum -= 1;
+ }
+ outputBuffer += '\n';
+ // And we are at the next line of the #line directive now.
+ lineSync.setLineNum(newLineNum + 1);
+ });
+
+ parseContext.setVersionCallback(
+ [&lineSync, &outputBuffer](int line, int version, const char* str) {
+ lineSync.syncToLine(line);
+ outputBuffer += "#version ";
+ outputBuffer += std::to_string(version);
+ if (str) {
+ outputBuffer += ' ';
+ outputBuffer += str;
+ }
+ });
+
+ parseContext.setPragmaCallback([&lineSync, &outputBuffer](
+ int line, const glslang::TVector<glslang::TString>& ops) {
+ lineSync.syncToLine(line);
+ outputBuffer += "#pragma ";
+ for (size_t i = 0; i < ops.size(); ++i) {
+ outputBuffer += ops[i].c_str();
+ }
+ });
+
+ parseContext.setErrorCallback([&lineSync, &outputBuffer](
+ int line, const char* errorMessage) {
+ lineSync.syncToLine(line);
+ outputBuffer += "#error ";
+ outputBuffer += errorMessage;
+ });
+
+ int lastToken = EndOfInput; // lastToken records the last token processed.
+ do {
+ int token = ppContext.tokenize(ppToken);
+ if (token == EndOfInput)
+ break;
+
+ bool isNewString = lineSync.syncToMostRecentString();
+ bool isNewLine = lineSync.syncToLine(ppToken.loc.line);
+
+ if (isNewLine) {
+ // Don't emit whitespace onto empty lines.
+ // Copy any whitespace characters at the start of a line
+ // from the input to the output.
+ outputBuffer += std::string(ppToken.loc.column - 1, ' ');
+ }
+
+ // Output a space in between tokens, but not at the start of a line,
+ // and also not around special tokens. This helps with readability
+ // and consistency.
+ if (!isNewString && !isNewLine && lastToken != EndOfInput &&
+ (unNeededSpaceTokens.find((char)token) == std::string::npos) &&
+ (unNeededSpaceTokens.find((char)lastToken) == std::string::npos) &&
+ (noSpaceBeforeTokens.find((char)token) == std::string::npos)) {
+ outputBuffer += ' ';
+ }
+ lastToken = token;
+ if (token == PpAtomConstString)
+ outputBuffer += "\"";
+ outputBuffer += ppToken.name;
+ if (token == PpAtomConstString)
+ outputBuffer += "\"";
+ } while (true);
+ outputBuffer += '\n';
+ *outputString = std::move(outputBuffer);
+
+ bool success = true;
+ if (parseContext.getNumErrors() > 0) {
+ success = false;
+ parseContext.infoSink.info.prefix(EPrefixError);
+ parseContext.infoSink.info << parseContext.getNumErrors() << " compilation errors. No code generated.\n\n";
+ }
+ return success;
+ }
+ std::string* outputString;
+};
+
+// DoFullParse is a valid ProcessingContext template argument for fully
+// parsing the shader. It populates the "intermediate" with the AST.
+struct DoFullParse{
+ bool operator()(TParseContextBase& parseContext, TPpContext& ppContext,
+ TInputScanner& fullInput, bool versionWillBeError,
+ TSymbolTable&, TIntermediate& intermediate,
+ EShOptimizationLevel optLevel, EShMessages messages)
+ {
+ bool success = true;
+ // Parse the full shader.
+ if (! parseContext.parseShaderStrings(ppContext, fullInput, versionWillBeError))
+ success = false;
+
+ if (success && intermediate.getTreeRoot()) {
+ if (optLevel == EShOptNoGeneration)
+ parseContext.infoSink.info.message(EPrefixNone, "No errors. No code generation or linking was requested.");
+ else
+ success = intermediate.postProcess(intermediate.getTreeRoot(), parseContext.getLanguage());
+ } else if (! success) {
+ parseContext.infoSink.info.prefix(EPrefixError);
+ parseContext.infoSink.info << parseContext.getNumErrors() << " compilation errors. No code generated.\n\n";
+ }
+
+ if (messages & EShMsgAST)
+ intermediate.output(parseContext.infoSink, true);
+
+ return success;
+ }
+};
+
+// Take a single compilation unit, and run the preprocessor on it.
+// Return: True if there were no issues found in preprocessing,
+// False if any unknown version, pragma, or
+// extension was found during preprocessing.
+//
+// NOTE: Doing just preprocessing to obtain a correct preprocessed shader string
+// is not an officially supported or fully working path.
+bool PreprocessDeferred(
+ TCompiler* compiler,
+ const char* const shaderStrings[],
+ const int numStrings,
+ const int* inputLengths,
+ const char* const stringNames[],
+ const char* preamble,
+ const EShOptimizationLevel optLevel,
+ const TBuiltInResource* resources,
+ int defaultVersion, // use 100 for ES environment, 110 for desktop
+ EProfile defaultProfile,
+ bool forceDefaultVersionAndProfile,
+ bool forwardCompatible, // give errors for use of deprecated features
+ EShMessages messages, // warnings/errors/AST; things to print out
+ TShader::Includer& includer,
+ TIntermediate& intermediate, // returned tree, etc.
+ std::string* outputString)
+{
+ DoPreprocessing parser(outputString);
+ return ProcessDeferred(compiler, shaderStrings, numStrings, inputLengths, stringNames,
+ preamble, optLevel, resources, defaultVersion,
+ defaultProfile, forceDefaultVersionAndProfile,
+ forwardCompatible, messages, intermediate, parser,
+ false, includer);
+}
+
+//
+// do a partial compile on the given strings for a single compilation unit
+// for a potential deferred link into a single stage (and deferred full compile of that
+// stage through machine-dependent compilation).
+//
+// all preprocessing, parsing, semantic checks, etc. for a single compilation unit
+// are done here.
+//
+// return: the tree and other information is filled into the intermediate argument,
+// and true is returned by the function for success.
+//
+bool CompileDeferred(
+ TCompiler* compiler,
+ const char* const shaderStrings[],
+ const int numStrings,
+ const int* inputLengths,
+ const char* const stringNames[],
+ const char* preamble,
+ const EShOptimizationLevel optLevel,
+ const TBuiltInResource* resources,
+ int defaultVersion, // use 100 for ES environment, 110 for desktop
+ EProfile defaultProfile,
+ bool forceDefaultVersionAndProfile,
+ bool forwardCompatible, // give errors for use of deprecated features
+ EShMessages messages, // warnings/errors/AST; things to print out
+ TIntermediate& intermediate,// returned tree, etc.
+ TShader::Includer& includer,
+ const std::string sourceEntryPointName = "",
+ TEnvironment* environment = nullptr)
+{
+ DoFullParse parser;
+ return ProcessDeferred(compiler, shaderStrings, numStrings, inputLengths, stringNames,
+ preamble, optLevel, resources, defaultVersion,
+ defaultProfile, forceDefaultVersionAndProfile,
+ forwardCompatible, messages, intermediate, parser,
+ true, includer, sourceEntryPointName, environment);
+}
+
+} // end anonymous namespace for local functions
+
+//
+// ShInitialize() should be called exactly once per process, not per thread.
+//
+int ShInitialize()
+{
+ glslang::InitGlobalLock();
+
+ if (! InitProcess())
+ return 0;
+
+ glslang::GetGlobalLock();
+ ++NumberOfClients;
+ glslang::ReleaseGlobalLock();
+
+ if (PerProcessGPA == nullptr)
+ PerProcessGPA = new TPoolAllocator();
+
+ glslang::TScanContext::fillInKeywordMap();
+#ifdef ENABLE_HLSL
+ glslang::HlslScanContext::fillInKeywordMap();
+#endif
+
+ return 1;
+}
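+
+// A minimal sketch of the per-process lifecycle around this C interface
+// (error handling omitted; 'source' and the TBuiltInResource 'resources'
+// are assumptions of the example, not defined here):
+//
+//     ShInitialize();                                       // once per process
+//     ShHandle compiler = ShConstructCompiler(EShLangFragment, 0);
+//     const char* strings[] = { source };
+//     ShCompile(compiler, strings, 1, nullptr, EShOptNone,
+//               &resources, 0, 110, false, EShMsgDefault);
+//     puts(ShGetInfoLog(compiler));
+//     ShDestruct(compiler);
+//     ShFinalize();                                         // once per process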
+
+//
+// Driver calls these to create and destroy compiler/linker
+// objects.
+//
+
+ShHandle ShConstructCompiler(const EShLanguage language, int debugOptions)
+{
+ if (!InitThread())
+ return 0;
+
+ TShHandleBase* base = static_cast<TShHandleBase*>(ConstructCompiler(language, debugOptions));
+
+ return reinterpret_cast<void*>(base);
+}
+
+ShHandle ShConstructLinker(const EShExecutable executable, int debugOptions)
+{
+ if (!InitThread())
+ return 0;
+
+ TShHandleBase* base = static_cast<TShHandleBase*>(ConstructLinker(executable, debugOptions));
+
+ return reinterpret_cast<void*>(base);
+}
+
+ShHandle ShConstructUniformMap()
+{
+ if (!InitThread())
+ return 0;
+
+ TShHandleBase* base = static_cast<TShHandleBase*>(ConstructUniformMap());
+
+ return reinterpret_cast<void*>(base);
+}
+
+void ShDestruct(ShHandle handle)
+{
+ if (handle == 0)
+ return;
+
+ TShHandleBase* base = static_cast<TShHandleBase*>(handle);
+
+ if (base->getAsCompiler())
+ DeleteCompiler(base->getAsCompiler());
+ else if (base->getAsLinker())
+ DeleteLinker(base->getAsLinker());
+ else if (base->getAsUniformMap())
+ DeleteUniformMap(base->getAsUniformMap());
+}
+
+//
+// Cleanup symbol tables
+//
+int ShFinalize()
+{
+ glslang::GetGlobalLock();
+ --NumberOfClients;
+ assert(NumberOfClients >= 0);
+ bool finalize = NumberOfClients == 0;
+ glslang::ReleaseGlobalLock();
+ if (! finalize)
+ return 1;
+
+ for (int version = 0; version < VersionCount; ++version) {
+ for (int spvVersion = 0; spvVersion < SpvVersionCount; ++spvVersion) {
+ for (int p = 0; p < ProfileCount; ++p) {
+ for (int source = 0; source < SourceCount; ++source) {
+ for (int stage = 0; stage < EShLangCount; ++stage) {
+ delete SharedSymbolTables[version][spvVersion][p][source][stage];
+ SharedSymbolTables[version][spvVersion][p][source][stage] = 0;
+ }
+ }
+ }
+ }
+ }
+
+ for (int version = 0; version < VersionCount; ++version) {
+ for (int spvVersion = 0; spvVersion < SpvVersionCount; ++spvVersion) {
+ for (int p = 0; p < ProfileCount; ++p) {
+ for (int source = 0; source < SourceCount; ++source) {
+ for (int pc = 0; pc < EPcCount; ++pc) {
+ delete CommonSymbolTable[version][spvVersion][p][source][pc];
+ CommonSymbolTable[version][spvVersion][p][source][pc] = 0;
+ }
+ }
+ }
+ }
+ }
+
+ if (PerProcessGPA != nullptr) {
+ delete PerProcessGPA;
+ PerProcessGPA = nullptr;
+ }
+
+ glslang::TScanContext::deleteKeywordMap();
+#ifdef ENABLE_HLSL
+ glslang::HlslScanContext::deleteKeywordMap();
+#endif
+
+ return 1;
+}
+
+//
+// Do a full compile on the given strings for a single compilation unit
+// forming a complete stage. The result of the machine dependent compilation
+// is left in the provided compile object.
+//
+// Return: The return value is really boolean, indicating
+// success (1) or failure (0).
+//
+int ShCompile(
+ const ShHandle handle,
+ const char* const shaderStrings[],
+ const int numStrings,
+ const int* inputLengths,
+ const EShOptimizationLevel optLevel,
+ const TBuiltInResource* resources,
+ int /*debugOptions*/,
+ int defaultVersion, // use 100 for ES environment, 110 for desktop
+ bool forwardCompatible, // give errors for use of deprecated features
+ EShMessages messages // warnings/errors/AST; things to print out
+ )
+{
+ // Map the generic handle to the C++ object
+ if (handle == 0)
+ return 0;
+
+ TShHandleBase* base = reinterpret_cast<TShHandleBase*>(handle);
+ TCompiler* compiler = base->getAsCompiler();
+ if (compiler == 0)
+ return 0;
+
+ SetThreadPoolAllocator(compiler->getPool());
+
+ compiler->infoSink.info.erase();
+ compiler->infoSink.debug.erase();
+
+ TIntermediate intermediate(compiler->getLanguage());
+ TShader::ForbidIncluder includer;
+ bool success = CompileDeferred(compiler, shaderStrings, numStrings, inputLengths, nullptr,
+ "", optLevel, resources, defaultVersion, ENoProfile, false,
+ forwardCompatible, messages, intermediate, includer);
+
+ //
+ // Call the machine dependent compiler
+ //
+ if (success && intermediate.getTreeRoot() && optLevel != EShOptNoGeneration)
+ success = compiler->compile(intermediate.getTreeRoot(), intermediate.getVersion(), intermediate.getProfile());
+
+ intermediate.removeTree();
+
+ // Throw away all the temporary memory used by the compilation process.
+ // The push was done in the CompileDeferred() call above.
+ GetThreadPoolAllocator().pop();
+
+ return success ? 1 : 0;
+}
+
+//
+// Link the given compile objects.
+//
+// Return: The return value is really boolean, indicating
+// success or failure.
+//
+int ShLinkExt(
+ const ShHandle linkHandle,
+ const ShHandle compHandles[],
+ const int numHandles)
+{
+ if (linkHandle == 0 || numHandles == 0)
+ return 0;
+
+ THandleList cObjects;
+
+ for (int i = 0; i < numHandles; ++i) {
+ if (compHandles[i] == 0)
+ return 0;
+ TShHandleBase* base = reinterpret_cast<TShHandleBase*>(compHandles[i]);
+ if (base->getAsLinker()) {
+ cObjects.push_back(base->getAsLinker());
+ }
+ if (base->getAsCompiler())
+ cObjects.push_back(base->getAsCompiler());
+
+ if (cObjects[i] == 0)
+ return 0;
+ }
+
+ TShHandleBase* base = reinterpret_cast<TShHandleBase*>(linkHandle);
+ TLinker* linker = static_cast<TLinker*>(base->getAsLinker());
+
+ SetThreadPoolAllocator(linker->getPool());
+
+ if (linker == 0)
+ return 0;
+
+ linker->infoSink.info.erase();
+
+ for (int i = 0; i < numHandles; ++i) {
+ if (cObjects[i]->getAsCompiler()) {
+ if (! cObjects[i]->getAsCompiler()->linkable()) {
+ linker->infoSink.info.message(EPrefixError, "Not all shaders have valid object code.");
+ return 0;
+ }
+ }
+ }
+
+ bool ret = linker->link(cObjects);
+
+ return ret ? 1 : 0;
+}
+
+//
+// ShSetEncryptionMethod is a place-holder for specifying
+// how source code is encrypted.
+//
+void ShSetEncryptionMethod(ShHandle handle)
+{
+ if (handle == 0)
+ return;
+}
+
+//
+// Return any compiler/linker/uniformmap log of messages for the application.
+//
+const char* ShGetInfoLog(const ShHandle handle)
+{
+ if (handle == 0)
+ return 0;
+
+ TShHandleBase* base = static_cast<TShHandleBase*>(handle);
+ TInfoSink* infoSink;
+
+ if (base->getAsCompiler())
+ infoSink = &(base->getAsCompiler()->getInfoSink());
+ else if (base->getAsLinker())
+ infoSink = &(base->getAsLinker()->getInfoSink());
+ else
+ return 0;
+
+ infoSink->info << infoSink->debug.c_str();
+ return infoSink->info.c_str();
+}
+
+//
+// Return the resulting binary code from the link process. Structure
+// is machine dependent.
+//
+const void* ShGetExecutable(const ShHandle handle)
+{
+ if (handle == 0)
+ return 0;
+
+ TShHandleBase* base = reinterpret_cast<TShHandleBase*>(handle);
+
+ TLinker* linker = static_cast<TLinker*>(base->getAsLinker());
+ if (linker == 0)
+ return 0;
+
+ return linker->getObjectCode();
+}
+
+//
+// Let the linker know where the application said its attributes are bound.
+// The linker does not use these values, they are remapped by the ICD or
+// hardware. It just needs them to know what's aliased.
+//
+// Return: The return value is really boolean, indicating
+// success or failure.
+//
+int ShSetVirtualAttributeBindings(const ShHandle handle, const ShBindingTable* table)
+{
+ if (handle == 0)
+ return 0;
+
+ TShHandleBase* base = reinterpret_cast<TShHandleBase*>(handle);
+ TLinker* linker = static_cast<TLinker*>(base->getAsLinker());
+
+ if (linker == 0)
+ return 0;
+
+ linker->setAppAttributeBindings(table);
+
+ return 1;
+}
+
+//
+// Let the linker know where the predefined attributes have to live.
+//
+int ShSetFixedAttributeBindings(const ShHandle handle, const ShBindingTable* table)
+{
+ if (handle == 0)
+ return 0;
+
+ TShHandleBase* base = reinterpret_cast<TShHandleBase*>(handle);
+ TLinker* linker = static_cast<TLinker*>(base->getAsLinker());
+
+ if (linker == 0)
+ return 0;
+
+ linker->setFixedAttributeBindings(table);
+ return 1;
+}
+
+//
+// Some attribute locations are off-limits to the linker...
+//
+int ShExcludeAttributes(const ShHandle handle, int *attributes, int count)
+{
+ if (handle == 0)
+ return 0;
+
+ TShHandleBase* base = reinterpret_cast<TShHandleBase*>(handle);
+ TLinker* linker = static_cast<TLinker*>(base->getAsLinker());
+ if (linker == 0)
+ return 0;
+
+ linker->setExcludedAttributes(attributes, count);
+
+ return 1;
+}
+
+//
+// Return the index for OpenGL to use for knowing where a uniform lives.
+//
+// Return: The return value is really boolean, indicating
+// success or failure.
+//
+int ShGetUniformLocation(const ShHandle handle, const char* name)
+{
+ if (handle == 0)
+ return -1;
+
+ TShHandleBase* base = reinterpret_cast<TShHandleBase*>(handle);
+ TUniformMap* uniformMap = base->getAsUniformMap();
+ if (uniformMap == 0)
+ return -1;
+
+ return uniformMap->getLocation(name);
+}
+
+////////////////////////////////////////////////////////////////////////////////////////////
+//
+// Deferred-Lowering C++ Interface
+// -----------------------------------
+//
+// Below is a new alternate C++ interface that might potentially replace the above
+// opaque handle-based interface.
+//
+// See more detailed comment in ShaderLang.h
+//
+
+namespace glslang {
+
+#include "../Include/revision.h"
+
+#define QUOTE(s) #s
+#define STR(n) QUOTE(n)
+
+const char* GetEsslVersionString()
+{
+ return "OpenGL ES GLSL 3.20 glslang Khronos. " STR(GLSLANG_MINOR_VERSION) "." STR(GLSLANG_PATCH_LEVEL);
+}
+
+const char* GetGlslVersionString()
+{
+ return "4.60 glslang Khronos. " STR(GLSLANG_MINOR_VERSION) "." STR(GLSLANG_PATCH_LEVEL);
+}
+
+int GetKhronosToolId()
+{
+ return 8;
+}
+
+bool InitializeProcess()
+{
+ return ShInitialize() != 0;
+}
+
+void FinalizeProcess()
+{
+ ShFinalize();
+}
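+
+// A minimal sketch of this C++ interface in use (the 'source' string and
+// the TBuiltInResource 'resources' are assumptions of the example; the
+// four-argument parse() convenience overload comes from ShaderLang.h):
+//
+//     glslang::InitializeProcess();
+//     {
+//         glslang::TShader shader(EShLangVertex);
+//         shader.setStrings(&source, 1);
+//         bool ok = shader.parse(&resources, 110, false, EShMsgDefault);
+//
+//         glslang::TProgram program;
+//         program.addShader(&shader);
+//         ok = ok && program.link(EShMsgDefault);
+//     }
+//     glslang::FinalizeProcess();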
+
+class TDeferredCompiler : public TCompiler {
+public:
+ TDeferredCompiler(EShLanguage s, TInfoSink& i) : TCompiler(s, i) { }
+ virtual bool compile(TIntermNode*, int = 0, EProfile = ENoProfile) { return true; }
+};
+
+TShader::TShader(EShLanguage s)
+ : stage(s), lengths(nullptr), stringNames(nullptr), preamble("")
+{
+ pool = new TPoolAllocator;
+ infoSink = new TInfoSink;
+ compiler = new TDeferredCompiler(stage, *infoSink);
+ intermediate = new TIntermediate(s);
+
+ // clear environment (avoid constructors in them for use in a C interface)
+ environment.input.languageFamily = EShSourceNone;
+ environment.input.dialect = EShClientNone;
+ environment.client.client = EShClientNone;
+ environment.target.language = EShTargetNone;
+ environment.target.hlslFunctionality1 = false;
+}
+
+TShader::~TShader()
+{
+ delete infoSink;
+ delete compiler;
+ delete intermediate;
+ delete pool;
+}
+
+void TShader::setStrings(const char* const* s, int n)
+{
+ strings = s;
+ numStrings = n;
+ lengths = nullptr;
+}
+
+void TShader::setStringsWithLengths(const char* const* s, const int* l, int n)
+{
+ strings = s;
+ numStrings = n;
+ lengths = l;
+}
+
+void TShader::setStringsWithLengthsAndNames(
+ const char* const* s, const int* l, const char* const* names, int n)
+{
+ strings = s;
+ numStrings = n;
+ lengths = l;
+ stringNames = names;
+}
+
+void TShader::setEntryPoint(const char* entryPoint)
+{
+ intermediate->setEntryPointName(entryPoint);
+}
+
+void TShader::setSourceEntryPoint(const char* name)
+{
+ sourceEntryPointName = name;
+}
+
+void TShader::addProcesses(const std::vector<std::string>& p)
+{
+ intermediate->addProcesses(p);
+}
+
+// Set binding base for given resource type
+void TShader::setShiftBinding(TResourceType res, unsigned int base) {
+ intermediate->setShiftBinding(res, base);
+}
+
+// Set binding base for given resource type for a given binding set.
+void TShader::setShiftBindingForSet(TResourceType res, unsigned int base, unsigned int set) {
+ intermediate->setShiftBindingForSet(res, base, set);
+}
+
+// Set binding base for sampler types
+void TShader::setShiftSamplerBinding(unsigned int base) { setShiftBinding(EResSampler, base); }
+// Set binding base for texture types (SRV)
+void TShader::setShiftTextureBinding(unsigned int base) { setShiftBinding(EResTexture, base); }
+// Set binding base for image types
+void TShader::setShiftImageBinding(unsigned int base) { setShiftBinding(EResImage, base); }
+// Set binding base for uniform buffer objects (CBV)
+void TShader::setShiftUboBinding(unsigned int base) { setShiftBinding(EResUbo, base); }
+// Synonym for setShiftUboBinding, to match HLSL language.
+void TShader::setShiftCbufferBinding(unsigned int base) { setShiftBinding(EResUbo, base); }
+// Set binding base for UAV (unordered access view)
+void TShader::setShiftUavBinding(unsigned int base) { setShiftBinding(EResUav, base); }
+// Set binding base for SSBOs
+void TShader::setShiftSsboBinding(unsigned int base) { setShiftBinding(EResSsbo, base); }
+// Enables binding automapping using TIoMapper
+void TShader::setAutoMapBindings(bool map) { intermediate->setAutoMapBindings(map); }
+// Enables position.Y output negation in vertex shader
+void TShader::setInvertY(bool invert) { intermediate->setInvertY(invert); }
+// Fragile: currently within one stage: simple auto-assignment of location
+void TShader::setAutoMapLocations(bool map) { intermediate->setAutoMapLocations(map); }
+void TShader::addUniformLocationOverride(const char* name, int loc)
+{
+ intermediate->addUniformLocationOverride(name, loc);
+}
+void TShader::setUniformLocationBase(int base)
+{
+ intermediate->setUniformLocationBase(base);
+}
+// See comment above TDefaultHlslIoMapper in iomapper.cpp:
+void TShader::setHlslIoMapping(bool hlslIoMap) { intermediate->setHlslIoMapping(hlslIoMap); }
+void TShader::setFlattenUniformArrays(bool flatten) { intermediate->setFlattenUniformArrays(flatten); }
+void TShader::setNoStorageFormat(bool useUnknownFormat) { intermediate->setNoStorageFormat(useUnknownFormat); }
+void TShader::setResourceSetBinding(const std::vector<std::string>& base) { intermediate->setResourceSetBinding(base); }
+void TShader::setTextureSamplerTransformMode(EShTextureSamplerTransformMode mode) { intermediate->setTextureSamplerTransformMode(mode); }
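+
+// For example, an HLSL-style register layout that keeps SRVs (t#) and
+// samplers (s#) in distinct binding ranges might be configured as follows
+// (the base values are illustrative, not prescribed by this API):
+//
+//     shader.setShiftTextureBinding(20); // t0 -> binding 20, t1 -> 21, ...
+//     shader.setShiftSamplerBinding(40); // s0 -> binding 40, s1 -> 41, ...
+//     shader.setAutoMapBindings(true);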
+
+//
+// Turn the shader strings into a parse tree in the TIntermediate.
+//
+// Returns true for success.
+//
+bool TShader::parse(const TBuiltInResource* builtInResources, int defaultVersion, EProfile defaultProfile, bool forceDefaultVersionAndProfile,
+ bool forwardCompatible, EShMessages messages, Includer& includer)
+{
+ if (! InitThread())
+ return false;
+ SetThreadPoolAllocator(pool);
+
+ if (! preamble)
+ preamble = "";
+
+ return CompileDeferred(compiler, strings, numStrings, lengths, stringNames,
+ preamble, EShOptNone, builtInResources, defaultVersion,
+ defaultProfile, forceDefaultVersionAndProfile,
+ forwardCompatible, messages, *intermediate, includer, sourceEntryPointName,
+ &environment);
+}
+
+// Fill in a string with the result of preprocessing ShaderStrings
+// Returns true if all extensions, pragmas and version strings were valid.
+//
+// NOTE: Doing just preprocessing to obtain a correct preprocessed shader string
+// is not an officially supported or fully working path.
+bool TShader::preprocess(const TBuiltInResource* builtInResources,
+ int defaultVersion, EProfile defaultProfile,
+ bool forceDefaultVersionAndProfile,
+ bool forwardCompatible, EShMessages message,
+ std::string* output_string,
+ Includer& includer)
+{
+ if (! InitThread())
+ return false;
+ SetThreadPoolAllocator(pool);
+
+ if (! preamble)
+ preamble = "";
+
+ return PreprocessDeferred(compiler, strings, numStrings, lengths, stringNames, preamble,
+ EShOptNone, builtInResources, defaultVersion,
+ defaultProfile, forceDefaultVersionAndProfile,
+ forwardCompatible, message, includer, *intermediate, output_string);
+}
+
+const char* TShader::getInfoLog()
+{
+ return infoSink->info.c_str();
+}
+
+const char* TShader::getInfoDebugLog()
+{
+ return infoSink->debug.c_str();
+}
+
+TProgram::TProgram() : reflection(0), ioMapper(nullptr), linked(false)
+{
+ pool = new TPoolAllocator;
+ infoSink = new TInfoSink;
+ for (int s = 0; s < EShLangCount; ++s) {
+ intermediate[s] = 0;
+ newedIntermediate[s] = false;
+ }
+}
+
+TProgram::~TProgram()
+{
+ delete ioMapper;
+ delete infoSink;
+ delete reflection;
+
+ for (int s = 0; s < EShLangCount; ++s)
+ if (newedIntermediate[s])
+ delete intermediate[s];
+
+ delete pool;
+}
+
+//
+// Merge the compilation units within each stage into a single TIntermediate.
+// All starting compilation units need to be the result of calling TShader::parse().
+//
+// Return true for success.
+//
+bool TProgram::link(EShMessages messages)
+{
+ if (linked)
+ return false;
+ linked = true;
+
+ bool error = false;
+
+ SetThreadPoolAllocator(pool);
+
+ for (int s = 0; s < EShLangCount; ++s) {
+ if (! linkStage((EShLanguage)s, messages))
+ error = true;
+ }
+
+ // TODO: Link: cross-stage error checking
+
+ return ! error;
+}
+
+//
+// Merge the compilation units within the given stage into a single TIntermediate.
+//
+// Return true for success.
+//
+bool TProgram::linkStage(EShLanguage stage, EShMessages messages)
+{
+ if (stages[stage].size() == 0)
+ return true;
+
+ int numEsShaders = 0, numNonEsShaders = 0;
+ for (auto it = stages[stage].begin(); it != stages[stage].end(); ++it) {
+ if ((*it)->intermediate->getProfile() == EEsProfile) {
+ numEsShaders++;
+ } else {
+ numNonEsShaders++;
+ }
+ }
+
+ if (numEsShaders > 0 && numNonEsShaders > 0) {
+ infoSink->info.message(EPrefixError, "Cannot mix ES profile with non-ES profile shaders");
+ return false;
+ } else if (numEsShaders > 1) {
+ infoSink->info.message(EPrefixError, "Cannot attach multiple ES shaders of the same type to a single program");
+ return false;
+ }
+
+ //
+ // Be efficient for the common single compilation unit per stage case,
+ // reusing its TIntermediate instead of merging into a new one.
+ //
+ TIntermediate *firstIntermediate = stages[stage].front()->intermediate;
+ if (stages[stage].size() == 1)
+ intermediate[stage] = firstIntermediate;
+ else {
+ intermediate[stage] = new TIntermediate(stage,
+ firstIntermediate->getVersion(),
+ firstIntermediate->getProfile());
+
+ // The new TIntermediate must use the same origin as the original TIntermediates.
+ // Otherwise linking will fail due to different coordinate systems.
+ if (firstIntermediate->getOriginUpperLeft()) {
+ intermediate[stage]->setOriginUpperLeft();
+ }
+ intermediate[stage]->setSpv(firstIntermediate->getSpv());
+
+ newedIntermediate[stage] = true;
+ }
+
+ if (messages & EShMsgAST)
+ infoSink->info << "\nLinked " << StageName(stage) << " stage:\n\n";
+
+ if (stages[stage].size() > 1) {
+ std::list<TShader*>::const_iterator it;
+ for (it = stages[stage].begin(); it != stages[stage].end(); ++it)
+ intermediate[stage]->merge(*infoSink, *(*it)->intermediate);
+ }
+
+ intermediate[stage]->finalCheck(*infoSink, (messages & EShMsgKeepUncalled) != 0);
+
+ if (messages & EShMsgAST)
+ intermediate[stage]->output(*infoSink, true);
+
+ return intermediate[stage]->getNumErrors() == 0;
+}
+
+const char* TProgram::getInfoLog()
+{
+ return infoSink->info.c_str();
+}
+
+const char* TProgram::getInfoDebugLog()
+{
+ return infoSink->debug.c_str();
+}
+
+//
+// Reflection implementation.
+//
+
+bool TProgram::buildReflection(int opts)
+{
+ if (! linked || reflection)
+ return false;
+
+ int firstStage = EShLangVertex, lastStage = EShLangFragment;
+
+ if (opts & EShReflectionIntermediateIO) {
+ // if we're reflecting intermediate I/O, determine the first and last stage linked and use those as the
+ // boundaries for which stages generate pipeline inputs/outputs
+ firstStage = EShLangCount;
+ lastStage = 0;
+ for (int s = 0; s < EShLangCount; ++s) {
+ if (intermediate[s]) {
+ firstStage = std::min(firstStage, s);
+ lastStage = std::max(lastStage, s);
+ }
+ }
+ }
+
+ reflection = new TReflection((EShReflectionOptions)opts, (EShLanguage)firstStage, (EShLanguage)lastStage);
+
+ for (int s = 0; s < EShLangCount; ++s) {
+ if (intermediate[s]) {
+ if (! reflection->addStage((EShLanguage)s, *intermediate[s]))
+ return false;
+ }
+ }
+
+ return true;
+}
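+
+// A small sketch of the intended call order, assuming 'program' was linked
+// successfully above (EShReflectionDefault is the zero value of
+// EShReflectionOptions):
+//
+//     if (program.buildReflection(EShReflectionDefault)) {
+//         for (int i = 0; i < program.getNumUniformVariables(); ++i)
+//             printf("%s\n", program.getUniform(i).name.c_str());
+//     }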
+
+unsigned TProgram::getLocalSize(int dim) const { return reflection->getLocalSize(dim); }
+int TProgram::getReflectionIndex(const char* name) const { return reflection->getIndex(name); }
+
+int TProgram::getNumUniformVariables() const { return reflection->getNumUniforms(); }
+const TObjectReflection& TProgram::getUniform(int index) const { return reflection->getUniform(index); }
+int TProgram::getNumUniformBlocks() const { return reflection->getNumUniformBlocks(); }
+const TObjectReflection& TProgram::getUniformBlock(int index) const { return reflection->getUniformBlock(index); }
+int TProgram::getNumPipeInputs() const { return reflection->getNumPipeInputs(); }
+const TObjectReflection& TProgram::getPipeInput(int index) const { return reflection->getPipeInput(index); }
+int TProgram::getNumPipeOutputs() const { return reflection->getNumPipeOutputs(); }
+const TObjectReflection& TProgram::getPipeOutput(int index) const { return reflection->getPipeOutput(index); }
+int TProgram::getNumBufferVariables() const { return reflection->getNumBufferVariables(); }
+const TObjectReflection& TProgram::getBufferVariable(int index) const { return reflection->getBufferVariable(index); }
+int TProgram::getNumBufferBlocks() const { return reflection->getNumStorageBuffers(); }
+const TObjectReflection& TProgram::getBufferBlock(int index) const { return reflection->getStorageBufferBlock(index); }
+int TProgram::getNumAtomicCounters() const { return reflection->getNumAtomicCounters(); }
+const TObjectReflection& TProgram::getAtomicCounter(int index) const { return reflection->getAtomicCounter(index); }
+
+void TProgram::dumpReflection() { reflection->dump(); }
+
+//
+// I/O mapping implementation.
+//
+bool TProgram::mapIO(TIoMapResolver* resolver)
+{
+ if (! linked || ioMapper)
+ return false;
+
+ ioMapper = new TIoMapper;
+
+ for (int s = 0; s < EShLangCount; ++s) {
+ if (intermediate[s]) {
+ if (! ioMapper->addStage((EShLanguage)s, *intermediate[s], *infoSink, resolver))
+ return false;
+ }
+ }
+
+ return true;
+}
+
+} // end namespace glslang
diff --git a/thirdparty/glslang/glslang/MachineIndependent/SymbolTable.cpp b/thirdparty/glslang/glslang/MachineIndependent/SymbolTable.cpp
new file mode 100644
index 0000000000..c0a02e68a7
--- /dev/null
+++ b/thirdparty/glslang/glslang/MachineIndependent/SymbolTable.cpp
@@ -0,0 +1,436 @@
+//
+// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+// Copyright (C) 2012-2013 LunarG, Inc.
+// Copyright (C) 2017 ARM Limited.
+// Copyright (C) 2015-2018 Google, Inc.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+//
+// Symbol table for parsing. Most functionality and main ideas
+// are documented in the header file.
+//
+
+#include "SymbolTable.h"
+
+namespace glslang {
+
+//
+// TType helper function needs a place to live.
+//
+
+//
+// Recursively generate mangled names.
+//
+void TType::buildMangledName(TString& mangledName) const
+{
+ if (isMatrix())
+ mangledName += 'm';
+ else if (isVector())
+ mangledName += 'v';
+
+ switch (basicType) {
+ case EbtFloat: mangledName += 'f'; break;
+ case EbtDouble: mangledName += 'd'; break;
+ case EbtFloat16: mangledName += "f16"; break;
+ case EbtInt: mangledName += 'i'; break;
+ case EbtUint: mangledName += 'u'; break;
+ case EbtInt8: mangledName += "i8"; break;
+ case EbtUint8: mangledName += "u8"; break;
+ case EbtInt16: mangledName += "i16"; break;
+ case EbtUint16: mangledName += "u16"; break;
+ case EbtInt64: mangledName += "i64"; break;
+ case EbtUint64: mangledName += "u64"; break;
+ case EbtBool: mangledName += 'b'; break;
+ case EbtAtomicUint: mangledName += "au"; break;
+#ifdef NV_EXTENSIONS
+ case EbtAccStructNV: mangledName += "asnv"; break;
+#endif
+ case EbtSampler:
+ switch (sampler.type) {
+#ifdef AMD_EXTENSIONS
+ case EbtFloat16: mangledName += "f16"; break;
+#endif
+ case EbtInt: mangledName += "i"; break;
+ case EbtUint: mangledName += "u"; break;
+ default: break; // some compilers want this
+ }
+ if (sampler.image)
+ mangledName += "I"; // a normal image
+ else if (sampler.sampler)
+ mangledName += "p"; // a "pure" sampler
+ else if (!sampler.combined)
+ mangledName += "t"; // a "pure" texture
+ else
+ mangledName += "s"; // traditional combined sampler
+ if (sampler.arrayed)
+ mangledName += "A";
+ if (sampler.shadow)
+ mangledName += "S";
+ if (sampler.external)
+ mangledName += "E";
+ if (sampler.yuv)
+ mangledName += "Y";
+ switch (sampler.dim) {
+ case Esd1D: mangledName += "1"; break;
+ case Esd2D: mangledName += "2"; break;
+ case Esd3D: mangledName += "3"; break;
+ case EsdCube: mangledName += "C"; break;
+ case EsdRect: mangledName += "R2"; break;
+ case EsdBuffer: mangledName += "B"; break;
+ case EsdSubpass: mangledName += "P"; break;
+ default: break; // some compilers want this
+ }
+
+ if (sampler.hasReturnStruct()) {
+ // Name mangle for sampler return struct uses struct table index.
+ mangledName += "-tx-struct";
+
+ char text[16]; // plenty of space for these small integers.
+ snprintf(text, sizeof(text), "%d-", sampler.structReturnIndex);
+ mangledName += text;
+ } else {
+ switch (sampler.getVectorSize()) {
+ case 1: mangledName += "1"; break;
+ case 2: mangledName += "2"; break;
+ case 3: mangledName += "3"; break;
+ case 4: break; // default to prior name mangle behavior
+ }
+ }
+
+ if (sampler.ms)
+ mangledName += "M";
+ break;
+ case EbtStruct:
+ case EbtBlock:
+ if (basicType == EbtStruct)
+ mangledName += "struct-";
+ else
+ mangledName += "block-";
+ if (typeName)
+ mangledName += *typeName;
+ for (unsigned int i = 0; i < structure->size(); ++i) {
+ mangledName += '-';
+ (*structure)[i].type->buildMangledName(mangledName);
+ }
+ default:
+ break;
+ }
+
+ if (getVectorSize() > 0)
+ mangledName += static_cast<char>('0' + getVectorSize());
+ else {
+ mangledName += static_cast<char>('0' + getMatrixCols());
+ mangledName += static_cast<char>('0' + getMatrixRows());
+ }
+
+ if (arraySizes) {
+ const int maxSize = 11;
+ char buf[maxSize];
+ for (int i = 0; i < arraySizes->getNumDims(); ++i) {
+ if (arraySizes->getDimNode(i)) {
+ if (arraySizes->getDimNode(i)->getAsSymbolNode())
+ snprintf(buf, maxSize, "s%d", arraySizes->getDimNode(i)->getAsSymbolNode()->getId());
+ else
+ snprintf(buf, maxSize, "s%p", arraySizes->getDimNode(i));
+ } else
+ snprintf(buf, maxSize, "%d", arraySizes->getDimSize(i));
+ mangledName += '[';
+ mangledName += buf;
+ mangledName += ']';
+ }
+ }
+}
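+
+// Illustrative sketch (hypothetical types, not part of upstream glslang),
+// assuming a float mat3 and a three-element vec4 array:
+//
+//     TString mangled;
+//     mat3Type.buildMangledName(mangled);       // mangled == "mf33" ('m', 'f', cols, rows)
+//
+//     TString mangled2;
+//     vec4ArrayType.buildMangledName(mangled2); // mangled2 == "vf4[3]"
+//
+// These strings are what make symbol-table lookups of overloaded functions
+// unambiguous.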
+
+//
+// Dump functions.
+//
+
+void TSymbol::dumpExtensions(TInfoSink& infoSink) const
+{
+ int numExtensions = getNumExtensions();
+ if (numExtensions) {
+ infoSink.debug << " <";
+
+ for (int i = 0; i < numExtensions; i++)
+ infoSink.debug << getExtensions()[i] << ",";
+
+ infoSink.debug << ">";
+ }
+}
+
+void TVariable::dump(TInfoSink& infoSink, bool complete) const
+{
+ if (complete) {
+ infoSink.debug << getName().c_str() << ": " << type.getCompleteString();
+ dumpExtensions(infoSink);
+ } else {
+ infoSink.debug << getName().c_str() << ": " << type.getStorageQualifierString() << " "
+ << type.getBasicTypeString();
+
+ if (type.isArray())
+ infoSink.debug << "[0]";
+ }
+
+ infoSink.debug << "\n";
+}
+
+void TFunction::dump(TInfoSink& infoSink, bool complete) const
+{
+ if (complete) {
+ infoSink.debug << getName().c_str() << ": " << returnType.getCompleteString() << " " << getName().c_str()
+ << "(";
+
+ int numParams = getParamCount();
+ for (int i = 0; i < numParams; i++) {
+ const TParameter &param = parameters[i];
+ infoSink.debug << param.type->getCompleteString() << " "
+ << (param.type->isStruct() ? "of " + param.type->getTypeName() + " " : "")
+ << (param.name ? *param.name : "") << (i < numParams - 1 ? "," : "");
+ }
+
+ infoSink.debug << ")";
+ dumpExtensions(infoSink);
+ } else {
+ infoSink.debug << getName().c_str() << ": " << returnType.getBasicTypeString() << " "
+ << getMangledName().c_str();
+ }
+
+ infoSink.debug << "\n";
+}
+
+void TAnonMember::dump(TInfoSink& infoSink, bool complete) const
+{
+ infoSink.debug << "anonymous member " << getMemberNumber() << " of " << getAnonContainer().getName().c_str()
+ << "\n";
+}
+
+void TSymbolTableLevel::dump(TInfoSink& infoSink, bool complete) const
+{
+ tLevel::const_iterator it;
+ for (it = level.begin(); it != level.end(); ++it)
+ (*it).second->dump(infoSink, complete);
+}
+
+void TSymbolTable::dump(TInfoSink& infoSink, bool complete) const
+{
+ for (int level = currentLevel(); level >= 0; --level) {
+ infoSink.debug << "LEVEL " << level << "\n";
+ table[level]->dump(infoSink, complete);
+ }
+}
+
+//
+// Functions have buried pointers to delete.
+//
+TFunction::~TFunction()
+{
+ for (TParamList::iterator i = parameters.begin(); i != parameters.end(); ++i)
+ delete (*i).type;
+}
+
+//
+// Symbol table levels are a map of pointers to symbols that have to be deleted.
+//
+TSymbolTableLevel::~TSymbolTableLevel()
+{
+ for (tLevel::iterator it = level.begin(); it != level.end(); ++it)
+ delete (*it).second;
+
+ delete [] defaultPrecision;
+}
+
+//
+// Change all function entries in the table with the non-mangled name
+// to be related to the provided built-in operation.
+//
+void TSymbolTableLevel::relateToOperator(const char* name, TOperator op)
+{
+ tLevel::const_iterator candidate = level.lower_bound(name);
+ while (candidate != level.end()) {
+ const TString& candidateName = (*candidate).first;
+ TString::size_type parenAt = candidateName.find_first_of('(');
+ if (parenAt != candidateName.npos && candidateName.compare(0, parenAt, name) == 0) {
+ TFunction* function = (*candidate).second->getAsFunction();
+ function->relateToOperator(op);
+ } else
+ break;
+ ++candidate;
+ }
+}
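+
+// Illustrative note (hypothetical call): because all overloads share the name
+// prefix up to '(', a single call such as
+//
+//     relateToOperator("textureLod", EOpTextureLod);
+//
+// tags every entry whose key begins with "textureLod(", so one call covers
+// all mangled overloads at this level.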
+
+// Make all function overloads of the given name require an extension(s).
+// Should only be used for a version/profile that actually needs the extension(s).
+void TSymbolTableLevel::setFunctionExtensions(const char* name, int num, const char* const extensions[])
+{
+ tLevel::const_iterator candidate = level.lower_bound(name);
+ while (candidate != level.end()) {
+ const TString& candidateName = (*candidate).first;
+ TString::size_type parenAt = candidateName.find_first_of('(');
+ if (parenAt != candidateName.npos && candidateName.compare(0, parenAt, name) == 0) {
+ TSymbol* symbol = candidate->second;
+ symbol->setExtensions(num, extensions);
+ } else
+ break;
+ ++candidate;
+ }
+}
+
+//
+// Make all symbols in this table level read only.
+//
+void TSymbolTableLevel::readOnly()
+{
+ for (tLevel::iterator it = level.begin(); it != level.end(); ++it)
+ (*it).second->makeReadOnly();
+}
+
+//
+// Copy a symbol, but the copy is writable; call readOnly() afterward if that's not desired.
+//
+TSymbol::TSymbol(const TSymbol& copyOf)
+{
+ name = NewPoolTString(copyOf.name->c_str());
+ uniqueId = copyOf.uniqueId;
+ writable = true;
+}
+
+TVariable::TVariable(const TVariable& copyOf) : TSymbol(copyOf)
+{
+ type.deepCopy(copyOf.type);
+ userType = copyOf.userType;
+
+ // we don't support specialization-constant subtrees in cloned tables, only extensions
+ constSubtree = nullptr;
+ extensions = nullptr;
+ memberExtensions = nullptr;
+ if (copyOf.getNumExtensions() > 0)
+ setExtensions(copyOf.getNumExtensions(), copyOf.getExtensions());
+ if (copyOf.hasMemberExtensions()) {
+ for (int m = 0; m < (int)copyOf.type.getStruct()->size(); ++m) {
+ if (copyOf.getNumMemberExtensions(m) > 0)
+ setMemberExtensions(m, copyOf.getNumMemberExtensions(m), copyOf.getMemberExtensions(m));
+ }
+ }
+
+ if (! copyOf.constArray.empty()) {
+ assert(! copyOf.type.isStruct());
+ TConstUnionArray newArray(copyOf.constArray, 0, copyOf.constArray.size());
+ constArray = newArray;
+ }
+}
+
+TVariable* TVariable::clone() const
+{
+ TVariable *variable = new TVariable(*this);
+
+ return variable;
+}
+
+TFunction::TFunction(const TFunction& copyOf) : TSymbol(copyOf)
+{
+ for (unsigned int i = 0; i < copyOf.parameters.size(); ++i) {
+ TParameter param;
+ parameters.push_back(param);
+ parameters.back().copyParam(copyOf.parameters[i]);
+ }
+
+ extensions = nullptr;
+ if (copyOf.getNumExtensions() > 0)
+ setExtensions(copyOf.getNumExtensions(), copyOf.getExtensions());
+ returnType.deepCopy(copyOf.returnType);
+ mangledName = copyOf.mangledName;
+ op = copyOf.op;
+ defined = copyOf.defined;
+ prototyped = copyOf.prototyped;
+ implicitThis = copyOf.implicitThis;
+ illegalImplicitThis = copyOf.illegalImplicitThis;
+ defaultParamCount = copyOf.defaultParamCount;
+}
+
+TFunction* TFunction::clone() const
+{
+ TFunction *function = new TFunction(*this);
+
+ return function;
+}
+
+TAnonMember* TAnonMember::clone() const
+{
+ // Anonymous members of a given block should be cloned at a higher level,
+ // where they can all be assured to still end up pointing to a single
+ // copy of the original container.
+ assert(0);
+
+ return 0;
+}
+
+TSymbolTableLevel* TSymbolTableLevel::clone() const
+{
+ TSymbolTableLevel *symTableLevel = new TSymbolTableLevel();
+ symTableLevel->anonId = anonId;
+ symTableLevel->thisLevel = thisLevel;
+ std::vector<bool> containerCopied(anonId, false);
+ tLevel::const_iterator iter;
+ for (iter = level.begin(); iter != level.end(); ++iter) {
+ const TAnonMember* anon = iter->second->getAsAnonMember();
+ if (anon) {
+ // Insert all the anonymous members of this same container at once, and
+ // record that the container has been copied, so the remaining members are
+ // not inserted again later; they all stay part of the same new container.
+ if (! containerCopied[anon->getAnonId()]) {
+ TVariable* container = anon->getAnonContainer().clone();
+ container->changeName(NewPoolTString(""));
+ // insert the container and all its members
+ symTableLevel->insert(*container, false);
+ containerCopied[anon->getAnonId()] = true;
+ }
+ } else
+ symTableLevel->insert(*iter->second->clone(), false);
+ }
+
+ return symTableLevel;
+}
+
+void TSymbolTable::copyTable(const TSymbolTable& copyOf)
+{
+ assert(adoptedLevels == copyOf.adoptedLevels);
+
+ uniqueId = copyOf.uniqueId;
+ noBuiltInRedeclarations = copyOf.noBuiltInRedeclarations;
+ separateNameSpaces = copyOf.separateNameSpaces;
+ for (unsigned int i = copyOf.adoptedLevels; i < copyOf.table.size(); ++i)
+ table.push_back(copyOf.table[i]->clone());
+}
+
+} // end namespace glslang
diff --git a/thirdparty/glslang/glslang/MachineIndependent/SymbolTable.h b/thirdparty/glslang/glslang/MachineIndependent/SymbolTable.h
new file mode 100644
index 0000000000..f3873cff02
--- /dev/null
+++ b/thirdparty/glslang/glslang/MachineIndependent/SymbolTable.h
@@ -0,0 +1,872 @@
+//
+// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+// Copyright (C) 2013 LunarG, Inc.
+// Copyright (C) 2015-2018 Google, Inc.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+#ifndef _SYMBOL_TABLE_INCLUDED_
+#define _SYMBOL_TABLE_INCLUDED_
+
+//
+// Symbol table for parsing. Has these design characteristics:
+//
+// * The same symbol table can be used to compile many shaders, to avoid
+// repeating the effort of creating and loading the large number of
+// built-in symbols.
+//
+// --> This requires a copy mechanism, so initial pools used to create
+// the shared information can be popped. Done through "clone"
+// methods.
+//
+// * Name mangling will be used to give each function a unique name
+// so that symbol table lookups are never ambiguous. This allows
+// a simpler symbol table structure.
+//
+// * Pushing and popping of scope, so symbol table will really be a stack
+// of symbol tables. Searched from the top, with new inserts going into
+// the top.
+//
+// * Constants: Compile time constant symbols will keep their values
+// in the symbol table. The parser can substitute constants at parse
+// time, including doing constant folding and constant propagation.
+//
+// * No temporaries: Temporaries made from operations (+, --, .xy, etc.)
+// are tracked in the intermediate representation, not the symbol table.
+//
+
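+// A sketch of the name-mangling point above (hypothetical declaration):
+// a GLSL function "float f(float x)" is inserted under the mangled key
+// "f(f1;", while a variable "float f;" is inserted under plain "f", so the
+// two can coexist without an ambiguous lookup.
+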
+#include "../Include/Common.h"
+#include "../Include/intermediate.h"
+#include "../Include/InfoSink.h"
+
+namespace glslang {
+
+//
+// Symbol base class. (Can build functions or variables out of these...)
+//
+
+class TVariable;
+class TFunction;
+class TAnonMember;
+
+typedef TVector<const char*> TExtensionList;
+
+class TSymbol {
+public:
+ POOL_ALLOCATOR_NEW_DELETE(GetThreadPoolAllocator())
+ explicit TSymbol(const TString *n) : name(n), extensions(0), writable(true) { }
+ virtual TSymbol* clone() const = 0;
+ virtual ~TSymbol() { } // rely on all symbol owned memory coming from the pool
+
+ virtual const TString& getName() const { return *name; }
+ virtual void changeName(const TString* newName) { name = newName; }
+ virtual void addPrefix(const char* prefix)
+ {
+ TString newName(prefix);
+ newName.append(*name);
+ changeName(NewPoolTString(newName.c_str()));
+ }
+ virtual const TString& getMangledName() const { return getName(); }
+ virtual TFunction* getAsFunction() { return 0; }
+ virtual const TFunction* getAsFunction() const { return 0; }
+ virtual TVariable* getAsVariable() { return 0; }
+ virtual const TVariable* getAsVariable() const { return 0; }
+ virtual const TAnonMember* getAsAnonMember() const { return 0; }
+ virtual const TType& getType() const = 0;
+ virtual TType& getWritableType() = 0;
+ virtual void setUniqueId(int id) { uniqueId = id; }
+ virtual int getUniqueId() const { return uniqueId; }
+ virtual void setExtensions(int numExts, const char* const exts[])
+ {
+ assert(extensions == 0);
+ assert(numExts > 0);
+ extensions = NewPoolObject(extensions);
+ for (int e = 0; e < numExts; ++e)
+ extensions->push_back(exts[e]);
+ }
+ virtual int getNumExtensions() const { return extensions == nullptr ? 0 : (int)extensions->size(); }
+ virtual const char** getExtensions() const { return extensions->data(); }
+ virtual void dump(TInfoSink& infoSink, bool complete = false) const = 0;
+ void dumpExtensions(TInfoSink& infoSink) const;
+
+ virtual bool isReadOnly() const { return ! writable; }
+ virtual void makeReadOnly() { writable = false; }
+
+protected:
+ explicit TSymbol(const TSymbol&);
+ TSymbol& operator=(const TSymbol&);
+
+ const TString *name;
+ unsigned int uniqueId; // For cross-scope comparing during code generation
+
+ // For tracking what extensions must be present
+ // (don't use if correct version/profile is present).
+ TExtensionList* extensions; // an array of pointers to existing constant char strings
+
+ //
+ // N.B.: Non-const functions that will be generally used should assert on this,
+ // to avoid overwriting shared symbol-table information.
+ //
+ bool writable;
+};
+
+//
+// Variable class, meaning a symbol that's not a function.
+//
+// There could be a separate class hierarchy for constant variables;
+// only one of int, bool, or float (or none) is correct for any
+// particular use, but it's easy to do this way, it doesn't seem worth
+// having separate classes, and "getConst" can't simply return different
+// values for different types polymorphically, so this is just simple
+// and pragmatic.
+//
+class TVariable : public TSymbol {
+public:
+ TVariable(const TString *name, const TType& t, bool uT = false )
+ : TSymbol(name),
+ userType(uT),
+ constSubtree(nullptr),
+ memberExtensions(nullptr),
+ anonId(-1)
+ { type.shallowCopy(t); }
+ virtual TVariable* clone() const;
+ virtual ~TVariable() { }
+
+ virtual TVariable* getAsVariable() { return this; }
+ virtual const TVariable* getAsVariable() const { return this; }
+ virtual const TType& getType() const { return type; }
+ virtual TType& getWritableType() { assert(writable); return type; }
+ virtual bool isUserType() const { return userType; }
+ virtual const TConstUnionArray& getConstArray() const { return constArray; }
+ virtual TConstUnionArray& getWritableConstArray() { assert(writable); return constArray; }
+ virtual void setConstArray(const TConstUnionArray& array) { constArray = array; }
+ virtual void setConstSubtree(TIntermTyped* subtree) { constSubtree = subtree; }
+ virtual TIntermTyped* getConstSubtree() const { return constSubtree; }
+ virtual void setAnonId(int i) { anonId = i; }
+ virtual int getAnonId() const { return anonId; }
+
+ virtual void setMemberExtensions(int member, int numExts, const char* const exts[])
+ {
+ assert(type.isStruct());
+ assert(numExts > 0);
+ if (memberExtensions == nullptr) {
+ memberExtensions = NewPoolObject(memberExtensions);
+ memberExtensions->resize(type.getStruct()->size());
+ }
+ for (int e = 0; e < numExts; ++e)
+ (*memberExtensions)[member].push_back(exts[e]);
+ }
+ virtual bool hasMemberExtensions() const { return memberExtensions != nullptr; }
+ virtual int getNumMemberExtensions(int member) const
+ {
+ return memberExtensions == nullptr ? 0 : (int)(*memberExtensions)[member].size();
+ }
+ virtual const char** getMemberExtensions(int member) const { return (*memberExtensions)[member].data(); }
+
+ virtual void dump(TInfoSink& infoSink, bool complete = false) const;
+
+protected:
+ explicit TVariable(const TVariable&);
+ TVariable& operator=(const TVariable&);
+
+ TType type;
+ bool userType;
+
+ // We are assuming the pool allocator reclaims the memory behind constArray
+ // when this object is destroyed.
+
+ TConstUnionArray constArray; // for compile-time constant value
+ TIntermTyped* constSubtree; // for specialization constant computation
+ TVector<TExtensionList>* memberExtensions; // per-member extension list, allocated only when needed
+ int anonId; // the ID used for anonymous blocks: TODO: see if uniqueId could serve a dual purpose
+};
+
+//
+// The function sub-class of symbols and the parser will need to
+// share this definition of a function parameter.
+//
+struct TParameter {
+ TString *name;
+ TType* type;
+ TIntermTyped* defaultValue;
+ void copyParam(const TParameter& param)
+ {
+ if (param.name)
+ name = NewPoolTString(param.name->c_str());
+ else
+ name = 0;
+ type = param.type->clone();
+ defaultValue = param.defaultValue;
+ }
+ TBuiltInVariable getDeclaredBuiltIn() const { return type->getQualifier().declaredBuiltIn; }
+};
+
+//
+// The function sub-class of a symbol.
+//
+class TFunction : public TSymbol {
+public:
+ explicit TFunction(TOperator o) :
+ TSymbol(0),
+ op(o),
+ defined(false), prototyped(false), implicitThis(false), illegalImplicitThis(false), defaultParamCount(0) { }
+ TFunction(const TString *name, const TType& retType, TOperator tOp = EOpNull) :
+ TSymbol(name),
+ mangledName(*name + '('),
+ op(tOp),
+ defined(false), prototyped(false), implicitThis(false), illegalImplicitThis(false), defaultParamCount(0)
+ {
+ returnType.shallowCopy(retType);
+ declaredBuiltIn = retType.getQualifier().builtIn;
+ }
+ virtual TFunction* clone() const override;
+ virtual ~TFunction();
+
+ virtual TFunction* getAsFunction() override { return this; }
+ virtual const TFunction* getAsFunction() const override { return this; }
+
+ // Install 'p' as the (non-'this') last parameter.
+ // Non-'this' parameters are reflected in both the list of parameters and the
+ // mangled name.
+ virtual void addParameter(TParameter& p)
+ {
+ assert(writable);
+ parameters.push_back(p);
+ p.type->appendMangledName(mangledName);
+
+ if (p.defaultValue != nullptr)
+ defaultParamCount++;
+ }
+
+ // Install 'this' as the first parameter.
+ // 'this' is reflected in the list of parameters, but not the mangled name.
+ virtual void addThisParameter(TType& type, const char* name)
+ {
+ TParameter p = { NewPoolTString(name), new TType, nullptr };
+ p.type->shallowCopy(type);
+ parameters.insert(parameters.begin(), p);
+ }
+
+ virtual void addPrefix(const char* prefix) override
+ {
+ TSymbol::addPrefix(prefix);
+ mangledName.insert(0, prefix);
+ }
+
+ virtual void removePrefix(const TString& prefix)
+ {
+ assert(mangledName.compare(0, prefix.size(), prefix) == 0);
+ mangledName.erase(0, prefix.size());
+ }
+
+ virtual const TString& getMangledName() const override { return mangledName; }
+ virtual const TType& getType() const override { return returnType; }
+ virtual TBuiltInVariable getDeclaredBuiltInType() const { return declaredBuiltIn; }
+ virtual TType& getWritableType() override { return returnType; }
+ virtual void relateToOperator(TOperator o) { assert(writable); op = o; }
+ virtual TOperator getBuiltInOp() const { return op; }
+ virtual void setDefined() { assert(writable); defined = true; }
+ virtual bool isDefined() const { return defined; }
+ virtual void setPrototyped() { assert(writable); prototyped = true; }
+ virtual bool isPrototyped() const { return prototyped; }
+ virtual void setImplicitThis() { assert(writable); implicitThis = true; }
+ virtual bool hasImplicitThis() const { return implicitThis; }
+ virtual void setIllegalImplicitThis() { assert(writable); illegalImplicitThis = true; }
+ virtual bool hasIllegalImplicitThis() const { return illegalImplicitThis; }
+
+ // Return total number of parameters
+ virtual int getParamCount() const { return static_cast<int>(parameters.size()); }
+ // Return number of parameters with default values.
+ virtual int getDefaultParamCount() const { return defaultParamCount; }
+ // Return number of fixed parameters (without default values)
+ virtual int getFixedParamCount() const { return getParamCount() - getDefaultParamCount(); }
+
+ virtual TParameter& operator[](int i) { assert(writable); return parameters[i]; }
+ virtual const TParameter& operator[](int i) const { return parameters[i]; }
+
+ virtual void dump(TInfoSink& infoSink, bool complete = false) const override;
+
+protected:
+ explicit TFunction(const TFunction&);
+ TFunction& operator=(const TFunction&);
+
+ typedef TVector<TParameter> TParamList;
+ TParamList parameters;
+ TType returnType;
+ TBuiltInVariable declaredBuiltIn;
+
+ TString mangledName;
+ TOperator op;
+ bool defined;
+ bool prototyped;
+ bool implicitThis; // True if this function is allowed to see all members of 'this'
+ bool illegalImplicitThis; // True if this function is not supposed to have access to dynamic members of 'this',
+ // even if it finds member variables in the symbol table.
+ // This is important for a static member function that has member variables in scope,
+ // but is not allowed to use them, or see hidden symbols instead.
+ int defaultParamCount;
+};
+
+//
+// Members of anonymous blocks are a kind of TSymbol. They are not hidden in
+// the symbol table behind a container; rather they are visible and point to
+// their anonymous container. (The anonymous container is found through the
+// member, not the other way around.)
+//
+class TAnonMember : public TSymbol {
+public:
+ TAnonMember(const TString* n, unsigned int m, TVariable& a, int an) : TSymbol(n), anonContainer(a), memberNumber(m), anonId(an) { }
+ virtual TAnonMember* clone() const override;
+ virtual ~TAnonMember() { }
+
+ virtual const TAnonMember* getAsAnonMember() const override { return this; }
+ virtual const TVariable& getAnonContainer() const { return anonContainer; }
+ virtual unsigned int getMemberNumber() const { return memberNumber; }
+
+ virtual const TType& getType() const override
+ {
+ const TTypeList& types = *anonContainer.getType().getStruct();
+ return *types[memberNumber].type;
+ }
+
+ virtual TType& getWritableType() override
+ {
+ assert(writable);
+ const TTypeList& types = *anonContainer.getType().getStruct();
+ return *types[memberNumber].type;
+ }
+
+ virtual void setExtensions(int numExts, const char* const exts[]) override
+ {
+ anonContainer.setMemberExtensions(memberNumber, numExts, exts);
+ }
+ virtual int getNumExtensions() const override { return anonContainer.getNumMemberExtensions(memberNumber); }
+ virtual const char** getExtensions() const override { return anonContainer.getMemberExtensions(memberNumber); }
+
+ virtual int getAnonId() const { return anonId; }
+ virtual void dump(TInfoSink& infoSink, bool complete = false) const override;
+
+protected:
+ explicit TAnonMember(const TAnonMember&);
+ TAnonMember& operator=(const TAnonMember&);
+
+ TVariable& anonContainer;
+ unsigned int memberNumber;
+ int anonId;
+};
+
+class TSymbolTableLevel {
+public:
+ POOL_ALLOCATOR_NEW_DELETE(GetThreadPoolAllocator())
+ TSymbolTableLevel() : defaultPrecision(0), anonId(0), thisLevel(false) { }
+ ~TSymbolTableLevel();
+
+ bool insert(TSymbol& symbol, bool separateNameSpaces)
+ {
+ //
+ // returning true means symbol was added to the table with no semantic errors
+ //
+ const TString& name = symbol.getName();
+ if (name == "") {
+ symbol.getAsVariable()->setAnonId(anonId++);
+ // An empty name means an anonymous container, exposing its members to the external scope.
+ // Give it a name and insert its members in the symbol table, pointing to the container.
+ char buf[20];
+ snprintf(buf, 20, "%s%d", AnonymousPrefix, symbol.getAsVariable()->getAnonId());
+ symbol.changeName(NewPoolTString(buf));
+
+ return insertAnonymousMembers(symbol, 0);
+ } else {
+ // Check for redefinition errors:
+ // - STL itself will tell us if there is a direct name collision, with name mangling, at this level
+ // - additionally, check for function-redefining-variable name collisions
+ const TString& insertName = symbol.getMangledName();
+ if (symbol.getAsFunction()) {
+ // make sure there isn't a variable of this name
+ if (! separateNameSpaces && level.find(name) != level.end())
+ return false;
+
+ // insert, and whatever happens is okay
+ level.insert(tLevelPair(insertName, &symbol));
+
+ return true;
+ } else
+ return level.insert(tLevelPair(insertName, &symbol)).second;
+ }
+ }
+
+ // Add more members to an already inserted aggregate object
+ bool amend(TSymbol& symbol, int firstNewMember)
+ {
+ // See insert() for comments on basic explanation of insert.
+ // This operates similarly, but more simply.
+ // Only supporting amend of anonymous blocks so far.
+ if (IsAnonymous(symbol.getName()))
+ return insertAnonymousMembers(symbol, firstNewMember);
+ else
+ return false;
+ }
+
+ bool insertAnonymousMembers(TSymbol& symbol, int firstMember)
+ {
+ const TTypeList& types = *symbol.getAsVariable()->getType().getStruct();
+ for (unsigned int m = firstMember; m < types.size(); ++m) {
+ TAnonMember* member = new TAnonMember(&types[m].type->getFieldName(), m, *symbol.getAsVariable(), symbol.getAsVariable()->getAnonId());
+ if (! level.insert(tLevelPair(member->getMangledName(), member)).second)
+ return false;
+ }
+
+ return true;
+ }
+
+ TSymbol* find(const TString& name) const
+ {
+ tLevel::const_iterator it = level.find(name);
+ if (it == level.end())
+ return 0;
+ else
+ return (*it).second;
+ }
+
+ void findFunctionNameList(const TString& name, TVector<const TFunction*>& list)
+ {
+ size_t parenAt = name.find_first_of('(');
+ TString base(name, 0, parenAt + 1);
+
+ tLevel::const_iterator begin = level.lower_bound(base);
+ base[parenAt] = ')'; // assume ')' is lexically after '('
+ tLevel::const_iterator end = level.upper_bound(base);
+ for (tLevel::const_iterator it = begin; it != end; ++it)
+ list.push_back(it->second->getAsFunction());
+ }
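+
+ // A note on the trick above (illustrative): ')' sorts immediately after
+ // '(' in the map's ordering, so for name == "max(" the range
+ // ["max(", "max)") spans exactly the mangled overloads, e.g.
+ // "max(f1;f1;" and "max(vf2;vf2;".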
+
+ // See if there is already a function in the table having the given non-function-style name.
+ bool hasFunctionName(const TString& name) const
+ {
+ tLevel::const_iterator candidate = level.lower_bound(name);
+ if (candidate != level.end()) {
+ const TString& candidateName = (*candidate).first;
+ TString::size_type parenAt = candidateName.find_first_of('(');
+ if (parenAt != candidateName.npos && candidateName.compare(0, parenAt, name) == 0)
+ return true;
+ }
+
+ return false;
+ }
+
+ // See if there is a variable at this level having the given non-function-style name.
+ // Return true if name is found, and set variable to true if the name was a variable.
+ bool findFunctionVariableName(const TString& name, bool& variable) const
+ {
+ tLevel::const_iterator candidate = level.lower_bound(name);
+ if (candidate != level.end()) {
+ const TString& candidateName = (*candidate).first;
+ TString::size_type parenAt = candidateName.find_first_of('(');
+ if (parenAt == candidateName.npos) {
+ // not a mangled name
+ if (candidateName == name) {
+ // found a variable name match
+ variable = true;
+ return true;
+ }
+ } else {
+ // a mangled name
+ if (candidateName.compare(0, parenAt, name) == 0) {
+ // found a function name match
+ variable = false;
+ return true;
+ }
+ }
+ }
+
+ return false;
+ }
+
+ // Use this to do a lazy 'push' of precision defaults the first time
+ // a precision statement is seen in a new scope. It stays 0 when no
+ // push was needed. Thus, it holds not the current defaults, but what
+ // to restore the defaults to when popping a level.
+ void setPreviousDefaultPrecisions(const TPrecisionQualifier *p)
+ {
+ // can call multiple times at one scope, will only latch on first call,
+ // as we're tracking the previous scope's values, not the current values
+ if (defaultPrecision != 0)
+ return;
+
+ defaultPrecision = new TPrecisionQualifier[EbtNumTypes];
+ for (int t = 0; t < EbtNumTypes; ++t)
+ defaultPrecision[t] = p[t];
+ }
+
+ void getPreviousDefaultPrecisions(TPrecisionQualifier *p)
+ {
+ // can be called for table level pops that didn't set the
+ // defaults
+ if (defaultPrecision == 0 || p == 0)
+ return;
+
+ for (int t = 0; t < EbtNumTypes; ++t)
+ p[t] = defaultPrecision[t];
+ }
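+
+ // Illustrative flow (hedged; the exact call sites live in the parser): on
+ // the first "precision mediump float;" seen in a new scope, the parser
+ // calls setPreviousDefaultPrecisions() with the enclosing scope's
+ // defaults, and pop() later retrieves them via
+ // getPreviousDefaultPrecisions() so the outer defaults are restored.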
+
+ void relateToOperator(const char* name, TOperator op);
+ void setFunctionExtensions(const char* name, int num, const char* const extensions[]);
+ void dump(TInfoSink& infoSink, bool complete = false) const;
+ TSymbolTableLevel* clone() const;
+ void readOnly();
+
+ void setThisLevel() { thisLevel = true; }
+ bool isThisLevel() const { return thisLevel; }
+
+protected:
+ explicit TSymbolTableLevel(TSymbolTableLevel&);
+ TSymbolTableLevel& operator=(TSymbolTableLevel&);
+
+ typedef std::map<TString, TSymbol*, std::less<TString>, pool_allocator<std::pair<const TString, TSymbol*> > > tLevel;
+ typedef const tLevel::value_type tLevelPair;
+ typedef std::pair<tLevel::iterator, bool> tInsertResult;
+
+ tLevel level; // named mappings
+ TPrecisionQualifier *defaultPrecision;
+ int anonId;
+ bool thisLevel; // True if this level of the symbol table is a structure scope containing member functions
+ // that are supposed to see anonymous access to member variables.
+};
+
+class TSymbolTable {
+public:
+ TSymbolTable() : uniqueId(0), noBuiltInRedeclarations(false), separateNameSpaces(false), adoptedLevels(0)
+ {
+ //
+ // This symbol table cannot be used until push() is called.
+ //
+ }
+ ~TSymbolTable()
+ {
+ // this can be called explicitly; safest to code it so it can be called multiple times
+
+ // don't deallocate levels passed in from elsewhere
+ while (table.size() > adoptedLevels)
+ pop(0);
+ }
+
+ void adoptLevels(TSymbolTable& symTable)
+ {
+ for (unsigned int level = 0; level < symTable.table.size(); ++level) {
+ table.push_back(symTable.table[level]);
+ ++adoptedLevels;
+ }
+ uniqueId = symTable.uniqueId;
+ noBuiltInRedeclarations = symTable.noBuiltInRedeclarations;
+ separateNameSpaces = symTable.separateNameSpaces;
+ }
+
+ //
+ // While level adopting is generic, the methods below enact the following
+ // convention for levels:
+ // 0: common built-ins shared across all stages, all compiles, only one copy for all symbol tables
+ // 1: per-stage built-ins, shared across all compiles, but a different copy per stage
+ // 2: built-ins specific to a compile, like resources that are context-dependent, or redeclared built-ins
+ // 3: user-shader globals
+ //
+protected:
+ static const int globalLevel = 3;
+ bool isSharedLevel(int level) { return level <= 1; } // exclude all per-compile levels
+ bool isBuiltInLevel(int level) { return level <= 2; } // exclude user globals
+ bool isGlobalLevel(int level) { return level <= globalLevel; } // include user globals
+public:
+ bool isEmpty() { return table.size() == 0; }
+ bool atBuiltInLevel() { return isBuiltInLevel(currentLevel()); }
+ bool atGlobalLevel() { return isGlobalLevel(currentLevel()); }
+
+ void setNoBuiltInRedeclarations() { noBuiltInRedeclarations = true; }
+ void setSeparateNameSpaces() { separateNameSpaces = true; }
+
+ void push()
+ {
+ table.push_back(new TSymbolTableLevel);
+ }
+
+ // Make a new symbol-table level to represent the scope introduced by a structure
+ // containing member functions, such that the member functions can find anonymous
+ // references to member variables.
+ //
+ // 'thisSymbol' should have a name of "" to trigger anonymous structure-member
+ // symbol finds.
+ void pushThis(TSymbol& thisSymbol)
+ {
+ assert(thisSymbol.getName().size() == 0);
+ table.push_back(new TSymbolTableLevel);
+ table.back()->setThisLevel();
+ insert(thisSymbol);
+ }
+
+ void pop(TPrecisionQualifier *p)
+ {
+ table[currentLevel()]->getPreviousDefaultPrecisions(p);
+ delete table.back();
+ table.pop_back();
+ }
+
+ //
+ // Insert a visible symbol into the symbol table so it can
+ // be found later by name.
+ //
+ // Returns false if there was a name collision.
+ //
+ bool insert(TSymbol& symbol)
+ {
+ symbol.setUniqueId(++uniqueId);
+
+ // make sure there isn't a function of this variable name
+ if (! separateNameSpaces && ! symbol.getAsFunction() && table[currentLevel()]->hasFunctionName(symbol.getName()))
+ return false;
+
+ // check for not overloading or redefining a built-in function
+ if (noBuiltInRedeclarations) {
+ if (atGlobalLevel() && currentLevel() > 0) {
+ if (table[0]->hasFunctionName(symbol.getName()))
+ return false;
+ if (currentLevel() > 1 && table[1]->hasFunctionName(symbol.getName()))
+ return false;
+ }
+ }
+
+ return table[currentLevel()]->insert(symbol, separateNameSpaces);
+ }
+
+ // Add more members to an already inserted aggregate object
+ bool amend(TSymbol& symbol, int firstNewMember)
+ {
+ // See insert() for comments on basic explanation of insert.
+ // This operates similarly, but more simply.
+ return table[currentLevel()]->amend(symbol, firstNewMember);
+ }
+
+ //
+ // Allocate an internal temporary, which needs to be uniquely
+ // identified by the consumer of the AST, but never needs to be
+ // found by a symbol table search by name; hence it is allowed an
+ // arbitrary name with no worry of collision.
+ //
+ void makeInternalVariable(TSymbol& symbol)
+ {
+ symbol.setUniqueId(++uniqueId);
+ }
+
+ //
+ // Copy a variable or anonymous member's structure from a shared level so that
+ // it can be added (soon after return) to the symbol table where it can be
+ // modified without impacting other users of the shared table.
+ //
+ TSymbol* copyUpDeferredInsert(TSymbol* shared)
+ {
+ if (shared->getAsVariable()) {
+ TSymbol* copy = shared->clone();
+ copy->setUniqueId(shared->getUniqueId());
+ return copy;
+ } else {
+ const TAnonMember* anon = shared->getAsAnonMember();
+ assert(anon);
+ TVariable* container = anon->getAnonContainer().clone();
+ container->changeName(NewPoolTString(""));
+ container->setUniqueId(anon->getAnonContainer().getUniqueId());
+ return container;
+ }
+ }
+
+ TSymbol* copyUp(TSymbol* shared)
+ {
+ TSymbol* copy = copyUpDeferredInsert(shared);
+ table[globalLevel]->insert(*copy, separateNameSpaces);
+ if (shared->getAsVariable())
+ return copy;
+ else {
+ // return the copy of the anonymous member
+ return table[globalLevel]->find(shared->getName());
+ }
+ }
+
+ // Normal find of a symbol, that can optionally say whether the symbol was found
+ // at a built-in level or the current top-scope level.
+ TSymbol* find(const TString& name, bool* builtIn = 0, bool* currentScope = 0, int* thisDepthP = 0)
+ {
+ int level = currentLevel();
+ TSymbol* symbol;
+ int thisDepth = 0;
+ do {
+ if (table[level]->isThisLevel())
+ ++thisDepth;
+ symbol = table[level]->find(name);
+ --level;
+ } while (symbol == nullptr && level >= 0);
+ level++;
+ if (builtIn)
+ *builtIn = isBuiltInLevel(level);
+ if (currentScope)
+ *currentScope = isGlobalLevel(currentLevel()) || level == currentLevel(); // consider shared levels as "current scope" WRT user globals
+ if (thisDepthP != nullptr) {
+ if (! table[level]->isThisLevel())
+ thisDepth = 0;
+ *thisDepthP = thisDepth;
+ }
+
+ return symbol;
+ }
+
+ // Find of a symbol that returns how many layers deep of nested
+ // structures-with-member-functions ('this' scopes) deep the symbol was
+ // found in.
+ TSymbol* find(const TString& name, int& thisDepth)
+ {
+ int level = currentLevel();
+ TSymbol* symbol;
+ thisDepth = 0;
+ do {
+ if (table[level]->isThisLevel())
+ ++thisDepth;
+ symbol = table[level]->find(name);
+ --level;
+ } while (symbol == 0 && level >= 0);
+
+ if (! table[level + 1]->isThisLevel())
+ thisDepth = 0;
+
+ return symbol;
+ }
+
+ bool isFunctionNameVariable(const TString& name) const
+ {
+ if (separateNameSpaces)
+ return false;
+
+ int level = currentLevel();
+ do {
+ bool variable;
+ bool found = table[level]->findFunctionVariableName(name, variable);
+ if (found)
+ return variable;
+ --level;
+ } while (level >= 0);
+
+ return false;
+ }
+
+ void findFunctionNameList(const TString& name, TVector<const TFunction*>& list, bool& builtIn)
+ {
+ // For user levels, return the set found in the first scope with a match
+ builtIn = false;
+ int level = currentLevel();
+ do {
+ table[level]->findFunctionNameList(name, list);
+ --level;
+ } while (list.empty() && level >= globalLevel);
+
+ if (! list.empty())
+ return;
+
+ // Gather across all built-in levels; they don't hide each other
+ builtIn = true;
+ do {
+ table[level]->findFunctionNameList(name, list);
+ --level;
+ } while (level >= 0);
+ }
+
+ void relateToOperator(const char* name, TOperator op)
+ {
+ for (unsigned int level = 0; level < table.size(); ++level)
+ table[level]->relateToOperator(name, op);
+ }
+
+ void setFunctionExtensions(const char* name, int num, const char* const extensions[])
+ {
+ for (unsigned int level = 0; level < table.size(); ++level)
+ table[level]->setFunctionExtensions(name, num, extensions);
+ }
+
+ void setVariableExtensions(const char* name, int numExts, const char* const extensions[])
+ {
+ TSymbol* symbol = find(TString(name));
+ if (symbol == nullptr)
+ return;
+
+ symbol->setExtensions(numExts, extensions);
+ }
+
+ void setVariableExtensions(const char* blockName, const char* name, int numExts, const char* const extensions[])
+ {
+ TSymbol* symbol = find(TString(blockName));
+ if (symbol == nullptr)
+ return;
+ TVariable* variable = symbol->getAsVariable();
+ assert(variable != nullptr);
+
+ const TTypeList& structure = *variable->getAsVariable()->getType().getStruct();
+ for (int member = 0; member < (int)structure.size(); ++member) {
+ if (structure[member].type->getFieldName().compare(name) == 0) {
+ variable->setMemberExtensions(member, numExts, extensions);
+ return;
+ }
+ }
+ }
+
+ int getMaxSymbolId() { return uniqueId; }
+ void dump(TInfoSink& infoSink, bool complete = false) const;
+ void copyTable(const TSymbolTable& copyOf);
+
+ void setPreviousDefaultPrecisions(TPrecisionQualifier *p) { table[currentLevel()]->setPreviousDefaultPrecisions(p); }
+
+ void readOnly()
+ {
+ for (unsigned int level = 0; level < table.size(); ++level)
+ table[level]->readOnly();
+ }
+
+protected:
+ TSymbolTable(TSymbolTable&);
+ TSymbolTable& operator=(TSymbolTable&);
+
+ int currentLevel() const { return static_cast<int>(table.size()) - 1; }
+
+ std::vector<TSymbolTableLevel*> table;
+ int uniqueId; // for unique identification in code generation
+ bool noBuiltInRedeclarations;
+ bool separateNameSpaces;
+ unsigned int adoptedLevels;
+};
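+
+// Usage sketch (hypothetical; assumes an active thread pool allocator, and is
+// not part of upstream glslang):
+//
+//     TSymbolTable table;
+//     table.push();                        // open a scope level
+//     TVariable* v = new TVariable(NewPoolTString("x"), TType(EbtFloat, EvqGlobal));
+//     table.insert(*v);                    // keyed by name ("x")
+//     bool builtIn = false;
+//     TSymbol* found = table.find(TString("x"), &builtIn);
+//
+// find() searches from the innermost level outward and reports whether the
+// hit came from a built-in level.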
+
+} // end namespace glslang
+
+#endif // _SYMBOL_TABLE_INCLUDED_
diff --git a/thirdparty/glslang/glslang/MachineIndependent/Versions.cpp b/thirdparty/glslang/glslang/MachineIndependent/Versions.cpp
new file mode 100644
index 0000000000..f19c38502d
--- /dev/null
+++ b/thirdparty/glslang/glslang/MachineIndependent/Versions.cpp
@@ -0,0 +1,1130 @@
+//
+// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+// Copyright (C) 2012-2013 LunarG, Inc.
+// Copyright (C) 2017 ARM Limited.
+// Copyright (C) 2015-2018 Google, Inc.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+//
+// Help manage multiple profiles, versions, extensions etc.
+//
+// These don't return error codes, as the presumption is parsing will
+// always continue as if the tested feature were enabled, and thus there
+// is no error recovery needed.
+//
+
+//
+// HOW TO add a feature enabled by an extension.
+//
+// To add a new hypothetical "Feature F" to the front end, where an extension
+// "XXX_extension_X" can be used to enable the feature, do the following.
+//
+// OVERVIEW: Specific features are what are error-checked for, not
+// extensions: A specific Feature F might be enabled by an extension, or a
+// particular version in a particular profile, or a stage, or combinations, etc.
+//
+// The basic mechanism is to use the following to "declare" all the things that
+// enable/disable Feature F, in a code path that implements Feature F:
+//
+// requireProfile()
+// profileRequires()
+// requireStage()
+// checkDeprecated()
+// requireNotRemoved()
+// requireExtensions()
+//
+// Typically, only the first two calls are needed. They go into a code path that
+// implements Feature F, and will log the proper error/warning messages. Parsing
+// will then always continue as if the tested feature were enabled.
+//
+// There is typically no if-testing or conditional parsing, just insertion of the calls above.
+// However, if symbols specific to the extension are added (step 6), they will
+// only be added under tests that the minimum version and profile are present.
+//
+// 1) Add a symbol name for the extension string at the bottom of Versions.h:
+//
+// const char* const XXX_extension_X = "XXX_extension_X";
+//
+// 2) Add extension initialization to TParseVersions::initializeExtensionBehavior(),
+// the first function below:
+//
+// extensionBehavior[XXX_extension_X] = EBhDisable;
+//
+// 3) Add any preprocessor directives etc. in the next function, TParseVersions::getPreamble():
+//
+// "#define XXX_extension_X 1\n"
+//
+// The new-line is important, as that ends preprocess tokens.
+//
+// 4) Insert a profile check in the feature's path (unless all profiles support the feature,
+// for some version level). That is, call requireProfile() to constrain the profiles, e.g.:
+//
+// // ... in a path specific to Feature F...
+// requireProfile(loc,
+// ECoreProfile | ECompatibilityProfile,
+// "Feature F");
+//
+// 5) For each profile that supports the feature, insert version/extension checks:
+//
+// The most likely scenario is that Feature F can only be used with a
+// particular profile if XXX_extension_X is present or the version is
+// high enough that the core specification already incorporated it.
+//
+// // following the requireProfile() call...
+// profileRequires(loc,
+// ECoreProfile | ECompatibilityProfile,
+// 420, // 0 if no version incorporated the feature into the core spec.
+// XXX_extension_X, // can be a list of extensions that all add the feature
+// "Feature F Description");
+//
+// This allows the feature if either A) one of the extensions is enabled or
+// B) the version is high enough. If no version yet incorporates the feature
+// into core, pass in 0.
+//
+// This can be called multiple times, if different profiles support the
+// feature starting at different version numbers or with different
+// extensions.
+//
+// This must be called for each profile allowed by the initial call to requireProfile().
+//
+// Profiles are all masks, which can be "or"-ed together.
+//
+// ENoProfile
+// ECoreProfile
+// ECompatibilityProfile
+// EEsProfile
+//
+// The ENoProfile profile is only for desktop, before profiles showed up in version 150;
+// all other #version directives with no profile default to either es or core, and so have profiles.
+//
+// You can select all but a particular profile using ~. The following basically means "desktop":
+//
+// ~EEsProfile
+//
+// 6) If built-in symbols are added by the extension, add them in Initialize.cpp: Their use
+// will be automatically error checked against the extensions enabled at that moment.
+// see the comment at the top of Initialize.cpp for where to put them. Establish them at
+// the earliest release that supports the extension. Then, tag them with the
+// set of extensions that both enable them and are necessary, given the version of the symbol
+// table. (There is a different symbol table for each version.)
+//
+
+#include "parseVersions.h"
+#include "localintermediate.h"
+
+namespace glslang {
+
+//
+// Initialize all extensions, almost always to 'disable', as once their features
+// are incorporated into a core version, their features are supported through allowing that
+// core version, not through a pseudo-enablement of the extension.
+//
+void TParseVersions::initializeExtensionBehavior()
+{
+ extensionBehavior[E_GL_OES_texture_3D] = EBhDisable;
+ extensionBehavior[E_GL_OES_standard_derivatives] = EBhDisable;
+ extensionBehavior[E_GL_EXT_frag_depth] = EBhDisable;
+ extensionBehavior[E_GL_OES_EGL_image_external] = EBhDisable;
+ extensionBehavior[E_GL_OES_EGL_image_external_essl3] = EBhDisable;
+ extensionBehavior[E_GL_EXT_YUV_target] = EBhDisable;
+ extensionBehavior[E_GL_EXT_shader_texture_lod] = EBhDisable;
+ extensionBehavior[E_GL_EXT_shadow_samplers] = EBhDisable;
+ extensionBehavior[E_GL_ARB_texture_rectangle] = EBhDisable;
+ extensionBehavior[E_GL_3DL_array_objects] = EBhDisable;
+ extensionBehavior[E_GL_ARB_shading_language_420pack] = EBhDisable;
+ extensionBehavior[E_GL_ARB_texture_gather] = EBhDisable;
+ extensionBehavior[E_GL_ARB_gpu_shader5] = EBhDisablePartial;
+ extensionBehavior[E_GL_ARB_separate_shader_objects] = EBhDisable;
+ extensionBehavior[E_GL_ARB_compute_shader] = EBhDisable;
+ extensionBehavior[E_GL_ARB_tessellation_shader] = EBhDisable;
+ extensionBehavior[E_GL_ARB_enhanced_layouts] = EBhDisable;
+ extensionBehavior[E_GL_ARB_texture_cube_map_array] = EBhDisable;
+ extensionBehavior[E_GL_ARB_shader_texture_lod] = EBhDisable;
+ extensionBehavior[E_GL_ARB_explicit_attrib_location] = EBhDisable;
+ extensionBehavior[E_GL_ARB_shader_image_load_store] = EBhDisable;
+ extensionBehavior[E_GL_ARB_shader_atomic_counters] = EBhDisable;
+ extensionBehavior[E_GL_ARB_shader_draw_parameters] = EBhDisable;
+ extensionBehavior[E_GL_ARB_shader_group_vote] = EBhDisable;
+ extensionBehavior[E_GL_ARB_derivative_control] = EBhDisable;
+ extensionBehavior[E_GL_ARB_shader_texture_image_samples] = EBhDisable;
+ extensionBehavior[E_GL_ARB_viewport_array] = EBhDisable;
+ extensionBehavior[E_GL_ARB_gpu_shader_int64] = EBhDisable;
+ extensionBehavior[E_GL_ARB_shader_ballot] = EBhDisable;
+ extensionBehavior[E_GL_ARB_sparse_texture2] = EBhDisable;
+ extensionBehavior[E_GL_ARB_sparse_texture_clamp] = EBhDisable;
+ extensionBehavior[E_GL_ARB_shader_stencil_export] = EBhDisable;
+// extensionBehavior[E_GL_ARB_cull_distance] = EBhDisable; // present for 4.5, but need extension control over block members
+ extensionBehavior[E_GL_ARB_post_depth_coverage] = EBhDisable;
+ extensionBehavior[E_GL_ARB_shader_viewport_layer_array] = EBhDisable;
+
+ extensionBehavior[E_GL_KHR_shader_subgroup_basic] = EBhDisable;
+ extensionBehavior[E_GL_KHR_shader_subgroup_vote] = EBhDisable;
+ extensionBehavior[E_GL_KHR_shader_subgroup_arithmetic] = EBhDisable;
+ extensionBehavior[E_GL_KHR_shader_subgroup_ballot] = EBhDisable;
+ extensionBehavior[E_GL_KHR_shader_subgroup_shuffle] = EBhDisable;
+ extensionBehavior[E_GL_KHR_shader_subgroup_shuffle_relative] = EBhDisable;
+ extensionBehavior[E_GL_KHR_shader_subgroup_clustered] = EBhDisable;
+ extensionBehavior[E_GL_KHR_shader_subgroup_quad] = EBhDisable;
+ extensionBehavior[E_GL_KHR_memory_scope_semantics] = EBhDisable;
+
+ extensionBehavior[E_GL_EXT_shader_atomic_int64] = EBhDisable;
+
+ extensionBehavior[E_GL_EXT_shader_non_constant_global_initializers] = EBhDisable;
+ extensionBehavior[E_GL_EXT_shader_image_load_formatted] = EBhDisable;
+ extensionBehavior[E_GL_EXT_post_depth_coverage] = EBhDisable;
+ extensionBehavior[E_GL_EXT_control_flow_attributes] = EBhDisable;
+ extensionBehavior[E_GL_EXT_nonuniform_qualifier] = EBhDisable;
+ extensionBehavior[E_GL_EXT_samplerless_texture_functions] = EBhDisable;
+ extensionBehavior[E_GL_EXT_scalar_block_layout] = EBhDisable;
+ extensionBehavior[E_GL_EXT_fragment_invocation_density] = EBhDisable;
+ extensionBehavior[E_GL_EXT_buffer_reference] = EBhDisable;
+ extensionBehavior[E_GL_EXT_buffer_reference2] = EBhDisable;
+
+ extensionBehavior[E_GL_EXT_shader_16bit_storage] = EBhDisable;
+ extensionBehavior[E_GL_EXT_shader_8bit_storage] = EBhDisable;
+
+ // #line and #include
+ extensionBehavior[E_GL_GOOGLE_cpp_style_line_directive] = EBhDisable;
+ extensionBehavior[E_GL_GOOGLE_include_directive] = EBhDisable;
+
+#ifdef AMD_EXTENSIONS
+ extensionBehavior[E_GL_AMD_shader_ballot] = EBhDisable;
+ extensionBehavior[E_GL_AMD_shader_trinary_minmax] = EBhDisable;
+ extensionBehavior[E_GL_AMD_shader_explicit_vertex_parameter] = EBhDisable;
+ extensionBehavior[E_GL_AMD_gcn_shader] = EBhDisable;
+ extensionBehavior[E_GL_AMD_gpu_shader_half_float] = EBhDisable;
+ extensionBehavior[E_GL_AMD_texture_gather_bias_lod] = EBhDisable;
+ extensionBehavior[E_GL_AMD_gpu_shader_int16] = EBhDisable;
+ extensionBehavior[E_GL_AMD_shader_image_load_store_lod] = EBhDisable;
+ extensionBehavior[E_GL_AMD_shader_fragment_mask] = EBhDisable;
+ extensionBehavior[E_GL_AMD_gpu_shader_half_float_fetch] = EBhDisable;
+#endif
+
+#ifdef NV_EXTENSIONS
+ extensionBehavior[E_GL_NV_sample_mask_override_coverage] = EBhDisable;
+ extensionBehavior[E_SPV_NV_geometry_shader_passthrough] = EBhDisable;
+ extensionBehavior[E_GL_NV_viewport_array2] = EBhDisable;
+ extensionBehavior[E_GL_NV_stereo_view_rendering] = EBhDisable;
+ extensionBehavior[E_GL_NVX_multiview_per_view_attributes] = EBhDisable;
+ extensionBehavior[E_GL_NV_shader_atomic_int64] = EBhDisable;
+ extensionBehavior[E_GL_NV_conservative_raster_underestimation] = EBhDisable;
+ extensionBehavior[E_GL_NV_shader_noperspective_interpolation] = EBhDisable;
+ extensionBehavior[E_GL_NV_shader_subgroup_partitioned] = EBhDisable;
+ extensionBehavior[E_GL_NV_shading_rate_image] = EBhDisable;
+ extensionBehavior[E_GL_NV_ray_tracing] = EBhDisable;
+ extensionBehavior[E_GL_NV_fragment_shader_barycentric] = EBhDisable;
+ extensionBehavior[E_GL_NV_compute_shader_derivatives] = EBhDisable;
+ extensionBehavior[E_GL_NV_shader_texture_footprint] = EBhDisable;
+ extensionBehavior[E_GL_NV_mesh_shader] = EBhDisable;
+#endif
+
+ extensionBehavior[E_GL_NV_cooperative_matrix] = EBhDisable;
+
+ // AEP
+ extensionBehavior[E_GL_ANDROID_extension_pack_es31a] = EBhDisable;
+ extensionBehavior[E_GL_KHR_blend_equation_advanced] = EBhDisable;
+ extensionBehavior[E_GL_OES_sample_variables] = EBhDisable;
+ extensionBehavior[E_GL_OES_shader_image_atomic] = EBhDisable;
+ extensionBehavior[E_GL_OES_shader_multisample_interpolation] = EBhDisable;
+ extensionBehavior[E_GL_OES_texture_storage_multisample_2d_array] = EBhDisable;
+ extensionBehavior[E_GL_EXT_geometry_shader] = EBhDisable;
+ extensionBehavior[E_GL_EXT_geometry_point_size] = EBhDisable;
+ extensionBehavior[E_GL_EXT_gpu_shader5] = EBhDisable;
+ extensionBehavior[E_GL_EXT_primitive_bounding_box] = EBhDisable;
+ extensionBehavior[E_GL_EXT_shader_io_blocks] = EBhDisable;
+ extensionBehavior[E_GL_EXT_tessellation_shader] = EBhDisable;
+ extensionBehavior[E_GL_EXT_tessellation_point_size] = EBhDisable;
+ extensionBehavior[E_GL_EXT_texture_buffer] = EBhDisable;
+ extensionBehavior[E_GL_EXT_texture_cube_map_array] = EBhDisable;
+
+ // OES matching AEP
+ extensionBehavior[E_GL_OES_geometry_shader] = EBhDisable;
+ extensionBehavior[E_GL_OES_geometry_point_size] = EBhDisable;
+ extensionBehavior[E_GL_OES_gpu_shader5] = EBhDisable;
+ extensionBehavior[E_GL_OES_primitive_bounding_box] = EBhDisable;
+ extensionBehavior[E_GL_OES_shader_io_blocks] = EBhDisable;
+ extensionBehavior[E_GL_OES_tessellation_shader] = EBhDisable;
+ extensionBehavior[E_GL_OES_tessellation_point_size] = EBhDisable;
+ extensionBehavior[E_GL_OES_texture_buffer] = EBhDisable;
+ extensionBehavior[E_GL_OES_texture_cube_map_array] = EBhDisable;
+
+ // EXT extensions
+ extensionBehavior[E_GL_EXT_device_group] = EBhDisable;
+ extensionBehavior[E_GL_EXT_multiview] = EBhDisable;
+
+ // OVR extensions
+ extensionBehavior[E_GL_OVR_multiview] = EBhDisable;
+ extensionBehavior[E_GL_OVR_multiview2] = EBhDisable;
+
+ // explicit types
+ extensionBehavior[E_GL_EXT_shader_explicit_arithmetic_types] = EBhDisable;
+ extensionBehavior[E_GL_EXT_shader_explicit_arithmetic_types_int8] = EBhDisable;
+ extensionBehavior[E_GL_EXT_shader_explicit_arithmetic_types_int16] = EBhDisable;
+ extensionBehavior[E_GL_EXT_shader_explicit_arithmetic_types_int32] = EBhDisable;
+ extensionBehavior[E_GL_EXT_shader_explicit_arithmetic_types_int64] = EBhDisable;
+ extensionBehavior[E_GL_EXT_shader_explicit_arithmetic_types_float16] = EBhDisable;
+ extensionBehavior[E_GL_EXT_shader_explicit_arithmetic_types_float32] = EBhDisable;
+ extensionBehavior[E_GL_EXT_shader_explicit_arithmetic_types_float64] = EBhDisable;
+}
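+
+// Illustrative note (hedged): a shader flips one of these behaviors with an
+// extension pragma, e.g.
+//
+//     #extension GL_EXT_nonuniform_qualifier : enable
+//
+// which changes the corresponding entry from EBhDisable to EBhEnable for
+// that compile.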
+
+// Get code that is not part of a shared symbol table, is specific to this shader,
+// or is needed by the preprocessor (which does not use a shared symbol table).
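+// For example (illustrative): with spvVersion.vulkanGlsl == 100, the preamble built
+// below ends with "#define VULKAN 100" in addition to the extension macros.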
+void TParseVersions::getPreamble(std::string& preamble)
+{
+ if (profile == EEsProfile) {
+ preamble =
+ "#define GL_ES 1\n"
+ "#define GL_FRAGMENT_PRECISION_HIGH 1\n"
+ "#define GL_OES_texture_3D 1\n"
+ "#define GL_OES_standard_derivatives 1\n"
+ "#define GL_EXT_frag_depth 1\n"
+ "#define GL_OES_EGL_image_external 1\n"
+ "#define GL_OES_EGL_image_external_essl3 1\n"
+ "#define GL_EXT_YUV_target 1\n"
+ "#define GL_EXT_shader_texture_lod 1\n"
+ "#define GL_EXT_shadow_samplers 1\n"
+
+ // AEP
+ "#define GL_ANDROID_extension_pack_es31a 1\n"
+ "#define GL_KHR_blend_equation_advanced 1\n"
+ "#define GL_OES_sample_variables 1\n"
+ "#define GL_OES_shader_image_atomic 1\n"
+ "#define GL_OES_shader_multisample_interpolation 1\n"
+ "#define GL_OES_texture_storage_multisample_2d_array 1\n"
+ "#define GL_EXT_geometry_shader 1\n"
+ "#define GL_EXT_geometry_point_size 1\n"
+ "#define GL_EXT_gpu_shader5 1\n"
+ "#define GL_EXT_primitive_bounding_box 1\n"
+ "#define GL_EXT_shader_io_blocks 1\n"
+ "#define GL_EXT_tessellation_shader 1\n"
+ "#define GL_EXT_tessellation_point_size 1\n"
+ "#define GL_EXT_texture_buffer 1\n"
+ "#define GL_EXT_texture_cube_map_array 1\n"
+
+ // OES matching AEP
+ "#define GL_OES_geometry_shader 1\n"
+ "#define GL_OES_geometry_point_size 1\n"
+ "#define GL_OES_gpu_shader5 1\n"
+ "#define GL_OES_primitive_bounding_box 1\n"
+ "#define GL_OES_shader_io_blocks 1\n"
+ "#define GL_OES_tessellation_shader 1\n"
+ "#define GL_OES_tessellation_point_size 1\n"
+ "#define GL_OES_texture_buffer 1\n"
+ "#define GL_OES_texture_cube_map_array 1\n"
+ "#define GL_EXT_shader_non_constant_global_initializers 1\n"
+ ;
+
+#ifdef NV_EXTENSIONS
+ if (profile == EEsProfile && version >= 300) {
+ preamble += "#define GL_NV_shader_noperspective_interpolation 1\n";
+ }
+#endif
+
+ } else {
+ preamble =
+ "#define GL_FRAGMENT_PRECISION_HIGH 1\n"
+ "#define GL_ARB_texture_rectangle 1\n"
+ "#define GL_ARB_shading_language_420pack 1\n"
+ "#define GL_ARB_texture_gather 1\n"
+ "#define GL_ARB_gpu_shader5 1\n"
+ "#define GL_ARB_separate_shader_objects 1\n"
+ "#define GL_ARB_compute_shader 1\n"
+ "#define GL_ARB_tessellation_shader 1\n"
+ "#define GL_ARB_enhanced_layouts 1\n"
+ "#define GL_ARB_texture_cube_map_array 1\n"
+ "#define GL_ARB_shader_texture_lod 1\n"
+ "#define GL_ARB_explicit_attrib_location 1\n"
+ "#define GL_ARB_shader_image_load_store 1\n"
+ "#define GL_ARB_shader_atomic_counters 1\n"
+ "#define GL_ARB_shader_draw_parameters 1\n"
+ "#define GL_ARB_shader_group_vote 1\n"
+ "#define GL_ARB_derivative_control 1\n"
+ "#define GL_ARB_shader_texture_image_samples 1\n"
+ "#define GL_ARB_viewport_array 1\n"
+ "#define GL_ARB_gpu_shader_int64 1\n"
+ "#define GL_ARB_shader_ballot 1\n"
+ "#define GL_ARB_sparse_texture2 1\n"
+ "#define GL_ARB_sparse_texture_clamp 1\n"
+ "#define GL_ARB_shader_stencil_export 1\n"
+// "#define GL_ARB_cull_distance 1\n" // present for 4.5, but need extension control over block members
+ "#define GL_ARB_post_depth_coverage 1\n"
+ "#define GL_EXT_shader_non_constant_global_initializers 1\n"
+ "#define GL_EXT_shader_image_load_formatted 1\n"
+ "#define GL_EXT_post_depth_coverage 1\n"
+ "#define GL_EXT_control_flow_attributes 1\n"
+ "#define GL_EXT_nonuniform_qualifier 1\n"
+ "#define GL_EXT_shader_16bit_storage 1\n"
+ "#define GL_EXT_shader_8bit_storage 1\n"
+ "#define GL_EXT_samplerless_texture_functions 1\n"
+ "#define GL_EXT_scalar_block_layout 1\n"
+ "#define GL_EXT_fragment_invocation_density 1\n"
+ "#define GL_EXT_buffer_reference 1\n"
+ "#define GL_EXT_buffer_reference2 1\n"
+
+ // GL_KHR_shader_subgroup
+ "#define GL_KHR_shader_subgroup_basic 1\n"
+ "#define GL_KHR_shader_subgroup_vote 1\n"
+ "#define GL_KHR_shader_subgroup_arithmetic 1\n"
+ "#define GL_KHR_shader_subgroup_ballot 1\n"
+ "#define GL_KHR_shader_subgroup_shuffle 1\n"
+ "#define GL_KHR_shader_subgroup_shuffle_relative 1\n"
+ "#define GL_KHR_shader_subgroup_clustered 1\n"
+ "#define GL_KHR_shader_subgroup_quad 1\n"
+
+ "#define E_GL_EXT_shader_atomic_int64 1\n"
+
+#ifdef AMD_EXTENSIONS
+ "#define GL_AMD_shader_ballot 1\n"
+ "#define GL_AMD_shader_trinary_minmax 1\n"
+ "#define GL_AMD_shader_explicit_vertex_parameter 1\n"
+ "#define GL_AMD_gcn_shader 1\n"
+ "#define GL_AMD_gpu_shader_half_float 1\n"
+ "#define GL_AMD_texture_gather_bias_lod 1\n"
+ "#define GL_AMD_gpu_shader_int16 1\n"
+ "#define GL_AMD_shader_image_load_store_lod 1\n"
+ "#define GL_AMD_shader_fragment_mask 1\n"
+ "#define GL_AMD_gpu_shader_half_float_fetch 1\n"
+#endif
+
+#ifdef NV_EXTENSIONS
+ "#define GL_NV_sample_mask_override_coverage 1\n"
+ "#define GL_NV_geometry_shader_passthrough 1\n"
+ "#define GL_NV_viewport_array2 1\n"
+ "#define GL_NV_shader_atomic_int64 1\n"
+ "#define GL_NV_conservative_raster_underestimation 1\n"
+ "#define GL_NV_shader_subgroup_partitioned 1\n"
+ "#define GL_NV_shading_rate_image 1\n"
+ "#define GL_NV_ray_tracing 1\n"
+ "#define GL_NV_fragment_shader_barycentric 1\n"
+ "#define GL_NV_compute_shader_derivatives 1\n"
+ "#define GL_NV_shader_texture_footprint 1\n"
+ "#define GL_NV_mesh_shader 1\n"
+#endif
+ "#define GL_NV_cooperative_matrix 1\n"
+
+ "#define GL_EXT_shader_explicit_arithmetic_types 1\n"
+ "#define GL_EXT_shader_explicit_arithmetic_types_int8 1\n"
+ "#define GL_EXT_shader_explicit_arithmetic_types_int16 1\n"
+ "#define GL_EXT_shader_explicit_arithmetic_types_int32 1\n"
+ "#define GL_EXT_shader_explicit_arithmetic_types_int64 1\n"
+ "#define GL_EXT_shader_explicit_arithmetic_types_float16 1\n"
+ "#define GL_EXT_shader_explicit_arithmetic_types_float32 1\n"
+ "#define GL_EXT_shader_explicit_arithmetic_types_float64 1\n"
+ ;
+
+ if (version >= 150) {
+ // define GL_core_profile and GL_compatibility_profile
+ preamble += "#define GL_core_profile 1\n";
+
+ if (profile == ECompatibilityProfile)
+ preamble += "#define GL_compatibility_profile 1\n";
+ }
+ }
+
+ if ((profile != EEsProfile && version >= 140) ||
+ (profile == EEsProfile && version >= 310)) {
+ preamble +=
+ "#define GL_EXT_device_group 1\n"
+ "#define GL_EXT_multiview 1\n"
+ ;
+ }
+
+ if (version >= 300 /* both ES and non-ES */) {
+ preamble +=
+ "#define GL_OVR_multiview 1\n"
+ "#define GL_OVR_multiview2 1\n"
+ ;
+ }
+
+ // #line and #include
+ preamble +=
+ "#define GL_GOOGLE_cpp_style_line_directive 1\n"
+ "#define GL_GOOGLE_include_directive 1\n"
+ ;
+
+ // #define VULKAN XXXX
+ const int numberBufSize = 12;
+ char numberBuf[numberBufSize];
+ if (spvVersion.vulkanGlsl > 0) {
+ preamble += "#define VULKAN ";
+ snprintf(numberBuf, numberBufSize, "%d", spvVersion.vulkanGlsl);
+ preamble += numberBuf;
+ preamble += "\n";
+ }
+ // #define GL_SPIRV XXXX
+ if (spvVersion.openGl > 0) {
+ preamble += "#define GL_SPIRV ";
+ snprintf(numberBuf, numberBufSize, "%d", spvVersion.openGl);
+ preamble += numberBuf;
+ preamble += "\n";
+ }
+
+}
+
+//
+// When to use requireProfile():
+//
+// Use if only some profiles support a feature. However, if within a profile the feature
+// is version or extension specific, follow this call with calls to profileRequires().
+//
+// Operation: If the current profile is not one of the profileMask,
+// give an error message.
+//
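+// For example (illustrative):
+//     requireProfile(loc, ECoreProfile | ECompatibilityProfile, "double");
+// errors under EEsProfile or ENoProfile before any version check runs.
+//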
+void TParseVersions::requireProfile(const TSourceLoc& loc, int profileMask, const char* featureDesc)
+{
+ if (! (profile & profileMask))
+ error(loc, "not supported with this profile:", featureDesc, ProfileName(profile));
+}
+
+//
+// Map from stage enum to externally readable text name.
+//
+const char* StageName(EShLanguage stage)
+{
+ switch(stage) {
+ case EShLangVertex: return "vertex";
+ case EShLangTessControl: return "tessellation control";
+ case EShLangTessEvaluation: return "tessellation evaluation";
+ case EShLangGeometry: return "geometry";
+ case EShLangFragment: return "fragment";
+ case EShLangCompute: return "compute";
+#ifdef NV_EXTENSIONS
+ case EShLangRayGenNV: return "ray-generation";
+ case EShLangIntersectNV: return "intersection";
+ case EShLangAnyHitNV: return "any-hit";
+ case EShLangClosestHitNV: return "closest-hit";
+ case EShLangMissNV: return "miss";
+ case EShLangCallableNV: return "callable";
+ case EShLangMeshNV: return "mesh";
+ case EShLangTaskNV: return "task";
+#endif
+ default: return "unknown stage";
+ }
+}
+
+//
+// When to use profileRequires():
+//
+// If a set of profiles has the same requirements for what version or extensions
+// are needed to support a feature.
+//
+// It must be called for each profile that needs protection. Use requireProfile() first
+// to reduce that set of profiles.
+//
+// Operation: Will issue warnings/errors based on the current profile, version, and extension
+// behaviors. It only checks extensions when the current profile is one of the profileMask.
+//
+// A minVersion of 0 means no version of the profileMask supports this in core;
+// the extension must be present.
+//
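+// For example (illustrative):
+//     profileRequires(loc, EEsProfile, 320, E_GL_EXT_tessellation_shader, "tessellation");
+// passes silently on ES 320+, and otherwise accepts the feature only when the
+// extension was enabled/required (warning first if its behavior is 'warn').
+//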
+
+// entry point that takes multiple extensions
+void TParseVersions::profileRequires(const TSourceLoc& loc, int profileMask, int minVersion, int numExtensions, const char* const extensions[], const char* featureDesc)
+{
+ if (profile & profileMask) {
+ bool okay = false;
+ if (minVersion > 0 && version >= minVersion)
+ okay = true;
+ for (int i = 0; i < numExtensions; ++i) {
+ switch (getExtensionBehavior(extensions[i])) {
+ case EBhWarn:
+ infoSink.info.message(EPrefixWarning, ("extension " + TString(extensions[i]) + " is being used for " + featureDesc).c_str(), loc);
+ // fall through
+ case EBhRequire:
+ case EBhEnable:
+ okay = true;
+ break;
+ default: break; // some compilers want this
+ }
+ }
+
+ if (! okay)
+ error(loc, "not supported for this version or the enabled extensions", featureDesc, "");
+ }
+}
+
+// entry point for the above that takes a single extension
+void TParseVersions::profileRequires(const TSourceLoc& loc, int profileMask, int minVersion, const char* extension, const char* featureDesc)
+{
+ profileRequires(loc, profileMask, minVersion, extension ? 1 : 0, &extension, featureDesc);
+}
+
+//
+// When to use requireStage()
+//
+// If only some stages support a feature.
+//
+// Operation: If the current stage is not in languageMask, give an error message.
+//
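+// For example (illustrative):
+//     requireStage(loc, EShLangFragmentMask, "discard");
+// errors from any stage other than the fragment stage.
+//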
+void TParseVersions::requireStage(const TSourceLoc& loc, EShLanguageMask languageMask, const char* featureDesc)
+{
+ if (((1 << language) & languageMask) == 0)
+ error(loc, "not supported in this stage:", featureDesc, StageName(language));
+}
+
+// This single-stage overload can be called when only one stage supports a feature.
+// If several stages do, they must all be specified in one call to the mask overload.
+void TParseVersions::requireStage(const TSourceLoc& loc, EShLanguage stage, const char* featureDesc)
+{
+ requireStage(loc, static_cast<EShLanguageMask>(1 << stage), featureDesc);
+}
+
+//
+// Within a set of profiles, see if a feature is deprecated and give an error or warning based on whether
+// a forward-compatible context is being used.
+//
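+// For example (illustrative values):
+//     checkDeprecated(loc, ECompatibilityProfile, 130, "gl_FragColor");
+// warns from version 130 on, or errors when forwardCompatible is set.
+//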
+void TParseVersions::checkDeprecated(const TSourceLoc& loc, int profileMask, int depVersion, const char* featureDesc)
+{
+ if (profile & profileMask) {
+ if (version >= depVersion) {
+ if (forwardCompatible)
+ error(loc, "deprecated, may be removed in future release", featureDesc, "");
+ else if (! suppressWarnings())
+ infoSink.info.message(EPrefixWarning, (TString(featureDesc) + " deprecated in version " +
+ String(depVersion) + "; may be removed in future release").c_str(), loc);
+ }
+ }
+}
+
+//
+// Within a set of profiles, see if a feature has now been removed and if so, give an error.
+// The version argument is the first version no longer having the feature.
+//
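+// For example (illustrative values):
+//     requireNotRemoved(loc, ECoreProfile, 420, "gl_FragColor");
+// reports "no longer supported in ... core profile; removed in version 420".
+//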
+void TParseVersions::requireNotRemoved(const TSourceLoc& loc, int profileMask, int removedVersion, const char* featureDesc)
+{
+ if (profile & profileMask) {
+ if (version >= removedVersion) {
+ const int maxSize = 60;
+ char buf[maxSize];
+ snprintf(buf, maxSize, "%s profile; removed in version %d", ProfileName(profile), removedVersion);
+ error(loc, "no longer supported in", featureDesc, buf);
+ }
+ }
+}
+
+void TParseVersions::unimplemented(const TSourceLoc& loc, const char* featureDesc)
+{
+ error(loc, "feature not yet implemented", featureDesc, "");
+}
+
+// Returns true if at least one of the extensions in the extensions parameter is requested. Otherwise, returns false.
+// Warns appropriately if the requested behavior of an extension is "warn".
+bool TParseVersions::checkExtensionsRequested(const TSourceLoc& loc, int numExtensions, const char* const extensions[], const char* featureDesc)
+{
+ // First, see if any of the extensions are enabled
+ for (int i = 0; i < numExtensions; ++i) {
+ TExtensionBehavior behavior = getExtensionBehavior(extensions[i]);
+ if (behavior == EBhEnable || behavior == EBhRequire)
+ return true;
+ }
+
+ // See if any extensions want to give a warning on use; give warnings for all such extensions
+ bool warned = false;
+ for (int i = 0; i < numExtensions; ++i) {
+ TExtensionBehavior behavior = getExtensionBehavior(extensions[i]);
+ if (behavior == EBhDisable && relaxedErrors()) {
+ infoSink.info.message(EPrefixWarning, "The following extension must be enabled to use this feature:", loc);
+ behavior = EBhWarn;
+ }
+ if (behavior == EBhWarn) {
+ infoSink.info.message(EPrefixWarning, ("extension " + TString(extensions[i]) + " is being used for " + featureDesc).c_str(), loc);
+ warned = true;
+ }
+ }
+ if (warned)
+ return true;
+ return false;
+}
+
+//
+// Use when there is no profile/version to check; it is simply an error if none of
+// the extensions was requested.
+//
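+// For example (illustrative):
+//     requireExtensions(loc, 1, &E_GL_EXT_nonuniform_qualifier, "nonuniformEXT");
+// errors unless the shader declared the extension with enable/require (or warn).
+//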
+void TParseVersions::requireExtensions(const TSourceLoc& loc, int numExtensions, const char* const extensions[], const char* featureDesc)
+{
+ if (checkExtensionsRequested(loc, numExtensions, extensions, featureDesc))
+ return;
+
+ // If we get this far, give errors explaining what extensions are needed
+ if (numExtensions == 1)
+ error(loc, "required extension not requested:", featureDesc, extensions[0]);
+ else {
+ error(loc, "required extension not requested:", featureDesc, "Possible extensions include:");
+ for (int i = 0; i < numExtensions; ++i)
+ infoSink.info.message(EPrefixNone, extensions[i]);
+ }
+}
+
+//
+// Used by the preprocessor when there is no profile/version to check; it is simply
+// an error if none of the extensions was requested.
+//
+void TParseVersions::ppRequireExtensions(const TSourceLoc& loc, int numExtensions, const char* const extensions[], const char* featureDesc)
+{
+ if (checkExtensionsRequested(loc, numExtensions, extensions, featureDesc))
+ return;
+
+ // If we get this far, give errors explaining what extensions are needed
+ if (numExtensions == 1)
+ ppError(loc, "required extension not requested:", featureDesc, extensions[0]);
+ else {
+ ppError(loc, "required extension not requested:", featureDesc, "Possible extensions include:");
+ for (int i = 0; i < numExtensions; ++i)
+ infoSink.info.message(EPrefixNone, extensions[i]);
+ }
+}
+
+TExtensionBehavior TParseVersions::getExtensionBehavior(const char* extension)
+{
+ auto iter = extensionBehavior.find(TString(extension));
+ if (iter == extensionBehavior.end())
+ return EBhMissing;
+ else
+ return iter->second;
+}
+
+// Returns true if the given extension is set to enable, require, or warn.
+bool TParseVersions::extensionTurnedOn(const char* const extension)
+{
+ switch (getExtensionBehavior(extension)) {
+ case EBhEnable:
+ case EBhRequire:
+ case EBhWarn:
+ return true;
+ default:
+ break;
+ }
+ return false;
+}
+// See if any of the extensions are set to enable, require, or warn.
+bool TParseVersions::extensionsTurnedOn(int numExtensions, const char* const extensions[])
+{
+ for (int i = 0; i < numExtensions; ++i) {
+ if (extensionTurnedOn(extensions[i]))
+ return true;
+ }
+ return false;
+}
+
+//
+// Change the current state of an extension's behavior.
+//
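+// For example (illustrative): "#extension GL_ANDROID_extension_pack_es31a : enable"
+// re-enters this function once for each extension bundled in AEP, and
+// "#extension GL_EXT_geometry_shader : require" also requires GL_EXT_shader_io_blocks.
+//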
+void TParseVersions::updateExtensionBehavior(int line, const char* extension, const char* behaviorString)
+{
+ // Translate from text string of extension's behavior to an enum.
+ TExtensionBehavior behavior = EBhDisable;
+ if (! strcmp("require", behaviorString))
+ behavior = EBhRequire;
+ else if (! strcmp("enable", behaviorString))
+ behavior = EBhEnable;
+ else if (! strcmp("disable", behaviorString))
+ behavior = EBhDisable;
+ else if (! strcmp("warn", behaviorString))
+ behavior = EBhWarn;
+ else {
+ error(getCurrentLoc(), "behavior not supported:", "#extension", behaviorString);
+ return;
+ }
+
+ // check if extension is used with correct shader stage
+ checkExtensionStage(getCurrentLoc(), extension);
+
+ // update the requested extension
+ updateExtensionBehavior(extension, behavior);
+
+ // see if need to propagate to implicitly modified things
+ if (strcmp(extension, "GL_ANDROID_extension_pack_es31a") == 0) {
+ // to everything in AEP
+ updateExtensionBehavior(line, "GL_KHR_blend_equation_advanced", behaviorString);
+ updateExtensionBehavior(line, "GL_OES_sample_variables", behaviorString);
+ updateExtensionBehavior(line, "GL_OES_shader_image_atomic", behaviorString);
+ updateExtensionBehavior(line, "GL_OES_shader_multisample_interpolation", behaviorString);
+ updateExtensionBehavior(line, "GL_OES_texture_storage_multisample_2d_array", behaviorString);
+ updateExtensionBehavior(line, "GL_EXT_geometry_shader", behaviorString);
+ updateExtensionBehavior(line, "GL_EXT_gpu_shader5", behaviorString);
+ updateExtensionBehavior(line, "GL_EXT_primitive_bounding_box", behaviorString);
+ updateExtensionBehavior(line, "GL_EXT_shader_io_blocks", behaviorString);
+ updateExtensionBehavior(line, "GL_EXT_tessellation_shader", behaviorString);
+ updateExtensionBehavior(line, "GL_EXT_texture_buffer", behaviorString);
+ updateExtensionBehavior(line, "GL_EXT_texture_cube_map_array", behaviorString);
+ }
+ // geometry to io_blocks
+ else if (strcmp(extension, "GL_EXT_geometry_shader") == 0)
+ updateExtensionBehavior(line, "GL_EXT_shader_io_blocks", behaviorString);
+ else if (strcmp(extension, "GL_OES_geometry_shader") == 0)
+ updateExtensionBehavior(line, "GL_OES_shader_io_blocks", behaviorString);
+ // tessellation to io_blocks
+ else if (strcmp(extension, "GL_EXT_tessellation_shader") == 0)
+ updateExtensionBehavior(line, "GL_EXT_shader_io_blocks", behaviorString);
+ else if (strcmp(extension, "GL_OES_tessellation_shader") == 0)
+ updateExtensionBehavior(line, "GL_OES_shader_io_blocks", behaviorString);
+ else if (strcmp(extension, "GL_GOOGLE_include_directive") == 0)
+ updateExtensionBehavior(line, "GL_GOOGLE_cpp_style_line_directive", behaviorString);
+ // subgroup_* to subgroup_basic
+ else if (strcmp(extension, "GL_KHR_shader_subgroup_vote") == 0)
+ updateExtensionBehavior(line, "GL_KHR_shader_subgroup_basic", behaviorString);
+ else if (strcmp(extension, "GL_KHR_shader_subgroup_arithmetic") == 0)
+ updateExtensionBehavior(line, "GL_KHR_shader_subgroup_basic", behaviorString);
+ else if (strcmp(extension, "GL_KHR_shader_subgroup_ballot") == 0)
+ updateExtensionBehavior(line, "GL_KHR_shader_subgroup_basic", behaviorString);
+ else if (strcmp(extension, "GL_KHR_shader_subgroup_shuffle") == 0)
+ updateExtensionBehavior(line, "GL_KHR_shader_subgroup_basic", behaviorString);
+ else if (strcmp(extension, "GL_KHR_shader_subgroup_shuffle_relative") == 0)
+ updateExtensionBehavior(line, "GL_KHR_shader_subgroup_basic", behaviorString);
+ else if (strcmp(extension, "GL_KHR_shader_subgroup_clustered") == 0)
+ updateExtensionBehavior(line, "GL_KHR_shader_subgroup_basic", behaviorString);
+ else if (strcmp(extension, "GL_KHR_shader_subgroup_quad") == 0)
+ updateExtensionBehavior(line, "GL_KHR_shader_subgroup_basic", behaviorString);
+#ifdef NV_EXTENSIONS
+ else if (strcmp(extension, "GL_NV_shader_subgroup_partitioned") == 0)
+ updateExtensionBehavior(line, "GL_KHR_shader_subgroup_basic", behaviorString);
+#endif
+ else if (strcmp(extension, "GL_EXT_buffer_reference2") == 0)
+ updateExtensionBehavior(line, "GL_EXT_buffer_reference", behaviorString);
+}
+
+void TParseVersions::updateExtensionBehavior(const char* extension, TExtensionBehavior behavior)
+{
+ // Update the current behavior
+ if (strcmp(extension, "all") == 0) {
+ // special case for the 'all' extension; apply it to every extension present
+ if (behavior == EBhRequire || behavior == EBhEnable) {
+ error(getCurrentLoc(), "extension 'all' cannot have 'require' or 'enable' behavior", "#extension", "");
+ return;
+ } else {
+ for (auto iter = extensionBehavior.begin(); iter != extensionBehavior.end(); ++iter)
+ iter->second = behavior;
+ }
+ } else {
+ // Do the update for this single extension
+ auto iter = extensionBehavior.find(TString(extension));
+ if (iter == extensionBehavior.end()) {
+ switch (behavior) {
+ case EBhRequire:
+ error(getCurrentLoc(), "extension not supported:", "#extension", extension);
+ break;
+ case EBhEnable:
+ case EBhWarn:
+ case EBhDisable:
+ warn(getCurrentLoc(), "extension not supported:", "#extension", extension);
+ break;
+ default:
+ assert(0 && "unexpected behavior");
+ }
+
+ return;
+ } else {
+ if (iter->second == EBhDisablePartial)
+ warn(getCurrentLoc(), "extension is only partially supported:", "#extension", extension);
+ if (behavior == EBhEnable || behavior == EBhRequire)
+ intermediate.addRequestedExtension(extension);
+ iter->second = behavior;
+ }
+ }
+}
+
+// Check if extension is used with correct shader stage.
+void TParseVersions::checkExtensionStage(const TSourceLoc& loc, const char * const extension)
+{
+#ifdef NV_EXTENSIONS
+ // GL_NV_mesh_shader extension is only allowed in task/mesh/fragment shaders
+ if (strcmp(extension, "GL_NV_mesh_shader") == 0) {
+ requireStage(loc, (EShLanguageMask)(EShLangTaskNVMask | EShLangMeshNVMask | EShLangFragmentMask),
+ "#extension GL_NV_mesh_shader");
+ profileRequires(loc, ECoreProfile, 450, 0, "#extension GL_NV_mesh_shader");
+ profileRequires(loc, EEsProfile, 320, 0, "#extension GL_NV_mesh_shader");
+ }
+#endif
+}
+
+// Call for any operation needing full GLSL integer data-type support.
+void TParseVersions::fullIntegerCheck(const TSourceLoc& loc, const char* op)
+{
+ profileRequires(loc, ENoProfile, 130, nullptr, op);
+ profileRequires(loc, EEsProfile, 300, nullptr, op);
+}
+
+// Call for any operation needing GLSL double data-type support.
+void TParseVersions::doubleCheck(const TSourceLoc& loc, const char* op)
+{
+ requireProfile(loc, ECoreProfile | ECompatibilityProfile, op);
+ profileRequires(loc, ECoreProfile | ECompatibilityProfile, 400, nullptr, op);
+}
+
+// Call for any operation needing GLSL float16 data-type support.
+void TParseVersions::float16Check(const TSourceLoc& loc, const char* op, bool builtIn)
+{
+ if (!builtIn) {
+ const char* const extensions[] = {
+#if AMD_EXTENSIONS
+ E_GL_AMD_gpu_shader_half_float,
+#endif
+ E_GL_EXT_shader_explicit_arithmetic_types,
+ E_GL_EXT_shader_explicit_arithmetic_types_float16};
+ requireExtensions(loc, sizeof(extensions)/sizeof(extensions[0]), extensions, op);
+ }
+}
+
+bool TParseVersions::float16Arithmetic()
+{
+ const char* const extensions[] = {
+#if AMD_EXTENSIONS
+ E_GL_AMD_gpu_shader_half_float,
+#endif
+ E_GL_EXT_shader_explicit_arithmetic_types,
+ E_GL_EXT_shader_explicit_arithmetic_types_float16};
+ return extensionsTurnedOn(sizeof(extensions)/sizeof(extensions[0]), extensions);
+}
+
+bool TParseVersions::int16Arithmetic()
+{
+ const char* const extensions[] = {
+#if AMD_EXTENSIONS
+ E_GL_AMD_gpu_shader_int16,
+#endif
+ E_GL_EXT_shader_explicit_arithmetic_types,
+ E_GL_EXT_shader_explicit_arithmetic_types_int16};
+ return extensionsTurnedOn(sizeof(extensions)/sizeof(extensions[0]), extensions);
+}
+
+bool TParseVersions::int8Arithmetic()
+{
+ const char* const extensions[] = {
+ E_GL_EXT_shader_explicit_arithmetic_types,
+ E_GL_EXT_shader_explicit_arithmetic_types_int8};
+ return extensionsTurnedOn(sizeof(extensions)/sizeof(extensions[0]), extensions);
+}
+
+void TParseVersions::requireFloat16Arithmetic(const TSourceLoc& loc, const char* op, const char* featureDesc)
+{
+ TString combined;
+ combined = op;
+ combined += ": ";
+ combined += featureDesc;
+
+ const char* const extensions[] = {
+#if AMD_EXTENSIONS
+ E_GL_AMD_gpu_shader_half_float,
+#endif
+ E_GL_EXT_shader_explicit_arithmetic_types,
+ E_GL_EXT_shader_explicit_arithmetic_types_float16};
+ requireExtensions(loc, sizeof(extensions)/sizeof(extensions[0]), extensions, combined.c_str());
+}
+
+void TParseVersions::requireInt16Arithmetic(const TSourceLoc& loc, const char* op, const char* featureDesc)
+{
+ TString combined;
+ combined = op;
+ combined += ": ";
+ combined += featureDesc;
+
+ const char* const extensions[] = {
+#if AMD_EXTENSIONS
+ E_GL_AMD_gpu_shader_int16,
+#endif
+ E_GL_EXT_shader_explicit_arithmetic_types,
+ E_GL_EXT_shader_explicit_arithmetic_types_int16};
+ requireExtensions(loc, sizeof(extensions)/sizeof(extensions[0]), extensions, combined.c_str());
+}
+
+void TParseVersions::requireInt8Arithmetic(const TSourceLoc& loc, const char* op, const char* featureDesc)
+{
+ TString combined;
+ combined = op;
+ combined += ": ";
+ combined += featureDesc;
+
+ const char* const extensions[] = {
+ E_GL_EXT_shader_explicit_arithmetic_types,
+ E_GL_EXT_shader_explicit_arithmetic_types_int8};
+ requireExtensions(loc, sizeof(extensions)/sizeof(extensions[0]), extensions, combined.c_str());
+}
+
+void TParseVersions::float16ScalarVectorCheck(const TSourceLoc& loc, const char* op, bool builtIn)
+{
+ if (!builtIn) {
+ const char* const extensions[] = {
+#if AMD_EXTENSIONS
+ E_GL_AMD_gpu_shader_half_float,
+#endif
+ E_GL_EXT_shader_16bit_storage,
+ E_GL_EXT_shader_explicit_arithmetic_types,
+ E_GL_EXT_shader_explicit_arithmetic_types_float16};
+ requireExtensions(loc, sizeof(extensions)/sizeof(extensions[0]), extensions, op);
+ }
+}
+
+// Call for any operation needing GLSL float32 data-type support.
+void TParseVersions::explicitFloat32Check(const TSourceLoc& loc, const char* op, bool builtIn)
+{
+ if (!builtIn) {
+ const char* const extensions[2] = {E_GL_EXT_shader_explicit_arithmetic_types,
+ E_GL_EXT_shader_explicit_arithmetic_types_float32};
+ requireExtensions(loc, 2, extensions, op);
+ }
+}
+
+// Call for any operation needing GLSL float64 data-type support.
+void TParseVersions::explicitFloat64Check(const TSourceLoc& loc, const char* op, bool builtIn)
+{
+ if (!builtIn) {
+ const char* const extensions[2] = {E_GL_EXT_shader_explicit_arithmetic_types,
+ E_GL_EXT_shader_explicit_arithmetic_types_float64};
+ requireExtensions(loc, 2, extensions, op);
+ requireProfile(loc, ECoreProfile | ECompatibilityProfile, op);
+ profileRequires(loc, ECoreProfile | ECompatibilityProfile, 400, nullptr, op);
+ }
+}
+
+// Call for any operation needing GLSL explicit int8 data-type support.
+void TParseVersions::explicitInt8Check(const TSourceLoc& loc, const char* op, bool builtIn)
+{
+ if (! builtIn) {
+ const char* const extensions[2] = {E_GL_EXT_shader_explicit_arithmetic_types,
+ E_GL_EXT_shader_explicit_arithmetic_types_int8};
+ requireExtensions(loc, 2, extensions, op);
+ }
+}
+
+#ifdef AMD_EXTENSIONS
+// Call for any operation needing GLSL float16 opaque-type support
+void TParseVersions::float16OpaqueCheck(const TSourceLoc& loc, const char* op, bool builtIn)
+{
+ if (! builtIn) {
+ requireExtensions(loc, 1, &E_GL_AMD_gpu_shader_half_float_fetch, op);
+ requireProfile(loc, ECoreProfile | ECompatibilityProfile, op);
+ profileRequires(loc, ECoreProfile | ECompatibilityProfile, 400, nullptr, op);
+ }
+}
+#endif
+
+// Call for any operation needing GLSL explicit int16 data-type support.
+void TParseVersions::explicitInt16Check(const TSourceLoc& loc, const char* op, bool builtIn)
+{
+ if (! builtIn) {
+ const char* const extensions[] = {
+#if AMD_EXTENSIONS
+ E_GL_AMD_gpu_shader_int16,
+#endif
+ E_GL_EXT_shader_explicit_arithmetic_types,
+ E_GL_EXT_shader_explicit_arithmetic_types_int16};
+ requireExtensions(loc, sizeof(extensions)/sizeof(extensions[0]), extensions, op);
+ }
+}
+
+void TParseVersions::int16ScalarVectorCheck(const TSourceLoc& loc, const char* op, bool builtIn)
+{
+ if (! builtIn) {
+ const char* const extensions[] = {
+#if AMD_EXTENSIONS
+ E_GL_AMD_gpu_shader_int16,
+#endif
+ E_GL_EXT_shader_16bit_storage,
+ E_GL_EXT_shader_explicit_arithmetic_types,
+ E_GL_EXT_shader_explicit_arithmetic_types_int16};
+ requireExtensions(loc, sizeof(extensions)/sizeof(extensions[0]), extensions, op);
+ }
+}
+
+void TParseVersions::int8ScalarVectorCheck(const TSourceLoc& loc, const char* op, bool builtIn)
+{
+ if (! builtIn) {
+ const char* const extensions[] = {
+ E_GL_EXT_shader_8bit_storage,
+ E_GL_EXT_shader_explicit_arithmetic_types,
+ E_GL_EXT_shader_explicit_arithmetic_types_int8};
+ requireExtensions(loc, sizeof(extensions)/sizeof(extensions[0]), extensions, op);
+ }
+}
+
+// Call for any operation needing GLSL explicit int32 data-type support.
+void TParseVersions::explicitInt32Check(const TSourceLoc& loc, const char* op, bool builtIn)
+{
+ if (! builtIn) {
+ const char* const extensions[2] = {E_GL_EXT_shader_explicit_arithmetic_types,
+ E_GL_EXT_shader_explicit_arithmetic_types_int32};
+ requireExtensions(loc, 2, extensions, op);
+ }
+}
+
+// Call for any operation needing GLSL 64-bit integer data-type support.
+void TParseVersions::int64Check(const TSourceLoc& loc, const char* op, bool builtIn)
+{
+ if (! builtIn) {
+ const char* const extensions[3] = {E_GL_ARB_gpu_shader_int64,
+ E_GL_EXT_shader_explicit_arithmetic_types,
+ E_GL_EXT_shader_explicit_arithmetic_types_int64};
+ requireExtensions(loc, 3, extensions, op);
+ requireProfile(loc, ECoreProfile | ECompatibilityProfile, op);
+ profileRequires(loc, ECoreProfile | ECompatibilityProfile, 400, nullptr, op);
+ }
+}
+
+void TParseVersions::fcoopmatCheck(const TSourceLoc& loc, const char* op, bool builtIn)
+{
+ if (!builtIn) {
+ const char* const extensions[] = {E_GL_NV_cooperative_matrix};
+ requireExtensions(loc, sizeof(extensions)/sizeof(extensions[0]), extensions, op);
+ }
+}
+
+// Call for any operation removed because SPIR-V is in use.
+void TParseVersions::spvRemoved(const TSourceLoc& loc, const char* op)
+{
+ if (spvVersion.spv != 0)
+ error(loc, "not allowed when generating SPIR-V", op, "");
+}
+
+// Call for any operation removed because Vulkan SPIR-V is being generated.
+void TParseVersions::vulkanRemoved(const TSourceLoc& loc, const char* op)
+{
+ if (spvVersion.vulkan > 0)
+ error(loc, "not allowed when using GLSL for Vulkan", op, "");
+}
+
+// Call for any operation that requires Vulkan.
+void TParseVersions::requireVulkan(const TSourceLoc& loc, const char* op)
+{
+ if (spvVersion.vulkan == 0)
+ error(loc, "only allowed when using GLSL for Vulkan", op, "");
+}
+
+// Call for any operation that requires SPIR-V.
+void TParseVersions::requireSpv(const TSourceLoc& loc, const char* op)
+{
+ if (spvVersion.spv == 0)
+ error(loc, "only allowed when generating SPIR-V", op, "");
+}
+
+} // end namespace glslang
diff --git a/thirdparty/glslang/glslang/MachineIndependent/Versions.h b/thirdparty/glslang/glslang/MachineIndependent/Versions.h
new file mode 100644
index 0000000000..bff082709f
--- /dev/null
+++ b/thirdparty/glslang/glslang/MachineIndependent/Versions.h
@@ -0,0 +1,300 @@
+//
+// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+// Copyright (C) 2012-2013 LunarG, Inc.
+// Copyright (C) 2017 ARM Limited.
+// Copyright (C) 2015-2018 Google, Inc.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+#ifndef _VERSIONS_INCLUDED_
+#define _VERSIONS_INCLUDED_
+
+//
+// Help manage multiple profiles, versions, extensions, etc.
+//
+
+//
+// Profiles are set up for masking operations, so queries can be done on multiple
+// profiles at the same time.
+//
+// Don't add an ordinal set of enums (0,1,2,3...) alongside these masks; keeping
+// a single form avoids defects from mixing the two.
+//
+typedef enum {
+ EBadProfile = 0,
+ ENoProfile = (1 << 0), // only for desktop, before profiles showed up
+ ECoreProfile = (1 << 1),
+ ECompatibilityProfile = (1 << 2),
+ EEsProfile = (1 << 3)
+} EProfile;
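+// For example (illustrative), the masks let one query cover several profiles:
+//     if (profile & (ECoreProfile | ECompatibilityProfile)) { /* any desktop profile */ }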
+
+namespace glslang {
+
+//
+// Map from profile enum to externally readable text name.
+//
+inline const char* ProfileName(EProfile profile)
+{
+ switch (profile) {
+ case ENoProfile: return "none";
+ case ECoreProfile: return "core";
+ case ECompatibilityProfile: return "compatibility";
+ case EEsProfile: return "es";
+ default: return "unknown profile";
+ }
+}
+
+//
+// What source rules, validation rules, target language, etc. are needed or
+// desired for SPIR-V?
+//
+// 0 means a target or rule set is not enabled (ignore rules from that entity).
+// Non-0 means to apply semantic rules arising from that version of its rule set.
+// The union of all requested rule sets will be applied.
+//
+struct SpvVersion {
+ SpvVersion() : spv(0), vulkanGlsl(0), vulkan(0), openGl(0) {}
+ unsigned int spv; // the version of SPIR-V to target, as defined by "word 1" of the SPIR-V binary header
+ int vulkanGlsl; // the version of GLSL semantics for Vulkan, from GL_KHR_vulkan_glsl, for "#define VULKAN XXX"
+ int vulkan; // the version of Vulkan, for which SPIR-V execution environment rules to use
+ int openGl; // the version of GLSL semantics for OpenGL, from GL_ARB_gl_spirv, for "#define GL_SPIRV XXX"
+};
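+// For example (illustrative), compiling Vulkan-dialect GLSL typically sets
+//     spv        = 0x10000  // SPIR-V 1.0, per word 1 of the binary header
+//     vulkanGlsl = 100      // GL_KHR_vulkan_glsl, producing "#define VULKAN 100"
+// while a plain (non-SPIR-V) OpenGL compile leaves all four fields 0.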
+
+//
+// The behaviors from the GLSL "#extension extension_name : behavior"
+//
+typedef enum {
+ EBhMissing = 0,
+ EBhRequire,
+ EBhEnable,
+ EBhWarn,
+ EBhDisable,
+ EBhDisablePartial // use as initial state of an extension that is only partially implemented
+} TExtensionBehavior;
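+// For example (illustrative): "#extension GL_EXT_gpu_shader5 : warn" records EBhWarn,
+// so later uses of that extension's features warn instead of erroring.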
+
+//
+// Symbolic names for extensions. Strings may be directly used when calling the
+// functions, but better to have the compiler do spelling checks.
+//
+const char* const E_GL_OES_texture_3D = "GL_OES_texture_3D";
+const char* const E_GL_OES_standard_derivatives = "GL_OES_standard_derivatives";
+const char* const E_GL_EXT_frag_depth = "GL_EXT_frag_depth";
+const char* const E_GL_OES_EGL_image_external = "GL_OES_EGL_image_external";
+const char* const E_GL_OES_EGL_image_external_essl3 = "GL_OES_EGL_image_external_essl3";
+const char* const E_GL_EXT_YUV_target = "GL_EXT_YUV_target";
+const char* const E_GL_EXT_shader_texture_lod = "GL_EXT_shader_texture_lod";
+const char* const E_GL_EXT_shadow_samplers = "GL_EXT_shadow_samplers";
+
+const char* const E_GL_ARB_texture_rectangle = "GL_ARB_texture_rectangle";
+const char* const E_GL_3DL_array_objects = "GL_3DL_array_objects";
+const char* const E_GL_ARB_shading_language_420pack = "GL_ARB_shading_language_420pack";
+const char* const E_GL_ARB_texture_gather = "GL_ARB_texture_gather";
+const char* const E_GL_ARB_gpu_shader5 = "GL_ARB_gpu_shader5";
+const char* const E_GL_ARB_separate_shader_objects = "GL_ARB_separate_shader_objects";
+const char* const E_GL_ARB_compute_shader = "GL_ARB_compute_shader";
+const char* const E_GL_ARB_tessellation_shader = "GL_ARB_tessellation_shader";
+const char* const E_GL_ARB_enhanced_layouts = "GL_ARB_enhanced_layouts";
+const char* const E_GL_ARB_texture_cube_map_array = "GL_ARB_texture_cube_map_array";
+const char* const E_GL_ARB_shader_texture_lod = "GL_ARB_shader_texture_lod";
+const char* const E_GL_ARB_explicit_attrib_location = "GL_ARB_explicit_attrib_location";
+const char* const E_GL_ARB_shader_image_load_store = "GL_ARB_shader_image_load_store";
+const char* const E_GL_ARB_shader_atomic_counters = "GL_ARB_shader_atomic_counters";
+const char* const E_GL_ARB_shader_draw_parameters = "GL_ARB_shader_draw_parameters";
+const char* const E_GL_ARB_shader_group_vote = "GL_ARB_shader_group_vote";
+const char* const E_GL_ARB_derivative_control = "GL_ARB_derivative_control";
+const char* const E_GL_ARB_shader_texture_image_samples = "GL_ARB_shader_texture_image_samples";
+const char* const E_GL_ARB_viewport_array = "GL_ARB_viewport_array";
+const char* const E_GL_ARB_gpu_shader_int64 = "GL_ARB_gpu_shader_int64";
+const char* const E_GL_ARB_shader_ballot = "GL_ARB_shader_ballot";
+const char* const E_GL_ARB_sparse_texture2 = "GL_ARB_sparse_texture2";
+const char* const E_GL_ARB_sparse_texture_clamp = "GL_ARB_sparse_texture_clamp";
+const char* const E_GL_ARB_shader_stencil_export = "GL_ARB_shader_stencil_export";
+// const char* const E_GL_ARB_cull_distance = "GL_ARB_cull_distance"; // present for 4.5, but need extension control over block members
+const char* const E_GL_ARB_post_depth_coverage = "GL_ARB_post_depth_coverage";
+const char* const E_GL_ARB_shader_viewport_layer_array = "GL_ARB_shader_viewport_layer_array";
+
+const char* const E_GL_KHR_shader_subgroup_basic = "GL_KHR_shader_subgroup_basic";
+const char* const E_GL_KHR_shader_subgroup_vote = "GL_KHR_shader_subgroup_vote";
+const char* const E_GL_KHR_shader_subgroup_arithmetic = "GL_KHR_shader_subgroup_arithmetic";
+const char* const E_GL_KHR_shader_subgroup_ballot = "GL_KHR_shader_subgroup_ballot";
+const char* const E_GL_KHR_shader_subgroup_shuffle = "GL_KHR_shader_subgroup_shuffle";
+const char* const E_GL_KHR_shader_subgroup_shuffle_relative = "GL_KHR_shader_subgroup_shuffle_relative";
+const char* const E_GL_KHR_shader_subgroup_clustered = "GL_KHR_shader_subgroup_clustered";
+const char* const E_GL_KHR_shader_subgroup_quad = "GL_KHR_shader_subgroup_quad";
+const char* const E_GL_KHR_memory_scope_semantics = "GL_KHR_memory_scope_semantics";
+
+const char* const E_GL_EXT_shader_atomic_int64 = "GL_EXT_shader_atomic_int64";
+
+const char* const E_GL_EXT_shader_non_constant_global_initializers = "GL_EXT_shader_non_constant_global_initializers";
+const char* const E_GL_EXT_shader_image_load_formatted = "GL_EXT_shader_image_load_formatted";
+
+const char* const E_GL_EXT_shader_16bit_storage = "GL_EXT_shader_16bit_storage";
+const char* const E_GL_EXT_shader_8bit_storage = "GL_EXT_shader_8bit_storage";
+
+
+// EXT extensions
+const char* const E_GL_EXT_device_group = "GL_EXT_device_group";
+const char* const E_GL_EXT_multiview = "GL_EXT_multiview";
+const char* const E_GL_EXT_post_depth_coverage = "GL_EXT_post_depth_coverage";
+const char* const E_GL_EXT_control_flow_attributes = "GL_EXT_control_flow_attributes";
+const char* const E_GL_EXT_nonuniform_qualifier = "GL_EXT_nonuniform_qualifier";
+const char* const E_GL_EXT_samplerless_texture_functions = "GL_EXT_samplerless_texture_functions";
+const char* const E_GL_EXT_scalar_block_layout = "GL_EXT_scalar_block_layout";
+const char* const E_GL_EXT_fragment_invocation_density = "GL_EXT_fragment_invocation_density";
+const char* const E_GL_EXT_buffer_reference = "GL_EXT_buffer_reference";
+const char* const E_GL_EXT_buffer_reference2 = "GL_EXT_buffer_reference2";
+
+// Arrays of extensions for the above post_depth_coverage duplications
+
+const char* const post_depth_coverageEXTs[] = { E_GL_ARB_post_depth_coverage, E_GL_EXT_post_depth_coverage };
+const int Num_post_depth_coverageEXTs = sizeof(post_depth_coverageEXTs) / sizeof(post_depth_coverageEXTs[0]);
+
+// OVR extensions
+const char* const E_GL_OVR_multiview = "GL_OVR_multiview";
+const char* const E_GL_OVR_multiview2 = "GL_OVR_multiview2";
+
+const char* const OVR_multiview_EXTs[] = { E_GL_OVR_multiview, E_GL_OVR_multiview2 };
+const int Num_OVR_multiview_EXTs = sizeof(OVR_multiview_EXTs) / sizeof(OVR_multiview_EXTs[0]);
+
+// #line and #include
+const char* const E_GL_GOOGLE_cpp_style_line_directive = "GL_GOOGLE_cpp_style_line_directive";
+const char* const E_GL_GOOGLE_include_directive = "GL_GOOGLE_include_directive";
+
+#ifdef AMD_EXTENSIONS
+const char* const E_GL_AMD_shader_ballot = "GL_AMD_shader_ballot";
+const char* const E_GL_AMD_shader_trinary_minmax = "GL_AMD_shader_trinary_minmax";
+const char* const E_GL_AMD_shader_explicit_vertex_parameter = "GL_AMD_shader_explicit_vertex_parameter";
+const char* const E_GL_AMD_gcn_shader = "GL_AMD_gcn_shader";
+const char* const E_GL_AMD_gpu_shader_half_float = "GL_AMD_gpu_shader_half_float";
+const char* const E_GL_AMD_texture_gather_bias_lod = "GL_AMD_texture_gather_bias_lod";
+const char* const E_GL_AMD_gpu_shader_int16 = "GL_AMD_gpu_shader_int16";
+const char* const E_GL_AMD_shader_image_load_store_lod = "GL_AMD_shader_image_load_store_lod";
+const char* const E_GL_AMD_shader_fragment_mask = "GL_AMD_shader_fragment_mask";
+const char* const E_GL_AMD_gpu_shader_half_float_fetch = "GL_AMD_gpu_shader_half_float_fetch";
+#endif
+
+#ifdef NV_EXTENSIONS
+
+const char* const E_GL_NV_sample_mask_override_coverage = "GL_NV_sample_mask_override_coverage";
+const char* const E_SPV_NV_geometry_shader_passthrough = "GL_NV_geometry_shader_passthrough";
+const char* const E_GL_NV_viewport_array2 = "GL_NV_viewport_array2";
+const char* const E_GL_NV_stereo_view_rendering = "GL_NV_stereo_view_rendering";
+const char* const E_GL_NVX_multiview_per_view_attributes = "GL_NVX_multiview_per_view_attributes";
+const char* const E_GL_NV_shader_atomic_int64 = "GL_NV_shader_atomic_int64";
+const char* const E_GL_NV_conservative_raster_underestimation = "GL_NV_conservative_raster_underestimation";
+const char* const E_GL_NV_shader_noperspective_interpolation = "GL_NV_shader_noperspective_interpolation";
+const char* const E_GL_NV_shader_subgroup_partitioned = "GL_NV_shader_subgroup_partitioned";
+const char* const E_GL_NV_shading_rate_image = "GL_NV_shading_rate_image";
+const char* const E_GL_NV_ray_tracing = "GL_NV_ray_tracing";
+const char* const E_GL_NV_fragment_shader_barycentric = "GL_NV_fragment_shader_barycentric";
+const char* const E_GL_NV_compute_shader_derivatives = "GL_NV_compute_shader_derivatives";
+const char* const E_GL_NV_shader_texture_footprint = "GL_NV_shader_texture_footprint";
+const char* const E_GL_NV_mesh_shader = "GL_NV_mesh_shader";
+
+// Arrays of extensions for the above viewportEXTs duplications
+
+const char* const viewportEXTs[] = { E_GL_ARB_shader_viewport_layer_array, E_GL_NV_viewport_array2 };
+const int Num_viewportEXTs = sizeof(viewportEXTs) / sizeof(viewportEXTs[0]);
+#endif
+
+const char* const E_GL_NV_cooperative_matrix = "GL_NV_cooperative_matrix";
+
+// AEP
+const char* const E_GL_ANDROID_extension_pack_es31a = "GL_ANDROID_extension_pack_es31a";
+const char* const E_GL_KHR_blend_equation_advanced = "GL_KHR_blend_equation_advanced";
+const char* const E_GL_OES_sample_variables = "GL_OES_sample_variables";
+const char* const E_GL_OES_shader_image_atomic = "GL_OES_shader_image_atomic";
+const char* const E_GL_OES_shader_multisample_interpolation = "GL_OES_shader_multisample_interpolation";
+const char* const E_GL_OES_texture_storage_multisample_2d_array = "GL_OES_texture_storage_multisample_2d_array";
+const char* const E_GL_EXT_geometry_shader = "GL_EXT_geometry_shader";
+const char* const E_GL_EXT_geometry_point_size = "GL_EXT_geometry_point_size";
+const char* const E_GL_EXT_gpu_shader5 = "GL_EXT_gpu_shader5";
+const char* const E_GL_EXT_primitive_bounding_box = "GL_EXT_primitive_bounding_box";
+const char* const E_GL_EXT_shader_io_blocks = "GL_EXT_shader_io_blocks";
+const char* const E_GL_EXT_tessellation_shader = "GL_EXT_tessellation_shader";
+const char* const E_GL_EXT_tessellation_point_size = "GL_EXT_tessellation_point_size";
+const char* const E_GL_EXT_texture_buffer = "GL_EXT_texture_buffer";
+const char* const E_GL_EXT_texture_cube_map_array = "GL_EXT_texture_cube_map_array";
+
+// OES matching AEP
+const char* const E_GL_OES_geometry_shader = "GL_OES_geometry_shader";
+const char* const E_GL_OES_geometry_point_size = "GL_OES_geometry_point_size";
+const char* const E_GL_OES_gpu_shader5 = "GL_OES_gpu_shader5";
+const char* const E_GL_OES_primitive_bounding_box = "GL_OES_primitive_bounding_box";
+const char* const E_GL_OES_shader_io_blocks = "GL_OES_shader_io_blocks";
+const char* const E_GL_OES_tessellation_shader = "GL_OES_tessellation_shader";
+const char* const E_GL_OES_tessellation_point_size = "GL_OES_tessellation_point_size";
+const char* const E_GL_OES_texture_buffer = "GL_OES_texture_buffer";
+const char* const E_GL_OES_texture_cube_map_array = "GL_OES_texture_cube_map_array";
+
+// explicit types (formerly KHX)
+const char* const E_GL_EXT_shader_explicit_arithmetic_types = "GL_EXT_shader_explicit_arithmetic_types";
+const char* const E_GL_EXT_shader_explicit_arithmetic_types_int8 = "GL_EXT_shader_explicit_arithmetic_types_int8";
+const char* const E_GL_EXT_shader_explicit_arithmetic_types_int16 = "GL_EXT_shader_explicit_arithmetic_types_int16";
+const char* const E_GL_EXT_shader_explicit_arithmetic_types_int32 = "GL_EXT_shader_explicit_arithmetic_types_int32";
+const char* const E_GL_EXT_shader_explicit_arithmetic_types_int64 = "GL_EXT_shader_explicit_arithmetic_types_int64";
+const char* const E_GL_EXT_shader_explicit_arithmetic_types_float16 = "GL_EXT_shader_explicit_arithmetic_types_float16";
+const char* const E_GL_EXT_shader_explicit_arithmetic_types_float32 = "GL_EXT_shader_explicit_arithmetic_types_float32";
+const char* const E_GL_EXT_shader_explicit_arithmetic_types_float64 = "GL_EXT_shader_explicit_arithmetic_types_float64";
+
+// Arrays of extensions for the above AEP duplications
+
+const char* const AEP_geometry_shader[] = { E_GL_EXT_geometry_shader, E_GL_OES_geometry_shader };
+const int Num_AEP_geometry_shader = sizeof(AEP_geometry_shader)/sizeof(AEP_geometry_shader[0]);
+
+const char* const AEP_geometry_point_size[] = { E_GL_EXT_geometry_point_size, E_GL_OES_geometry_point_size };
+const int Num_AEP_geometry_point_size = sizeof(AEP_geometry_point_size)/sizeof(AEP_geometry_point_size[0]);
+
+const char* const AEP_gpu_shader5[] = { E_GL_EXT_gpu_shader5, E_GL_OES_gpu_shader5 };
+const int Num_AEP_gpu_shader5 = sizeof(AEP_gpu_shader5)/sizeof(AEP_gpu_shader5[0]);
+
+const char* const AEP_primitive_bounding_box[] = { E_GL_EXT_primitive_bounding_box, E_GL_OES_primitive_bounding_box };
+const int Num_AEP_primitive_bounding_box = sizeof(AEP_primitive_bounding_box)/sizeof(AEP_primitive_bounding_box[0]);
+
+const char* const AEP_shader_io_blocks[] = { E_GL_EXT_shader_io_blocks, E_GL_OES_shader_io_blocks };
+const int Num_AEP_shader_io_blocks = sizeof(AEP_shader_io_blocks)/sizeof(AEP_shader_io_blocks[0]);
+
+const char* const AEP_tessellation_shader[] = { E_GL_EXT_tessellation_shader, E_GL_OES_tessellation_shader };
+const int Num_AEP_tessellation_shader = sizeof(AEP_tessellation_shader)/sizeof(AEP_tessellation_shader[0]);
+
+const char* const AEP_tessellation_point_size[] = { E_GL_EXT_tessellation_point_size, E_GL_OES_tessellation_point_size };
+const int Num_AEP_tessellation_point_size = sizeof(AEP_tessellation_point_size)/sizeof(AEP_tessellation_point_size[0]);
+
+const char* const AEP_texture_buffer[] = { E_GL_EXT_texture_buffer, E_GL_OES_texture_buffer };
+const int Num_AEP_texture_buffer = sizeof(AEP_texture_buffer)/sizeof(AEP_texture_buffer[0]);
+
+const char* const AEP_texture_cube_map_array[] = { E_GL_EXT_texture_cube_map_array, E_GL_OES_texture_cube_map_array };
+const int Num_AEP_texture_cube_map_array = sizeof(AEP_texture_cube_map_array)/sizeof(AEP_texture_cube_map_array[0]);
+
+} // end namespace glslang
+
+#endif // _VERSIONS_INCLUDED_
diff --git a/thirdparty/glslang/glslang/MachineIndependent/attribute.cpp b/thirdparty/glslang/glslang/MachineIndependent/attribute.cpp
new file mode 100644
index 0000000000..d4a23f39de
--- /dev/null
+++ b/thirdparty/glslang/glslang/MachineIndependent/attribute.cpp
@@ -0,0 +1,343 @@
+//
+// Copyright (C) 2017 LunarG, Inc.
+// Copyright (C) 2018 Google, Inc.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of Google, Inc., nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+#include "attribute.h"
+#include "../Include/intermediate.h"
+#include "ParseHelper.h"
+
+namespace glslang {
+
+// extract integers out of attribute arguments stored in attribute aggregate
+bool TAttributeArgs::getInt(int& value, int argNum) const
+{
+ const TConstUnion* intConst = getConstUnion(EbtInt, argNum);
+
+ if (intConst == nullptr)
+ return false;
+
+ value = intConst->getIConst();
+ return true;
+}
+
+
+// extract strings out of attribute arguments stored in attribute aggregate.
+// convert to lower case if convertToLower is true (for case-insensitive compare convenience)
+bool TAttributeArgs::getString(TString& value, int argNum, bool convertToLower) const
+{
+ const TConstUnion* stringConst = getConstUnion(EbtString, argNum);
+
+ if (stringConst == nullptr)
+ return false;
+
+ value = *stringConst->getSConst();
+
+ // Convenience.
+ if (convertToLower)
+ std::transform(value.begin(), value.end(), value.begin(), ::tolower);
+
+ return true;
+}
+
+// How many arguments were supplied?
+int TAttributeArgs::size() const
+{
+ return args == nullptr ? 0 : (int)args->getSequence().size();
+}
+
+// Helper to get attribute const union. Returns nullptr on failure.
+const TConstUnion* TAttributeArgs::getConstUnion(TBasicType basicType, int argNum) const
+{
+ if (args == nullptr)
+ return nullptr;
+
+ if (argNum >= (int)args->getSequence().size())
+ return nullptr;
+
+ if (args->getSequence()[argNum]->getAsConstantUnion() == nullptr)
+ return nullptr;
+
+ const TConstUnion* constVal = &args->getSequence()[argNum]->getAsConstantUnion()->getConstArray()[0];
+ if (constVal == nullptr || constVal->getType() != basicType)
+ return nullptr;
+
+ return constVal;
+}
+
+// Implementation of TParseContext parts of attributes
+TAttributeType TParseContext::attributeFromName(const TString& name) const
+{
+ if (name == "branch" || name == "dont_flatten")
+ return EatBranch;
+ else if (name == "flatten")
+ return EatFlatten;
+ else if (name == "unroll")
+ return EatUnroll;
+ else if (name == "loop" || name == "dont_unroll")
+ return EatLoop;
+ else if (name == "dependency_infinite")
+ return EatDependencyInfinite;
+ else if (name == "dependency_length")
+ return EatDependencyLength;
+ else if (name == "min_iterations")
+ return EatMinIterations;
+ else if (name == "max_iterations")
+ return EatMaxIterations;
+ else if (name == "iteration_multiple")
+ return EatIterationMultiple;
+ else if (name == "peel_count")
+ return EatPeelCount;
+ else if (name == "partial_count")
+ return EatPartialCount;
+ else
+ return EatNone;
+}
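+// For example (illustrative): attributeFromName("unroll") yields EatUnroll, and with
+// GL_EXT_control_flow_attributes a shader's [[dependency_length(4)]] arrives here as
+// "dependency_length" with one integer argument.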
+
+// Make an initial leaf for the grammar from a no-argument attribute
+TAttributes* TParseContext::makeAttributes(const TString& identifier) const
+{
+ TAttributes *attributes = nullptr;
+ attributes = NewPoolObject(attributes);
+ TAttributeArgs args = { attributeFromName(identifier), nullptr };
+ attributes->push_back(args);
+ return attributes;
+}
+
+// Make an initial leaf for the grammar from a one-argument attribute
+TAttributes* TParseContext::makeAttributes(const TString& identifier, TIntermNode* node) const
+{
+ TAttributes *attributes = nullptr;
+ attributes = NewPoolObject(attributes);
+
+ // for now, node is always a simple single expression, but other code expects
+ // a list, so make it so
+ TIntermAggregate* agg = intermediate.makeAggregate(node);
+ TAttributeArgs args = { attributeFromName(identifier), agg };
+ attributes->push_back(args);
+ return attributes;
+}
+
+// Merge two sets of attributes into a single set.
+// The second argument is destructively consumed.
+TAttributes* TParseContext::mergeAttributes(TAttributes* attr1, TAttributes* attr2) const
+{
+ attr1->splice(attr1->end(), *attr2);
+ return attr1;
+}
+
+//
+// Selection attributes
+//
+void TParseContext::handleSelectionAttributes(const TAttributes& attributes, TIntermNode* node)
+{
+ TIntermSelection* selection = node->getAsSelectionNode();
+ if (selection == nullptr)
+ return;
+
+ for (auto it = attributes.begin(); it != attributes.end(); ++it) {
+ if (it->size() > 0) {
+ warn(node->getLoc(), "attribute with arguments not recognized, skipping", "", "");
+ continue;
+ }
+
+ switch (it->name) {
+ case EatFlatten:
+ selection->setFlatten();
+ break;
+ case EatBranch:
+ selection->setDontFlatten();
+ break;
+ default:
+ warn(node->getLoc(), "attribute does not apply to a selection", "", "");
+ break;
+ }
+ }
+}
+
+//
+// Switch attributes
+//
+void TParseContext::handleSwitchAttributes(const TAttributes& attributes, TIntermNode* node)
+{
+ TIntermSwitch* selection = node->getAsSwitchNode();
+ if (selection == nullptr)
+ return;
+
+ for (auto it = attributes.begin(); it != attributes.end(); ++it) {
+ if (it->size() > 0) {
+ warn(node->getLoc(), "attribute with arguments not recognized, skipping", "", "");
+ continue;
+ }
+
+ switch (it->name) {
+ case EatFlatten:
+ selection->setFlatten();
+ break;
+ case EatBranch:
+ selection->setDontFlatten();
+ break;
+ default:
+ warn(node->getLoc(), "attribute does not apply to a switch", "", "");
+ break;
+ }
+ }
+}
+
+//
+// Loop attributes
+//
+void TParseContext::handleLoopAttributes(const TAttributes& attributes, TIntermNode* node)
+{
+ TIntermLoop* loop = node->getAsLoopNode();
+ if (loop == nullptr) {
+ // the actual loop might be part of a sequence
+ TIntermAggregate* agg = node->getAsAggregate();
+ if (agg == nullptr)
+ return;
+ for (auto it = agg->getSequence().begin(); it != agg->getSequence().end(); ++it) {
+ loop = (*it)->getAsLoopNode();
+ if (loop != nullptr)
+ break;
+ }
+ if (loop == nullptr)
+ return;
+ }
+
+ for (auto it = attributes.begin(); it != attributes.end(); ++it) {
+
+ const auto noArgument = [&](const char* feature) {
+ if (it->size() > 0) {
+ warn(node->getLoc(), "expected no arguments", feature, "");
+ return false;
+ }
+ return true;
+ };
+
+ const auto positiveSignedArgument = [&](const char* feature, int& value) {
+ if (it->size() == 1 && it->getInt(value)) {
+ if (value <= 0) {
+ error(node->getLoc(), "must be positive", feature, "");
+ return false;
+ }
+ } else {
+ warn(node->getLoc(), "expected a single integer argument", feature, "");
+ return false;
+ }
+ return true;
+ };
+
+ const auto unsignedArgument = [&](const char* feature, unsigned int& uiValue) {
+ int value;
+ if (!(it->size() == 1 && it->getInt(value))) {
+ warn(node->getLoc(), "expected a single integer argument", feature, "");
+ return false;
+ }
+ uiValue = (unsigned int)value;
+ return true;
+ };
+
+ const auto positiveUnsignedArgument = [&](const char* feature, unsigned int& uiValue) {
+ int value;
+ if (it->size() == 1 && it->getInt(value)) {
+ if (value == 0) {
+ error(node->getLoc(), "must be greater than or equal to 1", feature, "");
+ return false;
+ }
+ } else {
+ warn(node->getLoc(), "expected a single integer argument", feature, "");
+ return false;
+ }
+ uiValue = (unsigned int)value;
+ return true;
+ };
+
+ const auto spirv14 = [&](const char* feature) {
+ if (spvVersion.spv > 0 && spvVersion.spv < EShTargetSpv_1_4)
+ warn(node->getLoc(), "attribute requires a SPIR-V 1.4 target-env", feature, "");
+ };
+
+ int value = 0;
+ unsigned uiValue = 0;
+ switch (it->name) {
+ case EatUnroll:
+ if (noArgument("unroll"))
+ loop->setUnroll();
+ break;
+ case EatLoop:
+ if (noArgument("dont_unroll"))
+ loop->setDontUnroll();
+ break;
+ case EatDependencyInfinite:
+ if (noArgument("dependency_infinite"))
+ loop->setLoopDependency(TIntermLoop::dependencyInfinite);
+ break;
+ case EatDependencyLength:
+ if (positiveSignedArgument("dependency_length", value))
+ loop->setLoopDependency(value);
+ break;
+ case EatMinIterations:
+ spirv14("min_iterations");
+ if (unsignedArgument("min_iterations", uiValue))
+ loop->setMinIterations(uiValue);
+ break;
+ case EatMaxIterations:
+ spirv14("max_iterations");
+ if (unsignedArgument("max_iterations", uiValue))
+ loop->setMaxIterations(uiValue);
+ break;
+ case EatIterationMultiple:
+ spirv14("iteration_multiple");
+ if (positiveUnsignedArgument("iteration_multiple", uiValue))
+ loop->setIterationMultiple(uiValue);
+ break;
+ case EatPeelCount:
+ spirv14("peel_count");
+ if (unsignedArgument("peel_count", uiValue))
+ loop->setPeelCount(uiValue);
+ break;
+ case EatPartialCount:
+ spirv14("partial_count");
+ if (unsignedArgument("partial_count", uiValue))
+ loop->setPartialCount(uiValue);
+ break;
+ default:
+ warn(node->getLoc(), "attribute does not apply to a loop", "", "");
+ break;
+ }
+ }
+}
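Likewise, the loop cases above are driven by attributes on loop statements. A hedged GLSL sketch (spellings per GL_EXT_control_flow_attributes; for targets below SPIR-V 1.4, cases such as min_iterations are warned about, not rejected, by the spirv14 lambda above):

    // Illustrative loop attributes; each maps to one case in
    // handleLoopAttributes() above.
    static const char* kLoopSrc = R"(
        #version 450
        #extension GL_EXT_control_flow_attributes : require
        void main() {
            [[unroll]]               for (int i = 0; i < 4;  ++i) { }
            [[dependency_length(4)]] for (int i = 0; i < 64; ++i) { }
        }
    )";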
+
+
+} // end namespace glslang
diff --git a/thirdparty/glslang/glslang/MachineIndependent/attribute.h b/thirdparty/glslang/glslang/MachineIndependent/attribute.h
new file mode 100644
index 0000000000..844ce45806
--- /dev/null
+++ b/thirdparty/glslang/glslang/MachineIndependent/attribute.h
@@ -0,0 +1,107 @@
+//
+// Copyright (C) 2017 LunarG, Inc.
+// Copyright (C) 2018 Google, Inc.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+#ifndef _ATTRIBUTE_INCLUDED_
+#define _ATTRIBUTE_INCLUDED_
+
+#include "../Include/Common.h"
+#include "../Include/ConstantUnion.h"
+
+namespace glslang {
+
+ enum TAttributeType {
+ EatNone,
+ EatAllow_uav_condition,
+ EatBranch,
+ EatCall,
+ EatDomain,
+ EatEarlyDepthStencil,
+ EatFastOpt,
+ EatFlatten,
+ EatForceCase,
+ EatInstance,
+ EatMaxTessFactor,
+ EatNumThreads,
+ EatMaxVertexCount,
+ EatOutputControlPoints,
+ EatOutputTopology,
+ EatPartitioning,
+ EatPatchConstantFunc,
+ EatPatchSize,
+ EatUnroll,
+ EatLoop,
+ EatBinding,
+ EatGlobalBinding,
+ EatLocation,
+ EatInputAttachment,
+ EatBuiltIn,
+ EatPushConstant,
+ EatConstantId,
+ EatDependencyInfinite,
+ EatDependencyLength,
+ EatMinIterations,
+ EatMaxIterations,
+ EatIterationMultiple,
+ EatPeelCount,
+ EatPartialCount
+ };
+
+ class TIntermAggregate;
+
+ struct TAttributeArgs {
+ TAttributeType name;
+ const TIntermAggregate* args;
+
+ // Obtain attribute as integer
+ // Return false if it cannot be obtained
+ bool getInt(int& value, int argNum = 0) const;
+
+ // Obtain attribute as string, with optional to-lower transform
+ // Return false if it cannot be obtained
+ bool getString(TString& value, int argNum = 0, bool convertToLower = true) const;
+
+ // How many arguments were provided to the attribute?
+ int size() const;
+
+ protected:
+ const TConstUnion* getConstUnion(TBasicType basicType, int argNum) const;
+ };
+
+ typedef TList<TAttributeArgs> TAttributes;
+
+} // end namespace glslang
+
+#endif // _ATTRIBUTE_INCLUDED_
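The accessor trio declared above (size(), getInt(), getString()) is what the handlers in attribute.cpp lean on. A self-contained analogue of that consumption pattern, with a stub Args type standing in for TAttributeArgs (purely illustrative; not glslang code):

    #include <cstdio>
    #include <vector>

    struct Args { // stands in for TAttributeArgs
        std::vector<int> ints;
        int size() const { return (int)ints.size(); }
        bool getInt(int& value, int argNum = 0) const {
            if (argNum >= size())
                return false; // mirrors "return false if it cannot be obtained"
            value = ints[argNum];
            return true;
        }
    };

    int main() {
        Args a{ { 8 } }; // e.g. the argument list of dependency_length(8)
        int value = 0;
        if (a.size() == 1 && a.getInt(value) && value > 0)
            std::printf("dependency length = %d\n", value);
        return 0;
    }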
diff --git a/thirdparty/glslang/glslang/MachineIndependent/gl_types.h b/thirdparty/glslang/glslang/MachineIndependent/gl_types.h
new file mode 100644
index 0000000000..c9fee9ecce
--- /dev/null
+++ b/thirdparty/glslang/glslang/MachineIndependent/gl_types.h
@@ -0,0 +1,214 @@
+/*
+** Copyright (c) 2013 The Khronos Group Inc.
+**
+** Permission is hereby granted, free of charge, to any person obtaining a
+** copy of this software and/or associated documentation files (the
+** "Materials"), to deal in the Materials without restriction, including
+** without limitation the rights to use, copy, modify, merge, publish,
+** distribute, sublicense, and/or sell copies of the Materials, and to
+** permit persons to whom the Materials are furnished to do so, subject to
+** the following conditions:
+**
+** The above copyright notice and this permission notice shall be included
+** in all copies or substantial portions of the Materials.
+**
+** THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+** EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+** MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+** IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+** CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+** TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+** MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
+*/
+
+#pragma once
+
+#define GL_FLOAT 0x1406
+#define GL_FLOAT_VEC2 0x8B50
+#define GL_FLOAT_VEC3 0x8B51
+#define GL_FLOAT_VEC4 0x8B52
+
+#define GL_DOUBLE 0x140A
+#define GL_DOUBLE_VEC2 0x8FFC
+#define GL_DOUBLE_VEC3 0x8FFD
+#define GL_DOUBLE_VEC4 0x8FFE
+
+#define GL_INT 0x1404
+#define GL_INT_VEC2 0x8B53
+#define GL_INT_VEC3 0x8B54
+#define GL_INT_VEC4 0x8B55
+
+#define GL_UNSIGNED_INT 0x1405
+#define GL_UNSIGNED_INT_VEC2 0x8DC6
+#define GL_UNSIGNED_INT_VEC3 0x8DC7
+#define GL_UNSIGNED_INT_VEC4 0x8DC8
+
+#define GL_INT64_ARB 0x140E
+#define GL_INT64_VEC2_ARB 0x8FE9
+#define GL_INT64_VEC3_ARB 0x8FEA
+#define GL_INT64_VEC4_ARB 0x8FEB
+
+#define GL_UNSIGNED_INT64_ARB 0x140F
+#define GL_UNSIGNED_INT64_VEC2_ARB 0x8FE5
+#define GL_UNSIGNED_INT64_VEC3_ARB 0x8FE6
+#define GL_UNSIGNED_INT64_VEC4_ARB 0x8FE7
+
+#define GL_BOOL 0x8B56
+#define GL_BOOL_VEC2 0x8B57
+#define GL_BOOL_VEC3 0x8B58
+#define GL_BOOL_VEC4 0x8B59
+
+#define GL_FLOAT_MAT2 0x8B5A
+#define GL_FLOAT_MAT3 0x8B5B
+#define GL_FLOAT_MAT4 0x8B5C
+#define GL_FLOAT_MAT2x3 0x8B65
+#define GL_FLOAT_MAT2x4 0x8B66
+#define GL_FLOAT_MAT3x2 0x8B67
+#define GL_FLOAT_MAT3x4 0x8B68
+#define GL_FLOAT_MAT4x2 0x8B69
+#define GL_FLOAT_MAT4x3 0x8B6A
+
+#define GL_DOUBLE_MAT2 0x8F46
+#define GL_DOUBLE_MAT3 0x8F47
+#define GL_DOUBLE_MAT4 0x8F48
+#define GL_DOUBLE_MAT2x3 0x8F49
+#define GL_DOUBLE_MAT2x4 0x8F4A
+#define GL_DOUBLE_MAT3x2 0x8F4B
+#define GL_DOUBLE_MAT3x4 0x8F4C
+#define GL_DOUBLE_MAT4x2 0x8F4D
+#define GL_DOUBLE_MAT4x3 0x8F4E
+
+#ifdef AMD_EXTENSIONS
+// These constants are borrowed from the NV_gpu_shader5 extension
+#define GL_FLOAT16_NV 0x8FF8
+#define GL_FLOAT16_VEC2_NV 0x8FF9
+#define GL_FLOAT16_VEC3_NV 0x8FFA
+#define GL_FLOAT16_VEC4_NV 0x8FFB
+
+#define GL_FLOAT16_MAT2_AMD 0x91C5
+#define GL_FLOAT16_MAT3_AMD 0x91C6
+#define GL_FLOAT16_MAT4_AMD 0x91C7
+#define GL_FLOAT16_MAT2x3_AMD 0x91C8
+#define GL_FLOAT16_MAT2x4_AMD 0x91C9
+#define GL_FLOAT16_MAT3x2_AMD 0x91CA
+#define GL_FLOAT16_MAT3x4_AMD 0x91CB
+#define GL_FLOAT16_MAT4x2_AMD 0x91CC
+#define GL_FLOAT16_MAT4x3_AMD 0x91CD
+#endif
+
+#define GL_SAMPLER_1D 0x8B5D
+#define GL_SAMPLER_2D 0x8B5E
+#define GL_SAMPLER_3D 0x8B5F
+#define GL_SAMPLER_CUBE 0x8B60
+#define GL_SAMPLER_BUFFER 0x8DC2
+#define GL_SAMPLER_1D_ARRAY 0x8DC0
+#define GL_SAMPLER_2D_ARRAY 0x8DC1
+#define GL_SAMPLER_1D_ARRAY_SHADOW 0x8DC3
+#define GL_SAMPLER_2D_ARRAY_SHADOW 0x8DC4
+#define GL_SAMPLER_CUBE_SHADOW 0x8DC5
+#define GL_SAMPLER_1D_SHADOW 0x8B61
+#define GL_SAMPLER_2D_SHADOW 0x8B62
+#define GL_SAMPLER_2D_RECT 0x8B63
+#define GL_SAMPLER_2D_RECT_SHADOW 0x8B64
+#define GL_SAMPLER_2D_MULTISAMPLE 0x9108
+#define GL_SAMPLER_2D_MULTISAMPLE_ARRAY 0x910B
+#define GL_SAMPLER_CUBE_MAP_ARRAY 0x900C
+#define GL_SAMPLER_CUBE_MAP_ARRAY_SHADOW 0x900D
+#define GL_SAMPLER_CUBE_MAP_ARRAY_ARB 0x900C
+#define GL_SAMPLER_CUBE_MAP_ARRAY_SHADOW_ARB 0x900D
+
+#ifdef AMD_EXTENSIONS
+#define GL_FLOAT16_SAMPLER_1D_AMD 0x91CE
+#define GL_FLOAT16_SAMPLER_2D_AMD 0x91CF
+#define GL_FLOAT16_SAMPLER_3D_AMD 0x91D0
+#define GL_FLOAT16_SAMPLER_CUBE_AMD 0x91D1
+#define GL_FLOAT16_SAMPLER_2D_RECT_AMD 0x91D2
+#define GL_FLOAT16_SAMPLER_1D_ARRAY_AMD 0x91D3
+#define GL_FLOAT16_SAMPLER_2D_ARRAY_AMD 0x91D4
+#define GL_FLOAT16_SAMPLER_CUBE_MAP_ARRAY_AMD 0x91D5
+#define GL_FLOAT16_SAMPLER_BUFFER_AMD 0x91D6
+#define GL_FLOAT16_SAMPLER_2D_MULTISAMPLE_AMD 0x91D7
+#define GL_FLOAT16_SAMPLER_2D_MULTISAMPLE_ARRAY_AMD 0x91D8
+
+#define GL_FLOAT16_SAMPLER_1D_SHADOW_AMD 0x91D9
+#define GL_FLOAT16_SAMPLER_2D_SHADOW_AMD 0x91DA
+#define GL_FLOAT16_SAMPLER_2D_RECT_SHADOW_AMD 0x91DB
+#define GL_FLOAT16_SAMPLER_1D_ARRAY_SHADOW_AMD 0x91DC
+#define GL_FLOAT16_SAMPLER_2D_ARRAY_SHADOW_AMD 0x91DD
+#define GL_FLOAT16_SAMPLER_CUBE_SHADOW_AMD 0x91DE
+#define GL_FLOAT16_SAMPLER_CUBE_MAP_ARRAY_SHADOW_AMD 0x91DF
+
+#define GL_FLOAT16_IMAGE_1D_AMD 0x91E0
+#define GL_FLOAT16_IMAGE_2D_AMD 0x91E1
+#define GL_FLOAT16_IMAGE_3D_AMD 0x91E2
+#define GL_FLOAT16_IMAGE_2D_RECT_AMD 0x91E3
+#define GL_FLOAT16_IMAGE_CUBE_AMD 0x91E4
+#define GL_FLOAT16_IMAGE_1D_ARRAY_AMD 0x91E5
+#define GL_FLOAT16_IMAGE_2D_ARRAY_AMD 0x91E6
+#define GL_FLOAT16_IMAGE_CUBE_MAP_ARRAY_AMD 0x91E7
+#define GL_FLOAT16_IMAGE_BUFFER_AMD 0x91E8
+#define GL_FLOAT16_IMAGE_2D_MULTISAMPLE_AMD 0x91E9
+#define GL_FLOAT16_IMAGE_2D_MULTISAMPLE_ARRAY_AMD 0x91EA
+#endif
+
+#define GL_INT_SAMPLER_1D 0x8DC9
+#define GL_INT_SAMPLER_2D 0x8DCA
+#define GL_INT_SAMPLER_3D 0x8DCB
+#define GL_INT_SAMPLER_CUBE 0x8DCC
+#define GL_INT_SAMPLER_1D_ARRAY 0x8DCE
+#define GL_INT_SAMPLER_2D_ARRAY 0x8DCF
+#define GL_INT_SAMPLER_2D_RECT 0x8DCD
+#define GL_INT_SAMPLER_BUFFER 0x8DD0
+#define GL_INT_SAMPLER_2D_MULTISAMPLE 0x9109
+#define GL_INT_SAMPLER_2D_MULTISAMPLE_ARRAY 0x910C
+#define GL_INT_SAMPLER_CUBE_MAP_ARRAY 0x900E
+#define GL_INT_SAMPLER_CUBE_MAP_ARRAY_ARB 0x900E
+
+#define GL_UNSIGNED_INT_SAMPLER_1D 0x8DD1
+#define GL_UNSIGNED_INT_SAMPLER_2D 0x8DD2
+#define GL_UNSIGNED_INT_SAMPLER_3D 0x8DD3
+#define GL_UNSIGNED_INT_SAMPLER_CUBE 0x8DD4
+#define GL_UNSIGNED_INT_SAMPLER_1D_ARRAY 0x8DD6
+#define GL_UNSIGNED_INT_SAMPLER_2D_ARRAY 0x8DD7
+#define GL_UNSIGNED_INT_SAMPLER_2D_RECT 0x8DD5
+#define GL_UNSIGNED_INT_SAMPLER_BUFFER 0x8DD8
+#define GL_UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE_ARRAY 0x910D
+#define GL_UNSIGNED_INT_SAMPLER_CUBE_MAP_ARRAY 0x900F
+#define GL_UNSIGNED_INT_SAMPLER_CUBE_MAP_ARRAY_ARB 0x900F
+#define GL_UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE 0x910A
+
+#define GL_IMAGE_1D 0x904C
+#define GL_IMAGE_2D 0x904D
+#define GL_IMAGE_3D 0x904E
+#define GL_IMAGE_2D_RECT 0x904F
+#define GL_IMAGE_CUBE 0x9050
+#define GL_IMAGE_BUFFER 0x9051
+#define GL_IMAGE_1D_ARRAY 0x9052
+#define GL_IMAGE_2D_ARRAY 0x9053
+#define GL_IMAGE_CUBE_MAP_ARRAY 0x9054
+#define GL_IMAGE_2D_MULTISAMPLE 0x9055
+#define GL_IMAGE_2D_MULTISAMPLE_ARRAY 0x9056
+#define GL_INT_IMAGE_1D 0x9057
+#define GL_INT_IMAGE_2D 0x9058
+#define GL_INT_IMAGE_3D 0x9059
+#define GL_INT_IMAGE_2D_RECT 0x905A
+#define GL_INT_IMAGE_CUBE 0x905B
+#define GL_INT_IMAGE_BUFFER 0x905C
+#define GL_INT_IMAGE_1D_ARRAY 0x905D
+#define GL_INT_IMAGE_2D_ARRAY 0x905E
+#define GL_INT_IMAGE_CUBE_MAP_ARRAY 0x905F
+#define GL_INT_IMAGE_2D_MULTISAMPLE 0x9060
+#define GL_INT_IMAGE_2D_MULTISAMPLE_ARRAY 0x9061
+#define GL_UNSIGNED_INT_IMAGE_1D 0x9062
+#define GL_UNSIGNED_INT_IMAGE_2D 0x9063
+#define GL_UNSIGNED_INT_IMAGE_3D 0x9064
+#define GL_UNSIGNED_INT_IMAGE_2D_RECT 0x9065
+#define GL_UNSIGNED_INT_IMAGE_CUBE 0x9066
+#define GL_UNSIGNED_INT_IMAGE_BUFFER 0x9067
+#define GL_UNSIGNED_INT_IMAGE_1D_ARRAY 0x9068
+#define GL_UNSIGNED_INT_IMAGE_2D_ARRAY 0x9069
+#define GL_UNSIGNED_INT_IMAGE_CUBE_MAP_ARRAY 0x906A
+#define GL_UNSIGNED_INT_IMAGE_2D_MULTISAMPLE 0x906B
+#define GL_UNSIGNED_INT_IMAGE_2D_MULTISAMPLE_ARRAY 0x906C
+
+#define GL_UNSIGNED_INT_ATOMIC_COUNTER 0x92DB
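These constants let reflection code describe shader variables using the GL API's own type enums without pulling in GL headers. A hedged sketch of the kind of mapping they support (glFloatVectorType is an illustrative helper, not a function from this header):

    #include "gl_types.h"

    // Map a float vector's component count to the matching GL type enum.
    static int glFloatVectorType(int components) {
        switch (components) {
        case 1: return GL_FLOAT;
        case 2: return GL_FLOAT_VEC2;
        case 3: return GL_FLOAT_VEC3;
        case 4: return GL_FLOAT_VEC4;
        default: return 0; // no matching GL type
        }
    }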
diff --git a/thirdparty/glslang/glslang/MachineIndependent/glslang.y b/thirdparty/glslang/glslang/MachineIndependent/glslang.y
new file mode 100644
index 0000000000..b5691a29fd
--- /dev/null
+++ b/thirdparty/glslang/glslang/MachineIndependent/glslang.y
@@ -0,0 +1,3796 @@
+//
+// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+// Copyright (C) 2012-2013 LunarG, Inc.
+// Copyright (C) 2017 ARM Limited.
+// Copyright (C) 2015-2018 Google, Inc.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+/**
+ * This is the bison grammar and the productions for parsing all versions of
+ * the GLSL shading languages.
+ */
+%{
+
+/* Based on:
+ANSI C Yacc grammar
+
+In 1985, Jeff Lee published his Yacc grammar (which is accompanied by a
+matching Lex specification) for the April 30, 1985 draft version of the
+ANSI C standard. Tom Stockfisch reposted it to net.sources in 1987; that
+original, as mentioned in the answer to question 17.25 of the comp.lang.c
+FAQ, can be ftp'ed from ftp.uu.net, file usenet/net.sources/ansi.c.grammar.Z.
+
+I intend to keep this version as close to the current C Standard grammar as
+possible; please let me know if you discover discrepancies.
+
+Jutta Degener, 1995
+*/
+
+#include "SymbolTable.h"
+#include "ParseHelper.h"
+#include "../Public/ShaderLang.h"
+#include "attribute.h"
+
+using namespace glslang;
+
+%}
+
+%define parse.error verbose
+
+%union {
+ struct {
+ glslang::TSourceLoc loc;
+ union {
+ glslang::TString *string;
+ int i;
+ unsigned int u;
+ long long i64;
+ unsigned long long u64;
+ bool b;
+ double d;
+ };
+ glslang::TSymbol* symbol;
+ } lex;
+ struct {
+ glslang::TSourceLoc loc;
+ glslang::TOperator op;
+ union {
+ TIntermNode* intermNode;
+ glslang::TIntermNodePair nodePair;
+ glslang::TIntermTyped* intermTypedNode;
+ glslang::TAttributes* attributes;
+ };
+ union {
+ glslang::TPublicType type;
+ glslang::TFunction* function;
+ glslang::TParameter param;
+ glslang::TTypeLoc typeLine;
+ glslang::TTypeList* typeList;
+ glslang::TArraySizes* arraySizes;
+ glslang::TIdentifierList* identifierList;
+ };
+ glslang::TArraySizes* typeParameters;
+ } interm;
+}
+
+%{
+
+/* Windows-only pragmas: silence noisy MSVC warnings */
+#ifdef _MSC_VER
+ #pragma warning(disable : 4065)
+ #pragma warning(disable : 4127)
+ #pragma warning(disable : 4244)
+#endif
+
+#define parseContext (*pParseContext)
+#define yyerror(context, msg) context->parserError(msg)
+
+extern int yylex(YYSTYPE*, TParseContext&);
+
+%}
+
+%parse-param {glslang::TParseContext* pParseContext}
+%lex-param {parseContext}
+%pure-parser // enable thread safety
+%expect 1 // one shift/reduce conflict, from the classic dangling-else ambiguity
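That one conflict is the dangling-else: after `if (a) if (b) s1;` with `else` as lookahead, the parser may shift (attaching the else to the inner if) or reduce (closing the inner if first). Bison's default of shifting yields the standard binding, as this sketch shows (plain C++, which shares the rule with GLSL):

    int choose(bool a, bool b) {
        // Shifting on the conflict binds the else to the NEAREST if,
        // so this returns 2 when (a && !b) and 0 when !a.
        if (a)
            if (b)
                return 1;
            else
                return 2;
        return 0;
    }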
+
+%token <lex> ATTRIBUTE VARYING
+%token <lex> FLOAT16_T FLOAT FLOAT32_T DOUBLE FLOAT64_T
+%token <lex> CONST BOOL INT UINT INT64_T UINT64_T INT32_T UINT32_T INT16_T UINT16_T INT8_T UINT8_T
+%token <lex> BREAK CONTINUE DO ELSE FOR IF DISCARD RETURN SWITCH CASE DEFAULT SUBROUTINE
+%token <lex> BVEC2 BVEC3 BVEC4
+%token <lex> IVEC2 IVEC3 IVEC4
+%token <lex> UVEC2 UVEC3 UVEC4
+%token <lex> I64VEC2 I64VEC3 I64VEC4
+%token <lex> U64VEC2 U64VEC3 U64VEC4
+%token <lex> I32VEC2 I32VEC3 I32VEC4
+%token <lex> U32VEC2 U32VEC3 U32VEC4
+%token <lex> I16VEC2 I16VEC3 I16VEC4
+%token <lex> U16VEC2 U16VEC3 U16VEC4
+%token <lex> I8VEC2 I8VEC3 I8VEC4
+%token <lex> U8VEC2 U8VEC3 U8VEC4
+%token <lex> VEC2 VEC3 VEC4
+%token <lex> MAT2 MAT3 MAT4 CENTROID IN OUT INOUT
+%token <lex> UNIFORM PATCH SAMPLE BUFFER SHARED NONUNIFORM PAYLOADNV PAYLOADINNV HITATTRNV CALLDATANV CALLDATAINNV
+%token <lex> COHERENT VOLATILE RESTRICT READONLY WRITEONLY DEVICECOHERENT QUEUEFAMILYCOHERENT WORKGROUPCOHERENT SUBGROUPCOHERENT NONPRIVATE
+%token <lex> DVEC2 DVEC3 DVEC4 DMAT2 DMAT3 DMAT4
+%token <lex> F16VEC2 F16VEC3 F16VEC4 F16MAT2 F16MAT3 F16MAT4
+%token <lex> F32VEC2 F32VEC3 F32VEC4 F32MAT2 F32MAT3 F32MAT4
+%token <lex> F64VEC2 F64VEC3 F64VEC4 F64MAT2 F64MAT3 F64MAT4
+%token <lex> NOPERSPECTIVE FLAT SMOOTH LAYOUT EXPLICITINTERPAMD PERVERTEXNV PERPRIMITIVENV PERVIEWNV PERTASKNV
+
+%token <lex> MAT2X2 MAT2X3 MAT2X4
+%token <lex> MAT3X2 MAT3X3 MAT3X4
+%token <lex> MAT4X2 MAT4X3 MAT4X4
+%token <lex> DMAT2X2 DMAT2X3 DMAT2X4
+%token <lex> DMAT3X2 DMAT3X3 DMAT3X4
+%token <lex> DMAT4X2 DMAT4X3 DMAT4X4
+%token <lex> F16MAT2X2 F16MAT2X3 F16MAT2X4
+%token <lex> F16MAT3X2 F16MAT3X3 F16MAT3X4
+%token <lex> F16MAT4X2 F16MAT4X3 F16MAT4X4
+%token <lex> F32MAT2X2 F32MAT2X3 F32MAT2X4
+%token <lex> F32MAT3X2 F32MAT3X3 F32MAT3X4
+%token <lex> F32MAT4X2 F32MAT4X3 F32MAT4X4
+%token <lex> F64MAT2X2 F64MAT2X3 F64MAT2X4
+%token <lex> F64MAT3X2 F64MAT3X3 F64MAT3X4
+%token <lex> F64MAT4X2 F64MAT4X3 F64MAT4X4
+%token <lex> ATOMIC_UINT
+%token <lex> ACCSTRUCTNV
+%token <lex> FCOOPMATNV
+
+// combined image/sampler
+%token <lex> SAMPLER1D SAMPLER2D SAMPLER3D SAMPLERCUBE SAMPLER1DSHADOW SAMPLER2DSHADOW
+%token <lex> SAMPLERCUBESHADOW SAMPLER1DARRAY SAMPLER2DARRAY SAMPLER1DARRAYSHADOW
+%token <lex> SAMPLER2DARRAYSHADOW ISAMPLER1D ISAMPLER2D ISAMPLER3D ISAMPLERCUBE
+%token <lex> ISAMPLER1DARRAY ISAMPLER2DARRAY USAMPLER1D USAMPLER2D USAMPLER3D
+%token <lex> USAMPLERCUBE USAMPLER1DARRAY USAMPLER2DARRAY
+%token <lex> SAMPLER2DRECT SAMPLER2DRECTSHADOW ISAMPLER2DRECT USAMPLER2DRECT
+%token <lex> SAMPLERBUFFER ISAMPLERBUFFER USAMPLERBUFFER
+%token <lex> SAMPLERCUBEARRAY SAMPLERCUBEARRAYSHADOW
+%token <lex> ISAMPLERCUBEARRAY USAMPLERCUBEARRAY
+%token <lex> SAMPLER2DMS ISAMPLER2DMS USAMPLER2DMS
+%token <lex> SAMPLER2DMSARRAY ISAMPLER2DMSARRAY USAMPLER2DMSARRAY
+%token <lex> SAMPLEREXTERNALOES
+%token <lex> SAMPLEREXTERNAL2DY2YEXT
+
+%token <lex> F16SAMPLER1D F16SAMPLER2D F16SAMPLER3D F16SAMPLER2DRECT F16SAMPLERCUBE
+%token <lex> F16SAMPLER1DARRAY F16SAMPLER2DARRAY F16SAMPLERCUBEARRAY
+%token <lex> F16SAMPLERBUFFER F16SAMPLER2DMS F16SAMPLER2DMSARRAY
+%token <lex> F16SAMPLER1DSHADOW F16SAMPLER2DSHADOW F16SAMPLER1DARRAYSHADOW F16SAMPLER2DARRAYSHADOW
+%token <lex> F16SAMPLER2DRECTSHADOW F16SAMPLERCUBESHADOW F16SAMPLERCUBEARRAYSHADOW
+
+// pure sampler
+%token <lex> SAMPLER SAMPLERSHADOW
+
+// texture without sampler
+%token <lex> TEXTURE1D TEXTURE2D TEXTURE3D TEXTURECUBE
+%token <lex> TEXTURE1DARRAY TEXTURE2DARRAY
+%token <lex> ITEXTURE1D ITEXTURE2D ITEXTURE3D ITEXTURECUBE
+%token <lex> ITEXTURE1DARRAY ITEXTURE2DARRAY UTEXTURE1D UTEXTURE2D UTEXTURE3D
+%token <lex> UTEXTURECUBE UTEXTURE1DARRAY UTEXTURE2DARRAY
+%token <lex> TEXTURE2DRECT ITEXTURE2DRECT UTEXTURE2DRECT
+%token <lex> TEXTUREBUFFER ITEXTUREBUFFER UTEXTUREBUFFER
+%token <lex> TEXTURECUBEARRAY ITEXTURECUBEARRAY UTEXTURECUBEARRAY
+%token <lex> TEXTURE2DMS ITEXTURE2DMS UTEXTURE2DMS
+%token <lex> TEXTURE2DMSARRAY ITEXTURE2DMSARRAY UTEXTURE2DMSARRAY
+
+%token <lex> F16TEXTURE1D F16TEXTURE2D F16TEXTURE3D F16TEXTURE2DRECT F16TEXTURECUBE
+%token <lex> F16TEXTURE1DARRAY F16TEXTURE2DARRAY F16TEXTURECUBEARRAY
+%token <lex> F16TEXTUREBUFFER F16TEXTURE2DMS F16TEXTURE2DMSARRAY
+
+// input attachments
+%token <lex> SUBPASSINPUT SUBPASSINPUTMS ISUBPASSINPUT ISUBPASSINPUTMS USUBPASSINPUT USUBPASSINPUTMS
+%token <lex> F16SUBPASSINPUT F16SUBPASSINPUTMS
+
+%token <lex> IMAGE1D IIMAGE1D UIMAGE1D IMAGE2D IIMAGE2D
+%token <lex> UIMAGE2D IMAGE3D IIMAGE3D UIMAGE3D
+%token <lex> IMAGE2DRECT IIMAGE2DRECT UIMAGE2DRECT
+%token <lex> IMAGECUBE IIMAGECUBE UIMAGECUBE
+%token <lex> IMAGEBUFFER IIMAGEBUFFER UIMAGEBUFFER
+%token <lex> IMAGE1DARRAY IIMAGE1DARRAY UIMAGE1DARRAY
+%token <lex> IMAGE2DARRAY IIMAGE2DARRAY UIMAGE2DARRAY
+%token <lex> IMAGECUBEARRAY IIMAGECUBEARRAY UIMAGECUBEARRAY
+%token <lex> IMAGE2DMS IIMAGE2DMS UIMAGE2DMS
+%token <lex> IMAGE2DMSARRAY IIMAGE2DMSARRAY UIMAGE2DMSARRAY
+
+%token <lex> F16IMAGE1D F16IMAGE2D F16IMAGE3D F16IMAGE2DRECT
+%token <lex> F16IMAGECUBE F16IMAGE1DARRAY F16IMAGE2DARRAY F16IMAGECUBEARRAY
+%token <lex> F16IMAGEBUFFER F16IMAGE2DMS F16IMAGE2DMSARRAY
+
+%token <lex> STRUCT VOID WHILE
+
+%token <lex> IDENTIFIER TYPE_NAME
+%token <lex> FLOATCONSTANT DOUBLECONSTANT INT16CONSTANT UINT16CONSTANT INT32CONSTANT UINT32CONSTANT INTCONSTANT UINTCONSTANT INT64CONSTANT UINT64CONSTANT BOOLCONSTANT FLOAT16CONSTANT
+%token <lex> LEFT_OP RIGHT_OP
+%token <lex> INC_OP DEC_OP LE_OP GE_OP EQ_OP NE_OP
+%token <lex> AND_OP OR_OP XOR_OP MUL_ASSIGN DIV_ASSIGN ADD_ASSIGN
+%token <lex> MOD_ASSIGN LEFT_ASSIGN RIGHT_ASSIGN AND_ASSIGN XOR_ASSIGN OR_ASSIGN
+%token <lex> SUB_ASSIGN
+
+%token <lex> LEFT_PAREN RIGHT_PAREN LEFT_BRACKET RIGHT_BRACKET LEFT_BRACE RIGHT_BRACE DOT
+%token <lex> COMMA COLON EQUAL SEMICOLON BANG DASH TILDE PLUS STAR SLASH PERCENT
+%token <lex> LEFT_ANGLE RIGHT_ANGLE VERTICAL_BAR CARET AMPERSAND QUESTION
+
+%token <lex> INVARIANT PRECISE
+%token <lex> HIGH_PRECISION MEDIUM_PRECISION LOW_PRECISION PRECISION
+
+%token <lex> PACKED RESOURCE SUPERP
+
+%type <interm> assignment_operator unary_operator
+%type <interm.intermTypedNode> variable_identifier primary_expression postfix_expression
+%type <interm.intermTypedNode> expression integer_expression assignment_expression
+%type <interm.intermTypedNode> unary_expression multiplicative_expression additive_expression
+%type <interm.intermTypedNode> relational_expression equality_expression
+%type <interm.intermTypedNode> conditional_expression constant_expression
+%type <interm.intermTypedNode> logical_or_expression logical_xor_expression logical_and_expression
+%type <interm.intermTypedNode> shift_expression and_expression exclusive_or_expression inclusive_or_expression
+%type <interm.intermTypedNode> function_call initializer initializer_list condition conditionopt
+
+%type <interm.intermNode> translation_unit function_definition
+%type <interm.intermNode> statement simple_statement
+%type <interm.intermNode> statement_list switch_statement_list compound_statement
+%type <interm.intermNode> declaration_statement selection_statement selection_statement_nonattributed expression_statement
+%type <interm.intermNode> switch_statement switch_statement_nonattributed case_label
+%type <interm.intermNode> declaration external_declaration
+%type <interm.intermNode> for_init_statement compound_statement_no_new_scope
+%type <interm.nodePair> selection_rest_statement for_rest_statement
+%type <interm.intermNode> iteration_statement iteration_statement_nonattributed jump_statement statement_no_new_scope statement_scoped
+%type <interm> single_declaration init_declarator_list
+
+%type <interm> parameter_declaration parameter_declarator parameter_type_specifier
+
+%type <interm> array_specifier
+%type <interm.type> precise_qualifier invariant_qualifier interpolation_qualifier storage_qualifier precision_qualifier
+%type <interm.type> layout_qualifier layout_qualifier_id_list layout_qualifier_id
+%type <interm.type> non_uniform_qualifier
+
+%type <interm.typeParameters> type_parameter_specifier
+%type <interm.typeParameters> type_parameter_specifier_opt
+%type <interm.typeParameters> type_parameter_specifier_list
+
+%type <interm.type> type_qualifier fully_specified_type type_specifier
+%type <interm.type> single_type_qualifier
+%type <interm.type> type_specifier_nonarray
+%type <interm.type> struct_specifier
+%type <interm.typeLine> struct_declarator
+%type <interm.typeList> struct_declarator_list struct_declaration struct_declaration_list type_name_list
+%type <interm> block_structure
+%type <interm.function> function_header function_declarator
+%type <interm.function> function_header_with_parameters
+%type <interm> function_call_header_with_parameters function_call_header_no_parameters function_call_generic function_prototype
+%type <interm> function_call_or_method function_identifier function_call_header
+
+%type <interm.identifierList> identifier_list
+
+%type <interm.attributes> attribute attribute_list single_attribute
+
+%start translation_unit
+%%
+
+variable_identifier
+ : IDENTIFIER {
+ $$ = parseContext.handleVariable($1.loc, $1.symbol, $1.string);
+ }
+ ;
+
+primary_expression
+ : variable_identifier {
+ $$ = $1;
+ }
+ | INT32CONSTANT {
+ parseContext.explicitInt32Check($1.loc, "32-bit signed literal");
+ $$ = parseContext.intermediate.addConstantUnion($1.i, $1.loc, true);
+ }
+ | UINT32CONSTANT {
+ parseContext.explicitInt32Check($1.loc, "32-bit unsigned literal");
+ $$ = parseContext.intermediate.addConstantUnion($1.u, $1.loc, true);
+ }
+ | INTCONSTANT {
+ $$ = parseContext.intermediate.addConstantUnion($1.i, $1.loc, true);
+ }
+ | UINTCONSTANT {
+ parseContext.fullIntegerCheck($1.loc, "unsigned literal");
+ $$ = parseContext.intermediate.addConstantUnion($1.u, $1.loc, true);
+ }
+ | INT64CONSTANT {
+ parseContext.int64Check($1.loc, "64-bit integer literal");
+ $$ = parseContext.intermediate.addConstantUnion($1.i64, $1.loc, true);
+ }
+ | UINT64CONSTANT {
+ parseContext.int64Check($1.loc, "64-bit unsigned integer literal");
+ $$ = parseContext.intermediate.addConstantUnion($1.u64, $1.loc, true);
+ }
+ | INT16CONSTANT {
+ parseContext.explicitInt16Check($1.loc, "16-bit integer literal");
+ $$ = parseContext.intermediate.addConstantUnion((short)$1.i, $1.loc, true);
+ }
+ | UINT16CONSTANT {
+ parseContext.explicitInt16Check($1.loc, "16-bit unsigned integer literal");
+ $$ = parseContext.intermediate.addConstantUnion((unsigned short)$1.u, $1.loc, true);
+ }
+ | FLOATCONSTANT {
+ $$ = parseContext.intermediate.addConstantUnion($1.d, EbtFloat, $1.loc, true);
+ }
+ | DOUBLECONSTANT {
+ parseContext.doubleCheck($1.loc, "double literal");
+ $$ = parseContext.intermediate.addConstantUnion($1.d, EbtDouble, $1.loc, true);
+ }
+ | FLOAT16CONSTANT {
+ parseContext.float16Check($1.loc, "half float literal");
+ $$ = parseContext.intermediate.addConstantUnion($1.d, EbtFloat16, $1.loc, true);
+ }
+ | BOOLCONSTANT {
+ $$ = parseContext.intermediate.addConstantUnion($1.b, $1.loc, true);
+ }
+ | LEFT_PAREN expression RIGHT_PAREN {
+ $$ = $2;
+ if ($$->getAsConstantUnion())
+ $$->getAsConstantUnion()->setExpression();
+ }
+ ;
+
+postfix_expression
+ : primary_expression {
+ $$ = $1;
+ }
+ | postfix_expression LEFT_BRACKET integer_expression RIGHT_BRACKET {
+ $$ = parseContext.handleBracketDereference($2.loc, $1, $3);
+ }
+ | function_call {
+ $$ = $1;
+ }
+ | postfix_expression DOT IDENTIFIER {
+ $$ = parseContext.handleDotDereference($3.loc, $1, *$3.string);
+ }
+ | postfix_expression INC_OP {
+ parseContext.variableCheck($1);
+ parseContext.lValueErrorCheck($2.loc, "++", $1);
+ $$ = parseContext.handleUnaryMath($2.loc, "++", EOpPostIncrement, $1);
+ }
+ | postfix_expression DEC_OP {
+ parseContext.variableCheck($1);
+ parseContext.lValueErrorCheck($2.loc, "--", $1);
+ $$ = parseContext.handleUnaryMath($2.loc, "--", EOpPostDecrement, $1);
+ }
+ ;
+
+integer_expression
+ : expression {
+ parseContext.integerCheck($1, "[]");
+ $$ = $1;
+ }
+ ;
+
+function_call
+ : function_call_or_method {
+ $$ = parseContext.handleFunctionCall($1.loc, $1.function, $1.intermNode);
+ delete $1.function;
+ }
+ ;
+
+function_call_or_method
+ : function_call_generic {
+ $$ = $1;
+ }
+ ;
+
+function_call_generic
+ : function_call_header_with_parameters RIGHT_PAREN {
+ $$ = $1;
+ $$.loc = $2.loc;
+ }
+ | function_call_header_no_parameters RIGHT_PAREN {
+ $$ = $1;
+ $$.loc = $2.loc;
+ }
+ ;
+
+function_call_header_no_parameters
+ : function_call_header VOID {
+ $$ = $1;
+ }
+ | function_call_header {
+ $$ = $1;
+ }
+ ;
+
+function_call_header_with_parameters
+ : function_call_header assignment_expression {
+ TParameter param = { 0, new TType };
+ param.type->shallowCopy($2->getType());
+ $1.function->addParameter(param);
+ $$.function = $1.function;
+ $$.intermNode = $2;
+ }
+ | function_call_header_with_parameters COMMA assignment_expression {
+ TParameter param = { 0, new TType };
+ param.type->shallowCopy($3->getType());
+ $1.function->addParameter(param);
+ $$.function = $1.function;
+ $$.intermNode = parseContext.intermediate.growAggregate($1.intermNode, $3, $2.loc);
+ }
+ ;
+
+function_call_header
+ : function_identifier LEFT_PAREN {
+ $$ = $1;
+ }
+ ;
+
+// Grammar Note: Constructors look like functions, but are recognized as types.
+
+function_identifier
+ : type_specifier {
+ // Constructor
+ $$.intermNode = 0;
+ $$.function = parseContext.handleConstructorCall($1.loc, $1);
+ }
+ | postfix_expression {
+ //
+ // Should be a method or subroutine call, but we haven't recognized the arguments yet.
+ //
+ $$.function = 0;
+ $$.intermNode = 0;
+
+ TIntermMethod* method = $1->getAsMethodNode();
+ if (method) {
+ $$.function = new TFunction(&method->getMethodName(), TType(EbtInt), EOpArrayLength);
+ $$.intermNode = method->getObject();
+ } else {
+ TIntermSymbol* symbol = $1->getAsSymbolNode();
+ if (symbol) {
+ parseContext.reservedErrorCheck(symbol->getLoc(), symbol->getName());
+ TFunction *function = new TFunction(&symbol->getName(), TType(EbtVoid));
+ $$.function = function;
+ } else
+ parseContext.error($1->getLoc(), "function call, method, or subroutine call expected", "", "");
+ }
+
+ if ($$.function == 0) {
+ // error recovery
+ TString* empty = NewPoolTString("");
+ $$.function = new TFunction(empty, TType(EbtVoid), EOpNull);
+ }
+ }
+ | non_uniform_qualifier {
+ // Constructor
+ $$.intermNode = 0;
+ $$.function = parseContext.handleConstructorCall($1.loc, $1);
+ }
+ ;
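The method branch above serves the method-style call GLSL supports, `.length()`; the grammar wraps it in a TFunction carrying EOpArrayLength so later resolution treats it uniformly with ordinary calls. A hedged GLSL illustration, held as a C++ string (the shader is an assumption for illustration):

    static const char* kLengthSrc = R"(
        #version 450
        void main() {
            float data[8];
            int n = data.length(); // parsed through the method path above
        }
    )";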
+
+unary_expression
+ : postfix_expression {
+ parseContext.variableCheck($1);
+ $$ = $1;
+ if (TIntermMethod* method = $1->getAsMethodNode())
+ parseContext.error($1->getLoc(), "incomplete method syntax", method->getMethodName().c_str(), "");
+ }
+ | INC_OP unary_expression {
+ parseContext.lValueErrorCheck($1.loc, "++", $2);
+ $$ = parseContext.handleUnaryMath($1.loc, "++", EOpPreIncrement, $2);
+ }
+ | DEC_OP unary_expression {
+ parseContext.lValueErrorCheck($1.loc, "--", $2);
+ $$ = parseContext.handleUnaryMath($1.loc, "--", EOpPreDecrement, $2);
+ }
+ | unary_operator unary_expression {
+ if ($1.op != EOpNull) {
+ char errorOp[2] = {0, 0};
+ switch($1.op) {
+ case EOpNegative: errorOp[0] = '-'; break;
+ case EOpLogicalNot: errorOp[0] = '!'; break;
+ case EOpBitwiseNot: errorOp[0] = '~'; break;
+ default: break; // some compilers want this
+ }
+ $$ = parseContext.handleUnaryMath($1.loc, errorOp, $1.op, $2);
+ } else {
+ $$ = $2;
+ if ($$->getAsConstantUnion())
+ $$->getAsConstantUnion()->setExpression();
+ }
+ }
+ ;
+// Grammar Note: No traditional-style type casts.
+
+unary_operator
+ : PLUS { $$.loc = $1.loc; $$.op = EOpNull; }
+ | DASH { $$.loc = $1.loc; $$.op = EOpNegative; }
+ | BANG { $$.loc = $1.loc; $$.op = EOpLogicalNot; }
+ | TILDE { $$.loc = $1.loc; $$.op = EOpBitwiseNot;
+ parseContext.fullIntegerCheck($1.loc, "bitwise not"); }
+ ;
+// Grammar Note: No '*' or '&' unary ops. Pointers are not supported.
+
+multiplicative_expression
+ : unary_expression { $$ = $1; }
+ | multiplicative_expression STAR unary_expression {
+ $$ = parseContext.handleBinaryMath($2.loc, "*", EOpMul, $1, $3);
+ if ($$ == 0)
+ $$ = $1;
+ }
+ | multiplicative_expression SLASH unary_expression {
+ $$ = parseContext.handleBinaryMath($2.loc, "/", EOpDiv, $1, $3);
+ if ($$ == 0)
+ $$ = $1;
+ }
+ | multiplicative_expression PERCENT unary_expression {
+ parseContext.fullIntegerCheck($2.loc, "%");
+ $$ = parseContext.handleBinaryMath($2.loc, "%", EOpMod, $1, $3);
+ if ($$ == 0)
+ $$ = $1;
+ }
+ ;
+
+additive_expression
+ : multiplicative_expression { $$ = $1; }
+ | additive_expression PLUS multiplicative_expression {
+ $$ = parseContext.handleBinaryMath($2.loc, "+", EOpAdd, $1, $3);
+ if ($$ == 0)
+ $$ = $1;
+ }
+ | additive_expression DASH multiplicative_expression {
+ $$ = parseContext.handleBinaryMath($2.loc, "-", EOpSub, $1, $3);
+ if ($$ == 0)
+ $$ = $1;
+ }
+ ;
+
+shift_expression
+ : additive_expression { $$ = $1; }
+ | shift_expression LEFT_OP additive_expression {
+ parseContext.fullIntegerCheck($2.loc, "bit shift left");
+ $$ = parseContext.handleBinaryMath($2.loc, "<<", EOpLeftShift, $1, $3);
+ if ($$ == 0)
+ $$ = $1;
+ }
+ | shift_expression RIGHT_OP additive_expression {
+ parseContext.fullIntegerCheck($2.loc, "bit shift right");
+ $$ = parseContext.handleBinaryMath($2.loc, ">>", EOpRightShift, $1, $3);
+ if ($$ == 0)
+ $$ = $1;
+ }
+ ;
+
+relational_expression
+ : shift_expression { $$ = $1; }
+ | relational_expression LEFT_ANGLE shift_expression {
+ $$ = parseContext.handleBinaryMath($2.loc, "<", EOpLessThan, $1, $3);
+ if ($$ == 0)
+ $$ = parseContext.intermediate.addConstantUnion(false, $2.loc);
+ }
+ | relational_expression RIGHT_ANGLE shift_expression {
+ $$ = parseContext.handleBinaryMath($2.loc, ">", EOpGreaterThan, $1, $3);
+ if ($$ == 0)
+ $$ = parseContext.intermediate.addConstantUnion(false, $2.loc);
+ }
+ | relational_expression LE_OP shift_expression {
+ $$ = parseContext.handleBinaryMath($2.loc, "<=", EOpLessThanEqual, $1, $3);
+ if ($$ == 0)
+ $$ = parseContext.intermediate.addConstantUnion(false, $2.loc);
+ }
+ | relational_expression GE_OP shift_expression {
+ $$ = parseContext.handleBinaryMath($2.loc, ">=", EOpGreaterThanEqual, $1, $3);
+ if ($$ == 0)
+ $$ = parseContext.intermediate.addConstantUnion(false, $2.loc);
+ }
+ ;
+
+equality_expression
+ : relational_expression { $$ = $1; }
+ | equality_expression EQ_OP relational_expression {
+ parseContext.arrayObjectCheck($2.loc, $1->getType(), "array comparison");
+ parseContext.opaqueCheck($2.loc, $1->getType(), "==");
+ parseContext.specializationCheck($2.loc, $1->getType(), "==");
+ parseContext.referenceCheck($2.loc, $1->getType(), "==");
+ $$ = parseContext.handleBinaryMath($2.loc, "==", EOpEqual, $1, $3);
+ if ($$ == 0)
+ $$ = parseContext.intermediate.addConstantUnion(false, $2.loc);
+ }
+ | equality_expression NE_OP relational_expression {
+ parseContext.arrayObjectCheck($2.loc, $1->getType(), "array comparison");
+ parseContext.opaqueCheck($2.loc, $1->getType(), "!=");
+ parseContext.specializationCheck($2.loc, $1->getType(), "!=");
+ parseContext.referenceCheck($2.loc, $1->getType(), "!=");
+ $$ = parseContext.handleBinaryMath($2.loc, "!=", EOpNotEqual, $1, $3);
+ if ($$ == 0)
+ $$ = parseContext.intermediate.addConstantUnion(false, $2.loc);
+ }
+ ;
+
+and_expression
+ : equality_expression { $$ = $1; }
+ | and_expression AMPERSAND equality_expression {
+ parseContext.fullIntegerCheck($2.loc, "bitwise and");
+ $$ = parseContext.handleBinaryMath($2.loc, "&", EOpAnd, $1, $3);
+ if ($$ == 0)
+ $$ = $1;
+ }
+ ;
+
+exclusive_or_expression
+ : and_expression { $$ = $1; }
+ | exclusive_or_expression CARET and_expression {
+ parseContext.fullIntegerCheck($2.loc, "bitwise exclusive or");
+ $$ = parseContext.handleBinaryMath($2.loc, "^", EOpExclusiveOr, $1, $3);
+ if ($$ == 0)
+ $$ = $1;
+ }
+ ;
+
+inclusive_or_expression
+ : exclusive_or_expression { $$ = $1; }
+ | inclusive_or_expression VERTICAL_BAR exclusive_or_expression {
+ parseContext.fullIntegerCheck($2.loc, "bitwise inclusive or");
+ $$ = parseContext.handleBinaryMath($2.loc, "|", EOpInclusiveOr, $1, $3);
+ if ($$ == 0)
+ $$ = $1;
+ }
+ ;
+
+logical_and_expression
+ : inclusive_or_expression { $$ = $1; }
+ | logical_and_expression AND_OP inclusive_or_expression {
+ $$ = parseContext.handleBinaryMath($2.loc, "&&", EOpLogicalAnd, $1, $3);
+ if ($$ == 0)
+ $$ = parseContext.intermediate.addConstantUnion(false, $2.loc);
+ }
+ ;
+
+logical_xor_expression
+ : logical_and_expression { $$ = $1; }
+ | logical_xor_expression XOR_OP logical_and_expression {
+ $$ = parseContext.handleBinaryMath($2.loc, "^^", EOpLogicalXor, $1, $3);
+ if ($$ == 0)
+ $$ = parseContext.intermediate.addConstantUnion(false, $2.loc);
+ }
+ ;
+
+logical_or_expression
+ : logical_xor_expression { $$ = $1; }
+ | logical_or_expression OR_OP logical_xor_expression {
+ $$ = parseContext.handleBinaryMath($2.loc, "||", EOpLogicalOr, $1, $3);
+ if ($$ == 0)
+ $$ = parseContext.intermediate.addConstantUnion(false, $2.loc);
+ }
+ ;
+
+conditional_expression
+ : logical_or_expression { $$ = $1; }
+ | logical_or_expression QUESTION {
+ ++parseContext.controlFlowNestingLevel;
+ }
+ expression COLON assignment_expression {
+ --parseContext.controlFlowNestingLevel;
+ parseContext.boolCheck($2.loc, $1);
+ parseContext.rValueErrorCheck($2.loc, "?", $1);
+ parseContext.rValueErrorCheck($5.loc, ":", $4);
+ parseContext.rValueErrorCheck($5.loc, ":", $6);
+ $$ = parseContext.intermediate.addSelection($1, $4, $6, $2.loc);
+ if ($$ == 0) {
+ parseContext.binaryOpError($2.loc, ":", $4->getCompleteString(), $6->getCompleteString());
+ $$ = $6;
+ }
+ }
+ ;
+
+assignment_expression
+ : conditional_expression { $$ = $1; }
+ | unary_expression assignment_operator assignment_expression {
+ parseContext.arrayObjectCheck($2.loc, $1->getType(), "array assignment");
+ parseContext.opaqueCheck($2.loc, $1->getType(), "=");
+ parseContext.storage16BitAssignmentCheck($2.loc, $1->getType(), "=");
+ parseContext.specializationCheck($2.loc, $1->getType(), "=");
+ parseContext.lValueErrorCheck($2.loc, "assign", $1);
+ parseContext.rValueErrorCheck($2.loc, "assign", $3);
+ $$ = parseContext.intermediate.addAssign($2.op, $1, $3, $2.loc);
+ if ($$ == 0) {
+ parseContext.assignError($2.loc, "assign", $1->getCompleteString(), $3->getCompleteString());
+ $$ = $1;
+ }
+ }
+ ;
+
+assignment_operator
+ : EQUAL {
+ $$.loc = $1.loc;
+ $$.op = EOpAssign;
+ }
+ | MUL_ASSIGN {
+ $$.loc = $1.loc;
+ $$.op = EOpMulAssign;
+ }
+ | DIV_ASSIGN {
+ $$.loc = $1.loc;
+ $$.op = EOpDivAssign;
+ }
+ | MOD_ASSIGN {
+ parseContext.fullIntegerCheck($1.loc, "%=");
+ $$.loc = $1.loc;
+ $$.op = EOpModAssign;
+ }
+ | ADD_ASSIGN {
+ $$.loc = $1.loc;
+ $$.op = EOpAddAssign;
+ }
+ | SUB_ASSIGN {
+ $$.loc = $1.loc;
+ $$.op = EOpSubAssign;
+ }
+ | LEFT_ASSIGN {
+ parseContext.fullIntegerCheck($1.loc, "bit-shift left assign");
+ $$.loc = $1.loc; $$.op = EOpLeftShiftAssign;
+ }
+ | RIGHT_ASSIGN {
+ parseContext.fullIntegerCheck($1.loc, "bit-shift right assign");
+ $$.loc = $1.loc; $$.op = EOpRightShiftAssign;
+ }
+ | AND_ASSIGN {
+ parseContext.fullIntegerCheck($1.loc, "bitwise-and assign");
+ $$.loc = $1.loc; $$.op = EOpAndAssign;
+ }
+ | XOR_ASSIGN {
+ parseContext.fullIntegerCheck($1.loc, "bitwise-xor assign");
+ $$.loc = $1.loc; $$.op = EOpExclusiveOrAssign;
+ }
+ | OR_ASSIGN {
+ parseContext.fullIntegerCheck($1.loc, "bitwise-or assign");
+ $$.loc = $1.loc; $$.op = EOpInclusiveOrAssign;
+ }
+ ;
+
+expression
+ : assignment_expression {
+ $$ = $1;
+ }
+ | expression COMMA assignment_expression {
+ parseContext.samplerConstructorLocationCheck($2.loc, ",", $3);
+ $$ = parseContext.intermediate.addComma($1, $3, $2.loc);
+ if ($$ == 0) {
+ parseContext.binaryOpError($2.loc, ",", $1->getCompleteString(), $3->getCompleteString());
+ $$ = $3;
+ }
+ }
+ ;
+
+constant_expression
+ : conditional_expression {
+ parseContext.constantValueCheck($1, "");
+ $$ = $1;
+ }
+ ;
+
+declaration
+ : function_prototype SEMICOLON {
+ parseContext.handleFunctionDeclarator($1.loc, *$1.function, true /* prototype */);
+ $$ = 0;
+ // TODO: 4.0 functionality: subroutines: make the identifier a user type for this signature
+ }
+ | init_declarator_list SEMICOLON {
+ if ($1.intermNode && $1.intermNode->getAsAggregate())
+ $1.intermNode->getAsAggregate()->setOperator(EOpSequence);
+ $$ = $1.intermNode;
+ }
+ | PRECISION precision_qualifier type_specifier SEMICOLON {
+ parseContext.profileRequires($1.loc, ENoProfile, 130, 0, "precision statement");
+
+ // Lazily set the previous scope's defaults; this has an effect only the first time it is called in a particular scope
+ parseContext.symbolTable.setPreviousDefaultPrecisions(&parseContext.defaultPrecision[0]);
+ parseContext.setDefaultPrecision($1.loc, $3, $2.qualifier.precision);
+ $$ = 0;
+ }
+ | block_structure SEMICOLON {
+ parseContext.declareBlock($1.loc, *$1.typeList);
+ $$ = 0;
+ }
+ | block_structure IDENTIFIER SEMICOLON {
+ parseContext.declareBlock($1.loc, *$1.typeList, $2.string);
+ $$ = 0;
+ }
+ | block_structure IDENTIFIER array_specifier SEMICOLON {
+ parseContext.declareBlock($1.loc, *$1.typeList, $2.string, $3.arraySizes);
+ $$ = 0;
+ }
+ | type_qualifier SEMICOLON {
+ parseContext.globalQualifierFixCheck($1.loc, $1.qualifier);
+ parseContext.updateStandaloneQualifierDefaults($1.loc, $1);
+ $$ = 0;
+ }
+ | type_qualifier IDENTIFIER SEMICOLON {
+ parseContext.checkNoShaderLayouts($1.loc, $1.shaderQualifiers);
+ parseContext.addQualifierToExisting($1.loc, $1.qualifier, *$2.string);
+ $$ = 0;
+ }
+ | type_qualifier IDENTIFIER identifier_list SEMICOLON {
+ parseContext.checkNoShaderLayouts($1.loc, $1.shaderQualifiers);
+ $3->push_back($2.string);
+ parseContext.addQualifierToExisting($1.loc, $1.qualifier, *$3);
+ $$ = 0;
+ }
+ ;
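Among the declaration forms above, the PRECISION production handles default-precision statements. A hedged GLSL ES sketch (shader content is illustrative):

    static const char* kPrecisionSrc = R"(
        #version 300 es
        precision mediump float; // handled by the PRECISION production above
        out vec4 color;
        void main() { color = vec4(1.0); }
    )";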
+
+block_structure
+ : type_qualifier IDENTIFIER LEFT_BRACE { parseContext.nestedBlockCheck($1.loc); } struct_declaration_list RIGHT_BRACE {
+ --parseContext.structNestingLevel;
+ parseContext.blockName = $2.string;
+ parseContext.globalQualifierFixCheck($1.loc, $1.qualifier);
+ parseContext.checkNoShaderLayouts($1.loc, $1.shaderQualifiers);
+ parseContext.currentBlockQualifier = $1.qualifier;
+ $$.loc = $1.loc;
+ $$.typeList = $5;
+ }
+ ;
+
+identifier_list
+ : COMMA IDENTIFIER {
+ $$ = new TIdentifierList;
+ $$->push_back($2.string);
+ }
+ | identifier_list COMMA IDENTIFIER {
+ $$ = $1;
+ $$->push_back($3.string);
+ }
+ ;
+
+function_prototype
+ : function_declarator RIGHT_PAREN {
+ $$.function = $1;
+ $$.loc = $2.loc;
+ }
+ ;
+
+function_declarator
+ : function_header {
+ $$ = $1;
+ }
+ | function_header_with_parameters {
+ $$ = $1;
+ }
+ ;
+
+
+function_header_with_parameters
+ : function_header parameter_declaration {
+ // Add the parameter
+ $$ = $1;
+ if ($2.param.type->getBasicType() != EbtVoid)
+ $1->addParameter($2.param);
+ else
+ delete $2.param.type;
+ }
+ | function_header_with_parameters COMMA parameter_declaration {
+ //
+ // Only the first parameter of a one-parameter function can be void.
+ // The check that named parameters are not void is done in parameter_declarator.
+ //
+ if ($3.param.type->getBasicType() == EbtVoid) {
+ //
+ // A parameter after the first one is void.
+ //
+ parseContext.error($2.loc, "cannot be an argument type except for '(void)'", "void", "");
+ delete $3.param.type;
+ } else {
+ // Add the parameter
+ $$ = $1;
+ $1->addParameter($3.param);
+ }
+ }
+ ;
+
+function_header
+ : fully_specified_type IDENTIFIER LEFT_PAREN {
+ if ($1.qualifier.storage != EvqGlobal && $1.qualifier.storage != EvqTemporary) {
+ parseContext.error($2.loc, "no qualifiers allowed for function return",
+ GetStorageQualifierString($1.qualifier.storage), "");
+ }
+ if ($1.arraySizes)
+ parseContext.arraySizeRequiredCheck($1.loc, *$1.arraySizes);
+
+ // Add the function as a prototype after parsing it (we do not support recursion)
+ TFunction *function;
+ TType type($1);
+
+ // Potentially rename shader entry point function. No-op most of the time.
+ parseContext.renameShaderFunction($2.string);
+
+ // Make the function
+ function = new TFunction($2.string, type);
+ $$ = function;
+ }
+ ;
+
+parameter_declarator
+ // Type + name
+ : type_specifier IDENTIFIER {
+ if ($1.arraySizes) {
+ parseContext.profileRequires($1.loc, ENoProfile, 120, E_GL_3DL_array_objects, "arrayed type");
+ parseContext.profileRequires($1.loc, EEsProfile, 300, 0, "arrayed type");
+ parseContext.arraySizeRequiredCheck($1.loc, *$1.arraySizes);
+ }
+ if ($1.basicType == EbtVoid) {
+ parseContext.error($2.loc, "illegal use of type 'void'", $2.string->c_str(), "");
+ }
+ parseContext.reservedErrorCheck($2.loc, *$2.string);
+
+ TParameter param = {$2.string, new TType($1)};
+ $$.loc = $2.loc;
+ $$.param = param;
+ }
+ | type_specifier IDENTIFIER array_specifier {
+ if ($1.arraySizes) {
+ parseContext.profileRequires($1.loc, ENoProfile, 120, E_GL_3DL_array_objects, "arrayed type");
+ parseContext.profileRequires($1.loc, EEsProfile, 300, 0, "arrayed type");
+ parseContext.arraySizeRequiredCheck($1.loc, *$1.arraySizes);
+ }
+ TType* type = new TType($1);
+ type->transferArraySizes($3.arraySizes);
+ type->copyArrayInnerSizes($1.arraySizes);
+
+ parseContext.arrayOfArrayVersionCheck($2.loc, type->getArraySizes());
+ parseContext.arraySizeRequiredCheck($3.loc, *$3.arraySizes);
+ parseContext.reservedErrorCheck($2.loc, *$2.string);
+
+ TParameter param = { $2.string, type };
+
+ $$.loc = $2.loc;
+ $$.param = param;
+ }
+ ;
+
+parameter_declaration
+ //
+ // With name
+ //
+ : type_qualifier parameter_declarator {
+ $$ = $2;
+ if ($1.qualifier.precision != EpqNone)
+ $$.param.type->getQualifier().precision = $1.qualifier.precision;
+ parseContext.precisionQualifierCheck($$.loc, $$.param.type->getBasicType(), $$.param.type->getQualifier());
+
+ parseContext.checkNoShaderLayouts($1.loc, $1.shaderQualifiers);
+ parseContext.parameterTypeCheck($2.loc, $1.qualifier.storage, *$$.param.type);
+ parseContext.paramCheckFix($1.loc, $1.qualifier, *$$.param.type);
+
+ }
+ | parameter_declarator {
+ $$ = $1;
+
+ parseContext.parameterTypeCheck($1.loc, EvqIn, *$1.param.type);
+ parseContext.paramCheckFixStorage($1.loc, EvqTemporary, *$$.param.type);
+ parseContext.precisionQualifierCheck($$.loc, $$.param.type->getBasicType(), $$.param.type->getQualifier());
+ }
+ //
+ // Without name
+ //
+ | type_qualifier parameter_type_specifier {
+ $$ = $2;
+ if ($1.qualifier.precision != EpqNone)
+ $$.param.type->getQualifier().precision = $1.qualifier.precision;
+ parseContext.precisionQualifierCheck($1.loc, $$.param.type->getBasicType(), $$.param.type->getQualifier());
+
+ parseContext.checkNoShaderLayouts($1.loc, $1.shaderQualifiers);
+ parseContext.parameterTypeCheck($2.loc, $1.qualifier.storage, *$$.param.type);
+ parseContext.paramCheckFix($1.loc, $1.qualifier, *$$.param.type);
+ }
+ | parameter_type_specifier {
+ $$ = $1;
+
+ parseContext.parameterTypeCheck($1.loc, EvqIn, *$1.param.type);
+ parseContext.paramCheckFixStorage($1.loc, EvqTemporary, *$$.param.type);
+ parseContext.precisionQualifierCheck($$.loc, $$.param.type->getBasicType(), $$.param.type->getQualifier());
+ }
+ ;
+
+parameter_type_specifier
+ : type_specifier {
+ TParameter param = { 0, new TType($1) };
+ $$.param = param;
+ if ($1.arraySizes)
+ parseContext.arraySizeRequiredCheck($1.loc, *$1.arraySizes);
+ }
+ ;
+
+init_declarator_list
+ : single_declaration {
+ $$ = $1;
+ }
+ | init_declarator_list COMMA IDENTIFIER {
+ $$ = $1;
+ parseContext.declareVariable($3.loc, *$3.string, $1.type);
+ }
+ | init_declarator_list COMMA IDENTIFIER array_specifier {
+ $$ = $1;
+ parseContext.declareVariable($3.loc, *$3.string, $1.type, $4.arraySizes);
+ }
+ | init_declarator_list COMMA IDENTIFIER array_specifier EQUAL initializer {
+ $$.type = $1.type;
+ TIntermNode* initNode = parseContext.declareVariable($3.loc, *$3.string, $1.type, $4.arraySizes, $6);
+ $$.intermNode = parseContext.intermediate.growAggregate($1.intermNode, initNode, $5.loc);
+ }
+ | init_declarator_list COMMA IDENTIFIER EQUAL initializer {
+ $$.type = $1.type;
+ TIntermNode* initNode = parseContext.declareVariable($3.loc, *$3.string, $1.type, 0, $5);
+ $$.intermNode = parseContext.intermediate.growAggregate($1.intermNode, initNode, $4.loc);
+ }
+ ;
+
+single_declaration
+ : fully_specified_type {
+ $$.type = $1;
+ $$.intermNode = 0;
+ parseContext.declareTypeDefaults($$.loc, $$.type);
+ }
+ | fully_specified_type IDENTIFIER {
+ $$.type = $1;
+ $$.intermNode = 0;
+ parseContext.declareVariable($2.loc, *$2.string, $1);
+ }
+ | fully_specified_type IDENTIFIER array_specifier {
+ $$.type = $1;
+ $$.intermNode = 0;
+ parseContext.declareVariable($2.loc, *$2.string, $1, $3.arraySizes);
+ }
+ | fully_specified_type IDENTIFIER array_specifier EQUAL initializer {
+ $$.type = $1;
+ TIntermNode* initNode = parseContext.declareVariable($2.loc, *$2.string, $1, $3.arraySizes, $5);
+ $$.intermNode = parseContext.intermediate.growAggregate(0, initNode, $4.loc);
+ }
+ | fully_specified_type IDENTIFIER EQUAL initializer {
+ $$.type = $1;
+ TIntermNode* initNode = parseContext.declareVariable($2.loc, *$2.string, $1, 0, $4);
+ $$.intermNode = parseContext.intermediate.growAggregate(0, initNode, $3.loc);
+ }
+ ;
+
+// Grammar Note: No 'enum' or 'typedef'.
+
+fully_specified_type
+ : type_specifier {
+ $$ = $1;
+
+ parseContext.globalQualifierTypeCheck($1.loc, $1.qualifier, $$);
+ if ($1.arraySizes) {
+ parseContext.profileRequires($1.loc, ENoProfile, 120, E_GL_3DL_array_objects, "arrayed type");
+ parseContext.profileRequires($1.loc, EEsProfile, 300, 0, "arrayed type");
+ }
+
+ parseContext.precisionQualifierCheck($$.loc, $$.basicType, $$.qualifier);
+ }
+ | type_qualifier type_specifier {
+ parseContext.globalQualifierFixCheck($1.loc, $1.qualifier);
+ parseContext.globalQualifierTypeCheck($1.loc, $1.qualifier, $2);
+
+ if ($2.arraySizes) {
+ parseContext.profileRequires($2.loc, ENoProfile, 120, E_GL_3DL_array_objects, "arrayed type");
+ parseContext.profileRequires($2.loc, EEsProfile, 300, 0, "arrayed type");
+ }
+
+ if ($2.arraySizes && parseContext.arrayQualifierError($2.loc, $1.qualifier))
+ $2.arraySizes = nullptr;
+
+ parseContext.checkNoShaderLayouts($2.loc, $1.shaderQualifiers);
+ $2.shaderQualifiers.merge($1.shaderQualifiers);
+ parseContext.mergeQualifiers($2.loc, $2.qualifier, $1.qualifier, true);
+ parseContext.precisionQualifierCheck($2.loc, $2.basicType, $2.qualifier);
+
+ $$ = $2;
+
+ if (! $$.qualifier.isInterpolation() &&
+ ((parseContext.language == EShLangVertex && $$.qualifier.storage == EvqVaryingOut) ||
+ (parseContext.language == EShLangFragment && $$.qualifier.storage == EvqVaryingIn)))
+ $$.qualifier.smooth = true;
+ }
+ ;
+
+invariant_qualifier
+ : INVARIANT {
+ parseContext.globalCheck($1.loc, "invariant");
+ parseContext.profileRequires($$.loc, ENoProfile, 120, 0, "invariant");
+ $$.init($1.loc);
+ $$.qualifier.invariant = true;
+ }
+ ;
+
+interpolation_qualifier
+ : SMOOTH {
+ parseContext.globalCheck($1.loc, "smooth");
+ parseContext.profileRequires($1.loc, ENoProfile, 130, 0, "smooth");
+ parseContext.profileRequires($1.loc, EEsProfile, 300, 0, "smooth");
+ $$.init($1.loc);
+ $$.qualifier.smooth = true;
+ }
+ | FLAT {
+ parseContext.globalCheck($1.loc, "flat");
+ parseContext.profileRequires($1.loc, ENoProfile, 130, 0, "flat");
+ parseContext.profileRequires($1.loc, EEsProfile, 300, 0, "flat");
+ $$.init($1.loc);
+ $$.qualifier.flat = true;
+ }
+ | NOPERSPECTIVE {
+ parseContext.globalCheck($1.loc, "noperspective");
+#ifdef NV_EXTENSIONS
+ parseContext.profileRequires($1.loc, EEsProfile, 0, E_GL_NV_shader_noperspective_interpolation, "noperspective");
+#else
+ parseContext.requireProfile($1.loc, ~EEsProfile, "noperspective");
+#endif
+ parseContext.profileRequires($1.loc, ENoProfile, 130, 0, "noperspective");
+ $$.init($1.loc);
+ $$.qualifier.nopersp = true;
+ }
+ | EXPLICITINTERPAMD {
+#ifdef AMD_EXTENSIONS
+ parseContext.globalCheck($1.loc, "__explicitInterpAMD");
+ parseContext.profileRequires($1.loc, ECoreProfile, 450, E_GL_AMD_shader_explicit_vertex_parameter, "explicit interpolation");
+ parseContext.profileRequires($1.loc, ECompatibilityProfile, 450, E_GL_AMD_shader_explicit_vertex_parameter, "explicit interpolation");
+ $$.init($1.loc);
+ $$.qualifier.explicitInterp = true;
+#endif
+ }
+ | PERVERTEXNV {
+#ifdef NV_EXTENSIONS
+ parseContext.globalCheck($1.loc, "pervertexNV");
+ parseContext.profileRequires($1.loc, ECoreProfile, 0, E_GL_NV_fragment_shader_barycentric, "fragment shader barycentric");
+ parseContext.profileRequires($1.loc, ECompatibilityProfile, 0, E_GL_NV_fragment_shader_barycentric, "fragment shader barycentric");
+ parseContext.profileRequires($1.loc, EEsProfile, 0, E_GL_NV_fragment_shader_barycentric, "fragment shader barycentric");
+ $$.init($1.loc);
+ $$.qualifier.pervertexNV = true;
+#endif
+ }
+ | PERPRIMITIVENV {
+#ifdef NV_EXTENSIONS
+ // No need for profile version or extension check. Shader stage already checks both.
+ parseContext.globalCheck($1.loc, "perprimitiveNV");
+ parseContext.requireStage($1.loc, (EShLanguageMask)(EShLangFragmentMask | EShLangMeshNVMask), "perprimitiveNV");
+ // The fragment shader stage doesn't check for the extension, so we explicitly add the extension check below.
+ if (parseContext.language == EShLangFragment)
+ parseContext.requireExtensions($1.loc, 1, &E_GL_NV_mesh_shader, "perprimitiveNV");
+ $$.init($1.loc);
+ $$.qualifier.perPrimitiveNV = true;
+#endif
+ }
+ | PERVIEWNV {
+#ifdef NV_EXTENSIONS
+ // No need for profile version or extension check. Shader stage already checks both.
+ parseContext.globalCheck($1.loc, "perviewNV");
+ parseContext.requireStage($1.loc, EShLangMeshNV, "perviewNV");
+ $$.init($1.loc);
+ $$.qualifier.perViewNV = true;
+#endif
+ }
+ | PERTASKNV {
+#ifdef NV_EXTENSIONS
+ // No need for profile version or extension check. Shader stage already checks both.
+ parseContext.globalCheck($1.loc, "taskNV");
+ parseContext.requireStage($1.loc, (EShLanguageMask)(EShLangTaskNVMask | EShLangMeshNVMask), "taskNV");
+ $$.init($1.loc);
+ $$.qualifier.perTaskNV = true;
+#endif
+ }
+ ;
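A short GLSL sketch of the core interpolation qualifiers handled above, held as a C++ string (fragment-shader content is illustrative):

    static const char* kInterpSrc = R"(
        #version 450
        layout(location = 0) flat          in int   primitiveId; // FLAT: no interpolation
        layout(location = 1) noperspective in float screenX;     // NOPERSPECTIVE: linear in window space
        layout(location = 0) out vec4 color;
        void main() { color = vec4(screenX, float(primitiveId), 0.0, 1.0); }
    )";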
+
+layout_qualifier
+ : LAYOUT LEFT_PAREN layout_qualifier_id_list RIGHT_PAREN {
+ $$ = $3;
+ }
+ ;
+
+layout_qualifier_id_list
+ : layout_qualifier_id {
+ $$ = $1;
+ }
+ | layout_qualifier_id_list COMMA layout_qualifier_id {
+ $$ = $1;
+ $$.shaderQualifiers.merge($3.shaderQualifiers);
+ parseContext.mergeObjectLayoutQualifiers($$.qualifier, $3.qualifier, false);
+ }
+ ;
+
+layout_qualifier_id
+ : IDENTIFIER {
+ $$.init($1.loc);
+ parseContext.setLayoutQualifier($1.loc, $$, *$1.string);
+ }
+ | IDENTIFIER EQUAL constant_expression {
+ $$.init($1.loc);
+ parseContext.setLayoutQualifier($1.loc, $$, *$1.string, $3);
+ }
+ | SHARED { // because "shared" is both an identifier and a keyword
+ $$.init($1.loc);
+ TString strShared("shared");
+ parseContext.setLayoutQualifier($1.loc, $$, strShared);
+ }
+ ;
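Each comma-separated item inside layout(...) reaches the productions above either as a bare IDENTIFIER (std140), as IDENTIFIER = constant_expression (binding = 2), or as the special-cased shared keyword. A GLSL sketch (illustrative):

    static const char* kLayoutSrc = R"(
        #version 450
        layout(location = 0) in vec3 pos;            // IDENTIFIER EQUAL constant_expression
        layout(std140, binding = 2) uniform Params { // bare IDENTIFIER, then a pair
            vec4 tint;
        };
        void main() { gl_Position = vec4(pos, 1.0); }
    )";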
+
+precise_qualifier
+ : PRECISE {
+ parseContext.profileRequires($$.loc, ECoreProfile | ECompatibilityProfile, 400, E_GL_ARB_gpu_shader5, "precise");
+ parseContext.profileRequires($1.loc, EEsProfile, 320, Num_AEP_gpu_shader5, AEP_gpu_shader5, "precise");
+ $$.init($1.loc);
+ $$.qualifier.noContraction = true;
+ }
+ ;
+
+type_qualifier
+ : single_type_qualifier {
+ $$ = $1;
+ }
+ | type_qualifier single_type_qualifier {
+ $$ = $1;
+ if ($$.basicType == EbtVoid)
+ $$.basicType = $2.basicType;
+
+ $$.shaderQualifiers.merge($2.shaderQualifiers);
+ parseContext.mergeQualifiers($$.loc, $$.qualifier, $2.qualifier, false);
+ }
+ ;
+
+single_type_qualifier
+ : storage_qualifier {
+ $$ = $1;
+ }
+ | layout_qualifier {
+ $$ = $1;
+ }
+ | precision_qualifier {
+ parseContext.checkPrecisionQualifier($1.loc, $1.qualifier.precision);
+ $$ = $1;
+ }
+ | interpolation_qualifier {
+ // allow inheritance of storage qualifier from block declaration
+ $$ = $1;
+ }
+ | invariant_qualifier {
+ // allow inheritance of storage qualifier from block declaration
+ $$ = $1;
+ }
+ | precise_qualifier {
+ // allow inheritance of storage qualifier from block declaration
+ $$ = $1;
+ }
+ | non_uniform_qualifier {
+ $$ = $1;
+ }
+ ;
+
+storage_qualifier
+ : CONST {
+ $$.init($1.loc);
+ $$.qualifier.storage = EvqConst; // will later turn into EvqConstReadOnly, if the initializer is not constant
+ }
+ | ATTRIBUTE {
+ parseContext.requireStage($1.loc, EShLangVertex, "attribute");
+ parseContext.checkDeprecated($1.loc, ECoreProfile, 130, "attribute");
+ parseContext.checkDeprecated($1.loc, ENoProfile, 130, "attribute");
+ parseContext.requireNotRemoved($1.loc, ECoreProfile, 420, "attribute");
+ parseContext.requireNotRemoved($1.loc, EEsProfile, 300, "attribute");
+
+ parseContext.globalCheck($1.loc, "attribute");
+
+ $$.init($1.loc);
+ $$.qualifier.storage = EvqVaryingIn;
+ }
+ | VARYING {
+ parseContext.checkDeprecated($1.loc, ENoProfile, 130, "varying");
+ parseContext.checkDeprecated($1.loc, ECoreProfile, 130, "varying");
+ parseContext.requireNotRemoved($1.loc, ECoreProfile, 420, "varying");
+ parseContext.requireNotRemoved($1.loc, EEsProfile, 300, "varying");
+
+ parseContext.globalCheck($1.loc, "varying");
+
+ $$.init($1.loc);
+ if (parseContext.language == EShLangVertex)
+ $$.qualifier.storage = EvqVaryingOut;
+ else
+ $$.qualifier.storage = EvqVaryingIn;
+ }
+ | INOUT {
+ parseContext.globalCheck($1.loc, "inout");
+ $$.init($1.loc);
+ $$.qualifier.storage = EvqInOut;
+ }
+ | IN {
+ parseContext.globalCheck($1.loc, "in");
+ $$.init($1.loc);
+ // whether this is a parameter "in" or a pipeline "in" will get sorted out a bit later
+ $$.qualifier.storage = EvqIn;
+ }
+ | OUT {
+ parseContext.globalCheck($1.loc, "out");
+ $$.init($1.loc);
+ // whether this is a parameter "out" or a pipeline "out" will get sorted out a bit later
+ $$.qualifier.storage = EvqOut;
+ }
+ | CENTROID {
+ parseContext.profileRequires($1.loc, ENoProfile, 120, 0, "centroid");
+ parseContext.profileRequires($1.loc, EEsProfile, 300, 0, "centroid");
+ parseContext.globalCheck($1.loc, "centroid");
+ $$.init($1.loc);
+ $$.qualifier.centroid = true;
+ }
+ | PATCH {
+ parseContext.globalCheck($1.loc, "patch");
+ parseContext.requireStage($1.loc, (EShLanguageMask)(EShLangTessControlMask | EShLangTessEvaluationMask), "patch");
+ $$.init($1.loc);
+ $$.qualifier.patch = true;
+ }
+ | SAMPLE {
+ parseContext.globalCheck($1.loc, "sample");
+ $$.init($1.loc);
+ $$.qualifier.sample = true;
+ }
+ | UNIFORM {
+ parseContext.globalCheck($1.loc, "uniform");
+ $$.init($1.loc);
+ $$.qualifier.storage = EvqUniform;
+ }
+ | BUFFER {
+ parseContext.globalCheck($1.loc, "buffer");
+ $$.init($1.loc);
+ $$.qualifier.storage = EvqBuffer;
+ }
+ | HITATTRNV {
+#ifdef NV_EXTENSIONS
+ parseContext.globalCheck($1.loc, "hitAttributeNV");
+ parseContext.requireStage($1.loc, (EShLanguageMask)(EShLangIntersectNVMask | EShLangClosestHitNVMask
+ | EShLangAnyHitNVMask), "hitAttributeNV");
+ parseContext.profileRequires($1.loc, ECoreProfile, 460, E_GL_NV_ray_tracing, "hitAttributeNV");
+ $$.init($1.loc);
+ $$.qualifier.storage = EvqHitAttrNV;
+#endif
+ }
+ | PAYLOADNV {
+#ifdef NV_EXTENSIONS
+ parseContext.globalCheck($1.loc, "rayPayloadNV");
+ parseContext.requireStage($1.loc, (EShLanguageMask)(EShLangRayGenNVMask | EShLangClosestHitNVMask |
+ EShLangAnyHitNVMask | EShLangMissNVMask), "rayPayloadNV");
+ parseContext.profileRequires($1.loc, ECoreProfile, 460, E_GL_NV_ray_tracing, "rayPayloadNV");
+ $$.init($1.loc);
+ $$.qualifier.storage = EvqPayloadNV;
+#endif
+ }
+ | PAYLOADINNV {
+#ifdef NV_EXTENSIONS
+ parseContext.globalCheck($1.loc, "rayPayloadInNV");
+ parseContext.requireStage($1.loc, (EShLanguageMask)(EShLangClosestHitNVMask |
+ EShLangAnyHitNVMask | EShLangMissNVMask), "rayPayloadInNV");
+ parseContext.profileRequires($1.loc, ECoreProfile, 460, E_GL_NV_ray_tracing, "rayPayloadInNV");
+ $$.init($1.loc);
+ $$.qualifier.storage = EvqPayloadInNV;
+#endif
+ }
+ | CALLDATANV {
+#ifdef NV_EXTENSIONS
+ parseContext.globalCheck($1.loc, "callableDataNV");
+ parseContext.requireStage($1.loc, (EShLanguageMask)(EShLangRayGenNVMask |
+ EShLangClosestHitNVMask | EShLangMissNVMask | EShLangCallableNVMask), "callableDataNV");
+ parseContext.profileRequires($1.loc, ECoreProfile, 460, E_GL_NV_ray_tracing, "callableDataNV");
+ $$.init($1.loc);
+ $$.qualifier.storage = EvqCallableDataNV;
+#endif
+ }
+ | CALLDATAINNV {
+#ifdef NV_EXTENSIONS
+ parseContext.globalCheck($1.loc, "callableDataInNV");
+ parseContext.requireStage($1.loc, (EShLanguageMask)(EShLangCallableNVMask), "callableDataInNV");
+ parseContext.profileRequires($1.loc, ECoreProfile, 460, E_GL_NV_ray_tracing, "callableDataInNV");
+ $$.init($1.loc);
+ $$.qualifier.storage = EvqCallableDataInNV;
+#endif
+ }
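+    // Illustrative GL_NV_ray_tracing declarations matched by the alternatives above:
+    //     hitAttributeNV vec2 hitUV;
+    //     layout(location = 0) rayPayloadNV vec4 payload;
+    //     layout(location = 0) callableDataNV vec4 data;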
+ | SHARED {
+ parseContext.globalCheck($1.loc, "shared");
+ parseContext.profileRequires($1.loc, ECoreProfile | ECompatibilityProfile, 430, E_GL_ARB_compute_shader, "shared");
+ parseContext.profileRequires($1.loc, EEsProfile, 310, 0, "shared");
+#ifdef NV_EXTENSIONS
+ parseContext.requireStage($1.loc, (EShLanguageMask)(EShLangComputeMask | EShLangMeshNVMask | EShLangTaskNVMask), "shared");
+#else
+ parseContext.requireStage($1.loc, EShLangCompute, "shared");
+#endif
+ $$.init($1.loc);
+ $$.qualifier.storage = EvqShared;
+ }
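+    // e.g. (illustrative): shared float tile[16][16];  // workgroup storage in a compute (or NV mesh/task) shader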
+ | COHERENT {
+ $$.init($1.loc);
+ $$.qualifier.coherent = true;
+ }
+ | DEVICECOHERENT {
+ $$.init($1.loc);
+ parseContext.requireExtensions($1.loc, 1, &E_GL_KHR_memory_scope_semantics, "devicecoherent");
+ $$.qualifier.devicecoherent = true;
+ }
+ | QUEUEFAMILYCOHERENT {
+ $$.init($1.loc);
+ parseContext.requireExtensions($1.loc, 1, &E_GL_KHR_memory_scope_semantics, "queuefamilycoherent");
+ $$.qualifier.queuefamilycoherent = true;
+ }
+ | WORKGROUPCOHERENT {
+ $$.init($1.loc);
+ parseContext.requireExtensions($1.loc, 1, &E_GL_KHR_memory_scope_semantics, "workgroupcoherent");
+ $$.qualifier.workgroupcoherent = true;
+ }
+ | SUBGROUPCOHERENT {
+ $$.init($1.loc);
+ parseContext.requireExtensions($1.loc, 1, &E_GL_KHR_memory_scope_semantics, "subgroupcoherent");
+ $$.qualifier.subgroupcoherent = true;
+ }
+ | NONPRIVATE {
+ $$.init($1.loc);
+ parseContext.requireExtensions($1.loc, 1, &E_GL_KHR_memory_scope_semantics, "nonprivate");
+ $$.qualifier.nonprivate = true;
+ }
+ | VOLATILE {
+ $$.init($1.loc);
+ $$.qualifier.volatil = true;
+ }
+ | RESTRICT {
+ $$.init($1.loc);
+ $$.qualifier.restrict = true;
+ }
+ | READONLY {
+ $$.init($1.loc);
+ $$.qualifier.readonly = true;
+ }
+ | WRITEONLY {
+ $$.init($1.loc);
+ $$.qualifier.writeonly = true;
+ }
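+    // e.g. (illustrative): layout(binding = 0) coherent restrict buffer Data { float v[]; };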
+ | SUBROUTINE {
+ parseContext.spvRemoved($1.loc, "subroutine");
+ parseContext.globalCheck($1.loc, "subroutine");
+ parseContext.unimplemented($1.loc, "subroutine");
+ $$.init($1.loc);
+ }
+ | SUBROUTINE LEFT_PAREN type_name_list RIGHT_PAREN {
+ parseContext.spvRemoved($1.loc, "subroutine");
+ parseContext.globalCheck($1.loc, "subroutine");
+ parseContext.unimplemented($1.loc, "subroutine");
+ $$.init($1.loc);
+ }
+ ;
+
+non_uniform_qualifier
+ : NONUNIFORM {
+ $$.init($1.loc);
+ $$.qualifier.nonUniform = true;
+ }
+ ;
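+
+// e.g. (illustrative): texture(textures[nonuniformEXT(idx)], uv);  // GL_EXT_nonuniform_qualifier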
+
+type_name_list
+ : IDENTIFIER {
+ // TODO
+ }
+ | type_name_list COMMA IDENTIFIER {
+ // TODO: 4.0 semantics: subroutines
+ // 1) make sure each identifier is a type declared earlier with SUBROUTINE
+ // 2) save all of the identifiers for future comparison with the declared function
+ }
+ ;
+
+type_specifier
+ : type_specifier_nonarray type_parameter_specifier_opt {
+ $$ = $1;
+ $$.qualifier.precision = parseContext.getDefaultPrecision($$);
+ $$.typeParameters = $2;
+ }
+ | type_specifier_nonarray type_parameter_specifier_opt array_specifier {
+ parseContext.arrayOfArrayVersionCheck($3.loc, $3.arraySizes);
+ $$ = $1;
+ $$.qualifier.precision = parseContext.getDefaultPrecision($$);
+ $$.typeParameters = $2;
+ $$.arraySizes = $3.arraySizes;
+ }
+ ;
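+
+// e.g. (illustrative): "float[3]" is type_specifier_nonarray "float" plus array_specifier "[3]".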
+
+array_specifier
+ : LEFT_BRACKET RIGHT_BRACKET {
+ $$.loc = $1.loc;
+ $$.arraySizes = new TArraySizes;
+ $$.arraySizes->addInnerSize();
+ }
+ | LEFT_BRACKET conditional_expression RIGHT_BRACKET {
+ $$.loc = $1.loc;
+ $$.arraySizes = new TArraySizes;
+
+ TArraySize size;
+ parseContext.arraySizeCheck($2->getLoc(), $2, size, "array size");
+ $$.arraySizes->addInnerSize(size);
+ }
+ | array_specifier LEFT_BRACKET RIGHT_BRACKET {
+ $$ = $1;
+ $$.arraySizes->addInnerSize();
+ }
+ | array_specifier LEFT_BRACKET conditional_expression RIGHT_BRACKET {
+ $$ = $1;
+
+ TArraySize size;
+ parseContext.arraySizeCheck($3->getLoc(), $3, size, "array size");
+ $$.arraySizes->addInnerSize(size);
+ }
+ ;
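+
+// Sizes accumulate outer to inner, so (illustrative) "int a[2][3];" builds a
+// TArraySizes of {2, 3}: an array of 2 arrays of 3 ints.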
+
+type_parameter_specifier_opt
+ : type_parameter_specifier {
+ $$ = $1;
+ }
+ | /* May be null */ {
+ $$ = 0;
+ }
+ ;
+
+type_parameter_specifier
+ : LEFT_ANGLE type_parameter_specifier_list RIGHT_ANGLE {
+ $$ = $2;
+ }
+ ;
+
+type_parameter_specifier_list
+ : unary_expression {
+ $$ = new TArraySizes;
+
+ TArraySize size;
+ parseContext.arraySizeCheck($1->getLoc(), $1, size, "type parameter");
+ $$->addInnerSize(size);
+ }
+ | type_parameter_specifier_list COMMA unary_expression {
+ $$ = $1;
+
+ TArraySize size;
+ parseContext.arraySizeCheck($3->getLoc(), $3, size, "type parameter");
+ $$->addInnerSize(size);
+ }
+ ;
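+
+// Angle-bracketed type parameters; e.g. (illustrative, assuming GL_NV_cooperative_matrix
+// is enabled): fcoopmatNV<16, gl_ScopeSubgroup, 16, 8> m;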
+
+type_specifier_nonarray
+ : VOID {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtVoid;
+ }
+ | FLOAT {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtFloat;
+ }
+ | DOUBLE {
+ parseContext.doubleCheck($1.loc, "double");
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtDouble;
+ }
+ | FLOAT16_T {
+ parseContext.float16ScalarVectorCheck($1.loc, "float16_t", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtFloat16;
+ }
+ | FLOAT32_T {
+ parseContext.explicitFloat32Check($1.loc, "float32_t", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtFloat;
+ }
+ | FLOAT64_T {
+ parseContext.explicitFloat64Check($1.loc, "float64_t", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtDouble;
+ }
+ | INT {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtInt;
+ }
+ | UINT {
+ parseContext.fullIntegerCheck($1.loc, "unsigned integer");
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtUint;
+ }
+ | INT8_T {
+ parseContext.int8ScalarVectorCheck($1.loc, "8-bit signed integer", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtInt8;
+ }
+ | UINT8_T {
+ parseContext.int8ScalarVectorCheck($1.loc, "8-bit unsigned integer", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtUint8;
+ }
+ | INT16_T {
+ parseContext.int16ScalarVectorCheck($1.loc, "16-bit signed integer", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtInt16;
+ }
+ | UINT16_T {
+ parseContext.int16ScalarVectorCheck($1.loc, "16-bit unsigned integer", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtUint16;
+ }
+ | INT32_T {
+ parseContext.explicitInt32Check($1.loc, "32-bit signed integer", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtInt;
+ }
+ | UINT32_T {
+ parseContext.explicitInt32Check($1.loc, "32-bit unsigned integer", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtUint;
+ }
+ | INT64_T {
+ parseContext.int64Check($1.loc, "64-bit integer", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtInt64;
+ }
+ | UINT64_T {
+ parseContext.int64Check($1.loc, "64-bit unsigned integer", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtUint64;
+ }
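+    // e.g. (illustrative, with the matching GL_EXT_shader_explicit_arithmetic_types_*
+    // extension enabled): int8_t a; uint16_t b; int64_t c;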
+ | BOOL {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtBool;
+ }
+ | VEC2 {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtFloat;
+ $$.setVector(2);
+ }
+ | VEC3 {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtFloat;
+ $$.setVector(3);
+ }
+ | VEC4 {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtFloat;
+ $$.setVector(4);
+ }
+ | DVEC2 {
+ parseContext.doubleCheck($1.loc, "double vector");
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtDouble;
+ $$.setVector(2);
+ }
+ | DVEC3 {
+ parseContext.doubleCheck($1.loc, "double vector");
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtDouble;
+ $$.setVector(3);
+ }
+ | DVEC4 {
+ parseContext.doubleCheck($1.loc, "double vector");
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtDouble;
+ $$.setVector(4);
+ }
+ | F16VEC2 {
+ parseContext.float16ScalarVectorCheck($1.loc, "half float vector", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtFloat16;
+ $$.setVector(2);
+ }
+ | F16VEC3 {
+ parseContext.float16ScalarVectorCheck($1.loc, "half float vector", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtFloat16;
+ $$.setVector(3);
+ }
+ | F16VEC4 {
+ parseContext.float16ScalarVectorCheck($1.loc, "half float vector", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtFloat16;
+ $$.setVector(4);
+ }
+ | F32VEC2 {
+ parseContext.explicitFloat32Check($1.loc, "float32_t vector", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtFloat;
+ $$.setVector(2);
+ }
+ | F32VEC3 {
+ parseContext.explicitFloat32Check($1.loc, "float32_t vector", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtFloat;
+ $$.setVector(3);
+ }
+ | F32VEC4 {
+ parseContext.explicitFloat32Check($1.loc, "float32_t vector", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtFloat;
+ $$.setVector(4);
+ }
+ | F64VEC2 {
+ parseContext.explicitFloat64Check($1.loc, "float64_t vector", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtDouble;
+ $$.setVector(2);
+ }
+ | F64VEC3 {
+ parseContext.explicitFloat64Check($1.loc, "float64_t vector", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtDouble;
+ $$.setVector(3);
+ }
+ | F64VEC4 {
+ parseContext.explicitFloat64Check($1.loc, "float64_t vector", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtDouble;
+ $$.setVector(4);
+ }
+ | BVEC2 {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtBool;
+ $$.setVector(2);
+ }
+ | BVEC3 {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtBool;
+ $$.setVector(3);
+ }
+ | BVEC4 {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtBool;
+ $$.setVector(4);
+ }
+ | IVEC2 {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtInt;
+ $$.setVector(2);
+ }
+ | IVEC3 {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtInt;
+ $$.setVector(3);
+ }
+ | IVEC4 {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtInt;
+ $$.setVector(4);
+ }
+ | I8VEC2 {
+ parseContext.int8ScalarVectorCheck($1.loc, "8-bit signed integer vector", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtInt8;
+ $$.setVector(2);
+ }
+ | I8VEC3 {
+ parseContext.int8ScalarVectorCheck($1.loc, "8-bit signed integer vector", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtInt8;
+ $$.setVector(3);
+ }
+ | I8VEC4 {
+ parseContext.int8ScalarVectorCheck($1.loc, "8-bit signed integer vector", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtInt8;
+ $$.setVector(4);
+ }
+ | I16VEC2 {
+ parseContext.int16ScalarVectorCheck($1.loc, "16-bit signed integer vector", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtInt16;
+ $$.setVector(2);
+ }
+ | I16VEC3 {
+ parseContext.int16ScalarVectorCheck($1.loc, "16-bit signed integer vector", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtInt16;
+ $$.setVector(3);
+ }
+ | I16VEC4 {
+ parseContext.int16ScalarVectorCheck($1.loc, "16-bit signed integer vector", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtInt16;
+ $$.setVector(4);
+ }
+ | I32VEC2 {
+ parseContext.explicitInt32Check($1.loc, "32-bit signed integer vector", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtInt;
+ $$.setVector(2);
+ }
+ | I32VEC3 {
+ parseContext.explicitInt32Check($1.loc, "32-bit signed integer vector", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtInt;
+ $$.setVector(3);
+ }
+ | I32VEC4 {
+ parseContext.explicitInt32Check($1.loc, "32-bit signed integer vector", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtInt;
+ $$.setVector(4);
+ }
+ | I64VEC2 {
+ parseContext.int64Check($1.loc, "64-bit integer vector", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtInt64;
+ $$.setVector(2);
+ }
+ | I64VEC3 {
+ parseContext.int64Check($1.loc, "64-bit integer vector", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtInt64;
+ $$.setVector(3);
+ }
+ | I64VEC4 {
+ parseContext.int64Check($1.loc, "64-bit integer vector", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtInt64;
+ $$.setVector(4);
+ }
+ | UVEC2 {
+ parseContext.fullIntegerCheck($1.loc, "unsigned integer vector");
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtUint;
+ $$.setVector(2);
+ }
+ | UVEC3 {
+ parseContext.fullIntegerCheck($1.loc, "unsigned integer vector");
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtUint;
+ $$.setVector(3);
+ }
+ | UVEC4 {
+ parseContext.fullIntegerCheck($1.loc, "unsigned integer vector");
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtUint;
+ $$.setVector(4);
+ }
+ | U8VEC2 {
+ parseContext.int8ScalarVectorCheck($1.loc, "8-bit unsigned integer vector", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtUint8;
+ $$.setVector(2);
+ }
+ | U8VEC3 {
+ parseContext.int8ScalarVectorCheck($1.loc, "8-bit unsigned integer vector", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtUint8;
+ $$.setVector(3);
+ }
+ | U8VEC4 {
+ parseContext.int8ScalarVectorCheck($1.loc, "8-bit unsigned integer vector", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtUint8;
+ $$.setVector(4);
+ }
+ | U16VEC2 {
+ parseContext.int16ScalarVectorCheck($1.loc, "16-bit unsigned integer vector", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtUint16;
+ $$.setVector(2);
+ }
+ | U16VEC3 {
+ parseContext.int16ScalarVectorCheck($1.loc, "16-bit unsigned integer vector", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtUint16;
+ $$.setVector(3);
+ }
+ | U16VEC4 {
+ parseContext.int16ScalarVectorCheck($1.loc, "16-bit unsigned integer vector", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtUint16;
+ $$.setVector(4);
+ }
+ | U32VEC2 {
+ parseContext.explicitInt32Check($1.loc, "32-bit unsigned integer vector", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtUint;
+ $$.setVector(2);
+ }
+ | U32VEC3 {
+ parseContext.explicitInt32Check($1.loc, "32-bit unsigned integer vector", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtUint;
+ $$.setVector(3);
+ }
+ | U32VEC4 {
+ parseContext.explicitInt32Check($1.loc, "32-bit unsigned integer vector", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtUint;
+ $$.setVector(4);
+ }
+ | U64VEC2 {
+ parseContext.int64Check($1.loc, "64-bit unsigned integer vector", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtUint64;
+ $$.setVector(2);
+ }
+ | U64VEC3 {
+ parseContext.int64Check($1.loc, "64-bit unsigned integer vector", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtUint64;
+ $$.setVector(3);
+ }
+ | U64VEC4 {
+ parseContext.int64Check($1.loc, "64-bit unsigned integer vector", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtUint64;
+ $$.setVector(4);
+ }
+ | MAT2 {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtFloat;
+ $$.setMatrix(2, 2);
+ }
+ | MAT3 {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtFloat;
+ $$.setMatrix(3, 3);
+ }
+ | MAT4 {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtFloat;
+ $$.setMatrix(4, 4);
+ }
+ | MAT2X2 {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtFloat;
+ $$.setMatrix(2, 2);
+ }
+ | MAT2X3 {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtFloat;
+ $$.setMatrix(2, 3);
+ }
+ | MAT2X4 {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtFloat;
+ $$.setMatrix(2, 4);
+ }
+ | MAT3X2 {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtFloat;
+ $$.setMatrix(3, 2);
+ }
+ | MAT3X3 {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtFloat;
+ $$.setMatrix(3, 3);
+ }
+ | MAT3X4 {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtFloat;
+ $$.setMatrix(3, 4);
+ }
+ | MAT4X2 {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtFloat;
+ $$.setMatrix(4, 2);
+ }
+ | MAT4X3 {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtFloat;
+ $$.setMatrix(4, 3);
+ }
+ | MAT4X4 {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtFloat;
+ $$.setMatrix(4, 4);
+ }
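+    // Note: GLSL matCxR names columns by rows, so MAT2X4 above maps to setMatrix(2, 4):
+    // 2 columns of 4 components. e.g. (illustrative): mat2x4 m;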
+ | DMAT2 {
+ parseContext.doubleCheck($1.loc, "double matrix");
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtDouble;
+ $$.setMatrix(2, 2);
+ }
+ | DMAT3 {
+ parseContext.doubleCheck($1.loc, "double matrix");
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtDouble;
+ $$.setMatrix(3, 3);
+ }
+ | DMAT4 {
+ parseContext.doubleCheck($1.loc, "double matrix");
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtDouble;
+ $$.setMatrix(4, 4);
+ }
+ | DMAT2X2 {
+ parseContext.doubleCheck($1.loc, "double matrix");
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtDouble;
+ $$.setMatrix(2, 2);
+ }
+ | DMAT2X3 {
+ parseContext.doubleCheck($1.loc, "double matrix");
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtDouble;
+ $$.setMatrix(2, 3);
+ }
+ | DMAT2X4 {
+ parseContext.doubleCheck($1.loc, "double matrix");
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtDouble;
+ $$.setMatrix(2, 4);
+ }
+ | DMAT3X2 {
+ parseContext.doubleCheck($1.loc, "double matrix");
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtDouble;
+ $$.setMatrix(3, 2);
+ }
+ | DMAT3X3 {
+ parseContext.doubleCheck($1.loc, "double matrix");
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtDouble;
+ $$.setMatrix(3, 3);
+ }
+ | DMAT3X4 {
+ parseContext.doubleCheck($1.loc, "double matrix");
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtDouble;
+ $$.setMatrix(3, 4);
+ }
+ | DMAT4X2 {
+ parseContext.doubleCheck($1.loc, "double matrix");
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtDouble;
+ $$.setMatrix(4, 2);
+ }
+ | DMAT4X3 {
+ parseContext.doubleCheck($1.loc, "double matrix");
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtDouble;
+ $$.setMatrix(4, 3);
+ }
+ | DMAT4X4 {
+ parseContext.doubleCheck($1.loc, "double matrix");
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtDouble;
+ $$.setMatrix(4, 4);
+ }
+ | F16MAT2 {
+ parseContext.float16Check($1.loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtFloat16;
+ $$.setMatrix(2, 2);
+ }
+ | F16MAT3 {
+ parseContext.float16Check($1.loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtFloat16;
+ $$.setMatrix(3, 3);
+ }
+ | F16MAT4 {
+ parseContext.float16Check($1.loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtFloat16;
+ $$.setMatrix(4, 4);
+ }
+ | F16MAT2X2 {
+ parseContext.float16Check($1.loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtFloat16;
+ $$.setMatrix(2, 2);
+ }
+ | F16MAT2X3 {
+ parseContext.float16Check($1.loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtFloat16;
+ $$.setMatrix(2, 3);
+ }
+ | F16MAT2X4 {
+ parseContext.float16Check($1.loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtFloat16;
+ $$.setMatrix(2, 4);
+ }
+ | F16MAT3X2 {
+ parseContext.float16Check($1.loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtFloat16;
+ $$.setMatrix(3, 2);
+ }
+ | F16MAT3X3 {
+ parseContext.float16Check($1.loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtFloat16;
+ $$.setMatrix(3, 3);
+ }
+ | F16MAT3X4 {
+ parseContext.float16Check($1.loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtFloat16;
+ $$.setMatrix(3, 4);
+ }
+ | F16MAT4X2 {
+ parseContext.float16Check($1.loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtFloat16;
+ $$.setMatrix(4, 2);
+ }
+ | F16MAT4X3 {
+ parseContext.float16Check($1.loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtFloat16;
+ $$.setMatrix(4, 3);
+ }
+ | F16MAT4X4 {
+ parseContext.float16Check($1.loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtFloat16;
+ $$.setMatrix(4, 4);
+ }
+ | F32MAT2 {
+ parseContext.explicitFloat32Check($1.loc, "float32_t matrix", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtFloat;
+ $$.setMatrix(2, 2);
+ }
+ | F32MAT3 {
+ parseContext.explicitFloat32Check($1.loc, "float32_t matrix", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtFloat;
+ $$.setMatrix(3, 3);
+ }
+ | F32MAT4 {
+ parseContext.explicitFloat32Check($1.loc, "float32_t matrix", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtFloat;
+ $$.setMatrix(4, 4);
+ }
+ | F32MAT2X2 {
+ parseContext.explicitFloat32Check($1.loc, "float32_t matrix", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtFloat;
+ $$.setMatrix(2, 2);
+ }
+ | F32MAT2X3 {
+ parseContext.explicitFloat32Check($1.loc, "float32_t matrix", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtFloat;
+ $$.setMatrix(2, 3);
+ }
+ | F32MAT2X4 {
+ parseContext.explicitFloat32Check($1.loc, "float32_t matrix", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtFloat;
+ $$.setMatrix(2, 4);
+ }
+ | F32MAT3X2 {
+ parseContext.explicitFloat32Check($1.loc, "float32_t matrix", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtFloat;
+ $$.setMatrix(3, 2);
+ }
+ | F32MAT3X3 {
+ parseContext.explicitFloat32Check($1.loc, "float32_t matrix", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtFloat;
+ $$.setMatrix(3, 3);
+ }
+ | F32MAT3X4 {
+ parseContext.explicitFloat32Check($1.loc, "float32_t matrix", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtFloat;
+ $$.setMatrix(3, 4);
+ }
+ | F32MAT4X2 {
+ parseContext.explicitFloat32Check($1.loc, "float32_t matrix", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtFloat;
+ $$.setMatrix(4, 2);
+ }
+ | F32MAT4X3 {
+ parseContext.explicitFloat32Check($1.loc, "float32_t matrix", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtFloat;
+ $$.setMatrix(4, 3);
+ }
+ | F32MAT4X4 {
+ parseContext.explicitFloat32Check($1.loc, "float32_t matrix", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtFloat;
+ $$.setMatrix(4, 4);
+ }
+ | F64MAT2 {
+ parseContext.explicitFloat64Check($1.loc, "float64_t matrix", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtDouble;
+ $$.setMatrix(2, 2);
+ }
+ | F64MAT3 {
+ parseContext.explicitFloat64Check($1.loc, "float64_t matrix", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtDouble;
+ $$.setMatrix(3, 3);
+ }
+ | F64MAT4 {
+ parseContext.explicitFloat64Check($1.loc, "float64_t matrix", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtDouble;
+ $$.setMatrix(4, 4);
+ }
+ | F64MAT2X2 {
+ parseContext.explicitFloat64Check($1.loc, "float64_t matrix", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtDouble;
+ $$.setMatrix(2, 2);
+ }
+ | F64MAT2X3 {
+ parseContext.explicitFloat64Check($1.loc, "float64_t matrix", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtDouble;
+ $$.setMatrix(2, 3);
+ }
+ | F64MAT2X4 {
+ parseContext.explicitFloat64Check($1.loc, "float64_t matrix", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtDouble;
+ $$.setMatrix(2, 4);
+ }
+ | F64MAT3X2 {
+ parseContext.explicitFloat64Check($1.loc, "float64_t matrix", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtDouble;
+ $$.setMatrix(3, 2);
+ }
+ | F64MAT3X3 {
+ parseContext.explicitFloat64Check($1.loc, "float64_t matrix", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtDouble;
+ $$.setMatrix(3, 3);
+ }
+ | F64MAT3X4 {
+ parseContext.explicitFloat64Check($1.loc, "float64_t matrix", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtDouble;
+ $$.setMatrix(3, 4);
+ }
+ | F64MAT4X2 {
+ parseContext.explicitFloat64Check($1.loc, "float64_t matrix", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtDouble;
+ $$.setMatrix(4, 2);
+ }
+ | F64MAT4X3 {
+ parseContext.explicitFloat64Check($1.loc, "float64_t matrix", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtDouble;
+ $$.setMatrix(4, 3);
+ }
+ | F64MAT4X4 {
+ parseContext.explicitFloat64Check($1.loc, "float64_t matrix", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtDouble;
+ $$.setMatrix(4, 4);
+ }
+ | ACCSTRUCTNV {
+#ifdef NV_EXTENSIONS
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtAccStructNV;
+#endif
+ }
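+    // e.g. (illustrative): uniform accelerationStructureNV topLevelAS;  // GL_NV_ray_tracing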
+ | ATOMIC_UINT {
+ parseContext.vulkanRemoved($1.loc, "atomic counter types");
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtAtomicUint;
+ }
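+    // e.g. (illustrative): layout(binding = 0, offset = 0) uniform atomic_uint counter;
+    // (rejected under Vulkan by the vulkanRemoved() call above)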
+ | SAMPLER1D {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.set(EbtFloat, Esd1D);
+ }
+ | SAMPLER2D {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.set(EbtFloat, Esd2D);
+ }
+ | SAMPLER3D {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.set(EbtFloat, Esd3D);
+ }
+ | SAMPLERCUBE {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.set(EbtFloat, EsdCube);
+ }
+ | SAMPLER1DSHADOW {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.set(EbtFloat, Esd1D, false, true);
+ }
+ | SAMPLER2DSHADOW {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.set(EbtFloat, Esd2D, false, true);
+ }
+ | SAMPLERCUBESHADOW {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.set(EbtFloat, EsdCube, false, true);
+ }
+ | SAMPLER1DARRAY {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.set(EbtFloat, Esd1D, true);
+ }
+ | SAMPLER2DARRAY {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.set(EbtFloat, Esd2D, true);
+ }
+ | SAMPLER1DARRAYSHADOW {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.set(EbtFloat, Esd1D, true, true);
+ }
+ | SAMPLER2DARRAYSHADOW {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.set(EbtFloat, Esd2D, true, true);
+ }
+ | SAMPLERCUBEARRAY {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.set(EbtFloat, EsdCube, true);
+ }
+ | SAMPLERCUBEARRAYSHADOW {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.set(EbtFloat, EsdCube, true, true);
+ }
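+    // The trailing sampler.set() arguments are (arrayed, shadow, ms), so e.g.
+    // SAMPLER2DARRAYSHADOW above sets arrayed = true and shadow = true.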
+ | F16SAMPLER1D {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck($1.loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.set(EbtFloat16, Esd1D);
+#endif
+ }
+ | F16SAMPLER2D {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck($1.loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.set(EbtFloat16, Esd2D);
+#endif
+ }
+ | F16SAMPLER3D {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck($1.loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.set(EbtFloat16, Esd3D);
+#endif
+ }
+ | F16SAMPLERCUBE {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck($1.loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.set(EbtFloat16, EsdCube);
+#endif
+ }
+ | F16SAMPLER1DSHADOW {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck($1.loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.set(EbtFloat16, Esd1D, false, true);
+#endif
+ }
+ | F16SAMPLER2DSHADOW {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck($1.loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.set(EbtFloat16, Esd2D, false, true);
+#endif
+ }
+ | F16SAMPLERCUBESHADOW {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck($1.loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.set(EbtFloat16, EsdCube, false, true);
+#endif
+ }
+ | F16SAMPLER1DARRAY {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck($1.loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.set(EbtFloat16, Esd1D, true);
+#endif
+ }
+ | F16SAMPLER2DARRAY {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck($1.loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.set(EbtFloat16, Esd2D, true);
+#endif
+ }
+ | F16SAMPLER1DARRAYSHADOW {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck($1.loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.set(EbtFloat16, Esd1D, true, true);
+#endif
+ }
+ | F16SAMPLER2DARRAYSHADOW {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck($1.loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.set(EbtFloat16, Esd2D, true, true);
+#endif
+ }
+ | F16SAMPLERCUBEARRAY {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck($1.loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.set(EbtFloat16, EsdCube, true);
+#endif
+ }
+ | F16SAMPLERCUBEARRAYSHADOW {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck($1.loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.set(EbtFloat16, EsdCube, true, true);
+#endif
+ }
+ | ISAMPLER1D {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.set(EbtInt, Esd1D);
+ }
+ | ISAMPLER2D {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.set(EbtInt, Esd2D);
+ }
+ | ISAMPLER3D {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.set(EbtInt, Esd3D);
+ }
+ | ISAMPLERCUBE {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.set(EbtInt, EsdCube);
+ }
+ | ISAMPLER1DARRAY {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.set(EbtInt, Esd1D, true);
+ }
+ | ISAMPLER2DARRAY {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.set(EbtInt, Esd2D, true);
+ }
+ | ISAMPLERCUBEARRAY {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.set(EbtInt, EsdCube, true);
+ }
+ | USAMPLER1D {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.set(EbtUint, Esd1D);
+ }
+ | USAMPLER2D {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.set(EbtUint, Esd2D);
+ }
+ | USAMPLER3D {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.set(EbtUint, Esd3D);
+ }
+ | USAMPLERCUBE {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.set(EbtUint, EsdCube);
+ }
+ | USAMPLER1DARRAY {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.set(EbtUint, Esd1D, true);
+ }
+ | USAMPLER2DARRAY {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.set(EbtUint, Esd2D, true);
+ }
+ | USAMPLERCUBEARRAY {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.set(EbtUint, EsdCube, true);
+ }
+ | SAMPLER2DRECT {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.set(EbtFloat, EsdRect);
+ }
+ | SAMPLER2DRECTSHADOW {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.set(EbtFloat, EsdRect, false, true);
+ }
+ | F16SAMPLER2DRECT {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck($1.loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.set(EbtFloat16, EsdRect);
+#endif
+ }
+ | F16SAMPLER2DRECTSHADOW {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck($1.loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.set(EbtFloat16, EsdRect, false, true);
+#endif
+ }
+ | ISAMPLER2DRECT {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.set(EbtInt, EsdRect);
+ }
+ | USAMPLER2DRECT {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.set(EbtUint, EsdRect);
+ }
+ | SAMPLERBUFFER {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.set(EbtFloat, EsdBuffer);
+ }
+ | F16SAMPLERBUFFER {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck($1.loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.set(EbtFloat16, EsdBuffer);
+#endif
+ }
+ | ISAMPLERBUFFER {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.set(EbtInt, EsdBuffer);
+ }
+ | USAMPLERBUFFER {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.set(EbtUint, EsdBuffer);
+ }
+ | SAMPLER2DMS {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.set(EbtFloat, Esd2D, false, false, true);
+ }
+ | F16SAMPLER2DMS {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck($1.loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.set(EbtFloat16, Esd2D, false, false, true);
+#endif
+ }
+ | ISAMPLER2DMS {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.set(EbtInt, Esd2D, false, false, true);
+ }
+ | USAMPLER2DMS {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.set(EbtUint, Esd2D, false, false, true);
+ }
+ | SAMPLER2DMSARRAY {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.set(EbtFloat, Esd2D, true, false, true);
+ }
+ | F16SAMPLER2DMSARRAY {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck($1.loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.set(EbtFloat16, Esd2D, true, false, true);
+#endif
+ }
+ | ISAMPLER2DMSARRAY {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.set(EbtInt, Esd2D, true, false, true);
+ }
+ | USAMPLER2DMSARRAY {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.set(EbtUint, Esd2D, true, false, true);
+ }
+ | SAMPLER {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setPureSampler(false);
+ }
+ | SAMPLERSHADOW {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setPureSampler(true);
+ }
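+    // Pure samplers carry no texture; under Vulkan GLSL they pair with the separate
+    // texture* types below, e.g. (illustrative): texture(sampler2D(tex, samp), uv);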
+ | TEXTURE1D {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setTexture(EbtFloat, Esd1D);
+ }
+ | F16TEXTURE1D {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck($1.loc, "half float texture", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setTexture(EbtFloat16, Esd1D);
+#endif
+ }
+ | TEXTURE2D {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setTexture(EbtFloat, Esd2D);
+ }
+ | F16TEXTURE2D {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck($1.loc, "half float texture", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setTexture(EbtFloat16, Esd2D);
+#endif
+ }
+ | TEXTURE3D {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setTexture(EbtFloat, Esd3D);
+ }
+ | F16TEXTURE3D {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck($1.loc, "half float texture", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setTexture(EbtFloat16, Esd3D);
+#endif
+ }
+ | TEXTURECUBE {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setTexture(EbtFloat, EsdCube);
+ }
+ | F16TEXTURECUBE {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck($1.loc, "half float texture", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setTexture(EbtFloat16, EsdCube);
+#endif
+ }
+ | TEXTURE1DARRAY {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setTexture(EbtFloat, Esd1D, true);
+ }
+ | F16TEXTURE1DARRAY {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck($1.loc, "half float texture", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setTexture(EbtFloat16, Esd1D, true);
+#endif
+ }
+ | TEXTURE2DARRAY {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setTexture(EbtFloat, Esd2D, true);
+ }
+ | F16TEXTURE2DARRAY {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck($1.loc, "half float texture", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setTexture(EbtFloat16, Esd2D, true);
+#endif
+ }
+ | TEXTURECUBEARRAY {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setTexture(EbtFloat, EsdCube, true);
+ }
+ | F16TEXTURECUBEARRAY {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck($1.loc, "half float texture", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setTexture(EbtFloat16, EsdCube, true);
+#endif
+ }
+ | ITEXTURE1D {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setTexture(EbtInt, Esd1D);
+ }
+ | ITEXTURE2D {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setTexture(EbtInt, Esd2D);
+ }
+ | ITEXTURE3D {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setTexture(EbtInt, Esd3D);
+ }
+ | ITEXTURECUBE {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setTexture(EbtInt, EsdCube);
+ }
+ | ITEXTURE1DARRAY {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setTexture(EbtInt, Esd1D, true);
+ }
+ | ITEXTURE2DARRAY {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setTexture(EbtInt, Esd2D, true);
+ }
+ | ITEXTURECUBEARRAY {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setTexture(EbtInt, EsdCube, true);
+ }
+ | UTEXTURE1D {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setTexture(EbtUint, Esd1D);
+ }
+ | UTEXTURE2D {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setTexture(EbtUint, Esd2D);
+ }
+ | UTEXTURE3D {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setTexture(EbtUint, Esd3D);
+ }
+ | UTEXTURECUBE {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setTexture(EbtUint, EsdCube);
+ }
+ | UTEXTURE1DARRAY {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setTexture(EbtUint, Esd1D, true);
+ }
+ | UTEXTURE2DARRAY {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setTexture(EbtUint, Esd2D, true);
+ }
+ | UTEXTURECUBEARRAY {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setTexture(EbtUint, EsdCube, true);
+ }
+ | TEXTURE2DRECT {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setTexture(EbtFloat, EsdRect);
+ }
+ | F16TEXTURE2DRECT {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck($1.loc, "half float texture", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setTexture(EbtFloat16, EsdRect);
+#endif
+ }
+ | ITEXTURE2DRECT {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setTexture(EbtInt, EsdRect);
+ }
+ | UTEXTURE2DRECT {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setTexture(EbtUint, EsdRect);
+ }
+ | TEXTUREBUFFER {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setTexture(EbtFloat, EsdBuffer);
+ }
+ | F16TEXTUREBUFFER {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck($1.loc, "half float texture", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setTexture(EbtFloat16, EsdBuffer);
+#endif
+ }
+ | ITEXTUREBUFFER {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setTexture(EbtInt, EsdBuffer);
+ }
+ | UTEXTUREBUFFER {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setTexture(EbtUint, EsdBuffer);
+ }
+ | TEXTURE2DMS {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setTexture(EbtFloat, Esd2D, false, false, true);
+ }
+ | F16TEXTURE2DMS {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck($1.loc, "half float texture", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setTexture(EbtFloat16, Esd2D, false, false, true);
+#endif
+ }
+ | ITEXTURE2DMS {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setTexture(EbtInt, Esd2D, false, false, true);
+ }
+ | UTEXTURE2DMS {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setTexture(EbtUint, Esd2D, false, false, true);
+ }
+ | TEXTURE2DMSARRAY {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setTexture(EbtFloat, Esd2D, true, false, true);
+ }
+ | F16TEXTURE2DMSARRAY {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck($1.loc, "half float texture", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setTexture(EbtFloat16, Esd2D, true, false, true);
+#endif
+ }
+ | ITEXTURE2DMSARRAY {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setTexture(EbtInt, Esd2D, true, false, true);
+ }
+ | UTEXTURE2DMSARRAY {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setTexture(EbtUint, Esd2D, true, false, true);
+ }
+ | IMAGE1D {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setImage(EbtFloat, Esd1D);
+ }
+ | F16IMAGE1D {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck($1.loc, "half float image", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setImage(EbtFloat16, Esd1D);
+#endif
+ }
+ | IIMAGE1D {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setImage(EbtInt, Esd1D);
+ }
+ | UIMAGE1D {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setImage(EbtUint, Esd1D);
+ }
+ | IMAGE2D {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setImage(EbtFloat, Esd2D);
+ }
+ | F16IMAGE2D {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck($1.loc, "half float image", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setImage(EbtFloat16, Esd2D);
+#endif
+ }
+ | IIMAGE2D {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setImage(EbtInt, Esd2D);
+ }
+ | UIMAGE2D {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setImage(EbtUint, Esd2D);
+ }
+ | IMAGE3D {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setImage(EbtFloat, Esd3D);
+ }
+ | F16IMAGE3D {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck($1.loc, "half float image", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setImage(EbtFloat16, Esd3D);
+#endif
+ }
+ | IIMAGE3D {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setImage(EbtInt, Esd3D);
+ }
+ | UIMAGE3D {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setImage(EbtUint, Esd3D);
+ }
+ | IMAGE2DRECT {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setImage(EbtFloat, EsdRect);
+ }
+ | F16IMAGE2DRECT {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck($1.loc, "half float image", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setImage(EbtFloat16, EsdRect);
+#endif
+ }
+ | IIMAGE2DRECT {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setImage(EbtInt, EsdRect);
+ }
+ | UIMAGE2DRECT {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setImage(EbtUint, EsdRect);
+ }
+ | IMAGECUBE {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setImage(EbtFloat, EsdCube);
+ }
+ | F16IMAGECUBE {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck($1.loc, "half float image", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setImage(EbtFloat16, EsdCube);
+#endif
+ }
+ | IIMAGECUBE {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setImage(EbtInt, EsdCube);
+ }
+ | UIMAGECUBE {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setImage(EbtUint, EsdCube);
+ }
+ | IMAGEBUFFER {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setImage(EbtFloat, EsdBuffer);
+ }
+ | F16IMAGEBUFFER {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck($1.loc, "half float image", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setImage(EbtFloat16, EsdBuffer);
+#endif
+ }
+ | IIMAGEBUFFER {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setImage(EbtInt, EsdBuffer);
+ }
+ | UIMAGEBUFFER {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setImage(EbtUint, EsdBuffer);
+ }
+ | IMAGE1DARRAY {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setImage(EbtFloat, Esd1D, true);
+ }
+ | F16IMAGE1DARRAY {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck($1.loc, "half float image", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setImage(EbtFloat16, Esd1D, true);
+#endif
+ }
+ | IIMAGE1DARRAY {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setImage(EbtInt, Esd1D, true);
+ }
+ | UIMAGE1DARRAY {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setImage(EbtUint, Esd1D, true);
+ }
+ | IMAGE2DARRAY {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setImage(EbtFloat, Esd2D, true);
+ }
+ | F16IMAGE2DARRAY {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck($1.loc, "half float image", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setImage(EbtFloat16, Esd2D, true);
+#endif
+ }
+ | IIMAGE2DARRAY {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setImage(EbtInt, Esd2D, true);
+ }
+ | UIMAGE2DARRAY {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setImage(EbtUint, Esd2D, true);
+ }
+ | IMAGECUBEARRAY {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setImage(EbtFloat, EsdCube, true);
+ }
+ | F16IMAGECUBEARRAY {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck($1.loc, "half float image", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setImage(EbtFloat16, EsdCube, true);
+#endif
+ }
+ | IIMAGECUBEARRAY {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setImage(EbtInt, EsdCube, true);
+ }
+ | UIMAGECUBEARRAY {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setImage(EbtUint, EsdCube, true);
+ }
+ | IMAGE2DMS {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setImage(EbtFloat, Esd2D, false, false, true);
+ }
+ | F16IMAGE2DMS {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck($1.loc, "half float image", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setImage(EbtFloat16, Esd2D, false, false, true);
+#endif
+ }
+ | IIMAGE2DMS {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setImage(EbtInt, Esd2D, false, false, true);
+ }
+ | UIMAGE2DMS {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setImage(EbtUint, Esd2D, false, false, true);
+ }
+ | IMAGE2DMSARRAY {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setImage(EbtFloat, Esd2D, true, false, true);
+ }
+ | F16IMAGE2DMSARRAY {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck($1.loc, "half float image", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setImage(EbtFloat16, Esd2D, true, false, true);
+#endif
+ }
+ | IIMAGE2DMSARRAY {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setImage(EbtInt, Esd2D, true, false, true);
+ }
+ | UIMAGE2DMSARRAY {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setImage(EbtUint, Esd2D, true, false, true);
+ }
+ | SAMPLEREXTERNALOES { // GL_OES_EGL_image_external
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.set(EbtFloat, Esd2D);
+ $$.sampler.external = true;
+ }
+ | SAMPLEREXTERNAL2DY2YEXT { // GL_EXT_YUV_target
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.set(EbtFloat, Esd2D);
+ $$.sampler.yuv = true;
+ }
+ | SUBPASSINPUT {
+ parseContext.requireStage($1.loc, EShLangFragment, "subpass input");
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setSubpass(EbtFloat);
+ }
+ | SUBPASSINPUTMS {
+ parseContext.requireStage($1.loc, EShLangFragment, "subpass input");
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setSubpass(EbtFloat, true);
+ }
+ | F16SUBPASSINPUT {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck($1.loc, "half float subpass input", parseContext.symbolTable.atBuiltInLevel());
+ parseContext.requireStage($1.loc, EShLangFragment, "subpass input");
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setSubpass(EbtFloat16);
+#endif
+ }
+ | F16SUBPASSINPUTMS {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck($1.loc, "half float subpass input", parseContext.symbolTable.atBuiltInLevel());
+ parseContext.requireStage($1.loc, EShLangFragment, "subpass input");
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setSubpass(EbtFloat16, true);
+#endif
+ }
+ | ISUBPASSINPUT {
+ parseContext.requireStage($1.loc, EShLangFragment, "subpass input");
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setSubpass(EbtInt);
+ }
+ | ISUBPASSINPUTMS {
+ parseContext.requireStage($1.loc, EShLangFragment, "subpass input");
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setSubpass(EbtInt, true);
+ }
+ | USUBPASSINPUT {
+ parseContext.requireStage($1.loc, EShLangFragment, "subpass input");
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setSubpass(EbtUint);
+ }
+ | USUBPASSINPUTMS {
+ parseContext.requireStage($1.loc, EShLangFragment, "subpass input");
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setSubpass(EbtUint, true);
+ }
+ | FCOOPMATNV {
+ parseContext.fcoopmatCheck($1.loc, "fcoopmatNV", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtFloat;
+ $$.coopmat = true;
+ }
+ | struct_specifier {
+ $$ = $1;
+ $$.qualifier.storage = parseContext.symbolTable.atGlobalLevel() ? EvqGlobal : EvqTemporary;
+ parseContext.structTypeCheck($$.loc, $$);
+ }
+ | TYPE_NAME {
+ //
+        // This is for user-defined type names; the lexical phase has already
+        // looked up the type.
+ //
+ if (const TVariable* variable = ($1.symbol)->getAsVariable()) {
+ const TType& structure = variable->getType();
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtStruct;
+ $$.userDef = &structure;
+ } else
+ parseContext.error($1.loc, "expected type name", $1.string->c_str(), "");
+ }
+ ;
+
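+// Grammar Note: the texture*, image*, and subpass-input alternatives above map
+// onto the Vulkan GLSL (GL_KHR_vulkan_glsl) opaque types. For example, a
+// fragment shader might declare (names are illustrative):
+//
+//     layout(set = 0, binding = 0) uniform texture2D uTex;   // TEXTURE2D
+//     layout(set = 0, binding = 1) uniform sampler   uSamp;  // SAMPLER
+//     layout(input_attachment_index = 0, set = 0, binding = 2)
+//         uniform subpassInput uPrev;                        // SUBPASSINPUT
+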
+precision_qualifier
+ : HIGH_PRECISION {
+ parseContext.profileRequires($1.loc, ENoProfile, 130, 0, "highp precision qualifier");
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ parseContext.handlePrecisionQualifier($1.loc, $$.qualifier, EpqHigh);
+ }
+ | MEDIUM_PRECISION {
+ parseContext.profileRequires($1.loc, ENoProfile, 130, 0, "mediump precision qualifier");
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ parseContext.handlePrecisionQualifier($1.loc, $$.qualifier, EpqMedium);
+ }
+ | LOW_PRECISION {
+ parseContext.profileRequires($1.loc, ENoProfile, 130, 0, "lowp precision qualifier");
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ parseContext.handlePrecisionQualifier($1.loc, $$.qualifier, EpqLow);
+ }
+ ;
+
+struct_specifier
+ : STRUCT IDENTIFIER LEFT_BRACE { parseContext.nestedStructCheck($1.loc); } struct_declaration_list RIGHT_BRACE {
+ TType* structure = new TType($5, *$2.string);
+ parseContext.structArrayCheck($2.loc, *structure);
+ TVariable* userTypeDef = new TVariable($2.string, *structure, true);
+ if (! parseContext.symbolTable.insert(*userTypeDef))
+ parseContext.error($2.loc, "redefinition", $2.string->c_str(), "struct");
+ $$.init($1.loc);
+ $$.basicType = EbtStruct;
+ $$.userDef = structure;
+ --parseContext.structNestingLevel;
+ }
+ | STRUCT LEFT_BRACE { parseContext.nestedStructCheck($1.loc); } struct_declaration_list RIGHT_BRACE {
+ TType* structure = new TType($4, TString(""));
+ $$.init($1.loc);
+ $$.basicType = EbtStruct;
+ $$.userDef = structure;
+ --parseContext.structNestingLevel;
+ }
+ ;
+
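+// For example, the two productions above accept both named and unnamed struct
+// specifiers; the unnamed form is legal syntax here, even where later semantic
+// checks restrict its use:
+//
+//     struct Light { vec3 pos; float power; };   // STRUCT IDENTIFIER { ... }
+//     struct { vec3 pos; } anonLight;            // STRUCT { ... }
+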
+struct_declaration_list
+ : struct_declaration {
+ $$ = $1;
+ }
+ | struct_declaration_list struct_declaration {
+ $$ = $1;
+ for (unsigned int i = 0; i < $2->size(); ++i) {
+ for (unsigned int j = 0; j < $$->size(); ++j) {
+ if ((*$$)[j].type->getFieldName() == (*$2)[i].type->getFieldName())
+ parseContext.error((*$2)[i].loc, "duplicate member name:", "", (*$2)[i].type->getFieldName().c_str());
+ }
+ $$->push_back((*$2)[i]);
+ }
+ }
+ ;
+
+struct_declaration
+ : type_specifier struct_declarator_list SEMICOLON {
+ if ($1.arraySizes) {
+ parseContext.profileRequires($1.loc, ENoProfile, 120, E_GL_3DL_array_objects, "arrayed type");
+ parseContext.profileRequires($1.loc, EEsProfile, 300, 0, "arrayed type");
+ if (parseContext.profile == EEsProfile)
+ parseContext.arraySizeRequiredCheck($1.loc, *$1.arraySizes);
+ }
+
+ $$ = $2;
+
+ parseContext.voidErrorCheck($1.loc, (*$2)[0].type->getFieldName(), $1.basicType);
+ parseContext.precisionQualifierCheck($1.loc, $1.basicType, $1.qualifier);
+
+ for (unsigned int i = 0; i < $$->size(); ++i) {
+ TType type($1);
+ type.setFieldName((*$$)[i].type->getFieldName());
+ type.transferArraySizes((*$$)[i].type->getArraySizes());
+ type.copyArrayInnerSizes($1.arraySizes);
+ parseContext.arrayOfArrayVersionCheck((*$$)[i].loc, type.getArraySizes());
+ (*$$)[i].type->shallowCopy(type);
+ }
+ }
+ | type_qualifier type_specifier struct_declarator_list SEMICOLON {
+ if ($2.arraySizes) {
+ parseContext.profileRequires($2.loc, ENoProfile, 120, E_GL_3DL_array_objects, "arrayed type");
+ parseContext.profileRequires($2.loc, EEsProfile, 300, 0, "arrayed type");
+ if (parseContext.profile == EEsProfile)
+ parseContext.arraySizeRequiredCheck($2.loc, *$2.arraySizes);
+ }
+
+ $$ = $3;
+
+ parseContext.memberQualifierCheck($1);
+ parseContext.voidErrorCheck($2.loc, (*$3)[0].type->getFieldName(), $2.basicType);
+ parseContext.mergeQualifiers($2.loc, $2.qualifier, $1.qualifier, true);
+ parseContext.precisionQualifierCheck($2.loc, $2.basicType, $2.qualifier);
+
+ for (unsigned int i = 0; i < $$->size(); ++i) {
+ TType type($2);
+ type.setFieldName((*$$)[i].type->getFieldName());
+ type.transferArraySizes((*$$)[i].type->getArraySizes());
+ type.copyArrayInnerSizes($2.arraySizes);
+ parseContext.arrayOfArrayVersionCheck((*$$)[i].loc, type.getArraySizes());
+ (*$$)[i].type->shallowCopy(type);
+ }
+ }
+ ;
+
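+// For example, both member forms above parse inside a struct declaration
+// (names are illustrative):
+//
+//     struct S {
+//         vec3 positions[2];    // type_specifier struct_declarator_list ';'
+//         highp float weight;   // type_qualifier type_specifier ... ';'
+//     };
+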
+struct_declarator_list
+ : struct_declarator {
+ $$ = new TTypeList;
+ $$->push_back($1);
+ }
+ | struct_declarator_list COMMA struct_declarator {
+ $$->push_back($3);
+ }
+ ;
+
+struct_declarator
+ : IDENTIFIER {
+ $$.type = new TType(EbtVoid);
+ $$.loc = $1.loc;
+ $$.type->setFieldName(*$1.string);
+ }
+ | IDENTIFIER array_specifier {
+ parseContext.arrayOfArrayVersionCheck($1.loc, $2.arraySizes);
+
+ $$.type = new TType(EbtVoid);
+ $$.loc = $1.loc;
+ $$.type->setFieldName(*$1.string);
+ $$.type->transferArraySizes($2.arraySizes);
+ }
+ ;
+
+initializer
+ : assignment_expression {
+ $$ = $1;
+ }
+ | LEFT_BRACE initializer_list RIGHT_BRACE {
+ const char* initFeature = "{ } style initializers";
+ parseContext.requireProfile($1.loc, ~EEsProfile, initFeature);
+ parseContext.profileRequires($1.loc, ~EEsProfile, 420, E_GL_ARB_shading_language_420pack, initFeature);
+ $$ = $2;
+ }
+ | LEFT_BRACE initializer_list COMMA RIGHT_BRACE {
+ const char* initFeature = "{ } style initializers";
+ parseContext.requireProfile($1.loc, ~EEsProfile, initFeature);
+ parseContext.profileRequires($1.loc, ~EEsProfile, 420, E_GL_ARB_shading_language_420pack, initFeature);
+ $$ = $2;
+ }
+ ;
+
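+// For example, the brace forms above require the desktop profile at version
+// 420, or GL_ARB_shading_language_420pack:
+//
+//     const vec2 corners[3] = { vec2(0.0), vec2(1.0, 0.0), vec2(0.0, 1.0) };
+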
+initializer_list
+ : initializer {
+ $$ = parseContext.intermediate.growAggregate(0, $1, $1->getLoc());
+ }
+ | initializer_list COMMA initializer {
+ $$ = parseContext.intermediate.growAggregate($1, $3);
+ }
+ ;
+
+declaration_statement
+ : declaration { $$ = $1; }
+ ;
+
+statement
+ : compound_statement { $$ = $1; }
+ | simple_statement { $$ = $1; }
+ ;
+
+// Grammar Note: labeled statements for switch statements only; 'goto' is not supported.
+
+simple_statement
+ : declaration_statement { $$ = $1; }
+ | expression_statement { $$ = $1; }
+ | selection_statement { $$ = $1; }
+ | switch_statement { $$ = $1; }
+ | case_label { $$ = $1; }
+ | iteration_statement { $$ = $1; }
+ | jump_statement { $$ = $1; }
+ ;
+
+compound_statement
+ : LEFT_BRACE RIGHT_BRACE { $$ = 0; }
+ | LEFT_BRACE {
+ parseContext.symbolTable.push();
+ ++parseContext.statementNestingLevel;
+ }
+ statement_list {
+ parseContext.symbolTable.pop(&parseContext.defaultPrecision[0]);
+ --parseContext.statementNestingLevel;
+ }
+ RIGHT_BRACE {
+ if ($3 && $3->getAsAggregate())
+ $3->getAsAggregate()->setOperator(EOpSequence);
+ $$ = $3;
+ }
+ ;
+
+statement_no_new_scope
+ : compound_statement_no_new_scope { $$ = $1; }
+ | simple_statement { $$ = $1; }
+ ;
+
+statement_scoped
+ : {
+ ++parseContext.controlFlowNestingLevel;
+ }
+ compound_statement {
+ --parseContext.controlFlowNestingLevel;
+ $$ = $2;
+ }
+ | {
+ parseContext.symbolTable.push();
+ ++parseContext.statementNestingLevel;
+ ++parseContext.controlFlowNestingLevel;
+ }
+ simple_statement {
+ parseContext.symbolTable.pop(&parseContext.defaultPrecision[0]);
+ --parseContext.statementNestingLevel;
+ --parseContext.controlFlowNestingLevel;
+ $$ = $2;
+ }
+
+compound_statement_no_new_scope
+    // Statement that doesn't create a new scope; used by selection_statement and iteration_statement
+ : LEFT_BRACE RIGHT_BRACE {
+ $$ = 0;
+ }
+ | LEFT_BRACE statement_list RIGHT_BRACE {
+ if ($2 && $2->getAsAggregate())
+ $2->getAsAggregate()->setOperator(EOpSequence);
+ $$ = $2;
+ }
+ ;
+
+statement_list
+ : statement {
+ $$ = parseContext.intermediate.makeAggregate($1);
+ if ($1 && $1->getAsBranchNode() && ($1->getAsBranchNode()->getFlowOp() == EOpCase ||
+ $1->getAsBranchNode()->getFlowOp() == EOpDefault)) {
+ parseContext.wrapupSwitchSubsequence(0, $1);
+ $$ = 0; // start a fresh subsequence for what's after this case
+ }
+ }
+ | statement_list statement {
+ if ($2 && $2->getAsBranchNode() && ($2->getAsBranchNode()->getFlowOp() == EOpCase ||
+ $2->getAsBranchNode()->getFlowOp() == EOpDefault)) {
+ parseContext.wrapupSwitchSubsequence($1 ? $1->getAsAggregate() : 0, $2);
+ $$ = 0; // start a fresh subsequence for what's after this case
+ } else
+ $$ = parseContext.intermediate.growAggregate($1, $2);
+ }
+ ;
+
+expression_statement
+ : SEMICOLON { $$ = 0; }
+ | expression SEMICOLON { $$ = static_cast<TIntermNode*>($1); }
+ ;
+
+selection_statement
+ : selection_statement_nonattributed {
+ $$ = $1;
+ }
+ | attribute selection_statement_nonattributed {
+ parseContext.handleSelectionAttributes(*$1, $2);
+ $$ = $2;
+ }
+
+selection_statement_nonattributed
+ : IF LEFT_PAREN expression RIGHT_PAREN selection_rest_statement {
+ parseContext.boolCheck($1.loc, $3);
+ $$ = parseContext.intermediate.addSelection($3, $5, $1.loc);
+ }
+ ;
+
+selection_rest_statement
+ : statement_scoped ELSE statement_scoped {
+ $$.node1 = $1;
+ $$.node2 = $3;
+ }
+ | statement_scoped {
+ $$.node1 = $1;
+ $$.node2 = 0;
+ }
+ ;
+
+condition
+    // In the 1996 C++ draft, conditions can include single declarations
+ : expression {
+ $$ = $1;
+ parseContext.boolCheck($1->getLoc(), $1);
+ }
+ | fully_specified_type IDENTIFIER EQUAL initializer {
+ parseContext.boolCheck($2.loc, $1);
+
+ TType type($1);
+ TIntermNode* initNode = parseContext.declareVariable($2.loc, *$2.string, $1, 0, $4);
+ if (initNode)
+ $$ = initNode->getAsTyped();
+ else
+ $$ = 0;
+ }
+ ;
+
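+// For example, the second production lets a loop condition declare and test a
+// variable in one step ('advance' is an illustrative function):
+//
+//     while (bool done = advance()) { }
+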
+switch_statement
+ : switch_statement_nonattributed {
+ $$ = $1;
+ }
+ | attribute switch_statement_nonattributed {
+ parseContext.handleSwitchAttributes(*$1, $2);
+ $$ = $2;
+ }
+
+switch_statement_nonattributed
+ : SWITCH LEFT_PAREN expression RIGHT_PAREN {
+        // start a new switch sequence on the switch stack
+ ++parseContext.controlFlowNestingLevel;
+ ++parseContext.statementNestingLevel;
+ parseContext.switchSequenceStack.push_back(new TIntermSequence);
+ parseContext.switchLevel.push_back(parseContext.statementNestingLevel);
+ parseContext.symbolTable.push();
+ }
+ LEFT_BRACE switch_statement_list RIGHT_BRACE {
+ $$ = parseContext.addSwitch($1.loc, $3, $7 ? $7->getAsAggregate() : 0);
+ delete parseContext.switchSequenceStack.back();
+ parseContext.switchSequenceStack.pop_back();
+ parseContext.switchLevel.pop_back();
+ parseContext.symbolTable.pop(&parseContext.defaultPrecision[0]);
+ --parseContext.statementNestingLevel;
+ --parseContext.controlFlowNestingLevel;
+ }
+ ;
+
+switch_statement_list
+ : /* nothing */ {
+ $$ = 0;
+ }
+ | statement_list {
+ $$ = $1;
+ }
+ ;
+
+case_label
+ : CASE expression COLON {
+ $$ = 0;
+ if (parseContext.switchLevel.size() == 0)
+ parseContext.error($1.loc, "cannot appear outside switch statement", "case", "");
+ else if (parseContext.switchLevel.back() != parseContext.statementNestingLevel)
+ parseContext.error($1.loc, "cannot be nested inside control flow", "case", "");
+ else {
+ parseContext.constantValueCheck($2, "case");
+ parseContext.integerCheck($2, "case");
+ $$ = parseContext.intermediate.addBranch(EOpCase, $2, $1.loc);
+ }
+ }
+ | DEFAULT COLON {
+ $$ = 0;
+ if (parseContext.switchLevel.size() == 0)
+ parseContext.error($1.loc, "cannot appear outside switch statement", "default", "");
+ else if (parseContext.switchLevel.back() != parseContext.statementNestingLevel)
+ parseContext.error($1.loc, "cannot be nested inside control flow", "default", "");
+ else
+ $$ = parseContext.intermediate.addBranch(EOpDefault, $1.loc);
+ }
+ ;
+
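+// For example, case labels must be constant integer expressions and must sit
+// directly inside the switch, not in nested control flow:
+//
+//     switch (i) {
+//     case 0:        // ok: constant, at the switch's own nesting level
+//         break;
+//     default:
+//         break;
+//     }
+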
+iteration_statement
+ : iteration_statement_nonattributed {
+ $$ = $1;
+ }
+ | attribute iteration_statement_nonattributed {
+ parseContext.handleLoopAttributes(*$1, $2);
+ $$ = $2;
+ }
+
+iteration_statement_nonattributed
+ : WHILE LEFT_PAREN {
+ if (! parseContext.limits.whileLoops)
+ parseContext.error($1.loc, "while loops not available", "limitation", "");
+ parseContext.symbolTable.push();
+ ++parseContext.loopNestingLevel;
+ ++parseContext.statementNestingLevel;
+ ++parseContext.controlFlowNestingLevel;
+ }
+ condition RIGHT_PAREN statement_no_new_scope {
+ parseContext.symbolTable.pop(&parseContext.defaultPrecision[0]);
+ $$ = parseContext.intermediate.addLoop($6, $4, 0, true, $1.loc);
+ --parseContext.loopNestingLevel;
+ --parseContext.statementNestingLevel;
+ --parseContext.controlFlowNestingLevel;
+ }
+ | DO {
+ ++parseContext.loopNestingLevel;
+ ++parseContext.statementNestingLevel;
+ ++parseContext.controlFlowNestingLevel;
+ }
+ statement WHILE LEFT_PAREN expression RIGHT_PAREN SEMICOLON {
+ if (! parseContext.limits.whileLoops)
+ parseContext.error($1.loc, "do-while loops not available", "limitation", "");
+
+ parseContext.boolCheck($8.loc, $6);
+
+ $$ = parseContext.intermediate.addLoop($3, $6, 0, false, $4.loc);
+ --parseContext.loopNestingLevel;
+ --parseContext.statementNestingLevel;
+ --parseContext.controlFlowNestingLevel;
+ }
+ | FOR LEFT_PAREN {
+ parseContext.symbolTable.push();
+ ++parseContext.loopNestingLevel;
+ ++parseContext.statementNestingLevel;
+ ++parseContext.controlFlowNestingLevel;
+ }
+ for_init_statement for_rest_statement RIGHT_PAREN statement_no_new_scope {
+ parseContext.symbolTable.pop(&parseContext.defaultPrecision[0]);
+ $$ = parseContext.intermediate.makeAggregate($4, $2.loc);
+ TIntermLoop* forLoop = parseContext.intermediate.addLoop($7, reinterpret_cast<TIntermTyped*>($5.node1), reinterpret_cast<TIntermTyped*>($5.node2), true, $1.loc);
+ if (! parseContext.limits.nonInductiveForLoops)
+ parseContext.inductiveLoopCheck($1.loc, $4, forLoop);
+ $$ = parseContext.intermediate.growAggregate($$, forLoop, $1.loc);
+ $$->getAsAggregate()->setOperator(EOpSequence);
+ --parseContext.loopNestingLevel;
+ --parseContext.statementNestingLevel;
+ --parseContext.controlFlowNestingLevel;
+ }
+ ;
+
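+// For example, when the active limits set limits.whileLoops or
+// limits.nonInductiveForLoops to false (ES 1.00 Appendix A style), the checks
+// above only let inductive loops through ('cond' is illustrative):
+//
+//     for (int i = 0; i < 4; ++i) { }   // ok: inductive
+//     while (cond) { }                  // error: "while loops not available"
+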
+for_init_statement
+ : expression_statement {
+ $$ = $1;
+ }
+ | declaration_statement {
+ $$ = $1;
+ }
+ ;
+
+conditionopt
+ : condition {
+ $$ = $1;
+ }
+ | /* May be null */ {
+ $$ = 0;
+ }
+ ;
+
+for_rest_statement
+ : conditionopt SEMICOLON {
+ $$.node1 = $1;
+ $$.node2 = 0;
+ }
+ | conditionopt SEMICOLON expression {
+ $$.node1 = $1;
+ $$.node2 = $3;
+ }
+ ;
+
+jump_statement
+ : CONTINUE SEMICOLON {
+ if (parseContext.loopNestingLevel <= 0)
+ parseContext.error($1.loc, "continue statement only allowed in loops", "", "");
+ $$ = parseContext.intermediate.addBranch(EOpContinue, $1.loc);
+ }
+ | BREAK SEMICOLON {
+ if (parseContext.loopNestingLevel + parseContext.switchSequenceStack.size() <= 0)
+ parseContext.error($1.loc, "break statement only allowed in switch and loops", "", "");
+ $$ = parseContext.intermediate.addBranch(EOpBreak, $1.loc);
+ }
+ | RETURN SEMICOLON {
+ $$ = parseContext.intermediate.addBranch(EOpReturn, $1.loc);
+ if (parseContext.currentFunctionType->getBasicType() != EbtVoid)
+ parseContext.error($1.loc, "non-void function must return a value", "return", "");
+ if (parseContext.inMain)
+ parseContext.postEntryPointReturn = true;
+ }
+ | RETURN expression SEMICOLON {
+ $$ = parseContext.handleReturnValue($1.loc, $2);
+ }
+ | DISCARD SEMICOLON {
+ parseContext.requireStage($1.loc, EShLangFragment, "discard");
+ $$ = parseContext.intermediate.addBranch(EOpKill, $1.loc);
+ }
+ ;
+
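+// For example, inside a function body:
+//
+//     for (;;) { continue; }   // ok: 'continue' appears inside a loop
+//     continue;                // error: outside any loop
+//     discard;                 // fragment stage only (requireStage above)
+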
+// Grammar Note: No 'goto'. Gotos are not supported.
+
+translation_unit
+ : external_declaration {
+ $$ = $1;
+ parseContext.intermediate.setTreeRoot($$);
+ }
+ | translation_unit external_declaration {
+ if ($2 != nullptr) {
+ $$ = parseContext.intermediate.growAggregate($1, $2);
+ parseContext.intermediate.setTreeRoot($$);
+ }
+ }
+ ;
+
+external_declaration
+ : function_definition {
+ $$ = $1;
+ }
+ | declaration {
+ $$ = $1;
+ }
+ | SEMICOLON {
+ parseContext.requireProfile($1.loc, ~EEsProfile, "extraneous semicolon");
+ parseContext.profileRequires($1.loc, ~EEsProfile, 460, nullptr, "extraneous semicolon");
+ $$ = nullptr;
+ }
+ ;
+
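+// For example, a stray top-level ';' is itself an external_declaration, but
+// only in desktop GLSL 4.60 and up:
+//
+//     #version 460
+//     void f() { }; // the trailing ';' parses via the SEMICOLON production
+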
+function_definition
+ : function_prototype {
+ $1.function = parseContext.handleFunctionDeclarator($1.loc, *$1.function, false /* not prototype */);
+ $1.intermNode = parseContext.handleFunctionDefinition($1.loc, *$1.function);
+ }
+ compound_statement_no_new_scope {
+        // This may be best done as a post-process phase on the intermediate code
+ if (parseContext.currentFunctionType->getBasicType() != EbtVoid && ! parseContext.functionReturnsValue)
+ parseContext.error($1.loc, "function does not return a value:", "", $1.function->getName().c_str());
+ parseContext.symbolTable.pop(&parseContext.defaultPrecision[0]);
+ $$ = parseContext.intermediate.growAggregate($1.intermNode, $3);
+ parseContext.intermediate.setAggregateOperator($$, EOpFunction, $1.function->getType(), $1.loc);
+ $$->getAsAggregate()->setName($1.function->getMangledName().c_str());
+
+        // Store the pragma information for debug, optimize, and other vendor-specific
+        // information. This information can be queried from the parse tree.
+ $$->getAsAggregate()->setOptimize(parseContext.contextPragma.optimize);
+ $$->getAsAggregate()->setDebug(parseContext.contextPragma.debug);
+ $$->getAsAggregate()->setPragmaTable(parseContext.contextPragma.pragmaTable);
+ }
+ ;
+
+attribute
+ : LEFT_BRACKET LEFT_BRACKET attribute_list RIGHT_BRACKET RIGHT_BRACKET {
+ $$ = $3;
+ parseContext.requireExtensions($1.loc, 1, &E_GL_EXT_control_flow_attributes, "attribute");
+ }
+
+attribute_list
+ : single_attribute {
+ $$ = $1;
+ }
+ | attribute_list COMMA single_attribute {
+ $$ = parseContext.mergeAttributes($1, $3);
+ }
+
+single_attribute
+ : IDENTIFIER {
+ $$ = parseContext.makeAttributes(*$1.string);
+ }
+ | IDENTIFIER LEFT_PAREN constant_expression RIGHT_PAREN {
+ $$ = parseContext.makeAttributes(*$1.string, $3);
+ }
+
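+// For example, with GL_EXT_control_flow_attributes enabled:
+//
+//     [[unroll]]               for (int i = 0; i < 8; ++i) { }
+//     [[dependency_length(4)]] while (go) { }   // 'go' is illustrative
+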
+%%
diff --git a/thirdparty/glslang/glslang/MachineIndependent/glslang_tab.cpp b/thirdparty/glslang/glslang/MachineIndependent/glslang_tab.cpp
new file mode 100644
index 0000000000..07feffea60
--- /dev/null
+++ b/thirdparty/glslang/glslang/MachineIndependent/glslang_tab.cpp
@@ -0,0 +1,10468 @@
+/* A Bison parser, made by GNU Bison 3.0.4. */
+
+/* Bison implementation for Yacc-like parsers in C
+
+ Copyright (C) 1984, 1989-1990, 2000-2015 Free Software Foundation, Inc.
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>. */
+
+/* As a special exception, you may create a larger work that contains
+ part or all of the Bison parser skeleton and distribute that work
+ under terms of your choice, so long as that work isn't itself a
+ parser generator using the skeleton or a modified version thereof
+ as a parser skeleton. Alternatively, if you modify or redistribute
+ the parser skeleton itself, you may (at your option) remove this
+ special exception, which will cause the skeleton and the resulting
+ Bison output files to be licensed under the GNU General Public
+ License without this special exception.
+
+ This special exception was added by the Free Software Foundation in
+ version 2.2 of Bison. */
+
+/* C LALR(1) parser skeleton written by Richard Stallman, by
+ simplifying the original so-called "semantic" parser. */
+
+/* All symbols defined below should begin with yy or YY, to avoid
+ infringing on user name space. This should be done even for local
+ variables, as they might otherwise be expanded by user macros.
+ There are some unavoidable exceptions within include files to
+ define necessary library symbols; they are noted "INFRINGES ON
+ USER NAME SPACE" below. */
+
+/* Identify Bison output. */
+#define YYBISON 1
+
+/* Bison version. */
+#define YYBISON_VERSION "3.0.4"
+
+/* Skeleton name. */
+#define YYSKELETON_NAME "yacc.c"
+
+/* Pure parsers. */
+#define YYPURE 1
+
+/* Push parsers. */
+#define YYPUSH 0
+
+/* Pull parsers. */
+#define YYPULL 1
+
+
+
+
+/* Copy the first part of user declarations. */
+#line 43 "MachineIndependent/glslang.y" /* yacc.c:339 */
+
+
+/* Based on:
+ANSI C Yacc grammar
+
+In 1985, Jeff Lee published his Yacc grammar (which is accompanied by a
+matching Lex specification) for the April 30, 1985 draft version of the
+ANSI C standard. Tom Stockfisch reposted it to net.sources in 1987; that
+original, as mentioned in the answer to question 17.25 of the comp.lang.c
+FAQ, can be ftp'ed from ftp.uu.net, file usenet/net.sources/ansi.c.grammar.Z.
+
+I intend to keep this version as close to the current C Standard grammar as
+possible; please let me know if you discover discrepancies.
+
+Jutta Degener, 1995
+*/
+
+#include "SymbolTable.h"
+#include "ParseHelper.h"
+#include "../Public/ShaderLang.h"
+#include "attribute.h"
+
+using namespace glslang;
+
+
+#line 92 "MachineIndependent/glslang_tab.cpp" /* yacc.c:339 */
+
+# ifndef YY_NULLPTR
+# if defined __cplusplus && 201103L <= __cplusplus
+# define YY_NULLPTR nullptr
+# else
+# define YY_NULLPTR 0
+# endif
+# endif
+
+/* Enabling verbose error messages. */
+#ifdef YYERROR_VERBOSE
+# undef YYERROR_VERBOSE
+# define YYERROR_VERBOSE 1
+#else
+# define YYERROR_VERBOSE 1
+#endif
+
+/* In a future release of Bison, this section will be replaced
+ by #include "glslang_tab.cpp.h". */
+#ifndef YY_YY_MACHINEINDEPENDENT_GLSLANG_TAB_CPP_H_INCLUDED
+# define YY_YY_MACHINEINDEPENDENT_GLSLANG_TAB_CPP_H_INCLUDED
+/* Debug traces. */
+#ifndef YYDEBUG
+# define YYDEBUG 1
+#endif
+#if YYDEBUG
+extern int yydebug;
+#endif
+
+/* Token type. */
+#ifndef YYTOKENTYPE
+# define YYTOKENTYPE
+ enum yytokentype
+ {
+ ATTRIBUTE = 258,
+ VARYING = 259,
+ FLOAT16_T = 260,
+ FLOAT = 261,
+ FLOAT32_T = 262,
+ DOUBLE = 263,
+ FLOAT64_T = 264,
+ CONST = 265,
+ BOOL = 266,
+ INT = 267,
+ UINT = 268,
+ INT64_T = 269,
+ UINT64_T = 270,
+ INT32_T = 271,
+ UINT32_T = 272,
+ INT16_T = 273,
+ UINT16_T = 274,
+ INT8_T = 275,
+ UINT8_T = 276,
+ BREAK = 277,
+ CONTINUE = 278,
+ DO = 279,
+ ELSE = 280,
+ FOR = 281,
+ IF = 282,
+ DISCARD = 283,
+ RETURN = 284,
+ SWITCH = 285,
+ CASE = 286,
+ DEFAULT = 287,
+ SUBROUTINE = 288,
+ BVEC2 = 289,
+ BVEC3 = 290,
+ BVEC4 = 291,
+ IVEC2 = 292,
+ IVEC3 = 293,
+ IVEC4 = 294,
+ UVEC2 = 295,
+ UVEC3 = 296,
+ UVEC4 = 297,
+ I64VEC2 = 298,
+ I64VEC3 = 299,
+ I64VEC4 = 300,
+ U64VEC2 = 301,
+ U64VEC3 = 302,
+ U64VEC4 = 303,
+ I32VEC2 = 304,
+ I32VEC3 = 305,
+ I32VEC4 = 306,
+ U32VEC2 = 307,
+ U32VEC3 = 308,
+ U32VEC4 = 309,
+ I16VEC2 = 310,
+ I16VEC3 = 311,
+ I16VEC4 = 312,
+ U16VEC2 = 313,
+ U16VEC3 = 314,
+ U16VEC4 = 315,
+ I8VEC2 = 316,
+ I8VEC3 = 317,
+ I8VEC4 = 318,
+ U8VEC2 = 319,
+ U8VEC3 = 320,
+ U8VEC4 = 321,
+ VEC2 = 322,
+ VEC3 = 323,
+ VEC4 = 324,
+ MAT2 = 325,
+ MAT3 = 326,
+ MAT4 = 327,
+ CENTROID = 328,
+ IN = 329,
+ OUT = 330,
+ INOUT = 331,
+ UNIFORM = 332,
+ PATCH = 333,
+ SAMPLE = 334,
+ BUFFER = 335,
+ SHARED = 336,
+ NONUNIFORM = 337,
+ PAYLOADNV = 338,
+ PAYLOADINNV = 339,
+ HITATTRNV = 340,
+ CALLDATANV = 341,
+ CALLDATAINNV = 342,
+ COHERENT = 343,
+ VOLATILE = 344,
+ RESTRICT = 345,
+ READONLY = 346,
+ WRITEONLY = 347,
+ DEVICECOHERENT = 348,
+ QUEUEFAMILYCOHERENT = 349,
+ WORKGROUPCOHERENT = 350,
+ SUBGROUPCOHERENT = 351,
+ NONPRIVATE = 352,
+ DVEC2 = 353,
+ DVEC3 = 354,
+ DVEC4 = 355,
+ DMAT2 = 356,
+ DMAT3 = 357,
+ DMAT4 = 358,
+ F16VEC2 = 359,
+ F16VEC3 = 360,
+ F16VEC4 = 361,
+ F16MAT2 = 362,
+ F16MAT3 = 363,
+ F16MAT4 = 364,
+ F32VEC2 = 365,
+ F32VEC3 = 366,
+ F32VEC4 = 367,
+ F32MAT2 = 368,
+ F32MAT3 = 369,
+ F32MAT4 = 370,
+ F64VEC2 = 371,
+ F64VEC3 = 372,
+ F64VEC4 = 373,
+ F64MAT2 = 374,
+ F64MAT3 = 375,
+ F64MAT4 = 376,
+ NOPERSPECTIVE = 377,
+ FLAT = 378,
+ SMOOTH = 379,
+ LAYOUT = 380,
+ EXPLICITINTERPAMD = 381,
+ PERVERTEXNV = 382,
+ PERPRIMITIVENV = 383,
+ PERVIEWNV = 384,
+ PERTASKNV = 385,
+ MAT2X2 = 386,
+ MAT2X3 = 387,
+ MAT2X4 = 388,
+ MAT3X2 = 389,
+ MAT3X3 = 390,
+ MAT3X4 = 391,
+ MAT4X2 = 392,
+ MAT4X3 = 393,
+ MAT4X4 = 394,
+ DMAT2X2 = 395,
+ DMAT2X3 = 396,
+ DMAT2X4 = 397,
+ DMAT3X2 = 398,
+ DMAT3X3 = 399,
+ DMAT3X4 = 400,
+ DMAT4X2 = 401,
+ DMAT4X3 = 402,
+ DMAT4X4 = 403,
+ F16MAT2X2 = 404,
+ F16MAT2X3 = 405,
+ F16MAT2X4 = 406,
+ F16MAT3X2 = 407,
+ F16MAT3X3 = 408,
+ F16MAT3X4 = 409,
+ F16MAT4X2 = 410,
+ F16MAT4X3 = 411,
+ F16MAT4X4 = 412,
+ F32MAT2X2 = 413,
+ F32MAT2X3 = 414,
+ F32MAT2X4 = 415,
+ F32MAT3X2 = 416,
+ F32MAT3X3 = 417,
+ F32MAT3X4 = 418,
+ F32MAT4X2 = 419,
+ F32MAT4X3 = 420,
+ F32MAT4X4 = 421,
+ F64MAT2X2 = 422,
+ F64MAT2X3 = 423,
+ F64MAT2X4 = 424,
+ F64MAT3X2 = 425,
+ F64MAT3X3 = 426,
+ F64MAT3X4 = 427,
+ F64MAT4X2 = 428,
+ F64MAT4X3 = 429,
+ F64MAT4X4 = 430,
+ ATOMIC_UINT = 431,
+ ACCSTRUCTNV = 432,
+ FCOOPMATNV = 433,
+ SAMPLER1D = 434,
+ SAMPLER2D = 435,
+ SAMPLER3D = 436,
+ SAMPLERCUBE = 437,
+ SAMPLER1DSHADOW = 438,
+ SAMPLER2DSHADOW = 439,
+ SAMPLERCUBESHADOW = 440,
+ SAMPLER1DARRAY = 441,
+ SAMPLER2DARRAY = 442,
+ SAMPLER1DARRAYSHADOW = 443,
+ SAMPLER2DARRAYSHADOW = 444,
+ ISAMPLER1D = 445,
+ ISAMPLER2D = 446,
+ ISAMPLER3D = 447,
+ ISAMPLERCUBE = 448,
+ ISAMPLER1DARRAY = 449,
+ ISAMPLER2DARRAY = 450,
+ USAMPLER1D = 451,
+ USAMPLER2D = 452,
+ USAMPLER3D = 453,
+ USAMPLERCUBE = 454,
+ USAMPLER1DARRAY = 455,
+ USAMPLER2DARRAY = 456,
+ SAMPLER2DRECT = 457,
+ SAMPLER2DRECTSHADOW = 458,
+ ISAMPLER2DRECT = 459,
+ USAMPLER2DRECT = 460,
+ SAMPLERBUFFER = 461,
+ ISAMPLERBUFFER = 462,
+ USAMPLERBUFFER = 463,
+ SAMPLERCUBEARRAY = 464,
+ SAMPLERCUBEARRAYSHADOW = 465,
+ ISAMPLERCUBEARRAY = 466,
+ USAMPLERCUBEARRAY = 467,
+ SAMPLER2DMS = 468,
+ ISAMPLER2DMS = 469,
+ USAMPLER2DMS = 470,
+ SAMPLER2DMSARRAY = 471,
+ ISAMPLER2DMSARRAY = 472,
+ USAMPLER2DMSARRAY = 473,
+ SAMPLEREXTERNALOES = 474,
+ SAMPLEREXTERNAL2DY2YEXT = 475,
+ F16SAMPLER1D = 476,
+ F16SAMPLER2D = 477,
+ F16SAMPLER3D = 478,
+ F16SAMPLER2DRECT = 479,
+ F16SAMPLERCUBE = 480,
+ F16SAMPLER1DARRAY = 481,
+ F16SAMPLER2DARRAY = 482,
+ F16SAMPLERCUBEARRAY = 483,
+ F16SAMPLERBUFFER = 484,
+ F16SAMPLER2DMS = 485,
+ F16SAMPLER2DMSARRAY = 486,
+ F16SAMPLER1DSHADOW = 487,
+ F16SAMPLER2DSHADOW = 488,
+ F16SAMPLER1DARRAYSHADOW = 489,
+ F16SAMPLER2DARRAYSHADOW = 490,
+ F16SAMPLER2DRECTSHADOW = 491,
+ F16SAMPLERCUBESHADOW = 492,
+ F16SAMPLERCUBEARRAYSHADOW = 493,
+ SAMPLER = 494,
+ SAMPLERSHADOW = 495,
+ TEXTURE1D = 496,
+ TEXTURE2D = 497,
+ TEXTURE3D = 498,
+ TEXTURECUBE = 499,
+ TEXTURE1DARRAY = 500,
+ TEXTURE2DARRAY = 501,
+ ITEXTURE1D = 502,
+ ITEXTURE2D = 503,
+ ITEXTURE3D = 504,
+ ITEXTURECUBE = 505,
+ ITEXTURE1DARRAY = 506,
+ ITEXTURE2DARRAY = 507,
+ UTEXTURE1D = 508,
+ UTEXTURE2D = 509,
+ UTEXTURE3D = 510,
+ UTEXTURECUBE = 511,
+ UTEXTURE1DARRAY = 512,
+ UTEXTURE2DARRAY = 513,
+ TEXTURE2DRECT = 514,
+ ITEXTURE2DRECT = 515,
+ UTEXTURE2DRECT = 516,
+ TEXTUREBUFFER = 517,
+ ITEXTUREBUFFER = 518,
+ UTEXTUREBUFFER = 519,
+ TEXTURECUBEARRAY = 520,
+ ITEXTURECUBEARRAY = 521,
+ UTEXTURECUBEARRAY = 522,
+ TEXTURE2DMS = 523,
+ ITEXTURE2DMS = 524,
+ UTEXTURE2DMS = 525,
+ TEXTURE2DMSARRAY = 526,
+ ITEXTURE2DMSARRAY = 527,
+ UTEXTURE2DMSARRAY = 528,
+ F16TEXTURE1D = 529,
+ F16TEXTURE2D = 530,
+ F16TEXTURE3D = 531,
+ F16TEXTURE2DRECT = 532,
+ F16TEXTURECUBE = 533,
+ F16TEXTURE1DARRAY = 534,
+ F16TEXTURE2DARRAY = 535,
+ F16TEXTURECUBEARRAY = 536,
+ F16TEXTUREBUFFER = 537,
+ F16TEXTURE2DMS = 538,
+ F16TEXTURE2DMSARRAY = 539,
+ SUBPASSINPUT = 540,
+ SUBPASSINPUTMS = 541,
+ ISUBPASSINPUT = 542,
+ ISUBPASSINPUTMS = 543,
+ USUBPASSINPUT = 544,
+ USUBPASSINPUTMS = 545,
+ F16SUBPASSINPUT = 546,
+ F16SUBPASSINPUTMS = 547,
+ IMAGE1D = 548,
+ IIMAGE1D = 549,
+ UIMAGE1D = 550,
+ IMAGE2D = 551,
+ IIMAGE2D = 552,
+ UIMAGE2D = 553,
+ IMAGE3D = 554,
+ IIMAGE3D = 555,
+ UIMAGE3D = 556,
+ IMAGE2DRECT = 557,
+ IIMAGE2DRECT = 558,
+ UIMAGE2DRECT = 559,
+ IMAGECUBE = 560,
+ IIMAGECUBE = 561,
+ UIMAGECUBE = 562,
+ IMAGEBUFFER = 563,
+ IIMAGEBUFFER = 564,
+ UIMAGEBUFFER = 565,
+ IMAGE1DARRAY = 566,
+ IIMAGE1DARRAY = 567,
+ UIMAGE1DARRAY = 568,
+ IMAGE2DARRAY = 569,
+ IIMAGE2DARRAY = 570,
+ UIMAGE2DARRAY = 571,
+ IMAGECUBEARRAY = 572,
+ IIMAGECUBEARRAY = 573,
+ UIMAGECUBEARRAY = 574,
+ IMAGE2DMS = 575,
+ IIMAGE2DMS = 576,
+ UIMAGE2DMS = 577,
+ IMAGE2DMSARRAY = 578,
+ IIMAGE2DMSARRAY = 579,
+ UIMAGE2DMSARRAY = 580,
+ F16IMAGE1D = 581,
+ F16IMAGE2D = 582,
+ F16IMAGE3D = 583,
+ F16IMAGE2DRECT = 584,
+ F16IMAGECUBE = 585,
+ F16IMAGE1DARRAY = 586,
+ F16IMAGE2DARRAY = 587,
+ F16IMAGECUBEARRAY = 588,
+ F16IMAGEBUFFER = 589,
+ F16IMAGE2DMS = 590,
+ F16IMAGE2DMSARRAY = 591,
+ STRUCT = 592,
+ VOID = 593,
+ WHILE = 594,
+ IDENTIFIER = 595,
+ TYPE_NAME = 596,
+ FLOATCONSTANT = 597,
+ DOUBLECONSTANT = 598,
+ INT16CONSTANT = 599,
+ UINT16CONSTANT = 600,
+ INT32CONSTANT = 601,
+ UINT32CONSTANT = 602,
+ INTCONSTANT = 603,
+ UINTCONSTANT = 604,
+ INT64CONSTANT = 605,
+ UINT64CONSTANT = 606,
+ BOOLCONSTANT = 607,
+ FLOAT16CONSTANT = 608,
+ LEFT_OP = 609,
+ RIGHT_OP = 610,
+ INC_OP = 611,
+ DEC_OP = 612,
+ LE_OP = 613,
+ GE_OP = 614,
+ EQ_OP = 615,
+ NE_OP = 616,
+ AND_OP = 617,
+ OR_OP = 618,
+ XOR_OP = 619,
+ MUL_ASSIGN = 620,
+ DIV_ASSIGN = 621,
+ ADD_ASSIGN = 622,
+ MOD_ASSIGN = 623,
+ LEFT_ASSIGN = 624,
+ RIGHT_ASSIGN = 625,
+ AND_ASSIGN = 626,
+ XOR_ASSIGN = 627,
+ OR_ASSIGN = 628,
+ SUB_ASSIGN = 629,
+ LEFT_PAREN = 630,
+ RIGHT_PAREN = 631,
+ LEFT_BRACKET = 632,
+ RIGHT_BRACKET = 633,
+ LEFT_BRACE = 634,
+ RIGHT_BRACE = 635,
+ DOT = 636,
+ COMMA = 637,
+ COLON = 638,
+ EQUAL = 639,
+ SEMICOLON = 640,
+ BANG = 641,
+ DASH = 642,
+ TILDE = 643,
+ PLUS = 644,
+ STAR = 645,
+ SLASH = 646,
+ PERCENT = 647,
+ LEFT_ANGLE = 648,
+ RIGHT_ANGLE = 649,
+ VERTICAL_BAR = 650,
+ CARET = 651,
+ AMPERSAND = 652,
+ QUESTION = 653,
+ INVARIANT = 654,
+ PRECISE = 655,
+ HIGH_PRECISION = 656,
+ MEDIUM_PRECISION = 657,
+ LOW_PRECISION = 658,
+ PRECISION = 659,
+ PACKED = 660,
+ RESOURCE = 661,
+ SUPERP = 662
+ };
+#endif
+
+/* Value type. */
+#if ! defined YYSTYPE && ! defined YYSTYPE_IS_DECLARED
+
+union YYSTYPE
+{
+#line 71 "MachineIndependent/glslang.y" /* yacc.c:355 */
+
+ struct {
+ glslang::TSourceLoc loc;
+ union {
+ glslang::TString *string;
+ int i;
+ unsigned int u;
+ long long i64;
+ unsigned long long u64;
+ bool b;
+ double d;
+ };
+ glslang::TSymbol* symbol;
+ } lex;
+ struct {
+ glslang::TSourceLoc loc;
+ glslang::TOperator op;
+ union {
+ TIntermNode* intermNode;
+ glslang::TIntermNodePair nodePair;
+ glslang::TIntermTyped* intermTypedNode;
+ glslang::TAttributes* attributes;
+ };
+ union {
+ glslang::TPublicType type;
+ glslang::TFunction* function;
+ glslang::TParameter param;
+ glslang::TTypeLoc typeLine;
+ glslang::TTypeList* typeList;
+ glslang::TArraySizes* arraySizes;
+ glslang::TIdentifierList* identifierList;
+ };
+ glslang::TArraySizes* typeParameters;
+ } interm;
+
+#line 576 "MachineIndependent/glslang_tab.cpp" /* yacc.c:355 */
+};
+
+typedef union YYSTYPE YYSTYPE;
+# define YYSTYPE_IS_TRIVIAL 1
+# define YYSTYPE_IS_DECLARED 1
+#endif
+
+
+
+int yyparse (glslang::TParseContext* pParseContext);
+
+#endif /* !YY_YY_MACHINEINDEPENDENT_GLSLANG_TAB_CPP_H_INCLUDED */
+
+/* Copy the second part of user declarations. */
+#line 107 "MachineIndependent/glslang.y" /* yacc.c:358 */
+
+
+/* windows only pragma */
+#ifdef _MSC_VER
+ #pragma warning(disable : 4065)
+ #pragma warning(disable : 4127)
+ #pragma warning(disable : 4244)
+#endif
+
+#define parseContext (*pParseContext)
+#define yyerror(context, msg) context->parserError(msg)
+
+extern int yylex(YYSTYPE*, TParseContext&);
+
+
+#line 607 "MachineIndependent/glslang_tab.cpp" /* yacc.c:358 */
+
+#ifdef short
+# undef short
+#endif
+
+#ifdef YYTYPE_UINT8
+typedef YYTYPE_UINT8 yytype_uint8;
+#else
+typedef unsigned char yytype_uint8;
+#endif
+
+#ifdef YYTYPE_INT8
+typedef YYTYPE_INT8 yytype_int8;
+#else
+typedef signed char yytype_int8;
+#endif
+
+#ifdef YYTYPE_UINT16
+typedef YYTYPE_UINT16 yytype_uint16;
+#else
+typedef unsigned short int yytype_uint16;
+#endif
+
+#ifdef YYTYPE_INT16
+typedef YYTYPE_INT16 yytype_int16;
+#else
+typedef short int yytype_int16;
+#endif
+
+#ifndef YYSIZE_T
+# ifdef __SIZE_TYPE__
+# define YYSIZE_T __SIZE_TYPE__
+# elif defined size_t
+# define YYSIZE_T size_t
+# elif ! defined YYSIZE_T
+# include <stddef.h> /* INFRINGES ON USER NAME SPACE */
+# define YYSIZE_T size_t
+# else
+# define YYSIZE_T unsigned int
+# endif
+#endif
+
+#define YYSIZE_MAXIMUM ((YYSIZE_T) -1)
+
+#ifndef YY_
+# if defined YYENABLE_NLS && YYENABLE_NLS
+# if ENABLE_NLS
+# include <libintl.h> /* INFRINGES ON USER NAME SPACE */
+# define YY_(Msgid) dgettext ("bison-runtime", Msgid)
+# endif
+# endif
+# ifndef YY_
+# define YY_(Msgid) Msgid
+# endif
+#endif
+
+#ifndef YY_ATTRIBUTE
+# if (defined __GNUC__ \
+ && (2 < __GNUC__ || (__GNUC__ == 2 && 96 <= __GNUC_MINOR__))) \
+ || defined __SUNPRO_C && 0x5110 <= __SUNPRO_C
+# define YY_ATTRIBUTE(Spec) __attribute__(Spec)
+# else
+# define YY_ATTRIBUTE(Spec) /* empty */
+# endif
+#endif
+
+#ifndef YY_ATTRIBUTE_PURE
+# define YY_ATTRIBUTE_PURE YY_ATTRIBUTE ((__pure__))
+#endif
+
+#ifndef YY_ATTRIBUTE_UNUSED
+# define YY_ATTRIBUTE_UNUSED YY_ATTRIBUTE ((__unused__))
+#endif
+
+#if !defined _Noreturn \
+ && (!defined __STDC_VERSION__ || __STDC_VERSION__ < 201112)
+# if defined _MSC_VER && 1200 <= _MSC_VER
+# define _Noreturn __declspec (noreturn)
+# else
+# define _Noreturn YY_ATTRIBUTE ((__noreturn__))
+# endif
+#endif
+
+/* Suppress unused-variable warnings by "using" E. */
+#if ! defined lint || defined __GNUC__
+# define YYUSE(E) ((void) (E))
+#else
+# define YYUSE(E) /* empty */
+#endif
+
+#if defined __GNUC__ && 407 <= __GNUC__ * 100 + __GNUC_MINOR__
+/* Suppress an incorrect diagnostic about yylval being uninitialized. */
+# define YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN \
+ _Pragma ("GCC diagnostic push") \
+ _Pragma ("GCC diagnostic ignored \"-Wuninitialized\"")\
+ _Pragma ("GCC diagnostic ignored \"-Wmaybe-uninitialized\"")
+# define YY_IGNORE_MAYBE_UNINITIALIZED_END \
+ _Pragma ("GCC diagnostic pop")
+#else
+# define YY_INITIAL_VALUE(Value) Value
+#endif
+#ifndef YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN
+# define YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN
+# define YY_IGNORE_MAYBE_UNINITIALIZED_END
+#endif
+#ifndef YY_INITIAL_VALUE
+# define YY_INITIAL_VALUE(Value) /* Nothing. */
+#endif
+
+
+#if ! defined yyoverflow || YYERROR_VERBOSE
+
+/* The parser invokes alloca or malloc; define the necessary symbols. */
+
+# ifdef YYSTACK_USE_ALLOCA
+# if YYSTACK_USE_ALLOCA
+# ifdef __GNUC__
+# define YYSTACK_ALLOC __builtin_alloca
+# elif defined __BUILTIN_VA_ARG_INCR
+# include <alloca.h> /* INFRINGES ON USER NAME SPACE */
+# elif defined _AIX
+# define YYSTACK_ALLOC __alloca
+# elif defined _MSC_VER
+# include <malloc.h> /* INFRINGES ON USER NAME SPACE */
+# define alloca _alloca
+# else
+# define YYSTACK_ALLOC alloca
+# if ! defined _ALLOCA_H && ! defined EXIT_SUCCESS
+# include <stdlib.h> /* INFRINGES ON USER NAME SPACE */
+ /* Use EXIT_SUCCESS as a witness for stdlib.h. */
+# ifndef EXIT_SUCCESS
+# define EXIT_SUCCESS 0
+# endif
+# endif
+# endif
+# endif
+# endif
+
+# ifdef YYSTACK_ALLOC
+ /* Pacify GCC's 'empty if-body' warning. */
+# define YYSTACK_FREE(Ptr) do { /* empty */; } while (0)
+# ifndef YYSTACK_ALLOC_MAXIMUM
+ /* The OS might guarantee only one guard page at the bottom of the stack,
+ and a page size can be as small as 4096 bytes. So we cannot safely
+ invoke alloca (N) if N exceeds 4096. Use a slightly smaller number
+ to allow for a few compiler-allocated temporary stack slots. */
+# define YYSTACK_ALLOC_MAXIMUM 4032 /* reasonable circa 2006 */
+# endif
+# else
+# define YYSTACK_ALLOC YYMALLOC
+# define YYSTACK_FREE YYFREE
+# ifndef YYSTACK_ALLOC_MAXIMUM
+# define YYSTACK_ALLOC_MAXIMUM YYSIZE_MAXIMUM
+# endif
+# if (defined __cplusplus && ! defined EXIT_SUCCESS \
+ && ! ((defined YYMALLOC || defined malloc) \
+ && (defined YYFREE || defined free)))
+# include <stdlib.h> /* INFRINGES ON USER NAME SPACE */
+# ifndef EXIT_SUCCESS
+# define EXIT_SUCCESS 0
+# endif
+# endif
+# ifndef YYMALLOC
+# define YYMALLOC malloc
+# if ! defined malloc && ! defined EXIT_SUCCESS
+void *malloc (YYSIZE_T); /* INFRINGES ON USER NAME SPACE */
+# endif
+# endif
+# ifndef YYFREE
+# define YYFREE free
+# if ! defined free && ! defined EXIT_SUCCESS
+void free (void *); /* INFRINGES ON USER NAME SPACE */
+# endif
+# endif
+# endif
+#endif /* ! defined yyoverflow || YYERROR_VERBOSE */
+
+
+#if (! defined yyoverflow \
+ && (! defined __cplusplus \
+ || (defined YYSTYPE_IS_TRIVIAL && YYSTYPE_IS_TRIVIAL)))
+
+/* A type that is properly aligned for any stack member. */
+union yyalloc
+{
+ yytype_int16 yyss_alloc;
+ YYSTYPE yyvs_alloc;
+};
+
+/* The size of the maximum gap between one aligned stack and the next. */
+# define YYSTACK_GAP_MAXIMUM (sizeof (union yyalloc) - 1)
+
+/* The size of an array large enough to hold all stacks, each with
+   N elements. */
+# define YYSTACK_BYTES(N) \
+ ((N) * (sizeof (yytype_int16) + sizeof (YYSTYPE)) \
+ + YYSTACK_GAP_MAXIMUM)
+
+# define YYCOPY_NEEDED 1
+
+/* Relocate STACK from its old location to the new one. The
+ local variables YYSIZE and YYSTACKSIZE give the old and new number of
+ elements in the stack, and YYPTR gives the new location of the
+ stack. Advance YYPTR to a properly aligned location for the next
+ stack. */
+# define YYSTACK_RELOCATE(Stack_alloc, Stack) \
+ do \
+ { \
+ YYSIZE_T yynewbytes; \
+ YYCOPY (&yyptr->Stack_alloc, Stack, yysize); \
+ Stack = &yyptr->Stack_alloc; \
+ yynewbytes = yystacksize * sizeof (*Stack) + YYSTACK_GAP_MAXIMUM; \
+ yyptr += yynewbytes / sizeof (*yyptr); \
+ } \
+ while (0)
+
+#endif
+
+#if defined YYCOPY_NEEDED && YYCOPY_NEEDED
+/* Copy COUNT objects from SRC to DST. The source and destination do
+ not overlap. */
+# ifndef YYCOPY
+# if defined __GNUC__ && 1 < __GNUC__
+# define YYCOPY(Dst, Src, Count) \
+ __builtin_memcpy (Dst, Src, (Count) * sizeof (*(Src)))
+# else
+# define YYCOPY(Dst, Src, Count) \
+ do \
+ { \
+ YYSIZE_T yyi; \
+ for (yyi = 0; yyi < (Count); yyi++) \
+ (Dst)[yyi] = (Src)[yyi]; \
+ } \
+ while (0)
+# endif
+# endif
+#endif /* !YYCOPY_NEEDED */
+
+/* YYFINAL -- State number of the termination state. */
+#define YYFINAL 384
+/* YYLAST -- Last index in YYTABLE. */
+#define YYLAST 9348
+
+/* YYNTOKENS -- Number of terminals. */
+#define YYNTOKENS 408
+/* YYNNTS -- Number of nonterminals. */
+#define YYNNTS 110
+/* YYNRULES -- Number of rules. */
+#define YYNRULES 578
+/* YYNSTATES -- Number of states. */
+#define YYNSTATES 722
+
+/* YYTRANSLATE[YYX] -- Symbol number corresponding to YYX as returned
+ by yylex, with out-of-bounds checking. */
+#define YYUNDEFTOK 2
+#define YYMAXUTOK 662
+
+#define YYTRANSLATE(YYX) \
+ ((unsigned int) (YYX) <= YYMAXUTOK ? yytranslate[YYX] : YYUNDEFTOK)
+
+/* YYTRANSLATE[TOKEN-NUM] -- Symbol number corresponding to TOKEN-NUM
+ as returned by yylex, without out-of-bounds checking. */
+static const yytype_uint16 yytranslate[] =
+{
+ 0, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 1, 2, 3, 4,
+ 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
+ 15, 16, 17, 18, 19, 20, 21, 22, 23, 24,
+ 25, 26, 27, 28, 29, 30, 31, 32, 33, 34,
+ 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
+ 45, 46, 47, 48, 49, 50, 51, 52, 53, 54,
+ 55, 56, 57, 58, 59, 60, 61, 62, 63, 64,
+ 65, 66, 67, 68, 69, 70, 71, 72, 73, 74,
+ 75, 76, 77, 78, 79, 80, 81, 82, 83, 84,
+ 85, 86, 87, 88, 89, 90, 91, 92, 93, 94,
+ 95, 96, 97, 98, 99, 100, 101, 102, 103, 104,
+ 105, 106, 107, 108, 109, 110, 111, 112, 113, 114,
+ 115, 116, 117, 118, 119, 120, 121, 122, 123, 124,
+ 125, 126, 127, 128, 129, 130, 131, 132, 133, 134,
+ 135, 136, 137, 138, 139, 140, 141, 142, 143, 144,
+ 145, 146, 147, 148, 149, 150, 151, 152, 153, 154,
+ 155, 156, 157, 158, 159, 160, 161, 162, 163, 164,
+ 165, 166, 167, 168, 169, 170, 171, 172, 173, 174,
+ 175, 176, 177, 178, 179, 180, 181, 182, 183, 184,
+ 185, 186, 187, 188, 189, 190, 191, 192, 193, 194,
+ 195, 196, 197, 198, 199, 200, 201, 202, 203, 204,
+ 205, 206, 207, 208, 209, 210, 211, 212, 213, 214,
+ 215, 216, 217, 218, 219, 220, 221, 222, 223, 224,
+ 225, 226, 227, 228, 229, 230, 231, 232, 233, 234,
+ 235, 236, 237, 238, 239, 240, 241, 242, 243, 244,
+ 245, 246, 247, 248, 249, 250, 251, 252, 253, 254,
+ 255, 256, 257, 258, 259, 260, 261, 262, 263, 264,
+ 265, 266, 267, 268, 269, 270, 271, 272, 273, 274,
+ 275, 276, 277, 278, 279, 280, 281, 282, 283, 284,
+ 285, 286, 287, 288, 289, 290, 291, 292, 293, 294,
+ 295, 296, 297, 298, 299, 300, 301, 302, 303, 304,
+ 305, 306, 307, 308, 309, 310, 311, 312, 313, 314,
+ 315, 316, 317, 318, 319, 320, 321, 322, 323, 324,
+ 325, 326, 327, 328, 329, 330, 331, 332, 333, 334,
+ 335, 336, 337, 338, 339, 340, 341, 342, 343, 344,
+ 345, 346, 347, 348, 349, 350, 351, 352, 353, 354,
+ 355, 356, 357, 358, 359, 360, 361, 362, 363, 364,
+ 365, 366, 367, 368, 369, 370, 371, 372, 373, 374,
+ 375, 376, 377, 378, 379, 380, 381, 382, 383, 384,
+ 385, 386, 387, 388, 389, 390, 391, 392, 393, 394,
+ 395, 396, 397, 398, 399, 400, 401, 402, 403, 404,
+ 405, 406, 407
+};
+
+#if YYDEBUG
+ /* YYRLINE[YYN] -- Source line where rule number YYN was defined. */
+static const yytype_uint16 yyrline[] =
+{
+ 0, 302, 302, 308, 311, 315, 319, 322, 326, 330,
+ 334, 338, 342, 345, 349, 353, 356, 364, 367, 370,
+ 373, 376, 381, 389, 396, 403, 409, 413, 420, 423,
+ 429, 436, 446, 454, 459, 486, 494, 500, 504, 508,
+ 528, 529, 530, 531, 537, 538, 543, 548, 557, 558,
+ 563, 571, 572, 578, 587, 588, 593, 598, 603, 611,
+ 612, 621, 633, 634, 643, 644, 653, 654, 663, 664,
+ 672, 673, 681, 682, 690, 691, 691, 709, 710, 726,
+ 730, 734, 738, 743, 747, 751, 755, 759, 763, 767,
+ 774, 777, 788, 795, 800, 805, 813, 817, 821, 825,
+ 830, 835, 844, 844, 855, 859, 866, 873, 876, 883,
+ 891, 911, 934, 949, 974, 985, 995, 1005, 1015, 1024,
+ 1027, 1031, 1035, 1040, 1048, 1053, 1058, 1063, 1068, 1077,
+ 1088, 1115, 1124, 1131, 1138, 1149, 1158, 1168, 1180, 1189,
+ 1201, 1207, 1210, 1217, 1221, 1225, 1233, 1242, 1245, 1256,
+ 1259, 1262, 1266, 1270, 1274, 1278, 1284, 1288, 1300, 1314,
+ 1319, 1325, 1331, 1338, 1344, 1349, 1354, 1359, 1369, 1379,
+ 1389, 1399, 1408, 1420, 1424, 1429, 1434, 1439, 1444, 1449,
+ 1453, 1457, 1461, 1465, 1471, 1480, 1487, 1490, 1498, 1503,
+ 1513, 1518, 1526, 1530, 1540, 1543, 1549, 1555, 1562, 1572,
+ 1576, 1580, 1585, 1590, 1595, 1600, 1604, 1609, 1614, 1619,
+ 1624, 1629, 1634, 1639, 1644, 1649, 1653, 1658, 1663, 1668,
+ 1674, 1680, 1686, 1692, 1698, 1704, 1710, 1716, 1722, 1728,
+ 1734, 1740, 1745, 1750, 1755, 1760, 1765, 1770, 1776, 1782,
+ 1788, 1794, 1800, 1806, 1812, 1818, 1824, 1830, 1836, 1842,
+ 1848, 1854, 1860, 1866, 1872, 1878, 1884, 1890, 1896, 1902,
+ 1908, 1914, 1920, 1926, 1932, 1937, 1942, 1947, 1952, 1957,
+ 1962, 1967, 1972, 1977, 1982, 1987, 1992, 1998, 2004, 2010,
+ 2016, 2022, 2028, 2034, 2040, 2046, 2052, 2058, 2064, 2070,
+ 2076, 2082, 2088, 2094, 2100, 2106, 2112, 2118, 2124, 2130,
+ 2136, 2142, 2148, 2154, 2160, 2166, 2172, 2178, 2184, 2190,
+ 2196, 2202, 2208, 2214, 2220, 2226, 2232, 2238, 2244, 2250,
+ 2256, 2262, 2268, 2274, 2280, 2286, 2291, 2296, 2301, 2306,
+ 2311, 2316, 2321, 2326, 2331, 2336, 2341, 2346, 2351, 2356,
+ 2364, 2372, 2380, 2388, 2396, 2404, 2412, 2420, 2428, 2436,
+ 2444, 2452, 2460, 2465, 2470, 2475, 2480, 2485, 2490, 2495,
+ 2500, 2505, 2510, 2515, 2520, 2525, 2530, 2535, 2540, 2548,
+ 2556, 2561, 2566, 2571, 2579, 2584, 2589, 2594, 2602, 2607,
+ 2612, 2617, 2625, 2630, 2635, 2640, 2645, 2650, 2658, 2663,
+ 2671, 2676, 2684, 2689, 2697, 2702, 2710, 2715, 2723, 2728,
+ 2736, 2741, 2746, 2751, 2756, 2761, 2766, 2771, 2776, 2781,
+ 2786, 2791, 2796, 2801, 2806, 2811, 2819, 2824, 2829, 2834,
+ 2842, 2847, 2852, 2857, 2865, 2870, 2875, 2880, 2888, 2893,
+ 2898, 2903, 2911, 2916, 2921, 2926, 2934, 2939, 2944, 2949,
+ 2957, 2962, 2967, 2972, 2980, 2985, 2990, 2995, 3003, 3008,
+ 3013, 3018, 3026, 3031, 3036, 3041, 3049, 3054, 3059, 3064,
+ 3072, 3077, 3082, 3087, 3095, 3100, 3105, 3110, 3118, 3123,
+ 3128, 3133, 3141, 3146, 3151, 3157, 3163, 3169, 3175, 3184,
+ 3193, 3199, 3205, 3211, 3217, 3223, 3228, 3244, 3249, 3254,
+ 3262, 3262, 3273, 3273, 3283, 3286, 3299, 3321, 3348, 3352,
+ 3358, 3363, 3374, 3377, 3383, 3392, 3395, 3401, 3405, 3406,
+ 3412, 3413, 3414, 3415, 3416, 3417, 3418, 3422, 3423, 3427,
+ 3423, 3439, 3440, 3444, 3444, 3451, 3451, 3465, 3468, 3476,
+ 3484, 3495, 3496, 3500, 3503, 3509, 3516, 3520, 3528, 3532,
+ 3545, 3548, 3554, 3554, 3574, 3577, 3583, 3595, 3607, 3610,
+ 3616, 3616, 3631, 3631, 3647, 3647, 3668, 3671, 3677, 3680,
+ 3686, 3690, 3697, 3702, 3707, 3714, 3717, 3726, 3730, 3739,
+ 3742, 3745, 3753, 3753, 3775, 3781, 3784, 3789, 3792
+};
+#endif
+
+#if YYDEBUG || YYERROR_VERBOSE || 1
+/* YYTNAME[SYMBOL-NUM] -- String name of the symbol SYMBOL-NUM.
+ First, the terminals, then, starting at YYNTOKENS, nonterminals. */
+static const char *const yytname[] =
+{
+ "$end", "error", "$undefined", "ATTRIBUTE", "VARYING", "FLOAT16_T",
+ "FLOAT", "FLOAT32_T", "DOUBLE", "FLOAT64_T", "CONST", "BOOL", "INT",
+ "UINT", "INT64_T", "UINT64_T", "INT32_T", "UINT32_T", "INT16_T",
+ "UINT16_T", "INT8_T", "UINT8_T", "BREAK", "CONTINUE", "DO", "ELSE",
+ "FOR", "IF", "DISCARD", "RETURN", "SWITCH", "CASE", "DEFAULT",
+ "SUBROUTINE", "BVEC2", "BVEC3", "BVEC4", "IVEC2", "IVEC3", "IVEC4",
+ "UVEC2", "UVEC3", "UVEC4", "I64VEC2", "I64VEC3", "I64VEC4", "U64VEC2",
+ "U64VEC3", "U64VEC4", "I32VEC2", "I32VEC3", "I32VEC4", "U32VEC2",
+ "U32VEC3", "U32VEC4", "I16VEC2", "I16VEC3", "I16VEC4", "U16VEC2",
+ "U16VEC3", "U16VEC4", "I8VEC2", "I8VEC3", "I8VEC4", "U8VEC2", "U8VEC3",
+ "U8VEC4", "VEC2", "VEC3", "VEC4", "MAT2", "MAT3", "MAT4", "CENTROID",
+ "IN", "OUT", "INOUT", "UNIFORM", "PATCH", "SAMPLE", "BUFFER", "SHARED",
+ "NONUNIFORM", "PAYLOADNV", "PAYLOADINNV", "HITATTRNV", "CALLDATANV",
+ "CALLDATAINNV", "COHERENT", "VOLATILE", "RESTRICT", "READONLY",
+ "WRITEONLY", "DEVICECOHERENT", "QUEUEFAMILYCOHERENT",
+ "WORKGROUPCOHERENT", "SUBGROUPCOHERENT", "NONPRIVATE", "DVEC2", "DVEC3",
+ "DVEC4", "DMAT2", "DMAT3", "DMAT4", "F16VEC2", "F16VEC3", "F16VEC4",
+ "F16MAT2", "F16MAT3", "F16MAT4", "F32VEC2", "F32VEC3", "F32VEC4",
+ "F32MAT2", "F32MAT3", "F32MAT4", "F64VEC2", "F64VEC3", "F64VEC4",
+ "F64MAT2", "F64MAT3", "F64MAT4", "NOPERSPECTIVE", "FLAT", "SMOOTH",
+ "LAYOUT", "EXPLICITINTERPAMD", "PERVERTEXNV", "PERPRIMITIVENV",
+ "PERVIEWNV", "PERTASKNV", "MAT2X2", "MAT2X3", "MAT2X4", "MAT3X2",
+ "MAT3X3", "MAT3X4", "MAT4X2", "MAT4X3", "MAT4X4", "DMAT2X2", "DMAT2X3",
+ "DMAT2X4", "DMAT3X2", "DMAT3X3", "DMAT3X4", "DMAT4X2", "DMAT4X3",
+ "DMAT4X4", "F16MAT2X2", "F16MAT2X3", "F16MAT2X4", "F16MAT3X2",
+ "F16MAT3X3", "F16MAT3X4", "F16MAT4X2", "F16MAT4X3", "F16MAT4X4",
+ "F32MAT2X2", "F32MAT2X3", "F32MAT2X4", "F32MAT3X2", "F32MAT3X3",
+ "F32MAT3X4", "F32MAT4X2", "F32MAT4X3", "F32MAT4X4", "F64MAT2X2",
+ "F64MAT2X3", "F64MAT2X4", "F64MAT3X2", "F64MAT3X3", "F64MAT3X4",
+ "F64MAT4X2", "F64MAT4X3", "F64MAT4X4", "ATOMIC_UINT", "ACCSTRUCTNV",
+ "FCOOPMATNV", "SAMPLER1D", "SAMPLER2D", "SAMPLER3D", "SAMPLERCUBE",
+ "SAMPLER1DSHADOW", "SAMPLER2DSHADOW", "SAMPLERCUBESHADOW",
+ "SAMPLER1DARRAY", "SAMPLER2DARRAY", "SAMPLER1DARRAYSHADOW",
+ "SAMPLER2DARRAYSHADOW", "ISAMPLER1D", "ISAMPLER2D", "ISAMPLER3D",
+ "ISAMPLERCUBE", "ISAMPLER1DARRAY", "ISAMPLER2DARRAY", "USAMPLER1D",
+ "USAMPLER2D", "USAMPLER3D", "USAMPLERCUBE", "USAMPLER1DARRAY",
+ "USAMPLER2DARRAY", "SAMPLER2DRECT", "SAMPLER2DRECTSHADOW",
+ "ISAMPLER2DRECT", "USAMPLER2DRECT", "SAMPLERBUFFER", "ISAMPLERBUFFER",
+ "USAMPLERBUFFER", "SAMPLERCUBEARRAY", "SAMPLERCUBEARRAYSHADOW",
+ "ISAMPLERCUBEARRAY", "USAMPLERCUBEARRAY", "SAMPLER2DMS", "ISAMPLER2DMS",
+ "USAMPLER2DMS", "SAMPLER2DMSARRAY", "ISAMPLER2DMSARRAY",
+ "USAMPLER2DMSARRAY", "SAMPLEREXTERNALOES", "SAMPLEREXTERNAL2DY2YEXT",
+ "F16SAMPLER1D", "F16SAMPLER2D", "F16SAMPLER3D", "F16SAMPLER2DRECT",
+ "F16SAMPLERCUBE", "F16SAMPLER1DARRAY", "F16SAMPLER2DARRAY",
+ "F16SAMPLERCUBEARRAY", "F16SAMPLERBUFFER", "F16SAMPLER2DMS",
+ "F16SAMPLER2DMSARRAY", "F16SAMPLER1DSHADOW", "F16SAMPLER2DSHADOW",
+ "F16SAMPLER1DARRAYSHADOW", "F16SAMPLER2DARRAYSHADOW",
+ "F16SAMPLER2DRECTSHADOW", "F16SAMPLERCUBESHADOW",
+ "F16SAMPLERCUBEARRAYSHADOW", "SAMPLER", "SAMPLERSHADOW", "TEXTURE1D",
+ "TEXTURE2D", "TEXTURE3D", "TEXTURECUBE", "TEXTURE1DARRAY",
+ "TEXTURE2DARRAY", "ITEXTURE1D", "ITEXTURE2D", "ITEXTURE3D",
+ "ITEXTURECUBE", "ITEXTURE1DARRAY", "ITEXTURE2DARRAY", "UTEXTURE1D",
+ "UTEXTURE2D", "UTEXTURE3D", "UTEXTURECUBE", "UTEXTURE1DARRAY",
+ "UTEXTURE2DARRAY", "TEXTURE2DRECT", "ITEXTURE2DRECT", "UTEXTURE2DRECT",
+ "TEXTUREBUFFER", "ITEXTUREBUFFER", "UTEXTUREBUFFER", "TEXTURECUBEARRAY",
+ "ITEXTURECUBEARRAY", "UTEXTURECUBEARRAY", "TEXTURE2DMS", "ITEXTURE2DMS",
+ "UTEXTURE2DMS", "TEXTURE2DMSARRAY", "ITEXTURE2DMSARRAY",
+ "UTEXTURE2DMSARRAY", "F16TEXTURE1D", "F16TEXTURE2D", "F16TEXTURE3D",
+ "F16TEXTURE2DRECT", "F16TEXTURECUBE", "F16TEXTURE1DARRAY",
+ "F16TEXTURE2DARRAY", "F16TEXTURECUBEARRAY", "F16TEXTUREBUFFER",
+ "F16TEXTURE2DMS", "F16TEXTURE2DMSARRAY", "SUBPASSINPUT",
+ "SUBPASSINPUTMS", "ISUBPASSINPUT", "ISUBPASSINPUTMS", "USUBPASSINPUT",
+ "USUBPASSINPUTMS", "F16SUBPASSINPUT", "F16SUBPASSINPUTMS", "IMAGE1D",
+ "IIMAGE1D", "UIMAGE1D", "IMAGE2D", "IIMAGE2D", "UIMAGE2D", "IMAGE3D",
+ "IIMAGE3D", "UIMAGE3D", "IMAGE2DRECT", "IIMAGE2DRECT", "UIMAGE2DRECT",
+ "IMAGECUBE", "IIMAGECUBE", "UIMAGECUBE", "IMAGEBUFFER", "IIMAGEBUFFER",
+ "UIMAGEBUFFER", "IMAGE1DARRAY", "IIMAGE1DARRAY", "UIMAGE1DARRAY",
+ "IMAGE2DARRAY", "IIMAGE2DARRAY", "UIMAGE2DARRAY", "IMAGECUBEARRAY",
+ "IIMAGECUBEARRAY", "UIMAGECUBEARRAY", "IMAGE2DMS", "IIMAGE2DMS",
+ "UIMAGE2DMS", "IMAGE2DMSARRAY", "IIMAGE2DMSARRAY", "UIMAGE2DMSARRAY",
+ "F16IMAGE1D", "F16IMAGE2D", "F16IMAGE3D", "F16IMAGE2DRECT",
+ "F16IMAGECUBE", "F16IMAGE1DARRAY", "F16IMAGE2DARRAY",
+ "F16IMAGECUBEARRAY", "F16IMAGEBUFFER", "F16IMAGE2DMS",
+ "F16IMAGE2DMSARRAY", "STRUCT", "VOID", "WHILE", "IDENTIFIER",
+ "TYPE_NAME", "FLOATCONSTANT", "DOUBLECONSTANT", "INT16CONSTANT",
+ "UINT16CONSTANT", "INT32CONSTANT", "UINT32CONSTANT", "INTCONSTANT",
+ "UINTCONSTANT", "INT64CONSTANT", "UINT64CONSTANT", "BOOLCONSTANT",
+ "FLOAT16CONSTANT", "LEFT_OP", "RIGHT_OP", "INC_OP", "DEC_OP", "LE_OP",
+ "GE_OP", "EQ_OP", "NE_OP", "AND_OP", "OR_OP", "XOR_OP", "MUL_ASSIGN",
+ "DIV_ASSIGN", "ADD_ASSIGN", "MOD_ASSIGN", "LEFT_ASSIGN", "RIGHT_ASSIGN",
+ "AND_ASSIGN", "XOR_ASSIGN", "OR_ASSIGN", "SUB_ASSIGN", "LEFT_PAREN",
+ "RIGHT_PAREN", "LEFT_BRACKET", "RIGHT_BRACKET", "LEFT_BRACE",
+ "RIGHT_BRACE", "DOT", "COMMA", "COLON", "EQUAL", "SEMICOLON", "BANG",
+ "DASH", "TILDE", "PLUS", "STAR", "SLASH", "PERCENT", "LEFT_ANGLE",
+ "RIGHT_ANGLE", "VERTICAL_BAR", "CARET", "AMPERSAND", "QUESTION",
+ "INVARIANT", "PRECISE", "HIGH_PRECISION", "MEDIUM_PRECISION",
+ "LOW_PRECISION", "PRECISION", "PACKED", "RESOURCE", "SUPERP", "$accept",
+ "variable_identifier", "primary_expression", "postfix_expression",
+ "integer_expression", "function_call", "function_call_or_method",
+ "function_call_generic", "function_call_header_no_parameters",
+ "function_call_header_with_parameters", "function_call_header",
+ "function_identifier", "unary_expression", "unary_operator",
+ "multiplicative_expression", "additive_expression", "shift_expression",
+ "relational_expression", "equality_expression", "and_expression",
+ "exclusive_or_expression", "inclusive_or_expression",
+ "logical_and_expression", "logical_xor_expression",
+ "logical_or_expression", "conditional_expression", "$@1",
+ "assignment_expression", "assignment_operator", "expression",
+ "constant_expression", "declaration", "block_structure", "$@2",
+ "identifier_list", "function_prototype", "function_declarator",
+ "function_header_with_parameters", "function_header",
+ "parameter_declarator", "parameter_declaration",
+ "parameter_type_specifier", "init_declarator_list", "single_declaration",
+ "fully_specified_type", "invariant_qualifier", "interpolation_qualifier",
+ "layout_qualifier", "layout_qualifier_id_list", "layout_qualifier_id",
+ "precise_qualifier", "type_qualifier", "single_type_qualifier",
+ "storage_qualifier", "non_uniform_qualifier", "type_name_list",
+ "type_specifier", "array_specifier", "type_parameter_specifier_opt",
+ "type_parameter_specifier", "type_parameter_specifier_list",
+ "type_specifier_nonarray", "precision_qualifier", "struct_specifier",
+ "$@3", "$@4", "struct_declaration_list", "struct_declaration",
+ "struct_declarator_list", "struct_declarator", "initializer",
+ "initializer_list", "declaration_statement", "statement",
+ "simple_statement", "compound_statement", "$@5", "$@6",
+ "statement_no_new_scope", "statement_scoped", "$@7", "$@8",
+ "compound_statement_no_new_scope", "statement_list",
+ "expression_statement", "selection_statement",
+ "selection_statement_nonattributed", "selection_rest_statement",
+ "condition", "switch_statement", "switch_statement_nonattributed", "$@9",
+ "switch_statement_list", "case_label", "iteration_statement",
+ "iteration_statement_nonattributed", "$@10", "$@11", "$@12",
+ "for_init_statement", "conditionopt", "for_rest_statement",
+ "jump_statement", "translation_unit", "external_declaration",
+ "function_definition", "$@13", "attribute", "attribute_list",
+ "single_attribute", YY_NULLPTR
+};
+#endif
+
+# ifdef YYPRINT
+/* YYTOKNUM[NUM] -- (External) token number corresponding to the
+ (internal) symbol number NUM (which must be that of a token). */
+static const yytype_uint16 yytoknum[] =
+{
+ 0, 256, 257, 258, 259, 260, 261, 262, 263, 264,
+ 265, 266, 267, 268, 269, 270, 271, 272, 273, 274,
+ 275, 276, 277, 278, 279, 280, 281, 282, 283, 284,
+ 285, 286, 287, 288, 289, 290, 291, 292, 293, 294,
+ 295, 296, 297, 298, 299, 300, 301, 302, 303, 304,
+ 305, 306, 307, 308, 309, 310, 311, 312, 313, 314,
+ 315, 316, 317, 318, 319, 320, 321, 322, 323, 324,
+ 325, 326, 327, 328, 329, 330, 331, 332, 333, 334,
+ 335, 336, 337, 338, 339, 340, 341, 342, 343, 344,
+ 345, 346, 347, 348, 349, 350, 351, 352, 353, 354,
+ 355, 356, 357, 358, 359, 360, 361, 362, 363, 364,
+ 365, 366, 367, 368, 369, 370, 371, 372, 373, 374,
+ 375, 376, 377, 378, 379, 380, 381, 382, 383, 384,
+ 385, 386, 387, 388, 389, 390, 391, 392, 393, 394,
+ 395, 396, 397, 398, 399, 400, 401, 402, 403, 404,
+ 405, 406, 407, 408, 409, 410, 411, 412, 413, 414,
+ 415, 416, 417, 418, 419, 420, 421, 422, 423, 424,
+ 425, 426, 427, 428, 429, 430, 431, 432, 433, 434,
+ 435, 436, 437, 438, 439, 440, 441, 442, 443, 444,
+ 445, 446, 447, 448, 449, 450, 451, 452, 453, 454,
+ 455, 456, 457, 458, 459, 460, 461, 462, 463, 464,
+ 465, 466, 467, 468, 469, 470, 471, 472, 473, 474,
+ 475, 476, 477, 478, 479, 480, 481, 482, 483, 484,
+ 485, 486, 487, 488, 489, 490, 491, 492, 493, 494,
+ 495, 496, 497, 498, 499, 500, 501, 502, 503, 504,
+ 505, 506, 507, 508, 509, 510, 511, 512, 513, 514,
+ 515, 516, 517, 518, 519, 520, 521, 522, 523, 524,
+ 525, 526, 527, 528, 529, 530, 531, 532, 533, 534,
+ 535, 536, 537, 538, 539, 540, 541, 542, 543, 544,
+ 545, 546, 547, 548, 549, 550, 551, 552, 553, 554,
+ 555, 556, 557, 558, 559, 560, 561, 562, 563, 564,
+ 565, 566, 567, 568, 569, 570, 571, 572, 573, 574,
+ 575, 576, 577, 578, 579, 580, 581, 582, 583, 584,
+ 585, 586, 587, 588, 589, 590, 591, 592, 593, 594,
+ 595, 596, 597, 598, 599, 600, 601, 602, 603, 604,
+ 605, 606, 607, 608, 609, 610, 611, 612, 613, 614,
+ 615, 616, 617, 618, 619, 620, 621, 622, 623, 624,
+ 625, 626, 627, 628, 629, 630, 631, 632, 633, 634,
+ 635, 636, 637, 638, 639, 640, 641, 642, 643, 644,
+ 645, 646, 647, 648, 649, 650, 651, 652, 653, 654,
+ 655, 656, 657, 658, 659, 660, 661, 662
+};
+# endif
+
+#define YYPACT_NINF -659
+
+#define yypact_value_is_default(Yystate) \
+ (!!((Yystate) == (-659)))
+
+#define YYTABLE_NINF -524
+
+#define yytable_value_is_error(Yytable_value) \
+ 0
+
+ /* YYPACT[STATE-NUM] -- Index in YYTABLE of the portion describing
+ STATE-NUM. */
+static const yytype_int16 yypact[] =
+{
+ 3535, -659, -659, -659, -659, -659, -659, -659, -659, -659,
+ -659, -659, -659, -659, -659, -659, -659, -659, -659, -659,
+ -331, -659, -659, -659, -659, -659, -659, -659, -659, -659,
+ -659, -659, -659, -659, -659, -659, -659, -659, -659, -659,
+ -659, -659, -659, -659, -659, -659, -659, -659, -659, -659,
+ -659, -659, -659, -659, -659, -659, -659, -659, -659, -659,
+ -659, -659, -659, -659, -659, -659, -659, -659, -659, -659,
+ -659, -659, -659, -659, -659, -659, -659, -659, -659, -659,
+ -659, -659, -659, -659, -659, -659, -659, -659, -659, -659,
+ -659, -659, -659, -659, -659, -659, -659, -659, -659, -659,
+ -659, -659, -659, -659, -659, -659, -659, -659, -659, -659,
+ -659, -659, -324, -659, -659, -659, -659, -659, -659, -659,
+ -659, -659, -659, -659, -659, -659, -659, -659, -659, -659,
+ -659, -659, -659, -659, -659, -659, -659, -659, -659, -659,
+ -659, -659, -659, -659, -659, -659, -659, -659, -659, -659,
+ -659, -659, -659, -659, -659, -659, -659, -659, -659, -659,
+ -659, -659, -659, -659, -659, -659, -659, -659, -659, -659,
+ -659, -659, -659, -659, -659, -659, -659, -659, -659, -659,
+ -659, -659, -659, -659, -659, -659, -659, -659, -659, -659,
+ -659, -659, -659, -659, -659, -659, -659, -659, -659, -659,
+ -659, -659, -659, -659, -659, -659, -659, -659, -659, -659,
+ -659, -659, -659, -659, -659, -659, -659, -659, -659, -659,
+ -659, -659, -659, -659, -659, -659, -659, -659, -659, -659,
+ -659, -659, -659, -659, -659, -659, -659, -659, -659, -659,
+ -659, -659, -659, -659, -659, -659, -659, -659, -659, -659,
+ -659, -659, -659, -659, -659, -659, -659, -659, -659, -659,
+ -659, -659, -659, -659, -659, -659, -659, -659, -659, -659,
+ -659, -659, -659, -659, -659, -659, -659, -659, -659, -659,
+ -659, -659, -659, -659, -659, -659, -659, -659, -659, -659,
+ -659, -659, -659, -659, -659, -659, -659, -659, -659, -659,
+ -659, -659, -659, -659, -659, -659, -659, -659, -659, -659,
+ -659, -659, -659, -659, -659, -659, -659, -659, -659, -659,
+ -659, -659, -659, -659, -319, -659, -659, -659, -659, -659,
+ -659, -659, -659, -256, -659, -314, -351, -309, -306, 5942,
+ -257, -659, -217, -659, -659, -659, -659, 4338, -659, -659,
+ -659, -659, -241, -659, -659, 721, -659, -659, -204, -71,
+ -219, -659, 9007, -349, -659, -659, -215, -659, 5942, -659,
+ -659, -659, 5942, -178, -172, -659, -337, -267, -659, -659,
+ -659, 8237, -207, -659, -659, -659, -659, -341, -659, -211,
+ -330, -659, -659, 5942, -210, 6697, -659, -322, 1123, -659,
+ -659, -659, -659, -207, -328, -659, 7082, -304, -659, -163,
+ -659, -252, -659, -659, -659, -659, -659, -659, -659, -659,
+ -659, -659, -659, -659, -659, 8237, 8237, 8237, -659, -659,
+ -659, -659, -659, -659, -303, -659, -659, -659, -196, -299,
+ 8622, -194, -659, 8237, -659, -659, -355, -195, -659, -157,
+ 8237, -659, -71, 5942, 5942, -155, 4739, -659, -659, -659,
+ -659, -242, -236, -249, -335, -206, -191, -187, -209, -149,
+ -150, -333, -162, 7467, -659, -170, -168, -659, -154, -153,
+ -167, 7852, -152, 8237, -159, -148, -151, -160, -659, -659,
+ -274, -659, -659, -251, -659, -351, -147, -144, -659, -659,
+ -659, -659, 1525, -659, -659, -659, -659, -659, -659, -659,
+ -659, -659, -19, -195, 7082, -302, 7082, -659, -659, 7082,
+ 5942, -659, -115, -659, -659, -659, -292, -659, -659, 8237,
+ -108, -659, -659, 8237, -143, -659, -659, -659, 8237, -659,
+ -659, -659, -659, -659, 5140, -155, -207, -250, -659, -659,
+ -659, 8237, 8237, 8237, 8237, 8237, 8237, 8237, 8237, 8237,
+ 8237, 8237, 8237, 8237, 8237, 8237, 8237, 8237, 8237, 8237,
+ -659, -659, -659, -142, -659, -659, 1927, -659, 8237, -659,
+ -659, -245, 8237, -226, -659, -659, -106, -659, 1927, -659,
+ -659, -659, -659, -659, -659, -659, -659, -659, -659, -659,
+ 8237, 8237, -659, -659, -659, -659, -659, -659, -659, 7082,
+ -659, -238, -659, 5541, -659, -659, -141, -140, -659, -659,
+ -659, -659, -244, -195, -155, -659, -659, -659, -659, -242,
+ -242, -236, -236, -249, -249, -249, -249, -335, -335, -206,
+ -191, -187, -209, -149, -150, 8237, -659, -104, 3133, -263,
+ -659, -260, -659, 3937, -136, -297, -659, 1927, -659, -659,
+ -659, -659, 6312, -659, -659, -659, -659, -224, -135, -659,
+ -659, 3937, -138, -659, -140, -97, 5942, -132, 8237, -133,
+ -106, -134, -659, -659, 8237, 8237, -659, -137, -129, 224,
+ -128, 2731, -659, -127, -131, 2329, -126, -659, -659, -659,
+ -659, -255, 8237, 2329, -138, -659, -659, 1927, 7082, -659,
+ -659, -659, -659, -130, -140, -659, -659, 1927, -123, -659,
+ -659, -659
+};
+
+ /* YYDEFACT[STATE-NUM] -- Default reduction number in state STATE-NUM.
+ Performed when YYTABLE does not specify something else to do. Zero
+ means the default is an error. */
+static const yytype_uint16 yydefact[] =
+{
+ 0, 157, 158, 202, 200, 203, 201, 204, 156, 215,
+ 205, 206, 213, 214, 211, 212, 209, 210, 207, 208,
+ 183, 231, 232, 233, 234, 235, 236, 249, 250, 251,
+ 246, 247, 248, 261, 262, 263, 243, 244, 245, 258,
+ 259, 260, 240, 241, 242, 255, 256, 257, 237, 238,
+ 239, 252, 253, 254, 216, 217, 218, 264, 265, 266,
+ 162, 160, 161, 159, 165, 163, 164, 166, 172, 185,
+ 168, 169, 167, 170, 171, 173, 179, 180, 181, 182,
+ 174, 175, 176, 177, 178, 219, 220, 221, 276, 277,
+ 278, 222, 223, 224, 288, 289, 290, 225, 226, 227,
+ 300, 301, 302, 228, 229, 230, 312, 313, 314, 134,
+ 133, 132, 0, 135, 136, 137, 138, 139, 267, 268,
+ 269, 270, 271, 272, 273, 274, 275, 279, 280, 281,
+ 282, 283, 284, 285, 286, 287, 291, 292, 293, 294,
+ 295, 296, 297, 298, 299, 303, 304, 305, 306, 307,
+ 308, 309, 310, 311, 315, 316, 317, 318, 319, 320,
+ 321, 322, 323, 325, 324, 484, 326, 327, 328, 329,
+ 330, 331, 332, 333, 334, 335, 336, 352, 353, 354,
+ 355, 356, 357, 359, 360, 361, 362, 363, 364, 366,
+ 367, 370, 371, 372, 374, 375, 337, 338, 358, 365,
+ 376, 378, 379, 380, 382, 383, 474, 475, 339, 340,
+ 341, 368, 342, 346, 347, 350, 373, 377, 381, 343,
+ 344, 348, 349, 369, 345, 351, 384, 385, 386, 388,
+ 390, 392, 394, 396, 400, 401, 402, 403, 404, 405,
+ 407, 408, 409, 410, 411, 412, 414, 416, 417, 418,
+ 420, 421, 398, 406, 413, 422, 424, 425, 426, 428,
+ 429, 387, 389, 391, 415, 393, 395, 397, 399, 419,
+ 423, 427, 476, 477, 480, 481, 482, 483, 478, 479,
+ 430, 432, 433, 434, 436, 437, 438, 440, 441, 442,
+ 444, 445, 446, 448, 449, 450, 452, 453, 454, 456,
+ 457, 458, 460, 461, 462, 464, 465, 466, 468, 469,
+ 470, 472, 473, 431, 435, 439, 443, 447, 455, 459,
+ 463, 451, 467, 471, 0, 199, 486, 571, 131, 146,
+ 487, 488, 489, 0, 570, 0, 572, 0, 108, 107,
+ 0, 119, 124, 153, 152, 150, 154, 0, 147, 149,
+ 155, 129, 195, 151, 485, 0, 567, 569, 0, 0,
+ 0, 492, 0, 0, 96, 93, 0, 106, 0, 115,
+ 109, 117, 0, 118, 0, 94, 125, 0, 99, 148,
+ 130, 0, 188, 194, 1, 568, 186, 0, 145, 143,
+ 0, 141, 490, 0, 0, 0, 97, 0, 0, 573,
+ 110, 114, 116, 112, 120, 111, 0, 126, 102, 0,
+ 100, 0, 2, 12, 13, 10, 11, 4, 5, 6,
+ 7, 8, 9, 15, 14, 0, 0, 0, 42, 41,
+ 43, 40, 3, 17, 36, 19, 24, 25, 0, 0,
+ 29, 0, 197, 0, 35, 33, 0, 189, 184, 0,
+ 0, 140, 0, 0, 0, 0, 0, 494, 95, 190,
+ 44, 48, 51, 54, 59, 62, 64, 66, 68, 70,
+ 72, 74, 0, 0, 98, 0, 0, 552, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 518, 527, 531,
+ 44, 77, 90, 0, 507, 0, 155, 129, 510, 529,
+ 509, 508, 0, 511, 512, 533, 513, 540, 514, 515,
+ 548, 516, 0, 113, 0, 121, 0, 502, 128, 0,
+ 0, 104, 0, 101, 37, 38, 0, 21, 22, 0,
+ 0, 27, 26, 0, 199, 30, 32, 39, 0, 196,
+ 187, 92, 144, 142, 0, 0, 500, 0, 498, 493,
+ 495, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 75, 191, 192, 0, 563, 562, 0, 554, 0, 566,
+ 564, 0, 0, 0, 547, 550, 0, 517, 0, 80,
+ 81, 83, 82, 85, 86, 87, 88, 89, 84, 79,
+ 0, 0, 532, 528, 530, 534, 541, 549, 123, 0,
+ 505, 0, 127, 0, 105, 16, 0, 23, 20, 31,
+ 198, 491, 0, 501, 0, 496, 45, 46, 47, 50,
+ 49, 52, 53, 57, 58, 55, 56, 60, 61, 63,
+ 65, 67, 69, 71, 73, 0, 193, 0, 0, 0,
+ 565, 0, 546, 0, 577, 0, 575, 519, 78, 91,
+ 122, 503, 0, 103, 18, 497, 499, 0, 0, 557,
+ 556, 559, 525, 542, 538, 0, 0, 0, 0, 0,
+ 0, 0, 504, 506, 0, 0, 558, 0, 0, 537,
+ 0, 0, 535, 0, 0, 0, 0, 574, 576, 520,
+ 76, 0, 560, 0, 525, 524, 526, 544, 0, 522,
+ 551, 521, 578, 0, 561, 555, 536, 545, 0, 539,
+ 553, 543
+};
+
+ /* YYPGOTO[NTERM-NUM]. */
+static const yytype_int16 yypgoto[] =
+{
+ -659, -659, -659, -659, -659, -659, -659, -659, -659, -659,
+ -659, -659, -364, -659, -389, -385, -457, -384, -310, -307,
+ -305, -308, -301, -298, -659, -386, -659, -390, -659, -415,
+ -418, 1, -659, -659, -659, 2, -659, -659, -659, -110,
+ -105, -107, -659, -659, -628, -659, -659, -659, -659, -188,
+ -659, -336, -343, -659, 6, -659, 0, -334, -659, -659,
+ -659, -659, -67, -659, -659, -659, -431, -437, -277, -350,
+ -501, -659, -375, -488, -658, -414, -659, -659, -428, -426,
+ -659, -659, -87, -568, -368, -659, -231, -659, -388, -659,
+ -230, -659, -659, -659, -659, -228, -659, -659, -659, -659,
+ -659, -659, -659, -659, -70, -659, -659, -659, -659, -394
+};
+
+ /* YYDEFGOTO[NTERM-NUM]. */
+static const yytype_int16 yydefgoto[] =
+{
+ -1, 432, 433, 434, 616, 435, 436, 437, 438, 439,
+ 440, 441, 490, 443, 461, 462, 463, 464, 465, 466,
+ 467, 468, 469, 470, 471, 491, 645, 492, 600, 493,
+ 542, 494, 335, 520, 411, 495, 337, 338, 339, 369,
+ 370, 371, 340, 341, 342, 343, 344, 345, 390, 391,
+ 346, 347, 348, 349, 444, 387, 445, 397, 382, 383,
+ 446, 352, 353, 354, 453, 393, 456, 457, 547, 548,
+ 518, 611, 498, 499, 500, 501, 588, 681, 710, 689,
+ 690, 691, 711, 502, 503, 504, 505, 692, 677, 506,
+ 507, 693, 718, 508, 509, 510, 653, 576, 648, 671,
+ 687, 688, 511, 355, 356, 357, 366, 512, 655, 656
+};
+
+ /* YYTABLE[YYPACT[STATE-NUM]] -- What to do in state STATE-NUM. If
+ positive, shift that token. If negative, reduce the rule whose
+ number is the opposite. If YYTABLE_NINF, syntax error. */
+static const yytype_int16 yytable[] =
+{
+ 351, 334, 336, 372, 379, 477, 350, 478, 479, 472,
+ 388, 482, 526, 608, 604, 610, 517, 442, 612, 550,
+ 657, 360, 544, 558, 559, 675, 363, 538, 395, 379,
+ 569, 460, 372, 706, 365, 448, 396, 709, 405, 539,
+ 395, 449, 407, 675, 358, 709, 451, 406, 447, 395,
+ 535, 359, 452, 527, 528, 473, 514, 454, 560, 561,
+ 361, 524, 525, 474, 541, 570, 581, 367, 583, 513,
+ 515, 364, -34, 473, 529, 473, 368, 532, 530, 537,
+ 519, 679, 609, 533, 615, 680, 460, 573, 647, 613,
+ 601, 589, 590, 591, 592, 593, 594, 595, 596, 597,
+ 598, 633, 634, 635, 636, 556, 557, 550, 660, 460,
+ 599, 379, 408, 672, 617, 409, 673, 454, 410, 601,
+ 454, 713, 601, 376, 517, 374, 517, 601, 375, 517,
+ 522, 601, 624, 523, 602, 625, 386, 601, 624, 717,
+ 650, 665, 661, 619, 662, 330, 331, 332, 551, 552,
+ 553, 554, 381, 555, 562, 563, 601, 652, 601, 684,
+ 392, 683, 403, 649, 398, 629, 630, 651, 404, 604,
+ 395, 631, 632, 450, 620, 458, 550, 521, 637, 638,
+ 531, 536, 473, 540, 454, 546, 566, 626, 627, 628,
+ 460, 460, 460, 460, 460, 460, 460, 460, 460, 460,
+ 460, 460, 460, 460, 460, 460, 564, 719, 454, 565,
+ 658, 659, 623, 567, 568, 574, 571, 575, 579, 517,
+ 587, 577, 578, 582, 584, 614, 586, 585, -35, 604,
+ 667, -33, 618, -28, 654, 668, 646, 664, 674, 678,
+ 685, -523, 601, 694, 695, 697, 699, 703, 702, 704,
+ 712, 487, 707, 708, 639, 720, 674, 721, 640, 642,
+ 696, 641, 401, 400, 543, 402, 362, 643, 622, 389,
+ 701, 644, 517, 669, 666, 715, 705, 454, 716, 399,
+ 670, 605, 606, 686, 607, 385, 698, 714, 0, 0,
+ 0, 0, 541, 0, 700, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 460, 0, 0, 676, 517, 0,
+ 485, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 379, 0, 676, 0, 0, 0, 373,
+ 0, 0, 0, 0, 0, 350, 0, 380, 0, 0,
+ 0, 0, 0, 350, 0, 351, 334, 336, 0, 0,
+ 0, 350, 394, 0, 0, 0, 0, 0, 373, 0,
+ 0, 0, 373, 0, 350, 0, 0, 0, 350, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 455, 0, 0, 0, 0, 497, 350,
+ 0, 0, 0, 0, 496, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 455, 545, 0, 455, 0, 0, 350,
+ 350, 0, 350, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 497, 0, 0, 0, 0, 0, 496, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 455, 0, 0, 0, 0, 0, 350, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 455, 0, 0, 0, 0, 0,
+ 350, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 497, 0, 0, 0,
+ 0, 0, 496, 0, 0, 0, 0, 0, 497, 0,
+ 0, 0, 0, 0, 496, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 455, 0, 0, 0, 0, 0, 350,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 497, 0,
+ 0, 0, 0, 497, 496, 0, 0, 497, 0, 496,
+ 0, 0, 0, 496, 0, 0, 0, 0, 0, 0,
+ 0, 497, 0, 0, 0, 0, 380, 496, 0, 0,
+ 0, 0, 350, 0, 0, 0, 0, 0, 0, 0,
+ 0, 497, 0, 0, 0, 497, 0, 496, 0, 0,
+ 0, 496, 0, 497, 0, 0, 0, 497, 0, 496,
+ 0, 0, 0, 496, 0, 0, 0, 497, 0, 0,
+ 0, 384, 0, 496, 1, 2, 3, 4, 5, 6,
+ 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
+ 17, 18, 19, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 20, 21, 22, 23, 24, 25,
+ 26, 27, 28, 29, 30, 31, 32, 33, 34, 35,
+ 36, 37, 38, 39, 40, 41, 42, 43, 44, 45,
+ 46, 47, 48, 49, 50, 51, 52, 53, 54, 55,
+ 56, 57, 58, 59, 60, 61, 62, 63, 64, 65,
+ 66, 67, 68, 69, 70, 71, 72, 73, 74, 75,
+ 76, 77, 78, 79, 80, 81, 82, 83, 84, 85,
+ 86, 87, 88, 89, 90, 91, 92, 93, 94, 95,
+ 96, 97, 98, 99, 100, 101, 102, 103, 104, 105,
+ 106, 107, 108, 109, 110, 111, 112, 113, 114, 115,
+ 116, 117, 118, 119, 120, 121, 122, 123, 124, 125,
+ 126, 127, 128, 129, 130, 131, 132, 133, 134, 135,
+ 136, 137, 138, 139, 140, 141, 142, 143, 144, 145,
+ 146, 147, 148, 149, 150, 151, 152, 153, 154, 155,
+ 156, 157, 158, 159, 160, 161, 162, 163, 164, 165,
+ 166, 167, 168, 169, 170, 171, 172, 173, 174, 175,
+ 176, 177, 178, 179, 180, 181, 182, 183, 184, 185,
+ 186, 187, 188, 189, 190, 191, 192, 193, 194, 195,
+ 196, 197, 198, 199, 200, 201, 202, 203, 204, 205,
+ 206, 207, 208, 209, 210, 211, 212, 213, 214, 215,
+ 216, 217, 218, 219, 220, 221, 222, 223, 224, 225,
+ 226, 227, 228, 229, 230, 231, 232, 233, 234, 235,
+ 236, 237, 238, 239, 240, 241, 242, 243, 244, 245,
+ 246, 247, 248, 249, 250, 251, 252, 253, 254, 255,
+ 256, 257, 258, 259, 260, 261, 262, 263, 264, 265,
+ 266, 267, 268, 269, 270, 271, 272, 273, 274, 275,
+ 276, 277, 278, 279, 280, 281, 282, 283, 284, 285,
+ 286, 287, 288, 289, 290, 291, 292, 293, 294, 295,
+ 296, 297, 298, 299, 300, 301, 302, 303, 304, 305,
+ 306, 307, 308, 309, 310, 311, 312, 313, 314, 315,
+ 316, 317, 318, 319, 320, 321, 322, 323, 324, 325,
+ 0, 0, 326, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 327, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 328, 329, 330, 331, 332, 333, 1, 2, 3, 4,
+ 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
+ 15, 16, 17, 18, 19, 475, 476, 477, 0, 478,
+ 479, 480, 481, 482, 483, 484, 20, 21, 22, 23,
+ 24, 25, 26, 27, 28, 29, 30, 31, 32, 33,
+ 34, 35, 36, 37, 38, 39, 40, 41, 42, 43,
+ 44, 45, 46, 47, 48, 49, 50, 51, 52, 53,
+ 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
+ 64, 65, 66, 67, 68, 69, 70, 71, 72, 73,
+ 74, 75, 76, 77, 78, 79, 80, 81, 82, 83,
+ 84, 85, 86, 87, 88, 89, 90, 91, 92, 93,
+ 94, 95, 96, 97, 98, 99, 100, 101, 102, 103,
+ 104, 105, 106, 107, 108, 109, 110, 111, 112, 113,
+ 114, 115, 116, 117, 118, 119, 120, 121, 122, 123,
+ 124, 125, 126, 127, 128, 129, 130, 131, 132, 133,
+ 134, 135, 136, 137, 138, 139, 140, 141, 142, 143,
+ 144, 145, 146, 147, 148, 149, 150, 151, 152, 153,
+ 154, 155, 156, 157, 158, 159, 160, 161, 162, 163,
+ 164, 165, 166, 167, 168, 169, 170, 171, 172, 173,
+ 174, 175, 176, 177, 178, 179, 180, 181, 182, 183,
+ 184, 185, 186, 187, 188, 189, 190, 191, 192, 193,
+ 194, 195, 196, 197, 198, 199, 200, 201, 202, 203,
+ 204, 205, 206, 207, 208, 209, 210, 211, 212, 213,
+ 214, 215, 216, 217, 218, 219, 220, 221, 222, 223,
+ 224, 225, 226, 227, 228, 229, 230, 231, 232, 233,
+ 234, 235, 236, 237, 238, 239, 240, 241, 242, 243,
+ 244, 245, 246, 247, 248, 249, 250, 251, 252, 253,
+ 254, 255, 256, 257, 258, 259, 260, 261, 262, 263,
+ 264, 265, 266, 267, 268, 269, 270, 271, 272, 273,
+ 274, 275, 276, 277, 278, 279, 280, 281, 282, 283,
+ 284, 285, 286, 287, 288, 289, 290, 291, 292, 293,
+ 294, 295, 296, 297, 298, 299, 300, 301, 302, 303,
+ 304, 305, 306, 307, 308, 309, 310, 311, 312, 313,
+ 314, 315, 316, 317, 318, 319, 320, 321, 322, 323,
+ 324, 325, 485, 412, 326, 413, 414, 415, 416, 417,
+ 418, 419, 420, 421, 422, 423, 424, 0, 0, 425,
+ 426, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 427, 0,
+ 486, 0, 487, 488, 0, 0, 0, 0, 489, 428,
+ 429, 430, 431, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 328, 329, 330, 331, 332, 333, 1, 2,
+ 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
+ 13, 14, 15, 16, 17, 18, 19, 475, 476, 477,
+ 0, 478, 479, 480, 481, 482, 483, 484, 20, 21,
+ 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+ 32, 33, 34, 35, 36, 37, 38, 39, 40, 41,
+ 42, 43, 44, 45, 46, 47, 48, 49, 50, 51,
+ 52, 53, 54, 55, 56, 57, 58, 59, 60, 61,
+ 62, 63, 64, 65, 66, 67, 68, 69, 70, 71,
+ 72, 73, 74, 75, 76, 77, 78, 79, 80, 81,
+ 82, 83, 84, 85, 86, 87, 88, 89, 90, 91,
+ 92, 93, 94, 95, 96, 97, 98, 99, 100, 101,
+ 102, 103, 104, 105, 106, 107, 108, 109, 110, 111,
+ 112, 113, 114, 115, 116, 117, 118, 119, 120, 121,
+ 122, 123, 124, 125, 126, 127, 128, 129, 130, 131,
+ 132, 133, 134, 135, 136, 137, 138, 139, 140, 141,
+ 142, 143, 144, 145, 146, 147, 148, 149, 150, 151,
+ 152, 153, 154, 155, 156, 157, 158, 159, 160, 161,
+ 162, 163, 164, 165, 166, 167, 168, 169, 170, 171,
+ 172, 173, 174, 175, 176, 177, 178, 179, 180, 181,
+ 182, 183, 184, 185, 186, 187, 188, 189, 190, 191,
+ 192, 193, 194, 195, 196, 197, 198, 199, 200, 201,
+ 202, 203, 204, 205, 206, 207, 208, 209, 210, 211,
+ 212, 213, 214, 215, 216, 217, 218, 219, 220, 221,
+ 222, 223, 224, 225, 226, 227, 228, 229, 230, 231,
+ 232, 233, 234, 235, 236, 237, 238, 239, 240, 241,
+ 242, 243, 244, 245, 246, 247, 248, 249, 250, 251,
+ 252, 253, 254, 255, 256, 257, 258, 259, 260, 261,
+ 262, 263, 264, 265, 266, 267, 268, 269, 270, 271,
+ 272, 273, 274, 275, 276, 277, 278, 279, 280, 281,
+ 282, 283, 284, 285, 286, 287, 288, 289, 290, 291,
+ 292, 293, 294, 295, 296, 297, 298, 299, 300, 301,
+ 302, 303, 304, 305, 306, 307, 308, 309, 310, 311,
+ 312, 313, 314, 315, 316, 317, 318, 319, 320, 321,
+ 322, 323, 324, 325, 485, 412, 326, 413, 414, 415,
+ 416, 417, 418, 419, 420, 421, 422, 423, 424, 0,
+ 0, 425, 426, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 427, 0, 486, 0, 487, 603, 0, 0, 0, 0,
+ 489, 428, 429, 430, 431, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 328, 329, 330, 331, 332, 333,
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
+ 11, 12, 13, 14, 15, 16, 17, 18, 19, 475,
+ 476, 477, 0, 478, 479, 480, 481, 482, 483, 484,
+ 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
+ 30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
+ 40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
+ 50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
+ 60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
+ 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
+ 80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
+ 90, 91, 92, 93, 94, 95, 96, 97, 98, 99,
+ 100, 101, 102, 103, 104, 105, 106, 107, 108, 109,
+ 110, 111, 112, 113, 114, 115, 116, 117, 118, 119,
+ 120, 121, 122, 123, 124, 125, 126, 127, 128, 129,
+ 130, 131, 132, 133, 134, 135, 136, 137, 138, 139,
+ 140, 141, 142, 143, 144, 145, 146, 147, 148, 149,
+ 150, 151, 152, 153, 154, 155, 156, 157, 158, 159,
+ 160, 161, 162, 163, 164, 165, 166, 167, 168, 169,
+ 170, 171, 172, 173, 174, 175, 176, 177, 178, 179,
+ 180, 181, 182, 183, 184, 185, 186, 187, 188, 189,
+ 190, 191, 192, 193, 194, 195, 196, 197, 198, 199,
+ 200, 201, 202, 203, 204, 205, 206, 207, 208, 209,
+ 210, 211, 212, 213, 214, 215, 216, 217, 218, 219,
+ 220, 221, 222, 223, 224, 225, 226, 227, 228, 229,
+ 230, 231, 232, 233, 234, 235, 236, 237, 238, 239,
+ 240, 241, 242, 243, 244, 245, 246, 247, 248, 249,
+ 250, 251, 252, 253, 254, 255, 256, 257, 258, 259,
+ 260, 261, 262, 263, 264, 265, 266, 267, 268, 269,
+ 270, 271, 272, 273, 274, 275, 276, 277, 278, 279,
+ 280, 281, 282, 283, 284, 285, 286, 287, 288, 289,
+ 290, 291, 292, 293, 294, 295, 296, 297, 298, 299,
+ 300, 301, 302, 303, 304, 305, 306, 307, 308, 309,
+ 310, 311, 312, 313, 314, 315, 316, 317, 318, 319,
+ 320, 321, 322, 323, 324, 325, 485, 412, 326, 413,
+ 414, 415, 416, 417, 418, 419, 420, 421, 422, 423,
+ 424, 0, 0, 425, 426, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 427, 0, 486, 0, 487, 0, 0, 0,
+ 0, 0, 489, 428, 429, 430, 431, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 328, 329, 330, 331,
+ 332, 333, 1, 2, 3, 4, 5, 6, 7, 8,
+ 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
+ 19, 475, 476, 477, 0, 478, 479, 480, 481, 482,
+ 483, 484, 20, 21, 22, 23, 24, 25, 26, 27,
+ 28, 29, 30, 31, 32, 33, 34, 35, 36, 37,
+ 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
+ 48, 49, 50, 51, 52, 53, 54, 55, 56, 57,
+ 58, 59, 60, 61, 62, 63, 64, 65, 66, 67,
+ 68, 69, 70, 71, 72, 73, 74, 75, 76, 77,
+ 78, 79, 80, 81, 82, 83, 84, 85, 86, 87,
+ 88, 89, 90, 91, 92, 93, 94, 95, 96, 97,
+ 98, 99, 100, 101, 102, 103, 104, 105, 106, 107,
+ 108, 109, 110, 111, 112, 113, 114, 115, 116, 117,
+ 118, 119, 120, 121, 122, 123, 124, 125, 126, 127,
+ 128, 129, 130, 131, 132, 133, 134, 135, 136, 137,
+ 138, 139, 140, 141, 142, 143, 144, 145, 146, 147,
+ 148, 149, 150, 151, 152, 153, 154, 155, 156, 157,
+ 158, 159, 160, 161, 162, 163, 164, 165, 166, 167,
+ 168, 169, 170, 171, 172, 173, 174, 175, 176, 177,
+ 178, 179, 180, 181, 182, 183, 184, 185, 186, 187,
+ 188, 189, 190, 191, 192, 193, 194, 195, 196, 197,
+ 198, 199, 200, 201, 202, 203, 204, 205, 206, 207,
+ 208, 209, 210, 211, 212, 213, 214, 215, 216, 217,
+ 218, 219, 220, 221, 222, 223, 224, 225, 226, 227,
+ 228, 229, 230, 231, 232, 233, 234, 235, 236, 237,
+ 238, 239, 240, 241, 242, 243, 244, 245, 246, 247,
+ 248, 249, 250, 251, 252, 253, 254, 255, 256, 257,
+ 258, 259, 260, 261, 262, 263, 264, 265, 266, 267,
+ 268, 269, 270, 271, 272, 273, 274, 275, 276, 277,
+ 278, 279, 280, 281, 282, 283, 284, 285, 286, 287,
+ 288, 289, 290, 291, 292, 293, 294, 295, 296, 297,
+ 298, 299, 300, 301, 302, 303, 304, 305, 306, 307,
+ 308, 309, 310, 311, 312, 313, 314, 315, 316, 317,
+ 318, 319, 320, 321, 322, 323, 324, 325, 485, 412,
+ 326, 413, 414, 415, 416, 417, 418, 419, 420, 421,
+ 422, 423, 424, 0, 0, 425, 426, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 427, 0, 486, 0, 398, 0,
+ 0, 0, 0, 0, 489, 428, 429, 430, 431, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 328, 329,
+ 330, 331, 332, 333, 1, 2, 3, 4, 5, 6,
+ 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
+ 17, 18, 19, 475, 476, 477, 0, 478, 479, 480,
+ 481, 482, 483, 484, 20, 21, 22, 23, 24, 25,
+ 26, 27, 28, 29, 30, 31, 32, 33, 34, 35,
+ 36, 37, 38, 39, 40, 41, 42, 43, 44, 45,
+ 46, 47, 48, 49, 50, 51, 52, 53, 54, 55,
+ 56, 57, 58, 59, 60, 61, 62, 63, 64, 65,
+ 66, 67, 68, 69, 70, 71, 72, 73, 74, 75,
+ 76, 77, 78, 79, 80, 81, 82, 83, 84, 85,
+ 86, 87, 88, 89, 90, 91, 92, 93, 94, 95,
+ 96, 97, 98, 99, 100, 101, 102, 103, 104, 105,
+ 106, 107, 108, 109, 110, 111, 112, 113, 114, 115,
+ 116, 117, 118, 119, 120, 121, 122, 123, 124, 125,
+ 126, 127, 128, 129, 130, 131, 132, 133, 134, 135,
+ 136, 137, 138, 139, 140, 141, 142, 143, 144, 145,
+ 146, 147, 148, 149, 150, 151, 152, 153, 154, 155,
+ 156, 157, 158, 159, 160, 161, 162, 163, 164, 165,
+ 166, 167, 168, 169, 170, 171, 172, 173, 174, 175,
+ 176, 177, 178, 179, 180, 181, 182, 183, 184, 185,
+ 186, 187, 188, 189, 190, 191, 192, 193, 194, 195,
+ 196, 197, 198, 199, 200, 201, 202, 203, 204, 205,
+ 206, 207, 208, 209, 210, 211, 212, 213, 214, 215,
+ 216, 217, 218, 219, 220, 221, 222, 223, 224, 225,
+ 226, 227, 228, 229, 230, 231, 232, 233, 234, 235,
+ 236, 237, 238, 239, 240, 241, 242, 243, 244, 245,
+ 246, 247, 248, 249, 250, 251, 252, 253, 254, 255,
+ 256, 257, 258, 259, 260, 261, 262, 263, 264, 265,
+ 266, 267, 268, 269, 270, 271, 272, 273, 274, 275,
+ 276, 277, 278, 279, 280, 281, 282, 283, 284, 285,
+ 286, 287, 288, 289, 290, 291, 292, 293, 294, 295,
+ 296, 297, 298, 299, 300, 301, 302, 303, 304, 305,
+ 306, 307, 308, 309, 310, 311, 312, 313, 314, 315,
+ 316, 317, 318, 319, 320, 321, 322, 323, 324, 325,
+ 485, 412, 326, 413, 414, 415, 416, 417, 418, 419,
+ 420, 421, 422, 423, 424, 0, 0, 425, 426, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 427, 0, 486, 0,
+ 0, 0, 0, 0, 0, 0, 489, 428, 429, 430,
+ 431, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 328, 329, 330, 331, 332, 333, 1, 2, 3, 4,
+ 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
+ 15, 16, 17, 18, 19, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 20, 21, 22, 23,
+ 24, 25, 26, 27, 28, 29, 30, 31, 32, 33,
+ 34, 35, 36, 37, 38, 39, 40, 41, 42, 43,
+ 44, 45, 46, 47, 48, 49, 50, 51, 52, 53,
+ 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
+ 64, 65, 66, 67, 68, 69, 70, 71, 72, 73,
+ 74, 75, 76, 77, 78, 79, 80, 81, 82, 83,
+ 84, 85, 86, 87, 88, 89, 90, 91, 92, 93,
+ 94, 95, 96, 97, 98, 99, 100, 101, 102, 103,
+ 104, 105, 106, 107, 108, 109, 110, 111, 112, 113,
+ 114, 115, 116, 117, 118, 119, 120, 121, 122, 123,
+ 124, 125, 126, 127, 128, 129, 130, 131, 132, 133,
+ 134, 135, 136, 137, 138, 139, 140, 141, 142, 143,
+ 144, 145, 146, 147, 148, 149, 150, 151, 152, 153,
+ 154, 155, 156, 157, 158, 159, 160, 161, 162, 163,
+ 164, 165, 166, 167, 168, 169, 170, 171, 172, 173,
+ 174, 175, 176, 177, 178, 179, 180, 181, 182, 183,
+ 184, 185, 186, 187, 188, 189, 190, 191, 192, 193,
+ 194, 195, 196, 197, 198, 199, 200, 201, 202, 203,
+ 204, 205, 206, 207, 208, 209, 210, 211, 212, 213,
+ 214, 215, 216, 217, 218, 219, 220, 221, 222, 223,
+ 224, 225, 226, 227, 228, 229, 230, 231, 232, 233,
+ 234, 235, 236, 237, 238, 239, 240, 241, 242, 243,
+ 244, 245, 246, 247, 248, 249, 250, 251, 252, 253,
+ 254, 255, 256, 257, 258, 259, 260, 261, 262, 263,
+ 264, 265, 266, 267, 268, 269, 270, 271, 272, 273,
+ 274, 275, 276, 277, 278, 279, 280, 281, 282, 283,
+ 284, 285, 286, 287, 288, 289, 290, 291, 292, 293,
+ 294, 295, 296, 297, 298, 299, 300, 301, 302, 303,
+ 304, 305, 306, 307, 308, 309, 310, 311, 312, 313,
+ 314, 315, 316, 317, 318, 319, 320, 321, 322, 323,
+ 324, 325, 0, 412, 326, 413, 414, 415, 416, 417,
+ 418, 419, 420, 421, 422, 423, 424, 0, 0, 425,
+ 426, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 427, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 489, 428,
+ 429, 430, 431, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 328, 329, 330, 331, 332, 333, 1, 2,
+ 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
+ 13, 14, 15, 16, 17, 18, 19, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 20, 21,
+ 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+ 32, 33, 34, 35, 36, 37, 38, 39, 40, 41,
+ 42, 43, 44, 45, 46, 47, 48, 49, 50, 51,
+ 52, 53, 54, 55, 56, 57, 58, 59, 60, 61,
+ 62, 63, 64, 65, 66, 67, 68, 69, 70, 71,
+ 72, 73, 74, 75, 76, 77, 78, 79, 80, 81,
+ 82, 83, 84, 85, 86, 87, 88, 89, 90, 91,
+ 92, 93, 94, 95, 96, 97, 98, 99, 100, 101,
+ 102, 103, 104, 105, 106, 107, 108, 109, 110, 111,
+ 112, 113, 114, 115, 116, 117, 118, 119, 120, 121,
+ 122, 123, 124, 125, 126, 127, 128, 129, 130, 131,
+ 132, 133, 134, 135, 136, 137, 138, 139, 140, 141,
+ 142, 143, 144, 145, 146, 147, 148, 149, 150, 151,
+ 152, 153, 154, 155, 156, 157, 158, 159, 160, 161,
+ 162, 163, 164, 165, 166, 167, 168, 169, 170, 171,
+ 172, 173, 174, 175, 176, 177, 178, 179, 180, 181,
+ 182, 183, 184, 185, 186, 187, 188, 189, 190, 191,
+ 192, 193, 194, 195, 196, 197, 198, 199, 200, 201,
+ 202, 203, 204, 205, 206, 207, 208, 209, 210, 211,
+ 212, 213, 214, 215, 216, 217, 218, 219, 220, 221,
+ 222, 223, 224, 225, 226, 227, 228, 229, 230, 231,
+ 232, 233, 234, 235, 236, 237, 238, 239, 240, 241,
+ 242, 243, 244, 245, 246, 247, 248, 249, 250, 251,
+ 252, 253, 254, 255, 256, 257, 258, 259, 260, 261,
+ 262, 263, 264, 265, 266, 267, 268, 269, 270, 271,
+ 272, 273, 274, 275, 276, 277, 278, 279, 280, 281,
+ 282, 283, 284, 285, 286, 287, 288, 289, 290, 291,
+ 292, 293, 294, 295, 296, 297, 298, 299, 300, 301,
+ 302, 303, 304, 305, 306, 307, 308, 309, 310, 311,
+ 312, 313, 314, 315, 316, 317, 318, 319, 320, 321,
+ 322, 323, 324, 325, 0, 0, 326, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 327, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 328, 329, 330, 331, 332, 333,
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
+ 11, 12, 13, 14, 15, 16, 17, 18, 19, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
+ 30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
+ 40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
+ 50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
+ 60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
+ 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
+ 80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
+ 90, 91, 92, 93, 94, 95, 96, 97, 98, 99,
+ 100, 101, 102, 103, 104, 105, 106, 107, 108, 109,
+ 110, 111, 112, 113, 114, 115, 116, 117, 118, 119,
+ 120, 121, 122, 123, 124, 125, 126, 127, 128, 129,
+ 130, 131, 132, 133, 134, 135, 136, 137, 138, 139,
+ 140, 141, 142, 143, 144, 145, 146, 147, 148, 149,
+ 150, 151, 152, 153, 154, 155, 156, 157, 158, 159,
+ 160, 161, 162, 163, 164, 165, 166, 167, 168, 169,
+ 170, 171, 172, 173, 174, 175, 176, 177, 178, 179,
+ 180, 181, 182, 183, 184, 185, 186, 187, 188, 189,
+ 190, 191, 192, 193, 194, 195, 196, 197, 198, 199,
+ 200, 201, 202, 203, 204, 205, 206, 207, 208, 209,
+ 210, 211, 212, 213, 214, 215, 216, 217, 218, 219,
+ 220, 221, 222, 223, 224, 225, 226, 227, 228, 229,
+ 230, 231, 232, 233, 234, 235, 236, 237, 238, 239,
+ 240, 241, 242, 243, 244, 245, 246, 247, 248, 249,
+ 250, 251, 252, 253, 254, 255, 256, 257, 258, 259,
+ 260, 261, 262, 263, 264, 265, 266, 267, 268, 269,
+ 270, 271, 272, 273, 274, 275, 276, 277, 278, 279,
+ 280, 281, 282, 283, 284, 285, 286, 287, 288, 289,
+ 290, 291, 292, 293, 294, 295, 296, 297, 298, 299,
+ 300, 301, 302, 303, 304, 305, 306, 307, 308, 309,
+ 310, 311, 312, 313, 314, 315, 316, 317, 318, 319,
+ 320, 321, 322, 323, 324, 325, 0, 412, 326, 413,
+ 414, 415, 416, 417, 418, 419, 420, 421, 422, 423,
+ 424, 0, 0, 425, 426, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 427, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 428, 429, 430, 431, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 328, 329, 330, 331,
+ 332, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+ 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 20, 21, 22, 23, 24, 25, 26, 27, 28,
+ 29, 30, 31, 32, 33, 34, 35, 36, 37, 38,
+ 39, 40, 41, 42, 43, 44, 45, 46, 47, 48,
+ 49, 50, 51, 52, 53, 54, 55, 56, 57, 58,
+ 59, 60, 61, 62, 63, 64, 65, 66, 67, 68,
+ 69, 70, 71, 72, 73, 74, 75, 76, 77, 78,
+ 79, 80, 81, 82, 83, 84, 85, 86, 87, 88,
+ 89, 90, 91, 92, 93, 94, 95, 96, 97, 98,
+ 99, 100, 101, 102, 103, 104, 105, 106, 107, 108,
+ 109, 110, 111, 112, 113, 114, 115, 116, 117, 118,
+ 119, 120, 121, 122, 123, 124, 125, 126, 127, 128,
+ 129, 130, 131, 132, 133, 134, 135, 136, 137, 138,
+ 139, 140, 141, 142, 143, 144, 145, 146, 147, 148,
+ 149, 150, 151, 152, 153, 154, 155, 156, 157, 158,
+ 159, 160, 161, 162, 163, 164, 165, 166, 167, 168,
+ 169, 170, 171, 172, 173, 174, 175, 176, 177, 178,
+ 179, 180, 181, 182, 183, 184, 185, 186, 187, 188,
+ 189, 190, 191, 192, 193, 194, 195, 196, 197, 198,
+ 199, 200, 201, 202, 203, 204, 205, 206, 207, 208,
+ 209, 210, 211, 212, 213, 214, 215, 216, 217, 218,
+ 219, 220, 221, 222, 223, 224, 225, 226, 227, 228,
+ 229, 230, 231, 232, 233, 234, 235, 236, 237, 238,
+ 239, 240, 241, 242, 243, 244, 245, 246, 247, 248,
+ 249, 250, 251, 252, 253, 254, 255, 256, 257, 258,
+ 259, 260, 261, 262, 263, 264, 265, 266, 267, 268,
+ 269, 270, 271, 272, 273, 274, 275, 276, 277, 278,
+ 279, 280, 281, 282, 283, 284, 285, 286, 287, 288,
+ 289, 290, 291, 292, 293, 294, 295, 296, 297, 298,
+ 299, 300, 301, 302, 303, 304, 305, 306, 307, 308,
+ 309, 310, 311, 312, 313, 314, 315, 316, 317, 318,
+ 319, 320, 321, 322, 323, 324, 325, 0, 377, 326,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 378, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 328, 329, 330,
+ 331, 332, 1, 2, 3, 4, 5, 6, 7, 8,
+ 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
+ 19, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 20, 21, 22, 23, 24, 25, 26, 27,
+ 28, 29, 30, 31, 32, 33, 34, 35, 36, 37,
+ 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
+ 48, 49, 50, 51, 52, 53, 54, 55, 56, 57,
+ 58, 59, 60, 61, 62, 63, 64, 65, 66, 67,
+ 68, 69, 70, 71, 72, 73, 74, 75, 76, 77,
+ 78, 79, 80, 81, 82, 83, 84, 85, 86, 87,
+ 88, 89, 90, 91, 92, 93, 94, 95, 96, 97,
+ 98, 99, 100, 101, 102, 103, 104, 105, 106, 107,
+ 108, 109, 110, 111, 112, 113, 114, 115, 116, 117,
+ 118, 119, 120, 121, 122, 123, 124, 125, 126, 127,
+ 128, 129, 130, 131, 132, 133, 134, 135, 136, 137,
+ 138, 139, 140, 141, 142, 143, 144, 145, 146, 147,
+ 148, 149, 150, 151, 152, 153, 154, 155, 156, 157,
+ 158, 159, 160, 161, 162, 163, 164, 165, 166, 167,
+ 168, 169, 170, 171, 172, 173, 174, 175, 176, 177,
+ 178, 179, 180, 181, 182, 183, 184, 185, 186, 187,
+ 188, 189, 190, 191, 192, 193, 194, 195, 196, 197,
+ 198, 199, 200, 201, 202, 203, 204, 205, 206, 207,
+ 208, 209, 210, 211, 212, 213, 214, 215, 216, 217,
+ 218, 219, 220, 221, 222, 223, 224, 225, 226, 227,
+ 228, 229, 230, 231, 232, 233, 234, 235, 236, 237,
+ 238, 239, 240, 241, 242, 243, 244, 245, 246, 247,
+ 248, 249, 250, 251, 252, 253, 254, 255, 256, 257,
+ 258, 259, 260, 261, 262, 263, 264, 265, 266, 267,
+ 268, 269, 270, 271, 272, 273, 274, 275, 276, 277,
+ 278, 279, 280, 281, 282, 283, 284, 285, 286, 287,
+ 288, 289, 290, 291, 292, 293, 294, 295, 296, 297,
+ 298, 299, 300, 301, 302, 303, 304, 305, 306, 307,
+ 308, 309, 310, 311, 312, 313, 314, 315, 316, 317,
+ 318, 319, 320, 321, 322, 323, 324, 325, 0, 0,
+ 326, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 549,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 328, 329,
+ 330, 331, 332, 1, 2, 3, 4, 5, 6, 7,
+ 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,
+ 18, 19, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 20, 21, 22, 23, 24, 25, 26,
+ 27, 28, 29, 30, 31, 32, 33, 34, 35, 36,
+ 37, 38, 39, 40, 41, 42, 43, 44, 45, 46,
+ 47, 48, 49, 50, 51, 52, 53, 54, 55, 56,
+ 57, 58, 59, 60, 61, 62, 63, 64, 65, 66,
+ 67, 68, 69, 70, 71, 72, 73, 74, 75, 76,
+ 77, 78, 79, 80, 81, 82, 83, 84, 85, 86,
+ 87, 88, 89, 90, 91, 92, 93, 94, 95, 96,
+ 97, 98, 99, 100, 101, 102, 103, 104, 105, 106,
+ 107, 108, 109, 110, 111, 112, 113, 114, 115, 116,
+ 117, 118, 119, 120, 121, 122, 123, 124, 125, 126,
+ 127, 128, 129, 130, 131, 132, 133, 134, 135, 136,
+ 137, 138, 139, 140, 141, 142, 143, 144, 145, 146,
+ 147, 148, 149, 150, 151, 152, 153, 154, 155, 156,
+ 157, 158, 159, 160, 161, 162, 163, 164, 165, 166,
+ 167, 168, 169, 170, 171, 172, 173, 174, 175, 176,
+ 177, 178, 179, 180, 181, 182, 183, 184, 185, 186,
+ 187, 188, 189, 190, 191, 192, 193, 194, 195, 196,
+ 197, 198, 199, 200, 201, 202, 203, 204, 205, 206,
+ 207, 208, 209, 210, 211, 212, 213, 214, 215, 216,
+ 217, 218, 219, 220, 221, 222, 223, 224, 225, 226,
+ 227, 228, 229, 230, 231, 232, 233, 234, 235, 236,
+ 237, 238, 239, 240, 241, 242, 243, 244, 245, 246,
+ 247, 248, 249, 250, 251, 252, 253, 254, 255, 256,
+ 257, 258, 259, 260, 261, 262, 263, 264, 265, 266,
+ 267, 268, 269, 270, 271, 272, 273, 274, 275, 276,
+ 277, 278, 279, 280, 281, 282, 283, 284, 285, 286,
+ 287, 288, 289, 290, 291, 292, 293, 294, 295, 296,
+ 297, 298, 299, 300, 301, 302, 303, 304, 305, 306,
+ 307, 308, 309, 310, 311, 312, 313, 314, 315, 316,
+ 317, 318, 319, 320, 321, 322, 323, 324, 325, 0,
+ 0, 326, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 621, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 328,
+ 329, 330, 331, 332, 1, 2, 3, 4, 5, 6,
+ 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
+ 17, 18, 19, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 20, 21, 22, 23, 24, 25,
+ 26, 27, 28, 29, 30, 31, 32, 33, 34, 35,
+ 36, 37, 38, 39, 40, 41, 42, 43, 44, 45,
+ 46, 47, 48, 49, 50, 51, 52, 53, 54, 55,
+ 56, 57, 58, 59, 60, 61, 62, 63, 64, 65,
+ 66, 67, 68, 69, 70, 71, 72, 73, 74, 75,
+ 76, 77, 78, 79, 80, 81, 82, 83, 84, 85,
+ 86, 87, 88, 89, 90, 91, 92, 93, 94, 95,
+ 96, 97, 98, 99, 100, 101, 102, 103, 104, 105,
+ 106, 107, 108, 109, 110, 111, 112, 113, 114, 115,
+ 116, 117, 118, 119, 120, 121, 122, 123, 124, 125,
+ 126, 127, 128, 129, 130, 131, 132, 133, 134, 135,
+ 136, 137, 138, 139, 140, 141, 142, 143, 144, 145,
+ 146, 147, 148, 149, 150, 151, 152, 153, 154, 155,
+ 156, 157, 158, 159, 160, 161, 162, 163, 164, 165,
+ 166, 167, 168, 169, 170, 171, 172, 173, 174, 175,
+ 176, 177, 178, 179, 180, 181, 182, 183, 184, 185,
+ 186, 187, 188, 189, 190, 191, 192, 193, 194, 195,
+ 196, 197, 198, 199, 200, 201, 202, 203, 204, 205,
+ 206, 207, 208, 209, 210, 211, 212, 213, 214, 215,
+ 216, 217, 218, 219, 220, 221, 222, 223, 224, 225,
+ 226, 227, 228, 229, 230, 231, 232, 233, 234, 235,
+ 236, 237, 238, 239, 240, 241, 242, 243, 244, 245,
+ 246, 247, 248, 249, 250, 251, 252, 253, 254, 255,
+ 256, 257, 258, 259, 260, 261, 262, 263, 264, 265,
+ 266, 267, 268, 269, 270, 271, 272, 273, 274, 275,
+ 276, 277, 278, 279, 280, 281, 282, 283, 284, 285,
+ 286, 287, 288, 289, 290, 291, 292, 293, 294, 295,
+ 296, 297, 298, 299, 300, 301, 302, 303, 304, 305,
+ 306, 307, 308, 309, 310, 311, 312, 313, 314, 315,
+ 316, 317, 318, 319, 320, 321, 322, 323, 324, 325,
+ 0, 0, 326, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 663, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 328, 329, 330, 331, 332, 1, 2, 3, 4, 5,
+ 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+ 16, 17, 18, 19, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 20, 21, 22, 23, 24,
+ 25, 26, 27, 28, 29, 30, 31, 32, 33, 34,
+ 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
+ 45, 46, 47, 48, 49, 50, 51, 52, 53, 54,
+ 55, 56, 57, 58, 59, 60, 61, 62, 63, 64,
+ 65, 66, 67, 68, 69, 70, 71, 72, 73, 74,
+ 75, 76, 77, 78, 79, 80, 81, 82, 83, 84,
+ 85, 86, 87, 88, 89, 90, 91, 92, 93, 94,
+ 95, 96, 97, 98, 99, 100, 101, 102, 103, 104,
+ 105, 106, 107, 108, 109, 110, 111, 112, 113, 114,
+ 115, 116, 117, 118, 119, 120, 121, 122, 123, 124,
+ 125, 126, 127, 128, 129, 130, 131, 132, 133, 134,
+ 135, 136, 137, 138, 139, 140, 141, 142, 143, 144,
+ 145, 146, 147, 148, 149, 150, 151, 152, 153, 154,
+ 155, 156, 157, 158, 159, 160, 161, 162, 163, 164,
+ 165, 166, 167, 168, 169, 170, 171, 172, 173, 174,
+ 175, 176, 177, 178, 179, 180, 181, 182, 183, 184,
+ 185, 186, 187, 188, 189, 190, 191, 192, 193, 194,
+ 195, 196, 197, 198, 199, 200, 201, 202, 203, 204,
+ 205, 206, 207, 208, 209, 210, 211, 212, 213, 214,
+ 215, 216, 217, 218, 219, 220, 221, 222, 223, 224,
+ 225, 226, 227, 228, 229, 230, 231, 232, 233, 234,
+ 235, 236, 237, 238, 239, 240, 241, 242, 243, 244,
+ 245, 246, 247, 248, 249, 250, 251, 252, 253, 254,
+ 255, 256, 257, 258, 259, 260, 261, 262, 263, 264,
+ 265, 266, 267, 268, 269, 270, 271, 272, 273, 274,
+ 275, 276, 277, 278, 279, 280, 281, 282, 283, 284,
+ 285, 286, 287, 288, 289, 290, 291, 292, 293, 294,
+ 295, 296, 297, 298, 299, 300, 301, 302, 303, 304,
+ 305, 306, 307, 308, 309, 310, 311, 312, 313, 314,
+ 315, 316, 317, 318, 319, 320, 321, 322, 323, 324,
+ 325, 0, 0, 326, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 3, 4, 5,
+ 6, 7, 0, 9, 10, 11, 12, 13, 14, 15,
+ 16, 17, 18, 19, 0, 0, 0, 0, 0, 0,
+ 0, 328, 329, 330, 331, 332, 21, 22, 23, 24,
+ 25, 26, 27, 28, 29, 30, 31, 32, 33, 34,
+ 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
+ 45, 46, 47, 48, 49, 50, 51, 52, 53, 54,
+ 55, 56, 57, 58, 59, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 69, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 85, 86, 87, 88, 89, 90, 91, 92, 93, 94,
+ 95, 96, 97, 98, 99, 100, 101, 102, 103, 104,
+ 105, 106, 107, 108, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 118, 119, 120, 121, 122, 123, 124,
+ 125, 126, 127, 128, 129, 130, 131, 132, 133, 134,
+ 135, 136, 137, 138, 139, 140, 141, 142, 143, 144,
+ 145, 146, 147, 148, 149, 150, 151, 152, 153, 154,
+ 155, 156, 157, 158, 159, 160, 161, 162, 163, 164,
+ 165, 166, 167, 168, 169, 170, 171, 172, 173, 174,
+ 175, 176, 177, 178, 179, 180, 181, 182, 183, 184,
+ 185, 186, 187, 188, 189, 190, 191, 192, 193, 194,
+ 195, 196, 197, 198, 199, 200, 201, 202, 203, 204,
+ 205, 206, 207, 208, 209, 210, 211, 212, 213, 214,
+ 215, 216, 217, 218, 219, 220, 221, 222, 223, 224,
+ 225, 226, 227, 228, 229, 230, 231, 232, 233, 234,
+ 235, 236, 237, 238, 239, 240, 241, 242, 243, 244,
+ 245, 246, 247, 248, 249, 250, 251, 252, 253, 254,
+ 255, 256, 257, 258, 259, 260, 261, 262, 263, 264,
+ 265, 266, 267, 268, 269, 270, 271, 272, 273, 274,
+ 275, 276, 277, 278, 279, 280, 281, 282, 283, 284,
+ 285, 286, 287, 288, 289, 290, 291, 292, 293, 294,
+ 295, 296, 297, 298, 299, 300, 301, 302, 303, 304,
+ 305, 306, 307, 308, 309, 310, 311, 312, 313, 314,
+ 315, 316, 317, 318, 319, 320, 321, 322, 323, 324,
+ 325, 0, 412, 326, 413, 414, 415, 416, 417, 418,
+ 419, 420, 421, 422, 423, 424, 0, 0, 425, 426,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 427, 0, 0,
+ 0, 516, 682, 0, 0, 0, 0, 0, 428, 429,
+ 430, 431, 3, 4, 5, 6, 7, 0, 9, 10,
+ 11, 12, 13, 14, 15, 16, 17, 18, 19, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 21, 22, 23, 24, 25, 26, 27, 28, 29,
+ 30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
+ 40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
+ 50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 69,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 85, 86, 87, 88, 89,
+ 90, 91, 92, 93, 94, 95, 96, 97, 98, 99,
+ 100, 101, 102, 103, 104, 105, 106, 107, 108, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 118, 119,
+ 120, 121, 122, 123, 124, 125, 126, 127, 128, 129,
+ 130, 131, 132, 133, 134, 135, 136, 137, 138, 139,
+ 140, 141, 142, 143, 144, 145, 146, 147, 148, 149,
+ 150, 151, 152, 153, 154, 155, 156, 157, 158, 159,
+ 160, 161, 162, 163, 164, 165, 166, 167, 168, 169,
+ 170, 171, 172, 173, 174, 175, 176, 177, 178, 179,
+ 180, 181, 182, 183, 184, 185, 186, 187, 188, 189,
+ 190, 191, 192, 193, 194, 195, 196, 197, 198, 199,
+ 200, 201, 202, 203, 204, 205, 206, 207, 208, 209,
+ 210, 211, 212, 213, 214, 215, 216, 217, 218, 219,
+ 220, 221, 222, 223, 224, 225, 226, 227, 228, 229,
+ 230, 231, 232, 233, 234, 235, 236, 237, 238, 239,
+ 240, 241, 242, 243, 244, 245, 246, 247, 248, 249,
+ 250, 251, 252, 253, 254, 255, 256, 257, 258, 259,
+ 260, 261, 262, 263, 264, 265, 266, 267, 268, 269,
+ 270, 271, 272, 273, 274, 275, 276, 277, 278, 279,
+ 280, 281, 282, 283, 284, 285, 286, 287, 288, 289,
+ 290, 291, 292, 293, 294, 295, 296, 297, 298, 299,
+ 300, 301, 302, 303, 304, 305, 306, 307, 308, 309,
+ 310, 311, 312, 313, 314, 315, 316, 317, 318, 319,
+ 320, 321, 322, 323, 324, 325, 0, 412, 326, 413,
+ 414, 415, 416, 417, 418, 419, 420, 421, 422, 423,
+ 424, 0, 0, 425, 426, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 427, 0, 0, 459, 0, 0, 0, 0,
+ 0, 0, 0, 428, 429, 430, 431, 3, 4, 5,
+ 6, 7, 0, 9, 10, 11, 12, 13, 14, 15,
+ 16, 17, 18, 19, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 21, 22, 23, 24,
+ 25, 26, 27, 28, 29, 30, 31, 32, 33, 34,
+ 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
+ 45, 46, 47, 48, 49, 50, 51, 52, 53, 54,
+ 55, 56, 57, 58, 59, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 69, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 85, 86, 87, 88, 89, 90, 91, 92, 93, 94,
+ 95, 96, 97, 98, 99, 100, 101, 102, 103, 104,
+ 105, 106, 107, 108, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 118, 119, 120, 121, 122, 123, 124,
+ 125, 126, 127, 128, 129, 130, 131, 132, 133, 134,
+ 135, 136, 137, 138, 139, 140, 141, 142, 143, 144,
+ 145, 146, 147, 148, 149, 150, 151, 152, 153, 154,
+ 155, 156, 157, 158, 159, 160, 161, 162, 163, 164,
+ 165, 166, 167, 168, 169, 170, 171, 172, 173, 174,
+ 175, 176, 177, 178, 179, 180, 181, 182, 183, 184,
+ 185, 186, 187, 188, 189, 190, 191, 192, 193, 194,
+ 195, 196, 197, 198, 199, 200, 201, 202, 203, 204,
+ 205, 206, 207, 208, 209, 210, 211, 212, 213, 214,
+ 215, 216, 217, 218, 219, 220, 221, 222, 223, 224,
+ 225, 226, 227, 228, 229, 230, 231, 232, 233, 234,
+ 235, 236, 237, 238, 239, 240, 241, 242, 243, 244,
+ 245, 246, 247, 248, 249, 250, 251, 252, 253, 254,
+ 255, 256, 257, 258, 259, 260, 261, 262, 263, 264,
+ 265, 266, 267, 268, 269, 270, 271, 272, 273, 274,
+ 275, 276, 277, 278, 279, 280, 281, 282, 283, 284,
+ 285, 286, 287, 288, 289, 290, 291, 292, 293, 294,
+ 295, 296, 297, 298, 299, 300, 301, 302, 303, 304,
+ 305, 306, 307, 308, 309, 310, 311, 312, 313, 314,
+ 315, 316, 317, 318, 319, 320, 321, 322, 323, 324,
+ 325, 0, 412, 326, 413, 414, 415, 416, 417, 418,
+ 419, 420, 421, 422, 423, 424, 0, 0, 425, 426,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 427, 0, 0,
+ 0, 516, 0, 0, 0, 0, 0, 0, 428, 429,
+ 430, 431, 3, 4, 5, 6, 7, 0, 9, 10,
+ 11, 12, 13, 14, 15, 16, 17, 18, 19, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 21, 22, 23, 24, 25, 26, 27, 28, 29,
+ 30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
+ 40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
+ 50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 69,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 85, 86, 87, 88, 89,
+ 90, 91, 92, 93, 94, 95, 96, 97, 98, 99,
+ 100, 101, 102, 103, 104, 105, 106, 107, 108, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 118, 119,
+ 120, 121, 122, 123, 124, 125, 126, 127, 128, 129,
+ 130, 131, 132, 133, 134, 135, 136, 137, 138, 139,
+ 140, 141, 142, 143, 144, 145, 146, 147, 148, 149,
+ 150, 151, 152, 153, 154, 155, 156, 157, 158, 159,
+ 160, 161, 162, 163, 164, 165, 166, 167, 168, 169,
+ 170, 171, 172, 173, 174, 175, 176, 177, 178, 179,
+ 180, 181, 182, 183, 184, 185, 186, 187, 188, 189,
+ 190, 191, 192, 193, 194, 195, 196, 197, 198, 199,
+ 200, 201, 202, 203, 204, 205, 206, 207, 208, 209,
+ 210, 211, 212, 213, 214, 215, 216, 217, 218, 219,
+ 220, 221, 222, 223, 224, 225, 226, 227, 228, 229,
+ 230, 231, 232, 233, 234, 235, 236, 237, 238, 239,
+ 240, 241, 242, 243, 244, 245, 246, 247, 248, 249,
+ 250, 251, 252, 253, 254, 255, 256, 257, 258, 259,
+ 260, 261, 262, 263, 264, 265, 266, 267, 268, 269,
+ 270, 271, 272, 273, 274, 275, 276, 277, 278, 279,
+ 280, 281, 282, 283, 284, 285, 286, 287, 288, 289,
+ 290, 291, 292, 293, 294, 295, 296, 297, 298, 299,
+ 300, 301, 302, 303, 304, 305, 306, 307, 308, 309,
+ 310, 311, 312, 313, 314, 315, 316, 317, 318, 319,
+ 320, 321, 322, 323, 324, 325, 0, 412, 326, 413,
+ 414, 415, 416, 417, 418, 419, 420, 421, 422, 423,
+ 424, 0, 0, 425, 426, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 427, 0, 0, 572, 0, 0, 0, 0,
+ 0, 0, 0, 428, 429, 430, 431, 3, 4, 5,
+ 6, 7, 0, 9, 10, 11, 12, 13, 14, 15,
+ 16, 17, 18, 19, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 21, 22, 23, 24,
+ 25, 26, 27, 28, 29, 30, 31, 32, 33, 34,
+ 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
+ 45, 46, 47, 48, 49, 50, 51, 52, 53, 54,
+ 55, 56, 57, 58, 59, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 69, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 85, 86, 87, 88, 89, 90, 91, 92, 93, 94,
+ 95, 96, 97, 98, 99, 100, 101, 102, 103, 104,
+ 105, 106, 107, 108, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 118, 119, 120, 121, 122, 123, 124,
+ 125, 126, 127, 128, 129, 130, 131, 132, 133, 134,
+ 135, 136, 137, 138, 139, 140, 141, 142, 143, 144,
+ 145, 146, 147, 148, 149, 150, 151, 152, 153, 154,
+ 155, 156, 157, 158, 159, 160, 161, 162, 163, 164,
+ 165, 166, 167, 168, 169, 170, 171, 172, 173, 174,
+ 175, 176, 177, 178, 179, 180, 181, 182, 183, 184,
+ 185, 186, 187, 188, 189, 190, 191, 192, 193, 194,
+ 195, 196, 197, 198, 199, 200, 201, 202, 203, 204,
+ 205, 206, 207, 208, 209, 210, 211, 212, 213, 214,
+ 215, 216, 217, 218, 219, 220, 221, 222, 223, 224,
+ 225, 226, 227, 228, 229, 230, 231, 232, 233, 234,
+ 235, 236, 237, 238, 239, 240, 241, 242, 243, 244,
+ 245, 246, 247, 248, 249, 250, 251, 252, 253, 254,
+ 255, 256, 257, 258, 259, 260, 261, 262, 263, 264,
+ 265, 266, 267, 268, 269, 270, 271, 272, 273, 274,
+ 275, 276, 277, 278, 279, 280, 281, 282, 283, 284,
+ 285, 286, 287, 288, 289, 290, 291, 292, 293, 294,
+ 295, 296, 297, 298, 299, 300, 301, 302, 303, 304,
+ 305, 306, 307, 308, 309, 310, 311, 312, 313, 314,
+ 315, 316, 317, 318, 319, 320, 321, 322, 323, 324,
+ 325, 0, 412, 326, 413, 414, 415, 416, 417, 418,
+ 419, 420, 421, 422, 423, 424, 0, 0, 425, 426,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 427, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 580, 428, 429,
+ 430, 431, 3, 4, 5, 6, 7, 0, 9, 10,
+ 11, 12, 13, 14, 15, 16, 17, 18, 19, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 21, 22, 23, 24, 25, 26, 27, 28, 29,
+ 30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
+ 40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
+ 50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 69,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 85, 86, 87, 88, 89,
+ 90, 91, 92, 93, 94, 95, 96, 97, 98, 99,
+ 100, 101, 102, 103, 104, 105, 106, 107, 108, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 118, 119,
+ 120, 121, 122, 123, 124, 125, 126, 127, 128, 129,
+ 130, 131, 132, 133, 134, 135, 136, 137, 138, 139,
+ 140, 141, 142, 143, 144, 145, 146, 147, 148, 149,
+ 150, 151, 152, 153, 154, 155, 156, 157, 158, 159,
+ 160, 161, 162, 163, 164, 165, 166, 167, 168, 169,
+ 170, 171, 172, 173, 174, 175, 176, 177, 178, 179,
+ 180, 181, 182, 183, 184, 185, 186, 187, 188, 189,
+ 190, 191, 192, 193, 194, 195, 196, 197, 198, 199,
+ 200, 201, 202, 203, 204, 205, 206, 207, 208, 209,
+ 210, 211, 212, 213, 214, 215, 216, 217, 218, 219,
+ 220, 221, 222, 223, 224, 225, 226, 227, 228, 229,
+ 230, 231, 232, 233, 234, 235, 236, 237, 238, 239,
+ 240, 241, 242, 243, 244, 245, 246, 247, 248, 249,
+ 250, 251, 252, 253, 254, 255, 256, 257, 258, 259,
+ 260, 261, 262, 263, 264, 265, 266, 267, 268, 269,
+ 270, 271, 272, 273, 274, 275, 276, 277, 278, 279,
+ 280, 281, 282, 283, 284, 285, 286, 287, 288, 289,
+ 290, 291, 292, 293, 294, 295, 296, 297, 298, 299,
+ 300, 301, 302, 303, 304, 305, 306, 307, 308, 309,
+ 310, 311, 312, 313, 314, 315, 316, 317, 318, 319,
+ 320, 321, 322, 323, 324, 325, 0, 412, 326, 413,
+ 414, 415, 416, 417, 418, 419, 420, 421, 422, 423,
+ 424, 0, 0, 425, 426, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 427, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 428, 429, 430, 431, 3, 4, 5,
+ 6, 7, 0, 9, 10, 11, 12, 13, 14, 15,
+ 16, 17, 18, 19, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 21, 22, 23, 24,
+ 25, 26, 27, 28, 29, 30, 31, 32, 33, 34,
+ 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
+ 45, 46, 47, 48, 49, 50, 51, 52, 53, 54,
+ 55, 56, 57, 58, 59, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 69, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 85, 86, 87, 88, 89, 90, 91, 92, 93, 94,
+ 95, 96, 97, 98, 99, 100, 101, 102, 103, 104,
+ 105, 106, 107, 108, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 118, 119, 120, 121, 122, 123, 124,
+ 125, 126, 127, 128, 129, 130, 131, 132, 133, 134,
+ 135, 136, 137, 138, 139, 140, 141, 142, 143, 144,
+ 145, 146, 147, 148, 149, 150, 151, 152, 153, 154,
+ 155, 156, 157, 158, 159, 160, 161, 162, 163, 164,
+ 165, 166, 167, 168, 169, 170, 171, 172, 173, 174,
+ 175, 176, 177, 178, 179, 180, 181, 182, 183, 184,
+ 185, 186, 187, 188, 189, 190, 191, 192, 193, 194,
+ 195, 196, 197, 198, 199, 200, 201, 202, 203, 204,
+ 205, 206, 207, 208, 209, 210, 211, 212, 213, 214,
+ 215, 216, 217, 218, 219, 220, 221, 222, 223, 224,
+ 225, 226, 227, 228, 229, 230, 231, 232, 233, 234,
+ 235, 236, 237, 238, 239, 240, 241, 242, 243, 244,
+ 245, 246, 247, 248, 249, 250, 251, 252, 253, 254,
+ 255, 256, 257, 258, 259, 260, 261, 262, 263, 264,
+ 265, 266, 267, 268, 269, 270, 271, 272, 273, 274,
+ 275, 276, 277, 278, 279, 280, 281, 282, 283, 284,
+ 285, 286, 287, 288, 289, 290, 291, 292, 293, 294,
+ 295, 296, 297, 298, 299, 300, 301, 302, 303, 304,
+ 305, 306, 307, 308, 309, 310, 311, 312, 313, 314,
+ 315, 316, 317, 318, 319, 320, 321, 322, 323, 324,
+ 534, 0, 412, 326, 413, 414, 415, 416, 417, 418,
+ 419, 420, 421, 422, 423, 424, 0, 0, 425, 426,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 427, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 428, 429,
+ 430, 431, 3, 4, 5, 6, 7, 0, 9, 10,
+ 11, 12, 13, 14, 15, 16, 17, 18, 19, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 21, 22, 23, 24, 25, 26, 27, 28, 29,
+ 30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
+ 40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
+ 50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 85, 86, 87, 88, 89,
+ 90, 91, 92, 93, 94, 95, 96, 97, 98, 99,
+ 100, 101, 102, 103, 104, 105, 106, 107, 108, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 118, 119,
+ 120, 121, 122, 123, 124, 125, 126, 127, 128, 129,
+ 130, 131, 132, 133, 134, 135, 136, 137, 138, 139,
+ 140, 141, 142, 143, 144, 145, 146, 147, 148, 149,
+ 150, 151, 152, 153, 154, 155, 156, 157, 158, 159,
+ 160, 161, 162, 163, 164, 165, 166, 167, 168, 169,
+ 170, 171, 172, 173, 174, 175, 176, 177, 178, 179,
+ 180, 181, 182, 183, 184, 185, 186, 187, 188, 189,
+ 190, 191, 192, 193, 194, 195, 196, 197, 198, 199,
+ 200, 201, 202, 203, 204, 205, 206, 207, 208, 209,
+ 210, 211, 212, 213, 214, 215, 216, 217, 218, 219,
+ 220, 221, 222, 223, 224, 225, 226, 227, 228, 229,
+ 230, 231, 232, 233, 234, 235, 236, 237, 238, 239,
+ 240, 241, 242, 243, 244, 245, 246, 247, 248, 249,
+ 250, 251, 252, 253, 254, 255, 256, 257, 258, 259,
+ 260, 261, 262, 263, 264, 265, 266, 267, 268, 269,
+ 270, 271, 272, 273, 274, 275, 276, 277, 278, 279,
+ 280, 281, 282, 283, 284, 285, 286, 287, 288, 289,
+ 290, 291, 292, 293, 294, 295, 296, 297, 298, 299,
+ 300, 301, 302, 303, 304, 305, 306, 307, 308, 309,
+ 310, 311, 312, 313, 314, 315, 316, 317, 318, 319,
+ 320, 321, 322, 323, 324, 325, 0, 0, 326
+};
+
+static const yytype_int16 yycheck[] =
+{
+ 0, 0, 0, 339, 347, 24, 0, 26, 27, 395,
+ 81, 30, 427, 514, 502, 516, 406, 381, 519, 456,
+ 588, 340, 453, 358, 359, 653, 340, 382, 377, 372,
+ 363, 395, 368, 691, 385, 376, 385, 695, 375, 394,
+ 377, 382, 376, 671, 375, 703, 376, 384, 382, 377,
+ 440, 375, 382, 356, 357, 377, 384, 393, 393, 394,
+ 379, 425, 426, 385, 450, 398, 481, 376, 483, 403,
+ 404, 385, 375, 377, 377, 377, 382, 376, 381, 443,
+ 384, 378, 384, 382, 376, 382, 450, 473, 576, 520,
+ 382, 365, 366, 367, 368, 369, 370, 371, 372, 373,
+ 374, 558, 559, 560, 561, 354, 355, 544, 609, 473,
+ 384, 454, 379, 376, 529, 382, 376, 453, 385, 382,
+ 456, 376, 382, 340, 514, 382, 516, 382, 385, 519,
+ 382, 382, 382, 385, 385, 385, 340, 382, 382, 707,
+ 385, 385, 380, 533, 382, 401, 402, 403, 390, 391,
+ 392, 387, 393, 389, 360, 361, 382, 383, 382, 383,
+ 379, 662, 340, 578, 379, 554, 555, 582, 340, 657,
+ 377, 556, 557, 384, 538, 385, 613, 340, 562, 563,
+ 376, 375, 377, 340, 520, 340, 395, 551, 552, 553,
+ 554, 555, 556, 557, 558, 559, 560, 561, 562, 563,
+ 564, 565, 566, 567, 568, 569, 397, 708, 544, 396,
+ 600, 601, 546, 362, 364, 385, 378, 385, 385, 609,
+ 380, 375, 375, 375, 383, 340, 377, 375, 375, 717,
+ 645, 375, 340, 376, 340, 339, 378, 378, 653, 375,
+ 375, 379, 382, 340, 376, 378, 380, 376, 385, 25,
+ 376, 379, 379, 384, 564, 385, 671, 380, 565, 567,
+ 678, 566, 372, 368, 452, 372, 333, 568, 545, 340,
+ 685, 569, 662, 648, 624, 703, 690, 613, 704, 366,
+ 648, 512, 512, 671, 512, 355, 680, 702, -1, -1,
+ -1, -1, 678, -1, 684, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, 678, -1, -1, 653, 708, -1,
+ 339, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, 676, -1, 671, -1, -1, -1, 339,
+ -1, -1, -1, -1, -1, 339, -1, 347, -1, -1,
+ -1, -1, -1, 347, -1, 355, 355, 355, -1, -1,
+ -1, 355, 362, -1, -1, -1, -1, -1, 368, -1,
+ -1, -1, 372, -1, 368, -1, -1, -1, 372, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, 393, -1, -1, -1, -1, 398, 393,
+ -1, -1, -1, -1, 398, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, 453, 454, -1, 456, -1, -1, 453,
+ 454, -1, 456, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, 502, -1, -1, -1, -1, -1, 502, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ 520, -1, -1, -1, -1, -1, 520, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, 544, -1, -1, -1, -1, -1,
+ 544, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, 576, -1, -1, -1,
+ -1, -1, 576, -1, -1, -1, -1, -1, 588, -1,
+ -1, -1, -1, -1, 588, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, 613, -1, -1, -1, -1, -1, 613,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, 648, -1,
+ -1, -1, -1, 653, 648, -1, -1, 657, -1, 653,
+ -1, -1, -1, 657, -1, -1, -1, -1, -1, -1,
+ -1, 671, -1, -1, -1, -1, 676, 671, -1, -1,
+ -1, -1, 676, -1, -1, -1, -1, -1, -1, -1,
+ -1, 691, -1, -1, -1, 695, -1, 691, -1, -1,
+ -1, 695, -1, 703, -1, -1, -1, 707, -1, 703,
+ -1, -1, -1, 707, -1, -1, -1, 717, -1, -1,
+ -1, 0, -1, 717, 3, 4, 5, 6, 7, 8,
+ 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
+ 19, 20, 21, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, 33, 34, 35, 36, 37, 38,
+ 39, 40, 41, 42, 43, 44, 45, 46, 47, 48,
+ 49, 50, 51, 52, 53, 54, 55, 56, 57, 58,
+ 59, 60, 61, 62, 63, 64, 65, 66, 67, 68,
+ 69, 70, 71, 72, 73, 74, 75, 76, 77, 78,
+ 79, 80, 81, 82, 83, 84, 85, 86, 87, 88,
+ 89, 90, 91, 92, 93, 94, 95, 96, 97, 98,
+ 99, 100, 101, 102, 103, 104, 105, 106, 107, 108,
+ 109, 110, 111, 112, 113, 114, 115, 116, 117, 118,
+ 119, 120, 121, 122, 123, 124, 125, 126, 127, 128,
+ 129, 130, 131, 132, 133, 134, 135, 136, 137, 138,
+ 139, 140, 141, 142, 143, 144, 145, 146, 147, 148,
+ 149, 150, 151, 152, 153, 154, 155, 156, 157, 158,
+ 159, 160, 161, 162, 163, 164, 165, 166, 167, 168,
+ 169, 170, 171, 172, 173, 174, 175, 176, 177, 178,
+ 179, 180, 181, 182, 183, 184, 185, 186, 187, 188,
+ 189, 190, 191, 192, 193, 194, 195, 196, 197, 198,
+ 199, 200, 201, 202, 203, 204, 205, 206, 207, 208,
+ 209, 210, 211, 212, 213, 214, 215, 216, 217, 218,
+ 219, 220, 221, 222, 223, 224, 225, 226, 227, 228,
+ 229, 230, 231, 232, 233, 234, 235, 236, 237, 238,
+ 239, 240, 241, 242, 243, 244, 245, 246, 247, 248,
+ 249, 250, 251, 252, 253, 254, 255, 256, 257, 258,
+ 259, 260, 261, 262, 263, 264, 265, 266, 267, 268,
+ 269, 270, 271, 272, 273, 274, 275, 276, 277, 278,
+ 279, 280, 281, 282, 283, 284, 285, 286, 287, 288,
+ 289, 290, 291, 292, 293, 294, 295, 296, 297, 298,
+ 299, 300, 301, 302, 303, 304, 305, 306, 307, 308,
+ 309, 310, 311, 312, 313, 314, 315, 316, 317, 318,
+ 319, 320, 321, 322, 323, 324, 325, 326, 327, 328,
+ 329, 330, 331, 332, 333, 334, 335, 336, 337, 338,
+ -1, -1, 341, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, 385, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ 399, 400, 401, 402, 403, 404, 3, 4, 5, 6,
+ 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
+ 17, 18, 19, 20, 21, 22, 23, 24, -1, 26,
+ 27, 28, 29, 30, 31, 32, 33, 34, 35, 36,
+ 37, 38, 39, 40, 41, 42, 43, 44, 45, 46,
+ 47, 48, 49, 50, 51, 52, 53, 54, 55, 56,
+ 57, 58, 59, 60, 61, 62, 63, 64, 65, 66,
+ 67, 68, 69, 70, 71, 72, 73, 74, 75, 76,
+ 77, 78, 79, 80, 81, 82, 83, 84, 85, 86,
+ 87, 88, 89, 90, 91, 92, 93, 94, 95, 96,
+ 97, 98, 99, 100, 101, 102, 103, 104, 105, 106,
+ 107, 108, 109, 110, 111, 112, 113, 114, 115, 116,
+ 117, 118, 119, 120, 121, 122, 123, 124, 125, 126,
+ 127, 128, 129, 130, 131, 132, 133, 134, 135, 136,
+ 137, 138, 139, 140, 141, 142, 143, 144, 145, 146,
+ 147, 148, 149, 150, 151, 152, 153, 154, 155, 156,
+ 157, 158, 159, 160, 161, 162, 163, 164, 165, 166,
+ 167, 168, 169, 170, 171, 172, 173, 174, 175, 176,
+ 177, 178, 179, 180, 181, 182, 183, 184, 185, 186,
+ 187, 188, 189, 190, 191, 192, 193, 194, 195, 196,
+ 197, 198, 199, 200, 201, 202, 203, 204, 205, 206,
+ 207, 208, 209, 210, 211, 212, 213, 214, 215, 216,
+ 217, 218, 219, 220, 221, 222, 223, 224, 225, 226,
+ 227, 228, 229, 230, 231, 232, 233, 234, 235, 236,
+ 237, 238, 239, 240, 241, 242, 243, 244, 245, 246,
+ 247, 248, 249, 250, 251, 252, 253, 254, 255, 256,
+ 257, 258, 259, 260, 261, 262, 263, 264, 265, 266,
+ 267, 268, 269, 270, 271, 272, 273, 274, 275, 276,
+ 277, 278, 279, 280, 281, 282, 283, 284, 285, 286,
+ 287, 288, 289, 290, 291, 292, 293, 294, 295, 296,
+ 297, 298, 299, 300, 301, 302, 303, 304, 305, 306,
+ 307, 308, 309, 310, 311, 312, 313, 314, 315, 316,
+ 317, 318, 319, 320, 321, 322, 323, 324, 325, 326,
+ 327, 328, 329, 330, 331, 332, 333, 334, 335, 336,
+ 337, 338, 339, 340, 341, 342, 343, 344, 345, 346,
+ 347, 348, 349, 350, 351, 352, 353, -1, -1, 356,
+ 357, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, 375, -1,
+ 377, -1, 379, 380, -1, -1, -1, -1, 385, 386,
+ 387, 388, 389, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, 399, 400, 401, 402, 403, 404, 3, 4,
+ 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
+ 15, 16, 17, 18, 19, 20, 21, 22, 23, 24,
+ -1, 26, 27, 28, 29, 30, 31, 32, 33, 34,
+ 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
+ 45, 46, 47, 48, 49, 50, 51, 52, 53, 54,
+ 55, 56, 57, 58, 59, 60, 61, 62, 63, 64,
+ 65, 66, 67, 68, 69, 70, 71, 72, 73, 74,
+ 75, 76, 77, 78, 79, 80, 81, 82, 83, 84,
+ 85, 86, 87, 88, 89, 90, 91, 92, 93, 94,
+ 95, 96, 97, 98, 99, 100, 101, 102, 103, 104,
+ 105, 106, 107, 108, 109, 110, 111, 112, 113, 114,
+ 115, 116, 117, 118, 119, 120, 121, 122, 123, 124,
+ 125, 126, 127, 128, 129, 130, 131, 132, 133, 134,
+ 135, 136, 137, 138, 139, 140, 141, 142, 143, 144,
+ 145, 146, 147, 148, 149, 150, 151, 152, 153, 154,
+ 155, 156, 157, 158, 159, 160, 161, 162, 163, 164,
+ 165, 166, 167, 168, 169, 170, 171, 172, 173, 174,
+ 175, 176, 177, 178, 179, 180, 181, 182, 183, 184,
+ 185, 186, 187, 188, 189, 190, 191, 192, 193, 194,
+ 195, 196, 197, 198, 199, 200, 201, 202, 203, 204,
+ 205, 206, 207, 208, 209, 210, 211, 212, 213, 214,
+ 215, 216, 217, 218, 219, 220, 221, 222, 223, 224,
+ 225, 226, 227, 228, 229, 230, 231, 232, 233, 234,
+ 235, 236, 237, 238, 239, 240, 241, 242, 243, 244,
+ 245, 246, 247, 248, 249, 250, 251, 252, 253, 254,
+ 255, 256, 257, 258, 259, 260, 261, 262, 263, 264,
+ 265, 266, 267, 268, 269, 270, 271, 272, 273, 274,
+ 275, 276, 277, 278, 279, 280, 281, 282, 283, 284,
+ 285, 286, 287, 288, 289, 290, 291, 292, 293, 294,
+ 295, 296, 297, 298, 299, 300, 301, 302, 303, 304,
+ 305, 306, 307, 308, 309, 310, 311, 312, 313, 314,
+ 315, 316, 317, 318, 319, 320, 321, 322, 323, 324,
+ 325, 326, 327, 328, 329, 330, 331, 332, 333, 334,
+ 335, 336, 337, 338, 339, 340, 341, 342, 343, 344,
+ 345, 346, 347, 348, 349, 350, 351, 352, 353, -1,
+ -1, 356, 357, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ 375, -1, 377, -1, 379, 380, -1, -1, -1, -1,
+ 385, 386, 387, 388, 389, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, 399, 400, 401, 402, 403, 404,
+ 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
+ 13, 14, 15, 16, 17, 18, 19, 20, 21, 22,
+ 23, 24, -1, 26, 27, 28, 29, 30, 31, 32,
+ 33, 34, 35, 36, 37, 38, 39, 40, 41, 42,
+ 43, 44, 45, 46, 47, 48, 49, 50, 51, 52,
+ 53, 54, 55, 56, 57, 58, 59, 60, 61, 62,
+ 63, 64, 65, 66, 67, 68, 69, 70, 71, 72,
+ 73, 74, 75, 76, 77, 78, 79, 80, 81, 82,
+ 83, 84, 85, 86, 87, 88, 89, 90, 91, 92,
+ 93, 94, 95, 96, 97, 98, 99, 100, 101, 102,
+ 103, 104, 105, 106, 107, 108, 109, 110, 111, 112,
+ 113, 114, 115, 116, 117, 118, 119, 120, 121, 122,
+ 123, 124, 125, 126, 127, 128, 129, 130, 131, 132,
+ 133, 134, 135, 136, 137, 138, 139, 140, 141, 142,
+ 143, 144, 145, 146, 147, 148, 149, 150, 151, 152,
+ 153, 154, 155, 156, 157, 158, 159, 160, 161, 162,
+ 163, 164, 165, 166, 167, 168, 169, 170, 171, 172,
+ 173, 174, 175, 176, 177, 178, 179, 180, 181, 182,
+ 183, 184, 185, 186, 187, 188, 189, 190, 191, 192,
+ 193, 194, 195, 196, 197, 198, 199, 200, 201, 202,
+ 203, 204, 205, 206, 207, 208, 209, 210, 211, 212,
+ 213, 214, 215, 216, 217, 218, 219, 220, 221, 222,
+ 223, 224, 225, 226, 227, 228, 229, 230, 231, 232,
+ 233, 234, 235, 236, 237, 238, 239, 240, 241, 242,
+ 243, 244, 245, 246, 247, 248, 249, 250, 251, 252,
+ 253, 254, 255, 256, 257, 258, 259, 260, 261, 262,
+ 263, 264, 265, 266, 267, 268, 269, 270, 271, 272,
+ 273, 274, 275, 276, 277, 278, 279, 280, 281, 282,
+ 283, 284, 285, 286, 287, 288, 289, 290, 291, 292,
+ 293, 294, 295, 296, 297, 298, 299, 300, 301, 302,
+ 303, 304, 305, 306, 307, 308, 309, 310, 311, 312,
+ 313, 314, 315, 316, 317, 318, 319, 320, 321, 322,
+ 323, 324, 325, 326, 327, 328, 329, 330, 331, 332,
+ 333, 334, 335, 336, 337, 338, 339, 340, 341, 342,
+ 343, 344, 345, 346, 347, 348, 349, 350, 351, 352,
+ 353, -1, -1, 356, 357, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, 375, -1, 377, -1, 379, -1, -1, -1,
+ -1, -1, 385, 386, 387, 388, 389, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, 399, 400, 401, 402,
+ 403, 404, 3, 4, 5, 6, 7, 8, 9, 10,
+ 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
+ 21, 22, 23, 24, -1, 26, 27, 28, 29, 30,
+ 31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
+ 41, 42, 43, 44, 45, 46, 47, 48, 49, 50,
+ 51, 52, 53, 54, 55, 56, 57, 58, 59, 60,
+ 61, 62, 63, 64, 65, 66, 67, 68, 69, 70,
+ 71, 72, 73, 74, 75, 76, 77, 78, 79, 80,
+ 81, 82, 83, 84, 85, 86, 87, 88, 89, 90,
+ 91, 92, 93, 94, 95, 96, 97, 98, 99, 100,
+ 101, 102, 103, 104, 105, 106, 107, 108, 109, 110,
+ 111, 112, 113, 114, 115, 116, 117, 118, 119, 120,
+ 121, 122, 123, 124, 125, 126, 127, 128, 129, 130,
+ 131, 132, 133, 134, 135, 136, 137, 138, 139, 140,
+ 141, 142, 143, 144, 145, 146, 147, 148, 149, 150,
+ 151, 152, 153, 154, 155, 156, 157, 158, 159, 160,
+ 161, 162, 163, 164, 165, 166, 167, 168, 169, 170,
+ 171, 172, 173, 174, 175, 176, 177, 178, 179, 180,
+ 181, 182, 183, 184, 185, 186, 187, 188, 189, 190,
+ 191, 192, 193, 194, 195, 196, 197, 198, 199, 200,
+ 201, 202, 203, 204, 205, 206, 207, 208, 209, 210,
+ 211, 212, 213, 214, 215, 216, 217, 218, 219, 220,
+ 221, 222, 223, 224, 225, 226, 227, 228, 229, 230,
+ 231, 232, 233, 234, 235, 236, 237, 238, 239, 240,
+ 241, 242, 243, 244, 245, 246, 247, 248, 249, 250,
+ 251, 252, 253, 254, 255, 256, 257, 258, 259, 260,
+ 261, 262, 263, 264, 265, 266, 267, 268, 269, 270,
+ 271, 272, 273, 274, 275, 276, 277, 278, 279, 280,
+ 281, 282, 283, 284, 285, 286, 287, 288, 289, 290,
+ 291, 292, 293, 294, 295, 296, 297, 298, 299, 300,
+ 301, 302, 303, 304, 305, 306, 307, 308, 309, 310,
+ 311, 312, 313, 314, 315, 316, 317, 318, 319, 320,
+ 321, 322, 323, 324, 325, 326, 327, 328, 329, 330,
+ 331, 332, 333, 334, 335, 336, 337, 338, 339, 340,
+ 341, 342, 343, 344, 345, 346, 347, 348, 349, 350,
+ 351, 352, 353, -1, -1, 356, 357, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, 375, -1, 377, -1, 379, -1,
+ -1, -1, -1, -1, 385, 386, 387, 388, 389, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, 399, 400,
+ 401, 402, 403, 404, 3, 4, 5, 6, 7, 8,
+ 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
+ 19, 20, 21, 22, 23, 24, -1, 26, 27, 28,
+ 29, 30, 31, 32, 33, 34, 35, 36, 37, 38,
+ 39, 40, 41, 42, 43, 44, 45, 46, 47, 48,
+ 49, 50, 51, 52, 53, 54, 55, 56, 57, 58,
+ 59, 60, 61, 62, 63, 64, 65, 66, 67, 68,
+ 69, 70, 71, 72, 73, 74, 75, 76, 77, 78,
+ 79, 80, 81, 82, 83, 84, 85, 86, 87, 88,
+ 89, 90, 91, 92, 93, 94, 95, 96, 97, 98,
+ 99, 100, 101, 102, 103, 104, 105, 106, 107, 108,
+ 109, 110, 111, 112, 113, 114, 115, 116, 117, 118,
+ 119, 120, 121, 122, 123, 124, 125, 126, 127, 128,
+ 129, 130, 131, 132, 133, 134, 135, 136, 137, 138,
+ 139, 140, 141, 142, 143, 144, 145, 146, 147, 148,
+ 149, 150, 151, 152, 153, 154, 155, 156, 157, 158,
+ 159, 160, 161, 162, 163, 164, 165, 166, 167, 168,
+ 169, 170, 171, 172, 173, 174, 175, 176, 177, 178,
+ 179, 180, 181, 182, 183, 184, 185, 186, 187, 188,
+ 189, 190, 191, 192, 193, 194, 195, 196, 197, 198,
+ 199, 200, 201, 202, 203, 204, 205, 206, 207, 208,
+ 209, 210, 211, 212, 213, 214, 215, 216, 217, 218,
+ 219, 220, 221, 222, 223, 224, 225, 226, 227, 228,
+ 229, 230, 231, 232, 233, 234, 235, 236, 237, 238,
+ 239, 240, 241, 242, 243, 244, 245, 246, 247, 248,
+ 249, 250, 251, 252, 253, 254, 255, 256, 257, 258,
+ 259, 260, 261, 262, 263, 264, 265, 266, 267, 268,
+ 269, 270, 271, 272, 273, 274, 275, 276, 277, 278,
+ 279, 280, 281, 282, 283, 284, 285, 286, 287, 288,
+ 289, 290, 291, 292, 293, 294, 295, 296, 297, 298,
+ 299, 300, 301, 302, 303, 304, 305, 306, 307, 308,
+ 309, 310, 311, 312, 313, 314, 315, 316, 317, 318,
+ 319, 320, 321, 322, 323, 324, 325, 326, 327, 328,
+ 329, 330, 331, 332, 333, 334, 335, 336, 337, 338,
+ 339, 340, 341, 342, 343, 344, 345, 346, 347, 348,
+ 349, 350, 351, 352, 353, -1, -1, 356, 357, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, 375, -1, 377, -1,
+ -1, -1, -1, -1, -1, -1, 385, 386, 387, 388,
+ 389, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ 399, 400, 401, 402, 403, 404, 3, 4, 5, 6,
+ 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
+ 17, 18, 19, 20, 21, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, 33, 34, 35, 36,
+ 37, 38, 39, 40, 41, 42, 43, 44, 45, 46,
+ 47, 48, 49, 50, 51, 52, 53, 54, 55, 56,
+ 57, 58, 59, 60, 61, 62, 63, 64, 65, 66,
+ 67, 68, 69, 70, 71, 72, 73, 74, 75, 76,
+ 77, 78, 79, 80, 81, 82, 83, 84, 85, 86,
+ 87, 88, 89, 90, 91, 92, 93, 94, 95, 96,
+ 97, 98, 99, 100, 101, 102, 103, 104, 105, 106,
+ 107, 108, 109, 110, 111, 112, 113, 114, 115, 116,
+ 117, 118, 119, 120, 121, 122, 123, 124, 125, 126,
+ 127, 128, 129, 130, 131, 132, 133, 134, 135, 136,
+ 137, 138, 139, 140, 141, 142, 143, 144, 145, 146,
+ 147, 148, 149, 150, 151, 152, 153, 154, 155, 156,
+ 157, 158, 159, 160, 161, 162, 163, 164, 165, 166,
+ 167, 168, 169, 170, 171, 172, 173, 174, 175, 176,
+ 177, 178, 179, 180, 181, 182, 183, 184, 185, 186,
+ 187, 188, 189, 190, 191, 192, 193, 194, 195, 196,
+ 197, 198, 199, 200, 201, 202, 203, 204, 205, 206,
+ 207, 208, 209, 210, 211, 212, 213, 214, 215, 216,
+ 217, 218, 219, 220, 221, 222, 223, 224, 225, 226,
+ 227, 228, 229, 230, 231, 232, 233, 234, 235, 236,
+ 237, 238, 239, 240, 241, 242, 243, 244, 245, 246,
+ 247, 248, 249, 250, 251, 252, 253, 254, 255, 256,
+ 257, 258, 259, 260, 261, 262, 263, 264, 265, 266,
+ 267, 268, 269, 270, 271, 272, 273, 274, 275, 276,
+ 277, 278, 279, 280, 281, 282, 283, 284, 285, 286,
+ 287, 288, 289, 290, 291, 292, 293, 294, 295, 296,
+ 297, 298, 299, 300, 301, 302, 303, 304, 305, 306,
+ 307, 308, 309, 310, 311, 312, 313, 314, 315, 316,
+ 317, 318, 319, 320, 321, 322, 323, 324, 325, 326,
+ 327, 328, 329, 330, 331, 332, 333, 334, 335, 336,
+ 337, 338, -1, 340, 341, 342, 343, 344, 345, 346,
+ 347, 348, 349, 350, 351, 352, 353, -1, -1, 356,
+ 357, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, 375, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, 385, 386,
+ 387, 388, 389, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, 399, 400, 401, 402, 403, 404, 3, 4,
+ 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
+ 15, 16, 17, 18, 19, 20, 21, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, 33, 34,
+ 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
+ 45, 46, 47, 48, 49, 50, 51, 52, 53, 54,
+ 55, 56, 57, 58, 59, 60, 61, 62, 63, 64,
+ 65, 66, 67, 68, 69, 70, 71, 72, 73, 74,
+ 75, 76, 77, 78, 79, 80, 81, 82, 83, 84,
+ 85, 86, 87, 88, 89, 90, 91, 92, 93, 94,
+ 95, 96, 97, 98, 99, 100, 101, 102, 103, 104,
+ 105, 106, 107, 108, 109, 110, 111, 112, 113, 114,
+ 115, 116, 117, 118, 119, 120, 121, 122, 123, 124,
+ 125, 126, 127, 128, 129, 130, 131, 132, 133, 134,
+ 135, 136, 137, 138, 139, 140, 141, 142, 143, 144,
+ 145, 146, 147, 148, 149, 150, 151, 152, 153, 154,
+ 155, 156, 157, 158, 159, 160, 161, 162, 163, 164,
+ 165, 166, 167, 168, 169, 170, 171, 172, 173, 174,
+ 175, 176, 177, 178, 179, 180, 181, 182, 183, 184,
+ 185, 186, 187, 188, 189, 190, 191, 192, 193, 194,
+ 195, 196, 197, 198, 199, 200, 201, 202, 203, 204,
+ 205, 206, 207, 208, 209, 210, 211, 212, 213, 214,
+ 215, 216, 217, 218, 219, 220, 221, 222, 223, 224,
+ 225, 226, 227, 228, 229, 230, 231, 232, 233, 234,
+ 235, 236, 237, 238, 239, 240, 241, 242, 243, 244,
+ 245, 246, 247, 248, 249, 250, 251, 252, 253, 254,
+ 255, 256, 257, 258, 259, 260, 261, 262, 263, 264,
+ 265, 266, 267, 268, 269, 270, 271, 272, 273, 274,
+ 275, 276, 277, 278, 279, 280, 281, 282, 283, 284,
+ 285, 286, 287, 288, 289, 290, 291, 292, 293, 294,
+ 295, 296, 297, 298, 299, 300, 301, 302, 303, 304,
+ 305, 306, 307, 308, 309, 310, 311, 312, 313, 314,
+ 315, 316, 317, 318, 319, 320, 321, 322, 323, 324,
+ 325, 326, 327, 328, 329, 330, 331, 332, 333, 334,
+ 335, 336, 337, 338, -1, -1, 341, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ 385, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, 399, 400, 401, 402, 403, 404,
+ 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
+ 13, 14, 15, 16, 17, 18, 19, 20, 21, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ 33, 34, 35, 36, 37, 38, 39, 40, 41, 42,
+ 43, 44, 45, 46, 47, 48, 49, 50, 51, 52,
+ 53, 54, 55, 56, 57, 58, 59, 60, 61, 62,
+ 63, 64, 65, 66, 67, 68, 69, 70, 71, 72,
+ 73, 74, 75, 76, 77, 78, 79, 80, 81, 82,
+ 83, 84, 85, 86, 87, 88, 89, 90, 91, 92,
+ 93, 94, 95, 96, 97, 98, 99, 100, 101, 102,
+ 103, 104, 105, 106, 107, 108, 109, 110, 111, 112,
+ 113, 114, 115, 116, 117, 118, 119, 120, 121, 122,
+ 123, 124, 125, 126, 127, 128, 129, 130, 131, 132,
+ 133, 134, 135, 136, 137, 138, 139, 140, 141, 142,
+ 143, 144, 145, 146, 147, 148, 149, 150, 151, 152,
+ 153, 154, 155, 156, 157, 158, 159, 160, 161, 162,
+ 163, 164, 165, 166, 167, 168, 169, 170, 171, 172,
+ 173, 174, 175, 176, 177, 178, 179, 180, 181, 182,
+ 183, 184, 185, 186, 187, 188, 189, 190, 191, 192,
+ 193, 194, 195, 196, 197, 198, 199, 200, 201, 202,
+ 203, 204, 205, 206, 207, 208, 209, 210, 211, 212,
+ 213, 214, 215, 216, 217, 218, 219, 220, 221, 222,
+ 223, 224, 225, 226, 227, 228, 229, 230, 231, 232,
+ 233, 234, 235, 236, 237, 238, 239, 240, 241, 242,
+ 243, 244, 245, 246, 247, 248, 249, 250, 251, 252,
+ 253, 254, 255, 256, 257, 258, 259, 260, 261, 262,
+ 263, 264, 265, 266, 267, 268, 269, 270, 271, 272,
+ 273, 274, 275, 276, 277, 278, 279, 280, 281, 282,
+ 283, 284, 285, 286, 287, 288, 289, 290, 291, 292,
+ 293, 294, 295, 296, 297, 298, 299, 300, 301, 302,
+ 303, 304, 305, 306, 307, 308, 309, 310, 311, 312,
+ 313, 314, 315, 316, 317, 318, 319, 320, 321, 322,
+ 323, 324, 325, 326, 327, 328, 329, 330, 331, 332,
+ 333, 334, 335, 336, 337, 338, -1, 340, 341, 342,
+ 343, 344, 345, 346, 347, 348, 349, 350, 351, 352,
+ 353, -1, -1, 356, 357, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, 375, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, 386, 387, 388, 389, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, 399, 400, 401, 402,
+ 403, 3, 4, 5, 6, 7, 8, 9, 10, 11,
+ 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, 33, 34, 35, 36, 37, 38, 39, 40, 41,
+ 42, 43, 44, 45, 46, 47, 48, 49, 50, 51,
+ 52, 53, 54, 55, 56, 57, 58, 59, 60, 61,
+ 62, 63, 64, 65, 66, 67, 68, 69, 70, 71,
+ 72, 73, 74, 75, 76, 77, 78, 79, 80, 81,
+ 82, 83, 84, 85, 86, 87, 88, 89, 90, 91,
+ 92, 93, 94, 95, 96, 97, 98, 99, 100, 101,
+ 102, 103, 104, 105, 106, 107, 108, 109, 110, 111,
+ 112, 113, 114, 115, 116, 117, 118, 119, 120, 121,
+ 122, 123, 124, 125, 126, 127, 128, 129, 130, 131,
+ 132, 133, 134, 135, 136, 137, 138, 139, 140, 141,
+ 142, 143, 144, 145, 146, 147, 148, 149, 150, 151,
+ 152, 153, 154, 155, 156, 157, 158, 159, 160, 161,
+ 162, 163, 164, 165, 166, 167, 168, 169, 170, 171,
+ 172, 173, 174, 175, 176, 177, 178, 179, 180, 181,
+ 182, 183, 184, 185, 186, 187, 188, 189, 190, 191,
+ 192, 193, 194, 195, 196, 197, 198, 199, 200, 201,
+ 202, 203, 204, 205, 206, 207, 208, 209, 210, 211,
+ 212, 213, 214, 215, 216, 217, 218, 219, 220, 221,
+ 222, 223, 224, 225, 226, 227, 228, 229, 230, 231,
+ 232, 233, 234, 235, 236, 237, 238, 239, 240, 241,
+ 242, 243, 244, 245, 246, 247, 248, 249, 250, 251,
+ 252, 253, 254, 255, 256, 257, 258, 259, 260, 261,
+ 262, 263, 264, 265, 266, 267, 268, 269, 270, 271,
+ 272, 273, 274, 275, 276, 277, 278, 279, 280, 281,
+ 282, 283, 284, 285, 286, 287, 288, 289, 290, 291,
+ 292, 293, 294, 295, 296, 297, 298, 299, 300, 301,
+ 302, 303, 304, 305, 306, 307, 308, 309, 310, 311,
+ 312, 313, 314, 315, 316, 317, 318, 319, 320, 321,
+ 322, 323, 324, 325, 326, 327, 328, 329, 330, 331,
+ 332, 333, 334, 335, 336, 337, 338, -1, 340, 341,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, 385, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, 399, 400, 401,
+ 402, 403, 3, 4, 5, 6, 7, 8, 9, 10,
+ 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
+ 21, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, 33, 34, 35, 36, 37, 38, 39, 40,
+ 41, 42, 43, 44, 45, 46, 47, 48, 49, 50,
+ 51, 52, 53, 54, 55, 56, 57, 58, 59, 60,
+ 61, 62, 63, 64, 65, 66, 67, 68, 69, 70,
+ 71, 72, 73, 74, 75, 76, 77, 78, 79, 80,
+ 81, 82, 83, 84, 85, 86, 87, 88, 89, 90,
+ 91, 92, 93, 94, 95, 96, 97, 98, 99, 100,
+ 101, 102, 103, 104, 105, 106, 107, 108, 109, 110,
+ 111, 112, 113, 114, 115, 116, 117, 118, 119, 120,
+ 121, 122, 123, 124, 125, 126, 127, 128, 129, 130,
+ 131, 132, 133, 134, 135, 136, 137, 138, 139, 140,
+ 141, 142, 143, 144, 145, 146, 147, 148, 149, 150,
+ 151, 152, 153, 154, 155, 156, 157, 158, 159, 160,
+ 161, 162, 163, 164, 165, 166, 167, 168, 169, 170,
+ 171, 172, 173, 174, 175, 176, 177, 178, 179, 180,
+ 181, 182, 183, 184, 185, 186, 187, 188, 189, 190,
+ 191, 192, 193, 194, 195, 196, 197, 198, 199, 200,
+ 201, 202, 203, 204, 205, 206, 207, 208, 209, 210,
+ 211, 212, 213, 214, 215, 216, 217, 218, 219, 220,
+ 221, 222, 223, 224, 225, 226, 227, 228, 229, 230,
+ 231, 232, 233, 234, 235, 236, 237, 238, 239, 240,
+ 241, 242, 243, 244, 245, 246, 247, 248, 249, 250,
+ 251, 252, 253, 254, 255, 256, 257, 258, 259, 260,
+ 261, 262, 263, 264, 265, 266, 267, 268, 269, 270,
+ 271, 272, 273, 274, 275, 276, 277, 278, 279, 280,
+ 281, 282, 283, 284, 285, 286, 287, 288, 289, 290,
+ 291, 292, 293, 294, 295, 296, 297, 298, 299, 300,
+ 301, 302, 303, 304, 305, 306, 307, 308, 309, 310,
+ 311, 312, 313, 314, 315, 316, 317, 318, 319, 320,
+ 321, 322, 323, 324, 325, 326, 327, 328, 329, 330,
+ 331, 332, 333, 334, 335, 336, 337, 338, -1, -1,
+ 341, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, 380,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, 399, 400,
+ 401, 402, 403, 3, 4, 5, 6, 7, 8, 9,
+ 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, 33, 34, 35, 36, 37, 38, 39,
+ 40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
+ 50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
+ 60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
+ 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
+ 80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
+ 90, 91, 92, 93, 94, 95, 96, 97, 98, 99,
+ 100, 101, 102, 103, 104, 105, 106, 107, 108, 109,
+ 110, 111, 112, 113, 114, 115, 116, 117, 118, 119,
+ 120, 121, 122, 123, 124, 125, 126, 127, 128, 129,
+ 130, 131, 132, 133, 134, 135, 136, 137, 138, 139,
+ 140, 141, 142, 143, 144, 145, 146, 147, 148, 149,
+ 150, 151, 152, 153, 154, 155, 156, 157, 158, 159,
+ 160, 161, 162, 163, 164, 165, 166, 167, 168, 169,
+ 170, 171, 172, 173, 174, 175, 176, 177, 178, 179,
+ 180, 181, 182, 183, 184, 185, 186, 187, 188, 189,
+ 190, 191, 192, 193, 194, 195, 196, 197, 198, 199,
+ 200, 201, 202, 203, 204, 205, 206, 207, 208, 209,
+ 210, 211, 212, 213, 214, 215, 216, 217, 218, 219,
+ 220, 221, 222, 223, 224, 225, 226, 227, 228, 229,
+ 230, 231, 232, 233, 234, 235, 236, 237, 238, 239,
+ 240, 241, 242, 243, 244, 245, 246, 247, 248, 249,
+ 250, 251, 252, 253, 254, 255, 256, 257, 258, 259,
+ 260, 261, 262, 263, 264, 265, 266, 267, 268, 269,
+ 270, 271, 272, 273, 274, 275, 276, 277, 278, 279,
+ 280, 281, 282, 283, 284, 285, 286, 287, 288, 289,
+ 290, 291, 292, 293, 294, 295, 296, 297, 298, 299,
+ 300, 301, 302, 303, 304, 305, 306, 307, 308, 309,
+ 310, 311, 312, 313, 314, 315, 316, 317, 318, 319,
+ 320, 321, 322, 323, 324, 325, 326, 327, 328, 329,
+ 330, 331, 332, 333, 334, 335, 336, 337, 338, -1,
+ -1, 341, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ 380, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, 399,
+ 400, 401, 402, 403, 3, 4, 5, 6, 7, 8,
+ 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
+ 19, 20, 21, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, 33, 34, 35, 36, 37, 38,
+ 39, 40, 41, 42, 43, 44, 45, 46, 47, 48,
+ 49, 50, 51, 52, 53, 54, 55, 56, 57, 58,
+ 59, 60, 61, 62, 63, 64, 65, 66, 67, 68,
+ 69, 70, 71, 72, 73, 74, 75, 76, 77, 78,
+ 79, 80, 81, 82, 83, 84, 85, 86, 87, 88,
+ 89, 90, 91, 92, 93, 94, 95, 96, 97, 98,
+ 99, 100, 101, 102, 103, 104, 105, 106, 107, 108,
+ 109, 110, 111, 112, 113, 114, 115, 116, 117, 118,
+ 119, 120, 121, 122, 123, 124, 125, 126, 127, 128,
+ 129, 130, 131, 132, 133, 134, 135, 136, 137, 138,
+ 139, 140, 141, 142, 143, 144, 145, 146, 147, 148,
+ 149, 150, 151, 152, 153, 154, 155, 156, 157, 158,
+ 159, 160, 161, 162, 163, 164, 165, 166, 167, 168,
+ 169, 170, 171, 172, 173, 174, 175, 176, 177, 178,
+ 179, 180, 181, 182, 183, 184, 185, 186, 187, 188,
+ 189, 190, 191, 192, 193, 194, 195, 196, 197, 198,
+ 199, 200, 201, 202, 203, 204, 205, 206, 207, 208,
+ 209, 210, 211, 212, 213, 214, 215, 216, 217, 218,
+ 219, 220, 221, 222, 223, 224, 225, 226, 227, 228,
+ 229, 230, 231, 232, 233, 234, 235, 236, 237, 238,
+ 239, 240, 241, 242, 243, 244, 245, 246, 247, 248,
+ 249, 250, 251, 252, 253, 254, 255, 256, 257, 258,
+ 259, 260, 261, 262, 263, 264, 265, 266, 267, 268,
+ 269, 270, 271, 272, 273, 274, 275, 276, 277, 278,
+ 279, 280, 281, 282, 283, 284, 285, 286, 287, 288,
+ 289, 290, 291, 292, 293, 294, 295, 296, 297, 298,
+ 299, 300, 301, 302, 303, 304, 305, 306, 307, 308,
+ 309, 310, 311, 312, 313, 314, 315, 316, 317, 318,
+ 319, 320, 321, 322, 323, 324, 325, 326, 327, 328,
+ 329, 330, 331, 332, 333, 334, 335, 336, 337, 338,
+ -1, -1, 341, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, 380, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ 399, 400, 401, 402, 403, 3, 4, 5, 6, 7,
+ 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,
+ 18, 19, 20, 21, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, 33, 34, 35, 36, 37,
+ 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
+ 48, 49, 50, 51, 52, 53, 54, 55, 56, 57,
+ 58, 59, 60, 61, 62, 63, 64, 65, 66, 67,
+ 68, 69, 70, 71, 72, 73, 74, 75, 76, 77,
+ 78, 79, 80, 81, 82, 83, 84, 85, 86, 87,
+ 88, 89, 90, 91, 92, 93, 94, 95, 96, 97,
+ 98, 99, 100, 101, 102, 103, 104, 105, 106, 107,
+ 108, 109, 110, 111, 112, 113, 114, 115, 116, 117,
+ 118, 119, 120, 121, 122, 123, 124, 125, 126, 127,
+ 128, 129, 130, 131, 132, 133, 134, 135, 136, 137,
+ 138, 139, 140, 141, 142, 143, 144, 145, 146, 147,
+ 148, 149, 150, 151, 152, 153, 154, 155, 156, 157,
+ 158, 159, 160, 161, 162, 163, 164, 165, 166, 167,
+ 168, 169, 170, 171, 172, 173, 174, 175, 176, 177,
+ 178, 179, 180, 181, 182, 183, 184, 185, 186, 187,
+ 188, 189, 190, 191, 192, 193, 194, 195, 196, 197,
+ 198, 199, 200, 201, 202, 203, 204, 205, 206, 207,
+ 208, 209, 210, 211, 212, 213, 214, 215, 216, 217,
+ 218, 219, 220, 221, 222, 223, 224, 225, 226, 227,
+ 228, 229, 230, 231, 232, 233, 234, 235, 236, 237,
+ 238, 239, 240, 241, 242, 243, 244, 245, 246, 247,
+ 248, 249, 250, 251, 252, 253, 254, 255, 256, 257,
+ 258, 259, 260, 261, 262, 263, 264, 265, 266, 267,
+ 268, 269, 270, 271, 272, 273, 274, 275, 276, 277,
+ 278, 279, 280, 281, 282, 283, 284, 285, 286, 287,
+ 288, 289, 290, 291, 292, 293, 294, 295, 296, 297,
+ 298, 299, 300, 301, 302, 303, 304, 305, 306, 307,
+ 308, 309, 310, 311, 312, 313, 314, 315, 316, 317,
+ 318, 319, 320, 321, 322, 323, 324, 325, 326, 327,
+ 328, 329, 330, 331, 332, 333, 334, 335, 336, 337,
+ 338, -1, -1, 341, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, 5, 6, 7,
+ 8, 9, -1, 11, 12, 13, 14, 15, 16, 17,
+ 18, 19, 20, 21, -1, -1, -1, -1, -1, -1,
+ -1, 399, 400, 401, 402, 403, 34, 35, 36, 37,
+ 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
+ 48, 49, 50, 51, 52, 53, 54, 55, 56, 57,
+ 58, 59, 60, 61, 62, 63, 64, 65, 66, 67,
+ 68, 69, 70, 71, 72, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, 82, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ 98, 99, 100, 101, 102, 103, 104, 105, 106, 107,
+ 108, 109, 110, 111, 112, 113, 114, 115, 116, 117,
+ 118, 119, 120, 121, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, 131, 132, 133, 134, 135, 136, 137,
+ 138, 139, 140, 141, 142, 143, 144, 145, 146, 147,
+ 148, 149, 150, 151, 152, 153, 154, 155, 156, 157,
+ 158, 159, 160, 161, 162, 163, 164, 165, 166, 167,
+ 168, 169, 170, 171, 172, 173, 174, 175, 176, 177,
+ 178, 179, 180, 181, 182, 183, 184, 185, 186, 187,
+ 188, 189, 190, 191, 192, 193, 194, 195, 196, 197,
+ 198, 199, 200, 201, 202, 203, 204, 205, 206, 207,
+ 208, 209, 210, 211, 212, 213, 214, 215, 216, 217,
+ 218, 219, 220, 221, 222, 223, 224, 225, 226, 227,
+ 228, 229, 230, 231, 232, 233, 234, 235, 236, 237,
+ 238, 239, 240, 241, 242, 243, 244, 245, 246, 247,
+ 248, 249, 250, 251, 252, 253, 254, 255, 256, 257,
+ 258, 259, 260, 261, 262, 263, 264, 265, 266, 267,
+ 268, 269, 270, 271, 272, 273, 274, 275, 276, 277,
+ 278, 279, 280, 281, 282, 283, 284, 285, 286, 287,
+ 288, 289, 290, 291, 292, 293, 294, 295, 296, 297,
+ 298, 299, 300, 301, 302, 303, 304, 305, 306, 307,
+ 308, 309, 310, 311, 312, 313, 314, 315, 316, 317,
+ 318, 319, 320, 321, 322, 323, 324, 325, 326, 327,
+ 328, 329, 330, 331, 332, 333, 334, 335, 336, 337,
+ 338, -1, 340, 341, 342, 343, 344, 345, 346, 347,
+ 348, 349, 350, 351, 352, 353, -1, -1, 356, 357,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, 375, -1, -1,
+ -1, 379, 380, -1, -1, -1, -1, -1, 386, 387,
+ 388, 389, 5, 6, 7, 8, 9, -1, 11, 12,
+ 13, 14, 15, 16, 17, 18, 19, 20, 21, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, 34, 35, 36, 37, 38, 39, 40, 41, 42,
+ 43, 44, 45, 46, 47, 48, 49, 50, 51, 52,
+ 53, 54, 55, 56, 57, 58, 59, 60, 61, 62,
+ 63, 64, 65, 66, 67, 68, 69, 70, 71, 72,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, 82,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, 98, 99, 100, 101, 102,
+ 103, 104, 105, 106, 107, 108, 109, 110, 111, 112,
+ 113, 114, 115, 116, 117, 118, 119, 120, 121, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, 131, 132,
+ 133, 134, 135, 136, 137, 138, 139, 140, 141, 142,
+ 143, 144, 145, 146, 147, 148, 149, 150, 151, 152,
+ 153, 154, 155, 156, 157, 158, 159, 160, 161, 162,
+ 163, 164, 165, 166, 167, 168, 169, 170, 171, 172,
+ 173, 174, 175, 176, 177, 178, 179, 180, 181, 182,
+ 183, 184, 185, 186, 187, 188, 189, 190, 191, 192,
+ 193, 194, 195, 196, 197, 198, 199, 200, 201, 202,
+ 203, 204, 205, 206, 207, 208, 209, 210, 211, 212,
+ 213, 214, 215, 216, 217, 218, 219, 220, 221, 222,
+ 223, 224, 225, 226, 227, 228, 229, 230, 231, 232,
+ 233, 234, 235, 236, 237, 238, 239, 240, 241, 242,
+ 243, 244, 245, 246, 247, 248, 249, 250, 251, 252,
+ 253, 254, 255, 256, 257, 258, 259, 260, 261, 262,
+ 263, 264, 265, 266, 267, 268, 269, 270, 271, 272,
+ 273, 274, 275, 276, 277, 278, 279, 280, 281, 282,
+ 283, 284, 285, 286, 287, 288, 289, 290, 291, 292,
+ 293, 294, 295, 296, 297, 298, 299, 300, 301, 302,
+ 303, 304, 305, 306, 307, 308, 309, 310, 311, 312,
+ 313, 314, 315, 316, 317, 318, 319, 320, 321, 322,
+ 323, 324, 325, 326, 327, 328, 329, 330, 331, 332,
+ 333, 334, 335, 336, 337, 338, -1, 340, 341, 342,
+ 343, 344, 345, 346, 347, 348, 349, 350, 351, 352,
+ 353, -1, -1, 356, 357, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, 375, -1, -1, 378, -1, -1, -1, -1,
+ -1, -1, -1, 386, 387, 388, 389, 5, 6, 7,
+ 8, 9, -1, 11, 12, 13, 14, 15, 16, 17,
+ 18, 19, 20, 21, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, 34, 35, 36, 37,
+ 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
+ 48, 49, 50, 51, 52, 53, 54, 55, 56, 57,
+ 58, 59, 60, 61, 62, 63, 64, 65, 66, 67,
+ 68, 69, 70, 71, 72, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, 82, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ 98, 99, 100, 101, 102, 103, 104, 105, 106, 107,
+ 108, 109, 110, 111, 112, 113, 114, 115, 116, 117,
+ 118, 119, 120, 121, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, 131, 132, 133, 134, 135, 136, 137,
+ 138, 139, 140, 141, 142, 143, 144, 145, 146, 147,
+ 148, 149, 150, 151, 152, 153, 154, 155, 156, 157,
+ 158, 159, 160, 161, 162, 163, 164, 165, 166, 167,
+ 168, 169, 170, 171, 172, 173, 174, 175, 176, 177,
+ 178, 179, 180, 181, 182, 183, 184, 185, 186, 187,
+ 188, 189, 190, 191, 192, 193, 194, 195, 196, 197,
+ 198, 199, 200, 201, 202, 203, 204, 205, 206, 207,
+ 208, 209, 210, 211, 212, 213, 214, 215, 216, 217,
+ 218, 219, 220, 221, 222, 223, 224, 225, 226, 227,
+ 228, 229, 230, 231, 232, 233, 234, 235, 236, 237,
+ 238, 239, 240, 241, 242, 243, 244, 245, 246, 247,
+ 248, 249, 250, 251, 252, 253, 254, 255, 256, 257,
+ 258, 259, 260, 261, 262, 263, 264, 265, 266, 267,
+ 268, 269, 270, 271, 272, 273, 274, 275, 276, 277,
+ 278, 279, 280, 281, 282, 283, 284, 285, 286, 287,
+ 288, 289, 290, 291, 292, 293, 294, 295, 296, 297,
+ 298, 299, 300, 301, 302, 303, 304, 305, 306, 307,
+ 308, 309, 310, 311, 312, 313, 314, 315, 316, 317,
+ 318, 319, 320, 321, 322, 323, 324, 325, 326, 327,
+ 328, 329, 330, 331, 332, 333, 334, 335, 336, 337,
+ 338, -1, 340, 341, 342, 343, 344, 345, 346, 347,
+ 348, 349, 350, 351, 352, 353, -1, -1, 356, 357,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, 375, -1, -1,
+ -1, 379, -1, -1, -1, -1, -1, -1, 386, 387,
+ 388, 389, 5, 6, 7, 8, 9, -1, 11, 12,
+ 13, 14, 15, 16, 17, 18, 19, 20, 21, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, 34, 35, 36, 37, 38, 39, 40, 41, 42,
+ 43, 44, 45, 46, 47, 48, 49, 50, 51, 52,
+ 53, 54, 55, 56, 57, 58, 59, 60, 61, 62,
+ 63, 64, 65, 66, 67, 68, 69, 70, 71, 72,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, 82,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, 98, 99, 100, 101, 102,
+ 103, 104, 105, 106, 107, 108, 109, 110, 111, 112,
+ 113, 114, 115, 116, 117, 118, 119, 120, 121, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, 131, 132,
+ 133, 134, 135, 136, 137, 138, 139, 140, 141, 142,
+ 143, 144, 145, 146, 147, 148, 149, 150, 151, 152,
+ 153, 154, 155, 156, 157, 158, 159, 160, 161, 162,
+ 163, 164, 165, 166, 167, 168, 169, 170, 171, 172,
+ 173, 174, 175, 176, 177, 178, 179, 180, 181, 182,
+ 183, 184, 185, 186, 187, 188, 189, 190, 191, 192,
+ 193, 194, 195, 196, 197, 198, 199, 200, 201, 202,
+ 203, 204, 205, 206, 207, 208, 209, 210, 211, 212,
+ 213, 214, 215, 216, 217, 218, 219, 220, 221, 222,
+ 223, 224, 225, 226, 227, 228, 229, 230, 231, 232,
+ 233, 234, 235, 236, 237, 238, 239, 240, 241, 242,
+ 243, 244, 245, 246, 247, 248, 249, 250, 251, 252,
+ 253, 254, 255, 256, 257, 258, 259, 260, 261, 262,
+ 263, 264, 265, 266, 267, 268, 269, 270, 271, 272,
+ 273, 274, 275, 276, 277, 278, 279, 280, 281, 282,
+ 283, 284, 285, 286, 287, 288, 289, 290, 291, 292,
+ 293, 294, 295, 296, 297, 298, 299, 300, 301, 302,
+ 303, 304, 305, 306, 307, 308, 309, 310, 311, 312,
+ 313, 314, 315, 316, 317, 318, 319, 320, 321, 322,
+ 323, 324, 325, 326, 327, 328, 329, 330, 331, 332,
+ 333, 334, 335, 336, 337, 338, -1, 340, 341, 342,
+ 343, 344, 345, 346, 347, 348, 349, 350, 351, 352,
+ 353, -1, -1, 356, 357, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, 375, -1, -1, 378, -1, -1, -1, -1,
+ -1, -1, -1, 386, 387, 388, 389, 5, 6, 7,
+ 8, 9, -1, 11, 12, 13, 14, 15, 16, 17,
+ 18, 19, 20, 21, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, 34, 35, 36, 37,
+ 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
+ 48, 49, 50, 51, 52, 53, 54, 55, 56, 57,
+ 58, 59, 60, 61, 62, 63, 64, 65, 66, 67,
+ 68, 69, 70, 71, 72, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, 82, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ 98, 99, 100, 101, 102, 103, 104, 105, 106, 107,
+ 108, 109, 110, 111, 112, 113, 114, 115, 116, 117,
+ 118, 119, 120, 121, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, 131, 132, 133, 134, 135, 136, 137,
+ 138, 139, 140, 141, 142, 143, 144, 145, 146, 147,
+ 148, 149, 150, 151, 152, 153, 154, 155, 156, 157,
+ 158, 159, 160, 161, 162, 163, 164, 165, 166, 167,
+ 168, 169, 170, 171, 172, 173, 174, 175, 176, 177,
+ 178, 179, 180, 181, 182, 183, 184, 185, 186, 187,
+ 188, 189, 190, 191, 192, 193, 194, 195, 196, 197,
+ 198, 199, 200, 201, 202, 203, 204, 205, 206, 207,
+ 208, 209, 210, 211, 212, 213, 214, 215, 216, 217,
+ 218, 219, 220, 221, 222, 223, 224, 225, 226, 227,
+ 228, 229, 230, 231, 232, 233, 234, 235, 236, 237,
+ 238, 239, 240, 241, 242, 243, 244, 245, 246, 247,
+ 248, 249, 250, 251, 252, 253, 254, 255, 256, 257,
+ 258, 259, 260, 261, 262, 263, 264, 265, 266, 267,
+ 268, 269, 270, 271, 272, 273, 274, 275, 276, 277,
+ 278, 279, 280, 281, 282, 283, 284, 285, 286, 287,
+ 288, 289, 290, 291, 292, 293, 294, 295, 296, 297,
+ 298, 299, 300, 301, 302, 303, 304, 305, 306, 307,
+ 308, 309, 310, 311, 312, 313, 314, 315, 316, 317,
+ 318, 319, 320, 321, 322, 323, 324, 325, 326, 327,
+ 328, 329, 330, 331, 332, 333, 334, 335, 336, 337,
+ 338, -1, 340, 341, 342, 343, 344, 345, 346, 347,
+ 348, 349, 350, 351, 352, 353, -1, -1, 356, 357,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, 375, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, 385, 386, 387,
+ 388, 389, 5, 6, 7, 8, 9, -1, 11, 12,
+ 13, 14, 15, 16, 17, 18, 19, 20, 21, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, 34, 35, 36, 37, 38, 39, 40, 41, 42,
+ 43, 44, 45, 46, 47, 48, 49, 50, 51, 52,
+ 53, 54, 55, 56, 57, 58, 59, 60, 61, 62,
+ 63, 64, 65, 66, 67, 68, 69, 70, 71, 72,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, 82,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, 98, 99, 100, 101, 102,
+ 103, 104, 105, 106, 107, 108, 109, 110, 111, 112,
+ 113, 114, 115, 116, 117, 118, 119, 120, 121, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, 131, 132,
+ 133, 134, 135, 136, 137, 138, 139, 140, 141, 142,
+ 143, 144, 145, 146, 147, 148, 149, 150, 151, 152,
+ 153, 154, 155, 156, 157, 158, 159, 160, 161, 162,
+ 163, 164, 165, 166, 167, 168, 169, 170, 171, 172,
+ 173, 174, 175, 176, 177, 178, 179, 180, 181, 182,
+ 183, 184, 185, 186, 187, 188, 189, 190, 191, 192,
+ 193, 194, 195, 196, 197, 198, 199, 200, 201, 202,
+ 203, 204, 205, 206, 207, 208, 209, 210, 211, 212,
+ 213, 214, 215, 216, 217, 218, 219, 220, 221, 222,
+ 223, 224, 225, 226, 227, 228, 229, 230, 231, 232,
+ 233, 234, 235, 236, 237, 238, 239, 240, 241, 242,
+ 243, 244, 245, 246, 247, 248, 249, 250, 251, 252,
+ 253, 254, 255, 256, 257, 258, 259, 260, 261, 262,
+ 263, 264, 265, 266, 267, 268, 269, 270, 271, 272,
+ 273, 274, 275, 276, 277, 278, 279, 280, 281, 282,
+ 283, 284, 285, 286, 287, 288, 289, 290, 291, 292,
+ 293, 294, 295, 296, 297, 298, 299, 300, 301, 302,
+ 303, 304, 305, 306, 307, 308, 309, 310, 311, 312,
+ 313, 314, 315, 316, 317, 318, 319, 320, 321, 322,
+ 323, 324, 325, 326, 327, 328, 329, 330, 331, 332,
+ 333, 334, 335, 336, 337, 338, -1, 340, 341, 342,
+ 343, 344, 345, 346, 347, 348, 349, 350, 351, 352,
+ 353, -1, -1, 356, 357, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, 375, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, 386, 387, 388, 389, 5, 6, 7,
+ 8, 9, -1, 11, 12, 13, 14, 15, 16, 17,
+ 18, 19, 20, 21, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, 34, 35, 36, 37,
+ 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
+ 48, 49, 50, 51, 52, 53, 54, 55, 56, 57,
+ 58, 59, 60, 61, 62, 63, 64, 65, 66, 67,
+ 68, 69, 70, 71, 72, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, 82, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ 98, 99, 100, 101, 102, 103, 104, 105, 106, 107,
+ 108, 109, 110, 111, 112, 113, 114, 115, 116, 117,
+ 118, 119, 120, 121, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, 131, 132, 133, 134, 135, 136, 137,
+ 138, 139, 140, 141, 142, 143, 144, 145, 146, 147,
+ 148, 149, 150, 151, 152, 153, 154, 155, 156, 157,
+ 158, 159, 160, 161, 162, 163, 164, 165, 166, 167,
+ 168, 169, 170, 171, 172, 173, 174, 175, 176, 177,
+ 178, 179, 180, 181, 182, 183, 184, 185, 186, 187,
+ 188, 189, 190, 191, 192, 193, 194, 195, 196, 197,
+ 198, 199, 200, 201, 202, 203, 204, 205, 206, 207,
+ 208, 209, 210, 211, 212, 213, 214, 215, 216, 217,
+ 218, 219, 220, 221, 222, 223, 224, 225, 226, 227,
+ 228, 229, 230, 231, 232, 233, 234, 235, 236, 237,
+ 238, 239, 240, 241, 242, 243, 244, 245, 246, 247,
+ 248, 249, 250, 251, 252, 253, 254, 255, 256, 257,
+ 258, 259, 260, 261, 262, 263, 264, 265, 266, 267,
+ 268, 269, 270, 271, 272, 273, 274, 275, 276, 277,
+ 278, 279, 280, 281, 282, 283, 284, 285, 286, 287,
+ 288, 289, 290, 291, 292, 293, 294, 295, 296, 297,
+ 298, 299, 300, 301, 302, 303, 304, 305, 306, 307,
+ 308, 309, 310, 311, 312, 313, 314, 315, 316, 317,
+ 318, 319, 320, 321, 322, 323, 324, 325, 326, 327,
+ 328, 329, 330, 331, 332, 333, 334, 335, 336, 337,
+ 338, -1, 340, 341, 342, 343, 344, 345, 346, 347,
+ 348, 349, 350, 351, 352, 353, -1, -1, 356, 357,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, 375, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, 386, 387,
+ 388, 389, 5, 6, 7, 8, 9, -1, 11, 12,
+ 13, 14, 15, 16, 17, 18, 19, 20, 21, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, 34, 35, 36, 37, 38, 39, 40, 41, 42,
+ 43, 44, 45, 46, 47, 48, 49, 50, 51, 52,
+ 53, 54, 55, 56, 57, 58, 59, 60, 61, 62,
+ 63, 64, 65, 66, 67, 68, 69, 70, 71, 72,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, 98, 99, 100, 101, 102,
+ 103, 104, 105, 106, 107, 108, 109, 110, 111, 112,
+ 113, 114, 115, 116, 117, 118, 119, 120, 121, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, 131, 132,
+ 133, 134, 135, 136, 137, 138, 139, 140, 141, 142,
+ 143, 144, 145, 146, 147, 148, 149, 150, 151, 152,
+ 153, 154, 155, 156, 157, 158, 159, 160, 161, 162,
+ 163, 164, 165, 166, 167, 168, 169, 170, 171, 172,
+ 173, 174, 175, 176, 177, 178, 179, 180, 181, 182,
+ 183, 184, 185, 186, 187, 188, 189, 190, 191, 192,
+ 193, 194, 195, 196, 197, 198, 199, 200, 201, 202,
+ 203, 204, 205, 206, 207, 208, 209, 210, 211, 212,
+ 213, 214, 215, 216, 217, 218, 219, 220, 221, 222,
+ 223, 224, 225, 226, 227, 228, 229, 230, 231, 232,
+ 233, 234, 235, 236, 237, 238, 239, 240, 241, 242,
+ 243, 244, 245, 246, 247, 248, 249, 250, 251, 252,
+ 253, 254, 255, 256, 257, 258, 259, 260, 261, 262,
+ 263, 264, 265, 266, 267, 268, 269, 270, 271, 272,
+ 273, 274, 275, 276, 277, 278, 279, 280, 281, 282,
+ 283, 284, 285, 286, 287, 288, 289, 290, 291, 292,
+ 293, 294, 295, 296, 297, 298, 299, 300, 301, 302,
+ 303, 304, 305, 306, 307, 308, 309, 310, 311, 312,
+ 313, 314, 315, 316, 317, 318, 319, 320, 321, 322,
+ 323, 324, 325, 326, 327, 328, 329, 330, 331, 332,
+ 333, 334, 335, 336, 337, 338, -1, -1, 341
+};
+
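+/* Note on the compressed-table encoding (hand-written sketch, not part of
+   the Bison output; names follow the standard yacc.c skeleton): yycheck[]
+   guards yytable[]. A lookahead token is meaningful in state S only when
+   the probed slot still belongs to S; otherwise the parser falls back to
+   the state's default action. Roughly:
+
+     int yyn = yypact[yystate] + yytoken;
+     if (0 <= yyn && yyn <= YYLAST && yycheck[yyn] == yytoken)
+       yyn = yytable[yyn];       -- take the encoded shift/reduce action
+     else
+       use yydefact[yystate];    -- default reduction for this state
+*/
+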
+ /* YYSTOS[STATE-NUM] -- The (internal number of the) accessing
+ symbol of state STATE-NUM. */
+static const yytype_uint16 yystos[] =
+{
+ 0, 3, 4, 5, 6, 7, 8, 9, 10, 11,
+ 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+ 33, 34, 35, 36, 37, 38, 39, 40, 41, 42,
+ 43, 44, 45, 46, 47, 48, 49, 50, 51, 52,
+ 53, 54, 55, 56, 57, 58, 59, 60, 61, 62,
+ 63, 64, 65, 66, 67, 68, 69, 70, 71, 72,
+ 73, 74, 75, 76, 77, 78, 79, 80, 81, 82,
+ 83, 84, 85, 86, 87, 88, 89, 90, 91, 92,
+ 93, 94, 95, 96, 97, 98, 99, 100, 101, 102,
+ 103, 104, 105, 106, 107, 108, 109, 110, 111, 112,
+ 113, 114, 115, 116, 117, 118, 119, 120, 121, 122,
+ 123, 124, 125, 126, 127, 128, 129, 130, 131, 132,
+ 133, 134, 135, 136, 137, 138, 139, 140, 141, 142,
+ 143, 144, 145, 146, 147, 148, 149, 150, 151, 152,
+ 153, 154, 155, 156, 157, 158, 159, 160, 161, 162,
+ 163, 164, 165, 166, 167, 168, 169, 170, 171, 172,
+ 173, 174, 175, 176, 177, 178, 179, 180, 181, 182,
+ 183, 184, 185, 186, 187, 188, 189, 190, 191, 192,
+ 193, 194, 195, 196, 197, 198, 199, 200, 201, 202,
+ 203, 204, 205, 206, 207, 208, 209, 210, 211, 212,
+ 213, 214, 215, 216, 217, 218, 219, 220, 221, 222,
+ 223, 224, 225, 226, 227, 228, 229, 230, 231, 232,
+ 233, 234, 235, 236, 237, 238, 239, 240, 241, 242,
+ 243, 244, 245, 246, 247, 248, 249, 250, 251, 252,
+ 253, 254, 255, 256, 257, 258, 259, 260, 261, 262,
+ 263, 264, 265, 266, 267, 268, 269, 270, 271, 272,
+ 273, 274, 275, 276, 277, 278, 279, 280, 281, 282,
+ 283, 284, 285, 286, 287, 288, 289, 290, 291, 292,
+ 293, 294, 295, 296, 297, 298, 299, 300, 301, 302,
+ 303, 304, 305, 306, 307, 308, 309, 310, 311, 312,
+ 313, 314, 315, 316, 317, 318, 319, 320, 321, 322,
+ 323, 324, 325, 326, 327, 328, 329, 330, 331, 332,
+ 333, 334, 335, 336, 337, 338, 341, 385, 399, 400,
+ 401, 402, 403, 404, 439, 440, 443, 444, 445, 446,
+ 450, 451, 452, 453, 454, 455, 458, 459, 460, 461,
+ 462, 464, 469, 470, 471, 511, 512, 513, 375, 375,
+ 340, 379, 470, 340, 385, 385, 514, 376, 382, 447,
+ 448, 449, 459, 464, 382, 385, 340, 340, 385, 460,
+ 464, 393, 466, 467, 0, 512, 340, 463, 81, 340,
+ 456, 457, 379, 473, 464, 377, 385, 465, 379, 490,
+ 448, 447, 449, 340, 340, 375, 384, 465, 379, 382,
+ 385, 442, 340, 342, 343, 344, 345, 346, 347, 348,
+ 349, 350, 351, 352, 353, 356, 357, 375, 386, 387,
+ 388, 389, 409, 410, 411, 413, 414, 415, 416, 417,
+ 418, 419, 420, 421, 462, 464, 468, 465, 376, 382,
+ 384, 376, 382, 472, 459, 464, 474, 475, 385, 378,
+ 420, 422, 423, 424, 425, 426, 427, 428, 429, 430,
+ 431, 432, 433, 377, 385, 22, 23, 24, 26, 27,
+ 28, 29, 30, 31, 32, 339, 377, 379, 380, 385,
+ 420, 433, 435, 437, 439, 443, 462, 464, 480, 481,
+ 482, 483, 491, 492, 493, 494, 497, 498, 501, 502,
+ 503, 510, 515, 465, 384, 465, 379, 435, 478, 384,
+ 441, 340, 382, 385, 420, 420, 437, 356, 357, 377,
+ 381, 376, 376, 382, 338, 435, 375, 420, 382, 394,
+ 340, 433, 438, 457, 474, 464, 340, 476, 477, 380,
+ 475, 390, 391, 392, 387, 389, 354, 355, 358, 359,
+ 393, 394, 360, 361, 397, 396, 395, 362, 364, 363,
+ 398, 378, 378, 433, 385, 385, 505, 375, 375, 385,
+ 385, 437, 375, 437, 383, 375, 377, 380, 484, 365,
+ 366, 367, 368, 369, 370, 371, 372, 373, 374, 384,
+ 436, 382, 385, 380, 481, 494, 498, 503, 478, 384,
+ 478, 479, 478, 474, 340, 376, 412, 437, 340, 435,
+ 420, 380, 476, 465, 382, 385, 420, 420, 420, 422,
+ 422, 423, 423, 424, 424, 424, 424, 425, 425, 426,
+ 427, 428, 429, 430, 431, 434, 378, 481, 506, 437,
+ 385, 437, 383, 504, 340, 516, 517, 491, 435, 435,
+ 478, 380, 382, 380, 378, 385, 477, 437, 339, 480,
+ 492, 507, 376, 376, 437, 452, 459, 496, 375, 378,
+ 382, 485, 380, 478, 383, 375, 496, 508, 509, 487,
+ 488, 489, 495, 499, 340, 376, 438, 378, 517, 380,
+ 435, 437, 385, 376, 25, 483, 482, 379, 384, 482,
+ 486, 490, 376, 376, 437, 486, 487, 491, 500, 478,
+ 385, 380
+};
+
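+/* Usage note (illustrative, assuming the behaviour of the standard yacc.c
+   skeleton rather than anything specific to this file): yystos[] maps each
+   state to the grammar symbol that was shifted or reduced to enter it. The
+   skeleton consults it when popping states during error recovery, so that
+   yydestruct can free the right semantic value, and in debug traces of the
+   parser stack. */
+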
+ /* YYR1[YYN] -- Symbol number of symbol that rule YYN derives. */
+static const yytype_uint16 yyr1[] =
+{
+ 0, 408, 409, 410, 410, 410, 410, 410, 410, 410,
+ 410, 410, 410, 410, 410, 410, 410, 411, 411, 411,
+ 411, 411, 411, 412, 413, 414, 415, 415, 416, 416,
+ 417, 417, 418, 419, 419, 419, 420, 420, 420, 420,
+ 421, 421, 421, 421, 422, 422, 422, 422, 423, 423,
+ 423, 424, 424, 424, 425, 425, 425, 425, 425, 426,
+ 426, 426, 427, 427, 428, 428, 429, 429, 430, 430,
+ 431, 431, 432, 432, 433, 434, 433, 435, 435, 436,
+ 436, 436, 436, 436, 436, 436, 436, 436, 436, 436,
+ 437, 437, 438, 439, 439, 439, 439, 439, 439, 439,
+ 439, 439, 441, 440, 442, 442, 443, 444, 444, 445,
+ 445, 446, 447, 447, 448, 448, 448, 448, 449, 450,
+ 450, 450, 450, 450, 451, 451, 451, 451, 451, 452,
+ 452, 453, 454, 454, 454, 454, 454, 454, 454, 454,
+ 455, 456, 456, 457, 457, 457, 458, 459, 459, 460,
+ 460, 460, 460, 460, 460, 460, 461, 461, 461, 461,
+ 461, 461, 461, 461, 461, 461, 461, 461, 461, 461,
+ 461, 461, 461, 461, 461, 461, 461, 461, 461, 461,
+ 461, 461, 461, 461, 461, 462, 463, 463, 464, 464,
+ 465, 465, 465, 465, 466, 466, 467, 468, 468, 469,
+ 469, 469, 469, 469, 469, 469, 469, 469, 469, 469,
+ 469, 469, 469, 469, 469, 469, 469, 469, 469, 469,
+ 469, 469, 469, 469, 469, 469, 469, 469, 469, 469,
+ 469, 469, 469, 469, 469, 469, 469, 469, 469, 469,
+ 469, 469, 469, 469, 469, 469, 469, 469, 469, 469,
+ 469, 469, 469, 469, 469, 469, 469, 469, 469, 469,
+ 469, 469, 469, 469, 469, 469, 469, 469, 469, 469,
+ 469, 469, 469, 469, 469, 469, 469, 469, 469, 469,
+ 469, 469, 469, 469, 469, 469, 469, 469, 469, 469,
+ 469, 469, 469, 469, 469, 469, 469, 469, 469, 469,
+ 469, 469, 469, 469, 469, 469, 469, 469, 469, 469,
+ 469, 469, 469, 469, 469, 469, 469, 469, 469, 469,
+ 469, 469, 469, 469, 469, 469, 469, 469, 469, 469,
+ 469, 469, 469, 469, 469, 469, 469, 469, 469, 469,
+ 469, 469, 469, 469, 469, 469, 469, 469, 469, 469,
+ 469, 469, 469, 469, 469, 469, 469, 469, 469, 469,
+ 469, 469, 469, 469, 469, 469, 469, 469, 469, 469,
+ 469, 469, 469, 469, 469, 469, 469, 469, 469, 469,
+ 469, 469, 469, 469, 469, 469, 469, 469, 469, 469,
+ 469, 469, 469, 469, 469, 469, 469, 469, 469, 469,
+ 469, 469, 469, 469, 469, 469, 469, 469, 469, 469,
+ 469, 469, 469, 469, 469, 469, 469, 469, 469, 469,
+ 469, 469, 469, 469, 469, 469, 469, 469, 469, 469,
+ 469, 469, 469, 469, 469, 469, 469, 469, 469, 469,
+ 469, 469, 469, 469, 469, 469, 469, 469, 469, 469,
+ 469, 469, 469, 469, 469, 469, 469, 469, 469, 469,
+ 469, 469, 469, 469, 469, 469, 469, 469, 469, 469,
+ 469, 469, 469, 469, 469, 469, 469, 469, 469, 469,
+ 469, 469, 469, 469, 469, 469, 469, 470, 470, 470,
+ 472, 471, 473, 471, 474, 474, 475, 475, 476, 476,
+ 477, 477, 478, 478, 478, 479, 479, 480, 481, 481,
+ 482, 482, 482, 482, 482, 482, 482, 483, 484, 485,
+ 483, 486, 486, 488, 487, 489, 487, 490, 490, 491,
+ 491, 492, 492, 493, 493, 494, 495, 495, 496, 496,
+ 497, 497, 499, 498, 500, 500, 501, 501, 502, 502,
+ 504, 503, 505, 503, 506, 503, 507, 507, 508, 508,
+ 509, 509, 510, 510, 510, 510, 510, 511, 511, 512,
+ 512, 512, 514, 513, 515, 516, 516, 517, 517
+};
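+/* In other words: after reducing by rule N, the parser looks up yyr1[N]
+   to learn which nonterminal was just derived, and uses that symbol
+   number to select the goto state to push.  */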
+
+ /* YYR2[YYN] -- Number of symbols on the right hand side of rule YYN. */
+static const yytype_uint8 yyr2[] =
+{
+ 0, 2, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 3, 1, 4, 1,
+ 3, 2, 2, 1, 1, 1, 2, 2, 2, 1,
+ 2, 3, 2, 1, 1, 1, 1, 2, 2, 2,
+ 1, 1, 1, 1, 1, 3, 3, 3, 1, 3,
+ 3, 1, 3, 3, 1, 3, 3, 3, 3, 1,
+ 3, 3, 1, 3, 1, 3, 1, 3, 1, 3,
+ 1, 3, 1, 3, 1, 0, 6, 1, 3, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 3, 1, 2, 2, 4, 2, 3, 4, 2,
+ 3, 4, 0, 6, 2, 3, 2, 1, 1, 2,
+ 3, 3, 2, 3, 2, 1, 2, 1, 1, 1,
+ 3, 4, 6, 5, 1, 2, 3, 5, 4, 1,
+ 2, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 4, 1, 3, 1, 3, 1, 1, 1, 2, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 4, 1, 1, 3, 2, 3,
+ 2, 3, 3, 4, 1, 0, 3, 1, 3, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 0, 6, 0, 5, 1, 2, 3, 4, 1, 3,
+ 1, 2, 1, 3, 4, 1, 3, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 2, 0, 0,
+ 5, 1, 1, 0, 2, 0, 2, 2, 3, 1,
+ 2, 1, 2, 1, 2, 5, 3, 1, 1, 4,
+ 1, 2, 0, 8, 0, 1, 3, 2, 1, 2,
+ 0, 6, 0, 8, 0, 7, 1, 1, 1, 0,
+ 2, 3, 2, 2, 2, 3, 2, 1, 2, 1,
+ 1, 1, 0, 3, 5, 1, 3, 1, 4
+};
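+/* Together, yyr1 and yyr2 drive every reduction: reducing by rule N pops
+   yyr2[N] entries from the state and value stacks, then pushes the goto
+   state selected for nonterminal yyr1[N].  Roughly, as the reduction
+   epilogue further down in this file does:
+
+       yylen   = yyr2[yyn];                  // symbols on the RHS
+       pop yylen entries from both stacks;
+       yystate = goto state for yyr1[yyn];   // push the LHS
+*/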
+
+
+#define yyerrok (yyerrstatus = 0)
+#define yyclearin (yychar = YYEMPTY)
+#define YYEMPTY (-2)
+#define YYEOF 0
+
+#define YYACCEPT goto yyacceptlab
+#define YYABORT goto yyabortlab
+#define YYERROR goto yyerrorlab
+
+
+#define YYRECOVERING() (!!yyerrstatus)
+
+#define YYBACKUP(Token, Value) \
+do \
+ if (yychar == YYEMPTY) \
+ { \
+ yychar = (Token); \
+ yylval = (Value); \
+ YYPOPSTACK (yylen); \
+ yystate = *yyssp; \
+ goto yybackup; \
+ } \
+ else \
+ { \
+ yyerror (pParseContext, YY_("syntax error: cannot back up")); \
+ YYERROR; \
+ } \
+while (0)
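+/* YYBACKUP lets a semantic action substitute a new lookahead token: it
+   un-shifts the current rule's symbols and re-enters yybackup with the
+   supplied token and value.  It only works when no lookahead is already
+   pending (yychar == YYEMPTY); otherwise it reports a syntax error.  */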
+
+/* Error token number */
+#define YYTERROR 1
+#define YYERRCODE 256
+
+
+
+/* Enable debugging if requested. */
+#if YYDEBUG
+
+# ifndef YYFPRINTF
+# include <stdio.h> /* INFRINGES ON USER NAME SPACE */
+# define YYFPRINTF fprintf
+# endif
+
+# define YYDPRINTF(Args) \
+do { \
+ if (yydebug) \
+ YYFPRINTF Args; \
+} while (0)
+
+/* This macro is provided for backward compatibility. */
+#ifndef YY_LOCATION_PRINT
+# define YY_LOCATION_PRINT(File, Loc) ((void) 0)
+#endif
+
+
+# define YY_SYMBOL_PRINT(Title, Type, Value, Location) \
+do { \
+ if (yydebug) \
+ { \
+ YYFPRINTF (stderr, "%s ", Title); \
+ yy_symbol_print (stderr, \
+ Type, Value, pParseContext); \
+ YYFPRINTF (stderr, "\n"); \
+ } \
+} while (0)
+
+
+/*----------------------------------------.
+| Print this symbol's value on YYOUTPUT. |
+`----------------------------------------*/
+
+static void
+yy_symbol_value_print (FILE *yyoutput, int yytype, YYSTYPE const * const yyvaluep, glslang::TParseContext* pParseContext)
+{
+ FILE *yyo = yyoutput;
+ YYUSE (yyo);
+ YYUSE (pParseContext);
+ if (!yyvaluep)
+ return;
+# ifdef YYPRINT
+ if (yytype < YYNTOKENS)
+ YYPRINT (yyoutput, yytoknum[yytype], *yyvaluep);
+# endif
+ YYUSE (yytype);
+}
+
+
+/*--------------------------------.
+| Print this symbol on YYOUTPUT. |
+`--------------------------------*/
+
+static void
+yy_symbol_print (FILE *yyoutput, int yytype, YYSTYPE const * const yyvaluep, glslang::TParseContext* pParseContext)
+{
+ YYFPRINTF (yyoutput, "%s %s (",
+ yytype < YYNTOKENS ? "token" : "nterm", yytname[yytype]);
+
+ yy_symbol_value_print (yyoutput, yytype, yyvaluep, pParseContext);
+ YYFPRINTF (yyoutput, ")");
+}
+
+/*------------------------------------------------------------------.
+| yy_stack_print -- Print the state stack from its BOTTOM up to its |
+| TOP (included). |
+`------------------------------------------------------------------*/
+
+static void
+yy_stack_print (yytype_int16 *yybottom, yytype_int16 *yytop)
+{
+ YYFPRINTF (stderr, "Stack now");
+ for (; yybottom <= yytop; yybottom++)
+ {
+ int yybot = *yybottom;
+ YYFPRINTF (stderr, " %d", yybot);
+ }
+ YYFPRINTF (stderr, "\n");
+}
+
+# define YY_STACK_PRINT(Bottom, Top) \
+do { \
+ if (yydebug) \
+ yy_stack_print ((Bottom), (Top)); \
+} while (0)
+
+
+/*------------------------------------------------.
+| Report that the YYRULE is going to be reduced. |
+`------------------------------------------------*/
+
+static void
+yy_reduce_print (yytype_int16 *yyssp, YYSTYPE *yyvsp, int yyrule, glslang::TParseContext* pParseContext)
+{
+ unsigned long int yylno = yyrline[yyrule];
+ int yynrhs = yyr2[yyrule];
+ int yyi;
+ YYFPRINTF (stderr, "Reducing stack by rule %d (line %lu):\n",
+ yyrule - 1, yylno);
+ /* The symbols being reduced. */
+ for (yyi = 0; yyi < yynrhs; yyi++)
+ {
+ YYFPRINTF (stderr, " $%d = ", yyi + 1);
+ yy_symbol_print (stderr,
+ yystos[yyssp[yyi + 1 - yynrhs]],
+ &(yyvsp[(yyi + 1) - (yynrhs)])
+ , pParseContext);
+ YYFPRINTF (stderr, "\n");
+ }
+}
+
+# define YY_REDUCE_PRINT(Rule) \
+do { \
+ if (yydebug) \
+ yy_reduce_print (yyssp, yyvsp, Rule, pParseContext); \
+} while (0)
+
+/* Nonzero means print parse trace. It is left uninitialized so that
+ multiple parsers can coexist. */
+int yydebug;
+#else /* !YYDEBUG */
+# define YYDPRINTF(Args)
+# define YY_SYMBOL_PRINT(Title, Type, Value, Location)
+# define YY_STACK_PRINT(Bottom, Top)
+# define YY_REDUCE_PRINT(Rule)
+#endif /* !YYDEBUG */
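+/* With YYDEBUG disabled, all four tracing macros expand to nothing, so
+   the release parser carries no tracing overhead; building with YYDEBUG
+   and setting the global 'yydebug' nonzero turns the traces back on.  */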
+
+
+/* YYINITDEPTH -- initial size of the parser's stacks. */
+#ifndef YYINITDEPTH
+# define YYINITDEPTH 200
+#endif
+
+/* YYMAXDEPTH -- maximum size the stacks can grow to (effective only
+ if the built-in stack extension method is used).
+
+ Do not make this value too large; the results are undefined if
+ YYSTACK_ALLOC_MAXIMUM < YYSTACK_BYTES (YYMAXDEPTH)
+ evaluated with infinite-precision integer arithmetic. */
+
+#ifndef YYMAXDEPTH
+# define YYMAXDEPTH 10000
+#endif
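+/* The stacks therefore start at YYINITDEPTH (200) entries and, when the
+   built-in extension path is taken, double until capped at YYMAXDEPTH
+   (10000).  As a rough upper bound, a maxed-out parse uses about
+   YYMAXDEPTH * (sizeof (yytype_int16) + sizeof (YYSTYPE)) bytes.  */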
+
+
+#if YYERROR_VERBOSE
+
+# ifndef yystrlen
+# if defined __GLIBC__ && defined _STRING_H
+# define yystrlen strlen
+# else
+/* Return the length of YYSTR. */
+static YYSIZE_T
+yystrlen (const char *yystr)
+{
+ YYSIZE_T yylen;
+ for (yylen = 0; yystr[yylen]; yylen++)
+ continue;
+ return yylen;
+}
+# endif
+# endif
+
+# ifndef yystpcpy
+# if defined __GLIBC__ && defined _STRING_H && defined _GNU_SOURCE
+# define yystpcpy stpcpy
+# else
+/* Copy YYSRC to YYDEST, returning the address of the terminating '\0' in
+ YYDEST. */
+static char *
+yystpcpy (char *yydest, const char *yysrc)
+{
+ char *yyd = yydest;
+ const char *yys = yysrc;
+
+ while ((*yyd++ = *yys++) != '\0')
+ continue;
+
+ return yyd - 1;
+}
+# endif
+# endif
+
+# ifndef yytnamerr
+/* Copy to YYRES the contents of YYSTR after stripping away unnecessary
+ quotes and backslashes, so that it's suitable for yyerror. The
+ heuristic is that double-quoting is unnecessary unless the string
+ contains an apostrophe, a comma, or backslash (other than
+ backslash-backslash). YYSTR is taken from yytname. If YYRES is
+ null, do not copy; instead, return the length of what the result
+ would have been. */
+static YYSIZE_T
+yytnamerr (char *yyres, const char *yystr)
+{
+ if (*yystr == '"')
+ {
+ YYSIZE_T yyn = 0;
+ char const *yyp = yystr;
+
+ for (;;)
+ switch (*++yyp)
+ {
+ case '\'':
+ case ',':
+ goto do_not_strip_quotes;
+
+ case '\\':
+ if (*++yyp != '\\')
+ goto do_not_strip_quotes;
+ /* Fall through. */
+ default:
+ if (yyres)
+ yyres[yyn] = *yyp;
+ yyn++;
+ break;
+
+ case '"':
+ if (yyres)
+ yyres[yyn] = '\0';
+ return yyn;
+ }
+ do_not_strip_quotes: ;
+ }
+
+ if (! yyres)
+ return yystrlen (yystr);
+
+ return yystpcpy (yyres, yystr) - yyres;
+}
+# endif
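+/* Example: a yytname entry such as "\"+=\"" is emitted as plain += in
+   error messages, while an entry containing an apostrophe, a comma, or
+   a lone backslash keeps its surrounding double quotes.  Calling
+   yytnamerr with yyres == NULL is the sizing pass.  */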
+
+/* Copy into *YYMSG, which is of size *YYMSG_ALLOC, an error message
+ about the unexpected token YYTOKEN for the state stack whose top is
+ YYSSP.
+
+ Return 0 if *YYMSG was successfully written. Return 1 if *YYMSG is
+ not large enough to hold the message. In that case, also set
+ *YYMSG_ALLOC to the required number of bytes. Return 2 if the
+ required number of bytes is too large to store. */
+static int
+yysyntax_error (YYSIZE_T *yymsg_alloc, char **yymsg,
+ yytype_int16 *yyssp, int yytoken)
+{
+ YYSIZE_T yysize0 = yytnamerr (YY_NULLPTR, yytname[yytoken]);
+ YYSIZE_T yysize = yysize0;
+ enum { YYERROR_VERBOSE_ARGS_MAXIMUM = 5 };
+ /* Internationalized format string. */
+ const char *yyformat = YY_NULLPTR;
+ /* Arguments of yyformat. */
+ char const *yyarg[YYERROR_VERBOSE_ARGS_MAXIMUM];
+ /* Number of reported tokens (one for the "unexpected", one per
+ "expected"). */
+ int yycount = 0;
+
+ /* There are many possibilities here to consider:
+ - If this state is a consistent state with a default action, then
+ the only way this function was invoked is if the default action
+ is an error action. In that case, don't check for expected
+ tokens because there are none.
+ - The only way there can be no lookahead present (in yychar) is if
+ this state is a consistent state with a default action. Thus,
+ detecting the absence of a lookahead is sufficient to determine
+ that there is no unexpected or expected token to report. In that
+ case, just report a simple "syntax error".
+ - Don't assume there isn't a lookahead just because this state is a
+ consistent state with a default action. There might have been a
+ previous inconsistent state, consistent state with a non-default
+ action, or user semantic action that manipulated yychar.
+ - Of course, the expected token list depends on states to have
+ correct lookahead information, and it depends on the parser not
+ to perform extra reductions after fetching a lookahead from the
+ scanner and before detecting a syntax error. Thus, state merging
+ (from LALR or IELR) and default reductions corrupt the expected
+ token list. However, the list is correct for canonical LR with
+ one exception: it will still contain any token that will not be
+ accepted due to an error action in a later state.
+ */
+ if (yytoken != YYEMPTY)
+ {
+ int yyn = yypact[*yyssp];
+ yyarg[yycount++] = yytname[yytoken];
+ if (!yypact_value_is_default (yyn))
+ {
+ /* Start YYX at -YYN if negative to avoid negative indexes in
+ YYCHECK. In other words, skip the first -YYN actions for
+ this state because they are default actions. */
+ int yyxbegin = yyn < 0 ? -yyn : 0;
+ /* Stay within bounds of both yycheck and yytname. */
+ int yychecklim = YYLAST - yyn + 1;
+ int yyxend = yychecklim < YYNTOKENS ? yychecklim : YYNTOKENS;
+ int yyx;
+
+ for (yyx = yyxbegin; yyx < yyxend; ++yyx)
+ if (yycheck[yyx + yyn] == yyx && yyx != YYTERROR
+ && !yytable_value_is_error (yytable[yyx + yyn]))
+ {
+ if (yycount == YYERROR_VERBOSE_ARGS_MAXIMUM)
+ {
+ yycount = 1;
+ yysize = yysize0;
+ break;
+ }
+ yyarg[yycount++] = yytname[yyx];
+ {
+ YYSIZE_T yysize1 = yysize + yytnamerr (YY_NULLPTR, yytname[yyx]);
+ if (! (yysize <= yysize1
+ && yysize1 <= YYSTACK_ALLOC_MAXIMUM))
+ return 2;
+ yysize = yysize1;
+ }
+ }
+ }
+ }
+
+ switch (yycount)
+ {
+# define YYCASE_(N, S) \
+ case N: \
+ yyformat = S; \
+ break
+ YYCASE_(0, YY_("syntax error"));
+ YYCASE_(1, YY_("syntax error, unexpected %s"));
+ YYCASE_(2, YY_("syntax error, unexpected %s, expecting %s"));
+ YYCASE_(3, YY_("syntax error, unexpected %s, expecting %s or %s"));
+ YYCASE_(4, YY_("syntax error, unexpected %s, expecting %s or %s or %s"));
+ YYCASE_(5, YY_("syntax error, unexpected %s, expecting %s or %s or %s or %s"));
+# undef YYCASE_
+ }
+
+ {
+ YYSIZE_T yysize1 = yysize + yystrlen (yyformat);
+ if (! (yysize <= yysize1 && yysize1 <= YYSTACK_ALLOC_MAXIMUM))
+ return 2;
+ yysize = yysize1;
+ }
+
+ if (*yymsg_alloc < yysize)
+ {
+ *yymsg_alloc = 2 * yysize;
+ if (! (yysize <= *yymsg_alloc
+ && *yymsg_alloc <= YYSTACK_ALLOC_MAXIMUM))
+ *yymsg_alloc = YYSTACK_ALLOC_MAXIMUM;
+ return 1;
+ }
+
+ /* Avoid sprintf, as that infringes on the user's name space.
+ Avoid undefined behavior even if the translation
+ produced a string with the wrong number of "%s"s. */
+ {
+ char *yyp = *yymsg;
+ int yyi = 0;
+ while ((*yyp = *yyformat) != '\0')
+ if (*yyp == '%' && yyformat[1] == 's' && yyi < yycount)
+ {
+ yyp += yytnamerr (yyp, yyarg[yyi++]);
+ yyformat += 2;
+ }
+ else
+ {
+ yyp++;
+ yyformat++;
+ }
+ }
+ return 0;
+}
+#endif /* YYERROR_VERBOSE */
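+/* The caller drives a retry loop around yysyntax_error: a return of 1
+   means the buffer was too small and *yymsg_alloc now holds the size
+   needed, so the caller grows the buffer and calls again; a return of 2
+   means the message cannot be built at all.  The result follows the
+   YYCASE_ formats above, e.g. "syntax error, unexpected %s, expecting
+   %s", with the %s's filled from yytname.  */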
+
+/*-------------------------------------------------.
+| Release the memory associated with this symbol.  |
+`-------------------------------------------------*/
+
+static void
+yydestruct (const char *yymsg, int yytype, YYSTYPE *yyvaluep, glslang::TParseContext* pParseContext)
+{
+ YYUSE (yyvaluep);
+ YYUSE (pParseContext);
+ if (!yymsg)
+ yymsg = "Deleting";
+ YY_SYMBOL_PRINT (yymsg, yytype, yyvaluep, yylocationp);
+
+ YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN
+ YYUSE (yytype);
+ YY_IGNORE_MAYBE_UNINITIALIZED_END
+}
+
+
+
+
+/*----------.
+| yyparse. |
+`----------*/
+
+int
+yyparse (glslang::TParseContext* pParseContext)
+{
+/* The lookahead symbol. */
+int yychar;
+
+
+/* The semantic value of the lookahead symbol. */
+/* Default value used for initialization, for pacifying older GCCs
+ or non-GCC compilers. */
+YY_INITIAL_VALUE (static YYSTYPE yyval_default;)
+YYSTYPE yylval YY_INITIAL_VALUE (= yyval_default);
+
+ /* Number of syntax errors so far. */
+ int yynerrs;
+
+ int yystate;
+ /* Number of tokens to shift before error messages are enabled again. */
+ int yyerrstatus;
+
+ /* The stacks and their tools:
+ 'yyss': related to states.
+ 'yyvs': related to semantic values.
+
+ Refer to the stacks through separate pointers, to allow yyoverflow
+ to reallocate them elsewhere. */
+
+ /* The state stack. */
+ yytype_int16 yyssa[YYINITDEPTH];
+ yytype_int16 *yyss;
+ yytype_int16 *yyssp;
+
+ /* The semantic value stack. */
+ YYSTYPE yyvsa[YYINITDEPTH];
+ YYSTYPE *yyvs;
+ YYSTYPE *yyvsp;
+
+ YYSIZE_T yystacksize;
+
+ int yyn;
+ int yyresult;
+ /* Lookahead token as an internal (translated) token number. */
+ int yytoken = 0;
+ /* The variables used to return semantic value and location from the
+ action routines. */
+ YYSTYPE yyval;
+
+#if YYERROR_VERBOSE
+ /* Buffer for error messages, and its allocated size. */
+ char yymsgbuf[128];
+ char *yymsg = yymsgbuf;
+ YYSIZE_T yymsg_alloc = sizeof yymsgbuf;
+#endif
+
+#define YYPOPSTACK(N) (yyvsp -= (N), yyssp -= (N))
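+/* Popping must keep the two stacks in lockstep: yyssp (states) and
+   yyvsp (semantic values) always point at entries of the same depth.  */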
+
+ /* The number of symbols on the RHS of the reduced rule.
+ Keep to zero when no symbol should be popped. */
+ int yylen = 0;
+
+ yyssp = yyss = yyssa;
+ yyvsp = yyvs = yyvsa;
+ yystacksize = YYINITDEPTH;
+
+ YYDPRINTF ((stderr, "Starting parse\n"));
+
+ yystate = 0;
+ yyerrstatus = 0;
+ yynerrs = 0;
+ yychar = YYEMPTY; /* Cause a token to be read. */
+ goto yysetstate;
+
+/*------------------------------------------------------------.
+| yynewstate -- Push a new state, which is found in yystate. |
+`------------------------------------------------------------*/
+ yynewstate:
+ /* In all cases, when you get here, the value and location stacks
+ have just been pushed. So pushing a state here evens the stacks. */
+ yyssp++;
+
+ yysetstate:
+ *yyssp = yystate;
+
+ if (yyss + yystacksize - 1 <= yyssp)
+ {
+ /* Get the current used size of the three stacks, in elements. */
+ YYSIZE_T yysize = yyssp - yyss + 1;
+
+#ifdef yyoverflow
+ {
+ /* Give user a chance to reallocate the stack. Use copies of
+ these so that the &'s don't force the real ones into
+ memory. */
+ YYSTYPE *yyvs1 = yyvs;
+ yytype_int16 *yyss1 = yyss;
+
+ /* Each stack pointer address is followed by the size of the
+ data in use in that stack, in bytes. This used to be a
+ conditional around just the two extra args, but that might
+ be undefined if yyoverflow is a macro. */
+ yyoverflow (YY_("memory exhausted"),
+ &yyss1, yysize * sizeof (*yyssp),
+ &yyvs1, yysize * sizeof (*yyvsp),
+ &yystacksize);
+
+ yyss = yyss1;
+ yyvs = yyvs1;
+ }
+#else /* no yyoverflow */
+# ifndef YYSTACK_RELOCATE
+ goto yyexhaustedlab;
+# else
+ /* Extend the stack our own way. */
+ if (YYMAXDEPTH <= yystacksize)
+ goto yyexhaustedlab;
+ yystacksize *= 2;
+ if (YYMAXDEPTH < yystacksize)
+ yystacksize = YYMAXDEPTH;
+
+ {
+ yytype_int16 *yyss1 = yyss;
+ union yyalloc *yyptr =
+ (union yyalloc *) YYSTACK_ALLOC (YYSTACK_BYTES (yystacksize));
+ if (! yyptr)
+ goto yyexhaustedlab;
+ YYSTACK_RELOCATE (yyss_alloc, yyss);
+ YYSTACK_RELOCATE (yyvs_alloc, yyvs);
+# undef YYSTACK_RELOCATE
+ if (yyss1 != yyssa)
+ YYSTACK_FREE (yyss1);
+ }
+# endif
+#endif /* no yyoverflow */
+
+ yyssp = yyss + yysize - 1;
+ yyvsp = yyvs + yysize - 1;
+
+ YYDPRINTF ((stderr, "Stack size increased to %lu\n",
+ (unsigned long int) yystacksize));
+
+ if (yyss + yystacksize - 1 <= yyssp)
+ YYABORT;
+ }
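+ /* The block above tries three growth strategies in order: a
+ user-supplied yyoverflow hook, the skeleton's own YYSTACK_RELOCATE
+ doubling (bounded by YYMAXDEPTH), and finally yyexhaustedlab or
+ YYABORT when neither is available or the cap is reached. */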
+
+ YYDPRINTF ((stderr, "Entering state %d\n", yystate));
+
+ if (yystate == YYFINAL)
+ YYACCEPT;
+
+ goto yybackup;
+
+/*-----------.
+| yybackup. |
+`-----------*/
+yybackup:
+
+ /* Do appropriate processing given the current state. Read a
+ lookahead token if we need one and don't already have one. */
+
+ /* First try to decide what to do without reference to lookahead token. */
+ yyn = yypact[yystate];
+ if (yypact_value_is_default (yyn))
+ goto yydefault;
+
+ /* Not known => get a lookahead token if we don't already have one. */
+
+ /* YYCHAR is either YYEMPTY or YYEOF or a valid lookahead symbol. */
+ if (yychar == YYEMPTY)
+ {
+ YYDPRINTF ((stderr, "Reading a token: "));
+ yychar = yylex (&yylval, parseContext);
+ }
+
+ if (yychar <= YYEOF)
+ {
+ yychar = yytoken = YYEOF;
+ YYDPRINTF ((stderr, "Now at end of input.\n"));
+ }
+ else
+ {
+ yytoken = YYTRANSLATE (yychar);
+ YY_SYMBOL_PRINT ("Next token is", yytoken, &yylval, &yylloc);
+ }
+
+ /* If the proper action on seeing token YYTOKEN is to reduce or to
+ detect an error, take that action. */
+ yyn += yytoken;
+ if (yyn < 0 || YYLAST < yyn || yycheck[yyn] != yytoken)
+ goto yydefault;
+ yyn = yytable[yyn];
+ if (yyn <= 0)
+ {
+ if (yytable_value_is_error (yyn))
+ goto yyerrlab;
+ yyn = -yyn;
+ goto yyreduce;
+ }
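+ /* The lookup above is the classic compressed-table scheme: yypact
+ gives a per-state base index into yytable, yycheck verifies that the
+ entry really belongs to this (state, token) pair, and the sign of the
+ yytable value distinguishes a shift (positive: new state) from a
+ reduction (negative: negated rule number). */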
+
+ /* Count tokens shifted since error; after three, turn off error
+ status. */
+ if (yyerrstatus)
+ yyerrstatus--;
+
+ /* Shift the lookahead token. */
+ YY_SYMBOL_PRINT ("Shifting", yytoken, &yylval, &yylloc);
+
+ /* Discard the shifted token. */
+ yychar = YYEMPTY;
+
+ yystate = yyn;
+ YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN
+ *++yyvsp = yylval;
+ YY_IGNORE_MAYBE_UNINITIALIZED_END
+
+ goto yynewstate;
+
+
+/*-----------------------------------------------------------.
+| yydefault -- do the default action for the current state. |
+`-----------------------------------------------------------*/
+yydefault:
+ yyn = yydefact[yystate];
+ if (yyn == 0)
+ goto yyerrlab;
+ goto yyreduce;
+
+
+/*-----------------------------.
+| yyreduce -- Do a reduction. |
+`-----------------------------*/
+yyreduce:
+ /* yyn is the number of a rule to reduce with. */
+ yylen = yyr2[yyn];
+
+ /* If YYLEN is nonzero, implement the default value of the action:
+ '$$ = $1'.
+
+ Otherwise, the following line sets YYVAL to garbage.
+ This behavior is undocumented and Bison
+ users should not rely upon it. Assigning to YYVAL
+ unconditionally makes the parser a bit smaller, and it avoids a
+ GCC warning that YYVAL may be used uninitialized. */
+ yyval = yyvsp[1-yylen];
+
+
+ YY_REDUCE_PRINT (yyn);
+ switch (yyn)
+ {
+ case 2:
+#line 302 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.intermTypedNode) = parseContext.handleVariable((yyvsp[0].lex).loc, (yyvsp[0].lex).symbol, (yyvsp[0].lex).string);
+ }
+#line 4159 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
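+/* Each action below carries paired #line directives: the first points
+   compiler diagnostics at the hand-written grammar
+   (MachineIndependent/glslang.y), the second resets them to this
+   generated file after the action body.  */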
+
+ case 3:
+#line 308 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode);
+ }
+#line 4167 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 4:
+#line 311 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.explicitInt32Check((yyvsp[0].lex).loc, "32-bit signed literal");
+ (yyval.interm.intermTypedNode) = parseContext.intermediate.addConstantUnion((yyvsp[0].lex).i, (yyvsp[0].lex).loc, true);
+ }
+#line 4176 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 5:
+#line 315 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.explicitInt32Check((yyvsp[0].lex).loc, "32-bit signed literal");
+ (yyval.interm.intermTypedNode) = parseContext.intermediate.addConstantUnion((yyvsp[0].lex).u, (yyvsp[0].lex).loc, true);
+ }
+#line 4185 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 6:
+#line 319 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.intermTypedNode) = parseContext.intermediate.addConstantUnion((yyvsp[0].lex).i, (yyvsp[0].lex).loc, true);
+ }
+#line 4193 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 7:
+#line 322 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.fullIntegerCheck((yyvsp[0].lex).loc, "unsigned literal");
+ (yyval.interm.intermTypedNode) = parseContext.intermediate.addConstantUnion((yyvsp[0].lex).u, (yyvsp[0].lex).loc, true);
+ }
+#line 4202 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 8:
+#line 326 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.int64Check((yyvsp[0].lex).loc, "64-bit integer literal");
+ (yyval.interm.intermTypedNode) = parseContext.intermediate.addConstantUnion((yyvsp[0].lex).i64, (yyvsp[0].lex).loc, true);
+ }
+#line 4211 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 9:
+#line 330 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.int64Check((yyvsp[0].lex).loc, "64-bit unsigned integer literal");
+ (yyval.interm.intermTypedNode) = parseContext.intermediate.addConstantUnion((yyvsp[0].lex).u64, (yyvsp[0].lex).loc, true);
+ }
+#line 4220 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 10:
+#line 334 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.explicitInt16Check((yyvsp[0].lex).loc, "16-bit integer literal");
+ (yyval.interm.intermTypedNode) = parseContext.intermediate.addConstantUnion((short)(yyvsp[0].lex).i, (yyvsp[0].lex).loc, true);
+ }
+#line 4229 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 11:
+#line 338 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.explicitInt16Check((yyvsp[0].lex).loc, "16-bit unsigned integer literal");
+ (yyval.interm.intermTypedNode) = parseContext.intermediate.addConstantUnion((unsigned short)(yyvsp[0].lex).u, (yyvsp[0].lex).loc, true);
+ }
+#line 4238 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 12:
+#line 342 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.intermTypedNode) = parseContext.intermediate.addConstantUnion((yyvsp[0].lex).d, EbtFloat, (yyvsp[0].lex).loc, true);
+ }
+#line 4246 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 13:
+#line 345 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.doubleCheck((yyvsp[0].lex).loc, "double literal");
+ (yyval.interm.intermTypedNode) = parseContext.intermediate.addConstantUnion((yyvsp[0].lex).d, EbtDouble, (yyvsp[0].lex).loc, true);
+ }
+#line 4255 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 14:
+#line 349 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.float16Check((yyvsp[0].lex).loc, "half float literal");
+ (yyval.interm.intermTypedNode) = parseContext.intermediate.addConstantUnion((yyvsp[0].lex).d, EbtFloat16, (yyvsp[0].lex).loc, true);
+ }
+#line 4264 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 15:
+#line 353 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.intermTypedNode) = parseContext.intermediate.addConstantUnion((yyvsp[0].lex).b, (yyvsp[0].lex).loc, true);
+ }
+#line 4272 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 16:
+#line 356 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.intermTypedNode) = (yyvsp[-1].interm.intermTypedNode);
+ if ((yyval.interm.intermTypedNode)->getAsConstantUnion())
+ (yyval.interm.intermTypedNode)->getAsConstantUnion()->setExpression();
+ }
+#line 4282 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 17:
+#line 364 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode);
+ }
+#line 4290 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 18:
+#line 367 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.intermTypedNode) = parseContext.handleBracketDereference((yyvsp[-2].lex).loc, (yyvsp[-3].interm.intermTypedNode), (yyvsp[-1].interm.intermTypedNode));
+ }
+#line 4298 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 19:
+#line 370 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode);
+ }
+#line 4306 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 20:
+#line 373 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.intermTypedNode) = parseContext.handleDotDereference((yyvsp[0].lex).loc, (yyvsp[-2].interm.intermTypedNode), *(yyvsp[0].lex).string);
+ }
+#line 4314 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 21:
+#line 376 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.variableCheck((yyvsp[-1].interm.intermTypedNode));
+ parseContext.lValueErrorCheck((yyvsp[0].lex).loc, "++", (yyvsp[-1].interm.intermTypedNode));
+ (yyval.interm.intermTypedNode) = parseContext.handleUnaryMath((yyvsp[0].lex).loc, "++", EOpPostIncrement, (yyvsp[-1].interm.intermTypedNode));
+ }
+#line 4324 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 22:
+#line 381 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.variableCheck((yyvsp[-1].interm.intermTypedNode));
+ parseContext.lValueErrorCheck((yyvsp[0].lex).loc, "--", (yyvsp[-1].interm.intermTypedNode));
+ (yyval.interm.intermTypedNode) = parseContext.handleUnaryMath((yyvsp[0].lex).loc, "--", EOpPostDecrement, (yyvsp[-1].interm.intermTypedNode));
+ }
+#line 4334 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 23:
+#line 389 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.integerCheck((yyvsp[0].interm.intermTypedNode), "[]");
+ (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode);
+ }
+#line 4343 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 24:
+#line 396 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.intermTypedNode) = parseContext.handleFunctionCall((yyvsp[0].interm).loc, (yyvsp[0].interm).function, (yyvsp[0].interm).intermNode);
+ delete (yyvsp[0].interm).function;
+ }
+#line 4352 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 25:
+#line 403 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm) = (yyvsp[0].interm);
+ }
+#line 4360 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 26:
+#line 409 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm) = (yyvsp[-1].interm);
+ (yyval.interm).loc = (yyvsp[0].lex).loc;
+ }
+#line 4369 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 27:
+#line 413 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm) = (yyvsp[-1].interm);
+ (yyval.interm).loc = (yyvsp[0].lex).loc;
+ }
+#line 4378 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 28:
+#line 420 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm) = (yyvsp[-1].interm);
+ }
+#line 4386 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 29:
+#line 423 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm) = (yyvsp[0].interm);
+ }
+#line 4394 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 30:
+#line 429 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ TParameter param = { 0, new TType };
+ param.type->shallowCopy((yyvsp[0].interm.intermTypedNode)->getType());
+ (yyvsp[-1].interm).function->addParameter(param);
+ (yyval.interm).function = (yyvsp[-1].interm).function;
+ (yyval.interm).intermNode = (yyvsp[0].interm.intermTypedNode);
+ }
+#line 4406 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 31:
+#line 436 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ TParameter param = { 0, new TType };
+ param.type->shallowCopy((yyvsp[0].interm.intermTypedNode)->getType());
+ (yyvsp[-2].interm).function->addParameter(param);
+ (yyval.interm).function = (yyvsp[-2].interm).function;
+ (yyval.interm).intermNode = parseContext.intermediate.growAggregate((yyvsp[-2].interm).intermNode, (yyvsp[0].interm.intermTypedNode), (yyvsp[-1].lex).loc);
+ }
+#line 4418 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 32:
+#line 446 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm) = (yyvsp[-1].interm);
+ }
+#line 4426 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 33:
+#line 454 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ // Constructor
+ (yyval.interm).intermNode = 0;
+ (yyval.interm).function = parseContext.handleConstructorCall((yyvsp[0].interm.type).loc, (yyvsp[0].interm.type));
+ }
+#line 4436 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 34:
+#line 459 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ //
+ // Should be a method or subroutine call, but we haven't recognized the arguments yet.
+ //
+ (yyval.interm).function = 0;
+ (yyval.interm).intermNode = 0;
+
+ TIntermMethod* method = (yyvsp[0].interm.intermTypedNode)->getAsMethodNode();
+ if (method) {
+ (yyval.interm).function = new TFunction(&method->getMethodName(), TType(EbtInt), EOpArrayLength);
+ (yyval.interm).intermNode = method->getObject();
+ } else {
+ TIntermSymbol* symbol = (yyvsp[0].interm.intermTypedNode)->getAsSymbolNode();
+ if (symbol) {
+ parseContext.reservedErrorCheck(symbol->getLoc(), symbol->getName());
+ TFunction *function = new TFunction(&symbol->getName(), TType(EbtVoid));
+ (yyval.interm).function = function;
+ } else
+ parseContext.error((yyvsp[0].interm.intermTypedNode)->getLoc(), "function call, method, or subroutine call expected", "", "");
+ }
+
+ if ((yyval.interm).function == 0) {
+ // error recovery
+ TString* empty = NewPoolTString("");
+ (yyval.interm).function = new TFunction(empty, TType(EbtVoid), EOpNull);
+ }
+ }
+#line 4468 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 35:
+#line 486 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ // Constructor
+ (yyval.interm).intermNode = 0;
+ (yyval.interm).function = parseContext.handleConstructorCall((yyvsp[0].interm.type).loc, (yyvsp[0].interm.type));
+ }
+#line 4478 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 36:
+#line 494 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.variableCheck((yyvsp[0].interm.intermTypedNode));
+ (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode);
+ if (TIntermMethod* method = (yyvsp[0].interm.intermTypedNode)->getAsMethodNode())
+ parseContext.error((yyvsp[0].interm.intermTypedNode)->getLoc(), "incomplete method syntax", method->getMethodName().c_str(), "");
+ }
+#line 4489 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 37:
+#line 500 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.lValueErrorCheck((yyvsp[-1].lex).loc, "++", (yyvsp[0].interm.intermTypedNode));
+ (yyval.interm.intermTypedNode) = parseContext.handleUnaryMath((yyvsp[-1].lex).loc, "++", EOpPreIncrement, (yyvsp[0].interm.intermTypedNode));
+ }
+#line 4498 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 38:
+#line 504 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.lValueErrorCheck((yyvsp[-1].lex).loc, "--", (yyvsp[0].interm.intermTypedNode));
+ (yyval.interm.intermTypedNode) = parseContext.handleUnaryMath((yyvsp[-1].lex).loc, "--", EOpPreDecrement, (yyvsp[0].interm.intermTypedNode));
+ }
+#line 4507 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 39:
+#line 508 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ if ((yyvsp[-1].interm).op != EOpNull) {
+ char errorOp[2] = {0, 0};
+ switch((yyvsp[-1].interm).op) {
+ case EOpNegative: errorOp[0] = '-'; break;
+ case EOpLogicalNot: errorOp[0] = '!'; break;
+ case EOpBitwiseNot: errorOp[0] = '~'; break;
+ default: break; // some compilers want this
+ }
+ (yyval.interm.intermTypedNode) = parseContext.handleUnaryMath((yyvsp[-1].interm).loc, errorOp, (yyvsp[-1].interm).op, (yyvsp[0].interm.intermTypedNode));
+ } else {
+ (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode);
+ if ((yyval.interm.intermTypedNode)->getAsConstantUnion())
+ (yyval.interm.intermTypedNode)->getAsConstantUnion()->setExpression();
+ }
+ }
+#line 4528 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 40:
+#line 528 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ { (yyval.interm).loc = (yyvsp[0].lex).loc; (yyval.interm).op = EOpNull; }
+#line 4534 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 41:
+#line 529 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ { (yyval.interm).loc = (yyvsp[0].lex).loc; (yyval.interm).op = EOpNegative; }
+#line 4540 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 42:
+#line 530 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ { (yyval.interm).loc = (yyvsp[0].lex).loc; (yyval.interm).op = EOpLogicalNot; }
+#line 4546 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 43:
+#line 531 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ { (yyval.interm).loc = (yyvsp[0].lex).loc; (yyval.interm).op = EOpBitwiseNot;
+ parseContext.fullIntegerCheck((yyvsp[0].lex).loc, "bitwise not"); }
+#line 4553 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 44:
+#line 537 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ { (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode); }
+#line 4559 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 45:
+#line 538 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.intermTypedNode) = parseContext.handleBinaryMath((yyvsp[-1].lex).loc, "*", EOpMul, (yyvsp[-2].interm.intermTypedNode), (yyvsp[0].interm.intermTypedNode));
+ if ((yyval.interm.intermTypedNode) == 0)
+ (yyval.interm.intermTypedNode) = (yyvsp[-2].interm.intermTypedNode);
+ }
+#line 4569 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 46:
+#line 543 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.intermTypedNode) = parseContext.handleBinaryMath((yyvsp[-1].lex).loc, "/", EOpDiv, (yyvsp[-2].interm.intermTypedNode), (yyvsp[0].interm.intermTypedNode));
+ if ((yyval.interm.intermTypedNode) == 0)
+ (yyval.interm.intermTypedNode) = (yyvsp[-2].interm.intermTypedNode);
+ }
+#line 4579 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 47:
+#line 548 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.fullIntegerCheck((yyvsp[-1].lex).loc, "%");
+ (yyval.interm.intermTypedNode) = parseContext.handleBinaryMath((yyvsp[-1].lex).loc, "%", EOpMod, (yyvsp[-2].interm.intermTypedNode), (yyvsp[0].interm.intermTypedNode));
+ if ((yyval.interm.intermTypedNode) == 0)
+ (yyval.interm.intermTypedNode) = (yyvsp[-2].interm.intermTypedNode);
+ }
+#line 4590 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 48:
+#line 557 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ { (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode); }
+#line 4596 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 49:
+#line 558 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.intermTypedNode) = parseContext.handleBinaryMath((yyvsp[-1].lex).loc, "+", EOpAdd, (yyvsp[-2].interm.intermTypedNode), (yyvsp[0].interm.intermTypedNode));
+ if ((yyval.interm.intermTypedNode) == 0)
+ (yyval.interm.intermTypedNode) = (yyvsp[-2].interm.intermTypedNode);
+ }
+#line 4606 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 50:
+#line 563 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.intermTypedNode) = parseContext.handleBinaryMath((yyvsp[-1].lex).loc, "-", EOpSub, (yyvsp[-2].interm.intermTypedNode), (yyvsp[0].interm.intermTypedNode));
+ if ((yyval.interm.intermTypedNode) == 0)
+ (yyval.interm.intermTypedNode) = (yyvsp[-2].interm.intermTypedNode);
+ }
+#line 4616 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 51:
+#line 571 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ { (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode); }
+#line 4622 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 52:
+#line 572 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.fullIntegerCheck((yyvsp[-1].lex).loc, "bit shift left");
+ (yyval.interm.intermTypedNode) = parseContext.handleBinaryMath((yyvsp[-1].lex).loc, "<<", EOpLeftShift, (yyvsp[-2].interm.intermTypedNode), (yyvsp[0].interm.intermTypedNode));
+ if ((yyval.interm.intermTypedNode) == 0)
+ (yyval.interm.intermTypedNode) = (yyvsp[-2].interm.intermTypedNode);
+ }
+#line 4633 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 53:
+#line 578 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.fullIntegerCheck((yyvsp[-1].lex).loc, "bit shift right");
+ (yyval.interm.intermTypedNode) = parseContext.handleBinaryMath((yyvsp[-1].lex).loc, ">>", EOpRightShift, (yyvsp[-2].interm.intermTypedNode), (yyvsp[0].interm.intermTypedNode));
+ if ((yyval.interm.intermTypedNode) == 0)
+ (yyval.interm.intermTypedNode) = (yyvsp[-2].interm.intermTypedNode);
+ }
+#line 4644 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 54:
+#line 587 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ { (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode); }
+#line 4650 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 55:
+#line 588 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.intermTypedNode) = parseContext.handleBinaryMath((yyvsp[-1].lex).loc, "<", EOpLessThan, (yyvsp[-2].interm.intermTypedNode), (yyvsp[0].interm.intermTypedNode));
+ if ((yyval.interm.intermTypedNode) == 0)
+ (yyval.interm.intermTypedNode) = parseContext.intermediate.addConstantUnion(false, (yyvsp[-1].lex).loc);
+ }
+#line 4660 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 56:
+#line 593 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.intermTypedNode) = parseContext.handleBinaryMath((yyvsp[-1].lex).loc, ">", EOpGreaterThan, (yyvsp[-2].interm.intermTypedNode), (yyvsp[0].interm.intermTypedNode));
+ if ((yyval.interm.intermTypedNode) == 0)
+ (yyval.interm.intermTypedNode) = parseContext.intermediate.addConstantUnion(false, (yyvsp[-1].lex).loc);
+ }
+#line 4670 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 57:
+#line 598 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.intermTypedNode) = parseContext.handleBinaryMath((yyvsp[-1].lex).loc, "<=", EOpLessThanEqual, (yyvsp[-2].interm.intermTypedNode), (yyvsp[0].interm.intermTypedNode));
+ if ((yyval.interm.intermTypedNode) == 0)
+ (yyval.interm.intermTypedNode) = parseContext.intermediate.addConstantUnion(false, (yyvsp[-1].lex).loc);
+ }
+#line 4680 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 58:
+#line 603 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.intermTypedNode) = parseContext.handleBinaryMath((yyvsp[-1].lex).loc, ">=", EOpGreaterThanEqual, (yyvsp[-2].interm.intermTypedNode), (yyvsp[0].interm.intermTypedNode));
+ if ((yyval.interm.intermTypedNode) == 0)
+ (yyval.interm.intermTypedNode) = parseContext.intermediate.addConstantUnion(false, (yyvsp[-1].lex).loc);
+ }
+#line 4690 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 59:
+#line 611 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ { (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode); }
+#line 4696 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 60:
+#line 612 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.arrayObjectCheck((yyvsp[-1].lex).loc, (yyvsp[-2].interm.intermTypedNode)->getType(), "array comparison");
+ parseContext.opaqueCheck((yyvsp[-1].lex).loc, (yyvsp[-2].interm.intermTypedNode)->getType(), "==");
+ parseContext.specializationCheck((yyvsp[-1].lex).loc, (yyvsp[-2].interm.intermTypedNode)->getType(), "==");
+ parseContext.referenceCheck((yyvsp[-1].lex).loc, (yyvsp[-2].interm.intermTypedNode)->getType(), "==");
+ (yyval.interm.intermTypedNode) = parseContext.handleBinaryMath((yyvsp[-1].lex).loc, "==", EOpEqual, (yyvsp[-2].interm.intermTypedNode), (yyvsp[0].interm.intermTypedNode));
+ if ((yyval.interm.intermTypedNode) == 0)
+ (yyval.interm.intermTypedNode) = parseContext.intermediate.addConstantUnion(false, (yyvsp[-1].lex).loc);
+ }
+#line 4710 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 61:
+#line 621 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.arrayObjectCheck((yyvsp[-1].lex).loc, (yyvsp[-2].interm.intermTypedNode)->getType(), "array comparison");
+ parseContext.opaqueCheck((yyvsp[-1].lex).loc, (yyvsp[-2].interm.intermTypedNode)->getType(), "!=");
+ parseContext.specializationCheck((yyvsp[-1].lex).loc, (yyvsp[-2].interm.intermTypedNode)->getType(), "!=");
+ parseContext.referenceCheck((yyvsp[-1].lex).loc, (yyvsp[-2].interm.intermTypedNode)->getType(), "!=");
+ (yyval.interm.intermTypedNode) = parseContext.handleBinaryMath((yyvsp[-1].lex).loc, "!=", EOpNotEqual, (yyvsp[-2].interm.intermTypedNode), (yyvsp[0].interm.intermTypedNode));
+ if ((yyval.interm.intermTypedNode) == 0)
+ (yyval.interm.intermTypedNode) = parseContext.intermediate.addConstantUnion(false, (yyvsp[-1].lex).loc);
+ }
+#line 4724 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 62:
+#line 633 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ { (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode); }
+#line 4730 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 63:
+#line 634 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.fullIntegerCheck((yyvsp[-1].lex).loc, "bitwise and");
+ (yyval.interm.intermTypedNode) = parseContext.handleBinaryMath((yyvsp[-1].lex).loc, "&", EOpAnd, (yyvsp[-2].interm.intermTypedNode), (yyvsp[0].interm.intermTypedNode));
+ if ((yyval.interm.intermTypedNode) == 0)
+ (yyval.interm.intermTypedNode) = (yyvsp[-2].interm.intermTypedNode);
+ }
+#line 4741 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 64:
+#line 643 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ { (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode); }
+#line 4747 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 65:
+#line 644 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.fullIntegerCheck((yyvsp[-1].lex).loc, "bitwise exclusive or");
+ (yyval.interm.intermTypedNode) = parseContext.handleBinaryMath((yyvsp[-1].lex).loc, "^", EOpExclusiveOr, (yyvsp[-2].interm.intermTypedNode), (yyvsp[0].interm.intermTypedNode));
+ if ((yyval.interm.intermTypedNode) == 0)
+ (yyval.interm.intermTypedNode) = (yyvsp[-2].interm.intermTypedNode);
+ }
+#line 4758 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 66:
+#line 653 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ { (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode); }
+#line 4764 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 67:
+#line 654 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.fullIntegerCheck((yyvsp[-1].lex).loc, "bitwise inclusive or");
+ (yyval.interm.intermTypedNode) = parseContext.handleBinaryMath((yyvsp[-1].lex).loc, "|", EOpInclusiveOr, (yyvsp[-2].interm.intermTypedNode), (yyvsp[0].interm.intermTypedNode));
+ if ((yyval.interm.intermTypedNode) == 0)
+ (yyval.interm.intermTypedNode) = (yyvsp[-2].interm.intermTypedNode);
+ }
+#line 4775 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 68:
+#line 663 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ { (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode); }
+#line 4781 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 69:
+#line 664 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.intermTypedNode) = parseContext.handleBinaryMath((yyvsp[-1].lex).loc, "&&", EOpLogicalAnd, (yyvsp[-2].interm.intermTypedNode), (yyvsp[0].interm.intermTypedNode));
+ if ((yyval.interm.intermTypedNode) == 0)
+ (yyval.interm.intermTypedNode) = parseContext.intermediate.addConstantUnion(false, (yyvsp[-1].lex).loc);
+ }
+#line 4791 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 70:
+#line 672 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ { (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode); }
+#line 4797 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 71:
+#line 673 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.intermTypedNode) = parseContext.handleBinaryMath((yyvsp[-1].lex).loc, "^^", EOpLogicalXor, (yyvsp[-2].interm.intermTypedNode), (yyvsp[0].interm.intermTypedNode));
+ if ((yyval.interm.intermTypedNode) == 0)
+ (yyval.interm.intermTypedNode) = parseContext.intermediate.addConstantUnion(false, (yyvsp[-1].lex).loc);
+ }
+#line 4807 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 72:
+#line 681 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ { (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode); }
+#line 4813 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 73:
+#line 682 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.intermTypedNode) = parseContext.handleBinaryMath((yyvsp[-1].lex).loc, "||", EOpLogicalOr, (yyvsp[-2].interm.intermTypedNode), (yyvsp[0].interm.intermTypedNode));
+ if ((yyval.interm.intermTypedNode) == 0)
+ (yyval.interm.intermTypedNode) = parseContext.intermediate.addConstantUnion(false, (yyvsp[-1].lex).loc);
+ }
+#line 4823 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 74:
+#line 690 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ { (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode); }
+#line 4829 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 75:
+#line 691 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ ++parseContext.controlFlowNestingLevel;
+ }
+#line 4837 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 76:
+#line 694 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ --parseContext.controlFlowNestingLevel;
+ parseContext.boolCheck((yyvsp[-4].lex).loc, (yyvsp[-5].interm.intermTypedNode));
+ parseContext.rValueErrorCheck((yyvsp[-4].lex).loc, "?", (yyvsp[-5].interm.intermTypedNode));
+ parseContext.rValueErrorCheck((yyvsp[-1].lex).loc, ":", (yyvsp[-2].interm.intermTypedNode));
+ parseContext.rValueErrorCheck((yyvsp[-1].lex).loc, ":", (yyvsp[0].interm.intermTypedNode));
+ (yyval.interm.intermTypedNode) = parseContext.intermediate.addSelection((yyvsp[-5].interm.intermTypedNode), (yyvsp[-2].interm.intermTypedNode), (yyvsp[0].interm.intermTypedNode), (yyvsp[-4].lex).loc);
+ if ((yyval.interm.intermTypedNode) == 0) {
+ parseContext.binaryOpError((yyvsp[-4].lex).loc, ":", (yyvsp[-2].interm.intermTypedNode)->getCompleteString(), (yyvsp[0].interm.intermTypedNode)->getCompleteString());
+ (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode);
+ }
+ }
+#line 4854 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 77:
+#line 709 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ { (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode); }
+#line 4860 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 78:
+#line 710 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.arrayObjectCheck((yyvsp[-1].interm).loc, (yyvsp[-2].interm.intermTypedNode)->getType(), "array assignment");
+ parseContext.opaqueCheck((yyvsp[-1].interm).loc, (yyvsp[-2].interm.intermTypedNode)->getType(), "=");
+ parseContext.storage16BitAssignmentCheck((yyvsp[-1].interm).loc, (yyvsp[-2].interm.intermTypedNode)->getType(), "=");
+ parseContext.specializationCheck((yyvsp[-1].interm).loc, (yyvsp[-2].interm.intermTypedNode)->getType(), "=");
+ parseContext.lValueErrorCheck((yyvsp[-1].interm).loc, "assign", (yyvsp[-2].interm.intermTypedNode));
+ parseContext.rValueErrorCheck((yyvsp[-1].interm).loc, "assign", (yyvsp[0].interm.intermTypedNode));
+ (yyval.interm.intermTypedNode) = parseContext.intermediate.addAssign((yyvsp[-1].interm).op, (yyvsp[-2].interm.intermTypedNode), (yyvsp[0].interm.intermTypedNode), (yyvsp[-1].interm).loc);
+ if ((yyval.interm.intermTypedNode) == 0) {
+ parseContext.assignError((yyvsp[-1].interm).loc, "assign", (yyvsp[-2].interm.intermTypedNode)->getCompleteString(), (yyvsp[0].interm.intermTypedNode)->getCompleteString());
+ (yyval.interm.intermTypedNode) = (yyvsp[-2].interm.intermTypedNode);
+ }
+ }
+#line 4878 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 79:
+#line 726 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm).loc = (yyvsp[0].lex).loc;
+ (yyval.interm).op = EOpAssign;
+ }
+#line 4887 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 80:
+#line 730 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm).loc = (yyvsp[0].lex).loc;
+ (yyval.interm).op = EOpMulAssign;
+ }
+#line 4896 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 81:
+#line 734 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm).loc = (yyvsp[0].lex).loc;
+ (yyval.interm).op = EOpDivAssign;
+ }
+#line 4905 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 82:
+#line 738 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.fullIntegerCheck((yyvsp[0].lex).loc, "%=");
+ (yyval.interm).loc = (yyvsp[0].lex).loc;
+ (yyval.interm).op = EOpModAssign;
+ }
+#line 4915 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 83:
+#line 743 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm).loc = (yyvsp[0].lex).loc;
+ (yyval.interm).op = EOpAddAssign;
+ }
+#line 4924 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 84:
+#line 747 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm).loc = (yyvsp[0].lex).loc;
+ (yyval.interm).op = EOpSubAssign;
+ }
+#line 4933 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 85:
+#line 751 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.fullIntegerCheck((yyvsp[0].lex).loc, "bit-shift left assign");
+ (yyval.interm).loc = (yyvsp[0].lex).loc; (yyval.interm).op = EOpLeftShiftAssign;
+ }
+#line 4942 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 86:
+#line 755 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.fullIntegerCheck((yyvsp[0].lex).loc, "bit-shift right assign");
+ (yyval.interm).loc = (yyvsp[0].lex).loc; (yyval.interm).op = EOpRightShiftAssign;
+ }
+#line 4951 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 87:
+#line 759 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.fullIntegerCheck((yyvsp[0].lex).loc, "bitwise-and assign");
+ (yyval.interm).loc = (yyvsp[0].lex).loc; (yyval.interm).op = EOpAndAssign;
+ }
+#line 4960 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 88:
+#line 763 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.fullIntegerCheck((yyvsp[0].lex).loc, "bitwise-xor assign");
+ (yyval.interm).loc = (yyvsp[0].lex).loc; (yyval.interm).op = EOpExclusiveOrAssign;
+ }
+#line 4969 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 89:
+#line 767 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.fullIntegerCheck((yyvsp[0].lex).loc, "bitwise-or assign");
+ (yyval.interm).loc = (yyvsp[0].lex).loc; (yyval.interm).op = EOpInclusiveOrAssign;
+ }
+#line 4978 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 90:
+#line 774 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode);
+ }
+#line 4986 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 91:
+#line 777 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.samplerConstructorLocationCheck((yyvsp[-1].lex).loc, ",", (yyvsp[0].interm.intermTypedNode));
+ (yyval.interm.intermTypedNode) = parseContext.intermediate.addComma((yyvsp[-2].interm.intermTypedNode), (yyvsp[0].interm.intermTypedNode), (yyvsp[-1].lex).loc);
+ if ((yyval.interm.intermTypedNode) == 0) {
+ parseContext.binaryOpError((yyvsp[-1].lex).loc, ",", (yyvsp[-2].interm.intermTypedNode)->getCompleteString(), (yyvsp[0].interm.intermTypedNode)->getCompleteString());
+ (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode);
+ }
+ }
+#line 4999 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 92:
+#line 788 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.constantValueCheck((yyvsp[0].interm.intermTypedNode), "");
+ (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode);
+ }
+#line 5008 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 93:
+#line 795 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.handleFunctionDeclarator((yyvsp[-1].interm).loc, *(yyvsp[-1].interm).function, true /* prototype */);
+ (yyval.interm.intermNode) = 0;
+ // TODO: 4.0 functionality: subroutines: make the identifier a user type for this signature
+ }
+#line 5018 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 94:
+#line 800 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ if ((yyvsp[-1].interm).intermNode && (yyvsp[-1].interm).intermNode->getAsAggregate())
+ (yyvsp[-1].interm).intermNode->getAsAggregate()->setOperator(EOpSequence);
+ (yyval.interm.intermNode) = (yyvsp[-1].interm).intermNode;
+ }
+#line 5028 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 95:
+#line 805 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.profileRequires((yyvsp[-3].lex).loc, ENoProfile, 130, 0, "precision statement");
+
+ // lazy setting of the previous scope's defaults, has effect only the first time it is called in a particular scope
+ parseContext.symbolTable.setPreviousDefaultPrecisions(&parseContext.defaultPrecision[0]);
+ parseContext.setDefaultPrecision((yyvsp[-3].lex).loc, (yyvsp[-1].interm.type), (yyvsp[-2].interm.type).qualifier.precision);
+ (yyval.interm.intermNode) = 0;
+ }
+#line 5041 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 96:
+#line 813 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.declareBlock((yyvsp[-1].interm).loc, *(yyvsp[-1].interm).typeList);
+ (yyval.interm.intermNode) = 0;
+ }
+#line 5050 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 97:
+#line 817 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.declareBlock((yyvsp[-2].interm).loc, *(yyvsp[-2].interm).typeList, (yyvsp[-1].lex).string);
+ (yyval.interm.intermNode) = 0;
+ }
+#line 5059 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 98:
+#line 821 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.declareBlock((yyvsp[-3].interm).loc, *(yyvsp[-3].interm).typeList, (yyvsp[-2].lex).string, (yyvsp[-1].interm).arraySizes);
+ (yyval.interm.intermNode) = 0;
+ }
+#line 5068 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 99:
+#line 825 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.globalQualifierFixCheck((yyvsp[-1].interm.type).loc, (yyvsp[-1].interm.type).qualifier);
+ parseContext.updateStandaloneQualifierDefaults((yyvsp[-1].interm.type).loc, (yyvsp[-1].interm.type));
+ (yyval.interm.intermNode) = 0;
+ }
+#line 5078 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 100:
+#line 830 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.checkNoShaderLayouts((yyvsp[-2].interm.type).loc, (yyvsp[-2].interm.type).shaderQualifiers);
+ parseContext.addQualifierToExisting((yyvsp[-2].interm.type).loc, (yyvsp[-2].interm.type).qualifier, *(yyvsp[-1].lex).string);
+ (yyval.interm.intermNode) = 0;
+ }
+#line 5088 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 101:
+#line 835 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.checkNoShaderLayouts((yyvsp[-3].interm.type).loc, (yyvsp[-3].interm.type).shaderQualifiers);
+ (yyvsp[-1].interm.identifierList)->push_back((yyvsp[-2].lex).string);
+ parseContext.addQualifierToExisting((yyvsp[-3].interm.type).loc, (yyvsp[-3].interm.type).qualifier, *(yyvsp[-1].interm.identifierList));
+ (yyval.interm.intermNode) = 0;
+ }
+#line 5099 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 102:
+#line 844 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ { parseContext.nestedBlockCheck((yyvsp[-2].interm.type).loc); }
+#line 5105 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 103:
+#line 844 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ --parseContext.structNestingLevel;
+ parseContext.blockName = (yyvsp[-4].lex).string;
+ parseContext.globalQualifierFixCheck((yyvsp[-5].interm.type).loc, (yyvsp[-5].interm.type).qualifier);
+ parseContext.checkNoShaderLayouts((yyvsp[-5].interm.type).loc, (yyvsp[-5].interm.type).shaderQualifiers);
+ parseContext.currentBlockQualifier = (yyvsp[-5].interm.type).qualifier;
+ (yyval.interm).loc = (yyvsp[-5].interm.type).loc;
+ (yyval.interm).typeList = (yyvsp[-1].interm.typeList);
+ }
+#line 5119 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 104:
+#line 855 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.identifierList) = new TIdentifierList;
+ (yyval.interm.identifierList)->push_back((yyvsp[0].lex).string);
+ }
+#line 5128 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 105:
+#line 859 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.identifierList) = (yyvsp[-2].interm.identifierList);
+ (yyval.interm.identifierList)->push_back((yyvsp[0].lex).string);
+ }
+#line 5137 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 106:
+#line 866 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm).function = (yyvsp[-1].interm.function);
+ (yyval.interm).loc = (yyvsp[0].lex).loc;
+ }
+#line 5146 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 107:
+#line 873 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.function) = (yyvsp[0].interm.function);
+ }
+#line 5154 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 108:
+#line 876 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.function) = (yyvsp[0].interm.function);
+ }
+#line 5162 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 109:
+#line 883 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ // Add the parameter
+ (yyval.interm.function) = (yyvsp[-1].interm.function);
+ if ((yyvsp[0].interm).param.type->getBasicType() != EbtVoid)
+ (yyvsp[-1].interm.function)->addParameter((yyvsp[0].interm).param);
+ else
+ delete (yyvsp[0].interm).param.type;
+ }
+#line 5175 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 110:
+#line 891 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ //
+ // Only first parameter of one-parameter functions can be void
+ // The check for named parameters not being void is done in parameter_declarator
+ //
+ if ((yyvsp[0].interm).param.type->getBasicType() == EbtVoid) {
+ //
+ // This parameter > first is void
+ //
+ parseContext.error((yyvsp[-1].lex).loc, "cannot be an argument type except for '(void)'", "void", "");
+ delete (yyvsp[0].interm).param.type;
+ } else {
+ // Add the parameter
+ (yyval.interm.function) = (yyvsp[-2].interm.function);
+ (yyvsp[-2].interm.function)->addParameter((yyvsp[0].interm).param);
+ }
+ }
+#line 5197 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 111:
+#line 911 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ if ((yyvsp[-2].interm.type).qualifier.storage != EvqGlobal && (yyvsp[-2].interm.type).qualifier.storage != EvqTemporary) {
+ parseContext.error((yyvsp[-1].lex).loc, "no qualifiers allowed for function return",
+ GetStorageQualifierString((yyvsp[-2].interm.type).qualifier.storage), "");
+ }
+ if ((yyvsp[-2].interm.type).arraySizes)
+ parseContext.arraySizeRequiredCheck((yyvsp[-2].interm.type).loc, *(yyvsp[-2].interm.type).arraySizes);
+
+ // Add the function as a prototype after parsing it (we do not support recursion)
+ TFunction *function;
+ TType type((yyvsp[-2].interm.type));
+
+ // Potentially rename shader entry point function. No-op most of the time.
+ parseContext.renameShaderFunction((yyvsp[-1].lex).string);
+
+ // Make the function
+ function = new TFunction((yyvsp[-1].lex).string, type);
+ (yyval.interm.function) = function;
+ }
+#line 5221 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 112:
+#line 934 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ if ((yyvsp[-1].interm.type).arraySizes) {
+ parseContext.profileRequires((yyvsp[-1].interm.type).loc, ENoProfile, 120, E_GL_3DL_array_objects, "arrayed type");
+ parseContext.profileRequires((yyvsp[-1].interm.type).loc, EEsProfile, 300, 0, "arrayed type");
+ parseContext.arraySizeRequiredCheck((yyvsp[-1].interm.type).loc, *(yyvsp[-1].interm.type).arraySizes);
+ }
+ if ((yyvsp[-1].interm.type).basicType == EbtVoid) {
+ parseContext.error((yyvsp[0].lex).loc, "illegal use of type 'void'", (yyvsp[0].lex).string->c_str(), "");
+ }
+ parseContext.reservedErrorCheck((yyvsp[0].lex).loc, *(yyvsp[0].lex).string);
+
+ TParameter param = {(yyvsp[0].lex).string, new TType((yyvsp[-1].interm.type))};
+ (yyval.interm).loc = (yyvsp[0].lex).loc;
+ (yyval.interm).param = param;
+ }
+#line 5241 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 113:
+#line 949 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ if ((yyvsp[-2].interm.type).arraySizes) {
+ parseContext.profileRequires((yyvsp[-2].interm.type).loc, ENoProfile, 120, E_GL_3DL_array_objects, "arrayed type");
+ parseContext.profileRequires((yyvsp[-2].interm.type).loc, EEsProfile, 300, 0, "arrayed type");
+ parseContext.arraySizeRequiredCheck((yyvsp[-2].interm.type).loc, *(yyvsp[-2].interm.type).arraySizes);
+ }
+ TType* type = new TType((yyvsp[-2].interm.type));
+ type->transferArraySizes((yyvsp[0].interm).arraySizes);
+ type->copyArrayInnerSizes((yyvsp[-2].interm.type).arraySizes);
+
+ parseContext.arrayOfArrayVersionCheck((yyvsp[-1].lex).loc, type->getArraySizes());
+ parseContext.arraySizeRequiredCheck((yyvsp[0].interm).loc, *(yyvsp[0].interm).arraySizes);
+ parseContext.reservedErrorCheck((yyvsp[-1].lex).loc, *(yyvsp[-1].lex).string);
+
+ TParameter param = { (yyvsp[-1].lex).string, type };
+
+ (yyval.interm).loc = (yyvsp[-1].lex).loc;
+ (yyval.interm).param = param;
+ }
+#line 5265 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 114:
+#line 974 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm) = (yyvsp[0].interm);
+ if ((yyvsp[-1].interm.type).qualifier.precision != EpqNone)
+ (yyval.interm).param.type->getQualifier().precision = (yyvsp[-1].interm.type).qualifier.precision;
+ parseContext.precisionQualifierCheck((yyval.interm).loc, (yyval.interm).param.type->getBasicType(), (yyval.interm).param.type->getQualifier());
+
+ parseContext.checkNoShaderLayouts((yyvsp[-1].interm.type).loc, (yyvsp[-1].interm.type).shaderQualifiers);
+ parseContext.parameterTypeCheck((yyvsp[0].interm).loc, (yyvsp[-1].interm.type).qualifier.storage, *(yyval.interm).param.type);
+ parseContext.paramCheckFix((yyvsp[-1].interm.type).loc, (yyvsp[-1].interm.type).qualifier, *(yyval.interm).param.type);
+
+ }
+#line 5281 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 115:
+#line 985 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm) = (yyvsp[0].interm);
+
+ parseContext.parameterTypeCheck((yyvsp[0].interm).loc, EvqIn, *(yyvsp[0].interm).param.type);
+ parseContext.paramCheckFixStorage((yyvsp[0].interm).loc, EvqTemporary, *(yyval.interm).param.type);
+ parseContext.precisionQualifierCheck((yyval.interm).loc, (yyval.interm).param.type->getBasicType(), (yyval.interm).param.type->getQualifier());
+ }
+#line 5293 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 116:
+#line 995 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm) = (yyvsp[0].interm);
+ if ((yyvsp[-1].interm.type).qualifier.precision != EpqNone)
+ (yyval.interm).param.type->getQualifier().precision = (yyvsp[-1].interm.type).qualifier.precision;
+ parseContext.precisionQualifierCheck((yyvsp[-1].interm.type).loc, (yyval.interm).param.type->getBasicType(), (yyval.interm).param.type->getQualifier());
+
+ parseContext.checkNoShaderLayouts((yyvsp[-1].interm.type).loc, (yyvsp[-1].interm.type).shaderQualifiers);
+ parseContext.parameterTypeCheck((yyvsp[0].interm).loc, (yyvsp[-1].interm.type).qualifier.storage, *(yyval.interm).param.type);
+ parseContext.paramCheckFix((yyvsp[-1].interm.type).loc, (yyvsp[-1].interm.type).qualifier, *(yyval.interm).param.type);
+ }
+#line 5308 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 117:
+#line 1005 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm) = (yyvsp[0].interm);
+
+ parseContext.parameterTypeCheck((yyvsp[0].interm).loc, EvqIn, *(yyvsp[0].interm).param.type);
+ parseContext.paramCheckFixStorage((yyvsp[0].interm).loc, EvqTemporary, *(yyval.interm).param.type);
+ parseContext.precisionQualifierCheck((yyval.interm).loc, (yyval.interm).param.type->getBasicType(), (yyval.interm).param.type->getQualifier());
+ }
+#line 5320 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 118:
+#line 1015 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ TParameter param = { 0, new TType((yyvsp[0].interm.type)) };
+ (yyval.interm).param = param;
+ if ((yyvsp[0].interm.type).arraySizes)
+ parseContext.arraySizeRequiredCheck((yyvsp[0].interm.type).loc, *(yyvsp[0].interm.type).arraySizes);
+ }
+#line 5331 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 119:
+#line 1024 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm) = (yyvsp[0].interm);
+ }
+#line 5339 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 120:
+#line 1027 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm) = (yyvsp[-2].interm);
+ parseContext.declareVariable((yyvsp[0].lex).loc, *(yyvsp[0].lex).string, (yyvsp[-2].interm).type);
+ }
+#line 5348 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 121:
+#line 1031 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm) = (yyvsp[-3].interm);
+ parseContext.declareVariable((yyvsp[-1].lex).loc, *(yyvsp[-1].lex).string, (yyvsp[-3].interm).type, (yyvsp[0].interm).arraySizes);
+ }
+#line 5357 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 122:
+#line 1035 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm).type = (yyvsp[-5].interm).type;
+ TIntermNode* initNode = parseContext.declareVariable((yyvsp[-3].lex).loc, *(yyvsp[-3].lex).string, (yyvsp[-5].interm).type, (yyvsp[-2].interm).arraySizes, (yyvsp[0].interm.intermTypedNode));
+ (yyval.interm).intermNode = parseContext.intermediate.growAggregate((yyvsp[-5].interm).intermNode, initNode, (yyvsp[-1].lex).loc);
+ }
+#line 5367 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 123:
+#line 1040 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm).type = (yyvsp[-4].interm).type;
+ TIntermNode* initNode = parseContext.declareVariable((yyvsp[-2].lex).loc, *(yyvsp[-2].lex).string, (yyvsp[-4].interm).type, 0, (yyvsp[0].interm.intermTypedNode));
+ (yyval.interm).intermNode = parseContext.intermediate.growAggregate((yyvsp[-4].interm).intermNode, initNode, (yyvsp[-1].lex).loc);
+ }
+#line 5377 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 124:
+#line 1048 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm).type = (yyvsp[0].interm.type);
+ (yyval.interm).intermNode = 0;
+ parseContext.declareTypeDefaults((yyval.interm).loc, (yyval.interm).type);
+ }
+#line 5387 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 125:
+#line 1053 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm).type = (yyvsp[-1].interm.type);
+ (yyval.interm).intermNode = 0;
+ parseContext.declareVariable((yyvsp[0].lex).loc, *(yyvsp[0].lex).string, (yyvsp[-1].interm.type));
+ }
+#line 5397 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 126:
+#line 1058 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm).type = (yyvsp[-2].interm.type);
+ (yyval.interm).intermNode = 0;
+ parseContext.declareVariable((yyvsp[-1].lex).loc, *(yyvsp[-1].lex).string, (yyvsp[-2].interm.type), (yyvsp[0].interm).arraySizes);
+ }
+#line 5407 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 127:
+#line 1063 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm).type = (yyvsp[-4].interm.type);
+ TIntermNode* initNode = parseContext.declareVariable((yyvsp[-3].lex).loc, *(yyvsp[-3].lex).string, (yyvsp[-4].interm.type), (yyvsp[-2].interm).arraySizes, (yyvsp[0].interm.intermTypedNode));
+ (yyval.interm).intermNode = parseContext.intermediate.growAggregate(0, initNode, (yyvsp[-1].lex).loc);
+ }
+#line 5417 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 128:
+#line 1068 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm).type = (yyvsp[-3].interm.type);
+ TIntermNode* initNode = parseContext.declareVariable((yyvsp[-2].lex).loc, *(yyvsp[-2].lex).string, (yyvsp[-3].interm.type), 0, (yyvsp[0].interm.intermTypedNode));
+ (yyval.interm).intermNode = parseContext.intermediate.growAggregate(0, initNode, (yyvsp[-1].lex).loc);
+ }
+#line 5427 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 129:
+#line 1077 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type) = (yyvsp[0].interm.type);
+
+ parseContext.globalQualifierTypeCheck((yyvsp[0].interm.type).loc, (yyvsp[0].interm.type).qualifier, (yyval.interm.type));
+ if ((yyvsp[0].interm.type).arraySizes) {
+ parseContext.profileRequires((yyvsp[0].interm.type).loc, ENoProfile, 120, E_GL_3DL_array_objects, "arrayed type");
+ parseContext.profileRequires((yyvsp[0].interm.type).loc, EEsProfile, 300, 0, "arrayed type");
+ }
+
+ parseContext.precisionQualifierCheck((yyval.interm.type).loc, (yyval.interm.type).basicType, (yyval.interm.type).qualifier);
+ }
+#line 5443 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 130:
+#line 1088 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.globalQualifierFixCheck((yyvsp[-1].interm.type).loc, (yyvsp[-1].interm.type).qualifier);
+ parseContext.globalQualifierTypeCheck((yyvsp[-1].interm.type).loc, (yyvsp[-1].interm.type).qualifier, (yyvsp[0].interm.type));
+
+ if ((yyvsp[0].interm.type).arraySizes) {
+ parseContext.profileRequires((yyvsp[0].interm.type).loc, ENoProfile, 120, E_GL_3DL_array_objects, "arrayed type");
+ parseContext.profileRequires((yyvsp[0].interm.type).loc, EEsProfile, 300, 0, "arrayed type");
+ }
+
+ if ((yyvsp[0].interm.type).arraySizes && parseContext.arrayQualifierError((yyvsp[0].interm.type).loc, (yyvsp[-1].interm.type).qualifier))
+ (yyvsp[0].interm.type).arraySizes = nullptr;
+
+ parseContext.checkNoShaderLayouts((yyvsp[0].interm.type).loc, (yyvsp[-1].interm.type).shaderQualifiers);
+ (yyvsp[0].interm.type).shaderQualifiers.merge((yyvsp[-1].interm.type).shaderQualifiers);
+ parseContext.mergeQualifiers((yyvsp[0].interm.type).loc, (yyvsp[0].interm.type).qualifier, (yyvsp[-1].interm.type).qualifier, true);
+ parseContext.precisionQualifierCheck((yyvsp[0].interm.type).loc, (yyvsp[0].interm.type).basicType, (yyvsp[0].interm.type).qualifier);
+
+ (yyval.interm.type) = (yyvsp[0].interm.type);
+
+ if (! (yyval.interm.type).qualifier.isInterpolation() &&
+ ((parseContext.language == EShLangVertex && (yyval.interm.type).qualifier.storage == EvqVaryingOut) ||
+ (parseContext.language == EShLangFragment && (yyval.interm.type).qualifier.storage == EvqVaryingIn)))
+ (yyval.interm.type).qualifier.smooth = true;
+ }
+#line 5472 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 131:
+#line 1115 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.globalCheck((yyvsp[0].lex).loc, "invariant");
+ parseContext.profileRequires((yyval.interm.type).loc, ENoProfile, 120, 0, "invariant");
+ (yyval.interm.type).init((yyvsp[0].lex).loc);
+ (yyval.interm.type).qualifier.invariant = true;
+ }
+#line 5483 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 132:
+#line 1124 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.globalCheck((yyvsp[0].lex).loc, "smooth");
+ parseContext.profileRequires((yyvsp[0].lex).loc, ENoProfile, 130, 0, "smooth");
+ parseContext.profileRequires((yyvsp[0].lex).loc, EEsProfile, 300, 0, "smooth");
+ (yyval.interm.type).init((yyvsp[0].lex).loc);
+ (yyval.interm.type).qualifier.smooth = true;
+ }
+#line 5495 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 133:
+#line 1131 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.globalCheck((yyvsp[0].lex).loc, "flat");
+ parseContext.profileRequires((yyvsp[0].lex).loc, ENoProfile, 130, 0, "flat");
+ parseContext.profileRequires((yyvsp[0].lex).loc, EEsProfile, 300, 0, "flat");
+ (yyval.interm.type).init((yyvsp[0].lex).loc);
+ (yyval.interm.type).qualifier.flat = true;
+ }
+#line 5507 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 134:
+#line 1138 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.globalCheck((yyvsp[0].lex).loc, "noperspective");
+#ifdef NV_EXTENSIONS
+ parseContext.profileRequires((yyvsp[0].lex).loc, EEsProfile, 0, E_GL_NV_shader_noperspective_interpolation, "noperspective");
+#else
+ parseContext.requireProfile((yyvsp[0].lex).loc, ~EEsProfile, "noperspective");
+#endif
+ parseContext.profileRequires((yyvsp[0].lex).loc, ENoProfile, 130, 0, "noperspective");
+ (yyval.interm.type).init((yyvsp[0].lex).loc);
+ (yyval.interm.type).qualifier.nopersp = true;
+ }
+#line 5523 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 135:
+#line 1149 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+#ifdef AMD_EXTENSIONS
+ parseContext.globalCheck((yyvsp[0].lex).loc, "__explicitInterpAMD");
+ parseContext.profileRequires((yyvsp[0].lex).loc, ECoreProfile, 450, E_GL_AMD_shader_explicit_vertex_parameter, "explicit interpolation");
+ parseContext.profileRequires((yyvsp[0].lex).loc, ECompatibilityProfile, 450, E_GL_AMD_shader_explicit_vertex_parameter, "explicit interpolation");
+ (yyval.interm.type).init((yyvsp[0].lex).loc);
+ (yyval.interm.type).qualifier.explicitInterp = true;
+#endif
+ }
+#line 5537 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 136:
+#line 1158 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+#ifdef NV_EXTENSIONS
+ parseContext.globalCheck((yyvsp[0].lex).loc, "pervertexNV");
+ parseContext.profileRequires((yyvsp[0].lex).loc, ECoreProfile, 0, E_GL_NV_fragment_shader_barycentric, "fragment shader barycentric");
+ parseContext.profileRequires((yyvsp[0].lex).loc, ECompatibilityProfile, 0, E_GL_NV_fragment_shader_barycentric, "fragment shader barycentric");
+ parseContext.profileRequires((yyvsp[0].lex).loc, EEsProfile, 0, E_GL_NV_fragment_shader_barycentric, "fragment shader barycentric");
+ (yyval.interm.type).init((yyvsp[0].lex).loc);
+ (yyval.interm.type).qualifier.pervertexNV = true;
+#endif
+ }
+#line 5552 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 137:
+#line 1168 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+#ifdef NV_EXTENSIONS
+ // No need for profile version or extension check. Shader stage already checks both.
+ parseContext.globalCheck((yyvsp[0].lex).loc, "perprimitiveNV");
+ parseContext.requireStage((yyvsp[0].lex).loc, (EShLanguageMask)(EShLangFragmentMask | EShLangMeshNVMask), "perprimitiveNV");
+ // Fragment shader stage doesn't check for extension. So we explicitly add below extension check.
+ if (parseContext.language == EShLangFragment)
+ parseContext.requireExtensions((yyvsp[0].lex).loc, 1, &E_GL_NV_mesh_shader, "perprimitiveNV");
+ (yyval.interm.type).init((yyvsp[0].lex).loc);
+ (yyval.interm.type).qualifier.perPrimitiveNV = true;
+#endif
+ }
+#line 5569 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 138:
+#line 1180 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+#ifdef NV_EXTENSIONS
+ // No need for profile version or extension check. Shader stage already checks both.
+ parseContext.globalCheck((yyvsp[0].lex).loc, "perviewNV");
+ parseContext.requireStage((yyvsp[0].lex).loc, EShLangMeshNV, "perviewNV");
+ (yyval.interm.type).init((yyvsp[0].lex).loc);
+ (yyval.interm.type).qualifier.perViewNV = true;
+#endif
+ }
+#line 5583 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 139:
+#line 1189 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+#ifdef NV_EXTENSIONS
+ // No need for profile version or extension check. Shader stage already checks both.
+ parseContext.globalCheck((yyvsp[0].lex).loc, "taskNV");
+ parseContext.requireStage((yyvsp[0].lex).loc, (EShLanguageMask)(EShLangTaskNVMask | EShLangMeshNVMask), "taskNV");
+ (yyval.interm.type).init((yyvsp[0].lex).loc);
+ (yyval.interm.type).qualifier.perTaskNV = true;
+#endif
+ }
+#line 5597 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 140:
+#line 1201 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type) = (yyvsp[-1].interm.type);
+ }
+#line 5605 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 141:
+#line 1207 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type) = (yyvsp[0].interm.type);
+ }
+#line 5613 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 142:
+#line 1210 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type) = (yyvsp[-2].interm.type);
+ (yyval.interm.type).shaderQualifiers.merge((yyvsp[0].interm.type).shaderQualifiers);
+ parseContext.mergeObjectLayoutQualifiers((yyval.interm.type).qualifier, (yyvsp[0].interm.type).qualifier, false);
+ }
+#line 5623 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 143:
+#line 1217 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc);
+ parseContext.setLayoutQualifier((yyvsp[0].lex).loc, (yyval.interm.type), *(yyvsp[0].lex).string);
+ }
+#line 5632 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 144:
+#line 1221 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[-2].lex).loc);
+ parseContext.setLayoutQualifier((yyvsp[-2].lex).loc, (yyval.interm.type), *(yyvsp[-2].lex).string, (yyvsp[0].interm.intermTypedNode));
+ }
+#line 5641 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 145:
+#line 1225 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ { // because "shared" is both an identifier and a keyword
+ (yyval.interm.type).init((yyvsp[0].lex).loc);
+ TString strShared("shared");
+ parseContext.setLayoutQualifier((yyvsp[0].lex).loc, (yyval.interm.type), strShared);
+ }
+#line 5651 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 146:
+#line 1233 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.profileRequires((yyval.interm.type).loc, ECoreProfile | ECompatibilityProfile, 400, E_GL_ARB_gpu_shader5, "precise");
+ parseContext.profileRequires((yyvsp[0].lex).loc, EEsProfile, 320, Num_AEP_gpu_shader5, AEP_gpu_shader5, "precise");
+ (yyval.interm.type).init((yyvsp[0].lex).loc);
+ (yyval.interm.type).qualifier.noContraction = true;
+ }
+#line 5662 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 147:
+#line 1242 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type) = (yyvsp[0].interm.type);
+ }
+#line 5670 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 148:
+#line 1245 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type) = (yyvsp[-1].interm.type);
+ if ((yyval.interm.type).basicType == EbtVoid)
+ (yyval.interm.type).basicType = (yyvsp[0].interm.type).basicType;
+
+ (yyval.interm.type).shaderQualifiers.merge((yyvsp[0].interm.type).shaderQualifiers);
+ parseContext.mergeQualifiers((yyval.interm.type).loc, (yyval.interm.type).qualifier, (yyvsp[0].interm.type).qualifier, false);
+ }
+#line 5683 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 149:
+#line 1256 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type) = (yyvsp[0].interm.type);
+ }
+#line 5691 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 150:
+#line 1259 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type) = (yyvsp[0].interm.type);
+ }
+#line 5699 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 151:
+#line 1262 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.checkPrecisionQualifier((yyvsp[0].interm.type).loc, (yyvsp[0].interm.type).qualifier.precision);
+ (yyval.interm.type) = (yyvsp[0].interm.type);
+ }
+#line 5708 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 152:
+#line 1266 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ // allow inheritance of storage qualifier from block declaration
+ (yyval.interm.type) = (yyvsp[0].interm.type);
+ }
+#line 5717 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 153:
+#line 1270 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ // allow inheritance of storage qualifier from block declaration
+ (yyval.interm.type) = (yyvsp[0].interm.type);
+ }
+#line 5726 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 154:
+#line 1274 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ // allow inheritance of storage qualifier from block declaration
+ (yyval.interm.type) = (yyvsp[0].interm.type);
+ }
+#line 5735 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 155:
+#line 1278 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type) = (yyvsp[0].interm.type);
+ }
+#line 5743 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 156:
+#line 1284 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc);
+ (yyval.interm.type).qualifier.storage = EvqConst; // will later turn into EvqConstReadOnly, if the initializer is not constant
+ }
+#line 5752 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 157:
+#line 1288 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.requireStage((yyvsp[0].lex).loc, EShLangVertex, "attribute");
+ parseContext.checkDeprecated((yyvsp[0].lex).loc, ECoreProfile, 130, "attribute");
+ parseContext.checkDeprecated((yyvsp[0].lex).loc, ENoProfile, 130, "attribute");
+ parseContext.requireNotRemoved((yyvsp[0].lex).loc, ECoreProfile, 420, "attribute");
+ parseContext.requireNotRemoved((yyvsp[0].lex).loc, EEsProfile, 300, "attribute");
+
+ parseContext.globalCheck((yyvsp[0].lex).loc, "attribute");
+
+ (yyval.interm.type).init((yyvsp[0].lex).loc);
+ (yyval.interm.type).qualifier.storage = EvqVaryingIn;
+ }
+#line 5769 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 158:
+#line 1300 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.checkDeprecated((yyvsp[0].lex).loc, ENoProfile, 130, "varying");
+ parseContext.checkDeprecated((yyvsp[0].lex).loc, ECoreProfile, 130, "varying");
+ parseContext.requireNotRemoved((yyvsp[0].lex).loc, ECoreProfile, 420, "varying");
+ parseContext.requireNotRemoved((yyvsp[0].lex).loc, EEsProfile, 300, "varying");
+
+ parseContext.globalCheck((yyvsp[0].lex).loc, "varying");
+
+ (yyval.interm.type).init((yyvsp[0].lex).loc);
+ if (parseContext.language == EShLangVertex)
+ (yyval.interm.type).qualifier.storage = EvqVaryingOut;
+ else
+ (yyval.interm.type).qualifier.storage = EvqVaryingIn;
+ }
+#line 5788 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 159:
+#line 1314 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.globalCheck((yyvsp[0].lex).loc, "inout");
+ (yyval.interm.type).init((yyvsp[0].lex).loc);
+ (yyval.interm.type).qualifier.storage = EvqInOut;
+ }
+#line 5798 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 160:
+#line 1319 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.globalCheck((yyvsp[0].lex).loc, "in");
+ (yyval.interm.type).init((yyvsp[0].lex).loc);
+ // whether this is a parameter "in" or a pipeline "in" will get sorted out a bit later
+ (yyval.interm.type).qualifier.storage = EvqIn;
+ }
+#line 5809 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 161:
+#line 1325 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.globalCheck((yyvsp[0].lex).loc, "out");
+ (yyval.interm.type).init((yyvsp[0].lex).loc);
+ // whether this is a parameter "out" or a pipeline "out" will get sorted out a bit later
+ (yyval.interm.type).qualifier.storage = EvqOut;
+ }
+#line 5820 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 162:
+#line 1331 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.profileRequires((yyvsp[0].lex).loc, ENoProfile, 120, 0, "centroid");
+ parseContext.profileRequires((yyvsp[0].lex).loc, EEsProfile, 300, 0, "centroid");
+ parseContext.globalCheck((yyvsp[0].lex).loc, "centroid");
+ (yyval.interm.type).init((yyvsp[0].lex).loc);
+ (yyval.interm.type).qualifier.centroid = true;
+ }
+#line 5832 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 163:
+#line 1338 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.globalCheck((yyvsp[0].lex).loc, "patch");
+ parseContext.requireStage((yyvsp[0].lex).loc, (EShLanguageMask)(EShLangTessControlMask | EShLangTessEvaluationMask), "patch");
+ (yyval.interm.type).init((yyvsp[0].lex).loc);
+ (yyval.interm.type).qualifier.patch = true;
+ }
+#line 5843 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 164:
+#line 1344 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.globalCheck((yyvsp[0].lex).loc, "sample");
+ (yyval.interm.type).init((yyvsp[0].lex).loc);
+ (yyval.interm.type).qualifier.sample = true;
+ }
+#line 5853 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 165:
+#line 1349 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.globalCheck((yyvsp[0].lex).loc, "uniform");
+ (yyval.interm.type).init((yyvsp[0].lex).loc);
+ (yyval.interm.type).qualifier.storage = EvqUniform;
+ }
+#line 5863 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 166:
+#line 1354 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.globalCheck((yyvsp[0].lex).loc, "buffer");
+ (yyval.interm.type).init((yyvsp[0].lex).loc);
+ (yyval.interm.type).qualifier.storage = EvqBuffer;
+ }
+#line 5873 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 167:
+#line 1359 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+#ifdef NV_EXTENSIONS
+ parseContext.globalCheck((yyvsp[0].lex).loc, "hitAttributeNV");
+ parseContext.requireStage((yyvsp[0].lex).loc, (EShLanguageMask)(EShLangIntersectNVMask | EShLangClosestHitNVMask
+ | EShLangAnyHitNVMask), "hitAttributeNV");
+ parseContext.profileRequires((yyvsp[0].lex).loc, ECoreProfile, 460, E_GL_NV_ray_tracing, "hitAttributeNV");
+ (yyval.interm.type).init((yyvsp[0].lex).loc);
+ (yyval.interm.type).qualifier.storage = EvqHitAttrNV;
+#endif
+ }
+#line 5888 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 168:
+#line 1369 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+#ifdef NV_EXTENSIONS
+ parseContext.globalCheck((yyvsp[0].lex).loc, "rayPayloadNV");
+ parseContext.requireStage((yyvsp[0].lex).loc, (EShLanguageMask)(EShLangRayGenNVMask | EShLangClosestHitNVMask |
+ EShLangAnyHitNVMask | EShLangMissNVMask), "rayPayloadNV");
+ parseContext.profileRequires((yyvsp[0].lex).loc, ECoreProfile, 460, E_GL_NV_ray_tracing, "rayPayloadNV");
+ (yyval.interm.type).init((yyvsp[0].lex).loc);
+ (yyval.interm.type).qualifier.storage = EvqPayloadNV;
+#endif
+ }
+#line 5903 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 169:
+#line 1379 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+#ifdef NV_EXTENSIONS
+ parseContext.globalCheck((yyvsp[0].lex).loc, "rayPayloadInNV");
+ parseContext.requireStage((yyvsp[0].lex).loc, (EShLanguageMask)(EShLangClosestHitNVMask |
+ EShLangAnyHitNVMask | EShLangMissNVMask), "rayPayloadInNV");
+ parseContext.profileRequires((yyvsp[0].lex).loc, ECoreProfile, 460, E_GL_NV_ray_tracing, "rayPayloadInNV");
+ (yyval.interm.type).init((yyvsp[0].lex).loc);
+ (yyval.interm.type).qualifier.storage = EvqPayloadInNV;
+#endif
+ }
+#line 5918 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 170:
+#line 1389 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+#ifdef NV_EXTENSIONS
+ parseContext.globalCheck((yyvsp[0].lex).loc, "callableDataNV");
+ parseContext.requireStage((yyvsp[0].lex).loc, (EShLanguageMask)(EShLangRayGenNVMask |
+ EShLangClosestHitNVMask | EShLangMissNVMask | EShLangCallableNVMask), "callableDataNV");
+ parseContext.profileRequires((yyvsp[0].lex).loc, ECoreProfile, 460, E_GL_NV_ray_tracing, "callableDataNV");
+ (yyval.interm.type).init((yyvsp[0].lex).loc);
+ (yyval.interm.type).qualifier.storage = EvqCallableDataNV;
+#endif
+ }
+#line 5933 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 171:
+#line 1399 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+#ifdef NV_EXTENSIONS
+ parseContext.globalCheck((yyvsp[0].lex).loc, "callableDataInNV");
+ parseContext.requireStage((yyvsp[0].lex).loc, (EShLanguageMask)(EShLangCallableNVMask), "callableDataInNV");
+ parseContext.profileRequires((yyvsp[0].lex).loc, ECoreProfile, 460, E_GL_NV_ray_tracing, "callableDataInNV");
+ (yyval.interm.type).init((yyvsp[0].lex).loc);
+ (yyval.interm.type).qualifier.storage = EvqCallableDataInNV;
+#endif
+ }
+#line 5947 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 172:
+#line 1408 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.globalCheck((yyvsp[0].lex).loc, "shared");
+ parseContext.profileRequires((yyvsp[0].lex).loc, ECoreProfile | ECompatibilityProfile, 430, E_GL_ARB_compute_shader, "shared");
+ parseContext.profileRequires((yyvsp[0].lex).loc, EEsProfile, 310, 0, "shared");
+#ifdef NV_EXTENSIONS
+ parseContext.requireStage((yyvsp[0].lex).loc, (EShLanguageMask)(EShLangComputeMask | EShLangMeshNVMask | EShLangTaskNVMask), "shared");
+#else
+ parseContext.requireStage((yyvsp[0].lex).loc, EShLangCompute, "shared");
+#endif
+ (yyval.interm.type).init((yyvsp[0].lex).loc);
+ (yyval.interm.type).qualifier.storage = EvqShared;
+ }
+#line 5964 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 173:
+#line 1420 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc);
+ (yyval.interm.type).qualifier.coherent = true;
+ }
+#line 5973 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 174:
+#line 1424 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc);
+ parseContext.requireExtensions((yyvsp[0].lex).loc, 1, &E_GL_KHR_memory_scope_semantics, "devicecoherent");
+ (yyval.interm.type).qualifier.devicecoherent = true;
+ }
+#line 5983 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 175:
+#line 1429 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc);
+ parseContext.requireExtensions((yyvsp[0].lex).loc, 1, &E_GL_KHR_memory_scope_semantics, "queuefamilycoherent");
+ (yyval.interm.type).qualifier.queuefamilycoherent = true;
+ }
+#line 5993 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 176:
+#line 1434 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc);
+ parseContext.requireExtensions((yyvsp[0].lex).loc, 1, &E_GL_KHR_memory_scope_semantics, "workgroupcoherent");
+ (yyval.interm.type).qualifier.workgroupcoherent = true;
+ }
+#line 6003 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 177:
+#line 1439 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc);
+ parseContext.requireExtensions((yyvsp[0].lex).loc, 1, &E_GL_KHR_memory_scope_semantics, "subgroupcoherent");
+ (yyval.interm.type).qualifier.subgroupcoherent = true;
+ }
+#line 6013 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 178:
+#line 1444 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc);
+ parseContext.requireExtensions((yyvsp[0].lex).loc, 1, &E_GL_KHR_memory_scope_semantics, "nonprivate");
+ (yyval.interm.type).qualifier.nonprivate = true;
+ }
+#line 6023 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 179:
+#line 1449 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc);
+ (yyval.interm.type).qualifier.volatil = true;
+ }
+#line 6032 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 180:
+#line 1453 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc);
+ (yyval.interm.type).qualifier.restrict = true;
+ }
+#line 6041 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 181:
+#line 1457 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc);
+ (yyval.interm.type).qualifier.readonly = true;
+ }
+#line 6050 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 182:
+#line 1461 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc);
+ (yyval.interm.type).qualifier.writeonly = true;
+ }
+#line 6059 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 183:
+#line 1465 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.spvRemoved((yyvsp[0].lex).loc, "subroutine");
+ parseContext.globalCheck((yyvsp[0].lex).loc, "subroutine");
+ parseContext.unimplemented((yyvsp[0].lex).loc, "subroutine");
+ (yyval.interm.type).init((yyvsp[0].lex).loc);
+ }
+#line 6070 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 184:
+#line 1471 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.spvRemoved((yyvsp[-3].lex).loc, "subroutine");
+ parseContext.globalCheck((yyvsp[-3].lex).loc, "subroutine");
+ parseContext.unimplemented((yyvsp[-3].lex).loc, "subroutine");
+ (yyval.interm.type).init((yyvsp[-3].lex).loc);
+ }
+#line 6081 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 185:
+#line 1480 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc);
+ (yyval.interm.type).qualifier.nonUniform = true;
+ }
+#line 6090 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 186:
+#line 1487 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ // TODO
+ }
+#line 6098 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 187:
+#line 1490 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ // TODO: 4.0 semantics: subroutines
+ // 1) make sure each identifier is a type declared earlier with SUBROUTINE
+ // 2) save all of the identifiers for future comparison with the declared function
+ }
+#line 6108 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 188:
+#line 1498 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type) = (yyvsp[-1].interm.type);
+ (yyval.interm.type).qualifier.precision = parseContext.getDefaultPrecision((yyval.interm.type));
+ (yyval.interm.type).typeParameters = (yyvsp[0].interm.typeParameters);
+ }
+#line 6118 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 189:
+#line 1503 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.arrayOfArrayVersionCheck((yyvsp[0].interm).loc, (yyvsp[0].interm).arraySizes);
+ (yyval.interm.type) = (yyvsp[-2].interm.type);
+ (yyval.interm.type).qualifier.precision = parseContext.getDefaultPrecision((yyval.interm.type));
+ (yyval.interm.type).typeParameters = (yyvsp[-1].interm.typeParameters);
+ (yyval.interm.type).arraySizes = (yyvsp[0].interm).arraySizes;
+ }
+#line 6130 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 190:
+#line 1513 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm).loc = (yyvsp[-1].lex).loc;
+ (yyval.interm).arraySizes = new TArraySizes;
+ (yyval.interm).arraySizes->addInnerSize();
+ }
+#line 6140 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 191:
+#line 1518 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm).loc = (yyvsp[-2].lex).loc;
+ (yyval.interm).arraySizes = new TArraySizes;
+
+ TArraySize size;
+ parseContext.arraySizeCheck((yyvsp[-1].interm.intermTypedNode)->getLoc(), (yyvsp[-1].interm.intermTypedNode), size, "array size");
+ (yyval.interm).arraySizes->addInnerSize(size);
+ }
+#line 6153 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 192:
+#line 1526 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm) = (yyvsp[-2].interm);
+ (yyval.interm).arraySizes->addInnerSize();
+ }
+#line 6162 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 193:
+#line 1530 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm) = (yyvsp[-3].interm);
+
+ TArraySize size;
+ parseContext.arraySizeCheck((yyvsp[-1].interm.intermTypedNode)->getLoc(), (yyvsp[-1].interm.intermTypedNode), size, "array size");
+ (yyval.interm).arraySizes->addInnerSize(size);
+ }
+#line 6174 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 194:
+#line 1540 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.typeParameters) = (yyvsp[0].interm.typeParameters);
+ }
+#line 6182 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 195:
+#line 1543 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.typeParameters) = 0;
+ }
+#line 6190 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 196:
+#line 1549 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.typeParameters) = (yyvsp[-1].interm.typeParameters);
+ }
+#line 6198 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 197:
+#line 1555 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.typeParameters) = new TArraySizes;
+
+ TArraySize size;
+ parseContext.arraySizeCheck((yyvsp[0].interm.intermTypedNode)->getLoc(), (yyvsp[0].interm.intermTypedNode), size, "type parameter");
+ (yyval.interm.typeParameters)->addInnerSize(size);
+ }
+#line 6210 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 198:
+#line 1562 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.typeParameters) = (yyvsp[-2].interm.typeParameters);
+
+ TArraySize size;
+ parseContext.arraySizeCheck((yyvsp[0].interm.intermTypedNode)->getLoc(), (yyvsp[0].interm.intermTypedNode), size, "type parameter");
+ (yyval.interm.typeParameters)->addInnerSize(size);
+ }
+#line 6222 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 199:
+#line 1572 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtVoid;
+ }
+#line 6231 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 200:
+#line 1576 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtFloat;
+ }
+#line 6240 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 201:
+#line 1580 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.doubleCheck((yyvsp[0].lex).loc, "double");
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtDouble;
+ }
+#line 6250 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 202:
+#line 1585 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.float16ScalarVectorCheck((yyvsp[0].lex).loc, "float16_t", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtFloat16;
+ }
+#line 6260 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 203:
+#line 1590 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.explicitFloat32Check((yyvsp[0].lex).loc, "float32_t", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtFloat;
+ }
+#line 6270 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 204:
+#line 1595 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.explicitFloat64Check((yyvsp[0].lex).loc, "float64_t", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtDouble;
+ }
+#line 6280 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 205:
+#line 1600 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtInt;
+ }
+#line 6289 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 206:
+#line 1604 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.fullIntegerCheck((yyvsp[0].lex).loc, "unsigned integer");
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtUint;
+ }
+#line 6299 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 207:
+#line 1609 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.int8ScalarVectorCheck((yyvsp[0].lex).loc, "8-bit signed integer", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtInt8;
+ }
+#line 6309 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 208:
+#line 1614 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.int8ScalarVectorCheck((yyvsp[0].lex).loc, "8-bit unsigned integer", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtUint8;
+ }
+#line 6319 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 209:
+#line 1619 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.int16ScalarVectorCheck((yyvsp[0].lex).loc, "16-bit signed integer", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtInt16;
+ }
+#line 6329 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 210:
+#line 1624 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.int16ScalarVectorCheck((yyvsp[0].lex).loc, "16-bit unsigned integer", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtUint16;
+ }
+#line 6339 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 211:
+#line 1629 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.explicitInt32Check((yyvsp[0].lex).loc, "32-bit signed integer", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtInt;
+ }
+#line 6349 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 212:
+#line 1634 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.explicitInt32Check((yyvsp[0].lex).loc, "32-bit unsigned integer", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtUint;
+ }
+#line 6359 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 213:
+#line 1639 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.int64Check((yyvsp[0].lex).loc, "64-bit integer", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtInt64;
+ }
+#line 6369 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 214:
+#line 1644 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.int64Check((yyvsp[0].lex).loc, "64-bit unsigned integer", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtUint64;
+ }
+#line 6379 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 215:
+#line 1649 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtBool;
+ }
+#line 6388 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 216:
+#line 1653 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtFloat;
+ (yyval.interm.type).setVector(2);
+ }
+#line 6398 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 217:
+#line 1658 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtFloat;
+ (yyval.interm.type).setVector(3);
+ }
+#line 6408 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 218:
+#line 1663 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtFloat;
+ (yyval.interm.type).setVector(4);
+ }
+#line 6418 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 219:
+#line 1668 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.doubleCheck((yyvsp[0].lex).loc, "double vector");
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtDouble;
+ (yyval.interm.type).setVector(2);
+ }
+#line 6429 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 220:
+#line 1674 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.doubleCheck((yyvsp[0].lex).loc, "double vector");
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtDouble;
+ (yyval.interm.type).setVector(3);
+ }
+#line 6440 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 221:
+#line 1680 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.doubleCheck((yyvsp[0].lex).loc, "double vector");
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtDouble;
+ (yyval.interm.type).setVector(4);
+ }
+#line 6451 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 222:
+#line 1686 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.float16ScalarVectorCheck((yyvsp[0].lex).loc, "half float vector", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtFloat16;
+ (yyval.interm.type).setVector(2);
+ }
+#line 6462 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 223:
+#line 1692 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.float16ScalarVectorCheck((yyvsp[0].lex).loc, "half float vector", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtFloat16;
+ (yyval.interm.type).setVector(3);
+ }
+#line 6473 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 224:
+#line 1698 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.float16ScalarVectorCheck((yyvsp[0].lex).loc, "half float vector", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtFloat16;
+ (yyval.interm.type).setVector(4);
+ }
+#line 6484 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 225:
+#line 1704 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.explicitFloat32Check((yyvsp[0].lex).loc, "float32_t vector", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtFloat;
+ (yyval.interm.type).setVector(2);
+ }
+#line 6495 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 226:
+#line 1710 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.explicitFloat32Check((yyvsp[0].lex).loc, "float32_t vector", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtFloat;
+ (yyval.interm.type).setVector(3);
+ }
+#line 6506 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 227:
+#line 1716 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.explicitFloat32Check((yyvsp[0].lex).loc, "float32_t vector", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtFloat;
+ (yyval.interm.type).setVector(4);
+ }
+#line 6517 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 228:
+#line 1722 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.explicitFloat64Check((yyvsp[0].lex).loc, "float64_t vector", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtDouble;
+ (yyval.interm.type).setVector(2);
+ }
+#line 6528 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 229:
+#line 1728 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.explicitFloat64Check((yyvsp[0].lex).loc, "float64_t vector", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtDouble;
+ (yyval.interm.type).setVector(3);
+ }
+#line 6539 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 230:
+#line 1734 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.explicitFloat64Check((yyvsp[0].lex).loc, "float64_t vector", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtDouble;
+ (yyval.interm.type).setVector(4);
+ }
+#line 6550 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 231:
+#line 1740 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtBool;
+ (yyval.interm.type).setVector(2);
+ }
+#line 6560 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 232:
+#line 1745 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtBool;
+ (yyval.interm.type).setVector(3);
+ }
+#line 6570 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 233:
+#line 1750 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtBool;
+ (yyval.interm.type).setVector(4);
+ }
+#line 6580 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 234:
+#line 1755 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtInt;
+ (yyval.interm.type).setVector(2);
+ }
+#line 6590 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 235:
+#line 1760 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtInt;
+ (yyval.interm.type).setVector(3);
+ }
+#line 6600 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 236:
+#line 1765 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtInt;
+ (yyval.interm.type).setVector(4);
+ }
+#line 6610 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 237:
+#line 1770 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.int8ScalarVectorCheck((yyvsp[0].lex).loc, "8-bit signed integer vector", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtInt8;
+ (yyval.interm.type).setVector(2);
+ }
+#line 6621 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 238:
+#line 1776 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.int8ScalarVectorCheck((yyvsp[0].lex).loc, "8-bit signed integer vector", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtInt8;
+ (yyval.interm.type).setVector(3);
+ }
+#line 6632 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 239:
+#line 1782 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.int8ScalarVectorCheck((yyvsp[0].lex).loc, "8-bit signed integer vector", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtInt8;
+ (yyval.interm.type).setVector(4);
+ }
+#line 6643 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 240:
+#line 1788 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.int16ScalarVectorCheck((yyvsp[0].lex).loc, "16-bit signed integer vector", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtInt16;
+ (yyval.interm.type).setVector(2);
+ }
+#line 6654 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 241:
+#line 1794 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.int16ScalarVectorCheck((yyvsp[0].lex).loc, "16-bit signed integer vector", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtInt16;
+ (yyval.interm.type).setVector(3);
+ }
+#line 6665 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 242:
+#line 1800 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.int16ScalarVectorCheck((yyvsp[0].lex).loc, "16-bit signed integer vector", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtInt16;
+ (yyval.interm.type).setVector(4);
+ }
+#line 6676 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 243:
+#line 1806 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.explicitInt32Check((yyvsp[0].lex).loc, "32-bit signed integer vector", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtInt;
+ (yyval.interm.type).setVector(2);
+ }
+#line 6687 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 244:
+#line 1812 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.explicitInt32Check((yyvsp[0].lex).loc, "32-bit signed integer vector", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtInt;
+ (yyval.interm.type).setVector(3);
+ }
+#line 6698 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 245:
+#line 1818 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.explicitInt32Check((yyvsp[0].lex).loc, "32-bit signed integer vector", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtInt;
+ (yyval.interm.type).setVector(4);
+ }
+#line 6709 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 246:
+#line 1824 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.int64Check((yyvsp[0].lex).loc, "64-bit integer vector", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtInt64;
+ (yyval.interm.type).setVector(2);
+ }
+#line 6720 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 247:
+#line 1830 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.int64Check((yyvsp[0].lex).loc, "64-bit integer vector", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtInt64;
+ (yyval.interm.type).setVector(3);
+ }
+#line 6731 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 248:
+#line 1836 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.int64Check((yyvsp[0].lex).loc, "64-bit integer vector", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtInt64;
+ (yyval.interm.type).setVector(4);
+ }
+#line 6742 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 249:
+#line 1842 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.fullIntegerCheck((yyvsp[0].lex).loc, "unsigned integer vector");
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtUint;
+ (yyval.interm.type).setVector(2);
+ }
+#line 6753 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 250:
+#line 1848 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.fullIntegerCheck((yyvsp[0].lex).loc, "unsigned integer vector");
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtUint;
+ (yyval.interm.type).setVector(3);
+ }
+#line 6764 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 251:
+#line 1854 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.fullIntegerCheck((yyvsp[0].lex).loc, "unsigned integer vector");
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtUint;
+ (yyval.interm.type).setVector(4);
+ }
+#line 6775 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 252:
+#line 1860 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.int8ScalarVectorCheck((yyvsp[0].lex).loc, "8-bit unsigned integer vector", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtUint8;
+ (yyval.interm.type).setVector(2);
+ }
+#line 6786 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 253:
+#line 1866 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.int8ScalarVectorCheck((yyvsp[0].lex).loc, "8-bit unsigned integer vector", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtUint8;
+ (yyval.interm.type).setVector(3);
+ }
+#line 6797 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 254:
+#line 1872 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.int8ScalarVectorCheck((yyvsp[0].lex).loc, "8-bit unsigned integer vector", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtUint8;
+ (yyval.interm.type).setVector(4);
+ }
+#line 6808 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 255:
+#line 1878 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.int16ScalarVectorCheck((yyvsp[0].lex).loc, "16-bit unsigned integer vector", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtUint16;
+ (yyval.interm.type).setVector(2);
+ }
+#line 6819 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 256:
+#line 1884 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.int16ScalarVectorCheck((yyvsp[0].lex).loc, "16-bit unsigned integer vector", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtUint16;
+ (yyval.interm.type).setVector(3);
+ }
+#line 6830 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 257:
+#line 1890 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.int16ScalarVectorCheck((yyvsp[0].lex).loc, "16-bit unsigned integer vector", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtUint16;
+ (yyval.interm.type).setVector(4);
+ }
+#line 6841 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 258:
+#line 1896 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.explicitInt32Check((yyvsp[0].lex).loc, "32-bit unsigned integer vector", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtUint;
+ (yyval.interm.type).setVector(2);
+ }
+#line 6852 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 259:
+#line 1902 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.explicitInt32Check((yyvsp[0].lex).loc, "32-bit unsigned integer vector", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtUint;
+ (yyval.interm.type).setVector(3);
+ }
+#line 6863 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 260:
+#line 1908 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.explicitInt32Check((yyvsp[0].lex).loc, "32-bit unsigned integer vector", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtUint;
+ (yyval.interm.type).setVector(4);
+ }
+#line 6874 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 261:
+#line 1914 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.int64Check((yyvsp[0].lex).loc, "64-bit unsigned integer vector", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtUint64;
+ (yyval.interm.type).setVector(2);
+ }
+#line 6885 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 262:
+#line 1920 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.int64Check((yyvsp[0].lex).loc, "64-bit unsigned integer vector", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtUint64;
+ (yyval.interm.type).setVector(3);
+ }
+#line 6896 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 263:
+#line 1926 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.int64Check((yyvsp[0].lex).loc, "64-bit unsigned integer vector", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtUint64;
+ (yyval.interm.type).setVector(4);
+ }
+#line 6907 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 264:
+#line 1932 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtFloat;
+ (yyval.interm.type).setMatrix(2, 2);
+ }
+#line 6917 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 265:
+#line 1937 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtFloat;
+ (yyval.interm.type).setMatrix(3, 3);
+ }
+#line 6927 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 266:
+#line 1942 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtFloat;
+ (yyval.interm.type).setMatrix(4, 4);
+ }
+#line 6937 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 267:
+#line 1947 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtFloat;
+ (yyval.interm.type).setMatrix(2, 2);
+ }
+#line 6947 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 268:
+#line 1952 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtFloat;
+ (yyval.interm.type).setMatrix(2, 3);
+ }
+#line 6957 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 269:
+#line 1957 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtFloat;
+ (yyval.interm.type).setMatrix(2, 4);
+ }
+#line 6967 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 270:
+#line 1962 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtFloat;
+ (yyval.interm.type).setMatrix(3, 2);
+ }
+#line 6977 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 271:
+#line 1967 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtFloat;
+ (yyval.interm.type).setMatrix(3, 3);
+ }
+#line 6987 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 272:
+#line 1972 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtFloat;
+ (yyval.interm.type).setMatrix(3, 4);
+ }
+#line 6997 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 273:
+#line 1977 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtFloat;
+ (yyval.interm.type).setMatrix(4, 2);
+ }
+#line 7007 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 274:
+#line 1982 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtFloat;
+ (yyval.interm.type).setMatrix(4, 3);
+ }
+#line 7017 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 275:
+#line 1987 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtFloat;
+ (yyval.interm.type).setMatrix(4, 4);
+ }
+#line 7027 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 276:
+#line 1992 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.doubleCheck((yyvsp[0].lex).loc, "double matrix");
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtDouble;
+ (yyval.interm.type).setMatrix(2, 2);
+ }
+#line 7038 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 277:
+#line 1998 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.doubleCheck((yyvsp[0].lex).loc, "double matrix");
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtDouble;
+ (yyval.interm.type).setMatrix(3, 3);
+ }
+#line 7049 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 278:
+#line 2004 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.doubleCheck((yyvsp[0].lex).loc, "double matrix");
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtDouble;
+ (yyval.interm.type).setMatrix(4, 4);
+ }
+#line 7060 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 279:
+#line 2010 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.doubleCheck((yyvsp[0].lex).loc, "double matrix");
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtDouble;
+ (yyval.interm.type).setMatrix(2, 2);
+ }
+#line 7071 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 280:
+#line 2016 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.doubleCheck((yyvsp[0].lex).loc, "double matrix");
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtDouble;
+ (yyval.interm.type).setMatrix(2, 3);
+ }
+#line 7082 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 281:
+#line 2022 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.doubleCheck((yyvsp[0].lex).loc, "double matrix");
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtDouble;
+ (yyval.interm.type).setMatrix(2, 4);
+ }
+#line 7093 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 282:
+#line 2028 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.doubleCheck((yyvsp[0].lex).loc, "double matrix");
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtDouble;
+ (yyval.interm.type).setMatrix(3, 2);
+ }
+#line 7104 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 283:
+#line 2034 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.doubleCheck((yyvsp[0].lex).loc, "double matrix");
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtDouble;
+ (yyval.interm.type).setMatrix(3, 3);
+ }
+#line 7115 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 284:
+#line 2040 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.doubleCheck((yyvsp[0].lex).loc, "double matrix");
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtDouble;
+ (yyval.interm.type).setMatrix(3, 4);
+ }
+#line 7126 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 285:
+#line 2046 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.doubleCheck((yyvsp[0].lex).loc, "double matrix");
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtDouble;
+ (yyval.interm.type).setMatrix(4, 2);
+ }
+#line 7137 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 286:
+#line 2052 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.doubleCheck((yyvsp[0].lex).loc, "double matrix");
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtDouble;
+ (yyval.interm.type).setMatrix(4, 3);
+ }
+#line 7148 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 287:
+#line 2058 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.doubleCheck((yyvsp[0].lex).loc, "double matrix");
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtDouble;
+ (yyval.interm.type).setMatrix(4, 4);
+ }
+#line 7159 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 288:
+#line 2064 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.float16Check((yyvsp[0].lex).loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtFloat16;
+ (yyval.interm.type).setMatrix(2, 2);
+ }
+#line 7170 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 289:
+#line 2070 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.float16Check((yyvsp[0].lex).loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtFloat16;
+ (yyval.interm.type).setMatrix(3, 3);
+ }
+#line 7181 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 290:
+#line 2076 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.float16Check((yyvsp[0].lex).loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtFloat16;
+ (yyval.interm.type).setMatrix(4, 4);
+ }
+#line 7192 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 291:
+#line 2082 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.float16Check((yyvsp[0].lex).loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtFloat16;
+ (yyval.interm.type).setMatrix(2, 2);
+ }
+#line 7203 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 292:
+#line 2088 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.float16Check((yyvsp[0].lex).loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtFloat16;
+ (yyval.interm.type).setMatrix(2, 3);
+ }
+#line 7214 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 293:
+#line 2094 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.float16Check((yyvsp[0].lex).loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtFloat16;
+ (yyval.interm.type).setMatrix(2, 4);
+ }
+#line 7225 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 294:
+#line 2100 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.float16Check((yyvsp[0].lex).loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtFloat16;
+ (yyval.interm.type).setMatrix(3, 2);
+ }
+#line 7236 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 295:
+#line 2106 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.float16Check((yyvsp[0].lex).loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtFloat16;
+ (yyval.interm.type).setMatrix(3, 3);
+ }
+#line 7247 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 296:
+#line 2112 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.float16Check((yyvsp[0].lex).loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtFloat16;
+ (yyval.interm.type).setMatrix(3, 4);
+ }
+#line 7258 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 297:
+#line 2118 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.float16Check((yyvsp[0].lex).loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtFloat16;
+ (yyval.interm.type).setMatrix(4, 2);
+ }
+#line 7269 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 298:
+#line 2124 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.float16Check((yyvsp[0].lex).loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtFloat16;
+ (yyval.interm.type).setMatrix(4, 3);
+ }
+#line 7280 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 299:
+#line 2130 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.float16Check((yyvsp[0].lex).loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtFloat16;
+ (yyval.interm.type).setMatrix(4, 4);
+ }
+#line 7291 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 300:
+#line 2136 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.explicitFloat32Check((yyvsp[0].lex).loc, "float32_t matrix", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtFloat;
+ (yyval.interm.type).setMatrix(2, 2);
+ }
+#line 7302 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 301:
+#line 2142 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.explicitFloat32Check((yyvsp[0].lex).loc, "float32_t matrix", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtFloat;
+ (yyval.interm.type).setMatrix(3, 3);
+ }
+#line 7313 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 302:
+#line 2148 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.explicitFloat32Check((yyvsp[0].lex).loc, "float32_t matrix", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtFloat;
+ (yyval.interm.type).setMatrix(4, 4);
+ }
+#line 7324 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 303:
+#line 2154 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.explicitFloat32Check((yyvsp[0].lex).loc, "float32_t matrix", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtFloat;
+ (yyval.interm.type).setMatrix(2, 2);
+ }
+#line 7335 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 304:
+#line 2160 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.explicitFloat32Check((yyvsp[0].lex).loc, "float32_t matrix", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtFloat;
+ (yyval.interm.type).setMatrix(2, 3);
+ }
+#line 7346 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 305:
+#line 2166 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.explicitFloat32Check((yyvsp[0].lex).loc, "float32_t matrix", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtFloat;
+ (yyval.interm.type).setMatrix(2, 4);
+ }
+#line 7357 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 306:
+#line 2172 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.explicitFloat32Check((yyvsp[0].lex).loc, "float32_t matrix", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtFloat;
+ (yyval.interm.type).setMatrix(3, 2);
+ }
+#line 7368 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 307:
+#line 2178 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.explicitFloat32Check((yyvsp[0].lex).loc, "float32_t matrix", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtFloat;
+ (yyval.interm.type).setMatrix(3, 3);
+ }
+#line 7379 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 308:
+#line 2184 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.explicitFloat32Check((yyvsp[0].lex).loc, "float32_t matrix", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtFloat;
+ (yyval.interm.type).setMatrix(3, 4);
+ }
+#line 7390 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 309:
+#line 2190 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.explicitFloat32Check((yyvsp[0].lex).loc, "float32_t matrix", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtFloat;
+ (yyval.interm.type).setMatrix(4, 2);
+ }
+#line 7401 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 310:
+#line 2196 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.explicitFloat32Check((yyvsp[0].lex).loc, "float32_t matrix", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtFloat;
+ (yyval.interm.type).setMatrix(4, 3);
+ }
+#line 7412 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 311:
+#line 2202 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.explicitFloat32Check((yyvsp[0].lex).loc, "float32_t matrix", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtFloat;
+ (yyval.interm.type).setMatrix(4, 4);
+ }
+#line 7423 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 312:
+#line 2208 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.explicitFloat64Check((yyvsp[0].lex).loc, "float64_t matrix", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtDouble;
+ (yyval.interm.type).setMatrix(2, 2);
+ }
+#line 7434 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 313:
+#line 2214 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.explicitFloat64Check((yyvsp[0].lex).loc, "float64_t matrix", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtDouble;
+ (yyval.interm.type).setMatrix(3, 3);
+ }
+#line 7445 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 314:
+#line 2220 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.explicitFloat64Check((yyvsp[0].lex).loc, "float64_t matrix", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtDouble;
+ (yyval.interm.type).setMatrix(4, 4);
+ }
+#line 7456 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 315:
+#line 2226 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.explicitFloat64Check((yyvsp[0].lex).loc, "float64_t matrix", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtDouble;
+ (yyval.interm.type).setMatrix(2, 2);
+ }
+#line 7467 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 316:
+#line 2232 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.explicitFloat64Check((yyvsp[0].lex).loc, "float64_t matrix", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtDouble;
+ (yyval.interm.type).setMatrix(2, 3);
+ }
+#line 7478 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 317:
+#line 2238 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.explicitFloat64Check((yyvsp[0].lex).loc, "float64_t matrix", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtDouble;
+ (yyval.interm.type).setMatrix(2, 4);
+ }
+#line 7489 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 318:
+#line 2244 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.explicitFloat64Check((yyvsp[0].lex).loc, "float64_t matrix", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtDouble;
+ (yyval.interm.type).setMatrix(3, 2);
+ }
+#line 7500 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 319:
+#line 2250 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.explicitFloat64Check((yyvsp[0].lex).loc, "float64_t matrix", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtDouble;
+ (yyval.interm.type).setMatrix(3, 3);
+ }
+#line 7511 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 320:
+#line 2256 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.explicitFloat64Check((yyvsp[0].lex).loc, "float64_t matrix", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtDouble;
+ (yyval.interm.type).setMatrix(3, 4);
+ }
+#line 7522 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 321:
+#line 2262 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.explicitFloat64Check((yyvsp[0].lex).loc, "float64_t matrix", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtDouble;
+ (yyval.interm.type).setMatrix(4, 2);
+ }
+#line 7533 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 322:
+#line 2268 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.explicitFloat64Check((yyvsp[0].lex).loc, "float64_t matrix", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtDouble;
+ (yyval.interm.type).setMatrix(4, 3);
+ }
+#line 7544 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 323:
+#line 2274 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.explicitFloat64Check((yyvsp[0].lex).loc, "float64_t matrix", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtDouble;
+ (yyval.interm.type).setMatrix(4, 4);
+ }
+#line 7555 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 324:
+#line 2280 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+#ifdef NV_EXTENSIONS
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtAccStructNV;
+#endif
+ }
+#line 7566 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 325:
+#line 2286 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.vulkanRemoved((yyvsp[0].lex).loc, "atomic counter types");
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtAtomicUint;
+ }
+#line 7576 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 326:
+#line 2291 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.set(EbtFloat, Esd1D);
+ }
+#line 7586 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 327:
+#line 2296 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.set(EbtFloat, Esd2D);
+ }
+#line 7596 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 328:
+#line 2301 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.set(EbtFloat, Esd3D);
+ }
+#line 7606 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 329:
+#line 2306 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.set(EbtFloat, EsdCube);
+ }
+#line 7616 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 330:
+#line 2311 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.set(EbtFloat, Esd1D, false, true);
+ }
+#line 7626 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 331:
+#line 2316 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.set(EbtFloat, Esd2D, false, true);
+ }
+#line 7636 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 332:
+#line 2321 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.set(EbtFloat, EsdCube, false, true);
+ }
+#line 7646 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 333:
+#line 2326 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.set(EbtFloat, Esd1D, true);
+ }
+#line 7656 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 334:
+#line 2331 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.set(EbtFloat, Esd2D, true);
+ }
+#line 7666 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 335:
+#line 2336 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.set(EbtFloat, Esd1D, true, true);
+ }
+#line 7676 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 336:
+#line 2341 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.set(EbtFloat, Esd2D, true, true);
+ }
+#line 7686 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 337:
+#line 2346 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.set(EbtFloat, EsdCube, true);
+ }
+#line 7696 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 338:
+#line 2351 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.set(EbtFloat, EsdCube, true, true);
+ }
+#line 7706 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 339:
+#line 2356 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.set(EbtFloat16, Esd1D);
+#endif
+ }
+#line 7719 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 340:
+#line 2364 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.set(EbtFloat16, Esd2D);
+#endif
+ }
+#line 7732 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 341:
+#line 2372 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.set(EbtFloat16, Esd3D);
+#endif
+ }
+#line 7745 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 342:
+#line 2380 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.set(EbtFloat16, EsdCube);
+#endif
+ }
+#line 7758 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 343:
+#line 2388 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.set(EbtFloat16, Esd1D, false, true);
+#endif
+ }
+#line 7771 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 344:
+#line 2396 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.set(EbtFloat16, Esd2D, false, true);
+#endif
+ }
+#line 7784 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 345:
+#line 2404 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.set(EbtFloat16, EsdCube, false, true);
+#endif
+ }
+#line 7797 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 346:
+#line 2412 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.set(EbtFloat16, Esd1D, true);
+#endif
+ }
+#line 7810 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 347:
+#line 2420 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.set(EbtFloat16, Esd2D, true);
+#endif
+ }
+#line 7823 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 348:
+#line 2428 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.set(EbtFloat16, Esd1D, true, true);
+#endif
+ }
+#line 7836 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 349:
+#line 2436 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.set(EbtFloat16, Esd2D, true, true);
+#endif
+ }
+#line 7849 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 350:
+#line 2444 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.set(EbtFloat16, EsdCube, true);
+#endif
+ }
+#line 7862 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 351:
+#line 2452 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.set(EbtFloat16, EsdCube, true, true);
+#endif
+ }
+#line 7875 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 352:
+#line 2460 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.set(EbtInt, Esd1D);
+ }
+#line 7885 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 353:
+#line 2465 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.set(EbtInt, Esd2D);
+ }
+#line 7895 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 354:
+#line 2470 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.set(EbtInt, Esd3D);
+ }
+#line 7905 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 355:
+#line 2475 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.set(EbtInt, EsdCube);
+ }
+#line 7915 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 356:
+#line 2480 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.set(EbtInt, Esd1D, true);
+ }
+#line 7925 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 357:
+#line 2485 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.set(EbtInt, Esd2D, true);
+ }
+#line 7935 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 358:
+#line 2490 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.set(EbtInt, EsdCube, true);
+ }
+#line 7945 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 359:
+#line 2495 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.set(EbtUint, Esd1D);
+ }
+#line 7955 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 360:
+#line 2500 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.set(EbtUint, Esd2D);
+ }
+#line 7965 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 361:
+#line 2505 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.set(EbtUint, Esd3D);
+ }
+#line 7975 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 362:
+#line 2510 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.set(EbtUint, EsdCube);
+ }
+#line 7985 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 363:
+#line 2515 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.set(EbtUint, Esd1D, true);
+ }
+#line 7995 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 364:
+#line 2520 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.set(EbtUint, Esd2D, true);
+ }
+#line 8005 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 365:
+#line 2525 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.set(EbtUint, EsdCube, true);
+ }
+#line 8015 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 366:
+#line 2530 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.set(EbtFloat, EsdRect);
+ }
+#line 8025 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 367:
+#line 2535 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.set(EbtFloat, EsdRect, false, true);
+ }
+#line 8035 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 368:
+#line 2540 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.set(EbtFloat16, EsdRect);
+#endif
+ }
+#line 8048 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 369:
+#line 2548 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.set(EbtFloat16, EsdRect, false, true);
+#endif
+ }
+#line 8061 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 370:
+#line 2556 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.set(EbtInt, EsdRect);
+ }
+#line 8071 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 371:
+#line 2561 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.set(EbtUint, EsdRect);
+ }
+#line 8081 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 372:
+#line 2566 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.set(EbtFloat, EsdBuffer);
+ }
+#line 8091 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 373:
+#line 2571 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.set(EbtFloat16, EsdBuffer);
+#endif
+ }
+#line 8104 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 374:
+#line 2579 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.set(EbtInt, EsdBuffer);
+ }
+#line 8114 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 375:
+#line 2584 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.set(EbtUint, EsdBuffer);
+ }
+#line 8124 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 376:
+#line 2589 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.set(EbtFloat, Esd2D, false, false, true);
+ }
+#line 8134 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 377:
+#line 2594 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.set(EbtFloat16, Esd2D, false, false, true);
+#endif
+ }
+#line 8147 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 378:
+#line 2602 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.set(EbtInt, Esd2D, false, false, true);
+ }
+#line 8157 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 379:
+#line 2607 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.set(EbtUint, Esd2D, false, false, true);
+ }
+#line 8167 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 380:
+#line 2612 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.set(EbtFloat, Esd2D, true, false, true);
+ }
+#line 8177 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 381:
+#line 2617 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.set(EbtFloat16, Esd2D, true, false, true);
+#endif
+ }
+#line 8190 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 382:
+#line 2625 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.set(EbtInt, Esd2D, true, false, true);
+ }
+#line 8200 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 383:
+#line 2630 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.set(EbtUint, Esd2D, true, false, true);
+ }
+#line 8210 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 384:
+#line 2635 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setPureSampler(false);
+ }
+#line 8220 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 385:
+#line 2640 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setPureSampler(true);
+ }
+#line 8230 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 386:
+#line 2645 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setTexture(EbtFloat, Esd1D);
+ }
+#line 8240 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 387:
+#line 2650 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float texture", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setTexture(EbtFloat16, Esd1D);
+#endif
+ }
+#line 8253 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 388:
+#line 2658 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setTexture(EbtFloat, Esd2D);
+ }
+#line 8263 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 389:
+#line 2663 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float texture", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setTexture(EbtFloat16, Esd2D);
+#endif
+ }
+#line 8276 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 390:
+#line 2671 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setTexture(EbtFloat, Esd3D);
+ }
+#line 8286 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 391:
+#line 2676 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float texture", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setTexture(EbtFloat16, Esd3D);
+#endif
+ }
+#line 8299 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 392:
+#line 2684 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setTexture(EbtFloat, EsdCube);
+ }
+#line 8309 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 393:
+#line 2689 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float texture", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setTexture(EbtFloat16, EsdCube);
+#endif
+ }
+#line 8322 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 394:
+#line 2697 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setTexture(EbtFloat, Esd1D, true);
+ }
+#line 8332 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 395:
+#line 2702 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float texture", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setTexture(EbtFloat16, Esd1D, true);
+#endif
+ }
+#line 8345 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 396:
+#line 2710 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setTexture(EbtFloat, Esd2D, true);
+ }
+#line 8355 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 397:
+#line 2715 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float texture", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setTexture(EbtFloat16, Esd2D, true);
+#endif
+ }
+#line 8368 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 398:
+#line 2723 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setTexture(EbtFloat, EsdCube, true);
+ }
+#line 8378 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 399:
+#line 2728 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float texture", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setTexture(EbtFloat16, EsdCube, true);
+#endif
+ }
+#line 8391 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 400:
+#line 2736 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setTexture(EbtInt, Esd1D);
+ }
+#line 8401 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 401:
+#line 2741 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setTexture(EbtInt, Esd2D);
+ }
+#line 8411 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 402:
+#line 2746 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setTexture(EbtInt, Esd3D);
+ }
+#line 8421 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 403:
+#line 2751 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setTexture(EbtInt, EsdCube);
+ }
+#line 8431 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 404:
+#line 2756 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setTexture(EbtInt, Esd1D, true);
+ }
+#line 8441 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 405:
+#line 2761 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setTexture(EbtInt, Esd2D, true);
+ }
+#line 8451 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 406:
+#line 2766 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setTexture(EbtInt, EsdCube, true);
+ }
+#line 8461 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 407:
+#line 2771 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setTexture(EbtUint, Esd1D);
+ }
+#line 8471 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 408:
+#line 2776 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setTexture(EbtUint, Esd2D);
+ }
+#line 8481 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 409:
+#line 2781 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setTexture(EbtUint, Esd3D);
+ }
+#line 8491 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 410:
+#line 2786 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setTexture(EbtUint, EsdCube);
+ }
+#line 8501 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 411:
+#line 2791 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setTexture(EbtUint, Esd1D, true);
+ }
+#line 8511 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 412:
+#line 2796 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setTexture(EbtUint, Esd2D, true);
+ }
+#line 8521 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 413:
+#line 2801 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setTexture(EbtUint, EsdCube, true);
+ }
+#line 8531 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 414:
+#line 2806 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setTexture(EbtFloat, EsdRect);
+ }
+#line 8541 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 415:
+#line 2811 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float texture", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setTexture(EbtFloat16, EsdRect);
+#endif
+ }
+#line 8554 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 416:
+#line 2819 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setTexture(EbtInt, EsdRect);
+ }
+#line 8564 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 417:
+#line 2824 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setTexture(EbtUint, EsdRect);
+ }
+#line 8574 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 418:
+#line 2829 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setTexture(EbtFloat, EsdBuffer);
+ }
+#line 8584 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 419:
+#line 2834 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float texture", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setTexture(EbtFloat16, EsdBuffer);
+#endif
+ }
+#line 8597 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 420:
+#line 2842 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setTexture(EbtInt, EsdBuffer);
+ }
+#line 8607 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 421:
+#line 2847 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setTexture(EbtUint, EsdBuffer);
+ }
+#line 8617 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 422:
+#line 2852 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setTexture(EbtFloat, Esd2D, false, false, true);
+ }
+#line 8627 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 423:
+#line 2857 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float texture", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setTexture(EbtFloat16, Esd2D, false, false, true);
+#endif
+ }
+#line 8640 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 424:
+#line 2865 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setTexture(EbtInt, Esd2D, false, false, true);
+ }
+#line 8650 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 425:
+#line 2870 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setTexture(EbtUint, Esd2D, false, false, true);
+ }
+#line 8660 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 426:
+#line 2875 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setTexture(EbtFloat, Esd2D, true, false, true);
+ }
+#line 8670 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 427:
+#line 2880 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float texture", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setTexture(EbtFloat16, Esd2D, true, false, true);
+#endif
+ }
+#line 8683 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 428:
+#line 2888 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setTexture(EbtInt, Esd2D, true, false, true);
+ }
+#line 8693 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 429:
+#line 2893 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setTexture(EbtUint, Esd2D, true, false, true);
+ }
+#line 8703 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 430:
+#line 2898 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setImage(EbtFloat, Esd1D);
+ }
+#line 8713 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 431:
+#line 2903 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float image", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setImage(EbtFloat16, Esd1D);
+#endif
+ }
+#line 8726 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 432:
+#line 2911 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setImage(EbtInt, Esd1D);
+ }
+#line 8736 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 433:
+#line 2916 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setImage(EbtUint, Esd1D);
+ }
+#line 8746 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 434:
+#line 2921 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setImage(EbtFloat, Esd2D);
+ }
+#line 8756 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 435:
+#line 2926 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float image", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setImage(EbtFloat16, Esd2D);
+#endif
+ }
+#line 8769 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 436:
+#line 2934 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setImage(EbtInt, Esd2D);
+ }
+#line 8779 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 437:
+#line 2939 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setImage(EbtUint, Esd2D);
+ }
+#line 8789 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 438:
+#line 2944 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setImage(EbtFloat, Esd3D);
+ }
+#line 8799 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 439:
+#line 2949 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float image", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setImage(EbtFloat16, Esd3D);
+#endif
+ }
+#line 8812 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 440:
+#line 2957 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setImage(EbtInt, Esd3D);
+ }
+#line 8822 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 441:
+#line 2962 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setImage(EbtUint, Esd3D);
+ }
+#line 8832 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 442:
+#line 2967 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setImage(EbtFloat, EsdRect);
+ }
+#line 8842 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 443:
+#line 2972 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float image", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setImage(EbtFloat16, EsdRect);
+#endif
+ }
+#line 8855 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 444:
+#line 2980 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setImage(EbtInt, EsdRect);
+ }
+#line 8865 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 445:
+#line 2985 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setImage(EbtUint, EsdRect);
+ }
+#line 8875 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 446:
+#line 2990 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setImage(EbtFloat, EsdCube);
+ }
+#line 8885 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 447:
+#line 2995 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float image", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setImage(EbtFloat16, EsdCube);
+#endif
+ }
+#line 8898 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 448:
+#line 3003 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setImage(EbtInt, EsdCube);
+ }
+#line 8908 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 449:
+#line 3008 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setImage(EbtUint, EsdCube);
+ }
+#line 8918 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 450:
+#line 3013 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setImage(EbtFloat, EsdBuffer);
+ }
+#line 8928 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 451:
+#line 3018 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float image", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setImage(EbtFloat16, EsdBuffer);
+#endif
+ }
+#line 8941 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 452:
+#line 3026 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setImage(EbtInt, EsdBuffer);
+ }
+#line 8951 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 453:
+#line 3031 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setImage(EbtUint, EsdBuffer);
+ }
+#line 8961 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 454:
+#line 3036 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setImage(EbtFloat, Esd1D, true);
+ }
+#line 8971 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 455:
+#line 3041 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float image", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setImage(EbtFloat16, Esd1D, true);
+#endif
+ }
+#line 8984 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 456:
+#line 3049 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setImage(EbtInt, Esd1D, true);
+ }
+#line 8994 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 457:
+#line 3054 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setImage(EbtUint, Esd1D, true);
+ }
+#line 9004 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 458:
+#line 3059 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setImage(EbtFloat, Esd2D, true);
+ }
+#line 9014 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 459:
+#line 3064 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float image", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setImage(EbtFloat16, Esd2D, true);
+#endif
+ }
+#line 9027 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 460:
+#line 3072 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setImage(EbtInt, Esd2D, true);
+ }
+#line 9037 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 461:
+#line 3077 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setImage(EbtUint, Esd2D, true);
+ }
+#line 9047 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 462:
+#line 3082 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setImage(EbtFloat, EsdCube, true);
+ }
+#line 9057 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 463:
+#line 3087 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float image", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setImage(EbtFloat16, EsdCube, true);
+#endif
+ }
+#line 9070 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 464:
+#line 3095 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setImage(EbtInt, EsdCube, true);
+ }
+#line 9080 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 465:
+#line 3100 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setImage(EbtUint, EsdCube, true);
+ }
+#line 9090 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 466:
+#line 3105 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setImage(EbtFloat, Esd2D, false, false, true);
+ }
+#line 9100 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 467:
+#line 3110 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float image", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setImage(EbtFloat16, Esd2D, false, false, true);
+#endif
+ }
+#line 9113 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 468:
+#line 3118 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setImage(EbtInt, Esd2D, false, false, true);
+ }
+#line 9123 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 469:
+#line 3123 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setImage(EbtUint, Esd2D, false, false, true);
+ }
+#line 9133 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 470:
+#line 3128 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setImage(EbtFloat, Esd2D, true, false, true);
+ }
+#line 9143 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 471:
+#line 3133 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float image", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setImage(EbtFloat16, Esd2D, true, false, true);
+#endif
+ }
+#line 9156 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 472:
+#line 3141 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setImage(EbtInt, Esd2D, true, false, true);
+ }
+#line 9166 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 473:
+#line 3146 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setImage(EbtUint, Esd2D, true, false, true);
+ }
+#line 9176 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 474:
+#line 3151 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ { // GL_OES_EGL_image_external
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.set(EbtFloat, Esd2D);
+ (yyval.interm.type).sampler.external = true;
+ }
+#line 9187 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 475:
+#line 3157 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ { // GL_EXT_YUV_target
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.set(EbtFloat, Esd2D);
+ (yyval.interm.type).sampler.yuv = true;
+ }
+#line 9198 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 476:
+#line 3163 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.requireStage((yyvsp[0].lex).loc, EShLangFragment, "subpass input");
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setSubpass(EbtFloat);
+ }
+#line 9209 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 477:
+#line 3169 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.requireStage((yyvsp[0].lex).loc, EShLangFragment, "subpass input");
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setSubpass(EbtFloat, true);
+ }
+#line 9220 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 478:
+#line 3175 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float subpass input", parseContext.symbolTable.atBuiltInLevel());
+ parseContext.requireStage((yyvsp[0].lex).loc, EShLangFragment, "subpass input");
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setSubpass(EbtFloat16);
+#endif
+ }
+#line 9234 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 479:
+#line 3184 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float subpass input", parseContext.symbolTable.atBuiltInLevel());
+ parseContext.requireStage((yyvsp[0].lex).loc, EShLangFragment, "subpass input");
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setSubpass(EbtFloat16, true);
+#endif
+ }
+#line 9248 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 480:
+#line 3193 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.requireStage((yyvsp[0].lex).loc, EShLangFragment, "subpass input");
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setSubpass(EbtInt);
+ }
+#line 9259 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 481:
+#line 3199 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.requireStage((yyvsp[0].lex).loc, EShLangFragment, "subpass input");
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setSubpass(EbtInt, true);
+ }
+#line 9270 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 482:
+#line 3205 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.requireStage((yyvsp[0].lex).loc, EShLangFragment, "subpass input");
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setSubpass(EbtUint);
+ }
+#line 9281 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 483:
+#line 3211 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.requireStage((yyvsp[0].lex).loc, EShLangFragment, "subpass input");
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setSubpass(EbtUint, true);
+ }
+#line 9292 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 484:
+#line 3217 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.fcoopmatCheck((yyvsp[0].lex).loc, "fcoopmatNV", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtFloat;
+ (yyval.interm.type).coopmat = true;
+ }
+#line 9303 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 485:
+#line 3223 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type) = (yyvsp[0].interm.type);
+ (yyval.interm.type).qualifier.storage = parseContext.symbolTable.atGlobalLevel() ? EvqGlobal : EvqTemporary;
+ parseContext.structTypeCheck((yyval.interm.type).loc, (yyval.interm.type));
+ }
+#line 9313 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 486:
+#line 3228 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ //
+ // This is for user defined type names. The lexical phase looked up the
+ // type.
+ //
+ if (const TVariable* variable = ((yyvsp[0].lex).symbol)->getAsVariable()) {
+ const TType& structure = variable->getType();
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtStruct;
+ (yyval.interm.type).userDef = &structure;
+ } else
+ parseContext.error((yyvsp[0].lex).loc, "expected type name", (yyvsp[0].lex).string->c_str(), "");
+ }
+#line 9331 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 487:
+#line 3244 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.profileRequires((yyvsp[0].lex).loc, ENoProfile, 130, 0, "highp precision qualifier");
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ parseContext.handlePrecisionQualifier((yyvsp[0].lex).loc, (yyval.interm.type).qualifier, EpqHigh);
+ }
+#line 9341 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 488:
+#line 3249 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.profileRequires((yyvsp[0].lex).loc, ENoProfile, 130, 0, "mediump precision qualifier");
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ parseContext.handlePrecisionQualifier((yyvsp[0].lex).loc, (yyval.interm.type).qualifier, EpqMedium);
+ }
+#line 9351 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 489:
+#line 3254 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.profileRequires((yyvsp[0].lex).loc, ENoProfile, 130, 0, "lowp precision qualifier");
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ parseContext.handlePrecisionQualifier((yyvsp[0].lex).loc, (yyval.interm.type).qualifier, EpqLow);
+ }
+#line 9361 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 490:
+#line 3262 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ { parseContext.nestedStructCheck((yyvsp[-2].lex).loc); }
+#line 9367 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 491:
+#line 3262 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ TType* structure = new TType((yyvsp[-1].interm.typeList), *(yyvsp[-4].lex).string);
+ parseContext.structArrayCheck((yyvsp[-4].lex).loc, *structure);
+ TVariable* userTypeDef = new TVariable((yyvsp[-4].lex).string, *structure, true);
+ if (! parseContext.symbolTable.insert(*userTypeDef))
+ parseContext.error((yyvsp[-4].lex).loc, "redefinition", (yyvsp[-4].lex).string->c_str(), "struct");
+ (yyval.interm.type).init((yyvsp[-5].lex).loc);
+ (yyval.interm.type).basicType = EbtStruct;
+ (yyval.interm.type).userDef = structure;
+ --parseContext.structNestingLevel;
+ }
+#line 9383 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 492:
+#line 3273 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ { parseContext.nestedStructCheck((yyvsp[-1].lex).loc); }
+#line 9389 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 493:
+#line 3273 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ TType* structure = new TType((yyvsp[-1].interm.typeList), TString(""));
+ (yyval.interm.type).init((yyvsp[-4].lex).loc);
+ (yyval.interm.type).basicType = EbtStruct;
+ (yyval.interm.type).userDef = structure;
+ --parseContext.structNestingLevel;
+ }
+#line 9401 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 494:
+#line 3283 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.typeList) = (yyvsp[0].interm.typeList);
+ }
+#line 9409 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 495:
+#line 3286 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.typeList) = (yyvsp[-1].interm.typeList);
+ for (unsigned int i = 0; i < (yyvsp[0].interm.typeList)->size(); ++i) {
+ for (unsigned int j = 0; j < (yyval.interm.typeList)->size(); ++j) {
+ if ((*(yyval.interm.typeList))[j].type->getFieldName() == (*(yyvsp[0].interm.typeList))[i].type->getFieldName())
+ parseContext.error((*(yyvsp[0].interm.typeList))[i].loc, "duplicate member name:", "", (*(yyvsp[0].interm.typeList))[i].type->getFieldName().c_str());
+ }
+ (yyval.interm.typeList)->push_back((*(yyvsp[0].interm.typeList))[i]);
+ }
+ }
+#line 9424 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 496:
+#line 3299 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ if ((yyvsp[-2].interm.type).arraySizes) {
+ parseContext.profileRequires((yyvsp[-2].interm.type).loc, ENoProfile, 120, E_GL_3DL_array_objects, "arrayed type");
+ parseContext.profileRequires((yyvsp[-2].interm.type).loc, EEsProfile, 300, 0, "arrayed type");
+ if (parseContext.profile == EEsProfile)
+ parseContext.arraySizeRequiredCheck((yyvsp[-2].interm.type).loc, *(yyvsp[-2].interm.type).arraySizes);
+ }
+
+ (yyval.interm.typeList) = (yyvsp[-1].interm.typeList);
+
+ parseContext.voidErrorCheck((yyvsp[-2].interm.type).loc, (*(yyvsp[-1].interm.typeList))[0].type->getFieldName(), (yyvsp[-2].interm.type).basicType);
+ parseContext.precisionQualifierCheck((yyvsp[-2].interm.type).loc, (yyvsp[-2].interm.type).basicType, (yyvsp[-2].interm.type).qualifier);
+
+ for (unsigned int i = 0; i < (yyval.interm.typeList)->size(); ++i) {
+ TType type((yyvsp[-2].interm.type));
+ type.setFieldName((*(yyval.interm.typeList))[i].type->getFieldName());
+ type.transferArraySizes((*(yyval.interm.typeList))[i].type->getArraySizes());
+ type.copyArrayInnerSizes((yyvsp[-2].interm.type).arraySizes);
+ parseContext.arrayOfArrayVersionCheck((*(yyval.interm.typeList))[i].loc, type.getArraySizes());
+ (*(yyval.interm.typeList))[i].type->shallowCopy(type);
+ }
+ }
+#line 9451 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 497:
+#line 3321 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ if ((yyvsp[-2].interm.type).arraySizes) {
+ parseContext.profileRequires((yyvsp[-2].interm.type).loc, ENoProfile, 120, E_GL_3DL_array_objects, "arrayed type");
+ parseContext.profileRequires((yyvsp[-2].interm.type).loc, EEsProfile, 300, 0, "arrayed type");
+ if (parseContext.profile == EEsProfile)
+ parseContext.arraySizeRequiredCheck((yyvsp[-2].interm.type).loc, *(yyvsp[-2].interm.type).arraySizes);
+ }
+
+ (yyval.interm.typeList) = (yyvsp[-1].interm.typeList);
+
+ parseContext.memberQualifierCheck((yyvsp[-3].interm.type));
+ parseContext.voidErrorCheck((yyvsp[-2].interm.type).loc, (*(yyvsp[-1].interm.typeList))[0].type->getFieldName(), (yyvsp[-2].interm.type).basicType);
+ parseContext.mergeQualifiers((yyvsp[-2].interm.type).loc, (yyvsp[-2].interm.type).qualifier, (yyvsp[-3].interm.type).qualifier, true);
+ parseContext.precisionQualifierCheck((yyvsp[-2].interm.type).loc, (yyvsp[-2].interm.type).basicType, (yyvsp[-2].interm.type).qualifier);
+
+ for (unsigned int i = 0; i < (yyval.interm.typeList)->size(); ++i) {
+ TType type((yyvsp[-2].interm.type));
+ type.setFieldName((*(yyval.interm.typeList))[i].type->getFieldName());
+ type.transferArraySizes((*(yyval.interm.typeList))[i].type->getArraySizes());
+ type.copyArrayInnerSizes((yyvsp[-2].interm.type).arraySizes);
+ parseContext.arrayOfArrayVersionCheck((*(yyval.interm.typeList))[i].loc, type.getArraySizes());
+ (*(yyval.interm.typeList))[i].type->shallowCopy(type);
+ }
+ }
+#line 9480 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 498:
+#line 3348 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.typeList) = new TTypeList;
+ (yyval.interm.typeList)->push_back((yyvsp[0].interm.typeLine));
+ }
+#line 9489 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 499:
+#line 3352 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.typeList)->push_back((yyvsp[0].interm.typeLine));
+ }
+#line 9497 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 500:
+#line 3358 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.typeLine).type = new TType(EbtVoid);
+ (yyval.interm.typeLine).loc = (yyvsp[0].lex).loc;
+ (yyval.interm.typeLine).type->setFieldName(*(yyvsp[0].lex).string);
+ }
+#line 9507 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 501:
+#line 3363 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.arrayOfArrayVersionCheck((yyvsp[-1].lex).loc, (yyvsp[0].interm).arraySizes);
+
+ (yyval.interm.typeLine).type = new TType(EbtVoid);
+ (yyval.interm.typeLine).loc = (yyvsp[-1].lex).loc;
+ (yyval.interm.typeLine).type->setFieldName(*(yyvsp[-1].lex).string);
+ (yyval.interm.typeLine).type->transferArraySizes((yyvsp[0].interm).arraySizes);
+ }
+#line 9520 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 502:
+#line 3374 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode);
+ }
+#line 9528 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 503:
+#line 3377 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ const char* initFeature = "{ } style initializers";
+ parseContext.requireProfile((yyvsp[-2].lex).loc, ~EEsProfile, initFeature);
+ parseContext.profileRequires((yyvsp[-2].lex).loc, ~EEsProfile, 420, E_GL_ARB_shading_language_420pack, initFeature);
+ (yyval.interm.intermTypedNode) = (yyvsp[-1].interm.intermTypedNode);
+ }
+#line 9539 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 504:
+#line 3383 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ const char* initFeature = "{ } style initializers";
+ parseContext.requireProfile((yyvsp[-3].lex).loc, ~EEsProfile, initFeature);
+ parseContext.profileRequires((yyvsp[-3].lex).loc, ~EEsProfile, 420, E_GL_ARB_shading_language_420pack, initFeature);
+ (yyval.interm.intermTypedNode) = (yyvsp[-2].interm.intermTypedNode);
+ }
+#line 9550 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 505:
+#line 3392 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.intermTypedNode) = parseContext.intermediate.growAggregate(0, (yyvsp[0].interm.intermTypedNode), (yyvsp[0].interm.intermTypedNode)->getLoc());
+ }
+#line 9558 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 506:
+#line 3395 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.intermTypedNode) = parseContext.intermediate.growAggregate((yyvsp[-2].interm.intermTypedNode), (yyvsp[0].interm.intermTypedNode));
+ }
+#line 9566 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 507:
+#line 3401 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ { (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode); }
+#line 9572 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 508:
+#line 3405 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ { (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode); }
+#line 9578 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 509:
+#line 3406 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ { (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode); }
+#line 9584 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 510:
+#line 3412 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ { (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode); }
+#line 9590 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 511:
+#line 3413 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ { (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode); }
+#line 9596 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 512:
+#line 3414 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ { (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode); }
+#line 9602 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 513:
+#line 3415 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ { (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode); }
+#line 9608 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 514:
+#line 3416 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ { (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode); }
+#line 9614 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 515:
+#line 3417 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ { (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode); }
+#line 9620 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 516:
+#line 3418 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ { (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode); }
+#line 9626 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 517:
+#line 3422 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ { (yyval.interm.intermNode) = 0; }
+#line 9632 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 518:
+#line 3423 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.symbolTable.push();
+ ++parseContext.statementNestingLevel;
+ }
+#line 9641 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 519:
+#line 3427 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.symbolTable.pop(&parseContext.defaultPrecision[0]);
+ --parseContext.statementNestingLevel;
+ }
+#line 9650 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 520:
+#line 3431 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ if ((yyvsp[-2].interm.intermNode) && (yyvsp[-2].interm.intermNode)->getAsAggregate())
+ (yyvsp[-2].interm.intermNode)->getAsAggregate()->setOperator(EOpSequence);
+ (yyval.interm.intermNode) = (yyvsp[-2].interm.intermNode);
+ }
+#line 9660 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 521:
+#line 3439 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ { (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode); }
+#line 9666 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 522:
+#line 3440 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ { (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode); }
+#line 9672 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 523:
+#line 3444 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ ++parseContext.controlFlowNestingLevel;
+ }
+#line 9680 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 524:
+#line 3447 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ --parseContext.controlFlowNestingLevel;
+ (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode);
+ }
+#line 9689 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 525:
+#line 3451 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.symbolTable.push();
+ ++parseContext.statementNestingLevel;
+ ++parseContext.controlFlowNestingLevel;
+ }
+#line 9699 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 526:
+#line 3456 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.symbolTable.pop(&parseContext.defaultPrecision[0]);
+ --parseContext.statementNestingLevel;
+ --parseContext.controlFlowNestingLevel;
+ (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode);
+ }
+#line 9710 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 527:
+#line 3465 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.intermNode) = 0;
+ }
+#line 9718 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 528:
+#line 3468 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ if ((yyvsp[-1].interm.intermNode) && (yyvsp[-1].interm.intermNode)->getAsAggregate())
+ (yyvsp[-1].interm.intermNode)->getAsAggregate()->setOperator(EOpSequence);
+ (yyval.interm.intermNode) = (yyvsp[-1].interm.intermNode);
+ }
+#line 9728 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 529:
+#line 3476 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.intermNode) = parseContext.intermediate.makeAggregate((yyvsp[0].interm.intermNode));
+ if ((yyvsp[0].interm.intermNode) && (yyvsp[0].interm.intermNode)->getAsBranchNode() && ((yyvsp[0].interm.intermNode)->getAsBranchNode()->getFlowOp() == EOpCase ||
+ (yyvsp[0].interm.intermNode)->getAsBranchNode()->getFlowOp() == EOpDefault)) {
+ parseContext.wrapupSwitchSubsequence(0, (yyvsp[0].interm.intermNode));
+ (yyval.interm.intermNode) = 0; // start a fresh subsequence for what's after this case
+ }
+ }
+#line 9741 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 530:
+#line 3484 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ if ((yyvsp[0].interm.intermNode) && (yyvsp[0].interm.intermNode)->getAsBranchNode() && ((yyvsp[0].interm.intermNode)->getAsBranchNode()->getFlowOp() == EOpCase ||
+ (yyvsp[0].interm.intermNode)->getAsBranchNode()->getFlowOp() == EOpDefault)) {
+ parseContext.wrapupSwitchSubsequence((yyvsp[-1].interm.intermNode) ? (yyvsp[-1].interm.intermNode)->getAsAggregate() : 0, (yyvsp[0].interm.intermNode));
+ (yyval.interm.intermNode) = 0; // start a fresh subsequence for what's after this case
+ } else
+ (yyval.interm.intermNode) = parseContext.intermediate.growAggregate((yyvsp[-1].interm.intermNode), (yyvsp[0].interm.intermNode));
+ }
+#line 9754 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 531:
+#line 3495 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ { (yyval.interm.intermNode) = 0; }
+#line 9760 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 532:
+#line 3496 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ { (yyval.interm.intermNode) = static_cast<TIntermNode*>((yyvsp[-1].interm.intermTypedNode)); }
+#line 9766 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 533:
+#line 3500 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode);
+ }
+#line 9774 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 534:
+#line 3503 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.handleSelectionAttributes(*(yyvsp[-1].interm.attributes), (yyvsp[0].interm.intermNode));
+ (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode);
+ }
+#line 9783 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 535:
+#line 3509 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.boolCheck((yyvsp[-4].lex).loc, (yyvsp[-2].interm.intermTypedNode));
+ (yyval.interm.intermNode) = parseContext.intermediate.addSelection((yyvsp[-2].interm.intermTypedNode), (yyvsp[0].interm.nodePair), (yyvsp[-4].lex).loc);
+ }
+#line 9792 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 536:
+#line 3516 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.nodePair).node1 = (yyvsp[-2].interm.intermNode);
+ (yyval.interm.nodePair).node2 = (yyvsp[0].interm.intermNode);
+ }
+#line 9801 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 537:
+#line 3520 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.nodePair).node1 = (yyvsp[0].interm.intermNode);
+ (yyval.interm.nodePair).node2 = 0;
+ }
+#line 9810 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 538:
+#line 3528 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode);
+ parseContext.boolCheck((yyvsp[0].interm.intermTypedNode)->getLoc(), (yyvsp[0].interm.intermTypedNode));
+ }
+#line 9819 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 539:
+#line 3532 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.boolCheck((yyvsp[-2].lex).loc, (yyvsp[-3].interm.type));
+
+ TType type((yyvsp[-3].interm.type));
+ TIntermNode* initNode = parseContext.declareVariable((yyvsp[-2].lex).loc, *(yyvsp[-2].lex).string, (yyvsp[-3].interm.type), 0, (yyvsp[0].interm.intermTypedNode));
+ if (initNode)
+ (yyval.interm.intermTypedNode) = initNode->getAsTyped();
+ else
+ (yyval.interm.intermTypedNode) = 0;
+ }
+#line 9834 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 540:
+#line 3545 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode);
+ }
+#line 9842 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 541:
+#line 3548 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.handleSwitchAttributes(*(yyvsp[-1].interm.attributes), (yyvsp[0].interm.intermNode));
+ (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode);
+ }
+#line 9851 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 542:
+#line 3554 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ // start new switch sequence on the switch stack
+ ++parseContext.controlFlowNestingLevel;
+ ++parseContext.statementNestingLevel;
+ parseContext.switchSequenceStack.push_back(new TIntermSequence);
+ parseContext.switchLevel.push_back(parseContext.statementNestingLevel);
+ parseContext.symbolTable.push();
+ }
+#line 9864 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 543:
+#line 3562 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.intermNode) = parseContext.addSwitch((yyvsp[-7].lex).loc, (yyvsp[-5].interm.intermTypedNode), (yyvsp[-1].interm.intermNode) ? (yyvsp[-1].interm.intermNode)->getAsAggregate() : 0);
+ delete parseContext.switchSequenceStack.back();
+ parseContext.switchSequenceStack.pop_back();
+ parseContext.switchLevel.pop_back();
+ parseContext.symbolTable.pop(&parseContext.defaultPrecision[0]);
+ --parseContext.statementNestingLevel;
+ --parseContext.controlFlowNestingLevel;
+ }
+#line 9878 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 544:
+#line 3574 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.intermNode) = 0;
+ }
+#line 9886 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 545:
+#line 3577 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode);
+ }
+#line 9894 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 546:
+#line 3583 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.intermNode) = 0;
+ if (parseContext.switchLevel.size() == 0)
+ parseContext.error((yyvsp[-2].lex).loc, "cannot appear outside switch statement", "case", "");
+ else if (parseContext.switchLevel.back() != parseContext.statementNestingLevel)
+ parseContext.error((yyvsp[-2].lex).loc, "cannot be nested inside control flow", "case", "");
+ else {
+ parseContext.constantValueCheck((yyvsp[-1].interm.intermTypedNode), "case");
+ parseContext.integerCheck((yyvsp[-1].interm.intermTypedNode), "case");
+ (yyval.interm.intermNode) = parseContext.intermediate.addBranch(EOpCase, (yyvsp[-1].interm.intermTypedNode), (yyvsp[-2].lex).loc);
+ }
+ }
+#line 9911 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 547:
+#line 3595 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.intermNode) = 0;
+ if (parseContext.switchLevel.size() == 0)
+ parseContext.error((yyvsp[-1].lex).loc, "cannot appear outside switch statement", "default", "");
+ else if (parseContext.switchLevel.back() != parseContext.statementNestingLevel)
+ parseContext.error((yyvsp[-1].lex).loc, "cannot be nested inside control flow", "default", "");
+ else
+ (yyval.interm.intermNode) = parseContext.intermediate.addBranch(EOpDefault, (yyvsp[-1].lex).loc);
+ }
+#line 9925 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 548:
+#line 3607 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode);
+ }
+#line 9933 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 549:
+#line 3610 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.handleLoopAttributes(*(yyvsp[-1].interm.attributes), (yyvsp[0].interm.intermNode));
+ (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode);
+ }
+#line 9942 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 550:
+#line 3616 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ if (! parseContext.limits.whileLoops)
+ parseContext.error((yyvsp[-1].lex).loc, "while loops not available", "limitation", "");
+ parseContext.symbolTable.push();
+ ++parseContext.loopNestingLevel;
+ ++parseContext.statementNestingLevel;
+ ++parseContext.controlFlowNestingLevel;
+ }
+#line 9955 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 551:
+#line 3624 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.symbolTable.pop(&parseContext.defaultPrecision[0]);
+ (yyval.interm.intermNode) = parseContext.intermediate.addLoop((yyvsp[0].interm.intermNode), (yyvsp[-2].interm.intermTypedNode), 0, true, (yyvsp[-5].lex).loc);
+ --parseContext.loopNestingLevel;
+ --parseContext.statementNestingLevel;
+ --parseContext.controlFlowNestingLevel;
+ }
+#line 9967 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 552:
+#line 3631 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ ++parseContext.loopNestingLevel;
+ ++parseContext.statementNestingLevel;
+ ++parseContext.controlFlowNestingLevel;
+ }
+#line 9977 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 553:
+#line 3636 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ if (! parseContext.limits.whileLoops)
+ parseContext.error((yyvsp[-7].lex).loc, "do-while loops not available", "limitation", "");
+
+ parseContext.boolCheck((yyvsp[0].lex).loc, (yyvsp[-2].interm.intermTypedNode));
+
+ (yyval.interm.intermNode) = parseContext.intermediate.addLoop((yyvsp[-5].interm.intermNode), (yyvsp[-2].interm.intermTypedNode), 0, false, (yyvsp[-4].lex).loc);
+ --parseContext.loopNestingLevel;
+ --parseContext.statementNestingLevel;
+ --parseContext.controlFlowNestingLevel;
+ }
+#line 9993 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 554:
+#line 3647 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.symbolTable.push();
+ ++parseContext.loopNestingLevel;
+ ++parseContext.statementNestingLevel;
+ ++parseContext.controlFlowNestingLevel;
+ }
+#line 10004 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 555:
+#line 3653 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.symbolTable.pop(&parseContext.defaultPrecision[0]);
+ (yyval.interm.intermNode) = parseContext.intermediate.makeAggregate((yyvsp[-3].interm.intermNode), (yyvsp[-5].lex).loc);
+ TIntermLoop* forLoop = parseContext.intermediate.addLoop((yyvsp[0].interm.intermNode), reinterpret_cast<TIntermTyped*>((yyvsp[-2].interm.nodePair).node1), reinterpret_cast<TIntermTyped*>((yyvsp[-2].interm.nodePair).node2), true, (yyvsp[-6].lex).loc);
+ if (! parseContext.limits.nonInductiveForLoops)
+ parseContext.inductiveLoopCheck((yyvsp[-6].lex).loc, (yyvsp[-3].interm.intermNode), forLoop);
+ (yyval.interm.intermNode) = parseContext.intermediate.growAggregate((yyval.interm.intermNode), forLoop, (yyvsp[-6].lex).loc);
+ (yyval.interm.intermNode)->getAsAggregate()->setOperator(EOpSequence);
+ --parseContext.loopNestingLevel;
+ --parseContext.statementNestingLevel;
+ --parseContext.controlFlowNestingLevel;
+ }
+#line 10021 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 556:
+#line 3668 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode);
+ }
+#line 10029 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 557:
+#line 3671 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode);
+ }
+#line 10037 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 558:
+#line 3677 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode);
+ }
+#line 10045 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 559:
+#line 3680 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.intermTypedNode) = 0;
+ }
+#line 10053 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 560:
+#line 3686 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.nodePair).node1 = (yyvsp[-1].interm.intermTypedNode);
+ (yyval.interm.nodePair).node2 = 0;
+ }
+#line 10062 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 561:
+#line 3690 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.nodePair).node1 = (yyvsp[-2].interm.intermTypedNode);
+ (yyval.interm.nodePair).node2 = (yyvsp[0].interm.intermTypedNode);
+ }
+#line 10071 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 562:
+#line 3697 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ if (parseContext.loopNestingLevel <= 0)
+ parseContext.error((yyvsp[-1].lex).loc, "continue statement only allowed in loops", "", "");
+ (yyval.interm.intermNode) = parseContext.intermediate.addBranch(EOpContinue, (yyvsp[-1].lex).loc);
+ }
+#line 10081 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 563:
+#line 3702 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ if (parseContext.loopNestingLevel + parseContext.switchSequenceStack.size() <= 0)
+ parseContext.error((yyvsp[-1].lex).loc, "break statement only allowed in switch and loops", "", "");
+ (yyval.interm.intermNode) = parseContext.intermediate.addBranch(EOpBreak, (yyvsp[-1].lex).loc);
+ }
+#line 10091 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 564:
+#line 3707 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.intermNode) = parseContext.intermediate.addBranch(EOpReturn, (yyvsp[-1].lex).loc);
+ if (parseContext.currentFunctionType->getBasicType() != EbtVoid)
+ parseContext.error((yyvsp[-1].lex).loc, "non-void function must return a value", "return", "");
+ if (parseContext.inMain)
+ parseContext.postEntryPointReturn = true;
+ }
+#line 10103 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 565:
+#line 3714 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.intermNode) = parseContext.handleReturnValue((yyvsp[-2].lex).loc, (yyvsp[-1].interm.intermTypedNode));
+ }
+#line 10111 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 566:
+#line 3717 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.requireStage((yyvsp[-1].lex).loc, EShLangFragment, "discard");
+ (yyval.interm.intermNode) = parseContext.intermediate.addBranch(EOpKill, (yyvsp[-1].lex).loc);
+ }
+#line 10120 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 567:
+#line 3726 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode);
+ parseContext.intermediate.setTreeRoot((yyval.interm.intermNode));
+ }
+#line 10129 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 568:
+#line 3730 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ if ((yyvsp[0].interm.intermNode) != nullptr) {
+ (yyval.interm.intermNode) = parseContext.intermediate.growAggregate((yyvsp[-1].interm.intermNode), (yyvsp[0].interm.intermNode));
+ parseContext.intermediate.setTreeRoot((yyval.interm.intermNode));
+ }
+ }
+#line 10140 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 569:
+#line 3739 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode);
+ }
+#line 10148 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 570:
+#line 3742 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode);
+ }
+#line 10156 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 571:
+#line 3745 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.requireProfile((yyvsp[0].lex).loc, ~EEsProfile, "extraneous semicolon");
+ parseContext.profileRequires((yyvsp[0].lex).loc, ~EEsProfile, 460, nullptr, "extraneous semicolon");
+ (yyval.interm.intermNode) = nullptr;
+ }
+#line 10166 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 572:
+#line 3753 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyvsp[0].interm).function = parseContext.handleFunctionDeclarator((yyvsp[0].interm).loc, *(yyvsp[0].interm).function, false /* not prototype */);
+ (yyvsp[0].interm).intermNode = parseContext.handleFunctionDefinition((yyvsp[0].interm).loc, *(yyvsp[0].interm).function);
+ }
+#line 10175 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 573:
+#line 3757 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+        // May be best done as a post-process phase on the intermediate code
+ if (parseContext.currentFunctionType->getBasicType() != EbtVoid && ! parseContext.functionReturnsValue)
+ parseContext.error((yyvsp[-2].interm).loc, "function does not return a value:", "", (yyvsp[-2].interm).function->getName().c_str());
+ parseContext.symbolTable.pop(&parseContext.defaultPrecision[0]);
+ (yyval.interm.intermNode) = parseContext.intermediate.growAggregate((yyvsp[-2].interm).intermNode, (yyvsp[0].interm.intermNode));
+ parseContext.intermediate.setAggregateOperator((yyval.interm.intermNode), EOpFunction, (yyvsp[-2].interm).function->getType(), (yyvsp[-2].interm).loc);
+ (yyval.interm.intermNode)->getAsAggregate()->setName((yyvsp[-2].interm).function->getMangledName().c_str());
+
+        // store the pragma information for debug and optimize and other vendor-specific
+        // information. This information can be queried from the parse tree.
+ (yyval.interm.intermNode)->getAsAggregate()->setOptimize(parseContext.contextPragma.optimize);
+ (yyval.interm.intermNode)->getAsAggregate()->setDebug(parseContext.contextPragma.debug);
+ (yyval.interm.intermNode)->getAsAggregate()->setPragmaTable(parseContext.contextPragma.pragmaTable);
+ }
+#line 10195 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 574:
+#line 3775 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.attributes) = (yyvsp[-2].interm.attributes);
+ parseContext.requireExtensions((yyvsp[-4].lex).loc, 1, &E_GL_EXT_control_flow_attributes, "attribute");
+ }
+#line 10204 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 575:
+#line 3781 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.attributes) = (yyvsp[0].interm.attributes);
+ }
+#line 10212 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 576:
+#line 3784 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.attributes) = parseContext.mergeAttributes((yyvsp[-2].interm.attributes), (yyvsp[0].interm.attributes));
+ }
+#line 10220 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 577:
+#line 3789 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.attributes) = parseContext.makeAttributes(*(yyvsp[0].lex).string);
+ }
+#line 10228 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 578:
+#line 3792 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.attributes) = parseContext.makeAttributes(*(yyvsp[-3].lex).string, (yyvsp[-1].interm.intermTypedNode));
+ }
+#line 10236 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+
+#line 10240 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ default: break;
+ }
+ /* User semantic actions sometimes alter yychar, and that requires
+ that yytoken be updated with the new translation. We take the
+ approach of translating immediately before every use of yytoken.
+ One alternative is translating here after every semantic action,
+ but that translation would be missed if the semantic action invokes
+ YYABORT, YYACCEPT, or YYERROR immediately after altering yychar or
+ if it invokes YYBACKUP. In the case of YYABORT or YYACCEPT, an
+ incorrect destructor might then be invoked immediately. In the
+ case of YYERROR or YYBACKUP, subsequent parser actions might lead
+ to an incorrect destructor call or verbose syntax error message
+ before the lookahead is translated. */
+ YY_SYMBOL_PRINT ("-> $$ =", yyr1[yyn], &yyval, &yyloc);
+
+ YYPOPSTACK (yylen);
+ yylen = 0;
+ YY_STACK_PRINT (yyss, yyssp);
+
+ *++yyvsp = yyval;
+
+ /* Now 'shift' the result of the reduction. Determine what state
+ that goes to, based on the state we popped back to and the rule
+ number reduced by. */
+
+ yyn = yyr1[yyn];
+
+ yystate = yypgoto[yyn - YYNTOKENS] + *yyssp;
+ if (0 <= yystate && yystate <= YYLAST && yycheck[yystate] == *yyssp)
+ yystate = yytable[yystate];
+ else
+ yystate = yydefgoto[yyn - YYNTOKENS];
+
+ goto yynewstate;
+
+
+/*--------------------------------------.
+| yyerrlab -- here on detecting error. |
+`--------------------------------------*/
+yyerrlab:
+ /* Make sure we have latest lookahead translation. See comments at
+ user semantic actions for why this is necessary. */
+ yytoken = yychar == YYEMPTY ? YYEMPTY : YYTRANSLATE (yychar);
+
+ /* If not already recovering from an error, report this error. */
+ if (!yyerrstatus)
+ {
+ ++yynerrs;
+#if ! YYERROR_VERBOSE
+ yyerror (pParseContext, YY_("syntax error"));
+#else
+# define YYSYNTAX_ERROR yysyntax_error (&yymsg_alloc, &yymsg, \
+ yyssp, yytoken)
+ {
+ char const *yymsgp = YY_("syntax error");
+ int yysyntax_error_status;
+ yysyntax_error_status = YYSYNTAX_ERROR;
+ if (yysyntax_error_status == 0)
+ yymsgp = yymsg;
+ else if (yysyntax_error_status == 1)
+ {
+ if (yymsg != yymsgbuf)
+ YYSTACK_FREE (yymsg);
+ yymsg = (char *) YYSTACK_ALLOC (yymsg_alloc);
+ if (!yymsg)
+ {
+ yymsg = yymsgbuf;
+ yymsg_alloc = sizeof yymsgbuf;
+ yysyntax_error_status = 2;
+ }
+ else
+ {
+ yysyntax_error_status = YYSYNTAX_ERROR;
+ yymsgp = yymsg;
+ }
+ }
+ yyerror (pParseContext, yymsgp);
+ if (yysyntax_error_status == 2)
+ goto yyexhaustedlab;
+ }
+# undef YYSYNTAX_ERROR
+#endif
+ }
+
+
+
+ if (yyerrstatus == 3)
+ {
+ /* If just tried and failed to reuse lookahead token after an
+ error, discard it. */
+
+ if (yychar <= YYEOF)
+ {
+ /* Return failure if at end of input. */
+ if (yychar == YYEOF)
+ YYABORT;
+ }
+ else
+ {
+ yydestruct ("Error: discarding",
+ yytoken, &yylval, pParseContext);
+ yychar = YYEMPTY;
+ }
+ }
+
+ /* Else will try to reuse lookahead token after shifting the error
+ token. */
+ goto yyerrlab1;
+
+
+/*---------------------------------------------------.
+| yyerrorlab -- error raised explicitly by YYERROR. |
+`---------------------------------------------------*/
+yyerrorlab:
+
+ /* Pacify compilers like GCC when the user code never invokes
+ YYERROR and the label yyerrorlab therefore never appears in user
+ code. */
+ if (/*CONSTCOND*/ 0)
+ goto yyerrorlab;
+
+ /* Do not reclaim the symbols of the rule whose action triggered
+ this YYERROR. */
+ YYPOPSTACK (yylen);
+ yylen = 0;
+ YY_STACK_PRINT (yyss, yyssp);
+ yystate = *yyssp;
+ goto yyerrlab1;
+
+
+/*-------------------------------------------------------------.
+| yyerrlab1 -- common code for both syntax error and YYERROR. |
+`-------------------------------------------------------------*/
+yyerrlab1:
+ yyerrstatus = 3; /* Each real token shifted decrements this. */
+
+ for (;;)
+ {
+ yyn = yypact[yystate];
+ if (!yypact_value_is_default (yyn))
+ {
+ yyn += YYTERROR;
+ if (0 <= yyn && yyn <= YYLAST && yycheck[yyn] == YYTERROR)
+ {
+ yyn = yytable[yyn];
+ if (0 < yyn)
+ break;
+ }
+ }
+
+ /* Pop the current state because it cannot handle the error token. */
+ if (yyssp == yyss)
+ YYABORT;
+
+
+ yydestruct ("Error: popping",
+ yystos[yystate], yyvsp, pParseContext);
+ YYPOPSTACK (1);
+ yystate = *yyssp;
+ YY_STACK_PRINT (yyss, yyssp);
+ }
+
+ YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN
+ *++yyvsp = yylval;
+ YY_IGNORE_MAYBE_UNINITIALIZED_END
+
+
+ /* Shift the error token. */
+ YY_SYMBOL_PRINT ("Shifting", yystos[yyn], yyvsp, yylsp);
+
+ yystate = yyn;
+ goto yynewstate;
+
+
+/*-------------------------------------.
+| yyacceptlab -- YYACCEPT comes here. |
+`-------------------------------------*/
+yyacceptlab:
+ yyresult = 0;
+ goto yyreturn;
+
+/*-----------------------------------.
+| yyabortlab -- YYABORT comes here. |
+`-----------------------------------*/
+yyabortlab:
+ yyresult = 1;
+ goto yyreturn;
+
+#if !defined yyoverflow || YYERROR_VERBOSE
+/*-------------------------------------------------.
+| yyexhaustedlab -- memory exhaustion comes here. |
+`-------------------------------------------------*/
+yyexhaustedlab:
+ yyerror (pParseContext, YY_("memory exhausted"));
+ yyresult = 2;
+ /* Fall through. */
+#endif
+
+yyreturn:
+ if (yychar != YYEMPTY)
+ {
+ /* Make sure we have latest lookahead translation. See comments at
+ user semantic actions for why this is necessary. */
+ yytoken = YYTRANSLATE (yychar);
+ yydestruct ("Cleanup: discarding lookahead",
+ yytoken, &yylval, pParseContext);
+ }
+ /* Do not reclaim the symbols of the rule whose action triggered
+ this YYABORT or YYACCEPT. */
+ YYPOPSTACK (yylen);
+ YY_STACK_PRINT (yyss, yyssp);
+ while (yyssp != yyss)
+ {
+ yydestruct ("Cleanup: popping",
+ yystos[*yyssp], yyvsp, pParseContext);
+ YYPOPSTACK (1);
+ }
+#ifndef yyoverflow
+ if (yyss != yyssa)
+ YYSTACK_FREE (yyss);
+#endif
+#if YYERROR_VERBOSE
+ if (yymsg != yymsgbuf)
+ YYSTACK_FREE (yymsg);
+#endif
+ return yyresult;
+}
+#line 3796 "MachineIndependent/glslang.y" /* yacc.c:1906 */
+
diff --git a/thirdparty/glslang/glslang/MachineIndependent/glslang_tab.cpp.h b/thirdparty/glslang/glslang/MachineIndependent/glslang_tab.cpp.h
new file mode 100644
index 0000000000..a467db644b
--- /dev/null
+++ b/thirdparty/glslang/glslang/MachineIndependent/glslang_tab.cpp.h
@@ -0,0 +1,509 @@
+/* A Bison parser, made by GNU Bison 3.0.4. */
+
+/* Bison interface for Yacc-like parsers in C
+
+ Copyright (C) 1984, 1989-1990, 2000-2015 Free Software Foundation, Inc.
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>. */
+
+/* As a special exception, you may create a larger work that contains
+ part or all of the Bison parser skeleton and distribute that work
+ under terms of your choice, so long as that work isn't itself a
+ parser generator using the skeleton or a modified version thereof
+ as a parser skeleton. Alternatively, if you modify or redistribute
+ the parser skeleton itself, you may (at your option) remove this
+ special exception, which will cause the skeleton and the resulting
+ Bison output files to be licensed under the GNU General Public
+ License without this special exception.
+
+ This special exception was added by the Free Software Foundation in
+ version 2.2 of Bison. */
+
+#ifndef YY_YY_MACHINEINDEPENDENT_GLSLANG_TAB_CPP_H_INCLUDED
+# define YY_YY_MACHINEINDEPENDENT_GLSLANG_TAB_CPP_H_INCLUDED
+/* Debug traces. */
+#ifndef YYDEBUG
+# define YYDEBUG 1
+#endif
+#if YYDEBUG
+extern int yydebug;
+#endif
+
+/* Token type. */
+#ifndef YYTOKENTYPE
+# define YYTOKENTYPE
+ enum yytokentype
+ {
+ ATTRIBUTE = 258,
+ VARYING = 259,
+ FLOAT16_T = 260,
+ FLOAT = 261,
+ FLOAT32_T = 262,
+ DOUBLE = 263,
+ FLOAT64_T = 264,
+ CONST = 265,
+ BOOL = 266,
+ INT = 267,
+ UINT = 268,
+ INT64_T = 269,
+ UINT64_T = 270,
+ INT32_T = 271,
+ UINT32_T = 272,
+ INT16_T = 273,
+ UINT16_T = 274,
+ INT8_T = 275,
+ UINT8_T = 276,
+ BREAK = 277,
+ CONTINUE = 278,
+ DO = 279,
+ ELSE = 280,
+ FOR = 281,
+ IF = 282,
+ DISCARD = 283,
+ RETURN = 284,
+ SWITCH = 285,
+ CASE = 286,
+ DEFAULT = 287,
+ SUBROUTINE = 288,
+ BVEC2 = 289,
+ BVEC3 = 290,
+ BVEC4 = 291,
+ IVEC2 = 292,
+ IVEC3 = 293,
+ IVEC4 = 294,
+ UVEC2 = 295,
+ UVEC3 = 296,
+ UVEC4 = 297,
+ I64VEC2 = 298,
+ I64VEC3 = 299,
+ I64VEC4 = 300,
+ U64VEC2 = 301,
+ U64VEC3 = 302,
+ U64VEC4 = 303,
+ I32VEC2 = 304,
+ I32VEC3 = 305,
+ I32VEC4 = 306,
+ U32VEC2 = 307,
+ U32VEC3 = 308,
+ U32VEC4 = 309,
+ I16VEC2 = 310,
+ I16VEC3 = 311,
+ I16VEC4 = 312,
+ U16VEC2 = 313,
+ U16VEC3 = 314,
+ U16VEC4 = 315,
+ I8VEC2 = 316,
+ I8VEC3 = 317,
+ I8VEC4 = 318,
+ U8VEC2 = 319,
+ U8VEC3 = 320,
+ U8VEC4 = 321,
+ VEC2 = 322,
+ VEC3 = 323,
+ VEC4 = 324,
+ MAT2 = 325,
+ MAT3 = 326,
+ MAT4 = 327,
+ CENTROID = 328,
+ IN = 329,
+ OUT = 330,
+ INOUT = 331,
+ UNIFORM = 332,
+ PATCH = 333,
+ SAMPLE = 334,
+ BUFFER = 335,
+ SHARED = 336,
+ NONUNIFORM = 337,
+ PAYLOADNV = 338,
+ PAYLOADINNV = 339,
+ HITATTRNV = 340,
+ CALLDATANV = 341,
+ CALLDATAINNV = 342,
+ COHERENT = 343,
+ VOLATILE = 344,
+ RESTRICT = 345,
+ READONLY = 346,
+ WRITEONLY = 347,
+ DEVICECOHERENT = 348,
+ QUEUEFAMILYCOHERENT = 349,
+ WORKGROUPCOHERENT = 350,
+ SUBGROUPCOHERENT = 351,
+ NONPRIVATE = 352,
+ DVEC2 = 353,
+ DVEC3 = 354,
+ DVEC4 = 355,
+ DMAT2 = 356,
+ DMAT3 = 357,
+ DMAT4 = 358,
+ F16VEC2 = 359,
+ F16VEC3 = 360,
+ F16VEC4 = 361,
+ F16MAT2 = 362,
+ F16MAT3 = 363,
+ F16MAT4 = 364,
+ F32VEC2 = 365,
+ F32VEC3 = 366,
+ F32VEC4 = 367,
+ F32MAT2 = 368,
+ F32MAT3 = 369,
+ F32MAT4 = 370,
+ F64VEC2 = 371,
+ F64VEC3 = 372,
+ F64VEC4 = 373,
+ F64MAT2 = 374,
+ F64MAT3 = 375,
+ F64MAT4 = 376,
+ NOPERSPECTIVE = 377,
+ FLAT = 378,
+ SMOOTH = 379,
+ LAYOUT = 380,
+ EXPLICITINTERPAMD = 381,
+ PERVERTEXNV = 382,
+ PERPRIMITIVENV = 383,
+ PERVIEWNV = 384,
+ PERTASKNV = 385,
+ MAT2X2 = 386,
+ MAT2X3 = 387,
+ MAT2X4 = 388,
+ MAT3X2 = 389,
+ MAT3X3 = 390,
+ MAT3X4 = 391,
+ MAT4X2 = 392,
+ MAT4X3 = 393,
+ MAT4X4 = 394,
+ DMAT2X2 = 395,
+ DMAT2X3 = 396,
+ DMAT2X4 = 397,
+ DMAT3X2 = 398,
+ DMAT3X3 = 399,
+ DMAT3X4 = 400,
+ DMAT4X2 = 401,
+ DMAT4X3 = 402,
+ DMAT4X4 = 403,
+ F16MAT2X2 = 404,
+ F16MAT2X3 = 405,
+ F16MAT2X4 = 406,
+ F16MAT3X2 = 407,
+ F16MAT3X3 = 408,
+ F16MAT3X4 = 409,
+ F16MAT4X2 = 410,
+ F16MAT4X3 = 411,
+ F16MAT4X4 = 412,
+ F32MAT2X2 = 413,
+ F32MAT2X3 = 414,
+ F32MAT2X4 = 415,
+ F32MAT3X2 = 416,
+ F32MAT3X3 = 417,
+ F32MAT3X4 = 418,
+ F32MAT4X2 = 419,
+ F32MAT4X3 = 420,
+ F32MAT4X4 = 421,
+ F64MAT2X2 = 422,
+ F64MAT2X3 = 423,
+ F64MAT2X4 = 424,
+ F64MAT3X2 = 425,
+ F64MAT3X3 = 426,
+ F64MAT3X4 = 427,
+ F64MAT4X2 = 428,
+ F64MAT4X3 = 429,
+ F64MAT4X4 = 430,
+ ATOMIC_UINT = 431,
+ ACCSTRUCTNV = 432,
+ FCOOPMATNV = 433,
+ SAMPLER1D = 434,
+ SAMPLER2D = 435,
+ SAMPLER3D = 436,
+ SAMPLERCUBE = 437,
+ SAMPLER1DSHADOW = 438,
+ SAMPLER2DSHADOW = 439,
+ SAMPLERCUBESHADOW = 440,
+ SAMPLER1DARRAY = 441,
+ SAMPLER2DARRAY = 442,
+ SAMPLER1DARRAYSHADOW = 443,
+ SAMPLER2DARRAYSHADOW = 444,
+ ISAMPLER1D = 445,
+ ISAMPLER2D = 446,
+ ISAMPLER3D = 447,
+ ISAMPLERCUBE = 448,
+ ISAMPLER1DARRAY = 449,
+ ISAMPLER2DARRAY = 450,
+ USAMPLER1D = 451,
+ USAMPLER2D = 452,
+ USAMPLER3D = 453,
+ USAMPLERCUBE = 454,
+ USAMPLER1DARRAY = 455,
+ USAMPLER2DARRAY = 456,
+ SAMPLER2DRECT = 457,
+ SAMPLER2DRECTSHADOW = 458,
+ ISAMPLER2DRECT = 459,
+ USAMPLER2DRECT = 460,
+ SAMPLERBUFFER = 461,
+ ISAMPLERBUFFER = 462,
+ USAMPLERBUFFER = 463,
+ SAMPLERCUBEARRAY = 464,
+ SAMPLERCUBEARRAYSHADOW = 465,
+ ISAMPLERCUBEARRAY = 466,
+ USAMPLERCUBEARRAY = 467,
+ SAMPLER2DMS = 468,
+ ISAMPLER2DMS = 469,
+ USAMPLER2DMS = 470,
+ SAMPLER2DMSARRAY = 471,
+ ISAMPLER2DMSARRAY = 472,
+ USAMPLER2DMSARRAY = 473,
+ SAMPLEREXTERNALOES = 474,
+ SAMPLEREXTERNAL2DY2YEXT = 475,
+ F16SAMPLER1D = 476,
+ F16SAMPLER2D = 477,
+ F16SAMPLER3D = 478,
+ F16SAMPLER2DRECT = 479,
+ F16SAMPLERCUBE = 480,
+ F16SAMPLER1DARRAY = 481,
+ F16SAMPLER2DARRAY = 482,
+ F16SAMPLERCUBEARRAY = 483,
+ F16SAMPLERBUFFER = 484,
+ F16SAMPLER2DMS = 485,
+ F16SAMPLER2DMSARRAY = 486,
+ F16SAMPLER1DSHADOW = 487,
+ F16SAMPLER2DSHADOW = 488,
+ F16SAMPLER1DARRAYSHADOW = 489,
+ F16SAMPLER2DARRAYSHADOW = 490,
+ F16SAMPLER2DRECTSHADOW = 491,
+ F16SAMPLERCUBESHADOW = 492,
+ F16SAMPLERCUBEARRAYSHADOW = 493,
+ SAMPLER = 494,
+ SAMPLERSHADOW = 495,
+ TEXTURE1D = 496,
+ TEXTURE2D = 497,
+ TEXTURE3D = 498,
+ TEXTURECUBE = 499,
+ TEXTURE1DARRAY = 500,
+ TEXTURE2DARRAY = 501,
+ ITEXTURE1D = 502,
+ ITEXTURE2D = 503,
+ ITEXTURE3D = 504,
+ ITEXTURECUBE = 505,
+ ITEXTURE1DARRAY = 506,
+ ITEXTURE2DARRAY = 507,
+ UTEXTURE1D = 508,
+ UTEXTURE2D = 509,
+ UTEXTURE3D = 510,
+ UTEXTURECUBE = 511,
+ UTEXTURE1DARRAY = 512,
+ UTEXTURE2DARRAY = 513,
+ TEXTURE2DRECT = 514,
+ ITEXTURE2DRECT = 515,
+ UTEXTURE2DRECT = 516,
+ TEXTUREBUFFER = 517,
+ ITEXTUREBUFFER = 518,
+ UTEXTUREBUFFER = 519,
+ TEXTURECUBEARRAY = 520,
+ ITEXTURECUBEARRAY = 521,
+ UTEXTURECUBEARRAY = 522,
+ TEXTURE2DMS = 523,
+ ITEXTURE2DMS = 524,
+ UTEXTURE2DMS = 525,
+ TEXTURE2DMSARRAY = 526,
+ ITEXTURE2DMSARRAY = 527,
+ UTEXTURE2DMSARRAY = 528,
+ F16TEXTURE1D = 529,
+ F16TEXTURE2D = 530,
+ F16TEXTURE3D = 531,
+ F16TEXTURE2DRECT = 532,
+ F16TEXTURECUBE = 533,
+ F16TEXTURE1DARRAY = 534,
+ F16TEXTURE2DARRAY = 535,
+ F16TEXTURECUBEARRAY = 536,
+ F16TEXTUREBUFFER = 537,
+ F16TEXTURE2DMS = 538,
+ F16TEXTURE2DMSARRAY = 539,
+ SUBPASSINPUT = 540,
+ SUBPASSINPUTMS = 541,
+ ISUBPASSINPUT = 542,
+ ISUBPASSINPUTMS = 543,
+ USUBPASSINPUT = 544,
+ USUBPASSINPUTMS = 545,
+ F16SUBPASSINPUT = 546,
+ F16SUBPASSINPUTMS = 547,
+ IMAGE1D = 548,
+ IIMAGE1D = 549,
+ UIMAGE1D = 550,
+ IMAGE2D = 551,
+ IIMAGE2D = 552,
+ UIMAGE2D = 553,
+ IMAGE3D = 554,
+ IIMAGE3D = 555,
+ UIMAGE3D = 556,
+ IMAGE2DRECT = 557,
+ IIMAGE2DRECT = 558,
+ UIMAGE2DRECT = 559,
+ IMAGECUBE = 560,
+ IIMAGECUBE = 561,
+ UIMAGECUBE = 562,
+ IMAGEBUFFER = 563,
+ IIMAGEBUFFER = 564,
+ UIMAGEBUFFER = 565,
+ IMAGE1DARRAY = 566,
+ IIMAGE1DARRAY = 567,
+ UIMAGE1DARRAY = 568,
+ IMAGE2DARRAY = 569,
+ IIMAGE2DARRAY = 570,
+ UIMAGE2DARRAY = 571,
+ IMAGECUBEARRAY = 572,
+ IIMAGECUBEARRAY = 573,
+ UIMAGECUBEARRAY = 574,
+ IMAGE2DMS = 575,
+ IIMAGE2DMS = 576,
+ UIMAGE2DMS = 577,
+ IMAGE2DMSARRAY = 578,
+ IIMAGE2DMSARRAY = 579,
+ UIMAGE2DMSARRAY = 580,
+ F16IMAGE1D = 581,
+ F16IMAGE2D = 582,
+ F16IMAGE3D = 583,
+ F16IMAGE2DRECT = 584,
+ F16IMAGECUBE = 585,
+ F16IMAGE1DARRAY = 586,
+ F16IMAGE2DARRAY = 587,
+ F16IMAGECUBEARRAY = 588,
+ F16IMAGEBUFFER = 589,
+ F16IMAGE2DMS = 590,
+ F16IMAGE2DMSARRAY = 591,
+ STRUCT = 592,
+ VOID = 593,
+ WHILE = 594,
+ IDENTIFIER = 595,
+ TYPE_NAME = 596,
+ FLOATCONSTANT = 597,
+ DOUBLECONSTANT = 598,
+ INT16CONSTANT = 599,
+ UINT16CONSTANT = 600,
+ INT32CONSTANT = 601,
+ UINT32CONSTANT = 602,
+ INTCONSTANT = 603,
+ UINTCONSTANT = 604,
+ INT64CONSTANT = 605,
+ UINT64CONSTANT = 606,
+ BOOLCONSTANT = 607,
+ FLOAT16CONSTANT = 608,
+ LEFT_OP = 609,
+ RIGHT_OP = 610,
+ INC_OP = 611,
+ DEC_OP = 612,
+ LE_OP = 613,
+ GE_OP = 614,
+ EQ_OP = 615,
+ NE_OP = 616,
+ AND_OP = 617,
+ OR_OP = 618,
+ XOR_OP = 619,
+ MUL_ASSIGN = 620,
+ DIV_ASSIGN = 621,
+ ADD_ASSIGN = 622,
+ MOD_ASSIGN = 623,
+ LEFT_ASSIGN = 624,
+ RIGHT_ASSIGN = 625,
+ AND_ASSIGN = 626,
+ XOR_ASSIGN = 627,
+ OR_ASSIGN = 628,
+ SUB_ASSIGN = 629,
+ LEFT_PAREN = 630,
+ RIGHT_PAREN = 631,
+ LEFT_BRACKET = 632,
+ RIGHT_BRACKET = 633,
+ LEFT_BRACE = 634,
+ RIGHT_BRACE = 635,
+ DOT = 636,
+ COMMA = 637,
+ COLON = 638,
+ EQUAL = 639,
+ SEMICOLON = 640,
+ BANG = 641,
+ DASH = 642,
+ TILDE = 643,
+ PLUS = 644,
+ STAR = 645,
+ SLASH = 646,
+ PERCENT = 647,
+ LEFT_ANGLE = 648,
+ RIGHT_ANGLE = 649,
+ VERTICAL_BAR = 650,
+ CARET = 651,
+ AMPERSAND = 652,
+ QUESTION = 653,
+ INVARIANT = 654,
+ PRECISE = 655,
+ HIGH_PRECISION = 656,
+ MEDIUM_PRECISION = 657,
+ LOW_PRECISION = 658,
+ PRECISION = 659,
+ PACKED = 660,
+ RESOURCE = 661,
+ SUPERP = 662
+ };
+#endif
+
+/* Value type. */
+#if ! defined YYSTYPE && ! defined YYSTYPE_IS_DECLARED
+
+union YYSTYPE
+{
+#line 71 "MachineIndependent/glslang.y" /* yacc.c:1909 */
+
+ struct {
+ glslang::TSourceLoc loc;
+ union {
+ glslang::TString *string;
+ int i;
+ unsigned int u;
+ long long i64;
+ unsigned long long u64;
+ bool b;
+ double d;
+ };
+ glslang::TSymbol* symbol;
+ } lex;
+ struct {
+ glslang::TSourceLoc loc;
+ glslang::TOperator op;
+ union {
+ TIntermNode* intermNode;
+ glslang::TIntermNodePair nodePair;
+ glslang::TIntermTyped* intermTypedNode;
+ glslang::TAttributes* attributes;
+ };
+ union {
+ glslang::TPublicType type;
+ glslang::TFunction* function;
+ glslang::TParameter param;
+ glslang::TTypeLoc typeLine;
+ glslang::TTypeList* typeList;
+ glslang::TArraySizes* arraySizes;
+ glslang::TIdentifierList* identifierList;
+ };
+ glslang::TArraySizes* typeParameters;
+ } interm;
+
+#line 498 "MachineIndependent/glslang_tab.cpp.h" /* yacc.c:1909 */
+};
+
+typedef union YYSTYPE YYSTYPE;
+# define YYSTYPE_IS_TRIVIAL 1
+# define YYSTYPE_IS_DECLARED 1
+#endif
+
+
+
+int yyparse (glslang::TParseContext* pParseContext);
+
+#endif /* !YY_YY_MACHINEINDEPENDENT_GLSLANG_TAB_CPP_H_INCLUDED */
diff --git a/thirdparty/glslang/glslang/MachineIndependent/intermOut.cpp b/thirdparty/glslang/glslang/MachineIndependent/intermOut.cpp
new file mode 100644
index 0000000000..5e2eed16ed
--- /dev/null
+++ b/thirdparty/glslang/glslang/MachineIndependent/intermOut.cpp
@@ -0,0 +1,1519 @@
+//
+// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+// Copyright (C) 2012-2016 LunarG, Inc.
+// Copyright (C) 2017 ARM Limited.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+#include "localintermediate.h"
+#include "../Include/InfoSink.h"
+
+#ifdef _MSC_VER
+#include <cfloat>
+#else
+#include <cmath>
+#endif
+#include <cstdint>
+
+namespace {
+
+bool IsInfinity(double x) {
+#ifdef _MSC_VER
+ switch (_fpclass(x)) {
+ case _FPCLASS_NINF:
+ case _FPCLASS_PINF:
+ return true;
+ default:
+ return false;
+ }
+#else
+ return std::isinf(x);
+#endif
+}
+
+bool IsNan(double x) {
+#ifdef _MSC_VER
+ switch (_fpclass(x)) {
+ case _FPCLASS_SNAN:
+ case _FPCLASS_QNAN:
+ return true;
+ default:
+ return false;
+ }
+#else
+ return std::isnan(x);
+#endif
+}
+
+} // end anonymous namespace
+
+namespace glslang {
+
+//
+// Two purposes:
+// 1. Show an example of how to iterate the tree. Functions can
+//    also directly call Traverse() on children themselves to
+//    have finer-grained control over the process than shown here.
+//    See the last function for how to get started.
+// 2. Print out a text-based description of the tree.
+//
+
+//
+// Use this class to carry along data from node to node in
+// the traversal.
+//
+class TOutputTraverser : public TIntermTraverser {
+public:
+ TOutputTraverser(TInfoSink& i) : infoSink(i), extraOutput(NoExtraOutput) { }
+
+ enum EExtraOutput {
+ NoExtraOutput,
+ BinaryDoubleOutput
+ };
+ void setDoubleOutput(EExtraOutput extra) { extraOutput = extra; }
+
+ virtual bool visitBinary(TVisit, TIntermBinary* node);
+ virtual bool visitUnary(TVisit, TIntermUnary* node);
+ virtual bool visitAggregate(TVisit, TIntermAggregate* node);
+ virtual bool visitSelection(TVisit, TIntermSelection* node);
+ virtual void visitConstantUnion(TIntermConstantUnion* node);
+ virtual void visitSymbol(TIntermSymbol* node);
+ virtual bool visitLoop(TVisit, TIntermLoop* node);
+ virtual bool visitBranch(TVisit, TIntermBranch* node);
+ virtual bool visitSwitch(TVisit, TIntermSwitch* node);
+
+ TInfoSink& infoSink;
+protected:
+ TOutputTraverser(TOutputTraverser&);
+ TOutputTraverser& operator=(TOutputTraverser&);
+
+ EExtraOutput extraOutput;
+};
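+
+// A minimal usage sketch (assuming a valid tree root, TIntermNode* root,
+// and an output sink, TInfoSink sink): traversal is started by constructing
+// the traverser and handing it to the root node's traverse() method:
+//
+//     TOutputTraverser it(sink);
+//     if (root != nullptr)
+//         root->traverse(&it);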
+
+//
+// Helper functions for printing, not part of traversing.
+//
+
+static void OutputTreeText(TInfoSink& infoSink, const TIntermNode* node, const int depth)
+{
+ int i;
+
+ infoSink.debug << node->getLoc().string << ":";
+ if (node->getLoc().line)
+ infoSink.debug << node->getLoc().line;
+ else
+ infoSink.debug << "? ";
+
+ for (i = 0; i < depth; ++i)
+ infoSink.debug << " ";
+}
+
+//
+// The rest of the file contains the traversal functions. The last one
+// is the one that starts the traversal.
+//
+// Return true from interior nodes to have the external traversal
+// continue on to children. If you process children yourself,
+// return false.
+//
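+// For instance (a hypothetical sketch), a visitor that wanted to emit an
+// aggregate's children in reverse order could override visitAggregate(),
+// walk the sequence itself, and return false so the framework does not
+// visit the children a second time:
+//
+//     bool visitAggregate(TVisit, TIntermAggregate* node) override {
+//         TIntermSequence& seq = node->getSequence();
+//         for (auto it = seq.rbegin(); it != seq.rend(); ++it)
+//             (*it)->traverse(this);
+//         return false;
+//     }
+//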
+
+bool TOutputTraverser::visitBinary(TVisit /* visit */, TIntermBinary* node)
+{
+ TInfoSink& out = infoSink;
+
+ OutputTreeText(out, node, depth);
+
+ switch (node->getOp()) {
+ case EOpAssign: out.debug << "move second child to first child"; break;
+ case EOpAddAssign: out.debug << "add second child into first child"; break;
+ case EOpSubAssign: out.debug << "subtract second child into first child"; break;
+ case EOpMulAssign: out.debug << "multiply second child into first child"; break;
+ case EOpVectorTimesMatrixAssign: out.debug << "matrix mult second child into first child"; break;
+ case EOpVectorTimesScalarAssign: out.debug << "vector scale second child into first child"; break;
+ case EOpMatrixTimesScalarAssign: out.debug << "matrix scale second child into first child"; break;
+ case EOpMatrixTimesMatrixAssign: out.debug << "matrix mult second child into first child"; break;
+ case EOpDivAssign: out.debug << "divide second child into first child"; break;
+ case EOpModAssign: out.debug << "mod second child into first child"; break;
+ case EOpAndAssign: out.debug << "and second child into first child"; break;
+ case EOpInclusiveOrAssign: out.debug << "or second child into first child"; break;
+ case EOpExclusiveOrAssign: out.debug << "exclusive or second child into first child"; break;
+ case EOpLeftShiftAssign: out.debug << "left shift second child into first child"; break;
+ case EOpRightShiftAssign: out.debug << "right shift second child into first child"; break;
+
+ case EOpIndexDirect: out.debug << "direct index"; break;
+ case EOpIndexIndirect: out.debug << "indirect index"; break;
+ case EOpIndexDirectStruct:
+ {
+ bool reference = node->getLeft()->getType().getBasicType() == EbtReference;
+ const TTypeList *members = reference ? node->getLeft()->getType().getReferentType()->getStruct() : node->getLeft()->getType().getStruct();
+ out.debug << (*members)[node->getRight()->getAsConstantUnion()->getConstArray()[0].getIConst()].type->getFieldName();
+ out.debug << ": direct index for structure"; break;
+ }
+ case EOpVectorSwizzle: out.debug << "vector swizzle"; break;
+ case EOpMatrixSwizzle: out.debug << "matrix swizzle"; break;
+
+ case EOpAdd: out.debug << "add"; break;
+ case EOpSub: out.debug << "subtract"; break;
+ case EOpMul: out.debug << "component-wise multiply"; break;
+ case EOpDiv: out.debug << "divide"; break;
+ case EOpMod: out.debug << "mod"; break;
+ case EOpRightShift: out.debug << "right-shift"; break;
+ case EOpLeftShift: out.debug << "left-shift"; break;
+ case EOpAnd: out.debug << "bitwise and"; break;
+ case EOpInclusiveOr: out.debug << "inclusive-or"; break;
+ case EOpExclusiveOr: out.debug << "exclusive-or"; break;
+ case EOpEqual: out.debug << "Compare Equal"; break;
+ case EOpNotEqual: out.debug << "Compare Not Equal"; break;
+ case EOpLessThan: out.debug << "Compare Less Than"; break;
+ case EOpGreaterThan: out.debug << "Compare Greater Than"; break;
+ case EOpLessThanEqual: out.debug << "Compare Less Than or Equal"; break;
+ case EOpGreaterThanEqual: out.debug << "Compare Greater Than or Equal"; break;
+ case EOpVectorEqual: out.debug << "Equal"; break;
+ case EOpVectorNotEqual: out.debug << "NotEqual"; break;
+
+ case EOpVectorTimesScalar: out.debug << "vector-scale"; break;
+ case EOpVectorTimesMatrix: out.debug << "vector-times-matrix"; break;
+ case EOpMatrixTimesVector: out.debug << "matrix-times-vector"; break;
+ case EOpMatrixTimesScalar: out.debug << "matrix-scale"; break;
+ case EOpMatrixTimesMatrix: out.debug << "matrix-multiply"; break;
+
+ case EOpLogicalOr: out.debug << "logical-or"; break;
+ case EOpLogicalXor: out.debug << "logical-xor"; break;
+ case EOpLogicalAnd: out.debug << "logical-and"; break;
+
+ default: out.debug << "<unknown op>";
+ }
+
+ out.debug << " (" << node->getCompleteString() << ")";
+
+ out.debug << "\n";
+
+ return true;
+}
+
+bool TOutputTraverser::visitUnary(TVisit /* visit */, TIntermUnary* node)
+{
+ TInfoSink& out = infoSink;
+
+ OutputTreeText(out, node, depth);
+
+ switch (node->getOp()) {
+ case EOpNegative: out.debug << "Negate value"; break;
+ case EOpVectorLogicalNot:
+ case EOpLogicalNot: out.debug << "Negate conditional"; break;
+ case EOpBitwiseNot: out.debug << "Bitwise not"; break;
+
+ case EOpPostIncrement: out.debug << "Post-Increment"; break;
+ case EOpPostDecrement: out.debug << "Post-Decrement"; break;
+ case EOpPreIncrement: out.debug << "Pre-Increment"; break;
+ case EOpPreDecrement: out.debug << "Pre-Decrement"; break;
+ case EOpCopyObject: out.debug << "copy object"; break;
+
+ // * -> bool
+ case EOpConvInt8ToBool: out.debug << "Convert int8_t to bool"; break;
+ case EOpConvUint8ToBool: out.debug << "Convert uint8_t to bool"; break;
+ case EOpConvInt16ToBool: out.debug << "Convert int16_t to bool"; break;
+ case EOpConvUint16ToBool: out.debug << "Convert uint16_t to bool";break;
+ case EOpConvIntToBool: out.debug << "Convert int to bool"; break;
+ case EOpConvUintToBool: out.debug << "Convert uint to bool"; break;
+ case EOpConvInt64ToBool: out.debug << "Convert int64 to bool"; break;
+ case EOpConvUint64ToBool: out.debug << "Convert uint64 to bool"; break;
+ case EOpConvFloat16ToBool: out.debug << "Convert float16_t to bool"; break;
+ case EOpConvFloatToBool: out.debug << "Convert float to bool"; break;
+ case EOpConvDoubleToBool: out.debug << "Convert double to bool"; break;
+
+ // bool -> *
+ case EOpConvBoolToInt8: out.debug << "Convert bool to int8_t"; break;
+ case EOpConvBoolToUint8: out.debug << "Convert bool to uint8_t"; break;
+    case EOpConvBoolToInt16:   out.debug << "Convert bool to int16_t"; break;
+ case EOpConvBoolToUint16: out.debug << "Convert bool to uint16_t";break;
+ case EOpConvBoolToInt: out.debug << "Convert bool to int" ; break;
+ case EOpConvBoolToUint: out.debug << "Convert bool to uint"; break;
+ case EOpConvBoolToInt64: out.debug << "Convert bool to int64"; break;
+ case EOpConvBoolToUint64: out.debug << "Convert bool to uint64";break;
+ case EOpConvBoolToFloat16: out.debug << "Convert bool to float16_t"; break;
+ case EOpConvBoolToFloat: out.debug << "Convert bool to float"; break;
+ case EOpConvBoolToDouble: out.debug << "Convert bool to double"; break;
+
+ // int8_t -> (u)int*
+ case EOpConvInt8ToInt16: out.debug << "Convert int8_t to int16_t";break;
+ case EOpConvInt8ToInt: out.debug << "Convert int8_t to int"; break;
+ case EOpConvInt8ToInt64: out.debug << "Convert int8_t to int64"; break;
+ case EOpConvInt8ToUint8: out.debug << "Convert int8_t to uint8_t";break;
+ case EOpConvInt8ToUint16: out.debug << "Convert int8_t to uint16_t";break;
+ case EOpConvInt8ToUint: out.debug << "Convert int8_t to uint"; break;
+ case EOpConvInt8ToUint64: out.debug << "Convert int8_t to uint64"; break;
+
+ // uint8_t -> (u)int*
+ case EOpConvUint8ToInt8: out.debug << "Convert uint8_t to int8_t";break;
+ case EOpConvUint8ToInt16: out.debug << "Convert uint8_t to int16_t";break;
+ case EOpConvUint8ToInt: out.debug << "Convert uint8_t to int"; break;
+ case EOpConvUint8ToInt64: out.debug << "Convert uint8_t to int64"; break;
+ case EOpConvUint8ToUint16: out.debug << "Convert uint8_t to uint16_t";break;
+ case EOpConvUint8ToUint: out.debug << "Convert uint8_t to uint"; break;
+ case EOpConvUint8ToUint64: out.debug << "Convert uint8_t to uint64"; break;
+
+ // int8_t -> float*
+ case EOpConvInt8ToFloat16: out.debug << "Convert int8_t to float16_t";break;
+ case EOpConvInt8ToFloat: out.debug << "Convert int8_t to float"; break;
+ case EOpConvInt8ToDouble: out.debug << "Convert int8_t to double"; break;
+
+ // uint8_t -> float*
+ case EOpConvUint8ToFloat16: out.debug << "Convert uint8_t to float16_t";break;
+ case EOpConvUint8ToFloat: out.debug << "Convert uint8_t to float"; break;
+ case EOpConvUint8ToDouble: out.debug << "Convert uint8_t to double"; break;
+
+ // int16_t -> (u)int*
+ case EOpConvInt16ToInt8: out.debug << "Convert int16_t to int8_t";break;
+ case EOpConvInt16ToInt: out.debug << "Convert int16_t to int"; break;
+ case EOpConvInt16ToInt64: out.debug << "Convert int16_t to int64"; break;
+ case EOpConvInt16ToUint8: out.debug << "Convert int16_t to uint8_t";break;
+ case EOpConvInt16ToUint16: out.debug << "Convert int16_t to uint16_t";break;
+ case EOpConvInt16ToUint: out.debug << "Convert int16_t to uint"; break;
+ case EOpConvInt16ToUint64: out.debug << "Convert int16_t to uint64"; break;
+
+ // int16_t -> float*
+ case EOpConvInt16ToFloat16: out.debug << "Convert int16_t to float16_t";break;
+ case EOpConvInt16ToFloat: out.debug << "Convert int16_t to float"; break;
+ case EOpConvInt16ToDouble: out.debug << "Convert int16_t to double"; break;
+
+ // uint16_t -> (u)int*
+ case EOpConvUint16ToInt8: out.debug << "Convert uint16_t to int8_t";break;
+ case EOpConvUint16ToInt16: out.debug << "Convert uint16_t to int16_t";break;
+ case EOpConvUint16ToInt: out.debug << "Convert uint16_t to int"; break;
+ case EOpConvUint16ToInt64: out.debug << "Convert uint16_t to int64"; break;
+ case EOpConvUint16ToUint8: out.debug << "Convert uint16_t to uint8_t";break;
+ case EOpConvUint16ToUint: out.debug << "Convert uint16_t to uint"; break;
+ case EOpConvUint16ToUint64: out.debug << "Convert uint16_t to uint64"; break;
+
+ // uint16_t -> float*
+ case EOpConvUint16ToFloat16: out.debug << "Convert uint16_t to float16_t";break;
+ case EOpConvUint16ToFloat: out.debug << "Convert uint16_t to float"; break;
+ case EOpConvUint16ToDouble: out.debug << "Convert uint16_t to double"; break;
+
+ // int32_t -> (u)int*
+ case EOpConvIntToInt8: out.debug << "Convert int to int8_t";break;
+ case EOpConvIntToInt16: out.debug << "Convert int to int16_t";break;
+ case EOpConvIntToInt64: out.debug << "Convert int to int64"; break;
+ case EOpConvIntToUint8: out.debug << "Convert int to uint8_t";break;
+ case EOpConvIntToUint16: out.debug << "Convert int to uint16_t";break;
+ case EOpConvIntToUint: out.debug << "Convert int to uint"; break;
+ case EOpConvIntToUint64: out.debug << "Convert int to uint64"; break;
+
+ // int32_t -> float*
+ case EOpConvIntToFloat16: out.debug << "Convert int to float16_t";break;
+ case EOpConvIntToFloat: out.debug << "Convert int to float"; break;
+ case EOpConvIntToDouble: out.debug << "Convert int to double"; break;
+
+ // uint32_t -> (u)int*
+ case EOpConvUintToInt8: out.debug << "Convert uint to int8_t";break;
+ case EOpConvUintToInt16: out.debug << "Convert uint to int16_t";break;
+ case EOpConvUintToInt: out.debug << "Convert uint to int";break;
+ case EOpConvUintToInt64: out.debug << "Convert uint to int64"; break;
+ case EOpConvUintToUint8: out.debug << "Convert uint to uint8_t";break;
+ case EOpConvUintToUint16: out.debug << "Convert uint to uint16_t";break;
+ case EOpConvUintToUint64: out.debug << "Convert uint to uint64"; break;
+
+ // uint32_t -> float*
+ case EOpConvUintToFloat16: out.debug << "Convert uint to float16_t";break;
+ case EOpConvUintToFloat: out.debug << "Convert uint to float"; break;
+ case EOpConvUintToDouble: out.debug << "Convert uint to double"; break;
+
+ // int64 -> (u)int*
+ case EOpConvInt64ToInt8: out.debug << "Convert int64 to int8_t"; break;
+ case EOpConvInt64ToInt16: out.debug << "Convert int64 to int16_t"; break;
+ case EOpConvInt64ToInt: out.debug << "Convert int64 to int"; break;
+ case EOpConvInt64ToUint8: out.debug << "Convert int64 to uint8_t";break;
+ case EOpConvInt64ToUint16: out.debug << "Convert int64 to uint16_t";break;
+ case EOpConvInt64ToUint: out.debug << "Convert int64 to uint"; break;
+ case EOpConvInt64ToUint64: out.debug << "Convert int64 to uint64"; break;
+
+ // int64 -> float*
+ case EOpConvInt64ToFloat16: out.debug << "Convert int64 to float16_t";break;
+ case EOpConvInt64ToFloat: out.debug << "Convert int64 to float"; break;
+ case EOpConvInt64ToDouble: out.debug << "Convert int64 to double"; break;
+
+ // uint64 -> (u)int*
+ case EOpConvUint64ToInt8: out.debug << "Convert uint64 to int8_t";break;
+ case EOpConvUint64ToInt16: out.debug << "Convert uint64 to int16_t";break;
+ case EOpConvUint64ToInt: out.debug << "Convert uint64 to int"; break;
+ case EOpConvUint64ToInt64: out.debug << "Convert uint64 to int64"; break;
+ case EOpConvUint64ToUint8: out.debug << "Convert uint64 to uint8_t";break;
+ case EOpConvUint64ToUint16: out.debug << "Convert uint64 to uint16_t"; break;
+ case EOpConvUint64ToUint: out.debug << "Convert uint64 to uint"; break;
+
+ // uint64 -> float*
+ case EOpConvUint64ToFloat16: out.debug << "Convert uint64 to float16_t";break;
+ case EOpConvUint64ToFloat: out.debug << "Convert uint64 to float"; break;
+ case EOpConvUint64ToDouble: out.debug << "Convert uint64 to double"; break;
+
+ // float16_t -> int*
+ case EOpConvFloat16ToInt8: out.debug << "Convert float16_t to int8_t"; break;
+ case EOpConvFloat16ToInt16: out.debug << "Convert float16_t to int16_t"; break;
+ case EOpConvFloat16ToInt: out.debug << "Convert float16_t to int"; break;
+ case EOpConvFloat16ToInt64: out.debug << "Convert float16_t to int64"; break;
+
+ // float16_t -> uint*
+ case EOpConvFloat16ToUint8: out.debug << "Convert float16_t to uint8_t"; break;
+ case EOpConvFloat16ToUint16: out.debug << "Convert float16_t to uint16_t"; break;
+ case EOpConvFloat16ToUint: out.debug << "Convert float16_t to uint"; break;
+ case EOpConvFloat16ToUint64: out.debug << "Convert float16_t to uint64"; break;
+
+ // float16_t -> float*
+ case EOpConvFloat16ToFloat: out.debug << "Convert float16_t to float"; break;
+ case EOpConvFloat16ToDouble: out.debug << "Convert float16_t to double"; break;
+
+ // float32 -> float*
+ case EOpConvFloatToFloat16: out.debug << "Convert float to float16_t"; break;
+ case EOpConvFloatToDouble: out.debug << "Convert float to double"; break;
+
+ // float32_t -> int*
+ case EOpConvFloatToInt8: out.debug << "Convert float to int8_t"; break;
+ case EOpConvFloatToInt16: out.debug << "Convert float to int16_t"; break;
+ case EOpConvFloatToInt: out.debug << "Convert float to int"; break;
+ case EOpConvFloatToInt64: out.debug << "Convert float to int64"; break;
+
+ // float32_t -> uint*
+ case EOpConvFloatToUint8: out.debug << "Convert float to uint8_t"; break;
+ case EOpConvFloatToUint16: out.debug << "Convert float to uint16_t"; break;
+ case EOpConvFloatToUint: out.debug << "Convert float to uint"; break;
+ case EOpConvFloatToUint64: out.debug << "Convert float to uint64"; break;
+
+ // double -> float*
+ case EOpConvDoubleToFloat16: out.debug << "Convert double to float16_t"; break;
+ case EOpConvDoubleToFloat: out.debug << "Convert double to float"; break;
+
+ // double -> int*
+ case EOpConvDoubleToInt8: out.debug << "Convert double to int8_t"; break;
+ case EOpConvDoubleToInt16: out.debug << "Convert double to int16_t"; break;
+ case EOpConvDoubleToInt: out.debug << "Convert double to int"; break;
+ case EOpConvDoubleToInt64: out.debug << "Convert double to int64"; break;
+
+ // float32_t -> uint*
+ case EOpConvDoubleToUint8: out.debug << "Convert double to uint8_t"; break;
+ case EOpConvDoubleToUint16: out.debug << "Convert double to uint16_t"; break;
+ case EOpConvDoubleToUint: out.debug << "Convert double to uint"; break;
+ case EOpConvDoubleToUint64: out.debug << "Convert double to uint64"; break;
+
+ case EOpConvUint64ToPtr: out.debug << "Convert uint64_t to pointer"; break;
+ case EOpConvPtrToUint64: out.debug << "Convert pointer to uint64_t"; break;
+
+ case EOpRadians: out.debug << "radians"; break;
+ case EOpDegrees: out.debug << "degrees"; break;
+ case EOpSin: out.debug << "sine"; break;
+ case EOpCos: out.debug << "cosine"; break;
+ case EOpTan: out.debug << "tangent"; break;
+ case EOpAsin: out.debug << "arc sine"; break;
+ case EOpAcos: out.debug << "arc cosine"; break;
+ case EOpAtan: out.debug << "arc tangent"; break;
+ case EOpSinh: out.debug << "hyp. sine"; break;
+ case EOpCosh: out.debug << "hyp. cosine"; break;
+ case EOpTanh: out.debug << "hyp. tangent"; break;
+ case EOpAsinh: out.debug << "arc hyp. sine"; break;
+ case EOpAcosh: out.debug << "arc hyp. cosine"; break;
+ case EOpAtanh: out.debug << "arc hyp. tangent"; break;
+
+ case EOpExp: out.debug << "exp"; break;
+ case EOpLog: out.debug << "log"; break;
+ case EOpExp2: out.debug << "exp2"; break;
+ case EOpLog2: out.debug << "log2"; break;
+ case EOpSqrt: out.debug << "sqrt"; break;
+ case EOpInverseSqrt: out.debug << "inverse sqrt"; break;
+
+ case EOpAbs: out.debug << "Absolute value"; break;
+ case EOpSign: out.debug << "Sign"; break;
+ case EOpFloor: out.debug << "Floor"; break;
+ case EOpTrunc: out.debug << "trunc"; break;
+ case EOpRound: out.debug << "round"; break;
+ case EOpRoundEven: out.debug << "roundEven"; break;
+ case EOpCeil: out.debug << "Ceiling"; break;
+ case EOpFract: out.debug << "Fraction"; break;
+
+ case EOpIsNan: out.debug << "isnan"; break;
+ case EOpIsInf: out.debug << "isinf"; break;
+
+ case EOpFloatBitsToInt: out.debug << "floatBitsToInt"; break;
+ case EOpFloatBitsToUint:out.debug << "floatBitsToUint"; break;
+ case EOpIntBitsToFloat: out.debug << "intBitsToFloat"; break;
+ case EOpUintBitsToFloat:out.debug << "uintBitsToFloat"; break;
+ case EOpDoubleBitsToInt64: out.debug << "doubleBitsToInt64"; break;
+ case EOpDoubleBitsToUint64: out.debug << "doubleBitsToUint64"; break;
+ case EOpInt64BitsToDouble: out.debug << "int64BitsToDouble"; break;
+ case EOpUint64BitsToDouble: out.debug << "uint64BitsToDouble"; break;
+ case EOpFloat16BitsToInt16: out.debug << "float16BitsToInt16"; break;
+ case EOpFloat16BitsToUint16: out.debug << "float16BitsToUint16"; break;
+ case EOpInt16BitsToFloat16: out.debug << "int16BitsToFloat16"; break;
+ case EOpUint16BitsToFloat16: out.debug << "uint16BitsToFloat16"; break;
+
+ case EOpPackSnorm2x16: out.debug << "packSnorm2x16"; break;
+ case EOpUnpackSnorm2x16:out.debug << "unpackSnorm2x16"; break;
+ case EOpPackUnorm2x16: out.debug << "packUnorm2x16"; break;
+ case EOpUnpackUnorm2x16:out.debug << "unpackUnorm2x16"; break;
+ case EOpPackHalf2x16: out.debug << "packHalf2x16"; break;
+ case EOpUnpackHalf2x16: out.debug << "unpackHalf2x16"; break;
+ case EOpPack16: out.debug << "pack16"; break;
+ case EOpPack32: out.debug << "pack32"; break;
+ case EOpPack64: out.debug << "pack64"; break;
+ case EOpUnpack32: out.debug << "unpack32"; break;
+ case EOpUnpack16: out.debug << "unpack16"; break;
+ case EOpUnpack8: out.debug << "unpack8"; break;
+
+ case EOpPackSnorm4x8: out.debug << "PackSnorm4x8"; break;
+ case EOpUnpackSnorm4x8: out.debug << "UnpackSnorm4x8"; break;
+ case EOpPackUnorm4x8: out.debug << "PackUnorm4x8"; break;
+ case EOpUnpackUnorm4x8: out.debug << "UnpackUnorm4x8"; break;
+ case EOpPackDouble2x32: out.debug << "PackDouble2x32"; break;
+ case EOpUnpackDouble2x32: out.debug << "UnpackDouble2x32"; break;
+
+ case EOpPackInt2x32: out.debug << "packInt2x32"; break;
+ case EOpUnpackInt2x32: out.debug << "unpackInt2x32"; break;
+ case EOpPackUint2x32: out.debug << "packUint2x32"; break;
+ case EOpUnpackUint2x32: out.debug << "unpackUint2x32"; break;
+
+ case EOpPackInt2x16: out.debug << "packInt2x16"; break;
+ case EOpUnpackInt2x16: out.debug << "unpackInt2x16"; break;
+ case EOpPackUint2x16: out.debug << "packUint2x16"; break;
+ case EOpUnpackUint2x16: out.debug << "unpackUint2x16"; break;
+
+ case EOpPackInt4x16: out.debug << "packInt4x16"; break;
+ case EOpUnpackInt4x16: out.debug << "unpackInt4x16"; break;
+ case EOpPackUint4x16: out.debug << "packUint4x16"; break;
+ case EOpUnpackUint4x16: out.debug << "unpackUint4x16"; break;
+ case EOpPackFloat2x16: out.debug << "packFloat2x16"; break;
+ case EOpUnpackFloat2x16: out.debug << "unpackFloat2x16"; break;
+
+ case EOpLength: out.debug << "length"; break;
+ case EOpNormalize: out.debug << "normalize"; break;
+ case EOpDPdx: out.debug << "dPdx"; break;
+ case EOpDPdy: out.debug << "dPdy"; break;
+ case EOpFwidth: out.debug << "fwidth"; break;
+ case EOpDPdxFine: out.debug << "dPdxFine"; break;
+ case EOpDPdyFine: out.debug << "dPdyFine"; break;
+ case EOpFwidthFine: out.debug << "fwidthFine"; break;
+ case EOpDPdxCoarse: out.debug << "dPdxCoarse"; break;
+ case EOpDPdyCoarse: out.debug << "dPdyCoarse"; break;
+ case EOpFwidthCoarse: out.debug << "fwidthCoarse"; break;
+
+ case EOpInterpolateAtCentroid: out.debug << "interpolateAtCentroid"; break;
+
+ case EOpDeterminant: out.debug << "determinant"; break;
+ case EOpMatrixInverse: out.debug << "inverse"; break;
+ case EOpTranspose: out.debug << "transpose"; break;
+
+ case EOpAny: out.debug << "any"; break;
+ case EOpAll: out.debug << "all"; break;
+
+ case EOpArrayLength: out.debug << "array length"; break;
+
+ case EOpEmitStreamVertex: out.debug << "EmitStreamVertex"; break;
+ case EOpEndStreamPrimitive: out.debug << "EndStreamPrimitive"; break;
+
+ case EOpAtomicCounterIncrement: out.debug << "AtomicCounterIncrement";break;
+ case EOpAtomicCounterDecrement: out.debug << "AtomicCounterDecrement";break;
+ case EOpAtomicCounter: out.debug << "AtomicCounter"; break;
+
+ case EOpTextureQuerySize: out.debug << "textureSize"; break;
+ case EOpTextureQueryLod: out.debug << "textureQueryLod"; break;
+ case EOpTextureQueryLevels: out.debug << "textureQueryLevels"; break;
+ case EOpTextureQuerySamples: out.debug << "textureSamples"; break;
+ case EOpImageQuerySize: out.debug << "imageQuerySize"; break;
+ case EOpImageQuerySamples: out.debug << "imageQuerySamples"; break;
+ case EOpImageLoad: out.debug << "imageLoad"; break;
+
+ case EOpBitFieldReverse: out.debug << "bitFieldReverse"; break;
+ case EOpBitCount: out.debug << "bitCount"; break;
+ case EOpFindLSB: out.debug << "findLSB"; break;
+ case EOpFindMSB: out.debug << "findMSB"; break;
+
+ case EOpNoise: out.debug << "noise"; break;
+
+ case EOpBallot: out.debug << "ballot"; break;
+ case EOpReadFirstInvocation: out.debug << "readFirstInvocation"; break;
+
+ case EOpAnyInvocation: out.debug << "anyInvocation"; break;
+ case EOpAllInvocations: out.debug << "allInvocations"; break;
+ case EOpAllInvocationsEqual: out.debug << "allInvocationsEqual"; break;
+
+ case EOpSubgroupElect: out.debug << "subgroupElect"; break;
+ case EOpSubgroupAll: out.debug << "subgroupAll"; break;
+ case EOpSubgroupAny: out.debug << "subgroupAny"; break;
+ case EOpSubgroupAllEqual: out.debug << "subgroupAllEqual"; break;
+ case EOpSubgroupBroadcast: out.debug << "subgroupBroadcast"; break;
+ case EOpSubgroupBroadcastFirst: out.debug << "subgroupBroadcastFirst"; break;
+ case EOpSubgroupBallot: out.debug << "subgroupBallot"; break;
+ case EOpSubgroupInverseBallot: out.debug << "subgroupInverseBallot"; break;
+ case EOpSubgroupBallotBitExtract: out.debug << "subgroupBallotBitExtract"; break;
+ case EOpSubgroupBallotBitCount: out.debug << "subgroupBallotBitCount"; break;
+ case EOpSubgroupBallotInclusiveBitCount: out.debug << "subgroupBallotInclusiveBitCount"; break;
+ case EOpSubgroupBallotExclusiveBitCount: out.debug << "subgroupBallotExclusiveBitCount"; break;
+ case EOpSubgroupBallotFindLSB: out.debug << "subgroupBallotFindLSB"; break;
+ case EOpSubgroupBallotFindMSB: out.debug << "subgroupBallotFindMSB"; break;
+ case EOpSubgroupShuffle: out.debug << "subgroupShuffle"; break;
+ case EOpSubgroupShuffleXor: out.debug << "subgroupShuffleXor"; break;
+ case EOpSubgroupShuffleUp: out.debug << "subgroupShuffleUp"; break;
+ case EOpSubgroupShuffleDown: out.debug << "subgroupShuffleDown"; break;
+ case EOpSubgroupAdd: out.debug << "subgroupAdd"; break;
+ case EOpSubgroupMul: out.debug << "subgroupMul"; break;
+ case EOpSubgroupMin: out.debug << "subgroupMin"; break;
+ case EOpSubgroupMax: out.debug << "subgroupMax"; break;
+ case EOpSubgroupAnd: out.debug << "subgroupAnd"; break;
+ case EOpSubgroupOr: out.debug << "subgroupOr"; break;
+ case EOpSubgroupXor: out.debug << "subgroupXor"; break;
+ case EOpSubgroupInclusiveAdd: out.debug << "subgroupInclusiveAdd"; break;
+ case EOpSubgroupInclusiveMul: out.debug << "subgroupInclusiveMul"; break;
+ case EOpSubgroupInclusiveMin: out.debug << "subgroupInclusiveMin"; break;
+ case EOpSubgroupInclusiveMax: out.debug << "subgroupInclusiveMax"; break;
+ case EOpSubgroupInclusiveAnd: out.debug << "subgroupInclusiveAnd"; break;
+ case EOpSubgroupInclusiveOr: out.debug << "subgroupInclusiveOr"; break;
+ case EOpSubgroupInclusiveXor: out.debug << "subgroupInclusiveXor"; break;
+ case EOpSubgroupExclusiveAdd: out.debug << "subgroupExclusiveAdd"; break;
+ case EOpSubgroupExclusiveMul: out.debug << "subgroupExclusiveMul"; break;
+ case EOpSubgroupExclusiveMin: out.debug << "subgroupExclusiveMin"; break;
+ case EOpSubgroupExclusiveMax: out.debug << "subgroupExclusiveMax"; break;
+ case EOpSubgroupExclusiveAnd: out.debug << "subgroupExclusiveAnd"; break;
+ case EOpSubgroupExclusiveOr: out.debug << "subgroupExclusiveOr"; break;
+ case EOpSubgroupExclusiveXor: out.debug << "subgroupExclusiveXor"; break;
+ case EOpSubgroupClusteredAdd: out.debug << "subgroupClusteredAdd"; break;
+ case EOpSubgroupClusteredMul: out.debug << "subgroupClusteredMul"; break;
+ case EOpSubgroupClusteredMin: out.debug << "subgroupClusteredMin"; break;
+ case EOpSubgroupClusteredMax: out.debug << "subgroupClusteredMax"; break;
+ case EOpSubgroupClusteredAnd: out.debug << "subgroupClusteredAnd"; break;
+ case EOpSubgroupClusteredOr: out.debug << "subgroupClusteredOr"; break;
+ case EOpSubgroupClusteredXor: out.debug << "subgroupClusteredXor"; break;
+ case EOpSubgroupQuadBroadcast: out.debug << "subgroupQuadBroadcast"; break;
+ case EOpSubgroupQuadSwapHorizontal: out.debug << "subgroupQuadSwapHorizontal"; break;
+ case EOpSubgroupQuadSwapVertical: out.debug << "subgroupQuadSwapVertical"; break;
+ case EOpSubgroupQuadSwapDiagonal: out.debug << "subgroupQuadSwapDiagonal"; break;
+
+#ifdef NV_EXTENSIONS
+ case EOpSubgroupPartition: out.debug << "subgroupPartitionNV"; break;
+ case EOpSubgroupPartitionedAdd: out.debug << "subgroupPartitionedAddNV"; break;
+ case EOpSubgroupPartitionedMul: out.debug << "subgroupPartitionedMulNV"; break;
+ case EOpSubgroupPartitionedMin: out.debug << "subgroupPartitionedMinNV"; break;
+ case EOpSubgroupPartitionedMax: out.debug << "subgroupPartitionedMaxNV"; break;
+ case EOpSubgroupPartitionedAnd: out.debug << "subgroupPartitionedAndNV"; break;
+ case EOpSubgroupPartitionedOr: out.debug << "subgroupPartitionedOrNV"; break;
+ case EOpSubgroupPartitionedXor: out.debug << "subgroupPartitionedXorNV"; break;
+ case EOpSubgroupPartitionedInclusiveAdd: out.debug << "subgroupPartitionedInclusiveAddNV"; break;
+ case EOpSubgroupPartitionedInclusiveMul: out.debug << "subgroupPartitionedInclusiveMulNV"; break;
+ case EOpSubgroupPartitionedInclusiveMin: out.debug << "subgroupPartitionedInclusiveMinNV"; break;
+ case EOpSubgroupPartitionedInclusiveMax: out.debug << "subgroupPartitionedInclusiveMaxNV"; break;
+ case EOpSubgroupPartitionedInclusiveAnd: out.debug << "subgroupPartitionedInclusiveAndNV"; break;
+ case EOpSubgroupPartitionedInclusiveOr: out.debug << "subgroupPartitionedInclusiveOrNV"; break;
+ case EOpSubgroupPartitionedInclusiveXor: out.debug << "subgroupPartitionedInclusiveXorNV"; break;
+ case EOpSubgroupPartitionedExclusiveAdd: out.debug << "subgroupPartitionedExclusiveAddNV"; break;
+ case EOpSubgroupPartitionedExclusiveMul: out.debug << "subgroupPartitionedExclusiveMulNV"; break;
+ case EOpSubgroupPartitionedExclusiveMin: out.debug << "subgroupPartitionedExclusiveMinNV"; break;
+ case EOpSubgroupPartitionedExclusiveMax: out.debug << "subgroupPartitionedExclusiveMaxNV"; break;
+ case EOpSubgroupPartitionedExclusiveAnd: out.debug << "subgroupPartitionedExclusiveAndNV"; break;
+ case EOpSubgroupPartitionedExclusiveOr: out.debug << "subgroupPartitionedExclusiveOrNV"; break;
+ case EOpSubgroupPartitionedExclusiveXor: out.debug << "subgroupPartitionedExclusiveXorNV"; break;
+#endif
+
+ case EOpClip: out.debug << "clip"; break;
+ case EOpIsFinite: out.debug << "isfinite"; break;
+ case EOpLog10: out.debug << "log10"; break;
+ case EOpRcp: out.debug << "rcp"; break;
+ case EOpSaturate: out.debug << "saturate"; break;
+
+ case EOpSparseTexelsResident: out.debug << "sparseTexelsResident"; break;
+
+#ifdef AMD_EXTENSIONS
+ case EOpMinInvocations: out.debug << "minInvocations"; break;
+ case EOpMaxInvocations: out.debug << "maxInvocations"; break;
+ case EOpAddInvocations: out.debug << "addInvocations"; break;
+ case EOpMinInvocationsNonUniform: out.debug << "minInvocationsNonUniform"; break;
+ case EOpMaxInvocationsNonUniform: out.debug << "maxInvocationsNonUniform"; break;
+ case EOpAddInvocationsNonUniform: out.debug << "addInvocationsNonUniform"; break;
+
+ case EOpMinInvocationsInclusiveScan: out.debug << "minInvocationsInclusiveScan"; break;
+ case EOpMaxInvocationsInclusiveScan: out.debug << "maxInvocationsInclusiveScan"; break;
+ case EOpAddInvocationsInclusiveScan: out.debug << "addInvocationsInclusiveScan"; break;
+ case EOpMinInvocationsInclusiveScanNonUniform: out.debug << "minInvocationsInclusiveScanNonUniform"; break;
+ case EOpMaxInvocationsInclusiveScanNonUniform: out.debug << "maxInvocationsInclusiveScanNonUniform"; break;
+ case EOpAddInvocationsInclusiveScanNonUniform: out.debug << "addInvocationsInclusiveScanNonUniform"; break;
+
+ case EOpMinInvocationsExclusiveScan: out.debug << "minInvocationsExclusiveScan"; break;
+ case EOpMaxInvocationsExclusiveScan: out.debug << "maxInvocationsExclusiveScan"; break;
+ case EOpAddInvocationsExclusiveScan: out.debug << "addInvocationsExclusiveScan"; break;
+ case EOpMinInvocationsExclusiveScanNonUniform: out.debug << "minInvocationsExclusiveScanNonUniform"; break;
+ case EOpMaxInvocationsExclusiveScanNonUniform: out.debug << "maxInvocationsExclusiveScanNonUniform"; break;
+ case EOpAddInvocationsExclusiveScanNonUniform: out.debug << "addInvocationsExclusiveScanNonUniform"; break;
+
+ case EOpMbcnt: out.debug << "mbcnt"; break;
+
+ case EOpFragmentMaskFetch: out.debug << "fragmentMaskFetchAMD"; break;
+ case EOpFragmentFetch: out.debug << "fragmentFetchAMD"; break;
+
+ case EOpCubeFaceIndex: out.debug << "cubeFaceIndex"; break;
+ case EOpCubeFaceCoord: out.debug << "cubeFaceCoord"; break;
+#endif
+
+ case EOpSubpassLoad: out.debug << "subpassLoad"; break;
+ case EOpSubpassLoadMS: out.debug << "subpassLoadMS"; break;
+
+ case EOpConstructReference: out.debug << "Construct reference type"; break;
+
+ default: out.debug.message(EPrefixError, "Bad unary op");
+ }
+
+ out.debug << " (" << node->getCompleteString() << ")";
+
+ out.debug << "\n";
+
+ return true;
+}
+
+bool TOutputTraverser::visitAggregate(TVisit /* visit */, TIntermAggregate* node)
+{
+ TInfoSink& out = infoSink;
+
+ if (node->getOp() == EOpNull) {
+ out.debug.message(EPrefixError, "node is still EOpNull!");
+ return true;
+ }
+
+ OutputTreeText(out, node, depth);
+
+ switch (node->getOp()) {
+ case EOpSequence: out.debug << "Sequence\n"; return true;
+ case EOpLinkerObjects: out.debug << "Linker Objects\n"; return true;
+ case EOpComma: out.debug << "Comma"; break;
+ case EOpFunction: out.debug << "Function Definition: " << node->getName(); break;
+ case EOpFunctionCall: out.debug << "Function Call: " << node->getName(); break;
+ case EOpParameters: out.debug << "Function Parameters: "; break;
+
+ case EOpConstructFloat: out.debug << "Construct float"; break;
+ case EOpConstructDouble:out.debug << "Construct double"; break;
+
+ case EOpConstructVec2: out.debug << "Construct vec2"; break;
+ case EOpConstructVec3: out.debug << "Construct vec3"; break;
+ case EOpConstructVec4: out.debug << "Construct vec4"; break;
+ case EOpConstructDVec2: out.debug << "Construct dvec2"; break;
+ case EOpConstructDVec3: out.debug << "Construct dvec3"; break;
+ case EOpConstructDVec4: out.debug << "Construct dvec4"; break;
+ case EOpConstructBool: out.debug << "Construct bool"; break;
+ case EOpConstructBVec2: out.debug << "Construct bvec2"; break;
+ case EOpConstructBVec3: out.debug << "Construct bvec3"; break;
+ case EOpConstructBVec4: out.debug << "Construct bvec4"; break;
+ case EOpConstructInt8: out.debug << "Construct int8_t"; break;
+ case EOpConstructI8Vec2: out.debug << "Construct i8vec2"; break;
+ case EOpConstructI8Vec3: out.debug << "Construct i8vec3"; break;
+ case EOpConstructI8Vec4: out.debug << "Construct i8vec4"; break;
+ case EOpConstructInt: out.debug << "Construct int"; break;
+ case EOpConstructIVec2: out.debug << "Construct ivec2"; break;
+ case EOpConstructIVec3: out.debug << "Construct ivec3"; break;
+ case EOpConstructIVec4: out.debug << "Construct ivec4"; break;
+ case EOpConstructUint8: out.debug << "Construct uint8_t"; break;
+ case EOpConstructU8Vec2: out.debug << "Construct u8vec2"; break;
+ case EOpConstructU8Vec3: out.debug << "Construct u8vec3"; break;
+ case EOpConstructU8Vec4: out.debug << "Construct u8vec4"; break;
+ case EOpConstructUint: out.debug << "Construct uint"; break;
+ case EOpConstructUVec2: out.debug << "Construct uvec2"; break;
+ case EOpConstructUVec3: out.debug << "Construct uvec3"; break;
+ case EOpConstructUVec4: out.debug << "Construct uvec4"; break;
+ case EOpConstructInt64: out.debug << "Construct int64"; break;
+ case EOpConstructI64Vec2: out.debug << "Construct i64vec2"; break;
+ case EOpConstructI64Vec3: out.debug << "Construct i64vec3"; break;
+ case EOpConstructI64Vec4: out.debug << "Construct i64vec4"; break;
+ case EOpConstructUint64: out.debug << "Construct uint64"; break;
+ case EOpConstructU64Vec2: out.debug << "Construct u64vec2"; break;
+ case EOpConstructU64Vec3: out.debug << "Construct u64vec3"; break;
+ case EOpConstructU64Vec4: out.debug << "Construct u64vec4"; break;
+ case EOpConstructInt16: out.debug << "Construct int16_t"; break;
+ case EOpConstructI16Vec2: out.debug << "Construct i16vec2"; break;
+ case EOpConstructI16Vec3: out.debug << "Construct i16vec3"; break;
+ case EOpConstructI16Vec4: out.debug << "Construct i16vec4"; break;
+ case EOpConstructUint16: out.debug << "Construct uint16_t"; break;
+ case EOpConstructU16Vec2: out.debug << "Construct u16vec2"; break;
+ case EOpConstructU16Vec3: out.debug << "Construct u16vec3"; break;
+ case EOpConstructU16Vec4: out.debug << "Construct u16vec4"; break;
+ case EOpConstructMat2x2: out.debug << "Construct mat2"; break;
+ case EOpConstructMat2x3: out.debug << "Construct mat2x3"; break;
+ case EOpConstructMat2x4: out.debug << "Construct mat2x4"; break;
+ case EOpConstructMat3x2: out.debug << "Construct mat3x2"; break;
+ case EOpConstructMat3x3: out.debug << "Construct mat3"; break;
+ case EOpConstructMat3x4: out.debug << "Construct mat3x4"; break;
+ case EOpConstructMat4x2: out.debug << "Construct mat4x2"; break;
+ case EOpConstructMat4x3: out.debug << "Construct mat4x3"; break;
+ case EOpConstructMat4x4: out.debug << "Construct mat4"; break;
+ case EOpConstructDMat2x2: out.debug << "Construct dmat2"; break;
+ case EOpConstructDMat2x3: out.debug << "Construct dmat2x3"; break;
+ case EOpConstructDMat2x4: out.debug << "Construct dmat2x4"; break;
+ case EOpConstructDMat3x2: out.debug << "Construct dmat3x2"; break;
+ case EOpConstructDMat3x3: out.debug << "Construct dmat3"; break;
+ case EOpConstructDMat3x4: out.debug << "Construct dmat3x4"; break;
+ case EOpConstructDMat4x2: out.debug << "Construct dmat4x2"; break;
+ case EOpConstructDMat4x3: out.debug << "Construct dmat4x3"; break;
+ case EOpConstructDMat4x4: out.debug << "Construct dmat4"; break;
+ case EOpConstructIMat2x2: out.debug << "Construct imat2"; break;
+ case EOpConstructIMat2x3: out.debug << "Construct imat2x3"; break;
+ case EOpConstructIMat2x4: out.debug << "Construct imat2x4"; break;
+ case EOpConstructIMat3x2: out.debug << "Construct imat3x2"; break;
+ case EOpConstructIMat3x3: out.debug << "Construct imat3"; break;
+ case EOpConstructIMat3x4: out.debug << "Construct imat3x4"; break;
+ case EOpConstructIMat4x2: out.debug << "Construct imat4x2"; break;
+ case EOpConstructIMat4x3: out.debug << "Construct imat4x3"; break;
+ case EOpConstructIMat4x4: out.debug << "Construct imat4"; break;
+ case EOpConstructUMat2x2: out.debug << "Construct umat2"; break;
+ case EOpConstructUMat2x3: out.debug << "Construct umat2x3"; break;
+ case EOpConstructUMat2x4: out.debug << "Construct umat2x4"; break;
+ case EOpConstructUMat3x2: out.debug << "Construct umat3x2"; break;
+ case EOpConstructUMat3x3: out.debug << "Construct umat3"; break;
+ case EOpConstructUMat3x4: out.debug << "Construct umat3x4"; break;
+ case EOpConstructUMat4x2: out.debug << "Construct umat4x2"; break;
+ case EOpConstructUMat4x3: out.debug << "Construct umat4x3"; break;
+ case EOpConstructUMat4x4: out.debug << "Construct umat4"; break;
+ case EOpConstructBMat2x2: out.debug << "Construct bmat2"; break;
+ case EOpConstructBMat2x3: out.debug << "Construct bmat2x3"; break;
+ case EOpConstructBMat2x4: out.debug << "Construct bmat2x4"; break;
+ case EOpConstructBMat3x2: out.debug << "Construct bmat3x2"; break;
+ case EOpConstructBMat3x3: out.debug << "Construct bmat3"; break;
+ case EOpConstructBMat3x4: out.debug << "Construct bmat3x4"; break;
+ case EOpConstructBMat4x2: out.debug << "Construct bmat4x2"; break;
+ case EOpConstructBMat4x3: out.debug << "Construct bmat4x3"; break;
+ case EOpConstructBMat4x4: out.debug << "Construct bmat4"; break;
+ case EOpConstructFloat16: out.debug << "Construct float16_t"; break;
+ case EOpConstructF16Vec2: out.debug << "Construct f16vec2"; break;
+ case EOpConstructF16Vec3: out.debug << "Construct f16vec3"; break;
+ case EOpConstructF16Vec4: out.debug << "Construct f16vec4"; break;
+ case EOpConstructF16Mat2x2: out.debug << "Construct f16mat2"; break;
+ case EOpConstructF16Mat2x3: out.debug << "Construct f16mat2x3"; break;
+ case EOpConstructF16Mat2x4: out.debug << "Construct f16mat2x4"; break;
+ case EOpConstructF16Mat3x2: out.debug << "Construct f16mat3x2"; break;
+ case EOpConstructF16Mat3x3: out.debug << "Construct f16mat3"; break;
+ case EOpConstructF16Mat3x4: out.debug << "Construct f16mat3x4"; break;
+ case EOpConstructF16Mat4x2: out.debug << "Construct f16mat4x2"; break;
+ case EOpConstructF16Mat4x3: out.debug << "Construct f16mat4x3"; break;
+ case EOpConstructF16Mat4x4: out.debug << "Construct f16mat4"; break;
+ case EOpConstructStruct: out.debug << "Construct structure"; break;
+ case EOpConstructTextureSampler: out.debug << "Construct combined texture-sampler"; break;
+ case EOpConstructReference: out.debug << "Construct reference"; break;
+ case EOpConstructCooperativeMatrix: out.debug << "Construct cooperative matrix"; break;
+
+ case EOpLessThan: out.debug << "Compare Less Than"; break;
+ case EOpGreaterThan: out.debug << "Compare Greater Than"; break;
+ case EOpLessThanEqual: out.debug << "Compare Less Than or Equal"; break;
+ case EOpGreaterThanEqual: out.debug << "Compare Greater Than or Equal"; break;
+ case EOpVectorEqual: out.debug << "Equal"; break;
+ case EOpVectorNotEqual: out.debug << "NotEqual"; break;
+
+ case EOpMod: out.debug << "mod"; break;
+ case EOpModf: out.debug << "modf"; break;
+ case EOpPow: out.debug << "pow"; break;
+
+ case EOpAtan: out.debug << "arc tangent"; break;
+
+ case EOpMin: out.debug << "min"; break;
+ case EOpMax: out.debug << "max"; break;
+ case EOpClamp: out.debug << "clamp"; break;
+ case EOpMix: out.debug << "mix"; break;
+ case EOpStep: out.debug << "step"; break;
+ case EOpSmoothStep: out.debug << "smoothstep"; break;
+
+ case EOpDistance: out.debug << "distance"; break;
+ case EOpDot: out.debug << "dot-product"; break;
+ case EOpCross: out.debug << "cross-product"; break;
+ case EOpFaceForward: out.debug << "face-forward"; break;
+ case EOpReflect: out.debug << "reflect"; break;
+ case EOpRefract: out.debug << "refract"; break;
+ case EOpMul: out.debug << "component-wise multiply"; break;
+ case EOpOuterProduct: out.debug << "outer product"; break;
+
+ case EOpEmitVertex: out.debug << "EmitVertex"; break;
+ case EOpEndPrimitive: out.debug << "EndPrimitive"; break;
+
+ case EOpBarrier: out.debug << "Barrier"; break;
+ case EOpMemoryBarrier: out.debug << "MemoryBarrier"; break;
+ case EOpMemoryBarrierAtomicCounter: out.debug << "MemoryBarrierAtomicCounter"; break;
+ case EOpMemoryBarrierBuffer: out.debug << "MemoryBarrierBuffer"; break;
+ case EOpMemoryBarrierImage: out.debug << "MemoryBarrierImage"; break;
+ case EOpMemoryBarrierShared: out.debug << "MemoryBarrierShared"; break;
+ case EOpGroupMemoryBarrier: out.debug << "GroupMemoryBarrier"; break;
+
+ case EOpReadInvocation: out.debug << "readInvocation"; break;
+
+#ifdef AMD_EXTENSIONS
+ case EOpSwizzleInvocations: out.debug << "swizzleInvocations"; break;
+ case EOpSwizzleInvocationsMasked: out.debug << "swizzleInvocationsMasked"; break;
+ case EOpWriteInvocation: out.debug << "writeInvocation"; break;
+
+ case EOpMin3: out.debug << "min3"; break;
+ case EOpMax3: out.debug << "max3"; break;
+ case EOpMid3: out.debug << "mid3"; break;
+
+ case EOpTime: out.debug << "time"; break;
+#endif
+
+ case EOpAtomicAdd: out.debug << "AtomicAdd"; break;
+ case EOpAtomicMin: out.debug << "AtomicMin"; break;
+ case EOpAtomicMax: out.debug << "AtomicMax"; break;
+ case EOpAtomicAnd: out.debug << "AtomicAnd"; break;
+ case EOpAtomicOr: out.debug << "AtomicOr"; break;
+ case EOpAtomicXor: out.debug << "AtomicXor"; break;
+ case EOpAtomicExchange: out.debug << "AtomicExchange"; break;
+ case EOpAtomicCompSwap: out.debug << "AtomicCompSwap"; break;
+ case EOpAtomicLoad: out.debug << "AtomicLoad"; break;
+ case EOpAtomicStore: out.debug << "AtomicStore"; break;
+
+ case EOpAtomicCounterAdd: out.debug << "AtomicCounterAdd"; break;
+ case EOpAtomicCounterSubtract: out.debug << "AtomicCounterSubtract"; break;
+ case EOpAtomicCounterMin: out.debug << "AtomicCounterMin"; break;
+ case EOpAtomicCounterMax: out.debug << "AtomicCounterMax"; break;
+ case EOpAtomicCounterAnd: out.debug << "AtomicCounterAnd"; break;
+ case EOpAtomicCounterOr: out.debug << "AtomicCounterOr"; break;
+ case EOpAtomicCounterXor: out.debug << "AtomicCounterXor"; break;
+ case EOpAtomicCounterExchange: out.debug << "AtomicCounterExchange"; break;
+ case EOpAtomicCounterCompSwap: out.debug << "AtomicCounterCompSwap"; break;
+
+ case EOpImageQuerySize: out.debug << "imageQuerySize"; break;
+ case EOpImageQuerySamples: out.debug << "imageQuerySamples"; break;
+ case EOpImageLoad: out.debug << "imageLoad"; break;
+ case EOpImageStore: out.debug << "imageStore"; break;
+ case EOpImageAtomicAdd: out.debug << "imageAtomicAdd"; break;
+ case EOpImageAtomicMin: out.debug << "imageAtomicMin"; break;
+ case EOpImageAtomicMax: out.debug << "imageAtomicMax"; break;
+ case EOpImageAtomicAnd: out.debug << "imageAtomicAnd"; break;
+ case EOpImageAtomicOr: out.debug << "imageAtomicOr"; break;
+ case EOpImageAtomicXor: out.debug << "imageAtomicXor"; break;
+ case EOpImageAtomicExchange: out.debug << "imageAtomicExchange"; break;
+ case EOpImageAtomicCompSwap: out.debug << "imageAtomicCompSwap"; break;
+ case EOpImageAtomicLoad: out.debug << "imageAtomicLoad"; break;
+ case EOpImageAtomicStore: out.debug << "imageAtomicStore"; break;
+#ifdef AMD_EXTENSIONS
+ case EOpImageLoadLod: out.debug << "imageLoadLod"; break;
+ case EOpImageStoreLod: out.debug << "imageStoreLod"; break;
+#endif
+
+ case EOpTextureQuerySize: out.debug << "textureSize"; break;
+ case EOpTextureQueryLod: out.debug << "textureQueryLod"; break;
+ case EOpTextureQueryLevels: out.debug << "textureQueryLevels"; break;
+ case EOpTextureQuerySamples: out.debug << "textureSamples"; break;
+ case EOpTexture: out.debug << "texture"; break;
+ case EOpTextureProj: out.debug << "textureProj"; break;
+ case EOpTextureLod: out.debug << "textureLod"; break;
+ case EOpTextureOffset: out.debug << "textureOffset"; break;
+ case EOpTextureFetch: out.debug << "textureFetch"; break;
+ case EOpTextureFetchOffset: out.debug << "textureFetchOffset"; break;
+ case EOpTextureProjOffset: out.debug << "textureProjOffset"; break;
+ case EOpTextureLodOffset: out.debug << "textureLodOffset"; break;
+ case EOpTextureProjLod: out.debug << "textureProjLod"; break;
+ case EOpTextureProjLodOffset: out.debug << "textureProjLodOffset"; break;
+ case EOpTextureGrad: out.debug << "textureGrad"; break;
+ case EOpTextureGradOffset: out.debug << "textureGradOffset"; break;
+ case EOpTextureProjGrad: out.debug << "textureProjGrad"; break;
+ case EOpTextureProjGradOffset: out.debug << "textureProjGradOffset"; break;
+ case EOpTextureGather: out.debug << "textureGather"; break;
+ case EOpTextureGatherOffset: out.debug << "textureGatherOffset"; break;
+ case EOpTextureGatherOffsets: out.debug << "textureGatherOffsets"; break;
+ case EOpTextureClamp: out.debug << "textureClamp"; break;
+ case EOpTextureOffsetClamp: out.debug << "textureOffsetClamp"; break;
+ case EOpTextureGradClamp: out.debug << "textureGradClamp"; break;
+ case EOpTextureGradOffsetClamp: out.debug << "textureGradOffsetClamp"; break;
+#ifdef AMD_EXTENSIONS
+ case EOpTextureGatherLod: out.debug << "textureGatherLod"; break;
+ case EOpTextureGatherLodOffset: out.debug << "textureGatherLodOffset"; break;
+ case EOpTextureGatherLodOffsets: out.debug << "textureGatherLodOffsets"; break;
+#endif
+
+ case EOpSparseTexture: out.debug << "sparseTexture"; break;
+ case EOpSparseTextureOffset: out.debug << "sparseTextureOffset"; break;
+ case EOpSparseTextureLod: out.debug << "sparseTextureLod"; break;
+ case EOpSparseTextureLodOffset: out.debug << "sparseTextureLodOffset"; break;
+ case EOpSparseTextureFetch: out.debug << "sparseTexelFetch"; break;
+ case EOpSparseTextureFetchOffset: out.debug << "sparseTexelFetchOffset"; break;
+ case EOpSparseTextureGrad: out.debug << "sparseTextureGrad"; break;
+ case EOpSparseTextureGradOffset: out.debug << "sparseTextureGradOffset"; break;
+ case EOpSparseTextureGather: out.debug << "sparseTextureGather"; break;
+ case EOpSparseTextureGatherOffset: out.debug << "sparseTextureGatherOffset"; break;
+ case EOpSparseTextureGatherOffsets: out.debug << "sparseTextureGatherOffsets"; break;
+ case EOpSparseImageLoad: out.debug << "sparseImageLoad"; break;
+ case EOpSparseTextureClamp: out.debug << "sparseTextureClamp"; break;
+ case EOpSparseTextureOffsetClamp: out.debug << "sparseTextureOffsetClamp"; break;
+ case EOpSparseTextureGradClamp: out.debug << "sparseTextureGradClamp"; break;
+ case EOpSparseTextureGradOffsetClamp: out.debug << "sparseTextureGradOffsetClamp"; break;
+#ifdef AMD_EXTENSIONS
+ case EOpSparseTextureGatherLod: out.debug << "sparseTextureGatherLod"; break;
+ case EOpSparseTextureGatherLodOffset: out.debug << "sparseTextureGatherLodOffset"; break;
+ case EOpSparseTextureGatherLodOffsets: out.debug << "sparseTextureGatherLodOffsets"; break;
+ case EOpSparseImageLoadLod: out.debug << "sparseImageLoadLod"; break;
+#endif
+#ifdef NV_EXTENSIONS
+ case EOpImageSampleFootprintNV: out.debug << "imageSampleFootprintNV"; break;
+ case EOpImageSampleFootprintClampNV: out.debug << "imageSampleFootprintClampNV"; break;
+ case EOpImageSampleFootprintLodNV: out.debug << "imageSampleFootprintLodNV"; break;
+ case EOpImageSampleFootprintGradNV: out.debug << "imageSampleFootprintGradNV"; break;
+ case EOpImageSampleFootprintGradClampNV: out.debug << "imageSampleFootprintGradClampNV"; break;
+#endif
+ case EOpAddCarry: out.debug << "addCarry"; break;
+ case EOpSubBorrow: out.debug << "subBorrow"; break;
+ case EOpUMulExtended: out.debug << "uMulExtended"; break;
+ case EOpIMulExtended: out.debug << "iMulExtended"; break;
+ case EOpBitfieldExtract: out.debug << "bitfieldExtract"; break;
+ case EOpBitfieldInsert: out.debug << "bitfieldInsert"; break;
+
+ case EOpFma: out.debug << "fma"; break;
+ case EOpFrexp: out.debug << "frexp"; break;
+ case EOpLdexp: out.debug << "ldexp"; break;
+
+ case EOpInterpolateAtSample: out.debug << "interpolateAtSample"; break;
+ case EOpInterpolateAtOffset: out.debug << "interpolateAtOffset"; break;
+#ifdef AMD_EXTENSIONS
+ case EOpInterpolateAtVertex: out.debug << "interpolateAtVertex"; break;
+#endif
+
+ case EOpSinCos: out.debug << "sincos"; break;
+ case EOpGenMul: out.debug << "mul"; break;
+
+ case EOpAllMemoryBarrierWithGroupSync: out.debug << "AllMemoryBarrierWithGroupSync"; break;
+ case EOpDeviceMemoryBarrier: out.debug << "DeviceMemoryBarrier"; break;
+ case EOpDeviceMemoryBarrierWithGroupSync: out.debug << "DeviceMemoryBarrierWithGroupSync"; break;
+ case EOpWorkgroupMemoryBarrier: out.debug << "WorkgroupMemoryBarrier"; break;
+ case EOpWorkgroupMemoryBarrierWithGroupSync: out.debug << "WorkgroupMemoryBarrierWithGroupSync"; break;
+
+ case EOpSubgroupBarrier: out.debug << "subgroupBarrier"; break;
+ case EOpSubgroupMemoryBarrier: out.debug << "subgroupMemoryBarrier"; break;
+ case EOpSubgroupMemoryBarrierBuffer: out.debug << "subgroupMemoryBarrierBuffer"; break;
+ case EOpSubgroupMemoryBarrierImage: out.debug << "subgroupMemoryBarrierImage"; break;
+ case EOpSubgroupMemoryBarrierShared: out.debug << "subgroupMemoryBarrierShared"; break;
+ case EOpSubgroupElect: out.debug << "subgroupElect"; break;
+ case EOpSubgroupAll: out.debug << "subgroupAll"; break;
+ case EOpSubgroupAny: out.debug << "subgroupAny"; break;
+ case EOpSubgroupAllEqual: out.debug << "subgroupAllEqual"; break;
+ case EOpSubgroupBroadcast: out.debug << "subgroupBroadcast"; break;
+ case EOpSubgroupBroadcastFirst: out.debug << "subgroupBroadcastFirst"; break;
+ case EOpSubgroupBallot: out.debug << "subgroupBallot"; break;
+ case EOpSubgroupInverseBallot: out.debug << "subgroupInverseBallot"; break;
+ case EOpSubgroupBallotBitExtract: out.debug << "subgroupBallotBitExtract"; break;
+ case EOpSubgroupBallotBitCount: out.debug << "subgroupBallotBitCount"; break;
+ case EOpSubgroupBallotInclusiveBitCount: out.debug << "subgroupBallotInclusiveBitCount"; break;
+ case EOpSubgroupBallotExclusiveBitCount: out.debug << "subgroupBallotExclusiveBitCount"; break;
+ case EOpSubgroupBallotFindLSB: out.debug << "subgroupBallotFindLSB"; break;
+ case EOpSubgroupBallotFindMSB: out.debug << "subgroupBallotFindMSB"; break;
+ case EOpSubgroupShuffle: out.debug << "subgroupShuffle"; break;
+ case EOpSubgroupShuffleXor: out.debug << "subgroupShuffleXor"; break;
+ case EOpSubgroupShuffleUp: out.debug << "subgroupShuffleUp"; break;
+ case EOpSubgroupShuffleDown: out.debug << "subgroupShuffleDown"; break;
+ case EOpSubgroupAdd: out.debug << "subgroupAdd"; break;
+ case EOpSubgroupMul: out.debug << "subgroupMul"; break;
+ case EOpSubgroupMin: out.debug << "subgroupMin"; break;
+ case EOpSubgroupMax: out.debug << "subgroupMax"; break;
+ case EOpSubgroupAnd: out.debug << "subgroupAnd"; break;
+ case EOpSubgroupOr: out.debug << "subgroupOr"; break;
+ case EOpSubgroupXor: out.debug << "subgroupXor"; break;
+ case EOpSubgroupInclusiveAdd: out.debug << "subgroupInclusiveAdd"; break;
+ case EOpSubgroupInclusiveMul: out.debug << "subgroupInclusiveMul"; break;
+ case EOpSubgroupInclusiveMin: out.debug << "subgroupInclusiveMin"; break;
+ case EOpSubgroupInclusiveMax: out.debug << "subgroupInclusiveMax"; break;
+ case EOpSubgroupInclusiveAnd: out.debug << "subgroupInclusiveAnd"; break;
+ case EOpSubgroupInclusiveOr: out.debug << "subgroupInclusiveOr"; break;
+ case EOpSubgroupInclusiveXor: out.debug << "subgroupInclusiveXor"; break;
+ case EOpSubgroupExclusiveAdd: out.debug << "subgroupExclusiveAdd"; break;
+ case EOpSubgroupExclusiveMul: out.debug << "subgroupExclusiveMul"; break;
+ case EOpSubgroupExclusiveMin: out.debug << "subgroupExclusiveMin"; break;
+ case EOpSubgroupExclusiveMax: out.debug << "subgroupExclusiveMax"; break;
+ case EOpSubgroupExclusiveAnd: out.debug << "subgroupExclusiveAnd"; break;
+ case EOpSubgroupExclusiveOr: out.debug << "subgroupExclusiveOr"; break;
+ case EOpSubgroupExclusiveXor: out.debug << "subgroupExclusiveXor"; break;
+ case EOpSubgroupClusteredAdd: out.debug << "subgroupClusteredAdd"; break;
+ case EOpSubgroupClusteredMul: out.debug << "subgroupClusteredMul"; break;
+ case EOpSubgroupClusteredMin: out.debug << "subgroupClusteredMin"; break;
+ case EOpSubgroupClusteredMax: out.debug << "subgroupClusteredMax"; break;
+ case EOpSubgroupClusteredAnd: out.debug << "subgroupClusteredAnd"; break;
+ case EOpSubgroupClusteredOr: out.debug << "subgroupClusteredOr"; break;
+ case EOpSubgroupClusteredXor: out.debug << "subgroupClusteredXor"; break;
+ case EOpSubgroupQuadBroadcast: out.debug << "subgroupQuadBroadcast"; break;
+ case EOpSubgroupQuadSwapHorizontal: out.debug << "subgroupQuadSwapHorizontal"; break;
+ case EOpSubgroupQuadSwapVertical: out.debug << "subgroupQuadSwapVertical"; break;
+ case EOpSubgroupQuadSwapDiagonal: out.debug << "subgroupQuadSwapDiagonal"; break;
+
+ case EOpSubpassLoad: out.debug << "subpassLoad"; break;
+ case EOpSubpassLoadMS: out.debug << "subpassLoadMS"; break;
+
+#ifdef NV_EXTENSIONS
+ case EOpTraceNV: out.debug << "traceNV"; break;
+ case EOpReportIntersectionNV: out.debug << "reportIntersectionNV"; break;
+ case EOpIgnoreIntersectionNV: out.debug << "ignoreIntersectionNV"; break;
+ case EOpTerminateRayNV: out.debug << "terminateRayNV"; break;
+ case EOpExecuteCallableNV: out.debug << "executeCallableNV"; break;
+ case EOpWritePackedPrimitiveIndices4x8NV: out.debug << "writePackedPrimitiveIndices4x8NV"; break;
+#endif
+
+ case EOpCooperativeMatrixLoad: out.debug << "Load cooperative matrix"; break;
+ case EOpCooperativeMatrixStore: out.debug << "Store cooperative matrix"; break;
+ case EOpCooperativeMatrixMulAdd: out.debug << "MulAdd cooperative matrices"; break;
+
+ default: out.debug.message(EPrefixError, "Bad aggregation op");
+ }
+
+ if (node->getOp() != EOpSequence && node->getOp() != EOpParameters)
+ out.debug << " (" << node->getCompleteString() << ")";
+
+ out.debug << "\n";
+
+ return true;
+}
+
+bool TOutputTraverser::visitSelection(TVisit /* visit */, TIntermSelection* node)
+{
+ TInfoSink& out = infoSink;
+
+ OutputTreeText(out, node, depth);
+
+ out.debug << "Test condition and select";
+ out.debug << " (" << node->getCompleteString() << ")";
+
+ if (node->getShortCircuit() == false)
+ out.debug << ": no shortcircuit";
+ if (node->getFlatten())
+ out.debug << ": Flatten";
+ if (node->getDontFlatten())
+ out.debug << ": DontFlatten";
+ out.debug << "\n";
+
+ ++depth;
+
+ OutputTreeText(out, node, depth);
+ out.debug << "Condition\n";
+ node->getCondition()->traverse(this);
+
+ OutputTreeText(out, node, depth);
+ if (node->getTrueBlock()) {
+ out.debug << "true case\n";
+ node->getTrueBlock()->traverse(this);
+ } else
+ out.debug << "true case is null\n";
+
+ if (node->getFalseBlock()) {
+ OutputTreeText(out, node, depth);
+ out.debug << "false case\n";
+ node->getFalseBlock()->traverse(this);
+ }
+
+ --depth;
+
+ return false;
+}
+
+// Print infinities and NaNs, and numbers in a portable way.
+// Goals:
+// - portable (across IEEE 754 platforms)
+// - shows all possible IEEE values
+// - shows simple numbers in a simple way, e.g., no leading/trailing 0s
+// - shows all digits, no premature rounding
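+// For example (illustrative): 0.5 prints as "0.500000" via "%f", while
+// magnitudes below 1e-5 or above 1e12 switch to "%-.13e", so 1e-6 prints
+// as "1.0000000000000e-06".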
+static void OutputDouble(TInfoSink& out, double value, TOutputTraverser::EExtraOutput extra)
+{
+ if (IsInfinity(value)) {
+ if (value < 0)
+ out.debug << "-1.#INF";
+ else
+ out.debug << "+1.#INF";
+ } else if (IsNan(value))
+ out.debug << "1.#IND";
+ else {
+ const int maxSize = 340;
+ char buf[maxSize];
+ const char* format = "%f";
+ if (fabs(value) > 0.0 && (fabs(value) < 1e-5 || fabs(value) > 1e12))
+ format = "%-.13e";
+ int len = snprintf(buf, maxSize, format, value);
+ assert(len < maxSize);
+
+ // remove a leading zero in the 100s slot in exponent; it is not portable
+ // pattern: XX...XXXe+0XX or XX...XXXe-0XX
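+ // e.g., "1.0e+012" (printed on some platforms) becomes "1.0e+12"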
+ if (len > 5) {
+ if (buf[len-5] == 'e' && (buf[len-4] == '+' || buf[len-4] == '-') && buf[len-3] == '0') {
+ buf[len-3] = buf[len-2];
+ buf[len-2] = buf[len-1];
+ buf[len-1] = '\0';
+ }
+ }
+
+ out.debug << buf;
+
+ switch (extra) {
+ case TOutputTraverser::BinaryDoubleOutput:
+ {
+ uint64_t b;
+ static_assert(sizeof(b) == sizeof(value), "sizeof(uint64_t) != sizeof(double)");
+ memcpy(&b, &value, sizeof(b));
+
+ out.debug << " : ";
+ for (size_t i = 0; i < 8 * sizeof(value); ++i) {
+ out.debug << ((b & 0x8000000000000000) != 0 ? "1" : "0");
+ b <<= 1;
+ }
+ break;
+ }
+ default:
+ break;
+ }
+ }
+}
+
+static void OutputConstantUnion(TInfoSink& out, const TIntermTyped* node, const TConstUnionArray& constUnion,
+ TOutputTraverser::EExtraOutput extra, int depth)
+{
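+ // Print one line per scalar component, each indented to the node's depth.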
+ int size = node->getType().computeNumComponents();
+
+ for (int i = 0; i < size; i++) {
+ OutputTreeText(out, node, depth);
+ switch (constUnion[i].getType()) {
+ case EbtBool:
+ if (constUnion[i].getBConst())
+ out.debug << "true";
+ else
+ out.debug << "false";
+
+ out.debug << " (" << "const bool" << ")";
+
+ out.debug << "\n";
+ break;
+ case EbtFloat:
+ case EbtDouble:
+ case EbtFloat16:
+ OutputDouble(out, constUnion[i].getDConst(), extra);
+ out.debug << "\n";
+ break;
+ case EbtInt8:
+ {
+ const int maxSize = 300;
+ char buf[maxSize];
+ snprintf(buf, maxSize, "%d (%s)", constUnion[i].getI8Const(), "const int8_t");
+
+ out.debug << buf << "\n";
+ }
+ break;
+ case EbtUint8:
+ {
+ const int maxSize = 300;
+ char buf[maxSize];
+ snprintf(buf, maxSize, "%u (%s)", constUnion[i].getU8Const(), "const uint8_t");
+
+ out.debug << buf << "\n";
+ }
+ break;
+ case EbtInt16:
+ {
+ const int maxSize = 300;
+ char buf[maxSize];
+ snprintf(buf, maxSize, "%d (%s)", constUnion[i].getI16Const(), "const int16_t");
+
+ out.debug << buf << "\n";
+ }
+ break;
+ case EbtUint16:
+ {
+ const int maxSize = 300;
+ char buf[maxSize];
+ snprintf(buf, maxSize, "%u (%s)", constUnion[i].getU16Const(), "const uint16_t");
+
+ out.debug << buf << "\n";
+ }
+ break;
+ case EbtInt:
+ {
+ const int maxSize = 300;
+ char buf[maxSize];
+ snprintf(buf, maxSize, "%d (%s)", constUnion[i].getIConst(), "const int");
+
+ out.debug << buf << "\n";
+ }
+ break;
+ case EbtUint:
+ {
+ const int maxSize = 300;
+ char buf[maxSize];
+ snprintf(buf, maxSize, "%u (%s)", constUnion[i].getUConst(), "const uint");
+
+ out.debug << buf << "\n";
+ }
+ break;
+ case EbtInt64:
+ {
+ const int maxSize = 300;
+ char buf[maxSize];
+ snprintf(buf, maxSize, "%lld (%s)", constUnion[i].getI64Const(), "const int64_t");
+
+ out.debug << buf << "\n";
+ }
+ break;
+ case EbtUint64:
+ {
+ const int maxSize = 300;
+ char buf[maxSize];
+ snprintf(buf, maxSize, "%llu (%s)", constUnion[i].getU64Const(), "const uint64_t");
+
+ out.debug << buf << "\n";
+ }
+ break;
+ default:
+ out.info.message(EPrefixInternalError, "Unknown constant", node->getLoc());
+ break;
+ }
+ }
+}
+
+void TOutputTraverser::visitConstantUnion(TIntermConstantUnion* node)
+{
+ OutputTreeText(infoSink, node, depth);
+ infoSink.debug << "Constant:\n";
+
+ OutputConstantUnion(infoSink, node, node->getConstArray(), extraOutput, depth + 1);
+}
+
+void TOutputTraverser::visitSymbol(TIntermSymbol* node)
+{
+ OutputTreeText(infoSink, node, depth);
+
+ infoSink.debug << "'" << node->getName() << "' (" << node->getCompleteString() << ")\n";
+
+ if (! node->getConstArray().empty())
+ OutputConstantUnion(infoSink, node, node->getConstArray(), extraOutput, depth + 1);
+ else if (node->getConstSubtree()) {
+ incrementDepth(node);
+ node->getConstSubtree()->traverse(this);
+ decrementDepth();
+ }
+}
+
+bool TOutputTraverser::visitLoop(TVisit /* visit */, TIntermLoop* node)
+{
+ TInfoSink& out = infoSink;
+
+ OutputTreeText(out, node, depth);
+
+ out.debug << "Loop with condition ";
+ if (! node->testFirst())
+ out.debug << "not ";
+ out.debug << "tested first";
+
+ if (node->getUnroll())
+ out.debug << ": Unroll";
+ if (node->getDontUnroll())
+ out.debug << ": DontUnroll";
+ if (node->getLoopDependency()) {
+ out.debug << ": Dependency ";
+ out.debug << node->getLoopDependency();
+ }
+ out.debug << "\n";
+
+ ++depth;
+
+ OutputTreeText(infoSink, node, depth);
+ if (node->getTest()) {
+ out.debug << "Loop Condition\n";
+ node->getTest()->traverse(this);
+ } else
+ out.debug << "No loop condition\n";
+
+ OutputTreeText(infoSink, node, depth);
+ if (node->getBody()) {
+ out.debug << "Loop Body\n";
+ node->getBody()->traverse(this);
+ } else
+ out.debug << "No loop body\n";
+
+ if (node->getTerminal()) {
+ OutputTreeText(infoSink, node, depth);
+ out.debug << "Loop Terminal Expression\n";
+ node->getTerminal()->traverse(this);
+ }
+
+ --depth;
+
+ return false;
+}
+
+bool TOutputTraverser::visitBranch(TVisit /* visit */, TIntermBranch* node)
+{
+ TInfoSink& out = infoSink;
+
+ OutputTreeText(out, node, depth);
+
+ switch (node->getFlowOp()) {
+ case EOpKill: out.debug << "Branch: Kill"; break;
+ case EOpBreak: out.debug << "Branch: Break"; break;
+ case EOpContinue: out.debug << "Branch: Continue"; break;
+ case EOpReturn: out.debug << "Branch: Return"; break;
+ case EOpCase: out.debug << "case: "; break;
+ case EOpDefault: out.debug << "default: "; break;
+ default: out.debug << "Branch: Unknown Branch"; break;
+ }
+
+ if (node->getExpression()) {
+ out.debug << " with expression\n";
+ ++depth;
+ node->getExpression()->traverse(this);
+ --depth;
+ } else
+ out.debug << "\n";
+
+ return false;
+}
+
+bool TOutputTraverser::visitSwitch(TVisit /* visit */, TIntermSwitch* node)
+{
+ TInfoSink& out = infoSink;
+
+ OutputTreeText(out, node, depth);
+ out.debug << "switch";
+
+ if (node->getFlatten())
+ out.debug << ": Flatten";
+ if (node->getDontFlatten())
+ out.debug << ": DontFlatten";
+ out.debug << "\n";
+
+ OutputTreeText(out, node, depth);
+ out.debug << "condition\n";
+ ++depth;
+ node->getCondition()->traverse(this);
+
+ --depth;
+ OutputTreeText(out, node, depth);
+ out.debug << "body\n";
+ ++depth;
+ node->getBody()->traverse(this);
+
+ --depth;
+
+ return false;
+}
+
+//
+// This function is the one to call externally to start the traversal.
+// Individual functions can be initialized to 0 to skip processing of that
+// type of node. Its children will still be processed.
+//
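+// Typical use (a sketch; assumes a parsed TIntermediate and a TInfoSink):
+//     intermediate.output(infoSink, true);  // 'true' also dumps the tree
+//     puts(infoSink.debug.c_str());
+//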
+void TIntermediate::output(TInfoSink& infoSink, bool tree)
+{
+ infoSink.debug << "Shader version: " << version << "\n";
+ if (requestedExtensions.size() > 0) {
+ for (auto extIt = requestedExtensions.begin(); extIt != requestedExtensions.end(); ++extIt)
+ infoSink.debug << "Requested " << *extIt << "\n";
+ }
+
+ if (xfbMode)
+ infoSink.debug << "in xfb mode\n";
+
+ switch (language) {
+ case EShLangVertex:
+ break;
+
+ case EShLangTessControl:
+ infoSink.debug << "vertices = " << vertices << "\n";
+
+ if (inputPrimitive != ElgNone)
+ infoSink.debug << "input primitive = " << TQualifier::getGeometryString(inputPrimitive) << "\n";
+ if (vertexSpacing != EvsNone)
+ infoSink.debug << "vertex spacing = " << TQualifier::getVertexSpacingString(vertexSpacing) << "\n";
+ if (vertexOrder != EvoNone)
+ infoSink.debug << "triangle order = " << TQualifier::getVertexOrderString(vertexOrder) << "\n";
+ break;
+
+ case EShLangTessEvaluation:
+ infoSink.debug << "input primitive = " << TQualifier::getGeometryString(inputPrimitive) << "\n";
+ infoSink.debug << "vertex spacing = " << TQualifier::getVertexSpacingString(vertexSpacing) << "\n";
+ infoSink.debug << "triangle order = " << TQualifier::getVertexOrderString(vertexOrder) << "\n";
+ if (pointMode)
+ infoSink.debug << "using point mode\n";
+ break;
+
+ case EShLangGeometry:
+ infoSink.debug << "invocations = " << invocations << "\n";
+ infoSink.debug << "max_vertices = " << vertices << "\n";
+ infoSink.debug << "input primitive = " << TQualifier::getGeometryString(inputPrimitive) << "\n";
+ infoSink.debug << "output primitive = " << TQualifier::getGeometryString(outputPrimitive) << "\n";
+ break;
+
+ case EShLangFragment:
+ if (pixelCenterInteger)
+ infoSink.debug << "gl_FragCoord pixel center is integer\n";
+ if (originUpperLeft)
+ infoSink.debug << "gl_FragCoord origin is upper left\n";
+ if (earlyFragmentTests)
+ infoSink.debug << "using early_fragment_tests\n";
+ if (postDepthCoverage)
+ infoSink.debug << "using post_depth_coverage\n";
+ if (depthLayout != EldNone)
+ infoSink.debug << "using " << TQualifier::getLayoutDepthString(depthLayout) << "\n";
+ if (blendEquations != 0) {
+ infoSink.debug << "using";
+ // blendEquations is a mask, decode it
+ for (TBlendEquationShift be = (TBlendEquationShift)0; be < EBlendCount; be = (TBlendEquationShift)(be + 1)) {
+ if (blendEquations & (1 << be))
+ infoSink.debug << " " << TQualifier::getBlendEquationString(be);
+ }
+ infoSink.debug << "\n";
+ }
+ break;
+
+#ifdef NV_EXTENSIONS
+ case EShLangMeshNV:
+ infoSink.debug << "max_vertices = " << vertices << "\n";
+ infoSink.debug << "max_primitives = " << primitives << "\n";
+ infoSink.debug << "output primitive = " << TQualifier::getGeometryString(outputPrimitive) << "\n";
+ // Fall through
+
+ case EShLangTaskNV:
+ // Fall through
+#endif
+ case EShLangCompute:
+ infoSink.debug << "local_size = (" << localSize[0] << ", " << localSize[1] << ", " << localSize[2] << ")\n";
+ {
+ if (localSizeSpecId[0] != TQualifier::layoutNotSet ||
+ localSizeSpecId[1] != TQualifier::layoutNotSet ||
+ localSizeSpecId[2] != TQualifier::layoutNotSet) {
+ infoSink.debug << "local_size ids = (" <<
+ localSizeSpecId[0] << ", " <<
+ localSizeSpecId[1] << ", " <<
+ localSizeSpecId[2] << ")\n";
+ }
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ if (treeRoot == 0 || ! tree)
+ return;
+
+ TOutputTraverser it(infoSink);
+ if (getBinaryDoubleOutput())
+ it.setDoubleOutput(TOutputTraverser::BinaryDoubleOutput);
+ treeRoot->traverse(&it);
+}
+
+} // end namespace glslang
diff --git a/thirdparty/glslang/glslang/MachineIndependent/iomapper.cpp b/thirdparty/glslang/glslang/MachineIndependent/iomapper.cpp
new file mode 100644
index 0000000000..46c7558378
--- /dev/null
+++ b/thirdparty/glslang/glslang/MachineIndependent/iomapper.cpp
@@ -0,0 +1,818 @@
+//
+// Copyright (C) 2016-2017 LunarG, Inc.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+#include "../Include/Common.h"
+#include "../Include/InfoSink.h"
+#include "iomapper.h"
+#include "LiveTraverser.h"
+#include "localintermediate.h"
+
+#include "gl_types.h"
+
+#include <unordered_set>
+#include <unordered_map>
+
+//
+// Map IO bindings.
+//
+// High-level algorithm for one stage:
+//
+// 1. Traverse all code (live+dead) to find the explicitly provided bindings.
+//
+// 2. Traverse (just) the live code to determine which non-provided bindings
+// require auto-numbering. We do not auto-number dead ones.
+//
+// 3. Traverse all the code to apply the bindings:
+// a. explicitly given bindings are offset according to their type
+// b. implicit live bindings are auto-numbered into the holes, using
+// any open binding slot.
+// c. implicit dead bindings are left un-bound.
+//
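+// For illustration (hypothetical GLSL, with auto-mapping enabled):
+//
+//   layout(binding = 3) uniform sampler2D texA; // explicit: kept, offset per type (3a)
+//   uniform sampler2D texB; // live but unbound: auto-numbered into a free slot (3b)
+//   uniform sampler2D texC; // dead and unbound: left un-bound (3c)
+//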
+
+
+namespace glslang {
+
+struct TVarEntryInfo
+{
+ int id;
+ TIntermSymbol* symbol;
+ bool live;
+ int newBinding;
+ int newSet;
+ int newLocation;
+ int newComponent;
+ int newIndex;
+
+ struct TOrderById
+ {
+ inline bool operator()(const TVarEntryInfo& l, const TVarEntryInfo& r)
+ {
+ return l.id < r.id;
+ }
+ };
+
+ struct TOrderByPriority
+ {
+ // ordering:
+ // 1) has both binding and set
+ // 2) has binding but no set
+ // 3) has no binding but set
+ // 4) has no binding and no set
+ inline bool operator()(const TVarEntryInfo& l, const TVarEntryInfo& r)
+ {
+ const TQualifier& lq = l.symbol->getQualifier();
+ const TQualifier& rq = r.symbol->getQualifier();
+
+ // simple rules:
+ // has binding gives 2 points
+ // has set gives 1 point
+ // who has the most points is more important.
+ int lPoints = (lq.hasBinding() ? 2 : 0) + (lq.hasSet() ? 1 : 0);
+ int rPoints = (rq.hasBinding() ? 2 : 0) + (rq.hasSet() ? 1 : 0);
+
+ if (lPoints == rPoints)
+ return l.id < r.id;
+ return lPoints > rPoints;
+ }
+ };
+};
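+// TOrderByPriority illustration (hypothetical qualifiers):
+//   layout(set = 0, binding = 1) -> 3 points, sorted first
+//   layout(binding = 1)          -> 2 points
+//   layout(set = 0)              -> 1 point
+//   neither                      -> 0 points, sorted last; ties fall back to id order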
+
+
+
+typedef std::vector<TVarEntryInfo> TVarLiveMap;
+
+class TVarGatherTraverser : public TLiveTraverser
+{
+public:
+ TVarGatherTraverser(const TIntermediate& i, bool traverseDeadCode, TVarLiveMap& inList, TVarLiveMap& outList, TVarLiveMap& uniformList)
+ : TLiveTraverser(i, traverseDeadCode, true, true, false)
+ , inputList(inList)
+ , outputList(outList)
+ , uniformList(uniformList)
+ {
+ }
+
+
+ virtual void visitSymbol(TIntermSymbol* base)
+ {
+ TVarLiveMap* target = nullptr;
+ if (base->getQualifier().storage == EvqVaryingIn)
+ target = &inputList;
+ else if (base->getQualifier().storage == EvqVaryingOut)
+ target = &outputList;
+ else if (base->getQualifier().isUniformOrBuffer() && !base->getQualifier().layoutPushConstant)
+ target = &uniformList;
+
+ if (target) {
+ TVarEntryInfo ent = { base->getId(), base, !traverseAll };
+ TVarLiveMap::iterator at = std::lower_bound(target->begin(), target->end(), ent, TVarEntryInfo::TOrderById());
+ if (at != target->end() && at->id == ent.id)
+ at->live = at->live || !traverseAll; // update live state
+ else
+ target->insert(at, ent);
+ }
+ }
+
+private:
+ TVarLiveMap& inputList;
+ TVarLiveMap& outputList;
+ TVarLiveMap& uniformList;
+};
+
+class TVarSetTraverser : public TLiveTraverser
+{
+public:
+ TVarSetTraverser(const TIntermediate& i, const TVarLiveMap& inList, const TVarLiveMap& outList, const TVarLiveMap& uniformList)
+ : TLiveTraverser(i, true, true, true, false)
+ , inputList(inList)
+ , outputList(outList)
+ , uniformList(uniformList)
+ {
+ }
+
+
+ virtual void visitSymbol(TIntermSymbol* base)
+ {
+ const TVarLiveMap* source;
+ if (base->getQualifier().storage == EvqVaryingIn)
+ source = &inputList;
+ else if (base->getQualifier().storage == EvqVaryingOut)
+ source = &outputList;
+ else if (base->getQualifier().isUniformOrBuffer())
+ source = &uniformList;
+ else
+ return;
+
+ TVarEntryInfo ent = { base->getId() };
+ TVarLiveMap::const_iterator at = std::lower_bound(source->begin(), source->end(), ent, TVarEntryInfo::TOrderById());
+ if (at == source->end())
+ return;
+
+ if (at->id != ent.id)
+ return;
+
+ if (at->newBinding != -1)
+ base->getWritableType().getQualifier().layoutBinding = at->newBinding;
+ if (at->newSet != -1)
+ base->getWritableType().getQualifier().layoutSet = at->newSet;
+ if (at->newLocation != -1)
+ base->getWritableType().getQualifier().layoutLocation = at->newLocation;
+ if (at->newComponent != -1)
+ base->getWritableType().getQualifier().layoutComponent = at->newComponent;
+ if (at->newIndex != -1)
+ base->getWritableType().getQualifier().layoutIndex = at->newIndex;
+ }
+
+private:
+ const TVarLiveMap& inputList;
+ const TVarLiveMap& outputList;
+ const TVarLiveMap& uniformList;
+};
+
+struct TNotifyUniformAdaptor
+{
+ EShLanguage stage;
+ TIoMapResolver& resolver;
+ inline TNotifyUniformAdaptor(EShLanguage s, TIoMapResolver& r)
+ : stage(s)
+ , resolver(r)
+ {
+ }
+ inline void operator()(TVarEntryInfo& ent)
+ {
+ resolver.notifyBinding(stage, ent.symbol->getName().c_str(), ent.symbol->getType(), ent.live);
+ }
+private:
+ TNotifyUniformAdaptor& operator=(TNotifyUniformAdaptor&);
+};
+
+struct TNotifyInOutAdaptor
+{
+ EShLanguage stage;
+ TIoMapResolver& resolver;
+ inline TNotifyInOutAdaptor(EShLanguage s, TIoMapResolver& r)
+ : stage(s)
+ , resolver(r)
+ {
+ }
+ inline void operator()(TVarEntryInfo& ent)
+ {
+ resolver.notifyInOut(stage, ent.symbol->getName().c_str(), ent.symbol->getType(), ent.live);
+ }
+private:
+ TNotifyInOutAdaptor& operator=(TNotifyInOutAdaptor&);
+};
+
+struct TResolverUniformAdaptor
+{
+ TResolverUniformAdaptor(EShLanguage s, TIoMapResolver& r, TInfoSink& i, bool& e, TIntermediate& interm)
+ : stage(s)
+ , resolver(r)
+ , infoSink(i)
+ , error(e)
+ , intermediate(interm)
+ {
+ }
+
+ inline void operator()(TVarEntryInfo& ent)
+ {
+ ent.newLocation = -1;
+ ent.newComponent = -1;
+ ent.newBinding = -1;
+ ent.newSet = -1;
+ ent.newIndex = -1;
+ const bool isValid = resolver.validateBinding(stage, ent.symbol->getName().c_str(), ent.symbol->getType(),
+ ent.live);
+ if (isValid) {
+ ent.newBinding = resolver.resolveBinding(stage, ent.symbol->getName().c_str(), ent.symbol->getType(),
+ ent.live);
+ ent.newSet = resolver.resolveSet(stage, ent.symbol->getName().c_str(), ent.symbol->getType(), ent.live);
+ ent.newLocation = resolver.resolveUniformLocation(stage, ent.symbol->getName().c_str(),
+ ent.symbol->getType(), ent.live);
+
+ if (ent.newBinding != -1) {
+ if (ent.newBinding >= int(TQualifier::layoutBindingEnd)) {
+ TString err = "mapped binding out of range: " + ent.symbol->getName();
+
+ infoSink.info.message(EPrefixInternalError, err.c_str());
+ error = true;
+ }
+ }
+ if (ent.newSet != -1) {
+ if (ent.newSet >= int(TQualifier::layoutSetEnd)) {
+ TString err = "mapped set out of range: " + ent.symbol->getName();
+
+ infoSink.info.message(EPrefixInternalError, err.c_str());
+ error = true;
+ }
+ }
+ } else {
+ TString errorMsg = "Invalid binding: " + ent.symbol->getName();
+ infoSink.info.message(EPrefixInternalError, errorMsg.c_str());
+ error = true;
+ }
+ }
+
+ EShLanguage stage;
+ TIoMapResolver& resolver;
+ TInfoSink& infoSink;
+ bool& error;
+ TIntermediate& intermediate;
+
+private:
+ TResolverUniformAdaptor& operator=(TResolverUniformAdaptor&);
+};
+
+struct TResolverInOutAdaptor
+{
+ TResolverInOutAdaptor(EShLanguage s, TIoMapResolver& r, TInfoSink& i, bool& e, TIntermediate& interm)
+ : stage(s)
+ , resolver(r)
+ , infoSink(i)
+ , error(e)
+ , intermediate(interm)
+ {
+ }
+
+ inline void operator()(TVarEntryInfo& ent)
+ {
+ ent.newLocation = -1;
+ ent.newComponent = -1;
+ ent.newBinding = -1;
+ ent.newSet = -1;
+ ent.newIndex = -1;
+ const bool isValid = resolver.validateInOut(stage,
+ ent.symbol->getName().c_str(),
+ ent.symbol->getType(),
+ ent.live);
+ if (isValid) {
+ ent.newLocation = resolver.resolveInOutLocation(stage,
+ ent.symbol->getName().c_str(),
+ ent.symbol->getType(),
+ ent.live);
+ ent.newComponent = resolver.resolveInOutComponent(stage,
+ ent.symbol->getName().c_str(),
+ ent.symbol->getType(),
+ ent.live);
+ ent.newIndex = resolver.resolveInOutIndex(stage,
+ ent.symbol->getName().c_str(),
+ ent.symbol->getType(),
+ ent.live);
+ } else {
+ TString errorMsg;
+ if (ent.symbol->getType().getQualifier().semanticName != nullptr) {
+ errorMsg = "Invalid shader In/Out variable semantic: ";
+ errorMsg += ent.symbol->getType().getQualifier().semanticName;
+ } else {
+ errorMsg = "Invalid shader In/Out variable: ";
+ errorMsg += ent.symbol->getName();
+ }
+ infoSink.info.message(EPrefixInternalError, errorMsg.c_str());
+ error = true;
+ }
+ }
+
+ EShLanguage stage;
+ TIoMapResolver& resolver;
+ TInfoSink& infoSink;
+ bool& error;
+ TIntermediate& intermediate;
+
+private:
+ TResolverInOutAdaptor& operator=(TResolverInOutAdaptor&);
+};
+
+// Base class for shared TIoMapResolver services, used by several derivations.
+struct TDefaultIoResolverBase : public glslang::TIoMapResolver
+{
+ TDefaultIoResolverBase(const TIntermediate &intermediate) :
+ intermediate(intermediate),
+ nextUniformLocation(intermediate.getUniformLocationBase()),
+ nextInputLocation(0),
+ nextOutputLocation(0)
+ { }
+
+ int getBaseBinding(TResourceType res, unsigned int set) const {
+ return selectBaseBinding(intermediate.getShiftBinding(res),
+ intermediate.getShiftBindingForSet(res, set));
+ }
+
+ const std::vector<std::string>& getResourceSetBinding() const { return intermediate.getResourceSetBinding(); }
+
+ bool doAutoBindingMapping() const { return intermediate.getAutoMapBindings(); }
+ bool doAutoLocationMapping() const { return intermediate.getAutoMapLocations(); }
+
+ typedef std::vector<int> TSlotSet;
+ typedef std::unordered_map<int, TSlotSet> TSlotSetMap;
+ TSlotSetMap slots;
+
+ TSlotSet::iterator findSlot(int set, int slot)
+ {
+ return std::lower_bound(slots[set].begin(), slots[set].end(), slot);
+ }
+
+ bool checkEmpty(int set, int slot)
+ {
+ TSlotSet::iterator at = findSlot(set, slot);
+ return !(at != slots[set].end() && *at == slot);
+ }
+
+ int reserveSlot(int set, int slot, int size = 1)
+ {
+ TSlotSet::iterator at = findSlot(set, slot);
+
+ // tolerate aliasing, by not double-recording aliases
+ // (policy about appropriateness of the alias is higher up)
+ for (int i = 0; i < size; i++) {
+ if (at == slots[set].end() || *at != slot + i)
+ at = slots[set].insert(at, slot + i);
+ ++at;
+ }
+
+ return slot;
+ }
+
+ int getFreeSlot(int set, int base, int size = 1)
+ {
+ TSlotSet::iterator at = findSlot(set, base);
+ if (at == slots[set].end())
+ return reserveSlot(set, base, size);
+
+ // look for a big enough gap
+ for (; at != slots[set].end(); ++at) {
+ if (*at - base >= size)
+ break;
+ base = *at + 1;
+ }
+ return reserveSlot(set, base, size);
+ }
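+
+ // Worked example (hypothetical): with slots[set] == {0, 1, 4},
+ // getFreeSlot(set, 0, 2) walks past 0 and 1 (advancing base to 2),
+ // finds the gap 2..3 before 4 large enough, reserves it, and returns 2.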
+
+ virtual bool validateBinding(EShLanguage /*stage*/, const char* /*name*/, const glslang::TType& type, bool /*is_live*/) override = 0;
+
+ virtual int resolveBinding(EShLanguage /*stage*/, const char* /*name*/, const glslang::TType& type, bool is_live) override = 0;
+
+ int resolveSet(EShLanguage /*stage*/, const char* /*name*/, const glslang::TType& type, bool /*is_live*/) override
+ {
+ if (type.getQualifier().hasSet())
+ return type.getQualifier().layoutSet;
+
+ // If a command line or API option requested a single descriptor set, use that (if not overridden by spaceN)
+ if (getResourceSetBinding().size() == 1)
+ return atoi(getResourceSetBinding()[0].c_str());
+
+ return 0;
+ }
+ int resolveUniformLocation(EShLanguage /*stage*/, const char* name, const glslang::TType& type, bool /*is_live*/) override
+ {
+ // kick out if not doing this
+ if (!doAutoLocationMapping())
+ return -1;
+
+ // no locations added if already present, a built-in variable, a block, or an opaque
+ if (type.getQualifier().hasLocation() || type.isBuiltIn() ||
+ type.getBasicType() == EbtBlock ||
+ type.getBasicType() == EbtAtomicUint ||
+ (type.containsOpaque() && intermediate.getSpv().openGl == 0))
+ return -1;
+
+ // no locations on blocks of built-in variables
+ if (type.isStruct()) {
+ if (type.getStruct()->size() < 1)
+ return -1;
+ if ((*type.getStruct())[0].type->isBuiltIn())
+ return -1;
+ }
+
+ int location = intermediate.getUniformLocationOverride(name);
+ if (location != -1)
+ return location;
+
+ location = nextUniformLocation;
+
+ nextUniformLocation += TIntermediate::computeTypeUniformLocationSize(type);
+
+ return location;
+ }
+ bool validateInOut(EShLanguage /*stage*/, const char* /*name*/, const TType& /*type*/, bool /*is_live*/) override
+ {
+ return true;
+ }
+ int resolveInOutLocation(EShLanguage stage, const char* /*name*/, const TType& type, bool /*is_live*/) override
+ {
+ // kick out if not doing this
+ if (!doAutoLocationMapping())
+ return -1;
+
+ // no locations added if already present, or a built-in variable
+ if (type.getQualifier().hasLocation() || type.isBuiltIn())
+ return -1;
+
+ // no locations on blocks of built-in variables
+ if (type.isStruct()) {
+ if (type.getStruct()->size() < 1)
+ return -1;
+ if ((*type.getStruct())[0].type->isBuiltIn())
+ return -1;
+ }
+
+ // point to the right input or output location counter
+ int& nextLocation = type.getQualifier().isPipeInput() ? nextInputLocation : nextOutputLocation;
+
+ // Placeholder. This does not do proper cross-stage lining up, nor
+ // work with mixed location/no-location declarations.
+ int location = nextLocation;
+ int typeLocationSize;
+ // Don't take into account the outer-most array if the stage's
+ // interface is automatically an array.
+ if (type.getQualifier().isArrayedIo(stage)) {
+ TType elementType(type, 0);
+ typeLocationSize = TIntermediate::computeTypeLocationSize(elementType, stage);
+ } else {
+ typeLocationSize = TIntermediate::computeTypeLocationSize(type, stage);
+ }
+ nextLocation += typeLocationSize;
+
+ return location;
+ }
+ int resolveInOutComponent(EShLanguage /*stage*/, const char* /*name*/, const TType& /*type*/, bool /*is_live*/) override
+ {
+ return -1;
+ }
+ int resolveInOutIndex(EShLanguage /*stage*/, const char* /*name*/, const TType& /*type*/, bool /*is_live*/) override
+ {
+ return -1;
+ }
+
+ void notifyBinding(EShLanguage, const char* /*name*/, const TType&, bool /*is_live*/) override {}
+ void notifyInOut(EShLanguage, const char* /*name*/, const TType&, bool /*is_live*/) override {}
+ void endNotifications(EShLanguage) override {}
+ void beginNotifications(EShLanguage) override {}
+ void beginResolve(EShLanguage) override {}
+ void endResolve(EShLanguage) override {}
+
+protected:
+ TDefaultIoResolverBase(TDefaultIoResolverBase&);
+ TDefaultIoResolverBase& operator=(TDefaultIoResolverBase&);
+
+ const TIntermediate &intermediate;
+ int nextUniformLocation;
+ int nextInputLocation;
+ int nextOutputLocation;
+
+ // Return descriptor set specific base if there is one, and the generic base otherwise.
+ int selectBaseBinding(int base, int descriptorSetBase) const {
+ return descriptorSetBase != -1 ? descriptorSetBase : base;
+ }
+
+ static int getLayoutSet(const glslang::TType& type) {
+ if (type.getQualifier().hasSet())
+ return type.getQualifier().layoutSet;
+ else
+ return 0;
+ }
+
+ static bool isSamplerType(const glslang::TType& type) {
+ return type.getBasicType() == glslang::EbtSampler && type.getSampler().isPureSampler();
+ }
+
+ static bool isTextureType(const glslang::TType& type) {
+ return (type.getBasicType() == glslang::EbtSampler &&
+ (type.getSampler().isTexture() || type.getSampler().isSubpass()));
+ }
+
+ static bool isUboType(const glslang::TType& type) {
+ return type.getQualifier().storage == EvqUniform;
+ }
+};
+
+/*
+ * Default resolver: a basic implementation of glslang::TIoMapResolver
+ * that replaces the previous offset behavior. It does the same thing,
+ * using the offsets for the corresponding uniform types, and also
+ * respects the EOptionAutoMapBindings flag, binding variables when
+ * needed.
+ */
+struct TDefaultIoResolver : public TDefaultIoResolverBase
+{
+ TDefaultIoResolver(const TIntermediate &intermediate) : TDefaultIoResolverBase(intermediate) { }
+
+ bool validateBinding(EShLanguage /*stage*/, const char* /*name*/, const glslang::TType& /*type*/, bool /*is_live*/) override
+ {
+ return true;
+ }
+
+ int resolveBinding(EShLanguage /*stage*/, const char* /*name*/, const glslang::TType& type, bool is_live) override
+ {
+ const int set = getLayoutSet(type);
+ // On OpenGL, arrays of opaque types take a separate binding for each element
+ int numBindings = intermediate.getSpv().openGl != 0 && type.isSizedArray() ? type.getCumulativeArraySize() : 1;
+
+ if (type.getQualifier().hasBinding()) {
+ if (isImageType(type))
+ return reserveSlot(set, getBaseBinding(EResImage, set) + type.getQualifier().layoutBinding, numBindings);
+
+ if (isTextureType(type))
+ return reserveSlot(set, getBaseBinding(EResTexture, set) + type.getQualifier().layoutBinding, numBindings);
+
+ if (isSsboType(type))
+ return reserveSlot(set, getBaseBinding(EResSsbo, set) + type.getQualifier().layoutBinding, numBindings);
+
+ if (isSamplerType(type))
+ return reserveSlot(set, getBaseBinding(EResSampler, set) + type.getQualifier().layoutBinding, numBindings);
+
+ if (isUboType(type))
+ return reserveSlot(set, getBaseBinding(EResUbo, set) + type.getQualifier().layoutBinding, numBindings);
+ } else if (is_live && doAutoBindingMapping()) {
+ // find a free slot; the caller made sure all vars with an explicit binding
+ // were passed first, so only vars that still need a binding arrive here
+
+ if (isImageType(type))
+ return getFreeSlot(set, getBaseBinding(EResImage, set), numBindings);
+
+ if (isTextureType(type))
+ return getFreeSlot(set, getBaseBinding(EResTexture, set), numBindings);
+
+ if (isSsboType(type))
+ return getFreeSlot(set, getBaseBinding(EResSsbo, set), numBindings);
+
+ if (isSamplerType(type))
+ return getFreeSlot(set, getBaseBinding(EResSampler, set), numBindings);
+
+ if (isUboType(type))
+ return getFreeSlot(set, getBaseBinding(EResUbo, set), numBindings);
+ }
+
+ return -1;
+ }
+
+protected:
+ static bool isImageType(const glslang::TType& type) {
+ return type.getBasicType() == glslang::EbtSampler && type.getSampler().isImage();
+ }
+
+ static bool isSsboType(const glslang::TType& type) {
+ return type.getQualifier().storage == EvqBuffer;
+ }
+};
+
+/********************************************************************************
+The following IO resolver maps types in HLSL register space, as follows:
+
+t - for shader resource views (SRV)
+ TEXTURE1D
+ TEXTURE1DARRAY
+ TEXTURE2D
+ TEXTURE2DARRAY
+ TEXTURE3D
+ TEXTURECUBE
+ TEXTURECUBEARRAY
+ TEXTURE2DMS
+ TEXTURE2DMSARRAY
+ STRUCTUREDBUFFER
+ BYTEADDRESSBUFFER
+ BUFFER
+ TBUFFER
+
+s - for samplers
+ SAMPLER
+ SAMPLER1D
+ SAMPLER2D
+ SAMPLER3D
+ SAMPLERCUBE
+ SAMPLERSTATE
+ SAMPLERCOMPARISONSTATE
+
+u - for unordered access views (UAV)
+ RWBYTEADDRESSBUFFER
+ RWSTRUCTUREDBUFFER
+ APPENDSTRUCTUREDBUFFER
+ CONSUMESTRUCTUREDBUFFER
+ RWBUFFER
+ RWTEXTURE1D
+ RWTEXTURE1DARRAY
+ RWTEXTURE2D
+ RWTEXTURE2DARRAY
+ RWTEXTURE3D
+
+b - for constant buffer views (CBV)
+ CBUFFER
+ CONSTANTBUFFER
+ ********************************************************************************/
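+
+// Illustrative HLSL declarations and the register class each one resolves to
+// (hypothetical names):
+//
+//   Texture2D diffuse : register(t0); // SRV -> EResTexture
+//   SamplerState samp : register(s0); // sampler -> EResSampler
+//   RWTexture2D<float4> img : register(u1); // UAV -> EResUav
+//   cbuffer Params : register(b2) { float4 tint; }; // CBV -> EResUbo
+//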
+struct TDefaultHlslIoResolver : public TDefaultIoResolverBase
+{
+ TDefaultHlslIoResolver(const TIntermediate &intermediate) : TDefaultIoResolverBase(intermediate) { }
+
+ bool validateBinding(EShLanguage /*stage*/, const char* /*name*/, const glslang::TType& /*type*/, bool /*is_live*/) override
+ {
+ return true;
+ }
+
+ int resolveBinding(EShLanguage /*stage*/, const char* /*name*/, const glslang::TType& type, bool is_live) override
+ {
+ const int set = getLayoutSet(type);
+
+ if (type.getQualifier().hasBinding()) {
+ if (isUavType(type))
+ return reserveSlot(set, getBaseBinding(EResUav, set) + type.getQualifier().layoutBinding);
+
+ if (isSrvType(type))
+ return reserveSlot(set, getBaseBinding(EResTexture, set) + type.getQualifier().layoutBinding);
+
+ if (isSamplerType(type))
+ return reserveSlot(set, getBaseBinding(EResSampler, set) + type.getQualifier().layoutBinding);
+
+ if (isUboType(type))
+ return reserveSlot(set, getBaseBinding(EResUbo, set) + type.getQualifier().layoutBinding);
+ } else if (is_live && doAutoBindingMapping()) {
+ // find a free slot; the caller made sure all vars with an explicit binding
+ // were passed first, so only vars that still need a binding arrive here
+
+ if (isUavType(type))
+ return getFreeSlot(set, getBaseBinding(EResUav, set));
+
+ if (isSrvType(type))
+ return getFreeSlot(set, getBaseBinding(EResTexture, set));
+
+ if (isSamplerType(type))
+ return getFreeSlot(set, getBaseBinding(EResSampler, set));
+
+ if (isUboType(type))
+ return getFreeSlot(set, getBaseBinding(EResUbo, set));
+ }
+
+ return -1;
+ }
+
+protected:
+ // Return true if this is a SRV (shader resource view) type:
+ static bool isSrvType(const glslang::TType& type) {
+ return isTextureType(type) || type.getQualifier().storage == EvqBuffer;
+ }
+
+ // Return true if this is a UAV (unordered access view) type:
+ static bool isUavType(const glslang::TType& type) {
+ if (type.getQualifier().readonly)
+ return false;
+
+ return (type.getBasicType() == glslang::EbtSampler && type.getSampler().isImage()) ||
+ (type.getQualifier().storage == EvqBuffer);
+ }
+};
+
+
+// Map I/O variables to provided offsets, and make bindings for
+// unbound but live variables.
+//
+// Returns false if the input is too malformed to do this.
+bool TIoMapper::addStage(EShLanguage stage, TIntermediate &intermediate, TInfoSink &infoSink, TIoMapResolver *resolver)
+{
+ bool somethingToDo = !intermediate.getResourceSetBinding().empty() ||
+ intermediate.getAutoMapBindings() ||
+ intermediate.getAutoMapLocations();
+
+ for (int res = 0; res < EResCount; ++res) {
+ somethingToDo = somethingToDo ||
+ (intermediate.getShiftBinding(TResourceType(res)) != 0) ||
+ intermediate.hasShiftBindingForSet(TResourceType(res));
+ }
+
+ if (!somethingToDo && resolver == nullptr)
+ return true;
+
+ if (intermediate.getNumEntryPoints() != 1 || intermediate.isRecursive())
+ return false;
+
+ TIntermNode* root = intermediate.getTreeRoot();
+ if (root == nullptr)
+ return false;
+
+ // if no resolver is provided, use the default resolver with the given shifts and auto map settings
+ TDefaultIoResolver defaultResolver(intermediate);
+ TDefaultHlslIoResolver defaultHlslResolver(intermediate);
+
+ if (resolver == nullptr) {
+ // TODO: use a passed in IO mapper for this
+ if (intermediate.usingHlslIoMapping())
+ resolver = &defaultHlslResolver;
+ else
+ resolver = &defaultResolver;
+ }
+
+ TVarLiveMap inVarMap, outVarMap, uniformVarMap;
+ TVarGatherTraverser iter_binding_all(intermediate, true, inVarMap, outVarMap, uniformVarMap);
+ TVarGatherTraverser iter_binding_live(intermediate, false, inVarMap, outVarMap, uniformVarMap);
+
+ root->traverse(&iter_binding_all);
+ iter_binding_live.pushFunction(intermediate.getEntryPointMangledName().c_str());
+
+ while (!iter_binding_live.functions.empty()) {
+ TIntermNode* function = iter_binding_live.functions.back();
+ iter_binding_live.functions.pop_back();
+ function->traverse(&iter_binding_live);
+ }
+
+ // sort entries by priority. see TVarEntryInfo::TOrderByPriority for info.
+ std::sort(uniformVarMap.begin(), uniformVarMap.end(), TVarEntryInfo::TOrderByPriority());
+
+ bool hadError = false;
+ TNotifyInOutAdaptor inOutNotify(stage, *resolver);
+ TNotifyUniformAdaptor uniformNotify(stage, *resolver);
+ TResolverUniformAdaptor uniformResolve(stage, *resolver, infoSink, hadError, intermediate);
+ TResolverInOutAdaptor inOutResolve(stage, *resolver, infoSink, hadError, intermediate);
+ resolver->beginNotifications(stage);
+ std::for_each(inVarMap.begin(), inVarMap.end(), inOutNotify);
+ std::for_each(outVarMap.begin(), outVarMap.end(), inOutNotify);
+ std::for_each(uniformVarMap.begin(), uniformVarMap.end(), uniformNotify);
+ resolver->endNotifications(stage);
+ resolver->beginResolve(stage);
+ std::for_each(inVarMap.begin(), inVarMap.end(), inOutResolve);
+ std::for_each(outVarMap.begin(), outVarMap.end(), inOutResolve);
+ std::for_each(uniformVarMap.begin(), uniformVarMap.end(), uniformResolve);
+ resolver->endResolve(stage);
+
+ if (!hadError) {
+ // sort by id again, so we can use lower bound to find entries
+ std::sort(uniformVarMap.begin(), uniformVarMap.end(), TVarEntryInfo::TOrderById());
+ TVarSetTraverser iter_iomap(intermediate, inVarMap, outVarMap, uniformVarMap);
+ root->traverse(&iter_iomap);
+ }
+
+ return !hadError;
+}
+
+} // end namespace glslang
diff --git a/thirdparty/glslang/glslang/MachineIndependent/iomapper.h b/thirdparty/glslang/glslang/MachineIndependent/iomapper.h
new file mode 100644
index 0000000000..5e0d4391cc
--- /dev/null
+++ b/thirdparty/glslang/glslang/MachineIndependent/iomapper.h
@@ -0,0 +1,63 @@
+//
+// Copyright (C) 2016 LunarG, Inc.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+#ifndef _IOMAPPER_INCLUDED
+#define _IOMAPPER_INCLUDED
+
+#include "../Public/ShaderLang.h"
+
+//
+// A reflection database and its interface, consistent with the OpenGL API reflection queries.
+//
+
+class TInfoSink;
+
+namespace glslang {
+
+class TIntermediate;
+
+// I/O mapper
+class TIoMapper {
+public:
+ TIoMapper() {}
+ virtual ~TIoMapper() {}
+
+ // grow the reflection database stage by stage
+ bool addStage(EShLanguage, TIntermediate&, TInfoSink&, TIoMapResolver*);
+};
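+
+// Hypothetical usage sketch: map the I/O of one stage, letting a null
+// resolver select the built-in default resolver.
+//
+//   TIoMapper ioMapper;
+//   bool ok = ioMapper.addStage(EShLangVertex, intermediate, infoSink, nullptr);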
+
+} // end namespace glslang
+
+#endif // _IOMAPPER_INCLUDED
diff --git a/thirdparty/glslang/glslang/MachineIndependent/limits.cpp b/thirdparty/glslang/glslang/MachineIndependent/limits.cpp
new file mode 100644
index 0000000000..64d191b472
--- /dev/null
+++ b/thirdparty/glslang/glslang/MachineIndependent/limits.cpp
@@ -0,0 +1,198 @@
+//
+// Copyright (C) 2013 LunarG, Inc.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+//
+// Do sub-tree walks for
+// 1) inductive loop bodies to see if the inductive variable is modified
+// 2) array-index expressions to see if they are "constant-index-expression"
+//
+// These are per Appendix A of ES 2.0:
+//
+// "Within the body of the loop, the loop index is not statically assigned to nor is it used as the
+// argument to a function out or inout parameter."
+//
+// "The following are constant-index-expressions:
+// - Constant expressions
+// - Loop indices as defined in section 4
+// - Expressions composed of both of the above"
+//
+// N.B.: assuming the last rule excludes function calls
+//
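+//
+// Illustrative GLSL (hypothetical) against these rules:
+//
+//   for (int i = 0; i < 4; ++i) {
+//       i += 2;      // flagged: the loop index is statically assigned to
+//       modify(i);   // flagged if modify() takes its argument as out/inout
+//       v = a[i];    // fine: a loop index is a constant-index-expression
+//       v = a[f(i)]; // flagged: function calls are assumed non-constant
+//   }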
+
+#include "ParseHelper.h"
+
+namespace glslang {
+
+//
+// The inductive loop-body traverser.
+//
+// Just look at things that might modify the loop index.
+//
+
+class TInductiveTraverser : public TIntermTraverser {
+public:
+ TInductiveTraverser(int id, TSymbolTable& st)
+ : loopId(id), symbolTable(st), bad(false) { }
+
+ virtual bool visitBinary(TVisit, TIntermBinary* node);
+ virtual bool visitUnary(TVisit, TIntermUnary* node);
+ virtual bool visitAggregate(TVisit, TIntermAggregate* node);
+
+ int loopId; // unique ID of the symbol that's the loop inductive variable
+ TSymbolTable& symbolTable;
+ bool bad;
+ TSourceLoc badLoc;
+
+protected:
+ TInductiveTraverser(TInductiveTraverser&);
+ TInductiveTraverser& operator=(TInductiveTraverser&);
+};
+
+// check binary operations for those modifying the loop index
+bool TInductiveTraverser::visitBinary(TVisit /* visit */, TIntermBinary* node)
+{
+ if (node->modifiesState() && node->getLeft()->getAsSymbolNode() &&
+ node->getLeft()->getAsSymbolNode()->getId() == loopId) {
+ bad = true;
+ badLoc = node->getLoc();
+ }
+
+ return true;
+}
+
+// check unary operations for those modifying the loop index
+bool TInductiveTraverser::visitUnary(TVisit /* visit */, TIntermUnary* node)
+{
+ if (node->modifiesState() && node->getOperand()->getAsSymbolNode() &&
+ node->getOperand()->getAsSymbolNode()->getId() == loopId) {
+ bad = true;
+ badLoc = node->getLoc();
+ }
+
+ return true;
+}
+
+// check function calls for arguments modifying the loop index
+bool TInductiveTraverser::visitAggregate(TVisit /* visit */, TIntermAggregate* node)
+{
+ if (node->getOp() == EOpFunctionCall) {
+ // see if an out or inout argument is the loop index
+ const TIntermSequence& args = node->getSequence();
+ for (int i = 0; i < (int)args.size(); ++i) {
+ if (args[i]->getAsSymbolNode() && args[i]->getAsSymbolNode()->getId() == loopId) {
+ TSymbol* function = symbolTable.find(node->getName());
+ const TType* type = (*function->getAsFunction())[i].type;
+ if (type->getQualifier().storage == EvqOut ||
+ type->getQualifier().storage == EvqInOut) {
+ bad = true;
+ badLoc = node->getLoc();
+ }
+ }
+ }
+ }
+
+ return true;
+}
+
+//
+// External function to call for loop check.
+//
+void TParseContext::inductiveLoopBodyCheck(TIntermNode* body, int loopId, TSymbolTable& symbolTable)
+{
+ TInductiveTraverser it(loopId, symbolTable);
+
+ if (body == nullptr)
+ return;
+
+ body->traverse(&it);
+
+ if (it.bad)
+ error(it.badLoc, "inductive loop index modified", "limitations", "");
+}
+
+//
+// The "constant-index-expression" tranverser.
+//
+// Just look at things that can form an index.
+//
+
+class TIndexTraverser : public TIntermTraverser {
+public:
+ TIndexTraverser(const TIdSetType& ids) : inductiveLoopIds(ids), bad(false) { }
+ virtual void visitSymbol(TIntermSymbol* symbol);
+ virtual bool visitAggregate(TVisit, TIntermAggregate* node);
+ const TIdSetType& inductiveLoopIds;
+ bool bad;
+ TSourceLoc badLoc;
+
+protected:
+ TIndexTraverser(TIndexTraverser&);
+ TIndexTraverser& operator=(TIndexTraverser&);
+};
+
+// make sure symbols are inductive-loop indexes
+void TIndexTraverser::visitSymbol(TIntermSymbol* symbol)
+{
+ if (inductiveLoopIds.find(symbol->getId()) == inductiveLoopIds.end()) {
+ bad = true;
+ badLoc = symbol->getLoc();
+ }
+}
+
+// check for function calls, assuming they are bad; the spec doesn't really say
+bool TIndexTraverser::visitAggregate(TVisit /* visit */, TIntermAggregate* node)
+{
+ if (node->getOp() == EOpFunctionCall) {
+ bad = true;
+ badLoc = node->getLoc();
+ }
+
+ return true;
+}
+
+//
+// External function to call for loop check.
+//
+void TParseContext::constantIndexExpressionCheck(TIntermNode* index)
+{
+ TIndexTraverser it(inductiveLoopIds);
+
+ index->traverse(&it);
+
+ if (it.bad)
+ error(it.badLoc, "Non-constant-index-expression", "limitations", "");
+}
+
+} // end namespace glslang
diff --git a/thirdparty/glslang/glslang/MachineIndependent/linkValidate.cpp b/thirdparty/glslang/glslang/MachineIndependent/linkValidate.cpp
new file mode 100644
index 0000000000..f935d4a629
--- /dev/null
+++ b/thirdparty/glslang/glslang/MachineIndependent/linkValidate.cpp
@@ -0,0 +1,1756 @@
+//
+// Copyright (C) 2013 LunarG, Inc.
+// Copyright (C) 2017 ARM Limited.
+// Copyright (C) 2015-2018 Google, Inc.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+//
+// Do link-time merging and validation of intermediate representations.
+//
+// Basic model is that during compilation, each compilation unit (shader) is
+// compiled into one TIntermediate instance. Then, at link time, multiple
+// units for the same stage can be merged together, which can generate errors.
+// Then, after all merging, a single instance of TIntermediate represents
+// the whole stage. A final error check can be done on the resulting stage,
+// even if no merging was done (i.e., the stage was only one compilation unit).
+//
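+//
+// Hypothetical sketch of that model for one stage:
+//
+//   TIntermediate stage(EShLangFragment);
+//   stage.merge(infoSink, unitA); // merge compilation units...
+//   stage.merge(infoSink, unitB);
+//   stage.finalCheck(infoSink, false); // ...then whole-stage error checking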
+
+#include "localintermediate.h"
+#include "../Include/InfoSink.h"
+
+namespace glslang {
+
+//
+// Link-time error emitter.
+//
+void TIntermediate::error(TInfoSink& infoSink, const char* message)
+{
+ infoSink.info.prefix(EPrefixError);
+ infoSink.info << "Linking " << StageName(language) << " stage: " << message << "\n";
+
+ ++numErrors;
+}
+
+// Link-time warning.
+void TIntermediate::warn(TInfoSink& infoSink, const char* message)
+{
+ infoSink.info.prefix(EPrefixWarning);
+ infoSink.info << "Linking " << StageName(language) << " stage: " << message << "\n";
+}
+
+// TODO: 4.4 offset/align: "Two blocks linked together in the same program with the same block
+// name must have the exact same set of members qualified with offset and their integral-constant
+// expression values must be the same, or a link-time error results."
+
+//
+// Merge the information from 'unit' into 'this'
+//
+void TIntermediate::merge(TInfoSink& infoSink, TIntermediate& unit)
+{
+ mergeCallGraphs(infoSink, unit);
+ mergeModes(infoSink, unit);
+ mergeTrees(infoSink, unit);
+}
+
+void TIntermediate::mergeCallGraphs(TInfoSink& infoSink, TIntermediate& unit)
+{
+ if (unit.getNumEntryPoints() > 0) {
+ if (getNumEntryPoints() > 0)
+ error(infoSink, "can't handle multiple entry points per stage");
+ else {
+ entryPointName = unit.getEntryPointName();
+ entryPointMangledName = unit.getEntryPointMangledName();
+ }
+ }
+ numEntryPoints += unit.getNumEntryPoints();
+
+ callGraph.insert(callGraph.end(), unit.callGraph.begin(), unit.callGraph.end());
+}
+
+#define MERGE_MAX(member) member = std::max(member, unit.member)
+#define MERGE_TRUE(member) if (unit.member) member = unit.member;
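+// e.g. MERGE_TRUE(xfbMode) expands to: if (unit.xfbMode) xfbMode = unit.xfbMode;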
+
+void TIntermediate::mergeModes(TInfoSink& infoSink, TIntermediate& unit)
+{
+ if (language != unit.language)
+ error(infoSink, "stages must match when linking into a single stage");
+
+ if (source == EShSourceNone)
+ source = unit.source;
+ if (source != unit.source)
+ error(infoSink, "can't link compilation units from different source languages");
+
+ if (treeRoot == nullptr) {
+ profile = unit.profile;
+ version = unit.version;
+ requestedExtensions = unit.requestedExtensions;
+ } else {
+ if ((profile == EEsProfile) != (unit.profile == EEsProfile))
+ error(infoSink, "Cannot cross link ES and desktop profiles");
+ else if (unit.profile == ECompatibilityProfile)
+ profile = ECompatibilityProfile;
+ version = std::max(version, unit.version);
+ requestedExtensions.insert(unit.requestedExtensions.begin(), unit.requestedExtensions.end());
+ }
+
+ MERGE_MAX(spvVersion.spv);
+ MERGE_MAX(spvVersion.vulkanGlsl);
+ MERGE_MAX(spvVersion.vulkan);
+ MERGE_MAX(spvVersion.openGl);
+
+ numErrors += unit.getNumErrors();
+ numPushConstants += unit.numPushConstants;
+
+ if (unit.invocations != TQualifier::layoutNotSet) {
+ if (invocations == TQualifier::layoutNotSet)
+ invocations = unit.invocations;
+ else if (invocations != unit.invocations)
+ error(infoSink, "number of invocations must match between compilation units");
+ }
+
+ if (vertices == TQualifier::layoutNotSet)
+ vertices = unit.vertices;
+ else if (vertices != unit.vertices) {
+ if (language == EShLangGeometry
+#ifdef NV_EXTENSIONS
+ || language == EShLangMeshNV
+#endif
+ )
+ error(infoSink, "Contradictory layout max_vertices values");
+ else if (language == EShLangTessControl)
+ error(infoSink, "Contradictory layout vertices values");
+ else
+ assert(0);
+ }
+#ifdef NV_EXTENSIONS
+ if (primitives == TQualifier::layoutNotSet)
+ primitives = unit.primitives;
+ else if (primitives != unit.primitives) {
+ if (language == EShLangMeshNV)
+ error(infoSink, "Contradictory layout max_primitives values");
+ else
+ assert(0);
+ }
+#endif
+
+ if (inputPrimitive == ElgNone)
+ inputPrimitive = unit.inputPrimitive;
+ else if (inputPrimitive != unit.inputPrimitive)
+ error(infoSink, "Contradictory input layout primitives");
+
+ if (outputPrimitive == ElgNone)
+ outputPrimitive = unit.outputPrimitive;
+ else if (outputPrimitive != unit.outputPrimitive)
+ error(infoSink, "Contradictory output layout primitives");
+
+ if (originUpperLeft != unit.originUpperLeft || pixelCenterInteger != unit.pixelCenterInteger)
+ error(infoSink, "gl_FragCoord redeclarations must match across shaders");
+
+ if (vertexSpacing == EvsNone)
+ vertexSpacing = unit.vertexSpacing;
+ else if (vertexSpacing != unit.vertexSpacing)
+ error(infoSink, "Contradictory input vertex spacing");
+
+ if (vertexOrder == EvoNone)
+ vertexOrder = unit.vertexOrder;
+ else if (vertexOrder != unit.vertexOrder)
+ error(infoSink, "Contradictory triangle ordering");
+
+ MERGE_TRUE(pointMode);
+
+ for (int i = 0; i < 3; ++i) {
+ if (localSize[i] > 1)
+ localSize[i] = unit.localSize[i];
+ else if (localSize[i] != unit.localSize[i])
+ error(infoSink, "Contradictory local size");
+
+ if (localSizeSpecId[i] != TQualifier::layoutNotSet)
+ localSizeSpecId[i] = unit.localSizeSpecId[i];
+ else if (localSizeSpecId[i] != unit.localSizeSpecId[i])
+ error(infoSink, "Contradictory local size specialization ids");
+ }
+
+ MERGE_TRUE(earlyFragmentTests);
+ MERGE_TRUE(postDepthCoverage);
+
+ if (depthLayout == EldNone)
+ depthLayout = unit.depthLayout;
+ else if (depthLayout != unit.depthLayout)
+ error(infoSink, "Contradictory depth layouts");
+
+ MERGE_TRUE(depthReplacing);
+ MERGE_TRUE(hlslFunctionality1);
+
+ blendEquations |= unit.blendEquations;
+
+ MERGE_TRUE(xfbMode);
+
+ for (size_t b = 0; b < xfbBuffers.size(); ++b) {
+ if (xfbBuffers[b].stride == TQualifier::layoutXfbStrideEnd)
+ xfbBuffers[b].stride = unit.xfbBuffers[b].stride;
+ else if (xfbBuffers[b].stride != unit.xfbBuffers[b].stride)
+ error(infoSink, "Contradictory xfb_stride");
+ xfbBuffers[b].implicitStride = std::max(xfbBuffers[b].implicitStride, unit.xfbBuffers[b].implicitStride);
+ if (unit.xfbBuffers[b].contains64BitType)
+ xfbBuffers[b].contains64BitType = true;
+#ifdef AMD_EXTENSIONS
+ if (unit.xfbBuffers[b].contains32BitType)
+ xfbBuffers[b].contains32BitType = true;
+ if (unit.xfbBuffers[b].contains16BitType)
+ xfbBuffers[b].contains16BitType = true;
+#endif
+ // TODO: 4.4 link: enhanced layouts: compare ranges
+ }
+
+ MERGE_TRUE(multiStream);
+
+#ifdef NV_EXTENSIONS
+ MERGE_TRUE(layoutOverrideCoverage);
+ MERGE_TRUE(geoPassthroughEXT);
+#endif
+
+ for (unsigned int i = 0; i < unit.shiftBinding.size(); ++i) {
+ if (unit.shiftBinding[i] > 0)
+ setShiftBinding((TResourceType)i, unit.shiftBinding[i]);
+ }
+
+ for (unsigned int i = 0; i < unit.shiftBindingForSet.size(); ++i) {
+ for (auto it = unit.shiftBindingForSet[i].begin(); it != unit.shiftBindingForSet[i].end(); ++it)
+ setShiftBindingForSet((TResourceType)i, it->second, it->first);
+ }
+
+ resourceSetBinding.insert(resourceSetBinding.end(), unit.resourceSetBinding.begin(), unit.resourceSetBinding.end());
+
+ MERGE_TRUE(autoMapBindings);
+ MERGE_TRUE(autoMapLocations);
+ MERGE_TRUE(invertY);
+ MERGE_TRUE(flattenUniformArrays);
+ MERGE_TRUE(useUnknownFormat);
+ MERGE_TRUE(hlslOffsets);
+ MERGE_TRUE(useStorageBuffer);
+ MERGE_TRUE(hlslIoMapping);
+
+ // TODO: sourceFile
+ // TODO: sourceText
+ // TODO: processes
+
+ MERGE_TRUE(needToLegalize);
+ MERGE_TRUE(binaryDoubleOutput);
+ MERGE_TRUE(usePhysicalStorageBuffer);
+}
+
+//
+// Merge the 'unit' AST into 'this' AST.
+// That includes rationalizing the unique IDs, which were set up independently,
+// and might have overlaps that are not the same symbol, or might have different
+// IDs for what should be the same shared symbol.
+//
+void TIntermediate::mergeTrees(TInfoSink& infoSink, TIntermediate& unit)
+{
+ if (unit.treeRoot == nullptr)
+ return;
+
+ if (treeRoot == nullptr) {
+ treeRoot = unit.treeRoot;
+ return;
+ }
+
+ // Getting this far means we have two existing trees to merge...
+#ifdef NV_EXTENSIONS
+ numShaderRecordNVBlocks += unit.numShaderRecordNVBlocks;
+#endif
+
+#ifdef NV_EXTENSIONS
+ numTaskNVBlocks += unit.numTaskNVBlocks;
+#endif
+
+ // Get the top-level globals of each unit
+ TIntermSequence& globals = treeRoot->getAsAggregate()->getSequence();
+ TIntermSequence& unitGlobals = unit.treeRoot->getAsAggregate()->getSequence();
+
+ // Get the linker-object lists
+ TIntermSequence& linkerObjects = findLinkerObjects()->getSequence();
+ const TIntermSequence& unitLinkerObjects = unit.findLinkerObjects()->getSequence();
+
+ // Map by global name to unique ID to rationalize the same object having
+ // differing IDs in different trees.
+ TMap<TString, int> idMap;
+ int maxId;
+ seedIdMap(idMap, maxId);
+ remapIds(idMap, maxId + 1, unit);
+
+ mergeBodies(infoSink, globals, unitGlobals);
+ mergeLinkerObjects(infoSink, linkerObjects, unitLinkerObjects);
+ ioAccessed.insert(unit.ioAccessed.begin(), unit.ioAccessed.end());
+}
+
+// Traverser that seeds an ID map with all built-ins, and tracks the
+// maximum ID used.
+// (It would be nice to put this in a function, but that causes warnings
+// on having no bodies for the copy-constructor/operator=.)
+class TBuiltInIdTraverser : public TIntermTraverser {
+public:
+ TBuiltInIdTraverser(TMap<TString, int>& idMap) : idMap(idMap), maxId(0) { }
+ // If it's a built in, add it to the map.
+ // Track the max ID.
+ virtual void visitSymbol(TIntermSymbol* symbol)
+ {
+ const TQualifier& qualifier = symbol->getType().getQualifier();
+ if (qualifier.builtIn != EbvNone)
+ idMap[symbol->getName()] = symbol->getId();
+ maxId = std::max(maxId, symbol->getId());
+ }
+ int getMaxId() const { return maxId; }
+protected:
+ TBuiltInIdTraverser(TBuiltInIdTraverser&);
+ TBuiltInIdTraverser& operator=(TBuiltInIdTraverser&);
+ TMap<TString, int>& idMap;
+ int maxId;
+};
+
+// Traverser that seeds an ID map with non-builtins.
+// (It would be nice to put this in a function, but that causes warnings
+// on having no bodies for the copy-constructor/operator=.)
+class TUserIdTraverser : public TIntermTraverser {
+public:
+ TUserIdTraverser(TMap<TString, int>& idMap) : idMap(idMap) { }
+ // If it's a non-built-in global, add it to the map.
+ virtual void visitSymbol(TIntermSymbol* symbol)
+ {
+ const TQualifier& qualifier = symbol->getType().getQualifier();
+ if (qualifier.builtIn == EbvNone)
+ idMap[symbol->getName()] = symbol->getId();
+ }
+
+protected:
+ TUserIdTraverser(TUserIdTraverser&);
+ TUserIdTraverser& operator=(TUserIdTraverser&);
+ TMap<TString, int>& idMap; // over biggest id
+};
+
+// Initialize the ID map with what we know of 'this' AST.
+void TIntermediate::seedIdMap(TMap<TString, int>& idMap, int& maxId)
+{
+ // all built-ins everywhere need to align on IDs and contribute to the max ID
+ TBuiltInIdTraverser builtInIdTraverser(idMap);
+ treeRoot->traverse(&builtInIdTraverser);
+ maxId = builtInIdTraverser.getMaxId();
+
+ // user variables in the linker object list need to align on ids
+ TUserIdTraverser userIdTraverser(idMap);
+ findLinkerObjects()->traverse(&userIdTraverser);
+}
+
+// Traverser to map an AST ID to what was known from the seeding AST.
+// (It would be nice to put this in a function, but that causes warnings
+// on having no bodies for the copy-constructor/operator=.)
+class TRemapIdTraverser : public TIntermTraverser {
+public:
+ TRemapIdTraverser(const TMap<TString, int>& idMap, int idShift) : idMap(idMap), idShift(idShift) { }
+ // Do the mapping:
+ // - if the same symbol, adopt the 'this' ID
+ // - otherwise, ensure a unique ID by shifting to a new space
+ virtual void visitSymbol(TIntermSymbol* symbol)
+ {
+ const TQualifier& qualifier = symbol->getType().getQualifier();
+ bool remapped = false;
+ if (qualifier.isLinkable() || qualifier.builtIn != EbvNone) {
+ auto it = idMap.find(symbol->getName());
+ if (it != idMap.end()) {
+ symbol->changeId(it->second);
+ remapped = true;
+ }
+ }
+ if (!remapped)
+ symbol->changeId(symbol->getId() + idShift);
+ }
+protected:
+ TRemapIdTraverser(TRemapIdTraverser&);
+ TRemapIdTraverser& operator=(TRemapIdTraverser&);
+ const TMap<TString, int>& idMap;
+ int idShift;
+};
+
+void TIntermediate::remapIds(const TMap<TString, int>& idMap, int idShift, TIntermediate& unit)
+{
+ // Remap all IDs to either share or be unique, as dictated by the idMap and idShift.
+ TRemapIdTraverser idTraverser(idMap, idShift);
+ unit.getTreeRoot()->traverse(&idTraverser);
+}
+
+//
+// Merge the function bodies and global-level initializers from unitGlobals into globals.
+// Will error check duplication of function bodies for the same signature.
+//
+void TIntermediate::mergeBodies(TInfoSink& infoSink, TIntermSequence& globals, const TIntermSequence& unitGlobals)
+{
+ // TODO: link-time performance: Processing in alphabetical order will be faster
+
+ // Error check the global objects, not including the linker objects
+ for (unsigned int child = 0; child < globals.size() - 1; ++child) {
+ for (unsigned int unitChild = 0; unitChild < unitGlobals.size() - 1; ++unitChild) {
+ TIntermAggregate* body = globals[child]->getAsAggregate();
+ TIntermAggregate* unitBody = unitGlobals[unitChild]->getAsAggregate();
+ if (body && unitBody && body->getOp() == EOpFunction && unitBody->getOp() == EOpFunction && body->getName() == unitBody->getName()) {
+ error(infoSink, "Multiple function bodies in multiple compilation units for the same signature in the same stage:");
+ infoSink.info << " " << globals[child]->getAsAggregate()->getName() << "\n";
+ }
+ }
+ }
+
+ // Merge the global objects, just in front of the linker objects
+ globals.insert(globals.end() - 1, unitGlobals.begin(), unitGlobals.end() - 1);
+}
+
+//
+// Merge the linker objects from unitLinkerObjects into linkerObjects.
+// Duplication is expected and filtered out, but contradictions are an error.
+//
+void TIntermediate::mergeLinkerObjects(TInfoSink& infoSink, TIntermSequence& linkerObjects, const TIntermSequence& unitLinkerObjects)
+{
+ // Error check and merge the linker objects (duplicates should not be created)
+ std::size_t initialNumLinkerObjects = linkerObjects.size();
+ for (unsigned int unitLinkObj = 0; unitLinkObj < unitLinkerObjects.size(); ++unitLinkObj) {
+ bool merge = true;
+ for (std::size_t linkObj = 0; linkObj < initialNumLinkerObjects; ++linkObj) {
+ TIntermSymbol* symbol = linkerObjects[linkObj]->getAsSymbolNode();
+ TIntermSymbol* unitSymbol = unitLinkerObjects[unitLinkObj]->getAsSymbolNode();
+ assert(symbol && unitSymbol);
+ if (symbol->getName() == unitSymbol->getName()) {
+ // filter out copy
+ merge = false;
+
+ // but if one has an initializer and the other does not, update
+ // the initializer
+ if (symbol->getConstArray().empty() && ! unitSymbol->getConstArray().empty())
+ symbol->setConstArray(unitSymbol->getConstArray());
+
+ // Similarly for binding
+ if (! symbol->getQualifier().hasBinding() && unitSymbol->getQualifier().hasBinding())
+ symbol->getQualifier().layoutBinding = unitSymbol->getQualifier().layoutBinding;
+
+ // Update implicit array sizes
+ mergeImplicitArraySizes(symbol->getWritableType(), unitSymbol->getType());
+
+ // Check for consistent types/qualification/initializers etc.
+ mergeErrorCheck(infoSink, *symbol, *unitSymbol, false);
+ }
+ }
+ if (merge)
+ linkerObjects.push_back(unitLinkerObjects[unitLinkObj]);
+ }
+}
+
+// TODO 4.5 link functionality: cull distance array size checking
+
+// Recursively merge the implicit array sizes through the objects' respective type trees.
+void TIntermediate::mergeImplicitArraySizes(TType& type, const TType& unitType)
+{
+ if (type.isUnsizedArray()) {
+ if (unitType.isUnsizedArray()) {
+ type.updateImplicitArraySize(unitType.getImplicitArraySize());
+ if (unitType.isArrayVariablyIndexed())
+ type.setArrayVariablyIndexed();
+ } else if (unitType.isSizedArray())
+ type.changeOuterArraySize(unitType.getOuterArraySize());
+ }
+
+ // Type mismatches are caught and reported after this, just be careful for now.
+ if (! type.isStruct() || ! unitType.isStruct() || type.getStruct()->size() != unitType.getStruct()->size())
+ return;
+
+ for (int i = 0; i < (int)type.getStruct()->size(); ++i)
+ mergeImplicitArraySizes(*(*type.getStruct())[i].type, *(*unitType.getStruct())[i].type);
+}
+
+//
+// Compare two global objects from two compilation units and see if they match
+// well enough. Rules can be different for intra- vs. cross-stage matching.
+//
+// This function only does one of intra- or cross-stage matching per call.
+//
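+// For example (hypothetical): a uniform declared "highp vec4 u;" in one unit
+// and "mediump vec4 u;" in another trips the precision check below.
+//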
+void TIntermediate::mergeErrorCheck(TInfoSink& infoSink, const TIntermSymbol& symbol, const TIntermSymbol& unitSymbol, bool crossStage)
+{
+ bool writeTypeComparison = false;
+
+ // Types have to match
+ if (symbol.getType() != unitSymbol.getType()) {
+ // but, we make an exception if one is an implicit array and the other is sized
+ if (! (symbol.getType().isArray() && unitSymbol.getType().isArray() &&
+ symbol.getType().sameElementType(unitSymbol.getType()) &&
+ (symbol.getType().isUnsizedArray() || unitSymbol.getType().isUnsizedArray()))) {
+ error(infoSink, "Types must match:");
+ writeTypeComparison = true;
+ }
+ }
+
+ // Qualifiers have to (almost) match
+
+ // Storage...
+ if (symbol.getQualifier().storage != unitSymbol.getQualifier().storage) {
+ error(infoSink, "Storage qualifiers must match:");
+ writeTypeComparison = true;
+ }
+
+ // Precision...
+ if (symbol.getQualifier().precision != unitSymbol.getQualifier().precision) {
+ error(infoSink, "Precision qualifiers must match:");
+ writeTypeComparison = true;
+ }
+
+ // Invariance...
+ if (! crossStage && symbol.getQualifier().invariant != unitSymbol.getQualifier().invariant) {
+ error(infoSink, "Presence of invariant qualifier must match:");
+ writeTypeComparison = true;
+ }
+
+ // Precise...
+ if (! crossStage && symbol.getQualifier().noContraction != unitSymbol.getQualifier().noContraction) {
+ error(infoSink, "Presence of precise qualifier must match:");
+ writeTypeComparison = true;
+ }
+
+ // Auxiliary and interpolation...
+ if (symbol.getQualifier().centroid != unitSymbol.getQualifier().centroid ||
+ symbol.getQualifier().smooth != unitSymbol.getQualifier().smooth ||
+ symbol.getQualifier().flat != unitSymbol.getQualifier().flat ||
+ symbol.getQualifier().sample != unitSymbol.getQualifier().sample ||
+ symbol.getQualifier().patch != unitSymbol.getQualifier().patch ||
+ symbol.getQualifier().nopersp != unitSymbol.getQualifier().nopersp) {
+ error(infoSink, "Interpolation and auxiliary storage qualifiers must match:");
+ writeTypeComparison = true;
+ }
+
+ // Memory...
+ if (symbol.getQualifier().coherent != unitSymbol.getQualifier().coherent ||
+ symbol.getQualifier().devicecoherent != unitSymbol.getQualifier().devicecoherent ||
+ symbol.getQualifier().queuefamilycoherent != unitSymbol.getQualifier().queuefamilycoherent ||
+ symbol.getQualifier().workgroupcoherent != unitSymbol.getQualifier().workgroupcoherent ||
+ symbol.getQualifier().subgroupcoherent != unitSymbol.getQualifier().subgroupcoherent ||
+ symbol.getQualifier().nonprivate != unitSymbol.getQualifier().nonprivate ||
+ symbol.getQualifier().volatil != unitSymbol.getQualifier().volatil ||
+ symbol.getQualifier().restrict != unitSymbol.getQualifier().restrict ||
+ symbol.getQualifier().readonly != unitSymbol.getQualifier().readonly ||
+ symbol.getQualifier().writeonly != unitSymbol.getQualifier().writeonly) {
+ error(infoSink, "Memory qualifiers must match:");
+ writeTypeComparison = true;
+ }
+
+ // Layouts...
+ // TODO: 4.4 enhanced layouts: Generalize to include offset/align: current spec
+ // requires separate user-supplied offset from actual computed offset, but
+ // current implementation only has one offset.
+ if (symbol.getQualifier().layoutMatrix != unitSymbol.getQualifier().layoutMatrix ||
+ symbol.getQualifier().layoutPacking != unitSymbol.getQualifier().layoutPacking ||
+ symbol.getQualifier().layoutLocation != unitSymbol.getQualifier().layoutLocation ||
+ symbol.getQualifier().layoutComponent != unitSymbol.getQualifier().layoutComponent ||
+ symbol.getQualifier().layoutIndex != unitSymbol.getQualifier().layoutIndex ||
+ symbol.getQualifier().layoutBinding != unitSymbol.getQualifier().layoutBinding ||
+ (symbol.getQualifier().hasBinding() && (symbol.getQualifier().layoutOffset != unitSymbol.getQualifier().layoutOffset))) {
+ error(infoSink, "Layout qualification must match:");
+ writeTypeComparison = true;
+ }
+
+ // Initializers have to match, if both are present, and if we don't already know the types don't match
+ if (! writeTypeComparison) {
+ if (! symbol.getConstArray().empty() && ! unitSymbol.getConstArray().empty()) {
+ if (symbol.getConstArray() != unitSymbol.getConstArray()) {
+ error(infoSink, "Initializers must match:");
+ infoSink.info << " " << symbol.getName() << "\n";
+ }
+ }
+ }
+
+ if (writeTypeComparison)
+ infoSink.info << " " << symbol.getName() << ": \"" << symbol.getType().getCompleteString() << "\" versus \"" <<
+ unitSymbol.getType().getCompleteString() << "\"\n";
+}
+
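+// Illustrative example (not part of the library): linking two units of the
+// same stage that declare
+//
+//     layout(location = 0) uniform vec4 color;   // unit 1
+//     layout(location = 1) uniform vec4 color;   // unit 2
+//
+// trips the layout check above, reporting "Layout qualification must match:"
+// followed by the two complete type strings.
+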
+//
+// Do final link-time error checking of a complete (merged) intermediate representation.
+// (Much error checking was done during merging).
+//
+// Also, lock in defaults of things not set, including array sizes.
+//
+void TIntermediate::finalCheck(TInfoSink& infoSink, bool keepUncalled)
+{
+ if (getTreeRoot() == nullptr)
+ return;
+
+ if (numEntryPoints < 1) {
+ if (source == EShSourceGlsl)
+ error(infoSink, "Missing entry point: Each stage requires one entry point");
+ else
+ warn(infoSink, "Entry point not found");
+ }
+
+ if (numPushConstants > 1)
+ error(infoSink, "Only one push_constant block is allowed per stage");
+
+ // recursion and missing body checking
+ checkCallGraphCycles(infoSink);
+ checkCallGraphBodies(infoSink, keepUncalled);
+
+ // overlap/alias/missing I/O, etc.
+ inOutLocationCheck(infoSink);
+
+ // invocations
+ if (invocations == TQualifier::layoutNotSet)
+ invocations = 1;
+
+ if (inIoAccessed("gl_ClipDistance") && inIoAccessed("gl_ClipVertex"))
+ error(infoSink, "Can only use one of gl_ClipDistance or gl_ClipVertex (gl_ClipDistance is preferred)");
+ if (inIoAccessed("gl_CullDistance") && inIoAccessed("gl_ClipVertex"))
+ error(infoSink, "Can only use one of gl_CullDistance or gl_ClipVertex (gl_ClipDistance is preferred)");
+
+ if (userOutputUsed() && (inIoAccessed("gl_FragColor") || inIoAccessed("gl_FragData")))
+ error(infoSink, "Cannot use gl_FragColor or gl_FragData when using user-defined outputs");
+ if (inIoAccessed("gl_FragColor") && inIoAccessed("gl_FragData"))
+ error(infoSink, "Cannot use both gl_FragColor and gl_FragData");
+
+ for (size_t b = 0; b < xfbBuffers.size(); ++b) {
+ if (xfbBuffers[b].contains64BitType)
+ RoundToPow2(xfbBuffers[b].implicitStride, 8);
+#ifdef AMD_EXTENSIONS
+ else if (xfbBuffers[b].contains32BitType)
+ RoundToPow2(xfbBuffers[b].implicitStride, 4);
+ else if (xfbBuffers[b].contains16BitType)
+ RoundToPow2(xfbBuffers[b].implicitStride, 2);
+#endif
+
+ // "It is a compile-time or link-time error to have
+ // any xfb_offset that overflows xfb_stride, whether stated on declarations before or after the xfb_stride, or
+ // in different compilation units. While xfb_stride can be declared multiple times for the same buffer, it is a
+ // compile-time or link-time error to have different values specified for the stride for the same buffer."
+ if (xfbBuffers[b].stride != TQualifier::layoutXfbStrideEnd && xfbBuffers[b].implicitStride > xfbBuffers[b].stride) {
+ error(infoSink, "xfb_stride is too small to hold all buffer entries:");
+ infoSink.info.prefix(EPrefixError);
+ infoSink.info << " xfb_buffer " << (unsigned int)b << ", xfb_stride " << xfbBuffers[b].stride << ", minimum stride needed: " << xfbBuffers[b].implicitStride << "\n";
+ }
+ if (xfbBuffers[b].stride == TQualifier::layoutXfbStrideEnd)
+ xfbBuffers[b].stride = xfbBuffers[b].implicitStride;
+
+ // "If the buffer is capturing any
+ // outputs with double-precision or 64-bit integer components, the stride must be a multiple of 8, otherwise it must be a
+ // multiple of 4, or a compile-time or link-time error results."
+ if (xfbBuffers[b].contains64BitType && ! IsMultipleOfPow2(xfbBuffers[b].stride, 8)) {
+ error(infoSink, "xfb_stride must be multiple of 8 for buffer holding a double or 64-bit integer:");
+ infoSink.info.prefix(EPrefixError);
+ infoSink.info << " xfb_buffer " << (unsigned int)b << ", xfb_stride " << xfbBuffers[b].stride << "\n";
+#ifdef AMD_EXTENSIONS
+ } else if (xfbBuffers[b].contains32BitType && ! IsMultipleOfPow2(xfbBuffers[b].stride, 4)) {
+#else
+ } else if (! IsMultipleOfPow2(xfbBuffers[b].stride, 4)) {
+#endif
+ error(infoSink, "xfb_stride must be multiple of 4:");
+ infoSink.info.prefix(EPrefixError);
+ infoSink.info << " xfb_buffer " << (unsigned int)b << ", xfb_stride " << xfbBuffers[b].stride << "\n";
+ }
+#ifdef AMD_EXTENSIONS
+ // "If the buffer is capturing any
+ // outputs with half-precision or 16-bit integer components, the stride must be a multiple of 2"
+ else if (xfbBuffers[b].contains16BitType && ! IsMultipleOfPow2(xfbBuffers[b].stride, 2)) {
+ error(infoSink, "xfb_stride must be multiple of 2 for buffer holding a half float or 16-bit integer:");
+ infoSink.info.prefix(EPrefixError);
+ infoSink.info << " xfb_buffer " << (unsigned int)b << ", xfb_stride " << xfbBuffers[b].stride << "\n";
+ }
+
+#endif
+ // "The resulting stride (implicit or explicit), when divided by 4, must be less than or equal to the
+ // implementation-dependent constant gl_MaxTransformFeedbackInterleavedComponents."
+ if (xfbBuffers[b].stride > (unsigned int)(4 * resources.maxTransformFeedbackInterleavedComponents)) {
+ error(infoSink, "xfb_stride is too large:");
+ infoSink.info.prefix(EPrefixError);
+ infoSink.info << " xfb_buffer " << (unsigned int)b << ", components (1/4 stride) needed are " << xfbBuffers[b].stride/4 << ", gl_MaxTransformFeedbackInterleavedComponents is " << resources.maxTransformFeedbackInterleavedComponents << "\n";
+ }
+ }
+
+ switch (language) {
+ case EShLangVertex:
+ break;
+ case EShLangTessControl:
+ if (vertices == TQualifier::layoutNotSet)
+ error(infoSink, "At least one shader must specify an output layout(vertices=...)");
+ break;
+ case EShLangTessEvaluation:
+ if (source == EShSourceGlsl) {
+ if (inputPrimitive == ElgNone)
+ error(infoSink, "At least one shader must specify an input layout primitive");
+ if (vertexSpacing == EvsNone)
+ vertexSpacing = EvsEqual;
+ if (vertexOrder == EvoNone)
+ vertexOrder = EvoCcw;
+ }
+ break;
+ case EShLangGeometry:
+ if (inputPrimitive == ElgNone)
+ error(infoSink, "At least one shader must specify an input layout primitive");
+ if (outputPrimitive == ElgNone)
+ error(infoSink, "At least one shader must specify an output layout primitive");
+ if (vertices == TQualifier::layoutNotSet)
+ error(infoSink, "At least one shader must specify a layout(max_vertices = value)");
+ break;
+ case EShLangFragment:
+ // for GL_ARB_post_depth_coverage, EarlyFragmentTest is set automatically in
+ // ParseHelper.cpp. So if we reach here, this must be GL_EXT_post_depth_coverage
+ // requiring explicit early_fragment_tests
+ if (getPostDepthCoverage() && !getEarlyFragmentTests())
+ error(infoSink, "post_depth_coverage requires early_fragment_tests");
+ break;
+ case EShLangCompute:
+ break;
+
+#ifdef NV_EXTENSIONS
+ case EShLangRayGenNV:
+ case EShLangIntersectNV:
+ case EShLangAnyHitNV:
+ case EShLangClosestHitNV:
+ case EShLangMissNV:
+ case EShLangCallableNV:
+ if (numShaderRecordNVBlocks > 1)
+ error(infoSink, "Only one shaderRecordNV buffer block is allowed per stage");
+ break;
+ case EShLangMeshNV:
+ // NV_mesh_shader doesn't allow use of both single-view and per-view builtins.
+ if (inIoAccessed("gl_Position") && inIoAccessed("gl_PositionPerViewNV"))
+ error(infoSink, "Can only use one of gl_Position or gl_PositionPerViewNV");
+ if (inIoAccessed("gl_ClipDistance") && inIoAccessed("gl_ClipDistancePerViewNV"))
+ error(infoSink, "Can only use one of gl_ClipDistance or gl_ClipDistancePerViewNV");
+ if (inIoAccessed("gl_CullDistance") && inIoAccessed("gl_CullDistancePerViewNV"))
+ error(infoSink, "Can only use one of gl_CullDistance or gl_CullDistancePerViewNV");
+ if (inIoAccessed("gl_Layer") && inIoAccessed("gl_LayerPerViewNV"))
+ error(infoSink, "Can only use one of gl_Layer or gl_LayerPerViewNV");
+ if (inIoAccessed("gl_ViewportMask") && inIoAccessed("gl_ViewportMaskPerViewNV"))
+ error(infoSink, "Can only use one of gl_ViewportMask or gl_ViewportMaskPerViewNV");
+ if (outputPrimitive == ElgNone)
+ error(infoSink, "At least one shader must specify an output layout primitive");
+ if (vertices == TQualifier::layoutNotSet)
+ error(infoSink, "At least one shader must specify a layout(max_vertices = value)");
+ if (primitives == TQualifier::layoutNotSet)
+ error(infoSink, "At least one shader must specify a layout(max_primitives = value)");
+ // fall through
+ case EShLangTaskNV:
+ if (numTaskNVBlocks > 1)
+ error(infoSink, "Only one taskNV interface block is allowed per shader");
+ break;
+#endif
+
+ default:
+ error(infoSink, "Unknown Stage.");
+ break;
+ }
+
+ // Process the tree for any node-specific work.
+ class TFinalLinkTraverser : public TIntermTraverser {
+ public:
+ TFinalLinkTraverser() { }
+ virtual ~TFinalLinkTraverser() { }
+
+ virtual void visitSymbol(TIntermSymbol* symbol)
+ {
+ // Implicitly size arrays.
+ // If an unsized array is left as unsized, it effectively
+ // becomes run-time sized.
+ symbol->getWritableType().adoptImplicitArraySizes(false);
+ }
+ } finalLinkTraverser;
+
+ treeRoot->traverse(&finalLinkTraverser);
+}
+
+//
+// See if the call graph contains any static recursion, which is disallowed
+// by the specification.
+//
+void TIntermediate::checkCallGraphCycles(TInfoSink& infoSink)
+{
+ // Clear fields we'll use for this.
+ for (TGraph::iterator call = callGraph.begin(); call != callGraph.end(); ++call) {
+ call->visited = false;
+ call->currentPath = false;
+ call->errorGiven = false;
+ }
+
+ //
+ // Loop, looking for a new connected subgraph. One subgraph is handled per loop iteration.
+ //
+
+ TCall* newRoot;
+ do {
+ // See if we have unvisited parts of the graph.
+ newRoot = 0;
+ for (TGraph::iterator call = callGraph.begin(); call != callGraph.end(); ++call) {
+ if (! call->visited) {
+ newRoot = &(*call);
+ break;
+ }
+ }
+
+ // If not, we are done.
+ if (! newRoot)
+ break;
+
+ // Otherwise, we found a new subgraph, process it:
+ // See what all can be reached by this new root, and if any of
+ // that is recursive. This is done by depth-first traversals, seeing
+ // if a new call is found that was already in the currentPath (a back edge),
+ // thereby detecting recursion.
+ std::list<TCall*> stack;
+ newRoot->currentPath = true; // currentPath will be true iff it is on the stack
+ stack.push_back(newRoot);
+ while (! stack.empty()) {
+ // get a caller
+ TCall* call = stack.back();
+
+ // Add to the stack just one callee.
+ // This algorithm always terminates, because only !visited and !currentPath causes a push
+ // and all pushes change currentPath to true, and all pops change visited to true.
+ TGraph::iterator child = callGraph.begin();
+ for (; child != callGraph.end(); ++child) {
+
+ // If we already visited this node, its whole subgraph has already been processed, so skip it.
+ if (child->visited)
+ continue;
+
+ if (call->callee == child->caller) {
+ if (child->currentPath) {
+ // Then, we found a back edge
+ if (! child->errorGiven) {
+ error(infoSink, "Recursion detected:");
+ infoSink.info << " " << call->callee << " calling " << child->callee << "\n";
+ child->errorGiven = true;
+ recursive = true;
+ }
+ } else {
+ child->currentPath = true;
+ stack.push_back(&(*child));
+ break;
+ }
+ }
+ }
+ if (child == callGraph.end()) {
+ // no more callees, we bottomed out, never look at this node again
+ stack.back()->currentPath = false;
+ stack.back()->visited = true;
+ stack.pop_back();
+ }
+ } // end while, meaning nothing left to process in this subtree
+
+ } while (newRoot); // redundant loop check; should always exit via the 'break' above
+}
+
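+// Illustrative example (not part of the library): a shader such as
+//
+//     void b();
+//     void a() { b(); }
+//     void b() { a(); }     // static recursion: a -> b -> a
+//     void main() { a(); }
+//
+// yields the call pairs <main,a>, <a,b>, <b,a>. The depth-first walk above
+// pushes calls while marking them currentPath; reaching a call whose target
+// is already marked currentPath is the back edge that flags the recursion.
+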
+//
+// See which functions are reachable from the entry point and which have bodies.
+// Reachable ones with missing bodies are errors.
+// Unreachable bodies are dead code.
+//
+void TIntermediate::checkCallGraphBodies(TInfoSink& infoSink, bool keepUncalled)
+{
+ // Clear fields we'll use for this.
+ for (TGraph::iterator call = callGraph.begin(); call != callGraph.end(); ++call) {
+ call->visited = false;
+ call->calleeBodyPosition = -1;
+ }
+
+ // The top level of the AST includes function definitions (bodies).
+ // Compare these to function calls in the call graph.
+ // We'll end up knowing which have bodies, and if so,
+ // how to map the call-graph node to the location in the AST.
+ TIntermSequence &functionSequence = getTreeRoot()->getAsAggregate()->getSequence();
+ std::vector<bool> reachable(functionSequence.size(), true); // so that non-functions are reachable
+ for (int f = 0; f < (int)functionSequence.size(); ++f) {
+ glslang::TIntermAggregate* node = functionSequence[f]->getAsAggregate();
+ if (node && (node->getOp() == glslang::EOpFunction)) {
+ if (node->getName().compare(getEntryPointMangledName().c_str()) != 0)
+ reachable[f] = false; // so that function bodies are unreachable, until proven otherwise
+ for (TGraph::iterator call = callGraph.begin(); call != callGraph.end(); ++call) {
+ if (call->callee == node->getName())
+ call->calleeBodyPosition = f;
+ }
+ }
+ }
+
+ // Start call-graph traversal by visiting the entry point nodes.
+ for (TGraph::iterator call = callGraph.begin(); call != callGraph.end(); ++call) {
+ if (call->caller.compare(getEntryPointMangledName().c_str()) == 0)
+ call->visited = true;
+ }
+
+ // Propagate 'visited' through the call-graph to every part of the graph it
+ // can reach (seeded with the entry-point setting above).
+ bool changed;
+ do {
+ changed = false;
+ for (auto call1 = callGraph.begin(); call1 != callGraph.end(); ++call1) {
+ if (call1->visited) {
+ for (TGraph::iterator call2 = callGraph.begin(); call2 != callGraph.end(); ++call2) {
+ if (! call2->visited) {
+ if (call1->callee == call2->caller) {
+ changed = true;
+ call2->visited = true;
+ }
+ }
+ }
+ }
+ }
+ } while (changed);
+
+ // Any call-graph node set to visited but without a callee body is an error.
+ for (TGraph::iterator call = callGraph.begin(); call != callGraph.end(); ++call) {
+ if (call->visited) {
+ if (call->calleeBodyPosition == -1) {
+ error(infoSink, "No function definition (body) found: ");
+ infoSink.info << " " << call->callee << "\n";
+ } else
+ reachable[call->calleeBodyPosition] = true;
+ }
+ }
+
+ // Bodies in the AST not reached by the call graph are dead;
+ // clear them out, since they can't be reached and also can't
+ // be translated further due to possibility of being ill defined.
+ if (! keepUncalled) {
+ for (int f = 0; f < (int)functionSequence.size(); ++f) {
+ if (! reachable[f])
+ functionSequence[f] = nullptr;
+ }
+ functionSequence.erase(std::remove(functionSequence.begin(), functionSequence.end(), nullptr), functionSequence.end());
+ }
+}
+
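+// Illustrative example (not part of the library): with
+//
+//     void declaredOnly();              // prototype, no body
+//     void dead() { }                   // body, never called
+//     void main() { declaredOnly(); }
+//
+// 'declaredOnly' is reachable but never gets a calleeBodyPosition, so the
+// check above reports a missing function definition, while the body of
+// 'dead' is unreachable and is removed from the AST unless keepUncalled
+// was requested.
+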
+//
+// Satisfy rules for location qualifiers on inputs and outputs
+//
+void TIntermediate::inOutLocationCheck(TInfoSink& infoSink)
+{
+ // ES 3.0 requires all outputs to have location qualifiers if there is more than one output
+ bool fragOutWithNoLocation = false;
+ int numFragOut = 0;
+
+ // TODO: linker functionality: location collision checking
+
+ TIntermSequence& linkObjects = findLinkerObjects()->getSequence();
+ for (size_t i = 0; i < linkObjects.size(); ++i) {
+ const TType& type = linkObjects[i]->getAsTyped()->getType();
+ const TQualifier& qualifier = type.getQualifier();
+ if (language == EShLangFragment) {
+ if (qualifier.storage == EvqVaryingOut && qualifier.builtIn == EbvNone) {
+ ++numFragOut;
+ if (!qualifier.hasAnyLocation())
+ fragOutWithNoLocation = true;
+ }
+ }
+ }
+
+ if (profile == EEsProfile) {
+ if (numFragOut > 1 && fragOutWithNoLocation)
+ error(infoSink, "when more than one fragment shader output, all must have location qualifiers");
+ }
+}
+
+TIntermAggregate* TIntermediate::findLinkerObjects() const
+{
+ // Get the top-level globals
+ TIntermSequence& globals = treeRoot->getAsAggregate()->getSequence();
+
+ // Get the last member of the sequences, expected to be the linker-object lists
+ assert(globals.back()->getAsAggregate()->getOp() == EOpLinkerObjects);
+
+ return globals.back()->getAsAggregate();
+}
+
+// See if a variable was both a user-declared output and used.
+// Note: the spec discusses writing to one, but this looks at read or write, which
+// is more useful, and perhaps the spec should be changed to reflect that.
+bool TIntermediate::userOutputUsed() const
+{
+ const TIntermSequence& linkerObjects = findLinkerObjects()->getSequence();
+
+ bool found = false;
+ for (size_t i = 0; i < linkerObjects.size(); ++i) {
+ const TIntermSymbol& symbolNode = *linkerObjects[i]->getAsSymbolNode();
+ if (symbolNode.getQualifier().storage == EvqVaryingOut &&
+ symbolNode.getName().compare(0, 3, "gl_") != 0 &&
+ inIoAccessed(symbolNode.getName())) {
+ found = true;
+ break;
+ }
+ }
+
+ return found;
+}
+
+// Accumulate locations used for inputs, outputs, and uniforms, and check for collisions
+// as the accumulation is done.
+//
+// Returns < 0 if no collision, >= 0 if collision and the value returned is a colliding value.
+//
+// typeCollision is set to true if there is no direct collision, but the types in the same location
+// are different.
+//
+int TIntermediate::addUsedLocation(const TQualifier& qualifier, const TType& type, bool& typeCollision)
+{
+ typeCollision = false;
+
+ int set;
+ if (qualifier.isPipeInput())
+ set = 0;
+ else if (qualifier.isPipeOutput())
+ set = 1;
+ else if (qualifier.storage == EvqUniform)
+ set = 2;
+ else if (qualifier.storage == EvqBuffer)
+ set = 3;
+ else
+ return -1;
+
+ int size;
+ if (qualifier.isUniformOrBuffer() || qualifier.isTaskMemory()) {
+ if (type.isSizedArray())
+ size = type.getCumulativeArraySize();
+ else
+ size = 1;
+ } else {
+ // Strip off the outer array dimension for those having an extra one.
+ if (type.isArray() && qualifier.isArrayedIo(language)) {
+ TType elementType(type, 0);
+ size = computeTypeLocationSize(elementType, language);
+ } else
+ size = computeTypeLocationSize(type, language);
+ }
+
+ // Locations, and components within locations.
+ //
+ // Almost always, dealing with components means a single location is involved.
+ // The exception is a dvec3. From the spec:
+ //
+ // "A dvec3 will consume all four components of the first location and components 0 and 1 of
+ // the second location. This leaves components 2 and 3 available for other component-qualified
+ // declarations."
+ //
+ // That means, without ever mentioning a component, a component range
+ // for a different location gets specified, if it's not a vertex shader input. (!)
+ // (A vertex shader input will show using only one location, even for a dvec3/4.)
+ //
+ // So, for the case of dvec3, we need two independent ioRanges.
+
+ int collision = -1; // no collision
+ if (size == 2 && type.getBasicType() == EbtDouble && type.getVectorSize() == 3 &&
+ (qualifier.isPipeInput() || qualifier.isPipeOutput())) {
+ // Dealing with dvec3 in/out split across two locations.
+ // Need two io-ranges.
+ // The case where the dvec3 doesn't start at component 0 was previously caught as overflow.
+
+ // First range:
+ TRange locationRange(qualifier.layoutLocation, qualifier.layoutLocation);
+ TRange componentRange(0, 3);
+ TIoRange range(locationRange, componentRange, type.getBasicType(), 0);
+
+ // check for collisions
+ collision = checkLocationRange(set, range, type, typeCollision);
+ if (collision < 0) {
+ usedIo[set].push_back(range);
+
+ // Second range:
+ TRange locationRange2(qualifier.layoutLocation + 1, qualifier.layoutLocation + 1);
+ TRange componentRange2(0, 1);
+ TIoRange range2(locationRange2, componentRange2, type.getBasicType(), 0);
+
+ // check for collisions
+ collision = checkLocationRange(set, range2, type, typeCollision);
+ if (collision < 0)
+ usedIo[set].push_back(range2);
+ }
+ } else {
+ // Not a dvec3 in/out split across two locations; take the generic path.
+ // Need a single IO-range block.
+
+ TRange locationRange(qualifier.layoutLocation, qualifier.layoutLocation + size - 1);
+ TRange componentRange(0, 3);
+ if (qualifier.hasComponent() || type.getVectorSize() > 0) {
+ int consumedComponents = type.getVectorSize() * (type.getBasicType() == EbtDouble ? 2 : 1);
+ if (qualifier.hasComponent())
+ componentRange.start = qualifier.layoutComponent;
+ componentRange.last = componentRange.start + consumedComponents - 1;
+ }
+
+ // combine location and component ranges
+ TIoRange range(locationRange, componentRange, type.getBasicType(), qualifier.hasIndex() ? qualifier.layoutIndex : 0);
+
+ // check for collisions, except for vertex inputs on desktop targeting OpenGL
+ if (! (profile != EEsProfile && language == EShLangVertex && qualifier.isPipeInput()) || spvVersion.vulkan > 0)
+ collision = checkLocationRange(set, range, type, typeCollision);
+
+ if (collision < 0)
+ usedIo[set].push_back(range);
+ }
+
+ return collision;
+}
+
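+// Illustrative example (not part of the library): a fragment-shader input
+//
+//     layout(location = 2) in dvec3 v;
+//
+// takes the split path above: one TIoRange covers location 2, components
+// 0..3, and a second covers location 3, components 0..1, leaving components
+// 2 and 3 of location 3 free for another component-qualified declaration.
+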
+// Compare a new (the passed in) 'range' against the existing set, and see
+// if there are any collisions.
+//
+// Returns < 0 if no collision, >= 0 if collision and the value returned is a colliding value.
+//
+int TIntermediate::checkLocationRange(int set, const TIoRange& range, const TType& type, bool& typeCollision)
+{
+ for (size_t r = 0; r < usedIo[set].size(); ++r) {
+ if (range.overlap(usedIo[set][r])) {
+ // there is a collision; pick one
+ return std::max(range.location.start, usedIo[set][r].location.start);
+ } else if (range.location.overlap(usedIo[set][r].location) && type.getBasicType() != usedIo[set][r].basicType) {
+ // aliased-type mismatch
+ typeCollision = true;
+ return std::max(range.location.start, usedIo[set][r].location.start);
+ }
+ }
+
+ return -1; // no collision
+}
+
+// Accumulate bindings and offsets, and check for collisions
+// as the accumulation is done.
+//
+// Returns < 0 if no collision, >= 0 if collision and the value returned is a colliding value.
+//
+int TIntermediate::addUsedOffsets(int binding, int offset, int numOffsets)
+{
+ TRange bindingRange(binding, binding);
+ TRange offsetRange(offset, offset + numOffsets - 1);
+ TOffsetRange range(bindingRange, offsetRange);
+
+ // check for collisions, except for vertex inputs on desktop
+ for (size_t r = 0; r < usedAtomics.size(); ++r) {
+ if (range.overlap(usedAtomics[r])) {
+ // there is a collision; pick one
+ return std::max(offset, usedAtomics[r].offset.start);
+ }
+ }
+
+ usedAtomics.push_back(range);
+
+ return -1; // no collision
+}
+
+// Accumulate used constant_id values.
+//
+// Return false if one was already used.
+bool TIntermediate::addUsedConstantId(int id)
+{
+ if (usedConstantId.find(id) != usedConstantId.end())
+ return false;
+
+ usedConstantId.insert(id);
+
+ return true;
+}
+
+// Recursively figure out how many locations are used up by an input or output type.
+// Return the size of type, as measured by "locations".
+int TIntermediate::computeTypeLocationSize(const TType& type, EShLanguage stage)
+{
+ // "If the declared input is an array of size n and each element takes m locations, it will be assigned m * n
+ // consecutive locations..."
+ if (type.isArray()) {
+ // TODO: perf: this can be flattened by using getCumulativeArraySize(), and a deref that discards all arrayness
+ // TODO: are there valid cases of having an unsized array with a location? If so, this code is running too early.
+ TType elementType(type, 0);
+ if (type.isSizedArray()
+#ifdef NV_EXTENSIONS
+ && !type.getQualifier().isPerView()
+#endif
+ )
+ return type.getOuterArraySize() * computeTypeLocationSize(elementType, stage);
+ else {
+#ifdef NV_EXTENSIONS
+ // unset perViewNV attributes for arrayed per-view outputs: "perviewNV vec4 v[MAX_VIEWS][3];"
+ elementType.getQualifier().perViewNV = false;
+#endif
+ return computeTypeLocationSize(elementType, stage);
+ }
+ }
+
+ // "The locations consumed by block and structure members are determined by applying the rules above
+ // recursively..."
+ if (type.isStruct()) {
+ int size = 0;
+ for (int member = 0; member < (int)type.getStruct()->size(); ++member) {
+ TType memberType(type, member);
+ size += computeTypeLocationSize(memberType, stage);
+ }
+ return size;
+ }
+
+ // ES: "If a shader input is any scalar or vector type, it will consume a single location."
+
+ // Desktop: "If a vertex shader input is any scalar or vector type, it will consume a single location. If a non-vertex
+ // shader input is a scalar or vector type other than dvec3 or dvec4, it will consume a single location, while
+ // types dvec3 or dvec4 will consume two consecutive locations. Inputs of type double and dvec2 will
+ // consume only a single location, in all stages."
+ if (type.isScalar())
+ return 1;
+ if (type.isVector()) {
+ if (stage == EShLangVertex && type.getQualifier().isPipeInput())
+ return 1;
+ if (type.getBasicType() == EbtDouble && type.getVectorSize() > 2)
+ return 2;
+ else
+ return 1;
+ }
+
+ // "If the declared input is an n x m single- or double-precision matrix, ...
+ // The number of locations assigned for each matrix will be the same as
+ // for an n-element array of m-component vectors..."
+ if (type.isMatrix()) {
+ TType columnType(type, 0);
+ return type.getMatrixCols() * computeTypeLocationSize(columnType, stage);
+ }
+
+ assert(0);
+ return 1;
+}
+
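+// Illustrative worked examples (not part of the library) for the rules above:
+//
+//     in vec4  a;       // 1 location (scalar/vector rule)
+//     in dvec4 b;       // 2 locations in non-vertex stages, 1 as a vertex input
+//     in mat3  m;       // 3 locations: 3 columns x 1 location per vec3 column
+//     in vec2  c[4];    // 4 locations: 4 elements x 1 location each
+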
+// Same as computeTypeLocationSize but for uniforms
+int TIntermediate::computeTypeUniformLocationSize(const TType& type)
+{
+ // "Individual elements of a uniform array are assigned
+ // consecutive locations with the first element taking location
+ // location."
+ if (type.isArray()) {
+ // TODO: perf: this can be flattened by using getCumulativeArraySize(), and a deref that discards all arrayness
+ TType elementType(type, 0);
+ if (type.isSizedArray()) {
+ return type.getOuterArraySize() * computeTypeUniformLocationSize(elementType);
+ } else {
+ // TODO: are there valid cases of having an implicitly-sized array with a location? If so, this code is running too early.
+ return computeTypeUniformLocationSize(elementType);
+ }
+ }
+
+ // "Each subsequent inner-most member or element gets incremental
+ // locations for the entire structure or array."
+ if (type.isStruct()) {
+ int size = 0;
+ for (int member = 0; member < (int)type.getStruct()->size(); ++member) {
+ TType memberType(type, member);
+ size += computeTypeUniformLocationSize(memberType);
+ }
+ return size;
+ }
+
+ return 1;
+}
+
+// Accumulate xfb buffer ranges and check for collisions as the accumulation is done.
+//
+// Returns < 0 if no collision, >= 0 if collision and the value returned is a colliding value.
+//
+int TIntermediate::addXfbBufferOffset(const TType& type)
+{
+ const TQualifier& qualifier = type.getQualifier();
+
+ assert(qualifier.hasXfbOffset() && qualifier.hasXfbBuffer());
+ TXfbBuffer& buffer = xfbBuffers[qualifier.layoutXfbBuffer];
+
+ // compute the range
+#ifdef AMD_EXTENSIONS
+ unsigned int size = computeTypeXfbSize(type, buffer.contains64BitType, buffer.contains32BitType, buffer.contains16BitType);
+#else
+ unsigned int size = computeTypeXfbSize(type, buffer.contains64BitType);
+#endif
+ buffer.implicitStride = std::max(buffer.implicitStride, qualifier.layoutXfbOffset + size);
+ TRange range(qualifier.layoutXfbOffset, qualifier.layoutXfbOffset + size - 1);
+
+ // check for collisions
+ for (size_t r = 0; r < buffer.ranges.size(); ++r) {
+ if (range.overlap(buffer.ranges[r])) {
+ // there is a collision; pick an example to return
+ return std::max(range.start, buffer.ranges[r].start);
+ }
+ }
+
+ buffer.ranges.push_back(range);
+
+ return -1; // no collision
+}
+
+// Recursively figure out how many bytes of xfb buffer are used by the given type.
+// Return the size of type, in bytes.
+// Sets contains64BitType to true if the type contains a 64-bit data type.
+#ifdef AMD_EXTENSIONS
+// Sets contains32BitType to true if the type contains a 32-bit data type.
+// Sets contains16BitType to true if the type contains a 16-bit data type.
+// N.B. Caller must set contains64BitType, contains32BitType, and contains16BitType to false before calling.
+unsigned int TIntermediate::computeTypeXfbSize(const TType& type, bool& contains64BitType, bool& contains32BitType, bool& contains16BitType) const
+#else
+// N.B. Caller must set contains64BitType to false before calling.
+unsigned int TIntermediate::computeTypeXfbSize(const TType& type, bool& contains64BitType) const
+#endif
+{
+ // "...if applied to an aggregate containing a double or 64-bit integer, the offset must also be a multiple of 8,
+ // and the space taken in the buffer will be a multiple of 8.
+ // ...within the qualified entity, subsequent components are each
+ // assigned, in order, to the next available offset aligned to a multiple of
+ // that component's size. Aggregate types are flattened down to the component
+ // level to get this sequence of components."
+
+ if (type.isArray()) {
+ // TODO: perf: this can be flattened by using getCumulativeArraySize(), and a deref that discards all arrayness
+ assert(type.isSizedArray());
+ TType elementType(type, 0);
+#ifdef AMD_EXTENSIONS
+ return type.getOuterArraySize() * computeTypeXfbSize(elementType, contains64BitType, contains32BitType, contains16BitType);
+#else
+ return type.getOuterArraySize() * computeTypeXfbSize(elementType, contains64BitType);
+#endif
+ }
+
+ if (type.isStruct()) {
+ unsigned int size = 0;
+ bool structContains64BitType = false;
+#ifdef AMD_EXTENSIONS
+ bool structContains32BitType = false;
+ bool structContains16BitType = false;
+#endif
+ for (int member = 0; member < (int)type.getStruct()->size(); ++member) {
+ TType memberType(type, member);
+ // "... if applied to
+ // an aggregate containing a double or 64-bit integer, the offset must also be a multiple of 8,
+ // and the space taken in the buffer will be a multiple of 8."
+ bool memberContains64BitType = false;
+#ifdef AMD_EXTENSIONS
+ bool memberContains32BitType = false;
+ bool memberContains16BitType = false;
+ int memberSize = computeTypeXfbSize(memberType, memberContains64BitType, memberContains32BitType, memberContains16BitType);
+#else
+ int memberSize = computeTypeXfbSize(memberType, memberContains64BitType);
+#endif
+ if (memberContains64BitType) {
+ structContains64BitType = true;
+ RoundToPow2(size, 8);
+#ifdef AMD_EXTENSIONS
+ } else if (memberContains32BitType) {
+ structContains32BitType = true;
+ RoundToPow2(size, 4);
+ } else if (memberContains16BitType) {
+ structContains16BitType = true;
+ RoundToPow2(size, 2);
+#endif
+ }
+ size += memberSize;
+ }
+
+ if (structContains64BitType) {
+ contains64BitType = true;
+ RoundToPow2(size, 8);
+#ifdef AMD_EXTENSIONS
+ } else if (structContains32BitType) {
+ contains32BitType = true;
+ RoundToPow2(size, 4);
+ } else if (structContains16BitType) {
+ contains16BitType = true;
+ RoundToPow2(size, 2);
+#endif
+ }
+ return size;
+ }
+
+ int numComponents;
+ if (type.isScalar())
+ numComponents = 1;
+ else if (type.isVector())
+ numComponents = type.getVectorSize();
+ else if (type.isMatrix())
+ numComponents = type.getMatrixCols() * type.getMatrixRows();
+ else {
+ assert(0);
+ numComponents = 1;
+ }
+
+ if (type.getBasicType() == EbtDouble || type.getBasicType() == EbtInt64 || type.getBasicType() == EbtUint64) {
+ contains64BitType = true;
+ return 8 * numComponents;
+#ifdef AMD_EXTENSIONS
+ } else if (type.getBasicType() == EbtFloat16 || type.getBasicType() == EbtInt16 || type.getBasicType() == EbtUint16) {
+ contains16BitType = true;
+ return 2 * numComponents;
+ } else if (type.getBasicType() == EbtInt8 || type.getBasicType() == EbtUint8)
+ return numComponents;
+ else {
+ contains32BitType = true;
+ return 4 * numComponents;
+ }
+#else
+ } else
+ return 4 * numComponents;
+#endif
+}
+
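+// Illustrative worked example (not part of the library): capturing
+//
+//     struct S { float f; double d; };
+//
+// flattens to one 4-byte float, padding up to the next multiple of 8, then
+// one 8-byte double: 16 bytes total, with contains64BitType set, so the
+// buffer's stride is later rounded up to a multiple of 8.
+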
+const int baseAlignmentVec4Std140 = 16;
+
+// Return the size and alignment of a component of the given type.
+// The size is returned in the 'size' parameter.
+// Return value is the alignment.
+int TIntermediate::getBaseAlignmentScalar(const TType& type, int& size)
+{
+ switch (type.getBasicType()) {
+ case EbtInt64:
+ case EbtUint64:
+ case EbtDouble: size = 8; return 8;
+ case EbtFloat16: size = 2; return 2;
+ case EbtInt8:
+ case EbtUint8: size = 1; return 1;
+ case EbtInt16:
+ case EbtUint16: size = 2; return 2;
+ case EbtReference: size = 8; return 8;
+ default: size = 4; return 4;
+ }
+}
+
+// Implement base-alignment and size rules from section 7.6.2.2 Standard Uniform Block Layout
+// Operates recursively.
+//
+// If std140 is true, it does the rounding up to vec4 size required by std140,
+// otherwise it does not, yielding std430 rules.
+//
+// The size is returned in the 'size' parameter
+//
+// The stride is only non-0 for arrays or matrices, and is the stride of the
+// top-level object nested within the type. E.g., for an array of matrices,
+// it is the distances needed between matrices, despite the rules saying the
+// stride comes from the flattening down to vectors.
+//
+// Return value is the alignment of the type.
+int TIntermediate::getBaseAlignment(const TType& type, int& size, int& stride, TLayoutPacking layoutPacking, bool rowMajor)
+{
+ int alignment;
+
+ bool std140 = layoutPacking == glslang::ElpStd140;
+ // When using the std140 storage layout, structures will be laid out in buffer
+ // storage with its members stored in monotonically increasing order based on their
+ // location in the declaration. A structure and each structure member have a base
+ // offset and a base alignment, from which an aligned offset is computed by rounding
+ // the base offset up to a multiple of the base alignment. The base offset of the first
+ // member of a structure is taken from the aligned offset of the structure itself. The
+ // base offset of all other structure members is derived by taking the offset of the
+ // last basic machine unit consumed by the previous member and adding one. Each
+ // structure member is stored in memory at its aligned offset. The members of a top-
+ // level uniform block are laid out in buffer storage by treating the uniform block as
+ // a structure with a base offset of zero.
+ //
+ // 1. If the member is a scalar consuming N basic machine units, the base alignment is N.
+ //
+ // 2. If the member is a two- or four-component vector with components consuming N basic
+ // machine units, the base alignment is 2N or 4N, respectively.
+ //
+ // 3. If the member is a three-component vector with components consuming N
+ // basic machine units, the base alignment is 4N.
+ //
+ // 4. If the member is an array of scalars or vectors, the base alignment and array
+ // stride are set to match the base alignment of a single array element, according
+ // to rules (1), (2), and (3), and rounded up to the base alignment of a vec4. The
+ // array may have padding at the end; the base offset of the member following
+ // the array is rounded up to the next multiple of the base alignment.
+ //
+ // 5. If the member is a column-major matrix with C columns and R rows, the
+ // matrix is stored identically to an array of C column vectors with R
+ // components each, according to rule (4).
+ //
+ // 6. If the member is an array of S column-major matrices with C columns and
+ // R rows, the matrix is stored identically to a row of S X C column vectors
+ // with R components each, according to rule (4).
+ //
+ // 7. If the member is a row-major matrix with C columns and R rows, the matrix
+ // is stored identically to an array of R row vectors with C components each,
+ // according to rule (4).
+ //
+ // 8. If the member is an array of S row-major matrices with C columns and R
+ // rows, the matrix is stored identically to a row of S X R row vectors with C
+ // components each, according to rule (4).
+ //
+ // 9. If the member is a structure, the base alignment of the structure is N , where
+ // N is the largest base alignment value of any of its members, and rounded
+ // up to the base alignment of a vec4. The individual members of this substructure
+ // are then assigned offsets by applying this set of rules recursively,
+ // where the base offset of the first member of the sub-structure is equal to the
+ // aligned offset of the structure. The structure may have padding at the end;
+ // the base offset of the member following the sub-structure is rounded up to
+ // the next multiple of the base alignment of the structure.
+ //
+ // 10. If the member is an array of S structures, the S elements of the array are laid
+ // out in order, according to rule (9).
+ //
+ // Assumption for rule 10: the stride is the same as the size of an element.
+
+ stride = 0;
+ int dummyStride;
+
+ // rules 4, 6, 8, and 10
+ if (type.isArray()) {
+ // TODO: perf: this might be flattened by using getCumulativeArraySize(), and a deref that discards all arrayness
+ TType derefType(type, 0);
+ alignment = getBaseAlignment(derefType, size, dummyStride, layoutPacking, rowMajor);
+ if (std140)
+ alignment = std::max(baseAlignmentVec4Std140, alignment);
+ RoundToPow2(size, alignment);
+ stride = size; // uses full matrix size for stride of an array of matrices (not quite what rules 6/8 say, but what's expected)
+ // uses the assumption for rule 10 in the comment above
+ size = stride * type.getOuterArraySize();
+ return alignment;
+ }
+
+ // rule 9
+ if (type.getBasicType() == EbtStruct) {
+ const TTypeList& memberList = *type.getStruct();
+
+ size = 0;
+ int maxAlignment = std140 ? baseAlignmentVec4Std140 : 0;
+ for (size_t m = 0; m < memberList.size(); ++m) {
+ int memberSize;
+ // modify just the children's view of matrix layout, if there is one for this member
+ TLayoutMatrix subMatrixLayout = memberList[m].type->getQualifier().layoutMatrix;
+ int memberAlignment = getBaseAlignment(*memberList[m].type, memberSize, dummyStride, layoutPacking,
+ (subMatrixLayout != ElmNone) ? (subMatrixLayout == ElmRowMajor) : rowMajor);
+ maxAlignment = std::max(maxAlignment, memberAlignment);
+ RoundToPow2(size, memberAlignment);
+ size += memberSize;
+ }
+
+ // The structure may have padding at the end; the base offset of
+ // the member following the sub-structure is rounded up to the next
+ // multiple of the base alignment of the structure.
+ RoundToPow2(size, maxAlignment);
+
+ return maxAlignment;
+ }
+
+ // rule 1
+ if (type.isScalar())
+ return getBaseAlignmentScalar(type, size);
+
+ // rules 2 and 3
+ if (type.isVector()) {
+ int scalarAlign = getBaseAlignmentScalar(type, size);
+ switch (type.getVectorSize()) {
+ case 1: // HLSL has this, GLSL does not
+ return scalarAlign;
+ case 2:
+ size *= 2;
+ return 2 * scalarAlign;
+ default:
+ size *= type.getVectorSize();
+ return 4 * scalarAlign;
+ }
+ }
+
+ // rules 5 and 7
+ if (type.isMatrix()) {
+ // rule 5: deref to row, not to column, meaning the size of vector is num columns instead of num rows
+ TType derefType(type, 0, rowMajor);
+
+ alignment = getBaseAlignment(derefType, size, dummyStride, layoutPacking, rowMajor);
+ if (std140)
+ alignment = std::max(baseAlignmentVec4Std140, alignment);
+ RoundToPow2(size, alignment);
+ stride = size; // use intra-matrix stride for the stride of just a matrix
+ if (rowMajor)
+ size = stride * type.getMatrixRows();
+ else
+ size = stride * type.getMatrixCols();
+
+ return alignment;
+ }
+
+ assert(0); // all cases should be covered above
+ size = baseAlignmentVec4Std140;
+ return baseAlignmentVec4Std140;
+}
+
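+// Illustrative worked example (not part of the library), under std140:
+//
+//     uniform Block {
+//         float f;      // rule 1: align 4,  offset 0
+//         vec3  v;      // rule 3: align 16, offset 16 (rounded up from 4)
+//         float g;      // rule 1: align 4,  offset 28 (packs after the vec3)
+//         vec2  w;      // rule 2: align 8,  offset 32
+//     };                // rule 9: block align 16, size rounded up to 48
+//
+// Under std430 the vec4 rounding in rules 4 and 9 is dropped, so arrays of
+// scalars and vectors pack more tightly.
+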
+// To aid the basic HLSL rule about crossing vec4 boundaries.
+bool TIntermediate::improperStraddle(const TType& type, int size, int offset)
+{
+ if (! type.isVector() || type.isArray())
+ return false;
+
+ return size <= 16 ? offset / 16 != (offset + size - 1) / 16
+ : offset % 16 != 0;
+}
+
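+// Illustrative example (not part of the library): for a vec4 (size 16),
+// improperStraddle(type, 16, 20) is true, since bytes 20..35 cross the
+// 16-byte boundary at 32, while improperStraddle(type, 16, 16) is false.
+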
+int TIntermediate::getScalarAlignment(const TType& type, int& size, int& stride, bool rowMajor)
+{
+ int alignment;
+
+ stride = 0;
+ int dummyStride;
+
+ if (type.isArray()) {
+ TType derefType(type, 0);
+ alignment = getScalarAlignment(derefType, size, dummyStride, rowMajor);
+
+ stride = size;
+ RoundToPow2(stride, alignment);
+
+ size = stride * (type.getOuterArraySize() - 1) + size;
+ return alignment;
+ }
+
+ if (type.getBasicType() == EbtStruct) {
+ const TTypeList& memberList = *type.getStruct();
+
+ size = 0;
+ int maxAlignment = 0;
+ for (size_t m = 0; m < memberList.size(); ++m) {
+ int memberSize;
+ // modify just the children's view of matrix layout, if there is one for this member
+ TLayoutMatrix subMatrixLayout = memberList[m].type->getQualifier().layoutMatrix;
+ int memberAlignment = getScalarAlignment(*memberList[m].type, memberSize, dummyStride,
+ (subMatrixLayout != ElmNone) ? (subMatrixLayout == ElmRowMajor) : rowMajor);
+ maxAlignment = std::max(maxAlignment, memberAlignment);
+ RoundToPow2(size, memberAlignment);
+ size += memberSize;
+ }
+
+ return maxAlignment;
+ }
+
+ if (type.isScalar())
+ return getBaseAlignmentScalar(type, size);
+
+ if (type.isVector()) {
+ int scalarAlign = getBaseAlignmentScalar(type, size);
+
+ size *= type.getVectorSize();
+ return scalarAlign;
+ }
+
+ if (type.isMatrix()) {
+ TType derefType(type, 0, rowMajor);
+
+ alignment = getScalarAlignment(derefType, size, dummyStride, rowMajor);
+
+ stride = size; // use intra-matrix stride for the stride of just a matrix
+ if (rowMajor)
+ size = stride * type.getMatrixRows();
+ else
+ size = stride * type.getMatrixCols();
+
+ return alignment;
+ }
+
+ assert(0); // all cases should be covered above
+ size = 1;
+ return 1;
+}
+
+int TIntermediate::getMemberAlignment(const TType& type, int& size, int& stride, TLayoutPacking layoutPacking, bool rowMajor)
+{
+ if (layoutPacking == glslang::ElpScalar) {
+ return getScalarAlignment(type, size, stride, rowMajor);
+ } else {
+ return getBaseAlignment(type, size, stride, layoutPacking, rowMajor);
+ }
+}
+
+// shared calculation by getOffset and getOffsets
+void TIntermediate::updateOffset(const TType& parentType, const TType& memberType, int& offset, int& memberSize)
+{
+ int dummyStride;
+
+ // modify just the children's view of matrix layout, if there is one for this member
+ TLayoutMatrix subMatrixLayout = memberType.getQualifier().layoutMatrix;
+ int memberAlignment = getMemberAlignment(memberType, memberSize, dummyStride,
+ parentType.getQualifier().layoutPacking,
+ subMatrixLayout != ElmNone
+ ? subMatrixLayout == ElmRowMajor
+ : parentType.getQualifier().layoutMatrix == ElmRowMajor);
+ RoundToPow2(offset, memberAlignment);
+}
+
+// Lookup or calculate the offset of a block member, using the recursively
+// defined block offset rules.
+int TIntermediate::getOffset(const TType& type, int index)
+{
+ const TTypeList& memberList = *type.getStruct();
+
+ // Don't calculate the offset if one is already present; it could be user
+ // supplied and different from what would be calculated. That is, this is
+ // faster, but not just an optimization.
+ if (memberList[index].type->getQualifier().hasOffset())
+ return memberList[index].type->getQualifier().layoutOffset;
+
+ int memberSize = 0;
+ int offset = 0;
+ for (int m = 0; m <= index; ++m) {
+ updateOffset(type, *memberList[m].type, offset, memberSize);
+
+ if (m < index)
+ offset += memberSize;
+ }
+
+ return offset;
+}
+
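+// Illustrative example (not part of the library): for
+//
+//     layout(std140) uniform U { float a; vec2 b; float c; };
+//
+// getOffset(U, 2) walks the members in order: 'a' lands at offset 0 (size 4),
+// 'b' is aligned up to offset 8 (size 8), so 'c' starts at offset 16.
+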
+// Calculate the block data size.
+// Block arrayness is not taken into account; each element is backed by a separate buffer.
+int TIntermediate::getBlockSize(const TType& blockType)
+{
+ const TTypeList& memberList = *blockType.getStruct();
+ int lastIndex = (int)memberList.size() - 1;
+ int lastOffset = getOffset(blockType, lastIndex);
+
+ int lastMemberSize;
+ int dummyStride;
+ getMemberAlignment(*memberList[lastIndex].type, lastMemberSize, dummyStride,
+ blockType.getQualifier().layoutPacking,
+ blockType.getQualifier().layoutMatrix == ElmRowMajor);
+
+ return lastOffset + lastMemberSize;
+}
+
+int TIntermediate::computeBufferReferenceTypeSize(const TType& type)
+{
+ assert(type.getBasicType() == EbtReference);
+ int size = getBlockSize(*type.getReferentType());
+
+ int align = type.getBufferReferenceAlignment();
+
+ if (align) {
+ size = (size + align - 1) & ~(align-1);
+ }
+
+ return size;
+}
+
+} // end namespace glslang
diff --git a/thirdparty/glslang/glslang/MachineIndependent/localintermediate.h b/thirdparty/glslang/glslang/MachineIndependent/localintermediate.h
new file mode 100644
index 0000000000..f29c93c08e
--- /dev/null
+++ b/thirdparty/glslang/glslang/MachineIndependent/localintermediate.h
@@ -0,0 +1,900 @@
+//
+// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+// Copyright (C) 2016 LunarG, Inc.
+// Copyright (C) 2017 ARM Limited.
+// Copyright (C) 2015-2018 Google, Inc.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+#ifndef _LOCAL_INTERMEDIATE_INCLUDED_
+#define _LOCAL_INTERMEDIATE_INCLUDED_
+
+#include "../Include/intermediate.h"
+#include "../Public/ShaderLang.h"
+#include "Versions.h"
+
+#include <string>
+#include <vector>
+#include <algorithm>
+#include <set>
+#include <array>
+
+class TInfoSink;
+
+namespace glslang {
+
+struct TMatrixSelector {
+ int coord1; // stay agnostic about column/row; this is parse order
+ int coord2;
+};
+
+typedef int TVectorSelector;
+
+const int MaxSwizzleSelectors = 4;
+
+template<typename selectorType>
+class TSwizzleSelectors {
+public:
+ TSwizzleSelectors() : size_(0) { }
+
+ void push_back(selectorType comp)
+ {
+ if (size_ < MaxSwizzleSelectors)
+ components[size_++] = comp;
+ }
+ void resize(int s)
+ {
+ assert(s <= size_);
+ size_ = s;
+ }
+ int size() const { return size_; }
+ selectorType operator[](int i) const
+ {
+ assert(i < MaxSwizzleSelectors);
+ return components[i];
+ }
+
+private:
+ int size_;
+ selectorType components[MaxSwizzleSelectors];
+};
+
+//
+// Some helper structures for TIntermediate. Their contents are encapsulated
+// by TIntermediate.
+//
+
+// Used for call-graph algorithms for detecting recursion, missing bodies, and dead bodies.
+// A "call" is a pair: <caller, callee>.
+// There can be duplicates. General assumption is the list is small.
+struct TCall {
+ TCall(const TString& pCaller, const TString& pCallee) : caller(pCaller), callee(pCallee) { }
+ TString caller;
+ TString callee;
+ bool visited;
+ bool currentPath;
+ bool errorGiven;
+ int calleeBodyPosition;
+};
+
+// A generic 1-D range.
+struct TRange {
+ TRange(int start, int last) : start(start), last(last) { }
+ bool overlap(const TRange& rhs) const
+ {
+ return last >= rhs.start && start <= rhs.last;
+ }
+ int start;
+ int last;
+};
+
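+// Illustrative example (not part of the library): ranges are inclusive on
+// both ends, so
+//
+//     TRange a(0, 3), b(3, 5), c(4, 7);
+//     a.overlap(b);    // true: both contain 3
+//     a.overlap(c);    // false: a ends before c starts
+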
+// An IO range is a 3-D rectangle; the set of (location, component, index) triples all lying
+// within the same location range, component range, and index value. Locations don't alias unless
+// all other dimensions of their range overlap.
+struct TIoRange {
+ TIoRange(TRange location, TRange component, TBasicType basicType, int index)
+ : location(location), component(component), basicType(basicType), index(index) { }
+ bool overlap(const TIoRange& rhs) const
+ {
+ return location.overlap(rhs.location) && component.overlap(rhs.component) && index == rhs.index;
+ }
+ TRange location;
+ TRange component;
+ TBasicType basicType;
+ int index;
+};
+
+// An offset range is a 2-D rectangle; the set of (binding, offset) pairs all lying
+// within the same binding and offset range.
+struct TOffsetRange {
+ TOffsetRange(TRange binding, TRange offset)
+ : binding(binding), offset(offset) { }
+ bool overlap(const TOffsetRange& rhs) const
+ {
+ return binding.overlap(rhs.binding) && offset.overlap(rhs.offset);
+ }
+ TRange binding;
+ TRange offset;
+};
+
+// Things that need to be tracked per xfb buffer.
+struct TXfbBuffer {
+#ifdef AMD_EXTENSIONS
+ TXfbBuffer() : stride(TQualifier::layoutXfbStrideEnd), implicitStride(0), contains64BitType(false),
+ contains32BitType(false), contains16BitType(false) { }
+#else
+ TXfbBuffer() : stride(TQualifier::layoutXfbStrideEnd), implicitStride(0), contains64BitType(false) { }
+#endif
+ std::vector<TRange> ranges; // byte offsets that have already been assigned
+ unsigned int stride;
+ unsigned int implicitStride;
+ bool contains64BitType;
+#ifdef AMD_EXTENSIONS
+ bool contains32BitType;
+ bool contains16BitType;
+#endif
+};
+
+// Track a set of strings describing how the module was processed.
+// Using the form:
+// process arg0 arg1 arg2 ...
+// process arg0 arg1 arg2 ...
+// where everything is textual, and there can be zero or more arguments
+class TProcesses {
+public:
+ TProcesses() {}
+ ~TProcesses() {}
+
+ void addProcess(const char* process)
+ {
+ processes.push_back(process);
+ }
+ void addProcess(const std::string& process)
+ {
+ processes.push_back(process);
+ }
+ void addArgument(int arg)
+ {
+ processes.back().append(" ");
+ std::string argString = std::to_string(arg);
+ processes.back().append(argString);
+ }
+ void addArgument(const char* arg)
+ {
+ processes.back().append(" ");
+ processes.back().append(arg);
+ }
+ void addArgument(const std::string& arg)
+ {
+ processes.back().append(" ");
+ processes.back().append(arg);
+ }
+ void addIfNonZero(const char* process, int value)
+ {
+ if (value != 0) {
+ addProcess(process);
+ addArgument(value);
+ }
+ }
+
+ const std::vector<std::string>& getProcesses() const { return processes; }
+
+private:
+ std::vector<std::string> processes;
+};
+
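+// Illustrative example (not part of the library): the calls
+//
+//     processes.addProcess("entry-point");
+//     processes.addArgument("main");
+//
+// record the single textual line "entry-point main"; downstream tools can
+// embed such lines (for example, as SPIR-V OpModuleProcessed instructions)
+// to describe how the module was produced.
+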
+class TSymbolTable;
+class TSymbol;
+class TVariable;
+
+#ifdef NV_EXTENSIONS
+//
+// Texture and Sampler transformation mode.
+//
+enum ComputeDerivativeMode {
+ LayoutDerivativeNone, // default layout as SPV_NV_compute_shader_derivatives not enabled
+ LayoutDerivativeGroupQuads, // derivative_group_quadsNV
+ LayoutDerivativeGroupLinear, // derivative_group_linearNV
+};
+#endif
+
+//
+// Set of helper functions to help parse and build the tree.
+//
+class TIntermediate {
+public:
+ explicit TIntermediate(EShLanguage l, int v = 0, EProfile p = ENoProfile) :
+ implicitThisName("@this"), implicitCounterName("@count"),
+ language(l), source(EShSourceNone), profile(p), version(v), treeRoot(0),
+ numEntryPoints(0), numErrors(0), numPushConstants(0), recursive(false),
+ invocations(TQualifier::layoutNotSet), vertices(TQualifier::layoutNotSet),
+ inputPrimitive(ElgNone), outputPrimitive(ElgNone),
+ pixelCenterInteger(false), originUpperLeft(false),
+ vertexSpacing(EvsNone), vertexOrder(EvoNone), pointMode(false), earlyFragmentTests(false),
+ postDepthCoverage(false), depthLayout(EldNone), depthReplacing(false),
+ hlslFunctionality1(false),
+ blendEquations(0), xfbMode(false), multiStream(false),
+#ifdef NV_EXTENSIONS
+ layoutOverrideCoverage(false),
+ geoPassthroughEXT(false),
+ numShaderRecordNVBlocks(0),
+ computeDerivativeMode(LayoutDerivativeNone),
+ primitives(TQualifier::layoutNotSet),
+ numTaskNVBlocks(0),
+#endif
+ autoMapBindings(false),
+ autoMapLocations(false),
+ invertY(false),
+ flattenUniformArrays(false),
+ useUnknownFormat(false),
+ hlslOffsets(false),
+ useStorageBuffer(false),
+ useVulkanMemoryModel(false),
+ hlslIoMapping(false),
+ useVariablePointers(false),
+ textureSamplerTransformMode(EShTexSampTransKeep),
+ needToLegalize(false),
+ binaryDoubleOutput(false),
+ usePhysicalStorageBuffer(false),
+ uniformLocationBase(0)
+ {
+ localSize[0] = 1;
+ localSize[1] = 1;
+ localSize[2] = 1;
+ localSizeSpecId[0] = TQualifier::layoutNotSet;
+ localSizeSpecId[1] = TQualifier::layoutNotSet;
+ localSizeSpecId[2] = TQualifier::layoutNotSet;
+ xfbBuffers.resize(TQualifier::layoutXfbBufferEnd);
+
+ shiftBinding.fill(0);
+ }
+ void setLimits(const TBuiltInResource& r) { resources = r; }
+
+ bool postProcess(TIntermNode*, EShLanguage);
+ void output(TInfoSink&, bool tree);
+ void removeTree();
+
+ void setSource(EShSource s) { source = s; }
+ EShSource getSource() const { return source; }
+ void setEntryPointName(const char* ep)
+ {
+ entryPointName = ep;
+ processes.addProcess("entry-point");
+ processes.addArgument(entryPointName);
+ }
+ void setEntryPointMangledName(const char* ep) { entryPointMangledName = ep; }
+ const std::string& getEntryPointName() const { return entryPointName; }
+ const std::string& getEntryPointMangledName() const { return entryPointMangledName; }
+
+ void setShiftBinding(TResourceType res, unsigned int shift)
+ {
+ shiftBinding[res] = shift;
+
+ const char* name = getResourceName(res);
+ if (name != nullptr)
+ processes.addIfNonZero(name, shift);
+ }
+
+ unsigned int getShiftBinding(TResourceType res) const { return shiftBinding[res]; }
+
+ void setShiftBindingForSet(TResourceType res, unsigned int shift, unsigned int set)
+ {
+ if (shift == 0) // ignore if there's no shift: it's a no-op.
+ return;
+
+ shiftBindingForSet[res][set] = shift;
+
+ const char* name = getResourceName(res);
+ if (name != nullptr) {
+ processes.addProcess(name);
+ processes.addArgument(shift);
+ processes.addArgument(set);
+ }
+ }
+
+ int getShiftBindingForSet(TResourceType res, unsigned int set) const
+ {
+ const auto shift = shiftBindingForSet[res].find(set);
+ return shift == shiftBindingForSet[res].end() ? -1 : shift->second;
+ }
+ bool hasShiftBindingForSet(TResourceType res) const { return !shiftBindingForSet[res].empty(); }
+
+ void setResourceSetBinding(const std::vector<std::string>& shift)
+ {
+ resourceSetBinding = shift;
+ if (shift.size() > 0) {
+ processes.addProcess("resource-set-binding");
+ for (int s = 0; s < (int)shift.size(); ++s)
+ processes.addArgument(shift[s]);
+ }
+ }
+ const std::vector<std::string>& getResourceSetBinding() const { return resourceSetBinding; }
+ void setAutoMapBindings(bool map)
+ {
+ autoMapBindings = map;
+ if (autoMapBindings)
+ processes.addProcess("auto-map-bindings");
+ }
+ bool getAutoMapBindings() const { return autoMapBindings; }
+ void setAutoMapLocations(bool map)
+ {
+ autoMapLocations = map;
+ if (autoMapLocations)
+ processes.addProcess("auto-map-locations");
+ }
+ bool getAutoMapLocations() const { return autoMapLocations; }
+ void setInvertY(bool invert)
+ {
+ invertY = invert;
+ if (invertY)
+ processes.addProcess("invert-y");
+ }
+ bool getInvertY() const { return invertY; }
+
+ void setFlattenUniformArrays(bool flatten)
+ {
+ flattenUniformArrays = flatten;
+ if (flattenUniformArrays)
+ processes.addProcess("flatten-uniform-arrays");
+ }
+ bool getFlattenUniformArrays() const { return flattenUniformArrays; }
+ void setNoStorageFormat(bool b)
+ {
+ useUnknownFormat = b;
+ if (useUnknownFormat)
+ processes.addProcess("no-storage-format");
+ }
+ bool getNoStorageFormat() const { return useUnknownFormat; }
+ void setHlslOffsets()
+ {
+ hlslOffsets = true;
+ if (hlslOffsets)
+ processes.addProcess("hlsl-offsets");
+ }
+ bool usingHlslOffsets() const { return hlslOffsets; }
+ void setUseStorageBuffer()
+ {
+ useStorageBuffer = true;
+ processes.addProcess("use-storage-buffer");
+ }
+ bool usingStorageBuffer() const { return useStorageBuffer; }
+ void setHlslIoMapping(bool b)
+ {
+ hlslIoMapping = b;
+ if (hlslIoMapping)
+ processes.addProcess("hlsl-iomap");
+ }
+ bool usingHlslIoMapping() const { return hlslIoMapping; }
+ void setUseVulkanMemoryModel()
+ {
+ useVulkanMemoryModel = true;
+ processes.addProcess("use-vulkan-memory-model");
+ }
+ bool usingVulkanMemoryModel() const { return useVulkanMemoryModel; }
+ void setUsePhysicalStorageBuffer()
+ {
+ usePhysicalStorageBuffer = true;
+ }
+ bool usingPhysicalStorageBuffer() const { return usePhysicalStorageBuffer; }
+ void setUseVariablePointers()
+ {
+ useVariablePointers = true;
+ processes.addProcess("use-variable-pointers");
+ }
+ bool usingVariablePointers() const { return useVariablePointers; }
+
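+ // Helpers for the implicit counter-buffer naming convention (implicitCounterName,
+ // used e.g. for HLSL counter buffers): append the suffix to a name, or test for it.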
+ template<class T> T addCounterBufferName(const T& name) const { return name + implicitCounterName; }
+ bool hasCounterBufferName(const TString& name) const {
+ size_t len = strlen(implicitCounterName);
+ return name.size() > len &&
+ name.compare(name.size() - len, len, implicitCounterName) == 0;
+ }
+
+ void setTextureSamplerTransformMode(EShTextureSamplerTransformMode mode) { textureSamplerTransformMode = mode; }
+
+ void setVersion(int v) { version = v; }
+ int getVersion() const { return version; }
+ void setProfile(EProfile p) { profile = p; }
+ EProfile getProfile() const { return profile; }
+ void setSpv(const SpvVersion& s)
+ {
+ spvVersion = s;
+
+ // client processes
+ if (spvVersion.vulkan > 0)
+ processes.addProcess("client vulkan100");
+ if (spvVersion.openGl > 0)
+ processes.addProcess("client opengl100");
+
+ // target SPV
+ switch (spvVersion.spv) {
+ case 0:
+ break;
+ case EShTargetSpv_1_0:
+ break;
+ case EShTargetSpv_1_1:
+ processes.addProcess("target-env spirv1.1");
+ break;
+ case EShTargetSpv_1_2:
+ processes.addProcess("target-env spirv1.2");
+ break;
+ case EShTargetSpv_1_3:
+ processes.addProcess("target-env spirv1.3");
+ break;
+ default:
+ processes.addProcess("target-env spirvUnknown");
+ break;
+ }
+
+ // target-environment processes
+ switch (spvVersion.vulkan) {
+ case 0:
+ break;
+ case EShTargetVulkan_1_0:
+ processes.addProcess("target-env vulkan1.0");
+ break;
+ case EShTargetVulkan_1_1:
+ processes.addProcess("target-env vulkan1.1");
+ break;
+ default:
+ processes.addProcess("target-env vulkanUnknown");
+ break;
+ }
+ if (spvVersion.openGl > 0)
+ processes.addProcess("target-env opengl");
+ }
+ const SpvVersion& getSpv() const { return spvVersion; }
+ EShLanguage getStage() const { return language; }
+ void addRequestedExtension(const char* extension) { requestedExtensions.insert(extension); }
+ const std::set<std::string>& getRequestedExtensions() const { return requestedExtensions; }
+
+ void setTreeRoot(TIntermNode* r) { treeRoot = r; }
+ TIntermNode* getTreeRoot() const { return treeRoot; }
+ void incrementEntryPointCount() { ++numEntryPoints; }
+ int getNumEntryPoints() const { return numEntryPoints; }
+ int getNumErrors() const { return numErrors; }
+ void addPushConstantCount() { ++numPushConstants; }
+#ifdef NV_EXTENSIONS
+ void addShaderRecordNVCount() { ++numShaderRecordNVBlocks; }
+ void addTaskNVCount() { ++numTaskNVBlocks; }
+#endif
+
+ bool isRecursive() const { return recursive; }
+
+ TIntermSymbol* addSymbol(const TVariable&);
+ TIntermSymbol* addSymbol(const TVariable&, const TSourceLoc&);
+ TIntermSymbol* addSymbol(const TType&, const TSourceLoc&);
+ TIntermSymbol* addSymbol(const TIntermSymbol&);
+ TIntermTyped* addConversion(TOperator, const TType&, TIntermTyped*);
+ std::tuple<TIntermTyped*, TIntermTyped*> addConversion(TOperator op, TIntermTyped* node0, TIntermTyped* node1);
+ TIntermTyped* addUniShapeConversion(TOperator, const TType&, TIntermTyped*);
+ TIntermTyped* addConversion(TBasicType convertTo, TIntermTyped* node) const;
+ void addBiShapeConversion(TOperator, TIntermTyped*& lhsNode, TIntermTyped*& rhsNode);
+ TIntermTyped* addShapeConversion(const TType&, TIntermTyped*);
+ TIntermTyped* addBinaryMath(TOperator, TIntermTyped* left, TIntermTyped* right, TSourceLoc);
+ TIntermTyped* addAssign(TOperator op, TIntermTyped* left, TIntermTyped* right, TSourceLoc);
+ TIntermTyped* addIndex(TOperator op, TIntermTyped* base, TIntermTyped* index, TSourceLoc);
+ TIntermTyped* addUnaryMath(TOperator, TIntermTyped* child, TSourceLoc);
+ TIntermTyped* addBuiltInFunctionCall(const TSourceLoc& line, TOperator, bool unary, TIntermNode*, const TType& returnType);
+ bool canImplicitlyPromote(TBasicType from, TBasicType to, TOperator op = EOpNull) const;
+ bool isIntegralPromotion(TBasicType from, TBasicType to) const;
+ bool isFPPromotion(TBasicType from, TBasicType to) const;
+ bool isIntegralConversion(TBasicType from, TBasicType to) const;
+ bool isFPConversion(TBasicType from, TBasicType to) const;
+ bool isFPIntegralConversion(TBasicType from, TBasicType to) const;
+ TOperator mapTypeToConstructorOp(const TType&) const;
+ TIntermAggregate* growAggregate(TIntermNode* left, TIntermNode* right);
+ TIntermAggregate* growAggregate(TIntermNode* left, TIntermNode* right, const TSourceLoc&);
+ TIntermAggregate* makeAggregate(TIntermNode* node);
+ TIntermAggregate* makeAggregate(TIntermNode* node, const TSourceLoc&);
+ TIntermAggregate* makeAggregate(const TSourceLoc&);
+ TIntermTyped* setAggregateOperator(TIntermNode*, TOperator, const TType& type, TSourceLoc);
+ bool areAllChildConst(TIntermAggregate* aggrNode);
+ TIntermSelection* addSelection(TIntermTyped* cond, TIntermNodePair code, const TSourceLoc&);
+ TIntermTyped* addSelection(TIntermTyped* cond, TIntermTyped* trueBlock, TIntermTyped* falseBlock, const TSourceLoc&);
+ TIntermTyped* addComma(TIntermTyped* left, TIntermTyped* right, const TSourceLoc&);
+ TIntermTyped* addMethod(TIntermTyped*, const TType&, const TString*, const TSourceLoc&);
+ TIntermConstantUnion* addConstantUnion(const TConstUnionArray&, const TType&, const TSourceLoc&, bool literal = false) const;
+ TIntermConstantUnion* addConstantUnion(signed char, const TSourceLoc&, bool literal = false) const;
+ TIntermConstantUnion* addConstantUnion(unsigned char, const TSourceLoc&, bool literal = false) const;
+ TIntermConstantUnion* addConstantUnion(signed short, const TSourceLoc&, bool literal = false) const;
+ TIntermConstantUnion* addConstantUnion(unsigned short, const TSourceLoc&, bool literal = false) const;
+ TIntermConstantUnion* addConstantUnion(int, const TSourceLoc&, bool literal = false) const;
+ TIntermConstantUnion* addConstantUnion(unsigned int, const TSourceLoc&, bool literal = false) const;
+ TIntermConstantUnion* addConstantUnion(long long, const TSourceLoc&, bool literal = false) const;
+ TIntermConstantUnion* addConstantUnion(unsigned long long, const TSourceLoc&, bool literal = false) const;
+ TIntermConstantUnion* addConstantUnion(bool, const TSourceLoc&, bool literal = false) const;
+ TIntermConstantUnion* addConstantUnion(double, TBasicType, const TSourceLoc&, bool literal = false) const;
+ TIntermConstantUnion* addConstantUnion(const TString*, const TSourceLoc&, bool literal = false) const;
+ TIntermTyped* promoteConstantUnion(TBasicType, TIntermConstantUnion*) const;
+ bool parseConstTree(TIntermNode*, TConstUnionArray, TOperator, const TType&, bool singleConstantParam = false);
+ TIntermLoop* addLoop(TIntermNode*, TIntermTyped*, TIntermTyped*, bool testFirst, const TSourceLoc&);
+ TIntermAggregate* addForLoop(TIntermNode*, TIntermNode*, TIntermTyped*, TIntermTyped*, bool testFirst,
+ const TSourceLoc&, TIntermLoop*&);
+ TIntermBranch* addBranch(TOperator, const TSourceLoc&);
+ TIntermBranch* addBranch(TOperator, TIntermTyped*, const TSourceLoc&);
+ template<typename selectorType> TIntermTyped* addSwizzle(TSwizzleSelectors<selectorType>&, const TSourceLoc&);
+
+ // Low level functions to add nodes (no conversions or other higher level transformations)
+ // If a type is provided, the node's type will be set to it.
+ TIntermBinary* addBinaryNode(TOperator op, TIntermTyped* left, TIntermTyped* right, TSourceLoc) const;
+ TIntermBinary* addBinaryNode(TOperator op, TIntermTyped* left, TIntermTyped* right, TSourceLoc, const TType&) const;
+ TIntermUnary* addUnaryNode(TOperator op, TIntermTyped* child, TSourceLoc) const;
+ TIntermUnary* addUnaryNode(TOperator op, TIntermTyped* child, TSourceLoc, const TType&) const;
+
+ // Constant folding (in Constant.cpp)
+ TIntermTyped* fold(TIntermAggregate* aggrNode);
+ TIntermTyped* foldConstructor(TIntermAggregate* aggrNode);
+ TIntermTyped* foldDereference(TIntermTyped* node, int index, const TSourceLoc&);
+ TIntermTyped* foldSwizzle(TIntermTyped* node, TSwizzleSelectors<TVectorSelector>& fields, const TSourceLoc&);
+
+ // Tree ops
+ static const TIntermTyped* findLValueBase(const TIntermTyped*, bool swizzleOkay);
+
+ // Linkage related
+ void addSymbolLinkageNodes(TIntermAggregate*& linkage, EShLanguage, TSymbolTable&);
+ void addSymbolLinkageNode(TIntermAggregate*& linkage, const TSymbol&);
+
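+ // The layout-state setters below use set-once semantics so values can be
+ // reconciled when merging stages: the first call records the value; later
+ // calls succeed only if they agree with what was already recorded.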
+ bool setInvocations(int i)
+ {
+ if (invocations != TQualifier::layoutNotSet)
+ return invocations == i;
+ invocations = i;
+ return true;
+ }
+ int getInvocations() const { return invocations; }
+ bool setVertices(int m)
+ {
+ if (vertices != TQualifier::layoutNotSet)
+ return vertices == m;
+ vertices = m;
+ return true;
+ }
+ int getVertices() const { return vertices; }
+ bool setInputPrimitive(TLayoutGeometry p)
+ {
+ if (inputPrimitive != ElgNone)
+ return inputPrimitive == p;
+ inputPrimitive = p;
+ return true;
+ }
+ TLayoutGeometry getInputPrimitive() const { return inputPrimitive; }
+ bool setVertexSpacing(TVertexSpacing s)
+ {
+ if (vertexSpacing != EvsNone)
+ return vertexSpacing == s;
+ vertexSpacing = s;
+ return true;
+ }
+ TVertexSpacing getVertexSpacing() const { return vertexSpacing; }
+ bool setVertexOrder(TVertexOrder o)
+ {
+ if (vertexOrder != EvoNone)
+ return vertexOrder == o;
+ vertexOrder = o;
+ return true;
+ }
+ TVertexOrder getVertexOrder() const { return vertexOrder; }
+ void setPointMode() { pointMode = true; }
+ bool getPointMode() const { return pointMode; }
+
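+ // A local size of 1 is the default and may still be overridden; a size
+ // already set above 1 must match the new request.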
+ bool setLocalSize(int dim, int size)
+ {
+ if (localSize[dim] > 1)
+ return size == localSize[dim];
+ localSize[dim] = size;
+ return true;
+ }
+ unsigned int getLocalSize(int dim) const { return localSize[dim]; }
+
+ bool setLocalSizeSpecId(int dim, int id)
+ {
+ if (localSizeSpecId[dim] != TQualifier::layoutNotSet)
+ return id == localSizeSpecId[dim];
+ localSizeSpecId[dim] = id;
+ return true;
+ }
+ int getLocalSizeSpecId(int dim) const { return localSizeSpecId[dim]; }
+
+ void setXfbMode() { xfbMode = true; }
+ bool getXfbMode() const { return xfbMode; }
+ void setMultiStream() { multiStream = true; }
+ bool isMultiStream() const { return multiStream; }
+ bool setOutputPrimitive(TLayoutGeometry p)
+ {
+ if (outputPrimitive != ElgNone)
+ return outputPrimitive == p;
+ outputPrimitive = p;
+ return true;
+ }
+ TLayoutGeometry getOutputPrimitive() const { return outputPrimitive; }
+ void setOriginUpperLeft() { originUpperLeft = true; }
+ bool getOriginUpperLeft() const { return originUpperLeft; }
+ void setPixelCenterInteger() { pixelCenterInteger = true; }
+ bool getPixelCenterInteger() const { return pixelCenterInteger; }
+ void setEarlyFragmentTests() { earlyFragmentTests = true; }
+ bool getEarlyFragmentTests() const { return earlyFragmentTests; }
+ void setPostDepthCoverage() { postDepthCoverage = true; }
+ bool getPostDepthCoverage() const { return postDepthCoverage; }
+ bool setDepth(TLayoutDepth d)
+ {
+ if (depthLayout != EldNone)
+ return depthLayout == d;
+ depthLayout = d;
+ return true;
+ }
+ TLayoutDepth getDepth() const { return depthLayout; }
+ void setDepthReplacing() { depthReplacing = true; }
+ bool isDepthReplacing() const { return depthReplacing; }
+
+ void setHlslFunctionality1() { hlslFunctionality1 = true; }
+ bool getHlslFunctionality1() const { return hlslFunctionality1; }
+
+ void addBlendEquation(TBlendEquationShift b) { blendEquations |= (1 << b); }
+ unsigned int getBlendEquations() const { return blendEquations; }
+
+ void addToCallGraph(TInfoSink&, const TString& caller, const TString& callee);
+ void merge(TInfoSink&, TIntermediate&);
+ void finalCheck(TInfoSink&, bool keepUncalled);
+
+ void addIoAccessed(const TString& name) { ioAccessed.insert(name); }
+ bool inIoAccessed(const TString& name) const { return ioAccessed.find(name) != ioAccessed.end(); }
+
+ int addUsedLocation(const TQualifier&, const TType&, bool& typeCollision);
+ int checkLocationRange(int set, const TIoRange& range, const TType&, bool& typeCollision);
+ int addUsedOffsets(int binding, int offset, int numOffsets);
+ bool addUsedConstantId(int id);
+ static int computeTypeLocationSize(const TType&, EShLanguage);
+ static int computeTypeUniformLocationSize(const TType&);
+
+ bool setXfbBufferStride(int buffer, unsigned stride)
+ {
+ if (xfbBuffers[buffer].stride != TQualifier::layoutXfbStrideEnd)
+ return xfbBuffers[buffer].stride == stride;
+ xfbBuffers[buffer].stride = stride;
+ return true;
+ }
+ unsigned getXfbStride(int buffer) const { return xfbBuffers[buffer].stride; }
+ int addXfbBufferOffset(const TType&);
+#ifdef AMD_EXTENSIONS
+ unsigned int computeTypeXfbSize(const TType&, bool& contains64BitType, bool& contains32BitType, bool& contains16BitType) const;
+#else
+ unsigned int computeTypeXfbSize(const TType&, bool& contains64BitType) const;
+#endif
+ static int getBaseAlignmentScalar(const TType&, int& size);
+ static int getBaseAlignment(const TType&, int& size, int& stride, TLayoutPacking layoutPacking, bool rowMajor);
+ static int getScalarAlignment(const TType&, int& size, int& stride, bool rowMajor);
+ static int getMemberAlignment(const TType&, int& size, int& stride, TLayoutPacking layoutPacking, bool rowMajor);
+ static bool improperStraddle(const TType& type, int size, int offset);
+ static void updateOffset(const TType& parentType, const TType& memberType, int& offset, int& memberSize);
+ static int getOffset(const TType& type, int index);
+ static int getBlockSize(const TType& blockType);
+ static int computeBufferReferenceTypeSize(const TType&);
+ bool promote(TIntermOperator*);
+
+#ifdef NV_EXTENSIONS
+ void setLayoutOverrideCoverage() { layoutOverrideCoverage = true; }
+ bool getLayoutOverrideCoverage() const { return layoutOverrideCoverage; }
+ void setGeoPassthroughEXT() { geoPassthroughEXT = true; }
+ bool getGeoPassthroughEXT() const { return geoPassthroughEXT; }
+ void setLayoutDerivativeMode(ComputeDerivativeMode mode) { computeDerivativeMode = mode; }
+ ComputeDerivativeMode getLayoutDerivativeModeNone() const { return computeDerivativeMode; }
+ bool setPrimitives(int m)
+ {
+ if (primitives != TQualifier::layoutNotSet)
+ return primitives == m;
+ primitives = m;
+ return true;
+ }
+ int getPrimitives() const { return primitives; }
+#endif
+
+ const char* addSemanticName(const TString& name)
+ {
+ return semanticNameSet.insert(name).first->c_str();
+ }
+
+ void setSourceFile(const char* file) { if (file != nullptr) sourceFile = file; }
+ const std::string& getSourceFile() const { return sourceFile; }
+ void addSourceText(const char* text, size_t len) { sourceText.append(text, len); }
+ const std::string& getSourceText() const { return sourceText; }
+ const std::map<std::string, std::string>& getIncludeText() const { return includeText; }
+ void addIncludeText(const char* name, const char* text, size_t len) { includeText[name].assign(text, len); }
+ void addProcesses(const std::vector<std::string>& p)
+ {
+ for (int i = 0; i < (int)p.size(); ++i)
+ processes.addProcess(p[i]);
+ }
+ void addProcess(const std::string& process) { processes.addProcess(process); }
+ void addProcessArgument(const std::string& arg) { processes.addArgument(arg); }
+ const std::vector<std::string>& getProcesses() const { return processes.getProcesses(); }
+
+ void addUniformLocationOverride(const char* nameStr, int location)
+ {
+ std::string name = nameStr;
+ uniformLocationOverrides[name] = location;
+ }
+
+ int getUniformLocationOverride(const char* nameStr) const
+ {
+ std::string name = nameStr;
+ auto pos = uniformLocationOverrides.find(name);
+ if (pos == uniformLocationOverrides.end())
+ return -1;
+ else
+ return pos->second;
+ }
+
+ void setUniformLocationBase(int base) { uniformLocationBase = base; }
+ int getUniformLocationBase() const { return uniformLocationBase; }
+
+ void setNeedsLegalization() { needToLegalize = true; }
+ bool needsLegalization() const { return needToLegalize; }
+
+ void setBinaryDoubleOutput() { binaryDoubleOutput = true; }
+ bool getBinaryDoubleOutput() const { return binaryDoubleOutput; }
+
+ const char* const implicitThisName;
+ const char* const implicitCounterName;
+
+protected:
+ TIntermSymbol* addSymbol(int Id, const TString&, const TType&, const TConstUnionArray&, TIntermTyped* subtree, const TSourceLoc&);
+ void error(TInfoSink& infoSink, const char*);
+ void warn(TInfoSink& infoSink, const char*);
+ void mergeCallGraphs(TInfoSink&, TIntermediate&);
+ void mergeModes(TInfoSink&, TIntermediate&);
+ void mergeTrees(TInfoSink&, TIntermediate&);
+ void seedIdMap(TMap<TString, int>& idMap, int& maxId);
+ void remapIds(const TMap<TString, int>& idMap, int idShift, TIntermediate&);
+ void mergeBodies(TInfoSink&, TIntermSequence& globals, const TIntermSequence& unitGlobals);
+ void mergeLinkerObjects(TInfoSink&, TIntermSequence& linkerObjects, const TIntermSequence& unitLinkerObjects);
+ void mergeImplicitArraySizes(TType&, const TType&);
+ void mergeErrorCheck(TInfoSink&, const TIntermSymbol&, const TIntermSymbol&, bool crossStage);
+ void checkCallGraphCycles(TInfoSink&);
+ void checkCallGraphBodies(TInfoSink&, bool keepUncalled);
+ void inOutLocationCheck(TInfoSink&);
+ TIntermAggregate* findLinkerObjects() const;
+ bool userOutputUsed() const;
+ bool isSpecializationOperation(const TIntermOperator&) const;
+ bool isNonuniformPropagating(TOperator) const;
+ bool promoteUnary(TIntermUnary&);
+ bool promoteBinary(TIntermBinary&);
+ void addSymbolLinkageNode(TIntermAggregate*& linkage, TSymbolTable&, const TString&);
+ bool promoteAggregate(TIntermAggregate&);
+ void pushSelector(TIntermSequence&, const TVectorSelector&, const TSourceLoc&);
+ void pushSelector(TIntermSequence&, const TMatrixSelector&, const TSourceLoc&);
+ bool specConstantPropagates(const TIntermTyped&, const TIntermTyped&);
+ void performTextureUpgradeAndSamplerRemovalTransformation(TIntermNode* root);
+ bool isConversionAllowed(TOperator op, TIntermTyped* node) const;
+ TIntermTyped* createConversion(TBasicType convertTo, TIntermTyped* node) const;
+ std::tuple<TBasicType, TBasicType> getConversionDestinatonType(TBasicType type0, TBasicType type1, TOperator op) const;
+ bool extensionRequested(const char *extension) const {return requestedExtensions.find(extension) != requestedExtensions.end();}
+ static const char* getResourceName(TResourceType);
+
+ const EShLanguage language; // stage, known at construction time
+ EShSource source; // source language, known a bit later
+ std::string entryPointName;
+ std::string entryPointMangledName;
+ typedef std::list<TCall> TGraph;
+ TGraph callGraph;
+
+ EProfile profile; // source profile
+ int version; // source version
+ SpvVersion spvVersion;
+ TIntermNode* treeRoot;
+ std::set<std::string> requestedExtensions; // accumulation of all enabled or required extensions; not connected to what subset of the shader used them
+ TBuiltInResource resources;
+ int numEntryPoints;
+ int numErrors;
+ int numPushConstants;
+ bool recursive;
+ int invocations;
+ int vertices;
+ TLayoutGeometry inputPrimitive;
+ TLayoutGeometry outputPrimitive;
+ bool pixelCenterInteger;
+ bool originUpperLeft;
+ TVertexSpacing vertexSpacing;
+ TVertexOrder vertexOrder;
+ bool pointMode;
+ int localSize[3];
+ int localSizeSpecId[3];
+ bool earlyFragmentTests;
+ bool postDepthCoverage;
+ TLayoutDepth depthLayout;
+ bool depthReplacing;
+ bool hlslFunctionality1;
+ int blendEquations; // bitmask of (1 << TBlendEquationShift) values OR'd together
+ bool xfbMode;
+ std::vector<TXfbBuffer> xfbBuffers; // all the data we need to track per xfb buffer
+ bool multiStream;
+
+#ifdef NV_EXTENSIONS
+ bool layoutOverrideCoverage;
+ bool geoPassthroughEXT;
+ int numShaderRecordNVBlocks;
+ ComputeDerivativeMode computeDerivativeMode;
+ int primitives;
+ int numTaskNVBlocks;
+#endif
+
+ // Base shift values
+ std::array<unsigned int, EResCount> shiftBinding;
+
+ // Per-descriptor-set shift values
+ std::array<std::map<int, int>, EResCount> shiftBindingForSet;
+
+ std::vector<std::string> resourceSetBinding;
+ bool autoMapBindings;
+ bool autoMapLocations;
+ bool invertY;
+ bool flattenUniformArrays;
+ bool useUnknownFormat;
+ bool hlslOffsets;
+ bool useStorageBuffer;
+ bool useVulkanMemoryModel;
+ bool hlslIoMapping;
+ bool useVariablePointers;
+
+ std::set<TString> ioAccessed; // set of names of statically read/written I/O that might need extra checking
+ std::vector<TIoRange> usedIo[4]; // sets of used locations, one for each of in, out, uniform, and buffers
+ std::vector<TOffsetRange> usedAtomics; // sets of bindings used by atomic counters
+ std::unordered_set<int> usedConstantId; // specialization constant ids used
+ std::set<TString> semanticNameSet;
+
+ EShTextureSamplerTransformMode textureSamplerTransformMode;
+
+ // source code of shader, useful as part of debug information
+ std::string sourceFile;
+ std::string sourceText;
+
+ // Included text. First string is a name, second is the included text
+ std::map<std::string, std::string> includeText;
+
+ // for OpModuleProcessed, or equivalent
+ TProcesses processes;
+
+ bool needToLegalize;
+ bool binaryDoubleOutput;
+ bool usePhysicalStorageBuffer;
+
+ std::unordered_map<std::string, int> uniformLocationOverrides;
+ int uniformLocationBase;
+
+private:
+ void operator=(TIntermediate&); // prevent assignments
+};
+
+} // end namespace glslang
+
+#endif // _LOCAL_INTERMEDIATE_INCLUDED_
diff --git a/thirdparty/glslang/glslang/MachineIndependent/parseConst.cpp b/thirdparty/glslang/glslang/MachineIndependent/parseConst.cpp
new file mode 100644
index 0000000000..1a8e6d9987
--- /dev/null
+++ b/thirdparty/glslang/glslang/MachineIndependent/parseConst.cpp
@@ -0,0 +1,204 @@
+//
+// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+//
+// Traverse a tree of constants to create a single folded constant.
+// It should only be used when the whole tree is known to be constant.
+//
+
+#include "ParseHelper.h"
+
+namespace glslang {
+
+class TConstTraverser : public TIntermTraverser {
+public:
+ TConstTraverser(const TConstUnionArray& cUnion, bool singleConstParam, TOperator constructType, const TType& t)
+ : unionArray(cUnion), type(t),
+ constructorType(constructType), singleConstantParam(singleConstParam), error(false), size(0),
+ isMatrix(false), matrixCols(0), matrixRows(0) { index = 0; tOp = EOpNull; }
+
+ virtual void visitConstantUnion(TIntermConstantUnion* node);
+ virtual bool visitAggregate(TVisit, TIntermAggregate* node);
+
+ int index;
+ TConstUnionArray unionArray;
+ TOperator tOp;
+ const TType& type;
+ TOperator constructorType;
+ bool singleConstantParam;
+ bool error;
+ int size; // size of the constructor (e.g. 4 for vec4)
+ bool isMatrix;
+ int matrixCols;
+ int matrixRows;
+
+protected:
+ TConstTraverser(TConstTraverser&);
+ TConstTraverser& operator=(TConstTraverser&);
+};
+
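+// Record an error for any aggregate that is not a constructor or comma, and
+// detect a constructor with a single constant argument so visitConstantUnion
+// below can smear the scalar (or build a matrix) across the whole object.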
+bool TConstTraverser::visitAggregate(TVisit /* visit */, TIntermAggregate* node)
+{
+ if (! node->isConstructor() && node->getOp() != EOpComma) {
+ error = true;
+
+ return false;
+ }
+
+ bool flag = node->getSequence().size() == 1 && node->getSequence()[0]->getAsTyped()->getAsConstantUnion();
+ if (flag) {
+ singleConstantParam = true;
+ constructorType = node->getOp();
+ size = node->getType().computeNumComponents();
+
+ if (node->getType().isMatrix()) {
+ isMatrix = true;
+ matrixCols = node->getType().getMatrixCols();
+ matrixRows = node->getType().getMatrixRows();
+ }
+ }
+
+ for (TIntermSequence::iterator p = node->getSequence().begin();
+ p != node->getSequence().end(); p++) {
+
+ if (node->getOp() == EOpComma)
+ index = 0;
+
+ (*p)->traverse(this);
+ }
+ if (flag)
+ {
+ singleConstantParam = false;
+ constructorType = EOpNull;
+ size = 0;
+ isMatrix = false;
+ matrixCols = 0;
+ matrixRows = 0;
+ }
+
+ return false;
+}
+
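+// Copy a node's constant components into the destination union array. For a
+// single-constant constructor this smears the value across the target; a
+// matrix built from a smaller matrix is padded with identity elements, and a
+// matrix built from a scalar gets it on the diagonal with zeros elsewhere.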
+void TConstTraverser::visitConstantUnion(TIntermConstantUnion* node)
+{
+ TConstUnionArray leftUnionArray(unionArray);
+ int instanceSize = type.computeNumComponents();
+
+ if (index >= instanceSize)
+ return;
+
+ if (! singleConstantParam) {
+ int rightUnionSize = node->getType().computeNumComponents();
+
+ const TConstUnionArray& rightUnionArray = node->getConstArray();
+ for (int i = 0; i < rightUnionSize; i++) {
+ if (index >= instanceSize)
+ return;
+ leftUnionArray[index] = rightUnionArray[i];
+
+ index++;
+ }
+ } else {
+ int endIndex = index + size;
+ const TConstUnionArray& rightUnionArray = node->getConstArray();
+ if (! isMatrix) {
+ int count = 0;
+ int nodeComps = node->getType().computeNumComponents();
+ for (int i = index; i < endIndex; i++) {
+ if (i >= instanceSize)
+ return;
+
+ leftUnionArray[i] = rightUnionArray[count];
+
+ (index)++;
+
+ if (nodeComps > 1)
+ count++;
+ }
+ } else {
+ // constructing a matrix, but from what?
+ if (node->isMatrix()) {
+ // Matrix from a matrix; this has the outer matrix, node is the argument matrix.
+ // Traverse the outer, potentially bigger matrix, fill in missing pieces with the
+ // identity matrix.
+ for (int c = 0; c < matrixCols; ++c) {
+ for (int r = 0; r < matrixRows; ++r) {
+ int targetOffset = index + c * matrixRows + r;
+ if (r < node->getType().getMatrixRows() && c < node->getType().getMatrixCols()) {
+ int srcOffset = c * node->getType().getMatrixRows() + r;
+ leftUnionArray[targetOffset] = rightUnionArray[srcOffset];
+ } else if (r == c)
+ leftUnionArray[targetOffset].setDConst(1.0);
+ else
+ leftUnionArray[targetOffset].setDConst(0.0);
+ }
+ }
+ } else {
+ // matrix from vector
+ int count = 0;
+ const int startIndex = index;
+ int nodeComps = node->getType().computeNumComponents();
+ for (int i = startIndex; i < endIndex; i++) {
+ if (i >= instanceSize)
+ return;
+ if (i == startIndex || (i - startIndex) % (matrixRows + 1) == 0)
+ leftUnionArray[i] = rightUnionArray[count];
+ else
+ leftUnionArray[i].setDConst(0.0);
+
+ index++;
+
+ if (nodeComps > 1)
+ count++;
+ }
+ }
+ }
+ }
+}
+
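+// Fold the constant expression tree rooted at 'root' into 'unionArray'.
+// Returns true if an error was encountered, false otherwise (a null root
+// is not an error).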
+bool TIntermediate::parseConstTree(TIntermNode* root, TConstUnionArray unionArray, TOperator constructorType, const TType& t, bool singleConstantParam)
+{
+ if (root == nullptr)
+ return false;
+
+ TConstTraverser it(unionArray, singleConstantParam, constructorType, t);
+
+ root->traverse(&it);
+
+ return it.error;
+}
+
+} // end namespace glslang
diff --git a/thirdparty/glslang/glslang/MachineIndependent/parseVersions.h b/thirdparty/glslang/glslang/MachineIndependent/parseVersions.h
new file mode 100644
index 0000000000..02af76a8a3
--- /dev/null
+++ b/thirdparty/glslang/glslang/MachineIndependent/parseVersions.h
@@ -0,0 +1,159 @@
+//
+// Copyright (C) 2015-2018 Google, Inc.
+// Copyright (C) 2017 ARM Limited.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+// This is implemented in Versions.cpp
+
+#ifndef _PARSE_VERSIONS_INCLUDED_
+#define _PARSE_VERSIONS_INCLUDED_
+
+#include "../Public/ShaderLang.h"
+#include "../Include/InfoSink.h"
+#include "Scan.h"
+
+#include <map>
+
+namespace glslang {
+
+//
+// Base class for parse helpers.
+// This just has version-related information and checking.
+// This class should be sufficient for preprocessing.
+//
+class TParseVersions {
+public:
+ TParseVersions(TIntermediate& interm, int version, EProfile profile,
+ const SpvVersion& spvVersion, EShLanguage language, TInfoSink& infoSink,
+ bool forwardCompatible, EShMessages messages)
+ : infoSink(infoSink), version(version), profile(profile), language(language),
+ spvVersion(spvVersion), forwardCompatible(forwardCompatible),
+ intermediate(interm), messages(messages), numErrors(0), currentScanner(0) { }
+ virtual ~TParseVersions() { }
+ virtual void initializeExtensionBehavior();
+ virtual void requireProfile(const TSourceLoc&, int queryProfiles, const char* featureDesc);
+ virtual void profileRequires(const TSourceLoc&, int queryProfiles, int minVersion, int numExtensions, const char* const extensions[], const char* featureDesc);
+ virtual void profileRequires(const TSourceLoc&, int queryProfiles, int minVersion, const char* const extension, const char* featureDesc);
+ virtual void requireStage(const TSourceLoc&, EShLanguageMask, const char* featureDesc);
+ virtual void requireStage(const TSourceLoc&, EShLanguage, const char* featureDesc);
+ virtual void checkDeprecated(const TSourceLoc&, int queryProfiles, int depVersion, const char* featureDesc);
+ virtual void requireNotRemoved(const TSourceLoc&, int queryProfiles, int removedVersion, const char* featureDesc);
+ virtual void unimplemented(const TSourceLoc&, const char* featureDesc);
+ virtual void requireExtensions(const TSourceLoc&, int numExtensions, const char* const extensions[], const char* featureDesc);
+ virtual void ppRequireExtensions(const TSourceLoc&, int numExtensions, const char* const extensions[], const char* featureDesc);
+ virtual TExtensionBehavior getExtensionBehavior(const char*);
+ virtual bool extensionTurnedOn(const char* const extension);
+ virtual bool extensionsTurnedOn(int numExtensions, const char* const extensions[]);
+ virtual void updateExtensionBehavior(int line, const char* const extension, const char* behavior);
+ virtual void fullIntegerCheck(const TSourceLoc&, const char* op);
+ virtual void doubleCheck(const TSourceLoc&, const char* op);
+ virtual void float16Check(const TSourceLoc&, const char* op, bool builtIn = false);
+ virtual void float16ScalarVectorCheck(const TSourceLoc&, const char* op, bool builtIn = false);
+ virtual bool float16Arithmetic();
+ virtual void requireFloat16Arithmetic(const TSourceLoc& loc, const char* op, const char* featureDesc);
+ virtual void int16ScalarVectorCheck(const TSourceLoc&, const char* op, bool builtIn = false);
+ virtual bool int16Arithmetic();
+ virtual void requireInt16Arithmetic(const TSourceLoc& loc, const char* op, const char* featureDesc);
+ virtual void int8ScalarVectorCheck(const TSourceLoc&, const char* op, bool builtIn = false);
+ virtual bool int8Arithmetic();
+ virtual void requireInt8Arithmetic(const TSourceLoc& loc, const char* op, const char* featureDesc);
+#ifdef AMD_EXTENSIONS
+ virtual void float16OpaqueCheck(const TSourceLoc&, const char* op, bool builtIn = false);
+#endif
+ virtual void int64Check(const TSourceLoc&, const char* op, bool builtIn = false);
+ virtual void explicitInt8Check(const TSourceLoc&, const char* op, bool builtIn = false);
+ virtual void explicitInt16Check(const TSourceLoc&, const char* op, bool builtIn = false);
+ virtual void explicitInt32Check(const TSourceLoc&, const char* op, bool builtIn = false);
+ virtual void explicitFloat32Check(const TSourceLoc&, const char* op, bool builtIn = false);
+ virtual void explicitFloat64Check(const TSourceLoc&, const char* op, bool builtIn = false);
+ virtual void spvRemoved(const TSourceLoc&, const char* op);
+ virtual void vulkanRemoved(const TSourceLoc&, const char* op);
+ virtual void requireVulkan(const TSourceLoc&, const char* op);
+ virtual void requireSpv(const TSourceLoc&, const char* op);
+ virtual bool checkExtensionsRequested(const TSourceLoc&, int numExtensions, const char* const extensions[], const char* featureDesc);
+ virtual void updateExtensionBehavior(const char* const extension, TExtensionBehavior);
+ virtual void checkExtensionStage(const TSourceLoc&, const char* const extension);
+ virtual void fcoopmatCheck(const TSourceLoc&, const char* op, bool builtIn = false);
+
+ virtual void C_DECL error(const TSourceLoc&, const char* szReason, const char* szToken,
+ const char* szExtraInfoFormat, ...) = 0;
+ virtual void C_DECL warn(const TSourceLoc&, const char* szReason, const char* szToken,
+ const char* szExtraInfoFormat, ...) = 0;
+ virtual void C_DECL ppError(const TSourceLoc&, const char* szReason, const char* szToken,
+ const char* szExtraInfoFormat, ...) = 0;
+ virtual void C_DECL ppWarn(const TSourceLoc&, const char* szReason, const char* szToken,
+ const char* szExtraInfoFormat, ...) = 0;
+
+ void addError() { ++numErrors; }
+ int getNumErrors() const { return numErrors; }
+
+ void setScanner(TInputScanner* scanner) { currentScanner = scanner; }
+ TInputScanner* getScanner() const { return currentScanner; }
+ const TSourceLoc& getCurrentLoc() const { return currentScanner->getSourceLoc(); }
+ void setCurrentLine(int line) { currentScanner->setLine(line); }
+ void setCurrentColumn(int col) { currentScanner->setColumn(col); }
+ void setCurrentSourceName(const char* name) { currentScanner->setFile(name); }
+ void setCurrentString(int string) { currentScanner->setString(string); }
+
+ void getPreamble(std::string&);
+ bool relaxedErrors() const { return (messages & EShMsgRelaxedErrors) != 0; }
+ bool suppressWarnings() const { return (messages & EShMsgSuppressWarnings) != 0; }
+ bool isReadingHLSL() const { return (messages & EShMsgReadHlsl) == EShMsgReadHlsl; }
+ bool hlslEnable16BitTypes() const { return (messages & EShMsgHlslEnable16BitTypes) != 0; }
+ bool hlslDX9Compatible() const { return (messages & EShMsgHlslDX9Compatible) != 0; }
+
+ TInfoSink& infoSink;
+
+ // compilation mode
+ int version; // version, updated by #version in the shader
+ EProfile profile; // the declared profile in the shader (core by default)
+ EShLanguage language; // really the stage
+ SpvVersion spvVersion;
+ bool forwardCompatible; // true if errors are to be given for use of deprecated features
+ TIntermediate& intermediate; // helper for making and hooking up pieces of the parse tree
+
+protected:
+ TMap<TString, TExtensionBehavior> extensionBehavior; // for each extension string, what its current behavior is set to
+ EShMessages messages; // errors/warnings/rule-sets
+ int numErrors; // number of compile-time errors encountered
+ TInputScanner* currentScanner;
+
+private:
+ explicit TParseVersions(const TParseVersions&);
+ TParseVersions& operator=(const TParseVersions&);
+};
+
+} // end namespace glslang
+
+#endif // _PARSE_VERSIONS_INCLUDED_
diff --git a/thirdparty/glslang/glslang/MachineIndependent/pch.cpp b/thirdparty/glslang/glslang/MachineIndependent/pch.cpp
new file mode 100644
index 0000000000..b7a08654a5
--- /dev/null
+++ b/thirdparty/glslang/glslang/MachineIndependent/pch.cpp
@@ -0,0 +1,35 @@
+//
+// Copyright (C) 2018 The Khronos Group Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+#include "pch.h"
diff --git a/thirdparty/glslang/glslang/MachineIndependent/pch.h b/thirdparty/glslang/glslang/MachineIndependent/pch.h
new file mode 100644
index 0000000000..6ea3761ea1
--- /dev/null
+++ b/thirdparty/glslang/glslang/MachineIndependent/pch.h
@@ -0,0 +1,49 @@
+#ifndef _PCH_H
+#define _PCH_H
+//
+// Copyright (C) 2018 The Khronos Group Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+#include <sstream>
+#include <cstdlib>
+#include <cstring>
+#include <cctype>
+#include <climits>
+#include <iostream>
+#include <memory>
+#include "SymbolTable.h"
+#include "ParseHelper.h"
+#include "Scan.h"
+#include "ScanContext.h"
+
+#endif /* _PCH_H */
diff --git a/thirdparty/glslang/glslang/MachineIndependent/preprocessor/Pp.cpp b/thirdparty/glslang/glslang/MachineIndependent/preprocessor/Pp.cpp
new file mode 100644
index 0000000000..c74e44f0fd
--- /dev/null
+++ b/thirdparty/glslang/glslang/MachineIndependent/preprocessor/Pp.cpp
@@ -0,0 +1,1320 @@
+//
+// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+// Copyright (C) 2013 LunarG, Inc.
+// Copyright (C) 2015-2018 Google, Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+/****************************************************************************\
+Copyright (c) 2002, NVIDIA Corporation.
+
+NVIDIA Corporation("NVIDIA") supplies this software to you in
+consideration of your agreement to the following terms, and your use,
+installation, modification or redistribution of this NVIDIA software
+constitutes acceptance of these terms. If you do not agree with these
+terms, please do not use, install, modify or redistribute this NVIDIA
+software.
+
+In consideration of your agreement to abide by the following terms, and
+subject to these terms, NVIDIA grants you a personal, non-exclusive
+license, under NVIDIA's copyrights in this original NVIDIA software (the
+"NVIDIA Software"), to use, reproduce, modify and redistribute the
+NVIDIA Software, with or without modifications, in source and/or binary
+forms; provided that if you redistribute the NVIDIA Software, you must
+retain the copyright notice of NVIDIA, this notice and the following
+text and disclaimers in all such redistributions of the NVIDIA Software.
+Neither the name, trademarks, service marks nor logos of NVIDIA
+Corporation may be used to endorse or promote products derived from the
+NVIDIA Software without specific prior written permission from NVIDIA.
+Except as expressly stated in this notice, no other rights or licenses
+express or implied, are granted by NVIDIA herein, including but not
+limited to any patent rights that may be infringed by your derivative
+works or by other works in which the NVIDIA Software may be
+incorporated. No hardware is licensed hereunder.
+
+THE NVIDIA SOFTWARE IS BEING PROVIDED ON AN "AS IS" BASIS, WITHOUT
+WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED,
+INCLUDING WITHOUT LIMITATION, WARRANTIES OR CONDITIONS OF TITLE,
+NON-INFRINGEMENT, MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, OR
+ITS USE AND OPERATION EITHER ALONE OR IN COMBINATION WITH OTHER
+PRODUCTS.
+
+IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT,
+INCIDENTAL, EXEMPLARY, CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+TO, LOST PROFITS; PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) OR ARISING IN ANY WAY
+OUT OF THE USE, REPRODUCTION, MODIFICATION AND/OR DISTRIBUTION OF THE
+NVIDIA SOFTWARE, HOWEVER CAUSED AND WHETHER UNDER THEORY OF CONTRACT,
+TORT (INCLUDING NEGLIGENCE), STRICT LIABILITY OR OTHERWISE, EVEN IF
+NVIDIA HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+\****************************************************************************/
+
+#ifndef _CRT_SECURE_NO_WARNINGS
+#define _CRT_SECURE_NO_WARNINGS
+#endif
+
+#include <sstream>
+#include <cstdlib>
+#include <cstring>
+#include <cctype>
+#include <climits>
+
+#include "PpContext.h"
+#include "PpTokens.h"
+
+namespace glslang {
+
+// Handle #define
+int TPpContext::CPPdefine(TPpToken* ppToken)
+{
+ MacroSymbol mac;
+
+ // get the macro name
+ int token = scanToken(ppToken);
+ if (token != PpAtomIdentifier) {
+ parseContext.ppError(ppToken->loc, "must be followed by macro name", "#define", "");
+ return token;
+ }
+ if (ppToken->loc.string >= 0) {
+ // We are in user code; check for reserved name use:
+ parseContext.reservedPpErrorCheck(ppToken->loc, ppToken->name, "#define");
+ }
+
+ // save the macro name
+ const int defAtom = atomStrings.getAddAtom(ppToken->name);
+ TSourceLoc defineLoc = ppToken->loc; // because ppToken might go to the next line before we report errors
+
+ // gather parameters to the macro, between (...)
+ token = scanToken(ppToken);
+ if (token == '(' && !ppToken->space) {
+ mac.functionLike = 1;
+ do {
+ token = scanToken(ppToken);
+ if (mac.args.size() == 0 && token == ')')
+ break;
+ if (token != PpAtomIdentifier) {
+ parseContext.ppError(ppToken->loc, "bad argument", "#define", "");
+
+ return token;
+ }
+ const int argAtom = atomStrings.getAddAtom(ppToken->name);
+
+ // check for duplication of parameter name
+ bool duplicate = false;
+ for (size_t a = 0; a < mac.args.size(); ++a) {
+ if (mac.args[a] == argAtom) {
+ parseContext.ppError(ppToken->loc, "duplicate macro parameter", "#define", "");
+ duplicate = true;
+ break;
+ }
+ }
+ if (! duplicate)
+ mac.args.push_back(argAtom);
+ token = scanToken(ppToken);
+ } while (token == ',');
+ if (token != ')') {
+ parseContext.ppError(ppToken->loc, "missing parenthesis", "#define", "");
+
+ return token;
+ }
+
+ token = scanToken(ppToken);
+ } else if (token != '\n' && token != EndOfInput && !ppToken->space) {
+ parseContext.ppWarn(ppToken->loc, "missing space after macro name", "#define", "");
+
+ return token;
+ }
+
+ // record the definition of the macro
+ while (token != '\n' && token != EndOfInput) {
+ mac.body.putToken(token, ppToken);
+ token = scanToken(ppToken);
+ if (token != '\n' && ppToken->space)
+ mac.body.putToken(' ', ppToken);
+ }
+
+ // check for duplicate definition
+ MacroSymbol* existing = lookupMacroDef(defAtom);
+ if (existing != nullptr) {
+ if (! existing->undef) {
+ // Already defined -- need to make sure they are identical:
+ // "Two replacement lists are identical if and only if the
+ // preprocessing tokens in both have the same number,
+ // ordering, spelling, and white-space separation, where all
+ // white-space separations are considered identical."
+ if (existing->functionLike != mac.functionLike) {
+ parseContext.ppError(defineLoc, "Macro redefined; function-like versus object-like:", "#define",
+ atomStrings.getString(defAtom));
+ } else if (existing->args.size() != mac.args.size()) {
+ parseContext.ppError(defineLoc, "Macro redefined; different number of arguments:", "#define",
+ atomStrings.getString(defAtom));
+ } else {
+ if (existing->args != mac.args) {
+ parseContext.ppError(defineLoc, "Macro redefined; different argument names:", "#define",
+ atomStrings.getString(defAtom));
+ }
+ // set up to compare the two
+ existing->body.reset();
+ mac.body.reset();
+ int newToken;
+ bool firstToken = true;
+ do {
+ int oldToken;
+ TPpToken oldPpToken;
+ TPpToken newPpToken;
+ oldToken = existing->body.getToken(parseContext, &oldPpToken);
+ newToken = mac.body.getToken(parseContext, &newPpToken);
+ // for the first token, preceding spaces don't matter
+ if (firstToken) {
+ newPpToken.space = oldPpToken.space;
+ firstToken = false;
+ }
+ if (oldToken != newToken || oldPpToken != newPpToken) {
+ parseContext.ppError(defineLoc, "Macro redefined; different substitutions:", "#define",
+ atomStrings.getString(defAtom));
+ break;
+ }
+ } while (newToken != EndOfInput);
+ }
+ }
+ *existing = mac;
+ } else
+ addMacroDef(defAtom, mac);
+
+ return '\n';
+}
+
+// Handle #undef
+int TPpContext::CPPundef(TPpToken* ppToken)
+{
+ int token = scanToken(ppToken);
+ if (token != PpAtomIdentifier) {
+ parseContext.ppError(ppToken->loc, "must be followed by macro name", "#undef", "");
+
+ return token;
+ }
+
+ parseContext.reservedPpErrorCheck(ppToken->loc, ppToken->name, "#undef");
+
+ MacroSymbol* macro = lookupMacroDef(atomStrings.getAtom(ppToken->name));
+ if (macro != nullptr)
+ macro->undef = 1;
+ token = scanToken(ppToken);
+ if (token != '\n')
+ parseContext.ppError(ppToken->loc, "can only be followed by a single macro name", "#undef", "");
+
+ return token;
+}
+
+// Handle #else
+/* Skip forward to the appropriate spot. This is used both
+** to skip to a #endif after seeing an #else, AND to skip to a #else,
+** #elif, or #endif after a #if/#ifdef/#ifndef/#elif test was false.
+*/
+int TPpContext::CPPelse(int matchelse, TPpToken* ppToken)
+{
+ int depth = 0;
+ int token = scanToken(ppToken);
+
+ while (token != EndOfInput) {
+ if (token != '#') {
+ while (token != '\n' && token != EndOfInput)
+ token = scanToken(ppToken);
+
+ if (token == EndOfInput)
+ return token;
+
+ token = scanToken(ppToken);
+ continue;
+ }
+
+ if ((token = scanToken(ppToken)) != PpAtomIdentifier)
+ continue;
+
+ int nextAtom = atomStrings.getAtom(ppToken->name);
+ if (nextAtom == PpAtomIf || nextAtom == PpAtomIfdef || nextAtom == PpAtomIfndef) {
+ depth++;
+ if (ifdepth >= maxIfNesting || elsetracker >= maxIfNesting) {
+ parseContext.ppError(ppToken->loc, "maximum nesting depth exceeded", "#if/#ifdef/#ifndef", "");
+ return EndOfInput;
+ } else {
+ ifdepth++;
+ elsetracker++;
+ }
+ } else if (nextAtom == PpAtomEndif) {
+ token = extraTokenCheck(nextAtom, ppToken, scanToken(ppToken));
+ elseSeen[elsetracker] = false;
+ --elsetracker;
+ if (depth == 0) {
+ // found the #endif we are looking for
+ if (ifdepth > 0)
+ --ifdepth;
+ break;
+ }
+ --depth;
+ --ifdepth;
+ } else if (matchelse && depth == 0) {
+ if (nextAtom == PpAtomElse) {
+ elseSeen[elsetracker] = true;
+ token = extraTokenCheck(nextAtom, ppToken, scanToken(ppToken));
+ // found the #else we are looking for
+ break;
+ } else if (nextAtom == PpAtomElif) {
+ if (elseSeen[elsetracker])
+ parseContext.ppError(ppToken->loc, "#elif after #else", "#elif", "");
+ /* we decrement ifdepth here, because CPPif will increment
+ * it and we really want to leave it alone */
+ if (ifdepth > 0) {
+ --ifdepth;
+ elseSeen[elsetracker] = false;
+ --elsetracker;
+ }
+
+ return CPPif(ppToken);
+ }
+ } else if (nextAtom == PpAtomElse) {
+ if (elseSeen[elsetracker])
+ parseContext.ppError(ppToken->loc, "#else after #else", "#else", "");
+ else
+ elseSeen[elsetracker] = true;
+ token = extraTokenCheck(nextAtom, ppToken, scanToken(ppToken));
+ } else if (nextAtom == PpAtomElif) {
+ if (elseSeen[elsetracker])
+ parseContext.ppError(ppToken->loc, "#elif after #else", "#elif", "");
+ }
+ }
+
+ return token;
+}
+
+// Call when there should be no more tokens left on a line.
+int TPpContext::extraTokenCheck(int contextAtom, TPpToken* ppToken, int token)
+{
+ if (token != '\n' && token != EndOfInput) {
+ static const char* message = "unexpected tokens following directive";
+
+ const char* label;
+ if (contextAtom == PpAtomElse)
+ label = "#else";
+ else if (contextAtom == PpAtomElif)
+ label = "#elif";
+ else if (contextAtom == PpAtomEndif)
+ label = "#endif";
+ else if (contextAtom == PpAtomIf)
+ label = "#if";
+ else if (contextAtom == PpAtomLine)
+ label = "#line";
+ else
+ label = "";
+
+ if (parseContext.relaxedErrors())
+ parseContext.ppWarn(ppToken->loc, message, label, "");
+ else
+ parseContext.ppError(ppToken->loc, message, label, "");
+
+ while (token != '\n' && token != EndOfInput)
+ token = scanToken(ppToken);
+ }
+
+ return token;
+}
+
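+// Precedence levels for #if/#elif expression evaluation, lowest to highest.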
+enum eval_prec {
+ MIN_PRECEDENCE,
+ COND, LOGOR, LOGAND, OR, XOR, AND, EQUAL, RELATION, SHIFT, ADD, MUL, UNARY,
+ MAX_PRECEDENCE
+};
+
+namespace {
+
+ int op_logor(int a, int b) { return a || b; }
+ int op_logand(int a, int b) { return a && b; }
+ int op_or(int a, int b) { return a | b; }
+ int op_xor(int a, int b) { return a ^ b; }
+ int op_and(int a, int b) { return a & b; }
+ int op_eq(int a, int b) { return a == b; }
+ int op_ne(int a, int b) { return a != b; }
+ int op_ge(int a, int b) { return a >= b; }
+ int op_le(int a, int b) { return a <= b; }
+ int op_gt(int a, int b) { return a > b; }
+ int op_lt(int a, int b) { return a < b; }
+ int op_shl(int a, int b) { return a << b; }
+ int op_shr(int a, int b) { return a >> b; }
+ int op_add(int a, int b) { return a + b; }
+ int op_sub(int a, int b) { return a - b; }
+ int op_mul(int a, int b) { return a * b; }
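+ // INT_MIN / -1 and INT_MIN % -1 overflow, so evaluate them to 0 instead of
+ // invoking undefined behavior; division by zero is diagnosed by the caller.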
+ int op_div(int a, int b) { return a == INT_MIN && b == -1 ? 0 : a / b; }
+ int op_mod(int a, int b) { return a == INT_MIN && b == -1 ? 0 : a % b; }
+ int op_pos(int a) { return a; }
+ int op_neg(int a) { return -a; }
+ int op_cmpl(int a) { return ~a; }
+ int op_not(int a) { return !a; }
+
+} // end anonymous namespace
+
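+// Binary operators recognized in preprocessor expressions, each tagged with
+// its eval_prec precedence; eval() scans this table during evaluation.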
+struct TBinop {
+ int token, precedence, (*op)(int, int);
+} binop[] = {
+ { PpAtomOr, LOGOR, op_logor },
+ { PpAtomAnd, LOGAND, op_logand },
+ { '|', OR, op_or },
+ { '^', XOR, op_xor },
+ { '&', AND, op_and },
+ { PpAtomEQ, EQUAL, op_eq },
+ { PpAtomNE, EQUAL, op_ne },
+ { '>', RELATION, op_gt },
+ { PpAtomGE, RELATION, op_ge },
+ { '<', RELATION, op_lt },
+ { PpAtomLE, RELATION, op_le },
+ { PpAtomLeft, SHIFT, op_shl },
+ { PpAtomRight, SHIFT, op_shr },
+ { '+', ADD, op_add },
+ { '-', ADD, op_sub },
+ { '*', MUL, op_mul },
+ { '/', MUL, op_div },
+ { '%', MUL, op_mod },
+};
+
+struct TUnop {
+ int token, (*op)(int);
+} unop[] = {
+ { '+', op_pos },
+ { '-', op_neg },
+ { '~', op_cmpl },
+ { '!', op_not },
+};
+
+#define NUM_ELEMENTS(A) (sizeof(A) / sizeof(A[0]))
+
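+// Evaluate a preprocessor expression by precedence climbing: parse one
+// unary/primary expression into 'res', then repeatedly fold in any binary
+// operator of higher precedence than 'precedence', recursing for the right
+// operand. E.g. in "1 + 2 * 3" the recursion at ADD precedence lets '*'
+// (MUL) bind first, yielding 7. 'shortCircuit' marks subexpressions that
+// cannot affect the result, relaxing undefined-macro errors inside them.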
+int TPpContext::eval(int token, int precedence, bool shortCircuit, int& res, bool& err, TPpToken* ppToken)
+{
+ TSourceLoc loc = ppToken->loc; // because we sometimes read the newline before reporting the error
+ if (token == PpAtomIdentifier) {
+ if (strcmp("defined", ppToken->name) == 0) {
+ if (! parseContext.isReadingHLSL() && isMacroInput()) {
+ if (parseContext.relaxedErrors())
+ parseContext.ppWarn(ppToken->loc, "nonportable when expanded from macros for preprocessor expression",
+ "defined", "");
+ else
+ parseContext.ppError(ppToken->loc, "cannot use in preprocessor expression when expanded from macros",
+ "defined", "");
+ }
+ bool needclose = false;
+ token = scanToken(ppToken);
+ if (token == '(') {
+ needclose = true;
+ token = scanToken(ppToken);
+ }
+ if (token != PpAtomIdentifier) {
+ parseContext.ppError(loc, "incorrect directive, expected identifier", "preprocessor evaluation", "");
+ err = true;
+ res = 0;
+
+ return token;
+ }
+
+ MacroSymbol* macro = lookupMacroDef(atomStrings.getAtom(ppToken->name));
+ res = macro != nullptr ? !macro->undef : 0;
+ token = scanToken(ppToken);
+ if (needclose) {
+ if (token != ')') {
+ parseContext.ppError(loc, "expected ')'", "preprocessor evaluation", "");
+ err = true;
+ res = 0;
+
+ return token;
+ }
+ token = scanToken(ppToken);
+ }
+ } else {
+ token = evalToToken(token, shortCircuit, res, err, ppToken);
+ return eval(token, precedence, shortCircuit, res, err, ppToken);
+ }
+ } else if (token == PpAtomConstInt) {
+ res = ppToken->ival;
+ token = scanToken(ppToken);
+ } else if (token == '(') {
+ token = scanToken(ppToken);
+ token = eval(token, MIN_PRECEDENCE, shortCircuit, res, err, ppToken);
+ if (! err) {
+ if (token != ')') {
+ parseContext.ppError(loc, "expected ')'", "preprocessor evaluation", "");
+ err = true;
+ res = 0;
+
+ return token;
+ }
+ token = scanToken(ppToken);
+ }
+ } else {
+ int op = NUM_ELEMENTS(unop) - 1;
+ for (; op >= 0; op--) {
+ if (unop[op].token == token)
+ break;
+ }
+ if (op >= 0) {
+ token = scanToken(ppToken);
+ token = eval(token, UNARY, shortCircuit, res, err, ppToken);
+ res = unop[op].op(res);
+ } else {
+ parseContext.ppError(loc, "bad expression", "preprocessor evaluation", "");
+ err = true;
+ res = 0;
+
+ return token;
+ }
+ }
+
+ token = evalToToken(token, shortCircuit, res, err, ppToken);
+
+ // Perform evaluation of binary operation, if there is one, otherwise we are done.
+ while (! err) {
+ if (token == ')' || token == '\n')
+ break;
+ int op;
+ for (op = NUM_ELEMENTS(binop) - 1; op >= 0; op--) {
+ if (binop[op].token == token)
+ break;
+ }
+ if (op < 0 || binop[op].precedence <= precedence)
+ break;
+ int leftSide = res;
+
+ // Set up short-circuiting, needed for ES, unless already in a short circuit.
+ // (Once in a short circuit, it can't be turned off until that whole subexpression is done.)
+ if (! shortCircuit) {
+ if ((token == PpAtomOr && leftSide == 1) ||
+ (token == PpAtomAnd && leftSide == 0))
+ shortCircuit = true;
+ }
+
+ token = scanToken(ppToken);
+ token = eval(token, binop[op].precedence, shortCircuit, res, err, ppToken);
+
+ if (binop[op].op == op_div || binop[op].op == op_mod) {
+ if (res == 0) {
+ parseContext.ppError(loc, "division by 0", "preprocessor evaluation", "");
+ res = 1;
+ }
+ }
+ res = binop[op].op(leftSide, res);
+ }
+
+ return token;
+}
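+
+// Short-circuit example: in "#if 0 && SOME_UNDEFINED_MACRO", leftSide is 0 when
+// the PpAtomAnd operator is seen, so shortCircuit is set for the right-hand
+// subexpression; evalToToken() then expands the undefined macro to 0 without
+// raising the es-profile "undefined macro in expression" error.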
+
+// Expand macros, skipping empty expansions, to get to the first real token in those expansions.
+int TPpContext::evalToToken(int token, bool shortCircuit, int& res, bool& err, TPpToken* ppToken)
+{
+ while (token == PpAtomIdentifier && strcmp("defined", ppToken->name) != 0) {
+ switch (MacroExpand(ppToken, true, false)) {
+ case MacroExpandNotStarted:
+ case MacroExpandError:
+ parseContext.ppError(ppToken->loc, "can't evaluate expression", "preprocessor evaluation", "");
+ err = true;
+ res = 0;
+ break;
+ case MacroExpandStarted:
+ break;
+ case MacroExpandUndef:
+ if (! shortCircuit && parseContext.profile == EEsProfile) {
+ const char* message = "undefined macro in expression not allowed in es profile";
+ if (parseContext.relaxedErrors())
+ parseContext.ppWarn(ppToken->loc, message, "preprocessor evaluation", ppToken->name);
+ else
+ parseContext.ppError(ppToken->loc, message, "preprocessor evaluation", ppToken->name);
+ }
+ break;
+ }
+ token = scanToken(ppToken);
+ if (err)
+ break;
+ }
+
+ return token;
+}
+
+// Handle #if
+int TPpContext::CPPif(TPpToken* ppToken)
+{
+ int token = scanToken(ppToken);
+ if (ifdepth >= maxIfNesting || elsetracker >= maxIfNesting) {
+ parseContext.ppError(ppToken->loc, "maximum nesting depth exceeded", "#if", "");
+ return EndOfInput;
+ } else {
+ elsetracker++;
+ ifdepth++;
+ }
+ int res = 0;
+ bool err = false;
+ token = eval(token, MIN_PRECEDENCE, false, res, err, ppToken);
+ token = extraTokenCheck(PpAtomIf, ppToken, token);
+ if (!res && !err)
+ token = CPPelse(1, ppToken);
+
+ return token;
+}
+
+// Handle #ifdef
+int TPpContext::CPPifdef(int defined, TPpToken* ppToken)
+{
+ int token = scanToken(ppToken);
+ if (ifdepth >= maxIfNesting || elsetracker >= maxIfNesting) {
+ parseContext.ppError(ppToken->loc, "maximum nesting depth exceeded", "#ifdef", "");
+ return EndOfInput;
+ } else {
+ elsetracker++;
+ ifdepth++;
+ }
+
+ if (token != PpAtomIdentifier) {
+ if (defined)
+ parseContext.ppError(ppToken->loc, "must be followed by macro name", "#ifdef", "");
+ else
+ parseContext.ppError(ppToken->loc, "must be followed by macro name", "#ifndef", "");
+ } else {
+ MacroSymbol* macro = lookupMacroDef(atomStrings.getAtom(ppToken->name));
+ token = scanToken(ppToken);
+ if (token != '\n') {
+ parseContext.ppError(ppToken->loc, "unexpected tokens following #ifdef directive - expected a newline", "#ifdef", "");
+ while (token != '\n' && token != EndOfInput)
+ token = scanToken(ppToken);
+ }
+ if (((macro != nullptr && !macro->undef) ? 1 : 0) != defined)
+ token = CPPelse(1, ppToken);
+ }
+
+ return token;
+}
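+
+// Illustrative use: readCPPline() calls CPPifdef(1, ...) for "#ifdef FOO" and
+// CPPifdef(0, ...) for "#ifndef FOO"; the comparison above takes the skipping
+// path via CPPelse(1, ...) whenever FOO's defined-ness does not match 'defined'.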
+
+// Handle #include ...
+// TODO: Handle macro expansions for the header name
+int TPpContext::CPPinclude(TPpToken* ppToken)
+{
+ const TSourceLoc directiveLoc = ppToken->loc;
+ bool startWithLocalSearch = true; // to additionally include the extra "" paths
+ int token = scanToken(ppToken);
+
+ // handle <header-name>-style #include
+ if (token == '<') {
+ startWithLocalSearch = false;
+ token = scanHeaderName(ppToken, '>');
+ }
+ // otherwise ppToken already has the header name and it was "header-name" style
+
+ if (token != PpAtomConstString) {
+ parseContext.ppError(directiveLoc, "must be followed by a header name", "#include", "");
+ return token;
+ }
+
+ // Make a copy of the name because it will be overwritten by the next token scan.
+ const std::string filename = ppToken->name;
+
+ // See if the directive was well formed
+ token = scanToken(ppToken);
+ if (token != '\n') {
+ if (token == EndOfInput)
+ parseContext.ppError(ppToken->loc, "expected newline after header name:", "#include", "%s", filename.c_str());
+ else
+ parseContext.ppError(ppToken->loc, "extra content after header name:", "#include", "%s", filename.c_str());
+ return token;
+ }
+
+ // Process well-formed directive
+
+ // Find the inclusion: first look in the "local" ("") paths, if requested;
+ // otherwise, only search the "system" (<>) paths.
+ TShader::Includer::IncludeResult* res = nullptr;
+ if (startWithLocalSearch)
+ res = includer.includeLocal(filename.c_str(), currentSourceFile.c_str(), includeStack.size() + 1);
+ if (res == nullptr || res->headerName.empty()) {
+ includer.releaseInclude(res);
+ res = includer.includeSystem(filename.c_str(), currentSourceFile.c_str(), includeStack.size() + 1);
+ }
+
+ // Process the results
+ if (res != nullptr && !res->headerName.empty()) {
+ if (res->headerData != nullptr && res->headerLength > 0) {
+ // path for processing one or more tokens from an included header, hand off 'res'
+ const bool forNextLine = parseContext.lineDirectiveShouldSetNextLine();
+ std::ostringstream prologue;
+ std::ostringstream epilogue;
+ prologue << "#line " << forNextLine << " " << "\"" << res->headerName << "\"\n";
+ epilogue << (res->headerData[res->headerLength - 1] == '\n'? "" : "\n") <<
+ "#line " << directiveLoc.line + forNextLine << " " << directiveLoc.getStringNameOrNum() << "\n";
+ pushInput(new TokenizableIncludeFile(directiveLoc, prologue.str(), res, epilogue.str(), this));
+ parseContext.intermediate.addIncludeText(res->headerName.c_str(), res->headerData, res->headerLength);
+ // There's no "current" location anymore.
+ parseContext.setCurrentColumn(0);
+ } else {
+ // things are okay, but there is nothing to process
+ includer.releaseInclude(res);
+ }
+ } else {
+ // error path, clean up
+ std::string message =
+ res != nullptr ? std::string(res->headerData, res->headerLength)
+ : std::string("Could not process include directive");
+ parseContext.ppError(directiveLoc, message.c_str(), "#include", "for header name: %s", filename.c_str());
+ includer.releaseInclude(res);
+ }
+
+ return token;
+}
+
+// Handle #line
+int TPpContext::CPPline(TPpToken* ppToken)
+{
+ // "#line must have, after macro substitution, one of the following forms:
+ // "#line line
+ // "#line line source-string-number"
+
+ int token = scanToken(ppToken);
+ const TSourceLoc directiveLoc = ppToken->loc;
+ if (token == '\n') {
+ parseContext.ppError(ppToken->loc, "must by followed by an integral literal", "#line", "");
+ return token;
+ }
+
+ int lineRes = 0; // Line number after macro expansion.
+ int lineToken = 0;
+ bool hasFile = false;
+ int fileRes = 0; // Source file number after macro expansion.
+ const char* sourceName = nullptr; // Optional source file name.
+ bool lineErr = false;
+ bool fileErr = false;
+ token = eval(token, MIN_PRECEDENCE, false, lineRes, lineErr, ppToken);
+ if (! lineErr) {
+ lineToken = lineRes;
+ if (token == '\n')
+ ++lineRes;
+
+ if (parseContext.lineDirectiveShouldSetNextLine())
+ --lineRes;
+ parseContext.setCurrentLine(lineRes);
+
+ if (token != '\n') {
+ if (token == PpAtomConstString) {
+ parseContext.ppRequireExtensions(directiveLoc, 1, &E_GL_GOOGLE_cpp_style_line_directive, "filename-based #line");
+ // We need to save a copy of the string instead of pointing
+ // to the name field of the token since the name field
+ // will likely be overwritten by the next token scan.
+ sourceName = atomStrings.getString(atomStrings.getAddAtom(ppToken->name));
+ parseContext.setCurrentSourceName(sourceName);
+ hasFile = true;
+ token = scanToken(ppToken);
+ } else {
+ token = eval(token, MIN_PRECEDENCE, false, fileRes, fileErr, ppToken);
+ if (! fileErr) {
+ parseContext.setCurrentString(fileRes);
+ hasFile = true;
+ }
+ }
+ }
+ }
+ if (!fileErr && !lineErr) {
+ parseContext.notifyLineDirective(directiveLoc.line, lineToken, hasFile, fileRes, sourceName);
+ }
+ token = extraTokenCheck(PpAtomLine, ppToken, token);
+
+ return token;
+}
+
+// Handle #error
+int TPpContext::CPPerror(TPpToken* ppToken)
+{
+ int token = scanToken(ppToken);
+ std::string message;
+ TSourceLoc loc = ppToken->loc;
+
+ while (token != '\n' && token != EndOfInput) {
+ if (token == PpAtomConstInt16 || token == PpAtomConstUint16 ||
+ token == PpAtomConstInt || token == PpAtomConstUint ||
+ token == PpAtomConstInt64 || token == PpAtomConstUint64 ||
+ token == PpAtomConstFloat16 ||
+ token == PpAtomConstFloat || token == PpAtomConstDouble) {
+ message.append(ppToken->name);
+ } else if (token == PpAtomIdentifier || token == PpAtomConstString) {
+ message.append(ppToken->name);
+ } else {
+ message.append(atomStrings.getString(token));
+ }
+ message.append(" ");
+ token = scanToken(ppToken);
+ }
+ parseContext.notifyErrorDirective(loc.line, message.c_str());
+ // Store this message in the shader's information log and set the compile-error flag.
+ parseContext.ppError(loc, message.c_str(), "#error", "");
+
+ return '\n';
+}
+
+// Handle #pragma
+int TPpContext::CPPpragma(TPpToken* ppToken)
+{
+ char SrcStrName[2];
+ TVector<TString> tokens;
+
+ TSourceLoc loc = ppToken->loc; // because we go to the next line before processing
+ int token = scanToken(ppToken);
+ while (token != '\n' && token != EndOfInput) {
+ switch (token) {
+ case PpAtomIdentifier:
+ case PpAtomConstInt:
+ case PpAtomConstUint:
+ case PpAtomConstInt64:
+ case PpAtomConstUint64:
+#ifdef AMD_EXTENSIONS
+ case PpAtomConstInt16:
+ case PpAtomConstUint16:
+#endif
+ case PpAtomConstFloat:
+ case PpAtomConstDouble:
+ case PpAtomConstFloat16:
+ tokens.push_back(ppToken->name);
+ break;
+ default:
+ SrcStrName[0] = (char)token;
+ SrcStrName[1] = '\0';
+ tokens.push_back(SrcStrName);
+ }
+ token = scanToken(ppToken);
+ }
+
+ if (token == EndOfInput)
+ parseContext.ppError(loc, "directive must end with a newline", "#pragma", "");
+ else
+ parseContext.handlePragma(loc, tokens);
+
+ return token;
+}
+
+// #version: This is just for error checking; the version and profile are decided before preprocessing starts
+int TPpContext::CPPversion(TPpToken* ppToken)
+{
+ int token = scanToken(ppToken);
+
+ if (errorOnVersion || versionSeen) {
+ if (parseContext.isReadingHLSL())
+ parseContext.ppError(ppToken->loc, "invalid preprocessor command", "#version", "");
+ else
+ parseContext.ppError(ppToken->loc, "must occur first in shader", "#version", "");
+ }
+ versionSeen = true;
+
+ if (token == '\n') {
+ parseContext.ppError(ppToken->loc, "must be followed by version number", "#version", "");
+
+ return token;
+ }
+
+ if (token != PpAtomConstInt)
+ parseContext.ppError(ppToken->loc, "must be followed by version number", "#version", "");
+
+ ppToken->ival = atoi(ppToken->name);
+ int versionNumber = ppToken->ival;
+ int line = ppToken->loc.line;
+ token = scanToken(ppToken);
+
+ if (token == '\n') {
+ parseContext.notifyVersion(line, versionNumber, nullptr);
+ return token;
+ } else {
+ int profileAtom = atomStrings.getAtom(ppToken->name);
+ if (profileAtom != PpAtomCore &&
+ profileAtom != PpAtomCompatibility &&
+ profileAtom != PpAtomEs)
+ parseContext.ppError(ppToken->loc, "bad profile name; use es, core, or compatibility", "#version", "");
+ parseContext.notifyVersion(line, versionNumber, ppToken->name);
+ token = scanToken(ppToken);
+
+ if (token == '\n')
+ return token;
+ else
+ parseContext.ppError(ppToken->loc, "bad tokens following profile -- expected newline", "#version", "");
+ }
+
+ return token;
+}
+
+// Handle #extension
+int TPpContext::CPPextension(TPpToken* ppToken)
+{
+ int line = ppToken->loc.line;
+ int token = scanToken(ppToken);
+ char extensionName[MaxTokenLength + 1];
+
+ if (token=='\n') {
+ parseContext.ppError(ppToken->loc, "extension name not specified", "#extension", "");
+ return token;
+ }
+
+ if (token != PpAtomIdentifier)
+ parseContext.ppError(ppToken->loc, "extension name expected", "#extension", "");
+
+ snprintf(extensionName, sizeof(extensionName), "%s", ppToken->name);
+
+ token = scanToken(ppToken);
+ if (token != ':') {
+ parseContext.ppError(ppToken->loc, "':' missing after extension name", "#extension", "");
+ return token;
+ }
+
+ token = scanToken(ppToken);
+ if (token != PpAtomIdentifier) {
+ parseContext.ppError(ppToken->loc, "behavior for extension not specified", "#extension", "");
+ return token;
+ }
+
+ parseContext.updateExtensionBehavior(line, extensionName, ppToken->name);
+ parseContext.notifyExtensionDirective(line, extensionName, ppToken->name);
+
+ token = scanToken(ppToken);
+ if (token == '\n')
+ return token;
+ else
+ parseContext.ppError(ppToken->loc, "extra tokens -- expected newline", "#extension","");
+
+ return token;
+}
+
+int TPpContext::readCPPline(TPpToken* ppToken)
+{
+ int token = scanToken(ppToken);
+
+ if (token == PpAtomIdentifier) {
+ switch (atomStrings.getAtom(ppToken->name)) {
+ case PpAtomDefine:
+ token = CPPdefine(ppToken);
+ break;
+ case PpAtomElse:
+ if (elseSeen[elsetracker])
+ parseContext.ppError(ppToken->loc, "#else after #else", "#else", "");
+ elseSeen[elsetracker] = true;
+ if (ifdepth == 0)
+ parseContext.ppError(ppToken->loc, "mismatched statements", "#else", "");
+ token = extraTokenCheck(PpAtomElse, ppToken, scanToken(ppToken));
+ token = CPPelse(0, ppToken);
+ break;
+ case PpAtomElif:
+ if (ifdepth == 0)
+ parseContext.ppError(ppToken->loc, "mismatched statements", "#elif", "");
+ if (elseSeen[elsetracker])
+ parseContext.ppError(ppToken->loc, "#elif after #else", "#elif", "");
+ // this token is really a don't-care, but we still need to eat the rest of the line's tokens
+ token = scanToken(ppToken);
+ while (token != '\n' && token != EndOfInput)
+ token = scanToken(ppToken);
+ token = CPPelse(0, ppToken);
+ break;
+ case PpAtomEndif:
+ if (ifdepth == 0)
+ parseContext.ppError(ppToken->loc, "mismatched statements", "#endif", "");
+ else {
+ elseSeen[elsetracker] = false;
+ --elsetracker;
+ --ifdepth;
+ }
+ token = extraTokenCheck(PpAtomEndif, ppToken, scanToken(ppToken));
+ break;
+ case PpAtomIf:
+ token = CPPif(ppToken);
+ break;
+ case PpAtomIfdef:
+ token = CPPifdef(1, ppToken);
+ break;
+ case PpAtomIfndef:
+ token = CPPifdef(0, ppToken);
+ break;
+ case PpAtomInclude:
+ if(!parseContext.isReadingHLSL()) {
+ parseContext.ppRequireExtensions(ppToken->loc, 1, &E_GL_GOOGLE_include_directive, "#include");
+ }
+ token = CPPinclude(ppToken);
+ break;
+ case PpAtomLine:
+ token = CPPline(ppToken);
+ break;
+ case PpAtomPragma:
+ token = CPPpragma(ppToken);
+ break;
+ case PpAtomUndef:
+ token = CPPundef(ppToken);
+ break;
+ case PpAtomError:
+ token = CPPerror(ppToken);
+ break;
+ case PpAtomVersion:
+ token = CPPversion(ppToken);
+ break;
+ case PpAtomExtension:
+ token = CPPextension(ppToken);
+ break;
+ default:
+ parseContext.ppError(ppToken->loc, "invalid directive:", "#", ppToken->name);
+ break;
+ }
+ } else if (token != '\n' && token != EndOfInput)
+ parseContext.ppError(ppToken->loc, "invalid directive", "#", "");
+
+ while (token != '\n' && token != EndOfInput)
+ token = scanToken(ppToken);
+
+ return token;
+}
+
+// Context-dependent parsing of a #include <header-name>.
+// Assumes no macro expansions etc. are being done; the name is just on the current input.
+// Always creates a name and returns PpAtomConstString, unless we run out of input.
+int TPpContext::scanHeaderName(TPpToken* ppToken, char delimit)
+{
+ bool tooLong = false;
+
+ if (inputStack.empty())
+ return EndOfInput;
+
+ int len = 0;
+ ppToken->name[0] = '\0';
+ do {
+ int ch = inputStack.back()->getch();
+
+ // done yet?
+ if (ch == delimit) {
+ ppToken->name[len] = '\0';
+ if (tooLong)
+ parseContext.ppError(ppToken->loc, "header name too long", "", "");
+ return PpAtomConstString;
+ } else if (ch == EndOfInput)
+ return EndOfInput;
+
+ // found a character to expand the name with
+ if (len < MaxTokenLength)
+ ppToken->name[len++] = (char)ch;
+ else
+ tooLong = true;
+ } while (true);
+}
+
+// Macro-expand a macro argument 'arg' to create 'expandedArg'.
+// Does not replace 'arg'.
+// Returns nullptr if no expanded argument is created.
+TPpContext::TokenStream* TPpContext::PrescanMacroArg(TokenStream& arg, TPpToken* ppToken, bool newLineOkay)
+{
+ // expand the argument
+ TokenStream* expandedArg = new TokenStream;
+ pushInput(new tMarkerInput(this));
+ pushTokenStreamInput(arg);
+ int token;
+ while ((token = scanToken(ppToken)) != tMarkerInput::marker && token != EndOfInput) {
+ token = tokenPaste(token, *ppToken);
+ if (token == PpAtomIdentifier) {
+ switch (MacroExpand(ppToken, false, newLineOkay)) {
+ case MacroExpandNotStarted:
+ break;
+ case MacroExpandError:
+ // toss the rest of the pushed-input argument by scanning until tMarkerInput
+ while ((token = scanToken(ppToken)) != tMarkerInput::marker && token != EndOfInput)
+ ;
+ break;
+ case MacroExpandStarted:
+ case MacroExpandUndef:
+ continue;
+ }
+ }
+ if (token == tMarkerInput::marker || token == EndOfInput)
+ break;
+ expandedArg->putToken(token, ppToken);
+ }
+
+ if (token != tMarkerInput::marker) {
+ // Error, or MacroExpand ate the marker, meaning bad input; recover
+ delete expandedArg;
+ expandedArg = nullptr;
+ }
+
+ return expandedArg;
+}
+
+//
+// Return the next token for a macro expansion, handling macro arguments,
+// whose semantics are dependent on being adjacent to ##.
+//
+int TPpContext::tMacroInput::scan(TPpToken* ppToken)
+{
+ int token;
+ do {
+ token = mac->body.getToken(pp->parseContext, ppToken);
+ } while (token == ' '); // handle white space in macro
+
+ // Hash operators basically turn off a round of macro substitution
+ // (the round done on the argument before the round done on the RHS of the
+ // macro definition):
+ //
+ // "A parameter in the replacement list, unless preceded by a # or ##
+ // preprocessing token or followed by a ## preprocessing token (see below),
+ // is replaced by the corresponding argument after all macros contained
+ // therein have been expanded."
+ //
+ // "If, in the replacement list, a parameter is immediately preceded or
+ // followed by a ## preprocessing token, the parameter is replaced by the
+ // corresponding argument's preprocessing token sequence."
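+ //
+ // For example, given "#define CAT(a,b) a ## b", the call CAT(x, y) pastes
+ // the raw, unexpanded argument tokens into the single token "xy", even if
+ // x is itself a macro; only arguments not adjacent to ## are replaced by
+ // their pre-expanded form (see expandedArgs below).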
+
+ bool pasting = false;
+ if (postpaste) {
+ // don't expand next token
+ pasting = true;
+ postpaste = false;
+ }
+
+ if (prepaste) {
+ // already know we should be on a ##, verify
+ assert(token == PpAtomPaste);
+ prepaste = false;
+ postpaste = true;
+ }
+
+ // see if we are preceding a ##
+ if (mac->body.peekUntokenizedPasting()) {
+ prepaste = true;
+ pasting = true;
+ }
+
+ // HLSL does expand macros before concatenation
+ if (pasting && pp->parseContext.isReadingHLSL())
+ pasting = false;
+
+ // TODO: preprocessor: properly handle whitespace (or lack of it) between tokens when expanding
+ if (token == PpAtomIdentifier) {
+ int i;
+ for (i = (int)mac->args.size() - 1; i >= 0; i--)
+ if (strcmp(pp->atomStrings.getString(mac->args[i]), ppToken->name) == 0)
+ break;
+ if (i >= 0) {
+ TokenStream* arg = expandedArgs[i];
+ if (arg == nullptr || pasting)
+ arg = args[i];
+ pp->pushTokenStreamInput(*arg, prepaste);
+
+ return pp->scanToken(ppToken);
+ }
+ }
+
+ if (token == EndOfInput)
+ mac->busy = 0;
+
+ return token;
+}
+
+// return a textual zero, for scanning a macro that was never defined
+int TPpContext::tZeroInput::scan(TPpToken* ppToken)
+{
+ if (done)
+ return EndOfInput;
+
+ ppToken->name[0] = '0';
+ ppToken->name[1] = 0;
+ ppToken->ival = 0;
+ ppToken->space = false;
+ done = true;
+
+ return PpAtomConstInt;
+}
+
+//
+// Check a token to see if it is a macro that should be expanded:
+// - If it is, and defined, push a tInput that will produce the appropriate
+// expansion and return MacroExpandStarted.
+// - If it is, but undefined, and expandUndef is requested, push a tInput
+// that will expand to 0 and return MacroExpandUndef.
+// - Otherwise, there is no expansion, and there are two cases:
+// * It might be okay there is no expansion, and no specific error was
+// detected. Returns MacroExpandNotStarted.
+// * The expansion was started, but could not be completed, due to an error
+// that cannot be recovered from. Returns MacroExpandError.
+//
+MacroExpandResult TPpContext::MacroExpand(TPpToken* ppToken, bool expandUndef, bool newLineOkay)
+{
+ ppToken->space = false;
+ int macroAtom = atomStrings.getAtom(ppToken->name);
+ switch (macroAtom) {
+ case PpAtomLineMacro:
+ ppToken->ival = parseContext.getCurrentLoc().line;
+ snprintf(ppToken->name, sizeof(ppToken->name), "%d", ppToken->ival);
+ UngetToken(PpAtomConstInt, ppToken);
+ return MacroExpandStarted;
+
+ case PpAtomFileMacro: {
+ if (parseContext.getCurrentLoc().name)
+ parseContext.ppRequireExtensions(ppToken->loc, 1, &E_GL_GOOGLE_cpp_style_line_directive, "filename-based __FILE__");
+ ppToken->ival = parseContext.getCurrentLoc().string;
+ snprintf(ppToken->name, sizeof(ppToken->name), "%s", ppToken->loc.getStringNameOrNum().c_str());
+ UngetToken(PpAtomConstInt, ppToken);
+ return MacroExpandStarted;
+ }
+
+ case PpAtomVersionMacro:
+ ppToken->ival = parseContext.version;
+ snprintf(ppToken->name, sizeof(ppToken->name), "%d", ppToken->ival);
+ UngetToken(PpAtomConstInt, ppToken);
+ return MacroExpandStarted;
+
+ default:
+ break;
+ }
+
+ MacroSymbol* macro = macroAtom == 0 ? nullptr : lookupMacroDef(macroAtom);
+
+ // no recursive expansions
+ if (macro != nullptr && macro->busy)
+ return MacroExpandNotStarted;
+
+ // not expanding undefined macros
+ if ((macro == nullptr || macro->undef) && ! expandUndef)
+ return MacroExpandNotStarted;
+
+ // 0 is the value of an undefined macro
+ if ((macro == nullptr || macro->undef) && expandUndef) {
+ pushInput(new tZeroInput(this));
+ return MacroExpandUndef;
+ }
+
+ tMacroInput *in = new tMacroInput(this);
+
+ TSourceLoc loc = ppToken->loc; // in case we go to the next line before discovering the error
+ in->mac = macro;
+ if (macro->functionLike) {
+ // We don't know yet if this will be a successful call of a
+ // function-like macro; need to look for a '(', but without trashing
+ // the passed in ppToken, until we know we are no longer speculative.
+ TPpToken parenToken;
+ int token = scanToken(&parenToken);
+ if (newLineOkay) {
+ while (token == '\n')
+ token = scanToken(&parenToken);
+ }
+ if (token != '(') {
+ // Function-like macro called with object-like syntax: okay, don't expand.
+ // (We ate exactly one token that might not be white space; put it back.)
+ UngetToken(token, &parenToken);
+ delete in;
+ return MacroExpandNotStarted;
+ }
+ in->args.resize(in->mac->args.size());
+ for (size_t i = 0; i < in->mac->args.size(); i++)
+ in->args[i] = new TokenStream;
+ in->expandedArgs.resize(in->mac->args.size());
+ for (size_t i = 0; i < in->mac->args.size(); i++)
+ in->expandedArgs[i] = nullptr;
+ size_t arg = 0;
+ bool tokenRecorded = false;
+ do {
+ TVector<char> nestStack;
+ while (true) {
+ token = scanToken(ppToken);
+ if (token == EndOfInput || token == tMarkerInput::marker) {
+ parseContext.ppError(loc, "End of input in macro", "macro expansion", atomStrings.getString(macroAtom));
+ delete in;
+ return MacroExpandError;
+ }
+ if (token == '\n') {
+ if (! newLineOkay) {
+ parseContext.ppError(loc, "End of line in macro substitution:", "macro expansion", atomStrings.getString(macroAtom));
+ delete in;
+ return MacroExpandError;
+ }
+ continue;
+ }
+ if (token == '#') {
+ parseContext.ppError(ppToken->loc, "unexpected '#'", "macro expansion", atomStrings.getString(macroAtom));
+ delete in;
+ return MacroExpandError;
+ }
+ if (in->mac->args.size() == 0 && token != ')')
+ break;
+ if (nestStack.size() == 0 && (token == ',' || token == ')'))
+ break;
+ if (token == '(')
+ nestStack.push_back(')');
+ else if (token == '{' && parseContext.isReadingHLSL())
+ nestStack.push_back('}');
+ else if (nestStack.size() > 0 && token == nestStack.back())
+ nestStack.pop_back();
+ in->args[arg]->putToken(token, ppToken);
+ tokenRecorded = true;
+ }
+ // end of single argument scan
+
+ if (token == ')') {
+ // closing paren of call
+ if (in->mac->args.size() == 1 && !tokenRecorded)
+ break;
+ arg++;
+ break;
+ }
+ arg++;
+ } while (arg < in->mac->args.size());
+ // end of all arguments scan
+
+ if (arg < in->mac->args.size())
+ parseContext.ppError(loc, "Too few args in Macro", "macro expansion", atomStrings.getString(macroAtom));
+ else if (token != ')') {
+ // Error recover code; find end of call, if possible
+ int depth = 0;
+ while (token != EndOfInput && (depth > 0 || token != ')')) {
+ if (token == ')' || token == '}')
+ depth--;
+ token = scanToken(ppToken);
+ if (token == '(' || token == '{')
+ depth++;
+ }
+
+ if (token == EndOfInput) {
+ parseContext.ppError(loc, "End of input in macro", "macro expansion", atomStrings.getString(macroAtom));
+ delete in;
+ return MacroExpandError;
+ }
+ parseContext.ppError(loc, "Too many args in macro", "macro expansion", atomStrings.getString(macroAtom));
+ }
+
+ // We need both expanded and non-expanded forms of the argument, for whether or
+ // not token pasting will be applied later when the argument is consumed next to ##.
+ for (size_t i = 0; i < in->mac->args.size(); i++)
+ in->expandedArgs[i] = PrescanMacroArg(*in->args[i], ppToken, newLineOkay);
+ }
+
+ pushInput(in);
+ macro->busy = 1;
+ macro->body.reset();
+
+ return MacroExpandStarted;
+}
+
+} // end namespace glslang
diff --git a/thirdparty/glslang/glslang/MachineIndependent/preprocessor/PpAtom.cpp b/thirdparty/glslang/glslang/MachineIndependent/preprocessor/PpAtom.cpp
new file mode 100644
index 0000000000..06c2333ef1
--- /dev/null
+++ b/thirdparty/glslang/glslang/MachineIndependent/preprocessor/PpAtom.cpp
@@ -0,0 +1,181 @@
+//
+// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+// Copyright (C) 2013 LunarG, Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+/****************************************************************************\
+Copyright (c) 2002, NVIDIA Corporation.
+
+NVIDIA Corporation("NVIDIA") supplies this software to you in
+consideration of your agreement to the following terms, and your use,
+installation, modification or redistribution of this NVIDIA software
+constitutes acceptance of these terms. If you do not agree with these
+terms, please do not use, install, modify or redistribute this NVIDIA
+software.
+
+In consideration of your agreement to abide by the following terms, and
+subject to these terms, NVIDIA grants you a personal, non-exclusive
+license, under NVIDIA's copyrights in this original NVIDIA software (the
+"NVIDIA Software"), to use, reproduce, modify and redistribute the
+NVIDIA Software, with or without modifications, in source and/or binary
+forms; provided that if you redistribute the NVIDIA Software, you must
+retain the copyright notice of NVIDIA, this notice and the following
+text and disclaimers in all such redistributions of the NVIDIA Software.
+Neither the name, trademarks, service marks nor logos of NVIDIA
+Corporation may be used to endorse or promote products derived from the
+NVIDIA Software without specific prior written permission from NVIDIA.
+Except as expressly stated in this notice, no other rights or licenses
+express or implied, are granted by NVIDIA herein, including but not
+limited to any patent rights that may be infringed by your derivative
+works or by other works in which the NVIDIA Software may be
+incorporated. No hardware is licensed hereunder.
+
+THE NVIDIA SOFTWARE IS BEING PROVIDED ON AN "AS IS" BASIS, WITHOUT
+WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED,
+INCLUDING WITHOUT LIMITATION, WARRANTIES OR CONDITIONS OF TITLE,
+NON-INFRINGEMENT, MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, OR
+ITS USE AND OPERATION EITHER ALONE OR IN COMBINATION WITH OTHER
+PRODUCTS.
+
+IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT,
+INCIDENTAL, EXEMPLARY, CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+TO, LOST PROFITS; PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) OR ARISING IN ANY WAY
+OUT OF THE USE, REPRODUCTION, MODIFICATION AND/OR DISTRIBUTION OF THE
+NVIDIA SOFTWARE, HOWEVER CAUSED AND WHETHER UNDER THEORY OF CONTRACT,
+TORT (INCLUDING NEGLIGENCE), STRICT LIABILITY OR OTHERWISE, EVEN IF
+NVIDIA HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+\****************************************************************************/
+
+#ifndef _CRT_SECURE_NO_WARNINGS
+#define _CRT_SECURE_NO_WARNINGS
+#endif
+
+#include <cassert>
+#include <cstdlib>
+#include <cstring>
+
+#include "PpContext.h"
+#include "PpTokens.h"
+
+namespace {
+
+using namespace glslang;
+
+const struct {
+ int val;
+ const char* str;
+} tokens[] = {
+
+ { PPAtomAddAssign, "+=" },
+ { PPAtomSubAssign, "-=" },
+ { PPAtomMulAssign, "*=" },
+ { PPAtomDivAssign, "/=" },
+ { PPAtomModAssign, "%=" },
+
+ { PpAtomRight, ">>" },
+ { PpAtomLeft, "<<" },
+ { PpAtomAnd, "&&" },
+ { PpAtomOr, "||" },
+ { PpAtomXor, "^^" },
+
+ { PpAtomRightAssign, ">>=" },
+ { PpAtomLeftAssign, "<<=" },
+ { PpAtomAndAssign, "&=" },
+ { PpAtomOrAssign, "|=" },
+ { PpAtomXorAssign, "^=" },
+
+ { PpAtomEQ, "==" },
+ { PpAtomNE, "!=" },
+ { PpAtomGE, ">=" },
+ { PpAtomLE, "<=" },
+
+ { PpAtomDecrement, "--" },
+ { PpAtomIncrement, "++" },
+
+ { PpAtomColonColon, "::" },
+
+ { PpAtomDefine, "define" },
+ { PpAtomUndef, "undef" },
+ { PpAtomIf, "if" },
+ { PpAtomElif, "elif" },
+ { PpAtomElse, "else" },
+ { PpAtomEndif, "endif" },
+ { PpAtomIfdef, "ifdef" },
+ { PpAtomIfndef, "ifndef" },
+ { PpAtomLine, "line" },
+ { PpAtomPragma, "pragma" },
+ { PpAtomError, "error" },
+
+ { PpAtomVersion, "version" },
+ { PpAtomCore, "core" },
+ { PpAtomCompatibility, "compatibility" },
+ { PpAtomEs, "es" },
+ { PpAtomExtension, "extension" },
+
+ { PpAtomLineMacro, "__LINE__" },
+ { PpAtomFileMacro, "__FILE__" },
+ { PpAtomVersionMacro, "__VERSION__" },
+
+ { PpAtomInclude, "include" },
+};
+
+} // end anonymous namespace
+
+namespace glslang {
+
+//
+// Initialize the atom table.
+//
+TStringAtomMap::TStringAtomMap()
+{
+ badToken.assign("<bad token>");
+
+ // Add single character tokens to the atom table:
+ const char* s = "~!%^&*()-+=|,.<>/?;:[]{}#\\";
+ char t[2];
+
+ t[1] = '\0';
+ while (*s) {
+ t[0] = *s;
+ addAtomFixed(t, s[0]);
+ s++;
+ }
+
+ // Add multiple-character scanner tokens:
+ for (size_t ii = 0; ii < sizeof(tokens)/sizeof(tokens[0]); ii++)
+ addAtomFixed(tokens[ii].str, tokens[ii].val);
+
+ nextAtom = PpAtomLast;
+}
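+
+// Usage sketch: after construction, getAtom("define") yields PpAtomDefine and
+// getAtom("+") yields '+' (single-character tokens are their own atom values);
+// getAddAtom() mints fresh atoms starting at PpAtomLast for anything else.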
+
+} // end namespace glslang
diff --git a/thirdparty/glslang/glslang/MachineIndependent/preprocessor/PpContext.cpp b/thirdparty/glslang/glslang/MachineIndependent/preprocessor/PpContext.cpp
new file mode 100644
index 0000000000..cc003a8d12
--- /dev/null
+++ b/thirdparty/glslang/glslang/MachineIndependent/preprocessor/PpContext.cpp
@@ -0,0 +1,119 @@
+//
+// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+// Copyright (C) 2013 LunarG, Inc.
+// Copyright (C) 2015-2018 Google, Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+/****************************************************************************\
+Copyright (c) 2002, NVIDIA Corporation.
+
+NVIDIA Corporation("NVIDIA") supplies this software to you in
+consideration of your agreement to the following terms, and your use,
+installation, modification or redistribution of this NVIDIA software
+constitutes acceptance of these terms. If you do not agree with these
+terms, please do not use, install, modify or redistribute this NVIDIA
+software.
+
+In consideration of your agreement to abide by the following terms, and
+subject to these terms, NVIDIA grants you a personal, non-exclusive
+license, under NVIDIA's copyrights in this original NVIDIA software (the
+"NVIDIA Software"), to use, reproduce, modify and redistribute the
+NVIDIA Software, with or without modifications, in source and/or binary
+forms; provided that if you redistribute the NVIDIA Software, you must
+retain the copyright notice of NVIDIA, this notice and the following
+text and disclaimers in all such redistributions of the NVIDIA Software.
+Neither the name, trademarks, service marks nor logos of NVIDIA
+Corporation may be used to endorse or promote products derived from the
+NVIDIA Software without specific prior written permission from NVIDIA.
+Except as expressly stated in this notice, no other rights or licenses
+express or implied, are granted by NVIDIA herein, including but not
+limited to any patent rights that may be infringed by your derivative
+works or by other works in which the NVIDIA Software may be
+incorporated. No hardware is licensed hereunder.
+
+THE NVIDIA SOFTWARE IS BEING PROVIDED ON AN "AS IS" BASIS, WITHOUT
+WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED,
+INCLUDING WITHOUT LIMITATION, WARRANTIES OR CONDITIONS OF TITLE,
+NON-INFRINGEMENT, MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, OR
+ITS USE AND OPERATION EITHER ALONE OR IN COMBINATION WITH OTHER
+PRODUCTS.
+
+IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT,
+INCIDENTAL, EXEMPLARY, CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+TO, LOST PROFITS; PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) OR ARISING IN ANY WAY
+OUT OF THE USE, REPRODUCTION, MODIFICATION AND/OR DISTRIBUTION OF THE
+NVIDIA SOFTWARE, HOWEVER CAUSED AND WHETHER UNDER THEORY OF CONTRACT,
+TORT (INCLUDING NEGLIGENCE), STRICT LIABILITY OR OTHERWISE, EVEN IF
+NVIDIA HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+\****************************************************************************/
+
+#include <cstdlib>
+#include <locale>
+
+#include "PpContext.h"
+
+namespace glslang {
+
+TPpContext::TPpContext(TParseContextBase& pc, const std::string& rootFileName, TShader::Includer& inclr) :
+ preamble(0), strings(0), previous_token('\n'), parseContext(pc), includer(inclr), inComment(false),
+ rootFileName(rootFileName),
+ currentSourceFile(rootFileName)
+{
+ ifdepth = 0;
+ for (elsetracker = 0; elsetracker < maxIfNesting; elsetracker++)
+ elseSeen[elsetracker] = false;
+ elsetracker = 0;
+
+ strtodStream.imbue(std::locale::classic());
+}
+
+TPpContext::~TPpContext()
+{
+ delete [] preamble;
+
+ // free up the inputStack
+ while (! inputStack.empty())
+ popInput();
+}
+
+void TPpContext::setInput(TInputScanner& input, bool versionWillBeError)
+{
+ assert(inputStack.size() == 0);
+
+ pushInput(new tStringInput(this, input));
+
+ errorOnVersion = versionWillBeError;
+ versionSeen = false;
+}
+
+} // end namespace glslang
diff --git a/thirdparty/glslang/glslang/MachineIndependent/preprocessor/PpContext.h b/thirdparty/glslang/glslang/MachineIndependent/preprocessor/PpContext.h
new file mode 100644
index 0000000000..8470e172a2
--- /dev/null
+++ b/thirdparty/glslang/glslang/MachineIndependent/preprocessor/PpContext.h
@@ -0,0 +1,702 @@
+//
+// Copyright (C) 2013 LunarG, Inc.
+// Copyright (C) 2015-2018 Google, Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+/****************************************************************************\
+Copyright (c) 2002, NVIDIA Corporation.
+
+NVIDIA Corporation("NVIDIA") supplies this software to you in
+consideration of your agreement to the following terms, and your use,
+installation, modification or redistribution of this NVIDIA software
+constitutes acceptance of these terms. If you do not agree with these
+terms, please do not use, install, modify or redistribute this NVIDIA
+software.
+
+In consideration of your agreement to abide by the following terms, and
+subject to these terms, NVIDIA grants you a personal, non-exclusive
+license, under NVIDIA's copyrights in this original NVIDIA software (the
+"NVIDIA Software"), to use, reproduce, modify and redistribute the
+NVIDIA Software, with or without modifications, in source and/or binary
+forms; provided that if you redistribute the NVIDIA Software, you must
+retain the copyright notice of NVIDIA, this notice and the following
+text and disclaimers in all such redistributions of the NVIDIA Software.
+Neither the name, trademarks, service marks nor logos of NVIDIA
+Corporation may be used to endorse or promote products derived from the
+NVIDIA Software without specific prior written permission from NVIDIA.
+Except as expressly stated in this notice, no other rights or licenses
+express or implied, are granted by NVIDIA herein, including but not
+limited to any patent rights that may be infringed by your derivative
+works or by other works in which the NVIDIA Software may be
+incorporated. No hardware is licensed hereunder.
+
+THE NVIDIA SOFTWARE IS BEING PROVIDED ON AN "AS IS" BASIS, WITHOUT
+WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED,
+INCLUDING WITHOUT LIMITATION, WARRANTIES OR CONDITIONS OF TITLE,
+NON-INFRINGEMENT, MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, OR
+ITS USE AND OPERATION EITHER ALONE OR IN COMBINATION WITH OTHER
+PRODUCTS.
+
+IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT,
+INCIDENTAL, EXEMPLARY, CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+TO, LOST PROFITS; PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) OR ARISING IN ANY WAY
+OUT OF THE USE, REPRODUCTION, MODIFICATION AND/OR DISTRIBUTION OF THE
+NVIDIA SOFTWARE, HOWEVER CAUSED AND WHETHER UNDER THEORY OF CONTRACT,
+TORT (INCLUDING NEGLIGENCE), STRICT LIABILITY OR OTHERWISE, EVEN IF
+NVIDIA HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+\****************************************************************************/
+
+#ifndef PPCONTEXT_H
+#define PPCONTEXT_H
+
+#include <stack>
+#include <unordered_map>
+#include <sstream>
+
+#include "../ParseHelper.h"
+#include "PpTokens.h"
+
+/* windows only pragma */
+#ifdef _MSC_VER
+ #pragma warning(disable : 4127)
+#endif
+
+namespace glslang {
+
+class TPpToken {
+public:
+ TPpToken() { clear(); }
+ void clear()
+ {
+ space = false;
+ i64val = 0;
+ loc.init();
+ name[0] = 0;
+ }
+
+ // Used for comparing macro definitions, so checks what is relevant for that.
+ bool operator==(const TPpToken& right)
+ {
+ return space == right.space &&
+ ival == right.ival && dval == right.dval && i64val == right.i64val &&
+ strncmp(name, right.name, MaxTokenLength) == 0;
+ }
+ bool operator!=(const TPpToken& right) { return ! operator==(right); }
+
+ TSourceLoc loc;
+ // True if a space (for white space or a removed comment) should also be
+ // recognized, in front of the token returned:
+ bool space;
+ // Numeric value of the token:
+ union {
+ int ival;
+ double dval;
+ long long i64val;
+ };
+ // Text string of the token:
+ char name[MaxTokenLength + 1];
+};
+
+class TStringAtomMap {
+//
+// Implementation is in PpAtom.cpp
+//
+// Maintain a bi-directional mapping between relevant preprocessor strings and
+// "atoms", which are unique integers (small, contiguous, not hash-like), one per string.
+//
+public:
+ TStringAtomMap();
+
+ // Map string -> atom.
+ // Return 0 if no existing string.
+ int getAtom(const char* s) const
+ {
+ auto it = atomMap.find(s);
+ return it == atomMap.end() ? 0 : it->second;
+ }
+
+ // Map a new or existing string -> atom, inventing a new atom if necessary.
+ int getAddAtom(const char* s)
+ {
+ int atom = getAtom(s);
+ if (atom == 0) {
+ atom = nextAtom++;
+ addAtomFixed(s, atom);
+ }
+ return atom;
+ }
+
+ // Map atom -> string.
+ const char* getString(int atom) const { return stringMap[atom]->c_str(); }
+
+protected:
+ TStringAtomMap(TStringAtomMap&);
+ TStringAtomMap& operator=(TStringAtomMap&);
+
+ TUnorderedMap<TString, int> atomMap;
+ TVector<const TString*> stringMap; // these point into the TString in atomMap
+ int nextAtom;
+
+ // Bad source characters can lead to bad atoms, so gracefully handle those by
+ // pre-filling the table with them (to avoid if tests later).
+ TString badToken;
+
+ // Add bi-directional mappings:
+ // - string -> atom
+ // - atom -> string
+ void addAtomFixed(const char* s, int atom)
+ {
+ auto it = atomMap.insert(std::pair<TString, int>(s, atom)).first;
+ if (stringMap.size() < (size_t)atom + 1)
+ stringMap.resize(atom + 100, &badToken);
+ stringMap[atom] = &it->first;
+ }
+};
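+
+// A minimal usage sketch (hypothetical 'map' instance):
+//   int a = map.getAddAtom("FOO");                 // mints a new atom on first sight
+//   assert(map.getAtom("FOO") == a);               // lookup is stable thereafter
+//   assert(strcmp(map.getString(a), "FOO") == 0);  // and reversible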
+
+class TInputScanner;
+
+enum MacroExpandResult {
+ MacroExpandNotStarted, // macro not expanded, which might not be an error
+ MacroExpandError, // a clear error occurred while expanding, no expansion
+ MacroExpandStarted, // macro expansion process has started
+ MacroExpandUndef // macro is undefined and is being expanded (to a textual 0)
+};
+
+// This class is the result of turning a huge pile of C code communicating through globals
+// into a class. This was done to allow instancing, in order to attain thread safety.
+// Don't expect too much in terms of OO design.
+class TPpContext {
+public:
+ TPpContext(TParseContextBase&, const std::string& rootFileName, TShader::Includer&);
+ virtual ~TPpContext();
+
+ void setPreamble(const char* preamble, size_t length);
+
+ int tokenize(TPpToken& ppToken);
+ int tokenPaste(int token, TPpToken&);
+
+ class tInput {
+ public:
+ tInput(TPpContext* p) : done(false), pp(p) { }
+ virtual ~tInput() { }
+
+ virtual int scan(TPpToken*) = 0;
+ virtual int getch() = 0;
+ virtual void ungetch() = 0;
+ virtual bool peekPasting() { return false; } // true when about to see ##
+ virtual bool peekContinuedPasting(int) { return false; } // true when non-spaced tokens can paste
+ virtual bool endOfReplacementList() { return false; } // true when at the end of a macro replacement list (RHS of #define)
+ virtual bool isMacroInput() { return false; }
+
+ // Will be called when we start reading tokens from this instance
+ virtual void notifyActivated() {}
+ // Will be called when we do not read tokens from this instance anymore
+ virtual void notifyDeleted() {}
+ protected:
+ bool done;
+ TPpContext* pp;
+ };
+
+ void setInput(TInputScanner& input, bool versionWillBeError);
+
+ void pushInput(tInput* in)
+ {
+ inputStack.push_back(in);
+ in->notifyActivated();
+ }
+ void popInput()
+ {
+ inputStack.back()->notifyDeleted();
+ delete inputStack.back();
+ inputStack.pop_back();
+ }
+
+ //
+ // From PpTokens.cpp
+ //
+
+ // Capture the needed parts of a token stream for macro recording/playback.
+ class TokenStream {
+ public:
+ // Manage a stream of these 'Token', which capture the relevant parts
+ // of a TPpToken, plus its atom.
+ class Token {
+ public:
+ Token(int atom, const TPpToken& ppToken) :
+ atom(atom),
+ space(ppToken.space),
+ i64val(ppToken.i64val),
+ name(ppToken.name) { }
+ int get(TPpToken& ppToken)
+ {
+ ppToken.clear();
+ ppToken.space = space;
+ ppToken.i64val = i64val;
+ snprintf(ppToken.name, sizeof(ppToken.name), "%s", name.c_str());
+ return atom;
+ }
+ bool isAtom(int a) const { return atom == a; }
+ int getAtom() const { return atom; }
+ bool nonSpaced() const { return !space; }
+ protected:
+ Token() {}
+ int atom;
+ bool space; // did a space precede the token?
+ long long i64val;
+ TString name;
+ };
+
+ TokenStream() : currentPos(0) { }
+
+ void putToken(int token, TPpToken* ppToken);
+ bool peekToken(int atom) { return !atEnd() && stream[currentPos].isAtom(atom); }
+ bool peekContinuedPasting(int atom)
+ {
+ // This is basically necessary because, for example, the PP
+ // tokenizer only accepts valid numeric-literals plus suffixes, so it
+ // separates a numeric-literal followed by a bad suffix into two tokens,
+ // which should both get pasted together into one token when token pasting occurs.
+ //
+ // The following code is a bit more generalized than the above example.
+ if (!atEnd() && atom == PpAtomIdentifier && stream[currentPos].nonSpaced()) {
+ switch(stream[currentPos].getAtom()) {
+ case PpAtomConstInt:
+ case PpAtomConstUint:
+ case PpAtomConstInt64:
+ case PpAtomConstUint64:
+ case PpAtomConstInt16:
+ case PpAtomConstUint16:
+ case PpAtomConstFloat:
+ case PpAtomConstDouble:
+ case PpAtomConstFloat16:
+ case PpAtomConstString:
+ case PpAtomIdentifier:
+ return true;
+ default:
+ break;
+ }
+ }
+
+ return false;
+ }
+ int getToken(TParseContextBase&, TPpToken*);
+ bool atEnd() { return currentPos >= stream.size(); }
+ bool peekTokenizedPasting(bool lastTokenPastes);
+ bool peekUntokenizedPasting();
+ void reset() { currentPos = 0; }
+
+ protected:
+ TVector<Token> stream;
+ size_t currentPos;
+ };
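+
+ // Usage sketch: a macro body, for instance, is recorded once with putToken()
+ // while the #define is parsed, then replayed any number of times by calling
+ // reset() followed by getToken() until the stream is atEnd() -- see
+ // tMacroInput::scan() and MacroExpand() in Pp.cpp, which do this via MacroSymbol::body.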
+
+ //
+ // From Pp.cpp
+ //
+
+ struct MacroSymbol {
+ MacroSymbol() : functionLike(0), busy(0), undef(0) { }
+ TVector<int> args;
+ TokenStream body;
+ unsigned functionLike : 1; // 0 means object-like, 1 means function-like
+ unsigned busy : 1;
+ unsigned undef : 1;
+ };
+
+ typedef TMap<int, MacroSymbol> TSymbolMap;
+ TSymbolMap macroDefs; // map atoms to macro definitions
+ MacroSymbol* lookupMacroDef(int atom)
+ {
+ auto existingMacroIt = macroDefs.find(atom);
+ return (existingMacroIt == macroDefs.end()) ? nullptr : &(existingMacroIt->second);
+ }
+ void addMacroDef(int atom, MacroSymbol& macroDef) { macroDefs[atom] = macroDef; }
+
+protected:
+ TPpContext(TPpContext&);
+ TPpContext& operator=(TPpContext&);
+
+ TStringAtomMap atomStrings;
+ char* preamble; // string to parse, all before line 1 of string 0; it is 0 if there is no preamble
+ int preambleLength;
+ char** strings; // official strings of shader, starting at string 0, line 1
+ size_t* lengths;
+ int numStrings; // how many official strings there are
+ int currentString; // which string we're currently parsing (-1 for preamble)
+
+ // Scanner data:
+ int previous_token;
+ TParseContextBase& parseContext;
+
+ // Get the next token from the *stack* of input sources, popping input sources
+ // that are out of tokens, until an input source is found that has a token.
+ // Return EndOfInput when there are no more tokens to be found by doing this.
+ int scanToken(TPpToken* ppToken)
+ {
+ int token = EndOfInput;
+
+ while (! inputStack.empty()) {
+ token = inputStack.back()->scan(ppToken);
+ if (token != EndOfInput || inputStack.empty())
+ break;
+ popInput();
+ }
+
+ return token;
+ }
+ int getChar() { return inputStack.back()->getch(); }
+ void ungetChar() { inputStack.back()->ungetch(); }
+ bool peekPasting() { return !inputStack.empty() && inputStack.back()->peekPasting(); }
+ bool peekContinuedPasting(int a)
+ {
+ return !inputStack.empty() && inputStack.back()->peekContinuedPasting(a);
+ }
+ bool endOfReplacementList() { return inputStack.empty() || inputStack.back()->endOfReplacementList(); }
+ bool isMacroInput() { return inputStack.size() > 0 && inputStack.back()->isMacroInput(); }
+
+ static const int maxIfNesting = 65;
+
+ int ifdepth; // current #if-#else-#endif nesting depth in the preprocessor
+ bool elseSeen[maxIfNesting]; // keep track of whether an #else has been seen at a particular depth
+ int elsetracker; // counter for #if-#else-#endif constructs
+
+ class tMacroInput : public tInput {
+ public:
+ tMacroInput(TPpContext* pp) : tInput(pp), prepaste(false), postpaste(false) { }
+ virtual ~tMacroInput()
+ {
+ for (size_t i = 0; i < args.size(); ++i)
+ delete args[i];
+ for (size_t i = 0; i < expandedArgs.size(); ++i)
+ delete expandedArgs[i];
+ }
+
+ virtual int scan(TPpToken*) override;
+ virtual int getch() override { assert(0); return EndOfInput; }
+ virtual void ungetch() override { assert(0); }
+ bool peekPasting() override { return prepaste; }
+ bool peekContinuedPasting(int a) override { return mac->body.peekContinuedPasting(a); }
+ bool endOfReplacementList() override { return mac->body.atEnd(); }
+ bool isMacroInput() override { return true; }
+
+ MacroSymbol *mac;
+ TVector<TokenStream*> args;
+ TVector<TokenStream*> expandedArgs;
+
+ protected:
+ bool prepaste; // true if we are just before ##
+ bool postpaste; // true if we are right after ##
+ };
+
+ class tMarkerInput : public tInput {
+ public:
+ tMarkerInput(TPpContext* pp) : tInput(pp) { }
+ virtual int scan(TPpToken*) override
+ {
+ if (done)
+ return EndOfInput;
+ done = true;
+
+ return marker;
+ }
+ virtual int getch() override { assert(0); return EndOfInput; }
+ virtual void ungetch() override { assert(0); }
+ static const int marker = -3;
+ };
+
+ class tZeroInput : public tInput {
+ public:
+ tZeroInput(TPpContext* pp) : tInput(pp) { }
+ virtual int scan(TPpToken*) override;
+ virtual int getch() override { assert(0); return EndOfInput; }
+ virtual void ungetch() override { assert(0); }
+ };
+
+ std::vector<tInput*> inputStack;
+ bool errorOnVersion;
+ bool versionSeen;
+
+ //
+ // From Pp.cpp
+ //
+
+ // Used to obtain #include content.
+ TShader::Includer& includer;
+
+ int CPPdefine(TPpToken * ppToken);
+ int CPPundef(TPpToken * ppToken);
+ int CPPelse(int matchelse, TPpToken * ppToken);
+ int extraTokenCheck(int atom, TPpToken* ppToken, int token);
+ int eval(int token, int precedence, bool shortCircuit, int& res, bool& err, TPpToken * ppToken);
+ int evalToToken(int token, bool shortCircuit, int& res, bool& err, TPpToken * ppToken);
+ int CPPif (TPpToken * ppToken);
+ int CPPifdef(int defined, TPpToken * ppToken);
+ int CPPinclude(TPpToken * ppToken);
+ int CPPline(TPpToken * ppToken);
+ int CPPerror(TPpToken * ppToken);
+ int CPPpragma(TPpToken * ppToken);
+ int CPPversion(TPpToken * ppToken);
+ int CPPextension(TPpToken * ppToken);
+ int readCPPline(TPpToken * ppToken);
+ int scanHeaderName(TPpToken* ppToken, char delimit);
+ TokenStream* PrescanMacroArg(TokenStream&, TPpToken*, bool newLineOkay);
+ MacroExpandResult MacroExpand(TPpToken* ppToken, bool expandUndef, bool newLineOkay);
+
+ //
+ // From PpTokens.cpp
+ //
+ void pushTokenStreamInput(TokenStream&, bool pasting = false);
+ void UngetToken(int token, TPpToken*);
+
+ class tTokenInput : public tInput {
+ public:
+ tTokenInput(TPpContext* pp, TokenStream* t, bool prepasting) :
+ tInput(pp),
+ tokens(t),
+ lastTokenPastes(prepasting) { }
+ virtual int scan(TPpToken *ppToken) override { return tokens->getToken(pp->parseContext, ppToken); }
+ virtual int getch() override { assert(0); return EndOfInput; }
+ virtual void ungetch() override { assert(0); }
+ virtual bool peekPasting() override { return tokens->peekTokenizedPasting(lastTokenPastes); }
+ bool peekContinuedPasting(int a) override { return tokens->peekContinuedPasting(a); }
+ protected:
+ TokenStream* tokens;
+ bool lastTokenPastes; // true if the last token in the input is to be pasted, rather than consumed as a token
+ };
+
+ class tUngotTokenInput : public tInput {
+ public:
+ tUngotTokenInput(TPpContext* pp, int t, TPpToken* p) : tInput(pp), token(t), lval(*p) { }
+ virtual int scan(TPpToken *) override;
+ virtual int getch() override { assert(0); return EndOfInput; }
+ virtual void ungetch() override { assert(0); }
+ protected:
+ int token;
+ TPpToken lval;
+ };
+
+ //
+ // From PpScanner.cpp
+ //
+ class tStringInput : public tInput {
+ public:
+ tStringInput(TPpContext* pp, TInputScanner& i) : tInput(pp), input(&i) { }
+ virtual int scan(TPpToken*) override;
+
+ // Scanner used to get source stream characters.
+ // - Escaped newlines are handled here, invisibly to the caller.
+ // - All forms of newline are handled, and turned into just a '\n'.
+ int getch() override
+ {
+ int ch = input->get();
+
+ if (ch == '\\') {
+ // Move past escaped newlines, as many as sequentially exist
+ do {
+ if (input->peek() == '\r' || input->peek() == '\n') {
+ bool allowed = pp->parseContext.lineContinuationCheck(input->getSourceLoc(), pp->inComment);
+ if (! allowed && pp->inComment)
+ return '\\';
+
+ // escape one newline now
+ ch = input->get();
+ int nextch = input->get();
+ if (ch == '\r' && nextch == '\n')
+ ch = input->get();
+ else
+ ch = nextch;
+ } else
+ return '\\';
+ } while (ch == '\\');
+ }
+
+ // handle any non-escaped newline
+ if (ch == '\r' || ch == '\n') {
+ if (ch == '\r' && input->peek() == '\n')
+ input->get();
+ return '\n';
+ }
+
+ return ch;
+ }
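+
+        // For example, with the handling above, the two source lines
+        // "float x = 1.\" and "5;" (a backslash-newline continuation) are
+        // delivered to callers of getch() as the single logical line
+        // "float x = 1.5;" -- the escaped newline is consumed here and
+        // never reaches the tokenizer.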
+
+        // Scanner used to back up the source stream characters. Newlines are
+        // handled here, invisibly to the caller, meaning we have to undo exactly
+ // what getch() above does (e.g., don't leave things in the middle of a
+ // sequence of escaped newlines).
+ void ungetch() override
+ {
+ input->unget();
+
+ do {
+ int ch = input->peek();
+ if (ch == '\r' || ch == '\n') {
+ if (ch == '\n') {
+ // correct for two-character newline
+ input->unget();
+ if (input->peek() != '\r')
+ input->get();
+ }
+ // now in front of a complete newline, move past an escape character
+ input->unget();
+ if (input->peek() == '\\')
+ input->unget();
+ else {
+ input->get();
+ break;
+ }
+ } else
+ break;
+ } while (true);
+ }
+
+ protected:
+ TInputScanner* input;
+ };
+
+ // Holds a reference to included file data, as well as a
+ // prologue and an epilogue string. This can be scanned using the tInput
+ // interface and acts as a single source string.
+ class TokenizableIncludeFile : public tInput {
+ public:
+ // Copies prologue and epilogue. The includedFile must remain valid
+ // until this TokenizableIncludeFile is no longer used.
+ TokenizableIncludeFile(const TSourceLoc& startLoc,
+ const std::string& prologue,
+ TShader::Includer::IncludeResult* includedFile,
+ const std::string& epilogue,
+ TPpContext* pp)
+ : tInput(pp),
+ prologue_(prologue),
+ epilogue_(epilogue),
+ includedFile_(includedFile),
+ scanner(3, strings, lengths, nullptr, 0, 0, true),
+ prevScanner(nullptr),
+ stringInput(pp, scanner)
+ {
+ strings[0] = prologue_.data();
+ strings[1] = includedFile_->headerData;
+ strings[2] = epilogue_.data();
+
+ lengths[0] = prologue_.size();
+ lengths[1] = includedFile_->headerLength;
+ lengths[2] = epilogue_.size();
+
+ scanner.setLine(startLoc.line);
+ scanner.setString(startLoc.string);
+
+ scanner.setFile(startLoc.getFilenameStr(), 0);
+ scanner.setFile(startLoc.getFilenameStr(), 1);
+ scanner.setFile(startLoc.getFilenameStr(), 2);
+ }
+
+ // tInput methods:
+ int scan(TPpToken* t) override { return stringInput.scan(t); }
+ int getch() override { return stringInput.getch(); }
+ void ungetch() override { stringInput.ungetch(); }
+
+ void notifyActivated() override
+ {
+ prevScanner = pp->parseContext.getScanner();
+ pp->parseContext.setScanner(&scanner);
+ pp->push_include(includedFile_);
+ }
+
+ void notifyDeleted() override
+ {
+ pp->parseContext.setScanner(prevScanner);
+ pp->pop_include();
+ }
+
+ private:
+ TokenizableIncludeFile& operator=(const TokenizableIncludeFile&);
+
+ // Stores the prologue for this string.
+ const std::string prologue_;
+
+ // Stores the epilogue for this string.
+ const std::string epilogue_;
+
+ // Points to the IncludeResult that this TokenizableIncludeFile represents.
+ TShader::Includer::IncludeResult* includedFile_;
+
+        // Will point to prologue_, includedFile_->headerData and epilogue_.
+        // This is passed to the scanner constructor.
+        // These do not own the storage, which must remain valid until this
+        // object has been destroyed.
+ const char* strings[3];
+        // Lengths of the strings above, passed to the scanner constructor.
+ size_t lengths[3];
+        // Scans over the strings above.
+ TInputScanner scanner;
+ // The previous effective scanner before the scanner in this instance
+ // has been activated.
+ TInputScanner* prevScanner;
+ // Delegate object implementing the tInput interface.
+ tStringInput stringInput;
+ };
+
+ int ScanFromString(char* s);
+ void missingEndifCheck();
+ int lFloatConst(int len, int ch, TPpToken* ppToken);
+ int characterLiteral(TPpToken* ppToken);
+
+ void push_include(TShader::Includer::IncludeResult* result)
+ {
+ currentSourceFile = result->headerName;
+ includeStack.push(result);
+ }
+
+ void pop_include()
+ {
+ TShader::Includer::IncludeResult* include = includeStack.top();
+ includeStack.pop();
+ includer.releaseInclude(include);
+ if (includeStack.empty()) {
+ currentSourceFile = rootFileName;
+ } else {
+ currentSourceFile = includeStack.top()->headerName;
+ }
+ }
+
+ bool inComment;
+ std::string rootFileName;
+ std::stack<TShader::Includer::IncludeResult*> includeStack;
+ std::string currentSourceFile;
+
+ std::istringstream strtodStream;
+};
+
+} // end namespace glslang
+
+#endif // PPCONTEXT_H
diff --git a/thirdparty/glslang/glslang/MachineIndependent/preprocessor/PpScanner.cpp b/thirdparty/glslang/glslang/MachineIndependent/preprocessor/PpScanner.cpp
new file mode 100644
index 0000000000..f6f52d7d55
--- /dev/null
+++ b/thirdparty/glslang/glslang/MachineIndependent/preprocessor/PpScanner.cpp
@@ -0,0 +1,1246 @@
+//
+// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+// Copyright (C) 2013 LunarG, Inc.
+// Copyright (C) 2017 ARM Limited.
+// Copyright (C) 2015-2018 Google, Inc.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+/****************************************************************************\
+Copyright (c) 2002, NVIDIA Corporation.
+
+NVIDIA Corporation("NVIDIA") supplies this software to you in
+consideration of your agreement to the following terms, and your use,
+installation, modification or redistribution of this NVIDIA software
+constitutes acceptance of these terms. If you do not agree with these
+terms, please do not use, install, modify or redistribute this NVIDIA
+software.
+
+In consideration of your agreement to abide by the following terms, and
+subject to these terms, NVIDIA grants you a personal, non-exclusive
+license, under NVIDIA's copyrights in this original NVIDIA software (the
+"NVIDIA Software"), to use, reproduce, modify and redistribute the
+NVIDIA Software, with or without modifications, in source and/or binary
+forms; provided that if you redistribute the NVIDIA Software, you must
+retain the copyright notice of NVIDIA, this notice and the following
+text and disclaimers in all such redistributions of the NVIDIA Software.
+Neither the name, trademarks, service marks nor logos of NVIDIA
+Corporation may be used to endorse or promote products derived from the
+NVIDIA Software without specific prior written permission from NVIDIA.
+Except as expressly stated in this notice, no other rights or licenses
+express or implied, are granted by NVIDIA herein, including but not
+limited to any patent rights that may be infringed by your derivative
+works or by other works in which the NVIDIA Software may be
+incorporated. No hardware is licensed hereunder.
+
+THE NVIDIA SOFTWARE IS BEING PROVIDED ON AN "AS IS" BASIS, WITHOUT
+WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED,
+INCLUDING WITHOUT LIMITATION, WARRANTIES OR CONDITIONS OF TITLE,
+NON-INFRINGEMENT, MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, OR
+ITS USE AND OPERATION EITHER ALONE OR IN COMBINATION WITH OTHER
+PRODUCTS.
+
+IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT,
+INCIDENTAL, EXEMPLARY, CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+TO, LOST PROFITS; PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) OR ARISING IN ANY WAY
+OUT OF THE USE, REPRODUCTION, MODIFICATION AND/OR DISTRIBUTION OF THE
+NVIDIA SOFTWARE, HOWEVER CAUSED AND WHETHER UNDER THEORY OF CONTRACT,
+TORT (INCLUDING NEGLIGENCE), STRICT LIABILITY OR OTHERWISE, EVEN IF
+NVIDIA HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+\****************************************************************************/
+
+#ifndef _CRT_SECURE_NO_WARNINGS
+#define _CRT_SECURE_NO_WARNINGS
+#endif
+
+#include <cstdlib>
+#include <cstring>
+
+#include "PpContext.h"
+#include "PpTokens.h"
+#include "../Scan.h"
+
+namespace glslang {
+
+///////////////////////////////////////////////////////////////////////////////////////////////
+/////////////////////////////////// Floating point constants: /////////////////////////////////
+///////////////////////////////////////////////////////////////////////////////////////////////
+
+//
+// Scan a single- or double-precision floating point constant.
+// Assumes that the scanner has seen at least one digit,
+// followed by either a decimal '.' or the letter 'e', or a
+// precision ending (e.g., F or LF).
+//
+// This is technically not correct, as the preprocessor should just
+// accept the numeric literal along with whatever suffix it has, but
+// currently, it stops on seeing a bad suffix, treating that as the
+// next token. This affects things like token pasting, where it is
+// relevant how many tokens something was broken into.
+//
+// See peekContinuedPasting().
+int TPpContext::lFloatConst(int len, int ch, TPpToken* ppToken)
+{
+ const auto saveName = [&](int ch) {
+ if (len <= MaxTokenLength)
+ ppToken->name[len++] = static_cast<char>(ch);
+ };
+
+ // find the range of non-zero digits before the decimal point
+ int startNonZero = 0;
+ while (startNonZero < len && ppToken->name[startNonZero] == '0')
+ ++startNonZero;
+ int endNonZero = len;
+ while (endNonZero > startNonZero && ppToken->name[endNonZero-1] == '0')
+ --endNonZero;
+ int numWholeNumberDigits = endNonZero - startNonZero;
+
+ // accumulate the range's value
+ bool fastPath = numWholeNumberDigits <= 15; // when the number gets too complex, set to false
+ unsigned long long wholeNumber = 0;
+ if (fastPath) {
+ for (int i = startNonZero; i < endNonZero; ++i)
+ wholeNumber = wholeNumber * 10 + (ppToken->name[i] - '0');
+ }
+ int decimalShift = len - endNonZero;
+
+ // Decimal point:
+ bool hasDecimalOrExponent = false;
+ if (ch == '.') {
+ hasDecimalOrExponent = true;
+ saveName(ch);
+ ch = getChar();
+ int firstDecimal = len;
+
+ // 1.#INF or -1.#INF
+ if (ch == '#' && (ifdepth > 0 || parseContext.intermediate.getSource() == EShSourceHlsl)) {
+ if ((len < 2) ||
+ (len == 2 && ppToken->name[0] != '1') ||
+ (len == 3 && ppToken->name[1] != '1' && !(ppToken->name[0] == '-' || ppToken->name[0] == '+')) ||
+ (len > 3))
+ parseContext.ppError(ppToken->loc, "unexpected use of", "#", "");
+ else {
+ // we have 1.# or -1.# or +1.#, check for 'INF'
+ if ((ch = getChar()) != 'I' ||
+ (ch = getChar()) != 'N' ||
+ (ch = getChar()) != 'F')
+ parseContext.ppError(ppToken->loc, "expected 'INF'", "#", "");
+ else {
+                    // we have [+-]1.#INF, and we are targeting IEEE 754, so wrap it up:
+ saveName('I');
+ saveName('N');
+ saveName('F');
+ ppToken->name[len] = '\0';
+ if (ppToken->name[0] == '-')
+ ppToken->i64val = 0xfff0000000000000; // -Infinity
+ else
+ ppToken->i64val = 0x7ff0000000000000; // +Infinity
+ return PpAtomConstFloat;
+ }
+ }
+ }
+
+ // Consume leading-zero digits after the decimal point
+ while (ch == '0') {
+ saveName(ch);
+ ch = getChar();
+ }
+ int startNonZeroDecimal = len;
+ int endNonZeroDecimal = len;
+
+ // Consume remaining digits, up to the exponent
+ while (ch >= '0' && ch <= '9') {
+ saveName(ch);
+ if (ch != '0')
+ endNonZeroDecimal = len;
+ ch = getChar();
+ }
+
+ // Compute accumulation up to the last non-zero digit
+ if (endNonZeroDecimal > startNonZeroDecimal) {
+ numWholeNumberDigits += endNonZeroDecimal - endNonZero - 1; // don't include the "."
+ if (numWholeNumberDigits > 15)
+ fastPath = false;
+ if (fastPath) {
+ for (int i = endNonZero; i < endNonZeroDecimal; ++i) {
+ if (ppToken->name[i] != '.')
+ wholeNumber = wholeNumber * 10 + (ppToken->name[i] - '0');
+ }
+ }
+ decimalShift = firstDecimal - endNonZeroDecimal;
+ }
+ }
+
+ // Exponent:
+ bool negativeExponent = false;
+ double exponentValue = 0.0;
+ int exponent = 0;
+ {
+ if (ch == 'e' || ch == 'E') {
+ hasDecimalOrExponent = true;
+ saveName(ch);
+ ch = getChar();
+ if (ch == '+' || ch == '-') {
+ negativeExponent = ch == '-';
+ saveName(ch);
+ ch = getChar();
+ }
+ if (ch >= '0' && ch <= '9') {
+ while (ch >= '0' && ch <= '9') {
+ exponent = exponent * 10 + (ch - '0');
+ saveName(ch);
+ ch = getChar();
+ }
+ } else {
+ parseContext.ppError(ppToken->loc, "bad character in float exponent", "", "");
+ }
+ }
+
+ // Compensate for location of decimal
+ if (negativeExponent)
+ exponent -= decimalShift;
+ else {
+ exponent += decimalShift;
+ if (exponent < 0) {
+ negativeExponent = true;
+ exponent = -exponent;
+ }
+ }
+ if (exponent > 22)
+ fastPath = false;
+
+ if (fastPath) {
+ // Compute the floating-point value of the exponent
+ exponentValue = 1.0;
+ if (exponent > 0) {
+ double expFactor = 10;
+ while (exponent > 0) {
+ if (exponent & 0x1)
+ exponentValue *= expFactor;
+ expFactor *= expFactor;
+ exponent >>= 1;
+ }
+ }
+ }
+ }
+
+ // Suffix:
+ bool isDouble = false;
+ bool isFloat16 = false;
+ if (ch == 'l' || ch == 'L') {
+ if (ifdepth == 0 && parseContext.intermediate.getSource() == EShSourceGlsl)
+ parseContext.doubleCheck(ppToken->loc, "double floating-point suffix");
+ if (ifdepth == 0 && !hasDecimalOrExponent)
+ parseContext.ppError(ppToken->loc, "float literal needs a decimal point or exponent", "", "");
+ if (parseContext.intermediate.getSource() == EShSourceGlsl) {
+ int ch2 = getChar();
+ if (ch2 != 'f' && ch2 != 'F') {
+ ungetChar();
+ ungetChar();
+ } else {
+ saveName(ch);
+ saveName(ch2);
+ isDouble = true;
+ }
+ } else if (parseContext.intermediate.getSource() == EShSourceHlsl) {
+ saveName(ch);
+ isDouble = true;
+ }
+ } else if (ch == 'h' || ch == 'H') {
+ if (ifdepth == 0 && parseContext.intermediate.getSource() == EShSourceGlsl)
+ parseContext.float16Check(ppToken->loc, "half floating-point suffix");
+ if (ifdepth == 0 && !hasDecimalOrExponent)
+ parseContext.ppError(ppToken->loc, "float literal needs a decimal point or exponent", "", "");
+ if (parseContext.intermediate.getSource() == EShSourceGlsl) {
+ int ch2 = getChar();
+ if (ch2 != 'f' && ch2 != 'F') {
+ ungetChar();
+ ungetChar();
+ } else {
+ saveName(ch);
+ saveName(ch2);
+ isFloat16 = true;
+ }
+ } else if (parseContext.intermediate.getSource() == EShSourceHlsl) {
+ saveName(ch);
+ isFloat16 = true;
+ }
+ } else if (ch == 'f' || ch == 'F') {
+ if (ifdepth == 0)
+ parseContext.profileRequires(ppToken->loc, EEsProfile, 300, nullptr, "floating-point suffix");
+ if (ifdepth == 0 && !parseContext.relaxedErrors())
+ parseContext.profileRequires(ppToken->loc, ~EEsProfile, 120, nullptr, "floating-point suffix");
+ if (ifdepth == 0 && !hasDecimalOrExponent)
+ parseContext.ppError(ppToken->loc, "float literal needs a decimal point or exponent", "", "");
+ saveName(ch);
+ } else
+ ungetChar();
+
+ // Patch up the name and length for overflow
+
+ if (len > MaxTokenLength) {
+ len = MaxTokenLength;
+ parseContext.ppError(ppToken->loc, "float literal too long", "", "");
+ }
+ ppToken->name[len] = '\0';
+
+ // Compute the numerical value
+ if (fastPath) {
+ // compute the floating-point value of the exponent
+ if (exponentValue == 0.0)
+ ppToken->dval = (double)wholeNumber;
+ else if (negativeExponent)
+ ppToken->dval = (double)wholeNumber / exponentValue;
+ else
+ ppToken->dval = (double)wholeNumber * exponentValue;
+ } else {
+ // slow path
+ ppToken->dval = 0.0;
+
+ // remove suffix
+ TString numstr(ppToken->name);
+ if (numstr.back() == 'f' || numstr.back() == 'F')
+ numstr.pop_back();
+ if (numstr.back() == 'h' || numstr.back() == 'H')
+ numstr.pop_back();
+ if (numstr.back() == 'l' || numstr.back() == 'L')
+ numstr.pop_back();
+
+ // use platform library
+ strtodStream.clear();
+ strtodStream.str(numstr.c_str());
+ strtodStream >> ppToken->dval;
+ if (strtodStream.fail()) {
+ // Assume failure combined with a large exponent was overflow, in
+ // an attempt to set INF.
+ if (!negativeExponent && exponent + numWholeNumberDigits > 300)
+ ppToken->i64val = 0x7ff0000000000000; // +Infinity
+            // Assume failure combined with a small exponent was underflow.
+ if (negativeExponent && exponent + numWholeNumberDigits > 300)
+ ppToken->dval = 0.0;
+ // Unknown reason for failure. Theory is that either
+ // - the 0.0 is still there, or
+ // - something reasonable was written that is better than 0.0
+ }
+ }
+
+ // Return the right token type
+ if (isDouble)
+ return PpAtomConstDouble;
+ else if (isFloat16)
+ return PpAtomConstFloat16;
+ else
+ return PpAtomConstFloat;
+}
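+
+// A standalone sketch of the fast path's power-of-ten computation above:
+// exponentiation by squaring builds 10^exponent in roughly log2(exponent)
+// squarings. Every power of ten up to 10^22 is exactly representable in
+// double precision, which is why the fast path is abandoned for larger
+// exponents. (Illustrative helper only; not used elsewhere in this file.)
+static double powerOfTen(int exponent)
+{
+    double result = 1.0;
+    double factor = 10.0;
+    while (exponent > 0) {
+        if (exponent & 0x1)    // low bit set: fold the current factor in
+            result *= factor;
+        factor *= factor;      // square: 10, 100, 10000, ...
+        exponent >>= 1;
+    }
+    return result;
+}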
+
+// Recognize a character literal.
+//
+// The first ' has already been accepted, read the rest, through the closing '.
+//
+// Always returns PpAtomConstInt.
+//
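+// For example, '\n' yields ival == 10, while an unrecognized escape such as
+// '\C' is treated the same as 'C' and yields ival == 'C'.
+//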
+int TPpContext::characterLiteral(TPpToken* ppToken)
+{
+ ppToken->name[0] = 0;
+ ppToken->ival = 0;
+
+ if (parseContext.intermediate.getSource() != EShSourceHlsl) {
+        // illegal, except in a macro definition, in which case we report the character
+ return '\'';
+ }
+
+ int ch = getChar();
+ switch (ch) {
+ case '\'':
+ // As empty sequence: ''
+ parseContext.ppError(ppToken->loc, "unexpected", "\'", "");
+ return PpAtomConstInt;
+ case '\\':
+ // As escape sequence: '\XXX'
+ switch (ch = getChar()) {
+ case 'a':
+ ppToken->ival = 7;
+ break;
+ case 'b':
+ ppToken->ival = 8;
+ break;
+ case 't':
+ ppToken->ival = 9;
+ break;
+ case 'n':
+ ppToken->ival = 10;
+ break;
+ case 'v':
+ ppToken->ival = 11;
+ break;
+ case 'f':
+ ppToken->ival = 12;
+ break;
+ case 'r':
+ ppToken->ival = 13;
+ break;
+ case 'x':
+ case '0':
+ parseContext.ppError(ppToken->loc, "octal and hex sequences not supported", "\\", "");
+ break;
+ default:
+ // This catches '\'', '\"', '\?', etc.
+ // Also, things like '\C' mean the same thing as 'C'
+ // (after the above cases are filtered out).
+ ppToken->ival = ch;
+ break;
+ }
+ break;
+ default:
+ ppToken->ival = ch;
+ break;
+ }
+ ppToken->name[0] = (char)ppToken->ival;
+ ppToken->name[1] = '\0';
+ ch = getChar();
+ if (ch != '\'') {
+ parseContext.ppError(ppToken->loc, "expected", "\'", "");
+ // Look ahead for a closing '
+ do {
+ ch = getChar();
+ } while (ch != '\'' && ch != EndOfInput && ch != '\n');
+ }
+
+ return PpAtomConstInt;
+}
+
+//
+// Scanner used to tokenize source stream.
+//
+// N.B. Invalid numeric suffixes are not consumed.
+//
+// This is technically not correct, as the preprocessor should just
+// accept the numeric literal along with whatever suffix it has, but
+// currently, it stops on seeing a bad suffix, treating that as the
+// next token. This affects things like token pasting, where it is
+// relevant how many tokens something was broken into.
+// See peekContinuedPasting().
+//
+int TPpContext::tStringInput::scan(TPpToken* ppToken)
+{
+ int AlreadyComplained = 0;
+ int len = 0;
+ int ch = 0;
+ int ii = 0;
+ unsigned long long ival = 0;
+ const auto floatingPointChar = [&](int ch) { return ch == '.' || ch == 'e' || ch == 'E' ||
+ ch == 'f' || ch == 'F' ||
+ ch == 'h' || ch == 'H'; };
+
+ static const char* const Int64_Extensions[] = {
+ E_GL_ARB_gpu_shader_int64,
+ E_GL_EXT_shader_explicit_arithmetic_types,
+ E_GL_EXT_shader_explicit_arithmetic_types_int64 };
+ static const int Num_Int64_Extensions = sizeof(Int64_Extensions) / sizeof(Int64_Extensions[0]);
+
+ static const char* const Int16_Extensions[] = {
+#ifdef AMD_EXTENSIONS
+ E_GL_AMD_gpu_shader_int16,
+#endif
+ E_GL_EXT_shader_explicit_arithmetic_types,
+ E_GL_EXT_shader_explicit_arithmetic_types_int16 };
+ static const int Num_Int16_Extensions = sizeof(Int16_Extensions) / sizeof(Int16_Extensions[0]);
+
+ ppToken->ival = 0;
+ ppToken->i64val = 0;
+ ppToken->space = false;
+ ch = getch();
+ for (;;) {
+ while (ch == ' ' || ch == '\t') {
+ ppToken->space = true;
+ ch = getch();
+ }
+
+ ppToken->loc = pp->parseContext.getCurrentLoc();
+ len = 0;
+ switch (ch) {
+ default:
+ // Single character token, including EndOfInput, '#' and '\' (escaped newlines are handled at a lower level, so this is just a '\' token)
+ if (ch > PpAtomMaxSingle)
+ ch = PpAtomBadToken;
+ return ch;
+
+ case 'A': case 'B': case 'C': case 'D': case 'E':
+ case 'F': case 'G': case 'H': case 'I': case 'J':
+ case 'K': case 'L': case 'M': case 'N': case 'O':
+ case 'P': case 'Q': case 'R': case 'S': case 'T':
+ case 'U': case 'V': case 'W': case 'X': case 'Y':
+ case 'Z': case '_':
+ case 'a': case 'b': case 'c': case 'd': case 'e':
+ case 'f': case 'g': case 'h': case 'i': case 'j':
+ case 'k': case 'l': case 'm': case 'n': case 'o':
+ case 'p': case 'q': case 'r': case 's': case 't':
+ case 'u': case 'v': case 'w': case 'x': case 'y':
+ case 'z':
+ do {
+ if (len < MaxTokenLength) {
+ ppToken->name[len++] = (char)ch;
+ ch = getch();
+ } else {
+ if (! AlreadyComplained) {
+ pp->parseContext.ppError(ppToken->loc, "name too long", "", "");
+ AlreadyComplained = 1;
+ }
+ ch = getch();
+ }
+ } while ((ch >= 'a' && ch <= 'z') ||
+ (ch >= 'A' && ch <= 'Z') ||
+ (ch >= '0' && ch <= '9') ||
+ ch == '_');
+
+            // a line continuation with no token before or after makes len == 0, and we need to start over, skipping white space, etc.
+ if (len == 0)
+ continue;
+
+ ppToken->name[len] = '\0';
+ ungetch();
+ return PpAtomIdentifier;
+ case '0':
+ ppToken->name[len++] = (char)ch;
+ ch = getch();
+ if (ch == 'x' || ch == 'X') {
+ // must be hexadecimal
+
+ bool isUnsigned = false;
+ bool isInt64 = false;
+ bool isInt16 = false;
+ ppToken->name[len++] = (char)ch;
+ ch = getch();
+ if ((ch >= '0' && ch <= '9') ||
+ (ch >= 'A' && ch <= 'F') ||
+ (ch >= 'a' && ch <= 'f')) {
+
+ ival = 0;
+ do {
+ if (len < MaxTokenLength && ival <= 0x0fffffffffffffffull) {
+ ppToken->name[len++] = (char)ch;
+ if (ch >= '0' && ch <= '9') {
+ ii = ch - '0';
+ } else if (ch >= 'A' && ch <= 'F') {
+ ii = ch - 'A' + 10;
+ } else if (ch >= 'a' && ch <= 'f') {
+ ii = ch - 'a' + 10;
+ } else
+ pp->parseContext.ppError(ppToken->loc, "bad digit in hexadecimal literal", "", "");
+ ival = (ival << 4) | ii;
+ } else {
+ if (! AlreadyComplained) {
+                            if (len < MaxTokenLength)
+ pp->parseContext.ppError(ppToken->loc, "hexadecimal literal too big", "", "");
+ else
+ pp->parseContext.ppError(ppToken->loc, "hexadecimal literal too long", "", "");
+ AlreadyComplained = 1;
+ }
+ ival = 0xffffffffffffffffull;
+ }
+ ch = getch();
+ } while ((ch >= '0' && ch <= '9') ||
+ (ch >= 'A' && ch <= 'F') ||
+ (ch >= 'a' && ch <= 'f'));
+ } else {
+ pp->parseContext.ppError(ppToken->loc, "bad digit in hexadecimal literal", "", "");
+ }
+ if (ch == 'u' || ch == 'U') {
+ if (len < MaxTokenLength)
+ ppToken->name[len++] = (char)ch;
+ isUnsigned = true;
+
+ int nextCh = getch();
+ if (nextCh == 'l' || nextCh == 'L') {
+ if (len < MaxTokenLength)
+ ppToken->name[len++] = (char)nextCh;
+ isInt64 = true;
+ } else
+ ungetch();
+
+#ifdef AMD_EXTENSIONS
+ nextCh = getch();
+ if ((nextCh == 's' || nextCh == 'S') &&
+ pp->parseContext.intermediate.getSource() == EShSourceGlsl) {
+ if (len < MaxTokenLength)
+ ppToken->name[len++] = (char)nextCh;
+ isInt16 = true;
+ } else
+ ungetch();
+#endif
+ } else if (ch == 'l' || ch == 'L') {
+ if (len < MaxTokenLength)
+ ppToken->name[len++] = (char)ch;
+ isInt64 = true;
+#ifdef AMD_EXTENSIONS
+ } else if ((ch == 's' || ch == 'S') &&
+ pp->parseContext.intermediate.getSource() == EShSourceGlsl) {
+ if (len < MaxTokenLength)
+ ppToken->name[len++] = (char)ch;
+ isInt16 = true;
+#endif
+ } else
+ ungetch();
+ ppToken->name[len] = '\0';
+
+ if (isInt64 && pp->parseContext.intermediate.getSource() == EShSourceGlsl) {
+ if (pp->ifdepth == 0) {
+ pp->parseContext.requireProfile(ppToken->loc, ~EEsProfile,
+ "64-bit hexadecimal literal");
+ pp->parseContext.profileRequires(ppToken->loc, ~EEsProfile, 0,
+ Num_Int64_Extensions, Int64_Extensions, "64-bit hexadecimal literal");
+ }
+ ppToken->i64val = ival;
+ return isUnsigned ? PpAtomConstUint64 : PpAtomConstInt64;
+ } else if (isInt16) {
+ if (pp->ifdepth == 0) {
+ if (pp->parseContext.intermediate.getSource() == EShSourceGlsl) {
+ pp->parseContext.requireProfile(ppToken->loc, ~EEsProfile,
+ "16-bit hexadecimal literal");
+ pp->parseContext.profileRequires(ppToken->loc, ~EEsProfile, 0,
+ Num_Int16_Extensions, Int16_Extensions, "16-bit hexadecimal literal");
+ }
+ }
+ ppToken->ival = (int)ival;
+ return isUnsigned ? PpAtomConstUint16 : PpAtomConstInt16;
+ } else {
+ if (ival > 0xffffffffu && !AlreadyComplained)
+ pp->parseContext.ppError(ppToken->loc, "hexadecimal literal too big", "", "");
+ ppToken->ival = (int)ival;
+ return isUnsigned ? PpAtomConstUint : PpAtomConstInt;
+ }
+ } else {
+                // could be an octal integer or a floating point number; speculatively pursue octal until it must be floating point
+
+ bool isUnsigned = false;
+ bool isInt64 = false;
+ bool isInt16 = false;
+ bool octalOverflow = false;
+ bool nonOctal = false;
+ ival = 0;
+
+ // see how much octal-like stuff we can read
+ while (ch >= '0' && ch <= '7') {
+ if (len < MaxTokenLength)
+ ppToken->name[len++] = (char)ch;
+ else if (! AlreadyComplained) {
+ pp->parseContext.ppError(ppToken->loc, "numeric literal too long", "", "");
+ AlreadyComplained = 1;
+ }
+ if (ival <= 0x1fffffffffffffffull) {
+ ii = ch - '0';
+ ival = (ival << 3) | ii;
+ } else
+ octalOverflow = true;
+ ch = getch();
+ }
+
+ // could be part of a float...
+ if (ch == '8' || ch == '9') {
+ nonOctal = true;
+ do {
+ if (len < MaxTokenLength)
+ ppToken->name[len++] = (char)ch;
+ else if (! AlreadyComplained) {
+ pp->parseContext.ppError(ppToken->loc, "numeric literal too long", "", "");
+ AlreadyComplained = 1;
+ }
+ ch = getch();
+ } while (ch >= '0' && ch <= '9');
+ }
+ if (floatingPointChar(ch))
+ return pp->lFloatConst(len, ch, ppToken);
+
+ // wasn't a float, so must be octal...
+ if (nonOctal)
+ pp->parseContext.ppError(ppToken->loc, "octal literal digit too large", "", "");
+
+ if (ch == 'u' || ch == 'U') {
+ if (len < MaxTokenLength)
+ ppToken->name[len++] = (char)ch;
+ isUnsigned = true;
+
+ int nextCh = getch();
+ if (nextCh == 'l' || nextCh == 'L') {
+ if (len < MaxTokenLength)
+ ppToken->name[len++] = (char)nextCh;
+ isInt64 = true;
+ } else
+ ungetch();
+
+#ifdef AMD_EXTENSIONS
+ nextCh = getch();
+ if ((nextCh == 's' || nextCh == 'S') &&
+ pp->parseContext.intermediate.getSource() == EShSourceGlsl) {
+ if (len < MaxTokenLength)
+ ppToken->name[len++] = (char)nextCh;
+ isInt16 = true;
+ } else
+ ungetch();
+#endif
+ } else if (ch == 'l' || ch == 'L') {
+ if (len < MaxTokenLength)
+ ppToken->name[len++] = (char)ch;
+ isInt64 = true;
+#ifdef AMD_EXTENSIONS
+ } else if ((ch == 's' || ch == 'S') &&
+ pp->parseContext.intermediate.getSource() == EShSourceGlsl) {
+ if (len < MaxTokenLength)
+ ppToken->name[len++] = (char)ch;
+ isInt16 = true;
+#endif
+ } else
+ ungetch();
+ ppToken->name[len] = '\0';
+
+ if (!isInt64 && ival > 0xffffffffu)
+ octalOverflow = true;
+
+ if (octalOverflow)
+ pp->parseContext.ppError(ppToken->loc, "octal literal too big", "", "");
+
+ if (isInt64 && pp->parseContext.intermediate.getSource() == EShSourceGlsl) {
+ if (pp->ifdepth == 0) {
+ pp->parseContext.requireProfile(ppToken->loc, ~EEsProfile,
+ "64-bit octal literal");
+ pp->parseContext.profileRequires(ppToken->loc, ~EEsProfile, 0,
+ Num_Int64_Extensions, Int64_Extensions, "64-bit octal literal");
+ }
+ ppToken->i64val = ival;
+ return isUnsigned ? PpAtomConstUint64 : PpAtomConstInt64;
+ } else if (isInt16) {
+ if (pp->ifdepth == 0) {
+ if (pp->parseContext.intermediate.getSource() == EShSourceGlsl) {
+ pp->parseContext.requireProfile(ppToken->loc, ~EEsProfile,
+ "16-bit octal literal");
+ pp->parseContext.profileRequires(ppToken->loc, ~EEsProfile, 0,
+ Num_Int16_Extensions, Int16_Extensions, "16-bit octal literal");
+ }
+ }
+ ppToken->ival = (int)ival;
+ return isUnsigned ? PpAtomConstUint16 : PpAtomConstInt16;
+ } else {
+ ppToken->ival = (int)ival;
+ return isUnsigned ? PpAtomConstUint : PpAtomConstInt;
+ }
+ }
+ break;
+ case '1': case '2': case '3': case '4':
+ case '5': case '6': case '7': case '8': case '9':
+ // can't be hexadecimal or octal, is either decimal or floating point
+
+ do {
+ if (len < MaxTokenLength)
+ ppToken->name[len++] = (char)ch;
+ else if (! AlreadyComplained) {
+ pp->parseContext.ppError(ppToken->loc, "numeric literal too long", "", "");
+ AlreadyComplained = 1;
+ }
+ ch = getch();
+ } while (ch >= '0' && ch <= '9');
+ if (floatingPointChar(ch))
+ return pp->lFloatConst(len, ch, ppToken);
+ else {
+ // Finish handling signed and unsigned integers
+ int numericLen = len;
+ bool isUnsigned = false;
+ bool isInt64 = false;
+ bool isInt16 = false;
+ if (ch == 'u' || ch == 'U') {
+ if (len < MaxTokenLength)
+ ppToken->name[len++] = (char)ch;
+ isUnsigned = true;
+
+ int nextCh = getch();
+ if (nextCh == 'l' || nextCh == 'L') {
+ if (len < MaxTokenLength)
+ ppToken->name[len++] = (char)nextCh;
+ isInt64 = true;
+ } else
+ ungetch();
+
+#ifdef AMD_EXTENSIONS
+ nextCh = getch();
+ if ((nextCh == 's' || nextCh == 'S') &&
+ pp->parseContext.intermediate.getSource() == EShSourceGlsl) {
+ if (len < MaxTokenLength)
+ ppToken->name[len++] = (char)nextCh;
+ isInt16 = true;
+ } else
+ ungetch();
+#endif
+ } else if (ch == 'l' || ch == 'L') {
+ if (len < MaxTokenLength)
+ ppToken->name[len++] = (char)ch;
+ isInt64 = true;
+#ifdef AMD_EXTENSIONS
+ } else if ((ch == 's' || ch == 'S') &&
+ pp->parseContext.intermediate.getSource() == EShSourceGlsl) {
+ if (len < MaxTokenLength)
+ ppToken->name[len++] = (char)ch;
+ isInt16 = true;
+#endif
+ } else
+ ungetch();
+
+ ppToken->name[len] = '\0';
+ ival = 0;
+ const unsigned oneTenthMaxInt = 0xFFFFFFFFu / 10;
+ const unsigned remainderMaxInt = 0xFFFFFFFFu - 10 * oneTenthMaxInt;
+ const unsigned long long oneTenthMaxInt64 = 0xFFFFFFFFFFFFFFFFull / 10;
+ const unsigned long long remainderMaxInt64 = 0xFFFFFFFFFFFFFFFFull - 10 * oneTenthMaxInt64;
+ const unsigned short oneTenthMaxInt16 = 0xFFFFu / 10;
+ const unsigned short remainderMaxInt16 = 0xFFFFu - 10 * oneTenthMaxInt16;
+ for (int i = 0; i < numericLen; i++) {
+ ch = ppToken->name[i] - '0';
+ bool overflow = false;
+ if (isInt64)
+ overflow = (ival > oneTenthMaxInt64 || (ival == oneTenthMaxInt64 && (unsigned long long)ch > remainderMaxInt64));
+ else if (isInt16)
+ overflow = (ival > oneTenthMaxInt16 || (ival == oneTenthMaxInt16 && (unsigned short)ch > remainderMaxInt16));
+ else
+ overflow = (ival > oneTenthMaxInt || (ival == oneTenthMaxInt && (unsigned)ch > remainderMaxInt));
+ if (overflow) {
+ pp->parseContext.ppError(ppToken->loc, "numeric literal too big", "", "");
+ ival = 0xFFFFFFFFFFFFFFFFull;
+ break;
+ } else
+ ival = ival * 10 + ch;
+ }
+
+ if (isInt64 && pp->parseContext.intermediate.getSource() == EShSourceGlsl) {
+ if (pp->ifdepth == 0) {
+ pp->parseContext.requireProfile(ppToken->loc, ~EEsProfile,
+ "64-bit literal");
+ pp->parseContext.profileRequires(ppToken->loc, ~EEsProfile, 0,
+ Num_Int64_Extensions, Int64_Extensions, "64-bit literal");
+ }
+ ppToken->i64val = ival;
+ return isUnsigned ? PpAtomConstUint64 : PpAtomConstInt64;
+ } else if (isInt16) {
+ if (pp->ifdepth == 0 && pp->parseContext.intermediate.getSource() == EShSourceGlsl) {
+ pp->parseContext.requireProfile(ppToken->loc, ~EEsProfile,
+ "16-bit literal");
+ pp->parseContext.profileRequires(ppToken->loc, ~EEsProfile, 0,
+ Num_Int16_Extensions, Int16_Extensions, "16-bit literal");
+ }
+ ppToken->ival = (int)ival;
+ return isUnsigned ? PpAtomConstUint16 : PpAtomConstInt16;
+ } else {
+ ppToken->ival = (int)ival;
+ return isUnsigned ? PpAtomConstUint : PpAtomConstInt;
+ }
+ }
+ break;
+ case '-':
+ ch = getch();
+ if (ch == '-') {
+ return PpAtomDecrement;
+ } else if (ch == '=') {
+ return PPAtomSubAssign;
+ } else {
+ ungetch();
+ return '-';
+ }
+ case '+':
+ ch = getch();
+ if (ch == '+') {
+ return PpAtomIncrement;
+ } else if (ch == '=') {
+ return PPAtomAddAssign;
+ } else {
+ ungetch();
+ return '+';
+ }
+ case '*':
+ ch = getch();
+ if (ch == '=') {
+ return PPAtomMulAssign;
+ } else {
+ ungetch();
+ return '*';
+ }
+ case '%':
+ ch = getch();
+ if (ch == '=') {
+ return PPAtomModAssign;
+ } else {
+ ungetch();
+ return '%';
+ }
+ case '^':
+ ch = getch();
+ if (ch == '^') {
+ return PpAtomXor;
+ } else {
+ if (ch == '=')
+ return PpAtomXorAssign;
+                else {
+ ungetch();
+ return '^';
+ }
+ }
+
+ case '=':
+ ch = getch();
+ if (ch == '=') {
+ return PpAtomEQ;
+ } else {
+ ungetch();
+ return '=';
+ }
+ case '!':
+ ch = getch();
+ if (ch == '=') {
+ return PpAtomNE;
+ } else {
+ ungetch();
+ return '!';
+ }
+ case '|':
+ ch = getch();
+ if (ch == '|') {
+ return PpAtomOr;
+ } else if (ch == '=') {
+ return PpAtomOrAssign;
+ } else {
+ ungetch();
+ return '|';
+ }
+ case '&':
+ ch = getch();
+ if (ch == '&') {
+ return PpAtomAnd;
+ } else if (ch == '=') {
+ return PpAtomAndAssign;
+ } else {
+ ungetch();
+ return '&';
+ }
+ case '<':
+ ch = getch();
+ if (ch == '<') {
+ ch = getch();
+ if (ch == '=')
+ return PpAtomLeftAssign;
+ else {
+ ungetch();
+ return PpAtomLeft;
+ }
+ } else if (ch == '=') {
+ return PpAtomLE;
+ } else {
+ ungetch();
+ return '<';
+ }
+ case '>':
+ ch = getch();
+ if (ch == '>') {
+ ch = getch();
+ if (ch == '=')
+ return PpAtomRightAssign;
+ else {
+ ungetch();
+ return PpAtomRight;
+ }
+ } else if (ch == '=') {
+ return PpAtomGE;
+ } else {
+ ungetch();
+ return '>';
+ }
+ case '.':
+ ch = getch();
+ if (ch >= '0' && ch <= '9') {
+ ungetch();
+ return pp->lFloatConst(0, '.', ppToken);
+ } else {
+ ungetch();
+ return '.';
+ }
+ case '/':
+ ch = getch();
+ if (ch == '/') {
+ pp->inComment = true;
+ do {
+ ch = getch();
+ } while (ch != '\n' && ch != EndOfInput);
+ ppToken->space = true;
+ pp->inComment = false;
+
+ return ch;
+ } else if (ch == '*') {
+ ch = getch();
+ do {
+ while (ch != '*') {
+ if (ch == EndOfInput) {
+ pp->parseContext.ppError(ppToken->loc, "End of input in comment", "comment", "");
+ return ch;
+ }
+ ch = getch();
+ }
+ ch = getch();
+ if (ch == EndOfInput) {
+ pp->parseContext.ppError(ppToken->loc, "End of input in comment", "comment", "");
+ return ch;
+ }
+ } while (ch != '/');
+ ppToken->space = true;
+ // loop again to get the next token...
+ break;
+ } else if (ch == '=') {
+ return PPAtomDivAssign;
+ } else {
+ ungetch();
+ return '/';
+ }
+ break;
+ case '\'':
+ return pp->characterLiteral(ppToken);
+ case '"':
+ // TODO: If this gets enhanced to handle escape sequences, or
+ // anything that is different than what #include needs, then
+ // #include needs to use scanHeaderName() for this.
+ ch = getch();
+ while (ch != '"' && ch != '\n' && ch != EndOfInput) {
+ if (len < MaxTokenLength) {
+ ppToken->name[len] = (char)ch;
+ len++;
+ ch = getch();
+ } else
+ break;
+            }
+ ppToken->name[len] = '\0';
+ if (ch != '"') {
+ ungetch();
+ pp->parseContext.ppError(ppToken->loc, "End of line in string", "string", "");
+ }
+ return PpAtomConstString;
+ case ':':
+ ch = getch();
+ if (ch == ':')
+ return PpAtomColonColon;
+ ungetch();
+ return ':';
+ }
+
+ ch = getch();
+ }
+}
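+
+// A standalone sketch of the overflow guard used for decimal accumulation
+// above: value * 10 + digit wraps a 32-bit unsigned accumulator exactly when
+// value > 0xFFFFFFFF / 10, or value equals that quotient and digit exceeds
+// the remainder 0xFFFFFFFF - 10 * (0xFFFFFFFF / 10). ('digits' is assumed to
+// be a NUL-terminated string of decimal digits; illustrative helper only.)
+static bool accumulateDecimal32(const char* digits, unsigned int& out)
+{
+    const unsigned int oneTenthMax = 0xFFFFFFFFu / 10;                // 429496729
+    const unsigned int remainderMax = 0xFFFFFFFFu - 10 * oneTenthMax; // 5
+    unsigned int value = 0;
+    for (; *digits != '\0'; ++digits) {
+        unsigned int digit = (unsigned int)(*digits - '0');
+        if (value > oneTenthMax || (value == oneTenthMax && digit > remainderMax))
+            return false;      // value * 10 + digit would wrap past 0xFFFFFFFF
+        value = value * 10 + digit;
+    }
+    out = value;
+    return true;
+}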
+
+//
+// The main functional entry point into the preprocessor, which will
+// scan the source strings to figure out and return the next processing token.
+//
+// Return the token, or EndOfInput when no more tokens.
+//
+int TPpContext::tokenize(TPpToken& ppToken)
+{
+    for (;;) {
+ int token = scanToken(&ppToken);
+
+ // Handle token-pasting logic
+ token = tokenPaste(token, ppToken);
+
+ if (token == EndOfInput) {
+ missingEndifCheck();
+ return EndOfInput;
+ }
+ if (token == '#') {
+ if (previous_token == '\n') {
+ token = readCPPline(&ppToken);
+ if (token == EndOfInput) {
+ missingEndifCheck();
+ return EndOfInput;
+ }
+ continue;
+ } else {
+ parseContext.ppError(ppToken.loc, "preprocessor directive cannot be preceded by another token", "#", "");
+ return EndOfInput;
+ }
+ }
+ previous_token = token;
+
+ if (token == '\n')
+ continue;
+
+ // expand macros
+ if (token == PpAtomIdentifier) {
+ switch (MacroExpand(&ppToken, false, true)) {
+ case MacroExpandNotStarted:
+ break;
+ case MacroExpandError:
+ return EndOfInput;
+ case MacroExpandStarted:
+ case MacroExpandUndef:
+ continue;
+ }
+ }
+
+ switch (token) {
+ case PpAtomIdentifier:
+ case PpAtomConstInt:
+ case PpAtomConstUint:
+ case PpAtomConstFloat:
+ case PpAtomConstInt64:
+ case PpAtomConstUint64:
+ case PpAtomConstInt16:
+ case PpAtomConstUint16:
+ case PpAtomConstDouble:
+ case PpAtomConstFloat16:
+ if (ppToken.name[0] == '\0')
+ continue;
+ break;
+ case PpAtomConstString:
+ if (ifdepth == 0 && parseContext.intermediate.getSource() != EShSourceHlsl) {
+ // HLSL allows string literals.
+ parseContext.ppError(ppToken.loc, "string literals not supported", "\"\"", "");
+ continue;
+ }
+ break;
+ case '\'':
+ parseContext.ppError(ppToken.loc, "character literals not supported", "\'", "");
+ continue;
+ default:
+ snprintf(ppToken.name, sizeof(ppToken.name), "%s", atomStrings.getString(token));
+ break;
+ }
+
+ return token;
+ }
+}
+
+//
+// Do all token-pasting related combining of two pasted tokens when getting a
+// stream of tokens from a replacement list. Degenerates to no processing if a
+// replacement list is not the source of the token stream.
+//
+int TPpContext::tokenPaste(int token, TPpToken& ppToken)
+{
+ // starting with ## is illegal, skip to next token
+ if (token == PpAtomPaste) {
+ parseContext.ppError(ppToken.loc, "unexpected location", "##", "");
+ return scanToken(&ppToken);
+ }
+
+ int resultToken = token; // "foo" pasted with "35" is an identifier, not a number
+
+ // ## can be chained, process all in the chain at once
+ while (peekPasting()) {
+ TPpToken pastedPpToken;
+
+ // next token has to be ##
+ token = scanToken(&pastedPpToken);
+ assert(token == PpAtomPaste);
+
+ // This covers end of macro expansion
+ if (endOfReplacementList()) {
+ parseContext.ppError(ppToken.loc, "unexpected location; end of replacement list", "##", "");
+ break;
+ }
+
+ // Get the token(s) after the ##.
+ // Because of "space" semantics, and prior tokenization, what
+        // appeared to be a single token, e.g. "3A", might have been tokenized
+ // into two tokens "3" and "A", but the "A" will have 'space' set to
+ // false. Accumulate all of these to recreate the original lexical
+ // appearing token.
+ do {
+ token = scanToken(&pastedPpToken);
+
+ // This covers end of argument expansion
+ if (token == tMarkerInput::marker) {
+ parseContext.ppError(ppToken.loc, "unexpected location; end of argument", "##", "");
+ return resultToken;
+ }
+
+ // get the token text
+ switch (resultToken) {
+ case PpAtomIdentifier:
+                // identifiers already carry their text in the token's name field
+ break;
+ case '=':
+ case '!':
+ case '-':
+ case '~':
+ case '+':
+ case '*':
+ case '/':
+ case '%':
+ case '<':
+ case '>':
+ case '|':
+ case '^':
+ case '&':
+ case PpAtomRight:
+ case PpAtomLeft:
+ case PpAtomAnd:
+ case PpAtomOr:
+ case PpAtomXor:
+ snprintf(ppToken.name, sizeof(ppToken.name), "%s", atomStrings.getString(resultToken));
+ snprintf(pastedPpToken.name, sizeof(pastedPpToken.name), "%s", atomStrings.getString(token));
+ break;
+ default:
+ parseContext.ppError(ppToken.loc, "not supported for these tokens", "##", "");
+ return resultToken;
+ }
+
+ // combine the tokens
+ if (strlen(ppToken.name) + strlen(pastedPpToken.name) > MaxTokenLength) {
+ parseContext.ppError(ppToken.loc, "combined tokens are too long", "##", "");
+ return resultToken;
+ }
+ snprintf(&ppToken.name[0] + strlen(ppToken.name), sizeof(ppToken.name) - strlen(ppToken.name),
+ "%s", pastedPpToken.name);
+
+ // correct the kind of token we are making, if needed (identifiers stay identifiers)
+ if (resultToken != PpAtomIdentifier) {
+ int newToken = atomStrings.getAtom(ppToken.name);
+ if (newToken > 0)
+ resultToken = newToken;
+ else
+ parseContext.ppError(ppToken.loc, "combined token is invalid", "##", "");
+ }
+ } while (peekContinuedPasting(resultToken));
+ }
+
+ return resultToken;
+}
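+
+// For example, given
+//     #define CAT(a, b) a ## b
+//     CAT(foo, 35)
+// the loop above scans "foo", consumes the ##, scans "35", and concatenates
+// the two spellings into the single identifier "foo35". Because the left
+// operand was PpAtomIdentifier, the pasted result stays an identifier and is
+// not re-atomized.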
+
+// Checks if we've seen balanced #if...#endif
+void TPpContext::missingEndifCheck()
+{
+ if (ifdepth > 0)
+ parseContext.ppError(parseContext.getCurrentLoc(), "missing #endif", "", "");
+}
+
+} // end namespace glslang
diff --git a/thirdparty/glslang/glslang/MachineIndependent/preprocessor/PpTokens.cpp b/thirdparty/glslang/glslang/MachineIndependent/preprocessor/PpTokens.cpp
new file mode 100644
index 0000000000..ac9d8ac351
--- /dev/null
+++ b/thirdparty/glslang/glslang/MachineIndependent/preprocessor/PpTokens.cpp
@@ -0,0 +1,219 @@
+//
+// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+// Copyright (C) 2013 LunarG, Inc.
+// Copyright (C) 2015-2018 Google, Inc.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+/****************************************************************************\
+Copyright (c) 2002, NVIDIA Corporation.
+
+NVIDIA Corporation("NVIDIA") supplies this software to you in
+consideration of your agreement to the following terms, and your use,
+installation, modification or redistribution of this NVIDIA software
+constitutes acceptance of these terms. If you do not agree with these
+terms, please do not use, install, modify or redistribute this NVIDIA
+software.
+
+In consideration of your agreement to abide by the following terms, and
+subject to these terms, NVIDIA grants you a personal, non-exclusive
+license, under NVIDIA's copyrights in this original NVIDIA software (the
+"NVIDIA Software"), to use, reproduce, modify and redistribute the
+NVIDIA Software, with or without modifications, in source and/or binary
+forms; provided that if you redistribute the NVIDIA Software, you must
+retain the copyright notice of NVIDIA, this notice and the following
+text and disclaimers in all such redistributions of the NVIDIA Software.
+Neither the name, trademarks, service marks nor logos of NVIDIA
+Corporation may be used to endorse or promote products derived from the
+NVIDIA Software without specific prior written permission from NVIDIA.
+Except as expressly stated in this notice, no other rights or licenses
+express or implied, are granted by NVIDIA herein, including but not
+limited to any patent rights that may be infringed by your derivative
+works or by other works in which the NVIDIA Software may be
+incorporated. No hardware is licensed hereunder.
+
+THE NVIDIA SOFTWARE IS BEING PROVIDED ON AN "AS IS" BASIS, WITHOUT
+WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED,
+INCLUDING WITHOUT LIMITATION, WARRANTIES OR CONDITIONS OF TITLE,
+NON-INFRINGEMENT, MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, OR
+ITS USE AND OPERATION EITHER ALONE OR IN COMBINATION WITH OTHER
+PRODUCTS.
+
+IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT,
+INCIDENTAL, EXEMPLARY, CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+TO, LOST PROFITS; PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) OR ARISING IN ANY WAY
+OUT OF THE USE, REPRODUCTION, MODIFICATION AND/OR DISTRIBUTION OF THE
+NVIDIA SOFTWARE, HOWEVER CAUSED AND WHETHER UNDER THEORY OF CONTRACT,
+TORT (INCLUDING NEGLIGENCE), STRICT LIABILITY OR OTHERWISE, EVEN IF
+NVIDIA HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+\****************************************************************************/
+
+//
+// For recording and playing back the stream of tokens in a macro definition.
+//
+
+#ifndef _CRT_SECURE_NO_WARNINGS
+#define _CRT_SECURE_NO_WARNINGS
+#endif
+#if (defined(_MSC_VER) && _MSC_VER < 1900 /*vs2015*/)
+#define snprintf sprintf_s
+#endif
+
+#include <cassert>
+#include <cstdlib>
+#include <cstring>
+#include <cctype>
+
+#include "PpContext.h"
+#include "PpTokens.h"
+
+namespace glslang {
+
+// Add a token (including backing string) to the end of a macro
+// token stream, for later playback.
+void TPpContext::TokenStream::putToken(int atom, TPpToken* ppToken)
+{
+ TokenStream::Token streamToken(atom, *ppToken);
+ stream.push_back(streamToken);
+}
+
+// Read the next token from a macro token stream.
+int TPpContext::TokenStream::getToken(TParseContextBase& parseContext, TPpToken *ppToken)
+{
+ if (atEnd())
+ return EndOfInput;
+
+ int atom = stream[currentPos++].get(*ppToken);
+ ppToken->loc = parseContext.getCurrentLoc();
+
+ // Check for ##, unless the current # is the last character
+ if (atom == '#') {
+ if (peekToken('#')) {
+ parseContext.requireProfile(ppToken->loc, ~EEsProfile, "token pasting (##)");
+ parseContext.profileRequires(ppToken->loc, ~EEsProfile, 130, 0, "token pasting (##)");
+ currentPos++;
+ atom = PpAtomPaste;
+ }
+ }
+
+ return atom;
+}
+
+// We are pasting if
+// 1. we are preceding a pasting operator within this stream
+// or
+// 2. the entire macro is preceding a pasting operator (lastTokenPastes)
+// and we are also on the last token
+bool TPpContext::TokenStream::peekTokenizedPasting(bool lastTokenPastes)
+{
+ // 1. preceding ##?
+
+ size_t savePos = currentPos;
+ // skip white space
+ while (peekToken(' '))
+ ++currentPos;
+ if (peekToken(PpAtomPaste)) {
+ currentPos = savePos;
+ return true;
+ }
+
+ // 2. last token and we've been told after this there will be a ##
+
+ if (! lastTokenPastes)
+ return false;
+    // Getting here means the last token will be pasted with what comes after this stream
+
+ // Are we at the last non-whitespace token?
+ savePos = currentPos;
+ bool moreTokens = false;
+ do {
+ if (atEnd())
+ break;
+ if (!peekToken(' ')) {
+ moreTokens = true;
+ break;
+ }
+ ++currentPos;
+ } while (true);
+ currentPos = savePos;
+
+ return !moreTokens;
+}
+
+// See if the next non-white-space tokens are two consecutive #
+bool TPpContext::TokenStream::peekUntokenizedPasting()
+{
+ // don't return early, have to restore this
+ size_t savePos = currentPos;
+
+ // skip white-space
+ while (peekToken(' '))
+ ++currentPos;
+
+ // check for ##
+ bool pasting = false;
+ if (peekToken('#')) {
+ ++currentPos;
+ if (peekToken('#'))
+ pasting = true;
+ }
+
+ currentPos = savePos;
+
+ return pasting;
+}
+
+void TPpContext::pushTokenStreamInput(TokenStream& ts, bool prepasting)
+{
+ pushInput(new tTokenInput(this, &ts, prepasting));
+ ts.reset();
+}
+
+int TPpContext::tUngotTokenInput::scan(TPpToken* ppToken)
+{
+ if (done)
+ return EndOfInput;
+
+ int ret = token;
+ *ppToken = lval;
+ done = true;
+
+ return ret;
+}
+
+void TPpContext::UngetToken(int token, TPpToken* ppToken)
+{
+ pushInput(new tUngotTokenInput(this, token, ppToken));
+}
+
+} // end namespace glslang
diff --git a/thirdparty/glslang/glslang/MachineIndependent/preprocessor/PpTokens.h b/thirdparty/glslang/glslang/MachineIndependent/preprocessor/PpTokens.h
new file mode 100644
index 0000000000..7b0f815500
--- /dev/null
+++ b/thirdparty/glslang/glslang/MachineIndependent/preprocessor/PpTokens.h
@@ -0,0 +1,179 @@
+//
+// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+/****************************************************************************\
+Copyright (c) 2002, NVIDIA Corporation.
+
+NVIDIA Corporation("NVIDIA") supplies this software to you in
+consideration of your agreement to the following terms, and your use,
+installation, modification or redistribution of this NVIDIA software
+constitutes acceptance of these terms. If you do not agree with these
+terms, please do not use, install, modify or redistribute this NVIDIA
+software.
+
+In consideration of your agreement to abide by the following terms, and
+subject to these terms, NVIDIA grants you a personal, non-exclusive
+license, under NVIDIA's copyrights in this original NVIDIA software (the
+"NVIDIA Software"), to use, reproduce, modify and redistribute the
+NVIDIA Software, with or without modifications, in source and/or binary
+forms; provided that if you redistribute the NVIDIA Software, you must
+retain the copyright notice of NVIDIA, this notice and the following
+text and disclaimers in all such redistributions of the NVIDIA Software.
+Neither the name, trademarks, service marks nor logos of NVIDIA
+Corporation may be used to endorse or promote products derived from the
+NVIDIA Software without specific prior written permission from NVIDIA.
+Except as expressly stated in this notice, no other rights or licenses
+express or implied, are granted by NVIDIA herein, including but not
+limited to any patent rights that may be infringed by your derivative
+works or by other works in which the NVIDIA Software may be
+incorporated. No hardware is licensed hereunder.
+
+THE NVIDIA SOFTWARE IS BEING PROVIDED ON AN "AS IS" BASIS, WITHOUT
+WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED,
+INCLUDING WITHOUT LIMITATION, WARRANTIES OR CONDITIONS OF TITLE,
+NON-INFRINGEMENT, MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, OR
+ITS USE AND OPERATION EITHER ALONE OR IN COMBINATION WITH OTHER
+PRODUCTS.
+
+IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT,
+INCIDENTAL, EXEMPLARY, CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+TO, LOST PROFITS; PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) OR ARISING IN ANY WAY
+OUT OF THE USE, REPRODUCTION, MODIFICATION AND/OR DISTRIBUTION OF THE
+NVIDIA SOFTWARE, HOWEVER CAUSED AND WHETHER UNDER THEORY OF CONTRACT,
+TORT (INCLUDING NEGLIGENCE), STRICT LIABILITY OR OTHERWISE, EVEN IF
+NVIDIA HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+\****************************************************************************/
+
+#ifndef PARSER_H
+#define PARSER_H
+
+namespace glslang {
+
+// Multi-character tokens
+enum EFixedAtoms {
+ // single character tokens get their own char value as their token; start here for multi-character tokens
+ PpAtomMaxSingle = 127,
+
+    // replace bad character tokens with this, to avoid accidental aliasing with the atoms below
+ PpAtomBadToken,
+
+ // Operators
+
+ PPAtomAddAssign,
+ PPAtomSubAssign,
+ PPAtomMulAssign,
+ PPAtomDivAssign,
+ PPAtomModAssign,
+
+ PpAtomRight,
+ PpAtomLeft,
+
+ PpAtomRightAssign,
+ PpAtomLeftAssign,
+ PpAtomAndAssign,
+ PpAtomOrAssign,
+ PpAtomXorAssign,
+
+ PpAtomAnd,
+ PpAtomOr,
+ PpAtomXor,
+
+ PpAtomEQ,
+ PpAtomNE,
+ PpAtomGE,
+ PpAtomLE,
+
+ PpAtomDecrement,
+ PpAtomIncrement,
+
+ PpAtomColonColon,
+
+ PpAtomPaste,
+
+ // Constants
+
+ PpAtomConstInt,
+ PpAtomConstUint,
+ PpAtomConstInt64,
+ PpAtomConstUint64,
+ PpAtomConstInt16,
+ PpAtomConstUint16,
+ PpAtomConstFloat,
+ PpAtomConstDouble,
+ PpAtomConstFloat16,
+ PpAtomConstString,
+
+ // Identifiers
+ PpAtomIdentifier,
+
+ // preprocessor "keywords"
+
+ PpAtomDefine,
+ PpAtomUndef,
+
+ PpAtomIf,
+ PpAtomIfdef,
+ PpAtomIfndef,
+ PpAtomElse,
+ PpAtomElif,
+ PpAtomEndif,
+
+ PpAtomLine,
+ PpAtomPragma,
+ PpAtomError,
+
+ // #version ...
+ PpAtomVersion,
+ PpAtomCore,
+ PpAtomCompatibility,
+ PpAtomEs,
+
+ // #extension
+ PpAtomExtension,
+
+ // __LINE__, __FILE__, __VERSION__
+
+ PpAtomLineMacro,
+ PpAtomFileMacro,
+ PpAtomVersionMacro,
+
+ // #include
+ PpAtomInclude,
+
+ PpAtomLast,
+};
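+
+// For example, a lone '+' is returned as its own char value (43, which is
+// <= PpAtomMaxSingle), while the two-character "+=" is returned as the
+// PPAtomAddAssign atom defined above.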
+
+} // end namespace glslang
+
+#endif /* not PARSER_H */
diff --git a/thirdparty/glslang/glslang/MachineIndependent/propagateNoContraction.cpp b/thirdparty/glslang/glslang/MachineIndependent/propagateNoContraction.cpp
new file mode 100644
index 0000000000..ae95688ae8
--- /dev/null
+++ b/thirdparty/glslang/glslang/MachineIndependent/propagateNoContraction.cpp
@@ -0,0 +1,866 @@
+//
+// Copyright (C) 2015-2016 Google, Inc.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+
+//
+// Visit the nodes in the glslang intermediate tree representation to
+// propagate the 'noContraction' qualifier.
+//
+
+#include "propagateNoContraction.h"
+
+#include <cstdlib>
+#include <string>
+#include <tuple>
+#include <unordered_map>
+#include <unordered_set>
+
+#include "localintermediate.h"
+namespace {
+
+// Use a string to hold the access chain information, as in most cases the
+// access chain is short and may contain only one element, which is the symbol
+// ID.
+// Example: struct {float a; float b;} s;
+// Object s.a will be represented with: <symbol ID of s>/0
+// Object s.b will be represented with: <symbol ID of s>/1
+// Object s will be represented with: <symbol ID of s>
+// Members of vectors, matrices and arrays are represented with the same
+// symbol ID as their containing symbol objects, because their preciseness is
+// always the same as that of their containers.
+typedef std::string ObjectAccessChain;
+
+// The delimiter used in the ObjectAccessChain string to separate symbol ID and
+// different level of struct indices.
+const char ObjectAccesschainDelimiter = '/';
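+// For illustration, a hypothetical nested case extending the example above
+// (the ID 12 and the "12(t)" label format are illustrative; real labels are
+// produced by generateSymbolLabel() below):
+// struct S {float a; float b;}; struct T {S s; float f;} t;
+// Object t.s.b will be represented with: 12(t)/0/1
+// Object t.f will be represented with: 12(t)/1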
+
+// Mapping from symbol IDs of symbol nodes to their defining operation
+// nodes.
+typedef std::unordered_multimap<ObjectAccessChain, glslang::TIntermOperator*> NodeMapping;
+// Mapping from object nodes to their access chain info string.
+typedef std::unordered_map<glslang::TIntermTyped*, ObjectAccessChain> AccessChainMapping;
+
+// Set of object IDs.
+typedef std::unordered_set<ObjectAccessChain> ObjectAccesschainSet;
+// Set of return branch nodes.
+typedef std::unordered_set<glslang::TIntermBranch*> ReturnBranchNodeSet;
+
+// A helper function to tell whether a node is 'noContraction'. Returns true if
+// the node has the 'noContraction' qualifier, false otherwise.
+bool isPreciseObjectNode(glslang::TIntermTyped* node)
+{
+ return node->getType().getQualifier().noContraction;
+}
+
+// Returns true if the opcode is a dereferencing one.
+bool isDereferenceOperation(glslang::TOperator op)
+{
+ switch (op) {
+ case glslang::EOpIndexDirect:
+ case glslang::EOpIndexDirectStruct:
+ case glslang::EOpIndexIndirect:
+ case glslang::EOpVectorSwizzle:
+ case glslang::EOpMatrixSwizzle:
+ return true;
+ default:
+ return false;
+ }
+}
+
+// Returns true if the opcode leads to an assignment operation.
+bool isAssignOperation(glslang::TOperator op)
+{
+ switch (op) {
+ case glslang::EOpAssign:
+ case glslang::EOpAddAssign:
+ case glslang::EOpSubAssign:
+ case glslang::EOpMulAssign:
+ case glslang::EOpVectorTimesMatrixAssign:
+ case glslang::EOpVectorTimesScalarAssign:
+ case glslang::EOpMatrixTimesScalarAssign:
+ case glslang::EOpMatrixTimesMatrixAssign:
+ case glslang::EOpDivAssign:
+ case glslang::EOpModAssign:
+ case glslang::EOpAndAssign:
+ case glslang::EOpLeftShiftAssign:
+ case glslang::EOpRightShiftAssign:
+ case glslang::EOpInclusiveOrAssign:
+ case glslang::EOpExclusiveOrAssign:
+
+ case glslang::EOpPostIncrement:
+ case glslang::EOpPostDecrement:
+ case glslang::EOpPreIncrement:
+ case glslang::EOpPreDecrement:
+ return true;
+ default:
+ return false;
+ }
+}
+
+// A helper function to get the unsigned int from a given constant union node.
+// Note the node should only hold a uint scalar.
+unsigned getStructIndexFromConstantUnion(glslang::TIntermTyped* node)
+{
+ assert(node->getAsConstantUnion() && node->getAsConstantUnion()->isScalar());
+ unsigned struct_dereference_index = node->getAsConstantUnion()->getConstArray()[0].getUConst();
+ return struct_dereference_index;
+}
+
+// A helper function to generate a symbol label of the form "<id>(<name>)".
+ObjectAccessChain generateSymbolLabel(glslang::TIntermSymbol* node)
+{
+ ObjectAccessChain symbol_id =
+ std::to_string(node->getId()) + "(" + node->getName().c_str() + ")";
+ return symbol_id;
+}
+
+// Returns true if the operation is an arithmetic operation and valid for
+// the 'NoContraction' decoration.
+bool isArithmeticOperation(glslang::TOperator op)
+{
+ switch (op) {
+ case glslang::EOpAddAssign:
+ case glslang::EOpSubAssign:
+ case glslang::EOpMulAssign:
+ case glslang::EOpVectorTimesMatrixAssign:
+ case glslang::EOpVectorTimesScalarAssign:
+ case glslang::EOpMatrixTimesScalarAssign:
+ case glslang::EOpMatrixTimesMatrixAssign:
+ case glslang::EOpDivAssign:
+ case glslang::EOpModAssign:
+
+ case glslang::EOpNegative:
+
+ case glslang::EOpAdd:
+ case glslang::EOpSub:
+ case glslang::EOpMul:
+ case glslang::EOpDiv:
+ case glslang::EOpMod:
+
+ case glslang::EOpVectorTimesScalar:
+ case glslang::EOpVectorTimesMatrix:
+ case glslang::EOpMatrixTimesVector:
+ case glslang::EOpMatrixTimesScalar:
+ case glslang::EOpMatrixTimesMatrix:
+
+ case glslang::EOpDot:
+
+ case glslang::EOpPostIncrement:
+ case glslang::EOpPostDecrement:
+ case glslang::EOpPreIncrement:
+ case glslang::EOpPreDecrement:
+ return true;
+ default:
+ return false;
+ }
+}
+
+// A helper RAII class that sets a state value and restores the previous value
+// when the guard goes out of scope.
+template <typename T> class StateSettingGuard {
+public:
+ StateSettingGuard(T* state_ptr, T new_state_value)
+ : state_ptr_(state_ptr), previous_state_(*state_ptr)
+ {
+ *state_ptr = new_state_value;
+ }
+ StateSettingGuard(T* state_ptr) : state_ptr_(state_ptr), previous_state_(*state_ptr) {}
+ void setState(T new_state_value) { *state_ptr_ = new_state_value; }
+ ~StateSettingGuard() { *state_ptr_ = previous_state_; }
+
+private:
+ T* state_ptr_;
+ T previous_state_;
+};
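+
+// A minimal usage sketch of StateSettingGuard (hypothetical variable names):
+//
+//   bool populating = false;
+//   {
+//       StateSettingGuard<bool> guard(&populating, true);
+//       // 'populating' is true inside this scope.
+//   }
+//   // 'populating' is restored to false when the guard is destroyed.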
+
+// A helper function to get the front element from a given ObjectAccessChain.
+ObjectAccessChain getFrontElement(const ObjectAccessChain& chain)
+{
+ size_t pos_delimiter = chain.find(ObjectAccesschainDelimiter);
+ return pos_delimiter == std::string::npos ? chain : chain.substr(0, pos_delimiter);
+}
+
+// A helper function to get the access chain starting from the second element.
+ObjectAccessChain subAccessChainFromSecondElement(const ObjectAccessChain& chain)
+{
+ size_t pos_delimiter = chain.find(ObjectAccesschainDelimiter);
+ return pos_delimiter == std::string::npos ? "" : chain.substr(pos_delimiter + 1);
+}
+
+// A helper function to get the access chain after removing a given prefix.
+ObjectAccessChain getSubAccessChainAfterPrefix(const ObjectAccessChain& chain,
+ const ObjectAccessChain& prefix)
+{
+ size_t pos = chain.find(prefix);
+ if (pos != 0)
+ return chain;
+ return chain.substr(prefix.length() + sizeof(ObjectAccesschainDelimiter));
+}
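+
+// For illustration, given the hypothetical chain "12(t)/0/1":
+//   getFrontElement("12(t)/0/1")                         returns "12(t)"
+//   subAccessChainFromSecondElement("12(t)/0/1")         returns "0/1"
+//   getSubAccessChainAfterPrefix("12(t)/0/1", "12(t)/0") returns "1"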
+
+//
+// A traverser which traverses the whole AST and populates:
+// 1) A mapping from symbol nodes' IDs to their defining operation nodes.
+// 2) A set of access chains of the initial precise object nodes.
+//
+class TSymbolDefinitionCollectingTraverser : public glslang::TIntermTraverser {
+public:
+ TSymbolDefinitionCollectingTraverser(NodeMapping* symbol_definition_mapping,
+ AccessChainMapping* accesschain_mapping,
+ ObjectAccesschainSet* precise_objects,
+ ReturnBranchNodeSet* precise_return_nodes);
+
+ bool visitUnary(glslang::TVisit, glslang::TIntermUnary*) override;
+ bool visitBinary(glslang::TVisit, glslang::TIntermBinary*) override;
+ void visitSymbol(glslang::TIntermSymbol*) override;
+ bool visitAggregate(glslang::TVisit, glslang::TIntermAggregate*) override;
+ bool visitBranch(glslang::TVisit, glslang::TIntermBranch*) override;
+
+protected:
+ TSymbolDefinitionCollectingTraverser& operator=(const TSymbolDefinitionCollectingTraverser&);
+
+    // The mapping from symbol node IDs to their defining nodes. This should be
+    // populated while traversing the AST.
+ NodeMapping& symbol_definition_mapping_;
+ // The set of symbol node IDs for precise symbol nodes, the ones marked as
+ // 'noContraction'.
+ ObjectAccesschainSet& precise_objects_;
+ // The set of precise return nodes.
+ ReturnBranchNodeSet& precise_return_nodes_;
+    // A temporary cache of the access chain for the object whose defining
+    // node is currently being sought while traversing the AST.
+ ObjectAccessChain current_object_;
+ // A map from object node to its access chain. This traverser stores
+ // the built access chains into this map for each object node it has
+ // visited.
+ AccessChainMapping& accesschain_mapping_;
+ // The pointer to the Function Definition node, so we can get the
+ // preciseness of the return expression from it when we traverse the
+ // return branch node.
+ glslang::TIntermAggregate* current_function_definition_node_;
+};
+
+TSymbolDefinitionCollectingTraverser::TSymbolDefinitionCollectingTraverser(
+ NodeMapping* symbol_definition_mapping, AccessChainMapping* accesschain_mapping,
+ ObjectAccesschainSet* precise_objects,
+ std::unordered_set<glslang::TIntermBranch*>* precise_return_nodes)
+ : TIntermTraverser(true, false, false), symbol_definition_mapping_(*symbol_definition_mapping),
+ precise_objects_(*precise_objects), precise_return_nodes_(*precise_return_nodes),
+ current_object_(), accesschain_mapping_(*accesschain_mapping),
+ current_function_definition_node_(nullptr) {}
+
+// Visits a symbol node: sets current_object_ to the symbol's ID label and
+// records a mapping from this node to current_object_, the access chain that
+// was just obtained.
+void TSymbolDefinitionCollectingTraverser::visitSymbol(glslang::TIntermSymbol* node)
+{
+ current_object_ = generateSymbolLabel(node);
+ accesschain_mapping_[node] = current_object_;
+}
+
+// Visits an aggregate node, traverses all of its children.
+bool TSymbolDefinitionCollectingTraverser::visitAggregate(glslang::TVisit,
+ glslang::TIntermAggregate* node)
+{
+ // This aggregate node might be a function definition node, in which case we need to
+ // cache this node, so we can get the preciseness information of the return value
+ // of this function later.
+ StateSettingGuard<glslang::TIntermAggregate*> current_function_definition_node_setting_guard(
+ &current_function_definition_node_);
+ if (node->getOp() == glslang::EOpFunction) {
+        // This is a function definition node; we need to cache this node so that we can
+ // get the preciseness of the return value later.
+ current_function_definition_node_setting_guard.setState(node);
+ }
+ // Traverse the items in the sequence.
+ glslang::TIntermSequence& seq = node->getSequence();
+ for (int i = 0; i < (int)seq.size(); ++i) {
+ current_object_.clear();
+ seq[i]->traverse(this);
+ }
+ return false;
+}
+
+bool TSymbolDefinitionCollectingTraverser::visitBranch(glslang::TVisit,
+ glslang::TIntermBranch* node)
+{
+ if (node->getFlowOp() == glslang::EOpReturn && node->getExpression() &&
+ current_function_definition_node_ &&
+ current_function_definition_node_->getType().getQualifier().noContraction) {
+ // This node is a return node with an expression, and its function has a
+ // precise return value. We need to find the involved objects in its
+ // expression and add them to the set of initial precise objects.
+ precise_return_nodes_.insert(node);
+ node->getExpression()->traverse(this);
+ }
+ return false;
+}
+
+// Visits a unary node. This might be an implicit assignment like i++ or i--.
+bool TSymbolDefinitionCollectingTraverser::visitUnary(glslang::TVisit /* visit */,
+ glslang::TIntermUnary* node)
+{
+ current_object_.clear();
+ node->getOperand()->traverse(this);
+ if (isAssignOperation(node->getOp())) {
+ // We should always be able to get an access chain of the operand node.
+ assert(!current_object_.empty());
+
+ // If the operand node object is 'precise', we collect its access chain
+ // for the initial set of 'precise' objects.
+ if (isPreciseObjectNode(node->getOperand())) {
+            // The operand node is a 'precise' object node; add its access
+            // chain to the initial set of 'precise' objects.
+ precise_objects_.insert(current_object_);
+ }
+ // Gets the symbol ID from the object's access chain.
+ ObjectAccessChain id_symbol = getFrontElement(current_object_);
+ // Add a mapping from the symbol ID to this assignment operation node.
+ symbol_definition_mapping_.insert(std::make_pair(id_symbol, node));
+ }
+ // A unary node is not a dereference node, so we clear the access chain which
+ // is under construction.
+ current_object_.clear();
+ return false;
+}
+
+// Visits a binary node and updates the mapping from symbol IDs to the definition
+// nodes. Also collects the access chains for the initial precise objects.
+bool TSymbolDefinitionCollectingTraverser::visitBinary(glslang::TVisit /* visit */,
+ glslang::TIntermBinary* node)
+{
+ // Traverses the left node to build the access chain info for the object.
+ current_object_.clear();
+ node->getLeft()->traverse(this);
+
+ if (isAssignOperation(node->getOp())) {
+ // We should always be able to get an access chain for the left node.
+ assert(!current_object_.empty());
+
+ // If the left node object is 'precise', it is an initial precise object
+ // specified in the shader source. Adds it to the initial work list to
+ // process later.
+ if (isPreciseObjectNode(node->getLeft())) {
+            // The left node is a 'precise' object node; add its access chain
+            // to the initial set of 'precise' objects.
+ precise_objects_.insert(current_object_);
+ }
+ // Gets the symbol ID from the object access chain, which should be the
+ // first element recorded in the access chain.
+ ObjectAccessChain id_symbol = getFrontElement(current_object_);
+ // Adds a mapping from the symbol ID to this assignment operation node.
+ symbol_definition_mapping_.insert(std::make_pair(id_symbol, node));
+
+        // Traverses the right node; there may be other 'assignment'
+        // operations on the right.
+ current_object_.clear();
+ node->getRight()->traverse(this);
+
+ } else if (isDereferenceOperation(node->getOp())) {
+ // The left node (parent node) is a struct type object. We need to
+ // record the access chain information of the current node into its
+ // object id.
+ if (node->getOp() == glslang::EOpIndexDirectStruct) {
+ unsigned struct_dereference_index = getStructIndexFromConstantUnion(node->getRight());
+ current_object_.push_back(ObjectAccesschainDelimiter);
+ current_object_.append(std::to_string(struct_dereference_index));
+ }
+ accesschain_mapping_[node] = current_object_;
+
+ // For a dereference node, there is no need to traverse the right child
+ // node as the right node should always be an integer type object.
+
+ } else {
+ // For other binary nodes, still traverse the right node.
+ current_object_.clear();
+ node->getRight()->traverse(this);
+ }
+ return false;
+}
+
+// Traverses the AST and returns a tuple of four members:
+// 1) a mapping from symbol IDs to the definition nodes (aka. assignment nodes) of these symbols.
+// 2) a mapping from object nodes in the AST to the access chains of these objects.
+// 3) a set of access chains of precise objects.
+// 4) a set of return nodes with precise expressions.
+std::tuple<NodeMapping, AccessChainMapping, ObjectAccesschainSet, ReturnBranchNodeSet>
+getSymbolToDefinitionMappingAndPreciseSymbolIDs(const glslang::TIntermediate& intermediate)
+{
+ auto result_tuple = std::make_tuple(NodeMapping(), AccessChainMapping(), ObjectAccesschainSet(),
+ ReturnBranchNodeSet());
+
+ TIntermNode* root = intermediate.getTreeRoot();
+ if (root == 0)
+ return result_tuple;
+
+ NodeMapping& symbol_definition_mapping = std::get<0>(result_tuple);
+ AccessChainMapping& accesschain_mapping = std::get<1>(result_tuple);
+ ObjectAccesschainSet& precise_objects = std::get<2>(result_tuple);
+ ReturnBranchNodeSet& precise_return_nodes = std::get<3>(result_tuple);
+
+    // Traverses the AST and populates the results.
+ TSymbolDefinitionCollectingTraverser collector(&symbol_definition_mapping, &accesschain_mapping,
+ &precise_objects, &precise_return_nodes);
+ root->traverse(&collector);
+
+ return result_tuple;
+}
+
+//
+// A traverser that determines whether the left node (or operand node, for a
+// unary node) of an assignment node is 'precise', contains 'precise' objects
+// or not, according to the access chain of a given precise object which shares
+// the same symbol as the left node.
+//
+// Traverses, in post order, the left-node subtree of a binary assignment node and:
+//
+// 1) Propagates the 'precise' from the left object nodes to this object node.
+//
+// 2) Builds the object access chain along the traversal, and compares it with
+// the access chain of the given 'precise' object to tell whether the node
+// being defined is 'precise' or not.
+//
+class TNoContractionAssigneeCheckingTraverser : public glslang::TIntermTraverser {
+
+ enum DecisionStatus {
+        // The object node to be assigned to may contain both 'precise' and non-'precise' objects.
+        Mixed = 0,
+        // The object node to be assigned to is either a 'precise' object or a struct object whose members are all 'precise'.
+        Precise = 1,
+        // The object node to be assigned to is not a 'precise' object.
+        NotPrecise = 2,
+ };
+
+public:
+ TNoContractionAssigneeCheckingTraverser(const AccessChainMapping& accesschain_mapping)
+ : TIntermTraverser(true, false, false), accesschain_mapping_(accesschain_mapping),
+ precise_object_(nullptr) {}
+
+ // Checks the preciseness of a given assignment node with a precise object
+ // represented as access chain. The precise object shares the same symbol
+    // with the assignee of the given assignment node. Returns a tuple of two:
+ //
+ // 1) The preciseness of the assignee node of this assignment node. True
+ // if the assignee contains 'precise' objects or is 'precise', false if
+ // the assignee is not 'precise' according to the access chain of the given
+ // precise object.
+ //
+ // 2) The incremental access chain from the assignee node to its nested
+ // 'precise' object, according to the access chain of the given precise
+ // object. This incremental access chain can be empty, which means the
+ // assignee is 'precise'. Otherwise it shows the path to the nested
+ // precise object.
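+    //
+    // For illustration (hypothetical access chains): with assignee
+    // "12(s)/0", a precise object "12(s)/0/1" yields (true, "1"); a precise
+    // object "12(s)" or "12(s)/0" yields (true, ""); and an unrelated precise
+    // object "12(s)/1" yields (false, "").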
+ std::tuple<bool, ObjectAccessChain>
+ getPrecisenessAndRemainedAccessChain(glslang::TIntermOperator* node,
+ const ObjectAccessChain& precise_object)
+ {
+ assert(isAssignOperation(node->getOp()));
+ precise_object_ = &precise_object;
+ ObjectAccessChain assignee_object;
+ if (glslang::TIntermBinary* BN = node->getAsBinaryNode()) {
+ // This is a binary assignment node, we need to check the
+ // preciseness of the left node.
+ assert(accesschain_mapping_.count(BN->getLeft()));
+            // The left node (assignee node) is an object node; traverse the
+            // node to let the 'precise' of nesting objects be transferred to
+            // nested objects.
+ BN->getLeft()->traverse(this);
+ // After traversing the left node, if the left node is 'precise',
+ // we can conclude this assignment should propagate 'precise'.
+ if (isPreciseObjectNode(BN->getLeft())) {
+ return make_tuple(true, ObjectAccessChain());
+ }
+            // If the preciseness of the left node (assignee node) cannot be
+            // determined yet, we need to compare the access chain string
+ // of the assignee object with the given precise object.
+ assignee_object = accesschain_mapping_.at(BN->getLeft());
+
+ } else if (glslang::TIntermUnary* UN = node->getAsUnaryNode()) {
+ // This is a unary assignment node, we need to check the
+            // preciseness of the operand node. For a unary assignment node, the
+ // operand node should always be an object node.
+ assert(accesschain_mapping_.count(UN->getOperand()));
+ // Traverse the operand node to let the 'precise' being propagated
+ // from lower nodes to upper nodes.
+ UN->getOperand()->traverse(this);
+ // After traversing the operand node, if the operand node is
+ // 'precise', this assignment should propagate 'precise'.
+ if (isPreciseObjectNode(UN->getOperand())) {
+ return make_tuple(true, ObjectAccessChain());
+ }
+            // If the preciseness of the operand node (assignee node) cannot be
+            // determined yet, we need to compare the access chain string
+ // of the assignee object with the given precise object.
+ assignee_object = accesschain_mapping_.at(UN->getOperand());
+ } else {
+ // Not a binary or unary node, should not happen.
+ assert(false);
+ }
+
+ // Compare the access chain string of the assignee node with the given
+ // precise object to determine if this assignment should propagate
+ // 'precise'.
+ if (assignee_object.find(precise_object) == 0) {
+ // The access chain string of the given precise object is a prefix
+ // of assignee's access chain string. The assignee should be
+ // 'precise'.
+ return make_tuple(true, ObjectAccessChain());
+ } else if (precise_object.find(assignee_object) == 0) {
+ // The assignee's access chain string is a prefix of the given
+            // precise object's, so the assignee object contains a 'precise'
+            // object, and we need to pass the remaining access chain to the
+            // object nodes on the right.
+ return make_tuple(true, getSubAccessChainAfterPrefix(precise_object, assignee_object));
+ } else {
+            // The access chain strings do not match; the assignee object
+            // cannot be labeled as 'precise' according to the given precise
+            // object.
+ return make_tuple(false, ObjectAccessChain());
+ }
+ }
+
+protected:
+ TNoContractionAssigneeCheckingTraverser& operator=(const TNoContractionAssigneeCheckingTraverser&);
+
+ bool visitBinary(glslang::TVisit, glslang::TIntermBinary* node) override;
+ void visitSymbol(glslang::TIntermSymbol* node) override;
+
+ // A map from object nodes to their access chain string (used as object ID).
+ const AccessChainMapping& accesschain_mapping_;
+    // A given precise object, represented as its access chain string. This
+    // precise object is compared with the assignee node to tell whether the
+    // assignee node is 'precise', contains 'precise' objects, or is not
+    // 'precise'.
+ const ObjectAccessChain* precise_object_;
+};
+
+// Visits a binary node. If the node is an object node, it must be a dereference
+// node. In such cases, if the left node is 'precise', this node should also be
+// 'precise'.
+bool TNoContractionAssigneeCheckingTraverser::visitBinary(glslang::TVisit,
+ glslang::TIntermBinary* node)
+{
+ // Traverses the left so that we transfer the 'precise' from nesting object
+ // to its nested object.
+ node->getLeft()->traverse(this);
+ // If this binary node is an object node, we should have it in the
+ // accesschain_mapping_.
+ if (accesschain_mapping_.count(node)) {
+ // A binary object node must be a dereference node.
+ assert(isDereferenceOperation(node->getOp()));
+ // If the left node is 'precise', this node should also be precise,
+ // otherwise, compare with the given precise_object_. If the
+ // access chain of this node matches with the given precise_object_,
+ // this node should be marked as 'precise'.
+ if (isPreciseObjectNode(node->getLeft())) {
+ node->getWritableType().getQualifier().noContraction = true;
+ } else if (accesschain_mapping_.at(node) == *precise_object_) {
+ node->getWritableType().getQualifier().noContraction = true;
+ }
+ }
+ return false;
+}
+
+// Visits a symbol node. If the symbol node ID (its access chain string) matches
+// the given precise object, this node should be 'precise'.
+void TNoContractionAssigneeCheckingTraverser::visitSymbol(glslang::TIntermSymbol* node)
+{
+ // A symbol node should always be an object node, and should have been added
+ // to the map from object nodes to their access chain strings.
+ assert(accesschain_mapping_.count(node));
+ if (accesschain_mapping_.at(node) == *precise_object_) {
+ node->getWritableType().getQualifier().noContraction = true;
+ }
+}
+
+//
+// A traverser that only traverses the right side of binary assignment nodes
+// and the operand node of unary assignment nodes.
+//
+// 1) Marks arithmetic operations as 'NoContraction'.
+//
+// 2) Finds the objects which should be marked as 'precise' on the right and
+// updates the 'precise' object work list.
+//
+class TNoContractionPropagator : public glslang::TIntermTraverser {
+public:
+ TNoContractionPropagator(ObjectAccesschainSet* precise_objects,
+ const AccessChainMapping& accesschain_mapping)
+ : TIntermTraverser(true, false, false),
+ precise_objects_(*precise_objects), added_precise_object_ids_(),
+ remained_accesschain_(), accesschain_mapping_(accesschain_mapping) {}
+
+ // Propagates 'precise' in the right nodes of a given assignment node with
+ // access chain record from the assignee node to a 'precise' object it
+ // contains.
+ void
+ propagateNoContractionInOneExpression(glslang::TIntermTyped* defining_node,
+ const ObjectAccessChain& assignee_remained_accesschain)
+ {
+ remained_accesschain_ = assignee_remained_accesschain;
+ if (glslang::TIntermBinary* BN = defining_node->getAsBinaryNode()) {
+ assert(isAssignOperation(BN->getOp()));
+ BN->getRight()->traverse(this);
+ if (isArithmeticOperation(BN->getOp())) {
+ BN->getWritableType().getQualifier().noContraction = true;
+ }
+ } else if (glslang::TIntermUnary* UN = defining_node->getAsUnaryNode()) {
+ assert(isAssignOperation(UN->getOp()));
+ UN->getOperand()->traverse(this);
+ if (isArithmeticOperation(UN->getOp())) {
+ UN->getWritableType().getQualifier().noContraction = true;
+ }
+ }
+ }
+
+ // Propagates 'precise' in a given precise return node.
+ void propagateNoContractionInReturnNode(glslang::TIntermBranch* return_node)
+ {
+ remained_accesschain_ = "";
+ assert(return_node->getFlowOp() == glslang::EOpReturn && return_node->getExpression());
+ return_node->getExpression()->traverse(this);
+ }
+
+protected:
+ TNoContractionPropagator& operator=(const TNoContractionPropagator&);
+
+    // Visits an aggregate node. The node can be an initializer list, in which
+    // case we need to find the 'precise' or 'precise'-containing object node
+    // using the recorded access chain. In other cases, we just need to traverse
+    // all the children nodes.
+ bool visitAggregate(glslang::TVisit, glslang::TIntermAggregate* node) override
+ {
+ if (!remained_accesschain_.empty() && node->getOp() == glslang::EOpConstructStruct) {
+            // This is a struct initializer node and the remaining
+            // access chain is not empty, so we need to refer to
+            // remained_accesschain_ to find the nested 'precise' object.
+            // We don't need to visit the other nodes in this aggregate node.
+
+ // Gets the struct dereference index that leads to 'precise' object.
+ ObjectAccessChain precise_accesschain_index_str =
+ getFrontElement(remained_accesschain_);
+ unsigned precise_accesschain_index = (unsigned)strtoul(precise_accesschain_index_str.c_str(), nullptr, 10);
+ // Gets the node pointed by the access chain index extracted before.
+ glslang::TIntermTyped* potential_precise_node =
+ node->getSequence()[precise_accesschain_index]->getAsTyped();
+ assert(potential_precise_node);
+ // Pop the front access chain index from the path, and visit the nested node.
+ {
+ ObjectAccessChain next_level_accesschain =
+ subAccessChainFromSecondElement(remained_accesschain_);
+ StateSettingGuard<ObjectAccessChain> setup_remained_accesschain_for_next_level(
+ &remained_accesschain_, next_level_accesschain);
+ potential_precise_node->traverse(this);
+ }
+ return false;
+ }
+ return true;
+ }
+
+ // Visits a binary node. A binary node can be an object node, e.g. a dereference node.
+ // As only the top object nodes in the right side of an assignment needs to be visited
+ // and added to 'precise' work list, this traverser won't visit the children nodes of
+ // an object node. If the binary node does not represent an object node, it should
+ // go on to traverse its children nodes and if it is an arithmetic operation node, this
+ // operation should be marked as 'noContraction'.
+ bool visitBinary(glslang::TVisit, glslang::TIntermBinary* node) override
+ {
+ if (isDereferenceOperation(node->getOp())) {
+            // This binary node is an object node. We need to update the precise
+            // object set with the access chain of this node plus the remaining
+            // access chain.
+ ObjectAccessChain new_precise_accesschain = accesschain_mapping_.at(node);
+ if (remained_accesschain_.empty()) {
+ node->getWritableType().getQualifier().noContraction = true;
+ } else {
+ new_precise_accesschain += ObjectAccesschainDelimiter + remained_accesschain_;
+ }
+ // Cache the access chain as added precise object, so we won't add the
+ // same object to the work list again.
+ if (!added_precise_object_ids_.count(new_precise_accesschain)) {
+ precise_objects_.insert(new_precise_accesschain);
+ added_precise_object_ids_.insert(new_precise_accesschain);
+ }
+ // Only the upper-most object nodes should be visited, so do not
+ // visit children of this object node.
+ return false;
+ }
+ // If this is an arithmetic operation, marks this node as 'noContraction'.
+ if (isArithmeticOperation(node->getOp()) && node->getBasicType() != glslang::EbtInt) {
+ node->getWritableType().getQualifier().noContraction = true;
+ }
+ // As this node is not an object node, need to traverse the children nodes.
+ return true;
+ }
+
+    // Visits a unary node. A unary node cannot be an object node. If the operation
+    // is an arithmetic operation, we need to mark this node as 'noContraction'.
+ bool visitUnary(glslang::TVisit /* visit */, glslang::TIntermUnary* node) override
+ {
+ // If this is an arithmetic operation, marks this with 'noContraction'
+ if (isArithmeticOperation(node->getOp())) {
+ node->getWritableType().getQualifier().noContraction = true;
+ }
+ return true;
+ }
+
+    // Visits a symbol node. A symbol node is always an object node, so we
+    // should always be able to find it in our collected mapping from object
+    // nodes to access chains. As an object node, a symbol node can be either
+    // 'precise' or contain 'precise' objects, according to the remaining
+    // access chain information we have when we visit this node.
+ void visitSymbol(glslang::TIntermSymbol* node) override
+ {
+        // Symbol nodes are object nodes and should always have an
+        // access chain collected for them beforehand.
+ assert(accesschain_mapping_.count(node));
+ ObjectAccessChain new_precise_accesschain = accesschain_mapping_.at(node);
+        // If the remaining access chain is empty, this symbol node should be
+        // marked as 'precise'. Otherwise, the remaining access chain should be
+ // appended to the symbol ID to build a new access chain which points to
+ // the nested 'precise' object in this symbol object.
+ if (remained_accesschain_.empty()) {
+ node->getWritableType().getQualifier().noContraction = true;
+ } else {
+ new_precise_accesschain += ObjectAccesschainDelimiter + remained_accesschain_;
+ }
+ // Add the new 'precise' access chain to the work list and make sure we
+ // don't visit it again.
+ if (!added_precise_object_ids_.count(new_precise_accesschain)) {
+ precise_objects_.insert(new_precise_accesschain);
+ added_precise_object_ids_.insert(new_precise_accesschain);
+ }
+ }
+
+ // A set of precise objects, represented as access chains.
+ ObjectAccesschainSet& precise_objects_;
+ // Visited symbol nodes, should not revisit these nodes.
+ ObjectAccesschainSet added_precise_object_ids_;
+    // The left node of an assignment operation might be a parent of 'precise' objects.
+    // This means the left node might not be a 'precise' object node itself, but it may
+    // contain 'precise' qualifiers which should be propagated to the corresponding child
+    // nodes on the right. So we need the path from the left node to its nested 'precise'
+    // node to tell us how to find the corresponding 'precise' node on the right.
+ ObjectAccessChain remained_accesschain_;
+ // A map from node pointers to their access chains.
+ const AccessChainMapping& accesschain_mapping_;
+};
+}
+
+namespace glslang {
+
+void PropagateNoContraction(const glslang::TIntermediate& intermediate)
+{
+ // First, traverses the AST, records symbols with their defining operations
+    // and collects the initial set of precise symbols (symbol nodes marked
+ // as 'noContraction') and precise return nodes.
+ auto mappings_and_precise_objects =
+ getSymbolToDefinitionMappingAndPreciseSymbolIDs(intermediate);
+
+ // The mapping of symbol node IDs to their defining nodes. This enables us
+ // to get the defining node directly from a given symbol ID without
+ // traversing the tree again.
+ NodeMapping& symbol_definition_mapping = std::get<0>(mappings_and_precise_objects);
+
+    // The mapping of object nodes to their recorded access chains.
+ AccessChainMapping& accesschain_mapping = std::get<1>(mappings_and_precise_objects);
+
+    // The initial set of 'precise' objects, each represented as the
+    // access chain leading to it.
+ ObjectAccesschainSet& precise_object_accesschains = std::get<2>(mappings_and_precise_objects);
+
+ // The set of 'precise' return nodes.
+ ReturnBranchNodeSet& precise_return_nodes = std::get<3>(mappings_and_precise_objects);
+
+    // Second, uses the initial set of precise objects as a work list: pops an
+    // access chain and extracts the symbol ID from it. Then:
+    // 1) Checks the assignee object to see if it is a 'precise' object node or
+    // contains 'precise' objects, and obtains the incremental access chain from
+    // the assignee node to its nested 'precise' node (if any).
+    // 2) If the assignee object node is 'precise' or contains 'precise'
+    // objects, traverses the right side of the assignment operation
+    // expression to mark arithmetic operations as 'noContraction' and updates
+    // the 'precise' access chain work list with newly found object nodes.
+    // Repeat the above steps until the work list is empty.
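+    //
+    // As a rough illustration, for a hypothetical shader snippet:
+    //   precise float r;
+    //   float a = b * c + d;
+    //   r = a + e;
+    // processing 'r' marks the add in 'r = a + e' as 'noContraction' and adds
+    // 'a' and 'e' to the work list; processing 'a' then marks the multiply
+    // and add in its defining expression as 'noContraction' as well.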
+ TNoContractionAssigneeCheckingTraverser checker(accesschain_mapping);
+ TNoContractionPropagator propagator(&precise_object_accesschains, accesschain_mapping);
+
+ // We have two initial precise work lists to handle:
+ // 1) precise return nodes
+ // 2) precise object access chains
+ // We should process the precise return nodes first and the involved
+ // objects in the return expression should be added to the precise object
+ // access chain set.
+ while (!precise_return_nodes.empty()) {
+ glslang::TIntermBranch* precise_return_node = *precise_return_nodes.begin();
+ propagator.propagateNoContractionInReturnNode(precise_return_node);
+ precise_return_nodes.erase(precise_return_node);
+ }
+
+ while (!precise_object_accesschains.empty()) {
+ // Get the access chain of a precise object from the work list.
+ ObjectAccessChain precise_object_accesschain = *precise_object_accesschains.begin();
+ // Get the symbol id from the access chain.
+ ObjectAccessChain symbol_id = getFrontElement(precise_object_accesschain);
+ // Get all the defining nodes of that symbol ID.
+ std::pair<NodeMapping::iterator, NodeMapping::iterator> range =
+ symbol_definition_mapping.equal_range(symbol_id);
+ // Visits all the assignment nodes of that symbol ID and
+ // 1) Check if the assignee node is 'precise' or contains 'precise'
+ // objects.
+ // 2) Propagate the 'precise' to the top layer object nodes
+ // in the right side of the assignment operation, update the 'precise'
+ // work list with new access chains representing the new 'precise'
+ // objects, and mark arithmetic operations as 'noContraction'.
+ for (NodeMapping::iterator defining_node_iter = range.first;
+ defining_node_iter != range.second; defining_node_iter++) {
+ TIntermOperator* defining_node = defining_node_iter->second;
+ // Check the assignee node.
+ auto checker_result = checker.getPrecisenessAndRemainedAccessChain(
+ defining_node, precise_object_accesschain);
+ bool& contain_precise = std::get<0>(checker_result);
+ ObjectAccessChain& remained_accesschain = std::get<1>(checker_result);
+ // If the assignee node is 'precise' or contains 'precise', propagate the
+ // 'precise' to the right. Otherwise just skip this assignment node.
+ if (contain_precise) {
+ propagator.propagateNoContractionInOneExpression(defining_node,
+ remained_accesschain);
+ }
+ }
+ // Remove the last processed 'precise' object from the work list.
+ precise_object_accesschains.erase(precise_object_accesschain);
+ }
+}
+};
diff --git a/thirdparty/glslang/glslang/MachineIndependent/propagateNoContraction.h b/thirdparty/glslang/glslang/MachineIndependent/propagateNoContraction.h
new file mode 100644
index 0000000000..8521ad7d6a
--- /dev/null
+++ b/thirdparty/glslang/glslang/MachineIndependent/propagateNoContraction.h
@@ -0,0 +1,55 @@
+//
+// Copyright (C) 2015-2016 Google, Inc.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+
+//
+// Visit the nodes in the glslang intermediate tree representation to
+// propagate the 'noContraction' qualifier.
+//
+
+#pragma once
+
+#include "../Include/intermediate.h"
+
+namespace glslang {
+
+// Propagates the 'precise' qualifier for objects (objects marked with the
+// 'noContraction' qualifier) from the 'precise' variables specified in the
+// shader source to all the involved objects, and adds the 'noContraction'
+// qualifier to the involved arithmetic operations.
+// Note that the same qualifier: 'noContraction' is used in both object nodes
+// and arithmetic operation nodes, but has different meaning. For object nodes,
+// 'noContraction' means the object is 'precise'; and for arithmetic operation
+// nodes, it means the operation should not be contracted.
+void PropagateNoContraction(const glslang::TIntermediate& intermediate);
+};
diff --git a/thirdparty/glslang/glslang/MachineIndependent/reflection.cpp b/thirdparty/glslang/glslang/MachineIndependent/reflection.cpp
new file mode 100644
index 0000000000..a09a04880e
--- /dev/null
+++ b/thirdparty/glslang/glslang/MachineIndependent/reflection.cpp
@@ -0,0 +1,1200 @@
+//
+// Copyright (C) 2013-2016 LunarG, Inc.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+#include "../Include/Common.h"
+#include "reflection.h"
+#include "LiveTraverser.h"
+#include "localintermediate.h"
+
+#include "gl_types.h"
+
+//
+// Grow the reflection database through a friend traverser class of TReflection and a
+// collection of functions to do a liveness traversal that notes what uniforms are used
+// in semantically non-dead code.
+//
+// Can be used multiple times, once per stage, to grow a program reflection.
+//
+// High-level algorithm for one stage:
+//
+// 1. Put the entry point on the list of live functions.
+//
+// 2. Traverse any live function, while skipping if-tests with a compile-time constant
+// condition of false, and while adding any encountered function calls to the live
+// function list.
+//
+// Repeat until the live function list is empty.
+//
+// 3. Add any encountered uniform variables and blocks to the reflection database.
+//
+// Can be attempted with a failed link, but will return false if recursion has been detected, or
+// there wasn't exactly one entry point.
+//
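+// A rough sketch of the liveness loop described above (illustrative pseudocode
+// only; the actual traversal is driven by TLiveTraverser below):
+//
+//   liveFunctions = { entry point }
+//   while (!liveFunctions.empty()) {
+//       f = pop(liveFunctions)
+//       traverse f, skipping compile-time-false branches
+//       add each newly encountered callee to liveFunctions
+//       record each uniform/block encountered into the reflection database
+//   }
+//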
+
+namespace glslang {
+
+//
+// The traverser: mostly pass through, except
+// - processing binary nodes to see if they are dereferences of an aggregate to track
+// - processing symbol nodes to see if they are non-aggregate objects to track
+//
+// This ignores semantically dead code by using TLiveTraverser.
+//
+// This is in the glslang namespace directly so it can be a friend of TReflection.
+//
+
+class TReflectionTraverser : public TLiveTraverser {
+public:
+ TReflectionTraverser(const TIntermediate& i, TReflection& r) :
+ TLiveTraverser(i), reflection(r) { }
+
+ virtual bool visitBinary(TVisit, TIntermBinary* node);
+ virtual void visitSymbol(TIntermSymbol* base);
+
+ // Add a simple reference to a uniform variable to the uniform database, no dereference involved.
+ // However, no dereference doesn't mean simple... it could be a complex aggregate.
+ void addUniform(const TIntermSymbol& base)
+ {
+ if (processedDerefs.find(&base) == processedDerefs.end()) {
+ processedDerefs.insert(&base);
+
+            // Use a degenerate (empty) set of dereferences to immediately put us at the end of
+            // the dereference chain expected by blowUpActiveAggregate.
+ TList<TIntermBinary*> derefs;
+ blowUpActiveAggregate(base.getType(), base.getName(), derefs, derefs.end(), -1, -1, 0, 0,
+ base.getQualifier().storage, true);
+ }
+ }
+
+ void addPipeIOVariable(const TIntermSymbol& base)
+ {
+ if (processedDerefs.find(&base) == processedDerefs.end()) {
+ processedDerefs.insert(&base);
+
+ const TString &name = base.getName();
+ const TType &type = base.getType();
+ const bool input = base.getQualifier().isPipeInput();
+
+ TReflection::TMapIndexToReflection &ioItems =
+ input ? reflection.indexToPipeInput : reflection.indexToPipeOutput;
+
+ if (reflection.options & EShReflectionUnwrapIOBlocks) {
+ bool anonymous = IsAnonymous(name);
+
+ TString baseName;
+ if (type.getBasicType() == EbtBlock) {
+ baseName = anonymous ? TString() : type.getTypeName();
+ } else {
+ baseName = anonymous ? TString() : name;
+ }
+
+ // by convention if this is an arrayed block we ignore the array in the reflection
+ if (type.isArray() && type.getBasicType() == EbtBlock) {
+ blowUpIOAggregate(input, baseName, TType(type, 0));
+ } else {
+ blowUpIOAggregate(input, baseName, type);
+ }
+ } else {
+ TReflection::TNameToIndex::const_iterator it = reflection.nameToIndex.find(name.c_str());
+ if (it == reflection.nameToIndex.end()) {
+ reflection.nameToIndex[name.c_str()] = (int)ioItems.size();
+ ioItems.push_back(
+ TObjectReflection(name.c_str(), type, 0, mapToGlType(type), mapToGlArraySize(type), 0));
+
+ EShLanguageMask& stages = ioItems.back().stages;
+ stages = static_cast<EShLanguageMask>(stages | 1 << intermediate.getStage());
+ } else {
+ EShLanguageMask& stages = ioItems[it->second].stages;
+ stages = static_cast<EShLanguageMask>(stages | 1 << intermediate.getStage());
+ }
+ }
+ }
+ }
+
+ // Lookup or calculate the offset of all block members at once, using the recursively
+ // defined block offset rules.
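+    //
+    // For example (assuming std140 layout rules): for a block
+    //   layout(std140) uniform U { float a; vec3 b; };
+    // this computes offsets[0] == 0 and offsets[1] == 16, since a vec3 member
+    // is aligned to a 16-byte boundary under std140.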
+ void getOffsets(const TType& type, TVector<int>& offsets)
+ {
+ const TTypeList& memberList = *type.getStruct();
+
+ int memberSize = 0;
+ int offset = 0;
+ for (size_t m = 0; m < offsets.size(); ++m) {
+ // if the user supplied an offset, snap to it now
+ if (memberList[m].type->getQualifier().hasOffset())
+ offset = memberList[m].type->getQualifier().layoutOffset;
+
+ // calculate the offset of the next member and align the current offset to this member
+ intermediate.updateOffset(type, *memberList[m].type, offset, memberSize);
+
+ // save the offset of this member
+ offsets[m] = offset;
+
+ // update for the next member
+ offset += memberSize;
+ }
+ }
+
+ // Calculate the stride of an array type
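+    //
+    // For example (assuming std140 layout rules), a member 'float x[4]' has an
+    // array stride of 16, since std140 rounds array strides up to a multiple of
+    // a vec4's size; under std430 the same array would have a stride of 4.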
+ int getArrayStride(const TType& baseType, const TType& type)
+ {
+ int dummySize;
+ int stride;
+
+ // consider blocks to have 0 stride, so that all offsets are relative to the start of their block
+ if (type.getBasicType() == EbtBlock)
+ return 0;
+
+ TLayoutMatrix subMatrixLayout = type.getQualifier().layoutMatrix;
+ intermediate.getMemberAlignment(type, dummySize, stride,
+ baseType.getQualifier().layoutPacking,
+ subMatrixLayout != ElmNone
+ ? subMatrixLayout == ElmRowMajor
+ : baseType.getQualifier().layoutMatrix == ElmRowMajor);
+
+ return stride;
+ }
+
+    // Count the total number of leaf members reached by fully expanding a block type.
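+    //
+    // For example (a hypothetical type): with 'struct S { float a; vec2 b; }',
+    // a parent struct whose only member is 'S s[3]' yields 6: the 2 leaves of S
+    // expanded across the 3 array elements (when the strict-array-suffix
+    // buffer-block exception below does not apply).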
+ int countAggregateMembers(const TType& parentType)
+ {
+ if (! parentType.isStruct())
+ return 1;
+
+ const bool strictArraySuffix = (reflection.options & EShReflectionStrictArraySuffix);
+
+ bool blockParent = (parentType.getBasicType() == EbtBlock && parentType.getQualifier().storage == EvqBuffer);
+
+ const TTypeList &memberList = *parentType.getStruct();
+
+ int ret = 0;
+
+ for (size_t i = 0; i < memberList.size(); i++)
+ {
+ const TType &memberType = *memberList[i].type;
+ int numMembers = countAggregateMembers(memberType);
+ // for sized arrays of structs, apply logic to expand out the same as we would below in
+ // blowUpActiveAggregate
+ if (memberType.isArray() && ! memberType.getArraySizes()->hasUnsized() && memberType.isStruct()) {
+ if (! strictArraySuffix || ! blockParent)
+ numMembers *= memberType.getArraySizes()->getCumulativeSize();
+ }
+ ret += numMembers;
+ }
+
+ return ret;
+ }
+
+ // Traverse the provided deref chain, including the base, and
+ // - build a full reflection-granularity name, array size, etc. entry out of it, if it goes down to that granularity
+ // - recursively expand any variable array index in the middle of that traversal
+ // - recursively expand what's left at the end if the deref chain did not reach down to reflection granularity
+ //
+ // arraySize tracks, just for the final dereference in the chain, if there was a specific known size.
+ // A value of 0 for arraySize will mean to use the full array's size.
+ void blowUpActiveAggregate(const TType& baseType, const TString& baseName, const TList<TIntermBinary*>& derefs,
+ TList<TIntermBinary*>::const_iterator deref, int offset, int blockIndex, int arraySize,
+ int topLevelArrayStride, TStorageQualifier baseStorage, bool active)
+ {
+ // when strictArraySuffix is enabled, we closely follow the rules from ARB_program_interface_query.
+ // Broadly:
+ // * arrays-of-structs always have a [x] suffix.
+ // * with array-of-struct variables in the root of a buffer block, only ever return [0].
+ // * otherwise, array suffixes are added whenever we iterate, even if that means expanding out an array.
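+        // For illustration (hypothetical declarations; exact name prefixes
+        // depend on block and instance naming): with 'buffer B { S s[4]; }'
+        // and 'struct S { float f; }', the strict rules reflect the root
+        // array only as "...s[0].f", rather than expanding all four elements
+        // "...s[0].f" through "...s[3].f".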
+ const bool strictArraySuffix = (reflection.options & EShReflectionStrictArraySuffix);
+
+        // Is this variable inside a buffer block? This flag is set back to false after we iterate inside the first array element.
+ bool blockParent = (baseType.getBasicType() == EbtBlock && baseType.getQualifier().storage == EvqBuffer);
+
+ // process the part of the dereference chain that was explicit in the shader
+ TString name = baseName;
+ const TType* terminalType = &baseType;
+ for (; deref != derefs.end(); ++deref) {
+ TIntermBinary* visitNode = *deref;
+ terminalType = &visitNode->getType();
+ int index;
+ switch (visitNode->getOp()) {
+ case EOpIndexIndirect: {
+ int stride = getArrayStride(baseType, visitNode->getLeft()->getType());
+
+ if (topLevelArrayStride == 0)
+ topLevelArrayStride = stride;
+
+ // Visit all the indices of this array, and for each one add on the remaining dereferencing
+ for (int i = 0; i < std::max(visitNode->getLeft()->getType().getOuterArraySize(), 1); ++i) {
+ TString newBaseName = name;
+ if (strictArraySuffix && blockParent)
+ newBaseName.append(TString("[0]"));
+ else if (strictArraySuffix || baseType.getBasicType() != EbtBlock)
+ newBaseName.append(TString("[") + String(i) + "]");
+ TList<TIntermBinary*>::const_iterator nextDeref = deref;
+ ++nextDeref;
+ blowUpActiveAggregate(*terminalType, newBaseName, derefs, nextDeref, offset, blockIndex, arraySize,
+ topLevelArrayStride, baseStorage, active);
+
+ if (offset >= 0)
+ offset += stride;
+ }
+
+ // it was all completed in the recursive calls above
+ return;
+ }
+ case EOpIndexDirect: {
+ int stride = getArrayStride(baseType, visitNode->getLeft()->getType());
+
+ index = visitNode->getRight()->getAsConstantUnion()->getConstArray()[0].getIConst();
+ if (strictArraySuffix && blockParent) {
+ name.append(TString("[0]"));
+ } else if (strictArraySuffix || baseType.getBasicType() != EbtBlock) {
+ name.append(TString("[") + String(index) + "]");
+
+ if (offset >= 0)
+ offset += stride * index;
+ }
+
+ if (topLevelArrayStride == 0)
+ topLevelArrayStride = stride;
+
+ blockParent = false;
+ break;
+ }
+ case EOpIndexDirectStruct:
+ index = visitNode->getRight()->getAsConstantUnion()->getConstArray()[0].getIConst();
+ if (offset >= 0)
+ offset += intermediate.getOffset(visitNode->getLeft()->getType(), index);
+ if (name.size() > 0)
+ name.append(".");
+ name.append((*visitNode->getLeft()->getType().getStruct())[index].type->getFieldName());
+ break;
+ default:
+ break;
+ }
+ }
+
+ // if the terminalType is still too coarse a granularity, this is still an aggregate to expand, expand it...
+ if (! isReflectionGranularity(*terminalType)) {
+ // the base offset of this node, that children are relative to
+ int baseOffset = offset;
+
+ if (terminalType->isArray()) {
+ // Visit all the indices of this array, and for each one,
+ // fully explode the remaining aggregate to dereference
+
+ int stride = 0;
+ if (offset >= 0)
+ stride = getArrayStride(baseType, *terminalType);
+
+ if (topLevelArrayStride == 0)
+ topLevelArrayStride = stride;
+
+ int arrayIterateSize = std::max(terminalType->getOuterArraySize(), 1);
+
+ // for top-level arrays in blocks, only expand [0] to avoid explosion of items
+ if (strictArraySuffix && blockParent)
+ arrayIterateSize = 1;
+
+ for (int i = 0; i < arrayIterateSize; ++i) {
+ TString newBaseName = name;
+ newBaseName.append(TString("[") + String(i) + "]");
+ TType derefType(*terminalType, 0);
+ if (offset >= 0)
+ offset = baseOffset + stride * i;
+
+ blowUpActiveAggregate(derefType, newBaseName, derefs, derefs.end(), offset, blockIndex, 0,
+ topLevelArrayStride, baseStorage, active);
+ }
+ } else {
+ // Visit all members of this aggregate, and for each one,
+ // fully explode the remaining aggregate to dereference
+ const TTypeList& typeList = *terminalType->getStruct();
+
+ TVector<int> memberOffsets;
+
+ if (baseOffset >= 0) {
+ memberOffsets.resize(typeList.size());
+ getOffsets(*terminalType, memberOffsets);
+ }
+
+ for (int i = 0; i < (int)typeList.size(); ++i) {
+ TString newBaseName = name;
+ if (newBaseName.size() > 0)
+ newBaseName.append(".");
+ newBaseName.append(typeList[i].type->getFieldName());
+ TType derefType(*terminalType, i);
+ if (offset >= 0)
+ offset = baseOffset + memberOffsets[i];
+
+ int arrayStride = topLevelArrayStride;
+ if (terminalType->getBasicType() == EbtBlock && terminalType->getQualifier().storage == EvqBuffer &&
+ derefType.isArray()) {
+ arrayStride = getArrayStride(baseType, derefType);
+ }
+
+ blowUpActiveAggregate(derefType, newBaseName, derefs, derefs.end(), offset, blockIndex, 0,
+ arrayStride, baseStorage, active);
+ }
+ }
+
+ // it was all completed in the recursive calls above
+ return;
+ }
+
+ if ((reflection.options & EShReflectionBasicArraySuffix) && terminalType->isArray()) {
+ name.append(TString("[0]"));
+ }
+
+ // Finally, add a full string to the reflection database, and update the array size if necessary.
+ // If the dereferenced entity to record is an array, compute the size and update the maximum size.
+
+ // there might not be a final array dereference, it could have been copied as an array object
+ if (arraySize == 0)
+ arraySize = mapToGlArraySize(*terminalType);
+
+ TReflection::TMapIndexToReflection& variables = reflection.GetVariableMapForStorage(baseStorage);
+
+ TReflection::TNameToIndex::const_iterator it = reflection.nameToIndex.find(name.c_str());
+ if (it == reflection.nameToIndex.end()) {
+ int uniformIndex = (int)variables.size();
+ reflection.nameToIndex[name.c_str()] = uniformIndex;
+ variables.push_back(TObjectReflection(name.c_str(), *terminalType, offset, mapToGlType(*terminalType),
+ arraySize, blockIndex));
+ if (terminalType->isArray()) {
+ variables.back().arrayStride = getArrayStride(baseType, *terminalType);
+ if (topLevelArrayStride == 0)
+ topLevelArrayStride = variables.back().arrayStride;
+ }
+
+ if ((reflection.options & EShReflectionSeparateBuffers) && terminalType->getBasicType() == EbtAtomicUint)
+ reflection.atomicCounterUniformIndices.push_back(uniformIndex);
+
+ variables.back().topLevelArrayStride = topLevelArrayStride;
+
+ if ((reflection.options & EShReflectionAllBlockVariables) && active) {
+ EShLanguageMask& stages = variables.back().stages;
+ stages = static_cast<EShLanguageMask>(stages | 1 << intermediate.getStage());
+ }
+ } else {
+ if (arraySize > 1) {
+ int& reflectedArraySize = variables[it->second].size;
+ reflectedArraySize = std::max(arraySize, reflectedArraySize);
+ }
+
+ if ((reflection.options & EShReflectionAllBlockVariables) && active) {
+ EShLanguageMask& stages = variables[it->second].stages;
+ stages = static_cast<EShLanguageMask>(stages | 1 << intermediate.getStage());
+ }
+ }
+ }
+
+ // similar to blowUpActiveAggregate, but with simpler rules and no dereferences to follow.
+ void blowUpIOAggregate(bool input, const TString &baseName, const TType &type)
+ {
+ TString name = baseName;
+
+ // if the type is still too coarse a granularity, this is still an aggregate to expand, expand it...
+ if (! isReflectionGranularity(type)) {
+ if (type.isArray()) {
+ // Visit all the indices of this array, and for each one,
+ // fully explode the remaining aggregate to dereference
+ for (int i = 0; i < std::max(type.getOuterArraySize(), 1); ++i) {
+ TString newBaseName = name;
+ newBaseName.append(TString("[") + String(i) + "]");
+ TType derefType(type, 0);
+
+ blowUpIOAggregate(input, newBaseName, derefType);
+ }
+ } else {
+ // Visit all members of this aggregate, and for each one,
+ // fully explode the remaining aggregate to dereference
+ const TTypeList& typeList = *type.getStruct();
+
+ for (int i = 0; i < (int)typeList.size(); ++i) {
+ TString newBaseName = name;
+ if (newBaseName.size() > 0)
+ newBaseName.append(".");
+ newBaseName.append(typeList[i].type->getFieldName());
+ TType derefType(type, i);
+
+ blowUpIOAggregate(input, newBaseName, derefType);
+ }
+ }
+
+ // it was all completed in the recursive calls above
+ return;
+ }
+
+ if ((reflection.options & EShReflectionBasicArraySuffix) && type.isArray()) {
+ name.append(TString("[0]"));
+ }
+
+ TReflection::TMapIndexToReflection &ioItems =
+ input ? reflection.indexToPipeInput : reflection.indexToPipeOutput;
+
+ std::string namespacedName = input ? "in " : "out ";
+ namespacedName += name.c_str();
+
+ TReflection::TNameToIndex::const_iterator it = reflection.nameToIndex.find(namespacedName);
+ if (it == reflection.nameToIndex.end()) {
+ reflection.nameToIndex[namespacedName] = (int)ioItems.size();
+ ioItems.push_back(
+ TObjectReflection(name.c_str(), type, 0, mapToGlType(type), mapToGlArraySize(type), 0));
+
+ EShLanguageMask& stages = ioItems.back().stages;
+ stages = static_cast<EShLanguageMask>(stages | 1 << intermediate.getStage());
+ } else {
+ EShLanguageMask& stages = ioItems[it->second].stages;
+ stages = static_cast<EShLanguageMask>(stages | 1 << intermediate.getStage());
+ }
+ }
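+
+    // For example (illustrative): a vertex input "in vec2 uv;" is keyed as
+    // "in uv" in the shared name-to-index map (so same-named inputs and
+    // outputs stay distinct), while the reflected object keeps the plain
+    // name "uv".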
+
+ // Add a uniform dereference where blocks/struct/arrays are involved in the access.
+ // Handles the situation where the left node is at the correct or too coarse a
+ // granularity for reflection. (That is, further dereferences up the tree will be
+ // skipped.) Earlier dereferences, down the tree, will be handled
+ // at the same time, and logged to prevent reprocessing as the tree is traversed.
+ //
+ // Note: Other things like the following must be caught elsewhere:
+ // - a simple non-array, non-struct variable (no dereference even conceivable)
+    //  - an aggregate consumed en masse, without a dereference
+ //
+ // So, this code is for cases like
+ // - a struct/block dereferencing a member (whether the member is array or not)
+ // - an array of struct
+ // - structs/arrays containing the above
+ //
+ void addDereferencedUniform(TIntermBinary* topNode)
+ {
+ // See if too fine-grained to process (wait to get further down the tree)
+ const TType& leftType = topNode->getLeft()->getType();
+ if ((leftType.isVector() || leftType.isMatrix()) && ! leftType.isArray())
+ return;
+
+ // We have an array or structure or block dereference, see if it's a uniform
+ // based dereference (if not, skip it).
+ TIntermSymbol* base = findBase(topNode);
+ if (! base || ! base->getQualifier().isUniformOrBuffer())
+ return;
+
+ // See if we've already processed this (e.g., in the middle of something
+ // we did earlier), and if so skip it
+ if (processedDerefs.find(topNode) != processedDerefs.end())
+ return;
+
+ // Process this uniform dereference
+
+ int offset = -1;
+ int blockIndex = -1;
+ bool anonymous = false;
+
+ // See if we need to record the block itself
+ bool block = base->getBasicType() == EbtBlock;
+ if (block) {
+ offset = 0;
+ anonymous = IsAnonymous(base->getName());
+
+ const TString& blockName = base->getType().getTypeName();
+ TString baseName;
+
+ if (! anonymous)
+ baseName = blockName;
+
+ if (base->getType().isArray()) {
+ TType derefType(base->getType(), 0);
+
+ assert(! anonymous);
+ for (int e = 0; e < base->getType().getCumulativeArraySize(); ++e)
+ blockIndex = addBlockName(blockName + "[" + String(e) + "]", derefType,
+ intermediate.getBlockSize(base->getType()));
+ baseName.append(TString("[0]"));
+ } else
+ blockIndex = addBlockName(blockName, base->getType(), intermediate.getBlockSize(base->getType()));
+
+ if (reflection.options & EShReflectionAllBlockVariables) {
+            // Use a degenerate (empty) set of dereferences to immediately put us at the end of
+            // the dereference chain expected by blowUpActiveAggregate.
+ TList<TIntermBinary*> derefs;
+
+            // Because we don't have any derefs, the first thing blowUpActiveAggregate will do is iterate over each
+            // member in the struct definition, which loses any information about whether the parent was a buffer
+            // block. So if we're using strict array rules, which don't expand the first child of a buffer block,
+            // we instead iterate over the children here.
+ const bool strictArraySuffix = (reflection.options & EShReflectionStrictArraySuffix);
+ bool blockParent = (base->getType().getBasicType() == EbtBlock && base->getQualifier().storage == EvqBuffer);
+
+ if (strictArraySuffix && blockParent) {
+ const TTypeList& typeList = *base->getType().getStruct();
+
+ TVector<int> memberOffsets;
+
+ memberOffsets.resize(typeList.size());
+ getOffsets(base->getType(), memberOffsets);
+
+ for (int i = 0; i < (int)typeList.size(); ++i) {
+ TType derefType(base->getType(), i);
+ TString name = baseName;
+ if (name.size() > 0)
+ name.append(".");
+ name.append(typeList[i].type->getFieldName());
+
+ // if this member is an array, store the top-level array stride but start the explosion from
+ // the inner struct type.
+ if (derefType.isArray() && derefType.isStruct()) {
+ name.append("[0]");
+ blowUpActiveAggregate(TType(derefType, 0), name, derefs, derefs.end(), memberOffsets[i],
+ blockIndex, 0, getArrayStride(base->getType(), derefType),
+ base->getQualifier().storage, false);
+ } else {
+ blowUpActiveAggregate(derefType, name, derefs, derefs.end(), memberOffsets[i], blockIndex,
+ 0, 0, base->getQualifier().storage, false);
+ }
+ }
+ } else {
+ // otherwise - if we're not using strict array suffix rules, or this isn't a block so we are
+ // expanding root arrays anyway, just start the iteration from the base block type.
+ blowUpActiveAggregate(base->getType(), baseName, derefs, derefs.end(), 0, blockIndex, 0, 0,
+ base->getQualifier().storage, false);
+ }
+ }
+ }
+
+ // Process the dereference chain, backward, accumulating the pieces for later forward traversal.
+ // If the topNode is a reflection-granularity-array dereference, don't include that last dereference.
+ TList<TIntermBinary*> derefs;
+ for (TIntermBinary* visitNode = topNode; visitNode; visitNode = visitNode->getLeft()->getAsBinaryNode()) {
+ if (isReflectionGranularity(visitNode->getLeft()->getType()))
+ continue;
+
+ derefs.push_front(visitNode);
+ processedDerefs.insert(visitNode);
+ }
+ processedDerefs.insert(base);
+
+ // See if we have a specific array size to stick to while enumerating the explosion of the aggregate
+ int arraySize = 0;
+ if (isReflectionGranularity(topNode->getLeft()->getType()) && topNode->getLeft()->isArray()) {
+ if (topNode->getOp() == EOpIndexDirect)
+ arraySize = topNode->getRight()->getAsConstantUnion()->getConstArray()[0].getIConst() + 1;
+ }
+
+ // Put the dereference chain together, forward
+ TString baseName;
+ if (! anonymous) {
+ if (block)
+ baseName = base->getType().getTypeName();
+ else
+ baseName = base->getName();
+ }
+ blowUpActiveAggregate(base->getType(), baseName, derefs, derefs.begin(), offset, blockIndex, arraySize, 0,
+ base->getQualifier().storage, true);
+ }
+
+ int addBlockName(const TString& name, const TType& type, int size)
+ {
+ TReflection::TMapIndexToReflection& blocks = reflection.GetBlockMapForStorage(type.getQualifier().storage);
+
+ int blockIndex;
+ TReflection::TNameToIndex::const_iterator it = reflection.nameToIndex.find(name.c_str());
+        if (it == reflection.nameToIndex.end()) {
+ blockIndex = (int)blocks.size();
+ reflection.nameToIndex[name.c_str()] = blockIndex;
+ blocks.push_back(TObjectReflection(name.c_str(), type, -1, -1, size, -1));
+
+ blocks.back().numMembers = countAggregateMembers(type);
+
+ EShLanguageMask& stages = blocks.back().stages;
+ stages = static_cast<EShLanguageMask>(stages | 1 << intermediate.getStage());
+ } else {
+ blockIndex = it->second;
+
+ EShLanguageMask& stages = blocks[blockIndex].stages;
+ stages = static_cast<EShLanguageMask>(stages | 1 << intermediate.getStage());
+ }
+
+ return blockIndex;
+ }
+
+ // Are we at a level in a dereference chain at which individual active uniform queries are made?
+ bool isReflectionGranularity(const TType& type)
+ {
+ return type.getBasicType() != EbtBlock && type.getBasicType() != EbtStruct && !type.isArrayOfArrays();
+ }
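+
+    // E.g. (illustrative): float, vec4, and mat3 are at reflection granularity,
+    // while a struct, a block, or an array of arrays must be exploded further.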
+
+ // For a binary operation indexing into an aggregate, chase down the base of the aggregate.
+ // Return 0 if the topology does not fit this situation.
+ TIntermSymbol* findBase(const TIntermBinary* node)
+ {
+ TIntermSymbol *base = node->getLeft()->getAsSymbolNode();
+ if (base)
+ return base;
+ TIntermBinary* left = node->getLeft()->getAsBinaryNode();
+ if (! left)
+ return nullptr;
+
+ return findBase(left);
+ }
+
+ //
+ // Translate a glslang sampler type into the GL API #define number.
+ //
+ int mapSamplerToGlType(TSampler sampler)
+ {
+ if (! sampler.image) {
+ // a sampler...
+ switch (sampler.type) {
+ case EbtFloat:
+ switch ((int)sampler.dim) {
+ case Esd1D:
+ switch ((int)sampler.shadow) {
+ case false: return sampler.arrayed ? GL_SAMPLER_1D_ARRAY : GL_SAMPLER_1D;
+ case true: return sampler.arrayed ? GL_SAMPLER_1D_ARRAY_SHADOW : GL_SAMPLER_1D_SHADOW;
+ }
+ case Esd2D:
+ switch ((int)sampler.ms) {
+ case false:
+ switch ((int)sampler.shadow) {
+ case false: return sampler.arrayed ? GL_SAMPLER_2D_ARRAY : GL_SAMPLER_2D;
+ case true: return sampler.arrayed ? GL_SAMPLER_2D_ARRAY_SHADOW : GL_SAMPLER_2D_SHADOW;
+ }
+ case true: return sampler.arrayed ? GL_SAMPLER_2D_MULTISAMPLE_ARRAY : GL_SAMPLER_2D_MULTISAMPLE;
+ }
+ case Esd3D:
+ return GL_SAMPLER_3D;
+ case EsdCube:
+ switch ((int)sampler.shadow) {
+ case false: return sampler.arrayed ? GL_SAMPLER_CUBE_MAP_ARRAY : GL_SAMPLER_CUBE;
+ case true: return sampler.arrayed ? GL_SAMPLER_CUBE_MAP_ARRAY_SHADOW : GL_SAMPLER_CUBE_SHADOW;
+ }
+ case EsdRect:
+ return sampler.shadow ? GL_SAMPLER_2D_RECT_SHADOW : GL_SAMPLER_2D_RECT;
+ case EsdBuffer:
+ return GL_SAMPLER_BUFFER;
+ }
+#ifdef AMD_EXTENSIONS
+ case EbtFloat16:
+ switch ((int)sampler.dim) {
+ case Esd1D:
+ switch ((int)sampler.shadow) {
+ case false: return sampler.arrayed ? GL_FLOAT16_SAMPLER_1D_ARRAY_AMD : GL_FLOAT16_SAMPLER_1D_AMD;
+ case true: return sampler.arrayed ? GL_FLOAT16_SAMPLER_1D_ARRAY_SHADOW_AMD : GL_FLOAT16_SAMPLER_1D_SHADOW_AMD;
+ }
+ case Esd2D:
+ switch ((int)sampler.ms) {
+ case false:
+ switch ((int)sampler.shadow) {
+ case false: return sampler.arrayed ? GL_FLOAT16_SAMPLER_2D_ARRAY_AMD : GL_FLOAT16_SAMPLER_2D_AMD;
+ case true: return sampler.arrayed ? GL_FLOAT16_SAMPLER_2D_ARRAY_SHADOW_AMD : GL_FLOAT16_SAMPLER_2D_SHADOW_AMD;
+ }
+ case true: return sampler.arrayed ? GL_FLOAT16_SAMPLER_2D_MULTISAMPLE_ARRAY_AMD : GL_FLOAT16_SAMPLER_2D_MULTISAMPLE_AMD;
+ }
+ case Esd3D:
+ return GL_FLOAT16_SAMPLER_3D_AMD;
+ case EsdCube:
+ switch ((int)sampler.shadow) {
+ case false: return sampler.arrayed ? GL_FLOAT16_SAMPLER_CUBE_MAP_ARRAY_AMD : GL_FLOAT16_SAMPLER_CUBE_AMD;
+ case true: return sampler.arrayed ? GL_FLOAT16_SAMPLER_CUBE_MAP_ARRAY_SHADOW_AMD : GL_FLOAT16_SAMPLER_CUBE_SHADOW_AMD;
+ }
+ case EsdRect:
+ return sampler.shadow ? GL_FLOAT16_SAMPLER_2D_RECT_SHADOW_AMD : GL_FLOAT16_SAMPLER_2D_RECT_AMD;
+ case EsdBuffer:
+ return GL_FLOAT16_SAMPLER_BUFFER_AMD;
+ }
+#endif
+ case EbtInt:
+ switch ((int)sampler.dim) {
+ case Esd1D:
+ return sampler.arrayed ? GL_INT_SAMPLER_1D_ARRAY : GL_INT_SAMPLER_1D;
+ case Esd2D:
+ switch ((int)sampler.ms) {
+ case false: return sampler.arrayed ? GL_INT_SAMPLER_2D_ARRAY : GL_INT_SAMPLER_2D;
+ case true: return sampler.arrayed ? GL_INT_SAMPLER_2D_MULTISAMPLE_ARRAY
+ : GL_INT_SAMPLER_2D_MULTISAMPLE;
+ }
+ case Esd3D:
+ return GL_INT_SAMPLER_3D;
+ case EsdCube:
+ return sampler.arrayed ? GL_INT_SAMPLER_CUBE_MAP_ARRAY : GL_INT_SAMPLER_CUBE;
+ case EsdRect:
+ return GL_INT_SAMPLER_2D_RECT;
+ case EsdBuffer:
+ return GL_INT_SAMPLER_BUFFER;
+ }
+ case EbtUint:
+ switch ((int)sampler.dim) {
+ case Esd1D:
+ return sampler.arrayed ? GL_UNSIGNED_INT_SAMPLER_1D_ARRAY : GL_UNSIGNED_INT_SAMPLER_1D;
+ case Esd2D:
+ switch ((int)sampler.ms) {
+ case false: return sampler.arrayed ? GL_UNSIGNED_INT_SAMPLER_2D_ARRAY : GL_UNSIGNED_INT_SAMPLER_2D;
+ case true: return sampler.arrayed ? GL_UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE_ARRAY
+ : GL_UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE;
+ }
+ case Esd3D:
+ return GL_UNSIGNED_INT_SAMPLER_3D;
+ case EsdCube:
+ return sampler.arrayed ? GL_UNSIGNED_INT_SAMPLER_CUBE_MAP_ARRAY : GL_UNSIGNED_INT_SAMPLER_CUBE;
+ case EsdRect:
+ return GL_UNSIGNED_INT_SAMPLER_2D_RECT;
+ case EsdBuffer:
+ return GL_UNSIGNED_INT_SAMPLER_BUFFER;
+ }
+ default:
+ return 0;
+ }
+ } else {
+ // an image...
+ switch (sampler.type) {
+ case EbtFloat:
+ switch ((int)sampler.dim) {
+ case Esd1D:
+ return sampler.arrayed ? GL_IMAGE_1D_ARRAY : GL_IMAGE_1D;
+ case Esd2D:
+ switch ((int)sampler.ms) {
+ case false: return sampler.arrayed ? GL_IMAGE_2D_ARRAY : GL_IMAGE_2D;
+ case true: return sampler.arrayed ? GL_IMAGE_2D_MULTISAMPLE_ARRAY : GL_IMAGE_2D_MULTISAMPLE;
+ }
+ case Esd3D:
+ return GL_IMAGE_3D;
+ case EsdCube:
+ return sampler.arrayed ? GL_IMAGE_CUBE_MAP_ARRAY : GL_IMAGE_CUBE;
+ case EsdRect:
+ return GL_IMAGE_2D_RECT;
+ case EsdBuffer:
+ return GL_IMAGE_BUFFER;
+ }
+#ifdef AMD_EXTENSIONS
+ case EbtFloat16:
+ switch ((int)sampler.dim) {
+ case Esd1D:
+ return sampler.arrayed ? GL_FLOAT16_IMAGE_1D_ARRAY_AMD : GL_FLOAT16_IMAGE_1D_AMD;
+ case Esd2D:
+ switch ((int)sampler.ms) {
+ case false: return sampler.arrayed ? GL_FLOAT16_IMAGE_2D_ARRAY_AMD : GL_FLOAT16_IMAGE_2D_AMD;
+ case true: return sampler.arrayed ? GL_FLOAT16_IMAGE_2D_MULTISAMPLE_ARRAY_AMD : GL_FLOAT16_IMAGE_2D_MULTISAMPLE_AMD;
+ }
+ case Esd3D:
+ return GL_FLOAT16_IMAGE_3D_AMD;
+ case EsdCube:
+ return sampler.arrayed ? GL_FLOAT16_IMAGE_CUBE_MAP_ARRAY_AMD : GL_FLOAT16_IMAGE_CUBE_AMD;
+ case EsdRect:
+ return GL_FLOAT16_IMAGE_2D_RECT_AMD;
+ case EsdBuffer:
+ return GL_FLOAT16_IMAGE_BUFFER_AMD;
+ }
+#endif
+ case EbtInt:
+ switch ((int)sampler.dim) {
+ case Esd1D:
+ return sampler.arrayed ? GL_INT_IMAGE_1D_ARRAY : GL_INT_IMAGE_1D;
+ case Esd2D:
+ switch ((int)sampler.ms) {
+ case false: return sampler.arrayed ? GL_INT_IMAGE_2D_ARRAY : GL_INT_IMAGE_2D;
+ case true: return sampler.arrayed ? GL_INT_IMAGE_2D_MULTISAMPLE_ARRAY : GL_INT_IMAGE_2D_MULTISAMPLE;
+ }
+ case Esd3D:
+ return GL_INT_IMAGE_3D;
+ case EsdCube:
+ return sampler.arrayed ? GL_INT_IMAGE_CUBE_MAP_ARRAY : GL_INT_IMAGE_CUBE;
+ case EsdRect:
+ return GL_INT_IMAGE_2D_RECT;
+ case EsdBuffer:
+ return GL_INT_IMAGE_BUFFER;
+ }
+ case EbtUint:
+ switch ((int)sampler.dim) {
+ case Esd1D:
+ return sampler.arrayed ? GL_UNSIGNED_INT_IMAGE_1D_ARRAY : GL_UNSIGNED_INT_IMAGE_1D;
+ case Esd2D:
+ switch ((int)sampler.ms) {
+ case false: return sampler.arrayed ? GL_UNSIGNED_INT_IMAGE_2D_ARRAY : GL_UNSIGNED_INT_IMAGE_2D;
+ case true: return sampler.arrayed ? GL_UNSIGNED_INT_IMAGE_2D_MULTISAMPLE_ARRAY
+ : GL_UNSIGNED_INT_IMAGE_2D_MULTISAMPLE;
+ }
+ case Esd3D:
+ return GL_UNSIGNED_INT_IMAGE_3D;
+ case EsdCube:
+ return sampler.arrayed ? GL_UNSIGNED_INT_IMAGE_CUBE_MAP_ARRAY : GL_UNSIGNED_INT_IMAGE_CUBE;
+ case EsdRect:
+ return GL_UNSIGNED_INT_IMAGE_2D_RECT;
+ case EsdBuffer:
+ return GL_UNSIGNED_INT_IMAGE_BUFFER;
+ }
+ default:
+ return 0;
+ }
+ }
+ }
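+
+    // For example (illustrative): sampler2DArrayShadow maps to
+    // GL_SAMPLER_2D_ARRAY_SHADOW, and iimage3D maps to GL_INT_IMAGE_3D.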
+
+ //
+ // Translate a glslang type into the GL API #define number.
+ // Ignores arrayness.
+ //
+ int mapToGlType(const TType& type)
+ {
+ switch (type.getBasicType()) {
+ case EbtSampler:
+ return mapSamplerToGlType(type.getSampler());
+ case EbtStruct:
+ case EbtBlock:
+ case EbtVoid:
+ return 0;
+ default:
+ break;
+ }
+
+ if (type.isVector()) {
+ int offset = type.getVectorSize() - 2;
+ switch (type.getBasicType()) {
+ case EbtFloat: return GL_FLOAT_VEC2 + offset;
+ case EbtDouble: return GL_DOUBLE_VEC2 + offset;
+#ifdef AMD_EXTENSIONS
+ case EbtFloat16: return GL_FLOAT16_VEC2_NV + offset;
+#endif
+ case EbtInt: return GL_INT_VEC2 + offset;
+ case EbtUint: return GL_UNSIGNED_INT_VEC2 + offset;
+ case EbtInt64: return GL_INT64_ARB + offset;
+ case EbtUint64: return GL_UNSIGNED_INT64_ARB + offset;
+ case EbtBool: return GL_BOOL_VEC2 + offset;
+ case EbtAtomicUint: return GL_UNSIGNED_INT_ATOMIC_COUNTER + offset;
+ default: return 0;
+ }
+ }
+ if (type.isMatrix()) {
+ switch (type.getBasicType()) {
+ case EbtFloat:
+ switch (type.getMatrixCols()) {
+ case 2:
+ switch (type.getMatrixRows()) {
+ case 2: return GL_FLOAT_MAT2;
+ case 3: return GL_FLOAT_MAT2x3;
+ case 4: return GL_FLOAT_MAT2x4;
+ default: return 0;
+ }
+ case 3:
+ switch (type.getMatrixRows()) {
+ case 2: return GL_FLOAT_MAT3x2;
+ case 3: return GL_FLOAT_MAT3;
+ case 4: return GL_FLOAT_MAT3x4;
+ default: return 0;
+ }
+ case 4:
+ switch (type.getMatrixRows()) {
+ case 2: return GL_FLOAT_MAT4x2;
+ case 3: return GL_FLOAT_MAT4x3;
+ case 4: return GL_FLOAT_MAT4;
+ default: return 0;
+ }
+ }
+ case EbtDouble:
+ switch (type.getMatrixCols()) {
+ case 2:
+ switch (type.getMatrixRows()) {
+ case 2: return GL_DOUBLE_MAT2;
+ case 3: return GL_DOUBLE_MAT2x3;
+ case 4: return GL_DOUBLE_MAT2x4;
+ default: return 0;
+ }
+ case 3:
+ switch (type.getMatrixRows()) {
+ case 2: return GL_DOUBLE_MAT3x2;
+ case 3: return GL_DOUBLE_MAT3;
+ case 4: return GL_DOUBLE_MAT3x4;
+ default: return 0;
+ }
+ case 4:
+ switch (type.getMatrixRows()) {
+ case 2: return GL_DOUBLE_MAT4x2;
+ case 3: return GL_DOUBLE_MAT4x3;
+ case 4: return GL_DOUBLE_MAT4;
+ default: return 0;
+ }
+ }
+#ifdef AMD_EXTENSIONS
+ case EbtFloat16:
+ switch (type.getMatrixCols()) {
+ case 2:
+ switch (type.getMatrixRows()) {
+ case 2: return GL_FLOAT16_MAT2_AMD;
+ case 3: return GL_FLOAT16_MAT2x3_AMD;
+ case 4: return GL_FLOAT16_MAT2x4_AMD;
+ default: return 0;
+ }
+ case 3:
+ switch (type.getMatrixRows()) {
+ case 2: return GL_FLOAT16_MAT3x2_AMD;
+ case 3: return GL_FLOAT16_MAT3_AMD;
+ case 4: return GL_FLOAT16_MAT3x4_AMD;
+ default: return 0;
+ }
+ case 4:
+ switch (type.getMatrixRows()) {
+ case 2: return GL_FLOAT16_MAT4x2_AMD;
+ case 3: return GL_FLOAT16_MAT4x3_AMD;
+ case 4: return GL_FLOAT16_MAT4_AMD;
+ default: return 0;
+ }
+ }
+#endif
+ default:
+ return 0;
+ }
+ }
+ if (type.getVectorSize() == 1) {
+ switch (type.getBasicType()) {
+ case EbtFloat: return GL_FLOAT;
+ case EbtDouble: return GL_DOUBLE;
+#ifdef AMD_EXTENSIONS
+ case EbtFloat16: return GL_FLOAT16_NV;
+#endif
+ case EbtInt: return GL_INT;
+ case EbtUint: return GL_UNSIGNED_INT;
+ case EbtInt64: return GL_INT64_ARB;
+ case EbtUint64: return GL_UNSIGNED_INT64_ARB;
+ case EbtBool: return GL_BOOL;
+ case EbtAtomicUint: return GL_UNSIGNED_INT_ATOMIC_COUNTER;
+ default: return 0;
+ }
+ }
+
+ return 0;
+ }
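+
+    // For example (illustrative): mat2x3 maps to GL_FLOAT_MAT2x3, uvec3 to
+    // GL_UNSIGNED_INT_VEC3, and float[4] to GL_FLOAT, with arrayness reported
+    // separately via mapToGlArraySize() below.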
+
+ int mapToGlArraySize(const TType& type)
+ {
+ return type.isArray() ? type.getOuterArraySize() : 1;
+ }
+
+ TReflection& reflection;
+ std::set<const TIntermNode*> processedDerefs;
+
+protected:
+ TReflectionTraverser(TReflectionTraverser&);
+ TReflectionTraverser& operator=(TReflectionTraverser&);
+};
+
+//
+// Implement the traversal functions of interest.
+//
+
+// To catch dereferenced aggregates that must be reflected.
+// This catches them at the highest level possible in the tree.
+bool TReflectionTraverser::visitBinary(TVisit /* visit */, TIntermBinary* node)
+{
+ switch (node->getOp()) {
+ case EOpIndexDirect:
+ case EOpIndexIndirect:
+ case EOpIndexDirectStruct:
+ addDereferencedUniform(node);
+ break;
+ default:
+ break;
+ }
+
+ // still need to visit everything below, which could contain sub-expressions
+ // containing different uniforms
+ return true;
+}
+
+// To reflect non-dereferenced objects.
+void TReflectionTraverser::visitSymbol(TIntermSymbol* base)
+{
+ if (base->getQualifier().storage == EvqUniform)
+ addUniform(*base);
+
+ if ((intermediate.getStage() == reflection.firstStage && base->getQualifier().isPipeInput()) ||
+ (intermediate.getStage() == reflection.lastStage && base->getQualifier().isPipeOutput()))
+ addPipeIOVariable(*base);
+}
+
+//
+// Implement TObjectReflection methods.
+//
+
+TObjectReflection::TObjectReflection(const std::string &pName, const TType &pType, int pOffset, int pGLDefineType,
+ int pSize, int pIndex)
+ : name(pName), offset(pOffset), glDefineType(pGLDefineType), size(pSize), index(pIndex), counterIndex(-1),
+ numMembers(-1), arrayStride(0), topLevelArrayStride(0), stages(EShLanguageMask(0)), type(pType.clone())
+{
+}
+
+int TObjectReflection::getBinding() const
+{
+ if (type == nullptr || !type->getQualifier().hasBinding())
+ return -1;
+ return type->getQualifier().layoutBinding;
+}
+
+void TObjectReflection::dump() const
+{
+ printf("%s: offset %d, type %x, size %d, index %d, binding %d, stages %d", name.c_str(), offset, glDefineType, size,
+ index, getBinding(), stages);
+
+ if (counterIndex != -1)
+ printf(", counter %d", counterIndex);
+
+ if (numMembers != -1)
+ printf(", numMembers %d", numMembers);
+
+ if (arrayStride != 0)
+ printf(", arrayStride %d", arrayStride);
+
+ if (topLevelArrayStride != 0)
+ printf(", topLevelArrayStride %d", topLevelArrayStride);
+
+ printf("\n");
+}
+
+//
+// Implement TReflection methods.
+//
+
+// Track any required attribute reflection, such as compute shader numthreads.
+//
+void TReflection::buildAttributeReflection(EShLanguage stage, const TIntermediate& intermediate)
+{
+ if (stage == EShLangCompute) {
+ // Remember thread dimensions
+ for (int dim=0; dim<3; ++dim)
+ localSize[dim] = intermediate.getLocalSize(dim);
+ }
+}
+
+// build counter block index associations for buffers
+void TReflection::buildCounterIndices(const TIntermediate& intermediate)
+{
+ // search for ones that have counters
+ for (int i = 0; i < int(indexToUniformBlock.size()); ++i) {
+ const TString counterName(intermediate.addCounterBufferName(indexToUniformBlock[i].name).c_str());
+ const int index = getIndex(counterName);
+
+ if (index >= 0)
+ indexToUniformBlock[i].counterIndex = index;
+ }
+}
+
+// build Shader Stages mask for all uniforms
+void TReflection::buildUniformStageMask(const TIntermediate& intermediate)
+{
+ if (options & EShReflectionAllBlockVariables)
+ return;
+
+ for (int i = 0; i < int(indexToUniform.size()); ++i) {
+ indexToUniform[i].stages = static_cast<EShLanguageMask>(indexToUniform[i].stages | 1 << intermediate.getStage());
+ }
+
+ for (int i = 0; i < int(indexToBufferVariable.size()); ++i) {
+ indexToBufferVariable[i].stages =
+ static_cast<EShLanguageMask>(indexToBufferVariable[i].stages | 1 << intermediate.getStage());
+ }
+}
+
+// Merge live symbols from 'intermediate' into the existing reflection database.
+//
+// Returns false if the input is too malformed to do this.
+bool TReflection::addStage(EShLanguage stage, const TIntermediate& intermediate)
+{
+ if (intermediate.getTreeRoot() == nullptr ||
+ intermediate.getNumEntryPoints() != 1 ||
+ intermediate.isRecursive())
+ return false;
+
+ buildAttributeReflection(stage, intermediate);
+
+ TReflectionTraverser it(intermediate, *this);
+
+ // put the entry point on the list of functions to process
+ it.pushFunction(intermediate.getEntryPointMangledName().c_str());
+
+ // process all the functions
+ while (! it.functions.empty()) {
+ TIntermNode* function = it.functions.back();
+ it.functions.pop_back();
+ function->traverse(&it);
+ }
+
+ buildCounterIndices(intermediate);
+ buildUniformStageMask(intermediate);
+
+ return true;
+}
+
+void TReflection::dump()
+{
+ printf("Uniform reflection:\n");
+ for (size_t i = 0; i < indexToUniform.size(); ++i)
+ indexToUniform[i].dump();
+ printf("\n");
+
+ printf("Uniform block reflection:\n");
+ for (size_t i = 0; i < indexToUniformBlock.size(); ++i)
+ indexToUniformBlock[i].dump();
+ printf("\n");
+
+ printf("Buffer variable reflection:\n");
+ for (size_t i = 0; i < indexToBufferVariable.size(); ++i)
+ indexToBufferVariable[i].dump();
+ printf("\n");
+
+ printf("Buffer block reflection:\n");
+ for (size_t i = 0; i < indexToBufferBlock.size(); ++i)
+ indexToBufferBlock[i].dump();
+ printf("\n");
+
+ printf("Pipeline input reflection:\n");
+ for (size_t i = 0; i < indexToPipeInput.size(); ++i)
+ indexToPipeInput[i].dump();
+ printf("\n");
+
+ printf("Pipeline output reflection:\n");
+ for (size_t i = 0; i < indexToPipeOutput.size(); ++i)
+ indexToPipeOutput[i].dump();
+ printf("\n");
+
+ if (getLocalSize(0) > 1) {
+ static const char* axis[] = { "X", "Y", "Z" };
+
+ for (int dim=0; dim<3; ++dim)
+ if (getLocalSize(dim) > 1)
+ printf("Local size %s: %d\n", axis[dim], getLocalSize(dim));
+
+ printf("\n");
+ }
+
+ // printf("Live names\n");
+ // for (TNameToIndex::const_iterator it = nameToIndex.begin(); it != nameToIndex.end(); ++it)
+ // printf("%s: %d\n", it->first.c_str(), it->second);
+ // printf("\n");
+}
+
+} // end namespace glslang
diff --git a/thirdparty/glslang/glslang/MachineIndependent/reflection.h b/thirdparty/glslang/glslang/MachineIndependent/reflection.h
new file mode 100644
index 0000000000..44b17a05ad
--- /dev/null
+++ b/thirdparty/glslang/glslang/MachineIndependent/reflection.h
@@ -0,0 +1,203 @@
+//
+// Copyright (C) 2013-2016 LunarG, Inc.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+#ifndef _REFLECTION_INCLUDED
+#define _REFLECTION_INCLUDED
+
+#include "../Public/ShaderLang.h"
+#include "../Include/Types.h"
+
+#include <list>
+#include <set>
+
+//
+// A reflection database and its interface, consistent with the OpenGL API reflection queries.
+//
+
+namespace glslang {
+
+class TIntermediate;
+class TIntermAggregate;
+class TReflectionTraverser;
+
+// The full reflection database
+class TReflection {
+public:
+ TReflection(EShReflectionOptions opts, EShLanguage first, EShLanguage last)
+ : options(opts), firstStage(first), lastStage(last), badReflection(TObjectReflection::badReflection())
+ {
+ for (int dim=0; dim<3; ++dim)
+ localSize[dim] = 0;
+ }
+
+ virtual ~TReflection() {}
+
+ // grow the reflection stage by stage
+ bool addStage(EShLanguage, const TIntermediate&);
+
+ // for mapping a uniform index to a uniform object's description
+ int getNumUniforms() { return (int)indexToUniform.size(); }
+ const TObjectReflection& getUniform(int i) const
+ {
+ if (i >= 0 && i < (int)indexToUniform.size())
+ return indexToUniform[i];
+ else
+ return badReflection;
+ }
+
+ // for mapping a block index to the block's description
+ int getNumUniformBlocks() const { return (int)indexToUniformBlock.size(); }
+ const TObjectReflection& getUniformBlock(int i) const
+ {
+ if (i >= 0 && i < (int)indexToUniformBlock.size())
+ return indexToUniformBlock[i];
+ else
+ return badReflection;
+ }
+
+    // for mapping a pipeline input index to the input's description
+ int getNumPipeInputs() { return (int)indexToPipeInput.size(); }
+ const TObjectReflection& getPipeInput(int i) const
+ {
+ if (i >= 0 && i < (int)indexToPipeInput.size())
+ return indexToPipeInput[i];
+ else
+ return badReflection;
+ }
+
+    // for mapping a pipeline output index to the output's description
+ int getNumPipeOutputs() { return (int)indexToPipeOutput.size(); }
+ const TObjectReflection& getPipeOutput(int i) const
+ {
+ if (i >= 0 && i < (int)indexToPipeOutput.size())
+ return indexToPipeOutput[i];
+ else
+ return badReflection;
+ }
+
+ // for mapping from an atomic counter to the uniform index
+ int getNumAtomicCounters() const { return (int)atomicCounterUniformIndices.size(); }
+ const TObjectReflection& getAtomicCounter(int i) const
+ {
+ if (i >= 0 && i < (int)atomicCounterUniformIndices.size())
+ return getUniform(atomicCounterUniformIndices[i]);
+ else
+ return badReflection;
+ }
+
+ // for mapping a buffer variable index to a buffer variable object's description
+ int getNumBufferVariables() { return (int)indexToBufferVariable.size(); }
+ const TObjectReflection& getBufferVariable(int i) const
+ {
+ if (i >= 0 && i < (int)indexToBufferVariable.size())
+ return indexToBufferVariable[i];
+ else
+ return badReflection;
+ }
+
+ // for mapping a storage block index to the storage block's description
+ int getNumStorageBuffers() const { return (int)indexToBufferBlock.size(); }
+ const TObjectReflection& getStorageBufferBlock(int i) const
+ {
+ if (i >= 0 && i < (int)indexToBufferBlock.size())
+ return indexToBufferBlock[i];
+ else
+ return badReflection;
+ }
+
+ // for mapping any name to its index (block names, uniform names and input/output names)
+ int getIndex(const char* name) const
+ {
+ TNameToIndex::const_iterator it = nameToIndex.find(name);
+ if (it == nameToIndex.end())
+ return -1;
+ else
+ return it->second;
+ }
+
+ // see getIndex(const char*)
+ int getIndex(const TString& name) const { return getIndex(name.c_str()); }
+
+ // Thread local size
+    unsigned getLocalSize(int dim) const { return (dim >= 0 && dim <= 2) ? localSize[dim] : 0; }
+
+ void dump();
+
+protected:
+ friend class glslang::TReflectionTraverser;
+
+ void buildCounterIndices(const TIntermediate&);
+ void buildUniformStageMask(const TIntermediate& intermediate);
+ void buildAttributeReflection(EShLanguage, const TIntermediate&);
+
+ // Need a TString hash: typedef std::unordered_map<TString, int> TNameToIndex;
+ typedef std::map<std::string, int> TNameToIndex;
+ typedef std::vector<TObjectReflection> TMapIndexToReflection;
+ typedef std::vector<int> TIndices;
+
+ TMapIndexToReflection& GetBlockMapForStorage(TStorageQualifier storage)
+ {
+ if ((options & EShReflectionSeparateBuffers) && storage == EvqBuffer)
+ return indexToBufferBlock;
+ return indexToUniformBlock;
+ }
+ TMapIndexToReflection& GetVariableMapForStorage(TStorageQualifier storage)
+ {
+ if ((options & EShReflectionSeparateBuffers) && storage == EvqBuffer)
+ return indexToBufferVariable;
+ return indexToUniform;
+ }
+
+ EShReflectionOptions options;
+
+ EShLanguage firstStage;
+ EShLanguage lastStage;
+
+    TObjectReflection badReflection; // returned for queries of -1 or otherwise out of range; carries the expected descriptions for that case
+    TNameToIndex nameToIndex; // maps names to indices; holds all kinds of names (uniform/buffer) as well as which function names have been processed
+ TMapIndexToReflection indexToUniform;
+ TMapIndexToReflection indexToUniformBlock;
+ TMapIndexToReflection indexToBufferVariable;
+ TMapIndexToReflection indexToBufferBlock;
+ TMapIndexToReflection indexToPipeInput;
+ TMapIndexToReflection indexToPipeOutput;
+ TIndices atomicCounterUniformIndices;
+
+ unsigned int localSize[3];
+};
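+
+// Minimal usage sketch (illustrative; assumes the TIntermediate objects come
+// from a successfully linked program):
+//
+//     TReflection reflection(EShReflectionDefault, EShLangVertex, EShLangFragment);
+//     reflection.addStage(EShLangVertex, vertexIntermediate);
+//     reflection.addStage(EShLangFragment, fragmentIntermediate);
+//     int i = reflection.getIndex("myUniform");
+//     if (i >= 0)
+//         reflection.getUniform(i).dump();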
+
+} // end namespace glslang
+
+#endif // _REFLECTION_INCLUDED
diff --git a/thirdparty/glslang/glslang/OSDependent/Unix/ossource.cpp b/thirdparty/glslang/glslang/OSDependent/Unix/ossource.cpp
new file mode 100644
index 0000000000..3f029f0239
--- /dev/null
+++ b/thirdparty/glslang/glslang/OSDependent/Unix/ossource.cpp
@@ -0,0 +1,207 @@
+//
+// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+//
+// This file contains the Linux-specific functions
+//
+#include "../osinclude.h"
+#include "../../../OGLCompilersDLL/InitializeDll.h"
+
+#include <pthread.h>
+#include <semaphore.h>
+#include <assert.h>
+#include <errno.h>
+#include <stdint.h>
+#include <cstdio>
+#include <sys/time.h>
+
+#if !defined(__Fuchsia__)
+#include <sys/resource.h>
+#endif
+
+namespace glslang {
+
+//
+// Thread cleanup
+//
+
+//
+// Wrapper for Linux call to DetachThread. This is required as pthread_cleanup_push() expects
+// the cleanup routine to return void.
+//
+static void DetachThreadLinux(void *)
+{
+ DetachThread();
+}
+
+//
+// Registers cleanup handler, sets cancel type and state, and executes the thread specific
+// cleanup handler. This function will be called in the Standalone.cpp for regression
+// testing. When OpenGL applications are run with the driver code, Linux OS does the
+// thread cleanup.
+//
+void OS_CleanupThreadData(void)
+{
+#if defined(__ANDROID__) || defined(__Fuchsia__)
+ DetachThreadLinux(NULL);
+#else
+ int old_cancel_state, old_cancel_type;
+ void *cleanupArg = NULL;
+
+ //
+ // Set thread cancel state and push cleanup handler.
+ //
+ pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, &old_cancel_state);
+ pthread_cleanup_push(DetachThreadLinux, (void *) cleanupArg);
+
+ //
+ // Put the thread in deferred cancellation mode.
+ //
+ pthread_setcanceltype(PTHREAD_CANCEL_DEFERRED, &old_cancel_type);
+
+ //
+ // Pop cleanup handler and execute it prior to unregistering the cleanup handler.
+ //
+ pthread_cleanup_pop(1);
+
+ //
+ // Restore the thread's previous cancellation mode.
+ //
+    pthread_setcanceltype(old_cancel_type, NULL);
+#endif
+}
+
+//
+// Thread Local Storage Operations
+//
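+// Note: keys are biased by +1 below so that a valid pthread key of 0 remains
+// distinguishable from OS_INVALID_TLS_INDEX, which is defined as ((void*)0).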
+inline OS_TLSIndex PthreadKeyToTLSIndex(pthread_key_t key)
+{
+ return (OS_TLSIndex)((uintptr_t)key + 1);
+}
+
+inline pthread_key_t TLSIndexToPthreadKey(OS_TLSIndex nIndex)
+{
+ return (pthread_key_t)((uintptr_t)nIndex - 1);
+}
+
+OS_TLSIndex OS_AllocTLSIndex()
+{
+ pthread_key_t pPoolIndex;
+
+ //
+ // Create global pool key.
+ //
+ if ((pthread_key_create(&pPoolIndex, NULL)) != 0) {
+ assert(0 && "OS_AllocTLSIndex(): Unable to allocate Thread Local Storage");
+ return OS_INVALID_TLS_INDEX;
+ }
+ else
+ return PthreadKeyToTLSIndex(pPoolIndex);
+}
+
+bool OS_SetTLSValue(OS_TLSIndex nIndex, void *lpvValue)
+{
+ if (nIndex == OS_INVALID_TLS_INDEX) {
+ assert(0 && "OS_SetTLSValue(): Invalid TLS Index");
+ return false;
+ }
+
+ if (pthread_setspecific(TLSIndexToPthreadKey(nIndex), lpvValue) == 0)
+ return true;
+ else
+ return false;
+}
+
+void* OS_GetTLSValue(OS_TLSIndex nIndex)
+{
+ //
+ // This function should return 0 if nIndex is invalid.
+ //
+ assert(nIndex != OS_INVALID_TLS_INDEX);
+ return pthread_getspecific(TLSIndexToPthreadKey(nIndex));
+}
+
+bool OS_FreeTLSIndex(OS_TLSIndex nIndex)
+{
+ if (nIndex == OS_INVALID_TLS_INDEX) {
+        assert(0 && "OS_FreeTLSIndex(): Invalid TLS Index");
+ return false;
+ }
+
+ //
+ // Delete the global pool key.
+ //
+ if (pthread_key_delete(TLSIndexToPthreadKey(nIndex)) == 0)
+ return true;
+ else
+ return false;
+}
+
+namespace {
+ pthread_mutex_t gMutex;
+}
+
+void InitGlobalLock()
+{
+ pthread_mutexattr_t mutexattr;
+ pthread_mutexattr_init(&mutexattr);
+ pthread_mutexattr_settype(&mutexattr, PTHREAD_MUTEX_RECURSIVE);
+ pthread_mutex_init(&gMutex, &mutexattr);
+}
+
+void GetGlobalLock()
+{
+ pthread_mutex_lock(&gMutex);
+}
+
+void ReleaseGlobalLock()
+{
+ pthread_mutex_unlock(&gMutex);
+}
+
+// #define DUMP_COUNTERS
+
+void OS_DumpMemoryCounters()
+{
+#ifdef DUMP_COUNTERS
+ struct rusage usage;
+
+ if (getrusage(RUSAGE_SELF, &usage) == 0)
+ printf("Working set size: %ld\n", usage.ru_maxrss * 1024);
+#else
+ printf("Recompile with DUMP_COUNTERS defined to see counters.\n");
+#endif
+}
+
+} // end namespace glslang
diff --git a/thirdparty/glslang/glslang/OSDependent/Windows/main.cpp b/thirdparty/glslang/glslang/OSDependent/Windows/main.cpp
new file mode 100644
index 0000000000..0bcde7b660
--- /dev/null
+++ b/thirdparty/glslang/glslang/OSDependent/Windows/main.cpp
@@ -0,0 +1,74 @@
+//
+// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+#include "InitializeDll.h"
+
+#define STRICT
+#define VC_EXTRALEAN 1
+#include <windows.h>
+#include <assert.h>
+
+BOOL WINAPI DllMain(HINSTANCE hinstDLL, DWORD fdwReason, LPVOID lpvReserved)
+{
+ switch (fdwReason)
+ {
+ case DLL_PROCESS_ATTACH:
+
+ if (! glslang::InitProcess())
+ return FALSE;
+ break;
+ case DLL_THREAD_ATTACH:
+
+ if (! glslang::InitThread())
+ return FALSE;
+ break;
+
+ case DLL_THREAD_DETACH:
+
+ if (! glslang::DetachThread())
+ return FALSE;
+ break;
+
+ case DLL_PROCESS_DETACH:
+
+ glslang::DetachProcess();
+ break;
+
+ default:
+ assert(0 && "DllMain(): Reason for calling DLL Main is unknown");
+ return FALSE;
+ }
+
+ return TRUE;
+}
diff --git a/thirdparty/glslang/glslang/OSDependent/Windows/ossource.cpp b/thirdparty/glslang/glslang/OSDependent/Windows/ossource.cpp
new file mode 100644
index 0000000000..870840c56e
--- /dev/null
+++ b/thirdparty/glslang/glslang/OSDependent/Windows/ossource.cpp
@@ -0,0 +1,147 @@
+//
+// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+#include "../osinclude.h"
+
+#define STRICT
+#define VC_EXTRALEAN 1
+#include <windows.h>
+#include <cassert>
+#include <process.h>
+#include <psapi.h>
+#include <cstdio>
+#include <cstdint>
+
+//
+// This file contains the Windows-specific functions
+//
+
+#if !(defined(_WIN32) || defined(_WIN64))
+#error Trying to build a windows specific file in a non windows build.
+#endif
+
+namespace glslang {
+
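+// As on the pthread path, the +1/-1 bias below keeps a valid native TLS index
+// of 0 from colliding with OS_INVALID_TLS_INDEX ((void*)0).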
+inline OS_TLSIndex ToGenericTLSIndex (DWORD handle)
+{
+ return (OS_TLSIndex)((uintptr_t)handle + 1);
+}
+
+inline DWORD ToNativeTLSIndex (OS_TLSIndex nIndex)
+{
+ return (DWORD)((uintptr_t)nIndex - 1);
+}
+
+//
+// Thread Local Storage Operations
+//
+OS_TLSIndex OS_AllocTLSIndex()
+{
+ DWORD dwIndex = TlsAlloc();
+ if (dwIndex == TLS_OUT_OF_INDEXES) {
+ assert(0 && "OS_AllocTLSIndex(): Unable to allocate Thread Local Storage");
+ return OS_INVALID_TLS_INDEX;
+ }
+
+ return ToGenericTLSIndex(dwIndex);
+}
+
+bool OS_SetTLSValue(OS_TLSIndex nIndex, void *lpvValue)
+{
+ if (nIndex == OS_INVALID_TLS_INDEX) {
+ assert(0 && "OS_SetTLSValue(): Invalid TLS Index");
+ return false;
+ }
+
+ if (TlsSetValue(ToNativeTLSIndex(nIndex), lpvValue))
+ return true;
+ else
+ return false;
+}
+
+void* OS_GetTLSValue(OS_TLSIndex nIndex)
+{
+ assert(nIndex != OS_INVALID_TLS_INDEX);
+ return TlsGetValue(ToNativeTLSIndex(nIndex));
+}
+
+bool OS_FreeTLSIndex(OS_TLSIndex nIndex)
+{
+ if (nIndex == OS_INVALID_TLS_INDEX) {
+        assert(0 && "OS_FreeTLSIndex(): Invalid TLS Index");
+ return false;
+ }
+
+ if (TlsFree(ToNativeTLSIndex(nIndex)))
+ return true;
+ else
+ return false;
+}
+
+HANDLE GlobalLock;
+
+void InitGlobalLock()
+{
+ GlobalLock = CreateMutex(0, false, 0);
+}
+
+void GetGlobalLock()
+{
+ WaitForSingleObject(GlobalLock, INFINITE);
+}
+
+void ReleaseGlobalLock()
+{
+ ReleaseMutex(GlobalLock);
+}
+
+unsigned int __stdcall EnterGenericThread (void* entry)
+{
+ return ((TThreadEntrypoint)entry)(0);
+}
+
+//#define DUMP_COUNTERS
+
+void OS_DumpMemoryCounters()
+{
+#ifdef DUMP_COUNTERS
+ PROCESS_MEMORY_COUNTERS counters;
+ GetProcessMemoryInfo(GetCurrentProcess(), &counters, sizeof(counters));
+ printf("Working set size: %d\n", counters.WorkingSetSize);
+#else
+ printf("Recompile with DUMP_COUNTERS defined to see counters.\n");
+#endif
+}
+
+} // namespace glslang
diff --git a/thirdparty/glslang/glslang/OSDependent/osinclude.h b/thirdparty/glslang/glslang/OSDependent/osinclude.h
new file mode 100644
index 0000000000..218abe4f23
--- /dev/null
+++ b/thirdparty/glslang/glslang/OSDependent/osinclude.h
@@ -0,0 +1,63 @@
+//
+// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+#ifndef __OSINCLUDE_H
+#define __OSINCLUDE_H
+
+namespace glslang {
+
+//
+// Thread Local Storage Operations
+//
+typedef void* OS_TLSIndex;
+#define OS_INVALID_TLS_INDEX ((void*)0)
+
+OS_TLSIndex OS_AllocTLSIndex();
+bool OS_SetTLSValue(OS_TLSIndex nIndex, void *lpvValue);
+bool OS_FreeTLSIndex(OS_TLSIndex nIndex);
+void* OS_GetTLSValue(OS_TLSIndex nIndex);
+
+void InitGlobalLock();
+void GetGlobalLock();
+void ReleaseGlobalLock();
+
+typedef unsigned int (*TThreadEntrypoint)(void*);
+
+void OS_CleanupThreadData(void);
+
+void OS_DumpMemoryCounters();
+
+} // end namespace glslang
+
+#endif // __OSINCLUDE_H
diff --git a/thirdparty/glslang/glslang/Public/ShaderLang.h b/thirdparty/glslang/glslang/Public/ShaderLang.h
new file mode 100644
index 0000000000..33f05e2cdf
--- /dev/null
+++ b/thirdparty/glslang/glslang/Public/ShaderLang.h
@@ -0,0 +1,847 @@
+//
+// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+// Copyright (C) 2013-2016 LunarG, Inc.
+// Copyright (C) 2015-2018 Google, Inc.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+#ifndef _COMPILER_INTERFACE_INCLUDED_
+#define _COMPILER_INTERFACE_INCLUDED_
+
+#include "../Include/ResourceLimits.h"
+#include "../MachineIndependent/Versions.h"
+
+#include <cstring>
+#include <vector>
+
+#ifdef _WIN32
+#define C_DECL __cdecl
+//#ifdef SH_EXPORTING
+// #define SH_IMPORT_EXPORT __declspec(dllexport)
+//#else
+// #define SH_IMPORT_EXPORT __declspec(dllimport)
+//#endif
+#define SH_IMPORT_EXPORT
+#else
+#define SH_IMPORT_EXPORT
+#define C_DECL
+#endif
+
+//
+// This is the platform independent interface between an OGL driver
+// and the shading language compiler/linker.
+//
+
+#ifdef __cplusplus
+ extern "C" {
+#endif
+
+// This should always increase, as some paths do not consume
+// a more major number.
+// It should increment by one when new functionality is added.
+#define GLSLANG_MINOR_VERSION 12
+
+//
+// Call before doing any other compiler/linker operations.
+//
+// (Call once per process, not once per thread.)
+//
+SH_IMPORT_EXPORT int ShInitialize();
+
+//
+// Call this at process shutdown to clean up memory.
+//
+SH_IMPORT_EXPORT int ShFinalize();
+
+//
+// Types of languages the compiler can consume.
+//
+typedef enum {
+ EShLangVertex,
+ EShLangTessControl,
+ EShLangTessEvaluation,
+ EShLangGeometry,
+ EShLangFragment,
+ EShLangCompute,
+ EShLangRayGenNV,
+ EShLangIntersectNV,
+ EShLangAnyHitNV,
+ EShLangClosestHitNV,
+ EShLangMissNV,
+ EShLangCallableNV,
+ EShLangTaskNV,
+ EShLangMeshNV,
+ EShLangCount,
+} EShLanguage; // would be better as stage, but this is ancient now
+
+typedef enum {
+ EShLangVertexMask = (1 << EShLangVertex),
+ EShLangTessControlMask = (1 << EShLangTessControl),
+ EShLangTessEvaluationMask = (1 << EShLangTessEvaluation),
+ EShLangGeometryMask = (1 << EShLangGeometry),
+ EShLangFragmentMask = (1 << EShLangFragment),
+ EShLangComputeMask = (1 << EShLangCompute),
+ EShLangRayGenNVMask = (1 << EShLangRayGenNV),
+ EShLangIntersectNVMask = (1 << EShLangIntersectNV),
+ EShLangAnyHitNVMask = (1 << EShLangAnyHitNV),
+ EShLangClosestHitNVMask = (1 << EShLangClosestHitNV),
+ EShLangMissNVMask = (1 << EShLangMissNV),
+ EShLangCallableNVMask = (1 << EShLangCallableNV),
+ EShLangTaskNVMask = (1 << EShLangTaskNV),
+ EShLangMeshNVMask = (1 << EShLangMeshNV),
+} EShLanguageMask;
+
+namespace glslang {
+
+class TType;
+
+typedef enum {
+ EShSourceNone,
+ EShSourceGlsl,
+ EShSourceHlsl,
+} EShSource; // if EShLanguage were EShStage, this could be EShLanguage instead
+
+typedef enum {
+ EShClientNone,
+ EShClientVulkan,
+ EShClientOpenGL,
+} EShClient;
+
+typedef enum {
+ EShTargetNone,
+ EShTargetSpv, // preferred spelling
+ EshTargetSpv = EShTargetSpv, // legacy spelling
+} EShTargetLanguage;
+
+typedef enum {
+ EShTargetVulkan_1_0 = (1 << 22),
+ EShTargetVulkan_1_1 = (1 << 22) | (1 << 12),
+ EShTargetOpenGL_450 = 450,
+} EShTargetClientVersion;
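+
+// Note: the Vulkan values use Vulkan's VK_MAKE_VERSION packing,
+// (major << 22) | (minor << 12), while the OpenGL value is the plain version number.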
+
+typedef EShTargetClientVersion EshTargetClientVersion;
+
+typedef enum {
+ EShTargetSpv_1_0 = (1 << 16),
+ EShTargetSpv_1_1 = (1 << 16) | (1 << 8),
+ EShTargetSpv_1_2 = (1 << 16) | (2 << 8),
+ EShTargetSpv_1_3 = (1 << 16) | (3 << 8),
+ EShTargetSpv_1_4 = (1 << 16) | (4 << 8),
+} EShTargetLanguageVersion;
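+
+// Note: these values follow the SPIR-V header's version word layout,
+// (major << 16) | (minor << 8).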
+
+struct TInputLanguage {
+ EShSource languageFamily; // redundant information with other input, this one overrides when not EShSourceNone
+ EShLanguage stage; // redundant information with other input, this one overrides when not EShSourceNone
+ EShClient dialect;
+ int dialectVersion; // version of client's language definition, not the client (when not EShClientNone)
+};
+
+struct TClient {
+ EShClient client;
+ EShTargetClientVersion version; // version of client itself (not the client's input dialect)
+};
+
+struct TTarget {
+ EShTargetLanguage language;
+ EShTargetLanguageVersion version; // version to target, if SPIR-V, defined by "word 1" of the SPIR-V header
+ bool hlslFunctionality1; // can target hlsl_functionality1 extension(s)
+};
+
+// All source/client/target versions and settings.
+// Can override previous methods of setting, when items are set here.
+// Expected to grow, as more are added, rather than growing parameter lists.
+struct TEnvironment {
+ TInputLanguage input; // definition of the input language
+ TClient client; // what client is the overall compilation being done for?
+ TTarget target; // what to generate
+};
+
+const char* StageName(EShLanguage);
+
+} // end namespace glslang
+
+//
+// Types of output the linker will create.
+//
+typedef enum {
+ EShExVertexFragment,
+ EShExFragment
+} EShExecutable;
+
+//
+// Optimization level for the compiler.
+//
+typedef enum {
+ EShOptNoGeneration,
+ EShOptNone,
+ EShOptSimple, // Optimizations that can be done quickly
+ EShOptFull, // Optimizations that will take more time
+} EShOptimizationLevel;
+
+//
+// Texture and Sampler transformation mode.
+//
+typedef enum {
+ EShTexSampTransKeep, // keep textures and samplers as is (default)
+    EShTexSampTransUpgradeTextureRemoveSampler, // change texture w/o embedded sampler into sampled texture and throw away all samplers
+} EShTextureSamplerTransformMode;
+
+//
+// Message choices for what errors and warnings are given.
+//
+enum EShMessages {
+ EShMsgDefault = 0, // default is to give all required errors and extra warnings
+ EShMsgRelaxedErrors = (1 << 0), // be liberal in accepting input
+ EShMsgSuppressWarnings = (1 << 1), // suppress all warnings, except those required by the specification
+ EShMsgAST = (1 << 2), // print the AST intermediate representation
+ EShMsgSpvRules = (1 << 3), // issue messages for SPIR-V generation
+ EShMsgVulkanRules = (1 << 4), // issue messages for Vulkan-requirements of GLSL for SPIR-V
+ EShMsgOnlyPreprocessor = (1 << 5), // only print out errors produced by the preprocessor
+ EShMsgReadHlsl = (1 << 6), // use HLSL parsing rules and semantics
+ EShMsgCascadingErrors = (1 << 7), // get cascading errors; risks error-recovery issues, instead of an early exit
+ EShMsgKeepUncalled = (1 << 8), // for testing, don't eliminate uncalled functions
+ EShMsgHlslOffsets = (1 << 9), // allow block offsets to follow HLSL rules instead of GLSL rules
+ EShMsgDebugInfo = (1 << 10), // save debug information
+ EShMsgHlslEnable16BitTypes = (1 << 11), // enable use of 16-bit types in SPIR-V for HLSL
+ EShMsgHlslLegalization = (1 << 12), // enable HLSL Legalization messages
+ EShMsgHlslDX9Compatible = (1 << 13), // enable HLSL DX9 compatible mode (right now only for samplers)
+ EShMsgBuiltinSymbolTable = (1 << 14), // print the builtin symbol table
+};
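+
+// Flags combine by OR'ing; e.g., a Vulkan SPIR-V compile typically passes
+// (EShMessages)(EShMsgSpvRules | EShMsgVulkanRules).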
+
+//
+// Options for building reflection
+//
+typedef enum {
+ EShReflectionDefault = 0, // default is original behaviour before options were added
+ EShReflectionStrictArraySuffix = (1 << 0), // reflection will follow stricter rules for array-of-structs suffixes
+ EShReflectionBasicArraySuffix = (1 << 1), // arrays of basic types will be appended with [0] as in GL reflection
+ EShReflectionIntermediateIO = (1 << 2), // reflect inputs and outputs to program, even with no vertex shader
+ EShReflectionSeparateBuffers = (1 << 3), // buffer variables and buffer blocks are reflected separately
+ EShReflectionAllBlockVariables = (1 << 4), // reflect all variables in blocks, even if they are inactive
+ EShReflectionUnwrapIOBlocks = (1 << 5), // unwrap input/output blocks the same as with uniform blocks
+} EShReflectionOptions;
+
+//
+// Build a table for bindings. This can be used for locating
+// attributes, uniforms, globals, etc., as needed.
+//
+typedef struct {
+ const char* name;
+ int binding;
+} ShBinding;
+
+typedef struct {
+ int numBindings;
+ ShBinding* bindings; // array of bindings
+} ShBindingTable;
+
+//
+// ShHandle held by but opaque to the driver. It is allocated,
+// managed, and de-allocated by the compiler/linker. Its contents
+// are defined by and used by the compiler and linker. For example,
+// symbol table information and object code passed from the compiler
+// to the linker can be stored where ShHandle points.
+//
+// If handle creation fails, 0 will be returned.
+//
+typedef void* ShHandle;
+
+//
+// Driver calls these to create and destroy compiler/linker
+// objects.
+//
+SH_IMPORT_EXPORT ShHandle ShConstructCompiler(const EShLanguage, int debugOptions); // one per shader
+SH_IMPORT_EXPORT ShHandle ShConstructLinker(const EShExecutable, int debugOptions); // one per shader pair
+SH_IMPORT_EXPORT ShHandle ShConstructUniformMap(); // one per uniform namespace (currently entire program object)
+SH_IMPORT_EXPORT void ShDestruct(ShHandle);
+
+//
+// The return value of ShCompile is boolean, non-zero indicating
+// success.
+//
+// The info-log should be written by ShCompile into
+// ShHandle, so it can answer future queries.
+//
+SH_IMPORT_EXPORT int ShCompile(
+ const ShHandle,
+ const char* const shaderStrings[],
+ const int numStrings,
+ const int* lengths,
+ const EShOptimizationLevel,
+ const TBuiltInResource *resources,
+ int debugOptions,
+ int defaultVersion = 110, // use 100 for ES environment, overridden by #version in shader
+ bool forwardCompatible = false, // give errors for use of deprecated features
+ EShMessages messages = EShMsgDefault // warnings and errors
+ );
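+
+// A minimal call sketch for ShCompile (illustrative only; "source" is
+// client-provided GLSL text and "resources" a client-filled TBuiltInResource):
+//
+//     ShHandle compiler = ShConstructCompiler(EShLangVertex, 0);
+//     const char* strings[] = { source };
+//     if (ShCompile(compiler, strings, 1, nullptr, EShOptNone, &resources, 0) == 0)
+//         printf("%s\n", ShGetInfoLog(compiler));
+//     ShDestruct(compiler);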
+
+SH_IMPORT_EXPORT int ShLinkExt(
+ const ShHandle, // linker object
+ const ShHandle h[], // compiler objects to link together
+ const int numHandles);
+
+//
+// ShSetEncryptionMethod is a placeholder for specifying
+// how source code is encrypted.
+//
+SH_IMPORT_EXPORT void ShSetEncryptionMethod(ShHandle);
+
+//
+// All the following return 0 if the information is not
+// available in the object passed down, or the object is bad.
+//
+SH_IMPORT_EXPORT const char* ShGetInfoLog(const ShHandle);
+SH_IMPORT_EXPORT const void* ShGetExecutable(const ShHandle);
+SH_IMPORT_EXPORT int ShSetVirtualAttributeBindings(const ShHandle, const ShBindingTable*); // to detect user aliasing
+SH_IMPORT_EXPORT int ShSetFixedAttributeBindings(const ShHandle, const ShBindingTable*); // to force any physical mappings
+//
+// Tell the linker to never assign a vertex attribute to this list of physical attributes
+//
+SH_IMPORT_EXPORT int ShExcludeAttributes(const ShHandle, int *attributes, int count);
+
+//
+// Returns the location ID of the named uniform.
+// Returns -1 on error.
+//
+SH_IMPORT_EXPORT int ShGetUniformLocation(const ShHandle uniformMap, const char* name);
+
+#ifdef __cplusplus
+ } // end extern "C"
+#endif
+
+////////////////////////////////////////////////////////////////////////////////////////////
+//
+// Deferred-Lowering C++ Interface
+// -----------------------------------
+//
+// Below is a new alternate C++ interface, which deprecates the above
+// opaque handle-based interface.
+//
+// This interface is further designed to handle multiple compilation units per stage, where
+// the intermediate results, including the parse tree, are preserved until link time,
+// unlike the above interface, which lowers each compilation unit at compile time.
+// In the above model, linking occurs on the lowered results; in this model,
+// intra-stage linking can occur at the parse-tree (treeRoot in TIntermediate)
+// level, after which a full stage can be lowered.
+//
+
+#include <list>
+#include <string>
+#include <utility>
+#include <vector> // std::vector is used by addProcesses() and setResourceSetBinding() below
+
+class TCompiler;
+class TInfoSink;
+
+namespace glslang {
+
+const char* GetEsslVersionString();
+const char* GetGlslVersionString();
+int GetKhronosToolId();
+
+class TIntermediate;
+class TProgram;
+class TPoolAllocator;
+
+// Call this exactly once per process before using anything else
+bool InitializeProcess();
+
+// Call once per process to tear down everything
+void FinalizeProcess();
+
+// Resource type for IO resolver
+enum TResourceType {
+ EResSampler,
+ EResTexture,
+ EResImage,
+ EResUbo,
+ EResSsbo,
+ EResUav,
+ EResCount
+};
+
+// Make one TShader per shader that you will link into a program. Then
+// - provide the shader through setStrings() or setStringsWithLengths()
+// - optionally call setEnv*(), see below for more detail
+// - optionally use setPreamble() to set a special shader string that will be
+// processed before all others but won't affect the validity of #version
+// - call parse(): source language and target environment must be selected
+// either by correct setting of EShMessages sent to parse(), or by
+// explicitly calling setEnv*()
+// - query the info logs
+//
+// N.B.: Does not yet support linking the same TShader instance into
+// multiple programs.
+//
+// N.B.: Destruct a linked program *before* destructing the shaders linked into it.
+//
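+// A minimal usage sketch (illustrative; "source" is client-provided GLSL text
+// and "resources" a client-filled TBuiltInResource):
+//
+//     glslang::InitializeProcess();
+//     glslang::TShader shader(EShLangFragment);
+//     shader.setStrings(&source, 1);
+//     if (!shader.parse(&resources, 110, false, EShMsgDefault))
+//         printf("%s\n", shader.getInfoLog());
+//     // ... add to a TProgram and link (see TProgram below), then:
+//     glslang::FinalizeProcess();
+//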
+class TShader {
+public:
+ explicit TShader(EShLanguage);
+ virtual ~TShader();
+ void setStrings(const char* const* s, int n);
+ void setStringsWithLengths(const char* const* s, const int* l, int n);
+ void setStringsWithLengthsAndNames(
+ const char* const* s, const int* l, const char* const* names, int n);
+ void setPreamble(const char* s) { preamble = s; }
+ void setEntryPoint(const char* entryPoint);
+ void setSourceEntryPoint(const char* sourceEntryPointName);
+ void addProcesses(const std::vector<std::string>&);
+
+ // IO resolver binding data: see comments in ShaderLang.cpp
+ void setShiftBinding(TResourceType res, unsigned int base);
+ void setShiftSamplerBinding(unsigned int base); // DEPRECATED: use setShiftBinding
+ void setShiftTextureBinding(unsigned int base); // DEPRECATED: use setShiftBinding
+ void setShiftImageBinding(unsigned int base); // DEPRECATED: use setShiftBinding
+ void setShiftUboBinding(unsigned int base); // DEPRECATED: use setShiftBinding
+ void setShiftUavBinding(unsigned int base); // DEPRECATED: use setShiftBinding
+ void setShiftCbufferBinding(unsigned int base); // synonym for setShiftUboBinding
+ void setShiftSsboBinding(unsigned int base); // DEPRECATED: use setShiftBinding
+ void setShiftBindingForSet(TResourceType res, unsigned int base, unsigned int set);
+ void setResourceSetBinding(const std::vector<std::string>& base);
+ void setAutoMapBindings(bool map);
+ void setAutoMapLocations(bool map);
+ void addUniformLocationOverride(const char* name, int loc);
+ void setUniformLocationBase(int base);
+ void setInvertY(bool invert);
+ void setHlslIoMapping(bool hlslIoMap);
+ void setFlattenUniformArrays(bool flatten);
+ void setNoStorageFormat(bool useUnknownFormat);
+ void setTextureSamplerTransformMode(EShTextureSamplerTransformMode mode);
+
+ // For setting up the environment (cleared to nothingness in the constructor).
+ // These must be called so that parsing is done for the right source language and
+ // target environment, either indirectly through TranslateEnvironment() based on
+// EShMessages et al., or directly by the user.
+ void setEnvInput(EShSource lang, EShLanguage envStage, EShClient client, int version)
+ {
+ environment.input.languageFamily = lang;
+ environment.input.stage = envStage;
+ environment.input.dialect = client;
+ environment.input.dialectVersion = version;
+ }
+ void setEnvClient(EShClient client, EShTargetClientVersion version)
+ {
+ environment.client.client = client;
+ environment.client.version = version;
+ }
+ void setEnvTarget(EShTargetLanguage lang, EShTargetLanguageVersion version)
+ {
+ environment.target.language = lang;
+ environment.target.version = version;
+ }
+ void setEnvTargetHlslFunctionality1() { environment.target.hlslFunctionality1 = true; }
+ bool getEnvTargetHlslFunctionality1() const { return environment.target.hlslFunctionality1; }
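+
+ // For example, to parse Vulkan-flavored GLSL and target SPIR-V 1.0, a client
+ // might make these calls (an illustrative sketch; the enum values are
+ // declared earlier in this header):
+ //
+ //     shader.setEnvInput(EShSourceGlsl, EShLangVertex, EShClientVulkan, 100);
+ //     shader.setEnvClient(EShClientVulkan, EShTargetVulkan_1_0);
+ //     shader.setEnvTarget(EShTargetSpv, EShTargetSpv_1_0);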
+
+ // Interface to #include handlers.
+ //
+ // To support #include, a client of Glslang does the following:
+ // 1. Call setStringsWithNames to set the source strings and associated
+ // names. For example, the names could be the names of the files
+ // containing the shader sources.
+ // 2. Call parse with an Includer.
+ //
+ // When the Glslang parser encounters an #include directive, it calls
+ // the Includer's include method with the requested include name
+ // together with the current string name. The returned IncludeResult
+ // contains the fully resolved name of the included source, together
+ // with the source text that should replace the #include directive
+ // in the source stream. After parsing that source, Glslang will
+ // release the IncludeResult object.
+ class Includer {
+ public:
+ // An IncludeResult contains the resolved name and content of a source
+ // inclusion.
+ struct IncludeResult {
+ IncludeResult(const std::string& headerName, const char* const headerData, const size_t headerLength, void* userData) :
+ headerName(headerName), headerData(headerData), headerLength(headerLength), userData(userData) { }
+ // For a successful inclusion, the fully resolved name of the requested
+ // include. For example, in a file system-based includer, full resolution
+ // should convert a relative path name into an absolute path name.
+ // For a failed inclusion, this is an empty string.
+ const std::string headerName;
+ // The content and byte length of the requested inclusion. The
+ // Includer producing this IncludeResult retains ownership of the
+ // storage.
+ // For a failed inclusion, the headerData
+ // field points to a string containing error details.
+ const char* const headerData;
+ const size_t headerLength;
+ // Include resolver's context.
+ void* userData;
+ protected:
+ IncludeResult& operator=(const IncludeResult&);
+ IncludeResult();
+ };
+
+ // For both include methods below:
+ //
+ // Resolves an inclusion request by name, current source name,
+ // and include depth.
+ // On success, returns an IncludeResult containing the resolved name
+ // and content of the include.
+ // On failure, returns either nullptr or an IncludeResult
+ // with an empty string for headerName and error details in the
+ // headerData field.
+ // The Includer retains ownership of the contents
+ // of the returned IncludeResult value, and those contents must
+ // remain valid until the releaseInclude method is called on that
+ // IncludeResult object.
+ //
+ // Note "local" vs. "system" is not an "either/or": "local" is an
+ // extra thing to do over "system". Both might get called, as per
+ // the C++ specification.
+
+ // For the "system" or <>-style includes; search the "system" paths.
+ virtual IncludeResult* includeSystem(const char* /*headerName*/,
+ const char* /*includerName*/,
+ size_t /*inclusionDepth*/) { return nullptr; }
+
+ // For the "local"-only aspect of a "" include. Should not search in the
+ // "system" paths, because on returning a failure, the parser will
+ // call includeSystem() to look in the "system" locations.
+ virtual IncludeResult* includeLocal(const char* /*headerName*/,
+ const char* /*includerName*/,
+ size_t /*inclusionDepth*/) { return nullptr; }
+
+ // Signals that the parser will no longer use the contents of the
+ // specified IncludeResult.
+ virtual void releaseInclude(IncludeResult*) = 0;
+ virtual ~Includer() {}
+ };
+
+ // Fail all Includer searches
+ class ForbidIncluder : public Includer {
+ public:
+ virtual void releaseInclude(IncludeResult*) override { }
+ };
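+
+ // A sketch of a minimal file-based Includer (illustrative; ReadFile is a
+ // hypothetical helper returning a heap-allocated std::string, or nullptr):
+ //
+ //     class FileIncluder : public glslang::TShader::Includer {
+ //     public:
+ //         IncludeResult* includeLocal(const char* headerName, const char*, size_t) override {
+ //             std::string* contents = ReadFile(headerName); // hypothetical helper
+ //             if (contents == nullptr)
+ //                 return nullptr; // let the parser fall back to includeSystem()
+ //             return new IncludeResult(headerName, contents->data(), contents->size(), contents);
+ //         }
+ //         void releaseInclude(IncludeResult* result) override {
+ //             if (result != nullptr) {
+ //                 delete static_cast<std::string*>(result->userData);
+ //                 delete result;
+ //             }
+ //         }
+ //     };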
+
+ bool parse(const TBuiltInResource*, int defaultVersion, EProfile defaultProfile, bool forceDefaultVersionAndProfile,
+ bool forwardCompatible, EShMessages, Includer&);
+
+ bool parse(const TBuiltInResource* res, int defaultVersion, EProfile defaultProfile, bool forceDefaultVersionAndProfile,
+ bool forwardCompatible, EShMessages messages)
+ {
+ TShader::ForbidIncluder includer;
+ return parse(res, defaultVersion, defaultProfile, forceDefaultVersionAndProfile, forwardCompatible, messages, includer);
+ }
+
+ // Equivalent to parse() without a default profile and without forcing defaults.
+ bool parse(const TBuiltInResource* builtInResources, int defaultVersion, bool forwardCompatible, EShMessages messages)
+ {
+ return parse(builtInResources, defaultVersion, ENoProfile, false, forwardCompatible, messages);
+ }
+
+ bool parse(const TBuiltInResource* builtInResources, int defaultVersion, bool forwardCompatible, EShMessages messages,
+ Includer& includer)
+ {
+ return parse(builtInResources, defaultVersion, ENoProfile, false, forwardCompatible, messages, includer);
+ }
+
+ // NOTE: Doing just preprocessing to obtain a correct preprocessed shader string
+ // is not an officially supported or fully working path.
+ bool preprocess(const TBuiltInResource* builtInResources,
+ int defaultVersion, EProfile defaultProfile, bool forceDefaultVersionAndProfile,
+ bool forwardCompatible, EShMessages message, std::string* outputString,
+ Includer& includer);
+
+ const char* getInfoLog();
+ const char* getInfoDebugLog();
+ EShLanguage getStage() const { return stage; }
+ TIntermediate* getIntermediate() const { return intermediate; }
+
+protected:
+ TPoolAllocator* pool;
+ EShLanguage stage;
+ TCompiler* compiler;
+ TIntermediate* intermediate;
+ TInfoSink* infoSink;
+ // strings and lengths follow the standard for glShaderSource:
+ // strings is an array of numStrings pointers to string data.
+ // lengths can be null, but if not it is an array of numStrings
+ // integers containing the length of the associated strings.
+ // if lengths is null or lengths[n] < 0 the associated strings[n] is
+ // assumed to be null-terminated.
+ // stringNames holds the optional names for the strings. If stringNames
+ // is null, then none of the strings has a name. If an element of
+ // stringNames is null, then the corresponding string has no name.
+ const char* const* strings;
+ const int* lengths;
+ const char* const* stringNames;
+ const char* preamble;
+ int numStrings;
+
+ // a function in the source string can be renamed FROM this TO the name given in setEntryPoint.
+ std::string sourceEntryPointName;
+
+ TEnvironment environment;
+
+ friend class TProgram;
+
+private:
+ TShader& operator=(TShader&);
+};
+
+//
+// A reflection database and its interface, consistent with the OpenGL API reflection queries.
+//
+
+// Data needed for just a single object at the granularity exchanged by the reflection API
+class TObjectReflection {
+public:
+ TObjectReflection(const std::string& pName, const TType& pType, int pOffset, int pGLDefineType, int pSize, int pIndex);
+
+ const TType* getType() const { return type; }
+ int getBinding() const;
+ void dump() const;
+ static TObjectReflection badReflection() { return TObjectReflection(); }
+
+ std::string name;
+ int offset;
+ int glDefineType;
+ int size; // data size in bytes for a block, array size for a (non-block) object that's an array
+ int index;
+ int counterIndex;
+ int numMembers;
+ int arrayStride; // stride of an array variable
+ int topLevelArrayStride; // stride of the top-level variable in a storage buffer member
+ EShLanguageMask stages;
+
+protected:
+ TObjectReflection()
+ : offset(-1), glDefineType(-1), size(-1), index(-1), counterIndex(-1), numMembers(-1), arrayStride(0),
+ topLevelArrayStride(0), stages(EShLanguageMask(0)), type(nullptr)
+ {
+ }
+
+ const TType* type;
+};
+
+class TReflection;
+class TIoMapper;
+
+// Allows the client to customize the binding layout after linking.
+// All used uniform variables will invoke at least validateBinding.
+// If validateBinding returns true, then resolveBinding, resolveSet,
+// and resolveUniformLocation are invoked to resolve the binding,
+// descriptor set index, and location, respectively.
+//
+// Invocations happen in a particular order:
+// 1) all shader inputs
+// 2) all shader outputs
+// 3) all uniforms with binding and set already defined
+// 4) all uniforms with binding but no set defined
+// 5) all uniforms with set but no binding defined
+// 6) all uniforms with no binding and no set defined
+//
+// mapIO will use this resolver in two phases. The first
+// phase is a notification phase, calling the corresponding
+// notify callbacks; this phase ends with a call to endNotifications.
+// Phase two starts directly after the call to endNotifications
+// and calls all other callbacks to validate and to get the
+// bindings, sets, locations, component and color indices.
+//
+// NOTE: Limit checks are still applied to bindings and sets,
+// and may result in an error.
+class TIoMapResolver
+{
+public:
+ virtual ~TIoMapResolver() {}
+
+ // Should return true if the resulting/current binding would be okay.
+ // Basic idea is to do aliasing binding checks with this.
+ virtual bool validateBinding(EShLanguage stage, const char* name, const TType& type, bool is_live) = 0;
+ // Should return a value >= 0 if the current binding should be overridden.
+ // Return -1 if the current binding (including no binding) should be kept.
+ virtual int resolveBinding(EShLanguage stage, const char* name, const TType& type, bool is_live) = 0;
+ // Should return a value >= 0 if the current set should be overridden.
+ // Return -1 if the current set (including no set) should be kept.
+ virtual int resolveSet(EShLanguage stage, const char* name, const TType& type, bool is_live) = 0;
+ // Should return a value >= 0 if the current location should be overridden.
+ // Return -1 if the current location (including no location) should be kept.
+ virtual int resolveUniformLocation(EShLanguage stage, const char* name, const TType& type, bool is_live) = 0;
+ // Should return true if the resulting/current setup would be okay.
+ // Basic idea is to do aliasing checks and reject invalid semantic names.
+ virtual bool validateInOut(EShLanguage stage, const char* name, const TType& type, bool is_live) = 0;
+ // Should return a value >= 0 if the current location should be overridden.
+ // Return -1 if the current location (including no location) should be kept.
+ virtual int resolveInOutLocation(EShLanguage stage, const char* name, const TType& type, bool is_live) = 0;
+ // Should return a value >= 0 if the current component index should be overridden.
+ // Return -1 if the current component index (including no index) should be kept.
+ virtual int resolveInOutComponent(EShLanguage stage, const char* name, const TType& type, bool is_live) = 0;
+ // Should return a value >= 0 if the current color index should be overridden.
+ // Return -1 if the current color index (including no index) should be kept.
+ virtual int resolveInOutIndex(EShLanguage stage, const char* name, const TType& type, bool is_live) = 0;
+ // Notification of a uniform variable
+ virtual void notifyBinding(EShLanguage stage, const char* name, const TType& type, bool is_live) = 0;
+ // Notification of an in or out variable
+ virtual void notifyInOut(EShLanguage stage, const char* name, const TType& type, bool is_live) = 0;
+ // Called by mapIO when it has finished the notify pass
+ virtual void endNotifications(EShLanguage stage) = 0;
+ // Called by mapIO when it starts its notify pass for the given stage
+ virtual void beginNotifications(EShLanguage stage) = 0;
+ // Called by mapIO when it starts its resolve pass for the given stage
+ virtual void beginResolve(EShLanguage stage) = 0;
+ // Called by mapIO when it has finished the resolve pass
+ virtual void endResolve(EShLanguage stage) = 0;
+};
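+
+// A pass-through resolver sketch that keeps every existing binding, set, and
+// location (illustrative; every pure virtual above must be overridden):
+//
+//     class KeepEverythingResolver : public glslang::TIoMapResolver {
+//     public:
+//         bool validateBinding(EShLanguage, const char*, const glslang::TType&, bool) override { return true; }
+//         int resolveBinding(EShLanguage, const char*, const glslang::TType&, bool) override { return -1; }
+//         int resolveSet(EShLanguage, const char*, const glslang::TType&, bool) override { return -1; }
+//         int resolveUniformLocation(EShLanguage, const char*, const glslang::TType&, bool) override { return -1; }
+//         bool validateInOut(EShLanguage, const char*, const glslang::TType&, bool) override { return true; }
+//         int resolveInOutLocation(EShLanguage, const char*, const glslang::TType&, bool) override { return -1; }
+//         int resolveInOutComponent(EShLanguage, const char*, const glslang::TType&, bool) override { return -1; }
+//         int resolveInOutIndex(EShLanguage, const char*, const glslang::TType&, bool) override { return -1; }
+//         void notifyBinding(EShLanguage, const char*, const glslang::TType&, bool) override { }
+//         void notifyInOut(EShLanguage, const char*, const glslang::TType&, bool) override { }
+//         void beginNotifications(EShLanguage) override { }
+//         void endNotifications(EShLanguage) override { }
+//         void beginResolve(EShLanguage) override { }
+//         void endResolve(EShLanguage) override { }
+//     };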
+
+// Make one TProgram per set of shaders that will get linked together. Add all
+// the shaders that are to be linked together. After calling shader.parse()
+// for all shaders, call link().
+//
+// N.B.: Destruct a linked program *before* destructing the shaders linked into it.
+//
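+// A minimal link-and-reflect sketch (illustrative; assumes "shader" was
+// parsed successfully, as in the TShader sketch above):
+//
+//     glslang::TProgram program;
+//     program.addShader(&shader);
+//     if (!program.link(EShMsgDefault))
+//         printf("%s\n", program.getInfoLog());
+//     else if (program.buildReflection())
+//         program.dumpReflection();
+//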
+class TProgram {
+public:
+ TProgram();
+ virtual ~TProgram();
+ void addShader(TShader* shader) { stages[shader->stage].push_back(shader); }
+
+ // Link Validation interface
+ bool link(EShMessages);
+ const char* getInfoLog();
+ const char* getInfoDebugLog();
+
+ TIntermediate* getIntermediate(EShLanguage stage) const { return intermediate[stage]; }
+
+ // Reflection Interface
+
+ // call first, to do liveness analysis, index mapping, etc.; returns false on failure
+ bool buildReflection(int opts = EShReflectionDefault);
+
+ unsigned getLocalSize(int dim) const; // return dim'th local size
+ int getReflectionIndex(const char *name) const;
+
+ int getNumUniformVariables() const;
+ const TObjectReflection& getUniform(int index) const;
+ int getNumUniformBlocks() const;
+ const TObjectReflection& getUniformBlock(int index) const;
+ int getNumPipeInputs() const;
+ const TObjectReflection& getPipeInput(int index) const;
+ int getNumPipeOutputs() const;
+ const TObjectReflection& getPipeOutput(int index) const;
+ int getNumBufferVariables() const;
+ const TObjectReflection& getBufferVariable(int index) const;
+ int getNumBufferBlocks() const;
+ const TObjectReflection& getBufferBlock(int index) const;
+ int getNumAtomicCounters() const;
+ const TObjectReflection& getAtomicCounter(int index) const;
+
+ // Legacy Reflection Interface - expressed in terms of above interface
+
+ // can be used for glGetProgramiv(GL_ACTIVE_UNIFORMS)
+ int getNumLiveUniformVariables() const { return getNumUniformVariables(); }
+
+ // can be used for glGetProgramiv(GL_ACTIVE_UNIFORM_BLOCKS)
+ int getNumLiveUniformBlocks() const { return getNumUniformBlocks(); }
+
+ // can be used for glGetProgramiv(GL_ACTIVE_ATTRIBUTES)
+ int getNumLiveAttributes() const { return getNumPipeInputs(); }
+
+ // can be used for glGetUniformIndices()
+ int getUniformIndex(const char *name) const { return getReflectionIndex(name); }
+
+ // can be used for "name" part of glGetActiveUniform()
+ const char *getUniformName(int index) const { return getUniform(index).name.c_str(); }
+
+ // returns the binding number
+ int getUniformBinding(int index) const { return getUniform(index).getBinding(); }
+
+ // returns the shader stages in which the uniform is present
+ EShLanguageMask getUniformStages(int index) const { return getUniform(index).stages; }
+
+ // can be used for glGetActiveUniformsiv(GL_UNIFORM_BLOCK_INDEX)
+ int getUniformBlockIndex(int index) const { return getUniform(index).index; }
+
+ // can be used for glGetActiveUniformsiv(GL_UNIFORM_TYPE)
+ int getUniformType(int index) const { return getUniform(index).glDefineType; }
+
+ // can be used for glGetActiveUniformsiv(GL_UNIFORM_OFFSET)
+ int getUniformBufferOffset(int index) const { return getUniform(index).offset; }
+
+ // can be used for glGetActiveUniformsiv(GL_UNIFORM_SIZE)
+ int getUniformArraySize(int index) const { return getUniform(index).size; }
+
+ // returns a TType*
+ const TType *getUniformTType(int index) const { return getUniform(index).getType(); }
+
+ // can be used for glGetActiveUniformBlockName()
+ const char *getUniformBlockName(int index) const { return getUniformBlock(index).name.c_str(); }
+
+ // can be used for glGetActiveUniformBlockiv(UNIFORM_BLOCK_DATA_SIZE)
+ int getUniformBlockSize(int index) const { return getUniformBlock(index).size; }
+
+ // returns the block binding number
+ int getUniformBlockBinding(int index) const { return getUniformBlock(index).getBinding(); }
+
+ // returns the block index of the associated counter.
+ int getUniformBlockCounterIndex(int index) const { return getUniformBlock(index).counterIndex; }
+
+ // returns a TType*
+ const TType *getUniformBlockTType(int index) const { return getUniformBlock(index).getType(); }
+
+ // can be used for glGetActiveAttrib()
+ const char *getAttributeName(int index) const { return getPipeInput(index).name.c_str(); }
+
+ // can be used for glGetActiveAttrib()
+ int getAttributeType(int index) const { return getPipeInput(index).glDefineType; }
+
+ // returns a TType*
+ const TType *getAttributeTType(int index) const { return getPipeInput(index).getType(); }
+
+ void dumpReflection();
+
+ // I/O mapping: apply base offsets and map live unbound variables.
+ // If no resolver is provided, the previous approach is used,
+ // respecting auto assignment and offsets.
+ bool mapIO(TIoMapResolver* resolver = NULL);
+
+protected:
+ bool linkStage(EShLanguage, EShMessages);
+
+ TPoolAllocator* pool;
+ std::list<TShader*> stages[EShLangCount];
+ TIntermediate* intermediate[EShLangCount];
+ bool newedIntermediate[EShLangCount]; // track which intermediates were newly allocated versus reusing a stage's singleton unit
+ TInfoSink* infoSink;
+ TReflection* reflection;
+ TIoMapper* ioMapper;
+ bool linked;
+
+private:
+ TProgram(TProgram&);
+ TProgram& operator=(TProgram&);
+};
+
+} // end namespace glslang
+
+#endif // _COMPILER_INTERFACE_INCLUDED_