Diffstat (limited to 'drivers/vulkan')
-rw-r--r--  drivers/vulkan/SCsub                          75
-rw-r--r--  drivers/vulkan/rendering_device_vulkan.cpp  7071
-rw-r--r--  drivers/vulkan/rendering_device_vulkan.h    1129
-rw-r--r--  drivers/vulkan/vulkan_context.cpp           1517
-rw-r--r--  drivers/vulkan/vulkan_context.h              212
5 files changed, 10004 insertions, 0 deletions
diff --git a/drivers/vulkan/SCsub b/drivers/vulkan/SCsub
new file mode 100644
index 0000000000..85a5ae8d26
--- /dev/null
+++ b/drivers/vulkan/SCsub
@@ -0,0 +1,75 @@
+#!/usr/bin/env python
+
+Import('env')
+
+env.add_source_files(env.drivers_sources, "*.cpp")
+
+if env['builtin_vulkan']:
+    # Use bundled Vulkan headers
+    thirdparty_dir = "#thirdparty/vulkan"
+    env.Prepend(CPPPATH=[thirdparty_dir, thirdparty_dir + "/include", thirdparty_dir + "/loader"])
+
+    # Build Vulkan loader library
+    env_thirdparty = env.Clone()
+    env_thirdparty.disable_warnings()
+
+    loader_sources = [
+        "cJSON.c",
+        "debug_utils.c",
+        "dev_ext_trampoline.c",
+        "loader.c",
+        "murmurhash.c",
+        "phys_dev_ext.c",
+        "trampoline.c",
+        "unknown_ext_chain.c",
+        "wsi.c",
+        "extension_manual.c",
+    ]
+    vma_sources = [thirdparty_dir + "/vk_mem_alloc.cpp"]
+
+    if env['platform'] == "windows":
+        loader_sources.append("dirent_on_windows.c")
+        loader_sources.append("dxgi_loader.c")
+        env_thirdparty.AppendUnique(CPPDEFINES=[
+            'VK_USE_PLATFORM_WIN32_KHR',
+            'VULKAN_NON_CMAKE_BUILD',
+            'WIN32_LEAN_AND_MEAN',
+            'API_NAME=\\"%s\\"' % 'Vulkan'
+        ])
+        if not env.msvc:  # Windows 7+, missing in mingw headers
+            env_thirdparty.AppendUnique(CPPDEFINES=[
+                "CM_GETIDLIST_FILTER_CLASS=0x00000200",
+                "CM_GETIDLIST_FILTER_PRESENT=0x00000100"
+            ])
+    elif env['platform'] == "osx":
+        env_thirdparty.AppendUnique(CPPDEFINES=[
+            'VK_USE_PLATFORM_MACOS_MVK',
+            'VULKAN_NON_CMAKE_BUILD',
+            'SYSCONFDIR=\\"%s\\"' % '/etc',
+            'FALLBACK_DATA_DIRS=\\"%s\\"' % '/usr/local/share:/usr/share',
+            'FALLBACK_CONFIG_DIRS=\\"%s\\"' % '/etc/xdg'
+        ])
+    elif env['platform'] == "iphone":
+        env_thirdparty.AppendUnique(CPPDEFINES=[
+            'VK_USE_PLATFORM_IOS_MVK',
+            'VULKAN_NON_CMAKE_BUILD',
+            'SYSCONFDIR=\\"%s\\"' % '/etc',
+            'FALLBACK_DATA_DIRS=\\"%s\\"' % '/usr/local/share:/usr/share',
+            'FALLBACK_CONFIG_DIRS=\\"%s\\"' % '/etc/xdg'
+        ])
+    elif env['platform'] == "x11":
+        env_thirdparty.AppendUnique(CPPDEFINES=[
+            'VK_USE_PLATFORM_XLIB_KHR',
+            'VULKAN_NON_CMAKE_BUILD',
+            'SYSCONFDIR=\\"%s\\"' % '/etc',
+            'FALLBACK_DATA_DIRS=\\"%s\\"' % '/usr/local/share:/usr/share',
+            'FALLBACK_CONFIG_DIRS=\\"%s\\"' % '/etc/xdg'
+        ])
+    import platform
+    if platform.system() == "Linux":
+        # In glibc since 2.17 and musl libc since 1.1.24. Used by loader.c.
+        env_thirdparty.AppendUnique(CPPDEFINES=['HAVE_SECURE_GETENV'])
+
+    loader_sources = [thirdparty_dir + "/loader/" + file for file in loader_sources]
+    env_thirdparty.add_source_files(env.drivers_sources, loader_sources)
+    env_thirdparty.add_source_files(env.drivers_sources, vma_sources)
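+
+# When 'builtin_vulkan' is false, only the driver sources above are compiled;
+# the Vulkan headers and loader are then presumably provided by the system.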
diff --git a/drivers/vulkan/rendering_device_vulkan.cpp b/drivers/vulkan/rendering_device_vulkan.cpp
new file mode 100644
index 0000000000..2bf8a16091
--- /dev/null
+++ b/drivers/vulkan/rendering_device_vulkan.cpp
@@ -0,0 +1,7071 @@
+/*************************************************************************/
+/* rendering_device_vulkan.cpp */
+/*************************************************************************/
+/* This file is part of: */
+/* GODOT ENGINE */
+/* https://godotengine.org */
+/*************************************************************************/
+/* Copyright (c) 2007-2020 Juan Linietsky, Ariel Manzur. */
+/* Copyright (c) 2014-2020 Godot Engine contributors (cf. AUTHORS.md). */
+/* */
+/* Permission is hereby granted, free of charge, to any person obtaining */
+/* a copy of this software and associated documentation files (the */
+/* "Software"), to deal in the Software without restriction, including */
+/* without limitation the rights to use, copy, modify, merge, publish, */
+/* distribute, sublicense, and/or sell copies of the Software, and to */
+/* permit persons to whom the Software is furnished to do so, subject to */
+/* the following conditions: */
+/* */
+/* The above copyright notice and this permission notice shall be */
+/* included in all copies or substantial portions of the Software. */
+/* */
+/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
+/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
+/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/
+/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
+/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
+/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
+/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
+/*************************************************************************/
+
+#include "rendering_device_vulkan.h"
+#include "core/hashfuncs.h"
+#include "core/os/file_access.h"
+#include "core/os/os.h"
+#include "core/project_settings.h"
+#include "drivers/vulkan/vulkan_context.h"
+#include "thirdparty/spirv-reflect/spirv_reflect.h"
+
+//#define FORCE_FULL_BARRIER
+
+void RenderingDeviceVulkan::_add_dependency(RID p_id, RID p_depends_on) {
+
+ if (!dependency_map.has(p_depends_on)) {
+ dependency_map[p_depends_on] = Set<RID>();
+ }
+
+ dependency_map[p_depends_on].insert(p_id);
+
+ if (!reverse_dependency_map.has(p_id)) {
+ reverse_dependency_map[p_id] = Set<RID>();
+ }
+
+ reverse_dependency_map[p_id].insert(p_depends_on);
+}
+
+void RenderingDeviceVulkan::_free_dependencies(RID p_id) {
+
+ //direct dependencies must be freed
+
+ Map<RID, Set<RID> >::Element *E = dependency_map.find(p_id);
+ if (E) {
+
+ while (E->get().size()) {
+ free(E->get().front()->get());
+ }
+ dependency_map.erase(E);
+ }
+
+ //reverse dependencies must be unreferenced
+ E = reverse_dependency_map.find(p_id);
+
+ if (E) {
+
+ for (Set<RID>::Element *F = E->get().front(); F; F = F->next()) {
+ Map<RID, Set<RID> >::Element *G = dependency_map.find(F->get());
+ ERR_CONTINUE(!G);
+ ERR_CONTINUE(!G->get().has(p_id));
+ G->get().erase(p_id);
+ }
+
+ reverse_dependency_map.erase(E);
+ }
+}
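+
+// Example of the bookkeeping above: creating, say, a framebuffer F from a
+// texture T calls _add_dependency(F, T), which puts F into dependency_map[T]
+// and T into reverse_dependency_map[F]. Freeing T then frees F first (a
+// direct dependent), while freeing F merely removes it from T's dependent set.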
+
+const VkFormat RenderingDeviceVulkan::vulkan_formats[RenderingDevice::DATA_FORMAT_MAX] = {
+ VK_FORMAT_R4G4_UNORM_PACK8,
+ VK_FORMAT_R4G4B4A4_UNORM_PACK16,
+ VK_FORMAT_B4G4R4A4_UNORM_PACK16,
+ VK_FORMAT_R5G6B5_UNORM_PACK16,
+ VK_FORMAT_B5G6R5_UNORM_PACK16,
+ VK_FORMAT_R5G5B5A1_UNORM_PACK16,
+ VK_FORMAT_B5G5R5A1_UNORM_PACK16,
+ VK_FORMAT_A1R5G5B5_UNORM_PACK16,
+ VK_FORMAT_R8_UNORM,
+ VK_FORMAT_R8_SNORM,
+ VK_FORMAT_R8_USCALED,
+ VK_FORMAT_R8_SSCALED,
+ VK_FORMAT_R8_UINT,
+ VK_FORMAT_R8_SINT,
+ VK_FORMAT_R8_SRGB,
+ VK_FORMAT_R8G8_UNORM,
+ VK_FORMAT_R8G8_SNORM,
+ VK_FORMAT_R8G8_USCALED,
+ VK_FORMAT_R8G8_SSCALED,
+ VK_FORMAT_R8G8_UINT,
+ VK_FORMAT_R8G8_SINT,
+ VK_FORMAT_R8G8_SRGB,
+ VK_FORMAT_R8G8B8_UNORM,
+ VK_FORMAT_R8G8B8_SNORM,
+ VK_FORMAT_R8G8B8_USCALED,
+ VK_FORMAT_R8G8B8_SSCALED,
+ VK_FORMAT_R8G8B8_UINT,
+ VK_FORMAT_R8G8B8_SINT,
+ VK_FORMAT_R8G8B8_SRGB,
+ VK_FORMAT_B8G8R8_UNORM,
+ VK_FORMAT_B8G8R8_SNORM,
+ VK_FORMAT_B8G8R8_USCALED,
+ VK_FORMAT_B8G8R8_SSCALED,
+ VK_FORMAT_B8G8R8_UINT,
+ VK_FORMAT_B8G8R8_SINT,
+ VK_FORMAT_B8G8R8_SRGB,
+ VK_FORMAT_R8G8B8A8_UNORM,
+ VK_FORMAT_R8G8B8A8_SNORM,
+ VK_FORMAT_R8G8B8A8_USCALED,
+ VK_FORMAT_R8G8B8A8_SSCALED,
+ VK_FORMAT_R8G8B8A8_UINT,
+ VK_FORMAT_R8G8B8A8_SINT,
+ VK_FORMAT_R8G8B8A8_SRGB,
+ VK_FORMAT_B8G8R8A8_UNORM,
+ VK_FORMAT_B8G8R8A8_SNORM,
+ VK_FORMAT_B8G8R8A8_USCALED,
+ VK_FORMAT_B8G8R8A8_SSCALED,
+ VK_FORMAT_B8G8R8A8_UINT,
+ VK_FORMAT_B8G8R8A8_SINT,
+ VK_FORMAT_B8G8R8A8_SRGB,
+ VK_FORMAT_A8B8G8R8_UNORM_PACK32,
+ VK_FORMAT_A8B8G8R8_SNORM_PACK32,
+ VK_FORMAT_A8B8G8R8_USCALED_PACK32,
+ VK_FORMAT_A8B8G8R8_SSCALED_PACK32,
+ VK_FORMAT_A8B8G8R8_UINT_PACK32,
+ VK_FORMAT_A8B8G8R8_SINT_PACK32,
+ VK_FORMAT_A8B8G8R8_SRGB_PACK32,
+ VK_FORMAT_A2R10G10B10_UNORM_PACK32,
+ VK_FORMAT_A2R10G10B10_SNORM_PACK32,
+ VK_FORMAT_A2R10G10B10_USCALED_PACK32,
+ VK_FORMAT_A2R10G10B10_SSCALED_PACK32,
+ VK_FORMAT_A2R10G10B10_UINT_PACK32,
+ VK_FORMAT_A2R10G10B10_SINT_PACK32,
+ VK_FORMAT_A2B10G10R10_UNORM_PACK32,
+ VK_FORMAT_A2B10G10R10_SNORM_PACK32,
+ VK_FORMAT_A2B10G10R10_USCALED_PACK32,
+ VK_FORMAT_A2B10G10R10_SSCALED_PACK32,
+ VK_FORMAT_A2B10G10R10_UINT_PACK32,
+ VK_FORMAT_A2B10G10R10_SINT_PACK32,
+ VK_FORMAT_R16_UNORM,
+ VK_FORMAT_R16_SNORM,
+ VK_FORMAT_R16_USCALED,
+ VK_FORMAT_R16_SSCALED,
+ VK_FORMAT_R16_UINT,
+ VK_FORMAT_R16_SINT,
+ VK_FORMAT_R16_SFLOAT,
+ VK_FORMAT_R16G16_UNORM,
+ VK_FORMAT_R16G16_SNORM,
+ VK_FORMAT_R16G16_USCALED,
+ VK_FORMAT_R16G16_SSCALED,
+ VK_FORMAT_R16G16_UINT,
+ VK_FORMAT_R16G16_SINT,
+ VK_FORMAT_R16G16_SFLOAT,
+ VK_FORMAT_R16G16B16_UNORM,
+ VK_FORMAT_R16G16B16_SNORM,
+ VK_FORMAT_R16G16B16_USCALED,
+ VK_FORMAT_R16G16B16_SSCALED,
+ VK_FORMAT_R16G16B16_UINT,
+ VK_FORMAT_R16G16B16_SINT,
+ VK_FORMAT_R16G16B16_SFLOAT,
+ VK_FORMAT_R16G16B16A16_UNORM,
+ VK_FORMAT_R16G16B16A16_SNORM,
+ VK_FORMAT_R16G16B16A16_USCALED,
+ VK_FORMAT_R16G16B16A16_SSCALED,
+ VK_FORMAT_R16G16B16A16_UINT,
+ VK_FORMAT_R16G16B16A16_SINT,
+ VK_FORMAT_R16G16B16A16_SFLOAT,
+ VK_FORMAT_R32_UINT,
+ VK_FORMAT_R32_SINT,
+ VK_FORMAT_R32_SFLOAT,
+ VK_FORMAT_R32G32_UINT,
+ VK_FORMAT_R32G32_SINT,
+ VK_FORMAT_R32G32_SFLOAT,
+ VK_FORMAT_R32G32B32_UINT,
+ VK_FORMAT_R32G32B32_SINT,
+ VK_FORMAT_R32G32B32_SFLOAT,
+ VK_FORMAT_R32G32B32A32_UINT,
+ VK_FORMAT_R32G32B32A32_SINT,
+ VK_FORMAT_R32G32B32A32_SFLOAT,
+ VK_FORMAT_R64_UINT,
+ VK_FORMAT_R64_SINT,
+ VK_FORMAT_R64_SFLOAT,
+ VK_FORMAT_R64G64_UINT,
+ VK_FORMAT_R64G64_SINT,
+ VK_FORMAT_R64G64_SFLOAT,
+ VK_FORMAT_R64G64B64_UINT,
+ VK_FORMAT_R64G64B64_SINT,
+ VK_FORMAT_R64G64B64_SFLOAT,
+ VK_FORMAT_R64G64B64A64_UINT,
+ VK_FORMAT_R64G64B64A64_SINT,
+ VK_FORMAT_R64G64B64A64_SFLOAT,
+ VK_FORMAT_B10G11R11_UFLOAT_PACK32,
+ VK_FORMAT_E5B9G9R9_UFLOAT_PACK32,
+ VK_FORMAT_D16_UNORM,
+ VK_FORMAT_X8_D24_UNORM_PACK32,
+ VK_FORMAT_D32_SFLOAT,
+ VK_FORMAT_S8_UINT,
+ VK_FORMAT_D16_UNORM_S8_UINT,
+ VK_FORMAT_D24_UNORM_S8_UINT,
+ VK_FORMAT_D32_SFLOAT_S8_UINT,
+ VK_FORMAT_BC1_RGB_UNORM_BLOCK,
+ VK_FORMAT_BC1_RGB_SRGB_BLOCK,
+ VK_FORMAT_BC1_RGBA_UNORM_BLOCK,
+ VK_FORMAT_BC1_RGBA_SRGB_BLOCK,
+ VK_FORMAT_BC2_UNORM_BLOCK,
+ VK_FORMAT_BC2_SRGB_BLOCK,
+ VK_FORMAT_BC3_UNORM_BLOCK,
+ VK_FORMAT_BC3_SRGB_BLOCK,
+ VK_FORMAT_BC4_UNORM_BLOCK,
+ VK_FORMAT_BC4_SNORM_BLOCK,
+ VK_FORMAT_BC5_UNORM_BLOCK,
+ VK_FORMAT_BC5_SNORM_BLOCK,
+ VK_FORMAT_BC6H_UFLOAT_BLOCK,
+ VK_FORMAT_BC6H_SFLOAT_BLOCK,
+ VK_FORMAT_BC7_UNORM_BLOCK,
+ VK_FORMAT_BC7_SRGB_BLOCK,
+ VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK,
+ VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK,
+ VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK,
+ VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK,
+ VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK,
+ VK_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK,
+ VK_FORMAT_EAC_R11_UNORM_BLOCK,
+ VK_FORMAT_EAC_R11_SNORM_BLOCK,
+ VK_FORMAT_EAC_R11G11_UNORM_BLOCK,
+ VK_FORMAT_EAC_R11G11_SNORM_BLOCK,
+ VK_FORMAT_ASTC_4x4_UNORM_BLOCK,
+ VK_FORMAT_ASTC_4x4_SRGB_BLOCK,
+ VK_FORMAT_ASTC_5x4_UNORM_BLOCK,
+ VK_FORMAT_ASTC_5x4_SRGB_BLOCK,
+ VK_FORMAT_ASTC_5x5_UNORM_BLOCK,
+ VK_FORMAT_ASTC_5x5_SRGB_BLOCK,
+ VK_FORMAT_ASTC_6x5_UNORM_BLOCK,
+ VK_FORMAT_ASTC_6x5_SRGB_BLOCK,
+ VK_FORMAT_ASTC_6x6_UNORM_BLOCK,
+ VK_FORMAT_ASTC_6x6_SRGB_BLOCK,
+ VK_FORMAT_ASTC_8x5_UNORM_BLOCK,
+ VK_FORMAT_ASTC_8x5_SRGB_BLOCK,
+ VK_FORMAT_ASTC_8x6_UNORM_BLOCK,
+ VK_FORMAT_ASTC_8x6_SRGB_BLOCK,
+ VK_FORMAT_ASTC_8x8_UNORM_BLOCK,
+ VK_FORMAT_ASTC_8x8_SRGB_BLOCK,
+ VK_FORMAT_ASTC_10x5_UNORM_BLOCK,
+ VK_FORMAT_ASTC_10x5_SRGB_BLOCK,
+ VK_FORMAT_ASTC_10x6_UNORM_BLOCK,
+ VK_FORMAT_ASTC_10x6_SRGB_BLOCK,
+ VK_FORMAT_ASTC_10x8_UNORM_BLOCK,
+ VK_FORMAT_ASTC_10x8_SRGB_BLOCK,
+ VK_FORMAT_ASTC_10x10_UNORM_BLOCK,
+ VK_FORMAT_ASTC_10x10_SRGB_BLOCK,
+ VK_FORMAT_ASTC_12x10_UNORM_BLOCK,
+ VK_FORMAT_ASTC_12x10_SRGB_BLOCK,
+ VK_FORMAT_ASTC_12x12_UNORM_BLOCK,
+ VK_FORMAT_ASTC_12x12_SRGB_BLOCK,
+ VK_FORMAT_G8B8G8R8_422_UNORM,
+ VK_FORMAT_B8G8R8G8_422_UNORM,
+ VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM,
+ VK_FORMAT_G8_B8R8_2PLANE_420_UNORM,
+ VK_FORMAT_G8_B8_R8_3PLANE_422_UNORM,
+ VK_FORMAT_G8_B8R8_2PLANE_422_UNORM,
+ VK_FORMAT_G8_B8_R8_3PLANE_444_UNORM,
+ VK_FORMAT_R10X6_UNORM_PACK16,
+ VK_FORMAT_R10X6G10X6_UNORM_2PACK16,
+ VK_FORMAT_R10X6G10X6B10X6A10X6_UNORM_4PACK16,
+ VK_FORMAT_G10X6B10X6G10X6R10X6_422_UNORM_4PACK16,
+ VK_FORMAT_B10X6G10X6R10X6G10X6_422_UNORM_4PACK16,
+ VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_420_UNORM_3PACK16,
+ VK_FORMAT_G10X6_B10X6R10X6_2PLANE_420_UNORM_3PACK16,
+ VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_422_UNORM_3PACK16,
+ VK_FORMAT_G10X6_B10X6R10X6_2PLANE_422_UNORM_3PACK16,
+ VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_444_UNORM_3PACK16,
+ VK_FORMAT_R12X4_UNORM_PACK16,
+ VK_FORMAT_R12X4G12X4_UNORM_2PACK16,
+ VK_FORMAT_R12X4G12X4B12X4A12X4_UNORM_4PACK16,
+ VK_FORMAT_G12X4B12X4G12X4R12X4_422_UNORM_4PACK16,
+ VK_FORMAT_B12X4G12X4R12X4G12X4_422_UNORM_4PACK16,
+ VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_420_UNORM_3PACK16,
+ VK_FORMAT_G12X4_B12X4R12X4_2PLANE_420_UNORM_3PACK16,
+ VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_422_UNORM_3PACK16,
+ VK_FORMAT_G12X4_B12X4R12X4_2PLANE_422_UNORM_3PACK16,
+ VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_444_UNORM_3PACK16,
+ VK_FORMAT_G16B16G16R16_422_UNORM,
+ VK_FORMAT_B16G16R16G16_422_UNORM,
+ VK_FORMAT_G16_B16_R16_3PLANE_420_UNORM,
+ VK_FORMAT_G16_B16R16_2PLANE_420_UNORM,
+ VK_FORMAT_G16_B16_R16_3PLANE_422_UNORM,
+ VK_FORMAT_G16_B16R16_2PLANE_422_UNORM,
+ VK_FORMAT_G16_B16_R16_3PLANE_444_UNORM,
+ VK_FORMAT_PVRTC1_2BPP_UNORM_BLOCK_IMG,
+ VK_FORMAT_PVRTC1_4BPP_UNORM_BLOCK_IMG,
+ VK_FORMAT_PVRTC2_2BPP_UNORM_BLOCK_IMG,
+ VK_FORMAT_PVRTC2_4BPP_UNORM_BLOCK_IMG,
+ VK_FORMAT_PVRTC1_2BPP_SRGB_BLOCK_IMG,
+ VK_FORMAT_PVRTC1_4BPP_SRGB_BLOCK_IMG,
+ VK_FORMAT_PVRTC2_2BPP_SRGB_BLOCK_IMG,
+ VK_FORMAT_PVRTC2_4BPP_SRGB_BLOCK_IMG,
+};
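+
+// This table maps RenderingDevice::DataFormat one-to-one onto VkFormat and
+// must stay in exact enum order; named_formats below mirrors it with
+// human-readable names for error reporting.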
+
+const char *RenderingDeviceVulkan::named_formats[RenderingDevice::DATA_FORMAT_MAX] = {
+ "R4G4_Unorm_Pack8",
+ "R4G4B4A4_Unorm_Pack16",
+ "B4G4R4A4_Unorm_Pack16",
+ "R5G6B5_Unorm_Pack16",
+ "B5G6R5_Unorm_Pack16",
+ "R5G5B5A1_Unorm_Pack16",
+ "B5G5R5A1_Unorm_Pack16",
+ "A1R5G5B5_Unorm_Pack16",
+ "R8_Unorm",
+ "R8_Snorm",
+ "R8_Uscaled",
+ "R8_Sscaled",
+ "R8_Uint",
+ "R8_Sint",
+ "R8_Srgb",
+ "R8G8_Unorm",
+ "R8G8_Snorm",
+ "R8G8_Uscaled",
+ "R8G8_Sscaled",
+ "R8G8_Uint",
+ "R8G8_Sint",
+ "R8G8_Srgb",
+ "R8G8B8_Unorm",
+ "R8G8B8_Snorm",
+ "R8G8B8_Uscaled",
+ "R8G8B8_Sscaled",
+ "R8G8B8_Uint",
+ "R8G8B8_Sint",
+ "R8G8B8_Srgb",
+ "B8G8R8_Unorm",
+ "B8G8R8_Snorm",
+ "B8G8R8_Uscaled",
+ "B8G8R8_Sscaled",
+ "B8G8R8_Uint",
+ "B8G8R8_Sint",
+ "B8G8R8_Srgb",
+ "R8G8B8A8_Unorm",
+ "R8G8B8A8_Snorm",
+ "R8G8B8A8_Uscaled",
+ "R8G8B8A8_Sscaled",
+ "R8G8B8A8_Uint",
+ "R8G8B8A8_Sint",
+ "R8G8B8A8_Srgb",
+ "B8G8R8A8_Unorm",
+ "B8G8R8A8_Snorm",
+ "B8G8R8A8_Uscaled",
+ "B8G8R8A8_Sscaled",
+ "B8G8R8A8_Uint",
+ "B8G8R8A8_Sint",
+ "B8G8R8A8_Srgb",
+ "A8B8G8R8_Unorm_Pack32",
+ "A8B8G8R8_Snorm_Pack32",
+ "A8B8G8R8_Uscaled_Pack32",
+ "A8B8G8R8_Sscaled_Pack32",
+ "A8B8G8R8_Uint_Pack32",
+ "A8B8G8R8_Sint_Pack32",
+ "A8B8G8R8_Srgb_Pack32",
+ "A2R10G10B10_Unorm_Pack32",
+ "A2R10G10B10_Snorm_Pack32",
+ "A2R10G10B10_Uscaled_Pack32",
+ "A2R10G10B10_Sscaled_Pack32",
+ "A2R10G10B10_Uint_Pack32",
+ "A2R10G10B10_Sint_Pack32",
+ "A2B10G10R10_Unorm_Pack32",
+ "A2B10G10R10_Snorm_Pack32",
+ "A2B10G10R10_Uscaled_Pack32",
+ "A2B10G10R10_Sscaled_Pack32",
+ "A2B10G10R10_Uint_Pack32",
+ "A2B10G10R10_Sint_Pack32",
+ "R16_Unorm",
+ "R16_Snorm",
+ "R16_Uscaled",
+ "R16_Sscaled",
+ "R16_Uint",
+ "R16_Sint",
+ "R16_Sfloat",
+ "R16G16_Unorm",
+ "R16G16_Snorm",
+ "R16G16_Uscaled",
+ "R16G16_Sscaled",
+ "R16G16_Uint",
+ "R16G16_Sint",
+ "R16G16_Sfloat",
+ "R16G16B16_Unorm",
+ "R16G16B16_Snorm",
+ "R16G16B16_Uscaled",
+ "R16G16B16_Sscaled",
+ "R16G16B16_Uint",
+ "R16G16B16_Sint",
+ "R16G16B16_Sfloat",
+ "R16G16B16A16_Unorm",
+ "R16G16B16A16_Snorm",
+ "R16G16B16A16_Uscaled",
+ "R16G16B16A16_Sscaled",
+ "R16G16B16A16_Uint",
+ "R16G16B16A16_Sint",
+ "R16G16B16A16_Sfloat",
+ "R32_Uint",
+ "R32_Sint",
+ "R32_Sfloat",
+ "R32G32_Uint",
+ "R32G32_Sint",
+ "R32G32_Sfloat",
+ "R32G32B32_Uint",
+ "R32G32B32_Sint",
+ "R32G32B32_Sfloat",
+ "R32G32B32A32_Uint",
+ "R32G32B32A32_Sint",
+ "R32G32B32A32_Sfloat",
+ "R64_Uint",
+ "R64_Sint",
+ "R64_Sfloat",
+ "R64G64_Uint",
+ "R64G64_Sint",
+ "R64G64_Sfloat",
+ "R64G64B64_Uint",
+ "R64G64B64_Sint",
+ "R64G64B64_Sfloat",
+ "R64G64B64A64_Uint",
+ "R64G64B64A64_Sint",
+ "R64G64B64A64_Sfloat",
+ "B10G11R11_Ufloat_Pack32",
+ "E5B9G9R9_Ufloat_Pack32",
+ "D16_Unorm",
+ "X8_D24_Unorm_Pack32",
+ "D32_Sfloat",
+ "S8_Uint",
+ "D16_Unorm_S8_Uint",
+ "D24_Unorm_S8_Uint",
+ "D32_Sfloat_S8_Uint",
+ "Bc1_Rgb_Unorm_Block",
+ "Bc1_Rgb_Srgb_Block",
+ "Bc1_Rgba_Unorm_Block",
+ "Bc1_Rgba_Srgb_Block",
+ "Bc2_Unorm_Block",
+ "Bc2_Srgb_Block",
+ "Bc3_Unorm_Block",
+ "Bc3_Srgb_Block",
+ "Bc4_Unorm_Block",
+ "Bc4_Snorm_Block",
+ "Bc5_Unorm_Block",
+ "Bc5_Snorm_Block",
+ "Bc6H_Ufloat_Block",
+ "Bc6H_Sfloat_Block",
+ "Bc7_Unorm_Block",
+ "Bc7_Srgb_Block",
+ "Etc2_R8G8B8_Unorm_Block",
+ "Etc2_R8G8B8_Srgb_Block",
+ "Etc2_R8G8B8A1_Unorm_Block",
+ "Etc2_R8G8B8A1_Srgb_Block",
+ "Etc2_R8G8B8A8_Unorm_Block",
+ "Etc2_R8G8B8A8_Srgb_Block",
+ "Eac_R11_Unorm_Block",
+ "Eac_R11_Snorm_Block",
+ "Eac_R11G11_Unorm_Block",
+ "Eac_R11G11_Snorm_Block",
+ "Astc_4X4_Unorm_Block",
+ "Astc_4X4_Srgb_Block",
+ "Astc_5X4_Unorm_Block",
+ "Astc_5X4_Srgb_Block",
+ "Astc_5X5_Unorm_Block",
+ "Astc_5X5_Srgb_Block",
+ "Astc_6X5_Unorm_Block",
+ "Astc_6X5_Srgb_Block",
+ "Astc_6X6_Unorm_Block",
+ "Astc_6X6_Srgb_Block",
+ "Astc_8X5_Unorm_Block",
+ "Astc_8X5_Srgb_Block",
+ "Astc_8X6_Unorm_Block",
+ "Astc_8X6_Srgb_Block",
+ "Astc_8X8_Unorm_Block",
+ "Astc_8X8_Srgb_Block",
+ "Astc_10X5_Unorm_Block",
+ "Astc_10X5_Srgb_Block",
+ "Astc_10X6_Unorm_Block",
+ "Astc_10X6_Srgb_Block",
+ "Astc_10X8_Unorm_Block",
+ "Astc_10X8_Srgb_Block",
+ "Astc_10X10_Unorm_Block",
+ "Astc_10X10_Srgb_Block",
+ "Astc_12X10_Unorm_Block",
+ "Astc_12X10_Srgb_Block",
+ "Astc_12X12_Unorm_Block",
+ "Astc_12X12_Srgb_Block",
+ "G8B8G8R8_422_Unorm",
+ "B8G8R8G8_422_Unorm",
+ "G8_B8_R8_3Plane_420_Unorm",
+ "G8_B8R8_2Plane_420_Unorm",
+ "G8_B8_R8_3Plane_422_Unorm",
+ "G8_B8R8_2Plane_422_Unorm",
+ "G8_B8_R8_3Plane_444_Unorm",
+ "R10X6_Unorm_Pack16",
+ "R10X6G10X6_Unorm_2Pack16",
+ "R10X6G10X6B10X6A10X6_Unorm_4Pack16",
+ "G10X6B10X6G10X6R10X6_422_Unorm_4Pack16",
+ "B10X6G10X6R10X6G10X6_422_Unorm_4Pack16",
+ "G10X6_B10X6_R10X6_3Plane_420_Unorm_3Pack16",
+ "G10X6_B10X6R10X6_2Plane_420_Unorm_3Pack16",
+ "G10X6_B10X6_R10X6_3Plane_422_Unorm_3Pack16",
+ "G10X6_B10X6R10X6_2Plane_422_Unorm_3Pack16",
+ "G10X6_B10X6_R10X6_3Plane_444_Unorm_3Pack16",
+ "R12X4_Unorm_Pack16",
+ "R12X4G12X4_Unorm_2Pack16",
+ "R12X4G12X4B12X4A12X4_Unorm_4Pack16",
+ "G12X4B12X4G12X4R12X4_422_Unorm_4Pack16",
+ "B12X4G12X4R12X4G12X4_422_Unorm_4Pack16",
+ "G12X4_B12X4_R12X4_3Plane_420_Unorm_3Pack16",
+ "G12X4_B12X4R12X4_2Plane_420_Unorm_3Pack16",
+ "G12X4_B12X4_R12X4_3Plane_422_Unorm_3Pack16",
+ "G12X4_B12X4R12X4_2Plane_422_Unorm_3Pack16",
+ "G12X4_B12X4_R12X4_3Plane_444_Unorm_3Pack16",
+ "G16B16G16R16_422_Unorm",
+ "B16G16R16G16_422_Unorm",
+ "G16_B16_R16_3Plane_420_Unorm",
+ "G16_B16R16_2Plane_420_Unorm",
+ "G16_B16_R16_3Plane_422_Unorm",
+ "G16_B16R16_2Plane_422_Unorm",
+ "G16_B16_R16_3Plane_444_Unorm",
+ "Pvrtc1_2Bpp_Unorm_Block_Img",
+ "Pvrtc1_4Bpp_Unorm_Block_Img",
+ "Pvrtc2_2Bpp_Unorm_Block_Img",
+ "Pvrtc2_4Bpp_Unorm_Block_Img",
+ "Pvrtc1_2Bpp_Srgb_Block_Img",
+ "Pvrtc1_4Bpp_Srgb_Block_Img",
+ "Pvrtc2_2Bpp_Srgb_Block_Img",
+ "Pvrtc2_4Bpp_Srgb_Block_Img"
+};
+
+int RenderingDeviceVulkan::get_format_vertex_size(DataFormat p_format) {
+ switch (p_format) {
+ case DATA_FORMAT_R8_UNORM:
+ case DATA_FORMAT_R8_SNORM:
+ case DATA_FORMAT_R8_UINT:
+ case DATA_FORMAT_R8_SINT:
+ case DATA_FORMAT_R8G8_UNORM:
+ case DATA_FORMAT_R8G8_SNORM:
+ case DATA_FORMAT_R8G8_UINT:
+ case DATA_FORMAT_R8G8_SINT:
+ case DATA_FORMAT_R8G8B8_UNORM:
+ case DATA_FORMAT_R8G8B8_SNORM:
+ case DATA_FORMAT_R8G8B8_UINT:
+ case DATA_FORMAT_R8G8B8_SINT:
+ case DATA_FORMAT_B8G8R8_UNORM:
+ case DATA_FORMAT_B8G8R8_SNORM:
+ case DATA_FORMAT_B8G8R8_UINT:
+ case DATA_FORMAT_B8G8R8_SINT:
+ case DATA_FORMAT_R8G8B8A8_UNORM:
+ case DATA_FORMAT_R8G8B8A8_SNORM:
+ case DATA_FORMAT_R8G8B8A8_UINT:
+ case DATA_FORMAT_R8G8B8A8_SINT:
+ case DATA_FORMAT_B8G8R8A8_UNORM:
+ case DATA_FORMAT_B8G8R8A8_SNORM:
+ case DATA_FORMAT_B8G8R8A8_UINT:
+ case DATA_FORMAT_B8G8R8A8_SINT: return 4;
+ case DATA_FORMAT_R16_UNORM:
+ case DATA_FORMAT_R16_SNORM:
+ case DATA_FORMAT_R16_UINT:
+ case DATA_FORMAT_R16_SINT:
+ case DATA_FORMAT_R16_SFLOAT: return 4;
+ case DATA_FORMAT_R16G16_UNORM:
+ case DATA_FORMAT_R16G16_SNORM:
+ case DATA_FORMAT_R16G16_UINT:
+ case DATA_FORMAT_R16G16_SINT:
+ case DATA_FORMAT_R16G16_SFLOAT: return 4;
+ case DATA_FORMAT_R16G16B16_UNORM:
+ case DATA_FORMAT_R16G16B16_SNORM:
+ case DATA_FORMAT_R16G16B16_UINT:
+ case DATA_FORMAT_R16G16B16_SINT:
+ case DATA_FORMAT_R16G16B16_SFLOAT: return 8;
+ case DATA_FORMAT_R16G16B16A16_UNORM:
+ case DATA_FORMAT_R16G16B16A16_SNORM:
+ case DATA_FORMAT_R16G16B16A16_UINT:
+ case DATA_FORMAT_R16G16B16A16_SINT:
+ case DATA_FORMAT_R16G16B16A16_SFLOAT: return 8;
+ case DATA_FORMAT_R32_UINT:
+ case DATA_FORMAT_R32_SINT:
+ case DATA_FORMAT_R32_SFLOAT: return 4;
+ case DATA_FORMAT_R32G32_UINT:
+ case DATA_FORMAT_R32G32_SINT:
+ case DATA_FORMAT_R32G32_SFLOAT: return 8;
+ case DATA_FORMAT_R32G32B32_UINT:
+ case DATA_FORMAT_R32G32B32_SINT:
+ case DATA_FORMAT_R32G32B32_SFLOAT: return 12;
+ case DATA_FORMAT_R32G32B32A32_UINT:
+ case DATA_FORMAT_R32G32B32A32_SINT:
+ case DATA_FORMAT_R32G32B32A32_SFLOAT: return 16;
+ case DATA_FORMAT_R64_UINT:
+ case DATA_FORMAT_R64_SINT:
+ case DATA_FORMAT_R64_SFLOAT: return 8;
+ case DATA_FORMAT_R64G64_UINT:
+ case DATA_FORMAT_R64G64_SINT:
+ case DATA_FORMAT_R64G64_SFLOAT: return 16;
+ case DATA_FORMAT_R64G64B64_UINT:
+ case DATA_FORMAT_R64G64B64_SINT:
+ case DATA_FORMAT_R64G64B64_SFLOAT: return 24;
+ case DATA_FORMAT_R64G64B64A64_UINT:
+ case DATA_FORMAT_R64G64B64A64_SINT:
+ case DATA_FORMAT_R64G64B64A64_SFLOAT: return 32;
+ default: return 0;
+ }
+}
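+
+// Note on the sizes above: one- and two-component 8/16-bit formats all report
+// 4 bytes (e.g. R16 gives 4, not 2) and three-component 16-bit formats report
+// 8, not 6; vertex element sizes appear to be rounded up for fetch alignment.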
+
+uint32_t RenderingDeviceVulkan::get_image_format_pixel_size(DataFormat p_format) {
+
+ switch (p_format) {
+
+ case DATA_FORMAT_R4G4_UNORM_PACK8: return 1;
+ case DATA_FORMAT_R4G4B4A4_UNORM_PACK16:
+ case DATA_FORMAT_B4G4R4A4_UNORM_PACK16:
+ case DATA_FORMAT_R5G6B5_UNORM_PACK16:
+ case DATA_FORMAT_B5G6R5_UNORM_PACK16:
+ case DATA_FORMAT_R5G5B5A1_UNORM_PACK16:
+ case DATA_FORMAT_B5G5R5A1_UNORM_PACK16:
+ case DATA_FORMAT_A1R5G5B5_UNORM_PACK16: return 2;
+ case DATA_FORMAT_R8_UNORM:
+ case DATA_FORMAT_R8_SNORM:
+ case DATA_FORMAT_R8_USCALED:
+ case DATA_FORMAT_R8_SSCALED:
+ case DATA_FORMAT_R8_UINT:
+ case DATA_FORMAT_R8_SINT:
+ case DATA_FORMAT_R8_SRGB: return 1;
+ case DATA_FORMAT_R8G8_UNORM:
+ case DATA_FORMAT_R8G8_SNORM:
+ case DATA_FORMAT_R8G8_USCALED:
+ case DATA_FORMAT_R8G8_SSCALED:
+ case DATA_FORMAT_R8G8_UINT:
+ case DATA_FORMAT_R8G8_SINT:
+ case DATA_FORMAT_R8G8_SRGB: return 2;
+ case DATA_FORMAT_R8G8B8_UNORM:
+ case DATA_FORMAT_R8G8B8_SNORM:
+ case DATA_FORMAT_R8G8B8_USCALED:
+ case DATA_FORMAT_R8G8B8_SSCALED:
+ case DATA_FORMAT_R8G8B8_UINT:
+ case DATA_FORMAT_R8G8B8_SINT:
+ case DATA_FORMAT_R8G8B8_SRGB:
+ case DATA_FORMAT_B8G8R8_UNORM:
+ case DATA_FORMAT_B8G8R8_SNORM:
+ case DATA_FORMAT_B8G8R8_USCALED:
+ case DATA_FORMAT_B8G8R8_SSCALED:
+ case DATA_FORMAT_B8G8R8_UINT:
+ case DATA_FORMAT_B8G8R8_SINT:
+ case DATA_FORMAT_B8G8R8_SRGB: return 3;
+ case DATA_FORMAT_R8G8B8A8_UNORM:
+ case DATA_FORMAT_R8G8B8A8_SNORM:
+ case DATA_FORMAT_R8G8B8A8_USCALED:
+ case DATA_FORMAT_R8G8B8A8_SSCALED:
+ case DATA_FORMAT_R8G8B8A8_UINT:
+ case DATA_FORMAT_R8G8B8A8_SINT:
+ case DATA_FORMAT_R8G8B8A8_SRGB:
+ case DATA_FORMAT_B8G8R8A8_UNORM:
+ case DATA_FORMAT_B8G8R8A8_SNORM:
+ case DATA_FORMAT_B8G8R8A8_USCALED:
+ case DATA_FORMAT_B8G8R8A8_SSCALED:
+ case DATA_FORMAT_B8G8R8A8_UINT:
+ case DATA_FORMAT_B8G8R8A8_SINT:
+ case DATA_FORMAT_B8G8R8A8_SRGB: return 4;
+ case DATA_FORMAT_A8B8G8R8_UNORM_PACK32:
+ case DATA_FORMAT_A8B8G8R8_SNORM_PACK32:
+ case DATA_FORMAT_A8B8G8R8_USCALED_PACK32:
+ case DATA_FORMAT_A8B8G8R8_SSCALED_PACK32:
+ case DATA_FORMAT_A8B8G8R8_UINT_PACK32:
+ case DATA_FORMAT_A8B8G8R8_SINT_PACK32:
+ case DATA_FORMAT_A8B8G8R8_SRGB_PACK32:
+ case DATA_FORMAT_A2R10G10B10_UNORM_PACK32:
+ case DATA_FORMAT_A2R10G10B10_SNORM_PACK32:
+ case DATA_FORMAT_A2R10G10B10_USCALED_PACK32:
+ case DATA_FORMAT_A2R10G10B10_SSCALED_PACK32:
+ case DATA_FORMAT_A2R10G10B10_UINT_PACK32:
+ case DATA_FORMAT_A2R10G10B10_SINT_PACK32:
+ case DATA_FORMAT_A2B10G10R10_UNORM_PACK32:
+ case DATA_FORMAT_A2B10G10R10_SNORM_PACK32:
+ case DATA_FORMAT_A2B10G10R10_USCALED_PACK32:
+ case DATA_FORMAT_A2B10G10R10_SSCALED_PACK32:
+ case DATA_FORMAT_A2B10G10R10_UINT_PACK32:
+ case DATA_FORMAT_A2B10G10R10_SINT_PACK32: return 4;
+ case DATA_FORMAT_R16_UNORM:
+ case DATA_FORMAT_R16_SNORM:
+ case DATA_FORMAT_R16_USCALED:
+ case DATA_FORMAT_R16_SSCALED:
+ case DATA_FORMAT_R16_UINT:
+ case DATA_FORMAT_R16_SINT:
+ case DATA_FORMAT_R16_SFLOAT: return 2;
+ case DATA_FORMAT_R16G16_UNORM:
+ case DATA_FORMAT_R16G16_SNORM:
+ case DATA_FORMAT_R16G16_USCALED:
+ case DATA_FORMAT_R16G16_SSCALED:
+ case DATA_FORMAT_R16G16_UINT:
+ case DATA_FORMAT_R16G16_SINT:
+ case DATA_FORMAT_R16G16_SFLOAT: return 4;
+ case DATA_FORMAT_R16G16B16_UNORM:
+ case DATA_FORMAT_R16G16B16_SNORM:
+ case DATA_FORMAT_R16G16B16_USCALED:
+ case DATA_FORMAT_R16G16B16_SSCALED:
+ case DATA_FORMAT_R16G16B16_UINT:
+ case DATA_FORMAT_R16G16B16_SINT:
+ case DATA_FORMAT_R16G16B16_SFLOAT: return 6;
+ case DATA_FORMAT_R16G16B16A16_UNORM:
+ case DATA_FORMAT_R16G16B16A16_SNORM:
+ case DATA_FORMAT_R16G16B16A16_USCALED:
+ case DATA_FORMAT_R16G16B16A16_SSCALED:
+ case DATA_FORMAT_R16G16B16A16_UINT:
+ case DATA_FORMAT_R16G16B16A16_SINT:
+ case DATA_FORMAT_R16G16B16A16_SFLOAT: return 8;
+ case DATA_FORMAT_R32_UINT:
+ case DATA_FORMAT_R32_SINT:
+ case DATA_FORMAT_R32_SFLOAT: return 4;
+ case DATA_FORMAT_R32G32_UINT:
+ case DATA_FORMAT_R32G32_SINT:
+ case DATA_FORMAT_R32G32_SFLOAT: return 8;
+ case DATA_FORMAT_R32G32B32_UINT:
+ case DATA_FORMAT_R32G32B32_SINT:
+ case DATA_FORMAT_R32G32B32_SFLOAT: return 12;
+ case DATA_FORMAT_R32G32B32A32_UINT:
+ case DATA_FORMAT_R32G32B32A32_SINT:
+ case DATA_FORMAT_R32G32B32A32_SFLOAT: return 16;
+ case DATA_FORMAT_R64_UINT:
+ case DATA_FORMAT_R64_SINT:
+ case DATA_FORMAT_R64_SFLOAT: return 8;
+ case DATA_FORMAT_R64G64_UINT:
+ case DATA_FORMAT_R64G64_SINT:
+ case DATA_FORMAT_R64G64_SFLOAT: return 16;
+ case DATA_FORMAT_R64G64B64_UINT:
+ case DATA_FORMAT_R64G64B64_SINT:
+ case DATA_FORMAT_R64G64B64_SFLOAT: return 24;
+ case DATA_FORMAT_R64G64B64A64_UINT:
+ case DATA_FORMAT_R64G64B64A64_SINT:
+ case DATA_FORMAT_R64G64B64A64_SFLOAT: return 32;
+ case DATA_FORMAT_B10G11R11_UFLOAT_PACK32:
+ case DATA_FORMAT_E5B9G9R9_UFLOAT_PACK32: return 4;
+ case DATA_FORMAT_D16_UNORM: return 2;
+ case DATA_FORMAT_X8_D24_UNORM_PACK32: return 4;
+ case DATA_FORMAT_D32_SFLOAT: return 4;
+ case DATA_FORMAT_S8_UINT: return 1;
+ case DATA_FORMAT_D16_UNORM_S8_UINT: return 4;
+ case DATA_FORMAT_D24_UNORM_S8_UINT: return 4;
+ case DATA_FORMAT_D32_SFLOAT_S8_UINT:
+ return 5; //4 bytes depth + 1 byte stencil; drivers may pad to more
+ case DATA_FORMAT_BC1_RGB_UNORM_BLOCK:
+ case DATA_FORMAT_BC1_RGB_SRGB_BLOCK:
+ case DATA_FORMAT_BC1_RGBA_UNORM_BLOCK:
+ case DATA_FORMAT_BC1_RGBA_SRGB_BLOCK:
+ case DATA_FORMAT_BC2_UNORM_BLOCK:
+ case DATA_FORMAT_BC2_SRGB_BLOCK:
+ case DATA_FORMAT_BC3_UNORM_BLOCK:
+ case DATA_FORMAT_BC3_SRGB_BLOCK:
+ case DATA_FORMAT_BC4_UNORM_BLOCK:
+ case DATA_FORMAT_BC4_SNORM_BLOCK:
+ case DATA_FORMAT_BC5_UNORM_BLOCK:
+ case DATA_FORMAT_BC5_SNORM_BLOCK:
+ case DATA_FORMAT_BC6H_UFLOAT_BLOCK:
+ case DATA_FORMAT_BC6H_SFLOAT_BLOCK:
+ case DATA_FORMAT_BC7_UNORM_BLOCK:
+ case DATA_FORMAT_BC7_SRGB_BLOCK: return 1;
+ case DATA_FORMAT_ETC2_R8G8B8_UNORM_BLOCK:
+ case DATA_FORMAT_ETC2_R8G8B8_SRGB_BLOCK:
+ case DATA_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK:
+ case DATA_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK:
+ case DATA_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK:
+ case DATA_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK: return 1;
+ case DATA_FORMAT_EAC_R11_UNORM_BLOCK:
+ case DATA_FORMAT_EAC_R11_SNORM_BLOCK:
+ case DATA_FORMAT_EAC_R11G11_UNORM_BLOCK:
+ case DATA_FORMAT_EAC_R11G11_SNORM_BLOCK: return 1;
+ case DATA_FORMAT_ASTC_4x4_UNORM_BLOCK:
+ case DATA_FORMAT_ASTC_4x4_SRGB_BLOCK:
+ case DATA_FORMAT_ASTC_5x4_UNORM_BLOCK:
+ case DATA_FORMAT_ASTC_5x4_SRGB_BLOCK:
+ case DATA_FORMAT_ASTC_5x5_UNORM_BLOCK:
+ case DATA_FORMAT_ASTC_5x5_SRGB_BLOCK:
+ case DATA_FORMAT_ASTC_6x5_UNORM_BLOCK:
+ case DATA_FORMAT_ASTC_6x5_SRGB_BLOCK:
+ case DATA_FORMAT_ASTC_6x6_UNORM_BLOCK:
+ case DATA_FORMAT_ASTC_6x6_SRGB_BLOCK:
+ case DATA_FORMAT_ASTC_8x5_UNORM_BLOCK:
+ case DATA_FORMAT_ASTC_8x5_SRGB_BLOCK:
+ case DATA_FORMAT_ASTC_8x6_UNORM_BLOCK:
+ case DATA_FORMAT_ASTC_8x6_SRGB_BLOCK:
+ case DATA_FORMAT_ASTC_8x8_UNORM_BLOCK:
+ case DATA_FORMAT_ASTC_8x8_SRGB_BLOCK:
+ case DATA_FORMAT_ASTC_10x5_UNORM_BLOCK:
+ case DATA_FORMAT_ASTC_10x5_SRGB_BLOCK:
+ case DATA_FORMAT_ASTC_10x6_UNORM_BLOCK:
+ case DATA_FORMAT_ASTC_10x6_SRGB_BLOCK:
+ case DATA_FORMAT_ASTC_10x8_UNORM_BLOCK:
+ case DATA_FORMAT_ASTC_10x8_SRGB_BLOCK:
+ case DATA_FORMAT_ASTC_10x10_UNORM_BLOCK:
+ case DATA_FORMAT_ASTC_10x10_SRGB_BLOCK:
+ case DATA_FORMAT_ASTC_12x10_UNORM_BLOCK:
+ case DATA_FORMAT_ASTC_12x10_SRGB_BLOCK:
+ case DATA_FORMAT_ASTC_12x12_UNORM_BLOCK:
+ case DATA_FORMAT_ASTC_12x12_SRGB_BLOCK: return 1;
+ case DATA_FORMAT_G8B8G8R8_422_UNORM:
+ case DATA_FORMAT_B8G8R8G8_422_UNORM: return 4;
+ case DATA_FORMAT_G8_B8_R8_3PLANE_420_UNORM:
+ case DATA_FORMAT_G8_B8R8_2PLANE_420_UNORM:
+ case DATA_FORMAT_G8_B8_R8_3PLANE_422_UNORM:
+ case DATA_FORMAT_G8_B8R8_2PLANE_422_UNORM:
+ case DATA_FORMAT_G8_B8_R8_3PLANE_444_UNORM: return 4;
+ case DATA_FORMAT_R10X6_UNORM_PACK16:
+ case DATA_FORMAT_R10X6G10X6_UNORM_2PACK16:
+ case DATA_FORMAT_R10X6G10X6B10X6A10X6_UNORM_4PACK16:
+ case DATA_FORMAT_G10X6B10X6G10X6R10X6_422_UNORM_4PACK16:
+ case DATA_FORMAT_B10X6G10X6R10X6G10X6_422_UNORM_4PACK16:
+ case DATA_FORMAT_G10X6_B10X6_R10X6_3PLANE_420_UNORM_3PACK16:
+ case DATA_FORMAT_G10X6_B10X6R10X6_2PLANE_420_UNORM_3PACK16:
+ case DATA_FORMAT_G10X6_B10X6_R10X6_3PLANE_422_UNORM_3PACK16:
+ case DATA_FORMAT_G10X6_B10X6R10X6_2PLANE_422_UNORM_3PACK16:
+ case DATA_FORMAT_G10X6_B10X6_R10X6_3PLANE_444_UNORM_3PACK16:
+ case DATA_FORMAT_R12X4_UNORM_PACK16:
+ case DATA_FORMAT_R12X4G12X4_UNORM_2PACK16:
+ case DATA_FORMAT_R12X4G12X4B12X4A12X4_UNORM_4PACK16:
+ case DATA_FORMAT_G12X4B12X4G12X4R12X4_422_UNORM_4PACK16:
+ case DATA_FORMAT_B12X4G12X4R12X4G12X4_422_UNORM_4PACK16:
+ case DATA_FORMAT_G12X4_B12X4_R12X4_3PLANE_420_UNORM_3PACK16:
+ case DATA_FORMAT_G12X4_B12X4R12X4_2PLANE_420_UNORM_3PACK16:
+ case DATA_FORMAT_G12X4_B12X4_R12X4_3PLANE_422_UNORM_3PACK16:
+ case DATA_FORMAT_G12X4_B12X4R12X4_2PLANE_422_UNORM_3PACK16:
+ case DATA_FORMAT_G12X4_B12X4_R12X4_3PLANE_444_UNORM_3PACK16: return 2;
+ case DATA_FORMAT_G16B16G16R16_422_UNORM:
+ case DATA_FORMAT_B16G16R16G16_422_UNORM:
+ case DATA_FORMAT_G16_B16_R16_3PLANE_420_UNORM:
+ case DATA_FORMAT_G16_B16R16_2PLANE_420_UNORM:
+ case DATA_FORMAT_G16_B16_R16_3PLANE_422_UNORM:
+ case DATA_FORMAT_G16_B16R16_2PLANE_422_UNORM:
+ case DATA_FORMAT_G16_B16_R16_3PLANE_444_UNORM: return 8;
+ case DATA_FORMAT_PVRTC1_2BPP_UNORM_BLOCK_IMG:
+ case DATA_FORMAT_PVRTC1_4BPP_UNORM_BLOCK_IMG:
+ case DATA_FORMAT_PVRTC2_2BPP_UNORM_BLOCK_IMG:
+ case DATA_FORMAT_PVRTC2_4BPP_UNORM_BLOCK_IMG:
+ case DATA_FORMAT_PVRTC1_2BPP_SRGB_BLOCK_IMG:
+ case DATA_FORMAT_PVRTC1_4BPP_SRGB_BLOCK_IMG:
+ case DATA_FORMAT_PVRTC2_2BPP_SRGB_BLOCK_IMG:
+ case DATA_FORMAT_PVRTC2_4BPP_SRGB_BLOCK_IMG: return 1;
+ default: {
+ ERR_PRINT("Format not handled, bug");
+ }
+ }
+
+ return 1;
+}
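+
+// Compressed formats report a nominal per-pixel size above (mostly 1 byte);
+// their real footprint comes from the block helpers below combined with
+// get_compressed_image_format_pixel_rshift().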
+
+// https://www.khronos.org/registry/DataFormat/specs/1.1/dataformat.1.1.pdf
+
+void RenderingDeviceVulkan::get_compressed_image_format_block_dimensions(DataFormat p_format, uint32_t &r_w, uint32_t &r_h) {
+
+ switch (p_format) {
+ case DATA_FORMAT_BC1_RGB_UNORM_BLOCK:
+ case DATA_FORMAT_BC1_RGB_SRGB_BLOCK:
+ case DATA_FORMAT_BC1_RGBA_UNORM_BLOCK:
+ case DATA_FORMAT_BC1_RGBA_SRGB_BLOCK:
+ case DATA_FORMAT_BC2_UNORM_BLOCK:
+ case DATA_FORMAT_BC2_SRGB_BLOCK:
+ case DATA_FORMAT_BC3_UNORM_BLOCK:
+ case DATA_FORMAT_BC3_SRGB_BLOCK:
+ case DATA_FORMAT_BC4_UNORM_BLOCK:
+ case DATA_FORMAT_BC4_SNORM_BLOCK:
+ case DATA_FORMAT_BC5_UNORM_BLOCK:
+ case DATA_FORMAT_BC5_SNORM_BLOCK:
+ case DATA_FORMAT_BC6H_UFLOAT_BLOCK:
+ case DATA_FORMAT_BC6H_SFLOAT_BLOCK:
+ case DATA_FORMAT_BC7_UNORM_BLOCK:
+ case DATA_FORMAT_BC7_SRGB_BLOCK:
+ case DATA_FORMAT_ETC2_R8G8B8_UNORM_BLOCK:
+ case DATA_FORMAT_ETC2_R8G8B8_SRGB_BLOCK:
+ case DATA_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK:
+ case DATA_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK:
+ case DATA_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK:
+ case DATA_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK:
+ case DATA_FORMAT_EAC_R11_UNORM_BLOCK:
+ case DATA_FORMAT_EAC_R11_SNORM_BLOCK:
+ case DATA_FORMAT_EAC_R11G11_UNORM_BLOCK:
+ case DATA_FORMAT_EAC_R11G11_SNORM_BLOCK:
+ case DATA_FORMAT_ASTC_4x4_UNORM_BLOCK: //again, not sure about astc
+ case DATA_FORMAT_ASTC_4x4_SRGB_BLOCK:
+ case DATA_FORMAT_ASTC_5x4_UNORM_BLOCK:
+ case DATA_FORMAT_ASTC_5x4_SRGB_BLOCK:
+ case DATA_FORMAT_ASTC_5x5_UNORM_BLOCK:
+ case DATA_FORMAT_ASTC_5x5_SRGB_BLOCK:
+ case DATA_FORMAT_ASTC_6x5_UNORM_BLOCK:
+ case DATA_FORMAT_ASTC_6x5_SRGB_BLOCK:
+ case DATA_FORMAT_ASTC_6x6_UNORM_BLOCK:
+ case DATA_FORMAT_ASTC_6x6_SRGB_BLOCK:
+ case DATA_FORMAT_ASTC_8x5_UNORM_BLOCK:
+ case DATA_FORMAT_ASTC_8x5_SRGB_BLOCK:
+ case DATA_FORMAT_ASTC_8x6_UNORM_BLOCK:
+ case DATA_FORMAT_ASTC_8x6_SRGB_BLOCK:
+ case DATA_FORMAT_ASTC_8x8_UNORM_BLOCK:
+ case DATA_FORMAT_ASTC_8x8_SRGB_BLOCK:
+ case DATA_FORMAT_ASTC_10x5_UNORM_BLOCK:
+ case DATA_FORMAT_ASTC_10x5_SRGB_BLOCK:
+ case DATA_FORMAT_ASTC_10x6_UNORM_BLOCK:
+ case DATA_FORMAT_ASTC_10x6_SRGB_BLOCK:
+ case DATA_FORMAT_ASTC_10x8_UNORM_BLOCK:
+ case DATA_FORMAT_ASTC_10x8_SRGB_BLOCK:
+ case DATA_FORMAT_ASTC_10x10_UNORM_BLOCK:
+ case DATA_FORMAT_ASTC_10x10_SRGB_BLOCK:
+ case DATA_FORMAT_ASTC_12x10_UNORM_BLOCK:
+ case DATA_FORMAT_ASTC_12x10_SRGB_BLOCK:
+ case DATA_FORMAT_ASTC_12x12_UNORM_BLOCK:
+ case DATA_FORMAT_ASTC_12x12_SRGB_BLOCK:
+ r_w = 4;
+ r_h = 4;
+ return;
+ case DATA_FORMAT_PVRTC1_4BPP_UNORM_BLOCK_IMG:
+ case DATA_FORMAT_PVRTC2_4BPP_UNORM_BLOCK_IMG:
+ case DATA_FORMAT_PVRTC1_4BPP_SRGB_BLOCK_IMG:
+ case DATA_FORMAT_PVRTC2_4BPP_SRGB_BLOCK_IMG:
+ r_w = 4;
+ r_h = 4;
+ return;
+ case DATA_FORMAT_PVRTC1_2BPP_UNORM_BLOCK_IMG:
+ case DATA_FORMAT_PVRTC2_2BPP_UNORM_BLOCK_IMG:
+ case DATA_FORMAT_PVRTC1_2BPP_SRGB_BLOCK_IMG:
+ case DATA_FORMAT_PVRTC2_2BPP_SRGB_BLOCK_IMG:
+ r_w = 8;
+ r_h = 4;
+ return;
+ default: {
+ r_w = 1;
+ r_h = 1;
+ }
+ }
+}
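+
+// In the table above, all BC/ETC2/EAC formats use 4x4 texel blocks. ASTC is
+// forced to 4x4 as well, even though e.g. ASTC_8x8 really encodes 8x8 blocks
+// (hence the "not sure about astc" note). PVRTC 4bpp maps to 4x4 and PVRTC
+// 2bpp to 8x4 virtual blocks.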
+
+uint32_t RenderingDeviceVulkan::get_compressed_image_format_block_byte_size(DataFormat p_format) {
+
+ switch (p_format) {
+ case DATA_FORMAT_BC1_RGB_UNORM_BLOCK:
+ case DATA_FORMAT_BC1_RGB_SRGB_BLOCK:
+ case DATA_FORMAT_BC1_RGBA_UNORM_BLOCK:
+ case DATA_FORMAT_BC1_RGBA_SRGB_BLOCK: return 8;
+ case DATA_FORMAT_BC2_UNORM_BLOCK:
+ case DATA_FORMAT_BC2_SRGB_BLOCK: return 16;
+ case DATA_FORMAT_BC3_UNORM_BLOCK:
+ case DATA_FORMAT_BC3_SRGB_BLOCK: return 16;
+ case DATA_FORMAT_BC4_UNORM_BLOCK:
+ case DATA_FORMAT_BC4_SNORM_BLOCK: return 8;
+ case DATA_FORMAT_BC5_UNORM_BLOCK:
+ case DATA_FORMAT_BC5_SNORM_BLOCK: return 16;
+ case DATA_FORMAT_BC6H_UFLOAT_BLOCK:
+ case DATA_FORMAT_BC6H_SFLOAT_BLOCK: return 16;
+ case DATA_FORMAT_BC7_UNORM_BLOCK:
+ case DATA_FORMAT_BC7_SRGB_BLOCK: return 16;
+ case DATA_FORMAT_ETC2_R8G8B8_UNORM_BLOCK:
+ case DATA_FORMAT_ETC2_R8G8B8_SRGB_BLOCK: return 8;
+ case DATA_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK:
+ case DATA_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK: return 8;
+ case DATA_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK:
+ case DATA_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK: return 16;
+ case DATA_FORMAT_EAC_R11_UNORM_BLOCK:
+ case DATA_FORMAT_EAC_R11_SNORM_BLOCK: return 8;
+ case DATA_FORMAT_EAC_R11G11_UNORM_BLOCK:
+ case DATA_FORMAT_EAC_R11G11_SNORM_BLOCK: return 16;
+ case DATA_FORMAT_ASTC_4x4_UNORM_BLOCK:
+ case DATA_FORMAT_ASTC_4x4_SRGB_BLOCK:
+ case DATA_FORMAT_ASTC_5x4_UNORM_BLOCK:
+ case DATA_FORMAT_ASTC_5x4_SRGB_BLOCK:
+ case DATA_FORMAT_ASTC_5x5_UNORM_BLOCK:
+ case DATA_FORMAT_ASTC_5x5_SRGB_BLOCK:
+ case DATA_FORMAT_ASTC_6x5_UNORM_BLOCK:
+ case DATA_FORMAT_ASTC_6x5_SRGB_BLOCK:
+ case DATA_FORMAT_ASTC_6x6_UNORM_BLOCK:
+ case DATA_FORMAT_ASTC_6x6_SRGB_BLOCK:
+ case DATA_FORMAT_ASTC_8x5_UNORM_BLOCK:
+ case DATA_FORMAT_ASTC_8x5_SRGB_BLOCK:
+ case DATA_FORMAT_ASTC_8x6_UNORM_BLOCK:
+ case DATA_FORMAT_ASTC_8x6_SRGB_BLOCK:
+ case DATA_FORMAT_ASTC_8x8_UNORM_BLOCK:
+ case DATA_FORMAT_ASTC_8x8_SRGB_BLOCK:
+ case DATA_FORMAT_ASTC_10x5_UNORM_BLOCK:
+ case DATA_FORMAT_ASTC_10x5_SRGB_BLOCK:
+ case DATA_FORMAT_ASTC_10x6_UNORM_BLOCK:
+ case DATA_FORMAT_ASTC_10x6_SRGB_BLOCK:
+ case DATA_FORMAT_ASTC_10x8_UNORM_BLOCK:
+ case DATA_FORMAT_ASTC_10x8_SRGB_BLOCK:
+ case DATA_FORMAT_ASTC_10x10_UNORM_BLOCK:
+ case DATA_FORMAT_ASTC_10x10_SRGB_BLOCK:
+ case DATA_FORMAT_ASTC_12x10_UNORM_BLOCK:
+ case DATA_FORMAT_ASTC_12x10_SRGB_BLOCK:
+ case DATA_FORMAT_ASTC_12x12_UNORM_BLOCK:
+ case DATA_FORMAT_ASTC_12x12_SRGB_BLOCK:
+ return 16; //ASTC blocks are always 128 bits (16 bytes), regardless of texel footprint
+ case DATA_FORMAT_PVRTC1_4BPP_UNORM_BLOCK_IMG:
+ case DATA_FORMAT_PVRTC2_4BPP_UNORM_BLOCK_IMG:
+ case DATA_FORMAT_PVRTC1_4BPP_SRGB_BLOCK_IMG:
+ case DATA_FORMAT_PVRTC2_4BPP_SRGB_BLOCK_IMG:
+ case DATA_FORMAT_PVRTC1_2BPP_UNORM_BLOCK_IMG:
+ case DATA_FORMAT_PVRTC2_2BPP_UNORM_BLOCK_IMG:
+ case DATA_FORMAT_PVRTC1_2BPP_SRGB_BLOCK_IMG:
+ case DATA_FORMAT_PVRTC2_2BPP_SRGB_BLOCK_IMG:
+ return 8; //what varies is resolution
+ default: {
+ }
+ }
+ return 1;
+}
+
+uint32_t RenderingDeviceVulkan::get_compressed_image_format_pixel_rshift(DataFormat p_format) {
+
+ switch (p_format) {
+ case DATA_FORMAT_BC1_RGB_UNORM_BLOCK: //these formats are half byte size, so rshift is 1
+ case DATA_FORMAT_BC1_RGB_SRGB_BLOCK:
+ case DATA_FORMAT_BC1_RGBA_UNORM_BLOCK:
+ case DATA_FORMAT_BC1_RGBA_SRGB_BLOCK:
+ case DATA_FORMAT_BC4_UNORM_BLOCK:
+ case DATA_FORMAT_BC4_SNORM_BLOCK:
+ case DATA_FORMAT_ETC2_R8G8B8_UNORM_BLOCK:
+ case DATA_FORMAT_ETC2_R8G8B8_SRGB_BLOCK:
+ case DATA_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK:
+ case DATA_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK:
+ case DATA_FORMAT_EAC_R11_UNORM_BLOCK:
+ case DATA_FORMAT_EAC_R11_SNORM_BLOCK:
+ case DATA_FORMAT_PVRTC1_4BPP_UNORM_BLOCK_IMG:
+ case DATA_FORMAT_PVRTC2_4BPP_UNORM_BLOCK_IMG:
+ case DATA_FORMAT_PVRTC1_4BPP_SRGB_BLOCK_IMG:
+ case DATA_FORMAT_PVRTC2_4BPP_SRGB_BLOCK_IMG: return 1;
+ case DATA_FORMAT_PVRTC1_2BPP_UNORM_BLOCK_IMG: //these formats are quarter byte size, so rshift is 2
+ case DATA_FORMAT_PVRTC2_2BPP_UNORM_BLOCK_IMG:
+ case DATA_FORMAT_PVRTC1_2BPP_SRGB_BLOCK_IMG:
+ case DATA_FORMAT_PVRTC2_2BPP_SRGB_BLOCK_IMG: return 2;
+ default: {
+ }
+ }
+
+ return 0;
+}
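+
+// The rshift compensates for sub-byte texel sizes. BC1, for example, packs a
+// 4x4 block (16 texels) into 8 bytes, i.e. half a byte per texel: with a
+// nominal pixel size of 1 and an rshift of 1, the byte size works out as
+// (width * height * 1) >> 1.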
+
+bool RenderingDeviceVulkan::format_has_stencil(DataFormat p_format) {
+ switch (p_format) {
+ case DATA_FORMAT_S8_UINT:
+ case DATA_FORMAT_D16_UNORM_S8_UINT:
+ case DATA_FORMAT_D24_UNORM_S8_UINT:
+ case DATA_FORMAT_D32_SFLOAT_S8_UINT: {
+ return true;
+ }
+ default: {
+ }
+ }
+ return false;
+}
+
+uint32_t RenderingDeviceVulkan::get_image_format_required_size(DataFormat p_format, uint32_t p_width, uint32_t p_height, uint32_t p_depth, uint32_t p_mipmaps, uint32_t *r_blockw, uint32_t *r_blockh, uint32_t *r_depth) {
+
+ ERR_FAIL_COND_V(p_mipmaps == 0, 0);
+ uint32_t w = p_width;
+ uint32_t h = p_height;
+ uint32_t d = p_depth;
+
+ uint32_t size = 0;
+
+ uint32_t pixel_size = get_image_format_pixel_size(p_format);
+ uint32_t pixel_rshift = get_compressed_image_format_pixel_rshift(p_format);
+ uint32_t blockw, blockh;
+ get_compressed_image_format_block_dimensions(p_format, blockw, blockh);
+
+ for (uint32_t i = 0; i < p_mipmaps; i++) {
+ uint32_t bw = w % blockw != 0 ? w + (blockw - w % blockw) : w;
+ uint32_t bh = h % blockh != 0 ? h + (blockh - h % blockh) : h;
+
+ uint32_t s = bw * bh;
+
+ s *= pixel_size;
+ s >>= pixel_rshift;
+ size += s * d;
+ if (r_blockw) {
+ *r_blockw = bw;
+ }
+ if (r_blockh) {
+ *r_blockh = bh;
+ }
+ if (r_depth) {
+ *r_depth = d;
+ }
+ w = MAX(blockw, w >> 1);
+ h = MAX(blockh, h >> 1);
+ d = MAX(1, d >> 1);
+ }
+
+ return size;
+}
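+
+// Worked example: a 16x16 BC1 texture with 2 mipmaps. Mip 0 is block-aligned
+// at 16x16: 256 texels * 1 byte >> 1 = 128 bytes. Mip 1 is 8x8: 64 >> 1 = 32
+// bytes. Total reported size: 160 bytes.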
+
+uint32_t RenderingDeviceVulkan::get_image_required_mipmaps(uint32_t p_width, uint32_t p_height, uint32_t p_depth) {
+
+ //formats and block size don't really matter here since they can all go down to 1px (even if block is larger)
+ int w = p_width;
+ int h = p_height;
+ int d = p_depth;
+
+ int mipmaps = 1;
+
+ while (true) {
+
+ if (w == 1 && h == 1 && d == 1) {
+ break;
+ }
+
+ w = MAX(1, w >> 1);
+ h = MAX(1, h >> 1);
+ d = MAX(1, d >> 1);
+
+ mipmaps++;
+ }
+
+ return mipmaps;
+}
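+
+// For example, a 16x16x1 texture yields 5 mipmaps: 16, 8, 4, 2 and 1 pixels
+// per side.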
+
+///////////////////////
+
+const VkCompareOp RenderingDeviceVulkan::compare_operators[RenderingDevice::COMPARE_OP_MAX] = {
+ VK_COMPARE_OP_NEVER,
+ VK_COMPARE_OP_LESS,
+ VK_COMPARE_OP_EQUAL,
+ VK_COMPARE_OP_LESS_OR_EQUAL,
+ VK_COMPARE_OP_GREATER,
+ VK_COMPARE_OP_NOT_EQUAL,
+ VK_COMPARE_OP_GREATER_OR_EQUAL,
+ VK_COMPARE_OP_ALWAYS
+};
+
+const VkStencilOp RenderingDeviceVulkan::stencil_operations[RenderingDevice::STENCIL_OP_MAX] = {
+ VK_STENCIL_OP_KEEP,
+ VK_STENCIL_OP_ZERO,
+ VK_STENCIL_OP_REPLACE,
+ VK_STENCIL_OP_INCREMENT_AND_CLAMP,
+ VK_STENCIL_OP_DECREMENT_AND_CLAMP,
+ VK_STENCIL_OP_INVERT,
+ VK_STENCIL_OP_INCREMENT_AND_WRAP,
+ VK_STENCIL_OP_DECREMENT_AND_WRAP
+};
+
+const VkSampleCountFlagBits RenderingDeviceVulkan::rasterization_sample_count[RenderingDevice::TEXTURE_SAMPLES_MAX] = {
+ VK_SAMPLE_COUNT_1_BIT,
+ VK_SAMPLE_COUNT_2_BIT,
+ VK_SAMPLE_COUNT_4_BIT,
+ VK_SAMPLE_COUNT_8_BIT,
+ VK_SAMPLE_COUNT_16_BIT,
+ VK_SAMPLE_COUNT_32_BIT,
+ VK_SAMPLE_COUNT_64_BIT,
+};
+
+const VkLogicOp RenderingDeviceVulkan::logic_operations[RenderingDevice::LOGIC_OP_MAX] = {
+ VK_LOGIC_OP_CLEAR,
+ VK_LOGIC_OP_AND,
+ VK_LOGIC_OP_AND_REVERSE,
+ VK_LOGIC_OP_COPY,
+ VK_LOGIC_OP_AND_INVERTED,
+ VK_LOGIC_OP_NO_OP,
+ VK_LOGIC_OP_XOR,
+ VK_LOGIC_OP_OR,
+ VK_LOGIC_OP_NOR,
+ VK_LOGIC_OP_EQUIVALENT,
+ VK_LOGIC_OP_INVERT,
+ VK_LOGIC_OP_OR_REVERSE,
+ VK_LOGIC_OP_COPY_INVERTED,
+ VK_LOGIC_OP_OR_INVERTED,
+ VK_LOGIC_OP_NAND,
+ VK_LOGIC_OP_SET
+};
+
+const VkBlendFactor RenderingDeviceVulkan::blend_factors[RenderingDevice::BLEND_FACTOR_MAX] = {
+ VK_BLEND_FACTOR_ZERO,
+ VK_BLEND_FACTOR_ONE,
+ VK_BLEND_FACTOR_SRC_COLOR,
+ VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR,
+ VK_BLEND_FACTOR_DST_COLOR,
+ VK_BLEND_FACTOR_ONE_MINUS_DST_COLOR,
+ VK_BLEND_FACTOR_SRC_ALPHA,
+ VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA,
+ VK_BLEND_FACTOR_DST_ALPHA,
+ VK_BLEND_FACTOR_ONE_MINUS_DST_ALPHA,
+ VK_BLEND_FACTOR_CONSTANT_COLOR,
+ VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_COLOR,
+ VK_BLEND_FACTOR_CONSTANT_ALPHA,
+ VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA,
+ VK_BLEND_FACTOR_SRC_ALPHA_SATURATE,
+ VK_BLEND_FACTOR_SRC1_COLOR,
+ VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR,
+ VK_BLEND_FACTOR_SRC1_ALPHA,
+ VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA
+};
+const VkBlendOp RenderingDeviceVulkan::blend_operations[RenderingDevice::BLEND_OP_MAX] = {
+ VK_BLEND_OP_ADD,
+ VK_BLEND_OP_SUBTRACT,
+ VK_BLEND_OP_REVERSE_SUBTRACT,
+ VK_BLEND_OP_MIN,
+ VK_BLEND_OP_MAX
+};
+
+const VkSamplerAddressMode RenderingDeviceVulkan::address_modes[RenderingDevice::SAMPLER_REPEAT_MODE_MAX] = {
+ VK_SAMPLER_ADDRESS_MODE_REPEAT,
+ VK_SAMPLER_ADDRESS_MODE_MIRRORED_REPEAT,
+ VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE,
+ VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER,
+ VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE
+};
+
+const VkBorderColor RenderingDeviceVulkan::sampler_border_colors[RenderingDevice::SAMPLER_BORDER_COLOR_MAX] = {
+ VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK,
+ VK_BORDER_COLOR_INT_TRANSPARENT_BLACK,
+ VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK,
+ VK_BORDER_COLOR_INT_OPAQUE_BLACK,
+ VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE,
+ VK_BORDER_COLOR_INT_OPAQUE_WHITE
+};
+
+const VkImageType RenderingDeviceVulkan::vulkan_image_type[RenderingDevice::TEXTURE_TYPE_MAX] = {
+ VK_IMAGE_TYPE_1D,
+ VK_IMAGE_TYPE_2D,
+ VK_IMAGE_TYPE_3D,
+ VK_IMAGE_TYPE_2D,
+ VK_IMAGE_TYPE_1D,
+ VK_IMAGE_TYPE_2D,
+ VK_IMAGE_TYPE_2D
+};
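+
+// The table above collapses RenderingDevice texture types onto the three
+// VkImageType values: cubemaps and 1D/2D arrays use the base 1D/2D image type
+// and express their layers through arrayLayers (see texture_create() below).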
+
+/***************************/
+/**** BUFFER MANAGEMENT ****/
+/***************************/
+
+Error RenderingDeviceVulkan::_buffer_allocate(Buffer *p_buffer, uint32_t p_size, uint32_t p_usage, VmaMemoryUsage p_mapping) {
+ VkBufferCreateInfo bufferInfo;
+ bufferInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
+ bufferInfo.pNext = NULL;
+ bufferInfo.flags = 0;
+ bufferInfo.size = p_size;
+ bufferInfo.usage = p_usage;
+ bufferInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
+ bufferInfo.queueFamilyIndexCount = 0;
+ bufferInfo.pQueueFamilyIndices = NULL;
+
+ VmaAllocationCreateInfo allocInfo;
+ allocInfo.flags = 0;
+ allocInfo.usage = p_mapping;
+ allocInfo.requiredFlags = 0;
+ allocInfo.preferredFlags = 0;
+ allocInfo.memoryTypeBits = 0;
+ allocInfo.pool = NULL;
+ allocInfo.pUserData = NULL;
+
+ VkResult err = vmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &p_buffer->buffer, &p_buffer->allocation, NULL);
+ ERR_FAIL_COND_V_MSG(err, ERR_CANT_CREATE, "Can't create buffer of size: " + itos(p_size));
+ p_buffer->size = p_size;
+ p_buffer->buffer_info.buffer = p_buffer->buffer;
+ p_buffer->buffer_info.offset = 0;
+ p_buffer->buffer_info.range = p_size;
+
+ return OK;
+}
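+
+// The p_mapping argument decides where VMA places the buffer: typically
+// VMA_MEMORY_USAGE_GPU_ONLY for device-local data, or VMA_MEMORY_USAGE_CPU_ONLY
+// for host-visible memory such as the staging blocks allocated below.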
+
+Error RenderingDeviceVulkan::_buffer_free(Buffer *p_buffer) {
+ ERR_FAIL_COND_V(p_buffer->size == 0, ERR_INVALID_PARAMETER);
+
+ vmaDestroyBuffer(allocator, p_buffer->buffer, p_buffer->allocation);
+ p_buffer->buffer = NULL;
+ p_buffer->allocation = NULL;
+ p_buffer->size = 0;
+
+ return OK;
+}
+
+Error RenderingDeviceVulkan::_insert_staging_block() {
+
+ VkBufferCreateInfo bufferInfo;
+ bufferInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
+ bufferInfo.pNext = NULL;
+ bufferInfo.flags = 0;
+ bufferInfo.size = staging_buffer_block_size;
+ bufferInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
+ bufferInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
+ bufferInfo.queueFamilyIndexCount = 0;
+ bufferInfo.pQueueFamilyIndices = NULL;
+
+ VmaAllocationCreateInfo allocInfo;
+ allocInfo.flags = 0;
+ allocInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;
+ allocInfo.requiredFlags = 0;
+ allocInfo.preferredFlags = 0;
+ allocInfo.memoryTypeBits = 0;
+ allocInfo.pool = NULL;
+ allocInfo.pUserData = NULL;
+
+ StagingBufferBlock block;
+
+ VkResult err = vmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &block.buffer, &block.allocation, NULL);
+ ERR_FAIL_COND_V(err, ERR_CANT_CREATE);
+
+ block.frame_used = 0;
+ block.fill_amount = 0;
+
+ staging_buffer_blocks.insert(staging_buffer_current, block);
+ return OK;
+}
+
+Error RenderingDeviceVulkan::_staging_buffer_allocate(uint32_t p_amount, uint32_t p_required_align, uint32_t &r_alloc_offset, uint32_t &r_alloc_size, bool p_can_segment, bool p_on_draw_command_buffer) {
+ //determine a block to use
+
+ r_alloc_size = p_amount;
+
+ while (true) {
+
+ r_alloc_offset = 0;
+
+ //see if we can use current block
+ if (staging_buffer_blocks[staging_buffer_current].frame_used == frames_drawn) {
+ //we used this block this frame, let's see if there is still room
+
+ uint32_t write_from = staging_buffer_blocks[staging_buffer_current].fill_amount;
+
+ {
+ uint32_t align_remainder = write_from % p_required_align;
+ if (align_remainder != 0) {
+ write_from += p_required_align - align_remainder;
+ }
+ }
+
+ int32_t available_bytes = int32_t(staging_buffer_block_size) - int32_t(write_from);
+
+ if ((int32_t)p_amount < available_bytes) {
+ //all good, it will fit
+ r_alloc_offset = write_from;
+ } else if (p_can_segment && available_bytes >= (int32_t)p_required_align) {
+ //all of it won't fit, but at least we can fit a chunk
+ //update what needs to be written to
+ r_alloc_offset = write_from;
+ r_alloc_size = available_bytes - (available_bytes % p_required_align);
+
+ } else {
+ //can't fit it into this buffer.
+ //will need to try next buffer
+
+ staging_buffer_current = (staging_buffer_current + 1) % staging_buffer_blocks.size();
+
+ // before doing anything, though, check whether we managed to fill
+ // every available block in a single frame
+ if (staging_buffer_blocks[staging_buffer_current].frame_used == frames_drawn) {
+ //looks like we did. Let's see if we can insert a new block.
+ if (staging_buffer_blocks.size() * staging_buffer_block_size < staging_buffer_max_size) {
+ //we can, so we are safe
+ Error err = _insert_staging_block();
+ if (err) {
+ return err;
+ }
+ //claim for this frame
+ staging_buffer_blocks.write[staging_buffer_current].frame_used = frames_drawn;
+ } else {
+ // Ok, worst case scenario, all the staging buffers belong to this frame
+ // and this frame is not even done.
+ // If this is the main thread, it means the user is likely loading a lot of resources at once,
+ // otherwise, the thread should just be blocked until the next frame (currently unimplemented)
+
+ if (false) { //separate thread from render
+
+ //block_until_next_frame()
+ continue;
+ } else {
+
+ //flush everything, including setup commands. If not immediate, the draw commands also need to be flushed.
+ _flush(true);
+
+ //clear the whole staging buffer
+ for (int i = 0; i < staging_buffer_blocks.size(); i++) {
+ staging_buffer_blocks.write[i].frame_used = 0;
+ staging_buffer_blocks.write[i].fill_amount = 0;
+ }
+ //claim current
+ staging_buffer_blocks.write[staging_buffer_current].frame_used = frames_drawn;
+ }
+ }
+
+ } else {
+ //not from current frame, so continue and try again
+ continue;
+ }
+ }
+
+ } else if (staging_buffer_blocks[staging_buffer_current].frame_used <= frames_drawn - frame_count) {
+ //this is an old block, which was already processed, let's reuse
+ staging_buffer_blocks.write[staging_buffer_current].frame_used = frames_drawn;
+ staging_buffer_blocks.write[staging_buffer_current].fill_amount = 0;
+ } else if (staging_buffer_blocks[staging_buffer_current].frame_used > frames_drawn - frame_count) {
+ //this block may still be in use, let's not touch it unless we have to, so.. can we create a new one?
+ if (staging_buffer_blocks.size() * staging_buffer_block_size < staging_buffer_max_size) {
+ //we are still allowed to create a new block, so let's do that and insert it for current pos
+ Error err = _insert_staging_block();
+ if (err) {
+ return err;
+ }
+ //claim for this frame
+ staging_buffer_blocks.write[staging_buffer_current].frame_used = frames_drawn;
+ } else {
+ // oops, we are out of room and we can't create more.
+ // let's flush older frames.
+ // The logic here is that if a game is loading a lot of data from the main thread, it will need to be stalled anyway.
+ // If loading from a separate thread, we can block that thread until next frame when more room is made (not currently implemented, though).
+
+ if (false) {
+ //separate thread from render
+ //block_until_next_frame()
+ continue; //and try again
+ } else {
+
+ _flush(false);
+
+ for (int i = 0; i < staging_buffer_blocks.size(); i++) {
+ //clear all blocks but the ones from this frame
+ int block_idx = (i + staging_buffer_current) % staging_buffer_blocks.size();
+ if (staging_buffer_blocks[block_idx].frame_used == frames_drawn) {
+ break; //ok, we reached something from this frame, abort
+ }
+
+ staging_buffer_blocks.write[block_idx].frame_used = 0;
+ staging_buffer_blocks.write[block_idx].fill_amount = 0;
+ }
+
+ //claim for current frame
+ staging_buffer_blocks.write[staging_buffer_current].frame_used = frames_drawn;
+ }
+ }
+ }
+
+ //all was good, break
+ break;
+ }
+
+ staging_buffer_used = true;
+
+ return OK;
+}
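+
+// In short, the allocator above tries three things in order: keep filling the
+// block the current frame is already using; otherwise advance through the
+// ring, reusing blocks whose frames have completed or growing the ring up to
+// staging_buffer_max_size; and only as a last resort flush the command
+// buffers and reset the ring.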
+
+Error RenderingDeviceVulkan::_buffer_update(Buffer *p_buffer, size_t p_offset, const uint8_t *p_data, size_t p_data_size, bool p_use_draw_command_buffer, uint32_t p_required_align) {
+
+ //submitting may get chunked for various reasons, so convert this to a task
+ size_t to_submit = p_data_size;
+ size_t submit_from = 0;
+
+ while (to_submit > 0) {
+
+ uint32_t block_write_offset;
+ uint32_t block_write_amount;
+
+ Error err = _staging_buffer_allocate(MIN(to_submit, staging_buffer_block_size), p_required_align, block_write_offset, block_write_amount, p_use_draw_command_buffer);
+ if (err) {
+ return err;
+ }
+
+ //map the staging buffer (it's CPU-visible and coherent)
+
+ void *data_ptr = NULL;
+ {
+ VkResult vkerr = vmaMapMemory(allocator, staging_buffer_blocks[staging_buffer_current].allocation, &data_ptr);
+ if (vkerr) {
+ ERR_FAIL_V(ERR_CANT_CREATE);
+ }
+ }
+
+ //copy to staging buffer
+ copymem(((uint8_t *)data_ptr) + block_write_offset, p_data + submit_from, block_write_amount);
+
+ //unmap
+ vmaUnmapMemory(allocator, staging_buffer_blocks[staging_buffer_current].allocation);
+ //insert a command to copy this
+
+ VkBufferCopy region;
+ region.srcOffset = block_write_offset;
+ region.dstOffset = submit_from + p_offset;
+ region.size = block_write_amount;
+
+ vkCmdCopyBuffer(p_use_draw_command_buffer ? frames[frame].draw_command_buffer : frames[frame].setup_command_buffer, staging_buffer_blocks[staging_buffer_current].buffer, p_buffer->buffer, 1, &region);
+
+ staging_buffer_blocks.write[staging_buffer_current].fill_amount = block_write_offset + block_write_amount;
+
+ to_submit -= block_write_amount;
+ submit_from += block_write_amount;
+ }
+
+ return OK;
+}
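+
+// Chunking example (with an illustrative 256 KiB block size): updating
+// 384 KiB of buffer data issues two vkCmdCopyBuffer() copies of 256 KiB and
+// 128 KiB, assuming the blocks start empty; partially filled blocks shrink
+// the first chunk further.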
+
+void RenderingDeviceVulkan::_memory_barrier(VkPipelineStageFlags p_src_stage_mask, VkPipelineStageFlags p_dst_stage_mask, VkAccessFlags p_src_access, VkAccessFlags p_dst_access, bool p_sync_with_draw) {
+
+ VkMemoryBarrier mem_barrier;
+ mem_barrier.sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER;
+ mem_barrier.pNext = NULL;
+ mem_barrier.srcAccessMask = p_src_access;
+ mem_barrier.dstAccessMask = p_dst_access;
+
+ vkCmdPipelineBarrier(p_sync_with_draw ? frames[frame].draw_command_buffer : frames[frame].setup_command_buffer, p_src_stage_mask, p_dst_stage_mask, 0, 1, &mem_barrier, 0, NULL, 0, NULL);
+}
+
+void RenderingDeviceVulkan::_full_barrier(bool p_sync_with_draw) {
+ //used for debug
+ _memory_barrier(VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT,
+ VK_ACCESS_INDIRECT_COMMAND_READ_BIT |
+ VK_ACCESS_INDEX_READ_BIT |
+ VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT |
+ VK_ACCESS_UNIFORM_READ_BIT |
+ VK_ACCESS_INPUT_ATTACHMENT_READ_BIT |
+ VK_ACCESS_SHADER_READ_BIT |
+ VK_ACCESS_SHADER_WRITE_BIT |
+ VK_ACCESS_COLOR_ATTACHMENT_READ_BIT |
+ VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT |
+ VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT |
+ VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
+ VK_ACCESS_TRANSFER_READ_BIT |
+ VK_ACCESS_TRANSFER_WRITE_BIT |
+ VK_ACCESS_HOST_READ_BIT |
+ VK_ACCESS_HOST_WRITE_BIT,
+ VK_ACCESS_INDIRECT_COMMAND_READ_BIT |
+ VK_ACCESS_INDEX_READ_BIT |
+ VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT |
+ VK_ACCESS_UNIFORM_READ_BIT |
+ VK_ACCESS_INPUT_ATTACHMENT_READ_BIT |
+ VK_ACCESS_SHADER_READ_BIT |
+ VK_ACCESS_SHADER_WRITE_BIT |
+ VK_ACCESS_COLOR_ATTACHMENT_READ_BIT |
+ VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT |
+ VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT |
+ VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
+ VK_ACCESS_TRANSFER_READ_BIT |
+ VK_ACCESS_TRANSFER_WRITE_BIT |
+ VK_ACCESS_HOST_READ_BIT |
+ VK_ACCESS_HOST_WRITE_BIT,
+ p_sync_with_draw);
+}
+
+void RenderingDeviceVulkan::_buffer_memory_barrier(VkBuffer buffer, uint64_t p_from, uint64_t p_size, VkPipelineStageFlags p_src_stage_mask, VkPipelineStageFlags p_dst_stage_mask, VkAccessFlags p_src_access, VkAccessFlags p_dst_access, bool p_sync_with_draw) {
+
+ VkBufferMemoryBarrier buffer_mem_barrier;
+ buffer_mem_barrier.sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER;
+ buffer_mem_barrier.pNext = NULL;
+ buffer_mem_barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
+ buffer_mem_barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
+ buffer_mem_barrier.srcAccessMask = p_src_access;
+ buffer_mem_barrier.dstAccessMask = p_dst_access;
+ buffer_mem_barrier.buffer = buffer;
+ buffer_mem_barrier.offset = p_from;
+ buffer_mem_barrier.size = p_size;
+
+ vkCmdPipelineBarrier(p_sync_with_draw ? frames[frame].draw_command_buffer : frames[frame].setup_command_buffer, p_src_stage_mask, p_dst_stage_mask, 0, 0, NULL, 1, &buffer_mem_barrier, 0, NULL);
+}
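+
+// Both barrier helpers record into the frame's setup command buffer by
+// default; passing p_sync_with_draw = true redirects them to the draw command
+// buffer so the barrier orders against already-recorded draw work.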
+
+/*****************/
+/**** TEXTURE ****/
+/*****************/
+
+RID RenderingDeviceVulkan::texture_create(const TextureFormat &p_format, const TextureView &p_view, const Vector<Vector<uint8_t> > &p_data) {
+
+ _THREAD_SAFE_METHOD_
+
+ VkImageCreateInfo image_create_info;
+ image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
+ image_create_info.pNext = NULL;
+ image_create_info.flags = 0;
+
+ VkImageFormatListCreateInfoKHR format_list_create_info;
+ Vector<VkFormat> allowed_formats;
+
+ if (p_format.shareable_formats.size()) {
+ image_create_info.flags |= VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT;
+ for (int i = 0; i < p_format.shareable_formats.size(); i++) {
+ allowed_formats.push_back(vulkan_formats[p_format.shareable_formats[i]]);
+ }
+
+ format_list_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_FORMAT_LIST_CREATE_INFO_KHR;
+ format_list_create_info.pNext = NULL;
+ format_list_create_info.viewFormatCount = allowed_formats.size();
+ format_list_create_info.pViewFormats = allowed_formats.ptr();
+ image_create_info.pNext = &format_list_create_info;
+
+ ERR_FAIL_COND_V_MSG(p_format.shareable_formats.find(p_format.format) == -1, RID(),
+ "If a list of shareable formats is supplied, the texture's own format must be present in that list.");
+ ERR_FAIL_COND_V_MSG(p_view.format_override != DATA_FORMAT_MAX && p_format.shareable_formats.find(p_view.format_override) == -1, RID(),
+ "If a list of shareable formats is supplied, the view's format override must be present in that list.");
+ }
+ if (p_format.type == TEXTURE_TYPE_CUBE || p_format.type == TEXTURE_TYPE_CUBE_ARRAY) {
+ image_create_info.flags |= VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT;
+ }
+ /*if (p_format.type == TEXTURE_TYPE_2D || p_format.type == TEXTURE_TYPE_2D_ARRAY) {
+ image_create_info.flags |= VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT;
+ }*/
+
+ ERR_FAIL_INDEX_V(p_format.type, TEXTURE_TYPE_MAX, RID());
+
+ image_create_info.imageType = vulkan_image_type[p_format.type];
+
+ ERR_FAIL_COND_V_MSG(p_format.width < 1, RID(), "Width must be at least 1 for all textures.");
+
+ image_create_info.format = vulkan_formats[p_format.format];
+
+ image_create_info.extent.width = p_format.width;
+ if (image_create_info.imageType == VK_IMAGE_TYPE_3D || image_create_info.imageType == VK_IMAGE_TYPE_2D) {
+ ERR_FAIL_COND_V_MSG(p_format.height < 1, RID(), "Height must be at least 1 for 2D and 3D textures.");
+ image_create_info.extent.height = p_format.height;
+ } else {
+ image_create_info.extent.height = 1;
+ }
+
+ if (image_create_info.imageType == VK_IMAGE_TYPE_3D) {
+ ERR_FAIL_COND_V_MSG(p_format.depth < 1, RID(), "Depth must be at least 1 for 3D textures.");
+ image_create_info.extent.depth = p_format.depth;
+ } else {
+ image_create_info.extent.depth = 1;
+ }
+
+ ERR_FAIL_COND_V(p_format.mipmaps < 1, RID());
+
+ image_create_info.mipLevels = p_format.mipmaps;
+
+ if (p_format.type == TEXTURE_TYPE_1D_ARRAY || p_format.type == TEXTURE_TYPE_2D_ARRAY || p_format.type == TEXTURE_TYPE_CUBE_ARRAY || p_format.type == TEXTURE_TYPE_CUBE) {
+ ERR_FAIL_COND_V_MSG(p_format.array_layers < 1, RID(),
+ "Number of layers must be at least 1 for arrays and cubemaps.");
+ ERR_FAIL_COND_V_MSG((p_format.type == TEXTURE_TYPE_CUBE_ARRAY || p_format.type == TEXTURE_TYPE_CUBE) && (p_format.array_layers % 6) != 0, RID(),
+ "Cubemap and cubemap array textures must provide a layer count that is a multiple of 6.");
+ image_create_info.arrayLayers = p_format.array_layers;
+ } else {
+ image_create_info.arrayLayers = 1;
+ }
+
+ ERR_FAIL_INDEX_V(p_format.samples, TEXTURE_SAMPLES_MAX, RID());
+
+ image_create_info.samples = rasterization_sample_count[p_format.samples];
+ image_create_info.tiling = (p_format.usage_bits & TEXTURE_USAGE_CPU_READ_BIT) ? VK_IMAGE_TILING_LINEAR : VK_IMAGE_TILING_OPTIMAL;
+
+ //usage
+ image_create_info.usage = 0;
+
+ if (p_format.usage_bits & TEXTURE_USAGE_SAMPLING_BIT) {
+ image_create_info.usage |= VK_IMAGE_USAGE_SAMPLED_BIT;
+ }
+
+ if (p_format.usage_bits & TEXTURE_USAGE_STORAGE_BIT) {
+ image_create_info.usage |= VK_IMAGE_USAGE_STORAGE_BIT;
+ }
+
+ if (p_format.usage_bits & TEXTURE_USAGE_COLOR_ATTACHMENT_BIT) {
+ image_create_info.usage |= VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
+ }
+
+ if (p_format.usage_bits & TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) {
+ image_create_info.usage |= VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT;
+ }
+
+ if (p_format.usage_bits & TEXTURE_USAGE_CAN_UPDATE_BIT) {
+ image_create_info.usage |= VK_IMAGE_USAGE_TRANSFER_DST_BIT;
+ }
+ if (p_format.usage_bits & TEXTURE_USAGE_CAN_COPY_FROM_BIT) {
+ image_create_info.usage |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
+ }
+
+ if (p_format.usage_bits & TEXTURE_USAGE_CAN_COPY_TO_BIT) {
+ image_create_info.usage |= VK_IMAGE_USAGE_TRANSFER_DST_BIT;
+ }
+
+ image_create_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
+ image_create_info.queueFamilyIndexCount = 0;
+ image_create_info.pQueueFamilyIndices = NULL;
+ image_create_info.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
+
+ uint32_t required_mipmaps = get_image_required_mipmaps(image_create_info.extent.width, image_create_info.extent.height, image_create_info.extent.depth);
+
+ ERR_FAIL_COND_V_MSG(required_mipmaps < image_create_info.mipLevels, RID(),
+ "Too many mipmaps requested for texture format and dimensions (" + itos(image_create_info.mipLevels) + "), maximum allowed: (" + itos(required_mipmaps) + ").");
+
+ if (p_data.size()) {
+
+ ERR_FAIL_COND_V_MSG(!(p_format.usage_bits & TEXTURE_USAGE_CAN_UPDATE_BIT), RID(),
+ "Texture needs the TEXTURE_USAGE_CAN_UPDATE_BIT usage flag in order to be updated at initialization or later");
+
+ int expected_images = image_create_info.arrayLayers;
+ ERR_FAIL_COND_V_MSG(p_data.size() != expected_images, RID(),
+ "Default supplied data for image format is of invalid length (" + itos(p_data.size()) + "), should be (" + itos(expected_images) + ").");
+
+ for (uint32_t i = 0; i < image_create_info.arrayLayers; i++) {
+ uint32_t required_size = get_image_format_required_size(p_format.format, image_create_info.extent.width, image_create_info.extent.height, image_create_info.extent.depth, image_create_info.mipLevels);
+ ERR_FAIL_COND_V_MSG((uint32_t)p_data[i].size() != required_size, RID(),
+ "Data for slice index " + itos(i) + " (mapped to layer " + itos(i) + ") differs in size (supplied: " + itos(p_data[i].size()) + ") than what is required by the format (" + itos(required_size) + ").");
+ }
+ }
+
+ {
+ //validate that this image is supported for the intended use
+ VkFormatProperties properties;
+ vkGetPhysicalDeviceFormatProperties(context->get_physical_device(), image_create_info.format, &properties);
+ VkFormatFeatureFlags flags;
+
+ String format_text = "'" + String(named_formats[p_format.format]) + "'";
+
+ if (p_format.usage_bits & TEXTURE_USAGE_CPU_READ_BIT) {
+ flags = properties.linearTilingFeatures;
+ format_text += " (with CPU read bit)";
+ } else {
+ flags = properties.optimalTilingFeatures;
+ }
+
+ if (p_format.usage_bits & TEXTURE_USAGE_SAMPLING_BIT && !(flags & VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT)) {
+ ERR_FAIL_V_MSG(RID(), "Format " + format_text + " does not support usage as sampling texture.");
+ }
+
+ if (p_format.usage_bits & TEXTURE_USAGE_COLOR_ATTACHMENT_BIT && !(flags & VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT)) {
+ ERR_FAIL_V_MSG(RID(), "Format " + format_text + " does not support usage as color attachment.");
+ }
+
+ if (p_format.usage_bits & TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT && !(flags & VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT)) {
+ printf("vkformat: %x\n", image_create_info.format);
+ ERR_FAIL_V_MSG(RID(), "Format " + format_text + " does not support usage as depth-stencil attachment.");
+ }
+
+ if (p_format.usage_bits & TEXTURE_USAGE_STORAGE_BIT && !(flags & VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT)) {
+ ERR_FAIL_V_MSG(RID(), "Format " + format_text + " does not support usage as storage image.");
+ }
+
+ if (p_format.usage_bits & TEXTURE_USAGE_STORAGE_ATOMIC_BIT && !(flags & VK_FORMAT_FEATURE_STORAGE_IMAGE_ATOMIC_BIT)) {
+ ERR_FAIL_V_MSG(RID(), "Format " + format_text + " does not support usage as atomic storage image.");
+ }
+ }
+
+ //some view validation
+
+ if (p_view.format_override != DATA_FORMAT_MAX) {
+ ERR_FAIL_INDEX_V(p_view.format_override, DATA_FORMAT_MAX, RID());
+ }
+ ERR_FAIL_INDEX_V(p_view.swizzle_r, TEXTURE_SWIZZLE_MAX, RID());
+ ERR_FAIL_INDEX_V(p_view.swizzle_g, TEXTURE_SWIZZLE_MAX, RID());
+ ERR_FAIL_INDEX_V(p_view.swizzle_b, TEXTURE_SWIZZLE_MAX, RID());
+ ERR_FAIL_INDEX_V(p_view.swizzle_a, TEXTURE_SWIZZLE_MAX, RID());
+
+ //allocate memory
+
+ VmaAllocationCreateInfo allocInfo;
+ allocInfo.flags = 0;
+ allocInfo.usage = p_format.usage_bits & TEXTURE_USAGE_CPU_READ_BIT ? VMA_MEMORY_USAGE_CPU_ONLY : VMA_MEMORY_USAGE_GPU_ONLY;
+ allocInfo.requiredFlags = 0;
+ allocInfo.preferredFlags = 0;
+ allocInfo.memoryTypeBits = 0;
+ allocInfo.pool = NULL;
+ allocInfo.pUserData = NULL;
+
+ Texture texture;
+
+ VkResult err = vmaCreateImage(allocator, &image_create_info, &allocInfo, &texture.image, &texture.allocation, &texture.allocation_info);
+ ERR_FAIL_COND_V(err, RID());
+
+ texture.type = p_format.type;
+ texture.format = p_format.format;
+ texture.width = image_create_info.extent.width;
+ texture.height = image_create_info.extent.height;
+ texture.depth = image_create_info.extent.depth;
+ texture.layers = image_create_info.arrayLayers;
+ texture.mipmaps = image_create_info.mipLevels;
+ texture.usage_flags = p_format.usage_bits;
+ texture.samples = p_format.samples;
+ texture.allowed_shared_formats = p_format.shareable_formats;
+
+ //set base layout based on usage priority
+
+ if (p_format.usage_bits & TEXTURE_USAGE_SAMPLING_BIT) {
+ //first priority, readable
+ texture.layout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
+
+ } else if (p_format.usage_bits & TEXTURE_USAGE_STORAGE_BIT) {
+ //second priority, storage
+
+ texture.layout = VK_IMAGE_LAYOUT_GENERAL;
+
+ } else if (p_format.usage_bits & TEXTURE_USAGE_COLOR_ATTACHMENT_BIT) {
+ //third priority, color or depth
+
+ texture.layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
+
+ } else if (p_format.usage_bits & TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) {
+
+ texture.layout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
+
+ } else {
+ texture.layout = VK_IMAGE_LAYOUT_GENERAL;
+ }
+
+ if (p_format.usage_bits & TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) {
+
+ texture.read_aspect_mask = VK_IMAGE_ASPECT_DEPTH_BIT;
+ texture.barrier_aspect_mask = VK_IMAGE_ASPECT_DEPTH_BIT;
+
+ if (format_has_stencil(p_format.format)) {
+ texture.barrier_aspect_mask |= VK_IMAGE_ASPECT_STENCIL_BIT;
+ }
+ } else {
+ texture.read_aspect_mask = VK_IMAGE_ASPECT_COLOR_BIT;
+ texture.barrier_aspect_mask = VK_IMAGE_ASPECT_COLOR_BIT;
+ }
+
+ texture.bound = false;
+
+ //create view
+
+ VkImageViewCreateInfo image_view_create_info;
+ image_view_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
+ image_view_create_info.pNext = NULL;
+ image_view_create_info.flags = 0;
+ image_view_create_info.image = texture.image;
+
+ static const VkImageViewType view_types[TEXTURE_TYPE_MAX] = {
+ VK_IMAGE_VIEW_TYPE_1D,
+ VK_IMAGE_VIEW_TYPE_2D,
+ VK_IMAGE_VIEW_TYPE_3D,
+ VK_IMAGE_VIEW_TYPE_CUBE,
+ VK_IMAGE_VIEW_TYPE_1D_ARRAY,
+ VK_IMAGE_VIEW_TYPE_2D_ARRAY,
+ VK_IMAGE_VIEW_TYPE_CUBE_ARRAY,
+ };
+
+ image_view_create_info.viewType = view_types[p_format.type];
+ if (p_view.format_override == DATA_FORMAT_MAX) {
+ image_view_create_info.format = image_create_info.format;
+ } else {
+ image_view_create_info.format = vulkan_formats[p_view.format_override];
+ }
+
+ static const VkComponentSwizzle component_swizzles[TEXTURE_SWIZZLE_MAX] = {
+ VK_COMPONENT_SWIZZLE_IDENTITY,
+ VK_COMPONENT_SWIZZLE_ZERO,
+ VK_COMPONENT_SWIZZLE_ONE,
+ VK_COMPONENT_SWIZZLE_R,
+ VK_COMPONENT_SWIZZLE_G,
+ VK_COMPONENT_SWIZZLE_B,
+ VK_COMPONENT_SWIZZLE_A
+ };
+
+ image_view_create_info.components.r = component_swizzles[p_view.swizzle_r];
+ image_view_create_info.components.g = component_swizzles[p_view.swizzle_g];
+ image_view_create_info.components.b = component_swizzles[p_view.swizzle_b];
+ image_view_create_info.components.a = component_swizzles[p_view.swizzle_a];
+
+ image_view_create_info.subresourceRange.baseMipLevel = 0;
+ image_view_create_info.subresourceRange.levelCount = image_create_info.mipLevels;
+ image_view_create_info.subresourceRange.baseArrayLayer = 0;
+ image_view_create_info.subresourceRange.layerCount = image_create_info.arrayLayers;
+ if (p_format.usage_bits & TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) {
+ image_view_create_info.subresourceRange.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
+ } else {
+ image_view_create_info.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
+ }
+
+ err = vkCreateImageView(device, &image_view_create_info, NULL, &texture.view);
+
+ if (err) {
+ vmaDestroyImage(allocator, texture.image, texture.allocation);
+ ERR_FAIL_V(RID());
+ }
+
+ //barrier to set layout
+ {
+ VkImageMemoryBarrier image_memory_barrier;
+ image_memory_barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
+ image_memory_barrier.pNext = NULL;
+ image_memory_barrier.srcAccessMask = 0;
+ image_memory_barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
+ image_memory_barrier.oldLayout = VK_IMAGE_LAYOUT_UNDEFINED;
+ image_memory_barrier.newLayout = texture.layout;
+ image_memory_barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
+ image_memory_barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
+ image_memory_barrier.image = texture.image;
+ image_memory_barrier.subresourceRange.aspectMask = texture.barrier_aspect_mask;
+ image_memory_barrier.subresourceRange.baseMipLevel = 0;
+ image_memory_barrier.subresourceRange.levelCount = image_create_info.mipLevels;
+ image_memory_barrier.subresourceRange.baseArrayLayer = 0;
+ image_memory_barrier.subresourceRange.layerCount = image_create_info.arrayLayers;
+
+ vkCmdPipelineBarrier(frames[frame].setup_command_buffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, 0, 0, NULL, 0, NULL, 1, &image_memory_barrier);
+ }
+
+ RID id = texture_owner.make_rid(texture);
+
+ if (p_data.size()) {
+
+ for (uint32_t i = 0; i < image_create_info.arrayLayers; i++) {
+ texture_update(id, i, p_data[i]);
+ }
+ }
+ return id;
+}
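+
+// Example of the creation path above (a sketch; `rd`, the sizes and the
+// absence of initial data are hypothetical): a small 2D texture that can be
+// sampled and updated later:
+//
+//   RenderingDevice::TextureFormat tf;
+//   tf.format = DATA_FORMAT_R8G8B8A8_UNORM;
+//   tf.width = 256;
+//   tf.height = 256;
+//   tf.usage_bits = TEXTURE_USAGE_SAMPLING_BIT | TEXTURE_USAGE_CAN_UPDATE_BIT;
+//   RID tex = rd->texture_create(tf, RenderingDevice::TextureView());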
+
+RID RenderingDeviceVulkan::texture_create_shared(const TextureView &p_view, RID p_with_texture) {
+
+ _THREAD_SAFE_METHOD_
+
+ Texture *src_texture = texture_owner.getornull(p_with_texture);
+ ERR_FAIL_COND_V(!src_texture, RID());
+
+ if (src_texture->owner.is_valid()) { //this is a view into another texture; redirect to the owner
+ p_with_texture = src_texture->owner;
+ src_texture = texture_owner.getornull(src_texture->owner);
+ ERR_FAIL_COND_V(!src_texture, RID()); //this is a bug
+ }
+
+ //create view
+
+ Texture texture = *src_texture;
+
+ VkImageViewCreateInfo image_view_create_info;
+ image_view_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
+ image_view_create_info.pNext = NULL;
+ image_view_create_info.flags = 0;
+ image_view_create_info.image = texture.image;
+
+ static const VkImageViewType view_types[TEXTURE_TYPE_MAX] = {
+ VK_IMAGE_VIEW_TYPE_1D,
+ VK_IMAGE_VIEW_TYPE_2D,
+ VK_IMAGE_VIEW_TYPE_3D,
+ VK_IMAGE_VIEW_TYPE_CUBE,
+ VK_IMAGE_VIEW_TYPE_1D_ARRAY,
+ VK_IMAGE_VIEW_TYPE_2D_ARRAY,
+ VK_IMAGE_VIEW_TYPE_CUBE_ARRAY,
+ };
+
+ image_view_create_info.viewType = view_types[texture.type];
+ if (p_view.format_override == DATA_FORMAT_MAX || p_view.format_override == texture.format) {
+ image_view_create_info.format = vulkan_formats[texture.format];
+ } else {
+ ERR_FAIL_INDEX_V(p_view.format_override, DATA_FORMAT_MAX, RID());
+
+ ERR_FAIL_COND_V_MSG(texture.allowed_shared_formats.find(p_view.format_override) == -1, RID(),
+ "Format override is not in the list of allowed shareable formats for original texture.");
+ image_view_create_info.format = vulkan_formats[p_view.format_override];
+ }
+
+ static const VkComponentSwizzle component_swizzles[TEXTURE_SWIZZLE_MAX] = {
+ VK_COMPONENT_SWIZZLE_IDENTITY,
+ VK_COMPONENT_SWIZZLE_ZERO,
+ VK_COMPONENT_SWIZZLE_ONE,
+ VK_COMPONENT_SWIZZLE_R,
+ VK_COMPONENT_SWIZZLE_G,
+ VK_COMPONENT_SWIZZLE_B,
+ VK_COMPONENT_SWIZZLE_A
+ };
+
+ image_view_create_info.components.r = component_swizzles[p_view.swizzle_r];
+ image_view_create_info.components.g = component_swizzles[p_view.swizzle_g];
+ image_view_create_info.components.b = component_swizzles[p_view.swizzle_b];
+ image_view_create_info.components.a = component_swizzles[p_view.swizzle_a];
+
+ image_view_create_info.subresourceRange.baseMipLevel = 0;
+ image_view_create_info.subresourceRange.levelCount = texture.mipmaps;
+ image_view_create_info.subresourceRange.layerCount = texture.layers;
+ image_view_create_info.subresourceRange.baseArrayLayer = 0;
+
+ if (texture.usage_flags & TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) {
+ image_view_create_info.subresourceRange.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
+ } else {
+ image_view_create_info.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
+ }
+
+ VkResult err = vkCreateImageView(device, &image_view_create_info, NULL, &texture.view);
+
+ if (err) {
+ ERR_FAIL_V(RID());
+ }
+
+ texture.owner = p_with_texture;
+ RID id = texture_owner.make_rid(texture);
+ _add_dependency(id, p_with_texture);
+
+ return id;
+}
+
+RID RenderingDeviceVulkan::texture_create_shared_from_slice(const TextureView &p_view, RID p_with_texture, uint32_t p_layer, uint32_t p_mipmap, TextureSliceType p_slice_type) {
+
+ _THREAD_SAFE_METHOD_
+
+ Texture *src_texture = texture_owner.getornull(p_with_texture);
+ ERR_FAIL_COND_V(!src_texture, RID());
+
+ if (src_texture->owner.is_valid()) { //this is a view into another texture; redirect to the owner
+ p_with_texture = src_texture->owner;
+ src_texture = texture_owner.getornull(src_texture->owner);
+ ERR_FAIL_COND_V(!src_texture, RID()); //this is a bug
+ }
+
+ ERR_FAIL_COND_V_MSG(p_slice_type == TEXTURE_SLICE_CUBEMAP && (src_texture->type != TEXTURE_TYPE_CUBE && src_texture->type != TEXTURE_TYPE_CUBE_ARRAY), RID(),
+ "Can only create a cubemap slice from a cubemap or cubemap array texture.");
+
+ ERR_FAIL_COND_V_MSG(p_slice_type == TEXTURE_SLICE_3D && src_texture->type != TEXTURE_TYPE_3D, RID(),
+ "Can only create a 3D slice from a 3D texture.");
+
+ //create view
+
+ ERR_FAIL_UNSIGNED_INDEX_V(p_mipmap, src_texture->mipmaps, RID());
+ ERR_FAIL_UNSIGNED_INDEX_V(p_layer, src_texture->layers, RID());
+
+ Texture texture = *src_texture;
+ get_image_format_required_size(texture.format, texture.width, texture.height, texture.depth, p_mipmap + 1, &texture.width, &texture.height);
+ texture.mipmaps = 1;
+ texture.layers = p_slice_type == TEXTURE_SLICE_CUBEMAP ? 6 : 1;
+
+ VkImageViewCreateInfo image_view_create_info;
+ image_view_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
+ image_view_create_info.pNext = NULL;
+ image_view_create_info.flags = 0;
+ image_view_create_info.image = texture.image;
+
+ static const VkImageViewType view_types[TEXTURE_TYPE_MAX] = {
+ VK_IMAGE_VIEW_TYPE_1D,
+ VK_IMAGE_VIEW_TYPE_2D,
+ VK_IMAGE_VIEW_TYPE_2D,
+ VK_IMAGE_VIEW_TYPE_2D,
+ VK_IMAGE_VIEW_TYPE_1D,
+ VK_IMAGE_VIEW_TYPE_2D,
+ VK_IMAGE_VIEW_TYPE_2D,
+ };
+
+ image_view_create_info.viewType = p_slice_type == TEXTURE_SLICE_CUBEMAP ? VK_IMAGE_VIEW_TYPE_CUBE : (p_slice_type == TEXTURE_SLICE_3D ? VK_IMAGE_VIEW_TYPE_3D : view_types[texture.type]);
+ if (p_view.format_override == DATA_FORMAT_MAX || p_view.format_override == texture.format) {
+ image_view_create_info.format = vulkan_formats[texture.format];
+ } else {
+ ERR_FAIL_INDEX_V(p_view.format_override, DATA_FORMAT_MAX, RID());
+
+ ERR_FAIL_COND_V_MSG(texture.allowed_shared_formats.find(p_view.format_override) == -1, RID(),
+ "Format override is not in the list of allowed shareable formats for original texture.");
+ image_view_create_info.format = vulkan_formats[p_view.format_override];
+ }
+
+ static const VkComponentSwizzle component_swizzles[TEXTURE_SWIZZLE_MAX] = {
+ VK_COMPONENT_SWIZZLE_IDENTITY,
+ VK_COMPONENT_SWIZZLE_ZERO,
+ VK_COMPONENT_SWIZZLE_ONE,
+ VK_COMPONENT_SWIZZLE_R,
+ VK_COMPONENT_SWIZZLE_G,
+ VK_COMPONENT_SWIZZLE_B,
+ VK_COMPONENT_SWIZZLE_A
+ };
+
+ image_view_create_info.components.r = component_swizzles[p_view.swizzle_r];
+ image_view_create_info.components.g = component_swizzles[p_view.swizzle_g];
+ image_view_create_info.components.b = component_swizzles[p_view.swizzle_b];
+ image_view_create_info.components.a = component_swizzles[p_view.swizzle_a];
+
+ if (p_slice_type == TEXTURE_SLICE_CUBEMAP) {
+ ERR_FAIL_COND_V_MSG(p_layer >= src_texture->layers, RID(),
+ "Specified layer is invalid for cubemap.");
+ ERR_FAIL_COND_V_MSG((p_layer % 6) != 0, RID(),
+ "Specified layer must be a multiple of 6.");
+ }
+ image_view_create_info.subresourceRange.baseMipLevel = p_mipmap;
+ image_view_create_info.subresourceRange.levelCount = 1;
+ image_view_create_info.subresourceRange.layerCount = p_slice_type == TEXTURE_SLICE_CUBEMAP ? 6 : 1;
+ image_view_create_info.subresourceRange.baseArrayLayer = p_layer;
+
+ if (texture.usage_flags & TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) {
+ image_view_create_info.subresourceRange.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
+ } else {
+ image_view_create_info.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
+ }
+
+ VkResult err = vkCreateImageView(device, &image_view_create_info, NULL, &texture.view);
+
+ if (err) {
+ ERR_FAIL_V(RID());
+ }
+
+ texture.owner = p_with_texture;
+ RID id = texture_owner.make_rid(texture);
+ _add_dependency(id, p_with_texture);
+
+ return id;
+}
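+
+// Example (sketch; `rd` and `base_tex` are hypothetical): exposing mip 2 of
+// layer 0 of an existing texture as a standalone single-mip, single-layer
+// view:
+//
+//   RID slice = rd->texture_create_shared_from_slice(
+//           RenderingDevice::TextureView(), base_tex, 0, 2, TEXTURE_SLICE_2D);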
+
+Error RenderingDeviceVulkan::texture_update(RID p_texture, uint32_t p_layer, const Vector<uint8_t> &p_data, bool p_sync_with_draw) {
+
+ _THREAD_SAFE_METHOD_
+
+ ERR_FAIL_COND_V_MSG(draw_list && p_sync_with_draw, ERR_INVALID_PARAMETER,
+ "Updating textures in 'sync to draw' mode is forbidden during creation of a draw list");
+
+ Texture *texture = texture_owner.getornull(p_texture);
+ ERR_FAIL_COND_V(!texture, ERR_INVALID_PARAMETER);
+
+ if (texture->owner != RID()) {
+ p_texture = texture->owner;
+ texture = texture_owner.getornull(texture->owner);
+ ERR_FAIL_COND_V(!texture, ERR_BUG); //this is a bug
+ }
+
+ ERR_FAIL_COND_V_MSG(texture->bound, ERR_CANT_ACQUIRE_RESOURCE,
+ "Texture can't be updated while a render pass that uses it is being created. Ensure render pass is finalized (and that it was created with RENDER_PASS_CONTENTS_FINISH) to unbind this texture.");
+
+ ERR_FAIL_COND_V_MSG(!(texture->usage_flags & TEXTURE_USAGE_CAN_UPDATE_BIT), ERR_INVALID_PARAMETER,
+ "Texture requires the TEXTURE_USAGE_CAN_UPDATE_BIT in order to be updatable.");
+
+ uint32_t layer_count = texture->layers;
+ if (texture->type == TEXTURE_TYPE_CUBE || texture->type == TEXTURE_TYPE_CUBE_ARRAY) {
+ layer_count *= 6;
+ }
+ ERR_FAIL_COND_V(p_layer >= layer_count, ERR_INVALID_PARAMETER);
+
+ uint32_t width, height;
+ uint32_t image_size = get_image_format_required_size(texture->format, texture->width, texture->height, texture->depth, texture->mipmaps, &width, &height);
+ uint32_t required_size = image_size;
+ uint32_t required_align = get_compressed_image_format_block_byte_size(texture->format);
+ if (required_align == 1) {
+ required_align = get_image_format_pixel_size(texture->format);
+ }
+ if ((required_align % 4) != 0) { //bufferOffset must be a multiple of 4 as well as of the texel size, so use a common multiple
+ required_align *= 4;
+ }
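+ //e.g. a 3-byte RGB8 pixel is not 4-byte aligned, so the staging
+ //allocations below get aligned to 12 bytes, which satisfies both rules.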
+
+ ERR_FAIL_COND_V_MSG(required_size != (uint32_t)p_data.size(), ERR_INVALID_PARAMETER,
+ "Required size for texture update (" + itos(required_size) + ") does not match data supplied size (" + itos(p_data.size()) + ").");
+
+ uint32_t region_size = texture_upload_region_size_px;
+
+ const uint8_t *r = p_data.ptr();
+
+ VkCommandBuffer command_buffer = p_sync_with_draw ? frames[frame].draw_command_buffer : frames[frame].setup_command_buffer;
+
+ //barrier to transfer
+ {
+ VkImageMemoryBarrier image_memory_barrier;
+ image_memory_barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
+ image_memory_barrier.pNext = NULL;
+ image_memory_barrier.srcAccessMask = 0;
+ image_memory_barrier.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
+ image_memory_barrier.oldLayout = texture->layout;
+ image_memory_barrier.newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
+
+ image_memory_barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
+ image_memory_barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
+ image_memory_barrier.image = texture->image;
+ image_memory_barrier.subresourceRange.aspectMask = texture->barrier_aspect_mask;
+ image_memory_barrier.subresourceRange.baseMipLevel = 0;
+ image_memory_barrier.subresourceRange.levelCount = texture->mipmaps;
+ image_memory_barrier.subresourceRange.baseArrayLayer = p_layer;
+ image_memory_barrier.subresourceRange.layerCount = 1;
+
+ vkCmdPipelineBarrier(command_buffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0, 0, NULL, 0, NULL, 1, &image_memory_barrier);
+ }
+
+ uint32_t mipmap_offset = 0;
+ for (uint32_t mm_i = 0; mm_i < texture->mipmaps; mm_i++) {
+
+ uint32_t depth;
+ uint32_t image_total = get_image_format_required_size(texture->format, texture->width, texture->height, texture->depth, mm_i + 1, &width, &height, &depth);
+
+ const uint8_t *read_ptr_mipmap = r + mipmap_offset;
+ image_size = image_total - mipmap_offset;
+
+ for (uint32_t z = 0; z < depth; z++) { //for 3D textures, depth may be > 1
+
+ const uint8_t *read_ptr = read_ptr_mipmap + image_size * z / depth;
+
+ for (uint32_t x = 0; x < width; x += region_size) {
+ for (uint32_t y = 0; y < height; y += region_size) {
+
+ uint32_t region_w = MIN(region_size, width - x);
+ uint32_t region_h = MIN(region_size, height - y);
+
+ uint32_t pixel_size = get_image_format_pixel_size(texture->format);
+ uint32_t to_allocate = region_w * region_h * pixel_size;
+ to_allocate >>= get_compressed_image_format_pixel_rshift(texture->format);
+
+ uint32_t alloc_offset, alloc_size;
+ Error err = _staging_buffer_allocate(to_allocate, required_align, alloc_offset, alloc_size, false, p_sync_with_draw);
+ ERR_FAIL_COND_V(err, ERR_CANT_CREATE);
+
+ uint8_t *write_ptr;
+
+ { //map
+ void *data_ptr = NULL;
+ VkResult vkerr = vmaMapMemory(allocator, staging_buffer_blocks[staging_buffer_current].allocation, &data_ptr);
+ if (vkerr) {
+ ERR_FAIL_V(ERR_CANT_CREATE);
+ }
+ write_ptr = (uint8_t *)data_ptr;
+ write_ptr += alloc_offset;
+ }
+
+ uint32_t block_w, block_h;
+ get_compressed_image_format_block_dimensions(texture->format, block_w, block_h);
+
+ ERR_FAIL_COND_V(region_w % block_w, ERR_BUG);
+ ERR_FAIL_COND_V(region_h % block_h, ERR_BUG);
+
+ if (block_w != 1 || block_h != 1) {
+ //compressed image (blocks)
+ //must copy a block region
+
+ uint32_t block_size = get_compressed_image_format_block_byte_size(texture->format);
+ //re-create current variables in blocky format
+ uint32_t xb = x / block_w;
+ uint32_t yb = y / block_h;
+ uint32_t wb = width / block_w;
+ //uint32_t hb = height / block_h;
+ uint32_t region_wb = region_w / block_w;
+ uint32_t region_hb = region_h / block_h;
+ for (uint32_t xr = 0; xr < region_wb; xr++) {
+ for (uint32_t yr = 0; yr < region_hb; yr++) {
+ uint32_t src_offset = ((yr + yb) * wb + xr + xb) * block_size;
+ uint32_t dst_offset = (yr * region_wb + xr) * block_size;
+ //copy block
+ for (uint32_t i = 0; i < block_size; i++) {
+ write_ptr[dst_offset + i] = read_ptr[src_offset + i];
+ }
+ }
+ }
+
+ } else {
+ //regular image (pixels)
+ //must copy a pixel region
+
+ for (uint32_t xr = 0; xr < region_w; xr++) {
+ for (uint32_t yr = 0; yr < region_h; yr++) {
+ uint32_t src_offset = ((yr + y) * width + xr + x) * pixel_size;
+ uint32_t dst_offset = (yr * region_w + xr) * pixel_size;
+ //copy block
+ for (uint32_t i = 0; i < pixel_size; i++) {
+
+ write_ptr[dst_offset + i] = read_ptr[src_offset + i];
+ }
+ }
+ }
+ }
+
+ { //unmap
+ vmaUnmapMemory(allocator, staging_buffer_blocks[staging_buffer_current].allocation);
+ }
+
+ VkBufferImageCopy buffer_image_copy;
+ buffer_image_copy.bufferOffset = alloc_offset;
+ buffer_image_copy.bufferRowLength = 0; //tightly packed
+ buffer_image_copy.bufferImageHeight = 0; //tightly packed
+
+ buffer_image_copy.imageSubresource.aspectMask = texture->read_aspect_mask;
+ buffer_image_copy.imageSubresource.mipLevel = mm_i;
+ buffer_image_copy.imageSubresource.baseArrayLayer = p_layer;
+ buffer_image_copy.imageSubresource.layerCount = 1;
+
+ buffer_image_copy.imageOffset.x = x;
+ buffer_image_copy.imageOffset.y = y;
+ buffer_image_copy.imageOffset.z = z;
+
+ buffer_image_copy.imageExtent.width = region_w;
+ buffer_image_copy.imageExtent.height = region_h;
+ buffer_image_copy.imageExtent.depth = 1;
+
+ vkCmdCopyBufferToImage(command_buffer, staging_buffer_blocks[staging_buffer_current].buffer, texture->image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &buffer_image_copy);
+
+ staging_buffer_blocks.write[staging_buffer_current].fill_amount += alloc_size;
+ }
+ }
+ }
+
+ mipmap_offset = image_total;
+ }
+
+ //barrier to restore layout
+ {
+ VkImageMemoryBarrier image_memory_barrier;
+ image_memory_barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
+ image_memory_barrier.pNext = NULL;
+ image_memory_barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
+ image_memory_barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
+ image_memory_barrier.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
+ image_memory_barrier.newLayout = texture->layout;
+ image_memory_barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
+ image_memory_barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
+ image_memory_barrier.image = texture->image;
+ image_memory_barrier.subresourceRange.aspectMask = texture->barrier_aspect_mask;
+ image_memory_barrier.subresourceRange.baseMipLevel = 0;
+ image_memory_barrier.subresourceRange.levelCount = texture->mipmaps;
+ image_memory_barrier.subresourceRange.baseArrayLayer = p_layer;
+ image_memory_barrier.subresourceRange.layerCount = 1;
+
+ vkCmdPipelineBarrier(command_buffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, 0, 0, NULL, 0, NULL, 1, &image_memory_barrier);
+ }
+
+ return OK;
+}
+
+Vector<uint8_t> RenderingDeviceVulkan::_texture_get_data_from_image(Texture *tex, VkImage p_image, VmaAllocation p_allocation, uint32_t p_layer, bool p_2d) {
+
+ uint32_t width, height, depth;
+ uint32_t image_size = get_image_format_required_size(tex->format, tex->width, tex->height, p_2d ? 1 : tex->depth, tex->mipmaps, &width, &height, &depth);
+
+ Vector<uint8_t> image_data;
+ image_data.resize(image_size);
+
+ void *img_mem;
+ vmaMapMemory(allocator, p_allocation, &img_mem);
+
+ uint32_t blockw, blockh;
+ get_compressed_image_format_block_dimensions(tex->format, blockw, blockh);
+ uint32_t block_size = get_compressed_image_format_block_byte_size(tex->format);
+ uint32_t pixel_size = get_image_format_pixel_size(tex->format);
+
+ {
+ uint8_t *w = image_data.ptrw();
+
+ uint32_t mipmap_offset = 0;
+ for (uint32_t mm_i = 0; mm_i < tex->mipmaps; mm_i++) {
+
+ uint32_t image_total = get_image_format_required_size(tex->format, tex->width, tex->height, p_2d ? 1 : tex->depth, mm_i + 1, &width, &height, &depth);
+
+ uint8_t *write_ptr_mipmap = w + mipmap_offset;
+ image_size = image_total - mipmap_offset;
+
+ VkImageSubresource image_sub_resource;
+ image_sub_resource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
+ image_sub_resource.arrayLayer = p_layer;
+ image_sub_resource.mipLevel = mm_i;
+ VkSubresourceLayout layout;
+ vkGetImageSubresourceLayout(device, p_image, &image_sub_resource, &layout);
+
+ for (uint32_t z = 0; z < depth; z++) {
+ uint8_t *write_ptr = write_ptr_mipmap + z * image_size / depth;
+ const uint8_t *slice_read_ptr = ((uint8_t *)img_mem) + layout.offset + z * layout.depthPitch;
+
+ if (block_size > 1) {
+ //compressed
+ uint32_t line_width = (block_size * (width / blockw));
+ for (uint32_t y = 0; y < height / blockh; y++) {
+ const uint8_t *rptr = slice_read_ptr + y * layout.rowPitch;
+ uint8_t *wptr = write_ptr + y * line_width;
+
+ copymem(wptr, rptr, line_width);
+ }
+
+ } else {
+ //uncompressed
+ for (uint32_t y = 0; y < height; y++) {
+ const uint8_t *rptr = slice_read_ptr + y * layout.rowPitch;
+ uint8_t *wptr = write_ptr + y * pixel_size * width;
+ copymem(wptr, rptr, pixel_size * width);
+ }
+ }
+ }
+
+ mipmap_offset = image_total;
+ }
+ }
+
+ vmaUnmapMemory(allocator, p_allocation);
+
+ return image_data;
+}
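+
+// Note on the row-by-row copies above: for a linearly tiled image the
+// driver-chosen layout.rowPitch may be wider than the tightly packed row
+// (pixel_size * width, or block_size * width / blockw for block-compressed
+// formats), so rows cannot be copied as a single contiguous block.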
+
+Vector<uint8_t> RenderingDeviceVulkan::texture_get_data(RID p_texture, uint32_t p_layer) {
+
+ _THREAD_SAFE_METHOD_
+
+ Texture *tex = texture_owner.getornull(p_texture);
+ ERR_FAIL_COND_V(!tex, Vector<uint8_t>());
+
+ ERR_FAIL_COND_V_MSG(tex->bound, Vector<uint8_t>(),
+ "Texture can't be retrieved while a render pass that uses it is being created. Ensure render pass is finalized (and that it was created with RENDER_PASS_CONTENTS_FINISH) to unbind this texture.");
+ ERR_FAIL_COND_V_MSG(!(tex->usage_flags & TEXTURE_USAGE_CAN_COPY_FROM_BIT), Vector<uint8_t>(),
+ "Texture requires the TEXTURE_USAGE_CAN_COPY_FROM_BIT in order to be retrieved.");
+
+ uint32_t layer_count = tex->layers;
+ if (tex->type == TEXTURE_TYPE_CUBE || tex->type == TEXTURE_TYPE_CUBE_ARRAY) {
+ layer_count *= 6;
+ }
+ ERR_FAIL_COND_V(p_layer >= layer_count, Vector<uint8_t>());
+
+ if (tex->usage_flags & TEXTURE_USAGE_CPU_READ_BIT) {
+ //does not need anything fancy, map and read.
+ return _texture_get_data_from_image(tex, tex->image, tex->allocation, p_layer);
+ } else {
+
+ //compute total image size
+ uint32_t width, height, depth;
+ uint32_t buffer_size = get_image_format_required_size(tex->format, tex->width, tex->height, tex->depth, tex->mipmaps, &width, &height, &depth);
+
+ //allocate buffer
+ VkCommandBuffer command_buffer = frames[frame].setup_command_buffer;
+ Buffer tmp_buffer;
+ _buffer_allocate(&tmp_buffer, buffer_size, VK_BUFFER_USAGE_TRANSFER_DST_BIT, VMA_MEMORY_USAGE_CPU_ONLY);
+
+ { //Source image barrier
+ VkImageMemoryBarrier image_memory_barrier;
+ image_memory_barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
+ image_memory_barrier.pNext = NULL;
+ image_memory_barrier.srcAccessMask = 0;
+ image_memory_barrier.dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT;
+ image_memory_barrier.oldLayout = tex->layout;
+ image_memory_barrier.newLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL;
+
+ image_memory_barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
+ image_memory_barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
+ image_memory_barrier.image = tex->image;
+ image_memory_barrier.subresourceRange.aspectMask = tex->barrier_aspect_mask;
+ image_memory_barrier.subresourceRange.baseMipLevel = 0;
+ image_memory_barrier.subresourceRange.levelCount = tex->mipmaps;
+ image_memory_barrier.subresourceRange.baseArrayLayer = p_layer;
+ image_memory_barrier.subresourceRange.layerCount = 1;
+
+ vkCmdPipelineBarrier(command_buffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0, 0, NULL, 0, NULL, 1, &image_memory_barrier);
+ }
+
+ uint32_t computed_w = tex->width;
+ uint32_t computed_h = tex->height;
+ uint32_t computed_d = tex->depth;
+
+ uint32_t prev_size = 0;
+ uint32_t offset = 0;
+ for (uint32_t i = 0; i < tex->mipmaps; i++) {
+
+ VkBufferImageCopy buffer_image_copy;
+
+ uint32_t image_size = get_image_format_required_size(tex->format, tex->width, tex->height, tex->depth, i + 1);
+ uint32_t size = image_size - prev_size;
+ prev_size = image_size;
+
+ buffer_image_copy.bufferOffset = offset;
+ buffer_image_copy.bufferImageHeight = 0;
+ buffer_image_copy.bufferRowLength = 0;
+ buffer_image_copy.imageSubresource.aspectMask = tex->read_aspect_mask;
+ buffer_image_copy.imageSubresource.baseArrayLayer = p_layer;
+ buffer_image_copy.imageSubresource.layerCount = 1;
+ buffer_image_copy.imageSubresource.mipLevel = i;
+ buffer_image_copy.imageOffset.x = 0;
+ buffer_image_copy.imageOffset.y = 0;
+ buffer_image_copy.imageOffset.z = 0;
+ buffer_image_copy.imageExtent.width = computed_w;
+ buffer_image_copy.imageExtent.height = computed_h;
+ buffer_image_copy.imageExtent.depth = computed_d;
+
+ vkCmdCopyImageToBuffer(command_buffer, tex->image, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, tmp_buffer.buffer, 1, &buffer_image_copy);
+
+ computed_w = MAX(1, computed_w >> 1);
+ computed_h = MAX(1, computed_h >> 1);
+ computed_d = MAX(1, computed_d >> 1);
+ offset += size;
+ }
+
+ { //restore src
+ VkImageMemoryBarrier image_memory_barrier;
+ image_memory_barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
+ image_memory_barrier.pNext = NULL;
+ image_memory_barrier.srcAccessMask = VK_ACCESS_TRANSFER_READ_BIT;
+ image_memory_barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
+ image_memory_barrier.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL;
+ image_memory_barrier.newLayout = tex->layout;
+ image_memory_barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
+ image_memory_barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
+ image_memory_barrier.image = tex->image;
+ image_memory_barrier.subresourceRange.aspectMask = tex->barrier_aspect_mask;
+ image_memory_barrier.subresourceRange.baseMipLevel = 0;
+ image_memory_barrier.subresourceRange.levelCount = tex->mipmaps;
+ image_memory_barrier.subresourceRange.baseArrayLayer = p_layer;
+ image_memory_barrier.subresourceRange.layerCount = 1;
+
+ vkCmdPipelineBarrier(command_buffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, 0, 0, NULL, 0, NULL, 1, &image_memory_barrier);
+ }
+
+ _flush(true);
+
+ void *buffer_mem;
+ VkResult vkerr = vmaMapMemory(allocator, tmp_buffer.allocation, &buffer_mem);
+ if (vkerr) {
+ ERR_FAIL_V(Vector<uint8_t>());
+ }
+
+ Vector<uint8_t> buffer_data;
+ {
+
+ buffer_data.resize(buffer_size);
+ uint8_t *w = buffer_data.ptrw();
+ copymem(w, buffer_mem, buffer_size);
+ }
+
+ vmaUnmapMemory(allocator, tmp_buffer.allocation);
+
+ _buffer_free(&tmp_buffer);
+
+ return buffer_data;
+ }
+}
+
+bool RenderingDeviceVulkan::texture_is_shared(RID p_texture) {
+ _THREAD_SAFE_METHOD_
+
+ Texture *tex = texture_owner.getornull(p_texture);
+ ERR_FAIL_COND_V(!tex, false);
+ return tex->owner.is_valid();
+}
+
+bool RenderingDeviceVulkan::texture_is_valid(RID p_texture) {
+ return texture_owner.owns(p_texture);
+}
+
+Error RenderingDeviceVulkan::texture_copy(RID p_from_texture, RID p_to_texture, const Vector3 &p_from, const Vector3 &p_to, const Vector3 &p_size, uint32_t p_src_mipmap, uint32_t p_dst_mipmap, uint32_t p_src_layer, uint32_t p_dst_layer, bool p_sync_with_draw) {
+
+ _THREAD_SAFE_METHOD_
+
+ Texture *src_tex = texture_owner.getornull(p_from_texture);
+ ERR_FAIL_COND_V(!src_tex, ERR_INVALID_PARAMETER);
+
+ ERR_FAIL_COND_V_MSG(p_sync_with_draw && src_tex->bound, ERR_INVALID_PARAMETER,
+ "Source texture can't be copied while a render pass that uses it is being created. Ensure render pass is finalized (and that it was created with RENDER_PASS_CONTENTS_FINISH) to unbind this texture.");
+ ERR_FAIL_COND_V_MSG(!(src_tex->usage_flags & TEXTURE_USAGE_CAN_COPY_FROM_BIT), ERR_INVALID_PARAMETER,
+ "Source texture requires the TEXTURE_USAGE_CAN_COPY_FROM_BIT in order to be copied from.");
+
+ uint32_t src_layer_count = src_tex->layers;
+ uint32_t src_width, src_height, src_depth;
+ get_image_format_required_size(src_tex->format, src_tex->width, src_tex->height, src_tex->depth, p_src_mipmap + 1, &src_width, &src_height, &src_depth);
+ if (src_tex->type == TEXTURE_TYPE_CUBE || src_tex->type == TEXTURE_TYPE_CUBE_ARRAY) {
+ src_layer_count *= 6;
+ }
+
+ ERR_FAIL_COND_V(p_from.x < 0 || p_from.x + p_size.x > src_width, ERR_INVALID_PARAMETER);
+ ERR_FAIL_COND_V(p_from.y < 0 || p_from.y + p_size.y > src_height, ERR_INVALID_PARAMETER);
+ ERR_FAIL_COND_V(p_from.z < 0 || p_from.z + p_size.z > src_depth, ERR_INVALID_PARAMETER);
+ ERR_FAIL_COND_V(p_src_mipmap >= src_tex->mipmaps, ERR_INVALID_PARAMETER);
+ ERR_FAIL_COND_V(p_src_layer >= src_layer_count, ERR_INVALID_PARAMETER);
+
+ Texture *dst_tex = texture_owner.getornull(p_to_texture);
+ ERR_FAIL_COND_V(!dst_tex, ERR_INVALID_PARAMETER);
+
+ ERR_FAIL_COND_V_MSG(p_sync_with_draw && dst_tex->bound, ERR_INVALID_PARAMETER,
+ "Destination texture can't be copied while a render pass that uses it is being created. Ensure render pass is finalized (and that it was created with RENDER_PASS_CONTENTS_FINISH) to unbind this texture.");
+ ERR_FAIL_COND_V_MSG(!(dst_tex->usage_flags & TEXTURE_USAGE_CAN_COPY_TO_BIT), ERR_INVALID_PARAMETER,
+ "Destination texture requires the TEXTURE_USAGE_CAN_COPY_TO_BIT in order to be copied to.");
+
+ uint32_t dst_layer_count = dst_tex->layers;
+ uint32_t dst_width, dst_height, dst_depth;
+ get_image_format_required_size(dst_tex->format, dst_tex->width, dst_tex->height, dst_tex->depth, p_dst_mipmap + 1, &dst_width, &dst_height, &dst_depth);
+ if (dst_tex->type == TEXTURE_TYPE_CUBE || dst_tex->type == TEXTURE_TYPE_CUBE_ARRAY) {
+ dst_layer_count *= 6;
+ }
+
+ ERR_FAIL_COND_V(p_to.x < 0 || p_to.x + p_size.x > dst_width, ERR_INVALID_PARAMETER);
+ ERR_FAIL_COND_V(p_to.y < 0 || p_to.y + p_size.y > dst_height, ERR_INVALID_PARAMETER);
+ ERR_FAIL_COND_V(p_to.z < 0 || p_to.z + p_size.z > dst_depth, ERR_INVALID_PARAMETER);
+ ERR_FAIL_COND_V(p_dst_mipmap >= dst_tex->mipmaps, ERR_INVALID_PARAMETER);
+ ERR_FAIL_COND_V(p_dst_layer >= dst_layer_count, ERR_INVALID_PARAMETER);
+
+ ERR_FAIL_COND_V_MSG(src_tex->read_aspect_mask != dst_tex->read_aspect_mask, ERR_INVALID_PARAMETER,
+ "Source and destination texture must be of the same type (color or depth).");
+
+ VkCommandBuffer command_buffer = p_sync_with_draw ? frames[frame].draw_command_buffer : frames[frame].setup_command_buffer;
+
+ {
+
+ //PRE Copy the image
+
+ { //Source
+ VkImageMemoryBarrier image_memory_barrier;
+ image_memory_barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
+ image_memory_barrier.pNext = NULL;
+ image_memory_barrier.srcAccessMask = 0;
+ image_memory_barrier.dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT;
+ image_memory_barrier.oldLayout = src_tex->layout;
+ image_memory_barrier.newLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL;
+
+ image_memory_barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
+ image_memory_barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
+ image_memory_barrier.image = src_tex->image;
+ image_memory_barrier.subresourceRange.aspectMask = src_tex->barrier_aspect_mask;
+ image_memory_barrier.subresourceRange.baseMipLevel = p_src_mipmap;
+ image_memory_barrier.subresourceRange.levelCount = 1;
+ image_memory_barrier.subresourceRange.baseArrayLayer = p_src_layer;
+ image_memory_barrier.subresourceRange.layerCount = 1;
+
+ vkCmdPipelineBarrier(command_buffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0, 0, NULL, 0, NULL, 1, &image_memory_barrier);
+ }
+ { //Dest
+ VkImageMemoryBarrier image_memory_barrier;
+ image_memory_barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
+ image_memory_barrier.pNext = NULL;
+ image_memory_barrier.srcAccessMask = 0;
+ image_memory_barrier.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
+ image_memory_barrier.oldLayout = dst_tex->layout;
+ image_memory_barrier.newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
+
+ image_memory_barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
+ image_memory_barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
+ image_memory_barrier.image = dst_tex->image;
+ image_memory_barrier.subresourceRange.aspectMask = dst_tex->read_aspect_mask;
+ image_memory_barrier.subresourceRange.baseMipLevel = p_dst_mipmap;
+ image_memory_barrier.subresourceRange.levelCount = 1;
+ image_memory_barrier.subresourceRange.baseArrayLayer = p_dst_layer;
+ image_memory_barrier.subresourceRange.layerCount = 1;
+
+ vkCmdPipelineBarrier(command_buffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0, 0, NULL, 0, NULL, 1, &image_memory_barrier);
+ }
+
+ //COPY
+
+ {
+
+ VkImageCopy image_copy_region;
+ image_copy_region.srcSubresource.aspectMask = src_tex->read_aspect_mask;
+ image_copy_region.srcSubresource.baseArrayLayer = p_src_layer;
+ image_copy_region.srcSubresource.layerCount = 1;
+ image_copy_region.srcSubresource.mipLevel = p_src_mipmap;
+ image_copy_region.srcOffset.x = p_from.x;
+ image_copy_region.srcOffset.y = p_from.y;
+ image_copy_region.srcOffset.z = p_from.z;
+
+ image_copy_region.dstSubresource.aspectMask = dst_tex->read_aspect_mask;
+ image_copy_region.dstSubresource.baseArrayLayer = p_dst_layer;
+ image_copy_region.dstSubresource.layerCount = 1;
+ image_copy_region.dstSubresource.mipLevel = p_dst_mipmap;
+ image_copy_region.dstOffset.x = p_to.x;
+ image_copy_region.dstOffset.y = p_to.y;
+ image_copy_region.dstOffset.z = p_to.z;
+
+ image_copy_region.extent.width = p_size.x;
+ image_copy_region.extent.height = p_size.y;
+ image_copy_region.extent.depth = p_size.z;
+
+ vkCmdCopyImage(command_buffer, src_tex->image, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, dst_tex->image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &image_copy_region);
+ }
+
+ // RESTORE LAYOUT for SRC and DST
+
+ { //restore src
+ VkImageMemoryBarrier image_memory_barrier;
+ image_memory_barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
+ image_memory_barrier.pNext = NULL;
+ image_memory_barrier.srcAccessMask = VK_ACCESS_TRANSFER_READ_BIT;
+ image_memory_barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
+ image_memory_barrier.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL;
+ image_memory_barrier.newLayout = src_tex->layout;
+ image_memory_barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
+ image_memory_barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
+ image_memory_barrier.image = src_tex->image;
+ image_memory_barrier.subresourceRange.aspectMask = src_tex->barrier_aspect_mask;
+ image_memory_barrier.subresourceRange.baseMipLevel = p_src_mipmap;
+ image_memory_barrier.subresourceRange.levelCount = 1; //only the copied mip was transitioned
+ image_memory_barrier.subresourceRange.baseArrayLayer = p_src_layer;
+ image_memory_barrier.subresourceRange.layerCount = 1;
+
+ vkCmdPipelineBarrier(command_buffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, 0, 0, NULL, 0, NULL, 1, &image_memory_barrier);
+ }
+
+ { //make dst readable
+
+ VkImageMemoryBarrier image_memory_barrier;
+ image_memory_barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
+ image_memory_barrier.pNext = NULL;
+ image_memory_barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
+ image_memory_barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
+ image_memory_barrier.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
+ image_memory_barrier.newLayout = dst_tex->layout;
+
+ image_memory_barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
+ image_memory_barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
+ image_memory_barrier.image = dst_tex->image;
+ image_memory_barrier.subresourceRange.aspectMask = dst_tex->read_aspect_mask;
+ image_memory_barrier.subresourceRange.baseMipLevel = p_dst_mipmap;
+ image_memory_barrier.subresourceRange.levelCount = 1;
+ image_memory_barrier.subresourceRange.baseArrayLayer = p_dst_layer;
+ image_memory_barrier.subresourceRange.layerCount = 1;
+
+ vkCmdPipelineBarrier(command_buffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, 0, 0, NULL, 0, NULL, 1, &image_memory_barrier);
+ }
+ }
+
+ return OK;
+}
+
+Error RenderingDeviceVulkan::texture_clear(RID p_texture, const Color &p_color, uint32_t p_base_mipmap, uint32_t p_mipmaps, uint32_t p_base_layer, uint32_t p_layers, bool p_sync_with_draw) {
+
+ _THREAD_SAFE_METHOD_
+
+ Texture *src_tex = texture_owner.getornull(p_texture);
+ ERR_FAIL_COND_V(!src_tex, ERR_INVALID_PARAMETER);
+
+ ERR_FAIL_COND_V_MSG(p_sync_with_draw && src_tex->bound, ERR_INVALID_PARAMETER,
+ "Source texture can't be cleared while a render pass that uses it is being created. Ensure render pass is finalized (and that it was created with RENDER_PASS_CONTENTS_FINISH) to unbind this texture.");
+
+ ERR_FAIL_COND_V(p_layers == 0, ERR_INVALID_PARAMETER);
+ ERR_FAIL_COND_V(p_mipmaps == 0, ERR_INVALID_PARAMETER);
+
+ ERR_FAIL_COND_V_MSG(!(src_tex->usage_flags & TEXTURE_USAGE_CAN_COPY_TO_BIT), ERR_INVALID_PARAMETER,
+ "Source texture requires the TEXTURE_USAGE_CAN_COPY_TO_BIT in order to be cleared.");
+
+ uint32_t src_layer_count = src_tex->layers;
+ if (src_tex->type == TEXTURE_TYPE_CUBE || src_tex->type == TEXTURE_TYPE_CUBE_ARRAY) {
+ src_layer_count *= 6;
+ }
+
+ ERR_FAIL_COND_V(p_base_mipmap + p_mipmaps > src_tex->mipmaps, ERR_INVALID_PARAMETER);
+ ERR_FAIL_COND_V(p_base_layer + p_layers > src_layer_count, ERR_INVALID_PARAMETER);
+
+ VkCommandBuffer command_buffer = p_sync_with_draw ? frames[frame].draw_command_buffer : frames[frame].setup_command_buffer;
+
+ VkImageLayout layout = src_tex->layout;
+
+ if (src_tex->layout != VK_IMAGE_LAYOUT_GENERAL) { //storage may be in general state
+ VkImageMemoryBarrier image_memory_barrier;
+ image_memory_barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
+ image_memory_barrier.pNext = NULL;
+ image_memory_barrier.srcAccessMask = 0;
+ image_memory_barrier.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
+ image_memory_barrier.oldLayout = src_tex->layout;
+ image_memory_barrier.newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
+
+ image_memory_barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
+ image_memory_barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
+ image_memory_barrier.image = src_tex->image;
+ image_memory_barrier.subresourceRange.aspectMask = src_tex->read_aspect_mask;
+ image_memory_barrier.subresourceRange.baseMipLevel = p_base_mipmap;
+ image_memory_barrier.subresourceRange.levelCount = p_mipmaps;
+ image_memory_barrier.subresourceRange.baseArrayLayer = p_base_layer;
+ image_memory_barrier.subresourceRange.layerCount = p_layers;
+
+ layout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
+ vkCmdPipelineBarrier(command_buffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0, 0, NULL, 0, NULL, 1, &image_memory_barrier);
+ }
+
+ VkClearColorValue clear_color;
+ clear_color.float32[0] = p_color.r;
+ clear_color.float32[1] = p_color.g;
+ clear_color.float32[2] = p_color.b;
+ clear_color.float32[3] = p_color.a;
+
+ VkImageSubresourceRange range;
+ range.aspectMask = src_tex->read_aspect_mask;
+ range.baseArrayLayer = p_base_layer;
+ range.layerCount = p_layers;
+ range.baseMipLevel = p_base_mipmap;
+ range.levelCount = p_mipmaps;
+
+ vkCmdClearColorImage(command_buffer, src_tex->image, layout, &clear_color, 1, &range);
+
+ if (src_tex->layout != VK_IMAGE_LAYOUT_GENERAL) { //storage may be in general state
+
+ VkImageMemoryBarrier image_memory_barrier;
+ image_memory_barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
+ image_memory_barrier.pNext = NULL;
+ image_memory_barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
+ image_memory_barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
+ image_memory_barrier.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
+ image_memory_barrier.newLayout = src_tex->layout;
+
+ image_memory_barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
+ image_memory_barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
+ image_memory_barrier.image = src_tex->image;
+ image_memory_barrier.subresourceRange.aspectMask = src_tex->read_aspect_mask;
+ image_memory_barrier.subresourceRange.baseMipLevel = p_base_mipmap;
+ image_memory_barrier.subresourceRange.levelCount = p_mipmaps;
+ image_memory_barrier.subresourceRange.baseArrayLayer = p_base_layer;
+ image_memory_barrier.subresourceRange.layerCount = p_layers;
+
+ vkCmdPipelineBarrier(command_buffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, 0, 0, NULL, 0, NULL, 1, &image_memory_barrier);
+ }
+
+ return OK;
+}
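+
+// Example (sketch; `rd` and `tex` are hypothetical): clearing the first mip
+// of layer 0 of a color texture to opaque red:
+//
+//   rd->texture_clear(tex, Color(1, 0, 0, 1), 0, 1, 0, 1);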
+
+bool RenderingDeviceVulkan::texture_is_format_supported_for_usage(DataFormat p_format, uint32_t p_usage) const {
+ ERR_FAIL_INDEX_V(p_format, DATA_FORMAT_MAX, false);
+
+ _THREAD_SAFE_METHOD_
+
+ //validate that this image is supported for the intended use
+ VkFormatProperties properties;
+ vkGetPhysicalDeviceFormatProperties(context->get_physical_device(), vulkan_formats[p_format], &properties);
+ VkFormatFeatureFlags flags;
+
+ if (p_usage & TEXTURE_USAGE_CPU_READ_BIT) {
+ flags = properties.linearTilingFeatures;
+ } else {
+ flags = properties.optimalTilingFeatures;
+ }
+
+ if (p_usage & TEXTURE_USAGE_SAMPLING_BIT && !(flags & VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT)) {
+ return false;
+ }
+
+ if (p_usage & TEXTURE_USAGE_COLOR_ATTACHMENT_BIT && !(flags & VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT)) {
+ return false;
+ }
+
+ if (p_usage & TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT && !(flags & VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT)) {
+ return false;
+ }
+
+ if (p_usage & TEXTURE_USAGE_STORAGE_BIT && !(flags & VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT)) {
+ return false;
+ }
+
+ if (p_usage & TEXTURE_USAGE_STORAGE_ATOMIC_BIT && !(flags & VK_FORMAT_FEATURE_STORAGE_IMAGE_ATOMIC_BIT)) {
+ return false;
+ }
+
+ return true;
+}
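+
+// Example (sketch): querying support before committing to a format, e.g. an
+// RGBA16F texture meant to be used as both color attachment and storage
+// image:
+//
+//   bool ok = rd->texture_is_format_supported_for_usage(
+//           DATA_FORMAT_R16G16B16A16_SFLOAT,
+//           TEXTURE_USAGE_COLOR_ATTACHMENT_BIT | TEXTURE_USAGE_STORAGE_BIT);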
+
+/********************/
+/**** ATTACHMENT ****/
+/********************/
+
+VkRenderPass RenderingDeviceVulkan::_render_pass_create(const Vector<AttachmentFormat> &p_format, InitialAction p_initial_color_action, FinalAction p_final_color_action, InitialAction p_initial_depth_action, FinalAction p_final_depth_action, int *r_color_attachment_count) {
+
+ Vector<VkAttachmentDescription> attachments;
+ Vector<VkAttachmentReference> color_references;
+ Vector<VkAttachmentReference> depth_stencil_references;
+ Vector<VkAttachmentReference> resolve_references;
+
+ for (int i = 0; i < p_format.size(); i++) {
+
+ VkAttachmentDescription description;
+
+ description.flags = 0;
+ ERR_FAIL_INDEX_V(p_format[i].format, DATA_FORMAT_MAX, VK_NULL_HANDLE);
+ description.format = vulkan_formats[p_format[i].format];
+ ERR_FAIL_INDEX_V(p_format[i].samples, TEXTURE_SAMPLES_MAX, VK_NULL_HANDLE);
+ description.samples = rasterization_sample_count[p_format[i].samples];
+ //the load/store ops below do not affect pipeline compatibility; Vulkan ignores them when a pipeline is created against this render pass
+ ERR_FAIL_COND_V_MSG(!(p_format[i].usage_flags & (TEXTURE_USAGE_COLOR_ATTACHMENT_BIT | TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT | TEXTURE_USAGE_RESOLVE_ATTACHMENT_BIT)), VK_NULL_HANDLE,
+ "Texture format for index (" + itos(i) + ") requires an attachment (color, depth-stencil or resolve) bit set.");
+
+ bool is_depth_stencil = p_format[i].usage_flags & TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT;
+ bool is_sampled = p_format[i].usage_flags & TEXTURE_USAGE_SAMPLING_BIT;
+ bool is_storage = p_format[i].usage_flags & TEXTURE_USAGE_STORAGE_BIT;
+
+ switch (is_depth_stencil ? p_initial_depth_action : p_initial_color_action) {
+
+ case INITIAL_ACTION_CLEAR: {
+ description.loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;
+ description.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;
+ description.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; //don't care what is there
+ } break;
+ case INITIAL_ACTION_KEEP: {
+ if (p_format[i].usage_flags & TEXTURE_USAGE_COLOR_ATTACHMENT_BIT) {
+ description.loadOp = VK_ATTACHMENT_LOAD_OP_LOAD;
+ description.initialLayout = is_sampled ? VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL : (is_storage ? VK_IMAGE_LAYOUT_GENERAL : VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
+ description.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
+ } else if (p_format[i].usage_flags & TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) {
+ description.loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;
+ description.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; //don't care what is there
+ description.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;
+ } else {
+ description.loadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
+ description.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
+ description.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; //don't care what is there
+ }
+ } break;
+ case INITIAL_ACTION_CONTINUE: {
+ if (p_format[i].usage_flags & TEXTURE_USAGE_COLOR_ATTACHMENT_BIT) {
+ description.loadOp = VK_ATTACHMENT_LOAD_OP_LOAD;
+ description.initialLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
+ description.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
+ } else if (p_format[i].usage_flags & TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) {
+ description.loadOp = VK_ATTACHMENT_LOAD_OP_LOAD;
+ description.initialLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
+ description.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_LOAD;
+ } else {
+ description.loadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
+ description.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
+ description.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; //don't care what is there
+ }
+ } break;
+ default: {
+ ERR_FAIL_V(VK_NULL_HANDLE); //should never reach here
+ }
+ }
+
+ switch (is_depth_stencil ? p_final_depth_action : p_final_color_action) {
+ case FINAL_ACTION_READ: {
+
+ if (p_format[i].usage_flags & TEXTURE_USAGE_COLOR_ATTACHMENT_BIT) {
+ description.storeOp = VK_ATTACHMENT_STORE_OP_STORE;
+ description.stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
+ description.finalLayout = is_sampled ? VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL : (is_storage ? VK_IMAGE_LAYOUT_GENERAL : VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
+ } else if (p_format[i].usage_flags & TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) {
+
+ description.storeOp = VK_ATTACHMENT_STORE_OP_STORE;
+ description.stencilStoreOp = VK_ATTACHMENT_STORE_OP_STORE;
+ description.finalLayout = is_sampled ? VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL : (is_storage ? VK_IMAGE_LAYOUT_GENERAL : VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL);
+ } else {
+ description.storeOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
+ description.stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
+ description.finalLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; //only resolve attachments reach here, and those are color images
+ }
+ } break;
+ case FINAL_ACTION_DISCARD: {
+ if (p_format[i].usage_flags & TEXTURE_USAGE_COLOR_ATTACHMENT_BIT) {
+ description.storeOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
+ description.stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
+ description.finalLayout = is_sampled ? VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL : (is_storage ? VK_IMAGE_LAYOUT_GENERAL : VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
+ } else if (p_format[i].usage_flags & TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) {
+
+ description.storeOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
+ description.stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
+ description.finalLayout = is_sampled ? VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL : (is_storage ? VK_IMAGE_LAYOUT_GENERAL : VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL);
+ } else {
+				description.storeOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
+				description.stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
+				description.finalLayout = VK_IMAGE_LAYOUT_GENERAL; //neither color nor depth; GENERAL assumed as a valid final layout (UNDEFINED is not allowed here)
+ }
+ } break;
+ case FINAL_ACTION_CONTINUE: {
+ if (p_format[i].usage_flags & TEXTURE_USAGE_COLOR_ATTACHMENT_BIT) {
+ description.storeOp = VK_ATTACHMENT_STORE_OP_STORE;
+ description.stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
+ description.finalLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
+ } else if (p_format[i].usage_flags & TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) {
+
+ description.storeOp = VK_ATTACHMENT_STORE_OP_STORE;
+ description.stencilStoreOp = VK_ATTACHMENT_STORE_OP_STORE;
+ description.finalLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
+ } else {
+				description.storeOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
+				description.stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
+				description.finalLayout = VK_IMAGE_LAYOUT_GENERAL; //neither color nor depth; GENERAL assumed as a valid final layout (UNDEFINED is not allowed here)
+ }
+
+ } break;
+ default: {
+ ERR_FAIL_V(VK_NULL_HANDLE); //should never reach here
+ }
+ }
+
+ attachments.push_back(description);
+
+ VkAttachmentReference reference;
+ reference.attachment = i;
+
+ if (p_format[i].usage_flags & TEXTURE_USAGE_COLOR_ATTACHMENT_BIT) {
+ reference.layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
+ color_references.push_back(reference);
+ } else if (p_format[i].usage_flags & TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) {
+ reference.layout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
+ depth_stencil_references.push_back(reference);
+ } else if (p_format[i].usage_flags & TEXTURE_USAGE_RESOLVE_ATTACHMENT_BIT) {
+ reference.layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
+ resolve_references.push_back(reference);
+ } else {
+			ERR_FAIL_V_MSG(VK_NULL_HANDLE, "Texture index " + itos(i) + " is neither color, depth-stencil nor resolve, so it can't be used as an attachment.");
+ }
+ }
+
+	ERR_FAIL_COND_V_MSG(depth_stencil_references.size() > 1, VK_NULL_HANDLE,
+			"Formats can only have one depth/stencil attachment; (" + itos(depth_stencil_references.size()) + ") were supplied.");
+
+	ERR_FAIL_COND_V_MSG(resolve_references.size() > 1, VK_NULL_HANDLE,
+			"Formats can only have one resolve attachment; (" + itos(resolve_references.size()) + ") were supplied.");
+
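+	// Only a single subpass per render pass is generated here; input attachments
+	// and explicit subpass dependencies are left unused for now.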
+ VkSubpassDescription subpass;
+ subpass.flags = 0;
+ subpass.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;
+ subpass.inputAttachmentCount = 0; //unsupported for now
+ subpass.pInputAttachments = NULL;
+ subpass.colorAttachmentCount = color_references.size();
+ subpass.pColorAttachments = color_references.ptr();
+ subpass.pDepthStencilAttachment = depth_stencil_references.ptr();
+ subpass.pResolveAttachments = resolve_references.ptr();
+ subpass.preserveAttachmentCount = 0;
+ subpass.pPreserveAttachments = NULL;
+
+ VkRenderPassCreateInfo render_pass_create_info;
+ render_pass_create_info.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO;
+ render_pass_create_info.pNext = NULL;
+ render_pass_create_info.flags = 0;
+ render_pass_create_info.attachmentCount = attachments.size();
+ render_pass_create_info.pAttachments = attachments.ptr();
+ render_pass_create_info.subpassCount = 1;
+ render_pass_create_info.pSubpasses = &subpass;
+ render_pass_create_info.dependencyCount = 0;
+ render_pass_create_info.pDependencies = NULL;
+
+ VkRenderPass render_pass;
+ VkResult res = vkCreateRenderPass(device, &render_pass_create_info, NULL, &render_pass);
+ ERR_FAIL_COND_V(res, VK_NULL_HANDLE);
+
+ if (r_color_attachment_count) {
+ *r_color_attachment_count = color_references.size();
+ }
+ return render_pass;
+}
+
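+// Framebuffer formats are cached: identical attachment lists map to the same
+// FramebufferFormatID, whose upper bits tag it as ID_TYPE_FRAMEBUFFER_FORMAT
+// (see ID_BASE_SHIFT). This lets compatible framebuffers share a render pass.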
+RenderingDevice::FramebufferFormatID RenderingDeviceVulkan::framebuffer_format_create(const Vector<AttachmentFormat> &p_format) {
+
+ _THREAD_SAFE_METHOD_
+
+	ERR_FAIL_COND_V(p_format.size() == 0, INVALID_ID); //samples of attachment 0 are read below
+
+	FramebufferFormatKey key;
+	key.attachments = p_format;
+
+ const Map<FramebufferFormatKey, FramebufferFormatID>::Element *E = framebuffer_format_cache.find(key);
+ if (E) {
+ //exists, return
+ return E->get();
+ }
+
+ int color_references;
+ VkRenderPass render_pass = _render_pass_create(p_format, INITIAL_ACTION_CLEAR, FINAL_ACTION_DISCARD, INITIAL_ACTION_CLEAR, FINAL_ACTION_DISCARD, &color_references); //actions don't matter for this use case
+
+ if (render_pass == VK_NULL_HANDLE) { //was likely invalid
+ return INVALID_ID;
+ }
+ FramebufferFormatID id = FramebufferFormatID(framebuffer_format_cache.size()) | (FramebufferFormatID(ID_TYPE_FRAMEBUFFER_FORMAT) << FramebufferFormatID(ID_BASE_SHIFT));
+
+ E = framebuffer_format_cache.insert(key, id);
+ FramebufferFormat fb_format;
+ fb_format.E = E;
+ fb_format.color_attachments = color_references;
+ fb_format.render_pass = render_pass;
+ fb_format.samples = p_format[0].samples;
+ framebuffer_formats[id] = fb_format;
+ return id;
+}
+
+RenderingDevice::TextureSamples RenderingDeviceVulkan::framebuffer_format_get_texture_samples(FramebufferFormatID p_format) {
+ Map<FramebufferFormatID, FramebufferFormat>::Element *E = framebuffer_formats.find(p_format);
+ ERR_FAIL_COND_V(!E, TEXTURE_SAMPLES_1);
+
+ return E->get().samples;
+}
+
+/***********************/
+/**** RENDER TARGET ****/
+/***********************/
+
+RID RenderingDeviceVulkan::framebuffer_create(const Vector<RID> &p_texture_attachments, FramebufferFormatID p_format_check) {
+
+ _THREAD_SAFE_METHOD_
+
+ Vector<AttachmentFormat> attachments;
+ Size2i size;
+
+ for (int i = 0; i < p_texture_attachments.size(); i++) {
+ Texture *texture = texture_owner.getornull(p_texture_attachments[i]);
+ ERR_FAIL_COND_V_MSG(!texture, RID(), "Texture index supplied for framebuffer (" + itos(i) + ") is not a valid texture.");
+
+ if (i == 0) {
+ size.width = texture->width;
+ size.height = texture->height;
+ } else {
+ ERR_FAIL_COND_V_MSG((uint32_t)size.width != texture->width || (uint32_t)size.height != texture->height, RID(),
+ "All textures in a framebuffer should be the same size.");
+ }
+
+ AttachmentFormat af;
+ af.format = texture->format;
+ af.samples = texture->samples;
+ af.usage_flags = texture->usage_flags;
+ attachments.push_back(af);
+ }
+
+ FramebufferFormatID format_id = framebuffer_format_create(attachments);
+ if (format_id == INVALID_ID) {
+ return RID();
+ }
+
+ ERR_FAIL_COND_V_MSG(p_format_check != INVALID_ID && format_id != p_format_check, RID(),
+ "The format used to check this framebuffer differs from the intended framebuffer format.");
+
+ Framebuffer framebuffer;
+ framebuffer.format_id = format_id;
+ framebuffer.texture_ids = p_texture_attachments;
+ framebuffer.size = size;
+
+ RID id = framebuffer_owner.make_rid(framebuffer);
+
+ for (int i = 0; i < p_texture_attachments.size(); i++) {
+ _add_dependency(id, p_texture_attachments[i]);
+ }
+
+ return id;
+}
+
+RenderingDevice::FramebufferFormatID RenderingDeviceVulkan::framebuffer_get_format(RID p_framebuffer) {
+
+ _THREAD_SAFE_METHOD_
+
+ Framebuffer *framebuffer = framebuffer_owner.getornull(p_framebuffer);
+ ERR_FAIL_COND_V(!framebuffer, INVALID_ID);
+
+ return framebuffer->format_id;
+}
+
+/*****************/
+/**** SAMPLER ****/
+/*****************/
+
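+// Caller-side sketch (hypothetical values; SamplerState fields as declared in
+// rendering_device.h):
+//
+//	SamplerState ss;
+//	ss.mag_filter = SAMPLER_FILTER_LINEAR;
+//	ss.min_filter = SAMPLER_FILTER_LINEAR;
+//	ss.repeat_u = SAMPLER_REPEAT_MODE_CLAMP_TO_EDGE;
+//	RID sampler = rd->sampler_create(ss);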
+RID RenderingDeviceVulkan::sampler_create(const SamplerState &p_state) {
+
+ _THREAD_SAFE_METHOD_
+
+ VkSamplerCreateInfo sampler_create_info;
+ sampler_create_info.sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO;
+ sampler_create_info.pNext = NULL;
+ sampler_create_info.flags = 0;
+ sampler_create_info.magFilter = p_state.mag_filter == SAMPLER_FILTER_LINEAR ? VK_FILTER_LINEAR : VK_FILTER_NEAREST;
+ sampler_create_info.minFilter = p_state.min_filter == SAMPLER_FILTER_LINEAR ? VK_FILTER_LINEAR : VK_FILTER_NEAREST;
+ sampler_create_info.mipmapMode = p_state.mip_filter == SAMPLER_FILTER_LINEAR ? VK_SAMPLER_MIPMAP_MODE_LINEAR : VK_SAMPLER_MIPMAP_MODE_NEAREST;
+
+ ERR_FAIL_INDEX_V(p_state.repeat_u, SAMPLER_REPEAT_MODE_MAX, RID());
+ sampler_create_info.addressModeU = address_modes[p_state.repeat_u];
+ ERR_FAIL_INDEX_V(p_state.repeat_v, SAMPLER_REPEAT_MODE_MAX, RID());
+ sampler_create_info.addressModeV = address_modes[p_state.repeat_v];
+ ERR_FAIL_INDEX_V(p_state.repeat_w, SAMPLER_REPEAT_MODE_MAX, RID());
+ sampler_create_info.addressModeW = address_modes[p_state.repeat_w];
+
+ sampler_create_info.mipLodBias = p_state.lod_bias;
+ sampler_create_info.anisotropyEnable = p_state.use_anisotropy;
+ sampler_create_info.maxAnisotropy = p_state.anisotropy_max;
+ sampler_create_info.compareEnable = p_state.enable_compare;
+
+ ERR_FAIL_INDEX_V(p_state.compare_op, COMPARE_OP_MAX, RID());
+ sampler_create_info.compareOp = compare_operators[p_state.compare_op];
+
+ sampler_create_info.minLod = p_state.min_lod;
+ sampler_create_info.maxLod = p_state.max_lod;
+
+ ERR_FAIL_INDEX_V(p_state.border_color, SAMPLER_BORDER_COLOR_MAX, RID());
+ sampler_create_info.borderColor = sampler_border_colors[p_state.border_color];
+
+ sampler_create_info.unnormalizedCoordinates = p_state.unnormalized_uvw;
+
+ VkSampler sampler;
+ VkResult res = vkCreateSampler(device, &sampler_create_info, NULL, &sampler);
+ ERR_FAIL_COND_V(res, RID());
+
+ return sampler_owner.make_rid(sampler);
+}
+
+/**********************/
+/**** VERTEX ARRAY ****/
+/**********************/
+
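+// Vertex input is split into three concepts: vertex buffers (raw GPU memory),
+// vertex formats (a cached description of the attribute layout), and vertex
+// arrays (a format bound to a set of buffers, validated for size).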
+RID RenderingDeviceVulkan::vertex_buffer_create(uint32_t p_size_bytes, const Vector<uint8_t> &p_data) {
+
+ _THREAD_SAFE_METHOD_
+
+ ERR_FAIL_COND_V(p_data.size() && (uint32_t)p_data.size() != p_size_bytes, RID());
+
+ Buffer buffer;
+ _buffer_allocate(&buffer, p_size_bytes, VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_VERTEX_BUFFER_BIT, VMA_MEMORY_USAGE_GPU_ONLY);
+ if (p_data.size()) {
+ uint64_t data_size = p_data.size();
+ const uint8_t *r = p_data.ptr();
+ _buffer_update(&buffer, 0, r, data_size);
+ _buffer_memory_barrier(buffer.buffer, 0, data_size, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT, false);
+ }
+
+ return vertex_buffer_owner.make_rid(buffer);
+}
+
+// Internally reference counted; this ID is guaranteed to be unique for the same description, but it must be freed as many times as it was allocated.
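+//
+// Caller-side sketch (hypothetical attribute; see VertexDescription in
+// rendering_device.h):
+//
+//	Vector<VertexDescription> attrs;
+//	VertexDescription pos;
+//	pos.location = 0;
+//	pos.offset = 0;
+//	pos.format = DATA_FORMAT_R32G32B32_SFLOAT;
+//	pos.stride = sizeof(float) * 3;
+//	attrs.push_back(pos);
+//	VertexFormatID fmt = rd->vertex_format_create(attrs);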
+RenderingDevice::VertexFormatID RenderingDeviceVulkan::vertex_format_create(const Vector<VertexDescription> &p_vertex_formats) {
+
+ _THREAD_SAFE_METHOD_
+
+ VertexDescriptionKey key;
+ key.vertex_formats = p_vertex_formats;
+
+ VertexFormatID *idptr = vertex_format_cache.getptr(key);
+ if (idptr) {
+ return *idptr;
+ }
+
+ //does not exist, create one and cache it
+ VertexDescriptionCache vdcache;
+ vdcache.bindings = memnew_arr(VkVertexInputBindingDescription, p_vertex_formats.size());
+ vdcache.attributes = memnew_arr(VkVertexInputAttributeDescription, p_vertex_formats.size());
+
+ Set<int> used_locations;
+ for (int i = 0; i < p_vertex_formats.size(); i++) {
+ ERR_CONTINUE(p_vertex_formats[i].format >= DATA_FORMAT_MAX);
+ ERR_FAIL_COND_V(used_locations.has(p_vertex_formats[i].location), INVALID_ID);
+
+ ERR_FAIL_COND_V_MSG(get_format_vertex_size(p_vertex_formats[i].format) == 0, INVALID_ID,
+				"Data format for attribute (" + itos(i) + ") is not valid for a vertex array.");
+
+ vdcache.bindings[i].binding = i;
+ vdcache.bindings[i].stride = p_vertex_formats[i].stride;
+ vdcache.bindings[i].inputRate = p_vertex_formats[i].frequency == VERTEX_FREQUENCY_INSTANCE ? VK_VERTEX_INPUT_RATE_INSTANCE : VK_VERTEX_INPUT_RATE_VERTEX;
+ vdcache.attributes[i].binding = i;
+ vdcache.attributes[i].location = p_vertex_formats[i].location;
+ vdcache.attributes[i].format = vulkan_formats[p_vertex_formats[i].format];
+ vdcache.attributes[i].offset = p_vertex_formats[i].offset;
+ used_locations.insert(p_vertex_formats[i].location);
+ }
+
+ vdcache.create_info.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO;
+ vdcache.create_info.pNext = NULL;
+ vdcache.create_info.flags = 0;
+
+ vdcache.create_info.vertexAttributeDescriptionCount = p_vertex_formats.size();
+ vdcache.create_info.pVertexAttributeDescriptions = vdcache.attributes;
+
+ vdcache.create_info.vertexBindingDescriptionCount = p_vertex_formats.size();
+ vdcache.create_info.pVertexBindingDescriptions = vdcache.bindings;
+ vdcache.vertex_formats = p_vertex_formats;
+
+ VertexFormatID id = VertexFormatID(vertex_format_cache.size()) | (VertexFormatID(ID_TYPE_VERTEX_FORMAT) << ID_BASE_SHIFT);
+ vertex_format_cache[key] = id;
+ vertex_formats[id] = vdcache;
+ return id;
+}
+
+RID RenderingDeviceVulkan::vertex_array_create(uint32_t p_vertex_count, VertexFormatID p_vertex_format, const Vector<RID> &p_src_buffers) {
+
+ _THREAD_SAFE_METHOD_
+
+ ERR_FAIL_COND_V(!vertex_formats.has(p_vertex_format), RID());
+ const VertexDescriptionCache &vd = vertex_formats[p_vertex_format];
+
+ ERR_FAIL_COND_V(vd.vertex_formats.size() != p_src_buffers.size(), RID());
+
+ for (int i = 0; i < p_src_buffers.size(); i++) {
+ ERR_FAIL_COND_V(!vertex_buffer_owner.owns(p_src_buffers[i]), RID());
+ }
+
+ VertexArray vertex_array;
+
+ vertex_array.vertex_count = p_vertex_count;
+ vertex_array.description = p_vertex_format;
+ vertex_array.max_instances_allowed = 0xFFFFFFFF; //by default as many as you want
+ for (int i = 0; i < p_src_buffers.size(); i++) {
+ Buffer *buffer = vertex_buffer_owner.getornull(p_src_buffers[i]);
+
+ //validate with buffer
+ {
+ const VertexDescription &atf = vd.vertex_formats[i];
+
+ uint32_t element_size = get_format_vertex_size(atf.format);
+			ERR_FAIL_COND_V(element_size == 0, RID()); //should never happen, since this was prevalidated
+
+ if (atf.frequency == VERTEX_FREQUENCY_VERTEX) {
+ //validate size for regular drawing
+ uint64_t total_size = uint64_t(atf.stride) * (p_vertex_count - 1) + atf.offset + element_size;
+ ERR_FAIL_COND_V_MSG(total_size > buffer->size, RID(),
+						"Attribute (" + itos(i) + ") will read past the end of the buffer.");
+
+ } else {
+ //validate size for instances drawing
+ uint64_t available = buffer->size - atf.offset;
+ ERR_FAIL_COND_V_MSG(available < element_size, RID(),
+						"Attribute (" + itos(i) + ") uses instancing, but the buffer is too small to fit even one element.");
+
+ uint32_t instances_allowed = available / atf.stride;
+ vertex_array.max_instances_allowed = MIN(instances_allowed, vertex_array.max_instances_allowed);
+ }
+ }
+
+ vertex_array.buffers.push_back(buffer->buffer);
+ vertex_array.offsets.push_back(0); //offset unused, but passing anyway
+ }
+
+ RID id = vertex_array_owner.make_rid(vertex_array);
+ for (int i = 0; i < p_src_buffers.size(); i++) {
+ _add_dependency(id, p_src_buffers[i]);
+ }
+
+ return id;
+}
+
+RID RenderingDeviceVulkan::index_buffer_create(uint32_t p_index_count, IndexBufferFormat p_format, const Vector<uint8_t> &p_data, bool p_use_restart_indices) {
+
+ _THREAD_SAFE_METHOD_
+
+ ERR_FAIL_COND_V(p_index_count == 0, RID());
+
+ IndexBuffer index_buffer;
+ index_buffer.index_type = (p_format == INDEX_BUFFER_FORMAT_UINT16) ? VK_INDEX_TYPE_UINT16 : VK_INDEX_TYPE_UINT32;
+ index_buffer.supports_restart_indices = p_use_restart_indices;
+ index_buffer.index_count = p_index_count;
+ uint32_t size_bytes = p_index_count * ((p_format == INDEX_BUFFER_FORMAT_UINT16) ? 2 : 4);
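+	// In debug builds, scan the initial data to record the highest index used,
+	// so draws can later be validated against the bound vertex arrays.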
+#ifdef DEBUG_ENABLED
+ if (p_data.size()) {
+ index_buffer.max_index = 0;
+ ERR_FAIL_COND_V_MSG((uint32_t)p_data.size() != size_bytes, RID(),
+ "Default index buffer initializer array size (" + itos(p_data.size()) + ") does not match format required size (" + itos(size_bytes) + ").");
+ const uint8_t *r = p_data.ptr();
+ if (p_format == INDEX_BUFFER_FORMAT_UINT16) {
+ const uint16_t *index16 = (const uint16_t *)r;
+ for (uint32_t i = 0; i < p_index_count; i++) {
+ if (p_use_restart_indices && index16[i] == 0xFFFF) {
+					continue; //restart index, ignore
+ }
+ index_buffer.max_index = MAX(index16[i], index_buffer.max_index);
+ }
+ } else {
+ const uint32_t *index32 = (const uint32_t *)r;
+ for (uint32_t i = 0; i < p_index_count; i++) {
+ if (p_use_restart_indices && index32[i] == 0xFFFFFFFF) {
+					continue; //restart index, ignore
+ }
+ index_buffer.max_index = MAX(index32[i], index_buffer.max_index);
+ }
+ }
+ } else {
+ index_buffer.max_index = 0xFFFFFFFF;
+ }
+#else
+ index_buffer.max_index = 0xFFFFFFFF;
+#endif
+ _buffer_allocate(&index_buffer, size_bytes, VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_INDEX_BUFFER_BIT, VMA_MEMORY_USAGE_GPU_ONLY);
+ if (p_data.size()) {
+ uint64_t data_size = p_data.size();
+ const uint8_t *r = p_data.ptr();
+ _buffer_update(&index_buffer, 0, r, data_size);
+ _buffer_memory_barrier(index_buffer.buffer, 0, data_size, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_INDEX_READ_BIT, false);
+ }
+ return index_buffer_owner.make_rid(index_buffer);
+}
+
+RID RenderingDeviceVulkan::index_array_create(RID p_index_buffer, uint32_t p_index_offset, uint32_t p_index_count) {
+
+ _THREAD_SAFE_METHOD_
+
+ ERR_FAIL_COND_V(!index_buffer_owner.owns(p_index_buffer), RID());
+
+ IndexBuffer *index_buffer = index_buffer_owner.getornull(p_index_buffer);
+
+ ERR_FAIL_COND_V(p_index_count == 0, RID());
+ ERR_FAIL_COND_V(p_index_offset + p_index_count > index_buffer->index_count, RID());
+
+ IndexArray index_array;
+ index_array.max_index = index_buffer->max_index;
+ index_array.buffer = index_buffer->buffer;
+ index_array.offset = p_index_offset;
+ index_array.indices = p_index_count;
+ index_array.index_type = index_buffer->index_type;
+ index_array.supports_restart_indices = index_buffer->supports_restart_indices;
+
+ RID id = index_array_owner.make_rid(index_array);
+ _add_dependency(id, p_index_buffer);
+ return id;
+}
+
+/****************/
+/**** SHADER ****/
+/****************/
+
+static const char *shader_stage_names[RenderingDevice::SHADER_STAGE_MAX] = {
+ "Vertex",
+ "Fragment",
+	"TessellationControl",
+	"TessellationEvaluation",
+ "Compute"
+};
+
+static const char *shader_uniform_names[RenderingDevice::UNIFORM_TYPE_MAX] = {
+ "Sampler", "CombinedSampler", "Texture", "Image", "TextureBuffer", "SamplerTextureBuffer", "ImageBuffer", "UniformBuffer", "StorageBuffer", "InputAttachment"
+};
+
+static VkShaderStageFlagBits shader_stage_masks[RenderingDevice::SHADER_STAGE_MAX] = {
+ VK_SHADER_STAGE_VERTEX_BIT,
+ VK_SHADER_STAGE_FRAGMENT_BIT,
+ VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT,
+ VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT,
+ VK_SHADER_STAGE_COMPUTE_BIT,
+};
+
+String RenderingDeviceVulkan::_shader_uniform_debug(RID p_shader, int p_set) {
+ String ret;
+ const Shader *shader = shader_owner.getornull(p_shader);
+ ERR_FAIL_COND_V(!shader, String());
+ for (int i = 0; i < shader->sets.size(); i++) {
+ if (p_set >= 0 && i != p_set) {
+ continue;
+ }
+ for (int j = 0; j < shader->sets[i].uniform_info.size(); j++) {
+ const UniformInfo &ui = shader->sets[i].uniform_info[j];
+ if (ret != String()) {
+ ret += "\n";
+ }
+ ret += "Set: " + itos(i) + " Binding: " + itos(ui.binding) + " Type: " + shader_uniform_names[ui.type] + " Length: " + itos(ui.length);
+ }
+ }
+ return ret;
+}
+#if 0
+bool RenderingDeviceVulkan::_uniform_add_binding(Vector<Vector<VkDescriptorSetLayoutBinding> > &bindings, Vector<Vector<UniformInfo> > &uniform_infos, const glslang::TObjectReflection &reflection, RenderingDevice::ShaderStage p_stage, Shader::PushConstant &push_constant, String *r_error) {
+
+ VkDescriptorSetLayoutBinding layout_binding;
+ UniformInfo info;
+
+ switch (reflection.getType()->getBasicType()) {
+ case glslang::EbtSampler: {
+
+ //print_line("DEBUG: IsSampler");
+ if (reflection.getType()->getSampler().dim == glslang::EsdBuffer) {
+ //texture buffers
+ if (reflection.getType()->getSampler().isCombined()) {
+ layout_binding.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER;
+ info.type = UNIFORM_TYPE_SAMPLER_WITH_TEXTURE_BUFFER;
+ //print_line("DEBUG: SAMPLER: texel combined");
+ } else if (reflection.getType()->getSampler().isTexture()) {
+ layout_binding.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER;
+ info.type = UNIFORM_TYPE_TEXTURE_BUFFER;
+ //print_line("DEBUG: SAMPLER: texel alone");
+ } else if (reflection.getType()->getSampler().isImage()) {
+ layout_binding.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER;
+ info.type = UNIFORM_TYPE_IMAGE_BUFFER;
+ //print_line("DEBUG: SAMPLER: texel buffer");
+ } else {
+ if (r_error) {
+ *r_error = "On shader stage '" + String(shader_stage_names[p_stage]) + "', uniform '" + reflection.name.c_str() + "' is of unsupported buffer type.";
+ }
+ return false;
+ }
+ } else if (reflection.getType()->getSampler().isCombined()) {
+ layout_binding.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
+ info.type = UNIFORM_TYPE_SAMPLER_WITH_TEXTURE;
+ //print_line("DEBUG: SAMPLER: combined");
+ } else if (reflection.getType()->getSampler().isPureSampler()) {
+ layout_binding.descriptorType = VK_DESCRIPTOR_TYPE_SAMPLER;
+ info.type = UNIFORM_TYPE_SAMPLER;
+ //print_line("DEBUG: SAMPLER: sampler");
+ } else if (reflection.getType()->getSampler().isTexture()) {
+ layout_binding.descriptorType = VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE;
+ info.type = UNIFORM_TYPE_TEXTURE;
+ //print_line("DEBUG: SAMPLER: image");
+ } else if (reflection.getType()->getSampler().isImage()) {
+ layout_binding.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE;
+ info.type = UNIFORM_TYPE_IMAGE;
+ //print_line("DEBUG: SAMPLER: storage image");
+ } else {
+ //print_line("DEBUG: sampler unknown");
+ if (r_error) {
+ *r_error = "On shader stage '" + String(shader_stage_names[p_stage]) + "', uniform '" + reflection.name.c_str() + "' is of unsupported sampler type.";
+ }
+ return false;
+ }
+
+ if (reflection.getType()->isArray()) {
+ layout_binding.descriptorCount = reflection.getType()->getArraySizes()->getCumulativeSize();
+ //print_line("DEBUG: array of size: " + itos(layout_binding.descriptorCount));
+ } else {
+ layout_binding.descriptorCount = 1;
+ }
+
+ info.length = layout_binding.descriptorCount;
+
+ } break;
+ /*case glslang::EbtStruct: {
+ print_line("DEBUG: Struct");
+
+ } break;*/
+ case glslang::EbtBlock: {
+ //print_line("DEBUG: Block");
+ if (reflection.getType()->getQualifier().storage == glslang::EvqUniform) {
+ if (reflection.getType()->getQualifier().layoutPushConstant) {
+ uint32_t len = reflection.size;
+ if (push_constant.push_constant_size != 0 && push_constant.push_constant_size != len) {
+ *r_error = "On shader stage '" + String(shader_stage_names[p_stage]) + "', uniform '" + reflection.name.c_str() + "' push constants for different stages should all be the same size.";
+ return false;
+ }
+ push_constant.push_constant_size = len;
+ push_constant.push_constants_vk_stage |= shader_stage_masks[p_stage];
+ return true;
+ }
+ //print_line("DEBUG: Uniform buffer");
+ layout_binding.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
+ info.type = UNIFORM_TYPE_UNIFORM_BUFFER;
+ } else if (reflection.getType()->getQualifier().storage == glslang::EvqBuffer) {
+ layout_binding.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
+ info.type = UNIFORM_TYPE_STORAGE_BUFFER;
+ //print_line("DEBUG: Storage buffer");
+ } else {
+ if (r_error) {
+ *r_error = "On shader stage '" + String(shader_stage_names[p_stage]) + "', uniform '" + reflection.name.c_str() + "' is of unsupported block type: (" + itos(reflection.getType()->getQualifier().storage) + ").";
+ }
+ return false;
+ }
+
+ if (reflection.getType()->isArray()) {
+ layout_binding.descriptorCount = reflection.getType()->getArraySizes()->getCumulativeSize();
+ //print_line("DEBUG: array of size: " + itos(layout_binding.descriptorCount));
+ } else {
+ layout_binding.descriptorCount = 1;
+ }
+
+ info.length = reflection.size;
+
+ } break;
+ /*case glslang::EbtReference: {
+
+ } break;*/
+ /*case glslang::EbtAtomicUint: {
+
+ } break;*/
+ default: {
+
+ if (reflection.getType()->getQualifier().hasOffset() || reflection.name.find(".") != std::string::npos) {
+ //member of uniform block?
+ return true;
+ }
+
+ if (r_error) {
+ *r_error = "On shader stage '" + String(shader_stage_names[p_stage]) + "', uniform '" + reflection.name.c_str() + "' unsupported uniform type.";
+ }
+ return false;
+ }
+ }
+
+ if (!reflection.getType()->getQualifier().hasBinding()) {
+ if (r_error) {
+ *r_error = "On shader stage '" + String(shader_stage_names[p_stage]) + "', uniform '" + reflection.name.c_str() + "' lacks a binding number.";
+ }
+ return false;
+ }
+
+ uint32_t set = reflection.getType()->getQualifier().hasSet() ? reflection.getType()->getQualifier().layoutSet : 0;
+
+ if (set >= MAX_UNIFORM_SETS) {
+ if (r_error) {
+ *r_error = "On shader stage '" + String(shader_stage_names[p_stage]) + "', uniform '" + reflection.name.c_str() + "' uses a set (" + itos(set) + ") index larger than what is supported (" + itos(MAX_UNIFORM_SETS) + ").";
+ }
+ return false;
+ }
+
+ if (set >= limits.maxBoundDescriptorSets) {
+ if (r_error) {
+ *r_error = "On shader stage '" + String(shader_stage_names[p_stage]) + "', uniform '" + reflection.name.c_str() + "' uses a set (" + itos(set) + ") index larger than what is supported by the hardware (" + itos(limits.maxBoundDescriptorSets) + ").";
+ }
+ return false;
+ }
+
+ uint32_t binding = reflection.getType()->getQualifier().layoutBinding;
+
+ if (set < (uint32_t)bindings.size()) {
+ //check if this already exists
+ for (int i = 0; i < bindings[set].size(); i++) {
+ if (bindings[set][i].binding == binding) {
+ //already exists, verify that it's the same type
+ if (bindings[set][i].descriptorType != layout_binding.descriptorType) {
+ if (r_error) {
+ *r_error = "On shader stage '" + String(shader_stage_names[p_stage]) + "', uniform '" + reflection.name.c_str() + "' trying to re-use location for set=" + itos(set) + ", binding=" + itos(binding) + " with different uniform type.";
+ }
+ return false;
+ }
+
+ //also, verify that it's the same size
+ if (bindings[set][i].descriptorCount != layout_binding.descriptorCount || uniform_infos[set][i].length != info.length) {
+ if (r_error) {
+ *r_error = "On shader stage '" + String(shader_stage_names[p_stage]) + "', uniform '" + reflection.name.c_str() + "' trying to re-use location for set=" + itos(set) + ", binding=" + itos(binding) + " with different uniform size.";
+ }
+ return false;
+ }
+
+ //just append stage mask and return
+ bindings.write[set].write[i].stageFlags |= shader_stage_masks[p_stage];
+ uniform_infos.write[set].write[i].stages |= 1 << p_stage;
+ return true;
+ }
+ }
+ }
+ layout_binding.binding = binding;
+ layout_binding.stageFlags = shader_stage_masks[p_stage];
+ layout_binding.pImmutableSamplers = NULL; //no support for this yet
+
+ info.stages = 1 << p_stage;
+ info.binding = binding;
+
+ if (set >= (uint32_t)bindings.size()) {
+ bindings.resize(set + 1);
+ uniform_infos.resize(set + 1);
+ }
+#if 0
+ print_line("stage: " + String(shader_stage_names[p_stage]) + " set: " + itos(set) + " binding: " + itos(info.binding) + " type:" + shader_uniform_names[info.type] + " length: " + itos(info.length));
+#endif
+ bindings.write[set].push_back(layout_binding);
+ uniform_infos.write[set].push_back(info);
+
+ return true;
+}
+#endif
+
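+// Shader creation: SPIRV-Reflect enumerates descriptor bindings, vertex
+// inputs, fragment outputs and push constants for each stage; the merged
+// reflection data then drives creation of the VkDescriptorSetLayouts and the
+// VkPipelineLayout below.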
+RID RenderingDeviceVulkan::shader_create(const Vector<ShaderStageData> &p_stages) {
+
+ //descriptor layouts
+ Vector<Vector<VkDescriptorSetLayoutBinding> > set_bindings;
+ Vector<Vector<UniformInfo> > uniform_info;
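+	// A single push constant block is shared by all stages: every stage that
+	// declares one must declare the same size, and its stage bit is OR'd in.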
+ Shader::PushConstant push_constant;
+ push_constant.push_constant_size = 0;
+ push_constant.push_constants_vk_stage = 0;
+
+ uint32_t vertex_input_mask = 0;
+
+ uint32_t fragment_outputs = 0;
+
+ uint32_t stages_processed = 0;
+
+ bool is_compute = false;
+
+ for (int i = 0; i < p_stages.size(); i++) {
+
+ if (p_stages[i].shader_stage == SHADER_STAGE_COMPUTE) {
+ is_compute = true;
+ ERR_FAIL_COND_V_MSG(p_stages.size() != 1, RID(),
+ "Compute shaders can only receive one stage, dedicated to compute.");
+ }
+ ERR_FAIL_COND_V_MSG(stages_processed & (1 << p_stages[i].shader_stage), RID(),
+ "Stage " + String(shader_stage_names[p_stages[i].shader_stage]) + " submitted more than once.");
+
+ {
+ SpvReflectShaderModule module;
+ const uint8_t *spirv = p_stages[i].spir_v.ptr();
+ SpvReflectResult result = spvReflectCreateShaderModule(p_stages[i].spir_v.size(), spirv, &module);
+ ERR_FAIL_COND_V_MSG(result != SPV_REFLECT_RESULT_SUCCESS, RID(),
+ "Reflection of SPIR-V shader stage '" + String(shader_stage_names[p_stages[i].shader_stage]) + "' failed parsing shader.");
+
+ uint32_t binding_count = 0;
+ result = spvReflectEnumerateDescriptorBindings(&module, &binding_count, NULL);
+ ERR_FAIL_COND_V_MSG(result != SPV_REFLECT_RESULT_SUCCESS, RID(),
+ "Reflection of SPIR-V shader stage '" + String(shader_stage_names[p_stages[i].shader_stage]) + "' failed enumerating descriptor bindings.");
+
+ uint32_t stage = p_stages[i].shader_stage;
+
+ if (binding_count > 0) {
+
+ //Parse bindings
+
+ Vector<SpvReflectDescriptorBinding *> bindings;
+ bindings.resize(binding_count);
+ result = spvReflectEnumerateDescriptorBindings(&module, &binding_count, bindings.ptrw());
+
+ ERR_FAIL_COND_V_MSG(result != SPV_REFLECT_RESULT_SUCCESS, RID(),
+ "Reflection of SPIR-V shader stage '" + String(shader_stage_names[p_stages[i].shader_stage]) + "' failed getting descriptor bindings.");
+
+ for (uint32_t j = 0; j < binding_count; j++) {
+ const SpvReflectDescriptorBinding &binding = *bindings[j];
+
+ VkDescriptorSetLayoutBinding layout_binding;
+ UniformInfo info;
+
+ bool need_array_dimensions = false;
+ bool need_block_size = false;
+
+ switch (binding.descriptor_type) {
+ case SPV_REFLECT_DESCRIPTOR_TYPE_SAMPLER: {
+ layout_binding.descriptorType = VK_DESCRIPTOR_TYPE_SAMPLER;
+ info.type = UNIFORM_TYPE_SAMPLER;
+ need_array_dimensions = true;
+ } break;
+ case SPV_REFLECT_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER: {
+ layout_binding.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
+ info.type = UNIFORM_TYPE_SAMPLER_WITH_TEXTURE;
+ need_array_dimensions = true;
+ } break;
+ case SPV_REFLECT_DESCRIPTOR_TYPE_SAMPLED_IMAGE: {
+ layout_binding.descriptorType = VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE;
+ info.type = UNIFORM_TYPE_TEXTURE;
+ need_array_dimensions = true;
+ } break;
+ case SPV_REFLECT_DESCRIPTOR_TYPE_STORAGE_IMAGE: {
+ layout_binding.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE;
+ info.type = UNIFORM_TYPE_IMAGE;
+ need_array_dimensions = true;
+ } break;
+ case SPV_REFLECT_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER: {
+ layout_binding.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER;
+ info.type = UNIFORM_TYPE_TEXTURE_BUFFER;
+ need_array_dimensions = true;
+ } break;
+ case SPV_REFLECT_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER: {
+ layout_binding.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER;
+ info.type = UNIFORM_TYPE_IMAGE_BUFFER;
+ need_array_dimensions = true;
+ } break;
+ case SPV_REFLECT_DESCRIPTOR_TYPE_UNIFORM_BUFFER: {
+ layout_binding.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
+ info.type = UNIFORM_TYPE_UNIFORM_BUFFER;
+ need_block_size = true;
+ } break;
+ case SPV_REFLECT_DESCRIPTOR_TYPE_STORAGE_BUFFER: {
+ layout_binding.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
+ info.type = UNIFORM_TYPE_STORAGE_BUFFER;
+ need_block_size = true;
+ } break;
+ case SPV_REFLECT_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC: {
+ ERR_PRINT("Dynamic uniform buffer not supported.");
+ continue;
+ } break;
+ case SPV_REFLECT_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC: {
+ ERR_PRINT("Dynamic storage buffer not supported.");
+ continue;
+ } break;
+ case SPV_REFLECT_DESCRIPTOR_TYPE_INPUT_ATTACHMENT: {
+ layout_binding.descriptorType = VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT;
+ info.type = UNIFORM_TYPE_INPUT_ATTACHMENT;
+ } break;
+ }
+
+ if (need_array_dimensions) {
+ if (binding.array.dims_count == 0) {
+ info.length = 1;
+ } else {
+ for (uint32_t k = 0; k < binding.array.dims_count; k++) {
+ if (k == 0) {
+ info.length = binding.array.dims[0];
+ } else {
+ info.length *= binding.array.dims[k];
+ }
+ }
+ }
+
+ layout_binding.descriptorCount = info.length;
+
+ } else if (need_block_size) {
+ info.length = binding.block.size;
+ layout_binding.descriptorCount = 1;
+ } else {
+ info.length = 0;
+ layout_binding.descriptorCount = 1;
+ }
+
+ info.binding = binding.binding;
+ uint32_t set = binding.set;
+
+ //print_line("Stage: " + String(shader_stage_names[stage]) + " set=" + itos(set) + " binding=" + itos(info.binding) + " type=" + shader_uniform_names[info.type] + " length=" + itos(info.length));
+
+ ERR_FAIL_COND_V_MSG(set >= MAX_UNIFORM_SETS, RID(),
+ "On shader stage '" + String(shader_stage_names[stage]) + "', uniform '" + binding.name + "' uses a set (" + itos(set) + ") index larger than what is supported (" + itos(MAX_UNIFORM_SETS) + ").");
+
+ ERR_FAIL_COND_V_MSG(set >= limits.maxBoundDescriptorSets, RID(),
+ "On shader stage '" + String(shader_stage_names[stage]) + "', uniform '" + binding.name + "' uses a set (" + itos(set) + ") index larger than what is supported by the hardware (" + itos(limits.maxBoundDescriptorSets) + ").");
+
+ if (set < (uint32_t)set_bindings.size()) {
+ //check if this already exists
+ bool exists = false;
+ for (int k = 0; k < set_bindings[set].size(); k++) {
+ if (set_bindings[set][k].binding == (uint32_t)info.binding) {
+ //already exists, verify that it's the same type
+ ERR_FAIL_COND_V_MSG(set_bindings[set][k].descriptorType != layout_binding.descriptorType, RID(),
+ "On shader stage '" + String(shader_stage_names[stage]) + "', uniform '" + binding.name + "' trying to re-use location for set=" + itos(set) + ", binding=" + itos(info.binding) + " with different uniform type.");
+
+ //also, verify that it's the same size
+ ERR_FAIL_COND_V_MSG(set_bindings[set][k].descriptorCount != layout_binding.descriptorCount || uniform_info[set][k].length != info.length, RID(),
+ "On shader stage '" + String(shader_stage_names[stage]) + "', uniform '" + binding.name + "' trying to re-use location for set=" + itos(set) + ", binding=" + itos(info.binding) + " with different uniform size.");
+
+ //just append stage mask and return
+ set_bindings.write[set].write[k].stageFlags |= shader_stage_masks[stage];
+ uniform_info.write[set].write[k].stages |= 1 << stage;
+ exists = true;
+ }
+ }
+
+ if (exists) {
+ continue; //merged
+ }
+ }
+
+ layout_binding.binding = info.binding;
+ layout_binding.stageFlags = shader_stage_masks[stage];
+ layout_binding.pImmutableSamplers = NULL; //no support for this yet
+
+ info.stages = 1 << stage;
+
+ if (set >= (uint32_t)set_bindings.size()) {
+ set_bindings.resize(set + 1);
+ uniform_info.resize(set + 1);
+ }
+
+ set_bindings.write[set].push_back(layout_binding);
+ uniform_info.write[set].push_back(info);
+ }
+ }
+
+ if (stage == SHADER_STAGE_VERTEX) {
+
+ uint32_t iv_count = 0;
+ result = spvReflectEnumerateInputVariables(&module, &iv_count, NULL);
+ ERR_FAIL_COND_V_MSG(result != SPV_REFLECT_RESULT_SUCCESS, RID(),
+ "Reflection of SPIR-V shader stage '" + String(shader_stage_names[p_stages[i].shader_stage]) + "' failed enumerating input variables.");
+
+ if (iv_count) {
+ Vector<SpvReflectInterfaceVariable *> input_vars;
+ input_vars.resize(iv_count);
+
+ result = spvReflectEnumerateInputVariables(&module, &iv_count, input_vars.ptrw());
+ ERR_FAIL_COND_V_MSG(result != SPV_REFLECT_RESULT_SUCCESS, RID(),
+ "Reflection of SPIR-V shader stage '" + String(shader_stage_names[p_stages[i].shader_stage]) + "' failed obtaining input variables.");
+
+ for (uint32_t j = 0; j < iv_count; j++) {
+ if (input_vars[j] && input_vars[j]->decoration_flags == 0) { //regular input
+ vertex_input_mask |= (1 << uint32_t(input_vars[j]->location));
+ }
+ }
+ }
+ }
+
+ if (stage == SHADER_STAGE_FRAGMENT) {
+
+ uint32_t ov_count = 0;
+ result = spvReflectEnumerateOutputVariables(&module, &ov_count, NULL);
+ ERR_FAIL_COND_V_MSG(result != SPV_REFLECT_RESULT_SUCCESS, RID(),
+ "Reflection of SPIR-V shader stage '" + String(shader_stage_names[p_stages[i].shader_stage]) + "' failed enumerating output variables.");
+
+ if (ov_count) {
+ Vector<SpvReflectInterfaceVariable *> output_vars;
+ output_vars.resize(ov_count);
+
+ result = spvReflectEnumerateOutputVariables(&module, &ov_count, output_vars.ptrw());
+ ERR_FAIL_COND_V_MSG(result != SPV_REFLECT_RESULT_SUCCESS, RID(),
+ "Reflection of SPIR-V shader stage '" + String(shader_stage_names[p_stages[i].shader_stage]) + "' failed obtaining output variables.");
+
+ for (uint32_t j = 0; j < ov_count; j++) {
+ if (output_vars[j]) {
+ fragment_outputs = MAX(fragment_outputs, output_vars[j]->location + 1);
+ }
+ }
+ }
+ }
+ uint32_t pc_count = 0;
+ result = spvReflectEnumeratePushConstantBlocks(&module, &pc_count, NULL);
+ ERR_FAIL_COND_V_MSG(result != SPV_REFLECT_RESULT_SUCCESS, RID(),
+ "Reflection of SPIR-V shader stage '" + String(shader_stage_names[p_stages[i].shader_stage]) + "' failed enumerating push constants.");
+
+ if (pc_count) {
+ ERR_FAIL_COND_V_MSG(pc_count > 1, RID(),
+ "Reflection of SPIR-V shader stage '" + String(shader_stage_names[p_stages[i].shader_stage]) + "': Only one push constant is supported, which should be the same across shader stages.");
+
+ Vector<SpvReflectBlockVariable *> pconstants;
+ pconstants.resize(pc_count);
+ result = spvReflectEnumeratePushConstantBlocks(&module, &pc_count, pconstants.ptrw());
+ ERR_FAIL_COND_V_MSG(result != SPV_REFLECT_RESULT_SUCCESS, RID(),
+ "Reflection of SPIR-V shader stage '" + String(shader_stage_names[p_stages[i].shader_stage]) + "' failed obtaining push constants.");
+#if 0
+ if (pconstants[0] == NULL) {
+ FileAccess *f = FileAccess::open("res://popo.spv", FileAccess::WRITE);
+ f->store_buffer((const uint8_t *)&SpirV[0], SpirV.size() * sizeof(uint32_t));
+ memdelete(f);
+ }
+#endif
+
+ ERR_FAIL_COND_V_MSG(push_constant.push_constant_size && push_constant.push_constant_size != pconstants[0]->size, RID(),
+ "Reflection of SPIR-V shader stage '" + String(shader_stage_names[p_stages[i].shader_stage]) + "': Push constant block must be the same across shader stages.");
+
+ push_constant.push_constant_size = pconstants[0]->size;
+ push_constant.push_constants_vk_stage |= shader_stage_masks[stage];
+
+ //print_line("Stage: " + String(shader_stage_names[stage]) + " push constant of size=" + itos(push_constant.push_constant_size));
+ }
+
+ // Destroy the reflection data when no longer required.
+ spvReflectDestroyShaderModule(&module);
+ }
+
+ stages_processed |= (1 << p_stages[i].shader_stage);
+ }
+
+ //all good, let's create modules
+
+ _THREAD_SAFE_METHOD_
+
+ Shader shader;
+
+ shader.vertex_input_mask = vertex_input_mask;
+ shader.fragment_outputs = fragment_outputs;
+ shader.push_constant = push_constant;
+ shader.is_compute = is_compute;
+
+ String error_text;
+
+ bool success = true;
+ for (int i = 0; i < p_stages.size(); i++) {
+ VkShaderModuleCreateInfo shader_module_create_info;
+ shader_module_create_info.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO;
+ shader_module_create_info.pNext = NULL;
+ shader_module_create_info.flags = 0;
+ shader_module_create_info.codeSize = p_stages[i].spir_v.size();
+ const uint8_t *r = p_stages[i].spir_v.ptr();
+
+ shader_module_create_info.pCode = (const uint32_t *)r;
+
+ VkShaderModule module;
+ VkResult res = vkCreateShaderModule(device, &shader_module_create_info, NULL, &module);
+ if (res) {
+ success = false;
+ error_text = "Error creating shader module for stage: " + String(shader_stage_names[p_stages[i].shader_stage]);
+ break;
+ }
+
+ const VkShaderStageFlagBits shader_stage_bits[SHADER_STAGE_MAX] = {
+ VK_SHADER_STAGE_VERTEX_BIT,
+ VK_SHADER_STAGE_FRAGMENT_BIT,
+ VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT,
+ VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT,
+ VK_SHADER_STAGE_COMPUTE_BIT,
+ };
+
+ VkPipelineShaderStageCreateInfo shader_stage;
+ shader_stage.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
+ shader_stage.pNext = NULL;
+ shader_stage.flags = 0;
+ shader_stage.stage = shader_stage_bits[p_stages[i].shader_stage];
+ shader_stage.module = module;
+ shader_stage.pName = "main";
+ shader_stage.pSpecializationInfo = NULL;
+
+ shader.pipeline_stages.push_back(shader_stage);
+ }
+ //proceed to create descriptor sets
+
+ if (success) {
+
+ for (int i = 0; i < set_bindings.size(); i++) {
+
+			//per the spec, empty set layouts are fine if the set is unused (binding count will be 0)
+ VkDescriptorSetLayoutCreateInfo layout_create_info;
+ layout_create_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
+ layout_create_info.pNext = NULL;
+ layout_create_info.flags = 0;
+ layout_create_info.bindingCount = set_bindings[i].size();
+ layout_create_info.pBindings = set_bindings[i].ptr();
+
+ VkDescriptorSetLayout layout;
+ VkResult res = vkCreateDescriptorSetLayout(device, &layout_create_info, NULL, &layout);
+ if (res) {
+ error_text = "Error creating descriptor set layout for set " + itos(i);
+ success = false;
+ break;
+ }
+
+ Shader::Set set;
+ set.descriptor_set_layout = layout;
+ set.uniform_info = uniform_info[i];
+ //sort and hash
+ set.uniform_info.sort();
+
+ uint32_t format = 0; //no format, default
+
+ if (set.uniform_info.size()) {
+ //has data, needs an actual format;
+ UniformSetFormat usformat;
+ usformat.uniform_info = set.uniform_info;
+ Map<UniformSetFormat, uint32_t>::Element *E = uniform_set_format_cache.find(usformat);
+ if (E) {
+ format = E->get();
+ } else {
+ format = uniform_set_format_cache.size() + 1;
+ uniform_set_format_cache.insert(usformat, format);
+ }
+ }
+
+ shader.sets.push_back(set);
+ shader.set_formats.push_back(format);
+ }
+ }
+
+ if (success) {
+ //create pipeline layout
+ VkPipelineLayoutCreateInfo pipeline_layout_create_info;
+ pipeline_layout_create_info.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO;
+ pipeline_layout_create_info.pNext = NULL;
+ pipeline_layout_create_info.flags = 0;
+ pipeline_layout_create_info.setLayoutCount = shader.sets.size();
+
+ Vector<VkDescriptorSetLayout> layouts;
+ layouts.resize(shader.sets.size());
+
+ for (int i = 0; i < layouts.size(); i++) {
+ layouts.write[i] = shader.sets[i].descriptor_set_layout;
+ }
+
+ pipeline_layout_create_info.pSetLayouts = layouts.ptr();
+		VkPushConstantRange push_constant_range; //declared outside the branch so the pointer stays valid for vkCreatePipelineLayout below
+		if (push_constant.push_constant_size) {
+			push_constant_range.stageFlags = push_constant.push_constants_vk_stage;
+			push_constant_range.offset = 0;
+			push_constant_range.size = push_constant.push_constant_size;
+
+			pipeline_layout_create_info.pushConstantRangeCount = 1;
+			pipeline_layout_create_info.pPushConstantRanges = &push_constant_range;
+		} else {
+			pipeline_layout_create_info.pushConstantRangeCount = 0;
+			pipeline_layout_create_info.pPushConstantRanges = NULL;
+		}
+
+ VkResult err = vkCreatePipelineLayout(device, &pipeline_layout_create_info, NULL, &shader.pipeline_layout);
+
+ if (err) {
+ error_text = "Error creating pipeline layout.";
+ success = false;
+ }
+ }
+
+ if (!success) {
+ //clean up if failed
+ for (int i = 0; i < shader.pipeline_stages.size(); i++) {
+ vkDestroyShaderModule(device, shader.pipeline_stages[i].module, NULL);
+ }
+
+ for (int i = 0; i < shader.sets.size(); i++) {
+ vkDestroyDescriptorSetLayout(device, shader.sets[i].descriptor_set_layout, NULL);
+ }
+
+ ERR_FAIL_V_MSG(RID(), error_text);
+ }
+
+ return shader_owner.make_rid(shader);
+}
+
+uint32_t RenderingDeviceVulkan::shader_get_vertex_input_attribute_mask(RID p_shader) {
+ _THREAD_SAFE_METHOD_
+
+ const Shader *shader = shader_owner.getornull(p_shader);
+ ERR_FAIL_COND_V(!shader, 0);
+ return shader->vertex_input_mask;
+}
+
+/******************/
+/**** UNIFORMS ****/
+/******************/
+
+RID RenderingDeviceVulkan::uniform_buffer_create(uint32_t p_size_bytes, const Vector<uint8_t> &p_data) {
+
+ _THREAD_SAFE_METHOD_
+
+ ERR_FAIL_COND_V(p_data.size() && (uint32_t)p_data.size() != p_size_bytes, RID());
+
+ Buffer buffer;
+ Error err = _buffer_allocate(&buffer, p_size_bytes, VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT, VMA_MEMORY_USAGE_GPU_ONLY);
+ ERR_FAIL_COND_V(err != OK, RID());
+ if (p_data.size()) {
+ uint64_t data_size = p_data.size();
+ const uint8_t *r = p_data.ptr();
+ _buffer_update(&buffer, 0, r, data_size);
+ _buffer_memory_barrier(buffer.buffer, 0, data_size, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_UNIFORM_READ_BIT, false);
+ }
+ return uniform_buffer_owner.make_rid(buffer);
+}
+
+RID RenderingDeviceVulkan::storage_buffer_create(uint32_t p_size_bytes, const Vector<uint8_t> &p_data) {
+
+ _THREAD_SAFE_METHOD_
+
+ ERR_FAIL_COND_V(p_data.size() && (uint32_t)p_data.size() != p_size_bytes, RID());
+
+ Buffer buffer;
+ Error err = _buffer_allocate(&buffer, p_size_bytes, VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_STORAGE_BUFFER_BIT, VMA_MEMORY_USAGE_GPU_ONLY);
+ ERR_FAIL_COND_V(err != OK, RID());
+
+ if (p_data.size()) {
+ uint64_t data_size = p_data.size();
+ const uint8_t *r = p_data.ptr();
+ _buffer_update(&buffer, 0, r, data_size);
+ _buffer_memory_barrier(buffer.buffer, 0, data_size, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT, false);
+ }
+ return storage_buffer_owner.make_rid(buffer);
+}
+
+RID RenderingDeviceVulkan::texture_buffer_create(uint32_t p_size_elements, DataFormat p_format, const Vector<uint8_t> &p_data) {
+
+ _THREAD_SAFE_METHOD_
+
+ uint32_t element_size = get_format_vertex_size(p_format);
+	ERR_FAIL_COND_V_MSG(element_size == 0, RID(), "Format requested is not supported for texture buffers.");
+ uint64_t size_bytes = uint64_t(element_size) * p_size_elements;
+
+ ERR_FAIL_COND_V(p_data.size() && (uint32_t)p_data.size() != size_bytes, RID());
+
+ TextureBuffer texture_buffer;
+ Error err = _buffer_allocate(&texture_buffer.buffer, size_bytes, VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT, VMA_MEMORY_USAGE_GPU_ONLY);
+ ERR_FAIL_COND_V(err != OK, RID());
+
+ if (p_data.size()) {
+ uint64_t data_size = p_data.size();
+ const uint8_t *r = p_data.ptr();
+ _buffer_update(&texture_buffer.buffer, 0, r, data_size);
+ _buffer_memory_barrier(texture_buffer.buffer.buffer, 0, data_size, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_SHADER_READ_BIT, false);
+ }
+
+ VkBufferViewCreateInfo view_create_info;
+ view_create_info.sType = VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO;
+ view_create_info.pNext = NULL;
+ view_create_info.flags = 0;
+ view_create_info.buffer = texture_buffer.buffer.buffer;
+ view_create_info.format = vulkan_formats[p_format];
+ view_create_info.offset = 0;
+ view_create_info.range = size_bytes;
+
+ texture_buffer.view = VK_NULL_HANDLE;
+
+ VkResult res = vkCreateBufferView(device, &view_create_info, NULL, &texture_buffer.view);
+ if (res) {
+ _buffer_free(&texture_buffer.buffer);
+		ERR_FAIL_V_MSG(RID(), "Unable to create buffer view.");
+ }
+
+ //allocate the view
+ return texture_buffer_owner.make_rid(texture_buffer);
+}
+
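+// Descriptor pools are keyed by how many descriptors of each uniform type a
+// set requires. Pools are reused until max_descriptors_per_pool sets have been
+// allocated from them; once a pool's usage drops back to zero it is destroyed.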
+RenderingDeviceVulkan::DescriptorPool *RenderingDeviceVulkan::_descriptor_pool_allocate(const DescriptorPoolKey &p_key) {
+ if (!descriptor_pools.has(p_key)) {
+ descriptor_pools[p_key] = Set<DescriptorPool *>();
+ }
+
+ DescriptorPool *pool = NULL;
+
+ for (Set<DescriptorPool *>::Element *E = descriptor_pools[p_key].front(); E; E = E->next()) {
+ if (E->get()->usage < max_descriptors_per_pool) {
+ pool = E->get();
+ break;
+ }
+ }
+
+ if (!pool) {
+ //create a new one
+ pool = memnew(DescriptorPool);
+ pool->usage = 0;
+
+ VkDescriptorPoolCreateInfo descriptor_pool_create_info;
+ descriptor_pool_create_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
+ descriptor_pool_create_info.pNext = NULL;
+		descriptor_pool_create_info.flags = VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT; // can't think of a case where somebody would NOT need this flag...
+ descriptor_pool_create_info.maxSets = max_descriptors_per_pool;
+ Vector<VkDescriptorPoolSize> sizes;
+ //here comes more vulkan API strangeness
+
+ if (p_key.uniform_type[UNIFORM_TYPE_SAMPLER]) {
+ VkDescriptorPoolSize s;
+ s.type = VK_DESCRIPTOR_TYPE_SAMPLER;
+ s.descriptorCount = p_key.uniform_type[UNIFORM_TYPE_SAMPLER] * max_descriptors_per_pool;
+ sizes.push_back(s);
+ }
+ if (p_key.uniform_type[UNIFORM_TYPE_SAMPLER_WITH_TEXTURE]) {
+ VkDescriptorPoolSize s;
+ s.type = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
+ s.descriptorCount = p_key.uniform_type[UNIFORM_TYPE_SAMPLER_WITH_TEXTURE] * max_descriptors_per_pool;
+ sizes.push_back(s);
+ }
+ if (p_key.uniform_type[UNIFORM_TYPE_TEXTURE]) {
+ VkDescriptorPoolSize s;
+ s.type = VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE;
+ s.descriptorCount = p_key.uniform_type[UNIFORM_TYPE_TEXTURE] * max_descriptors_per_pool;
+ sizes.push_back(s);
+ }
+ if (p_key.uniform_type[UNIFORM_TYPE_IMAGE]) {
+ VkDescriptorPoolSize s;
+ s.type = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE;
+ s.descriptorCount = p_key.uniform_type[UNIFORM_TYPE_IMAGE] * max_descriptors_per_pool;
+ sizes.push_back(s);
+ }
+ if (p_key.uniform_type[UNIFORM_TYPE_TEXTURE_BUFFER] || p_key.uniform_type[UNIFORM_TYPE_SAMPLER_WITH_TEXTURE_BUFFER]) {
+ VkDescriptorPoolSize s;
+ s.type = VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER;
+ s.descriptorCount = (p_key.uniform_type[UNIFORM_TYPE_TEXTURE_BUFFER] + p_key.uniform_type[UNIFORM_TYPE_SAMPLER_WITH_TEXTURE_BUFFER]) * max_descriptors_per_pool;
+ sizes.push_back(s);
+ }
+ if (p_key.uniform_type[UNIFORM_TYPE_IMAGE_BUFFER]) {
+ VkDescriptorPoolSize s;
+ s.type = VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER;
+ s.descriptorCount = p_key.uniform_type[UNIFORM_TYPE_IMAGE_BUFFER] * max_descriptors_per_pool;
+ sizes.push_back(s);
+ }
+ if (p_key.uniform_type[UNIFORM_TYPE_UNIFORM_BUFFER]) {
+ VkDescriptorPoolSize s;
+ s.type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
+ s.descriptorCount = p_key.uniform_type[UNIFORM_TYPE_UNIFORM_BUFFER] * max_descriptors_per_pool;
+ sizes.push_back(s);
+ }
+
+ if (p_key.uniform_type[UNIFORM_TYPE_STORAGE_BUFFER]) {
+ VkDescriptorPoolSize s;
+ s.type = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
+ s.descriptorCount = p_key.uniform_type[UNIFORM_TYPE_STORAGE_BUFFER] * max_descriptors_per_pool;
+ sizes.push_back(s);
+ }
+
+ if (p_key.uniform_type[UNIFORM_TYPE_INPUT_ATTACHMENT]) {
+ VkDescriptorPoolSize s;
+ s.type = VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT;
+ s.descriptorCount = p_key.uniform_type[UNIFORM_TYPE_INPUT_ATTACHMENT] * max_descriptors_per_pool;
+ sizes.push_back(s);
+ }
+
+ descriptor_pool_create_info.poolSizeCount = sizes.size();
+ descriptor_pool_create_info.pPoolSizes = sizes.ptr();
+ VkResult res = vkCreateDescriptorPool(device, &descriptor_pool_create_info, NULL, &pool->pool);
+ ERR_FAIL_COND_V(res, NULL);
+ descriptor_pools[p_key].insert(pool);
+ }
+
+ pool->usage++;
+
+ return pool;
+}
+
+void RenderingDeviceVulkan::_descriptor_pool_free(const DescriptorPoolKey &p_key, DescriptorPool *p_pool) {
+#ifdef DEBUG_ENABLED
+ ERR_FAIL_COND(!descriptor_pools[p_key].has(p_pool));
+#endif
+ ERR_FAIL_COND(p_pool->usage == 0);
+ p_pool->usage--;
+ if (p_pool->usage == 0) {
+ vkDestroyDescriptorPool(device, p_pool->pool, NULL);
+ descriptor_pools[p_key].erase(p_pool);
+ memdelete(p_pool);
+ if (descriptor_pools[p_key].empty()) {
+ descriptor_pools.erase(p_key);
+ }
+ }
+}
+
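+// Builds a VkDescriptorSet matching one descriptor set of a shader.
+// Caller-side sketch (hypothetical bindings; Uniform as declared in
+// rendering_device.h):
+//
+//	Vector<Uniform> us;
+//	Uniform u;
+//	u.type = UNIFORM_TYPE_SAMPLER_WITH_TEXTURE;
+//	u.binding = 0;
+//	u.ids.push_back(sampler);
+//	u.ids.push_back(texture);
+//	us.push_back(u);
+//	RID set = rd->uniform_set_create(us, shader, 0);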
+RID RenderingDeviceVulkan::uniform_set_create(const Vector<Uniform> &p_uniforms, RID p_shader, uint32_t p_shader_set) {
+
+ _THREAD_SAFE_METHOD_
+
+ ERR_FAIL_COND_V(p_uniforms.size() == 0, RID());
+
+ Shader *shader = shader_owner.getornull(p_shader);
+ ERR_FAIL_COND_V(!shader, RID());
+
+ ERR_FAIL_COND_V_MSG(p_shader_set >= (uint32_t)shader->sets.size() || shader->sets[p_shader_set].uniform_info.size() == 0, RID(),
+ "Desired set (" + itos(p_shader_set) + ") not used by shader.");
+ //see that all sets in shader are satisfied
+
+ const Shader::Set &set = shader->sets[p_shader_set];
+
+ uint32_t uniform_count = p_uniforms.size();
+ const Uniform *uniforms = p_uniforms.ptr();
+
+ uint32_t set_uniform_count = set.uniform_info.size();
+ const UniformInfo *set_uniforms = set.uniform_info.ptr();
+
+ Vector<VkWriteDescriptorSet> writes;
+ DescriptorPoolKey pool_key;
+
+ //to keep them alive until update call
+ List<Vector<VkDescriptorBufferInfo> > buffer_infos;
+ List<Vector<VkBufferView> > buffer_views;
+ List<Vector<VkDescriptorImageInfo> > image_infos;
+ //used for verification to make sure a uniform set does not use a framebuffer bound texture
+ Vector<RID> attachable_textures;
+ Vector<Texture *> mutable_sampled_textures;
+ Vector<Texture *> mutable_storage_textures;
+
+ for (uint32_t i = 0; i < set_uniform_count; i++) {
+ const UniformInfo &set_uniform = set_uniforms[i];
+ int uniform_idx = -1;
+ for (int j = 0; j < (int)uniform_count; j++) {
+ if (uniforms[j].binding == set_uniform.binding) {
+ uniform_idx = j;
+ }
+ }
+ ERR_FAIL_COND_V_MSG(uniform_idx == -1, RID(),
+ "All the shader bindings for the given set must be covered by the uniforms provided.");
+
+ const Uniform &uniform = uniforms[uniform_idx];
+
+ ERR_FAIL_COND_V_MSG(uniform.type != set_uniform.type, RID(),
+ "Mismatch uniform type for binding (" + itos(set_uniform.binding) + "). Expected '" + shader_uniform_names[set_uniform.type] + "', supplied: '" + shader_uniform_names[uniform.type] + "'.");
+
+ VkWriteDescriptorSet write; //common header
+ write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
+ write.pNext = NULL;
+		write.dstSet = VK_NULL_HANDLE; //will be assigned afterwards, once everything is validated
+ write.dstBinding = set_uniform.binding;
+ uint32_t type_size = 1;
+
+ switch (uniform.type) {
+ case UNIFORM_TYPE_SAMPLER: {
+ if (uniform.ids.size() != set_uniform.length) {
+ if (set_uniform.length > 1) {
+						ERR_FAIL_V_MSG(RID(), "Sampler (binding: " + itos(uniform.binding) + ") is an array of (" + itos(set_uniform.length) + ") sampler elements, so it should be provided an equal number of sampler IDs to satisfy it (IDs provided: " + itos(uniform.ids.size()) + ").");
+ } else {
+ ERR_FAIL_V_MSG(RID(), "Sampler (binding: " + itos(uniform.binding) + ") should provide one ID referencing a sampler (IDs provided: " + itos(uniform.ids.size()) + ").");
+ }
+ }
+
+ Vector<VkDescriptorImageInfo> image_info;
+
+ for (int j = 0; j < uniform.ids.size(); j++) {
+ VkSampler *sampler = sampler_owner.getornull(uniform.ids[j]);
+ ERR_FAIL_COND_V_MSG(!sampler, RID(), "Sampler (binding: " + itos(uniform.binding) + ", index " + itos(j) + ") is not a valid sampler.");
+
+ VkDescriptorImageInfo img_info;
+ img_info.sampler = *sampler;
+ img_info.imageView = VK_NULL_HANDLE;
+ img_info.imageLayout = VK_IMAGE_LAYOUT_UNDEFINED;
+
+ image_info.push_back(img_info);
+ }
+
+ write.dstArrayElement = 0;
+ write.descriptorCount = uniform.ids.size();
+ write.descriptorType = VK_DESCRIPTOR_TYPE_SAMPLER;
+ write.pImageInfo = image_infos.push_back(image_info)->get().ptr();
+ write.pBufferInfo = NULL;
+ write.pTexelBufferView = NULL;
+
+ type_size = uniform.ids.size();
+
+ } break;
+ case UNIFORM_TYPE_SAMPLER_WITH_TEXTURE: {
+
+ if (uniform.ids.size() != set_uniform.length * 2) {
+ if (set_uniform.length > 1) {
+						ERR_FAIL_V_MSG(RID(), "SamplerTexture (binding: " + itos(uniform.binding) + ") is an array of (" + itos(set_uniform.length) + ") sampler&texture elements, so it should be provided twice as many IDs (sampler, texture pairs) to satisfy it (IDs provided: " + itos(uniform.ids.size()) + ").");
+ } else {
+ ERR_FAIL_V_MSG(RID(), "SamplerTexture (binding: " + itos(uniform.binding) + ") should provide two IDs referencing a sampler and then a texture (IDs provided: " + itos(uniform.ids.size()) + ").");
+ }
+ }
+
+ Vector<VkDescriptorImageInfo> image_info;
+
+ for (int j = 0; j < uniform.ids.size(); j += 2) {
+ VkSampler *sampler = sampler_owner.getornull(uniform.ids[j + 0]);
+					ERR_FAIL_COND_V_MSG(!sampler, RID(), "SamplerTexture (binding: " + itos(uniform.binding) + ", index " + itos(j) + ") is not a valid sampler.");
+
+					Texture *texture = texture_owner.getornull(uniform.ids[j + 1]);
+					ERR_FAIL_COND_V_MSG(!texture, RID(), "Texture (binding: " + itos(uniform.binding) + ", index " + itos(j + 1) + ") is not a valid texture.");
+
+					ERR_FAIL_COND_V_MSG(!(texture->usage_flags & TEXTURE_USAGE_SAMPLING_BIT), RID(),
+							"Texture (binding: " + itos(uniform.binding) + ", index " + itos(j + 1) + ") needs the TEXTURE_USAGE_SAMPLING_BIT usage flag set in order to be used as uniform.");
+
+ VkDescriptorImageInfo img_info;
+ img_info.sampler = *sampler;
+ img_info.imageView = texture->view;
+
+ if (texture->usage_flags & (TEXTURE_USAGE_COLOR_ATTACHMENT_BIT | TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT | TEXTURE_USAGE_RESOLVE_ATTACHMENT_BIT)) {
+ attachable_textures.push_back(texture->owner.is_valid() ? texture->owner : uniform.ids[j + 1]);
+ }
+
+ if (texture->owner.is_valid()) {
+ texture = texture_owner.getornull(texture->owner);
+ ERR_FAIL_COND_V(!texture, RID()); //bug, should never happen
+ }
+
+ img_info.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
+
+ image_info.push_back(img_info);
+
+ if (texture->usage_flags & TEXTURE_USAGE_STORAGE_BIT) {
+ //can also be used as storage, add to mutable sampled
+ mutable_sampled_textures.push_back(texture);
+ }
+ }
+
+ write.dstArrayElement = 0;
+ write.descriptorCount = uniform.ids.size() / 2;
+ write.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
+ write.pImageInfo = image_infos.push_back(image_info)->get().ptr();
+ write.pBufferInfo = NULL;
+ write.pTexelBufferView = NULL;
+
+ type_size = uniform.ids.size() / 2;
+
+ } break;
+ case UNIFORM_TYPE_TEXTURE: {
+
+ if (uniform.ids.size() != set_uniform.length) {
+ if (set_uniform.length > 1) {
+ ERR_FAIL_V_MSG(RID(), "Texture (binding: " + itos(uniform.binding) + ") is an array of (" + itos(set_uniform.length) + ") textures, so it should be provided equal number of texture IDs to satisfy it (IDs provided: " + itos(uniform.ids.size()) + ").");
+ } else {
+ ERR_FAIL_V_MSG(RID(), "Texture (binding: " + itos(uniform.binding) + ") should provide one ID referencing a texture (IDs provided: " + itos(uniform.ids.size()) + ").");
+ }
+ }
+
+ Vector<VkDescriptorImageInfo> image_info;
+
+ for (int j = 0; j < uniform.ids.size(); j++) {
+ Texture *texture = texture_owner.getornull(uniform.ids[j]);
+ ERR_FAIL_COND_V_MSG(!texture, RID(), "Texture (binding: " + itos(uniform.binding) + ", index " + itos(j) + ") is not a valid texture.");
+
+ ERR_FAIL_COND_V_MSG(!(texture->usage_flags & TEXTURE_USAGE_SAMPLING_BIT), RID(),
+ "Texture (binding: " + itos(uniform.binding) + ", index " + itos(j) + ") needs the TEXTURE_USAGE_SAMPLING_BIT usage flag set in order to be used as uniform.");
+
+ VkDescriptorImageInfo img_info;
+ img_info.sampler = NULL;
+ img_info.imageView = texture->view;
+
+ if (texture->usage_flags & (TEXTURE_USAGE_COLOR_ATTACHMENT_BIT | TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT | TEXTURE_USAGE_RESOLVE_ATTACHMENT_BIT)) {
+ attachable_textures.push_back(texture->owner.is_valid() ? texture->owner : uniform.ids[j]);
+ }
+
+ if (texture->owner.is_valid()) {
+ texture = texture_owner.getornull(texture->owner);
+ ERR_FAIL_COND_V(!texture, RID()); //bug, should never happen
+ }
+
+ img_info.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
+
+ image_info.push_back(img_info);
+
+ if (texture->usage_flags & TEXTURE_USAGE_STORAGE_BIT) {
+ //can also be used as storage, add to mutable sampled
+ mutable_sampled_textures.push_back(texture);
+ }
+ }
+
+ write.dstArrayElement = 0;
+ write.descriptorCount = uniform.ids.size();
+ write.descriptorType = VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE;
+ write.pImageInfo = image_infos.push_back(image_info)->get().ptr();
+ write.pBufferInfo = NULL;
+ write.pTexelBufferView = NULL;
+
+ type_size = uniform.ids.size();
+ } break;
+ case UNIFORM_TYPE_IMAGE: {
+
+ if (uniform.ids.size() != set_uniform.length) {
+ if (set_uniform.length > 1) {
+ ERR_FAIL_V_MSG(RID(), "Image (binding: " + itos(uniform.binding) + ") is an array of (" + itos(set_uniform.length) + ") textures, so it should be provided equal number of texture IDs to satisfy it (IDs provided: " + itos(uniform.ids.size()) + ").");
+ } else {
+ ERR_FAIL_V_MSG(RID(), "Image (binding: " + itos(uniform.binding) + ") should provide one ID referencing a texture (IDs provided: " + itos(uniform.ids.size()) + ").");
+ }
+ }
+
+ Vector<VkDescriptorImageInfo> image_info;
+
+ for (int j = 0; j < uniform.ids.size(); j++) {
+ Texture *texture = texture_owner.getornull(uniform.ids[j]);
+
+ ERR_FAIL_COND_V_MSG(!texture, RID(),
+ "Image (binding: " + itos(uniform.binding) + ", index " + itos(j) + ") is not a valid texture.");
+
+ ERR_FAIL_COND_V_MSG(!(texture->usage_flags & TEXTURE_USAGE_STORAGE_BIT), RID(),
+ "Image (binding: " + itos(uniform.binding) + ", index " + itos(j) + ") needs the TEXTURE_USAGE_STORAGE_BIT usage flag set in order to be used as uniform.");
+
+ VkDescriptorImageInfo img_info;
+ img_info.sampler = NULL;
+ img_info.imageView = texture->view;
+
+ if (texture->owner.is_valid()) {
+ texture = texture_owner.getornull(texture->owner);
+ ERR_FAIL_COND_V(!texture, RID()); //bug, should never happen
+ }
+
+ img_info.imageLayout = VK_IMAGE_LAYOUT_GENERAL;
+
+ image_info.push_back(img_info);
+
+ if (texture->usage_flags & TEXTURE_USAGE_SAMPLING_BIT) {
+ //can also be used as sampled, add to mutable storage
+ mutable_storage_textures.push_back(texture);
+ }
+ }
+
+ write.dstArrayElement = 0;
+ write.descriptorCount = uniform.ids.size();
+ write.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE;
+ write.pImageInfo = image_infos.push_back(image_info)->get().ptr();
+ write.pBufferInfo = NULL;
+ write.pTexelBufferView = NULL;
+
+ type_size = uniform.ids.size();
+
+ } break;
+ case UNIFORM_TYPE_TEXTURE_BUFFER: {
+ if (uniform.ids.size() != set_uniform.length) {
+ if (set_uniform.length > 1) {
+ ERR_FAIL_V_MSG(RID(), "Buffer (binding: " + itos(uniform.binding) + ") is an array of (" + itos(set_uniform.length) + ") texture buffer elements, so it should be provided equal number of texture buffer IDs to satisfy it (IDs provided: " + itos(uniform.ids.size()) + ").");
+ } else {
+ ERR_FAIL_V_MSG(RID(), "Buffer (binding: " + itos(uniform.binding) + ") should provide one ID referencing a texture buffer (IDs provided: " + itos(uniform.ids.size()) + ").");
+ }
+ }
+
+ Vector<VkDescriptorBufferInfo> buffer_info;
+ Vector<VkBufferView> buffer_view;
+
+ for (int j = 0; j < uniform.ids.size(); j++) {
+ TextureBuffer *buffer = texture_buffer_owner.getornull(uniform.ids[j]);
+ ERR_FAIL_COND_V_MSG(!buffer, RID(), "Texture Buffer (binding: " + itos(uniform.binding) + ", index " + itos(j) + ") is not a valid texture buffer.");
+
+ buffer_info.push_back(buffer->buffer.buffer_info);
+ buffer_view.push_back(buffer->view);
+ }
+
+ write.dstArrayElement = 0;
+ write.descriptorCount = uniform.ids.size();
+ write.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER;
+ write.pImageInfo = NULL;
+ write.pBufferInfo = buffer_infos.push_back(buffer_info)->get().ptr();
+ write.pTexelBufferView = buffer_views.push_back(buffer_view)->get().ptr();
+
+ type_size = uniform.ids.size();
+
+ } break;
+ case UNIFORM_TYPE_SAMPLER_WITH_TEXTURE_BUFFER: {
+
+ if (uniform.ids.size() != set_uniform.length * 2) {
+ if (set_uniform.length > 1) {
+ ERR_FAIL_V_MSG(RID(), "SamplerBuffer (binding: " + itos(uniform.binding) + ") is an array of (" + itos(set_uniform.length) + ") sampler buffer elements, so it should provided twice the amount of IDs (sampler,buffer pairs) to satisfy it (IDs provided: " + itos(uniform.ids.size()) + ").");
+ } else {
+ ERR_FAIL_V_MSG(RID(), "SamplerBuffer (binding: " + itos(uniform.binding) + ") should provide two IDs referencing a sampler and then a texture buffer (IDs provided: " + itos(uniform.ids.size()) + ").");
+ }
+ }
+
+ Vector<VkDescriptorImageInfo> image_info;
+ Vector<VkDescriptorBufferInfo> buffer_info;
+ Vector<VkBufferView> buffer_view;
+
+ for (int j = 0; j < uniform.ids.size(); j += 2) {
+ VkSampler *sampler = sampler_owner.getornull(uniform.ids[j + 0]);
+ ERR_FAIL_COND_V_MSG(!sampler, RID(), "SamplerBuffer (binding: " + itos(uniform.binding) + ", index " + itos(j + 1) + ") is not a valid sampler.");
+
+ TextureBuffer *buffer = texture_buffer_owner.getornull(uniform.ids[j + 1]);
+
+ VkDescriptorImageInfo img_info;
+ img_info.sampler = *sampler;
+ img_info.imageView = VK_NULL_HANDLE;
+ img_info.imageLayout = VK_IMAGE_LAYOUT_UNDEFINED;
+
+ image_info.push_back(img_info);
+
+ ERR_FAIL_COND_V_MSG(!buffer, RID(), "SamplerBuffer (binding: " + itos(uniform.binding) + ", index " + itos(j + 1) + ") is not a valid texture buffer.");
+
+ buffer_info.push_back(buffer->buffer.buffer_info);
+ buffer_view.push_back(buffer->view);
+ }
+
+ write.dstArrayElement = 0;
+ write.descriptorCount = uniform.ids.size() / 2;
+ write.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER;
+ write.pImageInfo = image_infos.push_back(image_info)->get().ptr();
+ write.pBufferInfo = buffer_infos.push_back(buffer_info)->get().ptr();
+ write.pTexelBufferView = buffer_views.push_back(buffer_view)->get().ptr();
+
+ type_size = uniform.ids.size() / 2;
+ } break;
+ case UNIFORM_TYPE_IMAGE_BUFFER: {
+ //todo
+
+ } break;
+ case UNIFORM_TYPE_UNIFORM_BUFFER: {
+ ERR_FAIL_COND_V_MSG(uniform.ids.size() != 1, RID(),
+ "Uniform buffer supplied (binding: " + itos(uniform.binding) + ") must provide one ID (" + itos(uniform.ids.size()) + " provided).");
+
+ Buffer *buffer = uniform_buffer_owner.getornull(uniform.ids[0]);
+ ERR_FAIL_COND_V_MSG(!buffer, RID(), "Uniform buffer supplied (binding: " + itos(uniform.binding) + ") is invalid.");
+
+ ERR_FAIL_COND_V_MSG(buffer->size != (uint32_t)set_uniform.length, RID(),
+ "Uniform buffer supplied (binding: " + itos(uniform.binding) + ") size (" + itos(buffer->size) + " does not match size of shader uniform: (" + itos(set_uniform.length) + ").");
+
+ write.dstArrayElement = 0;
+ write.descriptorCount = 1;
+ write.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
+ write.pImageInfo = NULL;
+ write.pBufferInfo = &buffer->buffer_info;
+ write.pTexelBufferView = NULL;
+
+ } break;
+ case UNIFORM_TYPE_STORAGE_BUFFER: {
+ ERR_FAIL_COND_V_MSG(uniform.ids.size() != 1, RID(),
+ "Storage buffer supplied (binding: " + itos(uniform.binding) + ") must provide one ID (" + itos(uniform.ids.size()) + " provided).");
+
+ Buffer *buffer = storage_buffer_owner.getornull(uniform.ids[0]);
+ ERR_FAIL_COND_V_MSG(!buffer, RID(), "Storage buffer supplied (binding: " + itos(uniform.binding) + ") is invalid.");
+
+ //if 0, then it's sized at link time
+ ERR_FAIL_COND_V_MSG(set_uniform.length > 0 && buffer->size != (uint32_t)set_uniform.length, RID(),
+ "Storage buffer supplied (binding: " + itos(uniform.binding) + ") size (" + itos(buffer->size) + ") does not match size of shader uniform (" + itos(set_uniform.length) + ").");
+
+ write.dstArrayElement = 0;
+ write.descriptorCount = 1;
+ write.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
+ write.pImageInfo = NULL;
+ write.pBufferInfo = &buffer->buffer_info;
+ write.pTexelBufferView = NULL;
+ } break;
+ case UNIFORM_TYPE_INPUT_ATTACHMENT: {
+
+ } break;
+ default: {
+ }
+ }
+
+ writes.push_back(write);
+
+ ERR_FAIL_COND_V_MSG(pool_key.uniform_type[set_uniform.type] == MAX_DESCRIPTOR_POOL_ELEMENT, RID(),
+ "Uniform set reached the limit of bindings for the same type (" + itos(MAX_DESCRIPTOR_POOL_ELEMENT) + ").");
+ pool_key.uniform_type[set_uniform.type] += type_size;
+ }
+
+ //need a descriptor pool
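+ //(pool_key holds the per-type descriptor counts accumulated above, so a pool with matching capacity can be found or created)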
+ DescriptorPool *pool = _descriptor_pool_allocate(pool_key);
+
+ ERR_FAIL_COND_V(!pool, RID());
+
+ VkDescriptorSetAllocateInfo descriptor_set_allocate_info;
+
+ descriptor_set_allocate_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
+ descriptor_set_allocate_info.pNext = NULL;
+ descriptor_set_allocate_info.descriptorPool = pool->pool;
+ descriptor_set_allocate_info.descriptorSetCount = 1;
+ descriptor_set_allocate_info.pSetLayouts = &shader->sets[p_shader_set].descriptor_set_layout;
+
+ VkDescriptorSet descriptor_set;
+
+ VkResult res = vkAllocateDescriptorSets(device, &descriptor_set_allocate_info, &descriptor_set);
+ if (res) {
+ _descriptor_pool_free(pool_key, pool); // meh
+ ERR_FAIL_V_MSG(RID(), "Cannot allocate descriptor sets.");
+ }
+
+ UniformSet uniform_set;
+ uniform_set.pool = pool;
+ uniform_set.pool_key = pool_key;
+ uniform_set.descriptor_set = descriptor_set;
+ uniform_set.format = shader->set_formats[p_shader_set];
+ uniform_set.attachable_textures = attachable_textures;
+ uniform_set.mutable_sampled_textures = mutable_sampled_textures;
+ uniform_set.mutable_storage_textures = mutable_storage_textures;
+ uniform_set.shader_set = p_shader_set;
+ uniform_set.shader_id = p_shader;
+
+ RID id = uniform_set_owner.make_rid(uniform_set);
+ //add dependencies
+ _add_dependency(id, p_shader);
+ for (uint32_t i = 0; i < uniform_count; i++) {
+ const Uniform &uniform = uniforms[i];
+ int id_count = uniform.ids.size();
+ const RID *ids = uniform.ids.ptr();
+ for (int j = 0; j < id_count; j++) {
+ _add_dependency(id, ids[j]);
+ }
+ }
+
+ //write the contents
+ if (writes.size()) {
+ for (int i = 0; i < writes.size(); i++) {
+ writes.write[i].dstSet = descriptor_set;
+ }
+ vkUpdateDescriptorSets(device, writes.size(), writes.ptr(), 0, NULL);
+ }
+
+ return id;
+}
+
+bool RenderingDeviceVulkan::uniform_set_is_valid(RID p_uniform_set) {
+ return uniform_set_owner.owns(p_uniform_set);
+}
+
+Error RenderingDeviceVulkan::buffer_update(RID p_buffer, uint32_t p_offset, uint32_t p_size, const void *p_data, bool p_sync_with_draw) {
+ _THREAD_SAFE_METHOD_
+
+ ERR_FAIL_COND_V_MSG(draw_list && p_sync_with_draw, ERR_INVALID_PARAMETER,
+ "Updating buffers in 'sync to draw' mode is forbidden during creation of a draw list");
+
+ VkPipelineStageFlags dst_stage_mask;
+ VkAccessFlags dst_access;
+
+ Buffer *buffer = NULL;
+ if (vertex_buffer_owner.owns(p_buffer)) {
+ dst_stage_mask = VK_PIPELINE_STAGE_VERTEX_INPUT_BIT;
+ dst_access = VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT;
+ buffer = vertex_buffer_owner.getornull(p_buffer);
+ } else if (index_buffer_owner.owns(p_buffer)) {
+ dst_stage_mask = VK_PIPELINE_STAGE_VERTEX_INPUT_BIT;
+ dst_access = VK_ACCESS_INDEX_READ_BIT;
+ buffer = index_buffer_owner.getornull(p_buffer);
+ } else if (uniform_buffer_owner.owns(p_buffer)) {
+ dst_stage_mask = VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
+ dst_access = VK_ACCESS_UNIFORM_READ_BIT;
+ buffer = uniform_buffer_owner.getornull(p_buffer);
+ } else if (texture_buffer_owner.owns(p_buffer)) {
+ dst_stage_mask = VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
+ dst_access = VK_ACCESS_SHADER_READ_BIT;
+ buffer = &texture_buffer_owner.getornull(p_buffer)->buffer;
+ } else if (storage_buffer_owner.owns(p_buffer)) {
+ dst_stage_mask = VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
+ dst_access = VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
+ buffer = storage_buffer_owner.getornull(p_buffer);
+ } else {
+ ERR_FAIL_V_MSG(ERR_INVALID_PARAMETER, "Buffer argument is not a valid buffer of any type.");
+ }
+
+ ERR_FAIL_COND_V_MSG(p_offset + p_size > buffer->size, ERR_INVALID_PARAMETER,
+ "Attempted to write buffer (" + itos((p_offset + p_size) - buffer->size) + " bytes) past the end.");
+
+ Error err = _buffer_update(buffer, p_offset, (uint8_t *)p_data, p_size, p_sync_with_draw);
+ if (err) {
+ return err;
+ }
+
+#ifdef FORCE_FULL_BARRIER
+ _full_barrier(p_sync_with_draw);
+#else
+ _buffer_memory_barrier(buffer->buffer, p_offset, p_size, VK_PIPELINE_STAGE_TRANSFER_BIT, dst_stage_mask, VK_ACCESS_TRANSFER_WRITE_BIT, dst_access, p_sync_with_draw);
+#endif
+ return err;
+}
+
+Vector<uint8_t> RenderingDeviceVulkan::buffer_get_data(RID p_buffer) {
+
+ _THREAD_SAFE_METHOD_
+
+ Buffer *buffer = NULL;
+ if (vertex_buffer_owner.owns(p_buffer)) {
+ buffer = vertex_buffer_owner.getornull(p_buffer);
+ } else if (index_buffer_owner.owns(p_buffer)) {
+ buffer = index_buffer_owner.getornull(p_buffer);
+ } else if (texture_buffer_owner.owns(p_buffer)) {
+ buffer = &texture_buffer_owner.getornull(p_buffer)->buffer;
+ } else if (storage_buffer_owner.owns(p_buffer)) {
+ buffer = storage_buffer_owner.getornull(p_buffer);
+ } else {
+ ERR_FAIL_V_MSG(Vector<uint8_t>(), "Buffer is either invalid or this type of buffer can't be retrieved. Only Index and Vertex buffers allow retrieving.");
+ }
+
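+ //readback: copy into a host-visible staging buffer, flush (submit and wait), then map it and copy the data out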
+ VkCommandBuffer command_buffer = frames[frame].setup_command_buffer;
+ Buffer tmp_buffer;
+ _buffer_allocate(&tmp_buffer, buffer->size, VK_BUFFER_USAGE_TRANSFER_DST_BIT, VMA_MEMORY_USAGE_CPU_ONLY);
+ VkBufferCopy region;
+ region.srcOffset = 0;
+ region.dstOffset = 0;
+ region.size = buffer->size;
+ vkCmdCopyBuffer(command_buffer, buffer->buffer, tmp_buffer.buffer, 1, &region); //dst buffer is in CPU, but I wonder if src buffer needs a barrier for this..
+ //flush everything so memory can be safely mapped
+ _flush(true);
+
+ void *buffer_mem;
+ VkResult vkerr = vmaMapMemory(allocator, tmp_buffer.allocation, &buffer_mem);
+ if (vkerr) {
+ ERR_FAIL_V(Vector<uint8_t>());
+ }
+
+ Vector<uint8_t> buffer_data;
+ {
+
+ buffer_data.resize(buffer->size);
+ uint8_t *w = buffer_data.ptrw();
+ copymem(w, buffer_mem, buffer->size);
+ }
+
+ vmaUnmapMemory(allocator, tmp_buffer.allocation);
+
+ _buffer_free(&tmp_buffer);
+
+ return buffer_data;
+}
+
+/*************************/
+/**** RENDER PIPELINE ****/
+/*************************/
+
+RID RenderingDeviceVulkan::render_pipeline_create(RID p_shader, FramebufferFormatID p_framebuffer_format, VertexFormatID p_vertex_format, RenderPrimitive p_render_primitive, const PipelineRasterizationState &p_rasterization_state, const PipelineMultisampleState &p_multisample_state, const PipelineDepthStencilState &p_depth_stencil_state, const PipelineColorBlendState &p_blend_state, int p_dynamic_state_flags) {
+
+ _THREAD_SAFE_METHOD_
+
+ //needs a shader
+ Shader *shader = shader_owner.getornull(p_shader);
+ ERR_FAIL_COND_V(!shader, RID());
+
+ ERR_FAIL_COND_V_MSG(shader->is_compute, RID(),
+ "Compute shaders can't be used in render pipelines");
+
+ if (p_framebuffer_format == INVALID_ID) {
+ //if nothing provided, use an empty one (no attachments)
+ p_framebuffer_format = framebuffer_format_create(Vector<AttachmentFormat>());
+ }
+ ERR_FAIL_COND_V(!framebuffer_formats.has(p_framebuffer_format), RID());
+ const FramebufferFormat &fb_format = framebuffer_formats[p_framebuffer_format];
+
+ { //validate shader vs framebuffer
+
+ ERR_FAIL_COND_V_MSG(shader->fragment_outputs != fb_format.color_attachments, RID(),
+ "Mismatch fragment output bindings (" + itos(shader->fragment_outputs) + ") and framebuffer color buffers (" + itos(fb_format.color_attachments) + ") when binding both in render pipeline.");
+ }
+ //vertex
+ VkPipelineVertexInputStateCreateInfo pipeline_vertex_input_state_create_info;
+
+ if (p_vertex_format != INVALID_ID) {
+ //uses a vertex format; validate it against the shader's inputs
+ ERR_FAIL_COND_V(!vertex_formats.has(p_vertex_format), RID());
+ const VertexDescriptionCache &vd = vertex_formats[p_vertex_format];
+
+ pipeline_vertex_input_state_create_info = vd.create_info;
+
+ //validate with inputs
+ for (uint32_t i = 0; i < 32; i++) {
+ if (!(shader->vertex_input_mask & (1 << i))) {
+ continue;
+ }
+ bool found = false;
+ for (int j = 0; j < vd.vertex_formats.size(); j++) {
+ if (vd.vertex_formats[j].location == i) {
+ found = true;
+ }
+ }
+
+ ERR_FAIL_COND_V_MSG(!found, RID(),
+ "Shader vertex input location (" + itos(i) + ") not provided in vertex input description for pipeline creation.");
+ }
+
+ } else {
+ //does not use vertices
+ pipeline_vertex_input_state_create_info.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO;
+ pipeline_vertex_input_state_create_info.pNext = NULL;
+ pipeline_vertex_input_state_create_info.flags = 0;
+ pipeline_vertex_input_state_create_info.vertexBindingDescriptionCount = 0;
+ pipeline_vertex_input_state_create_info.pVertexBindingDescriptions = NULL;
+ pipeline_vertex_input_state_create_info.vertexAttributeDescriptionCount = 0;
+ pipeline_vertex_input_state_create_info.pVertexAttributeDescriptions = NULL;
+
+ ERR_FAIL_COND_V_MSG(shader->vertex_input_mask != 0, RID(),
+ "Shader contains vertex inputs, but no vertex input description was provided for pipeline creation.");
+ }
+ //input assembly
+
+ ERR_FAIL_INDEX_V(p_render_primitive, RENDER_PRIMITIVE_MAX, RID());
+
+ VkPipelineInputAssemblyStateCreateInfo input_assembly_create_info;
+ input_assembly_create_info.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO;
+ input_assembly_create_info.pNext = NULL;
+ input_assembly_create_info.flags = 0;
+
+ static const VkPrimitiveTopology topology_list[RENDER_PRIMITIVE_MAX] = {
+ VK_PRIMITIVE_TOPOLOGY_POINT_LIST,
+ VK_PRIMITIVE_TOPOLOGY_LINE_LIST,
+ VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY,
+ VK_PRIMITIVE_TOPOLOGY_LINE_STRIP,
+ VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY,
+ VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST,
+ VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY,
+ VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP,
+ VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY,
+ VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP,
+ VK_PRIMITIVE_TOPOLOGY_PATCH_LIST
+ };
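+ //RENDER_PRIMITIVE_TRIANGLE_STRIPS_WITH_RESTART_INDEX maps to the same topology as regular strips; only primitiveRestartEnable below differs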
+
+ input_assembly_create_info.topology = topology_list[p_render_primitive];
+ input_assembly_create_info.primitiveRestartEnable = (p_render_primitive == RENDER_PRIMITIVE_TRIANGLE_STRIPS_WITH_RESTART_INDEX);
+
+ //tessellation
+ VkPipelineTessellationStateCreateInfo tesselation_create_info;
+ tesselation_create_info.sType = VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_STATE_CREATE_INFO;
+ tesselation_create_info.pNext = NULL;
+ tesselation_create_info.flags = 0;
+ ERR_FAIL_COND_V(p_rasterization_state.patch_control_points < 1 || p_rasterization_state.patch_control_points > limits.maxTessellationPatchSize, RID());
+ tesselation_create_info.patchControlPoints = p_rasterization_state.patch_control_points;
+
+ VkPipelineViewportStateCreateInfo viewport_state_create_info;
+ viewport_state_create_info.sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO;
+ viewport_state_create_info.pNext = NULL;
+ viewport_state_create_info.flags = 0;
+ viewport_state_create_info.viewportCount = 1; //if VR extensions are supported at some point, this will have to be customizable in the framebuffer format
+ viewport_state_create_info.pViewports = NULL;
+ viewport_state_create_info.scissorCount = 1;
+ viewport_state_create_info.pScissors = NULL;
+
+ //rasterization
+ VkPipelineRasterizationStateCreateInfo rasterization_state_create_info;
+ rasterization_state_create_info.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO;
+ rasterization_state_create_info.pNext = NULL;
+ rasterization_state_create_info.flags = 0;
+ rasterization_state_create_info.depthClampEnable = p_rasterization_state.enable_depth_clamp;
+ rasterization_state_create_info.rasterizerDiscardEnable = p_rasterization_state.discard_primitives;
+ rasterization_state_create_info.polygonMode = (p_rasterization_state.wireframe ? VK_POLYGON_MODE_LINE : VK_POLYGON_MODE_FILL);
+ static VkCullModeFlags cull_mode[3] = {
+ VK_CULL_MODE_NONE,
+ VK_CULL_MODE_FRONT_BIT,
+ VK_CULL_MODE_BACK_BIT
+ };
+
+ ERR_FAIL_INDEX_V(p_rasterization_state.cull_mode, 3, RID());
+ rasterization_state_create_info.cullMode = cull_mode[p_rasterization_state.cull_mode];
+ rasterization_state_create_info.frontFace = (p_rasterization_state.front_face == POLYGON_FRONT_FACE_CLOCKWISE ? VK_FRONT_FACE_CLOCKWISE : VK_FRONT_FACE_COUNTER_CLOCKWISE);
+ rasterization_state_create_info.depthBiasEnable = p_rasterization_state.depth_bias_enable;
+ rasterization_state_create_info.depthBiasConstantFactor = p_rasterization_state.depth_bias_constant_factor;
+ rasterization_state_create_info.depthBiasClamp = p_rasterization_state.depth_bias_clamp;
+ rasterization_state_create_info.depthBiasSlopeFactor = p_rasterization_state.depth_bias_slope_factor;
+ rasterization_state_create_info.lineWidth = p_rasterization_state.line_width;
+
+ //multisample
+ VkPipelineMultisampleStateCreateInfo multisample_state_create_info;
+ multisample_state_create_info.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO;
+ multisample_state_create_info.pNext = NULL;
+ multisample_state_create_info.flags = 0;
+
+ multisample_state_create_info.rasterizationSamples = rasterization_sample_count[p_multisample_state.sample_count];
+ multisample_state_create_info.sampleShadingEnable = p_multisample_state.enable_sample_shading;
+ multisample_state_create_info.minSampleShading = p_multisample_state.min_sample_shading;
+ Vector<VkSampleMask> sample_mask;
+ if (p_multisample_state.sample_mask.size()) {
+ //use sample mask
+ int rasterization_sample_mask_expected_size[TEXTURE_SAMPLES_MAX] = {
+ 1, 2, 4, 8, 16, 32, 64
+ };
+ ERR_FAIL_COND_V(rasterization_sample_mask_expected_size[p_multisample_state.sample_count] != p_multisample_state.sample_mask.size(), RID());
+ sample_mask.resize(p_multisample_state.sample_mask.size());
+ for (int i = 0; i < p_multisample_state.sample_mask.size(); i++) {
+ sample_mask.write[i] = p_multisample_state.sample_mask[i]; //assign in place; resize followed by push_back would leave the first entries zeroed
+ }
+ multisample_state_create_info.pSampleMask = sample_mask.ptr();
+ } else {
+ multisample_state_create_info.pSampleMask = NULL;
+ }
+
+ multisample_state_create_info.alphaToCoverageEnable = p_multisample_state.enable_alpha_to_coverage;
+ multisample_state_create_info.alphaToOneEnable = p_multisample_state.enable_alpha_to_one;
+
+ //depth stencil
+
+ VkPipelineDepthStencilStateCreateInfo depth_stencil_state_create_info;
+ depth_stencil_state_create_info.sType = VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO;
+ depth_stencil_state_create_info.pNext = NULL;
+ depth_stencil_state_create_info.flags = 0;
+ depth_stencil_state_create_info.depthTestEnable = p_depth_stencil_state.enable_depth_test;
+ depth_stencil_state_create_info.depthWriteEnable = p_depth_stencil_state.enable_depth_write;
+ ERR_FAIL_INDEX_V(p_depth_stencil_state.depth_compare_operator, COMPARE_OP_MAX, RID());
+ depth_stencil_state_create_info.depthCompareOp = compare_operators[p_depth_stencil_state.depth_compare_operator];
+ depth_stencil_state_create_info.depthBoundsTestEnable = p_depth_stencil_state.enable_depth_range;
+ depth_stencil_state_create_info.stencilTestEnable = p_depth_stencil_state.enable_stencil;
+
+ ERR_FAIL_INDEX_V(p_depth_stencil_state.stencil_operation_front.fail, STENCIL_OP_MAX, RID());
+ depth_stencil_state_create_info.front.failOp = stencil_operations[p_depth_stencil_state.stencil_operation_front.fail];
+ ERR_FAIL_INDEX_V(p_depth_stencil_state.stencil_operation_front.pass, STENCIL_OP_MAX, RID());
+ depth_stencil_state_create_info.front.passOp = stencil_operations[p_depth_stencil_state.stencil_operation_front.pass];
+ ERR_FAIL_INDEX_V(p_depth_stencil_state.stencil_operation_front.depth_fail, STENCIL_OP_MAX, RID());
+ depth_stencil_state_create_info.front.depthFailOp = stencil_operations[p_depth_stencil_state.stencil_operation_front.depth_fail];
+ ERR_FAIL_INDEX_V(p_depth_stencil_state.stencil_operation_front.compare, COMPARE_OP_MAX, RID());
+ depth_stencil_state_create_info.front.compareOp = compare_operators[p_depth_stencil_state.stencil_operation_front.compare];
+ depth_stencil_state_create_info.front.compareMask = p_depth_stencil_state.stencil_operation_front.compare_mask;
+ depth_stencil_state_create_info.front.writeMask = p_depth_stencil_state.stencil_operation_front.write_mask;
+ depth_stencil_state_create_info.front.reference = p_depth_stencil_state.stencil_operation_front.reference;
+
+ ERR_FAIL_INDEX_V(p_depth_stencil_state.stencil_operation_back.fail, STENCIL_OP_MAX, RID());
+ depth_stencil_state_create_info.back.failOp = stencil_operations[p_depth_stencil_state.stencil_operation_back.fail];
+ ERR_FAIL_INDEX_V(p_depth_stencil_state.stencil_operation_back.pass, STENCIL_OP_MAX, RID());
+ depth_stencil_state_create_info.back.passOp = stencil_operations[p_depth_stencil_state.stencil_operation_back.pass];
+ ERR_FAIL_INDEX_V(p_depth_stencil_state.stencil_operation_back.depth_fail, STENCIL_OP_MAX, RID());
+ depth_stencil_state_create_info.back.depthFailOp = stencil_operations[p_depth_stencil_state.stencil_operation_back.depth_fail];
+ ERR_FAIL_INDEX_V(p_depth_stencil_state.stencil_operation_back.compare, COMPARE_OP_MAX, RID());
+ depth_stencil_state_create_info.back.compareOp = compare_operators[p_depth_stencil_state.stencil_operation_back.compare];
+ depth_stencil_state_create_info.back.compareMask = p_depth_stencil_state.stencil_operation_back.compare_mask;
+ depth_stencil_state_create_info.back.writeMask = p_depth_stencil_state.stencil_operation_back.write_mask;
+ depth_stencil_state_create_info.back.reference = p_depth_stencil_state.stencil_operation_back.reference;
+
+ depth_stencil_state_create_info.minDepthBounds = p_depth_stencil_state.depth_range_min;
+ depth_stencil_state_create_info.maxDepthBounds = p_depth_stencil_state.depth_range_max;
+
+ //blend state
+ VkPipelineColorBlendStateCreateInfo color_blend_state_create_info;
+ color_blend_state_create_info.sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO;
+ color_blend_state_create_info.pNext = NULL;
+ color_blend_state_create_info.flags = 0;
+ color_blend_state_create_info.logicOpEnable = p_blend_state.enable_logic_op;
+ ERR_FAIL_INDEX_V(p_blend_state.logic_op, LOGIC_OP_MAX, RID());
+ color_blend_state_create_info.logicOp = logic_operations[p_blend_state.logic_op];
+
+ ERR_FAIL_COND_V(fb_format.color_attachments != p_blend_state.attachments.size(), RID());
+
+ Vector<VkPipelineColorBlendAttachmentState> attachment_states;
+
+ for (int i = 0; i < p_blend_state.attachments.size(); i++) {
+ VkPipelineColorBlendAttachmentState state;
+ state.blendEnable = p_blend_state.attachments[i].enable_blend;
+
+ ERR_FAIL_INDEX_V(p_blend_state.attachments[i].src_color_blend_factor, BLEND_FACTOR_MAX, RID());
+ state.srcColorBlendFactor = blend_factors[p_blend_state.attachments[i].src_color_blend_factor];
+ ERR_FAIL_INDEX_V(p_blend_state.attachments[i].dst_color_blend_factor, BLEND_FACTOR_MAX, RID());
+ state.dstColorBlendFactor = blend_factors[p_blend_state.attachments[i].dst_color_blend_factor];
+ ERR_FAIL_INDEX_V(p_blend_state.attachments[i].color_blend_op, BLEND_OP_MAX, RID());
+ state.colorBlendOp = blend_operations[p_blend_state.attachments[i].color_blend_op];
+
+ ERR_FAIL_INDEX_V(p_blend_state.attachments[i].src_alpha_blend_factor, BLEND_FACTOR_MAX, RID());
+ state.srcAlphaBlendFactor = blend_factors[p_blend_state.attachments[i].src_alpha_blend_factor];
+ ERR_FAIL_INDEX_V(p_blend_state.attachments[i].dst_alpha_blend_factor, BLEND_FACTOR_MAX, RID());
+ state.dstAlphaBlendFactor = blend_factors[p_blend_state.attachments[i].dst_alpha_blend_factor];
+ ERR_FAIL_INDEX_V(p_blend_state.attachments[i].alpha_blend_op, BLEND_OP_MAX, RID());
+ state.alphaBlendOp = blend_operations[p_blend_state.attachments[i].alpha_blend_op];
+
+ state.colorWriteMask = 0;
+ if (p_blend_state.attachments[i].write_r) {
+ state.colorWriteMask |= VK_COLOR_COMPONENT_R_BIT;
+ }
+ if (p_blend_state.attachments[i].write_g) {
+ state.colorWriteMask |= VK_COLOR_COMPONENT_G_BIT;
+ }
+ if (p_blend_state.attachments[i].write_b) {
+ state.colorWriteMask |= VK_COLOR_COMPONENT_B_BIT;
+ }
+ if (p_blend_state.attachments[i].write_a) {
+ state.colorWriteMask |= VK_COLOR_COMPONENT_A_BIT;
+ }
+
+ attachment_states.push_back(state);
+ };
+
+ color_blend_state_create_info.attachmentCount = attachment_states.size();
+ color_blend_state_create_info.pAttachments = attachment_states.ptr();
+
+ color_blend_state_create_info.blendConstants[0] = p_blend_state.blend_constant.r;
+ color_blend_state_create_info.blendConstants[1] = p_blend_state.blend_constant.g;
+ color_blend_state_create_info.blendConstants[2] = p_blend_state.blend_constant.b;
+ color_blend_state_create_info.blendConstants[3] = p_blend_state.blend_constant.a;
+
+ //dynamic state
+
+ VkPipelineDynamicStateCreateInfo dynamic_state_create_info;
+ dynamic_state_create_info.sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO;
+ dynamic_state_create_info.pNext = NULL;
+ dynamic_state_create_info.flags = 0;
+ Vector<VkDynamicState> dynamic_states; //vulkan is weird..
+
+ dynamic_states.push_back(VK_DYNAMIC_STATE_VIEWPORT); //viewport and scissor are always dynamic
+ dynamic_states.push_back(VK_DYNAMIC_STATE_SCISSOR);
+
+ if (p_dynamic_state_flags & DYNAMIC_STATE_LINE_WIDTH) {
+ dynamic_states.push_back(VK_DYNAMIC_STATE_LINE_WIDTH);
+ }
+
+ if (p_dynamic_state_flags & DYNAMIC_STATE_DEPTH_BIAS) {
+ dynamic_states.push_back(VK_DYNAMIC_STATE_DEPTH_BIAS);
+ }
+
+ if (p_dynamic_state_flags & DYNAMIC_STATE_BLEND_CONSTANTS) {
+ dynamic_states.push_back(VK_DYNAMIC_STATE_BLEND_CONSTANTS);
+ }
+
+ if (p_dynamic_state_flags & DYNAMIC_STATE_DEPTH_BOUNDS) {
+ dynamic_states.push_back(VK_DYNAMIC_STATE_DEPTH_BOUNDS);
+ }
+
+ if (p_dynamic_state_flags & DYNAMIC_STATE_STENCIL_COMPARE_MASK) {
+ dynamic_states.push_back(VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK);
+ }
+
+ if (p_dynamic_state_flags & DYNAMIC_STATE_STENCIL_WRITE_MASK) {
+ dynamic_states.push_back(VK_DYNAMIC_STATE_STENCIL_WRITE_MASK);
+ }
+
+ if (p_dynamic_state_flags & DYNAMIC_STATE_STENCIL_REFERENCE) {
+ dynamic_states.push_back(VK_DYNAMIC_STATE_STENCIL_REFERENCE);
+ }
+
+ dynamic_state_create_info.dynamicStateCount = dynamic_states.size();
+ dynamic_state_create_info.pDynamicStates = dynamic_states.ptr();
+
+ //finally, pipeline create info
+ VkGraphicsPipelineCreateInfo graphics_pipeline_create_info;
+
+ graphics_pipeline_create_info.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO;
+ graphics_pipeline_create_info.pNext = NULL;
+ graphics_pipeline_create_info.flags = 0;
+
+ graphics_pipeline_create_info.stageCount = shader->pipeline_stages.size();
+ graphics_pipeline_create_info.pStages = shader->pipeline_stages.ptr();
+ graphics_pipeline_create_info.pVertexInputState = &pipeline_vertex_input_state_create_info;
+ graphics_pipeline_create_info.pInputAssemblyState = &input_assembly_create_info;
+ graphics_pipeline_create_info.pTessellationState = &tesselation_create_info;
+ graphics_pipeline_create_info.pViewportState = &viewport_state_create_info;
+ graphics_pipeline_create_info.pRasterizationState = &rasterization_state_create_info;
+ graphics_pipeline_create_info.pMultisampleState = &multisample_state_create_info;
+ graphics_pipeline_create_info.pDepthStencilState = &depth_stencil_state_create_info;
+ graphics_pipeline_create_info.pColorBlendState = &color_blend_state_create_info;
+ graphics_pipeline_create_info.pDynamicState = &dynamic_state_create_info;
+ graphics_pipeline_create_info.layout = shader->pipeline_layout;
+ graphics_pipeline_create_info.renderPass = fb_format.render_pass;
+
+ graphics_pipeline_create_info.subpass = 0;
+ graphics_pipeline_create_info.basePipelineHandle = NULL;
+ graphics_pipeline_create_info.basePipelineIndex = 0;
+
+ RenderPipeline pipeline;
+ VkResult err = vkCreateGraphicsPipelines(device, NULL, 1, &graphics_pipeline_create_info, NULL, &pipeline.pipeline);
+ ERR_FAIL_COND_V(err, RID());
+
+ pipeline.set_formats = shader->set_formats;
+ pipeline.push_constant_stages = shader->push_constant.push_constants_vk_stage;
+ pipeline.pipeline_layout = shader->pipeline_layout;
+ pipeline.shader = p_shader;
+ pipeline.push_constant_size = shader->push_constant.push_constant_size;
+
+#ifdef DEBUG_ENABLED
+ pipeline.validation.dynamic_state = p_dynamic_state_flags;
+ pipeline.validation.framebuffer_format = p_framebuffer_format;
+ pipeline.validation.vertex_format = p_vertex_format;
+ pipeline.validation.uses_restart_indices = input_assembly_create_info.primitiveRestartEnable;
+
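+ //validation tables: at draw time the vertex count must be a multiple of the divisor and at least the minimum for the chosen primitive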
+ static const uint32_t primitive_divisor[RENDER_PRIMITIVE_MAX] = {
+ 1, 2, 1, 1, 1, 3, 1, 1, 1, 1, 1
+ };
+ pipeline.validation.primitive_divisor = primitive_divisor[p_render_primitive];
+ static const uint32_t primitive_minimum[RENDER_PRIMITIVE_MAX] = {
+ 1,
+ 2,
+ 2,
+ 2,
+ 2,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 1,
+ };
+ pipeline.validation.primitive_minimum = primitive_minimum[p_render_primitive];
+#endif
+ //create ID to associate with this pipeline
+ RID id = render_pipeline_owner.make_rid(pipeline);
+ //now add all the dependencies
+ _add_dependency(id, p_shader);
+ return id;
+}
+
+bool RenderingDeviceVulkan::render_pipeline_is_valid(RID p_pipeline) {
+ _THREAD_SAFE_METHOD_
+ return render_pipeline_owner.owns(p_pipeline);
+}
+
+/**************************/
+/**** COMPUTE PIPELINE ****/
+/**************************/
+
+RID RenderingDeviceVulkan::compute_pipeline_create(RID p_shader) {
+ _THREAD_SAFE_METHOD_
+
+ //needs a shader
+ Shader *shader = shader_owner.getornull(p_shader);
+ ERR_FAIL_COND_V(!shader, RID());
+
+ ERR_FAIL_COND_V_MSG(!shader->is_compute, RID(),
+ "Non-compute shaders can't be used in compute pipelines");
+
+ //finally, pipeline create info
+ VkComputePipelineCreateInfo compute_pipeline_create_info;
+
+ compute_pipeline_create_info.sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO;
+ compute_pipeline_create_info.pNext = NULL;
+ compute_pipeline_create_info.flags = 0;
+
+ compute_pipeline_create_info.stage = shader->pipeline_stages[0];
+ compute_pipeline_create_info.layout = shader->pipeline_layout;
+ compute_pipeline_create_info.basePipelineHandle = NULL;
+ compute_pipeline_create_info.basePipelineIndex = 0;
+
+ ComputePipeline pipeline;
+ VkResult err = vkCreateComputePipelines(device, NULL, 1, &compute_pipeline_create_info, NULL, &pipeline.pipeline);
+ ERR_FAIL_COND_V(err, RID());
+
+ pipeline.set_formats = shader->set_formats;
+ pipeline.push_constant_stages = shader->push_constant.push_constants_vk_stage;
+ pipeline.pipeline_layout = shader->pipeline_layout;
+ pipeline.shader = p_shader;
+ pipeline.push_constant_size = shader->push_constant.push_constant_size;
+
+ //create ID to associate with this pipeline
+ RID id = compute_pipeline_owner.make_rid(pipeline);
+ //now add all the dependencies
+ _add_dependency(id, p_shader);
+ return id;
+}
+
+bool RenderingDeviceVulkan::compute_pipeline_is_valid(RID p_pipeline) {
+
+ return compute_pipeline_owner.owns(p_pipeline);
+}
+
+/****************/
+/**** SCREEN ****/
+/****************/
+
+int RenderingDeviceVulkan::screen_get_width(int p_screen) const {
+ _THREAD_SAFE_METHOD_
+
+ return context->window_get_width(p_screen);
+}
+int RenderingDeviceVulkan::screen_get_height(int p_screen) const {
+ _THREAD_SAFE_METHOD_
+
+ return context->window_get_height(p_screen);
+}
+RenderingDevice::FramebufferFormatID RenderingDeviceVulkan::screen_get_framebuffer_format() const {
+
+ _THREAD_SAFE_METHOD_
+
+ //very hacky, but not used often per frame so I guess ok
+ VkFormat vkformat = context->get_screen_format();
+ DataFormat format = DATA_FORMAT_MAX;
+ for (int i = 0; i < DATA_FORMAT_MAX; i++) {
+ if (vkformat == vulkan_formats[i]) {
+ format = DataFormat(i);
+ break;
+ }
+ }
+
+ ERR_FAIL_COND_V(format == DATA_FORMAT_MAX, INVALID_ID);
+
+ AttachmentFormat attachment;
+ attachment.format = format;
+ attachment.samples = TEXTURE_SAMPLES_1;
+ attachment.usage_flags = TEXTURE_USAGE_COLOR_ATTACHMENT_BIT;
+ Vector<AttachmentFormat> screen_attachment;
+ screen_attachment.push_back(attachment);
+ return const_cast<RenderingDeviceVulkan *>(this)->framebuffer_format_create(screen_attachment);
+}
+
+/*******************/
+/**** DRAW LIST ****/
+/*******************/
+
+RenderingDevice::DrawListID RenderingDeviceVulkan::draw_list_begin_for_screen(int p_screen, const Color &p_clear_color) {
+
+ _THREAD_SAFE_METHOD_
+
+ ERR_FAIL_COND_V_MSG(draw_list != NULL, INVALID_ID, "Only one draw list can be active at the same time.");
+ ERR_FAIL_COND_V_MSG(compute_list != NULL, INVALID_ID, "Only one draw/compute list can be active at the same time.");
+
+ VkCommandBuffer command_buffer = frames[frame].draw_command_buffer;
+ draw_list = memnew(DrawList);
+ draw_list->command_buffer = command_buffer;
+#ifdef DEBUG_ENABLED
+ draw_list->validation.framebuffer_format = screen_get_framebuffer_format();
+#endif
+ draw_list_count = 0;
+ draw_list_split = false;
+
+ VkRenderPassBeginInfo render_pass_begin;
+ render_pass_begin.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
+ render_pass_begin.pNext = NULL;
+ render_pass_begin.renderPass = context->window_get_render_pass(p_screen);
+ render_pass_begin.framebuffer = context->window_get_framebuffer(p_screen);
+
+ render_pass_begin.renderArea.extent.width = context->window_get_width(p_screen);
+ render_pass_begin.renderArea.extent.height = context->window_get_height(p_screen);
+ render_pass_begin.renderArea.offset.x = 0;
+ render_pass_begin.renderArea.offset.y = 0;
+
+ render_pass_begin.clearValueCount = 1;
+
+ VkClearValue clear_value;
+ clear_value.color.float32[0] = p_clear_color.r;
+ clear_value.color.float32[1] = p_clear_color.g;
+ clear_value.color.float32[2] = p_clear_color.b;
+ clear_value.color.float32[3] = p_clear_color.a;
+
+ render_pass_begin.pClearValues = &clear_value;
+
+ vkCmdBeginRenderPass(command_buffer, &render_pass_begin, VK_SUBPASS_CONTENTS_INLINE);
+
+ uint32_t size_x = screen_get_width(p_screen);
+ uint32_t size_y = screen_get_height(p_screen);
+
+ VkViewport viewport;
+ viewport.x = 0;
+ viewport.y = 0;
+ viewport.width = size_x;
+ viewport.height = size_y;
+ viewport.minDepth = 0;
+ viewport.maxDepth = 1.0;
+
+ vkCmdSetViewport(command_buffer, 0, 1, &viewport);
+
+ VkRect2D scissor;
+ scissor.offset.x = 0;
+ scissor.offset.y = 0;
+ scissor.extent.width = size_x;
+ scissor.extent.height = size_y;
+
+ vkCmdSetScissor(command_buffer, 0, 1, &scissor);
+
+ return ID_TYPE_DRAW_LIST;
+}
+
+Error RenderingDeviceVulkan::_draw_list_setup_framebuffer(Framebuffer *p_framebuffer, InitialAction p_initial_color_action, FinalAction p_final_color_action, InitialAction p_initial_depth_action, FinalAction p_final_depth_action, VkFramebuffer *r_framebuffer, VkRenderPass *r_render_pass) {
+
+ Framebuffer::VersionKey vk;
+ vk.initial_color_action = p_initial_color_action;
+ vk.final_color_action = p_final_color_action;
+ vk.initial_depth_action = p_initial_depth_action;
+ vk.final_depth_action = p_final_depth_action;
+
+ if (!p_framebuffer->framebuffers.has(vk)) {
+ //need to create this version
+ Framebuffer::Version version;
+
+ version.render_pass = _render_pass_create(framebuffer_formats[p_framebuffer->format_id].E->key().attachments, p_initial_color_action, p_final_color_action, p_initial_depth_action, p_final_depth_action);
+
+ VkFramebufferCreateInfo framebuffer_create_info;
+ framebuffer_create_info.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO;
+ framebuffer_create_info.pNext = NULL;
+ framebuffer_create_info.flags = 0;
+ framebuffer_create_info.renderPass = version.render_pass;
+ Vector<VkImageView> attachments;
+ for (int i = 0; i < p_framebuffer->texture_ids.size(); i++) {
+ Texture *texture = texture_owner.getornull(p_framebuffer->texture_ids[i]);
+ ERR_FAIL_COND_V(!texture, ERR_BUG);
+ attachments.push_back(texture->view);
+ ERR_FAIL_COND_V(texture->width != p_framebuffer->size.width, ERR_BUG);
+ ERR_FAIL_COND_V(texture->height != p_framebuffer->size.height, ERR_BUG);
+ }
+ framebuffer_create_info.attachmentCount = attachments.size();
+ framebuffer_create_info.pAttachments = attachments.ptr();
+ framebuffer_create_info.width = p_framebuffer->size.width;
+ framebuffer_create_info.height = p_framebuffer->size.height;
+ framebuffer_create_info.layers = 1;
+
+ VkResult err = vkCreateFramebuffer(device, &framebuffer_create_info, NULL, &version.framebuffer);
+ ERR_FAIL_COND_V(err, ERR_CANT_CREATE);
+
+ p_framebuffer->framebuffers.insert(vk, version);
+ }
+ const Framebuffer::Version &version = p_framebuffer->framebuffers[vk];
+ *r_framebuffer = version.framebuffer;
+ *r_render_pass = version.render_pass;
+
+ return OK;
+}
+
+Error RenderingDeviceVulkan::_draw_list_render_pass_begin(Framebuffer *framebuffer, InitialAction p_initial_color_action, FinalAction p_final_color_action, InitialAction p_initial_depth_action, FinalAction p_final_depth_action, const Vector<Color> &p_clear_colors, float p_clear_depth, uint32_t p_clear_stencil, Point2i viewport_offset, Point2i viewport_size, VkFramebuffer vkframebuffer, VkRenderPass render_pass, VkCommandBuffer command_buffer, VkSubpassContents subpass_contents) {
+
+ VkRenderPassBeginInfo render_pass_begin;
+ render_pass_begin.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
+ render_pass_begin.pNext = NULL;
+ render_pass_begin.renderPass = render_pass;
+ render_pass_begin.framebuffer = vkframebuffer;
+
+ render_pass_begin.renderArea.extent.width = viewport_size.width;
+ render_pass_begin.renderArea.extent.height = viewport_size.height;
+ render_pass_begin.renderArea.offset.x = viewport_offset.x;
+ render_pass_begin.renderArea.offset.y = viewport_offset.y;
+
+ Vector<VkClearValue> clear_values;
+ clear_values.resize(framebuffer->texture_ids.size());
+
+ {
+ int color_index = 0;
+ for (int i = 0; i < framebuffer->texture_ids.size(); i++) {
+ Texture *texture = texture_owner.getornull(framebuffer->texture_ids[i]);
+ VkClearValue clear_value;
+
+ if (color_index < p_clear_colors.size() && texture->usage_flags & TEXTURE_USAGE_COLOR_ATTACHMENT_BIT) {
+ ERR_FAIL_INDEX_V(color_index, p_clear_colors.size(), ERR_BUG); //a bug
+ Color clear_color = p_clear_colors[color_index];
+ clear_value.color.float32[0] = clear_color.r;
+ clear_value.color.float32[1] = clear_color.g;
+ clear_value.color.float32[2] = clear_color.b;
+ clear_value.color.float32[3] = clear_color.a;
+ color_index++;
+ } else if (texture->usage_flags & TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) {
+ clear_value.depthStencil.depth = p_clear_depth;
+ clear_value.depthStencil.stencil = p_clear_stencil;
+ } else {
+ clear_value.color.float32[0] = 0;
+ clear_value.color.float32[1] = 0;
+ clear_value.color.float32[2] = 0;
+ clear_value.color.float32[3] = 0;
+ }
+ clear_values.write[i] = clear_value;
+ }
+ }
+
+ render_pass_begin.clearValueCount = clear_values.size();
+ render_pass_begin.pClearValues = clear_values.ptr();
+
+ vkCmdBeginRenderPass(command_buffer, &render_pass_begin, subpass_contents);
+
+ //mark textures as bound
+ draw_list_bound_textures.clear();
+ draw_list_unbind_color_textures = p_final_color_action != FINAL_ACTION_CONTINUE;
+ draw_list_unbind_depth_textures = p_final_depth_action != FINAL_ACTION_CONTINUE;
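+ //FINAL_ACTION_CONTINUE means the attachments will keep being rendered to, so their textures must remain flagged as bound after the list ends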
+
+ for (int i = 0; i < framebuffer->texture_ids.size(); i++) {
+ Texture *texture = texture_owner.getornull(framebuffer->texture_ids[i]);
+ texture->bound = true;
+ draw_list_bound_textures.push_back(framebuffer->texture_ids[i]);
+ }
+
+ return OK;
+}
+
+void RenderingDeviceVulkan::_draw_list_insert_clear_region(DrawList *draw_list, Framebuffer *framebuffer, Point2i viewport_offset, Point2i viewport_size, bool p_clear_color, const Vector<Color> &p_clear_colors, bool p_clear_depth, float p_depth, uint32_t p_stencil) {
+ Vector<VkClearAttachment> clear_attachments;
+ int color_index = 0;
+ for (int i = 0; i < framebuffer->texture_ids.size(); i++) {
+ Texture *texture = texture_owner.getornull(framebuffer->texture_ids[i]);
+ VkClearAttachment clear_at;
+ if (p_clear_color && texture->usage_flags & TEXTURE_USAGE_COLOR_ATTACHMENT_BIT) {
+ ERR_FAIL_INDEX(color_index, p_clear_colors.size()); //a bug
+ Color clear_color = p_clear_colors[color_index];
+ clear_at.clearValue.color.float32[0] = clear_color.r;
+ clear_at.clearValue.color.float32[1] = clear_color.g;
+ clear_at.clearValue.color.float32[2] = clear_color.b;
+ clear_at.clearValue.color.float32[3] = clear_color.a;
+ clear_at.colorAttachment = color_index++;
+ clear_at.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
+ } else if (p_clear_depth && texture->usage_flags & TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) {
+
+ clear_at.clearValue.depthStencil.depth = p_depth;
+ clear_at.clearValue.depthStencil.stencil = p_stencil;
+ clear_at.colorAttachment = 0;
+ clear_at.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
+ if (format_has_stencil(texture->format)) {
+ clear_at.aspectMask |= VK_IMAGE_ASPECT_STENCIL_BIT;
+ }
+ } else {
+ ERR_CONTINUE(true);
+ }
+ clear_attachments.push_back(clear_at);
+ }
+
+ VkClearRect cr;
+ cr.baseArrayLayer = 0;
+ cr.layerCount = 1;
+ cr.rect.offset.x = viewport_offset.x;
+ cr.rect.offset.y = viewport_offset.y;
+ cr.rect.extent.width = viewport_size.width;
+ cr.rect.extent.height = viewport_size.height;
+
+ vkCmdClearAttachments(draw_list->command_buffer, clear_attachments.size(), clear_attachments.ptr(), 1, &cr);
+}
+
+RenderingDevice::DrawListID RenderingDeviceVulkan::draw_list_begin(RID p_framebuffer, InitialAction p_initial_color_action, FinalAction p_final_color_action, InitialAction p_initial_depth_action, FinalAction p_final_depth_action, const Vector<Color> &p_clear_color_values, float p_clear_depth, uint32_t p_clear_stencil, const Rect2 &p_region) {
+
+ _THREAD_SAFE_METHOD_
+
+ ERR_FAIL_COND_V_MSG(draw_list != NULL, INVALID_ID, "Only one draw list can be active at the same time.");
+ ERR_FAIL_COND_V_MSG(compute_list != NULL, INVALID_ID, "Only one draw/compute list can be active at the same time.");
+
+ Framebuffer *framebuffer = framebuffer_owner.getornull(p_framebuffer);
+ ERR_FAIL_COND_V(!framebuffer, INVALID_ID);
+
+ Point2i viewport_offset;
+ Point2i viewport_size = framebuffer->size;
+ bool needs_clear_color = false;
+ bool needs_clear_depth = false;
+
+ if (p_region != Rect2() && p_region != Rect2(Vector2(), viewport_size)) { //check custom region
+ Rect2i viewport(viewport_offset, viewport_size);
+ Rect2i regioni = p_region;
+ if (!((regioni.position.x >= viewport.position.x) && (regioni.position.y >= viewport.position.y) &&
+ ((regioni.position.x + regioni.size.x) <= (viewport.position.x + viewport.size.x)) &&
+ ((regioni.position.y + regioni.size.y) <= (viewport.position.y + viewport.size.y)))) {
+ ERR_FAIL_V_MSG(INVALID_ID, "When supplying a custom region, it must be contained within the framebuffer rectangle.");
+ }
+
+ viewport_offset = regioni.position;
+ viewport_size = regioni.size;
+
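+ //a render pass clear affects the whole attachment, so for a custom region demote the clear to KEEP and clear the region manually later (see _draw_list_insert_clear_region)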
+ if (p_initial_color_action == INITIAL_ACTION_CLEAR) {
+ needs_clear_color = true;
+ p_initial_color_action = INITIAL_ACTION_KEEP;
+ }
+ if (p_initial_depth_action == INITIAL_ACTION_CLEAR) {
+ needs_clear_depth = true;
+ p_initial_depth_action = INITIAL_ACTION_KEEP;
+ }
+ }
+
+ if (p_initial_color_action == INITIAL_ACTION_CLEAR) { //check clear values
+
+ int color_attachments = framebuffer_formats[framebuffer->format_id].color_attachments;
+ ERR_FAIL_COND_V_MSG(p_clear_color_values.size() != color_attachments, INVALID_ID,
+ "Clear color values supplied (" + itos(p_clear_color_values.size()) + ") differ from the amount required for framebuffer (" + itos(color_attachments) + ").");
+ }
+
+ VkFramebuffer vkframebuffer;
+ VkRenderPass render_pass;
+
+ Error err = _draw_list_setup_framebuffer(framebuffer, p_initial_color_action, p_final_color_action, p_initial_depth_action, p_final_depth_action, &vkframebuffer, &render_pass);
+ ERR_FAIL_COND_V(err != OK, INVALID_ID);
+
+ VkCommandBuffer command_buffer = frames[frame].draw_command_buffer;
+ err = _draw_list_render_pass_begin(framebuffer, p_initial_color_action, p_final_color_action, p_initial_depth_action, p_final_depth_action, p_clear_color_values, p_clear_depth, p_clear_stencil, viewport_offset, viewport_size, vkframebuffer, render_pass, command_buffer, VK_SUBPASS_CONTENTS_INLINE);
+
+ if (err != OK) {
+ return INVALID_ID;
+ }
+
+ draw_list = memnew(DrawList);
+ draw_list->command_buffer = command_buffer;
+#ifdef DEBUG_ENABLED
+ draw_list->validation.framebuffer_format = framebuffer->format_id;
+#endif
+ draw_list_count = 0;
+ draw_list_split = false;
+
+ if (needs_clear_color || needs_clear_depth) {
+ _draw_list_insert_clear_region(draw_list, framebuffer, viewport_offset, viewport_size, needs_clear_color, p_clear_color_values, needs_clear_depth, p_clear_depth, p_clear_stencil);
+ }
+
+ VkViewport viewport;
+ viewport.x = viewport_offset.x;
+ viewport.y = viewport_offset.y;
+ viewport.width = viewport_size.width;
+ viewport.height = viewport_size.height;
+ viewport.minDepth = 0;
+ viewport.maxDepth = 1.0;
+
+ vkCmdSetViewport(command_buffer, 0, 1, &viewport);
+
+ VkRect2D scissor;
+ scissor.offset.x = viewport_offset.x;
+ scissor.offset.y = viewport_offset.y;
+ scissor.extent.width = viewport_size.width;
+ scissor.extent.height = viewport_size.height;
+
+ vkCmdSetScissor(command_buffer, 0, 1, &scissor);
+
+ draw_list->viewport = Rect2i(viewport_offset, viewport_size);
+ return ID_TYPE_DRAW_LIST;
+}
+
+Error RenderingDeviceVulkan::draw_list_begin_split(RID p_framebuffer, uint32_t p_splits, DrawListID *r_split_ids, InitialAction p_initial_color_action, FinalAction p_final_color_action, InitialAction p_initial_depth_action, FinalAction p_final_depth_action, const Vector<Color> &p_clear_color_values, float p_clear_depth, uint32_t p_clear_stencil, const Rect2 &p_region) {
+
+ _THREAD_SAFE_METHOD_
+
+ ERR_FAIL_COND_V(p_splits < 1, ERR_INVALID_DECLARATION);
+
+ Framebuffer *framebuffer = framebuffer_owner.getornull(p_framebuffer);
+ ERR_FAIL_COND_V(!framebuffer, ERR_INVALID_DECLARATION);
+
+ Point2i viewport_offset;
+ Point2i viewport_size = framebuffer->size;
+
+ bool needs_clear_color = false;
+ bool needs_clear_depth = false;
+
+ if (p_region != Rect2() && p_region != Rect2(Vector2(), viewport_size)) { //check custom region
+ Rect2i viewport(viewport_offset, viewport_size);
+ Rect2i regioni = p_region;
+ if (!((regioni.position.x >= viewport.position.x) && (regioni.position.y >= viewport.position.y) &&
+ ((regioni.position.x + regioni.size.x) <= (viewport.position.x + viewport.size.x)) &&
+ ((regioni.position.y + regioni.size.y) <= (viewport.position.y + viewport.size.y)))) {
+ ERR_FAIL_V_MSG(ERR_INVALID_PARAMETER, "When supplying a custom region, it must be contained within the framebuffer rectangle.");
+ }
+
+ viewport_offset = regioni.position;
+ viewport_size = regioni.size;
+
+ if (p_initial_color_action == INITIAL_ACTION_CLEAR) {
+ needs_clear_color = true;
+ p_initial_color_action = INITIAL_ACTION_KEEP;
+ }
+ if (p_initial_depth_action == INITIAL_ACTION_CLEAR) {
+ needs_clear_depth = true;
+ p_initial_depth_action = INITIAL_ACTION_KEEP;
+ }
+ }
+
+ if (p_initial_color_action == INITIAL_ACTION_CLEAR) { //check clear values
+
+ int color_attachments = framebuffer_formats[framebuffer->format_id].color_attachments;
+ ERR_FAIL_COND_V_MSG(p_clear_color_values.size() != color_attachments, ERR_INVALID_PARAMETER,
+ "Clear color values supplied (" + itos(p_clear_color_values.size()) + ") differ from the amount required for framebuffer (" + itos(color_attachments) + ").");
+ }
+
+ if (p_splits > (uint32_t)split_draw_list_allocators.size()) {
+ uint32_t from = split_draw_list_allocators.size();
+ split_draw_list_allocators.resize(p_splits);
+ for (uint32_t i = from; i < p_splits; i++) {
+
+ VkCommandPoolCreateInfo cmd_pool_info;
+ cmd_pool_info.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
+ cmd_pool_info.pNext = NULL;
+ cmd_pool_info.queueFamilyIndex = context->get_graphics_queue();
+ cmd_pool_info.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
+
+ VkResult res = vkCreateCommandPool(device, &cmd_pool_info, NULL, &split_draw_list_allocators.write[i].command_pool);
+ ERR_FAIL_COND_V(res, ERR_CANT_CREATE);
+
+ for (int j = 0; j < frame_count; j++) {
+
+ VkCommandBuffer command_buffer;
+
+ VkCommandBufferAllocateInfo cmdbuf;
+ //no command buffer exists, create it.
+ cmdbuf.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
+ cmdbuf.pNext = NULL;
+ cmdbuf.commandPool = split_draw_list_allocators[i].command_pool;
+ cmdbuf.level = VK_COMMAND_BUFFER_LEVEL_SECONDARY;
+ cmdbuf.commandBufferCount = 1;
+
+ VkResult err = vkAllocateCommandBuffers(device, &cmdbuf, &command_buffer);
+ ERR_FAIL_COND_V(err, ERR_CANT_CREATE);
+
+ split_draw_list_allocators.write[i].command_buffers.push_back(command_buffer);
+ }
+ }
+ }
+
+ VkFramebuffer vkframebuffer;
+ VkRenderPass render_pass;
+
+ Error err = _draw_list_setup_framebuffer(framebuffer, p_initial_color_action, p_final_color_action, p_initial_depth_action, p_final_depth_action, &vkframebuffer, &render_pass);
+ ERR_FAIL_COND_V(err != OK, ERR_CANT_CREATE);
+
+ VkCommandBuffer frame_command_buffer = frames[frame].draw_command_buffer;
+ err = _draw_list_render_pass_begin(framebuffer, p_initial_color_action, p_final_color_action, p_initial_depth_action, p_final_depth_action, p_clear_color_values, p_clear_depth, p_clear_stencil, viewport_offset, viewport_size, vkframebuffer, render_pass, frame_command_buffer, VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS);
+
+ if (err != OK) {
+ return ERR_CANT_CREATE;
+ }
+
+ draw_list = memnew_arr(DrawList, p_splits);
+ draw_list_count = p_splits;
+ draw_list_split = true;
+
+ for (uint32_t i = 0; i < p_splits; i++) {
+
+ //take a command buffer and initialize it
+ VkCommandBuffer command_buffer = split_draw_list_allocators[i].command_buffers[frame];
+
+ VkCommandBufferInheritanceInfo inheritance_info;
+ inheritance_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO;
+ inheritance_info.pNext = NULL;
+ inheritance_info.renderPass = render_pass;
+ inheritance_info.subpass = 0;
+ inheritance_info.framebuffer = vkframebuffer;
+ inheritance_info.occlusionQueryEnable = false;
+ inheritance_info.queryFlags = 0; //?
+ inheritance_info.pipelineStatistics = 0;
+
+ VkCommandBufferBeginInfo cmdbuf_begin;
+ cmdbuf_begin.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
+ cmdbuf_begin.pNext = NULL;
+ cmdbuf_begin.flags = VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT | VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT;
+ cmdbuf_begin.pInheritanceInfo = &inheritance_info;
+
+ VkResult res = vkResetCommandBuffer(command_buffer, 0);
+ if (res) {
+ memdelete_arr(draw_list);
+ draw_list = NULL;
+ ERR_FAIL_V(ERR_CANT_CREATE);
+ }
+
+ res = vkBeginCommandBuffer(command_buffer, &cmdbuf_begin);
+ if (res) {
+ memdelete_arr(draw_list);
+ draw_list = NULL;
+ ERR_FAIL_V(ERR_CANT_CREATE);
+ }
+
+ draw_list[i].command_buffer = command_buffer;
+#ifdef DEBUG_ENABLED
+ draw_list[i].validation.framebuffer_format = framebuffer->format_id;
+#endif
+
+ if (i == 0 && (needs_clear_color || needs_clear_depth)) {
+ _draw_list_insert_clear_region(draw_list, framebuffer, viewport_offset, viewport_size, needs_clear_color, p_clear_color_values, needs_clear_depth, p_clear_depth, p_clear_stencil);
+ }
+
+ VkViewport viewport;
+ viewport.x = viewport_offset.x;
+ viewport.y = viewport_offset.y;
+ viewport.width = viewport_size.width;
+ viewport.height = viewport_size.height;
+ viewport.minDepth = 0;
+ viewport.maxDepth = 1.0;
+
+ vkCmdSetViewport(command_buffer, 0, 1, &viewport);
+
+ VkRect2D scissor;
+ scissor.offset.x = viewport_offset.x;
+ scissor.offset.y = viewport_offset.y;
+ scissor.extent.width = viewport_size.width;
+ scissor.extent.height = viewport_size.height;
+
+ vkCmdSetScissor(command_buffer, 0, 1, &scissor);
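+ //list ID layout: the type tag lives in the bits above ID_BASE_SHIFT, the
+ //split index in the low bits; _get_draw_list_ptr() reverses this encoding.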
+ r_split_ids[i] = (DrawListID(ID_TYPE_SPLIT_DRAW_LIST) << DrawListID(ID_BASE_SHIFT)) + i;
+
+ draw_list[i].viewport = Rect2i(viewport_offset, viewport_size);
+ }
+
+ return OK;
+}
+
+RenderingDeviceVulkan::DrawList *RenderingDeviceVulkan::_get_draw_list_ptr(DrawListID p_id) {
+
+ if (p_id < 0) {
+ return NULL;
+ }
+
+ if (!draw_list) {
+ return NULL;
+ } else if (p_id == ID_TYPE_DRAW_LIST) {
+ if (draw_list_split) {
+ return NULL;
+ }
+ return draw_list;
+ } else if (p_id >> DrawListID(ID_BASE_SHIFT) == ID_TYPE_SPLIT_DRAW_LIST) {
+ if (!draw_list_split) {
+ return NULL;
+ }
+
+ uint64_t index = p_id & ((DrawListID(1) << DrawListID(ID_BASE_SHIFT)) - 1); //mask
+
+ if (index >= draw_list_count) {
+ return NULL;
+ }
+
+ return &draw_list[index];
+ } else {
+ return NULL;
+ }
+}
+
+void RenderingDeviceVulkan::draw_list_bind_render_pipeline(DrawListID p_list, RID p_render_pipeline) {
+
+ DrawList *dl = _get_draw_list_ptr(p_list);
+ ERR_FAIL_COND(!dl);
+#ifdef DEBUG_ENABLED
+ ERR_FAIL_COND_MSG(!dl->validation.active, "Submitted Draw Lists can no longer be modified.");
+#endif
+
+ const RenderPipeline *pipeline = render_pipeline_owner.getornull(p_render_pipeline);
+ ERR_FAIL_COND(!pipeline);
+#ifdef DEBUG_ENABLED
+ ERR_FAIL_COND(pipeline->validation.framebuffer_format != dl->validation.framebuffer_format);
+#endif
+
+ if (p_render_pipeline == dl->state.pipeline) {
+ return; //redundant state, return.
+ }
+
+ dl->state.pipeline = p_render_pipeline;
+ dl->state.pipeline_layout = pipeline->pipeline_layout;
+
+ vkCmdBindPipeline(dl->command_buffer, VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline->pipeline);
+
+ if (dl->state.pipeline_shader != pipeline->shader) {
+ // shader changed, so descriptor sets may become incompatible.
+
+ //go through ALL sets, and unbind them (and all those above) if the format is different
+
+ uint32_t pcount = pipeline->set_formats.size(); //formats count in this pipeline
+ dl->state.set_count = MAX(dl->state.set_count, pcount);
+ const uint32_t *pformats = pipeline->set_formats.ptr(); //pipeline set formats
+
+ bool sets_valid = true; //once invalid, all above become invalid
+ for (uint32_t i = 0; i < pcount; i++) {
+ //if a part of the format is different, invalidate it (and the rest)
+ if (!sets_valid || dl->state.sets[i].pipeline_expected_format != pformats[i]) {
+ dl->state.sets[i].bound = false;
+ dl->state.sets[i].pipeline_expected_format = pformats[i];
+ sets_valid = false;
+ }
+ }
+
+ for (uint32_t i = pcount; i < dl->state.set_count; i++) {
+ //unbind the ones above (not used) if exist
+ dl->state.sets[i].bound = false;
+ }
+
+ dl->state.set_count = pcount; //update set count
+
+ if (pipeline->push_constant_size) {
+ dl->state.pipeline_push_constant_stages = pipeline->push_constant_stages;
+#ifdef DEBUG_ENABLED
+ dl->validation.pipeline_push_constant_suppplied = false;
+#endif
+ }
+
+ dl->state.pipeline_shader = pipeline->shader;
+ }
+
+#ifdef DEBUG_ENABLED
+ //update render pass pipeline info
+ dl->validation.pipeline_active = true;
+ dl->validation.pipeline_dynamic_state = pipeline->validation.dynamic_state;
+ dl->validation.pipeline_vertex_format = pipeline->validation.vertex_format;
+ dl->validation.pipeline_uses_restart_indices = pipeline->validation.uses_restart_indices;
+ dl->validation.pipeline_primitive_divisor = pipeline->validation.primitive_divisor;
+ dl->validation.pipeline_primitive_minimum = pipeline->validation.primitive_minimum;
+ dl->validation.pipeline_push_constant_size = pipeline->push_constant_size;
+#endif
+}
+
+void RenderingDeviceVulkan::draw_list_bind_uniform_set(DrawListID p_list, RID p_uniform_set, uint32_t p_index) {
+
+#ifdef DEBUG_ENABLED
+ ERR_FAIL_COND_MSG(p_index >= limits.maxBoundDescriptorSets || p_index >= MAX_UNIFORM_SETS,
+ "Attempting to bind a descriptor set (" + itos(p_index) + ") greater than what the hardware supports (" + itos(limits.maxBoundDescriptorSets) + ").");
+#endif
+ DrawList *dl = _get_draw_list_ptr(p_list);
+ ERR_FAIL_COND(!dl);
+
+#ifdef DEBUG_ENABLED
+ ERR_FAIL_COND_MSG(!dl->validation.active, "Submitted Draw Lists can no longer be modified.");
+#endif
+
+ const UniformSet *uniform_set = uniform_set_owner.getornull(p_uniform_set);
+ ERR_FAIL_COND(!uniform_set);
+
+ if (p_index > dl->state.set_count) {
+ dl->state.set_count = p_index;
+ }
+
+ dl->state.sets[p_index].descriptor_set = uniform_set->descriptor_set; //update set pointer
+ dl->state.sets[p_index].bound = false; //needs rebind
+ dl->state.sets[p_index].uniform_set_format = uniform_set->format;
+ dl->state.sets[p_index].uniform_set = p_uniform_set;
+
+#ifdef DEBUG_ENABLED
+ { //validate that textures bound are not attached as framebuffer bindings
+ uint32_t attachable_count = uniform_set->attachable_textures.size();
+ const RID *attachable_ptr = uniform_set->attachable_textures.ptr();
+ uint32_t bound_count = draw_list_bound_textures.size();
+ const RID *bound_ptr = draw_list_bound_textures.ptr();
+ for (uint32_t i = 0; i < attachable_count; i++) {
+ for (uint32_t j = 0; j < bound_count; j++) {
+ ERR_FAIL_COND_MSG(attachable_ptr[i] == bound_ptr[j],
+ "Attempted to use the same texture in framebuffer attachment and a uniform set, this is not allowed.");
+ }
+ }
+ }
+#endif
+}
+
+void RenderingDeviceVulkan::draw_list_bind_vertex_array(DrawListID p_list, RID p_vertex_array) {
+ DrawList *dl = _get_draw_list_ptr(p_list);
+ ERR_FAIL_COND(!dl);
+#ifdef DEBUG_ENABLED
+ ERR_FAIL_COND_MSG(!dl->validation.active, "Submitted Draw Lists can no longer be modified.");
+#endif
+
+ const VertexArray *vertex_array = vertex_array_owner.getornull(p_vertex_array);
+ ERR_FAIL_COND(!vertex_array);
+
+ if (dl->state.vertex_array == p_vertex_array) {
+ return; //already set
+ }
+
+ dl->state.vertex_array = p_vertex_array;
+
+#ifdef DEBUG_ENABLED
+ dl->validation.vertex_format = vertex_array->description;
+ dl->validation.vertex_max_instances_allowed = vertex_array->max_instances_allowed;
+#endif
+ dl->validation.vertex_array_size = vertex_array->vertex_count;
+ vkCmdBindVertexBuffers(dl->command_buffer, 0, vertex_array->buffers.size(), vertex_array->buffers.ptr(), vertex_array->offsets.ptr());
+}
+void RenderingDeviceVulkan::draw_list_bind_index_array(DrawListID p_list, RID p_index_array) {
+
+ DrawList *dl = _get_draw_list_ptr(p_list);
+ ERR_FAIL_COND(!dl);
+#ifdef DEBUG_ENABLED
+ ERR_FAIL_COND_MSG(!dl->validation.active, "Submitted Draw Lists can no longer be modified.");
+#endif
+
+ const IndexArray *index_array = index_array_owner.getornull(p_index_array);
+ ERR_FAIL_COND(!index_array);
+
+ if (dl->state.index_array == p_index_array) {
+ return; //already set
+ }
+
+ dl->state.index_array = p_index_array;
+#ifdef DEBUG_ENABLED
+ dl->validation.index_array_max_index = index_array->max_index;
+#endif
+ dl->validation.index_array_size = index_array->indices;
+ dl->validation.index_array_offset = index_array->offset;
+
+ vkCmdBindIndexBuffer(dl->command_buffer, index_array->buffer, index_array->offset, index_array->index_type);
+}
+
+void RenderingDeviceVulkan::draw_list_set_line_width(DrawListID p_list, float p_width) {
+
+ DrawList *dl = _get_draw_list_ptr(p_list);
+ ERR_FAIL_COND(!dl);
+#ifdef DEBUG_ENABLED
+ ERR_FAIL_COND_MSG(!dl->validation.active, "Submitted Draw Lists can no longer be modified.");
+#endif
+
+ vkCmdSetLineWidth(dl->command_buffer, p_width);
+}
+
+void RenderingDeviceVulkan::draw_list_set_push_constant(DrawListID p_list, void *p_data, uint32_t p_data_size) {
+ DrawList *dl = _get_draw_list_ptr(p_list);
+ ERR_FAIL_COND(!dl);
+
+#ifdef DEBUG_ENABLED
+ ERR_FAIL_COND_MSG(!dl->validation.active, "Submitted Draw Lists can no longer be modified.");
+#endif
+
+#ifdef DEBUG_ENABLED
+ ERR_FAIL_COND_MSG(p_data_size != dl->validation.pipeline_push_constant_size,
+ "This render pipeline requires (" + itos(dl->validation.pipeline_push_constant_size) + ") bytes of push constant data, supplied: (" + itos(p_data_size) + ")");
+#endif
+ vkCmdPushConstants(dl->command_buffer, dl->state.pipeline_layout, dl->state.pipeline_push_constant_stages, 0, p_data_size, p_data);
+#ifdef DEBUG_ENABLED
+ dl->validation.pipeline_push_constant_suppplied = true;
+#endif
+}
+
+void RenderingDeviceVulkan::draw_list_draw(DrawListID p_list, bool p_use_indices, uint32_t p_instances, uint32_t p_procedural_vertices) {
+
+ DrawList *dl = _get_draw_list_ptr(p_list);
+ ERR_FAIL_COND(!dl);
+#ifdef DEBUG_ENABLED
+ ERR_FAIL_COND_MSG(!dl->validation.active, "Submitted Draw Lists can no longer be modified.");
+#endif
+
+#ifdef DEBUG_ENABLED
+ ERR_FAIL_COND_MSG(!dl->validation.pipeline_active,
+ "No render pipeline was set before attempting to draw.");
+ if (dl->validation.pipeline_vertex_format != INVALID_ID) {
+ //pipeline uses vertices, validate format
+ ERR_FAIL_COND_MSG(dl->validation.vertex_format == INVALID_ID,
+ "No vertex array was bound, and render pipeline expects vertices.");
+ //make sure format is right
+ ERR_FAIL_COND_MSG(dl->validation.pipeline_vertex_format != dl->validation.vertex_format,
+ "The vertex format used to create the pipeline does not match the vertex format bound.");
+ //make sure amount of instances is valid
+ ERR_FAIL_COND_MSG(p_instances > dl->validation.vertex_max_instances_allowed,
+ "Amount of instances requested (" + itos(p_instances) + " is larger than the maximum amount suported by the bound vertex array (" + itos(dl->validation.vertex_max_instances_allowed) + ").");
+ }
+
+ if (dl->validation.pipeline_push_constant_size > 0) {
+ //using push constants, check that they were supplied
+ ERR_FAIL_COND_MSG(!dl->validation.pipeline_push_constant_suppplied,
+ "The shader in this pipeline requires a push constant to be set before drawing, but it's not present.");
+ }
+
+#endif
+
+ //Bind descriptor sets
+
+ for (uint32_t i = 0; i < dl->state.set_count; i++) {
+
+ if (dl->state.sets[i].pipeline_expected_format == 0) {
+ continue; //nothing expected by this pipeline
+ }
+#ifdef DEBUG_ENABLED
+ if (dl->state.sets[i].pipeline_expected_format != dl->state.sets[i].uniform_set_format) {
+
+ if (dl->state.sets[i].uniform_set_format == 0) {
+ ERR_FAIL_MSG("Uniforms were never supplied for set (" + itos(i) + ") at the time of drawing, which are required by the pipeline");
+ } else if (uniform_set_owner.owns(dl->state.sets[i].uniform_set)) {
+ UniformSet *us = uniform_set_owner.getornull(dl->state.sets[i].uniform_set);
+ ERR_FAIL_MSG("Uniforms supplied for set (" + itos(i) + "):\n" + _shader_uniform_debug(us->shader_id, us->shader_set) + "\nare not the same format as required by the pipeline shader. Pipeline shader requires the following bindings:\n" + _shader_uniform_debug(dl->state.pipeline_shader));
+ } else {
+ ERR_FAIL_MSG("Uniforms supplied for set (" + itos(i) + ", which was was just freed) are not the same format as required by the pipeline shader. Pipeline shader requires the following bindings:\n" + _shader_uniform_debug(dl->state.pipeline_shader));
+ }
+ }
+#endif
+ if (!dl->state.sets[i].bound) {
+ //All good, see if this requires re-binding
+ vkCmdBindDescriptorSets(dl->command_buffer, VK_PIPELINE_BIND_POINT_GRAPHICS, dl->state.pipeline_layout, i, 1, &dl->state.sets[i].descriptor_set, 0, NULL);
+ dl->state.sets[i].bound = true;
+ }
+ }
+
+ if (p_use_indices) {
+
+#ifdef DEBUG_ENABLED
+ ERR_FAIL_COND_MSG(p_procedural_vertices > 0,
+ "Procedural vertices can't be used together with indices.");
+
+ ERR_FAIL_COND_MSG(!dl->validation.index_array_size,
+ "Draw command requested indices, but no index buffer was set.");
+
+ if (dl->validation.pipeline_vertex_format != INVALID_ID) {
+ //uses vertices, do some vertex validations
+ ERR_FAIL_COND_MSG(dl->validation.vertex_array_size < dl->validation.index_array_max_index,
+ "Index array references (max index: " + itos(dl->validation.index_array_max_index) + ") indices beyond the vertex array size (" + itos(dl->validation.vertex_array_size) + ").");
+ }
+
+ ERR_FAIL_COND_MSG(dl->validation.pipeline_uses_restart_indices != dl->validation.index_buffer_uses_restart_indices,
+ "The usage of restart indices in index buffer does not match the render primitive in the pipeline.");
+#endif
+ uint32_t to_draw = dl->validation.index_array_size;
+
+#ifdef DEBUG_ENABLED
+ ERR_FAIL_COND_MSG(to_draw < dl->validation.pipeline_primitive_minimum,
+ "Too few indices (" + itos(to_draw) + ") for the render primitive set in the render pipeline (" + itos(dl->validation.pipeline_primitive_minimum) + ").");
+
+ ERR_FAIL_COND_MSG((to_draw % dl->validation.pipeline_primitive_divisor) != 0,
+ "Index amount (" + itos(to_draw) + ") must be a multiple of the amount of indices required by the render primitive (" + itos(dl->validation.pipeline_primitive_divisor) + ").");
+#endif
+ vkCmdDrawIndexed(dl->command_buffer, to_draw, p_instances, dl->validation.index_array_offset, 0, 0);
+ } else {
+
+ uint32_t to_draw;
+
+ if (p_procedural_vertices > 0) {
+#ifdef DEBUG_ENABLED
+ ERR_FAIL_COND_MSG(dl->validation.pipeline_vertex_format != INVALID_ID,
+ "Procedural vertices requested, but pipeline expects a vertex array.");
+#endif
+ to_draw = p_procedural_vertices;
+ } else {
+
+#ifdef DEBUG_ENABLED
+ ERR_FAIL_COND_MSG(dl->validation.pipeline_vertex_format == INVALID_ID,
+ "Draw command lacks indices, but pipeline format does not use vertices.");
+#endif
+ to_draw = dl->validation.vertex_array_size;
+ }
+
+#ifdef DEBUG_ENABLED
+ ERR_FAIL_COND_MSG(to_draw < dl->validation.pipeline_primitive_minimum,
+ "Too few vertices (" + itos(to_draw) + ") for the render primitive set in the render pipeline (" + itos(dl->validation.pipeline_primitive_minimum) + ").");
+
+ ERR_FAIL_COND_MSG((to_draw % dl->validation.pipeline_primitive_divisor) != 0,
+ "Vertex amount (" + itos(to_draw) + ") must be a multiple of the amount of vertices required by the render primitive (" + itos(dl->validation.pipeline_primitive_divisor) + ").");
+#endif
+
+ vkCmdDraw(dl->command_buffer, to_draw, p_instances, 0, 0);
+ }
+}
+
+void RenderingDeviceVulkan::draw_list_enable_scissor(DrawListID p_list, const Rect2 &p_rect) {
+ DrawList *dl = _get_draw_list_ptr(p_list);
+
+ ERR_FAIL_COND(!dl);
+#ifdef DEBUG_ENABLED
+ ERR_FAIL_COND_MSG(!dl->validation.active, "Submitted Draw Lists can no longer be modified.");
+#endif
+ Rect2i rect = p_rect;
+ rect.position += dl->viewport.position;
+
+ rect = dl->viewport.clip(rect);
+
+ if (rect.get_area() == 0) {
+ return;
+ }
+ VkRect2D scissor;
+ scissor.offset.x = rect.position.x;
+ scissor.offset.y = rect.position.y;
+ scissor.extent.width = rect.size.width;
+ scissor.extent.height = rect.size.height;
+
+ vkCmdSetScissor(dl->command_buffer, 0, 1, &scissor);
+}
+void RenderingDeviceVulkan::draw_list_disable_scissor(DrawListID p_list) {
+ DrawList *dl = _get_draw_list_ptr(p_list);
+ ERR_FAIL_COND(!dl);
+#ifdef DEBUG_ENABLED
+ ERR_FAIL_COND_MSG(!dl->validation.active, "Submitted Draw Lists can no longer be modified.");
+#endif
+
+ VkRect2D scissor;
+ scissor.offset.x = dl->viewport.position.x;
+ scissor.offset.y = dl->viewport.position.y;
+ scissor.extent.width = dl->viewport.size.width;
+ scissor.extent.height = dl->viewport.size.height;
+ vkCmdSetScissor(dl->command_buffer, 0, 1, &scissor);
+}
+
+void RenderingDeviceVulkan::draw_list_end() {
+
+ _THREAD_SAFE_METHOD_
+
+ ERR_FAIL_COND_MSG(!draw_list, "Immediate draw list is already inactive.");
+
+ if (draw_list_split) {
+ //send all command buffers
+ VkCommandBuffer *command_buffers = (VkCommandBuffer *)alloca(sizeof(VkCommandBuffer) * draw_list_count);
+ for (uint32_t i = 0; i < draw_list_count; i++) {
+ vkEndCommandBuffer(draw_list[i].command_buffer);
+ command_buffers[i] = draw_list[i].command_buffer;
+ }
+
+ vkCmdExecuteCommands(frames[frame].draw_command_buffer, draw_list_count, command_buffers);
+ vkCmdEndRenderPass(frames[frame].draw_command_buffer);
+ memdelete_arr(draw_list);
+ draw_list = NULL;
+
+ } else {
+ //just end the list
+ vkCmdEndRenderPass(draw_list->command_buffer);
+ memdelete(draw_list);
+ draw_list = NULL;
+ }
+
+ for (int i = 0; i < draw_list_bound_textures.size(); i++) {
+ Texture *texture = texture_owner.getornull(draw_list_bound_textures[i]);
+ ERR_CONTINUE(!texture); //wtf
+ if (draw_list_unbind_color_textures && (texture->usage_flags & TEXTURE_USAGE_COLOR_ATTACHMENT_BIT)) {
+ texture->bound = false;
+ }
+ if (draw_list_unbind_depth_textures && (texture->usage_flags & TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)) {
+ texture->bound = false;
+ }
+ }
+
+ draw_list_bound_textures.clear();
+
+ // To ensure proper synchronization, we must make sure rendering is done before:
+ // * Some buffer is copied
+ // * Another render pass happens (since it may read what was just rendered)
+
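+ // The barrier below makes color/depth attachment writes from this pass
+ // visible to vertex input, compute and transfer before any of them consume
+ // the results.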
+#ifdef FORCE_FULL_BARRIER
+ _full_barrier(true);
+#else
+ _memory_barrier(VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT, VK_PIPELINE_STAGE_VERTEX_INPUT_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT | VK_PIPELINE_STAGE_TRANSFER_BIT, VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, VK_ACCESS_INDEX_READ_BIT | VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT | VK_ACCESS_TRANSFER_READ_BIT | VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT, true);
+#endif
+}
+
+/***********************/
+/**** COMPUTE LISTS ****/
+/***********************/
+
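+//unlike draw lists, compute lists record directly into the frame's primary
+//command buffer, so a single sentinel ID (ID_TYPE_COMPUTE_LIST) is enough.
+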
+RenderingDevice::ComputeListID RenderingDeviceVulkan::compute_list_begin() {
+
+ ERR_FAIL_COND_V_MSG(draw_list != NULL, INVALID_ID, "Only one draw list can be active at the same time.");
+ ERR_FAIL_COND_V_MSG(compute_list != NULL, INVALID_ID, "Only one draw/compute list can be active at the same time.");
+
+ compute_list = memnew(ComputeList);
+ compute_list->command_buffer = frames[frame].draw_command_buffer;
+
+ return ID_TYPE_COMPUTE_LIST;
+}
+
+void RenderingDeviceVulkan::compute_list_bind_compute_pipeline(ComputeListID p_list, RID p_compute_pipeline) {
+ ERR_FAIL_COND(p_list != ID_TYPE_COMPUTE_LIST);
+ ERR_FAIL_COND(!compute_list);
+
+ ComputeList *cl = compute_list;
+
+ const ComputePipeline *pipeline = compute_pipeline_owner.getornull(p_compute_pipeline);
+ ERR_FAIL_COND(!pipeline);
+
+ if (p_compute_pipeline == cl->state.pipeline) {
+ return; //redundant state, return.
+ }
+
+ cl->state.pipeline = p_compute_pipeline;
+ cl->state.pipeline_layout = pipeline->pipeline_layout;
+
+ vkCmdBindPipeline(cl->command_buffer, VK_PIPELINE_BIND_POINT_COMPUTE, pipeline->pipeline);
+
+ if (cl->state.pipeline_shader != pipeline->shader) {
+ // shader changed, so descriptor sets may become incompatible.
+
+ //go through ALL sets, and unbind them (and all those above) if the format is different
+
+ uint32_t pcount = pipeline->set_formats.size(); //formats count in this pipeline
+ cl->state.set_count = MAX(cl->state.set_count, pcount);
+ const uint32_t *pformats = pipeline->set_formats.ptr(); //pipeline set formats
+
+ bool sets_valid = true; //once invalid, all above become invalid
+ for (uint32_t i = 0; i < pcount; i++) {
+ //if a part of the format is different, invalidate it (and the rest)
+ if (!sets_valid || cl->state.sets[i].pipeline_expected_format != pformats[i]) {
+ cl->state.sets[i].bound = false;
+ cl->state.sets[i].pipeline_expected_format = pformats[i];
+ sets_valid = false;
+ }
+ }
+
+ for (uint32_t i = pcount; i < cl->state.set_count; i++) {
+ //unbind the ones above (not used) if exist
+ cl->state.sets[i].bound = false;
+ }
+
+ cl->state.set_count = pcount; //update set count
+
+ if (pipeline->push_constant_size) {
+ cl->state.pipeline_push_constant_stages = pipeline->push_constant_stages;
+#ifdef DEBUG_ENABLED
+ cl->validation.pipeline_push_constant_suppplied = false;
+#endif
+ }
+
+ cl->state.pipeline_shader = pipeline->shader;
+ }
+
+#ifdef DEBUG_ENABLED
+ //update compute pass pipeline info
+ cl->validation.pipeline_active = true;
+ cl->validation.pipeline_push_constant_size = pipeline->push_constant_size;
+#endif
+}
+void RenderingDeviceVulkan::compute_list_bind_uniform_set(ComputeListID p_list, RID p_uniform_set, uint32_t p_index) {
+ ERR_FAIL_COND(p_list != ID_TYPE_COMPUTE_LIST);
+ ERR_FAIL_COND(!compute_list);
+
+ ComputeList *cl = compute_list;
+
+#ifdef DEBUG_ENABLED
+ ERR_FAIL_COND_MSG(p_index >= limits.maxBoundDescriptorSets || p_index >= MAX_UNIFORM_SETS,
+ "Attempting to bind a descriptor set (" + itos(p_index) + ") greater than what the hardware supports (" + itos(limits.maxBoundDescriptorSets) + ").");
+#endif
+
+#ifdef DEBUG_ENABLED
+ ERR_FAIL_COND_MSG(!cl->validation.active, "Submitted Compute Lists can no longer be modified.");
+#endif
+
+ UniformSet *uniform_set = uniform_set_owner.getornull(p_uniform_set);
+ ERR_FAIL_COND(!uniform_set);
+
+ if (p_index > cl->state.set_count) {
+ cl->state.set_count = p_index;
+ }
+
+ cl->state.sets[p_index].descriptor_set = uniform_set->descriptor_set; //update set pointer
+ cl->state.sets[p_index].bound = false; //needs rebind
+ cl->state.sets[p_index].uniform_set_format = uniform_set->format;
+ cl->state.sets[p_index].uniform_set = p_uniform_set;
+
+ uint32_t textures_to_sampled_count = uniform_set->mutable_sampled_textures.size();
+ Texture **textures_to_sampled = uniform_set->mutable_sampled_textures.ptrw();
+
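+ //mutable sampled textures may have been left in another layout (e.g. GENERAL
+ //after a storage write); transition them back to SHADER_READ_ONLY_OPTIMAL
+ //before this dispatch samples them.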
+ for (uint32_t i = 0; i < textures_to_sampled_count; i++) {
+ if (textures_to_sampled[i]->layout != VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL) {
+
+ VkImageMemoryBarrier image_memory_barrier;
+ image_memory_barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
+ image_memory_barrier.pNext = NULL;
+ image_memory_barrier.srcAccessMask = VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
+ image_memory_barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
+ image_memory_barrier.oldLayout = textures_to_sampled[i]->layout;
+ image_memory_barrier.newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
+
+ image_memory_barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
+ image_memory_barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
+ image_memory_barrier.image = textures_to_sampled[i]->image;
+ image_memory_barrier.subresourceRange.aspectMask = textures_to_sampled[i]->read_aspect_mask;
+ image_memory_barrier.subresourceRange.baseMipLevel = 0;
+ image_memory_barrier.subresourceRange.levelCount = textures_to_sampled[i]->mipmaps;
+ image_memory_barrier.subresourceRange.baseArrayLayer = 0;
+ image_memory_barrier.subresourceRange.layerCount = textures_to_sampled[i]->layers;
+
+ vkCmdPipelineBarrier(cl->command_buffer, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, 0, 0, NULL, 0, NULL, 1, &image_memory_barrier);
+
+ textures_to_sampled[i]->layout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
+
+ cl->state.textures_to_sampled_layout.erase(textures_to_sampled[i]);
+ }
+ }
+
+ uint32_t textures_to_storage_count = uniform_set->mutable_storage_textures.size();
+ Texture **textures_to_storage = uniform_set->mutable_storage_textures.ptrw();
+
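+ //storage textures must be in VK_IMAGE_LAYOUT_GENERAL to be written; remember
+ //them so compute_list_end() can return them to a sampling-friendly layout.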
+ for (uint32_t i = 0; i < textures_to_storage_count; i++) {
+ if (textures_to_storage[i]->layout != VK_IMAGE_LAYOUT_GENERAL) {
+
+ VkImageMemoryBarrier image_memory_barrier;
+ image_memory_barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
+ image_memory_barrier.pNext = NULL;
+ image_memory_barrier.srcAccessMask = VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
+ image_memory_barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
+ image_memory_barrier.oldLayout = textures_to_storage[i]->layout;
+ image_memory_barrier.newLayout = VK_IMAGE_LAYOUT_GENERAL;
+
+ image_memory_barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
+ image_memory_barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
+ image_memory_barrier.image = textures_to_storage[i]->image;
+ image_memory_barrier.subresourceRange.aspectMask = textures_to_storage[i]->read_aspect_mask;
+ image_memory_barrier.subresourceRange.baseMipLevel = 0;
+ image_memory_barrier.subresourceRange.levelCount = textures_to_storage[i]->mipmaps;
+ image_memory_barrier.subresourceRange.baseArrayLayer = 0;
+ image_memory_barrier.subresourceRange.layerCount = textures_to_storage[i]->layers;
+
+ vkCmdPipelineBarrier(cl->command_buffer, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, 0, 0, NULL, 0, NULL, 1, &image_memory_barrier);
+
+ textures_to_storage[i]->layout = VK_IMAGE_LAYOUT_GENERAL;
+
+ cl->state.textures_to_sampled_layout.insert(textures_to_storage[i]); //needs to go back to sampled layout afterwards
+ }
+ }
+
+#if 0
+ { //validate that textures bound are not attached as framebuffer bindings
+ uint32_t attachable_count = uniform_set->attachable_textures.size();
+ const RID *attachable_ptr = uniform_set->attachable_textures.ptr();
+ uint32_t bound_count = draw_list_bound_textures.size();
+ const RID *bound_ptr = draw_list_bound_textures.ptr();
+ for (uint32_t i = 0; i < attachable_count; i++) {
+ for (uint32_t j = 0; j < bound_count; j++) {
+ ERR_FAIL_COND_MSG(attachable_ptr[i] == bound_ptr[j],
+ "Attempted to use the same texture in framebuffer attachment and a uniform set, this is not allowed.");
+ }
+ }
+ }
+#endif
+}
+
+void RenderingDeviceVulkan::compute_list_set_push_constant(ComputeListID p_list, void *p_data, uint32_t p_data_size) {
+ ERR_FAIL_COND(p_list != ID_TYPE_COMPUTE_LIST);
+ ERR_FAIL_COND(!compute_list);
+
+ ComputeList *cl = compute_list;
+
+#ifdef DEBUG_ENABLED
+ ERR_FAIL_COND_MSG(!cl->validation.active, "Submitted Compute Lists can no longer be modified.");
+#endif
+
+#ifdef DEBUG_ENABLED
+ ERR_FAIL_COND_MSG(p_data_size != cl->validation.pipeline_push_constant_size,
+ "This compute pipeline requires (" + itos(cl->validation.pipeline_push_constant_size) + ") bytes of push constant data, supplied: (" + itos(p_data_size) + ")");
+#endif
+ vkCmdPushConstants(cl->command_buffer, cl->state.pipeline_layout, cl->state.pipeline_push_constant_stages, 0, p_data_size, p_data);
+#ifdef DEBUG_ENABLED
+ cl->validation.pipeline_push_constant_suppplied = true;
+#endif
+}
+void RenderingDeviceVulkan::compute_list_dispatch(ComputeListID p_list, uint32_t p_x_groups, uint32_t p_y_groups, uint32_t p_z_groups) {
+ ERR_FAIL_COND(p_list != ID_TYPE_COMPUTE_LIST);
+ ERR_FAIL_COND(!compute_list);
+
+ ComputeList *cl = compute_list;
+
+#ifdef DEBUG_ENABLED
+ ERR_FAIL_COND_MSG(p_x_groups > limits.maxComputeWorkGroupCount[0],
+ "Dispatch amount of X compute groups (" + itos(p_x_groups) + ") is larger than device limit (" + itos(limits.maxComputeWorkGroupCount[0]) + ")");
+ ERR_FAIL_COND_MSG(p_y_groups > limits.maxComputeWorkGroupCount[1],
+ "Dispatch amount of Y compute groups (" + itos(p_y_groups) + ") is larger than device limit (" + itos(limits.maxComputeWorkGroupCount[1]) + ")");
+ ERR_FAIL_COND_MSG(p_z_groups > limits.maxComputeWorkGroupCount[2],
+ "Dispatch amount of Z compute groups (" + itos(p_z_groups) + ") is larger than device limit (" + itos(limits.maxComputeWorkGroupCount[2]) + ")");
+
+ ERR_FAIL_COND_MSG(!cl->validation.active, "Submitted Compute Lists can no longer be modified.");
+#endif
+
+#ifdef DEBUG_ENABLED
+
+ ERR_FAIL_COND_MSG(!cl->validation.pipeline_active, "No compute pipeline was set before attempting to dispatch.");
+
+ if (cl->validation.pipeline_push_constant_size > 0) {
+ //using push constants, check that they were supplied
+ ERR_FAIL_COND_MSG(!cl->validation.pipeline_push_constant_suppplied,
+ "The shader in this pipeline requires a push constant to be set before drawing, but it's not present.");
+ }
+
+#endif
+
+ //Bind descriptor sets
+
+ for (uint32_t i = 0; i < cl->state.set_count; i++) {
+
+ if (cl->state.sets[i].pipeline_expected_format == 0) {
+ continue; //nothing expected by this pipeline
+ }
+#ifdef DEBUG_ENABLED
+ if (cl->state.sets[i].pipeline_expected_format != cl->state.sets[i].uniform_set_format) {
+
+ if (cl->state.sets[i].uniform_set_format == 0) {
+ ERR_FAIL_MSG("Uniforms were never supplied for set (" + itos(i) + ") at the time of drawing, which are required by the pipeline");
+ } else if (uniform_set_owner.owns(cl->state.sets[i].uniform_set)) {
+ UniformSet *us = uniform_set_owner.getornull(cl->state.sets[i].uniform_set);
+ ERR_FAIL_MSG("Uniforms supplied for set (" + itos(i) + "):\n" + _shader_uniform_debug(us->shader_id, us->shader_set) + "\nare not the same format as required by the pipeline shader. Pipeline shader requires the following bindings:\n" + _shader_uniform_debug(cl->state.pipeline_shader));
+ } else {
+ ERR_FAIL_MSG("Uniforms supplied for set (" + itos(i) + ", which was was just freed) are not the same format as required by the pipeline shader. Pipeline shader requires the following bindings:\n" + _shader_uniform_debug(cl->state.pipeline_shader));
+ }
+ }
+#endif
+ if (!cl->state.sets[i].bound) {
+ //All good, see if this requires re-binding
+ vkCmdBindDescriptorSets(cl->command_buffer, VK_PIPELINE_BIND_POINT_COMPUTE, cl->state.pipeline_layout, i, 1, &cl->state.sets[i].descriptor_set, 0, NULL);
+ cl->state.sets[i].bound = true;
+ }
+ }
+
+ vkCmdDispatch(cl->command_buffer, p_x_groups, p_y_groups, p_z_groups);
+}
+
+void RenderingDeviceVulkan::compute_list_add_barrier(ComputeListID p_list) {
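+ //inserts a write-to-read barrier between dispatches, so a dispatch that reads
+ //the output of a previous one in the same list sees completed results.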
+#ifdef FORCE_FULL_BARRIER
+ _full_barrier(true);
+#else
+ _memory_barrier(VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_ACCESS_SHADER_WRITE_BIT, VK_ACCESS_SHADER_READ_BIT, true);
+#endif
+}
+
+void RenderingDeviceVulkan::compute_list_end() {
+ ERR_FAIL_COND(!compute_list);
+
+ for (Set<Texture *>::Element *E = compute_list->state.textures_to_sampled_layout.front(); E; E = E->next()) {
+
+ VkImageMemoryBarrier image_memory_barrier;
+ image_memory_barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
+ image_memory_barrier.pNext = NULL;
+ image_memory_barrier.srcAccessMask = VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
+ image_memory_barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
+ image_memory_barrier.oldLayout = E->get()->layout;
+ image_memory_barrier.newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
+
+ image_memory_barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
+ image_memory_barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
+ image_memory_barrier.image = E->get()->image;
+ image_memory_barrier.subresourceRange.aspectMask = E->get()->read_aspect_mask;
+ image_memory_barrier.subresourceRange.baseMipLevel = 0;
+ image_memory_barrier.subresourceRange.levelCount = E->get()->mipmaps;
+ image_memory_barrier.subresourceRange.baseArrayLayer = 0;
+ image_memory_barrier.subresourceRange.layerCount = E->get()->layers;
+
+ vkCmdPipelineBarrier(compute_list->command_buffer, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, 0, 0, NULL, 0, NULL, 1, &image_memory_barrier);
+
+ E->get()->layout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
+ }
+
+ memdelete(compute_list);
+ compute_list = NULL;
+#ifdef FORCE_FULL_BARRIER
+ _full_barrier(true);
+#else
+ _memory_barrier(VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_VERTEX_INPUT_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT | VK_PIPELINE_STAGE_TRANSFER_BIT, VK_ACCESS_SHADER_WRITE_BIT, VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_INDEX_READ_BIT | VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT | VK_ACCESS_TRANSFER_READ_BIT, true);
+#endif
+}
+
+#if 0
+void RenderingDeviceVulkan::draw_list_render_secondary_to_framebuffer(ID p_framebuffer, ID *p_draw_lists, uint32_t p_draw_list_count, InitialAction p_initial_action, FinalAction p_final_action, const Vector<Variant> &p_clear_colors) {
+
+ VkCommandBuffer frame_cmdbuf = frames[frame].frame_buffer;
+ ERR_FAIL_COND(!frame_cmdbuf);
+
+ VkRenderPassBeginInfo render_pass_begin;
+ render_pass_begin.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
+ render_pass_begin.pNext = NULL;
+ render_pass_begin.renderPass = context->get_render_pass();
+ render_pass_begin.framebuffer = context->get_frame_framebuffer(frame);
+
+ render_pass_begin.renderArea.extent.width = context->get_screen_width(p_screen);
+ render_pass_begin.renderArea.extent.height = context->get_screen_height(p_screen);
+ render_pass_begin.renderArea.offset.x = 0;
+ render_pass_begin.renderArea.offset.y = 0;
+
+ render_pass_begin.clearValueCount = 1;
+
+ VkClearValue clear_value;
+ clear_value.color.float32[0] = p_clear_color.r;
+ clear_value.color.float32[1] = p_clear_color.g;
+ clear_value.color.float32[2] = p_clear_color.b;
+ clear_value.color.float32[3] = p_clear_color.a;
+
+ render_pass_begin.pClearValues = &clear_value;
+
+ vkCmdBeginRenderPass(frame_cmdbuf, &render_pass_begin, VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS);
+
+ ID screen_format = screen_get_framebuffer_format();
+ {
+
+ VkCommandBuffer *command_buffers = (VkCommandBuffer *)alloca(sizeof(VkCommandBuffer) * p_draw_list_count);
+ uint32_t command_buffer_count = 0;
+
+ for (uint32_t i = 0; i < p_draw_list_count; i++) {
+ DrawList *dl = _get_draw_list_ptr(p_draw_lists[i]);
+ ERR_CONTINUE_MSG(!dl, "Draw list index (" + itos(i) + ") is not a valid draw list ID.");
+ ERR_CONTINUE_MSG(dl->validation.framebuffer_format != p_format_check,
+ "Draw list index (" + itos(i) + ") is created with a framebuffer format incompatible with this render pass.");
+
+ if (dl->validation.active) {
+ //needs to be closed, so close it.
+ vkEndCommandBuffer(dl->command_buffer);
+ dl->validation.active = false;
+ }
+
+ command_buffers[command_buffer_count++] = dl->command_buffer;
+ }
+
+ print_line("to draw: " + itos(command_buffer_count));
+ vkCmdExecuteCommands(p_primary, command_buffer_count, command_buffers);
+ }
+
+ vkCmdEndRenderPass(frame_cmdbuf);
+
+}
+#endif
+
+void RenderingDeviceVulkan::_free_internal(RID p_id) {
+
+ //push everything so it's disposed of the next time this frame index is processed (by then it's safe to free)
+ if (texture_owner.owns(p_id)) {
+ Texture *texture = texture_owner.getornull(p_id);
+ frames[frame].textures_to_dispose_of.push_back(*texture);
+ texture_owner.free(p_id);
+ } else if (framebuffer_owner.owns(p_id)) {
+ Framebuffer *framebuffer = framebuffer_owner.getornull(p_id);
+ frames[frame].framebuffers_to_dispose_of.push_back(*framebuffer);
+ framebuffer_owner.free(p_id);
+ } else if (sampler_owner.owns(p_id)) {
+ VkSampler *sampler = sampler_owner.getornull(p_id);
+ frames[frame].samplers_to_dispose_of.push_back(*sampler);
+ sampler_owner.free(p_id);
+ } else if (vertex_buffer_owner.owns(p_id)) {
+ Buffer *vertex_buffer = vertex_buffer_owner.getornull(p_id);
+ frames[frame].buffers_to_dispose_of.push_back(*vertex_buffer);
+ vertex_buffer_owner.free(p_id);
+ } else if (vertex_array_owner.owns(p_id)) {
+ vertex_array_owner.free(p_id);
+ } else if (index_buffer_owner.owns(p_id)) {
+ IndexBuffer *index_buffer = index_buffer_owner.getornull(p_id);
+ Buffer b;
+ b.allocation = index_buffer->allocation;
+ b.buffer = index_buffer->buffer;
+ b.size = index_buffer->size;
+ frames[frame].buffers_to_dispose_of.push_back(b);
+ index_buffer_owner.free(p_id);
+ } else if (index_array_owner.owns(p_id)) {
+ index_array_owner.free(p_id);
+ } else if (shader_owner.owns(p_id)) {
+ Shader *shader = shader_owner.getornull(p_id);
+ frames[frame].shaders_to_dispose_of.push_back(*shader);
+ shader_owner.free(p_id);
+ } else if (uniform_buffer_owner.owns(p_id)) {
+ Buffer *uniform_buffer = uniform_buffer_owner.getornull(p_id);
+ frames[frame].buffers_to_dispose_of.push_back(*uniform_buffer);
+ uniform_buffer_owner.free(p_id);
+ } else if (texture_buffer_owner.owns(p_id)) {
+ TextureBuffer *texture_buffer = texture_buffer_owner.getornull(p_id);
+ frames[frame].buffers_to_dispose_of.push_back(texture_buffer->buffer);
+ frames[frame].buffer_views_to_dispose_of.push_back(texture_buffer->view);
+ texture_buffer_owner.free(p_id);
+ } else if (storage_buffer_owner.owns(p_id)) {
+ Buffer *storage_buffer = storage_buffer_owner.getornull(p_id);
+ frames[frame].buffers_to_dispose_of.push_back(*storage_buffer);
+ storage_buffer_owner.free(p_id);
+ } else if (uniform_set_owner.owns(p_id)) {
+ UniformSet *uniform_set = uniform_set_owner.getornull(p_id);
+ frames[frame].uniform_sets_to_dispose_of.push_back(*uniform_set);
+ uniform_set_owner.free(p_id);
+ } else if (render_pipeline_owner.owns(p_id)) {
+ RenderPipeline *pipeline = render_pipeline_owner.getornull(p_id);
+ frames[frame].render_pipelines_to_dispose_of.push_back(*pipeline);
+ render_pipeline_owner.free(p_id);
+ } else if (compute_pipeline_owner.owns(p_id)) {
+ ComputePipeline *pipeline = compute_pipeline_owner.getornull(p_id);
+ frames[frame].compute_pipelines_to_dispose_of.push_back(*pipeline);
+ compute_pipeline_owner.free(p_id);
+ } else {
+ ERR_PRINT("Attempted to free invalid ID: " + itos(p_id.get_id()));
+ }
+}
+void RenderingDeviceVulkan::free(RID p_id) {
+
+ _THREAD_SAFE_METHOD_
+
+ _free_dependencies(p_id); //recursively erase dependencies first, to avoid potential API problems
+ _free_internal(p_id);
+}
+void RenderingDeviceVulkan::swap_buffers() {
+
+ _THREAD_SAFE_METHOD_
+
+ { //finalize frame
+
+ if (draw_list) {
+ ERR_PRINT("Found open draw list at the end of the frame, this should never happen (further drawing will likely not work).");
+ }
+
+ if (compute_list) {
+ ERR_PRINT("Found open compute list at the end of the frame, this should never happen (further compute will likely not work).");
+ }
+
+ { //end recording on both command buffers (the setup buffer is submitted before anything else)
+ vkEndCommandBuffer(frames[frame].setup_command_buffer);
+ vkEndCommandBuffer(frames[frame].draw_command_buffer);
+ }
+ screen_prepared = false;
+ }
+
+ //swap buffers
+ context->swap_buffers();
+
+ { //advance frame
+
+ frame = (frame + 1) % frame_count;
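+ //frame_count is one more than the swapchain image count, so by the time this
+ //index comes around again the GPU is guaranteed to be done with it and its
+ //pending resources can be freed safely.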
+
+ //erase pending resources
+ _free_pending_resources(frame);
+
+ //create setup command buffer and set as the setup buffer
+
+ {
+ VkCommandBufferBeginInfo cmdbuf_begin;
+ cmdbuf_begin.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
+ cmdbuf_begin.pNext = NULL;
+ cmdbuf_begin.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
+ cmdbuf_begin.pInheritanceInfo = NULL;
+
+ VkResult err = vkResetCommandBuffer(frames[frame].setup_command_buffer, 0);
+ ERR_FAIL_COND(err);
+
+ err = vkBeginCommandBuffer(frames[frame].setup_command_buffer, &cmdbuf_begin);
+ ERR_FAIL_COND(err);
+ context->set_setup_buffer(frames[frame].setup_command_buffer); //append now so it's added before everything else
+ err = vkBeginCommandBuffer(frames[frame].draw_command_buffer, &cmdbuf_begin);
+ ERR_FAIL_COND(err);
+ context->append_command_buffer(frames[frame].draw_command_buffer);
+ }
+
+ //advance current frame
+ frames_drawn++;
+ //advance staging buffer if used
+ if (staging_buffer_used) {
+ staging_buffer_current = (staging_buffer_current + 1) % staging_buffer_blocks.size();
+ staging_buffer_used = false;
+ }
+
+ if (frames[frame].timestamp_count) {
+ vkGetQueryPoolResults(device, frames[frame].timestamp_pool, 0, frames[frame].timestamp_count, sizeof(uint64_t) * max_timestamp_query_elements, frames[frame].timestamp_result_values, sizeof(uint64_t), VK_QUERY_RESULT_64_BIT);
+ SWAP(frames[frame].timestamp_names, frames[frame].timestamp_result_names);
+ SWAP(frames[frame].timestamp_cpu_values, frames[frame].timestamp_cpu_result_values);
+ }
+
+ frames[frame].timestamp_result_count = frames[frame].timestamp_count;
+ frames[frame].timestamp_count = 0;
+ frames[frame].index = Engine::get_singleton()->get_frames_drawn();
+ }
+}
+
+void RenderingDeviceVulkan::_free_pending_resources(int p_frame) {
+ //free in dependency usage order, so nothing weird happens
+ //pipelines
+ while (frames[p_frame].render_pipelines_to_dispose_of.front()) {
+ RenderPipeline *pipeline = &frames[p_frame].render_pipelines_to_dispose_of.front()->get();
+
+ vkDestroyPipeline(device, pipeline->pipeline, NULL);
+
+ frames[p_frame].render_pipelines_to_dispose_of.pop_front();
+ }
+
+ while (frames[p_frame].compute_pipelines_to_dispose_of.front()) {
+ ComputePipeline *pipeline = &frames[p_frame].compute_pipelines_to_dispose_of.front()->get();
+
+ vkDestroyPipeline(device, pipeline->pipeline, NULL);
+
+ frames[p_frame].compute_pipelines_to_dispose_of.pop_front();
+ }
+
+ //uniform sets
+ while (frames[p_frame].uniform_sets_to_dispose_of.front()) {
+ UniformSet *uniform_set = &frames[p_frame].uniform_sets_to_dispose_of.front()->get();
+
+ vkFreeDescriptorSets(device, uniform_set->pool->pool, 1, &uniform_set->descriptor_set);
+ _descriptor_pool_free(uniform_set->pool_key, uniform_set->pool);
+
+ frames[p_frame].uniform_sets_to_dispose_of.pop_front();
+ }
+
+ //buffer views
+ while (frames[p_frame].buffer_views_to_dispose_of.front()) {
+ VkBufferView buffer_view = frames[p_frame].buffer_views_to_dispose_of.front()->get();
+
+ vkDestroyBufferView(device, buffer_view, NULL);
+
+ frames[p_frame].buffer_views_to_dispose_of.pop_front();
+ }
+
+ //shaders
+ while (frames[p_frame].shaders_to_dispose_of.front()) {
+ Shader *shader = &frames[p_frame].shaders_to_dispose_of.front()->get();
+
+ //descriptor set layout for each set
+ for (int i = 0; i < shader->sets.size(); i++) {
+ vkDestroyDescriptorSetLayout(device, shader->sets[i].descriptor_set_layout, NULL);
+ }
+
+ //pipeline layout
+ vkDestroyPipelineLayout(device, shader->pipeline_layout, NULL);
+
+ //shaders themselves
+ for (int i = 0; i < shader->pipeline_stages.size(); i++) {
+ vkDestroyShaderModule(device, shader->pipeline_stages[i].module, NULL);
+ }
+
+ frames[p_frame].shaders_to_dispose_of.pop_front();
+ }
+
+ //samplers
+ while (frames[p_frame].samplers_to_dispose_of.front()) {
+ VkSampler sampler = frames[p_frame].samplers_to_dispose_of.front()->get();
+
+ vkDestroySampler(device, sampler, NULL);
+
+ frames[p_frame].samplers_to_dispose_of.pop_front();
+ }
+
+ //framebuffers
+ while (frames[p_frame].framebuffers_to_dispose_of.front()) {
+ Framebuffer *framebuffer = &frames[p_frame].framebuffers_to_dispose_of.front()->get();
+
+ for (Map<Framebuffer::VersionKey, Framebuffer::Version>::Element *E = framebuffer->framebuffers.front(); E; E = E->next()) {
+ //first framebuffer, then render pass because it depends on it
+ vkDestroyFramebuffer(device, E->get().framebuffer, NULL);
+ vkDestroyRenderPass(device, E->get().render_pass, NULL);
+ }
+
+ frames[p_frame].framebuffers_to_dispose_of.pop_front();
+ }
+
+ //textures
+ while (frames[p_frame].textures_to_dispose_of.front()) {
+ Texture *texture = &frames[p_frame].textures_to_dispose_of.front()->get();
+
+ if (texture->bound) {
+ WARN_PRINT("Deleted a texture while it was bound..");
+ }
+ vkDestroyImageView(device, texture->view, NULL);
+ if (texture->owner.is_null()) {
+ //actually owns the image and the allocation too
+ vmaDestroyImage(allocator, texture->image, texture->allocation);
+ }
+ frames[p_frame].textures_to_dispose_of.pop_front();
+ }
+
+ //buffers
+ while (frames[p_frame].buffers_to_dispose_of.front()) {
+
+ _buffer_free(&frames[p_frame].buffers_to_dispose_of.front()->get());
+
+ frames[p_frame].buffers_to_dispose_of.pop_front();
+ }
+}
+
+void RenderingDeviceVulkan::prepare_screen_for_drawing() {
+ _THREAD_SAFE_METHOD_
+ context->prepare_buffers();
+ screen_prepared = true;
+}
+
+uint32_t RenderingDeviceVulkan::get_frame_delay() const {
+ return frame_count;
+}
+
+void RenderingDeviceVulkan::_flush(bool p_current_frame) {
+
+ //not doing this crashes RADV (undefined behavior)
+ if (p_current_frame) {
+ vkEndCommandBuffer(frames[frame].setup_command_buffer);
+ vkEndCommandBuffer(frames[frame].draw_command_buffer);
+ }
+ context->flush(p_current_frame, p_current_frame);
+ //re-create the setup command
+ if (p_current_frame) {
+ VkCommandBufferBeginInfo cmdbuf_begin;
+ cmdbuf_begin.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
+ cmdbuf_begin.pNext = NULL;
+ cmdbuf_begin.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
+ cmdbuf_begin.pInheritanceInfo = NULL;
+
+ VkResult err = vkBeginCommandBuffer(frames[frame].setup_command_buffer, &cmdbuf_begin);
+ ERR_FAIL_COND(err);
+ context->set_setup_buffer(frames[frame].setup_command_buffer); //append now so it's added before everything else
+ }
+
+ if (p_current_frame) {
+ VkCommandBufferBeginInfo cmdbuf_begin;
+ cmdbuf_begin.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
+ cmdbuf_begin.pNext = NULL;
+ cmdbuf_begin.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
+ cmdbuf_begin.pInheritanceInfo = NULL;
+
+ VkResult err = vkBeginCommandBuffer(frames[frame].draw_command_buffer, &cmdbuf_begin);
+ ERR_FAIL_COND(err);
+ context->append_command_buffer(frames[frame].draw_command_buffer);
+ }
+}
+
+void RenderingDeviceVulkan::initialize(VulkanContext *p_context) {
+
+ context = p_context;
+ device = p_context->get_device();
+ frame_count = p_context->get_swapchain_image_count() + 1; //always need one extra to ensure it's unused at any time, without having to use a fence for this.
+ limits = p_context->get_device_limits();
+ max_timestamp_query_elements = 256;
+
+ { //initialize allocator
+
+ VmaAllocatorCreateInfo allocatorInfo;
+ memset(&allocatorInfo, 0, sizeof(VmaAllocatorCreateInfo));
+ allocatorInfo.physicalDevice = p_context->get_physical_device();
+ allocatorInfo.device = device;
+ vmaCreateAllocator(&allocatorInfo, &allocator);
+ }
+
+ frames = memnew_arr(Frame, frame_count);
+ frame = 0;
+ //create setup and frame buffers
+ for (int i = 0; i < frame_count; i++) {
+
+ frames[i].index = 0;
+
+ { //create command pool, one per frame is recommended
+ VkCommandPoolCreateInfo cmd_pool_info;
+ cmd_pool_info.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
+ cmd_pool_info.pNext = NULL;
+ cmd_pool_info.queueFamilyIndex = p_context->get_graphics_queue();
+ cmd_pool_info.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
+
+ VkResult res = vkCreateCommandPool(device, &cmd_pool_info, NULL, &frames[i].command_pool);
+ ERR_FAIL_COND(res);
+ }
+
+ { //create command buffers
+
+ VkCommandBufferAllocateInfo cmdbuf;
+ //no command buffer exists, create it.
+ cmdbuf.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
+ cmdbuf.pNext = NULL;
+ cmdbuf.commandPool = frames[i].command_pool;
+ cmdbuf.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
+ cmdbuf.commandBufferCount = 1;
+
+ VkResult err = vkAllocateCommandBuffers(device, &cmdbuf, &frames[i].setup_command_buffer);
+ ERR_CONTINUE(err);
+
+ err = vkAllocateCommandBuffers(device, &cmdbuf, &frames[i].draw_command_buffer);
+ ERR_CONTINUE(err);
+ }
+
+ {
+ //create query pool
+ VkQueryPoolCreateInfo query_pool_create_info;
+ query_pool_create_info.sType = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO;
+ query_pool_create_info.flags = 0;
+ query_pool_create_info.pNext = NULL;
+ query_pool_create_info.queryType = VK_QUERY_TYPE_TIMESTAMP;
+ query_pool_create_info.queryCount = max_timestamp_query_elements;
+ query_pool_create_info.pipelineStatistics = 0;
+
+ vkCreateQueryPool(device, &query_pool_create_info, NULL, &frames[i].timestamp_pool);
+
+ frames[i].timestamp_names = memnew_arr(String, max_timestamp_query_elements);
+ frames[i].timestamp_cpu_values = memnew_arr(uint64_t, max_timestamp_query_elements);
+ frames[i].timestamp_count = 0;
+ frames[i].timestamp_result_names = memnew_arr(String, max_timestamp_query_elements);
+ frames[i].timestamp_cpu_result_values = memnew_arr(uint64_t, max_timestamp_query_elements);
+ frames[i].timestamp_result_values = memnew_arr(uint64_t, max_timestamp_query_elements);
+ frames[i].timestamp_result_count = 0;
+ }
+ }
+
+ {
+ //begin the first command buffer for the first frame, so
+ //setting up things can be done in the meantime until swap_buffers(), which is called before advance.
+ VkCommandBufferBeginInfo cmdbuf_begin;
+ cmdbuf_begin.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
+ cmdbuf_begin.pNext = NULL;
+ cmdbuf_begin.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
+ cmdbuf_begin.pInheritanceInfo = NULL;
+
+ VkResult err = vkBeginCommandBuffer(frames[0].setup_command_buffer, &cmdbuf_begin);
+ ERR_FAIL_COND(err);
+ context->set_setup_buffer(frames[0].setup_command_buffer); //append now so it's added before everything else
+
+ err = vkBeginCommandBuffer(frames[0].draw_command_buffer, &cmdbuf_begin);
+ ERR_FAIL_COND(err);
+ context->append_command_buffer(frames[0].draw_command_buffer);
+ }
+
+ staging_buffer_block_size = GLOBAL_DEF("rendering/vulkan/staging_buffer/block_size_kb", 256);
+ staging_buffer_block_size = MAX(4, staging_buffer_block_size);
+ staging_buffer_block_size *= 1024; //kb -> bytes
+ staging_buffer_max_size = GLOBAL_DEF("rendering/vulkan/staging_buffer/max_size_mb", 128);
+ staging_buffer_max_size = MAX(1, staging_buffer_max_size);
+ staging_buffer_max_size *= 1024 * 1024;
+
+ if (staging_buffer_max_size < staging_buffer_block_size * 4) {
+ //validate enough blocks
+ staging_buffer_max_size = staging_buffer_block_size * 4;
+ }
+ texture_upload_region_size_px = GLOBAL_DEF("rendering/vulkan/staging_buffer/texture_upload_region_size_px", 64);
+ texture_upload_region_size_px = nearest_power_of_2_templated(texture_upload_region_size_px);
+
+ frames_drawn = frame_count; //start from frame count, so everything else is immediately old
+
+ //ensure current staging block is valid and at least one per frame exists
+ staging_buffer_current = 0;
+ staging_buffer_used = false;
+
+ for (int i = 0; i < frame_count; i++) {
+ //staging was never used, create a block
+ Error err = _insert_staging_block();
+ ERR_CONTINUE(err != OK);
+ }
+
+ max_descriptors_per_pool = GLOBAL_DEF("rendering/vulkan/descriptor_pools/max_descriptors_per_pool", 64);
+
+ //check to make sure DescriptorPoolKey is good
+ ERR_FAIL_COND(sizeof(uint64_t) * 3 < UNIFORM_TYPE_MAX * sizeof(uint16_t));
+
+ draw_list = NULL;
+ draw_list_count = 0;
+ draw_list_split = false;
+
+ compute_list = NULL;
+}
+
+template <class T>
+void RenderingDeviceVulkan::_free_rids(T &p_owner, const char *p_type) {
+ List<RID> owned;
+ p_owner.get_owned_list(&owned);
+ if (owned.size()) {
+ WARN_PRINT(itos(owned.size()) + " RIDs of type '" + p_type + "' were leaked.");
+ for (List<RID>::Element *E = owned.front(); E; E = E->next()) {
+ free(E->get());
+ }
+ }
+}
+
+void RenderingDeviceVulkan::capture_timestamp(const String &p_name, bool p_sync_to_draw) {
+
+ ERR_FAIL_COND(frames[frame].timestamp_count >= max_timestamp_query_elements);
+
+ {
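+ //full memory barrier (all commands, all access types) so the timestamp
+ //reflects truly finished work; this is for profiling, not performance.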
+ VkMemoryBarrier memoryBarrier;
+
+ memoryBarrier.sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER;
+ memoryBarrier.pNext = NULL;
+ memoryBarrier.srcAccessMask = VK_ACCESS_INDIRECT_COMMAND_READ_BIT |
+ VK_ACCESS_INDEX_READ_BIT |
+ VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT |
+ VK_ACCESS_UNIFORM_READ_BIT |
+ VK_ACCESS_INPUT_ATTACHMENT_READ_BIT |
+ VK_ACCESS_SHADER_READ_BIT |
+ VK_ACCESS_SHADER_WRITE_BIT |
+ VK_ACCESS_COLOR_ATTACHMENT_READ_BIT |
+ VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT |
+ VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT |
+ VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
+ VK_ACCESS_TRANSFER_READ_BIT |
+ VK_ACCESS_TRANSFER_WRITE_BIT |
+ VK_ACCESS_HOST_READ_BIT |
+ VK_ACCESS_HOST_WRITE_BIT;
+ memoryBarrier.dstAccessMask = VK_ACCESS_INDIRECT_COMMAND_READ_BIT |
+ VK_ACCESS_INDEX_READ_BIT |
+ VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT |
+ VK_ACCESS_UNIFORM_READ_BIT |
+ VK_ACCESS_INPUT_ATTACHMENT_READ_BIT |
+ VK_ACCESS_SHADER_READ_BIT |
+ VK_ACCESS_SHADER_WRITE_BIT |
+ VK_ACCESS_COLOR_ATTACHMENT_READ_BIT |
+ VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT |
+ VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT |
+ VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
+ VK_ACCESS_TRANSFER_READ_BIT |
+ VK_ACCESS_TRANSFER_WRITE_BIT |
+ VK_ACCESS_HOST_READ_BIT |
+ VK_ACCESS_HOST_WRITE_BIT;
+
+ vkCmdPipelineBarrier(p_sync_to_draw ? frames[frame].draw_command_buffer : frames[frame].setup_command_buffer, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, 1, &memoryBarrier, 0, NULL, 0, NULL);
+ }
+ vkCmdWriteTimestamp(p_sync_to_draw ? frames[frame].draw_command_buffer : frames[frame].setup_command_buffer, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, frames[frame].timestamp_pool, frames[frame].timestamp_count);
+ frames[frame].timestamp_names[frames[frame].timestamp_count] = p_name;
+ frames[frame].timestamp_cpu_values[frames[frame].timestamp_count] = OS::get_singleton()->get_ticks_usec();
+ frames[frame].timestamp_count++;
+}
+
+uint32_t RenderingDeviceVulkan::get_captured_timestamps_count() const {
+ return frames[frame].timestamp_result_count;
+}
+
+uint64_t RenderingDeviceVulkan::get_captured_timestamps_frame() const {
+ return frames[frame].index;
+}
+
+uint64_t RenderingDeviceVulkan::get_captured_timestamp_gpu_time(uint32_t p_index) const {
+ ERR_FAIL_UNSIGNED_INDEX_V(p_index, frames[frame].timestamp_result_count, 0);
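+ //timestamps are in GPU ticks; timestampPeriod converts ticks to nanoseconds.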
+ return frames[frame].timestamp_result_values[p_index] * limits.timestampPeriod;
+}
+uint64_t RenderingDeviceVulkan::get_captured_timestamp_cpu_time(uint32_t p_index) const {
+ ERR_FAIL_UNSIGNED_INDEX_V(p_index, frames[frame].timestamp_result_count, 0);
+ return frames[frame].timestamp_cpu_result_values[p_index];
+}
+String RenderingDeviceVulkan::get_captured_timestamp_name(uint32_t p_index) const {
+ ERR_FAIL_UNSIGNED_INDEX_V(p_index, frames[frame].timestamp_result_count, String());
+ return frames[frame].timestamp_result_names[p_index];
+}
+
+int RenderingDeviceVulkan::limit_get(Limit p_limit) {
+ switch (p_limit) {
+ case LIMIT_MAX_BOUND_UNIFORM_SETS: return limits.maxBoundDescriptorSets;
+ case LIMIT_MAX_FRAMEBUFFER_COLOR_ATTACHMENTS: return limits.maxColorAttachments;
+ case LIMIT_MAX_TEXTURES_PER_UNIFORM_SET: return limits.maxDescriptorSetSampledImages;
+ case LIMIT_MAX_SAMPLERS_PER_UNIFORM_SET: return limits.maxDescriptorSetSamplers;
+ case LIMIT_MAX_STORAGE_BUFFERS_PER_UNIFORM_SET: return limits.maxDescriptorSetStorageBuffers;
+ case LIMIT_MAX_STORAGE_IMAGES_PER_UNIFORM_SET: return limits.maxDescriptorSetStorageImages;
+ case LIMIT_MAX_UNIFORM_BUFFERS_PER_UNIFORM_SET: return limits.maxDescriptorSetUniformBuffers;
+ case LIMIT_MAX_DRAW_INDEXED_INDEX: return limits.maxDrawIndexedIndexValue;
+ case LIMIT_MAX_FRAMEBUFFER_HEIGHT: return limits.maxFramebufferHeight;
+ case LIMIT_MAX_FRAMEBUFFER_WIDTH: return limits.maxFramebufferWidth;
+ case LIMIT_MAX_TEXTURE_ARRAY_LAYERS: return limits.maxImageArrayLayers;
+ case LIMIT_MAX_TEXTURE_SIZE_1D: return limits.maxImageDimension1D;
+ case LIMIT_MAX_TEXTURE_SIZE_2D: return limits.maxImageDimension2D;
+ case LIMIT_MAX_TEXTURE_SIZE_3D: return limits.maxImageDimension3D;
+ case LIMIT_MAX_TEXTURE_SIZE_CUBE: return limits.maxImageDimensionCube;
+ case LIMIT_MAX_TEXTURES_PER_SHADER_STAGE: return limits.maxPerStageDescriptorSampledImages;
+ case LIMIT_MAX_SAMPLERS_PER_SHADER_STAGE: return limits.maxPerStageDescriptorSamplers;
+ case LIMIT_MAX_STORAGE_BUFFERS_PER_SHADER_STAGE: return limits.maxPerStageDescriptorStorageBuffers;
+ case LIMIT_MAX_STORAGE_IMAGES_PER_SHADER_STAGE: return limits.maxPerStageDescriptorStorageImages;
+ case LIMIT_MAX_UNIFORM_BUFFERS_PER_SHADER_STAGE: return limits.maxPerStageDescriptorUniformBuffers;
+ case LIMIT_MAX_PUSH_CONSTANT_SIZE: return limits.maxPushConstantsSize;
+ case LIMIT_MAX_UNIFORM_BUFFER_SIZE: return limits.maxUniformBufferRange;
+ case LIMIT_MAX_VERTEX_INPUT_ATTRIBUTE_OFFSET: return limits.maxVertexInputAttributeOffset;
+ case LIMIT_MAX_VERTEX_INPUT_ATTRIBUTES: return limits.maxVertexInputAttributes;
+ case LIMIT_MAX_VERTEX_INPUT_BINDINGS: return limits.maxVertexInputBindings;
+ case LIMIT_MAX_VERTEX_INPUT_BINDING_STRIDE: return limits.maxVertexInputBindingStride;
+ case LIMIT_MIN_UNIFORM_BUFFER_OFFSET_ALIGNMENT: return limits.minUniformBufferOffsetAlignment;
+ case LIMIT_MAX_COMPUTE_WORKGROUP_COUNT_X: return limits.maxComputeWorkGroupCount[0];
+ case LIMIT_MAX_COMPUTE_WORKGROUP_COUNT_Y: return limits.maxComputeWorkGroupCount[1];
+ case LIMIT_MAX_COMPUTE_WORKGROUP_COUNT_Z: return limits.maxComputeWorkGroupCount[2];
+ case LIMIT_MAX_COMPUTE_WORKGROUP_INVOCATIONS: return limits.maxComputeWorkGroupInvocations;
+ case LIMIT_MAX_COMPUTE_WORKGROUP_SIZE_X: return limits.maxComputeWorkGroupSize[0];
+ case LIMIT_MAX_COMPUTE_WORKGROUP_SIZE_Y: return limits.maxComputeWorkGroupSize[1];
+ case LIMIT_MAX_COMPUTE_WORKGROUP_SIZE_Z: return limits.maxComputeWorkGroupSize[2];
+
+ default: ERR_FAIL_V(0);
+ }
+
+ return 0;
+}
+
+void RenderingDeviceVulkan::finalize() {
+
+ //free all resources
+
+ _flush(false);
+
+ _free_rids(render_pipeline_owner, "Pipeline");
+ _free_rids(compute_pipeline_owner, "Compute");
+ _free_rids(uniform_set_owner, "UniformSet");
+ _free_rids(texture_buffer_owner, "TextureBuffer");
+ _free_rids(storage_buffer_owner, "StorageBuffer");
+ _free_rids(uniform_buffer_owner, "UniformBuffer");
+ _free_rids(shader_owner, "Shader");
+ _free_rids(index_array_owner, "IndexArray");
+ _free_rids(index_buffer_owner, "IndexBuffer");
+ _free_rids(vertex_array_owner, "VertexArray");
+ _free_rids(vertex_buffer_owner, "VertexBuffer");
+ _free_rids(framebuffer_owner, "Framebuffer");
+ _free_rids(sampler_owner, "Sampler");
+ {
+ //for textures it's a bit more difficult because they may be shared
+ List<RID> owned;
+ texture_owner.get_owned_list(&owned);
+ if (owned.size()) {
+ WARN_PRINT(itos(owned.size()) + " RIDs of type 'Texture' were leaked.");
+ //free shared first
+ for (List<RID>::Element *E = owned.front(); E;) {
+
+ List<RID>::Element *N = E->next();
+ if (texture_is_shared(E->get())) {
+ free(E->get());
+ owned.erase(E->get());
+ }
+ E = N;
+ }
+		//free non-shared second; this avoids an error from trying to free nonexistent textures due to dependencies.
+ for (List<RID>::Element *E = owned.front(); E; E = E->next()) {
+ free(E->get());
+ }
+ }
+ }
+
+ //free everything pending
+ for (int i = 0; i < frame_count; i++) {
+ int f = (frame + i) % frame_count;
+ _free_pending_resources(f);
+ vkDestroyCommandPool(device, frames[i].command_pool, NULL);
+ vkDestroyQueryPool(device, frames[i].timestamp_pool, NULL);
+ memdelete_arr(frames[i].timestamp_names);
+ memdelete_arr(frames[i].timestamp_cpu_values);
+ memdelete_arr(frames[i].timestamp_result_names);
+ memdelete_arr(frames[i].timestamp_result_values);
+ memdelete_arr(frames[i].timestamp_cpu_result_values);
+ }
+
+ for (int i = 0; i < split_draw_list_allocators.size(); i++) {
+ vkDestroyCommandPool(device, split_draw_list_allocators[i].command_pool, NULL);
+ }
+
+ memdelete_arr(frames);
+
+ for (int i = 0; i < staging_buffer_blocks.size(); i++) {
+ vmaDestroyBuffer(allocator, staging_buffer_blocks[i].buffer, staging_buffer_blocks[i].allocation);
+ }
+
+ //all these should be clear at this point
+ ERR_FAIL_COND(descriptor_pools.size());
+ ERR_FAIL_COND(dependency_map.size());
+ ERR_FAIL_COND(reverse_dependency_map.size());
+}
+
+RenderingDeviceVulkan::RenderingDeviceVulkan() {
+ screen_prepared = false;
+}
diff --git a/drivers/vulkan/rendering_device_vulkan.h b/drivers/vulkan/rendering_device_vulkan.h
new file mode 100644
index 0000000000..30c10e922e
--- /dev/null
+++ b/drivers/vulkan/rendering_device_vulkan.h
@@ -0,0 +1,1129 @@
+/*************************************************************************/
+/* rendering_device_vulkan.h */
+/*************************************************************************/
+/* This file is part of: */
+/* GODOT ENGINE */
+/* https://godotengine.org */
+/*************************************************************************/
+/* Copyright (c) 2007-2020 Juan Linietsky, Ariel Manzur. */
+/* Copyright (c) 2014-2020 Godot Engine contributors (cf. AUTHORS.md). */
+/* */
+/* Permission is hereby granted, free of charge, to any person obtaining */
+/* a copy of this software and associated documentation files (the */
+/* "Software"), to deal in the Software without restriction, including */
+/* without limitation the rights to use, copy, modify, merge, publish, */
+/* distribute, sublicense, and/or sell copies of the Software, and to */
+/* permit persons to whom the Software is furnished to do so, subject to */
+/* the following conditions: */
+/* */
+/* The above copyright notice and this permission notice shall be */
+/* included in all copies or substantial portions of the Software. */
+/* */
+/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
+/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
+/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/
+/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
+/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
+/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
+/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
+/*************************************************************************/
+
+#ifndef RENDERING_DEVICE_VULKAN_H
+#define RENDERING_DEVICE_VULKAN_H
+
+#include "core/oa_hash_map.h"
+#include "core/os/thread_safe.h"
+#include "core/rid_owner.h"
+#include "servers/visual/rendering_device.h"
+
+#ifdef DEBUG_ENABLED
+#define _DEBUG
+#endif
+#include "vk_mem_alloc.h"
+#include <vulkan/vulkan.h>
+//todo:
+//compute
+//push constants
+//views of texture slices
+
+class VulkanContext;
+
+class RenderingDeviceVulkan : public RenderingDevice {
+
+ _THREAD_SAFE_CLASS_
+
+	// Miscellaneous tables that map our enums to the enums used by Vulkan.
+
+ VkPhysicalDeviceLimits limits;
+ static const VkFormat vulkan_formats[DATA_FORMAT_MAX];
+ static const char *named_formats[DATA_FORMAT_MAX];
+ static const VkCompareOp compare_operators[COMPARE_OP_MAX];
+ static const VkStencilOp stencil_operations[STENCIL_OP_MAX];
+ static const VkSampleCountFlagBits rasterization_sample_count[TEXTURE_SAMPLES_MAX];
+ static const VkLogicOp logic_operations[RenderingDevice::LOGIC_OP_MAX];
+ static const VkBlendFactor blend_factors[RenderingDevice::BLEND_FACTOR_MAX];
+ static const VkBlendOp blend_operations[RenderingDevice::BLEND_OP_MAX];
+ static const VkSamplerAddressMode address_modes[SAMPLER_REPEAT_MODE_MAX];
+ static const VkBorderColor sampler_border_colors[SAMPLER_BORDER_COLOR_MAX];
+ static const VkImageType vulkan_image_type[TEXTURE_TYPE_MAX];
+
+	// Functions used for format validation; they ensure the user passes valid data.
+
+ static int get_format_vertex_size(DataFormat p_format);
+ static uint32_t get_image_format_pixel_size(DataFormat p_format);
+ static void get_compressed_image_format_block_dimensions(DataFormat p_format, uint32_t &r_w, uint32_t &r_h);
+ uint32_t get_compressed_image_format_block_byte_size(DataFormat p_format);
+ static uint32_t get_compressed_image_format_pixel_rshift(DataFormat p_format);
+ static uint32_t get_image_format_required_size(DataFormat p_format, uint32_t p_width, uint32_t p_height, uint32_t p_depth, uint32_t p_mipmaps, uint32_t *r_blockw = NULL, uint32_t *r_blockh = NULL, uint32_t *r_depth = NULL);
+ static uint32_t get_image_required_mipmaps(uint32_t p_width, uint32_t p_height, uint32_t p_depth);
+ static bool format_has_stencil(DataFormat p_format);
+
+ /***************************/
+ /**** ID INFRASTRUCTURE ****/
+ /***************************/
+
+ enum IDType {
+ ID_TYPE_FRAMEBUFFER_FORMAT,
+ ID_TYPE_VERTEX_FORMAT,
+ ID_TYPE_DRAW_LIST,
+ ID_TYPE_SPLIT_DRAW_LIST,
+ ID_TYPE_COMPUTE_LIST,
+ ID_TYPE_MAX,
+		ID_BASE_SHIFT = 58 // high 6 bits reserved for ID types
+ };
+
+ VkDevice device;
+
+ Map<RID, Set<RID> > dependency_map; //IDs to IDs that depend on it
+ Map<RID, Set<RID> > reverse_dependency_map; //same as above, but in reverse
+
+ void _add_dependency(RID p_id, RID p_depends_on);
+ void _free_dependencies(RID p_id);
+
+ /*****************/
+ /**** TEXTURE ****/
+ /*****************/
+
+	// In Vulkan, the concept of a texture does not exist;
+	// instead there is the image (pretty much the memory),
+	// the view (how the memory is interpreted) and the
+	// sampler (how it's sampled from the shader).
+	//
+	// Texture here includes the first two stages, and
+	// it's possible to create textures sharing the image
+	// but with different views. The main use case for this
+	// is textures that can be read as both SRGB/Linear,
+	// or slices of a texture (a mipmap, a layer, a 3D slice)
+	// for a framebuffer to render into it.
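+	//
+	// A minimal usage sketch (names like `rd` and `base_rid` are illustrative):
+	// assuming `base_rid` was created with an UNORM format that lists the SRGB
+	// variant among its shareable formats, a second RID can reinterpret the
+	// same VkImage through a different VkImageView:
+	//
+	//     RenderingDevice::TextureView srgb_view;
+	//     srgb_view.format_override = DATA_FORMAT_R8G8B8A8_SRGB;
+	//     RID srgb_rid = rd->texture_create_shared(srgb_view, base_rid);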
+
+ struct Texture {
+
+ VkImage image;
+ VmaAllocation allocation;
+ VmaAllocationInfo allocation_info;
+ VkImageView view;
+
+ TextureType type;
+ DataFormat format;
+ TextureSamples samples;
+ uint32_t width;
+ uint32_t height;
+ uint32_t depth;
+ uint32_t layers;
+ uint32_t mipmaps;
+ uint32_t usage_flags;
+
+ Vector<DataFormat> allowed_shared_formats;
+
+ VkImageLayout layout;
+
+ uint32_t read_aspect_mask;
+ uint32_t barrier_aspect_mask;
+		bool bound; //bound to framebuffer
+ RID owner;
+ };
+
+ RID_Owner<Texture, true> texture_owner;
+ uint32_t texture_upload_region_size_px;
+
+ Vector<uint8_t> _texture_get_data_from_image(Texture *tex, VkImage p_image, VmaAllocation p_allocation, uint32_t p_layer, bool p_2d = false);
+
+ /*****************/
+ /**** SAMPLER ****/
+ /*****************/
+
+ RID_Owner<VkSampler> sampler_owner;
+
+ /***************************/
+ /**** BUFFER MANAGEMENT ****/
+ /***************************/
+
+	// These are temporary buffers on CPU memory that hold
+	// the data until the GPU fetches it and places it
+	// either on GPU buffers or images (textures). This ensures
+	// updates are properly synchronized with whatever the
+	// GPU is doing.
+	//
+	// The logic here is as follows: only 3 of these
+	// blocks are created at the beginning (one per frame).
+	// Each can belong to a frame (assigned to the current one
+	// when used), and can only be reused after that same frame
+	// is recycled.
+	//
+	// When the CPU needs to allocate more than what is available,
+	// more of these buffers are created. If a limit is reached,
+	// a fence will wait until the blocks allocated in previous
+	// frames are processed. If that fails, another fence will
+	// ensure everything pending for the current frame is
+	// processed (effectively stalling).
+ //
+ // See the comments in the code to understand better how it works.
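+	//
+	// A condensed sketch of the decision made by _staging_buffer_allocate()
+	// (see the .cpp for the real logic):
+	//
+	//     if (current block has enough free space) {
+	//         suballocate from it;
+	//     } else if (total staging size < staging_buffer_max_size) {
+	//         _insert_staging_block(); // grow
+	//     } else {
+	//         wait for the oldest frame still using a block; if everything
+	//         belongs to the current frame, flush and stall on it.
+	//     }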
+
+ struct StagingBufferBlock {
+ VkBuffer buffer;
+ VmaAllocation allocation;
+ uint64_t frame_used;
+ uint32_t fill_amount;
+ };
+
+ Vector<StagingBufferBlock> staging_buffer_blocks;
+ int staging_buffer_current;
+ uint32_t staging_buffer_block_size;
+ uint64_t staging_buffer_max_size;
+ bool staging_buffer_used;
+
+ Error _staging_buffer_allocate(uint32_t p_amount, uint32_t p_required_align, uint32_t &r_alloc_offset, uint32_t &r_alloc_size, bool p_can_segment = true, bool p_on_draw_command_buffer = false);
+ Error _insert_staging_block();
+
+ struct Buffer {
+
+ uint32_t size;
+ VkBuffer buffer;
+ VmaAllocation allocation;
+ VkDescriptorBufferInfo buffer_info; //used for binding
+ Buffer() {
+ size = 0;
+ buffer = NULL;
+ allocation = NULL;
+ }
+ };
+
+ Error _buffer_allocate(Buffer *p_buffer, uint32_t p_size, uint32_t p_usage, VmaMemoryUsage p_mapping);
+ Error _buffer_free(Buffer *p_buffer);
+ Error _buffer_update(Buffer *p_buffer, size_t p_offset, const uint8_t *p_data, size_t p_data_size, bool p_use_draw_command_buffer = false, uint32_t p_required_align = 32);
+
+ void _full_barrier(bool p_sync_with_draw);
+	void _memory_barrier(VkPipelineStageFlags p_src_stage_mask, VkPipelineStageFlags p_dst_stage_mask, VkAccessFlags p_src_access, VkAccessFlags p_dst_access, bool p_sync_with_draw);
+	void _buffer_memory_barrier(VkBuffer buffer, uint64_t p_from, uint64_t p_size, VkPipelineStageFlags p_src_stage_mask, VkPipelineStageFlags p_dst_stage_mask, VkAccessFlags p_src_access, VkAccessFlags p_dst_access, bool p_sync_with_draw);
+
+ /*********************/
+ /**** FRAMEBUFFER ****/
+ /*********************/
+
+	// In Vulkan, framebuffers work similarly to how they
+	// do in OpenGL, with the exception that
+	// the "format" (VkRenderPass) is not dynamic
+	// and must be more or less the same as the one
+	// used for the render pipelines.
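+	//
+	// A sketch of obtaining a reusable format ID for one color attachment
+	// (field names match the key below; `rd` is illustrative):
+	//
+	//     RenderingDevice::AttachmentFormat af;
+	//     af.format = DATA_FORMAT_R8G8B8A8_UNORM;
+	//     af.samples = TEXTURE_SAMPLES_1;
+	//     af.usage_flags = TEXTURE_USAGE_COLOR_ATTACHMENT_BIT;
+	//     Vector<RenderingDevice::AttachmentFormat> attachments;
+	//     attachments.push_back(af);
+	//     FramebufferFormatID fb_format = rd->framebuffer_format_create(attachments);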
+
+ struct FramebufferFormatKey {
+ Vector<AttachmentFormat> attachments;
+ bool operator<(const FramebufferFormatKey &p_key) const {
+
+ int as = attachments.size();
+ int bs = p_key.attachments.size();
+ if (as != bs) {
+ return as < bs;
+ }
+
+ const AttachmentFormat *af_a = attachments.ptr();
+ const AttachmentFormat *af_b = p_key.attachments.ptr();
+ for (int i = 0; i < as; i++) {
+ const AttachmentFormat &a = af_a[i];
+ const AttachmentFormat &b = af_b[i];
+ if (a.format != b.format) {
+ return a.format < b.format;
+ }
+ if (a.samples != b.samples) {
+ return a.samples < b.samples;
+ }
+ if (a.usage_flags != b.usage_flags) {
+ return a.usage_flags < b.usage_flags;
+ }
+ }
+
+ return false; //equal
+ }
+ };
+
+ VkRenderPass _render_pass_create(const Vector<AttachmentFormat> &p_format, InitialAction p_initial_action, FinalAction p_final_action, InitialAction p_initial_depth_action, FinalAction p_final_depthcolor_action, int *r_color_attachment_count = NULL);
+
+	// This is a cache and it's never freed; it ensures
+	// IDs for a given format are always unique.
+ Map<FramebufferFormatKey, FramebufferFormatID> framebuffer_format_cache;
+ struct FramebufferFormat {
+ const Map<FramebufferFormatKey, FramebufferFormatID>::Element *E;
+ VkRenderPass render_pass; //here for constructing shaders, never used, see section (7.2. Render Pass Compatibility from Vulkan spec)
+ int color_attachments; //used for pipeline validation
+ TextureSamples samples;
+ };
+
+ Map<FramebufferFormatID, FramebufferFormat> framebuffer_formats;
+
+ struct Framebuffer {
+ FramebufferFormatID format_id;
+ struct VersionKey {
+ InitialAction initial_color_action;
+ FinalAction final_color_action;
+ InitialAction initial_depth_action;
+ FinalAction final_depth_action;
+ bool operator<(const VersionKey &p_key) const {
+ if (initial_color_action == p_key.initial_color_action) {
+ if (final_color_action == p_key.final_color_action) {
+ if (initial_depth_action == p_key.initial_depth_action) {
+ return final_depth_action < p_key.final_depth_action;
+ } else {
+ return initial_depth_action < p_key.initial_depth_action;
+ }
+ } else {
+ return final_color_action < p_key.final_color_action;
+ }
+ } else {
+ return initial_color_action < p_key.initial_color_action;
+ }
+ }
+ };
+
+ uint32_t storage_mask;
+ Vector<RID> texture_ids;
+
+ struct Version {
+ VkFramebuffer framebuffer;
+ VkRenderPass render_pass; //this one is owned
+ };
+
+ Map<VersionKey, Version> framebuffers;
+ Size2 size;
+ };
+
+ RID_Owner<Framebuffer, true> framebuffer_owner;
+
+ /***********************/
+ /**** VERTEX BUFFER ****/
+ /***********************/
+
+	// Vertex buffers in Vulkan are similar to how
+	// they work in OpenGL, except that instead of
+	// an attribute index, there is a buffer binding
+	// index (for binding the buffers in real-time)
+	// and a location index (what is used in the shader).
+	//
+	// This mapping is done here internally, and it's not
+	// exposed.
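+	//
+	// Sketch: creating a format for a single vec3 position attribute
+	// (only the shader location is supplied; the binding index is internal):
+	//
+	//     RenderingDevice::VertexDescription vd;
+	//     vd.location = 0; // layout(location = 0) in the vertex shader
+	//     vd.offset = 0;
+	//     vd.format = DATA_FORMAT_R32G32B32_SFLOAT;
+	//     vd.stride = 3 * sizeof(float);
+	//     Vector<RenderingDevice::VertexDescription> vds;
+	//     vds.push_back(vd);
+	//     VertexFormatID vf = rd->vertex_format_create(vds);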
+
+ RID_Owner<Buffer, true> vertex_buffer_owner;
+
+ struct VertexDescriptionKey {
+ Vector<VertexDescription> vertex_formats;
+ bool operator==(const VertexDescriptionKey &p_key) const {
+ int vdc = vertex_formats.size();
+ int vdck = p_key.vertex_formats.size();
+
+ if (vdc != vdck) {
+ return false;
+ } else {
+ const VertexDescription *a_ptr = vertex_formats.ptr();
+ const VertexDescription *b_ptr = p_key.vertex_formats.ptr();
+ for (int i = 0; i < vdc; i++) {
+ const VertexDescription &a = a_ptr[i];
+ const VertexDescription &b = b_ptr[i];
+
+ if (a.location != b.location) {
+ return false;
+ }
+ if (a.offset != b.offset) {
+ return false;
+ }
+ if (a.format != b.format) {
+ return false;
+ }
+ if (a.stride != b.stride) {
+ return false;
+ }
+ if (a.frequency != b.frequency) {
+ return false;
+ }
+ }
+ return true; //they are equal
+ }
+ }
+
+ uint32_t hash() const {
+ int vdc = vertex_formats.size();
+ uint32_t h = hash_djb2_one_32(vdc);
+ const VertexDescription *ptr = vertex_formats.ptr();
+ for (int i = 0; i < vdc; i++) {
+ const VertexDescription &vd = ptr[i];
+ h = hash_djb2_one_32(vd.location, h);
+ h = hash_djb2_one_32(vd.offset, h);
+ h = hash_djb2_one_32(vd.format, h);
+ h = hash_djb2_one_32(vd.stride, h);
+ h = hash_djb2_one_32(vd.frequency, h);
+ }
+ return h;
+ }
+ };
+
+ struct VertexDescriptionHash {
+ static _FORCE_INLINE_ uint32_t hash(const VertexDescriptionKey &p_key) {
+ return p_key.hash();
+ }
+ };
+
+	// This is a cache and it's never freed; it ensures that
+	// the ID used for a specific format always remains the same.
+ HashMap<VertexDescriptionKey, VertexFormatID, VertexDescriptionHash> vertex_format_cache;
+
+ struct VertexDescriptionCache {
+ Vector<VertexDescription> vertex_formats;
+ VkVertexInputBindingDescription *bindings;
+ VkVertexInputAttributeDescription *attributes;
+ VkPipelineVertexInputStateCreateInfo create_info;
+ };
+
+ Map<VertexFormatID, VertexDescriptionCache> vertex_formats;
+
+ struct VertexArray {
+ RID buffer;
+ VertexFormatID description;
+ int vertex_count;
+ uint32_t max_instances_allowed;
+
+ Vector<VkBuffer> buffers; //not owned, just referenced
+ Vector<VkDeviceSize> offsets;
+ };
+
+ RID_Owner<VertexArray, true> vertex_array_owner;
+
+ struct IndexBuffer : public Buffer {
+ uint32_t max_index; //used for validation
+ uint32_t index_count;
+ VkIndexType index_type;
+ bool supports_restart_indices;
+ };
+
+ RID_Owner<IndexBuffer, true> index_buffer_owner;
+
+ struct IndexArray {
+ uint32_t max_index; //remember the maximum index here too, for validation
+ VkBuffer buffer; //not owned, inherited from index buffer
+ uint32_t offset;
+ uint32_t indices;
+ VkIndexType index_type;
+ bool supports_restart_indices;
+ };
+
+ RID_Owner<IndexArray, true> index_array_owner;
+
+ /****************/
+ /**** SHADER ****/
+ /****************/
+
+ // Vulkan specifies a really complex behavior for the application
+ // in order to tell when descriptor sets need to be re-bound (or not).
+ // "When binding a descriptor set (see Descriptor Set Binding) to set
+ // number N, if the previously bound descriptor sets for sets zero
+ // through N-1 were all bound using compatible pipeline layouts,
+ // then performing this binding does not disturb any of the lower numbered sets.
+ // If, additionally, the previous bound descriptor set for set N was
+ // bound using a pipeline layout compatible for set N, then the bindings
+ // in sets numbered greater than N are also not disturbed."
+	// As a result, we need to figure out quickly when something is no longer "compatible"
+	// in order to avoid costly rebinds.
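+	//
+	// In this implementation that check is the `invalid_set_from` index kept
+	// in the draw/compute list state below: when a newly bound pipeline's set
+	// formats diverge at index N, sets N and above are flagged for rebinding
+	// while sets 0..N-1 stay bound.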
+
+ enum {
+ MAX_UNIFORM_SETS = 16
+ };
+
+ struct UniformInfo {
+ UniformType type;
+ int binding;
+ uint32_t stages;
+ int length; //size of arrays (in total elements), or ubos (in bytes * total elements)
+
+ bool operator!=(const UniformInfo &p_info) const {
+ return (binding != p_info.binding || type != p_info.type || stages != p_info.stages || length != p_info.length);
+ }
+
+ bool operator<(const UniformInfo &p_info) const {
+ if (binding != p_info.binding) {
+ return binding < p_info.binding;
+ }
+ if (type != p_info.type) {
+ return type < p_info.type;
+ }
+ if (stages != p_info.stages) {
+ return stages < p_info.stages;
+ }
+ return length < p_info.length;
+ }
+ };
+
+ struct UniformSetFormat {
+ Vector<UniformInfo> uniform_info;
+ bool operator<(const UniformSetFormat &p_format) const {
+ uint32_t size = uniform_info.size();
+ uint32_t psize = p_format.uniform_info.size();
+
+ if (size != psize) {
+ return size < psize;
+ }
+
+ const UniformInfo *infoptr = uniform_info.ptr();
+ const UniformInfo *pinfoptr = p_format.uniform_info.ptr();
+
+ for (uint32_t i = 0; i < size; i++) {
+ if (infoptr[i] != pinfoptr[i]) {
+ return infoptr[i] < pinfoptr[i];
+ }
+ }
+
+ return false;
+ }
+ };
+
+	// Always grows, never shrinks, ensuring unique IDs, but we assume
+	// the number of formats will never be a problem, as the number of shaders
+	// in a game is limited.
+ Map<UniformSetFormat, uint32_t> uniform_set_format_cache;
+
+ // Shaders in Vulkan are just pretty much
+ // precompiled blocks of SPIR-V bytecode. They
+ // are most likely not really compiled to host
+ // assembly until a pipeline is created.
+ //
+ // When supplying the shaders, this implementation
+ // will use the reflection abilities of glslang to
+ // understand and cache everything required to
+ // create and use the descriptor sets (Vulkan's
+ // biggest pain).
+ //
+ // Additionally, hashes are created for every set
+ // to do quick validation and ensuring the user
+ // does not submit something invalid.
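+	//
+	// Creation sketch (stage and bytecode field names assumed from the public
+	// RenderingDevice API; the SPIR-V is compiled elsewhere):
+	//
+	//     Vector<RenderingDevice::ShaderStageData> stages;
+	//     RenderingDevice::ShaderStageData vert;
+	//     vert.shader_stage = SHADER_STAGE_VERTEX;
+	//     vert.spir_v = vertex_spirv_bytes; // Vector<uint8_t>
+	//     stages.push_back(vert);
+	//     RID shader = rd->shader_create(stages);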
+
+ struct Shader {
+
+ struct Set {
+
+ Vector<UniformInfo> uniform_info;
+ VkDescriptorSetLayout descriptor_set_layout;
+ };
+
+ uint32_t vertex_input_mask; //inputs used, this is mostly for validation
+ int fragment_outputs;
+
+ struct PushConstant {
+ uint32_t push_constant_size;
+ uint32_t push_constants_vk_stage;
+ };
+
+ PushConstant push_constant;
+
+ bool is_compute = false;
+ int max_output;
+ Vector<Set> sets;
+ Vector<uint32_t> set_formats;
+ Vector<VkPipelineShaderStageCreateInfo> pipeline_stages;
+ VkPipelineLayout pipeline_layout;
+ };
+
+ String _shader_uniform_debug(RID p_shader, int p_set = -1);
+
+ RID_Owner<Shader, true> shader_owner;
+
+ /******************/
+ /**** UNIFORMS ****/
+ /******************/
+
+	// Descriptor sets require allocation from a pool.
+	// The documentation on how to use pools properly
+	// is scarce, and what little exists is strange.
+	//
+	// Basically, you can mix and match pools as you
+	// like, but you'll run into fragmentation issues.
+	// Because of this, the recommended approach is to
+	// create a pool for every descriptor set type, as
+	// this prevents fragmentation.
+	//
+	// This is implemented here as having a list of
+	// pools (each can contain up to 64 sets) for each
+	// set layout. The amount of sets for each type
+	// is used as the key.
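+	//
+	// Sketch: a key counting 2 uniform buffers and 1 sampled image per set
+	// maps to VkDescriptorPoolSize entries roughly like:
+	//
+	//     VkDescriptorPoolSize sizes[] = {
+	//         { VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 2 * max_descriptors_per_pool },
+	//         { VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 1 * max_descriptors_per_pool },
+	//     };
+	//
+	// so every set allocated from that pool can hold its worst-case counts.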
+
+ enum {
+ MAX_DESCRIPTOR_POOL_ELEMENT = 65535
+ };
+
+ struct DescriptorPoolKey {
+ union {
+ struct {
+ uint16_t uniform_type[UNIFORM_TYPE_MAX]; //using 16 bits because, for sending arrays, each element is a pool set.
+ };
+ struct {
+ uint64_t key1;
+ uint64_t key2;
+ uint64_t key3;
+ };
+ };
+ bool operator<(const DescriptorPoolKey &p_key) const {
+ if (key1 != p_key.key1) {
+ return key1 < p_key.key1;
+ }
+ if (key2 != p_key.key2) {
+ return key2 < p_key.key2;
+ }
+
+ return key3 < p_key.key3;
+ }
+ DescriptorPoolKey() {
+ key1 = 0;
+ key2 = 0;
+ key3 = 0;
+ }
+ };
+
+ struct DescriptorPool {
+ VkDescriptorPool pool;
+ uint32_t usage;
+ };
+
+ Map<DescriptorPoolKey, Set<DescriptorPool *> > descriptor_pools;
+ uint32_t max_descriptors_per_pool;
+
+ DescriptorPool *_descriptor_pool_allocate(const DescriptorPoolKey &p_key);
+ void _descriptor_pool_free(const DescriptorPoolKey &p_key, DescriptorPool *p_pool);
+
+ RID_Owner<Buffer, true> uniform_buffer_owner;
+ RID_Owner<Buffer, true> storage_buffer_owner;
+
+ //texture buffer needs a view
+ struct TextureBuffer {
+ Buffer buffer;
+ VkBufferView view;
+ };
+
+ RID_Owner<TextureBuffer, true> texture_buffer_owner;
+
+	// This structure contains the descriptor set. Descriptor sets _need_ to be
+	// allocated for a shader (and will be erased when that shader is erased),
+	// but should work for other shaders as long as the hash matches. This
+	// covers using them in shader variants.
+	//
+	// Keep also in mind that you can share buffers between descriptor sets, so
+	// the above restriction is not too serious.
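+	//
+	// Creation sketch (Uniform field names assumed from the public API;
+	// `rd`, `ubo_rid` and `shader_rid` are illustrative):
+	//
+	//     RenderingDevice::Uniform u;
+	//     u.type = UNIFORM_TYPE_UNIFORM_BUFFER;
+	//     u.binding = 0; // must match the shader's binding within this set
+	//     u.ids.push_back(ubo_rid);
+	//     Vector<RenderingDevice::Uniform> uniforms;
+	//     uniforms.push_back(u);
+	//     RID set = rd->uniform_set_create(uniforms, shader_rid, 0 /* set index */);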
+
+ struct UniformSet {
+ uint32_t format;
+ RID shader_id;
+ uint32_t shader_set;
+ DescriptorPool *pool;
+ DescriptorPoolKey pool_key;
+ VkDescriptorSet descriptor_set;
+ //VkPipelineLayout pipeline_layout; //not owned, inherited from shader
+ Vector<RID> attachable_textures; //used for validation
+ Vector<Texture *> mutable_sampled_textures; //used for layout change
+ Vector<Texture *> mutable_storage_textures; //used for layout change
+ };
+
+ RID_Owner<UniformSet, true> uniform_set_owner;
+
+ /*******************/
+ /**** PIPELINES ****/
+ /*******************/
+
+ // Render pipeline contains ALL the
+ // information required for drawing.
+ // This includes all the rasterizer state
+ // as well as shader used, framebuffer format,
+ // etc.
+ // While the pipeline is just a single object
+ // (VkPipeline) a lot of values are also saved
+	// here to do validation (Vulkan does none by
+ // default) and warn the user if something
+ // was not supplied as intended.
+
+ struct RenderPipeline {
+ //Cached values for validation
+#ifdef DEBUG_ENABLED
+ struct Validation {
+ FramebufferFormatID framebuffer_format;
+ uint32_t dynamic_state;
+ VertexFormatID vertex_format;
+ bool uses_restart_indices;
+ uint32_t primitive_minimum;
+ uint32_t primitive_divisor;
+ } validation;
+#endif
+ //Actual pipeline
+ RID shader;
+ Vector<uint32_t> set_formats;
+ VkPipelineLayout pipeline_layout; // not owned, needed for push constants
+ VkPipeline pipeline;
+ uint32_t push_constant_size;
+ uint32_t push_constant_stages;
+ };
+
+ RID_Owner<RenderPipeline, true> render_pipeline_owner;
+
+ struct ComputePipeline {
+
+ RID shader;
+ Vector<uint32_t> set_formats;
+ VkPipelineLayout pipeline_layout; // not owned, needed for push constants
+ VkPipeline pipeline;
+ uint32_t push_constant_size;
+ uint32_t push_constant_stages;
+ };
+
+ RID_Owner<ComputePipeline, true> compute_pipeline_owner;
+
+ /*******************/
+ /**** DRAW LIST ****/
+ /*******************/
+
+ // Draw list contains both the command buffer
+ // used for drawing as well as a LOT of
+ // information used for validation. This
+ // validation is cheap so most of it can
+ // also run in release builds.
+
+	// When using split command lists, this is
+	// implemented internally using secondary command
+	// buffers. As they can be created in threads,
+	// each needs its own command pool.
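+	//
+	// Threaded recording sketch (one list per thread; the action enum values
+	// are illustrative):
+	//
+	//     DrawListID lists[4];
+	//     rd->draw_list_begin_split(framebuffer, 4, lists,
+	//             INITIAL_ACTION_CLEAR, FINAL_ACTION_READ_COLOR_AND_DEPTH,
+	//             INITIAL_ACTION_CLEAR, FINAL_ACTION_READ_COLOR_AND_DEPTH);
+	//     // each thread records into its own lists[i], then one thread calls:
+	//     rd->draw_list_end();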
+
+ struct SplitDrawListAllocator {
+ VkCommandPool command_pool;
+ Vector<VkCommandBuffer> command_buffers; //one for each frame
+ };
+
+ Vector<SplitDrawListAllocator> split_draw_list_allocators;
+
+ struct DrawList {
+
+ VkCommandBuffer command_buffer; //if persistent, this is owned, otherwise it's shared with the ringbuffer
+ Rect2i viewport;
+
+ struct SetState {
+ uint32_t pipeline_expected_format;
+ uint32_t uniform_set_format;
+ VkDescriptorSet descriptor_set;
+ RID uniform_set;
+ bool bound;
+ SetState() {
+ bound = false;
+ pipeline_expected_format = 0;
+ uniform_set_format = 0;
+ descriptor_set = VK_NULL_HANDLE;
+ }
+ };
+
+ struct State {
+ SetState sets[MAX_UNIFORM_SETS];
+ uint32_t set_count;
+ RID pipeline;
+ RID pipeline_shader;
+ VkPipelineLayout pipeline_layout;
+ RID vertex_array;
+ RID index_array;
+ uint32_t pipeline_push_constant_stages;
+
+ State() {
+ set_count = 0;
+ pipeline_layout = VK_NULL_HANDLE;
+ pipeline_push_constant_stages = 0;
+ }
+ } state;
+#ifdef DEBUG_ENABLED
+
+ struct Validation {
+			bool active; //means the command buffer was not closed, so you can keep adding things
+ FramebufferFormatID framebuffer_format;
+ //actual render pass values
+ uint32_t dynamic_state;
+ VertexFormatID vertex_format; //INVALID_ID if not set
+ uint32_t vertex_array_size; //0 if not set
+ uint32_t vertex_max_instances_allowed;
+ bool index_buffer_uses_restart_indices;
+ uint32_t index_array_size; //0 if index buffer not set
+ uint32_t index_array_max_index;
+ uint32_t index_array_offset;
+ Vector<uint32_t> set_formats;
+ Vector<bool> set_bound;
+ Vector<RID> set_rids;
+ //last pipeline set values
+ bool pipeline_active;
+ uint32_t pipeline_dynamic_state;
+ VertexFormatID pipeline_vertex_format;
+ RID pipeline_shader;
+ uint32_t invalid_set_from;
+ bool pipeline_uses_restart_indices;
+ uint32_t pipeline_primitive_divisor;
+ uint32_t pipeline_primitive_minimum;
+ Vector<uint32_t> pipeline_set_formats;
+ uint32_t pipeline_push_constant_size;
+ bool pipeline_push_constant_suppplied;
+
+ Validation() {
+ active = true;
+ dynamic_state = 0;
+ vertex_format = INVALID_ID;
+ vertex_array_size = 0;
+ vertex_max_instances_allowed = 0xFFFFFFFF;
+ framebuffer_format = INVALID_ID;
+				index_array_size = 0; //not set
+ index_array_max_index = 0; //not set
+ index_buffer_uses_restart_indices = false;
+ invalid_set_from = 0;
+
+				//pipeline state initialization
+ pipeline_active = false;
+ pipeline_dynamic_state = 0;
+ pipeline_vertex_format = INVALID_ID;
+ pipeline_uses_restart_indices = false;
+ pipeline_push_constant_size = 0;
+ pipeline_push_constant_suppplied = false;
+ }
+ } validation;
+#else
+ struct Validation {
+ uint32_t vertex_array_size; //0 if not set
+ uint32_t index_array_size; //0 if index buffer not set
+ uint32_t index_array_offset;
+
+ Validation() {
+ vertex_array_size = 0;
+				index_array_size = 0; //not set
+ }
+ } validation;
+
+#endif
+ };
+
+ DrawList *draw_list; //one for regular draw lists, multiple for split.
+ uint32_t draw_list_count;
+ bool draw_list_split;
+ Vector<RID> draw_list_bound_textures;
+ bool draw_list_unbind_color_textures;
+ bool draw_list_unbind_depth_textures;
+
+ void _draw_list_insert_clear_region(DrawList *draw_list, Framebuffer *framebuffer, Point2i viewport_offset, Point2i viewport_size, bool p_clear_color, const Vector<Color> &p_clear_colors, bool p_clear_depth, float p_depth, uint32_t p_stencil);
+ Error _draw_list_setup_framebuffer(Framebuffer *p_framebuffer, InitialAction p_initial_color_action, FinalAction p_final_color_action, InitialAction p_initial_depth_action, FinalAction p_final_depth_action, VkFramebuffer *r_framebuffer, VkRenderPass *r_render_pass);
+ Error _draw_list_render_pass_begin(Framebuffer *framebuffer, InitialAction p_initial_color_action, FinalAction p_final_color_action, InitialAction p_initial_depth_action, FinalAction p_final_depth_action, const Vector<Color> &p_clear_colors, float p_clear_depth, uint32_t p_clear_stencil, Point2i viewport_offset, Point2i viewport_size, VkFramebuffer vkframebuffer, VkRenderPass render_pass, VkCommandBuffer command_buffer, VkSubpassContents subpass_contents);
+ _FORCE_INLINE_ DrawList *_get_draw_list_ptr(DrawListID p_id);
+
+ /**********************/
+ /**** COMPUTE LIST ****/
+ /**********************/
+
+ struct ComputeList {
+
+ VkCommandBuffer command_buffer; //if persistent, this is owned, otherwise it's shared with the ringbuffer
+
+ struct SetState {
+ uint32_t pipeline_expected_format;
+ uint32_t uniform_set_format;
+ VkDescriptorSet descriptor_set;
+ RID uniform_set;
+ bool bound;
+ SetState() {
+ bound = false;
+ pipeline_expected_format = 0;
+ uniform_set_format = 0;
+ descriptor_set = VK_NULL_HANDLE;
+ }
+ };
+
+ struct State {
+ Set<Texture *> textures_to_sampled_layout;
+
+ SetState sets[MAX_UNIFORM_SETS];
+ uint32_t set_count;
+ RID pipeline;
+ RID pipeline_shader;
+ VkPipelineLayout pipeline_layout;
+ uint32_t pipeline_push_constant_stages;
+
+ State() {
+ set_count = 0;
+ pipeline_layout = VK_NULL_HANDLE;
+ pipeline_push_constant_stages = 0;
+ }
+ } state;
+#ifdef DEBUG_ENABLED
+
+ struct Validation {
+			bool active; //means the command buffer was not closed, so you can keep adding things
+ Vector<uint32_t> set_formats;
+ Vector<bool> set_bound;
+ Vector<RID> set_rids;
+ //last pipeline set values
+ bool pipeline_active;
+ RID pipeline_shader;
+ uint32_t invalid_set_from;
+ Vector<uint32_t> pipeline_set_formats;
+ uint32_t pipeline_push_constant_size;
+ bool pipeline_push_constant_suppplied;
+
+ Validation() {
+ active = true;
+ invalid_set_from = 0;
+
+				//pipeline state initialization
+ pipeline_active = false;
+ pipeline_push_constant_size = 0;
+ pipeline_push_constant_suppplied = false;
+ }
+ } validation;
+#endif
+ };
+
+ ComputeList *compute_list;
+
+ /**************************/
+ /**** FRAME MANAGEMENT ****/
+ /**************************/
+
+ // This is the frame structure. There are normally
+ // 3 of these (used for triple buffering), or 2
+ // (double buffering). They are cycled constantly.
+ //
+ // It contains two command buffers, one that is
+ // used internally for setting up (creating stuff)
+ // and another used mostly for drawing.
+ //
+	// Each frame also contains a list of things that
+	// need to be disposed of when deleted, which can't
+	// happen immediately due to the asynchronous
+	// nature of the GPU. They will get deleted
+	// when the frame is cycled.
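+	//
+	// Deferred-free sketch: freeing a buffer RID only queues the Vulkan
+	// handles on the current frame; they are actually destroyed in
+	// _free_pending_resources() once the frame cycles back, when the GPU
+	// can no longer be using them:
+	//
+	//     frames[frame].buffers_to_dispose_of.push_back(buffer); // at free()
+	//     // ...frame_count frames later, inside _free_pending_resources():
+	//     vmaDestroyBuffer(allocator, b.buffer, b.allocation);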
+
+ struct Frame {
+ //list in usage order, from last to free to first to free
+ List<Buffer> buffers_to_dispose_of;
+ List<Texture> textures_to_dispose_of;
+ List<Framebuffer> framebuffers_to_dispose_of;
+ List<VkSampler> samplers_to_dispose_of;
+ List<Shader> shaders_to_dispose_of;
+ List<VkBufferView> buffer_views_to_dispose_of;
+ List<UniformSet> uniform_sets_to_dispose_of;
+ List<RenderPipeline> render_pipelines_to_dispose_of;
+ List<ComputePipeline> compute_pipelines_to_dispose_of;
+
+ VkCommandPool command_pool;
+		VkCommandBuffer setup_command_buffer; //used at the beginning of every frame for set-up
+		VkCommandBuffer draw_command_buffer; //used mostly for drawing during the frame
+
+ struct Timestamp {
+ String description;
+ uint64_t value;
+ };
+
+ VkQueryPool timestamp_pool;
+
+ String *timestamp_names;
+ uint64_t *timestamp_cpu_values;
+ uint32_t timestamp_count;
+ String *timestamp_result_names;
+ uint64_t *timestamp_cpu_result_values;
+ uint64_t *timestamp_result_values;
+ uint32_t timestamp_result_count;
+ uint64_t index;
+ };
+
+ uint32_t max_timestamp_query_elements;
+
+ Frame *frames; //frames available, they are cycled (usually 3)
+ int frame; //current frame
+ int frame_count; //total amount of frames
+ uint64_t frames_drawn;
+
+ void _free_pending_resources(int p_frame);
+
+ VmaAllocator allocator;
+
+ VulkanContext *context;
+
+ void _free_internal(RID p_id);
+ void _flush(bool p_current_frame);
+
+ bool screen_prepared;
+
+ template <class T>
+ void _free_rids(T &p_owner, const char *p_type);
+
+public:
+ virtual RID texture_create(const TextureFormat &p_format, const TextureView &p_view, const Vector<Vector<uint8_t> > &p_data = Vector<Vector<uint8_t> >());
+ virtual RID texture_create_shared(const TextureView &p_view, RID p_with_texture);
+
+ virtual RID texture_create_shared_from_slice(const TextureView &p_view, RID p_with_texture, uint32_t p_layer, uint32_t p_mipmap, TextureSliceType p_slice_type = TEXTURE_SLICE_2D);
+ virtual Error texture_update(RID p_texture, uint32_t p_layer, const Vector<uint8_t> &p_data, bool p_sync_with_draw = false);
+ virtual Vector<uint8_t> texture_get_data(RID p_texture, uint32_t p_layer);
+
+ virtual bool texture_is_format_supported_for_usage(DataFormat p_format, uint32_t p_usage) const;
+ virtual bool texture_is_shared(RID p_texture);
+ virtual bool texture_is_valid(RID p_texture);
+
+ virtual Error texture_copy(RID p_from_texture, RID p_to_texture, const Vector3 &p_from, const Vector3 &p_to, const Vector3 &p_size, uint32_t p_src_mipmap, uint32_t p_dst_mipmap, uint32_t p_src_layer, uint32_t p_dst_layer, bool p_sync_with_draw = false);
+ virtual Error texture_clear(RID p_texture, const Color &p_color, uint32_t p_base_mipmap, uint32_t p_mipmaps, uint32_t p_base_layer, uint32_t p_layers, bool p_sync_with_draw = false);
+
+ /*********************/
+ /**** FRAMEBUFFER ****/
+ /*********************/
+
+ virtual FramebufferFormatID framebuffer_format_create(const Vector<AttachmentFormat> &p_format);
+ virtual TextureSamples framebuffer_format_get_texture_samples(FramebufferFormatID p_format);
+
+ virtual RID framebuffer_create(const Vector<RID> &p_texture_attachments, FramebufferFormatID p_format_check = INVALID_ID);
+
+ virtual FramebufferFormatID framebuffer_get_format(RID p_framebuffer);
+
+ /*****************/
+ /**** SAMPLER ****/
+ /*****************/
+
+ virtual RID sampler_create(const SamplerState &p_state);
+
+ /**********************/
+ /**** VERTEX ARRAY ****/
+ /**********************/
+
+ virtual RID vertex_buffer_create(uint32_t p_size_bytes, const Vector<uint8_t> &p_data = Vector<uint8_t>());
+
+	// Internally reference counted; this ID is guaranteed to be unique for the same description, but needs to be freed as many times as it was allocated
+ virtual VertexFormatID vertex_format_create(const Vector<VertexDescription> &p_vertex_formats);
+ virtual RID vertex_array_create(uint32_t p_vertex_count, VertexFormatID p_vertex_format, const Vector<RID> &p_src_buffers);
+
+ virtual RID index_buffer_create(uint32_t p_size_indices, IndexBufferFormat p_format, const Vector<uint8_t> &p_data = Vector<uint8_t>(), bool p_use_restart_indices = false);
+
+ virtual RID index_array_create(RID p_index_buffer, uint32_t p_index_offset, uint32_t p_index_count);
+
+ /****************/
+ /**** SHADER ****/
+ /****************/
+
+ virtual RID shader_create(const Vector<ShaderStageData> &p_stages);
+ virtual uint32_t shader_get_vertex_input_attribute_mask(RID p_shader);
+
+ /*****************/
+ /**** UNIFORM ****/
+ /*****************/
+
+ virtual RID uniform_buffer_create(uint32_t p_size_bytes, const Vector<uint8_t> &p_data = Vector<uint8_t>());
+ virtual RID storage_buffer_create(uint32_t p_size_bytes, const Vector<uint8_t> &p_data = Vector<uint8_t>());
+ virtual RID texture_buffer_create(uint32_t p_size_elements, DataFormat p_format, const Vector<uint8_t> &p_data = Vector<uint8_t>());
+
+ virtual RID uniform_set_create(const Vector<Uniform> &p_uniforms, RID p_shader, uint32_t p_shader_set);
+ virtual bool uniform_set_is_valid(RID p_uniform_set);
+
+ virtual Error buffer_update(RID p_buffer, uint32_t p_offset, uint32_t p_size, const void *p_data, bool p_sync_with_draw = false); //works for any buffer
+ virtual Vector<uint8_t> buffer_get_data(RID p_buffer);
+
+ /*************************/
+ /**** RENDER PIPELINE ****/
+ /*************************/
+
+ virtual RID render_pipeline_create(RID p_shader, FramebufferFormatID p_framebuffer_format, VertexFormatID p_vertex_format, RenderPrimitive p_render_primitive, const PipelineRasterizationState &p_rasterization_state, const PipelineMultisampleState &p_multisample_state, const PipelineDepthStencilState &p_depth_stencil_state, const PipelineColorBlendState &p_blend_state, int p_dynamic_state_flags = 0);
+ virtual bool render_pipeline_is_valid(RID p_pipeline);
+
+ /**************************/
+ /**** COMPUTE PIPELINE ****/
+ /**************************/
+
+ virtual RID compute_pipeline_create(RID p_shader);
+ virtual bool compute_pipeline_is_valid(RID p_pipeline);
+
+ /****************/
+ /**** SCREEN ****/
+ /****************/
+
+ virtual int screen_get_width(int p_screen = 0) const;
+ virtual int screen_get_height(int p_screen = 0) const;
+ virtual FramebufferFormatID screen_get_framebuffer_format() const;
+
+ /********************/
+ /**** DRAW LISTS ****/
+ /********************/
+
+ virtual DrawListID draw_list_begin_for_screen(int p_screen = 0, const Color &p_clear_color = Color());
+
+ virtual DrawListID draw_list_begin(RID p_framebuffer, InitialAction p_initial_color_action, FinalAction p_final_color_action, InitialAction p_initial_depth_action, FinalAction p_final_depth_action, const Vector<Color> &p_clear_color_values = Vector<Color>(), float p_clear_depth = 1.0, uint32_t p_clear_stencil = 0, const Rect2 &p_region = Rect2());
+ virtual Error draw_list_begin_split(RID p_framebuffer, uint32_t p_splits, DrawListID *r_split_ids, InitialAction p_initial_color_action, FinalAction p_final_color_action, InitialAction p_initial_depth_action, FinalAction p_final_depth_action, const Vector<Color> &p_clear_color_values = Vector<Color>(), float p_clear_depth = 1.0, uint32_t p_clear_stencil = 0, const Rect2 &p_region = Rect2());
+
+ virtual void draw_list_bind_render_pipeline(DrawListID p_list, RID p_render_pipeline);
+ virtual void draw_list_bind_uniform_set(DrawListID p_list, RID p_uniform_set, uint32_t p_index);
+ virtual void draw_list_bind_vertex_array(DrawListID p_list, RID p_vertex_array);
+ virtual void draw_list_bind_index_array(DrawListID p_list, RID p_index_array);
+ virtual void draw_list_set_line_width(DrawListID p_list, float p_width);
+ virtual void draw_list_set_push_constant(DrawListID p_list, void *p_data, uint32_t p_data_size);
+
+ virtual void draw_list_draw(DrawListID p_list, bool p_use_indices, uint32_t p_instances = 1, uint32_t p_procedural_vertices = 0);
+
+ virtual void draw_list_enable_scissor(DrawListID p_list, const Rect2 &p_rect);
+ virtual void draw_list_disable_scissor(DrawListID p_list);
+
+ virtual void draw_list_end();
+
+ /***********************/
+ /**** COMPUTE LISTS ****/
+ /***********************/
+
+ virtual ComputeListID compute_list_begin();
+ virtual void compute_list_bind_compute_pipeline(ComputeListID p_list, RID p_compute_pipeline);
+ virtual void compute_list_bind_uniform_set(ComputeListID p_list, RID p_uniform_set, uint32_t p_index);
+ virtual void compute_list_set_push_constant(ComputeListID p_list, void *p_data, uint32_t p_data_size);
+ virtual void compute_list_add_barrier(ComputeListID p_list);
+
+ virtual void compute_list_dispatch(ComputeListID p_list, uint32_t p_x_groups, uint32_t p_y_groups, uint32_t p_z_groups);
+ virtual void compute_list_end();
+
+ /**************/
+ /**** FREE ****/
+ /**************/
+
+ virtual void free(RID p_id);
+
+ /****************/
+ /**** Timing ****/
+ /****************/
+
+ virtual void capture_timestamp(const String &p_name, bool p_sync_to_draw);
+ virtual uint32_t get_captured_timestamps_count() const;
+ virtual uint64_t get_captured_timestamps_frame() const;
+ virtual uint64_t get_captured_timestamp_gpu_time(uint32_t p_index) const;
+ virtual uint64_t get_captured_timestamp_cpu_time(uint32_t p_index) const;
+ virtual String get_captured_timestamp_name(uint32_t p_index) const;
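+
+	// A typical profiling flow (a sketch; `rd` is illustrative): capture
+	// while recording, then read results once the frame's queries complete:
+	//
+	//     rd->capture_timestamp("shadow_pass", true);
+	//     // ...later, when get_captured_timestamps_frame() reports that frame:
+	//     for (uint32_t i = 0; i < rd->get_captured_timestamps_count(); i++) {
+	//         print_line(rd->get_captured_timestamp_name(i) + ": " +
+	//                 itos(rd->get_captured_timestamp_gpu_time(i)) + " ns");
+	//     }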
+
+ /****************/
+ /**** Limits ****/
+ /****************/
+
+ virtual int limit_get(Limit p_limit);
+
+ virtual void prepare_screen_for_drawing();
+ void initialize(VulkanContext *p_context);
+ void finalize();
+
+ virtual void swap_buffers();
+
+ virtual uint32_t get_frame_delay() const;
+
+ RenderingDeviceVulkan();
+};
+
+#endif // RENDERING_DEVICE_VULKAN_H
diff --git a/drivers/vulkan/vulkan_context.cpp b/drivers/vulkan/vulkan_context.cpp
new file mode 100644
index 0000000000..51d66cf97e
--- /dev/null
+++ b/drivers/vulkan/vulkan_context.cpp
@@ -0,0 +1,1517 @@
+/*************************************************************************/
+/* vulkan_context.cpp */
+/*************************************************************************/
+/* This file is part of: */
+/* GODOT ENGINE */
+/* https://godotengine.org */
+/*************************************************************************/
+/* Copyright (c) 2007-2020 Juan Linietsky, Ariel Manzur. */
+/* Copyright (c) 2014-2020 Godot Engine contributors (cf. AUTHORS.md). */
+/* */
+/* Permission is hereby granted, free of charge, to any person obtaining */
+/* a copy of this software and associated documentation files (the */
+/* "Software"), to deal in the Software without restriction, including */
+/* without limitation the rights to use, copy, modify, merge, publish, */
+/* distribute, sublicense, and/or sell copies of the Software, and to */
+/* permit persons to whom the Software is furnished to do so, subject to */
+/* the following conditions: */
+/* */
+/* The above copyright notice and this permission notice shall be */
+/* included in all copies or substantial portions of the Software. */
+/* */
+/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
+/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
+/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/
+/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
+/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
+/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
+/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
+/*************************************************************************/
+
+#include "vulkan_context.h"
+
+#include "core/engine.h"
+#include "core/project_settings.h"
+#include "core/ustring.h"
+#include "core/version.h"
+
+#include "vk_enum_string_helper.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#define ARRAY_SIZE(a) (sizeof(a) / sizeof(a[0]))
+#define VULKAN_DEBUG(m_text) print_line(m_text)
+#define APP_SHORT_NAME "GodotEngine"
+
+VKAPI_ATTR VkBool32 VKAPI_CALL VulkanContext::_debug_messenger_callback(VkDebugUtilsMessageSeverityFlagBitsEXT messageSeverity,
+ VkDebugUtilsMessageTypeFlagsEXT messageType,
+ const VkDebugUtilsMessengerCallbackDataEXT *pCallbackData,
+ void *pUserData) {
+
+ // This error needs to be ignored because the AMD allocator will mix up memory types on IGP processors.
+ if (strstr(pCallbackData->pMessage, "Mapping an image with layout") != NULL &&
+ strstr(pCallbackData->pMessage, "can result in undefined behavior if this memory is used by the device") != NULL) {
+ return VK_FALSE;
+ }
+	// This needs to be ignored because the validation layer is wrong here.
+ if (strstr(pCallbackData->pMessage, "SPIR-V module not valid: Pointer operand") != NULL &&
+ strstr(pCallbackData->pMessage, "must be a memory object") != NULL) {
+ return VK_FALSE;
+ }
+ // Workaround for Vulkan-Loader usability bug: https://github.com/KhronosGroup/Vulkan-Loader/issues/262.
+ if (strstr(pCallbackData->pMessage, "wrong ELF class: ELFCLASS32") != NULL) {
+ return VK_FALSE;
+ }
+ if (strstr(pCallbackData->pMessageIdName, "UNASSIGNED-CoreValidation-DrawState-ClearCmdBeforeDraw") != NULL) {
+ return VK_FALSE;
+ }
+
+ String severity_string;
+ switch (messageSeverity) {
+ case VK_DEBUG_UTILS_MESSAGE_SEVERITY_VERBOSE_BIT_EXT:
+ severity_string = "VERBOSE : ";
+ break;
+ case VK_DEBUG_UTILS_MESSAGE_SEVERITY_INFO_BIT_EXT:
+ severity_string = "INFO : ";
+ break;
+ case VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT:
+ severity_string = "WARNING : ";
+ break;
+ case VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT:
+ severity_string = "ERROR : ";
+ break;
+ case VK_DEBUG_UTILS_MESSAGE_SEVERITY_FLAG_BITS_MAX_ENUM_EXT:
+ break;
+ }
+
+ String type_string;
+ switch (messageType) {
+ case (VK_DEBUG_UTILS_MESSAGE_TYPE_GENERAL_BIT_EXT):
+ type_string = "GENERAL";
+ break;
+ case (VK_DEBUG_UTILS_MESSAGE_TYPE_VALIDATION_BIT_EXT):
+ type_string = "VALIDATION";
+ break;
+ case (VK_DEBUG_UTILS_MESSAGE_TYPE_PERFORMANCE_BIT_EXT):
+ type_string = "PERFORMANCE";
+ break;
+		case (VK_DEBUG_UTILS_MESSAGE_TYPE_VALIDATION_BIT_EXT | VK_DEBUG_UTILS_MESSAGE_TYPE_PERFORMANCE_BIT_EXT):
+ type_string = "VALIDATION|PERFORMANCE";
+ break;
+ }
+
+ String objects_string;
+ if (pCallbackData->objectCount > 0) {
+ objects_string = "\n\tObjects - " + String::num_int64(pCallbackData->objectCount);
+ for (uint32_t object = 0; object < pCallbackData->objectCount; ++object) {
+ objects_string +=
+ "\n\t\tObject[" + String::num_int64(object) + "]" +
+ " - " + string_VkObjectType(pCallbackData->pObjects[object].objectType) +
+ ", Handle " + String::num_int64(pCallbackData->pObjects[object].objectHandle);
+ if (NULL != pCallbackData->pObjects[object].pObjectName && strlen(pCallbackData->pObjects[object].pObjectName) > 0) {
+ objects_string += ", Name \"" + String(pCallbackData->pObjects[object].pObjectName) + "\"";
+ }
+ }
+ }
+
+ String labels_string;
+ if (pCallbackData->cmdBufLabelCount > 0) {
+ labels_string = "\n\tCommand Buffer Labels - " + String::num_int64(pCallbackData->cmdBufLabelCount);
+ for (uint32_t cmd_buf_label = 0; cmd_buf_label < pCallbackData->cmdBufLabelCount; ++cmd_buf_label) {
+ labels_string +=
+ "\n\t\tLabel[" + String::num_int64(cmd_buf_label) + "]" +
+ " - " + pCallbackData->pCmdBufLabels[cmd_buf_label].pLabelName +
+ "{ ";
+ for (int color_idx = 0; color_idx < 4; ++color_idx) {
+ labels_string += String::num(pCallbackData->pCmdBufLabels[cmd_buf_label].color[color_idx]);
+ if (color_idx < 3) {
+ labels_string += ", ";
+ }
+ }
+ labels_string += " }";
+ }
+ }
+
+ String error_message(severity_string + type_string +
+ " - Message Id Number: " + String::num_int64(pCallbackData->messageIdNumber) +
+ " | Message Id Name: " + pCallbackData->pMessageIdName +
+ "\n\t" + pCallbackData->pMessage +
+ objects_string + labels_string);
+
+ ERR_PRINT(error_message);
+
+ CRASH_COND_MSG(Engine::get_singleton()->is_abort_on_gpu_errors_enabled(),
+ "Crashing, because abort on GPU errors is enabled.");
+
+ return VK_FALSE;
+}
+
+VkBool32 VulkanContext::_check_layers(uint32_t check_count, const char **check_names, uint32_t layer_count, VkLayerProperties *layers) {
+ for (uint32_t i = 0; i < check_count; i++) {
+ VkBool32 found = 0;
+ for (uint32_t j = 0; j < layer_count; j++) {
+ if (!strcmp(check_names[i], layers[j].layerName)) {
+ found = 1;
+ break;
+ }
+ }
+ if (!found) {
+ ERR_PRINT("Cant find layer: " + String(check_names[i]));
+ return 0;
+ }
+ }
+ return 1;
+}
+
+Error VulkanContext::_create_validation_layers() {
+
+ VkResult err;
+ uint32_t instance_layer_count = 0;
+ uint32_t validation_layer_count = 0;
+ const char *instance_validation_layers_alt1[] = { "VK_LAYER_LUNARG_standard_validation" };
+ const char *instance_validation_layers_alt2[] = { "VK_LAYER_GOOGLE_threading", "VK_LAYER_LUNARG_parameter_validation",
+ "VK_LAYER_LUNARG_object_tracker", "VK_LAYER_LUNARG_core_validation",
+ "VK_LAYER_GOOGLE_unique_objects" };
+ VkBool32 validation_found = 0;
+ err = vkEnumerateInstanceLayerProperties(&instance_layer_count, NULL);
+ ERR_FAIL_COND_V(err, ERR_CANT_CREATE);
+ const char **instance_validation_layers = instance_validation_layers_alt1;
+ if (instance_layer_count > 0) {
+ VkLayerProperties *instance_layers = (VkLayerProperties *)malloc(sizeof(VkLayerProperties) * instance_layer_count);
+ err = vkEnumerateInstanceLayerProperties(&instance_layer_count, instance_layers);
+ if (err) {
+ free(instance_layers);
+ ERR_FAIL_V(ERR_CANT_CREATE);
+ }
+
+ validation_found = _check_layers(ARRAY_SIZE(instance_validation_layers_alt1), instance_validation_layers,
+ instance_layer_count, instance_layers);
+ if (validation_found) {
+ enabled_layer_count = ARRAY_SIZE(instance_validation_layers_alt1);
+ enabled_layers[0] = "VK_LAYER_LUNARG_standard_validation";
+ validation_layer_count = 1;
+ } else {
+ // use alternative set of validation layers
+ instance_validation_layers = instance_validation_layers_alt2;
+ enabled_layer_count = ARRAY_SIZE(instance_validation_layers_alt2);
+ validation_found = _check_layers(ARRAY_SIZE(instance_validation_layers_alt2), instance_validation_layers,
+ instance_layer_count, instance_layers);
+ validation_layer_count = ARRAY_SIZE(instance_validation_layers_alt2);
+ for (uint32_t i = 0; i < validation_layer_count; i++) {
+ enabled_layers[i] = instance_validation_layers[i];
+ }
+ }
+ free(instance_layers);
+ }
+
+ if (!validation_found) {
+ return ERR_CANT_CREATE;
+ }
+
+ return OK;
+}
+
+Error VulkanContext::_initialize_extensions() {
+
+ VkResult err;
+ uint32_t instance_extension_count = 0;
+
+ enabled_extension_count = 0;
+ enabled_layer_count = 0;
+ /* Look for instance extensions */
+ VkBool32 surfaceExtFound = 0;
+ VkBool32 platformSurfaceExtFound = 0;
+ memset(extension_names, 0, sizeof(extension_names));
+
+ err = vkEnumerateInstanceExtensionProperties(NULL, &instance_extension_count, NULL);
+ ERR_FAIL_COND_V(err, ERR_CANT_CREATE);
+
+ if (instance_extension_count > 0) {
+ VkExtensionProperties *instance_extensions = (VkExtensionProperties *)malloc(sizeof(VkExtensionProperties) * instance_extension_count);
+ err = vkEnumerateInstanceExtensionProperties(NULL, &instance_extension_count, instance_extensions);
+ if (err) {
+ free(instance_extensions);
+ ERR_FAIL_V(ERR_CANT_CREATE);
+ }
+ for (uint32_t i = 0; i < instance_extension_count; i++) {
+ if (!strcmp(VK_KHR_SURFACE_EXTENSION_NAME, instance_extensions[i].extensionName)) {
+ surfaceExtFound = 1;
+ extension_names[enabled_extension_count++] = VK_KHR_SURFACE_EXTENSION_NAME;
+ }
+
+ if (!strcmp(_get_platform_surface_extension(), instance_extensions[i].extensionName)) {
+ platformSurfaceExtFound = 1;
+ extension_names[enabled_extension_count++] = _get_platform_surface_extension();
+ }
+ if (!strcmp(VK_EXT_DEBUG_REPORT_EXTENSION_NAME, instance_extensions[i].extensionName)) {
+ if (use_validation_layers) {
+ extension_names[enabled_extension_count++] = VK_EXT_DEBUG_REPORT_EXTENSION_NAME;
+ }
+ }
+ if (!strcmp(VK_EXT_DEBUG_UTILS_EXTENSION_NAME, instance_extensions[i].extensionName)) {
+ if (use_validation_layers) {
+ extension_names[enabled_extension_count++] = VK_EXT_DEBUG_UTILS_EXTENSION_NAME;
+ }
+ }
+ if (enabled_extension_count >= MAX_EXTENSIONS) {
+ free(instance_extensions);
+ ERR_FAIL_V_MSG(ERR_BUG, "Enabled extension count reaches MAX_EXTENSIONS, BUG");
+ }
+ }
+
+ free(instance_extensions);
+ }
+
+ ERR_FAIL_COND_V_MSG(!surfaceExtFound, ERR_CANT_CREATE, "No surface extension found, is a driver installed?");
+ ERR_FAIL_COND_V_MSG(!platformSurfaceExtFound, ERR_CANT_CREATE, "No platform surface extension found, is a driver installed?");
+
+ return OK;
+}
+
+Error VulkanContext::_create_physical_device() {
+
+ /* Look for validation layers */
+ if (use_validation_layers) {
+ _create_validation_layers();
+ }
+
+ {
+ Error err = _initialize_extensions();
+ if (err != OK) {
+ return err;
+ }
+ }
+
+ CharString cs = ProjectSettings::get_singleton()->get("application/config/name").operator String().utf8();
+ String name = "GodotEngine " + String(VERSION_FULL_NAME);
+ CharString namecs = name.utf8();
+ const VkApplicationInfo app = {
+ /*sType*/ VK_STRUCTURE_TYPE_APPLICATION_INFO,
+ /*pNext*/ NULL,
+ /*pApplicationName*/ cs.get_data(),
+ /*applicationVersion*/ 0,
+ /*pEngineName*/ namecs.get_data(),
+ /*engineVersion*/ 0,
+ /*apiVersion*/ VK_API_VERSION_1_0,
+ };
+ VkInstanceCreateInfo inst_info = {
+ /*sType*/ VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO,
+ /*pNext*/ NULL,
+ /*flags*/ 0,
+ /*pApplicationInfo*/ &app,
+ /*enabledLayerCount*/ enabled_layer_count,
+ /*ppEnabledLayerNames*/ (const char *const *)instance_validation_layers,
+ /*enabledExtensionCount*/ enabled_extension_count,
+ /*ppEnabledExtensionNames*/ (const char *const *)extension_names,
+ };
+
+ /*
+ * This is info for a temp callback to use during CreateInstance.
+ * After the instance is created, we use the instance-based
+ * function to register the final callback.
+ */
+ VkDebugUtilsMessengerCreateInfoEXT dbg_messenger_create_info;
+ if (use_validation_layers) {
+ // VK_EXT_debug_utils style
+ dbg_messenger_create_info.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CREATE_INFO_EXT;
+ dbg_messenger_create_info.pNext = NULL;
+ dbg_messenger_create_info.flags = 0;
+ dbg_messenger_create_info.messageSeverity =
+ VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT | VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT;
+ dbg_messenger_create_info.messageType = VK_DEBUG_UTILS_MESSAGE_TYPE_GENERAL_BIT_EXT |
+ VK_DEBUG_UTILS_MESSAGE_TYPE_VALIDATION_BIT_EXT |
+ VK_DEBUG_UTILS_MESSAGE_TYPE_PERFORMANCE_BIT_EXT;
+ dbg_messenger_create_info.pfnUserCallback = _debug_messenger_callback;
+ dbg_messenger_create_info.pUserData = this;
+ inst_info.pNext = &dbg_messenger_create_info;
+ }
+
+ uint32_t gpu_count;
+
+ VkResult err = vkCreateInstance(&inst_info, NULL, &inst);
+ ERR_FAIL_COND_V_MSG(err == VK_ERROR_INCOMPATIBLE_DRIVER, ERR_CANT_CREATE,
+ "Cannot find a compatible Vulkan installable client driver (ICD).\n\n"
+ "vkCreateInstance Failure");
+ ERR_FAIL_COND_V_MSG(err == VK_ERROR_EXTENSION_NOT_PRESENT, ERR_CANT_CREATE,
+ "Cannot find a specified extension library.\n"
+ "Make sure your layers path is set appropriately.\n"
+ "vkCreateInstance Failure");
+ ERR_FAIL_COND_V_MSG(err, ERR_CANT_CREATE,
+ "vkCreateInstance failed.\n\n"
+ "Do you have a compatible Vulkan installable client driver (ICD) installed?\n"
+ "Please look at the Getting Started guide for additional information.\n"
+ "vkCreateInstance Failure");
+
+	/* Make initial call to query gpu_count, then second call for gpu info. */
+ err = vkEnumeratePhysicalDevices(inst, &gpu_count, NULL);
+ ERR_FAIL_COND_V(err, ERR_CANT_CREATE);
+
+ ERR_FAIL_COND_V_MSG(gpu_count == 0, ERR_CANT_CREATE,
+ "vkEnumeratePhysicalDevices reported zero accessible devices.\n\n"
+ "Do you have a compatible Vulkan installable client driver (ICD) installed?\n"
+ "vkEnumeratePhysicalDevices Failure");
+
+ VkPhysicalDevice *physical_devices = (VkPhysicalDevice *)malloc(sizeof(VkPhysicalDevice) * gpu_count);
+ err = vkEnumeratePhysicalDevices(inst, &gpu_count, physical_devices);
+ if (err) {
+ free(physical_devices);
+ ERR_FAIL_V(ERR_CANT_CREATE);
+ }
+ /* for now, just grab the first physical device */
+ gpu = physical_devices[0];
+ free(physical_devices);
+
+ /* Look for device extensions */
+ uint32_t device_extension_count = 0;
+ VkBool32 swapchainExtFound = 0;
+ enabled_extension_count = 0;
+ memset(extension_names, 0, sizeof(extension_names));
+
+ err = vkEnumerateDeviceExtensionProperties(gpu, NULL, &device_extension_count, NULL);
+ ERR_FAIL_COND_V(err, ERR_CANT_CREATE);
+
+ if (device_extension_count > 0) {
+ VkExtensionProperties *device_extensions = (VkExtensionProperties *)malloc(sizeof(VkExtensionProperties) * device_extension_count);
+ err = vkEnumerateDeviceExtensionProperties(gpu, NULL, &device_extension_count, device_extensions);
+ if (err) {
+ free(device_extensions);
+ ERR_FAIL_V(ERR_CANT_CREATE);
+ }
+
+ for (uint32_t i = 0; i < device_extension_count; i++) {
+ if (!strcmp(VK_KHR_SWAPCHAIN_EXTENSION_NAME, device_extensions[i].extensionName)) {
+ swapchainExtFound = 1;
+ extension_names[enabled_extension_count++] = VK_KHR_SWAPCHAIN_EXTENSION_NAME;
+ }
+ if (enabled_extension_count >= MAX_EXTENSIONS) {
+ free(device_extensions);
+ ERR_FAIL_V_MSG(ERR_BUG, "Enabled extension count reaches MAX_EXTENSIONS, BUG");
+ }
+ }
+
+ if (VK_KHR_incremental_present_enabled) {
+			// Even though the extension was requested, we must make sure it
+			// is enumerated for use with the device. Therefore, disable it
+			// here, and re-enable it only if enumerated.
+ VK_KHR_incremental_present_enabled = false;
+ for (uint32_t i = 0; i < device_extension_count; i++) {
+ if (!strcmp(VK_KHR_INCREMENTAL_PRESENT_EXTENSION_NAME, device_extensions[i].extensionName)) {
+ extension_names[enabled_extension_count++] = VK_KHR_INCREMENTAL_PRESENT_EXTENSION_NAME;
+ VK_KHR_incremental_present_enabled = true;
+ VULKAN_DEBUG("VK_KHR_incremental_present extension enabled\n");
+ }
+ if (enabled_extension_count >= MAX_EXTENSIONS) {
+ free(device_extensions);
+ ERR_FAIL_V_MSG(ERR_BUG, "Enabled extension count reaches MAX_EXTENSIONS, BUG");
+ }
+ }
+ if (!VK_KHR_incremental_present_enabled) {
+ VULKAN_DEBUG("VK_KHR_incremental_present extension NOT AVAILABLE\n");
+ }
+ }
+
+ if (VK_GOOGLE_display_timing_enabled) {
+			// Even though the extension was requested, we must make sure it
+			// is enumerated for use with the device. Therefore, disable it
+			// here, and re-enable it only if enumerated.
+ VK_GOOGLE_display_timing_enabled = false;
+ for (uint32_t i = 0; i < device_extension_count; i++) {
+ if (!strcmp(VK_GOOGLE_DISPLAY_TIMING_EXTENSION_NAME, device_extensions[i].extensionName)) {
+ extension_names[enabled_extension_count++] = VK_GOOGLE_DISPLAY_TIMING_EXTENSION_NAME;
+ VK_GOOGLE_display_timing_enabled = true;
+ VULKAN_DEBUG("VK_GOOGLE_display_timing extension enabled\n");
+ }
+ if (enabled_extension_count >= MAX_EXTENSIONS) {
+ free(device_extensions);
+ ERR_FAIL_V_MSG(ERR_BUG, "Enabled extension count reaches MAX_EXTENSIONS, BUG");
+ }
+ }
+ if (!VK_GOOGLE_display_timing_enabled) {
+ VULKAN_DEBUG("VK_GOOGLE_display_timing extension NOT AVAILABLE\n");
+ }
+ }
+
+ free(device_extensions);
+ }
+
+	ERR_FAIL_COND_V_MSG(!swapchainExtFound, ERR_CANT_CREATE,
+			"vkEnumerateDeviceExtensionProperties failed to find the " VK_KHR_SWAPCHAIN_EXTENSION_NAME
+			" extension.\n\nDo you have a compatible Vulkan installable client driver (ICD) installed?\n"
+			"vkEnumerateDeviceExtensionProperties Failure");
+
+ if (use_validation_layers) {
+		// Set up the VK_EXT_debug_utils function pointers (used for the
+		// debug messenger as well as for debug labels and object names).
+ CreateDebugUtilsMessengerEXT =
+ (PFN_vkCreateDebugUtilsMessengerEXT)vkGetInstanceProcAddr(inst, "vkCreateDebugUtilsMessengerEXT");
+ DestroyDebugUtilsMessengerEXT =
+ (PFN_vkDestroyDebugUtilsMessengerEXT)vkGetInstanceProcAddr(inst, "vkDestroyDebugUtilsMessengerEXT");
+ SubmitDebugUtilsMessageEXT =
+ (PFN_vkSubmitDebugUtilsMessageEXT)vkGetInstanceProcAddr(inst, "vkSubmitDebugUtilsMessageEXT");
+ CmdBeginDebugUtilsLabelEXT =
+ (PFN_vkCmdBeginDebugUtilsLabelEXT)vkGetInstanceProcAddr(inst, "vkCmdBeginDebugUtilsLabelEXT");
+ CmdEndDebugUtilsLabelEXT =
+ (PFN_vkCmdEndDebugUtilsLabelEXT)vkGetInstanceProcAddr(inst, "vkCmdEndDebugUtilsLabelEXT");
+ CmdInsertDebugUtilsLabelEXT =
+ (PFN_vkCmdInsertDebugUtilsLabelEXT)vkGetInstanceProcAddr(inst, "vkCmdInsertDebugUtilsLabelEXT");
+ SetDebugUtilsObjectNameEXT =
+ (PFN_vkSetDebugUtilsObjectNameEXT)vkGetInstanceProcAddr(inst, "vkSetDebugUtilsObjectNameEXT");
+ if (NULL == CreateDebugUtilsMessengerEXT || NULL == DestroyDebugUtilsMessengerEXT ||
+ NULL == SubmitDebugUtilsMessageEXT || NULL == CmdBeginDebugUtilsLabelEXT ||
+ NULL == CmdEndDebugUtilsLabelEXT || NULL == CmdInsertDebugUtilsLabelEXT ||
+ NULL == SetDebugUtilsObjectNameEXT) {
+ ERR_FAIL_V_MSG(ERR_CANT_CREATE,
+ "GetProcAddr: Failed to init VK_EXT_debug_utils\n"
+ "GetProcAddr: Failure");
+ }
+
+ err = CreateDebugUtilsMessengerEXT(inst, &dbg_messenger_create_info, NULL, &dbg_messenger);
+ switch (err) {
+ case VK_SUCCESS:
+ break;
+ case VK_ERROR_OUT_OF_HOST_MEMORY:
+ ERR_FAIL_V_MSG(ERR_CANT_CREATE,
+ "CreateDebugUtilsMessengerEXT: out of host memory\n"
+ "CreateDebugUtilsMessengerEXT Failure");
+ break;
+			default:
+				ERR_FAIL_V_MSG(ERR_CANT_CREATE,
+						"CreateDebugUtilsMessengerEXT: unknown failure\n"
+						"CreateDebugUtilsMessengerEXT Failure");
+				break;
+ }
+ }
+ vkGetPhysicalDeviceProperties(gpu, &gpu_props);
+
+ /* Call with NULL data to get count */
+ vkGetPhysicalDeviceQueueFamilyProperties(gpu, &queue_family_count, NULL);
+ ERR_FAIL_COND_V(queue_family_count == 0, ERR_CANT_CREATE);
+
+ queue_props = (VkQueueFamilyProperties *)malloc(queue_family_count * sizeof(VkQueueFamilyProperties));
+ vkGetPhysicalDeviceQueueFamilyProperties(gpu, &queue_family_count, queue_props);
+
+ // Query fine-grained feature support for this device.
+ // If app has specific feature requirements it should check supported
+ // features based on this query
+ vkGetPhysicalDeviceFeatures(gpu, &physical_device_features);
+
+#define GET_INSTANCE_PROC_ADDR(inst, entrypoint) \
+ { \
+ fp##entrypoint = (PFN_vk##entrypoint)vkGetInstanceProcAddr(inst, "vk" #entrypoint); \
+ ERR_FAIL_COND_V_MSG(fp##entrypoint == NULL, ERR_CANT_CREATE, \
+ "vkGetInstanceProcAddr failed to find vk" #entrypoint); \
+ }
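+	// For example, GET_INSTANCE_PROC_ADDR(inst, GetSwapchainImagesKHR)
+	// expands (via token pasting) to:
+	//
+	//	fpGetSwapchainImagesKHR =
+	//			(PFN_vkGetSwapchainImagesKHR)vkGetInstanceProcAddr(inst, "vkGetSwapchainImagesKHR");
+	//
+	// followed by the NULL check, storing the pointer in the corresponding
+	// fp-prefixed member declared in vulkan_context.h.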
+
+ GET_INSTANCE_PROC_ADDR(inst, GetPhysicalDeviceSurfaceSupportKHR);
+ GET_INSTANCE_PROC_ADDR(inst, GetPhysicalDeviceSurfaceCapabilitiesKHR);
+ GET_INSTANCE_PROC_ADDR(inst, GetPhysicalDeviceSurfaceFormatsKHR);
+ GET_INSTANCE_PROC_ADDR(inst, GetPhysicalDeviceSurfacePresentModesKHR);
+ GET_INSTANCE_PROC_ADDR(inst, GetSwapchainImagesKHR);
+
+ return OK;
+}
+
+Error VulkanContext::_create_device() {
+
+ VkResult err;
+ float queue_priorities[1] = { 0.0 };
+ VkDeviceQueueCreateInfo queues[2];
+ queues[0].sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO;
+ queues[0].pNext = NULL;
+ queues[0].queueFamilyIndex = graphics_queue_family_index;
+ queues[0].queueCount = 1;
+ queues[0].pQueuePriorities = queue_priorities;
+ queues[0].flags = 0;
+
+ VkDeviceCreateInfo sdevice = {
+ /*sType*/ VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO,
+ /*pNext*/ NULL,
+ /*flags*/ 0,
+ /*queueCreateInfoCount*/ 1,
+ /*pQueueCreateInfos*/ queues,
+ /*enabledLayerCount*/ 0,
+ /*ppEnabledLayerNames*/ NULL,
+ /*enabledExtensionCount*/ enabled_extension_count,
+ /*ppEnabledExtensionNames*/ (const char *const *)extension_names,
+		/*pEnabledFeatures*/ &physical_device_features, // If specific features are required, pass them in here.
+	};
+ if (separate_present_queue) {
+ queues[1].sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO;
+ queues[1].pNext = NULL;
+ queues[1].queueFamilyIndex = present_queue_family_index;
+ queues[1].queueCount = 1;
+ queues[1].pQueuePriorities = queue_priorities;
+ queues[1].flags = 0;
+ sdevice.queueCreateInfoCount = 2;
+ }
+ err = vkCreateDevice(gpu, &sdevice, NULL, &device);
+ ERR_FAIL_COND_V(err, ERR_CANT_CREATE);
+ return OK;
+}
+
+Error VulkanContext::_initialize_queues(VkSurfaceKHR surface) {
+
+ // Iterate over each queue to learn whether it supports presenting:
+ VkBool32 *supportsPresent = (VkBool32 *)malloc(queue_family_count * sizeof(VkBool32));
+ for (uint32_t i = 0; i < queue_family_count; i++) {
+ fpGetPhysicalDeviceSurfaceSupportKHR(gpu, i, surface, &supportsPresent[i]);
+ }
+
+	// Search for a graphics and a present queue in the array of queue
+	// families, preferring a single family that supports both.
+ uint32_t graphicsQueueFamilyIndex = UINT32_MAX;
+ uint32_t presentQueueFamilyIndex = UINT32_MAX;
+ for (uint32_t i = 0; i < queue_family_count; i++) {
+ if ((queue_props[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) != 0) {
+ if (graphicsQueueFamilyIndex == UINT32_MAX) {
+ graphicsQueueFamilyIndex = i;
+ }
+
+ if (supportsPresent[i] == VK_TRUE) {
+ graphicsQueueFamilyIndex = i;
+ presentQueueFamilyIndex = i;
+ break;
+ }
+ }
+ }
+
+ if (presentQueueFamilyIndex == UINT32_MAX) {
+		// If we didn't find a queue that supports both graphics and present,
+		// find a separate present queue.
+ for (uint32_t i = 0; i < queue_family_count; ++i) {
+ if (supportsPresent[i] == VK_TRUE) {
+ presentQueueFamilyIndex = i;
+ break;
+ }
+ }
+ }
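+	// Worked example: if family 0 is graphics-only and family 1 is the only
+	// one that can present, the loops above yield graphicsQueueFamilyIndex == 0
+	// and presentQueueFamilyIndex == 1, so separate_present_queue becomes true
+	// and the image ownership transfer path in _update_swap_chain() is used.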
+
+ free(supportsPresent);
+
+ // Generate error if could not find both a graphics and a present queue
+ ERR_FAIL_COND_V_MSG(graphicsQueueFamilyIndex == UINT32_MAX || presentQueueFamilyIndex == UINT32_MAX, ERR_CANT_CREATE,
+ "Could not find both graphics and present queues\n");
+
+ graphics_queue_family_index = graphicsQueueFamilyIndex;
+ present_queue_family_index = presentQueueFamilyIndex;
+ separate_present_queue = (graphics_queue_family_index != present_queue_family_index);
+
+	Error derr = _create_device();
+	if (derr) {
+		return derr;
+	}
+
+ static PFN_vkGetDeviceProcAddr g_gdpa = NULL;
+#define GET_DEVICE_PROC_ADDR(dev, entrypoint) \
+ { \
+ if (!g_gdpa) g_gdpa = (PFN_vkGetDeviceProcAddr)vkGetInstanceProcAddr(inst, "vkGetDeviceProcAddr"); \
+ fp##entrypoint = (PFN_vk##entrypoint)g_gdpa(dev, "vk" #entrypoint); \
+ ERR_FAIL_COND_V_MSG(fp##entrypoint == NULL, ERR_CANT_CREATE, \
+ "vkGetDeviceProcAddr failed to find vk" #entrypoint); \
+ }
+
+ GET_DEVICE_PROC_ADDR(device, CreateSwapchainKHR);
+ GET_DEVICE_PROC_ADDR(device, DestroySwapchainKHR);
+ GET_DEVICE_PROC_ADDR(device, GetSwapchainImagesKHR);
+ GET_DEVICE_PROC_ADDR(device, AcquireNextImageKHR);
+ GET_DEVICE_PROC_ADDR(device, QueuePresentKHR);
+ if (VK_GOOGLE_display_timing_enabled) {
+ GET_DEVICE_PROC_ADDR(device, GetRefreshCycleDurationGOOGLE);
+ GET_DEVICE_PROC_ADDR(device, GetPastPresentationTimingGOOGLE);
+ }
+
+ vkGetDeviceQueue(device, graphics_queue_family_index, 0, &graphics_queue);
+
+ if (!separate_present_queue) {
+ present_queue = graphics_queue;
+ } else {
+ vkGetDeviceQueue(device, present_queue_family_index, 0, &present_queue);
+ }
+
+ // Get the list of VkFormat's that are supported:
+ uint32_t formatCount;
+ VkResult err = fpGetPhysicalDeviceSurfaceFormatsKHR(gpu, surface, &formatCount, NULL);
+ ERR_FAIL_COND_V(err, ERR_CANT_CREATE);
+ VkSurfaceFormatKHR *surfFormats = (VkSurfaceFormatKHR *)malloc(formatCount * sizeof(VkSurfaceFormatKHR));
+ err = fpGetPhysicalDeviceSurfaceFormatsKHR(gpu, surface, &formatCount, surfFormats);
+ if (err) {
+ free(surfFormats);
+ ERR_FAIL_V(ERR_CANT_CREATE);
+ }
+ // If the format list includes just one entry of VK_FORMAT_UNDEFINED,
+ // the surface has no preferred format. Otherwise, at least one
+ // supported format will be returned.
+	// NOTE: The "true ||" below intentionally bypasses the surface preference
+	// and forces VK_FORMAT_B8G8R8A8_UNORM for now.
+	if (true || (formatCount == 1 && surfFormats[0].format == VK_FORMAT_UNDEFINED)) {
+ format = VK_FORMAT_B8G8R8A8_UNORM;
+ } else {
+ if (formatCount < 1) {
+ free(surfFormats);
+ ERR_FAIL_V_MSG(ERR_CANT_CREATE, "formatCount less than 1");
+ }
+ format = surfFormats[0].format;
+ }
+ color_space = surfFormats[0].colorSpace;
+
+ free(surfFormats);
+
+ Error serr = _create_semaphores();
+ if (serr) {
+ return serr;
+ }
+
+ queues_initialized = true;
+ return OK;
+}
+
+Error VulkanContext::_create_semaphores() {
+ VkResult err;
+
+ // Create semaphores to synchronize acquiring presentable buffers before
+ // rendering and waiting for drawing to be complete before presenting
+ VkSemaphoreCreateInfo semaphoreCreateInfo = {
+ /*sType*/ VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO,
+ /*pNext*/ NULL,
+ /*flags*/ 0,
+ };
+
+ // Create fences that we can use to throttle if we get too far
+ // ahead of the image presents
+ VkFenceCreateInfo fence_ci = {
+ /*sType*/ VK_STRUCTURE_TYPE_FENCE_CREATE_INFO,
+ /*pNext*/ NULL,
+ /*flags*/ VK_FENCE_CREATE_SIGNALED_BIT
+ };
+ for (uint32_t i = 0; i < FRAME_LAG; i++) {
+ err = vkCreateFence(device, &fence_ci, NULL, &fences[i]);
+ ERR_FAIL_COND_V(err, ERR_CANT_CREATE);
+
+ err = vkCreateSemaphore(device, &semaphoreCreateInfo, NULL, &image_acquired_semaphores[i]);
+ ERR_FAIL_COND_V(err, ERR_CANT_CREATE);
+
+ err = vkCreateSemaphore(device, &semaphoreCreateInfo, NULL, &draw_complete_semaphores[i]);
+ ERR_FAIL_COND_V(err, ERR_CANT_CREATE);
+
+ if (separate_present_queue) {
+ err = vkCreateSemaphore(device, &semaphoreCreateInfo, NULL, &image_ownership_semaphores[i]);
+ ERR_FAIL_COND_V(err, ERR_CANT_CREATE);
+ }
+ }
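+	// With FRAME_LAG == 2, the per-frame fences let the CPU record frame N + 1
+	// while frame N is still in flight; prepare_buffers() then blocks on
+	// fences[frame_index], so at most FRAME_LAG submissions are outstanding
+	// at any time.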
+ frame_index = 0;
+
+ // Get Memory information and properties
+ vkGetPhysicalDeviceMemoryProperties(gpu, &memory_properties);
+
+ return OK;
+}
+
+int VulkanContext::_window_create(VkSurfaceKHR p_surface, int p_width, int p_height) {
+
+	if (!queues_initialized) {
+		// We use a single GPU, but we need a surface to initialize the
+		// queues, so this process must be deferred until a surface
+		// is created.
+		Error qerr = _initialize_queues(p_surface);
+		ERR_FAIL_COND_V(qerr != OK, -1);
+	}
+
+ Window window;
+ window.surface = p_surface;
+ window.width = p_width;
+ window.height = p_height;
+ Error err = _update_swap_chain(&window);
+ ERR_FAIL_COND_V(err != OK, -1);
+
+ int id = last_window_id;
+ windows[id] = window;
+ last_window_id++;
+ return id;
+}
+
+void VulkanContext::window_resize(int p_window, int p_width, int p_height) {
+ ERR_FAIL_COND(!windows.has(p_window));
+ windows[p_window].width = p_width;
+ windows[p_window].height = p_height;
+ _update_swap_chain(&windows[p_window]);
+}
+
+int VulkanContext::window_get_width(int p_window) {
+ ERR_FAIL_COND_V(!windows.has(p_window), -1);
+ return windows[p_window].width;
+}
+
+int VulkanContext::window_get_height(int p_window) {
+ ERR_FAIL_COND_V(!windows.has(p_window), -1);
+ return windows[p_window].height;
+}
+
+VkRenderPass VulkanContext::window_get_render_pass(int p_window) {
+ ERR_FAIL_COND_V(!windows.has(p_window), VK_NULL_HANDLE);
+ Window *w = &windows[p_window];
+ return w->render_pass;
+}
+
+VkFramebuffer VulkanContext::window_get_framebuffer(int p_window) {
+ ERR_FAIL_COND_V(!windows.has(p_window), VK_NULL_HANDLE);
+ ERR_FAIL_COND_V(!buffers_prepared, VK_NULL_HANDLE);
+ Window *w = &windows[p_window];
+	// current_buffer was set by the last fpAcquireNextImageKHR() call in prepare_buffers().
+ return w->swapchain_image_resources[w->current_buffer].framebuffer;
+}
+
+void VulkanContext::window_destroy(int p_window_id) {
+ ERR_FAIL_COND(!windows.has(p_window_id));
+ _clean_up_swap_chain(&windows[p_window_id]);
+ vkDestroySurfaceKHR(inst, windows[p_window_id].surface, NULL);
+ windows.erase(p_window_id);
+}
+
+Error VulkanContext::_clean_up_swap_chain(Window *window) {
+
+ if (!window->swapchain) {
+ return OK;
+ }
+ vkDeviceWaitIdle(device);
+
+	// Destroying the swapchain also destroys the images it owns.
+ fpDestroySwapchainKHR(device, window->swapchain, NULL);
+ window->swapchain = VK_NULL_HANDLE;
+ vkDestroyRenderPass(device, window->render_pass, NULL);
+ if (window->swapchain_image_resources) {
+ for (uint32_t i = 0; i < swapchainImageCount; i++) {
+ vkDestroyImageView(device, window->swapchain_image_resources[i].view, NULL);
+ vkDestroyFramebuffer(device, window->swapchain_image_resources[i].framebuffer, NULL);
+ }
+
+ free(window->swapchain_image_resources);
+ window->swapchain_image_resources = NULL;
+ }
+ if (separate_present_queue) {
+ vkDestroyCommandPool(device, window->present_cmd_pool, NULL);
+ }
+ return OK;
+}
+
+Error VulkanContext::_update_swap_chain(Window *window) {
+ VkResult err;
+
+ if (window->swapchain) {
+ _clean_up_swap_chain(window);
+ }
+
+ // Check the surface capabilities and formats
+ VkSurfaceCapabilitiesKHR surfCapabilities;
+ err = fpGetPhysicalDeviceSurfaceCapabilitiesKHR(gpu, window->surface, &surfCapabilities);
+ ERR_FAIL_COND_V(err, ERR_CANT_CREATE);
+
+ uint32_t presentModeCount;
+ err = fpGetPhysicalDeviceSurfacePresentModesKHR(gpu, window->surface, &presentModeCount, NULL);
+ ERR_FAIL_COND_V(err, ERR_CANT_CREATE);
+ VkPresentModeKHR *presentModes = (VkPresentModeKHR *)malloc(presentModeCount * sizeof(VkPresentModeKHR));
+ ERR_FAIL_COND_V(!presentModes, ERR_CANT_CREATE);
+ err = fpGetPhysicalDeviceSurfacePresentModesKHR(gpu, window->surface, &presentModeCount, presentModes);
+ if (err) {
+ free(presentModes);
+ ERR_FAIL_V(ERR_CANT_CREATE);
+ }
+
+ VkExtent2D swapchainExtent;
+ // width and height are either both 0xFFFFFFFF, or both not 0xFFFFFFFF.
+ if (surfCapabilities.currentExtent.width == 0xFFFFFFFF) {
+ // If the surface size is undefined, the size is set to the size
+ // of the images requested, which must fit within the minimum and
+ // maximum values.
+ swapchainExtent.width = window->width;
+ swapchainExtent.height = window->height;
+
+ if (swapchainExtent.width < surfCapabilities.minImageExtent.width) {
+ swapchainExtent.width = surfCapabilities.minImageExtent.width;
+ } else if (swapchainExtent.width > surfCapabilities.maxImageExtent.width) {
+ swapchainExtent.width = surfCapabilities.maxImageExtent.width;
+ }
+
+ if (swapchainExtent.height < surfCapabilities.minImageExtent.height) {
+ swapchainExtent.height = surfCapabilities.minImageExtent.height;
+ } else if (swapchainExtent.height > surfCapabilities.maxImageExtent.height) {
+ swapchainExtent.height = surfCapabilities.maxImageExtent.height;
+ }
+ } else {
+ // If the surface size is defined, the swap chain size must match
+ swapchainExtent = surfCapabilities.currentExtent;
+ window->width = surfCapabilities.currentExtent.width;
+ window->height = surfCapabilities.currentExtent.height;
+ }
+
+ if (window->width == 0 || window->height == 0) {
+ free(presentModes);
+ //likely window minimized, no swapchain created
+ return OK;
+ }
+ // The FIFO present mode is guaranteed by the spec to be supported
+ // and to have no tearing. It's a great default present mode to use.
+ VkPresentModeKHR swapchainPresentMode = VK_PRESENT_MODE_FIFO_KHR;
+
+ // There are times when you may wish to use another present mode. The
+ // following code shows how to select them, and the comments provide some
+ // reasons you may wish to use them.
+ //
+ // It should be noted that Vulkan 1.0 doesn't provide a method for
+ // synchronizing rendering with the presentation engine's display. There
+ // is a method provided for throttling rendering with the display, but
+ // there are some presentation engines for which this method will not work.
+ // If an application doesn't throttle its rendering, and if it renders much
+ // faster than the refresh rate of the display, this can waste power on
+ // mobile devices. That is because power is being spent rendering images
+ // that may never be seen.
+
+ // VK_PRESENT_MODE_IMMEDIATE_KHR is for applications that don't care about
+ // tearing, or have some way of synchronizing their rendering with the
+ // display.
+ // VK_PRESENT_MODE_MAILBOX_KHR may be useful for applications that
+ // generally render a new presentable image every refresh cycle, but are
+ // occasionally early. In this case, the application wants the new image
+ // to be displayed instead of the previously-queued-for-presentation image
+ // that has not yet been displayed.
+ // VK_PRESENT_MODE_FIFO_RELAXED_KHR is for applications that generally
+ // render a new presentable image every refresh cycle, but are occasionally
+ // late. In this case (perhaps because of stuttering/latency concerns),
+ // the application wants the late image to be immediately displayed, even
+ // though that may mean some tearing.
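+	// A latency-sensitive embedder could therefore request one of the modes
+	// above before the swapchain is (re)built; a minimal sketch, assuming the
+	// caller owns `window` (note that the check below hard-fails with
+	// ERR_CANT_CREATE if the requested mode is not enumerated):
+	//
+	//	window->presentMode = VK_PRESENT_MODE_MAILBOX_KHR;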
+
+ if (window->presentMode != swapchainPresentMode) {
+ for (size_t i = 0; i < presentModeCount; ++i) {
+ if (presentModes[i] == window->presentMode) {
+ swapchainPresentMode = window->presentMode;
+ break;
+ }
+ }
+ }
+ free(presentModes);
+ ERR_FAIL_COND_V_MSG(swapchainPresentMode != window->presentMode, ERR_CANT_CREATE, "Present mode specified is not supported\n");
+
+ // Determine the number of VkImages to use in the swap chain.
+ // Application desires to acquire 3 images at a time for triple
+ // buffering
+ uint32_t desiredNumOfSwapchainImages = 3;
+ if (desiredNumOfSwapchainImages < surfCapabilities.minImageCount) {
+ desiredNumOfSwapchainImages = surfCapabilities.minImageCount;
+ }
+ // If maxImageCount is 0, we can ask for as many images as we want;
+ // otherwise we're limited to maxImageCount
+ if ((surfCapabilities.maxImageCount > 0) && (desiredNumOfSwapchainImages > surfCapabilities.maxImageCount)) {
+ // Application must settle for fewer images than desired:
+ desiredNumOfSwapchainImages = surfCapabilities.maxImageCount;
+ }
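+	// Worked example: with minImageCount = 2 and maxImageCount = 8, the
+	// request for 3 images stands (triple buffering); with maxImageCount = 2
+	// it is clamped down to 2, and with maxImageCount = 0 any count >=
+	// minImageCount is allowed.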
+
+ VkSurfaceTransformFlagsKHR preTransform;
+ if (surfCapabilities.supportedTransforms & VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR) {
+ preTransform = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
+ } else {
+ preTransform = surfCapabilities.currentTransform;
+ }
+
+ // Find a supported composite alpha mode - one of these is guaranteed to be set
+ VkCompositeAlphaFlagBitsKHR compositeAlpha = VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR;
+ VkCompositeAlphaFlagBitsKHR compositeAlphaFlags[4] = {
+ VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR,
+ VK_COMPOSITE_ALPHA_PRE_MULTIPLIED_BIT_KHR,
+ VK_COMPOSITE_ALPHA_POST_MULTIPLIED_BIT_KHR,
+ VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR,
+ };
+ for (uint32_t i = 0; i < ARRAY_SIZE(compositeAlphaFlags); i++) {
+ if (surfCapabilities.supportedCompositeAlpha & compositeAlphaFlags[i]) {
+ compositeAlpha = compositeAlphaFlags[i];
+ break;
+ }
+ }
+
+ VkSwapchainCreateInfoKHR swapchain_ci = {
+ /*sType*/ VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR,
+ /*pNext*/ NULL,
+ /*flags*/ 0,
+ /*surface*/ window->surface,
+ /*minImageCount*/ desiredNumOfSwapchainImages,
+ /*imageFormat*/ format,
+ /*imageColorSpace*/ color_space,
+ /*imageExtent*/ {
+ /*width*/ swapchainExtent.width,
+ /*height*/ swapchainExtent.height,
+ },
+ /*imageArrayLayers*/ 1,
+ /*imageUsage*/ VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT,
+ /*imageSharingMode*/ VK_SHARING_MODE_EXCLUSIVE,
+ /*queueFamilyIndexCount*/ 0,
+ /*pQueueFamilyIndices*/ NULL,
+ /*preTransform*/ (VkSurfaceTransformFlagBitsKHR)preTransform,
+ /*compositeAlpha*/ compositeAlpha,
+ /*presentMode*/ swapchainPresentMode,
+ /*clipped*/ true,
+ /*oldSwapchain*/ NULL,
+ };
+
+ err = fpCreateSwapchainKHR(device, &swapchain_ci, NULL, &window->swapchain);
+ ERR_FAIL_COND_V(err, ERR_CANT_CREATE);
+
+ uint32_t sp_image_count;
+ err = fpGetSwapchainImagesKHR(device, window->swapchain, &sp_image_count, NULL);
+ ERR_FAIL_COND_V(err, ERR_CANT_CREATE);
+
+ if (swapchainImageCount == 0) {
+ //assign here for the first time.
+ swapchainImageCount = sp_image_count;
+ } else {
+ ERR_FAIL_COND_V(swapchainImageCount != sp_image_count, ERR_BUG);
+ }
+
+ VkImage *swapchainImages = (VkImage *)malloc(swapchainImageCount * sizeof(VkImage));
+ ERR_FAIL_COND_V(!swapchainImages, ERR_CANT_CREATE);
+ err = fpGetSwapchainImagesKHR(device, window->swapchain, &swapchainImageCount, swapchainImages);
+ if (err) {
+ free(swapchainImages);
+ ERR_FAIL_V(ERR_CANT_CREATE);
+ }
+
+ window->swapchain_image_resources =
+ (SwapchainImageResources *)malloc(sizeof(SwapchainImageResources) * swapchainImageCount);
+ if (!window->swapchain_image_resources) {
+ free(swapchainImages);
+ ERR_FAIL_V(ERR_CANT_CREATE);
+ }
+
+ for (uint32_t i = 0; i < swapchainImageCount; i++) {
+ VkImageViewCreateInfo color_image_view = {
+ /*sType*/ VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
+ /*pNext*/ NULL,
+ /*flags*/ 0,
+ /*image*/ swapchainImages[i],
+ /*viewType*/ VK_IMAGE_VIEW_TYPE_2D,
+ /*format*/ format,
+ /*components*/ {
+ /*r*/ VK_COMPONENT_SWIZZLE_R,
+ /*g*/ VK_COMPONENT_SWIZZLE_G,
+ /*b*/ VK_COMPONENT_SWIZZLE_B,
+ /*a*/ VK_COMPONENT_SWIZZLE_A,
+ },
+ /*subresourceRange*/ { /*aspectMask*/ VK_IMAGE_ASPECT_COLOR_BIT,
+ /*baseMipLevel*/ 0,
+ /*levelCount*/ 1,
+ /*baseArrayLayer*/ 0,
+ /*layerCount*/ 1 },
+ };
+
+		window->swapchain_image_resources[i].image = swapchainImages[i];
+
+ err = vkCreateImageView(device, &color_image_view, NULL, &window->swapchain_image_resources[i].view);
+ if (err) {
+ free(swapchainImages);
+ ERR_FAIL_V(ERR_CANT_CREATE);
+ }
+ }
+
+ free(swapchainImages);
+
+ /******** FRAMEBUFFER ************/
+
+ {
+		const VkAttachmentDescription attachment = {
+			/*flags*/ 0,
+ /*format*/ format,
+ /*samples*/ VK_SAMPLE_COUNT_1_BIT,
+ /*loadOp*/ VK_ATTACHMENT_LOAD_OP_CLEAR,
+ /*storeOp*/ VK_ATTACHMENT_STORE_OP_STORE,
+ /*stencilLoadOp*/ VK_ATTACHMENT_LOAD_OP_DONT_CARE,
+ /*stencilStoreOp*/ VK_ATTACHMENT_STORE_OP_DONT_CARE,
+ /*initialLayout*/ VK_IMAGE_LAYOUT_UNDEFINED,
+ /*finalLayout*/ VK_IMAGE_LAYOUT_PRESENT_SRC_KHR,
+		};
+ const VkAttachmentReference color_reference = {
+ /*attachment*/ 0,
+ /*layout*/ VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
+ };
+
+ const VkSubpassDescription subpass = {
+ /*flags*/ 0,
+ /*pipelineBindPoint*/ VK_PIPELINE_BIND_POINT_GRAPHICS,
+ /*inputAttachmentCount*/ 0,
+ /*pInputAttachments*/ NULL,
+ /*colorAttachmentCount*/ 1,
+ /*pColorAttachments*/ &color_reference,
+ /*pResolveAttachments*/ NULL,
+ /*pDepthStencilAttachment*/ NULL,
+ /*preserveAttachmentCount*/ 0,
+ /*pPreserveAttachments*/ NULL,
+ };
+ const VkRenderPassCreateInfo rp_info = {
+			/*sType*/ VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO,
+ /*pNext*/ NULL,
+ /*flags*/ 0,
+ /*attachmentCount*/ 1,
+ /*pAttachments*/ &attachment,
+ /*subpassCount*/ 1,
+ /*pSubpasses*/ &subpass,
+ /*dependencyCount*/ 0,
+ /*pDependencies*/ NULL,
+ };
+
+ err = vkCreateRenderPass(device, &rp_info, NULL, &window->render_pass);
+ ERR_FAIL_COND_V(err, ERR_CANT_CREATE);
+
+ for (uint32_t i = 0; i < swapchainImageCount; i++) {
+ const VkFramebufferCreateInfo fb_info = {
+ /*sType*/ VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO,
+ /*pNext*/ NULL,
+ /*flags*/ 0,
+ /*renderPass*/ window->render_pass,
+ /*attachmentCount*/ 1,
+ /*pAttachments*/ &window->swapchain_image_resources[i].view,
+ /*width*/ (uint32_t)window->width,
+ /*height*/ (uint32_t)window->height,
+ /*layers*/ 1,
+ };
+
+ err = vkCreateFramebuffer(device, &fb_info, NULL, &window->swapchain_image_resources[i].framebuffer);
+ ERR_FAIL_COND_V(err, ERR_CANT_CREATE);
+ }
+ }
+
+ /******** SEPARATE PRESENT QUEUE ************/
+
+ if (separate_present_queue) {
+ const VkCommandPoolCreateInfo present_cmd_pool_info = {
+ /*sType*/ VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO,
+ /*pNext*/ NULL,
+ /*flags*/ 0,
+ /*queueFamilyIndex*/ present_queue_family_index,
+ };
+ err = vkCreateCommandPool(device, &present_cmd_pool_info, NULL, &window->present_cmd_pool);
+ ERR_FAIL_COND_V(err, ERR_CANT_CREATE);
+ const VkCommandBufferAllocateInfo present_cmd_info = {
+ /*sType*/ VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,
+ /*pNext*/ NULL,
+ /*commandPool*/ window->present_cmd_pool,
+ /*level*/ VK_COMMAND_BUFFER_LEVEL_PRIMARY,
+ /*commandBufferCount*/ 1,
+ };
+ for (uint32_t i = 0; i < swapchainImageCount; i++) {
+ err = vkAllocateCommandBuffers(device, &present_cmd_info,
+ &window->swapchain_image_resources[i].graphics_to_present_cmd);
+ ERR_FAIL_COND_V(err, ERR_CANT_CREATE);
+
+ const VkCommandBufferBeginInfo cmd_buf_info = {
+ /*sType*/ VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,
+ /*pNext*/ NULL,
+ /*flags*/ VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT,
+ /*pInheritanceInfo*/ NULL,
+ };
+ err = vkBeginCommandBuffer(window->swapchain_image_resources[i].graphics_to_present_cmd, &cmd_buf_info);
+ ERR_FAIL_COND_V(err, ERR_CANT_CREATE);
+
+ VkImageMemoryBarrier image_ownership_barrier = {
+ /*sType*/ VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
+ /*pNext*/ NULL,
+ /*srcAccessMask*/ 0,
+ /*dstAccessMask*/ VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
+ /*oldLayout*/ VK_IMAGE_LAYOUT_PRESENT_SRC_KHR,
+ /*newLayout*/ VK_IMAGE_LAYOUT_PRESENT_SRC_KHR,
+ /*srcQueueFamilyIndex*/ graphics_queue_family_index,
+ /*dstQueueFamilyIndex*/ present_queue_family_index,
+ /*image*/ window->swapchain_image_resources[i].image,
+ /*subresourceRange*/ { VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1 }
+ };
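+			// This records the acquire half of a queue family ownership
+			// transfer: identical old/new layouts with differing queue family
+			// indices move the image from the graphics family to the present
+			// family. A matching release barrier is expected to be recorded
+			// on the graphics queue at the end of the frame.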
+
+ vkCmdPipelineBarrier(window->swapchain_image_resources[i].graphics_to_present_cmd, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
+ VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, 0, 0, NULL, 0, NULL, 1, &image_ownership_barrier);
+ err = vkEndCommandBuffer(window->swapchain_image_resources[i].graphics_to_present_cmd);
+ ERR_FAIL_COND_V(err, ERR_CANT_CREATE);
+ }
+ }
+
+ //reset current buffer
+ window->current_buffer = 0;
+
+ return OK;
+}
+
+Error VulkanContext::initialize() {
+
+ Error err = _create_physical_device();
+ if (err) {
+ return err;
+ }
+	print_line("Vulkan physical device created successfully.");
+ return OK;
+}
+
+void VulkanContext::set_setup_buffer(const VkCommandBuffer &pCommandBuffer) {
+ command_buffer_queue.write[0] = pCommandBuffer;
+}
+
+void VulkanContext::append_command_buffer(const VkCommandBuffer &pCommandBuffer) {
+
+ if (command_buffer_queue.size() <= command_buffer_count) {
+ command_buffer_queue.resize(command_buffer_count + 1);
+ }
+
+ command_buffer_queue.write[command_buffer_count] = pCommandBuffer;
+ command_buffer_count++;
+}
+
+void VulkanContext::flush(bool p_flush_setup, bool p_flush_pending) {
+
+ // ensure everything else pending is executed
+ vkDeviceWaitIdle(device);
+
+ //flush the pending setup buffer
+
+ if (p_flush_setup && command_buffer_queue[0]) {
+
+		// Submit the setup buffer and wait for completion via vkDeviceWaitIdle().
+ VkSubmitInfo submit_info;
+ submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
+ submit_info.pNext = NULL;
+ submit_info.pWaitDstStageMask = NULL;
+ submit_info.waitSemaphoreCount = 0;
+ submit_info.pWaitSemaphores = NULL;
+ submit_info.commandBufferCount = 1;
+ submit_info.pCommandBuffers = command_buffer_queue.ptr();
+ submit_info.signalSemaphoreCount = 0;
+ submit_info.pSignalSemaphores = NULL;
+ VkResult err = vkQueueSubmit(graphics_queue, 1, &submit_info, VK_NULL_HANDLE);
+ command_buffer_queue.write[0] = NULL;
+ ERR_FAIL_COND(err);
+ vkDeviceWaitIdle(device);
+ }
+
+ if (p_flush_pending && command_buffer_count > 1) {
+
+		// Submit the pending buffers and wait for completion via vkDeviceWaitIdle().
+
+ VkSubmitInfo submit_info;
+ submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
+ submit_info.pNext = NULL;
+ submit_info.pWaitDstStageMask = NULL;
+ submit_info.waitSemaphoreCount = 0;
+ submit_info.pWaitSemaphores = NULL;
+ submit_info.commandBufferCount = command_buffer_count - 1;
+ submit_info.pCommandBuffers = command_buffer_queue.ptr() + 1;
+ submit_info.signalSemaphoreCount = 0;
+ submit_info.pSignalSemaphores = NULL;
+ VkResult err = vkQueueSubmit(graphics_queue, 1, &submit_info, VK_NULL_HANDLE);
+ ERR_FAIL_COND(err);
+ vkDeviceWaitIdle(device);
+
+ command_buffer_count = 1;
+ }
+}
+
+Error VulkanContext::prepare_buffers() {
+
+ if (!queues_initialized) {
+ return OK;
+ }
+
+ VkResult err;
+
+ // Ensure no more than FRAME_LAG renderings are outstanding
+ vkWaitForFences(device, 1, &fences[frame_index], VK_TRUE, UINT64_MAX);
+ vkResetFences(device, 1, &fences[frame_index]);
+
+ for (Map<int, Window>::Element *E = windows.front(); E; E = E->next()) {
+
+ Window *w = &E->get();
+
+ if (w->swapchain == VK_NULL_HANDLE) {
+ continue;
+ }
+
+ do {
+ // Get the index of the next available swapchain image:
+ err =
+ fpAcquireNextImageKHR(device, w->swapchain, UINT64_MAX,
+ image_acquired_semaphores[frame_index], VK_NULL_HANDLE, &w->current_buffer);
+
+ if (err == VK_ERROR_OUT_OF_DATE_KHR) {
+ // swapchain is out of date (e.g. the window was resized) and
+ // must be recreated:
+				print_line("early out of date");
+ //resize_notify();
+ _update_swap_chain(w);
+ } else if (err == VK_SUBOPTIMAL_KHR) {
+ print_line("early suboptimal");
+ // swapchain is not as optimal as it could be, but the platform's
+ // presentation engine will still present the image correctly.
+ break;
+ } else {
+ ERR_FAIL_COND_V(err, ERR_CANT_CREATE);
+ }
+ } while (err != VK_SUCCESS);
+ }
+
+ buffers_prepared = true;
+
+ return OK;
+}
+
+Error VulkanContext::swap_buffers() {
+
+ if (!queues_initialized) {
+ return OK;
+ }
+
+ // print_line("swapbuffers?");
+ VkResult err;
+
+#if 0
+ if (VK_GOOGLE_display_timing_enabled) {
+ // Look at what happened to previous presents, and make appropriate
+ // adjustments in timing:
+ DemoUpdateTargetIPD(demo);
+
+		// Note: a real application would position its geometry so that it's in
+		// the correct location for when the next image is presented. It might
+ // also wait, so that there's less latency between any input and when
+ // the next image is rendered/presented. This demo program is so
+ // simple that it doesn't do either of those.
+ }
+#endif
+ // Wait for the image acquired semaphore to be signaled to ensure
+ // that the image won't be rendered to until the presentation
+ // engine has fully released ownership to the application, and it is
+ // okay to render to the image.
+
+ const VkCommandBuffer *commands_ptr = NULL;
+ uint32_t commands_to_submit = 0;
+
+ if (command_buffer_queue[0] == NULL) {
+		// No setup command buffer; if there are draw commands, submit them starting at slot 1.
+ if (command_buffer_count > 1) {
+ commands_ptr = command_buffer_queue.ptr() + 1;
+ commands_to_submit = command_buffer_count - 1;
+ }
+ } else {
+ commands_ptr = command_buffer_queue.ptr();
+ commands_to_submit = command_buffer_count;
+ }
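+	// Worked example: with a setup buffer in slot 0 and two draw buffers, all
+	// three are submitted; if slot 0 is NULL, only the two draw buffers
+	// (slots 1..2) are submitted, and commands_to_submit stays 0 when there
+	// is nothing to do.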
+
+ VkPipelineStageFlags pipe_stage_flags;
+ VkSubmitInfo submit_info;
+ submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
+ submit_info.pNext = NULL;
+ submit_info.pWaitDstStageMask = &pipe_stage_flags;
+ pipe_stage_flags = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
+ submit_info.waitSemaphoreCount = 1;
+ submit_info.pWaitSemaphores = &image_acquired_semaphores[frame_index];
+ submit_info.commandBufferCount = commands_to_submit;
+ submit_info.pCommandBuffers = commands_ptr;
+ submit_info.signalSemaphoreCount = 1;
+ submit_info.pSignalSemaphores = &draw_complete_semaphores[frame_index];
+ err = vkQueueSubmit(graphics_queue, 1, &submit_info, fences[frame_index]);
+ ERR_FAIL_COND_V(err, ERR_CANT_CREATE);
+
+ command_buffer_queue.write[0] = NULL;
+ command_buffer_count = 1;
+
+ if (separate_present_queue) {
+ // If we are using separate queues, change image ownership to the
+ // present queue before presenting, waiting for the draw complete
+ // semaphore and signalling the ownership released semaphore when finished
+ VkFence nullFence = VK_NULL_HANDLE;
+ pipe_stage_flags = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
+ submit_info.waitSemaphoreCount = 1;
+ submit_info.pWaitSemaphores = &draw_complete_semaphores[frame_index];
+ submit_info.commandBufferCount = 0;
+
+		VkCommandBuffer *cmdbufptr = (VkCommandBuffer *)alloca(sizeof(VkCommandBuffer) * windows.size());
+ submit_info.pCommandBuffers = cmdbufptr;
+
+ for (Map<int, Window>::Element *E = windows.front(); E; E = E->next()) {
+ Window *w = &E->get();
+
+ if (w->swapchain == VK_NULL_HANDLE) {
+ continue;
+ }
+ cmdbufptr[submit_info.commandBufferCount] = w->swapchain_image_resources[w->current_buffer].graphics_to_present_cmd;
+ submit_info.commandBufferCount++;
+ }
+
+ submit_info.signalSemaphoreCount = 1;
+ submit_info.pSignalSemaphores = &image_ownership_semaphores[frame_index];
+ err = vkQueueSubmit(present_queue, 1, &submit_info, nullFence);
+ ERR_FAIL_COND_V(err, ERR_CANT_CREATE);
+ }
+
+ // If we are using separate queues we have to wait for image ownership,
+ // otherwise wait for draw complete
+ VkPresentInfoKHR present = {
+ /*sType*/ VK_STRUCTURE_TYPE_PRESENT_INFO_KHR,
+ /*pNext*/ NULL,
+ /*waitSemaphoreCount*/ 1,
+ /*pWaitSemaphores*/ (separate_present_queue) ? &image_ownership_semaphores[frame_index] : &draw_complete_semaphores[frame_index],
+ /*swapchainCount*/ 0,
+		/*pSwapchains*/ NULL,
+ /*pImageIndices*/ NULL,
+ /*pResults*/ NULL,
+ };
+
+	VkSwapchainKHR *pSwapchains = (VkSwapchainKHR *)alloca(sizeof(VkSwapchainKHR) * windows.size());
+	uint32_t *pImageIndices = (uint32_t *)alloca(sizeof(uint32_t) * windows.size());
+
+ present.pSwapchains = pSwapchains;
+ present.pImageIndices = pImageIndices;
+
+ for (Map<int, Window>::Element *E = windows.front(); E; E = E->next()) {
+ Window *w = &E->get();
+
+ if (w->swapchain == VK_NULL_HANDLE) {
+ continue;
+ }
+ pSwapchains[present.swapchainCount] = w->swapchain;
+ pImageIndices[present.swapchainCount] = w->current_buffer;
+ present.swapchainCount++;
+ }
+
+#if 0
+ if (VK_KHR_incremental_present_enabled) {
+ // If using VK_KHR_incremental_present, we provide a hint of the region
+ // that contains changed content relative to the previously-presented
+ // image. The implementation can use this hint in order to save
+ // work/power (by only copying the region in the hint). The
+ // implementation is free to ignore the hint though, and so we must
+ // ensure that the entire image has the correctly-drawn content.
+ uint32_t eighthOfWidth = width / 8;
+ uint32_t eighthOfHeight = height / 8;
+ VkRectLayerKHR rect = {
+ /*offset.x*/ eighthOfWidth,
+ /*offset.y*/ eighthOfHeight,
+ /*extent.width*/ eighthOfWidth * 6,
+ /*extent.height*/ eighthOfHeight * 6,
+ /*layer*/ 0,
+ };
+ VkPresentRegionKHR region = {
+ /*rectangleCount*/ 1,
+ /*pRectangles*/ &rect,
+ };
+ VkPresentRegionsKHR regions = {
+ /*sType*/ VK_STRUCTURE_TYPE_PRESENT_REGIONS_KHR,
+ /*pNext*/ present.pNext,
+ /*swapchainCount*/ present.swapchainCount,
+ /*pRegions*/ &region,
+ };
+ present.pNext = &regions;
+ }
+#endif
+
+#if 0
+ if (VK_GOOGLE_display_timing_enabled) {
+ VkPresentTimeGOOGLE ptime;
+ if (prev_desired_present_time == 0) {
+ // This must be the first present for this swapchain.
+ //
+ // We don't know where we are relative to the presentation engine's
+ // display's refresh cycle. We also don't know how long rendering
+ // takes. Let's make a grossly-simplified assumption that the
+ // desiredPresentTime should be half way between now and
+ // now+target_IPD. We will adjust over time.
+ uint64_t curtime = getTimeInNanoseconds();
+ if (curtime == 0) {
+ // Since we didn't find out the current time, don't give a
+ // desiredPresentTime:
+ ptime.desiredPresentTime = 0;
+ } else {
+ ptime.desiredPresentTime = curtime + (target_IPD >> 1);
+ }
+ } else {
+ ptime.desiredPresentTime = (prev_desired_present_time + target_IPD);
+ }
+ ptime.presentID = next_present_id++;
+ prev_desired_present_time = ptime.desiredPresentTime;
+
+ VkPresentTimesInfoGOOGLE present_time = {
+ /*sType*/ VK_STRUCTURE_TYPE_PRESENT_TIMES_INFO_GOOGLE,
+ /*pNext*/ present.pNext,
+ /*swapchainCount*/ present.swapchainCount,
+ /*pTimes*/ &ptime,
+ };
+ if (VK_GOOGLE_display_timing_enabled) {
+ present.pNext = &present_time;
+ }
+ }
+#endif
+ static int total_frames = 0;
+ total_frames++;
+ // print_line("current buffer: " + itos(current_buffer));
+ err = fpQueuePresentKHR(present_queue, &present);
+
+ frame_index += 1;
+ frame_index %= FRAME_LAG;
+
+ if (err == VK_ERROR_OUT_OF_DATE_KHR) {
+ // swapchain is out of date (e.g. the window was resized) and
+ // must be recreated:
+ print_line("out of date");
+ resize_notify();
+ } else if (err == VK_SUBOPTIMAL_KHR) {
+ // swapchain is not as optimal as it could be, but the platform's
+ // presentation engine will still present the image correctly.
+ print_line("suboptimal");
+ } else {
+ ERR_FAIL_COND_V(err, ERR_CANT_CREATE);
+ }
+
+ buffers_prepared = false;
+ return OK;
+}
+
+void VulkanContext::resize_notify() {
+}
+
+VkDevice VulkanContext::get_device() {
+ return device;
+}
+
+VkPhysicalDevice VulkanContext::get_physical_device() {
+ return gpu;
+}
+
+int VulkanContext::get_swapchain_image_count() const {
+	return swapchainImageCount;
+}
+
+// Note: despite the name, this returns the graphics queue *family index*, not a VkQueue.
+uint32_t VulkanContext::get_graphics_queue() const {
+	return graphics_queue_family_index;
+}
+
+VkFormat VulkanContext::get_screen_format() const {
+ return format;
+}
+
+VkPhysicalDeviceLimits VulkanContext::get_device_limits() const {
+ return gpu_props.limits;
+}
+
+VulkanContext::VulkanContext() {
+ instance_validation_layers = NULL;
+ use_validation_layers = true;
+ VK_KHR_incremental_present_enabled = true;
+ VK_GOOGLE_display_timing_enabled = true;
+
+ command_buffer_queue.resize(1); //first one is the setup command always
+ command_buffer_queue.write[0] = NULL;
+ command_buffer_count = 1;
+ queues_initialized = false;
+
+ buffers_prepared = false;
+ swapchainImageCount = 0;
+ last_window_id = 0;
+}
+
+VulkanContext::~VulkanContext() {
+ if (queue_props) {
+ free(queue_props);
+ }
+}
diff --git a/drivers/vulkan/vulkan_context.h b/drivers/vulkan/vulkan_context.h
new file mode 100644
index 0000000000..458cb6d793
--- /dev/null
+++ b/drivers/vulkan/vulkan_context.h
@@ -0,0 +1,212 @@
+/*************************************************************************/
+/* vulkan_context.h */
+/*************************************************************************/
+/* This file is part of: */
+/* GODOT ENGINE */
+/* https://godotengine.org */
+/*************************************************************************/
+/* Copyright (c) 2007-2020 Juan Linietsky, Ariel Manzur. */
+/* Copyright (c) 2014-2020 Godot Engine contributors (cf. AUTHORS.md). */
+/* */
+/* Permission is hereby granted, free of charge, to any person obtaining */
+/* a copy of this software and associated documentation files (the */
+/* "Software"), to deal in the Software without restriction, including */
+/* without limitation the rights to use, copy, modify, merge, publish, */
+/* distribute, sublicense, and/or sell copies of the Software, and to */
+/* permit persons to whom the Software is furnished to do so, subject to */
+/* the following conditions: */
+/* */
+/* The above copyright notice and this permission notice shall be */
+/* included in all copies or substantial portions of the Software. */
+/* */
+/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
+/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
+/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/
+/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
+/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
+/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
+/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
+/*************************************************************************/
+
+#ifndef VULKAN_CONTEXT_H
+#define VULKAN_CONTEXT_H
+
+#include "core/error_list.h"
+#include "core/map.h"
+#include "core/ustring.h"
+#include <vulkan/vulkan.h>
+
+class VulkanContext {
+
+ enum {
+ MAX_EXTENSIONS = 128,
+ MAX_LAYERS = 64,
+ FRAME_LAG = 2
+ };
+
+ bool use_validation_layers;
+
+ VkInstance inst;
+ VkSurfaceKHR surface;
+ VkPhysicalDevice gpu;
+ VkPhysicalDeviceProperties gpu_props;
+ uint32_t queue_family_count;
+ VkQueueFamilyProperties *queue_props;
+ VkDevice device;
+
+ //present
+ bool queues_initialized;
+ uint32_t graphics_queue_family_index;
+ uint32_t present_queue_family_index;
+ bool separate_present_queue;
+ VkQueue graphics_queue;
+ VkQueue present_queue;
+ VkColorSpaceKHR color_space;
+ VkFormat format;
+ VkSemaphore image_acquired_semaphores[FRAME_LAG];
+ VkSemaphore draw_complete_semaphores[FRAME_LAG];
+ VkSemaphore image_ownership_semaphores[FRAME_LAG];
+ int frame_index;
+ VkFence fences[FRAME_LAG];
+ VkPhysicalDeviceMemoryProperties memory_properties;
+ VkPhysicalDeviceFeatures physical_device_features;
+
+	struct SwapchainImageResources {
+		VkImage image;
+		VkCommandBuffer graphics_to_present_cmd;
+		VkImageView view;
+		VkFramebuffer framebuffer;
+	};
+
+ struct Window {
+
+		bool is_minimized;
+ VkSurfaceKHR surface;
+ VkSwapchainKHR swapchain;
+ SwapchainImageResources *swapchain_image_resources;
+ VkPresentModeKHR presentMode;
+ uint32_t current_buffer;
+ int width;
+ int height;
+ VkCommandPool present_cmd_pool; //for separate present queue
+
+ VkRenderPass render_pass;
+
+ Window() {
+ width = 0;
+ height = 0;
+ render_pass = VK_NULL_HANDLE;
+ current_buffer = 0;
+ surface = VK_NULL_HANDLE;
+			swapchain_image_resources = NULL;
+			swapchain = VK_NULL_HANDLE;
+			is_minimized = false;
+ presentMode = VK_PRESENT_MODE_FIFO_KHR;
+ }
+ };
+
+ Map<int, Window> windows;
+ int last_window_id;
+ uint32_t swapchainImageCount;
+
+ //commands
+
+ bool prepared;
+
+ //extensions
+ bool VK_KHR_incremental_present_enabled;
+ bool VK_GOOGLE_display_timing_enabled;
+ const char **instance_validation_layers;
+ uint32_t enabled_extension_count;
+ uint32_t enabled_layer_count;
+ const char *extension_names[MAX_EXTENSIONS];
+ const char *enabled_layers[MAX_LAYERS];
+
+ PFN_vkCreateDebugUtilsMessengerEXT CreateDebugUtilsMessengerEXT;
+ PFN_vkDestroyDebugUtilsMessengerEXT DestroyDebugUtilsMessengerEXT;
+ PFN_vkSubmitDebugUtilsMessageEXT SubmitDebugUtilsMessageEXT;
+ PFN_vkCmdBeginDebugUtilsLabelEXT CmdBeginDebugUtilsLabelEXT;
+ PFN_vkCmdEndDebugUtilsLabelEXT CmdEndDebugUtilsLabelEXT;
+ PFN_vkCmdInsertDebugUtilsLabelEXT CmdInsertDebugUtilsLabelEXT;
+ PFN_vkSetDebugUtilsObjectNameEXT SetDebugUtilsObjectNameEXT;
+ PFN_vkGetPhysicalDeviceSurfaceSupportKHR fpGetPhysicalDeviceSurfaceSupportKHR;
+ PFN_vkGetPhysicalDeviceSurfaceCapabilitiesKHR fpGetPhysicalDeviceSurfaceCapabilitiesKHR;
+ PFN_vkGetPhysicalDeviceSurfaceFormatsKHR fpGetPhysicalDeviceSurfaceFormatsKHR;
+ PFN_vkGetPhysicalDeviceSurfacePresentModesKHR fpGetPhysicalDeviceSurfacePresentModesKHR;
+ PFN_vkCreateSwapchainKHR fpCreateSwapchainKHR;
+ PFN_vkDestroySwapchainKHR fpDestroySwapchainKHR;
+ PFN_vkGetSwapchainImagesKHR fpGetSwapchainImagesKHR;
+ PFN_vkAcquireNextImageKHR fpAcquireNextImageKHR;
+ PFN_vkQueuePresentKHR fpQueuePresentKHR;
+ PFN_vkGetRefreshCycleDurationGOOGLE fpGetRefreshCycleDurationGOOGLE;
+ PFN_vkGetPastPresentationTimingGOOGLE fpGetPastPresentationTimingGOOGLE;
+
+ VkDebugUtilsMessengerEXT dbg_messenger;
+
+ Error _create_validation_layers();
+ Error _initialize_extensions();
+
+ VkBool32 _check_layers(uint32_t check_count, const char **check_names, uint32_t layer_count, VkLayerProperties *layers);
+ static VKAPI_ATTR VkBool32 VKAPI_CALL _debug_messenger_callback(VkDebugUtilsMessageSeverityFlagBitsEXT messageSeverity,
+ VkDebugUtilsMessageTypeFlagsEXT messageType,
+ const VkDebugUtilsMessengerCallbackDataEXT *pCallbackData,
+ void *pUserData);
+
+ Error _create_physical_device();
+
+ Error _initialize_queues(VkSurfaceKHR surface);
+
+ Error _create_device();
+
+ Error _clean_up_swap_chain(Window *window);
+
+ Error _update_swap_chain(Window *window);
+
+ Error _create_swap_chain();
+ Error _create_semaphores();
+
+ Vector<VkCommandBuffer> command_buffer_queue;
+ int command_buffer_count;
+
+protected:
+ virtual const char *_get_platform_surface_extension() const = 0;
+ // virtual VkResult _create_surface(VkSurfaceKHR *surface, VkInstance p_instance) = 0;
+
+ virtual int _window_create(VkSurfaceKHR p_surface, int p_width, int p_height);
+
+ VkInstance _get_instance() {
+ return inst;
+ }
+
+ bool buffers_prepared;
+
+public:
+ VkDevice get_device();
+ VkPhysicalDevice get_physical_device();
+ int get_swapchain_image_count() const;
+ uint32_t get_graphics_queue() const;
+
+ void window_resize(int p_window_id, int p_width, int p_height);
+ int window_get_width(int p_window = 0);
+ int window_get_height(int p_window = 0);
+ void window_destroy(int p_window_id);
+ VkFramebuffer window_get_framebuffer(int p_window = 0);
+ VkRenderPass window_get_render_pass(int p_window = 0);
+
+ VkFormat get_screen_format() const;
+ VkPhysicalDeviceLimits get_device_limits() const;
+
+ void set_setup_buffer(const VkCommandBuffer &pCommandBuffer);
+ void append_command_buffer(const VkCommandBuffer &pCommandBuffer);
+ void resize_notify();
+ void flush(bool p_flush_setup = false, bool p_flush_pending = false);
+ Error prepare_buffers();
+ Error swap_buffers();
+ Error initialize();
+
+ VulkanContext();
+ virtual ~VulkanContext();
+};
+
+#endif // VULKAN_CONTEXT_H