Diffstat (limited to 'drivers/vulkan/rendering_device_vulkan.cpp')
-rw-r--r--  drivers/vulkan/rendering_device_vulkan.cpp | 1044
1 file changed, 288 insertions, 756 deletions
diff --git a/drivers/vulkan/rendering_device_vulkan.cpp b/drivers/vulkan/rendering_device_vulkan.cpp
index 6e61006395..327f067e65 100644
--- a/drivers/vulkan/rendering_device_vulkan.cpp
+++ b/drivers/vulkan/rendering_device_vulkan.cpp
@@ -1,32 +1,32 @@
-/*************************************************************************/
-/* rendering_device_vulkan.cpp */
-/*************************************************************************/
-/* This file is part of: */
-/* GODOT ENGINE */
-/* https://godotengine.org */
-/*************************************************************************/
-/* Copyright (c) 2007-2022 Juan Linietsky, Ariel Manzur. */
-/* Copyright (c) 2014-2022 Godot Engine contributors (cf. AUTHORS.md). */
-/* */
-/* Permission is hereby granted, free of charge, to any person obtaining */
-/* a copy of this software and associated documentation files (the */
-/* "Software"), to deal in the Software without restriction, including */
-/* without limitation the rights to use, copy, modify, merge, publish, */
-/* distribute, sublicense, and/or sell copies of the Software, and to */
-/* permit persons to whom the Software is furnished to do so, subject to */
-/* the following conditions: */
-/* */
-/* The above copyright notice and this permission notice shall be */
-/* included in all copies or substantial portions of the Software. */
-/* */
-/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
-/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
-/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/
-/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
-/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
-/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
-/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
-/*************************************************************************/
+/**************************************************************************/
+/* rendering_device_vulkan.cpp */
+/**************************************************************************/
+/* This file is part of: */
+/* GODOT ENGINE */
+/* https://godotengine.org */
+/**************************************************************************/
+/* Copyright (c) 2014-present Godot Engine contributors (see AUTHORS.md). */
+/* Copyright (c) 2007-2014 Juan Linietsky, Ariel Manzur. */
+/* */
+/* Permission is hereby granted, free of charge, to any person obtaining */
+/* a copy of this software and associated documentation files (the */
+/* "Software"), to deal in the Software without restriction, including */
+/* without limitation the rights to use, copy, modify, merge, publish, */
+/* distribute, sublicense, and/or sell copies of the Software, and to */
+/* permit persons to whom the Software is furnished to do so, subject to */
+/* the following conditions: */
+/* */
+/* The above copyright notice and this permission notice shall be */
+/* included in all copies or substantial portions of the Software. */
+/* */
+/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
+/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
+/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. */
+/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
+/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
+/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
+/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
+/**************************************************************************/
#include "rendering_device_vulkan.h"
@@ -36,18 +36,16 @@
#include "core/io/marshalls.h"
#include "core/os/os.h"
#include "core/templates/hashfuncs.h"
-#include "core/version.h"
#include "drivers/vulkan/vulkan_context.h"
#include "thirdparty/misc/smolv.h"
-#include "thirdparty/spirv-reflect/spirv_reflect.h"
//#define FORCE_FULL_BARRIER
static const uint32_t SMALL_ALLOCATION_MAX_SIZE = 4096;
// Get the Vulkan object information and possible stage access types (bitwise OR'd with incoming values).
-RenderingDeviceVulkan::Buffer *RenderingDeviceVulkan::_get_buffer_from_owner(RID p_buffer, VkPipelineStageFlags &r_stage_mask, VkAccessFlags &r_access_mask, uint32_t p_post_barrier) {
+RenderingDeviceVulkan::Buffer *RenderingDeviceVulkan::_get_buffer_from_owner(RID p_buffer, VkPipelineStageFlags &r_stage_mask, VkAccessFlags &r_access_mask, BitField<BarrierMask> p_post_barrier) {
Buffer *buffer = nullptr;
if (vertex_buffer_owner.owns(p_buffer)) {
buffer = vertex_buffer_owner.get_or_null(p_buffer);
@@ -55,11 +53,11 @@ RenderingDeviceVulkan::Buffer *RenderingDeviceVulkan::_get_buffer_from_owner(RID
r_stage_mask |= VK_PIPELINE_STAGE_VERTEX_INPUT_BIT;
r_access_mask |= VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT;
if (buffer->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) {
- if (p_post_barrier & BARRIER_MASK_RASTER) {
+ if (p_post_barrier.has_flag(BARRIER_MASK_RASTER)) {
r_access_mask |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
r_stage_mask |= VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
}
- if (p_post_barrier & BARRIER_MASK_COMPUTE) {
+ if (p_post_barrier.has_flag(BARRIER_MASK_COMPUTE)) {
r_access_mask |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
r_stage_mask |= VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
}
@@ -69,20 +67,20 @@ RenderingDeviceVulkan::Buffer *RenderingDeviceVulkan::_get_buffer_from_owner(RID
r_access_mask |= VK_ACCESS_INDEX_READ_BIT;
buffer = index_buffer_owner.get_or_null(p_buffer);
} else if (uniform_buffer_owner.owns(p_buffer)) {
- if (p_post_barrier & BARRIER_MASK_RASTER) {
+ if (p_post_barrier.has_flag(BARRIER_MASK_RASTER)) {
r_stage_mask |= VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
}
- if (p_post_barrier & BARRIER_MASK_COMPUTE) {
+ if (p_post_barrier.has_flag(BARRIER_MASK_COMPUTE)) {
r_stage_mask |= VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
}
r_access_mask |= VK_ACCESS_UNIFORM_READ_BIT;
buffer = uniform_buffer_owner.get_or_null(p_buffer);
} else if (texture_buffer_owner.owns(p_buffer)) {
- if (p_post_barrier & BARRIER_MASK_RASTER) {
+ if (p_post_barrier.has_flag(BARRIER_MASK_RASTER)) {
r_stage_mask |= VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
r_access_mask |= VK_ACCESS_SHADER_READ_BIT;
}
- if (p_post_barrier & BARRIER_MASK_COMPUTE) {
+ if (p_post_barrier.has_flag(BARRIER_MASK_COMPUTE)) {
r_stage_mask |= VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
r_access_mask |= VK_ACCESS_SHADER_READ_BIT;
}
@@ -90,11 +88,11 @@ RenderingDeviceVulkan::Buffer *RenderingDeviceVulkan::_get_buffer_from_owner(RID
buffer = &texture_buffer_owner.get_or_null(p_buffer)->buffer;
} else if (storage_buffer_owner.owns(p_buffer)) {
buffer = storage_buffer_owner.get_or_null(p_buffer);
- if (p_post_barrier & BARRIER_MASK_RASTER) {
+ if (p_post_barrier.has_flag(BARRIER_MASK_RASTER)) {
r_stage_mask |= VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
r_access_mask |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
}
- if (p_post_barrier & BARRIER_MASK_COMPUTE) {
+ if (p_post_barrier.has_flag(BARRIER_MASK_COMPUTE)) {
r_stage_mask |= VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
r_access_mask |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
}
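
This first hunk is representative of the mechanical change repeated throughout the file: the raw `uint32_t` barrier masks become typed `BitField<BarrierMask>` parameters, and every `mask & FLAG` test becomes `mask.has_flag(FLAG)`. As a minimal sketch of the wrapper's shape (Godot's actual template lives in `core/templates/bit_field.h`; the details below are a simplified assumption):

    // Simplified sketch of the BitField wrapper (an assumption; see
    // core/templates/bit_field.h for the real implementation).
    template <typename T>
    class BitField {
        int64_t value = 0;

    public:
        bool has_flag(T p_flag) const { return value & (int64_t)p_flag; } // Replaces `mask & FLAG`.
        void set_flag(T p_flag) { value |= (int64_t)p_flag; }
        BitField() {}
        BitField(T p_value) { value = (int64_t)p_value; }
    };

The wrapper keeps the same bit layout as the old integer masks, so the migration is source-level only; it just makes the flag type explicit in the signatures.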
@@ -996,8 +994,11 @@ void RenderingDeviceVulkan::get_compressed_image_format_block_dimensions(DataFor
case DATA_FORMAT_EAC_R11G11_UNORM_BLOCK:
case DATA_FORMAT_EAC_R11G11_SNORM_BLOCK:
case DATA_FORMAT_ASTC_4x4_UNORM_BLOCK: // Again, not sure about astc.
- case DATA_FORMAT_ASTC_4x4_SRGB_BLOCK:
- case DATA_FORMAT_ASTC_5x4_UNORM_BLOCK:
+ case DATA_FORMAT_ASTC_4x4_SRGB_BLOCK: {
+ r_w = 4;
+ r_h = 4;
+ } break;
+ case DATA_FORMAT_ASTC_5x4_UNORM_BLOCK: // Unsupported
case DATA_FORMAT_ASTC_5x4_SRGB_BLOCK:
case DATA_FORMAT_ASTC_5x5_UNORM_BLOCK:
case DATA_FORMAT_ASTC_5x5_SRGB_BLOCK:
@@ -1008,10 +1009,16 @@ void RenderingDeviceVulkan::get_compressed_image_format_block_dimensions(DataFor
case DATA_FORMAT_ASTC_8x5_UNORM_BLOCK:
case DATA_FORMAT_ASTC_8x5_SRGB_BLOCK:
case DATA_FORMAT_ASTC_8x6_UNORM_BLOCK:
- case DATA_FORMAT_ASTC_8x6_SRGB_BLOCK:
+ case DATA_FORMAT_ASTC_8x6_SRGB_BLOCK: {
+ r_w = 4;
+ r_h = 4;
+ } break;
case DATA_FORMAT_ASTC_8x8_UNORM_BLOCK:
- case DATA_FORMAT_ASTC_8x8_SRGB_BLOCK:
- case DATA_FORMAT_ASTC_10x5_UNORM_BLOCK:
+ case DATA_FORMAT_ASTC_8x8_SRGB_BLOCK: {
+ r_w = 8;
+ r_h = 8;
+ } break;
+ case DATA_FORMAT_ASTC_10x5_UNORM_BLOCK: // Unsupported
case DATA_FORMAT_ASTC_10x5_SRGB_BLOCK:
case DATA_FORMAT_ASTC_10x6_UNORM_BLOCK:
case DATA_FORMAT_ASTC_10x6_SRGB_BLOCK:
@@ -1101,7 +1108,7 @@ uint32_t RenderingDeviceVulkan::get_compressed_image_format_block_byte_size(Data
case DATA_FORMAT_ASTC_12x10_SRGB_BLOCK:
case DATA_FORMAT_ASTC_12x12_UNORM_BLOCK:
case DATA_FORMAT_ASTC_12x12_SRGB_BLOCK:
- return 8; // Wrong.
+ return 16;
default: {
}
}
@@ -1123,6 +1130,10 @@ uint32_t RenderingDeviceVulkan::get_compressed_image_format_pixel_rshift(DataFor
case DATA_FORMAT_EAC_R11_UNORM_BLOCK:
case DATA_FORMAT_EAC_R11_SNORM_BLOCK:
return 1;
+ case DATA_FORMAT_ASTC_8x8_SRGB_BLOCK:
+ case DATA_FORMAT_ASTC_8x8_UNORM_BLOCK: {
+ return 2;
+ }
default: {
}
}
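
The block-size fixes above follow directly from the ASTC format definition: every ASTC block, regardless of footprint, encodes to 128 bits, so the byte size is 16 rather than the previous placeholder of 8, and an 8x8 block amortizes those 16 bytes over 64 texels, giving the pixel right-shift of 2. A worked sketch of how the three helpers combine (the 256x256 mip dimensions are an assumption for illustration):

    // Worked example for one 256x256 mip of an ASTC 8x8 image.
    uint32_t block_w = 8, block_h = 8; // get_compressed_image_format_block_dimensions().
    uint32_t block_bytes = 16;         // get_compressed_image_format_block_byte_size().
    uint32_t w = 256, h = 256;
    uint32_t blocks = ((w + block_w - 1) / block_w) * ((h + block_h - 1) / block_h);
    uint32_t size = blocks * block_bytes; // 32 * 32 * 16 = 16384 bytes.
    // 16 bytes per 8x8 = 64 texels is 1/4 byte per texel, matching the
    // pixel right-shift of 2 returned for the 8x8 formats.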
@@ -1579,12 +1590,12 @@ Error RenderingDeviceVulkan::_buffer_update(Buffer *p_buffer, size_t p_offset, c
return OK;
}
-void RenderingDeviceVulkan::_memory_barrier(VkPipelineStageFlags p_src_stage_mask, VkPipelineStageFlags p_dst_stage_mask, VkAccessFlags p_src_access, VkAccessFlags p_dst_sccess, bool p_sync_with_draw) {
+void RenderingDeviceVulkan::_memory_barrier(VkPipelineStageFlags p_src_stage_mask, VkPipelineStageFlags p_dst_stage_mask, VkAccessFlags p_src_access, VkAccessFlags p_dst_access, bool p_sync_with_draw) {
VkMemoryBarrier mem_barrier;
mem_barrier.sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER;
mem_barrier.pNext = nullptr;
mem_barrier.srcAccessMask = p_src_access;
- mem_barrier.dstAccessMask = p_dst_sccess;
+ mem_barrier.dstAccessMask = p_dst_access;
if (p_src_stage_mask == 0 || p_dst_stage_mask == 0) {
return; // No barrier, since this is invalid.
@@ -1628,14 +1639,14 @@ void RenderingDeviceVulkan::_full_barrier(bool p_sync_with_draw) {
p_sync_with_draw);
}
-void RenderingDeviceVulkan::_buffer_memory_barrier(VkBuffer buffer, uint64_t p_from, uint64_t p_size, VkPipelineStageFlags p_src_stage_mask, VkPipelineStageFlags p_dst_stage_mask, VkAccessFlags p_src_access, VkAccessFlags p_dst_sccess, bool p_sync_with_draw) {
+void RenderingDeviceVulkan::_buffer_memory_barrier(VkBuffer buffer, uint64_t p_from, uint64_t p_size, VkPipelineStageFlags p_src_stage_mask, VkPipelineStageFlags p_dst_stage_mask, VkAccessFlags p_src_access, VkAccessFlags p_dst_access, bool p_sync_with_draw) {
VkBufferMemoryBarrier buffer_mem_barrier;
buffer_mem_barrier.sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER;
buffer_mem_barrier.pNext = nullptr;
buffer_mem_barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
buffer_mem_barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
buffer_mem_barrier.srcAccessMask = p_src_access;
- buffer_mem_barrier.dstAccessMask = p_dst_sccess;
+ buffer_mem_barrier.dstAccessMask = p_dst_access;
buffer_mem_barrier.buffer = buffer;
buffer_mem_barrier.offset = p_from;
buffer_mem_barrier.size = p_size;
@@ -1655,34 +1666,27 @@ RID RenderingDeviceVulkan::texture_create(const TextureFormat &p_format, const T
image_create_info.pNext = nullptr;
image_create_info.flags = 0;
- // TODO: Check for support via RenderingDevice to enable on mobile when possible.
-
-#ifndef ANDROID_ENABLED
-
- // vkCreateImage fails with format list on Android (VK_ERROR_OUT_OF_HOST_MEMORY)
VkImageFormatListCreateInfoKHR format_list_create_info; // Keep out of the if, needed for creation.
Vector<VkFormat> allowed_formats; // Keep out of the if, needed for creation.
-#endif
if (p_format.shareable_formats.size()) {
image_create_info.flags |= VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT;
-#ifndef ANDROID_ENABLED
-
- for (int i = 0; i < p_format.shareable_formats.size(); i++) {
- allowed_formats.push_back(vulkan_formats[p_format.shareable_formats[i]]);
- }
+ if (context->is_device_extension_enabled(VK_KHR_IMAGE_FORMAT_LIST_EXTENSION_NAME)) {
+ for (int i = 0; i < p_format.shareable_formats.size(); i++) {
+ allowed_formats.push_back(vulkan_formats[p_format.shareable_formats[i]]);
+ }
- format_list_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_FORMAT_LIST_CREATE_INFO_KHR;
- format_list_create_info.pNext = nullptr;
- format_list_create_info.viewFormatCount = allowed_formats.size();
- format_list_create_info.pViewFormats = allowed_formats.ptr();
- image_create_info.pNext = &format_list_create_info;
+ format_list_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_FORMAT_LIST_CREATE_INFO_KHR;
+ format_list_create_info.pNext = nullptr;
+ format_list_create_info.viewFormatCount = allowed_formats.size();
+ format_list_create_info.pViewFormats = allowed_formats.ptr();
+ image_create_info.pNext = &format_list_create_info;
- ERR_FAIL_COND_V_MSG(p_format.shareable_formats.find(p_format.format) == -1, RID(),
- "If supplied a list of shareable formats, the current format must be present in the list");
- ERR_FAIL_COND_V_MSG(p_view.format_override != DATA_FORMAT_MAX && p_format.shareable_formats.find(p_view.format_override) == -1, RID(),
- "If supplied a list of shareable formats, the current view format override must be present in the list");
-#endif
+ ERR_FAIL_COND_V_MSG(p_format.shareable_formats.find(p_format.format) == -1, RID(),
+ "If supplied a list of shareable formats, the current format must be present in the list");
+ ERR_FAIL_COND_V_MSG(p_view.format_override != DATA_FORMAT_MAX && p_format.shareable_formats.find(p_view.format_override) == -1, RID(),
+ "If supplied a list of shareable formats, the current view format override must be present in the list");
+ }
}
if (p_format.texture_type == TEXTURE_TYPE_CUBE || p_format.texture_type == TEXTURE_TYPE_CUBE_ARRAY) {
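
Instead of compiling the format-list path out entirely on Android, the struct is now chained into `VkImageCreateInfo::pNext` only when `VK_KHR_image_format_list` is actually enabled, which lets drivers keep compression optimizations for `VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT` images where the extension exists. Condensed, the gating pattern is (a sketch reusing `allowed_formats` and `image_create_info` from the hunk above):

    // Runtime-gated pNext chaining.
    VkImageFormatListCreateInfoKHR format_list = {}; // Must outlive vkCreateImage().
    if (context->is_device_extension_enabled(VK_KHR_IMAGE_FORMAT_LIST_EXTENSION_NAME)) {
        format_list.sType = VK_STRUCTURE_TYPE_IMAGE_FORMAT_LIST_CREATE_INFO_KHR;
        format_list.viewFormatCount = allowed_formats.size();
        format_list.pViewFormats = allowed_formats.ptr();
        image_create_info.pNext = &format_list;
    }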
@@ -2009,7 +2013,7 @@ RID RenderingDeviceVulkan::texture_create(const TextureFormat &p_format, const T
if (p_data.size()) {
for (uint32_t i = 0; i < image_create_info.arrayLayers; i++) {
- _texture_update(id, i, p_data[i], RD::BARRIER_MASK_ALL, true);
+ _texture_update(id, i, p_data[i], RD::BARRIER_MASK_ALL_BARRIERS, true);
}
}
return id;
@@ -2085,49 +2089,54 @@ RID RenderingDeviceVulkan::texture_create_shared(const TextureView &p_view, RID
}
VkImageViewUsageCreateInfo usage_info;
- usage_info.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_USAGE_CREATE_INFO;
- usage_info.pNext = nullptr;
- if (p_view.format_override != DATA_FORMAT_MAX) {
- // Need to validate usage with vulkan.
+ if (context->is_device_extension_enabled(VK_KHR_MAINTENANCE_2_EXTENSION_NAME)) {
+ // May need to make VK_KHR_maintenance2 mandatory and thus have Vulkan 1.1 be our minimum supported version
+ // if we require setting this information. Vulkan 1.0 may simply not care.
- usage_info.usage = 0;
+ usage_info.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_USAGE_CREATE_INFO;
+ usage_info.pNext = nullptr;
+ if (p_view.format_override != DATA_FORMAT_MAX) {
+ // Need to validate usage with vulkan.
- if (texture.usage_flags & TEXTURE_USAGE_SAMPLING_BIT) {
- usage_info.usage |= VK_IMAGE_USAGE_SAMPLED_BIT;
- }
+ usage_info.usage = 0;
- if (texture.usage_flags & TEXTURE_USAGE_STORAGE_BIT) {
- if (texture_is_format_supported_for_usage(p_view.format_override, TEXTURE_USAGE_STORAGE_BIT)) {
- usage_info.usage |= VK_IMAGE_USAGE_STORAGE_BIT;
+ if (texture.usage_flags & TEXTURE_USAGE_SAMPLING_BIT) {
+ usage_info.usage |= VK_IMAGE_USAGE_SAMPLED_BIT;
+ }
+
+ if (texture.usage_flags & TEXTURE_USAGE_STORAGE_BIT) {
+ if (texture_is_format_supported_for_usage(p_view.format_override, TEXTURE_USAGE_STORAGE_BIT)) {
+ usage_info.usage |= VK_IMAGE_USAGE_STORAGE_BIT;
+ }
}
- }
- if (texture.usage_flags & TEXTURE_USAGE_COLOR_ATTACHMENT_BIT) {
- if (texture_is_format_supported_for_usage(p_view.format_override, TEXTURE_USAGE_COLOR_ATTACHMENT_BIT)) {
- usage_info.usage |= VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
+ if (texture.usage_flags & TEXTURE_USAGE_COLOR_ATTACHMENT_BIT) {
+ if (texture_is_format_supported_for_usage(p_view.format_override, TEXTURE_USAGE_COLOR_ATTACHMENT_BIT)) {
+ usage_info.usage |= VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
+ }
}
- }
- if (texture.usage_flags & TEXTURE_USAGE_INPUT_ATTACHMENT_BIT) {
- usage_info.usage |= VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT;
- }
+ if (texture.usage_flags & TEXTURE_USAGE_INPUT_ATTACHMENT_BIT) {
+ usage_info.usage |= VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT;
+ }
- if (texture.usage_flags & TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) {
- usage_info.usage |= VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT;
- }
+ if (texture.usage_flags & TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) {
+ usage_info.usage |= VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT;
+ }
- if (texture.usage_flags & TEXTURE_USAGE_CAN_UPDATE_BIT) {
- usage_info.usage |= VK_IMAGE_USAGE_TRANSFER_DST_BIT;
- }
- if (texture.usage_flags & TEXTURE_USAGE_CAN_COPY_FROM_BIT) {
- usage_info.usage |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
- }
+ if (texture.usage_flags & TEXTURE_USAGE_CAN_UPDATE_BIT) {
+ usage_info.usage |= VK_IMAGE_USAGE_TRANSFER_DST_BIT;
+ }
+ if (texture.usage_flags & TEXTURE_USAGE_CAN_COPY_FROM_BIT) {
+ usage_info.usage |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
+ }
- if (texture.usage_flags & TEXTURE_USAGE_CAN_COPY_TO_BIT) {
- usage_info.usage |= VK_IMAGE_USAGE_TRANSFER_DST_BIT;
- }
+ if (texture.usage_flags & TEXTURE_USAGE_CAN_COPY_TO_BIT) {
+ usage_info.usage |= VK_IMAGE_USAGE_TRANSFER_DST_BIT;
+ }
- image_view_create_info.pNext = &usage_info;
+ image_view_create_info.pNext = &usage_info;
+ }
}
VkResult err = vkCreateImageView(device, &image_view_create_info, nullptr, &texture.view);
@@ -2285,7 +2294,7 @@ RID RenderingDeviceVulkan::texture_create_from_extension(TextureType p_type, Dat
return id;
}
-RID RenderingDeviceVulkan::texture_create_shared_from_slice(const TextureView &p_view, RID p_with_texture, uint32_t p_layer, uint32_t p_mipmap, uint32_t p_mipmaps, TextureSliceType p_slice_type) {
+RID RenderingDeviceVulkan::texture_create_shared_from_slice(const TextureView &p_view, RID p_with_texture, uint32_t p_layer, uint32_t p_mipmap, uint32_t p_mipmaps, TextureSliceType p_slice_type, uint32_t p_layers) {
_THREAD_SAFE_METHOD_
Texture *src_texture = texture_owner.get_or_null(p_with_texture);
@@ -2313,7 +2322,11 @@ RID RenderingDeviceVulkan::texture_create_shared_from_slice(const TextureView &p
ERR_FAIL_UNSIGNED_INDEX_V(p_layer, src_texture->layers, RID());
int slice_layers = 1;
- if (p_slice_type == TEXTURE_SLICE_2D_ARRAY) {
+ if (p_layers != 0) {
+ ERR_FAIL_COND_V_MSG(p_layers > 1 && p_slice_type != TEXTURE_SLICE_2D_ARRAY, RID(), "layer slicing only supported for 2D arrays");
+ ERR_FAIL_COND_V_MSG(p_layer + p_layers > src_texture->layers, RID(), "layer slice is out of bounds");
+ slice_layers = p_layers;
+ } else if (p_slice_type == TEXTURE_SLICE_2D_ARRAY) {
ERR_FAIL_COND_V_MSG(p_layer != 0, RID(), "layer must be 0 when obtaining a 2D array mipmap slice");
slice_layers = src_texture->layers;
} else if (p_slice_type == TEXTURE_SLICE_CUBEMAP) {
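
The new trailing `p_layers` argument allows a slice view over an arbitrary contiguous layer range instead of the previous one-layer-or-all-layers choice. A hypothetical call through the public API (the `rd` handle and `array_tex` texture are assumed names):

    // Hypothetical: view layers 2..5 (4 layers) of an 8-layer 2D array, mip 0.
    RD::TextureView view;
    RID slice = rd->texture_create_shared_from_slice(
            view, array_tex, /* p_layer */ 2, /* p_mipmap */ 0, /* p_mipmaps */ 1,
            RD::TEXTURE_SLICE_2D_ARRAY, /* p_layers */ 4);
    // Rejected if p_layers > 1 with any other slice type, or if
    // p_layer + p_layers exceeds the source texture's layer count.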
@@ -2414,7 +2427,7 @@ RID RenderingDeviceVulkan::texture_create_shared_from_slice(const TextureView &p
return id;
}
-Error RenderingDeviceVulkan::texture_update(RID p_texture, uint32_t p_layer, const Vector<uint8_t> &p_data, uint32_t p_post_barrier) {
+Error RenderingDeviceVulkan::texture_update(RID p_texture, uint32_t p_layer, const Vector<uint8_t> &p_data, BitField<BarrierMask> p_post_barrier) {
return _texture_update(p_texture, p_layer, p_data, p_post_barrier, false);
}
@@ -2434,7 +2447,7 @@ static _ALWAYS_INLINE_ void _copy_region(uint8_t const *__restrict p_src, uint8_
}
}
-Error RenderingDeviceVulkan::_texture_update(RID p_texture, uint32_t p_layer, const Vector<uint8_t> &p_data, uint32_t p_post_barrier, bool p_use_setup_queue) {
+Error RenderingDeviceVulkan::_texture_update(RID p_texture, uint32_t p_layer, const Vector<uint8_t> &p_data, BitField<BarrierMask> p_post_barrier, bool p_use_setup_queue) {
_THREAD_SAFE_METHOD_
ERR_FAIL_COND_V_MSG((draw_list || compute_list) && !p_use_setup_queue, ERR_INVALID_PARAMETER,
@@ -2608,15 +2621,15 @@ Error RenderingDeviceVulkan::_texture_update(RID p_texture, uint32_t p_layer, co
{
uint32_t barrier_flags = 0;
uint32_t access_flags = 0;
- if (p_post_barrier & BARRIER_MASK_COMPUTE) {
+ if (p_post_barrier.has_flag(BARRIER_MASK_COMPUTE)) {
barrier_flags |= VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
}
- if (p_post_barrier & BARRIER_MASK_RASTER) {
+ if (p_post_barrier.has_flag(BARRIER_MASK_RASTER)) {
barrier_flags |= VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
}
- if (p_post_barrier & BARRIER_MASK_TRANSFER) {
+ if (p_post_barrier.has_flag(BARRIER_MASK_TRANSFER)) {
barrier_flags |= VK_PIPELINE_STAGE_TRANSFER_BIT;
access_flags |= VK_ACCESS_TRANSFER_WRITE_BIT;
}
@@ -2869,7 +2882,7 @@ Size2i RenderingDeviceVulkan::texture_size(RID p_texture) {
return Size2i(tex->width, tex->height);
}
-Error RenderingDeviceVulkan::texture_copy(RID p_from_texture, RID p_to_texture, const Vector3 &p_from, const Vector3 &p_to, const Vector3 &p_size, uint32_t p_src_mipmap, uint32_t p_dst_mipmap, uint32_t p_src_layer, uint32_t p_dst_layer, uint32_t p_post_barrier) {
+Error RenderingDeviceVulkan::texture_copy(RID p_from_texture, RID p_to_texture, const Vector3 &p_from, const Vector3 &p_to, const Vector3 &p_size, uint32_t p_src_mipmap, uint32_t p_dst_mipmap, uint32_t p_src_layer, uint32_t p_dst_layer, BitField<BarrierMask> p_post_barrier) {
_THREAD_SAFE_METHOD_
Texture *src_tex = texture_owner.get_or_null(p_from_texture);
@@ -2994,15 +3007,15 @@ Error RenderingDeviceVulkan::texture_copy(RID p_from_texture, RID p_to_texture,
uint32_t barrier_flags = 0;
uint32_t access_flags = 0;
- if (p_post_barrier & BARRIER_MASK_COMPUTE) {
+ if (p_post_barrier.has_flag(BARRIER_MASK_COMPUTE)) {
barrier_flags |= VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
}
- if (p_post_barrier & BARRIER_MASK_RASTER) {
+ if (p_post_barrier.has_flag(BARRIER_MASK_RASTER)) {
barrier_flags |= VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
}
- if (p_post_barrier & BARRIER_MASK_TRANSFER) {
+ if (p_post_barrier.has_flag(BARRIER_MASK_TRANSFER)) {
barrier_flags |= VK_PIPELINE_STAGE_TRANSFER_BIT;
access_flags |= VK_ACCESS_TRANSFER_WRITE_BIT;
}
@@ -3064,7 +3077,7 @@ Error RenderingDeviceVulkan::texture_copy(RID p_from_texture, RID p_to_texture,
return OK;
}
-Error RenderingDeviceVulkan::texture_resolve_multisample(RID p_from_texture, RID p_to_texture, uint32_t p_post_barrier) {
+Error RenderingDeviceVulkan::texture_resolve_multisample(RID p_from_texture, RID p_to_texture, BitField<BarrierMask> p_post_barrier) {
_THREAD_SAFE_METHOD_
Texture *src_tex = texture_owner.get_or_null(p_from_texture);
@@ -3172,15 +3185,15 @@ Error RenderingDeviceVulkan::texture_resolve_multisample(RID p_from_texture, RID
uint32_t barrier_flags = 0;
uint32_t access_flags = 0;
- if (p_post_barrier & BARRIER_MASK_COMPUTE) {
+ if (p_post_barrier.has_flag(BARRIER_MASK_COMPUTE)) {
barrier_flags |= VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
}
- if (p_post_barrier & BARRIER_MASK_RASTER) {
+ if (p_post_barrier.has_flag(BARRIER_MASK_RASTER)) {
barrier_flags |= VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
}
- if (p_post_barrier & BARRIER_MASK_TRANSFER) {
+ if (p_post_barrier.has_flag(BARRIER_MASK_TRANSFER)) {
barrier_flags |= VK_PIPELINE_STAGE_TRANSFER_BIT;
access_flags |= VK_ACCESS_TRANSFER_WRITE_BIT;
}
@@ -3235,7 +3248,7 @@ Error RenderingDeviceVulkan::texture_resolve_multisample(RID p_from_texture, RID
return OK;
}
-Error RenderingDeviceVulkan::texture_clear(RID p_texture, const Color &p_color, uint32_t p_base_mipmap, uint32_t p_mipmaps, uint32_t p_base_layer, uint32_t p_layers, uint32_t p_post_barrier) {
+Error RenderingDeviceVulkan::texture_clear(RID p_texture, const Color &p_color, uint32_t p_base_mipmap, uint32_t p_mipmaps, uint32_t p_base_layer, uint32_t p_layers, BitField<BarrierMask> p_post_barrier) {
_THREAD_SAFE_METHOD_
Texture *src_tex = texture_owner.get_or_null(p_texture);
@@ -3308,15 +3321,15 @@ Error RenderingDeviceVulkan::texture_clear(RID p_texture, const Color &p_color,
uint32_t barrier_flags = 0;
uint32_t access_flags = 0;
- if (p_post_barrier & BARRIER_MASK_COMPUTE) {
+ if (p_post_barrier.has_flag(BARRIER_MASK_COMPUTE)) {
barrier_flags |= VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
}
- if (p_post_barrier & BARRIER_MASK_RASTER) {
+ if (p_post_barrier.has_flag(BARRIER_MASK_RASTER)) {
barrier_flags |= VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
}
- if (p_post_barrier & BARRIER_MASK_TRANSFER) {
+ if (p_post_barrier.has_flag(BARRIER_MASK_TRANSFER)) {
barrier_flags |= VK_PIPELINE_STAGE_TRANSFER_BIT;
access_flags |= VK_ACCESS_TRANSFER_WRITE_BIT;
}
@@ -3355,7 +3368,7 @@ Error RenderingDeviceVulkan::texture_clear(RID p_texture, const Color &p_color,
return OK;
}
-bool RenderingDeviceVulkan::texture_is_format_supported_for_usage(DataFormat p_format, uint32_t p_usage) const {
+bool RenderingDeviceVulkan::texture_is_format_supported_for_usage(DataFormat p_format, BitField<RenderingDevice::TextureUsageBits> p_usage) const {
ERR_FAIL_INDEX_V(p_format, DATA_FORMAT_MAX, false);
_THREAD_SAFE_METHOD_
@@ -3365,34 +3378,34 @@ bool RenderingDeviceVulkan::texture_is_format_supported_for_usage(DataFormat p_f
vkGetPhysicalDeviceFormatProperties(context->get_physical_device(), vulkan_formats[p_format], &properties);
VkFormatFeatureFlags flags;
- if (p_usage & TEXTURE_USAGE_CPU_READ_BIT) {
+ if (p_usage.has_flag(TEXTURE_USAGE_CPU_READ_BIT)) {
flags = properties.linearTilingFeatures;
} else {
flags = properties.optimalTilingFeatures;
}
- if (p_usage & TEXTURE_USAGE_SAMPLING_BIT && !(flags & VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT)) {
+ if (p_usage.has_flag(TEXTURE_USAGE_SAMPLING_BIT) && !(flags & VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT)) {
return false;
}
- if (p_usage & TEXTURE_USAGE_COLOR_ATTACHMENT_BIT && !(flags & VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT)) {
+ if (p_usage.has_flag(TEXTURE_USAGE_COLOR_ATTACHMENT_BIT) && !(flags & VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT)) {
return false;
}
- if (p_usage & TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT && !(flags & VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT)) {
+ if (p_usage.has_flag(TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) && !(flags & VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT)) {
return false;
}
- if (p_usage & TEXTURE_USAGE_STORAGE_BIT && !(flags & VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT)) {
+ if (p_usage.has_flag(TEXTURE_USAGE_STORAGE_BIT) && !(flags & VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT)) {
return false;
}
- if (p_usage & TEXTURE_USAGE_STORAGE_ATOMIC_BIT && !(flags & VK_FORMAT_FEATURE_STORAGE_IMAGE_ATOMIC_BIT)) {
+ if (p_usage.has_flag(TEXTURE_USAGE_STORAGE_ATOMIC_BIT) && !(flags & VK_FORMAT_FEATURE_STORAGE_IMAGE_ATOMIC_BIT)) {
return false;
}
// Validation via VK_FORMAT_FEATURE_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR fails if VRS attachment is not supported.
- if (p_usage & TEXTURE_USAGE_VRS_ATTACHMENT_BIT && p_format != DATA_FORMAT_R8_UINT) {
+ if (p_usage.has_flag(TEXTURE_USAGE_VRS_ATTACHMENT_BIT) && p_format != DATA_FORMAT_R8_UINT) {
return false;
}
@@ -4267,7 +4280,7 @@ RID RenderingDeviceVulkan::sampler_create(const SamplerState &p_state) {
sampler_create_info.addressModeW = address_modes[p_state.repeat_w];
sampler_create_info.mipLodBias = p_state.lod_bias;
- sampler_create_info.anisotropyEnable = p_state.use_anisotropy;
+ sampler_create_info.anisotropyEnable = p_state.use_anisotropy && context->get_physical_device_features().samplerAnisotropy;
sampler_create_info.maxAnisotropy = p_state.anisotropy_max;
sampler_create_info.compareEnable = p_state.enable_compare;
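
Vulkan's valid-usage rules only permit `anisotropyEnable = VK_TRUE` when the `samplerAnisotropy` device feature is present, hence the added feature check. A fuller sketch would also clamp the requested level to the device limit; note the clamp is an assumption beyond what this hunk changes:

    const VkPhysicalDeviceFeatures &features = context->get_physical_device_features();
    sampler_create_info.anisotropyEnable = p_state.use_anisotropy && features.samplerAnisotropy;
    // Clamping to the device limit is an assumption, not part of this change:
    sampler_create_info.maxAnisotropy = MIN(p_state.anisotropy_max, limits.maxSamplerAnisotropy);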
@@ -4378,7 +4391,7 @@ RenderingDevice::VertexFormatID RenderingDeviceVulkan::vertex_format_create(cons
return id;
}
-RID RenderingDeviceVulkan::vertex_array_create(uint32_t p_vertex_count, VertexFormatID p_vertex_format, const Vector<RID> &p_src_buffers) {
+RID RenderingDeviceVulkan::vertex_array_create(uint32_t p_vertex_count, VertexFormatID p_vertex_format, const Vector<RID> &p_src_buffers, const Vector<uint64_t> &p_offsets) {
_THREAD_SAFE_METHOD_
ERR_FAIL_COND_V(!vertex_formats.has(p_vertex_format), RID());
@@ -4392,6 +4405,13 @@ RID RenderingDeviceVulkan::vertex_array_create(uint32_t p_vertex_count, VertexFo
VertexArray vertex_array;
+ if (p_offsets.is_empty()) {
+ vertex_array.offsets.resize_zeroed(p_src_buffers.size());
+ } else {
+ ERR_FAIL_COND_V(p_offsets.size() != p_src_buffers.size(), RID());
+ vertex_array.offsets = p_offsets;
+ }
+
vertex_array.vertex_count = p_vertex_count;
vertex_array.description = p_vertex_format;
vertex_array.max_instances_allowed = 0xFFFFFFFF; // By default as many as you want.
@@ -4423,7 +4443,6 @@ RID RenderingDeviceVulkan::vertex_array_create(uint32_t p_vertex_count, VertexFo
}
vertex_array.buffers.push_back(buffer->buffer);
- vertex_array.offsets.push_back(0); // Offset unused, but passing anyway.
}
RID id = vertex_array_owner.make_rid(vertex_array);
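
The new optional `p_offsets` vector supplies the per-buffer byte offsets that were previously hard-coded to zero when the arrays are later bound with `vkCmdBindVertexBuffers()`; an empty vector is zero-filled to match the buffer count, and a non-empty one must match it exactly. Hypothetical usage (handle and buffer names assumed):

    // Hypothetical: two vertex streams in one buffer, second starting at 64 KiB.
    Vector<RID> buffers = { big_buffer, big_buffer };
    Vector<uint64_t> offsets = { 0, 65536 };
    RID va = rd->vertex_array_create(vertex_count, vertex_format, buffers, offsets);
    // An empty offsets vector keeps the old all-zeros behavior.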
@@ -4518,14 +4537,6 @@ RID RenderingDeviceVulkan::index_array_create(RID p_index_buffer, uint32_t p_ind
/**** SHADER ****/
/****************/
-static const char *shader_stage_names[RenderingDevice::SHADER_STAGE_MAX] = {
- "Vertex",
- "Fragment",
- "TesselationControl",
- "TesselationEvaluation",
- "Compute"
-};
-
static const char *shader_uniform_names[RenderingDevice::UNIFORM_TYPE_MAX] = {
"Sampler", "CombinedSampler", "Texture", "Image", "TextureBuffer", "SamplerTextureBuffer", "ImageBuffer", "UniformBuffer", "StorageBuffer", "InputAttachment"
};
@@ -4556,198 +4567,6 @@ String RenderingDeviceVulkan::_shader_uniform_debug(RID p_shader, int p_set) {
}
return ret;
}
-#if 0
-bool RenderingDeviceVulkan::_uniform_add_binding(Vector<Vector<VkDescriptorSetLayoutBinding> > &bindings, Vector<Vector<UniformInfo> > &uniform_infos, const glslang::TObjectReflection &reflection, RenderingDevice::ShaderStage p_stage, Shader::PushConstant &push_constant, String *r_error) {
- VkDescriptorSetLayoutBinding layout_binding;
- UniformInfo info;
-
- switch (reflection.getType()->getBasicType()) {
- case glslang::EbtSampler: {
- //print_line("DEBUG: IsSampler");
- if (reflection.getType()->getSampler().dim == glslang::EsdBuffer) {
- // Texture buffers.
- if (reflection.getType()->getSampler().isCombined()) {
- layout_binding.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER;
- info.type = UNIFORM_TYPE_SAMPLER_WITH_TEXTURE_BUFFER;
- //print_line("DEBUG: SAMPLER: texel combined");
- } else if (reflection.getType()->getSampler().isTexture()) {
- layout_binding.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER;
- info.type = UNIFORM_TYPE_TEXTURE_BUFFER;
- //print_line("DEBUG: SAMPLER: texel alone");
- } else if (reflection.getType()->getSampler().isImage()) {
- layout_binding.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER;
- info.type = UNIFORM_TYPE_IMAGE_BUFFER;
- //print_line("DEBUG: SAMPLER: texel buffer");
- } else {
- if (r_error) {
- *r_error = "On shader stage '" + String(shader_stage_names[p_stage]) + "', uniform '" + reflection.name + "' is of unsupported buffer type.";
- }
- return false;
- }
- } else if (reflection.getType()->getSampler().isCombined()) {
- layout_binding.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
- info.type = UNIFORM_TYPE_SAMPLER_WITH_TEXTURE;
- //print_line("DEBUG: SAMPLER: combined");
- } else if (reflection.getType()->getSampler().isPureSampler()) {
- layout_binding.descriptorType = VK_DESCRIPTOR_TYPE_SAMPLER;
- info.type = UNIFORM_TYPE_SAMPLER;
- //print_line("DEBUG: SAMPLER: sampler");
- } else if (reflection.getType()->getSampler().isTexture()) {
- layout_binding.descriptorType = VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE;
- info.type = UNIFORM_TYPE_TEXTURE;
- //print_line("DEBUG: SAMPLER: image");
- } else if (reflection.getType()->getSampler().isImage()) {
- layout_binding.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE;
- info.type = UNIFORM_TYPE_IMAGE;
- //print_line("DEBUG: SAMPLER: storage image");
- } else {
- //print_line("DEBUG: sampler unknown");
- if (r_error) {
- *r_error = "On shader stage '" + String(shader_stage_names[p_stage]) + "', uniform '" + reflection.name + "' is of unsupported sampler type.";
- }
- return false;
- }
-
- if (reflection.getType()->isArray()) {
- layout_binding.descriptorCount = reflection.getType()->getArraySizes()->getCumulativeSize();
- //print_line("DEBUG: array of size: " + itos(layout_binding.descriptorCount));
- } else {
- layout_binding.descriptorCount = 1;
- }
-
- info.length = layout_binding.descriptorCount;
-
- } break;
- /*case glslang::EbtStruct: {
- print_line("DEBUG: Struct");
-
- } break;*/
- case glslang::EbtBlock: {
- //print_line("DEBUG: Block");
- if (reflection.getType()->getQualifier().storage == glslang::EvqUniform) {
- if (reflection.getType()->getQualifier().layoutPushConstant) {
- uint32_t len = reflection.size;
- if (push_constant.push_constant_size != 0 && push_constant.push_constant_size != len) {
- *r_error = "On shader stage '" + String(shader_stage_names[p_stage]) + "', uniform '" + reflection.name + "' push constants for different stages should all be the same size.";
- return false;
- }
- push_constant.push_constant_size = len;
- push_constant.push_constants_vk_stage |= shader_stage_masks[p_stage];
- return true;
- }
- //print_line("DEBUG: Uniform buffer");
- layout_binding.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
- info.type = UNIFORM_TYPE_UNIFORM_BUFFER;
- } else if (reflection.getType()->getQualifier().storage == glslang::EvqBuffer) {
- layout_binding.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
- info.type = UNIFORM_TYPE_STORAGE_BUFFER;
- //print_line("DEBUG: Storage buffer");
- } else {
- if (r_error) {
- *r_error = "On shader stage '" + String(shader_stage_names[p_stage]) + "', uniform '" + reflection.name + "' is of unsupported block type: (" + itos(reflection.getType()->getQualifier().storage) + ").";
- }
- return false;
- }
-
- if (reflection.getType()->isArray()) {
- layout_binding.descriptorCount = reflection.getType()->getArraySizes()->getCumulativeSize();
- //print_line("DEBUG: array of size: " + itos(layout_binding.descriptorCount));
- } else {
- layout_binding.descriptorCount = 1;
- }
-
- info.length = reflection.size;
-
- } break;
- /*case glslang::EbtReference: {
- } break;*/
- /*case glslang::EbtAtomicUint: {
- } break;*/
- default: {
- if (reflection.getType()->getQualifier().hasOffset() || reflection.name.find(".") != std::string::npos) {
- // Member of uniform block?
- return true;
- }
-
- if (r_error) {
- *r_error = "On shader stage '" + String(shader_stage_names[p_stage]) + "', uniform '" + reflection.name + "' unsupported uniform type.";
- }
- return false;
- }
- }
-
- if (!reflection.getType()->getQualifier().hasBinding()) {
- if (r_error) {
- *r_error = "On shader stage '" + String(shader_stage_names[p_stage]) + "', uniform '" + reflection.name + "' lacks a binding number.";
- }
- return false;
- }
-
- uint32_t set = reflection.getType()->getQualifier().hasSet() ? reflection.getType()->getQualifier().layoutSet : 0;
-
- if (set >= MAX_UNIFORM_SETS) {
- if (r_error) {
- *r_error = "On shader stage '" + String(shader_stage_names[p_stage]) + "', uniform '" + reflection.name + "' uses a set (" + itos(set) + ") index larger than what is supported (" + itos(MAX_UNIFORM_SETS) + ").";
- }
- return false;
- }
-
- if (set >= limits.maxBoundDescriptorSets) {
- if (r_error) {
- *r_error = "On shader stage '" + String(shader_stage_names[p_stage]) + "', uniform '" + reflection.name + "' uses a set (" + itos(set) + ") index larger than what is supported by the hardware (" + itos(limits.maxBoundDescriptorSets) + ").";
- }
- return false;
- }
-
- uint32_t binding = reflection.getType()->getQualifier().layoutBinding;
-
- if (set < (uint32_t)bindings.size()) {
- // Check if this already exists.
- for (int i = 0; i < bindings[set].size(); i++) {
- if (bindings[set][i].binding == binding) {
- // Already exists, verify that it's the same type.
- if (bindings[set][i].descriptorType != layout_binding.descriptorType) {
- if (r_error) {
- *r_error = "On shader stage '" + String(shader_stage_names[p_stage]) + "', uniform '" + reflection.name + "' trying to re-use location for set=" + itos(set) + ", binding=" + itos(binding) + " with different uniform type.";
- }
- return false;
- }
-
- // Also, verify that it's the same size.
- if (bindings[set][i].descriptorCount != layout_binding.descriptorCount || uniform_infos[set][i].length != info.length) {
- if (r_error) {
- *r_error = "On shader stage '" + String(shader_stage_names[p_stage]) + "', uniform '" + reflection.name + "' trying to re-use location for set=" + itos(set) + ", binding=" + itos(binding) + " with different uniform size.";
- }
- return false;
- }
-
- // Just append stage mask and return.
- bindings.write[set].write[i].stageFlags |= shader_stage_masks[p_stage];
- uniform_infos.write[set].write[i].stages |= 1 << p_stage;
- return true;
- }
- }
- }
- layout_binding.binding = binding;
- layout_binding.stageFlags = shader_stage_masks[p_stage];
- layout_binding.pImmutableSamplers = nullptr; // No support for this yet.
-
- info.stages = 1 << p_stage;
- info.binding = binding;
-
- if (set >= (uint32_t)bindings.size()) {
- bindings.resize(set + 1);
- uniform_infos.resize(set + 1);
- }
-#if 0
- print_line("stage: " + String(shader_stage_names[p_stage]) + " set: " + itos(set) + " binding: " + itos(info.binding) + " type:" + shader_uniform_names[info.type] + " length: " + itos(info.length));
-#endif
- bindings.write[set].push_back(layout_binding);
- uniform_infos.write[set].push_back(info);
-
- return true;
-}
-#endif
// Version 1: initial.
// Version 2: Added shader name.
@@ -4756,7 +4575,7 @@ bool RenderingDeviceVulkan::_uniform_add_binding(Vector<Vector<VkDescriptorSetLa
#define SHADER_BINARY_VERSION 3
String RenderingDeviceVulkan::shader_get_binary_cache_key() const {
- return "Vulkan-SV" + itos(SHADER_BINARY_VERSION) + "-" + String(VERSION_NUMBER) + "-" + String(VERSION_HASH);
+ return "Vulkan-SV" + itos(SHADER_BINARY_VERSION);
}
struct RenderingDeviceVulkanShaderBinaryDataBinding {
@@ -4780,346 +4599,68 @@ struct RenderingDeviceVulkanShaderBinarySpecializationConstant {
struct RenderingDeviceVulkanShaderBinaryData {
uint32_t vertex_input_mask;
- uint32_t fragment_outputs;
- uint32_t specialization_constant_count;
+ uint32_t fragment_output_mask;
+ uint32_t specialization_constants_count;
uint32_t is_compute;
uint32_t compute_local_size[3];
uint32_t set_count;
uint32_t push_constant_size;
- uint32_t push_constants_vk_stage;
+ uint32_t push_constant_vk_stages_mask;
uint32_t stage_count;
uint32_t shader_name_len;
};
Vector<uint8_t> RenderingDeviceVulkan::shader_compile_binary_from_spirv(const Vector<ShaderStageSPIRVData> &p_spirv, const String &p_shader_name) {
- RenderingDeviceVulkanShaderBinaryData binary_data;
- binary_data.vertex_input_mask = 0;
- binary_data.fragment_outputs = 0;
- binary_data.specialization_constant_count = 0;
- binary_data.is_compute = 0;
- binary_data.compute_local_size[0] = 0;
- binary_data.compute_local_size[1] = 0;
- binary_data.compute_local_size[2] = 0;
- binary_data.set_count = 0;
- binary_data.push_constant_size = 0;
- binary_data.push_constants_vk_stage = 0;
+ SpirvReflectionData spirv_data;
+ if (_reflect_spirv(p_spirv, spirv_data) != OK) {
+ return Vector<uint8_t>();
+ }
+ ERR_FAIL_COND_V_MSG((uint32_t)spirv_data.uniforms.size() > limits.maxBoundDescriptorSets, Vector<uint8_t>(),
+ "Number of uniform sets is larger than what is supported by the hardware (" + itos(limits.maxBoundDescriptorSets) + ").");
+
+ // Collect reflection data into binary data.
+ RenderingDeviceVulkanShaderBinaryData binary_data;
Vector<Vector<RenderingDeviceVulkanShaderBinaryDataBinding>> uniform_info; // Set bindings.
Vector<RenderingDeviceVulkanShaderBinarySpecializationConstant> specialization_constants;
-
- uint32_t stages_processed = 0;
-
- for (int i = 0; i < p_spirv.size(); i++) {
- if (p_spirv[i].shader_stage == SHADER_STAGE_COMPUTE) {
- binary_data.is_compute = true;
- ERR_FAIL_COND_V_MSG(p_spirv.size() != 1, Vector<uint8_t>(),
- "Compute shaders can only receive one stage, dedicated to compute.");
- }
- ERR_FAIL_COND_V_MSG(stages_processed & (1 << p_spirv[i].shader_stage), Vector<uint8_t>(),
- "Stage " + String(shader_stage_names[p_spirv[i].shader_stage]) + " submitted more than once.");
-
- {
- SpvReflectShaderModule module;
- const uint8_t *spirv = p_spirv[i].spir_v.ptr();
- SpvReflectResult result = spvReflectCreateShaderModule(p_spirv[i].spir_v.size(), spirv, &module);
- ERR_FAIL_COND_V_MSG(result != SPV_REFLECT_RESULT_SUCCESS, Vector<uint8_t>(),
- "Reflection of SPIR-V shader stage '" + String(shader_stage_names[p_spirv[i].shader_stage]) + "' failed parsing shader.");
-
- if (binary_data.is_compute) {
- binary_data.compute_local_size[0] = module.entry_points->local_size.x;
- binary_data.compute_local_size[1] = module.entry_points->local_size.y;
- binary_data.compute_local_size[2] = module.entry_points->local_size.z;
- }
- uint32_t binding_count = 0;
- result = spvReflectEnumerateDescriptorBindings(&module, &binding_count, nullptr);
- ERR_FAIL_COND_V_MSG(result != SPV_REFLECT_RESULT_SUCCESS, Vector<uint8_t>(),
- "Reflection of SPIR-V shader stage '" + String(shader_stage_names[p_spirv[i].shader_stage]) + "' failed enumerating descriptor bindings.");
-
- uint32_t stage = p_spirv[i].shader_stage;
-
- if (binding_count > 0) {
- // Parse bindings.
-
- Vector<SpvReflectDescriptorBinding *> bindings;
- bindings.resize(binding_count);
- result = spvReflectEnumerateDescriptorBindings(&module, &binding_count, bindings.ptrw());
-
- ERR_FAIL_COND_V_MSG(result != SPV_REFLECT_RESULT_SUCCESS, Vector<uint8_t>(),
- "Reflection of SPIR-V shader stage '" + String(shader_stage_names[p_spirv[i].shader_stage]) + "' failed getting descriptor bindings.");
-
- for (uint32_t j = 0; j < binding_count; j++) {
- const SpvReflectDescriptorBinding &binding = *bindings[j];
-
- RenderingDeviceVulkanShaderBinaryDataBinding info{};
-
- bool need_array_dimensions = false;
- bool need_block_size = false;
- bool may_be_writable = false;
-
- switch (binding.descriptor_type) {
- case SPV_REFLECT_DESCRIPTOR_TYPE_SAMPLER: {
- info.type = UNIFORM_TYPE_SAMPLER;
- need_array_dimensions = true;
- } break;
- case SPV_REFLECT_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER: {
- info.type = UNIFORM_TYPE_SAMPLER_WITH_TEXTURE;
- need_array_dimensions = true;
- } break;
- case SPV_REFLECT_DESCRIPTOR_TYPE_SAMPLED_IMAGE: {
- info.type = UNIFORM_TYPE_TEXTURE;
- need_array_dimensions = true;
- } break;
- case SPV_REFLECT_DESCRIPTOR_TYPE_STORAGE_IMAGE: {
- info.type = UNIFORM_TYPE_IMAGE;
- need_array_dimensions = true;
- may_be_writable = true;
- } break;
- case SPV_REFLECT_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER: {
- info.type = UNIFORM_TYPE_TEXTURE_BUFFER;
- need_array_dimensions = true;
- } break;
- case SPV_REFLECT_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER: {
- info.type = UNIFORM_TYPE_IMAGE_BUFFER;
- need_array_dimensions = true;
- may_be_writable = true;
- } break;
- case SPV_REFLECT_DESCRIPTOR_TYPE_UNIFORM_BUFFER: {
- info.type = UNIFORM_TYPE_UNIFORM_BUFFER;
- need_block_size = true;
- } break;
- case SPV_REFLECT_DESCRIPTOR_TYPE_STORAGE_BUFFER: {
- info.type = UNIFORM_TYPE_STORAGE_BUFFER;
- need_block_size = true;
- may_be_writable = true;
- } break;
- case SPV_REFLECT_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC: {
- ERR_PRINT("Dynamic uniform buffer not supported.");
- continue;
- } break;
- case SPV_REFLECT_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC: {
- ERR_PRINT("Dynamic storage buffer not supported.");
- continue;
- } break;
- case SPV_REFLECT_DESCRIPTOR_TYPE_INPUT_ATTACHMENT: {
- info.type = UNIFORM_TYPE_INPUT_ATTACHMENT;
- need_array_dimensions = true;
- } break;
- case SPV_REFLECT_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR: {
- ERR_PRINT("Acceleration structure not supported.");
- continue;
- } break;
- }
-
- if (need_array_dimensions) {
- if (binding.array.dims_count == 0) {
- info.length = 1;
- } else {
- for (uint32_t k = 0; k < binding.array.dims_count; k++) {
- if (k == 0) {
- info.length = binding.array.dims[0];
- } else {
- info.length *= binding.array.dims[k];
- }
- }
- }
-
- } else if (need_block_size) {
- info.length = binding.block.size;
- } else {
- info.length = 0;
- }
-
- if (may_be_writable) {
- info.writable = !(binding.type_description->decoration_flags & SPV_REFLECT_DECORATION_NON_WRITABLE) && !(binding.block.decoration_flags & SPV_REFLECT_DECORATION_NON_WRITABLE);
- } else {
- info.writable = false;
- }
-
- info.binding = binding.binding;
- uint32_t set = binding.set;
-
- ERR_FAIL_COND_V_MSG(set >= MAX_UNIFORM_SETS, Vector<uint8_t>(),
- "On shader stage '" + String(shader_stage_names[stage]) + "', uniform '" + binding.name + "' uses a set (" + itos(set) + ") index larger than what is supported (" + itos(MAX_UNIFORM_SETS) + ").");
-
- ERR_FAIL_COND_V_MSG(set >= limits.maxBoundDescriptorSets, Vector<uint8_t>(),
- "On shader stage '" + String(shader_stage_names[stage]) + "', uniform '" + binding.name + "' uses a set (" + itos(set) + ") index larger than what is supported by the hardware (" + itos(limits.maxBoundDescriptorSets) + ").");
-
- if (set < (uint32_t)uniform_info.size()) {
- // Check if this already exists.
- bool exists = false;
- for (int k = 0; k < uniform_info[set].size(); k++) {
- if (uniform_info[set][k].binding == (uint32_t)info.binding) {
- // Already exists, verify that it's the same type.
- ERR_FAIL_COND_V_MSG(uniform_info[set][k].type != info.type, Vector<uint8_t>(),
- "On shader stage '" + String(shader_stage_names[stage]) + "', uniform '" + binding.name + "' trying to re-use location for set=" + itos(set) + ", binding=" + itos(info.binding) + " with different uniform type.");
-
- // Also, verify that it's the same size.
- ERR_FAIL_COND_V_MSG(uniform_info[set][k].length != info.length, Vector<uint8_t>(),
- "On shader stage '" + String(shader_stage_names[stage]) + "', uniform '" + binding.name + "' trying to re-use location for set=" + itos(set) + ", binding=" + itos(info.binding) + " with different uniform size.");
-
- // Also, verify that it has the same writability.
- ERR_FAIL_COND_V_MSG(uniform_info[set][k].writable != info.writable, Vector<uint8_t>(),
- "On shader stage '" + String(shader_stage_names[stage]) + "', uniform '" + binding.name + "' trying to re-use location for set=" + itos(set) + ", binding=" + itos(info.binding) + " with different writability.");
-
- // Just append stage mask and return.
- uniform_info.write[set].write[k].stages |= 1 << stage;
- exists = true;
- break;
- }
- }
-
- if (exists) {
- continue; // Merged.
- }
- }
-
- info.stages = 1 << stage;
-
- if (set >= (uint32_t)uniform_info.size()) {
- uniform_info.resize(set + 1);
- }
-
- uniform_info.write[set].push_back(info);
- }
- }
-
- {
- // Specialization constants.
-
- uint32_t sc_count = 0;
- result = spvReflectEnumerateSpecializationConstants(&module, &sc_count, nullptr);
- ERR_FAIL_COND_V_MSG(result != SPV_REFLECT_RESULT_SUCCESS, Vector<uint8_t>(),
- "Reflection of SPIR-V shader stage '" + String(shader_stage_names[p_spirv[i].shader_stage]) + "' failed enumerating specialization constants.");
-
- if (sc_count) {
- Vector<SpvReflectSpecializationConstant *> spec_constants;
- spec_constants.resize(sc_count);
-
- result = spvReflectEnumerateSpecializationConstants(&module, &sc_count, spec_constants.ptrw());
- ERR_FAIL_COND_V_MSG(result != SPV_REFLECT_RESULT_SUCCESS, Vector<uint8_t>(),
- "Reflection of SPIR-V shader stage '" + String(shader_stage_names[p_spirv[i].shader_stage]) + "' failed obtaining specialization constants.");
-
- for (uint32_t j = 0; j < sc_count; j++) {
- int32_t existing = -1;
- RenderingDeviceVulkanShaderBinarySpecializationConstant sconst{};
- SpvReflectSpecializationConstant *spc = spec_constants[j];
-
- sconst.constant_id = spc->constant_id;
- sconst.int_value = 0.0; // Clear previous value JIC.
- switch (spc->constant_type) {
- case SPV_REFLECT_SPECIALIZATION_CONSTANT_BOOL: {
- sconst.type = PIPELINE_SPECIALIZATION_CONSTANT_TYPE_BOOL;
- sconst.bool_value = spc->default_value.int_bool_value != 0;
- } break;
- case SPV_REFLECT_SPECIALIZATION_CONSTANT_INT: {
- sconst.type = PIPELINE_SPECIALIZATION_CONSTANT_TYPE_INT;
- sconst.int_value = spc->default_value.int_bool_value;
- } break;
- case SPV_REFLECT_SPECIALIZATION_CONSTANT_FLOAT: {
- sconst.type = PIPELINE_SPECIALIZATION_CONSTANT_TYPE_FLOAT;
- sconst.float_value = spc->default_value.float_value;
- } break;
- }
- sconst.stage_flags = 1 << p_spirv[i].shader_stage;
-
- for (int k = 0; k < specialization_constants.size(); k++) {
- if (specialization_constants[k].constant_id == sconst.constant_id) {
- ERR_FAIL_COND_V_MSG(specialization_constants[k].type != sconst.type, Vector<uint8_t>(), "More than one specialization constant used for id (" + itos(sconst.constant_id) + "), but their types differ.");
- ERR_FAIL_COND_V_MSG(specialization_constants[k].int_value != sconst.int_value, Vector<uint8_t>(), "More than one specialization constant used for id (" + itos(sconst.constant_id) + "), but their default values differ.");
- existing = k;
- break;
- }
- }
-
- if (existing > 0) {
- specialization_constants.write[existing].stage_flags |= sconst.stage_flags;
- } else {
- specialization_constants.push_back(sconst);
- }
- }
- }
- }
-
- if (stage == SHADER_STAGE_VERTEX) {
- uint32_t iv_count = 0;
- result = spvReflectEnumerateInputVariables(&module, &iv_count, nullptr);
- ERR_FAIL_COND_V_MSG(result != SPV_REFLECT_RESULT_SUCCESS, Vector<uint8_t>(),
- "Reflection of SPIR-V shader stage '" + String(shader_stage_names[p_spirv[i].shader_stage]) + "' failed enumerating input variables.");
-
- if (iv_count) {
- Vector<SpvReflectInterfaceVariable *> input_vars;
- input_vars.resize(iv_count);
-
- result = spvReflectEnumerateInputVariables(&module, &iv_count, input_vars.ptrw());
- ERR_FAIL_COND_V_MSG(result != SPV_REFLECT_RESULT_SUCCESS, Vector<uint8_t>(),
- "Reflection of SPIR-V shader stage '" + String(shader_stage_names[p_spirv[i].shader_stage]) + "' failed obtaining input variables.");
-
- for (uint32_t j = 0; j < iv_count; j++) {
- if (input_vars[j] && input_vars[j]->decoration_flags == 0) { // Regular input.
- binary_data.vertex_input_mask |= (1 << uint32_t(input_vars[j]->location));
- }
- }
- }
- }
-
- if (stage == SHADER_STAGE_FRAGMENT) {
- uint32_t ov_count = 0;
- result = spvReflectEnumerateOutputVariables(&module, &ov_count, nullptr);
- ERR_FAIL_COND_V_MSG(result != SPV_REFLECT_RESULT_SUCCESS, Vector<uint8_t>(),
- "Reflection of SPIR-V shader stage '" + String(shader_stage_names[p_spirv[i].shader_stage]) + "' failed enumerating output variables.");
-
- if (ov_count) {
- Vector<SpvReflectInterfaceVariable *> output_vars;
- output_vars.resize(ov_count);
-
- result = spvReflectEnumerateOutputVariables(&module, &ov_count, output_vars.ptrw());
- ERR_FAIL_COND_V_MSG(result != SPV_REFLECT_RESULT_SUCCESS, Vector<uint8_t>(),
- "Reflection of SPIR-V shader stage '" + String(shader_stage_names[p_spirv[i].shader_stage]) + "' failed obtaining output variables.");
-
- for (uint32_t j = 0; j < ov_count; j++) {
- const SpvReflectInterfaceVariable *refvar = output_vars[j];
- if (refvar != nullptr && refvar->built_in != SpvBuiltInFragDepth) {
- binary_data.fragment_outputs |= 1 << refvar->location;
- }
- }
- }
+ {
+ binary_data.vertex_input_mask = spirv_data.vertex_input_mask;
+ binary_data.fragment_output_mask = spirv_data.fragment_output_mask;
+ binary_data.specialization_constants_count = spirv_data.specialization_constants.size();
+ binary_data.is_compute = spirv_data.is_compute;
+ binary_data.compute_local_size[0] = spirv_data.compute_local_size[0];
+ binary_data.compute_local_size[1] = spirv_data.compute_local_size[1];
+ binary_data.compute_local_size[2] = spirv_data.compute_local_size[2];
+ binary_data.set_count = spirv_data.uniforms.size();
+ binary_data.push_constant_size = spirv_data.push_constant_size;
+ for (uint32_t i = 0; i < SHADER_STAGE_MAX; i++) {
+ if (spirv_data.push_constant_stages_mask.has_flag((ShaderStage)(1 << i))) {
+ binary_data.push_constant_vk_stages_mask |= shader_stage_masks[i];
}
+ }
- uint32_t pc_count = 0;
- result = spvReflectEnumeratePushConstantBlocks(&module, &pc_count, nullptr);
- ERR_FAIL_COND_V_MSG(result != SPV_REFLECT_RESULT_SUCCESS, Vector<uint8_t>(),
- "Reflection of SPIR-V shader stage '" + String(shader_stage_names[p_spirv[i].shader_stage]) + "' failed enumerating push constants.");
-
- if (pc_count) {
- ERR_FAIL_COND_V_MSG(pc_count > 1, Vector<uint8_t>(),
- "Reflection of SPIR-V shader stage '" + String(shader_stage_names[p_spirv[i].shader_stage]) + "': Only one push constant is supported, which should be the same across shader stages.");
-
- Vector<SpvReflectBlockVariable *> pconstants;
- pconstants.resize(pc_count);
- result = spvReflectEnumeratePushConstantBlocks(&module, &pc_count, pconstants.ptrw());
- ERR_FAIL_COND_V_MSG(result != SPV_REFLECT_RESULT_SUCCESS, Vector<uint8_t>(),
- "Reflection of SPIR-V shader stage '" + String(shader_stage_names[p_spirv[i].shader_stage]) + "' failed obtaining push constants.");
-#if 0
- if (pconstants[0] == nullptr) {
- Ref<FileAccess> f = FileAccess::open("res://popo.spv", FileAccess::WRITE);
- f->store_buffer((const uint8_t *)&SpirV[0], SpirV.size() * sizeof(uint32_t));
- }
-#endif
-
- ERR_FAIL_COND_V_MSG(binary_data.push_constant_size && binary_data.push_constant_size != pconstants[0]->size, Vector<uint8_t>(),
- "Reflection of SPIR-V shader stage '" + String(shader_stage_names[p_spirv[i].shader_stage]) + "': Push constant block must be the same across shader stages.");
-
- binary_data.push_constant_size = pconstants[0]->size;
- binary_data.push_constants_vk_stage |= shader_stage_masks[stage];
-
- //print_line("Stage: " + String(shader_stage_names[stage]) + " push constant of size=" + itos(push_constant.push_constant_size));
+ for (const Vector<SpirvReflectionData::Uniform> &spirv_set : spirv_data.uniforms) {
+ Vector<RenderingDeviceVulkanShaderBinaryDataBinding> set_bindings;
+ for (const SpirvReflectionData::Uniform &spirv_uniform : spirv_set) {
+ RenderingDeviceVulkanShaderBinaryDataBinding binding{};
+ binding.type = (uint32_t)spirv_uniform.type;
+ binding.binding = spirv_uniform.binding;
+ binding.stages = (uint32_t)spirv_uniform.stages_mask;
+ binding.length = spirv_uniform.length;
+ binding.writable = (uint32_t)spirv_uniform.writable;
+ set_bindings.push_back(binding);
}
-
- // Destroy the reflection data when no longer required.
- spvReflectDestroyShaderModule(&module);
+ uniform_info.push_back(set_bindings);
}
- stages_processed |= (1 << p_spirv[i].shader_stage);
+ for (const SpirvReflectionData::SpecializationConstant &spirv_sc : spirv_data.specialization_constants) {
+ RenderingDeviceVulkanShaderBinarySpecializationConstant spec_constant{};
+ spec_constant.type = (uint32_t)spirv_sc.type;
+ spec_constant.constant_id = spirv_sc.constant_id;
+ spec_constant.int_value = spirv_sc.int_value;
+ spec_constant.stage_flags = (uint32_t)spirv_sc.stages_mask;
+ specialization_constants.push_back(spec_constant);
+ }
}
Vector<Vector<uint8_t>> compressed_stages;
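
The large removal above replaces the inline SPIRV-Reflect traversal with a shared `_reflect_spirv()` pass, and this function now only repacks the resulting `SpirvReflectionData` into the serializable binary structs. At the public API level, the round trip these structs serve looks roughly like this (a sketch; the GLSL-compilation helper and local names are assumed):

    Vector<RD::ShaderStageSPIRVData> stages = compile_my_glsl(); // Assumed helper.
    Vector<uint8_t> binary = rd->shader_compile_binary_from_spirv(stages, "my_shader");
    // Cache `binary` keyed by rd->shader_get_binary_cache_key(), then later:
    RID shader = rd->shader_create_from_bytecode(binary);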
@@ -5161,7 +4702,7 @@ Vector<uint8_t> RenderingDeviceVulkan::shader_compile_binary_from_spirv(const Ve
stages_binary_size += s;
}
- binary_data.specialization_constant_count = specialization_constants.size();
+ binary_data.specialization_constants_count = specialization_constants.size();
binary_data.set_count = uniform_info.size();
binary_data.stage_count = p_spirv.size();
@@ -5267,12 +4808,12 @@ RID RenderingDeviceVulkan::shader_create_from_bytecode(const Vector<uint8_t> &p_
const RenderingDeviceVulkanShaderBinaryData &binary_data = *(reinterpret_cast<const RenderingDeviceVulkanShaderBinaryData *>(binptr + 12));
Shader::PushConstant push_constant;
- push_constant.push_constant_size = binary_data.push_constant_size;
- push_constant.push_constants_vk_stage = binary_data.push_constants_vk_stage;
+ push_constant.size = binary_data.push_constant_size;
+ push_constant.vk_stages_mask = binary_data.push_constant_vk_stages_mask;
uint32_t vertex_input_mask = binary_data.vertex_input_mask;
- uint32_t fragment_outputs = binary_data.fragment_outputs;
+ uint32_t fragment_output_mask = binary_data.fragment_output_mask;
bool is_compute = binary_data.is_compute;
@@ -5368,11 +4909,11 @@ RID RenderingDeviceVulkan::shader_create_from_bytecode(const Vector<uint8_t> &p_
read_offset += set_size;
}
- ERR_FAIL_COND_V(read_offset + binary_data.specialization_constant_count * sizeof(RenderingDeviceVulkanShaderBinarySpecializationConstant) >= binsize, RID());
+ ERR_FAIL_COND_V(read_offset + binary_data.specialization_constants_count * sizeof(RenderingDeviceVulkanShaderBinarySpecializationConstant) >= binsize, RID());
Vector<Shader::SpecializationConstant> specialization_constants;
- for (uint32_t i = 0; i < binary_data.specialization_constant_count; i++) {
+ for (uint32_t i = 0; i < binary_data.specialization_constants_count; i++) {
const RenderingDeviceVulkanShaderBinarySpecializationConstant &src_sc = *(reinterpret_cast<const RenderingDeviceVulkanShaderBinarySpecializationConstant *>(binptr + read_offset));
Shader::SpecializationConstant sc;
sc.constant.int_value = src_sc.int_value;
@@ -5438,7 +4979,7 @@ RID RenderingDeviceVulkan::shader_create_from_bytecode(const Vector<uint8_t> &p_
Shader shader;
shader.vertex_input_mask = vertex_input_mask;
- shader.fragment_output_mask = fragment_outputs;
+ shader.fragment_output_mask = fragment_output_mask;
shader.push_constant = push_constant;
shader.is_compute = is_compute;
shader.compute_local_size[0] = compute_local_size[0];
@@ -5468,19 +5009,11 @@ RID RenderingDeviceVulkan::shader_create_from_bytecode(const Vector<uint8_t> &p_
break;
}
- const VkShaderStageFlagBits shader_stage_bits[SHADER_STAGE_MAX] = {
- VK_SHADER_STAGE_VERTEX_BIT,
- VK_SHADER_STAGE_FRAGMENT_BIT,
- VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT,
- VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT,
- VK_SHADER_STAGE_COMPUTE_BIT,
- };
-
VkPipelineShaderStageCreateInfo shader_stage;
shader_stage.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
shader_stage.pNext = nullptr;
shader_stage.flags = 0;
- shader_stage.stage = shader_stage_bits[stage_type[i]];
+ shader_stage.stage = shader_stage_masks[stage_type[i]];
shader_stage.module = module;
shader_stage.pName = "main";
shader_stage.pSpecializationInfo = nullptr;
@@ -5552,10 +5085,10 @@ RID RenderingDeviceVulkan::shader_create_from_bytecode(const Vector<uint8_t> &p_
// Needs to be declared in this outer scope, otherwise it may not outlive its assignment
// to pipeline_layout_create_info.
VkPushConstantRange push_constant_range;
- if (push_constant.push_constant_size) {
- push_constant_range.stageFlags = push_constant.push_constants_vk_stage;
+ if (push_constant.size) {
+ push_constant_range.stageFlags = push_constant.vk_stages_mask;
push_constant_range.offset = 0;
- push_constant_range.size = push_constant.push_constant_size;
+ push_constant_range.size = push_constant.size;
pipeline_layout_create_info.pushConstantRangeCount = 1;
pipeline_layout_create_info.pPushConstantRanges = &push_constant_range;
@@ -5629,7 +5162,7 @@ RID RenderingDeviceVulkan::uniform_buffer_create(uint32_t p_size_bytes, const Ve
return id;
}
-RID RenderingDeviceVulkan::storage_buffer_create(uint32_t p_size_bytes, const Vector<uint8_t> &p_data, uint32_t p_usage) {
+RID RenderingDeviceVulkan::storage_buffer_create(uint32_t p_size_bytes, const Vector<uint8_t> &p_data, BitField<StorageBufferUsage> p_usage) {
_THREAD_SAFE_METHOD_
ERR_FAIL_COND_V_MSG(draw_list != nullptr && p_data.size(), RID(),
"Creating buffers with data is forbidden during creation of a draw list");
@@ -5640,7 +5173,7 @@ RID RenderingDeviceVulkan::storage_buffer_create(uint32_t p_size_bytes, const Ve
Buffer buffer;
uint32_t flags = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_STORAGE_BUFFER_BIT;
- if (p_usage & STORAGE_BUFFER_USAGE_DISPATCH_INDIRECT) {
+ if (p_usage.has_flag(STORAGE_BUFFER_USAGE_DISPATCH_INDIRECT)) {
flags |= VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT;
}
Error err = _buffer_allocate(&buffer, p_size_bytes, flags, VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE, 0);
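+ // A minimal caller-side sketch (illustrative only; "rd" and "size_bytes" are hypothetical):
+ //   BitField<StorageBufferUsage> usage;
+ //   usage.set_flag(STORAGE_BUFFER_USAGE_DISPATCH_INDIRECT);
+ //   RID indirect_args = rd->storage_buffer_create(size_bytes, Vector<uint8_t>(), usage);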
@@ -5950,10 +5483,8 @@ RID RenderingDeviceVulkan::uniform_set_create(const Vector<Uniform> &p_uniforms,
// Can also be used as storage, add to mutable sampled.
mutable_sampled_textures.push_back(texture);
}
- if (texture->owner.is_valid()) {
- texture = texture_owner.get_or_null(texture->owner);
- ERR_FAIL_COND_V(!texture, RID()); // Bug, should never happen.
- }
+
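+ // In dev builds, verify that a texture reporting an owner (i.e. a shared view) still has that owner alive.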
+ DEV_ASSERT(!texture->owner.is_valid() || texture_owner.get_or_null(texture->owner));
img_info.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
@@ -6004,10 +5535,7 @@ RID RenderingDeviceVulkan::uniform_set_create(const Vector<Uniform> &p_uniforms,
mutable_sampled_textures.push_back(texture);
}
- if (texture->owner.is_valid()) {
- texture = texture_owner.get_or_null(texture->owner);
- ERR_FAIL_COND_V(!texture, RID()); // Bug, should never happen.
- }
+ DEV_ASSERT(!texture->owner.is_valid() || texture_owner.get_or_null(texture->owner));
img_info.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
@@ -6052,10 +5580,7 @@ RID RenderingDeviceVulkan::uniform_set_create(const Vector<Uniform> &p_uniforms,
mutable_storage_textures.push_back(texture);
}
- if (texture->owner.is_valid()) {
- texture = texture_owner.get_or_null(texture->owner);
- ERR_FAIL_COND_V(!texture, RID()); // Bug, should never happen.
- }
+ DEV_ASSERT(!texture->owner.is_valid() || texture_owner.get_or_null(texture->owner));
img_info.imageLayout = VK_IMAGE_LAYOUT_GENERAL;
@@ -6217,10 +5742,7 @@ RID RenderingDeviceVulkan::uniform_set_create(const Vector<Uniform> &p_uniforms,
img_info.sampler = VK_NULL_HANDLE;
img_info.imageView = texture->view;
- if (texture->owner.is_valid()) {
- texture = texture_owner.get_or_null(texture->owner);
- ERR_FAIL_COND_V(!texture, RID()); // Bug, should never happen.
- }
+ DEV_ASSERT(!texture->owner.is_valid() || texture_owner.get_or_null(texture->owner));
img_info.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
@@ -6315,7 +5837,7 @@ void RenderingDeviceVulkan::uniform_set_set_invalidation_callback(RID p_uniform_
us->invalidated_callback_userdata = p_userdata;
}
-Error RenderingDeviceVulkan::buffer_update(RID p_buffer, uint32_t p_offset, uint32_t p_size, const void *p_data, uint32_t p_post_barrier) {
+Error RenderingDeviceVulkan::buffer_update(RID p_buffer, uint32_t p_offset, uint32_t p_size, const void *p_data, BitField<BarrierMask> p_post_barrier) {
_THREAD_SAFE_METHOD_
ERR_FAIL_COND_V_MSG(draw_list, ERR_INVALID_PARAMETER,
@@ -6325,7 +5847,7 @@ Error RenderingDeviceVulkan::buffer_update(RID p_buffer, uint32_t p_offset, uint
VkPipelineStageFlags dst_stage_mask = 0;
VkAccessFlags dst_access = 0;
- if (p_post_barrier & BARRIER_MASK_TRANSFER) {
+ if (p_post_barrier.has_flag(BARRIER_MASK_TRANSFER)) {
// Protect subsequent updates.
dst_stage_mask = VK_PIPELINE_STAGE_TRANSFER_BIT;
dst_access = VK_ACCESS_TRANSFER_WRITE_BIT;
@@ -6361,7 +5883,7 @@ Error RenderingDeviceVulkan::buffer_update(RID p_buffer, uint32_t p_offset, uint
return err;
}
-Error RenderingDeviceVulkan::buffer_clear(RID p_buffer, uint32_t p_offset, uint32_t p_size, uint32_t p_post_barrier) {
+Error RenderingDeviceVulkan::buffer_clear(RID p_buffer, uint32_t p_offset, uint32_t p_size, BitField<BarrierMask> p_post_barrier) {
_THREAD_SAFE_METHOD_
ERR_FAIL_COND_V_MSG((p_size % 4) != 0, ERR_INVALID_PARAMETER,
@@ -6373,7 +5895,7 @@ Error RenderingDeviceVulkan::buffer_clear(RID p_buffer, uint32_t p_offset, uint3
VkPipelineStageFlags dst_stage_mask = 0;
VkAccessFlags dst_access = 0;
- if (p_post_barrier & BARRIER_MASK_TRANSFER) {
+ if (p_post_barrier.has_flag(BARRIER_MASK_TRANSFER)) {
// Protect subsequent updates.
dst_stage_mask = VK_PIPELINE_STAGE_TRANSFER_BIT;
dst_access = VK_ACCESS_TRANSFER_WRITE_BIT;
@@ -6405,29 +5927,37 @@ Error RenderingDeviceVulkan::buffer_clear(RID p_buffer, uint32_t p_offset, uint3
return OK;
}
-Vector<uint8_t> RenderingDeviceVulkan::buffer_get_data(RID p_buffer) {
+Vector<uint8_t> RenderingDeviceVulkan::buffer_get_data(RID p_buffer, uint32_t p_offset, uint32_t p_size) {
_THREAD_SAFE_METHOD_
// This buffer could have just been created.
VkPipelineStageFlags src_stage_mask = VK_PIPELINE_STAGE_TRANSFER_BIT;
VkAccessFlags src_access_mask = VK_ACCESS_TRANSFER_WRITE_BIT;
// Get the Vulkan buffer and the possible stage/access masks.
- Buffer *buffer = _get_buffer_from_owner(p_buffer, src_stage_mask, src_access_mask, BARRIER_MASK_ALL);
+ Buffer *buffer = _get_buffer_from_owner(p_buffer, src_stage_mask, src_access_mask, BARRIER_MASK_ALL_BARRIERS);
if (!buffer) {
ERR_FAIL_V_MSG(Vector<uint8_t>(), "Buffer is either invalid or this type of buffer can't be retrieved. Only Index and Vertex buffers allow retrieving.");
}
// Make sure no one is using the buffer -- the "false" gets us to the same command buffer as below.
- _buffer_memory_barrier(buffer->buffer, 0, buffer->size, src_stage_mask, src_access_mask, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_ACCESS_TRANSFER_READ_BIT, false);
+ _buffer_memory_barrier(buffer->buffer, 0, buffer->size, src_stage_mask, VK_PIPELINE_STAGE_TRANSFER_BIT, src_access_mask, VK_ACCESS_TRANSFER_READ_BIT, false);
VkCommandBuffer command_buffer = frames[frame].setup_command_buffer;
+ // Size of the region to retrieve; a size of 0 means the whole buffer.
+ if (!p_size) {
+ p_size = buffer->size;
+ } else {
+ ERR_FAIL_COND_V_MSG(p_size + p_offset > buffer->size, Vector<uint8_t>(),
+ "Offset plus size is larger than the buffer.");
+ }
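+ // e.g. buffer_get_data(rid, 16, 64) copies back 64 bytes starting at byte 16; buffer_get_data(rid) copies the whole buffer.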
+
Buffer tmp_buffer;
- _buffer_allocate(&tmp_buffer, buffer->size, VK_BUFFER_USAGE_TRANSFER_DST_BIT, VMA_MEMORY_USAGE_AUTO_PREFER_HOST, VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT);
+ _buffer_allocate(&tmp_buffer, p_size, VK_BUFFER_USAGE_TRANSFER_DST_BIT, VMA_MEMORY_USAGE_AUTO_PREFER_HOST, VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT);
VkBufferCopy region;
- region.srcOffset = 0;
+ region.srcOffset = p_offset;
region.dstOffset = 0;
- region.size = buffer->size;
+ region.size = p_size;
vkCmdCopyBuffer(command_buffer, buffer->buffer, tmp_buffer.buffer, 1, &region); // Dst buffer is in CPU memory; it is unclear whether the src buffer needs a barrier for this copy.
// Flush everything so memory can be safely mapped.
_flush(true);
@@ -6438,9 +5968,9 @@ Vector<uint8_t> RenderingDeviceVulkan::buffer_get_data(RID p_buffer) {
Vector<uint8_t> buffer_data;
{
- buffer_data.resize(buffer->size);
+ buffer_data.resize(p_size);
uint8_t *w = buffer_data.ptrw();
- memcpy(w, buffer_mem, buffer->size);
+ memcpy(w, buffer_mem, p_size);
}
vmaUnmapMemory(allocator, tmp_buffer.allocation);
@@ -6454,7 +5984,7 @@ Vector<uint8_t> RenderingDeviceVulkan::buffer_get_data(RID p_buffer) {
/**** RENDER PIPELINE ****/
/*************************/
-RID RenderingDeviceVulkan::render_pipeline_create(RID p_shader, FramebufferFormatID p_framebuffer_format, VertexFormatID p_vertex_format, RenderPrimitive p_render_primitive, const PipelineRasterizationState &p_rasterization_state, const PipelineMultisampleState &p_multisample_state, const PipelineDepthStencilState &p_depth_stencil_state, const PipelineColorBlendState &p_blend_state, int p_dynamic_state_flags, uint32_t p_for_render_pass, const Vector<PipelineSpecializationConstant> &p_specialization_constants) {
+RID RenderingDeviceVulkan::render_pipeline_create(RID p_shader, FramebufferFormatID p_framebuffer_format, VertexFormatID p_vertex_format, RenderPrimitive p_render_primitive, const PipelineRasterizationState &p_rasterization_state, const PipelineMultisampleState &p_multisample_state, const PipelineDepthStencilState &p_depth_stencil_state, const PipelineColorBlendState &p_blend_state, BitField<PipelineDynamicStateFlags> p_dynamic_state_flags, uint32_t p_for_render_pass, const Vector<PipelineSpecializationConstant> &p_specialization_constants) {
_THREAD_SAFE_METHOD_
// Needs a shader.
@@ -6740,31 +6270,31 @@ RID RenderingDeviceVulkan::render_pipeline_create(RID p_shader, FramebufferForma
dynamic_states.push_back(VK_DYNAMIC_STATE_VIEWPORT); // Viewport and scissor are always dynamic.
dynamic_states.push_back(VK_DYNAMIC_STATE_SCISSOR);
- if (p_dynamic_state_flags & DYNAMIC_STATE_LINE_WIDTH) {
+ if (p_dynamic_state_flags.has_flag(DYNAMIC_STATE_LINE_WIDTH)) {
dynamic_states.push_back(VK_DYNAMIC_STATE_LINE_WIDTH);
}
- if (p_dynamic_state_flags & DYNAMIC_STATE_DEPTH_BIAS) {
+ if (p_dynamic_state_flags.has_flag(DYNAMIC_STATE_DEPTH_BIAS)) {
dynamic_states.push_back(VK_DYNAMIC_STATE_DEPTH_BIAS);
}
- if (p_dynamic_state_flags & DYNAMIC_STATE_BLEND_CONSTANTS) {
+ if (p_dynamic_state_flags.has_flag(DYNAMIC_STATE_BLEND_CONSTANTS)) {
dynamic_states.push_back(VK_DYNAMIC_STATE_BLEND_CONSTANTS);
}
- if (p_dynamic_state_flags & DYNAMIC_STATE_DEPTH_BOUNDS) {
+ if (p_dynamic_state_flags.has_flag(DYNAMIC_STATE_DEPTH_BOUNDS)) {
dynamic_states.push_back(VK_DYNAMIC_STATE_DEPTH_BOUNDS);
}
- if (p_dynamic_state_flags & DYNAMIC_STATE_STENCIL_COMPARE_MASK) {
+ if (p_dynamic_state_flags.has_flag(DYNAMIC_STATE_STENCIL_COMPARE_MASK)) {
dynamic_states.push_back(VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK);
}
- if (p_dynamic_state_flags & DYNAMIC_STATE_STENCIL_WRITE_MASK) {
+ if (p_dynamic_state_flags.has_flag(DYNAMIC_STATE_STENCIL_WRITE_MASK)) {
dynamic_states.push_back(VK_DYNAMIC_STATE_STENCIL_WRITE_MASK);
}
- if (p_dynamic_state_flags & DYNAMIC_STATE_STENCIL_REFERENCE) {
+ if (p_dynamic_state_flags.has_flag(DYNAMIC_STATE_STENCIL_REFERENCE)) {
dynamic_states.push_back(VK_DYNAMIC_STATE_STENCIL_REFERENCE);
}
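+ // Caller-side sketch (values are illustrative): request extra dynamic state when creating the pipeline.
+ //   BitField<PipelineDynamicStateFlags> dyn_state;
+ //   dyn_state.set_flag(DYNAMIC_STATE_LINE_WIDTH);
+ //   dyn_state.set_flag(DYNAMIC_STATE_STENCIL_REFERENCE);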
@@ -6872,10 +6402,10 @@ RID RenderingDeviceVulkan::render_pipeline_create(RID p_shader, FramebufferForma
ERR_FAIL_COND_V_MSG(err, RID(), "vkCreateGraphicsPipelines failed with error " + itos(err) + " for shader '" + shader->name + "'.");
pipeline.set_formats = shader->set_formats;
- pipeline.push_constant_stages = shader->push_constant.push_constants_vk_stage;
+ pipeline.push_constant_stages_mask = shader->push_constant.vk_stages_mask;
pipeline.pipeline_layout = shader->pipeline_layout;
pipeline.shader = p_shader;
- pipeline.push_constant_size = shader->push_constant.push_constant_size;
+ pipeline.push_constant_size = shader->push_constant.size;
#ifdef DEBUG_ENABLED
pipeline.validation.dynamic_state = p_dynamic_state_flags;
@@ -6987,10 +6517,10 @@ RID RenderingDeviceVulkan::compute_pipeline_create(RID p_shader, const Vector<Pi
ERR_FAIL_COND_V_MSG(err, RID(), "vkCreateComputePipelines failed with error " + itos(err) + ".");
pipeline.set_formats = shader->set_formats;
- pipeline.push_constant_stages = shader->push_constant.push_constants_vk_stage;
+ pipeline.push_constant_stages_mask = shader->push_constant.vk_stages_mask;
pipeline.pipeline_layout = shader->pipeline_layout;
pipeline.shader = p_shader;
- pipeline.push_constant_size = shader->push_constant.push_constant_size;
+ pipeline.push_constant_size = shader->push_constant.size;
pipeline.local_group_size[0] = shader->compute_local_size[0];
pipeline.local_group_size[1] = shader->compute_local_size[1];
pipeline.local_group_size[2] = shader->compute_local_size[2];
@@ -7632,7 +7162,7 @@ void RenderingDeviceVulkan::draw_list_bind_render_pipeline(DrawListID p_list, RI
dl->state.set_count = pcount; // Update set count.
if (pipeline->push_constant_size) {
- dl->state.pipeline_push_constant_stages = pipeline->push_constant_stages;
+ dl->state.pipeline_push_constant_stages = pipeline->push_constant_stages_mask;
#ifdef DEBUG_ENABLED
dl->validation.pipeline_push_constant_supplied = false;
#endif
@@ -8084,7 +7614,7 @@ void RenderingDeviceVulkan::_draw_list_free(Rect2i *r_last_viewport) {
_THREAD_SAFE_UNLOCK_
}
-void RenderingDeviceVulkan::draw_list_end(uint32_t p_post_barrier) {
+void RenderingDeviceVulkan::draw_list_end(BitField<BarrierMask> p_post_barrier) {
_THREAD_SAFE_METHOD_
ERR_FAIL_COND_MSG(!draw_list, "Immediate draw list is already inactive.");
@@ -8106,15 +7636,15 @@ void RenderingDeviceVulkan::draw_list_end(uint32_t p_post_barrier) {
uint32_t barrier_flags = 0;
uint32_t access_flags = 0;
- if (p_post_barrier & BARRIER_MASK_COMPUTE) {
+ if (p_post_barrier.has_flag(BARRIER_MASK_COMPUTE)) {
barrier_flags |= VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
}
- if (p_post_barrier & BARRIER_MASK_RASTER) {
+ if (p_post_barrier.has_flag(BARRIER_MASK_RASTER)) {
barrier_flags |= VK_PIPELINE_STAGE_VERTEX_INPUT_BIT | VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT /*| VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT*/;
access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT | VK_ACCESS_INDEX_READ_BIT | VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT /*| VK_ACCESS_INDIRECT_COMMAND_READ_BIT*/;
}
- if (p_post_barrier & BARRIER_MASK_TRANSFER) {
+ if (p_post_barrier.has_flag(BARRIER_MASK_TRANSFER)) {
barrier_flags |= VK_PIPELINE_STAGE_TRANSFER_BIT;
access_flags |= VK_ACCESS_TRANSFER_WRITE_BIT | VK_ACCESS_TRANSFER_READ_BIT;
}
@@ -8248,7 +7778,7 @@ void RenderingDeviceVulkan::compute_list_bind_compute_pipeline(ComputeListID p_l
cl->state.set_count = pcount; // Update set count.
if (pipeline->push_constant_size) {
- cl->state.pipeline_push_constant_stages = pipeline->push_constant_stages;
+ cl->state.pipeline_push_constant_stages = pipeline->push_constant_stages_mask;
#ifdef DEBUG_ENABLED
cl->validation.pipeline_push_constant_supplied = false;
#endif
@@ -8592,20 +8122,20 @@ void RenderingDeviceVulkan::compute_list_add_barrier(ComputeListID p_list) {
#endif
}
-void RenderingDeviceVulkan::compute_list_end(uint32_t p_post_barrier) {
+void RenderingDeviceVulkan::compute_list_end(BitField<BarrierMask> p_post_barrier) {
ERR_FAIL_COND(!compute_list);
uint32_t barrier_flags = 0;
uint32_t access_flags = 0;
- if (p_post_barrier & BARRIER_MASK_COMPUTE) {
+ if (p_post_barrier.has_flag(BARRIER_MASK_COMPUTE)) {
barrier_flags |= VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
}
- if (p_post_barrier & BARRIER_MASK_RASTER) {
+ if (p_post_barrier.has_flag(BARRIER_MASK_RASTER)) {
barrier_flags |= VK_PIPELINE_STAGE_VERTEX_INPUT_BIT | VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT;
access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT | VK_ACCESS_INDEX_READ_BIT | VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT | VK_ACCESS_INDIRECT_COMMAND_READ_BIT;
}
- if (p_post_barrier & BARRIER_MASK_TRANSFER) {
+ if (p_post_barrier.has_flag(BARRIER_MASK_TRANSFER)) {
barrier_flags |= VK_PIPELINE_STAGE_TRANSFER_BIT;
access_flags |= VK_ACCESS_TRANSFER_WRITE_BIT | VK_ACCESS_TRANSFER_READ_BIT;
}
@@ -8673,43 +8203,45 @@ void RenderingDeviceVulkan::compute_list_end(uint32_t p_post_barrier) {
_THREAD_SAFE_UNLOCK_
}
-void RenderingDeviceVulkan::barrier(uint32_t p_from, uint32_t p_to) {
+void RenderingDeviceVulkan::barrier(BitField<BarrierMask> p_from, BitField<BarrierMask> p_to) {
uint32_t src_barrier_flags = 0;
uint32_t src_access_flags = 0;
- if (p_from & BARRIER_MASK_COMPUTE) {
- src_barrier_flags |= VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
- src_access_flags |= VK_ACCESS_SHADER_WRITE_BIT;
- }
- if (p_from & BARRIER_MASK_RASTER) {
- src_barrier_flags |= VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT;
- src_access_flags |= VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
- }
- if (p_from & BARRIER_MASK_TRANSFER) {
- src_barrier_flags |= VK_PIPELINE_STAGE_TRANSFER_BIT;
- src_access_flags |= VK_ACCESS_TRANSFER_WRITE_BIT;
- }
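+ // An empty source mask means there is nothing to wait on, so fall back to TOP_OF_PIPE (the no-op source stage).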
if (p_from == 0) {
src_barrier_flags = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
+ } else {
+ if (p_from.has_flag(BARRIER_MASK_COMPUTE)) {
+ src_barrier_flags |= VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
+ src_access_flags |= VK_ACCESS_SHADER_WRITE_BIT;
+ }
+ if (p_from.has_flag(BARRIER_MASK_RASTER)) {
+ src_barrier_flags |= VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT;
+ src_access_flags |= VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
+ }
+ if (p_from.has_flag(BARRIER_MASK_TRANSFER)) {
+ src_barrier_flags |= VK_PIPELINE_STAGE_TRANSFER_BIT;
+ src_access_flags |= VK_ACCESS_TRANSFER_WRITE_BIT;
+ }
}
uint32_t dst_barrier_flags = 0;
uint32_t dst_access_flags = 0;
- if (p_to & BARRIER_MASK_COMPUTE) {
- dst_barrier_flags |= VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
- dst_access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
- }
- if (p_to & BARRIER_MASK_RASTER) {
- dst_barrier_flags |= VK_PIPELINE_STAGE_VERTEX_INPUT_BIT | VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT;
- dst_access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT | VK_ACCESS_INDEX_READ_BIT | VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT | VK_ACCESS_INDIRECT_COMMAND_READ_BIT;
- }
- if (p_to & BARRIER_MASK_TRANSFER) {
- dst_barrier_flags |= VK_PIPELINE_STAGE_TRANSFER_BIT;
- dst_access_flags |= VK_ACCESS_TRANSFER_WRITE_BIT | VK_ACCESS_TRANSFER_READ_BIT;
- }
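+ // An empty destination mask means nothing consumes the results, so BOTTOM_OF_PIPE (the no-op destination stage) is used.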
if (p_to == 0) {
dst_barrier_flags = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT;
+ } else {
+ if (p_to.has_flag(BARRIER_MASK_COMPUTE)) {
+ dst_barrier_flags |= VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
+ dst_access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
+ }
+ if (p_to.has_flag(BARRIER_MASK_RASTER)) {
+ dst_barrier_flags |= VK_PIPELINE_STAGE_VERTEX_INPUT_BIT | VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT;
+ dst_access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT | VK_ACCESS_INDEX_READ_BIT | VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT | VK_ACCESS_INDIRECT_COMMAND_READ_BIT;
+ }
+ if (p_to.has_flag(BARRIER_MASK_TRANSFER)) {
+ dst_barrier_flags |= VK_PIPELINE_STAGE_TRANSFER_BIT;
+ dst_access_flags |= VK_ACCESS_TRANSFER_WRITE_BIT | VK_ACCESS_TRANSFER_READ_BIT;
+ }
}
_memory_barrier(src_barrier_flags, dst_barrier_flags, src_access_flags, dst_access_flags, true);
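+ // e.g. barrier(BARRIER_MASK_COMPUTE, BARRIER_MASK_RASTER) makes prior compute writes visible to later raster-stage reads.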
@@ -9846,11 +9378,11 @@ bool RenderingDeviceVulkan::has_feature(const Features p_feature) const {
return multiview_capabilies.is_supported && multiview_capabilies.max_view_count > 1;
} break;
case SUPPORTS_FSR_HALF_FLOAT: {
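+ // The half-float FSR path needs 16-bit floats and ints in shaders plus 16-bit storage buffer access, all checked here.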
- return context->get_shader_capabilities().shader_float16_is_supported && context->get_storage_buffer_capabilities().storage_buffer_16_bit_access_is_supported;
+ return context->get_shader_capabilities().shader_float16_is_supported && context->get_physical_device_features().shaderInt16 && context->get_storage_buffer_capabilities().storage_buffer_16_bit_access_is_supported;
} break;
case SUPPORTS_ATTACHMENT_VRS: {
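+ // Besides the VRS attachment capability itself, extended storage image formats are required here (the VRS density texture is presumably written as a storage image in an extended format such as R8_UINT).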
VulkanContext::VRSCapabilities vrs_capabilities = context->get_vrs_capabilities();
- return vrs_capabilities.attachment_vrs_supported;
+ return vrs_capabilities.attachment_vrs_supported && context->get_physical_device_features().shaderStorageImageExtendedFormats;
} break;
default: {
return false;