Diffstat (limited to 'drivers')
-rw-r--r--  drivers/dummy/rasterizer_dummy.h            |   2
-rw-r--r--  drivers/unix/os_unix.cpp                    |   2
-rw-r--r--  drivers/vulkan/rendering_device_vulkan.cpp  | 245
-rw-r--r--  drivers/vulkan/rendering_device_vulkan.h    |   1
4 files changed, 178 insertions(+), 72 deletions(-)
diff --git a/drivers/dummy/rasterizer_dummy.h b/drivers/dummy/rasterizer_dummy.h
index 1df26756c2..bef4d999af 100644
--- a/drivers/dummy/rasterizer_dummy.h
+++ b/drivers/dummy/rasterizer_dummy.h
@@ -80,7 +80,7 @@ public:
void environment_set_canvas_max_layer(RID p_env, int p_max_layer) override {}
void environment_set_ambient_light(RID p_env, const Color &p_color, RS::EnvironmentAmbientSource p_ambient = RS::ENV_AMBIENT_SOURCE_BG, float p_energy = 1.0, float p_sky_contribution = 0.0, RS::EnvironmentReflectionSource p_reflection_source = RS::ENV_REFLECTION_SOURCE_BG, const Color &p_ao_color = Color()) override {}
- void environment_set_glow(RID p_env, bool p_enable, int p_level_flags, float p_intensity, float p_strength, float p_mix, float p_bloom_threshold, RS::EnvironmentGlowBlendMode p_blend_mode, float p_hdr_bleed_threshold, float p_hdr_bleed_scale, float p_hdr_luminance_cap) override {}
+ void environment_set_glow(RID p_env, bool p_enable, Vector<float> p_levels, float p_intensity, float p_strength, float p_mix, float p_bloom_threshold, RS::EnvironmentGlowBlendMode p_blend_mode, float p_hdr_bleed_threshold, float p_hdr_bleed_scale, float p_hdr_luminance_cap) override {}
void environment_glow_set_use_bicubic_upscale(bool p_enable) override {}
void environment_glow_set_use_high_quality(bool p_enable) override {}
diff --git a/drivers/unix/os_unix.cpp b/drivers/unix/os_unix.cpp
index 96c338f86b..eec6eb8303 100644
--- a/drivers/unix/os_unix.cpp
+++ b/drivers/unix/os_unix.cpp
@@ -331,7 +331,7 @@ Error OS_Unix::execute(const String &p_path, const List<String> &p_arguments, bo
int status;
waitpid(pid, &status, 0);
if (r_exitcode) {
- *r_exitcode = WEXITSTATUS(status);
+ *r_exitcode = WIFEXITED(status) ? WEXITSTATUS(status) : status;
}
} else {
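The fix distinguishes normal from abnormal child termination: WEXITSTATUS(status) only carries a meaningful value when WIFEXITED(status) is true; for a signal-killed child the raw status word is passed through instead. A standalone sketch of the semantics the new line relies on (the helper is illustrative, not part of os_unix.cpp):

#include <sys/types.h>
#include <sys/wait.h>
#include <cstdio>

// Illustrative helper mirroring the logic now used by OS_Unix::execute().
static int exit_code_from_status(int status) {
	if (WIFEXITED(status)) {
		return WEXITSTATUS(status); // Normal termination: the 8-bit exit code.
	}
	if (WIFSIGNALED(status)) {
		fprintf(stderr, "Child terminated by signal %d\n", WTERMSIG(status));
	}
	return status; // Abnormal termination: fall back to the raw status word.
}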
diff --git a/drivers/vulkan/rendering_device_vulkan.cpp b/drivers/vulkan/rendering_device_vulkan.cpp
index 9fe3f7e6c0..92d7906b6c 100644
--- a/drivers/vulkan/rendering_device_vulkan.cpp
+++ b/drivers/vulkan/rendering_device_vulkan.cpp
@@ -38,7 +38,58 @@
#include "thirdparty/spirv-reflect/spirv_reflect.h"
-#define FORCE_FULL_BARRIER
+//#define FORCE_FULL_BARRIER
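For context, the now-disabled FORCE_FULL_BARRIER path funnels every transition through the heaviest possible synchronization: all prior work finishes and flushes before anything later starts. A hedged approximation of what such a catch-all barrier looks like (not the literal _full_barrier() implementation, which also selects the right command buffer):

#include <vulkan/vulkan.h>

// Approximation of a catch-all barrier: all stages, all memory, both ways.
static void emit_full_barrier(VkCommandBuffer cmd) {
	VkMemoryBarrier barrier;
	barrier.sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER;
	barrier.pNext = nullptr;
	barrier.srcAccessMask = VK_ACCESS_MEMORY_READ_BIT | VK_ACCESS_MEMORY_WRITE_BIT;
	barrier.dstAccessMask = VK_ACCESS_MEMORY_READ_BIT | VK_ACCESS_MEMORY_WRITE_BIT;
	vkCmdPipelineBarrier(cmd, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT,
			0, 1, &barrier, 0, nullptr, 0, nullptr);
}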
+
+// Get the Vulkan object information and the possible stage/access types (bitwise OR'd with incoming values)
+RenderingDeviceVulkan::Buffer *RenderingDeviceVulkan::_get_buffer_from_owner(RID p_buffer, VkPipelineStageFlags &stage_mask, VkAccessFlags &access_mask) {
+ Buffer *buffer = nullptr;
+ if (vertex_buffer_owner.owns(p_buffer)) {
+ stage_mask |= VK_PIPELINE_STAGE_VERTEX_INPUT_BIT;
+ access_mask |= VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT;
+ buffer = vertex_buffer_owner.getornull(p_buffer);
+ } else if (index_buffer_owner.owns(p_buffer)) {
+ stage_mask |= VK_PIPELINE_STAGE_VERTEX_INPUT_BIT;
+ access_mask |= VK_ACCESS_INDEX_READ_BIT;
+ buffer = index_buffer_owner.getornull(p_buffer);
+ } else if (uniform_buffer_owner.owns(p_buffer)) {
+ stage_mask |= VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
+ access_mask |= VK_ACCESS_UNIFORM_READ_BIT;
+ buffer = uniform_buffer_owner.getornull(p_buffer);
+ } else if (texture_buffer_owner.owns(p_buffer)) {
+ stage_mask |= VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
+ access_mask |= VK_ACCESS_SHADER_READ_BIT;
+ buffer = &texture_buffer_owner.getornull(p_buffer)->buffer;
+ } else if (storage_buffer_owner.owns(p_buffer)) {
+ stage_mask |= VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
+ access_mask |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
+ buffer = storage_buffer_owner.getornull(p_buffer);
+ }
+ return buffer;
+}
+
+static void update_external_dependency_for_store(VkSubpassDependency &dependency, bool is_sampled, bool is_storage, bool is_depth) {
+ // Transitioning from write to read, protect the shaders that may use this next
+ // Allow for copies/image layout transitions
+ dependency.dstStageMask |= VK_PIPELINE_STAGE_TRANSFER_BIT;
+ dependency.dstAccessMask |= VK_ACCESS_TRANSFER_READ_BIT;
+
+ if (is_sampled) {
+ dependency.dstStageMask |= VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
+ dependency.dstAccessMask |= VK_ACCESS_SHADER_READ_BIT;
+ } else if (is_storage) {
+ dependency.dstStageMask |= VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
+ dependency.dstAccessMask |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
+ } else {
+ dependency.dstStageMask |= VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
+ dependency.dstAccessMask |= VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
+ }
+
+ if (is_depth) {
+ // Depth resources have additional stages that may be interested in them
+ dependency.dstStageMask |= VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT;
+ dependency.dstAccessMask |= VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
+ }
+}
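As a usage illustration (hedged; the real call sites are in _render_pass_create further below, and default_access_mask stands for the implicit-dependency access bits defined there): for a color attachment that is stored and then sampled, the outgoing dependency is widened like this.

// Hedged fragment, assuming a default_access_mask as in _render_pass_create.
// For a stored color attachment that will be sampled afterwards:
VkSubpassDependency to_external = { 0, VK_SUBPASS_EXTERNAL,
		VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT,
		default_access_mask, 0, 0 };
update_external_dependency_for_store(to_external, /* is_sampled = */ true, /* is_storage = */ false, /* is_depth = */ false);
// Afterwards: dstStageMask also contains TRANSFER plus the vertex/fragment/
// compute shader stages; dstAccessMask contains TRANSFER_READ and SHADER_READ.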
void RenderingDeviceVulkan::_add_dependency(RID p_id, RID p_depends_on) {
if (!dependency_map.has(p_depends_on)) {
@@ -1932,7 +1983,7 @@ RID RenderingDeviceVulkan::texture_create(const TextureFormat &p_format, const T
image_memory_barrier.subresourceRange.baseArrayLayer = 0;
image_memory_barrier.subresourceRange.layerCount = image_create_info.arrayLayers;
- vkCmdPipelineBarrier(frames[frame].setup_command_buffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, 0, 0, nullptr, 0, nullptr, 1, &image_memory_barrier);
+ vkCmdPipelineBarrier(frames[frame].setup_command_buffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, 0, 0, nullptr, 0, nullptr, 1, &image_memory_barrier);
}
RID id = texture_owner.make_rid(texture);
@@ -2189,7 +2240,7 @@ Error RenderingDeviceVulkan::texture_update(RID p_texture, uint32_t p_layer, con
image_memory_barrier.subresourceRange.baseArrayLayer = p_layer;
image_memory_barrier.subresourceRange.layerCount = 1;
- vkCmdPipelineBarrier(command_buffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0, 0, nullptr, 0, nullptr, 1, &image_memory_barrier);
+ vkCmdPipelineBarrier(command_buffer, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0, 0, nullptr, 0, nullptr, 1, &image_memory_barrier);
}
uint32_t mipmap_offset = 0;
@@ -2322,7 +2373,7 @@ Error RenderingDeviceVulkan::texture_update(RID p_texture, uint32_t p_layer, con
image_memory_barrier.subresourceRange.baseArrayLayer = p_layer;
image_memory_barrier.subresourceRange.layerCount = 1;
- vkCmdPipelineBarrier(command_buffer, VK_ACCESS_TRANSFER_WRITE_BIT, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, 0, 0, nullptr, 0, nullptr, 1, &image_memory_barrier);
+ vkCmdPipelineBarrier(command_buffer, VK_ACCESS_TRANSFER_WRITE_BIT, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, 0, 0, nullptr, 0, nullptr, 1, &image_memory_barrier);
}
return OK;
@@ -2485,6 +2536,9 @@ Vector<uint8_t> RenderingDeviceVulkan::texture_get_data(RID p_texture, uint32_t
image_memory_barrier.pNext = nullptr;
image_memory_barrier.srcAccessMask = VK_ACCESS_TRANSFER_READ_BIT;
image_memory_barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
+ if (tex->usage_flags & TEXTURE_USAGE_STORAGE_BIT) {
+ image_memory_barrier.dstAccessMask |= VK_ACCESS_SHADER_WRITE_BIT;
+ }
image_memory_barrier.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL;
image_memory_barrier.newLayout = tex->layout;
image_memory_barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
@@ -2496,7 +2550,7 @@ Vector<uint8_t> RenderingDeviceVulkan::texture_get_data(RID p_texture, uint32_t
image_memory_barrier.subresourceRange.baseArrayLayer = p_layer;
image_memory_barrier.subresourceRange.layerCount = 1;
- vkCmdPipelineBarrier(command_buffer, VK_ACCESS_TRANSFER_WRITE_BIT, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, 0, 0, nullptr, 0, nullptr, 1, &image_memory_barrier);
+ vkCmdPipelineBarrier(command_buffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, 0, 0, nullptr, 0, nullptr, 1, &image_memory_barrier);
}
_flush(true);
@@ -2672,7 +2726,7 @@ Error RenderingDeviceVulkan::texture_copy(RID p_from_texture, RID p_to_texture,
image_memory_barrier.subresourceRange.baseArrayLayer = p_src_layer;
image_memory_barrier.subresourceRange.layerCount = 1;
- vkCmdPipelineBarrier(command_buffer, VK_ACCESS_TRANSFER_WRITE_BIT, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, 0, 0, nullptr, 0, nullptr, 1, &image_memory_barrier);
+ vkCmdPipelineBarrier(command_buffer, VK_ACCESS_TRANSFER_WRITE_BIT, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, 0, 0, nullptr, 0, nullptr, 1, &image_memory_barrier);
}
{ //make dst readable
@@ -2694,7 +2748,7 @@ Error RenderingDeviceVulkan::texture_copy(RID p_from_texture, RID p_to_texture,
image_memory_barrier.subresourceRange.baseArrayLayer = p_src_layer;
image_memory_barrier.subresourceRange.layerCount = 1;
- vkCmdPipelineBarrier(command_buffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, 0, 0, nullptr, 0, nullptr, 1, &image_memory_barrier);
+ vkCmdPipelineBarrier(command_buffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, 0, 0, nullptr, 0, nullptr, 1, &image_memory_barrier);
}
}
@@ -2755,7 +2809,7 @@ Error RenderingDeviceVulkan::texture_resolve_multisample(RID p_from_texture, RID
image_memory_barrier.subresourceRange.baseArrayLayer = src_tex->base_layer;
image_memory_barrier.subresourceRange.layerCount = 1;
- vkCmdPipelineBarrier(command_buffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0, 0, nullptr, 0, nullptr, 1, &image_memory_barrier);
+ vkCmdPipelineBarrier(command_buffer, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0, 0, nullptr, 0, nullptr, 1, &image_memory_barrier);
}
{ //Dest
VkImageMemoryBarrier image_memory_barrier;
@@ -2824,7 +2878,7 @@ Error RenderingDeviceVulkan::texture_resolve_multisample(RID p_from_texture, RID
image_memory_barrier.subresourceRange.baseArrayLayer = src_tex->base_layer;
image_memory_barrier.subresourceRange.layerCount = 1;
- vkCmdPipelineBarrier(command_buffer, VK_ACCESS_TRANSFER_WRITE_BIT, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, 0, 0, nullptr, 0, nullptr, 1, &image_memory_barrier);
+ vkCmdPipelineBarrier(command_buffer, VK_ACCESS_TRANSFER_WRITE_BIT, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, 0, 0, nullptr, 0, nullptr, 1, &image_memory_barrier);
}
{ //make dst readable
@@ -2846,7 +2900,7 @@ Error RenderingDeviceVulkan::texture_resolve_multisample(RID p_from_texture, RID
image_memory_barrier.subresourceRange.baseArrayLayer = dst_tex->base_layer;
image_memory_barrier.subresourceRange.layerCount = 1;
- vkCmdPipelineBarrier(command_buffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, 0, 0, nullptr, 0, nullptr, 1, &image_memory_barrier);
+ vkCmdPipelineBarrier(command_buffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, 0, 0, nullptr, 0, nullptr, 1, &image_memory_barrier);
}
}
@@ -2878,16 +2932,22 @@ Error RenderingDeviceVulkan::texture_clear(RID p_texture, const Color &p_color,
VkCommandBuffer command_buffer = p_sync_with_draw ? frames[frame].draw_command_buffer : frames[frame].setup_command_buffer;
- VkImageLayout layout = src_tex->layout;
+ VkImageLayout clear_layout = (src_tex->layout == VK_IMAGE_LAYOUT_GENERAL) ? VK_IMAGE_LAYOUT_GENERAL : VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
- if (src_tex->layout != VK_IMAGE_LAYOUT_GENERAL) { //storage may be in general state
+ // NOTE: Perhaps the valid stages/accesses for a given owner should be a property of the owner. (Here and places like _get_buffer_from_owner)
+ const VkPipelineStageFlags valid_texture_stages = VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
+ constexpr VkAccessFlags read_access = VK_ACCESS_SHADER_READ_BIT;
+ constexpr VkAccessFlags read_write_access = VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
+ const VkAccessFlags valid_texture_access = (src_tex->usage_flags & TEXTURE_USAGE_STORAGE_BIT) ? read_write_access : read_access;
+
+ { // Barrier from previous access with optional layout change (see clear_layout logic above)
VkImageMemoryBarrier image_memory_barrier;
image_memory_barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
image_memory_barrier.pNext = nullptr;
- image_memory_barrier.srcAccessMask = 0;
+ image_memory_barrier.srcAccessMask = valid_texture_access;
image_memory_barrier.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
image_memory_barrier.oldLayout = src_tex->layout;
- image_memory_barrier.newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
+ image_memory_barrier.newLayout = clear_layout;
image_memory_barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
image_memory_barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
@@ -2898,8 +2958,7 @@ Error RenderingDeviceVulkan::texture_clear(RID p_texture, const Color &p_color,
image_memory_barrier.subresourceRange.baseArrayLayer = src_tex->base_layer + p_base_layer;
image_memory_barrier.subresourceRange.layerCount = p_layers;
- layout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
- vkCmdPipelineBarrier(command_buffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0, 0, nullptr, 0, nullptr, 1, &image_memory_barrier);
+ vkCmdPipelineBarrier(command_buffer, valid_texture_stages, VK_PIPELINE_STAGE_TRANSFER_BIT, 0, 0, nullptr, 0, nullptr, 1, &image_memory_barrier);
}
VkClearColorValue clear_color;
@@ -2915,16 +2974,15 @@ Error RenderingDeviceVulkan::texture_clear(RID p_texture, const Color &p_color,
range.baseMipLevel = src_tex->base_mipmap + p_base_mipmap;
range.levelCount = p_mipmaps;
- vkCmdClearColorImage(command_buffer, src_tex->image, layout, &clear_color, 1, &range);
-
- if (src_tex->layout != VK_IMAGE_LAYOUT_GENERAL) { //storage may be in general state
+ vkCmdClearColorImage(command_buffer, src_tex->image, clear_layout, &clear_color, 1, &range);
+ { // Barrier to post clear accesses (changing back the layout if needed)
VkImageMemoryBarrier image_memory_barrier;
image_memory_barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
image_memory_barrier.pNext = nullptr;
image_memory_barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
- image_memory_barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
- image_memory_barrier.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
+ image_memory_barrier.dstAccessMask = valid_texture_access;
+ image_memory_barrier.oldLayout = clear_layout;
image_memory_barrier.newLayout = src_tex->layout;
image_memory_barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
@@ -2936,7 +2994,7 @@ Error RenderingDeviceVulkan::texture_clear(RID p_texture, const Color &p_color,
image_memory_barrier.subresourceRange.baseArrayLayer = src_tex->base_layer + p_base_layer;
image_memory_barrier.subresourceRange.layerCount = p_layers;
- vkCmdPipelineBarrier(command_buffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, 0, 0, nullptr, 0, nullptr, 1, &image_memory_barrier);
+ vkCmdPipelineBarrier(command_buffer, VK_PIPELINE_STAGE_TRANSFER_BIT, valid_texture_stages, 0, 0, nullptr, 0, nullptr, 1, &image_memory_barrier);
}
return OK;
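In compressed form, the clear path now reads barrier → clear → barrier, with the layout round trip skipped when the image already sits in VK_IMAGE_LAYOUT_GENERAL (storage images). A standalone, hedged sketch of that pattern (names and single-subresource range are illustrative):

#include <vulkan/vulkan.h>

// Hedged sketch: transition to a clearable layout, clear, transition back.
static void clear_color_image(VkCommandBuffer cmd, VkImage image, VkImageLayout current_layout,
		VkAccessFlags current_access, VkPipelineStageFlags current_stages, const VkClearColorValue &color) {
	const VkImageLayout clear_layout = (current_layout == VK_IMAGE_LAYOUT_GENERAL)
			? VK_IMAGE_LAYOUT_GENERAL
			: VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
	VkImageSubresourceRange range = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1 };

	// Barrier from previous accesses, with an optional layout change.
	VkImageMemoryBarrier to_clear = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, nullptr,
			current_access, VK_ACCESS_TRANSFER_WRITE_BIT,
			current_layout, clear_layout,
			VK_QUEUE_FAMILY_IGNORED, VK_QUEUE_FAMILY_IGNORED, image, range };
	vkCmdPipelineBarrier(cmd, current_stages, VK_PIPELINE_STAGE_TRANSFER_BIT, 0, 0, nullptr, 0, nullptr, 1, &to_clear);

	vkCmdClearColorImage(cmd, image, clear_layout, &color, 1, &range);

	// Barrier to post-clear accesses, restoring the layout if it was changed.
	VkImageMemoryBarrier back = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, nullptr,
			VK_ACCESS_TRANSFER_WRITE_BIT, current_access,
			clear_layout, current_layout,
			VK_QUEUE_FAMILY_IGNORED, VK_QUEUE_FAMILY_IGNORED, image, range };
	vkCmdPipelineBarrier(cmd, VK_PIPELINE_STAGE_TRANSFER_BIT, current_stages, 0, 0, nullptr, 0, nullptr, 1, &back);
}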
@@ -2991,6 +3049,19 @@ VkRenderPass RenderingDeviceVulkan::_render_pass_create(const Vector<AttachmentF
Vector<VkAttachmentReference> depth_stencil_references;
Vector<VkAttachmentReference> resolve_references;
+ // Set up dependencies from/to external, equivalent to the default (implicit) ones, and then amend them
+ const VkPipelineStageFlags default_access_mask = VK_ACCESS_INPUT_ATTACHMENT_READ_BIT |
+ VK_ACCESS_COLOR_ATTACHMENT_READ_BIT |
+ VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT |
+ VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT |
+ VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT; // From Section 7.1 of Vulkan API Spec v1.1.148
+
+ VkPipelineStageFlags reading_stages = VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT | VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_TRANSFER_BIT;
+ VkSubpassDependency dependencies[2] = { { VK_SUBPASS_EXTERNAL, 0, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT, 0, default_access_mask, 0 },
+ { 0, VK_SUBPASS_EXTERNAL, VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, default_access_mask, 0, 0 } };
+ VkSubpassDependency &dependency_from_external = dependencies[0];
+ VkSubpassDependency &dependency_to_external = dependencies[1];
+
for (int i = 0; i < p_format.size(); i++) {
ERR_FAIL_INDEX_V(p_format[i].format, DATA_FORMAT_MAX, VK_NULL_HANDLE);
ERR_FAIL_INDEX_V(p_format[i].samples, TEXTURE_SAMPLES_MAX, VK_NULL_HANDLE);
@@ -3006,11 +3077,16 @@ VkRenderPass RenderingDeviceVulkan::_render_pass_create(const Vector<AttachmentF
bool is_sampled = p_format[i].usage_flags & TEXTURE_USAGE_SAMPLING_BIT;
bool is_storage = p_format[i].usage_flags & TEXTURE_USAGE_STORAGE_BIT;
+ // For each UNDEFINED, assume the prior use was a *read*, as we'd be discarding the output of a write
+ // Also, each UNDEFINED will do an immediate layout transition (write), s.t. we must ensure execution synchronization vs.
+ // the read. If this is a performance issue, one could track the actual last accessor of each resource, adding only that
+ // stage
switch (is_depth_stencil ? p_initial_depth_action : p_initial_color_action) {
case INITIAL_ACTION_CLEAR: {
description.loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;
description.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;
description.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; //don't care what is there
+ dependency_from_external.srcStageMask |= reading_stages;
} break;
case INITIAL_ACTION_KEEP: {
if (p_format[i].usage_flags & TEXTURE_USAGE_COLOR_ATTACHMENT_BIT) {
@@ -3021,10 +3097,12 @@ VkRenderPass RenderingDeviceVulkan::_render_pass_create(const Vector<AttachmentF
description.loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;
description.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; //don't care what is there
description.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;
+ dependency_from_external.srcStageMask |= reading_stages;
} else {
description.loadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
description.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
description.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; //don't care what is there
+ dependency_from_external.srcStageMask |= reading_stages;
}
} break;
case INITIAL_ACTION_DROP: {
@@ -3036,10 +3114,12 @@ VkRenderPass RenderingDeviceVulkan::_render_pass_create(const Vector<AttachmentF
description.loadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
description.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; //don't care what is there
description.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
+ dependency_from_external.srcStageMask |= reading_stages;
} else {
description.loadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
description.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
description.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; //don't care what is there
+ dependency_from_external.srcStageMask |= reading_stages;
}
} break;
case INITIAL_ACTION_CONTINUE: {
@@ -3055,6 +3135,7 @@ VkRenderPass RenderingDeviceVulkan::_render_pass_create(const Vector<AttachmentF
description.loadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
description.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
description.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; //don't care what is there
+ dependency_from_external.srcStageMask |= reading_stages;
}
} break;
default: {
@@ -3068,14 +3149,17 @@ VkRenderPass RenderingDeviceVulkan::_render_pass_create(const Vector<AttachmentF
description.storeOp = VK_ATTACHMENT_STORE_OP_STORE;
description.stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
description.finalLayout = is_sampled ? VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL : (is_storage ? VK_IMAGE_LAYOUT_GENERAL : VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
+ update_external_dependency_for_store(dependency_to_external, is_sampled, is_storage, false);
} else if (p_format[i].usage_flags & TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) {
description.storeOp = VK_ATTACHMENT_STORE_OP_STORE;
description.stencilStoreOp = VK_ATTACHMENT_STORE_OP_STORE;
description.finalLayout = is_sampled ? VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL : (is_storage ? VK_IMAGE_LAYOUT_GENERAL : VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL);
+ update_external_dependency_for_store(dependency_to_external, is_sampled, is_storage, true);
} else {
description.loadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
description.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
description.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; //don't care what is there
+ // TODO: What does this mean about the next usage (and thus appropriate dependency masks)?
}
} break;
case FINAL_ACTION_DISCARD: {
@@ -3128,9 +3212,24 @@ VkRenderPass RenderingDeviceVulkan::_render_pass_create(const Vector<AttachmentF
} else if (p_format[i].usage_flags & TEXTURE_USAGE_RESOLVE_ATTACHMENT_BIT) {
reference.layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
resolve_references.push_back(reference);
+ // if resolves are done, we need to ensure the copy is safe
+ dependency_to_external.dstStageMask |= VK_PIPELINE_STAGE_TRANSFER_BIT;
+ dependency_to_external.dstAccessMask |= VK_ACCESS_TRANSFER_READ_BIT;
} else {
ERR_FAIL_V_MSG(VK_NULL_HANDLE, "Texture index " + itos(i) + " is neither color, depth stencil or resolve so it can't be used as attachment.");
}
+
+ // NOTE: Big Mallet Approach -- any layout transition causes a full barrier
+ if (reference.layout != description.initialLayout) {
+ // NOTE: this should be smarter based on the texture's knowledge of its previous role
+ dependency_from_external.srcStageMask |= VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
+ dependency_from_external.srcAccessMask |= VK_ACCESS_MEMORY_READ_BIT | VK_ACCESS_MEMORY_WRITE_BIT;
+ }
+ if (reference.layout != description.finalLayout) {
+ // NOTE: this should be smarter based on the texture's knowledge of its subsequent role
+ dependency_to_external.dstStageMask |= VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
+ dependency_to_external.dstAccessMask |= VK_ACCESS_MEMORY_READ_BIT | VK_ACCESS_MEMORY_WRITE_BIT;
+ }
}
ERR_FAIL_COND_V_MSG(depth_stencil_references.size() > 1, VK_NULL_HANDLE,
@@ -3159,8 +3258,8 @@ VkRenderPass RenderingDeviceVulkan::_render_pass_create(const Vector<AttachmentF
render_pass_create_info.pAttachments = attachments.ptr();
render_pass_create_info.subpassCount = 1;
render_pass_create_info.pSubpasses = &subpass;
- render_pass_create_info.dependencyCount = 0;
- render_pass_create_info.pDependencies = nullptr;
+ render_pass_create_info.dependencyCount = 0; // Should be 2 to enable the dependencies above, but that currently throws a validation layer error
+ render_pass_create_info.pDependencies = dependencies;
VkRenderPass render_pass;
VkResult res = vkCreateRenderPass(device, &render_pass_create_info, nullptr, &render_pass);
@@ -3185,7 +3284,7 @@ RenderingDevice::FramebufferFormatID RenderingDeviceVulkan::framebuffer_format_c
}
int color_references;
- VkRenderPass render_pass = _render_pass_create(p_format, INITIAL_ACTION_CLEAR, FINAL_ACTION_DISCARD, INITIAL_ACTION_CLEAR, FINAL_ACTION_DISCARD, &color_references); //actions don't matter for this use case
+ VkRenderPass render_pass = _render_pass_create(p_format, INITIAL_ACTION_CLEAR, FINAL_ACTION_READ, INITIAL_ACTION_CLEAR, FINAL_ACTION_READ, &color_references); //actions don't matter for this use case
if (render_pass == VK_NULL_HANDLE) { //was likely invalid
return INVALID_ID;
@@ -3389,6 +3488,10 @@ RID RenderingDeviceVulkan::vertex_buffer_create(uint32_t p_size_bytes, const Vec
_THREAD_SAFE_METHOD_
ERR_FAIL_COND_V(p_data.size() && (uint32_t)p_data.size() != p_size_bytes, RID());
+ ERR_FAIL_COND_V_MSG(draw_list != nullptr && p_data.size(), RID(),
+ "Creating buffers with data is forbidden during creation of a draw list");
+ ERR_FAIL_COND_V_MSG(compute_list != nullptr && p_data.size(), RID(),
+ "Creating buffers with data is forbidden during creation of a draw list");
Buffer buffer;
_buffer_allocate(&buffer, p_size_bytes, VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_VERTEX_BUFFER_BIT, VMA_MEMORY_USAGE_GPU_ONLY);
@@ -3512,6 +3615,10 @@ RID RenderingDeviceVulkan::vertex_array_create(uint32_t p_vertex_count, VertexFo
RID RenderingDeviceVulkan::index_buffer_create(uint32_t p_index_count, IndexBufferFormat p_format, const Vector<uint8_t> &p_data, bool p_use_restart_indices) {
_THREAD_SAFE_METHOD_
+ ERR_FAIL_COND_V_MSG(draw_list != nullptr && p_data.size(), RID(),
+ "Creating buffers with data is forbidden during creation of a draw list");
+ ERR_FAIL_COND_V_MSG(compute_list != nullptr && p_data.size(), RID(),
+ "Creating buffers with data is forbidden during creation of a draw list");
ERR_FAIL_COND_V(p_index_count == 0, RID());
@@ -4205,8 +4312,10 @@ RID RenderingDeviceVulkan::shader_create(const Vector<ShaderStageData> &p_stages
}
pipeline_layout_create_info.pSetLayouts = layouts.ptr();
+ // Needs to be declared in this outer scope, otherwise it may not outlive its assignment
+ // to pipeline_layout_create_info.
+ VkPushConstantRange push_constant_range;
if (push_constant.push_constant_size) {
- VkPushConstantRange push_constant_range;
push_constant_range.stageFlags = push_constant.push_constants_vk_stage;
push_constant_range.offset = 0;
push_constant_range.size = push_constant.push_constant_size;
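The scope change matters because VkPipelineLayoutCreateInfo stores a *pointer* to the range: if the range is block-local, the pointer dangles by the time vkCreatePipelineLayout() reads it. A standalone, hedged sketch of the corrected pattern (the helper and its parameters are illustrative):

#include <vulkan/vulkan.h>

// Sketch of the lifetime fix: push_constant_range must outlive the
// vkCreatePipelineLayout() call, since the create-info only stores a pointer.
static VkResult create_layout(VkDevice device, VkDescriptorSetLayout set_layout,
		uint32_t push_constant_size, VkShaderStageFlags stages, VkPipelineLayout *r_layout) {
	VkPipelineLayoutCreateInfo info = {};
	info.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO;
	info.setLayoutCount = 1;
	info.pSetLayouts = &set_layout;

	VkPushConstantRange push_constant_range; // Outer scope: alive until the create call.
	if (push_constant_size) {
		push_constant_range.stageFlags = stages;
		push_constant_range.offset = 0;
		push_constant_range.size = push_constant_size;
		info.pushConstantRangeCount = 1;
		info.pPushConstantRanges = &push_constant_range;
	}
	return vkCreatePipelineLayout(device, &info, nullptr, r_layout);
}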
@@ -4258,6 +4367,10 @@ RID RenderingDeviceVulkan::uniform_buffer_create(uint32_t p_size_bytes, const Ve
_THREAD_SAFE_METHOD_
ERR_FAIL_COND_V(p_data.size() && (uint32_t)p_data.size() != p_size_bytes, RID());
+ ERR_FAIL_COND_V_MSG(draw_list != nullptr && p_data.size(), RID(),
+ "Creating buffers with data is forbidden during creation of a draw list");
+ ERR_FAIL_COND_V_MSG(compute_list != nullptr && p_data.size(), RID(),
+ "Creating buffers with data is forbidden during creation of a draw list");
Buffer buffer;
Error err = _buffer_allocate(&buffer, p_size_bytes, VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT, VMA_MEMORY_USAGE_GPU_ONLY);
@@ -4273,6 +4386,10 @@ RID RenderingDeviceVulkan::uniform_buffer_create(uint32_t p_size_bytes, const Ve
RID RenderingDeviceVulkan::storage_buffer_create(uint32_t p_size_bytes, const Vector<uint8_t> &p_data, uint32_t p_usage) {
_THREAD_SAFE_METHOD_
+ ERR_FAIL_COND_V_MSG(draw_list != nullptr && p_data.size(), RID(),
+ "Creating buffers with data is forbidden during creation of a draw list");
+ ERR_FAIL_COND_V_MSG(compute_list != nullptr && p_data.size(), RID(),
+ "Creating buffers with data is forbidden during creation of a draw list");
ERR_FAIL_COND_V(p_data.size() && (uint32_t)p_data.size() != p_size_bytes, RID());
@@ -4296,6 +4413,10 @@ RID RenderingDeviceVulkan::storage_buffer_create(uint32_t p_size_bytes, const Ve
RID RenderingDeviceVulkan::texture_buffer_create(uint32_t p_size_elements, DataFormat p_format, const Vector<uint8_t> &p_data) {
_THREAD_SAFE_METHOD_
+ ERR_FAIL_COND_V_MSG(draw_list != nullptr && p_data.size(), RID(),
+ "Creating buffers with data is forbidden during creation of a draw list");
+ ERR_FAIL_COND_V_MSG(compute_list != nullptr && p_data.size(), RID(),
+ "Creating buffers with data is forbidden during creation of a draw list");
uint32_t element_size = get_format_vertex_size(p_format);
ERR_FAIL_COND_V_MSG(element_size == 0, RID(), "Format requested is not supported for texture buffers");
@@ -4491,12 +4612,12 @@ RID RenderingDeviceVulkan::uniform_set_create(const Vector<Uniform> &p_uniforms,
}
}
ERR_FAIL_COND_V_MSG(uniform_idx == -1, RID(),
- "All the shader bindings for the given set must be covered by the uniforms provided. Binding (" + itos(set_uniform.binding) + ") was not provided.");
+ "All the shader bindings for the given set must be covered by the uniforms provided. Binding (" + itos(set_uniform.binding) + "), set (" + itos(p_shader_set) + ") was not provided.");
const Uniform &uniform = uniforms[uniform_idx];
ERR_FAIL_COND_V_MSG(uniform.type != set_uniform.type, RID(),
- "Mismatch uniform type for binding (" + itos(set_uniform.binding) + "). Expected '" + shader_uniform_names[set_uniform.type] + "', supplied: '" + shader_uniform_names[uniform.type] + "'.");
+ "Mismatch uniform type for binding (" + itos(set_uniform.binding) + "), set (" + itos(p_shader_set) + "). Expected '" + shader_uniform_names[set_uniform.type] + "', supplied: '" + shader_uniform_names[uniform.type] + "'.");
VkWriteDescriptorSet write; //common header
write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
@@ -4885,44 +5006,27 @@ Error RenderingDeviceVulkan::buffer_update(RID p_buffer, uint32_t p_offset, uint
ERR_FAIL_COND_V_MSG(draw_list && p_sync_with_draw, ERR_INVALID_PARAMETER,
"Updating buffers in 'sync to draw' mode is forbidden during creation of a draw list");
+ ERR_FAIL_COND_V_MSG(compute_list && p_sync_with_draw, ERR_INVALID_PARAMETER,
+ "Updating buffers in 'sync to draw' mode is forbidden during creation of a compute list");
- VkPipelineStageFlags dst_stage_mask;
- VkAccessFlags dst_access;
+ // Protect subsequent updates...
+ VkPipelineStageFlags dst_stage_mask = VK_PIPELINE_STAGE_TRANSFER_BIT;
+ VkAccessFlags dst_access = VK_ACCESS_TRANSFER_WRITE_BIT;
- Buffer *buffer = nullptr;
- if (vertex_buffer_owner.owns(p_buffer)) {
- dst_stage_mask = VK_PIPELINE_STAGE_VERTEX_INPUT_BIT;
- dst_access = VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT;
- buffer = vertex_buffer_owner.getornull(p_buffer);
- } else if (index_buffer_owner.owns(p_buffer)) {
- dst_stage_mask = VK_PIPELINE_STAGE_VERTEX_INPUT_BIT;
- dst_access = VK_ACCESS_INDEX_READ_BIT;
- buffer = index_buffer_owner.getornull(p_buffer);
- } else if (uniform_buffer_owner.owns(p_buffer)) {
- dst_stage_mask = VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
- dst_access = VK_ACCESS_UNIFORM_READ_BIT;
- buffer = uniform_buffer_owner.getornull(p_buffer);
- } else if (texture_buffer_owner.owns(p_buffer)) {
- dst_stage_mask = VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
- dst_access = VK_ACCESS_SHADER_READ_BIT;
- buffer = &texture_buffer_owner.getornull(p_buffer)->buffer;
- } else if (storage_buffer_owner.owns(p_buffer)) {
- dst_stage_mask = VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
- dst_access = VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
- buffer = storage_buffer_owner.getornull(p_buffer);
- } else {
+ Buffer *buffer = _get_buffer_from_owner(p_buffer, dst_stage_mask, dst_access);
+ if (!buffer) {
ERR_FAIL_V_MSG(ERR_INVALID_PARAMETER, "Buffer argument is not a valid buffer of any type.");
}
ERR_FAIL_COND_V_MSG(p_offset + p_size > buffer->size, ERR_INVALID_PARAMETER,
"Attempted to write buffer (" + itos((p_offset + p_size) - buffer->size) + " bytes) past the end.");
+ _buffer_memory_barrier(buffer->buffer, p_offset, p_size, dst_stage_mask, VK_PIPELINE_STAGE_TRANSFER_BIT, dst_access, VK_ACCESS_TRANSFER_WRITE_BIT, p_sync_with_draw);
Error err = _buffer_update(buffer, p_offset, (uint8_t *)p_data, p_size, p_sync_with_draw);
if (err) {
return err;
}
- _buffer_memory_barrier(buffer->buffer, p_offset, p_size, VK_PIPELINE_STAGE_TRANSFER_BIT, dst_stage_mask, VK_ACCESS_TRANSFER_WRITE_BIT, dst_access, p_sync_with_draw);
#ifdef FORCE_FULL_BARRIER
_full_barrier(p_sync_with_draw);
#else
@@ -4934,20 +5038,20 @@ Error RenderingDeviceVulkan::buffer_update(RID p_buffer, uint32_t p_offset, uint
Vector<uint8_t> RenderingDeviceVulkan::buffer_get_data(RID p_buffer) {
_THREAD_SAFE_METHOD_
- Buffer *buffer = nullptr;
- if (vertex_buffer_owner.owns(p_buffer)) {
- buffer = vertex_buffer_owner.getornull(p_buffer);
- } else if (index_buffer_owner.owns(p_buffer)) {
- buffer = index_buffer_owner.getornull(p_buffer);
- } else if (texture_buffer_owner.owns(p_buffer)) {
- buffer = &texture_buffer_owner.getornull(p_buffer)->buffer;
- } else if (storage_buffer_owner.owns(p_buffer)) {
- buffer = storage_buffer_owner.getornull(p_buffer);
- } else {
+ // It could be that this buffer was just created, so start from the transfer write that filled it
+ VkPipelineStageFlags src_stage_mask = VK_PIPELINE_STAGE_TRANSFER_BIT;
+ VkAccessFlags src_access_mask = VK_ACCESS_TRANSFER_WRITE_BIT;
+ // Get the Vulkan buffer and the possible stages/accesses that may touch it
+ Buffer *buffer = _get_buffer_from_owner(p_buffer, src_stage_mask, src_access_mask);
+ if (!buffer) {
ERR_FAIL_V_MSG(Vector<uint8_t>(), "Buffer is either invalid or this type of buffer can't be retrieved. Only Index and Vertex buffers allow retrieving.");
}
+ // Make sure no one is using the buffer -- the "false" gets us to the same command buffer as below.
+ _buffer_memory_barrier(buffer->buffer, 0, buffer->size, src_stage_mask, src_access_mask, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_ACCESS_TRANSFER_READ_BIT, false);
+
VkCommandBuffer command_buffer = frames[frame].setup_command_buffer;
+
Buffer tmp_buffer;
_buffer_allocate(&tmp_buffer, buffer->size, VK_BUFFER_USAGE_TRANSFER_DST_BIT, VMA_MEMORY_USAGE_CPU_ONLY);
VkBufferCopy region;
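The rest of the readback is outside this hunk's context; a hedged sketch of the typical staging-buffer tail, with names (region, tmp_buffer, _flush, allocator) assumed from the surrounding code rather than quoted from it:

// Hedged sketch (not the literal continuation): fill the copy region, record
// the copy into the CPU-visible staging buffer, flush so the GPU finishes,
// then map the staging allocation and read the bytes back.
region.srcOffset = 0;
region.dstOffset = 0;
region.size = buffer->size;
vkCmdCopyBuffer(command_buffer, buffer->buffer, tmp_buffer.buffer, 1, &region);

_flush(true); // Submit the setup command buffer and wait for the copy.

void *buffer_mem;
vmaMapMemory(allocator, tmp_buffer.allocation, &buffer_mem);
Vector<uint8_t> buffer_data;
buffer_data.resize(buffer->size);
memcpy(buffer_data.ptrw(), buffer_mem, buffer->size);
vmaUnmapMemory(allocator, tmp_buffer.allocation);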
@@ -5614,7 +5718,7 @@ Error RenderingDeviceVulkan::_draw_list_render_pass_begin(Framebuffer *framebuff
image_memory_barrier.subresourceRange.baseArrayLayer = texture->base_layer;
image_memory_barrier.subresourceRange.layerCount = texture->layers;
- vkCmdPipelineBarrier(command_buffer, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, 0, 0, nullptr, 0, nullptr, 1, &image_memory_barrier);
+ vkCmdPipelineBarrier(command_buffer, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_PIPELINE_STAGE_VERTEX_INPUT_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, 0, 0, nullptr, 0, nullptr, 1, &image_memory_barrier);
texture->layout = VK_IMAGE_LAYOUT_GENERAL;
@@ -6352,7 +6456,7 @@ void RenderingDeviceVulkan::draw_list_end() {
image_memory_barrier.subresourceRange.baseArrayLayer = texture->base_layer;
image_memory_barrier.subresourceRange.layerCount = texture->layers;
- vkCmdPipelineBarrier(frames[frame].draw_command_buffer, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT, VK_PIPELINE_STAGE_VERTEX_INPUT_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT | VK_PIPELINE_STAGE_TRANSFER_BIT, 0, 0, nullptr, 0, nullptr, 1, &image_memory_barrier);
+ vkCmdPipelineBarrier(frames[frame].draw_command_buffer, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, 0, 0, nullptr, 0, nullptr, 1, &image_memory_barrier);
texture->layout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
}
@@ -6494,7 +6598,7 @@ void RenderingDeviceVulkan::compute_list_bind_uniform_set(ComputeListID p_list,
image_memory_barrier.subresourceRange.baseArrayLayer = textures_to_sampled[i]->base_layer;
image_memory_barrier.subresourceRange.layerCount = textures_to_sampled[i]->layers;
- vkCmdPipelineBarrier(cl->command_buffer, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, 0, 0, nullptr, 0, nullptr, 1, &image_memory_barrier);
+ vkCmdPipelineBarrier(cl->command_buffer, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, 0, 0, nullptr, 0, nullptr, 1, &image_memory_barrier);
textures_to_sampled[i]->layout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
@@ -6691,13 +6795,13 @@ void RenderingDeviceVulkan::compute_list_add_barrier(ComputeListID p_list) {
void RenderingDeviceVulkan::compute_list_end() {
ERR_FAIL_COND(!compute_list);
-
+ const VkPipelineStageFlags dest_stage_mask = VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT | VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT | VK_PIPELINE_STAGE_TRANSFER_BIT;
for (Set<Texture *>::Element *E = compute_list->state.textures_to_sampled_layout.front(); E; E = E->next()) {
VkImageMemoryBarrier image_memory_barrier;
image_memory_barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
image_memory_barrier.pNext = nullptr;
image_memory_barrier.srcAccessMask = VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
- image_memory_barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
+ image_memory_barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_INDIRECT_COMMAND_READ_BIT;
image_memory_barrier.oldLayout = E->get()->layout;
image_memory_barrier.newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
@@ -6710,7 +6814,8 @@ void RenderingDeviceVulkan::compute_list_end() {
image_memory_barrier.subresourceRange.baseArrayLayer = E->get()->base_layer;
image_memory_barrier.subresourceRange.layerCount = E->get()->layers;
- vkCmdPipelineBarrier(compute_list->command_buffer, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, 0, 0, nullptr, 0, nullptr, 1, &image_memory_barrier);
+ // TODO: Look at the usages in the compute list and determine tighter dst stage and access masks based on some "final" usage equivalent
+ vkCmdPipelineBarrier(compute_list->command_buffer, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, dest_stage_mask, 0, 0, nullptr, 0, nullptr, 1, &image_memory_barrier);
E->get()->layout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
}
@@ -6720,7 +6825,7 @@ void RenderingDeviceVulkan::compute_list_end() {
#ifdef FORCE_FULL_BARRIER
_full_barrier(true);
#else
- _memory_barrier(VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT | VK_PIPELINE_STAGE_VERTEX_INPUT_BIT | VK_PIPELINE_STAGE_TRANSFER_BIT, VK_ACCESS_SHADER_WRITE_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT | VK_ACCESS_INDEX_READ_BIT | VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT | VK_ACCESS_TRANSFER_READ_BIT | VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_INDIRECT_COMMAND_READ_BIT, true);
+ _memory_barrier(VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, dest_stage_mask, VK_ACCESS_SHADER_WRITE_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT | VK_ACCESS_INDEX_READ_BIT | VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT | VK_ACCESS_TRANSFER_READ_BIT | VK_ACCESS_TRANSFER_WRITE_BIT | VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_INDIRECT_COMMAND_READ_BIT, true);
#endif
}
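The new VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT / VK_ACCESS_INDIRECT_COMMAND_READ_BIT bits exist because a compute list may have written indirect draw or dispatch arguments that the GPU front end reads next. A standalone, hedged sketch of the buffer-level form of that dependency:

#include <vulkan/vulkan.h>

// A compute shader wrote indirect arguments; make them visible to the
// command processor before vkCmdDrawIndirect/vkCmdDispatchIndirect runs.
static void barrier_compute_to_indirect_draw(VkCommandBuffer cmd, VkBuffer args_buffer) {
	VkBufferMemoryBarrier barrier;
	barrier.sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER;
	barrier.pNext = nullptr;
	barrier.srcAccessMask = VK_ACCESS_SHADER_WRITE_BIT; // Compute wrote the args.
	barrier.dstAccessMask = VK_ACCESS_INDIRECT_COMMAND_READ_BIT; // Front end reads them.
	barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
	barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
	barrier.buffer = args_buffer;
	barrier.offset = 0;
	barrier.size = VK_WHOLE_SIZE;
	vkCmdPipelineBarrier(cmd, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT,
			0, 0, nullptr, 1, &barrier, 0, nullptr);
}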
diff --git a/drivers/vulkan/rendering_device_vulkan.h b/drivers/vulkan/rendering_device_vulkan.h
index 6f8bbc9c64..e6cbf2e01d 100644
--- a/drivers/vulkan/rendering_device_vulkan.h
+++ b/drivers/vulkan/rendering_device_vulkan.h
@@ -793,6 +793,7 @@ class RenderingDeviceVulkan : public RenderingDevice {
Error _draw_list_setup_framebuffer(Framebuffer *p_framebuffer, InitialAction p_initial_color_action, FinalAction p_final_color_action, InitialAction p_initial_depth_action, FinalAction p_final_depth_action, VkFramebuffer *r_framebuffer, VkRenderPass *r_render_pass);
Error _draw_list_render_pass_begin(Framebuffer *framebuffer, InitialAction p_initial_color_action, FinalAction p_final_color_action, InitialAction p_initial_depth_action, FinalAction p_final_depth_action, const Vector<Color> &p_clear_colors, float p_clear_depth, uint32_t p_clear_stencil, Point2i viewport_offset, Point2i viewport_size, VkFramebuffer vkframebuffer, VkRenderPass render_pass, VkCommandBuffer command_buffer, VkSubpassContents subpass_contents, const Vector<RID> &p_storage_textures);
_FORCE_INLINE_ DrawList *_get_draw_list_ptr(DrawListID p_id);
+ Buffer *_get_buffer_from_owner(RID p_buffer, VkPipelineStageFlags &dst_stage_mask, VkAccessFlags &dst_access);
/**********************/
/**** COMPUTE LIST ****/