author     Juan Linietsky <reduzio@gmail.com>       2020-10-19 17:32:19 -0300
committer  GitHub <noreply@github.com>              2020-10-19 17:32:19 -0300
commit     c71e941ee8e98506572c5237f2d5e454504b125c (patch)
tree       04e1ecda4f73e006d0747cb1273040e7ac6661c9 /drivers
parent     7480a7d17848d7ab9749d6e5af257233588eaea3 (diff)
Revert "Revert "Synchronization validation fix patch set 3 (inclusive of all previous patches)""
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/vulkan/rendering_device_vulkan.cpp  215
-rw-r--r--  drivers/vulkan/rendering_device_vulkan.h       1
2 files changed, 149 insertions, 67 deletions
diff --git a/drivers/vulkan/rendering_device_vulkan.cpp b/drivers/vulkan/rendering_device_vulkan.cpp
index 9fe3f7e6c0..a01f63caae 100644
--- a/drivers/vulkan/rendering_device_vulkan.cpp
+++ b/drivers/vulkan/rendering_device_vulkan.cpp
@@ -38,7 +38,58 @@

 #include "thirdparty/spirv-reflect/spirv_reflect.h"

-#define FORCE_FULL_BARRIER
+//#define FORCE_FULL_BARRIER
+
+// Get the Vulkan object information and possible stage access types (bitwise OR'd with incoming values)
+RenderingDeviceVulkan::Buffer *RenderingDeviceVulkan::_get_buffer_from_owner(RID p_buffer, VkPipelineStageFlags &stage_mask, VkAccessFlags &access_mask) {
+    Buffer *buffer = nullptr;
+    if (vertex_buffer_owner.owns(p_buffer)) {
+        stage_mask |= VK_PIPELINE_STAGE_VERTEX_INPUT_BIT;
+        access_mask |= VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT;
+        buffer = vertex_buffer_owner.getornull(p_buffer);
+    } else if (index_buffer_owner.owns(p_buffer)) {
+        stage_mask |= VK_PIPELINE_STAGE_VERTEX_INPUT_BIT;
+        access_mask |= VK_ACCESS_INDEX_READ_BIT;
+        buffer = index_buffer_owner.getornull(p_buffer);
+    } else if (uniform_buffer_owner.owns(p_buffer)) {
+        stage_mask |= VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
+        access_mask |= VK_ACCESS_UNIFORM_READ_BIT;
+        buffer = uniform_buffer_owner.getornull(p_buffer);
+    } else if (texture_buffer_owner.owns(p_buffer)) {
+        stage_mask |= VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
+        access_mask |= VK_ACCESS_SHADER_READ_BIT;
+        buffer = &texture_buffer_owner.getornull(p_buffer)->buffer;
+    } else if (storage_buffer_owner.owns(p_buffer)) {
+        stage_mask |= VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
+        access_mask |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
+        buffer = storage_buffer_owner.getornull(p_buffer);
+    }
+    return buffer;
+}
+
+static void update_external_dependency_for_store(VkSubpassDependency &dependency, bool is_sampled, bool is_storage, bool is_depth) {
+    // Transitioning from write to read, protect the shaders that may use this next
+    // Allow for copies/image layout transitions
+    dependency.dstStageMask |= VK_PIPELINE_STAGE_TRANSFER_BIT;
+    dependency.dstAccessMask |= VK_ACCESS_TRANSFER_READ_BIT;
+
+    if (is_sampled) {
+        dependency.dstStageMask |= VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
+        dependency.dstAccessMask |= VK_ACCESS_SHADER_READ_BIT;
+    } else if (is_storage) {
+        dependency.dstStageMask |= VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
+        dependency.dstAccessMask |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
+    } else {
+        dependency.dstStageMask |= VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
+        dependency.dstAccessMask |= VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
+    }
+
+    if (is_depth) {
+        // Depth resources have additional stages that may be interested in them
+        dependency.dstStageMask |= VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT;
+        dependency.dstAccessMask |= VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
+    }
+}

 void RenderingDeviceVulkan::_add_dependency(RID p_id, RID p_depends_on) {
     if (!dependency_map.has(p_depends_on)) {
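A note on the new helper: it only ORs bits into the caller's masks, so the caller seeds the masks with its own requirements first and the helper widens them for whatever the buffer's type allows. A minimal sketch of that calling pattern, using the patch's own names (the seed values here are illustrative; only the helper itself comes from this patch):

    // Seed with the caller's own stage/access needs; _get_buffer_from_owner()
    // ORs in the bits for every stage that may legitimately touch this buffer type.
    VkPipelineStageFlags stage_mask = VK_PIPELINE_STAGE_TRANSFER_BIT;
    VkAccessFlags access_mask = VK_ACCESS_TRANSFER_WRITE_BIT;
    Buffer *buffer = _get_buffer_from_owner(p_buffer, stage_mask, access_mask);
    if (!buffer) {
        // p_buffer is not owned by any of the five buffer owners; report the error.
    }
    // For a storage buffer, stage_mask now also contains the vertex, fragment and
    // compute shader stages, and access_mask contains SHADER_READ | SHADER_WRITE.

This is exactly the pattern buffer_update() and buffer_get_data() adopt later in this patch.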
@@ -1932,7 +1983,7 @@ RID RenderingDeviceVulkan::texture_create(const TextureFormat &p_format, const T
     image_memory_barrier.subresourceRange.baseArrayLayer = 0;
     image_memory_barrier.subresourceRange.layerCount = image_create_info.arrayLayers;

-    vkCmdPipelineBarrier(frames[frame].setup_command_buffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, 0, 0, nullptr, 0, nullptr, 1, &image_memory_barrier);
+    vkCmdPipelineBarrier(frames[frame].setup_command_buffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, 0, 0, nullptr, 0, nullptr, 1, &image_memory_barrier);
 }

 RID id = texture_owner.make_rid(texture);
@@ -2189,7 +2240,7 @@ Error RenderingDeviceVulkan::texture_update(RID p_texture, uint32_t p_layer, con
     image_memory_barrier.subresourceRange.baseArrayLayer = p_layer;
     image_memory_barrier.subresourceRange.layerCount = 1;

-    vkCmdPipelineBarrier(command_buffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0, 0, nullptr, 0, nullptr, 1, &image_memory_barrier);
+    vkCmdPipelineBarrier(command_buffer, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0, 0, nullptr, 0, nullptr, 1, &image_memory_barrier);
 }

 uint32_t mipmap_offset = 0;
@@ -2322,7 +2373,7 @@ Error RenderingDeviceVulkan::texture_update(RID p_texture, uint32_t p_layer, con
     image_memory_barrier.subresourceRange.baseArrayLayer = p_layer;
     image_memory_barrier.subresourceRange.layerCount = 1;

-    vkCmdPipelineBarrier(command_buffer, VK_ACCESS_TRANSFER_WRITE_BIT, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, 0, 0, nullptr, 0, nullptr, 1, &image_memory_barrier);
+    vkCmdPipelineBarrier(command_buffer, VK_ACCESS_TRANSFER_WRITE_BIT, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, 0, 0, nullptr, 0, nullptr, 1, &image_memory_barrier);
 }

 return OK;
@@ -2485,6 +2536,9 @@ Vector<uint8_t> RenderingDeviceVulkan::texture_get_data(RID p_texture, uint32_t
     image_memory_barrier.pNext = nullptr;
     image_memory_barrier.srcAccessMask = VK_ACCESS_TRANSFER_READ_BIT;
     image_memory_barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
+    if (tex->usage_flags & TEXTURE_USAGE_STORAGE_BIT) {
+        image_memory_barrier.dstAccessMask |= VK_ACCESS_SHADER_WRITE_BIT;
+    }
     image_memory_barrier.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL;
     image_memory_barrier.newLayout = tex->layout;
     image_memory_barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
@@ -2496,7 +2550,7 @@ Vector<uint8_t> RenderingDeviceVulkan::texture_get_data(RID p_texture, uint32_t
     image_memory_barrier.subresourceRange.baseArrayLayer = p_layer;
     image_memory_barrier.subresourceRange.layerCount = 1;

-    vkCmdPipelineBarrier(command_buffer, VK_ACCESS_TRANSFER_WRITE_BIT, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, 0, 0, nullptr, 0, nullptr, 1, &image_memory_barrier);
+    vkCmdPipelineBarrier(command_buffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, 0, 0, nullptr, 0, nullptr, 1, &image_memory_barrier);
 }

 _flush(true);
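The barrier fixes above all converge on the same pairing rule: srcStageMask/srcAccessMask name the operation that produced the data, and dstStageMask/dstAccessMask name every stage that may consume it next — fragment shaders were previously missing from the consumer side. A standalone sketch of the shape these calls take, assuming a recorded command_buffer and a VkImage named image that a transfer just wrote:

    VkImageMemoryBarrier barrier = {};
    barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
    barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;  // what was written
    barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;     // how it is read next
    barrier.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
    barrier.newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
    barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
    barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
    barrier.image = image;
    barrier.subresourceRange = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1 };
    vkCmdPipelineBarrier(command_buffer,
            VK_PIPELINE_STAGE_TRANSFER_BIT, // producer stage
            VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, // consumer stages
            0, 0, nullptr, 0, nullptr, 1, &barrier);

Note that the texture_update() call above still passes VK_ACCESS_TRANSFER_WRITE_BIT where vkCmdPipelineBarrier expects a source *stage* mask, while the texture_get_data() variant corrects it to VK_PIPELINE_STAGE_TRANSFER_BIT.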
@@ -2672,7 +2726,7 @@ Error RenderingDeviceVulkan::texture_copy(RID p_from_texture, RID p_to_texture,
     image_memory_barrier.subresourceRange.baseArrayLayer = p_src_layer;
     image_memory_barrier.subresourceRange.layerCount = 1;

-    vkCmdPipelineBarrier(command_buffer, VK_ACCESS_TRANSFER_WRITE_BIT, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, 0, 0, nullptr, 0, nullptr, 1, &image_memory_barrier);
+    vkCmdPipelineBarrier(command_buffer, VK_ACCESS_TRANSFER_WRITE_BIT, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, 0, 0, nullptr, 0, nullptr, 1, &image_memory_barrier);
 }

 { //make dst readable
@@ -2694,7 +2748,7 @@ Error RenderingDeviceVulkan::texture_copy(RID p_from_texture, RID p_to_texture,
     image_memory_barrier.subresourceRange.baseArrayLayer = p_src_layer;
     image_memory_barrier.subresourceRange.layerCount = 1;

-    vkCmdPipelineBarrier(command_buffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, 0, 0, nullptr, 0, nullptr, 1, &image_memory_barrier);
+    vkCmdPipelineBarrier(command_buffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, 0, 0, nullptr, 0, nullptr, 1, &image_memory_barrier);
 }
 }

@@ -2755,7 +2809,7 @@ Error RenderingDeviceVulkan::texture_resolve_multisample(RID p_from_texture, RID
     image_memory_barrier.subresourceRange.baseArrayLayer = src_tex->base_layer;
     image_memory_barrier.subresourceRange.layerCount = 1;

-    vkCmdPipelineBarrier(command_buffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0, 0, nullptr, 0, nullptr, 1, &image_memory_barrier);
+    vkCmdPipelineBarrier(command_buffer, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0, 0, nullptr, 0, nullptr, 1, &image_memory_barrier);
 }
 { //Dest
     VkImageMemoryBarrier image_memory_barrier;
@@ -2824,7 +2878,7 @@ Error RenderingDeviceVulkan::texture_resolve_multisample(RID p_from_texture, RID
     image_memory_barrier.subresourceRange.baseArrayLayer = src_tex->base_layer;
     image_memory_barrier.subresourceRange.layerCount = 1;

-    vkCmdPipelineBarrier(command_buffer, VK_ACCESS_TRANSFER_WRITE_BIT, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, 0, 0, nullptr, 0, nullptr, 1, &image_memory_barrier);
+    vkCmdPipelineBarrier(command_buffer, VK_ACCESS_TRANSFER_WRITE_BIT, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, 0, 0, nullptr, 0, nullptr, 1, &image_memory_barrier);
 }

 { //make dst readable
@@ -2846,7 +2900,7 @@ Error RenderingDeviceVulkan::texture_resolve_multisample(RID p_from_texture, RID
     image_memory_barrier.subresourceRange.baseArrayLayer = dst_tex->base_layer;
     image_memory_barrier.subresourceRange.layerCount = 1;

-    vkCmdPipelineBarrier(command_buffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, 0, 0, nullptr, 0, nullptr, 1, &image_memory_barrier);
+    vkCmdPipelineBarrier(command_buffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, 0, 0, nullptr, 0, nullptr, 1, &image_memory_barrier);
 }
 }
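The resolve path makes the same srcStageMask swap as texture_update() above: TOP_OF_PIPE becomes BOTTOM_OF_PIPE. As a source stage, TOP_OF_PIPE_BIT waits for nothing, whereas BOTTOM_OF_PIPE_BIT orders the upcoming transfer after all previously submitted work has drained — which is what a conservative pre-copy barrier wants. A sketch of the distinction (command_buffer and image_memory_barrier assumed from the surrounding code):

    // Waits for nothing: the transfer may overlap whatever last wrote the image.
    vkCmdPipelineBarrier(command_buffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
            VK_PIPELINE_STAGE_TRANSFER_BIT, 0, 0, nullptr, 0, nullptr, 1, &image_memory_barrier);

    // Waits for all prior commands to reach the end of the pipe before the transfer starts.
    vkCmdPipelineBarrier(command_buffer, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT,
            VK_PIPELINE_STAGE_TRANSFER_BIT, 0, 0, nullptr, 0, nullptr, 1, &image_memory_barrier);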
@@ -2878,16 +2932,22 @@ Error RenderingDeviceVulkan::texture_clear(RID p_texture, const Color &p_color,

 VkCommandBuffer command_buffer = p_sync_with_draw ? frames[frame].draw_command_buffer : frames[frame].setup_command_buffer;

-VkImageLayout layout = src_tex->layout;
+VkImageLayout clear_layout = (src_tex->layout == VK_IMAGE_LAYOUT_GENERAL) ? VK_IMAGE_LAYOUT_GENERAL : VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;

-if (src_tex->layout != VK_IMAGE_LAYOUT_GENERAL) { //storage may be in general state
+// NOTE: Perhaps the valid stages/accesses for a given owner should be a property of the owner. (Here and places like _get_buffer_from_owner)
+const VkPipelineStageFlags valid_texture_stages = VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
+constexpr VkAccessFlags read_access = VK_ACCESS_SHADER_READ_BIT;
+constexpr VkAccessFlags read_write_access = VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
+const VkAccessFlags valid_texture_access = (src_tex->usage_flags & TEXTURE_USAGE_STORAGE_BIT) ? read_write_access : read_access;
+
+{ // Barrier from previous access with optional layout change (see clear_layout logic above)
     VkImageMemoryBarrier image_memory_barrier;
     image_memory_barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
     image_memory_barrier.pNext = nullptr;
-    image_memory_barrier.srcAccessMask = 0;
+    image_memory_barrier.srcAccessMask = valid_texture_access;
     image_memory_barrier.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
     image_memory_barrier.oldLayout = src_tex->layout;
-    image_memory_barrier.newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
+    image_memory_barrier.newLayout = clear_layout;

     image_memory_barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
     image_memory_barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
@@ -2898,8 +2958,7 @@ Error RenderingDeviceVulkan::texture_clear(RID p_texture, const Color &p_color,
     image_memory_barrier.subresourceRange.baseArrayLayer = src_tex->base_layer + p_base_layer;
     image_memory_barrier.subresourceRange.layerCount = p_layers;

-    layout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
-    vkCmdPipelineBarrier(command_buffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0, 0, nullptr, 0, nullptr, 1, &image_memory_barrier);
+    vkCmdPipelineBarrier(command_buffer, valid_texture_stages, VK_PIPELINE_STAGE_TRANSFER_BIT, 0, 0, nullptr, 0, nullptr, 1, &image_memory_barrier);
 }

 VkClearColorValue clear_color;
@@ -2915,16 +2974,15 @@ Error RenderingDeviceVulkan::texture_clear(RID p_texture, const Color &p_color,
 range.baseMipLevel = src_tex->base_mipmap + p_base_mipmap;
 range.levelCount = p_mipmaps;

-vkCmdClearColorImage(command_buffer, src_tex->image, layout, &clear_color, 1, &range);
-
-if (src_tex->layout != VK_IMAGE_LAYOUT_GENERAL) { //storage may be in general state
+vkCmdClearColorImage(command_buffer, src_tex->image, clear_layout, &clear_color, 1, &range);

+{ // Barrier to post clear accesses (changing back the layout if needed)
     VkImageMemoryBarrier image_memory_barrier;
     image_memory_barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
     image_memory_barrier.pNext = nullptr;
     image_memory_barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
-    image_memory_barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
-    image_memory_barrier.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
+    image_memory_barrier.dstAccessMask = valid_texture_access;
+    image_memory_barrier.oldLayout = clear_layout;
     image_memory_barrier.newLayout = src_tex->layout;

     image_memory_barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
@@ -2936,7 +2994,7 @@ Error RenderingDeviceVulkan::texture_clear(RID p_texture, const Color &p_color,
     image_memory_barrier.subresourceRange.baseArrayLayer = src_tex->base_layer + p_base_layer;
     image_memory_barrier.subresourceRange.layerCount = p_layers;

-    vkCmdPipelineBarrier(command_buffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, 0, 0, nullptr, 0, nullptr, 1, &image_memory_barrier);
+    vkCmdPipelineBarrier(command_buffer, VK_PIPELINE_STAGE_TRANSFER_BIT, valid_texture_stages, 0, 0, nullptr, 0, nullptr, 1, &image_memory_barrier);
 }

 return OK;
@@ -2991,6 +3049,19 @@ VkRenderPass RenderingDeviceVulkan::_render_pass_create(const Vector<AttachmentF
 Vector<VkAttachmentReference> depth_stencil_references;
 Vector<VkAttachmentReference> resolve_references;

+// Set up dependencies from/to external equivalent to the default (implicit) ones, and then amend them
+const VkAccessFlags default_access_mask = VK_ACCESS_INPUT_ATTACHMENT_READ_BIT |
+        VK_ACCESS_COLOR_ATTACHMENT_READ_BIT |
+        VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT |
+        VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT |
+        VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT; // From Section 7.1 of Vulkan API Spec v1.1.148
+
+VkPipelineStageFlags reading_stages = VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT | VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_TRANSFER_BIT;
+VkSubpassDependency dependencies[2] = { { VK_SUBPASS_EXTERNAL, 0, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT, 0, default_access_mask, 0 },
+        { 0, VK_SUBPASS_EXTERNAL, VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, default_access_mask, 0, 0 } };
+VkSubpassDependency &dependency_from_external = dependencies[0];
+VkSubpassDependency &dependency_to_external = dependencies[1];
+
 for (int i = 0; i < p_format.size(); i++) {
     ERR_FAIL_INDEX_V(p_format[i].format, DATA_FORMAT_MAX, VK_NULL_HANDLE);
     ERR_FAIL_INDEX_V(p_format[i].samples, TEXTURE_SAMPLES_MAX, VK_NULL_HANDLE);
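The dependency array above uses positional initialization. With the fields named, the "from external" entry reads as below — an equivalent rewrite for readability, not code from the patch. It reproduces the implicit external subpass dependency the spec would otherwise insert, so the attachment loop that follows can widen it in place:

    VkSubpassDependency dependency_from_external = {};
    dependency_from_external.srcSubpass = VK_SUBPASS_EXTERNAL;
    dependency_from_external.dstSubpass = 0;
    dependency_from_external.srcStageMask = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
    dependency_from_external.dstStageMask = VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT;
    dependency_from_external.srcAccessMask = 0;
    dependency_from_external.dstAccessMask = default_access_mask;
    dependency_from_external.dependencyFlags = 0;
    // dependency_to_external is the mirror image: subpass 0 -> VK_SUBPASS_EXTERNAL,
    // ALL_GRAPHICS -> BOTTOM_OF_PIPE, with default_access_mask on the source side.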
@@ -3006,11 +3077,16 @@ VkRenderPass RenderingDeviceVulkan::_render_pass_create(const Vector<AttachmentF
     bool is_sampled = p_format[i].usage_flags & TEXTURE_USAGE_SAMPLING_BIT;
     bool is_storage = p_format[i].usage_flags & TEXTURE_USAGE_STORAGE_BIT;

+    // For each UNDEFINED, assume the prior use was a *read*, as we'd be discarding the output of a write
+    // Also, each UNDEFINED will do an immediate layout transition (write), s.t. we must ensure execution synchronization vs.
+    // the read. If this is a performance issue, one could track the actual last accessor of each resource, adding only that
+    // stage
     switch (is_depth_stencil ? p_initial_depth_action : p_initial_color_action) {
         case INITIAL_ACTION_CLEAR: {
             description.loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;
             description.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;
             description.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; //don't care what is there
+            dependency_from_external.srcStageMask |= reading_stages;
         } break;
         case INITIAL_ACTION_KEEP: {
             if (p_format[i].usage_flags & TEXTURE_USAGE_COLOR_ATTACHMENT_BIT) {
@@ -3021,10 +3097,12 @@ VkRenderPass RenderingDeviceVulkan::_render_pass_create(const Vector<AttachmentF
             description.loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;
             description.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; //don't care what is there
             description.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;
+            dependency_from_external.srcStageMask |= reading_stages;
         } else {
             description.loadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
             description.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
             description.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; //don't care what is there
+            dependency_from_external.srcStageMask |= reading_stages;
         }
     } break;
     case INITIAL_ACTION_DROP: {
@@ -3036,10 +3114,12 @@ VkRenderPass RenderingDeviceVulkan::_render_pass_create(const Vector<AttachmentF
             description.loadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
             description.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; //don't care what is there
             description.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
+            dependency_from_external.srcStageMask |= reading_stages;
         } else {
             description.loadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
             description.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
             description.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; //don't care what is there
+            dependency_from_external.srcStageMask |= reading_stages;
         }
     } break;
     case INITIAL_ACTION_CONTINUE: {
@@ -3055,6 +3135,7 @@ VkRenderPass RenderingDeviceVulkan::_render_pass_create(const Vector<AttachmentF
             description.loadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
             description.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
             description.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; //don't care what is there
+            dependency_from_external.srcStageMask |= reading_stages;
         }
     } break;
     default: {
@@ -3068,14 +3149,17 @@ VkRenderPass RenderingDeviceVulkan::_render_pass_create(const Vector<AttachmentF
             description.storeOp = VK_ATTACHMENT_STORE_OP_STORE;
             description.stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
             description.finalLayout = is_sampled ? VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL : (is_storage ? VK_IMAGE_LAYOUT_GENERAL : VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
+            update_external_dependency_for_store(dependency_to_external, is_sampled, is_storage, false);
         } else if (p_format[i].usage_flags & TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) {
             description.storeOp = VK_ATTACHMENT_STORE_OP_STORE;
             description.stencilStoreOp = VK_ATTACHMENT_STORE_OP_STORE;
             description.finalLayout = is_sampled ? VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL : (is_storage ? VK_IMAGE_LAYOUT_GENERAL : VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL);
+            update_external_dependency_for_store(dependency_to_external, is_sampled, is_storage, true);
         } else {
             description.loadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
             description.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
             description.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; //don't care what is there
+            // TODO: What does this mean about the next usage (and thus appropriate dependency masks)?
         }
     } break;
     case FINAL_ACTION_DISCARD: {
@@ -3128,9 +3212,24 @@ VkRenderPass RenderingDeviceVulkan::_render_pass_create(const Vector<AttachmentF
     } else if (p_format[i].usage_flags & TEXTURE_USAGE_RESOLVE_ATTACHMENT_BIT) {
         reference.layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
         resolve_references.push_back(reference);
+        // if resolves are done, we need to ensure the copy is safe
+        dependency_to_external.dstStageMask |= VK_PIPELINE_STAGE_TRANSFER_BIT;
+        dependency_to_external.dstAccessMask |= VK_ACCESS_TRANSFER_READ_BIT;
     } else {
         ERR_FAIL_V_MSG(VK_NULL_HANDLE, "Texture index " + itos(i) + " is neither color, depth stencil or resolve so it can't be used as attachment.");
     }
+
+    // NOTE: Big Mallet Approach -- any layout transition causes a full barrier
+    if (reference.layout != description.initialLayout) {
+        // NOTE: this should be smarter based on the texture's knowledge of its previous role
+        dependency_from_external.srcStageMask |= VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
+        dependency_from_external.srcAccessMask |= VK_ACCESS_MEMORY_READ_BIT | VK_ACCESS_MEMORY_WRITE_BIT;
+    }
+    if (reference.layout != description.finalLayout) {
+        // NOTE: this should be smarter based on the texture's knowledge of its subsequent role
+        dependency_from_external.dstStageMask |= VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
+        dependency_from_external.dstAccessMask |= VK_ACCESS_MEMORY_READ_BIT | VK_ACCESS_MEMORY_WRITE_BIT;
+    }
 }

 ERR_FAIL_COND_V_MSG(depth_stencil_references.size() > 1, VK_NULL_HANDLE,
@@ -3159,8 +3258,8 @@ VkRenderPass RenderingDeviceVulkan::_render_pass_create(const Vector<AttachmentF
 render_pass_create_info.pAttachments = attachments.ptr();
 render_pass_create_info.subpassCount = 1;
 render_pass_create_info.pSubpasses = &subpass;
-render_pass_create_info.dependencyCount = 0;
-render_pass_create_info.pDependencies = nullptr;
+render_pass_create_info.dependencyCount = 2;
+render_pass_create_info.pDependencies = dependencies;

 VkRenderPass render_pass;
 VkResult res = vkCreateRenderPass(device, &render_pass_create_info, nullptr, &render_pass);
@@ -3185,7 +3284,7 @@ RenderingDevice::FramebufferFormatID RenderingDeviceVulkan::framebuffer_format_c
 }

 int color_references;
-VkRenderPass render_pass = _render_pass_create(p_format, INITIAL_ACTION_CLEAR, FINAL_ACTION_DISCARD, INITIAL_ACTION_CLEAR, FINAL_ACTION_DISCARD, &color_references); //actions don't matter for this use case
+VkRenderPass render_pass = _render_pass_create(p_format, INITIAL_ACTION_CLEAR, FINAL_ACTION_READ, INITIAL_ACTION_CLEAR, FINAL_ACTION_READ, &color_references); //actions don't matter for this use case

 if (render_pass == VK_NULL_HANDLE) { //was likely invalid
     return INVALID_ID;
@@ -4886,43 +4985,24 @@ Error RenderingDeviceVulkan::buffer_update(RID p_buffer, uint32_t p_offset, uint
 ERR_FAIL_COND_V_MSG(draw_list && p_sync_with_draw, ERR_INVALID_PARAMETER,
         "Updating buffers in 'sync to draw' mode is forbidden during creation of a draw list");

-VkPipelineStageFlags dst_stage_mask;
-VkAccessFlags dst_access;
+// Protect subsequent updates...
+VkPipelineStageFlags dst_stage_mask = VK_PIPELINE_STAGE_TRANSFER_BIT;
+VkAccessFlags dst_access = VK_ACCESS_TRANSFER_WRITE_BIT;

-Buffer *buffer = nullptr;
-if (vertex_buffer_owner.owns(p_buffer)) {
-    dst_stage_mask = VK_PIPELINE_STAGE_VERTEX_INPUT_BIT;
-    dst_access = VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT;
-    buffer = vertex_buffer_owner.getornull(p_buffer);
-} else if (index_buffer_owner.owns(p_buffer)) {
-    dst_stage_mask = VK_PIPELINE_STAGE_VERTEX_INPUT_BIT;
-    dst_access = VK_ACCESS_INDEX_READ_BIT;
-    buffer = index_buffer_owner.getornull(p_buffer);
-} else if (uniform_buffer_owner.owns(p_buffer)) {
-    dst_stage_mask = VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
-    dst_access = VK_ACCESS_UNIFORM_READ_BIT;
-    buffer = uniform_buffer_owner.getornull(p_buffer);
-} else if (texture_buffer_owner.owns(p_buffer)) {
-    dst_stage_mask = VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
-    dst_access = VK_ACCESS_SHADER_READ_BIT;
-    buffer = &texture_buffer_owner.getornull(p_buffer)->buffer;
-} else if (storage_buffer_owner.owns(p_buffer)) {
-    dst_stage_mask = VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
-    dst_access = VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
-    buffer = storage_buffer_owner.getornull(p_buffer);
-} else {
+Buffer *buffer = _get_buffer_from_owner(p_buffer, dst_stage_mask, dst_access);
+if (!buffer) {
     ERR_FAIL_V_MSG(ERR_INVALID_PARAMETER, "Buffer argument is not a valid buffer of any type.");
 }

 ERR_FAIL_COND_V_MSG(p_offset + p_size > buffer->size, ERR_INVALID_PARAMETER,
         "Attempted to write buffer (" + itos((p_offset + p_size) - buffer->size) + " bytes) past the end.");

+_buffer_memory_barrier(buffer->buffer, p_offset, p_size, dst_stage_mask, VK_PIPELINE_STAGE_TRANSFER_BIT, dst_access, VK_ACCESS_TRANSFER_WRITE_BIT, p_sync_with_draw);
 Error err = _buffer_update(buffer, p_offset, (uint8_t *)p_data, p_size, p_sync_with_draw);
 if (err) {
     return err;
 }

-_buffer_memory_barrier(buffer->buffer, p_offset, p_size, VK_PIPELINE_STAGE_TRANSFER_BIT, dst_stage_mask, VK_ACCESS_TRANSFER_WRITE_BIT, dst_access, p_sync_with_draw);
 #ifdef FORCE_FULL_BARRIER
 _full_barrier(p_sync_with_draw);
 #else
@@ -4934,20 +5014,20 @@ Vector<uint8_t> RenderingDeviceVulkan::buffer_get_data(RID p_buffer) {
 _THREAD_SAFE_METHOD_

-Buffer *buffer = nullptr;
-if (vertex_buffer_owner.owns(p_buffer)) {
-    buffer = vertex_buffer_owner.getornull(p_buffer);
-} else if (index_buffer_owner.owns(p_buffer)) {
-    buffer = index_buffer_owner.getornull(p_buffer);
-} else if (texture_buffer_owner.owns(p_buffer)) {
-    buffer = &texture_buffer_owner.getornull(p_buffer)->buffer;
-} else if (storage_buffer_owner.owns(p_buffer)) {
-    buffer = storage_buffer_owner.getornull(p_buffer);
-} else {
+// It could be this buffer was just created
+VkPipelineStageFlags src_stage_mask = VK_PIPELINE_STAGE_TRANSFER_BIT;
+VkAccessFlags src_access_mask = VK_ACCESS_TRANSFER_WRITE_BIT;
+// Get the Vulkan buffer and the potential stage/access possible
+Buffer *buffer = _get_buffer_from_owner(p_buffer, src_stage_mask, src_access_mask);
+if (!buffer) {
     ERR_FAIL_V_MSG(Vector<uint8_t>(), "Buffer is either invalid or this type of buffer can't be retrieved. Only Index and Vertex buffers allow retrieving.");
 }

+// Make sure no one is using the buffer -- the "false" gets us to the same command buffer as below.
+_buffer_memory_barrier(buffer->buffer, 0, buffer->size, src_stage_mask, VK_PIPELINE_STAGE_TRANSFER_BIT, src_access_mask, VK_ACCESS_TRANSFER_READ_BIT, false);
+
 VkCommandBuffer command_buffer = frames[frame].setup_command_buffer;
+
 Buffer tmp_buffer;
 _buffer_allocate(&tmp_buffer, buffer->size, VK_BUFFER_USAGE_TRANSFER_DST_BIT, VMA_MEMORY_USAGE_CPU_ONLY);
 VkBufferCopy region;
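Note the reordering in buffer_update(): the barrier is now recorded *before* the upload, turning it into a write-after-read guard — every stage that may still be reading the buffer (the seeded transfer bits plus whatever _get_buffer_from_owner() ORs in) must drain before the transfer overwrites the data. buffer_get_data() gains the symmetric read-after-write guard before its copy. Compressed to its essentials, the update path now records (a restatement of the hunk above, not new code):

    // 1) Wait for prior consumers of this buffer range, then...
    _buffer_memory_barrier(buffer->buffer, p_offset, p_size,
            dst_stage_mask, VK_PIPELINE_STAGE_TRANSFER_BIT,
            dst_access, VK_ACCESS_TRANSFER_WRITE_BIT, p_sync_with_draw);
    // 2) ...perform the transfer write that replaces their data.
    Error err = _buffer_update(buffer, p_offset, (uint8_t *)p_data, p_size, p_sync_with_draw);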
@@ -5614,7 +5694,7 @@ Error RenderingDeviceVulkan::_draw_list_render_pass_begin(Framebuffer *framebuff
     image_memory_barrier.subresourceRange.baseArrayLayer = texture->base_layer;
     image_memory_barrier.subresourceRange.layerCount = texture->layers;

-    vkCmdPipelineBarrier(command_buffer, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, 0, 0, nullptr, 0, nullptr, 1, &image_memory_barrier);
+    vkCmdPipelineBarrier(command_buffer, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_PIPELINE_STAGE_VERTEX_INPUT_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, 0, 0, nullptr, 0, nullptr, 1, &image_memory_barrier);

     texture->layout = VK_IMAGE_LAYOUT_GENERAL;

@@ -6352,7 +6432,7 @@ void RenderingDeviceVulkan::draw_list_end() {
     image_memory_barrier.subresourceRange.baseArrayLayer = texture->base_layer;
     image_memory_barrier.subresourceRange.layerCount = texture->layers;

-    vkCmdPipelineBarrier(frames[frame].draw_command_buffer, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT, VK_PIPELINE_STAGE_VERTEX_INPUT_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT | VK_PIPELINE_STAGE_TRANSFER_BIT, 0, 0, nullptr, 0, nullptr, 1, &image_memory_barrier);
+    vkCmdPipelineBarrier(frames[frame].draw_command_buffer, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, 0, 0, nullptr, 0, nullptr, 1, &image_memory_barrier);

     texture->layout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
 }
@@ -6494,7 +6574,7 @@ void RenderingDeviceVulkan::compute_list_bind_uniform_set(ComputeListID p_list,
     image_memory_barrier.subresourceRange.baseArrayLayer = textures_to_sampled[i]->base_layer;
     image_memory_barrier.subresourceRange.layerCount = textures_to_sampled[i]->layers;

-    vkCmdPipelineBarrier(cl->command_buffer, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, 0, 0, nullptr, 0, nullptr, 1, &image_memory_barrier);
+    vkCmdPipelineBarrier(cl->command_buffer, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, 0, 0, nullptr, 0, nullptr, 1, &image_memory_barrier);

     textures_to_sampled[i]->layout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;

@@ -6691,13 +6771,13 @@ void RenderingDeviceVulkan::compute_list_add_barrier(ComputeListID p_list) {
 void RenderingDeviceVulkan::compute_list_end() {
     ERR_FAIL_COND(!compute_list);
-
+    const VkPipelineStageFlags dest_stage_mask = VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT | VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT | VK_PIPELINE_STAGE_TRANSFER_BIT;
     for (Set<Texture *>::Element *E = compute_list->state.textures_to_sampled_layout.front(); E; E = E->next()) {
         VkImageMemoryBarrier image_memory_barrier;
        image_memory_barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
        image_memory_barrier.pNext = nullptr;
        image_memory_barrier.srcAccessMask = VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
-       image_memory_barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
+       image_memory_barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_INDIRECT_COMMAND_READ_BIT;
        image_memory_barrier.oldLayout = E->get()->layout;
        image_memory_barrier.newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;

@@ -6710,7 +6790,8 @@ void RenderingDeviceVulkan::compute_list_end() {
        image_memory_barrier.subresourceRange.baseArrayLayer = E->get()->base_layer;
        image_memory_barrier.subresourceRange.layerCount = E->get()->layers;

-       vkCmdPipelineBarrier(compute_list->command_buffer, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, 0, 0, nullptr, 0, nullptr, 1, &image_memory_barrier);
+       // TODO: Look at the usages in the compute list and determine tighter dst stage and access masks based on some "final" usage equivalent
+       vkCmdPipelineBarrier(compute_list->command_buffer, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, dest_stage_mask, 0, 0, nullptr, 0, nullptr, 1, &image_memory_barrier);

        E->get()->layout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
    }
@@ -6720,7 +6801,7 @@ void RenderingDeviceVulkan::compute_list_end() {
 #ifdef FORCE_FULL_BARRIER
    _full_barrier(true);
 #else
-   _memory_barrier(VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT | VK_PIPELINE_STAGE_VERTEX_INPUT_BIT | VK_PIPELINE_STAGE_TRANSFER_BIT, VK_ACCESS_SHADER_WRITE_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT | VK_ACCESS_INDEX_READ_BIT | VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT | VK_ACCESS_TRANSFER_READ_BIT | VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_INDIRECT_COMMAND_READ_BIT, true);
+   _memory_barrier(VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, dest_stage_mask, VK_ACCESS_SHADER_WRITE_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT | VK_ACCESS_INDEX_READ_BIT | VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT | VK_ACCESS_TRANSFER_READ_BIT | VK_ACCESS_TRANSFER_WRITE_BIT | VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_INDIRECT_COMMAND_READ_BIT, true);
 #endif
 }

diff --git a/drivers/vulkan/rendering_device_vulkan.h b/drivers/vulkan/rendering_device_vulkan.h
index 6f8bbc9c64..e6cbf2e01d 100644
--- a/drivers/vulkan/rendering_device_vulkan.h
+++ b/drivers/vulkan/rendering_device_vulkan.h
@@ -793,6 +793,7 @@ class RenderingDeviceVulkan : public RenderingDevice {
    Error _draw_list_setup_framebuffer(Framebuffer *p_framebuffer, InitialAction p_initial_color_action, FinalAction p_final_color_action, InitialAction p_initial_depth_action, FinalAction p_final_depth_action, VkFramebuffer *r_framebuffer, VkRenderPass *r_render_pass);
    Error _draw_list_render_pass_begin(Framebuffer *framebuffer, InitialAction p_initial_color_action, FinalAction p_final_color_action, InitialAction p_initial_depth_action, FinalAction p_final_depth_action, const Vector<Color> &p_clear_colors, float p_clear_depth, uint32_t p_clear_stencil, Point2i viewport_offset, Point2i viewport_size, VkFramebuffer vkframebuffer, VkRenderPass render_pass, VkCommandBuffer command_buffer, VkSubpassContents subpass_contents, const Vector<RID> &p_storage_textures);
    _FORCE_INLINE_ DrawList *_get_draw_list_ptr(DrawListID p_id);
+   Buffer *_get_buffer_from_owner(RID p_buffer, VkPipelineStageFlags &dst_stage_mask, VkAccessFlags &dst_access);

    /**********************/
    /**** COMPUTE LIST ****/
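The consolidated dest_stage_mask in compute_list_end() adds VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT, and the widened access masks add VK_ACCESS_INDIRECT_COMMAND_READ_BIT, chiefly so a compute pass can generate draw parameters that a later pass consumes without further barriers. A sketch of the consumption this protects — indirect_args is a hypothetical storage buffer the compute list wrote, not a name from the patch:

    // Consumed by the DRAW_INDIRECT stage with INDIRECT_COMMAND_READ access,
    // both of which the end-of-list barrier now covers.
    vkCmdDrawIndirect(draw_command_buffer, indirect_args,
            /*offset*/ 0, /*drawCount*/ 1, /*stride*/ sizeof(VkDrawIndirectCommand));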