Diffstat (limited to 'drivers/vulkan')
-rw-r--r-- | drivers/vulkan/SCsub | 29
-rw-r--r-- | drivers/vulkan/rendering_device_vulkan.cpp | 619
-rw-r--r-- | drivers/vulkan/rendering_device_vulkan.h | 284
-rw-r--r-- | drivers/vulkan/vulkan_context.cpp | 103
-rw-r--r-- | drivers/vulkan/vulkan_context.h | 51
5 files changed, 770 insertions, 316 deletions
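The thrust of this patch is replacing the boolean p_sync_with_draw parameters with an explicit p_post_barrier bitmask (BARRIER_MASK_RASTER, BARRIER_MASK_COMPUTE, BARRIER_MASK_TRANSFER, or BARRIER_MASK_ALL), so each transfer-style operation only stalls the pipeline stages that actually consume its result. The stage/access mapping the diff repeats at the end of texture_update(), texture_copy(), texture_resolve_multisample(), texture_clear(), draw_list_end() and compute_list_end() boils down to the sketch below; the enum values and helper name are assumptions for illustration, only the BARRIER_MASK_* names and VK_* flags come from the diff itself.

    #include <vulkan/vulkan.h>
    #include <cstdint>

    // Illustrative stand-in for RenderingDevice's barrier mask bits.
    enum BarrierMask : uint32_t {
        BARRIER_MASK_RASTER = 1,
        BARRIER_MASK_COMPUTE = 2,
        BARRIER_MASK_TRANSFER = 4,
        BARRIER_MASK_ALL = BARRIER_MASK_RASTER | BARRIER_MASK_COMPUTE | BARRIER_MASK_TRANSFER,
    };

    // Map a post-barrier mask to the Vulkan stages/accesses that must wait,
    // mirroring the blocks this patch adds after each copy/clear/update.
    static void post_barrier_to_vk(uint32_t p_post_barrier,
            VkPipelineStageFlags &r_stages, VkAccessFlags &r_access) {
        r_stages = 0;
        r_access = 0;
        if (p_post_barrier & BARRIER_MASK_COMPUTE) {
            r_stages |= VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
            r_access |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
        }
        if (p_post_barrier & BARRIER_MASK_RASTER) {
            r_stages |= VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
            r_access |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
        }
        if (p_post_barrier & BARRIER_MASK_TRANSFER) {
            r_stages |= VK_PIPELINE_STAGE_TRANSFER_BIT;
            r_access |= VK_ACCESS_TRANSFER_WRITE_BIT;
        }
    }

Each operation then issues a vkCmdPipelineBarrier with exactly these masks, so passing only BARRIER_MASK_COMPUTE, for example, makes compute shaders wait on the transfer without touching the raster or transfer stages.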
diff --git a/drivers/vulkan/SCsub b/drivers/vulkan/SCsub index 13fcaf16d2..14b9d63204 100644 --- a/drivers/vulkan/SCsub +++ b/drivers/vulkan/SCsub @@ -2,7 +2,7 @@ Import("env") -env.add_source_files(env.drivers_sources, "*.cpp") +thirdparty_obj = [] # FIXME: Refactor all this to reduce code duplication. if env["platform"] == "android": @@ -22,7 +22,8 @@ if env["platform"] == "android": thirdparty_dir = "#thirdparty/vulkan" vma_sources = [thirdparty_dir + "/android/vk_mem_alloc.cpp"] - env_thirdparty.add_source_files(env.drivers_sources, vma_sources) + env_thirdparty.add_source_files(thirdparty_obj, vma_sources) + elif env["platform"] == "iphone": # Use bundled Vulkan headers thirdparty_dir = "#thirdparty/vulkan" @@ -33,7 +34,8 @@ elif env["platform"] == "iphone": env_thirdparty.disable_warnings() vma_sources = [thirdparty_dir + "/vk_mem_alloc.cpp"] - env_thirdparty.add_source_files(env.drivers_sources, vma_sources) + env_thirdparty.add_source_files(thirdparty_obj, vma_sources) + elif env["builtin_vulkan"]: # Use bundled Vulkan headers thirdparty_dir = "#thirdparty/vulkan" @@ -98,8 +100,9 @@ elif env["builtin_vulkan"]: env_thirdparty.AppendUnique(CPPDEFINES=["HAVE_SECURE_GETENV"]) loader_sources = [thirdparty_dir + "/loader/" + file for file in loader_sources] - env_thirdparty.add_source_files(env.drivers_sources, loader_sources) - env_thirdparty.add_source_files(env.drivers_sources, vma_sources) + env_thirdparty.add_source_files(thirdparty_obj, loader_sources) + env_thirdparty.add_source_files(thirdparty_obj, vma_sources) + else: # Always build VMA. thirdparty_dir = "#thirdparty/vulkan" env.Prepend(CPPPATH=[thirdparty_dir]) @@ -109,4 +112,18 @@ else: # Always build VMA. env_thirdparty.disable_warnings() vma_sources = [thirdparty_dir + "/vk_mem_alloc.cpp"] - env_thirdparty.add_source_files(env.drivers_sources, vma_sources) + env_thirdparty.add_source_files(thirdparty_obj, vma_sources) + + +env.drivers_sources += thirdparty_obj + + +# Godot source files + +driver_obj = [] + +env.add_source_files(driver_obj, "*.cpp") +env.drivers_sources += driver_obj + +# Needed to force rebuilding the driver files when the thirdparty code is updated. +env.Depends(driver_obj, thirdparty_obj) diff --git a/drivers/vulkan/rendering_device_vulkan.cpp b/drivers/vulkan/rendering_device_vulkan.cpp index a356586698..ef331ec4b6 100644 --- a/drivers/vulkan/rendering_device_vulkan.cpp +++ b/drivers/vulkan/rendering_device_vulkan.cpp @@ -5,8 +5,8 @@ /* GODOT ENGINE */ /* https://godotengine.org */ /*************************************************************************/ -/* Copyright (c) 2007-2020 Juan Linietsky, Ariel Manzur. */ -/* Copyright (c) 2014-2020 Godot Engine contributors (cf. AUTHORS.md). */ +/* Copyright (c) 2007-2021 Juan Linietsky, Ariel Manzur. */ +/* Copyright (c) 2014-2021 Godot Engine contributors (cf. AUTHORS.md). 
*/ /* */ /* Permission is hereby granted, free of charge, to any person obtaining */ /* a copy of this software and associated documentation files (the */ @@ -30,10 +30,10 @@ #include "rendering_device_vulkan.h" -#include "core/hashfuncs.h" +#include "core/config/project_settings.h" #include "core/os/file_access.h" #include "core/os/os.h" -#include "core/project_settings.h" +#include "core/templates/hashfuncs.h" #include "drivers/vulkan/vulkan_context.h" #include "thirdparty/spirv-reflect/spirv_reflect.h" @@ -41,28 +41,60 @@ //#define FORCE_FULL_BARRIER // Get the Vulkan object information and possible stage access types (bitwise OR'd with incoming values) -RenderingDeviceVulkan::Buffer *RenderingDeviceVulkan::_get_buffer_from_owner(RID p_buffer, VkPipelineStageFlags &stage_mask, VkAccessFlags &access_mask) { +RenderingDeviceVulkan::Buffer *RenderingDeviceVulkan::_get_buffer_from_owner(RID p_buffer, VkPipelineStageFlags &r_stage_mask, VkAccessFlags &r_access_mask, uint32_t p_post_barrier) { Buffer *buffer = nullptr; if (vertex_buffer_owner.owns(p_buffer)) { - stage_mask |= VK_PIPELINE_STAGE_VERTEX_INPUT_BIT; - access_mask |= VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT; buffer = vertex_buffer_owner.getornull(p_buffer); + + r_stage_mask |= VK_PIPELINE_STAGE_VERTEX_INPUT_BIT; + r_access_mask |= VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT; + if (buffer->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) { + if (p_post_barrier & BARRIER_MASK_RASTER) { + r_access_mask |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT; + r_stage_mask |= VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT; + } + if (p_post_barrier & BARRIER_MASK_COMPUTE) { + r_access_mask |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT; + r_stage_mask |= VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT; + } + } } else if (index_buffer_owner.owns(p_buffer)) { - stage_mask |= VK_PIPELINE_STAGE_VERTEX_INPUT_BIT; - access_mask |= VK_ACCESS_INDEX_READ_BIT; + r_stage_mask |= VK_PIPELINE_STAGE_VERTEX_INPUT_BIT; + r_access_mask |= VK_ACCESS_INDEX_READ_BIT; buffer = index_buffer_owner.getornull(p_buffer); } else if (uniform_buffer_owner.owns(p_buffer)) { - stage_mask |= VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT; - access_mask |= VK_ACCESS_UNIFORM_READ_BIT; + if (p_post_barrier & BARRIER_MASK_RASTER) { + r_stage_mask |= VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT; + } + if (p_post_barrier & BARRIER_MASK_COMPUTE) { + r_stage_mask |= VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT; + } + r_access_mask |= VK_ACCESS_UNIFORM_READ_BIT; buffer = uniform_buffer_owner.getornull(p_buffer); } else if (texture_buffer_owner.owns(p_buffer)) { - stage_mask |= VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT; - access_mask |= VK_ACCESS_SHADER_READ_BIT; + if (p_post_barrier & BARRIER_MASK_RASTER) { + r_stage_mask |= VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT; + } + if (p_post_barrier & BARRIER_MASK_COMPUTE) { + r_stage_mask |= VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT; + } + r_access_mask |= VK_ACCESS_SHADER_READ_BIT; buffer = &texture_buffer_owner.getornull(p_buffer)->buffer; } else if (storage_buffer_owner.owns(p_buffer)) { - stage_mask |= VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT; - access_mask |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT; buffer = 
storage_buffer_owner.getornull(p_buffer); + if (p_post_barrier & BARRIER_MASK_RASTER) { + r_stage_mask |= VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT; + r_access_mask |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT; + } + if (p_post_barrier & BARRIER_MASK_COMPUTE) { + r_stage_mask |= VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT; + r_access_mask |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT; + } + + if (buffer->usage & VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT) { + r_stage_mask |= VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT; + r_access_mask |= VK_ACCESS_INDIRECT_COMMAND_READ_BIT; + } } return buffer; } @@ -615,6 +647,7 @@ int RenderingDeviceVulkan::get_format_vertex_size(DataFormat p_format) { case DATA_FORMAT_B8G8R8A8_SNORM: case DATA_FORMAT_B8G8R8A8_UINT: case DATA_FORMAT_B8G8R8A8_SINT: + case DATA_FORMAT_A2B10G10R10_UNORM_PACK32: return 4; case DATA_FORMAT_R16_UNORM: case DATA_FORMAT_R16_SNORM: @@ -1361,6 +1394,7 @@ Error RenderingDeviceVulkan::_buffer_allocate(Buffer *p_buffer, uint32_t p_size, p_buffer->buffer_info.buffer = p_buffer->buffer; p_buffer->buffer_info.offset = 0; p_buffer->buffer_info.range = p_size; + p_buffer->usage = p_usage; return OK; } @@ -1691,16 +1725,16 @@ RID RenderingDeviceVulkan::texture_create(const TextureFormat &p_format, const T #endif } - if (p_format.type == TEXTURE_TYPE_CUBE || p_format.type == TEXTURE_TYPE_CUBE_ARRAY) { + if (p_format.texture_type == TEXTURE_TYPE_CUBE || p_format.texture_type == TEXTURE_TYPE_CUBE_ARRAY) { image_create_info.flags |= VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT; } /*if (p_format.type == TEXTURE_TYPE_2D || p_format.type == TEXTURE_TYPE_2D_ARRAY) { image_create_info.flags |= VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT; }*/ - ERR_FAIL_INDEX_V(p_format.type, TEXTURE_TYPE_MAX, RID()); + ERR_FAIL_INDEX_V(p_format.texture_type, TEXTURE_TYPE_MAX, RID()); - image_create_info.imageType = vulkan_image_type[p_format.type]; + image_create_info.imageType = vulkan_image_type[p_format.texture_type]; ERR_FAIL_COND_V_MSG(p_format.width < 1, RID(), "Width must be equal or greater than 1 for all textures"); @@ -1725,10 +1759,10 @@ RID RenderingDeviceVulkan::texture_create(const TextureFormat &p_format, const T image_create_info.mipLevels = p_format.mipmaps; - if (p_format.type == TEXTURE_TYPE_1D_ARRAY || p_format.type == TEXTURE_TYPE_2D_ARRAY || p_format.type == TEXTURE_TYPE_CUBE_ARRAY || p_format.type == TEXTURE_TYPE_CUBE) { + if (p_format.texture_type == TEXTURE_TYPE_1D_ARRAY || p_format.texture_type == TEXTURE_TYPE_2D_ARRAY || p_format.texture_type == TEXTURE_TYPE_CUBE_ARRAY || p_format.texture_type == TEXTURE_TYPE_CUBE) { ERR_FAIL_COND_V_MSG(p_format.array_layers < 1, RID(), "Amount of layers must be equal or greater than 1 for arrays and cubemaps."); - ERR_FAIL_COND_V_MSG((p_format.type == TEXTURE_TYPE_CUBE_ARRAY || p_format.type == TEXTURE_TYPE_CUBE) && (p_format.array_layers % 6) != 0, RID(), + ERR_FAIL_COND_V_MSG((p_format.texture_type == TEXTURE_TYPE_CUBE_ARRAY || p_format.texture_type == TEXTURE_TYPE_CUBE) && (p_format.array_layers % 6) != 0, RID(), "Cubemap and cubemap array textures must provide a layer number that is multiple of 6"); image_create_info.arrayLayers = p_format.array_layers; } else { @@ -1858,7 +1892,7 @@ RID RenderingDeviceVulkan::texture_create(const TextureFormat &p_format, const T VkResult err = vmaCreateImage(allocator, &image_create_info, &allocInfo, &texture.image, &texture.allocation, &texture.allocation_info); ERR_FAIL_COND_V_MSG(err, RID(), "vmaCreateImage failed with error " + itos(err) 
+ "."); - texture.type = p_format.type; + texture.type = p_format.texture_type; texture.format = p_format.format; texture.width = image_create_info.extent.width; texture.height = image_create_info.extent.height; @@ -1926,7 +1960,7 @@ RID RenderingDeviceVulkan::texture_create(const TextureFormat &p_format, const T VK_IMAGE_VIEW_TYPE_CUBE_ARRAY, }; - image_view_create_info.viewType = view_types[p_format.type]; + image_view_create_info.viewType = view_types[p_format.texture_type]; if (p_view.format_override == DATA_FORMAT_MAX) { image_view_create_info.format = image_create_info.format; } else { @@ -2065,6 +2099,48 @@ RID RenderingDeviceVulkan::texture_create_shared(const TextureView &p_view, RID image_view_create_info.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; } + VkImageViewUsageCreateInfo usage_info; + usage_info.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_USAGE_CREATE_INFO; + usage_info.pNext = nullptr; + if (p_view.format_override != DATA_FORMAT_MAX) { + //need to validate usage with vulkan + + usage_info.usage = 0; + + if (texture.usage_flags & TEXTURE_USAGE_SAMPLING_BIT) { + usage_info.usage |= VK_IMAGE_USAGE_SAMPLED_BIT; + } + + if (texture.usage_flags & TEXTURE_USAGE_STORAGE_BIT) { + if (texture_is_format_supported_for_usage(p_view.format_override, TEXTURE_USAGE_STORAGE_BIT)) { + usage_info.usage |= VK_IMAGE_USAGE_STORAGE_BIT; + } + } + + if (texture.usage_flags & TEXTURE_USAGE_COLOR_ATTACHMENT_BIT) { + if (texture_is_format_supported_for_usage(p_view.format_override, TEXTURE_USAGE_COLOR_ATTACHMENT_BIT)) { + usage_info.usage |= VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT; + } + } + + if (texture.usage_flags & TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) { + usage_info.usage |= VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT; + } + + if (texture.usage_flags & TEXTURE_USAGE_CAN_UPDATE_BIT) { + usage_info.usage |= VK_IMAGE_USAGE_TRANSFER_DST_BIT; + } + if (texture.usage_flags & TEXTURE_USAGE_CAN_COPY_FROM_BIT) { + usage_info.usage |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT; + } + + if (texture.usage_flags & TEXTURE_USAGE_CAN_COPY_TO_BIT) { + usage_info.usage |= VK_IMAGE_USAGE_TRANSFER_DST_BIT; + } + + image_view_create_info.pNext = &usage_info; + } + VkResult err = vkCreateImageView(device, &image_view_create_info, nullptr, &texture.view); ERR_FAIL_COND_V_MSG(err, RID(), "vkCreateImageView failed with error " + itos(err) + "."); @@ -2093,15 +2169,26 @@ RID RenderingDeviceVulkan::texture_create_shared_from_slice(const TextureView &p ERR_FAIL_COND_V_MSG(p_slice_type == TEXTURE_SLICE_3D && src_texture->type != TEXTURE_TYPE_3D, RID(), "Can only create a 3D slice from a 3D texture"); + ERR_FAIL_COND_V_MSG(p_slice_type == TEXTURE_SLICE_2D_ARRAY && (src_texture->type != TEXTURE_TYPE_2D_ARRAY), RID(), + "Can only create an array slice from a 2D array mipmap"); + //create view ERR_FAIL_UNSIGNED_INDEX_V(p_mipmap, src_texture->mipmaps, RID()); ERR_FAIL_UNSIGNED_INDEX_V(p_layer, src_texture->layers, RID()); + int slice_layers = 1; + if (p_slice_type == TEXTURE_SLICE_2D_ARRAY) { + ERR_FAIL_COND_V_MSG(p_layer != 0, RID(), "layer must be 0 when obtaining a 2D array mipmap slice"); + slice_layers = src_texture->layers; + } else if (p_slice_type == TEXTURE_SLICE_CUBEMAP) { + slice_layers = 6; + } + Texture texture = *src_texture; get_image_format_required_size(texture.format, texture.width, texture.height, texture.depth, p_mipmap + 1, &texture.width, &texture.height); texture.mipmaps = 1; - texture.layers = p_slice_type == TEXTURE_SLICE_CUBEMAP ? 
6 : 1; + texture.layers = slice_layers; texture.base_mipmap = p_mipmap; texture.base_layer = p_layer; @@ -2121,7 +2208,16 @@ RID RenderingDeviceVulkan::texture_create_shared_from_slice(const TextureView &p VK_IMAGE_VIEW_TYPE_2D, }; - image_view_create_info.viewType = p_slice_type == TEXTURE_SLICE_CUBEMAP ? VK_IMAGE_VIEW_TYPE_CUBE : (p_slice_type == TEXTURE_SLICE_3D ? VK_IMAGE_VIEW_TYPE_3D : view_types[texture.type]); + image_view_create_info.viewType = view_types[texture.type]; + + if (p_slice_type == TEXTURE_SLICE_CUBEMAP) { + image_view_create_info.viewType = VK_IMAGE_VIEW_TYPE_CUBE; + } else if (p_slice_type == TEXTURE_SLICE_3D) { + image_view_create_info.viewType = VK_IMAGE_VIEW_TYPE_3D; + } else if (p_slice_type == TEXTURE_SLICE_2D_ARRAY) { + image_view_create_info.viewType = VK_IMAGE_VIEW_TYPE_2D_ARRAY; + } + if (p_view.format_override == DATA_FORMAT_MAX || p_view.format_override == texture.format) { image_view_create_info.format = vulkan_formats[texture.format]; } else { @@ -2155,7 +2251,7 @@ RID RenderingDeviceVulkan::texture_create_shared_from_slice(const TextureView &p } image_view_create_info.subresourceRange.baseMipLevel = p_mipmap; image_view_create_info.subresourceRange.levelCount = 1; - image_view_create_info.subresourceRange.layerCount = p_slice_type == TEXTURE_SLICE_CUBEMAP ? 6 : 1; + image_view_create_info.subresourceRange.layerCount = slice_layers; image_view_create_info.subresourceRange.baseArrayLayer = p_layer; if (texture.usage_flags & TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) { @@ -2174,11 +2270,11 @@ RID RenderingDeviceVulkan::texture_create_shared_from_slice(const TextureView &p return id; } -Error RenderingDeviceVulkan::texture_update(RID p_texture, uint32_t p_layer, const Vector<uint8_t> &p_data, bool p_sync_with_draw) { +Error RenderingDeviceVulkan::texture_update(RID p_texture, uint32_t p_layer, const Vector<uint8_t> &p_data, uint32_t p_post_barrier) { _THREAD_SAFE_METHOD_ - ERR_FAIL_COND_V_MSG(draw_list && p_sync_with_draw, ERR_INVALID_PARAMETER, - "Updating textures in 'sync to draw' mode is forbidden during creation of a draw list"); + ERR_FAIL_COND_V_MSG(draw_list || compute_list, ERR_INVALID_PARAMETER, + "Updating textures is forbidden during creation of a draw or compute list"); Texture *texture = texture_owner.getornull(p_texture); ERR_FAIL_COND_V(!texture, ERR_INVALID_PARAMETER); @@ -2219,7 +2315,7 @@ Error RenderingDeviceVulkan::texture_update(RID p_texture, uint32_t p_layer, con const uint8_t *r = p_data.ptr(); - VkCommandBuffer command_buffer = p_sync_with_draw ? frames[frame].draw_command_buffer : frames[frame].setup_command_buffer; + VkCommandBuffer command_buffer = p_post_barrier ? 
frames[frame].draw_command_buffer : frames[frame].setup_command_buffer; //barrier to transfer { @@ -2244,6 +2340,10 @@ Error RenderingDeviceVulkan::texture_update(RID p_texture, uint32_t p_layer, con } uint32_t mipmap_offset = 0; + + uint32_t logic_width = texture->width; + uint32_t logic_height = texture->height; + for (uint32_t mm_i = 0; mm_i < texture->mipmaps; mm_i++) { uint32_t depth; uint32_t image_total = get_image_format_required_size(texture->format, texture->width, texture->height, texture->depth, mm_i + 1, &width, &height, &depth); @@ -2260,12 +2360,15 @@ Error RenderingDeviceVulkan::texture_update(RID p_texture, uint32_t p_layer, con uint32_t region_w = MIN(region_size, width - x); uint32_t region_h = MIN(region_size, height - y); + uint32_t region_logic_w = MIN(region_size, logic_width - x); + uint32_t region_logic_h = MIN(region_size, logic_height - y); + uint32_t pixel_size = get_image_format_pixel_size(texture->format); uint32_t to_allocate = region_w * region_h * pixel_size; to_allocate >>= get_compressed_image_format_pixel_rshift(texture->format); uint32_t alloc_offset, alloc_size; - Error err = _staging_buffer_allocate(to_allocate, required_align, alloc_offset, alloc_size, false, p_sync_with_draw); + Error err = _staging_buffer_allocate(to_allocate, required_align, alloc_offset, alloc_size, false, p_post_barrier); ERR_FAIL_COND_V(err, ERR_CANT_CREATE); uint8_t *write_ptr; @@ -2341,8 +2444,8 @@ Error RenderingDeviceVulkan::texture_update(RID p_texture, uint32_t p_layer, con buffer_image_copy.imageOffset.y = y; buffer_image_copy.imageOffset.z = z; - buffer_image_copy.imageExtent.width = region_w; - buffer_image_copy.imageExtent.height = region_h; + buffer_image_copy.imageExtent.width = region_logic_w; + buffer_image_copy.imageExtent.height = region_logic_h; buffer_image_copy.imageExtent.depth = 1; vkCmdCopyBufferToImage(command_buffer, staging_buffer_blocks[staging_buffer_current].buffer, texture->image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &buffer_image_copy); @@ -2353,15 +2456,32 @@ Error RenderingDeviceVulkan::texture_update(RID p_texture, uint32_t p_layer, con } mipmap_offset = image_total; + logic_width = MAX(1, logic_width >> 1); + logic_height = MAX(1, logic_height >> 1); } //barrier to restore layout { + uint32_t barrier_flags = 0; + uint32_t access_flags = 0; + if (p_post_barrier & BARRIER_MASK_COMPUTE) { + barrier_flags |= VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT; + access_flags |= VK_ACCESS_SHADER_READ_BIT; + } + if (p_post_barrier & BARRIER_MASK_RASTER) { + barrier_flags |= VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT; + access_flags |= VK_ACCESS_SHADER_READ_BIT; + } + if (p_post_barrier & BARRIER_MASK_TRANSFER) { + barrier_flags |= VK_PIPELINE_STAGE_TRANSFER_BIT; + access_flags |= VK_ACCESS_TRANSFER_WRITE_BIT; + } + VkImageMemoryBarrier image_memory_barrier; image_memory_barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER; image_memory_barrier.pNext = nullptr; image_memory_barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT; - image_memory_barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT; + image_memory_barrier.dstAccessMask = access_flags; image_memory_barrier.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL; image_memory_barrier.newLayout = texture->layout; image_memory_barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; @@ -2373,7 +2493,7 @@ Error RenderingDeviceVulkan::texture_update(RID p_texture, uint32_t p_layer, con image_memory_barrier.subresourceRange.baseArrayLayer = p_layer; 
image_memory_barrier.subresourceRange.layerCount = 1; - vkCmdPipelineBarrier(command_buffer, VK_ACCESS_TRANSFER_WRITE_BIT, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, 0, 0, nullptr, 0, nullptr, 1, &image_memory_barrier); + vkCmdPipelineBarrier(command_buffer, VK_ACCESS_TRANSFER_WRITE_BIT, barrier_flags, 0, 0, nullptr, 0, nullptr, 1, &image_memory_barrier); } return OK; @@ -2586,13 +2706,13 @@ bool RenderingDeviceVulkan::texture_is_valid(RID p_texture) { return texture_owner.owns(p_texture); } -Error RenderingDeviceVulkan::texture_copy(RID p_from_texture, RID p_to_texture, const Vector3 &p_from, const Vector3 &p_to, const Vector3 &p_size, uint32_t p_src_mipmap, uint32_t p_dst_mipmap, uint32_t p_src_layer, uint32_t p_dst_layer, bool p_sync_with_draw) { +Error RenderingDeviceVulkan::texture_copy(RID p_from_texture, RID p_to_texture, const Vector3 &p_from, const Vector3 &p_to, const Vector3 &p_size, uint32_t p_src_mipmap, uint32_t p_dst_mipmap, uint32_t p_src_layer, uint32_t p_dst_layer, uint32_t p_post_barrier) { _THREAD_SAFE_METHOD_ Texture *src_tex = texture_owner.getornull(p_from_texture); ERR_FAIL_COND_V(!src_tex, ERR_INVALID_PARAMETER); - ERR_FAIL_COND_V_MSG(p_sync_with_draw && src_tex->bound, ERR_INVALID_PARAMETER, + ERR_FAIL_COND_V_MSG(src_tex->bound, ERR_INVALID_PARAMETER, "Source texture can't be copied while a render pass that uses it is being created. Ensure render pass is finalized (and that it was created with RENDER_PASS_CONTENTS_FINISH) to unbind this texture."); ERR_FAIL_COND_V_MSG(!(src_tex->usage_flags & TEXTURE_USAGE_CAN_COPY_FROM_BIT), ERR_INVALID_PARAMETER, "Source texture requires the TEXTURE_USAGE_CAN_COPY_FROM_BIT in order to be retrieved."); @@ -2613,7 +2733,7 @@ Error RenderingDeviceVulkan::texture_copy(RID p_from_texture, RID p_to_texture, Texture *dst_tex = texture_owner.getornull(p_to_texture); ERR_FAIL_COND_V(!dst_tex, ERR_INVALID_PARAMETER); - ERR_FAIL_COND_V_MSG(p_sync_with_draw && dst_tex->bound, ERR_INVALID_PARAMETER, + ERR_FAIL_COND_V_MSG(dst_tex->bound, ERR_INVALID_PARAMETER, "Destination texture can't be copied while a render pass that uses it is being created. Ensure render pass is finalized (and that it was created with RENDER_PASS_CONTENTS_FINISH) to unbind this texture."); ERR_FAIL_COND_V_MSG(!(dst_tex->usage_flags & TEXTURE_USAGE_CAN_COPY_TO_BIT), ERR_INVALID_PARAMETER, "Destination texture requires the TEXTURE_USAGE_CAN_COPY_TO_BIT in order to be retrieved."); @@ -2634,7 +2754,7 @@ Error RenderingDeviceVulkan::texture_copy(RID p_from_texture, RID p_to_texture, ERR_FAIL_COND_V_MSG(src_tex->read_aspect_mask != dst_tex->read_aspect_mask, ERR_INVALID_PARAMETER, "Source and destination texture must be of the same type (color or depth)."); - VkCommandBuffer command_buffer = p_sync_with_draw ? 
frames[frame].draw_command_buffer : frames[frame].setup_command_buffer; + VkCommandBuffer command_buffer = frames[frame].draw_command_buffer; { //PRE Copy the image @@ -2709,12 +2829,27 @@ Error RenderingDeviceVulkan::texture_copy(RID p_from_texture, RID p_to_texture, // RESTORE LAYOUT for SRC and DST + uint32_t barrier_flags = 0; + uint32_t access_flags = 0; + if (p_post_barrier & BARRIER_MASK_COMPUTE) { + barrier_flags |= VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT; + access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT; + } + if (p_post_barrier & BARRIER_MASK_RASTER) { + barrier_flags |= VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT; + access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT; + } + if (p_post_barrier & BARRIER_MASK_TRANSFER) { + barrier_flags |= VK_PIPELINE_STAGE_TRANSFER_BIT; + access_flags |= VK_ACCESS_TRANSFER_WRITE_BIT; + } + { //restore src VkImageMemoryBarrier image_memory_barrier; image_memory_barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER; image_memory_barrier.pNext = nullptr; image_memory_barrier.srcAccessMask = VK_ACCESS_TRANSFER_READ_BIT; - image_memory_barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT; + image_memory_barrier.dstAccessMask = access_flags; image_memory_barrier.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL; image_memory_barrier.newLayout = src_tex->layout; image_memory_barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; @@ -2726,7 +2861,7 @@ Error RenderingDeviceVulkan::texture_copy(RID p_from_texture, RID p_to_texture, image_memory_barrier.subresourceRange.baseArrayLayer = p_src_layer; image_memory_barrier.subresourceRange.layerCount = 1; - vkCmdPipelineBarrier(command_buffer, VK_ACCESS_TRANSFER_WRITE_BIT, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, 0, 0, nullptr, 0, nullptr, 1, &image_memory_barrier); + vkCmdPipelineBarrier(command_buffer, VK_ACCESS_TRANSFER_WRITE_BIT, barrier_flags, 0, 0, nullptr, 0, nullptr, 1, &image_memory_barrier); } { //make dst readable @@ -2735,7 +2870,7 @@ Error RenderingDeviceVulkan::texture_copy(RID p_from_texture, RID p_to_texture, image_memory_barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER; image_memory_barrier.pNext = nullptr; image_memory_barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT; - image_memory_barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT; + image_memory_barrier.dstAccessMask = access_flags; image_memory_barrier.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL; image_memory_barrier.newLayout = dst_tex->layout; @@ -2748,20 +2883,20 @@ Error RenderingDeviceVulkan::texture_copy(RID p_from_texture, RID p_to_texture, image_memory_barrier.subresourceRange.baseArrayLayer = p_src_layer; image_memory_barrier.subresourceRange.layerCount = 1; - vkCmdPipelineBarrier(command_buffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, 0, 0, nullptr, 0, nullptr, 1, &image_memory_barrier); + vkCmdPipelineBarrier(command_buffer, VK_PIPELINE_STAGE_TRANSFER_BIT, barrier_flags, 0, 0, nullptr, 0, nullptr, 1, &image_memory_barrier); } } return OK; } -Error RenderingDeviceVulkan::texture_resolve_multisample(RID p_from_texture, RID p_to_texture, bool p_sync_with_draw) { +Error RenderingDeviceVulkan::texture_resolve_multisample(RID p_from_texture, RID p_to_texture, uint32_t p_post_barrier) { _THREAD_SAFE_METHOD_ Texture 
*src_tex = texture_owner.getornull(p_from_texture); ERR_FAIL_COND_V(!src_tex, ERR_INVALID_PARAMETER); - ERR_FAIL_COND_V_MSG(p_sync_with_draw && src_tex->bound, ERR_INVALID_PARAMETER, + ERR_FAIL_COND_V_MSG(src_tex->bound, ERR_INVALID_PARAMETER, "Source texture can't be copied while a render pass that uses it is being created. Ensure render pass is finalized (and that it was created with RENDER_PASS_CONTENTS_FINISH) to unbind this texture."); ERR_FAIL_COND_V_MSG(!(src_tex->usage_flags & TEXTURE_USAGE_CAN_COPY_FROM_BIT), ERR_INVALID_PARAMETER, "Source texture requires the TEXTURE_USAGE_CAN_COPY_FROM_BIT in order to be retrieved."); @@ -2772,7 +2907,7 @@ Error RenderingDeviceVulkan::texture_resolve_multisample(RID p_from_texture, RID Texture *dst_tex = texture_owner.getornull(p_to_texture); ERR_FAIL_COND_V(!dst_tex, ERR_INVALID_PARAMETER); - ERR_FAIL_COND_V_MSG(p_sync_with_draw && dst_tex->bound, ERR_INVALID_PARAMETER, + ERR_FAIL_COND_V_MSG(dst_tex->bound, ERR_INVALID_PARAMETER, "Destination texture can't be copied while a render pass that uses it is being created. Ensure render pass is finalized (and that it was created with RENDER_PASS_CONTENTS_FINISH) to unbind this texture."); ERR_FAIL_COND_V_MSG(!(dst_tex->usage_flags & TEXTURE_USAGE_CAN_COPY_TO_BIT), ERR_INVALID_PARAMETER, "Destination texture requires the TEXTURE_USAGE_CAN_COPY_TO_BIT in order to be retrieved."); @@ -2786,7 +2921,7 @@ Error RenderingDeviceVulkan::texture_resolve_multisample(RID p_from_texture, RID ERR_FAIL_COND_V_MSG(src_tex->read_aspect_mask != dst_tex->read_aspect_mask, ERR_INVALID_PARAMETER, "Source and destination texture must be of the same type (color or depth)."); - VkCommandBuffer command_buffer = p_sync_with_draw ? frames[frame].draw_command_buffer : frames[frame].setup_command_buffer; + VkCommandBuffer command_buffer = frames[frame].draw_command_buffer; { //PRE Copy the image @@ -2861,12 +2996,27 @@ Error RenderingDeviceVulkan::texture_resolve_multisample(RID p_from_texture, RID // RESTORE LAYOUT for SRC and DST + uint32_t barrier_flags = 0; + uint32_t access_flags = 0; + if (p_post_barrier & BARRIER_MASK_COMPUTE) { + barrier_flags |= VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT; + access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT; + } + if (p_post_barrier & BARRIER_MASK_RASTER) { + barrier_flags |= VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT; + access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT; + } + if (p_post_barrier & BARRIER_MASK_TRANSFER) { + barrier_flags |= VK_PIPELINE_STAGE_TRANSFER_BIT; + access_flags |= VK_ACCESS_TRANSFER_WRITE_BIT; + } + { //restore src VkImageMemoryBarrier image_memory_barrier; image_memory_barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER; image_memory_barrier.pNext = nullptr; image_memory_barrier.srcAccessMask = VK_ACCESS_TRANSFER_READ_BIT; - image_memory_barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT; + image_memory_barrier.dstAccessMask = access_flags; image_memory_barrier.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL; image_memory_barrier.newLayout = src_tex->layout; image_memory_barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; @@ -2878,7 +3028,7 @@ Error RenderingDeviceVulkan::texture_resolve_multisample(RID p_from_texture, RID image_memory_barrier.subresourceRange.baseArrayLayer = src_tex->base_layer; image_memory_barrier.subresourceRange.layerCount = 1; - vkCmdPipelineBarrier(command_buffer, VK_ACCESS_TRANSFER_WRITE_BIT, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | 
VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, 0, 0, nullptr, 0, nullptr, 1, &image_memory_barrier); + vkCmdPipelineBarrier(command_buffer, VK_ACCESS_TRANSFER_WRITE_BIT, barrier_flags, 0, 0, nullptr, 0, nullptr, 1, &image_memory_barrier); } { //make dst readable @@ -2887,7 +3037,7 @@ Error RenderingDeviceVulkan::texture_resolve_multisample(RID p_from_texture, RID image_memory_barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER; image_memory_barrier.pNext = nullptr; image_memory_barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT; - image_memory_barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT; + image_memory_barrier.dstAccessMask = access_flags; image_memory_barrier.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL; image_memory_barrier.newLayout = dst_tex->layout; @@ -2900,20 +3050,20 @@ Error RenderingDeviceVulkan::texture_resolve_multisample(RID p_from_texture, RID image_memory_barrier.subresourceRange.baseArrayLayer = dst_tex->base_layer; image_memory_barrier.subresourceRange.layerCount = 1; - vkCmdPipelineBarrier(command_buffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, 0, 0, nullptr, 0, nullptr, 1, &image_memory_barrier); + vkCmdPipelineBarrier(command_buffer, VK_PIPELINE_STAGE_TRANSFER_BIT, barrier_flags, 0, 0, nullptr, 0, nullptr, 1, &image_memory_barrier); } } return OK; } -Error RenderingDeviceVulkan::texture_clear(RID p_texture, const Color &p_color, uint32_t p_base_mipmap, uint32_t p_mipmaps, uint32_t p_base_layer, uint32_t p_layers, bool p_sync_with_draw) { +Error RenderingDeviceVulkan::texture_clear(RID p_texture, const Color &p_color, uint32_t p_base_mipmap, uint32_t p_mipmaps, uint32_t p_base_layer, uint32_t p_layers, uint32_t p_post_barrier) { _THREAD_SAFE_METHOD_ Texture *src_tex = texture_owner.getornull(p_texture); ERR_FAIL_COND_V(!src_tex, ERR_INVALID_PARAMETER); - ERR_FAIL_COND_V_MSG(p_sync_with_draw && src_tex->bound, ERR_INVALID_PARAMETER, + ERR_FAIL_COND_V_MSG(src_tex->bound, ERR_INVALID_PARAMETER, "Source texture can't be cleared while a render pass that uses it is being created. Ensure render pass is finalized (and that it was created with RENDER_PASS_CONTENTS_FINISH) to unbind this texture."); ERR_FAIL_COND_V(p_layers == 0, ERR_INVALID_PARAMETER); @@ -2930,7 +3080,7 @@ Error RenderingDeviceVulkan::texture_clear(RID p_texture, const Color &p_color, ERR_FAIL_COND_V(p_base_mipmap + p_mipmaps > src_tex->mipmaps, ERR_INVALID_PARAMETER); ERR_FAIL_COND_V(p_base_layer + p_layers > src_layer_count, ERR_INVALID_PARAMETER); - VkCommandBuffer command_buffer = p_sync_with_draw ? frames[frame].draw_command_buffer : frames[frame].setup_command_buffer; + VkCommandBuffer command_buffer = frames[frame].draw_command_buffer; VkImageLayout clear_layout = (src_tex->layout == VK_IMAGE_LAYOUT_GENERAL) ? 
VK_IMAGE_LAYOUT_GENERAL : VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL; @@ -2977,11 +3127,27 @@ Error RenderingDeviceVulkan::texture_clear(RID p_texture, const Color &p_color, vkCmdClearColorImage(command_buffer, src_tex->image, clear_layout, &clear_color, 1, &range); { // Barrier to post clear accesses (changing back the layout if needed) + + uint32_t barrier_flags = 0; + uint32_t access_flags = 0; + if (p_post_barrier & BARRIER_MASK_COMPUTE) { + barrier_flags |= VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT; + access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT; + } + if (p_post_barrier & BARRIER_MASK_RASTER) { + barrier_flags |= VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT; + access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT; + } + if (p_post_barrier & BARRIER_MASK_TRANSFER) { + barrier_flags |= VK_PIPELINE_STAGE_TRANSFER_BIT; + access_flags |= VK_ACCESS_TRANSFER_WRITE_BIT; + } + VkImageMemoryBarrier image_memory_barrier; image_memory_barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER; image_memory_barrier.pNext = nullptr; image_memory_barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT; - image_memory_barrier.dstAccessMask = valid_texture_access; + image_memory_barrier.dstAccessMask = access_flags; image_memory_barrier.oldLayout = clear_layout; image_memory_barrier.newLayout = src_tex->layout; @@ -2994,7 +3160,7 @@ Error RenderingDeviceVulkan::texture_clear(RID p_texture, const Color &p_color, image_memory_barrier.subresourceRange.baseArrayLayer = src_tex->base_layer + p_base_layer; image_memory_barrier.subresourceRange.layerCount = p_layers; - vkCmdPipelineBarrier(command_buffer, VK_PIPELINE_STAGE_TRANSFER_BIT, valid_texture_stages, 0, 0, nullptr, 0, nullptr, 1, &image_memory_barrier); + vkCmdPipelineBarrier(command_buffer, VK_PIPELINE_STAGE_TRANSFER_BIT, barrier_flags, 0, 0, nullptr, 0, nullptr, 1, &image_memory_barrier); } return OK; @@ -3082,6 +3248,7 @@ VkRenderPass RenderingDeviceVulkan::_render_pass_create(const Vector<AttachmentF // the read. If this is a performance issue, one could track the actual last accessor of each resource, adding only that // stage switch (is_depth_stencil ? p_initial_depth_action : p_initial_color_action) { + case INITIAL_ACTION_CLEAR_REGION: case INITIAL_ACTION_CLEAR: { description.loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR; description.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_CLEAR; @@ -3094,9 +3261,9 @@ VkRenderPass RenderingDeviceVulkan::_render_pass_create(const Vector<AttachmentF description.initialLayout = is_sampled ? VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL : (is_storage ? VK_IMAGE_LAYOUT_GENERAL : VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL); description.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE; } else if (p_format[i].usage_flags & TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) { - description.loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR; - description.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; //don't care what is there - description.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_CLEAR; + description.loadOp = VK_ATTACHMENT_LOAD_OP_LOAD; + description.initialLayout = is_sampled ? VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL : (is_storage ? 
VK_IMAGE_LAYOUT_GENERAL : VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL); + description.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_LOAD; dependency_from_external.srcStageMask |= reading_stages; } else { description.loadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE; @@ -3301,11 +3468,8 @@ RenderingDevice::FramebufferFormatID RenderingDeviceVulkan::framebuffer_format_c return id; } -RenderingDevice::FramebufferFormatID RenderingDeviceVulkan::framebuffer_format_create_empty(const Size2i &p_size) { - ERR_FAIL_COND_V(p_size.width <= 0 || p_size.height <= 0, INVALID_FORMAT_ID); - +RenderingDevice::FramebufferFormatID RenderingDeviceVulkan::framebuffer_format_create_empty(TextureSamples p_samples) { FramebufferFormatKey key; - key.empty_size = p_size; const Map<FramebufferFormatKey, FramebufferFormatID>::Element *E = framebuffer_format_cache.find(key); if (E) { @@ -3353,7 +3517,7 @@ RenderingDevice::FramebufferFormatID RenderingDeviceVulkan::framebuffer_format_c fb_format.E = E; fb_format.color_attachments = 0; fb_format.render_pass = render_pass; - fb_format.samples = TEXTURE_SAMPLES_1; + fb_format.samples = p_samples; framebuffer_formats[id] = fb_format; return id; } @@ -3369,10 +3533,10 @@ RenderingDevice::TextureSamples RenderingDeviceVulkan::framebuffer_format_get_te /**** RENDER TARGET ****/ /***********************/ -RID RenderingDeviceVulkan::framebuffer_create_empty(const Size2i &p_size, FramebufferFormatID p_format_check) { +RID RenderingDeviceVulkan::framebuffer_create_empty(const Size2i &p_size, TextureSamples p_samples, FramebufferFormatID p_format_check) { _THREAD_SAFE_METHOD_ Framebuffer framebuffer; - framebuffer.format_id = framebuffer_format_create_empty(p_size); + framebuffer.format_id = framebuffer_format_create_empty(p_samples); ERR_FAIL_COND_V(p_format_check != INVALID_FORMAT_ID && framebuffer.format_id != p_format_check, RID()); framebuffer.size = p_size; @@ -3484,7 +3648,7 @@ RID RenderingDeviceVulkan::sampler_create(const SamplerState &p_state) { /**** VERTEX ARRAY ****/ /**********************/ -RID RenderingDeviceVulkan::vertex_buffer_create(uint32_t p_size_bytes, const Vector<uint8_t> &p_data) { +RID RenderingDeviceVulkan::vertex_buffer_create(uint32_t p_size_bytes, const Vector<uint8_t> &p_data, bool p_use_as_storage) { _THREAD_SAFE_METHOD_ ERR_FAIL_COND_V(p_data.size() && (uint32_t)p_data.size() != p_size_bytes, RID()); @@ -3493,8 +3657,12 @@ RID RenderingDeviceVulkan::vertex_buffer_create(uint32_t p_size_bytes, const Vec ERR_FAIL_COND_V_MSG(compute_list != nullptr && p_data.size(), RID(), "Creating buffers with data is forbidden during creation of a draw list"); + uint32_t usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_VERTEX_BUFFER_BIT; + if (p_use_as_storage) { + usage |= VK_BUFFER_USAGE_STORAGE_BUFFER_BIT; + } Buffer buffer; - _buffer_allocate(&buffer, p_size_bytes, VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_VERTEX_BUFFER_BIT, VMA_MEMORY_USAGE_GPU_ONLY); + _buffer_allocate(&buffer, p_size_bytes, usage, VMA_MEMORY_USAGE_GPU_ONLY); if (p_data.size()) { uint64_t data_size = p_data.size(); const uint8_t *r = p_data.ptr(); @@ -3528,7 +3696,7 @@ RenderingDevice::VertexFormatID RenderingDeviceVulkan::vertex_format_create(cons ERR_FAIL_COND_V(used_locations.has(p_vertex_formats[i].location), INVALID_ID); ERR_FAIL_COND_V_MSG(get_format_vertex_size(p_vertex_formats[i].format) == 0, INVALID_ID, - "Data format for attachment (" + itos(i) + ") is not valid for a vertex array."); + "Data format for 
attachment (" + itos(i) + "), '" + named_formats[p_vertex_formats[i].format] + "', is not valid for a vertex array."); vdcache.bindings[i].binding = i; vdcache.bindings[i].stride = p_vertex_formats[i].stride; @@ -3733,13 +3901,11 @@ String RenderingDeviceVulkan::_shader_uniform_debug(RID p_shader, int p_set) { } #if 0 bool RenderingDeviceVulkan::_uniform_add_binding(Vector<Vector<VkDescriptorSetLayoutBinding> > &bindings, Vector<Vector<UniformInfo> > &uniform_infos, const glslang::TObjectReflection &reflection, RenderingDevice::ShaderStage p_stage, Shader::PushConstant &push_constant, String *r_error) { - VkDescriptorSetLayoutBinding layout_binding; UniformInfo info; switch (reflection.getType()->getBasicType()) { case glslang::EbtSampler: { - //print_line("DEBUG: IsSampler"); if (reflection.getType()->getSampler().dim == glslang::EsdBuffer) { //texture buffers @@ -3837,13 +4003,10 @@ bool RenderingDeviceVulkan::_uniform_add_binding(Vector<Vector<VkDescriptorSetLa } break; /*case glslang::EbtReference: { - } break;*/ /*case glslang::EbtAtomicUint: { - } break;*/ default: { - if (reflection.getType()->getQualifier().hasOffset() || reflection.name.find(".") != std::string::npos) { //member of uniform block? return true; @@ -4040,6 +4203,10 @@ RID RenderingDeviceVulkan::shader_create(const Vector<ShaderStageData> &p_stages layout_binding.descriptorType = VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT; info.type = UNIFORM_TYPE_INPUT_ATTACHMENT; } break; + case SPV_REFLECT_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR: { + ERR_PRINT("Acceleration structure not supported."); + continue; + } break; } if (need_array_dimensions) { @@ -4565,7 +4732,7 @@ void RenderingDeviceVulkan::_descriptor_pool_free(const DescriptorPoolKey &p_key vkDestroyDescriptorPool(device, p_pool->pool, nullptr); descriptor_pools[p_key].erase(p_pool); memdelete(p_pool); - if (descriptor_pools[p_key].empty()) { + if (descriptor_pools[p_key].is_empty()) { descriptor_pools.erase(p_key); } } @@ -4599,7 +4766,7 @@ RID RenderingDeviceVulkan::uniform_set_create(const Vector<Uniform> &p_uniforms, List<Vector<VkBufferView>> buffer_views; List<Vector<VkDescriptorImageInfo>> image_infos; //used for verification to make sure a uniform set does not use a framebuffer bound texture - Vector<RID> attachable_textures; + LocalVector<UniformSet::AttachableTexture> attachable_textures; Vector<Texture *> mutable_sampled_textures; Vector<Texture *> mutable_storage_textures; @@ -4616,8 +4783,8 @@ RID RenderingDeviceVulkan::uniform_set_create(const Vector<Uniform> &p_uniforms, const Uniform &uniform = uniforms[uniform_idx]; - ERR_FAIL_COND_V_MSG(uniform.type != set_uniform.type, RID(), - "Mismatch uniform type for binding (" + itos(set_uniform.binding) + "), set (" + itos(p_shader_set) + "). Expected '" + shader_uniform_names[set_uniform.type] + "', supplied: '" + shader_uniform_names[uniform.type] + "'."); + ERR_FAIL_COND_V_MSG(uniform.uniform_type != set_uniform.type, RID(), + "Mismatch uniform type for binding (" + itos(set_uniform.binding) + "), set (" + itos(p_shader_set) + "). 
Expected '" + shader_uniform_names[set_uniform.type] + "', supplied: '" + shader_uniform_names[uniform.uniform_type] + "'."); VkWriteDescriptorSet write; //common header write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; @@ -4632,7 +4799,7 @@ RID RenderingDeviceVulkan::uniform_set_create(const Vector<Uniform> &p_uniforms, write.pTexelBufferView = nullptr; uint32_t type_size = 1; - switch (uniform.type) { + switch (uniform.uniform_type) { case UNIFORM_TYPE_SAMPLER: { if (uniform.ids.size() != set_uniform.length) { if (set_uniform.length > 1) { @@ -4692,7 +4859,10 @@ RID RenderingDeviceVulkan::uniform_set_create(const Vector<Uniform> &p_uniforms, img_info.imageView = texture->view; if (texture->usage_flags & (TEXTURE_USAGE_COLOR_ATTACHMENT_BIT | TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT | TEXTURE_USAGE_RESOLVE_ATTACHMENT_BIT)) { - attachable_textures.push_back(texture->owner.is_valid() ? texture->owner : uniform.ids[j + 1]); + UniformSet::AttachableTexture attachable_texture; + attachable_texture.bind = set_uniform.binding; + attachable_texture.texture = texture->owner.is_valid() ? texture->owner : uniform.ids[j + 1]; + attachable_textures.push_back(attachable_texture); } if (texture->usage_flags & TEXTURE_USAGE_STORAGE_BIT) { @@ -4742,7 +4912,10 @@ RID RenderingDeviceVulkan::uniform_set_create(const Vector<Uniform> &p_uniforms, img_info.imageView = texture->view; if (texture->usage_flags & (TEXTURE_USAGE_COLOR_ATTACHMENT_BIT | TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT | TEXTURE_USAGE_RESOLVE_ATTACHMENT_BIT)) { - attachable_textures.push_back(texture->owner.is_valid() ? texture->owner : uniform.ids[j]); + UniformSet::AttachableTexture attachable_texture; + attachable_texture.bind = set_uniform.binding; + attachable_texture.texture = texture->owner.is_valid() ? 
texture->owner : uniform.ids[j]; + attachable_textures.push_back(attachable_texture); } if (texture->usage_flags & TEXTURE_USAGE_STORAGE_BIT) { @@ -4915,7 +5088,15 @@ RID RenderingDeviceVulkan::uniform_set_create(const Vector<Uniform> &p_uniforms, ERR_FAIL_COND_V_MSG(uniform.ids.size() != 1, RID(), "Storage buffer supplied (binding: " + itos(uniform.binding) + ") must provide one ID (" + itos(uniform.ids.size()) + " provided)."); - Buffer *buffer = storage_buffer_owner.getornull(uniform.ids[0]); + Buffer *buffer = nullptr; + + if (storage_buffer_owner.owns(uniform.ids[0])) { + buffer = storage_buffer_owner.getornull(uniform.ids[0]); + } else if (vertex_buffer_owner.owns(uniform.ids[0])) { + buffer = vertex_buffer_owner.getornull(uniform.ids[0]); + + ERR_FAIL_COND_V_MSG(!(buffer->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT), RID(), "Vertex buffer supplied (binding: " + itos(uniform.binding) + ") was not created with storage flag."); + } ERR_FAIL_COND_V_MSG(!buffer, RID(), "Storage buffer supplied (binding: " + itos(uniform.binding) + ") is invalid."); //if 0, then its sized on link time @@ -5001,19 +5182,22 @@ bool RenderingDeviceVulkan::uniform_set_is_valid(RID p_uniform_set) { return uniform_set_owner.owns(p_uniform_set); } -Error RenderingDeviceVulkan::buffer_update(RID p_buffer, uint32_t p_offset, uint32_t p_size, const void *p_data, bool p_sync_with_draw) { +Error RenderingDeviceVulkan::buffer_update(RID p_buffer, uint32_t p_offset, uint32_t p_size, const void *p_data, uint32_t p_post_barrier) { _THREAD_SAFE_METHOD_ - ERR_FAIL_COND_V_MSG(draw_list && p_sync_with_draw, ERR_INVALID_PARAMETER, - "Updating buffers in 'sync to draw' mode is forbidden during creation of a draw list"); - ERR_FAIL_COND_V_MSG(compute_list && p_sync_with_draw, ERR_INVALID_PARAMETER, - "Updating buffers in 'sync to draw' mode is forbidden during creation of a compute list"); - - // Protect subsequent updates... - VkPipelineStageFlags dst_stage_mask = VK_PIPELINE_STAGE_TRANSFER_BIT; - VkAccessFlags dst_access = VK_ACCESS_TRANSFER_WRITE_BIT; + ERR_FAIL_COND_V_MSG(draw_list, ERR_INVALID_PARAMETER, + "Updating buffers is forbidden during creation of a draw list"); + ERR_FAIL_COND_V_MSG(compute_list, ERR_INVALID_PARAMETER, + "Updating buffers is forbidden during creation of a compute list"); - Buffer *buffer = _get_buffer_from_owner(p_buffer, dst_stage_mask, dst_access); + VkPipelineStageFlags dst_stage_mask = 0; + VkAccessFlags dst_access = 0; + if (p_post_barrier & BARRIER_MASK_TRANSFER) { + // Protect subsequent updates... 
+ dst_stage_mask = VK_PIPELINE_STAGE_TRANSFER_BIT; + dst_access = VK_ACCESS_TRANSFER_WRITE_BIT; + } + Buffer *buffer = _get_buffer_from_owner(p_buffer, dst_stage_mask, dst_access, p_post_barrier); if (!buffer) { ERR_FAIL_V_MSG(ERR_INVALID_PARAMETER, "Buffer argument is not a valid buffer of any type."); } @@ -5021,20 +5205,61 @@ Error RenderingDeviceVulkan::buffer_update(RID p_buffer, uint32_t p_offset, uint ERR_FAIL_COND_V_MSG(p_offset + p_size > buffer->size, ERR_INVALID_PARAMETER, "Attempted to write buffer (" + itos((p_offset + p_size) - buffer->size) + " bytes) past the end."); - _buffer_memory_barrier(buffer->buffer, p_offset, p_size, dst_stage_mask, VK_PIPELINE_STAGE_TRANSFER_BIT, dst_access, VK_ACCESS_TRANSFER_WRITE_BIT, p_sync_with_draw); - Error err = _buffer_update(buffer, p_offset, (uint8_t *)p_data, p_size, p_sync_with_draw); + // no barrier should be needed here + // _buffer_memory_barrier(buffer->buffer, p_offset, p_size, dst_stage_mask, VK_PIPELINE_STAGE_TRANSFER_BIT, dst_access, VK_ACCESS_TRANSFER_WRITE_BIT, true); + + Error err = _buffer_update(buffer, p_offset, (uint8_t *)p_data, p_size, p_post_barrier); if (err) { return err; } #ifdef FORCE_FULL_BARRIER + _full_barrier(true); #else - _buffer_memory_barrier(buffer->buffer, p_offset, p_size, VK_PIPELINE_STAGE_TRANSFER_BIT, dst_stage_mask, VK_ACCESS_TRANSFER_WRITE_BIT, dst_access, p_sync_with_draw); + _buffer_memory_barrier(buffer->buffer, p_offset, p_size, VK_PIPELINE_STAGE_TRANSFER_BIT, dst_stage_mask, VK_ACCESS_TRANSFER_WRITE_BIT, dst_access, true); #endif return err; } +Error RenderingDeviceVulkan::buffer_clear(RID p_buffer, uint32_t p_offset, uint32_t p_size, uint32_t p_post_barrier) { + _THREAD_SAFE_METHOD_ + + ERR_FAIL_COND_V_MSG((p_size % 4) != 0, ERR_INVALID_PARAMETER, + "Size must be a multiple of four"); + ERR_FAIL_COND_V_MSG(draw_list, ERR_INVALID_PARAMETER, + "Updating buffers is forbidden during creation of a draw list"); + ERR_FAIL_COND_V_MSG(compute_list, ERR_INVALID_PARAMETER, + "Updating buffers is forbidden during creation of a compute list"); + + VkPipelineStageFlags dst_stage_mask = 0; + VkAccessFlags dst_access = 0; + if (p_post_barrier & BARRIER_MASK_TRANSFER) { + // Protect subsequent updates... 
+ dst_stage_mask = VK_PIPELINE_STAGE_TRANSFER_BIT; + dst_access = VK_ACCESS_TRANSFER_WRITE_BIT; + } + + Buffer *buffer = _get_buffer_from_owner(p_buffer, dst_stage_mask, dst_access, p_post_barrier); + if (!buffer) { + ERR_FAIL_V_MSG(ERR_INVALID_PARAMETER, "Buffer argument is not a valid buffer of any type."); + } + + ERR_FAIL_COND_V_MSG(p_offset + p_size > buffer->size, ERR_INVALID_PARAMETER, + "Attempted to write buffer (" + itos((p_offset + p_size) - buffer->size) + " bytes) past the end."); + + // should not be needed + // _buffer_memory_barrier(buffer->buffer, p_offset, p_size, dst_stage_mask, VK_PIPELINE_STAGE_TRANSFER_BIT, dst_access, VK_ACCESS_TRANSFER_WRITE_BIT, p_post_barrier); + + vkCmdFillBuffer(frames[frame].draw_command_buffer, buffer->buffer, p_offset, p_size, 0); + +#ifdef FORCE_FULL_BARRIER + _full_barrier(true); +#else + _buffer_memory_barrier(buffer->buffer, p_offset, p_size, VK_PIPELINE_STAGE_TRANSFER_BIT, dst_stage_mask, VK_ACCESS_TRANSFER_WRITE_BIT, dst_access, p_post_barrier); +#endif + return OK; +} + Vector<uint8_t> RenderingDeviceVulkan::buffer_get_data(RID p_buffer) { _THREAD_SAFE_METHOD_ @@ -5042,7 +5267,7 @@ Vector<uint8_t> RenderingDeviceVulkan::buffer_get_data(RID p_buffer) { VkPipelineShaderStageCreateFlags src_stage_mask = VK_PIPELINE_STAGE_TRANSFER_BIT; VkAccessFlags src_access_mask = VK_ACCESS_TRANSFER_WRITE_BIT; // Get the vulkan buffer and the potential stage/access possible - Buffer *buffer = _get_buffer_from_owner(p_buffer, src_stage_mask, src_access_mask); + Buffer *buffer = _get_buffer_from_owner(p_buffer, src_stage_mask, src_access_mask, BARRIER_MASK_ALL); if (!buffer) { ERR_FAIL_V_MSG(Vector<uint8_t>(), "Buffer is either invalid or this type of buffer can't be retrieved. Only Index and Vertex buffers allow retrieving."); } @@ -5603,7 +5828,7 @@ RenderingDevice::DrawListID RenderingDeviceVulkan::draw_list_begin_for_screen(Di vkCmdSetScissor(command_buffer, 0, 1, &scissor); - return ID_TYPE_DRAW_LIST; + return int64_t(ID_TYPE_DRAW_LIST) << ID_BASE_SHIFT; } Error RenderingDeviceVulkan::_draw_list_setup_framebuffer(Framebuffer *p_framebuffer, InitialAction p_initial_color_action, FinalAction p_final_color_action, InitialAction p_initial_depth_action, FinalAction p_final_depth_action, VkFramebuffer *r_framebuffer, VkRenderPass *r_render_pass) { @@ -5656,11 +5881,18 @@ Error RenderingDeviceVulkan::_draw_list_render_pass_begin(Framebuffer *framebuff render_pass_begin.pNext = nullptr; render_pass_begin.renderPass = render_pass; render_pass_begin.framebuffer = vkframebuffer; - + /* + * Given how the API works, it makes sense to always operate on the whole framebuffer. + * This allows continue operations (such as shadow mapping) to work better. 
render_pass_begin.renderArea.extent.width = viewport_size.width; render_pass_begin.renderArea.extent.height = viewport_size.height; render_pass_begin.renderArea.offset.x = viewport_offset.x; render_pass_begin.renderArea.offset.y = viewport_offset.y; + */ + render_pass_begin.renderArea.extent.width = framebuffer->size.width; + render_pass_begin.renderArea.extent.height = framebuffer->size.height; + render_pass_begin.renderArea.offset.x = 0; + render_pass_begin.renderArea.offset.y = 0; Vector<VkClearValue> clear_values; clear_values.resize(framebuffer->texture_ids.size()); @@ -5809,11 +6041,11 @@ RenderingDevice::DrawListID RenderingDeviceVulkan::draw_list_begin(RID p_framebu viewport_offset = regioni.position; viewport_size = regioni.size; - if (p_initial_color_action == INITIAL_ACTION_CLEAR) { + if (p_initial_color_action == INITIAL_ACTION_CLEAR_REGION) { needs_clear_color = true; p_initial_color_action = INITIAL_ACTION_KEEP; } - if (p_initial_depth_action == INITIAL_ACTION_CLEAR) { + if (p_initial_depth_action == INITIAL_ACTION_CLEAR_REGION) { needs_clear_depth = true; p_initial_depth_action = INITIAL_ACTION_KEEP; } @@ -5870,7 +6102,7 @@ RenderingDevice::DrawListID RenderingDeviceVulkan::draw_list_begin(RID p_framebu vkCmdSetScissor(command_buffer, 0, 1, &scissor); draw_list->viewport = Rect2i(viewport_offset, viewport_size); - return ID_TYPE_DRAW_LIST; + return int64_t(ID_TYPE_DRAW_LIST) << ID_BASE_SHIFT; } Error RenderingDeviceVulkan::draw_list_begin_split(RID p_framebuffer, uint32_t p_splits, DrawListID *r_split_ids, InitialAction p_initial_color_action, FinalAction p_final_color_action, InitialAction p_initial_depth_action, FinalAction p_final_depth_action, const Vector<Color> &p_clear_color_values, float p_clear_depth, uint32_t p_clear_stencil, const Rect2 &p_region, const Vector<RID> &p_storage_textures) { @@ -5899,11 +6131,11 @@ Error RenderingDeviceVulkan::draw_list_begin_split(RID p_framebuffer, uint32_t p viewport_offset = regioni.position; viewport_size = regioni.size; - if (p_initial_color_action == INITIAL_ACTION_CLEAR) { + if (p_initial_color_action == INITIAL_ACTION_CLEAR_REGION) { needs_clear_color = true; p_initial_color_action = INITIAL_ACTION_KEEP; } - if (p_initial_depth_action == INITIAL_ACTION_CLEAR) { + if (p_initial_depth_action == INITIAL_ACTION_CLEAR_REGION) { needs_clear_depth = true; p_initial_depth_action = INITIAL_ACTION_KEEP; } @@ -5967,7 +6199,7 @@ Error RenderingDeviceVulkan::draw_list_begin_split(RID p_framebuffer, uint32_t p for (uint32_t i = 0; i < p_splits; i++) { //take a command buffer and initialize it - VkCommandBuffer command_buffer = split_draw_list_allocators[p_splits].command_buffers[frame]; + VkCommandBuffer command_buffer = split_draw_list_allocators[i].command_buffers[frame]; VkCommandBufferInheritanceInfo inheritance_info; inheritance_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO; @@ -6025,7 +6257,7 @@ Error RenderingDeviceVulkan::draw_list_begin_split(RID p_framebuffer, uint32_t p scissor.extent.height = viewport_size.height; vkCmdSetScissor(command_buffer, 0, 1, &scissor); - r_split_ids[i] = (DrawListID(1) << DrawListID(ID_TYPE_SPLIT_DRAW_LIST)) + i; + r_split_ids[i] = (int64_t(ID_TYPE_SPLIT_DRAW_LIST) << ID_BASE_SHIFT) + i; draw_list[i].viewport = Rect2i(viewport_offset, viewport_size); } @@ -6040,7 +6272,7 @@ RenderingDeviceVulkan::DrawList *RenderingDeviceVulkan::_get_draw_list_ptr(DrawL if (!draw_list) { return nullptr; - } else if (p_id == ID_TYPE_DRAW_LIST) { + } else if (p_id == (int64_t(ID_TYPE_DRAW_LIST) << 
ID_BASE_SHIFT)) { if (draw_list_split) { return nullptr; } @@ -6134,7 +6366,7 @@ void RenderingDeviceVulkan::draw_list_bind_render_pipeline(DrawListID p_list, RI void RenderingDeviceVulkan::draw_list_bind_uniform_set(DrawListID p_list, RID p_uniform_set, uint32_t p_index) { #ifdef DEBUG_ENABLED - ERR_FAIL_COND_MSG(p_index >= limits.maxBoundDescriptorSets || p_index > MAX_UNIFORM_SETS, + ERR_FAIL_COND_MSG(p_index >= limits.maxBoundDescriptorSets || p_index >= MAX_UNIFORM_SETS, "Attempting to bind a descriptor set (" + itos(p_index) + ") greater than what the hardware supports (" + itos(limits.maxBoundDescriptorSets) + ")."); #endif DrawList *dl = _get_draw_list_ptr(p_list); @@ -6159,13 +6391,13 @@ void RenderingDeviceVulkan::draw_list_bind_uniform_set(DrawListID p_list, RID p_ #ifdef DEBUG_ENABLED { //validate that textures bound are not attached as framebuffer bindings uint32_t attachable_count = uniform_set->attachable_textures.size(); - const RID *attachable_ptr = uniform_set->attachable_textures.ptr(); + const UniformSet::AttachableTexture *attachable_ptr = uniform_set->attachable_textures.ptr(); uint32_t bound_count = draw_list_bound_textures.size(); const RID *bound_ptr = draw_list_bound_textures.ptr(); for (uint32_t i = 0; i < attachable_count; i++) { for (uint32_t j = 0; j < bound_count; j++) { - ERR_FAIL_COND_MSG(attachable_ptr[i] == bound_ptr[j], - "Attempted to use the same texture in framebuffer attachment and a uniform set, this is not allowed."); + ERR_FAIL_COND_MSG(attachable_ptr[i].texture == bound_ptr[j], + "Attempted to use the same texture in framebuffer attachment and a uniform (set: " + itos(p_index) + ", binding: " + itos(attachable_ptr[i].bind) + "), this is not allowed."); } } } @@ -6369,7 +6601,7 @@ void RenderingDeviceVulkan::draw_list_enable_scissor(DrawListID p_list, const Re Rect2i rect = p_rect; rect.position += dl->viewport.position; - rect = dl->viewport.clip(rect); + rect = dl->viewport.intersection(rect); if (rect.get_area() == 0) { return; @@ -6398,7 +6630,7 @@ void RenderingDeviceVulkan::draw_list_disable_scissor(DrawListID p_list) { vkCmdSetScissor(dl->command_buffer, 0, 1, &scissor); } -void RenderingDeviceVulkan::draw_list_end() { +void RenderingDeviceVulkan::draw_list_end(uint32_t p_post_barrier) { _THREAD_SAFE_METHOD_ ERR_FAIL_COND_MSG(!draw_list, "Immediate draw list is already inactive."); @@ -6407,8 +6639,8 @@ void RenderingDeviceVulkan::draw_list_end() { //send all command buffers VkCommandBuffer *command_buffers = (VkCommandBuffer *)alloca(sizeof(VkCommandBuffer) * draw_list_count); for (uint32_t i = 0; i < draw_list_count; i++) { - vkEndCommandBuffer(draw_list->command_buffer); - command_buffers[i] = draw_list->command_buffer; + vkEndCommandBuffer(draw_list[i].command_buffer); + command_buffers[i] = draw_list[i].command_buffer; } vkCmdExecuteCommands(frames[frame].draw_command_buffer, draw_list_count, command_buffers); @@ -6434,6 +6666,21 @@ void RenderingDeviceVulkan::draw_list_end() { } } + uint32_t barrier_flags = 0; + uint32_t access_flags = 0; + if (p_post_barrier & BARRIER_MASK_COMPUTE) { + barrier_flags |= VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT; + access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT; + } + if (p_post_barrier & BARRIER_MASK_RASTER) { + barrier_flags |= VK_PIPELINE_STAGE_VERTEX_INPUT_BIT | VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT; + access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT | VK_ACCESS_INDEX_READ_BIT | 
VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT | VK_ACCESS_INDIRECT_COMMAND_READ_BIT; + } + if (p_post_barrier & BARRIER_MASK_TRANSFER) { + barrier_flags |= VK_PIPELINE_STAGE_TRANSFER_BIT; + access_flags |= VK_ACCESS_TRANSFER_WRITE_BIT | VK_ACCESS_TRANSFER_READ_BIT; + } + draw_list_bound_textures.clear(); for (int i = 0; i < draw_list_storage_textures.size(); i++) { @@ -6443,7 +6690,7 @@ void RenderingDeviceVulkan::draw_list_end() { image_memory_barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER; image_memory_barrier.pNext = nullptr; image_memory_barrier.srcAccessMask = VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT; - image_memory_barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT; + image_memory_barrier.dstAccessMask = access_flags; image_memory_barrier.oldLayout = texture->layout; image_memory_barrier.newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; @@ -6456,7 +6703,7 @@ void RenderingDeviceVulkan::draw_list_end() { image_memory_barrier.subresourceRange.baseArrayLayer = texture->base_layer; image_memory_barrier.subresourceRange.layerCount = texture->layers; - vkCmdPipelineBarrier(frames[frame].draw_command_buffer, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, 0, 0, nullptr, 0, nullptr, 1, &image_memory_barrier); + vkCmdPipelineBarrier(frames[frame].draw_command_buffer, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, barrier_flags, 0, 0, nullptr, 0, nullptr, 1, &image_memory_barrier); texture->layout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; } @@ -6470,7 +6717,7 @@ void RenderingDeviceVulkan::draw_list_end() { #ifdef FORCE_FULL_BARRIER _full_barrier(true); #else - _memory_barrier(VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT, VK_PIPELINE_STAGE_VERTEX_INPUT_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT | VK_PIPELINE_STAGE_TRANSFER_BIT, VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, VK_ACCESS_INDEX_READ_BIT | VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT | VK_ACCESS_TRANSFER_READ_BIT | VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT, true); + _memory_barrier(VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT, barrier_flags, VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, access_flags, true); #endif } @@ -6556,7 +6803,7 @@ void RenderingDeviceVulkan::compute_list_bind_uniform_set(ComputeListID p_list, ComputeList *cl = compute_list; #ifdef DEBUG_ENABLED - ERR_FAIL_COND_MSG(p_index >= limits.maxBoundDescriptorSets || p_index > MAX_UNIFORM_SETS, + ERR_FAIL_COND_MSG(p_index >= limits.maxBoundDescriptorSets || p_index >= MAX_UNIFORM_SETS, "Attempting to bind a descriptor set (" + itos(p_index) + ") greater than what the hardware supports (" + itos(limits.maxBoundDescriptorSets) + ")."); #endif @@ -6793,14 +7040,30 @@ void RenderingDeviceVulkan::compute_list_add_barrier(ComputeListID p_list) { #endif } -void RenderingDeviceVulkan::compute_list_end() { +void RenderingDeviceVulkan::compute_list_end(uint32_t p_post_barrier) { ERR_FAIL_COND(!compute_list); + + uint32_t barrier_flags = 0; + uint32_t access_flags = 0; + if (p_post_barrier & BARRIER_MASK_COMPUTE) { + barrier_flags |= VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT; + access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT | VK_ACCESS_INDIRECT_COMMAND_READ_BIT; + } + if (p_post_barrier & 
BARRIER_MASK_RASTER) { + barrier_flags |= VK_PIPELINE_STAGE_VERTEX_INPUT_BIT | VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT; + access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT | VK_ACCESS_INDEX_READ_BIT | VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT | VK_ACCESS_INDIRECT_COMMAND_READ_BIT; + } + if (p_post_barrier & BARRIER_MASK_TRANSFER) { + barrier_flags |= VK_PIPELINE_STAGE_TRANSFER_BIT; + access_flags |= VK_ACCESS_TRANSFER_WRITE_BIT | VK_ACCESS_TRANSFER_READ_BIT; + } + for (Set<Texture *>::Element *E = compute_list->state.textures_to_sampled_layout.front(); E; E = E->next()) { VkImageMemoryBarrier image_memory_barrier; image_memory_barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER; image_memory_barrier.pNext = nullptr; image_memory_barrier.srcAccessMask = VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT; - image_memory_barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_TRANSFER_READ_BIT; + image_memory_barrier.dstAccessMask = access_flags; image_memory_barrier.oldLayout = E->get()->layout; image_memory_barrier.newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; @@ -6814,7 +7077,7 @@ void RenderingDeviceVulkan::compute_list_end() { image_memory_barrier.subresourceRange.layerCount = E->get()->layers; // TODO: Look at the usages in the compute list and determine tighter dst stage and access masks based on some "final" usage equivalent - vkCmdPipelineBarrier(compute_list->command_buffer, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT | VK_PIPELINE_STAGE_TRANSFER_BIT, 0, 0, nullptr, 0, nullptr, 1, &image_memory_barrier); + vkCmdPipelineBarrier(compute_list->command_buffer, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, barrier_flags, 0, 0, nullptr, 0, nullptr, 1, &image_memory_barrier); E->get()->layout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; } @@ -6824,10 +7087,44 @@ void RenderingDeviceVulkan::compute_list_end() { #ifdef FORCE_FULL_BARRIER _full_barrier(true); #else - _memory_barrier(VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT | VK_PIPELINE_STAGE_VERTEX_INPUT_BIT | VK_PIPELINE_STAGE_TRANSFER_BIT | VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT, VK_ACCESS_SHADER_WRITE_BIT, VK_ACCESS_INDEX_READ_BIT | VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT | VK_ACCESS_TRANSFER_READ_BIT | VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_INDIRECT_COMMAND_READ_BIT, true); + _memory_barrier(VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, barrier_flags, VK_ACCESS_SHADER_WRITE_BIT, VK_ACCESS_INDEX_READ_BIT | VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT | VK_ACCESS_TRANSFER_READ_BIT | VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_INDIRECT_COMMAND_READ_BIT, true); #endif } +void RenderingDeviceVulkan::barrier(uint32_t p_from, uint32_t p_to) { + uint32_t src_barrier_flags = 0; + uint32_t src_access_flags = 0; + if (p_from & BARRIER_MASK_COMPUTE) { + src_barrier_flags |= VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT; + src_access_flags |= VK_ACCESS_SHADER_WRITE_BIT; + } + if (p_from & BARRIER_MASK_RASTER) { + src_barrier_flags |= VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT; + src_access_flags |= VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT; + } + if (p_from & BARRIER_MASK_TRANSFER) { + src_barrier_flags |= VK_PIPELINE_STAGE_TRANSFER_BIT; + src_access_flags |= 
VK_ACCESS_TRANSFER_WRITE_BIT; + } + + uint32_t dst_barrier_flags = 0; + uint32_t dst_access_flags = 0; + if (p_to & BARRIER_MASK_COMPUTE) { + dst_barrier_flags |= VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT; + dst_access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT | VK_ACCESS_INDIRECT_COMMAND_READ_BIT; + } + if (p_to & BARRIER_MASK_RASTER) { + dst_barrier_flags |= VK_PIPELINE_STAGE_VERTEX_INPUT_BIT | VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT; + dst_access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT | VK_ACCESS_INDEX_READ_BIT | VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT | VK_ACCESS_INDIRECT_COMMAND_READ_BIT; + } + if (p_to & BARRIER_MASK_TRANSFER) { + dst_barrier_flags |= VK_PIPELINE_STAGE_TRANSFER_BIT; + dst_access_flags |= VK_ACCESS_TRANSFER_WRITE_BIT | VK_ACCESS_TRANSFER_READ_BIT; + } + + _memory_barrier(src_barrier_flags, dst_barrier_flags, src_access_flags, dst_access_flags, true); +} + void RenderingDeviceVulkan::full_barrier() { #ifndef DEBUG_ENABLED ERR_PRINT("Full barrier is debug-only, should not be used in production"); @@ -6837,7 +7134,6 @@ void RenderingDeviceVulkan::full_barrier() { #if 0 void RenderingDeviceVulkan::draw_list_render_secondary_to_framebuffer(ID p_framebuffer, ID *p_draw_lists, uint32_t p_draw_list_count, InitialAction p_initial_action, FinalAction p_final_action, const Vector<Variant> &p_clear_colors) { - VkCommandBuffer frame_cmdbuf = frames[frame].frame_buffer; ERR_FAIL_COND(!frame_cmdbuf); @@ -6866,7 +7162,6 @@ void RenderingDeviceVulkan::draw_list_render_secondary_to_framebuffer(ID p_frame ID screen_format = screen_get_framebuffer_format(); { - VkCommandBuffer *command_buffers = (VkCommandBuffer *)alloca(sizeof(VkCommandBuffer) * p_draw_list_count); uint32_t command_buffer_count = 0; @@ -6890,7 +7185,6 @@ void RenderingDeviceVulkan::draw_list_render_secondary_to_framebuffer(ID p_frame } vkCmdEndRenderPass(frame_cmdbuf); - } #endif @@ -6965,6 +7259,72 @@ void RenderingDeviceVulkan::free(RID p_id) { _free_internal(p_id); } +// The full list of resources that can be named is in the VkObjectType enum +// We just expose the resources that are owned and can be accessed easily. 
+void RenderingDeviceVulkan::set_resource_name(RID p_id, const String p_name) { + if (texture_owner.owns(p_id)) { + Texture *texture = texture_owner.getornull(p_id); + if (texture->owner.is_null()) { + // Don't set the source texture's name when calling on a texture view + context->set_object_name(VK_OBJECT_TYPE_IMAGE, uint64_t(texture->image), p_name); + } + context->set_object_name(VK_OBJECT_TYPE_IMAGE_VIEW, uint64_t(texture->view), p_name + " View"); + } else if (framebuffer_owner.owns(p_id)) { + //Framebuffer *framebuffer = framebuffer_owner.getornull(p_id); + // Not implemented for now as the relationship between Framebuffer and RenderPass is very complex + } else if (sampler_owner.owns(p_id)) { + VkSampler *sampler = sampler_owner.getornull(p_id); + context->set_object_name(VK_OBJECT_TYPE_SAMPLER, uint64_t(*sampler), p_name); + } else if (vertex_buffer_owner.owns(p_id)) { + Buffer *vertex_buffer = vertex_buffer_owner.getornull(p_id); + context->set_object_name(VK_OBJECT_TYPE_BUFFER, uint64_t(vertex_buffer->buffer), p_name); + } else if (index_buffer_owner.owns(p_id)) { + IndexBuffer *index_buffer = index_buffer_owner.getornull(p_id); + context->set_object_name(VK_OBJECT_TYPE_BUFFER, uint64_t(index_buffer->buffer), p_name); + } else if (shader_owner.owns(p_id)) { + Shader *shader = shader_owner.getornull(p_id); + context->set_object_name(VK_OBJECT_TYPE_PIPELINE_LAYOUT, uint64_t(shader->pipeline_layout), p_name + " Pipeline Layout"); + for (int i = 0; i < shader->sets.size(); i++) { + context->set_object_name(VK_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT, uint64_t(shader->sets[i].descriptor_set_layout), p_name); + } + } else if (uniform_buffer_owner.owns(p_id)) { + Buffer *uniform_buffer = uniform_buffer_owner.getornull(p_id); + context->set_object_name(VK_OBJECT_TYPE_BUFFER, uint64_t(uniform_buffer->buffer), p_name); + } else if (texture_buffer_owner.owns(p_id)) { + TextureBuffer *texture_buffer = texture_buffer_owner.getornull(p_id); + context->set_object_name(VK_OBJECT_TYPE_BUFFER, uint64_t(texture_buffer->buffer.buffer), p_name); + context->set_object_name(VK_OBJECT_TYPE_BUFFER_VIEW, uint64_t(texture_buffer->view), p_name + " View"); + } else if (storage_buffer_owner.owns(p_id)) { + Buffer *storage_buffer = storage_buffer_owner.getornull(p_id); + context->set_object_name(VK_OBJECT_TYPE_BUFFER, uint64_t(storage_buffer->buffer), p_name); + } else if (uniform_set_owner.owns(p_id)) { + UniformSet *uniform_set = uniform_set_owner.getornull(p_id); + context->set_object_name(VK_OBJECT_TYPE_DESCRIPTOR_SET, uint64_t(uniform_set->descriptor_set), p_name); + } else if (render_pipeline_owner.owns(p_id)) { + RenderPipeline *pipeline = render_pipeline_owner.getornull(p_id); + context->set_object_name(VK_OBJECT_TYPE_PIPELINE, uint64_t(pipeline->pipeline), p_name); + context->set_object_name(VK_OBJECT_TYPE_PIPELINE_LAYOUT, uint64_t(pipeline->pipeline_layout), p_name + " Layout"); + } else if (compute_pipeline_owner.owns(p_id)) { + ComputePipeline *pipeline = compute_pipeline_owner.getornull(p_id); + context->set_object_name(VK_OBJECT_TYPE_PIPELINE, uint64_t(pipeline->pipeline), p_name); + context->set_object_name(VK_OBJECT_TYPE_PIPELINE_LAYOUT, uint64_t(pipeline->pipeline_layout), p_name + " Layout"); + } else { + ERR_PRINT("Attempted to name invalid ID: " + itos(p_id.get_id())); + } +} + +void RenderingDeviceVulkan::draw_command_begin_label(String p_label_name, const Color p_color) { + context->command_begin_label(frames[frame].draw_command_buffer, p_label_name, p_color); +} + +void 
RenderingDeviceVulkan::draw_command_insert_label(String p_label_name, const Color p_color) { + context->command_insert_label(frames[frame].draw_command_buffer, p_label_name, p_color); +} + +void RenderingDeviceVulkan::draw_command_end_label() { + context->command_end_label(frames[frame].draw_command_buffer); +} + void RenderingDeviceVulkan::_finalize_command_bufers() { if (draw_list) { ERR_PRINT("Found open draw list at the end of the frame, this should never happen (further drawing will likely not work)."); @@ -7383,9 +7743,10 @@ void RenderingDeviceVulkan::_free_rids(T &p_owner, const char *p_type) { } } -void RenderingDeviceVulkan::capture_timestamp(const String &p_name, bool p_sync_to_draw) { +void RenderingDeviceVulkan::capture_timestamp(const String &p_name) { ERR_FAIL_COND(frames[frame].timestamp_count >= max_timestamp_query_elements); + //this should be optional for profiling, else it will slow things down { VkMemoryBarrier memoryBarrier; @@ -7422,9 +7783,10 @@ void RenderingDeviceVulkan::capture_timestamp(const String &p_name, bool p_sync_ VK_ACCESS_HOST_READ_BIT | VK_ACCESS_HOST_WRITE_BIT; - vkCmdPipelineBarrier(p_sync_to_draw ? frames[frame].draw_command_buffer : frames[frame].setup_command_buffer, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, 1, &memoryBarrier, 0, nullptr, 0, nullptr); + vkCmdPipelineBarrier(frames[frame].draw_command_buffer, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, 1, &memoryBarrier, 0, nullptr, 0, nullptr); } - vkCmdWriteTimestamp(p_sync_to_draw ? frames[frame].draw_command_buffer : frames[frame].setup_command_buffer, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, frames[frame].timestamp_pool, frames[frame].timestamp_count); + + vkCmdWriteTimestamp(frames[frame].draw_command_buffer, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, frames[frame].timestamp_pool, frames[frame].timestamp_count); frames[frame].timestamp_names[frames[frame].timestamp_count] = p_name; frames[frame].timestamp_cpu_values[frames[frame].timestamp_count] = OS::get_singleton()->get_ticks_usec(); frames[frame].timestamp_count++; @@ -7653,7 +8015,6 @@ RenderingDevice *RenderingDeviceVulkan::create_local_device() { } RenderingDeviceVulkan::RenderingDeviceVulkan() { - screen_prepared = false; } RenderingDeviceVulkan::~RenderingDeviceVulkan() { diff --git a/drivers/vulkan/rendering_device_vulkan.h b/drivers/vulkan/rendering_device_vulkan.h index e6cbf2e01d..4bea17e4a1 100644 --- a/drivers/vulkan/rendering_device_vulkan.h +++ b/drivers/vulkan/rendering_device_vulkan.h @@ -5,8 +5,8 @@ /* GODOT ENGINE */ /* https://godotengine.org */ /*************************************************************************/ -/* Copyright (c) 2007-2020 Juan Linietsky, Ariel Manzur. */ -/* Copyright (c) 2014-2020 Godot Engine contributors (cf. AUTHORS.md). */ +/* Copyright (c) 2007-2021 Juan Linietsky, Ariel Manzur. */ +/* Copyright (c) 2014-2021 Godot Engine contributors (cf. AUTHORS.md). 
*/ /* */ /* Permission is hereby granted, free of charge, to any person obtaining */ /* a copy of this software and associated documentation files (the */ @@ -31,9 +31,10 @@ #ifndef RENDERING_DEVICE_VULKAN_H #define RENDERING_DEVICE_VULKAN_H -#include "core/oa_hash_map.h" #include "core/os/thread_safe.h" -#include "core/rid_owner.h" +#include "core/templates/local_vector.h" +#include "core/templates/oa_hash_map.h" +#include "core/templates/rid_owner.h" #include "servers/rendering/rendering_device.h" #ifdef DEBUG_ENABLED @@ -45,11 +46,6 @@ #include <vulkan/vulkan.h> -//todo: -//compute -//push constants -//views of texture slices - class VulkanContext; class RenderingDeviceVulkan : public RenderingDevice { @@ -99,7 +95,7 @@ class RenderingDeviceVulkan : public RenderingDevice { ID_BASE_SHIFT = 58 //5 bits for ID types }; - VkDevice device; + VkDevice device = VK_NULL_HANDLE; Map<RID, Set<RID>> dependency_map; //IDs to IDs that depend on it Map<RID, Set<RID>> reverse_dependency_map; //same as above, but in reverse @@ -124,35 +120,35 @@ class RenderingDeviceVulkan : public RenderingDevice { // for a framebuffer to render into it. struct Texture { - VkImage image; - VmaAllocation allocation; + VkImage image = VK_NULL_HANDLE; + VmaAllocation allocation = nullptr; VmaAllocationInfo allocation_info; - VkImageView view; + VkImageView view = VK_NULL_HANDLE; TextureType type; DataFormat format; TextureSamples samples; - uint32_t width; - uint32_t height; - uint32_t depth; - uint32_t layers; - uint32_t mipmaps; - uint32_t usage_flags; - uint32_t base_mipmap; - uint32_t base_layer; + uint32_t width = 0; + uint32_t height = 0; + uint32_t depth = 0; + uint32_t layers = 0; + uint32_t mipmaps = 0; + uint32_t usage_flags = 0; + uint32_t base_mipmap = 0; + uint32_t base_layer = 0; Vector<DataFormat> allowed_shared_formats; VkImageLayout layout; - uint32_t read_aspect_mask; - uint32_t barrier_aspect_mask; - bool bound; //bound to framebffer + uint32_t read_aspect_mask = 0; + uint32_t barrier_aspect_mask = 0; + bool bound = false; //bound to framebffer RID owner; }; RID_Owner<Texture, true> texture_owner; - uint32_t texture_upload_region_size_px; + uint32_t texture_upload_region_size_px = 0; Vector<uint8_t> _texture_get_data_from_image(Texture *tex, VkImage p_image, VmaAllocation p_allocation, uint32_t p_layer, bool p_2d = false); @@ -188,32 +184,28 @@ class RenderingDeviceVulkan : public RenderingDevice { // See the comments in the code to understand better how it works. 
struct StagingBufferBlock { - VkBuffer buffer; - VmaAllocation allocation; - uint64_t frame_used; - uint32_t fill_amount; + VkBuffer buffer = VK_NULL_HANDLE; + VmaAllocation allocation = nullptr; + uint64_t frame_used = 0; + uint32_t fill_amount = 0; }; Vector<StagingBufferBlock> staging_buffer_blocks; - int staging_buffer_current; - uint32_t staging_buffer_block_size; - uint64_t staging_buffer_max_size; - bool staging_buffer_used; + int staging_buffer_current = 0; + uint32_t staging_buffer_block_size = 0; + uint64_t staging_buffer_max_size = 0; + bool staging_buffer_used = false; Error _staging_buffer_allocate(uint32_t p_amount, uint32_t p_required_align, uint32_t &r_alloc_offset, uint32_t &r_alloc_size, bool p_can_segment = true, bool p_on_draw_command_buffer = false); Error _insert_staging_block(); struct Buffer { - uint32_t size; - uint32_t usage; - VkBuffer buffer; - VmaAllocation allocation; + uint32_t size = 0; + uint32_t usage = 0; + VkBuffer buffer = VK_NULL_HANDLE; + VmaAllocation allocation = nullptr; VkDescriptorBufferInfo buffer_info; //used for binding Buffer() { - size = 0; - usage = 0; - buffer = VK_NULL_HANDLE; - allocation = nullptr; } }; @@ -236,13 +228,8 @@ class RenderingDeviceVulkan : public RenderingDevice { // used for the render pipelines. struct FramebufferFormatKey { - Size2i empty_size; Vector<AttachmentFormat> attachments; bool operator<(const FramebufferFormatKey &p_key) const { - if (empty_size != p_key.empty_size) { - return empty_size < p_key.empty_size; - } - int as = attachments.size(); int bs = p_key.attachments.size(); if (as != bs) { @@ -276,15 +263,15 @@ class RenderingDeviceVulkan : public RenderingDevice { Map<FramebufferFormatKey, FramebufferFormatID> framebuffer_format_cache; struct FramebufferFormat { const Map<FramebufferFormatKey, FramebufferFormatID>::Element *E; - VkRenderPass render_pass; //here for constructing shaders, never used, see section (7.2. Render Pass Compatibility from Vulkan spec) - int color_attachments; //used for pipeline validation + VkRenderPass render_pass = VK_NULL_HANDLE; //here for constructing shaders, never used, see section (7.2. 
Render Pass Compatibility from Vulkan spec) + int color_attachments = 0; //used for pipeline validation TextureSamples samples; }; Map<FramebufferFormatID, FramebufferFormat> framebuffer_formats; struct Framebuffer { - FramebufferFormatID format_id; + FramebufferFormatID format_id = 0; struct VersionKey { InitialAction initial_color_action; FinalAction final_color_action; @@ -307,12 +294,12 @@ class RenderingDeviceVulkan : public RenderingDevice { } }; - uint32_t storage_mask; + uint32_t storage_mask = 0; Vector<RID> texture_ids; struct Version { - VkFramebuffer framebuffer; - VkRenderPass render_pass; //this one is owned + VkFramebuffer framebuffer = VK_NULL_HANDLE; + VkRenderPass render_pass = VK_NULL_HANDLE; //this one is owned }; Map<VersionKey, Version> framebuffers; @@ -399,8 +386,8 @@ class RenderingDeviceVulkan : public RenderingDevice { struct VertexDescriptionCache { Vector<VertexAttribute> vertex_formats; - VkVertexInputBindingDescription *bindings; - VkVertexInputAttributeDescription *attributes; + VkVertexInputBindingDescription *bindings = nullptr; + VkVertexInputAttributeDescription *attributes = nullptr; VkPipelineVertexInputStateCreateInfo create_info; }; @@ -408,9 +395,9 @@ class RenderingDeviceVulkan : public RenderingDevice { struct VertexArray { RID buffer; - VertexFormatID description; - int vertex_count; - uint32_t max_instances_allowed; + VertexFormatID description = 0; + int vertex_count = 0; + uint32_t max_instances_allowed = 0; Vector<VkBuffer> buffers; //not owned, just referenced Vector<VkDeviceSize> offsets; @@ -419,21 +406,21 @@ class RenderingDeviceVulkan : public RenderingDevice { RID_Owner<VertexArray, true> vertex_array_owner; struct IndexBuffer : public Buffer { - uint32_t max_index; //used for validation - uint32_t index_count; - VkIndexType index_type; - bool supports_restart_indices; + uint32_t max_index = 0; //used for validation + uint32_t index_count = 0; + VkIndexType index_type = VK_INDEX_TYPE_NONE_NV; + bool supports_restart_indices = false; }; RID_Owner<IndexBuffer, true> index_buffer_owner; struct IndexArray { - uint32_t max_index; //remember the maximum index here too, for validation + uint32_t max_index = 0; //remember the maximum index here too, for validation VkBuffer buffer; //not owned, inherited from index buffer - uint32_t offset; - uint32_t indices; - VkIndexType index_type; - bool supports_restart_indices; + uint32_t offset = 0; + uint32_t indices = 0; + VkIndexType index_type = VK_INDEX_TYPE_NONE_NV; + bool supports_restart_indices = false; }; RID_Owner<IndexArray, true> index_array_owner; @@ -459,10 +446,10 @@ class RenderingDeviceVulkan : public RenderingDevice { }; struct UniformInfo { - UniformType type; - int binding; - uint32_t stages; - int length; //size of arrays (in total elements), or ubos (in bytes * total elements) + UniformType type = UniformType::UNIFORM_TYPE_MAX; + int binding = 0; + uint32_t stages = 0; + int length = 0; //size of arrays (in total elements), or ubos (in bytes * total elements) bool operator!=(const UniformInfo &p_info) const { return (binding != p_info.binding || type != p_info.type || stages != p_info.stages || length != p_info.length); @@ -528,25 +515,25 @@ class RenderingDeviceVulkan : public RenderingDevice { struct Shader { struct Set { Vector<UniformInfo> uniform_info; - VkDescriptorSetLayout descriptor_set_layout; + VkDescriptorSetLayout descriptor_set_layout = VK_NULL_HANDLE; }; - uint32_t vertex_input_mask; //inputs used, this is mostly for validation - int fragment_outputs; + uint32_t 
vertex_input_mask = 0; //inputs used, this is mostly for validation + int fragment_outputs = 0; struct PushConstant { - uint32_t push_constant_size; - uint32_t push_constants_vk_stage; + uint32_t push_constant_size = 0; + uint32_t push_constants_vk_stage = 0; }; PushConstant push_constant; bool is_compute = false; - int max_output; + int max_output = 0; Vector<Set> sets; Vector<uint32_t> set_formats; Vector<VkPipelineShaderStageCreateInfo> pipeline_stages; - VkPipelineLayout pipeline_layout; + VkPipelineLayout pipeline_layout = VK_NULL_HANDLE; }; String _shader_uniform_debug(RID p_shader, int p_set = -1); @@ -610,7 +597,7 @@ class RenderingDeviceVulkan : public RenderingDevice { }; Map<DescriptorPoolKey, Set<DescriptorPool *>> descriptor_pools; - uint32_t max_descriptors_per_pool; + uint32_t max_descriptors_per_pool = 0; DescriptorPool *_descriptor_pool_allocate(const DescriptorPoolKey &p_key); void _descriptor_pool_free(const DescriptorPoolKey &p_key, DescriptorPool *p_pool); @@ -621,7 +608,7 @@ class RenderingDeviceVulkan : public RenderingDevice { //texture buffer needs a view struct TextureBuffer { Buffer buffer; - VkBufferView view; + VkBufferView view = VK_NULL_HANDLE; }; RID_Owner<TextureBuffer, true> texture_buffer_owner; @@ -635,14 +622,19 @@ class RenderingDeviceVulkan : public RenderingDevice { // the above restriction is not too serious. struct UniformSet { - uint32_t format; + uint32_t format = 0; RID shader_id; - uint32_t shader_set; - DescriptorPool *pool; + uint32_t shader_set = 0; + DescriptorPool *pool = nullptr; DescriptorPoolKey pool_key; - VkDescriptorSet descriptor_set; + VkDescriptorSet descriptor_set = VK_NULL_HANDLE; //VkPipelineLayout pipeline_layout; //not owned, inherited from shader - Vector<RID> attachable_textures; //used for validation + struct AttachableTexture { + uint32_t bind; + RID texture; + }; + + LocalVector<AttachableTexture> attachable_textures; //used for validation Vector<Texture *> mutable_sampled_textures; //used for layout change Vector<Texture *> mutable_storage_textures; //used for layout change }; @@ -668,21 +660,21 @@ class RenderingDeviceVulkan : public RenderingDevice { //Cached values for validation #ifdef DEBUG_ENABLED struct Validation { - FramebufferFormatID framebuffer_format; - uint32_t dynamic_state; - VertexFormatID vertex_format; - bool uses_restart_indices; - uint32_t primitive_minimum; - uint32_t primitive_divisor; + FramebufferFormatID framebuffer_format = 0; + uint32_t dynamic_state = 0; + VertexFormatID vertex_format = 0; + bool uses_restart_indices = false; + uint32_t primitive_minimum = 0; + uint32_t primitive_divisor = 0; } validation; #endif //Actual pipeline RID shader; Vector<uint32_t> set_formats; - VkPipelineLayout pipeline_layout; // not owned, needed for push constants - VkPipeline pipeline; - uint32_t push_constant_size; - uint32_t push_constant_stages; + VkPipelineLayout pipeline_layout = VK_NULL_HANDLE; // not owned, needed for push constants + VkPipeline pipeline = VK_NULL_HANDLE; + uint32_t push_constant_size = 0; + uint32_t push_constant_stages = 0; }; RID_Owner<RenderPipeline, true> render_pipeline_owner; @@ -690,10 +682,10 @@ class RenderingDeviceVulkan : public RenderingDevice { struct ComputePipeline { RID shader; Vector<uint32_t> set_formats; - VkPipelineLayout pipeline_layout; // not owned, needed for push constants - VkPipeline pipeline; - uint32_t push_constant_size; - uint32_t push_constant_stages; + VkPipelineLayout pipeline_layout = VK_NULL_HANDLE; // not owned, needed for push constants + 
VkPipeline pipeline = VK_NULL_HANDLE; + uint32_t push_constant_size = 0; + uint32_t push_constant_stages = 0; }; RID_Owner<ComputePipeline, true> compute_pipeline_owner; @@ -714,14 +706,14 @@ class RenderingDeviceVulkan : public RenderingDevice { // each needs it's own command pool. struct SplitDrawListAllocator { - VkCommandPool command_pool; + VkCommandPool command_pool = VK_NULL_HANDLE; Vector<VkCommandBuffer> command_buffers; //one for each frame }; Vector<SplitDrawListAllocator> split_draw_list_allocators; struct DrawList { - VkCommandBuffer command_buffer; // If persistent, this is owned, otherwise it's shared with the ringbuffer. + VkCommandBuffer command_buffer = VK_NULL_HANDLE; // If persistent, this is owned, otherwise it's shared with the ringbuffer. Rect2i viewport; struct SetState { @@ -755,7 +747,7 @@ class RenderingDeviceVulkan : public RenderingDevice { bool index_buffer_uses_restart_indices = false; uint32_t index_array_size = 0; uint32_t index_array_max_index = 0; - uint32_t index_array_offset; + uint32_t index_array_offset = 0; Vector<uint32_t> set_formats; Vector<bool> set_bound; Vector<RID> set_rids; @@ -766,8 +758,8 @@ class RenderingDeviceVulkan : public RenderingDevice { RID pipeline_shader; uint32_t invalid_set_from = 0; bool pipeline_uses_restart_indices = false; - uint32_t pipeline_primitive_divisor; - uint32_t pipeline_primitive_minimum; + uint32_t pipeline_primitive_divisor = 0; + uint32_t pipeline_primitive_minimum = 0; Vector<uint32_t> pipeline_set_formats; uint32_t pipeline_push_constant_size = 0; bool pipeline_push_constant_supplied = false; @@ -781,26 +773,26 @@ class RenderingDeviceVulkan : public RenderingDevice { #endif }; - DrawList *draw_list; // One for regular draw lists, multiple for split. - uint32_t draw_list_count; - bool draw_list_split; + DrawList *draw_list = nullptr; // One for regular draw lists, multiple for split. 
+ uint32_t draw_list_count = 0; + bool draw_list_split = false; Vector<RID> draw_list_bound_textures; Vector<RID> draw_list_storage_textures; - bool draw_list_unbind_color_textures; - bool draw_list_unbind_depth_textures; + bool draw_list_unbind_color_textures = false; + bool draw_list_unbind_depth_textures = false; void _draw_list_insert_clear_region(DrawList *draw_list, Framebuffer *framebuffer, Point2i viewport_offset, Point2i viewport_size, bool p_clear_color, const Vector<Color> &p_clear_colors, bool p_clear_depth, float p_depth, uint32_t p_stencil); Error _draw_list_setup_framebuffer(Framebuffer *p_framebuffer, InitialAction p_initial_color_action, FinalAction p_final_color_action, InitialAction p_initial_depth_action, FinalAction p_final_depth_action, VkFramebuffer *r_framebuffer, VkRenderPass *r_render_pass); Error _draw_list_render_pass_begin(Framebuffer *framebuffer, InitialAction p_initial_color_action, FinalAction p_final_color_action, InitialAction p_initial_depth_action, FinalAction p_final_depth_action, const Vector<Color> &p_clear_colors, float p_clear_depth, uint32_t p_clear_stencil, Point2i viewport_offset, Point2i viewport_size, VkFramebuffer vkframebuffer, VkRenderPass render_pass, VkCommandBuffer command_buffer, VkSubpassContents subpass_contents, const Vector<RID> &p_storage_textures); _FORCE_INLINE_ DrawList *_get_draw_list_ptr(DrawListID p_id); - Buffer *_get_buffer_from_owner(RID p_buffer, VkPipelineStageFlags &dst_stage_mask, VkAccessFlags &dst_access); + Buffer *_get_buffer_from_owner(RID p_buffer, VkPipelineStageFlags &dst_stage_mask, VkAccessFlags &dst_access, uint32_t p_post_barrier); /**********************/ /**** COMPUTE LIST ****/ /**********************/ struct ComputeList { - VkCommandBuffer command_buffer; // If persistent, this is owned, otherwise it's shared with the ringbuffer. + VkCommandBuffer command_buffer = VK_NULL_HANDLE; // If persistent, this is owned, otherwise it's shared with the ringbuffer. 
struct SetState { uint32_t pipeline_expected_format = 0; @@ -837,7 +829,7 @@ class RenderingDeviceVulkan : public RenderingDevice { #endif }; - ComputeList *compute_list; + ComputeList *compute_list = nullptr; /**************************/ /**** FRAME MANAGEMENT ****/ @@ -869,46 +861,46 @@ class RenderingDeviceVulkan : public RenderingDevice { List<RenderPipeline> render_pipelines_to_dispose_of; List<ComputePipeline> compute_pipelines_to_dispose_of; - VkCommandPool command_pool; - VkCommandBuffer setup_command_buffer; //used at the beginning of every frame for set-up - VkCommandBuffer draw_command_buffer; //used at the beginning of every frame for set-up + VkCommandPool command_pool = VK_NULL_HANDLE; + VkCommandBuffer setup_command_buffer = VK_NULL_HANDLE; //used at the beginning of every frame for set-up + VkCommandBuffer draw_command_buffer = VK_NULL_HANDLE; //used at the beginning of every frame for set-up struct Timestamp { String description; - uint64_t value; + uint64_t value = 0; }; VkQueryPool timestamp_pool; - String *timestamp_names; - uint64_t *timestamp_cpu_values; - uint32_t timestamp_count; - String *timestamp_result_names; - uint64_t *timestamp_cpu_result_values; - uint64_t *timestamp_result_values; - uint32_t timestamp_result_count; - uint64_t index; + String *timestamp_names = nullptr; + uint64_t *timestamp_cpu_values = nullptr; + uint32_t timestamp_count = 0; + String *timestamp_result_names = nullptr; + uint64_t *timestamp_cpu_result_values = nullptr; + uint64_t *timestamp_result_values = nullptr; + uint32_t timestamp_result_count = 0; + uint64_t index = 0; }; - uint32_t max_timestamp_query_elements; + uint32_t max_timestamp_query_elements = 0; - Frame *frames; //frames available, for main device they are cycled (usually 3), for local devices only 1 - int frame; //current frame - int frame_count; //total amount of frames - uint64_t frames_drawn; + Frame *frames = nullptr; //frames available, for main device they are cycled (usually 3), for local devices only 1 + int frame = 0; //current frame + int frame_count = 0; //total amount of frames + uint64_t frames_drawn = 0; RID local_device; bool local_device_processing = false; void _free_pending_resources(int p_frame); - VmaAllocator allocator; + VmaAllocator allocator = nullptr; - VulkanContext *context; + VulkanContext *context = nullptr; void _free_internal(RID p_id); void _flush(bool p_current_frame); - bool screen_prepared; + bool screen_prepared = false; template <class T> void _free_rids(T &p_owner, const char *p_type); @@ -921,27 +913,27 @@ public: virtual RID texture_create_shared(const TextureView &p_view, RID p_with_texture); virtual RID texture_create_shared_from_slice(const TextureView &p_view, RID p_with_texture, uint32_t p_layer, uint32_t p_mipmap, TextureSliceType p_slice_type = TEXTURE_SLICE_2D); - virtual Error texture_update(RID p_texture, uint32_t p_layer, const Vector<uint8_t> &p_data, bool p_sync_with_draw = false); + virtual Error texture_update(RID p_texture, uint32_t p_layer, const Vector<uint8_t> &p_data, uint32_t p_post_barrier = BARRIER_MASK_ALL); virtual Vector<uint8_t> texture_get_data(RID p_texture, uint32_t p_layer); virtual bool texture_is_format_supported_for_usage(DataFormat p_format, uint32_t p_usage) const; virtual bool texture_is_shared(RID p_texture); virtual bool texture_is_valid(RID p_texture); - virtual Error texture_copy(RID p_from_texture, RID p_to_texture, const Vector3 &p_from, const Vector3 &p_to, const Vector3 &p_size, uint32_t p_src_mipmap, uint32_t p_dst_mipmap, uint32_t 
p_src_layer, uint32_t p_dst_layer, bool p_sync_with_draw = false); - virtual Error texture_clear(RID p_texture, const Color &p_color, uint32_t p_base_mipmap, uint32_t p_mipmaps, uint32_t p_base_layer, uint32_t p_layers, bool p_sync_with_draw = false); - virtual Error texture_resolve_multisample(RID p_from_texture, RID p_to_texture, bool p_sync_with_draw = false); + virtual Error texture_copy(RID p_from_texture, RID p_to_texture, const Vector3 &p_from, const Vector3 &p_to, const Vector3 &p_size, uint32_t p_src_mipmap, uint32_t p_dst_mipmap, uint32_t p_src_layer, uint32_t p_dst_layer, uint32_t p_post_barrier = BARRIER_MASK_ALL); + virtual Error texture_clear(RID p_texture, const Color &p_color, uint32_t p_base_mipmap, uint32_t p_mipmaps, uint32_t p_base_layer, uint32_t p_layers, uint32_t p_post_barrier = BARRIER_MASK_ALL); + virtual Error texture_resolve_multisample(RID p_from_texture, RID p_to_texture, uint32_t p_post_barrier = BARRIER_MASK_ALL); /*********************/ /**** FRAMEBUFFER ****/ /*********************/ virtual FramebufferFormatID framebuffer_format_create(const Vector<AttachmentFormat> &p_format); - virtual FramebufferFormatID framebuffer_format_create_empty(const Size2i &p_size); + virtual FramebufferFormatID framebuffer_format_create_empty(TextureSamples p_samples = TEXTURE_SAMPLES_1); virtual TextureSamples framebuffer_format_get_texture_samples(FramebufferFormatID p_format); virtual RID framebuffer_create(const Vector<RID> &p_texture_attachments, FramebufferFormatID p_format_check = INVALID_ID); - virtual RID framebuffer_create_empty(const Size2i &p_size, FramebufferFormatID p_format_check = INVALID_ID); + virtual RID framebuffer_create_empty(const Size2i &p_size, TextureSamples p_samples = TEXTURE_SAMPLES_1, FramebufferFormatID p_format_check = INVALID_ID); virtual FramebufferFormatID framebuffer_get_format(RID p_framebuffer); @@ -955,7 +947,7 @@ public: /**** VERTEX ARRAY ****/ /**********************/ - virtual RID vertex_buffer_create(uint32_t p_size_bytes, const Vector<uint8_t> &p_data = Vector<uint8_t>()); + virtual RID vertex_buffer_create(uint32_t p_size_bytes, const Vector<uint8_t> &p_data = Vector<uint8_t>(), bool p_use_as_storage = false); // Internally reference counted, this ID is warranted to be unique for the same description, but needs to be freed as many times as it was allocated virtual VertexFormatID vertex_format_create(const Vector<VertexAttribute> &p_vertex_formats); @@ -983,7 +975,8 @@ public: virtual RID uniform_set_create(const Vector<Uniform> &p_uniforms, RID p_shader, uint32_t p_shader_set); virtual bool uniform_set_is_valid(RID p_uniform_set); - virtual Error buffer_update(RID p_buffer, uint32_t p_offset, uint32_t p_size, const void *p_data, bool p_sync_with_draw = false); //works for any buffer + virtual Error buffer_update(RID p_buffer, uint32_t p_offset, uint32_t p_size, const void *p_data, uint32_t p_post_barrier = BARRIER_MASK_ALL); //works for any buffer + virtual Error buffer_clear(RID p_buffer, uint32_t p_offset, uint32_t p_size, uint32_t p_post_barrier = BARRIER_MASK_ALL); virtual Vector<uint8_t> buffer_get_data(RID p_buffer); /*************************/ @@ -1029,7 +1022,7 @@ public: virtual void draw_list_enable_scissor(DrawListID p_list, const Rect2 &p_rect); virtual void draw_list_disable_scissor(DrawListID p_list); - virtual void draw_list_end(); + virtual void draw_list_end(uint32_t p_post_barrier = BARRIER_MASK_ALL); /***********************/ /**** COMPUTE LISTS ****/ @@ -1043,8 +1036,9 @@ public: virtual void 
compute_list_dispatch(ComputeListID p_list, uint32_t p_x_groups, uint32_t p_y_groups, uint32_t p_z_groups); virtual void compute_list_dispatch_indirect(ComputeListID p_list, RID p_buffer, uint32_t p_offset); - virtual void compute_list_end(); + virtual void compute_list_end(uint32_t p_post_barrier = BARRIER_MASK_ALL); + virtual void barrier(uint32_t p_from = BARRIER_MASK_ALL, uint32_t p_to = BARRIER_MASK_ALL); virtual void full_barrier(); /**************/ @@ -1057,7 +1051,7 @@ public: /**** Timing ****/ /****************/ - virtual void capture_timestamp(const String &p_name, bool p_sync_to_draw); + virtual void capture_timestamp(const String &p_name); virtual uint32_t get_captured_timestamps_count() const; virtual uint64_t get_captured_timestamps_frame() const; virtual uint64_t get_captured_timestamp_gpu_time(uint32_t p_index) const; @@ -1085,6 +1079,12 @@ public: virtual uint64_t get_memory_usage() const; + virtual void set_resource_name(RID p_id, const String p_name); + + virtual void draw_command_begin_label(String p_label_name, const Color p_color = Color(1, 1, 1, 1)); + virtual void draw_command_insert_label(String p_label_name, const Color p_color = Color(1, 1, 1, 1)); + virtual void draw_command_end_label(); + RenderingDeviceVulkan(); ~RenderingDeviceVulkan(); }; diff --git a/drivers/vulkan/vulkan_context.cpp b/drivers/vulkan/vulkan_context.cpp index 8840391966..1a631f4a2c 100644 --- a/drivers/vulkan/vulkan_context.cpp +++ b/drivers/vulkan/vulkan_context.cpp @@ -5,8 +5,8 @@ /* GODOT ENGINE */ /* https://godotengine.org */ /*************************************************************************/ -/* Copyright (c) 2007-2020 Juan Linietsky, Ariel Manzur. */ -/* Copyright (c) 2014-2020 Godot Engine contributors (cf. AUTHORS.md). */ +/* Copyright (c) 2007-2021 Juan Linietsky, Ariel Manzur. */ +/* Copyright (c) 2014-2021 Godot Engine contributors (cf. AUTHORS.md). */ /* */ /* Permission is hereby granted, free of charge, to any person obtaining */ /* a copy of this software and associated documentation files (the */ @@ -30,9 +30,9 @@ #include "vulkan_context.h" -#include "core/engine.h" -#include "core/project_settings.h" -#include "core/ustring.h" +#include "core/config/engine.h" +#include "core/config/project_settings.h" +#include "core/string/ustring.h" #include "core/version.h" #include "vk_enum_string_helper.h" @@ -55,10 +55,29 @@ VKAPI_ATTR VkBool32 VKAPI_CALL VulkanContext::_debug_messenger_callback( return VK_FALSE; } // This needs to be ignored because Validator is wrong here. + if (strstr(pCallbackData->pMessage, "Invalid SPIR-V binary version 1.3") != nullptr) { + return VK_FALSE; + } + // This needs to be ignored because Validator is wrong here. + if (strstr(pCallbackData->pMessage, "Shader requires flag") != nullptr) { + return VK_FALSE; + } + + // This needs to be ignored because Validator is wrong here. 
if (strstr(pCallbackData->pMessage, "SPIR-V module not valid: Pointer operand") != nullptr && strstr(pCallbackData->pMessage, "must be a memory object") != nullptr) { return VK_FALSE; } + /* + // This is a valid warning because it's illegal in Vulkan, but in practice it should work according to VK_KHR_maintenance2 + if (strstr(pCallbackData->pMessage, "VK_FORMAT_E5B9G9R9_UFLOAT_PACK32 with tiling VK_IMAGE_TILING_OPTIMAL does not support usage that includes VK_IMAGE_USAGE_STORAGE_BIT") != nullptr) { + return VK_FALSE; + } + + if (strstr(pCallbackData->pMessage, "VK_FORMAT_R4G4B4A4_UNORM_PACK16 with tiling VK_IMAGE_TILING_OPTIMAL does not support usage that includes VK_IMAGE_USAGE_STORAGE_BIT") != nullptr) { + return VK_FALSE; + } +*/ // Workaround for Vulkan-Loader usability bug: https://github.com/KhronosGroup/Vulkan-Loader/issues/262. if (strstr(pCallbackData->pMessage, "wrong ELF class: ELFCLASS32") != nullptr) { return VK_FALSE; @@ -216,23 +235,23 @@ Error VulkanContext::_create_validation_layers() { } Error VulkanContext::_initialize_extensions() { - VkResult err; uint32_t instance_extension_count = 0; enabled_extension_count = 0; enabled_layer_count = 0; + enabled_debug_utils = false; /* Look for instance extensions */ VkBool32 surfaceExtFound = 0; VkBool32 platformSurfaceExtFound = 0; memset(extension_names, 0, sizeof(extension_names)); - err = vkEnumerateInstanceExtensionProperties(nullptr, &instance_extension_count, nullptr); - ERR_FAIL_COND_V(err, ERR_CANT_CREATE); + VkResult err = vkEnumerateInstanceExtensionProperties(nullptr, &instance_extension_count, nullptr); + ERR_FAIL_COND_V(err != VK_SUCCESS && err != VK_INCOMPLETE, ERR_CANT_CREATE); if (instance_extension_count > 0) { VkExtensionProperties *instance_extensions = (VkExtensionProperties *)malloc(sizeof(VkExtensionProperties) * instance_extension_count); err = vkEnumerateInstanceExtensionProperties(nullptr, &instance_extension_count, instance_extensions); - if (err) { + if (err != VK_SUCCESS && err != VK_INCOMPLETE) { free(instance_extensions); ERR_FAIL_V(ERR_CANT_CREATE); } @@ -252,9 +271,8 @@ Error VulkanContext::_initialize_extensions() { } } if (!strcmp(VK_EXT_DEBUG_UTILS_EXTENSION_NAME, instance_extensions[i].extensionName)) { - if (use_validation_layers) { - extension_names[enabled_extension_count++] = VK_EXT_DEBUG_UTILS_EXTENSION_NAME; - } + extension_names[enabled_extension_count++] = VK_EXT_DEBUG_UTILS_EXTENSION_NAME; + enabled_debug_utils = true; } if (enabled_extension_count >= MAX_EXTENSIONS) { free(instance_extensions); @@ -302,7 +320,7 @@ Error VulkanContext::_create_physical_device() { /*flags*/ 0, /*pApplicationInfo*/ &app, /*enabledLayerCount*/ enabled_layer_count, - /*ppEnabledLayerNames*/ (const char *const *)instance_validation_layers, + /*ppEnabledLayerNames*/ (const char *const *)enabled_layers, /*enabledExtensionCount*/ enabled_extension_count, /*ppEnabledExtensionNames*/ (const char *const *)extension_names, }; @@ -437,7 +455,7 @@ Error VulkanContext::_create_physical_device() { " extension.\n\nDo you have a compatible Vulkan installable client driver (ICD) installed?\n" "vkCreateInstance Failure"); - if (use_validation_layers) { + if (enabled_debug_utils) { // Setup VK_EXT_debug_utils function pointers always (we use them for // debug labels and names. 
CreateDebugUtilsMessengerEXT = @@ -494,6 +512,8 @@ Error VulkanContext::_create_physical_device() { // features based on this query vkGetPhysicalDeviceFeatures(gpu, &physical_device_features); + physical_device_features.robustBufferAccess = false; //turn off robust buffer access, which can hamper performance on some hardware + #define GET_INSTANCE_PROC_ADDR(inst, entrypoint) \ { \ fp##entrypoint = (PFN_vk##entrypoint)vkGetInstanceProcAddr(inst, "vk" #entrypoint); \ @@ -707,7 +727,8 @@ Error VulkanContext::_window_create(DisplayServer::WindowID p_window_id, VkSurfa // We use a single GPU, but we need a surface to initialize the // queues, so this process must be deferred until a surface // is created. - _initialize_queues(p_surface); + Error err = _initialize_queues(p_surface); + ERR_FAIL_COND_V(err != OK, ERR_CANT_CREATE); } Window window; @@ -1009,7 +1030,6 @@ Error VulkanContext::_update_swap_chain(Window *window) { { const VkAttachmentDescription attachment = { - /*flags*/ 0, /*format*/ format, /*samples*/ VK_SAMPLE_COUNT_1_BIT, @@ -1566,6 +1586,59 @@ void VulkanContext::local_device_free(RID p_local_device) { local_device_owner.free(p_local_device); } +void VulkanContext::command_begin_label(VkCommandBuffer p_command_buffer, String p_label_name, const Color p_color) { + if (!enabled_debug_utils) { + return; + } + CharString label_name = p_label_name.utf8(); //keep the UTF-8 buffer alive until the Vulkan call below + VkDebugUtilsLabelEXT label; + label.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_LABEL_EXT; + label.pNext = nullptr; + label.pLabelName = label_name.get_data(); + label.color[0] = p_color[0]; + label.color[1] = p_color[1]; + label.color[2] = p_color[2]; + label.color[3] = p_color[3]; + CmdBeginDebugUtilsLabelEXT(p_command_buffer, &label); +} + +void VulkanContext::command_insert_label(VkCommandBuffer p_command_buffer, String p_label_name, const Color p_color) { + if (!enabled_debug_utils) { + return; + } + CharString label_name = p_label_name.utf8(); //keep the UTF-8 buffer alive until the Vulkan call below + VkDebugUtilsLabelEXT label; + label.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_LABEL_EXT; + label.pNext = nullptr; + label.pLabelName = label_name.get_data(); + label.color[0] = p_color[0]; + label.color[1] = p_color[1]; + label.color[2] = p_color[2]; + label.color[3] = p_color[3]; + CmdInsertDebugUtilsLabelEXT(p_command_buffer, &label); +} + +void VulkanContext::command_end_label(VkCommandBuffer p_command_buffer) { + if (!enabled_debug_utils) { + return; + } + CmdEndDebugUtilsLabelEXT(p_command_buffer); +} + +void VulkanContext::set_object_name(VkObjectType p_object_type, uint64_t p_object_handle, String p_object_name) { + if (!enabled_debug_utils) { + return; + } + VkDebugUtilsObjectNameInfoEXT name_info; + name_info.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_OBJECT_NAME_INFO_EXT; + name_info.pNext = nullptr; + name_info.objectType = p_object_type; + name_info.objectHandle = p_object_handle; + CharString object_name = p_object_name.utf8(); + name_info.pObjectName = object_name.get_data(); + SetDebugUtilsObjectNameEXT(device, &name_info); +} + VulkanContext::VulkanContext() { use_validation_layers = Engine::get_singleton()->is_validation_layers_enabled(); diff --git a/drivers/vulkan/vulkan_context.h b/drivers/vulkan/vulkan_context.h index 59e404512a..5cb762aca8 100644 --- a/drivers/vulkan/vulkan_context.h +++ b/drivers/vulkan/vulkan_context.h @@ -5,8 +5,8 @@ /* GODOT ENGINE */ /* https://godotengine.org */ /*************************************************************************/ -/* Copyright (c) 2007-2020 Juan Linietsky, Ariel Manzur. */ -/* Copyright (c) 2014-2020 Godot Engine contributors (cf. AUTHORS.md). */ +/* Copyright (c) 2007-2021 Juan Linietsky, Ariel Manzur. 
*/ +/* Copyright (c) 2014-2021 Godot Engine contributors (cf. AUTHORS.md). */ /* */ /* Permission is hereby granted, free of charge, to any person obtaining */ /* a copy of this software and associated documentation files (the */ @@ -31,11 +31,11 @@ #ifndef VULKAN_CONTEXT_H #define VULKAN_CONTEXT_H -#include "core/error_list.h" -#include "core/map.h" +#include "core/error/error_list.h" #include "core/os/mutex.h" -#include "core/rid_owner.h" -#include "core/ustring.h" +#include "core/string/ustring.h" +#include "core/templates/map.h" +#include "core/templates/rid_owner.h" #include "servers/display_server.h" #include <vulkan/vulkan.h> @@ -47,13 +47,13 @@ class VulkanContext { FRAME_LAG = 2 }; - VkInstance inst; - VkSurfaceKHR surface; - VkPhysicalDevice gpu; + VkInstance inst = VK_NULL_HANDLE; + VkSurfaceKHR surface = VK_NULL_HANDLE; + VkPhysicalDevice gpu = VK_NULL_HANDLE; VkPhysicalDeviceProperties gpu_props; - uint32_t queue_family_count; + uint32_t queue_family_count = 0; VkQueueFamilyProperties *queue_props = nullptr; - VkDevice device; + VkDevice device = VK_NULL_HANDLE; bool device_initialized = false; bool inst_initialized = false; @@ -61,17 +61,17 @@ class VulkanContext { // Present queue. bool queues_initialized = false; - uint32_t graphics_queue_family_index; - uint32_t present_queue_family_index; - bool separate_present_queue; - VkQueue graphics_queue; - VkQueue present_queue; + uint32_t graphics_queue_family_index = 0; + uint32_t present_queue_family_index = 0; + bool separate_present_queue = false; + VkQueue graphics_queue = VK_NULL_HANDLE; + VkQueue present_queue = VK_NULL_HANDLE; VkColorSpaceKHR color_space; VkFormat format; VkSemaphore image_acquired_semaphores[FRAME_LAG]; VkSemaphore draw_complete_semaphores[FRAME_LAG]; VkSemaphore image_ownership_semaphores[FRAME_LAG]; - int frame_index; + int frame_index = 0; VkFence fences[FRAME_LAG]; VkPhysicalDeviceMemoryProperties memory_properties; VkPhysicalDeviceFeatures physical_device_features; @@ -91,14 +91,14 @@ class VulkanContext { uint32_t current_buffer = 0; int width = 0; int height = 0; - VkCommandPool present_cmd_pool; // For separate present queue. + VkCommandPool present_cmd_pool = VK_NULL_HANDLE; // For separate present queue. VkRenderPass render_pass = VK_NULL_HANDLE; }; struct LocalDevice { bool waiting = false; - VkDevice device; - VkQueue queue; + VkDevice device = VK_NULL_HANDLE; + VkQueue queue = VK_NULL_HANDLE; }; RID_Owner<LocalDevice, true> local_device_owner; @@ -108,7 +108,7 @@ class VulkanContext { // Commands. 
- bool prepared; + bool prepared = false; Vector<VkCommandBuffer> command_buffer_queue; int command_buffer_count = 1; @@ -119,8 +119,8 @@ class VulkanContext { bool VK_GOOGLE_display_timing_enabled = true; uint32_t enabled_extension_count = 0; const char *extension_names[MAX_EXTENSIONS]; + bool enabled_debug_utils = false; - const char **instance_validation_layers = nullptr; uint32_t enabled_layer_count = 0; const char *enabled_layers[MAX_LAYERS]; @@ -143,7 +143,7 @@ class VulkanContext { PFN_vkGetRefreshCycleDurationGOOGLE fpGetRefreshCycleDurationGOOGLE; PFN_vkGetPastPresentationTimingGOOGLE fpGetPastPresentationTimingGOOGLE; - VkDebugUtilsMessengerEXT dbg_messenger; + VkDebugUtilsMessengerEXT dbg_messenger = VK_NULL_HANDLE; Error _create_validation_layers(); Error _initialize_extensions(); @@ -210,6 +210,11 @@ public: Error swap_buffers(); Error initialize(); + void command_begin_label(VkCommandBuffer p_command_buffer, String p_label_name, const Color p_color); + void command_insert_label(VkCommandBuffer p_command_buffer, String p_label_name, const Color p_color); + void command_end_label(VkCommandBuffer p_command_buffer); + void set_object_name(VkObjectType p_object_type, uint64_t p_object_handle, String p_object_name); + VulkanContext(); virtual ~VulkanContext(); };
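
A few usage sketches for the APIs this diff introduces; these are illustrative editor additions, not part of the commit. The p_post_barrier parameter added to draw_list_end(), compute_list_end(), buffer_update(), texture_update() and related calls is a bitmask of BARRIER_MASK_RASTER, BARRIER_MASK_COMPUTE and BARRIER_MASK_TRANSFER (defaulting to BARRIER_MASK_ALL); it selects which later pipeline stages the closing barrier makes the writes visible to, instead of unconditionally stalling every stage. A minimal sketch, assuming an initialized RenderingDevice and pre-created RIDs (the function name dispatch_for_raster and all variable names are placeholders):

#include "servers/rendering/rendering_device.h"

// Run a compute pass whose output is consumed only by raster work.
void dispatch_for_raster(RenderingDevice *rd, RID pipeline, RID uniform_set) {
	RenderingDevice::ComputeListID cl = rd->compute_list_begin();
	rd->compute_list_bind_compute_pipeline(cl, pipeline);
	rd->compute_list_bind_uniform_set(cl, uniform_set, 0);
	rd->compute_list_dispatch(cl, 8, 8, 1);
	// Only vertex/fragment stages wait on these writes; compute and
	// transfer are left unsynchronized, which is cheaper than the old
	// unconditional all-stages barrier.
	rd->compute_list_end(RenderingDevice::BARRIER_MASK_RASTER);
}

The standalone barrier(p_from, p_to) covers cases the per-call masks cannot, for example making earlier compute writes visible to a transfer later in the frame: rd->barrier(RenderingDevice::BARRIER_MASK_COMPUTE, RenderingDevice::BARRIER_MASK_TRANSFER);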
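The new p_use_as_storage flag on vertex_buffer_create() creates the buffer with storage usage so compute shaders can write vertex data in place, and the post-barrier mask on buffer_update()/buffer_clear() pairs naturally with it. A short sketch under the same assumptions as above (create_particle_buffer and the data layout are placeholders):

// A compute-writable vertex buffer, uploaded with a narrow barrier.
RID create_particle_buffer(RenderingDevice *rd, const Vector<uint8_t> &data) {
	RID vb = rd->vertex_buffer_create(data.size(), data, /*p_use_as_storage=*/true);
	// Overwrite the contents later; only compute shaders need to observe
	// the transfer, so raster stages are not stalled.
	rd->buffer_update(vb, 0, data.size(), data.ptr(), RenderingDevice::BARRIER_MASK_COMPUTE);
	return vb;
}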
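Finally, the VK_EXT_debug_utils plumbing in vulkan_context.cpp surfaces through set_resource_name() and the draw_command_begin/insert/end_label() calls, so frame captures in tools such as RenderDoc show readable object names and command-range labels. A sketch of the intended use (the label text, color and RIDs are arbitrary):

// Name resources and group a pass for a frame debugger.
void annotate_sky_pass(RenderingDevice *rd, RID color_texture, RID sky_pipeline) {
	rd->set_resource_name(color_texture, "Main Color"); // Names the VkImage and its VkImageView.
	rd->set_resource_name(sky_pipeline, "Sky Pipeline"); // Names the VkPipeline and its layout.

	rd->draw_command_begin_label("Sky Pass", Color(0.3, 0.5, 1.0, 1.0));
	// ... draw_list_begin() / draw calls / draw_list_end() for the pass ...
	rd->draw_command_end_label();
}

All of these helpers are no-ops when VK_EXT_debug_utils is unavailable, since enabled_debug_utils gates each one in VulkanContext.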