Diffstat (limited to 'drivers/vulkan/rendering_device_vulkan.cpp')
-rw-r--r--  drivers/vulkan/rendering_device_vulkan.cpp | 619
1 file changed, 490 insertions(+), 129 deletions(-)
diff --git a/drivers/vulkan/rendering_device_vulkan.cpp b/drivers/vulkan/rendering_device_vulkan.cpp
index a356586698..ef331ec4b6 100644
--- a/drivers/vulkan/rendering_device_vulkan.cpp
+++ b/drivers/vulkan/rendering_device_vulkan.cpp
@@ -5,8 +5,8 @@
/* GODOT ENGINE */
/* https://godotengine.org */
/*************************************************************************/
-/* Copyright (c) 2007-2020 Juan Linietsky, Ariel Manzur. */
-/* Copyright (c) 2014-2020 Godot Engine contributors (cf. AUTHORS.md). */
+/* Copyright (c) 2007-2021 Juan Linietsky, Ariel Manzur. */
+/* Copyright (c) 2014-2021 Godot Engine contributors (cf. AUTHORS.md). */
/* */
/* Permission is hereby granted, free of charge, to any person obtaining */
/* a copy of this software and associated documentation files (the */
@@ -30,10 +30,10 @@
#include "rendering_device_vulkan.h"
-#include "core/hashfuncs.h"
+#include "core/config/project_settings.h"
#include "core/os/file_access.h"
#include "core/os/os.h"
-#include "core/project_settings.h"
+#include "core/templates/hashfuncs.h"
#include "drivers/vulkan/vulkan_context.h"
#include "thirdparty/spirv-reflect/spirv_reflect.h"
@@ -41,28 +41,60 @@
//#define FORCE_FULL_BARRIER
// Get the Vulkan object information and possible stage access types (bitwise OR'd with incoming values)
-RenderingDeviceVulkan::Buffer *RenderingDeviceVulkan::_get_buffer_from_owner(RID p_buffer, VkPipelineStageFlags &stage_mask, VkAccessFlags &access_mask) {
+RenderingDeviceVulkan::Buffer *RenderingDeviceVulkan::_get_buffer_from_owner(RID p_buffer, VkPipelineStageFlags &r_stage_mask, VkAccessFlags &r_access_mask, uint32_t p_post_barrier) {
Buffer *buffer = nullptr;
if (vertex_buffer_owner.owns(p_buffer)) {
- stage_mask |= VK_PIPELINE_STAGE_VERTEX_INPUT_BIT;
- access_mask |= VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT;
buffer = vertex_buffer_owner.getornull(p_buffer);
+
+ r_stage_mask |= VK_PIPELINE_STAGE_VERTEX_INPUT_BIT;
+ r_access_mask |= VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT;
+ if (buffer->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) {
+ if (p_post_barrier & BARRIER_MASK_RASTER) {
+ r_access_mask |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
+ r_stage_mask |= VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
+ }
+ if (p_post_barrier & BARRIER_MASK_COMPUTE) {
+ r_access_mask |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
+ r_stage_mask |= VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
+ }
+ }
} else if (index_buffer_owner.owns(p_buffer)) {
- stage_mask |= VK_PIPELINE_STAGE_VERTEX_INPUT_BIT;
- access_mask |= VK_ACCESS_INDEX_READ_BIT;
+ r_stage_mask |= VK_PIPELINE_STAGE_VERTEX_INPUT_BIT;
+ r_access_mask |= VK_ACCESS_INDEX_READ_BIT;
buffer = index_buffer_owner.getornull(p_buffer);
} else if (uniform_buffer_owner.owns(p_buffer)) {
- stage_mask |= VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
- access_mask |= VK_ACCESS_UNIFORM_READ_BIT;
+ if (p_post_barrier & BARRIER_MASK_RASTER) {
+ r_stage_mask |= VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
+ }
+ if (p_post_barrier & BARRIER_MASK_COMPUTE) {
+ r_stage_mask |= VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
+ }
+ r_access_mask |= VK_ACCESS_UNIFORM_READ_BIT;
buffer = uniform_buffer_owner.getornull(p_buffer);
} else if (texture_buffer_owner.owns(p_buffer)) {
- stage_mask |= VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
- access_mask |= VK_ACCESS_SHADER_READ_BIT;
+ if (p_post_barrier & BARRIER_MASK_RASTER) {
+ r_stage_mask |= VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
+ }
+ if (p_post_barrier & BARRIER_MASK_COMPUTE) {
+ r_stage_mask |= VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
+ }
+ r_access_mask |= VK_ACCESS_SHADER_READ_BIT;
buffer = &texture_buffer_owner.getornull(p_buffer)->buffer;
} else if (storage_buffer_owner.owns(p_buffer)) {
- stage_mask |= VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
- access_mask |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
buffer = storage_buffer_owner.getornull(p_buffer);
+ if (p_post_barrier & BARRIER_MASK_RASTER) {
+ r_stage_mask |= VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
+ r_access_mask |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
+ }
+ if (p_post_barrier & BARRIER_MASK_COMPUTE) {
+ r_stage_mask |= VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
+ r_access_mask |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
+ }
+
+ if (buffer->usage & VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT) {
+ r_stage_mask |= VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT;
+ r_access_mask |= VK_ACCESS_INDIRECT_COMMAND_READ_BIT;
+ }
}
return buffer;
}
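The p_post_barrier mask threads through every update and copy entry point in this patch, so callers can narrow the destination stages instead of always paying for the old hardcoded vertex/fragment/compute barrier. A minimal caller-side sketch (the rd pointer, buffer RID and data vector are hypothetical; BARRIER_MASK_* constants are assumed from rendering_device.h):

    // This storage buffer is only consumed by a later compute pass, so the
    // post-update barrier can target the compute stage alone.
    rd->buffer_update(particles_buffer, 0, data.size(), data.ptr(),
            RenderingDevice::BARRIER_MASK_COMPUTE);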
@@ -615,6 +647,7 @@ int RenderingDeviceVulkan::get_format_vertex_size(DataFormat p_format) {
case DATA_FORMAT_B8G8R8A8_SNORM:
case DATA_FORMAT_B8G8R8A8_UINT:
case DATA_FORMAT_B8G8R8A8_SINT:
+ case DATA_FORMAT_A2B10G10R10_UNORM_PACK32:
return 4;
case DATA_FORMAT_R16_UNORM:
case DATA_FORMAT_R16_SNORM:
@@ -1361,6 +1394,7 @@ Error RenderingDeviceVulkan::_buffer_allocate(Buffer *p_buffer, uint32_t p_size,
p_buffer->buffer_info.buffer = p_buffer->buffer;
p_buffer->buffer_info.offset = 0;
p_buffer->buffer_info.range = p_size;
+ p_buffer->usage = p_usage;
return OK;
}
@@ -1691,16 +1725,16 @@ RID RenderingDeviceVulkan::texture_create(const TextureFormat &p_format, const T
#endif
}
- if (p_format.type == TEXTURE_TYPE_CUBE || p_format.type == TEXTURE_TYPE_CUBE_ARRAY) {
+ if (p_format.texture_type == TEXTURE_TYPE_CUBE || p_format.texture_type == TEXTURE_TYPE_CUBE_ARRAY) {
image_create_info.flags |= VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT;
}
/*if (p_format.type == TEXTURE_TYPE_2D || p_format.type == TEXTURE_TYPE_2D_ARRAY) {
image_create_info.flags |= VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT;
}*/
- ERR_FAIL_INDEX_V(p_format.type, TEXTURE_TYPE_MAX, RID());
+ ERR_FAIL_INDEX_V(p_format.texture_type, TEXTURE_TYPE_MAX, RID());
- image_create_info.imageType = vulkan_image_type[p_format.type];
+ image_create_info.imageType = vulkan_image_type[p_format.texture_type];
ERR_FAIL_COND_V_MSG(p_format.width < 1, RID(), "Width must be equal or greater than 1 for all textures");
@@ -1725,10 +1759,10 @@ RID RenderingDeviceVulkan::texture_create(const TextureFormat &p_format, const T
image_create_info.mipLevels = p_format.mipmaps;
- if (p_format.type == TEXTURE_TYPE_1D_ARRAY || p_format.type == TEXTURE_TYPE_2D_ARRAY || p_format.type == TEXTURE_TYPE_CUBE_ARRAY || p_format.type == TEXTURE_TYPE_CUBE) {
+ if (p_format.texture_type == TEXTURE_TYPE_1D_ARRAY || p_format.texture_type == TEXTURE_TYPE_2D_ARRAY || p_format.texture_type == TEXTURE_TYPE_CUBE_ARRAY || p_format.texture_type == TEXTURE_TYPE_CUBE) {
ERR_FAIL_COND_V_MSG(p_format.array_layers < 1, RID(),
"Amount of layers must be equal or greater than 1 for arrays and cubemaps.");
- ERR_FAIL_COND_V_MSG((p_format.type == TEXTURE_TYPE_CUBE_ARRAY || p_format.type == TEXTURE_TYPE_CUBE) && (p_format.array_layers % 6) != 0, RID(),
+ ERR_FAIL_COND_V_MSG((p_format.texture_type == TEXTURE_TYPE_CUBE_ARRAY || p_format.texture_type == TEXTURE_TYPE_CUBE) && (p_format.array_layers % 6) != 0, RID(),
"Cubemap and cubemap array textures must provide a layer number that is multiple of 6");
image_create_info.arrayLayers = p_format.array_layers;
} else {
@@ -1858,7 +1892,7 @@ RID RenderingDeviceVulkan::texture_create(const TextureFormat &p_format, const T
VkResult err = vmaCreateImage(allocator, &image_create_info, &allocInfo, &texture.image, &texture.allocation, &texture.allocation_info);
ERR_FAIL_COND_V_MSG(err, RID(), "vmaCreateImage failed with error " + itos(err) + ".");
- texture.type = p_format.type;
+ texture.type = p_format.texture_type;
texture.format = p_format.format;
texture.width = image_create_info.extent.width;
texture.height = image_create_info.extent.height;
@@ -1926,7 +1960,7 @@ RID RenderingDeviceVulkan::texture_create(const TextureFormat &p_format, const T
VK_IMAGE_VIEW_TYPE_CUBE_ARRAY,
};
- image_view_create_info.viewType = view_types[p_format.type];
+ image_view_create_info.viewType = view_types[p_format.texture_type];
if (p_view.format_override == DATA_FORMAT_MAX) {
image_view_create_info.format = image_create_info.format;
} else {
@@ -2065,6 +2099,48 @@ RID RenderingDeviceVulkan::texture_create_shared(const TextureView &p_view, RID
image_view_create_info.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
}
+ VkImageViewUsageCreateInfo usage_info;
+ usage_info.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_USAGE_CREATE_INFO;
+ usage_info.pNext = nullptr;
+ if (p_view.format_override != DATA_FORMAT_MAX) {
+ // Need to validate the view usage against what Vulkan allows for the format override.
+
+ usage_info.usage = 0;
+
+ if (texture.usage_flags & TEXTURE_USAGE_SAMPLING_BIT) {
+ usage_info.usage |= VK_IMAGE_USAGE_SAMPLED_BIT;
+ }
+
+ if (texture.usage_flags & TEXTURE_USAGE_STORAGE_BIT) {
+ if (texture_is_format_supported_for_usage(p_view.format_override, TEXTURE_USAGE_STORAGE_BIT)) {
+ usage_info.usage |= VK_IMAGE_USAGE_STORAGE_BIT;
+ }
+ }
+
+ if (texture.usage_flags & TEXTURE_USAGE_COLOR_ATTACHMENT_BIT) {
+ if (texture_is_format_supported_for_usage(p_view.format_override, TEXTURE_USAGE_COLOR_ATTACHMENT_BIT)) {
+ usage_info.usage |= VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
+ }
+ }
+
+ if (texture.usage_flags & TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) {
+ usage_info.usage |= VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT;
+ }
+
+ if (texture.usage_flags & TEXTURE_USAGE_CAN_UPDATE_BIT) {
+ usage_info.usage |= VK_IMAGE_USAGE_TRANSFER_DST_BIT;
+ }
+ if (texture.usage_flags & TEXTURE_USAGE_CAN_COPY_FROM_BIT) {
+ usage_info.usage |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
+ }
+
+ if (texture.usage_flags & TEXTURE_USAGE_CAN_COPY_TO_BIT) {
+ usage_info.usage |= VK_IMAGE_USAGE_TRANSFER_DST_BIT;
+ }
+
+ image_view_create_info.pNext = &usage_info;
+ }
+
VkResult err = vkCreateImageView(device, &image_view_create_info, nullptr, &texture.view);
ERR_FAIL_COND_V_MSG(err, RID(), "vkCreateImageView failed with error " + itos(err) + ".");
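The VkImageViewUsageCreateInfo chain above exists because a format override may not support every usage flag the parent image was created with. A sketch of the underlying Vulkan pattern (core 1.1 / VK_KHR_maintenance2; the concrete formats are an illustrative assumption):

    // An R8G8B8A8_UNORM storage image aliased with an sRGB view: *_SRGB
    // formats generally lack storage-image support, so the view's usage is
    // narrowed to the subset that is valid for the override format.
    VkImageViewUsageCreateInfo usage_info = {};
    usage_info.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_USAGE_CREATE_INFO;
    usage_info.usage = VK_IMAGE_USAGE_SAMPLED_BIT; // no STORAGE for the sRGB alias
    image_view_create_info.pNext = &usage_info;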
@@ -2093,15 +2169,26 @@ RID RenderingDeviceVulkan::texture_create_shared_from_slice(const TextureView &p
ERR_FAIL_COND_V_MSG(p_slice_type == TEXTURE_SLICE_3D && src_texture->type != TEXTURE_TYPE_3D, RID(),
"Can only create a 3D slice from a 3D texture");
+ ERR_FAIL_COND_V_MSG(p_slice_type == TEXTURE_SLICE_2D_ARRAY && (src_texture->type != TEXTURE_TYPE_2D_ARRAY), RID(),
+ "Can only create an array slice from a 2D array mipmap");
+
//create view
ERR_FAIL_UNSIGNED_INDEX_V(p_mipmap, src_texture->mipmaps, RID());
ERR_FAIL_UNSIGNED_INDEX_V(p_layer, src_texture->layers, RID());
+ int slice_layers = 1;
+ if (p_slice_type == TEXTURE_SLICE_2D_ARRAY) {
+ ERR_FAIL_COND_V_MSG(p_layer != 0, RID(), "layer must be 0 when obtaining a 2D array mipmap slice");
+ slice_layers = src_texture->layers;
+ } else if (p_slice_type == TEXTURE_SLICE_CUBEMAP) {
+ slice_layers = 6;
+ }
+
Texture texture = *src_texture;
get_image_format_required_size(texture.format, texture.width, texture.height, texture.depth, p_mipmap + 1, &texture.width, &texture.height);
texture.mipmaps = 1;
- texture.layers = p_slice_type == TEXTURE_SLICE_CUBEMAP ? 6 : 1;
+ texture.layers = slice_layers;
texture.base_mipmap = p_mipmap;
texture.base_layer = p_layer;
@@ -2121,7 +2208,16 @@ RID RenderingDeviceVulkan::texture_create_shared_from_slice(const TextureView &p
VK_IMAGE_VIEW_TYPE_2D,
};
- image_view_create_info.viewType = p_slice_type == TEXTURE_SLICE_CUBEMAP ? VK_IMAGE_VIEW_TYPE_CUBE : (p_slice_type == TEXTURE_SLICE_3D ? VK_IMAGE_VIEW_TYPE_3D : view_types[texture.type]);
+ image_view_create_info.viewType = view_types[texture.type];
+
+ if (p_slice_type == TEXTURE_SLICE_CUBEMAP) {
+ image_view_create_info.viewType = VK_IMAGE_VIEW_TYPE_CUBE;
+ } else if (p_slice_type == TEXTURE_SLICE_3D) {
+ image_view_create_info.viewType = VK_IMAGE_VIEW_TYPE_3D;
+ } else if (p_slice_type == TEXTURE_SLICE_2D_ARRAY) {
+ image_view_create_info.viewType = VK_IMAGE_VIEW_TYPE_2D_ARRAY;
+ }
+
if (p_view.format_override == DATA_FORMAT_MAX || p_view.format_override == texture.format) {
image_view_create_info.format = vulkan_formats[texture.format];
} else {
@@ -2155,7 +2251,7 @@ RID RenderingDeviceVulkan::texture_create_shared_from_slice(const TextureView &p
}
image_view_create_info.subresourceRange.baseMipLevel = p_mipmap;
image_view_create_info.subresourceRange.levelCount = 1;
- image_view_create_info.subresourceRange.layerCount = p_slice_type == TEXTURE_SLICE_CUBEMAP ? 6 : 1;
+ image_view_create_info.subresourceRange.layerCount = slice_layers;
image_view_create_info.subresourceRange.baseArrayLayer = p_layer;
if (texture.usage_flags & TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) {
@@ -2174,11 +2270,11 @@ RID RenderingDeviceVulkan::texture_create_shared_from_slice(const TextureView &p
return id;
}
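With TEXTURE_SLICE_2D_ARRAY, one mip level of an entire 2D array can now be exposed as its own array view. A usage sketch (rd and source_2d_array are hypothetical placeholders):

    // View mip level 1 across all layers of a 2D array texture. p_layer must
    // be 0 for this slice type, and the view spans the full layer count.
    RID mip1_view = rd->texture_create_shared_from_slice(
            RenderingDevice::TextureView(), source_2d_array,
            0 /* p_layer */, 1 /* p_mipmap */,
            RenderingDevice::TEXTURE_SLICE_2D_ARRAY);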
-Error RenderingDeviceVulkan::texture_update(RID p_texture, uint32_t p_layer, const Vector<uint8_t> &p_data, bool p_sync_with_draw) {
+Error RenderingDeviceVulkan::texture_update(RID p_texture, uint32_t p_layer, const Vector<uint8_t> &p_data, uint32_t p_post_barrier) {
_THREAD_SAFE_METHOD_
- ERR_FAIL_COND_V_MSG(draw_list && p_sync_with_draw, ERR_INVALID_PARAMETER,
- "Updating textures in 'sync to draw' mode is forbidden during creation of a draw list");
+ ERR_FAIL_COND_V_MSG(draw_list || compute_list, ERR_INVALID_PARAMETER,
+ "Updating textures in is forbidden during creation of a draw or compute list");
Texture *texture = texture_owner.getornull(p_texture);
ERR_FAIL_COND_V(!texture, ERR_INVALID_PARAMETER);
@@ -2219,7 +2315,7 @@ Error RenderingDeviceVulkan::texture_update(RID p_texture, uint32_t p_layer, con
const uint8_t *r = p_data.ptr();
- VkCommandBuffer command_buffer = p_sync_with_draw ? frames[frame].draw_command_buffer : frames[frame].setup_command_buffer;
+ VkCommandBuffer command_buffer = p_post_barrier ? frames[frame].draw_command_buffer : frames[frame].setup_command_buffer;
//barrier to transfer
{
@@ -2244,6 +2340,10 @@ Error RenderingDeviceVulkan::texture_update(RID p_texture, uint32_t p_layer, con
}
uint32_t mipmap_offset = 0;
+
+ uint32_t logic_width = texture->width;
+ uint32_t logic_height = texture->height;
+
for (uint32_t mm_i = 0; mm_i < texture->mipmaps; mm_i++) {
uint32_t depth;
uint32_t image_total = get_image_format_required_size(texture->format, texture->width, texture->height, texture->depth, mm_i + 1, &width, &height, &depth);
@@ -2260,12 +2360,15 @@ Error RenderingDeviceVulkan::texture_update(RID p_texture, uint32_t p_layer, con
uint32_t region_w = MIN(region_size, width - x);
uint32_t region_h = MIN(region_size, height - y);
+ uint32_t region_logic_w = MIN(region_size, logic_width - x);
+ uint32_t region_logic_h = MIN(region_size, logic_height - y);
+
uint32_t pixel_size = get_image_format_pixel_size(texture->format);
uint32_t to_allocate = region_w * region_h * pixel_size;
to_allocate >>= get_compressed_image_format_pixel_rshift(texture->format);
uint32_t alloc_offset, alloc_size;
- Error err = _staging_buffer_allocate(to_allocate, required_align, alloc_offset, alloc_size, false, p_sync_with_draw);
+ Error err = _staging_buffer_allocate(to_allocate, required_align, alloc_offset, alloc_size, false, p_post_barrier);
ERR_FAIL_COND_V(err, ERR_CANT_CREATE);
uint8_t *write_ptr;
@@ -2341,8 +2444,8 @@ Error RenderingDeviceVulkan::texture_update(RID p_texture, uint32_t p_layer, con
buffer_image_copy.imageOffset.y = y;
buffer_image_copy.imageOffset.z = z;
- buffer_image_copy.imageExtent.width = region_w;
- buffer_image_copy.imageExtent.height = region_h;
+ buffer_image_copy.imageExtent.width = region_logic_w;
+ buffer_image_copy.imageExtent.height = region_logic_h;
buffer_image_copy.imageExtent.depth = 1;
vkCmdCopyBufferToImage(command_buffer, staging_buffer_blocks[staging_buffer_current].buffer, texture->image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &buffer_image_copy);
@@ -2353,15 +2456,32 @@ Error RenderingDeviceVulkan::texture_update(RID p_texture, uint32_t p_layer, con
}
mipmap_offset = image_total;
+ logic_width = MAX(1, logic_width >> 1);
+ logic_height = MAX(1, logic_height >> 1);
}
//barrier to restore layout
{
+ uint32_t barrier_flags = 0;
+ uint32_t access_flags = 0;
+ if (p_post_barrier & BARRIER_MASK_COMPUTE) {
+ barrier_flags |= VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
+ access_flags |= VK_ACCESS_SHADER_READ_BIT;
+ }
+ if (p_post_barrier & BARRIER_MASK_RASTER) {
+ barrier_flags |= VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
+ access_flags |= VK_ACCESS_SHADER_READ_BIT;
+ }
+ if (p_post_barrier & BARRIER_MASK_TRANSFER) {
+ barrier_flags |= VK_PIPELINE_STAGE_TRANSFER_BIT;
+ access_flags |= VK_ACCESS_TRANSFER_WRITE_BIT;
+ }
+
VkImageMemoryBarrier image_memory_barrier;
image_memory_barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
image_memory_barrier.pNext = nullptr;
image_memory_barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
- image_memory_barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
+ image_memory_barrier.dstAccessMask = access_flags;
image_memory_barrier.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
image_memory_barrier.newLayout = texture->layout;
image_memory_barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
@@ -2373,7 +2493,7 @@ Error RenderingDeviceVulkan::texture_update(RID p_texture, uint32_t p_layer, con
image_memory_barrier.subresourceRange.baseArrayLayer = p_layer;
image_memory_barrier.subresourceRange.layerCount = 1;
- vkCmdPipelineBarrier(command_buffer, VK_ACCESS_TRANSFER_WRITE_BIT, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, 0, 0, nullptr, 0, nullptr, 1, &image_memory_barrier);
+ vkCmdPipelineBarrier(command_buffer, VK_ACCESS_TRANSFER_WRITE_BIT, barrier_flags, 0, 0, nullptr, 0, nullptr, 1, &image_memory_barrier);
}
return OK;
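The new logic_width/logic_height chain fixes uploads of block-compressed mip tails: get_image_format_required_size reports block-clamped dimensions, while the extent handed to vkCmdCopyBufferToImage must be the true mip size. A worked sketch of the two chains (MAX is Godot's macro; the 4x4 block size is an illustrative assumption):

    // For a 16x16 texture with 4x4 blocks, the data-size chain clamps to the
    // block (16, 8, 4, 4, 4) while the logical chain halves down to 1
    // (16, 8, 4, 2, 1); staging allocation uses the former, imageExtent the latter.
    uint32_t data_w = 16, logic_w = 16;
    for (int mip = 1; mip < 5; mip++) {
        data_w = MAX(4u, data_w >> 1); // block-clamped, drives allocation
        logic_w = MAX(1u, logic_w >> 1); // true mip size, drives imageExtent
    }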
@@ -2586,13 +2706,13 @@ bool RenderingDeviceVulkan::texture_is_valid(RID p_texture) {
return texture_owner.owns(p_texture);
}
-Error RenderingDeviceVulkan::texture_copy(RID p_from_texture, RID p_to_texture, const Vector3 &p_from, const Vector3 &p_to, const Vector3 &p_size, uint32_t p_src_mipmap, uint32_t p_dst_mipmap, uint32_t p_src_layer, uint32_t p_dst_layer, bool p_sync_with_draw) {
+Error RenderingDeviceVulkan::texture_copy(RID p_from_texture, RID p_to_texture, const Vector3 &p_from, const Vector3 &p_to, const Vector3 &p_size, uint32_t p_src_mipmap, uint32_t p_dst_mipmap, uint32_t p_src_layer, uint32_t p_dst_layer, uint32_t p_post_barrier) {
_THREAD_SAFE_METHOD_
Texture *src_tex = texture_owner.getornull(p_from_texture);
ERR_FAIL_COND_V(!src_tex, ERR_INVALID_PARAMETER);
- ERR_FAIL_COND_V_MSG(p_sync_with_draw && src_tex->bound, ERR_INVALID_PARAMETER,
+ ERR_FAIL_COND_V_MSG(src_tex->bound, ERR_INVALID_PARAMETER,
"Source texture can't be copied while a render pass that uses it is being created. Ensure render pass is finalized (and that it was created with RENDER_PASS_CONTENTS_FINISH) to unbind this texture.");
ERR_FAIL_COND_V_MSG(!(src_tex->usage_flags & TEXTURE_USAGE_CAN_COPY_FROM_BIT), ERR_INVALID_PARAMETER,
"Source texture requires the TEXTURE_USAGE_CAN_COPY_FROM_BIT in order to be retrieved.");
@@ -2613,7 +2733,7 @@ Error RenderingDeviceVulkan::texture_copy(RID p_from_texture, RID p_to_texture,
Texture *dst_tex = texture_owner.getornull(p_to_texture);
ERR_FAIL_COND_V(!dst_tex, ERR_INVALID_PARAMETER);
- ERR_FAIL_COND_V_MSG(p_sync_with_draw && dst_tex->bound, ERR_INVALID_PARAMETER,
+ ERR_FAIL_COND_V_MSG(dst_tex->bound, ERR_INVALID_PARAMETER,
"Destination texture can't be copied while a render pass that uses it is being created. Ensure render pass is finalized (and that it was created with RENDER_PASS_CONTENTS_FINISH) to unbind this texture.");
ERR_FAIL_COND_V_MSG(!(dst_tex->usage_flags & TEXTURE_USAGE_CAN_COPY_TO_BIT), ERR_INVALID_PARAMETER,
"Destination texture requires the TEXTURE_USAGE_CAN_COPY_TO_BIT in order to be retrieved.");
@@ -2634,7 +2754,7 @@ Error RenderingDeviceVulkan::texture_copy(RID p_from_texture, RID p_to_texture,
ERR_FAIL_COND_V_MSG(src_tex->read_aspect_mask != dst_tex->read_aspect_mask, ERR_INVALID_PARAMETER,
"Source and destination texture must be of the same type (color or depth).");
- VkCommandBuffer command_buffer = p_sync_with_draw ? frames[frame].draw_command_buffer : frames[frame].setup_command_buffer;
+ VkCommandBuffer command_buffer = frames[frame].draw_command_buffer;
{
//PRE Copy the image
@@ -2709,12 +2829,27 @@ Error RenderingDeviceVulkan::texture_copy(RID p_from_texture, RID p_to_texture,
// RESTORE LAYOUT for SRC and DST
+ uint32_t barrier_flags = 0;
+ uint32_t access_flags = 0;
+ if (p_post_barrier & BARRIER_MASK_COMPUTE) {
+ barrier_flags |= VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
+ access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
+ }
+ if (p_post_barrier & BARRIER_MASK_RASTER) {
+ barrier_flags |= VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
+ access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
+ }
+ if (p_post_barrier & BARRIER_MASK_TRANSFER) {
+ barrier_flags |= VK_PIPELINE_STAGE_TRANSFER_BIT;
+ access_flags |= VK_ACCESS_TRANSFER_WRITE_BIT;
+ }
+
{ //restore src
VkImageMemoryBarrier image_memory_barrier;
image_memory_barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
image_memory_barrier.pNext = nullptr;
image_memory_barrier.srcAccessMask = VK_ACCESS_TRANSFER_READ_BIT;
- image_memory_barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
+ image_memory_barrier.dstAccessMask = access_flags;
image_memory_barrier.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL;
image_memory_barrier.newLayout = src_tex->layout;
image_memory_barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
@@ -2726,7 +2861,7 @@ Error RenderingDeviceVulkan::texture_copy(RID p_from_texture, RID p_to_texture,
image_memory_barrier.subresourceRange.baseArrayLayer = p_src_layer;
image_memory_barrier.subresourceRange.layerCount = 1;
- vkCmdPipelineBarrier(command_buffer, VK_ACCESS_TRANSFER_WRITE_BIT, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, 0, 0, nullptr, 0, nullptr, 1, &image_memory_barrier);
+ vkCmdPipelineBarrier(command_buffer, VK_ACCESS_TRANSFER_WRITE_BIT, barrier_flags, 0, 0, nullptr, 0, nullptr, 1, &image_memory_barrier);
}
{ //make dst readable
@@ -2735,7 +2870,7 @@ Error RenderingDeviceVulkan::texture_copy(RID p_from_texture, RID p_to_texture,
image_memory_barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
image_memory_barrier.pNext = nullptr;
image_memory_barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
- image_memory_barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
+ image_memory_barrier.dstAccessMask = access_flags;
image_memory_barrier.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
image_memory_barrier.newLayout = dst_tex->layout;
@@ -2748,20 +2883,20 @@ Error RenderingDeviceVulkan::texture_copy(RID p_from_texture, RID p_to_texture,
image_memory_barrier.subresourceRange.baseArrayLayer = p_src_layer;
image_memory_barrier.subresourceRange.layerCount = 1;
- vkCmdPipelineBarrier(command_buffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, 0, 0, nullptr, 0, nullptr, 1, &image_memory_barrier);
+ vkCmdPipelineBarrier(command_buffer, VK_PIPELINE_STAGE_TRANSFER_BIT, barrier_flags, 0, 0, nullptr, 0, nullptr, 1, &image_memory_barrier);
}
}
return OK;
}
-Error RenderingDeviceVulkan::texture_resolve_multisample(RID p_from_texture, RID p_to_texture, bool p_sync_with_draw) {
+Error RenderingDeviceVulkan::texture_resolve_multisample(RID p_from_texture, RID p_to_texture, uint32_t p_post_barrier) {
_THREAD_SAFE_METHOD_
Texture *src_tex = texture_owner.getornull(p_from_texture);
ERR_FAIL_COND_V(!src_tex, ERR_INVALID_PARAMETER);
- ERR_FAIL_COND_V_MSG(p_sync_with_draw && src_tex->bound, ERR_INVALID_PARAMETER,
+ ERR_FAIL_COND_V_MSG(src_tex->bound, ERR_INVALID_PARAMETER,
"Source texture can't be copied while a render pass that uses it is being created. Ensure render pass is finalized (and that it was created with RENDER_PASS_CONTENTS_FINISH) to unbind this texture.");
ERR_FAIL_COND_V_MSG(!(src_tex->usage_flags & TEXTURE_USAGE_CAN_COPY_FROM_BIT), ERR_INVALID_PARAMETER,
"Source texture requires the TEXTURE_USAGE_CAN_COPY_FROM_BIT in order to be retrieved.");
@@ -2772,7 +2907,7 @@ Error RenderingDeviceVulkan::texture_resolve_multisample(RID p_from_texture, RID
Texture *dst_tex = texture_owner.getornull(p_to_texture);
ERR_FAIL_COND_V(!dst_tex, ERR_INVALID_PARAMETER);
- ERR_FAIL_COND_V_MSG(p_sync_with_draw && dst_tex->bound, ERR_INVALID_PARAMETER,
+ ERR_FAIL_COND_V_MSG(dst_tex->bound, ERR_INVALID_PARAMETER,
"Destination texture can't be copied while a render pass that uses it is being created. Ensure render pass is finalized (and that it was created with RENDER_PASS_CONTENTS_FINISH) to unbind this texture.");
ERR_FAIL_COND_V_MSG(!(dst_tex->usage_flags & TEXTURE_USAGE_CAN_COPY_TO_BIT), ERR_INVALID_PARAMETER,
"Destination texture requires the TEXTURE_USAGE_CAN_COPY_TO_BIT in order to be retrieved.");
@@ -2786,7 +2921,7 @@ Error RenderingDeviceVulkan::texture_resolve_multisample(RID p_from_texture, RID
ERR_FAIL_COND_V_MSG(src_tex->read_aspect_mask != dst_tex->read_aspect_mask, ERR_INVALID_PARAMETER,
"Source and destination texture must be of the same type (color or depth).");
- VkCommandBuffer command_buffer = p_sync_with_draw ? frames[frame].draw_command_buffer : frames[frame].setup_command_buffer;
+ VkCommandBuffer command_buffer = frames[frame].draw_command_buffer;
{
//PRE Copy the image
@@ -2861,12 +2996,27 @@ Error RenderingDeviceVulkan::texture_resolve_multisample(RID p_from_texture, RID
// RESTORE LAYOUT for SRC and DST
+ uint32_t barrier_flags = 0;
+ uint32_t access_flags = 0;
+ if (p_post_barrier & BARRIER_MASK_COMPUTE) {
+ barrier_flags |= VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
+ access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
+ }
+ if (p_post_barrier & BARRIER_MASK_RASTER) {
+ barrier_flags |= VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
+ access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
+ }
+ if (p_post_barrier & BARRIER_MASK_TRANSFER) {
+ barrier_flags |= VK_PIPELINE_STAGE_TRANSFER_BIT;
+ access_flags |= VK_ACCESS_TRANSFER_WRITE_BIT;
+ }
+
{ //restore src
VkImageMemoryBarrier image_memory_barrier;
image_memory_barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
image_memory_barrier.pNext = nullptr;
image_memory_barrier.srcAccessMask = VK_ACCESS_TRANSFER_READ_BIT;
- image_memory_barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
+ image_memory_barrier.dstAccessMask = access_flags;
image_memory_barrier.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL;
image_memory_barrier.newLayout = src_tex->layout;
image_memory_barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
@@ -2878,7 +3028,7 @@ Error RenderingDeviceVulkan::texture_resolve_multisample(RID p_from_texture, RID
image_memory_barrier.subresourceRange.baseArrayLayer = src_tex->base_layer;
image_memory_barrier.subresourceRange.layerCount = 1;
- vkCmdPipelineBarrier(command_buffer, VK_ACCESS_TRANSFER_WRITE_BIT, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, 0, 0, nullptr, 0, nullptr, 1, &image_memory_barrier);
+ vkCmdPipelineBarrier(command_buffer, VK_ACCESS_TRANSFER_WRITE_BIT, barrier_flags, 0, 0, nullptr, 0, nullptr, 1, &image_memory_barrier);
}
{ //make dst readable
@@ -2887,7 +3037,7 @@ Error RenderingDeviceVulkan::texture_resolve_multisample(RID p_from_texture, RID
image_memory_barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
image_memory_barrier.pNext = nullptr;
image_memory_barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
- image_memory_barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
+ image_memory_barrier.dstAccessMask = access_flags;
image_memory_barrier.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
image_memory_barrier.newLayout = dst_tex->layout;
@@ -2900,20 +3050,20 @@ Error RenderingDeviceVulkan::texture_resolve_multisample(RID p_from_texture, RID
image_memory_barrier.subresourceRange.baseArrayLayer = dst_tex->base_layer;
image_memory_barrier.subresourceRange.layerCount = 1;
- vkCmdPipelineBarrier(command_buffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, 0, 0, nullptr, 0, nullptr, 1, &image_memory_barrier);
+ vkCmdPipelineBarrier(command_buffer, VK_PIPELINE_STAGE_TRANSFER_BIT, barrier_flags, 0, 0, nullptr, 0, nullptr, 1, &image_memory_barrier);
}
}
return OK;
}
-Error RenderingDeviceVulkan::texture_clear(RID p_texture, const Color &p_color, uint32_t p_base_mipmap, uint32_t p_mipmaps, uint32_t p_base_layer, uint32_t p_layers, bool p_sync_with_draw) {
+Error RenderingDeviceVulkan::texture_clear(RID p_texture, const Color &p_color, uint32_t p_base_mipmap, uint32_t p_mipmaps, uint32_t p_base_layer, uint32_t p_layers, uint32_t p_post_barrier) {
_THREAD_SAFE_METHOD_
Texture *src_tex = texture_owner.getornull(p_texture);
ERR_FAIL_COND_V(!src_tex, ERR_INVALID_PARAMETER);
- ERR_FAIL_COND_V_MSG(p_sync_with_draw && src_tex->bound, ERR_INVALID_PARAMETER,
+ ERR_FAIL_COND_V_MSG(src_tex->bound, ERR_INVALID_PARAMETER,
"Source texture can't be cleared while a render pass that uses it is being created. Ensure render pass is finalized (and that it was created with RENDER_PASS_CONTENTS_FINISH) to unbind this texture.");
ERR_FAIL_COND_V(p_layers == 0, ERR_INVALID_PARAMETER);
@@ -2930,7 +3080,7 @@ Error RenderingDeviceVulkan::texture_clear(RID p_texture, const Color &p_color,
ERR_FAIL_COND_V(p_base_mipmap + p_mipmaps > src_tex->mipmaps, ERR_INVALID_PARAMETER);
ERR_FAIL_COND_V(p_base_layer + p_layers > src_layer_count, ERR_INVALID_PARAMETER);
- VkCommandBuffer command_buffer = p_sync_with_draw ? frames[frame].draw_command_buffer : frames[frame].setup_command_buffer;
+ VkCommandBuffer command_buffer = frames[frame].draw_command_buffer;
VkImageLayout clear_layout = (src_tex->layout == VK_IMAGE_LAYOUT_GENERAL) ? VK_IMAGE_LAYOUT_GENERAL : VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
@@ -2977,11 +3127,27 @@ Error RenderingDeviceVulkan::texture_clear(RID p_texture, const Color &p_color,
vkCmdClearColorImage(command_buffer, src_tex->image, clear_layout, &clear_color, 1, &range);
{ // Barrier to post clear accesses (changing back the layout if needed)
+
+ uint32_t barrier_flags = 0;
+ uint32_t access_flags = 0;
+ if (p_post_barrier & BARRIER_MASK_COMPUTE) {
+ barrier_flags |= VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
+ access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
+ }
+ if (p_post_barrier & BARRIER_MASK_RASTER) {
+ barrier_flags |= VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
+ access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
+ }
+ if (p_post_barrier & BARRIER_MASK_TRANSFER) {
+ barrier_flags |= VK_PIPELINE_STAGE_TRANSFER_BIT;
+ access_flags |= VK_ACCESS_TRANSFER_WRITE_BIT;
+ }
+
VkImageMemoryBarrier image_memory_barrier;
image_memory_barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
image_memory_barrier.pNext = nullptr;
image_memory_barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
- image_memory_barrier.dstAccessMask = valid_texture_access;
+ image_memory_barrier.dstAccessMask = access_flags;
image_memory_barrier.oldLayout = clear_layout;
image_memory_barrier.newLayout = src_tex->layout;
@@ -2994,7 +3160,7 @@ Error RenderingDeviceVulkan::texture_clear(RID p_texture, const Color &p_color,
image_memory_barrier.subresourceRange.baseArrayLayer = src_tex->base_layer + p_base_layer;
image_memory_barrier.subresourceRange.layerCount = p_layers;
- vkCmdPipelineBarrier(command_buffer, VK_PIPELINE_STAGE_TRANSFER_BIT, valid_texture_stages, 0, 0, nullptr, 0, nullptr, 1, &image_memory_barrier);
+ vkCmdPipelineBarrier(command_buffer, VK_PIPELINE_STAGE_TRANSFER_BIT, barrier_flags, 0, 0, nullptr, 0, nullptr, 1, &image_memory_barrier);
}
return OK;
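texture_clear now always records into the frame's draw command buffer and takes the same post-barrier mask. Caller sketch (rd, tex and the layer count are hypothetical):

    // Clear the first mip of every layer, synchronizing only against later
    // raster-stage reads of the texture.
    rd->texture_clear(tex, Color(0, 0, 0, 0), 0 /* base mip */, 1 /* mips */,
            0 /* base layer */, layers, RenderingDevice::BARRIER_MASK_RASTER);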
@@ -3082,6 +3248,7 @@ VkRenderPass RenderingDeviceVulkan::_render_pass_create(const Vector<AttachmentF
// the read. If this is a performance issue, one could track the actual last accessor of each resource, adding only that
// stage
switch (is_depth_stencil ? p_initial_depth_action : p_initial_color_action) {
+ case INITIAL_ACTION_CLEAR_REGION:
case INITIAL_ACTION_CLEAR: {
description.loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;
description.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;
@@ -3094,9 +3261,9 @@ VkRenderPass RenderingDeviceVulkan::_render_pass_create(const Vector<AttachmentF
description.initialLayout = is_sampled ? VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL : (is_storage ? VK_IMAGE_LAYOUT_GENERAL : VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
description.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
} else if (p_format[i].usage_flags & TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) {
- description.loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;
- description.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; //don't care what is there
- description.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;
+ description.loadOp = VK_ATTACHMENT_LOAD_OP_LOAD;
+ description.initialLayout = is_sampled ? VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL : (is_storage ? VK_IMAGE_LAYOUT_GENERAL : VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL);
+ description.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_LOAD;
dependency_from_external.srcStageMask |= reading_stages;
} else {
description.loadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
@@ -3301,11 +3468,8 @@ RenderingDevice::FramebufferFormatID RenderingDeviceVulkan::framebuffer_format_c
return id;
}
-RenderingDevice::FramebufferFormatID RenderingDeviceVulkan::framebuffer_format_create_empty(const Size2i &p_size) {
- ERR_FAIL_COND_V(p_size.width <= 0 || p_size.height <= 0, INVALID_FORMAT_ID);
-
+RenderingDevice::FramebufferFormatID RenderingDeviceVulkan::framebuffer_format_create_empty(TextureSamples p_samples) {
FramebufferFormatKey key;
- key.empty_size = p_size;
const Map<FramebufferFormatKey, FramebufferFormatID>::Element *E = framebuffer_format_cache.find(key);
if (E) {
@@ -3353,7 +3517,7 @@ RenderingDevice::FramebufferFormatID RenderingDeviceVulkan::framebuffer_format_c
fb_format.E = E;
fb_format.color_attachments = 0;
fb_format.render_pass = render_pass;
- fb_format.samples = TEXTURE_SAMPLES_1;
+ fb_format.samples = p_samples;
framebuffer_formats[id] = fb_format;
return id;
}
@@ -3369,10 +3533,10 @@ RenderingDevice::TextureSamples RenderingDeviceVulkan::framebuffer_format_get_te
/**** RENDER TARGET ****/
/***********************/
-RID RenderingDeviceVulkan::framebuffer_create_empty(const Size2i &p_size, FramebufferFormatID p_format_check) {
+RID RenderingDeviceVulkan::framebuffer_create_empty(const Size2i &p_size, TextureSamples p_samples, FramebufferFormatID p_format_check) {
_THREAD_SAFE_METHOD_
Framebuffer framebuffer;
- framebuffer.format_id = framebuffer_format_create_empty(p_size);
+ framebuffer.format_id = framebuffer_format_create_empty(p_samples);
ERR_FAIL_COND_V(p_format_check != INVALID_FORMAT_ID && framebuffer.format_id != p_format_check, RID());
framebuffer.size = p_size;
@@ -3484,7 +3648,7 @@ RID RenderingDeviceVulkan::sampler_create(const SamplerState &p_state) {
/**** VERTEX ARRAY ****/
/**********************/
-RID RenderingDeviceVulkan::vertex_buffer_create(uint32_t p_size_bytes, const Vector<uint8_t> &p_data) {
+RID RenderingDeviceVulkan::vertex_buffer_create(uint32_t p_size_bytes, const Vector<uint8_t> &p_data, bool p_use_as_storage) {
_THREAD_SAFE_METHOD_
ERR_FAIL_COND_V(p_data.size() && (uint32_t)p_data.size() != p_size_bytes, RID());
@@ -3493,8 +3657,12 @@ RID RenderingDeviceVulkan::vertex_buffer_create(uint32_t p_size_bytes, const Vec
ERR_FAIL_COND_V_MSG(compute_list != nullptr && p_data.size(), RID(),
"Creating buffers with data is forbidden during creation of a draw list");
+ uint32_t usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_VERTEX_BUFFER_BIT;
+ if (p_use_as_storage) {
+ usage |= VK_BUFFER_USAGE_STORAGE_BUFFER_BIT;
+ }
Buffer buffer;
- _buffer_allocate(&buffer, p_size_bytes, VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_VERTEX_BUFFER_BIT, VMA_MEMORY_USAGE_GPU_ONLY);
+ _buffer_allocate(&buffer, p_size_bytes, usage, VMA_MEMORY_USAGE_GPU_ONLY);
if (p_data.size()) {
uint64_t data_size = p_data.size();
const uint8_t *r = p_data.ptr();
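The new p_use_as_storage flag adds VK_BUFFER_USAGE_STORAGE_BUFFER_BIT so the same allocation can be written from compute and later consumed by the vertex input stage. Usage sketch (rd and vertex_data are hypothetical):

    // A vertex buffer that a compute skinning pass may also bind as a storage
    // buffer; without the flag, uniform_set_create rejects it further down.
    RID skinned_vb = rd->vertex_buffer_create(vertex_data.size(), vertex_data,
            true /* p_use_as_storage */);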
@@ -3528,7 +3696,7 @@ RenderingDevice::VertexFormatID RenderingDeviceVulkan::vertex_format_create(cons
ERR_FAIL_COND_V(used_locations.has(p_vertex_formats[i].location), INVALID_ID);
ERR_FAIL_COND_V_MSG(get_format_vertex_size(p_vertex_formats[i].format) == 0, INVALID_ID,
- "Data format for attachment (" + itos(i) + ") is not valid for a vertex array.");
+ "Data format for attachment (" + itos(i) + "), '" + named_formats[p_vertex_formats[i].format] + "', is not valid for a vertex array.");
vdcache.bindings[i].binding = i;
vdcache.bindings[i].stride = p_vertex_formats[i].stride;
@@ -3733,13 +3901,11 @@ String RenderingDeviceVulkan::_shader_uniform_debug(RID p_shader, int p_set) {
}
#if 0
bool RenderingDeviceVulkan::_uniform_add_binding(Vector<Vector<VkDescriptorSetLayoutBinding> > &bindings, Vector<Vector<UniformInfo> > &uniform_infos, const glslang::TObjectReflection &reflection, RenderingDevice::ShaderStage p_stage, Shader::PushConstant &push_constant, String *r_error) {
-
VkDescriptorSetLayoutBinding layout_binding;
UniformInfo info;
switch (reflection.getType()->getBasicType()) {
case glslang::EbtSampler: {
-
//print_line("DEBUG: IsSampler");
if (reflection.getType()->getSampler().dim == glslang::EsdBuffer) {
//texture buffers
@@ -3837,13 +4003,10 @@ bool RenderingDeviceVulkan::_uniform_add_binding(Vector<Vector<VkDescriptorSetLa
} break;
/*case glslang::EbtReference: {
-
} break;*/
/*case glslang::EbtAtomicUint: {
-
} break;*/
default: {
-
if (reflection.getType()->getQualifier().hasOffset() || reflection.name.find(".") != std::string::npos) {
//member of uniform block?
return true;
@@ -4040,6 +4203,10 @@ RID RenderingDeviceVulkan::shader_create(const Vector<ShaderStageData> &p_stages
layout_binding.descriptorType = VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT;
info.type = UNIFORM_TYPE_INPUT_ATTACHMENT;
} break;
+ case SPV_REFLECT_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR: {
+ ERR_PRINT("Acceleration structure not supported.");
+ continue;
+ } break;
}
if (need_array_dimensions) {
@@ -4565,7 +4732,7 @@ void RenderingDeviceVulkan::_descriptor_pool_free(const DescriptorPoolKey &p_key
vkDestroyDescriptorPool(device, p_pool->pool, nullptr);
descriptor_pools[p_key].erase(p_pool);
memdelete(p_pool);
- if (descriptor_pools[p_key].empty()) {
+ if (descriptor_pools[p_key].is_empty()) {
descriptor_pools.erase(p_key);
}
}
@@ -4599,7 +4766,7 @@ RID RenderingDeviceVulkan::uniform_set_create(const Vector<Uniform> &p_uniforms,
List<Vector<VkBufferView>> buffer_views;
List<Vector<VkDescriptorImageInfo>> image_infos;
//used for verification to make sure a uniform set does not use a framebuffer bound texture
- Vector<RID> attachable_textures;
+ LocalVector<UniformSet::AttachableTexture> attachable_textures;
Vector<Texture *> mutable_sampled_textures;
Vector<Texture *> mutable_storage_textures;
@@ -4616,8 +4783,8 @@ RID RenderingDeviceVulkan::uniform_set_create(const Vector<Uniform> &p_uniforms,
const Uniform &uniform = uniforms[uniform_idx];
- ERR_FAIL_COND_V_MSG(uniform.type != set_uniform.type, RID(),
- "Mismatch uniform type for binding (" + itos(set_uniform.binding) + "), set (" + itos(p_shader_set) + "). Expected '" + shader_uniform_names[set_uniform.type] + "', supplied: '" + shader_uniform_names[uniform.type] + "'.");
+ ERR_FAIL_COND_V_MSG(uniform.uniform_type != set_uniform.type, RID(),
+ "Mismatch uniform type for binding (" + itos(set_uniform.binding) + "), set (" + itos(p_shader_set) + "). Expected '" + shader_uniform_names[set_uniform.type] + "', supplied: '" + shader_uniform_names[uniform.uniform_type] + "'.");
VkWriteDescriptorSet write; //common header
write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
@@ -4632,7 +4799,7 @@ RID RenderingDeviceVulkan::uniform_set_create(const Vector<Uniform> &p_uniforms,
write.pTexelBufferView = nullptr;
uint32_t type_size = 1;
- switch (uniform.type) {
+ switch (uniform.uniform_type) {
case UNIFORM_TYPE_SAMPLER: {
if (uniform.ids.size() != set_uniform.length) {
if (set_uniform.length > 1) {
@@ -4692,7 +4859,10 @@ RID RenderingDeviceVulkan::uniform_set_create(const Vector<Uniform> &p_uniforms,
img_info.imageView = texture->view;
if (texture->usage_flags & (TEXTURE_USAGE_COLOR_ATTACHMENT_BIT | TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT | TEXTURE_USAGE_RESOLVE_ATTACHMENT_BIT)) {
- attachable_textures.push_back(texture->owner.is_valid() ? texture->owner : uniform.ids[j + 1]);
+ UniformSet::AttachableTexture attachable_texture;
+ attachable_texture.bind = set_uniform.binding;
+ attachable_texture.texture = texture->owner.is_valid() ? texture->owner : uniform.ids[j + 1];
+ attachable_textures.push_back(attachable_texture);
}
if (texture->usage_flags & TEXTURE_USAGE_STORAGE_BIT) {
@@ -4742,7 +4912,10 @@ RID RenderingDeviceVulkan::uniform_set_create(const Vector<Uniform> &p_uniforms,
img_info.imageView = texture->view;
if (texture->usage_flags & (TEXTURE_USAGE_COLOR_ATTACHMENT_BIT | TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT | TEXTURE_USAGE_RESOLVE_ATTACHMENT_BIT)) {
- attachable_textures.push_back(texture->owner.is_valid() ? texture->owner : uniform.ids[j]);
+ UniformSet::AttachableTexture attachable_texture;
+ attachable_texture.bind = set_uniform.binding;
+ attachable_texture.texture = texture->owner.is_valid() ? texture->owner : uniform.ids[j];
+ attachable_textures.push_back(attachable_texture);
}
if (texture->usage_flags & TEXTURE_USAGE_STORAGE_BIT) {
@@ -4915,7 +5088,15 @@ RID RenderingDeviceVulkan::uniform_set_create(const Vector<Uniform> &p_uniforms,
ERR_FAIL_COND_V_MSG(uniform.ids.size() != 1, RID(),
"Storage buffer supplied (binding: " + itos(uniform.binding) + ") must provide one ID (" + itos(uniform.ids.size()) + " provided).");
- Buffer *buffer = storage_buffer_owner.getornull(uniform.ids[0]);
+ Buffer *buffer = nullptr;
+
+ if (storage_buffer_owner.owns(uniform.ids[0])) {
+ buffer = storage_buffer_owner.getornull(uniform.ids[0]);
+ } else if (vertex_buffer_owner.owns(uniform.ids[0])) {
+ buffer = vertex_buffer_owner.getornull(uniform.ids[0]);
+
+ ERR_FAIL_COND_V_MSG(!(buffer->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT), RID(), "Vertex buffer supplied (binding: " + itos(uniform.binding) + ") was not created with storage flag.");
+ }
ERR_FAIL_COND_V_MSG(!buffer, RID(), "Storage buffer supplied (binding: " + itos(uniform.binding) + ") is invalid.");
//if 0, then its sized on link time
@@ -5001,19 +5182,22 @@ bool RenderingDeviceVulkan::uniform_set_is_valid(RID p_uniform_set) {
return uniform_set_owner.owns(p_uniform_set);
}
-Error RenderingDeviceVulkan::buffer_update(RID p_buffer, uint32_t p_offset, uint32_t p_size, const void *p_data, bool p_sync_with_draw) {
+Error RenderingDeviceVulkan::buffer_update(RID p_buffer, uint32_t p_offset, uint32_t p_size, const void *p_data, uint32_t p_post_barrier) {
_THREAD_SAFE_METHOD_
- ERR_FAIL_COND_V_MSG(draw_list && p_sync_with_draw, ERR_INVALID_PARAMETER,
- "Updating buffers in 'sync to draw' mode is forbidden during creation of a draw list");
- ERR_FAIL_COND_V_MSG(compute_list && p_sync_with_draw, ERR_INVALID_PARAMETER,
- "Updating buffers in 'sync to draw' mode is forbidden during creation of a compute list");
-
- // Protect subsequent updates...
- VkPipelineStageFlags dst_stage_mask = VK_PIPELINE_STAGE_TRANSFER_BIT;
- VkAccessFlags dst_access = VK_ACCESS_TRANSFER_WRITE_BIT;
+ ERR_FAIL_COND_V_MSG(draw_list, ERR_INVALID_PARAMETER,
+ "Updating buffers is forbidden during creation of a draw list");
+ ERR_FAIL_COND_V_MSG(compute_list, ERR_INVALID_PARAMETER,
+ "Updating buffers is forbidden during creation of a compute list");
- Buffer *buffer = _get_buffer_from_owner(p_buffer, dst_stage_mask, dst_access);
+ VkPipelineStageFlags dst_stage_mask = 0;
+ VkAccessFlags dst_access = 0;
+ if (p_post_barrier & BARRIER_MASK_TRANSFER) {
+ // Protect subsequent updates...
+ dst_stage_mask = VK_PIPELINE_STAGE_TRANSFER_BIT;
+ dst_access = VK_ACCESS_TRANSFER_WRITE_BIT;
+ }
+ Buffer *buffer = _get_buffer_from_owner(p_buffer, dst_stage_mask, dst_access, p_post_barrier);
if (!buffer) {
ERR_FAIL_V_MSG(ERR_INVALID_PARAMETER, "Buffer argument is not a valid buffer of any type.");
}
@@ -5021,20 +5205,61 @@ Error RenderingDeviceVulkan::buffer_update(RID p_buffer, uint32_t p_offset, uint
ERR_FAIL_COND_V_MSG(p_offset + p_size > buffer->size, ERR_INVALID_PARAMETER,
"Attempted to write buffer (" + itos((p_offset + p_size) - buffer->size) + " bytes) past the end.");
- _buffer_memory_barrier(buffer->buffer, p_offset, p_size, dst_stage_mask, VK_PIPELINE_STAGE_TRANSFER_BIT, dst_access, VK_ACCESS_TRANSFER_WRITE_BIT, p_sync_with_draw);
- Error err = _buffer_update(buffer, p_offset, (uint8_t *)p_data, p_size, p_sync_with_draw);
+ // no barrier should be needed here
+ // _buffer_memory_barrier(buffer->buffer, p_offset, p_size, dst_stage_mask, VK_PIPELINE_STAGE_TRANSFER_BIT, dst_access, VK_ACCESS_TRANSFER_WRITE_BIT, true);
+
+ Error err = _buffer_update(buffer, p_offset, (uint8_t *)p_data, p_size, p_post_barrier);
if (err) {
return err;
}
#ifdef FORCE_FULL_BARRIER
- _full_barrier(p_sync_with_draw);
+ _full_barrier(true);
#else
- _buffer_memory_barrier(buffer->buffer, p_offset, p_size, VK_PIPELINE_STAGE_TRANSFER_BIT, dst_stage_mask, VK_ACCESS_TRANSFER_WRITE_BIT, dst_access, p_sync_with_draw);
+ _buffer_memory_barrier(buffer->buffer, p_offset, p_size, VK_PIPELINE_STAGE_TRANSFER_BIT, dst_stage_mask, VK_ACCESS_TRANSFER_WRITE_BIT, dst_access, true);
#endif
return err;
}
+Error RenderingDeviceVulkan::buffer_clear(RID p_buffer, uint32_t p_offset, uint32_t p_size, uint32_t p_post_barrier) {
+ _THREAD_SAFE_METHOD_
+
+ ERR_FAIL_COND_V_MSG((p_size % 4) != 0, ERR_INVALID_PARAMETER,
+ "Size must be a multiple of four");
+ ERR_FAIL_COND_V_MSG(draw_list, ERR_INVALID_PARAMETER,
+ "Updating buffers in is forbidden during creation of a draw list");
+ ERR_FAIL_COND_V_MSG(compute_list, ERR_INVALID_PARAMETER,
+ "Updating buffers is forbidden during creation of a compute list");
+
+ VkPipelineStageFlags dst_stage_mask = 0;
+ VkAccessFlags dst_access = 0;
+ if (p_post_barrier & BARRIER_MASK_TRANSFER) {
+ // Protect subsequent updates...
+ dst_stage_mask = VK_PIPELINE_STAGE_TRANSFER_BIT;
+ dst_access = VK_ACCESS_TRANSFER_WRITE_BIT;
+ }
+
+ Buffer *buffer = _get_buffer_from_owner(p_buffer, dst_stage_mask, dst_access, p_post_barrier);
+ if (!buffer) {
+ ERR_FAIL_V_MSG(ERR_INVALID_PARAMETER, "Buffer argument is not a valid buffer of any type.");
+ }
+
+ ERR_FAIL_COND_V_MSG(p_offset + p_size > buffer->size, ERR_INVALID_PARAMETER,
+ "Attempted to write buffer (" + itos((p_offset + p_size) - buffer->size) + " bytes) past the end.");
+
+ // should not be needed
+ // _buffer_memory_barrier(buffer->buffer, p_offset, p_size, dst_stage_mask, VK_PIPELINE_STAGE_TRANSFER_BIT, dst_access, VK_ACCESS_TRANSFER_WRITE_BIT, p_post_barrier);
+
+ vkCmdFillBuffer(frames[frame].draw_command_buffer, buffer->buffer, p_offset, p_size, 0);
+
+#ifdef FORCE_FULL_BARRIER
+ _full_barrier(true);
+#else
+ _buffer_memory_barrier(buffer->buffer, p_offset, p_size, VK_PIPELINE_STAGE_TRANSFER_BIT, dst_stage_mask, VK_ACCESS_TRANSFER_WRITE_BIT, dst_access, p_post_barrier);
+#endif
+ return OK;
+}
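buffer_clear is new API wrapping vkCmdFillBuffer with zeros, hence the four-byte size requirement. Caller sketch (rd and counter_buffer are hypothetical):

    // Zero a small GPU counter buffer before a dispatch; only the compute
    // stage has to wait on the fill.
    rd->buffer_clear(counter_buffer, 0, 4 * sizeof(uint32_t),
            RenderingDevice::BARRIER_MASK_COMPUTE);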
+
Vector<uint8_t> RenderingDeviceVulkan::buffer_get_data(RID p_buffer) {
_THREAD_SAFE_METHOD_
@@ -5042,7 +5267,7 @@ Vector<uint8_t> RenderingDeviceVulkan::buffer_get_data(RID p_buffer) {
VkPipelineShaderStageCreateFlags src_stage_mask = VK_PIPELINE_STAGE_TRANSFER_BIT;
VkAccessFlags src_access_mask = VK_ACCESS_TRANSFER_WRITE_BIT;
// Get the vulkan buffer and the potential stage/access possible
- Buffer *buffer = _get_buffer_from_owner(p_buffer, src_stage_mask, src_access_mask);
+ Buffer *buffer = _get_buffer_from_owner(p_buffer, src_stage_mask, src_access_mask, BARRIER_MASK_ALL);
if (!buffer) {
ERR_FAIL_V_MSG(Vector<uint8_t>(), "Buffer is either invalid or this type of buffer can't be retrieved. Only Index and Vertex buffers allow retrieving.");
}
@@ -5603,7 +5828,7 @@ RenderingDevice::DrawListID RenderingDeviceVulkan::draw_list_begin_for_screen(Di
vkCmdSetScissor(command_buffer, 0, 1, &scissor);
- return ID_TYPE_DRAW_LIST;
+ return int64_t(ID_TYPE_DRAW_LIST) << ID_BASE_SHIFT;
}
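Draw list IDs are 64-bit and now keep their type tag above ID_BASE_SHIFT (constant assumed from rendering_device_vulkan.h), leaving the low bits free for a split index; the old code returned the bare enum value. Encoding sketch:

    // Type tag in the high bits, split index in the low bits.
    RenderingDevice::DrawListID id =
            (int64_t(ID_TYPE_SPLIT_DRAW_LIST) << ID_BASE_SHIFT) + split_index;
    bool is_split = (id >> ID_BASE_SHIFT) == ID_TYPE_SPLIT_DRAW_LIST;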
Error RenderingDeviceVulkan::_draw_list_setup_framebuffer(Framebuffer *p_framebuffer, InitialAction p_initial_color_action, FinalAction p_final_color_action, InitialAction p_initial_depth_action, FinalAction p_final_depth_action, VkFramebuffer *r_framebuffer, VkRenderPass *r_render_pass) {
@@ -5656,11 +5881,18 @@ Error RenderingDeviceVulkan::_draw_list_render_pass_begin(Framebuffer *framebuff
render_pass_begin.pNext = nullptr;
render_pass_begin.renderPass = render_pass;
render_pass_begin.framebuffer = vkframebuffer;
-
+ /*
+ * Given how the API works, it makes sense to always operate on the whole framebuffer.
+ * This makes it easier to continue rendering into the same attachments across passes, e.g. for shadow mapping.
render_pass_begin.renderArea.extent.width = viewport_size.width;
render_pass_begin.renderArea.extent.height = viewport_size.height;
render_pass_begin.renderArea.offset.x = viewport_offset.x;
render_pass_begin.renderArea.offset.y = viewport_offset.y;
+ */
+ render_pass_begin.renderArea.extent.width = framebuffer->size.width;
+ render_pass_begin.renderArea.extent.height = framebuffer->size.height;
+ render_pass_begin.renderArea.offset.x = 0;
+ render_pass_begin.renderArea.offset.y = 0;
Vector<VkClearValue> clear_values;
clear_values.resize(framebuffer->texture_ids.size());
@@ -5809,11 +6041,11 @@ RenderingDevice::DrawListID RenderingDeviceVulkan::draw_list_begin(RID p_framebu
viewport_offset = regioni.position;
viewport_size = regioni.size;
- if (p_initial_color_action == INITIAL_ACTION_CLEAR) {
+ if (p_initial_color_action == INITIAL_ACTION_CLEAR_REGION) {
needs_clear_color = true;
p_initial_color_action = INITIAL_ACTION_KEEP;
}
- if (p_initial_depth_action == INITIAL_ACTION_CLEAR) {
+ if (p_initial_depth_action == INITIAL_ACTION_CLEAR_REGION) {
needs_clear_depth = true;
p_initial_depth_action = INITIAL_ACTION_KEEP;
}
@@ -5870,7 +6102,7 @@ RenderingDevice::DrawListID RenderingDeviceVulkan::draw_list_begin(RID p_framebu
vkCmdSetScissor(command_buffer, 0, 1, &scissor);
draw_list->viewport = Rect2i(viewport_offset, viewport_size);
- return ID_TYPE_DRAW_LIST;
+ return int64_t(ID_TYPE_DRAW_LIST) << ID_BASE_SHIFT;
}
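INITIAL_ACTION_CLEAR_REGION splits the old behavior: a full INITIAL_ACTION_CLEAR maps to the render pass load op, while a region clear keeps existing contents and clears only the requested rect. Caller sketch (rd, fb, clear_colors and region are hypothetical):

    // Clear only the damaged sub-rect; the rest of the framebuffer is kept.
    RenderingDevice::DrawListID dl = rd->draw_list_begin(fb,
            RenderingDevice::INITIAL_ACTION_CLEAR_REGION, RenderingDevice::FINAL_ACTION_READ,
            RenderingDevice::INITIAL_ACTION_CLEAR_REGION, RenderingDevice::FINAL_ACTION_READ,
            clear_colors, 1.0f, 0, region);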
Error RenderingDeviceVulkan::draw_list_begin_split(RID p_framebuffer, uint32_t p_splits, DrawListID *r_split_ids, InitialAction p_initial_color_action, FinalAction p_final_color_action, InitialAction p_initial_depth_action, FinalAction p_final_depth_action, const Vector<Color> &p_clear_color_values, float p_clear_depth, uint32_t p_clear_stencil, const Rect2 &p_region, const Vector<RID> &p_storage_textures) {
@@ -5899,11 +6131,11 @@ Error RenderingDeviceVulkan::draw_list_begin_split(RID p_framebuffer, uint32_t p
viewport_offset = regioni.position;
viewport_size = regioni.size;
- if (p_initial_color_action == INITIAL_ACTION_CLEAR) {
+ if (p_initial_color_action == INITIAL_ACTION_CLEAR_REGION) {
needs_clear_color = true;
p_initial_color_action = INITIAL_ACTION_KEEP;
}
- if (p_initial_depth_action == INITIAL_ACTION_CLEAR) {
+ if (p_initial_depth_action == INITIAL_ACTION_CLEAR_REGION) {
needs_clear_depth = true;
p_initial_depth_action = INITIAL_ACTION_KEEP;
}
@@ -5967,7 +6199,7 @@ Error RenderingDeviceVulkan::draw_list_begin_split(RID p_framebuffer, uint32_t p
for (uint32_t i = 0; i < p_splits; i++) {
//take a command buffer and initialize it
- VkCommandBuffer command_buffer = split_draw_list_allocators[p_splits].command_buffers[frame];
+ VkCommandBuffer command_buffer = split_draw_list_allocators[i].command_buffers[frame];
VkCommandBufferInheritanceInfo inheritance_info;
inheritance_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO;
@@ -6025,7 +6257,7 @@ Error RenderingDeviceVulkan::draw_list_begin_split(RID p_framebuffer, uint32_t p
scissor.extent.height = viewport_size.height;
vkCmdSetScissor(command_buffer, 0, 1, &scissor);
- r_split_ids[i] = (DrawListID(1) << DrawListID(ID_TYPE_SPLIT_DRAW_LIST)) + i;
+ r_split_ids[i] = (int64_t(ID_TYPE_SPLIT_DRAW_LIST) << ID_BASE_SHIFT) + i;
draw_list[i].viewport = Rect2i(viewport_offset, viewport_size);
}
@@ -6040,7 +6272,7 @@ RenderingDeviceVulkan::DrawList *RenderingDeviceVulkan::_get_draw_list_ptr(DrawL
if (!draw_list) {
return nullptr;
- } else if (p_id == ID_TYPE_DRAW_LIST) {
+ } else if (p_id == (int64_t(ID_TYPE_DRAW_LIST) << ID_BASE_SHIFT)) {
if (draw_list_split) {
return nullptr;
}
@@ -6134,7 +6366,7 @@ void RenderingDeviceVulkan::draw_list_bind_render_pipeline(DrawListID p_list, RI
void RenderingDeviceVulkan::draw_list_bind_uniform_set(DrawListID p_list, RID p_uniform_set, uint32_t p_index) {
#ifdef DEBUG_ENABLED
- ERR_FAIL_COND_MSG(p_index >= limits.maxBoundDescriptorSets || p_index > MAX_UNIFORM_SETS,
+ ERR_FAIL_COND_MSG(p_index >= limits.maxBoundDescriptorSets || p_index >= MAX_UNIFORM_SETS,
"Attempting to bind a descriptor set (" + itos(p_index) + ") greater than what the hardware supports (" + itos(limits.maxBoundDescriptorSets) + ").");
#endif
DrawList *dl = _get_draw_list_ptr(p_list);
@@ -6159,13 +6391,13 @@ void RenderingDeviceVulkan::draw_list_bind_uniform_set(DrawListID p_list, RID p_
#ifdef DEBUG_ENABLED
{ //validate that textures bound are not attached as framebuffer bindings
uint32_t attachable_count = uniform_set->attachable_textures.size();
- const RID *attachable_ptr = uniform_set->attachable_textures.ptr();
+ const UniformSet::AttachableTexture *attachable_ptr = uniform_set->attachable_textures.ptr();
uint32_t bound_count = draw_list_bound_textures.size();
const RID *bound_ptr = draw_list_bound_textures.ptr();
for (uint32_t i = 0; i < attachable_count; i++) {
for (uint32_t j = 0; j < bound_count; j++) {
- ERR_FAIL_COND_MSG(attachable_ptr[i] == bound_ptr[j],
- "Attempted to use the same texture in framebuffer attachment and a uniform set, this is not allowed.");
+ ERR_FAIL_COND_MSG(attachable_ptr[i].texture == bound_ptr[j],
+ "Attempted to use the same texture in framebuffer attachment and a uniform (set: " + itos(p_index) + ", binding: " + itos(attachable_ptr[i].bind) + "), this is not allowed.");
}
}
}
@@ -6369,7 +6601,7 @@ void RenderingDeviceVulkan::draw_list_enable_scissor(DrawListID p_list, const Re
Rect2i rect = p_rect;
rect.position += dl->viewport.position;
- rect = dl->viewport.clip(rect);
+ rect = dl->viewport.intersection(rect);
if (rect.get_area() == 0) {
return;
@@ -6398,7 +6630,7 @@ void RenderingDeviceVulkan::draw_list_disable_scissor(DrawListID p_list) {
vkCmdSetScissor(dl->command_buffer, 0, 1, &scissor);
}
-void RenderingDeviceVulkan::draw_list_end() {
+void RenderingDeviceVulkan::draw_list_end(uint32_t p_post_barrier) {
_THREAD_SAFE_METHOD_
ERR_FAIL_COND_MSG(!draw_list, "Immediate draw list is already inactive.");
@@ -6407,8 +6639,8 @@ void RenderingDeviceVulkan::draw_list_end() {
//send all command buffers
VkCommandBuffer *command_buffers = (VkCommandBuffer *)alloca(sizeof(VkCommandBuffer) * draw_list_count);
for (uint32_t i = 0; i < draw_list_count; i++) {
- vkEndCommandBuffer(draw_list->command_buffer);
- command_buffers[i] = draw_list->command_buffer;
+ vkEndCommandBuffer(draw_list[i].command_buffer);
+ command_buffers[i] = draw_list[i].command_buffer;
}
vkCmdExecuteCommands(frames[frame].draw_command_buffer, draw_list_count, command_buffers);
@@ -6434,6 +6666,21 @@ void RenderingDeviceVulkan::draw_list_end() {
}
}
+ uint32_t barrier_flags = 0;
+ uint32_t access_flags = 0;
+ if (p_post_barrier & BARRIER_MASK_COMPUTE) {
+ barrier_flags |= VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
+ access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
+ }
+ if (p_post_barrier & BARRIER_MASK_RASTER) {
+ barrier_flags |= VK_PIPELINE_STAGE_VERTEX_INPUT_BIT | VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT;
+ access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT | VK_ACCESS_INDEX_READ_BIT | VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT | VK_ACCESS_INDIRECT_COMMAND_READ_BIT;
+ }
+ if (p_post_barrier & BARRIER_MASK_TRANSFER) {
+ barrier_flags |= VK_PIPELINE_STAGE_TRANSFER_BIT;
+ access_flags |= VK_ACCESS_TRANSFER_WRITE_BIT | VK_ACCESS_TRANSFER_READ_BIT;
+ }
+
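// draw_list_end() now takes a bitfield of consumers that must observe this
// pass and widens only the destination stage/access masks to match; the
// source side stays the fixed attachment-output/late-fragment-tests stages
// used below. A hedged usage sketch (BARRIER_MASK_* are assumed to be
// public RenderingDevice flags, consistent with their use in this diff):
//
//   // Only compute work reads the results next, so skip raster/transfer:
//   rd->draw_list_end(RenderingDevice::BARRIER_MASK_COMPUTE);
//
//   // Masks combine when several consumers follow:
//   rd->draw_list_end(RenderingDevice::BARRIER_MASK_COMPUTE |
//           RenderingDevice::BARRIER_MASK_TRANSFER);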
draw_list_bound_textures.clear();
for (int i = 0; i < draw_list_storage_textures.size(); i++) {
@@ -6443,7 +6690,7 @@ void RenderingDeviceVulkan::draw_list_end() {
image_memory_barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
image_memory_barrier.pNext = nullptr;
image_memory_barrier.srcAccessMask = VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
- image_memory_barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
+ image_memory_barrier.dstAccessMask = access_flags;
image_memory_barrier.oldLayout = texture->layout;
image_memory_barrier.newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
@@ -6456,7 +6703,7 @@ void RenderingDeviceVulkan::draw_list_end() {
image_memory_barrier.subresourceRange.baseArrayLayer = texture->base_layer;
image_memory_barrier.subresourceRange.layerCount = texture->layers;
- vkCmdPipelineBarrier(frames[frame].draw_command_buffer, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, 0, 0, nullptr, 0, nullptr, 1, &image_memory_barrier);
+ vkCmdPipelineBarrier(frames[frame].draw_command_buffer, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, barrier_flags, 0, 0, nullptr, 0, nullptr, 1, &image_memory_barrier);
texture->layout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
}
@@ -6470,7 +6717,7 @@ void RenderingDeviceVulkan::draw_list_end() {
#ifdef FORCE_FULL_BARRIER
_full_barrier(true);
#else
- _memory_barrier(VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT, VK_PIPELINE_STAGE_VERTEX_INPUT_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT | VK_PIPELINE_STAGE_TRANSFER_BIT, VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, VK_ACCESS_INDEX_READ_BIT | VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT | VK_ACCESS_TRANSFER_READ_BIT | VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT, true);
+ _memory_barrier(VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT, barrier_flags, VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, access_flags, true);
#endif
}
@@ -6556,7 +6803,7 @@ void RenderingDeviceVulkan::compute_list_bind_uniform_set(ComputeListID p_list,
ComputeList *cl = compute_list;
#ifdef DEBUG_ENABLED
- ERR_FAIL_COND_MSG(p_index >= limits.maxBoundDescriptorSets || p_index > MAX_UNIFORM_SETS,
+ ERR_FAIL_COND_MSG(p_index >= limits.maxBoundDescriptorSets || p_index >= MAX_UNIFORM_SETS,
"Attempting to bind a descriptor set (" + itos(p_index) + ") greater than what the hardware supports (" + itos(limits.maxBoundDescriptorSets) + ").");
#endif
@@ -6793,14 +7040,30 @@ void RenderingDeviceVulkan::compute_list_add_barrier(ComputeListID p_list) {
#endif
}
-void RenderingDeviceVulkan::compute_list_end() {
+void RenderingDeviceVulkan::compute_list_end(uint32_t p_post_barrier) {
ERR_FAIL_COND(!compute_list);
+
+ uint32_t barrier_flags = 0;
+ uint32_t access_flags = 0;
+ if (p_post_barrier & BARRIER_MASK_COMPUTE) {
+ barrier_flags |= VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
+ access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT | VK_ACCESS_INDIRECT_COMMAND_READ_BIT;
+ }
+ if (p_post_barrier & BARRIER_MASK_RASTER) {
+ barrier_flags |= VK_PIPELINE_STAGE_VERTEX_INPUT_BIT | VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT;
+ access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT | VK_ACCESS_INDEX_READ_BIT | VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT | VK_ACCESS_INDIRECT_COMMAND_READ_BIT;
+ }
+ if (p_post_barrier & BARRIER_MASK_TRANSFER) {
+ barrier_flags |= VK_PIPELINE_STAGE_TRANSFER_BIT;
+ access_flags |= VK_ACCESS_TRANSFER_WRITE_BIT | VK_ACCESS_TRANSFER_READ_BIT;
+ }
+
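// Same mapping as in draw_list_end(), with one addition: the compute mask
// also sets VK_ACCESS_INDIRECT_COMMAND_READ_BIT, since a compute pass may
// write a dispatch-indirect buffer that the next compute pass consumes.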
for (Set<Texture *>::Element *E = compute_list->state.textures_to_sampled_layout.front(); E; E = E->next()) {
VkImageMemoryBarrier image_memory_barrier;
image_memory_barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
image_memory_barrier.pNext = nullptr;
image_memory_barrier.srcAccessMask = VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
- image_memory_barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_TRANSFER_READ_BIT;
+ image_memory_barrier.dstAccessMask = access_flags;
image_memory_barrier.oldLayout = E->get()->layout;
image_memory_barrier.newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
@@ -6814,7 +7077,7 @@ void RenderingDeviceVulkan::compute_list_end() {
image_memory_barrier.subresourceRange.layerCount = E->get()->layers;
// TODO: Look at the usages in the compute list and determine tighter dst stage and access masks based on some "final" usage equivalent
- vkCmdPipelineBarrier(compute_list->command_buffer, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT | VK_PIPELINE_STAGE_TRANSFER_BIT, 0, 0, nullptr, 0, nullptr, 1, &image_memory_barrier);
+ vkCmdPipelineBarrier(compute_list->command_buffer, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, barrier_flags, 0, 0, nullptr, 0, nullptr, 1, &image_memory_barrier);
E->get()->layout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
}
@@ -6824,10 +7087,44 @@ void RenderingDeviceVulkan::compute_list_end() {
#ifdef FORCE_FULL_BARRIER
_full_barrier(true);
#else
- _memory_barrier(VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT | VK_PIPELINE_STAGE_VERTEX_INPUT_BIT | VK_PIPELINE_STAGE_TRANSFER_BIT | VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT, VK_ACCESS_SHADER_WRITE_BIT, VK_ACCESS_INDEX_READ_BIT | VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT | VK_ACCESS_TRANSFER_READ_BIT | VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_INDIRECT_COMMAND_READ_BIT, true);
+ _memory_barrier(VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, barrier_flags, VK_ACCESS_SHADER_WRITE_BIT, VK_ACCESS_INDEX_READ_BIT | VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT | VK_ACCESS_TRANSFER_READ_BIT | VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_INDIRECT_COMMAND_READ_BIT, true);
#endif
}
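// Note the asymmetry with draw_list_end(): the image barriers above use the
// computed access_flags, but this final memory barrier still passes the
// broad constant destination access mask, so buffer visibility here is not
// narrowed by p_post_barrier.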
+void RenderingDeviceVulkan::barrier(uint32_t p_from, uint32_t p_to) {
+ uint32_t src_barrier_flags = 0;
+ uint32_t src_access_flags = 0;
+ if (p_from & BARRIER_MASK_COMPUTE) {
+ src_barrier_flags |= VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
+ src_access_flags |= VK_ACCESS_SHADER_WRITE_BIT;
+ }
+ if (p_from & BARRIER_MASK_RASTER) {
+ src_barrier_flags |= VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT;
+ src_access_flags |= VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
+ }
+ if (p_from & BARRIER_MASK_TRANSFER) {
+ src_barrier_flags |= VK_PIPELINE_STAGE_TRANSFER_BIT;
+ src_access_flags |= VK_ACCESS_TRANSFER_WRITE_BIT;
+ }
+
+ uint32_t dst_barrier_flags = 0;
+ uint32_t dst_access_flags = 0;
+ if (p_to & BARRIER_MASK_COMPUTE) {
+ dst_barrier_flags |= VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
+ dst_access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT | VK_ACCESS_INDIRECT_COMMAND_READ_BIT;
+ }
+ if (p_to & BARRIER_MASK_RASTER) {
+ dst_barrier_flags |= VK_PIPELINE_STAGE_VERTEX_INPUT_BIT | VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT;
+ dst_access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT | VK_ACCESS_INDEX_READ_BIT | VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT | VK_ACCESS_INDIRECT_COMMAND_READ_BIT;
+ }
+ if (p_to & BARRIER_MASK_TRANSFER) {
+ dst_barrier_flags |= VK_PIPELINE_STAGE_TRANSFER_BIT;
+ dst_access_flags |= VK_ACCESS_TRANSFER_WRITE_BIT | VK_ACCESS_TRANSFER_READ_BIT;
+ }
+
+ _memory_barrier(src_barrier_flags, dst_barrier_flags, src_access_flags, dst_access_flags, true);
+}
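// A free-standing counterpart to the *_end() barriers: callers that ended
// several lists with narrow masks can insert one explicit sync point
// between producer and consumer stages. Usage sketch (enum scoping assumed,
// matching the signature declared above):
//
//   // Make prior compute writes visible to subsequent raster work:
//   rd->barrier(RenderingDevice::BARRIER_MASK_COMPUTE,
//           RenderingDevice::BARRIER_MASK_RASTER);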
+
void RenderingDeviceVulkan::full_barrier() {
#ifndef DEBUG_ENABLED
ERR_PRINT("Full barrier is debug-only, should not be used in production");
@@ -6837,7 +7134,6 @@ void RenderingDeviceVulkan::full_barrier() {
#if 0
void RenderingDeviceVulkan::draw_list_render_secondary_to_framebuffer(ID p_framebuffer, ID *p_draw_lists, uint32_t p_draw_list_count, InitialAction p_initial_action, FinalAction p_final_action, const Vector<Variant> &p_clear_colors) {
-
VkCommandBuffer frame_cmdbuf = frames[frame].frame_buffer;
ERR_FAIL_COND(!frame_cmdbuf);
@@ -6866,7 +7162,6 @@ void RenderingDeviceVulkan::draw_list_render_secondary_to_framebuffer(ID p_frame
ID screen_format = screen_get_framebuffer_format();
{
-
VkCommandBuffer *command_buffers = (VkCommandBuffer *)alloca(sizeof(VkCommandBuffer) * p_draw_list_count);
uint32_t command_buffer_count = 0;
@@ -6890,7 +7185,6 @@ void RenderingDeviceVulkan::draw_list_render_secondary_to_framebuffer(ID p_frame
}
vkCmdEndRenderPass(frame_cmdbuf);
-
}
#endif
@@ -6965,6 +7259,72 @@ void RenderingDeviceVulkan::free(RID p_id) {
_free_internal(p_id);
}
+// The full list of resources that can be named is in the VkObjectType enum.
+// We only expose the resources that are owned and can be accessed easily.
+void RenderingDeviceVulkan::set_resource_name(RID p_id, const String p_name) {
+ if (texture_owner.owns(p_id)) {
+ Texture *texture = texture_owner.getornull(p_id);
+ if (texture->owner.is_null()) {
+ // Don't set the source texture's name when calling on a texture view
+ context->set_object_name(VK_OBJECT_TYPE_IMAGE, uint64_t(texture->image), p_name);
+ }
+ context->set_object_name(VK_OBJECT_TYPE_IMAGE_VIEW, uint64_t(texture->view), p_name + " View");
+ } else if (framebuffer_owner.owns(p_id)) {
+ //Framebuffer *framebuffer = framebuffer_owner.getornull(p_id);
+ // Not implemented for now as the relationship between Framebuffer and RenderPass is very complex
+ } else if (sampler_owner.owns(p_id)) {
+ VkSampler *sampler = sampler_owner.getornull(p_id);
+ context->set_object_name(VK_OBJECT_TYPE_SAMPLER, uint64_t(*sampler), p_name);
+ } else if (vertex_buffer_owner.owns(p_id)) {
+ Buffer *vertex_buffer = vertex_buffer_owner.getornull(p_id);
+ context->set_object_name(VK_OBJECT_TYPE_BUFFER, uint64_t(vertex_buffer->buffer), p_name);
+ } else if (index_buffer_owner.owns(p_id)) {
+ IndexBuffer *index_buffer = index_buffer_owner.getornull(p_id);
+ context->set_object_name(VK_OBJECT_TYPE_BUFFER, uint64_t(index_buffer->buffer), p_name);
+ } else if (shader_owner.owns(p_id)) {
+ Shader *shader = shader_owner.getornull(p_id);
+ context->set_object_name(VK_OBJECT_TYPE_PIPELINE_LAYOUT, uint64_t(shader->pipeline_layout), p_name + " Pipeline Layout");
+ for (int i = 0; i < shader->sets.size(); i++) {
+ context->set_object_name(VK_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT, uint64_t(shader->sets[i].descriptor_set_layout), p_name);
+ }
+ } else if (uniform_buffer_owner.owns(p_id)) {
+ Buffer *uniform_buffer = uniform_buffer_owner.getornull(p_id);
+ context->set_object_name(VK_OBJECT_TYPE_BUFFER, uint64_t(uniform_buffer->buffer), p_name);
+ } else if (texture_buffer_owner.owns(p_id)) {
+ TextureBuffer *texture_buffer = texture_buffer_owner.getornull(p_id);
+ context->set_object_name(VK_OBJECT_TYPE_BUFFER, uint64_t(texture_buffer->buffer.buffer), p_name);
+ context->set_object_name(VK_OBJECT_TYPE_BUFFER_VIEW, uint64_t(texture_buffer->view), p_name + " View");
+ } else if (storage_buffer_owner.owns(p_id)) {
+ Buffer *storage_buffer = storage_buffer_owner.getornull(p_id);
+ context->set_object_name(VK_OBJECT_TYPE_BUFFER, uint64_t(storage_buffer->buffer), p_name);
+ } else if (uniform_set_owner.owns(p_id)) {
+ UniformSet *uniform_set = uniform_set_owner.getornull(p_id);
+ context->set_object_name(VK_OBJECT_TYPE_DESCRIPTOR_SET, uint64_t(uniform_set->descriptor_set), p_name);
+ } else if (render_pipeline_owner.owns(p_id)) {
+ RenderPipeline *pipeline = render_pipeline_owner.getornull(p_id);
+ context->set_object_name(VK_OBJECT_TYPE_PIPELINE, uint64_t(pipeline->pipeline), p_name);
+ context->set_object_name(VK_OBJECT_TYPE_PIPELINE_LAYOUT, uint64_t(pipeline->pipeline_layout), p_name + " Layout");
+ } else if (compute_pipeline_owner.owns(p_id)) {
+ ComputePipeline *pipeline = compute_pipeline_owner.getornull(p_id);
+ context->set_object_name(VK_OBJECT_TYPE_PIPELINE, uint64_t(pipeline->pipeline), p_name);
+ context->set_object_name(VK_OBJECT_TYPE_PIPELINE_LAYOUT, uint64_t(pipeline->pipeline_layout), p_name + " Layout");
+ } else {
+ ERR_PRINT("Attempted to name invalid ID: " + itos(p_id.get_id()));
+ }
+}
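// These names surface in debuggers such as RenderDoc through whatever
// debug-utils naming context->set_object_name() wraps. Typical use, as a
// sketch (texture creation arguments abbreviated):
//
//   RID tex = rd->texture_create(format, view);
//   rd->set_resource_name(tex, "Shadow Atlas"); // also names "... View"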
+
+void RenderingDeviceVulkan::draw_command_begin_label(String p_label_name, const Color p_color) {
+ context->command_begin_label(frames[frame].draw_command_buffer, p_label_name, p_color);
+}
+
+void RenderingDeviceVulkan::draw_command_insert_label(String p_label_name, const Color p_color) {
+ context->command_insert_label(frames[frame].draw_command_buffer, p_label_name, p_color);
+}
+
+void RenderingDeviceVulkan::draw_command_end_label() {
+ context->command_end_label(frames[frame].draw_command_buffer);
+}
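// Begin/insert/end presumably map onto vkCmdBeginDebugUtilsLabelEXT and
// friends via the context, marking a named, colored region on the frame's
// draw command buffer for capture tools. Expected pairing, as a sketch:
//
//   rd->draw_command_begin_label("Shadow Pass", Color(0.2, 0.2, 0.8));
//   // ... record draw lists ...
//   rd->draw_command_end_label();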
+
void RenderingDeviceVulkan::_finalize_command_bufers() {
if (draw_list) {
ERR_PRINT("Found open draw list at the end of the frame, this should never happen (further drawing will likely not work).");
@@ -7383,9 +7743,10 @@ void RenderingDeviceVulkan::_free_rids(T &p_owner, const char *p_type) {
}
}
-void RenderingDeviceVulkan::capture_timestamp(const String &p_name, bool p_sync_to_draw) {
+void RenderingDeviceVulkan::capture_timestamp(const String &p_name) {
ERR_FAIL_COND(frames[frame].timestamp_count >= max_timestamp_query_elements);
+ // This should be optional for profiling, else it will slow things down.
{
VkMemoryBarrier memoryBarrier;
@@ -7422,9 +7783,10 @@ void RenderingDeviceVulkan::capture_timestamp(const String &p_name, bool p_sync_
VK_ACCESS_HOST_READ_BIT |
VK_ACCESS_HOST_WRITE_BIT;
- vkCmdPipelineBarrier(p_sync_to_draw ? frames[frame].draw_command_buffer : frames[frame].setup_command_buffer, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, 1, &memoryBarrier, 0, nullptr, 0, nullptr);
+ vkCmdPipelineBarrier(frames[frame].draw_command_buffer, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, 1, &memoryBarrier, 0, nullptr, 0, nullptr);
}
- vkCmdWriteTimestamp(p_sync_to_draw ? frames[frame].draw_command_buffer : frames[frame].setup_command_buffer, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, frames[frame].timestamp_pool, frames[frame].timestamp_count);
+
+ vkCmdWriteTimestamp(frames[frame].draw_command_buffer, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, frames[frame].timestamp_pool, frames[frame].timestamp_count);
frames[frame].timestamp_names[frames[frame].timestamp_count] = p_name;
frames[frame].timestamp_cpu_values[frames[frame].timestamp_count] = OS::get_singleton()->get_ticks_usec();
frames[frame].timestamp_count++;
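// The p_sync_to_draw switch is gone: timestamps now always record on the
// draw command buffer (after a full ALL_COMMANDS memory barrier) instead of
// optionally on the setup buffer, keeping result ordering uniform.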
@@ -7653,7 +8015,6 @@ RenderingDevice *RenderingDeviceVulkan::create_local_device() {
}
RenderingDeviceVulkan::RenderingDeviceVulkan() {
- screen_prepared = false;
}
RenderingDeviceVulkan::~RenderingDeviceVulkan() {