Diffstat (limited to 'drivers/vulkan')
-rw-r--r--  drivers/vulkan/rendering_device_vulkan.cpp | 222
-rw-r--r--  drivers/vulkan/rendering_device_vulkan.h   |  24
-rw-r--r--  drivers/vulkan/vulkan_context.cpp          |   7
-rw-r--r--  drivers/vulkan/vulkan_context.h            |   3
4 files changed, 151 insertions(+), 105 deletions(-)
diff --git a/drivers/vulkan/rendering_device_vulkan.cpp b/drivers/vulkan/rendering_device_vulkan.cpp
index de29363556..01d1583ca4 100644
--- a/drivers/vulkan/rendering_device_vulkan.cpp
+++ b/drivers/vulkan/rendering_device_vulkan.cpp
@@ -47,7 +47,7 @@
static const uint32_t SMALL_ALLOCATION_MAX_SIZE = 4096;
// Get the Vulkan object information and possible stage access types (bitwise OR'd with incoming values).
-RenderingDeviceVulkan::Buffer *RenderingDeviceVulkan::_get_buffer_from_owner(RID p_buffer, VkPipelineStageFlags &r_stage_mask, VkAccessFlags &r_access_mask, uint32_t p_post_barrier) {
+RenderingDeviceVulkan::Buffer *RenderingDeviceVulkan::_get_buffer_from_owner(RID p_buffer, VkPipelineStageFlags &r_stage_mask, VkAccessFlags &r_access_mask, BitField<BarrierMask> p_post_barrier) {
Buffer *buffer = nullptr;
if (vertex_buffer_owner.owns(p_buffer)) {
buffer = vertex_buffer_owner.get_or_null(p_buffer);
@@ -55,11 +55,11 @@ RenderingDeviceVulkan::Buffer *RenderingDeviceVulkan::_get_buffer_from_owner(RID
r_stage_mask |= VK_PIPELINE_STAGE_VERTEX_INPUT_BIT;
r_access_mask |= VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT;
if (buffer->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) {
- if (p_post_barrier & BARRIER_MASK_RASTER) {
+ if (p_post_barrier.has_flag(BARRIER_MASK_RASTER)) {
r_access_mask |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
r_stage_mask |= VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
}
- if (p_post_barrier & BARRIER_MASK_COMPUTE) {
+ if (p_post_barrier.has_flag(BARRIER_MASK_COMPUTE)) {
r_access_mask |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
r_stage_mask |= VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
}
@@ -69,20 +69,20 @@ RenderingDeviceVulkan::Buffer *RenderingDeviceVulkan::_get_buffer_from_owner(RID
r_access_mask |= VK_ACCESS_INDEX_READ_BIT;
buffer = index_buffer_owner.get_or_null(p_buffer);
} else if (uniform_buffer_owner.owns(p_buffer)) {
- if (p_post_barrier & BARRIER_MASK_RASTER) {
+ if (p_post_barrier.has_flag(BARRIER_MASK_RASTER)) {
r_stage_mask |= VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
}
- if (p_post_barrier & BARRIER_MASK_COMPUTE) {
+ if (p_post_barrier.has_flag(BARRIER_MASK_COMPUTE)) {
r_stage_mask |= VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
}
r_access_mask |= VK_ACCESS_UNIFORM_READ_BIT;
buffer = uniform_buffer_owner.get_or_null(p_buffer);
} else if (texture_buffer_owner.owns(p_buffer)) {
- if (p_post_barrier & BARRIER_MASK_RASTER) {
+ if (p_post_barrier.has_flag(BARRIER_MASK_RASTER)) {
r_stage_mask |= VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
r_access_mask |= VK_ACCESS_SHADER_READ_BIT;
}
- if (p_post_barrier & BARRIER_MASK_COMPUTE) {
+ if (p_post_barrier.has_flag(BARRIER_MASK_COMPUTE)) {
r_stage_mask |= VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
r_access_mask |= VK_ACCESS_SHADER_READ_BIT;
}
@@ -90,11 +90,11 @@ RenderingDeviceVulkan::Buffer *RenderingDeviceVulkan::_get_buffer_from_owner(RID
buffer = &texture_buffer_owner.get_or_null(p_buffer)->buffer;
} else if (storage_buffer_owner.owns(p_buffer)) {
buffer = storage_buffer_owner.get_or_null(p_buffer);
- if (p_post_barrier & BARRIER_MASK_RASTER) {
+ if (p_post_barrier.has_flag(BARRIER_MASK_RASTER)) {
r_stage_mask |= VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
r_access_mask |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
}
- if (p_post_barrier & BARRIER_MASK_COMPUTE) {
+ if (p_post_barrier.has_flag(BARRIER_MASK_COMPUTE)) {
r_stage_mask |= VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
r_access_mask |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
}
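
The mechanical change running through these hunks swaps raw uint32_t masks for Godot's type-safe BitField<BarrierMask> wrapper. A minimal sketch of the wrapper's assumed shape (the real one lives in core/templates/bit_field.h and has more operators; semantics inferred from the call sites above):

    // Sketch only, not the actual header.
    template <typename T>
    class BitField {
        int64_t value = 0;
    public:
        bool has_flag(T p_flag) const { return value & (int64_t)p_flag; }
        void set_flag(T p_flag) { value |= (int64_t)p_flag; }
        BitField() = default;
        BitField(int64_t p_value) { value = p_value; } // Implicit, so plain enum masks still convert.
        operator int64_t() const { return value; }     // Lets checks like (p_from == 0) keep working.
    };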
@@ -2009,7 +2009,7 @@ RID RenderingDeviceVulkan::texture_create(const TextureFormat &p_format, const T
if (p_data.size()) {
for (uint32_t i = 0; i < image_create_info.arrayLayers; i++) {
- _texture_update(id, i, p_data[i], RD::BARRIER_MASK_ALL, true);
+ _texture_update(id, i, p_data[i], RD::BARRIER_MASK_ALL_BARRIERS, true);
}
}
return id;
@@ -2160,14 +2160,35 @@ RID RenderingDeviceVulkan::texture_create_from_extension(TextureType p_type, Dat
texture.height = p_height;
texture.depth = p_depth;
texture.layers = p_layers;
- texture.mipmaps = 0; // Maybe make this settable too?
+ texture.mipmaps = 1;
texture.usage_flags = p_flags;
texture.base_mipmap = 0;
texture.base_layer = 0;
texture.allowed_shared_formats.push_back(RD::DATA_FORMAT_R8G8B8A8_UNORM);
texture.allowed_shared_formats.push_back(RD::DATA_FORMAT_R8G8B8A8_SRGB);
- // Do we need to do something with texture.layout?
+ // Set base layout based on usage priority.
+
+ if (texture.usage_flags & TEXTURE_USAGE_SAMPLING_BIT) {
+ // First priority, readable.
+ texture.layout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
+
+ } else if (texture.usage_flags & TEXTURE_USAGE_STORAGE_BIT) {
+ // Second priority, storage.
+
+ texture.layout = VK_IMAGE_LAYOUT_GENERAL;
+
+ } else if (texture.usage_flags & TEXTURE_USAGE_COLOR_ATTACHMENT_BIT) {
+ // Third priority, color or depth.
+
+ texture.layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
+
+ } else if (texture.usage_flags & TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) {
+ texture.layout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
+
+ } else {
+ texture.layout = VK_IMAGE_LAYOUT_GENERAL;
+ }
if (texture.usage_flags & TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) {
texture.read_aspect_mask = VK_IMAGE_ASPECT_DEPTH_BIT;
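
A hypothetical call site for this entry point, wrapping an externally allocated VkImage (handle, dimensions, and layer count below are illustrative, e.g. adopting an XR runtime's swapchain image). With both usage bits set, the priority chain above selects VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL:

    RID rd_texture = rd->texture_create_from_extension(
            RD::TEXTURE_TYPE_2D_ARRAY, RD::DATA_FORMAT_R8G8B8A8_SRGB, RD::TEXTURE_SAMPLES_1,
            RD::TEXTURE_USAGE_SAMPLING_BIT | RD::TEXTURE_USAGE_COLOR_ATTACHMENT_BIT,
            (uint64_t)vk_image, 1920, 1080, /*depth*/ 1, /*layers*/ 2);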
@@ -2393,7 +2414,7 @@ RID RenderingDeviceVulkan::texture_create_shared_from_slice(const TextureView &p
return id;
}
-Error RenderingDeviceVulkan::texture_update(RID p_texture, uint32_t p_layer, const Vector<uint8_t> &p_data, uint32_t p_post_barrier) {
+Error RenderingDeviceVulkan::texture_update(RID p_texture, uint32_t p_layer, const Vector<uint8_t> &p_data, BitField<BarrierMask> p_post_barrier) {
return _texture_update(p_texture, p_layer, p_data, p_post_barrier, false);
}
@@ -2413,7 +2434,7 @@ static _ALWAYS_INLINE_ void _copy_region(uint8_t const *__restrict p_src, uint8_
}
}
-Error RenderingDeviceVulkan::_texture_update(RID p_texture, uint32_t p_layer, const Vector<uint8_t> &p_data, uint32_t p_post_barrier, bool p_use_setup_queue) {
+Error RenderingDeviceVulkan::_texture_update(RID p_texture, uint32_t p_layer, const Vector<uint8_t> &p_data, BitField<BarrierMask> p_post_barrier, bool p_use_setup_queue) {
_THREAD_SAFE_METHOD_
ERR_FAIL_COND_V_MSG((draw_list || compute_list) && !p_use_setup_queue, ERR_INVALID_PARAMETER,
@@ -2587,15 +2608,15 @@ Error RenderingDeviceVulkan::_texture_update(RID p_texture, uint32_t p_layer, co
{
uint32_t barrier_flags = 0;
uint32_t access_flags = 0;
- if (p_post_barrier & BARRIER_MASK_COMPUTE) {
+ if (p_post_barrier.has_flag(BARRIER_MASK_COMPUTE)) {
barrier_flags |= VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
}
- if (p_post_barrier & BARRIER_MASK_RASTER) {
+ if (p_post_barrier.has_flag(BARRIER_MASK_RASTER)) {
barrier_flags |= VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
}
- if (p_post_barrier & BARRIER_MASK_TRANSFER) {
+ if (p_post_barrier.has_flag(BARRIER_MASK_TRANSFER)) {
barrier_flags |= VK_PIPELINE_STAGE_TRANSFER_BIT;
access_flags |= VK_ACCESS_TRANSFER_WRITE_BIT;
}
@@ -2848,7 +2869,7 @@ Size2i RenderingDeviceVulkan::texture_size(RID p_texture) {
return Size2i(tex->width, tex->height);
}
-Error RenderingDeviceVulkan::texture_copy(RID p_from_texture, RID p_to_texture, const Vector3 &p_from, const Vector3 &p_to, const Vector3 &p_size, uint32_t p_src_mipmap, uint32_t p_dst_mipmap, uint32_t p_src_layer, uint32_t p_dst_layer, uint32_t p_post_barrier) {
+Error RenderingDeviceVulkan::texture_copy(RID p_from_texture, RID p_to_texture, const Vector3 &p_from, const Vector3 &p_to, const Vector3 &p_size, uint32_t p_src_mipmap, uint32_t p_dst_mipmap, uint32_t p_src_layer, uint32_t p_dst_layer, BitField<BarrierMask> p_post_barrier) {
_THREAD_SAFE_METHOD_
Texture *src_tex = texture_owner.get_or_null(p_from_texture);
@@ -2973,15 +2994,15 @@ Error RenderingDeviceVulkan::texture_copy(RID p_from_texture, RID p_to_texture,
uint32_t barrier_flags = 0;
uint32_t access_flags = 0;
- if (p_post_barrier & BARRIER_MASK_COMPUTE) {
+ if (p_post_barrier.has_flag(BARRIER_MASK_COMPUTE)) {
barrier_flags |= VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
}
- if (p_post_barrier & BARRIER_MASK_RASTER) {
+ if (p_post_barrier.has_flag(BARRIER_MASK_RASTER)) {
barrier_flags |= VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
}
- if (p_post_barrier & BARRIER_MASK_TRANSFER) {
+ if (p_post_barrier.has_flag(BARRIER_MASK_TRANSFER)) {
barrier_flags |= VK_PIPELINE_STAGE_TRANSFER_BIT;
access_flags |= VK_ACCESS_TRANSFER_WRITE_BIT;
}
@@ -3043,7 +3064,7 @@ Error RenderingDeviceVulkan::texture_copy(RID p_from_texture, RID p_to_texture,
return OK;
}
-Error RenderingDeviceVulkan::texture_resolve_multisample(RID p_from_texture, RID p_to_texture, uint32_t p_post_barrier) {
+Error RenderingDeviceVulkan::texture_resolve_multisample(RID p_from_texture, RID p_to_texture, BitField<BarrierMask> p_post_barrier) {
_THREAD_SAFE_METHOD_
Texture *src_tex = texture_owner.get_or_null(p_from_texture);
@@ -3151,15 +3172,15 @@ Error RenderingDeviceVulkan::texture_resolve_multisample(RID p_from_texture, RID
uint32_t barrier_flags = 0;
uint32_t access_flags = 0;
- if (p_post_barrier & BARRIER_MASK_COMPUTE) {
+ if (p_post_barrier.has_flag(BARRIER_MASK_COMPUTE)) {
barrier_flags |= VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
}
- if (p_post_barrier & BARRIER_MASK_RASTER) {
+ if (p_post_barrier.has_flag(BARRIER_MASK_RASTER)) {
barrier_flags |= VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
}
- if (p_post_barrier & BARRIER_MASK_TRANSFER) {
+ if (p_post_barrier.has_flag(BARRIER_MASK_TRANSFER)) {
barrier_flags |= VK_PIPELINE_STAGE_TRANSFER_BIT;
access_flags |= VK_ACCESS_TRANSFER_WRITE_BIT;
}
@@ -3214,7 +3235,7 @@ Error RenderingDeviceVulkan::texture_resolve_multisample(RID p_from_texture, RID
return OK;
}
-Error RenderingDeviceVulkan::texture_clear(RID p_texture, const Color &p_color, uint32_t p_base_mipmap, uint32_t p_mipmaps, uint32_t p_base_layer, uint32_t p_layers, uint32_t p_post_barrier) {
+Error RenderingDeviceVulkan::texture_clear(RID p_texture, const Color &p_color, uint32_t p_base_mipmap, uint32_t p_mipmaps, uint32_t p_base_layer, uint32_t p_layers, BitField<BarrierMask> p_post_barrier) {
_THREAD_SAFE_METHOD_
Texture *src_tex = texture_owner.get_or_null(p_texture);
@@ -3287,15 +3308,15 @@ Error RenderingDeviceVulkan::texture_clear(RID p_texture, const Color &p_color,
uint32_t barrier_flags = 0;
uint32_t access_flags = 0;
- if (p_post_barrier & BARRIER_MASK_COMPUTE) {
+ if (p_post_barrier.has_flag(BARRIER_MASK_COMPUTE)) {
barrier_flags |= VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
}
- if (p_post_barrier & BARRIER_MASK_RASTER) {
+ if (p_post_barrier.has_flag(BARRIER_MASK_RASTER)) {
barrier_flags |= VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
}
- if (p_post_barrier & BARRIER_MASK_TRANSFER) {
+ if (p_post_barrier.has_flag(BARRIER_MASK_TRANSFER)) {
barrier_flags |= VK_PIPELINE_STAGE_TRANSFER_BIT;
access_flags |= VK_ACCESS_TRANSFER_WRITE_BIT;
}
@@ -3401,6 +3422,16 @@ VkRenderPass RenderingDeviceVulkan::_render_pass_create(const Vector<AttachmentF
LocalVector<int32_t> attachment_last_pass;
attachment_last_pass.resize(p_attachments.size());
+ if (p_view_count > 1) {
+ const VulkanContext::MultiviewCapabilities capabilities = context->get_multiview_capabilities();
+
+ // This only works with multiview!
+ ERR_FAIL_COND_V_MSG(!capabilities.is_supported, VK_NULL_HANDLE, "Multiview not supported");
+
+ // Make sure we limit this to the number of views we support.
+ ERR_FAIL_COND_V_MSG(p_view_count > capabilities.max_view_count, VK_NULL_HANDLE, "Hardware does not support requested number of views for Multiview render pass");
+ }
+
// These are only used if we use multiview but we need to define them in scope.
const uint32_t view_mask = (1 << p_view_count) - 1;
const uint32_t correlation_mask = (1 << p_view_count) - 1;
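
Worked example of the mask arithmetic:

    // p_view_count == 2 (stereo): view_mask = correlation_mask = (1 << 2) - 1 = 0b11,
    // i.e. each subpass renders views 0 and 1, and both views may be rendered concurrently.
    // p_view_count == 4: both masks become 0b1111.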
@@ -3701,7 +3732,7 @@ VkRenderPass RenderingDeviceVulkan::_render_pass_create(const Vector<AttachmentF
reference.layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
attachment_last_pass[attachment] = i;
}
- reference.aspectMask = 0;
+ reference.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
color_references.push_back(reference);
}
@@ -3723,7 +3754,7 @@ VkRenderPass RenderingDeviceVulkan::_render_pass_create(const Vector<AttachmentF
reference.layout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
attachment_last_pass[attachment] = i;
}
- reference.aspectMask = 0; // TODO: We need to set this here, possibly VK_IMAGE_ASPECT_COLOR_BIT?
+ reference.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
input_references.push_back(reference);
}
@@ -3752,7 +3783,7 @@ VkRenderPass RenderingDeviceVulkan::_render_pass_create(const Vector<AttachmentF
reference.layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; // VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
attachment_last_pass[attachment] = i;
}
- reference.aspectMask = 0;
+ reference.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
resolve_references.push_back(reference);
}
@@ -3767,7 +3798,7 @@ VkRenderPass RenderingDeviceVulkan::_render_pass_create(const Vector<AttachmentF
ERR_FAIL_COND_V_MSG(attachment_last_pass[attachment] == i, VK_NULL_HANDLE, "Invalid framebuffer depth format attachment(" + itos(attachment) + "), in pass (" + itos(i) + "), it already was used for something else before in this pass.");
depth_stencil_reference.attachment = attachment_remap[attachment];
depth_stencil_reference.layout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
- depth_stencil_reference.aspectMask = 0;
+ depth_stencil_reference.aspectMask = VK_IMAGE_ASPECT_NONE;
attachment_last_pass[attachment] = i;
if (is_multisample_first) {
@@ -3793,9 +3824,9 @@ VkRenderPass RenderingDeviceVulkan::_render_pass_create(const Vector<AttachmentF
vrs_reference.pNext = nullptr;
vrs_reference.attachment = attachment_remap[attachment];
vrs_reference.layout = VK_IMAGE_LAYOUT_FRAGMENT_SHADING_RATE_ATTACHMENT_OPTIMAL_KHR;
- vrs_reference.aspectMask = 0;
+ vrs_reference.aspectMask = VK_IMAGE_ASPECT_NONE;
- Size2i texel_size = context->get_vrs_capabilities().max_texel_size;
+ Size2i texel_size = context->get_vrs_capabilities().texel_size;
VkFragmentShadingRateAttachmentInfoKHR &vrs_attachment_info = vrs_attachment_info_array[i];
vrs_attachment_info.sType = VK_STRUCTURE_TYPE_FRAGMENT_SHADING_RATE_ATTACHMENT_INFO_KHR;
@@ -3934,16 +3965,9 @@ VkRenderPass RenderingDeviceVulkan::_render_pass_create(const Vector<AttachmentF
Vector<uint32_t> view_masks;
VkRenderPassMultiviewCreateInfo render_pass_multiview_create_info;
- if (p_view_count > 1) {
- // This may no longer be needed with the new settings already including this.
-
- const VulkanContext::MultiviewCapabilities capabilities = context->get_multiview_capabilities();
-
- // For now this only works with multiview!
- ERR_FAIL_COND_V_MSG(!capabilities.is_supported, VK_NULL_HANDLE, "Multiview not supported");
-
- // Make sure we limit this to the number of views we support.
- ERR_FAIL_COND_V_MSG(p_view_count > capabilities.max_view_count, VK_NULL_HANDLE, "Hardware does not support requested number of views for Multiview render pass");
+ if ((p_view_count > 1) && !context->supports_renderpass2()) {
+ // This is only required when using vkCreateRenderPass; we add it when vkCreateRenderPass2KHR
+ // is not supported, resulting in this info being passed to our vkCreateRenderPass fallback.
// Set view masks for each subpass.
for (uint32_t i = 0; i < subpasses.size(); i++) {
@@ -4354,7 +4378,7 @@ RenderingDevice::VertexFormatID RenderingDeviceVulkan::vertex_format_create(cons
return id;
}
-RID RenderingDeviceVulkan::vertex_array_create(uint32_t p_vertex_count, VertexFormatID p_vertex_format, const Vector<RID> &p_src_buffers) {
+RID RenderingDeviceVulkan::vertex_array_create(uint32_t p_vertex_count, VertexFormatID p_vertex_format, const Vector<RID> &p_src_buffers, const Vector<uint64_t> &p_offsets) {
_THREAD_SAFE_METHOD_
ERR_FAIL_COND_V(!vertex_formats.has(p_vertex_format), RID());
@@ -4368,6 +4392,13 @@ RID RenderingDeviceVulkan::vertex_array_create(uint32_t p_vertex_count, VertexFo
VertexArray vertex_array;
+ if (p_offsets.is_empty()) {
+ vertex_array.offsets.resize_zeroed(p_src_buffers.size());
+ } else {
+ ERR_FAIL_COND_V(p_offsets.size() != p_src_buffers.size(), RID());
+ vertex_array.offsets = p_offsets;
+ }
+
vertex_array.vertex_count = p_vertex_count;
vertex_array.description = p_vertex_format;
vertex_array.max_instances_allowed = 0xFFFFFFFF; // By default as many as you want.
@@ -4399,7 +4430,6 @@ RID RenderingDeviceVulkan::vertex_array_create(uint32_t p_vertex_count, VertexFo
}
vertex_array.buffers.push_back(buffer->buffer);
- vertex_array.offsets.push_back(0); // Offset unused, but passing anyway.
}
RID id = vertex_array_owner.make_rid(vertex_array);
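
The new p_offsets parameter lets each source buffer be bound with a starting byte offset; when omitted, all offsets default to zero (resize_zeroed above). A hypothetical caller sub-allocating two attribute streams from one pool buffer (names and sizes illustrative):

    Vector<RID> buffers;
    buffers.push_back(pool_buffer); // Positions at the start of the pool.
    buffers.push_back(pool_buffer); // UVs 64 KiB in.
    Vector<uint64_t> offsets;
    offsets.push_back(0);
    offsets.push_back(65536);
    RID array = rd->vertex_array_create(vertex_count, format_id, buffers, offsets);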
@@ -6291,7 +6321,7 @@ void RenderingDeviceVulkan::uniform_set_set_invalidation_callback(RID p_uniform_
us->invalidated_callback_userdata = p_userdata;
}
-Error RenderingDeviceVulkan::buffer_update(RID p_buffer, uint32_t p_offset, uint32_t p_size, const void *p_data, uint32_t p_post_barrier) {
+Error RenderingDeviceVulkan::buffer_update(RID p_buffer, uint32_t p_offset, uint32_t p_size, const void *p_data, BitField<BarrierMask> p_post_barrier) {
_THREAD_SAFE_METHOD_
ERR_FAIL_COND_V_MSG(draw_list, ERR_INVALID_PARAMETER,
@@ -6301,7 +6331,7 @@ Error RenderingDeviceVulkan::buffer_update(RID p_buffer, uint32_t p_offset, uint
VkPipelineStageFlags dst_stage_mask = 0;
VkAccessFlags dst_access = 0;
- if (p_post_barrier & BARRIER_MASK_TRANSFER) {
+ if (p_post_barrier.has_flag(BARRIER_MASK_TRANSFER)) {
// Protect subsequent updates.
dst_stage_mask = VK_PIPELINE_STAGE_TRANSFER_BIT;
dst_access = VK_ACCESS_TRANSFER_WRITE_BIT;
@@ -6337,7 +6367,7 @@ Error RenderingDeviceVulkan::buffer_update(RID p_buffer, uint32_t p_offset, uint
return err;
}
-Error RenderingDeviceVulkan::buffer_clear(RID p_buffer, uint32_t p_offset, uint32_t p_size, uint32_t p_post_barrier) {
+Error RenderingDeviceVulkan::buffer_clear(RID p_buffer, uint32_t p_offset, uint32_t p_size, BitField<BarrierMask> p_post_barrier) {
_THREAD_SAFE_METHOD_
ERR_FAIL_COND_V_MSG((p_size % 4) != 0, ERR_INVALID_PARAMETER,
@@ -6349,7 +6379,7 @@ Error RenderingDeviceVulkan::buffer_clear(RID p_buffer, uint32_t p_offset, uint3
VkPipelineStageFlags dst_stage_mask = 0;
VkAccessFlags dst_access = 0;
- if (p_post_barrier & BARRIER_MASK_TRANSFER) {
+ if (p_post_barrier.has_flag(BARRIER_MASK_TRANSFER)) {
// Protect subsequent updates.
dst_stage_mask = VK_PIPELINE_STAGE_TRANSFER_BIT;
dst_access = VK_ACCESS_TRANSFER_WRITE_BIT;
@@ -6388,7 +6418,7 @@ Vector<uint8_t> RenderingDeviceVulkan::buffer_get_data(RID p_buffer) {
VkPipelineShaderStageCreateFlags src_stage_mask = VK_PIPELINE_STAGE_TRANSFER_BIT;
VkAccessFlags src_access_mask = VK_ACCESS_TRANSFER_WRITE_BIT;
// Get the vulkan buffer and the potential stage/access possible.
- Buffer *buffer = _get_buffer_from_owner(p_buffer, src_stage_mask, src_access_mask, BARRIER_MASK_ALL);
+ Buffer *buffer = _get_buffer_from_owner(p_buffer, src_stage_mask, src_access_mask, BARRIER_MASK_ALL_BARRIERS);
if (!buffer) {
ERR_FAIL_V_MSG(Vector<uint8_t>(), "Buffer is either invalid or this type of buffer can't be retrieved. Only Index and Vertex buffers allow retrieving.");
}
@@ -7727,7 +7757,7 @@ void RenderingDeviceVulkan::draw_list_bind_index_array(DrawListID p_list, RID p_
dl->validation.index_array_size = index_array->indices;
dl->validation.index_array_offset = index_array->offset;
- vkCmdBindIndexBuffer(dl->command_buffer, index_array->buffer, index_array->offset, index_array->index_type);
+ vkCmdBindIndexBuffer(dl->command_buffer, index_array->buffer, 0, index_array->index_type);
}
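
The index buffer is now always bound at byte offset 0; the array's offset is kept in validation.index_array_offset (stored just above) and presumably applied at draw time as an element index rather than misapplied here as a byte offset. A sketch of the assumed draw-time consumer, which this diff does not show:

    vkCmdDrawIndexed(dl->command_buffer, to_draw, p_instances,
            dl->validation.index_array_offset, /*vertexOffset*/ 0, /*firstInstance*/ 0);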
void RenderingDeviceVulkan::draw_list_set_line_width(DrawListID p_list, float p_width) {
@@ -8060,7 +8090,7 @@ void RenderingDeviceVulkan::_draw_list_free(Rect2i *r_last_viewport) {
_THREAD_SAFE_UNLOCK_
}
-void RenderingDeviceVulkan::draw_list_end(uint32_t p_post_barrier) {
+void RenderingDeviceVulkan::draw_list_end(BitField<BarrierMask> p_post_barrier) {
_THREAD_SAFE_METHOD_
ERR_FAIL_COND_MSG(!draw_list, "Immediate draw list is already inactive.");
@@ -8082,15 +8112,15 @@ void RenderingDeviceVulkan::draw_list_end(uint32_t p_post_barrier) {
uint32_t barrier_flags = 0;
uint32_t access_flags = 0;
- if (p_post_barrier & BARRIER_MASK_COMPUTE) {
+ if (p_post_barrier.has_flag(BARRIER_MASK_COMPUTE)) {
barrier_flags |= VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
}
- if (p_post_barrier & BARRIER_MASK_RASTER) {
+ if (p_post_barrier.has_flag(BARRIER_MASK_RASTER)) {
barrier_flags |= VK_PIPELINE_STAGE_VERTEX_INPUT_BIT | VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT /*| VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT*/;
access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT | VK_ACCESS_INDEX_READ_BIT | VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT /*| VK_ACCESS_INDIRECT_COMMAND_READ_BIT*/;
}
- if (p_post_barrier & BARRIER_MASK_TRANSFER) {
+ if (p_post_barrier.has_flag(BARRIER_MASK_TRANSFER)) {
barrier_flags |= VK_PIPELINE_STAGE_TRANSFER_BIT;
access_flags |= VK_ACCESS_TRANSFER_WRITE_BIT | VK_ACCESS_TRANSFER_READ_BIT;
}
@@ -8568,20 +8598,20 @@ void RenderingDeviceVulkan::compute_list_add_barrier(ComputeListID p_list) {
#endif
}
-void RenderingDeviceVulkan::compute_list_end(uint32_t p_post_barrier) {
+void RenderingDeviceVulkan::compute_list_end(BitField<BarrierMask> p_post_barrier) {
ERR_FAIL_COND(!compute_list);
uint32_t barrier_flags = 0;
uint32_t access_flags = 0;
- if (p_post_barrier & BARRIER_MASK_COMPUTE) {
+ if (p_post_barrier.has_flag(BARRIER_MASK_COMPUTE)) {
barrier_flags |= VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
}
- if (p_post_barrier & BARRIER_MASK_RASTER) {
+ if (p_post_barrier.has_flag(BARRIER_MASK_RASTER)) {
barrier_flags |= VK_PIPELINE_STAGE_VERTEX_INPUT_BIT | VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT;
access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT | VK_ACCESS_INDEX_READ_BIT | VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT | VK_ACCESS_INDIRECT_COMMAND_READ_BIT;
}
- if (p_post_barrier & BARRIER_MASK_TRANSFER) {
+ if (p_post_barrier.has_flag(BARRIER_MASK_TRANSFER)) {
barrier_flags |= VK_PIPELINE_STAGE_TRANSFER_BIT;
access_flags |= VK_ACCESS_TRANSFER_WRITE_BIT | VK_ACCESS_TRANSFER_READ_BIT;
}
@@ -8649,43 +8679,45 @@ void RenderingDeviceVulkan::compute_list_end(uint32_t p_post_barrier) {
_THREAD_SAFE_UNLOCK_
}
-void RenderingDeviceVulkan::barrier(uint32_t p_from, uint32_t p_to) {
+void RenderingDeviceVulkan::barrier(BitField<BarrierMask> p_from, BitField<BarrierMask> p_to) {
uint32_t src_barrier_flags = 0;
uint32_t src_access_flags = 0;
- if (p_from & BARRIER_MASK_COMPUTE) {
- src_barrier_flags |= VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
- src_access_flags |= VK_ACCESS_SHADER_WRITE_BIT;
- }
- if (p_from & BARRIER_MASK_RASTER) {
- src_barrier_flags |= VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT;
- src_access_flags |= VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
- }
- if (p_from & BARRIER_MASK_TRANSFER) {
- src_barrier_flags |= VK_PIPELINE_STAGE_TRANSFER_BIT;
- src_access_flags |= VK_ACCESS_TRANSFER_WRITE_BIT;
- }
if (p_from == 0) {
src_barrier_flags = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
+ } else {
+ if (p_from.has_flag(BARRIER_MASK_COMPUTE)) {
+ src_barrier_flags |= VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
+ src_access_flags |= VK_ACCESS_SHADER_WRITE_BIT;
+ }
+ if (p_from.has_flag(BARRIER_MASK_RASTER)) {
+ src_barrier_flags |= VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT;
+ src_access_flags |= VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
+ }
+ if (p_from.has_flag(BARRIER_MASK_TRANSFER)) {
+ src_barrier_flags |= VK_PIPELINE_STAGE_TRANSFER_BIT;
+ src_access_flags |= VK_ACCESS_TRANSFER_WRITE_BIT;
+ }
}
uint32_t dst_barrier_flags = 0;
uint32_t dst_access_flags = 0;
- if (p_to & BARRIER_MASK_COMPUTE) {
- dst_barrier_flags |= VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
- dst_access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
- }
- if (p_to & BARRIER_MASK_RASTER) {
- dst_barrier_flags |= VK_PIPELINE_STAGE_VERTEX_INPUT_BIT | VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT;
- dst_access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT | VK_ACCESS_INDEX_READ_BIT | VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT | VK_ACCESS_INDIRECT_COMMAND_READ_BIT;
- }
- if (p_to & BARRIER_MASK_TRANSFER) {
- dst_barrier_flags |= VK_PIPELINE_STAGE_TRANSFER_BIT;
- dst_access_flags |= VK_ACCESS_TRANSFER_WRITE_BIT | VK_ACCESS_TRANSFER_READ_BIT;
- }
if (p_to == 0) {
dst_barrier_flags = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT;
+ } else {
+ if (p_to.has_flag(BARRIER_MASK_COMPUTE)) {
+ dst_barrier_flags |= VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
+ dst_access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
+ }
+ if (p_to.has_flag(BARRIER_MASK_RASTER)) {
+ dst_barrier_flags |= VK_PIPELINE_STAGE_VERTEX_INPUT_BIT | VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT;
+ dst_access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT | VK_ACCESS_INDEX_READ_BIT | VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT | VK_ACCESS_INDIRECT_COMMAND_READ_BIT;
+ }
+ if (p_to.has_flag(BARRIER_MASK_TRANSFER)) {
+ dst_barrier_flags |= VK_PIPELINE_STAGE_TRANSFER_BIT;
+ dst_access_flags |= VK_ACCESS_TRANSFER_WRITE_BIT | VK_ACCESS_TRANSFER_READ_BIT;
+ }
}
_memory_barrier(src_barrier_flags, dst_barrier_flags, src_access_flags, dst_access_flags, true);
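
A hypothetical use of the reworked barrier(), plus the degenerate cases the branches above encode:

    // Make compute writes visible to subsequent raster reads only:
    rd->barrier(RD::BARRIER_MASK_COMPUTE, RD::BARRIER_MASK_RASTER);
    // An empty p_from degenerates to TOP_OF_PIPE (no prior work to wait on);
    // an empty p_to degenerates to BOTTOM_OF_PIPE (no later work waits on this).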
@@ -9162,7 +9194,7 @@ void RenderingDeviceVulkan::_free_pending_resources(int p_frame) {
Texture *texture = &frames[p_frame].textures_to_dispose_of.front()->get();
if (texture->bound) {
- WARN_PRINT("Deleted a texture while it was bound..");
+ WARN_PRINT("Deleted a texture while it was bound.");
}
vkDestroyImageView(device, texture->view, nullptr);
if (texture->owner.is_null()) {
@@ -9363,12 +9395,10 @@ void RenderingDeviceVulkan::initialize(VulkanContext *p_context, bool p_local_de
}
}
- // NOTE: If adding new project settings here, also duplicate their definition in
- // rendering_server.cpp for headless doctool.
- staging_buffer_block_size = GLOBAL_DEF("rendering/rendering_device/staging_buffer/block_size_kb", 256);
+ staging_buffer_block_size = GLOBAL_GET("rendering/rendering_device/staging_buffer/block_size_kb");
staging_buffer_block_size = MAX(4u, staging_buffer_block_size);
staging_buffer_block_size *= 1024; // Kb -> bytes.
- staging_buffer_max_size = GLOBAL_DEF("rendering/rendering_device/staging_buffer/max_size_mb", 128);
+ staging_buffer_max_size = GLOBAL_GET("rendering/rendering_device/staging_buffer/max_size_mb");
staging_buffer_max_size = MAX(1u, staging_buffer_max_size);
staging_buffer_max_size *= 1024 * 1024;
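
Switching GLOBAL_DEF to GLOBAL_GET assumes each setting is now registered, with its default, in exactly one place — presumably rendering_server.cpp, as the removed note hints. A sketch of the assumed registration site, with defaults matching the old inline values:

    // Assumed to exist elsewhere (e.g. rendering_server.cpp):
    GLOBAL_DEF("rendering/rendering_device/staging_buffer/block_size_kb", 256);
    GLOBAL_DEF("rendering/rendering_device/staging_buffer/max_size_mb", 128);
    GLOBAL_DEF("rendering/rendering_device/staging_buffer/texture_upload_region_size_px", 64);
    GLOBAL_DEF("rendering/rendering_device/vulkan/max_descriptors_per_pool", 64);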
@@ -9376,7 +9406,7 @@ void RenderingDeviceVulkan::initialize(VulkanContext *p_context, bool p_local_de
// Validate enough blocks.
staging_buffer_max_size = staging_buffer_block_size * 4;
}
- texture_upload_region_size_px = GLOBAL_DEF("rendering/rendering_device/staging_buffer/texture_upload_region_size_px", 64);
+ texture_upload_region_size_px = GLOBAL_GET("rendering/rendering_device/staging_buffer/texture_upload_region_size_px");
texture_upload_region_size_px = nearest_power_of_2_templated(texture_upload_region_size_px);
frames_drawn = frame_count; // Start from frame count, so everything else is immediately old.
@@ -9391,7 +9421,7 @@ void RenderingDeviceVulkan::initialize(VulkanContext *p_context, bool p_local_de
ERR_CONTINUE(err != OK);
}
- max_descriptors_per_pool = GLOBAL_DEF("rendering/rendering_device/vulkan/max_descriptors_per_pool", 64);
+ max_descriptors_per_pool = GLOBAL_GET("rendering/rendering_device/vulkan/max_descriptors_per_pool");
// Check to make sure DescriptorPoolKey is good.
static_assert(sizeof(uint64_t) * 3 >= UNIFORM_TYPE_MAX * sizeof(uint16_t));
@@ -9702,6 +9732,12 @@ uint64_t RenderingDeviceVulkan::limit_get(Limit p_limit) const {
VulkanContext::SubgroupCapabilities subgroup_capabilities = context->get_subgroup_capabilities();
return subgroup_capabilities.supported_operations_flags_rd();
}
+ case LIMIT_VRS_TEXEL_WIDTH: {
+ return context->get_vrs_capabilities().texel_size.x;
+ }
+ case LIMIT_VRS_TEXEL_HEIGHT: {
+ return context->get_vrs_capabilities().texel_size.y;
+ }
default:
ERR_FAIL_V(0);
}
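
A hypothetical consumer of the two new limits, sizing a shading-rate attachment for a render target (target dimensions illustrative):

    uint64_t texel_w = rd->limit_get(RD::LIMIT_VRS_TEXEL_WIDTH);
    uint64_t texel_h = rd->limit_get(RD::LIMIT_VRS_TEXEL_HEIGHT);
    // One VRS texel steers a texel_w x texel_h block of framebuffer pixels; round up to cover.
    uint32_t vrs_width = (1920 + texel_w - 1) / texel_w;
    uint32_t vrs_height = (1080 + texel_h - 1) / texel_h;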
diff --git a/drivers/vulkan/rendering_device_vulkan.h b/drivers/vulkan/rendering_device_vulkan.h
index 7c75e9bb2e..537ad88f5a 100644
--- a/drivers/vulkan/rendering_device_vulkan.h
+++ b/drivers/vulkan/rendering_device_vulkan.h
@@ -162,7 +162,7 @@ class RenderingDeviceVulkan : public RenderingDevice {
uint32_t texture_upload_region_size_px = 0;
Vector<uint8_t> _texture_get_data_from_image(Texture *tex, VkImage p_image, VmaAllocation p_allocation, uint32_t p_layer, bool p_2d = false);
- Error _texture_update(RID p_texture, uint32_t p_layer, const Vector<uint8_t> &p_data, uint32_t p_post_barrier, bool p_use_setup_queue);
+ Error _texture_update(RID p_texture, uint32_t p_layer, const Vector<uint8_t> &p_data, BitField<BarrierMask> p_post_barrier, bool p_use_setup_queue);
/*****************/
/**** SAMPLER ****/
@@ -909,7 +909,7 @@ class RenderingDeviceVulkan : public RenderingDevice {
Error _draw_list_setup_framebuffer(Framebuffer *p_framebuffer, InitialAction p_initial_color_action, FinalAction p_final_color_action, InitialAction p_initial_depth_action, FinalAction p_final_depth_action, VkFramebuffer *r_framebuffer, VkRenderPass *r_render_pass, uint32_t *r_subpass_count);
Error _draw_list_render_pass_begin(Framebuffer *framebuffer, InitialAction p_initial_color_action, FinalAction p_final_color_action, InitialAction p_initial_depth_action, FinalAction p_final_depth_action, const Vector<Color> &p_clear_colors, float p_clear_depth, uint32_t p_clear_stencil, Point2i viewport_offset, Point2i viewport_size, VkFramebuffer vkframebuffer, VkRenderPass render_pass, VkCommandBuffer command_buffer, VkSubpassContents subpass_contents, const Vector<RID> &p_storage_textures);
_FORCE_INLINE_ DrawList *_get_draw_list_ptr(DrawListID p_id);
- Buffer *_get_buffer_from_owner(RID p_buffer, VkPipelineStageFlags &dst_stage_mask, VkAccessFlags &dst_access, uint32_t p_post_barrier);
+ Buffer *_get_buffer_from_owner(RID p_buffer, VkPipelineStageFlags &dst_stage_mask, VkAccessFlags &dst_access, BitField<BarrierMask> p_post_barrier);
Error _draw_list_allocate(const Rect2i &p_viewport, uint32_t p_splits, uint32_t p_subpass);
void _draw_list_free(Rect2i *r_last_viewport = nullptr);
@@ -1052,7 +1052,7 @@ public:
virtual RID texture_create_from_extension(TextureType p_type, DataFormat p_format, TextureSamples p_samples, uint64_t p_flags, uint64_t p_image, uint64_t p_width, uint64_t p_height, uint64_t p_depth, uint64_t p_layers);
virtual RID texture_create_shared_from_slice(const TextureView &p_view, RID p_with_texture, uint32_t p_layer, uint32_t p_mipmap, uint32_t p_mipmaps = 1, TextureSliceType p_slice_type = TEXTURE_SLICE_2D);
- virtual Error texture_update(RID p_texture, uint32_t p_layer, const Vector<uint8_t> &p_data, uint32_t p_post_barrier = BARRIER_MASK_ALL);
+ virtual Error texture_update(RID p_texture, uint32_t p_layer, const Vector<uint8_t> &p_data, BitField<BarrierMask> p_post_barrier = BARRIER_MASK_ALL_BARRIERS);
virtual Vector<uint8_t> texture_get_data(RID p_texture, uint32_t p_layer);
virtual bool texture_is_format_supported_for_usage(DataFormat p_format, uint32_t p_usage) const;
@@ -1060,9 +1060,9 @@ public:
virtual bool texture_is_valid(RID p_texture);
virtual Size2i texture_size(RID p_texture);
- virtual Error texture_copy(RID p_from_texture, RID p_to_texture, const Vector3 &p_from, const Vector3 &p_to, const Vector3 &p_size, uint32_t p_src_mipmap, uint32_t p_dst_mipmap, uint32_t p_src_layer, uint32_t p_dst_layer, uint32_t p_post_barrier = BARRIER_MASK_ALL);
- virtual Error texture_clear(RID p_texture, const Color &p_color, uint32_t p_base_mipmap, uint32_t p_mipmaps, uint32_t p_base_layer, uint32_t p_layers, uint32_t p_post_barrier = BARRIER_MASK_ALL);
- virtual Error texture_resolve_multisample(RID p_from_texture, RID p_to_texture, uint32_t p_post_barrier = BARRIER_MASK_ALL);
+ virtual Error texture_copy(RID p_from_texture, RID p_to_texture, const Vector3 &p_from, const Vector3 &p_to, const Vector3 &p_size, uint32_t p_src_mipmap, uint32_t p_dst_mipmap, uint32_t p_src_layer, uint32_t p_dst_layer, BitField<BarrierMask> p_post_barrier = BARRIER_MASK_ALL_BARRIERS);
+ virtual Error texture_clear(RID p_texture, const Color &p_color, uint32_t p_base_mipmap, uint32_t p_mipmaps, uint32_t p_base_layer, uint32_t p_layers, BitField<BarrierMask> p_post_barrier = BARRIER_MASK_ALL_BARRIERS);
+ virtual Error texture_resolve_multisample(RID p_from_texture, RID p_to_texture, BitField<BarrierMask> p_post_barrier = BARRIER_MASK_ALL_BARRIERS);
/*********************/
/**** FRAMEBUFFER ****/
@@ -1095,7 +1095,7 @@ public:
// Internally reference counted; this ID is guaranteed to be unique for the same description, but needs to be freed as many times as it was allocated.
virtual VertexFormatID vertex_format_create(const Vector<VertexAttribute> &p_vertex_formats);
- virtual RID vertex_array_create(uint32_t p_vertex_count, VertexFormatID p_vertex_format, const Vector<RID> &p_src_buffers);
+ virtual RID vertex_array_create(uint32_t p_vertex_count, VertexFormatID p_vertex_format, const Vector<RID> &p_src_buffers, const Vector<uint64_t> &p_offsets = Vector<uint64_t>());
virtual RID index_buffer_create(uint32_t p_size_indices, IndexBufferFormat p_format, const Vector<uint8_t> &p_data = Vector<uint8_t>(), bool p_use_restart_indices = false);
@@ -1124,8 +1124,8 @@ public:
virtual bool uniform_set_is_valid(RID p_uniform_set);
virtual void uniform_set_set_invalidation_callback(RID p_uniform_set, InvalidationCallback p_callback, void *p_userdata);
- virtual Error buffer_update(RID p_buffer, uint32_t p_offset, uint32_t p_size, const void *p_data, uint32_t p_post_barrier = BARRIER_MASK_ALL); // Works for any buffer.
- virtual Error buffer_clear(RID p_buffer, uint32_t p_offset, uint32_t p_size, uint32_t p_post_barrier = BARRIER_MASK_ALL);
+ virtual Error buffer_update(RID p_buffer, uint32_t p_offset, uint32_t p_size, const void *p_data, BitField<BarrierMask> p_post_barrier = BARRIER_MASK_ALL_BARRIERS); // Works for any buffer.
+ virtual Error buffer_clear(RID p_buffer, uint32_t p_offset, uint32_t p_size, BitField<BarrierMask> p_post_barrier = BARRIER_MASK_ALL_BARRIERS);
virtual Vector<uint8_t> buffer_get_data(RID p_buffer);
/*************************/
@@ -1176,7 +1176,7 @@ public:
virtual DrawListID draw_list_switch_to_next_pass();
virtual Error draw_list_switch_to_next_pass_split(uint32_t p_splits, DrawListID *r_split_ids);
- virtual void draw_list_end(uint32_t p_post_barrier = BARRIER_MASK_ALL);
+ virtual void draw_list_end(BitField<BarrierMask> p_post_barrier = BARRIER_MASK_ALL_BARRIERS);
/***********************/
/**** COMPUTE LISTS ****/
@@ -1191,9 +1191,9 @@ public:
virtual void compute_list_dispatch(ComputeListID p_list, uint32_t p_x_groups, uint32_t p_y_groups, uint32_t p_z_groups);
virtual void compute_list_dispatch_threads(ComputeListID p_list, uint32_t p_x_threads, uint32_t p_y_threads, uint32_t p_z_threads);
virtual void compute_list_dispatch_indirect(ComputeListID p_list, RID p_buffer, uint32_t p_offset);
- virtual void compute_list_end(uint32_t p_post_barrier = BARRIER_MASK_ALL);
+ virtual void compute_list_end(BitField<BarrierMask> p_post_barrier = BARRIER_MASK_ALL_BARRIERS);
- virtual void barrier(uint32_t p_from = BARRIER_MASK_ALL, uint32_t p_to = BARRIER_MASK_ALL);
+ virtual void barrier(BitField<BarrierMask> p_from = BARRIER_MASK_ALL_BARRIERS, BitField<BarrierMask> p_to = BARRIER_MASK_ALL_BARRIERS);
virtual void full_barrier();
/**************/
diff --git a/drivers/vulkan/vulkan_context.cpp b/drivers/vulkan/vulkan_context.cpp
index 464ab474e1..381df6d65e 100644
--- a/drivers/vulkan/vulkan_context.cpp
+++ b/drivers/vulkan/vulkan_context.cpp
@@ -625,6 +625,9 @@ Error VulkanContext::_check_capabilities() {
vrs_capabilities.pipeline_vrs_supported = false;
vrs_capabilities.primitive_vrs_supported = false;
vrs_capabilities.attachment_vrs_supported = false;
+ vrs_capabilities.min_texel_size = Size2i();
+ vrs_capabilities.max_texel_size = Size2i();
+ vrs_capabilities.texel_size = Size2i();
multiview_capabilities.is_supported = false;
multiview_capabilities.geometry_shader_is_supported = false;
multiview_capabilities.tessellation_shader_is_supported = false;
@@ -788,6 +791,10 @@ Error VulkanContext::_check_capabilities() {
vrs_capabilities.max_texel_size.x = vrsProperties.maxFragmentShadingRateAttachmentTexelSize.width;
vrs_capabilities.max_texel_size.y = vrsProperties.maxFragmentShadingRateAttachmentTexelSize.height;
+ // We'll attempt to default to a texel size of 16x16, clamped to the supported range.
+ vrs_capabilities.texel_size.x = CLAMP(16, vrs_capabilities.min_texel_size.x, vrs_capabilities.max_texel_size.x);
+ vrs_capabilities.texel_size.y = CLAMP(16, vrs_capabilities.min_texel_size.y, vrs_capabilities.max_texel_size.y);
+
print_verbose(String(" Attachment fragment shading rate") + String(", min texel size: (") + itos(vrs_capabilities.min_texel_size.x) + String(", ") + itos(vrs_capabilities.min_texel_size.y) + String(")") + String(", max texel size: (") + itos(vrs_capabilities.max_texel_size.x) + String(", ") + itos(vrs_capabilities.max_texel_size.y) + String(")"));
}
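
The CLAMP keeps the preferred 16x16 footprint within the hardware's reported bounds. Worked examples:

    // min 8x8,   max 32x32 -> texel_size = (16, 16)  (preference honored).
    // min 1x1,   max 8x8   -> texel_size = (8, 8)    (clamped down to max).
    // min 32x32, max 64x64 -> texel_size = (32, 32)  (raised up to min).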
diff --git a/drivers/vulkan/vulkan_context.h b/drivers/vulkan/vulkan_context.h
index b47aec1de1..8cf33fa463 100644
--- a/drivers/vulkan/vulkan_context.h
+++ b/drivers/vulkan/vulkan_context.h
@@ -76,6 +76,8 @@ public:
Size2i min_texel_size;
Size2i max_texel_size;
+
+ Size2i texel_size; // The texel size we'll use.
};
struct ShaderCapabilities {
@@ -273,6 +275,7 @@ protected:
public:
// Extension calls.
+ bool supports_renderpass2() const { return has_renderpass2_ext; }
VkResult vkCreateRenderPass2KHR(VkDevice p_device, const VkRenderPassCreateInfo2 *p_create_info, const VkAllocationCallbacks *p_allocator, VkRenderPass *p_render_pass);
uint32_t get_vulkan_major() const { return vulkan_major; };
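
A sketch of the call-site shape the new accessor enables, assuming has_renderpass2_ext is set during extension initialization (error handling elided):

    VkRenderPass render_pass;
    if (context->supports_renderpass2()) {
        // Multiview data travels inside VkRenderPassCreateInfo2 itself.
        context->vkCreateRenderPass2KHR(device, &create_info_2, nullptr, &render_pass);
    } else {
        // Fallback: chain VkRenderPassMultiviewCreateInfo onto VkRenderPassCreateInfo,
        // as _render_pass_create above now does only for this path.
        vkCreateRenderPass(device, &create_info, nullptr, &render_pass);
    }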