20 files changed, 309 insertions, 183 deletions
diff --git a/doc/classes/RenderingDevice.xml b/doc/classes/RenderingDevice.xml index 7b041d3b75..580ce6f382 100644 --- a/doc/classes/RenderingDevice.xml +++ b/doc/classes/RenderingDevice.xml @@ -9,8 +9,8 @@ <methods> <method name="barrier"> <return type="void" /> - <param index="0" name="from" type="int" default="7" /> - <param index="1" name="to" type="int" default="7" /> + <param index="0" name="from" type="int" enum="RenderingDevice.BarrierMask" default="7" /> + <param index="1" name="to" type="int" enum="RenderingDevice.BarrierMask" default="7" /> <description> </description> </method> @@ -19,7 +19,7 @@ <param index="0" name="buffer" type="RID" /> <param index="1" name="offset" type="int" /> <param index="2" name="size_bytes" type="int" /> - <param index="3" name="post_barrier" type="int" default="7" /> + <param index="3" name="post_barrier" type="int" enum="RenderingDevice.BarrierMask" default="7" /> <description> </description> </method> @@ -35,7 +35,7 @@ <param index="1" name="offset" type="int" /> <param index="2" name="size_bytes" type="int" /> <param index="3" name="data" type="PackedByteArray" /> - <param index="4" name="post_barrier" type="int" default="7" /> + <param index="4" name="post_barrier" type="int" enum="RenderingDevice.BarrierMask" default="7" /> <description> </description> </method> @@ -83,7 +83,7 @@ </method> <method name="compute_list_end"> <return type="void" /> - <param index="0" name="post_barrier" type="int" default="7" /> + <param index="0" name="post_barrier" type="int" enum="RenderingDevice.BarrierMask" default="7" /> <description> </description> </method> @@ -223,7 +223,7 @@ </method> <method name="draw_list_end"> <return type="void" /> - <param index="0" name="post_barrier" type="int" default="7" /> + <param index="0" name="post_barrier" type="int" enum="RenderingDevice.BarrierMask" default="7" /> <description> </description> </method> @@ -534,7 +534,7 @@ <param index="3" name="mipmap_count" type="int" /> <param index="4" name="base_layer" type="int" /> <param index="5" name="layer_count" type="int" /> - <param index="6" name="post_barrier" type="int" default="7" /> + <param index="6" name="post_barrier" type="int" enum="RenderingDevice.BarrierMask" default="7" /> <description> </description> </method> @@ -549,7 +549,7 @@ <param index="6" name="dst_mipmap" type="int" /> <param index="7" name="src_layer" type="int" /> <param index="8" name="dst_layer" type="int" /> - <param index="9" name="post_barrier" type="int" default="7" /> + <param index="9" name="post_barrier" type="int" enum="RenderingDevice.BarrierMask" default="7" /> <description> </description> </method> @@ -609,7 +609,7 @@ <return type="int" enum="Error" /> <param index="0" name="from_texture" type="RID" /> <param index="1" name="to_texture" type="RID" /> - <param index="2" name="post_barrier" type="int" default="7" /> + <param index="2" name="post_barrier" type="int" enum="RenderingDevice.BarrierMask" default="7" /> <description> </description> </method> @@ -618,7 +618,7 @@ <param index="0" name="texture" type="RID" /> <param index="1" name="layer" type="int" /> <param index="2" name="data" type="PackedByteArray" /> - <param index="3" name="post_barrier" type="int" default="7" /> + <param index="3" name="post_barrier" type="int" enum="RenderingDevice.BarrierMask" default="7" /> <description> </description> </method> @@ -669,16 +669,6 @@ </method> </methods> <constants> - <constant name="BARRIER_MASK_RASTER" value="1"> - </constant> - <constant name="BARRIER_MASK_COMPUTE" value="2"> - 
</constant> - <constant name="BARRIER_MASK_TRANSFER" value="4"> - </constant> - <constant name="BARRIER_MASK_ALL" value="7"> - </constant> - <constant name="BARRIER_MASK_NO_BARRIER" value="8"> - </constant> <constant name="DEVICE_TYPE_OTHER" value="0" enum="DeviceType"> Rendering device type does not match any of the other enum values or is unknown. </constant> @@ -1161,6 +1151,16 @@ </constant> <constant name="DATA_FORMAT_MAX" value="218" enum="DataFormat"> </constant> + <constant name="BARRIER_MASK_RASTER" value="1" enum="BarrierMask" is_bitfield="true"> + </constant> + <constant name="BARRIER_MASK_COMPUTE" value="2" enum="BarrierMask" is_bitfield="true"> + </constant> + <constant name="BARRIER_MASK_TRANSFER" value="4" enum="BarrierMask" is_bitfield="true"> + </constant> + <constant name="BARRIER_MASK_ALL_BARRIERS" value="7" enum="BarrierMask" is_bitfield="true"> + </constant> + <constant name="BARRIER_MASK_NO_BARRIER" value="8" enum="BarrierMask" is_bitfield="true"> + </constant> <constant name="TEXTURE_TYPE_1D" value="0" enum="TextureType"> </constant> <constant name="TEXTURE_TYPE_2D" value="1" enum="TextureType"> diff --git a/drivers/gles3/shaders/scene.glsl b/drivers/gles3/shaders/scene.glsl index 01135a9bbd..04dba602dd 100644 --- a/drivers/gles3/shaders/scene.glsl +++ b/drivers/gles3/shaders/scene.glsl @@ -920,6 +920,7 @@ void main() { #else vec3 view = -normalize(vertex_interp); #endif + highp mat4 model_matrix = world_transform; vec3 albedo = vec3(1.0); vec3 backlight = vec3(0.0); vec4 transmittance_color = vec4(0.0, 0.0, 0.0, 1.0); diff --git a/drivers/vulkan/rendering_device_vulkan.cpp b/drivers/vulkan/rendering_device_vulkan.cpp index 7e86679851..01d1583ca4 100644 --- a/drivers/vulkan/rendering_device_vulkan.cpp +++ b/drivers/vulkan/rendering_device_vulkan.cpp @@ -47,7 +47,7 @@ static const uint32_t SMALL_ALLOCATION_MAX_SIZE = 4096; // Get the Vulkan object information and possible stage access types (bitwise OR'd with incoming values). 
-RenderingDeviceVulkan::Buffer *RenderingDeviceVulkan::_get_buffer_from_owner(RID p_buffer, VkPipelineStageFlags &r_stage_mask, VkAccessFlags &r_access_mask, uint32_t p_post_barrier) { +RenderingDeviceVulkan::Buffer *RenderingDeviceVulkan::_get_buffer_from_owner(RID p_buffer, VkPipelineStageFlags &r_stage_mask, VkAccessFlags &r_access_mask, BitField<BarrierMask> p_post_barrier) { Buffer *buffer = nullptr; if (vertex_buffer_owner.owns(p_buffer)) { buffer = vertex_buffer_owner.get_or_null(p_buffer); @@ -55,11 +55,11 @@ RenderingDeviceVulkan::Buffer *RenderingDeviceVulkan::_get_buffer_from_owner(RID r_stage_mask |= VK_PIPELINE_STAGE_VERTEX_INPUT_BIT; r_access_mask |= VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT; if (buffer->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) { - if (p_post_barrier & BARRIER_MASK_RASTER) { + if (p_post_barrier.has_flag(BARRIER_MASK_RASTER)) { r_access_mask |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT; r_stage_mask |= VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT; } - if (p_post_barrier & BARRIER_MASK_COMPUTE) { + if (p_post_barrier.has_flag(BARRIER_MASK_COMPUTE)) { r_access_mask |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT; r_stage_mask |= VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT; } @@ -69,20 +69,20 @@ RenderingDeviceVulkan::Buffer *RenderingDeviceVulkan::_get_buffer_from_owner(RID r_access_mask |= VK_ACCESS_INDEX_READ_BIT; buffer = index_buffer_owner.get_or_null(p_buffer); } else if (uniform_buffer_owner.owns(p_buffer)) { - if (p_post_barrier & BARRIER_MASK_RASTER) { + if (p_post_barrier.has_flag(BARRIER_MASK_RASTER)) { r_stage_mask |= VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT; } - if (p_post_barrier & BARRIER_MASK_COMPUTE) { + if (p_post_barrier.has_flag(BARRIER_MASK_COMPUTE)) { r_stage_mask |= VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT; } r_access_mask |= VK_ACCESS_UNIFORM_READ_BIT; buffer = uniform_buffer_owner.get_or_null(p_buffer); } else if (texture_buffer_owner.owns(p_buffer)) { - if (p_post_barrier & BARRIER_MASK_RASTER) { + if (p_post_barrier.has_flag(BARRIER_MASK_RASTER)) { r_stage_mask |= VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT; r_access_mask |= VK_ACCESS_SHADER_READ_BIT; } - if (p_post_barrier & BARRIER_MASK_COMPUTE) { + if (p_post_barrier.has_flag(BARRIER_MASK_COMPUTE)) { r_stage_mask |= VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT; r_access_mask |= VK_ACCESS_SHADER_READ_BIT; } @@ -90,11 +90,11 @@ RenderingDeviceVulkan::Buffer *RenderingDeviceVulkan::_get_buffer_from_owner(RID buffer = &texture_buffer_owner.get_or_null(p_buffer)->buffer; } else if (storage_buffer_owner.owns(p_buffer)) { buffer = storage_buffer_owner.get_or_null(p_buffer); - if (p_post_barrier & BARRIER_MASK_RASTER) { + if (p_post_barrier.has_flag(BARRIER_MASK_RASTER)) { r_stage_mask |= VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT; r_access_mask |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT; } - if (p_post_barrier & BARRIER_MASK_COMPUTE) { + if (p_post_barrier.has_flag(BARRIER_MASK_COMPUTE)) { r_stage_mask |= VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT; r_access_mask |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT; } @@ -2009,7 +2009,7 @@ RID RenderingDeviceVulkan::texture_create(const TextureFormat &p_format, const T if (p_data.size()) { for (uint32_t i = 0; i < image_create_info.arrayLayers; i++) { - _texture_update(id, i, p_data[i], RD::BARRIER_MASK_ALL, true); + _texture_update(id, i, p_data[i], RD::BARRIER_MASK_ALL_BARRIERS, true); } } 
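For context on the hunks that follow: wherever the old code tested the raw uint32_t mask with expressions like (p_post_barrier & BARRIER_MASK_COMPUTE), the commit now queries the typed BitField<BarrierMask> with has_flag(), and each call site still expands the mask into Vulkan stage and access flags. A minimal sketch of that expansion, written as a hypothetical standalone helper rather than code taken from this commit:

    // Hypothetical helper: translate an engine BarrierMask into the Vulkan
    // destination stage/access masks used by the post-barrier blocks below.
    static void barrier_mask_to_vk(BitField<RD::BarrierMask> p_mask, uint32_t &r_stage_flags, uint32_t &r_access_flags) {
        r_stage_flags = 0;
        r_access_flags = 0;
        if (p_mask.has_flag(RD::BARRIER_MASK_COMPUTE)) {
            // Compute shaders may read or write the resource after the barrier.
            r_stage_flags |= VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
            r_access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
        }
        if (p_mask.has_flag(RD::BARRIER_MASK_RASTER)) {
            // Vertex and fragment shaders may read or write it during raster work.
            r_stage_flags |= VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
            r_access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
        }
        if (p_mask.has_flag(RD::BARRIER_MASK_TRANSFER)) {
            // Transfer operations (copies, clears, buffer updates) may write it.
            r_stage_flags |= VK_PIPELINE_STAGE_TRANSFER_BIT;
            r_access_flags |= VK_ACCESS_TRANSFER_WRITE_BIT;
        }
    }

The same three-way expansion appears in _texture_update, texture_copy, texture_resolve_multisample, texture_clear, draw_list_end and compute_list_end below (the draw/compute list variants add a few extra raster stage and access bits).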
return id; @@ -2414,7 +2414,7 @@ RID RenderingDeviceVulkan::texture_create_shared_from_slice(const TextureView &p return id; } -Error RenderingDeviceVulkan::texture_update(RID p_texture, uint32_t p_layer, const Vector<uint8_t> &p_data, uint32_t p_post_barrier) { +Error RenderingDeviceVulkan::texture_update(RID p_texture, uint32_t p_layer, const Vector<uint8_t> &p_data, BitField<BarrierMask> p_post_barrier) { return _texture_update(p_texture, p_layer, p_data, p_post_barrier, false); } @@ -2434,7 +2434,7 @@ static _ALWAYS_INLINE_ void _copy_region(uint8_t const *__restrict p_src, uint8_ } } -Error RenderingDeviceVulkan::_texture_update(RID p_texture, uint32_t p_layer, const Vector<uint8_t> &p_data, uint32_t p_post_barrier, bool p_use_setup_queue) { +Error RenderingDeviceVulkan::_texture_update(RID p_texture, uint32_t p_layer, const Vector<uint8_t> &p_data, BitField<BarrierMask> p_post_barrier, bool p_use_setup_queue) { _THREAD_SAFE_METHOD_ ERR_FAIL_COND_V_MSG((draw_list || compute_list) && !p_use_setup_queue, ERR_INVALID_PARAMETER, @@ -2608,15 +2608,15 @@ Error RenderingDeviceVulkan::_texture_update(RID p_texture, uint32_t p_layer, co { uint32_t barrier_flags = 0; uint32_t access_flags = 0; - if (p_post_barrier & BARRIER_MASK_COMPUTE) { + if (p_post_barrier.has_flag(BARRIER_MASK_COMPUTE)) { barrier_flags |= VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT; access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT; } - if (p_post_barrier & BARRIER_MASK_RASTER) { + if (p_post_barrier.has_flag(BARRIER_MASK_RASTER)) { barrier_flags |= VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT; access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT; } - if (p_post_barrier & BARRIER_MASK_TRANSFER) { + if (p_post_barrier.has_flag(BARRIER_MASK_TRANSFER)) { barrier_flags |= VK_PIPELINE_STAGE_TRANSFER_BIT; access_flags |= VK_ACCESS_TRANSFER_WRITE_BIT; } @@ -2869,7 +2869,7 @@ Size2i RenderingDeviceVulkan::texture_size(RID p_texture) { return Size2i(tex->width, tex->height); } -Error RenderingDeviceVulkan::texture_copy(RID p_from_texture, RID p_to_texture, const Vector3 &p_from, const Vector3 &p_to, const Vector3 &p_size, uint32_t p_src_mipmap, uint32_t p_dst_mipmap, uint32_t p_src_layer, uint32_t p_dst_layer, uint32_t p_post_barrier) { +Error RenderingDeviceVulkan::texture_copy(RID p_from_texture, RID p_to_texture, const Vector3 &p_from, const Vector3 &p_to, const Vector3 &p_size, uint32_t p_src_mipmap, uint32_t p_dst_mipmap, uint32_t p_src_layer, uint32_t p_dst_layer, BitField<BarrierMask> p_post_barrier) { _THREAD_SAFE_METHOD_ Texture *src_tex = texture_owner.get_or_null(p_from_texture); @@ -2994,15 +2994,15 @@ Error RenderingDeviceVulkan::texture_copy(RID p_from_texture, RID p_to_texture, uint32_t barrier_flags = 0; uint32_t access_flags = 0; - if (p_post_barrier & BARRIER_MASK_COMPUTE) { + if (p_post_barrier.has_flag(BARRIER_MASK_COMPUTE)) { barrier_flags |= VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT; access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT; } - if (p_post_barrier & BARRIER_MASK_RASTER) { + if (p_post_barrier.has_flag(BARRIER_MASK_RASTER)) { barrier_flags |= VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT; access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT; } - if (p_post_barrier & BARRIER_MASK_TRANSFER) { + if (p_post_barrier.has_flag(BARRIER_MASK_TRANSFER)) { barrier_flags |= VK_PIPELINE_STAGE_TRANSFER_BIT; access_flags |= VK_ACCESS_TRANSFER_WRITE_BIT; } @@ -3064,7 +3064,7 @@ Error 
RenderingDeviceVulkan::texture_copy(RID p_from_texture, RID p_to_texture, return OK; } -Error RenderingDeviceVulkan::texture_resolve_multisample(RID p_from_texture, RID p_to_texture, uint32_t p_post_barrier) { +Error RenderingDeviceVulkan::texture_resolve_multisample(RID p_from_texture, RID p_to_texture, BitField<BarrierMask> p_post_barrier) { _THREAD_SAFE_METHOD_ Texture *src_tex = texture_owner.get_or_null(p_from_texture); @@ -3172,15 +3172,15 @@ Error RenderingDeviceVulkan::texture_resolve_multisample(RID p_from_texture, RID uint32_t barrier_flags = 0; uint32_t access_flags = 0; - if (p_post_barrier & BARRIER_MASK_COMPUTE) { + if (p_post_barrier.has_flag(BARRIER_MASK_COMPUTE)) { barrier_flags |= VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT; access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT; } - if (p_post_barrier & BARRIER_MASK_RASTER) { + if (p_post_barrier.has_flag(BARRIER_MASK_RASTER)) { barrier_flags |= VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT; access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT; } - if (p_post_barrier & BARRIER_MASK_TRANSFER) { + if (p_post_barrier.has_flag(BARRIER_MASK_TRANSFER)) { barrier_flags |= VK_PIPELINE_STAGE_TRANSFER_BIT; access_flags |= VK_ACCESS_TRANSFER_WRITE_BIT; } @@ -3235,7 +3235,7 @@ Error RenderingDeviceVulkan::texture_resolve_multisample(RID p_from_texture, RID return OK; } -Error RenderingDeviceVulkan::texture_clear(RID p_texture, const Color &p_color, uint32_t p_base_mipmap, uint32_t p_mipmaps, uint32_t p_base_layer, uint32_t p_layers, uint32_t p_post_barrier) { +Error RenderingDeviceVulkan::texture_clear(RID p_texture, const Color &p_color, uint32_t p_base_mipmap, uint32_t p_mipmaps, uint32_t p_base_layer, uint32_t p_layers, BitField<BarrierMask> p_post_barrier) { _THREAD_SAFE_METHOD_ Texture *src_tex = texture_owner.get_or_null(p_texture); @@ -3308,15 +3308,15 @@ Error RenderingDeviceVulkan::texture_clear(RID p_texture, const Color &p_color, uint32_t barrier_flags = 0; uint32_t access_flags = 0; - if (p_post_barrier & BARRIER_MASK_COMPUTE) { + if (p_post_barrier.has_flag(BARRIER_MASK_COMPUTE)) { barrier_flags |= VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT; access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT; } - if (p_post_barrier & BARRIER_MASK_RASTER) { + if (p_post_barrier.has_flag(BARRIER_MASK_RASTER)) { barrier_flags |= VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT; access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT; } - if (p_post_barrier & BARRIER_MASK_TRANSFER) { + if (p_post_barrier.has_flag(BARRIER_MASK_TRANSFER)) { barrier_flags |= VK_PIPELINE_STAGE_TRANSFER_BIT; access_flags |= VK_ACCESS_TRANSFER_WRITE_BIT; } @@ -6321,7 +6321,7 @@ void RenderingDeviceVulkan::uniform_set_set_invalidation_callback(RID p_uniform_ us->invalidated_callback_userdata = p_userdata; } -Error RenderingDeviceVulkan::buffer_update(RID p_buffer, uint32_t p_offset, uint32_t p_size, const void *p_data, uint32_t p_post_barrier) { +Error RenderingDeviceVulkan::buffer_update(RID p_buffer, uint32_t p_offset, uint32_t p_size, const void *p_data, BitField<BarrierMask> p_post_barrier) { _THREAD_SAFE_METHOD_ ERR_FAIL_COND_V_MSG(draw_list, ERR_INVALID_PARAMETER, @@ -6331,7 +6331,7 @@ Error RenderingDeviceVulkan::buffer_update(RID p_buffer, uint32_t p_offset, uint VkPipelineStageFlags dst_stage_mask = 0; VkAccessFlags dst_access = 0; - if (p_post_barrier & BARRIER_MASK_TRANSFER) { + if (p_post_barrier.has_flag(BARRIER_MASK_TRANSFER)) { // Protect 
subsequent updates. dst_stage_mask = VK_PIPELINE_STAGE_TRANSFER_BIT; dst_access = VK_ACCESS_TRANSFER_WRITE_BIT; @@ -6367,7 +6367,7 @@ Error RenderingDeviceVulkan::buffer_update(RID p_buffer, uint32_t p_offset, uint return err; } -Error RenderingDeviceVulkan::buffer_clear(RID p_buffer, uint32_t p_offset, uint32_t p_size, uint32_t p_post_barrier) { +Error RenderingDeviceVulkan::buffer_clear(RID p_buffer, uint32_t p_offset, uint32_t p_size, BitField<BarrierMask> p_post_barrier) { _THREAD_SAFE_METHOD_ ERR_FAIL_COND_V_MSG((p_size % 4) != 0, ERR_INVALID_PARAMETER, @@ -6379,7 +6379,7 @@ Error RenderingDeviceVulkan::buffer_clear(RID p_buffer, uint32_t p_offset, uint3 VkPipelineStageFlags dst_stage_mask = 0; VkAccessFlags dst_access = 0; - if (p_post_barrier & BARRIER_MASK_TRANSFER) { + if (p_post_barrier.has_flag(BARRIER_MASK_TRANSFER)) { // Protect subsequent updates. dst_stage_mask = VK_PIPELINE_STAGE_TRANSFER_BIT; dst_access = VK_ACCESS_TRANSFER_WRITE_BIT; @@ -6418,7 +6418,7 @@ Vector<uint8_t> RenderingDeviceVulkan::buffer_get_data(RID p_buffer) { VkPipelineShaderStageCreateFlags src_stage_mask = VK_PIPELINE_STAGE_TRANSFER_BIT; VkAccessFlags src_access_mask = VK_ACCESS_TRANSFER_WRITE_BIT; // Get the vulkan buffer and the potential stage/access possible. - Buffer *buffer = _get_buffer_from_owner(p_buffer, src_stage_mask, src_access_mask, BARRIER_MASK_ALL); + Buffer *buffer = _get_buffer_from_owner(p_buffer, src_stage_mask, src_access_mask, BARRIER_MASK_ALL_BARRIERS); if (!buffer) { ERR_FAIL_V_MSG(Vector<uint8_t>(), "Buffer is either invalid or this type of buffer can't be retrieved. Only Index and Vertex buffers allow retrieving."); } @@ -8090,7 +8090,7 @@ void RenderingDeviceVulkan::_draw_list_free(Rect2i *r_last_viewport) { _THREAD_SAFE_UNLOCK_ } -void RenderingDeviceVulkan::draw_list_end(uint32_t p_post_barrier) { +void RenderingDeviceVulkan::draw_list_end(BitField<BarrierMask> p_post_barrier) { _THREAD_SAFE_METHOD_ ERR_FAIL_COND_MSG(!draw_list, "Immediate draw list is already inactive."); @@ -8112,15 +8112,15 @@ void RenderingDeviceVulkan::draw_list_end(uint32_t p_post_barrier) { uint32_t barrier_flags = 0; uint32_t access_flags = 0; - if (p_post_barrier & BARRIER_MASK_COMPUTE) { + if (p_post_barrier.has_flag(BARRIER_MASK_COMPUTE)) { barrier_flags |= VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT; access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT; } - if (p_post_barrier & BARRIER_MASK_RASTER) { + if (p_post_barrier.has_flag(BARRIER_MASK_RASTER)) { barrier_flags |= VK_PIPELINE_STAGE_VERTEX_INPUT_BIT | VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT /*| VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT*/; access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT | VK_ACCESS_INDEX_READ_BIT | VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT /*| VK_ACCESS_INDIRECT_COMMAND_READ_BIT*/; } - if (p_post_barrier & BARRIER_MASK_TRANSFER) { + if (p_post_barrier.has_flag(BARRIER_MASK_TRANSFER)) { barrier_flags |= VK_PIPELINE_STAGE_TRANSFER_BIT; access_flags |= VK_ACCESS_TRANSFER_WRITE_BIT | VK_ACCESS_TRANSFER_READ_BIT; } @@ -8598,20 +8598,20 @@ void RenderingDeviceVulkan::compute_list_add_barrier(ComputeListID p_list) { #endif } -void RenderingDeviceVulkan::compute_list_end(uint32_t p_post_barrier) { +void RenderingDeviceVulkan::compute_list_end(BitField<BarrierMask> p_post_barrier) { ERR_FAIL_COND(!compute_list); uint32_t barrier_flags = 0; uint32_t access_flags = 0; - if (p_post_barrier & BARRIER_MASK_COMPUTE) { + if (p_post_barrier.has_flag(BARRIER_MASK_COMPUTE)) { 
barrier_flags |= VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT; access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT; } - if (p_post_barrier & BARRIER_MASK_RASTER) { + if (p_post_barrier.has_flag(BARRIER_MASK_RASTER)) { barrier_flags |= VK_PIPELINE_STAGE_VERTEX_INPUT_BIT | VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT; access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT | VK_ACCESS_INDEX_READ_BIT | VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT | VK_ACCESS_INDIRECT_COMMAND_READ_BIT; } - if (p_post_barrier & BARRIER_MASK_TRANSFER) { + if (p_post_barrier.has_flag(BARRIER_MASK_TRANSFER)) { barrier_flags |= VK_PIPELINE_STAGE_TRANSFER_BIT; access_flags |= VK_ACCESS_TRANSFER_WRITE_BIT | VK_ACCESS_TRANSFER_READ_BIT; } @@ -8679,43 +8679,45 @@ void RenderingDeviceVulkan::compute_list_end(uint32_t p_post_barrier) { _THREAD_SAFE_UNLOCK_ } -void RenderingDeviceVulkan::barrier(uint32_t p_from, uint32_t p_to) { +void RenderingDeviceVulkan::barrier(BitField<BarrierMask> p_from, BitField<BarrierMask> p_to) { uint32_t src_barrier_flags = 0; uint32_t src_access_flags = 0; - if (p_from & BARRIER_MASK_COMPUTE) { - src_barrier_flags |= VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT; - src_access_flags |= VK_ACCESS_SHADER_WRITE_BIT; - } - if (p_from & BARRIER_MASK_RASTER) { - src_barrier_flags |= VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT; - src_access_flags |= VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT; - } - if (p_from & BARRIER_MASK_TRANSFER) { - src_barrier_flags |= VK_PIPELINE_STAGE_TRANSFER_BIT; - src_access_flags |= VK_ACCESS_TRANSFER_WRITE_BIT; - } if (p_from == 0) { src_barrier_flags = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT; + } else { + if (p_from.has_flag(BARRIER_MASK_COMPUTE)) { + src_barrier_flags |= VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT; + src_access_flags |= VK_ACCESS_SHADER_WRITE_BIT; + } + if (p_from.has_flag(BARRIER_MASK_RASTER)) { + src_barrier_flags |= VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT; + src_access_flags |= VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT; + } + if (p_from.has_flag(BARRIER_MASK_TRANSFER)) { + src_barrier_flags |= VK_PIPELINE_STAGE_TRANSFER_BIT; + src_access_flags |= VK_ACCESS_TRANSFER_WRITE_BIT; + } } uint32_t dst_barrier_flags = 0; uint32_t dst_access_flags = 0; - if (p_to & BARRIER_MASK_COMPUTE) { - dst_barrier_flags |= VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT; - dst_access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT; - } - if (p_to & BARRIER_MASK_RASTER) { - dst_barrier_flags |= VK_PIPELINE_STAGE_VERTEX_INPUT_BIT | VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT; - dst_access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT | VK_ACCESS_INDEX_READ_BIT | VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT | VK_ACCESS_INDIRECT_COMMAND_READ_BIT; - } - if (p_to & BARRIER_MASK_TRANSFER) { - dst_barrier_flags |= VK_PIPELINE_STAGE_TRANSFER_BIT; - dst_access_flags |= VK_ACCESS_TRANSFER_WRITE_BIT | VK_ACCESS_TRANSFER_READ_BIT; - } if (p_to == 0) { dst_barrier_flags = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT; + } else { + if (p_to.has_flag(BARRIER_MASK_COMPUTE)) { + dst_barrier_flags |= VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT; + dst_access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT; + } + if (p_to.has_flag(BARRIER_MASK_RASTER)) { + 
dst_barrier_flags |= VK_PIPELINE_STAGE_VERTEX_INPUT_BIT | VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT; + dst_access_flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT | VK_ACCESS_INDEX_READ_BIT | VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT | VK_ACCESS_INDIRECT_COMMAND_READ_BIT; + } + if (p_to.has_flag(BARRIER_MASK_TRANSFER)) { + dst_barrier_flags |= VK_PIPELINE_STAGE_TRANSFER_BIT; + dst_access_flags |= VK_ACCESS_TRANSFER_WRITE_BIT | VK_ACCESS_TRANSFER_READ_BIT; + } } _memory_barrier(src_barrier_flags, dst_barrier_flags, src_access_flags, dst_access_flags, true); diff --git a/drivers/vulkan/rendering_device_vulkan.h b/drivers/vulkan/rendering_device_vulkan.h index 05f12ee7f9..537ad88f5a 100644 --- a/drivers/vulkan/rendering_device_vulkan.h +++ b/drivers/vulkan/rendering_device_vulkan.h @@ -162,7 +162,7 @@ class RenderingDeviceVulkan : public RenderingDevice { uint32_t texture_upload_region_size_px = 0; Vector<uint8_t> _texture_get_data_from_image(Texture *tex, VkImage p_image, VmaAllocation p_allocation, uint32_t p_layer, bool p_2d = false); - Error _texture_update(RID p_texture, uint32_t p_layer, const Vector<uint8_t> &p_data, uint32_t p_post_barrier, bool p_use_setup_queue); + Error _texture_update(RID p_texture, uint32_t p_layer, const Vector<uint8_t> &p_data, BitField<BarrierMask> p_post_barrier, bool p_use_setup_queue); /*****************/ /**** SAMPLER ****/ @@ -909,7 +909,7 @@ class RenderingDeviceVulkan : public RenderingDevice { Error _draw_list_setup_framebuffer(Framebuffer *p_framebuffer, InitialAction p_initial_color_action, FinalAction p_final_color_action, InitialAction p_initial_depth_action, FinalAction p_final_depth_action, VkFramebuffer *r_framebuffer, VkRenderPass *r_render_pass, uint32_t *r_subpass_count); Error _draw_list_render_pass_begin(Framebuffer *framebuffer, InitialAction p_initial_color_action, FinalAction p_final_color_action, InitialAction p_initial_depth_action, FinalAction p_final_depth_action, const Vector<Color> &p_clear_colors, float p_clear_depth, uint32_t p_clear_stencil, Point2i viewport_offset, Point2i viewport_size, VkFramebuffer vkframebuffer, VkRenderPass render_pass, VkCommandBuffer command_buffer, VkSubpassContents subpass_contents, const Vector<RID> &p_storage_textures); _FORCE_INLINE_ DrawList *_get_draw_list_ptr(DrawListID p_id); - Buffer *_get_buffer_from_owner(RID p_buffer, VkPipelineStageFlags &dst_stage_mask, VkAccessFlags &dst_access, uint32_t p_post_barrier); + Buffer *_get_buffer_from_owner(RID p_buffer, VkPipelineStageFlags &dst_stage_mask, VkAccessFlags &dst_access, BitField<BarrierMask> p_post_barrier); Error _draw_list_allocate(const Rect2i &p_viewport, uint32_t p_splits, uint32_t p_subpass); void _draw_list_free(Rect2i *r_last_viewport = nullptr); @@ -1052,7 +1052,7 @@ public: virtual RID texture_create_from_extension(TextureType p_type, DataFormat p_format, TextureSamples p_samples, uint64_t p_flags, uint64_t p_image, uint64_t p_width, uint64_t p_height, uint64_t p_depth, uint64_t p_layers); virtual RID texture_create_shared_from_slice(const TextureView &p_view, RID p_with_texture, uint32_t p_layer, uint32_t p_mipmap, uint32_t p_mipmaps = 1, TextureSliceType p_slice_type = TEXTURE_SLICE_2D); - virtual Error texture_update(RID p_texture, uint32_t p_layer, const Vector<uint8_t> &p_data, uint32_t p_post_barrier = BARRIER_MASK_ALL); + virtual Error texture_update(RID p_texture, uint32_t p_layer, const Vector<uint8_t> &p_data, BitField<BarrierMask> 
p_post_barrier = BARRIER_MASK_ALL_BARRIERS); virtual Vector<uint8_t> texture_get_data(RID p_texture, uint32_t p_layer); virtual bool texture_is_format_supported_for_usage(DataFormat p_format, uint32_t p_usage) const; @@ -1060,9 +1060,9 @@ public: virtual bool texture_is_valid(RID p_texture); virtual Size2i texture_size(RID p_texture); - virtual Error texture_copy(RID p_from_texture, RID p_to_texture, const Vector3 &p_from, const Vector3 &p_to, const Vector3 &p_size, uint32_t p_src_mipmap, uint32_t p_dst_mipmap, uint32_t p_src_layer, uint32_t p_dst_layer, uint32_t p_post_barrier = BARRIER_MASK_ALL); - virtual Error texture_clear(RID p_texture, const Color &p_color, uint32_t p_base_mipmap, uint32_t p_mipmaps, uint32_t p_base_layer, uint32_t p_layers, uint32_t p_post_barrier = BARRIER_MASK_ALL); - virtual Error texture_resolve_multisample(RID p_from_texture, RID p_to_texture, uint32_t p_post_barrier = BARRIER_MASK_ALL); + virtual Error texture_copy(RID p_from_texture, RID p_to_texture, const Vector3 &p_from, const Vector3 &p_to, const Vector3 &p_size, uint32_t p_src_mipmap, uint32_t p_dst_mipmap, uint32_t p_src_layer, uint32_t p_dst_layer, BitField<BarrierMask> p_post_barrier = BARRIER_MASK_ALL_BARRIERS); + virtual Error texture_clear(RID p_texture, const Color &p_color, uint32_t p_base_mipmap, uint32_t p_mipmaps, uint32_t p_base_layer, uint32_t p_layers, BitField<BarrierMask> p_post_barrier = BARRIER_MASK_ALL_BARRIERS); + virtual Error texture_resolve_multisample(RID p_from_texture, RID p_to_texture, BitField<BarrierMask> p_post_barrier = BARRIER_MASK_ALL_BARRIERS); /*********************/ /**** FRAMEBUFFER ****/ @@ -1124,8 +1124,8 @@ public: virtual bool uniform_set_is_valid(RID p_uniform_set); virtual void uniform_set_set_invalidation_callback(RID p_uniform_set, InvalidationCallback p_callback, void *p_userdata); - virtual Error buffer_update(RID p_buffer, uint32_t p_offset, uint32_t p_size, const void *p_data, uint32_t p_post_barrier = BARRIER_MASK_ALL); // Works for any buffer. - virtual Error buffer_clear(RID p_buffer, uint32_t p_offset, uint32_t p_size, uint32_t p_post_barrier = BARRIER_MASK_ALL); + virtual Error buffer_update(RID p_buffer, uint32_t p_offset, uint32_t p_size, const void *p_data, BitField<BarrierMask> p_post_barrier = BARRIER_MASK_ALL_BARRIERS); // Works for any buffer. 
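From the caller's side, these header changes mean every post-barrier argument is now a typed BitField<BarrierMask> defaulting to BARRIER_MASK_ALL_BARRIERS instead of a plain uint32_t defaulting to the old BARRIER_MASK_ALL. A hedged usage sketch; the storage_buffer RID and data vector are hypothetical and not part of this commit:

    // Update a buffer but only fence the write against compute and transfer work.
    BitField<RD::BarrierMask> post_barrier = RD::BARRIER_MASK_COMPUTE;
    post_barrier.set_flag(RD::BARRIER_MASK_TRANSFER);
    RD::get_singleton()->buffer_update(storage_buffer, 0, data.size(), data.ptr(), post_barrier);

Passing a bare enum constant such as RD::BARRIER_MASK_NO_BARRIER still works through the BitField constructor, which is how call sites like the multimesh buffer update in mesh_storage.cpp further down keep compiling unchanged.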
+ virtual Error buffer_clear(RID p_buffer, uint32_t p_offset, uint32_t p_size, BitField<BarrierMask> p_post_barrier = BARRIER_MASK_ALL_BARRIERS); virtual Vector<uint8_t> buffer_get_data(RID p_buffer); /*************************/ @@ -1176,7 +1176,7 @@ public: virtual DrawListID draw_list_switch_to_next_pass(); virtual Error draw_list_switch_to_next_pass_split(uint32_t p_splits, DrawListID *r_split_ids); - virtual void draw_list_end(uint32_t p_post_barrier = BARRIER_MASK_ALL); + virtual void draw_list_end(BitField<BarrierMask> p_post_barrier = BARRIER_MASK_ALL_BARRIERS); /***********************/ /**** COMPUTE LISTS ****/ @@ -1191,9 +1191,9 @@ public: virtual void compute_list_dispatch(ComputeListID p_list, uint32_t p_x_groups, uint32_t p_y_groups, uint32_t p_z_groups); virtual void compute_list_dispatch_threads(ComputeListID p_list, uint32_t p_x_threads, uint32_t p_y_threads, uint32_t p_z_threads); virtual void compute_list_dispatch_indirect(ComputeListID p_list, RID p_buffer, uint32_t p_offset); - virtual void compute_list_end(uint32_t p_post_barrier = BARRIER_MASK_ALL); + virtual void compute_list_end(BitField<BarrierMask> p_post_barrier = BARRIER_MASK_ALL_BARRIERS); - virtual void barrier(uint32_t p_from = BARRIER_MASK_ALL, uint32_t p_to = BARRIER_MASK_ALL); + virtual void barrier(BitField<BarrierMask> p_from = BARRIER_MASK_ALL_BARRIERS, BitField<BarrierMask> p_to = BARRIER_MASK_ALL_BARRIERS); virtual void full_barrier(); /**************/ diff --git a/modules/gdscript/gdscript_warning.cpp b/modules/gdscript/gdscript_warning.cpp index a0c107aa53..2548d26e14 100644 --- a/modules/gdscript/gdscript_warning.cpp +++ b/modules/gdscript/gdscript_warning.cpp @@ -96,7 +96,7 @@ String GDScriptWarning::get_message() const { } break; case RETURN_VALUE_DISCARDED: { CHECK_SYMBOLS(1); - return "The function '" + symbols[0] + "()' returns a value, but this value is never used."; + return "The function '" + symbols[0] + "()' returns a value that will be discarded if not used."; } break; case PROPERTY_USED_AS_FUNCTION: { CHECK_SYMBOLS(2); diff --git a/modules/gdscript/tests/scripts/parser/warnings/return_value_discarded.out b/modules/gdscript/tests/scripts/parser/warnings/return_value_discarded.out index 13f759dd46..e89bb9226f 100644 --- a/modules/gdscript/tests/scripts/parser/warnings/return_value_discarded.out +++ b/modules/gdscript/tests/scripts/parser/warnings/return_value_discarded.out @@ -2,4 +2,4 @@ GDTEST_OK >> WARNING >> Line: 6 >> RETURN_VALUE_DISCARDED ->> The function 'i_return_int()' returns a value, but this value is never used. +>> The function 'i_return_int()' returns a value that will be discarded if not used. diff --git a/modules/gltf/doc_classes/GLTFDocumentExtension.xml b/modules/gltf/doc_classes/GLTFDocumentExtension.xml index adc32df8dc..87d3d9bcb0 100644 --- a/modules/gltf/doc_classes/GLTFDocumentExtension.xml +++ b/modules/gltf/doc_classes/GLTFDocumentExtension.xml @@ -11,6 +11,16 @@ <tutorials> </tutorials> <methods> + <method name="_convert_scene_node" qualifiers="virtual"> + <return type="void" /> + <param index="0" name="state" type="GLTFState" /> + <param index="1" name="gltf_node" type="GLTFNode" /> + <param index="2" name="scene_node" type="Node" /> + <description> + Part of the export process. This method is run after [method _export_preflight] and before [method _export_node]. + Runs when converting the data from a Godot scene node. This method can be used to process the Godot scene node data into a format that can be used by [method _export_node]. 
+ </description> + </method> <method name="_export_node" qualifiers="virtual"> <return type="int" /> <param index="0" name="state" type="GLTFState" /> @@ -18,23 +28,40 @@ <param index="2" name="json" type="Dictionary" /> <param index="3" name="node" type="Node" /> <description> + Part of the export process. This method is run after [method _convert_scene_node] and before [method _export_post]. + This method can be used to modify the final JSON of each node. </description> </method> <method name="_export_post" qualifiers="virtual"> <return type="int" /> <param index="0" name="state" type="GLTFState" /> <description> + Part of the export process. This method is run last, after all other parts of the export process. + This method can be used to modify the final JSON of the generated GLTF file. </description> </method> <method name="_export_preflight" qualifiers="virtual"> <return type="int" /> <param index="0" name="root" type="Node" /> <description> + Part of the export process. This method is run first, before all other parts of the export process. + The return value is used to determine if this GLTFDocumentExtension class should be used for exporting a given GLTF file. If [constant OK], the export will use this GLTFDocumentExtension class. If not overridden, [constant OK] is returned. + </description> + </method> + <method name="_generate_scene_node" qualifiers="virtual"> + <return type="Node3D" /> + <param index="0" name="state" type="GLTFState" /> + <param index="1" name="gltf_node" type="GLTFNode" /> + <param index="2" name="scene_parent" type="Node" /> + <description> + Part of the import process. This method is run after [method _parse_node_extensions] and before [method _import_post_parse]. + Runs when generating a Godot scene node from a GLTFNode. The returned node will be added to the scene tree. Multiple nodes can be generated in this step if they are added as a child of the returned node. </description> </method> <method name="_get_supported_extensions" qualifiers="virtual"> <return type="PackedStringArray" /> <description> + Part of the import process. This method is run after [method _import_preflight] and before [method _parse_node_extensions]. Returns an array of the GLTF extensions supported by this GLTFDocumentExtension class. This is used to validate if a GLTF file with required extensions can be loaded. </description> </method> @@ -45,6 +72,8 @@ <param index="2" name="json" type="Dictionary" /> <param index="3" name="node" type="Node" /> <description> + Part of the import process. This method is run after [method _import_post_parse] and before [method _import_post]. + This method can be used to make modifications to each of the generated Godot scene nodes. </description> </method> <method name="_import_post" qualifiers="virtual"> @@ -52,12 +81,16 @@ <param index="0" name="state" type="GLTFState" /> <param index="1" name="root" type="Node" /> <description> + Part of the import process. This method is run last, after all other parts of the import process. + This method can be used to modify the final Godot scene generated by the import process. </description> </method> <method name="_import_post_parse" qualifiers="virtual"> <return type="int" /> <param index="0" name="state" type="GLTFState" /> <description> + Part of the import process. This method is run after [method _generate_scene_node] and before [method _import_node]. + This method can be used to modify any of the data imported so far, including any scene nodes, before running the final per-node import step. 
</description> </method> <method name="_import_preflight" qualifiers="virtual"> @@ -65,7 +98,18 @@ <param index="0" name="state" type="GLTFState" /> <param index="1" name="extensions" type="PackedStringArray" /> <description> - This callback is run first. It is used to determine if this GLTFDocumentExtension class should be used for importing a given GLTF file. If [constant OK], the import will use this GLTFDocumentExtension class. + Part of the import process. This method is run first, before all other parts of the import process. + The return value is used to determine if this GLTFDocumentExtension class should be used for importing a given GLTF file. If [constant OK], the import will use this GLTFDocumentExtension class. If not overridden, [constant OK] is returned. + </description> + </method> + <method name="_parse_node_extensions" qualifiers="virtual"> + <return type="int" /> + <param index="0" name="state" type="GLTFState" /> + <param index="1" name="gltf_node" type="GLTFNode" /> + <param index="2" name="extensions" type="Dictionary" /> + <description> + Part of the import process. This method is run after [method _get_supported_extensions] and before [method _generate_scene_node]. + Runs when parsing the node extensions of a GLTFNode. This method can be used to process the extension JSON data into a format that can be used by [method _generate_scene_node]. </description> </method> </methods> diff --git a/modules/gltf/extensions/gltf_document_extension.cpp b/modules/gltf/extensions/gltf_document_extension.cpp index 56519f70d3..f997fe8f66 100644 --- a/modules/gltf/extensions/gltf_document_extension.cpp +++ b/modules/gltf/extensions/gltf_document_extension.cpp @@ -31,50 +31,77 @@ #include "gltf_document_extension.h" void GLTFDocumentExtension::_bind_methods() { - GDVIRTUAL_BIND(_get_supported_extensions); + // Import process. GDVIRTUAL_BIND(_import_preflight, "state", "extensions"); + GDVIRTUAL_BIND(_get_supported_extensions); + GDVIRTUAL_BIND(_parse_node_extensions, "state", "gltf_node", "extensions"); + GDVIRTUAL_BIND(_generate_scene_node, "state", "gltf_node", "scene_parent"); GDVIRTUAL_BIND(_import_post_parse, "state"); GDVIRTUAL_BIND(_import_node, "state", "gltf_node", "json", "node"); GDVIRTUAL_BIND(_import_post, "state", "root"); + // Export process. GDVIRTUAL_BIND(_export_preflight, "root"); + GDVIRTUAL_BIND(_convert_scene_node, "state", "gltf_node", "scene_node"); GDVIRTUAL_BIND(_export_node, "state", "gltf_node", "json", "node"); GDVIRTUAL_BIND(_export_post, "state"); } +// Import process. 
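The reordered bindings here match the callback order now documented in GLTFDocumentExtension.xml: for import, _import_preflight runs first, then _get_supported_extensions, _parse_node_extensions, _generate_scene_node, _import_post_parse, _import_node and finally _import_post; for export, _export_preflight, _convert_scene_node, _export_node, then _export_post. As a rough illustration of the two new import hooks, here is a hypothetical engine-side subclass; the class name, extension string and generated node type are invented for the example and are not part of this commit:

    #include "modules/gltf/extensions/gltf_document_extension.h"
    #include "scene/3d/light_3d.h"

    // Hypothetical C++ extension overriding the non-GDVIRTUAL wrappers declared
    // in gltf_document_extension.h (scripted extensions override the _-prefixed
    // virtuals instead).
    class GLTFDocumentExtensionExampleLight : public GLTFDocumentExtension {
        GDCLASS(GLTFDocumentExtensionExampleLight, GLTFDocumentExtension);

    public:
        virtual Vector<String> get_supported_extensions() override {
            Vector<String> exts;
            exts.push_back("EXAMPLE_custom_light"); // Invented extension name.
            return exts;
        }

        virtual Error parse_node_extensions(Ref<GLTFState> p_state, Ref<GLTFNode> p_gltf_node, Dictionary &p_extensions) override {
            // Keep the raw extension JSON around for the generate step.
            if (p_extensions.has("EXAMPLE_custom_light")) {
                p_gltf_node->set_meta("example_custom_light", p_extensions["EXAMPLE_custom_light"]);
            }
            return OK;
        }

        virtual Node3D *generate_scene_node(Ref<GLTFState> p_state, Ref<GLTFNode> p_gltf_node, Node *p_scene_parent) override {
            // Returning nullptr falls through to the default mesh/camera/light/
            // spatial generation in gltf_document.cpp.
            if (!p_gltf_node->has_meta("example_custom_light")) {
                return nullptr;
            }
            return memnew(OmniLight3D);
        }
    };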
+Error GLTFDocumentExtension::import_preflight(Ref<GLTFState> p_state, Vector<String> p_extensions) { + ERR_FAIL_NULL_V(p_state, ERR_INVALID_PARAMETER); + int err = OK; + GDVIRTUAL_CALL(_import_preflight, p_state, p_extensions, err); + return Error(err); +} + Vector<String> GLTFDocumentExtension::get_supported_extensions() { Vector<String> ret; GDVIRTUAL_CALL(_get_supported_extensions, ret); return ret; } -Error GLTFDocumentExtension::import_post(Ref<GLTFState> p_state, Node *p_root) { - ERR_FAIL_NULL_V(p_root, ERR_INVALID_PARAMETER); +Error GLTFDocumentExtension::parse_node_extensions(Ref<GLTFState> p_state, Ref<GLTFNode> p_gltf_node, Dictionary &p_extensions) { ERR_FAIL_NULL_V(p_state, ERR_INVALID_PARAMETER); + ERR_FAIL_NULL_V(p_gltf_node, ERR_INVALID_PARAMETER); int err = OK; - GDVIRTUAL_CALL(_import_post, p_state, p_root, err); + GDVIRTUAL_CALL(_parse_node_extensions, p_state, p_gltf_node, p_extensions, err); return Error(err); } -Error GLTFDocumentExtension::import_preflight(Ref<GLTFState> p_state, Vector<String> p_extensions) { +Node3D *GLTFDocumentExtension::generate_scene_node(Ref<GLTFState> p_state, Ref<GLTFNode> p_gltf_node, Node *p_scene_parent) { + ERR_FAIL_NULL_V(p_state, nullptr); + ERR_FAIL_NULL_V(p_gltf_node, nullptr); + ERR_FAIL_NULL_V(p_scene_parent, nullptr); + Node3D *ret_node = nullptr; + GDVIRTUAL_CALL(_generate_scene_node, p_state, p_gltf_node, p_scene_parent, ret_node); + return ret_node; +} + +Error GLTFDocumentExtension::import_post_parse(Ref<GLTFState> p_state) { ERR_FAIL_NULL_V(p_state, ERR_INVALID_PARAMETER); int err = OK; - GDVIRTUAL_CALL(_import_preflight, p_state, p_extensions, err); + GDVIRTUAL_CALL(_import_post_parse, p_state, err); return Error(err); } -Error GLTFDocumentExtension::import_post_parse(Ref<GLTFState> p_state) { +Error GLTFDocumentExtension::import_node(Ref<GLTFState> p_state, Ref<GLTFNode> p_gltf_node, Dictionary &r_dict, Node *p_node) { ERR_FAIL_NULL_V(p_state, ERR_INVALID_PARAMETER); + ERR_FAIL_NULL_V(p_gltf_node, ERR_INVALID_PARAMETER); + ERR_FAIL_NULL_V(p_node, ERR_INVALID_PARAMETER); int err = OK; - GDVIRTUAL_CALL(_import_post_parse, p_state, err); + GDVIRTUAL_CALL(_import_node, p_state, p_gltf_node, r_dict, p_node, err); return Error(err); } -Error GLTFDocumentExtension::export_post(Ref<GLTFState> p_state) { +Error GLTFDocumentExtension::import_post(Ref<GLTFState> p_state, Node *p_root) { + ERR_FAIL_NULL_V(p_root, ERR_INVALID_PARAMETER); ERR_FAIL_NULL_V(p_state, ERR_INVALID_PARAMETER); int err = OK; - GDVIRTUAL_CALL(_export_post, p_state, err); + GDVIRTUAL_CALL(_import_post, p_state, p_root, err); return Error(err); } + +// Export process. 
Error GLTFDocumentExtension::export_preflight(Node *p_root) { ERR_FAIL_NULL_V(p_root, ERR_INVALID_PARAMETER); int err = OK; @@ -82,20 +109,25 @@ Error GLTFDocumentExtension::export_preflight(Node *p_root) { return Error(err); } -Error GLTFDocumentExtension::import_node(Ref<GLTFState> p_state, Ref<GLTFNode> p_gltf_node, Dictionary &r_dict, Node *p_node) { +void GLTFDocumentExtension::convert_scene_node(Ref<GLTFState> p_state, Ref<GLTFNode> p_gltf_node, Node *p_scene_node) { + ERR_FAIL_NULL(p_state); + ERR_FAIL_NULL(p_gltf_node); + ERR_FAIL_NULL(p_scene_node); + GDVIRTUAL_CALL(_convert_scene_node, p_state, p_gltf_node, p_scene_node); +} + +Error GLTFDocumentExtension::export_node(Ref<GLTFState> p_state, Ref<GLTFNode> p_gltf_node, Dictionary &r_dict, Node *p_node) { ERR_FAIL_NULL_V(p_state, ERR_INVALID_PARAMETER); ERR_FAIL_NULL_V(p_gltf_node, ERR_INVALID_PARAMETER); ERR_FAIL_NULL_V(p_node, ERR_INVALID_PARAMETER); int err = OK; - GDVIRTUAL_CALL(_import_node, p_state, p_gltf_node, r_dict, p_node, err); + GDVIRTUAL_CALL(_export_node, p_state, p_gltf_node, r_dict, p_node, err); return Error(err); } -Error GLTFDocumentExtension::export_node(Ref<GLTFState> p_state, Ref<GLTFNode> p_gltf_node, Dictionary &r_dict, Node *p_node) { +Error GLTFDocumentExtension::export_post(Ref<GLTFState> p_state) { ERR_FAIL_NULL_V(p_state, ERR_INVALID_PARAMETER); - ERR_FAIL_NULL_V(p_gltf_node, ERR_INVALID_PARAMETER); - ERR_FAIL_NULL_V(p_node, ERR_INVALID_PARAMETER); int err = OK; - GDVIRTUAL_CALL(_export_node, p_state, p_gltf_node, r_dict, p_node, err); + GDVIRTUAL_CALL(_export_post, p_state, err); return Error(err); } diff --git a/modules/gltf/extensions/gltf_document_extension.h b/modules/gltf/extensions/gltf_document_extension.h index 43e29ea0ab..7cc9ca592f 100644 --- a/modules/gltf/extensions/gltf_document_extension.h +++ b/modules/gltf/extensions/gltf_document_extension.h @@ -40,20 +40,31 @@ protected: static void _bind_methods(); public: - virtual Vector<String> get_supported_extensions(); + // Import process. virtual Error import_preflight(Ref<GLTFState> p_state, Vector<String> p_extensions); + virtual Vector<String> get_supported_extensions(); + virtual Error parse_node_extensions(Ref<GLTFState> p_state, Ref<GLTFNode> p_gltf_node, Dictionary &p_extensions); + virtual Node3D *generate_scene_node(Ref<GLTFState> p_state, Ref<GLTFNode> p_gltf_node, Node *p_scene_parent); virtual Error import_post_parse(Ref<GLTFState> p_state); - virtual Error export_post(Ref<GLTFState> p_state); + virtual Error import_node(Ref<GLTFState> p_state, Ref<GLTFNode> p_gltf_node, Dictionary &r_json, Node *p_node); virtual Error import_post(Ref<GLTFState> p_state, Node *p_node); + // Export process. virtual Error export_preflight(Node *p_state); - virtual Error import_node(Ref<GLTFState> p_state, Ref<GLTFNode> p_gltf_node, Dictionary &r_json, Node *p_node); + virtual void convert_scene_node(Ref<GLTFState> p_state, Ref<GLTFNode> p_gltf_node, Node *p_scene_node); virtual Error export_node(Ref<GLTFState> p_state, Ref<GLTFNode> p_gltf_node, Dictionary &r_json, Node *p_node); - GDVIRTUAL0R(Vector<String>, _get_supported_extensions); + virtual Error export_post(Ref<GLTFState> p_state); + + // Import process. 
GDVIRTUAL2R(int, _import_preflight, Ref<GLTFState>, Vector<String>); + GDVIRTUAL0R(Vector<String>, _get_supported_extensions); + GDVIRTUAL3R(int, _parse_node_extensions, Ref<GLTFState>, Ref<GLTFNode>, Dictionary); + GDVIRTUAL3R(Node3D *, _generate_scene_node, Ref<GLTFState>, Ref<GLTFNode>, Node *); GDVIRTUAL1R(int, _import_post_parse, Ref<GLTFState>); GDVIRTUAL4R(int, _import_node, Ref<GLTFState>, Ref<GLTFNode>, Dictionary, Node *); GDVIRTUAL2R(int, _import_post, Ref<GLTFState>, Node *); + // Export process. GDVIRTUAL1R(int, _export_preflight, Node *); + GDVIRTUAL3(_convert_scene_node, Ref<GLTFState>, Ref<GLTFNode>, Node *); GDVIRTUAL4R(int, _export_node, Ref<GLTFState>, Ref<GLTFNode>, Dictionary, Node *); GDVIRTUAL1R(int, _export_post, Ref<GLTFState>); }; diff --git a/modules/gltf/gltf_document.cpp b/modules/gltf/gltf_document.cpp index 49797bb8fa..faa8ed267a 100644 --- a/modules/gltf/gltf_document.cpp +++ b/modules/gltf/gltf_document.cpp @@ -625,6 +625,11 @@ Error GLTFDocument::_parse_nodes(Ref<GLTFState> state) { node->light = light; } } + for (Ref<GLTFDocumentExtension> ext : document_extensions) { + ERR_CONTINUE(ext.is_null()); + Error err = ext->parse_node_extensions(state, node, extensions); + ERR_CONTINUE_MSG(err != OK, "GLTF: Encountered error " + itos(err) + " when parsing node extensions for node " + node->get_name() + " in file " + state->filename + ". Continuing."); + } } if (n.has("children")) { @@ -5266,6 +5271,10 @@ void GLTFDocument::_convert_scene_node(Ref<GLTFState> state, Node *p_current, co AnimationPlayer *animation_player = Object::cast_to<AnimationPlayer>(p_current); _convert_animation_player_to_gltf(animation_player, state, p_gltf_parent, p_gltf_root, gltf_node, p_current); } + for (Ref<GLTFDocumentExtension> ext : document_extensions) { + ERR_CONTINUE(ext.is_null()); + ext->convert_scene_node(state, gltf_node, p_current); + } GLTFNodeIndex current_node_i = state->nodes.size(); GLTFNodeIndex gltf_root = p_gltf_root; if (gltf_root == -1) { @@ -5589,21 +5598,32 @@ void GLTFDocument::_generate_scene_node(Ref<GLTFState> state, Node *scene_parent // and attach it to the bone_attachment scene_parent = bone_attachment; } - if (gltf_node->mesh >= 0) { - current_node = _generate_mesh_instance(state, node_index); - } else if (gltf_node->camera >= 0) { - current_node = _generate_camera(state, node_index); - } else if (gltf_node->light >= 0) { - current_node = _generate_light(state, node_index); + // Check if any GLTFDocumentExtension classes want to generate a node for us. + for (Ref<GLTFDocumentExtension> ext : document_extensions) { + ERR_CONTINUE(ext.is_null()); + current_node = ext->generate_scene_node(state, gltf_node, scene_parent); + if (current_node) { + break; + } } - - // We still have not managed to make a node. + // If none of our GLTFDocumentExtension classes generated us a node, we generate one. if (!current_node) { - current_node = _generate_spatial(state, node_index); + if (gltf_node->mesh >= 0) { + current_node = _generate_mesh_instance(state, node_index); + } else if (gltf_node->camera >= 0) { + current_node = _generate_camera(state, node_index); + } else if (gltf_node->light >= 0) { + current_node = _generate_light(state, node_index); + } else { + current_node = _generate_spatial(state, node_index); + } } + // Add the node we generated and set the owner to the scene root. 
scene_parent->add_child(current_node, true); if (current_node != scene_root) { - current_node->set_owner(scene_root); + Array args; + args.append(scene_root); + current_node->propagate_call(StringName("set_owner"), args); } current_node->set_transform(gltf_node->xform); current_node->set_name(gltf_node->get_name()); @@ -5669,19 +5689,32 @@ void GLTFDocument::_generate_skeleton_bone_node(Ref<GLTFState> state, Node *scen // and attach it to the bone_attachment scene_parent = bone_attachment; } - - // We still have not managed to make a node - if (gltf_node->mesh >= 0) { - current_node = _generate_mesh_instance(state, node_index); - } else if (gltf_node->camera >= 0) { - current_node = _generate_camera(state, node_index); - } else if (gltf_node->light >= 0) { - current_node = _generate_light(state, node_index); + // Check if any GLTFDocumentExtension classes want to generate a node for us. + for (Ref<GLTFDocumentExtension> ext : document_extensions) { + ERR_CONTINUE(ext.is_null()); + current_node = ext->generate_scene_node(state, gltf_node, scene_parent); + if (current_node) { + break; + } + } + // If none of our GLTFDocumentExtension classes generated us a node, we generate one. + if (!current_node) { + if (gltf_node->mesh >= 0) { + current_node = _generate_mesh_instance(state, node_index); + } else if (gltf_node->camera >= 0) { + current_node = _generate_camera(state, node_index); + } else if (gltf_node->light >= 0) { + current_node = _generate_light(state, node_index); + } else { + current_node = _generate_spatial(state, node_index); + } } - + // Add the node we generated and set the owner to the scene root. scene_parent->add_child(current_node, true); if (current_node != scene_root) { - current_node->set_owner(scene_root); + Array args; + args.append(scene_root); + current_node->propagate_call(StringName("set_owner"), args); } // Do not set transform here. Transform is already applied to our bone. 
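Replacing the direct set_owner call with propagate_call in both _generate_scene_node and _generate_skeleton_bone_node makes ownership assignment recursive: any extra children that a GLTFDocumentExtension attaches under the node it returns are also owned by the scene root, so they are kept when the imported scene is packed. As a sketch, the propagate_call above behaves like this hypothetical helper (not engine code):

    // Effect of current_node->propagate_call(StringName("set_owner"), args) with
    // args = [scene_root]: set_owner(scene_root) is called on current_node and
    // on every node in its subtree.
    static void set_owner_recursive(Node *p_node, Node *p_owner) {
        p_node->set_owner(p_owner);
        for (int i = 0; i < p_node->get_child_count(); i++) {
            set_owner_recursive(p_node->get_child(i), p_owner);
        }
    }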
current_node->set_name(gltf_node->get_name()); diff --git a/scene/gui/color_picker.cpp b/scene/gui/color_picker.cpp index 66c790b0d9..eb9f9039b7 100644 --- a/scene/gui/color_picker.cpp +++ b/scene/gui/color_picker.cpp @@ -1088,7 +1088,9 @@ void ColorPicker::_hsv_draw(int p_which, Control *c) { } else if (p_which == 1) { if (actual_shape == SHAPE_HSV_RECTANGLE) { Ref<Texture2D> hue = get_theme_icon(SNAME("color_hue"), SNAME("ColorPicker")); - c->draw_texture_rect(hue, Rect2(Point2(), c->get_size())); + c->draw_set_transform(Point2(), -Math_PI / 2, Size2(c->get_size().x, -c->get_size().y)); + c->draw_texture_rect(hue, Rect2(Point2(), Size2(1, 1))); + c->draw_set_transform(Point2(), 0, Size2(1, 1)); int y = c->get_size().y - c->get_size().y * (1.0 - h); Color col; col.set_hsv(h, 1, 1); diff --git a/servers/rendering/renderer_rd/effects/resolve.h b/servers/rendering/renderer_rd/effects/resolve.h index 2a4cd06827..49fba921d5 100644 --- a/servers/rendering/renderer_rd/effects/resolve.h +++ b/servers/rendering/renderer_rd/effects/resolve.h @@ -65,8 +65,8 @@ public: Resolve(); ~Resolve(); - void resolve_gi(RID p_source_depth, RID p_source_normal_roughness, RID p_source_voxel_gi, RID p_dest_depth, RID p_dest_normal_roughness, RID p_dest_voxel_gi, Vector2i p_screen_size, int p_samples, uint32_t p_barrier = RD::BARRIER_MASK_ALL); - void resolve_depth(RID p_source_depth, RID p_dest_depth, Vector2i p_screen_size, int p_samples, uint32_t p_barrier = RD::BARRIER_MASK_ALL); + void resolve_gi(RID p_source_depth, RID p_source_normal_roughness, RID p_source_voxel_gi, RID p_dest_depth, RID p_dest_normal_roughness, RID p_dest_voxel_gi, Vector2i p_screen_size, int p_samples, uint32_t p_barrier = RD::BARRIER_MASK_ALL_BARRIERS); + void resolve_depth(RID p_source_depth, RID p_dest_depth, Vector2i p_screen_size, int p_samples, uint32_t p_barrier = RD::BARRIER_MASK_ALL_BARRIERS); }; } // namespace RendererRD diff --git a/servers/rendering/renderer_rd/forward_clustered/render_forward_clustered.cpp b/servers/rendering/renderer_rd/forward_clustered/render_forward_clustered.cpp index 41fceac7c2..3d1e04fe99 100644 --- a/servers/rendering/renderer_rd/forward_clustered/render_forward_clustered.cpp +++ b/servers/rendering/renderer_rd/forward_clustered/render_forward_clustered.cpp @@ -1446,7 +1446,7 @@ void RenderForwardClustered::_pre_opaque_render(RenderDataRD *p_render_data, boo } //full barrier here, we need raster, transfer and compute and it depends from the previous work - RD::get_singleton()->barrier(RD::BARRIER_MASK_ALL, RD::BARRIER_MASK_ALL); + RD::get_singleton()->barrier(RD::BARRIER_MASK_ALL_BARRIERS, RD::BARRIER_MASK_ALL_BARRIERS); if (current_cluster_builder) { current_cluster_builder->begin(p_render_data->scene_data->cam_transform, p_render_data->scene_data->cam_projection, !p_render_data->reflection_probe.is_valid()); diff --git a/servers/rendering/renderer_rd/forward_clustered/render_forward_clustered.h b/servers/rendering/renderer_rd/forward_clustered/render_forward_clustered.h index 670eb93e20..d9145b5818 100644 --- a/servers/rendering/renderer_rd/forward_clustered/render_forward_clustered.h +++ b/servers/rendering/renderer_rd/forward_clustered/render_forward_clustered.h @@ -210,10 +210,10 @@ class RenderForwardClustered : public RendererSceneRenderRD { float screen_mesh_lod_threshold = 0.0; RD::FramebufferFormatID framebuffer_format = 0; uint32_t element_offset = 0; - uint32_t barrier = RD::BARRIER_MASK_ALL; + uint32_t barrier = RD::BARRIER_MASK_ALL_BARRIERS; bool use_directional_soft_shadow = false; - 
RenderListParameters(GeometryInstanceSurfaceDataCache **p_elements, RenderElementInfo *p_element_info, int p_element_count, bool p_reverse_cull, PassMode p_pass_mode, uint32_t p_color_pass_flags, bool p_no_gi, bool p_use_directional_soft_shadows, RID p_render_pass_uniform_set, bool p_force_wireframe = false, const Vector2 &p_uv_offset = Vector2(), float p_lod_distance_multiplier = 0.0, float p_screen_mesh_lod_threshold = 0.0, uint32_t p_view_count = 1, uint32_t p_element_offset = 0, uint32_t p_barrier = RD::BARRIER_MASK_ALL) { + RenderListParameters(GeometryInstanceSurfaceDataCache **p_elements, RenderElementInfo *p_element_info, int p_element_count, bool p_reverse_cull, PassMode p_pass_mode, uint32_t p_color_pass_flags, bool p_no_gi, bool p_use_directional_soft_shadows, RID p_render_pass_uniform_set, bool p_force_wireframe = false, const Vector2 &p_uv_offset = Vector2(), float p_lod_distance_multiplier = 0.0, float p_screen_mesh_lod_threshold = 0.0, uint32_t p_view_count = 1, uint32_t p_element_offset = 0, uint32_t p_barrier = RD::BARRIER_MASK_ALL_BARRIERS) { elements = p_elements; element_info = p_element_info; element_count = p_element_count; @@ -596,7 +596,7 @@ class RenderForwardClustered : public RendererSceneRenderRD { void _render_shadow_begin(); void _render_shadow_append(RID p_framebuffer, const PagedArray<RenderGeometryInstance *> &p_instances, const Projection &p_projection, const Transform3D &p_transform, float p_zfar, float p_bias, float p_normal_bias, bool p_use_dp, bool p_use_dp_flip, bool p_use_pancake, const Plane &p_camera_plane = Plane(), float p_lod_distance_multiplier = 0.0, float p_screen_mesh_lod_threshold = 0.0, const Rect2i &p_rect = Rect2i(), bool p_flip_y = false, bool p_clear_region = true, bool p_begin = true, bool p_end = true, RenderingMethod::RenderInfo *p_render_info = nullptr); void _render_shadow_process(); - void _render_shadow_end(uint32_t p_barrier = RD::BARRIER_MASK_ALL); + void _render_shadow_end(uint32_t p_barrier = RD::BARRIER_MASK_ALL_BARRIERS); /* Render Scene */ void _process_ssao(Ref<RenderSceneBuffersRD> p_render_buffers, RID p_environment, RID p_normal_buffer, const Projection &p_projection); diff --git a/servers/rendering/renderer_rd/forward_mobile/render_forward_mobile.cpp b/servers/rendering/renderer_rd/forward_mobile/render_forward_mobile.cpp index 2506f4578b..f860bab15e 100644 --- a/servers/rendering/renderer_rd/forward_mobile/render_forward_mobile.cpp +++ b/servers/rendering/renderer_rd/forward_mobile/render_forward_mobile.cpp @@ -612,7 +612,7 @@ void RenderForwardMobile::_pre_opaque_render(RenderDataRD *p_render_data) { } //full barrier here, we need raster, transfer and compute and it depends from the previous work - RD::get_singleton()->barrier(RD::BARRIER_MASK_ALL, RD::BARRIER_MASK_ALL); + RD::get_singleton()->barrier(RD::BARRIER_MASK_ALL_BARRIERS, RD::BARRIER_MASK_ALL_BARRIERS); bool using_shadows = true; @@ -965,7 +965,7 @@ void RenderForwardMobile::_render_scene(RenderDataRD *p_render_data, const Color if (!using_subpass_transparent) { // We're done with our subpasses so end our container pass - RD::get_singleton()->draw_list_end(RD::BARRIER_MASK_ALL); + RD::get_singleton()->draw_list_end(RD::BARRIER_MASK_ALL_BARRIERS); RD::get_singleton()->draw_command_end_label(); // Render 3D Pass / Render Reflection Probe Pass } @@ -1017,7 +1017,7 @@ void RenderForwardMobile::_render_scene(RenderDataRD *p_render_data, const Color RD::get_singleton()->draw_command_end_label(); // Render 3D Pass / Render Reflection Probe Pass - 
diff --git a/servers/rendering/renderer_rd/forward_mobile/render_forward_mobile.cpp b/servers/rendering/renderer_rd/forward_mobile/render_forward_mobile.cpp
index 2506f4578b..f860bab15e 100644
--- a/servers/rendering/renderer_rd/forward_mobile/render_forward_mobile.cpp
+++ b/servers/rendering/renderer_rd/forward_mobile/render_forward_mobile.cpp
@@ -612,7 +612,7 @@ void RenderForwardMobile::_pre_opaque_render(RenderDataRD *p_render_data) {
 	}
 	//full barrier here, we need raster, transfer and compute and it depends from the previous work
-	RD::get_singleton()->barrier(RD::BARRIER_MASK_ALL, RD::BARRIER_MASK_ALL);
+	RD::get_singleton()->barrier(RD::BARRIER_MASK_ALL_BARRIERS, RD::BARRIER_MASK_ALL_BARRIERS);
 	bool using_shadows = true;
@@ -965,7 +965,7 @@ void RenderForwardMobile::_render_scene(RenderDataRD *p_render_data, const Color
 		if (!using_subpass_transparent) {
 			// We're done with our subpasses so end our container pass
-			RD::get_singleton()->draw_list_end(RD::BARRIER_MASK_ALL);
+			RD::get_singleton()->draw_list_end(RD::BARRIER_MASK_ALL_BARRIERS);
 			RD::get_singleton()->draw_command_end_label(); // Render 3D Pass / Render Reflection Probe Pass
 		}
@@ -1017,7 +1017,7 @@ void RenderForwardMobile::_render_scene(RenderDataRD *p_render_data, const Color
 			RD::get_singleton()->draw_command_end_label(); // Render 3D Pass / Render Reflection Probe Pass
-			RD::get_singleton()->draw_list_end(RD::BARRIER_MASK_ALL);
+			RD::get_singleton()->draw_list_end(RD::BARRIER_MASK_ALL_BARRIERS);
 		} else {
 			RENDER_TIMESTAMP("Render Transparent");
@@ -1037,12 +1037,12 @@ void RenderForwardMobile::_render_scene(RenderDataRD *p_render_data, const Color
 				WorkerThreadPool::GroupID group_task = WorkerThreadPool::get_singleton()->add_template_group_task(this, &RenderForwardMobile::_render_list_thread_function, &render_list_params, thread_draw_lists.size(), -1, true, SNAME("ForwardMobileRenderSubpass"));
 				WorkerThreadPool::get_singleton()->wait_for_group_task_completion(group_task);
-				RD::get_singleton()->draw_list_end(RD::BARRIER_MASK_ALL);
+				RD::get_singleton()->draw_list_end(RD::BARRIER_MASK_ALL_BARRIERS);
 			} else {
 				//single threaded
 				RD::DrawListID draw_list = RD::get_singleton()->draw_list_begin(framebuffer, can_continue_color ? RD::INITIAL_ACTION_CONTINUE : RD::INITIAL_ACTION_KEEP, RD::FINAL_ACTION_READ, can_continue_depth ? RD::INITIAL_ACTION_CONTINUE : RD::INITIAL_ACTION_KEEP, RD::FINAL_ACTION_READ);
 				_render_list(draw_list, fb_format, &render_list_params, 0, render_list_params.element_count);
-				RD::get_singleton()->draw_list_end(RD::BARRIER_MASK_ALL);
+				RD::get_singleton()->draw_list_end(RD::BARRIER_MASK_ALL_BARRIERS);
 			}
 			RD::get_singleton()->draw_command_end_label(); // Render Transparent Subpass
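Every draw_list_end() call in the mobile renderer above keeps the full post-barrier, which matches the old numeric default. One practical effect of the rename plus the typed mask is that a call site could narrow the barrier when it knows who consumes the pass output. A hypothetical sketch, not part of this patch, reusing the single-threaded call shape shown above for a pass whose results are read only by compute shaders:

// Hypothetical narrowing of the post-barrier: only compute work reads the
// attachments written by this pass, so raster/transfer stages need not wait.
RD::DrawListID draw_list = RD::get_singleton()->draw_list_begin(framebuffer, RD::INITIAL_ACTION_KEEP, RD::FINAL_ACTION_READ, RD::INITIAL_ACTION_KEEP, RD::FINAL_ACTION_READ);
_render_list(draw_list, fb_format, &render_list_params, 0, render_list_params.element_count);
RD::get_singleton()->draw_list_end(RD::BARRIER_MASK_COMPUTE); // instead of BARRIER_MASK_ALL_BARRIERS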
diff --git a/servers/rendering/renderer_rd/forward_mobile/render_forward_mobile.h b/servers/rendering/renderer_rd/forward_mobile/render_forward_mobile.h
index ce64d805b7..6c85e1cffc 100644
--- a/servers/rendering/renderer_rd/forward_mobile/render_forward_mobile.h
+++ b/servers/rendering/renderer_rd/forward_mobile/render_forward_mobile.h
@@ -168,10 +168,10 @@ private:
 		float screen_mesh_lod_threshold = 0.0;
 		RD::FramebufferFormatID framebuffer_format = 0;
 		uint32_t element_offset = 0;
-		uint32_t barrier = RD::BARRIER_MASK_ALL;
+		uint32_t barrier = RD::BARRIER_MASK_ALL_BARRIERS;
 		uint32_t subpass = 0;
-		RenderListParameters(GeometryInstanceSurfaceDataCache **p_elements, RenderElementInfo *p_element_info, int p_element_count, bool p_reverse_cull, PassMode p_pass_mode, RID p_render_pass_uniform_set, uint32_t p_spec_constant_base_flags = 0, bool p_force_wireframe = false, const Vector2 &p_uv_offset = Vector2(), float p_lod_distance_multiplier = 0.0, float p_screen_mesh_lod_threshold = 0.0, uint32_t p_view_count = 1, uint32_t p_element_offset = 0, uint32_t p_barrier = RD::BARRIER_MASK_ALL) {
+		RenderListParameters(GeometryInstanceSurfaceDataCache **p_elements, RenderElementInfo *p_element_info, int p_element_count, bool p_reverse_cull, PassMode p_pass_mode, RID p_render_pass_uniform_set, uint32_t p_spec_constant_base_flags = 0, bool p_force_wireframe = false, const Vector2 &p_uv_offset = Vector2(), float p_lod_distance_multiplier = 0.0, float p_screen_mesh_lod_threshold = 0.0, uint32_t p_view_count = 1, uint32_t p_element_offset = 0, uint32_t p_barrier = RD::BARRIER_MASK_ALL_BARRIERS) {
 			elements = p_elements;
 			element_info = p_element_info;
 			element_count = p_element_count;
@@ -196,7 +196,7 @@ private:
 	void _render_shadow_begin();
 	void _render_shadow_append(RID p_framebuffer, const PagedArray<RenderGeometryInstance *> &p_instances, const Projection &p_projection, const Transform3D &p_transform, float p_zfar, float p_bias, float p_normal_bias, bool p_use_dp, bool p_use_dp_flip, bool p_use_pancake, const Plane &p_camera_plane = Plane(), float p_lod_distance_multiplier = 0.0, float p_screen_mesh_lod_threshold = 0.0, const Rect2i &p_rect = Rect2i(), bool p_flip_y = false, bool p_clear_region = true, bool p_begin = true, bool p_end = true, RenderingMethod::RenderInfo *p_render_info = nullptr);
 	void _render_shadow_process();
-	void _render_shadow_end(uint32_t p_barrier = RD::BARRIER_MASK_ALL);
+	void _render_shadow_end(uint32_t p_barrier = RD::BARRIER_MASK_ALL_BARRIERS);
 	/* Render Scene */
diff --git a/servers/rendering/renderer_rd/storage_rd/material_storage.h b/servers/rendering/renderer_rd/storage_rd/material_storage.h
index fb25e56ed3..a96fc847e1 100644
--- a/servers/rendering/renderer_rd/storage_rd/material_storage.h
+++ b/servers/rendering/renderer_rd/storage_rd/material_storage.h
@@ -79,7 +79,7 @@ public:
 		virtual ~MaterialData();
 		//to be used internally by update_parameters, in the most common configuration of material parameters
-		bool update_parameters_uniform_set(const HashMap<StringName, Variant> &p_parameters, bool p_uniform_dirty, bool p_textures_dirty, const HashMap<StringName, ShaderLanguage::ShaderNode::Uniform> &p_uniforms, const uint32_t *p_uniform_offsets, const Vector<ShaderCompiler::GeneratedCode::Texture> &p_texture_uniforms, const HashMap<StringName, HashMap<int, RID>> &p_default_texture_params, uint32_t p_ubo_size, RID &uniform_set, RID p_shader, uint32_t p_shader_uniform_set, bool p_use_linear_color, uint32_t p_barrier = RD::BARRIER_MASK_ALL);
+		bool update_parameters_uniform_set(const HashMap<StringName, Variant> &p_parameters, bool p_uniform_dirty, bool p_textures_dirty, const HashMap<StringName, ShaderLanguage::ShaderNode::Uniform> &p_uniforms, const uint32_t *p_uniform_offsets, const Vector<ShaderCompiler::GeneratedCode::Texture> &p_texture_uniforms, const HashMap<StringName, HashMap<int, RID>> &p_default_texture_params, uint32_t p_ubo_size, RID &uniform_set, RID p_shader, uint32_t p_shader_uniform_set, bool p_use_linear_color, uint32_t p_barrier = RD::BARRIER_MASK_ALL_BARRIERS);
 		void free_parameters_uniform_set(RID p_uniform_set);
 	private:
diff --git a/servers/rendering/renderer_rd/storage_rd/mesh_storage.cpp b/servers/rendering/renderer_rd/storage_rd/mesh_storage.cpp
index adb882986f..71f4f3ad11 100644
--- a/servers/rendering/renderer_rd/storage_rd/mesh_storage.cpp
+++ b/servers/rendering/renderer_rd/storage_rd/mesh_storage.cpp
@@ -1842,7 +1842,7 @@ void MeshStorage::_update_dirty_multimeshes() {
 					RD::get_singleton()->buffer_update(multimesh->buffer, buffer_offset * sizeof(float) + offset, MIN(region_size, size - offset), &data[region_start_index], RD::BARRIER_MASK_NO_BARRIER);
 				}
 			}
-			RD::get_singleton()->barrier(RD::BARRIER_MASK_NO_BARRIER, RD::BARRIER_MASK_ALL);
+			RD::get_singleton()->barrier(RD::BARRIER_MASK_NO_BARRIER, RD::BARRIER_MASK_ALL_BARRIERS);
 		}
 		memcpy(multimesh->previous_data_cache_dirty_regions, multimesh->data_cache_dirty_regions, data_cache_dirty_region_count * sizeof(bool));
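The multimesh update above shows the intended pairing of the two special masks: each regional buffer_update() passes BARRIER_MASK_NO_BARRIER so no implicit post-barrier is emitted per upload, and a single barrier(BARRIER_MASK_NO_BARRIER, BARRIER_MASK_ALL_BARRIERS) afterwards publishes the whole batch to later raster, compute, and transfer work. A minimal sketch of that pattern, not engine code; the buffer, data pointer, and region layout are hypothetical, and RenderingDevice is the same singleton these files reach through the RD alias:

#include "servers/rendering/rendering_device.h"

// One dirty byte range of the buffer (hypothetical layout).
struct DirtyRegion {
	uint32_t offset = 0; // byte offset into the buffer
	uint32_t size = 0;   // byte size of the region
};

void upload_dirty_regions(RID p_buffer, const uint8_t *p_data, const DirtyRegion *p_regions, int p_region_count) {
	RenderingDevice *rd = RenderingDevice::get_singleton();
	for (int i = 0; i < p_region_count; i++) {
		// Defer synchronization: no post-barrier per individual upload.
		rd->buffer_update(p_buffer, p_regions[i].offset, p_regions[i].size, p_data + p_regions[i].offset, RenderingDevice::BARRIER_MASK_NO_BARRIER);
	}
	// Make the whole batch visible to raster, compute, and transfer in one go.
	rd->barrier(RenderingDevice::BARRIER_MASK_NO_BARRIER, RenderingDevice::BARRIER_MASK_ALL_BARRIERS);
}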
diff --git a/servers/rendering/rendering_device.cpp b/servers/rendering/rendering_device.cpp
index 522718b9ff..c09b185137 100644
--- a/servers/rendering/rendering_device.cpp
+++ b/servers/rendering/rendering_device.cpp
@@ -252,7 +252,7 @@ RID RenderingDevice::_uniform_set_create(const TypedArray<RDUniform> &p_uniforms
 	return uniform_set_create(uniforms, p_shader, p_shader_set);
 }
-Error RenderingDevice::_buffer_update(RID p_buffer, uint32_t p_offset, uint32_t p_size, const Vector<uint8_t> &p_data, uint32_t p_post_barrier) {
+Error RenderingDevice::_buffer_update(RID p_buffer, uint32_t p_offset, uint32_t p_size, const Vector<uint8_t> &p_data, BitField<BarrierMask> p_post_barrier) {
 	return buffer_update(p_buffer, p_offset, p_size, p_data.ptr(), p_post_barrier);
 }
@@ -373,7 +373,7 @@ void RenderingDevice::_bind_methods() {
 	ClassDB::bind_method(D_METHOD("texture_create_shared", "view", "with_texture"), &RenderingDevice::_texture_create_shared);
 	ClassDB::bind_method(D_METHOD("texture_create_shared_from_slice", "view", "with_texture", "layer", "mipmap", "mipmaps", "slice_type"), &RenderingDevice::_texture_create_shared_from_slice, DEFVAL(1), DEFVAL(TEXTURE_SLICE_2D));
-	ClassDB::bind_method(D_METHOD("texture_update", "texture", "layer", "data", "post_barrier"), &RenderingDevice::texture_update, DEFVAL(BARRIER_MASK_ALL));
+	ClassDB::bind_method(D_METHOD("texture_update", "texture", "layer", "data", "post_barrier"), &RenderingDevice::texture_update, DEFVAL(BARRIER_MASK_ALL_BARRIERS));
 	ClassDB::bind_method(D_METHOD("texture_get_data", "texture", "layer"), &RenderingDevice::texture_get_data);
 	ClassDB::bind_method(D_METHOD("texture_is_format_supported_for_usage", "format", "usage_flags"), &RenderingDevice::texture_is_format_supported_for_usage);
@@ -381,9 +381,9 @@ void RenderingDevice::_bind_methods() {
 	ClassDB::bind_method(D_METHOD("texture_is_shared", "texture"), &RenderingDevice::texture_is_shared);
 	ClassDB::bind_method(D_METHOD("texture_is_valid", "texture"), &RenderingDevice::texture_is_valid);
-	ClassDB::bind_method(D_METHOD("texture_copy", "from_texture", "to_texture", "from_pos", "to_pos", "size", "src_mipmap", "dst_mipmap", "src_layer", "dst_layer", "post_barrier"), &RenderingDevice::texture_copy, DEFVAL(BARRIER_MASK_ALL));
-	ClassDB::bind_method(D_METHOD("texture_clear", "texture", "color", "base_mipmap", "mipmap_count", "base_layer", "layer_count", "post_barrier"), &RenderingDevice::texture_clear, DEFVAL(BARRIER_MASK_ALL));
-	ClassDB::bind_method(D_METHOD("texture_resolve_multisample", "from_texture", "to_texture", "post_barrier"), &RenderingDevice::texture_resolve_multisample, DEFVAL(BARRIER_MASK_ALL));
+	ClassDB::bind_method(D_METHOD("texture_copy", "from_texture", "to_texture", "from_pos", "to_pos", "size", "src_mipmap", "dst_mipmap", "src_layer", "dst_layer", "post_barrier"), &RenderingDevice::texture_copy, DEFVAL(BARRIER_MASK_ALL_BARRIERS));
+	ClassDB::bind_method(D_METHOD("texture_clear", "texture", "color", "base_mipmap", "mipmap_count", "base_layer", "layer_count", "post_barrier"), &RenderingDevice::texture_clear, DEFVAL(BARRIER_MASK_ALL_BARRIERS));
+	ClassDB::bind_method(D_METHOD("texture_resolve_multisample", "from_texture", "to_texture", "post_barrier"), &RenderingDevice::texture_resolve_multisample, DEFVAL(BARRIER_MASK_ALL_BARRIERS));
 	ClassDB::bind_method(D_METHOD("framebuffer_format_create", "attachments", "view_count"), &RenderingDevice::_framebuffer_format_create, DEFVAL(1));
 	ClassDB::bind_method(D_METHOD("framebuffer_format_create_multipass", "attachments", "passes", "view_count"), &RenderingDevice::_framebuffer_format_create_multipass, DEFVAL(1));
"buffer", "offset", "size_bytes", "data", "post_barrier"), &RenderingDevice::_buffer_update, DEFVAL(BARRIER_MASK_ALL_BARRIERS)); + ClassDB::bind_method(D_METHOD("buffer_clear", "buffer", "offset", "size_bytes", "post_barrier"), &RenderingDevice::buffer_clear, DEFVAL(BARRIER_MASK_ALL_BARRIERS)); ClassDB::bind_method(D_METHOD("buffer_get_data", "buffer"), &RenderingDevice::buffer_get_data); ClassDB::bind_method(D_METHOD("render_pipeline_create", "shader", "framebuffer_format", "vertex_format", "primitive", "rasterization_state", "multisample_state", "stencil_state", "color_blend_state", "dynamic_state_flags", "for_render_pass", "specialization_constants"), &RenderingDevice::_render_pipeline_create, DEFVAL(0), DEFVAL(0), DEFVAL(TypedArray<RDPipelineSpecializationConstant>())); @@ -451,7 +451,7 @@ void RenderingDevice::_bind_methods() { ClassDB::bind_method(D_METHOD("draw_list_switch_to_next_pass"), &RenderingDevice::draw_list_switch_to_next_pass); ClassDB::bind_method(D_METHOD("draw_list_switch_to_next_pass_split", "splits"), &RenderingDevice::_draw_list_switch_to_next_pass_split); - ClassDB::bind_method(D_METHOD("draw_list_end", "post_barrier"), &RenderingDevice::draw_list_end, DEFVAL(BARRIER_MASK_ALL)); + ClassDB::bind_method(D_METHOD("draw_list_end", "post_barrier"), &RenderingDevice::draw_list_end, DEFVAL(BARRIER_MASK_ALL_BARRIERS)); ClassDB::bind_method(D_METHOD("compute_list_begin", "allow_draw_overlap"), &RenderingDevice::compute_list_begin, DEFVAL(false)); ClassDB::bind_method(D_METHOD("compute_list_bind_compute_pipeline", "compute_list", "compute_pipeline"), &RenderingDevice::compute_list_bind_compute_pipeline); @@ -459,7 +459,7 @@ void RenderingDevice::_bind_methods() { ClassDB::bind_method(D_METHOD("compute_list_bind_uniform_set", "compute_list", "uniform_set", "set_index"), &RenderingDevice::compute_list_bind_uniform_set); ClassDB::bind_method(D_METHOD("compute_list_dispatch", "compute_list", "x_groups", "y_groups", "z_groups"), &RenderingDevice::compute_list_dispatch); ClassDB::bind_method(D_METHOD("compute_list_add_barrier", "compute_list"), &RenderingDevice::compute_list_add_barrier); - ClassDB::bind_method(D_METHOD("compute_list_end", "post_barrier"), &RenderingDevice::compute_list_end, DEFVAL(BARRIER_MASK_ALL)); + ClassDB::bind_method(D_METHOD("compute_list_end", "post_barrier"), &RenderingDevice::compute_list_end, DEFVAL(BARRIER_MASK_ALL_BARRIERS)); ClassDB::bind_method(D_METHOD("free_rid", "rid"), &RenderingDevice::free); @@ -475,7 +475,7 @@ void RenderingDevice::_bind_methods() { ClassDB::bind_method(D_METHOD("submit"), &RenderingDevice::submit); ClassDB::bind_method(D_METHOD("sync"), &RenderingDevice::sync); - ClassDB::bind_method(D_METHOD("barrier", "from", "to"), &RenderingDevice::barrier, DEFVAL(BARRIER_MASK_ALL), DEFVAL(BARRIER_MASK_ALL)); + ClassDB::bind_method(D_METHOD("barrier", "from", "to"), &RenderingDevice::barrier, DEFVAL(BARRIER_MASK_ALL_BARRIERS), DEFVAL(BARRIER_MASK_ALL_BARRIERS)); ClassDB::bind_method(D_METHOD("full_barrier"), &RenderingDevice::full_barrier); ClassDB::bind_method(D_METHOD("create_local_device"), &RenderingDevice::create_local_device); @@ -494,12 +494,6 @@ void RenderingDevice::_bind_methods() { ClassDB::bind_method(D_METHOD("get_driver_resource", "resource", "rid", "index"), &RenderingDevice::get_driver_resource); - BIND_CONSTANT(BARRIER_MASK_RASTER); - BIND_CONSTANT(BARRIER_MASK_COMPUTE); - BIND_CONSTANT(BARRIER_MASK_TRANSFER); - BIND_CONSTANT(BARRIER_MASK_ALL); - BIND_CONSTANT(BARRIER_MASK_NO_BARRIER); - 
@@ -741,6 +735,12 @@ void RenderingDevice::_bind_methods() {
 	BIND_ENUM_CONSTANT(DATA_FORMAT_G16_B16_R16_3PLANE_444_UNORM);
 	BIND_ENUM_CONSTANT(DATA_FORMAT_MAX);
+	BIND_BITFIELD_FLAG(BARRIER_MASK_RASTER);
+	BIND_BITFIELD_FLAG(BARRIER_MASK_COMPUTE);
+	BIND_BITFIELD_FLAG(BARRIER_MASK_TRANSFER);
+	BIND_BITFIELD_FLAG(BARRIER_MASK_ALL_BARRIERS);
+	BIND_BITFIELD_FLAG(BARRIER_MASK_NO_BARRIER);
+
 	BIND_ENUM_CONSTANT(TEXTURE_TYPE_1D);
 	BIND_ENUM_CONSTANT(TEXTURE_TYPE_2D);
 	BIND_ENUM_CONSTANT(TEXTURE_TYPE_3D);
diff --git a/servers/rendering/rendering_device.h b/servers/rendering/rendering_device.h
index d3f3972029..abdd07844a 100644
--- a/servers/rendering/rendering_device.h
+++ b/servers/rendering/rendering_device.h
@@ -396,8 +396,8 @@ public:
 		BARRIER_MASK_RASTER = 1,
 		BARRIER_MASK_COMPUTE = 2,
 		BARRIER_MASK_TRANSFER = 4,
+		BARRIER_MASK_ALL_BARRIERS = BARRIER_MASK_RASTER | BARRIER_MASK_COMPUTE | BARRIER_MASK_TRANSFER, // 7
 		BARRIER_MASK_NO_BARRIER = 8,
-		BARRIER_MASK_ALL = BARRIER_MASK_RASTER | BARRIER_MASK_COMPUTE | BARRIER_MASK_TRANSFER
 	};
@@ -532,7 +532,7 @@ public:
 	virtual RID texture_create_shared_from_slice(const TextureView &p_view, RID p_with_texture, uint32_t p_layer, uint32_t p_mipmap, uint32_t p_mipmaps = 1, TextureSliceType p_slice_type = TEXTURE_SLICE_2D) = 0;
-	virtual Error texture_update(RID p_texture, uint32_t p_layer, const Vector<uint8_t> &p_data, uint32_t p_post_barrier = BARRIER_MASK_ALL) = 0;
+	virtual Error texture_update(RID p_texture, uint32_t p_layer, const Vector<uint8_t> &p_data, BitField<BarrierMask> p_post_barrier = BARRIER_MASK_ALL_BARRIERS) = 0;
 	virtual Vector<uint8_t> texture_get_data(RID p_texture, uint32_t p_layer) = 0; // CPU textures will return immediately, while GPU textures will most likely force a flush
 	virtual bool texture_is_format_supported_for_usage(DataFormat p_format, uint32_t p_usage) const = 0;
@@ -540,9 +540,9 @@ public:
 	virtual bool texture_is_valid(RID p_texture) = 0;
 	virtual Size2i texture_size(RID p_texture) = 0;
-	virtual Error texture_copy(RID p_from_texture, RID p_to_texture, const Vector3 &p_from, const Vector3 &p_to, const Vector3 &p_size, uint32_t p_src_mipmap, uint32_t p_dst_mipmap, uint32_t p_src_layer, uint32_t p_dst_layer, uint32_t p_post_barrier = BARRIER_MASK_ALL) = 0;
-	virtual Error texture_clear(RID p_texture, const Color &p_color, uint32_t p_base_mipmap, uint32_t p_mipmaps, uint32_t p_base_layer, uint32_t p_layers, uint32_t p_post_barrier = BARRIER_MASK_ALL) = 0;
-	virtual Error texture_resolve_multisample(RID p_from_texture, RID p_to_texture, uint32_t p_post_barrier = BARRIER_MASK_ALL) = 0;
+	virtual Error texture_copy(RID p_from_texture, RID p_to_texture, const Vector3 &p_from, const Vector3 &p_to, const Vector3 &p_size, uint32_t p_src_mipmap, uint32_t p_dst_mipmap, uint32_t p_src_layer, uint32_t p_dst_layer, BitField<BarrierMask> p_post_barrier = BARRIER_MASK_ALL_BARRIERS) = 0;
+	virtual Error texture_clear(RID p_texture, const Color &p_color, uint32_t p_base_mipmap, uint32_t p_mipmaps, uint32_t p_base_layer, uint32_t p_layers, BitField<BarrierMask> p_post_barrier = BARRIER_MASK_ALL_BARRIERS) = 0;
+	virtual Error texture_resolve_multisample(RID p_from_texture, RID p_to_texture, BitField<BarrierMask> p_post_barrier = BARRIER_MASK_ALL_BARRIERS) = 0;
 	/*********************/
 	/**** FRAMEBUFFER ****/
@@ -826,8 +826,8 @@ public:
 	virtual bool uniform_set_is_valid(RID p_uniform_set) = 0;
 	virtual void uniform_set_set_invalidation_callback(RID p_uniform_set, InvalidationCallback p_callback, void *p_userdata) = 0;
-	virtual Error buffer_update(RID p_buffer, uint32_t p_offset, uint32_t p_size, const void *p_data, uint32_t p_post_barrier = BARRIER_MASK_ALL) = 0;
-	virtual Error buffer_clear(RID p_buffer, uint32_t p_offset, uint32_t p_size, uint32_t p_post_barrier = BARRIER_MASK_ALL) = 0;
+	virtual Error buffer_update(RID p_buffer, uint32_t p_offset, uint32_t p_size, const void *p_data, BitField<BarrierMask> p_post_barrier = BARRIER_MASK_ALL_BARRIERS) = 0;
+	virtual Error buffer_clear(RID p_buffer, uint32_t p_offset, uint32_t p_size, BitField<BarrierMask> p_post_barrier = BARRIER_MASK_ALL_BARRIERS) = 0;
 	virtual Vector<uint8_t> buffer_get_data(RID p_buffer) = 0; //this causes stall, only use to retrieve large buffers for saving
 	/******************************************/
@@ -1173,7 +1173,7 @@ public:
 	virtual DrawListID draw_list_switch_to_next_pass() = 0;
 	virtual Error draw_list_switch_to_next_pass_split(uint32_t p_splits, DrawListID *r_split_ids) = 0;
-	virtual void draw_list_end(uint32_t p_post_barrier = BARRIER_MASK_ALL) = 0;
+	virtual void draw_list_end(BitField<BarrierMask> p_post_barrier = BARRIER_MASK_ALL_BARRIERS) = 0;
 	/***********************/
 	/**** COMPUTE LISTS ****/
@@ -1190,9 +1190,9 @@ public:
 	virtual void compute_list_dispatch_indirect(ComputeListID p_list, RID p_buffer, uint32_t p_offset) = 0;
 	virtual void compute_list_add_barrier(ComputeListID p_list) = 0;
-	virtual void compute_list_end(uint32_t p_post_barrier = BARRIER_MASK_ALL) = 0;
+	virtual void compute_list_end(BitField<BarrierMask> p_post_barrier = BARRIER_MASK_ALL_BARRIERS) = 0;
-	virtual void barrier(uint32_t p_from = BARRIER_MASK_ALL, uint32_t p_to = BARRIER_MASK_ALL) = 0;
+	virtual void barrier(BitField<BarrierMask> p_from = BARRIER_MASK_ALL_BARRIERS, BitField<BarrierMask> p_to = BARRIER_MASK_ALL_BARRIERS) = 0;
 	virtual void full_barrier() = 0;
 	/***************/
@@ -1320,7 +1320,7 @@ protected:
 	RID _uniform_set_create(const TypedArray<RDUniform> &p_uniforms, RID p_shader, uint32_t p_shader_set);
-	Error _buffer_update(RID p_buffer, uint32_t p_offset, uint32_t p_size, const Vector<uint8_t> &p_data, uint32_t p_post_barrier = BARRIER_MASK_ALL);
+	Error _buffer_update(RID p_buffer, uint32_t p_offset, uint32_t p_size, const Vector<uint8_t> &p_data, BitField<BarrierMask> p_post_barrier = BARRIER_MASK_ALL_BARRIERS);
 	RID _render_pipeline_create(RID p_shader, FramebufferFormatID p_framebuffer_format, VertexFormatID p_vertex_format, RenderPrimitive p_render_primitive, const Ref<RDPipelineRasterizationState> &p_rasterization_state, const Ref<RDPipelineMultisampleState> &p_multisample_state, const Ref<RDPipelineDepthStencilState> &p_depth_stencil_state, const Ref<RDPipelineColorBlendState> &p_blend_state, int p_dynamic_state_flags, uint32_t p_for_render_pass, const TypedArray<RDPipelineSpecializationConstant> &p_specialization_constants);
 	RID _compute_pipeline_create(RID p_shader, const TypedArray<RDPipelineSpecializationConstant> &p_specialization_constants);
@@ -1337,6 +1337,7 @@ VARIANT_ENUM_CAST(RenderingDevice::ShaderStage)
 VARIANT_ENUM_CAST(RenderingDevice::ShaderLanguage)
 VARIANT_ENUM_CAST(RenderingDevice::CompareOperator)
 VARIANT_ENUM_CAST(RenderingDevice::DataFormat)
+VARIANT_BITFIELD_CAST(RenderingDevice::BarrierMask);
 VARIANT_ENUM_CAST(RenderingDevice::TextureType)
 VARIANT_ENUM_CAST(RenderingDevice::TextureSamples)
 VARIANT_ENUM_CAST(RenderingDevice::TextureUsageBits)
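The renamed constant in the rendering_device.h hunk above is the heart of the change: BARRIER_MASK_ALL_BARRIERS is the named union of the three stage bits and keeps the old numeric value 7 (which is why all the DEFVAL defaults stay compatible), while BARRIER_MASK_NO_BARRIER = 8 is a separate bit deliberately outside that union. A standalone sketch of the arithmetic; the enum re-statement here is local to the example, not engine code:

#include <cstdint>
#include <cstdio>

// Local re-statement of the BarrierMask values from rendering_device.h above.
enum BarrierMask : uint32_t {
	BARRIER_MASK_RASTER = 1,
	BARRIER_MASK_COMPUTE = 2,
	BARRIER_MASK_TRANSFER = 4,
	BARRIER_MASK_ALL_BARRIERS = BARRIER_MASK_RASTER | BARRIER_MASK_COMPUTE | BARRIER_MASK_TRANSFER, // 7
	BARRIER_MASK_NO_BARRIER = 8,
};

static_assert(BARRIER_MASK_ALL_BARRIERS == 7, "matches the old numeric default used by the bindings");
static_assert((BARRIER_MASK_ALL_BARRIERS & BARRIER_MASK_NO_BARRIER) == 0, "NO_BARRIER is not part of the union");

int main() {
	// Combining the individual stage bits reproduces the full mask.
	uint32_t mask = BARRIER_MASK_RASTER | BARRIER_MASK_COMPUTE | BARRIER_MASK_TRANSFER;
	std::printf("combined = %u, all_barriers = %u\n", mask, (uint32_t)BARRIER_MASK_ALL_BARRIERS);
	return 0;
}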