Diffstat (limited to 'drivers')
-rw-r--r--  drivers/dummy/rasterizer_dummy.h            |   2
-rw-r--r--  drivers/gles2/rasterizer_storage_gles2.cpp  |   2
-rw-r--r--  drivers/gles2/rasterizer_storage_gles2.h    |   2
-rw-r--r--  drivers/gles3/rasterizer_storage_gles3.cpp  |   2
-rw-r--r--  drivers/gles3/rasterizer_storage_gles3.h    |   2
-rw-r--r--  drivers/vulkan/rendering_device_vulkan.cpp  | 291
-rw-r--r--  drivers/vulkan/rendering_device_vulkan.h    |  71
-rw-r--r--  drivers/vulkan/vk_mem_alloc.cpp             |   3
-rw-r--r--  drivers/vulkan/vulkan_context.cpp           | 504
-rw-r--r--  drivers/vulkan/vulkan_context.h             |  93
10 files changed, 641 insertions, 331 deletions
diff --git a/drivers/dummy/rasterizer_dummy.h b/drivers/dummy/rasterizer_dummy.h
index 648a8f8d44..796e276418 100644
--- a/drivers/dummy/rasterizer_dummy.h
+++ b/drivers/dummy/rasterizer_dummy.h
@@ -701,7 +701,7 @@ public:
void render_target_set_external_texture(RID p_render_target, unsigned int p_texture_id) {}
void render_target_set_flag(RID p_render_target, RenderTargetFlags p_flag, bool p_value) {}
bool render_target_was_used(RID p_render_target) { return false; }
- void render_target_clear_used_flag(RID p_render_target) {}
+ void render_target_set_as_unused(RID p_render_target) {}
void render_target_set_msaa(RID p_render_target, VS::ViewportMSAA p_msaa) {}
/* CANVAS SHADOW */
diff --git a/drivers/gles2/rasterizer_storage_gles2.cpp b/drivers/gles2/rasterizer_storage_gles2.cpp
index 0b44234173..2e06850413 100644
--- a/drivers/gles2/rasterizer_storage_gles2.cpp
+++ b/drivers/gles2/rasterizer_storage_gles2.cpp
@@ -5358,7 +5358,7 @@ bool RasterizerStorageGLES2::render_target_was_used(RID p_render_target) {
return rt->used_in_frame;
}
-void RasterizerStorageGLES2::render_target_clear_used_flag(RID p_render_target) {
+void RasterizerStorageGLES2::render_target_set_as_unused(RID p_render_target) {
RenderTarget *rt = render_target_owner.getornull(p_render_target);
ERR_FAIL_COND(!rt);
diff --git a/drivers/gles2/rasterizer_storage_gles2.h b/drivers/gles2/rasterizer_storage_gles2.h
index de4dfe3f46..79e3f610ea 100644
--- a/drivers/gles2/rasterizer_storage_gles2.h
+++ b/drivers/gles2/rasterizer_storage_gles2.h
@@ -1247,7 +1247,7 @@ public:
virtual void render_target_set_flag(RID p_render_target, RenderTargetFlags p_flag, bool p_value);
virtual bool render_target_was_used(RID p_render_target);
- virtual void render_target_clear_used_flag(RID p_render_target);
+ virtual void render_target_set_as_unused(RID p_render_target);
virtual void render_target_set_msaa(RID p_render_target, VS::ViewportMSAA p_msaa);
/* CANVAS SHADOW */
diff --git a/drivers/gles3/rasterizer_storage_gles3.cpp b/drivers/gles3/rasterizer_storage_gles3.cpp
index 5542f06eb6..5b19dee7cb 100644
--- a/drivers/gles3/rasterizer_storage_gles3.cpp
+++ b/drivers/gles3/rasterizer_storage_gles3.cpp
@@ -7594,7 +7594,7 @@ bool RasterizerStorageGLES3::render_target_was_used(RID p_render_target) {
return rt->used_in_frame;
}
-void RasterizerStorageGLES3::render_target_clear_used_flag(RID p_render_target) {
+void RasterizerStorageGLES3::render_target_set_as_unused(RID p_render_target) {
RenderTarget *rt = render_target_owner.getornull(p_render_target);
ERR_FAIL_COND(!rt);
diff --git a/drivers/gles3/rasterizer_storage_gles3.h b/drivers/gles3/rasterizer_storage_gles3.h
index 4137ab44fb..c4e4ac87e4 100644
--- a/drivers/gles3/rasterizer_storage_gles3.h
+++ b/drivers/gles3/rasterizer_storage_gles3.h
@@ -1404,7 +1404,7 @@ public:
virtual void render_target_set_flag(RID p_render_target, RenderTargetFlags p_flag, bool p_value);
virtual bool render_target_was_used(RID p_render_target);
- virtual void render_target_clear_used_flag(RID p_render_target);
+ virtual void render_target_set_as_unused(RID p_render_target);
virtual void render_target_set_msaa(RID p_render_target, VS::ViewportMSAA p_msaa);
/* CANVAS SHADOW */
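
The GLES2/GLES3 hunks above only rename the virtual method. Below is a minimal sketch of how a caller might drive the renamed pair at end of frame; the flush_render_target_usage() helper, its loop and the RasterizerStorage pointer are assumptions for illustration, not code from this commit.

// Hypothetical end-of-frame pass over render targets (illustrative only).
void flush_render_target_usage(RasterizerStorage *storage, const Vector<RID> &render_targets) {
	for (int i = 0; i < render_targets.size(); i++) {
		RID rt = render_targets[i];
		if (storage->render_target_was_used(rt)) {
			// Reset the usage flag so the next frame can detect whether
			// this target is drawn to again.
			storage->render_target_set_as_unused(rt);
		}
	}
}
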
diff --git a/drivers/vulkan/rendering_device_vulkan.cpp b/drivers/vulkan/rendering_device_vulkan.cpp
index ed044ceb05..516853a64e 100644
--- a/drivers/vulkan/rendering_device_vulkan.cpp
+++ b/drivers/vulkan/rendering_device_vulkan.cpp
@@ -24,20 +24,14 @@ void RenderingDeviceVulkan::_add_dependency(RID p_id, RID p_depends_on) {
void RenderingDeviceVulkan::_free_dependencies(RID p_id) {
//direct dependencies must be freed
- List<RID> to_free;
+
Map<RID, Set<RID> >::Element *E = dependency_map.find(p_id);
if (E) {
- for (Set<RID>::Element *F = E->get().front(); F; F = F->next()) {
- to_free.push_back(F->get());
+ while (E->get().size()) {
+ free(E->get().front()->get());
}
-
dependency_map.erase(E);
-
- while (to_free.front()) {
- free(to_free.front()->get());
- to_free.pop_front();
- }
}
//reverse depenencies must be unreferenced
@@ -47,9 +41,9 @@ void RenderingDeviceVulkan::_free_dependencies(RID p_id) {
for (Set<RID>::Element *F = E->get().front(); F; F = F->next()) {
Map<RID, Set<RID> >::Element *G = dependency_map.find(F->get());
- if (G) {
- G->get().erase(p_id);
- }
+ ERR_CONTINUE(!G);
+ ERR_CONTINUE(!G->get().has(p_id));
+ G->get().erase(p_id);
}
reverse_dependency_map.erase(E);
@@ -1210,7 +1204,6 @@ Error RenderingDeviceVulkan::_buffer_free(Buffer *p_buffer) {
ERR_FAIL_COND_V(p_buffer->size == 0, ERR_INVALID_PARAMETER);
vmaDestroyBuffer(allocator, p_buffer->buffer, p_buffer->allocation);
- vmaFreeMemory(allocator, p_buffer->allocation);
p_buffer->buffer = NULL;
p_buffer->allocation = NULL;
p_buffer->size = 0;
@@ -1455,6 +1448,16 @@ Error RenderingDeviceVulkan::_buffer_update(Buffer *p_buffer, size_t p_offset, c
return OK;
}
+void RenderingDeviceVulkan::_memory_barrier(VkPipelineStageFlags p_src_stage_mask, VkPipelineStageFlags p_dst_stage_mask, VkAccessFlags p_src_access, VkAccessFlags p_dst_sccess, bool p_sync_with_draw) {
+
+ VkMemoryBarrier mem_barrier;
+ mem_barrier.sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER;
+ mem_barrier.pNext = NULL;
+ mem_barrier.srcAccessMask = p_src_access;
+ mem_barrier.dstAccessMask = p_dst_sccess;
+
+ vkCmdPipelineBarrier(p_sync_with_draw ? frames[frame].draw_command_buffer : frames[frame].setup_command_buffer, p_src_stage_mask, p_dst_stage_mask, 0, 1, &mem_barrier, 0, NULL, 0, NULL);
+}
/*****************/
/**** TEXTURE ****/
/*****************/
@@ -1753,7 +1756,6 @@ RID RenderingDeviceVulkan::texture_create(const TextureFormat &p_format, const T
if (err) {
vmaDestroyImage(allocator, texture.image, texture.allocation);
- vmaFreeMemory(allocator, texture.allocation);
ERR_FAIL_V(RID());
}
@@ -2082,6 +2084,16 @@ Error RenderingDeviceVulkan::texture_update(RID p_texture, uint32_t p_layer, con
return OK;
}
+bool RenderingDeviceVulkan::texture_is_shared(RID p_texture) {
+ Texture *tex = texture_owner.getornull(p_texture);
+ ERR_FAIL_COND_V(!tex, false);
+ return tex->owner.is_valid();
+}
+
+bool RenderingDeviceVulkan::texture_is_valid(RID p_texture) {
+ return texture_owner.owns(p_texture);
+}
+
bool RenderingDeviceVulkan::texture_is_format_supported_for_usage(DataFormat p_format, uint32_t p_usage) const {
ERR_FAIL_INDEX_V(p_format, DATA_FORMAT_MAX, false);
@@ -2487,7 +2499,9 @@ RID RenderingDeviceVulkan::vertex_buffer_create(uint32_t p_size_bytes, const Poo
uint64_t data_size = p_data.size();
PoolVector<uint8_t>::Read r = p_data.read();
_buffer_update(&buffer, 0, r.ptr(), data_size);
+ _memory_barrier(VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT, false);
}
+
return vertex_buffer_owner.make_rid(buffer);
}
@@ -2498,14 +2512,17 @@ RenderingDevice::VertexFormatID RenderingDeviceVulkan::vertex_format_create(cons
VertexDescriptionKey key;
key.vertex_formats = p_vertex_formats;
- const Map<VertexDescriptionKey, VertexFormatID>::Element *E = vertex_format_cache.find(key);
- if (E) {
- return E->get();
+
+ VertexFormatID *idptr = vertex_format_cache.getptr(key);
+ if (idptr) {
+ return *idptr;
}
+
//does not exist, create one and cache it
VertexDescriptionCache vdcache;
vdcache.bindings = memnew_arr(VkVertexInputBindingDescription, p_vertex_formats.size());
vdcache.attributes = memnew_arr(VkVertexInputAttributeDescription, p_vertex_formats.size());
+
Set<int> used_locations;
for (int i = 0; i < p_vertex_formats.size(); i++) {
ERR_CONTINUE(p_vertex_formats[i].format >= DATA_FORMAT_MAX);
@@ -2533,9 +2550,10 @@ RenderingDevice::VertexFormatID RenderingDeviceVulkan::vertex_format_create(cons
vdcache.create_info.vertexBindingDescriptionCount = p_vertex_formats.size();
vdcache.create_info.pVertexBindingDescriptions = vdcache.bindings;
+ vdcache.vertex_formats = p_vertex_formats;
VertexFormatID id = VertexFormatID(vertex_format_cache.size()) | (VertexFormatID(ID_TYPE_VERTEX_FORMAT) << ID_BASE_SHIFT);
- vdcache.E = vertex_format_cache.insert(key, id);
+ vertex_format_cache[key] = id;
vertex_formats[id] = vdcache;
return id;
}
@@ -2547,7 +2565,7 @@ RID RenderingDeviceVulkan::vertex_array_create(uint32_t p_vertex_count, VertexFo
ERR_FAIL_COND_V(!vertex_formats.has(p_vertex_format), RID());
const VertexDescriptionCache &vd = vertex_formats[p_vertex_format];
- ERR_FAIL_COND_V(vd.E->key().vertex_formats.size() != p_src_buffers.size(), RID());
+ ERR_FAIL_COND_V(vd.vertex_formats.size() != p_src_buffers.size(), RID());
for (int i = 0; i < p_src_buffers.size(); i++) {
ERR_FAIL_COND_V(!vertex_buffer_owner.owns(p_src_buffers[i]), RID());
@@ -2563,7 +2581,8 @@ RID RenderingDeviceVulkan::vertex_array_create(uint32_t p_vertex_count, VertexFo
//validate with buffer
{
- const VertexDescription &atf = vd.E->key().vertex_formats[i];
+ const VertexDescription &atf = vd.vertex_formats[i];
+
uint32_t element_size = get_format_vertex_size(atf.format);
ERR_FAIL_COND_V(element_size == 0, RID()); //should never happens since this was prevalidated
@@ -2641,6 +2660,7 @@ RID RenderingDeviceVulkan::index_buffer_create(uint32_t p_index_count, IndexBuff
uint64_t data_size = p_data.size();
PoolVector<uint8_t>::Read r = p_data.read();
_buffer_update(&index_buffer, 0, r.ptr(), data_size);
+ _memory_barrier(VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_INDEX_READ_BIT, false);
}
return index_buffer_owner.make_rid(index_buffer);
}
@@ -3314,6 +3334,7 @@ RID RenderingDeviceVulkan::uniform_buffer_create(uint32_t p_size_bytes, const Po
uint64_t data_size = p_data.size();
PoolVector<uint8_t>::Read r = p_data.read();
_buffer_update(&buffer, 0, r.ptr(), data_size);
+ _memory_barrier(VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_UNIFORM_READ_BIT, false);
}
return uniform_buffer_owner.make_rid(buffer);
}
@@ -3332,6 +3353,7 @@ RID RenderingDeviceVulkan::storage_buffer_create(uint32_t p_size_bytes, const Po
uint64_t data_size = p_data.size();
PoolVector<uint8_t>::Read r = p_data.read();
_buffer_update(&buffer, 0, r.ptr(), data_size);
+ _memory_barrier(VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT, false);
}
return storage_buffer_owner.make_rid(buffer);
}
@@ -3354,6 +3376,7 @@ RID RenderingDeviceVulkan::texture_buffer_create(uint32_t p_size_elements, DataF
uint64_t data_size = p_data.size();
PoolVector<uint8_t>::Read r = p_data.read();
_buffer_update(&texture_buffer.buffer, 0, r.ptr(), data_size);
+ _memory_barrier(VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_SHADER_READ_BIT, false);
}
VkBufferViewCreateInfo view_create_info;
@@ -3483,6 +3506,9 @@ void RenderingDeviceVulkan::_descriptor_pool_free(const DescriptorPoolKey &p_key
vkDestroyDescriptorPool(device, p_pool->pool, NULL);
descriptor_pools[p_key].erase(p_pool);
memdelete(p_pool);
+ if (descriptor_pools[p_key].empty()) {
+ descriptor_pools.erase(p_key);
+ }
}
}
@@ -3857,16 +3883,29 @@ bool RenderingDeviceVulkan::uniform_set_is_valid(RID p_uniform_set) {
Error RenderingDeviceVulkan::buffer_update(RID p_buffer, uint32_t p_offset, uint32_t p_size, void *p_data, bool p_sync_with_draw) {
_THREAD_SAFE_METHOD_
+ VkPipelineStageFlags dst_stage_mask;
+ VkAccessFlags dst_access;
+
Buffer *buffer = NULL;
if (vertex_buffer_owner.owns(p_buffer)) {
+ dst_stage_mask = VK_PIPELINE_STAGE_VERTEX_INPUT_BIT;
+ dst_access = VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT;
buffer = vertex_buffer_owner.getornull(p_buffer);
} else if (index_buffer_owner.owns(p_buffer)) {
+ dst_stage_mask = VK_PIPELINE_STAGE_VERTEX_INPUT_BIT;
+ dst_access = VK_ACCESS_INDEX_READ_BIT;
buffer = index_buffer_owner.getornull(p_buffer);
} else if (uniform_buffer_owner.owns(p_buffer)) {
+ dst_stage_mask = VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
+ dst_access = VK_ACCESS_UNIFORM_READ_BIT;
buffer = uniform_buffer_owner.getornull(p_buffer);
} else if (texture_buffer_owner.owns(p_buffer)) {
+ dst_stage_mask = VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
+ dst_access = VK_ACCESS_SHADER_READ_BIT;
buffer = &texture_buffer_owner.getornull(p_buffer)->buffer;
} else if (storage_buffer_owner.owns(p_buffer)) {
+ dst_stage_mask = VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
+ dst_access = VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
buffer = storage_buffer_owner.getornull(p_buffer);
} else {
ERR_FAIL_V_MSG(ERR_INVALID_PARAMETER, "Buffer argument is not a valid buffer of any type.");
@@ -3875,7 +3914,14 @@ Error RenderingDeviceVulkan::buffer_update(RID p_buffer, uint32_t p_offset, uint
ERR_FAIL_COND_V_MSG(p_offset + p_size > buffer->size, ERR_INVALID_PARAMETER,
"Attempted to write buffer (" + itos((p_offset + p_size) - buffer->size) + " bytes) past the end.");
- return _buffer_update(buffer, p_offset, (uint8_t *)p_data, p_size, p_sync_with_draw);
+ Error err = _buffer_update(buffer, p_offset, (uint8_t *)p_data, p_size, p_sync_with_draw);
+ if (err) {
+ return err;
+ }
+
+ _memory_barrier(VK_PIPELINE_STAGE_TRANSFER_BIT, dst_stage_mask, VK_ACCESS_TRANSFER_WRITE_BIT, dst_access, p_sync_with_draw);
+
+ return err;
}
/*************************/
@@ -3908,17 +3954,16 @@ RID RenderingDeviceVulkan::render_pipeline_create(RID p_shader, FramebufferForma
if (p_vertex_format != INVALID_ID) {
//uses vertices, else it does not
ERR_FAIL_COND_V(!vertex_formats.has(p_vertex_format), RID());
- VertexDescriptionCache &vd = vertex_formats[p_vertex_format];
+ const VertexDescriptionCache &vd = vertex_formats[p_vertex_format];
pipeline_vertex_input_state_create_info = vd.create_info;
//validate with inputs
for (int i = 0; i < shader->vertex_input_locations.size(); i++) {
uint32_t location = shader->vertex_input_locations[i];
- const VertexDescriptionKey &k = vd.E->key();
bool found = false;
- for (int j = 0; j < k.vertex_formats.size(); j++) {
- if (k.vertex_formats[j].location == location) {
+ for (int j = 0; j < vd.vertex_formats.size(); j++) {
+ if (vd.vertex_formats[j].location == location) {
found = true;
}
}
@@ -4237,6 +4282,11 @@ RID RenderingDeviceVulkan::render_pipeline_create(RID p_shader, FramebufferForma
return id;
}
+bool RenderingDeviceVulkan::render_pipeline_is_valid(RID p_pipeline) {
+ _THREAD_SAFE_METHOD_
+ return pipeline_owner.owns(p_pipeline);
+}
+
/****************/
/**** SCREEN ****/
/****************/
@@ -4244,12 +4294,12 @@ RID RenderingDeviceVulkan::render_pipeline_create(RID p_shader, FramebufferForma
int RenderingDeviceVulkan::screen_get_width(int p_screen) const {
_THREAD_SAFE_METHOD_
- return context->get_screen_width(p_screen);
+ return context->window_get_width(p_screen);
}
int RenderingDeviceVulkan::screen_get_height(int p_screen) const {
_THREAD_SAFE_METHOD_
- return context->get_screen_height(p_screen);
+ return context->window_get_height(p_screen);
}
RenderingDevice::FramebufferFormatID RenderingDeviceVulkan::screen_get_framebuffer_format() const {
@@ -4295,11 +4345,11 @@ RenderingDevice::DrawListID RenderingDeviceVulkan::draw_list_begin_for_screen(in
VkRenderPassBeginInfo render_pass_begin;
render_pass_begin.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
render_pass_begin.pNext = NULL;
- render_pass_begin.renderPass = context->get_render_pass();
- render_pass_begin.framebuffer = context->get_frame_framebuffer(frame);
+ render_pass_begin.renderPass = context->window_get_render_pass(p_screen);
+ render_pass_begin.framebuffer = context->window_get_framebuffer(p_screen);
- render_pass_begin.renderArea.extent.width = context->get_screen_width(p_screen);
- render_pass_begin.renderArea.extent.height = context->get_screen_height(p_screen);
+ render_pass_begin.renderArea.extent.width = context->window_get_width(p_screen);
+ render_pass_begin.renderArea.extent.height = context->window_get_height(p_screen);
render_pass_begin.renderArea.offset.x = 0;
render_pass_begin.renderArea.offset.y = 0;
@@ -4504,6 +4554,7 @@ RenderingDevice::DrawListID RenderingDeviceVulkan::draw_list_begin(RID p_framebu
vkCmdSetScissor(command_buffer, 0, 1, &scissor);
+ draw_list->viewport = Rect2i(viewport_offset, viewport_size);
return ID_TYPE_DRAW_LIST;
}
@@ -4646,6 +4697,8 @@ Error RenderingDeviceVulkan::draw_list_begin_split(RID p_framebuffer, uint32_t p
vkCmdSetScissor(command_buffer, 0, 1, &scissor);
r_split_ids[i] = (DrawListID(1) << DrawListID(ID_TYPE_SPLIT_DRAW_LIST)) + i;
+
+ draw_list[i].viewport = Rect2i(viewport_offset, viewport_size);
}
return OK;
@@ -4776,6 +4829,15 @@ void RenderingDeviceVulkan::draw_list_bind_index_array(DrawListID p_list, RID p_
vkCmdBindIndexBuffer(dl->command_buffer, index_array->buffer, index_array->offset, index_array->index_type);
}
+void RenderingDeviceVulkan::draw_list_set_line_width(DrawListID p_list, float p_width) {
+
+ DrawList *dl = _get_draw_list_ptr(p_list);
+ ERR_FAIL_COND(!dl);
+ ERR_FAIL_COND_MSG(!dl->validation.active, "Submitted Draw Lists can no longer be modified.");
+
+ vkCmdSetLineWidth(dl->command_buffer, p_width);
+}
+
void RenderingDeviceVulkan::draw_list_set_push_constant(DrawListID p_list, void *p_data, uint32_t p_data_size) {
DrawList *dl = _get_draw_list_ptr(p_list);
ERR_FAIL_COND(!dl);
@@ -4873,8 +4935,37 @@ void RenderingDeviceVulkan::draw_list_draw(DrawListID p_list, bool p_use_indices
}
void RenderingDeviceVulkan::draw_list_enable_scissor(DrawListID p_list, const Rect2 &p_rect) {
+ DrawList *dl = _get_draw_list_ptr(p_list);
+ ERR_FAIL_COND(!dl);
+ ERR_FAIL_COND_MSG(!dl->validation.active, "Submitted Draw Lists can no longer be modified.");
+
+ Rect2i rect = p_rect;
+ rect.position += dl->viewport.position;
+
+ rect = dl->viewport.clip(rect);
+
+ if (rect.get_area() == 0) {
+ return;
+ }
+ VkRect2D scissor;
+ scissor.offset.x = rect.position.x;
+ scissor.offset.y = rect.position.y;
+ scissor.extent.width = rect.size.width;
+ scissor.extent.height = rect.size.height;
+
+ vkCmdSetScissor(dl->command_buffer, 0, 1, &scissor);
}
void RenderingDeviceVulkan::draw_list_disable_scissor(DrawListID p_list) {
+ DrawList *dl = _get_draw_list_ptr(p_list);
+ ERR_FAIL_COND(!dl);
+ ERR_FAIL_COND_MSG(!dl->validation.active, "Submitted Draw Lists can no longer be modified.");
+
+ VkRect2D scissor;
+ scissor.offset.x = dl->viewport.position.x;
+ scissor.offset.y = dl->viewport.position.y;
+ scissor.extent.width = dl->viewport.size.width;
+ scissor.extent.height = dl->viewport.size.height;
+ vkCmdSetScissor(dl->command_buffer, 0, 1, &scissor);
}
void RenderingDeviceVulkan::draw_list_end() {
@@ -4997,6 +5088,7 @@ void RenderingDeviceVulkan::_free_internal(RID p_id) {
Buffer b;
b.allocation = index_buffer->allocation;
b.buffer = index_buffer->buffer;
+ b.size = index_buffer->size;
frames[frame].buffers_to_dispose_of.push_back(b);
index_buffer_owner.free(p_id);
} else if (index_array_owner.owns(p_id)) {
@@ -5050,42 +5142,42 @@ void RenderingDeviceVulkan::finalize_frame() {
vkEndCommandBuffer(frames[frame].setup_command_buffer);
vkEndCommandBuffer(frames[frame].draw_command_buffer);
}
+ screen_prepared = false;
}
-void RenderingDeviceVulkan::_free_pending_resources() {
+void RenderingDeviceVulkan::_free_pending_resources(int p_frame) {
//free in dependency usage order, so nothing weird happens
-
//pipelines
- while (frames[frame].pipelines_to_dispose_of.front()) {
- RenderPipeline *pipeline = &frames[frame].pipelines_to_dispose_of.front()->get();
+ while (frames[p_frame].pipelines_to_dispose_of.front()) {
+ RenderPipeline *pipeline = &frames[p_frame].pipelines_to_dispose_of.front()->get();
vkDestroyPipeline(device, pipeline->pipeline, NULL);
- frames[frame].pipelines_to_dispose_of.pop_front();
+ frames[p_frame].pipelines_to_dispose_of.pop_front();
}
//uniform sets
- while (frames[frame].uniform_sets_to_dispose_of.front()) {
- UniformSet *uniform_set = &frames[frame].uniform_sets_to_dispose_of.front()->get();
+ while (frames[p_frame].uniform_sets_to_dispose_of.front()) {
+ UniformSet *uniform_set = &frames[p_frame].uniform_sets_to_dispose_of.front()->get();
vkFreeDescriptorSets(device, uniform_set->pool->pool, 1, &uniform_set->descriptor_set);
_descriptor_pool_free(uniform_set->pool_key, uniform_set->pool);
- frames[frame].uniform_sets_to_dispose_of.pop_front();
+ frames[p_frame].uniform_sets_to_dispose_of.pop_front();
}
//buffer views
- while (frames[frame].buffer_views_to_dispose_of.front()) {
- VkBufferView buffer_view = frames[frame].buffer_views_to_dispose_of.front()->get();
+ while (frames[p_frame].buffer_views_to_dispose_of.front()) {
+ VkBufferView buffer_view = frames[p_frame].buffer_views_to_dispose_of.front()->get();
vkDestroyBufferView(device, buffer_view, NULL);
- frames[frame].buffer_views_to_dispose_of.pop_front();
+ frames[p_frame].buffer_views_to_dispose_of.pop_front();
}
//shaders
- while (frames[frame].shaders_to_dispose_of.front()) {
- Shader *shader = &frames[frame].shaders_to_dispose_of.front()->get();
+ while (frames[p_frame].shaders_to_dispose_of.front()) {
+ Shader *shader = &frames[p_frame].shaders_to_dispose_of.front()->get();
//descriptor set layout for each set
for (int i = 0; i < shader->sets.size(); i++) {
@@ -5100,21 +5192,21 @@ void RenderingDeviceVulkan::_free_pending_resources() {
vkDestroyShaderModule(device, shader->pipeline_stages[i].module, NULL);
}
- frames[frame].shaders_to_dispose_of.pop_front();
+ frames[p_frame].shaders_to_dispose_of.pop_front();
}
//samplers
- while (frames[frame].samplers_to_dispose_of.front()) {
- VkSampler sampler = frames[frame].samplers_to_dispose_of.front()->get();
+ while (frames[p_frame].samplers_to_dispose_of.front()) {
+ VkSampler sampler = frames[p_frame].samplers_to_dispose_of.front()->get();
vkDestroySampler(device, sampler, NULL);
- frames[frame].samplers_to_dispose_of.pop_front();
+ frames[p_frame].samplers_to_dispose_of.pop_front();
}
//framebuffers
- while (frames[frame].framebuffers_to_dispose_of.front()) {
- Framebuffer *framebuffer = &frames[frame].framebuffers_to_dispose_of.front()->get();
+ while (frames[p_frame].framebuffers_to_dispose_of.front()) {
+ Framebuffer *framebuffer = &frames[p_frame].framebuffers_to_dispose_of.front()->get();
for (Map<Framebuffer::VersionKey, Framebuffer::Version>::Element *E = framebuffer->framebuffers.front(); E; E = E->next()) {
//first framebuffer, then render pass because it depends on it
@@ -5122,12 +5214,12 @@ void RenderingDeviceVulkan::_free_pending_resources() {
vkDestroyRenderPass(device, E->get().render_pass, NULL);
}
- frames[frame].framebuffers_to_dispose_of.pop_front();
+ frames[p_frame].framebuffers_to_dispose_of.pop_front();
}
//textures
- while (frames[frame].textures_to_dispose_of.front()) {
- Texture *texture = &frames[frame].textures_to_dispose_of.front()->get();
+ while (frames[p_frame].textures_to_dispose_of.front()) {
+ Texture *texture = &frames[p_frame].textures_to_dispose_of.front()->get();
if (texture->bound) {
WARN_PRINT("Deleted a texture while it was bound..");
@@ -5136,19 +5228,25 @@ void RenderingDeviceVulkan::_free_pending_resources() {
if (texture->owner.is_null()) {
//actually owns the image and the allocation too
vmaDestroyImage(allocator, texture->image, texture->allocation);
- vmaFreeMemory(allocator, texture->allocation);
}
- frames[frame].textures_to_dispose_of.pop_front();
+ frames[p_frame].textures_to_dispose_of.pop_front();
}
//buffers
- while (frames[frame].buffers_to_dispose_of.front()) {
- _buffer_free(&frames[frame].buffers_to_dispose_of.front()->get());
+ while (frames[p_frame].buffers_to_dispose_of.front()) {
+
+ _buffer_free(&frames[p_frame].buffers_to_dispose_of.front()->get());
- frames[frame].buffers_to_dispose_of.pop_front();
+ frames[p_frame].buffers_to_dispose_of.pop_front();
}
}
+void RenderingDeviceVulkan::prepare_screen_for_drawing() {
+ _THREAD_SAFE_METHOD_
+ context->prepare_buffers();
+ screen_prepared = true;
+}
+
void RenderingDeviceVulkan::advance_frame() {
_THREAD_SAFE_METHOD_
@@ -5157,7 +5255,7 @@ void RenderingDeviceVulkan::advance_frame() {
frame = (frame + 1) % frame_count;
//erase pending resources
- _free_pending_resources();
+ _free_pending_resources(frame);
//create setup command buffer and set as the setup buffer
@@ -5192,7 +5290,7 @@ void RenderingDeviceVulkan::initialize(VulkanContext *p_context) {
context = p_context;
device = p_context->get_device();
- frame_count = p_context->get_frame_count();
+ frame_count = p_context->get_swapchain_image_count() + 1; //always need one extra to ensure it's unused at any time, without having to use a fence for this.
limits = p_context->get_device_limits();
{ //initialize allocator
@@ -5292,10 +5390,83 @@ void RenderingDeviceVulkan::initialize(VulkanContext *p_context) {
draw_list_count = 0;
draw_list_split = false;
}
+
+template <class T>
+void RenderingDeviceVulkan::_free_rids(T &p_owner, const char *p_type) {
+ List<RID> owned;
+ p_owner.get_owned_list(&owned);
+ if (owned.size()) {
+ WARN_PRINT(itos(owned.size()) + " RIDs of type '" + p_type + "' were leaked.");
+ for (List<RID>::Element *E = owned.front(); E; E = E->next()) {
+ free(E->get());
+ }
+ }
+}
+
void RenderingDeviceVulkan::finalize() {
+ //free all resources
+
+ context->flush(false, false);
+
+ _free_rids(pipeline_owner, "Pipeline");
+ _free_rids(uniform_set_owner, "UniformSet");
+ _free_rids(texture_buffer_owner, "TextureBuffer");
+ _free_rids(storage_buffer_owner, "StorageBuffer");
+ _free_rids(uniform_buffer_owner, "UniformBuffer");
+ _free_rids(shader_owner, "Shader");
+ _free_rids(index_array_owner, "IndexArray");
+ _free_rids(index_buffer_owner, "IndexBuffer");
+ _free_rids(vertex_array_owner, "VertexArray");
+ _free_rids(vertex_buffer_owner, "VertexBuffer");
+ _free_rids(framebuffer_owner, "Framebuffer");
+ _free_rids(sampler_owner, "Sampler");
+ {
+ //for textures it's a bit more difficult because they may be shared
+ List<RID> owned;
+ texture_owner.get_owned_list(&owned);
+ if (owned.size()) {
+ WARN_PRINT(itos(owned.size()) + " RIDs of type 'Texture' were leaked.");
+ //free shared first
+ for (List<RID>::Element *E = owned.front(); E;) {
+
+ List<RID>::Element *N = E->next();
+ if (texture_is_shared(E->get())) {
+ free(E->get());
+ owned.erase(E->get());
+ }
+ E = N;
+ }
+ //free non shared second, this will avoid an error trying to free unexisting textures due to dependencies.
+ for (List<RID>::Element *E = owned.front(); E; E = E->next()) {
+ free(E->get());
+ }
+ }
+ }
+
+ //free everything pending
+ for (int i = 0; i < frame_count; i++) {
+ int f = (frame + i) % frame_count;
+ _free_pending_resources(f);
+ vkDestroyCommandPool(device, frames[i].command_pool, NULL);
+ }
+
+ for (int i = 0; i < split_draw_list_allocators.size(); i++) {
+ vkDestroyCommandPool(device, split_draw_list_allocators[i].command_pool, NULL);
+ }
+
memdelete_arr(frames);
+
+ for (int i = 0; i < staging_buffer_blocks.size(); i++) {
+ vmaDestroyBuffer(allocator, staging_buffer_blocks[i].buffer, staging_buffer_blocks[i].allocation);
+ }
+
+ //all these should be clear at this point
+ ERR_FAIL_COND(descriptor_pools.size());
+ ERR_FAIL_COND(dependency_map.size());
+ ERR_FAIL_COND(reverse_dependency_map.size());
}
RenderingDeviceVulkan::RenderingDeviceVulkan() {
+ screen_prepared = false;
}
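
The _memory_barrier() helper added in this file reduces to a single global VkMemoryBarrier recorded with vkCmdPipelineBarrier after each staging upload. Below is a standalone sketch of the same pattern, assuming the command buffer and stage/access masks are passed in rather than taken from the frames[] array; it mirrors the helper but is not the engine function itself.

// Global memory barrier between a transfer write and a later read stage.
static void emit_memory_barrier(VkCommandBuffer cmd,
		VkPipelineStageFlags src_stage, VkPipelineStageFlags dst_stage,
		VkAccessFlags src_access, VkAccessFlags dst_access) {
	VkMemoryBarrier barrier = {};
	barrier.sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER;
	barrier.pNext = NULL;
	barrier.srcAccessMask = src_access; // e.g. VK_ACCESS_TRANSFER_WRITE_BIT
	barrier.dstAccessMask = dst_access; // e.g. VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT
	// One global barrier; no buffer- or image-specific barriers.
	vkCmdPipelineBarrier(cmd, src_stage, dst_stage, 0, 1, &barrier, 0, NULL, 0, NULL);
}

// Example: make a vertex-buffer upload visible to the vertex input stage.
// emit_memory_barrier(cmd, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
//		VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT);
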
diff --git a/drivers/vulkan/rendering_device_vulkan.h b/drivers/vulkan/rendering_device_vulkan.h
index e1dfb1fc4b..5042078f84 100644
--- a/drivers/vulkan/rendering_device_vulkan.h
+++ b/drivers/vulkan/rendering_device_vulkan.h
@@ -6,6 +6,9 @@
#include "core/rid_owner.h"
#include "servers/visual/rendering_device.h"
#include "thirdparty/glslang/glslang/Public/ShaderLang.h"
+#ifdef DEBUG_ENABLED
+#define _DEBUG
+#endif
#include "vk_mem_alloc.h"
#include <vulkan/vulkan.h>
//todo:
@@ -177,6 +180,8 @@ class RenderingDeviceVulkan : public RenderingDevice {
Error _buffer_free(Buffer *p_buffer);
Error _buffer_update(Buffer *p_buffer, size_t p_offset, const uint8_t *p_data, size_t p_data_size, bool p_use_draw_command_buffer = false, uint32_t p_required_align = 32);
+ void _memory_barrier(VkPipelineStageFlags p_src_stage_mask, VkPipelineStageFlags p_dst_stage_mask, VkAccessFlags p_src_access, VkAccessFlags p_dst_sccess, bool p_sync_with_draw);
+
/*********************/
/**** FRAMEBUFFER ****/
/*********************/
@@ -274,15 +279,13 @@ class RenderingDeviceVulkan : public RenderingDevice {
struct VertexDescriptionKey {
Vector<VertexDescription> vertex_formats;
- int buffer_count;
- bool operator<(const VertexDescriptionKey &p_key) const {
- if (buffer_count != p_key.buffer_count) {
- return buffer_count < p_key.buffer_count;
- }
- if (vertex_formats.size() != p_key.vertex_formats.size()) {
- return vertex_formats.size() < p_key.vertex_formats.size();
+ bool operator==(const VertexDescriptionKey &p_key) const {
+ int vdc = vertex_formats.size();
+ int vdck = p_key.vertex_formats.size();
+
+ if (vdc != vdck) {
+ return false;
} else {
- int vdc = vertex_formats.size();
const VertexDescription *a_ptr = vertex_formats.ptr();
const VertexDescription *b_ptr = p_key.vertex_formats.ptr();
for (int i = 0; i < vdc; i++) {
@@ -290,29 +293,51 @@ class RenderingDeviceVulkan : public RenderingDevice {
const VertexDescription &b = b_ptr[i];
if (a.location != b.location) {
- return a.location < b.location;
+ return false;
}
if (a.offset != b.offset) {
- return a.offset < b.offset;
+ return false;
}
if (a.format != b.format) {
- return a.format < b.format;
+ return false;
}
if (a.stride != b.stride) {
- return a.stride < b.stride;
+ return false;
}
- return a.frequency < b.frequency;
+ return a.frequency != b.frequency;
}
- return false; //they are equal
+ return true; //they are equal
}
}
+
+ uint32_t hash() const {
+ int vdc = vertex_formats.size();
+ uint32_t h = hash_djb2_one_32(vdc);
+ const VertexDescription *ptr = vertex_formats.ptr();
+ for (int i = 0; i < vdc; i++) {
+ const VertexDescription &vd = ptr[i];
+ h = hash_djb2_one_32(vd.location, h);
+ h = hash_djb2_one_32(vd.offset, h);
+ h = hash_djb2_one_32(vd.format, h);
+ h = hash_djb2_one_32(vd.stride, h);
+ h = hash_djb2_one_32(vd.frequency, h);
+ }
+ return h;
+ }
+ };
+
+ struct VertexDescriptionHash {
+ static _FORCE_INLINE_ uint32_t hash(const VertexDescriptionKey &p_key) {
+ return p_key.hash();
+ }
};
// This is a cache and it's never freed, it ensures that
// ID used for a specific format always remain the same.
- Map<VertexDescriptionKey, VertexFormatID> vertex_format_cache;
+ HashMap<VertexDescriptionKey, VertexFormatID, VertexDescriptionHash> vertex_format_cache;
+
struct VertexDescriptionCache {
- const Map<VertexDescriptionKey, VertexFormatID>::Element *E;
+ Vector<VertexDescription> vertex_formats;
VkVertexInputBindingDescription *bindings;
VkVertexInputAttributeDescription *attributes;
VkPipelineVertexInputStateCreateInfo create_info;
@@ -570,7 +595,7 @@ class RenderingDeviceVulkan : public RenderingDevice {
struct DrawList {
VkCommandBuffer command_buffer; //if persistent, this is owned, otherwise it's shared with the ringbuffer
-
+ Rect2i viewport;
struct Validation {
bool active; //means command buffer was not closes, so you can keep adding things
FramebufferFormatID framebuffer_format;
@@ -669,7 +694,7 @@ class RenderingDeviceVulkan : public RenderingDevice {
int frame_count; //total amount of frames
uint64_t frames_drawn;
- void _free_pending_resources();
+ void _free_pending_resources(int p_frame);
VmaAllocator allocator;
@@ -677,12 +702,19 @@ class RenderingDeviceVulkan : public RenderingDevice {
void _free_internal(RID p_id);
+ bool screen_prepared;
+
+ template <class T>
+ void _free_rids(T &p_owner, const char *p_type);
+
public:
virtual RID texture_create(const TextureFormat &p_format, const TextureView &p_view, const Vector<PoolVector<uint8_t> > &p_data = Vector<PoolVector<uint8_t> >());
virtual RID texture_create_shared(const TextureView &p_view, RID p_with_texture);
virtual Error texture_update(RID p_texture, uint32_t p_layer, const PoolVector<uint8_t> &p_data, bool p_sync_with_draw = false);
virtual bool texture_is_format_supported_for_usage(DataFormat p_format, uint32_t p_usage) const;
+ virtual bool texture_is_shared(RID p_texture);
+ virtual bool texture_is_valid(RID p_texture);
/*********************/
/**** FRAMEBUFFER ****/
@@ -739,6 +771,7 @@ public:
/*************************/
virtual RID render_pipeline_create(RID p_shader, FramebufferFormatID p_framebuffer_format, VertexFormatID p_vertex_format, RenderPrimitive p_render_primitive, const PipelineRasterizationState &p_rasterization_state, const PipelineMultisampleState &p_multisample_state, const PipelineDepthStencilState &p_depth_stencil_state, const PipelineColorBlendState &p_blend_state, int p_dynamic_state_flags = 0);
+ virtual bool render_pipeline_is_valid(RID p_pipeline);
/****************/
/**** SCREEN ****/
@@ -760,6 +793,7 @@ public:
virtual void draw_list_bind_uniform_set(DrawListID p_list, RID p_uniform_set, uint32_t p_index);
virtual void draw_list_bind_vertex_array(DrawListID p_list, RID p_vertex_array);
virtual void draw_list_bind_index_array(DrawListID p_list, RID p_index_array);
+ virtual void draw_list_set_line_width(DrawListID p_list, float p_width);
virtual void draw_list_set_push_constant(DrawListID p_list, void *p_data, uint32_t p_data_size);
virtual void draw_list_draw(DrawListID p_list, bool p_use_indices, uint32_t p_instances = 1);
@@ -775,6 +809,7 @@ public:
virtual void free(RID p_id);
+ virtual void prepare_screen_for_drawing();
void initialize(VulkanContext *p_context);
void finalize();
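
The header changes replace the ordered Map keyed by operator< with a HashMap keyed by operator== plus a hash() method, so vertex formats with identical contents always resolve to the same cached VertexFormatID. Below is a rough standalone analogue of that key/hasher split, using std::unordered_map and a djb2-style hash instead of Godot's HashMap and hash_djb2_one_32; it is an illustration only, not the engine types.

#include <cstdint>
#include <unordered_map>
#include <vector>

struct VertexDescription {
	uint32_t location, offset, format, stride, frequency;
	bool operator==(const VertexDescription &o) const {
		return location == o.location && offset == o.offset && format == o.format &&
				stride == o.stride && frequency == o.frequency;
	}
};

struct VertexKey {
	std::vector<VertexDescription> vertex_formats;
	bool operator==(const VertexKey &o) const { return vertex_formats == o.vertex_formats; }
};

struct VertexKeyHash {
	size_t operator()(const VertexKey &k) const {
		size_t h = 5381; // djb2-style rolling hash over every field of every element
		for (const VertexDescription &vd : k.vertex_formats) {
			h = ((h << 5) + h) + vd.location;
			h = ((h << 5) + h) + vd.offset;
			h = ((h << 5) + h) + vd.format;
			h = ((h << 5) + h) + vd.stride;
			h = ((h << 5) + h) + vd.frequency;
		}
		return h;
	}
};

// Identical vertex layouts always map to the same format ID.
std::unordered_map<VertexKey, uint64_t, VertexKeyHash> vertex_format_cache;
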
diff --git a/drivers/vulkan/vk_mem_alloc.cpp b/drivers/vulkan/vk_mem_alloc.cpp
index a2023d33b2..8e91e8a08c 100644
--- a/drivers/vulkan/vk_mem_alloc.cpp
+++ b/drivers/vulkan/vk_mem_alloc.cpp
@@ -1,2 +1,5 @@
#define VMA_IMPLEMENTATION
+#ifdef DEBUG_ENABLED
+#define _DEBUG
+#endif
#include "vk_mem_alloc.h"
diff --git a/drivers/vulkan/vulkan_context.cpp b/drivers/vulkan/vulkan_context.cpp
index 991429f963..37eb99db7a 100644
--- a/drivers/vulkan/vulkan_context.cpp
+++ b/drivers/vulkan/vulkan_context.cpp
@@ -471,10 +471,7 @@ Error VulkanContext::_create_device() {
return OK;
}
-Error VulkanContext::_create_swap_chain() {
-
- VkResult err = _create_surface(&surface, inst);
- ERR_FAIL_COND_V(err, ERR_CANT_CREATE);
+Error VulkanContext::_initialize_queues(VkSurfaceKHR surface) {
// Iterate over each queue to learn whether it supports presenting:
VkBool32 *supportsPresent = (VkBool32 *)malloc(queue_family_count * sizeof(VkBool32));
@@ -551,7 +548,7 @@ Error VulkanContext::_create_swap_chain() {
// Get the list of VkFormat's that are supported:
uint32_t formatCount;
- err = fpGetPhysicalDeviceSurfaceFormatsKHR(gpu, surface, &formatCount, NULL);
+ VkResult err = fpGetPhysicalDeviceSurfaceFormatsKHR(gpu, surface, &formatCount, NULL);
ERR_FAIL_COND_V(err, ERR_CANT_CREATE);
VkSurfaceFormatKHR *surfFormats = (VkSurfaceFormatKHR *)malloc(formatCount * sizeof(VkSurfaceFormatKHR));
err = fpGetPhysicalDeviceSurfaceFormatsKHR(gpu, surface, &formatCount, surfFormats);
@@ -566,6 +563,13 @@ Error VulkanContext::_create_swap_chain() {
format = surfFormats[0].format;
}
color_space = surfFormats[0].colorSpace;
+
+ Error serr = _create_semaphores();
+ if (serr) {
+ return serr;
+ }
+
+ queues_initialized = true;
return OK;
}
@@ -608,21 +612,111 @@ Error VulkanContext::_create_semaphores() {
return OK;
}
-Error VulkanContext::_prepare_buffers() {
+int VulkanContext::_window_create(VkSurfaceKHR p_surface, int p_width, int p_height) {
+
+ if (!queues_initialized) {
+ // We use a single GPU, but we need a surface to initialize the
+ // queues, so this process must be deferred until a surface
+ // is created.
+ _initialize_queues(p_surface);
+ }
+
+ Window window;
+ window.surface = p_surface;
+ window.width = p_width;
+ window.height = p_height;
+ Error err = _update_swap_chain(&window);
+ ERR_FAIL_COND_V(err != OK, -1);
+
+ int id = last_window_id;
+ windows[id] = window;
+ last_window_id++;
+ return id;
+}
+
+void VulkanContext::window_resize(int p_window, int p_width, int p_height) {
+ ERR_FAIL_COND(!windows.has(p_window));
+ windows[p_window].width = p_width;
+ windows[p_window].height = p_height;
+ _update_swap_chain(&windows[p_window]);
+}
+
+int VulkanContext::window_get_width(int p_window) {
+ ERR_FAIL_COND_V(!windows.has(p_window), -1);
+ return windows[p_window].width;
+}
+
+int VulkanContext::window_get_height(int p_window) {
+ ERR_FAIL_COND_V(!windows.has(p_window), -1);
+ return windows[p_window].height;
+}
+
+VkRenderPass VulkanContext::window_get_render_pass(int p_window) {
+ ERR_FAIL_COND_V(!windows.has(p_window), VK_NULL_HANDLE);
+ Window *w = &windows[p_window];
+ //vulkan use of currentbuffer
+ return w->render_pass;
+}
+
+VkFramebuffer VulkanContext::window_get_framebuffer(int p_window) {
+ ERR_FAIL_COND_V(!windows.has(p_window), VK_NULL_HANDLE);
+ ERR_FAIL_COND_V(!buffers_prepared, VK_NULL_HANDLE);
+ Window *w = &windows[p_window];
+ //vulkan use of currentbuffer
+ return w->swapchain_image_resources[w->current_buffer].framebuffer;
+}
+
+void VulkanContext::window_destroy(int p_window_id) {
+ ERR_FAIL_COND(!windows.has(p_window_id));
+ _clean_up_swap_chain(&windows[p_window_id]);
+ vkDestroySurfaceKHR(inst, windows[p_window_id].surface, NULL);
+ windows.erase(p_window_id);
+}
+
+Error VulkanContext::_clean_up_swap_chain(Window *window) {
+
+ if (!window->swapchain) {
+ return OK;
+ }
+ vkDeviceWaitIdle(device);
+
+ //this destroys images associated it seems
+ fpDestroySwapchainKHR(device, window->swapchain, NULL);
+ window->swapchain = VK_NULL_HANDLE;
+ vkDestroyRenderPass(device, window->render_pass, NULL);
+ if (window->swapchain_image_resources) {
+ for (uint32_t i = 0; i < swapchainImageCount; i++) {
+ vkDestroyImageView(device, window->swapchain_image_resources[i].view, NULL);
+ vkDestroyFramebuffer(device, window->swapchain_image_resources[i].framebuffer, NULL);
+ }
+
+ free(window->swapchain_image_resources);
+ window->swapchain_image_resources = NULL;
+ }
+ if (separate_present_queue) {
+ vkDestroyCommandPool(device, window->present_cmd_pool, NULL);
+ }
+ return OK;
+}
+
+Error VulkanContext::_update_swap_chain(Window *window) {
VkResult err;
- VkSwapchainKHR oldSwapchain = swapchain;
+
+ if (window->swapchain) {
+ _clean_up_swap_chain(window);
+ }
// Check the surface capabilities and formats
VkSurfaceCapabilitiesKHR surfCapabilities;
- err = fpGetPhysicalDeviceSurfaceCapabilitiesKHR(gpu, surface, &surfCapabilities);
+ err = fpGetPhysicalDeviceSurfaceCapabilitiesKHR(gpu, window->surface, &surfCapabilities);
ERR_FAIL_COND_V(err, ERR_CANT_CREATE);
uint32_t presentModeCount;
- err = fpGetPhysicalDeviceSurfacePresentModesKHR(gpu, surface, &presentModeCount, NULL);
+ err = fpGetPhysicalDeviceSurfacePresentModesKHR(gpu, window->surface, &presentModeCount, NULL);
ERR_FAIL_COND_V(err, ERR_CANT_CREATE);
VkPresentModeKHR *presentModes = (VkPresentModeKHR *)malloc(presentModeCount * sizeof(VkPresentModeKHR));
ERR_FAIL_COND_V(!presentModes, ERR_CANT_CREATE);
- err = fpGetPhysicalDeviceSurfacePresentModesKHR(gpu, surface, &presentModeCount, presentModes);
+ err = fpGetPhysicalDeviceSurfacePresentModesKHR(gpu, window->surface, &presentModeCount, presentModes);
ERR_FAIL_COND_V(err, ERR_CANT_CREATE);
VkExtent2D swapchainExtent;
@@ -631,8 +725,8 @@ Error VulkanContext::_prepare_buffers() {
// If the surface size is undefined, the size is set to the size
// of the images requested, which must fit within the minimum and
// maximum values.
- swapchainExtent.width = width;
- swapchainExtent.height = height;
+ swapchainExtent.width = window->width;
+ swapchainExtent.height = window->height;
if (swapchainExtent.width < surfCapabilities.minImageExtent.width) {
swapchainExtent.width = surfCapabilities.minImageExtent.width;
@@ -648,17 +742,14 @@ Error VulkanContext::_prepare_buffers() {
} else {
// If the surface size is defined, the swap chain size must match
swapchainExtent = surfCapabilities.currentExtent;
- width = surfCapabilities.currentExtent.width;
- height = surfCapabilities.currentExtent.height;
+ window->width = surfCapabilities.currentExtent.width;
+ window->height = surfCapabilities.currentExtent.height;
}
- if (width == 0 || height == 0) {
- is_minimized = true;
+ if (window->width == 0 || window->height == 0) {
+ //likely window minimized, no swapchain created
return OK;
- } else {
- is_minimized = false;
}
-
// The FIFO present mode is guaranteed by the spec to be supported
// and to have no tearing. It's a great default present mode to use.
VkPresentModeKHR swapchainPresentMode = VK_PRESENT_MODE_FIFO_KHR;
@@ -690,15 +781,15 @@ Error VulkanContext::_prepare_buffers() {
// the application wants the late image to be immediately displayed, even
// though that may mean some tearing.
- if (presentMode != swapchainPresentMode) {
+ if (window->presentMode != swapchainPresentMode) {
for (size_t i = 0; i < presentModeCount; ++i) {
- if (presentModes[i] == presentMode) {
- swapchainPresentMode = presentMode;
+ if (presentModes[i] == window->presentMode) {
+ swapchainPresentMode = window->presentMode;
break;
}
}
}
- ERR_FAIL_COND_V_MSG(swapchainPresentMode != presentMode, ERR_CANT_CREATE, "Present mode specified is not supported\n");
+ ERR_FAIL_COND_V_MSG(swapchainPresentMode != window->presentMode, ERR_CANT_CREATE, "Present mode specified is not supported\n");
// Determine the number of VkImages to use in the swap chain.
// Application desires to acquire 3 images at a time for triple
@@ -739,7 +830,7 @@ Error VulkanContext::_prepare_buffers() {
VkSwapchainCreateInfoKHR swapchain_ci = {
.sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR,
.pNext = NULL,
- .surface = surface,
+ .surface = window->surface,
.minImageCount = desiredNumOfSwapchainImages,
.imageFormat = format,
.imageColorSpace = color_space,
@@ -756,33 +847,33 @@ Error VulkanContext::_prepare_buffers() {
.compositeAlpha = compositeAlpha,
.presentMode = swapchainPresentMode,
.clipped = true,
- .oldSwapchain = oldSwapchain,
+ .oldSwapchain = NULL,
};
- uint32_t i;
- err = fpCreateSwapchainKHR(device, &swapchain_ci, NULL, &swapchain);
- ERR_FAIL_COND_V(err, ERR_CANT_CREATE);
- // If we just re-created an existing swapchain, we should destroy the old
- // swapchain at this point.
- // Note: destroying the swapchain also cleans up all its associated
- // presentable images once the platform is done with them.
- if (oldSwapchain != VK_NULL_HANDLE) {
- fpDestroySwapchainKHR(device, oldSwapchain, NULL);
- }
+ err = fpCreateSwapchainKHR(device, &swapchain_ci, NULL, &window->swapchain);
+ ERR_FAIL_COND_V(err, ERR_CANT_CREATE);
- err = fpGetSwapchainImagesKHR(device, swapchain, &swapchainImageCount, NULL);
+ uint32_t sp_image_count;
+ err = fpGetSwapchainImagesKHR(device, window->swapchain, &sp_image_count, NULL);
ERR_FAIL_COND_V(err, ERR_CANT_CREATE);
+ if (swapchainImageCount == 0) {
+ //assign here for the first time.
+ swapchainImageCount = sp_image_count;
+ } else {
+ ERR_FAIL_COND_V(swapchainImageCount != sp_image_count, ERR_BUG);
+ }
+
VkImage *swapchainImages = (VkImage *)malloc(swapchainImageCount * sizeof(VkImage));
ERR_FAIL_COND_V(!swapchainImages, ERR_CANT_CREATE);
- err = fpGetSwapchainImagesKHR(device, swapchain, &swapchainImageCount, swapchainImages);
+ err = fpGetSwapchainImagesKHR(device, window->swapchain, &swapchainImageCount, swapchainImages);
ERR_FAIL_COND_V(err, ERR_CANT_CREATE);
- swapchain_image_resources =
+ window->swapchain_image_resources =
(SwapchainImageResources *)malloc(sizeof(SwapchainImageResources) * swapchainImageCount);
- ERR_FAIL_COND_V(!swapchain_image_resources, ERR_CANT_CREATE);
+ ERR_FAIL_COND_V(!window->swapchain_image_resources, ERR_CANT_CREATE);
- for (i = 0; i < swapchainImageCount; i++) {
+ for (uint32_t i = 0; i < swapchainImageCount; i++) {
VkImageViewCreateInfo color_image_view = {
.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
.pNext = NULL,
@@ -798,118 +889,84 @@ Error VulkanContext::_prepare_buffers() {
.subresourceRange = { .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT, .baseMipLevel = 0, .levelCount = 1, .baseArrayLayer = 0, .layerCount = 1 },
};
- swapchain_image_resources[i].image = swapchainImages[i];
+ window->swapchain_image_resources[i].image = swapchainImages[i];
- color_image_view.image = swapchain_image_resources[i].image;
+ color_image_view.image = window->swapchain_image_resources[i].image;
- err = vkCreateImageView(device, &color_image_view, NULL, &swapchain_image_resources[i].view);
+ err = vkCreateImageView(device, &color_image_view, NULL, &window->swapchain_image_resources[i].view);
ERR_FAIL_COND_V(err, ERR_CANT_CREATE);
}
- if (VK_GOOGLE_display_timing_enabled) {
- VkRefreshCycleDurationGOOGLE rc_dur;
- err = fpGetRefreshCycleDurationGOOGLE(device, swapchain, &rc_dur);
- ERR_FAIL_COND_V(err, ERR_CANT_CREATE);
- refresh_duration = rc_dur.refreshDuration;
-
- syncd_with_actual_presents = false;
- // Initially target 1X the refresh duration:
- target_IPD = refresh_duration;
- refresh_duration_multiplier = 1;
- prev_desired_present_time = 0;
- next_present_id = 1;
- }
-
if (NULL != presentModes) {
free(presentModes);
}
- return OK;
-}
-
-Error VulkanContext::_prepare_framebuffers() {
-
- //for this, we only need color (no depth), since Godot does not render to the main
- //render buffer
-
- const VkAttachmentDescription attachment = {
-
- .flags = 0,
- .format = format,
- .samples = VK_SAMPLE_COUNT_1_BIT,
- .loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR,
- .storeOp = VK_ATTACHMENT_STORE_OP_STORE,
- .stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE,
- .stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE,
- .initialLayout = VK_IMAGE_LAYOUT_UNDEFINED,
- .finalLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR,
+ /******** FRAMEBUFFER ************/
- };
- const VkAttachmentReference color_reference = {
- .attachment = 0,
- .layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
- };
+ {
+ const VkAttachmentDescription attachment = {
- const VkSubpassDescription subpass = {
- .flags = 0,
- .pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS,
- .inputAttachmentCount = 0,
- .pInputAttachments = NULL,
- .colorAttachmentCount = 1,
- .pColorAttachments = &color_reference,
- .pResolveAttachments = NULL,
- .pDepthStencilAttachment = NULL,
- .preserveAttachmentCount = 0,
- .pPreserveAttachments = NULL,
- };
- const VkRenderPassCreateInfo rp_info = {
- .sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO,
- .pNext = NULL,
- .flags = 0,
- .attachmentCount = 1,
- .pAttachments = &attachment,
- .subpassCount = 1,
- .pSubpasses = &subpass,
- .dependencyCount = 0,
- .pDependencies = NULL,
- };
- VkResult err;
+ .flags = 0,
+ .format = format,
+ .samples = VK_SAMPLE_COUNT_1_BIT,
+ .loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR,
+ .storeOp = VK_ATTACHMENT_STORE_OP_STORE,
+ .stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE,
+ .stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE,
+ .initialLayout = VK_IMAGE_LAYOUT_UNDEFINED,
+ .finalLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR,
- err = vkCreateRenderPass(device, &rp_info, NULL, &render_pass);
- ERR_FAIL_COND_V(err, ERR_CANT_CREATE);
+ };
+ const VkAttachmentReference color_reference = {
+ .attachment = 0,
+ .layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
+ };
- for (uint32_t i = 0; i < swapchainImageCount; i++) {
- const VkFramebufferCreateInfo fb_info = {
- .sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO,
+ const VkSubpassDescription subpass = {
+ .flags = 0,
+ .pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS,
+ .inputAttachmentCount = 0,
+ .pInputAttachments = NULL,
+ .colorAttachmentCount = 1,
+ .pColorAttachments = &color_reference,
+ .pResolveAttachments = NULL,
+ .pDepthStencilAttachment = NULL,
+ .preserveAttachmentCount = 0,
+ .pPreserveAttachments = NULL,
+ };
+ const VkRenderPassCreateInfo rp_info = {
+ .sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO,
.pNext = NULL,
- .renderPass = render_pass,
+ .flags = 0,
.attachmentCount = 1,
- .pAttachments = &swapchain_image_resources[i].view,
- .width = width,
- .height = height,
- .layers = 1,
+ .pAttachments = &attachment,
+ .subpassCount = 1,
+ .pSubpasses = &subpass,
+ .dependencyCount = 0,
+ .pDependencies = NULL,
};
- err = vkCreateFramebuffer(device, &fb_info, NULL, &swapchain_image_resources[i].framebuffer);
+ err = vkCreateRenderPass(device, &rp_info, NULL, &window->render_pass);
ERR_FAIL_COND_V(err, ERR_CANT_CREATE);
- }
- return OK;
-}
-
-Error VulkanContext::_create_buffers() {
-
- Error error = _prepare_buffers();
- if (error != OK) {
- return error;
- }
+ for (uint32_t i = 0; i < swapchainImageCount; i++) {
+ const VkFramebufferCreateInfo fb_info = {
+ .sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO,
+ .pNext = NULL,
+ .renderPass = window->render_pass,
+ .attachmentCount = 1,
+ .pAttachments = &window->swapchain_image_resources[i].view,
+ .width = (uint32_t)window->width,
+ .height = (uint32_t)window->height,
+ .layers = 1,
+ };
- if (minimized) {
- prepared = false;
- return OK;
+ err = vkCreateFramebuffer(device, &fb_info, NULL, &window->swapchain_image_resources[i].framebuffer);
+ ERR_FAIL_COND_V(err, ERR_CANT_CREATE);
+ }
}
- _prepare_framebuffers();
+ /******** SEPARATE PRESENT QUEUE ************/
if (separate_present_queue) {
const VkCommandPoolCreateInfo present_cmd_pool_info = {
@@ -918,18 +975,18 @@ Error VulkanContext::_create_buffers() {
.flags = 0,
.queueFamilyIndex = present_queue_family_index,
};
- VkResult err = vkCreateCommandPool(device, &present_cmd_pool_info, NULL, &present_cmd_pool);
+ err = vkCreateCommandPool(device, &present_cmd_pool_info, NULL, &window->present_cmd_pool);
ERR_FAIL_COND_V(err, ERR_CANT_CREATE);
const VkCommandBufferAllocateInfo present_cmd_info = {
.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,
.pNext = NULL,
- .commandPool = present_cmd_pool,
+ .commandPool = window->present_cmd_pool,
.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY,
.commandBufferCount = 1,
};
for (uint32_t i = 0; i < swapchainImageCount; i++) {
err = vkAllocateCommandBuffers(device, &present_cmd_info,
- &swapchain_image_resources[i].graphics_to_present_cmd);
+ &window->swapchain_image_resources[i].graphics_to_present_cmd);
ERR_FAIL_COND_V(err, ERR_CANT_CREATE);
const VkCommandBufferBeginInfo cmd_buf_info = {
@@ -938,7 +995,7 @@ Error VulkanContext::_create_buffers() {
.flags = VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT,
.pInheritanceInfo = NULL,
};
- err = vkBeginCommandBuffer(swapchain_image_resources[i].graphics_to_present_cmd, &cmd_buf_info);
+ err = vkBeginCommandBuffer(window->swapchain_image_resources[i].graphics_to_present_cmd, &cmd_buf_info);
ERR_FAIL_COND_V(err, ERR_CANT_CREATE);
VkImageMemoryBarrier image_ownership_barrier = { .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
@@ -949,49 +1006,29 @@ Error VulkanContext::_create_buffers() {
.newLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR,
.srcQueueFamilyIndex = graphics_queue_family_index,
.dstQueueFamilyIndex = present_queue_family_index,
- .image = swapchain_image_resources[i].image,
+ .image = window->swapchain_image_resources[i].image,
.subresourceRange = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1 } };
- vkCmdPipelineBarrier(swapchain_image_resources[i].graphics_to_present_cmd, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
+ vkCmdPipelineBarrier(window->swapchain_image_resources[i].graphics_to_present_cmd, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, 0, 0, NULL, 0, NULL, 1, &image_ownership_barrier);
- err = vkEndCommandBuffer(swapchain_image_resources[i].graphics_to_present_cmd);
+ err = vkEndCommandBuffer(window->swapchain_image_resources[i].graphics_to_present_cmd);
ERR_FAIL_COND_V(err, ERR_CANT_CREATE);
}
}
- current_buffer = 0;
- prepared = true;
+ //reset current buffer
+ window->current_buffer = 0;
return OK;
}
-Error VulkanContext::initialize(int p_width, int p_height, bool p_minimized) {
-
- screen_width = p_width;
- screen_height = p_height;
- minimized = p_minimized;
+Error VulkanContext::initialize() {
Error err = _create_physical_device();
if (err) {
return err;
}
-
- err = _create_swap_chain();
- if (err) {
- return err;
- }
-
- err = _create_semaphores();
- if (err) {
- return err;
- }
-
- err = _create_buffers();
- if (err) {
- return err;
- }
-
- print_line("Vulkan context creation success o_O");
+ print_line("Vulkan physical device creation success o_O");
return OK;
}
@@ -1012,10 +1049,7 @@ void VulkanContext::append_command_buffer(const VkCommandBuffer &pCommandBuffer)
void VulkanContext::flush(bool p_flush_setup, bool p_flush_pending) {
// ensure everything else pending is executed
- for (int i = 0; i < FRAME_LAG; i++) {
- int to_fence = (frame_index + i) % FRAME_LAG;
- vkWaitForFences(device, 1, &fences[to_fence], VK_TRUE, UINT64_MAX);
- }
+ vkDeviceWaitIdle(device);
//flush the pending setup buffer
@@ -1038,7 +1072,7 @@ void VulkanContext::flush(bool p_flush_setup, bool p_flush_pending) {
VkResult err = vkQueueSubmit(graphics_queue, 1, &submit_info, fences[frame_index]);
command_buffer_queue.write[0] = NULL;
ERR_FAIL_COND(err);
- vkWaitForFences(device, 1, &fences[frame_index], VK_TRUE, UINT64_MAX);
+ vkDeviceWaitIdle(device);
}
if (p_flush_pending && command_buffer_count > 1) {
@@ -1060,41 +1094,68 @@ void VulkanContext::flush(bool p_flush_setup, bool p_flush_pending) {
VkResult err = vkQueueSubmit(graphics_queue, 1, &submit_info, fences[frame_index]);
command_buffer_queue.write[0] = NULL;
ERR_FAIL_COND(err);
- vkWaitForFences(device, 1, &fences[frame_index], VK_TRUE, UINT64_MAX);
+ vkDeviceWaitIdle(device);
command_buffer_count = 1;
}
}
-Error VulkanContext::swap_buffers() {
+Error VulkanContext::prepare_buffers() {
+
+ if (!queues_initialized) {
+ return OK;
+ }
- // print_line("swapbuffers?");
VkResult err;
// Ensure no more than FRAME_LAG renderings are outstanding
vkWaitForFences(device, 1, &fences[frame_index], VK_TRUE, UINT64_MAX);
vkResetFences(device, 1, &fences[frame_index]);
- do {
- // Get the index of the next available swapchain image:
- err =
- fpAcquireNextImageKHR(device, swapchain, UINT64_MAX,
- image_acquired_semaphores[frame_index], VK_NULL_HANDLE, &current_buffer);
-
- if (err == VK_ERROR_OUT_OF_DATE_KHR) {
- // swapchain is out of date (e.g. the window was resized) and
- // must be recreated:
- print_line("early out of data");
- resize_notify();
- } else if (err == VK_SUBOPTIMAL_KHR) {
- print_line("early suboptimal");
- // swapchain is not as optimal as it could be, but the platform's
- // presentation engine will still present the image correctly.
- break;
- } else {
- ERR_FAIL_COND_V(err, ERR_CANT_CREATE);
+ for (Map<int, Window>::Element *E = windows.front(); E; E = E->next()) {
+
+ Window *w = &E->get();
+
+ if (w->swapchain == VK_NULL_HANDLE) {
+ continue;
}
- } while (err != VK_SUCCESS);
+
+ do {
+ // Get the index of the next available swapchain image:
+ err =
+ fpAcquireNextImageKHR(device, w->swapchain, UINT64_MAX,
+ image_acquired_semaphores[frame_index], VK_NULL_HANDLE, &w->current_buffer);
+
+ if (err == VK_ERROR_OUT_OF_DATE_KHR) {
+ // swapchain is out of date (e.g. the window was resized) and
+ // must be recreated:
+ print_line("early out of data");
+ //resize_notify();
+ _update_swap_chain(w);
+ } else if (err == VK_SUBOPTIMAL_KHR) {
+ print_line("early suboptimal");
+ // swapchain is not as optimal as it could be, but the platform's
+ // presentation engine will still present the image correctly.
+ break;
+ } else {
+ ERR_FAIL_COND_V(err, ERR_CANT_CREATE);
+ }
+ } while (err != VK_SUCCESS);
+ }
+
+ buffers_prepared = true;
+
+ return OK;
+}
+
+Error VulkanContext::swap_buffers() {
+
+ if (!queues_initialized) {
+ return OK;
+ }
+
+ // print_line("swapbuffers?");
+ VkResult err;
#if 0
if (VK_GOOGLE_display_timing_enabled) {
@@ -1154,8 +1215,21 @@ Error VulkanContext::swap_buffers() {
pipe_stage_flags = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
submit_info.waitSemaphoreCount = 1;
submit_info.pWaitSemaphores = &draw_complete_semaphores[frame_index];
- submit_info.commandBufferCount = 1;
- submit_info.pCommandBuffers = &swapchain_image_resources[current_buffer].graphics_to_present_cmd;
+ submit_info.commandBufferCount = 0;
+
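+ // Collect the graphics-to-present ownership transfer command buffer of every active window.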
+ VkCommandBuffer *cmdbufptr = (VkCommandBuffer *)alloca(sizeof(VkCommandBuffer) * windows.size());
+ submit_info.pCommandBuffers = cmdbufptr;
+
+ for (Map<int, Window>::Element *E = windows.front(); E; E = E->next()) {
+ Window *w = &E->get();
+
+ if (w->swapchain == VK_NULL_HANDLE) {
+ continue;
+ }
+ cmdbufptr[submit_info.commandBufferCount] = w->swapchain_image_resources[w->current_buffer].graphics_to_present_cmd;
+ submit_info.commandBufferCount++;
+ }
+
submit_info.signalSemaphoreCount = 1;
submit_info.pSignalSemaphores = &image_ownership_semaphores[frame_index];
err = vkQueueSubmit(present_queue, 1, &submit_info, nullFence);
@@ -1169,10 +1243,28 @@ Error VulkanContext::swap_buffers() {
.pNext = NULL,
.waitSemaphoreCount = 1,
.pWaitSemaphores = (separate_present_queue) ? &image_ownership_semaphores[frame_index] : &draw_complete_semaphores[frame_index],
- .swapchainCount = 1,
- .pSwapchains = &swapchain,
- .pImageIndices = &current_buffer,
+ .swapchainCount = 0,
+ .pSwapchains = NULL,
+ .pImageIndices = NULL,
};
+
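+ // Present the swapchains of all windows with a single vkQueuePresentKHR call.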
+ VkSwapchainKHR *pSwapchains = (VkSwapchainKHR *)alloca(sizeof(VkSwapchainKHR) * windows.size());
+ uint32_t *pImageIndices = (uint32_t *)alloca(sizeof(uint32_t) * windows.size());
+
+ present.pSwapchains = pSwapchains;
+ present.pImageIndices = pImageIndices;
+
+ for (Map<int, Window>::Element *E = windows.front(); E; E = E->next()) {
+ Window *w = &E->get();
+
+ if (w->swapchain == VK_NULL_HANDLE) {
+ continue;
+ }
+ pSwapchains[present.swapchainCount] = w->swapchain;
+ pImageIndices[present.swapchainCount] = w->current_buffer;
+ present.swapchainCount++;
+ }
+
#if 0
if (VK_KHR_incremental_present_enabled) {
// If using VK_KHR_incremental_present, we provide a hint of the region
@@ -1261,6 +1353,7 @@ Error VulkanContext::swap_buffers() {
ERR_FAIL_COND_V(err, ERR_CANT_CREATE);
}
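+ // The acquired images have been handed to the presentation engine; prepare_buffers() must run again next frame.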
+ buffers_prepared = false;
return OK;
}
@@ -1274,47 +1367,34 @@ VkDevice VulkanContext::get_device() {
VkPhysicalDevice VulkanContext::get_physical_device() {
return gpu;
}
-int VulkanContext::get_frame_count() const {
+int VulkanContext::get_swapchain_image_count() const {
return swapchainImageCount;
}
uint32_t VulkanContext::get_graphics_queue() const {
return graphics_queue_family_index;
}
-int VulkanContext::get_screen_width(int p_screen) {
- return width;
-}
-
-int VulkanContext::get_screen_height(int p_screen) {
- return height;
-}
-
-VkFramebuffer VulkanContext::get_frame_framebuffer(int p_frame) {
- return swapchain_image_resources[p_frame].framebuffer;
-}
VkFormat VulkanContext::get_screen_format() const {
return format;
}
-VkRenderPass VulkanContext::get_render_pass() {
- return render_pass;
-}
-
VkPhysicalDeviceLimits VulkanContext::get_device_limits() const {
return gpu_props.limits;
}
VulkanContext::VulkanContext() {
- presentMode = VK_PRESENT_MODE_FIFO_KHR;
command_buffer_count = 0;
instance_validation_layers = NULL;
use_validation_layers = true;
VK_KHR_incremental_present_enabled = true;
VK_GOOGLE_display_timing_enabled = true;
- swapchain = NULL;
- prepared = false;
command_buffer_queue.resize(1); //first one is the setup command always
command_buffer_queue.write[0] = NULL;
command_buffer_count = 1;
+ queues_initialized = false;
+
+ buffers_prepared = false;
+ swapchainImageCount = 0;
+ last_window_id = 0;
}
diff --git a/drivers/vulkan/vulkan_context.h b/drivers/vulkan/vulkan_context.h
index 7a62ef51e2..4176f85c0f 100644
--- a/drivers/vulkan/vulkan_context.h
+++ b/drivers/vulkan/vulkan_context.h
@@ -2,6 +2,7 @@
#define VULKAN_CONTEXT_H
#include "core/error_list.h"
+#include "core/map.h"
#include "core/ustring.h"
#include <vulkan/vulkan.h>
@@ -24,6 +25,7 @@ class VulkanContext {
VkDevice device;
//present
+ bool queues_initialized;
uint32_t graphics_queue_family_index;
uint32_t present_queue_family_index;
bool separate_present_queue;
@@ -40,36 +42,46 @@ class VulkanContext {
typedef struct {
VkImage image;
- VkCommandBuffer cmd;
VkCommandBuffer graphics_to_present_cmd;
VkImageView view;
- VkBuffer uniform_buffer;
- VkDeviceMemory uniform_memory;
VkFramebuffer framebuffer;
- VkDescriptorSet descriptor_set;
+
} SwapchainImageResources;
- VkSwapchainKHR swapchain;
- SwapchainImageResources *swapchain_image_resources;
- VkPresentModeKHR presentMode;
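+ // Presentation state is now tracked per window instead of as a single global swapchain.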
+ struct Window {
+
+ bool is_minimzed;
+ VkSurfaceKHR surface;
+ VkSwapchainKHR swapchain;
+ SwapchainImageResources *swapchain_image_resources;
+ VkPresentModeKHR presentMode;
+ uint32_t current_buffer;
+ int width;
+ int height;
+ VkCommandPool present_cmd_pool; //for separate present queue
+
+ VkRenderPass render_pass;
+
+ Window() {
+ width = 0;
+ height = 0;
+ render_pass = VK_NULL_HANDLE;
+ current_buffer = 0;
+ surface = VK_NULL_HANDLE;
+ swapchain_image_resources = NULL;
+ swapchain = VK_NULL_HANDLE;
+ is_minimzed = false;
+ presentMode = VK_PRESENT_MODE_FIFO_KHR;
+ }
+ };
+
+ Map<int, Window> windows;
+ int last_window_id;
uint32_t swapchainImageCount;
- uint64_t refresh_duration;
- bool syncd_with_actual_presents;
- uint64_t refresh_duration_multiplier;
- uint64_t target_IPD; // image present duration (inverse of frame rate)
- uint64_t prev_desired_present_time;
- uint32_t next_present_id;
- uint32_t last_early_id; // 0 if no early images
- uint32_t last_late_id; // 0 if no late images
- bool is_minimized;
- uint32_t current_buffer;
//commands
- VkRenderPass render_pass;
- VkCommandPool present_cmd_pool; //for separate present queue
bool prepared;
- int width, height;
//extensions
bool VK_KHR_incremental_present_enabled;
@@ -111,38 +123,46 @@ class VulkanContext {
void *pUserData);
Error _create_physical_device();
+
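+ // Picking a present-capable queue family requires a VkSurfaceKHR, so queue setup is split out and deferred until one exists.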
+ Error _initialize_queues(VkSurfaceKHR surface);
+
Error _create_device();
- Error _create_swap_chain();
- Error _create_semaphores();
- Error _prepare_buffers();
- Error _prepare_framebuffers();
- Error _create_buffers();
+ Error _clean_up_swap_chain(Window *window);
+
+ Error _update_swap_chain(Window *window);
- int screen_width;
- int screen_height;
- bool minimized;
+ Error _create_swap_chain();
+ Error _create_semaphores();
Vector<VkCommandBuffer> command_buffer_queue;
int command_buffer_count;
protected:
virtual const char *_get_platform_surface_extension() const = 0;
- virtual VkResult _create_surface(VkSurfaceKHR *surface, VkInstance p_instance) = 0;
+ // virtual VkResult _create_surface(VkSurfaceKHR *surface, VkInstance p_instance) = 0;
+
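+ // Platform backends register a native surface and get back the window id used by the window_* methods.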
+ virtual int _window_create(VkSurfaceKHR p_surface, int p_width, int p_height);
+
+ VkInstance _get_instance() {
+ return inst;
+ }
- VkSurfaceKHR &get_surface() { return surface; }
+ bool buffers_prepared;
public:
VkDevice get_device();
VkPhysicalDevice get_physical_device();
- int get_frame_count() const;
+ int get_swapchain_image_count() const;
uint32_t get_graphics_queue() const;
- int get_screen_width(int p_screen = 0);
- int get_screen_height(int p_screen = 0);
+ void window_resize(int p_window_id, int p_width, int p_height);
+ int window_get_width(int p_window = 0);
+ int window_get_height(int p_window = 0);
+ void window_destroy(int p_window_id);
+ VkFramebuffer window_get_framebuffer(int p_window = 0);
+ VkRenderPass window_get_render_pass(int p_window = 0);
- VkFramebuffer get_frame_framebuffer(int p_frame);
- VkRenderPass get_render_pass();
VkFormat get_screen_format() const;
VkPhysicalDeviceLimits get_device_limits() const;
@@ -150,8 +170,9 @@ public:
void append_command_buffer(const VkCommandBuffer &pCommandBuffer);
void resize_notify();
void flush(bool p_flush_setup = false, bool p_flush_pending = false);
+ Error prepare_buffers();
Error swap_buffers();
- Error initialize(int p_width, int p_height, bool p_minimized);
+ Error initialize();
VulkanContext();
};
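
For orientation, a minimal sketch of how a platform layer might drive the reworked per-window flow each frame. The surrounding class and the command buffer parameter are illustrative and not part of this diff; only prepare_buffers(), append_command_buffer() and swap_buffers() come from the API above.

// Hypothetical per-frame loop against the new VulkanContext API.
void ExampleDisplayServer::draw_frame(VulkanContext *context, VkCommandBuffer p_draw_cmd) {
	// Waits on the frame fence and acquires the next image of every registered window.
	context->prepare_buffers();

	// Rendering work recorded elsewhere is queued through the context.
	context->append_command_buffer(p_draw_cmd);

	// Submits the queued work plus the per-window ownership transfers, then
	// presents every swapchain in one vkQueuePresentKHR call.
	context->swap_buffers();
}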