62 files changed, 4354 insertions, 879 deletions
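A usage sketch (not part of the patch itself) of the most user-facing change below, the optional default argument added to Object::get_meta. The caller, key name, and fallback value here are hypothetical:

    #include "core/object/object.h"

    Variant read_high_score(Object *p_obj) {
        // Previously, get_meta() on a missing key always raised an error and returned a null Variant.
        // With this change, a non-null default is returned silently when the key is absent.
        return p_obj->get_meta("high_score", 0); // "high_score" and the 0 fallback are illustrative only.
    }

Note that the error path is still taken when the supplied default is itself null, since the new code only skips ERR_FAIL_V_MSG when p_default != Variant().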
diff --git a/core/object/object.cpp b/core/object/object.cpp
index 7db62fdc5c..0a7879c774 100644
--- a/core/object/object.cpp
+++ b/core/object/object.cpp
@@ -956,8 +956,14 @@ void Object::set_meta(const StringName &p_name, const Variant &p_value) {
 	}
 }
 
-Variant Object::get_meta(const StringName &p_name) const {
-	ERR_FAIL_COND_V_MSG(!metadata.has(p_name), Variant(), "The object does not have any 'meta' values with the key '" + p_name + "'.");
+Variant Object::get_meta(const StringName &p_name, const Variant &p_default) const {
+	if (!metadata.has(p_name)) {
+		if (p_default != Variant()) {
+			return p_default;
+		} else {
+			ERR_FAIL_V_MSG(Variant(), "The object does not have any 'meta' values with the key '" + p_name + "'.");
+		}
+	}
 	return metadata[p_name];
 }
 
@@ -1559,7 +1565,7 @@ void Object::_bind_methods() {
 
 	ClassDB::bind_method(D_METHOD("set_meta", "name", "value"), &Object::set_meta);
 	ClassDB::bind_method(D_METHOD("remove_meta", "name"), &Object::remove_meta);
-	ClassDB::bind_method(D_METHOD("get_meta", "name"), &Object::get_meta);
+	ClassDB::bind_method(D_METHOD("get_meta", "name", "default"), &Object::get_meta, DEFVAL(Variant()));
 	ClassDB::bind_method(D_METHOD("has_meta", "name"), &Object::has_meta);
 	ClassDB::bind_method(D_METHOD("get_meta_list"), &Object::_get_meta_list_bind);
 
diff --git a/core/object/object.h b/core/object/object.h
index dbaf9f13fd..f58a79b49c 100644
--- a/core/object/object.h
+++ b/core/object/object.h
@@ -745,7 +745,7 @@ public:
 	bool has_meta(const StringName &p_name) const;
 	void set_meta(const StringName &p_name, const Variant &p_value);
 	void remove_meta(const StringName &p_name);
-	Variant get_meta(const StringName &p_name) const;
+	Variant get_meta(const StringName &p_name, const Variant &p_default = Variant()) const;
 	void get_meta_list(List<StringName> *p_list) const;
 
 #ifdef TOOLS_ENABLED
diff --git a/doc/classes/Object.xml b/doc/classes/Object.xml
index d09f3a2b0d..77927b9a8a 100644
--- a/doc/classes/Object.xml
+++ b/doc/classes/Object.xml
@@ -366,8 +366,10 @@
 		<method name="get_meta" qualifiers="const">
 			<return type="Variant" />
 			<argument index="0" name="name" type="StringName" />
+			<argument index="1" name="default" type="Variant" default="null" />
 			<description>
 				Returns the object's metadata entry for the given [code]name[/code].
+				Throws error if the entry does not exist, unless [code]default[/code] is not [code]null[/code] (in which case the default value will be returned).
 			</description>
 		</method>
 		<method name="get_meta_list" qualifiers="const">
diff --git a/doc/classes/ProjectSettings.xml b/doc/classes/ProjectSettings.xml
index a8b4129061..6f85ed5b38 100644
--- a/doc/classes/ProjectSettings.xml
+++ b/doc/classes/ProjectSettings.xml
@@ -553,6 +553,10 @@
 	<member name="editor/script/templates_search_path" type="String" setter="" getter="" default=""res://script_templates"">
 		Search path for project-specific script templates. Godot will search for script templates both in the editor-specific path and in this project-specific path.
 	</member>
+	<member name="filesystem/import/blend/enabled" type="bool" setter="" getter="" default="false">
+		If [code]true[/code], 3D scene files with the [code].blend[/code] extension will be imported by converting them to glTF 2.0.
+		This requires configuring a path to a Blender executable in the editor settings at [code]filesystem/import/blend/blender_path[/code]. Blender 3.0 or later is required.
+ </member> <member name="gui/common/default_scroll_deadzone" type="int" setter="" getter="" default="0"> Default value for [member ScrollContainer.scroll_deadzone], which will be used for all [ScrollContainer]s unless overridden. </member> diff --git a/drivers/vulkan/rendering_device_vulkan.cpp b/drivers/vulkan/rendering_device_vulkan.cpp index 62de01e8bb..84ca7dbfc2 100644 --- a/drivers/vulkan/rendering_device_vulkan.cpp +++ b/drivers/vulkan/rendering_device_vulkan.cpp @@ -1318,7 +1318,7 @@ const VkImageType RenderingDeviceVulkan::vulkan_image_type[RenderingDevice::TEXT /**** BUFFER MANAGEMENT ****/ /***************************/ -Error RenderingDeviceVulkan::_buffer_allocate(Buffer *p_buffer, uint32_t p_size, uint32_t p_usage, VmaMemoryUsage p_mapping) { +Error RenderingDeviceVulkan::_buffer_allocate(Buffer *p_buffer, uint32_t p_size, uint32_t p_usage, VmaMemoryUsage p_mem_usage, VmaAllocationCreateFlags p_mem_flags) { VkBufferCreateInfo bufferInfo; bufferInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; bufferInfo.pNext = nullptr; @@ -1330,8 +1330,8 @@ Error RenderingDeviceVulkan::_buffer_allocate(Buffer *p_buffer, uint32_t p_size, bufferInfo.pQueueFamilyIndices = nullptr; VmaAllocationCreateInfo allocInfo; - allocInfo.flags = 0; - allocInfo.usage = p_mapping; + allocInfo.flags = p_mem_flags; + allocInfo.usage = p_mem_usage; allocInfo.requiredFlags = 0; allocInfo.preferredFlags = 0; allocInfo.memoryTypeBits = 0; @@ -1380,8 +1380,8 @@ Error RenderingDeviceVulkan::_insert_staging_block() { bufferInfo.pQueueFamilyIndices = nullptr; VmaAllocationCreateInfo allocInfo; - allocInfo.flags = 0; - allocInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY; + allocInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT; + allocInfo.usage = VMA_MEMORY_USAGE_AUTO_PREFER_HOST; allocInfo.requiredFlags = 0; allocInfo.preferredFlags = 0; allocInfo.memoryTypeBits = 0; @@ -1847,9 +1847,9 @@ RID RenderingDeviceVulkan::texture_create(const TextureFormat &p_format, const T uint32_t image_size = get_image_format_required_size(p_format.format, p_format.width, p_format.height, p_format.depth, p_format.mipmaps, &width, &height); VmaAllocationCreateInfo allocInfo; - allocInfo.flags = 0; + allocInfo.flags = (p_format.usage_bits & TEXTURE_USAGE_CPU_READ_BIT) ? VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT : 0; allocInfo.pool = nullptr; - allocInfo.usage = p_format.usage_bits & TEXTURE_USAGE_CPU_READ_BIT ? 
VMA_MEMORY_USAGE_CPU_ONLY : VMA_MEMORY_USAGE_GPU_ONLY; + allocInfo.usage = VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE; allocInfo.requiredFlags = 0; allocInfo.preferredFlags = 0; allocInfo.memoryTypeBits = 0; @@ -2703,7 +2703,7 @@ Vector<uint8_t> RenderingDeviceVulkan::texture_get_data(RID p_texture, uint32_t //allocate buffer VkCommandBuffer command_buffer = frames[frame].draw_command_buffer; //makes more sense to retrieve Buffer tmp_buffer; - _buffer_allocate(&tmp_buffer, buffer_size, VK_BUFFER_USAGE_TRANSFER_DST_BIT, VMA_MEMORY_USAGE_CPU_ONLY); + _buffer_allocate(&tmp_buffer, buffer_size, VK_BUFFER_USAGE_TRANSFER_DST_BIT, VMA_MEMORY_USAGE_AUTO_PREFER_HOST, VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT); { //Source image barrier VkImageMemoryBarrier image_memory_barrier; @@ -4097,7 +4097,7 @@ RID RenderingDeviceVulkan::vertex_buffer_create(uint32_t p_size_bytes, const Vec usage |= VK_BUFFER_USAGE_STORAGE_BUFFER_BIT; } Buffer buffer; - _buffer_allocate(&buffer, p_size_bytes, usage, VMA_MEMORY_USAGE_GPU_ONLY); + _buffer_allocate(&buffer, p_size_bytes, usage, VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE, 0); if (p_data.size()) { uint64_t data_size = p_data.size(); const uint8_t *r = p_data.ptr(); @@ -4259,7 +4259,7 @@ RID RenderingDeviceVulkan::index_buffer_create(uint32_t p_index_count, IndexBuff #else index_buffer.max_index = 0xFFFFFFFF; #endif - _buffer_allocate(&index_buffer, size_bytes, VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_INDEX_BUFFER_BIT, VMA_MEMORY_USAGE_GPU_ONLY); + _buffer_allocate(&index_buffer, size_bytes, VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_INDEX_BUFFER_BIT, VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE, 0); if (p_data.size()) { uint64_t data_size = p_data.size(); const uint8_t *r = p_data.ptr(); @@ -5371,7 +5371,7 @@ RID RenderingDeviceVulkan::uniform_buffer_create(uint32_t p_size_bytes, const Ve "Creating buffers with data is forbidden during creation of a draw list"); Buffer buffer; - Error err = _buffer_allocate(&buffer, p_size_bytes, VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT, VMA_MEMORY_USAGE_GPU_ONLY); + Error err = _buffer_allocate(&buffer, p_size_bytes, VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT, VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE, 0); ERR_FAIL_COND_V(err != OK, RID()); if (p_data.size()) { uint64_t data_size = p_data.size(); @@ -5397,7 +5397,7 @@ RID RenderingDeviceVulkan::storage_buffer_create(uint32_t p_size_bytes, const Ve if (p_usage & STORAGE_BUFFER_USAGE_DISPATCH_INDIRECT) { flags |= VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT; } - Error err = _buffer_allocate(&buffer, p_size_bytes, flags, VMA_MEMORY_USAGE_GPU_ONLY); + Error err = _buffer_allocate(&buffer, p_size_bytes, flags, VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE, 0); ERR_FAIL_COND_V(err != OK, RID()); if (p_data.size()) { @@ -5423,7 +5423,7 @@ RID RenderingDeviceVulkan::texture_buffer_create(uint32_t p_size_elements, DataF ERR_FAIL_COND_V(p_data.size() && (uint32_t)p_data.size() != size_bytes, RID()); TextureBuffer texture_buffer; - Error err = _buffer_allocate(&texture_buffer.buffer, size_bytes, VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT, VMA_MEMORY_USAGE_GPU_ONLY); + Error err = _buffer_allocate(&texture_buffer.buffer, size_bytes, VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT, VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE, 0); ERR_FAIL_COND_V(err != OK, RID()); if 
(p_data.size()) { @@ -6170,7 +6170,7 @@ Vector<uint8_t> RenderingDeviceVulkan::buffer_get_data(RID p_buffer) { VkCommandBuffer command_buffer = frames[frame].setup_command_buffer; Buffer tmp_buffer; - _buffer_allocate(&tmp_buffer, buffer->size, VK_BUFFER_USAGE_TRANSFER_DST_BIT, VMA_MEMORY_USAGE_CPU_ONLY); + _buffer_allocate(&tmp_buffer, buffer->size, VK_BUFFER_USAGE_TRANSFER_DST_BIT, VMA_MEMORY_USAGE_AUTO_PREFER_HOST, VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT); VkBufferCopy region; region.srcOffset = 0; region.dstOffset = 0; diff --git a/drivers/vulkan/rendering_device_vulkan.h b/drivers/vulkan/rendering_device_vulkan.h index a4d5af91a4..7d9bd19309 100644 --- a/drivers/vulkan/rendering_device_vulkan.h +++ b/drivers/vulkan/rendering_device_vulkan.h @@ -219,7 +219,7 @@ class RenderingDeviceVulkan : public RenderingDevice { } }; - Error _buffer_allocate(Buffer *p_buffer, uint32_t p_size, uint32_t p_usage, VmaMemoryUsage p_mapping); + Error _buffer_allocate(Buffer *p_buffer, uint32_t p_size, uint32_t p_usage, VmaMemoryUsage p_mem_usage, VmaAllocationCreateFlags p_mem_flags); Error _buffer_free(Buffer *p_buffer); Error _buffer_update(Buffer *p_buffer, size_t p_offset, const uint8_t *p_data, size_t p_data_size, bool p_use_draw_command_buffer = false, uint32_t p_required_align = 32); diff --git a/editor/editor_node.cpp b/editor/editor_node.cpp index 57998b7778..bcdd54edd4 100644 --- a/editor/editor_node.cpp +++ b/editor/editor_node.cpp @@ -3755,6 +3755,11 @@ void EditorNode::open_request(const String &p_path) { load_scene(p_path); // as it will be opened in separate tab } +void EditorNode::edit_foreign_resource(RES p_resource) { + load_scene(p_resource->get_path().get_slice("::", 0)); + InspectorDock::get_singleton()->call_deferred("edit_resource", p_resource); +} + void EditorNode::request_instance_scene(const String &p_path) { SceneTreeDock::get_singleton()->instantiate(p_path); } @@ -5740,6 +5745,7 @@ void EditorNode::_bind_methods() { ClassDB::bind_method("_get_scene_metadata", &EditorNode::_get_scene_metadata); ClassDB::bind_method("set_edited_scene", &EditorNode::set_edited_scene); ClassDB::bind_method("open_request", &EditorNode::open_request); + ClassDB::bind_method("edit_foreign_resource", &EditorNode::edit_foreign_resource); ClassDB::bind_method("_close_messages", &EditorNode::_close_messages); ClassDB::bind_method("_show_messages", &EditorNode::_show_messages); diff --git a/editor/editor_node.h b/editor/editor_node.h index 0c9df16b2e..785baee6c1 100644 --- a/editor/editor_node.h +++ b/editor/editor_node.h @@ -756,6 +756,7 @@ public: void select_editor_by_name(const String &p_name); void open_request(const String &p_path); + void edit_foreign_resource(RES p_resource); bool is_changing_scene() const; diff --git a/editor/editor_properties.cpp b/editor/editor_properties.cpp index cdb96bd0c8..c6b369f201 100644 --- a/editor/editor_properties.cpp +++ b/editor/editor_properties.cpp @@ -2975,6 +2975,12 @@ void EditorPropertyResource::_set_read_only(bool p_read_only) { }; void EditorPropertyResource::_resource_selected(const RES &p_resource, bool p_edit) { + if (p_resource->is_built_in() && !p_resource->get_path().is_empty() && p_resource->get_path().get_slice("::", 0) != EditorNode::get_singleton()->get_edited_scene()->get_scene_file_path()) { + // If the resource belongs to another scene, edit it in that scene instead. 
+ EditorNode::get_singleton()->call_deferred("edit_foreign_resource", p_resource); + return; + } + if (!p_edit && use_sub_inspector) { bool unfold = !get_edited_object()->editor_is_section_unfolded(get_edited_property()); get_edited_object()->editor_set_section_unfold(get_edited_property(), unfold); @@ -3172,9 +3178,8 @@ void EditorPropertyResource::_viewport_selected(const NodePath &p_path) { void EditorPropertyResource::setup(Object *p_object, const String &p_path, const String &p_base_type) { if (resource_picker) { - resource_picker->disconnect("resource_selected", callable_mp(this, &EditorPropertyResource::_resource_selected)); - resource_picker->disconnect("resource_changed", callable_mp(this, &EditorPropertyResource::_resource_changed)); memdelete(resource_picker); + resource_picker = nullptr; } if (p_path == "script" && p_base_type == "Script" && Object::cast_to<Node>(p_object)) { diff --git a/editor/editor_property_name_processor.cpp b/editor/editor_property_name_processor.cpp index d89247b429..af4ae3e576 100644 --- a/editor/editor_property_name_processor.cpp +++ b/editor/editor_property_name_processor.cpp @@ -125,6 +125,7 @@ EditorPropertyNameProcessor::EditorPropertyNameProcessor() { capitalize_string_remaps["dtls"] = "DTLS"; capitalize_string_remaps["etc"] = "ETC"; capitalize_string_remaps["etc2"] = "ETC2"; + capitalize_string_remaps["fbx"] = "FBX"; capitalize_string_remaps["fft"] = "FFT"; capitalize_string_remaps["fov"] = "FOV"; capitalize_string_remaps["fps"] = "FPS"; diff --git a/editor/inspector_dock.cpp b/editor/inspector_dock.cpp index b6dba7ec36..821e4dde11 100644 --- a/editor/inspector_dock.cpp +++ b/editor/inspector_dock.cpp @@ -451,6 +451,8 @@ void InspectorDock::_bind_methods() { ClassDB::bind_method("_menu_collapseall", &InspectorDock::_menu_collapseall); ClassDB::bind_method("_menu_expandall", &InspectorDock::_menu_expandall); + ClassDB::bind_method("edit_resource", &InspectorDock::edit_resource); + ADD_SIGNAL(MethodInfo("request_help")); } diff --git a/editor/plugins/node_3d_editor_plugin.cpp b/editor/plugins/node_3d_editor_plugin.cpp index ea119a33fa..eab477adb7 100644 --- a/editor/plugins/node_3d_editor_plugin.cpp +++ b/editor/plugins/node_3d_editor_plugin.cpp @@ -2564,7 +2564,7 @@ void Node3DEditorViewport::_notification(int p_what) { text += "\n"; text += vformat(TTR("Objects: %d\n"), viewport->get_render_info(Viewport::RENDER_INFO_TYPE_VISIBLE, Viewport::RENDER_INFO_OBJECTS_IN_FRAME)); - text += vformat(TTR("Primitives: %d\n"), viewport->get_render_info(Viewport::RENDER_INFO_TYPE_VISIBLE, Viewport::RENDER_INFO_PRIMITIVES_IN_FRAME)); + text += vformat(TTR("Primitive Indices: %d\n"), viewport->get_render_info(Viewport::RENDER_INFO_TYPE_VISIBLE, Viewport::RENDER_INFO_PRIMITIVES_IN_FRAME)); text += vformat(TTR("Draw Calls: %d"), viewport->get_render_info(Viewport::RENDER_INFO_TYPE_VISIBLE, Viewport::RENDER_INFO_DRAW_CALLS_IN_FRAME)); info_label->set_text(text); diff --git a/editor/plugins/visual_shader_editor_plugin.cpp b/editor/plugins/visual_shader_editor_plugin.cpp index 55c1651d16..580e71a788 100644 --- a/editor/plugins/visual_shader_editor_plugin.cpp +++ b/editor/plugins/visual_shader_editor_plugin.cpp @@ -5047,7 +5047,6 @@ VisualShaderEditor::VisualShaderEditor() { add_options.push_back(AddOption("LightColor", "Input", "Light", "VisualShaderNodeInput", vformat(input_param_for_light_shader_mode, "light_color", "LIGHT_COLOR"), { "light_color" }, VisualShaderNode::PORT_TYPE_VECTOR_3D, TYPE_FLAGS_LIGHT, Shader::MODE_SPATIAL)); 
add_options.push_back(AddOption("Metallic", "Input", "Light", "VisualShaderNodeInput", vformat(input_param_for_light_shader_mode, "metallic", "METALLIC"), { "metallic" }, VisualShaderNode::PORT_TYPE_SCALAR, TYPE_FLAGS_LIGHT, Shader::MODE_SPATIAL)); add_options.push_back(AddOption("Roughness", "Input", "Light", "VisualShaderNodeInput", vformat(input_param_for_light_shader_mode, "roughness", "ROUGHNESS"), { "roughness" }, VisualShaderNode::PORT_TYPE_SCALAR, TYPE_FLAGS_LIGHT, Shader::MODE_SPATIAL)); - add_options.push_back(AddOption("ShadowAttenuation", "Input", "Light", "VisualShaderNodeInput", vformat(input_param_for_light_shader_mode, "shadow_attenuation", "SHADOW_ATTENUATION"), { "shadow_attenuation" }, VisualShaderNode::PORT_TYPE_VECTOR_3D, TYPE_FLAGS_LIGHT, Shader::MODE_SPATIAL)); add_options.push_back(AddOption("Specular", "Input", "Light", "VisualShaderNodeInput", vformat(input_param_for_light_shader_mode, "specular", "SPECULAR_LIGHT"), { "specular" }, VisualShaderNode::PORT_TYPE_VECTOR_3D, TYPE_FLAGS_LIGHT, Shader::MODE_SPATIAL)); add_options.push_back(AddOption("View", "Input", "Light", "VisualShaderNodeInput", vformat(input_param_for_fragment_and_light_shader_modes, "view", "VIEW"), { "view" }, VisualShaderNode::PORT_TYPE_VECTOR_3D, TYPE_FLAGS_LIGHT, Shader::MODE_SPATIAL)); diff --git a/editor/project_manager.h b/editor/project_manager.h index 2965dc7d2e..4f053793fd 100644 --- a/editor/project_manager.h +++ b/editor/project_manager.h @@ -55,46 +55,46 @@ class ProjectManager : public Control { static ProjectManager *singleton; - TabContainer *tabs; + TabContainer *tabs = nullptr; - ProjectList *_project_list; + ProjectList *_project_list = nullptr; - LineEdit *search_box; - Label *loading_label; - OptionButton *filter_option; + LineEdit *search_box = nullptr; + Label *loading_label = nullptr; + OptionButton *filter_option = nullptr; - Button *run_btn; - Button *open_btn; - Button *rename_btn; - Button *erase_btn; - Button *erase_missing_btn; - Button *about_btn; + Button *run_btn = nullptr; + Button *open_btn = nullptr; + Button *rename_btn = nullptr; + Button *erase_btn = nullptr; + Button *erase_missing_btn = nullptr; + Button *about_btn = nullptr; - EditorAssetLibrary *asset_library; + EditorAssetLibrary *asset_library = nullptr; - EditorFileDialog *scan_dir; - ConfirmationDialog *language_restart_ask; + EditorFileDialog *scan_dir = nullptr; + ConfirmationDialog *language_restart_ask = nullptr; - ConfirmationDialog *erase_ask; - Label *erase_ask_label; - CheckBox *delete_project_contents; + ConfirmationDialog *erase_ask = nullptr; + Label *erase_ask_label = nullptr; + CheckBox *delete_project_contents = nullptr; - ConfirmationDialog *erase_missing_ask; - ConfirmationDialog *multi_open_ask; - ConfirmationDialog *multi_run_ask; - ConfirmationDialog *multi_scan_ask; - ConfirmationDialog *ask_update_settings; - ConfirmationDialog *open_templates; - EditorAbout *about; + ConfirmationDialog *erase_missing_ask = nullptr; + ConfirmationDialog *multi_open_ask = nullptr; + ConfirmationDialog *multi_run_ask = nullptr; + ConfirmationDialog *multi_scan_ask = nullptr; + ConfirmationDialog *ask_update_settings = nullptr; + ConfirmationDialog *open_templates = nullptr; + EditorAbout *about = nullptr; - HBoxContainer *settings_hb; + HBoxContainer *settings_hb = nullptr; - AcceptDialog *run_error_diag; - AcceptDialog *dialog_error; - ProjectDialog *npdialog; + AcceptDialog *run_error_diag = nullptr; + AcceptDialog *dialog_error = nullptr; + ProjectDialog *npdialog = nullptr; - OptionButton 
*language_btn; - LinkButton *version_btn; + OptionButton *language_btn = nullptr; + LinkButton *version_btn = nullptr; void _open_asset_library(); void _scan_projects(); diff --git a/editor/translations/extract.py b/editor/translations/extract.py index 8737eb5204..4a7e87e271 100755 --- a/editor/translations/extract.py +++ b/editor/translations/extract.py @@ -9,6 +9,40 @@ import subprocess import sys +class Message: + __slots__ = ("msgid", "msgid_plural", "msgctxt", "comments", "locations") + + def format(self): + lines = [] + + if self.comments: + for i, content in enumerate(self.comments): + prefix = "#. TRANSLATORS:" if i == 0 else "#." + lines.append(prefix + content) + + lines.append("#: " + " ".join(self.locations)) + + if self.msgctxt: + lines.append('msgctxt "{}"'.format(self.msgctxt)) + + if self.msgid_plural: + lines += [ + 'msgid "{}"'.format(self.msgid), + 'msgid_plural "{}"'.format(self.msgid_plural), + 'msgstr[0] ""', + 'msgstr[1] ""', + ] + else: + lines += [ + 'msgid "{}"'.format(self.msgid), + 'msgstr ""', + ] + + return "\n".join(lines) + + +messages_map = {} # (id, context) -> Message. + line_nb = False for arg in sys.argv[1:]: @@ -42,9 +76,6 @@ with open("editor/editor_property_name_processor.cpp") as f: remaps[m.group("from")] = m.group("to") -unique_str = [] -unique_loc = {} -ctx_group = {} # Store msgctx, msg, and locations. main_po = """ # LANGUAGE translation of the Godot Engine editor. # Copyright (c) 2007-2022 Juan Linietsky, Ariel Manzur. @@ -119,95 +150,6 @@ def _process_editor_string(name): return " ".join(capitalized_parts) -def _write_message(msgctx, msg, msg_plural, location): - global main_po - main_po += "#: " + location + "\n" - if msgctx != "": - main_po += 'msgctxt "' + msgctx + '"\n' - main_po += 'msgid "' + msg + '"\n' - if msg_plural != "": - main_po += 'msgid_plural "' + msg_plural + '"\n' - main_po += 'msgstr[0] ""\n' - main_po += 'msgstr[1] ""\n\n' - else: - main_po += 'msgstr ""\n\n' - - -def _add_additional_location(msgctx, msg, location): - global main_po - # Add additional location to previous occurrence. - if msgctx != "": - msg_pos = main_po.find('\nmsgctxt "' + msgctx + '"\nmsgid "' + msg + '"') - else: - msg_pos = main_po.find('\nmsgid "' + msg + '"') - - if msg_pos == -1: - print("Someone apparently thought writing Python was as easy as GDScript. Ping Akien.") - main_po = main_po[:msg_pos] + " " + location + main_po[msg_pos:] - - -def _write_translator_comment(msgctx, msg, translator_comment): - if translator_comment == "": - return - - global main_po - if msgctx != "": - msg_pos = main_po.find('\nmsgctxt "' + msgctx + '"\nmsgid "' + msg + '"') - else: - msg_pos = main_po.find('\nmsgid "' + msg + '"') - - # If it's a new message, just append comment to the end of PO file. - if msg_pos == -1: - main_po += _format_translator_comment(translator_comment, True) - return - - # Find position just before location. Translator comment will be added there. - translator_comment_pos = main_po.rfind("\n\n#", 0, msg_pos) + 2 - if translator_comment_pos - 2 == -1: - print("translator_comment_pos not found") - return - - # Check if a previous translator comment already exists. If so, merge them together. 
- if main_po.find("TRANSLATORS:", translator_comment_pos, msg_pos) != -1: - translator_comment_pos = main_po.find("\n#:", translator_comment_pos, msg_pos) + 1 - if translator_comment_pos == 0: - print('translator_comment_pos after "TRANSLATORS:" not found') - return - main_po = ( - main_po[:translator_comment_pos] - + _format_translator_comment(translator_comment, False) - + main_po[translator_comment_pos:] - ) - return - - main_po = ( - main_po[:translator_comment_pos] - + _format_translator_comment(translator_comment, True) - + main_po[translator_comment_pos:] - ) - - -def _format_translator_comment(comment, new): - if not comment: - return "" - - comment_lines = comment.split("\n") - - formatted_comment = "" - if not new: - for comment in comment_lines: - formatted_comment += "#. " + comment.strip() + "\n" - return formatted_comment - - formatted_comment = "#. TRANSLATORS: " - for i in range(len(comment_lines)): - if i == 0: - formatted_comment += comment_lines[i].strip() + "\n" - else: - formatted_comment += "#. " + comment_lines[i].strip() + "\n" - return formatted_comment - - def _is_block_translator_comment(translator_line): line = translator_line.strip() if line.find("//") == 0: @@ -301,32 +243,20 @@ def process_file(f, fname): def _add_message(msg, msg_plural, msgctx, location, translator_comment): - global main_po, unique_str, unique_loc - - # Write translator comment. - _write_translator_comment(msgctx, msg, translator_comment) - translator_comment = "" - - if msgctx != "": - # If it's a new context or a new message within an existing context, then write new msgid. - # Else add location to existing msgid. - if not msgctx in ctx_group: - _write_message(msgctx, msg, msg_plural, location) - ctx_group[msgctx] = {msg: [location]} - elif not msg in ctx_group[msgctx]: - _write_message(msgctx, msg, msg_plural, location) - ctx_group[msgctx][msg] = [location] - elif not location in ctx_group[msgctx][msg]: - _add_additional_location(msgctx, msg, location) - ctx_group[msgctx][msg].append(location) - else: - if not msg in unique_str: - _write_message(msgctx, msg, msg_plural, location) - unique_str.append(msg) - unique_loc[msg] = [location] - elif not location in unique_loc[msg]: - _add_additional_location(msgctx, msg, location) - unique_loc[msg].append(location) + key = (msg, msgctx) + message = messages_map.get(key) + if not message: + message = Message() + message.msgid = msg + message.msgid_plural = msg_plural + message.msgctxt = msgctx + message.locations = [] + message.comments = [] + messages_map[key] = message + if location not in message.locations: + message.locations.append(location) + if translator_comment and translator_comment not in message.comments: + message.comments.append(translator_comment) print("Updating the editor.pot template...") @@ -335,6 +265,8 @@ for fname in matches: with open(fname, "r", encoding="utf8") as f: process_file(f, fname) +main_po += "\n\n".join(message.format() for message in messages_map.values()) + with open("editor.pot", "w") as f: f.write(main_po) diff --git a/main/main.cpp b/main/main.cpp index c7a644d7b3..89b56a814e 100644 --- a/main/main.cpp +++ b/main/main.cpp @@ -1488,6 +1488,28 @@ Error Main::setup(const char *execpath, int argc, char *argv[], bool p_second_ph GLOBAL_DEF("display/window/ios/hide_home_indicator", true); GLOBAL_DEF("input_devices/pointing/ios/touch_delay", 0.150); + // XR project settings. 
+ GLOBAL_DEF_BASIC("xr/openxr/enabled", false); + GLOBAL_DEF_BASIC("xr/openxr/default_action_map", "res://default_action_map.tres"); + ProjectSettings::get_singleton()->set_custom_property_info("xr/openxr/default_action_map", PropertyInfo(Variant::STRING, "xr/openxr/default_action_map", PROPERTY_HINT_FILE, "*.tres")); + + GLOBAL_DEF_BASIC("xr/openxr/form_factor", "0"); + ProjectSettings::get_singleton()->set_custom_property_info("xr/openxr/form_factor", PropertyInfo(Variant::INT, "xr/openxr/form_factor", PROPERTY_HINT_ENUM, "Head mounted,Handheld")); + + GLOBAL_DEF_BASIC("xr/openxr/view_configuration", "1"); + ProjectSettings::get_singleton()->set_custom_property_info("xr/openxr/view_configuration", PropertyInfo(Variant::INT, "xr/openxr/view_configuration", PROPERTY_HINT_ENUM, "Mono,Stereo")); // "Mono,Stereo,Quad,Observer" + + GLOBAL_DEF_BASIC("xr/openxr/reference_space", "1"); + ProjectSettings::get_singleton()->set_custom_property_info("xr/openxr/reference_space", PropertyInfo(Variant::INT, "xr/openxr/reference_space", PROPERTY_HINT_ENUM, "Local,Stage")); + +#ifdef TOOLS_ENABLED + // Disabled for now, using XR inside of the editor we'll be working on during the coming months. + + // editor settings (it seems we're too early in the process when setting up rendering, to access editor settings...) + // EDITOR_DEF_RST("xr/openxr/in_editor", false); + // GLOBAL_DEF("xr/openxr/in_editor", false); +#endif + Engine::get_singleton()->set_frame_delay(frame_delay); message_queue = memnew(MessageQueue); diff --git a/misc/scripts/clang_format.sh b/misc/scripts/clang_format.sh index 13b5ab79b6..5ab9c1bbb9 100755 --- a/misc/scripts/clang_format.sh +++ b/misc/scripts/clang_format.sh @@ -19,6 +19,10 @@ while read -r f; do continue elif [[ "$f" == "platform/android/java/lib/src/org/godotengine/godot/input/InputManager"* ]]; then continue + elif [[ "$f" == "platform/android/java/lib/src/org/godotengine/godot/gl/GLSurfaceView"* ]]; then + continue + elif [[ "$f" == "platform/android/java/lib/src/org/godotengine/godot/gl/EGLLogWrapper"* ]]; then + continue fi python misc/scripts/copyright_headers.py "$f" diff --git a/modules/fbx/editor_scene_importer_fbx.cpp b/modules/fbx/editor_scene_importer_fbx.cpp index ca10594a42..afaeb15708 100644 --- a/modules/fbx/editor_scene_importer_fbx.cpp +++ b/modules/fbx/editor_scene_importer_fbx.cpp @@ -58,24 +58,8 @@ void EditorSceneFormatImporterFBX::get_extensions(List<String> *r_extensions) const { // register FBX as the one and only format for FBX importing - const String import_setting_string = "filesystem/import/fbx/"; - const String fbx_str = "fbx"; - Vector<String> exts; - exts.push_back(fbx_str); - _register_project_setting_import(fbx_str, import_setting_string, exts, r_extensions, true); -} - -void EditorSceneFormatImporterFBX::_register_project_setting_import(const String generic, - const String import_setting_string, - const Vector<String> &exts, - List<String> *r_extensions, - const bool p_enabled) const { - const String use_generic = "use_" + generic; - _GLOBAL_DEF(import_setting_string + use_generic, p_enabled, true); - if (ProjectSettings::get_singleton()->get(import_setting_string + use_generic)) { - for (int32_t i = 0; i < exts.size(); i++) { - r_extensions->push_back(exts[i]); - } + if (GLOBAL_GET("filesystem/import/fbx/use_fbx")) { + r_extensions->push_back("fbx"); } } @@ -1473,3 +1457,7 @@ Ref<Animation> EditorSceneFormatImporterFBX::import_animation(const String &p_pa int p_bake_fps) { return Ref<Animation>(); } + 
+EditorSceneFormatImporterFBX::EditorSceneFormatImporterFBX() { + _GLOBAL_DEF("filesystem/import/fbx/use_fbx", true, true); +} diff --git a/modules/fbx/editor_scene_importer_fbx.h b/modules/fbx/editor_scene_importer_fbx.h index eebcb86409..fb72f93feb 100644 --- a/modules/fbx/editor_scene_importer_fbx.h +++ b/modules/fbx/editor_scene_importer_fbx.h @@ -119,10 +119,9 @@ private: template <class T> T _interpolate_track(const Vector<float> &p_times, const Vector<T> &p_values, float p_time, AssetImportAnimation::Interpolation p_interp); - void _register_project_setting_import(const String generic, const String import_setting_string, const Vector<String> &exts, List<String> *r_extensions, const bool p_enabled) const; public: - EditorSceneFormatImporterFBX() {} + EditorSceneFormatImporterFBX(); ~EditorSceneFormatImporterFBX() {} virtual void get_extensions(List<String> *r_extensions) const override; diff --git a/modules/gltf/config.py b/modules/gltf/config.py index a4736321fa..4ff2a6db38 100644 --- a/modules/gltf/config.py +++ b/modules/gltf/config.py @@ -8,12 +8,15 @@ def configure(env): def get_doc_classes(): return [ - "EditorSceneImporterGLTF", + "EditorSceneFormatImporterBlend", + "EditorSceneFormatImporterGLTF", "GLTFAccessor", "GLTFAnimation", "GLTFBufferView", "GLTFCamera", "GLTFDocument", + "GLTFDocumentExtension", + "GLTFDocumentExtensionConvertImporterMesh", "GLTFLight", "GLTFMesh", "GLTFNode", @@ -22,8 +25,6 @@ def get_doc_classes(): "GLTFSpecGloss", "GLTFState", "GLTFTexture", - "GLTFDocumentExtension", - "GLTFDocumentExtensionConvertImporterMesh", ] diff --git a/modules/gltf/doc_classes/EditorSceneFormatImporterBlend.xml b/modules/gltf/doc_classes/EditorSceneFormatImporterBlend.xml new file mode 100644 index 0000000000..b3a0381efc --- /dev/null +++ b/modules/gltf/doc_classes/EditorSceneFormatImporterBlend.xml @@ -0,0 +1,15 @@ +<?xml version="1.0" encoding="UTF-8" ?> +<class name="EditorSceneFormatImporterBlend" inherits="EditorSceneFormatImporter" version="4.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:noNamespaceSchemaLocation="../../../doc/class.xsd"> + <brief_description> + Importer for Blender's [code].blend[/code] scene file format. + </brief_description> + <description> + Imports Blender scenes in the [code].blend[/code] file format through the glTF 2.0 3D import pipeline. This importer requires Blender to be installed by the user, so that it can be used to export the scene as glTF 2.0. + The location of the Blender binary is set via the [code]filesystem/import/blend/blender_path[/code] editor setting. + This importer is only used if [member ProjectSettings.filesystem/import/blend/enabled] is enabled, otherwise [code].blend[/code] present in the project folder are not imported. + Blend import requires Blender 3.0. + Internally, the EditorSceneFormatImporterBlend uses the Blender glTF "Use Original" mode to reference external textures. 
+ </description> + <tutorials> + </tutorials> +</class> diff --git a/doc/classes/EditorSceneFormatImporterGLTF.xml b/modules/gltf/doc_classes/EditorSceneFormatImporterGLTF.xml index 6478e853eb..5a6a2f52d9 100644 --- a/doc/classes/EditorSceneFormatImporterGLTF.xml +++ b/modules/gltf/doc_classes/EditorSceneFormatImporterGLTF.xml @@ -1,5 +1,5 @@ <?xml version="1.0" encoding="UTF-8" ?> -<class name="EditorSceneFormatImporterGLTF" inherits="EditorSceneFormatImporter" version="4.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:noNamespaceSchemaLocation="../class.xsd"> +<class name="EditorSceneFormatImporterGLTF" inherits="EditorSceneFormatImporter" version="4.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:noNamespaceSchemaLocation="../../../doc/class.xsd"> <brief_description> </brief_description> <description> diff --git a/modules/gltf/doc_classes/GLTFDocument.xml b/modules/gltf/doc_classes/GLTFDocument.xml index 7adabdc605..cb0e3b6754 100644 --- a/modules/gltf/doc_classes/GLTFDocument.xml +++ b/modules/gltf/doc_classes/GLTFDocument.xml @@ -24,6 +24,7 @@ <argument index="1" name="state" type="GLTFState" /> <argument index="2" name="flags" type="int" default="0" /> <argument index="3" name="bake_fps" type="int" default="30" /> + <argument index="4" name="base_path" type="String" default="""" /> <description> </description> </method> diff --git a/modules/gltf/editor/editor_scene_importer_blend.cpp b/modules/gltf/editor/editor_scene_importer_blend.cpp new file mode 100644 index 0000000000..931b2bc73b --- /dev/null +++ b/modules/gltf/editor/editor_scene_importer_blend.cpp @@ -0,0 +1,268 @@ +/*************************************************************************/ +/* editor_scene_importer_blend.cpp */ +/*************************************************************************/ +/* This file is part of: */ +/* GODOT ENGINE */ +/* https://godotengine.org */ +/*************************************************************************/ +/* Copyright (c) 2007-2022 Juan Linietsky, Ariel Manzur. */ +/* Copyright (c) 2014-2022 Godot Engine contributors (cf. AUTHORS.md). */ +/* */ +/* Permission is hereby granted, free of charge, to any person obtaining */ +/* a copy of this software and associated documentation files (the */ +/* "Software"), to deal in the Software without restriction, including */ +/* without limitation the rights to use, copy, modify, merge, publish, */ +/* distribute, sublicense, and/or sell copies of the Software, and to */ +/* permit persons to whom the Software is furnished to do so, subject to */ +/* the following conditions: */ +/* */ +/* The above copyright notice and this permission notice shall be */ +/* included in all copies or substantial portions of the Software. */ +/* */ +/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */ +/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */ +/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/ +/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */ +/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */ +/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */ +/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*/ +/*************************************************************************/ + +#include "editor_scene_importer_blend.h" + +#if TOOLS_ENABLED + +#include "../gltf_document.h" +#include "../gltf_state.h" + +#include "core/config/project_settings.h" +#include "core/io/json.h" +#include "editor/editor_settings.h" +#include "scene/main/node.h" +#include "scene/resources/animation.h" + +uint32_t EditorSceneFormatImporterBlend::get_import_flags() const { + return ImportFlags::IMPORT_SCENE | ImportFlags::IMPORT_ANIMATION; +} + +void EditorSceneFormatImporterBlend::get_extensions(List<String> *r_extensions) const { + r_extensions->push_back("blend"); +} + +Node *EditorSceneFormatImporterBlend::import_scene(const String &p_path, uint32_t p_flags, + const Map<StringName, Variant> &p_options, int p_bake_fps, + List<String> *r_missing_deps, Error *r_err) { + // Parse JSON config. + + const String source_global = ProjectSettings::get_singleton()->globalize_path(p_path).c_escape(); + const String sink = ProjectSettings::get_singleton()->get_imported_files_path().plus_file( + vformat("%s-%s.gltf", p_path.get_file().get_basename(), p_path.md5_text())); + const String sink_global = ProjectSettings::get_singleton()->globalize_path(sink).c_escape(); + const String json_parameters = vformat("{\"source\": \"%s\", \"sink\": \"%s\"}", source_global, sink_global); + + Ref<JSON> json; + json.instantiate(); + Error err = json->parse(json_parameters); + if (err != OK) { + if (r_err) { + *r_err = err; + } + ERR_PRINT(vformat("Blend config can't be read at line %s with error: %s", + json->get_error_line(), json->get_error_message())); + return nullptr; + } + Dictionary parameters = json->get_data(); + + // Handle configuration options. + + String parameters_arg; + + if (p_options.has(SNAME("blender/nodes/custom_properties")) && p_options[SNAME("blender/nodes/custom_properties")]) { + parameters_arg += "export_extras=True,"; + } else { + parameters_arg += "export_extras=False,"; + } + if (p_options.has(SNAME("blender/meshes/skins")) && p_options[SNAME("blender/meshes/skins")]) { + int32_t skins = p_options["blender/meshes/skins"]; + if (skins == BLEND_BONE_INFLUENCES_NONE) { + parameters_arg += "export_all_influences=False,"; + } else if (skins == BLEND_BONE_INFLUENCES_COMPATIBLE) { + parameters_arg += "export_all_influences=False,"; + } else if (skins == BLEND_BONE_INFLUENCES_ALL) { + parameters_arg += "export_all_influences=True,"; + } + parameters_arg += "export_skins=True,"; + } else { + parameters_arg += "export_skins=False,"; + } + if (p_options.has(SNAME("blender/nodes/cameras")) && p_options[SNAME("blender/nodes/cameras")]) { + parameters_arg += "export_cameras=True,"; + } else { + parameters_arg += "export_cameras=False,"; + } + if (p_options.has(SNAME("blender/nodes/lights")) && p_options[SNAME("blender/nodes/lights")]) { + parameters_arg += "export_lights=True,"; + } else { + parameters_arg += "export_lights=False,"; + } + if (p_options.has(SNAME("blender/meshes/colors")) && p_options[SNAME("blender/meshes/colors")]) { + parameters_arg += "export_colors=True,"; + } else { + parameters_arg += "export_colors=False,"; + } + if (p_options.has(SNAME("blender/nodes/visible")) && p_options[SNAME("blender/nodes/visible")]) { + int32_t visible = p_options["blender/nodes/visible"]; + if (visible == BLEND_VISIBLE_VISIBLE_ONLY) { + parameters_arg += "use_visible=True,"; + } else if (visible == BLEND_VISIBLE_RENDERABLE) { + parameters_arg += "use_renderable=True,"; + } else if (visible == BLEND_VISIBLE_ALL) { + 
parameters_arg += "use_visible=False,use_renderable=False,"; + } + } else { + parameters_arg += "use_visible=False,use_renderable=False,"; + } + if (p_options.has(SNAME("blender/meshes/uvs")) && p_options[SNAME("blender/meshes/uvs")]) { + parameters_arg += "export_texcoords=True,"; + } else { + parameters_arg += "export_texcoords=False,"; + } + if (p_options.has(SNAME("blender/meshes/normals")) && p_options[SNAME("blender/meshes/normals")]) { + parameters_arg += "export_normals=True,"; + } else { + parameters_arg += "export_normals=False,"; + } + if (p_options.has(SNAME("blender/meshes/tangents")) && p_options[SNAME("blender/meshes/tangents")]) { + parameters_arg += "export_tangents=True,"; + } else { + parameters_arg += "export_tangents=False,"; + } + if (p_options.has(SNAME("blender/animation/group_tracks")) && p_options[SNAME("blender/animation/group_tracks")]) { + parameters_arg += "export_nla_strips=True,"; + } else { + parameters_arg += "export_nla_strips=False,"; + } + if (p_options.has(SNAME("blender/animation/limit_playback")) && p_options[SNAME("blender/animation/limit_playback")]) { + parameters_arg += "export_frame_range=True,"; + } else { + parameters_arg += "export_frame_range=False,"; + } + if (p_options.has(SNAME("blender/animation/always_sample")) && p_options[SNAME("blender/animation/always_sample")]) { + parameters_arg += "export_force_sampling=True,"; + } else { + parameters_arg += "export_force_sampling=False,"; + } + if (p_options.has(SNAME("blender/meshes/export_bones_deforming_mesh_only")) && p_options[SNAME("blender/meshes/export_bones_deforming_mesh_only")]) { + parameters_arg += "export_def_bones=True,"; + } else { + parameters_arg += "export_def_bones=False,"; + } + if (p_options.has(SNAME("blender/nodes/modifiers")) && p_options[SNAME("blender/nodes/modifiers")]) { + parameters_arg += "export_apply=True"; + } else { + parameters_arg += "export_apply=False"; + } + + String unpack_all; + if (p_options.has(SNAME("blender/materials/unpack_enabled")) && p_options[SNAME("blender/materials/unpack_enabled")]) { + unpack_all = "bpy.ops.file.unpack_all(method='USE_LOCAL');"; + } + + // Prepare Blender export script. + + String common_args = vformat("filepath='%s',", parameters["sink"]) + + "export_format='GLTF_SEPARATE'," + "export_yup=True," + + parameters_arg; + String script = + String("import bpy, sys;") + + "print('Blender 3.0 or higher is required.', file=sys.stderr) if bpy.app.version < (3, 0, 0) else None;" + + vformat("bpy.ops.wm.open_mainfile(filepath='%s');", parameters["source"]) + + unpack_all + + vformat("bpy.ops.export_scene.gltf(export_keep_originals=True,%s);", common_args); + print_verbose(script); + + // Run script with configured Blender binary. + + String blender_path = EDITOR_GET("filesystem/import/blend/blender_path"); + + List<String> args; + args.push_back("--background"); + args.push_back("--python-expr"); + args.push_back(script); + + String standard_out; + int32_t ret = OS::get_singleton()->execute(blender_path, args, &standard_out, &ret, true); + print_verbose(standard_out); + + if (ret != OK) { + if (r_err) { + *r_err = ERR_SCRIPT_FAILED; + } + ERR_PRINT(vformat("Blend import failed with error: %d.", ret)); + return nullptr; + } + + // Import the generated glTF. + + // Use GLTFDocument instead of gltf importer to keep image references. 
+ Ref<GLTFDocument> gltf; + gltf.instantiate(); + Ref<GLTFState> state; + state.instantiate(); + String base_dir; + if (p_options.has(SNAME("blender/materials/unpack_enabled")) && p_options[SNAME("blender/materials/unpack_enabled")]) { + base_dir = sink.get_base_dir(); + } + err = gltf->append_from_file(sink.get_basename() + ".gltf", state, p_flags, p_bake_fps, base_dir); + if (err != OK) { + if (r_err) { + *r_err = FAILED; + } + return nullptr; + } + return gltf->generate_scene(state, p_bake_fps); +} + +Ref<Animation> EditorSceneFormatImporterBlend::import_animation(const String &p_path, uint32_t p_flags, + const Map<StringName, Variant> &p_options, int p_bake_fps) { + return Ref<Animation>(); +} + +Variant EditorSceneFormatImporterBlend::get_option_visibility(const String &p_path, const String &p_option, + const Map<StringName, Variant> &p_options) { + if (p_option.begins_with("animation/")) { + if (p_option != "animation/import" && !bool(p_options["animation/import"])) { + return false; + } + } + return true; +} + +void EditorSceneFormatImporterBlend::get_import_options(const String &p_path, List<ResourceImporter::ImportOption> *r_options) { +#define ADD_OPTION_BOOL(PATH, VALUE) \ + r_options->push_back(ResourceImporter::ImportOption(PropertyInfo(Variant::BOOL, SNAME(PATH)), VALUE)); +#define ADD_OPTION_ENUM(PATH, ENUM_HINT, VALUE) \ + r_options->push_back(ResourceImporter::ImportOption(PropertyInfo(Variant::INT, SNAME(PATH), PROPERTY_HINT_ENUM, ENUM_HINT), VALUE)); + + ADD_OPTION_ENUM("blender/nodes/visible", "Visible Only,Renderable,All", BLEND_VISIBLE_ALL); + ADD_OPTION_BOOL("blender/nodes/punctual_lights", true); + ADD_OPTION_BOOL("blender/nodes/cameras", true); + ADD_OPTION_BOOL("blender/nodes/custom_properties", true); + ADD_OPTION_ENUM("blender/nodes/modifiers", "No Modifiers,All Modifiers", BLEND_MODIFIERS_ALL); + ADD_OPTION_BOOL("blender/meshes/colors", false); + ADD_OPTION_BOOL("blender/meshes/uvs", true); + ADD_OPTION_BOOL("blender/meshes/normals", true); + ADD_OPTION_BOOL("blender/meshes/tangents", true); + ADD_OPTION_ENUM("blender/meshes/skins", "None,4 Influences (Compatible),All Influences", BLEND_BONE_INFLUENCES_ALL); + ADD_OPTION_BOOL("blender/meshes/export_bones_deforming_mesh_only", false); + ADD_OPTION_BOOL("blender/materials/unpack_enabled", true); + ADD_OPTION_BOOL("blender/animation/limit_playback", true); + ADD_OPTION_BOOL("blender/animation/always_sample", true); + ADD_OPTION_BOOL("blender/animation/group_tracks", true); + +#undef ADD_OPTION_BOOL +#undef ADD_OPTION_ENUM +} + +#endif // TOOLS_ENABLED diff --git a/modules/gltf/editor/editor_scene_importer_blend.h b/modules/gltf/editor/editor_scene_importer_blend.h new file mode 100644 index 0000000000..4bdf4c93a2 --- /dev/null +++ b/modules/gltf/editor/editor_scene_importer_blend.h @@ -0,0 +1,75 @@ +/*************************************************************************/ +/* editor_scene_importer_blend.h */ +/*************************************************************************/ +/* This file is part of: */ +/* GODOT ENGINE */ +/* https://godotengine.org */ +/*************************************************************************/ +/* Copyright (c) 2007-2022 Juan Linietsky, Ariel Manzur. */ +/* Copyright (c) 2014-2022 Godot Engine contributors (cf. AUTHORS.md). 
*/ +/* */ +/* Permission is hereby granted, free of charge, to any person obtaining */ +/* a copy of this software and associated documentation files (the */ +/* "Software"), to deal in the Software without restriction, including */ +/* without limitation the rights to use, copy, modify, merge, publish, */ +/* distribute, sublicense, and/or sell copies of the Software, and to */ +/* permit persons to whom the Software is furnished to do so, subject to */ +/* the following conditions: */ +/* */ +/* The above copyright notice and this permission notice shall be */ +/* included in all copies or substantial portions of the Software. */ +/* */ +/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */ +/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */ +/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/ +/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */ +/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */ +/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */ +/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ +/*************************************************************************/ + +#ifndef EDITOR_SCENE_IMPORTER_BLEND_H +#define EDITOR_SCENE_IMPORTER_BLEND_H + +#ifdef TOOLS_ENABLED + +#include "editor/import/resource_importer_scene.h" + +class Animation; +class Node; + +class EditorSceneFormatImporterBlend : public EditorSceneFormatImporter { + GDCLASS(EditorSceneFormatImporterBlend, EditorSceneFormatImporter); + +public: + enum { + BLEND_VISIBLE_VISIBLE_ONLY, + BLEND_VISIBLE_RENDERABLE, + BLEND_VISIBLE_ALL + }; + enum { + BLEND_BONE_INFLUENCES_NONE, + BLEND_BONE_INFLUENCES_COMPATIBLE, + BLEND_BONE_INFLUENCES_ALL + }; + enum { + BLEND_MODIFIERS_NONE, + BLEND_MODIFIERS_ALL + }; + + virtual uint32_t get_import_flags() const override; + virtual void get_extensions(List<String> *r_extensions) const override; + virtual Node *import_scene(const String &p_path, uint32_t p_flags, + const Map<StringName, Variant> &p_options, int p_bake_fps, + List<String> *r_missing_deps, Error *r_err = nullptr) override; + virtual Ref<Animation> import_animation(const String &p_path, uint32_t p_flags, + const Map<StringName, Variant> &p_options, int p_bake_fps) override; + virtual void get_import_options(const String &p_path, + List<ResourceImporter::ImportOption> *r_options) override; + virtual Variant get_option_visibility(const String &p_path, const String &p_option, + const Map<StringName, Variant> &p_options) override; +}; + +#endif // TOOLS_ENABLED + +#endif // EDITOR_SCENE_IMPORTER_BLEND_H diff --git a/modules/gltf/editor/editor_scene_importer_gltf.cpp b/modules/gltf/editor/editor_scene_importer_gltf.cpp index f9193c2a42..1d6a82e58a 100644 --- a/modules/gltf/editor/editor_scene_importer_gltf.cpp +++ b/modules/gltf/editor/editor_scene_importer_gltf.cpp @@ -35,8 +35,7 @@ #include "../gltf_document.h" #include "../gltf_state.h" -#include "scene/3d/node_3d.h" -#include "scene/animation/animation_player.h" +#include "scene/main/node.h" #include "scene/resources/animation.h" uint32_t EditorSceneFormatImporterGLTF::get_import_flags() const { @@ -48,26 +47,25 @@ void EditorSceneFormatImporterGLTF::get_extensions(List<String> *r_extensions) c r_extensions->push_back("glb"); } -Node *EditorSceneFormatImporterGLTF::import_scene(const String &p_path, - uint32_t p_flags, const Map<StringName, Variant> &p_options, int p_bake_fps, - List<String> *r_missing_deps, - Error *r_err) { +Node 
*EditorSceneFormatImporterGLTF::import_scene(const String &p_path, uint32_t p_flags, + const Map<StringName, Variant> &p_options, int p_bake_fps, + List<String> *r_missing_deps, Error *r_err) { Ref<GLTFDocument> doc; doc.instantiate(); Ref<GLTFState> state; state.instantiate(); Error err = doc->append_from_file(p_path, state, p_flags, p_bake_fps); if (err != OK) { - *r_err = err; + if (r_err) { + *r_err = err; + } return nullptr; } - Node *root = doc->generate_scene(state, p_bake_fps); - return root; + return doc->generate_scene(state, p_bake_fps); } Ref<Animation> EditorSceneFormatImporterGLTF::import_animation(const String &p_path, - uint32_t p_flags, const Map<StringName, Variant> &p_options, - int p_bake_fps) { + uint32_t p_flags, const Map<StringName, Variant> &p_options, int p_bake_fps) { return Ref<Animation>(); } diff --git a/modules/gltf/editor/editor_scene_importer_gltf.h b/modules/gltf/editor/editor_scene_importer_gltf.h index 206fe63426..1f62ca9537 100644 --- a/modules/gltf/editor/editor_scene_importer_gltf.h +++ b/modules/gltf/editor/editor_scene_importer_gltf.h @@ -33,14 +33,10 @@ #ifdef TOOLS_ENABLED -#include "../gltf_document_extension.h" -#include "../gltf_state.h" - #include "editor/import/resource_importer_scene.h" -#include "scene/main/node.h" -#include "scene/resources/packed_scene.h" class Animation; +class Node; class EditorSceneFormatImporterGLTF : public EditorSceneFormatImporter { GDCLASS(EditorSceneFormatImporterGLTF, EditorSceneFormatImporter); @@ -48,9 +44,11 @@ class EditorSceneFormatImporterGLTF : public EditorSceneFormatImporter { public: virtual uint32_t get_import_flags() const override; virtual void get_extensions(List<String> *r_extensions) const override; - virtual Node *import_scene(const String &p_path, uint32_t p_flags, const Map<StringName, Variant> &p_options, int p_bake_fps, List<String> *r_missing_deps, Error *r_err = nullptr) override; - virtual Ref<Animation> import_animation(const String &p_path, - uint32_t p_flags, const Map<StringName, Variant> &p_options, int p_bake_fps) override; + virtual Node *import_scene(const String &p_path, uint32_t p_flags, + const Map<StringName, Variant> &p_options, int p_bake_fps, + List<String> *r_missing_deps, Error *r_err = nullptr) override; + virtual Ref<Animation> import_animation(const String &p_path, uint32_t p_flags, + const Map<StringName, Variant> &p_options, int p_bake_fps) override; }; #endif // TOOLS_ENABLED diff --git a/modules/gltf/gltf_document.cpp b/modules/gltf/gltf_document.cpp index 56f31fd812..ee756c6d2e 100644 --- a/modules/gltf/gltf_document.cpp +++ b/modules/gltf/gltf_document.cpp @@ -6758,8 +6758,8 @@ Error GLTFDocument::_serialize_file(Ref<GLTFState> state, const String p_path) { } void GLTFDocument::_bind_methods() { - ClassDB::bind_method(D_METHOD("append_from_file", "path", "state", "flags", "bake_fps"), - &GLTFDocument::append_from_file, DEFVAL(0), DEFVAL(30)); + ClassDB::bind_method(D_METHOD("append_from_file", "path", "state", "flags", "bake_fps", "base_path"), + &GLTFDocument::append_from_file, DEFVAL(0), DEFVAL(30), DEFVAL(String())); ClassDB::bind_method(D_METHOD("append_from_buffer", "bytes", "base_path", "state", "flags", "bake_fps"), &GLTFDocument::append_from_buffer, DEFVAL(0), DEFVAL(30)); ClassDB::bind_method(D_METHOD("append_from_scene", "node", "state", "flags", "bake_fps"), @@ -7024,20 +7024,22 @@ Error GLTFDocument::_parse_gltf_state(Ref<GLTFState> state, const String &p_sear return OK; } -Error GLTFDocument::append_from_file(String p_path, Ref<GLTFState> r_state, 
uint32_t p_flags, int32_t p_bake_fps) { +Error GLTFDocument::append_from_file(String p_path, Ref<GLTFState> r_state, uint32_t p_flags, int32_t p_bake_fps, String p_base_path) { // TODO Add missing texture and missing .bin file paths to r_missing_deps 2021-09-10 fire if (r_state == Ref<GLTFState>()) { r_state.instantiate(); } r_state->filename = p_path.get_file().get_basename(); - r_state->use_named_skin_binds = - p_flags & GLTF_IMPORT_USE_NAMED_SKIN_BINDS; + r_state->use_named_skin_binds = p_flags & GLTF_IMPORT_USE_NAMED_SKIN_BINDS; Error err; FileAccess *f = FileAccess::open(p_path, FileAccess::READ, &err); ERR_FAIL_COND_V(err != OK, ERR_FILE_CANT_OPEN); ERR_FAIL_NULL_V(f, ERR_FILE_CANT_OPEN); - - err = _parse(r_state, p_path.get_base_dir(), f, p_bake_fps); + String base_path = p_base_path; + if (base_path.is_empty()) { + base_path = p_path.get_base_dir(); + } + err = _parse(r_state, base_path, f, p_bake_fps); ERR_FAIL_COND_V(err != OK, ERR_PARSE_ERROR); return err; } diff --git a/modules/gltf/gltf_document.h b/modules/gltf/gltf_document.h index c0649e0129..0b7664a616 100644 --- a/modules/gltf/gltf_document.h +++ b/modules/gltf/gltf_document.h @@ -381,7 +381,7 @@ private: static float get_max_component(const Color &p_color); public: - Error append_from_file(String p_path, Ref<GLTFState> r_state, uint32_t p_flags = 0, int32_t p_bake_fps = 30); + Error append_from_file(String p_path, Ref<GLTFState> r_state, uint32_t p_flags = 0, int32_t p_bake_fps = 30, String p_base_path = String()); Error append_from_buffer(PackedByteArray p_bytes, String p_base_path, Ref<GLTFState> r_state, uint32_t p_flags = 0, int32_t p_bake_fps = 30); Error append_from_scene(Node *p_node, Ref<GLTFState> r_state, uint32_t p_flags = 0, int32_t p_bake_fps = 30); diff --git a/modules/gltf/register_types.cpp b/modules/gltf/register_types.cpp index ef30628dbb..1669d40269 100644 --- a/modules/gltf/register_types.cpp +++ b/modules/gltf/register_types.cpp @@ -49,41 +49,72 @@ #include "gltf_texture.h" #ifdef TOOLS_ENABLED +#include "core/config/project_settings.h" #include "editor/editor_node.h" #include "editor/editor_scene_exporter_gltf_plugin.h" +#include "editor/editor_scene_importer_blend.h" #include "editor/editor_scene_importer_gltf.h" +#include "editor/editor_settings.h" static void _editor_init() { Ref<EditorSceneFormatImporterGLTF> import_gltf; import_gltf.instantiate(); ResourceImporterScene::get_singleton()->add_importer(import_gltf); + + // Blend to glTF importer. + bool blend_enabled = GLOBAL_GET("filesystem/import/blend/enabled"); + // Defined here because EditorSettings doesn't exist in `register_gltf_types` yet. + String blender_path = EDITOR_DEF_RST("filesystem/import/blend/blender_path", ""); + EditorSettings::get_singleton()->add_property_hint(PropertyInfo(Variant::STRING, + "filesystem/import/blend/blender_path", PROPERTY_HINT_GLOBAL_FILE)); + if (blend_enabled) { + DirAccessRef da = DirAccess::create(DirAccess::ACCESS_FILESYSTEM); + if (blender_path.is_empty()) { + WARN_PRINT("Blend file import is enabled, but no Blender path is configured. Blend files will not be imported."); + } else if (!da->file_exists(blender_path)) { + WARN_PRINT("Blend file import is enabled, but the Blender path doesn't point to a valid Blender executable. 
Blend files will not be imported."); + } else { + Ref<EditorSceneFormatImporterBlend> importer; + importer.instantiate(); + ResourceImporterScene::get_singleton()->add_importer(importer); + } + } } -#endif +#endif // TOOLS_ENABLED void register_gltf_types() { + // glTF API available at runtime. + GDREGISTER_CLASS(GLTFAccessor); + GDREGISTER_CLASS(GLTFAnimation); + GDREGISTER_CLASS(GLTFBufferView); + GDREGISTER_CLASS(GLTFCamera); + GDREGISTER_CLASS(GLTFDocument); + GDREGISTER_CLASS(GLTFDocumentExtension); + GDREGISTER_CLASS(GLTFDocumentExtensionConvertImporterMesh); + GDREGISTER_CLASS(GLTFLight); + GDREGISTER_CLASS(GLTFNode); + GDREGISTER_CLASS(GLTFSkeleton); + GDREGISTER_CLASS(GLTFSkin); + GDREGISTER_CLASS(GLTFSpecGloss); + GDREGISTER_CLASS(GLTFState); + GDREGISTER_CLASS(GLTFTexture); + #ifdef TOOLS_ENABLED + // Editor-specific API. ClassDB::APIType prev_api = ClassDB::get_current_api(); ClassDB::set_current_api(ClassDB::API_EDITOR); - GDREGISTER_CLASS(EditorSceneFormatImporterGLTF); + GDREGISTER_CLASS(GLTFMesh); + GDREGISTER_CLASS(EditorSceneFormatImporterGLTF); EditorPlugins::add_by_type<SceneExporterGLTFPlugin>(); + + // Blend to glTF importer. + GLOBAL_DEF_RST("filesystem/import/blend/enabled", false); // Defined here to catch in docs. + GDREGISTER_CLASS(EditorSceneFormatImporterBlend); + ClassDB::set_current_api(prev_api); EditorNode::add_init_callback(_editor_init); -#endif - GDREGISTER_CLASS(GLTFSpecGloss); - GDREGISTER_CLASS(GLTFNode); - GDREGISTER_CLASS(GLTFAnimation); - GDREGISTER_CLASS(GLTFBufferView); - GDREGISTER_CLASS(GLTFAccessor); - GDREGISTER_CLASS(GLTFTexture); - GDREGISTER_CLASS(GLTFSkeleton); - GDREGISTER_CLASS(GLTFSkin); - GDREGISTER_CLASS(GLTFCamera); - GDREGISTER_CLASS(GLTFLight); - GDREGISTER_CLASS(GLTFState); - GDREGISTER_CLASS(GLTFDocumentExtensionConvertImporterMesh); - GDREGISTER_CLASS(GLTFDocumentExtension); - GDREGISTER_CLASS(GLTFDocument); +#endif // TOOLS_ENABLED } void unregister_gltf_types() { diff --git a/modules/openxr/openxr_api.cpp b/modules/openxr/openxr_api.cpp index bf668bac27..f7ff6cc89d 100644 --- a/modules/openxr/openxr_api.cpp +++ b/modules/openxr/openxr_api.cpp @@ -52,32 +52,6 @@ OpenXRAPI *OpenXRAPI::singleton = nullptr; -void OpenXRAPI::setup_global_defs() { - // As OpenXRAPI is not constructed if OpenXR is not enabled, we register our project and editor settings here - - // Project settings - GLOBAL_DEF_BASIC("xr/openxr/enabled", false); - GLOBAL_DEF_BASIC("xr/openxr/default_action_map", "res://default_action_map.tres"); - ProjectSettings::get_singleton()->set_custom_property_info("xr/openxr/default_action_map", PropertyInfo(Variant::STRING, "xr/openxr/default_action_map", PROPERTY_HINT_FILE, "*.tres")); - - GLOBAL_DEF_BASIC("xr/openxr/form_factor", "0"); - ProjectSettings::get_singleton()->set_custom_property_info("xr/openxr/form_factor", PropertyInfo(Variant::INT, "xr/openxr/form_factor", PROPERTY_HINT_ENUM, "Head mounted,Handheld")); - - GLOBAL_DEF_BASIC("xr/openxr/view_configuration", "1"); - ProjectSettings::get_singleton()->set_custom_property_info("xr/openxr/view_configuration", PropertyInfo(Variant::INT, "xr/openxr/view_configuration", PROPERTY_HINT_ENUM, "Mono,Stereo")); // "Mono,Stereo,Quad,Observer" - - GLOBAL_DEF_BASIC("xr/openxr/reference_space", "1"); - ProjectSettings::get_singleton()->set_custom_property_info("xr/openxr/reference_space", PropertyInfo(Variant::INT, "xr/openxr/reference_space", PROPERTY_HINT_ENUM, "Local,Stage")); - -#ifdef TOOLS_ENABLED - // Disabled for now, using XR inside of the editor we'll be working on 
during the coming months. - - // editor settings (it seems we're too early in the process when setting up rendering, to access editor settings...) - // EDITOR_DEF_RST("xr/openxr/in_editor", false); - // GLOBAL_DEF("xr/openxr/in_editor", false); -#endif -} - bool OpenXRAPI::openxr_is_enabled() { // @TODO we need an overrule switch so we can force enable openxr, i.e run "godot --openxr_enabled" diff --git a/modules/openxr/openxr_api.h b/modules/openxr/openxr_api.h index 5d1cea1217..1a1508e993 100644 --- a/modules/openxr/openxr_api.h +++ b/modules/openxr/openxr_api.h @@ -222,7 +222,6 @@ protected: void parse_velocities(const XrSpaceVelocity &p_velocity, Vector3 &r_linear_velocity, Vector3 r_angular_velocity); public: - static void setup_global_defs(); static bool openxr_is_enabled(); static OpenXRAPI *get_singleton(); diff --git a/modules/openxr/register_types.cpp b/modules/openxr/register_types.cpp index bb550980cf..47ee1316e7 100644 --- a/modules/openxr/register_types.cpp +++ b/modules/openxr/register_types.cpp @@ -44,7 +44,6 @@ Ref<OpenXRInterface> openxr_interface; void preregister_openxr_types() { // For now we create our openxr device here. If we merge it with openxr_interface we'll create that here soon. - OpenXRAPI::setup_global_defs(); if (OpenXRAPI::openxr_is_enabled()) { openxr_api = memnew(OpenXRAPI); ERR_FAIL_NULL(openxr_api); diff --git a/modules/svg/register_types.cpp b/modules/svg/register_types.cpp index a4341c6f1e..b59c815056 100644 --- a/modules/svg/register_types.cpp +++ b/modules/svg/register_types.cpp @@ -38,7 +38,7 @@ static ImageLoaderSVG *image_loader_svg = nullptr; void register_svg_types() { tvg::CanvasEngine tvgEngine = tvg::CanvasEngine::Sw; - if (tvg::Initializer::init(tvgEngine, 0) != tvg::Result::Success) { + if (tvg::Initializer::init(tvgEngine, 1) != tvg::Result::Success) { return; } image_loader_svg = memnew(ImageLoaderSVG); diff --git a/modules/text_server_adv/SCsub b/modules/text_server_adv/SCsub index dfd3bb3f1e..525d4d3efb 100644 --- a/modules/text_server_adv/SCsub +++ b/modules/text_server_adv/SCsub @@ -122,7 +122,7 @@ if env["builtin_harfbuzz"]: env_harfbuzz.Append(CCFLAGS=["-DHAVE_ICU"]) if env["builtin_icu"]: env_harfbuzz.Append(CPPPATH=["#thirdparty/icu4c/common/"]) - env_harfbuzz.Append(CCFLAGS=["-DHAVE_ICU_BUILTIN"]) + env_harfbuzz.Append(CCFLAGS=["-DU_HAVE_LIB_SUFFIX=1", "-DU_LIB_SUFFIX_C_NAME=_godot", "-DHAVE_ICU_BUILTIN"]) if freetype_enabled: env_harfbuzz.Append( @@ -464,11 +464,16 @@ if env["builtin_icu"]: "-DUCONFIG_NO_FILE_IO", "-DUCONFIG_NO_TRANSLITERATION", "-DPKGDATA_MODE=static", + "-DU_ENABLE_DYLOAD=0", + "-DU_HAVE_LIB_SUFFIX=1", + "-DU_LIB_SUFFIX_C_NAME=_godot", "-DICU_DATA_NAME=" + icu_data_name, ] ) env_text_server_adv.Append( CXXFLAGS=[ + "-DU_HAVE_LIB_SUFFIX=1", + "-DU_LIB_SUFFIX_C_NAME=_godot", "-DICU_DATA_NAME=" + icu_data_name, ] ) diff --git a/platform/android/java/lib/src/org/godotengine/godot/GodotGLRenderView.java b/platform/android/java/lib/src/org/godotengine/godot/GodotGLRenderView.java index 61093d54de..08da1b1832 100644 --- a/platform/android/java/lib/src/org/godotengine/godot/GodotGLRenderView.java +++ b/platform/android/java/lib/src/org/godotengine/godot/GodotGLRenderView.java @@ -29,6 +29,8 @@ /*************************************************************************/ package org.godotengine.godot; +import org.godotengine.godot.gl.GLSurfaceView; +import org.godotengine.godot.gl.GodotRenderer; import org.godotengine.godot.input.GodotGestureHandler; import org.godotengine.godot.input.GodotInputHandler; import 
org.godotengine.godot.utils.GLUtils; @@ -43,7 +45,6 @@ import org.godotengine.godot.xr.regular.RegularFallbackConfigChooser; import android.annotation.SuppressLint; import android.content.Context; import android.graphics.PixelFormat; -import android.opengl.GLSurfaceView; import android.os.Build; import android.view.GestureDetector; import android.view.KeyEvent; diff --git a/platform/android/java/lib/src/org/godotengine/godot/GodotLib.java b/platform/android/java/lib/src/org/godotengine/godot/GodotLib.java index 29e4b4b29e..253a51b83c 100644 --- a/platform/android/java/lib/src/org/godotengine/godot/GodotLib.java +++ b/platform/android/java/lib/src/org/godotengine/godot/GodotLib.java @@ -30,6 +30,8 @@ package org.godotengine.godot; +import org.godotengine.godot.gl.GodotRenderer; + import android.app.Activity; import android.hardware.SensorEvent; import android.view.Surface; @@ -68,7 +70,7 @@ public class GodotLib { * @param p_surface * @param p_width * @param p_height - * @see android.opengl.GLSurfaceView.Renderer#onSurfaceChanged(GL10, int, int) + * @see org.godotengine.godot.gl.GLSurfaceView.Renderer#onSurfaceChanged(GL10, int, int) */ public static native void resize(Surface p_surface, int p_width, int p_height); @@ -85,9 +87,9 @@ public class GodotLib { /** * Invoked on the GL thread to draw the current frame. - * @see android.opengl.GLSurfaceView.Renderer#onDrawFrame(GL10) + * @see org.godotengine.godot.gl.GLSurfaceView.Renderer#onDrawFrame(GL10) */ - public static native void step(); + public static native boolean step(); /** * Forward touch events from the main thread to the GL thread. diff --git a/platform/android/java/lib/src/org/godotengine/godot/gl/EGLLogWrapper.java b/platform/android/java/lib/src/org/godotengine/godot/gl/EGLLogWrapper.java new file mode 100644 index 0000000000..af16cfce74 --- /dev/null +++ b/platform/android/java/lib/src/org/godotengine/godot/gl/EGLLogWrapper.java @@ -0,0 +1,566 @@ +// clang-format off + +/* + * Copyright (C) 2008 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.godotengine.godot.gl; + +import android.opengl.GLDebugHelper; +import android.opengl.GLException; + +import java.io.IOException; +import java.io.Writer; + +import javax.microedition.khronos.egl.EGL; +import javax.microedition.khronos.egl.EGL10; +import javax.microedition.khronos.egl.EGL11; +import javax.microedition.khronos.egl.EGLConfig; +import javax.microedition.khronos.egl.EGLContext; +import javax.microedition.khronos.egl.EGLDisplay; +import javax.microedition.khronos.egl.EGLSurface; + +class EGLLogWrapper implements EGL11 { + private EGL10 mEgl10; + Writer mLog; + boolean mLogArgumentNames; + boolean mCheckError; + private int mArgCount; + + + public EGLLogWrapper(EGL egl, int configFlags, Writer log) { + mEgl10 = (EGL10) egl; + mLog = log; + mLogArgumentNames = + (GLDebugHelper.CONFIG_LOG_ARGUMENT_NAMES & configFlags) != 0; + mCheckError = + (GLDebugHelper.CONFIG_CHECK_GL_ERROR & configFlags) != 0; + } + + public boolean eglChooseConfig(EGLDisplay display, int[] attrib_list, + EGLConfig[] configs, int config_size, int[] num_config) { + begin("eglChooseConfig"); + arg("display", display); + arg("attrib_list", attrib_list); + arg("config_size", config_size); + end(); + + boolean result = mEgl10.eglChooseConfig(display, attrib_list, configs, + config_size, num_config); + arg("configs", configs); + arg("num_config", num_config); + returns(result); + checkError(); + return result; + } + + public boolean eglCopyBuffers(EGLDisplay display, EGLSurface surface, + Object native_pixmap) { + begin("eglCopyBuffers"); + arg("display", display); + arg("surface", surface); + arg("native_pixmap", native_pixmap); + end(); + + boolean result = mEgl10.eglCopyBuffers(display, surface, native_pixmap); + returns(result); + checkError(); + return result; + } + + public EGLContext eglCreateContext(EGLDisplay display, EGLConfig config, + EGLContext share_context, int[] attrib_list) { + begin("eglCreateContext"); + arg("display", display); + arg("config", config); + arg("share_context", share_context); + arg("attrib_list", attrib_list); + end(); + + EGLContext result = mEgl10.eglCreateContext(display, config, + share_context, attrib_list); + returns(result); + checkError(); + return result; + } + + public EGLSurface eglCreatePbufferSurface(EGLDisplay display, + EGLConfig config, int[] attrib_list) { + begin("eglCreatePbufferSurface"); + arg("display", display); + arg("config", config); + arg("attrib_list", attrib_list); + end(); + + EGLSurface result = mEgl10.eglCreatePbufferSurface(display, config, + attrib_list); + returns(result); + checkError(); + return result; + } + + public EGLSurface eglCreatePixmapSurface(EGLDisplay display, + EGLConfig config, Object native_pixmap, int[] attrib_list) { + begin("eglCreatePixmapSurface"); + arg("display", display); + arg("config", config); + arg("native_pixmap", native_pixmap); + arg("attrib_list", attrib_list); + end(); + + EGLSurface result = mEgl10.eglCreatePixmapSurface(display, config, + native_pixmap, attrib_list); + returns(result); + checkError(); + return result; + } + + public EGLSurface eglCreateWindowSurface(EGLDisplay display, + EGLConfig config, Object native_window, int[] attrib_list) { + begin("eglCreateWindowSurface"); + arg("display", display); + arg("config", config); + arg("native_window", native_window); + arg("attrib_list", attrib_list); + end(); + + EGLSurface result = mEgl10.eglCreateWindowSurface(display, config, + native_window, attrib_list); + returns(result); + checkError(); + return result; + } + + public boolean 
eglDestroyContext(EGLDisplay display, EGLContext context) { + begin("eglDestroyContext"); + arg("display", display); + arg("context", context); + end(); + + boolean result = mEgl10.eglDestroyContext(display, context); + returns(result); + checkError(); + return result; + } + + public boolean eglDestroySurface(EGLDisplay display, EGLSurface surface) { + begin("eglDestroySurface"); + arg("display", display); + arg("surface", surface); + end(); + + boolean result = mEgl10.eglDestroySurface(display, surface); + returns(result); + checkError(); + return result; + } + + public boolean eglGetConfigAttrib(EGLDisplay display, EGLConfig config, + int attribute, int[] value) { + begin("eglGetConfigAttrib"); + arg("display", display); + arg("config", config); + arg("attribute", attribute); + end(); + boolean result = mEgl10.eglGetConfigAttrib(display, config, attribute, + value); + arg("value", value); + returns(result); + checkError(); + return false; + } + + public boolean eglGetConfigs(EGLDisplay display, EGLConfig[] configs, + int config_size, int[] num_config) { + begin("eglGetConfigs"); + arg("display", display); + arg("config_size", config_size); + end(); + + boolean result = mEgl10.eglGetConfigs(display, configs, config_size, + num_config); + arg("configs", configs); + arg("num_config", num_config); + returns(result); + checkError(); + return result; + } + + public EGLContext eglGetCurrentContext() { + begin("eglGetCurrentContext"); + end(); + + EGLContext result = mEgl10.eglGetCurrentContext(); + returns(result); + + checkError(); + return result; + } + + public EGLDisplay eglGetCurrentDisplay() { + begin("eglGetCurrentDisplay"); + end(); + + EGLDisplay result = mEgl10.eglGetCurrentDisplay(); + returns(result); + + checkError(); + return result; + } + + public EGLSurface eglGetCurrentSurface(int readdraw) { + begin("eglGetCurrentSurface"); + arg("readdraw", readdraw); + end(); + + EGLSurface result = mEgl10.eglGetCurrentSurface(readdraw); + returns(result); + + checkError(); + return result; + } + + public EGLDisplay eglGetDisplay(Object native_display) { + begin("eglGetDisplay"); + arg("native_display", native_display); + end(); + + EGLDisplay result = mEgl10.eglGetDisplay(native_display); + returns(result); + + checkError(); + return result; + } + + public int eglGetError() { + begin("eglGetError"); + end(); + + int result = mEgl10.eglGetError(); + returns(getErrorString(result)); + + return result; + } + + public boolean eglInitialize(EGLDisplay display, int[] major_minor) { + begin("eglInitialize"); + arg("display", display); + end(); + boolean result = mEgl10.eglInitialize(display, major_minor); + returns(result); + arg("major_minor", major_minor); + checkError(); + return result; + } + + public boolean eglMakeCurrent(EGLDisplay display, EGLSurface draw, + EGLSurface read, EGLContext context) { + begin("eglMakeCurrent"); + arg("display", display); + arg("draw", draw); + arg("read", read); + arg("context", context); + end(); + boolean result = mEgl10.eglMakeCurrent(display, draw, read, context); + returns(result); + checkError(); + return result; + } + + public boolean eglQueryContext(EGLDisplay display, EGLContext context, + int attribute, int[] value) { + begin("eglQueryContext"); + arg("display", display); + arg("context", context); + arg("attribute", attribute); + end(); + boolean result = mEgl10.eglQueryContext(display, context, attribute, + value); + returns(value[0]); + returns(result); + checkError(); + return result; + } + + public String eglQueryString(EGLDisplay display, int 
name) { + begin("eglQueryString"); + arg("display", display); + arg("name", name); + end(); + String result = mEgl10.eglQueryString(display, name); + returns(result); + checkError(); + return result; + } + + public boolean eglQuerySurface(EGLDisplay display, EGLSurface surface, + int attribute, int[] value) { + begin("eglQuerySurface"); + arg("display", display); + arg("surface", surface); + arg("attribute", attribute); + end(); + boolean result = mEgl10.eglQuerySurface(display, surface, attribute, + value); + returns(value[0]); + returns(result); + checkError(); + return result; + } + + public boolean eglSwapBuffers(EGLDisplay display, EGLSurface surface) { + begin("eglSwapBuffers"); + arg("display", display); + arg("surface", surface); + end(); + boolean result = mEgl10.eglSwapBuffers(display, surface); + returns(result); + checkError(); + return result; + } + + public boolean eglTerminate(EGLDisplay display) { + begin("eglTerminate"); + arg("display", display); + end(); + boolean result = mEgl10.eglTerminate(display); + returns(result); + checkError(); + return result; + } + + public boolean eglWaitGL() { + begin("eglWaitGL"); + end(); + boolean result = mEgl10.eglWaitGL(); + returns(result); + checkError(); + return result; + } + + public boolean eglWaitNative(int engine, Object bindTarget) { + begin("eglWaitNative"); + arg("engine", engine); + arg("bindTarget", bindTarget); + end(); + boolean result = mEgl10.eglWaitNative(engine, bindTarget); + returns(result); + checkError(); + return result; + } + + private void checkError() { + int eglError; + if ((eglError = mEgl10.eglGetError()) != EGL_SUCCESS) { + String errorMessage = "eglError: " + getErrorString(eglError); + logLine(errorMessage); + if (mCheckError) { + throw new GLException(eglError, errorMessage); + } + } + } + + private void logLine(String message) { + log(message + '\n'); + } + + private void log(String message) { + try { + mLog.write(message); + } catch (IOException e) { + // Ignore exception, keep on trying + } + } + + private void begin(String name) { + log(name + '('); + mArgCount = 0; + } + + private void arg(String name, String value) { + if (mArgCount++ > 0) { + log(", "); + } + if (mLogArgumentNames) { + log(name + "="); + } + log(value); + } + + private void end() { + log(");\n"); + flush(); + } + + private void flush() { + try { + mLog.flush(); + } catch (IOException e) { + mLog = null; + } + } + + private void arg(String name, int value) { + arg(name, Integer.toString(value)); + } + + private void arg(String name, Object object) { + arg(name, toString(object)); + } + + private void arg(String name, EGLDisplay object) { + if (object == EGL10.EGL_DEFAULT_DISPLAY) { + arg(name, "EGL10.EGL_DEFAULT_DISPLAY"); + } else if (object == EGL_NO_DISPLAY) { + arg(name, "EGL10.EGL_NO_DISPLAY"); + } else { + arg(name, toString(object)); + } + } + + private void arg(String name, EGLContext object) { + if (object == EGL10.EGL_NO_CONTEXT) { + arg(name, "EGL10.EGL_NO_CONTEXT"); + } else { + arg(name, toString(object)); + } + } + + private void arg(String name, EGLSurface object) { + if (object == EGL10.EGL_NO_SURFACE) { + arg(name, "EGL10.EGL_NO_SURFACE"); + } else { + arg(name, toString(object)); + } + } + + private void returns(String result) { + log(" returns " + result + ";\n"); + flush(); + } + + private void returns(int result) { + returns(Integer.toString(result)); + } + + private void returns(boolean result) { + returns(Boolean.toString(result)); + } + + private void returns(Object result) { + returns(toString(result)); + 
} + + private String toString(Object obj) { + if (obj == null) { + return "null"; + } else { + return obj.toString(); + } + } + + private void arg(String name, int[] arr) { + if (arr == null) { + arg(name, "null"); + } else { + arg(name, toString(arr.length, arr, 0)); + } + } + + private void arg(String name, Object[] arr) { + if (arr == null) { + arg(name, "null"); + } else { + arg(name, toString(arr.length, arr, 0)); + } + } + + private String toString(int n, int[] arr, int offset) { + StringBuilder buf = new StringBuilder(); + buf.append("{\n"); + int arrLen = arr.length; + for (int i = 0; i < n; i++) { + int index = offset + i; + buf.append(" [" + index + "] = "); + if (index < 0 || index >= arrLen) { + buf.append("out of bounds"); + } else { + buf.append(arr[index]); + } + buf.append('\n'); + } + buf.append("}"); + return buf.toString(); + } + + private String toString(int n, Object[] arr, int offset) { + StringBuilder buf = new StringBuilder(); + buf.append("{\n"); + int arrLen = arr.length; + for (int i = 0; i < n; i++) { + int index = offset + i; + buf.append(" [" + index + "] = "); + if (index < 0 || index >= arrLen) { + buf.append("out of bounds"); + } else { + buf.append(arr[index]); + } + buf.append('\n'); + } + buf.append("}"); + return buf.toString(); + } + + private static String getHex(int value) { + return "0x" + Integer.toHexString(value); + } + + public static String getErrorString(int error) { + switch (error) { + case EGL_SUCCESS: + return "EGL_SUCCESS"; + case EGL_NOT_INITIALIZED: + return "EGL_NOT_INITIALIZED"; + case EGL_BAD_ACCESS: + return "EGL_BAD_ACCESS"; + case EGL_BAD_ALLOC: + return "EGL_BAD_ALLOC"; + case EGL_BAD_ATTRIBUTE: + return "EGL_BAD_ATTRIBUTE"; + case EGL_BAD_CONFIG: + return "EGL_BAD_CONFIG"; + case EGL_BAD_CONTEXT: + return "EGL_BAD_CONTEXT"; + case EGL_BAD_CURRENT_SURFACE: + return "EGL_BAD_CURRENT_SURFACE"; + case EGL_BAD_DISPLAY: + return "EGL_BAD_DISPLAY"; + case EGL_BAD_MATCH: + return "EGL_BAD_MATCH"; + case EGL_BAD_NATIVE_PIXMAP: + return "EGL_BAD_NATIVE_PIXMAP"; + case EGL_BAD_NATIVE_WINDOW: + return "EGL_BAD_NATIVE_WINDOW"; + case EGL_BAD_PARAMETER: + return "EGL_BAD_PARAMETER"; + case EGL_BAD_SURFACE: + return "EGL_BAD_SURFACE"; + case EGL11.EGL_CONTEXT_LOST: + return "EGL_CONTEXT_LOST"; + default: + return getHex(error); + } + } +} diff --git a/platform/android/java/lib/src/org/godotengine/godot/gl/GLSurfaceView.java b/platform/android/java/lib/src/org/godotengine/godot/gl/GLSurfaceView.java new file mode 100644 index 0000000000..8449c08b88 --- /dev/null +++ b/platform/android/java/lib/src/org/godotengine/godot/gl/GLSurfaceView.java @@ -0,0 +1,1939 @@ +// clang-format off + +/* + * Copyright (C) 2008 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.godotengine.godot.gl; + +import android.content.Context; +import android.opengl.EGL14; +import android.opengl.EGLExt; +import android.opengl.GLDebugHelper; +import android.util.AttributeSet; +import android.util.Log; +import android.view.SurfaceHolder; +import android.view.SurfaceView; + +import java.io.Writer; +import java.lang.ref.WeakReference; +import java.util.ArrayList; + +import javax.microedition.khronos.egl.EGL10; +import javax.microedition.khronos.egl.EGL11; +import javax.microedition.khronos.egl.EGLConfig; +import javax.microedition.khronos.egl.EGLContext; +import javax.microedition.khronos.egl.EGLDisplay; +import javax.microedition.khronos.egl.EGLSurface; +import javax.microedition.khronos.opengles.GL; +import javax.microedition.khronos.opengles.GL10; + +/** + * An implementation of SurfaceView that uses the dedicated surface for + * displaying OpenGL rendering. + * <p> + * A GLSurfaceView provides the following features: + * <p> + * <ul> + * <li>Manages a surface, which is a special piece of memory that can be + * composited into the Android view system. + * <li>Manages an EGL display, which enables OpenGL to render into a surface. + * <li>Accepts a user-provided Renderer object that does the actual rendering. + * <li>Renders on a dedicated thread to decouple rendering performance from the + * UI thread. + * <li>Supports both on-demand and continuous rendering. + * <li>Optionally wraps, traces, and/or error-checks the renderer's OpenGL calls. + * </ul> + * + * <div class="special reference"> + * <h3>Developer Guides</h3> + * <p>For more information about how to use OpenGL, read the + * <a href="{@docRoot}guide/topics/graphics/opengl.html">OpenGL</a> developer guide.</p> + * </div> + * + * <h3>Using GLSurfaceView</h3> + * <p> + * Typically you use GLSurfaceView by subclassing it and overriding one or more of the + * View system input event methods. If your application does not need to override event + * methods then GLSurfaceView can be used as-is. For the most part + * GLSurfaceView behavior is customized by calling "set" methods rather than by subclassing. + * For example, unlike a regular View, drawing is delegated to a separate Renderer object which + * is registered with the GLSurfaceView + * using the {@link #setRenderer(Renderer)} call. + * <p> + * <h3>Initializing GLSurfaceView</h3> + * All you have to do to initialize a GLSurfaceView is call {@link #setRenderer(Renderer)}. + * However, if desired, you can modify the default behavior of GLSurfaceView by calling one or + * more of these methods before calling setRenderer: + * <ul> + * <li>{@link #setDebugFlags(int)} + * <li>{@link #setEGLConfigChooser(boolean)} + * <li>{@link #setEGLConfigChooser(EGLConfigChooser)} + * <li>{@link #setEGLConfigChooser(int, int, int, int, int, int)} + * <li>{@link #setGLWrapper(GLWrapper)} + * </ul> + * <p> + * <h4>Specifying the android.view.Surface</h4> + * By default GLSurfaceView will create a PixelFormat.RGB_888 format surface. If a translucent + * surface is required, call getHolder().setFormat(PixelFormat.TRANSLUCENT). + * The exact format of a TRANSLUCENT surface is device dependent, but it will be + * a 32-bit-per-pixel surface with 8 bits per component. + * <p> + * <h4>Choosing an EGL Configuration</h4> + * A given Android device may support multiple EGLConfig rendering configurations. + * The available configurations may differ in how many channels of data are present, as + * well as how many bits are allocated to each channel. 
Therefore, the first thing + * GLSurfaceView has to do when starting to render is choose what EGLConfig to use. + * <p> + * By default GLSurfaceView chooses a EGLConfig that has an RGB_888 pixel format, + * with at least a 16-bit depth buffer and no stencil. + * <p> + * If you would prefer a different EGLConfig + * you can override the default behavior by calling one of the + * setEGLConfigChooser methods. + * <p> + * <h4>Debug Behavior</h4> + * You can optionally modify the behavior of GLSurfaceView by calling + * one or more of the debugging methods {@link #setDebugFlags(int)}, + * and {@link #setGLWrapper}. These methods may be called before and/or after setRenderer, but + * typically they are called before setRenderer so that they take effect immediately. + * <p> + * <h4>Setting a Renderer</h4> + * Finally, you must call {@link #setRenderer} to register a {@link Renderer}. + * The renderer is + * responsible for doing the actual OpenGL rendering. + * <p> + * <h3>Rendering Mode</h3> + * Once the renderer is set, you can control whether the renderer draws + * continuously or on-demand by calling + * {@link #setRenderMode}. The default is continuous rendering. + * <p> + * <h3>Activity Life-cycle</h3> + * A GLSurfaceView must be notified when to pause and resume rendering. GLSurfaceView clients + * are required to call {@link #onPause()} when the activity stops and + * {@link #onResume()} when the activity starts. These calls allow GLSurfaceView to + * pause and resume the rendering thread, and also allow GLSurfaceView to release and recreate + * the OpenGL display. + * <p> + * <h3>Handling events</h3> + * <p> + * To handle an event you will typically subclass GLSurfaceView and override the + * appropriate method, just as you would with any other View. However, when handling + * the event, you may need to communicate with the Renderer object + * that's running in the rendering thread. You can do this using any + * standard Java cross-thread communication mechanism. In addition, + * one relatively easy way to communicate with your renderer is + * to call + * {@link #queueEvent(Runnable)}. For example: + * <pre class="prettyprint"> + * class MyGLSurfaceView extends GLSurfaceView { + * + * private MyRenderer mMyRenderer; + * + * public void start() { + * mMyRenderer = ...; + * setRenderer(mMyRenderer); + * } + * + * public boolean onKeyDown(int keyCode, KeyEvent event) { + * if (keyCode == KeyEvent.KEYCODE_DPAD_CENTER) { + * queueEvent(new Runnable() { + * // This method will be called on the rendering + * // thread: + * public void run() { + * mMyRenderer.handleDpadCenter(); + * }}); + * return true; + * } + * return super.onKeyDown(keyCode, event); + * } + * } + * </pre> + * + */ +public class GLSurfaceView extends SurfaceView implements SurfaceHolder.Callback2 { + private final static String TAG = "GLSurfaceView"; + private final static boolean LOG_ATTACH_DETACH = false; + private final static boolean LOG_THREADS = false; + private final static boolean LOG_PAUSE_RESUME = false; + private final static boolean LOG_SURFACE = false; + private final static boolean LOG_RENDERER = false; + private final static boolean LOG_RENDERER_DRAW_FRAME = false; + private final static boolean LOG_EGL = false; + /** + * The renderer only renders + * when the surface is created, or when {@link #requestRender} is called. 
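+ * <p>
+ * A minimal sketch of the on-demand pattern, assuming a view instance {@code glSurfaceView}
+ * and a hypothetical {@code MyRenderer} implementation (neither is part of this file):
+ * <pre class="prettyprint">
+ * // MyRenderer is a hypothetical Renderer implementation.
+ * glSurfaceView.setRenderer(new MyRenderer());
+ * glSurfaceView.setRenderMode(GLSurfaceView.RENDERMODE_WHEN_DIRTY); // after setRenderer
+ * // Later, whenever the scene actually changes:
+ * glSurfaceView.requestRender();
+ * </pre>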
+ * + * @see #getRenderMode() + * @see #setRenderMode(int) + * @see #requestRender() + */ + public final static int RENDERMODE_WHEN_DIRTY = 0; + /** + * The renderer is called + * continuously to re-render the scene. + * + * @see #getRenderMode() + * @see #setRenderMode(int) + */ + public final static int RENDERMODE_CONTINUOUSLY = 1; + + /** + * Check glError() after every GL call and throw an exception if glError indicates + * that an error has occurred. This can be used to help track down which OpenGL ES call + * is causing an error. + * + * @see #getDebugFlags + * @see #setDebugFlags + */ + public final static int DEBUG_CHECK_GL_ERROR = 1; + + /** + * Log GL calls to the system log at "verbose" level with tag "GLSurfaceView". + * + * @see #getDebugFlags + * @see #setDebugFlags + */ + public final static int DEBUG_LOG_GL_CALLS = 2; + + /** + * Standard View constructor. In order to render something, you + * must call {@link #setRenderer} to register a renderer. + */ + public GLSurfaceView(Context context) { + super(context); + init(); + } + + /** + * Standard View constructor. In order to render something, you + * must call {@link #setRenderer} to register a renderer. + */ + public GLSurfaceView(Context context, AttributeSet attrs) { + super(context, attrs); + init(); + } + + @Override + protected void finalize() throws Throwable { + try { + if (mGLThread != null) { + // GLThread may still be running if this view was never + // attached to a window. + mGLThread.requestExitAndWait(); + } + } finally { + super.finalize(); + } + } + + private void init() { + // Install a SurfaceHolder.Callback so we get notified when the + // underlying surface is created and destroyed + SurfaceHolder holder = getHolder(); + holder.addCallback(this); + // setFormat is done by SurfaceView in SDK 2.3 and newer. Uncomment + // this statement if back-porting to 2.2 or older: + // holder.setFormat(PixelFormat.RGB_565); + // + // setType is not needed for SDK 2.0 or newer. Uncomment this + // statement if back-porting this code to older SDKs. + // holder.setType(SurfaceHolder.SURFACE_TYPE_GPU); + } + + /** + * Set the glWrapper. If the glWrapper is not null, its + * {@link GLWrapper#wrap(GL)} method is called + * whenever a surface is created. A GLWrapper can be used to wrap + * the GL object that's passed to the renderer. Wrapping a GL + * object enables examining and modifying the behavior of the + * GL calls made by the renderer. + * <p> + * Wrapping is typically used for debugging purposes. + * <p> + * The default value is null. + * @param glWrapper the new GLWrapper + */ + public void setGLWrapper(GLWrapper glWrapper) { + mGLWrapper = glWrapper; + } + + /** + * Set the debug flags to a new value. The value is + * constructed by OR-together zero or more + * of the DEBUG_CHECK_* constants. The debug flags take effect + * whenever a surface is created. The default value is zero. + * @param debugFlags the new debug flags + * @see #DEBUG_CHECK_GL_ERROR + * @see #DEBUG_LOG_GL_CALLS + */ + public void setDebugFlags(int debugFlags) { + mDebugFlags = debugFlags; + } + + /** + * Get the current value of the debug flags. + * @return the current value of the debug flags. + */ + public int getDebugFlags() { + return mDebugFlags; + } + + /** + * Control whether the EGL context is preserved when the GLSurfaceView is paused and + * resumed. + * <p> + * If set to true, then the EGL context may be preserved when the GLSurfaceView is paused. 
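+ * <p>
+ * A minimal sketch, assuming a view instance {@code glSurfaceView} (not part of this file):
+ * <pre class="prettyprint">
+ * glSurfaceView.setPreserveEGLContextOnPause(true); // keep the EGL context across pause/resume
+ * </pre>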
+ * <p> + * Prior to API level 11, whether the EGL context is actually preserved or not + * depends upon whether the Android device can support an arbitrary number of + * EGL contexts or not. Devices that can only support a limited number of EGL + * contexts must release the EGL context in order to allow multiple applications + * to share the GPU. + * <p> + * If set to false, the EGL context will be released when the GLSurfaceView is paused, + * and recreated when the GLSurfaceView is resumed. + * <p> + * + * The default is false. + * + * @param preserveOnPause preserve the EGL context when paused + */ + public void setPreserveEGLContextOnPause(boolean preserveOnPause) { + mPreserveEGLContextOnPause = preserveOnPause; + } + + /** + * @return true if the EGL context will be preserved when paused + */ + public boolean getPreserveEGLContextOnPause() { + return mPreserveEGLContextOnPause; + } + + /** + * Set the renderer associated with this view. Also starts the thread that + * will call the renderer, which in turn causes the rendering to start. + * <p>This method should be called once and only once in the life-cycle of + * a GLSurfaceView. + * <p>The following GLSurfaceView methods can only be called <em>before</em> + * setRenderer is called: + * <ul> + * <li>{@link #setEGLConfigChooser(boolean)} + * <li>{@link #setEGLConfigChooser(EGLConfigChooser)} + * <li>{@link #setEGLConfigChooser(int, int, int, int, int, int)} + * </ul> + * <p> + * The following GLSurfaceView methods can only be called <em>after</em> + * setRenderer is called: + * <ul> + * <li>{@link #getRenderMode()} + * <li>{@link #onPause()} + * <li>{@link #onResume()} + * <li>{@link #queueEvent(Runnable)} + * <li>{@link #requestRender()} + * <li>{@link #setRenderMode(int)} + * </ul> + * + * @param renderer the renderer to use to perform OpenGL drawing. + */ + public void setRenderer(Renderer renderer) { + checkRenderThreadState(); + if (mEGLConfigChooser == null) { + mEGLConfigChooser = new SimpleEGLConfigChooser(true); + } + if (mEGLContextFactory == null) { + mEGLContextFactory = new DefaultContextFactory(); + } + if (mEGLWindowSurfaceFactory == null) { + mEGLWindowSurfaceFactory = new DefaultWindowSurfaceFactory(); + } + mRenderer = renderer; + mGLThread = new GLThread(mThisWeakRef); + mGLThread.start(); + } + + /** + * Install a custom EGLContextFactory. + * <p>If this method is + * called, it must be called before {@link #setRenderer(Renderer)} + * is called. + * <p> + * If this method is not called, then by default + * a context will be created with no shared context and + * with a null attribute list. + */ + public void setEGLContextFactory(EGLContextFactory factory) { + checkRenderThreadState(); + mEGLContextFactory = factory; + } + + /** + * Install a custom EGLWindowSurfaceFactory. + * <p>If this method is + * called, it must be called before {@link #setRenderer(Renderer)} + * is called. + * <p> + * If this method is not called, then by default + * a window surface will be created with a null attribute list. + */ + public void setEGLWindowSurfaceFactory(EGLWindowSurfaceFactory factory) { + checkRenderThreadState(); + mEGLWindowSurfaceFactory = factory; + } + + /** + * Install a custom EGLConfigChooser. + * <p>If this method is + * called, it must be called before {@link #setRenderer(Renderer)} + * is called. 
+ * <p> + * If no setEGLConfigChooser method is called, then by default the + * view will choose an EGLConfig that is compatible with the current + * android.view.Surface, with a depth buffer depth of + * at least 16 bits. + * @param configChooser + */ + public void setEGLConfigChooser(EGLConfigChooser configChooser) { + checkRenderThreadState(); + mEGLConfigChooser = configChooser; + } + + /** + * Install a config chooser which will choose a config + * as close to 16-bit RGB as possible, with or without an optional depth + * buffer as close to 16-bits as possible. + * <p>If this method is + * called, it must be called before {@link #setRenderer(Renderer)} + * is called. + * <p> + * If no setEGLConfigChooser method is called, then by default the + * view will choose an RGB_888 surface with a depth buffer depth of + * at least 16 bits. + * + * @param needDepth + */ + public void setEGLConfigChooser(boolean needDepth) { + setEGLConfigChooser(new SimpleEGLConfigChooser(needDepth)); + } + + /** + * Install a config chooser which will choose a config + * with at least the specified depthSize and stencilSize, + * and exactly the specified redSize, greenSize, blueSize and alphaSize. + * <p>If this method is + * called, it must be called before {@link #setRenderer(Renderer)} + * is called. + * <p> + * If no setEGLConfigChooser method is called, then by default the + * view will choose an RGB_888 surface with a depth buffer depth of + * at least 16 bits. + * + */ + public void setEGLConfigChooser(int redSize, int greenSize, int blueSize, + int alphaSize, int depthSize, int stencilSize) { + setEGLConfigChooser(new ComponentSizeChooser(redSize, greenSize, + blueSize, alphaSize, depthSize, stencilSize)); + } + + /** + * Inform the default EGLContextFactory and default EGLConfigChooser + * which EGLContext client version to pick. + * <p>Use this method to create an OpenGL ES 2.0-compatible context. + * Example: + * <pre class="prettyprint"> + * public MyView(Context context) { + * super(context); + * setEGLContextClientVersion(2); // Pick an OpenGL ES 2.0 context. + * setRenderer(new MyRenderer()); + * } + * </pre> + * <p>Note: Activities which require OpenGL ES 2.0 should indicate this by + * setting @lt;uses-feature android:glEsVersion="0x00020000" /> in the activity's + * AndroidManifest.xml file. + * <p>If this method is called, it must be called before {@link #setRenderer(Renderer)} + * is called. + * <p>This method only affects the behavior of the default EGLContexFactory and the + * default EGLConfigChooser. If + * {@link #setEGLContextFactory(EGLContextFactory)} has been called, then the supplied + * EGLContextFactory is responsible for creating an OpenGL ES 2.0-compatible context. + * If + * {@link #setEGLConfigChooser(EGLConfigChooser)} has been called, then the supplied + * EGLConfigChooser is responsible for choosing an OpenGL ES 2.0-compatible config. + * @param version The EGLContext client version to choose. Use 2 for OpenGL ES 2.0 + */ + public void setEGLContextClientVersion(int version) { + checkRenderThreadState(); + mEGLContextClientVersion = version; + } + + /** + * Set the rendering mode. When renderMode is + * RENDERMODE_CONTINUOUSLY, the renderer is called + * repeatedly to re-render the scene. When renderMode + * is RENDERMODE_WHEN_DIRTY, the renderer only rendered when the surface + * is created, or when {@link #requestRender} is called. Defaults to RENDERMODE_CONTINUOUSLY. 
+ * <p> + * Using RENDERMODE_WHEN_DIRTY can improve battery life and overall system performance + * by allowing the GPU and CPU to idle when the view does not need to be updated. + * <p> + * This method can only be called after {@link #setRenderer(Renderer)} + * + * @param renderMode one of the RENDERMODE_X constants + * @see #RENDERMODE_CONTINUOUSLY + * @see #RENDERMODE_WHEN_DIRTY + */ + public void setRenderMode(int renderMode) { + mGLThread.setRenderMode(renderMode); + } + + /** + * Get the current rendering mode. May be called + * from any thread. Must not be called before a renderer has been set. + * @return the current rendering mode. + * @see #RENDERMODE_CONTINUOUSLY + * @see #RENDERMODE_WHEN_DIRTY + */ + public int getRenderMode() { + return mGLThread.getRenderMode(); + } + + /** + * Request that the renderer render a frame. + * This method is typically used when the render mode has been set to + * {@link #RENDERMODE_WHEN_DIRTY}, so that frames are only rendered on demand. + * May be called + * from any thread. Must not be called before a renderer has been set. + */ + public void requestRender() { + mGLThread.requestRender(); + } + + /** + * This method is part of the SurfaceHolder.Callback interface, and is + * not normally called or subclassed by clients of GLSurfaceView. + */ + public void surfaceCreated(SurfaceHolder holder) { + mGLThread.surfaceCreated(); + } + + /** + * This method is part of the SurfaceHolder.Callback interface, and is + * not normally called or subclassed by clients of GLSurfaceView. + */ + public void surfaceDestroyed(SurfaceHolder holder) { + // Surface will be destroyed when we return + mGLThread.surfaceDestroyed(); + } + + /** + * This method is part of the SurfaceHolder.Callback interface, and is + * not normally called or subclassed by clients of GLSurfaceView. + */ + public void surfaceChanged(SurfaceHolder holder, int format, int w, int h) { + mGLThread.onWindowResize(w, h); + } + + /** + * This method is part of the SurfaceHolder.Callback2 interface, and is + * not normally called or subclassed by clients of GLSurfaceView. + */ + @Override + public void surfaceRedrawNeededAsync(SurfaceHolder holder, Runnable finishDrawing) { + if (mGLThread != null) { + mGLThread.requestRenderAndNotify(finishDrawing); + } + } + + /** + * This method is part of the SurfaceHolder.Callback2 interface, and is + * not normally called or subclassed by clients of GLSurfaceView. + */ + @Deprecated + @Override + public void surfaceRedrawNeeded(SurfaceHolder holder) { + // Since we are part of the framework we know only surfaceRedrawNeededAsync + // will be called. + } + + + /** + * Pause the rendering thread, optionally tearing down the EGL context + * depending upon the value of {@link #setPreserveEGLContextOnPause(boolean)}. + * + * This method should be called when it is no longer desirable for the + * GLSurfaceView to continue rendering, such as in response to + * {@link android.app.Activity#onStop Activity.onStop}. + * + * Must not be called before a renderer has been set. + */ + public void onPause() { + mGLThread.onPause(); + } + + /** + * Resumes the rendering thread, re-creating the OpenGL context if necessary. It + * is the counterpart to {@link #onPause()}. + * + * This method should typically be called in + * {@link android.app.Activity#onStart Activity.onStart}. + * + * Must not be called before a renderer has been set. + */ + public void onResume() { + mGLThread.onResume(); + } + + /** + * Queue a runnable to be run on the GL rendering thread. 
This can be used + * to communicate with the Renderer on the rendering thread. + * Must not be called before a renderer has been set. + * @param r the runnable to be run on the GL rendering thread. + */ + public void queueEvent(Runnable r) { + mGLThread.queueEvent(r); + } + + /** + * This method is used as part of the View class and is not normally + * called or subclassed by clients of GLSurfaceView. + */ + @Override + protected void onAttachedToWindow() { + super.onAttachedToWindow(); + if (LOG_ATTACH_DETACH) { + Log.d(TAG, "onAttachedToWindow reattach =" + mDetached); + } + if (mDetached && (mRenderer != null)) { + int renderMode = RENDERMODE_CONTINUOUSLY; + if (mGLThread != null) { + renderMode = mGLThread.getRenderMode(); + } + mGLThread = new GLThread(mThisWeakRef); + if (renderMode != RENDERMODE_CONTINUOUSLY) { + mGLThread.setRenderMode(renderMode); + } + mGLThread.start(); + } + mDetached = false; + } + + @Override + protected void onDetachedFromWindow() { + if (LOG_ATTACH_DETACH) { + Log.d(TAG, "onDetachedFromWindow"); + } + if (mGLThread != null) { + mGLThread.requestExitAndWait(); + } + mDetached = true; + super.onDetachedFromWindow(); + } + + // ---------------------------------------------------------------------- + + /** + * An interface used to wrap a GL interface. + * <p>Typically + * used for implementing debugging and tracing on top of the default + * GL interface. You would typically use this by creating your own class + * that implemented all the GL methods by delegating to another GL instance. + * Then you could add your own behavior before or after calling the + * delegate. All the GLWrapper would do was instantiate and return the + * wrapper GL instance: + * <pre class="prettyprint"> + * class MyGLWrapper implements GLWrapper { + * GL wrap(GL gl) { + * return new MyGLImplementation(gl); + * } + * static class MyGLImplementation implements GL,GL10,GL11,... { + * ... + * } + * } + * </pre> + * @see #setGLWrapper(GLWrapper) + */ + public interface GLWrapper { + /** + * Wraps a gl interface in another gl interface. + * @param gl a GL interface that is to be wrapped. + * @return either the input argument or another GL object that wraps the input argument. + */ + GL wrap(GL gl); + } + + /** + * A generic renderer interface. + * <p> + * The renderer is responsible for making OpenGL calls to render a frame. + * <p> + * GLSurfaceView clients typically create their own classes that implement + * this interface, and then call {@link GLSurfaceView#setRenderer} to + * register the renderer with the GLSurfaceView. + * <p> + * + * <div class="special reference"> + * <h3>Developer Guides</h3> + * <p>For more information about how to use OpenGL, read the + * <a href="{@docRoot}guide/topics/graphics/opengl.html">OpenGL</a> developer guide.</p> + * </div> + * + * <h3>Threading</h3> + * The renderer will be called on a separate thread, so that rendering + * performance is decoupled from the UI thread. Clients typically need to + * communicate with the renderer from the UI thread, because that's where + * input events are received. Clients can communicate using any of the + * standard Java techniques for cross-thread communication, or they can + * use the {@link GLSurfaceView#queueEvent(Runnable)} convenience method. + * <p> + * <h3>EGL Context Lost</h3> + * There are situations where the EGL rendering context will be lost. This + * typically happens when device wakes up after going to sleep. 
When + * the EGL context is lost, all OpenGL resources (such as textures) that are + * associated with that context will be automatically deleted. In order to + * keep rendering correctly, a renderer must recreate any lost resources + * that it still needs. The {@link #onSurfaceCreated(GL10, EGLConfig)} method + * is a convenient place to do this. + * + * + * @see #setRenderer(Renderer) + */ + public interface Renderer { + /** + * Called when the surface is created or recreated. + * <p> + * Called when the rendering thread + * starts and whenever the EGL context is lost. The EGL context will typically + * be lost when the Android device awakes after going to sleep. + * <p> + * Since this method is called at the beginning of rendering, as well as + * every time the EGL context is lost, this method is a convenient place to put + * code to create resources that need to be created when the rendering + * starts, and that need to be recreated when the EGL context is lost. + * Textures are an example of a resource that you might want to create + * here. + * <p> + * Note that when the EGL context is lost, all OpenGL resources associated + * with that context will be automatically deleted. You do not need to call + * the corresponding "glDelete" methods such as glDeleteTextures to + * manually delete these lost resources. + * <p> + * @param gl the GL interface. Use <code>instanceof</code> to + * test if the interface supports GL11 or higher interfaces. + * @param config the EGLConfig of the created surface. Can be used + * to create matching pbuffers. + */ + void onSurfaceCreated(GL10 gl, EGLConfig config); + + /** + * Called when the surface changed size. + * <p> + * Called after the surface is created and whenever + * the OpenGL ES surface size changes. + * <p> + * Typically you will set your viewport here. If your camera + * is fixed then you could also set your projection matrix here: + * <pre class="prettyprint"> + * void onSurfaceChanged(GL10 gl, int width, int height) { + * gl.glViewport(0, 0, width, height); + * // for a fixed camera, set the projection too + * float ratio = (float) width / height; + * gl.glMatrixMode(GL10.GL_PROJECTION); + * gl.glLoadIdentity(); + * gl.glFrustumf(-ratio, ratio, -1, 1, 1, 10); + * } + * </pre> + * @param gl the GL interface. Use <code>instanceof</code> to + * test if the interface supports GL11 or higher interfaces. + * @param width + * @param height + */ + void onSurfaceChanged(GL10 gl, int width, int height); + + // -- GODOT start -- + /** + * Called to draw the current frame. + * <p> + * This method is responsible for drawing the current frame. + * <p> + * The implementation of this method typically looks like this: + * <pre class="prettyprint"> + * boolean onDrawFrame(GL10 gl) { + * gl.glClear(GL10.GL_COLOR_BUFFER_BIT | GL10.GL_DEPTH_BUFFER_BIT); + * //... other gl calls to render the scene ... + * return true; + * } + * </pre> + * @param gl the GL interface. Use <code>instanceof</code> to + * test if the interface supports GL11 or higher interfaces. + * + * @return true if the buffers should be swapped, false otherwise. + */ + boolean onDrawFrame(GL10 gl); + // -- GODOT end -- + } + + /** + * An interface for customizing the eglCreateContext and eglDestroyContext calls. 
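+ * <p>
+ * A minimal sketch of a factory that always requests an OpenGL ES 2.0 context. The attribute
+ * key 0x3098 is EGL_CONTEXT_CLIENT_VERSION, mirroring {@code DefaultContextFactory} below;
+ * {@code Es2ContextFactory} is a made-up name used only for illustration.
+ * <pre class="prettyprint">
+ * class Es2ContextFactory implements GLSurfaceView.EGLContextFactory {
+ *     public EGLContext createContext(EGL10 egl, EGLDisplay display, EGLConfig config) {
+ *         // 0x3098 == EGL_CONTEXT_CLIENT_VERSION; assumes an ES 2.0 context is wanted.
+ *         int[] attribList = { 0x3098, 2, EGL10.EGL_NONE };
+ *         return egl.eglCreateContext(display, config, EGL10.EGL_NO_CONTEXT, attribList);
+ *     }
+ *     public void destroyContext(EGL10 egl, EGLDisplay display, EGLContext context) {
+ *         egl.eglDestroyContext(display, context);
+ *     }
+ * }
+ * </pre>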
+ * <p> + * This interface must be implemented by clients wishing to call + * {@link GLSurfaceView#setEGLContextFactory(EGLContextFactory)} + */ + public interface EGLContextFactory { + EGLContext createContext(EGL10 egl, EGLDisplay display, EGLConfig eglConfig); + void destroyContext(EGL10 egl, EGLDisplay display, EGLContext context); + } + + private class DefaultContextFactory implements EGLContextFactory { + private int EGL_CONTEXT_CLIENT_VERSION = 0x3098; + + public EGLContext createContext(EGL10 egl, EGLDisplay display, EGLConfig config) { + int[] attrib_list = {EGL_CONTEXT_CLIENT_VERSION, mEGLContextClientVersion, + EGL10.EGL_NONE }; + + return egl.eglCreateContext(display, config, EGL10.EGL_NO_CONTEXT, + mEGLContextClientVersion != 0 ? attrib_list : null); + } + + public void destroyContext(EGL10 egl, EGLDisplay display, + EGLContext context) { + if (!egl.eglDestroyContext(display, context)) { + Log.e("DefaultContextFactory", "display:" + display + " context: " + context); + if (LOG_THREADS) { + Log.i("DefaultContextFactory", "tid=" + Thread.currentThread().getId()); + } + EglHelper.throwEglException("eglDestroyContex", egl.eglGetError()); + } + } + } + + /** + * An interface for customizing the eglCreateWindowSurface and eglDestroySurface calls. + * <p> + * This interface must be implemented by clients wishing to call + * {@link GLSurfaceView#setEGLWindowSurfaceFactory(EGLWindowSurfaceFactory)} + */ + public interface EGLWindowSurfaceFactory { + /** + * @return null if the surface cannot be constructed. + */ + EGLSurface createWindowSurface(EGL10 egl, EGLDisplay display, EGLConfig config, + Object nativeWindow); + void destroySurface(EGL10 egl, EGLDisplay display, EGLSurface surface); + } + + private static class DefaultWindowSurfaceFactory implements EGLWindowSurfaceFactory { + + public EGLSurface createWindowSurface(EGL10 egl, EGLDisplay display, + EGLConfig config, Object nativeWindow) { + EGLSurface result = null; + try { + result = egl.eglCreateWindowSurface(display, config, nativeWindow, null); + } catch (IllegalArgumentException e) { + // This exception indicates that the surface flinger surface + // is not valid. This can happen if the surface flinger surface has + // been torn down, but the application has not yet been + // notified via SurfaceHolder.Callback.surfaceDestroyed. + // In theory the application should be notified first, + // but in practice sometimes it is not. See b/4588890 + Log.e(TAG, "eglCreateWindowSurface", e); + } + return result; + } + + public void destroySurface(EGL10 egl, EGLDisplay display, + EGLSurface surface) { + egl.eglDestroySurface(display, surface); + } + } + + /** + * An interface for choosing an EGLConfig configuration from a list of + * potential configurations. + * <p> + * This interface must be implemented by clients wishing to call + * {@link GLSurfaceView#setEGLConfigChooser(EGLConfigChooser)} + */ + public interface EGLConfigChooser { + /** + * Choose a configuration from the list. Implementors typically + * implement this method by calling + * {@link EGL10#eglChooseConfig} and iterating through the results. Please consult the + * EGL specification available from The Khronos Group to learn how to call eglChooseConfig. + * @param egl the EGL10 for the current display. + * @param display the current display. + * @return the chosen configuration. 
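+ * <p>
+ * A minimal sketch of an implementation that simply returns the first matching config,
+ * assuming a hypothetical attribute array {@code CONFIG_SPEC} (not part of this file):
+ * <pre class="prettyprint">
+ * public EGLConfig chooseConfig(EGL10 egl, EGLDisplay display) {
+ *     int[] numConfig = new int[1]; // CONFIG_SPEC is an assumed EGL attribute list
+ *     if (!egl.eglChooseConfig(display, CONFIG_SPEC, null, 0, numConfig) || numConfig[0] == 0) {
+ *         throw new IllegalArgumentException("eglChooseConfig failed");
+ *     }
+ *     EGLConfig[] configs = new EGLConfig[numConfig[0]];
+ *     egl.eglChooseConfig(display, CONFIG_SPEC, configs, configs.length, numConfig);
+ *     return configs[0];
+ * }
+ * </pre>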
+ */ + EGLConfig chooseConfig(EGL10 egl, EGLDisplay display); + } + + private abstract class BaseConfigChooser + implements EGLConfigChooser { + public BaseConfigChooser(int[] configSpec) { + mConfigSpec = filterConfigSpec(configSpec); + } + + public EGLConfig chooseConfig(EGL10 egl, EGLDisplay display) { + int[] num_config = new int[1]; + if (!egl.eglChooseConfig(display, mConfigSpec, null, 0, + num_config)) { + throw new IllegalArgumentException("eglChooseConfig failed"); + } + + int numConfigs = num_config[0]; + + if (numConfigs <= 0) { + throw new IllegalArgumentException( + "No configs match configSpec"); + } + + EGLConfig[] configs = new EGLConfig[numConfigs]; + if (!egl.eglChooseConfig(display, mConfigSpec, configs, numConfigs, + num_config)) { + throw new IllegalArgumentException("eglChooseConfig#2 failed"); + } + EGLConfig config = chooseConfig(egl, display, configs); + if (config == null) { + throw new IllegalArgumentException("No config chosen"); + } + return config; + } + + abstract EGLConfig chooseConfig(EGL10 egl, EGLDisplay display, + EGLConfig[] configs); + + protected int[] mConfigSpec; + + private int[] filterConfigSpec(int[] configSpec) { + if (mEGLContextClientVersion != 2 && mEGLContextClientVersion != 3) { + return configSpec; + } + /* We know none of the subclasses define EGL_RENDERABLE_TYPE. + * And we know the configSpec is well formed. + */ + int len = configSpec.length; + int[] newConfigSpec = new int[len + 2]; + System.arraycopy(configSpec, 0, newConfigSpec, 0, len-1); + newConfigSpec[len-1] = EGL10.EGL_RENDERABLE_TYPE; + if (mEGLContextClientVersion == 2) { + newConfigSpec[len] = EGL14.EGL_OPENGL_ES2_BIT; /* EGL_OPENGL_ES2_BIT */ + } else { + newConfigSpec[len] = EGLExt.EGL_OPENGL_ES3_BIT_KHR; /* EGL_OPENGL_ES3_BIT_KHR */ + } + newConfigSpec[len+1] = EGL10.EGL_NONE; + return newConfigSpec; + } + } + + /** + * Choose a configuration with exactly the specified r,g,b,a sizes, + * and at least the specified depth and stencil sizes. 
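+ * <p>
+ * A minimal sketch: callers reach this chooser through the public six-argument
+ * setEGLConfigChooser overload, assuming a view instance {@code glSurfaceView}, e.g.
+ * an RGBA8 surface with a 24-bit depth buffer and no stencil:
+ * <pre class="prettyprint">
+ * // red, green, blue, alpha, depth, stencil; must be called before setRenderer().
+ * glSurfaceView.setEGLConfigChooser(8, 8, 8, 8, 24, 0);
+ * </pre>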
+ */ + private class ComponentSizeChooser extends BaseConfigChooser { + public ComponentSizeChooser(int redSize, int greenSize, int blueSize, + int alphaSize, int depthSize, int stencilSize) { + super(new int[] { + EGL10.EGL_RED_SIZE, redSize, + EGL10.EGL_GREEN_SIZE, greenSize, + EGL10.EGL_BLUE_SIZE, blueSize, + EGL10.EGL_ALPHA_SIZE, alphaSize, + EGL10.EGL_DEPTH_SIZE, depthSize, + EGL10.EGL_STENCIL_SIZE, stencilSize, + EGL10.EGL_NONE}); + mValue = new int[1]; + mRedSize = redSize; + mGreenSize = greenSize; + mBlueSize = blueSize; + mAlphaSize = alphaSize; + mDepthSize = depthSize; + mStencilSize = stencilSize; + } + + @Override + public EGLConfig chooseConfig(EGL10 egl, EGLDisplay display, + EGLConfig[] configs) { + for (EGLConfig config : configs) { + int d = findConfigAttrib(egl, display, config, + EGL10.EGL_DEPTH_SIZE, 0); + int s = findConfigAttrib(egl, display, config, + EGL10.EGL_STENCIL_SIZE, 0); + if ((d >= mDepthSize) && (s >= mStencilSize)) { + int r = findConfigAttrib(egl, display, config, + EGL10.EGL_RED_SIZE, 0); + int g = findConfigAttrib(egl, display, config, + EGL10.EGL_GREEN_SIZE, 0); + int b = findConfigAttrib(egl, display, config, + EGL10.EGL_BLUE_SIZE, 0); + int a = findConfigAttrib(egl, display, config, + EGL10.EGL_ALPHA_SIZE, 0); + if ((r == mRedSize) && (g == mGreenSize) + && (b == mBlueSize) && (a == mAlphaSize)) { + return config; + } + } + } + return null; + } + + private int findConfigAttrib(EGL10 egl, EGLDisplay display, + EGLConfig config, int attribute, int defaultValue) { + + if (egl.eglGetConfigAttrib(display, config, attribute, mValue)) { + return mValue[0]; + } + return defaultValue; + } + + private int[] mValue; + // Subclasses can adjust these values: + protected int mRedSize; + protected int mGreenSize; + protected int mBlueSize; + protected int mAlphaSize; + protected int mDepthSize; + protected int mStencilSize; + } + + /** + * This class will choose a RGB_888 surface with + * or without a depth buffer. + * + */ + private class SimpleEGLConfigChooser extends ComponentSizeChooser { + public SimpleEGLConfigChooser(boolean withDepthBuffer) { + super(8, 8, 8, 0, withDepthBuffer ? 16 : 0, 0); + } + } + + /** + * An EGL helper class. + */ + + private static class EglHelper { + public EglHelper(WeakReference<GLSurfaceView> glSurfaceViewWeakRef) { + mGLSurfaceViewWeakRef = glSurfaceViewWeakRef; + } + + /** + * Initialize EGL for a given configuration spec. + */ + public void start() { + if (LOG_EGL) { + Log.w("EglHelper", "start() tid=" + Thread.currentThread().getId()); + } + /* + * Get an EGL instance + */ + mEgl = (EGL10) EGLContext.getEGL(); + + /* + * Get to the default display. + */ + mEglDisplay = mEgl.eglGetDisplay(EGL10.EGL_DEFAULT_DISPLAY); + + if (mEglDisplay == EGL10.EGL_NO_DISPLAY) { + throw new RuntimeException("eglGetDisplay failed"); + } + + /* + * We can now initialize EGL for that display + */ + int[] version = new int[2]; + if(!mEgl.eglInitialize(mEglDisplay, version)) { + throw new RuntimeException("eglInitialize failed"); + } + GLSurfaceView view = mGLSurfaceViewWeakRef.get(); + if (view == null) { + mEglConfig = null; + mEglContext = null; + } else { + mEglConfig = view.mEGLConfigChooser.chooseConfig(mEgl, mEglDisplay); + + /* + * Create an EGL context. We want to do this as rarely as we can, because an + * EGL context is a somewhat heavy object. 
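+ * (The factory can fail and hand back null or EGL_NO_CONTEXT; the check just below
+ * this comment turns that case into a thrown EGL exception.)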
+ */ + mEglContext = view.mEGLContextFactory.createContext(mEgl, mEglDisplay, mEglConfig); + } + if (mEglContext == null || mEglContext == EGL10.EGL_NO_CONTEXT) { + mEglContext = null; + throwEglException("createContext"); + } + if (LOG_EGL) { + Log.w("EglHelper", "createContext " + mEglContext + " tid=" + Thread.currentThread().getId()); + } + + mEglSurface = null; + } + + /** + * Create an egl surface for the current SurfaceHolder surface. If a surface + * already exists, destroy it before creating the new surface. + * + * @return true if the surface was created successfully. + */ + public boolean createSurface() { + if (LOG_EGL) { + Log.w("EglHelper", "createSurface() tid=" + Thread.currentThread().getId()); + } + /* + * Check preconditions. + */ + if (mEgl == null) { + throw new RuntimeException("egl not initialized"); + } + if (mEglDisplay == null) { + throw new RuntimeException("eglDisplay not initialized"); + } + if (mEglConfig == null) { + throw new RuntimeException("mEglConfig not initialized"); + } + + /* + * The window size has changed, so we need to create a new + * surface. + */ + destroySurfaceImp(); + + /* + * Create an EGL surface we can render into. + */ + GLSurfaceView view = mGLSurfaceViewWeakRef.get(); + if (view != null) { + mEglSurface = view.mEGLWindowSurfaceFactory.createWindowSurface(mEgl, + mEglDisplay, mEglConfig, view.getHolder()); + } else { + mEglSurface = null; + } + + if (mEglSurface == null || mEglSurface == EGL10.EGL_NO_SURFACE) { + int error = mEgl.eglGetError(); + if (error == EGL10.EGL_BAD_NATIVE_WINDOW) { + Log.e("EglHelper", "createWindowSurface returned EGL_BAD_NATIVE_WINDOW."); + } + return false; + } + + /* + * Before we can issue GL commands, we need to make sure + * the context is current and bound to a surface. + */ + if (!mEgl.eglMakeCurrent(mEglDisplay, mEglSurface, mEglSurface, mEglContext)) { + /* + * Could not make the context current, probably because the underlying + * SurfaceView surface has been destroyed. + */ + logEglErrorAsWarning("EGLHelper", "eglMakeCurrent", mEgl.eglGetError()); + return false; + } + + return true; + } + + /** + * Create a GL object for the current EGL context. + * @return + */ + GL createGL() { + + GL gl = mEglContext.getGL(); + GLSurfaceView view = mGLSurfaceViewWeakRef.get(); + if (view != null) { + if (view.mGLWrapper != null) { + gl = view.mGLWrapper.wrap(gl); + } + + if ((view.mDebugFlags & (DEBUG_CHECK_GL_ERROR | DEBUG_LOG_GL_CALLS)) != 0) { + int configFlags = 0; + Writer log = null; + if ((view.mDebugFlags & DEBUG_CHECK_GL_ERROR) != 0) { + configFlags |= GLDebugHelper.CONFIG_CHECK_GL_ERROR; + } + if ((view.mDebugFlags & DEBUG_LOG_GL_CALLS) != 0) { + log = new LogWriter(); + } + gl = GLDebugHelper.wrap(gl, configFlags, log); + } + } + return gl; + } + + /** + * Display the current render surface. + * @return the EGL error code from eglSwapBuffers. + */ + public int swap() { + if (! 
mEgl.eglSwapBuffers(mEglDisplay, mEglSurface)) { + return mEgl.eglGetError(); + } + return EGL10.EGL_SUCCESS; + } + + public void destroySurface() { + if (LOG_EGL) { + Log.w("EglHelper", "destroySurface() tid=" + Thread.currentThread().getId()); + } + destroySurfaceImp(); + } + + private void destroySurfaceImp() { + if (mEglSurface != null && mEglSurface != EGL10.EGL_NO_SURFACE) { + mEgl.eglMakeCurrent(mEglDisplay, EGL10.EGL_NO_SURFACE, + EGL10.EGL_NO_SURFACE, + EGL10.EGL_NO_CONTEXT); + GLSurfaceView view = mGLSurfaceViewWeakRef.get(); + if (view != null) { + view.mEGLWindowSurfaceFactory.destroySurface(mEgl, mEglDisplay, mEglSurface); + } + mEglSurface = null; + } + } + + public void finish() { + if (LOG_EGL) { + Log.w("EglHelper", "finish() tid=" + Thread.currentThread().getId()); + } + if (mEglContext != null) { + GLSurfaceView view = mGLSurfaceViewWeakRef.get(); + if (view != null) { + view.mEGLContextFactory.destroyContext(mEgl, mEglDisplay, mEglContext); + } + mEglContext = null; + } + if (mEglDisplay != null) { + mEgl.eglTerminate(mEglDisplay); + mEglDisplay = null; + } + } + + private void throwEglException(String function) { + throwEglException(function, mEgl.eglGetError()); + } + + public static void throwEglException(String function, int error) { + String message = formatEglError(function, error); + if (LOG_THREADS) { + Log.e("EglHelper", "throwEglException tid=" + Thread.currentThread().getId() + " " + + message); + } + throw new RuntimeException(message); + } + + public static void logEglErrorAsWarning(String tag, String function, int error) { + Log.w(tag, formatEglError(function, error)); + } + + public static String formatEglError(String function, int error) { + return function + " failed: " + EGLLogWrapper.getErrorString(error); + } + + private WeakReference<GLSurfaceView> mGLSurfaceViewWeakRef; + EGL10 mEgl; + EGLDisplay mEglDisplay; + EGLSurface mEglSurface; + EGLConfig mEglConfig; + EGLContext mEglContext; + + } + + /** + * A generic GL Thread. Takes care of initializing EGL and GL. Delegates + * to a Renderer instance to do the actual drawing. Can be configured to + * render continuously or on request. + * + * All potentially blocking synchronization is done through the + * sGLThreadManager object. This avoids multiple-lock ordering issues. + * + */ + static class GLThread extends Thread { + GLThread(WeakReference<GLSurfaceView> glSurfaceViewWeakRef) { + super(); + mWidth = 0; + mHeight = 0; + mRequestRender = true; + mRenderMode = RENDERMODE_CONTINUOUSLY; + mWantRenderNotification = false; + mGLSurfaceViewWeakRef = glSurfaceViewWeakRef; + } + + @Override + public void run() { + setName("GLThread " + getId()); + if (LOG_THREADS) { + Log.i("GLThread", "starting tid=" + getId()); + } + + try { + guardedRun(); + } catch (InterruptedException e) { + // fall thru and exit normally + } finally { + sGLThreadManager.threadExiting(this); + } + } + + /* + * This private method should only be called inside a + * synchronized(sGLThreadManager) block. + */ + private void stopEglSurfaceLocked() { + if (mHaveEglSurface) { + mHaveEglSurface = false; + mEglHelper.destroySurface(); + } + } + + /* + * This private method should only be called inside a + * synchronized(sGLThreadManager) block. 
+ */ + private void stopEglContextLocked() { + if (mHaveEglContext) { + mEglHelper.finish(); + mHaveEglContext = false; + sGLThreadManager.releaseEglContextLocked(this); + } + } + private void guardedRun() throws InterruptedException { + mEglHelper = new EglHelper(mGLSurfaceViewWeakRef); + mHaveEglContext = false; + mHaveEglSurface = false; + mWantRenderNotification = false; + + try { + GL10 gl = null; + boolean createEglContext = false; + boolean createEglSurface = false; + boolean createGlInterface = false; + boolean lostEglContext = false; + boolean sizeChanged = false; + boolean wantRenderNotification = false; + boolean doRenderNotification = false; + boolean askedToReleaseEglContext = false; + int w = 0; + int h = 0; + Runnable event = null; + Runnable finishDrawingRunnable = null; + + while (true) { + synchronized (sGLThreadManager) { + while (true) { + if (mShouldExit) { + return; + } + + if (! mEventQueue.isEmpty()) { + event = mEventQueue.remove(0); + break; + } + + // Update the pause state. + boolean pausing = false; + if (mPaused != mRequestPaused) { + pausing = mRequestPaused; + mPaused = mRequestPaused; + sGLThreadManager.notifyAll(); + if (LOG_PAUSE_RESUME) { + Log.i("GLThread", "mPaused is now " + mPaused + " tid=" + getId()); + } + } + + // Do we need to give up the EGL context? + if (mShouldReleaseEglContext) { + if (LOG_SURFACE) { + Log.i("GLThread", "releasing EGL context because asked to tid=" + getId()); + } + stopEglSurfaceLocked(); + stopEglContextLocked(); + mShouldReleaseEglContext = false; + askedToReleaseEglContext = true; + } + + // Have we lost the EGL context? + if (lostEglContext) { + stopEglSurfaceLocked(); + stopEglContextLocked(); + lostEglContext = false; + } + + // When pausing, release the EGL surface: + if (pausing && mHaveEglSurface) { + if (LOG_SURFACE) { + Log.i("GLThread", "releasing EGL surface because paused tid=" + getId()); + } + stopEglSurfaceLocked(); + } + + // When pausing, optionally release the EGL Context: + if (pausing && mHaveEglContext) { + GLSurfaceView view = mGLSurfaceViewWeakRef.get(); + boolean preserveEglContextOnPause = view == null ? + false : view.mPreserveEGLContextOnPause; + if (!preserveEglContextOnPause) { + stopEglContextLocked(); + if (LOG_SURFACE) { + Log.i("GLThread", "releasing EGL context because paused tid=" + getId()); + } + } + } + + // Have we lost the SurfaceView surface? + if ((! mHasSurface) && (! mWaitingForSurface)) { + if (LOG_SURFACE) { + Log.i("GLThread", "noticed surfaceView surface lost tid=" + getId()); + } + if (mHaveEglSurface) { + stopEglSurfaceLocked(); + } + mWaitingForSurface = true; + mSurfaceIsBad = false; + sGLThreadManager.notifyAll(); + } + + // Have we acquired the surface view surface? + if (mHasSurface && mWaitingForSurface) { + if (LOG_SURFACE) { + Log.i("GLThread", "noticed surfaceView surface acquired tid=" + getId()); + } + mWaitingForSurface = false; + sGLThreadManager.notifyAll(); + } + + if (doRenderNotification) { + if (LOG_SURFACE) { + Log.i("GLThread", "sending render notification tid=" + getId()); + } + mWantRenderNotification = false; + doRenderNotification = false; + mRenderComplete = true; + sGLThreadManager.notifyAll(); + } + + if (mFinishDrawingRunnable != null) { + finishDrawingRunnable = mFinishDrawingRunnable; + mFinishDrawingRunnable = null; + } + + // Ready to draw? + if (readyToDraw()) { + + // If we don't have an EGL context, try to acquire one. + if (! 
mHaveEglContext) { + if (askedToReleaseEglContext) { + askedToReleaseEglContext = false; + } else { + try { + mEglHelper.start(); + } catch (RuntimeException t) { + sGLThreadManager.releaseEglContextLocked(this); + throw t; + } + mHaveEglContext = true; + createEglContext = true; + + sGLThreadManager.notifyAll(); + } + } + + if (mHaveEglContext && !mHaveEglSurface) { + mHaveEglSurface = true; + createEglSurface = true; + createGlInterface = true; + sizeChanged = true; + } + + if (mHaveEglSurface) { + if (mSizeChanged) { + sizeChanged = true; + w = mWidth; + h = mHeight; + mWantRenderNotification = true; + if (LOG_SURFACE) { + Log.i("GLThread", + "noticing that we want render notification tid=" + + getId()); + } + + // Destroy and recreate the EGL surface. + createEglSurface = true; + + mSizeChanged = false; + } + mRequestRender = false; + sGLThreadManager.notifyAll(); + if (mWantRenderNotification) { + wantRenderNotification = true; + } + break; + } + } else { + if (finishDrawingRunnable != null) { + Log.w(TAG, "Warning, !readyToDraw() but waiting for " + + "draw finished! Early reporting draw finished."); + finishDrawingRunnable.run(); + finishDrawingRunnable = null; + } + } + // By design, this is the only place in a GLThread thread where we wait(). + if (LOG_THREADS) { + Log.i("GLThread", "waiting tid=" + getId() + + " mHaveEglContext: " + mHaveEglContext + + " mHaveEglSurface: " + mHaveEglSurface + + " mFinishedCreatingEglSurface: " + mFinishedCreatingEglSurface + + " mPaused: " + mPaused + + " mHasSurface: " + mHasSurface + + " mSurfaceIsBad: " + mSurfaceIsBad + + " mWaitingForSurface: " + mWaitingForSurface + + " mWidth: " + mWidth + + " mHeight: " + mHeight + + " mRequestRender: " + mRequestRender + + " mRenderMode: " + mRenderMode); + } + sGLThreadManager.wait(); + } + } // end of synchronized(sGLThreadManager) + + if (event != null) { + event.run(); + event = null; + continue; + } + + if (createEglSurface) { + if (LOG_SURFACE) { + Log.w("GLThread", "egl createSurface"); + } + if (mEglHelper.createSurface()) { + synchronized(sGLThreadManager) { + mFinishedCreatingEglSurface = true; + sGLThreadManager.notifyAll(); + } + } else { + synchronized(sGLThreadManager) { + mFinishedCreatingEglSurface = true; + mSurfaceIsBad = true; + sGLThreadManager.notifyAll(); + } + continue; + } + createEglSurface = false; + } + + if (createGlInterface) { + gl = (GL10) mEglHelper.createGL(); + + createGlInterface = false; + } + + // -- GODOT start -- + if (createEglContext) { + if (LOG_RENDERER) { + Log.w("GLThread", "onSurfaceCreated"); + } + GLSurfaceView view = mGLSurfaceViewWeakRef.get(); + if (view != null) { + try { + view.mRenderer.onSurfaceCreated(gl, mEglHelper.mEglConfig); + } finally { + } + } + createEglContext = false; + } + + if (sizeChanged) { + if (LOG_RENDERER) { + Log.w("GLThread", "onSurfaceChanged(" + w + ", " + h + ")"); + } + GLSurfaceView view = mGLSurfaceViewWeakRef.get(); + if (view != null) { + try { + view.mRenderer.onSurfaceChanged(gl, w, h); + } finally { + } + } + sizeChanged = false; + } + + boolean swapBuffers = false; + if (LOG_RENDERER_DRAW_FRAME) { + Log.w("GLThread", "onDrawFrame tid=" + getId()); + } + { + GLSurfaceView view = mGLSurfaceViewWeakRef.get(); + if (view != null) { + try { + swapBuffers = view.mRenderer.onDrawFrame(gl); + if (finishDrawingRunnable != null) { + finishDrawingRunnable.run(); + finishDrawingRunnable = null; + } + } finally {} + } + } + if (swapBuffers) { + int swapError = mEglHelper.swap(); + switch (swapError) { + case EGL10.EGL_SUCCESS: + 
break; + case EGL11.EGL_CONTEXT_LOST: + if (LOG_SURFACE) { + Log.i("GLThread", "egl context lost tid=" + getId()); + } + lostEglContext = true; + break; + default: + // Other errors typically mean that the current surface is bad, + // probably because the SurfaceView surface has been destroyed, + // but we haven't been notified yet. + // Log the error to help developers understand why rendering stopped. + EglHelper.logEglErrorAsWarning("GLThread", "eglSwapBuffers", swapError); + + synchronized (sGLThreadManager) { + mSurfaceIsBad = true; + sGLThreadManager.notifyAll(); + } + break; + } + } + // -- GODOT end -- + + if (wantRenderNotification) { + doRenderNotification = true; + wantRenderNotification = false; + } + } + + } finally { + /* + * clean-up everything... + */ + synchronized (sGLThreadManager) { + stopEglSurfaceLocked(); + stopEglContextLocked(); + } + } + } + + public boolean ableToDraw() { + return mHaveEglContext && mHaveEglSurface && readyToDraw(); + } + + private boolean readyToDraw() { + return (!mPaused) && mHasSurface && (!mSurfaceIsBad) + && (mWidth > 0) && (mHeight > 0) + && (mRequestRender || (mRenderMode == RENDERMODE_CONTINUOUSLY)); + } + + public void setRenderMode(int renderMode) { + if ( !((RENDERMODE_WHEN_DIRTY <= renderMode) && (renderMode <= RENDERMODE_CONTINUOUSLY)) ) { + throw new IllegalArgumentException("renderMode"); + } + synchronized(sGLThreadManager) { + mRenderMode = renderMode; + sGLThreadManager.notifyAll(); + } + } + + public int getRenderMode() { + synchronized(sGLThreadManager) { + return mRenderMode; + } + } + + public void requestRender() { + synchronized(sGLThreadManager) { + mRequestRender = true; + sGLThreadManager.notifyAll(); + } + } + + public void requestRenderAndNotify(Runnable finishDrawing) { + synchronized(sGLThreadManager) { + // If we are already on the GL thread, this means a client callback + // has caused reentrancy, for example via updating the SurfaceView parameters. + // We will return to the client rendering code, so here we don't need to + // do anything. + if (Thread.currentThread() == this) { + return; + } + + mWantRenderNotification = true; + mRequestRender = true; + mRenderComplete = false; + mFinishDrawingRunnable = finishDrawing; + + sGLThreadManager.notifyAll(); + } + } + + public void surfaceCreated() { + synchronized(sGLThreadManager) { + if (LOG_THREADS) { + Log.i("GLThread", "surfaceCreated tid=" + getId()); + } + mHasSurface = true; + mFinishedCreatingEglSurface = false; + sGLThreadManager.notifyAll(); + while (mWaitingForSurface + && !mFinishedCreatingEglSurface + && !mExited) { + try { + sGLThreadManager.wait(); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + } + } + } + } + + public void surfaceDestroyed() { + synchronized(sGLThreadManager) { + if (LOG_THREADS) { + Log.i("GLThread", "surfaceDestroyed tid=" + getId()); + } + mHasSurface = false; + sGLThreadManager.notifyAll(); + while((!mWaitingForSurface) && (!mExited)) { + try { + sGLThreadManager.wait(); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + } + } + } + } + + public void onPause() { + synchronized (sGLThreadManager) { + if (LOG_PAUSE_RESUME) { + Log.i("GLThread", "onPause tid=" + getId()); + } + mRequestPaused = true; + sGLThreadManager.notifyAll(); + while ((! mExited) && (! 
mPaused)) { + if (LOG_PAUSE_RESUME) { + Log.i("Main thread", "onPause waiting for mPaused."); + } + try { + sGLThreadManager.wait(); + } catch (InterruptedException ex) { + Thread.currentThread().interrupt(); + } + } + } + } + + public void onResume() { + synchronized (sGLThreadManager) { + if (LOG_PAUSE_RESUME) { + Log.i("GLThread", "onResume tid=" + getId()); + } + mRequestPaused = false; + mRequestRender = true; + mRenderComplete = false; + sGLThreadManager.notifyAll(); + while ((! mExited) && mPaused && (!mRenderComplete)) { + if (LOG_PAUSE_RESUME) { + Log.i("Main thread", "onResume waiting for !mPaused."); + } + try { + sGLThreadManager.wait(); + } catch (InterruptedException ex) { + Thread.currentThread().interrupt(); + } + } + } + } + + public void onWindowResize(int w, int h) { + synchronized (sGLThreadManager) { + mWidth = w; + mHeight = h; + mSizeChanged = true; + mRequestRender = true; + mRenderComplete = false; + + // If we are already on the GL thread, this means a client callback + // has caused reentrancy, for example via updating the SurfaceView parameters. + // We need to process the size change eventually though and update our EGLSurface. + // So we set the parameters and return so they can be processed on our + // next iteration. + if (Thread.currentThread() == this) { + return; + } + + sGLThreadManager.notifyAll(); + + // Wait for thread to react to resize and render a frame + while (! mExited && !mPaused && !mRenderComplete + && ableToDraw()) { + if (LOG_SURFACE) { + Log.i("Main thread", "onWindowResize waiting for render complete from tid=" + getId()); + } + try { + sGLThreadManager.wait(); + } catch (InterruptedException ex) { + Thread.currentThread().interrupt(); + } + } + } + } + + public void requestExitAndWait() { + // don't call this from GLThread thread or it is a guaranteed + // deadlock! + synchronized(sGLThreadManager) { + mShouldExit = true; + sGLThreadManager.notifyAll(); + while (! mExited) { + try { + sGLThreadManager.wait(); + } catch (InterruptedException ex) { + Thread.currentThread().interrupt(); + } + } + } + } + + public void requestReleaseEglContextLocked() { + mShouldReleaseEglContext = true; + sGLThreadManager.notifyAll(); + } + + /** + * Queue an "event" to be run on the GL rendering thread. + * @param r the runnable to be run on the GL rendering thread. + */ + public void queueEvent(Runnable r) { + if (r == null) { + throw new IllegalArgumentException("r must not be null"); + } + synchronized(sGLThreadManager) { + mEventQueue.add(r); + sGLThreadManager.notifyAll(); + } + } + + // Once the thread is started, all accesses to the following member + // variables are protected by the sGLThreadManager monitor + private boolean mShouldExit; + private boolean mExited; + private boolean mRequestPaused; + private boolean mPaused; + private boolean mHasSurface; + private boolean mSurfaceIsBad; + private boolean mWaitingForSurface; + private boolean mHaveEglContext; + private boolean mHaveEglSurface; + private boolean mFinishedCreatingEglSurface; + private boolean mShouldReleaseEglContext; + private int mWidth; + private int mHeight; + private int mRenderMode; + private boolean mRequestRender; + private boolean mWantRenderNotification; + private boolean mRenderComplete; + private ArrayList<Runnable> mEventQueue = new ArrayList<Runnable>(); + private boolean mSizeChanged = true; + private Runnable mFinishDrawingRunnable = null; + + // End of member variables protected by the sGLThreadManager monitor. 
+ + private EglHelper mEglHelper; + + /** + * Set once at thread construction time, nulled out when the parent view is garbage + * collected. This weak reference allows the GLSurfaceView to be garbage collected while + * the GLThread is still alive. + */ + private WeakReference<GLSurfaceView> mGLSurfaceViewWeakRef; + + } + + static class LogWriter extends Writer { + + @Override public void close() { + flushBuilder(); + } + + @Override public void flush() { + flushBuilder(); + } + + @Override public void write(char[] buf, int offset, int count) { + for(int i = 0; i < count; i++) { + char c = buf[offset + i]; + if ( c == '\n') { + flushBuilder(); + } + else { + mBuilder.append(c); + } + } + } + + private void flushBuilder() { + if (mBuilder.length() > 0) { + Log.v("GLSurfaceView", mBuilder.toString()); + mBuilder.delete(0, mBuilder.length()); + } + } + + private StringBuilder mBuilder = new StringBuilder(); + } + + + private void checkRenderThreadState() { + if (mGLThread != null) { + throw new IllegalStateException( + "setRenderer has already been called for this instance."); + } + } + + private static class GLThreadManager { + private static String TAG = "GLThreadManager"; + + public synchronized void threadExiting(GLThread thread) { + if (LOG_THREADS) { + Log.i("GLThread", "exiting tid=" + thread.getId()); + } + thread.mExited = true; + notifyAll(); + } + + /* + * Releases the EGL context. Requires that we are already in the + * sGLThreadManager monitor when this is called. + */ + public void releaseEglContextLocked(GLThread thread) { + notifyAll(); + } + } + + private static final GLThreadManager sGLThreadManager = new GLThreadManager(); + + private final WeakReference<GLSurfaceView> mThisWeakRef = + new WeakReference<GLSurfaceView>(this); + private GLThread mGLThread; + private Renderer mRenderer; + private boolean mDetached; + private EGLConfigChooser mEGLConfigChooser; + private EGLContextFactory mEGLContextFactory; + private EGLWindowSurfaceFactory mEGLWindowSurfaceFactory; + private GLWrapper mGLWrapper; + private int mDebugFlags; + private int mEGLContextClientVersion; + private boolean mPreserveEGLContextOnPause; +} + diff --git a/platform/android/java/lib/src/org/godotengine/godot/GodotRenderer.java b/platform/android/java/lib/src/org/godotengine/godot/gl/GodotRenderer.java index e3956ac459..5c4fd00f6d 100644 --- a/platform/android/java/lib/src/org/godotengine/godot/GodotRenderer.java +++ b/platform/android/java/lib/src/org/godotengine/godot/gl/GodotRenderer.java @@ -28,38 +28,38 @@ /* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ /*************************************************************************/ -package org.godotengine.godot; +package org.godotengine.godot.gl; +import org.godotengine.godot.GodotLib; import org.godotengine.godot.plugin.GodotPlugin; import org.godotengine.godot.plugin.GodotPluginRegistry; -import org.godotengine.godot.utils.GLUtils; - -import android.opengl.GLSurfaceView; import javax.microedition.khronos.egl.EGLConfig; import javax.microedition.khronos.opengles.GL10; /** - * Godot's renderer implementation. + * Godot's GL renderer implementation.
*/ -class GodotRenderer implements GLSurfaceView.Renderer { +public class GodotRenderer implements GLSurfaceView.Renderer { private final GodotPluginRegistry pluginRegistry; private boolean activityJustResumed = false; - GodotRenderer() { + public GodotRenderer() { this.pluginRegistry = GodotPluginRegistry.getPluginRegistry(); } - public void onDrawFrame(GL10 gl) { + public boolean onDrawFrame(GL10 gl) { if (activityJustResumed) { GodotLib.onRendererResumed(); activityJustResumed = false; } - GodotLib.step(); + boolean swapBuffers = GodotLib.step(); for (GodotPlugin plugin : pluginRegistry.getAllPlugins()) { plugin.onGLDrawFrame(gl); } + + return swapBuffers; } public void onSurfaceChanged(GL10 gl, int width, int height) { @@ -76,13 +76,13 @@ class GodotRenderer implements GLSurfaceView.Renderer { } } - void onActivityResumed() { + public void onActivityResumed() { // We defer invoking GodotLib.onRendererResumed() until the first draw frame call. // This ensures we have a valid GL context and surface when we do so. activityJustResumed = true; } - void onActivityPaused() { + public void onActivityPaused() { GodotLib.onRendererPaused(); } } diff --git a/platform/android/java/lib/src/org/godotengine/godot/xr/ovr/OvrConfigChooser.java b/platform/android/java/lib/src/org/godotengine/godot/xr/ovr/OvrConfigChooser.java index 4c1c84affb..e35d4f5828 100644 --- a/platform/android/java/lib/src/org/godotengine/godot/xr/ovr/OvrConfigChooser.java +++ b/platform/android/java/lib/src/org/godotengine/godot/xr/ovr/OvrConfigChooser.java @@ -30,8 +30,9 @@ package org.godotengine.godot.xr.ovr; +import org.godotengine.godot.gl.GLSurfaceView; + import android.opengl.EGLExt; -import android.opengl.GLSurfaceView; import javax.microedition.khronos.egl.EGL10; import javax.microedition.khronos.egl.EGLConfig; diff --git a/platform/android/java/lib/src/org/godotengine/godot/xr/ovr/OvrContextFactory.java b/platform/android/java/lib/src/org/godotengine/godot/xr/ovr/OvrContextFactory.java index 2b4369b8a6..deb9c4bb1d 100644 --- a/platform/android/java/lib/src/org/godotengine/godot/xr/ovr/OvrContextFactory.java +++ b/platform/android/java/lib/src/org/godotengine/godot/xr/ovr/OvrContextFactory.java @@ -30,8 +30,9 @@ package org.godotengine.godot.xr.ovr; +import org.godotengine.godot.gl.GLSurfaceView; + import android.opengl.EGL14; -import android.opengl.GLSurfaceView; import javax.microedition.khronos.egl.EGL10; import javax.microedition.khronos.egl.EGLConfig; diff --git a/platform/android/java/lib/src/org/godotengine/godot/xr/ovr/OvrWindowSurfaceFactory.java b/platform/android/java/lib/src/org/godotengine/godot/xr/ovr/OvrWindowSurfaceFactory.java index fbfe0a3a75..f087b7dc74 100644 --- a/platform/android/java/lib/src/org/godotengine/godot/xr/ovr/OvrWindowSurfaceFactory.java +++ b/platform/android/java/lib/src/org/godotengine/godot/xr/ovr/OvrWindowSurfaceFactory.java @@ -30,7 +30,7 @@ package org.godotengine.godot.xr.ovr; -import android.opengl.GLSurfaceView; +import org.godotengine.godot.gl.GLSurfaceView; import javax.microedition.khronos.egl.EGL10; import javax.microedition.khronos.egl.EGLConfig; diff --git a/platform/android/java/lib/src/org/godotengine/godot/xr/regular/RegularConfigChooser.java b/platform/android/java/lib/src/org/godotengine/godot/xr/regular/RegularConfigChooser.java index 9fde1961ea..445238b1c2 100644 --- a/platform/android/java/lib/src/org/godotengine/godot/xr/regular/RegularConfigChooser.java +++ b/platform/android/java/lib/src/org/godotengine/godot/xr/regular/RegularConfigChooser.java @@ -30,10 
+30,9 @@ package org.godotengine.godot.xr.regular; +import org.godotengine.godot.gl.GLSurfaceView; import org.godotengine.godot.utils.GLUtils; -import android.opengl.GLSurfaceView; - import javax.microedition.khronos.egl.EGL10; import javax.microedition.khronos.egl.EGLConfig; import javax.microedition.khronos.egl.EGLDisplay; diff --git a/platform/android/java/lib/src/org/godotengine/godot/xr/regular/RegularContextFactory.java b/platform/android/java/lib/src/org/godotengine/godot/xr/regular/RegularContextFactory.java index ce1184a75c..5d62723170 100644 --- a/platform/android/java/lib/src/org/godotengine/godot/xr/regular/RegularContextFactory.java +++ b/platform/android/java/lib/src/org/godotengine/godot/xr/regular/RegularContextFactory.java @@ -30,9 +30,9 @@ package org.godotengine.godot.xr.regular; +import org.godotengine.godot.gl.GLSurfaceView; import org.godotengine.godot.utils.GLUtils; -import android.opengl.GLSurfaceView; import android.util.Log; import javax.microedition.khronos.egl.EGL10; diff --git a/platform/android/java/lib/src/org/godotengine/godot/xr/regular/RegularFallbackConfigChooser.java b/platform/android/java/lib/src/org/godotengine/godot/xr/regular/RegularFallbackConfigChooser.java index 420dda45a0..68329c5c49 100644 --- a/platform/android/java/lib/src/org/godotengine/godot/xr/regular/RegularFallbackConfigChooser.java +++ b/platform/android/java/lib/src/org/godotengine/godot/xr/regular/RegularFallbackConfigChooser.java @@ -30,8 +30,6 @@ package org.godotengine.godot.xr.regular; -import org.godotengine.godot.utils.GLUtils; - import android.util.Log; import javax.microedition.khronos.egl.EGL10; diff --git a/platform/android/java/nativeSrcsConfigs/CMakeLists.txt b/platform/android/java/nativeSrcsConfigs/CMakeLists.txt index 59cfe21c49..711f7cd502 100644 --- a/platform/android/java/nativeSrcsConfigs/CMakeLists.txt +++ b/platform/android/java/nativeSrcsConfigs/CMakeLists.txt @@ -17,4 +17,4 @@ target_include_directories(${PROJECT_NAME} SYSTEM PUBLIC ${GODOT_ROOT_DIR}) -add_definitions(-DUNIX_ENABLED -DVULKAN_ENABLED -DTOOLS_ENABLED) +add_definitions(-DUNIX_ENABLED -DVULKAN_ENABLED -DANDROID_ENABLED) diff --git a/platform/android/java_godot_lib_jni.cpp b/platform/android/java_godot_lib_jni.cpp index 658f9281ab..ea72bc0e15 100644 --- a/platform/android/java_godot_lib_jni.cpp +++ b/platform/android/java_godot_lib_jni.cpp @@ -213,9 +213,9 @@ JNIEXPORT void JNICALL Java_org_godotengine_godot_GodotLib_back(JNIEnv *env, jcl } } -JNIEXPORT void JNICALL Java_org_godotengine_godot_GodotLib_step(JNIEnv *env, jclass clazz) { +JNIEXPORT jboolean JNICALL Java_org_godotengine_godot_GodotLib_step(JNIEnv *env, jclass clazz) { if (step.get() == -1) { - return; + return true; } if (step.get() == 0) { @@ -224,12 +224,12 @@ JNIEXPORT void JNICALL Java_org_godotengine_godot_GodotLib_step(JNIEnv *env, jcl Main::setup2(Thread::get_caller_id()); input_handler = new AndroidInputHandler(); step.increment(); - return; + return true; } if (step.get() == 1) { if (!Main::start()) { - return; // should exit instead and print the error + return true; // should exit instead and print the error } godot_java->on_godot_setup_completed(env); @@ -243,9 +243,12 @@ JNIEXPORT void JNICALL Java_org_godotengine_godot_GodotLib_step(JNIEnv *env, jcl DisplayServerAndroid::get_singleton()->process_magnetometer(magnetometer); DisplayServerAndroid::get_singleton()->process_gyroscope(gyroscope); - if (os_android->main_loop_iterate()) { + bool should_swap_buffers = false; + if 
(os_android->main_loop_iterate(&should_swap_buffers)) { godot_java->force_quit(env); } + + return should_swap_buffers; } void touch_preprocessing(JNIEnv *env, jclass clazz, jint input_device, jint ev, jint pointer, jint pointer_count, jfloatArray positions, jint buttons_mask, jfloat vertical_factor, jfloat horizontal_factor) { diff --git a/platform/android/java_godot_lib_jni.h b/platform/android/java_godot_lib_jni.h index 927b44ddb6..e686ee5c09 100644 --- a/platform/android/java_godot_lib_jni.h +++ b/platform/android/java_godot_lib_jni.h @@ -42,7 +42,7 @@ JNIEXPORT void JNICALL Java_org_godotengine_godot_GodotLib_ondestroy(JNIEnv *env JNIEXPORT void JNICALL Java_org_godotengine_godot_GodotLib_setup(JNIEnv *env, jclass clazz, jobjectArray p_cmdline); JNIEXPORT void JNICALL Java_org_godotengine_godot_GodotLib_resize(JNIEnv *env, jclass clazz, jobject p_surface, jint p_width, jint p_height); JNIEXPORT void JNICALL Java_org_godotengine_godot_GodotLib_newcontext(JNIEnv *env, jclass clazz, jobject p_surface); -JNIEXPORT void JNICALL Java_org_godotengine_godot_GodotLib_step(JNIEnv *env, jclass clazz); +JNIEXPORT jboolean JNICALL Java_org_godotengine_godot_GodotLib_step(JNIEnv *env, jclass clazz); JNIEXPORT void JNICALL Java_org_godotengine_godot_GodotLib_back(JNIEnv *env, jclass clazz); void touch_preprocessing(JNIEnv *env, jclass clazz, jint input_device, jint ev, jint pointer, jint pointer_count, jfloatArray positions, jint buttons_mask = 0, jfloat vertical_factor = 0, jfloat horizontal_factor = 0); JNIEXPORT void JNICALL Java_org_godotengine_godot_GodotLib_touch__IIII_3F(JNIEnv *env, jclass clazz, jint input_device, jint ev, jint pointer, jint pointer_count, jfloatArray positions); diff --git a/platform/android/os_android.cpp b/platform/android/os_android.cpp index 438fc04eb6..ef53415f16 100644 --- a/platform/android/os_android.cpp +++ b/platform/android/os_android.cpp @@ -36,6 +36,7 @@ #include "main/main.h" #include "platform/android/display_server_android.h" #include "scene/main/scene_tree.h" +#include "servers/rendering_server.h" #include "dir_access_jandroid.h" #include "file_access_android.h" @@ -181,12 +182,18 @@ void OS_Android::main_loop_begin() { } } -bool OS_Android::main_loop_iterate() { +bool OS_Android::main_loop_iterate(bool *r_should_swap_buffers) { if (!main_loop) { return false; } DisplayServerAndroid::get_singleton()->process_events(); - return Main::iteration(); + bool exit = Main::iteration(); + + if (r_should_swap_buffers) { + *r_should_swap_buffers = !is_in_low_processor_usage_mode() || RenderingServer::get_singleton()->has_changed(); + } + + return exit; } void OS_Android::main_loop_end() { diff --git a/platform/android/os_android.h b/platform/android/os_android.h index c0c5127224..48239b3f84 100644 --- a/platform/android/os_android.h +++ b/platform/android/os_android.h @@ -96,7 +96,7 @@ public: virtual MainLoop *get_main_loop() const override; void main_loop_begin(); - bool main_loop_iterate(); + bool main_loop_iterate(bool *r_should_swap_buffers = nullptr); void main_loop_end(); void main_loop_focusout(); void main_loop_focusin(); diff --git a/platform/windows/display_server_windows.cpp b/platform/windows/display_server_windows.cpp index 16323dcc13..d73239614f 100644 --- a/platform/windows/display_server_windows.cpp +++ b/platform/windows/display_server_windows.cpp @@ -3626,7 +3626,11 @@ DisplayServerWindows::DisplayServerWindows(const String &p_rendering_driver, Win } #endif - if (!OS::get_singleton()->is_in_low_processor_usage_mode()) { + if 
(!Engine::get_singleton()->is_editor_hint() && !OS::get_singleton()->is_in_low_processor_usage_mode()) { + // Increase priority for projects that are not in low-processor mode (typically games) + // to reduce the risk of frame stuttering. + // This is not done for the editor to prevent importers or resource bakers + // from making the system unresponsive. SetPriorityClass(GetCurrentProcess(), ABOVE_NORMAL_PRIORITY_CLASS); DWORD index = 0; HANDLE handle = AvSetMmThreadCharacteristics("Games", &index); diff --git a/scene/main/viewport.cpp b/scene/main/viewport.cpp index d59764b8ed..20cb0176e0 100644 --- a/scene/main/viewport.cpp +++ b/scene/main/viewport.cpp @@ -1038,8 +1038,8 @@ Transform2D Viewport::get_final_transform() const { void Viewport::_update_canvas_items(Node *p_node) { if (p_node != this) { - Viewport *vp = Object::cast_to<Viewport>(p_node); - if (vp) { + Window *w = Object::cast_to<Window>(p_node); + if (w && (!w->is_inside_tree() || !w->is_embedded())) { return; } diff --git a/scene/main/window.cpp b/scene/main/window.cpp index 6837fcae21..62e8e7fea7 100644 --- a/scene/main/window.cpp +++ b/scene/main/window.cpp @@ -1101,6 +1101,14 @@ void Window::popup_centered_ratio(float p_ratio) { void Window::popup(const Rect2i &p_screen_rect) { emit_signal(SNAME("about_to_popup")); + if (!_get_embedder() && get_flag(FLAG_POPUP)) { + // Send a focus-out notification when opening a Window Manager Popup. + SceneTree *scene_tree = get_tree(); + if (scene_tree) { + scene_tree->notify_group("_viewports", NOTIFICATION_WM_WINDOW_FOCUS_OUT); + } + } + // Update window size to calculate the actual window size based on contents minimum size and minimum size. _update_window_size(); diff --git a/scene/resources/visual_shader.cpp b/scene/resources/visual_shader.cpp index 63bed921f1..129f76702e 100644 --- a/scene/resources/visual_shader.cpp +++ b/scene/resources/visual_shader.cpp @@ -2627,7 +2627,6 @@ const VisualShaderNodeInput::Port VisualShaderNodeInput::ports[] = { { Shader::MODE_SPATIAL, VisualShader::TYPE_LIGHT, VisualShaderNode::PORT_TYPE_VECTOR_3D, "light", "LIGHT" }, { Shader::MODE_SPATIAL, VisualShader::TYPE_LIGHT, VisualShaderNode::PORT_TYPE_VECTOR_3D, "light_color", "LIGHT_COLOR" }, { Shader::MODE_SPATIAL, VisualShader::TYPE_LIGHT, VisualShaderNode::PORT_TYPE_SCALAR, "attenuation", "ATTENUATION" }, - { Shader::MODE_SPATIAL, VisualShader::TYPE_LIGHT, VisualShaderNode::PORT_TYPE_VECTOR_3D, "shadow_attenuation", "SHADOW_ATTENUATION" }, { Shader::MODE_SPATIAL, VisualShader::TYPE_LIGHT, VisualShaderNode::PORT_TYPE_VECTOR_3D, "albedo", "ALBEDO" }, { Shader::MODE_SPATIAL, VisualShader::TYPE_LIGHT, VisualShaderNode::PORT_TYPE_VECTOR_3D, "backlight", "BACKLIGHT" }, { Shader::MODE_SPATIAL, VisualShader::TYPE_LIGHT, VisualShaderNode::PORT_TYPE_VECTOR_3D, "diffuse", "DIFFUSE_LIGHT" }, diff --git a/servers/rendering/renderer_canvas_cull.cpp b/servers/rendering/renderer_canvas_cull.cpp index 56eaa396c8..22149c9e43 100644 --- a/servers/rendering/renderer_canvas_cull.cpp +++ b/servers/rendering/renderer_canvas_cull.cpp @@ -1664,6 +1664,11 @@ bool RendererCanvasCull::free(RID p_rid) { } */ + if (canvas_item->canvas_group != nullptr) { + memdelete(canvas_item->canvas_group); + canvas_item->canvas_group = nullptr; + } + canvas_item_owner.free(p_rid); } else if (canvas_light_owner.owns(p_rid)) { diff --git a/servers/rendering/renderer_rd/forward_clustered/scene_shader_forward_clustered.cpp b/servers/rendering/renderer_rd/forward_clustered/scene_shader_forward_clustered.cpp index 33ad2c2c31..5a0ed7ebad 100644 
--- a/servers/rendering/renderer_rd/forward_clustered/scene_shader_forward_clustered.cpp +++ b/servers/rendering/renderer_rd/forward_clustered/scene_shader_forward_clustered.cpp @@ -616,7 +616,6 @@ void SceneShaderForwardClustered::init(RendererStorageRD *p_storage, const Strin actions.renames["LIGHT_COLOR"] = "light_color"; actions.renames["LIGHT"] = "light"; actions.renames["ATTENUATION"] = "attenuation"; - actions.renames["SHADOW_ATTENUATION"] = "shadow_attenuation"; actions.renames["DIFFUSE_LIGHT"] = "diffuse_light"; actions.renames["SPECULAR_LIGHT"] = "specular_light"; diff --git a/servers/rendering/renderer_rd/forward_mobile/scene_shader_forward_mobile.cpp b/servers/rendering/renderer_rd/forward_mobile/scene_shader_forward_mobile.cpp index d7ed4a36f0..9452c7e6df 100644 --- a/servers/rendering/renderer_rd/forward_mobile/scene_shader_forward_mobile.cpp +++ b/servers/rendering/renderer_rd/forward_mobile/scene_shader_forward_mobile.cpp @@ -586,7 +586,6 @@ void SceneShaderForwardMobile::init(RendererStorageRD *p_storage, const String p actions.renames["LIGHT_COLOR"] = "light_color"; actions.renames["LIGHT"] = "light"; actions.renames["ATTENUATION"] = "attenuation"; - actions.renames["SHADOW_ATTENUATION"] = "shadow_attenuation"; actions.renames["DIFFUSE_LIGHT"] = "diffuse_light"; actions.renames["SPECULAR_LIGHT"] = "specular_light"; diff --git a/servers/rendering/shader_types.cpp b/servers/rendering/shader_types.cpp index f0785d4042..27a69fbc2e 100644 --- a/servers/rendering/shader_types.cpp +++ b/servers/rendering/shader_types.cpp @@ -178,7 +178,6 @@ ShaderTypes::ShaderTypes() { shader_modes[RS::SHADER_SPATIAL].functions["light"].built_ins["LIGHT"] = constt(ShaderLanguage::TYPE_VEC3); shader_modes[RS::SHADER_SPATIAL].functions["light"].built_ins["LIGHT_COLOR"] = constt(ShaderLanguage::TYPE_VEC3); shader_modes[RS::SHADER_SPATIAL].functions["light"].built_ins["ATTENUATION"] = constt(ShaderLanguage::TYPE_FLOAT); - shader_modes[RS::SHADER_SPATIAL].functions["light"].built_ins["SHADOW_ATTENUATION"] = constt(ShaderLanguage::TYPE_VEC3); shader_modes[RS::SHADER_SPATIAL].functions["light"].built_ins["ALBEDO"] = constt(ShaderLanguage::TYPE_VEC3); shader_modes[RS::SHADER_SPATIAL].functions["light"].built_ins["BACKLIGHT"] = constt(ShaderLanguage::TYPE_VEC3); shader_modes[RS::SHADER_SPATIAL].functions["light"].built_ins["METALLIC"] = constt(ShaderLanguage::TYPE_FLOAT); diff --git a/thirdparty/README.md b/thirdparty/README.md index f2f51683c1..9cf86dda25 100644 --- a/thirdparty/README.md +++ b/thirdparty/README.md @@ -688,7 +688,7 @@ Files extracted from upstream source: SDK release: https://github.com/KhronosGroup/Vulkan-ValidationLayers/blob/master/layers/generated/vk_enum_string_helper.h `vk_mem_alloc.h` is taken from https://github.com/GPUOpen-LibrariesAndSDKs/VulkanMemoryAllocator -Version: 3.0.0-development (2022-02-24), commit `dc3f6bb9159df22ceed69c7765ddfb4fbb1b6ed0` +Version: 3.0.1-development (2022-03-28), commit `5b598e0a359381d7e2a94149210a1b7642024ae5` `vk_mem_alloc.cpp` is a Godot file and should be preserved on updates. Patches in the `patches` directory should be re-applied after updates. 
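The vk_mem_alloc.h update that follows documents VMA's 3.x allocation API: memory usage is expressed with VMA_MEMORY_USAGE_AUTO / VMA_MEMORY_USAGE_AUTO_PREFER_HOST, CPU access is declared through VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT, and allocations can now carry a debug name set with the newly added vmaSetAllocationName(). The sketch below is illustrative only and not part of the patch: the helper name, its parameters, and the already created VmaAllocator are assumptions made for the example; it simply shows how a persistently mapped staging buffer is typically requested under this API.

#include "vk_mem_alloc.h"

// Minimal sketch (not part of the patch): request a persistently mapped
// staging buffer through the VMA 3.x usage/flags API documented below.
static VkResult create_staging_buffer(VmaAllocator p_allocator, VkDeviceSize p_size, VkBuffer *r_buffer, VmaAllocation *r_allocation, void **r_mapped_ptr) {
	VkBufferCreateInfo buffer_info = {};
	buffer_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
	buffer_info.size = p_size;
	buffer_info.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
	buffer_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;

	VmaAllocationCreateInfo alloc_info = {};
	// Let VMA pick the memory type, preferring host memory for a CPU-written staging buffer.
	alloc_info.usage = VMA_MEMORY_USAGE_AUTO_PREFER_HOST;
	// Declare sequential CPU writes and keep the allocation persistently mapped.
	alloc_info.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_MAPPED_BIT;

	VmaAllocationInfo allocation_info = {};
	VkResult err = vmaCreateBuffer(p_allocator, &buffer_info, &alloc_info, r_buffer, r_allocation, &allocation_info);
	if (err != VK_SUCCESS) {
		return err;
	}

	// New in this VMA version: allocations can be given an optional debug name.
	vmaSetAllocationName(p_allocator, *r_allocation, "staging block");

	// Stays valid for the allocation's lifetime because of VMA_ALLOCATION_CREATE_MAPPED_BIT.
	*r_mapped_ptr = allocation_info.pMappedData;
	return VK_SUCCESS;
}

A buffer obtained this way would later be released with vmaDestroyBuffer(p_allocator, *r_buffer, *r_allocation).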
diff --git a/thirdparty/vulkan/patches/VMA-use-volk.patch b/thirdparty/vulkan/patches/VMA-use-volk.patch index 1b6e0f04b8..eebe0c1bc3 100644 --- a/thirdparty/vulkan/patches/VMA-use-volk.patch +++ b/thirdparty/vulkan/patches/VMA-use-volk.patch @@ -1,5 +1,5 @@ diff --git a/thirdparty/vulkan/vk_mem_alloc.h b/thirdparty/vulkan/vk_mem_alloc.h -index 52b403bede..7c450be211 100644 +index 44affc5ca4..d96f2dacc0 100644 --- a/thirdparty/vulkan/vk_mem_alloc.h +++ b/thirdparty/vulkan/vk_mem_alloc.h @@ -127,7 +127,11 @@ extern "C" { diff --git a/thirdparty/vulkan/vk_mem_alloc.h b/thirdparty/vulkan/vk_mem_alloc.h index 6618f1d1f0..d96f2dacc0 100644 --- a/thirdparty/vulkan/vk_mem_alloc.h +++ b/thirdparty/vulkan/vk_mem_alloc.h @@ -25,7 +25,7 @@ /** \mainpage Vulkan Memory Allocator -<b>Version 3.0.0-development</b> +<b>Version 3.0.1-development (2022-03-28)</b> Copyright (c) 2017-2022 Advanced Micro Devices, Inc. All rights reserved. \n License: MIT @@ -84,11 +84,14 @@ License: MIT - [Custom host memory allocator](@ref custom_memory_allocator) - [Device memory allocation callbacks](@ref allocation_callbacks) - [Device heap memory limit](@ref heap_memory_limit) -- \subpage vk_khr_dedicated_allocation -- \subpage enabling_buffer_device_address -- \subpage vk_amd_device_coherent_memory +- <b>Extension support</b> + - \subpage vk_khr_dedicated_allocation + - \subpage enabling_buffer_device_address + - \subpage vk_ext_memory_priority + - \subpage vk_amd_device_coherent_memory - \subpage general_considerations - [Thread safety](@ref general_considerations_thread_safety) + - [Versioning and compatibility](@ref general_considerations_versioning_and_compatibility) - [Validation layer warnings](@ref general_considerations_validation_layer_warnings) - [Allocation algorithm](@ref general_considerations_allocation_algorithm) - [Features not supported](@ref general_considerations_features_not_supported) @@ -427,6 +430,7 @@ typedef enum VmaAllocatorCreateFlagBits VMA_ALLOCATOR_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF } VmaAllocatorCreateFlagBits; +/// See #VmaAllocatorCreateFlagBits. typedef VkFlags VmaAllocatorCreateFlags; /** @} */ @@ -534,8 +538,7 @@ typedef enum VmaAllocationCreateFlagBits You should not use #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT and #VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT at the same time. It makes no sense. - - If VmaAllocationCreateInfo::pool is not null, this flag is implied and ignored. */ + */ VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT = 0x00000002, /** \brief Set this flag to use a memory that will be persistently mapped and retrieve pointer to it. @@ -548,13 +551,11 @@ typedef enum VmaAllocationCreateFlagBits support it (e.g. Intel GPU). */ VMA_ALLOCATION_CREATE_MAPPED_BIT = 0x00000004, - /// \deprecated Removed. Do not use. - VMA_ALLOCATION_CREATE_RESERVED_1_BIT = 0x00000008, - /// \deprecated Removed. Do not use. - VMA_ALLOCATION_CREATE_RESERVED_2_BIT = 0x00000010, - /** Set this flag to treat VmaAllocationCreateInfo::pUserData as pointer to a + /** \deprecated Preserved for backward compatibility. Consider using vmaSetAllocationName() instead. + + Set this flag to treat VmaAllocationCreateInfo::pUserData as pointer to a null-terminated string. Instead of copying pointer value, a local copy of the - string is made and stored in allocation's `pUserData`. The string is automatically + string is made and stored in allocation's `pName`. The string is automatically freed together with the allocation. It is also used in vmaBuildStatsString(). 
*/ VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT = 0x00000020, @@ -567,6 +568,10 @@ typedef enum VmaAllocationCreateFlagBits It is useful when you want to bind yourself to do some more advanced binding, e.g. using some extensions. The flag is meaningful only with functions that bind by default: vmaCreateBuffer(), vmaCreateImage(). Otherwise it is ignored. + + If you want to make sure the new buffer/image is not tied to the new memory allocation + through `VkMemoryDedicatedAllocateInfoKHR` structure in case the allocation ends up in its own memory block, + use also flag #VMA_ALLOCATION_CREATE_CAN_ALIAS_BIT. */ VMA_ALLOCATION_CREATE_DONT_BIND_BIT = 0x00000080, /** Create allocation only if additional device memory required for it, if any, won't exceed @@ -591,7 +596,7 @@ typedef enum VmaAllocationCreateFlagBits never read or accessed randomly, so a memory type can be selected that is uncached and write-combined. \warning Violating this declaration may work correctly, but will likely be very slow. - Watch out for implicit reads introduces by doing e.g. `pMappedData[i] += x;` + Watch out for implicit reads introduced by doing e.g. `pMappedData[i] += x;` Better prepare your data in a local variable and `memcpy()` it to the mapped pointer all at once. */ VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT = 0x00000400, @@ -604,7 +609,7 @@ typedef enum VmaAllocationCreateFlagBits This includes allocations created in \ref custom_memory_pools. Declares that mapped memory can be read, written, and accessed in random order, - so a `HOST_CACHED` memory type is preferred. + so a `HOST_CACHED` memory type is required. */ VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT = 0x00000800, /** @@ -648,6 +653,7 @@ typedef enum VmaAllocationCreateFlagBits VMA_ALLOCATION_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF } VmaAllocationCreateFlagBits; +/// See #VmaAllocationCreateFlagBits. typedef VkFlags VmaAllocationCreateFlags; /// Flags to be passed as VmaPoolCreateInfo::flags. @@ -726,16 +732,17 @@ typedef enum VmaDefragmentationFlagBits VMA_DEFRAGMENTATION_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF } VmaDefragmentationFlagBits; +/// See #VmaDefragmentationFlagBits. typedef VkFlags VmaDefragmentationFlags; -/// Operation performed on single defragmentation move. +/// Operation performed on single defragmentation move. See structure #VmaDefragmentationMove. typedef enum VmaDefragmentationMoveOperation { - /// Buffer/image has been recreated at `dstMemory` + `dstOffset`, data has been copied, old buffer/image has been destroyed. `srcAllocation` should be changed to point to the new place. This is the default value set by vmaBeginDefragmentationPass(). + /// Buffer/image has been recreated at `dstTmpAllocation`, data has been copied, old buffer/image has been destroyed. `srcAllocation` should be changed to point to the new place. This is the default value set by vmaBeginDefragmentationPass(). VMA_DEFRAGMENTATION_MOVE_OPERATION_COPY = 0, - /// Set this value if you cannot move the allocation. New place reserved `dstMemory` + `dstOffset` will be freed. `srcAllocation` will remain unchanged. + /// Set this value if you cannot move the allocation. New place reserved at `dstTmpAllocation` will be freed. `srcAllocation` will remain unchanged. VMA_DEFRAGMENTATION_MOVE_OPERATION_IGNORE = 1, - /// Set this value if you decide to abandon the allocation and you destroyed the buffer/image. New place reserved `dstMemory` + `dstOffset` will be freed, along with `srcAllocation`. 
+ /// Set this value if you decide to abandon the allocation and you destroyed the buffer/image. New place reserved at `dstTmpAllocation` will be freed, along with `srcAllocation`, which will be destroyed. VMA_DEFRAGMENTATION_MOVE_OPERATION_DESTROY = 2, } VmaDefragmentationMoveOperation; @@ -789,7 +796,7 @@ typedef enum VmaVirtualAllocationCreateFlagBits /** Allocation strategy that chooses always the lowest offset in available space. This is not the most efficient strategy but achieves highly packed data. */ - VMA_VIRTUAL_ALLOCATION_CREATE_STRATEGY_PACKED_BIT = VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT , + VMA_VIRTUAL_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT = VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT, /** \brief A bit mask to extract only `STRATEGY` bits from entire set of flags. These strategy flags are binary compatible with equivalent flags in #VmaAllocationCreateFlagBits. @@ -1197,7 +1204,7 @@ typedef struct VmaBudget Fetched from system using VK_EXT_memory_budget extension if enabled. It might be different (most probably smaller) than `VkMemoryHeap::size[heapIndex]` due to factors - external to the program, like other programs also consuming system resources. + external to the program, decided by the operating system. Difference `budget - usage` is the amount of additional memory that can probably be allocated without problems. Exceeding the budget may result in various problems. */ @@ -1211,6 +1218,10 @@ typedef struct VmaBudget @{ */ +/** \brief Parameters of new #VmaAllocation. + +To be used with functions like vmaCreateBuffer(), vmaCreateImage(), and many others. +*/ typedef struct VmaAllocationCreateInfo { /// Use #VmaAllocationCreateFlagBits enum. @@ -1337,7 +1348,7 @@ typedef struct VmaAllocationInfo Same memory object can be shared by multiple allocations. - It can change after call to vmaDefragment() if this allocation is passed to the function. + It can change after the allocation is moved during \ref defragmentation. */ VkDeviceMemory VMA_NULLABLE_NON_DISPATCHABLE deviceMemory; /** \brief Offset in `VkDeviceMemory` object to the beginning of this allocation, in bytes. `(deviceMemory, offset)` pair is unique to this allocation. @@ -1347,7 +1358,7 @@ typedef struct VmaAllocationInfo not entire device memory block. Functions like vmaMapMemory(), vmaBindBufferMemory() also refer to the beginning of the allocation and apply this offset automatically. - It can change after call to vmaDefragment() if this allocation is passed to the function. + It can change after the allocation is moved during \ref defragmentation. */ VkDeviceSize offset; /** \brief Size of this allocation, in bytes. @@ -1367,7 +1378,7 @@ typedef struct VmaAllocationInfo created with #VMA_ALLOCATION_CREATE_MAPPED_BIT flag, this value is null. It can change after call to vmaMapMemory(), vmaUnmapMemory(). - It can also change after call to vmaDefragment() if this allocation is passed to the function. + It can also change after the allocation is moved during \ref defragmentation. */ void* VMA_NULLABLE pMappedData; /** \brief Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vmaSetAllocationUserData(). @@ -1375,6 +1386,14 @@ typedef struct VmaAllocationInfo It can change after call to vmaSetAllocationUserData() for this allocation. */ void* VMA_NULLABLE pUserData; + /** \brief Custom allocation name that was set with vmaSetAllocationName(). + + It can change after call to vmaSetAllocationName() for this allocation. 
+ + Another way to set custom name is to pass it in VmaAllocationCreateInfo::pUserData with + additional flag #VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT set [DEPRECATED]. + */ + const char* VMA_NULLABLE pName; } VmaAllocationInfo; /** \brief Parameters for defragmentation. @@ -1409,12 +1428,13 @@ typedef struct VmaDefragmentationMove VmaDefragmentationMoveOperation operation; /// Allocation that should be moved. VmaAllocation VMA_NOT_NULL srcAllocation; - /// Destination memory block where the allocation should be moved. - VkDeviceMemory VMA_NOT_NULL_NON_DISPATCHABLE dstMemory; - /// Destination offset where the allocation should be moved. - VkDeviceSize dstOffset; - /// Internal data used by VMA. Do not use or modify! - void* VMA_NOT_NULL internalData; + /** \brief Temporary allocation pointing to destination memory that will replace `srcAllocation`. + + \warning Do not store this allocation in your data structures! It exists only temporarily, for the duration of the defragmentation pass, + to be used for binding new buffer/image to the destination memory using e.g. vmaBindBufferMemory(). + vmaEndDefragmentationPass() will destroy it and make `srcAllocation` point to this memory. + */ + VmaAllocation VMA_NOT_NULL dstTmpAllocation; } VmaDefragmentationMove; /** \brief Parameters for incremental defragmentation steps. @@ -1825,14 +1845,19 @@ VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryPages( VmaAllocation VMA_NULLABLE* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pAllocations, VmaAllocationInfo* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) pAllocationInfo); -/** +/** \brief Allocates memory suitable for given `VkBuffer`. + \param allocator \param buffer \param pCreateInfo \param[out] pAllocation Handle to allocated memory. \param[out] pAllocationInfo Optional. Information about allocated memory. It can be later fetched using function vmaGetAllocationInfo(). -You should free the memory using vmaFreeMemory(). +It only creates #VmaAllocation. To bind the memory to the buffer, use vmaBindBufferMemory(). + +This is a special-purpose function. In most cases you should use vmaCreateBuffer(). + +You must free the allocation using vmaFreeMemory() when no longer needed. */ VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForBuffer( VmaAllocator VMA_NOT_NULL allocator, @@ -1841,7 +1866,20 @@ VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForBuffer( VmaAllocation VMA_NULLABLE* VMA_NOT_NULL pAllocation, VmaAllocationInfo* VMA_NULLABLE pAllocationInfo); -/// Function similar to vmaAllocateMemoryForBuffer(). +/** \brief Allocates memory suitable for given `VkImage`. + +\param allocator +\param image +\param pCreateInfo +\param[out] pAllocation Handle to allocated memory. +\param[out] pAllocationInfo Optional. Information about allocated memory. It can be later fetched using function vmaGetAllocationInfo(). + +It only creates #VmaAllocation. To bind the memory to the buffer, use vmaBindImageMemory(). + +This is a special-purpose function. In most cases you should use vmaCreateImage(). + +You must free the allocation using vmaFreeMemory() when no longer needed. +*/ VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForImage( VmaAllocator VMA_NOT_NULL allocator, VkImage VMA_NOT_NULL_NON_DISPATCHABLE image, @@ -1889,15 +1927,8 @@ VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationInfo( /** \brief Sets pUserData in given allocation to new value. 
-If the allocation was created with VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT, -pUserData must be either null, or pointer to a null-terminated string. The function -makes local copy of the string and sets it as allocation's `pUserData`. String -passed as pUserData doesn't need to be valid for whole lifetime of the allocation - -you can free it after this call. String previously pointed by allocation's -pUserData is freed from memory. - -If the flag was not used, the value of pointer `pUserData` is just copied to -allocation's `pUserData`. It is opaque, so you can use it however you want - e.g. +The value of pointer `pUserData` is copied to allocation's `pUserData`. +It is opaque, so you can use it however you want - e.g. as a pointer, ordinal number or some handle to you own data. */ VMA_CALL_PRE void VMA_CALL_POST vmaSetAllocationUserData( @@ -1905,6 +1936,19 @@ VMA_CALL_PRE void VMA_CALL_POST vmaSetAllocationUserData( VmaAllocation VMA_NOT_NULL allocation, void* VMA_NULLABLE pUserData); +/** \brief Sets pName in given allocation to new value. + +`pName` must be either null, or pointer to a null-terminated string. The function +makes local copy of the string and sets it as allocation's `pName`. String +passed as pName doesn't need to be valid for whole lifetime of the allocation - +you can free it after this call. String previously pointed by allocation's +`pName` is freed from memory. +*/ +VMA_CALL_PRE void VMA_CALL_POST vmaSetAllocationName( + VmaAllocator VMA_NOT_NULL allocator, + VmaAllocation VMA_NOT_NULL allocation, + const char* VMA_NULLABLE pName); + /** \brief Given an allocation, returns Property Flags of its memory type. @@ -2092,6 +2136,9 @@ VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckCorruption( \param allocator Allocator object. \param pInfo Structure filled with parameters of defragmentation. \param[out] pContext Context object that must be passed to vmaEndDefragmentation() to finish defragmentation. +\returns +- `VK_SUCCESS` if defragmentation can begin. +- `VK_ERROR_FEATURE_NOT_PRESENT` if defragmentation is not supported. For more information about defragmentation, see documentation chapter: [Defragmentation](@ref defragmentation). @@ -2109,7 +2156,7 @@ VMA_CALL_PRE VkResult VMA_CALL_POST vmaBeginDefragmentation( Use this function to finish defragmentation started by vmaBeginDefragmentation(). */ -VMA_CALL_PRE VkResult VMA_CALL_POST vmaEndDefragmentation( +VMA_CALL_PRE void VMA_CALL_POST vmaEndDefragmentation( VmaAllocator VMA_NOT_NULL allocator, VmaDefragmentationContext VMA_NOT_NULL context, VmaDefragmentationStats* VMA_NULLABLE pStats); @@ -2226,7 +2273,8 @@ VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory2( VkImage VMA_NOT_NULL_NON_DISPATCHABLE image, const void* VMA_NULLABLE pNext); -/** +/** \brief Creates a new `VkBuffer`, allocates and binds memory for it. + \param allocator \param pBufferCreateInfo \param pAllocationCreateInfo @@ -2241,7 +2289,7 @@ This function automatically: -# Binds the buffer with the memory. If any of these operations fail, buffer and allocation are not created, -returned value is negative error code, *pBuffer and *pAllocation are null. +returned value is negative error code, `*pBuffer` and `*pAllocation` are null. 
If the function succeeded, you must destroy both buffer and allocation when you no longer need them using either convenience function vmaDestroyBuffer() or @@ -2282,6 +2330,31 @@ VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBufferWithAlignment( VmaAllocation VMA_NULLABLE* VMA_NOT_NULL pAllocation, VmaAllocationInfo* VMA_NULLABLE pAllocationInfo); +/** \brief Creates a new `VkBuffer`, binds already created memory for it. + +\param allocator +\param allocation Allocation that provides memory to be used for binding new buffer to it. +\param pBufferCreateInfo +\param[out] pBuffer Buffer that was created. + +This function automatically: + +-# Creates buffer. +-# Binds the buffer with the supplied memory. + +If any of these operations fail, buffer is not created, +returned value is negative error code and `*pBuffer` is null. + +If the function succeeded, you must destroy the buffer when you +no longer need it using `vkDestroyBuffer()`. If you want to also destroy the corresponding +allocation you can use convenience function vmaDestroyBuffer(). +*/ +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAliasingBuffer( + VmaAllocator VMA_NOT_NULL allocator, + VmaAllocation VMA_NOT_NULL allocation, + const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo, + VkBuffer VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pBuffer); + /** \brief Destroys Vulkan buffer and frees allocated memory. This is just a convenience function equivalent to: @@ -2307,6 +2380,13 @@ VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateImage( VmaAllocation VMA_NULLABLE* VMA_NOT_NULL pAllocation, VmaAllocationInfo* VMA_NULLABLE pAllocationInfo); +/// Function similar to vmaCreateAliasingBuffer(). +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAliasingImage( + VmaAllocator VMA_NOT_NULL allocator, + VmaAllocation VMA_NOT_NULL allocation, + const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo, + VkImage VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pImage); + /** \brief Destroys Vulkan image and frees allocated memory. This is just a convenience function equivalent to: @@ -2493,6 +2573,7 @@ VMA_CALL_PRE void VMA_CALL_POST vmaFreeStatsString( #include <cstdlib> #include <cstring> #include <utility> +#include <type_traits> #ifdef _MSC_VER #include <intrin.h> // For functions like __popcnt, _BitScanForward etc. @@ -2689,6 +2770,11 @@ static void vma_aligned_free(void* VMA_NULLABLE ptr) #endif #endif +#ifndef VMA_COUNT_BITS_SET + // Returns number of bits set to 1 in (v) + #define VMA_COUNT_BITS_SET(v) VmaCountBitsSet(v) +#endif + #ifndef VMA_BITSCAN_LSB // Scans integer for index of first nonzero value from the Least Significant Bit (LSB). If mask is 0 then returns UINT8_MAX #define VMA_BITSCAN_LSB(mask) VmaBitScanLSB(mask) @@ -3077,22 +3163,29 @@ class VmaAllocationObjectAllocator; #endif // _VMA_FORWARD_DECLARATIONS -#ifndef _VMA_FUNCTIONS -// Returns number of bits set to 1 in (v). -static inline uint32_t VmaCountBitsSet(uint32_t v) -{ -#ifdef _MSC_VER +#ifndef _VMA_FUNCTIONS + +/* +Returns number of bits set to 1 in (v). + +On specific platforms and compilers you can use instrinsics like: + +Visual Studio: return __popcnt(v); -#elif defined __GNUC__ || defined __clang__ +GCC, Clang: return static_cast<uint32_t>(__builtin_popcount(v)); -#else + +Define macro VMA_COUNT_BITS_SET to provide your optimized implementation. +But you need to check in runtime whether user's CPU supports these, as some old processors don't. 
+*/ +static inline uint32_t VmaCountBitsSet(uint32_t v) +{ uint32_t c = v - ((v >> 1) & 0x55555555); c = ((c >> 2) & 0x33333333) + (c & 0x33333333); c = ((c >> 4) + c) & 0x0F0F0F0F; c = ((c >> 8) + c) & 0x00FF00FF; c = ((c >> 16) + c) & 0x0000FFFF; return c; -#endif } static inline uint8_t VmaBitScanLSB(uint64_t mask) @@ -3631,7 +3724,11 @@ static bool FindMemoryPreferences( // GPU access, no CPU access (e.g. a color attachment image) - prefer GPU memory if(deviceAccess) { - outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT; + // ...unless there is a clear preference from the user not to do so. + if(preferHost) + outNotPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT; + else + outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT; } // No direct GPU access, no CPU access, just transfers. // It may be staging copy intended for e.g. preserving image for next frame (then better GPU memory) or @@ -5399,6 +5496,7 @@ public: // Posts next part of an open string. The number is converted to decimal characters. void ContinueString(uint32_t n); void ContinueString(uint64_t n); + void ContinueString_Size(size_t n); // Posts next part of an open string. Pointer value is converted to characters // using "%p" formatting - shown as hexadecimal number, e.g.: 000000081276Ad00 void ContinueString_Pointer(const void* ptr); @@ -5408,6 +5506,7 @@ public: // Writes a number value. void WriteNumber(uint32_t n); void WriteNumber(uint64_t n); + void WriteSize(size_t n); // Writes a boolean value - false or true. void WriteBool(bool b); // Writes a null value. @@ -5432,6 +5531,11 @@ private: VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack; bool m_InsideString; + // Write size_t for less than 64bits + void WriteSize(size_t n, std::integral_constant<bool, false>) { m_SB.AddNumber(static_cast<uint32_t>(n)); } + // Write size_t for 64bits + void WriteSize(size_t n, std::integral_constant<bool, true>) { m_SB.AddNumber(static_cast<uint64_t>(n)); } + void BeginValue(bool isString); void WriteIndent(bool oneLess = false); }; @@ -5574,6 +5678,14 @@ void VmaJsonWriter::ContinueString(uint64_t n) m_SB.AddNumber(n); } +void VmaJsonWriter::ContinueString_Size(size_t n) +{ + VMA_ASSERT(m_InsideString); + // Fix for AppleClang incorrect type casting + // TODO: Change to if constexpr when C++17 used as minimal standard + WriteSize(n, std::is_same<size_t, uint64_t>{}); +} + void VmaJsonWriter::ContinueString_Pointer(const void* ptr) { VMA_ASSERT(m_InsideString); @@ -5605,6 +5717,15 @@ void VmaJsonWriter::WriteNumber(uint64_t n) m_SB.AddNumber(n); } +void VmaJsonWriter::WriteSize(size_t n) +{ + VMA_ASSERT(!m_InsideString); + BeginValue(false); + // Fix for AppleClang incorrect type casting + // TODO: Change to if constexpr when C++17 used as minimal standard + WriteSize(n, std::is_same<size_t, uint64_t>{}); +} + void VmaJsonWriter::WriteBool(bool b) { VMA_ASSERT(!m_InsideString); @@ -5673,41 +5794,29 @@ static void VmaPrintDetailedStatistics(VmaJsonWriter& json, const VmaDetailedSta json.WriteString("BlockCount"); json.WriteNumber(stat.statistics.blockCount); - - json.WriteString("AllocationCount"); - json.WriteNumber(stat.statistics.allocationCount); - - json.WriteString("UnusedRangeCount"); - json.WriteNumber(stat.unusedRangeCount); - json.WriteString("BlockBytes"); json.WriteNumber(stat.statistics.blockBytes); - + json.WriteString("AllocationCount"); + json.WriteNumber(stat.statistics.allocationCount); json.WriteString("AllocationBytes"); json.WriteNumber(stat.statistics.allocationBytes); + 
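The VMA_COUNT_BITS_SET hook introduced above lets the including project swap the portable VmaCountBitsSet() fallback for a hardware popcount, as the accompanying comment suggests. A sketch of how an integrator could opt in, assuming the build only targets CPUs where the intrinsic is safe:

    // Must be defined before compiling the VMA implementation.
    #if defined(__GNUC__) || defined(__clang__)
        #define VMA_COUNT_BITS_SET(v) static_cast<uint32_t>(__builtin_popcount(v))
    #elif defined(_MSC_VER)
        #define VMA_COUNT_BITS_SET(v) __popcnt(v)
    #endif
    #define VMA_IMPLEMENTATION
    #include "vk_mem_alloc.h"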
json.WriteString("UnusedRangeCount"); + json.WriteNumber(stat.unusedRangeCount); if (stat.statistics.allocationCount > 1) { - json.WriteString("AllocationSize"); - json.BeginObject(true); - json.WriteString("Min"); + json.WriteString("AllocationSizeMin"); json.WriteNumber(stat.allocationSizeMin); - json.WriteString("Max"); + json.WriteString("AllocationSizeMax"); json.WriteNumber(stat.allocationSizeMax); - json.EndObject(); } - if (stat.unusedRangeCount > 1) { - json.WriteString("UnusedRangeSize"); - json.BeginObject(true); - json.WriteString("Min"); + json.WriteString("UnusedRangeSizeMin"); json.WriteNumber(stat.unusedRangeSizeMin); - json.WriteString("Max"); + json.WriteString("UnusedRangeSizeMax"); json.WriteNumber(stat.unusedRangeSizeMax); - json.EndObject(); } - json.EndObject(); } #endif // _VMA_JSON_WRITER @@ -5799,9 +5908,14 @@ private: void PostMinorCounter() { if(m_MinorCounter < m_MajorCounter) + { ++m_MinorCounter; + } else if(m_MajorCounter > 0) - --m_MajorCounter, --m_MinorCounter; + { + --m_MajorCounter; + --m_MinorCounter; + } } }; @@ -5899,9 +6013,8 @@ struct VmaAllocation_T enum FLAGS { - FLAG_USER_DATA_STRING = 0x01, - FLAG_PERSISTENT_MAP = 0x02, - FLAG_MAPPING_ALLOWED = 0x04, + FLAG_PERSISTENT_MAP = 0x01, + FLAG_MAPPING_ALLOWED = 0x02, }; public: @@ -5913,7 +6026,7 @@ public: }; // This struct is allocated using VmaPoolAllocator. - VmaAllocation_T(bool userDataString, bool mappingAllowed); + VmaAllocation_T(bool mappingAllowed); ~VmaAllocation_T(); void InitBlockAllocation( @@ -5936,8 +6049,8 @@ public: ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; } VkDeviceSize GetAlignment() const { return m_Alignment; } VkDeviceSize GetSize() const { return m_Size; } - bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; } void* GetUserData() const { return m_pUserData; } + const char* GetName() const { return m_pName; } VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; } VmaDeviceMemoryBlock* GetBlock() const { VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK); return m_BlockAllocation.m_Block; } @@ -5945,8 +6058,10 @@ public: bool IsPersistentMap() const { return (m_Flags & FLAG_PERSISTENT_MAP) != 0; } bool IsMappingAllowed() const { return (m_Flags & FLAG_MAPPING_ALLOWED) != 0; } - void SetUserData(VmaAllocator hAllocator, void* pUserData); - void SwapBlockAllocation(VmaAllocation allocation); + void SetUserData(VmaAllocator hAllocator, void* pUserData) { m_pUserData = pUserData; } + void SetName(VmaAllocator hAllocator, const char* pName); + void FreeName(VmaAllocator hAllocator); + uint8_t SwapBlockAllocation(VmaAllocator hAllocator, VmaAllocation allocation); VmaAllocHandle GetAllocHandle() const; VkDeviceSize GetOffset() const; VmaPool GetParentPool() const; @@ -5992,6 +6107,7 @@ private: VkDeviceSize m_Alignment; VkDeviceSize m_Size; void* m_pUserData; + char* m_pName; uint32_t m_MemoryTypeIndex; uint8_t m_Type; // ALLOCATION_TYPE uint8_t m_SuballocationType; // VmaSuballocationType @@ -6001,8 +6117,6 @@ private: #if VMA_STATS_STRING_ENABLED uint32_t m_BufferImageUsage; // 0 if unknown. #endif - - void FreeUserDataString(VmaAllocator hAllocator); }; #endif // _VMA_ALLOCATION_T @@ -6239,6 +6353,7 @@ public: // Validates all data structures inside this object. If not valid, returns false. 
virtual bool Validate() const = 0; virtual size_t GetAllocationCount() const = 0; + virtual size_t GetFreeRegionsCount() const = 0; virtual VkDeviceSize GetSumFreeSize() const = 0; // Returns true if this block is empty - contains only single free suballocation. virtual bool IsEmpty() const = 0; @@ -6248,14 +6363,14 @@ public: virtual VmaAllocHandle GetAllocationListBegin() const = 0; virtual VmaAllocHandle GetNextAllocation(VmaAllocHandle prevAlloc) const = 0; + virtual VkDeviceSize GetNextFreeRegionSize(VmaAllocHandle alloc) const = 0; // Shouldn't modify blockCount. virtual void AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const = 0; virtual void AddStatistics(VmaStatistics& inoutStats) const = 0; #if VMA_STATS_STRING_ENABLED - // mapRefCount == UINT32_MAX means unspecified. - virtual void PrintDetailedMap(class VmaJsonWriter& json, uint32_t mapRefCount) const = 0; + virtual void PrintDetailedMap(class VmaJsonWriter& json) const = 0; #endif // Tries to find a place for suballocation with given parameters inside this block. @@ -6299,8 +6414,7 @@ protected: void PrintDetailedMap_Begin(class VmaJsonWriter& json, VkDeviceSize unusedBytes, size_t allocationCount, - size_t unusedRangeCount, - uint32_t mapRefCount) const; + size_t unusedRangeCount) const; void PrintDetailedMap_Allocation(class VmaJsonWriter& json, VkDeviceSize offset, VkDeviceSize size, void* userData) const; void PrintDetailedMap_UnusedRange(class VmaJsonWriter& json, @@ -6336,35 +6450,17 @@ void VmaBlockMetadata::DebugLogAllocation(VkDeviceSize offset, VkDeviceSize size VmaAllocation allocation = reinterpret_cast<VmaAllocation>(userData); userData = allocation->GetUserData(); + const char* name = allocation->GetName(); #if VMA_STATS_STRING_ENABLED - if (userData != VMA_NULL && allocation->IsUserDataString()) - { - VMA_DEBUG_LOG("UNFREED ALLOCATION; Offset: %llu; Size: %llu; UserData: %s; Type: %s; Usage: %u", - offset, size, reinterpret_cast<const char*>(userData), - VMA_SUBALLOCATION_TYPE_NAMES[allocation->GetSuballocationType()], - allocation->GetBufferImageUsage()); - } - else - { - VMA_DEBUG_LOG("UNFREED ALLOCATION; Offset: %llu; Size: %llu; UserData: %p; Type: %s; Usage: %u", - offset, size, userData, - VMA_SUBALLOCATION_TYPE_NAMES[allocation->GetSuballocationType()], - allocation->GetBufferImageUsage()); - } + VMA_DEBUG_LOG("UNFREED ALLOCATION; Offset: %llu; Size: %llu; UserData: %p; Name: %s; Type: %s; Usage: %u", + offset, size, userData, name ? name : "vma_empty", + VMA_SUBALLOCATION_TYPE_NAMES[allocation->GetSuballocationType()], + allocation->GetBufferImageUsage()); #else - if (userData != VMA_NULL && allocation->IsUserDataString()) - { - VMA_DEBUG_LOG("UNFREED ALLOCATION; Offset: %llu; Size: %llu; UserData: %s; Type: %u", - offset, size, reinterpret_cast<const char*>(userData), - (uint32_t)allocation->GetSuballocationType()); - } - else - { - VMA_DEBUG_LOG("UNFREED ALLOCATION; Offset: %llu; Size: %llu; UserData: %p; Type: %u", - offset, size, userData, - (uint32_t)allocation->GetSuballocationType()); - } + VMA_DEBUG_LOG("UNFREED ALLOCATION; Offset: %llu; Size: %llu; UserData: %p; Name: %s; Type: %u", + offset, size, userData, name ? 
name : "vma_empty", + (uint32_t)allocation->GetSuballocationType()); #endif // VMA_STATS_STRING_ENABLED } @@ -6372,27 +6468,19 @@ void VmaBlockMetadata::DebugLogAllocation(VkDeviceSize offset, VkDeviceSize size #if VMA_STATS_STRING_ENABLED void VmaBlockMetadata::PrintDetailedMap_Begin(class VmaJsonWriter& json, - VkDeviceSize unusedBytes, size_t allocationCount, size_t unusedRangeCount, uint32_t mapRefCount) const + VkDeviceSize unusedBytes, size_t allocationCount, size_t unusedRangeCount) const { - json.BeginObject(); - json.WriteString("TotalBytes"); json.WriteNumber(GetSize()); json.WriteString("UnusedBytes"); - json.WriteNumber(unusedBytes); + json.WriteSize(unusedBytes); json.WriteString("Allocations"); - json.WriteNumber((uint64_t)allocationCount); + json.WriteSize(allocationCount); json.WriteString("UnusedRanges"); - json.WriteNumber((uint64_t)unusedRangeCount); - - if(mapRefCount != UINT32_MAX) - { - json.WriteString("MapRefCount"); - json.WriteNumber(mapRefCount); - } + json.WriteSize(unusedRangeCount); json.WriteString("Suballocations"); json.BeginArray(); @@ -6408,15 +6496,11 @@ void VmaBlockMetadata::PrintDetailedMap_Allocation(class VmaJsonWriter& json, if (IsVirtual()) { - json.WriteString("Type"); - json.WriteString("VirtualAllocation"); - json.WriteString("Size"); json.WriteNumber(size); - - if (userData != VMA_NULL) + if (userData) { - json.WriteString("UserData"); + json.WriteString("CustomData"); json.BeginString(); json.ContinueString_Pointer(userData); json.EndString(); @@ -6450,7 +6534,6 @@ void VmaBlockMetadata::PrintDetailedMap_UnusedRange(class VmaJsonWriter& json, void VmaBlockMetadata::PrintDetailedMap_End(class VmaJsonWriter& json) const { json.EndArray(); - json.EndObject(); } #endif // VMA_STATS_STRING_ENABLED #endif // _VMA_BLOCK_METADATA_FUNCTIONS @@ -7549,12 +7632,13 @@ public: void Init(VkDeviceSize size) override; bool Validate() const override; size_t GetAllocationCount() const override; + size_t GetFreeRegionsCount() const override; void AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const override; void AddStatistics(VmaStatistics& inoutStats) const override; #if VMA_STATS_STRING_ENABLED - void PrintDetailedMap(class VmaJsonWriter& json, uint32_t mapRefCount) const override; + void PrintDetailedMap(class VmaJsonWriter& json) const override; #endif bool CreateAllocationRequest( @@ -7577,6 +7661,7 @@ public: void* GetAllocationUserData(VmaAllocHandle allocHandle) const override; VmaAllocHandle GetAllocationListBegin() const override; VmaAllocHandle GetNextAllocation(VmaAllocHandle prevAlloc) const override; + VkDeviceSize GetNextFreeRegionSize(VmaAllocHandle alloc) const override; void Clear() override; void SetAllocationUserData(VmaAllocHandle allocHandle, void* userData) override; void DebugLogAllAllocations() const override; @@ -7815,6 +7900,13 @@ size_t VmaBlockMetadata_Linear::GetAllocationCount() const AccessSuballocations2nd().size() - m_2ndNullItemsCount; } +size_t VmaBlockMetadata_Linear::GetFreeRegionsCount() const +{ + // Function only used for defragmentation, which is disabled for this algorithm + VMA_ASSERT(0); + return SIZE_MAX; +} + void VmaBlockMetadata_Linear::AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const { const VkDeviceSize size = GetSize(); @@ -8133,7 +8225,7 @@ void VmaBlockMetadata_Linear::AddStatistics(VmaStatistics& inoutStats) const } #if VMA_STATS_STRING_ENABLED -void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json, uint32_t mapRefCount) const +void 
VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const { const VkDeviceSize size = GetSize(); const SuballocationVectorType& suballocations1st = AccessSuballocations1st(); @@ -8295,7 +8387,7 @@ void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json, uint32 } const VkDeviceSize unusedBytes = size - usedBytes; - PrintDetailedMap_Begin(json, unusedBytes, alloc1stCount + alloc2ndCount, unusedRangeCount, mapRefCount); + PrintDetailedMap_Begin(json, unusedBytes, alloc1stCount + alloc2ndCount, unusedRangeCount); // SECOND PASS lastOffset = 0; @@ -8686,6 +8778,13 @@ VmaAllocHandle VmaBlockMetadata_Linear::GetNextAllocation(VmaAllocHandle prevAll return VK_NULL_HANDLE; } +VkDeviceSize VmaBlockMetadata_Linear::GetNextFreeRegionSize(VmaAllocHandle alloc) const +{ + // Function only used for defragmentation, which is disabled for this algorithm + VMA_ASSERT(0); + return 0; +} + void VmaBlockMetadata_Linear::Clear() { m_SumFreeSize = GetSize(); @@ -9884,6 +9983,7 @@ public: virtual ~VmaBlockMetadata_TLSF(); size_t GetAllocationCount() const override { return m_AllocCount; } + size_t GetFreeRegionsCount() const override { return m_BlocksFreeCount + 1; } VkDeviceSize GetSumFreeSize() const override { return m_BlocksFreeSize + m_NullBlock->size; } bool IsEmpty() const override { return m_NullBlock->offset == 0; } VkDeviceSize GetAllocationOffset(VmaAllocHandle allocHandle) const override { return ((Block*)allocHandle)->offset; }; @@ -9895,7 +9995,7 @@ public: void AddStatistics(VmaStatistics& inoutStats) const override; #if VMA_STATS_STRING_ENABLED - void PrintDetailedMap(class VmaJsonWriter& json, uint32_t mapRefCount) const override; + void PrintDetailedMap(class VmaJsonWriter& json) const override; #endif bool CreateAllocationRequest( @@ -9917,6 +10017,7 @@ public: void* GetAllocationUserData(VmaAllocHandle allocHandle) const override; VmaAllocHandle GetAllocationListBegin() const override; VmaAllocHandle GetNextAllocation(VmaAllocHandle prevAlloc) const override; + VkDeviceSize GetNextFreeRegionSize(VmaAllocHandle alloc) const override; void Clear() override; void SetAllocationUserData(VmaAllocHandle allocHandle, void* userData) override; void DebugLogAllAllocations() const override; @@ -10167,7 +10268,7 @@ void VmaBlockMetadata_TLSF::AddStatistics(VmaStatistics& inoutStats) const } #if VMA_STATS_STRING_ENABLED -void VmaBlockMetadata_TLSF::PrintDetailedMap(class VmaJsonWriter& json, uint32_t mapRefCount) const +void VmaBlockMetadata_TLSF::PrintDetailedMap(class VmaJsonWriter& json) const { size_t blockCount = m_AllocCount + m_BlocksFreeCount; VmaStlAllocator<Block*> allocator(GetAllocationCallbacks()); @@ -10184,12 +10285,10 @@ void VmaBlockMetadata_TLSF::PrintDetailedMap(class VmaJsonWriter& json, uint32_t VmaClearDetailedStatistics(stats); AddDetailedStatistics(stats); - PrintDetailedMap_Begin( - json, + PrintDetailedMap_Begin(json, stats.statistics.blockBytes - stats.statistics.allocationBytes, stats.statistics.allocationCount, - stats.unusedRangeCount, - mapRefCount); + stats.unusedRangeCount); for (; i < blockCount; ++i) { @@ -10593,6 +10692,16 @@ VmaAllocHandle VmaBlockMetadata_TLSF::GetNextAllocation(VmaAllocHandle prevAlloc return VK_NULL_HANDLE; } +VkDeviceSize VmaBlockMetadata_TLSF::GetNextFreeRegionSize(VmaAllocHandle alloc) const +{ + Block* block = (Block*)alloc; + VMA_ASSERT(!block->IsFree() && "Incorrect block!"); + + if (block->prevPhysical) + return block->prevPhysical->IsFree() ? 
block->prevPhysical->size : 0; + return 0; +} + void VmaBlockMetadata_TLSF::Clear() { m_AllocCount = 0; @@ -10837,6 +10946,7 @@ public: size_t GetBlockCount() const { return m_Blocks.size(); } // To be used only while the m_Mutex is locked. Used during defragmentation. VmaDeviceMemoryBlock* GetBlock(size_t index) const { return m_Blocks[index]; } + VMA_RW_MUTEX &GetMutex() { return m_Mutex; } VkResult CreateMinBlocks(); void AddStatistics(VmaStatistics& inoutStats); @@ -10852,7 +10962,7 @@ public: size_t allocationCount, VmaAllocation* pAllocations); - void Free(const VmaAllocation hAllocation, bool incrementalSort = true); + void Free(const VmaAllocation hAllocation); #if VMA_STATS_STRING_ENABLED void PrintDetailedMap(class VmaJsonWriter& json); @@ -10878,6 +10988,9 @@ private: // Incrementally sorted by sumFreeSize, ascending. VmaVector<VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*>> m_Blocks; uint32_t m_NextBlockId; + bool m_IncrementalSort = true; + + void SetIncrementalSort(bool val) { m_IncrementalSort = val; } VkDeviceSize CalcMaxBlockSize() const; // Finds and removes given block from vector. @@ -10928,17 +11041,26 @@ public: const VmaDefragmentationInfo& info); ~VmaDefragmentationContext_T(); - void GetStats(VmaDefragmentationStats& outStats) { outStats = m_Stats; } + void GetStats(VmaDefragmentationStats& outStats) { outStats = m_GlobalStats; } VkResult DefragmentPassBegin(VmaDefragmentationPassMoveInfo& moveInfo); VkResult DefragmentPassEnd(VmaDefragmentationPassMoveInfo& moveInfo); private: - struct ImmovableBlock + // Max number of allocations to ignore due to size constraints before ending single pass + static const uint8_t MAX_ALLOCS_TO_IGNORE = 16; + enum class CounterStatus { Pass, Ignore, End }; + + struct FragmentedBlock { - uint32_t vectorIndex; + uint32_t data; VmaDeviceMemoryBlock* block; }; + struct StateBalanced + { + VkDeviceSize avgFreeSize = 0; + VkDeviceSize avgAllocSize = UINT64_MAX; + }; struct StateExtensive { enum class Operation : uint8_t @@ -10966,25 +11088,29 @@ private: VmaStlAllocator<VmaDefragmentationMove> m_MoveAllocator; VmaVector<VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove>> m_Moves; + uint8_t m_IgnoredAllocs = 0; uint32_t m_Algorithm; uint32_t m_BlockVectorCount; VmaBlockVector* m_PoolBlockVector; VmaBlockVector** m_pBlockVectors; size_t m_ImmovableBlockCount = 0; - VmaDefragmentationStats m_Stats = { 0 }; + VmaDefragmentationStats m_GlobalStats = { 0 }; + VmaDefragmentationStats m_PassStats = { 0 }; void* m_AlgorithmState = VMA_NULL; static MoveAllocationData GetMoveData(VmaAllocHandle handle, VmaBlockMetadata* metadata); - bool IncrementCounters(uint32_t& allocations, VkDeviceSize bytes); + CounterStatus CheckCounters(VkDeviceSize bytes); + bool IncrementCounters(VkDeviceSize bytes); bool ReallocWithinBlock(VmaBlockVector& vector, VmaDeviceMemoryBlock* block); bool AllocInOtherBlock(size_t start, size_t end, MoveAllocationData& data, VmaBlockVector& vector); bool ComputeDefragmentation(VmaBlockVector& vector, size_t index); bool ComputeDefragmentation_Fast(VmaBlockVector& vector); - bool ComputeDefragmentation_Balanced(VmaBlockVector& vector); + bool ComputeDefragmentation_Balanced(VmaBlockVector& vector, size_t index, bool update); bool ComputeDefragmentation_Full(VmaBlockVector& vector); bool ComputeDefragmentation_Extensive(VmaBlockVector& vector, size_t index); + void UpdateVectorStatistics(VmaBlockVector& vector, StateBalanced& state); bool MoveDataToFreeBlocks(VmaSuballocationType currentType, 
VmaBlockVector& vector, size_t firstFreeBlock, bool& texturePresent, bool& bufferPresent, bool& otherPresent); @@ -11259,8 +11385,9 @@ void VmaVirtualBlock_T::BuildStatsString(bool detailedMap, VmaStringBuilder& sb) if (detailedMap) { json.WriteString("Details"); - m_Metadata->PrintDetailedMap(json, - UINT32_MAX); // mapRefCount + json.BeginObject(); + m_Metadata->PrintDetailedMap(json); + json.EndObject(); } json.EndObject(); @@ -11868,18 +11995,17 @@ VkResult VmaDeviceMemoryBlock::BindImageMemory( #endif // _VMA_DEVICE_MEMORY_BLOCK_FUNCTIONS #ifndef _VMA_ALLOCATION_T_FUNCTIONS -VmaAllocation_T::VmaAllocation_T(bool userDataString, bool mappingAllowed) +VmaAllocation_T::VmaAllocation_T(bool mappingAllowed) : m_Alignment{ 1 }, m_Size{ 0 }, m_pUserData{ VMA_NULL }, + m_pName{ VMA_NULL }, m_MemoryTypeIndex{ 0 }, m_Type{ (uint8_t)ALLOCATION_TYPE_NONE }, m_SuballocationType{ (uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN }, m_MapCount{ 0 }, m_Flags{ 0 } { - if(userDataString) - m_Flags |= (uint8_t)FLAG_USER_DATA_STRING; if(mappingAllowed) m_Flags |= (uint8_t)FLAG_MAPPING_ALLOWED; @@ -11893,7 +12019,7 @@ VmaAllocation_T::~VmaAllocation_T() VMA_ASSERT(m_MapCount == 0 && "Allocation was not unmapped before destruction."); // Check if owned string was freed. - VMA_ASSERT((IsUserDataString() && m_pUserData == VMA_NULL) || !IsUserDataString()); + VMA_ASSERT(m_pName == VMA_NULL); } void VmaAllocation_T::InitBlockAllocation( @@ -11948,31 +12074,25 @@ void VmaAllocation_T::InitDedicatedAllocation( m_DedicatedAllocation.m_Next = VMA_NULL; } -void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData) +void VmaAllocation_T::SetName(VmaAllocator hAllocator, const char* pName) { - if (IsUserDataString()) - { - VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData); + VMA_ASSERT(pName == VMA_NULL || pName != m_pName); - FreeUserDataString(hAllocator); + FreeName(hAllocator); - if (pUserData != VMA_NULL) - { - m_pUserData = VmaCreateStringCopy(hAllocator->GetAllocationCallbacks(), (const char*)pUserData); - } - } - else - { - m_pUserData = pUserData; - } + if (pName != VMA_NULL) + m_pName = VmaCreateStringCopy(hAllocator->GetAllocationCallbacks(), pName); } -void VmaAllocation_T::SwapBlockAllocation(VmaAllocation allocation) +uint8_t VmaAllocation_T::SwapBlockAllocation(VmaAllocator hAllocator, VmaAllocation allocation) { VMA_ASSERT(allocation != VMA_NULL); VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK); VMA_ASSERT(allocation->m_Type == ALLOCATION_TYPE_BLOCK); + if (m_MapCount != 0) + m_BlockAllocation.m_Block->Unmap(hAllocator, m_MapCount); + m_BlockAllocation.m_Block->m_pMetadata->SetAllocationUserData(m_BlockAllocation.m_AllocHandle, allocation); VMA_SWAP(m_BlockAllocation, allocation->m_BlockAllocation); m_BlockAllocation.m_Block->m_pMetadata->SetAllocationUserData(m_BlockAllocation.m_AllocHandle, this); @@ -11980,6 +12100,7 @@ void VmaAllocation_T::SwapBlockAllocation(VmaAllocation allocation) #if VMA_STATS_STRING_ENABLED VMA_SWAP(m_BufferImageUsage, allocation->m_BufferImageUsage); #endif + return m_MapCount; } VmaAllocHandle VmaAllocation_T::GetAllocHandle() const @@ -12165,35 +12286,31 @@ void VmaAllocation_T::PrintParameters(class VmaJsonWriter& json) const json.WriteString("Size"); json.WriteNumber(m_Size); + json.WriteString("Usage"); + json.WriteNumber(m_BufferImageUsage); if (m_pUserData != VMA_NULL) { - json.WriteString("UserData"); - if (IsUserDataString()) - { - json.WriteString((const char*)m_pUserData); - } - else - { - json.BeginString(); - 
json.ContinueString_Pointer(m_pUserData); - json.EndString(); - } + json.WriteString("CustomData"); + json.BeginString(); + json.ContinueString_Pointer(m_pUserData); + json.EndString(); } - - if (m_BufferImageUsage != 0) + if (m_pName != VMA_NULL) { - json.WriteString("Usage"); - json.WriteNumber(m_BufferImageUsage); + json.WriteString("Name"); + json.WriteString(m_pName); } } #endif // VMA_STATS_STRING_ENABLED -void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator) +void VmaAllocation_T::FreeName(VmaAllocator hAllocator) { - VMA_ASSERT(IsUserDataString()); - VmaFreeString(hAllocator->GetAllocationCallbacks(), (char*)m_pUserData); - m_pUserData = VMA_NULL; + if(m_pName) + { + VmaFreeString(hAllocator->GetAllocationCallbacks(), m_pName); + m_pName = VMA_NULL; + } } #endif // _VMA_ALLOCATION_T_FUNCTIONS @@ -12542,9 +12659,7 @@ VkResult VmaBlockVector::AllocatePage( return VK_ERROR_OUT_OF_DEVICE_MEMORY; } -void VmaBlockVector::Free( - const VmaAllocation hAllocation, - bool incrementalSort) +void VmaBlockVector::Free(const VmaAllocation hAllocation) { VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL; @@ -12604,8 +12719,7 @@ void VmaBlockVector::Free( } } - if (incrementalSort) - IncrementallySortBlocks(); + IncrementallySortBlocks(); } // Destruction of a free block. Deferred until this point, outside of mutex @@ -12650,6 +12764,8 @@ void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock) void VmaBlockVector::IncrementallySortBlocks() { + if (!m_IncrementalSort) + return; if (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT) { // Bubble sort only until first swap. @@ -12724,7 +12840,7 @@ VkResult VmaBlockVector::CommitAllocationRequest( } } - *pAllocation = m_hAllocator->m_AllocationObjectAllocator.Allocate(isUserDataString, isMappingAllowed); + *pAllocation = m_hAllocator->m_AllocationObjectAllocator.Allocate(isMappingAllowed); pBlock->m_pMetadata->Alloc(allocRequest, suballocType, *pAllocation); (*pAllocation)->InitBlockAllocation( pBlock, @@ -12735,7 +12851,10 @@ VkResult VmaBlockVector::CommitAllocationRequest( suballocType, mapped); VMA_HEAVY_ASSERT(pBlock->Validate()); - (*pAllocation)->SetUserData(m_hAllocator, pUserData); + if (isUserDataString) + (*pAllocation)->SetName(m_hAllocator, (const char*)pUserData); + else + (*pAllocation)->SetUserData(m_hAllocator, pUserData); m_hAllocator->m_Budget.AddAllocation(m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex), allocRequest.size); if (VMA_DEBUG_INITIALIZE_ALLOCATIONS) { @@ -12770,6 +12889,7 @@ VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIn VkMemoryPriorityAllocateInfoEXT priorityInfo = { VK_STRUCTURE_TYPE_MEMORY_PRIORITY_ALLOCATE_INFO_EXT }; if (m_hAllocator->m_UseExtMemoryPriority) { + VMA_ASSERT(m_Priority >= 0.f && m_Priority <= 1.f); priorityInfo.priority = m_Priority; VmaPnextChainPushFront(&allocInfo, &priorityInfo); } @@ -12833,50 +12953,7 @@ void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json) { VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex); - if (IsCustomPool()) - { - const char* poolName = m_hParentPool->GetName(); - if (poolName != VMA_NULL && poolName[0] != '\0') - { - json.WriteString("Name"); - json.WriteString(poolName); - } - - json.WriteString("MemoryTypeIndex"); - json.WriteNumber(m_MemoryTypeIndex); - - json.WriteString("BlockSize"); - json.WriteNumber(m_PreferredBlockSize); - - json.WriteString("BlockCount"); - json.BeginObject(true); - if (m_MinBlockCount > 0) - { - json.WriteString("Min"); - json.WriteNumber((uint64_t)m_MinBlockCount); 
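To put the defragmentation changes above and below in context (the balanced algorithm becoming the default, per-pass vs. global statistics, moves carrying a dstTmpAllocation), here is a rough sketch of how the public API drives VmaDefragmentationContext_T, assuming an existing allocator and leaving out the actual copies between source and destination allocations:

    VmaDefragmentationInfo defragInfo = {};
    defragInfo.flags = VMA_DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED_BIT; // also chosen when flags == 0
    VmaDefragmentationContext defragCtx;
    if (vmaBeginDefragmentation(allocator, &defragInfo, &defragCtx) == VK_SUCCESS)
    {
        for (;;)
        {
            VmaDefragmentationPassMoveInfo pass = {};
            if (vmaBeginDefragmentationPass(allocator, defragCtx, &pass) == VK_SUCCESS)
                break; // nothing left to move
            // Copy pass.pMoves[i].srcAllocation into pass.pMoves[i].dstTmpAllocation here,
            // or change a move's operation to IGNORE/DESTROY before ending the pass.
            if (vmaEndDefragmentationPass(allocator, defragCtx, &pass) == VK_SUCCESS)
                break; // defragmentation finished
        }
        VmaDefragmentationStats stats = {};
        vmaEndDefragmentation(allocator, defragCtx, &stats); // returns void after this change
    }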
- } - if (m_MaxBlockCount < SIZE_MAX) - { - json.WriteString("Max"); - json.WriteNumber((uint64_t)m_MaxBlockCount); - } - json.WriteString("Cur"); - json.WriteNumber((uint64_t)m_Blocks.size()); - json.EndObject(); - - if (m_Algorithm != 0) - { - json.WriteString("Algorithm"); - json.WriteString(VmaAlgorithmToStr(m_Algorithm)); - } - } - else - { - json.WriteString("PreferredBlockSize"); - json.WriteNumber(m_PreferredBlockSize); - } - json.WriteString("Blocks"); json.BeginObject(); for (size_t i = 0; i < m_Blocks.size(); ++i) { @@ -12884,7 +12961,12 @@ void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json) json.ContinueString(m_Blocks[i]->GetId()); json.EndString(); - m_Blocks[i]->m_pMetadata->PrintDetailedMap(json, m_Blocks[i]->GetMapRefCount()); + json.BeginObject(); + json.WriteString("MapRefCount"); + json.WriteNumber(m_Blocks[i]->GetMapRefCount()); + + m_Blocks[i]->m_pMetadata->PrintDetailedMap(json); + json.EndObject(); } json.EndObject(); } @@ -12929,6 +13011,7 @@ VmaDefragmentationContext_T::VmaDefragmentationContext_T( m_BlockVectorCount = 1; m_PoolBlockVector = &info.pool->m_BlockVector; m_pBlockVectors = &m_PoolBlockVector; + m_PoolBlockVector->SetIncrementalSort(false); m_PoolBlockVector->SortByFreeSize(); } else @@ -12940,12 +13023,22 @@ VmaDefragmentationContext_T::VmaDefragmentationContext_T( { VmaBlockVector* vector = m_pBlockVectors[i]; if (vector != VMA_NULL) + { + vector->SetIncrementalSort(false); vector->SortByFreeSize(); + } } } switch (m_Algorithm) { + case 0: // Default algorithm + m_Algorithm = VMA_DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED_BIT; + case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED_BIT: + { + m_AlgorithmState = vma_new_array(hAllocator, StateBalanced, m_BlockVectorCount); + break; + } case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT: { if (hAllocator->GetBufferImageGranularity() > 1) @@ -12959,10 +13052,27 @@ VmaDefragmentationContext_T::VmaDefragmentationContext_T( VmaDefragmentationContext_T::~VmaDefragmentationContext_T() { + if (m_PoolBlockVector != VMA_NULL) + { + m_PoolBlockVector->SetIncrementalSort(true); + } + else + { + for (uint32_t i = 0; i < m_BlockVectorCount; ++i) + { + VmaBlockVector* vector = m_pBlockVectors[i]; + if (vector != VMA_NULL) + vector->SetIncrementalSort(true); + } + } + if (m_AlgorithmState) { switch (m_Algorithm) { + case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED_BIT: + vma_delete_array(m_MoveAllocator.m_pCallbacks, reinterpret_cast<StateBalanced*>(m_AlgorithmState), m_BlockVectorCount); + break; case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT: vma_delete_array(m_MoveAllocator.m_pCallbacks, reinterpret_cast<StateExtensive*>(m_AlgorithmState), m_BlockVectorCount); break; @@ -12976,6 +13086,8 @@ VkResult VmaDefragmentationContext_T::DefragmentPassBegin(VmaDefragmentationPass { if (m_PoolBlockVector != VMA_NULL) { + VmaMutexLockWrite lock(m_PoolBlockVector->GetMutex(), m_PoolBlockVector->GetAllocator()->m_UseMutex); + if (m_PoolBlockVector->GetBlockCount() > 1) ComputeDefragmentation(*m_PoolBlockVector, 0); else if (m_PoolBlockVector->GetBlockCount() == 1) @@ -12987,6 +13099,8 @@ VkResult VmaDefragmentationContext_T::DefragmentPassBegin(VmaDefragmentationPass { if (m_pBlockVectors[i] != VMA_NULL) { + VmaMutexLockWrite lock(m_pBlockVectors[i]->GetMutex(), m_pBlockVectors[i]->GetAllocator()->m_UseMutex); + if (m_pBlockVectors[i]->GetBlockCount() > 1) { if (ComputeDefragmentation(*m_pBlockVectors[i], i)) @@ -13017,7 +13131,11 @@ VkResult VmaDefragmentationContext_T::DefragmentPassEnd(VmaDefragmentationPassMo 
VMA_ASSERT(moveInfo.moveCount > 0 ? moveInfo.pMoves != VMA_NULL : true); VkResult result = VK_SUCCESS; - VmaVector<ImmovableBlock, VmaStlAllocator<ImmovableBlock>> immovableBlocks(VmaStlAllocator<ImmovableBlock>(m_MoveAllocator.m_pCallbacks)); + VmaStlAllocator<FragmentedBlock> blockAllocator(m_MoveAllocator.m_pCallbacks); + VmaVector<FragmentedBlock, VmaStlAllocator<FragmentedBlock>> immovableBlocks(blockAllocator); + VmaVector<FragmentedBlock, VmaStlAllocator<FragmentedBlock>> mappedBlocks(blockAllocator); + + VmaAllocator allocator = VMA_NULL; for (uint32_t i = 0; i < moveInfo.moveCount; ++i) { VmaDefragmentationMove& move = moveInfo.pMoves[i]; @@ -13037,29 +13155,54 @@ VkResult VmaDefragmentationContext_T::DefragmentPassEnd(VmaDefragmentationPassMo vector = m_pBlockVectors[vectorIndex]; VMA_ASSERT(vector != VMA_NULL); } - - VmaAllocation dst = reinterpret_cast<VmaAllocation>(move.internalData); + switch (move.operation) { case VMA_DEFRAGMENTATION_MOVE_OPERATION_COPY: { - move.srcAllocation->SwapBlockAllocation(dst); - prevCount = vector->GetBlockCount(); - freedBlockSize = dst->GetBlock()->m_pMetadata->GetSize(); - vector->Free(dst, false); - currentCount = vector->GetBlockCount(); + uint8_t mapCount = move.srcAllocation->SwapBlockAllocation(vector->m_hAllocator, move.dstTmpAllocation); + if (mapCount > 0) + { + allocator = vector->m_hAllocator; + VmaDeviceMemoryBlock* newMapBlock = move.srcAllocation->GetBlock(); + bool notPresent = true; + for (FragmentedBlock& block : mappedBlocks) + { + if (block.block == newMapBlock) + { + notPresent = false; + block.data += mapCount; + break; + } + } + if (notPresent) + mappedBlocks.push_back({ mapCount, newMapBlock }); + } + + // Scope for locks, Free have it's own lock + { + VmaMutexLockRead lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex); + prevCount = vector->GetBlockCount(); + freedBlockSize = move.dstTmpAllocation->GetBlock()->m_pMetadata->GetSize(); + } + vector->Free(move.dstTmpAllocation); + { + VmaMutexLockRead lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex); + currentCount = vector->GetBlockCount(); + } result = VK_INCOMPLETE; break; } case VMA_DEFRAGMENTATION_MOVE_OPERATION_IGNORE: { - m_Stats.bytesMoved -= move.srcAllocation->GetSize(); - vector->Free(dst, false); + m_PassStats.bytesMoved -= move.srcAllocation->GetSize(); + --m_PassStats.allocationsMoved; + vector->Free(move.dstTmpAllocation); VmaDeviceMemoryBlock* newBlock = move.srcAllocation->GetBlock(); bool notPresent = true; - for (const ImmovableBlock& block : immovableBlocks) + for (const FragmentedBlock& block : immovableBlocks) { if (block.block == newBlock) { @@ -13073,16 +13216,32 @@ VkResult VmaDefragmentationContext_T::DefragmentPassEnd(VmaDefragmentationPassMo } case VMA_DEFRAGMENTATION_MOVE_OPERATION_DESTROY: { - prevCount = vector->GetBlockCount(); - freedBlockSize = move.srcAllocation->GetBlock()->m_pMetadata->GetSize(); - vector->Free(move.srcAllocation, false); - currentCount = vector->GetBlockCount(); + m_PassStats.bytesMoved -= move.srcAllocation->GetSize(); + --m_PassStats.allocationsMoved; + // Scope for locks, Free have it's own lock + { + VmaMutexLockRead lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex); + prevCount = vector->GetBlockCount(); + freedBlockSize = move.srcAllocation->GetBlock()->m_pMetadata->GetSize(); + } + vector->Free(move.srcAllocation); + { + VmaMutexLockRead lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex); + currentCount = vector->GetBlockCount(); + } freedBlockSize *= prevCount - 
currentCount; - VkDeviceSize dstBlockSize = dst->GetBlock()->m_pMetadata->GetSize(); - vector->Free(dst, false); - freedBlockSize += dstBlockSize * (currentCount - vector->GetBlockCount()); - currentCount = vector->GetBlockCount(); + VkDeviceSize dstBlockSize; + { + VmaMutexLockRead lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex); + dstBlockSize = move.dstTmpAllocation->GetBlock()->m_pMetadata->GetSize(); + } + vector->Free(move.dstTmpAllocation); + { + VmaMutexLockRead lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex); + freedBlockSize += dstBlockSize * (currentCount - vector->GetBlockCount()); + currentCount = vector->GetBlockCount(); + } result = VK_INCOMPLETE; break; @@ -13094,8 +13253,8 @@ VkResult VmaDefragmentationContext_T::DefragmentPassEnd(VmaDefragmentationPassMo if (prevCount > currentCount) { size_t freedBlocks = prevCount - currentCount; - m_Stats.deviceMemoryBlocksFreed += static_cast<uint32_t>(freedBlocks); - m_Stats.bytesFreed += freedBlockSize; + m_PassStats.deviceMemoryBlocksFreed += static_cast<uint32_t>(freedBlocks); + m_PassStats.bytesFreed += freedBlockSize; } switch (m_Algorithm) @@ -13108,9 +13267,15 @@ VkResult VmaDefragmentationContext_T::DefragmentPassEnd(VmaDefragmentationPassMo StateExtensive& state = reinterpret_cast<StateExtensive*>(m_AlgorithmState)[vectorIndex]; if (state.firstFreeBlock != SIZE_MAX) { - state.firstFreeBlock -= prevCount - currentCount; - if (state.firstFreeBlock != 0) - state.firstFreeBlock -= vector->GetBlock(state.firstFreeBlock - 1)->m_pMetadata->IsEmpty(); + const size_t diff = prevCount - currentCount; + if (state.firstFreeBlock >= diff) + { + state.firstFreeBlock -= diff; + if (state.firstFreeBlock != 0) + state.firstFreeBlock -= vector->GetBlock(state.firstFreeBlock - 1)->m_pMetadata->IsEmpty(); + } + else + state.firstFreeBlock = 0; } } } @@ -13120,6 +13285,13 @@ VkResult VmaDefragmentationContext_T::DefragmentPassEnd(VmaDefragmentationPassMo moveInfo.pMoves = VMA_NULL; m_Moves.clear(); + // Update stats + m_GlobalStats.allocationsMoved += m_PassStats.allocationsMoved; + m_GlobalStats.bytesFreed += m_PassStats.bytesFreed; + m_GlobalStats.bytesMoved += m_PassStats.bytesMoved; + m_GlobalStats.deviceMemoryBlocksFreed += m_PassStats.deviceMemoryBlocksFreed; + m_PassStats = { 0 }; + // Move blocks with immovable allocations according to algorithm if (immovableBlocks.size() > 0) { @@ -13131,12 +13303,14 @@ VkResult VmaDefragmentationContext_T::DefragmentPassEnd(VmaDefragmentationPassMo { bool swapped = false; // Move to the start of free blocks range - for (const ImmovableBlock& block : immovableBlocks) + for (const FragmentedBlock& block : immovableBlocks) { - StateExtensive& state = reinterpret_cast<StateExtensive*>(m_AlgorithmState)[block.vectorIndex]; + StateExtensive& state = reinterpret_cast<StateExtensive*>(m_AlgorithmState)[block.data]; if (state.operation != StateExtensive::Operation::Cleanup) { - VmaBlockVector* vector = m_pBlockVectors[block.vectorIndex]; + VmaBlockVector* vector = m_pBlockVectors[block.data]; + VmaMutexLockWrite lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex); + for (size_t i = 0, count = vector->GetBlockCount() - m_ImmovableBlockCount; i < count; ++i) { if (vector->GetBlock(i) == block.block) @@ -13144,9 +13318,12 @@ VkResult VmaDefragmentationContext_T::DefragmentPassEnd(VmaDefragmentationPassMo VMA_SWAP(vector->m_Blocks[i], vector->m_Blocks[vector->GetBlockCount() - ++m_ImmovableBlockCount]); if (state.firstFreeBlock != SIZE_MAX) { - if (i < state.firstFreeBlock - 1) + 
if (i + 1 < state.firstFreeBlock) { - VMA_SWAP(vector->m_Blocks[i], vector->m_Blocks[--state.firstFreeBlock]); + if (state.firstFreeBlock > 1) + VMA_SWAP(vector->m_Blocks[i], vector->m_Blocks[--state.firstFreeBlock]); + else + --state.firstFreeBlock; } } swapped = true; @@ -13163,10 +13340,12 @@ VkResult VmaDefragmentationContext_T::DefragmentPassEnd(VmaDefragmentationPassMo default: { // Move to the begining - for (const ImmovableBlock& block : immovableBlocks) + for (const FragmentedBlock& block : immovableBlocks) { - VmaBlockVector* vector = m_pBlockVectors[block.vectorIndex]; - for (size_t i = m_ImmovableBlockCount; vector->GetBlockCount(); ++i) + VmaBlockVector* vector = m_pBlockVectors[block.data]; + VmaMutexLockWrite lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex); + + for (size_t i = m_ImmovableBlockCount; i < vector->GetBlockCount(); ++i) { if (vector->GetBlock(i) == block.block) { @@ -13179,6 +13358,13 @@ VkResult VmaDefragmentationContext_T::DefragmentPassEnd(VmaDefragmentationPassMo } } } + + // Bulk-map destination blocks + for (const FragmentedBlock& block : mappedBlocks) + { + VkResult res = block.block->Map(allocator, block.data, VMA_NULL); + VMA_ASSERT(res == VK_SUCCESS); + } return result; } @@ -13188,9 +13374,10 @@ bool VmaDefragmentationContext_T::ComputeDefragmentation(VmaBlockVector& vector, { case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FAST_BIT: return ComputeDefragmentation_Fast(vector); - default: // Default algoritm + default: + VMA_ASSERT(0); case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED_BIT: - return ComputeDefragmentation_Balanced(vector); + return ComputeDefragmentation_Balanced(vector, index, true); case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FULL_BIT: return ComputeDefragmentation_Full(vector); case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT: @@ -13216,12 +13403,27 @@ VmaDefragmentationContext_T::MoveAllocationData VmaDefragmentationContext_T::Get return moveData; } -bool VmaDefragmentationContext_T::IncrementCounters(uint32_t& allocations, VkDeviceSize bytes) +VmaDefragmentationContext_T::CounterStatus VmaDefragmentationContext_T::CheckCounters(VkDeviceSize bytes) { - if (++allocations >= m_MaxPassAllocations || bytes >= m_MaxPassBytes) + // Ignore allocation if will exceed max size for copy + if (m_PassStats.bytesMoved + bytes > m_MaxPassBytes) { - m_Stats.bytesMoved += bytes; - m_Stats.allocationsMoved += allocations; + if (++m_IgnoredAllocs < MAX_ALLOCS_TO_IGNORE) + return CounterStatus::Ignore; + else + return CounterStatus::End; + } + return CounterStatus::Pass; +} + +bool VmaDefragmentationContext_T::IncrementCounters(VkDeviceSize bytes) +{ + m_PassStats.bytesMoved += bytes; + // Early return when max found + if (++m_PassStats.allocationsMoved >= m_MaxPassAllocations || m_PassStats.bytesMoved >= m_MaxPassBytes) + { + VMA_ASSERT(m_PassStats.allocationsMoved == m_MaxPassAllocations || + m_PassStats.bytesMoved == m_MaxPassBytes && "Exceeded maximal pass threshold!"); return true; } return false; @@ -13229,8 +13431,6 @@ bool VmaDefragmentationContext_T::IncrementCounters(uint32_t& allocations, VkDev bool VmaDefragmentationContext_T::ReallocWithinBlock(VmaBlockVector& vector, VmaDeviceMemoryBlock* block) { - VkDeviceSize currentBytesMoved = 0; - uint32_t currentAllocsMoved = 0; VmaBlockMetadata* metadata = block->m_pMetadata; for (VmaAllocHandle handle = metadata->GetAllocationListBegin(); @@ -13241,8 +13441,18 @@ bool VmaDefragmentationContext_T::ReallocWithinBlock(VmaBlockVector& vector, Vma // Ignore newly created allocations by 
defragmentation algorithm if (moveData.move.srcAllocation->GetUserData() == this) continue; - VmaAllocation& dst = reinterpret_cast<VmaAllocation&>(moveData.move.internalData); - + switch (CheckCounters(moveData.move.srcAllocation->GetSize())) + { + case CounterStatus::Ignore: + continue; + case CounterStatus::End: + return true; + default: + VMA_ASSERT(0); + case CounterStatus::Pass: + break; + } + VkDeviceSize offset = moveData.move.srcAllocation->GetOffset(); if (offset != 0 && metadata->GetSumFreeSize() >= moveData.size) { @@ -13264,32 +13474,21 @@ bool VmaDefragmentationContext_T::ReallocWithinBlock(VmaBlockVector& vector, Vma moveData.flags, this, moveData.type, - &dst) == VK_SUCCESS) + &moveData.move.dstTmpAllocation) == VK_SUCCESS) { - moveData.move.dstMemory = dst->GetMemory(); - moveData.move.dstOffset = dst->GetOffset(); m_Moves.push_back(moveData.move); - currentBytesMoved += moveData.size; - - if (IncrementCounters(currentAllocsMoved, currentBytesMoved)) + if (IncrementCounters(moveData.size)) return true; } } } } } - - m_Stats.bytesMoved += currentBytesMoved; - m_Stats.allocationsMoved += currentAllocsMoved; return false; } bool VmaDefragmentationContext_T::AllocInOtherBlock(size_t start, size_t end, MoveAllocationData& data, VmaBlockVector& vector) { - VkDeviceSize currentBytesMoved = 0; - uint32_t currentAllocsMoved = 0; - VmaAllocation& dst = reinterpret_cast<VmaAllocation&>(data.move.internalData); - for (; start < end; ++start) { VmaDeviceMemoryBlock* dstBlock = vector.GetBlock(start); @@ -13302,22 +13501,15 @@ bool VmaDefragmentationContext_T::AllocInOtherBlock(size_t start, size_t end, Mo this, data.type, 0, - &dst) == VK_SUCCESS) + &data.move.dstTmpAllocation) == VK_SUCCESS) { - data.move.dstMemory = dst->GetMemory(); - data.move.dstOffset = dst->GetOffset(); m_Moves.push_back(data.move); - currentBytesMoved += data.size; - - if (IncrementCounters(currentAllocsMoved, currentBytesMoved)) + if (IncrementCounters(data.size)) return true; break; } } } - - m_Stats.bytesMoved += currentBytesMoved; - m_Stats.allocationsMoved += currentAllocsMoved; return false; } @@ -13338,6 +13530,17 @@ bool VmaDefragmentationContext_T::ComputeDefragmentation_Fast(VmaBlockVector& ve // Ignore newly created allocations by defragmentation algorithm if (moveData.move.srcAllocation->GetUserData() == this) continue; + switch (CheckCounters(moveData.move.srcAllocation->GetSize())) + { + case CounterStatus::Ignore: + continue; + case CounterStatus::End: + return true; + default: + VMA_ASSERT(0); + case CounterStatus::Pass: + break; + } // Check all previous blocks for free space if (AllocInOtherBlock(0, i, moveData, vector)) @@ -13347,19 +13550,24 @@ bool VmaDefragmentationContext_T::ComputeDefragmentation_Fast(VmaBlockVector& ve return false; } -bool VmaDefragmentationContext_T::ComputeDefragmentation_Balanced(VmaBlockVector& vector) +bool VmaDefragmentationContext_T::ComputeDefragmentation_Balanced(VmaBlockVector& vector, size_t index, bool update) { // Go over every allocation and try to fit it in previous blocks at lowest offsets, // if not possible: realloc within single block to minimize offset (exclude offset == 0), // but only if there are noticable gaps between them (some heuristic, ex. 
average size of allocation in block) + VMA_ASSERT(m_AlgorithmState != VMA_NULL); - VkDeviceSize currentBytesMoved = 0; - uint32_t currentAllocsMoved = 0; + StateBalanced& vectorState = reinterpret_cast<StateBalanced*>(m_AlgorithmState)[index]; + if (update && vectorState.avgAllocSize == UINT64_MAX) + UpdateVectorStatistics(vector, vectorState); + const size_t startMoveCount = m_Moves.size(); + VkDeviceSize minimalFreeRegion = vectorState.avgFreeSize / 2; for (size_t i = vector.GetBlockCount() - 1; i > m_ImmovableBlockCount; --i) { VmaDeviceMemoryBlock* block = vector.GetBlock(i); VmaBlockMetadata* metadata = block->m_pMetadata; + VkDeviceSize prevFreeRegionSize = 0; for (VmaAllocHandle handle = metadata->GetAllocationListBegin(); handle != VK_NULL_HANDLE; @@ -13369,53 +13577,72 @@ bool VmaDefragmentationContext_T::ComputeDefragmentation_Balanced(VmaBlockVector // Ignore newly created allocations by defragmentation algorithm if (moveData.move.srcAllocation->GetUserData() == this) continue; + switch (CheckCounters(moveData.move.srcAllocation->GetSize())) + { + case CounterStatus::Ignore: + continue; + case CounterStatus::End: + return true; + default: + VMA_ASSERT(0); + case CounterStatus::Pass: + break; + } // Check all previous blocks for free space const size_t prevMoveCount = m_Moves.size(); if (AllocInOtherBlock(0, i, moveData, vector)) return true; + VkDeviceSize nextFreeRegionSize = metadata->GetNextFreeRegionSize(handle); // If no room found then realloc within block for lower offset VkDeviceSize offset = moveData.move.srcAllocation->GetOffset(); if (prevMoveCount == m_Moves.size() && offset != 0 && metadata->GetSumFreeSize() >= moveData.size) { - VmaAllocationRequest request = {}; - if (metadata->CreateAllocationRequest( - moveData.size, - moveData.alignment, - false, - moveData.type, - VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT, - &request)) + // Check if realloc will make sense + if (prevFreeRegionSize >= minimalFreeRegion || + nextFreeRegionSize >= minimalFreeRegion || + moveData.size <= vectorState.avgFreeSize || + moveData.size <= vectorState.avgAllocSize) { - if (metadata->GetAllocationOffset(request.allocHandle) < offset) + VmaAllocationRequest request = {}; + if (metadata->CreateAllocationRequest( + moveData.size, + moveData.alignment, + false, + moveData.type, + VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT, + &request)) { - VmaAllocation& dst = reinterpret_cast<VmaAllocation&>(moveData.move.internalData); - if (vector.CommitAllocationRequest( - request, - block, - moveData.alignment, - moveData.flags, - this, - moveData.type, - &dst) == VK_SUCCESS) + if (metadata->GetAllocationOffset(request.allocHandle) < offset) { - moveData.move.dstMemory = dst->GetMemory(); - moveData.move.dstOffset = dst->GetOffset(); - m_Moves.push_back(moveData.move); - currentBytesMoved += moveData.size; - - if (IncrementCounters(currentAllocsMoved, currentBytesMoved)) - return true; + if (vector.CommitAllocationRequest( + request, + block, + moveData.alignment, + moveData.flags, + this, + moveData.type, + &moveData.move.dstTmpAllocation) == VK_SUCCESS) + { + m_Moves.push_back(moveData.move); + if (IncrementCounters(moveData.size)) + return true; + } } } } } + prevFreeRegionSize = nextFreeRegionSize; } } - - m_Stats.bytesMoved += currentBytesMoved; - m_Stats.allocationsMoved += currentAllocsMoved; + + // No moves perfomed, update statistics to current vector state + if (startMoveCount == m_Moves.size() && !update) + { + vectorState.avgAllocSize = UINT64_MAX; + return 
ComputeDefragmentation_Balanced(vector, index, false); + } return false; } @@ -13424,9 +13651,6 @@ bool VmaDefragmentationContext_T::ComputeDefragmentation_Full(VmaBlockVector& ve // Go over every allocation and try to fit it in previous blocks at lowest offsets, // if not possible: realloc within single block to minimize offset (exclude offset == 0) - VkDeviceSize currentBytesMoved = 0; - uint32_t currentAllocsMoved = 0; - for (size_t i = vector.GetBlockCount() - 1; i > m_ImmovableBlockCount; --i) { VmaDeviceMemoryBlock* block = vector.GetBlock(i); @@ -13440,6 +13664,17 @@ bool VmaDefragmentationContext_T::ComputeDefragmentation_Full(VmaBlockVector& ve // Ignore newly created allocations by defragmentation algorithm if (moveData.move.srcAllocation->GetUserData() == this) continue; + switch (CheckCounters(moveData.move.srcAllocation->GetSize())) + { + case CounterStatus::Ignore: + continue; + case CounterStatus::End: + return true; + default: + VMA_ASSERT(0); + case CounterStatus::Pass: + break; + } // Check all previous blocks for free space const size_t prevMoveCount = m_Moves.size(); @@ -13461,7 +13696,6 @@ bool VmaDefragmentationContext_T::ComputeDefragmentation_Full(VmaBlockVector& ve { if (metadata->GetAllocationOffset(request.allocHandle) < offset) { - VmaAllocation& dst = reinterpret_cast<VmaAllocation&>(moveData.move.internalData); if (vector.CommitAllocationRequest( request, block, @@ -13469,14 +13703,10 @@ bool VmaDefragmentationContext_T::ComputeDefragmentation_Full(VmaBlockVector& ve moveData.flags, this, moveData.type, - &dst) == VK_SUCCESS) + &moveData.move.dstTmpAllocation) == VK_SUCCESS) { - moveData.move.dstMemory = dst->GetMemory(); - moveData.move.dstOffset = dst->GetOffset(); m_Moves.push_back(moveData.move); - currentBytesMoved += moveData.size; - - if (IncrementCounters(currentAllocsMoved, currentBytesMoved)) + if (IncrementCounters(moveData.size)) return true; } } @@ -13484,9 +13714,6 @@ bool VmaDefragmentationContext_T::ComputeDefragmentation_Full(VmaBlockVector& ve } } } - - m_Stats.bytesMoved += currentBytesMoved; - m_Stats.allocationsMoved += currentAllocsMoved; return false; } @@ -13511,6 +13738,13 @@ bool VmaDefragmentationContext_T::ComputeDefragmentation_Extensive(VmaBlockVecto case StateExtensive::Operation::FindFreeBlockTexture: case StateExtensive::Operation::FindFreeBlockAll: { + // No more blocks to free, just perform fast realloc and move to cleanup + if (vectorState.firstFreeBlock == 0) + { + vectorState.operation = StateExtensive::Operation::Cleanup; + return ComputeDefragmentation_Fast(vector); + } + // No free blocks, have to clear last one size_t last = (vectorState.firstFreeBlock == SIZE_MAX ? 
vector.GetBlockCount() : vectorState.firstFreeBlock) - 1; VmaBlockMetadata* freeMetadata = vector.GetBlock(last)->m_pMetadata; @@ -13521,6 +13755,17 @@ bool VmaDefragmentationContext_T::ComputeDefragmentation_Extensive(VmaBlockVecto handle = freeMetadata->GetNextAllocation(handle)) { MoveAllocationData moveData = GetMoveData(handle, freeMetadata); + switch (CheckCounters(moveData.move.srcAllocation->GetSize())) + { + case CounterStatus::Ignore: + continue; + case CounterStatus::End: + return true; + default: + VMA_ASSERT(0); + case CounterStatus::Pass: + break; + } // Check all previous blocks for free space if (AllocInOtherBlock(0, last, moveData, vector)) @@ -13568,8 +13813,7 @@ bool VmaDefragmentationContext_T::ComputeDefragmentation_Extensive(VmaBlockVecto } vectorState.firstFreeBlock = last; // Nothing done, block found without reallocations, can perform another reallocs in same pass - if (prevMoveCount == m_Moves.size()) - return ComputeDefragmentation_Extensive(vector, index); + return ComputeDefragmentation_Extensive(vector, index); } break; } @@ -13637,6 +13881,9 @@ bool VmaDefragmentationContext_T::ComputeDefragmentation_Extensive(VmaBlockVecto } break; } + case StateExtensive::Operation::Cleanup: + // Cleanup is handled below so that other operations may reuse the cleanup code. This case is here to prevent the unhandled enum value warning (C4062). + break; } if (vectorState.operation == StateExtensive::Operation::Cleanup) @@ -13655,6 +13902,27 @@ bool VmaDefragmentationContext_T::ComputeDefragmentation_Extensive(VmaBlockVecto return false; } +void VmaDefragmentationContext_T::UpdateVectorStatistics(VmaBlockVector& vector, StateBalanced& state) +{ + size_t allocCount = 0; + size_t freeCount = 0; + state.avgFreeSize = 0; + state.avgAllocSize = 0; + + for (size_t i = 0; i < vector.GetBlockCount(); ++i) + { + VmaBlockMetadata* metadata = vector.GetBlock(i)->m_pMetadata; + + allocCount += metadata->GetAllocationCount(); + freeCount += metadata->GetFreeRegionsCount(); + state.avgFreeSize += metadata->GetSumFreeSize(); + state.avgAllocSize += metadata->GetSize(); + } + + state.avgAllocSize = (state.avgAllocSize - state.avgFreeSize) / allocCount; + state.avgFreeSize /= freeCount; +} + bool VmaDefragmentationContext_T::MoveDataToFreeBlocks(VmaSuballocationType currentType, VmaBlockVector& vector, size_t firstFreeBlock, bool& texturePresent, bool& bufferPresent, bool& otherPresent) @@ -13673,6 +13941,17 @@ bool VmaDefragmentationContext_T::MoveDataToFreeBlocks(VmaSuballocationType curr // Ignore newly created allocations by defragmentation algorithm if (moveData.move.srcAllocation->GetUserData() == this) continue; + switch (CheckCounters(moveData.move.srcAllocation->GetSize())) + { + case CounterStatus::Ignore: + continue; + case CounterStatus::End: + return true; + default: + VMA_ASSERT(0); + case CounterStatus::Pass: + break; + } // Move only single type of resources at once if (!VmaIsBufferImageGranularityConflict(moveData.type, currentType)) @@ -14404,6 +14683,7 @@ VkResult VmaAllocator_T::AllocateDedicatedMemory( VkMemoryPriorityAllocateInfoEXT priorityInfo = { VK_STRUCTURE_TYPE_MEMORY_PRIORITY_ALLOCATE_INFO_EXT }; if(m_UseExtMemoryPriority) { + VMA_ASSERT(priority >= 0.f && priority <= 1.f); priorityInfo.priority = priority; VmaPnextChainPushFront(&allocInfo, &priorityInfo); } @@ -14515,9 +14795,12 @@ VkResult VmaAllocator_T::AllocateDedicatedMemoryPage( } } - *pAllocation = m_AllocationObjectAllocator.Allocate(isUserDataString, isMappingAllowed); + *pAllocation = 
m_AllocationObjectAllocator.Allocate(isMappingAllowed); (*pAllocation)->InitDedicatedAllocation(pool, memTypeIndex, hMemory, suballocType, pMappedData, size); - (*pAllocation)->SetUserData(this, pUserData); + if (isUserDataString) + (*pAllocation)->SetName(this, (const char*)pUserData); + else + (*pAllocation)->SetUserData(this, pUserData); m_Budget.AddAllocation(MemoryTypeIndexToHeapIndex(memTypeIndex), size); if(VMA_DEBUG_INITIALIZE_ALLOCATIONS) { @@ -14629,8 +14912,8 @@ VkResult VmaAllocator_T::FindMemoryTypeIndex( if((requiredFlags & ~currFlags) == 0) { // Calculate cost as number of bits from preferredFlags not present in this memory type. - uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags) + - VmaCountBitsSet(currFlags & notPreferredFlags); + uint32_t currCost = VMA_COUNT_BITS_SET(preferredFlags & ~currFlags) + + VMA_COUNT_BITS_SET(currFlags & notPreferredFlags); // Remember memory type with lowest cost. if(currCost < minCost) { @@ -14849,6 +15132,8 @@ void VmaAllocator_T::FreeMemory( FillAllocation(allocation, VMA_ALLOCATION_FILL_PATTERN_DESTROYED); } + allocation->FreeName(this); + switch(allocation->GetType()) { case VmaAllocation_T::ALLOCATION_TYPE_BLOCK: @@ -14994,6 +15279,7 @@ void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationI pAllocationInfo->size = hAllocation->GetSize(); pAllocationInfo->pMappedData = hAllocation->GetMappedData(); pAllocationInfo->pUserData = hAllocation->GetUserData(); + pAllocationInfo->pName = hAllocation->GetName(); } VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool) @@ -15664,89 +15950,90 @@ uint32_t VmaAllocator_T::GetGpuDefragmentationMemoryTypeBits() #if VMA_STATS_STRING_ENABLED void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json) { - bool dedicatedAllocationsStarted = false; - for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex) - { - VmaDedicatedAllocationList& dedicatedAllocList = m_DedicatedAllocations[memTypeIndex]; - if(!dedicatedAllocList.IsEmpty()) - { - if(dedicatedAllocationsStarted == false) - { - dedicatedAllocationsStarted = true; - json.WriteString("DedicatedAllocations"); - json.BeginObject(); - } - - json.BeginString("Type "); - json.ContinueString(memTypeIndex); - json.EndString(); - - dedicatedAllocList.BuildStatsString(json); - } - } - if(dedicatedAllocationsStarted) - { - json.EndObject(); - } - + json.WriteString("DefaultPools"); + json.BeginObject(); { - bool allocationsStarted = false; - for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex) + for (uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex) { VmaBlockVector* pBlockVector = m_pBlockVectors[memTypeIndex]; - if(pBlockVector != VMA_NULL) + VmaDedicatedAllocationList& dedicatedAllocList = m_DedicatedAllocations[memTypeIndex]; + if (pBlockVector != VMA_NULL) { - if (pBlockVector->IsEmpty() == false) + json.BeginString("Type "); + json.ContinueString(memTypeIndex); + json.EndString(); + json.BeginObject(); { - if (allocationsStarted == false) - { - allocationsStarted = true; - json.WriteString("DefaultPools"); - json.BeginObject(); - } - - json.BeginString("Type "); - json.ContinueString(memTypeIndex); - json.EndString(); + json.WriteString("PreferredBlockSize"); + json.WriteNumber(pBlockVector->GetPreferredBlockSize()); - json.BeginObject(); + json.WriteString("Blocks"); pBlockVector->PrintDetailedMap(json); - json.EndObject(); + + json.WriteString("DedicatedAllocations"); + 
dedicatedAllocList.BuildStatsString(json); } + json.EndObject(); } } - if(allocationsStarted) - { - json.EndObject(); - } } + json.EndObject(); - // Custom pools + json.WriteString("CustomPools"); + json.BeginObject(); { VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex); - if(!m_Pools.IsEmpty()) + if (!m_Pools.IsEmpty()) { - json.WriteString("Pools"); - json.BeginObject(); - for(VmaPool pool = m_Pools.Front(); pool != VMA_NULL; pool = m_Pools.GetNext(pool)) + for (uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex) { - json.BeginString(); - json.ContinueString(pool->GetId()); - json.EndString(); + bool displayType = true; + size_t index = 0; + for (VmaPool pool = m_Pools.Front(); pool != VMA_NULL; pool = m_Pools.GetNext(pool)) + { + VmaBlockVector& blockVector = pool->m_BlockVector; + if (blockVector.GetMemoryTypeIndex() == memTypeIndex) + { + if (displayType) + { + json.BeginString("Type "); + json.ContinueString(memTypeIndex); + json.EndString(); + json.BeginArray(); + displayType = false; + } - json.BeginObject(); - pool->m_BlockVector.PrintDetailedMap(json); + json.BeginObject(); + { + json.WriteString("Name"); + json.BeginString(); + json.ContinueString_Size(index++); + if (pool->GetName()) + { + json.WriteString(" - "); + json.WriteString(pool->GetName()); + } + json.EndString(); - if (!pool->m_DedicatedAllocations.IsEmpty()) - { - json.WriteString("DedicatedAllocations"); - pool->m_DedicatedAllocations.BuildStatsString(json); + json.WriteString("PreferredBlockSize"); + json.WriteNumber(blockVector.GetPreferredBlockSize()); + + json.WriteString("Blocks"); + blockVector.PrintDetailedMap(json); + + json.WriteString("DedicatedAllocations"); + pool->m_DedicatedAllocations.BuildStatsString(json); + } + json.EndObject(); + } } - json.EndObject(); + + if (!displayType) + json.EndArray(); } - json.EndObject(); } } + json.EndObject(); } #endif // VMA_STATS_STRING_ENABLED #endif // _VMA_ALLOCATOR_T_FUNCTIONS @@ -15857,127 +16144,176 @@ VMA_CALL_PRE void VMA_CALL_POST vmaBuildStatsString( VmaStringBuilder sb(allocator->GetAllocationCallbacks()); { - VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb); - json.BeginObject(); - VmaBudget budgets[VK_MAX_MEMORY_HEAPS]; allocator->GetHeapBudgets(budgets, 0, allocator->GetMemoryHeapCount()); VmaTotalStatistics stats; allocator->CalculateStatistics(&stats); - json.WriteString("Total"); - VmaPrintDetailedStatistics(json, stats.total); - - for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex) + VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb); + json.BeginObject(); { - json.BeginString("Heap "); - json.ContinueString(heapIndex); - json.EndString(); + json.WriteString("General"); json.BeginObject(); + { + const VkPhysicalDeviceProperties& deviceProperties = allocator->m_PhysicalDeviceProperties; + const VkPhysicalDeviceMemoryProperties& memoryProperties = allocator->m_MemProps; - json.WriteString("Size"); - json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size); + json.WriteString("API"); + json.WriteString("Vulkan"); - json.WriteString("Flags"); - json.BeginArray(true); - if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0) - { - json.WriteString("DEVICE_LOCAL"); - } - json.EndArray(); + json.WriteString("apiVersion"); + json.BeginString(); + json.ContinueString(VK_API_VERSION_MAJOR(deviceProperties.apiVersion)); + json.ContinueString("."); + json.ContinueString(VK_API_VERSION_MINOR(deviceProperties.apiVersion)); + 
json.ContinueString("."); + json.ContinueString(VK_API_VERSION_PATCH(deviceProperties.apiVersion)); + json.EndString(); - json.WriteString("Budget"); - json.BeginObject(); - { - json.WriteString("BlockBytes"); - json.WriteNumber(budgets[heapIndex].statistics.blockBytes); - json.WriteString("AllocationBytes"); - json.WriteNumber(budgets[heapIndex].statistics.allocationBytes); - json.WriteString("BlockCount"); - json.WriteNumber(budgets[heapIndex].statistics.blockCount); - json.WriteString("AllocationCount"); - json.WriteNumber(budgets[heapIndex].statistics.allocationCount); - json.WriteString("Usage"); - json.WriteNumber(budgets[heapIndex].usage); - json.WriteString("Budget"); - json.WriteNumber(budgets[heapIndex].budget); + json.WriteString("GPU"); + json.WriteString(deviceProperties.deviceName); + json.WriteString("deviceType"); + json.WriteNumber(static_cast<uint32_t>(deviceProperties.deviceType)); + + json.WriteString("maxMemoryAllocationCount"); + json.WriteNumber(deviceProperties.limits.maxMemoryAllocationCount); + json.WriteString("bufferImageGranularity"); + json.WriteNumber(deviceProperties.limits.bufferImageGranularity); + json.WriteString("nonCoherentAtomSize"); + json.WriteNumber(deviceProperties.limits.nonCoherentAtomSize); + + json.WriteString("memoryHeapCount"); + json.WriteNumber(memoryProperties.memoryHeapCount); + json.WriteString("memoryTypeCount"); + json.WriteNumber(memoryProperties.memoryTypeCount); } json.EndObject(); - - if(stats.memoryHeap[heapIndex].statistics.blockCount > 0) - { - json.WriteString("Stats"); - VmaPrintDetailedStatistics(json, stats.memoryHeap[heapIndex]); - } - - for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex) + } + { + json.WriteString("Total"); + VmaPrintDetailedStatistics(json, stats.total); + } + { + json.WriteString("MemoryInfo"); + json.BeginObject(); { - if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex) + for (uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex) { - json.BeginString("Type "); - json.ContinueString(typeIndex); + json.BeginString("Heap "); + json.ContinueString(heapIndex); json.EndString(); - json.BeginObject(); - - json.WriteString("Flags"); - json.BeginArray(true); - VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags; - if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0) - { - json.WriteString("DEVICE_LOCAL"); - } - if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0) - { - json.WriteString("HOST_VISIBLE"); - } - if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0) - { - json.WriteString("HOST_COHERENT"); - } - if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0) - { - json.WriteString("HOST_CACHED"); - } - if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0) - { - json.WriteString("LAZILY_ALLOCATED"); - } -#if VMA_VULKAN_VERSION >= 1001000 - if((flags & VK_MEMORY_PROPERTY_PROTECTED_BIT) != 0) - { - json.WriteString("PROTECTED"); - } -#endif // #if VMA_VULKAN_VERSION >= 1001000 -#if VK_AMD_device_coherent_memory - if((flags & VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY) != 0) - { - json.WriteString("DEVICE_COHERENT"); - } - if((flags & VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY) != 0) { - json.WriteString("DEVICE_UNCACHED"); - } -#endif // #if VK_AMD_device_coherent_memory - json.EndArray(); + const VkMemoryHeap& heapInfo = allocator->m_MemProps.memoryHeaps[heapIndex]; + json.WriteString("Flags"); + json.BeginArray(true); + { + if (heapInfo.flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) + 
json.WriteString("DEVICE_LOCAL"); + #if VMA_VULKAN_VERSION >= 1001000 + if (heapInfo.flags & VK_MEMORY_HEAP_MULTI_INSTANCE_BIT) + json.WriteString("MULTI_INSTANCE"); + #endif + + VkMemoryHeapFlags flags = heapInfo.flags & + ~(VK_MEMORY_HEAP_DEVICE_LOCAL_BIT + #if VMA_VULKAN_VERSION >= 1001000 + | VK_MEMORY_HEAP_MULTI_INSTANCE_BIT + #endif + ); + if (flags != 0) + json.WriteNumber(flags); + } + json.EndArray(); + + json.WriteString("Size"); + json.WriteNumber(heapInfo.size); + + json.WriteString("Budget"); + json.BeginObject(); + { + json.WriteString("BudgetBytes"); + json.WriteNumber(budgets[heapIndex].budget); + json.WriteString("UsageBytes"); + json.WriteNumber(budgets[heapIndex].usage); + } + json.EndObject(); - if(stats.memoryType[typeIndex].statistics.blockCount > 0) - { json.WriteString("Stats"); - VmaPrintDetailedStatistics(json, stats.memoryType[typeIndex]); - } + VmaPrintDetailedStatistics(json, stats.memoryHeap[heapIndex]); + json.WriteString("MemoryPools"); + json.BeginObject(); + { + for (uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex) + { + if (allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex) + { + json.BeginString("Type "); + json.ContinueString(typeIndex); + json.EndString(); + json.BeginObject(); + { + json.WriteString("Flags"); + json.BeginArray(true); + { + VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags; + if (flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) + json.WriteString("DEVICE_LOCAL"); + if (flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) + json.WriteString("HOST_VISIBLE"); + if (flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) + json.WriteString("HOST_COHERENT"); + if (flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) + json.WriteString("HOST_CACHED"); + if (flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) + json.WriteString("LAZILY_ALLOCATED"); + #if VMA_VULKAN_VERSION >= 1001000 + if (flags & VK_MEMORY_PROPERTY_PROTECTED_BIT) + json.WriteString("PROTECTED"); + #endif + #if VK_AMD_device_coherent_memory + if (flags & VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY) + json.WriteString("DEVICE_COHERENT_AMD"); + if (flags & VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY) + json.WriteString("DEVICE_UNCACHED_AMD"); + #endif + + flags &= ~(VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT + #if VMA_VULKAN_VERSION >= 1001000 + | VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT + #endif + #if VK_AMD_device_coherent_memory + | VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY + | VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY + #endif + | VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT + | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT + | VK_MEMORY_PROPERTY_HOST_CACHED_BIT); + if (flags != 0) + json.WriteNumber(flags); + } + json.EndArray(); + + json.WriteString("Stats"); + VmaPrintDetailedStatistics(json, stats.memoryType[typeIndex]); + } + json.EndObject(); + } + } + + } + json.EndObject(); + } json.EndObject(); } } - json.EndObject(); } - if(detailedMap == VK_TRUE) - { + + if (detailedMap == VK_TRUE) allocator->PrintDetailedMap(json); - } json.EndObject(); } @@ -16428,6 +16764,14 @@ VMA_CALL_PRE void VMA_CALL_POST vmaSetAllocationUserData( allocation->SetUserData(allocator, pUserData); } +VMA_CALL_PRE void VMA_CALL_POST vmaSetAllocationName( + VmaAllocator VMA_NOT_NULL allocator, + VmaAllocation VMA_NOT_NULL allocation, + const char* VMA_NULLABLE pName) +{ + allocation->SetName(allocator, pName); +} + VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationMemoryProperties( VmaAllocator VMA_NOT_NULL allocator, VmaAllocation VMA_NOT_NULL 
allocation, @@ -16567,13 +16911,20 @@ VMA_CALL_PRE VkResult VMA_CALL_POST vmaBeginDefragmentation( VMA_DEBUG_LOG("vmaBeginDefragmentation"); + if (pInfo->pool != VMA_NULL) + { + // Check if run on supported algorithms + if (pInfo->pool->m_BlockVector.GetAlgorithm() & VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT) + return VK_ERROR_FEATURE_NOT_PRESENT; + } + VMA_DEBUG_GLOBAL_MUTEX_LOCK *pContext = vma_new(allocator, VmaDefragmentationContext_T)(allocator, *pInfo); return VK_SUCCESS; } -VMA_CALL_PRE VkResult VMA_CALL_POST vmaEndDefragmentation( +VMA_CALL_PRE void VMA_CALL_POST vmaEndDefragmentation( VmaAllocator allocator, VmaDefragmentationContext context, VmaDefragmentationStats* pStats) @@ -16587,7 +16938,6 @@ VMA_CALL_PRE VkResult VMA_CALL_POST vmaEndDefragmentation( if (pStats) context->GetStats(*pStats); vma_delete(allocator, context); - return VK_SUCCESS; } VMA_CALL_PRE VkResult VMA_CALL_POST vmaBeginDefragmentationPass( @@ -16864,6 +17214,50 @@ VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBufferWithAlignment( return res; } +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAliasingBuffer( + VmaAllocator VMA_NOT_NULL allocator, + VmaAllocation VMA_NOT_NULL allocation, + const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo, + VkBuffer VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pBuffer) +{ + VMA_ASSERT(allocator && pBufferCreateInfo && pBuffer && allocation); + + VMA_DEBUG_LOG("vmaCreateAliasingBuffer"); + + *pBuffer = VK_NULL_HANDLE; + + if (pBufferCreateInfo->size == 0) + { + return VK_ERROR_INITIALIZATION_FAILED; + } + if ((pBufferCreateInfo->usage & VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_COPY) != 0 && + !allocator->m_UseKhrBufferDeviceAddress) + { + VMA_ASSERT(0 && "Creating a buffer with VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT is not valid if VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT was not used."); + return VK_ERROR_INITIALIZATION_FAILED; + } + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + // 1. Create VkBuffer. + VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)( + allocator->m_hDevice, + pBufferCreateInfo, + allocator->GetAllocationCallbacks(), + pBuffer); + if (res >= 0) + { + // 2. Bind buffer with memory. + res = allocator->BindBufferMemory(allocation, 0, *pBuffer, VMA_NULL); + if (res >= 0) + { + return VK_SUCCESS; + } + (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks()); + } + return res; +} + VMA_CALL_PRE void VMA_CALL_POST vmaDestroyBuffer( VmaAllocator allocator, VkBuffer buffer, @@ -16985,10 +17379,52 @@ VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateImage( return res; } +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAliasingImage( + VmaAllocator VMA_NOT_NULL allocator, + VmaAllocation VMA_NOT_NULL allocation, + const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo, + VkImage VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pImage) +{ + VMA_ASSERT(allocator && pImageCreateInfo && pImage && allocation); + + *pImage = VK_NULL_HANDLE; + + VMA_DEBUG_LOG("vmaCreateImage"); + + if (pImageCreateInfo->extent.width == 0 || + pImageCreateInfo->extent.height == 0 || + pImageCreateInfo->extent.depth == 0 || + pImageCreateInfo->mipLevels == 0 || + pImageCreateInfo->arrayLayers == 0) + { + return VK_ERROR_INITIALIZATION_FAILED; + } + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + // 1. Create VkImage. + VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)( + allocator->m_hDevice, + pImageCreateInfo, + allocator->GetAllocationCallbacks(), + pImage); + if (res >= 0) + { + // 2. Bind image with memory. 
+ res = allocator->BindImageMemory(allocation, 0, *pImage, VMA_NULL); + if (res >= 0) + { + return VK_SUCCESS; + } + (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks()); + } + return res; +} + VMA_CALL_PRE void VMA_CALL_POST vmaDestroyImage( - VmaAllocator allocator, - VkImage image, - VmaAllocation allocation) + VmaAllocator VMA_NOT_NULL allocator, + VkImage VMA_NULLABLE_NON_DISPATCHABLE image, + VmaAllocation VMA_NULLABLE allocation) { VMA_ASSERT(allocator); @@ -17176,9 +17612,10 @@ before including these headers (like `WIN32_LEAN_AND_MEAN` or `WINVER` for Windows, `VK_USE_PLATFORM_WIN32_KHR` for Vulkan), you must define them before every `#include` of this library. -\note This library is written in C++, but has C-compatible interface. +This library is written in C++, but has C-compatible interface. Thus you can include and use vk_mem_alloc.h in C or C++ code, but full implementation with `VMA_IMPLEMENTATION` macro must be compiled as C++, NOT as C. +Some features of C++14 used. STL containers, RTTI, or C++ exceptions are not used. \section quick_start_initialization Initialization @@ -17775,14 +18212,28 @@ To use custom memory pools: Example: \code +// Find memoryTypeIndex for the pool. +VkBufferCreateInfo sampleBufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO }; +sampleBufCreateInfo.size = 0x10000; // Doesn't matter. +sampleBufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT; + +VmaAllocationCreateInfo sampleAllocCreateInfo = {}; +sampleAllocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO; + +uint32_t memTypeIndex; +VkResult res = vmaFindMemoryTypeIndexForBufferInfo(allocator, + &sampleBufCreateInfo, &sampleAllocCreateInfo, &memTypeIndex); +// Check res... + // Create a pool that can have at most 2 blocks, 128 MiB each. VmaPoolCreateInfo poolCreateInfo = {}; -poolCreateInfo.memoryTypeIndex = ... +poolCreateInfo.memoryTypeIndex = memTypeIndex; poolCreateInfo.blockSize = 128ull * 1024 * 1024; poolCreateInfo.maxBlockCount = 2; VmaPool pool; -vmaCreatePool(allocator, &poolCreateInfo, &pool); +res = vmaCreatePool(allocator, &poolCreateInfo, &pool); +// Check res... // Allocate a buffer out of it. VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO }; @@ -17794,8 +18245,8 @@ allocCreateInfo.pool = pool; VkBuffer buf; VmaAllocation alloc; -VmaAllocationInfo allocInfo; -vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo); +res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, nullptr); +// Check res... \endcode You have to free all allocations made from this pool before destroying it. @@ -17933,6 +18384,8 @@ you can achieve behavior of a ring buffer / queue. Ring buffer is available only in pools with one memory block - VmaPoolCreateInfo::maxBlockCount must be 1. Otherwise behavior is undefined. +\note \ref defragmentation is not supported in custom pools created with #VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT. + \page defragmentation Defragmentation @@ -17965,41 +18418,105 @@ for(;;) res = vmaBeginDefragmentationPass(allocator, defragCtx, &pass); if(res == VK_SUCCESS) break; - else if(res == VK_INCOMPLETE) + else if(res != VK_INCOMPLETE) + // Handle error... + + for(uint32_t i = 0; i < pass.moveCount; ++i) { - for(uint32_t i = 0; i < pass.moveCount; ++i) - { - //- Inspect pass.pMoves[i].srcAllocation, identify what buffer or image it represents. 
- //- Recreate this buffer or image at pass.pMoves[i].dstMemory, pass.pMoves[i].dstOffset. - //- Issue a vkCmdCopyBuffer/vkCmdCopyImage to copy its content to the new place. - } - //- Make sure the copy commands finished executing. - //- Update appropriate descriptors to point to the new places. - res = vmaEndDefragmentationPass(allocator, defragCtx, &pass); - if(res == VK_SUCCESS) - break; - else if(res != VK_INCOMPLETE) - // Handle error... + // Inspect pass.pMoves[i].srcAllocation, identify what buffer/image it represents. + VmaAllocationInfo allocInfo; + vmaGetAllocationInfo(allocator, pMoves[i].srcAllocation, &allocInfo); + MyEngineResourceData* resData = (MyEngineResourceData*)allocInfo.pUserData; + + // Recreate and bind this buffer/image at: pass.pMoves[i].dstMemory, pass.pMoves[i].dstOffset. + VkImageCreateInfo imgCreateInfo = ... + VkImage newImg; + res = vkCreateImage(device, &imgCreateInfo, nullptr, &newImg); + // Check res... + res = vmaBindImageMemory(allocator, pMoves[i].dstTmpAllocation, newImg); + // Check res... + + // Issue a vkCmdCopyBuffer/vkCmdCopyImage to copy its content to the new place. + vkCmdCopyImage(cmdBuf, resData->img, ..., newImg, ...); } - else + + // Make sure the copy commands finished executing. + vkWaitForFences(...); + + // Destroy old buffers/images bound with pass.pMoves[i].srcAllocation. + for(uint32_t i = 0; i < pass.moveCount; ++i) + { + // ... + vkDestroyImage(device, resData->img, nullptr); + } + + // Update appropriate descriptors to point to the new places... + + res = vmaEndDefragmentationPass(allocator, defragCtx, &pass); + if(res == VK_SUCCESS) + break; + else if(res != VK_INCOMPLETE) // Handle error... } vmaEndDefragmentation(allocator, defragCtx, nullptr); \endcode -You can defragment a specific custom pool by setting VmaDefragmentationInfo::pool -(like in the example above) or all the default pools by setting this member to null. +Although functions like vmaCreateBuffer(), vmaCreateImage(), vmaDestroyBuffer(), vmaDestroyImage() +create/destroy an allocation and a buffer/image at once, these are just a shortcut for +creating the resource, allocating memory, and binding them together. +Defragmentation works on memory allocations only. You must handle the rest manually. +Defragmentation is an iterative process that should repreat "passes" as long as related functions +return `VK_INCOMPLETE` not `VK_SUCCESS`. +In each pass: + +1. vmaBeginDefragmentationPass() function call: + - Calculates and returns the list of allocations to be moved in this pass. + Note this can be a time-consuming process. + - Reserves destination memory for them by creating temporary destination allocations + that you can query for their `VkDeviceMemory` + offset using vmaGetAllocationInfo(). +2. Inside the pass, **you should**: + - Inspect the returned list of allocations to be moved. + - Create new buffers/images and bind them at the returned destination temporary allocations. + - Copy data from source to destination resources if necessary. + - Destroy the source buffers/images, but NOT their allocations. +3. vmaEndDefragmentationPass() function call: + - Frees the source memory reserved for the allocations that are moved. + - Modifies source #VmaAllocation objects that are moved to point to the destination reserved memory. + - Frees `VkDeviceMemory` blocks that became empty. Unlike in previous iterations of the defragmentation API, there is no list of "movable" allocations passed as a parameter. Defragmentation algorithm tries to move all suitable allocations. 
You can, however, refuse to move some of them inside a defragmentation pass, by setting `pass.pMoves[i].operation` to #VMA_DEFRAGMENTATION_MOVE_OPERATION_IGNORE. -However, this is not recommended and may result in suboptimal packing of the allocations after defragmentation. +This is not recommended and may result in suboptimal packing of the allocations after defragmentation. If you cannot ensure any allocation can be moved, it is better to keep movable allocations separate in a custom pool. -You can also decide to destroy an allocation instead of moving it. -You should then set `pass.pMoves[i].operation` to #VMA_DEFRAGMENTATION_MOVE_OPERATION_DESTROY. +Inside a pass, for each allocation that should be moved: + +- You should copy its data from the source to the destination place by calling e.g. `vkCmdCopyBuffer()`, `vkCmdCopyImage()`. + - You need to make sure these commands finished executing before destroying the source buffers/images and before calling vmaEndDefragmentationPass(). +- If a resource doesn't contain any meaningful data, e.g. it is a transient color attachment image to be cleared, + filled, and used temporarily in each rendering frame, you can just recreate this image + without copying its data. +- If the resource is in `HOST_VISIBLE` and `HOST_CACHED` memory, you can copy its data on the CPU + using `memcpy()`. +- If you cannot move the allocation, you can set `pass.pMoves[i].operation` to #VMA_DEFRAGMENTATION_MOVE_OPERATION_IGNORE. + This will cancel the move. + - vmaEndDefragmentationPass() will then free the destination memory + not the source memory of the allocation, leaving it unchanged. +- If you decide the allocation is unimportant and can be destroyed instead of moved (e.g. it wasn't used for long time), + you can set `pass.pMoves[i].operation` to #VMA_DEFRAGMENTATION_MOVE_OPERATION_DESTROY. + - vmaEndDefragmentationPass() will then free both source and destination memory, and will destroy the source #VmaAllocation object. + +You can defragment a specific custom pool by setting VmaDefragmentationInfo::pool +(like in the example above) or all the default pools by setting this member to null. + +Defragmentation is always performed in each pool separately. +Allocations are never moved between different Vulkan memory types. +The size of the destination memory reserved for a moved allocation is the same as the original one. +Alignment of an allocation as it was determined using `vkGetBufferMemoryRequirements()` etc. is also respected after defragmentation. +Buffers/images should be recreated with the same `VkBufferCreateInfo` / `VkImageCreateInfo` parameters as the original ones. You can perform the defragmentation incrementally to limit the number of allocations and bytes to be moved in each pass, e.g. to call it in sync with render frames and not to experience too big hitches. @@ -18009,6 +18526,13 @@ It is also safe to perform the defragmentation asynchronously to render frames a usage, possibly from multiple threads, with the exception that allocations returned in VmaDefragmentationPassMoveInfo::pMoves shouldn't be destroyed until the defragmentation pass is ended. +<b>Mapping</b> is preserved on allocations that are moved during defragmentation. +Whether through #VMA_ALLOCATION_CREATE_MAPPED_BIT or vmaMapMemory(), the allocations +are mapped at their new place. Of course, pointer to the mapped data changes, so it needs to be queried +using VmaAllocationInfo::pMappedData. 
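The incremental usage mentioned above can be sketched as follows. This is a minimal, hedged example and not part of the patch itself: it assumes the VmaDefragmentationInfo members `maxBytesPerPass` and `maxAllocationsPerPass` and the flag #VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FAST_BIT declared elsewhere in this header, plus an already initialized `allocator`.

\code
// Sketch: limit how much work a single defragmentation pass may do,
// so passes can be interleaved with render frames without large hitches.
VmaDefragmentationInfo defragInfo = {};
defragInfo.flags = VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FAST_BIT;
defragInfo.maxBytesPerPass = 16ull * 1024 * 1024; // Move at most 16 MiB per pass.
defragInfo.maxAllocationsPerPass = 64;            // Move at most 64 allocations per pass.
// defragInfo.pool stays null, so the default pools are defragmented.

VmaDefragmentationContext defragCtx;
VkResult res = vmaBeginDefragmentation(allocator, &defragInfo, &defragCtx);
// Check res...

// Each frame: run one pass with vmaBeginDefragmentationPass()/vmaEndDefragmentationPass()
// as shown in the example above, until they return VK_SUCCESS.
// If a moved allocation was mapped, re-query VmaAllocationInfo::pMappedData afterwards.

// When finished:
vmaEndDefragmentation(allocator, defragCtx, nullptr);
\endcode

Tuning these limits is an application-specific trade-off between how quickly memory gets compacted and how much time each pass adds to a frame.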
+ +\note Defragmentation is not supported in custom pools created with #VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT. + \page statistics Statistics @@ -18081,6 +18605,8 @@ To do that, fill VmaAllocationCreateInfo::pUserData field when creating an allocation. It is an opaque `void*` pointer. You can use it e.g. as a pointer, some handle, index, key, ordinal number or any other value that would associate the allocation with your custom metadata. +It it useful to identify appropriate data structures in your engine given #VmaAllocation, +e.g. when doing \ref defragmentation. \code VkBufferCreateInfo bufCreateInfo = ... @@ -18111,44 +18637,21 @@ vmaBuildStatsString() in hexadecimal form. \section allocation_names Allocation names -There is alternative mode available where `pUserData` pointer is used to point to -a null-terminated string, giving a name to the allocation. To use this mode, -set #VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT flag in VmaAllocationCreateInfo::flags. -Then `pUserData` passed as VmaAllocationCreateInfo::pUserData or argument to -vmaSetAllocationUserData() must be either null or a pointer to a null-terminated string. +An allocation can also carry a null-terminated string, giving a name to the allocation. +To set it, call vmaSetAllocationName(). The library creates internal copy of the string, so the pointer you pass doesn't need to be valid for whole lifetime of the allocation. You can free it after the call. \code -VkImageCreateInfo imageInfo = ... - std::string imageName = "Texture: "; imageName += fileName; - -VmaAllocationCreateInfo allocCreateInfo = {}; -allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO; -allocCreateInfo.flags = VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT; -allocCreateInfo.pUserData = imageName.c_str(); - -VkImage image; -VmaAllocation allocation; -vmaCreateImage(allocator, &imageInfo, &allocCreateInfo, &image, &allocation, nullptr); +vmaSetAllocationName(allocator, allocation, imageName.c_str()); \endcode -The value of `pUserData` pointer of the allocation will be different than the one -you passed when setting allocation's name - pointing to a buffer managed -internally that holds copy of the string. - -\code -VmaAllocationInfo allocInfo; -vmaGetAllocationInfo(allocator, allocation, &allocInfo); -const char* imageName = (const char*)allocInfo.pUserData; -printf("Image name: %s\n", imageName); -\endcode +The string can be later retrieved by inspecting VmaAllocationInfo::pName. +It is also printed in JSON report created by vmaBuildStatsString(). -That string is also printed in JSON report created by vmaBuildStatsString(). - -\note Passing string name to VMA allocation doesn't automatically set it to the Vulkan buffer or image created with it. +\note Setting string name to VMA allocation doesn't automatically set it to the Vulkan buffer or image created with it. You must do it manually using an extension like VK_EXT_debug_utils, which is independent of this library. @@ -18491,6 +18994,7 @@ imgCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT; VmaAllocationCreateInfo allocCreateInfo = {}; allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO; allocCreateInfo.flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT; +allocCreateInfo.priority = 1.0f; VkImage img; VmaAllocation alloc; @@ -18502,7 +19006,8 @@ Consider creating them as dedicated allocations using #VMA_ALLOCATION_CREATE_DED especially if they are large or if you plan to destroy and recreate them with different sizes e.g. when display resolution changes. 
Prefer to create such resources first and all other GPU resources (like textures and vertex buffers) later. - +When VK_EXT_memory_priority extension is enabled, it is also worth setting high priority to such allocation +to decrease chances to be evicted to system memory by the operating system. \section usage_patterns_staging_copy_upload Staging copy for upload @@ -18653,6 +19158,7 @@ else // [Executed in runtime]: memcpy(stagingAllocInfo.pMappedData, myData, myDataSize); + //vkCmdPipelineBarrier: VK_ACCESS_HOST_WRITE_BIT --> VK_ACCESS_TRANSFER_READ_BIT VkBufferCopy bufCopy = { 0, // srcOffset 0, // dstOffset, @@ -18815,6 +19321,86 @@ To learn more about this extension, see: +\page vk_ext_memory_priority VK_EXT_memory_priority + +VK_EXT_memory_priority is a device extension that allows to pass additional "priority" +value to Vulkan memory allocations that the implementation may use prefer certain +buffers and images that are critical for performance to stay in device-local memory +in cases when the memory is over-subscribed, while some others may be moved to the system memory. + +VMA offers convenient usage of this extension. +If you enable it, you can pass "priority" parameter when creating allocations or custom pools +and the library automatically passes the value to Vulkan using this extension. + +If you want to use this extension in connection with VMA, follow these steps: + +\section vk_ext_memory_priority_initialization Initialization + +1) Call `vkEnumerateDeviceExtensionProperties` for the physical device. +Check if the extension is supported - if returned array of `VkExtensionProperties` contains "VK_EXT_memory_priority". + +2) Call `vkGetPhysicalDeviceFeatures2` for the physical device instead of old `vkGetPhysicalDeviceFeatures`. +Attach additional structure `VkPhysicalDeviceMemoryPriorityFeaturesEXT` to `VkPhysicalDeviceFeatures2::pNext` to be returned. +Check if the device feature is really supported - check if `VkPhysicalDeviceMemoryPriorityFeaturesEXT::memoryPriority` is true. + +3) While creating device with `vkCreateDevice`, enable this extension - add "VK_EXT_memory_priority" +to the list passed as `VkDeviceCreateInfo::ppEnabledExtensionNames`. + +4) While creating the device, also don't set `VkDeviceCreateInfo::pEnabledFeatures`. +Fill in `VkPhysicalDeviceFeatures2` structure instead and pass it as `VkDeviceCreateInfo::pNext`. +Enable this device feature - attach additional structure `VkPhysicalDeviceMemoryPriorityFeaturesEXT` to +`VkPhysicalDeviceFeatures2::pNext` chain and set its member `memoryPriority` to `VK_TRUE`. + +5) While creating #VmaAllocator with vmaCreateAllocator() inform VMA that you +have enabled this extension and feature - add #VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT +to VmaAllocatorCreateInfo::flags. + +\section vk_ext_memory_priority_usage Usage + +When using this extension, you should initialize following member: + +- VmaAllocationCreateInfo::priority when creating a dedicated allocation with #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT. +- VmaPoolCreateInfo::priority when creating a custom pool. + +It should be a floating-point value between `0.0f` and `1.0f`, where recommended default is `0.5f`. +Memory allocated with higher value can be treated by the Vulkan implementation as higher priority +and so it can have lower chances of being pushed out to system memory, experiencing degraded performance. 
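Because priority can also be set per custom pool via VmaPoolCreateInfo::priority (mentioned in the list above), a minimal sketch follows. It is hedged: it assumes a `memTypeIndex` obtained beforehand, e.g. with vmaFindMemoryTypeIndexForBufferInfo() as in the custom pools chapter, and an allocator created with #VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT.

\code
// Sketch: give every allocation made from a custom pool an elevated priority.
// Only effective if the allocator was created with
// VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT.
VmaPoolCreateInfo poolCreateInfo = {};
poolCreateInfo.memoryTypeIndex = memTypeIndex; // Assumed to be found beforehand.
poolCreateInfo.priority = 0.9f;                // Higher than the 0.5f default.

VmaPool pool;
VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
// Check res...
\endcode

Allocations made from such a pool inherit this priority, as described in the list of situations (further below) where the per-allocation `priority` member is ignored.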
+ +It might be a good idea to create performance-critical resources like color-attachment or depth-stencil images +as dedicated and set high priority to them. For example: + +\code +VkImageCreateInfo imgCreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO }; +imgCreateInfo.imageType = VK_IMAGE_TYPE_2D; +imgCreateInfo.extent.width = 3840; +imgCreateInfo.extent.height = 2160; +imgCreateInfo.extent.depth = 1; +imgCreateInfo.mipLevels = 1; +imgCreateInfo.arrayLayers = 1; +imgCreateInfo.format = VK_FORMAT_R8G8B8A8_UNORM; +imgCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL; +imgCreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; +imgCreateInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT; +imgCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT; + +VmaAllocationCreateInfo allocCreateInfo = {}; +allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO; +allocCreateInfo.flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT; +allocCreateInfo.priority = 1.0f; + +VkImage img; +VmaAllocation alloc; +vmaCreateImage(allocator, &imgCreateInfo, &allocCreateInfo, &img, &alloc, nullptr); +\endcode + +`priority` member is ignored in the following situations: + +- Allocations created in custom pools: They inherit the priority, along with all other allocation parameters + from the parametrs passed in #VmaPoolCreateInfo when the pool was created. +- Allocations created in default pools: They inherit the priority from the parameters + VMA used when creating default pools, which means `priority == 0.5f`. + + \page vk_amd_device_coherent_memory VK_AMD_device_coherent_memory VK_AMD_device_coherent_memory is a device extension that enables access to @@ -18939,6 +19525,28 @@ accompanying this library. functions. - #VmaVirtualBlock is not safe to be used from multiple threads simultaneously. +\section general_considerations_versioning_and_compatibility Versioning and compatibility + +The library uses [**Semantic Versioning**](https://semver.org/), +which means version numbers follow convention: Major.Minor.Patch (e.g. 2.3.0), where: + +- Incremented Patch version means a release is backward- and forward-compatible, + introducing only some internal improvements, bug fixes, optimizations etc. + or changes that are out of scope of the official API described in this documentation. +- Incremented Minor version means a release is backward-compatible, + so existing code that uses the library should continue to work, while some new + symbols could have been added: new structures, functions, new values in existing + enums and bit flags, new structure members, but not new function parameters. +- Incrementing Major version means a release could break some backward compatibility. + +All changes between official releases are documented in file "CHANGELOG.md". + +\warning Backward compatiblity is considered on the level of C++ source code, not binary linkage. +Adding new members to existing structures is treated as backward compatible if initializing +the new members to binary zero results in the old behavior. +You should always fully initialize all library structures to zeros and not rely on their +exact binary size. + \section general_considerations_validation_layer_warnings Validation layer warnings When using this library, you can meet following types of warnings issued by |