diff options
author | bruvzg <7645683+bruvzg@users.noreply.github.com> | 2020-02-11 18:19:02 +0200 |
---|---|---|
committer | bruvzg <7645683+bruvzg@users.noreply.github.com> | 2020-02-11 19:05:50 +0200 |
commit | 4cc439922a286c8b9534e5b85c07e04c5155a754 (patch) | |
tree | 9c154cea536d26f57d114f2de1fb41e586183947 /thirdparty/vulkan | |
parent | 8c73e813134001e575b6f59e3b0100471c007410 (diff) |
Update VulkanMemoryAllocator to 2.3.0 (Fixes build for 32-bit Windows and Linux).
Diffstat (limited to 'thirdparty/vulkan')
-rw-r--r-- | thirdparty/vulkan/vk_mem_alloc.h | 13676 |
1 file changed, 8245 insertions, 5431 deletions
diff --git a/thirdparty/vulkan/vk_mem_alloc.h b/thirdparty/vulkan/vk_mem_alloc.h index 862ea312a6..465864b363 100644 --- a/thirdparty/vulkan/vk_mem_alloc.h +++ b/thirdparty/vulkan/vk_mem_alloc.h @@ -1,5 +1,5 @@ // -// Copyright (c) 2017-2019 Advanced Micro Devices, Inc. All rights reserved. +// Copyright (c) 2017-2020 Advanced Micro Devices, Inc. All rights reserved. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal @@ -29,9 +29,9 @@ extern "C" { /** \mainpage Vulkan Memory Allocator -<b>Version 2.3.0-development</b> (2019-03-05) +<b>Version 2.3.0</b> (2019-12-04) -Copyright (c) 2017-2018 Advanced Micro Devices, Inc. All rights reserved. \n +Copyright (c) 2017-2019 Advanced Micro Devices, Inc. All rights reserved. \n License: MIT Documentation of all members: vk_mem_alloc.h @@ -40,46 +40,50 @@ Documentation of all members: vk_mem_alloc.h - <b>User guide</b> - \subpage quick_start - - [Project setup](@ref quick_start_project_setup) - - [Initialization](@ref quick_start_initialization) - - [Resource allocation](@ref quick_start_resource_allocation) + - [Project setup](@ref quick_start_project_setup) + - [Initialization](@ref quick_start_initialization) + - [Resource allocation](@ref quick_start_resource_allocation) - \subpage choosing_memory_type - - [Usage](@ref choosing_memory_type_usage) - - [Required and preferred flags](@ref choosing_memory_type_required_preferred_flags) - - [Explicit memory types](@ref choosing_memory_type_explicit_memory_types) - - [Custom memory pools](@ref choosing_memory_type_custom_memory_pools) - - [Dedicated allocations](@ref choosing_memory_type_dedicated_allocations) + - [Usage](@ref choosing_memory_type_usage) + - [Required and preferred flags](@ref choosing_memory_type_required_preferred_flags) + - [Explicit memory types](@ref choosing_memory_type_explicit_memory_types) + - [Custom memory pools](@ref 
choosing_memory_type_custom_memory_pools) + - [Dedicated allocations](@ref choosing_memory_type_dedicated_allocations) - \subpage memory_mapping - - [Mapping functions](@ref memory_mapping_mapping_functions) - - [Persistently mapped memory](@ref memory_mapping_persistently_mapped_memory) - - [Cache control](@ref memory_mapping_cache_control) - - [Finding out if memory is mappable](@ref memory_mapping_finding_if_memory_mappable) + - [Mapping functions](@ref memory_mapping_mapping_functions) + - [Persistently mapped memory](@ref memory_mapping_persistently_mapped_memory) + - [Cache flush and invalidate](@ref memory_mapping_cache_control) + - [Finding out if memory is mappable](@ref memory_mapping_finding_if_memory_mappable) + - \subpage staying_within_budget + - [Querying for budget](@ref staying_within_budget_querying_for_budget) + - [Controlling memory usage](@ref staying_within_budget_controlling_memory_usage) - \subpage custom_memory_pools - - [Choosing memory type index](@ref custom_memory_pools_MemTypeIndex) - - [Linear allocation algorithm](@ref linear_algorithm) - - [Free-at-once](@ref linear_algorithm_free_at_once) - - [Stack](@ref linear_algorithm_stack) - - [Double stack](@ref linear_algorithm_double_stack) - - [Ring buffer](@ref linear_algorithm_ring_buffer) - - [Buddy allocation algorithm](@ref buddy_algorithm) + - [Choosing memory type index](@ref custom_memory_pools_MemTypeIndex) + - [Linear allocation algorithm](@ref linear_algorithm) + - [Free-at-once](@ref linear_algorithm_free_at_once) + - [Stack](@ref linear_algorithm_stack) + - [Double stack](@ref linear_algorithm_double_stack) + - [Ring buffer](@ref linear_algorithm_ring_buffer) + - [Buddy allocation algorithm](@ref buddy_algorithm) - \subpage defragmentation - - [Defragmenting CPU memory](@ref defragmentation_cpu) - - [Defragmenting GPU memory](@ref defragmentation_gpu) - - [Additional notes](@ref defragmentation_additional_notes) - - [Writing custom allocation algorithm](@ref 
defragmentation_custom_algorithm) + - [Defragmenting CPU memory](@ref defragmentation_cpu) + - [Defragmenting GPU memory](@ref defragmentation_gpu) + - [Additional notes](@ref defragmentation_additional_notes) + - [Writing custom allocation algorithm](@ref defragmentation_custom_algorithm) - \subpage lost_allocations - \subpage statistics - - [Numeric statistics](@ref statistics_numeric_statistics) - - [JSON dump](@ref statistics_json_dump) + - [Numeric statistics](@ref statistics_numeric_statistics) + - [JSON dump](@ref statistics_json_dump) - \subpage allocation_annotation - - [Allocation user data](@ref allocation_user_data) - - [Allocation names](@ref allocation_names) + - [Allocation user data](@ref allocation_user_data) + - [Allocation names](@ref allocation_names) - \subpage debugging_memory_usage - - [Memory initialization](@ref debugging_memory_usage_initialization) - - [Margins](@ref debugging_memory_usage_margins) - - [Corruption detection](@ref debugging_memory_usage_corruption_detection) + - [Memory initialization](@ref debugging_memory_usage_initialization) + - [Margins](@ref debugging_memory_usage_margins) + - [Corruption detection](@ref debugging_memory_usage_corruption_detection) - \subpage record_and_replay - \subpage usage_patterns + - [Common mistakes](@ref usage_patterns_common_mistakes) - [Simple patterns](@ref usage_patterns_simple) - [Advanced patterns](@ref usage_patterns_advanced) - \subpage configuration @@ -88,6 +92,7 @@ Documentation of all members: vk_mem_alloc.h - [Device memory allocation callbacks](@ref allocation_callbacks) - [Device heap memory limit](@ref heap_memory_limit) - \subpage vk_khr_dedicated_allocation + - \subpage vk_amd_device_coherent_memory - \subpage general_considerations - [Thread safety](@ref general_considerations_thread_safety) - [Validation layer warnings](@ref general_considerations_validation_layer_warnings) @@ -206,7 +211,8 @@ You can also combine multiple methods. 
-# If you already have a buffer or an image created, you want to allocate memory for it and then you will bind it yourself, you can use function vmaAllocateMemoryForBuffer(), vmaAllocateMemoryForImage(). - For binding you should use functions: vmaBindBufferMemory(), vmaBindImageMemory(). + For binding you should use functions: vmaBindBufferMemory(), vmaBindImageMemory() + or their extended versions: vmaBindBufferMemory2(), vmaBindImageMemory2(). -# If you want to create a buffer or an image, allocate memory for it and bind them together, all in one call, you can use function vmaCreateBuffer(), vmaCreateImage(). This is the easiest and recommended way to use this library. @@ -351,7 +357,7 @@ Example: struct ConstantBuffer { - ... + ... }; ConstantBuffer constantBufferData; @@ -421,12 +427,13 @@ There are some exceptions though, when you should consider mapping memory only f which requires unmapping before GPU can see updated texture. - Keeping many large memory blocks mapped may impact performance or stability of some debugging tools. -\section memory_mapping_cache_control Cache control - +\section memory_mapping_cache_control Cache flush and invalidate + Memory in Vulkan doesn't need to be unmapped before using it on GPU, but unless a memory types has `VK_MEMORY_PROPERTY_HOST_COHERENT_BIT` flag set, -you need to manually invalidate cache before reading of mapped pointer -and flush cache after writing to mapped pointer. +you need to manually **invalidate** cache before reading of mapped pointer +and **flush** cache after writing to mapped pointer. +Map/unmap operations don't do that automatically. 
Vulkan provides following functions for this purpose `vkFlushMappedMemoryRanges()`, `vkInvalidateMappedMemoryRanges()`, but this library provides more convenient functions that refer to given allocation object: vmaFlushAllocation(), @@ -440,7 +447,7 @@ within blocks are aligned to this value, so their offsets are always multiply of Please note that memory allocated with #VMA_MEMORY_USAGE_CPU_ONLY is guaranteed to be `HOST_COHERENT`. -Also, Windows drivers from all 3 PC GPU vendors (AMD, Intel, NVIDIA) +Also, Windows drivers from all 3 **PC** GPU vendors (AMD, Intel, NVIDIA) currently provide `HOST_COHERENT` flag on all memory types that are `HOST_VISIBLE`, so on this platform you may not need to bother. @@ -474,16 +481,16 @@ VkMemoryPropertyFlags memFlags; vmaGetMemoryTypeProperties(allocator, allocInfo.memoryType, &memFlags); if((memFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0) { - // Allocation ended up in mappable memory. You can map it and access it directly. - void* mappedData; - vmaMapMemory(allocator, alloc, &mappedData); - memcpy(mappedData, &constantBufferData, sizeof(constantBufferData)); - vmaUnmapMemory(allocator, alloc); + // Allocation ended up in mappable memory. You can map it and access it directly. + void* mappedData; + vmaMapMemory(allocator, alloc, &mappedData); + memcpy(mappedData, &constantBufferData, sizeof(constantBufferData)); + vmaUnmapMemory(allocator, alloc); } else { - // Allocation ended up in non-mappable memory. - // You need to create CPU-side buffer in VMA_MEMORY_USAGE_CPU_ONLY and make a transfer. + // Allocation ended up in non-mappable memory. + // You need to create CPU-side buffer in VMA_MEMORY_USAGE_CPU_ONLY and make a transfer. } \endcode @@ -509,18 +516,90 @@ vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allo if(allocInfo.pUserData != nullptr) { - // Allocation ended up in mappable memory. - // It's persistently mapped. You can access it directly. 
- memcpy(allocInfo.pMappedData, &constantBufferData, sizeof(constantBufferData)); + // Allocation ended up in mappable memory. + // It's persistently mapped. You can access it directly. + memcpy(allocInfo.pMappedData, &constantBufferData, sizeof(constantBufferData)); } else { - // Allocation ended up in non-mappable memory. - // You need to create CPU-side buffer in VMA_MEMORY_USAGE_CPU_ONLY and make a transfer. + // Allocation ended up in non-mappable memory. + // You need to create CPU-side buffer in VMA_MEMORY_USAGE_CPU_ONLY and make a transfer. } \endcode +\page staying_within_budget Staying within budget + +When developing a graphics-intensive game or program, it is important to avoid allocating +more GPU memory than it's physically available. When the memory is over-committed, +various bad things can happen, depending on the specific GPU, graphics driver, and +operating system: + +- It may just work without any problems. +- The application may slow down because some memory blocks are moved to system RAM + and the GPU has to access them through PCI Express bus. +- A new allocation may take very long time to complete, even few seconds, and possibly + freeze entire system. +- The new allocation may fail with `VK_ERROR_OUT_OF_DEVICE_MEMORY`. +- It may even result in GPU crash (TDR), observed as `VK_ERROR_DEVICE_LOST` + returned somewhere later. + +\section staying_within_budget_querying_for_budget Querying for budget + +To query for current memory usage and available budget, use function vmaGetBudget(). +Returned structure #VmaBudget contains quantities expressed in bytes, per Vulkan memory heap. + +Please note that this function returns different information and works faster than +vmaCalculateStats(). vmaGetBudget() can be called every frame or even before every +allocation, while vmaCalculateStats() is intended to be used rarely, +only to obtain statistical information, e.g. for debugging purposes. 
+ +It is recommended to use <b>VK_EXT_memory_budget</b> device extension to obtain information +about the budget from Vulkan device. VMA is able to use this extension automatically. +When not enabled, the allocator behaves same way, but then it estimates current usage +and available budget based on its internal information and Vulkan memory heap sizes, +which may be less precise. In order to use this extension: + +1. Make sure extensions VK_EXT_memory_budget and VK_KHR_get_physical_device_properties2 + required by it are available and enable them. Please note that the first is a device + extension and the second is instance extension! +2. Use flag #VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT when creating #VmaAllocator object. +3. Make sure to call vmaSetCurrentFrameIndex() every frame. Budget is queried from + Vulkan inside of it to avoid overhead of querying it with every allocation. + +\section staying_within_budget_controlling_memory_usage Controlling memory usage + +There are many ways in which you can try to stay within the budget. + +First, when making new allocation requires allocating a new memory block, the library +tries not to exceed the budget automatically. If a block with default recommended size +(e.g. 256 MB) would go over budget, a smaller block is allocated, possibly even +dedicated memory for just this resource. + +If the size of the requested resource plus current memory usage is more than the +budget, by default the library still tries to create it, leaving it to the Vulkan +implementation whether the allocation succeeds or fails. You can change this behavior +by using #VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT flag. With it, the allocation is +not made if it would exceed the budget or if the budget is already exceeded. +Some other allocations become lost instead to make room for it, if the mechanism of +[lost allocations](@ref lost_allocations) is used. +If that is not possible, the allocation fails with `VK_ERROR_OUT_OF_DEVICE_MEMORY`. 
+Example usage pattern may be to pass the #VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT flag +when creating resources that are not essential for the application (e.g. the texture +of a specific object) and not to pass it when creating critically important resources +(e.g. render targets). + +Finally, you can also use #VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT flag to make sure +a new allocation is created only when it fits inside one of the existing memory blocks. +If it would require to allocate a new block, if fails instead with `VK_ERROR_OUT_OF_DEVICE_MEMORY`. +This also ensures that the function call is very fast because it never goes to Vulkan +to obtain a new block. + +Please note that creating \ref custom_memory_pools with VmaPoolCreateInfo::minBlockCount +set to more than 0 will try to allocate memory blocks without checking whether they +fit within budget. + + \page custom_memory_pools Custom memory pools A memory pool contains a number of `VkDeviceMemory` blocks. @@ -744,7 +823,7 @@ allocations. To mitigate this problem, you can use defragmentation feature: structure #VmaDefragmentationInfo2, function vmaDefragmentationBegin(), vmaDefragmentationEnd(). -Given set of allocations, +Given set of allocations, this function can move them to compact used memory, ensure more continuous free space and possibly also free some `VkDeviceMemory` blocks. @@ -761,7 +840,8 @@ What it doesn't do, so you need to do it yourself: - Recreate buffers and images that were bound to allocations that were defragmented and bind them with their new places in memory. You must use `vkDestroyBuffer()`, `vkDestroyImage()`, - `vkCreateBuffer()`, `vkCreateImage()` for that purpose and NOT vmaDestroyBuffer(), + `vkCreateBuffer()`, `vkCreateImage()`, vmaBindBufferMemory(), vmaBindImageMemory() + for that purpose and NOT vmaDestroyBuffer(), vmaDestroyImage(), vmaCreateBuffer(), vmaCreateImage(), because you don't need to destroy or create allocation objects! 
- Recreate views and update descriptors that point to these buffers and images. @@ -801,22 +881,22 @@ vmaDefragmentationEnd(allocator, defragCtx); for(uint32_t i = 0; i < allocCount; ++i) { - if(allocationsChanged[i]) - { - // Destroy buffer that is immutably bound to memory region which is no longer valid. - vkDestroyBuffer(device, buffers[i], nullptr); - - // Create new buffer with same parameters. - VkBufferCreateInfo bufferInfo = ...; - vkCreateBuffer(device, &bufferInfo, nullptr, &buffers[i]); - - // You can make dummy call to vkGetBufferMemoryRequirements here to silence validation layer warning. + if(allocationsChanged[i]) + { + // Destroy buffer that is immutably bound to memory region which is no longer valid. + vkDestroyBuffer(device, buffers[i], nullptr); - // Bind new buffer to new memory region. Data contained in it is already moved. - VmaAllocationInfo allocInfo; - vmaGetAllocationInfo(allocator, allocations[i], &allocInfo); - vkBindBufferMemory(device, buffers[i], allocInfo.deviceMemory, allocInfo.offset); - } + // Create new buffer with same parameters. + VkBufferCreateInfo bufferInfo = ...; + vkCreateBuffer(device, &bufferInfo, nullptr, &buffers[i]); + + // You can make dummy call to vkGetBufferMemoryRequirements here to silence validation layer warning. + + // Bind new buffer to new memory region. Data contained in it is already moved. + VmaAllocationInfo allocInfo; + vmaGetAllocationInfo(allocator, allocations[i], &allocInfo); + vmaBindBufferMemory(allocator, allocations[i], buffers[i]); + } } \endcode @@ -879,22 +959,22 @@ vmaDefragmentationEnd(allocator, defragCtx); for(uint32_t i = 0; i < allocCount; ++i) { - if(allocationsChanged[i]) - { - // Destroy buffer that is immutably bound to memory region which is no longer valid. - vkDestroyBuffer(device, buffers[i], nullptr); - - // Create new buffer with same parameters. 
- VkBufferCreateInfo bufferInfo = ...; - vkCreateBuffer(device, &bufferInfo, nullptr, &buffers[i]); - - // You can make dummy call to vkGetBufferMemoryRequirements here to silence validation layer warning. + if(allocationsChanged[i]) + { + // Destroy buffer that is immutably bound to memory region which is no longer valid. + vkDestroyBuffer(device, buffers[i], nullptr); - // Bind new buffer to new memory region. Data contained in it is already moved. - VmaAllocationInfo allocInfo; - vmaGetAllocationInfo(allocator, allocations[i], &allocInfo); - vkBindBufferMemory(device, buffers[i], allocInfo.deviceMemory, allocInfo.offset); - } + // Create new buffer with same parameters. + VkBufferCreateInfo bufferInfo = ...; + vkCreateBuffer(device, &bufferInfo, nullptr, &buffers[i]); + + // You can make dummy call to vkGetBufferMemoryRequirements here to silence validation layer warning. + + // Bind new buffer to new memory region. Data contained in it is already moved. + VmaAllocationInfo allocInfo; + vmaGetAllocationInfo(allocator, allocations[i], &allocInfo); + vmaBindBufferMemory(allocator, allocations[i], buffers[i]); + } } \endcode @@ -1008,40 +1088,40 @@ Example code: \code struct MyBuffer { - VkBuffer m_Buf = nullptr; - VmaAllocation m_Alloc = nullptr; + VkBuffer m_Buf = nullptr; + VmaAllocation m_Alloc = nullptr; - // Called when the buffer is really needed in the current frame. - void EnsureBuffer(); + // Called when the buffer is really needed in the current frame. + void EnsureBuffer(); }; void MyBuffer::EnsureBuffer() { - // Buffer has been created. - if(m_Buf != VK_NULL_HANDLE) - { - // Check if its allocation is not lost + mark it as used in current frame. - if(vmaTouchAllocation(allocator, m_Alloc)) + // Buffer has been created. + if(m_Buf != VK_NULL_HANDLE) { - // It's all OK - safe to use m_Buf. - return; + // Check if its allocation is not lost + mark it as used in current frame. 
+ if(vmaTouchAllocation(allocator, m_Alloc)) + { + // It's all OK - safe to use m_Buf. + return; + } } - } - // Buffer not yet exists or lost - destroy and recreate it. + // Buffer not yet exists or lost - destroy and recreate it. - vmaDestroyBuffer(allocator, m_Buf, m_Alloc); + vmaDestroyBuffer(allocator, m_Buf, m_Alloc); - VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO }; - bufCreateInfo.size = 1024; - bufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT; + VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO }; + bufCreateInfo.size = 1024; + bufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT; - VmaAllocationCreateInfo allocCreateInfo = {}; - allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY; - allocCreateInfo.flags = VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT | - VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT; + VmaAllocationCreateInfo allocCreateInfo = {}; + allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY; + allocCreateInfo.flags = VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT | + VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT; - vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &m_Buf, &m_Alloc, nullptr); + vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &m_Buf, &m_Alloc, nullptr); } \endcode @@ -1189,6 +1269,9 @@ printf("Image name: %s\n", imageName); That string is also printed in JSON report created by vmaBuildStatsString(). +\note Passing string name to VMA allocation doesn't automatically set it to the Vulkan buffer or image created with it. +You must do it manually using an extension like VK_EXT_debug_utils, which is independent of this library. + \page debugging_memory_usage Debugging incorrect memory usage @@ -1280,7 +1363,7 @@ which indicates a serious bug. 
You can also explicitly request checking margins of all allocations in all memory blocks that belong to specified memory types by using function vmaCheckCorruption(), -or in memory blocks that belong to specified custom pool, by using function +or in memory blocks that belong to specified custom pool, by using function vmaCheckPoolCorruption(). Margin validation (corruption detection) works only for memory types that are @@ -1304,6 +1387,13 @@ application. It can be useful to: \section record_and_replay_usage Usage +Recording functionality is disabled by default. +To enable it, define following macro before every include of this library: + +\code +#define VMA_RECORDING_ENABLED 1 +\endcode + <b>To record sequence of calls to a file:</b> Fill in VmaAllocatorCreateInfo::pRecordSettings member while creating #VmaAllocator object. File is opened and written during whole lifetime of the allocator. @@ -1315,7 +1405,7 @@ Its project is generated by Premake. Command line syntax is printed when the program is launched without parameters. Basic usage: - VmaReplay.exe MyRecording.csv + VmaReplay.exe MyRecording.csv <b>Documentation of file format</b> can be found in file: "docs/Recording file format.md". It's a human-readable, text file in CSV format (Comma Separated Values). @@ -1330,7 +1420,6 @@ It's a human-readable, text file in CSV format (Comma Separated Values). coded and tested only on Windows. Inclusion of recording code is driven by `VMA_RECORDING_ENABLED` macro. Support for other platforms should be easy to add. Contributions are welcomed. -- Currently calls to vmaDefragment() function are not recorded. \page usage_patterns Recommended usage patterns @@ -1339,6 +1428,27 @@ See also slides from talk: [Sawicki, Adam. Advanced Graphics Techniques Tutorial: Memory management in Vulkan and DX12. 
Game Developers Conference, 2018](https://www.gdcvault.com/play/1025458/Advanced-Graphics-Techniques-Tutorial-New) +\section usage_patterns_common_mistakes Common mistakes + +<b>Use of CPU_TO_GPU instead of CPU_ONLY memory</b> + +#VMA_MEMORY_USAGE_CPU_TO_GPU is recommended only for resources that will be +mapped and written by the CPU, as well as read directly by the GPU - like some +buffers or textures updated every frame (dynamic). If you create a staging copy +of a resource to be written by CPU and then used as a source of transfer to +another resource placed in the GPU memory, that staging resource should be +created with #VMA_MEMORY_USAGE_CPU_ONLY. Please read the descriptions of these +enums carefully for details. + +<b>Unnecessary use of custom pools</b> + +\ref custom_memory_pools may be useful for special purposes - when you want to +keep certain type of resources separate e.g. to reserve minimum amount of memory +for them, limit maximum amount of memory they can occupy, or make some of them +push out the other through the mechanism of \ref lost_allocations. For most +resources this is not needed and so it is not recommended to create #VmaPool +objects and allocations out of them. Allocating from the default pool is sufficient. + \section usage_patterns_simple Simple patterns \subsection usage_patterns_simple_render_targets Render targets @@ -1472,6 +1582,11 @@ mutex, atomic etc. The library uses its own implementation of containers by default, but you can switch to using STL containers instead. +For example, define `VMA_ASSERT(expr)` before including the library to provide +custom implementation of the assertion, compatible with your project. +By default it is defined to standard C `assert(expr)` in `_DEBUG` configuration +and empty otherwise. + \section config_Vulkan_functions Pointers to Vulkan functions The library uses Vulkan functions straight from the `vulkan.h` header by default. 
@@ -1505,7 +1620,7 @@ behavior is implementation-dependant - it depends on GPU vendor and graphics driver. On AMD cards it can be controlled while creating Vulkan device object by using -VK_AMD_memory_allocation_behavior extension, if available. +VK_AMD_memory_overallocation_behavior extension, if available. Alternatively, if you want to test how your program behaves with limited amount of Vulkan device memory available without switching your graphics card to one that really has @@ -1550,7 +1665,7 @@ buffer using vmaCreateBuffer() or image using vmaCreateImage(). When using the extension together with Vulkan Validation Layer, you will receive warnings like this: - vkBindBufferMemory(): Binding memory to buffer 0x33 but vkGetBufferMemoryRequirements() has not been called on that buffer. + vkBindBufferMemory(): Binding memory to buffer 0x33 but vkGetBufferMemoryRequirements() has not been called on that buffer. It is OK, you should just ignore it. It happens because you use function `vkGetBufferMemoryRequirements2KHR()` instead of standard @@ -1559,11 +1674,68 @@ unaware of it. To learn more about this extension, see: -- [VK_KHR_dedicated_allocation in Vulkan specification](https://www.khronos.org/registry/vulkan/specs/1.0-extensions/html/vkspec.html#VK_KHR_dedicated_allocation) +- [VK_KHR_dedicated_allocation in Vulkan specification](https://www.khronos.org/registry/vulkan/specs/1.2-extensions/html/chap44.html#VK_KHR_dedicated_allocation) - [VK_KHR_dedicated_allocation unofficial manual](http://asawicki.info/articles/VK_KHR_dedicated_allocation.php5) +\page vk_amd_device_coherent_memory VK_AMD_device_coherent_memory + +VK_AMD_device_coherent_memory is a device extension that enables access to +additional memory types with `VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD` and +`VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD` flag. 
It is useful mostly for +allocation of buffers intended for writing "breadcrumb markers" in between passes +or draw calls, which in turn are useful for debugging GPU crash/hang/TDR cases. + +When the extension is available but has not been enabled, Vulkan physical device +still exposes those memory types, but their usage is forbidden. VMA automatically +takes care of that - it returns `VK_ERROR_FEATURE_NOT_PRESENT` when an attempt +to allocate memory of such type is made. + +If you want to use this extension in connection with VMA, follow these steps: + +\section vk_amd_device_coherent_memory_initialization Initialization + +1) Call `vkEnumerateDeviceExtensionProperties` for the physical device. +Check if the extension is supported - if returned array of `VkExtensionProperties` contains "VK_AMD_device_coherent_memory". + +2) Call `vkGetPhysicalDeviceFeatures2` for the physical device instead of old `vkGetPhysicalDeviceFeatures`. +Attach additional structure `VkPhysicalDeviceCoherentMemoryFeaturesAMD` to `VkPhysicalDeviceFeatures2::pNext` to be returned. +Check if the device feature is really supported - check if `VkPhysicalDeviceCoherentMemoryFeaturesAMD::deviceCoherentMemory` is true. + +3) While creating device with `vkCreateDevice`, enable this extension - add "VK_AMD_device_coherent_memory" +to the list passed as `VkDeviceCreateInfo::ppEnabledExtensionNames`. + +4) While creating the device, also don't set `VkDeviceCreateInfo::pEnabledFeatures`. +Fill in `VkPhysicalDeviceFeatures2` structure instead and pass it as `VkDeviceCreateInfo::pNext`. +Enable this device feature - attach additional structure `VkPhysicalDeviceCoherentMemoryFeaturesAMD` to +`VkPhysicalDeviceFeatures2::pNext` and set its member `deviceCoherentMemory` to `VK_TRUE`. + +5) While creating #VmaAllocator with vmaCreateAllocator() inform VMA that you +have enabled this extension and feature - add #VMA_ALLOCATOR_CREATE_AMD_DEVICE_COHERENT_MEMORY_BIT +to VmaAllocatorCreateInfo::flags. 
+ +\section vk_amd_device_coherent_memory_usage Usage + +After following steps described above, you can create VMA allocations and custom pools +out of the special `DEVICE_COHERENT` and `DEVICE_UNCACHED` memory types on eligible +devices. There are multiple ways to do it, for example: + +- You can request or prefer to allocate out of such memory types by adding + `VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD` to VmaAllocationCreateInfo::requiredFlags + or VmaAllocationCreateInfo::preferredFlags. Those flags can be freely mixed with + other ways of \ref choosing_memory_type, like setting VmaAllocationCreateInfo::usage. +- If you manually found memory type index to use for this purpose, force allocation + from this specific index by setting VmaAllocationCreateInfo::memoryTypeBits `= 1u << index`. + +\section vk_amd_device_coherent_memory_more_information More information + +To learn more about this extension, see [VK_AMD_device_coherent_memory in Vulkan specification](https://www.khronos.org/registry/vulkan/specs/1.2-extensions/html/chap44.html#VK_AMD_device_coherent_memory) + +Example use of this extension can be found in the code of the sample and test suite +accompanying this library. + + \page general_considerations General considerations \section general_considerations_thread_safety Thread safety @@ -1590,14 +1762,14 @@ to just ignore them. - *vkBindBufferMemory(): Binding memory to buffer 0xeb8e4 but vkGetBufferMemoryRequirements() has not been called on that buffer.* - It happens when VK_KHR_dedicated_allocation extension is enabled. - `vkGetBufferMemoryRequirements2KHR` function is used instead, while validation layer seems to be unaware of it. + `vkGetBufferMemoryRequirements2KHR` function is used instead, while validation layer seems to be unaware of it. - *Mapping an image with layout VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL can result in undefined behavior if this memory is used by the device. 
Only GENERAL or PREINITIALIZED should be used.* - It happens when you map a buffer or image, because the library maps entire - `VkDeviceMemory` block, where different types of images and buffers may end - up together, especially on GPUs with unified memory like Intel. + `VkDeviceMemory` block, where different types of images and buffers may end + up together, especially on GPUs with unified memory like Intel. - *Non-linear image 0xebc91 is aliased with linear buffer 0xeb8e4 which may indicate a bug.* - It happens when you use lost allocations, and a new image or buffer is - created in place of an existing object that bacame lost. + created in place of an existing object that bacame lost. - It may happen also when you use [defragmentation](@ref defragmentation). \section general_considerations_allocation_algorithm Allocation algorithm @@ -1639,7 +1811,7 @@ Features deliberately excluded from the scope of this library: and handled gracefully, because that would complicate code significantly and is usually not needed in desktop PC applications anyway. - Code free of any compiler warnings. Maintaining the library to compile and - work correctly on so many different platforms is hard enough. Being free of + work correctly on so many different platforms is hard enough. Being free of any warnings, on any version of any compiler, is simply not feasible. - This is a C++ library with C interface. Bindings or ports to any other programming languages are welcomed as external projects and @@ -1652,31 +1824,66 @@ Define this macro to 0/1 to disable/enable support for recording functionality, available through VmaAllocatorCreateInfo::pRecordSettings. 
*/ #ifndef VMA_RECORDING_ENABLED -#ifdef _WIN32 -#define VMA_RECORDING_ENABLED 1 -#else -#define VMA_RECORDING_ENABLED 0 -#endif + #define VMA_RECORDING_ENABLED 0 #endif #ifndef NOMINMAX -#define NOMINMAX // For windows.h + #define NOMINMAX // For windows.h #endif #ifndef VULKAN_H_ -#include <vulkan/vulkan.h> + #include <vulkan/vulkan.h> #endif #if VMA_RECORDING_ENABLED -#include <windows.h> + #include <windows.h> +#endif + +// Define this macro to declare maximum supported Vulkan version in format AAABBBCCC, +// where AAA = major, BBB = minor, CCC = patch. +// If you want to use version > 1.0, it still needs to be enabled via VmaAllocatorCreateInfo::vulkanApiVersion. +#if !defined(VMA_VULKAN_VERSION) + #if defined(VK_VERSION_1_1) + #define VMA_VULKAN_VERSION 1001000 + #else + #define VMA_VULKAN_VERSION 1000000 + #endif #endif #if !defined(VMA_DEDICATED_ALLOCATION) -#if VK_KHR_get_memory_requirements2 && VK_KHR_dedicated_allocation -#define VMA_DEDICATED_ALLOCATION 1 -#else -#define VMA_DEDICATED_ALLOCATION 0 + #if VK_KHR_get_memory_requirements2 && VK_KHR_dedicated_allocation + #define VMA_DEDICATED_ALLOCATION 1 + #else + #define VMA_DEDICATED_ALLOCATION 0 + #endif +#endif + +#if !defined(VMA_BIND_MEMORY2) + #if VK_KHR_bind_memory2 + #define VMA_BIND_MEMORY2 1 + #else + #define VMA_BIND_MEMORY2 0 + #endif +#endif + +#if !defined(VMA_MEMORY_BUDGET) + #if VK_EXT_memory_budget && (VK_KHR_get_physical_device_properties2 || VMA_VULKAN_VERSION >= 1001000) + #define VMA_MEMORY_BUDGET 1 + #else + #define VMA_MEMORY_BUDGET 0 + #endif +#endif + +// Define these macros to decorate all public functions with additional code, +// before and after returned type, appropriately. This may be useful for +// exporing the functions when compiling VMA as a separate library. 
Example: +// #define VMA_CALL_PRE __declspec(dllexport) +// #define VMA_CALL_POST __cdecl +#ifndef VMA_CALL_PRE + #define VMA_CALL_PRE #endif +#ifndef VMA_CALL_POST + #define VMA_CALL_POST #endif /** \struct VmaAllocator @@ -1691,17 +1898,17 @@ right after Vulkan is initialized and keep it alive until before Vulkan device i VK_DEFINE_HANDLE(VmaAllocator) /// Callback function called after successful vkAllocateMemory. -typedef void(VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)( - VmaAllocator allocator, - uint32_t memoryType, - VkDeviceMemory memory, - VkDeviceSize size); +typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)( + VmaAllocator allocator, + uint32_t memoryType, + VkDeviceMemory memory, + VkDeviceSize size); /// Callback function called before vkFreeMemory. -typedef void(VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)( - VmaAllocator allocator, - uint32_t memoryType, - VkDeviceMemory memory, - VkDeviceSize size); +typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)( + VmaAllocator allocator, + uint32_t memoryType, + VkDeviceMemory memory, + VkDeviceSize size); /** \brief Set of callbacks that the library will call for `vkAllocateMemory` and `vkFreeMemory`. @@ -1721,31 +1928,79 @@ typedef struct VmaDeviceMemoryCallbacks { typedef enum VmaAllocatorCreateFlagBits { /** \brief Allocator and all objects created from it will not be synchronized internally, so you must guarantee they are used from only one thread at a time or synchronized externally by you. - Using this flag may increase performance because internal mutexes are not used. - */ + Using this flag may increase performance because internal mutexes are not used. + */ VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT = 0x00000001, /** \brief Enables usage of VK_KHR_dedicated_allocation extension. 
- Using this extenion will automatically allocate dedicated blocks of memory for - some buffers and images instead of suballocating place for them out of bigger - memory blocks (as if you explicitly used #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT - flag) when it is recommended by the driver. It may improve performance on some - GPUs. + The flag works only if VmaAllocatorCreateInfo::vulkanApiVersion `== VK_API_VERSION_1_0`. + When it's `VK_API_VERSION_1_1`, the flag is ignored because the extension has been promoted to Vulkan 1.1. - You may set this flag only if you found out that following device extensions are - supported, you enabled them while creating Vulkan device passed as - VmaAllocatorCreateInfo::device, and you want them to be used internally by this - library: + Using this extenion will automatically allocate dedicated blocks of memory for + some buffers and images instead of suballocating place for them out of bigger + memory blocks (as if you explicitly used #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT + flag) when it is recommended by the driver. It may improve performance on some + GPUs. - - VK_KHR_get_memory_requirements2 - - VK_KHR_dedicated_allocation + You may set this flag only if you found out that following device extensions are + supported, you enabled them while creating Vulkan device passed as + VmaAllocatorCreateInfo::device, and you want them to be used internally by this + library: -When this flag is set, you can experience following warnings reported by Vulkan -validation layer. You can ignore them. + - VK_KHR_get_memory_requirements2 (device extension) + - VK_KHR_dedicated_allocation (device extension) -> vkBindBufferMemory(): Binding memory to buffer 0x2d but vkGetBufferMemoryRequirements() has not been called on that buffer. - */ + When this flag is set, you can experience following warnings reported by Vulkan + validation layer. You can ignore them. 
+ + > vkBindBufferMemory(): Binding memory to buffer 0x2d but vkGetBufferMemoryRequirements() has not been called on that buffer. + */ VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT = 0x00000002, + /** + Enables usage of VK_KHR_bind_memory2 extension. + + The flag works only if VmaAllocatorCreateInfo::vulkanApiVersion `== VK_API_VERSION_1_0`. + When it's `VK_API_VERSION_1_1`, the flag is ignored because the extension has been promoted to Vulkan 1.1. + + You may set this flag only if you found out that this device extension is supported, + you enabled it while creating Vulkan device passed as VmaAllocatorCreateInfo::device, + and you want it to be used internally by this library. + + The extension provides functions `vkBindBufferMemory2KHR` and `vkBindImageMemory2KHR`, + which allow to pass a chain of `pNext` structures while binding. + This flag is required if you use `pNext` parameter in vmaBindBufferMemory2() or vmaBindImageMemory2(). + */ + VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT = 0x00000004, + /** + Enables usage of VK_EXT_memory_budget extension. + + You may set this flag only if you found out that this device extension is supported, + you enabled it while creating Vulkan device passed as VmaAllocatorCreateInfo::device, + and you want it to be used internally by this library, along with another instance extension + VK_KHR_get_physical_device_properties2, which is required by it (or Vulkan 1.1, where this extension is promoted). + + The extension provides query for current memory usage and budget, which will probably + be more accurate than an estimation used by the library otherwise. + */ + VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT = 0x00000008, + /** + Enabled usage of VK_AMD_device_coherent_memory extension. 
+ + You may set this flag only if you: + + - found out that this device extension is supported and enabled it while creating Vulkan device passed as VmaAllocatorCreateInfo::device, + - checked that `VkPhysicalDeviceCoherentMemoryFeaturesAMD::deviceCoherentMemory` is true and set it while creating the Vulkan device, + - want it to be used internally by this library. + + The extension and accompanying device feature provide access to memory types with + `VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD` and `VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD` flags. + They are useful mostly for writing breadcrumb markers - a common method for debugging GPU crash/hang/TDR. + + When the extension is not enabled, such memory types are still enumerated, but their usage is illegal. + To protect from this error, if you don't create the allocator with this flag, it will refuse to allocate any memory or create a custom pool in such memory type, + returning `VK_ERROR_FEATURE_NOT_PRESENT`. + */ + VMA_ALLOCATOR_CREATE_AMD_DEVICE_COHERENT_MEMORY_BIT = 0x00000010, VMA_ALLOCATOR_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF } VmaAllocatorCreateFlagBits; @@ -1773,41 +2028,50 @@ typedef struct VmaVulkanFunctions { PFN_vkCreateImage vkCreateImage; PFN_vkDestroyImage vkDestroyImage; PFN_vkCmdCopyBuffer vkCmdCopyBuffer; -#if VMA_DEDICATED_ALLOCATION +#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000 PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR; PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR; #endif +#if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000 + PFN_vkBindBufferMemory2KHR vkBindBufferMemory2KHR; + PFN_vkBindImageMemory2KHR vkBindImageMemory2KHR; +#endif +#if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000 + PFN_vkGetPhysicalDeviceMemoryProperties2KHR vkGetPhysicalDeviceMemoryProperties2KHR; +#endif } VmaVulkanFunctions; /// Flags to be used in VmaRecordSettings::flags. 
typedef enum VmaRecordFlagBits { /** \brief Enables flush after recording every function call. - Enable it if you expect your application to crash, which may leave recording file truncated. - It may degrade performance though. - */ + Enable it if you expect your application to crash, which may leave recording file truncated. + It may degrade performance though. + */ VMA_RECORD_FLUSH_AFTER_CALL_BIT = 0x00000001, - + VMA_RECORD_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF } VmaRecordFlagBits; typedef VkFlags VmaRecordFlags; /// Parameters for recording calls to VMA functions. To be used in VmaAllocatorCreateInfo::pRecordSettings. -typedef struct VmaRecordSettings { +typedef struct VmaRecordSettings +{ /// Flags for recording. Use #VmaRecordFlagBits enum. VmaRecordFlags flags; /** \brief Path to the file that should be written by the recording. - Suggested extension: "csv". - If the file already exists, it will be overwritten. - It will be opened for the whole time #VmaAllocator object is alive. - If opening this file fails, creation of the whole allocator object fails. - */ - const char *pFilePath; + Suggested extension: "csv". + If the file already exists, it will be overwritten. + It will be opened for the whole time #VmaAllocator object is alive. + If opening this file fails, creation of the whole allocator object fails. + */ + const char* pFilePath; } VmaRecordSettings; /// Description of a Allocator to be created. -typedef struct VmaAllocatorCreateInfo { +typedef struct VmaAllocatorCreateInfo +{ /// Flags for created allocator. Use #VmaAllocatorCreateFlagBits enum. VmaAllocatorCreateFlags flags; /// Vulkan physical device. @@ -1821,94 +2085,109 @@ typedef struct VmaAllocatorCreateInfo { VkDeviceSize preferredLargeHeapBlockSize; /// Custom CPU memory allocation callbacks. Optional. /** Optional, can be null. When specified, will also be used for all CPU-side memory allocations. 
*/ - const VkAllocationCallbacks *pAllocationCallbacks; + const VkAllocationCallbacks* pAllocationCallbacks; /// Informative callbacks for `vkAllocateMemory`, `vkFreeMemory`. Optional. /** Optional, can be null. */ - const VmaDeviceMemoryCallbacks *pDeviceMemoryCallbacks; + const VmaDeviceMemoryCallbacks* pDeviceMemoryCallbacks; /** \brief Maximum number of additional frames that are in use at the same time as current frame. - This value is used only when you make allocations with - VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT flag. Such allocation cannot become - lost if allocation.lastUseFrameIndex >= allocator.currentFrameIndex - frameInUseCount. + This value is used only when you make allocations with + VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT flag. Such allocation cannot become + lost if allocation.lastUseFrameIndex >= allocator.currentFrameIndex - frameInUseCount. - For example, if you double-buffer your command buffers, so resources used for - rendering in previous frame may still be in use by the GPU at the moment you - allocate resources needed for the current frame, set this value to 1. + For example, if you double-buffer your command buffers, so resources used for + rendering in previous frame may still be in use by the GPU at the moment you + allocate resources needed for the current frame, set this value to 1. - If you want to allow any allocations other than used in the current frame to - become lost, set this value to 0. - */ + If you want to allow any allocations other than used in the current frame to + become lost, set this value to 0. + */ uint32_t frameInUseCount; /** \brief Either null or a pointer to an array of limits on maximum number of bytes that can be allocated out of particular Vulkan memory heap. - If not NULL, it must be a pointer to an array of - `VkPhysicalDeviceMemoryProperties::memoryHeapCount` elements, defining limit on - maximum number of bytes that can be allocated out of particular Vulkan memory - heap. 
- - Any of the elements may be equal to `VK_WHOLE_SIZE`, which means no limit on that - heap. This is also the default in case of `pHeapSizeLimit` = NULL. - - If there is a limit defined for a heap: - - - If user tries to allocate more memory from that heap using this allocator, - the allocation fails with `VK_ERROR_OUT_OF_DEVICE_MEMORY`. - - If the limit is smaller than heap size reported in `VkMemoryHeap::size`, the - value of this limit will be reported instead when using vmaGetMemoryProperties(). - - Warning! Using this feature may not be equivalent to installing a GPU with - smaller amount of memory, because graphics driver doesn't necessary fail new - allocations with `VK_ERROR_OUT_OF_DEVICE_MEMORY` result when memory capacity is - exceeded. It may return success and just silently migrate some device memory - blocks to system RAM. This driver behavior can also be controlled using - VK_AMD_memory_overallocation_behavior extension. - */ - const VkDeviceSize *pHeapSizeLimit; + If not NULL, it must be a pointer to an array of + `VkPhysicalDeviceMemoryProperties::memoryHeapCount` elements, defining limit on + maximum number of bytes that can be allocated out of particular Vulkan memory + heap. + + Any of the elements may be equal to `VK_WHOLE_SIZE`, which means no limit on that + heap. This is also the default in case of `pHeapSizeLimit` = NULL. + + If there is a limit defined for a heap: + + - If user tries to allocate more memory from that heap using this allocator, + the allocation fails with `VK_ERROR_OUT_OF_DEVICE_MEMORY`. + - If the limit is smaller than heap size reported in `VkMemoryHeap::size`, the + value of this limit will be reported instead when using vmaGetMemoryProperties(). + + Warning! Using this feature may not be equivalent to installing a GPU with + smaller amount of memory, because graphics driver doesn't necessary fail new + allocations with `VK_ERROR_OUT_OF_DEVICE_MEMORY` result when memory capacity is + exceeded. 
It may return success and just silently migrate some device memory + blocks to system RAM. This driver behavior can also be controlled using + VK_AMD_memory_overallocation_behavior extension. + */ + const VkDeviceSize* pHeapSizeLimit; /** \brief Pointers to Vulkan functions. Can be null if you leave define `VMA_STATIC_VULKAN_FUNCTIONS 1`. - If you leave define `VMA_STATIC_VULKAN_FUNCTIONS 1` in configuration section, - you can pass null as this member, because the library will fetch pointers to - Vulkan functions internally in a static way, like: + If you leave define `VMA_STATIC_VULKAN_FUNCTIONS 1` in configuration section, + you can pass null as this member, because the library will fetch pointers to + Vulkan functions internally in a static way, like: - vulkanFunctions.vkAllocateMemory = &vkAllocateMemory; + vulkanFunctions.vkAllocateMemory = &vkAllocateMemory; - Fill this member if you want to provide your own pointers to Vulkan functions, - e.g. fetched using `vkGetInstanceProcAddr()` and `vkGetDeviceProcAddr()`. - */ - const VmaVulkanFunctions *pVulkanFunctions; + Fill this member if you want to provide your own pointers to Vulkan functions, + e.g. fetched using `vkGetInstanceProcAddr()` and `vkGetDeviceProcAddr()`. + */ + const VmaVulkanFunctions* pVulkanFunctions; /** \brief Parameters for recording of VMA calls. Can be null. - If not null, it enables recording of calls to VMA functions to a file. - If support for recording is not enabled using `VMA_RECORDING_ENABLED` macro, - creation of the allocator object fails with `VK_ERROR_FEATURE_NOT_PRESENT`. - */ - const VmaRecordSettings *pRecordSettings; + If not null, it enables recording of calls to VMA functions to a file. + If support for recording is not enabled using `VMA_RECORDING_ENABLED` macro, + creation of the allocator object fails with `VK_ERROR_FEATURE_NOT_PRESENT`. + */ + const VmaRecordSettings* pRecordSettings; + /** \brief Optional handle to Vulkan instance object. + + Optional, can be null. 
Must be set if #VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT flas is used + or if `vulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)`. + */ + VkInstance instance; + /** \brief Optional. The highest version of Vulkan that the application is designed to use. + + It must be a value in the format as created by macro `VK_MAKE_VERSION` or a constant like: `VK_API_VERSION_1_1`, `VK_API_VERSION_1_0`. + The patch version number specified is ignored. Only the major and minor versions are considered. + It must be less or euqal (preferably equal) to value as passed to `vkCreateInstance` as `VkApplicationInfo::apiVersion`. + Only versions 1.0 and 1.1 are supported by the current implementation. + Leaving it initialized to zero is equivalent to `VK_API_VERSION_1_0`. + */ + uint32_t vulkanApiVersion; } VmaAllocatorCreateInfo; /// Creates Allocator object. -VkResult vmaCreateAllocator( - const VmaAllocatorCreateInfo *pCreateInfo, - VmaAllocator *pAllocator); +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAllocator( + const VmaAllocatorCreateInfo* pCreateInfo, + VmaAllocator* pAllocator); /// Destroys allocator object. -void vmaDestroyAllocator( - VmaAllocator allocator); +VMA_CALL_PRE void VMA_CALL_POST vmaDestroyAllocator( + VmaAllocator allocator); /** PhysicalDeviceProperties are fetched from physicalDevice by the allocator. You can access it here, without fetching it again on your own. */ -void vmaGetPhysicalDeviceProperties( - VmaAllocator allocator, - const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties); +VMA_CALL_PRE void VMA_CALL_POST vmaGetPhysicalDeviceProperties( + VmaAllocator allocator, + const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties); /** PhysicalDeviceMemoryProperties are fetched from physicalDevice by the allocator. You can access it here, without fetching it again on your own. 
*/ -void vmaGetMemoryProperties( - VmaAllocator allocator, - const VkPhysicalDeviceMemoryProperties **ppPhysicalDeviceMemoryProperties); +VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryProperties( + VmaAllocator allocator, + const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties); /** \brief Given Memory Type Index, returns Property Flags of this memory type. @@ -1916,10 +2195,10 @@ void vmaGetMemoryProperties( This is just a convenience function. Same information can be obtained using vmaGetMemoryProperties(). */ -void vmaGetMemoryTypeProperties( - VmaAllocator allocator, - uint32_t memoryTypeIndex, - VkMemoryPropertyFlags *pFlags); +VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryTypeProperties( + VmaAllocator allocator, + uint32_t memoryTypeIndex, + VkMemoryPropertyFlags* pFlags); /** \brief Sets index of the current frame. @@ -1929,13 +2208,14 @@ This function must be used if you make allocations with when a new frame begins. Allocations queried using vmaGetAllocationInfo() cannot become lost in the current frame. */ -void vmaSetCurrentFrameIndex( - VmaAllocator allocator, - uint32_t frameIndex); +VMA_CALL_PRE void VMA_CALL_POST vmaSetCurrentFrameIndex( + VmaAllocator allocator, + uint32_t frameIndex); /** \brief Calculated statistics of memory usage in entire allocator. */ -typedef struct VmaStatInfo { +typedef struct VmaStatInfo +{ /// Number of `VkDeviceMemory` Vulkan memory blocks allocated. uint32_t blockCount; /// Number of #VmaAllocation allocation objects allocated. @@ -1951,16 +2231,80 @@ typedef struct VmaStatInfo { } VmaStatInfo; /// General statistics from current state of Allocator. -typedef struct VmaStats { +typedef struct VmaStats +{ VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES]; VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS]; VmaStatInfo total; } VmaStats; -/// Retrieves statistics from current state of the Allocator. 
-void vmaCalculateStats( - VmaAllocator allocator, - VmaStats *pStats); +/** \brief Retrieves statistics from current state of the Allocator. + +This function is called "calculate" not "get" because it has to traverse all +internal data structures, so it may be quite slow. For faster but more brief statistics +suitable to be called every frame or every allocation, use vmaGetBudget(). + +Note that when using allocator from multiple threads, returned information may immediately +become outdated. +*/ +VMA_CALL_PRE void VMA_CALL_POST vmaCalculateStats( + VmaAllocator allocator, + VmaStats* pStats); + +/** \brief Statistics of current memory usage and available budget, in bytes, for specific memory heap. +*/ +typedef struct VmaBudget +{ + /** \brief Sum size of all `VkDeviceMemory` blocks allocated from particular heap, in bytes. + */ + VkDeviceSize blockBytes; + + /** \brief Sum size of all allocations created in particular heap, in bytes. + + Usually less or equal than `blockBytes`. + Difference `blockBytes - allocationBytes` is the amount of memory allocated but unused - + available for new allocations or wasted due to fragmentation. + + It might be greater than `blockBytes` if there are some allocations in lost state, as they account + to this value as well. + */ + VkDeviceSize allocationBytes; + + /** \brief Estimated current memory usage of the program, in bytes. + + Fetched from system using `VK_EXT_memory_budget` extension if enabled. + + It might be different than `blockBytes` (usually higher) due to additional implicit objects + also occupying the memory, like swapchain, pipelines, descriptor heaps, command buffers, or + `VkDeviceMemory` blocks allocated outside of this library, if any. + */ + VkDeviceSize usage; + + /** \brief Estimated amount of memory available to the program, in bytes. + + Fetched from system using `VK_EXT_memory_budget` extension if enabled. 
+ + It might be different (most probably smaller) than `VkMemoryHeap::size[heapIndex]` due to factors + external to the program, like other programs also consuming system resources. + Difference `budget - usage` is the amount of additional memory that can probably + be allocated without problems. Exceeding the budget may result in various problems. + */ + VkDeviceSize budget; +} VmaBudget; + +/** \brief Retrieves information about current memory budget for all memory heaps. + +\param[out] pBudget Must point to array with number of elements at least equal to number of memory heaps in physical device used. + +This function is called "get" not "calculate" because it is very fast, suitable to be called +every frame or every allocation. For more detailed statistics use vmaCalculateStats(). + +Note that when using allocator from multiple threads, returned information may immediately +become outdated. +*/ +VMA_CALL_PRE void VMA_CALL_POST vmaGetBudget( + VmaAllocator allocator, + VmaBudget* pBudget); #ifndef VMA_STATS_STRING_ENABLED #define VMA_STATS_STRING_ENABLED 1 @@ -1971,14 +2315,14 @@ void vmaCalculateStats( /// Builds and returns statistics as string in JSON format. /** @param[out] ppStatsString Must be freed using vmaFreeStatsString() function. */ -void vmaBuildStatsString( - VmaAllocator allocator, - char **ppStatsString, - VkBool32 detailedMap); +VMA_CALL_PRE void VMA_CALL_POST vmaBuildStatsString( + VmaAllocator allocator, + char** ppStatsString, + VkBool32 detailedMap); -void vmaFreeStatsString( - VmaAllocator allocator, - char *pStatsString); +VMA_CALL_PRE void VMA_CALL_POST vmaFreeStatsString( + VmaAllocator allocator, + char* pStatsString); #endif // #if VMA_STATS_STRING_ENABLED @@ -1992,205 +2336,226 @@ For more information see [Custom memory pools](@ref choosing_memory_type_custom_ */ VK_DEFINE_HANDLE(VmaPool) -typedef enum VmaMemoryUsage { +typedef enum VmaMemoryUsage +{ /** No intended memory usage specified. 
- Use other members of VmaAllocationCreateInfo to specify your requirements. - */ + Use other members of VmaAllocationCreateInfo to specify your requirements. + */ VMA_MEMORY_USAGE_UNKNOWN = 0, /** Memory will be used on device only, so fast access from the device is preferred. - It usually means device-local GPU (video) memory. - No need to be mappable on host. - It is roughly equivalent of `D3D12_HEAP_TYPE_DEFAULT`. - - Usage: - - - Resources written and read by device, e.g. images used as attachments. - - Resources transferred from host once (immutable) or infrequently and read by - device multiple times, e.g. textures to be sampled, vertex buffers, uniform - (constant) buffers, and majority of other types of resources used on GPU. - - Allocation may still end up in `HOST_VISIBLE` memory on some implementations. - In such case, you are free to map it. - You can use #VMA_ALLOCATION_CREATE_MAPPED_BIT with this usage type. - */ + It usually means device-local GPU (video) memory. + No need to be mappable on host. + It is roughly equivalent of `D3D12_HEAP_TYPE_DEFAULT`. + + Usage: + + - Resources written and read by device, e.g. images used as attachments. + - Resources transferred from host once (immutable) or infrequently and read by + device multiple times, e.g. textures to be sampled, vertex buffers, uniform + (constant) buffers, and majority of other types of resources used on GPU. + + Allocation may still end up in `HOST_VISIBLE` memory on some implementations. + In such case, you are free to map it. + You can use #VMA_ALLOCATION_CREATE_MAPPED_BIT with this usage type. + */ VMA_MEMORY_USAGE_GPU_ONLY = 1, /** Memory will be mappable on host. - It usually means CPU (system) memory. - Guarantees to be `HOST_VISIBLE` and `HOST_COHERENT`. - CPU access is typically uncached. Writes may be write-combined. - Resources created in this pool may still be accessible to the device, but access to them can be slow. - It is roughly equivalent of `D3D12_HEAP_TYPE_UPLOAD`. 
- - Usage: Staging copy of resources used as transfer source. - */ + It usually means CPU (system) memory. + Guarantees to be `HOST_VISIBLE` and `HOST_COHERENT`. + CPU access is typically uncached. Writes may be write-combined. + Resources created in this pool may still be accessible to the device, but access to them can be slow. + It is roughly equivalent of `D3D12_HEAP_TYPE_UPLOAD`. + + Usage: Staging copy of resources used as transfer source. + */ VMA_MEMORY_USAGE_CPU_ONLY = 2, /** - Memory that is both mappable on host (guarantees to be `HOST_VISIBLE`) and preferably fast to access by GPU. - CPU access is typically uncached. Writes may be write-combined. + Memory that is both mappable on host (guarantees to be `HOST_VISIBLE`) and preferably fast to access by GPU. + CPU access is typically uncached. Writes may be write-combined. - Usage: Resources written frequently by host (dynamic), read by device. E.g. textures, vertex buffers, uniform buffers updated every frame or every draw call. - */ + Usage: Resources written frequently by host (dynamic), read by device. E.g. textures, vertex buffers, uniform buffers updated every frame or every draw call. + */ VMA_MEMORY_USAGE_CPU_TO_GPU = 3, /** Memory mappable on host (guarantees to be `HOST_VISIBLE`) and cached. - It is roughly equivalent of `D3D12_HEAP_TYPE_READBACK`. + It is roughly equivalent of `D3D12_HEAP_TYPE_READBACK`. - Usage: + Usage: - - Resources written by device, read by host - results of some computations, e.g. screen capture, average scene luminance for HDR tone mapping. - - Any resources read or accessed randomly on host, e.g. CPU-side copy of vertex buffer used as source of transfer, but also used for collision detection. - */ + - Resources written by device, read by host - results of some computations, e.g. screen capture, average scene luminance for HDR tone mapping. + - Any resources read or accessed randomly on host, e.g. 
CPU-side copy of vertex buffer used as source of transfer, but also used for collision detection. + */ VMA_MEMORY_USAGE_GPU_TO_CPU = 4, + /** CPU memory - memory that is preferably not `DEVICE_LOCAL`, but also not guaranteed to be `HOST_VISIBLE`. + + Usage: Staging copy of resources moved from GPU memory to CPU memory as part + of custom paging/residency mechanism, to be moved back to GPU memory when needed. + */ + VMA_MEMORY_USAGE_CPU_COPY = 5, + /** Lazily allocated GPU memory having `VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT`. + Exists mostly on mobile platforms. Using it on desktop PC or other GPUs with no such memory type present will fail the allocation. + + Usage: Memory for transient attachment images (color attachments, depth attachments etc.), created with `VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT`. + + Allocations with this usage are always created as dedicated - it implies #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT. + */ + VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED = 6, + VMA_MEMORY_USAGE_MAX_ENUM = 0x7FFFFFFF } VmaMemoryUsage; /// Flags to be passed as VmaAllocationCreateInfo::flags. typedef enum VmaAllocationCreateFlagBits { /** \brief Set this flag if the allocation should have its own memory block. - - Use it for special, big resources, like fullscreen images used as attachments. - - You should not use this flag if VmaAllocationCreateInfo::pool is not null. - */ + + Use it for special, big resources, like fullscreen images used as attachments. + + You should not use this flag if VmaAllocationCreateInfo::pool is not null. + */ VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT = 0x00000001, /** \brief Set this flag to only try to allocate from existing `VkDeviceMemory` blocks and never create new such block. - - If new allocation cannot be placed in any of the existing blocks, allocation - fails with `VK_ERROR_OUT_OF_DEVICE_MEMORY` error. 
- - You should not use #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT and - #VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT at the same time. It makes no sense. - - If VmaAllocationCreateInfo::pool is not null, this flag is implied and ignored. */ + + If new allocation cannot be placed in any of the existing blocks, allocation + fails with `VK_ERROR_OUT_OF_DEVICE_MEMORY` error. + + You should not use #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT and + #VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT at the same time. It makes no sense. + + If VmaAllocationCreateInfo::pool is not null, this flag is implied and ignored. */ VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT = 0x00000002, /** \brief Set this flag to use a memory that will be persistently mapped and retrieve pointer to it. + + Pointer to mapped memory will be returned through VmaAllocationInfo::pMappedData. - Pointer to mapped memory will be returned through VmaAllocationInfo::pMappedData. - - Is it valid to use this flag for allocation made from memory type that is not - `HOST_VISIBLE`. This flag is then ignored and memory is not mapped. This is - useful if you need an allocation that is efficient to use on GPU - (`DEVICE_LOCAL`) and still want to map it directly if possible on platforms that - support it (e.g. Intel GPU). + Is it valid to use this flag for allocation made from memory type that is not + `HOST_VISIBLE`. This flag is then ignored and memory is not mapped. This is + useful if you need an allocation that is efficient to use on GPU + (`DEVICE_LOCAL`) and still want to map it directly if possible on platforms that + support it (e.g. Intel GPU). - You should not use this flag together with #VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT. - */ + You should not use this flag together with #VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT. 
+ */ VMA_ALLOCATION_CREATE_MAPPED_BIT = 0x00000004, /** Allocation created with this flag can become lost as a result of another - allocation with #VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT flag, so you - must check it before use. + allocation with #VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT flag, so you + must check it before use. - To check if allocation is not lost, call vmaGetAllocationInfo() and check if - VmaAllocationInfo::deviceMemory is not `VK_NULL_HANDLE`. + To check if allocation is not lost, call vmaGetAllocationInfo() and check if + VmaAllocationInfo::deviceMemory is not `VK_NULL_HANDLE`. - For details about supporting lost allocations, see Lost Allocations - chapter of User Guide on Main Page. + For details about supporting lost allocations, see Lost Allocations + chapter of User Guide on Main Page. - You should not use this flag together with #VMA_ALLOCATION_CREATE_MAPPED_BIT. - */ + You should not use this flag together with #VMA_ALLOCATION_CREATE_MAPPED_BIT. + */ VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT = 0x00000008, /** While creating allocation using this flag, other allocations that were - created with flag #VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT can become lost. + created with flag #VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT can become lost. - For details about supporting lost allocations, see Lost Allocations - chapter of User Guide on Main Page. - */ + For details about supporting lost allocations, see Lost Allocations + chapter of User Guide on Main Page. + */ VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT = 0x00000010, /** Set this flag to treat VmaAllocationCreateInfo::pUserData as pointer to a - null-terminated string. Instead of copying pointer value, a local copy of the - string is made and stored in allocation's `pUserData`. The string is automatically - freed together with the allocation. It is also used in vmaBuildStatsString(). - */ + null-terminated string. 
Instead of copying pointer value, a local copy of the + string is made and stored in allocation's `pUserData`. The string is automatically + freed together with the allocation. It is also used in vmaBuildStatsString(). + */ VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT = 0x00000020, /** Allocation will be created from upper stack in a double stack pool. - This flag is only allowed for custom pools created with #VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT flag. - */ + This flag is only allowed for custom pools created with #VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT flag. + */ VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT = 0x00000040, /** Create both buffer/image and allocation, but don't bind them together. - It is useful when you want to bind yourself to do some more advanced binding, e.g. using some extensions. - The flag is meaningful only with functions that bind by default: vmaCreateBuffer(), vmaCreateImage(). - Otherwise it is ignored. - */ + It is useful when you want to bind yourself to do some more advanced binding, e.g. using some extensions. + The flag is meaningful only with functions that bind by default: vmaCreateBuffer(), vmaCreateImage(). + Otherwise it is ignored. + */ VMA_ALLOCATION_CREATE_DONT_BIND_BIT = 0x00000080, + /** Create allocation only if additional device memory required for it, if any, won't exceed + memory budget. Otherwise return `VK_ERROR_OUT_OF_DEVICE_MEMORY`. + */ + VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT = 0x00000100, /** Allocation strategy that chooses smallest possible free range for the - allocation. - */ - VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT = 0x00010000, + allocation. + */ + VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT = 0x00010000, /** Allocation strategy that chooses biggest possible free range for the - allocation. - */ + allocation. + */ VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT = 0x00020000, /** Allocation strategy that chooses first suitable free range for the - allocation. + allocation. 
- "First" doesn't necessarily means the one with smallest offset in memory, - but rather the one that is easiest and fastest to find. - */ + "First" doesn't necessarily means the one with smallest offset in memory, + but rather the one that is easiest and fastest to find. + */ VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT = 0x00040000, /** Allocation strategy that tries to minimize memory usage. - */ + */ VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT = VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT, /** Allocation strategy that tries to minimize allocation time. - */ + */ VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT = VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT, /** Allocation strategy that tries to minimize memory fragmentation. - */ + */ VMA_ALLOCATION_CREATE_STRATEGY_MIN_FRAGMENTATION_BIT = VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT, /** A bit mask to extract only `STRATEGY` bits from entire set of flags. - */ + */ VMA_ALLOCATION_CREATE_STRATEGY_MASK = - VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT | - VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT | - VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT, + VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT | + VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT | + VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT, VMA_ALLOCATION_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF } VmaAllocationCreateFlagBits; typedef VkFlags VmaAllocationCreateFlags; -typedef struct VmaAllocationCreateInfo { +typedef struct VmaAllocationCreateInfo +{ /// Use #VmaAllocationCreateFlagBits enum. VmaAllocationCreateFlags flags; /** \brief Intended usage of memory. - - You can leave #VMA_MEMORY_USAGE_UNKNOWN if you specify memory requirements in other way. \n - If `pool` is not null, this member is ignored. - */ + + You can leave #VMA_MEMORY_USAGE_UNKNOWN if you specify memory requirements in other way. \n + If `pool` is not null, this member is ignored. + */ VmaMemoryUsage usage; /** \brief Flags that must be set in a Memory Type chosen for an allocation. 
- - Leave 0 if you specify memory requirements in other way. \n - If `pool` is not null, this member is ignored.*/ + + Leave 0 if you specify memory requirements in other way. \n + If `pool` is not null, this member is ignored.*/ VkMemoryPropertyFlags requiredFlags; /** \brief Flags that preferably should be set in a memory type chosen for an allocation. - - Set to 0 if no additional flags are prefered. \n - If `pool` is not null, this member is ignored. */ + + Set to 0 if no additional flags are prefered. \n + If `pool` is not null, this member is ignored. */ VkMemoryPropertyFlags preferredFlags; /** \brief Bitmask containing one bit set for every memory type acceptable for this allocation. - Value 0 is equivalent to `UINT32_MAX` - it means any memory type is accepted if - it meets other requirements specified by this structure, with no further - restrictions on memory type index. \n - If `pool` is not null, this member is ignored. - */ + Value 0 is equivalent to `UINT32_MAX` - it means any memory type is accepted if + it meets other requirements specified by this structure, with no further + restrictions on memory type index. \n + If `pool` is not null, this member is ignored. + */ uint32_t memoryTypeBits; /** \brief Pool that this allocation should be created in. - Leave `VK_NULL_HANDLE` to allocate from default pool. If not null, members: - `usage`, `requiredFlags`, `preferredFlags`, `memoryTypeBits` are ignored. - */ + Leave `VK_NULL_HANDLE` to allocate from default pool. If not null, members: + `usage`, `requiredFlags`, `preferredFlags`, `memoryTypeBits` are ignored. + */ VmaPool pool; /** \brief Custom general-purpose pointer that will be stored in #VmaAllocation, can be read as VmaAllocationInfo::pUserData and changed using vmaSetAllocationUserData(). - - If #VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT is used, it must be either - null or pointer to a null-terminated string. 
The string will be then copied to - internal buffer, so it doesn't need to be valid after allocation call. - */ - void *pUserData; + + If #VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT is used, it must be either + null or pointer to a null-terminated string. The string will be then copied to + internal buffer, so it doesn't need to be valid after allocation call. + */ + void* pUserData; } VmaAllocationCreateInfo; /** @@ -2209,11 +2574,11 @@ device doesn't support any memory type with requested features for the specific type of resource you want to use it for. Please check parameters of your resource, like image layout (OPTIMAL versus LINEAR) or mip level count. */ -VkResult vmaFindMemoryTypeIndex( - VmaAllocator allocator, - uint32_t memoryTypeBits, - const VmaAllocationCreateInfo *pAllocationCreateInfo, - uint32_t *pMemoryTypeIndex); +VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndex( + VmaAllocator allocator, + uint32_t memoryTypeBits, + const VmaAllocationCreateInfo* pAllocationCreateInfo, + uint32_t* pMemoryTypeIndex); /** \brief Helps to find memoryTypeIndex, given VkBufferCreateInfo and VmaAllocationCreateInfo. @@ -2227,11 +2592,11 @@ It is just a convenience function, equivalent to calling: - `vmaFindMemoryTypeIndex` - `vkDestroyBuffer` */ -VkResult vmaFindMemoryTypeIndexForBufferInfo( - VmaAllocator allocator, - const VkBufferCreateInfo *pBufferCreateInfo, - const VmaAllocationCreateInfo *pAllocationCreateInfo, - uint32_t *pMemoryTypeIndex); +VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForBufferInfo( + VmaAllocator allocator, + const VkBufferCreateInfo* pBufferCreateInfo, + const VmaAllocationCreateInfo* pAllocationCreateInfo, + uint32_t* pMemoryTypeIndex); /** \brief Helps to find memoryTypeIndex, given VkImageCreateInfo and VmaAllocationCreateInfo. 
@@ -2245,66 +2610,66 @@ It is just a convenience function, equivalent to calling: - `vmaFindMemoryTypeIndex` - `vkDestroyImage` */ -VkResult vmaFindMemoryTypeIndexForImageInfo( - VmaAllocator allocator, - const VkImageCreateInfo *pImageCreateInfo, - const VmaAllocationCreateInfo *pAllocationCreateInfo, - uint32_t *pMemoryTypeIndex); +VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForImageInfo( + VmaAllocator allocator, + const VkImageCreateInfo* pImageCreateInfo, + const VmaAllocationCreateInfo* pAllocationCreateInfo, + uint32_t* pMemoryTypeIndex); /// Flags to be passed as VmaPoolCreateInfo::flags. typedef enum VmaPoolCreateFlagBits { /** \brief Use this flag if you always allocate only buffers and linear images or only optimal images out of this pool and so Buffer-Image Granularity can be ignored. - This is an optional optimization flag. - - If you always allocate using vmaCreateBuffer(), vmaCreateImage(), - vmaAllocateMemoryForBuffer(), then you don't need to use it because allocator - knows exact type of your allocations so it can handle Buffer-Image Granularity - in the optimal way. - - If you also allocate using vmaAllocateMemoryForImage() or vmaAllocateMemory(), - exact type of such allocations is not known, so allocator must be conservative - in handling Buffer-Image Granularity, which can lead to suboptimal allocation - (wasted memory). In that case, if you can make sure you always allocate only - buffers and linear images or only optimal images out of this pool, use this flag - to make allocator disregard Buffer-Image Granularity and so make allocations - faster and more optimal. - */ + This is an optional optimization flag. + + If you always allocate using vmaCreateBuffer(), vmaCreateImage(), + vmaAllocateMemoryForBuffer(), then you don't need to use it because allocator + knows exact type of your allocations so it can handle Buffer-Image Granularity + in the optimal way. 
+ + If you also allocate using vmaAllocateMemoryForImage() or vmaAllocateMemory(), + exact type of such allocations is not known, so allocator must be conservative + in handling Buffer-Image Granularity, which can lead to suboptimal allocation + (wasted memory). In that case, if you can make sure you always allocate only + buffers and linear images or only optimal images out of this pool, use this flag + to make allocator disregard Buffer-Image Granularity and so make allocations + faster and more optimal. + */ VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT = 0x00000002, /** \brief Enables alternative, linear allocation algorithm in this pool. - Specify this flag to enable linear allocation algorithm, which always creates - new allocations after last one and doesn't reuse space from allocations freed in - between. It trades memory consumption for simplified algorithm and data - structure, which has better performance and uses less memory for metadata. + Specify this flag to enable linear allocation algorithm, which always creates + new allocations after last one and doesn't reuse space from allocations freed in + between. It trades memory consumption for simplified algorithm and data + structure, which has better performance and uses less memory for metadata. - By using this flag, you can achieve behavior of free-at-once, stack, - ring buffer, and double stack. For details, see documentation chapter - \ref linear_algorithm. + By using this flag, you can achieve behavior of free-at-once, stack, + ring buffer, and double stack. For details, see documentation chapter + \ref linear_algorithm. - When using this flag, you must specify VmaPoolCreateInfo::maxBlockCount == 1 (or 0 for default). + When using this flag, you must specify VmaPoolCreateInfo::maxBlockCount == 1 (or 0 for default). - For more details, see [Linear allocation algorithm](@ref linear_algorithm). - */ + For more details, see [Linear allocation algorithm](@ref linear_algorithm). 
+ */ VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT = 0x00000004, /** \brief Enables alternative, buddy allocation algorithm in this pool. - It operates on a tree of blocks, each having size that is a power of two and - a half of its parent's size. Comparing to default algorithm, this one provides - faster allocation and deallocation and decreased external fragmentation, - at the expense of more memory wasted (internal fragmentation). + It operates on a tree of blocks, each having size that is a power of two and + a half of its parent's size. Comparing to default algorithm, this one provides + faster allocation and deallocation and decreased external fragmentation, + at the expense of more memory wasted (internal fragmentation). - For more details, see [Buddy allocation algorithm](@ref buddy_algorithm). - */ + For more details, see [Buddy allocation algorithm](@ref buddy_algorithm). + */ VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT = 0x00000008, /** Bit mask to extract only `ALGORITHM` bits from entire set of flags. - */ + */ VMA_POOL_CREATE_ALGORITHM_MASK = - VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT | - VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT, + VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT | + VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT, VMA_POOL_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF } VmaPoolCreateFlagBits; @@ -2314,46 +2679,46 @@ typedef VkFlags VmaPoolCreateFlags; */ typedef struct VmaPoolCreateInfo { /** \brief Vulkan memory type index to allocate this pool from. - */ + */ uint32_t memoryTypeIndex; /** \brief Use combination of #VmaPoolCreateFlagBits. - */ + */ VmaPoolCreateFlags flags; /** \brief Size of a single `VkDeviceMemory` block to be allocated as part of this pool, in bytes. Optional. - Specify nonzero to set explicit, constant size of memory blocks used by this - pool. + Specify nonzero to set explicit, constant size of memory blocks used by this + pool. - Leave 0 to use default and let the library manage block sizes automatically. - Sizes of particular blocks may vary. 
- */ + Leave 0 to use default and let the library manage block sizes automatically. + Sizes of particular blocks may vary. + */ VkDeviceSize blockSize; /** \brief Minimum number of blocks to be always allocated in this pool, even if they stay empty. - Set to 0 to have no preallocated blocks and allow the pool be completely empty. - */ + Set to 0 to have no preallocated blocks and allow the pool be completely empty. + */ size_t minBlockCount; /** \brief Maximum number of blocks that can be allocated in this pool. Optional. - Set to 0 to use default, which is `SIZE_MAX`, which means no limit. - - Set to same value as VmaPoolCreateInfo::minBlockCount to have fixed amount of memory allocated - throughout whole lifetime of this pool. - */ + Set to 0 to use default, which is `SIZE_MAX`, which means no limit. + + Set to same value as VmaPoolCreateInfo::minBlockCount to have fixed amount of memory allocated + throughout whole lifetime of this pool. + */ size_t maxBlockCount; /** \brief Maximum number of additional frames that are in use at the same time as current frame. - This value is used only when you make allocations with - #VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT flag. Such allocation cannot become - lost if allocation.lastUseFrameIndex >= allocator.currentFrameIndex - frameInUseCount. + This value is used only when you make allocations with + #VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT flag. Such allocation cannot become + lost if allocation.lastUseFrameIndex >= allocator.currentFrameIndex - frameInUseCount. - For example, if you double-buffer your command buffers, so resources used for - rendering in previous frame may still be in use by the GPU at the moment you - allocate resources needed for the current frame, set this value to 1. + For example, if you double-buffer your command buffers, so resources used for + rendering in previous frame may still be in use by the GPU at the moment you + allocate resources needed for the current frame, set this value to 1. 
- If you want to allow any allocations other than used in the current frame to - become lost, set this value to 0. - */ + If you want to allow any allocations other than used in the current frame to + become lost, set this value to 0. + */ uint32_t frameInUseCount; } VmaPoolCreateInfo; @@ -2361,26 +2726,26 @@ typedef struct VmaPoolCreateInfo { */ typedef struct VmaPoolStats { /** \brief Total amount of `VkDeviceMemory` allocated from Vulkan for this pool, in bytes. - */ + */ VkDeviceSize size; /** \brief Total number of bytes in the pool not used by any #VmaAllocation. - */ + */ VkDeviceSize unusedSize; /** \brief Number of #VmaAllocation objects created from this pool that were not destroyed or lost. - */ + */ size_t allocationCount; /** \brief Number of continuous memory ranges in the pool not used by any #VmaAllocation. - */ + */ size_t unusedRangeCount; /** \brief Size of the largest continuous free memory region available for new allocation. - Making a new allocation of that size is not guaranteed to succeed because of - possible additional margin required to respect alignment and buffer/image - granularity. - */ + Making a new allocation of that size is not guaranteed to succeed because of + possible additional margin required to respect alignment and buffer/image + granularity. + */ VkDeviceSize unusedRangeSizeMax; /** \brief Number of `VkDeviceMemory` blocks allocated for this pool. - */ + */ size_t blockCount; } VmaPoolStats; @@ -2390,16 +2755,16 @@ typedef struct VmaPoolStats { @param pCreateInfo Parameters of pool to create. @param[out] pPool Handle to created pool. */ -VkResult vmaCreatePool( - VmaAllocator allocator, - const VmaPoolCreateInfo *pCreateInfo, - VmaPool *pPool); +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreatePool( + VmaAllocator allocator, + const VmaPoolCreateInfo* pCreateInfo, + VmaPool* pPool); /** \brief Destroys #VmaPool object and frees Vulkan device memory. 
*/ -void vmaDestroyPool( - VmaAllocator allocator, - VmaPool pool); +VMA_CALL_PRE void VMA_CALL_POST vmaDestroyPool( + VmaAllocator allocator, + VmaPool pool); /** \brief Retrieves statistics of existing #VmaPool object. @@ -2407,10 +2772,10 @@ void vmaDestroyPool( @param pool Pool object. @param[out] pPoolStats Statistics of specified pool. */ -void vmaGetPoolStats( - VmaAllocator allocator, - VmaPool pool, - VmaPoolStats *pPoolStats); +VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolStats( + VmaAllocator allocator, + VmaPool pool, + VmaPoolStats* pPoolStats); /** \brief Marks all allocations in given pool as lost if they are not used in current frame or VmaPoolCreateInfo::frameInUseCount back from now. @@ -2418,10 +2783,10 @@ void vmaGetPoolStats( @param pool Pool. @param[out] pLostAllocationCount Number of allocations marked as lost. Optional - pass null if you don't need this information. */ -void vmaMakePoolAllocationsLost( - VmaAllocator allocator, - VmaPool pool, - size_t *pLostAllocationCount); +VMA_CALL_PRE void VMA_CALL_POST vmaMakePoolAllocationsLost( + VmaAllocator allocator, + VmaPool pool, + size_t* pLostAllocationCount); /** \brief Checks magic number in margins around all allocations in given memory pool in search for corruptions. @@ -2437,7 +2802,28 @@ Possible return values: `VMA_ASSERT` is also fired in that case. - Other value: Error returned by Vulkan, e.g. memory mapping failure. */ -VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool); +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool); + +/** \brief Retrieves name of a custom pool. + +After the call `ppName` is either null or points to an internally-owned null-terminated string +containing name of the pool that was previously set. The pointer becomes invalid when the pool is +destroyed or its name is changed using vmaSetPoolName(). 
+*/ +VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolName( + VmaAllocator allocator, + VmaPool pool, + const char** ppName); + +/** \brief Sets name of a custom pool. + +`pName` can be either null or pointer to a null-terminated string with new name for the pool. +Function makes internal copy of the string, so it can be changed or freed immediately after this call. +*/ +VMA_CALL_PRE void VMA_CALL_POST vmaSetPoolName( + VmaAllocator allocator, + VmaPool pool, + const char* pName); /** \struct VmaAllocation \brief Represents single memory allocation. @@ -2469,43 +2855,43 @@ VK_DEFINE_HANDLE(VmaAllocation) */ typedef struct VmaAllocationInfo { /** \brief Memory type index that this allocation was allocated from. - - It never changes. - */ + + It never changes. + */ uint32_t memoryType; /** \brief Handle to Vulkan memory object. - Same memory object can be shared by multiple allocations. - - It can change after call to vmaDefragment() if this allocation is passed to the function, or if allocation is lost. + Same memory object can be shared by multiple allocations. + + It can change after call to vmaDefragment() if this allocation is passed to the function, or if allocation is lost. - If the allocation is lost, it is equal to `VK_NULL_HANDLE`. - */ + If the allocation is lost, it is equal to `VK_NULL_HANDLE`. + */ VkDeviceMemory deviceMemory; /** \brief Offset into deviceMemory object to the beginning of this allocation, in bytes. (deviceMemory, offset) pair is unique to this allocation. - It can change after call to vmaDefragment() if this allocation is passed to the function, or if allocation is lost. - */ + It can change after call to vmaDefragment() if this allocation is passed to the function, or if allocation is lost. + */ VkDeviceSize offset; /** \brief Size of this allocation, in bytes. - It never changes, unless allocation is lost. - */ + It never changes, unless allocation is lost. 
+ */ VkDeviceSize size; /** \brief Pointer to the beginning of this allocation as mapped data. - If the allocation hasn't been mapped using vmaMapMemory() and hasn't been - created with #VMA_ALLOCATION_CREATE_MAPPED_BIT flag, this value null. + If the allocation hasn't been mapped using vmaMapMemory() and hasn't been + created with #VMA_ALLOCATION_CREATE_MAPPED_BIT flag, this value null. - It can change after call to vmaMapMemory(), vmaUnmapMemory(). - It can also change after call to vmaDefragment() if this allocation is passed to the function. - */ - void *pMappedData; + It can change after call to vmaMapMemory(), vmaUnmapMemory(). + It can also change after call to vmaDefragment() if this allocation is passed to the function. + */ + void* pMappedData; /** \brief Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vmaSetAllocationUserData(). - It can change after call to vmaSetAllocationUserData() for this allocation. - */ - void *pUserData; + It can change after call to vmaSetAllocationUserData() for this allocation. + */ + void* pUserData; } VmaAllocationInfo; /** \brief General purpose memory allocation. @@ -2518,12 +2904,12 @@ You should free the memory using vmaFreeMemory() or vmaFreeMemoryPages(). It is recommended to use vmaAllocateMemoryForBuffer(), vmaAllocateMemoryForImage(), vmaCreateBuffer(), vmaCreateImage() instead whenever possible. */ -VkResult vmaAllocateMemory( - VmaAllocator allocator, - const VkMemoryRequirements *pVkMemoryRequirements, - const VmaAllocationCreateInfo *pCreateInfo, - VmaAllocation *pAllocation, - VmaAllocationInfo *pAllocationInfo); +VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemory( + VmaAllocator allocator, + const VkMemoryRequirements* pVkMemoryRequirements, + const VmaAllocationCreateInfo* pCreateInfo, + VmaAllocation* pAllocation, + VmaAllocationInfo* pAllocationInfo); /** \brief General purpose memory allocation for multiple allocation objects at once. 
@@ -2544,13 +2930,13 @@ All allocations are made using same parameters. All of them are created out of t If any allocation fails, all allocations already made within this function call are also freed, so that when returned result is not `VK_SUCCESS`, `pAllocation` array is always entirely filled with `VK_NULL_HANDLE`. */ -VkResult vmaAllocateMemoryPages( - VmaAllocator allocator, - const VkMemoryRequirements *pVkMemoryRequirements, - const VmaAllocationCreateInfo *pCreateInfo, - size_t allocationCount, - VmaAllocation *pAllocations, - VmaAllocationInfo *pAllocationInfo); +VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryPages( + VmaAllocator allocator, + const VkMemoryRequirements* pVkMemoryRequirements, + const VmaAllocationCreateInfo* pCreateInfo, + size_t allocationCount, + VmaAllocation* pAllocations, + VmaAllocationInfo* pAllocationInfo); /** @param[out] pAllocation Handle to allocated memory. @@ -2558,28 +2944,28 @@ VkResult vmaAllocateMemoryPages( You should free the memory using vmaFreeMemory(). */ -VkResult vmaAllocateMemoryForBuffer( - VmaAllocator allocator, - VkBuffer buffer, - const VmaAllocationCreateInfo *pCreateInfo, - VmaAllocation *pAllocation, - VmaAllocationInfo *pAllocationInfo); +VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForBuffer( + VmaAllocator allocator, + VkBuffer buffer, + const VmaAllocationCreateInfo* pCreateInfo, + VmaAllocation* pAllocation, + VmaAllocationInfo* pAllocationInfo); /// Function similar to vmaAllocateMemoryForBuffer(). 
-VkResult vmaAllocateMemoryForImage( - VmaAllocator allocator, - VkImage image, - const VmaAllocationCreateInfo *pCreateInfo, - VmaAllocation *pAllocation, - VmaAllocationInfo *pAllocationInfo); +VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForImage( + VmaAllocator allocator, + VkImage image, + const VmaAllocationCreateInfo* pCreateInfo, + VmaAllocation* pAllocation, + VmaAllocationInfo* pAllocationInfo); /** \brief Frees memory previously allocated using vmaAllocateMemory(), vmaAllocateMemoryForBuffer(), or vmaAllocateMemoryForImage(). Passing `VK_NULL_HANDLE` as `allocation` is valid. Such function call is just skipped. */ -void vmaFreeMemory( - VmaAllocator allocator, - VmaAllocation allocation); +VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemory( + VmaAllocator allocator, + VmaAllocation allocation); /** \brief Frees memory and destroys multiple allocations. @@ -2591,35 +2977,22 @@ It may be internally optimized to be more efficient than calling vmaFreeMemory() Allocations in `pAllocations` array can come from any memory pools and types. Passing `VK_NULL_HANDLE` as elements of `pAllocations` array is valid. Such entries are just skipped. */ -void vmaFreeMemoryPages( - VmaAllocator allocator, - size_t allocationCount, - VmaAllocation *pAllocations); - -/** \brief Tries to resize an allocation in place, if there is enough free memory after it. +VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemoryPages( + VmaAllocator allocator, + size_t allocationCount, + VmaAllocation* pAllocations); -Tries to change allocation's size without moving or reallocating it. -You can both shrink and grow allocation size. -When growing, it succeeds only when the allocation belongs to a memory block with enough -free space after it. +/** \brief Deprecated. -Returns `VK_SUCCESS` if allocation's size has been successfully changed. -Returns `VK_ERROR_OUT_OF_POOL_MEMORY` if allocation's size could not be changed. 
- -After successful call to this function, VmaAllocationInfo::size of this allocation changes. -All other parameters stay the same: memory pool and type, alignment, offset, mapped pointer. - -- Calling this function on allocation that is in lost state fails with result `VK_ERROR_VALIDATION_FAILED_EXT`. -- Calling this function with `newSize` same as current allocation size does nothing and returns `VK_SUCCESS`. -- Resizing dedicated allocations, as well as allocations created in pools that use linear - or buddy algorithm, is not supported. - The function returns `VK_ERROR_FEATURE_NOT_PRESENT` in such cases. - Support may be added in the future. +\deprecated +In version 2.2.0 it used to try to change allocation's size without moving or reallocating it. +In current version it returns `VK_SUCCESS` only if `newSize` equals current allocation's size. +Otherwise returns `VK_ERROR_OUT_OF_POOL_MEMORY`, indicating that allocation's size could not be changed. */ -VkResult vmaResizeAllocation( - VmaAllocator allocator, - VmaAllocation allocation, - VkDeviceSize newSize); +VMA_CALL_PRE VkResult VMA_CALL_POST vmaResizeAllocation( + VmaAllocator allocator, + VmaAllocation allocation, + VkDeviceSize newSize); /** \brief Returns current information about specified allocation and atomically marks it as used in current frame. @@ -2637,10 +3010,10 @@ you can avoid calling it too often. (e.g. due to defragmentation or allocation becoming lost). - If you just want to check if allocation is not lost, vmaTouchAllocation() will work faster. */ -void vmaGetAllocationInfo( - VmaAllocator allocator, - VmaAllocation allocation, - VmaAllocationInfo *pAllocationInfo); +VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationInfo( + VmaAllocator allocator, + VmaAllocation allocation, + VmaAllocationInfo* pAllocationInfo); /** \brief Returns `VK_TRUE` if allocation is not lost and atomically marks it as used in current frame. 
@@ -2656,9 +3029,9 @@ Lost allocation and the buffer/image still need to be destroyed. If the allocation has been created without #VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT flag, this function always returns `VK_TRUE`. */ -VkBool32 vmaTouchAllocation( - VmaAllocator allocator, - VmaAllocation allocation); +VMA_CALL_PRE VkBool32 VMA_CALL_POST vmaTouchAllocation( + VmaAllocator allocator, + VmaAllocation allocation); /** \brief Sets pUserData in given allocation to new value. @@ -2673,10 +3046,10 @@ If the flag was not used, the value of pointer `pUserData` is just copied to allocation's `pUserData`. It is opaque, so you can use it however you want - e.g. as a pointer, ordinal number or some handle to you own data. */ -void vmaSetAllocationUserData( - VmaAllocator allocator, - VmaAllocation allocation, - void *pUserData); +VMA_CALL_PRE void VMA_CALL_POST vmaSetAllocationUserData( + VmaAllocator allocator, + VmaAllocation allocation, + void* pUserData); /** \brief Creates new allocation that is in lost state from the beginning. @@ -2688,9 +3061,9 @@ Returned allocation is not tied to any specific memory pool or memory type and not bound to any image or buffer. It has size = 0. It cannot be turned into a real, non-empty allocation. */ -void vmaCreateLostAllocation( - VmaAllocator allocator, - VmaAllocation *pAllocation); +VMA_CALL_PRE void VMA_CALL_POST vmaCreateLostAllocation( + VmaAllocator allocator, + VmaAllocation* pAllocation); /** \brief Maps memory represented by given allocation and returns pointer to it. @@ -2725,23 +3098,33 @@ This function fails when used on allocation made in memory type that is not This function always fails when called for allocation that was created with #VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT flag. Such allocations cannot be mapped. + +This function doesn't automatically flush or invalidate caches. 
+If the allocation is made from a memory types that is not `HOST_COHERENT`, +you also need to use vmaInvalidateAllocation() / vmaFlushAllocation(), as required by Vulkan specification. */ -VkResult vmaMapMemory( - VmaAllocator allocator, - VmaAllocation allocation, - void **ppData); +VMA_CALL_PRE VkResult VMA_CALL_POST vmaMapMemory( + VmaAllocator allocator, + VmaAllocation allocation, + void** ppData); /** \brief Unmaps memory represented by given allocation, mapped previously using vmaMapMemory(). For details, see description of vmaMapMemory(). + +This function doesn't automatically flush or invalidate caches. +If the allocation is made from a memory types that is not `HOST_COHERENT`, +you also need to use vmaInvalidateAllocation() / vmaFlushAllocation(), as required by Vulkan specification. */ -void vmaUnmapMemory( - VmaAllocator allocator, - VmaAllocation allocation); +VMA_CALL_PRE void VMA_CALL_POST vmaUnmapMemory( + VmaAllocator allocator, + VmaAllocation allocation); /** \brief Flushes memory of given allocation. Calls `vkFlushMappedMemoryRanges()` for memory associated with given range of given allocation. +It needs to be called after writing to a mapped memory for memory types that are not `HOST_COHERENT`. +Unmap operation doesn't do that automatically. - `offset` must be relative to the beginning of allocation. - `size` can be `VK_WHOLE_SIZE`. It means all memory from `offset` the the end of given allocation. @@ -2755,11 +3138,13 @@ Warning! `offset` and `size` are relative to the contents of given `allocation`. If you mean whole allocation, you can pass 0 and `VK_WHOLE_SIZE`, respectively. Do not pass allocation's offset as `offset`!!! */ -void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size); +VMA_CALL_PRE void VMA_CALL_POST vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size); /** \brief Invalidates memory of given allocation. 
Calls `vkInvalidateMappedMemoryRanges()` for memory associated with given range of given allocation. +It needs to be called before reading from a mapped memory for memory types that are not `HOST_COHERENT`. +Map operation doesn't do that automatically. - `offset` must be relative to the beginning of allocation. - `size` can be `VK_WHOLE_SIZE`. It means all memory from `offset` the the end of given allocation. @@ -2773,7 +3158,7 @@ Warning! `offset` and `size` are relative to the contents of given `allocation`. If you mean whole allocation, you can pass 0 and `VK_WHOLE_SIZE`, respectively. Do not pass allocation's offset as `offset`!!! */ -void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size); +VMA_CALL_PRE void VMA_CALL_POST vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size); /** \brief Checks magic number in margins around all allocations in given memory types (in both default and custom pools) in search for corruptions. @@ -2791,7 +3176,7 @@ Possible return values: `VMA_ASSERT` is also fired in that case. - Other value: Error returned by Vulkan, e.g. memory mapping failure. */ -VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits); +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits); /** \struct VmaDefragmentationContext \brief Represents Opaque object that represents started defragmentation process. @@ -2803,6 +3188,7 @@ VK_DEFINE_HANDLE(VmaDefragmentationContext) /// Flags to be used in vmaDefragmentationBegin(). None at the moment. Reserved for future use. typedef enum VmaDefragmentationFlagBits { + VMA_DEFRAGMENTATION_FLAG_INCREMENTAL = 0x1, VMA_DEFRAGMENTATION_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF } VmaDefragmentationFlagBits; typedef VkFlags VmaDefragmentationFlags; @@ -2813,90 +3199,105 @@ To be used with function vmaDefragmentationBegin(). 
*/ typedef struct VmaDefragmentationInfo2 { /** \brief Reserved for future use. Should be 0. - */ + */ VmaDefragmentationFlags flags; /** \brief Number of allocations in `pAllocations` array. - */ + */ uint32_t allocationCount; /** \brief Pointer to array of allocations that can be defragmented. - The array should have `allocationCount` elements. - The array should not contain nulls. - Elements in the array should be unique - same allocation cannot occur twice. - It is safe to pass allocations that are in the lost state - they are ignored. - All allocations not present in this array are considered non-moveable during this defragmentation. - */ - VmaAllocation *pAllocations; + The array should have `allocationCount` elements. + The array should not contain nulls. + Elements in the array should be unique - same allocation cannot occur twice. + It is safe to pass allocations that are in the lost state - they are ignored. + All allocations not present in this array are considered non-moveable during this defragmentation. + */ + VmaAllocation* pAllocations; /** \brief Optional, output. Pointer to array that will be filled with information whether the allocation at certain index has been changed during defragmentation. - The array should have `allocationCount` elements. - You can pass null if you are not interested in this information. - */ - VkBool32 *pAllocationsChanged; + The array should have `allocationCount` elements. + You can pass null if you are not interested in this information. + */ + VkBool32* pAllocationsChanged; /** \brief Numer of pools in `pPools` array. - */ + */ uint32_t poolCount; /** \brief Either null or pointer to array of pools to be defragmented. 
- All the allocations in the specified pools can be moved during defragmentation - and there is no way to check if they were really moved as in `pAllocationsChanged`, - so you must query all the allocations in all these pools for new `VkDeviceMemory` - and offset using vmaGetAllocationInfo() if you might need to recreate buffers - and images bound to them. + All the allocations in the specified pools can be moved during defragmentation + and there is no way to check if they were really moved as in `pAllocationsChanged`, + so you must query all the allocations in all these pools for new `VkDeviceMemory` + and offset using vmaGetAllocationInfo() if you might need to recreate buffers + and images bound to them. - The array should have `poolCount` elements. - The array should not contain nulls. - Elements in the array should be unique - same pool cannot occur twice. + The array should have `poolCount` elements. + The array should not contain nulls. + Elements in the array should be unique - same pool cannot occur twice. - Using this array is equivalent to specifying all allocations from the pools in `pAllocations`. - It might be more efficient. - */ - VmaPool *pPools; + Using this array is equivalent to specifying all allocations from the pools in `pAllocations`. + It might be more efficient. + */ + VmaPool* pPools; /** \brief Maximum total numbers of bytes that can be copied while moving allocations to different places using transfers on CPU side, like `memcpy()`, `memmove()`. - - `VK_WHOLE_SIZE` means no limit. - */ + + `VK_WHOLE_SIZE` means no limit. + */ VkDeviceSize maxCpuBytesToMove; /** \brief Maximum number of allocations that can be moved to a different place using transfers on CPU side, like `memcpy()`, `memmove()`. - `UINT32_MAX` means no limit. - */ + `UINT32_MAX` means no limit. 
+ */ uint32_t maxCpuAllocationsToMove; /** \brief Maximum total numbers of bytes that can be copied while moving allocations to different places using transfers on GPU side, posted to `commandBuffer`. - - `VK_WHOLE_SIZE` means no limit. - */ + + `VK_WHOLE_SIZE` means no limit. + */ VkDeviceSize maxGpuBytesToMove; /** \brief Maximum number of allocations that can be moved to a different place using transfers on GPU side, posted to `commandBuffer`. - `UINT32_MAX` means no limit. - */ + `UINT32_MAX` means no limit. + */ uint32_t maxGpuAllocationsToMove; /** \brief Optional. Command buffer where GPU copy commands will be posted. - If not null, it must be a valid command buffer handle that supports Transfer queue type. - It must be in the recording state and outside of a render pass instance. - You need to submit it and make sure it finished execution before calling vmaDefragmentationEnd(). + If not null, it must be a valid command buffer handle that supports Transfer queue type. + It must be in the recording state and outside of a render pass instance. + You need to submit it and make sure it finished execution before calling vmaDefragmentationEnd(). - Passing null means that only CPU defragmentation will be performed. - */ + Passing null means that only CPU defragmentation will be performed. + */ VkCommandBuffer commandBuffer; } VmaDefragmentationInfo2; +typedef struct VmaDefragmentationPassMoveInfo { + VmaAllocation allocation; + VkDeviceMemory memory; + VkDeviceSize offset; +} VmaDefragmentationPassMoveInfo; + +/** \brief Parameters for incremental defragmentation steps. + +To be used with function vmaBeginDefragmentationPass(). +*/ +typedef struct VmaDefragmentationPassInfo { + uint32_t moveCount; + VmaDefragmentationPassMoveInfo* pMoves; +} VmaDefragmentationPassInfo; + /** \brief Deprecated. Optional configuration parameters to be passed to function vmaDefragment(). \deprecated This is a part of the old interface. 
It is recommended to use structure #VmaDefragmentationInfo2 and function vmaDefragmentationBegin() instead. */ typedef struct VmaDefragmentationInfo { /** \brief Maximum total numbers of bytes that can be copied while moving allocations to different places. - - Default is `VK_WHOLE_SIZE`, which means no limit. - */ + + Default is `VK_WHOLE_SIZE`, which means no limit. + */ VkDeviceSize maxBytesToMove; /** \brief Maximum number of allocations that can be moved to different place. - Default is `UINT32_MAX`, which means no limit. - */ + Default is `UINT32_MAX`, which means no limit. + */ uint32_t maxAllocationsToMove; } VmaDefragmentationInfo; @@ -2941,20 +3342,30 @@ Warning! Between the call to vmaDefragmentationBegin() and vmaDefragmentationEnd For more information and important limitations regarding defragmentation, see documentation chapter: [Defragmentation](@ref defragmentation). */ -VkResult vmaDefragmentationBegin( - VmaAllocator allocator, - const VmaDefragmentationInfo2 *pInfo, - VmaDefragmentationStats *pStats, - VmaDefragmentationContext *pContext); +VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragmentationBegin( + VmaAllocator allocator, + const VmaDefragmentationInfo2* pInfo, + VmaDefragmentationStats* pStats, + VmaDefragmentationContext *pContext); /** \brief Ends defragmentation process. Use this function to finish defragmentation started by vmaDefragmentationBegin(). It is safe to pass `context == null`. The function then does nothing. 
*/ -VkResult vmaDefragmentationEnd( - VmaAllocator allocator, - VmaDefragmentationContext context); +VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragmentationEnd( + VmaAllocator allocator, + VmaDefragmentationContext context); + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaBeginDefragmentationPass( + VmaAllocator allocator, + VmaDefragmentationContext context, + VmaDefragmentationPassInfo* pInfo +); +VMA_CALL_PRE VkResult VMA_CALL_POST vmaEndDefragmentationPass( + VmaAllocator allocator, + VmaDefragmentationContext context +); /** \brief Deprecated. Compacts memory by moving allocations. @@ -2996,13 +3407,13 @@ you should measure that on your platform. For more information, see [Defragmentation](@ref defragmentation) chapter. */ -VkResult vmaDefragment( - VmaAllocator allocator, - VmaAllocation *pAllocations, - size_t allocationCount, - VkBool32 *pAllocationsChanged, - const VmaDefragmentationInfo *pDefragmentationInfo, - VmaDefragmentationStats *pDefragmentationStats); +VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragment( + VmaAllocator allocator, + VmaAllocation* pAllocations, + size_t allocationCount, + VkBool32* pAllocationsChanged, + const VmaDefragmentationInfo *pDefragmentationInfo, + VmaDefragmentationStats* pDefragmentationStats); /** \brief Binds buffer to allocation. @@ -3016,10 +3427,27 @@ allocations, calls to `vkBind*Memory()` or `vkMapMemory()` won't happen from mul It is recommended to use function vmaCreateBuffer() instead of this one. */ -VkResult vmaBindBufferMemory( - VmaAllocator allocator, - VmaAllocation allocation, - VkBuffer buffer); +VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory( + VmaAllocator allocator, + VmaAllocation allocation, + VkBuffer buffer); + +/** \brief Binds buffer to allocation with additional parameters. + +@param allocationLocalOffset Additional offset to be added while binding, relative to the beginnig of the `allocation`. Normally it should be 0. 
+@param pNext A chain of structures to be attached to `VkBindBufferMemoryInfoKHR` structure used internally. Normally it should be null. + +This function is similar to vmaBindBufferMemory(), but it provides additional parameters. + +If `pNext` is not null, #VmaAllocator object must have been created with #VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT flag +or with VmaAllocatorCreateInfo::vulkanApiVersion `== VK_API_VERSION_1_1`. Otherwise the call fails. +*/ +VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory2( + VmaAllocator allocator, + VmaAllocation allocation, + VkDeviceSize allocationLocalOffset, + VkBuffer buffer, + const void* pNext); /** \brief Binds image to allocation. @@ -3033,10 +3461,27 @@ allocations, calls to `vkBind*Memory()` or `vkMapMemory()` won't happen from mul It is recommended to use function vmaCreateImage() instead of this one. */ -VkResult vmaBindImageMemory( - VmaAllocator allocator, - VmaAllocation allocation, - VkImage image); +VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory( + VmaAllocator allocator, + VmaAllocation allocation, + VkImage image); + +/** \brief Binds image to allocation with additional parameters. + +@param allocationLocalOffset Additional offset to be added while binding, relative to the beginnig of the `allocation`. Normally it should be 0. +@param pNext A chain of structures to be attached to `VkBindImageMemoryInfoKHR` structure used internally. Normally it should be null. + +This function is similar to vmaBindImageMemory(), but it provides additional parameters. + +If `pNext` is not null, #VmaAllocator object must have been created with #VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT flag +or with VmaAllocatorCreateInfo::vulkanApiVersion `== VK_API_VERSION_1_1`. Otherwise the call fails. 
+*/ +VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory2( + VmaAllocator allocator, + VmaAllocation allocation, + VkDeviceSize allocationLocalOffset, + VkImage image, + const void* pNext); /** @param[out] pBuffer Buffer that was created. @@ -3064,13 +3509,13 @@ and VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT is not used), it creates dedicated allocation for this buffer, just like when using VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT. */ -VkResult vmaCreateBuffer( - VmaAllocator allocator, - const VkBufferCreateInfo *pBufferCreateInfo, - const VmaAllocationCreateInfo *pAllocationCreateInfo, - VkBuffer *pBuffer, - VmaAllocation *pAllocation, - VmaAllocationInfo *pAllocationInfo); +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBuffer( + VmaAllocator allocator, + const VkBufferCreateInfo* pBufferCreateInfo, + const VmaAllocationCreateInfo* pAllocationCreateInfo, + VkBuffer* pBuffer, + VmaAllocation* pAllocation, + VmaAllocationInfo* pAllocationInfo); /** \brief Destroys Vulkan buffer and frees allocated memory. @@ -3083,19 +3528,19 @@ vmaFreeMemory(allocator, allocation); It it safe to pass null as buffer and/or allocation. */ -void vmaDestroyBuffer( - VmaAllocator allocator, - VkBuffer buffer, - VmaAllocation allocation); +VMA_CALL_PRE void VMA_CALL_POST vmaDestroyBuffer( + VmaAllocator allocator, + VkBuffer buffer, + VmaAllocation allocation); /// Function similar to vmaCreateBuffer(). -VkResult vmaCreateImage( - VmaAllocator allocator, - const VkImageCreateInfo *pImageCreateInfo, - const VmaAllocationCreateInfo *pAllocationCreateInfo, - VkImage *pImage, - VmaAllocation *pAllocation, - VmaAllocationInfo *pAllocationInfo); +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateImage( + VmaAllocator allocator, + const VkImageCreateInfo* pImageCreateInfo, + const VmaAllocationCreateInfo* pAllocationCreateInfo, + VkImage* pImage, + VmaAllocation* pAllocation, + VmaAllocationInfo* pAllocationInfo); /** \brief Destroys Vulkan image and frees allocated memory. 
@@ -3108,10 +3553,10 @@ vmaFreeMemory(allocator, allocation); It it safe to pass null as image and/or allocation. */ -void vmaDestroyImage( - VmaAllocator allocator, - VkImage image, - VmaAllocation allocation); +VMA_CALL_PRE void VMA_CALL_POST vmaDestroyImage( + VmaAllocator allocator, + VkImage image, + VmaAllocation allocation); #ifdef __cplusplus } @@ -3130,6 +3575,7 @@ void vmaDestroyImage( #include <cstdint> #include <cstdlib> #include <cstring> +#include <utility> /******************************************************************************* CONFIGURATION SECTION @@ -3142,7 +3588,7 @@ here if you need other then default behavior depending on your environment. Define this macro to 1 to make the library fetch pointers to Vulkan functions internally, like: - vulkanFunctions.vkAllocateMemory = &vkAllocateMemory; + vulkanFunctions.vkAllocateMemory = &vkAllocateMemory; Define to 0 if you are going to provide you own pointers to Vulkan functions via VmaAllocatorCreateInfo::pVulkanFunctions. @@ -3161,23 +3607,23 @@ Set it to 0 or undefined to make the library using its own implementation of the containers. */ #if VMA_USE_STL_CONTAINERS -#define VMA_USE_STL_VECTOR 1 -#define VMA_USE_STL_UNORDERED_MAP 1 -#define VMA_USE_STL_LIST 1 + #define VMA_USE_STL_VECTOR 1 + #define VMA_USE_STL_UNORDERED_MAP 1 + #define VMA_USE_STL_LIST 1 #endif #ifndef VMA_USE_STL_SHARED_MUTEX -// Compiler conforms to C++17. -#if __cplusplus >= 201703L -#define VMA_USE_STL_SHARED_MUTEX 1 -// Visual studio defines __cplusplus properly only when passed additional parameter: /Zc:__cplusplus -// Otherwise it's always 199711L, despite shared_mutex works since Visual Studio 2015 Update 2. 
-// See: https://blogs.msdn.microsoft.com/vcblog/2018/04/09/msvc-now-correctly-reports-__cplusplus/ -#elif defined(_MSC_FULL_VER) && _MSC_FULL_VER >= 190023918 && __cplusplus == 199711L && _MSVC_LANG >= 201703L -#define VMA_USE_STL_SHARED_MUTEX 1 -#else -#define VMA_USE_STL_SHARED_MUTEX 0 -#endif + // Compiler conforms to C++17. + #if __cplusplus >= 201703L + #define VMA_USE_STL_SHARED_MUTEX 1 + // Visual studio defines __cplusplus properly only when passed additional parameter: /Zc:__cplusplus + // Otherwise it's always 199711L, despite shared_mutex works since Visual Studio 2015 Update 2. + // See: https://blogs.msdn.microsoft.com/vcblog/2018/04/09/msvc-now-correctly-reports-__cplusplus/ + #elif defined(_MSC_FULL_VER) && _MSC_FULL_VER >= 190023918 && __cplusplus == 199711L && _MSVC_LANG >= 201703L + #define VMA_USE_STL_SHARED_MUTEX 1 + #else + #define VMA_USE_STL_SHARED_MUTEX 0 + #endif #endif /* @@ -3185,50 +3631,54 @@ THESE INCLUDES ARE NOT ENABLED BY DEFAULT. Library has its own container implementation. */ #if VMA_USE_STL_VECTOR -#include <vector> + #include <vector> #endif #if VMA_USE_STL_UNORDERED_MAP -#include <unordered_map> + #include <unordered_map> #endif #if VMA_USE_STL_LIST -#include <list> + #include <list> #endif /* Following headers are used in this CONFIGURATION section only, so feel free to remove them if not needed. */ -#include <algorithm> // for min, max #include <cassert> // for assert +#include <algorithm> // for min, max #include <mutex> #ifndef VMA_NULL -// Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0. -#define VMA_NULL nullptr + // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0. 
+ #define VMA_NULL nullptr #endif #if defined(__ANDROID_API__) && (__ANDROID_API__ < 16) #include <cstdlib> -void *aligned_alloc(size_t alignment, size_t size) { +void *aligned_alloc(size_t alignment, size_t size) +{ // alignment must be >= sizeof(void*) - if (alignment < sizeof(void *)) { - alignment = sizeof(void *); + if(alignment < sizeof(void*)) + { + alignment = sizeof(void*); } return memalign(alignment, size); } -#elif defined(__APPLE__) || defined(__ANDROID__) +#elif defined(__APPLE__) || defined(__ANDROID__) || (defined(__linux__) && defined(__GLIBCXX__) && !defined(_GLIBCXX_HAVE_ALIGNED_ALLOC)) #include <cstdlib> -void *aligned_alloc(size_t alignment, size_t size) { +void *aligned_alloc(size_t alignment, size_t size) +{ // alignment must be >= sizeof(void*) - if (alignment < sizeof(void *)) { - alignment = sizeof(void *); + if(alignment < sizeof(void*)) + { + alignment = sizeof(void*); } void *pointer; - if (posix_memalign(&pointer, alignment, size) == 0) + if(posix_memalign(&pointer, alignment, size) == 0) return pointer; return VMA_NULL; } @@ -3241,226 +3691,236 @@ void *aligned_alloc(size_t alignment, size_t size) { // Normal assert to check for programmer's errors, especially in Debug configuration. #ifndef VMA_ASSERT -#ifdef _DEBUG -#define VMA_ASSERT(expr) assert(expr) -#else -#define VMA_ASSERT(expr) -#endif + #ifdef NDEBUG + #define VMA_ASSERT(expr) + #else + #define VMA_ASSERT(expr) assert(expr) + #endif #endif // Assert that will be called very often, like inside data structures e.g. operator[]. // Making it non-empty can make program slow. 
#ifndef VMA_HEAVY_ASSERT -#ifdef _DEBUG -#define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr) -#else -#define VMA_HEAVY_ASSERT(expr) -#endif + #ifdef NDEBUG + #define VMA_HEAVY_ASSERT(expr) + #else + #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr) + #endif #endif #ifndef VMA_ALIGN_OF -#define VMA_ALIGN_OF(type) (__alignof(type)) + #define VMA_ALIGN_OF(type) (__alignof(type)) #endif #ifndef VMA_SYSTEM_ALIGNED_MALLOC -#if defined(_WIN32) -#define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment))) -#else -#define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size))) -#endif + #if defined(_WIN32) + #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment))) + #else + #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size) )) + #endif #endif #ifndef VMA_SYSTEM_FREE -#if defined(_WIN32) -#define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr) -#else -#define VMA_SYSTEM_FREE(ptr) free(ptr) -#endif + #if defined(_WIN32) + #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr) + #else + #define VMA_SYSTEM_FREE(ptr) free(ptr) + #endif #endif #ifndef VMA_MIN -#define VMA_MIN(v1, v2) (std::min((v1), (v2))) + #define VMA_MIN(v1, v2) (std::min((v1), (v2))) #endif #ifndef VMA_MAX -#define VMA_MAX(v1, v2) (std::max((v1), (v2))) + #define VMA_MAX(v1, v2) (std::max((v1), (v2))) #endif #ifndef VMA_SWAP -#define VMA_SWAP(v1, v2) std::swap((v1), (v2)) + #define VMA_SWAP(v1, v2) std::swap((v1), (v2)) #endif #ifndef VMA_SORT -#define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp) + #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp) #endif #ifndef VMA_DEBUG_LOG -#define VMA_DEBUG_LOG(format, ...) -/* + #define VMA_DEBUG_LOG(format, ...) + /* #define VMA_DEBUG_LOG(format, ...) 
do { \ - printf(format, __VA_ARGS__); \ - printf("\n"); \ + printf(format, __VA_ARGS__); \ + printf("\n"); \ } while(false) */ #endif // Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString. #if VMA_STATS_STRING_ENABLED -static inline void VmaUint32ToStr(char *outStr, size_t strLen, uint32_t num) { - snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num)); -} -static inline void VmaUint64ToStr(char *outStr, size_t strLen, uint64_t num) { - snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num)); -} -static inline void VmaPtrToStr(char *outStr, size_t strLen, const void *ptr) { - snprintf(outStr, strLen, "%p", ptr); -} + static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num) + { + snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num)); + } + static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num) + { + snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num)); + } + static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr) + { + snprintf(outStr, strLen, "%p", ptr); + } #endif #ifndef VMA_MUTEX -class VmaMutex { -public: - void Lock() { m_Mutex.lock(); } - void Unlock() { m_Mutex.unlock(); } - -private: - std::mutex m_Mutex; -}; -#define VMA_MUTEX VmaMutex + class VmaMutex + { + public: + void Lock() { m_Mutex.lock(); } + void Unlock() { m_Mutex.unlock(); } + bool TryLock() { return m_Mutex.try_lock(); } + private: + std::mutex m_Mutex; + }; + #define VMA_MUTEX VmaMutex #endif // Read-write mutex, where "read" is shared access, "write" is exclusive access. #ifndef VMA_RW_MUTEX -#if VMA_USE_STL_SHARED_MUTEX -// Use std::shared_mutex from C++17. 
-#include <shared_mutex> -class VmaRWMutex { -public: - void LockRead() { m_Mutex.lock_shared(); } - void UnlockRead() { m_Mutex.unlock_shared(); } - void LockWrite() { m_Mutex.lock(); } - void UnlockWrite() { m_Mutex.unlock(); } - -private: - std::shared_mutex m_Mutex; -}; -#define VMA_RW_MUTEX VmaRWMutex -#elif defined(_WIN32) && defined(WINVER) && WINVER >= 0x0600 -// Use SRWLOCK from WinAPI. -// Minimum supported client = Windows Vista, server = Windows Server 2008. -class VmaRWMutex { -public: - VmaRWMutex() { InitializeSRWLock(&m_Lock); } - void LockRead() { AcquireSRWLockShared(&m_Lock); } - void UnlockRead() { ReleaseSRWLockShared(&m_Lock); } - void LockWrite() { AcquireSRWLockExclusive(&m_Lock); } - void UnlockWrite() { ReleaseSRWLockExclusive(&m_Lock); } - -private: - SRWLOCK m_Lock; -}; -#define VMA_RW_MUTEX VmaRWMutex -#else -// Less efficient fallback: Use normal mutex. -class VmaRWMutex { -public: - void LockRead() { m_Mutex.Lock(); } - void UnlockRead() { m_Mutex.Unlock(); } - void LockWrite() { m_Mutex.Lock(); } - void UnlockWrite() { m_Mutex.Unlock(); } - -private: - VMA_MUTEX m_Mutex; -}; -#define VMA_RW_MUTEX VmaRWMutex -#endif // #if VMA_USE_STL_SHARED_MUTEX + #if VMA_USE_STL_SHARED_MUTEX + // Use std::shared_mutex from C++17. + #include <shared_mutex> + class VmaRWMutex + { + public: + void LockRead() { m_Mutex.lock_shared(); } + void UnlockRead() { m_Mutex.unlock_shared(); } + bool TryLockRead() { return m_Mutex.try_lock_shared(); } + void LockWrite() { m_Mutex.lock(); } + void UnlockWrite() { m_Mutex.unlock(); } + bool TryLockWrite() { return m_Mutex.try_lock(); } + private: + std::shared_mutex m_Mutex; + }; + #define VMA_RW_MUTEX VmaRWMutex + #elif defined(_WIN32) && defined(WINVER) && WINVER >= 0x0600 + // Use SRWLOCK from WinAPI. + // Minimum supported client = Windows Vista, server = Windows Server 2008. 
+ class VmaRWMutex + { + public: + VmaRWMutex() { InitializeSRWLock(&m_Lock); } + void LockRead() { AcquireSRWLockShared(&m_Lock); } + void UnlockRead() { ReleaseSRWLockShared(&m_Lock); } + bool TryLockRead() { return TryAcquireSRWLockShared(&m_Lock) != FALSE; } + void LockWrite() { AcquireSRWLockExclusive(&m_Lock); } + void UnlockWrite() { ReleaseSRWLockExclusive(&m_Lock); } + bool TryLockWrite() { return TryAcquireSRWLockExclusive(&m_Lock) != FALSE; } + private: + SRWLOCK m_Lock; + }; + #define VMA_RW_MUTEX VmaRWMutex + #else + // Less efficient fallback: Use normal mutex. + class VmaRWMutex + { + public: + void LockRead() { m_Mutex.Lock(); } + void UnlockRead() { m_Mutex.Unlock(); } + bool TryLockRead() { return m_Mutex.TryLock(); } + void LockWrite() { m_Mutex.Lock(); } + void UnlockWrite() { m_Mutex.Unlock(); } + bool TryLockWrite() { return m_Mutex.TryLock(); } + private: + VMA_MUTEX m_Mutex; + }; + #define VMA_RW_MUTEX VmaRWMutex + #endif // #if VMA_USE_STL_SHARED_MUTEX #endif // #ifndef VMA_RW_MUTEX /* -If providing your own implementation, you need to implement a subset of std::atomic: - -- Constructor(uint32_t desired) -- uint32_t load() const -- void store(uint32_t desired) -- bool compare_exchange_weak(uint32_t& expected, uint32_t desired) +If providing your own implementation, you need to implement a subset of std::atomic. */ #ifndef VMA_ATOMIC_UINT32 -#include <atomic> -#define VMA_ATOMIC_UINT32 std::atomic<uint32_t> + #include <atomic> + #define VMA_ATOMIC_UINT32 std::atomic<uint32_t> +#endif + +#ifndef VMA_ATOMIC_UINT64 + #include <atomic> + #define VMA_ATOMIC_UINT64 std::atomic<uint64_t> #endif #ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY -/** - Every allocation will have its own memory block. - Define to 1 for debugging purposes only. - */ -#define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0) + /** + Every allocation will have its own memory block. + Define to 1 for debugging purposes only. 
+ */ + #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0) #endif #ifndef VMA_DEBUG_ALIGNMENT -/** - Minimum alignment of all allocations, in bytes. - Set to more than 1 for debugging purposes only. Must be power of two. - */ -#define VMA_DEBUG_ALIGNMENT (1) + /** + Minimum alignment of all allocations, in bytes. + Set to more than 1 for debugging purposes only. Must be power of two. + */ + #define VMA_DEBUG_ALIGNMENT (1) #endif #ifndef VMA_DEBUG_MARGIN -/** - Minimum margin before and after every allocation, in bytes. - Set nonzero for debugging purposes only. - */ -#define VMA_DEBUG_MARGIN (0) + /** + Minimum margin before and after every allocation, in bytes. + Set nonzero for debugging purposes only. + */ + #define VMA_DEBUG_MARGIN (0) #endif #ifndef VMA_DEBUG_INITIALIZE_ALLOCATIONS -/** - Define this macro to 1 to automatically fill new allocations and destroyed - allocations with some bit pattern. - */ -#define VMA_DEBUG_INITIALIZE_ALLOCATIONS (0) + /** + Define this macro to 1 to automatically fill new allocations and destroyed + allocations with some bit pattern. + */ + #define VMA_DEBUG_INITIALIZE_ALLOCATIONS (0) #endif #ifndef VMA_DEBUG_DETECT_CORRUPTION -/** - Define this macro to 1 together with non-zero value of VMA_DEBUG_MARGIN to - enable writing magic value to the margin before and after every allocation and - validating it, so that memory corruptions (out-of-bounds writes) are detected. - */ -#define VMA_DEBUG_DETECT_CORRUPTION (0) + /** + Define this macro to 1 together with non-zero value of VMA_DEBUG_MARGIN to + enable writing magic value to the margin before and after every allocation and + validating it, so that memory corruptions (out-of-bounds writes) are detected. + */ + #define VMA_DEBUG_DETECT_CORRUPTION (0) #endif #ifndef VMA_DEBUG_GLOBAL_MUTEX -/** - Set this to 1 for debugging purposes only, to enable single mutex protecting all - entry calls to the library. Can be useful for debugging multithreading issues. 
- */ -#define VMA_DEBUG_GLOBAL_MUTEX (0) + /** + Set this to 1 for debugging purposes only, to enable single mutex protecting all + entry calls to the library. Can be useful for debugging multithreading issues. + */ + #define VMA_DEBUG_GLOBAL_MUTEX (0) #endif #ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY -/** - Minimum value for VkPhysicalDeviceLimits::bufferImageGranularity. - Set to more than 1 for debugging purposes only. Must be power of two. - */ -#define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1) + /** + Minimum value for VkPhysicalDeviceLimits::bufferImageGranularity. + Set to more than 1 for debugging purposes only. Must be power of two. + */ + #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1) #endif #ifndef VMA_SMALL_HEAP_MAX_SIZE -/// Maximum size of a memory heap in Vulkan to consider it "small". -#define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024) + /// Maximum size of a memory heap in Vulkan to consider it "small". + #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024) #endif #ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE -/// Default size of a block allocated as single VkDeviceMemory from a "large" heap. -#define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024) + /// Default size of a block allocated as single VkDeviceMemory from a "large" heap. + #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024) #endif #ifndef VMA_CLASS_NO_COPY -#define VMA_CLASS_NO_COPY(className) \ -private: \ - className(const className &) = delete; \ - className &operator=(const className &) = delete; + #define VMA_CLASS_NO_COPY(className) \ + private: \ + className(const className&) = delete; \ + className& operator=(const className&) = delete; #endif static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX; @@ -3468,25 +3928,31 @@ static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX; // Decimal 2139416166, float NaN, little-endian binary 66 E6 84 7F. 
static const uint32_t VMA_CORRUPTION_DETECTION_MAGIC_VALUE = 0x7F84E666; -static const uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED = 0xDC; +static const uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED = 0xDC; static const uint8_t VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF; /******************************************************************************* END OF CONFIGURATION */ +// # Copy of some Vulkan definitions so we don't need to check their existence just to handle few constants. + +static const uint32_t VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY = 0x00000040; +static const uint32_t VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY = 0x00000080; + + static const uint32_t VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET = 0x10000000u; static VkAllocationCallbacks VmaEmptyAllocationCallbacks = { - VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL -}; + VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL }; // Returns number of bits set to 1 in (v). -static inline uint32_t VmaCountBitsSet(uint32_t v) { +static inline uint32_t VmaCountBitsSet(uint32_t v) +{ uint32_t c = v - ((v >> 1) & 0x55555555); - c = ((c >> 2) & 0x33333333) + (c & 0x33333333); - c = ((c >> 4) + c) & 0x0F0F0F0F; - c = ((c >> 8) + c) & 0x00FF00FF; + c = ((c >> 2) & 0x33333333) + (c & 0x33333333); + c = ((c >> 4) + c) & 0x0F0F0F0F; + c = ((c >> 8) + c) & 0x00FF00FF; c = ((c >> 16) + c) & 0x0000FFFF; return c; } @@ -3494,19 +3960,22 @@ static inline uint32_t VmaCountBitsSet(uint32_t v) { // Aligns given value up to nearest multiply of align value. For example: VmaAlignUp(11, 8) = 16. // Use types like uint32_t, uint64_t as T. template <typename T> -static inline T VmaAlignUp(T val, T align) { +static inline T VmaAlignUp(T val, T align) +{ return (val + align - 1) / align * align; } // Aligns given value down to nearest multiply of align value. For example: VmaAlignUp(11, 8) = 8. // Use types like uint32_t, uint64_t as T. 
template <typename T> -static inline T VmaAlignDown(T val, T align) { +static inline T VmaAlignDown(T val, T align) +{ return val / align * align; } // Division with mathematical rounding to nearest number. template <typename T> -static inline T VmaRoundDiv(T x, T y) { +static inline T VmaRoundDiv(T x, T y) +{ return (x + (y / (T)2)) / y; } @@ -3516,12 +3985,14 @@ T must be unsigned integer number or signed integer but always nonnegative. For 0 returns true. */ template <typename T> -inline bool VmaIsPow2(T x) { - return (x & (x - 1)) == 0; +inline bool VmaIsPow2(T x) +{ + return (x & (x-1)) == 0; } // Returns smallest power of 2 greater or equal to v. -static inline uint32_t VmaNextPow2(uint32_t v) { +static inline uint32_t VmaNextPow2(uint32_t v) +{ v--; v |= v >> 1; v |= v >> 2; @@ -3531,7 +4002,8 @@ static inline uint32_t VmaNextPow2(uint32_t v) { v++; return v; } -static inline uint64_t VmaNextPow2(uint64_t v) { +static inline uint64_t VmaNextPow2(uint64_t v) +{ v--; v |= v >> 1; v |= v >> 2; @@ -3544,7 +4016,8 @@ static inline uint64_t VmaNextPow2(uint64_t v) { } // Returns largest power of 2 less or equal to v. 
-static inline uint32_t VmaPrevPow2(uint32_t v) { +static inline uint32_t VmaPrevPow2(uint32_t v) +{ v |= v >> 1; v |= v >> 2; v |= v >> 4; @@ -3553,7 +4026,8 @@ static inline uint32_t VmaPrevPow2(uint32_t v) { v = v ^ (v >> 1); return v; } -static inline uint64_t VmaPrevPow2(uint64_t v) { +static inline uint64_t VmaPrevPow2(uint64_t v) +{ v |= v >> 1; v |= v >> 2; v |= v >> 4; @@ -3564,23 +4038,26 @@ static inline uint64_t VmaPrevPow2(uint64_t v) { return v; } -static inline bool VmaStrIsEmpty(const char *pStr) { +static inline bool VmaStrIsEmpty(const char* pStr) +{ return pStr == VMA_NULL || *pStr == '\0'; } #if VMA_STATS_STRING_ENABLED -static const char *VmaAlgorithmToStr(uint32_t algorithm) { - switch (algorithm) { - case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT: - return "Linear"; - case VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT: - return "Buddy"; - case 0: - return "Default"; - default: - VMA_ASSERT(0); - return ""; +static const char* VmaAlgorithmToStr(uint32_t algorithm) +{ + switch(algorithm) + { + case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT: + return "Linear"; + case VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT: + return "Buddy"; + case 0: + return "Default"; + default: + VMA_ASSERT(0); + return ""; } } @@ -3588,28 +4065,34 @@ static const char *VmaAlgorithmToStr(uint32_t algorithm) { #ifndef VMA_SORT -template <typename Iterator, typename Compare> -Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp) { - Iterator centerValue = end; - --centerValue; +template<typename Iterator, typename Compare> +Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp) +{ + Iterator centerValue = end; --centerValue; Iterator insertIndex = beg; - for (Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex) { - if (cmp(*memTypeIndex, *centerValue)) { - if (insertIndex != memTypeIndex) { + for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex) + { + if(cmp(*memTypeIndex, *centerValue)) + { + if(insertIndex != 
memTypeIndex) + { VMA_SWAP(*memTypeIndex, *insertIndex); } ++insertIndex; } } - if (insertIndex != centerValue) { + if(insertIndex != centerValue) + { VMA_SWAP(*insertIndex, *centerValue); } return insertIndex; } -template <typename Iterator, typename Compare> -void VmaQuickSort(Iterator beg, Iterator end, Compare cmp) { - if (beg < end) { +template<typename Iterator, typename Compare> +void VmaQuickSort(Iterator beg, Iterator end, Compare cmp) +{ + if(beg < end) + { Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp); VmaQuickSort<Iterator, Compare>(beg, it, cmp); VmaQuickSort<Iterator, Compare>(it + 1, end, cmp); @@ -3628,10 +4111,11 @@ Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulk chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity". */ static inline bool VmaBlocksOnSamePage( - VkDeviceSize resourceAOffset, - VkDeviceSize resourceASize, - VkDeviceSize resourceBOffset, - VkDeviceSize pageSize) { + VkDeviceSize resourceAOffset, + VkDeviceSize resourceASize, + VkDeviceSize resourceBOffset, + VkDeviceSize pageSize) +{ VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0); VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1; VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1); @@ -3640,7 +4124,8 @@ static inline bool VmaBlocksOnSamePage( return resourceAEndPage == resourceBStartPage; } -enum VmaSuballocationType { +enum VmaSuballocationType +{ VMA_SUBALLOCATION_TYPE_FREE = 0, VMA_SUBALLOCATION_TYPE_UNKNOWN = 1, VMA_SUBALLOCATION_TYPE_BUFFER = 2, @@ -3657,50 +4142,67 @@ or linear image and another one is optimal image. If type is unknown, behave conservatively. 
*/ static inline bool VmaIsBufferImageGranularityConflict( - VmaSuballocationType suballocType1, - VmaSuballocationType suballocType2) { - if (suballocType1 > suballocType2) { + VmaSuballocationType suballocType1, + VmaSuballocationType suballocType2) +{ + if(suballocType1 > suballocType2) + { VMA_SWAP(suballocType1, suballocType2); } - - switch (suballocType1) { - case VMA_SUBALLOCATION_TYPE_FREE: - return false; - case VMA_SUBALLOCATION_TYPE_UNKNOWN: - return true; - case VMA_SUBALLOCATION_TYPE_BUFFER: - return suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN || - suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL; - case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN: - return suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN || - suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR || - suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL; - case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR: - return suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL; - case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL: - return false; - default: - VMA_ASSERT(0); - return true; + + switch(suballocType1) + { + case VMA_SUBALLOCATION_TYPE_FREE: + return false; + case VMA_SUBALLOCATION_TYPE_UNKNOWN: + return true; + case VMA_SUBALLOCATION_TYPE_BUFFER: + return + suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN || + suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL; + case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN: + return + suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN || + suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR || + suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL; + case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR: + return + suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL; + case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL: + return false; + default: + VMA_ASSERT(0); + return true; } } -static void VmaWriteMagicValue(void *pData, VkDeviceSize offset) { - uint32_t *pDst = (uint32_t *)((char *)pData + offset); +static void VmaWriteMagicValue(void* pData, VkDeviceSize 
offset) +{ +#if VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_DETECT_CORRUPTION + uint32_t* pDst = (uint32_t*)((char*)pData + offset); const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t); - for (size_t i = 0; i < numberCount; ++i, ++pDst) { + for(size_t i = 0; i < numberCount; ++i, ++pDst) + { *pDst = VMA_CORRUPTION_DETECTION_MAGIC_VALUE; } +#else + // no-op +#endif } -static bool VmaValidateMagicValue(const void *pData, VkDeviceSize offset) { - const uint32_t *pSrc = (const uint32_t *)((const char *)pData + offset); +static bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset) +{ +#if VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_DETECT_CORRUPTION + const uint32_t* pSrc = (const uint32_t*)((const char*)pData + offset); const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t); - for (size_t i = 0; i < numberCount; ++i, ++pSrc) { - if (*pSrc != VMA_CORRUPTION_DETECTION_MAGIC_VALUE) { + for(size_t i = 0; i < numberCount; ++i, ++pSrc) + { + if(*pSrc != VMA_CORRUPTION_DETECTION_MAGIC_VALUE) + { return false; } } +#endif return true; } @@ -3708,7 +4210,8 @@ static bool VmaValidateMagicValue(const void *pData, VkDeviceSize offset) { Fills structure with parameters of an example buffer to be used for transfers during GPU memory defragmentation. */ -static void VmaFillGpuDefragmentationBufferCreateInfo(VkBufferCreateInfo &outBufCreateInfo) { +static void VmaFillGpuDefragmentationBufferCreateInfo(VkBufferCreateInfo& outBufCreateInfo) +{ memset(&outBufCreateInfo, 0, sizeof(outBufCreateInfo)); outBufCreateInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; outBufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT; @@ -3716,70 +4219,50 @@ static void VmaFillGpuDefragmentationBufferCreateInfo(VkBufferCreateInfo &outBuf } // Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope). 
-struct VmaMutexLock { +struct VmaMutexLock +{ VMA_CLASS_NO_COPY(VmaMutexLock) public: - VmaMutexLock(VMA_MUTEX &mutex, bool useMutex = true) : - m_pMutex(useMutex ? &mutex : VMA_NULL) { - if (m_pMutex) { - m_pMutex->Lock(); - } - } - ~VmaMutexLock() { - if (m_pMutex) { - m_pMutex->Unlock(); - } - } - + VmaMutexLock(VMA_MUTEX& mutex, bool useMutex = true) : + m_pMutex(useMutex ? &mutex : VMA_NULL) + { if(m_pMutex) { m_pMutex->Lock(); } } + ~VmaMutexLock() + { if(m_pMutex) { m_pMutex->Unlock(); } } private: - VMA_MUTEX *m_pMutex; + VMA_MUTEX* m_pMutex; }; // Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for reading. -struct VmaMutexLockRead { +struct VmaMutexLockRead +{ VMA_CLASS_NO_COPY(VmaMutexLockRead) public: - VmaMutexLockRead(VMA_RW_MUTEX &mutex, bool useMutex) : - m_pMutex(useMutex ? &mutex : VMA_NULL) { - if (m_pMutex) { - m_pMutex->LockRead(); - } - } - ~VmaMutexLockRead() { - if (m_pMutex) { - m_pMutex->UnlockRead(); - } - } - + VmaMutexLockRead(VMA_RW_MUTEX& mutex, bool useMutex) : + m_pMutex(useMutex ? &mutex : VMA_NULL) + { if(m_pMutex) { m_pMutex->LockRead(); } } + ~VmaMutexLockRead() { if(m_pMutex) { m_pMutex->UnlockRead(); } } private: - VMA_RW_MUTEX *m_pMutex; + VMA_RW_MUTEX* m_pMutex; }; // Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for writing. -struct VmaMutexLockWrite { +struct VmaMutexLockWrite +{ VMA_CLASS_NO_COPY(VmaMutexLockWrite) public: - VmaMutexLockWrite(VMA_RW_MUTEX &mutex, bool useMutex) : - m_pMutex(useMutex ? &mutex : VMA_NULL) { - if (m_pMutex) { - m_pMutex->LockWrite(); - } - } - ~VmaMutexLockWrite() { - if (m_pMutex) { - m_pMutex->UnlockWrite(); - } - } - + VmaMutexLockWrite(VMA_RW_MUTEX& mutex, bool useMutex) : + m_pMutex(useMutex ? 
&mutex : VMA_NULL) + { if(m_pMutex) { m_pMutex->LockWrite(); } } + ~VmaMutexLockWrite() { if(m_pMutex) { m_pMutex->UnlockWrite(); } } private: - VMA_RW_MUTEX *m_pMutex; + VMA_RW_MUTEX* m_pMutex; }; #if VMA_DEBUG_GLOBAL_MUTEX -static VMA_MUTEX gDebugGlobalMutex; -#define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true); + static VMA_MUTEX gDebugGlobalMutex; + #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true); #else -#define VMA_DEBUG_GLOBAL_MUTEX_LOCK + #define VMA_DEBUG_GLOBAL_MUTEX_LOCK #endif // Minimum size of a free suballocation to register it in the free suballocation collection. @@ -3795,33 +4278,56 @@ Returned value is the found element, if present in the collection or place where new element with value (key) should be inserted. */ template <typename CmpLess, typename IterT, typename KeyT> -static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, CmpLess cmp) { +static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, const CmpLess& cmp) +{ size_t down = 0, up = (end - beg); - while (down < up) { + while(down < up) + { const size_t mid = (down + up) / 2; - if (cmp(*(beg + mid), key)) { + if(cmp(*(beg+mid), key)) + { down = mid + 1; - } else { + } + else + { up = mid; } } return beg + down; } +template<typename CmpLess, typename IterT, typename KeyT> +IterT VmaBinaryFindSorted(const IterT& beg, const IterT& end, const KeyT& value, const CmpLess& cmp) +{ + IterT it = VmaBinaryFindFirstNotLess<CmpLess, IterT, KeyT>( + beg, end, value, cmp); + if(it == end || + (!cmp(*it, value) && !cmp(value, *it))) + { + return it; + } + return end; +} + /* Returns true if all pointers in the array are not-null and unique. Warning! O(n^2) complexity. Use only inside VMA_HEAVY_ASSERT. T must be pointer type, e.g. VmaAllocation, VmaPool. 
*/ -template <typename T> -static bool VmaValidatePointerArray(uint32_t count, const T *arr) { - for (uint32_t i = 0; i < count; ++i) { +template<typename T> +static bool VmaValidatePointerArray(uint32_t count, const T* arr) +{ + for(uint32_t i = 0; i < count; ++i) + { const T iPtr = arr[i]; - if (iPtr == VMA_NULL) { + if(iPtr == VMA_NULL) + { return false; } - for (uint32_t j = i + 1; j < count; ++j) { - if (iPtr == arr[j]) { + for(uint32_t j = i + 1; j < count; ++j) + { + if(iPtr == arr[j]) + { return false; } } @@ -3832,97 +4338,137 @@ static bool VmaValidatePointerArray(uint32_t count, const T *arr) { //////////////////////////////////////////////////////////////////////////////// // Memory allocation -static void *VmaMalloc(const VkAllocationCallbacks *pAllocationCallbacks, size_t size, size_t alignment) { - if ((pAllocationCallbacks != VMA_NULL) && - (pAllocationCallbacks->pfnAllocation != VMA_NULL)) { +static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment) +{ + if((pAllocationCallbacks != VMA_NULL) && + (pAllocationCallbacks->pfnAllocation != VMA_NULL)) + { return (*pAllocationCallbacks->pfnAllocation)( - pAllocationCallbacks->pUserData, - size, - alignment, - VK_SYSTEM_ALLOCATION_SCOPE_OBJECT); - } else { + pAllocationCallbacks->pUserData, + size, + alignment, + VK_SYSTEM_ALLOCATION_SCOPE_OBJECT); + } + else + { return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment); } } -static void VmaFree(const VkAllocationCallbacks *pAllocationCallbacks, void *ptr) { - if ((pAllocationCallbacks != VMA_NULL) && - (pAllocationCallbacks->pfnFree != VMA_NULL)) { +static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr) +{ + if((pAllocationCallbacks != VMA_NULL) && + (pAllocationCallbacks->pfnFree != VMA_NULL)) + { (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr); - } else { + } + else + { VMA_SYSTEM_FREE(ptr); } } -template <typename T> -static T *VmaAllocate(const 
VkAllocationCallbacks *pAllocationCallbacks) { - return (T *)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T)); +template<typename T> +static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks) +{ + return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T)); } -template <typename T> -static T *VmaAllocateArray(const VkAllocationCallbacks *pAllocationCallbacks, size_t count) { - return (T *)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T)); +template<typename T> +static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count) +{ + return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T)); } -#define vma_new(allocator, type) new (VmaAllocate<type>(allocator))(type) +#define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type) -#define vma_new_array(allocator, type, count) new (VmaAllocateArray<type>((allocator), (count)))(type) +#define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type) -template <typename T> -static void vma_delete(const VkAllocationCallbacks *pAllocationCallbacks, T *ptr) { +template<typename T> +static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr) +{ ptr->~T(); VmaFree(pAllocationCallbacks, ptr); } -template <typename T> -static void vma_delete_array(const VkAllocationCallbacks *pAllocationCallbacks, T *ptr, size_t count) { - if (ptr != VMA_NULL) { - for (size_t i = count; i--;) { +template<typename T> +static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count) +{ + if(ptr != VMA_NULL) + { + for(size_t i = count; i--; ) + { ptr[i].~T(); } VmaFree(pAllocationCallbacks, ptr); } } +static char* VmaCreateStringCopy(const VkAllocationCallbacks* allocs, const char* srcStr) +{ + if(srcStr != VMA_NULL) + { + const size_t len = strlen(srcStr); + char* const result = vma_new_array(allocs, char, len + 1); + memcpy(result, srcStr, 
len + 1); + return result; + } + else + { + return VMA_NULL; + } +} + +static void VmaFreeString(const VkAllocationCallbacks* allocs, char* str) +{ + if(str != VMA_NULL) + { + const size_t len = strlen(str); + vma_delete_array(allocs, str, len + 1); + } +} + // STL-compatible allocator. -template <typename T> -class VmaStlAllocator { +template<typename T> +class VmaStlAllocator +{ public: - const VkAllocationCallbacks *const m_pCallbacks; + const VkAllocationCallbacks* const m_pCallbacks; typedef T value_type; + + VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { } + template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { } - VmaStlAllocator(const VkAllocationCallbacks *pCallbacks) : - m_pCallbacks(pCallbacks) {} - template <typename U> - VmaStlAllocator(const VmaStlAllocator<U> &src) : - m_pCallbacks(src.m_pCallbacks) {} + T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); } + void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); } - T *allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); } - void deallocate(T *p, size_t n) { VmaFree(m_pCallbacks, p); } - - template <typename U> - bool operator==(const VmaStlAllocator<U> &rhs) const { + template<typename U> + bool operator==(const VmaStlAllocator<U>& rhs) const + { return m_pCallbacks == rhs.m_pCallbacks; } - template <typename U> - bool operator!=(const VmaStlAllocator<U> &rhs) const { + template<typename U> + bool operator!=(const VmaStlAllocator<U>& rhs) const + { return m_pCallbacks != rhs.m_pCallbacks; } - VmaStlAllocator &operator=(const VmaStlAllocator &x) = delete; + VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete; }; #if VMA_USE_STL_VECTOR #define VmaVector std::vector -template <typename T, typename allocatorT> -static void VmaVectorInsert(std::vector<T, allocatorT> &vec, size_t index, const T &item) { +template<typename T, typename allocatorT> +static void 
VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item) +{ vec.insert(vec.begin() + index, item); } -template <typename T, typename allocatorT> -static void VmaVectorRemove(std::vector<T, allocatorT> &vec, size_t index) { +template<typename T, typename allocatorT> +static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index) +{ vec.erase(vec.begin() + index); } @@ -3931,90 +4477,114 @@ static void VmaVectorRemove(std::vector<T, allocatorT> &vec, size_t index) { /* Class with interface compatible with subset of std::vector. T must be POD because constructors and destructors are not called and memcpy is used for these objects. */ -template <typename T, typename AllocatorT> -class VmaVector { +template<typename T, typename AllocatorT> +class VmaVector +{ public: typedef T value_type; - VmaVector(const AllocatorT &allocator) : - m_Allocator(allocator), - m_pArray(VMA_NULL), - m_Count(0), - m_Capacity(0) { + VmaVector(const AllocatorT& allocator) : + m_Allocator(allocator), + m_pArray(VMA_NULL), + m_Count(0), + m_Capacity(0) + { } - VmaVector(size_t count, const AllocatorT &allocator) : - m_Allocator(allocator), - m_pArray(count ? (T *)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL), - m_Count(count), - m_Capacity(count) { + VmaVector(size_t count, const AllocatorT& allocator) : + m_Allocator(allocator), + m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL), + m_Count(count), + m_Capacity(count) + { } - - VmaVector(const VmaVector<T, AllocatorT> &src) : - m_Allocator(src.m_Allocator), - m_pArray(src.m_Count ? (T *)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL), - m_Count(src.m_Count), - m_Capacity(src.m_Count) { - if (m_Count != 0) { + + // This version of the constructor is here for compatibility with pre-C++14 std::vector. + // value is unused. 
+ VmaVector(size_t count, const T& value, const AllocatorT& allocator) + : VmaVector(count, allocator) {} + + VmaVector(const VmaVector<T, AllocatorT>& src) : + m_Allocator(src.m_Allocator), + m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL), + m_Count(src.m_Count), + m_Capacity(src.m_Count) + { + if(m_Count != 0) + { memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T)); } } - - ~VmaVector() { + + ~VmaVector() + { VmaFree(m_Allocator.m_pCallbacks, m_pArray); } - VmaVector &operator=(const VmaVector<T, AllocatorT> &rhs) { - if (&rhs != this) { + VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs) + { + if(&rhs != this) + { resize(rhs.m_Count); - if (m_Count != 0) { + if(m_Count != 0) + { memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T)); } } return *this; } - + bool empty() const { return m_Count == 0; } size_t size() const { return m_Count; } - T *data() { return m_pArray; } - const T *data() const { return m_pArray; } - - T &operator[](size_t index) { + T* data() { return m_pArray; } + const T* data() const { return m_pArray; } + + T& operator[](size_t index) + { VMA_HEAVY_ASSERT(index < m_Count); return m_pArray[index]; } - const T &operator[](size_t index) const { + const T& operator[](size_t index) const + { VMA_HEAVY_ASSERT(index < m_Count); return m_pArray[index]; } - T &front() { + T& front() + { VMA_HEAVY_ASSERT(m_Count > 0); return m_pArray[0]; } - const T &front() const { + const T& front() const + { VMA_HEAVY_ASSERT(m_Count > 0); return m_pArray[0]; } - T &back() { + T& back() + { VMA_HEAVY_ASSERT(m_Count > 0); return m_pArray[m_Count - 1]; } - const T &back() const { + const T& back() const + { VMA_HEAVY_ASSERT(m_Count > 0); return m_pArray[m_Count - 1]; } - void reserve(size_t newCapacity, bool freeMemory = false) { + void reserve(size_t newCapacity, bool freeMemory = false) + { newCapacity = VMA_MAX(newCapacity, m_Count); - - if ((newCapacity < m_Capacity) && !freeMemory) { + + 
if((newCapacity < m_Capacity) && !freeMemory) + { newCapacity = m_Capacity; } - - if (newCapacity != m_Capacity) { - T *const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator, newCapacity) : VMA_NULL; - if (m_Count != 0) { + + if(newCapacity != m_Capacity) + { + T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator, newCapacity) : VMA_NULL; + if(m_Count != 0) + { memcpy(newArray, m_pArray, m_Count * sizeof(T)); } VmaFree(m_Allocator.m_pCallbacks, m_pArray); @@ -4023,18 +4593,24 @@ public: } } - void resize(size_t newCount, bool freeMemory = false) { + void resize(size_t newCount, bool freeMemory = false) + { size_t newCapacity = m_Capacity; - if (newCount > m_Capacity) { + if(newCount > m_Capacity) + { newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8)); - } else if (freeMemory) { + } + else if(freeMemory) + { newCapacity = newCount; } - if (newCapacity != m_Capacity) { - T *const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL; + if(newCapacity != m_Capacity) + { + T* const newArray = newCapacity ? 
VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL; const size_t elementsToCopy = VMA_MIN(m_Count, newCount); - if (elementsToCopy != 0) { + if(elementsToCopy != 0) + { memcpy(newArray, m_pArray, elementsToCopy * sizeof(T)); } VmaFree(m_Allocator.m_pCallbacks, m_pArray); @@ -4045,94 +4621,107 @@ public: m_Count = newCount; } - void clear(bool freeMemory = false) { + void clear(bool freeMemory = false) + { resize(0, freeMemory); } - void insert(size_t index, const T &src) { + void insert(size_t index, const T& src) + { VMA_HEAVY_ASSERT(index <= m_Count); const size_t oldCount = size(); resize(oldCount + 1); - if (index < oldCount) { + if(index < oldCount) + { memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T)); } m_pArray[index] = src; } - void remove(size_t index) { + void remove(size_t index) + { VMA_HEAVY_ASSERT(index < m_Count); const size_t oldCount = size(); - if (index < oldCount - 1) { + if(index < oldCount - 1) + { memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T)); } resize(oldCount - 1); } - void push_back(const T &src) { + void push_back(const T& src) + { const size_t newIndex = size(); resize(newIndex + 1); m_pArray[newIndex] = src; } - void pop_back() { + void pop_back() + { VMA_HEAVY_ASSERT(m_Count > 0); resize(size() - 1); } - void push_front(const T &src) { + void push_front(const T& src) + { insert(0, src); } - void pop_front() { + void pop_front() + { VMA_HEAVY_ASSERT(m_Count > 0); remove(0); } - typedef T *iterator; + typedef T* iterator; iterator begin() { return m_pArray; } iterator end() { return m_pArray + m_Count; } private: AllocatorT m_Allocator; - T *m_pArray; + T* m_pArray; size_t m_Count; size_t m_Capacity; }; -template <typename T, typename allocatorT> -static void VmaVectorInsert(VmaVector<T, allocatorT> &vec, size_t index, const T &item) { +template<typename T, typename allocatorT> +static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t 
index, const T& item) +{ vec.insert(index, item); } -template <typename T, typename allocatorT> -static void VmaVectorRemove(VmaVector<T, allocatorT> &vec, size_t index) { +template<typename T, typename allocatorT> +static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index) +{ vec.remove(index); } #endif // #if VMA_USE_STL_VECTOR -template <typename CmpLess, typename VectorT> -size_t VmaVectorInsertSorted(VectorT &vector, const typename VectorT::value_type &value) { +template<typename CmpLess, typename VectorT> +size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value) +{ const size_t indexToInsert = VmaBinaryFindFirstNotLess( - vector.data(), - vector.data() + vector.size(), - value, - CmpLess()) - - vector.data(); + vector.data(), + vector.data() + vector.size(), + value, + CmpLess()) - vector.data(); VmaVectorInsert(vector, indexToInsert, value); return indexToInsert; } -template <typename CmpLess, typename VectorT> -bool VmaVectorRemoveSorted(VectorT &vector, const typename VectorT::value_type &value) { +template<typename CmpLess, typename VectorT> +bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value) +{ CmpLess comparator; typename VectorT::iterator it = VmaBinaryFindFirstNotLess( - vector.begin(), - vector.end(), - value, - comparator); - if ((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it)) { + vector.begin(), + vector.end(), + value, + comparator); + if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it)) + { size_t indexToRemove = it - vector.begin(); VmaVectorRemove(vector, indexToRemove); return true; @@ -4140,18 +4729,6 @@ bool VmaVectorRemoveSorted(VectorT &vector, const typename VectorT::value_type & return false; } -template <typename CmpLess, typename IterT, typename KeyT> -IterT VmaVectorFindSorted(const IterT &beg, const IterT &end, const KeyT &value) { - CmpLess comparator; - IterT it = VmaBinaryFindFirstNotLess<CmpLess, 
IterT, KeyT>( - beg, end, value, comparator); - if (it == end || - (!comparator(*it, value) && !comparator(value, *it))) { - return it; - } - return end; -} - //////////////////////////////////////////////////////////////////////////////// // class VmaPoolAllocator @@ -4160,86 +4737,96 @@ Allocator for objects of type T using a list of arrays (pools) to speed up allocation. Number of elements that can be allocated is not bounded because allocator can create multiple blocks. */ -template <typename T> -class VmaPoolAllocator { +template<typename T> +class VmaPoolAllocator +{ VMA_CLASS_NO_COPY(VmaPoolAllocator) public: - VmaPoolAllocator(const VkAllocationCallbacks *pAllocationCallbacks, uint32_t firstBlockCapacity); + VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity); ~VmaPoolAllocator(); - void Clear(); - T *Alloc(); - void Free(T *ptr); + template<typename... Types> T* Alloc(Types... args); + void Free(T* ptr); private: - union Item { + union Item + { uint32_t NextFreeIndex; - T Value; + alignas(T) char Value[sizeof(T)]; }; - struct ItemBlock { - Item *pItems; + struct ItemBlock + { + Item* pItems; uint32_t Capacity; uint32_t FirstFreeIndex; }; - - const VkAllocationCallbacks *m_pAllocationCallbacks; + + const VkAllocationCallbacks* m_pAllocationCallbacks; const uint32_t m_FirstBlockCapacity; - VmaVector<ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks; + VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks; - ItemBlock &CreateNewBlock(); + ItemBlock& CreateNewBlock(); }; -template <typename T> -VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks *pAllocationCallbacks, uint32_t firstBlockCapacity) : - m_pAllocationCallbacks(pAllocationCallbacks), - m_FirstBlockCapacity(firstBlockCapacity), - m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks)) { +template<typename T> +VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t 
firstBlockCapacity) : + m_pAllocationCallbacks(pAllocationCallbacks), + m_FirstBlockCapacity(firstBlockCapacity), + m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks)) +{ VMA_ASSERT(m_FirstBlockCapacity > 1); } -template <typename T> -VmaPoolAllocator<T>::~VmaPoolAllocator() { - Clear(); -} - -template <typename T> -void VmaPoolAllocator<T>::Clear() { - for (size_t i = m_ItemBlocks.size(); i--;) +template<typename T> +VmaPoolAllocator<T>::~VmaPoolAllocator() +{ + for(size_t i = m_ItemBlocks.size(); i--; ) vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemBlocks[i].Capacity); m_ItemBlocks.clear(); } -template <typename T> -T *VmaPoolAllocator<T>::Alloc() { - for (size_t i = m_ItemBlocks.size(); i--;) { - ItemBlock &block = m_ItemBlocks[i]; +template<typename T> +template<typename... Types> T* VmaPoolAllocator<T>::Alloc(Types... args) +{ + for(size_t i = m_ItemBlocks.size(); i--; ) + { + ItemBlock& block = m_ItemBlocks[i]; // This block has some free items: Use first one. - if (block.FirstFreeIndex != UINT32_MAX) { - Item *const pItem = &block.pItems[block.FirstFreeIndex]; + if(block.FirstFreeIndex != UINT32_MAX) + { + Item* const pItem = &block.pItems[block.FirstFreeIndex]; block.FirstFreeIndex = pItem->NextFreeIndex; - return &pItem->Value; + T* result = (T*)&pItem->Value; + new(result)T(std::forward<Types>(args)...); // Explicit constructor call. + return result; } } // No block has free item: Create new one and use it. - ItemBlock &newBlock = CreateNewBlock(); - Item *const pItem = &newBlock.pItems[0]; + ItemBlock& newBlock = CreateNewBlock(); + Item* const pItem = &newBlock.pItems[0]; newBlock.FirstFreeIndex = pItem->NextFreeIndex; - return &pItem->Value; + T* result = (T*)&pItem->Value; + new(result)T(std::forward<Types>(args)...); // Explicit constructor call. 
+ return result; } -template <typename T> -void VmaPoolAllocator<T>::Free(T *ptr) { +template<typename T> +void VmaPoolAllocator<T>::Free(T* ptr) +{ // Search all memory blocks to find ptr. - for (size_t i = m_ItemBlocks.size(); i--;) { - ItemBlock &block = m_ItemBlocks[i]; - + for(size_t i = m_ItemBlocks.size(); i--; ) + { + ItemBlock& block = m_ItemBlocks[i]; + // Casting to union. - Item *pItemPtr; + Item* pItemPtr; memcpy(&pItemPtr, &ptr, sizeof(pItemPtr)); - + // Check if pItemPtr is in address range of this block. - if ((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + block.Capacity)) { + if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + block.Capacity)) + { + ptr->~T(); // Explicit destructor call. const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems); pItemPtr->NextFreeIndex = block.FirstFreeIndex; block.FirstFreeIndex = index; @@ -4249,22 +4836,21 @@ void VmaPoolAllocator<T>::Free(T *ptr) { VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool."); } -template <typename T> -typename VmaPoolAllocator<T>::ItemBlock &VmaPoolAllocator<T>::CreateNewBlock() { +template<typename T> +typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock() +{ const uint32_t newBlockCapacity = m_ItemBlocks.empty() ? - m_FirstBlockCapacity : - m_ItemBlocks.back().Capacity * 3 / 2; + m_FirstBlockCapacity : m_ItemBlocks.back().Capacity * 3 / 2; const ItemBlock newBlock = { vma_new_array(m_pAllocationCallbacks, Item, newBlockCapacity), newBlockCapacity, - 0 - }; + 0 }; m_ItemBlocks.push_back(newBlock); // Setup singly-linked list of all free items in this block. 
- for (uint32_t i = 0; i < newBlockCapacity - 1; ++i) + for(uint32_t i = 0; i < newBlockCapacity - 1; ++i) newBlock.pItems[i].NextFreeIndex = i + 1; newBlock.pItems[newBlockCapacity - 1].NextFreeIndex = UINT32_MAX; return m_ItemBlocks.back(); @@ -4279,78 +4865,85 @@ typename VmaPoolAllocator<T>::ItemBlock &VmaPoolAllocator<T>::CreateNewBlock() { #else // #if VMA_USE_STL_LIST -template <typename T> -struct VmaListItem { - VmaListItem *pPrev; - VmaListItem *pNext; +template<typename T> +struct VmaListItem +{ + VmaListItem* pPrev; + VmaListItem* pNext; T Value; }; // Doubly linked list. -template <typename T> -class VmaRawList { +template<typename T> +class VmaRawList +{ VMA_CLASS_NO_COPY(VmaRawList) public: typedef VmaListItem<T> ItemType; - VmaRawList(const VkAllocationCallbacks *pAllocationCallbacks); + VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks); ~VmaRawList(); void Clear(); size_t GetCount() const { return m_Count; } bool IsEmpty() const { return m_Count == 0; } - ItemType *Front() { return m_pFront; } - const ItemType *Front() const { return m_pFront; } - ItemType *Back() { return m_pBack; } - const ItemType *Back() const { return m_pBack; } + ItemType* Front() { return m_pFront; } + const ItemType* Front() const { return m_pFront; } + ItemType* Back() { return m_pBack; } + const ItemType* Back() const { return m_pBack; } - ItemType *PushBack(); - ItemType *PushFront(); - ItemType *PushBack(const T &value); - ItemType *PushFront(const T &value); + ItemType* PushBack(); + ItemType* PushFront(); + ItemType* PushBack(const T& value); + ItemType* PushFront(const T& value); void PopBack(); void PopFront(); - + // Item can be null - it means PushBack. - ItemType *InsertBefore(ItemType *pItem); + ItemType* InsertBefore(ItemType* pItem); // Item can be null - it means PushFront. 
- ItemType *InsertAfter(ItemType *pItem); + ItemType* InsertAfter(ItemType* pItem); - ItemType *InsertBefore(ItemType *pItem, const T &value); - ItemType *InsertAfter(ItemType *pItem, const T &value); + ItemType* InsertBefore(ItemType* pItem, const T& value); + ItemType* InsertAfter(ItemType* pItem, const T& value); - void Remove(ItemType *pItem); + void Remove(ItemType* pItem); private: - const VkAllocationCallbacks *const m_pAllocationCallbacks; + const VkAllocationCallbacks* const m_pAllocationCallbacks; VmaPoolAllocator<ItemType> m_ItemAllocator; - ItemType *m_pFront; - ItemType *m_pBack; + ItemType* m_pFront; + ItemType* m_pBack; size_t m_Count; }; -template <typename T> -VmaRawList<T>::VmaRawList(const VkAllocationCallbacks *pAllocationCallbacks) : - m_pAllocationCallbacks(pAllocationCallbacks), - m_ItemAllocator(pAllocationCallbacks, 128), - m_pFront(VMA_NULL), - m_pBack(VMA_NULL), - m_Count(0) { +template<typename T> +VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) : + m_pAllocationCallbacks(pAllocationCallbacks), + m_ItemAllocator(pAllocationCallbacks, 128), + m_pFront(VMA_NULL), + m_pBack(VMA_NULL), + m_Count(0) +{ } -template <typename T> -VmaRawList<T>::~VmaRawList() { +template<typename T> +VmaRawList<T>::~VmaRawList() +{ // Intentionally not calling Clear, because that would be unnecessary // computations to return all items to m_ItemAllocator as free. 
} -template <typename T> -void VmaRawList<T>::Clear() { - if (IsEmpty() == false) { - ItemType *pItem = m_pBack; - while (pItem != VMA_NULL) { - ItemType *const pPrevItem = pItem->pPrev; +template<typename T> +void VmaRawList<T>::Clear() +{ + if(IsEmpty() == false) + { + ItemType* pItem = m_pBack; + while(pItem != VMA_NULL) + { + ItemType* const pPrevItem = pItem->pPrev; m_ItemAllocator.Free(pItem); pItem = pPrevItem; } @@ -4360,16 +4953,20 @@ void VmaRawList<T>::Clear() { } } -template <typename T> -VmaListItem<T> *VmaRawList<T>::PushBack() { - ItemType *const pNewItem = m_ItemAllocator.Alloc(); +template<typename T> +VmaListItem<T>* VmaRawList<T>::PushBack() +{ + ItemType* const pNewItem = m_ItemAllocator.Alloc(); pNewItem->pNext = VMA_NULL; - if (IsEmpty()) { + if(IsEmpty()) + { pNewItem->pPrev = VMA_NULL; m_pFront = pNewItem; m_pBack = pNewItem; m_Count = 1; - } else { + } + else + { pNewItem->pPrev = m_pBack; m_pBack->pNext = pNewItem; m_pBack = pNewItem; @@ -4378,16 +4975,20 @@ VmaListItem<T> *VmaRawList<T>::PushBack() { return pNewItem; } -template <typename T> -VmaListItem<T> *VmaRawList<T>::PushFront() { - ItemType *const pNewItem = m_ItemAllocator.Alloc(); +template<typename T> +VmaListItem<T>* VmaRawList<T>::PushFront() +{ + ItemType* const pNewItem = m_ItemAllocator.Alloc(); pNewItem->pPrev = VMA_NULL; - if (IsEmpty()) { + if(IsEmpty()) + { pNewItem->pNext = VMA_NULL; m_pFront = pNewItem; m_pBack = pNewItem; m_Count = 1; - } else { + } + else + { pNewItem->pNext = m_pFront; m_pFront->pPrev = pNewItem; m_pFront = pNewItem; @@ -4396,26 +4997,30 @@ VmaListItem<T> *VmaRawList<T>::PushFront() { return pNewItem; } -template <typename T> -VmaListItem<T> *VmaRawList<T>::PushBack(const T &value) { - ItemType *const pNewItem = PushBack(); +template<typename T> +VmaListItem<T>* VmaRawList<T>::PushBack(const T& value) +{ + ItemType* const pNewItem = PushBack(); pNewItem->Value = value; return pNewItem; } -template <typename T> -VmaListItem<T> 
*VmaRawList<T>::PushFront(const T &value) { - ItemType *const pNewItem = PushFront(); +template<typename T> +VmaListItem<T>* VmaRawList<T>::PushFront(const T& value) +{ + ItemType* const pNewItem = PushFront(); pNewItem->Value = value; return pNewItem; } -template <typename T> -void VmaRawList<T>::PopBack() { +template<typename T> +void VmaRawList<T>::PopBack() +{ VMA_HEAVY_ASSERT(m_Count > 0); - ItemType *const pBackItem = m_pBack; - ItemType *const pPrevItem = pBackItem->pPrev; - if (pPrevItem != VMA_NULL) { + ItemType* const pBackItem = m_pBack; + ItemType* const pPrevItem = pBackItem->pPrev; + if(pPrevItem != VMA_NULL) + { pPrevItem->pNext = VMA_NULL; } m_pBack = pPrevItem; @@ -4423,12 +5028,14 @@ void VmaRawList<T>::PopBack() { --m_Count; } -template <typename T> -void VmaRawList<T>::PopFront() { +template<typename T> +void VmaRawList<T>::PopFront() +{ VMA_HEAVY_ASSERT(m_Count > 0); - ItemType *const pFrontItem = m_pFront; - ItemType *const pNextItem = pFrontItem->pNext; - if (pNextItem != VMA_NULL) { + ItemType* const pFrontItem = m_pFront; + ItemType* const pNextItem = pFrontItem->pNext; + if(pNextItem != VMA_NULL) + { pNextItem->pPrev = VMA_NULL; } m_pFront = pNextItem; @@ -4436,21 +5043,28 @@ void VmaRawList<T>::PopFront() { --m_Count; } -template <typename T> -void VmaRawList<T>::Remove(ItemType *pItem) { +template<typename T> +void VmaRawList<T>::Remove(ItemType* pItem) +{ VMA_HEAVY_ASSERT(pItem != VMA_NULL); VMA_HEAVY_ASSERT(m_Count > 0); - if (pItem->pPrev != VMA_NULL) { + if(pItem->pPrev != VMA_NULL) + { pItem->pPrev->pNext = pItem->pNext; - } else { + } + else + { VMA_HEAVY_ASSERT(m_pFront == pItem); m_pFront = pItem->pNext; } - if (pItem->pNext != VMA_NULL) { + if(pItem->pNext != VMA_NULL) + { pItem->pNext->pPrev = pItem->pPrev; - } else { + } + else + { VMA_HEAVY_ASSERT(m_pBack == pItem); m_pBack = pItem->pPrev; } @@ -4459,197 +5073,240 @@ void VmaRawList<T>::Remove(ItemType *pItem) { --m_Count; } -template <typename T> -VmaListItem<T> 
*VmaRawList<T>::InsertBefore(ItemType *pItem) { - if (pItem != VMA_NULL) { - ItemType *const prevItem = pItem->pPrev; - ItemType *const newItem = m_ItemAllocator.Alloc(); +template<typename T> +VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem) +{ + if(pItem != VMA_NULL) + { + ItemType* const prevItem = pItem->pPrev; + ItemType* const newItem = m_ItemAllocator.Alloc(); newItem->pPrev = prevItem; newItem->pNext = pItem; pItem->pPrev = newItem; - if (prevItem != VMA_NULL) { + if(prevItem != VMA_NULL) + { prevItem->pNext = newItem; - } else { + } + else + { VMA_HEAVY_ASSERT(m_pFront == pItem); m_pFront = newItem; } ++m_Count; return newItem; - } else + } + else return PushBack(); } -template <typename T> -VmaListItem<T> *VmaRawList<T>::InsertAfter(ItemType *pItem) { - if (pItem != VMA_NULL) { - ItemType *const nextItem = pItem->pNext; - ItemType *const newItem = m_ItemAllocator.Alloc(); +template<typename T> +VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem) +{ + if(pItem != VMA_NULL) + { + ItemType* const nextItem = pItem->pNext; + ItemType* const newItem = m_ItemAllocator.Alloc(); newItem->pNext = nextItem; newItem->pPrev = pItem; pItem->pNext = newItem; - if (nextItem != VMA_NULL) { + if(nextItem != VMA_NULL) + { nextItem->pPrev = newItem; - } else { + } + else + { VMA_HEAVY_ASSERT(m_pBack == pItem); m_pBack = newItem; } ++m_Count; return newItem; - } else + } + else return PushFront(); } -template <typename T> -VmaListItem<T> *VmaRawList<T>::InsertBefore(ItemType *pItem, const T &value) { - ItemType *const newItem = InsertBefore(pItem); +template<typename T> +VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value) +{ + ItemType* const newItem = InsertBefore(pItem); newItem->Value = value; return newItem; } -template <typename T> -VmaListItem<T> *VmaRawList<T>::InsertAfter(ItemType *pItem, const T &value) { - ItemType *const newItem = InsertAfter(pItem); +template<typename T> +VmaListItem<T>* 
VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value) +{ + ItemType* const newItem = InsertAfter(pItem); newItem->Value = value; return newItem; } -template <typename T, typename AllocatorT> -class VmaList { +template<typename T, typename AllocatorT> +class VmaList +{ VMA_CLASS_NO_COPY(VmaList) public: - class iterator { + class iterator + { public: iterator() : - m_pList(VMA_NULL), - m_pItem(VMA_NULL) { + m_pList(VMA_NULL), + m_pItem(VMA_NULL) + { } - T &operator*() const { + T& operator*() const + { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return m_pItem->Value; } - T *operator->() const { + T* operator->() const + { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return &m_pItem->Value; } - iterator &operator++() { + iterator& operator++() + { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); m_pItem = m_pItem->pNext; return *this; } - iterator &operator--() { - if (m_pItem != VMA_NULL) { + iterator& operator--() + { + if(m_pItem != VMA_NULL) + { m_pItem = m_pItem->pPrev; - } else { + } + else + { VMA_HEAVY_ASSERT(!m_pList->IsEmpty()); m_pItem = m_pList->Back(); } return *this; } - iterator operator++(int) { + iterator operator++(int) + { iterator result = *this; ++*this; return result; } - iterator operator--(int) { + iterator operator--(int) + { iterator result = *this; --*this; return result; } - bool operator==(const iterator &rhs) const { + bool operator==(const iterator& rhs) const + { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem == rhs.m_pItem; } - bool operator!=(const iterator &rhs) const { + bool operator!=(const iterator& rhs) const + { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem != rhs.m_pItem; } - + private: - VmaRawList<T> *m_pList; - VmaListItem<T> *m_pItem; + VmaRawList<T>* m_pList; + VmaListItem<T>* m_pItem; - iterator(VmaRawList<T> *pList, VmaListItem<T> *pItem) : - m_pList(pList), - m_pItem(pItem) { + iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) : + m_pList(pList), + m_pItem(pItem) + { } friend class VmaList<T, AllocatorT>; 
}; - class const_iterator { + class const_iterator + { public: const_iterator() : - m_pList(VMA_NULL), - m_pItem(VMA_NULL) { + m_pList(VMA_NULL), + m_pItem(VMA_NULL) + { } - const_iterator(const iterator &src) : - m_pList(src.m_pList), - m_pItem(src.m_pItem) { + const_iterator(const iterator& src) : + m_pList(src.m_pList), + m_pItem(src.m_pItem) + { } - - const T &operator*() const { + + const T& operator*() const + { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return m_pItem->Value; } - const T *operator->() const { + const T* operator->() const + { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return &m_pItem->Value; } - const_iterator &operator++() { + const_iterator& operator++() + { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); m_pItem = m_pItem->pNext; return *this; } - const_iterator &operator--() { - if (m_pItem != VMA_NULL) { + const_iterator& operator--() + { + if(m_pItem != VMA_NULL) + { m_pItem = m_pItem->pPrev; - } else { + } + else + { VMA_HEAVY_ASSERT(!m_pList->IsEmpty()); m_pItem = m_pList->Back(); } return *this; } - const_iterator operator++(int) { + const_iterator operator++(int) + { const_iterator result = *this; ++*this; return result; } - const_iterator operator--(int) { + const_iterator operator--(int) + { const_iterator result = *this; --*this; return result; } - bool operator==(const const_iterator &rhs) const { + bool operator==(const const_iterator& rhs) const + { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem == rhs.m_pItem; } - bool operator!=(const const_iterator &rhs) const { + bool operator!=(const const_iterator& rhs) const + { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem != rhs.m_pItem; } - + private: - const_iterator(const VmaRawList<T> *pList, const VmaListItem<T> *pItem) : - m_pList(pList), - m_pItem(pItem) { + const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) : + m_pList(pList), + m_pItem(pItem) + { } - const VmaRawList<T> *m_pList; - const VmaListItem<T> *m_pItem; + const VmaRawList<T>* m_pList; + 
const VmaListItem<T>* m_pItem; friend class VmaList<T, AllocatorT>; }; - VmaList(const AllocatorT &allocator) : - m_RawList(allocator.m_pCallbacks) {} + VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { } bool empty() const { return m_RawList.IsEmpty(); } size_t size() const { return m_RawList.GetCount(); } @@ -4661,9 +5318,9 @@ public: const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); } void clear() { m_RawList.Clear(); } - void push_back(const T &value) { m_RawList.PushBack(value); } + void push_back(const T& value) { m_RawList.PushBack(value); } void erase(iterator it) { m_RawList.Remove(it.m_pItem); } - iterator insert(iterator it, const T &value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); } + iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); } private: VmaRawList<T> m_RawList; @@ -4682,18 +5339,18 @@ private: #define VmaPair std::pair #define VMA_MAP_TYPE(KeyT, ValueT) \ - std::unordered_map<KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator<std::pair<KeyT, ValueT> > > + std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > > #else // #if VMA_USE_STL_UNORDERED_MAP template<typename T1, typename T2> struct VmaPair { - T1 first; - T2 second; + T1 first; + T2 second; - VmaPair() : first(), second() { } - VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { } + VmaPair() : first(), second() { } + VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { } }; /* Class compatible with subset of interface of std::unordered_map. 
@@ -4703,20 +5360,20 @@ template<typename KeyT, typename ValueT> class VmaMap { public: - typedef VmaPair<KeyT, ValueT> PairType; - typedef PairType* iterator; - - VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { } + typedef VmaPair<KeyT, ValueT> PairType; + typedef PairType* iterator; - iterator begin() { return m_Vector.begin(); } - iterator end() { return m_Vector.end(); } + VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { } - void insert(const PairType& pair); - iterator find(const KeyT& key); - void erase(iterator it); + iterator begin() { return m_Vector.begin(); } + iterator end() { return m_Vector.end(); } + void insert(const PairType& pair); + iterator find(const KeyT& key); + void erase(iterator it); + private: - VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector; + VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector; }; #define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT> @@ -4724,49 +5381,49 @@ private: template<typename FirstT, typename SecondT> struct VmaPairFirstLess { - bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const - { - return lhs.first < rhs.first; - } - bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const - { - return lhs.first < rhsFirst; - } + bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const + { + return lhs.first < rhs.first; + } + bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const + { + return lhs.first < rhsFirst; + } }; template<typename KeyT, typename ValueT> void VmaMap<KeyT, ValueT>::insert(const PairType& pair) { - const size_t indexToInsert = VmaBinaryFindFirstNotLess( - m_Vector.data(), - m_Vector.data() + m_Vector.size(), - pair, - VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data(); - VmaVectorInsert(m_Vector, indexToInsert, pair); + const size_t indexToInsert = VmaBinaryFindFirstNotLess( + m_Vector.data(), + 
m_Vector.data() + m_Vector.size(), + pair, + VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data(); + VmaVectorInsert(m_Vector, indexToInsert, pair); } template<typename KeyT, typename ValueT> VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key) { - PairType* it = VmaBinaryFindFirstNotLess( - m_Vector.data(), - m_Vector.data() + m_Vector.size(), - key, - VmaPairFirstLess<KeyT, ValueT>()); - if((it != m_Vector.end()) && (it->first == key)) - { - return it; - } - else - { - return m_Vector.end(); - } + PairType* it = VmaBinaryFindFirstNotLess( + m_Vector.data(), + m_Vector.data() + m_Vector.size(), + key, + VmaPairFirstLess<KeyT, ValueT>()); + if((it != m_Vector.end()) && (it->first == key)) + { + return it; + } + else + { + return m_Vector.end(); + } } template<typename KeyT, typename ValueT> void VmaMap<KeyT, ValueT>::erase(iterator it) { - VmaVectorRemove(m_Vector, it - m_Vector.begin()); + VmaVectorRemove(m_Vector, it - m_Vector.begin()); } #endif // #if VMA_USE_STL_UNORDERED_MAP @@ -4777,46 +5434,49 @@ void VmaMap<KeyT, ValueT>::erase(iterator it) class VmaDeviceMemoryBlock; -enum VMA_CACHE_OPERATION { VMA_CACHE_FLUSH, - VMA_CACHE_INVALIDATE }; +enum VMA_CACHE_OPERATION { VMA_CACHE_FLUSH, VMA_CACHE_INVALIDATE }; -struct VmaAllocation_T { +struct VmaAllocation_T +{ private: static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80; - enum FLAGS { + enum FLAGS + { FLAG_USER_DATA_STRING = 0x01, }; public: - enum ALLOCATION_TYPE { + enum ALLOCATION_TYPE + { ALLOCATION_TYPE_NONE, ALLOCATION_TYPE_BLOCK, ALLOCATION_TYPE_DEDICATED, }; /* - This struct cannot have constructor or destructor. It must be POD because it is - allocated using VmaPoolAllocator. 
- */ - - void Ctor(uint32_t currentFrameIndex, bool userDataString) { - m_Alignment = 1; - m_Size = 0; - m_pUserData = VMA_NULL; - m_LastUseFrameIndex = currentFrameIndex; - m_Type = (uint8_t)ALLOCATION_TYPE_NONE; - m_SuballocationType = (uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN; - m_MapCount = 0; - m_Flags = userDataString ? (uint8_t)FLAG_USER_DATA_STRING : 0; + This struct is allocated using VmaPoolAllocator. + */ + VmaAllocation_T(uint32_t currentFrameIndex, bool userDataString) : + m_Alignment{1}, + m_Size{0}, + m_pUserData{VMA_NULL}, + m_LastUseFrameIndex{currentFrameIndex}, + m_MemoryTypeIndex{0}, + m_Type{(uint8_t)ALLOCATION_TYPE_NONE}, + m_SuballocationType{(uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN}, + m_MapCount{0}, + m_Flags{userDataString ? (uint8_t)FLAG_USER_DATA_STRING : (uint8_t)0} + { #if VMA_STATS_STRING_ENABLED m_CreationFrameIndex = currentFrameIndex; m_BufferImageUsage = 0; #endif } - void Dtor() { + ~VmaAllocation_T() + { VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction."); // Check if owned string was freed. @@ -4824,18 +5484,21 @@ public: } void InitBlockAllocation( - VmaDeviceMemoryBlock *block, - VkDeviceSize offset, - VkDeviceSize alignment, - VkDeviceSize size, - VmaSuballocationType suballocationType, - bool mapped, - bool canBecomeLost) { + VmaDeviceMemoryBlock* block, + VkDeviceSize offset, + VkDeviceSize alignment, + VkDeviceSize size, + uint32_t memoryTypeIndex, + VmaSuballocationType suballocationType, + bool mapped, + bool canBecomeLost) + { VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE); VMA_ASSERT(block != VMA_NULL); m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK; m_Alignment = alignment; m_Size = size; + m_MemoryTypeIndex = memoryTypeIndex; m_MapCount = mapped ? 
MAP_COUNT_FLAG_PERSISTENT_MAP : 0; m_SuballocationType = (uint8_t)suballocationType; m_BlockAllocation.m_Block = block; @@ -4843,38 +5506,40 @@ public: m_BlockAllocation.m_CanBecomeLost = canBecomeLost; } - void InitLost() { + void InitLost() + { VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE); VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST); m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK; + m_MemoryTypeIndex = 0; m_BlockAllocation.m_Block = VMA_NULL; m_BlockAllocation.m_Offset = 0; m_BlockAllocation.m_CanBecomeLost = true; } void ChangeBlockAllocation( - VmaAllocator hAllocator, - VmaDeviceMemoryBlock *block, - VkDeviceSize offset); + VmaAllocator hAllocator, + VmaDeviceMemoryBlock* block, + VkDeviceSize offset); - void ChangeSize(VkDeviceSize newSize); void ChangeOffset(VkDeviceSize newOffset); // pMappedData not null means allocation is created with MAPPED flag. void InitDedicatedAllocation( - uint32_t memoryTypeIndex, - VkDeviceMemory hMemory, - VmaSuballocationType suballocationType, - void *pMappedData, - VkDeviceSize size) { + uint32_t memoryTypeIndex, + VkDeviceMemory hMemory, + VmaSuballocationType suballocationType, + void* pMappedData, + VkDeviceSize size) + { VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE); VMA_ASSERT(hMemory != VK_NULL_HANDLE); m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED; m_Alignment = 0; m_Size = size; + m_MemoryTypeIndex = memoryTypeIndex; m_SuballocationType = (uint8_t)suballocationType; m_MapCount = (pMappedData != VMA_NULL) ? 
MAP_COUNT_FLAG_PERSISTENT_MAP : 0; - m_DedicatedAllocation.m_MemoryTypeIndex = memoryTypeIndex; m_DedicatedAllocation.m_hMemory = hMemory; m_DedicatedAllocation.m_pMappedData = pMappedData; } @@ -4883,38 +5548,42 @@ public: VkDeviceSize GetAlignment() const { return m_Alignment; } VkDeviceSize GetSize() const { return m_Size; } bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; } - void *GetUserData() const { return m_pUserData; } - void SetUserData(VmaAllocator hAllocator, void *pUserData); + void* GetUserData() const { return m_pUserData; } + void SetUserData(VmaAllocator hAllocator, void* pUserData); VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; } - VmaDeviceMemoryBlock *GetBlock() const { + VmaDeviceMemoryBlock* GetBlock() const + { VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK); return m_BlockAllocation.m_Block; } VkDeviceSize GetOffset() const; VkDeviceMemory GetMemory() const; - uint32_t GetMemoryTypeIndex() const; + uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; } bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; } - void *GetMappedData() const; + void* GetMappedData() const; bool CanBecomeLost() const; - - uint32_t GetLastUseFrameIndex() const { + + uint32_t GetLastUseFrameIndex() const + { return m_LastUseFrameIndex.load(); } - bool CompareExchangeLastUseFrameIndex(uint32_t &expected, uint32_t desired) { + bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired) + { return m_LastUseFrameIndex.compare_exchange_weak(expected, desired); } /* - - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex, - makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true. - - Else, returns false. - - If hAllocation is already lost, assert - you should not call it then. - If hAllocation was not created with CAN_BECOME_LOST_BIT, assert. 
- */ + - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex, + makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true. + - Else, returns false. + + If hAllocation is already lost, assert - you should not call it then. + If hAllocation was not created with CAN_BECOME_LOST_BIT, assert. + */ bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount); - void DedicatedAllocCalcStatsInfo(VmaStatInfo &outInfo) { + void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo) + { VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED); outInfo.blockCount = 1; outInfo.allocationCount = 1; @@ -4928,26 +5597,28 @@ public: void BlockAllocMap(); void BlockAllocUnmap(); - VkResult DedicatedAllocMap(VmaAllocator hAllocator, void **ppData); + VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData); void DedicatedAllocUnmap(VmaAllocator hAllocator); #if VMA_STATS_STRING_ENABLED uint32_t GetCreationFrameIndex() const { return m_CreationFrameIndex; } uint32_t GetBufferImageUsage() const { return m_BufferImageUsage; } - void InitBufferImageUsage(uint32_t bufferImageUsage) { + void InitBufferImageUsage(uint32_t bufferImageUsage) + { VMA_ASSERT(m_BufferImageUsage == 0); m_BufferImageUsage = bufferImageUsage; } - void PrintParameters(class VmaJsonWriter &json) const; + void PrintParameters(class VmaJsonWriter& json) const; #endif private: VkDeviceSize m_Alignment; VkDeviceSize m_Size; - void *m_pUserData; + void* m_pUserData; VMA_ATOMIC_UINT32 m_LastUseFrameIndex; + uint32_t m_MemoryTypeIndex; uint8_t m_Type; // ALLOCATION_TYPE uint8_t m_SuballocationType; // VmaSuballocationType // Bit 0x80 is set when allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT. @@ -4956,20 +5627,22 @@ private: uint8_t m_Flags; // enum FLAGS // Allocation out of VmaDeviceMemoryBlock. 
- struct BlockAllocation { - VmaDeviceMemoryBlock *m_Block; + struct BlockAllocation + { + VmaDeviceMemoryBlock* m_Block; VkDeviceSize m_Offset; bool m_CanBecomeLost; }; // Allocation for an object that has its own private VkDeviceMemory. - struct DedicatedAllocation { - uint32_t m_MemoryTypeIndex; + struct DedicatedAllocation + { VkDeviceMemory m_hMemory; - void *m_pMappedData; // Not null means memory is mapped. + void* m_pMappedData; // Not null means memory is mapped. }; - union { + union + { // Allocation out of VmaDeviceMemoryBlock. BlockAllocation m_BlockAllocation; // Allocation for an object that has its own private VkDeviceMemory. @@ -4988,7 +5661,8 @@ private: Represents a region of VmaDeviceMemoryBlock that is either assigned and returned as allocated memory block or free. */ -struct VmaSuballocation { +struct VmaSuballocation +{ VkDeviceSize offset; VkDeviceSize size; VmaAllocation hAllocation; @@ -4996,23 +5670,28 @@ struct VmaSuballocation { }; // Comparator for offsets. -struct VmaSuballocationOffsetLess { - bool operator()(const VmaSuballocation &lhs, const VmaSuballocation &rhs) const { +struct VmaSuballocationOffsetLess +{ + bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const + { return lhs.offset < rhs.offset; } }; -struct VmaSuballocationOffsetGreater { - bool operator()(const VmaSuballocation &lhs, const VmaSuballocation &rhs) const { +struct VmaSuballocationOffsetGreater +{ + bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const + { return lhs.offset > rhs.offset; } }; -typedef VmaList<VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList; +typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList; // Cost of one additional allocation lost, as equivalent in bytes. 
static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576; -enum class VmaAllocationRequestType { +enum class VmaAllocationRequestType +{ Normal, // Used by "Linear" algorithm. UpperAddress, @@ -5033,16 +5712,18 @@ If canMakeOtherLost was true: - itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for the requested allocation to succeed. */ -struct VmaAllocationRequest { +struct VmaAllocationRequest +{ VkDeviceSize offset; VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation. VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation. VmaSuballocationList::iterator item; size_t itemsToMakeLostCount; - void *customData; + void* customData; VmaAllocationRequestType type; - VkDeviceSize CalcCost() const { + VkDeviceSize CalcCost() const + { return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST; } }; @@ -5051,10 +5732,11 @@ struct VmaAllocationRequest { Data structure used for bookkeeping of allocations and unused ranges of memory in a single VkDeviceMemory block. */ -class VmaBlockMetadata { +class VmaBlockMetadata +{ public: VmaBlockMetadata(VmaAllocator hAllocator); - virtual ~VmaBlockMetadata() {} + virtual ~VmaBlockMetadata() { } virtual void Init(VkDeviceSize size) { m_Size = size; } // Validates all data structures inside this object. If not valid, returns false. @@ -5066,84 +5748,79 @@ public: // Returns true if this block is empty - contains only single free suballocation. virtual bool IsEmpty() const = 0; - virtual void CalcAllocationStatInfo(VmaStatInfo &outInfo) const = 0; + virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const = 0; // Shouldn't modify blockCount. 
- virtual void AddPoolStats(VmaPoolStats &inoutStats) const = 0; + virtual void AddPoolStats(VmaPoolStats& inoutStats) const = 0; #if VMA_STATS_STRING_ENABLED - virtual void PrintDetailedMap(class VmaJsonWriter &json) const = 0; + virtual void PrintDetailedMap(class VmaJsonWriter& json) const = 0; #endif // Tries to find a place for suballocation with given parameters inside this block. // If succeeded, fills pAllocationRequest and returns true. // If failed, returns false. virtual bool CreateAllocationRequest( - uint32_t currentFrameIndex, - uint32_t frameInUseCount, - VkDeviceSize bufferImageGranularity, - VkDeviceSize allocSize, - VkDeviceSize allocAlignment, - bool upperAddress, - VmaSuballocationType allocType, - bool canMakeOtherLost, - // Always one of VMA_ALLOCATION_CREATE_STRATEGY_* or VMA_ALLOCATION_INTERNAL_STRATEGY_* flags. - uint32_t strategy, - VmaAllocationRequest *pAllocationRequest) = 0; + uint32_t currentFrameIndex, + uint32_t frameInUseCount, + VkDeviceSize bufferImageGranularity, + VkDeviceSize allocSize, + VkDeviceSize allocAlignment, + bool upperAddress, + VmaSuballocationType allocType, + bool canMakeOtherLost, + // Always one of VMA_ALLOCATION_CREATE_STRATEGY_* or VMA_ALLOCATION_INTERNAL_STRATEGY_* flags. + uint32_t strategy, + VmaAllocationRequest* pAllocationRequest) = 0; virtual bool MakeRequestedAllocationsLost( - uint32_t currentFrameIndex, - uint32_t frameInUseCount, - VmaAllocationRequest *pAllocationRequest) = 0; + uint32_t currentFrameIndex, + uint32_t frameInUseCount, + VmaAllocationRequest* pAllocationRequest) = 0; virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount) = 0; - virtual VkResult CheckCorruption(const void *pBlockData) = 0; + virtual VkResult CheckCorruption(const void* pBlockData) = 0; // Makes actual allocation based on request. Request must already be checked and valid. 
virtual void Alloc( - const VmaAllocationRequest &request, - VmaSuballocationType type, - VkDeviceSize allocSize, - VmaAllocation hAllocation) = 0; + const VmaAllocationRequest& request, + VmaSuballocationType type, + VkDeviceSize allocSize, + VmaAllocation hAllocation) = 0; // Frees suballocation assigned to given memory region. virtual void Free(const VmaAllocation allocation) = 0; virtual void FreeAtOffset(VkDeviceSize offset) = 0; - // Tries to resize (grow or shrink) space for given allocation, in place. - virtual bool ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize) { return false; } - protected: - const VkAllocationCallbacks *GetAllocationCallbacks() const { return m_pAllocationCallbacks; } + const VkAllocationCallbacks* GetAllocationCallbacks() const { return m_pAllocationCallbacks; } #if VMA_STATS_STRING_ENABLED - void PrintDetailedMap_Begin(class VmaJsonWriter &json, - VkDeviceSize unusedBytes, - size_t allocationCount, - size_t unusedRangeCount) const; - void PrintDetailedMap_Allocation(class VmaJsonWriter &json, - VkDeviceSize offset, - VmaAllocation hAllocation) const; - void PrintDetailedMap_UnusedRange(class VmaJsonWriter &json, - VkDeviceSize offset, - VkDeviceSize size) const; - void PrintDetailedMap_End(class VmaJsonWriter &json) const; + void PrintDetailedMap_Begin(class VmaJsonWriter& json, + VkDeviceSize unusedBytes, + size_t allocationCount, + size_t unusedRangeCount) const; + void PrintDetailedMap_Allocation(class VmaJsonWriter& json, + VkDeviceSize offset, + VmaAllocation hAllocation) const; + void PrintDetailedMap_UnusedRange(class VmaJsonWriter& json, + VkDeviceSize offset, + VkDeviceSize size) const; + void PrintDetailedMap_End(class VmaJsonWriter& json) const; #endif private: VkDeviceSize m_Size; - const VkAllocationCallbacks *m_pAllocationCallbacks; + const VkAllocationCallbacks* m_pAllocationCallbacks; }; -#define VMA_VALIDATE(cond) \ - do { \ - if (!(cond)) { \ - VMA_ASSERT(0 && "Validation failed: " #cond); \ - 
return false; \ - } \ - } while (false) +#define VMA_VALIDATE(cond) do { if(!(cond)) { \ + VMA_ASSERT(0 && "Validation failed: " #cond); \ + return false; \ + } } while(false) -class VmaBlockMetadata_Generic : public VmaBlockMetadata { +class VmaBlockMetadata_Generic : public VmaBlockMetadata +{ VMA_CLASS_NO_COPY(VmaBlockMetadata_Generic) public: VmaBlockMetadata_Generic(VmaAllocator hAllocator); @@ -5156,51 +5833,49 @@ public: virtual VkDeviceSize GetUnusedRangeSizeMax() const; virtual bool IsEmpty() const; - virtual void CalcAllocationStatInfo(VmaStatInfo &outInfo) const; - virtual void AddPoolStats(VmaPoolStats &inoutStats) const; + virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const; + virtual void AddPoolStats(VmaPoolStats& inoutStats) const; #if VMA_STATS_STRING_ENABLED - virtual void PrintDetailedMap(class VmaJsonWriter &json) const; + virtual void PrintDetailedMap(class VmaJsonWriter& json) const; #endif virtual bool CreateAllocationRequest( - uint32_t currentFrameIndex, - uint32_t frameInUseCount, - VkDeviceSize bufferImageGranularity, - VkDeviceSize allocSize, - VkDeviceSize allocAlignment, - bool upperAddress, - VmaSuballocationType allocType, - bool canMakeOtherLost, - uint32_t strategy, - VmaAllocationRequest *pAllocationRequest); + uint32_t currentFrameIndex, + uint32_t frameInUseCount, + VkDeviceSize bufferImageGranularity, + VkDeviceSize allocSize, + VkDeviceSize allocAlignment, + bool upperAddress, + VmaSuballocationType allocType, + bool canMakeOtherLost, + uint32_t strategy, + VmaAllocationRequest* pAllocationRequest); virtual bool MakeRequestedAllocationsLost( - uint32_t currentFrameIndex, - uint32_t frameInUseCount, - VmaAllocationRequest *pAllocationRequest); + uint32_t currentFrameIndex, + uint32_t frameInUseCount, + VmaAllocationRequest* pAllocationRequest); virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount); - virtual VkResult CheckCorruption(const void *pBlockData); + virtual VkResult 
CheckCorruption(const void* pBlockData); virtual void Alloc( - const VmaAllocationRequest &request, - VmaSuballocationType type, - VkDeviceSize allocSize, - VmaAllocation hAllocation); + const VmaAllocationRequest& request, + VmaSuballocationType type, + VkDeviceSize allocSize, + VmaAllocation hAllocation); virtual void Free(const VmaAllocation allocation); virtual void FreeAtOffset(VkDeviceSize offset); - virtual bool ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize); - //////////////////////////////////////////////////////////////////////////////// // For defragmentation - + bool IsBufferImageGranularityConflictPossible( - VkDeviceSize bufferImageGranularity, - VmaSuballocationType &inOutPrevSuballocType) const; + VkDeviceSize bufferImageGranularity, + VmaSuballocationType& inOutPrevSuballocType) const; private: friend class VmaDefragmentationAlgorithm_Generic; @@ -5211,25 +5886,25 @@ private: VmaSuballocationList m_Suballocations; // Suballocations that are free and have size greater than certain threshold. // Sorted by size, ascending. - VmaVector<VmaSuballocationList::iterator, VmaStlAllocator<VmaSuballocationList::iterator> > m_FreeSuballocationsBySize; + VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize; bool ValidateFreeSuballocationList() const; // Checks if requested suballocation with given parameters can be placed in given pFreeSuballocItem. // If yes, fills pOffset and returns true. If no, returns false. 
bool CheckAllocation( - uint32_t currentFrameIndex, - uint32_t frameInUseCount, - VkDeviceSize bufferImageGranularity, - VkDeviceSize allocSize, - VkDeviceSize allocAlignment, - VmaSuballocationType allocType, - VmaSuballocationList::const_iterator suballocItem, - bool canMakeOtherLost, - VkDeviceSize *pOffset, - size_t *itemsToMakeLostCount, - VkDeviceSize *pSumFreeSize, - VkDeviceSize *pSumItemSize) const; + uint32_t currentFrameIndex, + uint32_t frameInUseCount, + VkDeviceSize bufferImageGranularity, + VkDeviceSize allocSize, + VkDeviceSize allocAlignment, + VmaSuballocationType allocType, + VmaSuballocationList::const_iterator suballocItem, + bool canMakeOtherLost, + VkDeviceSize* pOffset, + size_t* itemsToMakeLostCount, + VkDeviceSize* pSumFreeSize, + VkDeviceSize* pSumItemSize) const; // Given free suballocation, it merges it with following one, which must also be free. void MergeFreeWithNext(VmaSuballocationList::iterator item); // Releases given suballocation, making it free. @@ -5249,80 +5924,81 @@ Allocations and their references in internal data structure look like this: if(m_2ndVectorMode == SECOND_VECTOR_EMPTY): - 0 +-------+ - | | - | | - | | - +-------+ - | Alloc | 1st[m_1stNullItemsBeginCount] - +-------+ - | Alloc | 1st[m_1stNullItemsBeginCount + 1] - +-------+ - | ... | - +-------+ - | Alloc | 1st[1st.size() - 1] - +-------+ - | | - | | - | | + 0 +-------+ + | | + | | + | | + +-------+ + | Alloc | 1st[m_1stNullItemsBeginCount] + +-------+ + | Alloc | 1st[m_1stNullItemsBeginCount + 1] + +-------+ + | ... | + +-------+ + | Alloc | 1st[1st.size() - 1] + +-------+ + | | + | | + | | GetSize() +-------+ if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER): - 0 +-------+ - | Alloc | 2nd[0] - +-------+ - | Alloc | 2nd[1] - +-------+ - | ... | - +-------+ - | Alloc | 2nd[2nd.size() - 1] - +-------+ - | | - | | - | | - +-------+ - | Alloc | 1st[m_1stNullItemsBeginCount] - +-------+ - | Alloc | 1st[m_1stNullItemsBeginCount + 1] - +-------+ - | ... 
| - +-------+ - | Alloc | 1st[1st.size() - 1] - +-------+ - | | + 0 +-------+ + | Alloc | 2nd[0] + +-------+ + | Alloc | 2nd[1] + +-------+ + | ... | + +-------+ + | Alloc | 2nd[2nd.size() - 1] + +-------+ + | | + | | + | | + +-------+ + | Alloc | 1st[m_1stNullItemsBeginCount] + +-------+ + | Alloc | 1st[m_1stNullItemsBeginCount + 1] + +-------+ + | ... | + +-------+ + | Alloc | 1st[1st.size() - 1] + +-------+ + | | GetSize() +-------+ if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK): - 0 +-------+ - | | - | | - | | - +-------+ - | Alloc | 1st[m_1stNullItemsBeginCount] - +-------+ - | Alloc | 1st[m_1stNullItemsBeginCount + 1] - +-------+ - | ... | - +-------+ - | Alloc | 1st[1st.size() - 1] - +-------+ - | | - | | - | | - +-------+ - | Alloc | 2nd[2nd.size() - 1] - +-------+ - | ... | - +-------+ - | Alloc | 2nd[1] - +-------+ - | Alloc | 2nd[0] + 0 +-------+ + | | + | | + | | + +-------+ + | Alloc | 1st[m_1stNullItemsBeginCount] + +-------+ + | Alloc | 1st[m_1stNullItemsBeginCount + 1] + +-------+ + | ... | + +-------+ + | Alloc | 1st[1st.size() - 1] + +-------+ + | | + | | + | | + +-------+ + | Alloc | 2nd[2nd.size() - 1] + +-------+ + | ... 
| + +-------+ + | Alloc | 2nd[1] + +-------+ + | Alloc | 2nd[0] GetSize() +-------+ */ -class VmaBlockMetadata_Linear : public VmaBlockMetadata { +class VmaBlockMetadata_Linear : public VmaBlockMetadata +{ VMA_CLASS_NO_COPY(VmaBlockMetadata_Linear) public: VmaBlockMetadata_Linear(VmaAllocator hAllocator); @@ -5335,65 +6011,66 @@ public: virtual VkDeviceSize GetUnusedRangeSizeMax() const; virtual bool IsEmpty() const { return GetAllocationCount() == 0; } - virtual void CalcAllocationStatInfo(VmaStatInfo &outInfo) const; - virtual void AddPoolStats(VmaPoolStats &inoutStats) const; + virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const; + virtual void AddPoolStats(VmaPoolStats& inoutStats) const; #if VMA_STATS_STRING_ENABLED - virtual void PrintDetailedMap(class VmaJsonWriter &json) const; + virtual void PrintDetailedMap(class VmaJsonWriter& json) const; #endif virtual bool CreateAllocationRequest( - uint32_t currentFrameIndex, - uint32_t frameInUseCount, - VkDeviceSize bufferImageGranularity, - VkDeviceSize allocSize, - VkDeviceSize allocAlignment, - bool upperAddress, - VmaSuballocationType allocType, - bool canMakeOtherLost, - uint32_t strategy, - VmaAllocationRequest *pAllocationRequest); + uint32_t currentFrameIndex, + uint32_t frameInUseCount, + VkDeviceSize bufferImageGranularity, + VkDeviceSize allocSize, + VkDeviceSize allocAlignment, + bool upperAddress, + VmaSuballocationType allocType, + bool canMakeOtherLost, + uint32_t strategy, + VmaAllocationRequest* pAllocationRequest); virtual bool MakeRequestedAllocationsLost( - uint32_t currentFrameIndex, - uint32_t frameInUseCount, - VmaAllocationRequest *pAllocationRequest); + uint32_t currentFrameIndex, + uint32_t frameInUseCount, + VmaAllocationRequest* pAllocationRequest); virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount); - virtual VkResult CheckCorruption(const void *pBlockData); + virtual VkResult CheckCorruption(const void* pBlockData); virtual void 
Alloc( - const VmaAllocationRequest &request, - VmaSuballocationType type, - VkDeviceSize allocSize, - VmaAllocation hAllocation); + const VmaAllocationRequest& request, + VmaSuballocationType type, + VkDeviceSize allocSize, + VmaAllocation hAllocation); virtual void Free(const VmaAllocation allocation); virtual void FreeAtOffset(VkDeviceSize offset); private: /* - There are two suballocation vectors, used in ping-pong way. - The one with index m_1stVectorIndex is called 1st. - The one with index (m_1stVectorIndex ^ 1) is called 2nd. - 2nd can be non-empty only when 1st is not empty. - When 2nd is not empty, m_2ndVectorMode indicates its mode of operation. - */ - typedef VmaVector<VmaSuballocation, VmaStlAllocator<VmaSuballocation> > SuballocationVectorType; - - enum SECOND_VECTOR_MODE { + There are two suballocation vectors, used in ping-pong way. + The one with index m_1stVectorIndex is called 1st. + The one with index (m_1stVectorIndex ^ 1) is called 2nd. + 2nd can be non-empty only when 1st is not empty. + When 2nd is not empty, m_2ndVectorMode indicates its mode of operation. + */ + typedef VmaVector< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > SuballocationVectorType; + + enum SECOND_VECTOR_MODE + { SECOND_VECTOR_EMPTY, /* - Suballocations in 2nd vector are created later than the ones in 1st, but they - all have smaller offset. - */ + Suballocations in 2nd vector are created later than the ones in 1st, but they + all have smaller offset. + */ SECOND_VECTOR_RING_BUFFER, /* - Suballocations in 2nd vector are upper side of double stack. - They all have offsets higher than those in 1st vector. - Top of this stack means smaller offsets, but higher indices in this vector. - */ + Suballocations in 2nd vector are upper side of double stack. + They all have offsets higher than those in 1st vector. + Top of this stack means smaller offsets, but higher indices in this vector. 
+ */ SECOND_VECTOR_DOUBLE_STACK, }; @@ -5402,11 +6079,11 @@ private: uint32_t m_1stVectorIndex; SECOND_VECTOR_MODE m_2ndVectorMode; - SuballocationVectorType &AccessSuballocations1st() { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; } - SuballocationVectorType &AccessSuballocations2nd() { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; } - const SuballocationVectorType &AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; } - const SuballocationVectorType &AccessSuballocations2nd() const { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; } - + SuballocationVectorType& AccessSuballocations1st() { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; } + SuballocationVectorType& AccessSuballocations2nd() { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; } + const SuballocationVectorType& AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; } + const SuballocationVectorType& AccessSuballocations2nd() const { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; } + // Number of items in 1st vector with hAllocation = null at the beginning. size_t m_1stNullItemsBeginCount; // Number of other items in 1st vector with hAllocation = null somewhere in the middle. 
@@ -5418,25 +6095,25 @@ private: void CleanupAfterFree(); bool CreateAllocationRequest_LowerAddress( - uint32_t currentFrameIndex, - uint32_t frameInUseCount, - VkDeviceSize bufferImageGranularity, - VkDeviceSize allocSize, - VkDeviceSize allocAlignment, - VmaSuballocationType allocType, - bool canMakeOtherLost, - uint32_t strategy, - VmaAllocationRequest *pAllocationRequest); + uint32_t currentFrameIndex, + uint32_t frameInUseCount, + VkDeviceSize bufferImageGranularity, + VkDeviceSize allocSize, + VkDeviceSize allocAlignment, + VmaSuballocationType allocType, + bool canMakeOtherLost, + uint32_t strategy, + VmaAllocationRequest* pAllocationRequest); bool CreateAllocationRequest_UpperAddress( - uint32_t currentFrameIndex, - uint32_t frameInUseCount, - VkDeviceSize bufferImageGranularity, - VkDeviceSize allocSize, - VkDeviceSize allocAlignment, - VmaSuballocationType allocType, - bool canMakeOtherLost, - uint32_t strategy, - VmaAllocationRequest *pAllocationRequest); + uint32_t currentFrameIndex, + uint32_t frameInUseCount, + VkDeviceSize bufferImageGranularity, + VkDeviceSize allocSize, + VkDeviceSize allocAlignment, + VmaSuballocationType allocType, + bool canMakeOtherLost, + uint32_t strategy, + VmaAllocationRequest* pAllocationRequest); }; /* @@ -5450,7 +6127,8 @@ Node at level 0 has size = m_UsableSize. Each next level contains nodes with size 2 times smaller than current level. m_LevelCount is the maximum number of levels to use in the current object. 
*/ -class VmaBlockMetadata_Buddy : public VmaBlockMetadata { +class VmaBlockMetadata_Buddy : public VmaBlockMetadata +{ VMA_CLASS_NO_COPY(VmaBlockMetadata_Buddy) public: VmaBlockMetadata_Buddy(VmaAllocator hAllocator); @@ -5463,39 +6141,39 @@ public: virtual VkDeviceSize GetUnusedRangeSizeMax() const; virtual bool IsEmpty() const { return m_Root->type == Node::TYPE_FREE; } - virtual void CalcAllocationStatInfo(VmaStatInfo &outInfo) const; - virtual void AddPoolStats(VmaPoolStats &inoutStats) const; + virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const; + virtual void AddPoolStats(VmaPoolStats& inoutStats) const; #if VMA_STATS_STRING_ENABLED - virtual void PrintDetailedMap(class VmaJsonWriter &json) const; + virtual void PrintDetailedMap(class VmaJsonWriter& json) const; #endif virtual bool CreateAllocationRequest( - uint32_t currentFrameIndex, - uint32_t frameInUseCount, - VkDeviceSize bufferImageGranularity, - VkDeviceSize allocSize, - VkDeviceSize allocAlignment, - bool upperAddress, - VmaSuballocationType allocType, - bool canMakeOtherLost, - uint32_t strategy, - VmaAllocationRequest *pAllocationRequest); + uint32_t currentFrameIndex, + uint32_t frameInUseCount, + VkDeviceSize bufferImageGranularity, + VkDeviceSize allocSize, + VkDeviceSize allocAlignment, + bool upperAddress, + VmaSuballocationType allocType, + bool canMakeOtherLost, + uint32_t strategy, + VmaAllocationRequest* pAllocationRequest); virtual bool MakeRequestedAllocationsLost( - uint32_t currentFrameIndex, - uint32_t frameInUseCount, - VmaAllocationRequest *pAllocationRequest); + uint32_t currentFrameIndex, + uint32_t frameInUseCount, + VmaAllocationRequest* pAllocationRequest); virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount); - virtual VkResult CheckCorruption(const void *pBlockData) { return VK_ERROR_FEATURE_NOT_PRESENT; } + virtual VkResult CheckCorruption(const void* pBlockData) { return VK_ERROR_FEATURE_NOT_PRESENT; } virtual void Alloc( 
- const VmaAllocationRequest &request, - VmaSuballocationType type, - VkDeviceSize allocSize, - VmaAllocation hAllocation); + const VmaAllocationRequest& request, + VmaSuballocationType type, + VkDeviceSize allocSize, + VmaAllocation hAllocation); virtual void Free(const VmaAllocation allocation) { FreeAtOffset(allocation, allocation->GetOffset()); } virtual void FreeAtOffset(VkDeviceSize offset) { FreeAtOffset(VMA_NULL, offset); } @@ -5504,33 +6182,37 @@ private: static const VkDeviceSize MIN_NODE_SIZE = 32; static const size_t MAX_LEVELS = 30; - struct ValidationContext { + struct ValidationContext + { size_t calculatedAllocationCount; size_t calculatedFreeCount; VkDeviceSize calculatedSumFreeSize; ValidationContext() : - calculatedAllocationCount(0), - calculatedFreeCount(0), - calculatedSumFreeSize(0) {} + calculatedAllocationCount(0), + calculatedFreeCount(0), + calculatedSumFreeSize(0) { } }; - struct Node { + struct Node + { VkDeviceSize offset; - enum TYPE { + enum TYPE + { TYPE_FREE, TYPE_ALLOCATION, TYPE_SPLIT, TYPE_COUNT } type; - Node *parent; - Node *buddy; + Node* parent; + Node* buddy; - union { + union + { struct { - Node *prev; - Node *next; + Node* prev; + Node* next; } free; struct { @@ -5538,7 +6220,7 @@ private: } allocation; struct { - Node *leftChild; + Node* leftChild; } split; }; }; @@ -5547,10 +6229,10 @@ private: VkDeviceSize m_UsableSize; uint32_t m_LevelCount; - Node *m_Root; + Node* m_Root; struct { - Node *front; - Node *back; + Node* front; + Node* back; } m_FreeList[MAX_LEVELS]; // Number of nodes in the tree with type == TYPE_ALLOCATION. 
size_t m_AllocationCount; @@ -5560,24 +6242,24 @@ private: VkDeviceSize m_SumFreeSize; VkDeviceSize GetUnusableSize() const { return GetSize() - m_UsableSize; } - void DeleteNode(Node *node); - bool ValidateNode(ValidationContext &ctx, const Node *parent, const Node *curr, uint32_t level, VkDeviceSize levelNodeSize) const; + void DeleteNode(Node* node); + bool ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const; uint32_t AllocSizeToLevel(VkDeviceSize allocSize) const; inline VkDeviceSize LevelToNodeSize(uint32_t level) const { return m_UsableSize >> level; } // Alloc passed just for validation. Can be null. void FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset); - void CalcAllocationStatInfoNode(VmaStatInfo &outInfo, const Node *node, VkDeviceSize levelNodeSize) const; + void CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const; // Adds node to the front of FreeList at given level. // node->type must be FREE. // node->free.prev, next can be undefined. - void AddToFreeListFront(uint32_t level, Node *node); + void AddToFreeListFront(uint32_t level, Node* node); // Removes node from FreeList at given level. // node->type must be FREE. // node->free.prev, next stay untouched. - void RemoveFromFreeList(uint32_t level, Node *node); + void RemoveFromFreeList(uint32_t level, Node* node); #if VMA_STATS_STRING_ENABLED - void PrintDetailedMapNode(class VmaJsonWriter &json, const Node *node, VkDeviceSize levelNodeSize) const; + void PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const; #endif }; @@ -5587,35 +6269,37 @@ data about its regions (aka suballocations, #VmaAllocation), assigned and free. Thread-safety: This class must be externally synchronized. 
*/ -class VmaDeviceMemoryBlock { +class VmaDeviceMemoryBlock +{ VMA_CLASS_NO_COPY(VmaDeviceMemoryBlock) public: - VmaBlockMetadata *m_pMetadata; + VmaBlockMetadata* m_pMetadata; VmaDeviceMemoryBlock(VmaAllocator hAllocator); - ~VmaDeviceMemoryBlock() { + ~VmaDeviceMemoryBlock() + { VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped."); VMA_ASSERT(m_hMemory == VK_NULL_HANDLE); } // Always call after construction. void Init( - VmaAllocator hAllocator, - VmaPool hParentPool, - uint32_t newMemoryTypeIndex, - VkDeviceMemory newMemory, - VkDeviceSize newSize, - uint32_t id, - uint32_t algorithm); + VmaAllocator hAllocator, + VmaPool hParentPool, + uint32_t newMemoryTypeIndex, + VkDeviceMemory newMemory, + VkDeviceSize newSize, + uint32_t id, + uint32_t algorithm); // Always call before destruction. void Destroy(VmaAllocator allocator); - + VmaPool GetParentPool() const { return m_hParentPool; } VkDeviceMemory GetDeviceMemory() const { return m_hMemory; } uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; } uint32_t GetId() const { return m_Id; } - void *GetMappedData() const { return m_pMappedData; } + void* GetMappedData() const { return m_pMappedData; } // Validates all data structures inside this object. If not valid, returns false. bool Validate() const; @@ -5623,20 +6307,24 @@ public: VkResult CheckCorruption(VmaAllocator hAllocator); // ppData can be null. 
- VkResult Map(VmaAllocator hAllocator, uint32_t count, void **ppData); + VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData); void Unmap(VmaAllocator hAllocator, uint32_t count); VkResult WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize); VkResult ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize); VkResult BindBufferMemory( - const VmaAllocator hAllocator, - const VmaAllocation hAllocation, - VkBuffer hBuffer); + const VmaAllocator hAllocator, + const VmaAllocation hAllocation, + VkDeviceSize allocationLocalOffset, + VkBuffer hBuffer, + const void* pNext); VkResult BindImageMemory( - const VmaAllocator hAllocator, - const VmaAllocation hAllocation, - VkImage hImage); + const VmaAllocator hAllocator, + const VmaAllocation hAllocation, + VkDeviceSize allocationLocalOffset, + VkImage hImage, + const void* pNext); private: VmaPool m_hParentPool; // VK_NULL_HANDLE if not belongs to custom pool. @@ -5645,27 +6333,33 @@ private: VkDeviceMemory m_hMemory; /* - Protects access to m_hMemory so it's not used by multiple threads simultaneously, e.g. vkMapMemory, vkBindBufferMemory. - Also protects m_MapCount, m_pMappedData. - Allocations, deallocations, any change in m_pMetadata is protected by parent's VmaBlockVector::m_Mutex. - */ + Protects access to m_hMemory so it's not used by multiple threads simultaneously, e.g. vkMapMemory, vkBindBufferMemory. + Also protects m_MapCount, m_pMappedData. + Allocations, deallocations, any change in m_pMetadata is protected by parent's VmaBlockVector::m_Mutex. 
+ */ VMA_MUTEX m_Mutex; uint32_t m_MapCount; - void *m_pMappedData; + void* m_pMappedData; }; -struct VmaPointerLess { - bool operator()(const void *lhs, const void *rhs) const { +struct VmaPointerLess +{ + bool operator()(const void* lhs, const void* rhs) const + { return lhs < rhs; } }; -struct VmaDefragmentationMove { +struct VmaDefragmentationMove +{ size_t srcBlockIndex; size_t dstBlockIndex; VkDeviceSize srcOffset; VkDeviceSize dstOffset; VkDeviceSize size; + VmaAllocation hAllocation; + VmaDeviceMemoryBlock* pSrcBlock; + VmaDeviceMemoryBlock* pDstBlock; }; class VmaDefragmentationAlgorithm; @@ -5676,77 +6370,86 @@ Vulkan memory type. Synchronized internally with a mutex. */ -struct VmaBlockVector { +struct VmaBlockVector +{ VMA_CLASS_NO_COPY(VmaBlockVector) public: VmaBlockVector( - VmaAllocator hAllocator, - VmaPool hParentPool, - uint32_t memoryTypeIndex, - VkDeviceSize preferredBlockSize, - size_t minBlockCount, - size_t maxBlockCount, - VkDeviceSize bufferImageGranularity, - uint32_t frameInUseCount, - bool isCustomPool, - bool explicitBlockSize, - uint32_t algorithm); + VmaAllocator hAllocator, + VmaPool hParentPool, + uint32_t memoryTypeIndex, + VkDeviceSize preferredBlockSize, + size_t minBlockCount, + size_t maxBlockCount, + VkDeviceSize bufferImageGranularity, + uint32_t frameInUseCount, + bool explicitBlockSize, + uint32_t algorithm); ~VmaBlockVector(); VkResult CreateMinBlocks(); + VmaAllocator GetAllocator() const { return m_hAllocator; } VmaPool GetParentPool() const { return m_hParentPool; } + bool IsCustomPool() const { return m_hParentPool != VMA_NULL; } uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; } VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; } VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; } uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; } uint32_t GetAlgorithm() const { return m_Algorithm; } - void GetPoolStats(VmaPoolStats *pStats); + void 
GetPoolStats(VmaPoolStats* pStats); - bool IsEmpty() const { return m_Blocks.empty(); } + bool IsEmpty(); bool IsCorruptionDetectionEnabled() const; VkResult Allocate( - uint32_t currentFrameIndex, - VkDeviceSize size, - VkDeviceSize alignment, - const VmaAllocationCreateInfo &createInfo, - VmaSuballocationType suballocType, - size_t allocationCount, - VmaAllocation *pAllocations); + uint32_t currentFrameIndex, + VkDeviceSize size, + VkDeviceSize alignment, + const VmaAllocationCreateInfo& createInfo, + VmaSuballocationType suballocType, + size_t allocationCount, + VmaAllocation* pAllocations); - void Free( - VmaAllocation hAllocation); + void Free(const VmaAllocation hAllocation); // Adds statistics of this BlockVector to pStats. - void AddStats(VmaStats *pStats); + void AddStats(VmaStats* pStats); #if VMA_STATS_STRING_ENABLED - void PrintDetailedMap(class VmaJsonWriter &json); + void PrintDetailedMap(class VmaJsonWriter& json); #endif void MakePoolAllocationsLost( - uint32_t currentFrameIndex, - size_t *pLostAllocationCount); + uint32_t currentFrameIndex, + size_t* pLostAllocationCount); VkResult CheckCorruption(); // Saves results in pCtx->res. 
void Defragment( - class VmaBlockVectorDefragmentationContext *pCtx, - VmaDefragmentationStats *pStats, - VkDeviceSize &maxCpuBytesToMove, uint32_t &maxCpuAllocationsToMove, - VkDeviceSize &maxGpuBytesToMove, uint32_t &maxGpuAllocationsToMove, - VkCommandBuffer commandBuffer); + class VmaBlockVectorDefragmentationContext* pCtx, + VmaDefragmentationStats* pStats, VmaDefragmentationFlags flags, + VkDeviceSize& maxCpuBytesToMove, uint32_t& maxCpuAllocationsToMove, + VkDeviceSize& maxGpuBytesToMove, uint32_t& maxGpuAllocationsToMove, + VkCommandBuffer commandBuffer); void DefragmentationEnd( - class VmaBlockVectorDefragmentationContext *pCtx, - VmaDefragmentationStats *pStats); + class VmaBlockVectorDefragmentationContext* pCtx, + VmaDefragmentationStats* pStats); + + uint32_t ProcessDefragmentations( + class VmaBlockVectorDefragmentationContext *pCtx, + VmaDefragmentationPassMoveInfo* pMove, uint32_t maxMoves); + + void CommitDefragmentations( + class VmaBlockVectorDefragmentationContext *pCtx, + VmaDefragmentationStats* pStats); //////////////////////////////////////////////////////////////////////////////// // To be used only while the m_Mutex is locked. Used during defragmentation. size_t GetBlockCount() const { return m_Blocks.size(); } - VmaDeviceMemoryBlock *GetBlock(size_t index) const { return m_Blocks[index]; } + VmaDeviceMemoryBlock* GetBlock(size_t index) const { return m_Blocks[index]; } size_t CalcAllocationCount() const; bool IsBufferImageGranularityConflictPossible() const; @@ -5761,82 +6464,84 @@ private: const size_t m_MaxBlockCount; const VkDeviceSize m_BufferImageGranularity; const uint32_t m_FrameInUseCount; - const bool m_IsCustomPool; const bool m_ExplicitBlockSize; const uint32_t m_Algorithm; - /* There can be at most one allocation that is completely empty - a - hysteresis to avoid pessimistic case of alternating creation and destruction - of a VkDeviceMemory. 
*/ - bool m_HasEmptyBlock; VMA_RW_MUTEX m_Mutex; + + /* There can be at most one allocation that is completely empty (except when minBlockCount > 0) - + a hysteresis to avoid pessimistic case of alternating creation and destruction of a VkDeviceMemory. */ + bool m_HasEmptyBlock; // Incrementally sorted by sumFreeSize, ascending. - VmaVector<VmaDeviceMemoryBlock *, VmaStlAllocator<VmaDeviceMemoryBlock *> > m_Blocks; + VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks; uint32_t m_NextBlockId; VkDeviceSize CalcMaxBlockSize() const; // Finds and removes given block from vector. - void Remove(VmaDeviceMemoryBlock *pBlock); + void Remove(VmaDeviceMemoryBlock* pBlock); // Performs single step in sorting m_Blocks. They may not be fully sorted // after this call. void IncrementallySortBlocks(); VkResult AllocatePage( - uint32_t currentFrameIndex, - VkDeviceSize size, - VkDeviceSize alignment, - const VmaAllocationCreateInfo &createInfo, - VmaSuballocationType suballocType, - VmaAllocation *pAllocation); + uint32_t currentFrameIndex, + VkDeviceSize size, + VkDeviceSize alignment, + const VmaAllocationCreateInfo& createInfo, + VmaSuballocationType suballocType, + VmaAllocation* pAllocation); // To be used only without CAN_MAKE_OTHER_LOST flag. 
VkResult AllocateFromBlock( - VmaDeviceMemoryBlock *pBlock, - uint32_t currentFrameIndex, - VkDeviceSize size, - VkDeviceSize alignment, - VmaAllocationCreateFlags allocFlags, - void *pUserData, - VmaSuballocationType suballocType, - uint32_t strategy, - VmaAllocation *pAllocation); - - VkResult CreateBlock(VkDeviceSize blockSize, size_t *pNewBlockIndex); + VmaDeviceMemoryBlock* pBlock, + uint32_t currentFrameIndex, + VkDeviceSize size, + VkDeviceSize alignment, + VmaAllocationCreateFlags allocFlags, + void* pUserData, + VmaSuballocationType suballocType, + uint32_t strategy, + VmaAllocation* pAllocation); + + VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex); // Saves result to pCtx->res. void ApplyDefragmentationMovesCpu( - class VmaBlockVectorDefragmentationContext *pDefragCtx, - const VmaVector<VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> > &moves); + class VmaBlockVectorDefragmentationContext* pDefragCtx, + const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves); // Saves result to pCtx->res. void ApplyDefragmentationMovesGpu( - class VmaBlockVectorDefragmentationContext *pDefragCtx, - const VmaVector<VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> > &moves, - VkCommandBuffer commandBuffer); + class VmaBlockVectorDefragmentationContext* pDefragCtx, + VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves, + VkCommandBuffer commandBuffer); /* - Used during defragmentation. pDefragmentationStats is optional. It's in/out - - updated with new data. - */ - void FreeEmptyBlocks(VmaDefragmentationStats *pDefragmentationStats); + Used during defragmentation. pDefragmentationStats is optional. It's in/out + - updated with new data. 
+ */ + void FreeEmptyBlocks(VmaDefragmentationStats* pDefragmentationStats); + + void UpdateHasEmptyBlock(); }; -struct VmaPool_T { +struct VmaPool_T +{ VMA_CLASS_NO_COPY(VmaPool_T) public: VmaBlockVector m_BlockVector; VmaPool_T( - VmaAllocator hAllocator, - const VmaPoolCreateInfo &createInfo, - VkDeviceSize preferredBlockSize); + VmaAllocator hAllocator, + const VmaPoolCreateInfo& createInfo, + VkDeviceSize preferredBlockSize); ~VmaPool_T(); uint32_t GetId() const { return m_Id; } - void SetId(uint32_t id) { - VMA_ASSERT(m_Id == 0); - m_Id = id; - } + void SetId(uint32_t id) { VMA_ASSERT(m_Id == 0); m_Id = id; } + + const char* GetName() const { return m_Name; } + void SetName(const char* pName); #if VMA_STATS_STRING_ENABLED //void PrintDetailedMap(class VmaStringBuilder& sb); @@ -5844,6 +6549,7 @@ public: private: uint32_t m_Id; + char* m_Name; }; /* @@ -5853,68 +6559,77 @@ Performs defragmentation: - Updates allocations by calling ChangeBlockAllocation() or ChangeOffset(). - Does not move actual data, only returns requested moves as `moves`. 
*/ -class VmaDefragmentationAlgorithm { +class VmaDefragmentationAlgorithm +{ VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm) public: VmaDefragmentationAlgorithm( - VmaAllocator hAllocator, - VmaBlockVector *pBlockVector, - uint32_t currentFrameIndex) : - m_hAllocator(hAllocator), - m_pBlockVector(pBlockVector), - m_CurrentFrameIndex(currentFrameIndex) { + VmaAllocator hAllocator, + VmaBlockVector* pBlockVector, + uint32_t currentFrameIndex) : + m_hAllocator(hAllocator), + m_pBlockVector(pBlockVector), + m_CurrentFrameIndex(currentFrameIndex) + { } - virtual ~VmaDefragmentationAlgorithm() { + virtual ~VmaDefragmentationAlgorithm() + { } - virtual void AddAllocation(VmaAllocation hAlloc, VkBool32 *pChanged) = 0; + virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) = 0; virtual void AddAll() = 0; virtual VkResult Defragment( - VmaVector<VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> > &moves, - VkDeviceSize maxBytesToMove, - uint32_t maxAllocationsToMove) = 0; + VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves, + VkDeviceSize maxBytesToMove, + uint32_t maxAllocationsToMove, + VmaDefragmentationFlags flags) = 0; virtual VkDeviceSize GetBytesMoved() const = 0; virtual uint32_t GetAllocationsMoved() const = 0; protected: VmaAllocator const m_hAllocator; - VmaBlockVector *const m_pBlockVector; + VmaBlockVector* const m_pBlockVector; const uint32_t m_CurrentFrameIndex; - struct AllocationInfo { + struct AllocationInfo + { VmaAllocation m_hAllocation; - VkBool32 *m_pChanged; + VkBool32* m_pChanged; AllocationInfo() : - m_hAllocation(VK_NULL_HANDLE), - m_pChanged(VMA_NULL) { + m_hAllocation(VK_NULL_HANDLE), + m_pChanged(VMA_NULL) + { } - AllocationInfo(VmaAllocation hAlloc, VkBool32 *pChanged) : - m_hAllocation(hAlloc), - m_pChanged(pChanged) { + AllocationInfo(VmaAllocation hAlloc, VkBool32* pChanged) : + m_hAllocation(hAlloc), + m_pChanged(pChanged) + { } }; }; -class VmaDefragmentationAlgorithm_Generic 
: public VmaDefragmentationAlgorithm { +class VmaDefragmentationAlgorithm_Generic : public VmaDefragmentationAlgorithm +{ VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm_Generic) public: VmaDefragmentationAlgorithm_Generic( - VmaAllocator hAllocator, - VmaBlockVector *pBlockVector, - uint32_t currentFrameIndex, - bool overlappingMoveSupported); + VmaAllocator hAllocator, + VmaBlockVector* pBlockVector, + uint32_t currentFrameIndex, + bool overlappingMoveSupported); virtual ~VmaDefragmentationAlgorithm_Generic(); - virtual void AddAllocation(VmaAllocation hAlloc, VkBool32 *pChanged); + virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged); virtual void AddAll() { m_AllAllocations = true; } virtual VkResult Defragment( - VmaVector<VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> > &moves, - VkDeviceSize maxBytesToMove, - uint32_t maxAllocationsToMove); + VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves, + VkDeviceSize maxBytesToMove, + uint32_t maxAllocationsToMove, + VmaDefragmentationFlags flags); virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; } virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; } @@ -5926,143 +6641,173 @@ private: VkDeviceSize m_BytesMoved; uint32_t m_AllocationsMoved; - struct AllocationInfoSizeGreater { - bool operator()(const AllocationInfo &lhs, const AllocationInfo &rhs) const { + struct AllocationInfoSizeGreater + { + bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const + { return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize(); } }; - struct AllocationInfoOffsetGreater { - bool operator()(const AllocationInfo &lhs, const AllocationInfo &rhs) const { + struct AllocationInfoOffsetGreater + { + bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const + { return lhs.m_hAllocation->GetOffset() > rhs.m_hAllocation->GetOffset(); } }; - struct BlockInfo { + struct BlockInfo + { size_t 
m_OriginalBlockIndex; - VmaDeviceMemoryBlock *m_pBlock; + VmaDeviceMemoryBlock* m_pBlock; bool m_HasNonMovableAllocations; - VmaVector<AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations; + VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations; - BlockInfo(const VkAllocationCallbacks *pAllocationCallbacks) : - m_OriginalBlockIndex(SIZE_MAX), - m_pBlock(VMA_NULL), - m_HasNonMovableAllocations(true), - m_Allocations(pAllocationCallbacks) { + BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) : + m_OriginalBlockIndex(SIZE_MAX), + m_pBlock(VMA_NULL), + m_HasNonMovableAllocations(true), + m_Allocations(pAllocationCallbacks) + { } - void CalcHasNonMovableAllocations() { + void CalcHasNonMovableAllocations() + { const size_t blockAllocCount = m_pBlock->m_pMetadata->GetAllocationCount(); const size_t defragmentAllocCount = m_Allocations.size(); m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount; } - void SortAllocationsBySizeDescending() { + void SortAllocationsBySizeDescending() + { VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater()); } - void SortAllocationsByOffsetDescending() { + void SortAllocationsByOffsetDescending() + { VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoOffsetGreater()); } }; - struct BlockPointerLess { - bool operator()(const BlockInfo *pLhsBlockInfo, const VmaDeviceMemoryBlock *pRhsBlock) const { + struct BlockPointerLess + { + bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const + { return pLhsBlockInfo->m_pBlock < pRhsBlock; } - bool operator()(const BlockInfo *pLhsBlockInfo, const BlockInfo *pRhsBlockInfo) const { + bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const + { return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock; } }; // 1. Blocks with some non-movable allocations go first. // 2. Blocks with smaller sumFreeSize go first. 
- struct BlockInfoCompareMoveDestination { - bool operator()(const BlockInfo *pLhsBlockInfo, const BlockInfo *pRhsBlockInfo) const { - if (pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations) { + struct BlockInfoCompareMoveDestination + { + bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const + { + if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations) + { return true; } - if (!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations) { + if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations) + { return false; } - if (pLhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize()) { + if(pLhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize()) + { return true; } return false; } }; - typedef VmaVector<BlockInfo *, VmaStlAllocator<BlockInfo *> > BlockInfoVector; + typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector; BlockInfoVector m_Blocks; VkResult DefragmentRound( - VmaVector<VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> > &moves, - VkDeviceSize maxBytesToMove, - uint32_t maxAllocationsToMove); + VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves, + VkDeviceSize maxBytesToMove, + uint32_t maxAllocationsToMove, + bool freeOldAllocations); size_t CalcBlocksWithNonMovableCount() const; static bool MoveMakesSense( - size_t dstBlockIndex, VkDeviceSize dstOffset, - size_t srcBlockIndex, VkDeviceSize srcOffset); + size_t dstBlockIndex, VkDeviceSize dstOffset, + size_t srcBlockIndex, VkDeviceSize srcOffset); }; -class VmaDefragmentationAlgorithm_Fast : public VmaDefragmentationAlgorithm { +class VmaDefragmentationAlgorithm_Fast : public VmaDefragmentationAlgorithm +{ VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm_Fast) 
public: VmaDefragmentationAlgorithm_Fast( - VmaAllocator hAllocator, - VmaBlockVector *pBlockVector, - uint32_t currentFrameIndex, - bool overlappingMoveSupported); + VmaAllocator hAllocator, + VmaBlockVector* pBlockVector, + uint32_t currentFrameIndex, + bool overlappingMoveSupported); virtual ~VmaDefragmentationAlgorithm_Fast(); - virtual void AddAllocation(VmaAllocation hAlloc, VkBool32 *pChanged) { ++m_AllocationCount; } + virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) { ++m_AllocationCount; } virtual void AddAll() { m_AllAllocations = true; } virtual VkResult Defragment( - VmaVector<VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> > &moves, - VkDeviceSize maxBytesToMove, - uint32_t maxAllocationsToMove); + VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves, + VkDeviceSize maxBytesToMove, + uint32_t maxAllocationsToMove, + VmaDefragmentationFlags flags); virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; } virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; } private: - struct BlockInfo { + struct BlockInfo + { size_t origBlockIndex; }; - class FreeSpaceDatabase { + class FreeSpaceDatabase + { public: - FreeSpaceDatabase() { + FreeSpaceDatabase() + { FreeSpace s = {}; s.blockInfoIndex = SIZE_MAX; - for (size_t i = 0; i < MAX_COUNT; ++i) { + for(size_t i = 0; i < MAX_COUNT; ++i) + { m_FreeSpaces[i] = s; } } - void Register(size_t blockInfoIndex, VkDeviceSize offset, VkDeviceSize size) { - if (size < VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER) { + void Register(size_t blockInfoIndex, VkDeviceSize offset, VkDeviceSize size) + { + if(size < VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER) + { return; } // Find first invalid or the smallest structure. size_t bestIndex = SIZE_MAX; - for (size_t i = 0; i < MAX_COUNT; ++i) { + for(size_t i = 0; i < MAX_COUNT; ++i) + { // Empty structure. 
- if (m_FreeSpaces[i].blockInfoIndex == SIZE_MAX) { + if(m_FreeSpaces[i].blockInfoIndex == SIZE_MAX) + { bestIndex = i; break; } - if (m_FreeSpaces[i].size < size && - (bestIndex == SIZE_MAX || m_FreeSpaces[bestIndex].size > m_FreeSpaces[i].size)) { + if(m_FreeSpaces[i].size < size && + (bestIndex == SIZE_MAX || m_FreeSpaces[bestIndex].size > m_FreeSpaces[i].size)) + { bestIndex = i; } } - if (bestIndex != SIZE_MAX) { + if(bestIndex != SIZE_MAX) + { m_FreeSpaces[bestIndex].blockInfoIndex = blockInfoIndex; m_FreeSpaces[bestIndex].offset = offset; m_FreeSpaces[bestIndex].size = size; @@ -6070,35 +6815,44 @@ private: } bool Fetch(VkDeviceSize alignment, VkDeviceSize size, - size_t &outBlockInfoIndex, VkDeviceSize &outDstOffset) { + size_t& outBlockInfoIndex, VkDeviceSize& outDstOffset) + { size_t bestIndex = SIZE_MAX; VkDeviceSize bestFreeSpaceAfter = 0; - for (size_t i = 0; i < MAX_COUNT; ++i) { + for(size_t i = 0; i < MAX_COUNT; ++i) + { // Structure is valid. - if (m_FreeSpaces[i].blockInfoIndex != SIZE_MAX) { + if(m_FreeSpaces[i].blockInfoIndex != SIZE_MAX) + { const VkDeviceSize dstOffset = VmaAlignUp(m_FreeSpaces[i].offset, alignment); // Allocation fits into this structure. 
- if (dstOffset + size <= m_FreeSpaces[i].offset + m_FreeSpaces[i].size) { + if(dstOffset + size <= m_FreeSpaces[i].offset + m_FreeSpaces[i].size) + { const VkDeviceSize freeSpaceAfter = (m_FreeSpaces[i].offset + m_FreeSpaces[i].size) - - (dstOffset + size); - if (bestIndex == SIZE_MAX || freeSpaceAfter > bestFreeSpaceAfter) { + (dstOffset + size); + if(bestIndex == SIZE_MAX || freeSpaceAfter > bestFreeSpaceAfter) + { bestIndex = i; bestFreeSpaceAfter = freeSpaceAfter; } } } } - - if (bestIndex != SIZE_MAX) { + + if(bestIndex != SIZE_MAX) + { outBlockInfoIndex = m_FreeSpaces[bestIndex].blockInfoIndex; outDstOffset = VmaAlignUp(m_FreeSpaces[bestIndex].offset, alignment); - if (bestFreeSpaceAfter >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER) { + if(bestFreeSpaceAfter >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER) + { // Leave this structure for remaining empty space. const VkDeviceSize alignmentPlusSize = (outDstOffset - m_FreeSpaces[bestIndex].offset) + size; m_FreeSpaces[bestIndex].offset += alignmentPlusSize; m_FreeSpaces[bestIndex].size -= alignmentPlusSize; - } else { + } + else + { // This structure becomes invalid. m_FreeSpaces[bestIndex].blockInfoIndex = SIZE_MAX; } @@ -6112,7 +6866,8 @@ private: private: static const size_t MAX_COUNT = 4; - struct FreeSpace { + struct FreeSpace + { size_t blockInfoIndex; // SIZE_MAX means this structure is invalid. 
VkDeviceSize offset; VkDeviceSize size; @@ -6127,231 +6882,251 @@ private: VkDeviceSize m_BytesMoved; uint32_t m_AllocationsMoved; - VmaVector<BlockInfo, VmaStlAllocator<BlockInfo> > m_BlockInfos; + VmaVector< BlockInfo, VmaStlAllocator<BlockInfo> > m_BlockInfos; void PreprocessMetadata(); void PostprocessMetadata(); - void InsertSuballoc(VmaBlockMetadata_Generic *pMetadata, const VmaSuballocation &suballoc); + void InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc); }; -struct VmaBlockDefragmentationContext { - enum BLOCK_FLAG { +struct VmaBlockDefragmentationContext +{ + enum BLOCK_FLAG + { BLOCK_FLAG_USED = 0x00000001, }; uint32_t flags; VkBuffer hBuffer; - - VmaBlockDefragmentationContext() : - flags(0), - hBuffer(VK_NULL_HANDLE) { - } }; -class VmaBlockVectorDefragmentationContext { +class VmaBlockVectorDefragmentationContext +{ VMA_CLASS_NO_COPY(VmaBlockVectorDefragmentationContext) public: VkResult res; bool mutexLocked; - VmaVector<VmaBlockDefragmentationContext, VmaStlAllocator<VmaBlockDefragmentationContext> > blockContexts; + VmaVector< VmaBlockDefragmentationContext, VmaStlAllocator<VmaBlockDefragmentationContext> > blockContexts; + VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> > defragmentationMoves; + uint32_t defragmentationMovesProcessed; + uint32_t defragmentationMovesCommitted; + bool hasDefragmentationPlan; VmaBlockVectorDefragmentationContext( - VmaAllocator hAllocator, - VmaPool hCustomPool, // Optional. - VmaBlockVector *pBlockVector, - uint32_t currFrameIndex, - uint32_t flags); + VmaAllocator hAllocator, + VmaPool hCustomPool, // Optional. 
+ VmaBlockVector* pBlockVector, + uint32_t currFrameIndex); ~VmaBlockVectorDefragmentationContext(); VmaPool GetCustomPool() const { return m_hCustomPool; } - VmaBlockVector *GetBlockVector() const { return m_pBlockVector; } - VmaDefragmentationAlgorithm *GetAlgorithm() const { return m_pAlgorithm; } + VmaBlockVector* GetBlockVector() const { return m_pBlockVector; } + VmaDefragmentationAlgorithm* GetAlgorithm() const { return m_pAlgorithm; } - void AddAllocation(VmaAllocation hAlloc, VkBool32 *pChanged); + void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged); void AddAll() { m_AllAllocations = true; } - void Begin(bool overlappingMoveSupported); + void Begin(bool overlappingMoveSupported, VmaDefragmentationFlags flags); private: const VmaAllocator m_hAllocator; // Null if not from custom pool. const VmaPool m_hCustomPool; // Redundant, for convenience not to fetch from m_hCustomPool->m_BlockVector or m_hAllocator->m_pBlockVectors. - VmaBlockVector *const m_pBlockVector; + VmaBlockVector* const m_pBlockVector; const uint32_t m_CurrFrameIndex; - const uint32_t m_AlgorithmFlags; // Owner of this object. - VmaDefragmentationAlgorithm *m_pAlgorithm; + VmaDefragmentationAlgorithm* m_pAlgorithm; - struct AllocInfo { + struct AllocInfo + { VmaAllocation hAlloc; - VkBool32 *pChanged; + VkBool32* pChanged; }; // Used between constructor and Begin. 
- VmaVector<AllocInfo, VmaStlAllocator<AllocInfo> > m_Allocations; + VmaVector< AllocInfo, VmaStlAllocator<AllocInfo> > m_Allocations; bool m_AllAllocations; }; -struct VmaDefragmentationContext_T { +struct VmaDefragmentationContext_T +{ private: VMA_CLASS_NO_COPY(VmaDefragmentationContext_T) public: VmaDefragmentationContext_T( - VmaAllocator hAllocator, - uint32_t currFrameIndex, - uint32_t flags, - VmaDefragmentationStats *pStats); + VmaAllocator hAllocator, + uint32_t currFrameIndex, + uint32_t flags, + VmaDefragmentationStats* pStats); ~VmaDefragmentationContext_T(); - void AddPools(uint32_t poolCount, VmaPool *pPools); + void AddPools(uint32_t poolCount, VmaPool* pPools); void AddAllocations( - uint32_t allocationCount, - VmaAllocation *pAllocations, - VkBool32 *pAllocationsChanged); + uint32_t allocationCount, + VmaAllocation* pAllocations, + VkBool32* pAllocationsChanged); /* - Returns: - - `VK_SUCCESS` if succeeded and object can be destroyed immediately. - - `VK_NOT_READY` if succeeded but the object must remain alive until vmaDefragmentationEnd(). - - Negative value if error occured and object can be destroyed immediately. - */ + Returns: + - `VK_SUCCESS` if succeeded and object can be destroyed immediately. + - `VK_NOT_READY` if succeeded but the object must remain alive until vmaDefragmentationEnd(). + - Negative value if error occured and object can be destroyed immediately. 
+ */ VkResult Defragment( - VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove, - VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove, - VkCommandBuffer commandBuffer, VmaDefragmentationStats *pStats); + VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove, + VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove, + VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats, VmaDefragmentationFlags flags); + + VkResult DefragmentPassBegin(VmaDefragmentationPassInfo* pInfo); + VkResult DefragmentPassEnd(); private: const VmaAllocator m_hAllocator; const uint32_t m_CurrFrameIndex; const uint32_t m_Flags; - VmaDefragmentationStats *const m_pStats; + VmaDefragmentationStats* const m_pStats; + + VkDeviceSize m_MaxCpuBytesToMove; + uint32_t m_MaxCpuAllocationsToMove; + VkDeviceSize m_MaxGpuBytesToMove; + uint32_t m_MaxGpuAllocationsToMove; + // Owner of these objects. - VmaBlockVectorDefragmentationContext *m_DefaultPoolContexts[VK_MAX_MEMORY_TYPES]; + VmaBlockVectorDefragmentationContext* m_DefaultPoolContexts[VK_MAX_MEMORY_TYPES]; // Owner of these objects. 
- VmaVector<VmaBlockVectorDefragmentationContext *, VmaStlAllocator<VmaBlockVectorDefragmentationContext *> > m_CustomPoolContexts; + VmaVector< VmaBlockVectorDefragmentationContext*, VmaStlAllocator<VmaBlockVectorDefragmentationContext*> > m_CustomPoolContexts; }; #if VMA_RECORDING_ENABLED -class VmaRecorder { +class VmaRecorder +{ public: VmaRecorder(); - VkResult Init(const VmaRecordSettings &settings, bool useMutex); + VkResult Init(const VmaRecordSettings& settings, bool useMutex); void WriteConfiguration( - const VkPhysicalDeviceProperties &devProps, - const VkPhysicalDeviceMemoryProperties &memProps, - bool dedicatedAllocationExtensionEnabled); + const VkPhysicalDeviceProperties& devProps, + const VkPhysicalDeviceMemoryProperties& memProps, + uint32_t vulkanApiVersion, + bool dedicatedAllocationExtensionEnabled, + bool bindMemory2ExtensionEnabled, + bool memoryBudgetExtensionEnabled, + bool deviceCoherentMemoryExtensionEnabled); ~VmaRecorder(); void RecordCreateAllocator(uint32_t frameIndex); void RecordDestroyAllocator(uint32_t frameIndex); void RecordCreatePool(uint32_t frameIndex, - const VmaPoolCreateInfo &createInfo, - VmaPool pool); + const VmaPoolCreateInfo& createInfo, + VmaPool pool); void RecordDestroyPool(uint32_t frameIndex, VmaPool pool); void RecordAllocateMemory(uint32_t frameIndex, - const VkMemoryRequirements &vkMemReq, - const VmaAllocationCreateInfo &createInfo, - VmaAllocation allocation); + const VkMemoryRequirements& vkMemReq, + const VmaAllocationCreateInfo& createInfo, + VmaAllocation allocation); void RecordAllocateMemoryPages(uint32_t frameIndex, - const VkMemoryRequirements &vkMemReq, - const VmaAllocationCreateInfo &createInfo, - uint64_t allocationCount, - const VmaAllocation *pAllocations); + const VkMemoryRequirements& vkMemReq, + const VmaAllocationCreateInfo& createInfo, + uint64_t allocationCount, + const VmaAllocation* pAllocations); void RecordAllocateMemoryForBuffer(uint32_t frameIndex, - const VkMemoryRequirements 
&vkMemReq, - bool requiresDedicatedAllocation, - bool prefersDedicatedAllocation, - const VmaAllocationCreateInfo &createInfo, - VmaAllocation allocation); + const VkMemoryRequirements& vkMemReq, + bool requiresDedicatedAllocation, + bool prefersDedicatedAllocation, + const VmaAllocationCreateInfo& createInfo, + VmaAllocation allocation); void RecordAllocateMemoryForImage(uint32_t frameIndex, - const VkMemoryRequirements &vkMemReq, - bool requiresDedicatedAllocation, - bool prefersDedicatedAllocation, - const VmaAllocationCreateInfo &createInfo, - VmaAllocation allocation); + const VkMemoryRequirements& vkMemReq, + bool requiresDedicatedAllocation, + bool prefersDedicatedAllocation, + const VmaAllocationCreateInfo& createInfo, + VmaAllocation allocation); void RecordFreeMemory(uint32_t frameIndex, - VmaAllocation allocation); + VmaAllocation allocation); void RecordFreeMemoryPages(uint32_t frameIndex, - uint64_t allocationCount, - const VmaAllocation *pAllocations); - void RecordResizeAllocation( - uint32_t frameIndex, - VmaAllocation allocation, - VkDeviceSize newSize); + uint64_t allocationCount, + const VmaAllocation* pAllocations); void RecordSetAllocationUserData(uint32_t frameIndex, - VmaAllocation allocation, - const void *pUserData); + VmaAllocation allocation, + const void* pUserData); void RecordCreateLostAllocation(uint32_t frameIndex, - VmaAllocation allocation); + VmaAllocation allocation); void RecordMapMemory(uint32_t frameIndex, - VmaAllocation allocation); + VmaAllocation allocation); void RecordUnmapMemory(uint32_t frameIndex, - VmaAllocation allocation); + VmaAllocation allocation); void RecordFlushAllocation(uint32_t frameIndex, - VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size); + VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size); void RecordInvalidateAllocation(uint32_t frameIndex, - VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size); + VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize 
size); void RecordCreateBuffer(uint32_t frameIndex, - const VkBufferCreateInfo &bufCreateInfo, - const VmaAllocationCreateInfo &allocCreateInfo, - VmaAllocation allocation); + const VkBufferCreateInfo& bufCreateInfo, + const VmaAllocationCreateInfo& allocCreateInfo, + VmaAllocation allocation); void RecordCreateImage(uint32_t frameIndex, - const VkImageCreateInfo &imageCreateInfo, - const VmaAllocationCreateInfo &allocCreateInfo, - VmaAllocation allocation); + const VkImageCreateInfo& imageCreateInfo, + const VmaAllocationCreateInfo& allocCreateInfo, + VmaAllocation allocation); void RecordDestroyBuffer(uint32_t frameIndex, - VmaAllocation allocation); + VmaAllocation allocation); void RecordDestroyImage(uint32_t frameIndex, - VmaAllocation allocation); + VmaAllocation allocation); void RecordTouchAllocation(uint32_t frameIndex, - VmaAllocation allocation); + VmaAllocation allocation); void RecordGetAllocationInfo(uint32_t frameIndex, - VmaAllocation allocation); + VmaAllocation allocation); void RecordMakePoolAllocationsLost(uint32_t frameIndex, - VmaPool pool); + VmaPool pool); void RecordDefragmentationBegin(uint32_t frameIndex, - const VmaDefragmentationInfo2 &info, - VmaDefragmentationContext ctx); + const VmaDefragmentationInfo2& info, + VmaDefragmentationContext ctx); void RecordDefragmentationEnd(uint32_t frameIndex, - VmaDefragmentationContext ctx); + VmaDefragmentationContext ctx); + void RecordSetPoolName(uint32_t frameIndex, + VmaPool pool, + const char* name); private: - struct CallParams { + struct CallParams + { uint32_t threadId; double time; }; - class UserDataString { + class UserDataString + { public: - UserDataString(VmaAllocationCreateFlags allocFlags, const void *pUserData); - const char *GetString() const { return m_Str; } + UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData); + const char* GetString() const { return m_Str; } private: char m_PtrStr[17]; - const char *m_Str; + const char* m_Str; }; bool m_UseMutex; 
VmaRecordFlags m_Flags; - FILE *m_File; + FILE* m_File; VMA_MUTEX m_FileMutex; int64_t m_Freq; int64_t m_StartCounter; - void GetBasicParams(CallParams &outParams); + void GetBasicParams(CallParams& outParams); // T must be a pointer type, e.g. VmaAllocation, VmaPool. - template <typename T> - void PrintPointerList(uint64_t count, const T *pItems) { - if (count) { + template<typename T> + void PrintPointerList(uint64_t count, const T* pItems) + { + if(count) + { fprintf(m_File, "%p", pItems[0]); - for (uint64_t i = 1; i < count; ++i) { + for(uint64_t i = 1; i < count; ++i) + { fprintf(m_File, " %p", pItems[i]); } } } - void PrintPointerList(uint64_t count, const VmaAllocation *pItems); + void PrintPointerList(uint64_t count, const VmaAllocation* pItems); void Flush(); }; @@ -6360,12 +7135,13 @@ private: /* Thread-safe wrapper over VmaPoolAllocator free list, for allocation of VmaAllocation_T objects. */ -class VmaAllocationObjectAllocator { +class VmaAllocationObjectAllocator +{ VMA_CLASS_NO_COPY(VmaAllocationObjectAllocator) public: - VmaAllocationObjectAllocator(const VkAllocationCallbacks *pAllocationCallbacks); + VmaAllocationObjectAllocator(const VkAllocationCallbacks* pAllocationCallbacks); - VmaAllocation Allocate(); + template<typename... Types> VmaAllocation Allocate(Types... 
args); void Free(VmaAllocation hAlloc); private: @@ -6373,160 +7149,256 @@ private: VmaPoolAllocator<VmaAllocation_T> m_Allocator; }; +struct VmaCurrentBudgetData +{ + VMA_ATOMIC_UINT64 m_BlockBytes[VK_MAX_MEMORY_HEAPS]; + VMA_ATOMIC_UINT64 m_AllocationBytes[VK_MAX_MEMORY_HEAPS]; + +#if VMA_MEMORY_BUDGET + VMA_ATOMIC_UINT32 m_OperationsSinceBudgetFetch; + VMA_RW_MUTEX m_BudgetMutex; + uint64_t m_VulkanUsage[VK_MAX_MEMORY_HEAPS]; + uint64_t m_VulkanBudget[VK_MAX_MEMORY_HEAPS]; + uint64_t m_BlockBytesAtBudgetFetch[VK_MAX_MEMORY_HEAPS]; +#endif // #if VMA_MEMORY_BUDGET + + VmaCurrentBudgetData() + { + for(uint32_t heapIndex = 0; heapIndex < VK_MAX_MEMORY_HEAPS; ++heapIndex) + { + m_BlockBytes[heapIndex] = 0; + m_AllocationBytes[heapIndex] = 0; +#if VMA_MEMORY_BUDGET + m_VulkanUsage[heapIndex] = 0; + m_VulkanBudget[heapIndex] = 0; + m_BlockBytesAtBudgetFetch[heapIndex] = 0; +#endif + } + +#if VMA_MEMORY_BUDGET + m_OperationsSinceBudgetFetch = 0; +#endif + } + + void AddAllocation(uint32_t heapIndex, VkDeviceSize allocationSize) + { + m_AllocationBytes[heapIndex] += allocationSize; +#if VMA_MEMORY_BUDGET + ++m_OperationsSinceBudgetFetch; +#endif + } + + void RemoveAllocation(uint32_t heapIndex, VkDeviceSize allocationSize) + { + VMA_ASSERT(m_AllocationBytes[heapIndex] >= allocationSize); // DELME + m_AllocationBytes[heapIndex] -= allocationSize; +#if VMA_MEMORY_BUDGET + ++m_OperationsSinceBudgetFetch; +#endif + } +}; + // Main allocator object. -struct VmaAllocator_T { +struct VmaAllocator_T +{ VMA_CLASS_NO_COPY(VmaAllocator_T) public: bool m_UseMutex; - bool m_UseKhrDedicatedAllocation; + uint32_t m_VulkanApiVersion; + bool m_UseKhrDedicatedAllocation; // Can be set only if m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0). + bool m_UseKhrBindMemory2; // Can be set only if m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0). 
+ bool m_UseExtMemoryBudget; + bool m_UseAmdDeviceCoherentMemory; VkDevice m_hDevice; + VkInstance m_hInstance; bool m_AllocationCallbacksSpecified; VkAllocationCallbacks m_AllocationCallbacks; VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks; VmaAllocationObjectAllocator m_AllocationObjectAllocator; - - // Number of bytes free out of limit, or VK_WHOLE_SIZE if no limit for that heap. - VkDeviceSize m_HeapSizeLimit[VK_MAX_MEMORY_HEAPS]; - VMA_MUTEX m_HeapSizeLimitMutex; + + // Each bit (1 << i) is set if HeapSizeLimit is enabled for that heap, so cannot allocate more than the heap size. + uint32_t m_HeapSizeLimitMask; VkPhysicalDeviceProperties m_PhysicalDeviceProperties; VkPhysicalDeviceMemoryProperties m_MemProps; // Default pools. - VmaBlockVector *m_pBlockVectors[VK_MAX_MEMORY_TYPES]; + VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES]; // Each vector is sorted by memory (handle value). - typedef VmaVector<VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType; - AllocationVectorType *m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES]; + typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType; + AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES]; VMA_RW_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES]; - VmaAllocator_T(const VmaAllocatorCreateInfo *pCreateInfo); - VkResult Init(const VmaAllocatorCreateInfo *pCreateInfo); + VmaCurrentBudgetData m_Budget; + + VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo); + VkResult Init(const VmaAllocatorCreateInfo* pCreateInfo); ~VmaAllocator_T(); - const VkAllocationCallbacks *GetAllocationCallbacks() const { + const VkAllocationCallbacks* GetAllocationCallbacks() const + { return m_AllocationCallbacksSpecified ? 
&m_AllocationCallbacks : 0; } - const VmaVulkanFunctions &GetVulkanFunctions() const { + const VmaVulkanFunctions& GetVulkanFunctions() const + { return m_VulkanFunctions; } - VkDeviceSize GetBufferImageGranularity() const { + VkDeviceSize GetBufferImageGranularity() const + { return VMA_MAX( - static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY), - m_PhysicalDeviceProperties.limits.bufferImageGranularity); + static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY), + m_PhysicalDeviceProperties.limits.bufferImageGranularity); } uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; } uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; } - uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const { + uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const + { VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount); return m_MemProps.memoryTypes[memTypeIndex].heapIndex; } // True when specific memory type is HOST_VISIBLE but not HOST_COHERENT. - bool IsMemoryTypeNonCoherent(uint32_t memTypeIndex) const { + bool IsMemoryTypeNonCoherent(uint32_t memTypeIndex) const + { return (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) == - VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT; + VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT; } // Minimum alignment for all allocations in specific memory type. - VkDeviceSize GetMemoryTypeMinAlignment(uint32_t memTypeIndex) const { + VkDeviceSize GetMemoryTypeMinAlignment(uint32_t memTypeIndex) const + { return IsMemoryTypeNonCoherent(memTypeIndex) ? 
- VMA_MAX((VkDeviceSize)VMA_DEBUG_ALIGNMENT, m_PhysicalDeviceProperties.limits.nonCoherentAtomSize) : - (VkDeviceSize)VMA_DEBUG_ALIGNMENT; + VMA_MAX((VkDeviceSize)VMA_DEBUG_ALIGNMENT, m_PhysicalDeviceProperties.limits.nonCoherentAtomSize) : + (VkDeviceSize)VMA_DEBUG_ALIGNMENT; } - bool IsIntegratedGpu() const { + bool IsIntegratedGpu() const + { return m_PhysicalDeviceProperties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU; } + uint32_t GetGlobalMemoryTypeBits() const { return m_GlobalMemoryTypeBits; } + #if VMA_RECORDING_ENABLED - VmaRecorder *GetRecorder() const { return m_pRecorder; } + VmaRecorder* GetRecorder() const { return m_pRecorder; } #endif void GetBufferMemoryRequirements( - VkBuffer hBuffer, - VkMemoryRequirements &memReq, - bool &requiresDedicatedAllocation, - bool &prefersDedicatedAllocation) const; + VkBuffer hBuffer, + VkMemoryRequirements& memReq, + bool& requiresDedicatedAllocation, + bool& prefersDedicatedAllocation) const; void GetImageMemoryRequirements( - VkImage hImage, - VkMemoryRequirements &memReq, - bool &requiresDedicatedAllocation, - bool &prefersDedicatedAllocation) const; + VkImage hImage, + VkMemoryRequirements& memReq, + bool& requiresDedicatedAllocation, + bool& prefersDedicatedAllocation) const; // Main allocation function. VkResult AllocateMemory( - const VkMemoryRequirements &vkMemReq, - bool requiresDedicatedAllocation, - bool prefersDedicatedAllocation, - VkBuffer dedicatedBuffer, - VkImage dedicatedImage, - const VmaAllocationCreateInfo &createInfo, - VmaSuballocationType suballocType, - size_t allocationCount, - VmaAllocation *pAllocations); + const VkMemoryRequirements& vkMemReq, + bool requiresDedicatedAllocation, + bool prefersDedicatedAllocation, + VkBuffer dedicatedBuffer, + VkImage dedicatedImage, + const VmaAllocationCreateInfo& createInfo, + VmaSuballocationType suballocType, + size_t allocationCount, + VmaAllocation* pAllocations); // Main deallocation function. 
void FreeMemory( - size_t allocationCount, - const VmaAllocation *pAllocations); + size_t allocationCount, + const VmaAllocation* pAllocations); VkResult ResizeAllocation( - const VmaAllocation alloc, - VkDeviceSize newSize); + const VmaAllocation alloc, + VkDeviceSize newSize); - void CalculateStats(VmaStats *pStats); + void CalculateStats(VmaStats* pStats); + + void GetBudget( + VmaBudget* outBudget, uint32_t firstHeap, uint32_t heapCount); #if VMA_STATS_STRING_ENABLED - void PrintDetailedMap(class VmaJsonWriter &json); + void PrintDetailedMap(class VmaJsonWriter& json); #endif VkResult DefragmentationBegin( - const VmaDefragmentationInfo2 &info, - VmaDefragmentationStats *pStats, - VmaDefragmentationContext *pContext); + const VmaDefragmentationInfo2& info, + VmaDefragmentationStats* pStats, + VmaDefragmentationContext* pContext); VkResult DefragmentationEnd( - VmaDefragmentationContext context); + VmaDefragmentationContext context); + + VkResult DefragmentationPassBegin( + VmaDefragmentationPassInfo* pInfo, + VmaDefragmentationContext context); + VkResult DefragmentationPassEnd( + VmaDefragmentationContext context); - void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo *pAllocationInfo); + void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo); bool TouchAllocation(VmaAllocation hAllocation); - VkResult CreatePool(const VmaPoolCreateInfo *pCreateInfo, VmaPool *pPool); + VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool); void DestroyPool(VmaPool pool); - void GetPoolStats(VmaPool pool, VmaPoolStats *pPoolStats); + void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats); void SetCurrentFrameIndex(uint32_t frameIndex); uint32_t GetCurrentFrameIndex() const { return m_CurrentFrameIndex.load(); } void MakePoolAllocationsLost( - VmaPool hPool, - size_t *pLostAllocationCount); + VmaPool hPool, + size_t* pLostAllocationCount); VkResult CheckPoolCorruption(VmaPool hPool); VkResult 
CheckCorruption(uint32_t memoryTypeBits); - void CreateLostAllocation(VmaAllocation *pAllocation); + void CreateLostAllocation(VmaAllocation* pAllocation); - VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo *pAllocateInfo, VkDeviceMemory *pMemory); + // Call to Vulkan function vkAllocateMemory with accompanying bookkeeping. + VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory); + // Call to Vulkan function vkFreeMemory with accompanying bookkeeping. void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory); + // Call to Vulkan function vkBindBufferMemory or vkBindBufferMemory2KHR. + VkResult BindVulkanBuffer( + VkDeviceMemory memory, + VkDeviceSize memoryOffset, + VkBuffer buffer, + const void* pNext); + // Call to Vulkan function vkBindImageMemory or vkBindImageMemory2KHR. + VkResult BindVulkanImage( + VkDeviceMemory memory, + VkDeviceSize memoryOffset, + VkImage image, + const void* pNext); - VkResult Map(VmaAllocation hAllocation, void **ppData); + VkResult Map(VmaAllocation hAllocation, void** ppData); void Unmap(VmaAllocation hAllocation); - VkResult BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer); - VkResult BindImageMemory(VmaAllocation hAllocation, VkImage hImage); + VkResult BindBufferMemory( + VmaAllocation hAllocation, + VkDeviceSize allocationLocalOffset, + VkBuffer hBuffer, + const void* pNext); + VkResult BindImageMemory( + VmaAllocation hAllocation, + VkDeviceSize allocationLocalOffset, + VkImage hImage, + const void* pNext); void FlushOrInvalidateAllocation( - VmaAllocation hAllocation, - VkDeviceSize offset, VkDeviceSize size, - VMA_CACHE_OPERATION op); + VmaAllocation hAllocation, + VkDeviceSize offset, VkDeviceSize size, + VMA_CACHE_OPERATION op); void FillAllocation(const VmaAllocation hAllocation, uint8_t pattern); /* - Returns bit mask of memory types that can support defragmentation on GPU as - they support creation of required buffer for copy 
operations. - */ + Returns bit mask of memory types that can support defragmentation on GPU as + they support creation of required buffer for copy operations. + */ uint32_t GetGpuDefragmentationMemoryTypeBits(); private: @@ -6535,7 +7407,7 @@ private: VkPhysicalDevice m_PhysicalDevice; VMA_ATOMIC_UINT32 m_CurrentFrameIndex; VMA_ATOMIC_UINT32 m_GpuDefragmentationMemoryTypeBits; // UINT32_MAX means uninitialized. - + VMA_RW_MUTEX m_PoolsMutex; // Protected by m_PoolsMutex. Sorted by pointer value. VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools; @@ -6543,93 +7415,110 @@ private: VmaVulkanFunctions m_VulkanFunctions; + // Global bit mask AND-ed with any memoryTypeBits to disallow certain memory types. + uint32_t m_GlobalMemoryTypeBits; + #if VMA_RECORDING_ENABLED - VmaRecorder *m_pRecorder; + VmaRecorder* m_pRecorder; #endif - void ImportVulkanFunctions(const VmaVulkanFunctions *pVulkanFunctions); + void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions); VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex); VkResult AllocateMemoryOfType( - VkDeviceSize size, - VkDeviceSize alignment, - bool dedicatedAllocation, - VkBuffer dedicatedBuffer, - VkImage dedicatedImage, - const VmaAllocationCreateInfo &createInfo, - uint32_t memTypeIndex, - VmaSuballocationType suballocType, - size_t allocationCount, - VmaAllocation *pAllocations); + VkDeviceSize size, + VkDeviceSize alignment, + bool dedicatedAllocation, + VkBuffer dedicatedBuffer, + VkImage dedicatedImage, + const VmaAllocationCreateInfo& createInfo, + uint32_t memTypeIndex, + VmaSuballocationType suballocType, + size_t allocationCount, + VmaAllocation* pAllocations); // Helper function only to be used inside AllocateDedicatedMemory. 
VkResult AllocateDedicatedMemoryPage( - VkDeviceSize size, - VmaSuballocationType suballocType, - uint32_t memTypeIndex, - const VkMemoryAllocateInfo &allocInfo, - bool map, - bool isUserDataString, - void *pUserData, - VmaAllocation *pAllocation); + VkDeviceSize size, + VmaSuballocationType suballocType, + uint32_t memTypeIndex, + const VkMemoryAllocateInfo& allocInfo, + bool map, + bool isUserDataString, + void* pUserData, + VmaAllocation* pAllocation); // Allocates and registers new VkDeviceMemory specifically for dedicated allocations. VkResult AllocateDedicatedMemory( - VkDeviceSize size, - VmaSuballocationType suballocType, - uint32_t memTypeIndex, - bool map, - bool isUserDataString, - void *pUserData, - VkBuffer dedicatedBuffer, - VkImage dedicatedImage, - size_t allocationCount, - VmaAllocation *pAllocations); - - // Tries to free pMemory as Dedicated Memory. Returns true if found and freed. - void FreeDedicatedMemory(VmaAllocation allocation); + VkDeviceSize size, + VmaSuballocationType suballocType, + uint32_t memTypeIndex, + bool withinBudget, + bool map, + bool isUserDataString, + void* pUserData, + VkBuffer dedicatedBuffer, + VkImage dedicatedImage, + size_t allocationCount, + VmaAllocation* pAllocations); + + void FreeDedicatedMemory(const VmaAllocation allocation); /* - Calculates and returns bit mask of memory types that can support defragmentation - on GPU as they support creation of required buffer for copy operations. - */ + Calculates and returns bit mask of memory types that can support defragmentation + on GPU as they support creation of required buffer for copy operations. 
+ */ uint32_t CalculateGpuDefragmentationMemoryTypeBits() const; + + uint32_t CalculateGlobalMemoryTypeBits() const; + +#if VMA_MEMORY_BUDGET + void UpdateVulkanBudget(); +#endif // #if VMA_MEMORY_BUDGET }; //////////////////////////////////////////////////////////////////////////////// // Memory allocation #2 after VmaAllocator_T definition -static void *VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment) { +static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment) +{ return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment); } -static void VmaFree(VmaAllocator hAllocator, void *ptr) { +static void VmaFree(VmaAllocator hAllocator, void* ptr) +{ VmaFree(&hAllocator->m_AllocationCallbacks, ptr); } -template <typename T> -static T *VmaAllocate(VmaAllocator hAllocator) { - return (T *)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T)); +template<typename T> +static T* VmaAllocate(VmaAllocator hAllocator) +{ + return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T)); } -template <typename T> -static T *VmaAllocateArray(VmaAllocator hAllocator, size_t count) { - return (T *)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T)); +template<typename T> +static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count) +{ + return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T)); } -template <typename T> -static void vma_delete(VmaAllocator hAllocator, T *ptr) { - if (ptr != VMA_NULL) { +template<typename T> +static void vma_delete(VmaAllocator hAllocator, T* ptr) +{ + if(ptr != VMA_NULL) + { ptr->~T(); VmaFree(hAllocator, ptr); } } -template <typename T> -static void vma_delete_array(VmaAllocator hAllocator, T *ptr, size_t count) { - if (ptr != VMA_NULL) { - for (size_t i = count; i--;) +template<typename T> +static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count) +{ + if(ptr != VMA_NULL) + { + for(size_t i = count; i--; ) ptr[i].~T(); VmaFree(hAllocator, ptr); } @@ -6640,46 +7529,65 @@ 
static void vma_delete_array(VmaAllocator hAllocator, T *ptr, size_t count) { #if VMA_STATS_STRING_ENABLED -class VmaStringBuilder { +class VmaStringBuilder +{ public: - VmaStringBuilder(VmaAllocator alloc) : - m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) {} + VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { } size_t GetLength() const { return m_Data.size(); } - const char *GetData() const { return m_Data.data(); } + const char* GetData() const { return m_Data.data(); } void Add(char ch) { m_Data.push_back(ch); } - void Add(const char *pStr); + void Add(const char* pStr); void AddNewLine() { Add('\n'); } void AddNumber(uint32_t num); void AddNumber(uint64_t num); - void AddPointer(const void *ptr); + void AddPointer(const void* ptr); private: - VmaVector<char, VmaStlAllocator<char> > m_Data; + VmaVector< char, VmaStlAllocator<char> > m_Data; }; -void VmaStringBuilder::Add(const char *pStr) { +void VmaStringBuilder::Add(const char* pStr) +{ const size_t strLen = strlen(pStr); - if (strLen > 0) { + if(strLen > 0) + { const size_t oldCount = m_Data.size(); m_Data.resize(oldCount + strLen); memcpy(m_Data.data() + oldCount, pStr, strLen); } } -void VmaStringBuilder::AddNumber(uint32_t num) { +void VmaStringBuilder::AddNumber(uint32_t num) +{ char buf[11]; - VmaUint32ToStr(buf, sizeof(buf), num); - Add(buf); + buf[10] = '\0'; + char *p = &buf[10]; + do + { + *--p = '0' + (num % 10); + num /= 10; + } + while(num); + Add(p); } -void VmaStringBuilder::AddNumber(uint64_t num) { +void VmaStringBuilder::AddNumber(uint64_t num) +{ char buf[21]; - VmaUint64ToStr(buf, sizeof(buf), num); - Add(buf); + buf[20] = '\0'; + char *p = &buf[20]; + do + { + *--p = '0' + (num % 10); + num /= 10; + } + while(num); + Add(p); } -void VmaStringBuilder::AddPointer(const void *ptr) { +void VmaStringBuilder::AddPointer(const void* ptr) +{ char buf[21]; VmaPtrToStr(buf, sizeof(buf), ptr); Add(buf); @@ -6692,66 +7600,72 @@ 
void VmaStringBuilder::AddPointer(const void *ptr) { #if VMA_STATS_STRING_ENABLED -class VmaJsonWriter { +class VmaJsonWriter +{ VMA_CLASS_NO_COPY(VmaJsonWriter) public: - VmaJsonWriter(const VkAllocationCallbacks *pAllocationCallbacks, VmaStringBuilder &sb); + VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb); ~VmaJsonWriter(); void BeginObject(bool singleLine = false); void EndObject(); - + void BeginArray(bool singleLine = false); void EndArray(); - - void WriteString(const char *pStr); - void BeginString(const char *pStr = VMA_NULL); - void ContinueString(const char *pStr); + + void WriteString(const char* pStr); + void BeginString(const char* pStr = VMA_NULL); + void ContinueString(const char* pStr); void ContinueString(uint32_t n); void ContinueString(uint64_t n); - void ContinueString_Pointer(const void *ptr); - void EndString(const char *pStr = VMA_NULL); - + void ContinueString_Pointer(const void* ptr); + void EndString(const char* pStr = VMA_NULL); + void WriteNumber(uint32_t n); void WriteNumber(uint64_t n); void WriteBool(bool b); void WriteNull(); private: - static const char *const INDENT; + static const char* const INDENT; - enum COLLECTION_TYPE { + enum COLLECTION_TYPE + { COLLECTION_TYPE_OBJECT, COLLECTION_TYPE_ARRAY, }; - struct StackItem { + struct StackItem + { COLLECTION_TYPE type; uint32_t valueCount; bool singleLineMode; }; - VmaStringBuilder &m_SB; - VmaVector<StackItem, VmaStlAllocator<StackItem> > m_Stack; + VmaStringBuilder& m_SB; + VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack; bool m_InsideString; void BeginValue(bool isString); void WriteIndent(bool oneLess = false); }; -const char *const VmaJsonWriter::INDENT = " "; +const char* const VmaJsonWriter::INDENT = " "; -VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks *pAllocationCallbacks, VmaStringBuilder &sb) : - m_SB(sb), - m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)), - m_InsideString(false) { 
+VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) : + m_SB(sb), + m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)), + m_InsideString(false) +{ } -VmaJsonWriter::~VmaJsonWriter() { +VmaJsonWriter::~VmaJsonWriter() +{ VMA_ASSERT(!m_InsideString); VMA_ASSERT(m_Stack.empty()); } -void VmaJsonWriter::BeginObject(bool singleLine) { +void VmaJsonWriter::BeginObject(bool singleLine) +{ VMA_ASSERT(!m_InsideString); BeginValue(false); @@ -6764,7 +7678,8 @@ void VmaJsonWriter::BeginObject(bool singleLine) { m_Stack.push_back(item); } -void VmaJsonWriter::EndObject() { +void VmaJsonWriter::EndObject() +{ VMA_ASSERT(!m_InsideString); WriteIndent(true); @@ -6774,7 +7689,8 @@ void VmaJsonWriter::EndObject() { m_Stack.pop_back(); } -void VmaJsonWriter::BeginArray(bool singleLine) { +void VmaJsonWriter::BeginArray(bool singleLine) +{ VMA_ASSERT(!m_InsideString); BeginValue(false); @@ -6787,7 +7703,8 @@ void VmaJsonWriter::BeginArray(bool singleLine) { m_Stack.push_back(item); } -void VmaJsonWriter::EndArray() { +void VmaJsonWriter::EndArray() +{ VMA_ASSERT(!m_InsideString); WriteIndent(true); @@ -6797,136 +7714,168 @@ void VmaJsonWriter::EndArray() { m_Stack.pop_back(); } -void VmaJsonWriter::WriteString(const char *pStr) { +void VmaJsonWriter::WriteString(const char* pStr) +{ BeginString(pStr); EndString(); } -void VmaJsonWriter::BeginString(const char *pStr) { +void VmaJsonWriter::BeginString(const char* pStr) +{ VMA_ASSERT(!m_InsideString); BeginValue(true); m_SB.Add('"'); m_InsideString = true; - if (pStr != VMA_NULL && pStr[0] != '\0') { + if(pStr != VMA_NULL && pStr[0] != '\0') + { ContinueString(pStr); } } -void VmaJsonWriter::ContinueString(const char *pStr) { +void VmaJsonWriter::ContinueString(const char* pStr) +{ VMA_ASSERT(m_InsideString); const size_t strLen = strlen(pStr); - for (size_t i = 0; i < strLen; ++i) { + for(size_t i = 0; i < strLen; ++i) + { char ch = pStr[i]; - if (ch == '\\') { + if(ch == '\\') 
+ { m_SB.Add("\\\\"); - } else if (ch == '"') { + } + else if(ch == '"') + { m_SB.Add("\\\""); - } else if (ch >= 32) { + } + else if(ch >= 32) + { m_SB.Add(ch); - } else - switch (ch) { - case '\b': - m_SB.Add("\\b"); - break; - case '\f': - m_SB.Add("\\f"); - break; - case '\n': - m_SB.Add("\\n"); - break; - case '\r': - m_SB.Add("\\r"); - break; - case '\t': - m_SB.Add("\\t"); - break; - default: - VMA_ASSERT(0 && "Character not currently supported."); - break; - } + } + else switch(ch) + { + case '\b': + m_SB.Add("\\b"); + break; + case '\f': + m_SB.Add("\\f"); + break; + case '\n': + m_SB.Add("\\n"); + break; + case '\r': + m_SB.Add("\\r"); + break; + case '\t': + m_SB.Add("\\t"); + break; + default: + VMA_ASSERT(0 && "Character not currently supported."); + break; + } } } -void VmaJsonWriter::ContinueString(uint32_t n) { +void VmaJsonWriter::ContinueString(uint32_t n) +{ VMA_ASSERT(m_InsideString); m_SB.AddNumber(n); } -void VmaJsonWriter::ContinueString(uint64_t n) { +void VmaJsonWriter::ContinueString(uint64_t n) +{ VMA_ASSERT(m_InsideString); m_SB.AddNumber(n); } -void VmaJsonWriter::ContinueString_Pointer(const void *ptr) { +void VmaJsonWriter::ContinueString_Pointer(const void* ptr) +{ VMA_ASSERT(m_InsideString); m_SB.AddPointer(ptr); } -void VmaJsonWriter::EndString(const char *pStr) { +void VmaJsonWriter::EndString(const char* pStr) +{ VMA_ASSERT(m_InsideString); - if (pStr != VMA_NULL && pStr[0] != '\0') { + if(pStr != VMA_NULL && pStr[0] != '\0') + { ContinueString(pStr); } m_SB.Add('"'); m_InsideString = false; } -void VmaJsonWriter::WriteNumber(uint32_t n) { +void VmaJsonWriter::WriteNumber(uint32_t n) +{ VMA_ASSERT(!m_InsideString); BeginValue(false); m_SB.AddNumber(n); } -void VmaJsonWriter::WriteNumber(uint64_t n) { +void VmaJsonWriter::WriteNumber(uint64_t n) +{ VMA_ASSERT(!m_InsideString); BeginValue(false); m_SB.AddNumber(n); } -void VmaJsonWriter::WriteBool(bool b) { +void VmaJsonWriter::WriteBool(bool b) +{ VMA_ASSERT(!m_InsideString); 
BeginValue(false); m_SB.Add(b ? "true" : "false"); } -void VmaJsonWriter::WriteNull() { +void VmaJsonWriter::WriteNull() +{ VMA_ASSERT(!m_InsideString); BeginValue(false); m_SB.Add("null"); } -void VmaJsonWriter::BeginValue(bool isString) { - if (!m_Stack.empty()) { - StackItem &currItem = m_Stack.back(); - if (currItem.type == COLLECTION_TYPE_OBJECT && - currItem.valueCount % 2 == 0) { +void VmaJsonWriter::BeginValue(bool isString) +{ + if(!m_Stack.empty()) + { + StackItem& currItem = m_Stack.back(); + if(currItem.type == COLLECTION_TYPE_OBJECT && + currItem.valueCount % 2 == 0) + { VMA_ASSERT(isString); } - if (currItem.type == COLLECTION_TYPE_OBJECT && - currItem.valueCount % 2 != 0) { + if(currItem.type == COLLECTION_TYPE_OBJECT && + currItem.valueCount % 2 != 0) + { m_SB.Add(": "); - } else if (currItem.valueCount > 0) { + } + else if(currItem.valueCount > 0) + { m_SB.Add(", "); WriteIndent(); - } else { + } + else + { WriteIndent(); } ++currItem.valueCount; } } -void VmaJsonWriter::WriteIndent(bool oneLess) { - if (!m_Stack.empty() && !m_Stack.back().singleLineMode) { +void VmaJsonWriter::WriteIndent(bool oneLess) +{ + if(!m_Stack.empty() && !m_Stack.back().singleLineMode) + { m_SB.AddNewLine(); - + size_t count = m_Stack.size(); - if (count > 0 && oneLess) { + if(count > 0 && oneLess) + { --count; } - for (size_t i = 0; i < count; ++i) { + for(size_t i = 0; i < count; ++i) + { m_SB.Add(INDENT); } } @@ -6936,35 +7885,38 @@ void VmaJsonWriter::WriteIndent(bool oneLess) { //////////////////////////////////////////////////////////////////////////////// -void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void *pUserData) { - if (IsUserDataString()) { +void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData) +{ + if(IsUserDataString()) + { VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData); FreeUserDataString(hAllocator); - if (pUserData != VMA_NULL) { - const char *const newStrSrc = (char *)pUserData; - const size_t newStrLen 
= strlen(newStrSrc); - char *const newStrDst = vma_new_array(hAllocator, char, newStrLen + 1); - memcpy(newStrDst, newStrSrc, newStrLen + 1); - m_pUserData = newStrDst; + if(pUserData != VMA_NULL) + { + m_pUserData = VmaCreateStringCopy(hAllocator->GetAllocationCallbacks(), (const char*)pUserData); } - } else { + } + else + { m_pUserData = pUserData; } } void VmaAllocation_T::ChangeBlockAllocation( - VmaAllocator hAllocator, - VmaDeviceMemoryBlock *block, - VkDeviceSize offset) { + VmaAllocator hAllocator, + VmaDeviceMemoryBlock* block, + VkDeviceSize offset) +{ VMA_ASSERT(block != VMA_NULL); VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK); // Move mapping reference counter from old block to new block. - if (block != m_BlockAllocation.m_Block) { + if(block != m_BlockAllocation.m_Block) + { uint32_t mapRefCount = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP; - if (IsPersistentMap()) + if(IsPersistentMap()) ++mapRefCount; m_BlockAllocation.m_Block->Unmap(hAllocator, mapRefCount); block->Map(hAllocator, mapRefCount, VMA_NULL); @@ -6974,101 +7926,103 @@ void VmaAllocation_T::ChangeBlockAllocation( m_BlockAllocation.m_Offset = offset; } -void VmaAllocation_T::ChangeSize(VkDeviceSize newSize) { - VMA_ASSERT(newSize > 0); - m_Size = newSize; -} - -void VmaAllocation_T::ChangeOffset(VkDeviceSize newOffset) { +void VmaAllocation_T::ChangeOffset(VkDeviceSize newOffset) +{ VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK); m_BlockAllocation.m_Offset = newOffset; } -VkDeviceSize VmaAllocation_T::GetOffset() const { - switch (m_Type) { - case ALLOCATION_TYPE_BLOCK: - return m_BlockAllocation.m_Offset; - case ALLOCATION_TYPE_DEDICATED: - return 0; - default: - VMA_ASSERT(0); - return 0; - } -} - -VkDeviceMemory VmaAllocation_T::GetMemory() const { - switch (m_Type) { - case ALLOCATION_TYPE_BLOCK: - return m_BlockAllocation.m_Block->GetDeviceMemory(); - case ALLOCATION_TYPE_DEDICATED: - return m_DedicatedAllocation.m_hMemory; - default: - VMA_ASSERT(0); - return VK_NULL_HANDLE; 
+VkDeviceSize VmaAllocation_T::GetOffset() const +{ + switch(m_Type) + { + case ALLOCATION_TYPE_BLOCK: + return m_BlockAllocation.m_Offset; + case ALLOCATION_TYPE_DEDICATED: + return 0; + default: + VMA_ASSERT(0); + return 0; } } -uint32_t VmaAllocation_T::GetMemoryTypeIndex() const { - switch (m_Type) { - case ALLOCATION_TYPE_BLOCK: - return m_BlockAllocation.m_Block->GetMemoryTypeIndex(); - case ALLOCATION_TYPE_DEDICATED: - return m_DedicatedAllocation.m_MemoryTypeIndex; - default: - VMA_ASSERT(0); - return UINT32_MAX; +VkDeviceMemory VmaAllocation_T::GetMemory() const +{ + switch(m_Type) + { + case ALLOCATION_TYPE_BLOCK: + return m_BlockAllocation.m_Block->GetDeviceMemory(); + case ALLOCATION_TYPE_DEDICATED: + return m_DedicatedAllocation.m_hMemory; + default: + VMA_ASSERT(0); + return VK_NULL_HANDLE; } } -void *VmaAllocation_T::GetMappedData() const { - switch (m_Type) { - case ALLOCATION_TYPE_BLOCK: - if (m_MapCount != 0) { - void *pBlockData = m_BlockAllocation.m_Block->GetMappedData(); - VMA_ASSERT(pBlockData != VMA_NULL); - return (char *)pBlockData + m_BlockAllocation.m_Offset; - } else { - return VMA_NULL; - } - break; - case ALLOCATION_TYPE_DEDICATED: - VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0)); - return m_DedicatedAllocation.m_pMappedData; - default: - VMA_ASSERT(0); +void* VmaAllocation_T::GetMappedData() const +{ + switch(m_Type) + { + case ALLOCATION_TYPE_BLOCK: + if(m_MapCount != 0) + { + void* pBlockData = m_BlockAllocation.m_Block->GetMappedData(); + VMA_ASSERT(pBlockData != VMA_NULL); + return (char*)pBlockData + m_BlockAllocation.m_Offset; + } + else + { return VMA_NULL; + } + break; + case ALLOCATION_TYPE_DEDICATED: + VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0)); + return m_DedicatedAllocation.m_pMappedData; + default: + VMA_ASSERT(0); + return VMA_NULL; } } -bool VmaAllocation_T::CanBecomeLost() const { - switch (m_Type) { - case ALLOCATION_TYPE_BLOCK: - return 
m_BlockAllocation.m_CanBecomeLost; - case ALLOCATION_TYPE_DEDICATED: - return false; - default: - VMA_ASSERT(0); - return false; +bool VmaAllocation_T::CanBecomeLost() const +{ + switch(m_Type) + { + case ALLOCATION_TYPE_BLOCK: + return m_BlockAllocation.m_CanBecomeLost; + case ALLOCATION_TYPE_DEDICATED: + return false; + default: + VMA_ASSERT(0); + return false; } } -bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount) { +bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount) +{ VMA_ASSERT(CanBecomeLost()); /* - Warning: This is a carefully designed algorithm. - Do not modify unless you really know what you're doing :) - */ + Warning: This is a carefully designed algorithm. + Do not modify unless you really know what you're doing :) + */ uint32_t localLastUseFrameIndex = GetLastUseFrameIndex(); - for (;;) { - if (localLastUseFrameIndex == VMA_FRAME_INDEX_LOST) { + for(;;) + { + if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST) + { VMA_ASSERT(0); return false; - } else if (localLastUseFrameIndex + frameInUseCount >= currentFrameIndex) { + } + else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex) + { return false; - } else // Last use time earlier than current time. + } + else // Last use time earlier than current time. { - if (CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST)) { + if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST)) + { // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST. // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock. return true; @@ -7080,7 +8034,7 @@ bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCo #if VMA_STATS_STRING_ENABLED // Correspond to values of enum VmaSuballocationType. 
-static const char *VMA_SUBALLOCATION_TYPE_NAMES[] = { +static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = { "FREE", "UNKNOWN", "BUFFER", @@ -7089,18 +8043,23 @@ static const char *VMA_SUBALLOCATION_TYPE_NAMES[] = { "IMAGE_OPTIMAL", }; -void VmaAllocation_T::PrintParameters(class VmaJsonWriter &json) const { +void VmaAllocation_T::PrintParameters(class VmaJsonWriter& json) const +{ json.WriteString("Type"); json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[m_SuballocationType]); json.WriteString("Size"); json.WriteNumber(m_Size); - if (m_pUserData != VMA_NULL) { + if(m_pUserData != VMA_NULL) + { json.WriteString("UserData"); - if (IsUserDataString()) { - json.WriteString((const char *)m_pUserData); - } else { + if(IsUserDataString()) + { + json.WriteString((const char*)m_pUserData); + } + else + { json.BeginString(); json.ContinueString_Pointer(m_pUserData); json.EndString(); @@ -7113,7 +8072,8 @@ void VmaAllocation_T::PrintParameters(class VmaJsonWriter &json) const { json.WriteString("LastUseFrameIndex"); json.WriteNumber(GetLastUseFrameIndex()); - if (m_BufferImageUsage != 0) { + if(m_BufferImageUsage != 0) + { json.WriteString("Usage"); json.WriteNumber(m_BufferImageUsage); } @@ -7121,58 +8081,71 @@ void VmaAllocation_T::PrintParameters(class VmaJsonWriter &json) const { #endif -void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator) { +void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator) +{ VMA_ASSERT(IsUserDataString()); - if (m_pUserData != VMA_NULL) { - char *const oldStr = (char *)m_pUserData; - const size_t oldStrLen = strlen(oldStr); - vma_delete_array(hAllocator, oldStr, oldStrLen + 1); - m_pUserData = VMA_NULL; - } + VmaFreeString(hAllocator->GetAllocationCallbacks(), (char*)m_pUserData); + m_pUserData = VMA_NULL; } -void VmaAllocation_T::BlockAllocMap() { +void VmaAllocation_T::BlockAllocMap() +{ VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK); - if ((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F) { + if((m_MapCount & 
~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F) + { ++m_MapCount; - } else { + } + else + { VMA_ASSERT(0 && "Allocation mapped too many times simultaneously."); } } -void VmaAllocation_T::BlockAllocUnmap() { +void VmaAllocation_T::BlockAllocUnmap() +{ VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK); - if ((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0) { + if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0) + { --m_MapCount; - } else { + } + else + { VMA_ASSERT(0 && "Unmapping allocation not previously mapped."); } } -VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void **ppData) { +VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData) +{ VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED); - if (m_MapCount != 0) { - if ((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F) { + if(m_MapCount != 0) + { + if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F) + { VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL); *ppData = m_DedicatedAllocation.m_pMappedData; ++m_MapCount; return VK_SUCCESS; - } else { + } + else + { VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously."); return VK_ERROR_MEMORY_MAP_FAILED; } - } else { + } + else + { VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)( - hAllocator->m_hDevice, - m_DedicatedAllocation.m_hMemory, - 0, // offset - VK_WHOLE_SIZE, - 0, // flags - ppData); - if (result == VK_SUCCESS) { + hAllocator->m_hDevice, + m_DedicatedAllocation.m_hMemory, + 0, // offset + VK_WHOLE_SIZE, + 0, // flags + ppData); + if(result == VK_SUCCESS) + { m_DedicatedAllocation.m_pMappedData = *ppData; m_MapCount = 1; } @@ -7180,25 +8153,31 @@ VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void **ppDa } } -void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator) { +void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator) +{ VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED); - if ((m_MapCount & 
~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0) { + if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0) + { --m_MapCount; - if (m_MapCount == 0) { + if(m_MapCount == 0) + { m_DedicatedAllocation.m_pMappedData = VMA_NULL; (*hAllocator->GetVulkanFunctions().vkUnmapMemory)( - hAllocator->m_hDevice, - m_DedicatedAllocation.m_hMemory); + hAllocator->m_hDevice, + m_DedicatedAllocation.m_hMemory); } - } else { + } + else + { VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped."); } } #if VMA_STATS_STRING_ENABLED -static void VmaPrintStatInfo(VmaJsonWriter &json, const VmaStatInfo &stat) { +static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat) +{ json.BeginObject(); json.WriteString("Blocks"); @@ -7216,7 +8195,8 @@ static void VmaPrintStatInfo(VmaJsonWriter &json, const VmaStatInfo &stat) { json.WriteString("UnusedBytes"); json.WriteNumber(stat.unusedBytes); - if (stat.allocationCount > 1) { + if(stat.allocationCount > 1) + { json.WriteString("AllocationSize"); json.BeginObject(true); json.WriteString("Min"); @@ -7228,7 +8208,8 @@ static void VmaPrintStatInfo(VmaJsonWriter &json, const VmaStatInfo &stat) { json.EndObject(); } - if (stat.unusedRangeCount > 1) { + if(stat.unusedRangeCount > 1) + { json.WriteString("UnusedRangeSize"); json.BeginObject(true); json.WriteString("Min"); @@ -7245,33 +8226,39 @@ static void VmaPrintStatInfo(VmaJsonWriter &json, const VmaStatInfo &stat) { #endif // #if VMA_STATS_STRING_ENABLED -struct VmaSuballocationItemSizeLess { +struct VmaSuballocationItemSizeLess +{ bool operator()( - const VmaSuballocationList::iterator lhs, - const VmaSuballocationList::iterator rhs) const { + const VmaSuballocationList::iterator lhs, + const VmaSuballocationList::iterator rhs) const + { return lhs->size < rhs->size; } bool operator()( - const VmaSuballocationList::iterator lhs, - VkDeviceSize rhsSize) const { + const VmaSuballocationList::iterator lhs, + VkDeviceSize rhsSize) const + { return lhs->size < rhsSize; } }; + 
//////////////////////////////////////////////////////////////////////////////// // class VmaBlockMetadata VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) : - m_Size(0), - m_pAllocationCallbacks(hAllocator->GetAllocationCallbacks()) { + m_Size(0), + m_pAllocationCallbacks(hAllocator->GetAllocationCallbacks()) +{ } #if VMA_STATS_STRING_ENABLED -void VmaBlockMetadata::PrintDetailedMap_Begin(class VmaJsonWriter &json, - VkDeviceSize unusedBytes, - size_t allocationCount, - size_t unusedRangeCount) const { +void VmaBlockMetadata::PrintDetailedMap_Begin(class VmaJsonWriter& json, + VkDeviceSize unusedBytes, + size_t allocationCount, + size_t unusedRangeCount) const +{ json.BeginObject(); json.WriteString("TotalBytes"); @@ -7290,11 +8277,12 @@ void VmaBlockMetadata::PrintDetailedMap_Begin(class VmaJsonWriter &json, json.BeginArray(); } -void VmaBlockMetadata::PrintDetailedMap_Allocation(class VmaJsonWriter &json, - VkDeviceSize offset, - VmaAllocation hAllocation) const { +void VmaBlockMetadata::PrintDetailedMap_Allocation(class VmaJsonWriter& json, + VkDeviceSize offset, + VmaAllocation hAllocation) const +{ json.BeginObject(true); - + json.WriteString("Offset"); json.WriteNumber(offset); @@ -7303,11 +8291,12 @@ void VmaBlockMetadata::PrintDetailedMap_Allocation(class VmaJsonWriter &json, json.EndObject(); } -void VmaBlockMetadata::PrintDetailedMap_UnusedRange(class VmaJsonWriter &json, - VkDeviceSize offset, - VkDeviceSize size) const { +void VmaBlockMetadata::PrintDetailedMap_UnusedRange(class VmaJsonWriter& json, + VkDeviceSize offset, + VkDeviceSize size) const +{ json.BeginObject(true); - + json.WriteString("Offset"); json.WriteNumber(offset); @@ -7320,7 +8309,8 @@ void VmaBlockMetadata::PrintDetailedMap_UnusedRange(class VmaJsonWriter &json, json.EndObject(); } -void VmaBlockMetadata::PrintDetailedMap_End(class VmaJsonWriter &json) const { +void VmaBlockMetadata::PrintDetailedMap_End(class VmaJsonWriter& json) const +{ json.EndArray(); 
json.EndObject(); } @@ -7331,17 +8321,20 @@ void VmaBlockMetadata::PrintDetailedMap_End(class VmaJsonWriter &json) const { // class VmaBlockMetadata_Generic VmaBlockMetadata_Generic::VmaBlockMetadata_Generic(VmaAllocator hAllocator) : - VmaBlockMetadata(hAllocator), - m_FreeCount(0), - m_SumFreeSize(0), - m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())), - m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks())) { + VmaBlockMetadata(hAllocator), + m_FreeCount(0), + m_SumFreeSize(0), + m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())), + m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks())) +{ } -VmaBlockMetadata_Generic::~VmaBlockMetadata_Generic() { +VmaBlockMetadata_Generic::~VmaBlockMetadata_Generic() +{ } -void VmaBlockMetadata_Generic::Init(VkDeviceSize size) { +void VmaBlockMetadata_Generic::Init(VkDeviceSize size) +{ VmaBlockMetadata::Init(size); m_FreeCount = 1; @@ -7360,9 +8353,10 @@ void VmaBlockMetadata_Generic::Init(VkDeviceSize size) { m_FreeSuballocationsBySize.push_back(suballocItem); } -bool VmaBlockMetadata_Generic::Validate() const { +bool VmaBlockMetadata_Generic::Validate() const +{ VMA_VALIDATE(!m_Suballocations.empty()); - + // Expected offset of new suballocation as calculated from previous ones. VkDeviceSize calculatedOffset = 0; // Expected number of free suballocations as calculated from traversing their list. @@ -7375,11 +8369,12 @@ bool VmaBlockMetadata_Generic::Validate() const { // True if previous visited suballocation was free. 
bool prevFree = false; - for (VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin(); - suballocItem != m_Suballocations.cend(); - ++suballocItem) { - const VmaSuballocation &subAlloc = *suballocItem; - + for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin(); + suballocItem != m_Suballocations.cend(); + ++suballocItem) + { + const VmaSuballocation& subAlloc = *suballocItem; + // Actual offset of this suballocation doesn't match expected one. VMA_VALIDATE(subAlloc.offset == calculatedOffset); @@ -7389,16 +8384,20 @@ bool VmaBlockMetadata_Generic::Validate() const { VMA_VALIDATE(currFree == (subAlloc.hAllocation == VK_NULL_HANDLE)); - if (currFree) { + if(currFree) + { calculatedSumFreeSize += subAlloc.size; ++calculatedFreeCount; - if (subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER) { + if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER) + { ++freeSuballocationsToRegister; } // Margin required between allocations - every free space must be at least that large. VMA_VALIDATE(subAlloc.size >= VMA_DEBUG_MARGIN); - } else { + } + else + { VMA_VALIDATE(subAlloc.hAllocation->GetOffset() == subAlloc.offset); VMA_VALIDATE(subAlloc.hAllocation->GetSize() == subAlloc.size); @@ -7415,9 +8414,10 @@ bool VmaBlockMetadata_Generic::Validate() const { VMA_VALIDATE(m_FreeSuballocationsBySize.size() == freeSuballocationsToRegister); VkDeviceSize lastSize = 0; - for (size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i) { + for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i) + { VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i]; - + // Only free suballocations can be registered in m_FreeSuballocationsBySize. VMA_VALIDATE(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE); // They must be sorted by size ascending. 
@@ -7435,25 +8435,31 @@ bool VmaBlockMetadata_Generic::Validate() const { return true; } -VkDeviceSize VmaBlockMetadata_Generic::GetUnusedRangeSizeMax() const { - if (!m_FreeSuballocationsBySize.empty()) { +VkDeviceSize VmaBlockMetadata_Generic::GetUnusedRangeSizeMax() const +{ + if(!m_FreeSuballocationsBySize.empty()) + { return m_FreeSuballocationsBySize.back()->size; - } else { + } + else + { return 0; } } -bool VmaBlockMetadata_Generic::IsEmpty() const { +bool VmaBlockMetadata_Generic::IsEmpty() const +{ return (m_Suballocations.size() == 1) && (m_FreeCount == 1); } -void VmaBlockMetadata_Generic::CalcAllocationStatInfo(VmaStatInfo &outInfo) const { +void VmaBlockMetadata_Generic::CalcAllocationStatInfo(VmaStatInfo& outInfo) const +{ outInfo.blockCount = 1; const uint32_t rangeCount = (uint32_t)m_Suballocations.size(); outInfo.allocationCount = rangeCount - m_FreeCount; outInfo.unusedRangeCount = m_FreeCount; - + outInfo.unusedBytes = m_SumFreeSize; outInfo.usedBytes = GetSize() - outInfo.unusedBytes; @@ -7462,21 +8468,26 @@ void VmaBlockMetadata_Generic::CalcAllocationStatInfo(VmaStatInfo &outInfo) cons outInfo.unusedRangeSizeMin = UINT64_MAX; outInfo.unusedRangeSizeMax = 0; - for (VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin(); - suballocItem != m_Suballocations.cend(); - ++suballocItem) { - const VmaSuballocation &suballoc = *suballocItem; - if (suballoc.type != VMA_SUBALLOCATION_TYPE_FREE) { + for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin(); + suballocItem != m_Suballocations.cend(); + ++suballocItem) + { + const VmaSuballocation& suballoc = *suballocItem; + if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE) + { outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size); outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size); - } else { + } + else + { outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size); 
outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size); } } } -void VmaBlockMetadata_Generic::AddPoolStats(VmaPoolStats &inoutStats) const { +void VmaBlockMetadata_Generic::AddPoolStats(VmaPoolStats& inoutStats) const +{ const uint32_t rangeCount = (uint32_t)m_Suballocations.size(); inoutStats.size += GetSize(); @@ -7488,19 +8499,24 @@ void VmaBlockMetadata_Generic::AddPoolStats(VmaPoolStats &inoutStats) const { #if VMA_STATS_STRING_ENABLED -void VmaBlockMetadata_Generic::PrintDetailedMap(class VmaJsonWriter &json) const { +void VmaBlockMetadata_Generic::PrintDetailedMap(class VmaJsonWriter& json) const +{ PrintDetailedMap_Begin(json, - m_SumFreeSize, // unusedBytes - m_Suballocations.size() - (size_t)m_FreeCount, // allocationCount - m_FreeCount); // unusedRangeCount + m_SumFreeSize, // unusedBytes + m_Suballocations.size() - (size_t)m_FreeCount, // allocationCount + m_FreeCount); // unusedRangeCount size_t i = 0; - for (VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin(); - suballocItem != m_Suballocations.cend(); - ++suballocItem, ++i) { - if (suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE) { + for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin(); + suballocItem != m_Suballocations.cend(); + ++suballocItem, ++i) + { + if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE) + { PrintDetailedMap_UnusedRange(json, suballocItem->offset, suballocItem->size); - } else { + } + else + { PrintDetailedMap_Allocation(json, suballocItem->offset, suballocItem->hAllocation); } } @@ -7511,16 +8527,17 @@ void VmaBlockMetadata_Generic::PrintDetailedMap(class VmaJsonWriter &json) const #endif // #if VMA_STATS_STRING_ENABLED bool VmaBlockMetadata_Generic::CreateAllocationRequest( - uint32_t currentFrameIndex, - uint32_t frameInUseCount, - VkDeviceSize bufferImageGranularity, - VkDeviceSize allocSize, - VkDeviceSize allocAlignment, - bool upperAddress, - VmaSuballocationType allocType, - bool 
canMakeOtherLost, - uint32_t strategy, - VmaAllocationRequest *pAllocationRequest) { + uint32_t currentFrameIndex, + uint32_t frameInUseCount, + VkDeviceSize bufferImageGranularity, + VkDeviceSize allocSize, + VkDeviceSize allocAlignment, + bool upperAddress, + VmaSuballocationType allocType, + bool canMakeOtherLost, + uint32_t strategy, + VmaAllocationRequest* pAllocationRequest) +{ VMA_ASSERT(allocSize > 0); VMA_ASSERT(!upperAddress); VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE); @@ -7530,78 +8547,90 @@ bool VmaBlockMetadata_Generic::CreateAllocationRequest( pAllocationRequest->type = VmaAllocationRequestType::Normal; // There is not enough total free space in this block to fullfill the request: Early return. - if (canMakeOtherLost == false && - m_SumFreeSize < allocSize + 2 * VMA_DEBUG_MARGIN) { + if(canMakeOtherLost == false && + m_SumFreeSize < allocSize + 2 * VMA_DEBUG_MARGIN) + { return false; } // New algorithm, efficiently searching freeSuballocationsBySize. const size_t freeSuballocCount = m_FreeSuballocationsBySize.size(); - if (freeSuballocCount > 0) { - if (strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT) { + if(freeSuballocCount > 0) + { + if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT) + { // Find first free suballocation with size not less than allocSize + 2 * VMA_DEBUG_MARGIN. 
- VmaSuballocationList::iterator *const it = VmaBinaryFindFirstNotLess( - m_FreeSuballocationsBySize.data(), - m_FreeSuballocationsBySize.data() + freeSuballocCount, - allocSize + 2 * VMA_DEBUG_MARGIN, - VmaSuballocationItemSizeLess()); + VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess( + m_FreeSuballocationsBySize.data(), + m_FreeSuballocationsBySize.data() + freeSuballocCount, + allocSize + 2 * VMA_DEBUG_MARGIN, + VmaSuballocationItemSizeLess()); size_t index = it - m_FreeSuballocationsBySize.data(); - for (; index < freeSuballocCount; ++index) { - if (CheckAllocation( - currentFrameIndex, - frameInUseCount, - bufferImageGranularity, - allocSize, - allocAlignment, - allocType, - m_FreeSuballocationsBySize[index], - false, // canMakeOtherLost - &pAllocationRequest->offset, - &pAllocationRequest->itemsToMakeLostCount, - &pAllocationRequest->sumFreeSize, - &pAllocationRequest->sumItemSize)) { + for(; index < freeSuballocCount; ++index) + { + if(CheckAllocation( + currentFrameIndex, + frameInUseCount, + bufferImageGranularity, + allocSize, + allocAlignment, + allocType, + m_FreeSuballocationsBySize[index], + false, // canMakeOtherLost + &pAllocationRequest->offset, + &pAllocationRequest->itemsToMakeLostCount, + &pAllocationRequest->sumFreeSize, + &pAllocationRequest->sumItemSize)) + { pAllocationRequest->item = m_FreeSuballocationsBySize[index]; return true; } } - } else if (strategy == VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET) { - for (VmaSuballocationList::iterator it = m_Suballocations.begin(); - it != m_Suballocations.end(); - ++it) { - if (it->type == VMA_SUBALLOCATION_TYPE_FREE && CheckAllocation( - currentFrameIndex, - frameInUseCount, - bufferImageGranularity, - allocSize, - allocAlignment, - allocType, - it, - false, // canMakeOtherLost - &pAllocationRequest->offset, - &pAllocationRequest->itemsToMakeLostCount, - &pAllocationRequest->sumFreeSize, - &pAllocationRequest->sumItemSize)) { + } + else if(strategy == 
VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET) + { + for(VmaSuballocationList::iterator it = m_Suballocations.begin(); + it != m_Suballocations.end(); + ++it) + { + if(it->type == VMA_SUBALLOCATION_TYPE_FREE && CheckAllocation( + currentFrameIndex, + frameInUseCount, + bufferImageGranularity, + allocSize, + allocAlignment, + allocType, + it, + false, // canMakeOtherLost + &pAllocationRequest->offset, + &pAllocationRequest->itemsToMakeLostCount, + &pAllocationRequest->sumFreeSize, + &pAllocationRequest->sumItemSize)) + { pAllocationRequest->item = it; return true; } } - } else // WORST_FIT, FIRST_FIT + } + else // WORST_FIT, FIRST_FIT { // Search staring from biggest suballocations. - for (size_t index = freeSuballocCount; index--;) { - if (CheckAllocation( - currentFrameIndex, - frameInUseCount, - bufferImageGranularity, - allocSize, - allocAlignment, - allocType, - m_FreeSuballocationsBySize[index], - false, // canMakeOtherLost - &pAllocationRequest->offset, - &pAllocationRequest->itemsToMakeLostCount, - &pAllocationRequest->sumFreeSize, - &pAllocationRequest->sumItemSize)) { + for(size_t index = freeSuballocCount; index--; ) + { + if(CheckAllocation( + currentFrameIndex, + frameInUseCount, + bufferImageGranularity, + allocSize, + allocAlignment, + allocType, + m_FreeSuballocationsBySize[index], + false, // canMakeOtherLost + &pAllocationRequest->offset, + &pAllocationRequest->itemsToMakeLostCount, + &pAllocationRequest->sumFreeSize, + &pAllocationRequest->sumItemSize)) + { pAllocationRequest->item = m_FreeSuballocationsBySize[index]; return true; } @@ -7609,36 +8638,42 @@ bool VmaBlockMetadata_Generic::CreateAllocationRequest( } } - if (canMakeOtherLost) { + if(canMakeOtherLost) + { // Brute-force algorithm. TODO: Come up with something better. 
bool found = false; VmaAllocationRequest tmpAllocRequest = {}; tmpAllocRequest.type = VmaAllocationRequestType::Normal; - for (VmaSuballocationList::iterator suballocIt = m_Suballocations.begin(); - suballocIt != m_Suballocations.end(); - ++suballocIt) { - if (suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE || - suballocIt->hAllocation->CanBecomeLost()) { - if (CheckAllocation( - currentFrameIndex, - frameInUseCount, - bufferImageGranularity, - allocSize, - allocAlignment, - allocType, - suballocIt, - canMakeOtherLost, - &tmpAllocRequest.offset, - &tmpAllocRequest.itemsToMakeLostCount, - &tmpAllocRequest.sumFreeSize, - &tmpAllocRequest.sumItemSize)) { - if (strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT) { + for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin(); + suballocIt != m_Suballocations.end(); + ++suballocIt) + { + if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE || + suballocIt->hAllocation->CanBecomeLost()) + { + if(CheckAllocation( + currentFrameIndex, + frameInUseCount, + bufferImageGranularity, + allocSize, + allocAlignment, + allocType, + suballocIt, + canMakeOtherLost, + &tmpAllocRequest.offset, + &tmpAllocRequest.itemsToMakeLostCount, + &tmpAllocRequest.sumFreeSize, + &tmpAllocRequest.sumItemSize)) + { + if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT) + { *pAllocationRequest = tmpAllocRequest; pAllocationRequest->item = suballocIt; break; } - if (!found || tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost()) { + if(!found || tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost()) + { *pAllocationRequest = tmpAllocRequest; pAllocationRequest->item = suballocIt; found = true; @@ -7654,22 +8689,28 @@ bool VmaBlockMetadata_Generic::CreateAllocationRequest( } bool VmaBlockMetadata_Generic::MakeRequestedAllocationsLost( - uint32_t currentFrameIndex, - uint32_t frameInUseCount, - VmaAllocationRequest *pAllocationRequest) { + uint32_t currentFrameIndex, + uint32_t frameInUseCount, + 
VmaAllocationRequest* pAllocationRequest) +{ VMA_ASSERT(pAllocationRequest && pAllocationRequest->type == VmaAllocationRequestType::Normal); - while (pAllocationRequest->itemsToMakeLostCount > 0) { - if (pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE) { + while(pAllocationRequest->itemsToMakeLostCount > 0) + { + if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE) + { ++pAllocationRequest->item; } VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end()); VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE); VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost()); - if (pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount)) { + if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount)) + { pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item); --pAllocationRequest->itemsToMakeLostCount; - } else { + } + else + { return false; } } @@ -7677,18 +8718,21 @@ bool VmaBlockMetadata_Generic::MakeRequestedAllocationsLost( VMA_HEAVY_ASSERT(Validate()); VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end()); VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE); - + return true; } -uint32_t VmaBlockMetadata_Generic::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount) { +uint32_t VmaBlockMetadata_Generic::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount) +{ uint32_t lostAllocationCount = 0; - for (VmaSuballocationList::iterator it = m_Suballocations.begin(); - it != m_Suballocations.end(); - ++it) { - if (it->type != VMA_SUBALLOCATION_TYPE_FREE && - it->hAllocation->CanBecomeLost() && - it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount)) { + for(VmaSuballocationList::iterator it = m_Suballocations.begin(); + it != m_Suballocations.end(); + ++it) + { + if(it->type != VMA_SUBALLOCATION_TYPE_FREE && + it->hAllocation->CanBecomeLost() && + 
it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount)) + { it = FreeSuballocation(it); ++lostAllocationCount; } @@ -7696,16 +8740,21 @@ uint32_t VmaBlockMetadata_Generic::MakeAllocationsLost(uint32_t currentFrameInde return lostAllocationCount; } -VkResult VmaBlockMetadata_Generic::CheckCorruption(const void *pBlockData) { - for (VmaSuballocationList::iterator it = m_Suballocations.begin(); - it != m_Suballocations.end(); - ++it) { - if (it->type != VMA_SUBALLOCATION_TYPE_FREE) { - if (!VmaValidateMagicValue(pBlockData, it->offset - VMA_DEBUG_MARGIN)) { +VkResult VmaBlockMetadata_Generic::CheckCorruption(const void* pBlockData) +{ + for(VmaSuballocationList::iterator it = m_Suballocations.begin(); + it != m_Suballocations.end(); + ++it) + { + if(it->type != VMA_SUBALLOCATION_TYPE_FREE) + { + if(!VmaValidateMagicValue(pBlockData, it->offset - VMA_DEBUG_MARGIN)) + { VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!"); return VK_ERROR_VALIDATION_FAILED_EXT; } - if (!VmaValidateMagicValue(pBlockData, it->offset + it->size)) { + if(!VmaValidateMagicValue(pBlockData, it->offset + it->size)) + { VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!"); return VK_ERROR_VALIDATION_FAILED_EXT; } @@ -7716,13 +8765,14 @@ VkResult VmaBlockMetadata_Generic::CheckCorruption(const void *pBlockData) { } void VmaBlockMetadata_Generic::Alloc( - const VmaAllocationRequest &request, - VmaSuballocationType type, - VkDeviceSize allocSize, - VmaAllocation hAllocation) { + const VmaAllocationRequest& request, + VmaSuballocationType type, + VkDeviceSize allocSize, + VmaAllocation hAllocation) +{ VMA_ASSERT(request.type == VmaAllocationRequestType::Normal); VMA_ASSERT(request.item != m_Suballocations.end()); - VmaSuballocation &suballoc = *request.item; + VmaSuballocation& suballoc = *request.item; // Given suballocation is a free block. VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE); // Given offset is inside this suballocation. 
@@ -7741,7 +8791,8 @@ void VmaBlockMetadata_Generic::Alloc( suballoc.hAllocation = hAllocation; // If there are any free bytes remaining at the end, insert new free suballocation after current one. - if (paddingEnd) { + if(paddingEnd) + { VmaSuballocation paddingSuballoc = {}; paddingSuballoc.offset = request.offset + allocSize; paddingSuballoc.size = paddingEnd; @@ -7749,38 +8800,44 @@ void VmaBlockMetadata_Generic::Alloc( VmaSuballocationList::iterator next = request.item; ++next; const VmaSuballocationList::iterator paddingEndItem = - m_Suballocations.insert(next, paddingSuballoc); + m_Suballocations.insert(next, paddingSuballoc); RegisterFreeSuballocation(paddingEndItem); } // If there are any free bytes remaining at the beginning, insert new free suballocation before current one. - if (paddingBegin) { + if(paddingBegin) + { VmaSuballocation paddingSuballoc = {}; paddingSuballoc.offset = request.offset - paddingBegin; paddingSuballoc.size = paddingBegin; paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE; const VmaSuballocationList::iterator paddingBeginItem = - m_Suballocations.insert(request.item, paddingSuballoc); + m_Suballocations.insert(request.item, paddingSuballoc); RegisterFreeSuballocation(paddingBeginItem); } // Update totals. 
m_FreeCount = m_FreeCount - 1; - if (paddingBegin > 0) { + if(paddingBegin > 0) + { ++m_FreeCount; } - if (paddingEnd > 0) { + if(paddingEnd > 0) + { ++m_FreeCount; } m_SumFreeSize -= allocSize; } -void VmaBlockMetadata_Generic::Free(const VmaAllocation allocation) { - for (VmaSuballocationList::iterator suballocItem = m_Suballocations.begin(); - suballocItem != m_Suballocations.end(); - ++suballocItem) { - VmaSuballocation &suballoc = *suballocItem; - if (suballoc.hAllocation == allocation) { +void VmaBlockMetadata_Generic::Free(const VmaAllocation allocation) +{ + for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin(); + suballocItem != m_Suballocations.end(); + ++suballocItem) + { + VmaSuballocation& suballoc = *suballocItem; + if(suballoc.hAllocation == allocation) + { FreeSuballocation(suballocItem); VMA_HEAVY_ASSERT(Validate()); return; @@ -7789,12 +8846,15 @@ void VmaBlockMetadata_Generic::Free(const VmaAllocation allocation) { VMA_ASSERT(0 && "Not found!"); } -void VmaBlockMetadata_Generic::FreeAtOffset(VkDeviceSize offset) { - for (VmaSuballocationList::iterator suballocItem = m_Suballocations.begin(); - suballocItem != m_Suballocations.end(); - ++suballocItem) { - VmaSuballocation &suballoc = *suballocItem; - if (suballoc.offset == offset) { +void VmaBlockMetadata_Generic::FreeAtOffset(VkDeviceSize offset) +{ + for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin(); + suballocItem != m_Suballocations.end(); + ++suballocItem) + { + VmaSuballocation& suballoc = *suballocItem; + if(suballoc.offset == offset) + { FreeSuballocation(suballocItem); return; } @@ -7802,120 +8862,11 @@ void VmaBlockMetadata_Generic::FreeAtOffset(VkDeviceSize offset) { VMA_ASSERT(0 && "Not found!"); } -bool VmaBlockMetadata_Generic::ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize) { - typedef VmaSuballocationList::iterator iter_type; - for (iter_type suballocItem = m_Suballocations.begin(); - suballocItem != 
m_Suballocations.end(); - ++suballocItem) { - VmaSuballocation &suballoc = *suballocItem; - if (suballoc.hAllocation == alloc) { - iter_type nextItem = suballocItem; - ++nextItem; - - // Should have been ensured on higher level. - VMA_ASSERT(newSize != alloc->GetSize() && newSize > 0); - - // Shrinking. - if (newSize < alloc->GetSize()) { - const VkDeviceSize sizeDiff = suballoc.size - newSize; - - // There is next item. - if (nextItem != m_Suballocations.end()) { - // Next item is free. - if (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE) { - // Grow this next item backward. - UnregisterFreeSuballocation(nextItem); - nextItem->offset -= sizeDiff; - nextItem->size += sizeDiff; - RegisterFreeSuballocation(nextItem); - } - // Next item is not free. - else { - // Create free item after current one. - VmaSuballocation newFreeSuballoc; - newFreeSuballoc.hAllocation = VK_NULL_HANDLE; - newFreeSuballoc.offset = suballoc.offset + newSize; - newFreeSuballoc.size = sizeDiff; - newFreeSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE; - iter_type newFreeSuballocIt = m_Suballocations.insert(nextItem, newFreeSuballoc); - RegisterFreeSuballocation(newFreeSuballocIt); - - ++m_FreeCount; - } - } - // This is the last item. - else { - // Create free item at the end. - VmaSuballocation newFreeSuballoc; - newFreeSuballoc.hAllocation = VK_NULL_HANDLE; - newFreeSuballoc.offset = suballoc.offset + newSize; - newFreeSuballoc.size = sizeDiff; - newFreeSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE; - m_Suballocations.push_back(newFreeSuballoc); - - iter_type newFreeSuballocIt = m_Suballocations.end(); - RegisterFreeSuballocation(--newFreeSuballocIt); - - ++m_FreeCount; - } - - suballoc.size = newSize; - m_SumFreeSize += sizeDiff; - } - // Growing. - else { - const VkDeviceSize sizeDiff = newSize - suballoc.size; - - // There is next item. - if (nextItem != m_Suballocations.end()) { - // Next item is free. 
- if (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE) { - // There is not enough free space, including margin. - if (nextItem->size < sizeDiff + VMA_DEBUG_MARGIN) { - return false; - } - - // There is more free space than required. - if (nextItem->size > sizeDiff) { - // Move and shrink this next item. - UnregisterFreeSuballocation(nextItem); - nextItem->offset += sizeDiff; - nextItem->size -= sizeDiff; - RegisterFreeSuballocation(nextItem); - } - // There is exactly the amount of free space required. - else { - // Remove this next free item. - UnregisterFreeSuballocation(nextItem); - m_Suballocations.erase(nextItem); - --m_FreeCount; - } - } - // Next item is not free - there is no space to grow. - else { - return false; - } - } - // This is the last item - there is no space to grow. - else { - return false; - } - - suballoc.size = newSize; - m_SumFreeSize -= sizeDiff; - } - - // We cannot call Validate() here because alloc object is updated to new size outside of this call. - return true; - } - } - VMA_ASSERT(0 && "Not found!"); - return false; -} - -bool VmaBlockMetadata_Generic::ValidateFreeSuballocationList() const { +bool VmaBlockMetadata_Generic::ValidateFreeSuballocationList() const +{ VkDeviceSize lastSize = 0; - for (size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i) { + for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i) + { const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i]; VMA_VALIDATE(it->type == VMA_SUBALLOCATION_TYPE_FREE); @@ -7927,84 +8878,101 @@ bool VmaBlockMetadata_Generic::ValidateFreeSuballocationList() const { } bool VmaBlockMetadata_Generic::CheckAllocation( - uint32_t currentFrameIndex, - uint32_t frameInUseCount, - VkDeviceSize bufferImageGranularity, - VkDeviceSize allocSize, - VkDeviceSize allocAlignment, - VmaSuballocationType allocType, - VmaSuballocationList::const_iterator suballocItem, - bool canMakeOtherLost, - VkDeviceSize *pOffset, - size_t 
*itemsToMakeLostCount, - VkDeviceSize *pSumFreeSize, - VkDeviceSize *pSumItemSize) const { + uint32_t currentFrameIndex, + uint32_t frameInUseCount, + VkDeviceSize bufferImageGranularity, + VkDeviceSize allocSize, + VkDeviceSize allocAlignment, + VmaSuballocationType allocType, + VmaSuballocationList::const_iterator suballocItem, + bool canMakeOtherLost, + VkDeviceSize* pOffset, + size_t* itemsToMakeLostCount, + VkDeviceSize* pSumFreeSize, + VkDeviceSize* pSumItemSize) const +{ VMA_ASSERT(allocSize > 0); VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE); VMA_ASSERT(suballocItem != m_Suballocations.cend()); VMA_ASSERT(pOffset != VMA_NULL); - + *itemsToMakeLostCount = 0; *pSumFreeSize = 0; *pSumItemSize = 0; - if (canMakeOtherLost) { - if (suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE) { + if(canMakeOtherLost) + { + if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE) + { *pSumFreeSize = suballocItem->size; - } else { - if (suballocItem->hAllocation->CanBecomeLost() && - suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex) { + } + else + { + if(suballocItem->hAllocation->CanBecomeLost() && + suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex) + { ++*itemsToMakeLostCount; *pSumItemSize = suballocItem->size; - } else { + } + else + { return false; } } // Remaining size is too small for this request: Early return. - if (GetSize() - suballocItem->offset < allocSize) { + if(GetSize() - suballocItem->offset < allocSize) + { return false; } // Start from offset equal to beginning of this suballocation. *pOffset = suballocItem->offset; - + // Apply VMA_DEBUG_MARGIN at the beginning. - if (VMA_DEBUG_MARGIN > 0) { + if(VMA_DEBUG_MARGIN > 0) + { *pOffset += VMA_DEBUG_MARGIN; } - + // Apply alignment. *pOffset = VmaAlignUp(*pOffset, allocAlignment); // Check previous suballocations for BufferImageGranularity conflicts. // Make bigger alignment if necessary. 
- if (bufferImageGranularity > 1) { + if(bufferImageGranularity > 1) + { bool bufferImageGranularityConflict = false; VmaSuballocationList::const_iterator prevSuballocItem = suballocItem; - while (prevSuballocItem != m_Suballocations.cbegin()) { + while(prevSuballocItem != m_Suballocations.cbegin()) + { --prevSuballocItem; - const VmaSuballocation &prevSuballoc = *prevSuballocItem; - if (VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity)) { - if (VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType)) { + const VmaSuballocation& prevSuballoc = *prevSuballocItem; + if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity)) + { + if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType)) + { bufferImageGranularityConflict = true; break; } - } else + } + else // Already on previous page. break; } - if (bufferImageGranularityConflict) { + if(bufferImageGranularityConflict) + { *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity); } } - + // Now that we have final *pOffset, check if we are past suballocItem. // If yes, return false - this function should be called for another suballocItem as starting point. - if (*pOffset >= suballocItem->offset + suballocItem->size) { + if(*pOffset >= suballocItem->offset + suballocItem->size) + { return false; } - + // Calculate padding at the beginning based on current offset. const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset; @@ -8013,106 +8981,134 @@ bool VmaBlockMetadata_Generic::CheckAllocation( const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin; // Another early return check. - if (suballocItem->offset + totalSize > GetSize()) { + if(suballocItem->offset + totalSize > GetSize()) + { return false; } // Advance lastSuballocItem until desired size is reached. // Update itemsToMakeLostCount. 
VmaSuballocationList::const_iterator lastSuballocItem = suballocItem; - if (totalSize > suballocItem->size) { + if(totalSize > suballocItem->size) + { VkDeviceSize remainingSize = totalSize - suballocItem->size; - while (remainingSize > 0) { + while(remainingSize > 0) + { ++lastSuballocItem; - if (lastSuballocItem == m_Suballocations.cend()) { + if(lastSuballocItem == m_Suballocations.cend()) + { return false; } - if (lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE) { + if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE) + { *pSumFreeSize += lastSuballocItem->size; - } else { + } + else + { VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE); - if (lastSuballocItem->hAllocation->CanBecomeLost() && - lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex) { + if(lastSuballocItem->hAllocation->CanBecomeLost() && + lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex) + { ++*itemsToMakeLostCount; *pSumItemSize += lastSuballocItem->size; - } else { + } + else + { return false; } } remainingSize = (lastSuballocItem->size < remainingSize) ? - remainingSize - lastSuballocItem->size : - 0; + remainingSize - lastSuballocItem->size : 0; } } // Check next suballocations for BufferImageGranularity conflicts. // If conflict exists, we must mark more allocations lost or fail. 
- if (bufferImageGranularity > 1) { + if(bufferImageGranularity > 1) + { VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem; ++nextSuballocItem; - while (nextSuballocItem != m_Suballocations.cend()) { - const VmaSuballocation &nextSuballoc = *nextSuballocItem; - if (VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity)) { - if (VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type)) { + while(nextSuballocItem != m_Suballocations.cend()) + { + const VmaSuballocation& nextSuballoc = *nextSuballocItem; + if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity)) + { + if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type)) + { VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE); - if (nextSuballoc.hAllocation->CanBecomeLost() && - nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex) { + if(nextSuballoc.hAllocation->CanBecomeLost() && + nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex) + { ++*itemsToMakeLostCount; - } else { + } + else + { return false; } } - } else { + } + else + { // Already on next page. break; } ++nextSuballocItem; } } - } else { - const VmaSuballocation &suballoc = *suballocItem; + } + else + { + const VmaSuballocation& suballoc = *suballocItem; VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE); *pSumFreeSize = suballoc.size; // Size of this suballocation is too small for this request: Early return. - if (suballoc.size < allocSize) { + if(suballoc.size < allocSize) + { return false; } // Start from offset equal to beginning of this suballocation. *pOffset = suballoc.offset; - + // Apply VMA_DEBUG_MARGIN at the beginning. - if (VMA_DEBUG_MARGIN > 0) { + if(VMA_DEBUG_MARGIN > 0) + { *pOffset += VMA_DEBUG_MARGIN; } - + // Apply alignment. *pOffset = VmaAlignUp(*pOffset, allocAlignment); - + // Check previous suballocations for BufferImageGranularity conflicts. 
// Make bigger alignment if necessary. - if (bufferImageGranularity > 1) { + if(bufferImageGranularity > 1) + { bool bufferImageGranularityConflict = false; VmaSuballocationList::const_iterator prevSuballocItem = suballocItem; - while (prevSuballocItem != m_Suballocations.cbegin()) { + while(prevSuballocItem != m_Suballocations.cbegin()) + { --prevSuballocItem; - const VmaSuballocation &prevSuballoc = *prevSuballocItem; - if (VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity)) { - if (VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType)) { + const VmaSuballocation& prevSuballoc = *prevSuballocItem; + if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity)) + { + if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType)) + { bufferImageGranularityConflict = true; break; } - } else + } + else // Already on previous page. break; } - if (bufferImageGranularityConflict) { + if(bufferImageGranularityConflict) + { *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity); } } - + // Calculate padding at the beginning based on current offset. const VkDeviceSize paddingBegin = *pOffset - suballoc.offset; @@ -8120,22 +9116,29 @@ bool VmaBlockMetadata_Generic::CheckAllocation( const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN; // Fail if requested size plus margin before and after is bigger than size of this suballocation. - if (paddingBegin + allocSize + requiredEndMargin > suballoc.size) { + if(paddingBegin + allocSize + requiredEndMargin > suballoc.size) + { return false; } // Check next suballocations for BufferImageGranularity conflicts. // If conflict exists, allocation cannot be made here. 
- if (bufferImageGranularity > 1) { + if(bufferImageGranularity > 1) + { VmaSuballocationList::const_iterator nextSuballocItem = suballocItem; ++nextSuballocItem; - while (nextSuballocItem != m_Suballocations.cend()) { - const VmaSuballocation &nextSuballoc = *nextSuballocItem; - if (VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity)) { - if (VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type)) { + while(nextSuballocItem != m_Suballocations.cend()) + { + const VmaSuballocation& nextSuballoc = *nextSuballocItem; + if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity)) + { + if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type)) + { return false; } - } else { + } + else + { // Already on next page. break; } @@ -8148,10 +9151,11 @@ bool VmaBlockMetadata_Generic::CheckAllocation( return true; } -void VmaBlockMetadata_Generic::MergeFreeWithNext(VmaSuballocationList::iterator item) { +void VmaBlockMetadata_Generic::MergeFreeWithNext(VmaSuballocationList::iterator item) +{ VMA_ASSERT(item != m_Suballocations.end()); VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE); - + VmaSuballocationList::iterator nextItem = item; ++nextItem; VMA_ASSERT(nextItem != m_Suballocations.end()); @@ -8162,12 +9166,13 @@ void VmaBlockMetadata_Generic::MergeFreeWithNext(VmaSuballocationList::iterator m_Suballocations.erase(nextItem); } -VmaSuballocationList::iterator VmaBlockMetadata_Generic::FreeSuballocation(VmaSuballocationList::iterator suballocItem) { +VmaSuballocationList::iterator VmaBlockMetadata_Generic::FreeSuballocation(VmaSuballocationList::iterator suballocItem) +{ // Change this suballocation to be marked as free. - VmaSuballocation &suballoc = *suballocItem; + VmaSuballocation& suballoc = *suballocItem; suballoc.type = VMA_SUBALLOCATION_TYPE_FREE; suballoc.hAllocation = VK_NULL_HANDLE; - + // Update totals. 
++m_FreeCount; m_SumFreeSize += suballoc.size; @@ -8175,38 +9180,46 @@ VmaSuballocationList::iterator VmaBlockMetadata_Generic::FreeSuballocation(VmaSu // Merge with previous and/or next suballocation if it's also free. bool mergeWithNext = false; bool mergeWithPrev = false; - + VmaSuballocationList::iterator nextItem = suballocItem; ++nextItem; - if ((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE)) { + if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE)) + { mergeWithNext = true; } VmaSuballocationList::iterator prevItem = suballocItem; - if (suballocItem != m_Suballocations.begin()) { + if(suballocItem != m_Suballocations.begin()) + { --prevItem; - if (prevItem->type == VMA_SUBALLOCATION_TYPE_FREE) { + if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE) + { mergeWithPrev = true; } } - if (mergeWithNext) { + if(mergeWithNext) + { UnregisterFreeSuballocation(nextItem); MergeFreeWithNext(suballocItem); } - if (mergeWithPrev) { + if(mergeWithPrev) + { UnregisterFreeSuballocation(prevItem); MergeFreeWithNext(prevItem); RegisterFreeSuballocation(prevItem); return prevItem; - } else { + } + else + { RegisterFreeSuballocation(suballocItem); return suballocItem; } } -void VmaBlockMetadata_Generic::RegisterFreeSuballocation(VmaSuballocationList::iterator item) { +void VmaBlockMetadata_Generic::RegisterFreeSuballocation(VmaSuballocationList::iterator item) +{ VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE); VMA_ASSERT(item->size > 0); @@ -8214,10 +9227,14 @@ void VmaBlockMetadata_Generic::RegisterFreeSuballocation(VmaSuballocationList::i // this function, depending on what do you want to check. 
VMA_HEAVY_ASSERT(ValidateFreeSuballocationList()); - if (item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER) { - if (m_FreeSuballocationsBySize.empty()) { + if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER) + { + if(m_FreeSuballocationsBySize.empty()) + { m_FreeSuballocationsBySize.push_back(item); - } else { + } + else + { VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item); } } @@ -8225,7 +9242,9 @@ void VmaBlockMetadata_Generic::RegisterFreeSuballocation(VmaSuballocationList::i //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList()); } -void VmaBlockMetadata_Generic::UnregisterFreeSuballocation(VmaSuballocationList::iterator item) { + +void VmaBlockMetadata_Generic::UnregisterFreeSuballocation(VmaSuballocationList::iterator item) +{ VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE); VMA_ASSERT(item->size > 0); @@ -8233,16 +9252,19 @@ void VmaBlockMetadata_Generic::UnregisterFreeSuballocation(VmaSuballocationList: // this function, depending on what do you want to check. 
VMA_HEAVY_ASSERT(ValidateFreeSuballocationList()); - if (item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER) { - VmaSuballocationList::iterator *const it = VmaBinaryFindFirstNotLess( - m_FreeSuballocationsBySize.data(), - m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(), - item, - VmaSuballocationItemSizeLess()); - for (size_t index = it - m_FreeSuballocationsBySize.data(); - index < m_FreeSuballocationsBySize.size(); - ++index) { - if (m_FreeSuballocationsBySize[index] == item) { + if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER) + { + VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess( + m_FreeSuballocationsBySize.data(), + m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(), + item, + VmaSuballocationItemSizeLess()); + for(size_t index = it - m_FreeSuballocationsBySize.data(); + index < m_FreeSuballocationsBySize.size(); + ++index) + { + if(m_FreeSuballocationsBySize[index] == item) + { VmaVectorRemove(m_FreeSuballocationsBySize, index); return; } @@ -8255,21 +9277,26 @@ void VmaBlockMetadata_Generic::UnregisterFreeSuballocation(VmaSuballocationList: } bool VmaBlockMetadata_Generic::IsBufferImageGranularityConflictPossible( - VkDeviceSize bufferImageGranularity, - VmaSuballocationType &inOutPrevSuballocType) const { - if (bufferImageGranularity == 1 || IsEmpty()) { + VkDeviceSize bufferImageGranularity, + VmaSuballocationType& inOutPrevSuballocType) const +{ + if(bufferImageGranularity == 1 || IsEmpty()) + { return false; } VkDeviceSize minAlignment = VK_WHOLE_SIZE; bool typeConflictFound = false; - for (VmaSuballocationList::const_iterator it = m_Suballocations.cbegin(); - it != m_Suballocations.cend(); - ++it) { + for(VmaSuballocationList::const_iterator it = m_Suballocations.cbegin(); + it != m_Suballocations.cend(); + ++it) + { const VmaSuballocationType suballocType = it->type; - if (suballocType != VMA_SUBALLOCATION_TYPE_FREE) { + if(suballocType != VMA_SUBALLOCATION_TYPE_FREE) + 
{ minAlignment = VMA_MIN(minAlignment, it->hAllocation->GetAlignment()); - if (VmaIsBufferImageGranularityConflict(inOutPrevSuballocType, suballocType)) { + if(VmaIsBufferImageGranularityConflict(inOutPrevSuballocType, suballocType)) + { typeConflictFound = true; } inOutPrevSuballocType = suballocType; @@ -8283,41 +9310,47 @@ bool VmaBlockMetadata_Generic::IsBufferImageGranularityConflictPossible( // class VmaBlockMetadata_Linear VmaBlockMetadata_Linear::VmaBlockMetadata_Linear(VmaAllocator hAllocator) : - VmaBlockMetadata(hAllocator), - m_SumFreeSize(0), - m_Suballocations0(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())), - m_Suballocations1(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())), - m_1stVectorIndex(0), - m_2ndVectorMode(SECOND_VECTOR_EMPTY), - m_1stNullItemsBeginCount(0), - m_1stNullItemsMiddleCount(0), - m_2ndNullItemsCount(0) { + VmaBlockMetadata(hAllocator), + m_SumFreeSize(0), + m_Suballocations0(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())), + m_Suballocations1(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())), + m_1stVectorIndex(0), + m_2ndVectorMode(SECOND_VECTOR_EMPTY), + m_1stNullItemsBeginCount(0), + m_1stNullItemsMiddleCount(0), + m_2ndNullItemsCount(0) +{ } -VmaBlockMetadata_Linear::~VmaBlockMetadata_Linear() { +VmaBlockMetadata_Linear::~VmaBlockMetadata_Linear() +{ } -void VmaBlockMetadata_Linear::Init(VkDeviceSize size) { +void VmaBlockMetadata_Linear::Init(VkDeviceSize size) +{ VmaBlockMetadata::Init(size); m_SumFreeSize = size; } -bool VmaBlockMetadata_Linear::Validate() const { - const SuballocationVectorType &suballocations1st = AccessSuballocations1st(); - const SuballocationVectorType &suballocations2nd = AccessSuballocations2nd(); +bool VmaBlockMetadata_Linear::Validate() const +{ + const SuballocationVectorType& suballocations1st = AccessSuballocations1st(); + const SuballocationVectorType& suballocations2nd = 
AccessSuballocations2nd(); VMA_VALIDATE(suballocations2nd.empty() == (m_2ndVectorMode == SECOND_VECTOR_EMPTY)); VMA_VALIDATE(!suballocations1st.empty() || - suballocations2nd.empty() || - m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER); + suballocations2nd.empty() || + m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER); - if (!suballocations1st.empty()) { + if(!suballocations1st.empty()) + { // Null item at the beginning should be accounted into m_1stNullItemsBeginCount. VMA_VALIDATE(suballocations1st[m_1stNullItemsBeginCount].hAllocation != VK_NULL_HANDLE); // Null item at the end should be just pop_back(). VMA_VALIDATE(suballocations1st.back().hAllocation != VK_NULL_HANDLE); } - if (!suballocations2nd.empty()) { + if(!suballocations2nd.empty()) + { // Null item at the end should be just pop_back(). VMA_VALIDATE(suballocations2nd.back().hAllocation != VK_NULL_HANDLE); } @@ -8329,21 +9362,26 @@ bool VmaBlockMetadata_Linear::Validate() const { const size_t suballoc1stCount = suballocations1st.size(); VkDeviceSize offset = VMA_DEBUG_MARGIN; - if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER) { + if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER) + { const size_t suballoc2ndCount = suballocations2nd.size(); size_t nullItem2ndCount = 0; - for (size_t i = 0; i < suballoc2ndCount; ++i) { - const VmaSuballocation &suballoc = suballocations2nd[i]; + for(size_t i = 0; i < suballoc2ndCount; ++i) + { + const VmaSuballocation& suballoc = suballocations2nd[i]; const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE); VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE)); VMA_VALIDATE(suballoc.offset >= offset); - if (!currFree) { + if(!currFree) + { VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset); VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size); sumUsedSize += suballoc.size; - } else { + } + else + { ++nullItem2ndCount; } @@ -8353,27 +9391,32 @@ bool VmaBlockMetadata_Linear::Validate() const { VMA_VALIDATE(nullItem2ndCount == 
m_2ndNullItemsCount); } - for (size_t i = 0; i < m_1stNullItemsBeginCount; ++i) { - const VmaSuballocation &suballoc = suballocations1st[i]; + for(size_t i = 0; i < m_1stNullItemsBeginCount; ++i) + { + const VmaSuballocation& suballoc = suballocations1st[i]; VMA_VALIDATE(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE && - suballoc.hAllocation == VK_NULL_HANDLE); + suballoc.hAllocation == VK_NULL_HANDLE); } size_t nullItem1stCount = m_1stNullItemsBeginCount; - for (size_t i = m_1stNullItemsBeginCount; i < suballoc1stCount; ++i) { - const VmaSuballocation &suballoc = suballocations1st[i]; + for(size_t i = m_1stNullItemsBeginCount; i < suballoc1stCount; ++i) + { + const VmaSuballocation& suballoc = suballocations1st[i]; const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE); VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE)); VMA_VALIDATE(suballoc.offset >= offset); VMA_VALIDATE(i >= m_1stNullItemsBeginCount || currFree); - if (!currFree) { + if(!currFree) + { VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset); VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size); sumUsedSize += suballoc.size; - } else { + } + else + { ++nullItem1stCount; } @@ -8381,21 +9424,26 @@ bool VmaBlockMetadata_Linear::Validate() const { } VMA_VALIDATE(nullItem1stCount == m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount); - if (m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK) { + if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK) + { const size_t suballoc2ndCount = suballocations2nd.size(); size_t nullItem2ndCount = 0; - for (size_t i = suballoc2ndCount; i--;) { - const VmaSuballocation &suballoc = suballocations2nd[i]; + for(size_t i = suballoc2ndCount; i--; ) + { + const VmaSuballocation& suballoc = suballocations2nd[i]; const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE); VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE)); VMA_VALIDATE(suballoc.offset >= offset); - if (!currFree) { + if(!currFree) + { 
VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset); VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size); sumUsedSize += suballoc.size; - } else { + } + else + { ++nullItem2ndCount; } @@ -8411,76 +9459,81 @@ bool VmaBlockMetadata_Linear::Validate() const { return true; } -size_t VmaBlockMetadata_Linear::GetAllocationCount() const { +size_t VmaBlockMetadata_Linear::GetAllocationCount() const +{ return AccessSuballocations1st().size() - (m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount) + - AccessSuballocations2nd().size() - m_2ndNullItemsCount; + AccessSuballocations2nd().size() - m_2ndNullItemsCount; } -VkDeviceSize VmaBlockMetadata_Linear::GetUnusedRangeSizeMax() const { +VkDeviceSize VmaBlockMetadata_Linear::GetUnusedRangeSizeMax() const +{ const VkDeviceSize size = GetSize(); /* - We don't consider gaps inside allocation vectors with freed allocations because - they are not suitable for reuse in linear allocator. We consider only space that - is available for new allocations. - */ - if (IsEmpty()) { + We don't consider gaps inside allocation vectors with freed allocations because + they are not suitable for reuse in linear allocator. We consider only space that + is available for new allocations. + */ + if(IsEmpty()) + { return size; } + + const SuballocationVectorType& suballocations1st = AccessSuballocations1st(); - const SuballocationVectorType &suballocations1st = AccessSuballocations1st(); - - switch (m_2ndVectorMode) { - case SECOND_VECTOR_EMPTY: - /* - Available space is after end of 1st, as well as before beginning of 1st (which - whould make it a ring buffer). 
- */ - { - const size_t suballocations1stCount = suballocations1st.size(); - VMA_ASSERT(suballocations1stCount > m_1stNullItemsBeginCount); - const VmaSuballocation &firstSuballoc = suballocations1st[m_1stNullItemsBeginCount]; - const VmaSuballocation &lastSuballoc = suballocations1st[suballocations1stCount - 1]; - return VMA_MAX( - firstSuballoc.offset, - size - (lastSuballoc.offset + lastSuballoc.size)); - } - break; + switch(m_2ndVectorMode) + { + case SECOND_VECTOR_EMPTY: + /* + Available space is after end of 1st, as well as before beginning of 1st (which + whould make it a ring buffer). + */ + { + const size_t suballocations1stCount = suballocations1st.size(); + VMA_ASSERT(suballocations1stCount > m_1stNullItemsBeginCount); + const VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount]; + const VmaSuballocation& lastSuballoc = suballocations1st[suballocations1stCount - 1]; + return VMA_MAX( + firstSuballoc.offset, + size - (lastSuballoc.offset + lastSuballoc.size)); + } + break; - case SECOND_VECTOR_RING_BUFFER: - /* - Available space is only between end of 2nd and beginning of 1st. - */ - { - const SuballocationVectorType &suballocations2nd = AccessSuballocations2nd(); - const VmaSuballocation &lastSuballoc2nd = suballocations2nd.back(); - const VmaSuballocation &firstSuballoc1st = suballocations1st[m_1stNullItemsBeginCount]; - return firstSuballoc1st.offset - (lastSuballoc2nd.offset + lastSuballoc2nd.size); - } - break; + case SECOND_VECTOR_RING_BUFFER: + /* + Available space is only between end of 2nd and beginning of 1st. 
+ */ + { + const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); + const VmaSuballocation& lastSuballoc2nd = suballocations2nd.back(); + const VmaSuballocation& firstSuballoc1st = suballocations1st[m_1stNullItemsBeginCount]; + return firstSuballoc1st.offset - (lastSuballoc2nd.offset + lastSuballoc2nd.size); + } + break; - case SECOND_VECTOR_DOUBLE_STACK: - /* - Available space is only between end of 1st and top of 2nd. - */ - { - const SuballocationVectorType &suballocations2nd = AccessSuballocations2nd(); - const VmaSuballocation &topSuballoc2nd = suballocations2nd.back(); - const VmaSuballocation &lastSuballoc1st = suballocations1st.back(); - return topSuballoc2nd.offset - (lastSuballoc1st.offset + lastSuballoc1st.size); - } - break; + case SECOND_VECTOR_DOUBLE_STACK: + /* + Available space is only between end of 1st and top of 2nd. + */ + { + const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); + const VmaSuballocation& topSuballoc2nd = suballocations2nd.back(); + const VmaSuballocation& lastSuballoc1st = suballocations1st.back(); + return topSuballoc2nd.offset - (lastSuballoc1st.offset + lastSuballoc1st.size); + } + break; - default: - VMA_ASSERT(0); - return 0; + default: + VMA_ASSERT(0); + return 0; } } -void VmaBlockMetadata_Linear::CalcAllocationStatInfo(VmaStatInfo &outInfo) const { +void VmaBlockMetadata_Linear::CalcAllocationStatInfo(VmaStatInfo& outInfo) const +{ const VkDeviceSize size = GetSize(); - const SuballocationVectorType &suballocations1st = AccessSuballocations1st(); - const SuballocationVectorType &suballocations2nd = AccessSuballocations2nd(); + const SuballocationVectorType& suballocations1st = AccessSuballocations1st(); + const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); const size_t suballoc1stCount = suballocations1st.size(); const size_t suballoc2ndCount = suballocations2nd.size(); @@ -8495,22 +9548,27 @@ void 
VmaBlockMetadata_Linear::CalcAllocationStatInfo(VmaStatInfo &outInfo) const VkDeviceSize lastOffset = 0; - if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER) { + if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER) + { const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset; size_t nextAlloc2ndIndex = 0; - while (lastOffset < freeSpace2ndTo1stEnd) { + while(lastOffset < freeSpace2ndTo1stEnd) + { // Find next non-null allocation or move nextAllocIndex to the end. - while (nextAlloc2ndIndex < suballoc2ndCount && - suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE) { + while(nextAlloc2ndIndex < suballoc2ndCount && + suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE) + { ++nextAlloc2ndIndex; } // Found non-null allocation. - if (nextAlloc2ndIndex < suballoc2ndCount) { - const VmaSuballocation &suballoc = suballocations2nd[nextAlloc2ndIndex]; - + if(nextAlloc2ndIndex < suballoc2ndCount) + { + const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex]; + // 1. Process free space before this allocation. - if (lastOffset < suballoc.offset) { + if(lastOffset < suballoc.offset) + { // There is free space from lastOffset to suballoc.offset. const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset; ++outInfo.unusedRangeCount; @@ -8518,27 +9576,29 @@ void VmaBlockMetadata_Linear::CalcAllocationStatInfo(VmaStatInfo &outInfo) const outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize); outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize); } - + // 2. Process this allocation. // There is allocation with suballoc.offset, suballoc.size. outInfo.usedBytes += suballoc.size; outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size); outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size); - + // 3. Prepare for next iteration. 
lastOffset = suballoc.offset + suballoc.size; ++nextAlloc2ndIndex; } // We are at the end. - else { + else + { // There is free space from lastOffset to freeSpace2ndTo1stEnd. - if (lastOffset < freeSpace2ndTo1stEnd) { + if(lastOffset < freeSpace2ndTo1stEnd) + { const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset; ++outInfo.unusedRangeCount; outInfo.unusedBytes += unusedRangeSize; outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize); outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize); - } + } // End of loop. lastOffset = freeSpace2ndTo1stEnd; @@ -8548,20 +9608,24 @@ void VmaBlockMetadata_Linear::CalcAllocationStatInfo(VmaStatInfo &outInfo) const size_t nextAlloc1stIndex = m_1stNullItemsBeginCount; const VkDeviceSize freeSpace1stTo2ndEnd = - m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size; - while (lastOffset < freeSpace1stTo2ndEnd) { + m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size; + while(lastOffset < freeSpace1stTo2ndEnd) + { // Find next non-null allocation or move nextAllocIndex to the end. - while (nextAlloc1stIndex < suballoc1stCount && - suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE) { + while(nextAlloc1stIndex < suballoc1stCount && + suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE) + { ++nextAlloc1stIndex; } // Found non-null allocation. - if (nextAlloc1stIndex < suballoc1stCount) { - const VmaSuballocation &suballoc = suballocations1st[nextAlloc1stIndex]; - + if(nextAlloc1stIndex < suballoc1stCount) + { + const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex]; + // 1. Process free space before this allocation. - if (lastOffset < suballoc.offset) { + if(lastOffset < suballoc.offset) + { // There is free space from lastOffset to suballoc.offset. 
const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset; ++outInfo.unusedRangeCount; @@ -8569,48 +9633,55 @@ void VmaBlockMetadata_Linear::CalcAllocationStatInfo(VmaStatInfo &outInfo) const outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize); outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize); } - + // 2. Process this allocation. // There is allocation with suballoc.offset, suballoc.size. outInfo.usedBytes += suballoc.size; outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size); outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size); - + // 3. Prepare for next iteration. lastOffset = suballoc.offset + suballoc.size; ++nextAlloc1stIndex; } // We are at the end. - else { + else + { // There is free space from lastOffset to freeSpace1stTo2ndEnd. - if (lastOffset < freeSpace1stTo2ndEnd) { + if(lastOffset < freeSpace1stTo2ndEnd) + { const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset; ++outInfo.unusedRangeCount; outInfo.unusedBytes += unusedRangeSize; outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize); outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize); - } + } // End of loop. lastOffset = freeSpace1stTo2ndEnd; } } - if (m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK) { + if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK) + { size_t nextAlloc2ndIndex = suballocations2nd.size() - 1; - while (lastOffset < size) { + while(lastOffset < size) + { // Find next non-null allocation or move nextAllocIndex to the end. - while (nextAlloc2ndIndex != SIZE_MAX && - suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE) { + while(nextAlloc2ndIndex != SIZE_MAX && + suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE) + { --nextAlloc2ndIndex; } // Found non-null allocation. 
- if (nextAlloc2ndIndex != SIZE_MAX) { - const VmaSuballocation &suballoc = suballocations2nd[nextAlloc2ndIndex]; - + if(nextAlloc2ndIndex != SIZE_MAX) + { + const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex]; + // 1. Process free space before this allocation. - if (lastOffset < suballoc.offset) { + if(lastOffset < suballoc.offset) + { // There is free space from lastOffset to suballoc.offset. const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset; ++outInfo.unusedRangeCount; @@ -8618,27 +9689,29 @@ void VmaBlockMetadata_Linear::CalcAllocationStatInfo(VmaStatInfo &outInfo) const outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize); outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize); } - + // 2. Process this allocation. // There is allocation with suballoc.offset, suballoc.size. outInfo.usedBytes += suballoc.size; outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size); outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size); - + // 3. Prepare for next iteration. lastOffset = suballoc.offset + suballoc.size; --nextAlloc2ndIndex; } // We are at the end. - else { + else + { // There is free space from lastOffset to size. - if (lastOffset < size) { + if(lastOffset < size) + { const VkDeviceSize unusedRangeSize = size - lastOffset; ++outInfo.unusedRangeCount; outInfo.unusedBytes += unusedRangeSize; outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize); outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize); - } + } // End of loop. 
lastOffset = size; @@ -8649,9 +9722,10 @@ void VmaBlockMetadata_Linear::CalcAllocationStatInfo(VmaStatInfo &outInfo) const outInfo.unusedBytes = size - outInfo.usedBytes; } -void VmaBlockMetadata_Linear::AddPoolStats(VmaPoolStats &inoutStats) const { - const SuballocationVectorType &suballocations1st = AccessSuballocations1st(); - const SuballocationVectorType &suballocations2nd = AccessSuballocations2nd(); +void VmaBlockMetadata_Linear::AddPoolStats(VmaPoolStats& inoutStats) const +{ + const SuballocationVectorType& suballocations1st = AccessSuballocations1st(); + const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); const VkDeviceSize size = GetSize(); const size_t suballoc1stCount = suballocations1st.size(); const size_t suballoc2ndCount = suballocations2nd.size(); @@ -8660,40 +9734,47 @@ void VmaBlockMetadata_Linear::AddPoolStats(VmaPoolStats &inoutStats) const { VkDeviceSize lastOffset = 0; - if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER) { + if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER) + { const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset; size_t nextAlloc2ndIndex = m_1stNullItemsBeginCount; - while (lastOffset < freeSpace2ndTo1stEnd) { + while(lastOffset < freeSpace2ndTo1stEnd) + { // Find next non-null allocation or move nextAlloc2ndIndex to the end. - while (nextAlloc2ndIndex < suballoc2ndCount && - suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE) { + while(nextAlloc2ndIndex < suballoc2ndCount && + suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE) + { ++nextAlloc2ndIndex; } // Found non-null allocation. - if (nextAlloc2ndIndex < suballoc2ndCount) { - const VmaSuballocation &suballoc = suballocations2nd[nextAlloc2ndIndex]; - + if(nextAlloc2ndIndex < suballoc2ndCount) + { + const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex]; + // 1. Process free space before this allocation. 
- if (lastOffset < suballoc.offset) { + if(lastOffset < suballoc.offset) + { // There is free space from lastOffset to suballoc.offset. const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset; inoutStats.unusedSize += unusedRangeSize; ++inoutStats.unusedRangeCount; inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize); } - + // 2. Process this allocation. // There is allocation with suballoc.offset, suballoc.size. ++inoutStats.allocationCount; - + // 3. Prepare for next iteration. lastOffset = suballoc.offset + suballoc.size; ++nextAlloc2ndIndex; } // We are at the end. - else { - if (lastOffset < freeSpace2ndTo1stEnd) { + else + { + if(lastOffset < freeSpace2ndTo1stEnd) + { // There is free space from lastOffset to freeSpace2ndTo1stEnd. const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset; inoutStats.unusedSize += unusedRangeSize; @@ -8709,38 +9790,44 @@ void VmaBlockMetadata_Linear::AddPoolStats(VmaPoolStats &inoutStats) const { size_t nextAlloc1stIndex = m_1stNullItemsBeginCount; const VkDeviceSize freeSpace1stTo2ndEnd = - m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size; - while (lastOffset < freeSpace1stTo2ndEnd) { + m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size; + while(lastOffset < freeSpace1stTo2ndEnd) + { // Find next non-null allocation or move nextAllocIndex to the end. - while (nextAlloc1stIndex < suballoc1stCount && - suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE) { + while(nextAlloc1stIndex < suballoc1stCount && + suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE) + { ++nextAlloc1stIndex; } // Found non-null allocation. - if (nextAlloc1stIndex < suballoc1stCount) { - const VmaSuballocation &suballoc = suballocations1st[nextAlloc1stIndex]; - + if(nextAlloc1stIndex < suballoc1stCount) + { + const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex]; + // 1. 
Process free space before this allocation. - if (lastOffset < suballoc.offset) { + if(lastOffset < suballoc.offset) + { // There is free space from lastOffset to suballoc.offset. const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset; inoutStats.unusedSize += unusedRangeSize; ++inoutStats.unusedRangeCount; inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize); } - + // 2. Process this allocation. // There is allocation with suballoc.offset, suballoc.size. ++inoutStats.allocationCount; - + // 3. Prepare for next iteration. lastOffset = suballoc.offset + suballoc.size; ++nextAlloc1stIndex; } // We are at the end. - else { - if (lastOffset < freeSpace1stTo2ndEnd) { + else + { + if(lastOffset < freeSpace1stTo2ndEnd) + { // There is free space from lastOffset to freeSpace1stTo2ndEnd. const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset; inoutStats.unusedSize += unusedRangeSize; @@ -8753,39 +9840,46 @@ void VmaBlockMetadata_Linear::AddPoolStats(VmaPoolStats &inoutStats) const { } } - if (m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK) { + if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK) + { size_t nextAlloc2ndIndex = suballocations2nd.size() - 1; - while (lastOffset < size) { + while(lastOffset < size) + { // Find next non-null allocation or move nextAlloc2ndIndex to the end. - while (nextAlloc2ndIndex != SIZE_MAX && - suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE) { + while(nextAlloc2ndIndex != SIZE_MAX && + suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE) + { --nextAlloc2ndIndex; } // Found non-null allocation. - if (nextAlloc2ndIndex != SIZE_MAX) { - const VmaSuballocation &suballoc = suballocations2nd[nextAlloc2ndIndex]; - + if(nextAlloc2ndIndex != SIZE_MAX) + { + const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex]; + // 1. Process free space before this allocation. 
- if (lastOffset < suballoc.offset) { + if(lastOffset < suballoc.offset) + { // There is free space from lastOffset to suballoc.offset. const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset; inoutStats.unusedSize += unusedRangeSize; ++inoutStats.unusedRangeCount; inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize); } - + // 2. Process this allocation. // There is allocation with suballoc.offset, suballoc.size. ++inoutStats.allocationCount; - + // 3. Prepare for next iteration. lastOffset = suballoc.offset + suballoc.size; --nextAlloc2ndIndex; } // We are at the end. - else { - if (lastOffset < size) { + else + { + if(lastOffset < size) + { // There is free space from lastOffset to size. const VkDeviceSize unusedRangeSize = size - lastOffset; inoutStats.unusedSize += unusedRangeSize; @@ -8801,10 +9895,11 @@ void VmaBlockMetadata_Linear::AddPoolStats(VmaPoolStats &inoutStats) const { } #if VMA_STATS_STRING_ENABLED -void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter &json) const { +void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const +{ const VkDeviceSize size = GetSize(); - const SuballocationVectorType &suballocations1st = AccessSuballocations1st(); - const SuballocationVectorType &suballocations2nd = AccessSuballocations2nd(); + const SuballocationVectorType& suballocations1st = AccessSuballocations1st(); + const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); const size_t suballoc1stCount = suballocations1st.size(); const size_t suballoc2ndCount = suballocations2nd.size(); @@ -8816,38 +9911,45 @@ void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter &json) const VkDeviceSize lastOffset = 0; size_t alloc2ndCount = 0; - if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER) { + if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER) + { const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset; size_t 
nextAlloc2ndIndex = 0; - while (lastOffset < freeSpace2ndTo1stEnd) { + while(lastOffset < freeSpace2ndTo1stEnd) + { // Find next non-null allocation or move nextAlloc2ndIndex to the end. - while (nextAlloc2ndIndex < suballoc2ndCount && - suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE) { + while(nextAlloc2ndIndex < suballoc2ndCount && + suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE) + { ++nextAlloc2ndIndex; } // Found non-null allocation. - if (nextAlloc2ndIndex < suballoc2ndCount) { - const VmaSuballocation &suballoc = suballocations2nd[nextAlloc2ndIndex]; - + if(nextAlloc2ndIndex < suballoc2ndCount) + { + const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex]; + // 1. Process free space before this allocation. - if (lastOffset < suballoc.offset) { + if(lastOffset < suballoc.offset) + { // There is free space from lastOffset to suballoc.offset. ++unusedRangeCount; } - + // 2. Process this allocation. // There is allocation with suballoc.offset, suballoc.size. ++alloc2ndCount; usedBytes += suballoc.size; - + // 3. Prepare for next iteration. lastOffset = suballoc.offset + suballoc.size; ++nextAlloc2ndIndex; } // We are at the end. - else { - if (lastOffset < freeSpace2ndTo1stEnd) { + else + { + if(lastOffset < freeSpace2ndTo1stEnd) + { // There is free space from lastOffset to freeSpace2ndTo1stEnd. ++unusedRangeCount; } @@ -8861,36 +9963,42 @@ void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter &json) const size_t nextAlloc1stIndex = m_1stNullItemsBeginCount; size_t alloc1stCount = 0; const VkDeviceSize freeSpace1stTo2ndEnd = - m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size; - while (lastOffset < freeSpace1stTo2ndEnd) { + m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size; + while(lastOffset < freeSpace1stTo2ndEnd) + { // Find next non-null allocation or move nextAllocIndex to the end. 
- while (nextAlloc1stIndex < suballoc1stCount && - suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE) { + while(nextAlloc1stIndex < suballoc1stCount && + suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE) + { ++nextAlloc1stIndex; } // Found non-null allocation. - if (nextAlloc1stIndex < suballoc1stCount) { - const VmaSuballocation &suballoc = suballocations1st[nextAlloc1stIndex]; - + if(nextAlloc1stIndex < suballoc1stCount) + { + const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex]; + // 1. Process free space before this allocation. - if (lastOffset < suballoc.offset) { + if(lastOffset < suballoc.offset) + { // There is free space from lastOffset to suballoc.offset. ++unusedRangeCount; } - + // 2. Process this allocation. // There is allocation with suballoc.offset, suballoc.size. ++alloc1stCount; usedBytes += suballoc.size; - + // 3. Prepare for next iteration. lastOffset = suballoc.offset + suballoc.size; ++nextAlloc1stIndex; } // We are at the end. - else { - if (lastOffset < size) { + else + { + if(lastOffset < size) + { // There is free space from lastOffset to freeSpace1stTo2ndEnd. ++unusedRangeCount; } @@ -8900,37 +10008,44 @@ void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter &json) const } } - if (m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK) { + if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK) + { size_t nextAlloc2ndIndex = suballocations2nd.size() - 1; - while (lastOffset < size) { + while(lastOffset < size) + { // Find next non-null allocation or move nextAlloc2ndIndex to the end. - while (nextAlloc2ndIndex != SIZE_MAX && - suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE) { + while(nextAlloc2ndIndex != SIZE_MAX && + suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE) + { --nextAlloc2ndIndex; } // Found non-null allocation. 
- if (nextAlloc2ndIndex != SIZE_MAX) { - const VmaSuballocation &suballoc = suballocations2nd[nextAlloc2ndIndex]; - + if(nextAlloc2ndIndex != SIZE_MAX) + { + const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex]; + // 1. Process free space before this allocation. - if (lastOffset < suballoc.offset) { + if(lastOffset < suballoc.offset) + { // There is free space from lastOffset to suballoc.offset. ++unusedRangeCount; } - + // 2. Process this allocation. // There is allocation with suballoc.offset, suballoc.size. ++alloc2ndCount; usedBytes += suballoc.size; - + // 3. Prepare for next iteration. lastOffset = suballoc.offset + suballoc.size; --nextAlloc2ndIndex; } // We are at the end. - else { - if (lastOffset < size) { + else + { + if(lastOffset < size) + { // There is free space from lastOffset to size. ++unusedRangeCount; } @@ -8947,38 +10062,45 @@ void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter &json) const // SECOND PASS lastOffset = 0; - if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER) { + if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER) + { const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset; size_t nextAlloc2ndIndex = 0; - while (lastOffset < freeSpace2ndTo1stEnd) { + while(lastOffset < freeSpace2ndTo1stEnd) + { // Find next non-null allocation or move nextAlloc2ndIndex to the end. - while (nextAlloc2ndIndex < suballoc2ndCount && - suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE) { + while(nextAlloc2ndIndex < suballoc2ndCount && + suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE) + { ++nextAlloc2ndIndex; } // Found non-null allocation. - if (nextAlloc2ndIndex < suballoc2ndCount) { - const VmaSuballocation &suballoc = suballocations2nd[nextAlloc2ndIndex]; - + if(nextAlloc2ndIndex < suballoc2ndCount) + { + const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex]; + // 1. Process free space before this allocation. 
- if (lastOffset < suballoc.offset) { + if(lastOffset < suballoc.offset) + { // There is free space from lastOffset to suballoc.offset. const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset; PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize); } - + // 2. Process this allocation. // There is allocation with suballoc.offset, suballoc.size. PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation); - + // 3. Prepare for next iteration. lastOffset = suballoc.offset + suballoc.size; ++nextAlloc2ndIndex; } // We are at the end. - else { - if (lastOffset < freeSpace2ndTo1stEnd) { + else + { + if(lastOffset < freeSpace2ndTo1stEnd) + { // There is free space from lastOffset to freeSpace2ndTo1stEnd. const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset; PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize); @@ -8991,35 +10113,41 @@ void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter &json) const } nextAlloc1stIndex = m_1stNullItemsBeginCount; - while (lastOffset < freeSpace1stTo2ndEnd) { + while(lastOffset < freeSpace1stTo2ndEnd) + { // Find next non-null allocation or move nextAllocIndex to the end. - while (nextAlloc1stIndex < suballoc1stCount && - suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE) { + while(nextAlloc1stIndex < suballoc1stCount && + suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE) + { ++nextAlloc1stIndex; } // Found non-null allocation. - if (nextAlloc1stIndex < suballoc1stCount) { - const VmaSuballocation &suballoc = suballocations1st[nextAlloc1stIndex]; - + if(nextAlloc1stIndex < suballoc1stCount) + { + const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex]; + // 1. Process free space before this allocation. - if (lastOffset < suballoc.offset) { + if(lastOffset < suballoc.offset) + { // There is free space from lastOffset to suballoc.offset. 
const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset; PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize); } - + // 2. Process this allocation. // There is allocation with suballoc.offset, suballoc.size. PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation); - + // 3. Prepare for next iteration. lastOffset = suballoc.offset + suballoc.size; ++nextAlloc1stIndex; } // We are at the end. - else { - if (lastOffset < freeSpace1stTo2ndEnd) { + else + { + if(lastOffset < freeSpace1stTo2ndEnd) + { // There is free space from lastOffset to freeSpace1stTo2ndEnd. const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset; PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize); @@ -9030,37 +10158,44 @@ void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter &json) const } } - if (m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK) { + if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK) + { size_t nextAlloc2ndIndex = suballocations2nd.size() - 1; - while (lastOffset < size) { + while(lastOffset < size) + { // Find next non-null allocation or move nextAlloc2ndIndex to the end. - while (nextAlloc2ndIndex != SIZE_MAX && - suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE) { + while(nextAlloc2ndIndex != SIZE_MAX && + suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE) + { --nextAlloc2ndIndex; } // Found non-null allocation. - if (nextAlloc2ndIndex != SIZE_MAX) { - const VmaSuballocation &suballoc = suballocations2nd[nextAlloc2ndIndex]; - + if(nextAlloc2ndIndex != SIZE_MAX) + { + const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex]; + // 1. Process free space before this allocation. - if (lastOffset < suballoc.offset) { + if(lastOffset < suballoc.offset) + { // There is free space from lastOffset to suballoc.offset. const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset; PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize); } - + // 2. 
Process this allocation. // There is allocation with suballoc.offset, suballoc.size. PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation); - + // 3. Prepare for next iteration. lastOffset = suballoc.offset + suballoc.size; --nextAlloc2ndIndex; } // We are at the end. - else { - if (lastOffset < size) { + else + { + if(lastOffset < size) + { // There is free space from lastOffset to size. const VkDeviceSize unusedRangeSize = size - lastOffset; PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize); @@ -9077,57 +10212,63 @@ void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter &json) const #endif // #if VMA_STATS_STRING_ENABLED bool VmaBlockMetadata_Linear::CreateAllocationRequest( - uint32_t currentFrameIndex, - uint32_t frameInUseCount, - VkDeviceSize bufferImageGranularity, - VkDeviceSize allocSize, - VkDeviceSize allocAlignment, - bool upperAddress, - VmaSuballocationType allocType, - bool canMakeOtherLost, - uint32_t strategy, - VmaAllocationRequest *pAllocationRequest) { + uint32_t currentFrameIndex, + uint32_t frameInUseCount, + VkDeviceSize bufferImageGranularity, + VkDeviceSize allocSize, + VkDeviceSize allocAlignment, + bool upperAddress, + VmaSuballocationType allocType, + bool canMakeOtherLost, + uint32_t strategy, + VmaAllocationRequest* pAllocationRequest) +{ VMA_ASSERT(allocSize > 0); VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE); VMA_ASSERT(pAllocationRequest != VMA_NULL); VMA_HEAVY_ASSERT(Validate()); return upperAddress ? 
- CreateAllocationRequest_UpperAddress( - currentFrameIndex, frameInUseCount, bufferImageGranularity, - allocSize, allocAlignment, allocType, canMakeOtherLost, strategy, pAllocationRequest) : - CreateAllocationRequest_LowerAddress( - currentFrameIndex, frameInUseCount, bufferImageGranularity, - allocSize, allocAlignment, allocType, canMakeOtherLost, strategy, pAllocationRequest); + CreateAllocationRequest_UpperAddress( + currentFrameIndex, frameInUseCount, bufferImageGranularity, + allocSize, allocAlignment, allocType, canMakeOtherLost, strategy, pAllocationRequest) : + CreateAllocationRequest_LowerAddress( + currentFrameIndex, frameInUseCount, bufferImageGranularity, + allocSize, allocAlignment, allocType, canMakeOtherLost, strategy, pAllocationRequest); } bool VmaBlockMetadata_Linear::CreateAllocationRequest_UpperAddress( - uint32_t currentFrameIndex, - uint32_t frameInUseCount, - VkDeviceSize bufferImageGranularity, - VkDeviceSize allocSize, - VkDeviceSize allocAlignment, - VmaSuballocationType allocType, - bool canMakeOtherLost, - uint32_t strategy, - VmaAllocationRequest *pAllocationRequest) { + uint32_t currentFrameIndex, + uint32_t frameInUseCount, + VkDeviceSize bufferImageGranularity, + VkDeviceSize allocSize, + VkDeviceSize allocAlignment, + VmaSuballocationType allocType, + bool canMakeOtherLost, + uint32_t strategy, + VmaAllocationRequest* pAllocationRequest) +{ const VkDeviceSize size = GetSize(); - SuballocationVectorType &suballocations1st = AccessSuballocations1st(); - SuballocationVectorType &suballocations2nd = AccessSuballocations2nd(); + SuballocationVectorType& suballocations1st = AccessSuballocations1st(); + SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); - if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER) { + if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER) + { VMA_ASSERT(0 && "Trying to use pool with linear algorithm as double stack, while it is already being used as ring buffer."); return false; } // Try to allocate 
before 2nd.back(), or end of block if 2nd.empty(). - if (allocSize > size) { + if(allocSize > size) + { return false; } VkDeviceSize resultBaseOffset = size - allocSize; - if (!suballocations2nd.empty()) { - const VmaSuballocation &lastSuballoc = suballocations2nd.back(); + if(!suballocations2nd.empty()) + { + const VmaSuballocation& lastSuballoc = suballocations2nd.back(); resultBaseOffset = lastSuballoc.offset - allocSize; - if (allocSize > lastSuballoc.offset) { + if(allocSize > lastSuballoc.offset) + { return false; } } @@ -9136,8 +10277,10 @@ bool VmaBlockMetadata_Linear::CreateAllocationRequest_UpperAddress( VkDeviceSize resultOffset = resultBaseOffset; // Apply VMA_DEBUG_MARGIN at the end. - if (VMA_DEBUG_MARGIN > 0) { - if (resultOffset < VMA_DEBUG_MARGIN) { + if(VMA_DEBUG_MARGIN > 0) + { + if(resultOffset < VMA_DEBUG_MARGIN) + { return false; } resultOffset -= VMA_DEBUG_MARGIN; @@ -9148,39 +10291,52 @@ bool VmaBlockMetadata_Linear::CreateAllocationRequest_UpperAddress( // Check next suballocations from 2nd for BufferImageGranularity conflicts. // Make bigger alignment if necessary. 
- if (bufferImageGranularity > 1 && !suballocations2nd.empty()) { + if(bufferImageGranularity > 1 && !suballocations2nd.empty()) + { bool bufferImageGranularityConflict = false; - for (size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--;) { - const VmaSuballocation &nextSuballoc = suballocations2nd[nextSuballocIndex]; - if (VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity)) { - if (VmaIsBufferImageGranularityConflict(nextSuballoc.type, allocType)) { + for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; ) + { + const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex]; + if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity)) + { + if(VmaIsBufferImageGranularityConflict(nextSuballoc.type, allocType)) + { bufferImageGranularityConflict = true; break; } - } else + } + else // Already on previous page. break; } - if (bufferImageGranularityConflict) { + if(bufferImageGranularityConflict) + { resultOffset = VmaAlignDown(resultOffset, bufferImageGranularity); } } // There is enough free space. const VkDeviceSize endOf1st = !suballocations1st.empty() ? - suballocations1st.back().offset + suballocations1st.back().size : - 0; - if (endOf1st + VMA_DEBUG_MARGIN <= resultOffset) { + suballocations1st.back().offset + suballocations1st.back().size : + 0; + if(endOf1st + VMA_DEBUG_MARGIN <= resultOffset) + { // Check previous suballocations for BufferImageGranularity conflicts. // If conflict exists, allocation cannot be made here. 
- if (bufferImageGranularity > 1) { - for (size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--;) { - const VmaSuballocation &prevSuballoc = suballocations1st[prevSuballocIndex]; - if (VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity)) { - if (VmaIsBufferImageGranularityConflict(allocType, prevSuballoc.type)) { + if(bufferImageGranularity > 1) + { + for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; ) + { + const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex]; + if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity)) + { + if(VmaIsBufferImageGranularityConflict(allocType, prevSuballoc.type)) + { return false; } - } else { + } + else + { // Already on next page. break; } @@ -9201,25 +10357,28 @@ bool VmaBlockMetadata_Linear::CreateAllocationRequest_UpperAddress( } bool VmaBlockMetadata_Linear::CreateAllocationRequest_LowerAddress( - uint32_t currentFrameIndex, - uint32_t frameInUseCount, - VkDeviceSize bufferImageGranularity, - VkDeviceSize allocSize, - VkDeviceSize allocAlignment, - VmaSuballocationType allocType, - bool canMakeOtherLost, - uint32_t strategy, - VmaAllocationRequest *pAllocationRequest) { + uint32_t currentFrameIndex, + uint32_t frameInUseCount, + VkDeviceSize bufferImageGranularity, + VkDeviceSize allocSize, + VkDeviceSize allocAlignment, + VmaSuballocationType allocType, + bool canMakeOtherLost, + uint32_t strategy, + VmaAllocationRequest* pAllocationRequest) +{ const VkDeviceSize size = GetSize(); - SuballocationVectorType &suballocations1st = AccessSuballocations1st(); - SuballocationVectorType &suballocations2nd = AccessSuballocations2nd(); + SuballocationVectorType& suballocations1st = AccessSuballocations1st(); + SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); - if (m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK) { + 
if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK) + { // Try to allocate at the end of 1st vector. VkDeviceSize resultBaseOffset = 0; - if (!suballocations1st.empty()) { - const VmaSuballocation &lastSuballoc = suballocations1st.back(); + if(!suballocations1st.empty()) + { + const VmaSuballocation& lastSuballoc = suballocations1st.back(); resultBaseOffset = lastSuballoc.offset + lastSuballoc.size; } @@ -9227,7 +10386,8 @@ bool VmaBlockMetadata_Linear::CreateAllocationRequest_LowerAddress( VkDeviceSize resultOffset = resultBaseOffset; // Apply VMA_DEBUG_MARGIN at the beginning. - if (VMA_DEBUG_MARGIN > 0) { + if(VMA_DEBUG_MARGIN > 0) + { resultOffset += VMA_DEBUG_MARGIN; } @@ -9236,40 +10396,52 @@ bool VmaBlockMetadata_Linear::CreateAllocationRequest_LowerAddress( // Check previous suballocations for BufferImageGranularity conflicts. // Make bigger alignment if necessary. - if (bufferImageGranularity > 1 && !suballocations1st.empty()) { + if(bufferImageGranularity > 1 && !suballocations1st.empty()) + { bool bufferImageGranularityConflict = false; - for (size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--;) { - const VmaSuballocation &prevSuballoc = suballocations1st[prevSuballocIndex]; - if (VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity)) { - if (VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType)) { + for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; ) + { + const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex]; + if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity)) + { + if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType)) + { bufferImageGranularityConflict = true; break; } - } else + } + else // Already on previous page. 
break; } - if (bufferImageGranularityConflict) { + if(bufferImageGranularityConflict) + { resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity); } } const VkDeviceSize freeSpaceEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? - suballocations2nd.back().offset : - size; + suballocations2nd.back().offset : size; // There is enough free space at the end after alignment. - if (resultOffset + allocSize + VMA_DEBUG_MARGIN <= freeSpaceEnd) { + if(resultOffset + allocSize + VMA_DEBUG_MARGIN <= freeSpaceEnd) + { // Check next suballocations for BufferImageGranularity conflicts. // If conflict exists, allocation cannot be made here. - if (bufferImageGranularity > 1 && m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK) { - for (size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--;) { - const VmaSuballocation &nextSuballoc = suballocations2nd[nextSuballocIndex]; - if (VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity)) { - if (VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type)) { + if(bufferImageGranularity > 1 && m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK) + { + for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; ) + { + const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex]; + if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity)) + { + if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type)) + { return false; } - } else { + } + else + { // Already on previous page. break; } @@ -9289,12 +10461,14 @@ bool VmaBlockMetadata_Linear::CreateAllocationRequest_LowerAddress( // Wrap-around to end of 2nd vector. Try to allocate there, watching for the // beginning of 1st vector as the end of free space. 
- if (m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER) { + if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER) + { VMA_ASSERT(!suballocations1st.empty()); VkDeviceSize resultBaseOffset = 0; - if (!suballocations2nd.empty()) { - const VmaSuballocation &lastSuballoc = suballocations2nd.back(); + if(!suballocations2nd.empty()) + { + const VmaSuballocation& lastSuballoc = suballocations2nd.back(); resultBaseOffset = lastSuballoc.offset + lastSuballoc.size; } @@ -9302,7 +10476,8 @@ bool VmaBlockMetadata_Linear::CreateAllocationRequest_LowerAddress( VkDeviceSize resultOffset = resultBaseOffset; // Apply VMA_DEBUG_MARGIN at the beginning. - if (VMA_DEBUG_MARGIN > 0) { + if(VMA_DEBUG_MARGIN > 0) + { resultOffset += VMA_DEBUG_MARGIN; } @@ -9311,20 +10486,26 @@ bool VmaBlockMetadata_Linear::CreateAllocationRequest_LowerAddress( // Check previous suballocations for BufferImageGranularity conflicts. // Make bigger alignment if necessary. - if (bufferImageGranularity > 1 && !suballocations2nd.empty()) { + if(bufferImageGranularity > 1 && !suballocations2nd.empty()) + { bool bufferImageGranularityConflict = false; - for (size_t prevSuballocIndex = suballocations2nd.size(); prevSuballocIndex--;) { - const VmaSuballocation &prevSuballoc = suballocations2nd[prevSuballocIndex]; - if (VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity)) { - if (VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType)) { + for(size_t prevSuballocIndex = suballocations2nd.size(); prevSuballocIndex--; ) + { + const VmaSuballocation& prevSuballoc = suballocations2nd[prevSuballocIndex]; + if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity)) + { + if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType)) + { bufferImageGranularityConflict = true; break; } - } else + } + else // Already on previous page. 
break; } - if (bufferImageGranularityConflict) { + if(bufferImageGranularityConflict) + { resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity); } } @@ -9333,20 +10514,28 @@ bool VmaBlockMetadata_Linear::CreateAllocationRequest_LowerAddress( pAllocationRequest->sumItemSize = 0; size_t index1st = m_1stNullItemsBeginCount; - if (canMakeOtherLost) { - while (index1st < suballocations1st.size() && - resultOffset + allocSize + VMA_DEBUG_MARGIN > suballocations1st[index1st].offset) { + if(canMakeOtherLost) + { + while(index1st < suballocations1st.size() && + resultOffset + allocSize + VMA_DEBUG_MARGIN > suballocations1st[index1st].offset) + { // Next colliding allocation at the beginning of 1st vector found. Try to make it lost. - const VmaSuballocation &suballoc = suballocations1st[index1st]; - if (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE) { + const VmaSuballocation& suballoc = suballocations1st[index1st]; + if(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE) + { // No problem. - } else { + } + else + { VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE); - if (suballoc.hAllocation->CanBecomeLost() && - suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex) { + if(suballoc.hAllocation->CanBecomeLost() && + suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex) + { ++pAllocationRequest->itemsToMakeLostCount; pAllocationRequest->sumItemSize += suballoc.size; - } else { + } + else + { return false; } } @@ -9355,21 +10544,30 @@ bool VmaBlockMetadata_Linear::CreateAllocationRequest_LowerAddress( // Check next suballocations for BufferImageGranularity conflicts. // If conflict exists, we must mark more allocations lost or fail. 
- if (bufferImageGranularity > 1) { - while (index1st < suballocations1st.size()) { - const VmaSuballocation &suballoc = suballocations1st[index1st]; - if (VmaBlocksOnSamePage(resultOffset, allocSize, suballoc.offset, bufferImageGranularity)) { - if (suballoc.hAllocation != VK_NULL_HANDLE) { + if(bufferImageGranularity > 1) + { + while(index1st < suballocations1st.size()) + { + const VmaSuballocation& suballoc = suballocations1st[index1st]; + if(VmaBlocksOnSamePage(resultOffset, allocSize, suballoc.offset, bufferImageGranularity)) + { + if(suballoc.hAllocation != VK_NULL_HANDLE) + { // Not checking actual VmaIsBufferImageGranularityConflict(allocType, suballoc.type). - if (suballoc.hAllocation->CanBecomeLost() && - suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex) { + if(suballoc.hAllocation->CanBecomeLost() && + suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex) + { ++pAllocationRequest->itemsToMakeLostCount; pAllocationRequest->sumItemSize += suballoc.size; - } else { + } + else + { return false; } } - } else { + } + else + { // Already on next page. break; } @@ -9378,28 +10576,36 @@ bool VmaBlockMetadata_Linear::CreateAllocationRequest_LowerAddress( } // Special case: There is not enough room at the end for this allocation, even after making all from the 1st lost. - if (index1st == suballocations1st.size() && - resultOffset + allocSize + VMA_DEBUG_MARGIN > size) { + if(index1st == suballocations1st.size() && + resultOffset + allocSize + VMA_DEBUG_MARGIN > size) + { // TODO: This is a known bug that it's not yet implemented and the allocation is failing. VMA_DEBUG_LOG("Unsupported special case in custom pool with linear allocation algorithm used as ring buffer with allocations that can be lost."); } } // There is enough free space at the end after alignment. 
- if ((index1st == suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= size) || - (index1st < suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= suballocations1st[index1st].offset)) { + if((index1st == suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= size) || + (index1st < suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= suballocations1st[index1st].offset)) + { // Check next suballocations for BufferImageGranularity conflicts. // If conflict exists, allocation cannot be made here. - if (bufferImageGranularity > 1) { - for (size_t nextSuballocIndex = index1st; - nextSuballocIndex < suballocations1st.size(); - nextSuballocIndex++) { - const VmaSuballocation &nextSuballoc = suballocations1st[nextSuballocIndex]; - if (VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity)) { - if (VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type)) { + if(bufferImageGranularity > 1) + { + for(size_t nextSuballocIndex = index1st; + nextSuballocIndex < suballocations1st.size(); + nextSuballocIndex++) + { + const VmaSuballocation& nextSuballoc = suballocations1st[nextSuballocIndex]; + if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity)) + { + if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type)) + { return false; } - } else { + } + else + { // Already on next page. break; } @@ -9409,7 +10615,9 @@ bool VmaBlockMetadata_Linear::CreateAllocationRequest_LowerAddress( // All tests passed: Success. pAllocationRequest->offset = resultOffset; pAllocationRequest->sumFreeSize = - (index1st < suballocations1st.size() ? suballocations1st[index1st].offset : size) - resultBaseOffset - pAllocationRequest->sumItemSize; + (index1st < suballocations1st.size() ? 
suballocations1st[index1st].offset : size) + - resultBaseOffset + - pAllocationRequest->sumItemSize; pAllocationRequest->type = VmaAllocationRequestType::EndOf2nd; // pAllocationRequest->item, customData unused. return true; @@ -9420,45 +10628,57 @@ bool VmaBlockMetadata_Linear::CreateAllocationRequest_LowerAddress( } bool VmaBlockMetadata_Linear::MakeRequestedAllocationsLost( - uint32_t currentFrameIndex, - uint32_t frameInUseCount, - VmaAllocationRequest *pAllocationRequest) { - if (pAllocationRequest->itemsToMakeLostCount == 0) { + uint32_t currentFrameIndex, + uint32_t frameInUseCount, + VmaAllocationRequest* pAllocationRequest) +{ + if(pAllocationRequest->itemsToMakeLostCount == 0) + { return true; } VMA_ASSERT(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER); - + // We always start from 1st. - SuballocationVectorType *suballocations = &AccessSuballocations1st(); + SuballocationVectorType* suballocations = &AccessSuballocations1st(); size_t index = m_1stNullItemsBeginCount; size_t madeLostCount = 0; - while (madeLostCount < pAllocationRequest->itemsToMakeLostCount) { - if (index == suballocations->size()) { + while(madeLostCount < pAllocationRequest->itemsToMakeLostCount) + { + if(index == suballocations->size()) + { index = 0; // If we get to the end of 1st, we wrap around to beginning of 2nd of 1st. - if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER) { + if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER) + { suballocations = &AccessSuballocations2nd(); } // else: m_2ndVectorMode == SECOND_VECTOR_EMPTY: // suballocations continues pointing at AccessSuballocations1st(). 
VMA_ASSERT(!suballocations->empty()); } - VmaSuballocation &suballoc = (*suballocations)[index]; - if (suballoc.type != VMA_SUBALLOCATION_TYPE_FREE) { + VmaSuballocation& suballoc = (*suballocations)[index]; + if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE) + { VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE); VMA_ASSERT(suballoc.hAllocation->CanBecomeLost()); - if (suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount)) { + if(suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount)) + { suballoc.type = VMA_SUBALLOCATION_TYPE_FREE; suballoc.hAllocation = VK_NULL_HANDLE; m_SumFreeSize += suballoc.size; - if (suballocations == &AccessSuballocations1st()) { + if(suballocations == &AccessSuballocations1st()) + { ++m_1stNullItemsMiddleCount; - } else { + } + else + { ++m_2ndNullItemsCount; } ++madeLostCount; - } else { + } + else + { return false; } } @@ -9467,19 +10687,22 @@ bool VmaBlockMetadata_Linear::MakeRequestedAllocationsLost( CleanupAfterFree(); //VMA_HEAVY_ASSERT(Validate()); // Already called by ClanupAfterFree(). 
- + return true; } -uint32_t VmaBlockMetadata_Linear::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount) { +uint32_t VmaBlockMetadata_Linear::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount) +{ uint32_t lostAllocationCount = 0; - - SuballocationVectorType &suballocations1st = AccessSuballocations1st(); - for (size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i) { - VmaSuballocation &suballoc = suballocations1st[i]; - if (suballoc.type != VMA_SUBALLOCATION_TYPE_FREE && - suballoc.hAllocation->CanBecomeLost() && - suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount)) { + + SuballocationVectorType& suballocations1st = AccessSuballocations1st(); + for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i) + { + VmaSuballocation& suballoc = suballocations1st[i]; + if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE && + suballoc.hAllocation->CanBecomeLost() && + suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount)) + { suballoc.type = VMA_SUBALLOCATION_TYPE_FREE; suballoc.hAllocation = VK_NULL_HANDLE; ++m_1stNullItemsMiddleCount; @@ -9488,12 +10711,14 @@ uint32_t VmaBlockMetadata_Linear::MakeAllocationsLost(uint32_t currentFrameIndex } } - SuballocationVectorType &suballocations2nd = AccessSuballocations2nd(); - for (size_t i = 0, count = suballocations2nd.size(); i < count; ++i) { - VmaSuballocation &suballoc = suballocations2nd[i]; - if (suballoc.type != VMA_SUBALLOCATION_TYPE_FREE && - suballoc.hAllocation->CanBecomeLost() && - suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount)) { + SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); + for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i) + { + VmaSuballocation& suballoc = suballocations2nd[i]; + if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE && + suballoc.hAllocation->CanBecomeLost() && + 
suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount)) + { suballoc.type = VMA_SUBALLOCATION_TYPE_FREE; suballoc.hAllocation = VK_NULL_HANDLE; ++m_2ndNullItemsCount; @@ -9502,38 +10727,48 @@ uint32_t VmaBlockMetadata_Linear::MakeAllocationsLost(uint32_t currentFrameIndex } } - if (lostAllocationCount) { + if(lostAllocationCount) + { CleanupAfterFree(); } return lostAllocationCount; } -VkResult VmaBlockMetadata_Linear::CheckCorruption(const void *pBlockData) { - SuballocationVectorType &suballocations1st = AccessSuballocations1st(); - for (size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i) { - const VmaSuballocation &suballoc = suballocations1st[i]; - if (suballoc.type != VMA_SUBALLOCATION_TYPE_FREE) { - if (!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN)) { +VkResult VmaBlockMetadata_Linear::CheckCorruption(const void* pBlockData) +{ + SuballocationVectorType& suballocations1st = AccessSuballocations1st(); + for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i) + { + const VmaSuballocation& suballoc = suballocations1st[i]; + if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE) + { + if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN)) + { VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!"); return VK_ERROR_VALIDATION_FAILED_EXT; } - if (!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size)) { + if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size)) + { VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!"); return VK_ERROR_VALIDATION_FAILED_EXT; } } } - SuballocationVectorType &suballocations2nd = AccessSuballocations2nd(); - for (size_t i = 0, count = suballocations2nd.size(); i < count; ++i) { - const VmaSuballocation &suballoc = suballocations2nd[i]; - if (suballoc.type != VMA_SUBALLOCATION_TYPE_FREE) { - if (!VmaValidateMagicValue(pBlockData, suballoc.offset - 
VMA_DEBUG_MARGIN)) { + SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); + for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i) + { + const VmaSuballocation& suballoc = suballocations2nd[i]; + if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE) + { + if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN)) + { VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!"); return VK_ERROR_VALIDATION_FAILED_EXT; } - if (!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size)) { + if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size)) + { VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!"); return VK_ERROR_VALIDATION_FAILED_EXT; } @@ -9544,75 +10779,88 @@ VkResult VmaBlockMetadata_Linear::CheckCorruption(const void *pBlockData) { } void VmaBlockMetadata_Linear::Alloc( - const VmaAllocationRequest &request, - VmaSuballocationType type, - VkDeviceSize allocSize, - VmaAllocation hAllocation) { + const VmaAllocationRequest& request, + VmaSuballocationType type, + VkDeviceSize allocSize, + VmaAllocation hAllocation) +{ const VmaSuballocation newSuballoc = { request.offset, allocSize, hAllocation, type }; - switch (request.type) { - case VmaAllocationRequestType::UpperAddress: { + switch(request.type) + { + case VmaAllocationRequestType::UpperAddress: + { VMA_ASSERT(m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER && - "CRITICAL ERROR: Trying to use linear allocator as double stack while it was already used as ring buffer."); - SuballocationVectorType &suballocations2nd = AccessSuballocations2nd(); + "CRITICAL ERROR: Trying to use linear allocator as double stack while it was already used as ring buffer."); + SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); suballocations2nd.push_back(newSuballoc); m_2ndVectorMode = SECOND_VECTOR_DOUBLE_STACK; - } break; - case VmaAllocationRequestType::EndOf1st: { - SuballocationVectorType 
&suballocations1st = AccessSuballocations1st(); + } + break; + case VmaAllocationRequestType::EndOf1st: + { + SuballocationVectorType& suballocations1st = AccessSuballocations1st(); VMA_ASSERT(suballocations1st.empty() || - request.offset >= suballocations1st.back().offset + suballocations1st.back().size); + request.offset >= suballocations1st.back().offset + suballocations1st.back().size); // Check if it fits before the end of the block. VMA_ASSERT(request.offset + allocSize <= GetSize()); suballocations1st.push_back(newSuballoc); - } break; - case VmaAllocationRequestType::EndOf2nd: { - SuballocationVectorType &suballocations1st = AccessSuballocations1st(); + } + break; + case VmaAllocationRequestType::EndOf2nd: + { + SuballocationVectorType& suballocations1st = AccessSuballocations1st(); // New allocation at the end of 2-part ring buffer, so before first allocation from 1st vector. VMA_ASSERT(!suballocations1st.empty() && - request.offset + allocSize <= suballocations1st[m_1stNullItemsBeginCount].offset); - SuballocationVectorType &suballocations2nd = AccessSuballocations2nd(); - - switch (m_2ndVectorMode) { - case SECOND_VECTOR_EMPTY: - // First allocation from second part ring buffer. - VMA_ASSERT(suballocations2nd.empty()); - m_2ndVectorMode = SECOND_VECTOR_RING_BUFFER; - break; - case SECOND_VECTOR_RING_BUFFER: - // 2-part ring buffer is already started. - VMA_ASSERT(!suballocations2nd.empty()); - break; - case SECOND_VECTOR_DOUBLE_STACK: - VMA_ASSERT(0 && "CRITICAL ERROR: Trying to use linear allocator as ring buffer while it was already used as double stack."); - break; - default: - VMA_ASSERT(0); + request.offset + allocSize <= suballocations1st[m_1stNullItemsBeginCount].offset); + SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); + + switch(m_2ndVectorMode) + { + case SECOND_VECTOR_EMPTY: + // First allocation from second part ring buffer. 
+ VMA_ASSERT(suballocations2nd.empty()); + m_2ndVectorMode = SECOND_VECTOR_RING_BUFFER; + break; + case SECOND_VECTOR_RING_BUFFER: + // 2-part ring buffer is already started. + VMA_ASSERT(!suballocations2nd.empty()); + break; + case SECOND_VECTOR_DOUBLE_STACK: + VMA_ASSERT(0 && "CRITICAL ERROR: Trying to use linear allocator as ring buffer while it was already used as double stack."); + break; + default: + VMA_ASSERT(0); } suballocations2nd.push_back(newSuballoc); - } break; - default: - VMA_ASSERT(0 && "CRITICAL INTERNAL ERROR."); + } + break; + default: + VMA_ASSERT(0 && "CRITICAL INTERNAL ERROR."); } m_SumFreeSize -= newSuballoc.size; } -void VmaBlockMetadata_Linear::Free(const VmaAllocation allocation) { +void VmaBlockMetadata_Linear::Free(const VmaAllocation allocation) +{ FreeAtOffset(allocation->GetOffset()); } -void VmaBlockMetadata_Linear::FreeAtOffset(VkDeviceSize offset) { - SuballocationVectorType &suballocations1st = AccessSuballocations1st(); - SuballocationVectorType &suballocations2nd = AccessSuballocations2nd(); +void VmaBlockMetadata_Linear::FreeAtOffset(VkDeviceSize offset) +{ + SuballocationVectorType& suballocations1st = AccessSuballocations1st(); + SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); - if (!suballocations1st.empty()) { + if(!suballocations1st.empty()) + { // First allocation: Mark it as next empty at the beginning. - VmaSuballocation &firstSuballoc = suballocations1st[m_1stNullItemsBeginCount]; - if (firstSuballoc.offset == offset) { + VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount]; + if(firstSuballoc.offset == offset) + { firstSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE; firstSuballoc.hAllocation = VK_NULL_HANDLE; m_SumFreeSize += firstSuballoc.size; @@ -9623,10 +10871,12 @@ void VmaBlockMetadata_Linear::FreeAtOffset(VkDeviceSize offset) { } // Last allocation in 2-part ring buffer or top of upper stack (same logic). 
- if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER || - m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK) { - VmaSuballocation &lastSuballoc = suballocations2nd.back(); - if (lastSuballoc.offset == offset) { + if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER || + m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK) + { + VmaSuballocation& lastSuballoc = suballocations2nd.back(); + if(lastSuballoc.offset == offset) + { m_SumFreeSize += lastSuballoc.size; suballocations2nd.pop_back(); CleanupAfterFree(); @@ -9634,9 +10884,11 @@ void VmaBlockMetadata_Linear::FreeAtOffset(VkDeviceSize offset) { } } // Last allocation in 1st vector. - else if (m_2ndVectorMode == SECOND_VECTOR_EMPTY) { - VmaSuballocation &lastSuballoc = suballocations1st.back(); - if (lastSuballoc.offset == offset) { + else if(m_2ndVectorMode == SECOND_VECTOR_EMPTY) + { + VmaSuballocation& lastSuballoc = suballocations1st.back(); + if(lastSuballoc.offset == offset) + { m_SumFreeSize += lastSuballoc.size; suballocations1st.pop_back(); CleanupAfterFree(); @@ -9649,11 +10901,13 @@ void VmaBlockMetadata_Linear::FreeAtOffset(VkDeviceSize offset) { VmaSuballocation refSuballoc; refSuballoc.offset = offset; // Rest of members stays uninitialized intentionally for better performance. 
- SuballocationVectorType::iterator it = VmaVectorFindSorted<VmaSuballocationOffsetLess>( - suballocations1st.begin() + m_1stNullItemsBeginCount, - suballocations1st.end(), - refSuballoc); - if (it != suballocations1st.end()) { + SuballocationVectorType::iterator it = VmaBinaryFindSorted( + suballocations1st.begin() + m_1stNullItemsBeginCount, + suballocations1st.end(), + refSuballoc, + VmaSuballocationOffsetLess()); + if(it != suballocations1st.end()) + { it->type = VMA_SUBALLOCATION_TYPE_FREE; it->hAllocation = VK_NULL_HANDLE; ++m_1stNullItemsMiddleCount; @@ -9663,15 +10917,17 @@ void VmaBlockMetadata_Linear::FreeAtOffset(VkDeviceSize offset) { } } - if (m_2ndVectorMode != SECOND_VECTOR_EMPTY) { + if(m_2ndVectorMode != SECOND_VECTOR_EMPTY) + { // Item from the middle of 2nd vector. VmaSuballocation refSuballoc; refSuballoc.offset = offset; // Rest of members stays uninitialized intentionally for better performance. SuballocationVectorType::iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ? 
- VmaVectorFindSorted<VmaSuballocationOffsetLess>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc) : - VmaVectorFindSorted<VmaSuballocationOffsetGreater>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc); - if (it != suballocations2nd.end()) { + VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetLess()) : + VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetGreater()); + if(it != suballocations2nd.end()) + { it->type = VMA_SUBALLOCATION_TYPE_FREE; it->hAllocation = VK_NULL_HANDLE; ++m_2ndNullItemsCount; @@ -9684,64 +10940,77 @@ void VmaBlockMetadata_Linear::FreeAtOffset(VkDeviceSize offset) { VMA_ASSERT(0 && "Allocation to free not found in linear allocator!"); } -bool VmaBlockMetadata_Linear::ShouldCompact1st() const { +bool VmaBlockMetadata_Linear::ShouldCompact1st() const +{ const size_t nullItemCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount; const size_t suballocCount = AccessSuballocations1st().size(); return suballocCount > 32 && nullItemCount * 2 >= (suballocCount - nullItemCount) * 3; } -void VmaBlockMetadata_Linear::CleanupAfterFree() { - SuballocationVectorType &suballocations1st = AccessSuballocations1st(); - SuballocationVectorType &suballocations2nd = AccessSuballocations2nd(); +void VmaBlockMetadata_Linear::CleanupAfterFree() +{ + SuballocationVectorType& suballocations1st = AccessSuballocations1st(); + SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); - if (IsEmpty()) { + if(IsEmpty()) + { suballocations1st.clear(); suballocations2nd.clear(); m_1stNullItemsBeginCount = 0; m_1stNullItemsMiddleCount = 0; m_2ndNullItemsCount = 0; m_2ndVectorMode = SECOND_VECTOR_EMPTY; - } else { + } + else + { const size_t suballoc1stCount = suballocations1st.size(); const size_t nullItem1stCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount; VMA_ASSERT(nullItem1stCount <= 
suballoc1stCount); // Find more null items at the beginning of 1st vector. - while (m_1stNullItemsBeginCount < suballoc1stCount && - suballocations1st[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE) { + while(m_1stNullItemsBeginCount < suballoc1stCount && + suballocations1st[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE) + { ++m_1stNullItemsBeginCount; --m_1stNullItemsMiddleCount; } // Find more null items at the end of 1st vector. - while (m_1stNullItemsMiddleCount > 0 && - suballocations1st.back().hAllocation == VK_NULL_HANDLE) { + while(m_1stNullItemsMiddleCount > 0 && + suballocations1st.back().hAllocation == VK_NULL_HANDLE) + { --m_1stNullItemsMiddleCount; suballocations1st.pop_back(); } // Find more null items at the end of 2nd vector. - while (m_2ndNullItemsCount > 0 && - suballocations2nd.back().hAllocation == VK_NULL_HANDLE) { + while(m_2ndNullItemsCount > 0 && + suballocations2nd.back().hAllocation == VK_NULL_HANDLE) + { --m_2ndNullItemsCount; suballocations2nd.pop_back(); } // Find more null items at the beginning of 2nd vector. 
- while (m_2ndNullItemsCount > 0 && - suballocations2nd[0].hAllocation == VK_NULL_HANDLE) { + while(m_2ndNullItemsCount > 0 && + suballocations2nd[0].hAllocation == VK_NULL_HANDLE) + { --m_2ndNullItemsCount; VmaVectorRemove(suballocations2nd, 0); } - if (ShouldCompact1st()) { + if(ShouldCompact1st()) + { const size_t nonNullItemCount = suballoc1stCount - nullItem1stCount; size_t srcIndex = m_1stNullItemsBeginCount; - for (size_t dstIndex = 0; dstIndex < nonNullItemCount; ++dstIndex) { - while (suballocations1st[srcIndex].hAllocation == VK_NULL_HANDLE) { + for(size_t dstIndex = 0; dstIndex < nonNullItemCount; ++dstIndex) + { + while(suballocations1st[srcIndex].hAllocation == VK_NULL_HANDLE) + { ++srcIndex; } - if (dstIndex != srcIndex) { + if(dstIndex != srcIndex) + { suballocations1st[dstIndex] = suballocations1st[srcIndex]; } ++srcIndex; @@ -9752,21 +11021,25 @@ void VmaBlockMetadata_Linear::CleanupAfterFree() { } // 2nd vector became empty. - if (suballocations2nd.empty()) { + if(suballocations2nd.empty()) + { m_2ndVectorMode = SECOND_VECTOR_EMPTY; } // 1st vector became empty. - if (suballocations1st.size() - m_1stNullItemsBeginCount == 0) { + if(suballocations1st.size() - m_1stNullItemsBeginCount == 0) + { suballocations1st.clear(); m_1stNullItemsBeginCount = 0; - if (!suballocations2nd.empty() && m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER) { + if(!suballocations2nd.empty() && m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER) + { // Swap 1st with 2nd. Now 2nd is empty. 
m_2ndVectorMode = SECOND_VECTOR_EMPTY; m_1stNullItemsMiddleCount = m_2ndNullItemsCount; - while (m_1stNullItemsBeginCount < suballocations2nd.size() && - suballocations2nd[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE) { + while(m_1stNullItemsBeginCount < suballocations2nd.size() && + suballocations2nd[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE) + { ++m_1stNullItemsBeginCount; --m_1stNullItemsMiddleCount; } @@ -9779,23 +11052,27 @@ void VmaBlockMetadata_Linear::CleanupAfterFree() { VMA_HEAVY_ASSERT(Validate()); } + //////////////////////////////////////////////////////////////////////////////// // class VmaBlockMetadata_Buddy VmaBlockMetadata_Buddy::VmaBlockMetadata_Buddy(VmaAllocator hAllocator) : - VmaBlockMetadata(hAllocator), - m_Root(VMA_NULL), - m_AllocationCount(0), - m_FreeCount(1), - m_SumFreeSize(0) { + VmaBlockMetadata(hAllocator), + m_Root(VMA_NULL), + m_AllocationCount(0), + m_FreeCount(1), + m_SumFreeSize(0) +{ memset(m_FreeList, 0, sizeof(m_FreeList)); } -VmaBlockMetadata_Buddy::~VmaBlockMetadata_Buddy() { +VmaBlockMetadata_Buddy::~VmaBlockMetadata_Buddy() +{ DeleteNode(m_Root); } -void VmaBlockMetadata_Buddy::Init(VkDeviceSize size) { +void VmaBlockMetadata_Buddy::Init(VkDeviceSize size) +{ VmaBlockMetadata::Init(size); m_UsableSize = VmaPrevPow2(size); @@ -9803,12 +11080,13 @@ void VmaBlockMetadata_Buddy::Init(VkDeviceSize size) { // Calculate m_LevelCount. 
m_LevelCount = 1; - while (m_LevelCount < MAX_LEVELS && - LevelToNodeSize(m_LevelCount) >= MIN_NODE_SIZE) { + while(m_LevelCount < MAX_LEVELS && + LevelToNodeSize(m_LevelCount) >= MIN_NODE_SIZE) + { ++m_LevelCount; } - Node *rootNode = vma_new(GetAllocationCallbacks(), Node)(); + Node* rootNode = vma_new(GetAllocationCallbacks(), Node)(); rootNode->offset = 0; rootNode->type = Node::TYPE_FREE; rootNode->parent = VMA_NULL; @@ -9818,51 +11096,63 @@ void VmaBlockMetadata_Buddy::Init(VkDeviceSize size) { AddToFreeListFront(0, rootNode); } -bool VmaBlockMetadata_Buddy::Validate() const { +bool VmaBlockMetadata_Buddy::Validate() const +{ // Validate tree. ValidationContext ctx; - if (!ValidateNode(ctx, VMA_NULL, m_Root, 0, LevelToNodeSize(0))) { + if(!ValidateNode(ctx, VMA_NULL, m_Root, 0, LevelToNodeSize(0))) + { VMA_VALIDATE(false && "ValidateNode failed."); } VMA_VALIDATE(m_AllocationCount == ctx.calculatedAllocationCount); VMA_VALIDATE(m_SumFreeSize == ctx.calculatedSumFreeSize); // Validate free node lists. - for (uint32_t level = 0; level < m_LevelCount; ++level) { + for(uint32_t level = 0; level < m_LevelCount; ++level) + { VMA_VALIDATE(m_FreeList[level].front == VMA_NULL || - m_FreeList[level].front->free.prev == VMA_NULL); + m_FreeList[level].front->free.prev == VMA_NULL); - for (Node *node = m_FreeList[level].front; - node != VMA_NULL; - node = node->free.next) { + for(Node* node = m_FreeList[level].front; + node != VMA_NULL; + node = node->free.next) + { VMA_VALIDATE(node->type == Node::TYPE_FREE); - - if (node->free.next == VMA_NULL) { + + if(node->free.next == VMA_NULL) + { VMA_VALIDATE(m_FreeList[level].back == node); - } else { + } + else + { VMA_VALIDATE(node->free.next->free.prev == node); } } } // Validate that free lists ar higher levels are empty. 
- for (uint32_t level = m_LevelCount; level < MAX_LEVELS; ++level) { + for(uint32_t level = m_LevelCount; level < MAX_LEVELS; ++level) + { VMA_VALIDATE(m_FreeList[level].front == VMA_NULL && m_FreeList[level].back == VMA_NULL); } return true; } -VkDeviceSize VmaBlockMetadata_Buddy::GetUnusedRangeSizeMax() const { - for (uint32_t level = 0; level < m_LevelCount; ++level) { - if (m_FreeList[level].front != VMA_NULL) { +VkDeviceSize VmaBlockMetadata_Buddy::GetUnusedRangeSizeMax() const +{ + for(uint32_t level = 0; level < m_LevelCount; ++level) + { + if(m_FreeList[level].front != VMA_NULL) + { return LevelToNodeSize(level); } } return 0; } -void VmaBlockMetadata_Buddy::CalcAllocationStatInfo(VmaStatInfo &outInfo) const { +void VmaBlockMetadata_Buddy::CalcAllocationStatInfo(VmaStatInfo& outInfo) const +{ const VkDeviceSize unusableSize = GetUnusableSize(); outInfo.blockCount = 1; @@ -9876,7 +11166,8 @@ void VmaBlockMetadata_Buddy::CalcAllocationStatInfo(VmaStatInfo &outInfo) const CalcAllocationStatInfoNode(outInfo, m_Root, LevelToNodeSize(0)); - if (unusableSize > 0) { + if(unusableSize > 0) + { ++outInfo.unusedRangeCount; outInfo.unusedBytes += unusableSize; outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusableSize); @@ -9884,7 +11175,8 @@ void VmaBlockMetadata_Buddy::CalcAllocationStatInfo(VmaStatInfo &outInfo) const } } -void VmaBlockMetadata_Buddy::AddPoolStats(VmaPoolStats &inoutStats) const { +void VmaBlockMetadata_Buddy::AddPoolStats(VmaPoolStats& inoutStats) const +{ const VkDeviceSize unusableSize = GetUnusableSize(); inoutStats.size += GetSize(); @@ -9893,7 +11185,8 @@ void VmaBlockMetadata_Buddy::AddPoolStats(VmaPoolStats &inoutStats) const { inoutStats.unusedRangeCount += m_FreeCount; inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax()); - if (unusableSize > 0) { + if(unusableSize > 0) + { ++inoutStats.unusedRangeCount; // Not updating inoutStats.unusedRangeSizeMax with unusableSize because 
this space is not available for allocations. } @@ -9901,24 +11194,26 @@ void VmaBlockMetadata_Buddy::AddPoolStats(VmaPoolStats &inoutStats) const { #if VMA_STATS_STRING_ENABLED -void VmaBlockMetadata_Buddy::PrintDetailedMap(class VmaJsonWriter &json) const { +void VmaBlockMetadata_Buddy::PrintDetailedMap(class VmaJsonWriter& json) const +{ // TODO optimize VmaStatInfo stat; CalcAllocationStatInfo(stat); PrintDetailedMap_Begin( - json, - stat.unusedBytes, - stat.allocationCount, - stat.unusedRangeCount); + json, + stat.unusedBytes, + stat.allocationCount, + stat.unusedRangeCount); PrintDetailedMapNode(json, m_Root, LevelToNodeSize(0)); const VkDeviceSize unusableSize = GetUnusableSize(); - if (unusableSize > 0) { + if(unusableSize > 0) + { PrintDetailedMap_UnusedRange(json, - m_UsableSize, // offset - unusableSize); // size + m_UsableSize, // offset + unusableSize); // size } PrintDetailedMap_End(json); @@ -9927,43 +11222,49 @@ void VmaBlockMetadata_Buddy::PrintDetailedMap(class VmaJsonWriter &json) const { #endif // #if VMA_STATS_STRING_ENABLED bool VmaBlockMetadata_Buddy::CreateAllocationRequest( - uint32_t currentFrameIndex, - uint32_t frameInUseCount, - VkDeviceSize bufferImageGranularity, - VkDeviceSize allocSize, - VkDeviceSize allocAlignment, - bool upperAddress, - VmaSuballocationType allocType, - bool canMakeOtherLost, - uint32_t strategy, - VmaAllocationRequest *pAllocationRequest) { + uint32_t currentFrameIndex, + uint32_t frameInUseCount, + VkDeviceSize bufferImageGranularity, + VkDeviceSize allocSize, + VkDeviceSize allocAlignment, + bool upperAddress, + VmaSuballocationType allocType, + bool canMakeOtherLost, + uint32_t strategy, + VmaAllocationRequest* pAllocationRequest) +{ VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm."); // Simple way to respect bufferImageGranularity. May be optimized some day. // Whenever it might be an OPTIMAL image... 
- if (allocType == VMA_SUBALLOCATION_TYPE_UNKNOWN || - allocType == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN || - allocType == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL) { + if(allocType == VMA_SUBALLOCATION_TYPE_UNKNOWN || + allocType == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN || + allocType == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL) + { allocAlignment = VMA_MAX(allocAlignment, bufferImageGranularity); allocSize = VMA_MAX(allocSize, bufferImageGranularity); } - if (allocSize > m_UsableSize) { + if(allocSize > m_UsableSize) + { return false; } const uint32_t targetLevel = AllocSizeToLevel(allocSize); - for (uint32_t level = targetLevel + 1; level--;) { - for (Node *freeNode = m_FreeList[level].front; - freeNode != VMA_NULL; - freeNode = freeNode->free.next) { - if (freeNode->offset % allocAlignment == 0) { + for(uint32_t level = targetLevel + 1; level--; ) + { + for(Node* freeNode = m_FreeList[level].front; + freeNode != VMA_NULL; + freeNode = freeNode->free.next) + { + if(freeNode->offset % allocAlignment == 0) + { pAllocationRequest->type = VmaAllocationRequestType::Normal; pAllocationRequest->offset = freeNode->offset; pAllocationRequest->sumFreeSize = LevelToNodeSize(level); pAllocationRequest->sumItemSize = 0; pAllocationRequest->itemsToMakeLostCount = 0; - pAllocationRequest->customData = (void *)(uintptr_t)level; + pAllocationRequest->customData = (void*)(uintptr_t)level; return true; } } @@ -9973,52 +11274,57 @@ bool VmaBlockMetadata_Buddy::CreateAllocationRequest( } bool VmaBlockMetadata_Buddy::MakeRequestedAllocationsLost( - uint32_t currentFrameIndex, - uint32_t frameInUseCount, - VmaAllocationRequest *pAllocationRequest) { + uint32_t currentFrameIndex, + uint32_t frameInUseCount, + VmaAllocationRequest* pAllocationRequest) +{ /* - Lost allocations are not supported in buddy allocator at the moment. - Support might be added in the future. - */ + Lost allocations are not supported in buddy allocator at the moment. + Support might be added in the future. 
+ */ return pAllocationRequest->itemsToMakeLostCount == 0; } -uint32_t VmaBlockMetadata_Buddy::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount) { +uint32_t VmaBlockMetadata_Buddy::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount) +{ /* - Lost allocations are not supported in buddy allocator at the moment. - Support might be added in the future. - */ + Lost allocations are not supported in buddy allocator at the moment. + Support might be added in the future. + */ return 0; } void VmaBlockMetadata_Buddy::Alloc( - const VmaAllocationRequest &request, - VmaSuballocationType type, - VkDeviceSize allocSize, - VmaAllocation hAllocation) { + const VmaAllocationRequest& request, + VmaSuballocationType type, + VkDeviceSize allocSize, + VmaAllocation hAllocation) +{ VMA_ASSERT(request.type == VmaAllocationRequestType::Normal); const uint32_t targetLevel = AllocSizeToLevel(allocSize); uint32_t currLevel = (uint32_t)(uintptr_t)request.customData; - - Node *currNode = m_FreeList[currLevel].front; + + Node* currNode = m_FreeList[currLevel].front; VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE); - while (currNode->offset != request.offset) { + while(currNode->offset != request.offset) + { currNode = currNode->free.next; VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE); } - + // Go down, splitting free nodes. - while (currLevel < targetLevel) { + while(currLevel < targetLevel) + { // currNode is already first free node at currLevel. // Remove it from list of free nodes at this currLevel. RemoveFromFreeList(currLevel, currNode); - + const uint32_t childrenLevel = currLevel + 1; // Create two free sub-nodes. 
- Node *leftChild = vma_new(GetAllocationCallbacks(), Node)(); - Node *rightChild = vma_new(GetAllocationCallbacks(), Node)(); + Node* leftChild = vma_new(GetAllocationCallbacks(), Node)(); + Node* rightChild = vma_new(GetAllocationCallbacks(), Node)(); leftChild->offset = currNode->offset; leftChild->type = Node::TYPE_FREE; @@ -10044,15 +11350,15 @@ void VmaBlockMetadata_Buddy::Alloc( currNode = m_FreeList[currLevel].front; /* - We can be sure that currNode, as left child of node previously split, - also fullfills the alignment requirement. - */ + We can be sure that currNode, as left child of node previously split, + also fullfills the alignment requirement. + */ } // Remove from free list. VMA_ASSERT(currLevel == targetLevel && - currNode != VMA_NULL && - currNode->type == Node::TYPE_FREE); + currNode != VMA_NULL && + currNode->type == Node::TYPE_FREE); RemoveFromFreeList(currLevel, currNode); // Convert to allocation node. @@ -10064,8 +11370,10 @@ void VmaBlockMetadata_Buddy::Alloc( m_SumFreeSize -= allocSize; } -void VmaBlockMetadata_Buddy::DeleteNode(Node *node) { - if (node->type == Node::TYPE_SPLIT) { +void VmaBlockMetadata_Buddy::DeleteNode(Node* node) +{ + if(node->type == Node::TYPE_SPLIT) + { DeleteNode(node->split.leftChild->buddy); DeleteNode(node->split.leftChild); } @@ -10073,50 +11381,58 @@ void VmaBlockMetadata_Buddy::DeleteNode(Node *node) { vma_delete(GetAllocationCallbacks(), node); } -bool VmaBlockMetadata_Buddy::ValidateNode(ValidationContext &ctx, const Node *parent, const Node *curr, uint32_t level, VkDeviceSize levelNodeSize) const { +bool VmaBlockMetadata_Buddy::ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const +{ VMA_VALIDATE(level < m_LevelCount); VMA_VALIDATE(curr->parent == parent); VMA_VALIDATE((curr->buddy == VMA_NULL) == (parent == VMA_NULL)); VMA_VALIDATE(curr->buddy == VMA_NULL || curr->buddy->buddy == curr); - switch (curr->type) { - case Node::TYPE_FREE: 
- // curr->free.prev, next are validated separately. - ctx.calculatedSumFreeSize += levelNodeSize; - ++ctx.calculatedFreeCount; - break; - case Node::TYPE_ALLOCATION: - ++ctx.calculatedAllocationCount; - ctx.calculatedSumFreeSize += levelNodeSize - curr->allocation.alloc->GetSize(); - VMA_VALIDATE(curr->allocation.alloc != VK_NULL_HANDLE); - break; - case Node::TYPE_SPLIT: { + switch(curr->type) + { + case Node::TYPE_FREE: + // curr->free.prev, next are validated separately. + ctx.calculatedSumFreeSize += levelNodeSize; + ++ctx.calculatedFreeCount; + break; + case Node::TYPE_ALLOCATION: + ++ctx.calculatedAllocationCount; + ctx.calculatedSumFreeSize += levelNodeSize - curr->allocation.alloc->GetSize(); + VMA_VALIDATE(curr->allocation.alloc != VK_NULL_HANDLE); + break; + case Node::TYPE_SPLIT: + { const uint32_t childrenLevel = level + 1; const VkDeviceSize childrenLevelNodeSize = levelNodeSize / 2; - const Node *const leftChild = curr->split.leftChild; + const Node* const leftChild = curr->split.leftChild; VMA_VALIDATE(leftChild != VMA_NULL); VMA_VALIDATE(leftChild->offset == curr->offset); - if (!ValidateNode(ctx, curr, leftChild, childrenLevel, childrenLevelNodeSize)) { + if(!ValidateNode(ctx, curr, leftChild, childrenLevel, childrenLevelNodeSize)) + { VMA_VALIDATE(false && "ValidateNode for left child failed."); } - const Node *const rightChild = leftChild->buddy; + const Node* const rightChild = leftChild->buddy; VMA_VALIDATE(rightChild->offset == curr->offset + childrenLevelNodeSize); - if (!ValidateNode(ctx, curr, rightChild, childrenLevel, childrenLevelNodeSize)) { + if(!ValidateNode(ctx, curr, rightChild, childrenLevel, childrenLevelNodeSize)) + { VMA_VALIDATE(false && "ValidateNode for right child failed."); } - } break; - default: - return false; + } + break; + default: + return false; } return true; } -uint32_t VmaBlockMetadata_Buddy::AllocSizeToLevel(VkDeviceSize allocSize) const { +uint32_t VmaBlockMetadata_Buddy::AllocSizeToLevel(VkDeviceSize 
allocSize) const +{ // I know this could be optimized somehow e.g. by using std::log2p1 from C++20. uint32_t level = 0; VkDeviceSize currLevelNodeSize = m_UsableSize; VkDeviceSize nextLevelNodeSize = currLevelNodeSize >> 1; - while (allocSize <= nextLevelNodeSize && level + 1 < m_LevelCount) { + while(allocSize <= nextLevelNodeSize && level + 1 < m_LevelCount) + { ++level; currLevelNodeSize = nextLevelNodeSize; nextLevelNodeSize = currLevelNodeSize >> 1; @@ -10124,17 +11440,22 @@ uint32_t VmaBlockMetadata_Buddy::AllocSizeToLevel(VkDeviceSize allocSize) const return level; } -void VmaBlockMetadata_Buddy::FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset) { +void VmaBlockMetadata_Buddy::FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset) +{ // Find node and level. - Node *node = m_Root; + Node* node = m_Root; VkDeviceSize nodeOffset = 0; uint32_t level = 0; VkDeviceSize levelNodeSize = LevelToNodeSize(0); - while (node->type == Node::TYPE_SPLIT) { + while(node->type == Node::TYPE_SPLIT) + { const VkDeviceSize nextLevelSize = levelNodeSize >> 1; - if (offset < nodeOffset + nextLevelSize) { + if(offset < nodeOffset + nextLevelSize) + { node = node->split.leftChild; - } else { + } + else + { node = node->split.leftChild->buddy; nodeOffset += nextLevelSize; } @@ -10152,14 +11473,15 @@ void VmaBlockMetadata_Buddy::FreeAtOffset(VmaAllocation alloc, VkDeviceSize offs node->type = Node::TYPE_FREE; // Join free nodes if possible. - while (level > 0 && node->buddy->type == Node::TYPE_FREE) { + while(level > 0 && node->buddy->type == Node::TYPE_FREE) + { RemoveFromFreeList(level, node->buddy); - Node *const parent = node->parent; + Node* const parent = node->parent; vma_delete(GetAllocationCallbacks(), node->buddy); vma_delete(GetAllocationCallbacks(), node); parent->type = Node::TYPE_FREE; - + node = parent; --level; //m_SumFreeSize += LevelToNodeSize(level) % 2; // Useful only when level node sizes can be non power of 2. 
@@ -10169,15 +11491,18 @@ void VmaBlockMetadata_Buddy::FreeAtOffset(VmaAllocation alloc, VkDeviceSize offs AddToFreeListFront(level, node); } -void VmaBlockMetadata_Buddy::CalcAllocationStatInfoNode(VmaStatInfo &outInfo, const Node *node, VkDeviceSize levelNodeSize) const { - switch (node->type) { - case Node::TYPE_FREE: - ++outInfo.unusedRangeCount; - outInfo.unusedBytes += levelNodeSize; - outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, levelNodeSize); - outInfo.unusedRangeSizeMin = VMA_MAX(outInfo.unusedRangeSizeMin, levelNodeSize); - break; - case Node::TYPE_ALLOCATION: { +void VmaBlockMetadata_Buddy::CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const +{ + switch(node->type) + { + case Node::TYPE_FREE: + ++outInfo.unusedRangeCount; + outInfo.unusedBytes += levelNodeSize; + outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, levelNodeSize); + outInfo.unusedRangeSizeMin = VMA_MAX(outInfo.unusedRangeSizeMin, levelNodeSize); + break; + case Node::TYPE_ALLOCATION: + { const VkDeviceSize allocSize = node->allocation.alloc->GetSize(); ++outInfo.allocationCount; outInfo.usedBytes += allocSize; @@ -10185,35 +11510,43 @@ void VmaBlockMetadata_Buddy::CalcAllocationStatInfoNode(VmaStatInfo &outInfo, co outInfo.allocationSizeMin = VMA_MAX(outInfo.allocationSizeMin, allocSize); const VkDeviceSize unusedRangeSize = levelNodeSize - allocSize; - if (unusedRangeSize > 0) { + if(unusedRangeSize > 0) + { ++outInfo.unusedRangeCount; outInfo.unusedBytes += unusedRangeSize; outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize); outInfo.unusedRangeSizeMin = VMA_MAX(outInfo.unusedRangeSizeMin, unusedRangeSize); } - } break; - case Node::TYPE_SPLIT: { + } + break; + case Node::TYPE_SPLIT: + { const VkDeviceSize childrenNodeSize = levelNodeSize / 2; - const Node *const leftChild = node->split.leftChild; + const Node* const leftChild = node->split.leftChild; 
CalcAllocationStatInfoNode(outInfo, leftChild, childrenNodeSize); - const Node *const rightChild = leftChild->buddy; + const Node* const rightChild = leftChild->buddy; CalcAllocationStatInfoNode(outInfo, rightChild, childrenNodeSize); - } break; - default: - VMA_ASSERT(0); + } + break; + default: + VMA_ASSERT(0); } } -void VmaBlockMetadata_Buddy::AddToFreeListFront(uint32_t level, Node *node) { +void VmaBlockMetadata_Buddy::AddToFreeListFront(uint32_t level, Node* node) +{ VMA_ASSERT(node->type == Node::TYPE_FREE); // List is empty. - Node *const frontNode = m_FreeList[level].front; - if (frontNode == VMA_NULL) { + Node* const frontNode = m_FreeList[level].front; + if(frontNode == VMA_NULL) + { VMA_ASSERT(m_FreeList[level].back == VMA_NULL); node->free.prev = node->free.next = VMA_NULL; m_FreeList[level].front = m_FreeList[level].back = node; - } else { + } + else + { VMA_ASSERT(frontNode->free.prev == VMA_NULL); node->free.prev = VMA_NULL; node->free.next = frontNode; @@ -10222,76 +11555,93 @@ void VmaBlockMetadata_Buddy::AddToFreeListFront(uint32_t level, Node *node) { } } -void VmaBlockMetadata_Buddy::RemoveFromFreeList(uint32_t level, Node *node) { +void VmaBlockMetadata_Buddy::RemoveFromFreeList(uint32_t level, Node* node) +{ VMA_ASSERT(m_FreeList[level].front != VMA_NULL); // It is at the front. - if (node->free.prev == VMA_NULL) { + if(node->free.prev == VMA_NULL) + { VMA_ASSERT(m_FreeList[level].front == node); m_FreeList[level].front = node->free.next; - } else { - Node *const prevFreeNode = node->free.prev; + } + else + { + Node* const prevFreeNode = node->free.prev; VMA_ASSERT(prevFreeNode->free.next == node); prevFreeNode->free.next = node->free.next; } // It is at the back. 
- if (node->free.next == VMA_NULL) { + if(node->free.next == VMA_NULL) + { VMA_ASSERT(m_FreeList[level].back == node); m_FreeList[level].back = node->free.prev; - } else { - Node *const nextFreeNode = node->free.next; + } + else + { + Node* const nextFreeNode = node->free.next; VMA_ASSERT(nextFreeNode->free.prev == node); nextFreeNode->free.prev = node->free.prev; } } #if VMA_STATS_STRING_ENABLED -void VmaBlockMetadata_Buddy::PrintDetailedMapNode(class VmaJsonWriter &json, const Node *node, VkDeviceSize levelNodeSize) const { - switch (node->type) { - case Node::TYPE_FREE: - PrintDetailedMap_UnusedRange(json, node->offset, levelNodeSize); - break; - case Node::TYPE_ALLOCATION: { +void VmaBlockMetadata_Buddy::PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const +{ + switch(node->type) + { + case Node::TYPE_FREE: + PrintDetailedMap_UnusedRange(json, node->offset, levelNodeSize); + break; + case Node::TYPE_ALLOCATION: + { PrintDetailedMap_Allocation(json, node->offset, node->allocation.alloc); const VkDeviceSize allocSize = node->allocation.alloc->GetSize(); - if (allocSize < levelNodeSize) { + if(allocSize < levelNodeSize) + { PrintDetailedMap_UnusedRange(json, node->offset + allocSize, levelNodeSize - allocSize); } - } break; - case Node::TYPE_SPLIT: { + } + break; + case Node::TYPE_SPLIT: + { const VkDeviceSize childrenNodeSize = levelNodeSize / 2; - const Node *const leftChild = node->split.leftChild; + const Node* const leftChild = node->split.leftChild; PrintDetailedMapNode(json, leftChild, childrenNodeSize); - const Node *const rightChild = leftChild->buddy; + const Node* const rightChild = leftChild->buddy; PrintDetailedMapNode(json, rightChild, childrenNodeSize); - } break; - default: - VMA_ASSERT(0); + } + break; + default: + VMA_ASSERT(0); } } #endif // #if VMA_STATS_STRING_ENABLED + //////////////////////////////////////////////////////////////////////////////// // class VmaDeviceMemoryBlock 
VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) : - m_pMetadata(VMA_NULL), - m_MemoryTypeIndex(UINT32_MAX), - m_Id(0), - m_hMemory(VK_NULL_HANDLE), - m_MapCount(0), - m_pMappedData(VMA_NULL) { + m_pMetadata(VMA_NULL), + m_MemoryTypeIndex(UINT32_MAX), + m_Id(0), + m_hMemory(VK_NULL_HANDLE), + m_MapCount(0), + m_pMappedData(VMA_NULL) +{ } void VmaDeviceMemoryBlock::Init( - VmaAllocator hAllocator, - VmaPool hParentPool, - uint32_t newMemoryTypeIndex, - VkDeviceMemory newMemory, - VkDeviceSize newSize, - uint32_t id, - uint32_t algorithm) { + VmaAllocator hAllocator, + VmaPool hParentPool, + uint32_t newMemoryTypeIndex, + VkDeviceMemory newMemory, + VkDeviceSize newSize, + uint32_t id, + uint32_t algorithm) +{ VMA_ASSERT(m_hMemory == VK_NULL_HANDLE); m_hParentPool = hParentPool; @@ -10299,23 +11649,25 @@ void VmaDeviceMemoryBlock::Init( m_Id = id; m_hMemory = newMemory; - switch (algorithm) { - case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT: - m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Linear)(hAllocator); - break; - case VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT: - m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Buddy)(hAllocator); - break; - default: - VMA_ASSERT(0); - // Fall-through. - case 0: - m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Generic)(hAllocator); + switch(algorithm) + { + case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT: + m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Linear)(hAllocator); + break; + case VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT: + m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Buddy)(hAllocator); + break; + default: + VMA_ASSERT(0); + // Fall-through. + case 0: + m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Generic)(hAllocator); } m_pMetadata->Init(newSize); } -void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator) { +void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator) +{ // This is the most important assert in the entire library. 
// Hitting it means you have some memory leak - unreleased VmaAllocation objects. VMA_ASSERT(m_pMetadata->IsEmpty() && "Some allocations were not freed before destruction of this memory block!"); @@ -10328,17 +11680,20 @@ void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator) { m_pMetadata = VMA_NULL; } -bool VmaDeviceMemoryBlock::Validate() const { +bool VmaDeviceMemoryBlock::Validate() const +{ VMA_VALIDATE((m_hMemory != VK_NULL_HANDLE) && - (m_pMetadata->GetSize() != 0)); - + (m_pMetadata->GetSize() != 0)); + return m_pMetadata->Validate(); } -VkResult VmaDeviceMemoryBlock::CheckCorruption(VmaAllocator hAllocator) { - void *pData = nullptr; +VkResult VmaDeviceMemoryBlock::CheckCorruption(VmaAllocator hAllocator) +{ + void* pData = nullptr; VkResult res = Map(hAllocator, 1, &pData); - if (res != VK_SUCCESS) { + if(res != VK_SUCCESS) + { return res; } @@ -10349,29 +11704,37 @@ VkResult VmaDeviceMemoryBlock::CheckCorruption(VmaAllocator hAllocator) { return res; } -VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void **ppData) { - if (count == 0) { +VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData) +{ + if(count == 0) + { return VK_SUCCESS; } VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex); - if (m_MapCount != 0) { + if(m_MapCount != 0) + { m_MapCount += count; VMA_ASSERT(m_pMappedData != VMA_NULL); - if (ppData != VMA_NULL) { + if(ppData != VMA_NULL) + { *ppData = m_pMappedData; } return VK_SUCCESS; - } else { + } + else + { VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)( - hAllocator->m_hDevice, - m_hMemory, - 0, // offset - VK_WHOLE_SIZE, - 0, // flags - &m_pMappedData); - if (result == VK_SUCCESS) { - if (ppData != VMA_NULL) { + hAllocator->m_hDevice, + m_hMemory, + 0, // offset + VK_WHOLE_SIZE, + 0, // flags + &m_pMappedData); + if(result == VK_SUCCESS) + { + if(ppData != VMA_NULL) + { *ppData = m_pMappedData; } m_MapCount = count; @@ -10380,30 +11743,38 @@ VkResult 
VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void } } -void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count) { - if (count == 0) { +void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count) +{ + if(count == 0) + { return; } VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex); - if (m_MapCount >= count) { + if(m_MapCount >= count) + { m_MapCount -= count; - if (m_MapCount == 0) { + if(m_MapCount == 0) + { m_pMappedData = VMA_NULL; (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory); } - } else { + } + else + { VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped."); } } -VkResult VmaDeviceMemoryBlock::WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize) { +VkResult VmaDeviceMemoryBlock::WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize) +{ VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION); VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN); - void *pData; + void* pData; VkResult res = Map(hAllocator, 1, &pData); - if (res != VK_SUCCESS) { + if(res != VK_SUCCESS) + { return res; } @@ -10415,19 +11786,24 @@ VkResult VmaDeviceMemoryBlock::WriteMagicValueAroundAllocation(VmaAllocator hAll return VK_SUCCESS; } -VkResult VmaDeviceMemoryBlock::ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize) { +VkResult VmaDeviceMemoryBlock::ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize) +{ VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION); VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN); - void *pData; + void* pData; VkResult res = Map(hAllocator, 1, &pData); - if (res != VK_SUCCESS) { + if(res != VK_SUCCESS) + { return res; } - if 
(!VmaValidateMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN)) { + if(!VmaValidateMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN)) + { VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE FREED ALLOCATION!"); - } else if (!VmaValidateMagicValue(pData, allocOffset + allocSize)) { + } + else if(!VmaValidateMagicValue(pData, allocOffset + allocSize)) + { VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER FREED ALLOCATION!"); } @@ -10437,43 +11813,49 @@ VkResult VmaDeviceMemoryBlock::ValidateMagicValueAroundAllocation(VmaAllocator h } VkResult VmaDeviceMemoryBlock::BindBufferMemory( - const VmaAllocator hAllocator, - const VmaAllocation hAllocation, - VkBuffer hBuffer) { + const VmaAllocator hAllocator, + const VmaAllocation hAllocation, + VkDeviceSize allocationLocalOffset, + VkBuffer hBuffer, + const void* pNext) +{ VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK && - hAllocation->GetBlock() == this); + hAllocation->GetBlock() == this); + VMA_ASSERT(allocationLocalOffset < hAllocation->GetSize() && + "Invalid allocationLocalOffset. Did you forget that this offset is relative to the beginning of the allocation, not the whole memory block?"); + const VkDeviceSize memoryOffset = hAllocation->GetOffset() + allocationLocalOffset; // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads. 
VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex); - return hAllocator->GetVulkanFunctions().vkBindBufferMemory( - hAllocator->m_hDevice, - hBuffer, - m_hMemory, - hAllocation->GetOffset()); + return hAllocator->BindVulkanBuffer(m_hMemory, memoryOffset, hBuffer, pNext); } VkResult VmaDeviceMemoryBlock::BindImageMemory( - const VmaAllocator hAllocator, - const VmaAllocation hAllocation, - VkImage hImage) { + const VmaAllocator hAllocator, + const VmaAllocation hAllocation, + VkDeviceSize allocationLocalOffset, + VkImage hImage, + const void* pNext) +{ VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK && - hAllocation->GetBlock() == this); + hAllocation->GetBlock() == this); + VMA_ASSERT(allocationLocalOffset < hAllocation->GetSize() && + "Invalid allocationLocalOffset. Did you forget that this offset is relative to the beginning of the allocation, not the whole memory block?"); + const VkDeviceSize memoryOffset = hAllocation->GetOffset() + allocationLocalOffset; // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads. VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex); - return hAllocator->GetVulkanFunctions().vkBindImageMemory( - hAllocator->m_hDevice, - hImage, - m_hMemory, - hAllocation->GetOffset()); + return hAllocator->BindVulkanImage(m_hMemory, memoryOffset, hImage, pNext); } -static void InitStatInfo(VmaStatInfo &outInfo) { +static void InitStatInfo(VmaStatInfo& outInfo) +{ memset(&outInfo, 0, sizeof(outInfo)); outInfo.allocationSizeMin = UINT64_MAX; outInfo.unusedRangeSizeMin = UINT64_MAX; } // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo. 
-static void VmaAddStatInfo(VmaStatInfo &inoutInfo, const VmaStatInfo &srcInfo) { +static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo) +{ inoutInfo.blockCount += srcInfo.blockCount; inoutInfo.allocationCount += srcInfo.allocationCount; inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount; @@ -10485,35 +11867,51 @@ static void VmaAddStatInfo(VmaStatInfo &inoutInfo, const VmaStatInfo &srcInfo) { inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax); } -static void VmaPostprocessCalcStatInfo(VmaStatInfo &inoutInfo) { +static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo) +{ inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ? - VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : - 0; + VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0; inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ? - VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : - 0; + VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0; } VmaPool_T::VmaPool_T( - VmaAllocator hAllocator, - const VmaPoolCreateInfo &createInfo, - VkDeviceSize preferredBlockSize) : - m_BlockVector( - hAllocator, - this, // hParentPool - createInfo.memoryTypeIndex, - createInfo.blockSize != 0 ? createInfo.blockSize : preferredBlockSize, - createInfo.minBlockCount, - createInfo.maxBlockCount, - (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 
1 : hAllocator->GetBufferImageGranularity(), - createInfo.frameInUseCount, - true, // isCustomPool - createInfo.blockSize != 0, // explicitBlockSize - createInfo.flags & VMA_POOL_CREATE_ALGORITHM_MASK), // algorithm - m_Id(0) { -} - -VmaPool_T::~VmaPool_T() { + VmaAllocator hAllocator, + const VmaPoolCreateInfo& createInfo, + VkDeviceSize preferredBlockSize) : + m_BlockVector( + hAllocator, + this, // hParentPool + createInfo.memoryTypeIndex, + createInfo.blockSize != 0 ? createInfo.blockSize : preferredBlockSize, + createInfo.minBlockCount, + createInfo.maxBlockCount, + (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(), + createInfo.frameInUseCount, + createInfo.blockSize != 0, // explicitBlockSize + createInfo.flags & VMA_POOL_CREATE_ALGORITHM_MASK), // algorithm + m_Id(0), + m_Name(VMA_NULL) +{ +} + +VmaPool_T::~VmaPool_T() +{ +} + +void VmaPool_T::SetName(const char* pName) +{ + const VkAllocationCallbacks* allocs = m_BlockVector.GetAllocator()->GetAllocationCallbacks(); + VmaFreeString(allocs, m_Name); + + if(pName != VMA_NULL) + { + m_Name = VmaCreateStringCopy(allocs, pName); + } + else + { + m_Name = VMA_NULL; + } } #if VMA_STATS_STRING_ENABLED @@ -10521,51 +11919,56 @@ VmaPool_T::~VmaPool_T() { #endif // #if VMA_STATS_STRING_ENABLED VmaBlockVector::VmaBlockVector( - VmaAllocator hAllocator, - VmaPool hParentPool, - uint32_t memoryTypeIndex, - VkDeviceSize preferredBlockSize, - size_t minBlockCount, - size_t maxBlockCount, - VkDeviceSize bufferImageGranularity, - uint32_t frameInUseCount, - bool isCustomPool, - bool explicitBlockSize, - uint32_t algorithm) : - m_hAllocator(hAllocator), - m_hParentPool(hParentPool), - m_MemoryTypeIndex(memoryTypeIndex), - m_PreferredBlockSize(preferredBlockSize), - m_MinBlockCount(minBlockCount), - m_MaxBlockCount(maxBlockCount), - m_BufferImageGranularity(bufferImageGranularity), - m_FrameInUseCount(frameInUseCount), - m_IsCustomPool(isCustomPool), - 
m_ExplicitBlockSize(explicitBlockSize), - m_Algorithm(algorithm), - m_HasEmptyBlock(false), - m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock *>(hAllocator->GetAllocationCallbacks())), - m_NextBlockId(0) { -} - -VmaBlockVector::~VmaBlockVector() { - for (size_t i = m_Blocks.size(); i--;) { + VmaAllocator hAllocator, + VmaPool hParentPool, + uint32_t memoryTypeIndex, + VkDeviceSize preferredBlockSize, + size_t minBlockCount, + size_t maxBlockCount, + VkDeviceSize bufferImageGranularity, + uint32_t frameInUseCount, + bool explicitBlockSize, + uint32_t algorithm) : + m_hAllocator(hAllocator), + m_hParentPool(hParentPool), + m_MemoryTypeIndex(memoryTypeIndex), + m_PreferredBlockSize(preferredBlockSize), + m_MinBlockCount(minBlockCount), + m_MaxBlockCount(maxBlockCount), + m_BufferImageGranularity(bufferImageGranularity), + m_FrameInUseCount(frameInUseCount), + m_ExplicitBlockSize(explicitBlockSize), + m_Algorithm(algorithm), + m_HasEmptyBlock(false), + m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())), + m_NextBlockId(0) +{ +} + +VmaBlockVector::~VmaBlockVector() +{ + for(size_t i = m_Blocks.size(); i--; ) + { m_Blocks[i]->Destroy(m_hAllocator); vma_delete(m_hAllocator, m_Blocks[i]); } } -VkResult VmaBlockVector::CreateMinBlocks() { - for (size_t i = 0; i < m_MinBlockCount; ++i) { +VkResult VmaBlockVector::CreateMinBlocks() +{ + for(size_t i = 0; i < m_MinBlockCount; ++i) + { VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL); - if (res != VK_SUCCESS) { + if(res != VK_SUCCESS) + { return res; } } return VK_SUCCESS; } -void VmaBlockVector::GetPoolStats(VmaPoolStats *pStats) { +void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats) +{ VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex); const size_t blockCount = m_Blocks.size(); @@ -10577,59 +11980,73 @@ void VmaBlockVector::GetPoolStats(VmaPoolStats *pStats) { pStats->unusedRangeSizeMax = 0; pStats->blockCount = blockCount; - for (uint32_t blockIndex = 0; blockIndex < 
blockCount; ++blockIndex) { - const VmaDeviceMemoryBlock *const pBlock = m_Blocks[blockIndex]; + for(uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex) + { + const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex]; VMA_ASSERT(pBlock); VMA_HEAVY_ASSERT(pBlock->Validate()); pBlock->m_pMetadata->AddPoolStats(*pStats); } } -bool VmaBlockVector::IsCorruptionDetectionEnabled() const { +bool VmaBlockVector::IsEmpty() +{ + VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex); + return m_Blocks.empty(); +} + +bool VmaBlockVector::IsCorruptionDetectionEnabled() const +{ const uint32_t requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT; return (VMA_DEBUG_DETECT_CORRUPTION != 0) && - (VMA_DEBUG_MARGIN > 0) && - (m_Algorithm == 0 || m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT) && - (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags; + (VMA_DEBUG_MARGIN > 0) && + (m_Algorithm == 0 || m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT) && + (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags; } static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32; VkResult VmaBlockVector::Allocate( - uint32_t currentFrameIndex, - VkDeviceSize size, - VkDeviceSize alignment, - const VmaAllocationCreateInfo &createInfo, - VmaSuballocationType suballocType, - size_t allocationCount, - VmaAllocation *pAllocations) { + uint32_t currentFrameIndex, + VkDeviceSize size, + VkDeviceSize alignment, + const VmaAllocationCreateInfo& createInfo, + VmaSuballocationType suballocType, + size_t allocationCount, + VmaAllocation* pAllocations) +{ size_t allocIndex; VkResult res = VK_SUCCESS; - if (IsCorruptionDetectionEnabled()) { + if(IsCorruptionDetectionEnabled()) + { size = VmaAlignUp<VkDeviceSize>(size, sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE)); alignment = VmaAlignUp<VkDeviceSize>(alignment, 
sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE)); } { VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex); - for (allocIndex = 0; allocIndex < allocationCount; ++allocIndex) { + for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex) + { res = AllocatePage( - currentFrameIndex, - size, - alignment, - createInfo, - suballocType, - pAllocations + allocIndex); - if (res != VK_SUCCESS) { + currentFrameIndex, + size, + alignment, + createInfo, + suballocType, + pAllocations + allocIndex); + if(res != VK_SUCCESS) + { break; } } } - if (res != VK_SUCCESS) { + if(res != VK_SUCCESS) + { // Free all already created allocations. - while (allocIndex--) { + while(allocIndex--) + { Free(pAllocations[allocIndex]); } memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount); @@ -10639,67 +12056,113 @@ VkResult VmaBlockVector::Allocate( } VkResult VmaBlockVector::AllocatePage( - uint32_t currentFrameIndex, - VkDeviceSize size, - VkDeviceSize alignment, - const VmaAllocationCreateInfo &createInfo, - VmaSuballocationType suballocType, - VmaAllocation *pAllocation) { + uint32_t currentFrameIndex, + VkDeviceSize size, + VkDeviceSize alignment, + const VmaAllocationCreateInfo& createInfo, + VmaSuballocationType suballocType, + VmaAllocation* pAllocation) +{ const bool isUpperAddress = (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0; bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0; const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0; const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0; + + const bool withinBudget = (createInfo.flags & VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT) != 0; + VkDeviceSize freeMemory; + { + const uint32_t heapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex); + VmaBudget heapBudget = {}; + m_hAllocator->GetBudget(&heapBudget, heapIndex, 1); + freeMemory = (heapBudget.usage < 
heapBudget.budget) ? (heapBudget.budget - heapBudget.usage) : 0; + } + + const bool canFallbackToDedicated = !IsCustomPool(); const bool canCreateNewBlock = - ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) && - (m_Blocks.size() < m_MaxBlockCount); + ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) && + (m_Blocks.size() < m_MaxBlockCount) && + (freeMemory >= size || !canFallbackToDedicated); uint32_t strategy = createInfo.flags & VMA_ALLOCATION_CREATE_STRATEGY_MASK; // If linearAlgorithm is used, canMakeOtherLost is available only when used as ring buffer. // Which in turn is available only when maxBlockCount = 1. - if (m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT && m_MaxBlockCount > 1) { + if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT && m_MaxBlockCount > 1) + { canMakeOtherLost = false; } // Upper address can only be used with linear allocator and within single memory block. - if (isUpperAddress && - (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT || m_MaxBlockCount > 1)) { + if(isUpperAddress && + (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT || m_MaxBlockCount > 1)) + { return VK_ERROR_FEATURE_NOT_PRESENT; } // Validate strategy. - switch (strategy) { - case 0: - strategy = VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT; - break; - case VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT: - case VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT: - case VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT: - break; - default: - return VK_ERROR_FEATURE_NOT_PRESENT; + switch(strategy) + { + case 0: + strategy = VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT; + break; + case VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT: + case VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT: + case VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT: + break; + default: + return VK_ERROR_FEATURE_NOT_PRESENT; } // Early reject: requested allocation size is larger that maximum block size for this block vector. 
- if (size + 2 * VMA_DEBUG_MARGIN > m_PreferredBlockSize) { + if(size + 2 * VMA_DEBUG_MARGIN > m_PreferredBlockSize) + { return VK_ERROR_OUT_OF_DEVICE_MEMORY; } /* - Under certain condition, this whole section can be skipped for optimization, so - we move on directly to trying to allocate with canMakeOtherLost. That's the case - e.g. for custom pools with linear algorithm. - */ - if (!canMakeOtherLost || canCreateNewBlock) { + Under certain condition, this whole section can be skipped for optimization, so + we move on directly to trying to allocate with canMakeOtherLost. That's the case + e.g. for custom pools with linear algorithm. + */ + if(!canMakeOtherLost || canCreateNewBlock) + { // 1. Search existing allocations. Try to allocate without making other allocations lost. VmaAllocationCreateFlags allocFlagsCopy = createInfo.flags; allocFlagsCopy &= ~VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT; - if (m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT) { + if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT) + { // Use only last block. - if (!m_Blocks.empty()) { - VmaDeviceMemoryBlock *const pCurrBlock = m_Blocks.back(); + if(!m_Blocks.empty()) + { + VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks.back(); VMA_ASSERT(pCurrBlock); VkResult res = AllocateFromBlock( + pCurrBlock, + currentFrameIndex, + size, + alignment, + allocFlagsCopy, + createInfo.pUserData, + suballocType, + strategy, + pAllocation); + if(res == VK_SUCCESS) + { + VMA_DEBUG_LOG(" Returned from last block #%u", pCurrBlock->GetId()); + return VK_SUCCESS; + } + } + } + else + { + if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT) + { + // Forward order in m_Blocks - prefer blocks with smallest amount of free space. 
+ for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex ) + { + VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex]; + VMA_ASSERT(pCurrBlock); + VkResult res = AllocateFromBlock( pCurrBlock, currentFrameIndex, size, @@ -10709,50 +12172,33 @@ VkResult VmaBlockVector::AllocatePage( suballocType, strategy, pAllocation); - if (res == VK_SUCCESS) { - VMA_DEBUG_LOG(" Returned from last block #%u", (uint32_t)(m_Blocks.size() - 1)); - return VK_SUCCESS; - } - } - } else { - if (strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT) { - // Forward order in m_Blocks - prefer blocks with smallest amount of free space. - for (size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex) { - VmaDeviceMemoryBlock *const pCurrBlock = m_Blocks[blockIndex]; - VMA_ASSERT(pCurrBlock); - VkResult res = AllocateFromBlock( - pCurrBlock, - currentFrameIndex, - size, - alignment, - allocFlagsCopy, - createInfo.pUserData, - suballocType, - strategy, - pAllocation); - if (res == VK_SUCCESS) { - VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex); + if(res == VK_SUCCESS) + { + VMA_DEBUG_LOG(" Returned from existing block #%u", pCurrBlock->GetId()); return VK_SUCCESS; } } - } else // WORST_FIT, FIRST_FIT + } + else // WORST_FIT, FIRST_FIT { // Backward order in m_Blocks - prefer blocks with largest amount of free space. 
- for (size_t blockIndex = m_Blocks.size(); blockIndex--;) { - VmaDeviceMemoryBlock *const pCurrBlock = m_Blocks[blockIndex]; + for(size_t blockIndex = m_Blocks.size(); blockIndex--; ) + { + VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex]; VMA_ASSERT(pCurrBlock); VkResult res = AllocateFromBlock( - pCurrBlock, - currentFrameIndex, - size, - alignment, - allocFlagsCopy, - createInfo.pUserData, - suballocType, - strategy, - pAllocation); - if (res == VK_SUCCESS) { - VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex); + pCurrBlock, + currentFrameIndex, + size, + alignment, + allocFlagsCopy, + createInfo.pUserData, + suballocType, + strategy, + pAllocation); + if(res == VK_SUCCESS) + { + VMA_DEBUG_LOG(" Returned from existing block #%u", pCurrBlock->GetId()); return VK_SUCCESS; } } @@ -10760,60 +12206,77 @@ VkResult VmaBlockVector::AllocatePage( } // 2. Try to create new block. - if (canCreateNewBlock) { + if(canCreateNewBlock) + { // Calculate optimal size for new block. VkDeviceSize newBlockSize = m_PreferredBlockSize; uint32_t newBlockSizeShift = 0; const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3; - if (!m_ExplicitBlockSize) { + if(!m_ExplicitBlockSize) + { // Allocate 1/8, 1/4, 1/2 as first blocks. const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize(); - for (uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i) { + for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i) + { const VkDeviceSize smallerNewBlockSize = newBlockSize / 2; - if (smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= size * 2) { + if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= size * 2) + { newBlockSize = smallerNewBlockSize; ++newBlockSizeShift; - } else { + } + else + { break; } } } size_t newBlockIndex = 0; - VkResult res = CreateBlock(newBlockSize, &newBlockIndex); + VkResult res = (newBlockSize <= freeMemory || !canFallbackToDedicated) ? 
+ CreateBlock(newBlockSize, &newBlockIndex) : VK_ERROR_OUT_OF_DEVICE_MEMORY; // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize. - if (!m_ExplicitBlockSize) { - while (res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX) { + if(!m_ExplicitBlockSize) + { + while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX) + { const VkDeviceSize smallerNewBlockSize = newBlockSize / 2; - if (smallerNewBlockSize >= size) { + if(smallerNewBlockSize >= size) + { newBlockSize = smallerNewBlockSize; ++newBlockSizeShift; - res = CreateBlock(newBlockSize, &newBlockIndex); - } else { + res = (newBlockSize <= freeMemory || !canFallbackToDedicated) ? + CreateBlock(newBlockSize, &newBlockIndex) : VK_ERROR_OUT_OF_DEVICE_MEMORY; + } + else + { break; } } } - if (res == VK_SUCCESS) { - VmaDeviceMemoryBlock *const pBlock = m_Blocks[newBlockIndex]; + if(res == VK_SUCCESS) + { + VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex]; VMA_ASSERT(pBlock->m_pMetadata->GetSize() >= size); res = AllocateFromBlock( - pBlock, - currentFrameIndex, - size, - alignment, - allocFlagsCopy, - createInfo.pUserData, - suballocType, - strategy, - pAllocation); - if (res == VK_SUCCESS) { - VMA_DEBUG_LOG(" Created new block Size=%llu", newBlockSize); + pBlock, + currentFrameIndex, + size, + alignment, + allocFlagsCopy, + createInfo.pUserData, + suballocType, + strategy, + pAllocation); + if(res == VK_SUCCESS) + { + VMA_DEBUG_LOG(" Created new block #%u Size=%llu", pBlock->GetId(), newBlockSize); return VK_SUCCESS; - } else { + } + else + { // Allocation from new block failed, possibly due to VMA_DEBUG_MARGIN or alignment. return VK_ERROR_OUT_OF_DEVICE_MEMORY; } @@ -10822,72 +12285,84 @@ VkResult VmaBlockVector::AllocatePage( } // 3. Try to allocate from existing blocks with making other allocations lost. 
- if (canMakeOtherLost) { + if(canMakeOtherLost) + { uint32_t tryIndex = 0; - for (; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex) { - VmaDeviceMemoryBlock *pBestRequestBlock = VMA_NULL; + for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex) + { + VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL; VmaAllocationRequest bestRequest = {}; VkDeviceSize bestRequestCost = VK_WHOLE_SIZE; // 1. Search existing allocations. - if (strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT) { + if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT) + { // Forward order in m_Blocks - prefer blocks with smallest amount of free space. - for (size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex) { - VmaDeviceMemoryBlock *const pCurrBlock = m_Blocks[blockIndex]; + for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex ) + { + VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex]; VMA_ASSERT(pCurrBlock); VmaAllocationRequest currRequest = {}; - if (pCurrBlock->m_pMetadata->CreateAllocationRequest( - currentFrameIndex, - m_FrameInUseCount, - m_BufferImageGranularity, - size, - alignment, - (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0, - suballocType, - canMakeOtherLost, - strategy, - &currRequest)) { + if(pCurrBlock->m_pMetadata->CreateAllocationRequest( + currentFrameIndex, + m_FrameInUseCount, + m_BufferImageGranularity, + size, + alignment, + (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0, + suballocType, + canMakeOtherLost, + strategy, + &currRequest)) + { const VkDeviceSize currRequestCost = currRequest.CalcCost(); - if (pBestRequestBlock == VMA_NULL || - currRequestCost < bestRequestCost) { + if(pBestRequestBlock == VMA_NULL || + currRequestCost < bestRequestCost) + { pBestRequestBlock = pCurrBlock; bestRequest = currRequest; bestRequestCost = currRequestCost; - if (bestRequestCost == 0) { + if(bestRequestCost == 0) + { break; } } } } - } else // WORST_FIT, FIRST_FIT + } + else // WORST_FIT, 
FIRST_FIT { // Backward order in m_Blocks - prefer blocks with largest amount of free space. - for (size_t blockIndex = m_Blocks.size(); blockIndex--;) { - VmaDeviceMemoryBlock *const pCurrBlock = m_Blocks[blockIndex]; + for(size_t blockIndex = m_Blocks.size(); blockIndex--; ) + { + VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex]; VMA_ASSERT(pCurrBlock); VmaAllocationRequest currRequest = {}; - if (pCurrBlock->m_pMetadata->CreateAllocationRequest( - currentFrameIndex, - m_FrameInUseCount, - m_BufferImageGranularity, - size, - alignment, - (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0, - suballocType, - canMakeOtherLost, - strategy, - &currRequest)) { + if(pCurrBlock->m_pMetadata->CreateAllocationRequest( + currentFrameIndex, + m_FrameInUseCount, + m_BufferImageGranularity, + size, + alignment, + (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0, + suballocType, + canMakeOtherLost, + strategy, + &currRequest)) + { const VkDeviceSize currRequestCost = currRequest.CalcCost(); - if (pBestRequestBlock == VMA_NULL || - currRequestCost < bestRequestCost || - strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT) { + if(pBestRequestBlock == VMA_NULL || + currRequestCost < bestRequestCost || + strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT) + { pBestRequestBlock = pCurrBlock; bestRequest = currRequest; bestRequestCost = currRequestCost; - if (bestRequestCost == 0 || - strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT) { + if(bestRequestCost == 0 || + strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT) + { break; } } @@ -10895,49 +12370,63 @@ VkResult VmaBlockVector::AllocatePage( } } - if (pBestRequestBlock != VMA_NULL) { - if (mapped) { + if(pBestRequestBlock != VMA_NULL) + { + if(mapped) + { VkResult res = pBestRequestBlock->Map(m_hAllocator, 1, VMA_NULL); - if (res != VK_SUCCESS) { + if(res != VK_SUCCESS) + { return res; } } - if (pBestRequestBlock->m_pMetadata->MakeRequestedAllocationsLost( - 
currentFrameIndex, - m_FrameInUseCount, - &bestRequest)) { - // We no longer have an empty Allocation. - if (pBestRequestBlock->m_pMetadata->IsEmpty()) { - m_HasEmptyBlock = false; - } + if(pBestRequestBlock->m_pMetadata->MakeRequestedAllocationsLost( + currentFrameIndex, + m_FrameInUseCount, + &bestRequest)) + { // Allocate from this pBlock. - *pAllocation = m_hAllocator->m_AllocationObjectAllocator.Allocate(); - (*pAllocation)->Ctor(currentFrameIndex, isUserDataString); + *pAllocation = m_hAllocator->m_AllocationObjectAllocator.Allocate(currentFrameIndex, isUserDataString); pBestRequestBlock->m_pMetadata->Alloc(bestRequest, suballocType, size, *pAllocation); - (*pAllocation)->InitBlockAllocation(pBestRequestBlock, bestRequest.offset, alignment, size, suballocType, mapped, (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0); + UpdateHasEmptyBlock(); + (*pAllocation)->InitBlockAllocation( + pBestRequestBlock, + bestRequest.offset, + alignment, + size, + m_MemoryTypeIndex, + suballocType, + mapped, + (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0); VMA_HEAVY_ASSERT(pBestRequestBlock->Validate()); VMA_DEBUG_LOG(" Returned from existing block"); (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData); - if (VMA_DEBUG_INITIALIZE_ALLOCATIONS) { + m_hAllocator->m_Budget.AddAllocation(m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex), size); + if(VMA_DEBUG_INITIALIZE_ALLOCATIONS) + { m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED); } - if (IsCorruptionDetectionEnabled()) { + if(IsCorruptionDetectionEnabled()) + { VkResult res = pBestRequestBlock->WriteMagicValueAroundAllocation(m_hAllocator, bestRequest.offset, size); VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value."); } return VK_SUCCESS; } // else: Some allocations must have been touched while we are here. Next try. 
- } else { + } + else + { // Could not find place in any of the blocks - break outer loop. break; } } /* Maximum number of tries exceeded - a very unlike event when many other - threads are simultaneously touching allocations making it impossible to make - lost at the same time as we try to allocate. */ - if (tryIndex == VMA_ALLOCATION_TRY_COUNT) { + threads are simultaneously touching allocations making it impossible to make + lost at the same time as we try to allocate. */ + if(tryIndex == VMA_ALLOCATION_TRY_COUNT) + { return VK_ERROR_TOO_MANY_OBJECTS; } } @@ -10946,21 +12435,32 @@ VkResult VmaBlockVector::AllocatePage( } void VmaBlockVector::Free( - VmaAllocation hAllocation) { - VmaDeviceMemoryBlock *pBlockToDelete = VMA_NULL; + const VmaAllocation hAllocation) +{ + VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL; + + bool budgetExceeded = false; + { + const uint32_t heapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex); + VmaBudget heapBudget = {}; + m_hAllocator->GetBudget(&heapBudget, heapIndex, 1); + budgetExceeded = heapBudget.usage >= heapBudget.budget; + } // Scope for lock. { VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex); - VmaDeviceMemoryBlock *pBlock = hAllocation->GetBlock(); + VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock(); - if (IsCorruptionDetectionEnabled()) { + if(IsCorruptionDetectionEnabled()) + { VkResult res = pBlock->ValidateMagicValueAroundAllocation(m_hAllocator, hAllocation->GetOffset(), hAllocation->GetSize()); VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to validate magic value."); } - if (hAllocation->IsPersistentMap()) { + if(hAllocation->IsPersistentMap()) + { pBlock->Unmap(m_hAllocator, 1); } @@ -10969,55 +12469,64 @@ void VmaBlockVector::Free( VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", m_MemoryTypeIndex); + const bool canDeleteBlock = m_Blocks.size() > m_MinBlockCount; // pBlock became empty after this deallocation. 
- if (pBlock->m_pMetadata->IsEmpty()) { - // Already has empty Allocation. We don't want to have two, so delete this one. - if (m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount) { + if(pBlock->m_pMetadata->IsEmpty()) + { + // Already has empty block. We don't want to have two, so delete this one. + if((m_HasEmptyBlock || budgetExceeded) && canDeleteBlock) + { pBlockToDelete = pBlock; Remove(pBlock); } - // We now have first empty block. - else { - m_HasEmptyBlock = true; - } + // else: We now have an empty block - leave it. } // pBlock didn't become empty, but we have another empty block - find and free that one. // (This is optional, heuristics.) - else if (m_HasEmptyBlock) { - VmaDeviceMemoryBlock *pLastBlock = m_Blocks.back(); - if (pLastBlock->m_pMetadata->IsEmpty() && m_Blocks.size() > m_MinBlockCount) { + else if(m_HasEmptyBlock && canDeleteBlock) + { + VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back(); + if(pLastBlock->m_pMetadata->IsEmpty()) + { pBlockToDelete = pLastBlock; m_Blocks.pop_back(); - m_HasEmptyBlock = false; } } + UpdateHasEmptyBlock(); IncrementallySortBlocks(); } - // Destruction of a free Allocation. Deferred until this point, outside of mutex + // Destruction of a free block. Deferred until this point, outside of mutex // lock, for performance reason. 
- if (pBlockToDelete != VMA_NULL) { - VMA_DEBUG_LOG(" Deleted empty allocation"); + if(pBlockToDelete != VMA_NULL) + { + VMA_DEBUG_LOG(" Deleted empty block"); pBlockToDelete->Destroy(m_hAllocator); vma_delete(m_hAllocator, pBlockToDelete); } } -VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const { +VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const +{ VkDeviceSize result = 0; - for (size_t i = m_Blocks.size(); i--;) { + for(size_t i = m_Blocks.size(); i--; ) + { result = VMA_MAX(result, m_Blocks[i]->m_pMetadata->GetSize()); - if (result >= m_PreferredBlockSize) { + if(result >= m_PreferredBlockSize) + { break; } } return result; } -void VmaBlockVector::Remove(VmaDeviceMemoryBlock *pBlock) { - for (uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex) { - if (m_Blocks[blockIndex] == pBlock) { +void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock) +{ + for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex) + { + if(m_Blocks[blockIndex] == pBlock) + { VmaVectorRemove(m_Blocks, blockIndex); return; } @@ -11025,11 +12534,15 @@ void VmaBlockVector::Remove(VmaDeviceMemoryBlock *pBlock) { VMA_ASSERT(0); } -void VmaBlockVector::IncrementallySortBlocks() { - if (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT) { +void VmaBlockVector::IncrementallySortBlocks() +{ + if(m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT) + { // Bubble sort only until first swap. 
- for (size_t i = 1; i < m_Blocks.size(); ++i) { - if (m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() > m_Blocks[i]->m_pMetadata->GetSumFreeSize()) { + for(size_t i = 1; i < m_Blocks.size(); ++i) + { + if(m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() > m_Blocks[i]->m_pMetadata->GetSumFreeSize()) + { VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]); return; } @@ -11038,57 +12551,67 @@ void VmaBlockVector::IncrementallySortBlocks() { } VkResult VmaBlockVector::AllocateFromBlock( - VmaDeviceMemoryBlock *pBlock, - uint32_t currentFrameIndex, - VkDeviceSize size, - VkDeviceSize alignment, - VmaAllocationCreateFlags allocFlags, - void *pUserData, - VmaSuballocationType suballocType, - uint32_t strategy, - VmaAllocation *pAllocation) { + VmaDeviceMemoryBlock* pBlock, + uint32_t currentFrameIndex, + VkDeviceSize size, + VkDeviceSize alignment, + VmaAllocationCreateFlags allocFlags, + void* pUserData, + VmaSuballocationType suballocType, + uint32_t strategy, + VmaAllocation* pAllocation) +{ VMA_ASSERT((allocFlags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) == 0); const bool isUpperAddress = (allocFlags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0; const bool mapped = (allocFlags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0; const bool isUserDataString = (allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0; VmaAllocationRequest currRequest = {}; - if (pBlock->m_pMetadata->CreateAllocationRequest( - currentFrameIndex, - m_FrameInUseCount, - m_BufferImageGranularity, - size, - alignment, - isUpperAddress, - suballocType, - false, // canMakeOtherLost - strategy, - &currRequest)) { + if(pBlock->m_pMetadata->CreateAllocationRequest( + currentFrameIndex, + m_FrameInUseCount, + m_BufferImageGranularity, + size, + alignment, + isUpperAddress, + suballocType, + false, // canMakeOtherLost + strategy, + &currRequest)) + { // Allocate from pCurrBlock. 
VMA_ASSERT(currRequest.itemsToMakeLostCount == 0); - if (mapped) { + if(mapped) + { VkResult res = pBlock->Map(m_hAllocator, 1, VMA_NULL); - if (res != VK_SUCCESS) { + if(res != VK_SUCCESS) + { return res; } } - - // We no longer have an empty Allocation. - if (pBlock->m_pMetadata->IsEmpty()) { - m_HasEmptyBlock = false; - } - - *pAllocation = m_hAllocator->m_AllocationObjectAllocator.Allocate(); - (*pAllocation)->Ctor(currentFrameIndex, isUserDataString); + + *pAllocation = m_hAllocator->m_AllocationObjectAllocator.Allocate(currentFrameIndex, isUserDataString); pBlock->m_pMetadata->Alloc(currRequest, suballocType, size, *pAllocation); - (*pAllocation)->InitBlockAllocation(pBlock, currRequest.offset, alignment, size, suballocType, mapped, (allocFlags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0); + UpdateHasEmptyBlock(); + (*pAllocation)->InitBlockAllocation( + pBlock, + currRequest.offset, + alignment, + size, + m_MemoryTypeIndex, + suballocType, + mapped, + (allocFlags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0); VMA_HEAVY_ASSERT(pBlock->Validate()); (*pAllocation)->SetUserData(m_hAllocator, pUserData); - if (VMA_DEBUG_INITIALIZE_ALLOCATIONS) { + m_hAllocator->m_Budget.AddAllocation(m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex), size); + if(VMA_DEBUG_INITIALIZE_ALLOCATIONS) + { m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED); } - if (IsCorruptionDetectionEnabled()) { + if(IsCorruptionDetectionEnabled()) + { VkResult res = pBlock->WriteMagicValueAroundAllocation(m_hAllocator, currRequest.offset, size); VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value."); } @@ -11097,31 +12620,34 @@ VkResult VmaBlockVector::AllocateFromBlock( return VK_ERROR_OUT_OF_DEVICE_MEMORY; } -VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t *pNewBlockIndex) { +VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex) +{ VkMemoryAllocateInfo allocInfo = { 
VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO }; allocInfo.memoryTypeIndex = m_MemoryTypeIndex; allocInfo.allocationSize = blockSize; VkDeviceMemory mem = VK_NULL_HANDLE; VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem); - if (res < 0) { + if(res < 0) + { return res; } // New VkDeviceMemory successfully created. // Create new Allocation for it. - VmaDeviceMemoryBlock *const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator); + VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator); pBlock->Init( - m_hAllocator, - m_hParentPool, - m_MemoryTypeIndex, - mem, - allocInfo.allocationSize, - m_NextBlockId++, - m_Algorithm); + m_hAllocator, + m_hParentPool, + m_MemoryTypeIndex, + mem, + allocInfo.allocationSize, + m_NextBlockId++, + m_Algorithm); m_Blocks.push_back(pBlock); - if (pNewBlockIndex != VMA_NULL) { + if(pNewBlockIndex != VMA_NULL) + { *pNewBlockIndex = m_Blocks.size() - 1; } @@ -11129,28 +12655,32 @@ VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t *pNewBlockIn } void VmaBlockVector::ApplyDefragmentationMovesCpu( - class VmaBlockVectorDefragmentationContext *pDefragCtx, - const VmaVector<VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> > &moves) { + class VmaBlockVectorDefragmentationContext* pDefragCtx, + const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves) +{ const size_t blockCount = m_Blocks.size(); const bool isNonCoherent = m_hAllocator->IsMemoryTypeNonCoherent(m_MemoryTypeIndex); - enum BLOCK_FLAG { + enum BLOCK_FLAG + { BLOCK_FLAG_USED = 0x00000001, BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION = 0x00000002, }; - struct BlockInfo { + struct BlockInfo + { uint32_t flags; - void *pMappedData; + void* pMappedData; }; - VmaVector<BlockInfo, VmaStlAllocator<BlockInfo> > - blockInfo(blockCount, VmaStlAllocator<BlockInfo>(m_hAllocator->GetAllocationCallbacks())); + VmaVector< BlockInfo, VmaStlAllocator<BlockInfo> > + 
blockInfo(blockCount, BlockInfo(), VmaStlAllocator<BlockInfo>(m_hAllocator->GetAllocationCallbacks())); memset(blockInfo.data(), 0, blockCount * sizeof(BlockInfo)); // Go over all moves. Mark blocks that are used with BLOCK_FLAG_USED. const size_t moveCount = moves.size(); - for (size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex) { - const VmaDefragmentationMove &move = moves[moveIndex]; + for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex) + { + const VmaDefragmentationMove& move = moves[moveIndex]; blockInfo[move.srcBlockIndex].flags |= BLOCK_FLAG_USED; blockInfo[move.dstBlockIndex].flags |= BLOCK_FLAG_USED; } @@ -11158,15 +12688,19 @@ void VmaBlockVector::ApplyDefragmentationMovesCpu( VMA_ASSERT(pDefragCtx->res == VK_SUCCESS); // Go over all blocks. Get mapped pointer or map if necessary. - for (size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex) { - BlockInfo &currBlockInfo = blockInfo[blockIndex]; - VmaDeviceMemoryBlock *pBlock = m_Blocks[blockIndex]; - if ((currBlockInfo.flags & BLOCK_FLAG_USED) != 0) { + for(size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex) + { + BlockInfo& currBlockInfo = blockInfo[blockIndex]; + VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex]; + if((currBlockInfo.flags & BLOCK_FLAG_USED) != 0) + { currBlockInfo.pMappedData = pBlock->GetMappedData(); // It is not originally mapped - map it. - if (currBlockInfo.pMappedData == VMA_NULL) { + if(currBlockInfo.pMappedData == VMA_NULL) + { pDefragCtx->res = pBlock->Map(m_hAllocator, 1, &currBlockInfo.pMappedData); - if (pDefragCtx->res == VK_SUCCESS) { + if(pDefragCtx->res == VK_SUCCESS) + { currBlockInfo.flags |= BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION; } } @@ -11174,48 +12708,53 @@ void VmaBlockVector::ApplyDefragmentationMovesCpu( } // Go over all moves. Do actual data transfer. 
- if (pDefragCtx->res == VK_SUCCESS) { + if(pDefragCtx->res == VK_SUCCESS) + { const VkDeviceSize nonCoherentAtomSize = m_hAllocator->m_PhysicalDeviceProperties.limits.nonCoherentAtomSize; VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE }; - for (size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex) { - const VmaDefragmentationMove &move = moves[moveIndex]; + for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex) + { + const VmaDefragmentationMove& move = moves[moveIndex]; - const BlockInfo &srcBlockInfo = blockInfo[move.srcBlockIndex]; - const BlockInfo &dstBlockInfo = blockInfo[move.dstBlockIndex]; + const BlockInfo& srcBlockInfo = blockInfo[move.srcBlockIndex]; + const BlockInfo& dstBlockInfo = blockInfo[move.dstBlockIndex]; VMA_ASSERT(srcBlockInfo.pMappedData && dstBlockInfo.pMappedData); // Invalidate source. - if (isNonCoherent) { - VmaDeviceMemoryBlock *const pSrcBlock = m_Blocks[move.srcBlockIndex]; + if(isNonCoherent) + { + VmaDeviceMemoryBlock* const pSrcBlock = m_Blocks[move.srcBlockIndex]; memRange.memory = pSrcBlock->GetDeviceMemory(); memRange.offset = VmaAlignDown(move.srcOffset, nonCoherentAtomSize); memRange.size = VMA_MIN( - VmaAlignUp(move.size + (move.srcOffset - memRange.offset), nonCoherentAtomSize), - pSrcBlock->m_pMetadata->GetSize() - memRange.offset); + VmaAlignUp(move.size + (move.srcOffset - memRange.offset), nonCoherentAtomSize), + pSrcBlock->m_pMetadata->GetSize() - memRange.offset); (*m_hAllocator->GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange); } // THE PLACE WHERE ACTUAL DATA COPY HAPPENS. 
memmove( - reinterpret_cast<char *>(dstBlockInfo.pMappedData) + move.dstOffset, - reinterpret_cast<char *>(srcBlockInfo.pMappedData) + move.srcOffset, - static_cast<size_t>(move.size)); + reinterpret_cast<char*>(dstBlockInfo.pMappedData) + move.dstOffset, + reinterpret_cast<char*>(srcBlockInfo.pMappedData) + move.srcOffset, + static_cast<size_t>(move.size)); - if (IsCorruptionDetectionEnabled()) { + if(IsCorruptionDetectionEnabled()) + { VmaWriteMagicValue(dstBlockInfo.pMappedData, move.dstOffset - VMA_DEBUG_MARGIN); VmaWriteMagicValue(dstBlockInfo.pMappedData, move.dstOffset + move.size); } // Flush destination. - if (isNonCoherent) { - VmaDeviceMemoryBlock *const pDstBlock = m_Blocks[move.dstBlockIndex]; + if(isNonCoherent) + { + VmaDeviceMemoryBlock* const pDstBlock = m_Blocks[move.dstBlockIndex]; memRange.memory = pDstBlock->GetDeviceMemory(); memRange.offset = VmaAlignDown(move.dstOffset, nonCoherentAtomSize); memRange.size = VMA_MIN( - VmaAlignUp(move.size + (move.dstOffset - memRange.offset), nonCoherentAtomSize), - pDstBlock->m_pMetadata->GetSize() - memRange.offset); + VmaAlignUp(move.size + (move.dstOffset - memRange.offset), nonCoherentAtomSize), + pDstBlock->m_pMetadata->GetSize() - memRange.offset); (*m_hAllocator->GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange); } } @@ -11223,19 +12762,22 @@ void VmaBlockVector::ApplyDefragmentationMovesCpu( // Go over all blocks in reverse order. Unmap those that were mapped just for defragmentation. // Regardless of pCtx->res == VK_SUCCESS. 
- for (size_t blockIndex = blockCount; blockIndex--;) { - const BlockInfo &currBlockInfo = blockInfo[blockIndex]; - if ((currBlockInfo.flags & BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION) != 0) { - VmaDeviceMemoryBlock *pBlock = m_Blocks[blockIndex]; + for(size_t blockIndex = blockCount; blockIndex--; ) + { + const BlockInfo& currBlockInfo = blockInfo[blockIndex]; + if((currBlockInfo.flags & BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION) != 0) + { + VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex]; pBlock->Unmap(m_hAllocator, 1); } } } void VmaBlockVector::ApplyDefragmentationMovesGpu( - class VmaBlockVectorDefragmentationContext *pDefragCtx, - const VmaVector<VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> > &moves, - VkCommandBuffer commandBuffer) { + class VmaBlockVectorDefragmentationContext* pDefragCtx, + VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves, + VkCommandBuffer commandBuffer) +{ const size_t blockCount = m_Blocks.size(); pDefragCtx->blockContexts.resize(blockCount); @@ -11243,10 +12785,16 @@ void VmaBlockVector::ApplyDefragmentationMovesGpu( // Go over all moves. Mark blocks that are used with BLOCK_FLAG_USED. 
const size_t moveCount = moves.size(); - for (size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex) { - const VmaDefragmentationMove &move = moves[moveIndex]; - pDefragCtx->blockContexts[move.srcBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED; - pDefragCtx->blockContexts[move.dstBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED; + for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex) + { + const VmaDefragmentationMove& move = moves[moveIndex]; + + //if(move.type == VMA_ALLOCATION_TYPE_UNKNOWN) + { + // Old school move still require us to map the whole block + pDefragCtx->blockContexts[move.srcBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED; + pDefragCtx->blockContexts[move.dstBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED; + } } VMA_ASSERT(pDefragCtx->res == VK_SUCCESS); @@ -11256,54 +12804,63 @@ void VmaBlockVector::ApplyDefragmentationMovesGpu( VkBufferCreateInfo bufCreateInfo; VmaFillGpuDefragmentationBufferCreateInfo(bufCreateInfo); - for (size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex) { - VmaBlockDefragmentationContext &currBlockCtx = pDefragCtx->blockContexts[blockIndex]; - VmaDeviceMemoryBlock *pBlock = m_Blocks[blockIndex]; - if ((currBlockCtx.flags & VmaBlockDefragmentationContext::BLOCK_FLAG_USED) != 0) { + for(size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex) + { + VmaBlockDefragmentationContext& currBlockCtx = pDefragCtx->blockContexts[blockIndex]; + VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex]; + if((currBlockCtx.flags & VmaBlockDefragmentationContext::BLOCK_FLAG_USED) != 0) + { bufCreateInfo.size = pBlock->m_pMetadata->GetSize(); pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkCreateBuffer)( - m_hAllocator->m_hDevice, &bufCreateInfo, m_hAllocator->GetAllocationCallbacks(), &currBlockCtx.hBuffer); - if (pDefragCtx->res == VK_SUCCESS) { + 
m_hAllocator->m_hDevice, &bufCreateInfo, m_hAllocator->GetAllocationCallbacks(), &currBlockCtx.hBuffer); + if(pDefragCtx->res == VK_SUCCESS) + { pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkBindBufferMemory)( - m_hAllocator->m_hDevice, currBlockCtx.hBuffer, pBlock->GetDeviceMemory(), 0); + m_hAllocator->m_hDevice, currBlockCtx.hBuffer, pBlock->GetDeviceMemory(), 0); } } } } // Go over all moves. Post data transfer commands to command buffer. - if (pDefragCtx->res == VK_SUCCESS) { - for (size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex) { - const VmaDefragmentationMove &move = moves[moveIndex]; + if(pDefragCtx->res == VK_SUCCESS) + { + for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex) + { + const VmaDefragmentationMove& move = moves[moveIndex]; - const VmaBlockDefragmentationContext &srcBlockCtx = pDefragCtx->blockContexts[move.srcBlockIndex]; - const VmaBlockDefragmentationContext &dstBlockCtx = pDefragCtx->blockContexts[move.dstBlockIndex]; + const VmaBlockDefragmentationContext& srcBlockCtx = pDefragCtx->blockContexts[move.srcBlockIndex]; + const VmaBlockDefragmentationContext& dstBlockCtx = pDefragCtx->blockContexts[move.dstBlockIndex]; VMA_ASSERT(srcBlockCtx.hBuffer && dstBlockCtx.hBuffer); VkBufferCopy region = { move.srcOffset, move.dstOffset, - move.size - }; + move.size }; (*m_hAllocator->GetVulkanFunctions().vkCmdCopyBuffer)( - commandBuffer, srcBlockCtx.hBuffer, dstBlockCtx.hBuffer, 1, ®ion); + commandBuffer, srcBlockCtx.hBuffer, dstBlockCtx.hBuffer, 1, ®ion); } } // Save buffers to defrag context for later destruction. 
- if (pDefragCtx->res == VK_SUCCESS && moveCount > 0) { + if(pDefragCtx->res == VK_SUCCESS && moveCount > 0) + { pDefragCtx->res = VK_NOT_READY; } } -void VmaBlockVector::FreeEmptyBlocks(VmaDefragmentationStats *pDefragmentationStats) { - m_HasEmptyBlock = false; - for (size_t blockIndex = m_Blocks.size(); blockIndex--;) { - VmaDeviceMemoryBlock *pBlock = m_Blocks[blockIndex]; - if (pBlock->m_pMetadata->IsEmpty()) { - if (m_Blocks.size() > m_MinBlockCount) { - if (pDefragmentationStats != VMA_NULL) { +void VmaBlockVector::FreeEmptyBlocks(VmaDefragmentationStats* pDefragmentationStats) +{ + for(size_t blockIndex = m_Blocks.size(); blockIndex--; ) + { + VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex]; + if(pBlock->m_pMetadata->IsEmpty()) + { + if(m_Blocks.size() > m_MinBlockCount) + { + if(pDefragmentationStats != VMA_NULL) + { ++pDefragmentationStats->deviceMemoryBlocksFreed; pDefragmentationStats->bytesFreed += pBlock->m_pMetadata->GetSize(); } @@ -11311,21 +12868,47 @@ void VmaBlockVector::FreeEmptyBlocks(VmaDefragmentationStats *pDefragmentationSt VmaVectorRemove(m_Blocks, blockIndex); pBlock->Destroy(m_hAllocator); vma_delete(m_hAllocator, pBlock); - } else { - m_HasEmptyBlock = true; } + else + { + break; + } + } + } + UpdateHasEmptyBlock(); +} + +void VmaBlockVector::UpdateHasEmptyBlock() +{ + m_HasEmptyBlock = false; + for(size_t index = 0, count = m_Blocks.size(); index < count; ++index) + { + VmaDeviceMemoryBlock* const pBlock = m_Blocks[index]; + if(pBlock->m_pMetadata->IsEmpty()) + { + m_HasEmptyBlock = true; + break; } } } #if VMA_STATS_STRING_ENABLED -void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter &json) { +void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json) +{ VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex); json.BeginObject(); - if (m_IsCustomPool) { + if(IsCustomPool()) + { + const char* poolName = m_hParentPool->GetName(); + if(poolName != VMA_NULL && poolName[0] != '\0') + { + json.WriteString("Name"); + 
json.WriteString(poolName); + } + json.WriteString("MemoryTypeIndex"); json.WriteNumber(m_MemoryTypeIndex); @@ -11334,11 +12917,13 @@ void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter &json) { json.WriteString("BlockCount"); json.BeginObject(true); - if (m_MinBlockCount > 0) { + if(m_MinBlockCount > 0) + { json.WriteString("Min"); json.WriteNumber((uint64_t)m_MinBlockCount); } - if (m_MaxBlockCount < SIZE_MAX) { + if(m_MaxBlockCount < SIZE_MAX) + { json.WriteString("Max"); json.WriteNumber((uint64_t)m_MaxBlockCount); } @@ -11346,23 +12931,28 @@ void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter &json) { json.WriteNumber((uint64_t)m_Blocks.size()); json.EndObject(); - if (m_FrameInUseCount > 0) { + if(m_FrameInUseCount > 0) + { json.WriteString("FrameInUseCount"); json.WriteNumber(m_FrameInUseCount); } - if (m_Algorithm != 0) { + if(m_Algorithm != 0) + { json.WriteString("Algorithm"); json.WriteString(VmaAlgorithmToStr(m_Algorithm)); } - } else { + } + else + { json.WriteString("PreferredBlockSize"); json.WriteNumber(m_PreferredBlockSize); } json.WriteString("Blocks"); json.BeginObject(); - for (size_t i = 0; i < m_Blocks.size(); ++i) { + for(size_t i = 0; i < m_Blocks.size(); ++i) + { json.BeginString(); json.ContinueString(m_Blocks[i]->GetId()); json.EndString(); @@ -11377,121 +12967,206 @@ void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter &json) { #endif // #if VMA_STATS_STRING_ENABLED void VmaBlockVector::Defragment( - class VmaBlockVectorDefragmentationContext *pCtx, - VmaDefragmentationStats *pStats, - VkDeviceSize &maxCpuBytesToMove, uint32_t &maxCpuAllocationsToMove, - VkDeviceSize &maxGpuBytesToMove, uint32_t &maxGpuAllocationsToMove, - VkCommandBuffer commandBuffer) { + class VmaBlockVectorDefragmentationContext* pCtx, + VmaDefragmentationStats* pStats, VmaDefragmentationFlags flags, + VkDeviceSize& maxCpuBytesToMove, uint32_t& maxCpuAllocationsToMove, + VkDeviceSize& maxGpuBytesToMove, uint32_t& maxGpuAllocationsToMove, + 
VkCommandBuffer commandBuffer) +{ pCtx->res = VK_SUCCESS; - + const VkMemoryPropertyFlags memPropFlags = - m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags; + m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags; const bool isHostVisible = (memPropFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0; - const bool isHostCoherent = (memPropFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0; const bool canDefragmentOnCpu = maxCpuBytesToMove > 0 && maxCpuAllocationsToMove > 0 && - isHostVisible; + isHostVisible; const bool canDefragmentOnGpu = maxGpuBytesToMove > 0 && maxGpuAllocationsToMove > 0 && - !IsCorruptionDetectionEnabled() && - ((1u << m_MemoryTypeIndex) & m_hAllocator->GetGpuDefragmentationMemoryTypeBits()) != 0; + !IsCorruptionDetectionEnabled() && + ((1u << m_MemoryTypeIndex) & m_hAllocator->GetGpuDefragmentationMemoryTypeBits()) != 0; // There are options to defragment this memory type. - if (canDefragmentOnCpu || canDefragmentOnGpu) { + if(canDefragmentOnCpu || canDefragmentOnGpu) + { bool defragmentOnGpu; // There is only one option to defragment this memory type. - if (canDefragmentOnGpu != canDefragmentOnCpu) { + if(canDefragmentOnGpu != canDefragmentOnCpu) + { defragmentOnGpu = canDefragmentOnGpu; } // Both options are available: Heuristics to choose the best one. - else { + else + { defragmentOnGpu = (memPropFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0 || - m_hAllocator->IsIntegratedGpu(); + m_hAllocator->IsIntegratedGpu(); } bool overlappingMoveSupported = !defragmentOnGpu; - if (m_hAllocator->m_UseMutex) { - m_Mutex.LockWrite(); - pCtx->mutexLocked = true; + if(m_hAllocator->m_UseMutex) + { + if(flags & VMA_DEFRAGMENTATION_FLAG_INCREMENTAL) + { + if(!m_Mutex.TryLockWrite()) + { + pCtx->res = VK_ERROR_INITIALIZATION_FAILED; + return; + } + } + else + { + m_Mutex.LockWrite(); + pCtx->mutexLocked = true; + } } - pCtx->Begin(overlappingMoveSupported); + pCtx->Begin(overlappingMoveSupported, flags); // Defragment. 
const VkDeviceSize maxBytesToMove = defragmentOnGpu ? maxGpuBytesToMove : maxCpuBytesToMove; const uint32_t maxAllocationsToMove = defragmentOnGpu ? maxGpuAllocationsToMove : maxCpuAllocationsToMove; - VmaVector<VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> > moves = - VmaVector<VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >(VmaStlAllocator<VmaDefragmentationMove>(m_hAllocator->GetAllocationCallbacks())); - pCtx->res = pCtx->GetAlgorithm()->Defragment(moves, maxBytesToMove, maxAllocationsToMove); + pCtx->res = pCtx->GetAlgorithm()->Defragment(pCtx->defragmentationMoves, maxBytesToMove, maxAllocationsToMove, flags); // Accumulate statistics. - if (pStats != VMA_NULL) { + if(pStats != VMA_NULL) + { const VkDeviceSize bytesMoved = pCtx->GetAlgorithm()->GetBytesMoved(); const uint32_t allocationsMoved = pCtx->GetAlgorithm()->GetAllocationsMoved(); pStats->bytesMoved += bytesMoved; pStats->allocationsMoved += allocationsMoved; VMA_ASSERT(bytesMoved <= maxBytesToMove); VMA_ASSERT(allocationsMoved <= maxAllocationsToMove); - if (defragmentOnGpu) { + if(defragmentOnGpu) + { maxGpuBytesToMove -= bytesMoved; maxGpuAllocationsToMove -= allocationsMoved; - } else { + } + else + { maxCpuBytesToMove -= bytesMoved; maxCpuAllocationsToMove -= allocationsMoved; } } - if (pCtx->res >= VK_SUCCESS) { - if (defragmentOnGpu) { - ApplyDefragmentationMovesGpu(pCtx, moves, commandBuffer); - } else { - ApplyDefragmentationMovesCpu(pCtx, moves); + if(flags & VMA_DEFRAGMENTATION_FLAG_INCREMENTAL) + { + if(m_hAllocator->m_UseMutex) + m_Mutex.UnlockWrite(); + + if(pCtx->res >= VK_SUCCESS && !pCtx->defragmentationMoves.empty()) + pCtx->res = VK_NOT_READY; + + return; + } + + if(pCtx->res >= VK_SUCCESS) + { + if(defragmentOnGpu) + { + ApplyDefragmentationMovesGpu(pCtx, pCtx->defragmentationMoves, commandBuffer); + } + else + { + ApplyDefragmentationMovesCpu(pCtx, pCtx->defragmentationMoves); } } } } void VmaBlockVector::DefragmentationEnd( - class 
VmaBlockVectorDefragmentationContext *pCtx, - VmaDefragmentationStats *pStats) { + class VmaBlockVectorDefragmentationContext* pCtx, + VmaDefragmentationStats* pStats) +{ // Destroy buffers. - for (size_t blockIndex = pCtx->blockContexts.size(); blockIndex--;) { - VmaBlockDefragmentationContext &blockCtx = pCtx->blockContexts[blockIndex]; - if (blockCtx.hBuffer) { + for(size_t blockIndex = pCtx->blockContexts.size(); blockIndex--; ) + { + VmaBlockDefragmentationContext& blockCtx = pCtx->blockContexts[blockIndex]; + if(blockCtx.hBuffer) + { (*m_hAllocator->GetVulkanFunctions().vkDestroyBuffer)( - m_hAllocator->m_hDevice, blockCtx.hBuffer, m_hAllocator->GetAllocationCallbacks()); + m_hAllocator->m_hDevice, blockCtx.hBuffer, m_hAllocator->GetAllocationCallbacks()); } } - if (pCtx->res >= VK_SUCCESS) { + if(pCtx->res >= VK_SUCCESS) + { FreeEmptyBlocks(pStats); } - if (pCtx->mutexLocked) { + if(pCtx->mutexLocked) + { VMA_ASSERT(m_hAllocator->m_UseMutex); m_Mutex.UnlockWrite(); } } -size_t VmaBlockVector::CalcAllocationCount() const { +uint32_t VmaBlockVector::ProcessDefragmentations( + class VmaBlockVectorDefragmentationContext *pCtx, + VmaDefragmentationPassMoveInfo* pMove, uint32_t maxMoves) +{ + VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex); + + const uint32_t moveCount = std::min(uint32_t(pCtx->defragmentationMoves.size()) - pCtx->defragmentationMovesProcessed, maxMoves); + + for(uint32_t i = 0; i < moveCount; ++ i) + { + VmaDefragmentationMove& move = pCtx->defragmentationMoves[pCtx->defragmentationMovesProcessed + i]; + + pMove->allocation = move.hAllocation; + pMove->memory = move.pDstBlock->GetDeviceMemory(); + pMove->offset = move.dstOffset; + + ++ pMove; + } + + pCtx->defragmentationMovesProcessed += moveCount; + + return moveCount; +} + +void VmaBlockVector::CommitDefragmentations( + class VmaBlockVectorDefragmentationContext *pCtx, + VmaDefragmentationStats* pStats) +{ + VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex); + + for(uint32_t 
i = pCtx->defragmentationMovesCommitted; i < pCtx->defragmentationMovesProcessed; ++ i) + { + const VmaDefragmentationMove &move = pCtx->defragmentationMoves[i]; + + move.pSrcBlock->m_pMetadata->FreeAtOffset(move.srcOffset); + move.hAllocation->ChangeBlockAllocation(m_hAllocator, move.pDstBlock, move.dstOffset); + } + + pCtx->defragmentationMovesCommitted = pCtx->defragmentationMovesProcessed; + FreeEmptyBlocks(pStats); +} + +size_t VmaBlockVector::CalcAllocationCount() const +{ size_t result = 0; - for (size_t i = 0; i < m_Blocks.size(); ++i) { + for(size_t i = 0; i < m_Blocks.size(); ++i) + { result += m_Blocks[i]->m_pMetadata->GetAllocationCount(); } return result; } -bool VmaBlockVector::IsBufferImageGranularityConflictPossible() const { - if (m_BufferImageGranularity == 1) { +bool VmaBlockVector::IsBufferImageGranularityConflictPossible() const +{ + if(m_BufferImageGranularity == 1) + { return false; } VmaSuballocationType lastSuballocType = VMA_SUBALLOCATION_TYPE_FREE; - for (size_t i = 0, count = m_Blocks.size(); i < count; ++i) { - VmaDeviceMemoryBlock *const pBlock = m_Blocks[i]; + for(size_t i = 0, count = m_Blocks.size(); i < count; ++i) + { + VmaDeviceMemoryBlock* const pBlock = m_Blocks[i]; VMA_ASSERT(m_Algorithm == 0); - VmaBlockMetadata_Generic *const pMetadata = (VmaBlockMetadata_Generic *)pBlock->m_pMetadata; - if (pMetadata->IsBufferImageGranularityConflictPossible(m_BufferImageGranularity, lastSuballocType)) { + VmaBlockMetadata_Generic* const pMetadata = (VmaBlockMetadata_Generic*)pBlock->m_pMetadata; + if(pMetadata->IsBufferImageGranularityConflictPossible(m_BufferImageGranularity, lastSuballocType)) + { return true; } } @@ -11499,45 +13174,54 @@ bool VmaBlockVector::IsBufferImageGranularityConflictPossible() const { } void VmaBlockVector::MakePoolAllocationsLost( - uint32_t currentFrameIndex, - size_t *pLostAllocationCount) { + uint32_t currentFrameIndex, + size_t* pLostAllocationCount) +{ VmaMutexLockWrite lock(m_Mutex, 
m_hAllocator->m_UseMutex); size_t lostAllocationCount = 0; - for (uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex) { - VmaDeviceMemoryBlock *const pBlock = m_Blocks[blockIndex]; + for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex) + { + VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex]; VMA_ASSERT(pBlock); lostAllocationCount += pBlock->m_pMetadata->MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount); } - if (pLostAllocationCount != VMA_NULL) { + if(pLostAllocationCount != VMA_NULL) + { *pLostAllocationCount = lostAllocationCount; } } -VkResult VmaBlockVector::CheckCorruption() { - if (!IsCorruptionDetectionEnabled()) { +VkResult VmaBlockVector::CheckCorruption() +{ + if(!IsCorruptionDetectionEnabled()) + { return VK_ERROR_FEATURE_NOT_PRESENT; } VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex); - for (uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex) { - VmaDeviceMemoryBlock *const pBlock = m_Blocks[blockIndex]; + for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex) + { + VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex]; VMA_ASSERT(pBlock); VkResult res = pBlock->CheckCorruption(m_hAllocator); - if (res != VK_SUCCESS) { + if(res != VK_SUCCESS) + { return res; } } return VK_SUCCESS; } -void VmaBlockVector::AddStats(VmaStats *pStats) { +void VmaBlockVector::AddStats(VmaStats* pStats) +{ const uint32_t memTypeIndex = m_MemoryTypeIndex; const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex); VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex); - for (uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex) { - const VmaDeviceMemoryBlock *const pBlock = m_Blocks[blockIndex]; + for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex) + { + const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex]; VMA_ASSERT(pBlock); VMA_HEAVY_ASSERT(pBlock->Validate()); VmaStatInfo allocationStatInfo; @@ 
-11552,20 +13236,22 @@ void VmaBlockVector::AddStats(VmaStats *pStats) { // VmaDefragmentationAlgorithm_Generic members definition VmaDefragmentationAlgorithm_Generic::VmaDefragmentationAlgorithm_Generic( - VmaAllocator hAllocator, - VmaBlockVector *pBlockVector, - uint32_t currentFrameIndex, - bool overlappingMoveSupported) : - VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex), - m_AllocationCount(0), - m_AllAllocations(false), - m_BytesMoved(0), - m_AllocationsMoved(0), - m_Blocks(VmaStlAllocator<BlockInfo *>(hAllocator->GetAllocationCallbacks())) { + VmaAllocator hAllocator, + VmaBlockVector* pBlockVector, + uint32_t currentFrameIndex, + bool overlappingMoveSupported) : + VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex), + m_AllocationCount(0), + m_AllAllocations(false), + m_BytesMoved(0), + m_AllocationsMoved(0), + m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks())) +{ // Create block info for each block. const size_t blockCount = m_pBlockVector->m_Blocks.size(); - for (size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex) { - BlockInfo *pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks()); + for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex) + { + BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks()); pBlockInfo->m_OriginalBlockIndex = blockIndex; pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex]; m_Blocks.push_back(pBlockInfo); @@ -11575,21 +13261,28 @@ VmaDefragmentationAlgorithm_Generic::VmaDefragmentationAlgorithm_Generic( VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess()); } -VmaDefragmentationAlgorithm_Generic::~VmaDefragmentationAlgorithm_Generic() { - for (size_t i = m_Blocks.size(); i--;) { +VmaDefragmentationAlgorithm_Generic::~VmaDefragmentationAlgorithm_Generic() +{ + for(size_t i = m_Blocks.size(); i--; ) + { vma_delete(m_hAllocator, m_Blocks[i]); } } -void 
VmaDefragmentationAlgorithm_Generic::AddAllocation(VmaAllocation hAlloc, VkBool32 *pChanged) { +void VmaDefragmentationAlgorithm_Generic::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) +{ // Now as we are inside VmaBlockVector::m_Mutex, we can make final check if this allocation was not lost. - if (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST) { - VmaDeviceMemoryBlock *pBlock = hAlloc->GetBlock(); + if(hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST) + { + VmaDeviceMemoryBlock* pBlock = hAlloc->GetBlock(); BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess()); - if (it != m_Blocks.end() && (*it)->m_pBlock == pBlock) { + if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock) + { AllocationInfo allocInfo = AllocationInfo(hAlloc, pChanged); (*it)->m_Allocations.push_back(allocInfo); - } else { + } + else + { VMA_ASSERT(0); } @@ -11598,10 +13291,13 @@ void VmaDefragmentationAlgorithm_Generic::AddAllocation(VmaAllocation hAlloc, Vk } VkResult VmaDefragmentationAlgorithm_Generic::DefragmentRound( - VmaVector<VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> > &moves, - VkDeviceSize maxBytesToMove, - uint32_t maxAllocationsToMove) { - if (m_Blocks.empty()) { + VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves, + VkDeviceSize maxBytesToMove, + uint32_t maxAllocationsToMove, + bool freeOldAllocations) +{ + if(m_Blocks.empty()) + { return VK_SUCCESS; } @@ -11616,38 +13312,46 @@ VkResult VmaDefragmentationAlgorithm_Generic::DefragmentRound( size_t srcBlockMinIndex = 0; // When FAST_ALGORITHM, move allocations from only last out of blocks that contain non-movable allocations. 
/* - if(m_AlgorithmFlags & VMA_DEFRAGMENTATION_FAST_ALGORITHM_BIT) - { - const size_t blocksWithNonMovableCount = CalcBlocksWithNonMovableCount(); - if(blocksWithNonMovableCount > 0) + if(m_AlgorithmFlags & VMA_DEFRAGMENTATION_FAST_ALGORITHM_BIT) { - srcBlockMinIndex = blocksWithNonMovableCount - 1; + const size_t blocksWithNonMovableCount = CalcBlocksWithNonMovableCount(); + if(blocksWithNonMovableCount > 0) + { + srcBlockMinIndex = blocksWithNonMovableCount - 1; + } } - } - */ + */ size_t srcBlockIndex = m_Blocks.size() - 1; size_t srcAllocIndex = SIZE_MAX; - for (;;) { + for(;;) + { // 1. Find next allocation to move. // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source". // 1.2. Then start from last to first m_Allocations. - while (srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size()) { - if (m_Blocks[srcBlockIndex]->m_Allocations.empty()) { + while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size()) + { + if(m_Blocks[srcBlockIndex]->m_Allocations.empty()) + { // Finished: no more allocations to process. - if (srcBlockIndex == srcBlockMinIndex) { + if(srcBlockIndex == srcBlockMinIndex) + { return VK_SUCCESS; - } else { + } + else + { --srcBlockIndex; srcAllocIndex = SIZE_MAX; } - } else { + } + else + { srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1; } } - - BlockInfo *pSrcBlockInfo = m_Blocks[srcBlockIndex]; - AllocationInfo &allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex]; + + BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex]; + AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex]; const VkDeviceSize size = allocInfo.m_hAllocation->GetSize(); const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset(); @@ -11655,48 +13359,59 @@ VkResult VmaDefragmentationAlgorithm_Generic::DefragmentRound( const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType(); // 2. 
Try to find new place for this allocation in preceding or current block. - for (size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex) { - BlockInfo *pDstBlockInfo = m_Blocks[dstBlockIndex]; + for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex) + { + BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex]; VmaAllocationRequest dstAllocRequest; - if (pDstBlockInfo->m_pBlock->m_pMetadata->CreateAllocationRequest( - m_CurrentFrameIndex, - m_pBlockVector->GetFrameInUseCount(), - m_pBlockVector->GetBufferImageGranularity(), - size, - alignment, - false, // upperAddress - suballocType, - false, // canMakeOtherLost - strategy, - &dstAllocRequest) && - MoveMakesSense( - dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset)) { + if(pDstBlockInfo->m_pBlock->m_pMetadata->CreateAllocationRequest( + m_CurrentFrameIndex, + m_pBlockVector->GetFrameInUseCount(), + m_pBlockVector->GetBufferImageGranularity(), + size, + alignment, + false, // upperAddress + suballocType, + false, // canMakeOtherLost + strategy, + &dstAllocRequest) && + MoveMakesSense( + dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset)) + { VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0); // Reached limit on number of allocations or bytes to move. 
- if ((m_AllocationsMoved + 1 > maxAllocationsToMove) || - (m_BytesMoved + size > maxBytesToMove)) { + if((m_AllocationsMoved + 1 > maxAllocationsToMove) || + (m_BytesMoved + size > maxBytesToMove)) + { return VK_SUCCESS; } - VmaDefragmentationMove move; + VmaDefragmentationMove move = {}; move.srcBlockIndex = pSrcBlockInfo->m_OriginalBlockIndex; move.dstBlockIndex = pDstBlockInfo->m_OriginalBlockIndex; move.srcOffset = srcOffset; move.dstOffset = dstAllocRequest.offset; move.size = size; + move.hAllocation = allocInfo.m_hAllocation; + move.pSrcBlock = pSrcBlockInfo->m_pBlock; + move.pDstBlock = pDstBlockInfo->m_pBlock; + moves.push_back(move); pDstBlockInfo->m_pBlock->m_pMetadata->Alloc( - dstAllocRequest, - suballocType, - size, - allocInfo.m_hAllocation); - pSrcBlockInfo->m_pBlock->m_pMetadata->FreeAtOffset(srcOffset); - - allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset); + dstAllocRequest, + suballocType, + size, + allocInfo.m_hAllocation); - if (allocInfo.m_pChanged != VMA_NULL) { + if(freeOldAllocations) + { + pSrcBlockInfo->m_pBlock->m_pMetadata->FreeAtOffset(srcOffset); + allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset); + } + + if(allocInfo.m_pChanged != VMA_NULL) + { *allocInfo.m_pChanged = VK_TRUE; } @@ -11711,23 +13426,32 @@ VkResult VmaDefragmentationAlgorithm_Generic::DefragmentRound( // If not processed, this allocInfo remains in pBlockInfo->m_Allocations for next round. 
- if (srcAllocIndex > 0) { + if(srcAllocIndex > 0) + { --srcAllocIndex; - } else { - if (srcBlockIndex > 0) { + } + else + { + if(srcBlockIndex > 0) + { --srcBlockIndex; srcAllocIndex = SIZE_MAX; - } else { + } + else + { return VK_SUCCESS; } } } } -size_t VmaDefragmentationAlgorithm_Generic::CalcBlocksWithNonMovableCount() const { +size_t VmaDefragmentationAlgorithm_Generic::CalcBlocksWithNonMovableCount() const +{ size_t result = 0; - for (size_t i = 0; i < m_Blocks.size(); ++i) { - if (m_Blocks[i]->m_HasNonMovableAllocations) { + for(size_t i = 0; i < m_Blocks.size(); ++i) + { + if(m_Blocks[i]->m_HasNonMovableAllocations) + { ++result; } } @@ -11735,23 +13459,30 @@ size_t VmaDefragmentationAlgorithm_Generic::CalcBlocksWithNonMovableCount() cons } VkResult VmaDefragmentationAlgorithm_Generic::Defragment( - VmaVector<VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> > &moves, - VkDeviceSize maxBytesToMove, - uint32_t maxAllocationsToMove) { - if (!m_AllAllocations && m_AllocationCount == 0) { + VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves, + VkDeviceSize maxBytesToMove, + uint32_t maxAllocationsToMove, + VmaDefragmentationFlags flags) +{ + if(!m_AllAllocations && m_AllocationCount == 0) + { return VK_SUCCESS; } const size_t blockCount = m_Blocks.size(); - for (size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex) { - BlockInfo *pBlockInfo = m_Blocks[blockIndex]; - - if (m_AllAllocations) { - VmaBlockMetadata_Generic *pMetadata = (VmaBlockMetadata_Generic *)pBlockInfo->m_pBlock->m_pMetadata; - for (VmaSuballocationList::const_iterator it = pMetadata->m_Suballocations.begin(); - it != pMetadata->m_Suballocations.end(); - ++it) { - if (it->type != VMA_SUBALLOCATION_TYPE_FREE) { + for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex) + { + BlockInfo* pBlockInfo = m_Blocks[blockIndex]; + + if(m_AllAllocations) + { + VmaBlockMetadata_Generic* pMetadata = 
(VmaBlockMetadata_Generic*)pBlockInfo->m_pBlock->m_pMetadata; + for(VmaSuballocationList::const_iterator it = pMetadata->m_Suballocations.begin(); + it != pMetadata->m_Suballocations.end(); + ++it) + { + if(it->type != VMA_SUBALLOCATION_TYPE_FREE) + { AllocationInfo allocInfo = AllocationInfo(it->hAllocation, VMA_NULL); pBlockInfo->m_Allocations.push_back(allocInfo); } @@ -11759,7 +13490,7 @@ VkResult VmaDefragmentationAlgorithm_Generic::Defragment( } pBlockInfo->CalcHasNonMovableAllocations(); - + // This is a choice based on research. // Option 1: pBlockInfo->SortAllocationsByOffsetDescending(); @@ -11775,8 +13506,9 @@ VkResult VmaDefragmentationAlgorithm_Generic::Defragment( // Execute defragmentation rounds (the main part). VkResult result = VK_SUCCESS; - for (uint32_t round = 0; (round < roundCount) && (result == VK_SUCCESS); ++round) { - result = DefragmentRound(moves, maxBytesToMove, maxAllocationsToMove); + for(uint32_t round = 0; (round < roundCount) && (result == VK_SUCCESS); ++round) + { + result = DefragmentRound(moves, maxBytesToMove, maxAllocationsToMove, !(flags & VMA_DEFRAGMENTATION_FLAG_INCREMENTAL)); } return result; @@ -11784,14 +13516,18 @@ VkResult VmaDefragmentationAlgorithm_Generic::Defragment( bool VmaDefragmentationAlgorithm_Generic::MoveMakesSense( size_t dstBlockIndex, VkDeviceSize dstOffset, - size_t srcBlockIndex, VkDeviceSize srcOffset) { - if (dstBlockIndex < srcBlockIndex) { + size_t srcBlockIndex, VkDeviceSize srcOffset) +{ + if(dstBlockIndex < srcBlockIndex) + { return true; } - if (dstBlockIndex > srcBlockIndex) { + if(dstBlockIndex > srcBlockIndex) + { return false; } - if (dstOffset < srcOffset) { + if(dstOffset < srcOffset) + { return true; } return false; @@ -11801,31 +13537,37 @@ bool VmaDefragmentationAlgorithm_Generic::MoveMakesSense( // VmaDefragmentationAlgorithm_Fast VmaDefragmentationAlgorithm_Fast::VmaDefragmentationAlgorithm_Fast( - VmaAllocator hAllocator, - VmaBlockVector *pBlockVector, - uint32_t currentFrameIndex, 
- bool overlappingMoveSupported) : - VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex), - m_OverlappingMoveSupported(overlappingMoveSupported), - m_AllocationCount(0), - m_AllAllocations(false), - m_BytesMoved(0), - m_AllocationsMoved(0), - m_BlockInfos(VmaStlAllocator<BlockInfo>(hAllocator->GetAllocationCallbacks())) { + VmaAllocator hAllocator, + VmaBlockVector* pBlockVector, + uint32_t currentFrameIndex, + bool overlappingMoveSupported) : + VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex), + m_OverlappingMoveSupported(overlappingMoveSupported), + m_AllocationCount(0), + m_AllAllocations(false), + m_BytesMoved(0), + m_AllocationsMoved(0), + m_BlockInfos(VmaStlAllocator<BlockInfo>(hAllocator->GetAllocationCallbacks())) +{ VMA_ASSERT(VMA_DEBUG_MARGIN == 0); + } -VmaDefragmentationAlgorithm_Fast::~VmaDefragmentationAlgorithm_Fast() { +VmaDefragmentationAlgorithm_Fast::~VmaDefragmentationAlgorithm_Fast() +{ } VkResult VmaDefragmentationAlgorithm_Fast::Defragment( - VmaVector<VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> > &moves, - VkDeviceSize maxBytesToMove, - uint32_t maxAllocationsToMove) { + VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves, + VkDeviceSize maxBytesToMove, + uint32_t maxAllocationsToMove, + VmaDefragmentationFlags flags) +{ VMA_ASSERT(m_AllAllocations || m_pBlockVector->CalcAllocationCount() == m_AllocationCount); const size_t blockCount = m_pBlockVector->GetBlockCount(); - if (blockCount == 0 || maxBytesToMove == 0 || maxAllocationsToMove == 0) { + if(blockCount == 0 || maxBytesToMove == 0 || maxAllocationsToMove == 0) + { return VK_SUCCESS; } @@ -11834,13 +13576,14 @@ VkResult VmaDefragmentationAlgorithm_Fast::Defragment( // Sort blocks in order from most destination. 
m_BlockInfos.resize(blockCount); - for (size_t i = 0; i < blockCount; ++i) { + for(size_t i = 0; i < blockCount; ++i) + { m_BlockInfos[i].origBlockIndex = i; } - VMA_SORT(m_BlockInfos.begin(), m_BlockInfos.end(), [this](const BlockInfo &lhs, const BlockInfo &rhs) -> bool { + VMA_SORT(m_BlockInfos.begin(), m_BlockInfos.end(), [this](const BlockInfo& lhs, const BlockInfo& rhs) -> bool { return m_pBlockVector->GetBlock(lhs.origBlockIndex)->m_pMetadata->GetSumFreeSize() < - m_pBlockVector->GetBlock(rhs.origBlockIndex)->m_pMetadata->GetSumFreeSize(); + m_pBlockVector->GetBlock(rhs.origBlockIndex)->m_pMetadata->GetSumFreeSize(); }); // THE MAIN ALGORITHM @@ -11849,39 +13592,45 @@ VkResult VmaDefragmentationAlgorithm_Fast::Defragment( size_t dstBlockInfoIndex = 0; size_t dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex; - VmaDeviceMemoryBlock *pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex); - VmaBlockMetadata_Generic *pDstMetadata = (VmaBlockMetadata_Generic *)pDstBlock->m_pMetadata; + VmaDeviceMemoryBlock* pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex); + VmaBlockMetadata_Generic* pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata; VkDeviceSize dstBlockSize = pDstMetadata->GetSize(); VkDeviceSize dstOffset = 0; bool end = false; - for (size_t srcBlockInfoIndex = 0; !end && srcBlockInfoIndex < blockCount; ++srcBlockInfoIndex) { + for(size_t srcBlockInfoIndex = 0; !end && srcBlockInfoIndex < blockCount; ++srcBlockInfoIndex) + { const size_t srcOrigBlockIndex = m_BlockInfos[srcBlockInfoIndex].origBlockIndex; - VmaDeviceMemoryBlock *const pSrcBlock = m_pBlockVector->GetBlock(srcOrigBlockIndex); - VmaBlockMetadata_Generic *const pSrcMetadata = (VmaBlockMetadata_Generic *)pSrcBlock->m_pMetadata; - for (VmaSuballocationList::iterator srcSuballocIt = pSrcMetadata->m_Suballocations.begin(); - !end && srcSuballocIt != pSrcMetadata->m_Suballocations.end();) { - VmaAllocation_T *const pAlloc = srcSuballocIt->hAllocation; + 
VmaDeviceMemoryBlock* const pSrcBlock = m_pBlockVector->GetBlock(srcOrigBlockIndex); + VmaBlockMetadata_Generic* const pSrcMetadata = (VmaBlockMetadata_Generic*)pSrcBlock->m_pMetadata; + for(VmaSuballocationList::iterator srcSuballocIt = pSrcMetadata->m_Suballocations.begin(); + !end && srcSuballocIt != pSrcMetadata->m_Suballocations.end(); ) + { + VmaAllocation_T* const pAlloc = srcSuballocIt->hAllocation; const VkDeviceSize srcAllocAlignment = pAlloc->GetAlignment(); const VkDeviceSize srcAllocSize = srcSuballocIt->size; - if (m_AllocationsMoved == maxAllocationsToMove || - m_BytesMoved + srcAllocSize > maxBytesToMove) { + if(m_AllocationsMoved == maxAllocationsToMove || + m_BytesMoved + srcAllocSize > maxBytesToMove) + { end = true; break; } const VkDeviceSize srcAllocOffset = srcSuballocIt->offset; + VmaDefragmentationMove move = {}; // Try to place it in one of free spaces from the database. size_t freeSpaceInfoIndex; VkDeviceSize dstAllocOffset; - if (freeSpaceDb.Fetch(srcAllocAlignment, srcAllocSize, - freeSpaceInfoIndex, dstAllocOffset)) { + if(freeSpaceDb.Fetch(srcAllocAlignment, srcAllocSize, + freeSpaceInfoIndex, dstAllocOffset)) + { size_t freeSpaceOrigBlockIndex = m_BlockInfos[freeSpaceInfoIndex].origBlockIndex; - VmaDeviceMemoryBlock *pFreeSpaceBlock = m_pBlockVector->GetBlock(freeSpaceOrigBlockIndex); - VmaBlockMetadata_Generic *pFreeSpaceMetadata = (VmaBlockMetadata_Generic *)pFreeSpaceBlock->m_pMetadata; + VmaDeviceMemoryBlock* pFreeSpaceBlock = m_pBlockVector->GetBlock(freeSpaceOrigBlockIndex); + VmaBlockMetadata_Generic* pFreeSpaceMetadata = (VmaBlockMetadata_Generic*)pFreeSpaceBlock->m_pMetadata; // Same block - if (freeSpaceInfoIndex == srcBlockInfoIndex) { + if(freeSpaceInfoIndex == srcBlockInfoIndex) + { VMA_ASSERT(dstAllocOffset <= srcAllocOffset); // MOVE OPTION 1: Move the allocation inside the same block by decreasing offset. 
@@ -11891,7 +13640,7 @@ VkResult VmaDefragmentationAlgorithm_Fast::Defragment( suballoc.hAllocation->ChangeOffset(dstAllocOffset); m_BytesMoved += srcAllocSize; ++m_AllocationsMoved; - + VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt; ++nextSuballocIt; pSrcMetadata->m_Suballocations.erase(srcSuballocIt); @@ -11899,15 +13648,17 @@ VkResult VmaDefragmentationAlgorithm_Fast::Defragment( InsertSuballoc(pFreeSpaceMetadata, suballoc); - VmaDefragmentationMove move = { - srcOrigBlockIndex, freeSpaceOrigBlockIndex, - srcAllocOffset, dstAllocOffset, - srcAllocSize - }; + move.srcBlockIndex = srcOrigBlockIndex; + move.dstBlockIndex = freeSpaceOrigBlockIndex; + move.srcOffset = srcAllocOffset; + move.dstOffset = dstAllocOffset; + move.size = srcAllocSize; + moves.push_back(move); } // Different block - else { + else + { // MOVE OPTION 2: Move the allocation to a different block. VMA_ASSERT(freeSpaceInfoIndex < srcBlockInfoIndex); @@ -11925,68 +13676,79 @@ VkResult VmaDefragmentationAlgorithm_Fast::Defragment( InsertSuballoc(pFreeSpaceMetadata, suballoc); - VmaDefragmentationMove move = { - srcOrigBlockIndex, freeSpaceOrigBlockIndex, - srcAllocOffset, dstAllocOffset, - srcAllocSize - }; + move.srcBlockIndex = srcOrigBlockIndex; + move.dstBlockIndex = freeSpaceOrigBlockIndex; + move.srcOffset = srcAllocOffset; + move.dstOffset = dstAllocOffset; + move.size = srcAllocSize; + moves.push_back(move); } - } else { + } + else + { dstAllocOffset = VmaAlignUp(dstOffset, srcAllocAlignment); // If the allocation doesn't fit before the end of dstBlock, forward to next block. - while (dstBlockInfoIndex < srcBlockInfoIndex && - dstAllocOffset + srcAllocSize > dstBlockSize) { + while(dstBlockInfoIndex < srcBlockInfoIndex && + dstAllocOffset + srcAllocSize > dstBlockSize) + { // But before that, register remaining free space at the end of dst block. 
freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, dstBlockSize - dstOffset); ++dstBlockInfoIndex; dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex; pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex); - pDstMetadata = (VmaBlockMetadata_Generic *)pDstBlock->m_pMetadata; + pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata; dstBlockSize = pDstMetadata->GetSize(); dstOffset = 0; dstAllocOffset = 0; } // Same block - if (dstBlockInfoIndex == srcBlockInfoIndex) { + if(dstBlockInfoIndex == srcBlockInfoIndex) + { VMA_ASSERT(dstAllocOffset <= srcAllocOffset); const bool overlap = dstAllocOffset + srcAllocSize > srcAllocOffset; bool skipOver = overlap; - if (overlap && m_OverlappingMoveSupported && dstAllocOffset < srcAllocOffset) { + if(overlap && m_OverlappingMoveSupported && dstAllocOffset < srcAllocOffset) + { // If destination and source place overlap, skip if it would move it // by only < 1/64 of its size. skipOver = (srcAllocOffset - dstAllocOffset) * 64 < srcAllocSize; } - if (skipOver) { + if(skipOver) + { freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, srcAllocOffset - dstOffset); dstOffset = srcAllocOffset + srcAllocSize; ++srcSuballocIt; } // MOVE OPTION 1: Move the allocation inside the same block by decreasing offset. - else { + else + { srcSuballocIt->offset = dstAllocOffset; srcSuballocIt->hAllocation->ChangeOffset(dstAllocOffset); dstOffset = dstAllocOffset + srcAllocSize; m_BytesMoved += srcAllocSize; ++m_AllocationsMoved; ++srcSuballocIt; - VmaDefragmentationMove move = { - srcOrigBlockIndex, dstOrigBlockIndex, - srcAllocOffset, dstAllocOffset, - srcAllocSize - }; + + move.srcBlockIndex = srcOrigBlockIndex; + move.dstBlockIndex = dstOrigBlockIndex; + move.srcOffset = srcAllocOffset; + move.dstOffset = dstAllocOffset; + move.size = srcAllocSize; + moves.push_back(move); } } // Different block - else { + else + { // MOVE OPTION 2: Move the allocation to a different block. 
VMA_ASSERT(dstBlockInfoIndex < srcBlockInfoIndex); @@ -12006,11 +13768,12 @@ VkResult VmaDefragmentationAlgorithm_Fast::Defragment( pDstMetadata->m_Suballocations.push_back(suballoc); - VmaDefragmentationMove move = { - srcOrigBlockIndex, dstOrigBlockIndex, - srcAllocOffset, dstAllocOffset, - srcAllocSize - }; + move.srcBlockIndex = srcOrigBlockIndex; + move.dstBlockIndex = dstOrigBlockIndex; + move.srcOffset = srcAllocOffset; + move.dstOffset = dstAllocOffset; + move.size = srcAllocSize; + moves.push_back(move); } } @@ -12018,76 +13781,87 @@ VkResult VmaDefragmentationAlgorithm_Fast::Defragment( } m_BlockInfos.clear(); - + PostprocessMetadata(); return VK_SUCCESS; } -void VmaDefragmentationAlgorithm_Fast::PreprocessMetadata() { +void VmaDefragmentationAlgorithm_Fast::PreprocessMetadata() +{ const size_t blockCount = m_pBlockVector->GetBlockCount(); - for (size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex) { - VmaBlockMetadata_Generic *const pMetadata = - (VmaBlockMetadata_Generic *)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata; + for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex) + { + VmaBlockMetadata_Generic* const pMetadata = + (VmaBlockMetadata_Generic*)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata; pMetadata->m_FreeCount = 0; pMetadata->m_SumFreeSize = pMetadata->GetSize(); pMetadata->m_FreeSuballocationsBySize.clear(); - for (VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin(); - it != pMetadata->m_Suballocations.end();) { - if (it->type == VMA_SUBALLOCATION_TYPE_FREE) { + for(VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin(); + it != pMetadata->m_Suballocations.end(); ) + { + if(it->type == VMA_SUBALLOCATION_TYPE_FREE) + { VmaSuballocationList::iterator nextIt = it; ++nextIt; pMetadata->m_Suballocations.erase(it); it = nextIt; - } else { + } + else + { ++it; } } } } -void VmaDefragmentationAlgorithm_Fast::PostprocessMetadata() { +void 
VmaDefragmentationAlgorithm_Fast::PostprocessMetadata() +{ const size_t blockCount = m_pBlockVector->GetBlockCount(); - for (size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex) { - VmaBlockMetadata_Generic *const pMetadata = - (VmaBlockMetadata_Generic *)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata; + for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex) + { + VmaBlockMetadata_Generic* const pMetadata = + (VmaBlockMetadata_Generic*)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata; const VkDeviceSize blockSize = pMetadata->GetSize(); - + // No allocations in this block - entire area is free. - if (pMetadata->m_Suballocations.empty()) { + if(pMetadata->m_Suballocations.empty()) + { pMetadata->m_FreeCount = 1; //pMetadata->m_SumFreeSize is already set to blockSize. VmaSuballocation suballoc = { 0, // offset blockSize, // size VMA_NULL, // hAllocation - VMA_SUBALLOCATION_TYPE_FREE - }; + VMA_SUBALLOCATION_TYPE_FREE }; pMetadata->m_Suballocations.push_back(suballoc); pMetadata->RegisterFreeSuballocation(pMetadata->m_Suballocations.begin()); } // There are some allocations in this block. - else { + else + { VkDeviceSize offset = 0; VmaSuballocationList::iterator it; - for (it = pMetadata->m_Suballocations.begin(); - it != pMetadata->m_Suballocations.end(); - ++it) { + for(it = pMetadata->m_Suballocations.begin(); + it != pMetadata->m_Suballocations.end(); + ++it) + { VMA_ASSERT(it->type != VMA_SUBALLOCATION_TYPE_FREE); VMA_ASSERT(it->offset >= offset); // Need to insert preceding free space. 
- if (it->offset > offset) { + if(it->offset > offset) + { ++pMetadata->m_FreeCount; const VkDeviceSize freeSize = it->offset - offset; VmaSuballocation suballoc = { offset, // offset freeSize, // size VMA_NULL, // hAllocation - VMA_SUBALLOCATION_TYPE_FREE - }; + VMA_SUBALLOCATION_TYPE_FREE }; VmaSuballocationList::iterator precedingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc); - if (freeSize >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER) { + if(freeSize >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER) + { pMetadata->m_FreeSuballocationsBySize.push_back(precedingFreeIt); } } @@ -12097,37 +13871,41 @@ void VmaDefragmentationAlgorithm_Fast::PostprocessMetadata() { } // Need to insert trailing free space. - if (offset < blockSize) { + if(offset < blockSize) + { ++pMetadata->m_FreeCount; const VkDeviceSize freeSize = blockSize - offset; VmaSuballocation suballoc = { offset, // offset freeSize, // size VMA_NULL, // hAllocation - VMA_SUBALLOCATION_TYPE_FREE - }; + VMA_SUBALLOCATION_TYPE_FREE }; VMA_ASSERT(it == pMetadata->m_Suballocations.end()); VmaSuballocationList::iterator trailingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc); - if (freeSize > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER) { + if(freeSize > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER) + { pMetadata->m_FreeSuballocationsBySize.push_back(trailingFreeIt); } } VMA_SORT( - pMetadata->m_FreeSuballocationsBySize.begin(), - pMetadata->m_FreeSuballocationsBySize.end(), - VmaSuballocationItemSizeLess()); + pMetadata->m_FreeSuballocationsBySize.begin(), + pMetadata->m_FreeSuballocationsBySize.end(), + VmaSuballocationItemSizeLess()); } VMA_HEAVY_ASSERT(pMetadata->Validate()); } } -void VmaDefragmentationAlgorithm_Fast::InsertSuballoc(VmaBlockMetadata_Generic *pMetadata, const VmaSuballocation &suballoc) { +void VmaDefragmentationAlgorithm_Fast::InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc) +{ // TODO: Optimize somehow. 
Remember iterator instead of searching for it linearly. VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin(); - while (it != pMetadata->m_Suballocations.end()) { - if (it->offset < suballoc.offset) { + while(it != pMetadata->m_Suballocations.end()) + { + if(it->offset < suballoc.offset) + { ++it; } } @@ -12138,61 +13916,76 @@ void VmaDefragmentationAlgorithm_Fast::InsertSuballoc(VmaBlockMetadata_Generic * // VmaBlockVectorDefragmentationContext VmaBlockVectorDefragmentationContext::VmaBlockVectorDefragmentationContext( - VmaAllocator hAllocator, - VmaPool hCustomPool, - VmaBlockVector *pBlockVector, - uint32_t currFrameIndex, - uint32_t algorithmFlags) : - res(VK_SUCCESS), - mutexLocked(false), - blockContexts(VmaStlAllocator<VmaBlockDefragmentationContext>(hAllocator->GetAllocationCallbacks())), - m_hAllocator(hAllocator), - m_hCustomPool(hCustomPool), - m_pBlockVector(pBlockVector), - m_CurrFrameIndex(currFrameIndex), - m_AlgorithmFlags(algorithmFlags), - m_pAlgorithm(VMA_NULL), - m_Allocations(VmaStlAllocator<AllocInfo>(hAllocator->GetAllocationCallbacks())), - m_AllAllocations(false) { + VmaAllocator hAllocator, + VmaPool hCustomPool, + VmaBlockVector* pBlockVector, + uint32_t currFrameIndex) : + res(VK_SUCCESS), + mutexLocked(false), + blockContexts(VmaStlAllocator<VmaBlockDefragmentationContext>(hAllocator->GetAllocationCallbacks())), + defragmentationMoves(VmaStlAllocator<VmaDefragmentationMove>(hAllocator->GetAllocationCallbacks())), + defragmentationMovesProcessed(0), + defragmentationMovesCommitted(0), + hasDefragmentationPlan(0), + m_hAllocator(hAllocator), + m_hCustomPool(hCustomPool), + m_pBlockVector(pBlockVector), + m_CurrFrameIndex(currFrameIndex), + m_pAlgorithm(VMA_NULL), + m_Allocations(VmaStlAllocator<AllocInfo>(hAllocator->GetAllocationCallbacks())), + m_AllAllocations(false) +{ } -VmaBlockVectorDefragmentationContext::~VmaBlockVectorDefragmentationContext() { 
+VmaBlockVectorDefragmentationContext::~VmaBlockVectorDefragmentationContext() +{ vma_delete(m_hAllocator, m_pAlgorithm); } -void VmaBlockVectorDefragmentationContext::AddAllocation(VmaAllocation hAlloc, VkBool32 *pChanged) { +void VmaBlockVectorDefragmentationContext::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) +{ AllocInfo info = { hAlloc, pChanged }; m_Allocations.push_back(info); } -void VmaBlockVectorDefragmentationContext::Begin(bool overlappingMoveSupported) { +void VmaBlockVectorDefragmentationContext::Begin(bool overlappingMoveSupported, VmaDefragmentationFlags flags) +{ const bool allAllocations = m_AllAllocations || - m_Allocations.size() == m_pBlockVector->CalcAllocationCount(); + m_Allocations.size() == m_pBlockVector->CalcAllocationCount(); /******************************** - HERE IS THE CHOICE OF DEFRAGMENTATION ALGORITHM. - ********************************/ + HERE IS THE CHOICE OF DEFRAGMENTATION ALGORITHM. + ********************************/ /* - Fast algorithm is supported only when certain criteria are met: - - VMA_DEBUG_MARGIN is 0. - - All allocations in this block vector are moveable. - - There is no possibility of image/buffer granularity conflict. - */ - if (VMA_DEBUG_MARGIN == 0 && - allAllocations && - !m_pBlockVector->IsBufferImageGranularityConflictPossible()) { + Fast algorithm is supported only when certain criteria are met: + - VMA_DEBUG_MARGIN is 0. + - All allocations in this block vector are moveable. + - There is no possibility of image/buffer granularity conflict. 
+ - The defragmentation is not incremental + */ + if(VMA_DEBUG_MARGIN == 0 && + allAllocations && + !m_pBlockVector->IsBufferImageGranularityConflictPossible() && + !(flags & VMA_DEFRAGMENTATION_FLAG_INCREMENTAL)) + { m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Fast)( - m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported); - } else { + m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported); + } + else + { m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Generic)( - m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported); + m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported); } - if (allAllocations) { + if(allAllocations) + { m_pAlgorithm->AddAll(); - } else { - for (size_t i = 0, count = m_Allocations.size(); i < count; ++i) { + } + else + { + for(size_t i = 0, count = m_Allocations.size(); i < count; ++i) + { m_pAlgorithm->AddAllocation(m_Allocations[i].hAlloc, m_Allocations[i].pChanged); } } @@ -12202,55 +13995,65 @@ void VmaBlockVectorDefragmentationContext::Begin(bool overlappingMoveSupported) // VmaDefragmentationContext VmaDefragmentationContext_T::VmaDefragmentationContext_T( - VmaAllocator hAllocator, - uint32_t currFrameIndex, - uint32_t flags, - VmaDefragmentationStats *pStats) : - m_hAllocator(hAllocator), - m_CurrFrameIndex(currFrameIndex), - m_Flags(flags), - m_pStats(pStats), - m_CustomPoolContexts(VmaStlAllocator<VmaBlockVectorDefragmentationContext *>(hAllocator->GetAllocationCallbacks())) { + VmaAllocator hAllocator, + uint32_t currFrameIndex, + uint32_t flags, + VmaDefragmentationStats* pStats) : + m_hAllocator(hAllocator), + m_CurrFrameIndex(currFrameIndex), + m_Flags(flags), + m_pStats(pStats), + m_CustomPoolContexts(VmaStlAllocator<VmaBlockVectorDefragmentationContext*>(hAllocator->GetAllocationCallbacks())) +{ memset(m_DefaultPoolContexts, 0, sizeof(m_DefaultPoolContexts)); } 
-VmaDefragmentationContext_T::~VmaDefragmentationContext_T() { - for (size_t i = m_CustomPoolContexts.size(); i--;) { - VmaBlockVectorDefragmentationContext *pBlockVectorCtx = m_CustomPoolContexts[i]; +VmaDefragmentationContext_T::~VmaDefragmentationContext_T() +{ + for(size_t i = m_CustomPoolContexts.size(); i--; ) + { + VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[i]; pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_pStats); vma_delete(m_hAllocator, pBlockVectorCtx); } - for (size_t i = m_hAllocator->m_MemProps.memoryTypeCount; i--;) { - VmaBlockVectorDefragmentationContext *pBlockVectorCtx = m_DefaultPoolContexts[i]; - if (pBlockVectorCtx) { + for(size_t i = m_hAllocator->m_MemProps.memoryTypeCount; i--; ) + { + VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[i]; + if(pBlockVectorCtx) + { pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_pStats); vma_delete(m_hAllocator, pBlockVectorCtx); } } } -void VmaDefragmentationContext_T::AddPools(uint32_t poolCount, VmaPool *pPools) { - for (uint32_t poolIndex = 0; poolIndex < poolCount; ++poolIndex) { +void VmaDefragmentationContext_T::AddPools(uint32_t poolCount, VmaPool* pPools) +{ + for(uint32_t poolIndex = 0; poolIndex < poolCount; ++poolIndex) + { VmaPool pool = pPools[poolIndex]; VMA_ASSERT(pool); // Pools with algorithm other than default are not defragmented. 
- if (pool->m_BlockVector.GetAlgorithm() == 0) { - VmaBlockVectorDefragmentationContext *pBlockVectorDefragCtx = VMA_NULL; - - for (size_t i = m_CustomPoolContexts.size(); i--;) { - if (m_CustomPoolContexts[i]->GetCustomPool() == pool) { + if(pool->m_BlockVector.GetAlgorithm() == 0) + { + VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL; + + for(size_t i = m_CustomPoolContexts.size(); i--; ) + { + if(m_CustomPoolContexts[i]->GetCustomPool() == pool) + { pBlockVectorDefragCtx = m_CustomPoolContexts[i]; break; } } - - if (!pBlockVectorDefragCtx) { + + if(!pBlockVectorDefragCtx) + { pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)( - m_hAllocator, - pool, - &pool->m_BlockVector, - m_CurrFrameIndex, - m_Flags); + m_hAllocator, + pool, + &pool->m_BlockVector, + m_CurrFrameIndex); m_CustomPoolContexts.push_back(pBlockVectorDefragCtx); } @@ -12260,60 +14063,68 @@ void VmaDefragmentationContext_T::AddPools(uint32_t poolCount, VmaPool *pPools) } void VmaDefragmentationContext_T::AddAllocations( - uint32_t allocationCount, - VmaAllocation *pAllocations, - VkBool32 *pAllocationsChanged) { + uint32_t allocationCount, + VmaAllocation* pAllocations, + VkBool32* pAllocationsChanged) +{ // Dispatch pAllocations among defragmentators. Create them when necessary. - for (uint32_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex) { + for(uint32_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex) + { const VmaAllocation hAlloc = pAllocations[allocIndex]; VMA_ASSERT(hAlloc); // DedicatedAlloc cannot be defragmented. - if ((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) && - // Lost allocation cannot be defragmented. - (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)) { - VmaBlockVectorDefragmentationContext *pBlockVectorDefragCtx = VMA_NULL; + if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) && + // Lost allocation cannot be defragmented. 
+ (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)) + { + VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL; const VmaPool hAllocPool = hAlloc->GetBlock()->GetParentPool(); // This allocation belongs to custom pool. - if (hAllocPool != VK_NULL_HANDLE) { + if(hAllocPool != VK_NULL_HANDLE) + { // Pools with algorithm other than default are not defragmented. - if (hAllocPool->m_BlockVector.GetAlgorithm() == 0) { - for (size_t i = m_CustomPoolContexts.size(); i--;) { - if (m_CustomPoolContexts[i]->GetCustomPool() == hAllocPool) { + if(hAllocPool->m_BlockVector.GetAlgorithm() == 0) + { + for(size_t i = m_CustomPoolContexts.size(); i--; ) + { + if(m_CustomPoolContexts[i]->GetCustomPool() == hAllocPool) + { pBlockVectorDefragCtx = m_CustomPoolContexts[i]; break; } } - if (!pBlockVectorDefragCtx) { + if(!pBlockVectorDefragCtx) + { pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)( - m_hAllocator, - hAllocPool, - &hAllocPool->m_BlockVector, - m_CurrFrameIndex, - m_Flags); + m_hAllocator, + hAllocPool, + &hAllocPool->m_BlockVector, + m_CurrFrameIndex); m_CustomPoolContexts.push_back(pBlockVectorDefragCtx); } } } // This allocation belongs to default pool. - else { + else + { const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex(); pBlockVectorDefragCtx = m_DefaultPoolContexts[memTypeIndex]; - if (!pBlockVectorDefragCtx) { + if(!pBlockVectorDefragCtx) + { pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)( - m_hAllocator, - VMA_NULL, // hCustomPool - m_hAllocator->m_pBlockVectors[memTypeIndex], - m_CurrFrameIndex, - m_Flags); + m_hAllocator, + VMA_NULL, // hCustomPool + m_hAllocator->m_pBlockVectors[memTypeIndex], + m_CurrFrameIndex); m_DefaultPoolContexts[memTypeIndex] = pBlockVectorDefragCtx; } } - if (pBlockVectorDefragCtx) { - VkBool32 *const pChanged = (pAllocationsChanged != VMA_NULL) ? 
- &pAllocationsChanged[allocIndex] : - VMA_NULL; + if(pBlockVectorDefragCtx) + { + VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ? + &pAllocationsChanged[allocIndex] : VMA_NULL; pBlockVectorDefragCtx->AddAllocation(hAlloc, pChanged); } } @@ -12321,14 +14132,34 @@ void VmaDefragmentationContext_T::AddAllocations( } VkResult VmaDefragmentationContext_T::Defragment( - VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove, - VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove, - VkCommandBuffer commandBuffer, VmaDefragmentationStats *pStats) { - if (pStats) { + VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove, + VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove, + VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats, VmaDefragmentationFlags flags) +{ + if(pStats) + { memset(pStats, 0, sizeof(VmaDefragmentationStats)); } - if (commandBuffer == VK_NULL_HANDLE) { + if(flags & VMA_DEFRAGMENTATION_FLAG_INCREMENTAL) + { + // For incremental defragmetnations, we just earmark how much we can move + // The real meat is in the defragmentation steps + m_MaxCpuBytesToMove = maxCpuBytesToMove; + m_MaxCpuAllocationsToMove = maxCpuAllocationsToMove; + + m_MaxGpuBytesToMove = maxGpuBytesToMove; + m_MaxGpuAllocationsToMove = maxGpuAllocationsToMove; + + if(m_MaxCpuBytesToMove == 0 && m_MaxCpuAllocationsToMove == 0 && + m_MaxGpuBytesToMove == 0 && m_MaxGpuAllocationsToMove == 0) + return VK_SUCCESS; + + return VK_NOT_READY; + } + + if(commandBuffer == VK_NULL_HANDLE) + { maxGpuBytesToMove = 0; maxGpuAllocationsToMove = 0; } @@ -12336,37 +14167,42 @@ VkResult VmaDefragmentationContext_T::Defragment( VkResult res = VK_SUCCESS; // Process default pools. 
- for (uint32_t memTypeIndex = 0; - memTypeIndex < m_hAllocator->GetMemoryTypeCount() && res >= VK_SUCCESS; - ++memTypeIndex) { - VmaBlockVectorDefragmentationContext *pBlockVectorCtx = m_DefaultPoolContexts[memTypeIndex]; - if (pBlockVectorCtx) { + for(uint32_t memTypeIndex = 0; + memTypeIndex < m_hAllocator->GetMemoryTypeCount() && res >= VK_SUCCESS; + ++memTypeIndex) + { + VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[memTypeIndex]; + if(pBlockVectorCtx) + { VMA_ASSERT(pBlockVectorCtx->GetBlockVector()); pBlockVectorCtx->GetBlockVector()->Defragment( - pBlockVectorCtx, - pStats, - maxCpuBytesToMove, maxCpuAllocationsToMove, - maxGpuBytesToMove, maxGpuAllocationsToMove, - commandBuffer); - if (pBlockVectorCtx->res != VK_SUCCESS) { + pBlockVectorCtx, + pStats, flags, + maxCpuBytesToMove, maxCpuAllocationsToMove, + maxGpuBytesToMove, maxGpuAllocationsToMove, + commandBuffer); + if(pBlockVectorCtx->res != VK_SUCCESS) + { res = pBlockVectorCtx->res; } } } // Process custom pools. 
- for (size_t customCtxIndex = 0, customCtxCount = m_CustomPoolContexts.size(); - customCtxIndex < customCtxCount && res >= VK_SUCCESS; - ++customCtxIndex) { - VmaBlockVectorDefragmentationContext *pBlockVectorCtx = m_CustomPoolContexts[customCtxIndex]; + for(size_t customCtxIndex = 0, customCtxCount = m_CustomPoolContexts.size(); + customCtxIndex < customCtxCount && res >= VK_SUCCESS; + ++customCtxIndex) + { + VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[customCtxIndex]; VMA_ASSERT(pBlockVectorCtx && pBlockVectorCtx->GetBlockVector()); pBlockVectorCtx->GetBlockVector()->Defragment( - pBlockVectorCtx, - pStats, - maxCpuBytesToMove, maxCpuAllocationsToMove, - maxGpuBytesToMove, maxGpuAllocationsToMove, - commandBuffer); - if (pBlockVectorCtx->res != VK_SUCCESS) { + pBlockVectorCtx, + pStats, flags, + maxCpuBytesToMove, maxCpuAllocationsToMove, + maxGpuBytesToMove, maxGpuAllocationsToMove, + commandBuffer); + if(pBlockVectorCtx->res != VK_SUCCESS) + { res = pBlockVectorCtx->res; } } @@ -12374,46 +14210,178 @@ VkResult VmaDefragmentationContext_T::Defragment( return res; } +VkResult VmaDefragmentationContext_T::DefragmentPassBegin(VmaDefragmentationPassInfo* pInfo) +{ + VmaDefragmentationPassMoveInfo* pCurrentMove = pInfo->pMoves; + uint32_t movesLeft = pInfo->moveCount; + + // Process default pools. 
+ for(uint32_t memTypeIndex = 0; + memTypeIndex < m_hAllocator->GetMemoryTypeCount(); + ++memTypeIndex) + { + VmaBlockVectorDefragmentationContext *pBlockVectorCtx = m_DefaultPoolContexts[memTypeIndex]; + if(pBlockVectorCtx) + { + VMA_ASSERT(pBlockVectorCtx->GetBlockVector()); + + if(!pBlockVectorCtx->hasDefragmentationPlan) + { + pBlockVectorCtx->GetBlockVector()->Defragment( + pBlockVectorCtx, + m_pStats, m_Flags, + m_MaxCpuBytesToMove, m_MaxCpuAllocationsToMove, + m_MaxGpuBytesToMove, m_MaxGpuAllocationsToMove, + VK_NULL_HANDLE); + + if(pBlockVectorCtx->res < VK_SUCCESS) + continue; + + pBlockVectorCtx->hasDefragmentationPlan = true; + } + + const uint32_t processed = pBlockVectorCtx->GetBlockVector()->ProcessDefragmentations( + pBlockVectorCtx, + pCurrentMove, movesLeft); + + movesLeft -= processed; + pCurrentMove += processed; + } + } + + // Process custom pools. + for(size_t customCtxIndex = 0, customCtxCount = m_CustomPoolContexts.size(); + customCtxIndex < customCtxCount; + ++customCtxIndex) + { + VmaBlockVectorDefragmentationContext *pBlockVectorCtx = m_CustomPoolContexts[customCtxIndex]; + VMA_ASSERT(pBlockVectorCtx && pBlockVectorCtx->GetBlockVector()); + + if(!pBlockVectorCtx->hasDefragmentationPlan) + { + pBlockVectorCtx->GetBlockVector()->Defragment( + pBlockVectorCtx, + m_pStats, m_Flags, + m_MaxCpuBytesToMove, m_MaxCpuAllocationsToMove, + m_MaxGpuBytesToMove, m_MaxGpuAllocationsToMove, + VK_NULL_HANDLE); + + if(pBlockVectorCtx->res < VK_SUCCESS) + continue; + + pBlockVectorCtx->hasDefragmentationPlan = true; + } + + const uint32_t processed = pBlockVectorCtx->GetBlockVector()->ProcessDefragmentations( + pBlockVectorCtx, + pCurrentMove, movesLeft); + + movesLeft -= processed; + pCurrentMove += processed; + } + + pInfo->moveCount = pInfo->moveCount - movesLeft; + + return VK_SUCCESS; +} +VkResult VmaDefragmentationContext_T::DefragmentPassEnd() +{ + VkResult res = VK_SUCCESS; + + // Process default pools. 
+ for(uint32_t memTypeIndex = 0; + memTypeIndex < m_hAllocator->GetMemoryTypeCount(); + ++memTypeIndex) + { + VmaBlockVectorDefragmentationContext *pBlockVectorCtx = m_DefaultPoolContexts[memTypeIndex]; + if(pBlockVectorCtx) + { + VMA_ASSERT(pBlockVectorCtx->GetBlockVector()); + + if(!pBlockVectorCtx->hasDefragmentationPlan) + { + res = VK_NOT_READY; + continue; + } + + pBlockVectorCtx->GetBlockVector()->CommitDefragmentations( + pBlockVectorCtx, m_pStats); + + if(pBlockVectorCtx->defragmentationMoves.size() != pBlockVectorCtx->defragmentationMovesCommitted) + res = VK_NOT_READY; + } + } + + // Process custom pools. + for(size_t customCtxIndex = 0, customCtxCount = m_CustomPoolContexts.size(); + customCtxIndex < customCtxCount; + ++customCtxIndex) + { + VmaBlockVectorDefragmentationContext *pBlockVectorCtx = m_CustomPoolContexts[customCtxIndex]; + VMA_ASSERT(pBlockVectorCtx && pBlockVectorCtx->GetBlockVector()); + + if(!pBlockVectorCtx->hasDefragmentationPlan) + { + res = VK_NOT_READY; + continue; + } + + pBlockVectorCtx->GetBlockVector()->CommitDefragmentations( + pBlockVectorCtx, m_pStats); + + if(pBlockVectorCtx->defragmentationMoves.size() != pBlockVectorCtx->defragmentationMovesCommitted) + res = VK_NOT_READY; + } + + return res; +} + //////////////////////////////////////////////////////////////////////////////// // VmaRecorder #if VMA_RECORDING_ENABLED VmaRecorder::VmaRecorder() : - m_UseMutex(true), - m_Flags(0), - m_File(VMA_NULL), - m_Freq(INT64_MAX), - m_StartCounter(INT64_MAX) { + m_UseMutex(true), + m_Flags(0), + m_File(VMA_NULL), + m_Freq(INT64_MAX), + m_StartCounter(INT64_MAX) +{ } -VkResult VmaRecorder::Init(const VmaRecordSettings &settings, bool useMutex) { +VkResult VmaRecorder::Init(const VmaRecordSettings& settings, bool useMutex) +{ m_UseMutex = useMutex; m_Flags = settings.flags; - QueryPerformanceFrequency((LARGE_INTEGER *)&m_Freq); - QueryPerformanceCounter((LARGE_INTEGER *)&m_StartCounter); + 
QueryPerformanceFrequency((LARGE_INTEGER*)&m_Freq); + QueryPerformanceCounter((LARGE_INTEGER*)&m_StartCounter); // Open file for writing. errno_t err = fopen_s(&m_File, settings.pFilePath, "wb"); - if (err != 0) { + if(err != 0) + { return VK_ERROR_INITIALIZATION_FAILED; } // Write header. fprintf(m_File, "%s\n", "Vulkan Memory Allocator,Calls recording"); - fprintf(m_File, "%s\n", "1,5"); + fprintf(m_File, "%s\n", "1,8"); return VK_SUCCESS; } -VmaRecorder::~VmaRecorder() { - if (m_File != VMA_NULL) { +VmaRecorder::~VmaRecorder() +{ + if(m_File != VMA_NULL) + { fclose(m_File); } } -void VmaRecorder::RecordCreateAllocator(uint32_t frameIndex) { +void VmaRecorder::RecordCreateAllocator(uint32_t frameIndex) +{ CallParams callParams; GetBasicParams(callParams); @@ -12422,7 +14390,8 @@ void VmaRecorder::RecordCreateAllocator(uint32_t frameIndex) { Flush(); } -void VmaRecorder::RecordDestroyAllocator(uint32_t frameIndex) { +void VmaRecorder::RecordDestroyAllocator(uint32_t frameIndex) +{ CallParams callParams; GetBasicParams(callParams); @@ -12431,151 +14400,159 @@ void VmaRecorder::RecordDestroyAllocator(uint32_t frameIndex) { Flush(); } -void VmaRecorder::RecordCreatePool(uint32_t frameIndex, const VmaPoolCreateInfo &createInfo, VmaPool pool) { +void VmaRecorder::RecordCreatePool(uint32_t frameIndex, const VmaPoolCreateInfo& createInfo, VmaPool pool) +{ CallParams callParams; GetBasicParams(callParams); VmaMutexLock lock(m_FileMutex, m_UseMutex); fprintf(m_File, "%u,%.3f,%u,vmaCreatePool,%u,%u,%llu,%llu,%llu,%u,%p\n", callParams.threadId, callParams.time, frameIndex, - createInfo.memoryTypeIndex, - createInfo.flags, - createInfo.blockSize, - (uint64_t)createInfo.minBlockCount, - (uint64_t)createInfo.maxBlockCount, - createInfo.frameInUseCount, - pool); + createInfo.memoryTypeIndex, + createInfo.flags, + createInfo.blockSize, + (uint64_t)createInfo.minBlockCount, + (uint64_t)createInfo.maxBlockCount, + createInfo.frameInUseCount, + pool); Flush(); } -void 
VmaRecorder::RecordDestroyPool(uint32_t frameIndex, VmaPool pool) { +void VmaRecorder::RecordDestroyPool(uint32_t frameIndex, VmaPool pool) +{ CallParams callParams; GetBasicParams(callParams); VmaMutexLock lock(m_FileMutex, m_UseMutex); fprintf(m_File, "%u,%.3f,%u,vmaDestroyPool,%p\n", callParams.threadId, callParams.time, frameIndex, - pool); + pool); Flush(); } void VmaRecorder::RecordAllocateMemory(uint32_t frameIndex, - const VkMemoryRequirements &vkMemReq, - const VmaAllocationCreateInfo &createInfo, - VmaAllocation allocation) { + const VkMemoryRequirements& vkMemReq, + const VmaAllocationCreateInfo& createInfo, + VmaAllocation allocation) +{ CallParams callParams; GetBasicParams(callParams); VmaMutexLock lock(m_FileMutex, m_UseMutex); UserDataString userDataStr(createInfo.flags, createInfo.pUserData); fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemory,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex, - vkMemReq.size, - vkMemReq.alignment, - vkMemReq.memoryTypeBits, - createInfo.flags, - createInfo.usage, - createInfo.requiredFlags, - createInfo.preferredFlags, - createInfo.memoryTypeBits, - createInfo.pool, - allocation, - userDataStr.GetString()); + vkMemReq.size, + vkMemReq.alignment, + vkMemReq.memoryTypeBits, + createInfo.flags, + createInfo.usage, + createInfo.requiredFlags, + createInfo.preferredFlags, + createInfo.memoryTypeBits, + createInfo.pool, + allocation, + userDataStr.GetString()); Flush(); } void VmaRecorder::RecordAllocateMemoryPages(uint32_t frameIndex, - const VkMemoryRequirements &vkMemReq, - const VmaAllocationCreateInfo &createInfo, - uint64_t allocationCount, - const VmaAllocation *pAllocations) { + const VkMemoryRequirements& vkMemReq, + const VmaAllocationCreateInfo& createInfo, + uint64_t allocationCount, + const VmaAllocation* pAllocations) +{ CallParams callParams; GetBasicParams(callParams); VmaMutexLock lock(m_FileMutex, m_UseMutex); UserDataString userDataStr(createInfo.flags, 
createInfo.pUserData); fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryPages,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,", callParams.threadId, callParams.time, frameIndex, - vkMemReq.size, - vkMemReq.alignment, - vkMemReq.memoryTypeBits, - createInfo.flags, - createInfo.usage, - createInfo.requiredFlags, - createInfo.preferredFlags, - createInfo.memoryTypeBits, - createInfo.pool); + vkMemReq.size, + vkMemReq.alignment, + vkMemReq.memoryTypeBits, + createInfo.flags, + createInfo.usage, + createInfo.requiredFlags, + createInfo.preferredFlags, + createInfo.memoryTypeBits, + createInfo.pool); PrintPointerList(allocationCount, pAllocations); fprintf(m_File, ",%s\n", userDataStr.GetString()); Flush(); } void VmaRecorder::RecordAllocateMemoryForBuffer(uint32_t frameIndex, - const VkMemoryRequirements &vkMemReq, - bool requiresDedicatedAllocation, - bool prefersDedicatedAllocation, - const VmaAllocationCreateInfo &createInfo, - VmaAllocation allocation) { + const VkMemoryRequirements& vkMemReq, + bool requiresDedicatedAllocation, + bool prefersDedicatedAllocation, + const VmaAllocationCreateInfo& createInfo, + VmaAllocation allocation) +{ CallParams callParams; GetBasicParams(callParams); VmaMutexLock lock(m_FileMutex, m_UseMutex); UserDataString userDataStr(createInfo.flags, createInfo.pUserData); fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForBuffer,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex, - vkMemReq.size, - vkMemReq.alignment, - vkMemReq.memoryTypeBits, - requiresDedicatedAllocation ? 1 : 0, - prefersDedicatedAllocation ? 1 : 0, - createInfo.flags, - createInfo.usage, - createInfo.requiredFlags, - createInfo.preferredFlags, - createInfo.memoryTypeBits, - createInfo.pool, - allocation, - userDataStr.GetString()); + vkMemReq.size, + vkMemReq.alignment, + vkMemReq.memoryTypeBits, + requiresDedicatedAllocation ? 1 : 0, + prefersDedicatedAllocation ? 
1 : 0, + createInfo.flags, + createInfo.usage, + createInfo.requiredFlags, + createInfo.preferredFlags, + createInfo.memoryTypeBits, + createInfo.pool, + allocation, + userDataStr.GetString()); Flush(); } void VmaRecorder::RecordAllocateMemoryForImage(uint32_t frameIndex, - const VkMemoryRequirements &vkMemReq, - bool requiresDedicatedAllocation, - bool prefersDedicatedAllocation, - const VmaAllocationCreateInfo &createInfo, - VmaAllocation allocation) { + const VkMemoryRequirements& vkMemReq, + bool requiresDedicatedAllocation, + bool prefersDedicatedAllocation, + const VmaAllocationCreateInfo& createInfo, + VmaAllocation allocation) +{ CallParams callParams; GetBasicParams(callParams); VmaMutexLock lock(m_FileMutex, m_UseMutex); UserDataString userDataStr(createInfo.flags, createInfo.pUserData); fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForImage,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex, - vkMemReq.size, - vkMemReq.alignment, - vkMemReq.memoryTypeBits, - requiresDedicatedAllocation ? 1 : 0, - prefersDedicatedAllocation ? 1 : 0, - createInfo.flags, - createInfo.usage, - createInfo.requiredFlags, - createInfo.preferredFlags, - createInfo.memoryTypeBits, - createInfo.pool, - allocation, - userDataStr.GetString()); + vkMemReq.size, + vkMemReq.alignment, + vkMemReq.memoryTypeBits, + requiresDedicatedAllocation ? 1 : 0, + prefersDedicatedAllocation ? 
1 : 0, + createInfo.flags, + createInfo.usage, + createInfo.requiredFlags, + createInfo.preferredFlags, + createInfo.memoryTypeBits, + createInfo.pool, + allocation, + userDataStr.GetString()); Flush(); } void VmaRecorder::RecordFreeMemory(uint32_t frameIndex, - VmaAllocation allocation) { + VmaAllocation allocation) +{ CallParams callParams; GetBasicParams(callParams); VmaMutexLock lock(m_FileMutex, m_UseMutex); fprintf(m_File, "%u,%.3f,%u,vmaFreeMemory,%p\n", callParams.threadId, callParams.time, frameIndex, - allocation); + allocation); Flush(); } void VmaRecorder::RecordFreeMemoryPages(uint32_t frameIndex, - uint64_t allocationCount, - const VmaAllocation *pAllocations) { + uint64_t allocationCount, + const VmaAllocation* pAllocations) +{ CallParams callParams; GetBasicParams(callParams); @@ -12586,260 +14563,289 @@ void VmaRecorder::RecordFreeMemoryPages(uint32_t frameIndex, Flush(); } -void VmaRecorder::RecordResizeAllocation( - uint32_t frameIndex, - VmaAllocation allocation, - VkDeviceSize newSize) { - CallParams callParams; - GetBasicParams(callParams); - - VmaMutexLock lock(m_FileMutex, m_UseMutex); - fprintf(m_File, "%u,%.3f,%u,vmaResizeAllocation,%p,%llu\n", callParams.threadId, callParams.time, frameIndex, - allocation, newSize); - Flush(); -} - void VmaRecorder::RecordSetAllocationUserData(uint32_t frameIndex, - VmaAllocation allocation, - const void *pUserData) { + VmaAllocation allocation, + const void* pUserData) +{ CallParams callParams; GetBasicParams(callParams); VmaMutexLock lock(m_FileMutex, m_UseMutex); UserDataString userDataStr( - allocation->IsUserDataString() ? VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT : 0, - pUserData); + allocation->IsUserDataString() ? 
VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT : 0, + pUserData); fprintf(m_File, "%u,%.3f,%u,vmaSetAllocationUserData,%p,%s\n", callParams.threadId, callParams.time, frameIndex, - allocation, - userDataStr.GetString()); + allocation, + userDataStr.GetString()); Flush(); } void VmaRecorder::RecordCreateLostAllocation(uint32_t frameIndex, - VmaAllocation allocation) { + VmaAllocation allocation) +{ CallParams callParams; GetBasicParams(callParams); VmaMutexLock lock(m_FileMutex, m_UseMutex); fprintf(m_File, "%u,%.3f,%u,vmaCreateLostAllocation,%p\n", callParams.threadId, callParams.time, frameIndex, - allocation); + allocation); Flush(); } void VmaRecorder::RecordMapMemory(uint32_t frameIndex, - VmaAllocation allocation) { + VmaAllocation allocation) +{ CallParams callParams; GetBasicParams(callParams); VmaMutexLock lock(m_FileMutex, m_UseMutex); fprintf(m_File, "%u,%.3f,%u,vmaMapMemory,%p\n", callParams.threadId, callParams.time, frameIndex, - allocation); + allocation); Flush(); } void VmaRecorder::RecordUnmapMemory(uint32_t frameIndex, - VmaAllocation allocation) { + VmaAllocation allocation) +{ CallParams callParams; GetBasicParams(callParams); VmaMutexLock lock(m_FileMutex, m_UseMutex); fprintf(m_File, "%u,%.3f,%u,vmaUnmapMemory,%p\n", callParams.threadId, callParams.time, frameIndex, - allocation); + allocation); Flush(); } void VmaRecorder::RecordFlushAllocation(uint32_t frameIndex, - VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size) { + VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size) +{ CallParams callParams; GetBasicParams(callParams); VmaMutexLock lock(m_FileMutex, m_UseMutex); fprintf(m_File, "%u,%.3f,%u,vmaFlushAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex, - allocation, - offset, - size); + allocation, + offset, + size); Flush(); } void VmaRecorder::RecordInvalidateAllocation(uint32_t frameIndex, - VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size) { + VmaAllocation 
allocation, VkDeviceSize offset, VkDeviceSize size) +{ CallParams callParams; GetBasicParams(callParams); VmaMutexLock lock(m_FileMutex, m_UseMutex); fprintf(m_File, "%u,%.3f,%u,vmaInvalidateAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex, - allocation, - offset, - size); + allocation, + offset, + size); Flush(); } void VmaRecorder::RecordCreateBuffer(uint32_t frameIndex, - const VkBufferCreateInfo &bufCreateInfo, - const VmaAllocationCreateInfo &allocCreateInfo, - VmaAllocation allocation) { + const VkBufferCreateInfo& bufCreateInfo, + const VmaAllocationCreateInfo& allocCreateInfo, + VmaAllocation allocation) +{ CallParams callParams; GetBasicParams(callParams); VmaMutexLock lock(m_FileMutex, m_UseMutex); UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData); fprintf(m_File, "%u,%.3f,%u,vmaCreateBuffer,%u,%llu,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex, - bufCreateInfo.flags, - bufCreateInfo.size, - bufCreateInfo.usage, - bufCreateInfo.sharingMode, - allocCreateInfo.flags, - allocCreateInfo.usage, - allocCreateInfo.requiredFlags, - allocCreateInfo.preferredFlags, - allocCreateInfo.memoryTypeBits, - allocCreateInfo.pool, - allocation, - userDataStr.GetString()); + bufCreateInfo.flags, + bufCreateInfo.size, + bufCreateInfo.usage, + bufCreateInfo.sharingMode, + allocCreateInfo.flags, + allocCreateInfo.usage, + allocCreateInfo.requiredFlags, + allocCreateInfo.preferredFlags, + allocCreateInfo.memoryTypeBits, + allocCreateInfo.pool, + allocation, + userDataStr.GetString()); Flush(); } void VmaRecorder::RecordCreateImage(uint32_t frameIndex, - const VkImageCreateInfo &imageCreateInfo, - const VmaAllocationCreateInfo &allocCreateInfo, - VmaAllocation allocation) { + const VkImageCreateInfo& imageCreateInfo, + const VmaAllocationCreateInfo& allocCreateInfo, + VmaAllocation allocation) +{ CallParams callParams; GetBasicParams(callParams); VmaMutexLock lock(m_FileMutex, m_UseMutex); 
UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData); fprintf(m_File, "%u,%.3f,%u,vmaCreateImage,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex, - imageCreateInfo.flags, - imageCreateInfo.imageType, - imageCreateInfo.format, - imageCreateInfo.extent.width, - imageCreateInfo.extent.height, - imageCreateInfo.extent.depth, - imageCreateInfo.mipLevels, - imageCreateInfo.arrayLayers, - imageCreateInfo.samples, - imageCreateInfo.tiling, - imageCreateInfo.usage, - imageCreateInfo.sharingMode, - imageCreateInfo.initialLayout, - allocCreateInfo.flags, - allocCreateInfo.usage, - allocCreateInfo.requiredFlags, - allocCreateInfo.preferredFlags, - allocCreateInfo.memoryTypeBits, - allocCreateInfo.pool, - allocation, - userDataStr.GetString()); + imageCreateInfo.flags, + imageCreateInfo.imageType, + imageCreateInfo.format, + imageCreateInfo.extent.width, + imageCreateInfo.extent.height, + imageCreateInfo.extent.depth, + imageCreateInfo.mipLevels, + imageCreateInfo.arrayLayers, + imageCreateInfo.samples, + imageCreateInfo.tiling, + imageCreateInfo.usage, + imageCreateInfo.sharingMode, + imageCreateInfo.initialLayout, + allocCreateInfo.flags, + allocCreateInfo.usage, + allocCreateInfo.requiredFlags, + allocCreateInfo.preferredFlags, + allocCreateInfo.memoryTypeBits, + allocCreateInfo.pool, + allocation, + userDataStr.GetString()); Flush(); } void VmaRecorder::RecordDestroyBuffer(uint32_t frameIndex, - VmaAllocation allocation) { + VmaAllocation allocation) +{ CallParams callParams; GetBasicParams(callParams); VmaMutexLock lock(m_FileMutex, m_UseMutex); fprintf(m_File, "%u,%.3f,%u,vmaDestroyBuffer,%p\n", callParams.threadId, callParams.time, frameIndex, - allocation); + allocation); Flush(); } void VmaRecorder::RecordDestroyImage(uint32_t frameIndex, - VmaAllocation allocation) { + VmaAllocation allocation) +{ CallParams callParams; GetBasicParams(callParams); VmaMutexLock lock(m_FileMutex, 
m_UseMutex); fprintf(m_File, "%u,%.3f,%u,vmaDestroyImage,%p\n", callParams.threadId, callParams.time, frameIndex, - allocation); + allocation); Flush(); } void VmaRecorder::RecordTouchAllocation(uint32_t frameIndex, - VmaAllocation allocation) { + VmaAllocation allocation) +{ CallParams callParams; GetBasicParams(callParams); VmaMutexLock lock(m_FileMutex, m_UseMutex); fprintf(m_File, "%u,%.3f,%u,vmaTouchAllocation,%p\n", callParams.threadId, callParams.time, frameIndex, - allocation); + allocation); Flush(); } void VmaRecorder::RecordGetAllocationInfo(uint32_t frameIndex, - VmaAllocation allocation) { + VmaAllocation allocation) +{ CallParams callParams; GetBasicParams(callParams); VmaMutexLock lock(m_FileMutex, m_UseMutex); fprintf(m_File, "%u,%.3f,%u,vmaGetAllocationInfo,%p\n", callParams.threadId, callParams.time, frameIndex, - allocation); + allocation); Flush(); } void VmaRecorder::RecordMakePoolAllocationsLost(uint32_t frameIndex, - VmaPool pool) { + VmaPool pool) +{ CallParams callParams; GetBasicParams(callParams); VmaMutexLock lock(m_FileMutex, m_UseMutex); fprintf(m_File, "%u,%.3f,%u,vmaMakePoolAllocationsLost,%p\n", callParams.threadId, callParams.time, frameIndex, - pool); + pool); Flush(); } void VmaRecorder::RecordDefragmentationBegin(uint32_t frameIndex, - const VmaDefragmentationInfo2 &info, - VmaDefragmentationContext ctx) { + const VmaDefragmentationInfo2& info, + VmaDefragmentationContext ctx) +{ CallParams callParams; GetBasicParams(callParams); VmaMutexLock lock(m_FileMutex, m_UseMutex); fprintf(m_File, "%u,%.3f,%u,vmaDefragmentationBegin,%u,", callParams.threadId, callParams.time, frameIndex, - info.flags); + info.flags); PrintPointerList(info.allocationCount, info.pAllocations); fprintf(m_File, ","); PrintPointerList(info.poolCount, info.pPools); fprintf(m_File, ",%llu,%u,%llu,%u,%p,%p\n", - info.maxCpuBytesToMove, - info.maxCpuAllocationsToMove, - info.maxGpuBytesToMove, - info.maxGpuAllocationsToMove, - info.commandBuffer, - ctx); + 
info.maxCpuBytesToMove, + info.maxCpuAllocationsToMove, + info.maxGpuBytesToMove, + info.maxGpuAllocationsToMove, + info.commandBuffer, + ctx); Flush(); } void VmaRecorder::RecordDefragmentationEnd(uint32_t frameIndex, - VmaDefragmentationContext ctx) { + VmaDefragmentationContext ctx) +{ CallParams callParams; GetBasicParams(callParams); VmaMutexLock lock(m_FileMutex, m_UseMutex); fprintf(m_File, "%u,%.3f,%u,vmaDefragmentationEnd,%p\n", callParams.threadId, callParams.time, frameIndex, - ctx); + ctx); + Flush(); +} + +void VmaRecorder::RecordSetPoolName(uint32_t frameIndex, + VmaPool pool, + const char* name) +{ + CallParams callParams; + GetBasicParams(callParams); + + VmaMutexLock lock(m_FileMutex, m_UseMutex); + fprintf(m_File, "%u,%.3f,%u,vmaSetPoolName,%p,%s\n", callParams.threadId, callParams.time, frameIndex, + pool, name != VMA_NULL ? name : ""); Flush(); } -VmaRecorder::UserDataString::UserDataString(VmaAllocationCreateFlags allocFlags, const void *pUserData) { - if (pUserData != VMA_NULL) { - if ((allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0) { - m_Str = (const char *)pUserData; - } else { +VmaRecorder::UserDataString::UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData) +{ + if(pUserData != VMA_NULL) + { + if((allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0) + { + m_Str = (const char*)pUserData; + } + else + { sprintf_s(m_PtrStr, "%p", pUserData); m_Str = m_PtrStr; } - } else { + } + else + { m_Str = ""; } } void VmaRecorder::WriteConfiguration( - const VkPhysicalDeviceProperties &devProps, - const VkPhysicalDeviceMemoryProperties &memProps, - bool dedicatedAllocationExtensionEnabled) { + const VkPhysicalDeviceProperties& devProps, + const VkPhysicalDeviceMemoryProperties& memProps, + uint32_t vulkanApiVersion, + bool dedicatedAllocationExtensionEnabled, + bool bindMemory2ExtensionEnabled, + bool memoryBudgetExtensionEnabled, + bool deviceCoherentMemoryExtensionEnabled) +{ fprintf(m_File, 
"Config,Begin\n"); + fprintf(m_File, "VulkanApiVersion,%u,%u\n", VK_VERSION_MAJOR(vulkanApiVersion), VK_VERSION_MINOR(vulkanApiVersion)); + fprintf(m_File, "PhysicalDevice,apiVersion,%u\n", devProps.apiVersion); fprintf(m_File, "PhysicalDevice,driverVersion,%u\n", devProps.driverVersion); fprintf(m_File, "PhysicalDevice,vendorID,%u\n", devProps.vendorID); @@ -12852,17 +14858,22 @@ void VmaRecorder::WriteConfiguration( fprintf(m_File, "PhysicalDeviceLimits,nonCoherentAtomSize,%llu\n", devProps.limits.nonCoherentAtomSize); fprintf(m_File, "PhysicalDeviceMemory,HeapCount,%u\n", memProps.memoryHeapCount); - for (uint32_t i = 0; i < memProps.memoryHeapCount; ++i) { + for(uint32_t i = 0; i < memProps.memoryHeapCount; ++i) + { fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,size,%llu\n", i, memProps.memoryHeaps[i].size); fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,flags,%u\n", i, memProps.memoryHeaps[i].flags); } fprintf(m_File, "PhysicalDeviceMemory,TypeCount,%u\n", memProps.memoryTypeCount); - for (uint32_t i = 0; i < memProps.memoryTypeCount; ++i) { + for(uint32_t i = 0; i < memProps.memoryTypeCount; ++i) + { fprintf(m_File, "PhysicalDeviceMemory,Type,%u,heapIndex,%u\n", i, memProps.memoryTypes[i].heapIndex); fprintf(m_File, "PhysicalDeviceMemory,Type,%u,propertyFlags,%u\n", i, memProps.memoryTypes[i].propertyFlags); } fprintf(m_File, "Extension,VK_KHR_dedicated_allocation,%u\n", dedicatedAllocationExtensionEnabled ? 1 : 0); + fprintf(m_File, "Extension,VK_KHR_bind_memory2,%u\n", bindMemory2ExtensionEnabled ? 1 : 0); + fprintf(m_File, "Extension,VK_EXT_memory_budget,%u\n", memoryBudgetExtensionEnabled ? 1 : 0); + fprintf(m_File, "Extension,VK_AMD_device_coherent_memory,%u\n", deviceCoherentMemoryExtensionEnabled ? 1 : 0); fprintf(m_File, "Macro,VMA_DEBUG_ALWAYS_DEDICATED_MEMORY,%u\n", VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ? 
1 : 0); fprintf(m_File, "Macro,VMA_DEBUG_ALIGNMENT,%llu\n", (VkDeviceSize)VMA_DEBUG_ALIGNMENT); @@ -12877,7 +14888,8 @@ void VmaRecorder::WriteConfiguration( fprintf(m_File, "Config,End\n"); } -void VmaRecorder::GetBasicParams(CallParams &outParams) { +void VmaRecorder::GetBasicParams(CallParams& outParams) +{ outParams.threadId = GetCurrentThreadId(); LARGE_INTEGER counter; @@ -12885,17 +14897,22 @@ void VmaRecorder::GetBasicParams(CallParams &outParams) { outParams.time = (double)(counter.QuadPart - m_StartCounter) / (double)m_Freq; } -void VmaRecorder::PrintPointerList(uint64_t count, const VmaAllocation *pItems) { - if (count) { +void VmaRecorder::PrintPointerList(uint64_t count, const VmaAllocation* pItems) +{ + if(count) + { fprintf(m_File, "%p", pItems[0]); - for (uint64_t i = 1; i < count; ++i) { + for(uint64_t i = 1; i < count; ++i) + { fprintf(m_File, " %p", pItems[i]); } } } -void VmaRecorder::Flush() { - if ((m_Flags & VMA_RECORD_FLUSH_AFTER_CALL_BIT) != 0) { +void VmaRecorder::Flush() +{ + if((m_Flags & VMA_RECORD_FLUSH_AFTER_CALL_BIT) != 0) + { fflush(m_File); } } @@ -12905,16 +14922,19 @@ void VmaRecorder::Flush() { //////////////////////////////////////////////////////////////////////////////// // VmaAllocationObjectAllocator -VmaAllocationObjectAllocator::VmaAllocationObjectAllocator(const VkAllocationCallbacks *pAllocationCallbacks) : - m_Allocator(pAllocationCallbacks, 1024) { +VmaAllocationObjectAllocator::VmaAllocationObjectAllocator(const VkAllocationCallbacks* pAllocationCallbacks) : + m_Allocator(pAllocationCallbacks, 1024) +{ } -VmaAllocation VmaAllocationObjectAllocator::Allocate() { +template<typename... Types> VmaAllocation VmaAllocationObjectAllocator::Allocate(Types... 
args) +{ VmaMutexLock mutexLock(m_Mutex); - return m_Allocator.Alloc(); + return m_Allocator.Alloc<Types...>(std::forward<Types>(args)...); } -void VmaAllocationObjectAllocator::Free(VmaAllocation hAlloc) { +void VmaAllocationObjectAllocator::Free(VmaAllocation hAlloc) +{ VmaMutexLock mutexLock(m_Mutex); m_Allocator.Free(hAlloc); } @@ -12922,51 +14942,83 @@ void VmaAllocationObjectAllocator::Free(VmaAllocation hAlloc) { //////////////////////////////////////////////////////////////////////////////// // VmaAllocator_T -VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo *pCreateInfo) : - m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0), - m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0), - m_hDevice(pCreateInfo->device), - m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL), - m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ? - *pCreateInfo->pAllocationCallbacks : - VmaEmptyAllocationCallbacks), - m_AllocationObjectAllocator(&m_AllocationCallbacks), - m_PreferredLargeHeapBlockSize(0), - m_PhysicalDevice(pCreateInfo->physicalDevice), - m_CurrentFrameIndex(0), - m_GpuDefragmentationMemoryTypeBits(UINT32_MAX), - m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks())), - m_NextPoolId(0) +VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) : + m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0), + m_VulkanApiVersion(pCreateInfo->vulkanApiVersion != 0 ? 
pCreateInfo->vulkanApiVersion : VK_API_VERSION_1_0), + m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0), + m_UseKhrBindMemory2((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT) != 0), + m_UseExtMemoryBudget((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT) != 0), + m_UseAmdDeviceCoherentMemory((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_AMD_DEVICE_COHERENT_MEMORY_BIT) != 0), + m_hDevice(pCreateInfo->device), + m_hInstance(pCreateInfo->instance), + m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL), + m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ? + *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks), + m_AllocationObjectAllocator(&m_AllocationCallbacks), + m_HeapSizeLimitMask(0), + m_PreferredLargeHeapBlockSize(0), + m_PhysicalDevice(pCreateInfo->physicalDevice), + m_CurrentFrameIndex(0), + m_GpuDefragmentationMemoryTypeBits(UINT32_MAX), + m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks())), + m_NextPoolId(0), + m_GlobalMemoryTypeBits(UINT32_MAX) #if VMA_RECORDING_ENABLED - , - m_pRecorder(VMA_NULL) + ,m_pRecorder(VMA_NULL) #endif { - if (VMA_DEBUG_DETECT_CORRUPTION) { + if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) + { + m_UseKhrDedicatedAllocation = false; + m_UseKhrBindMemory2 = false; + } + + if(VMA_DEBUG_DETECT_CORRUPTION) + { // Needs to be multiply of uint32_t size because we are going to write VMA_CORRUPTION_DETECTION_MAGIC_VALUE to it. 
VMA_ASSERT(VMA_DEBUG_MARGIN % sizeof(uint32_t) == 0); } VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device); + if(m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0)) + { #if !(VMA_DEDICATED_ALLOCATION) - if ((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0) { - VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros."); + if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0) + { + VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros."); + } +#endif +#if !(VMA_BIND_MEMORY2) + if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT) != 0) + { + VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT set but required extension is disabled by preprocessor macros."); + } +#endif + } +#if !(VMA_MEMORY_BUDGET) + if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT) != 0) + { + VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT set but required extension is disabled by preprocessor macros."); + } +#endif +#if VMA_VULKAN_VERSION < 1001000 + if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) + { + VMA_ASSERT(0 && "vulkanApiVersion >= VK_API_VERSION_1_1 but required Vulkan version is disabled by preprocessor macros."); } #endif - memset(&m_DeviceMemoryCallbacks, 0, sizeof(m_DeviceMemoryCallbacks)); + memset(&m_DeviceMemoryCallbacks, 0 ,sizeof(m_DeviceMemoryCallbacks)); memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties)); memset(&m_MemProps, 0, sizeof(m_MemProps)); - + memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors)); memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations)); + memset(&m_VulkanFunctions, 0, sizeof(m_VulkanFunctions)); - for (uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i) { - m_HeapSizeLimit[i] = VK_WHOLE_SIZE; - } - - if (pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL) { + 
if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL) + { m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate; m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree; } @@ -12982,57 +15034,70 @@ VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo *pCreateInfo) : VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.nonCoherentAtomSize)); m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ? - pCreateInfo->preferredLargeHeapBlockSize : - static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE); + pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE); + + m_GlobalMemoryTypeBits = CalculateGlobalMemoryTypeBits(); - if (pCreateInfo->pHeapSizeLimit != VMA_NULL) { - for (uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex) { + if(pCreateInfo->pHeapSizeLimit != VMA_NULL) + { + for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex) + { const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex]; - if (limit != VK_WHOLE_SIZE) { - m_HeapSizeLimit[heapIndex] = limit; - if (limit < m_MemProps.memoryHeaps[heapIndex].size) { + if(limit != VK_WHOLE_SIZE) + { + m_HeapSizeLimitMask |= 1u << heapIndex; + if(limit < m_MemProps.memoryHeaps[heapIndex].size) + { m_MemProps.memoryHeaps[heapIndex].size = limit; } } } } - for (uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex) { + for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex) + { const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex); m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)( - this, - VK_NULL_HANDLE, // hParentPool - memTypeIndex, - preferredBlockSize, - 0, - SIZE_MAX, - GetBufferImageGranularity(), - pCreateInfo->frameInUseCount, - false, // isCustomPool - false, // explicitBlockSize - false); // linearAlgorithm + this, + VK_NULL_HANDLE, // 
hParentPool + memTypeIndex, + preferredBlockSize, + 0, + SIZE_MAX, + GetBufferImageGranularity(), + pCreateInfo->frameInUseCount, + false, // explicitBlockSize + false); // linearAlgorithm // No need to call m_pBlockVectors[memTypeIndex][blockVectorTypeIndex]->CreateMinBlocks here, // becase minBlockCount is 0. m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks())); + } } -VkResult VmaAllocator_T::Init(const VmaAllocatorCreateInfo *pCreateInfo) { +VkResult VmaAllocator_T::Init(const VmaAllocatorCreateInfo* pCreateInfo) +{ VkResult res = VK_SUCCESS; - if (pCreateInfo->pRecordSettings != VMA_NULL && - !VmaStrIsEmpty(pCreateInfo->pRecordSettings->pFilePath)) { + if(pCreateInfo->pRecordSettings != VMA_NULL && + !VmaStrIsEmpty(pCreateInfo->pRecordSettings->pFilePath)) + { #if VMA_RECORDING_ENABLED m_pRecorder = vma_new(this, VmaRecorder)(); res = m_pRecorder->Init(*pCreateInfo->pRecordSettings, m_UseMutex); - if (res != VK_SUCCESS) { + if(res != VK_SUCCESS) + { return res; } m_pRecorder->WriteConfiguration( - m_PhysicalDeviceProperties, - m_MemProps, - m_UseKhrDedicatedAllocation); + m_PhysicalDeviceProperties, + m_MemProps, + m_VulkanApiVersion, + m_UseKhrDedicatedAllocation, + m_UseKhrBindMemory2, + m_UseExtMemoryBudget, + m_UseAmdDeviceCoherentMemory); m_pRecorder->RecordCreateAllocator(GetCurrentFrameIndex()); #else VMA_ASSERT(0 && "VmaAllocatorCreateInfo::pRecordSettings used, but not supported due to VMA_RECORDING_ENABLED not defined to 1."); @@ -13040,21 +15105,32 @@ VkResult VmaAllocator_T::Init(const VmaAllocatorCreateInfo *pCreateInfo) { #endif } +#if VMA_MEMORY_BUDGET + if(m_UseExtMemoryBudget) + { + UpdateVulkanBudget(); + } +#endif // #if VMA_MEMORY_BUDGET + return res; } -VmaAllocator_T::~VmaAllocator_T() { +VmaAllocator_T::~VmaAllocator_T() +{ #if VMA_RECORDING_ENABLED - if (m_pRecorder != VMA_NULL) { + if(m_pRecorder != VMA_NULL) + { 
m_pRecorder->RecordDestroyAllocator(GetCurrentFrameIndex()); vma_delete(this, m_pRecorder); } #endif - + VMA_ASSERT(m_Pools.empty()); - for (size_t i = GetMemoryTypeCount(); i--;) { - if (m_pDedicatedAllocations[i] != VMA_NULL && !m_pDedicatedAllocations[i]->empty()) { + for(size_t i = GetMemoryTypeCount(); i--; ) + { + if(m_pDedicatedAllocations[i] != VMA_NULL && !m_pDedicatedAllocations[i]->empty()) + { VMA_ASSERT(0 && "Unfreed dedicated allocations found."); } @@ -13063,7 +15139,8 @@ VmaAllocator_T::~VmaAllocator_T() { } } -void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions *pVulkanFunctions) { +void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions) +{ #if VMA_STATIC_VULKAN_FUNCTIONS == 1 m_VulkanFunctions.vkGetPhysicalDeviceProperties = (PFN_vkGetPhysicalDeviceProperties)vkGetPhysicalDeviceProperties; m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = (PFN_vkGetPhysicalDeviceMemoryProperties)vkGetPhysicalDeviceMemoryProperties; @@ -13082,20 +15159,55 @@ void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions *pVulkanFunc m_VulkanFunctions.vkCreateImage = (PFN_vkCreateImage)vkCreateImage; m_VulkanFunctions.vkDestroyImage = (PFN_vkDestroyImage)vkDestroyImage; m_VulkanFunctions.vkCmdCopyBuffer = (PFN_vkCmdCopyBuffer)vkCmdCopyBuffer; +#if VMA_VULKAN_VERSION >= 1001000 + if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) + { + VMA_ASSERT(m_hInstance != VK_NULL_HANDLE); + m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR = + (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2"); + m_VulkanFunctions.vkGetImageMemoryRequirements2KHR = + (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2"); + m_VulkanFunctions.vkBindBufferMemory2KHR = + (PFN_vkBindBufferMemory2KHR)vkGetDeviceProcAddr(m_hDevice, "vkBindBufferMemory2"); + m_VulkanFunctions.vkBindImageMemory2KHR = + 
(PFN_vkBindImageMemory2KHR)vkGetDeviceProcAddr(m_hDevice, "vkBindImageMemory2"); + m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties2KHR = + (PFN_vkGetPhysicalDeviceMemoryProperties2KHR)vkGetInstanceProcAddr(m_hInstance, "vkGetPhysicalDeviceMemoryProperties2"); + } +#endif #if VMA_DEDICATED_ALLOCATION - if (m_UseKhrDedicatedAllocation) { + if(m_UseKhrDedicatedAllocation) + { m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR = - (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2KHR"); + (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2KHR"); m_VulkanFunctions.vkGetImageMemoryRequirements2KHR = - (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2KHR"); + (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2KHR"); + } +#endif +#if VMA_BIND_MEMORY2 + if(m_UseKhrBindMemory2) + { + m_VulkanFunctions.vkBindBufferMemory2KHR = + (PFN_vkBindBufferMemory2KHR)vkGetDeviceProcAddr(m_hDevice, "vkBindBufferMemory2KHR"); + m_VulkanFunctions.vkBindImageMemory2KHR = + (PFN_vkBindImageMemory2KHR)vkGetDeviceProcAddr(m_hDevice, "vkBindImageMemory2KHR"); + } +#endif // #if VMA_BIND_MEMORY2 +#if VMA_MEMORY_BUDGET + if(m_UseExtMemoryBudget && m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0)) + { + VMA_ASSERT(m_hInstance != VK_NULL_HANDLE); + m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties2KHR = + (PFN_vkGetPhysicalDeviceMemoryProperties2KHR)vkGetInstanceProcAddr(m_hInstance, "vkGetPhysicalDeviceMemoryProperties2KHR"); } -#endif // #if VMA_DEDICATED_ALLOCATION +#endif // #if VMA_MEMORY_BUDGET #endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1 #define VMA_COPY_IF_NOT_NULL(funcName) \ - if (pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName; + if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName; - if 
(pVulkanFunctions != VMA_NULL) { + if(pVulkanFunctions != VMA_NULL) + { VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties); VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties); VMA_COPY_IF_NOT_NULL(vkAllocateMemory); @@ -13113,10 +15225,17 @@ void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions *pVulkanFunc VMA_COPY_IF_NOT_NULL(vkCreateImage); VMA_COPY_IF_NOT_NULL(vkDestroyImage); VMA_COPY_IF_NOT_NULL(vkCmdCopyBuffer); -#if VMA_DEDICATED_ALLOCATION +#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000 VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR); VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR); #endif +#if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000 + VMA_COPY_IF_NOT_NULL(vkBindBufferMemory2KHR); + VMA_COPY_IF_NOT_NULL(vkBindImageMemory2KHR); +#endif +#if VMA_MEMORY_BUDGET + VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties2KHR); +#endif } #undef VMA_COPY_IF_NOT_NULL @@ -13140,108 +15259,146 @@ void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions *pVulkanFunc VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL); VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL); VMA_ASSERT(m_VulkanFunctions.vkCmdCopyBuffer != VMA_NULL); -#if VMA_DEDICATED_ALLOCATION - if (m_UseKhrDedicatedAllocation) { +#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000 + if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0) || m_UseKhrDedicatedAllocation) + { VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL); VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL); } #endif +#if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000 + if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0) || m_UseKhrBindMemory2) + { + VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory2KHR != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory2KHR != VMA_NULL); + } +#endif +#if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000 + if(m_UseExtMemoryBudget || m_VulkanApiVersion >= 
VK_MAKE_VERSION(1, 1, 0)) + { + VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties2KHR != VMA_NULL); + } +#endif } -VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex) { +VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex) +{ const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex); const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size; const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE; - return isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize; + return VmaAlignUp(isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize, (VkDeviceSize)32); } VkResult VmaAllocator_T::AllocateMemoryOfType( - VkDeviceSize size, - VkDeviceSize alignment, - bool dedicatedAllocation, - VkBuffer dedicatedBuffer, - VkImage dedicatedImage, - const VmaAllocationCreateInfo &createInfo, - uint32_t memTypeIndex, - VmaSuballocationType suballocType, - size_t allocationCount, - VmaAllocation *pAllocations) { + VkDeviceSize size, + VkDeviceSize alignment, + bool dedicatedAllocation, + VkBuffer dedicatedBuffer, + VkImage dedicatedImage, + const VmaAllocationCreateInfo& createInfo, + uint32_t memTypeIndex, + VmaSuballocationType suballocType, + size_t allocationCount, + VmaAllocation* pAllocations) +{ VMA_ASSERT(pAllocations != VMA_NULL); VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, AllocationCount=%zu, Size=%llu", memTypeIndex, allocationCount, size); VmaAllocationCreateInfo finalCreateInfo = createInfo; // If memory type is not HOST_VISIBLE, disable MAPPED. 
- if ((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 && - (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0) { + if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 && + (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0) + { finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT; } + // If memory is lazily allocated, it should be always dedicated. + if(finalCreateInfo.usage == VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED) + { + finalCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT; + } - VmaBlockVector *const blockVector = m_pBlockVectors[memTypeIndex]; + VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex]; VMA_ASSERT(blockVector); const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize(); bool preferDedicatedMemory = - VMA_DEBUG_ALWAYS_DEDICATED_MEMORY || - dedicatedAllocation || - // Heuristics: Allocate dedicated memory if requested size if greater than half of preferred block size. - size > preferredBlockSize / 2; - - if (preferDedicatedMemory && - (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 && - finalCreateInfo.pool == VK_NULL_HANDLE) { + VMA_DEBUG_ALWAYS_DEDICATED_MEMORY || + dedicatedAllocation || + // Heuristics: Allocate dedicated memory if requested size if greater than half of preferred block size. 
+ size > preferredBlockSize / 2; + + if(preferDedicatedMemory && + (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 && + finalCreateInfo.pool == VK_NULL_HANDLE) + { finalCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT; } - if ((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0) { - if ((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0) { + if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0) + { + if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0) + { return VK_ERROR_OUT_OF_DEVICE_MEMORY; - } else { + } + else + { return AllocateDedicatedMemory( - size, - suballocType, - memTypeIndex, - (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0, - (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0, - finalCreateInfo.pUserData, - dedicatedBuffer, - dedicatedImage, - allocationCount, - pAllocations); - } - } else { - VkResult res = blockVector->Allocate( - m_CurrentFrameIndex.load(), size, - alignment, - finalCreateInfo, suballocType, + memTypeIndex, + (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT) != 0, + (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0, + (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0, + finalCreateInfo.pUserData, + dedicatedBuffer, + dedicatedImage, allocationCount, pAllocations); - if (res == VK_SUCCESS) { + } + } + else + { + VkResult res = blockVector->Allocate( + m_CurrentFrameIndex.load(), + size, + alignment, + finalCreateInfo, + suballocType, + allocationCount, + pAllocations); + if(res == VK_SUCCESS) + { return res; } // 5. Try dedicated memory. 
- if ((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0) { + if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0) + { return VK_ERROR_OUT_OF_DEVICE_MEMORY; - } else { + } + else + { res = AllocateDedicatedMemory( - size, - suballocType, - memTypeIndex, - (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0, - (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0, - finalCreateInfo.pUserData, - dedicatedBuffer, - dedicatedImage, - allocationCount, - pAllocations); - if (res == VK_SUCCESS) { + size, + suballocType, + memTypeIndex, + (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT) != 0, + (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0, + (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0, + finalCreateInfo.pUserData, + dedicatedBuffer, + dedicatedImage, + allocationCount, + pAllocations); + if(res == VK_SUCCESS) + { // Succeeded: AllocateDedicatedMemory function already filld pMemory, nothing more to do here. VMA_DEBUG_LOG(" Allocated as DedicatedMemory"); return VK_SUCCESS; - } else { + } + else + { // Everything failed: Return error code. 
VMA_DEBUG_LOG(" vkAllocateMemory FAILED"); return res; @@ -13251,85 +15408,108 @@ VkResult VmaAllocator_T::AllocateMemoryOfType( } VkResult VmaAllocator_T::AllocateDedicatedMemory( - VkDeviceSize size, - VmaSuballocationType suballocType, - uint32_t memTypeIndex, - bool map, - bool isUserDataString, - void *pUserData, - VkBuffer dedicatedBuffer, - VkImage dedicatedImage, - size_t allocationCount, - VmaAllocation *pAllocations) { + VkDeviceSize size, + VmaSuballocationType suballocType, + uint32_t memTypeIndex, + bool withinBudget, + bool map, + bool isUserDataString, + void* pUserData, + VkBuffer dedicatedBuffer, + VkImage dedicatedImage, + size_t allocationCount, + VmaAllocation* pAllocations) +{ VMA_ASSERT(allocationCount > 0 && pAllocations); + if(withinBudget) + { + const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex); + VmaBudget heapBudget = {}; + GetBudget(&heapBudget, heapIndex, 1); + if(heapBudget.usage + size * allocationCount > heapBudget.budget) + { + return VK_ERROR_OUT_OF_DEVICE_MEMORY; + } + } + VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO }; allocInfo.memoryTypeIndex = memTypeIndex; allocInfo.allocationSize = size; -#if VMA_DEDICATED_ALLOCATION +#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000 VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR }; - if (m_UseKhrDedicatedAllocation) { - if (dedicatedBuffer != VK_NULL_HANDLE) { + if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) + { + if(dedicatedBuffer != VK_NULL_HANDLE) + { VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE); dedicatedAllocInfo.buffer = dedicatedBuffer; allocInfo.pNext = &dedicatedAllocInfo; - } else if (dedicatedImage != VK_NULL_HANDLE) { + } + else if(dedicatedImage != VK_NULL_HANDLE) + { dedicatedAllocInfo.image = dedicatedImage; allocInfo.pNext = &dedicatedAllocInfo; } } -#endif // #if VMA_DEDICATED_ALLOCATION +#endif // #if 
VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000 size_t allocIndex; VkResult res = VK_SUCCESS; - for (allocIndex = 0; allocIndex < allocationCount; ++allocIndex) { + for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex) + { res = AllocateDedicatedMemoryPage( - size, - suballocType, - memTypeIndex, - allocInfo, - map, - isUserDataString, - pUserData, - pAllocations + allocIndex); - if (res != VK_SUCCESS) { + size, + suballocType, + memTypeIndex, + allocInfo, + map, + isUserDataString, + pUserData, + pAllocations + allocIndex); + if(res != VK_SUCCESS) + { break; } } - if (res == VK_SUCCESS) { + if(res == VK_SUCCESS) + { // Register them in m_pDedicatedAllocations. { VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex); - AllocationVectorType *pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex]; + AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex]; VMA_ASSERT(pDedicatedAllocations); - for (allocIndex = 0; allocIndex < allocationCount; ++allocIndex) { + for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex) + { VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, pAllocations[allocIndex]); } } VMA_DEBUG_LOG(" Allocated DedicatedMemory Count=%zu, MemoryTypeIndex=#%u", allocationCount, memTypeIndex); - } else { + } + else + { // Free all already created allocations. - while (allocIndex--) { + while(allocIndex--) + { VmaAllocation currAlloc = pAllocations[allocIndex]; VkDeviceMemory hMemory = currAlloc->GetMemory(); - + /* - There is no need to call this, because Vulkan spec allows to skip vkUnmapMemory - before vkFreeMemory. - - if(currAlloc->GetMappedData() != VMA_NULL) - { - (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory); - } - */ + There is no need to call this, because Vulkan spec allows to skip vkUnmapMemory + before vkFreeMemory. 
+ if(currAlloc->GetMappedData() != VMA_NULL) + { + (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory); + } + */ + FreeVulkanMemory(memTypeIndex, currAlloc->GetSize(), hMemory); - + m_Budget.RemoveAllocation(MemoryTypeIndexToHeapIndex(memTypeIndex), currAlloc->GetSize()); currAlloc->SetUserData(this, VMA_NULL); - currAlloc->Dtor(); m_AllocationObjectAllocator.Free(currAlloc); } @@ -13340,42 +15520,47 @@ VkResult VmaAllocator_T::AllocateDedicatedMemory( } VkResult VmaAllocator_T::AllocateDedicatedMemoryPage( - VkDeviceSize size, - VmaSuballocationType suballocType, - uint32_t memTypeIndex, - const VkMemoryAllocateInfo &allocInfo, - bool map, - bool isUserDataString, - void *pUserData, - VmaAllocation *pAllocation) { + VkDeviceSize size, + VmaSuballocationType suballocType, + uint32_t memTypeIndex, + const VkMemoryAllocateInfo& allocInfo, + bool map, + bool isUserDataString, + void* pUserData, + VmaAllocation* pAllocation) +{ VkDeviceMemory hMemory = VK_NULL_HANDLE; VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory); - if (res < 0) { + if(res < 0) + { VMA_DEBUG_LOG(" vkAllocateMemory FAILED"); return res; } - void *pMappedData = VMA_NULL; - if (map) { + void* pMappedData = VMA_NULL; + if(map) + { res = (*m_VulkanFunctions.vkMapMemory)( - m_hDevice, - hMemory, - 0, - VK_WHOLE_SIZE, - 0, - &pMappedData); - if (res < 0) { + m_hDevice, + hMemory, + 0, + VK_WHOLE_SIZE, + 0, + &pMappedData); + if(res < 0) + { VMA_DEBUG_LOG(" vkMapMemory FAILED"); FreeVulkanMemory(memTypeIndex, size, hMemory); return res; } } - *pAllocation = m_AllocationObjectAllocator.Allocate(); - (*pAllocation)->Ctor(m_CurrentFrameIndex.load(), isUserDataString); + *pAllocation = m_AllocationObjectAllocator.Allocate(m_CurrentFrameIndex.load(), isUserDataString); (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size); (*pAllocation)->SetUserData(this, pUserData); - if (VMA_DEBUG_INITIALIZE_ALLOCATIONS) { + 
m_Budget.AddAllocation(MemoryTypeIndexToHeapIndex(memTypeIndex), size); + if(VMA_DEBUG_INITIALIZE_ALLOCATIONS) + { FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED); } @@ -13383,12 +15568,14 @@ VkResult VmaAllocator_T::AllocateDedicatedMemoryPage( } void VmaAllocator_T::GetBufferMemoryRequirements( - VkBuffer hBuffer, - VkMemoryRequirements &memReq, - bool &requiresDedicatedAllocation, - bool &prefersDedicatedAllocation) const { -#if VMA_DEDICATED_ALLOCATION - if (m_UseKhrDedicatedAllocation) { + VkBuffer hBuffer, + VkMemoryRequirements& memReq, + bool& requiresDedicatedAllocation, + bool& prefersDedicatedAllocation) const +{ +#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000 + if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) + { VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR }; memReqInfo.buffer = hBuffer; @@ -13401,23 +15588,26 @@ void VmaAllocator_T::GetBufferMemoryRequirements( memReq = memReq2.memoryRequirements; requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE); - prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE); - } else -#endif // #if VMA_DEDICATED_ALLOCATION + prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE); + } + else +#endif // #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000 { (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq); requiresDedicatedAllocation = false; - prefersDedicatedAllocation = false; + prefersDedicatedAllocation = false; } } void VmaAllocator_T::GetImageMemoryRequirements( - VkImage hImage, - VkMemoryRequirements &memReq, - bool &requiresDedicatedAllocation, - bool &prefersDedicatedAllocation) const { -#if VMA_DEDICATED_ALLOCATION - if (m_UseKhrDedicatedAllocation) { + VkImage hImage, + VkMemoryRequirements& memReq, + bool& requiresDedicatedAllocation, + bool& 
prefersDedicatedAllocation) const +{ +#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000 + if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) + { VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR }; memReqInfo.image = hImage; @@ -13430,127 +15620,155 @@ void VmaAllocator_T::GetImageMemoryRequirements( memReq = memReq2.memoryRequirements; requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE); - prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE); - } else -#endif // #if VMA_DEDICATED_ALLOCATION + prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE); + } + else +#endif // #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000 { (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq); requiresDedicatedAllocation = false; - prefersDedicatedAllocation = false; + prefersDedicatedAllocation = false; } } VkResult VmaAllocator_T::AllocateMemory( - const VkMemoryRequirements &vkMemReq, - bool requiresDedicatedAllocation, - bool prefersDedicatedAllocation, - VkBuffer dedicatedBuffer, - VkImage dedicatedImage, - const VmaAllocationCreateInfo &createInfo, - VmaSuballocationType suballocType, - size_t allocationCount, - VmaAllocation *pAllocations) { + const VkMemoryRequirements& vkMemReq, + bool requiresDedicatedAllocation, + bool prefersDedicatedAllocation, + VkBuffer dedicatedBuffer, + VkImage dedicatedImage, + const VmaAllocationCreateInfo& createInfo, + VmaSuballocationType suballocType, + size_t allocationCount, + VmaAllocation* pAllocations) +{ memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount); VMA_ASSERT(VmaIsPow2(vkMemReq.alignment)); - if (vkMemReq.size == 0) { + if(vkMemReq.size == 0) + { return VK_ERROR_VALIDATION_FAILED_EXT; } - if ((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 && - (createInfo.flags & 
VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0) { + if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 && + (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0) + { VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense."); return VK_ERROR_OUT_OF_DEVICE_MEMORY; } - if ((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 && - (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0) { + if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 && + (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0) + { VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid."); return VK_ERROR_OUT_OF_DEVICE_MEMORY; } - if (requiresDedicatedAllocation) { - if ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0) { + if(requiresDedicatedAllocation) + { + if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0) + { VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required."); return VK_ERROR_OUT_OF_DEVICE_MEMORY; } - if (createInfo.pool != VK_NULL_HANDLE) { + if(createInfo.pool != VK_NULL_HANDLE) + { VMA_ASSERT(0 && "Pool specified while dedicated allocation is required."); return VK_ERROR_OUT_OF_DEVICE_MEMORY; } } - if ((createInfo.pool != VK_NULL_HANDLE) && - ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0)) { + if((createInfo.pool != VK_NULL_HANDLE) && + ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0)) + { VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid."); return VK_ERROR_OUT_OF_DEVICE_MEMORY; } - if (createInfo.pool != VK_NULL_HANDLE) { + if(createInfo.pool != VK_NULL_HANDLE) + { const VkDeviceSize alignmentForPool = VMA_MAX( - vkMemReq.alignment, - 
GetMemoryTypeMinAlignment(createInfo.pool->m_BlockVector.GetMemoryTypeIndex())); + vkMemReq.alignment, + GetMemoryTypeMinAlignment(createInfo.pool->m_BlockVector.GetMemoryTypeIndex())); + + VmaAllocationCreateInfo createInfoForPool = createInfo; + // If memory type is not HOST_VISIBLE, disable MAPPED. + if((createInfoForPool.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 && + (m_MemProps.memoryTypes[createInfo.pool->m_BlockVector.GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0) + { + createInfoForPool.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT; + } + return createInfo.pool->m_BlockVector.Allocate( - m_CurrentFrameIndex.load(), - vkMemReq.size, - alignmentForPool, - createInfo, - suballocType, - allocationCount, - pAllocations); - } else { + m_CurrentFrameIndex.load(), + vkMemReq.size, + alignmentForPool, + createInfoForPool, + suballocType, + allocationCount, + pAllocations); + } + else + { // Bit mask of memory Vulkan types acceptable for this allocation. uint32_t memoryTypeBits = vkMemReq.memoryTypeBits; uint32_t memTypeIndex = UINT32_MAX; VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex); - if (res == VK_SUCCESS) { + if(res == VK_SUCCESS) + { VkDeviceSize alignmentForMemType = VMA_MAX( - vkMemReq.alignment, - GetMemoryTypeMinAlignment(memTypeIndex)); + vkMemReq.alignment, + GetMemoryTypeMinAlignment(memTypeIndex)); res = AllocateMemoryOfType( - vkMemReq.size, - alignmentForMemType, - requiresDedicatedAllocation || prefersDedicatedAllocation, - dedicatedBuffer, - dedicatedImage, - createInfo, - memTypeIndex, - suballocType, - allocationCount, - pAllocations); + vkMemReq.size, + alignmentForMemType, + requiresDedicatedAllocation || prefersDedicatedAllocation, + dedicatedBuffer, + dedicatedImage, + createInfo, + memTypeIndex, + suballocType, + allocationCount, + pAllocations); // Succeeded on first try. 
- if (res == VK_SUCCESS) { + if(res == VK_SUCCESS) + { return res; } // Allocation from this memory type failed. Try other compatible memory types. - else { - for (;;) { + else + { + for(;;) + { // Remove old memTypeIndex from list of possibilities. memoryTypeBits &= ~(1u << memTypeIndex); // Find alternative memTypeIndex. res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex); - if (res == VK_SUCCESS) { + if(res == VK_SUCCESS) + { alignmentForMemType = VMA_MAX( - vkMemReq.alignment, - GetMemoryTypeMinAlignment(memTypeIndex)); - + vkMemReq.alignment, + GetMemoryTypeMinAlignment(memTypeIndex)); + res = AllocateMemoryOfType( - vkMemReq.size, - alignmentForMemType, - requiresDedicatedAllocation || prefersDedicatedAllocation, - dedicatedBuffer, - dedicatedImage, - createInfo, - memTypeIndex, - suballocType, - allocationCount, - pAllocations); + vkMemReq.size, + alignmentForMemType, + requiresDedicatedAllocation || prefersDedicatedAllocation, + dedicatedBuffer, + dedicatedImage, + createInfo, + memTypeIndex, + suballocType, + allocationCount, + pAllocations); // Allocation from this alternative memory type succeeded. - if (res == VK_SUCCESS) { + if(res == VK_SUCCESS) + { return res; } // else: Allocation from this memory type failed. Try next one - next loop iteration. } // No other matching memory type index could be found. - else { + else + { // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once. 
return VK_ERROR_OUT_OF_DEVICE_MEMORY; } @@ -13564,84 +15782,87 @@ VkResult VmaAllocator_T::AllocateMemory( } void VmaAllocator_T::FreeMemory( - size_t allocationCount, - const VmaAllocation *pAllocations) { + size_t allocationCount, + const VmaAllocation* pAllocations) +{ VMA_ASSERT(pAllocations); - for (size_t allocIndex = allocationCount; allocIndex--;) { + for(size_t allocIndex = allocationCount; allocIndex--; ) + { VmaAllocation allocation = pAllocations[allocIndex]; - if (allocation != VK_NULL_HANDLE) { - if (TouchAllocation(allocation)) { - if (VMA_DEBUG_INITIALIZE_ALLOCATIONS) { + if(allocation != VK_NULL_HANDLE) + { + if(TouchAllocation(allocation)) + { + if(VMA_DEBUG_INITIALIZE_ALLOCATIONS) + { FillAllocation(allocation, VMA_ALLOCATION_FILL_PATTERN_DESTROYED); } - switch (allocation->GetType()) { - case VmaAllocation_T::ALLOCATION_TYPE_BLOCK: { - VmaBlockVector *pBlockVector = VMA_NULL; + switch(allocation->GetType()) + { + case VmaAllocation_T::ALLOCATION_TYPE_BLOCK: + { + VmaBlockVector* pBlockVector = VMA_NULL; VmaPool hPool = allocation->GetBlock()->GetParentPool(); - if (hPool != VK_NULL_HANDLE) { + if(hPool != VK_NULL_HANDLE) + { pBlockVector = &hPool->m_BlockVector; - } else { + } + else + { const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex(); pBlockVector = m_pBlockVectors[memTypeIndex]; } pBlockVector->Free(allocation); - } break; - case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED: - FreeDedicatedMemory(allocation); - break; - default: - VMA_ASSERT(0); + } + break; + case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED: + FreeDedicatedMemory(allocation); + break; + default: + VMA_ASSERT(0); } } + // Do this regardless of whether the allocation is lost. Lost allocations still account to Budget.AllocationBytes. 
+ m_Budget.RemoveAllocation(MemoryTypeIndexToHeapIndex(allocation->GetMemoryTypeIndex()), allocation->GetSize()); allocation->SetUserData(this, VMA_NULL); - allocation->Dtor(); m_AllocationObjectAllocator.Free(allocation); } } } VkResult VmaAllocator_T::ResizeAllocation( - const VmaAllocation alloc, - VkDeviceSize newSize) { - if (newSize == 0 || alloc->GetLastUseFrameIndex() == VMA_FRAME_INDEX_LOST) { + const VmaAllocation alloc, + VkDeviceSize newSize) +{ + // This function is deprecated and so it does nothing. It's left for backward compatibility. + if(newSize == 0 || alloc->GetLastUseFrameIndex() == VMA_FRAME_INDEX_LOST) + { return VK_ERROR_VALIDATION_FAILED_EXT; } - if (newSize == alloc->GetSize()) { + if(newSize == alloc->GetSize()) + { return VK_SUCCESS; } - - switch (alloc->GetType()) { - case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED: - return VK_ERROR_FEATURE_NOT_PRESENT; - case VmaAllocation_T::ALLOCATION_TYPE_BLOCK: - if (alloc->GetBlock()->m_pMetadata->ResizeAllocation(alloc, newSize)) { - alloc->ChangeSize(newSize); - VMA_HEAVY_ASSERT(alloc->GetBlock()->m_pMetadata->Validate()); - return VK_SUCCESS; - } else { - return VK_ERROR_OUT_OF_POOL_MEMORY; - } - default: - VMA_ASSERT(0); - return VK_ERROR_VALIDATION_FAILED_EXT; - } + return VK_ERROR_OUT_OF_POOL_MEMORY; } -void VmaAllocator_T::CalculateStats(VmaStats *pStats) { +void VmaAllocator_T::CalculateStats(VmaStats* pStats) +{ // Initialize. InitStatInfo(pStats->total); - for (size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i) + for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i) InitStatInfo(pStats->memoryType[i]); - for (size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i) + for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i) InitStatInfo(pStats->memoryHeap[i]); - + // Process default pools. 
- for (uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex) { - VmaBlockVector *const pBlockVector = m_pBlockVectors[memTypeIndex]; + for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex) + { + VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex]; VMA_ASSERT(pBlockVector); pBlockVector->AddStats(pStats); } @@ -13649,18 +15870,21 @@ void VmaAllocator_T::CalculateStats(VmaStats *pStats) { // Process custom pools. { VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex); - for (size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex) { + for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex) + { m_Pools[poolIndex]->m_BlockVector.AddStats(pStats); } } // Process dedicated allocations. - for (uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex) { + for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex) + { const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex); VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex); - AllocationVectorType *const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex]; + AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex]; VMA_ASSERT(pDedicatedAllocVector); - for (size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex) { + for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex) + { VmaStatInfo allocationStatInfo; (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo); VmaAddStatInfo(pStats->total, allocationStatInfo); @@ -13671,35 +15895,90 @@ void VmaAllocator_T::CalculateStats(VmaStats *pStats) { // Postprocess. 
VmaPostprocessCalcStatInfo(pStats->total); - for (size_t i = 0; i < GetMemoryTypeCount(); ++i) + for(size_t i = 0; i < GetMemoryTypeCount(); ++i) VmaPostprocessCalcStatInfo(pStats->memoryType[i]); - for (size_t i = 0; i < GetMemoryHeapCount(); ++i) + for(size_t i = 0; i < GetMemoryHeapCount(); ++i) VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]); } +void VmaAllocator_T::GetBudget(VmaBudget* outBudget, uint32_t firstHeap, uint32_t heapCount) +{ +#if VMA_MEMORY_BUDGET + if(m_UseExtMemoryBudget) + { + if(m_Budget.m_OperationsSinceBudgetFetch < 30) + { + VmaMutexLockRead lockRead(m_Budget.m_BudgetMutex, m_UseMutex); + for(uint32_t i = 0; i < heapCount; ++i, ++outBudget) + { + const uint32_t heapIndex = firstHeap + i; + + outBudget->blockBytes = m_Budget.m_BlockBytes[heapIndex]; + outBudget->allocationBytes = m_Budget.m_AllocationBytes[heapIndex]; + + if(m_Budget.m_VulkanUsage[heapIndex] + outBudget->blockBytes > m_Budget.m_BlockBytesAtBudgetFetch[heapIndex]) + { + outBudget->usage = m_Budget.m_VulkanUsage[heapIndex] + + outBudget->blockBytes - m_Budget.m_BlockBytesAtBudgetFetch[heapIndex]; + } + else + { + outBudget->usage = 0; + } + + // Have to take MIN with heap size because explicit HeapSizeLimit is included in it. + outBudget->budget = VMA_MIN( + m_Budget.m_VulkanBudget[heapIndex], m_MemProps.memoryHeaps[heapIndex].size); + } + } + else + { + UpdateVulkanBudget(); // Outside of mutex lock + GetBudget(outBudget, firstHeap, heapCount); // Recursion + } + } + else +#endif + { + for(uint32_t i = 0; i < heapCount; ++i, ++outBudget) + { + const uint32_t heapIndex = firstHeap + i; + + outBudget->blockBytes = m_Budget.m_BlockBytes[heapIndex]; + outBudget->allocationBytes = m_Budget.m_AllocationBytes[heapIndex]; + + outBudget->usage = outBudget->blockBytes; + outBudget->budget = m_MemProps.memoryHeaps[heapIndex].size * 8 / 10; // 80% heuristics. 
+ } + } +} + static const uint32_t VMA_VENDOR_ID_AMD = 4098; VkResult VmaAllocator_T::DefragmentationBegin( - const VmaDefragmentationInfo2 &info, - VmaDefragmentationStats *pStats, - VmaDefragmentationContext *pContext) { - if (info.pAllocationsChanged != VMA_NULL) { + const VmaDefragmentationInfo2& info, + VmaDefragmentationStats* pStats, + VmaDefragmentationContext* pContext) +{ + if(info.pAllocationsChanged != VMA_NULL) + { memset(info.pAllocationsChanged, 0, info.allocationCount * sizeof(VkBool32)); } *pContext = vma_new(this, VmaDefragmentationContext_T)( - this, m_CurrentFrameIndex.load(), info.flags, pStats); + this, m_CurrentFrameIndex.load(), info.flags, pStats); (*pContext)->AddPools(info.poolCount, info.pPools); (*pContext)->AddAllocations( - info.allocationCount, info.pAllocations, info.pAllocationsChanged); + info.allocationCount, info.pAllocations, info.pAllocationsChanged); VkResult res = (*pContext)->Defragment( - info.maxCpuBytesToMove, info.maxCpuAllocationsToMove, - info.maxGpuBytesToMove, info.maxGpuAllocationsToMove, - info.commandBuffer, pStats); + info.maxCpuBytesToMove, info.maxCpuAllocationsToMove, + info.maxGpuBytesToMove, info.maxGpuAllocationsToMove, + info.commandBuffer, pStats, info.flags); - if (res != VK_NOT_READY) { + if(res != VK_NOT_READY) + { vma_delete(this, *pContext); *pContext = VMA_NULL; } @@ -13708,21 +15987,39 @@ VkResult VmaAllocator_T::DefragmentationBegin( } VkResult VmaAllocator_T::DefragmentationEnd( - VmaDefragmentationContext context) { + VmaDefragmentationContext context) +{ vma_delete(this, context); return VK_SUCCESS; } -void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo *pAllocationInfo) { - if (hAllocation->CanBecomeLost()) { +VkResult VmaAllocator_T::DefragmentationPassBegin( + VmaDefragmentationPassInfo* pInfo, + VmaDefragmentationContext context) +{ + return context->DefragmentPassBegin(pInfo); +} +VkResult VmaAllocator_T::DefragmentationPassEnd( + VmaDefragmentationContext 
context) +{ + return context->DefragmentPassEnd(); + +} + +void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo) +{ + if(hAllocation->CanBecomeLost()) + { /* - Warning: This is a carefully designed algorithm. - Do not modify unless you really know what you're doing :) - */ + Warning: This is a carefully designed algorithm. + Do not modify unless you really know what you're doing :) + */ const uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load(); uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex(); - for (;;) { - if (localLastUseFrameIndex == VMA_FRAME_INDEX_LOST) { + for(;;) + { + if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST) + { pAllocationInfo->memoryType = UINT32_MAX; pAllocationInfo->deviceMemory = VK_NULL_HANDLE; pAllocationInfo->offset = 0; @@ -13730,7 +16027,9 @@ void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationI pAllocationInfo->pMappedData = VMA_NULL; pAllocationInfo->pUserData = hAllocation->GetUserData(); return; - } else if (localLastUseFrameIndex == localCurrFrameIndex) { + } + else if(localLastUseFrameIndex == localCurrFrameIndex) + { pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex(); pAllocationInfo->deviceMemory = hAllocation->GetMemory(); pAllocationInfo->offset = hAllocation->GetOffset(); @@ -13738,24 +16037,32 @@ void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationI pAllocationInfo->pMappedData = VMA_NULL; pAllocationInfo->pUserData = hAllocation->GetUserData(); return; - } else // Last use time earlier than current time. + } + else // Last use time earlier than current time. 
{ - if (hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex)) { + if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex)) + { localLastUseFrameIndex = localCurrFrameIndex; } } } - } else { + } + else + { #if VMA_STATS_STRING_ENABLED uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load(); uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex(); - for (;;) { + for(;;) + { VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST); - if (localLastUseFrameIndex == localCurrFrameIndex) { + if(localLastUseFrameIndex == localCurrFrameIndex) + { break; - } else // Last use time earlier than current time. + } + else // Last use time earlier than current time. { - if (hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex)) { + if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex)) + { localLastUseFrameIndex = localCurrFrameIndex; } } @@ -13771,34 +16078,48 @@ void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationI } } -bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation) { +bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation) +{ // This is a stripped-down version of VmaAllocator_T::GetAllocationInfo. - if (hAllocation->CanBecomeLost()) { + if(hAllocation->CanBecomeLost()) + { uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load(); uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex(); - for (;;) { - if (localLastUseFrameIndex == VMA_FRAME_INDEX_LOST) { + for(;;) + { + if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST) + { return false; - } else if (localLastUseFrameIndex == localCurrFrameIndex) { + } + else if(localLastUseFrameIndex == localCurrFrameIndex) + { return true; - } else // Last use time earlier than current time. + } + else // Last use time earlier than current time. 
{ - if (hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex)) { + if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex)) + { localLastUseFrameIndex = localCurrFrameIndex; } } } - } else { + } + else + { #if VMA_STATS_STRING_ENABLED uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load(); uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex(); - for (;;) { + for(;;) + { VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST); - if (localLastUseFrameIndex == localCurrFrameIndex) { + if(localLastUseFrameIndex == localCurrFrameIndex) + { break; - } else // Last use time earlier than current time. + } + else // Last use time earlier than current time. { - if (hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex)) { + if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex)) + { localLastUseFrameIndex = localCurrFrameIndex; } } @@ -13809,24 +16130,34 @@ bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation) { } } -VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo *pCreateInfo, VmaPool *pPool) { +VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool) +{ VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u, flags=%u", pCreateInfo->memoryTypeIndex, pCreateInfo->flags); VmaPoolCreateInfo newCreateInfo = *pCreateInfo; - if (newCreateInfo.maxBlockCount == 0) { + if(newCreateInfo.maxBlockCount == 0) + { newCreateInfo.maxBlockCount = SIZE_MAX; } - if (newCreateInfo.minBlockCount > newCreateInfo.maxBlockCount) { + if(newCreateInfo.minBlockCount > newCreateInfo.maxBlockCount) + { return VK_ERROR_INITIALIZATION_FAILED; } + // Memory type index out of range or forbidden. 
+ if(pCreateInfo->memoryTypeIndex >= GetMemoryTypeCount() || + ((1u << pCreateInfo->memoryTypeIndex) & m_GlobalMemoryTypeBits) == 0) + { + return VK_ERROR_FEATURE_NOT_PRESENT; + } const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex); *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo, preferredBlockSize); VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks(); - if (res != VK_SUCCESS) { + if(res != VK_SUCCESS) + { vma_delete(this, *pPool); *pPool = VMA_NULL; return res; @@ -13842,7 +16173,8 @@ VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo *pCreateInfo, VmaPoo return VK_SUCCESS; } -void VmaAllocator_T::DestroyPool(VmaPool pool) { +void VmaAllocator_T::DestroyPool(VmaPool pool) +{ // Remove from m_Pools. { VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex); @@ -13853,43 +16185,58 @@ void VmaAllocator_T::DestroyPool(VmaPool pool) { vma_delete(this, pool); } -void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats *pPoolStats) { +void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats) +{ pool->m_BlockVector.GetPoolStats(pPoolStats); } -void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex) { +void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex) +{ m_CurrentFrameIndex.store(frameIndex); + +#if VMA_MEMORY_BUDGET + if(m_UseExtMemoryBudget) + { + UpdateVulkanBudget(); + } +#endif // #if VMA_MEMORY_BUDGET } void VmaAllocator_T::MakePoolAllocationsLost( - VmaPool hPool, - size_t *pLostAllocationCount) { + VmaPool hPool, + size_t* pLostAllocationCount) +{ hPool->m_BlockVector.MakePoolAllocationsLost( - m_CurrentFrameIndex.load(), - pLostAllocationCount); + m_CurrentFrameIndex.load(), + pLostAllocationCount); } -VkResult VmaAllocator_T::CheckPoolCorruption(VmaPool hPool) { +VkResult VmaAllocator_T::CheckPoolCorruption(VmaPool hPool) +{ return hPool->m_BlockVector.CheckCorruption(); } -VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits) { +VkResult 
VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits) +{ VkResult finalRes = VK_ERROR_FEATURE_NOT_PRESENT; // Process default pools. - for (uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex) { - if (((1u << memTypeIndex) & memoryTypeBits) != 0) { - VmaBlockVector *const pBlockVector = m_pBlockVectors[memTypeIndex]; + for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex) + { + if(((1u << memTypeIndex) & memoryTypeBits) != 0) + { + VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex]; VMA_ASSERT(pBlockVector); VkResult localRes = pBlockVector->CheckCorruption(); - switch (localRes) { - case VK_ERROR_FEATURE_NOT_PRESENT: - break; - case VK_SUCCESS: - finalRes = VK_SUCCESS; - break; - default: - return localRes; + switch(localRes) + { + case VK_ERROR_FEATURE_NOT_PRESENT: + break; + case VK_SUCCESS: + finalRes = VK_SUCCESS; + break; + default: + return localRes; } } } @@ -13897,17 +16244,20 @@ VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits) { // Process custom pools. 
{ VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex); - for (size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex) { - if (((1u << m_Pools[poolIndex]->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0) { + for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex) + { + if(((1u << m_Pools[poolIndex]->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0) + { VkResult localRes = m_Pools[poolIndex]->m_BlockVector.CheckCorruption(); - switch (localRes) { - case VK_ERROR_FEATURE_NOT_PRESENT: - break; - case VK_SUCCESS: - finalRes = VK_SUCCESS; - break; - default: - return localRes; + switch(localRes) + { + case VK_ERROR_FEATURE_NOT_PRESENT: + break; + case VK_SUCCESS: + finalRes = VK_SUCCESS; + break; + default: + return localRes; } } } @@ -13916,140 +16266,244 @@ VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits) { return finalRes; } -void VmaAllocator_T::CreateLostAllocation(VmaAllocation *pAllocation) { - *pAllocation = m_AllocationObjectAllocator.Allocate(); - (*pAllocation)->Ctor(VMA_FRAME_INDEX_LOST, false); +void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation) +{ + *pAllocation = m_AllocationObjectAllocator.Allocate(VMA_FRAME_INDEX_LOST, false); (*pAllocation)->InitLost(); } -VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo *pAllocateInfo, VkDeviceMemory *pMemory) { +VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory) +{ const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex); - VkResult res; - if (m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE) { - VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex); - if (m_HeapSizeLimit[heapIndex] >= pAllocateInfo->allocationSize) { - res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory); - if (res == VK_SUCCESS) { - m_HeapSizeLimit[heapIndex] -= 
pAllocateInfo->allocationSize; + // HeapSizeLimit is in effect for this heap. + if((m_HeapSizeLimitMask & (1u << heapIndex)) != 0) + { + const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size; + VkDeviceSize blockBytes = m_Budget.m_BlockBytes[heapIndex]; + for(;;) + { + const VkDeviceSize blockBytesAfterAllocation = blockBytes + pAllocateInfo->allocationSize; + if(blockBytesAfterAllocation > heapSize) + { + return VK_ERROR_OUT_OF_DEVICE_MEMORY; + } + if(m_Budget.m_BlockBytes[heapIndex].compare_exchange_strong(blockBytes, blockBytesAfterAllocation)) + { + break; } - } else { - res = VK_ERROR_OUT_OF_DEVICE_MEMORY; } - } else { - res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory); } + else + { + m_Budget.m_BlockBytes[heapIndex] += pAllocateInfo->allocationSize; + } + + // VULKAN CALL vkAllocateMemory. + VkResult res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory); - if (res == VK_SUCCESS && m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL) { - (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize); + if(res == VK_SUCCESS) + { +#if VMA_MEMORY_BUDGET + ++m_Budget.m_OperationsSinceBudgetFetch; +#endif + + // Informative callback. + if(m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL) + { + (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize); + } + } + else + { + m_Budget.m_BlockBytes[heapIndex] -= pAllocateInfo->allocationSize; } return res; } -void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory) { - if (m_DeviceMemoryCallbacks.pfnFree != VMA_NULL) { +void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory) +{ + // Informative callback. 
+ if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL) + { (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size); } + // VULKAN CALL vkFreeMemory. (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks()); - const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType); - if (m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE) { - VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex); - m_HeapSizeLimit[heapIndex] += size; + m_Budget.m_BlockBytes[MemoryTypeIndexToHeapIndex(memoryType)] -= size; +} + +VkResult VmaAllocator_T::BindVulkanBuffer( + VkDeviceMemory memory, + VkDeviceSize memoryOffset, + VkBuffer buffer, + const void* pNext) +{ + if(pNext != VMA_NULL) + { +#if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2 + if((m_UseKhrBindMemory2 || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) && + m_VulkanFunctions.vkBindBufferMemory2KHR != VMA_NULL) + { + VkBindBufferMemoryInfoKHR bindBufferMemoryInfo = { VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO_KHR }; + bindBufferMemoryInfo.pNext = pNext; + bindBufferMemoryInfo.buffer = buffer; + bindBufferMemoryInfo.memory = memory; + bindBufferMemoryInfo.memoryOffset = memoryOffset; + return (*m_VulkanFunctions.vkBindBufferMemory2KHR)(m_hDevice, 1, &bindBufferMemoryInfo); + } + else +#endif // #if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2 + { + return VK_ERROR_EXTENSION_NOT_PRESENT; + } + } + else + { + return (*m_VulkanFunctions.vkBindBufferMemory)(m_hDevice, buffer, memory, memoryOffset); + } +} + +VkResult VmaAllocator_T::BindVulkanImage( + VkDeviceMemory memory, + VkDeviceSize memoryOffset, + VkImage image, + const void* pNext) +{ + if(pNext != VMA_NULL) + { +#if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2 + if((m_UseKhrBindMemory2 || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) && + m_VulkanFunctions.vkBindImageMemory2KHR != VMA_NULL) + { + VkBindImageMemoryInfoKHR bindBufferMemoryInfo = { VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO_KHR }; + bindBufferMemoryInfo.pNext = pNext; + 
bindBufferMemoryInfo.image = image; + bindBufferMemoryInfo.memory = memory; + bindBufferMemoryInfo.memoryOffset = memoryOffset; + return (*m_VulkanFunctions.vkBindImageMemory2KHR)(m_hDevice, 1, &bindBufferMemoryInfo); + } + else +#endif // #if VMA_BIND_MEMORY2 + { + return VK_ERROR_EXTENSION_NOT_PRESENT; + } + } + else + { + return (*m_VulkanFunctions.vkBindImageMemory)(m_hDevice, image, memory, memoryOffset); } } -VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void **ppData) { - if (hAllocation->CanBecomeLost()) { +VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData) +{ + if(hAllocation->CanBecomeLost()) + { return VK_ERROR_MEMORY_MAP_FAILED; } - switch (hAllocation->GetType()) { - case VmaAllocation_T::ALLOCATION_TYPE_BLOCK: { - VmaDeviceMemoryBlock *const pBlock = hAllocation->GetBlock(); + switch(hAllocation->GetType()) + { + case VmaAllocation_T::ALLOCATION_TYPE_BLOCK: + { + VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock(); char *pBytes = VMA_NULL; - VkResult res = pBlock->Map(this, 1, (void **)&pBytes); - if (res == VK_SUCCESS) { + VkResult res = pBlock->Map(this, 1, (void**)&pBytes); + if(res == VK_SUCCESS) + { *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset(); hAllocation->BlockAllocMap(); } return res; } - case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED: - return hAllocation->DedicatedAllocMap(this, ppData); - default: - VMA_ASSERT(0); - return VK_ERROR_MEMORY_MAP_FAILED; + case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED: + return hAllocation->DedicatedAllocMap(this, ppData); + default: + VMA_ASSERT(0); + return VK_ERROR_MEMORY_MAP_FAILED; } } -void VmaAllocator_T::Unmap(VmaAllocation hAllocation) { - switch (hAllocation->GetType()) { - case VmaAllocation_T::ALLOCATION_TYPE_BLOCK: { - VmaDeviceMemoryBlock *const pBlock = hAllocation->GetBlock(); +void VmaAllocator_T::Unmap(VmaAllocation hAllocation) +{ + switch(hAllocation->GetType()) + { + case VmaAllocation_T::ALLOCATION_TYPE_BLOCK: + { + VmaDeviceMemoryBlock* 
const pBlock = hAllocation->GetBlock(); hAllocation->BlockAllocUnmap(); pBlock->Unmap(this, 1); - } break; - case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED: - hAllocation->DedicatedAllocUnmap(this); - break; - default: - VMA_ASSERT(0); + } + break; + case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED: + hAllocation->DedicatedAllocUnmap(this); + break; + default: + VMA_ASSERT(0); } } -VkResult VmaAllocator_T::BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer) { +VkResult VmaAllocator_T::BindBufferMemory( + VmaAllocation hAllocation, + VkDeviceSize allocationLocalOffset, + VkBuffer hBuffer, + const void* pNext) +{ VkResult res = VK_SUCCESS; - switch (hAllocation->GetType()) { - case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED: - res = GetVulkanFunctions().vkBindBufferMemory( - m_hDevice, - hBuffer, - hAllocation->GetMemory(), - 0); //memoryOffset - break; - case VmaAllocation_T::ALLOCATION_TYPE_BLOCK: { - VmaDeviceMemoryBlock *pBlock = hAllocation->GetBlock(); - VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block. Is the allocation lost?"); - res = pBlock->BindBufferMemory(this, hAllocation, hBuffer); - break; - } - default: - VMA_ASSERT(0); + switch(hAllocation->GetType()) + { + case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED: + res = BindVulkanBuffer(hAllocation->GetMemory(), allocationLocalOffset, hBuffer, pNext); + break; + case VmaAllocation_T::ALLOCATION_TYPE_BLOCK: + { + VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock(); + VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block. 
Is the allocation lost?"); + res = pBlock->BindBufferMemory(this, hAllocation, allocationLocalOffset, hBuffer, pNext); + break; + } + default: + VMA_ASSERT(0); } return res; } -VkResult VmaAllocator_T::BindImageMemory(VmaAllocation hAllocation, VkImage hImage) { +VkResult VmaAllocator_T::BindImageMemory( + VmaAllocation hAllocation, + VkDeviceSize allocationLocalOffset, + VkImage hImage, + const void* pNext) +{ VkResult res = VK_SUCCESS; - switch (hAllocation->GetType()) { - case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED: - res = GetVulkanFunctions().vkBindImageMemory( - m_hDevice, - hImage, - hAllocation->GetMemory(), - 0); //memoryOffset - break; - case VmaAllocation_T::ALLOCATION_TYPE_BLOCK: { - VmaDeviceMemoryBlock *pBlock = hAllocation->GetBlock(); - VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block. Is the allocation lost?"); - res = pBlock->BindImageMemory(this, hAllocation, hImage); - break; - } - default: - VMA_ASSERT(0); + switch(hAllocation->GetType()) + { + case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED: + res = BindVulkanImage(hAllocation->GetMemory(), allocationLocalOffset, hImage, pNext); + break; + case VmaAllocation_T::ALLOCATION_TYPE_BLOCK: + { + VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock(); + VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block. 
Is the allocation lost?"); + res = pBlock->BindImageMemory(this, hAllocation, allocationLocalOffset, hImage, pNext); + break; + } + default: + VMA_ASSERT(0); } return res; } void VmaAllocator_T::FlushOrInvalidateAllocation( - VmaAllocation hAllocation, - VkDeviceSize offset, VkDeviceSize size, - VMA_CACHE_OPERATION op) { + VmaAllocation hAllocation, + VkDeviceSize offset, VkDeviceSize size, + VMA_CACHE_OPERATION op) +{ const uint32_t memTypeIndex = hAllocation->GetMemoryTypeIndex(); - if (size > 0 && IsMemoryTypeNonCoherent(memTypeIndex)) { + if(size > 0 && IsMemoryTypeNonCoherent(memTypeIndex)) + { const VkDeviceSize allocationSize = hAllocation->GetSize(); VMA_ASSERT(offset <= allocationSize); @@ -14057,98 +16511,110 @@ void VmaAllocator_T::FlushOrInvalidateAllocation( VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE }; memRange.memory = hAllocation->GetMemory(); - - switch (hAllocation->GetType()) { - case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED: - memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize); - if (size == VK_WHOLE_SIZE) { - memRange.size = allocationSize - memRange.offset; - } else { - VMA_ASSERT(offset + size <= allocationSize); - memRange.size = VMA_MIN( - VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize), - allocationSize - memRange.offset); - } - break; - - case VmaAllocation_T::ALLOCATION_TYPE_BLOCK: { - // 1. Still within this allocation. - memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize); - if (size == VK_WHOLE_SIZE) { - size = allocationSize - offset; - } else { - VMA_ASSERT(offset + size <= allocationSize); - } - memRange.size = VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize); - - // 2. Adjust to whole block. 
- const VkDeviceSize allocationOffset = hAllocation->GetOffset(); - VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0); - const VkDeviceSize blockSize = hAllocation->GetBlock()->m_pMetadata->GetSize(); - memRange.offset += allocationOffset; - memRange.size = VMA_MIN(memRange.size, blockSize - memRange.offset); - - break; + + switch(hAllocation->GetType()) + { + case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED: + memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize); + if(size == VK_WHOLE_SIZE) + { + memRange.size = allocationSize - memRange.offset; } + else + { + VMA_ASSERT(offset + size <= allocationSize); + memRange.size = VMA_MIN( + VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize), + allocationSize - memRange.offset); + } + break; - default: - VMA_ASSERT(0); + case VmaAllocation_T::ALLOCATION_TYPE_BLOCK: + { + // 1. Still within this allocation. + memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize); + if(size == VK_WHOLE_SIZE) + { + size = allocationSize - offset; + } + else + { + VMA_ASSERT(offset + size <= allocationSize); + } + memRange.size = VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize); + + // 2. Adjust to whole block. 
+ const VkDeviceSize allocationOffset = hAllocation->GetOffset(); + VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0); + const VkDeviceSize blockSize = hAllocation->GetBlock()->m_pMetadata->GetSize(); + memRange.offset += allocationOffset; + memRange.size = VMA_MIN(memRange.size, blockSize - memRange.offset); + + break; + } + + default: + VMA_ASSERT(0); } - switch (op) { - case VMA_CACHE_FLUSH: - (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange); - break; - case VMA_CACHE_INVALIDATE: - (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, 1, &memRange); - break; - default: - VMA_ASSERT(0); + switch(op) + { + case VMA_CACHE_FLUSH: + (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange); + break; + case VMA_CACHE_INVALIDATE: + (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, 1, &memRange); + break; + default: + VMA_ASSERT(0); } } // else: Just ignore this call. } -void VmaAllocator_T::FreeDedicatedMemory(VmaAllocation allocation) { +void VmaAllocator_T::FreeDedicatedMemory(const VmaAllocation allocation) +{ VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED); const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex(); { VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex); - AllocationVectorType *const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex]; + AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex]; VMA_ASSERT(pDedicatedAllocations); bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation); VMA_ASSERT(success); } VkDeviceMemory hMemory = allocation->GetMemory(); - + /* - There is no need to call this, because Vulkan spec allows to skip vkUnmapMemory - before vkFreeMemory. 
- - if(allocation->GetMappedData() != VMA_NULL) - { - (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory); - } - */ + There is no need to call this, because Vulkan spec allows to skip vkUnmapMemory + before vkFreeMemory. + if(allocation->GetMappedData() != VMA_NULL) + { + (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory); + } + */ + FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory); VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex); } -uint32_t VmaAllocator_T::CalculateGpuDefragmentationMemoryTypeBits() const { +uint32_t VmaAllocator_T::CalculateGpuDefragmentationMemoryTypeBits() const +{ VkBufferCreateInfo dummyBufCreateInfo; VmaFillGpuDefragmentationBufferCreateInfo(dummyBufCreateInfo); uint32_t memoryTypeBits = 0; // Create buffer. - VkBuffer buf = VMA_NULL; + VkBuffer buf = VK_NULL_HANDLE; VkResult res = (*GetVulkanFunctions().vkCreateBuffer)( - m_hDevice, &dummyBufCreateInfo, GetAllocationCallbacks(), &buf); - if (res == VK_SUCCESS) { + m_hDevice, &dummyBufCreateInfo, GetAllocationCallbacks(), &buf); + if(res == VK_SUCCESS) + { // Query for supported memory types. VkMemoryRequirements memReq; (*GetVulkanFunctions().vkGetBufferMemoryRequirements)(m_hDevice, buf, &memReq); @@ -14161,25 +16627,82 @@ uint32_t VmaAllocator_T::CalculateGpuDefragmentationMemoryTypeBits() const { return memoryTypeBits; } -void VmaAllocator_T::FillAllocation(const VmaAllocation hAllocation, uint8_t pattern) { - if (VMA_DEBUG_INITIALIZE_ALLOCATIONS && - !hAllocation->CanBecomeLost() && - (m_MemProps.memoryTypes[hAllocation->GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0) { - void *pData = VMA_NULL; +uint32_t VmaAllocator_T::CalculateGlobalMemoryTypeBits() const +{ + // Make sure memory information is already fetched. 
+ VMA_ASSERT(GetMemoryTypeCount() > 0); + + uint32_t memoryTypeBits = UINT32_MAX; + + if(!m_UseAmdDeviceCoherentMemory) + { + // Exclude memory types that have VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD. + for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex) + { + if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY) != 0) + { + memoryTypeBits &= ~(1u << memTypeIndex); + } + } + } + + return memoryTypeBits; +} + +#if VMA_MEMORY_BUDGET + +void VmaAllocator_T::UpdateVulkanBudget() +{ + VMA_ASSERT(m_UseExtMemoryBudget); + + VkPhysicalDeviceMemoryProperties2KHR memProps = { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PROPERTIES_2_KHR }; + + VkPhysicalDeviceMemoryBudgetPropertiesEXT budgetProps = { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_BUDGET_PROPERTIES_EXT }; + memProps.pNext = &budgetProps; + + GetVulkanFunctions().vkGetPhysicalDeviceMemoryProperties2KHR(m_PhysicalDevice, &memProps); + + { + VmaMutexLockWrite lockWrite(m_Budget.m_BudgetMutex, m_UseMutex); + + for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex) + { + m_Budget.m_VulkanUsage[heapIndex] = budgetProps.heapUsage[heapIndex]; + m_Budget.m_VulkanBudget[heapIndex] = budgetProps.heapBudget[heapIndex]; + m_Budget.m_BlockBytesAtBudgetFetch[heapIndex] = m_Budget.m_BlockBytes[heapIndex].load(); + } + m_Budget.m_OperationsSinceBudgetFetch = 0; + } +} + +#endif // #if VMA_MEMORY_BUDGET + +void VmaAllocator_T::FillAllocation(const VmaAllocation hAllocation, uint8_t pattern) +{ + if(VMA_DEBUG_INITIALIZE_ALLOCATIONS && + !hAllocation->CanBecomeLost() && + (m_MemProps.memoryTypes[hAllocation->GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0) + { + void* pData = VMA_NULL; VkResult res = Map(hAllocation, &pData); - if (res == VK_SUCCESS) { + if(res == VK_SUCCESS) + { memset(pData, (int)pattern, (size_t)hAllocation->GetSize()); FlushOrInvalidateAllocation(hAllocation, 0, VK_WHOLE_SIZE, 
VMA_CACHE_FLUSH); Unmap(hAllocation); - } else { + } + else + { VMA_ASSERT(0 && "VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled, but couldn't map memory to fill allocation."); } } } -uint32_t VmaAllocator_T::GetGpuDefragmentationMemoryTypeBits() { +uint32_t VmaAllocator_T::GetGpuDefragmentationMemoryTypeBits() +{ uint32_t memoryTypeBits = m_GpuDefragmentationMemoryTypeBits.load(); - if (memoryTypeBits == UINT32_MAX) { + if(memoryTypeBits == UINT32_MAX) + { memoryTypeBits = CalculateGpuDefragmentationMemoryTypeBits(); m_GpuDefragmentationMemoryTypeBits.store(memoryTypeBits); } @@ -14188,14 +16711,18 @@ uint32_t VmaAllocator_T::GetGpuDefragmentationMemoryTypeBits() { #if VMA_STATS_STRING_ENABLED -void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter &json) { +void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json) +{ bool dedicatedAllocationsStarted = false; - for (uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex) { + for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex) + { VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex); - AllocationVectorType *const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex]; + AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex]; VMA_ASSERT(pDedicatedAllocVector); - if (pDedicatedAllocVector->empty() == false) { - if (dedicatedAllocationsStarted == false) { + if(pDedicatedAllocVector->empty() == false) + { + if(dedicatedAllocationsStarted == false) + { dedicatedAllocationsStarted = true; json.WriteString("DedicatedAllocations"); json.BeginObject(); @@ -14204,10 +16731,11 @@ void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter &json) { json.BeginString("Type "); json.ContinueString(memTypeIndex); json.EndString(); - + json.BeginArray(); - for (size_t i = 0; i < pDedicatedAllocVector->size(); ++i) { + for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i) + { json.BeginObject(true); 
const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i]; hAlloc->PrintParameters(json); @@ -14217,15 +16745,19 @@ void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter &json) { json.EndArray(); } } - if (dedicatedAllocationsStarted) { + if(dedicatedAllocationsStarted) + { json.EndObject(); } { bool allocationsStarted = false; - for (uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex) { - if (m_pBlockVectors[memTypeIndex]->IsEmpty() == false) { - if (allocationsStarted == false) { + for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex) + { + if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false) + { + if(allocationsStarted == false) + { allocationsStarted = true; json.WriteString("DefaultPools"); json.BeginObject(); @@ -14238,7 +16770,8 @@ void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter &json) { m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json); } } - if (allocationsStarted) { + if(allocationsStarted) + { json.EndObject(); } } @@ -14247,10 +16780,12 @@ void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter &json) { { VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex); const size_t poolCount = m_Pools.size(); - if (poolCount > 0) { + if(poolCount > 0) + { json.WriteString("Pools"); json.BeginObject(); - for (size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex) { + for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex) + { json.BeginString(); json.ContinueString(m_Pools[poolIndex]->GetId()); json.EndString(); @@ -14267,50 +16802,59 @@ void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter &json) { //////////////////////////////////////////////////////////////////////////////// // Public interface -VkResult vmaCreateAllocator( - const VmaAllocatorCreateInfo *pCreateInfo, - VmaAllocator *pAllocator) { +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAllocator( + const VmaAllocatorCreateInfo* pCreateInfo, + VmaAllocator* pAllocator) +{ VMA_ASSERT(pCreateInfo && pAllocator); + 
VMA_ASSERT(pCreateInfo->vulkanApiVersion == 0 || + (VK_VERSION_MAJOR(pCreateInfo->vulkanApiVersion) == 1 && VK_VERSION_MINOR(pCreateInfo->vulkanApiVersion) <= 1)); VMA_DEBUG_LOG("vmaCreateAllocator"); *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo); return (*pAllocator)->Init(pCreateInfo); } -void vmaDestroyAllocator( - VmaAllocator allocator) { - if (allocator != VK_NULL_HANDLE) { +VMA_CALL_PRE void VMA_CALL_POST vmaDestroyAllocator( + VmaAllocator allocator) +{ + if(allocator != VK_NULL_HANDLE) + { VMA_DEBUG_LOG("vmaDestroyAllocator"); VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks; vma_delete(&allocationCallbacks, allocator); } } -void vmaGetPhysicalDeviceProperties( - VmaAllocator allocator, - const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties) { +VMA_CALL_PRE void VMA_CALL_POST vmaGetPhysicalDeviceProperties( + VmaAllocator allocator, + const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties) +{ VMA_ASSERT(allocator && ppPhysicalDeviceProperties); *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties; } -void vmaGetMemoryProperties( - VmaAllocator allocator, - const VkPhysicalDeviceMemoryProperties **ppPhysicalDeviceMemoryProperties) { +VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryProperties( + VmaAllocator allocator, + const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties) +{ VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties); *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps; } -void vmaGetMemoryTypeProperties( - VmaAllocator allocator, - uint32_t memoryTypeIndex, - VkMemoryPropertyFlags *pFlags) { +VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryTypeProperties( + VmaAllocator allocator, + uint32_t memoryTypeIndex, + VkMemoryPropertyFlags* pFlags) +{ VMA_ASSERT(allocator && pFlags); VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount()); *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags; } -void 
vmaSetCurrentFrameIndex( - VmaAllocator allocator, - uint32_t frameIndex) { +VMA_CALL_PRE void VMA_CALL_POST vmaSetCurrentFrameIndex( + VmaAllocator allocator, + uint32_t frameIndex) +{ VMA_ASSERT(allocator); VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST); @@ -14319,20 +16863,31 @@ void vmaSetCurrentFrameIndex( allocator->SetCurrentFrameIndex(frameIndex); } -void vmaCalculateStats( - VmaAllocator allocator, - VmaStats *pStats) { +VMA_CALL_PRE void VMA_CALL_POST vmaCalculateStats( + VmaAllocator allocator, + VmaStats* pStats) +{ VMA_ASSERT(allocator && pStats); VMA_DEBUG_GLOBAL_MUTEX_LOCK allocator->CalculateStats(pStats); } +VMA_CALL_PRE void VMA_CALL_POST vmaGetBudget( + VmaAllocator allocator, + VmaBudget* pBudget) +{ + VMA_ASSERT(allocator && pBudget); + VMA_DEBUG_GLOBAL_MUTEX_LOCK + allocator->GetBudget(pBudget, 0, allocator->GetMemoryHeapCount()); +} + #if VMA_STATS_STRING_ENABLED -void vmaBuildStatsString( - VmaAllocator allocator, - char **ppStatsString, - VkBool32 detailedMap) { +VMA_CALL_PRE void VMA_CALL_POST vmaBuildStatsString( + VmaAllocator allocator, + char** ppStatsString, + VkBool32 detailedMap) +{ VMA_ASSERT(allocator && ppStatsString); VMA_DEBUG_GLOBAL_MUTEX_LOCK @@ -14341,13 +16896,17 @@ void vmaBuildStatsString( VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb); json.BeginObject(); + VmaBudget budget[VK_MAX_MEMORY_HEAPS]; + allocator->GetBudget(budget, 0, allocator->GetMemoryHeapCount()); + VmaStats stats; allocator->CalculateStats(&stats); json.WriteString("Total"); VmaPrintStatInfo(json, stats.total); - - for (uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex) { + + for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex) + { json.BeginString("Heap "); json.ContinueString(heapIndex); json.EndString(); @@ -14358,18 +16917,36 @@ void vmaBuildStatsString( json.WriteString("Flags"); json.BeginArray(true); - if ((allocator->m_MemProps.memoryHeaps[heapIndex].flags & 
VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0) { + if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0) + { json.WriteString("DEVICE_LOCAL"); } json.EndArray(); - if (stats.memoryHeap[heapIndex].blockCount > 0) { + json.WriteString("Budget"); + json.BeginObject(); + { + json.WriteString("BlockBytes"); + json.WriteNumber(budget[heapIndex].blockBytes); + json.WriteString("AllocationBytes"); + json.WriteNumber(budget[heapIndex].allocationBytes); + json.WriteString("Usage"); + json.WriteNumber(budget[heapIndex].usage); + json.WriteString("Budget"); + json.WriteNumber(budget[heapIndex].budget); + } + json.EndObject(); + + if(stats.memoryHeap[heapIndex].blockCount > 0) + { json.WriteString("Stats"); VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]); } - for (uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex) { - if (allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex) { + for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex) + { + if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex) + { json.BeginString("Type "); json.ContinueString(typeIndex); json.EndString(); @@ -14379,24 +16956,42 @@ void vmaBuildStatsString( json.WriteString("Flags"); json.BeginArray(true); VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags; - if ((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0) { + if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0) + { json.WriteString("DEVICE_LOCAL"); } - if ((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0) { + if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0) + { json.WriteString("HOST_VISIBLE"); } - if ((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0) { + if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0) + { json.WriteString("HOST_COHERENT"); } - if ((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0) { + if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0) + { 
json.WriteString("HOST_CACHED"); } - if ((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0) { + if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0) + { json.WriteString("LAZILY_ALLOCATED"); } + if((flags & VK_MEMORY_PROPERTY_PROTECTED_BIT) != 0) + { + json.WriteString(" PROTECTED"); + } + if((flags & VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY) != 0) + { + json.WriteString(" DEVICE_COHERENT"); + } + if((flags & VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY) != 0) + { + json.WriteString(" DEVICE_UNCACHED"); + } json.EndArray(); - if (stats.memoryType[typeIndex].blockCount > 0) { + if(stats.memoryType[typeIndex].blockCount > 0) + { json.WriteString("Stats"); VmaPrintStatInfo(json, stats.memoryType[typeIndex]); } @@ -14407,7 +17002,8 @@ void vmaBuildStatsString( json.EndObject(); } - if (detailedMap == VK_TRUE) { + if(detailedMap == VK_TRUE) + { allocator->PrintDetailedMap(json); } @@ -14415,18 +17011,21 @@ void vmaBuildStatsString( } const size_t len = sb.GetLength(); - char *const pChars = vma_new_array(allocator, char, len + 1); - if (len > 0) { + char* const pChars = vma_new_array(allocator, char, len + 1); + if(len > 0) + { memcpy(pChars, sb.GetData(), len); } pChars[len] = '\0'; *ppStatsString = pChars; } -void vmaFreeStatsString( - VmaAllocator allocator, - char *pStatsString) { - if (pStatsString != VMA_NULL) { +VMA_CALL_PRE void VMA_CALL_POST vmaFreeStatsString( + VmaAllocator allocator, + char* pStatsString) +{ + if(pStatsString != VMA_NULL) + { VMA_ASSERT(allocator); size_t len = strlen(pStatsString); vma_delete_array(allocator, pStatsString, len + 1); @@ -14438,65 +17037,93 @@ void vmaFreeStatsString( /* This function is not protected by any mutex because it just reads immutable data. 
*/ -VkResult vmaFindMemoryTypeIndex( - VmaAllocator allocator, - uint32_t memoryTypeBits, - const VmaAllocationCreateInfo *pAllocationCreateInfo, - uint32_t *pMemoryTypeIndex) { +VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndex( + VmaAllocator allocator, + uint32_t memoryTypeBits, + const VmaAllocationCreateInfo* pAllocationCreateInfo, + uint32_t* pMemoryTypeIndex) +{ VMA_ASSERT(allocator != VK_NULL_HANDLE); VMA_ASSERT(pAllocationCreateInfo != VMA_NULL); VMA_ASSERT(pMemoryTypeIndex != VMA_NULL); - if (pAllocationCreateInfo->memoryTypeBits != 0) { + memoryTypeBits &= allocator->GetGlobalMemoryTypeBits(); + + if(pAllocationCreateInfo->memoryTypeBits != 0) + { memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits; } - + uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags; uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags; + uint32_t notPreferredFlags = 0; // Convert usage to requiredFlags and preferredFlags. - switch (pAllocationCreateInfo->usage) { - case VMA_MEMORY_USAGE_UNKNOWN: - break; - case VMA_MEMORY_USAGE_GPU_ONLY: - if (!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0) { - preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT; - } - break; - case VMA_MEMORY_USAGE_CPU_ONLY: - requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT; - break; - case VMA_MEMORY_USAGE_CPU_TO_GPU: - requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT; - if (!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0) { - preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT; - } - break; - case VMA_MEMORY_USAGE_GPU_TO_CPU: - requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT; - preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT; - break; - default: - break; + switch(pAllocationCreateInfo->usage) + { + case VMA_MEMORY_USAGE_UNKNOWN: + break; + case VMA_MEMORY_USAGE_GPU_ONLY: + 
if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0) + { + preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT; + } + break; + case VMA_MEMORY_USAGE_CPU_ONLY: + requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT; + break; + case VMA_MEMORY_USAGE_CPU_TO_GPU: + requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT; + if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0) + { + preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT; + } + break; + case VMA_MEMORY_USAGE_GPU_TO_CPU: + requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT; + preferredFlags |= VK_MEMORY_PROPERTY_HOST_CACHED_BIT; + break; + case VMA_MEMORY_USAGE_CPU_COPY: + notPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT; + break; + case VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED: + requiredFlags |= VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT; + break; + default: + VMA_ASSERT(0); + break; + } + + // Avoid DEVICE_COHERENT unless explicitly requested. + if(((pAllocationCreateInfo->requiredFlags | pAllocationCreateInfo->preferredFlags) & + (VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY | VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY)) == 0) + { + notPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY; } *pMemoryTypeIndex = UINT32_MAX; uint32_t minCost = UINT32_MAX; - for (uint32_t memTypeIndex = 0, memTypeBit = 1; - memTypeIndex < allocator->GetMemoryTypeCount(); - ++memTypeIndex, memTypeBit <<= 1) { + for(uint32_t memTypeIndex = 0, memTypeBit = 1; + memTypeIndex < allocator->GetMemoryTypeCount(); + ++memTypeIndex, memTypeBit <<= 1) + { // This memory type is acceptable according to memoryTypeBits bitmask. 
- if ((memTypeBit & memoryTypeBits) != 0) { + if((memTypeBit & memoryTypeBits) != 0) + { const VkMemoryPropertyFlags currFlags = - allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags; + allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags; // This memory type contains requiredFlags. - if ((requiredFlags & ~currFlags) == 0) { + if((requiredFlags & ~currFlags) == 0) + { // Calculate cost as number of bits from preferredFlags not present in this memory type. - uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags); + uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags) + + VmaCountBitsSet(currFlags & notPreferredFlags); // Remember memory type with lowest cost. - if (currCost < minCost) { + if(currCost < minCost) + { *pMemoryTypeIndex = memTypeIndex; - if (currCost == 0) { + if(currCost == 0) + { return VK_SUCCESS; } minCost = currCost; @@ -14507,11 +17134,12 @@ VkResult vmaFindMemoryTypeIndex( return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT; } -VkResult vmaFindMemoryTypeIndexForBufferInfo( - VmaAllocator allocator, - const VkBufferCreateInfo *pBufferCreateInfo, - const VmaAllocationCreateInfo *pAllocationCreateInfo, - uint32_t *pMemoryTypeIndex) { +VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForBufferInfo( + VmaAllocator allocator, + const VkBufferCreateInfo* pBufferCreateInfo, + const VmaAllocationCreateInfo* pAllocationCreateInfo, + uint32_t* pMemoryTypeIndex) +{ VMA_ASSERT(allocator != VK_NULL_HANDLE); VMA_ASSERT(pBufferCreateInfo != VMA_NULL); VMA_ASSERT(pAllocationCreateInfo != VMA_NULL); @@ -14520,29 +17148,31 @@ VkResult vmaFindMemoryTypeIndexForBufferInfo( const VkDevice hDev = allocator->m_hDevice; VkBuffer hBuffer = VK_NULL_HANDLE; VkResult res = allocator->GetVulkanFunctions().vkCreateBuffer( - hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer); - if (res == VK_SUCCESS) { + hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer); 
+ if(res == VK_SUCCESS) + { VkMemoryRequirements memReq = {}; allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements( - hDev, hBuffer, &memReq); + hDev, hBuffer, &memReq); res = vmaFindMemoryTypeIndex( - allocator, - memReq.memoryTypeBits, - pAllocationCreateInfo, - pMemoryTypeIndex); + allocator, + memReq.memoryTypeBits, + pAllocationCreateInfo, + pMemoryTypeIndex); allocator->GetVulkanFunctions().vkDestroyBuffer( - hDev, hBuffer, allocator->GetAllocationCallbacks()); + hDev, hBuffer, allocator->GetAllocationCallbacks()); } return res; } -VkResult vmaFindMemoryTypeIndexForImageInfo( - VmaAllocator allocator, - const VkImageCreateInfo *pImageCreateInfo, - const VmaAllocationCreateInfo *pAllocationCreateInfo, - uint32_t *pMemoryTypeIndex) { +VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForImageInfo( + VmaAllocator allocator, + const VkImageCreateInfo* pImageCreateInfo, + const VmaAllocationCreateInfo* pAllocationCreateInfo, + uint32_t* pMemoryTypeIndex) +{ VMA_ASSERT(allocator != VK_NULL_HANDLE); VMA_ASSERT(pImageCreateInfo != VMA_NULL); VMA_ASSERT(pAllocationCreateInfo != VMA_NULL); @@ -14551,60 +17181,66 @@ VkResult vmaFindMemoryTypeIndexForImageInfo( const VkDevice hDev = allocator->m_hDevice; VkImage hImage = VK_NULL_HANDLE; VkResult res = allocator->GetVulkanFunctions().vkCreateImage( - hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage); - if (res == VK_SUCCESS) { + hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage); + if(res == VK_SUCCESS) + { VkMemoryRequirements memReq = {}; allocator->GetVulkanFunctions().vkGetImageMemoryRequirements( - hDev, hImage, &memReq); + hDev, hImage, &memReq); res = vmaFindMemoryTypeIndex( - allocator, - memReq.memoryTypeBits, - pAllocationCreateInfo, - pMemoryTypeIndex); + allocator, + memReq.memoryTypeBits, + pAllocationCreateInfo, + pMemoryTypeIndex); allocator->GetVulkanFunctions().vkDestroyImage( - hDev, hImage, allocator->GetAllocationCallbacks()); + hDev, hImage, 
allocator->GetAllocationCallbacks()); } return res; } -VkResult vmaCreatePool( - VmaAllocator allocator, - const VmaPoolCreateInfo *pCreateInfo, - VmaPool *pPool) { +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreatePool( + VmaAllocator allocator, + const VmaPoolCreateInfo* pCreateInfo, + VmaPool* pPool) +{ VMA_ASSERT(allocator && pCreateInfo && pPool); - + VMA_DEBUG_LOG("vmaCreatePool"); - + VMA_DEBUG_GLOBAL_MUTEX_LOCK - + VkResult res = allocator->CreatePool(pCreateInfo, pPool); - + #if VMA_RECORDING_ENABLED - if (allocator->GetRecorder() != VMA_NULL) { + if(allocator->GetRecorder() != VMA_NULL) + { allocator->GetRecorder()->RecordCreatePool(allocator->GetCurrentFrameIndex(), *pCreateInfo, *pPool); } #endif - + return res; } -void vmaDestroyPool( - VmaAllocator allocator, - VmaPool pool) { +VMA_CALL_PRE void VMA_CALL_POST vmaDestroyPool( + VmaAllocator allocator, + VmaPool pool) +{ VMA_ASSERT(allocator); - - if (pool == VK_NULL_HANDLE) { + + if(pool == VK_NULL_HANDLE) + { return; } - + VMA_DEBUG_LOG("vmaDestroyPool"); - + VMA_DEBUG_GLOBAL_MUTEX_LOCK - + #if VMA_RECORDING_ENABLED - if (allocator->GetRecorder() != VMA_NULL) { + if(allocator->GetRecorder() != VMA_NULL) + { allocator->GetRecorder()->RecordDestroyPool(allocator->GetCurrentFrameIndex(), pool); } #endif @@ -14612,10 +17248,11 @@ void vmaDestroyPool( allocator->DestroyPool(pool); } -void vmaGetPoolStats( - VmaAllocator allocator, - VmaPool pool, - VmaPoolStats *pPoolStats) { +VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolStats( + VmaAllocator allocator, + VmaPool pool, + VmaPoolStats* pPoolStats) +{ VMA_ASSERT(allocator && pool && pPoolStats); VMA_DEBUG_GLOBAL_MUTEX_LOCK @@ -14623,16 +17260,18 @@ void vmaGetPoolStats( allocator->GetPoolStats(pool, pPoolStats); } -void vmaMakePoolAllocationsLost( - VmaAllocator allocator, - VmaPool pool, - size_t *pLostAllocationCount) { +VMA_CALL_PRE void VMA_CALL_POST vmaMakePoolAllocationsLost( + VmaAllocator allocator, + VmaPool pool, + size_t* pLostAllocationCount) +{ 
VMA_ASSERT(allocator && pool); VMA_DEBUG_GLOBAL_MUTEX_LOCK #if VMA_RECORDING_ENABLED - if (allocator->GetRecorder() != VMA_NULL) { + if(allocator->GetRecorder() != VMA_NULL) + { allocator->GetRecorder()->RecordMakePoolAllocationsLost(allocator->GetCurrentFrameIndex(), pool); } #endif @@ -14640,7 +17279,8 @@ void vmaMakePoolAllocationsLost( allocator->MakePoolAllocationsLost(pool, pLostAllocationCount); } -VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool) { +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool) +{ VMA_ASSERT(allocator && pool); VMA_DEBUG_GLOBAL_MUTEX_LOCK @@ -14650,12 +17290,48 @@ VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool) { return allocator->CheckPoolCorruption(pool); } -VkResult vmaAllocateMemory( - VmaAllocator allocator, - const VkMemoryRequirements *pVkMemoryRequirements, - const VmaAllocationCreateInfo *pCreateInfo, - VmaAllocation *pAllocation, - VmaAllocationInfo *pAllocationInfo) { +VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolName( + VmaAllocator allocator, + VmaPool pool, + const char** ppName) +{ + VMA_ASSERT(allocator && pool); + + VMA_DEBUG_LOG("vmaGetPoolName"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + *ppName = pool->GetName(); +} + +VMA_CALL_PRE void VMA_CALL_POST vmaSetPoolName( + VmaAllocator allocator, + VmaPool pool, + const char* pName) +{ + VMA_ASSERT(allocator && pool); + + VMA_DEBUG_LOG("vmaSetPoolName"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + pool->SetName(pName); + +#if VMA_RECORDING_ENABLED + if(allocator->GetRecorder() != VMA_NULL) + { + allocator->GetRecorder()->RecordSetPoolName(allocator->GetCurrentFrameIndex(), pool, pName); + } +#endif +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemory( + VmaAllocator allocator, + const VkMemoryRequirements* pVkMemoryRequirements, + const VmaAllocationCreateInfo* pCreateInfo, + VmaAllocation* pAllocation, + VmaAllocationInfo* pAllocationInfo) +{ VMA_ASSERT(allocator && pVkMemoryRequirements && 
pCreateInfo && pAllocation); VMA_DEBUG_LOG("vmaAllocateMemory"); @@ -14663,41 +17339,45 @@ VkResult vmaAllocateMemory( VMA_DEBUG_GLOBAL_MUTEX_LOCK VkResult result = allocator->AllocateMemory( - *pVkMemoryRequirements, - false, // requiresDedicatedAllocation - false, // prefersDedicatedAllocation - VK_NULL_HANDLE, // dedicatedBuffer - VK_NULL_HANDLE, // dedicatedImage - *pCreateInfo, - VMA_SUBALLOCATION_TYPE_UNKNOWN, - 1, // allocationCount - pAllocation); + *pVkMemoryRequirements, + false, // requiresDedicatedAllocation + false, // prefersDedicatedAllocation + VK_NULL_HANDLE, // dedicatedBuffer + VK_NULL_HANDLE, // dedicatedImage + *pCreateInfo, + VMA_SUBALLOCATION_TYPE_UNKNOWN, + 1, // allocationCount + pAllocation); #if VMA_RECORDING_ENABLED - if (allocator->GetRecorder() != VMA_NULL) { + if(allocator->GetRecorder() != VMA_NULL) + { allocator->GetRecorder()->RecordAllocateMemory( - allocator->GetCurrentFrameIndex(), - *pVkMemoryRequirements, - *pCreateInfo, - *pAllocation); + allocator->GetCurrentFrameIndex(), + *pVkMemoryRequirements, + *pCreateInfo, + *pAllocation); } #endif - - if (pAllocationInfo != VMA_NULL && result == VK_SUCCESS) { + + if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS) + { allocator->GetAllocationInfo(*pAllocation, pAllocationInfo); } return result; } -VkResult vmaAllocateMemoryPages( - VmaAllocator allocator, - const VkMemoryRequirements *pVkMemoryRequirements, - const VmaAllocationCreateInfo *pCreateInfo, - size_t allocationCount, - VmaAllocation *pAllocations, - VmaAllocationInfo *pAllocationInfo) { - if (allocationCount == 0) { +VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryPages( + VmaAllocator allocator, + const VkMemoryRequirements* pVkMemoryRequirements, + const VmaAllocationCreateInfo* pCreateInfo, + size_t allocationCount, + VmaAllocation* pAllocations, + VmaAllocationInfo* pAllocationInfo) +{ + if(allocationCount == 0) + { return VK_SUCCESS; } @@ -14708,29 +17388,32 @@ VkResult vmaAllocateMemoryPages( 
VMA_DEBUG_GLOBAL_MUTEX_LOCK VkResult result = allocator->AllocateMemory( - *pVkMemoryRequirements, - false, // requiresDedicatedAllocation - false, // prefersDedicatedAllocation - VK_NULL_HANDLE, // dedicatedBuffer - VK_NULL_HANDLE, // dedicatedImage - *pCreateInfo, - VMA_SUBALLOCATION_TYPE_UNKNOWN, - allocationCount, - pAllocations); + *pVkMemoryRequirements, + false, // requiresDedicatedAllocation + false, // prefersDedicatedAllocation + VK_NULL_HANDLE, // dedicatedBuffer + VK_NULL_HANDLE, // dedicatedImage + *pCreateInfo, + VMA_SUBALLOCATION_TYPE_UNKNOWN, + allocationCount, + pAllocations); #if VMA_RECORDING_ENABLED - if (allocator->GetRecorder() != VMA_NULL) { + if(allocator->GetRecorder() != VMA_NULL) + { allocator->GetRecorder()->RecordAllocateMemoryPages( - allocator->GetCurrentFrameIndex(), - *pVkMemoryRequirements, - *pCreateInfo, - (uint64_t)allocationCount, - pAllocations); + allocator->GetCurrentFrameIndex(), + *pVkMemoryRequirements, + *pCreateInfo, + (uint64_t)allocationCount, + pAllocations); } #endif - - if (pAllocationInfo != VMA_NULL && result == VK_SUCCESS) { - for (size_t i = 0; i < allocationCount; ++i) { + + if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS) + { + for(size_t i = 0; i < allocationCount; ++i) + { allocator->GetAllocationInfo(pAllocations[i], pAllocationInfo + i); } } @@ -14738,12 +17421,13 @@ VkResult vmaAllocateMemoryPages( return result; } -VkResult vmaAllocateMemoryForBuffer( - VmaAllocator allocator, - VkBuffer buffer, - const VmaAllocationCreateInfo *pCreateInfo, - VmaAllocation *pAllocation, - VmaAllocationInfo *pAllocationInfo) { +VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForBuffer( + VmaAllocator allocator, + VkBuffer buffer, + const VmaAllocationCreateInfo* pCreateInfo, + VmaAllocation* pAllocation, + VmaAllocationInfo* pAllocationInfo) +{ VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation); VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer"); @@ -14754,45 +17438,48 @@ VkResult 
vmaAllocateMemoryForBuffer( bool requiresDedicatedAllocation = false; bool prefersDedicatedAllocation = false; allocator->GetBufferMemoryRequirements(buffer, vkMemReq, - requiresDedicatedAllocation, - prefersDedicatedAllocation); + requiresDedicatedAllocation, + prefersDedicatedAllocation); VkResult result = allocator->AllocateMemory( + vkMemReq, + requiresDedicatedAllocation, + prefersDedicatedAllocation, + buffer, // dedicatedBuffer + VK_NULL_HANDLE, // dedicatedImage + *pCreateInfo, + VMA_SUBALLOCATION_TYPE_BUFFER, + 1, // allocationCount + pAllocation); + +#if VMA_RECORDING_ENABLED + if(allocator->GetRecorder() != VMA_NULL) + { + allocator->GetRecorder()->RecordAllocateMemoryForBuffer( + allocator->GetCurrentFrameIndex(), vkMemReq, requiresDedicatedAllocation, prefersDedicatedAllocation, - buffer, // dedicatedBuffer - VK_NULL_HANDLE, // dedicatedImage *pCreateInfo, - VMA_SUBALLOCATION_TYPE_BUFFER, - 1, // allocationCount - pAllocation); - -#if VMA_RECORDING_ENABLED - if (allocator->GetRecorder() != VMA_NULL) { - allocator->GetRecorder()->RecordAllocateMemoryForBuffer( - allocator->GetCurrentFrameIndex(), - vkMemReq, - requiresDedicatedAllocation, - prefersDedicatedAllocation, - *pCreateInfo, - *pAllocation); + *pAllocation); } #endif - if (pAllocationInfo && result == VK_SUCCESS) { + if(pAllocationInfo && result == VK_SUCCESS) + { allocator->GetAllocationInfo(*pAllocation, pAllocationInfo); } return result; } -VkResult vmaAllocateMemoryForImage( - VmaAllocator allocator, - VkImage image, - const VmaAllocationCreateInfo *pCreateInfo, - VmaAllocation *pAllocation, - VmaAllocationInfo *pAllocationInfo) { +VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForImage( + VmaAllocator allocator, + VkImage image, + const VmaAllocationCreateInfo* pCreateInfo, + VmaAllocation* pAllocation, + VmaAllocationInfo* pAllocationInfo) +{ VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation); VMA_DEBUG_LOG("vmaAllocateMemoryForImage"); @@ -14801,155 
+17488,160 @@ VkResult vmaAllocateMemoryForImage( VkMemoryRequirements vkMemReq = {}; bool requiresDedicatedAllocation = false; - bool prefersDedicatedAllocation = false; + bool prefersDedicatedAllocation = false; allocator->GetImageMemoryRequirements(image, vkMemReq, - requiresDedicatedAllocation, prefersDedicatedAllocation); + requiresDedicatedAllocation, prefersDedicatedAllocation); VkResult result = allocator->AllocateMemory( + vkMemReq, + requiresDedicatedAllocation, + prefersDedicatedAllocation, + VK_NULL_HANDLE, // dedicatedBuffer + image, // dedicatedImage + *pCreateInfo, + VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN, + 1, // allocationCount + pAllocation); + +#if VMA_RECORDING_ENABLED + if(allocator->GetRecorder() != VMA_NULL) + { + allocator->GetRecorder()->RecordAllocateMemoryForImage( + allocator->GetCurrentFrameIndex(), vkMemReq, requiresDedicatedAllocation, prefersDedicatedAllocation, - VK_NULL_HANDLE, // dedicatedBuffer - image, // dedicatedImage *pCreateInfo, - VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN, - 1, // allocationCount - pAllocation); - -#if VMA_RECORDING_ENABLED - if (allocator->GetRecorder() != VMA_NULL) { - allocator->GetRecorder()->RecordAllocateMemoryForImage( - allocator->GetCurrentFrameIndex(), - vkMemReq, - requiresDedicatedAllocation, - prefersDedicatedAllocation, - *pCreateInfo, - *pAllocation); + *pAllocation); } #endif - if (pAllocationInfo && result == VK_SUCCESS) { + if(pAllocationInfo && result == VK_SUCCESS) + { allocator->GetAllocationInfo(*pAllocation, pAllocationInfo); } return result; } -void vmaFreeMemory( - VmaAllocator allocator, - VmaAllocation allocation) { +VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemory( + VmaAllocator allocator, + VmaAllocation allocation) +{ VMA_ASSERT(allocator); - - if (allocation == VK_NULL_HANDLE) { + + if(allocation == VK_NULL_HANDLE) + { return; } - + VMA_DEBUG_LOG("vmaFreeMemory"); - + VMA_DEBUG_GLOBAL_MUTEX_LOCK #if VMA_RECORDING_ENABLED - if (allocator->GetRecorder() != VMA_NULL) { + 
if(allocator->GetRecorder() != VMA_NULL) + { allocator->GetRecorder()->RecordFreeMemory( - allocator->GetCurrentFrameIndex(), - allocation); + allocator->GetCurrentFrameIndex(), + allocation); } #endif - + allocator->FreeMemory( - 1, // allocationCount - &allocation); + 1, // allocationCount + &allocation); } -void vmaFreeMemoryPages( - VmaAllocator allocator, - size_t allocationCount, - VmaAllocation *pAllocations) { - if (allocationCount == 0) { +VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemoryPages( + VmaAllocator allocator, + size_t allocationCount, + VmaAllocation* pAllocations) +{ + if(allocationCount == 0) + { return; } VMA_ASSERT(allocator); - + VMA_DEBUG_LOG("vmaFreeMemoryPages"); - + VMA_DEBUG_GLOBAL_MUTEX_LOCK #if VMA_RECORDING_ENABLED - if (allocator->GetRecorder() != VMA_NULL) { + if(allocator->GetRecorder() != VMA_NULL) + { allocator->GetRecorder()->RecordFreeMemoryPages( - allocator->GetCurrentFrameIndex(), - (uint64_t)allocationCount, - pAllocations); + allocator->GetCurrentFrameIndex(), + (uint64_t)allocationCount, + pAllocations); } #endif - + allocator->FreeMemory(allocationCount, pAllocations); } -VkResult vmaResizeAllocation( - VmaAllocator allocator, - VmaAllocation allocation, - VkDeviceSize newSize) { +VMA_CALL_PRE VkResult VMA_CALL_POST vmaResizeAllocation( + VmaAllocator allocator, + VmaAllocation allocation, + VkDeviceSize newSize) +{ VMA_ASSERT(allocator && allocation); - + VMA_DEBUG_LOG("vmaResizeAllocation"); - + VMA_DEBUG_GLOBAL_MUTEX_LOCK -#if VMA_RECORDING_ENABLED - if (allocator->GetRecorder() != VMA_NULL) { - allocator->GetRecorder()->RecordResizeAllocation( - allocator->GetCurrentFrameIndex(), - allocation, - newSize); - } -#endif - return allocator->ResizeAllocation(allocation, newSize); } -void vmaGetAllocationInfo( - VmaAllocator allocator, - VmaAllocation allocation, - VmaAllocationInfo *pAllocationInfo) { +VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationInfo( + VmaAllocator allocator, + VmaAllocation allocation, + 
VmaAllocationInfo* pAllocationInfo) +{ VMA_ASSERT(allocator && allocation && pAllocationInfo); VMA_DEBUG_GLOBAL_MUTEX_LOCK #if VMA_RECORDING_ENABLED - if (allocator->GetRecorder() != VMA_NULL) { + if(allocator->GetRecorder() != VMA_NULL) + { allocator->GetRecorder()->RecordGetAllocationInfo( - allocator->GetCurrentFrameIndex(), - allocation); + allocator->GetCurrentFrameIndex(), + allocation); } #endif allocator->GetAllocationInfo(allocation, pAllocationInfo); } -VkBool32 vmaTouchAllocation( - VmaAllocator allocator, - VmaAllocation allocation) { +VMA_CALL_PRE VkBool32 VMA_CALL_POST vmaTouchAllocation( + VmaAllocator allocator, + VmaAllocation allocation) +{ VMA_ASSERT(allocator && allocation); VMA_DEBUG_GLOBAL_MUTEX_LOCK #if VMA_RECORDING_ENABLED - if (allocator->GetRecorder() != VMA_NULL) { + if(allocator->GetRecorder() != VMA_NULL) + { allocator->GetRecorder()->RecordTouchAllocation( - allocator->GetCurrentFrameIndex(), - allocation); + allocator->GetCurrentFrameIndex(), + allocation); } #endif return allocator->TouchAllocation(allocation); } -void vmaSetAllocationUserData( - VmaAllocator allocator, - VmaAllocation allocation, - void *pUserData) { +VMA_CALL_PRE void VMA_CALL_POST vmaSetAllocationUserData( + VmaAllocator allocator, + VmaAllocation allocation, + void* pUserData) +{ VMA_ASSERT(allocator && allocation); VMA_DEBUG_GLOBAL_MUTEX_LOCK @@ -14957,18 +17649,20 @@ void vmaSetAllocationUserData( allocation->SetUserData(allocator, pUserData); #if VMA_RECORDING_ENABLED - if (allocator->GetRecorder() != VMA_NULL) { + if(allocator->GetRecorder() != VMA_NULL) + { allocator->GetRecorder()->RecordSetAllocationUserData( - allocator->GetCurrentFrameIndex(), - allocation, - pUserData); + allocator->GetCurrentFrameIndex(), + allocation, + pUserData); } #endif } -void vmaCreateLostAllocation( - VmaAllocator allocator, - VmaAllocation *pAllocation) { +VMA_CALL_PRE void VMA_CALL_POST vmaCreateLostAllocation( + VmaAllocator allocator, + VmaAllocation* pAllocation) +{ 
VMA_ASSERT(allocator && pAllocation); VMA_DEBUG_GLOBAL_MUTEX_LOCK; @@ -14976,18 +17670,20 @@ void vmaCreateLostAllocation( allocator->CreateLostAllocation(pAllocation); #if VMA_RECORDING_ENABLED - if (allocator->GetRecorder() != VMA_NULL) { + if(allocator->GetRecorder() != VMA_NULL) + { allocator->GetRecorder()->RecordCreateLostAllocation( - allocator->GetCurrentFrameIndex(), - *pAllocation); + allocator->GetCurrentFrameIndex(), + *pAllocation); } #endif } -VkResult vmaMapMemory( - VmaAllocator allocator, - VmaAllocation allocation, - void **ppData) { +VMA_CALL_PRE VkResult VMA_CALL_POST vmaMapMemory( + VmaAllocator allocator, + VmaAllocation allocation, + void** ppData) +{ VMA_ASSERT(allocator && allocation && ppData); VMA_DEBUG_GLOBAL_MUTEX_LOCK @@ -14995,35 +17691,39 @@ VkResult vmaMapMemory( VkResult res = allocator->Map(allocation, ppData); #if VMA_RECORDING_ENABLED - if (allocator->GetRecorder() != VMA_NULL) { + if(allocator->GetRecorder() != VMA_NULL) + { allocator->GetRecorder()->RecordMapMemory( - allocator->GetCurrentFrameIndex(), - allocation); + allocator->GetCurrentFrameIndex(), + allocation); } #endif return res; } -void vmaUnmapMemory( - VmaAllocator allocator, - VmaAllocation allocation) { +VMA_CALL_PRE void VMA_CALL_POST vmaUnmapMemory( + VmaAllocator allocator, + VmaAllocation allocation) +{ VMA_ASSERT(allocator && allocation); VMA_DEBUG_GLOBAL_MUTEX_LOCK #if VMA_RECORDING_ENABLED - if (allocator->GetRecorder() != VMA_NULL) { + if(allocator->GetRecorder() != VMA_NULL) + { allocator->GetRecorder()->RecordUnmapMemory( - allocator->GetCurrentFrameIndex(), - allocation); + allocator->GetCurrentFrameIndex(), + allocation); } #endif allocator->Unmap(allocation); } -void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size) { +VMA_CALL_PRE void VMA_CALL_POST vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size) +{ VMA_ASSERT(allocator && 
allocation); VMA_DEBUG_LOG("vmaFlushAllocation"); @@ -15033,15 +17733,17 @@ void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDevi allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_FLUSH); #if VMA_RECORDING_ENABLED - if (allocator->GetRecorder() != VMA_NULL) { + if(allocator->GetRecorder() != VMA_NULL) + { allocator->GetRecorder()->RecordFlushAllocation( - allocator->GetCurrentFrameIndex(), - allocation, offset, size); + allocator->GetCurrentFrameIndex(), + allocation, offset, size); } #endif } -void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size) { +VMA_CALL_PRE void VMA_CALL_POST vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size) +{ VMA_ASSERT(allocator && allocation); VMA_DEBUG_LOG("vmaInvalidateAllocation"); @@ -15051,15 +17753,17 @@ void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, V allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_INVALIDATE); #if VMA_RECORDING_ENABLED - if (allocator->GetRecorder() != VMA_NULL) { + if(allocator->GetRecorder() != VMA_NULL) + { allocator->GetRecorder()->RecordInvalidateAllocation( - allocator->GetCurrentFrameIndex(), - allocation, offset, size); + allocator->GetCurrentFrameIndex(), + allocation, offset, size); } #endif } -VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits) { +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits) +{ VMA_ASSERT(allocator); VMA_DEBUG_LOG("vmaCheckCorruption"); @@ -15069,23 +17773,27 @@ VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits) { return allocator->CheckCorruption(memoryTypeBits); } -VkResult vmaDefragment( - VmaAllocator allocator, - VmaAllocation *pAllocations, - size_t allocationCount, - VkBool32 *pAllocationsChanged, - const VmaDefragmentationInfo 
*pDefragmentationInfo, - VmaDefragmentationStats *pDefragmentationStats) { +VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragment( + VmaAllocator allocator, + VmaAllocation* pAllocations, + size_t allocationCount, + VkBool32* pAllocationsChanged, + const VmaDefragmentationInfo *pDefragmentationInfo, + VmaDefragmentationStats* pDefragmentationStats) +{ // Deprecated interface, reimplemented using new one. VmaDefragmentationInfo2 info2 = {}; info2.allocationCount = (uint32_t)allocationCount; info2.pAllocations = pAllocations; info2.pAllocationsChanged = pAllocationsChanged; - if (pDefragmentationInfo != VMA_NULL) { + if(pDefragmentationInfo != VMA_NULL) + { info2.maxCpuAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove; info2.maxCpuBytesToMove = pDefragmentationInfo->maxBytesToMove; - } else { + } + else + { info2.maxCpuAllocationsToMove = UINT32_MAX; info2.maxCpuBytesToMove = VK_WHOLE_SIZE; } @@ -15093,21 +17801,24 @@ VkResult vmaDefragment( VmaDefragmentationContext ctx; VkResult res = vmaDefragmentationBegin(allocator, &info2, pDefragmentationStats, &ctx); - if (res == VK_NOT_READY) { - res = vmaDefragmentationEnd(allocator, ctx); + if(res == VK_NOT_READY) + { + res = vmaDefragmentationEnd( allocator, ctx); } return res; } -VkResult vmaDefragmentationBegin( - VmaAllocator allocator, - const VmaDefragmentationInfo2 *pInfo, - VmaDefragmentationStats *pStats, - VmaDefragmentationContext *pContext) { +VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragmentationBegin( + VmaAllocator allocator, + const VmaDefragmentationInfo2* pInfo, + VmaDefragmentationStats* pStats, + VmaDefragmentationContext *pContext) +{ VMA_ASSERT(allocator && pInfo && pContext); // Degenerate case: Nothing to defragment. 
- if (pInfo->allocationCount == 0 && pInfo->poolCount == 0) { + if(pInfo->allocationCount == 0 && pInfo->poolCount == 0) + { return VK_SUCCESS; } @@ -15123,79 +17834,158 @@ VkResult vmaDefragmentationBegin( VkResult res = allocator->DefragmentationBegin(*pInfo, pStats, pContext); #if VMA_RECORDING_ENABLED - if (allocator->GetRecorder() != VMA_NULL) { + if(allocator->GetRecorder() != VMA_NULL) + { allocator->GetRecorder()->RecordDefragmentationBegin( - allocator->GetCurrentFrameIndex(), *pInfo, *pContext); + allocator->GetCurrentFrameIndex(), *pInfo, *pContext); } #endif return res; } -VkResult vmaDefragmentationEnd( - VmaAllocator allocator, - VmaDefragmentationContext context) { +VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragmentationEnd( + VmaAllocator allocator, + VmaDefragmentationContext context) +{ VMA_ASSERT(allocator); VMA_DEBUG_LOG("vmaDefragmentationEnd"); - if (context != VK_NULL_HANDLE) { + if(context != VK_NULL_HANDLE) + { VMA_DEBUG_GLOBAL_MUTEX_LOCK #if VMA_RECORDING_ENABLED - if (allocator->GetRecorder() != VMA_NULL) { + if(allocator->GetRecorder() != VMA_NULL) + { allocator->GetRecorder()->RecordDefragmentationEnd( - allocator->GetCurrentFrameIndex(), context); + allocator->GetCurrentFrameIndex(), context); } #endif return allocator->DefragmentationEnd(context); - } else { + } + else + { return VK_SUCCESS; } } -VkResult vmaBindBufferMemory( - VmaAllocator allocator, - VmaAllocation allocation, - VkBuffer buffer) { +VMA_CALL_PRE VkResult VMA_CALL_POST vmaBeginDefragmentationPass( + VmaAllocator allocator, + VmaDefragmentationContext context, + VmaDefragmentationPassInfo* pInfo + ) +{ + VMA_ASSERT(allocator); + VMA_ASSERT(pInfo); + VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->moveCount, pInfo->pMoves)); + + VMA_DEBUG_LOG("vmaBeginDefragmentationPass"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + if(context == VK_NULL_HANDLE) + { + pInfo->moveCount = 0; + return VK_SUCCESS; + } + + return allocator->DefragmentationPassBegin(pInfo, context); +} +VMA_CALL_PRE 
VkResult VMA_CALL_POST vmaEndDefragmentationPass( + VmaAllocator allocator, + VmaDefragmentationContext context) +{ + VMA_ASSERT(allocator); + + VMA_DEBUG_LOG("vmaEndDefragmentationPass"); + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + if(context == VK_NULL_HANDLE) + return VK_SUCCESS; + + return allocator->DefragmentationPassEnd(context); +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory( + VmaAllocator allocator, + VmaAllocation allocation, + VkBuffer buffer) +{ VMA_ASSERT(allocator && allocation && buffer); VMA_DEBUG_LOG("vmaBindBufferMemory"); VMA_DEBUG_GLOBAL_MUTEX_LOCK - return allocator->BindBufferMemory(allocation, buffer); + return allocator->BindBufferMemory(allocation, 0, buffer, VMA_NULL); } -VkResult vmaBindImageMemory( - VmaAllocator allocator, - VmaAllocation allocation, - VkImage image) { +VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory2( + VmaAllocator allocator, + VmaAllocation allocation, + VkDeviceSize allocationLocalOffset, + VkBuffer buffer, + const void* pNext) +{ + VMA_ASSERT(allocator && allocation && buffer); + + VMA_DEBUG_LOG("vmaBindBufferMemory2"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + return allocator->BindBufferMemory(allocation, allocationLocalOffset, buffer, pNext); +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory( + VmaAllocator allocator, + VmaAllocation allocation, + VkImage image) +{ VMA_ASSERT(allocator && allocation && image); VMA_DEBUG_LOG("vmaBindImageMemory"); VMA_DEBUG_GLOBAL_MUTEX_LOCK - return allocator->BindImageMemory(allocation, image); + return allocator->BindImageMemory(allocation, 0, image, VMA_NULL); +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory2( + VmaAllocator allocator, + VmaAllocation allocation, + VkDeviceSize allocationLocalOffset, + VkImage image, + const void* pNext) +{ + VMA_ASSERT(allocator && allocation && image); + + VMA_DEBUG_LOG("vmaBindImageMemory2"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + return allocator->BindImageMemory(allocation, allocationLocalOffset, image, 
pNext); } -VkResult vmaCreateBuffer( - VmaAllocator allocator, - const VkBufferCreateInfo *pBufferCreateInfo, - const VmaAllocationCreateInfo *pAllocationCreateInfo, - VkBuffer *pBuffer, - VmaAllocation *pAllocation, - VmaAllocationInfo *pAllocationInfo) { +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBuffer( + VmaAllocator allocator, + const VkBufferCreateInfo* pBufferCreateInfo, + const VmaAllocationCreateInfo* pAllocationCreateInfo, + VkBuffer* pBuffer, + VmaAllocation* pAllocation, + VmaAllocationInfo* pAllocationInfo) +{ VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation); - if (pBufferCreateInfo->size == 0) { + if(pBufferCreateInfo->size == 0) + { return VK_ERROR_VALIDATION_FAILED_EXT; } - + VMA_DEBUG_LOG("vmaCreateBuffer"); - + VMA_DEBUG_GLOBAL_MUTEX_LOCK *pBuffer = VK_NULL_HANDLE; @@ -15203,77 +17993,83 @@ VkResult vmaCreateBuffer( // 1. Create VkBuffer. VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)( - allocator->m_hDevice, - pBufferCreateInfo, - allocator->GetAllocationCallbacks(), - pBuffer); - if (res >= 0) { + allocator->m_hDevice, + pBufferCreateInfo, + allocator->GetAllocationCallbacks(), + pBuffer); + if(res >= 0) + { // 2. vkGetBufferMemoryRequirements. VkMemoryRequirements vkMemReq = {}; bool requiresDedicatedAllocation = false; - bool prefersDedicatedAllocation = false; + bool prefersDedicatedAllocation = false; allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq, - requiresDedicatedAllocation, prefersDedicatedAllocation); + requiresDedicatedAllocation, prefersDedicatedAllocation); // Make sure alignment requirements for specific buffer usages reported // in Physical Device Properties are included in alignment reported by memory requirements. 
- if ((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) != 0) { - VMA_ASSERT(vkMemReq.alignment % - allocator->m_PhysicalDeviceProperties.limits.minTexelBufferOffsetAlignment == - 0); + if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) != 0) + { + VMA_ASSERT(vkMemReq.alignment % + allocator->m_PhysicalDeviceProperties.limits.minTexelBufferOffsetAlignment == 0); } - if ((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) != 0) { - VMA_ASSERT(vkMemReq.alignment % - allocator->m_PhysicalDeviceProperties.limits.minUniformBufferOffsetAlignment == - 0); + if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) != 0) + { + VMA_ASSERT(vkMemReq.alignment % + allocator->m_PhysicalDeviceProperties.limits.minUniformBufferOffsetAlignment == 0); } - if ((pBufferCreateInfo->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) != 0) { - VMA_ASSERT(vkMemReq.alignment % - allocator->m_PhysicalDeviceProperties.limits.minStorageBufferOffsetAlignment == - 0); + if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) != 0) + { + VMA_ASSERT(vkMemReq.alignment % + allocator->m_PhysicalDeviceProperties.limits.minStorageBufferOffsetAlignment == 0); } // 3. Allocate memory using allocator. 
res = allocator->AllocateMemory( - vkMemReq, - requiresDedicatedAllocation, - prefersDedicatedAllocation, - *pBuffer, // dedicatedBuffer - VK_NULL_HANDLE, // dedicatedImage - *pAllocationCreateInfo, - VMA_SUBALLOCATION_TYPE_BUFFER, - 1, // allocationCount - pAllocation); + vkMemReq, + requiresDedicatedAllocation, + prefersDedicatedAllocation, + *pBuffer, // dedicatedBuffer + VK_NULL_HANDLE, // dedicatedImage + *pAllocationCreateInfo, + VMA_SUBALLOCATION_TYPE_BUFFER, + 1, // allocationCount + pAllocation); #if VMA_RECORDING_ENABLED - if (allocator->GetRecorder() != VMA_NULL) { + if(allocator->GetRecorder() != VMA_NULL) + { allocator->GetRecorder()->RecordCreateBuffer( - allocator->GetCurrentFrameIndex(), - *pBufferCreateInfo, - *pAllocationCreateInfo, - *pAllocation); + allocator->GetCurrentFrameIndex(), + *pBufferCreateInfo, + *pAllocationCreateInfo, + *pAllocation); } #endif - if (res >= 0) { + if(res >= 0) + { // 3. Bind buffer with memory. - if ((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0) { - res = allocator->BindBufferMemory(*pAllocation, *pBuffer); + if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0) + { + res = allocator->BindBufferMemory(*pAllocation, 0, *pBuffer, VMA_NULL); } - if (res >= 0) { -// All steps succeeded. -#if VMA_STATS_STRING_ENABLED - (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage); -#endif - if (pAllocationInfo != VMA_NULL) { + if(res >= 0) + { + // All steps succeeded. 
+ #if VMA_STATS_STRING_ENABLED + (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage); + #endif + if(pAllocationInfo != VMA_NULL) + { allocator->GetAllocationInfo(*pAllocation, pAllocationInfo); } return VK_SUCCESS; } allocator->FreeMemory( - 1, // allocationCount - pAllocation); + 1, // allocationCount + pAllocation); *pAllocation = VK_NULL_HANDLE; (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks()); *pBuffer = VK_NULL_HANDLE; @@ -15286,13 +18082,15 @@ VkResult vmaCreateBuffer( return res; } -void vmaDestroyBuffer( - VmaAllocator allocator, - VkBuffer buffer, - VmaAllocation allocation) { +VMA_CALL_PRE void VMA_CALL_POST vmaDestroyBuffer( + VmaAllocator allocator, + VkBuffer buffer, + VmaAllocation allocation) +{ VMA_ASSERT(allocator); - if (buffer == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE) { + if(buffer == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE) + { return; } @@ -15301,38 +18099,43 @@ void vmaDestroyBuffer( VMA_DEBUG_GLOBAL_MUTEX_LOCK #if VMA_RECORDING_ENABLED - if (allocator->GetRecorder() != VMA_NULL) { + if(allocator->GetRecorder() != VMA_NULL) + { allocator->GetRecorder()->RecordDestroyBuffer( - allocator->GetCurrentFrameIndex(), - allocation); + allocator->GetCurrentFrameIndex(), + allocation); } #endif - if (buffer != VK_NULL_HANDLE) { + if(buffer != VK_NULL_HANDLE) + { (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks()); } - if (allocation != VK_NULL_HANDLE) { + if(allocation != VK_NULL_HANDLE) + { allocator->FreeMemory( - 1, // allocationCount - &allocation); + 1, // allocationCount + &allocation); } } -VkResult vmaCreateImage( - VmaAllocator allocator, - const VkImageCreateInfo *pImageCreateInfo, - const VmaAllocationCreateInfo *pAllocationCreateInfo, - VkImage *pImage, - VmaAllocation *pAllocation, - VmaAllocationInfo *pAllocationInfo) { +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateImage( + 
VmaAllocator allocator, + const VkImageCreateInfo* pImageCreateInfo, + const VmaAllocationCreateInfo* pAllocationCreateInfo, + VkImage* pImage, + VmaAllocation* pAllocation, + VmaAllocationInfo* pAllocationInfo) +{ VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation); - if (pImageCreateInfo->extent.width == 0 || - pImageCreateInfo->extent.height == 0 || - pImageCreateInfo->extent.depth == 0 || - pImageCreateInfo->mipLevels == 0 || - pImageCreateInfo->arrayLayers == 0) { + if(pImageCreateInfo->extent.width == 0 || + pImageCreateInfo->extent.height == 0 || + pImageCreateInfo->extent.depth == 0 || + pImageCreateInfo->mipLevels == 0 || + pImageCreateInfo->arrayLayers == 0) + { return VK_ERROR_VALIDATION_FAILED_EXT; } @@ -15345,62 +18148,68 @@ VkResult vmaCreateImage( // 1. Create VkImage. VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)( - allocator->m_hDevice, - pImageCreateInfo, - allocator->GetAllocationCallbacks(), - pImage); - if (res >= 0) { + allocator->m_hDevice, + pImageCreateInfo, + allocator->GetAllocationCallbacks(), + pImage); + if(res >= 0) + { VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ? - VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL : - VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR; - + VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL : + VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR; + // 2. Allocate memory using allocator. 
VkMemoryRequirements vkMemReq = {}; bool requiresDedicatedAllocation = false; - bool prefersDedicatedAllocation = false; + bool prefersDedicatedAllocation = false; allocator->GetImageMemoryRequirements(*pImage, vkMemReq, - requiresDedicatedAllocation, prefersDedicatedAllocation); + requiresDedicatedAllocation, prefersDedicatedAllocation); res = allocator->AllocateMemory( - vkMemReq, - requiresDedicatedAllocation, - prefersDedicatedAllocation, - VK_NULL_HANDLE, // dedicatedBuffer - *pImage, // dedicatedImage - *pAllocationCreateInfo, - suballocType, - 1, // allocationCount - pAllocation); + vkMemReq, + requiresDedicatedAllocation, + prefersDedicatedAllocation, + VK_NULL_HANDLE, // dedicatedBuffer + *pImage, // dedicatedImage + *pAllocationCreateInfo, + suballocType, + 1, // allocationCount + pAllocation); #if VMA_RECORDING_ENABLED - if (allocator->GetRecorder() != VMA_NULL) { + if(allocator->GetRecorder() != VMA_NULL) + { allocator->GetRecorder()->RecordCreateImage( - allocator->GetCurrentFrameIndex(), - *pImageCreateInfo, - *pAllocationCreateInfo, - *pAllocation); + allocator->GetCurrentFrameIndex(), + *pImageCreateInfo, + *pAllocationCreateInfo, + *pAllocation); } #endif - if (res >= 0) { + if(res >= 0) + { // 3. Bind image with memory. - if ((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0) { - res = allocator->BindImageMemory(*pAllocation, *pImage); + if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0) + { + res = allocator->BindImageMemory(*pAllocation, 0, *pImage, VMA_NULL); } - if (res >= 0) { -// All steps succeeded. -#if VMA_STATS_STRING_ENABLED - (*pAllocation)->InitBufferImageUsage(pImageCreateInfo->usage); -#endif - if (pAllocationInfo != VMA_NULL) { + if(res >= 0) + { + // All steps succeeded. 
+ #if VMA_STATS_STRING_ENABLED + (*pAllocation)->InitBufferImageUsage(pImageCreateInfo->usage); + #endif + if(pAllocationInfo != VMA_NULL) + { allocator->GetAllocationInfo(*pAllocation, pAllocationInfo); } return VK_SUCCESS; } allocator->FreeMemory( - 1, // allocationCount - pAllocation); + 1, // allocationCount + pAllocation); *pAllocation = VK_NULL_HANDLE; (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks()); *pImage = VK_NULL_HANDLE; @@ -15413,13 +18222,15 @@ VkResult vmaCreateImage( return res; } -void vmaDestroyImage( - VmaAllocator allocator, - VkImage image, - VmaAllocation allocation) { +VMA_CALL_PRE void VMA_CALL_POST vmaDestroyImage( + VmaAllocator allocator, + VkImage image, + VmaAllocation allocation) +{ VMA_ASSERT(allocator); - if (image == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE) { + if(image == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE) + { return; } @@ -15428,20 +18239,23 @@ void vmaDestroyImage( VMA_DEBUG_GLOBAL_MUTEX_LOCK #if VMA_RECORDING_ENABLED - if (allocator->GetRecorder() != VMA_NULL) { + if(allocator->GetRecorder() != VMA_NULL) + { allocator->GetRecorder()->RecordDestroyImage( - allocator->GetCurrentFrameIndex(), - allocation); + allocator->GetCurrentFrameIndex(), + allocation); } #endif - if (image != VK_NULL_HANDLE) { + if(image != VK_NULL_HANDLE) + { (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks()); } - if (allocation != VK_NULL_HANDLE) { + if(allocation != VK_NULL_HANDLE) + { allocator->FreeMemory( - 1, // allocationCount - &allocation); + 1, // allocationCount + &allocation); } } |