author    | Juan Linietsky <reduzio@gmail.com> | 2020-12-24 00:20:45 -0300
committer | GitHub <noreply@github.com>        | 2020-12-24 00:20:45 -0300
commit    | 3fdf4bfe71311000bf706c16d79b475308a200ac (patch)
tree      | 9a6cd10023c594aaaa79ab12f65fd19cbfc70db0 /core/templates
parent    | 169159c8aac5807725594145bf029c0ff7ac8ef1 (diff)
parent    | 83058597cf02255d7c0359a96f125010f63deff7 (diff)
Merge pull request #44623 from reduz/rewrite-renderer-indexer
Replace Octree by DynamicBVH in cull code
Diffstat (limited to 'core/templates')
-rw-r--r-- | core/templates/paged_allocator.h | 129
-rw-r--r-- | core/templates/paged_array.h     |   2
-rw-r--r-- | core/templates/rid_owner.h       |  13
3 files changed, 143 insertions(+), 1 deletion(-)
diff --git a/core/templates/paged_allocator.h b/core/templates/paged_allocator.h
new file mode 100644
index 0000000000..8bd1eecdeb
--- /dev/null
+++ b/core/templates/paged_allocator.h
@@ -0,0 +1,129 @@
+/*************************************************************************/
+/*  paged_allocator.h                                                    */
+/*************************************************************************/
+/*                       This file is part of:                           */
+/*                           GODOT ENGINE                                */
+/*                      https://godotengine.org                          */
+/*************************************************************************/
+/* Copyright (c) 2007-2020 Juan Linietsky, Ariel Manzur.                 */
+/* Copyright (c) 2014-2020 Godot Engine contributors (cf. AUTHORS.md).   */
+/*                                                                       */
+/* Permission is hereby granted, free of charge, to any person obtaining */
+/* a copy of this software and associated documentation files (the       */
+/* "Software"), to deal in the Software without restriction, including   */
+/* without limitation the rights to use, copy, modify, merge, publish,   */
+/* distribute, sublicense, and/or sell copies of the Software, and to    */
+/* permit persons to whom the Software is furnished to do so, subject to */
+/* the following conditions:                                             */
+/*                                                                       */
+/* The above copyright notice and this permission notice shall be        */
+/* included in all copies or substantial portions of the Software.       */
+/*                                                                       */
+/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,       */
+/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF    */
+/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/
+/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY  */
+/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,  */
+/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE     */
+/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.                */
+/*************************************************************************/
+
+#ifndef PAGED_ALLOCATOR_H
+#define PAGED_ALLOCATOR_H
+
+#include "core/os/memory.h"
+#include "core/os/spin_lock.h"
+#include "core/typedefs.h"
+
+template <class T, bool thread_safe = false>
+class PagedAllocator {
+	T **page_pool = nullptr;
+	T ***available_pool = nullptr;
+	uint32_t pages_allocated = 0;
+	uint32_t allocs_available = 0;
+
+	uint32_t page_shift = 0;
+	uint32_t page_mask = 0;
+	uint32_t page_size = 0;
+	SpinLock spin_lock;
+
+public:
+	T *alloc() {
+		if (thread_safe) {
+			spin_lock.lock();
+		}
+		if (unlikely(allocs_available == 0)) {
+			uint32_t pages_used = pages_allocated;
+
+			pages_allocated++;
+			page_pool = (T **)memrealloc(page_pool, sizeof(T *) * pages_allocated);
+			available_pool = (T ***)memrealloc(available_pool, sizeof(T **) * pages_allocated);
+
+			page_pool[pages_used] = (T *)memalloc(sizeof(T) * page_size);
+			available_pool[pages_used] = (T **)memalloc(sizeof(T *) * page_size);
+
+			for (uint32_t i = 0; i < page_size; i++) {
+				available_pool[0][i] = &page_pool[pages_used][i];
+			}
+			allocs_available += page_size;
+		}
+
+		allocs_available--;
+		T *alloc = available_pool[allocs_available >> page_shift][allocs_available & page_mask];
+		if (thread_safe) {
+			spin_lock.unlock();
+		}
+		memnew_placement(alloc, T);
+		return alloc;
+	}
+
+	void free(T *p_mem) {
+		if (thread_safe) {
+			spin_lock.lock();
+		}
+		p_mem->~T();
+		available_pool[allocs_available >> page_shift][allocs_available & page_mask] = p_mem;
+		if (thread_safe) {
+			spin_lock.unlock();
+		}
+		allocs_available++;
+	}
+
+	void reset() {
+		ERR_FAIL_COND(allocs_available < pages_allocated * page_size);
+		if (pages_allocated) {
+			for (uint32_t i = 0; i < pages_allocated; i++) {
+				memfree(page_pool[i]);
+				memfree(available_pool[i]);
+			}
+			memfree(page_pool);
+			memfree(available_pool);
+			page_pool = nullptr;
+			available_pool = nullptr;
+			pages_allocated = 0;
+			allocs_available = 0;
+		}
+	}
+	bool is_configured() const {
+		return page_size > 0;
+	}
+
+	void configure(uint32_t p_page_size, bool p_thread_safe) {
+		ERR_FAIL_COND(page_pool != nullptr); //sanity check
+		ERR_FAIL_COND(p_page_size == 0);
+		page_size = nearest_power_of_2_templated(p_page_size);
+		page_mask = page_size - 1;
+		page_shift = get_shift_from_power_of_2(page_size);
+	}
+
+	PagedAllocator(uint32_t p_page_size = 4096, bool p_thread_safe = false) { // power of 2 recommended because of alignment with OS page sizes. Even if element is bigger, its still a multiple and get rounded amount of pages
+		configure(p_page_size, false);
+	}
+
+	~PagedAllocator() {
+		ERR_FAIL_COND_MSG(allocs_available < pages_allocated * page_size, "Pages in use exist at exit in PagedAllocator");
+		reset();
+	}
+};
+
+#endif // PAGED_ALLOCATOR_H
diff --git a/core/templates/paged_array.h b/core/templates/paged_array.h
index 71183c4ad8..aaf3893715 100644
--- a/core/templates/paged_array.h
+++ b/core/templates/paged_array.h
@@ -329,7 +329,7 @@ public:
 		}
 	}
 
-	uint64_t size() const {
+	_FORCE_INLINE_ uint64_t size() const {
 		return count;
 	}
 
diff --git a/core/templates/rid_owner.h b/core/templates/rid_owner.h
index d1bcb92010..7de4e43648 100644
--- a/core/templates/rid_owner.h
+++ b/core/templates/rid_owner.h
@@ -346,6 +346,18 @@ public:
 		alloc.free(p_rid);
 	}
 
+	_FORCE_INLINE_ uint32_t get_rid_count() const {
+		return alloc.get_rid_count();
+	}
+
+	_FORCE_INLINE_ RID get_rid_by_index(uint32_t p_index) {
+		return alloc.get_rid_by_index(p_index);
+	}
+
+	_FORCE_INLINE_ T *get_ptr_by_index(uint32_t p_index) {
+		return *alloc.get_ptr_by_index(p_index);
+	}
+
 	_FORCE_INLINE_ void get_owned_list(List<RID> *p_owned) {
 		return alloc.get_owned_list(p_owned);
 	}
@@ -353,6 +365,7 @@ public:
 	void set_description(const char *p_descrption) {
 		alloc.set_description(p_descrption);
 	}
+
 	RID_PtrOwner(uint32_t p_target_chunk_byte_size = 4096) :
 			alloc(p_target_chunk_byte_size) {}
 };
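
For context, a minimal usage sketch of the new PagedAllocator follows. It relies only on the API added in this diff (the constructor, alloc, free, and reset); the CullItem struct, its fields, and the function names are hypothetical placeholders, not part of the commit:

```cpp
#include "core/templates/paged_allocator.h"

// Hypothetical element type; any default-constructible T works.
struct CullItem {
	uint32_t index = 0;
	float bounds[6] = {};
};

// One pool per element type. configure() rounds the page size up to a
// power of two, so a free-list index splits into a page (>> page_shift)
// and an offset within that page (& page_mask).
PagedAllocator<CullItem> cull_item_pool; // 4096 elements per page by default

void cull_example() {
	CullItem *item = cull_item_pool.alloc(); // placement-new constructs the element
	item->index = 42;

	cull_item_pool.free(item); // runs ~T() and returns the slot to the free list
	cull_item_pool.reset(); // releases all pages; only valid once every element is freed
}
```

For concurrent use, the second template parameter enables the SpinLock guarding alloc() and free(), e.g. `PagedAllocator<CullItem, true>`.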