Diffstat (limited to 'core/templates')
-rw-r--r-- | core/templates/cowdata.h          |   2
-rw-r--r-- | core/templates/hash_map.h         |   2
-rw-r--r-- | core/templates/list.h             |   2
-rw-r--r-- | core/templates/local_vector.h     |   2
-rw-r--r-- | core/templates/map.h              |   2
-rw-r--r-- | core/templates/oa_hash_map.h      |   2
-rw-r--r-- | core/templates/ordered_hash_map.h |   2
-rw-r--r-- | core/templates/paged_allocator.h  | 129
-rw-r--r-- | core/templates/paged_array.h      | 367
-rw-r--r-- | core/templates/rid_owner.h        |  13
-rw-r--r-- | core/templates/set.h              |   2
-rw-r--r-- | core/templates/vector.h           |   6
-rw-r--r-- | core/templates/vmap.h             |   6
-rw-r--r-- | core/templates/vset.h             |   6
14 files changed, 528 insertions, 15 deletions
diff --git a/core/templates/cowdata.h b/core/templates/cowdata.h index d5eb08286d..705eae8f9f 100644 --- a/core/templates/cowdata.h +++ b/core/templates/cowdata.h @@ -135,7 +135,7 @@ public: } _FORCE_INLINE_ void clear() { resize(0); } - _FORCE_INLINE_ bool empty() const { return _ptr == nullptr; } + _FORCE_INLINE_ bool is_empty() const { return _ptr == nullptr; } _FORCE_INLINE_ void set(int p_index, const T &p_elem) { CRASH_BAD_INDEX(p_index, size()); diff --git a/core/templates/hash_map.h b/core/templates/hash_map.h index e1ba381595..2e98302809 100644 --- a/core/templates/hash_map.h +++ b/core/templates/hash_map.h @@ -497,7 +497,7 @@ public: return elements; } - inline bool empty() const { + inline bool is_empty() const { return elements == 0; } diff --git a/core/templates/list.h b/core/templates/list.h index 8e14aaa90d..bab5198380 100644 --- a/core/templates/list.h +++ b/core/templates/list.h @@ -373,7 +373,7 @@ public: /** * return whether the list is empty */ - _FORCE_INLINE_ bool empty() const { + _FORCE_INLINE_ bool is_empty() const { return (!_data || !_data->size_cache); } diff --git a/core/templates/local_vector.h b/core/templates/local_vector.h index 4ef040dc77..1bf4161f87 100644 --- a/core/templates/local_vector.h +++ b/core/templates/local_vector.h @@ -104,7 +104,7 @@ public: capacity = 0; } } - _FORCE_INLINE_ bool empty() const { return count == 0; } + _FORCE_INLINE_ bool is_empty() const { return count == 0; } _FORCE_INLINE_ void reserve(U p_size) { p_size = nearest_power_of_2_templated(p_size); if (p_size > capacity) { diff --git a/core/templates/map.h b/core/templates/map.h index c454d69256..4e002b67f8 100644 --- a/core/templates/map.h +++ b/core/templates/map.h @@ -625,7 +625,7 @@ public: return e; } - inline bool empty() const { return _data.size_cache == 0; } + inline bool is_empty() const { return _data.size_cache == 0; } inline int size() const { return _data.size_cache; } int calculate_depth() const { diff --git a/core/templates/oa_hash_map.h b/core/templates/oa_hash_map.h index d9d632b4ce..49551ffc2d 100644 --- a/core/templates/oa_hash_map.h +++ b/core/templates/oa_hash_map.h @@ -190,7 +190,7 @@ public: _FORCE_INLINE_ uint32_t get_capacity() const { return capacity; } _FORCE_INLINE_ uint32_t get_num_elements() const { return num_elements; } - bool empty() const { + bool is_empty() const { return num_elements == 0; } diff --git a/core/templates/ordered_hash_map.h b/core/templates/ordered_hash_map.h index 9398868b01..fce9bcfbfc 100644 --- a/core/templates/ordered_hash_map.h +++ b/core/templates/ordered_hash_map.h @@ -265,7 +265,7 @@ public: return ConstElement(list.back()); } - inline bool empty() const { return list.empty(); } + inline bool is_empty() const { return list.is_empty(); } inline int size() const { return list.size(); } const void *id() const { diff --git a/core/templates/paged_allocator.h b/core/templates/paged_allocator.h new file mode 100644 index 0000000000..ab9945dd3b --- /dev/null +++ b/core/templates/paged_allocator.h @@ -0,0 +1,129 @@ +/*************************************************************************/ +/* paged_allocator.h */ +/*************************************************************************/ +/* This file is part of: */ +/* GODOT ENGINE */ +/* https://godotengine.org */ +/*************************************************************************/ +/* Copyright (c) 2007-2020 Juan Linietsky, Ariel Manzur. */ +/* Copyright (c) 2014-2020 Godot Engine contributors (cf. AUTHORS.md). 
*/ +/* */ +/* Permission is hereby granted, free of charge, to any person obtaining */ +/* a copy of this software and associated documentation files (the */ +/* "Software"), to deal in the Software without restriction, including */ +/* without limitation the rights to use, copy, modify, merge, publish, */ +/* distribute, sublicense, and/or sell copies of the Software, and to */ +/* permit persons to whom the Software is furnished to do so, subject to */ +/* the following conditions: */ +/* */ +/* The above copyright notice and this permission notice shall be */ +/* included in all copies or substantial portions of the Software. */ +/* */ +/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */ +/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */ +/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/ +/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */ +/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */ +/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */ +/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ +/*************************************************************************/ + +#ifndef PAGED_ALLOCATOR_H +#define PAGED_ALLOCATOR_H + +#include "core/os/memory.h" +#include "core/os/spin_lock.h" +#include "core/typedefs.h" + +template <class T, bool thread_safe = false> +class PagedAllocator { + T **page_pool = nullptr; + T ***available_pool = nullptr; + uint32_t pages_allocated = 0; + uint32_t allocs_available = 0; + + uint32_t page_shift = 0; + uint32_t page_mask = 0; + uint32_t page_size = 0; + SpinLock spin_lock; + +public: + T *alloc() { + if (thread_safe) { + spin_lock.lock(); + } + if (unlikely(allocs_available == 0)) { + uint32_t pages_used = pages_allocated; + + pages_allocated++; + page_pool = (T **)memrealloc(page_pool, sizeof(T *) * pages_allocated); + available_pool = (T ***)memrealloc(available_pool, sizeof(T **) * pages_allocated); + + page_pool[pages_used] = (T *)memalloc(sizeof(T) * page_size); + available_pool[pages_used] = (T **)memalloc(sizeof(T *) * page_size); + + for (uint32_t i = 0; i < page_size; i++) { + available_pool[0][i] = &page_pool[pages_used][i]; + } + allocs_available += page_size; + } + + allocs_available--; + T *alloc = available_pool[allocs_available >> page_shift][allocs_available & page_mask]; + if (thread_safe) { + spin_lock.unlock(); + } + memnew_placement(alloc, T); + return alloc; + } + + void free(T *p_mem) { + if (thread_safe) { + spin_lock.lock(); + } + p_mem->~T(); + available_pool[allocs_available >> page_shift][allocs_available & page_mask] = p_mem; + if (thread_safe) { + spin_lock.unlock(); + } + allocs_available++; + } + + void reset() { + ERR_FAIL_COND(allocs_available < pages_allocated * page_size); + if (pages_allocated) { + for (uint32_t i = 0; i < pages_allocated; i++) { + memfree(page_pool[i]); + memfree(available_pool[i]); + } + memfree(page_pool); + memfree(available_pool); + page_pool = nullptr; + available_pool = nullptr; + pages_allocated = 0; + allocs_available = 0; + } + } + bool is_configured() const { + return page_size > 0; + } + + void configure(uint32_t p_page_size) { + ERR_FAIL_COND(page_pool != nullptr); //sanity check + ERR_FAIL_COND(p_page_size == 0); + page_size = nearest_power_of_2_templated(p_page_size); + page_mask = page_size - 1; + page_shift = get_shift_from_power_of_2(page_size); + } + + PagedAllocator(uint32_t p_page_size = 4096) { // power of 2 recommended because of 
alignment with OS page sizes. Even if element is bigger, its still a multiple and get rounded amount of pages + configure(p_page_size); + } + + ~PagedAllocator() { + ERR_FAIL_COND_MSG(allocs_available < pages_allocated * page_size, "Pages in use exist at exit in PagedAllocator"); + reset(); + } +}; + +#endif // PAGED_ALLOCATOR_H diff --git a/core/templates/paged_array.h b/core/templates/paged_array.h new file mode 100644 index 0000000000..9bbaac35c4 --- /dev/null +++ b/core/templates/paged_array.h @@ -0,0 +1,367 @@ +/*************************************************************************/ +/* paged_array.h */ +/*************************************************************************/ +/* This file is part of: */ +/* GODOT ENGINE */ +/* https://godotengine.org */ +/*************************************************************************/ +/* Copyright (c) 2007-2020 Juan Linietsky, Ariel Manzur. */ +/* Copyright (c) 2014-2020 Godot Engine contributors (cf. AUTHORS.md). */ +/* */ +/* Permission is hereby granted, free of charge, to any person obtaining */ +/* a copy of this software and associated documentation files (the */ +/* "Software"), to deal in the Software without restriction, including */ +/* without limitation the rights to use, copy, modify, merge, publish, */ +/* distribute, sublicense, and/or sell copies of the Software, and to */ +/* permit persons to whom the Software is furnished to do so, subject to */ +/* the following conditions: */ +/* */ +/* The above copyright notice and this permission notice shall be */ +/* included in all copies or substantial portions of the Software. */ +/* */ +/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */ +/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */ +/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/ +/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */ +/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */ +/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */ +/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*/ +/*************************************************************************/ + +#ifndef PAGED_ARRAY_H +#define PAGED_ARRAY_H + +#include "core/os/memory.h" +#include "core/os/spin_lock.h" +#include "core/typedefs.h" + +// PagedArray is used mainly for filling a very large array from multiple threads efficiently and without causing major fragmentation + +// PageArrayPool manages central page allocation in a thread safe matter + +template <class T> +class PagedArrayPool { + T **page_pool = nullptr; + uint32_t pages_allocated = 0; + + uint32_t *available_page_pool = nullptr; + uint32_t pages_available = 0; + + uint32_t page_size = 0; + SpinLock spin_lock; + +public: + uint32_t alloc_page() { + spin_lock.lock(); + if (unlikely(pages_available == 0)) { + uint32_t pages_used = pages_allocated; + + pages_allocated++; + page_pool = (T **)memrealloc(page_pool, sizeof(T *) * pages_allocated); + available_page_pool = (uint32_t *)memrealloc(available_page_pool, sizeof(uint32_t) * pages_allocated); + + page_pool[pages_used] = (T *)memalloc(sizeof(T) * page_size); + available_page_pool[0] = pages_used; + + pages_available++; + } + + pages_available--; + uint32_t page = available_page_pool[pages_available]; + spin_lock.unlock(); + + return page; + } + T *get_page(uint32_t p_page_id) { + return page_pool[p_page_id]; + } + + void free_page(uint32_t p_page_id) { + spin_lock.lock(); + available_page_pool[pages_available] = p_page_id; + pages_available++; + spin_lock.unlock(); + } + + uint32_t get_page_size_shift() const { + return get_shift_from_power_of_2(page_size); + } + + uint32_t get_page_size_mask() const { + return page_size - 1; + } + + void reset() { + ERR_FAIL_COND(pages_available < pages_allocated); + if (pages_allocated) { + for (uint32_t i = 0; i < pages_allocated; i++) { + memfree(page_pool[i]); + } + memfree(page_pool); + memfree(available_page_pool); + page_pool = nullptr; + available_page_pool = nullptr; + pages_allocated = 0; + pages_available = 0; + } + } + bool is_configured() const { + return page_size > 0; + } + + void configure(uint32_t p_page_size) { + ERR_FAIL_COND(page_pool != nullptr); //sanity check + ERR_FAIL_COND(p_page_size == 0); + page_size = nearest_power_of_2_templated(p_page_size); + } + + PagedArrayPool(uint32_t p_page_size = 4096) { // power of 2 recommended because of alignment with OS page sizes. Even if element is bigger, its still a multiple and get rounded amount of pages + configure(p_page_size); + } + + ~PagedArrayPool() { + ERR_FAIL_COND_MSG(pages_available < pages_allocated, "Pages in use exist at exit in PagedArrayPool"); + reset(); + } +}; + +// PageArray is a local array that is optimized to grow in place, then be cleared often. +// It does so by allocating pages from a PagedArrayPool. 
+// It is safe to use multiple PagedArrays from different threads, sharing a single PagedArrayPool + +template <class T> +class PagedArray { + PagedArrayPool<T> *page_pool = nullptr; + + T **page_data = nullptr; + uint32_t *page_ids = nullptr; + uint32_t max_pages_used = 0; + uint32_t page_size_shift = 0; + uint32_t page_size_mask = 0; + uint64_t count = 0; + + _FORCE_INLINE_ uint32_t _get_pages_in_use() const { + if (count == 0) { + return 0; + } else { + return ((count - 1) >> page_size_shift) + 1; + } + } + + void _grow_page_array() { + //no more room in the page array to put the new page, make room + if (max_pages_used == 0) { + max_pages_used = 1; + } else { + max_pages_used *= 2; // increase in powers of 2 to keep allocations to minimum + } + page_data = (T **)memrealloc(page_data, sizeof(T *) * max_pages_used); + page_ids = (uint32_t *)memrealloc(page_ids, sizeof(uint32_t) * max_pages_used); + } + +public: + _FORCE_INLINE_ const T &operator[](uint64_t p_index) const { + CRASH_BAD_UNSIGNED_INDEX(p_index, count); + uint32_t page = p_index >> page_size_shift; + uint32_t offset = p_index & page_size_mask; + + return page_data[page][offset]; + } + _FORCE_INLINE_ T &operator[](uint64_t p_index) { + CRASH_BAD_UNSIGNED_INDEX(p_index, count); + uint32_t page = p_index >> page_size_shift; + uint32_t offset = p_index & page_size_mask; + + return page_data[page][offset]; + } + + _FORCE_INLINE_ void push_back(const T &p_value) { + uint32_t remainder = count & page_size_mask; + if (unlikely(remainder == 0)) { + // at 0, so time to request a new page + uint32_t page_count = _get_pages_in_use(); + uint32_t new_page_count = page_count + 1; + + if (unlikely(new_page_count > max_pages_used)) { + ERR_FAIL_COND(page_pool == nullptr); //sanity check + + _grow_page_array(); //keep out of inline + } + + uint32_t page_id = page_pool->alloc_page(); + page_data[page_count] = page_pool->get_page(page_id); + page_ids[page_count] = page_id; + } + + // place the new value + uint32_t page = count >> page_size_shift; + uint32_t offset = count & page_size_mask; + + if (!__has_trivial_constructor(T)) { + memnew_placement(&page_data[page][offset], T(p_value)); + } else { + page_data[page][offset] = p_value; + } + + count++; + } + + _FORCE_INLINE_ void pop_back() { + ERR_FAIL_COND(count == 0); + + if (!__has_trivial_destructor(T)) { + uint32_t page = (count - 1) >> page_size_shift; + uint32_t offset = (count - 1) & page_size_mask; + page_data[page][offset].~T(); + } + + uint32_t remainder = count & page_size_mask; + if (unlikely(remainder == 1)) { + // one element remained, so page must be freed. + uint32_t last_page = _get_pages_in_use() - 1; + page_pool->free_page(page_ids[last_page]); + } + count--; + } + + void clear() { + //destruct if needed + if (!__has_trivial_destructor(T)) { + for (uint64_t i = 0; i < count; i++) { + uint32_t page = i >> page_size_shift; + uint32_t offset = i & page_size_mask; + page_data[page][offset].~T(); + } + } + + //return the pages to the pagepool, so they can be used by another array eventually + uint32_t pages_used = _get_pages_in_use(); + for (uint32_t i = 0; i < pages_used; i++) { + page_pool->free_page(page_ids[i]); + } + + count = 0; + + //note we leave page_data and page_indices intact for next use. 
If you really want to clear them call reset() + } + + void reset() { + clear(); + if (page_data) { + memfree(page_data); + memfree(page_ids); + page_data = nullptr; + page_ids = nullptr; + max_pages_used = 0; + } + } + + // This takes the pages from a source array and merges them to this one + // resulting order is undefined, but content is merged very efficiently, + // making it ideal to fill content on several threads to later join it. + + void merge_unordered(PagedArray<T> &p_array) { + ERR_FAIL_COND(page_pool != p_array.page_pool); + + uint32_t remainder = count & page_size_mask; + + T *remainder_page = nullptr; + uint32_t remainder_page_id; + + if (remainder > 0) { + uint32_t last_page = _get_pages_in_use() - 1; + remainder_page = page_data[last_page]; + remainder_page_id = page_ids[last_page]; + } + + count -= remainder; + + uint32_t src_pages = p_array._get_pages_in_use(); + uint32_t page_size = page_size_mask + 1; + + for (uint32_t i = 0; i < src_pages; i++) { + uint32_t page_count = _get_pages_in_use(); + uint32_t new_page_count = page_count + 1; + + if (unlikely(new_page_count > max_pages_used)) { + _grow_page_array(); //keep out of inline + } + + page_data[page_count] = p_array.page_data[i]; + page_ids[page_count] = p_array.page_ids[i]; + if (i == src_pages - 1) { + //last page, only increment with remainder + count += p_array.count & page_size_mask; + } else { + count += page_size; + } + } + p_array.count = 0; //take away the other array pages + + //handle the remainder page if exists + if (remainder_page) { + uint32_t new_remainder = count & page_size_mask; + + if (new_remainder > 0) { + //must merge old remainder with new remainder + + T *dst_page = page_data[_get_pages_in_use() - 1]; + uint32_t to_copy = MIN(page_size - new_remainder, remainder); + + for (uint32_t i = 0; i < to_copy; i++) { + if (!__has_trivial_constructor(T)) { + memnew_placement(&dst_page[i + new_remainder], T(remainder_page[i + remainder - to_copy])); + } else { + dst_page[i + new_remainder] = remainder_page[i + remainder - to_copy]; + } + + if (!__has_trivial_destructor(T)) { + remainder_page[i + remainder - to_copy].~T(); + } + } + + remainder -= to_copy; //subtract what was copied from remainder + count += to_copy; //add what was copied to the count + + if (remainder == 0) { + //entire remainder copied, let go of remainder page + page_pool->free_page(remainder_page_id); + remainder_page = nullptr; + } + } + + if (remainder > 0) { + //there is still remainder, append it + uint32_t page_count = _get_pages_in_use(); + uint32_t new_page_count = page_count + 1; + + if (unlikely(new_page_count > max_pages_used)) { + _grow_page_array(); //keep out of inline + } + + page_data[page_count] = remainder_page; + page_ids[page_count] = remainder_page_id; + + count += remainder; + } + } + } + + _FORCE_INLINE_ uint64_t size() const { + return count; + } + + void set_page_pool(PagedArrayPool<T> *p_page_pool) { + ERR_FAIL_COND(max_pages_used > 0); //sanity check + + page_pool = p_page_pool; + page_size_mask = page_pool->get_page_size_mask(); + page_size_shift = page_pool->get_page_size_shift(); + } + + ~PagedArray() { + reset(); + } +}; + +#endif // PAGED_ARRAY_H diff --git a/core/templates/rid_owner.h b/core/templates/rid_owner.h index d1bcb92010..7de4e43648 100644 --- a/core/templates/rid_owner.h +++ b/core/templates/rid_owner.h @@ -346,6 +346,18 @@ public: alloc.free(p_rid); } + _FORCE_INLINE_ uint32_t get_rid_count() const { + return alloc.get_rid_count(); + } + + _FORCE_INLINE_ RID get_rid_by_index(uint32_t 
p_index) { + return alloc.get_rid_by_index(p_index); + } + + _FORCE_INLINE_ T *get_ptr_by_index(uint32_t p_index) { + return *alloc.get_ptr_by_index(p_index); + } + _FORCE_INLINE_ void get_owned_list(List<RID> *p_owned) { return alloc.get_owned_list(p_owned); } @@ -353,6 +365,7 @@ public: void set_description(const char *p_descrption) { alloc.set_description(p_descrption); } + RID_PtrOwner(uint32_t p_target_chunk_byte_size = 4096) : alloc(p_target_chunk_byte_size) {} }; diff --git a/core/templates/set.h b/core/templates/set.h index d0ac71a710..c323618062 100644 --- a/core/templates/set.h +++ b/core/templates/set.h @@ -576,7 +576,7 @@ public: return e; } - inline bool empty() const { return _data.size_cache == 0; } + inline bool is_empty() const { return _data.size_cache == 0; } inline int size() const { return _data.size_cache; } int calculate_depth() const { diff --git a/core/templates/vector.h b/core/templates/vector.h index 9d45f7c30a..1c06e7e3ab 100644 --- a/core/templates/vector.h +++ b/core/templates/vector.h @@ -79,7 +79,7 @@ public: _FORCE_INLINE_ T *ptrw() { return _cowdata.ptrw(); } _FORCE_INLINE_ const T *ptr() const { return _cowdata.ptr(); } _FORCE_INLINE_ void clear() { resize(0); } - _FORCE_INLINE_ bool empty() const { return _cowdata.empty(); } + _FORCE_INLINE_ bool is_empty() const { return _cowdata.is_empty(); } _FORCE_INLINE_ T get(int p_index) { return _cowdata.get(p_index); } _FORCE_INLINE_ const T &get(int p_index) const { return _cowdata.get(p_index); } @@ -112,6 +112,10 @@ public: sort_custom<_DefaultComparator<T>>(); } + Vector<T> duplicate() { + return *this; + } + void ordered_insert(const T &p_val) { int i; for (i = 0; i < _cowdata.size(); i++) { diff --git a/core/templates/vmap.h b/core/templates/vmap.h index 8d2a3d2a9c..5fdfa1a7b5 100644 --- a/core/templates/vmap.h +++ b/core/templates/vmap.h @@ -54,7 +54,7 @@ private: _FORCE_INLINE_ int _find(const T &p_val, bool &r_exact) const { r_exact = false; - if (_cowdata.empty()) { + if (_cowdata.is_empty()) { return 0; } @@ -89,7 +89,7 @@ private: } _FORCE_INLINE_ int _find_exact(const T &p_val) const { - if (_cowdata.empty()) { + if (_cowdata.is_empty()) { return -1; } @@ -147,7 +147,7 @@ public: } _FORCE_INLINE_ int size() const { return _cowdata.size(); } - _FORCE_INLINE_ bool empty() const { return _cowdata.empty(); } + _FORCE_INLINE_ bool is_empty() const { return _cowdata.is_empty(); } const Pair *get_array() const { return _cowdata.ptr(); diff --git a/core/templates/vset.h b/core/templates/vset.h index 4c0b8717b6..f2ea5c814b 100644 --- a/core/templates/vset.h +++ b/core/templates/vset.h @@ -40,7 +40,7 @@ class VSet { _FORCE_INLINE_ int _find(const T &p_val, bool &r_exact) const { r_exact = false; - if (_data.empty()) { + if (_data.is_empty()) { return 0; } @@ -76,7 +76,7 @@ class VSet { } _FORCE_INLINE_ int _find_exact(const T &p_val) const { - if (_data.empty()) { + if (_data.is_empty()) { return -1; } @@ -126,7 +126,7 @@ public: return _find_exact(p_val); } - _FORCE_INLINE_ bool empty() const { return _data.empty(); } + _FORCE_INLINE_ bool is_empty() const { return _data.is_empty(); } _FORCE_INLINE_ int size() const { return _data.size(); } |
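
For context (not part of the patch itself): the two new files, paged_allocator.h and paged_array.h, add page-based containers aimed at fast, low-fragmentation allocation. Below is a minimal usage sketch of PagedAllocator, based only on the API visible in the diff above. The element type DummyItem and the function name are hypothetical, and the Godot headers/macros are assumed to come from the surrounding source tree.

// Hypothetical usage sketch for PagedAllocator (not part of the commit).
#include "core/templates/paged_allocator.h"

struct DummyItem { // illustrative element type
	int value = 0;
};

void paged_allocator_example() {
	// Default page size is 4096 elements; a power of two is recommended.
	// The second template argument enables the spin-lock guarded (thread-safe) path.
	PagedAllocator<DummyItem, true> allocator;

	DummyItem *item = allocator.alloc(); // takes a free slot, allocating a new page only when none remain
	item->value = 42;

	allocator.free(item); // runs the destructor and returns the slot to the available pool

	// Every allocation must be freed before the allocator is destroyed;
	// otherwise the destructor reports "Pages in use exist at exit".
}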
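Similarly, a sketch of how a single PagedArrayPool can be shared by several PagedArray instances, again based only on the API introduced in this diff (the function name and element counts are illustrative). The pool is guarded by a spin lock, while each individual PagedArray is intended to be filled by one thread and merged afterwards, as the in-file comments describe.

// Hypothetical usage sketch for PagedArray / PagedArrayPool (not part of the commit).
#include "core/templates/paged_array.h"

void paged_array_example() {
	// One shared pool; each PagedArray borrows pages from it.
	PagedArrayPool<uint32_t> pool; // default page size of 4096 elements

	PagedArray<uint32_t> thread_local_array;
	PagedArray<uint32_t> merged;
	thread_local_array.set_page_pool(&pool); // must be set before the first push_back
	merged.set_page_pool(&pool);

	// Typically each worker thread fills its own PagedArray...
	for (uint32_t i = 0; i < 10000; i++) {
		thread_local_array.push_back(i);
	}

	// ...and the results are joined afterwards. Page ownership is transferred,
	// so the merge is cheap; the resulting order is undefined.
	merged.merge_unordered(thread_local_array);

	uint32_t first = merged[0]; // indexed access resolves page + offset internally
	(void)first;

	merged.clear(); // returns pages to the pool, keeps the local page table for reuse
	merged.reset(); // also frees the page table; do this before the pool is destroyed
	thread_local_array.reset();
}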