Diffstat (limited to 'core/templates')
-rw-r--r--  core/templates/command_queue_mt.h     10
-rw-r--r--  core/templates/cowdata.h               25
-rw-r--r--  core/templates/map.h                    6
-rw-r--r--  core/templates/oa_hash_map.h           24
-rw-r--r--  core/templates/paged_allocator.h        2
-rw-r--r--  core/templates/pooled_list.h          149
-rw-r--r--  core/templates/safe_list.h             38
-rw-r--r--  core/templates/self_list.h              2
-rw-r--r--  core/templates/set.h                    4
-rw-r--r--  core/templates/thread_work_pool.h       4
-rw-r--r--  core/templates/vector.h                19
11 files changed, 208 insertions, 75 deletions
diff --git a/core/templates/command_queue_mt.h b/core/templates/command_queue_mt.h
index 1ecb81c2a2..7d3e31b5bc 100644
--- a/core/templates/command_queue_mt.h
+++ b/core/templates/command_queue_mt.h
@@ -215,7 +215,7 @@
T *instance; \
M method; \
SEMIC_SEP_LIST(PARAM_DECL, N); \
- virtual void call() { \
+ virtual void call() override { \
(instance->*method)(COMMA_SEP_LIST(ARG, N)); \
} \
};
@@ -227,7 +227,7 @@
T *instance; \
M method; \
SEMIC_SEP_LIST(PARAM_DECL, N); \
- virtual void call() { \
+ virtual void call() override { \
*ret = (instance->*method)(COMMA_SEP_LIST(ARG, N)); \
} \
};
@@ -238,7 +238,7 @@
T *instance; \
M method; \
SEMIC_SEP_LIST(PARAM_DECL, N); \
- virtual void call() { \
+ virtual void call() override { \
(instance->*method)(COMMA_SEP_LIST(ARG, N)); \
} \
};
@@ -311,9 +311,9 @@ class CommandQueueMT {
};
struct SyncCommand : public CommandBase {
- SyncSemaphore *sync_sem;
+ SyncSemaphore *sync_sem = nullptr;
- virtual void post() {
+ virtual void post() override {
sync_sem->sem.post();
}
};
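The hunks above only add the `override` specifier to the generated call()/post() implementations and default-initialize sync_sem; behavior is unchanged, but the compiler now rejects any mismatch with the base class signature. A minimal standalone sketch of what `override` catches (plain C++, not the engine's command macros):

    #include <cstdio>

    struct CommandBase {
        virtual void call() = 0;
        virtual ~CommandBase() {}
    };

    struct PrintCommand : public CommandBase {
        // 'override' forces the compiler to verify this really overrides a
        // base virtual; a typo such as 'void calll()' or a changed base
        // signature becomes a compile error instead of a silent new method.
        virtual void call() override {
            std::printf("called\n");
        }
    };

    int main() {
        PrintCommand c;
        CommandBase *base = &c;
        base->call(); // virtual dispatch reaches PrintCommand::call()
        return 0;
    }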
diff --git a/core/templates/cowdata.h b/core/templates/cowdata.h
index 326616b607..f1ac32928f 100644
--- a/core/templates/cowdata.h
+++ b/core/templates/cowdata.h
@@ -86,13 +86,6 @@ private:
return reinterpret_cast<uint32_t *>(_ptr) - 1;
}
- _FORCE_INLINE_ T *_get_data() const {
- if (!_ptr) {
- return nullptr;
- }
- return reinterpret_cast<T *>(_ptr);
- }
-
_FORCE_INLINE_ size_t _get_alloc_size(size_t p_elements) const {
return next_power_of_2(p_elements * sizeof(T));
}
@@ -128,11 +121,11 @@ public:
_FORCE_INLINE_ T *ptrw() {
_copy_on_write();
- return (T *)_get_data();
+ return _ptr;
}
_FORCE_INLINE_ const T *ptr() const {
- return _get_data();
+ return _ptr;
}
_FORCE_INLINE_ int size() const {
@@ -150,19 +143,19 @@ public:
_FORCE_INLINE_ void set(int p_index, const T &p_elem) {
ERR_FAIL_INDEX(p_index, size());
_copy_on_write();
- _get_data()[p_index] = p_elem;
+ _ptr[p_index] = p_elem;
}
_FORCE_INLINE_ T &get_m(int p_index) {
CRASH_BAD_INDEX(p_index, size());
_copy_on_write();
- return _get_data()[p_index];
+ return _ptr[p_index];
}
_FORCE_INLINE_ const T &get(int p_index) const {
CRASH_BAD_INDEX(p_index, size());
- return _get_data()[p_index];
+ return _ptr[p_index];
}
Error resize(int p_size);
@@ -249,7 +242,7 @@ uint32_t CowData<T>::_copy_on_write() {
} else {
for (uint32_t i = 0; i < current_size; i++) {
- memnew_placement(&_data[i], T(_get_data()[i]));
+ memnew_placement(&_data[i], T(_ptr[i]));
}
}
@@ -308,10 +301,8 @@ Error CowData<T>::resize(int p_size) {
// construct the newly created elements
if (!__has_trivial_constructor(T)) {
- T *elems = _get_data();
-
for (int i = *_get_size(); i < p_size; i++) {
- memnew_placement(&elems[i], T);
+ memnew_placement(&_ptr[i], T);
}
}
@@ -321,7 +312,7 @@ Error CowData<T>::resize(int p_size) {
if (!__has_trivial_destructor(T)) {
// deinitialize no longer needed elements
for (uint32_t i = p_size; i < *_get_size(); i++) {
- T *t = &_get_data()[i];
+ T *t = &_ptr[i];
t->~T();
}
}
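The cowdata.h change removes the `_get_data()` wrapper and reads the `_ptr` member directly; the copy-on-write contract is unchanged: `ptr()` returns a read-only view of the possibly shared buffer, while `ptrw()` calls `_copy_on_write()` first so the buffer is unshared before it can be mutated. A rough usage sketch, assuming Godot's Vector<T> (which is backed by CowData):

    #include "core/templates/vector.h"

    void cow_example() {
        Vector<int> a;
        a.push_back(1);

        Vector<int> b = a;      // b shares a's buffer; no allocation or copy yet

        const int *r = b.ptr(); // read-only view, buffer stays shared
        (void)r;

        int *w = b.ptrw();      // triggers _copy_on_write(): b detaches from a
        w[0] = 2;               // a[0] is still 1
    }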
diff --git a/core/templates/map.h b/core/templates/map.h
index f228640a1e..c54da1dc03 100644
--- a/core/templates/map.h
+++ b/core/templates/map.h
@@ -178,7 +178,7 @@ public:
private:
struct _Data {
Element *_root = nullptr;
- Element *_nil;
+ Element *_nil = nullptr;
int size_cache = 0;
_FORCE_INLINE_ _Data() {
@@ -344,7 +344,7 @@ private:
void _insert_rb_fix(Element *p_new_node) {
Element *node = p_new_node;
Element *nparent = node->parent;
- Element *ngrand_parent;
+ Element *ngrand_parent = nullptr;
while (nparent->color == RED) {
ngrand_parent = nparent->parent;
@@ -500,7 +500,7 @@ private:
Element *rp = ((p_node->left == _data._nil) || (p_node->right == _data._nil)) ? p_node : p_node->_next;
Element *node = (rp->left == _data._nil) ? rp->right : rp->left;
- Element *sibling;
+ Element *sibling = nullptr;
if (rp == rp->parent->left) {
rp->parent->left = node;
sibling = rp->parent->right;
diff --git a/core/templates/oa_hash_map.h b/core/templates/oa_hash_map.h
index c91d27ebe1..25c21d1802 100644
--- a/core/templates/oa_hash_map.h
+++ b/core/templates/oa_hash_map.h
@@ -145,7 +145,7 @@ private:
uint32_t old_capacity = capacity;
// Capacity can't be 0.
- capacity = MAX(1, p_new_capacity);
+ capacity = MAX(1u, p_new_capacity);
TKey *old_keys = keys;
TValue *old_values = values;
@@ -246,13 +246,17 @@ public:
return false;
}
- /**
- * returns true if the value was found, false otherwise.
- *
- * if r_data is not nullptr then the value will be written to the object
- * it points to.
- */
- TValue *lookup_ptr(const TKey &p_key) const {
+ const TValue *lookup_ptr(const TKey &p_key) const {
+ uint32_t pos = 0;
+ bool exists = _lookup_pos(p_key, pos);
+
+ if (exists) {
+ return &values[pos];
+ }
+ return nullptr;
+ }
+
+ TValue *lookup_ptr(const TKey &p_key) {
uint32_t pos = 0;
bool exists = _lookup_pos(p_key, pos);
@@ -306,7 +310,7 @@ public:
bool valid;
const TKey *key;
- TValue *value;
+ TValue *value = nullptr;
private:
uint32_t pos;
@@ -367,7 +371,7 @@ public:
OAHashMap(uint32_t p_initial_capacity = 64) {
// Capacity can't be 0.
- capacity = MAX(1, p_initial_capacity);
+ capacity = MAX(1u, p_initial_capacity);
keys = static_cast<TKey *>(Memory::alloc_static(sizeof(TKey) * capacity));
values = static_cast<TValue *>(Memory::alloc_static(sizeof(TValue) * capacity));
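Besides making the MAX(1, ...) calls unsigned, the old single `TValue *lookup_ptr(...) const` let callers mutate values through a const map; it is split into a const overload returning `const TValue *` and a non-const overload returning `TValue *`. A minimal sketch of that const-correct overloading pattern on a toy container (names are illustrative, not OAHashMap's):

    #include <cstdio>

    struct IntSlot {
        int value = 0;
        bool used = false;
    };

    struct TinyMap {
        IntSlot slots[8];

        const int *lookup_ptr(int key) const { // read-only access via a const map
            return slots[key & 7].used ? &slots[key & 7].value : nullptr;
        }
        int *lookup_ptr(int key) {             // mutable access via a non-const map
            return slots[key & 7].used ? &slots[key & 7].value : nullptr;
        }
    };

    int main() {
        TinyMap m;
        m.slots[3] = { 42, true };
        if (int *v = m.lookup_ptr(3)) {
            *v = 7; // allowed: m is non-const
        }
        const TinyMap &cm = m;
        if (const int *v = cm.lookup_ptr(3)) {
            std::printf("%d\n", *v); // read-only: writing through v would not compile
        }
        return 0;
    }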
diff --git a/core/templates/paged_allocator.h b/core/templates/paged_allocator.h
index 5bc723787f..b9067e2edd 100644
--- a/core/templates/paged_allocator.h
+++ b/core/templates/paged_allocator.h
@@ -86,10 +86,10 @@ public:
}
p_mem->~T();
available_pool[allocs_available >> page_shift][allocs_available & page_mask] = p_mem;
+ allocs_available++;
if (thread_safe) {
spin_lock.unlock();
}
- allocs_available++;
}
void reset(bool p_allow_unfreed = false) {
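The paged_allocator.h hunk moves `allocs_available++` before `spin_lock.unlock()`, so the freed slot and the counter that publishes it are updated while the lock is still held; previously, with thread_safe enabled, another thread could run between the unlock and the increment and observe an inconsistent slot/count pair, or two frees could race on the increment itself. A minimal sketch of the same rule, with std::mutex standing in for the engine's SpinLock (illustrative, not the allocator itself):

    #include <mutex>
    #include <vector>

    // Simplified free pool: the pointer store and the counter increment must
    // happen under the same lock, otherwise a concurrent alloc_slot() can
    // read a stale counter/slot pair.
    struct FreePool {
        std::mutex lock;
        std::vector<void *> available;
        size_t allocs_available = 0;

        void free_slot(void *p_mem) {
            std::lock_guard<std::mutex> guard(lock);
            if (allocs_available == available.size()) {
                available.push_back(nullptr);
            }
            available[allocs_available] = p_mem; // publish the freed slot...
            allocs_available++;                  // ...and its count, under the same lock
        }

        void *alloc_slot() {
            std::lock_guard<std::mutex> guard(lock);
            if (allocs_available == 0) {
                return nullptr; // a real allocator would grow the pool here
            }
            allocs_available--;
            return available[allocs_available];
        }
    };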
diff --git a/core/templates/pooled_list.h b/core/templates/pooled_list.h
index 360fda81f8..f13156b292 100644
--- a/core/templates/pooled_list.h
+++ b/core/templates/pooled_list.h
@@ -28,16 +28,13 @@
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
/*************************************************************************/
-#ifndef POOLED_LIST_H
-#define POOLED_LIST_H
-
-#include "core/templates/local_vector.h"
+#pragma once
// Simple template to provide a pool with O(1) allocate and free.
// The freelist could alternatively be a linked list placed within the unused elements
// to use less memory, however a separate freelist is probably more cache friendly.
-//
-// NOTE: Take great care when using this with non POD types. The construction and destruction
+
+// NOTE : Take great care when using this with non POD types. The construction and destruction
// is done in the LocalVector, NOT as part of the pool. So requesting a new item does not guarantee
// a constructor is run, and free does not guarantee a destructor.
// You should generally handle clearing
@@ -45,33 +42,60 @@
// This is by design for fastest use in the BVH. If you want a more general pool
// that does call constructors / destructors on request / free, this should probably be
// a separate template.
-template <class T, bool force_trivial = false>
+
+// The zero_on_first_request feature is optional and is useful for e.g. pools of handles,
+// which may use a ref count which we want to be initialized to zero the first time a handle is created,
+// but left alone on subsequent allocations (as will typically be incremented).
+
+// Note that there is no function to compact the pool - this would
+// invalidate any existing pool IDs held externally.
+// Compaction can be done but would rely on a more complex method
+// of preferentially giving out lower IDs in the freelist first.
+
+#include "core/templates/local_vector.h"
+
+template <class T, class U = uint32_t, bool force_trivial = false, bool zero_on_first_request = false>
class PooledList {
- LocalVector<T, uint32_t, force_trivial> list;
- LocalVector<uint32_t, uint32_t, true> freelist;
+ LocalVector<T, U, force_trivial> list;
+ LocalVector<U, U, true> freelist;
// not all list members are necessarily used
- int _used_size;
+ U _used_size;
public:
PooledList() {
_used_size = 0;
}
- int estimate_memory_use() const {
- return (list.size() * sizeof(T)) + (freelist.size() * sizeof(uint32_t));
+ // Use with care, in most cases you should make sure to
+ // free all elements first (i.e. _used_size would be zero),
+ // although it could also be used without this as an optimization
+ // in some cases.
+ void clear() {
+ list.clear();
+ freelist.clear();
+ _used_size = 0;
+ }
+
+ uint64_t estimate_memory_use() const {
+ return ((uint64_t)list.size() * sizeof(T)) + ((uint64_t)freelist.size() * sizeof(U));
}
- const T &operator[](uint32_t p_index) const {
+ const T &operator[](U p_index) const {
return list[p_index];
}
- T &operator[](uint32_t p_index) {
+ T &operator[](U p_index) {
return list[p_index];
}
- int size() const { return _used_size; }
+ // To be explicit in a pool there is a distinction
+ // between the number of elements that are currently
+ // in use, and the number of elements that have been reserved.
+ // Using size() would be vague.
+ U used_size() const { return _used_size; }
+ U reserved_size() const { return list.size(); }
- T *request(uint32_t &r_id) {
+ T *request(U &r_id) {
_used_size++;
if (freelist.size()) {
@@ -79,19 +103,106 @@ public:
int new_size = freelist.size() - 1;
r_id = freelist[new_size];
freelist.resize(new_size);
+
return &list[r_id];
}
r_id = list.size();
list.resize(r_id + 1);
+
+ static_assert((!zero_on_first_request) || (__is_pod(T)), "zero_on_first_request requires trivial type");
+ if (zero_on_first_request && __is_pod(T)) {
+ list[r_id] = {};
+ }
+
return &list[r_id];
}
- void free(const uint32_t &p_id) {
+ void free(const U &p_id) {
// should not be on free list already
- CRASH_COND(p_id >= list.size());
+ ERR_FAIL_UNSIGNED_INDEX(p_id, list.size());
freelist.push_back(p_id);
+ ERR_FAIL_COND_MSG(!_used_size, "_used_size has become out of sync, have you double freed an item?");
_used_size--;
}
};
-#endif // POOLED_LIST_H
+// a pooled list which automatically keeps a list of the active members
+template <class T, class U = uint32_t, bool force_trivial = false, bool zero_on_first_request = false>
+class TrackedPooledList {
+public:
+ U pool_used_size() const { return _pool.used_size(); }
+ U pool_reserved_size() const { return _pool.reserved_size(); }
+ U active_size() const { return _active_list.size(); }
+
+ // use with care, see the earlier notes in the PooledList clear()
+ void clear() {
+ _pool.clear();
+ _active_list.clear();
+ _active_map.clear();
+ }
+
+ U get_active_id(U p_index) const {
+ return _active_list[p_index];
+ }
+
+ const T &get_active(U p_index) const {
+ return _pool[get_active_id(p_index)];
+ }
+
+ T &get_active(U p_index) {
+ return _pool[get_active_id(p_index)];
+ }
+
+ const T &operator[](U p_index) const {
+ return _pool[p_index];
+ }
+ T &operator[](U p_index) {
+ return _pool[p_index];
+ }
+
+ T *request(U &r_id) {
+ T *item = _pool.request(r_id);
+
+ // add to the active list
+ U active_list_id = _active_list.size();
+ _active_list.push_back(r_id);
+
+ // expand the active map (this should be in sync with the pool list
+ if (_pool.used_size() > _active_map.size()) {
+ _active_map.resize(_pool.used_size());
+ }
+
+ // store in the active map
+ _active_map[r_id] = active_list_id;
+
+ return item;
+ }
+
+ void free(const U &p_id) {
+ _pool.free(p_id);
+
+ // remove from the active list.
+ U list_id = _active_map[p_id];
+
+ // zero the _active map to detect bugs (only in debug?)
+ _active_map[p_id] = -1;
+
+ _active_list.remove_unordered(list_id);
+
+ // keep the replacement in sync with the correct list Id
+ if (list_id < _active_list.size()) {
+ // which pool id has been replaced in the active list
+ U replacement_id = _active_list[list_id];
+
+ // keep that replacements map up to date with the new position
+ _active_map[replacement_id] = list_id;
+ }
+ }
+
+ const LocalVector<U, U> &get_active_list() const { return _active_list; }
+
+private:
+ PooledList<T, U, force_trivial, zero_on_first_request> _pool;
+ LocalVector<U, U> _active_map;
+ LocalVector<U, U> _active_list;
+};
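The reworked PooledList hands out integer IDs with O(1) request/free via a freelist, and the new TrackedPooledList additionally keeps a packed list of active IDs so live items can be iterated without scanning freed holes. A rough usage sketch against the API added above, assuming a trivially copyable (POD) element type, since the pool does not run constructors or destructors per request:

    #include "core/templates/pooled_list.h"

    struct Particle {
        float x, y;
        uint32_t ref_count;
    };

    void pooled_list_example() {
        // zero_on_first_request = true zero-fills a slot the first time it is
        // handed out, but leaves it untouched when the slot is recycled.
        TrackedPooledList<Particle, uint32_t, true, true> particles;

        uint32_t id_a, id_b;
        Particle *a = particles.request(id_a);
        Particle *b = particles.request(id_b);
        a->x = 1.0f;
        b->x = 2.0f;

        particles.free(id_a); // id_a goes on the freelist and leaves the active list

        // Iterate only the live items via the packed active list.
        for (uint32_t i = 0; i < particles.active_size(); i++) {
            Particle &p = particles.get_active(i);
            p.y += 1.0f;
        }

        uint32_t id_c;
        particles.request(id_c); // typically reuses id_a's slot from the freelist
    }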
diff --git a/core/templates/safe_list.h b/core/templates/safe_list.h
index 53fc3fe5f9..e850f3bd5e 100644
--- a/core/templates/safe_list.h
+++ b/core/templates/safe_list.h
@@ -75,8 +75,8 @@ public:
class Iterator {
friend class SafeList;
- SafeListNode *cursor;
- SafeList *list;
+ SafeListNode *cursor = nullptr;
+ SafeList *list = nullptr;
Iterator(SafeListNode *p_cursor, SafeList *p_list) :
cursor(p_cursor), list(p_list) {
@@ -203,7 +203,7 @@ public:
}
// Calling this will cause zero to many deallocations.
- void maybe_cleanup() {
+ bool maybe_cleanup() {
SafeListNode *cursor = nullptr;
SafeListNode *new_graveyard_head = nullptr;
do {
@@ -212,7 +212,7 @@ public:
if (active_iterator_count.load() != 0) {
// It's not safe to clean up with an active iterator, because that iterator
// could be pointing to an element that we want to delete.
- return;
+ return false;
}
// Any iterator created after this point will never point to a deleted node.
// Swap it out with the current graveyard head.
@@ -225,6 +225,17 @@ public:
tmp->deletion_fn(tmp->val);
memdelete_allocator<SafeListNode, A>(tmp);
}
+ return true;
+ }
+
+ ~SafeList() {
+#ifdef DEBUG_ENABLED
+ if (!maybe_cleanup()) {
+ ERR_PRINT("There are still iterators around when destructing a SafeList. Memory will be leaked. This is a bug.");
+ }
+#else
+ maybe_cleanup();
+#endif
}
};
@@ -253,8 +264,8 @@ public:
class Iterator {
friend class SafeList;
- SafeListNode *cursor;
- SafeList *list;
+ SafeListNode *cursor = nullptr;
+ SafeList *list = nullptr;
public:
Iterator(SafeListNode *p_cursor, SafeList *p_list) :
@@ -353,11 +364,11 @@ public:
}
// Calling this will cause zero to many deallocations.
- void maybe_cleanup() {
+ bool maybe_cleanup() {
SafeListNode *cursor = graveyard_head;
if (active_iterator_count != 0) {
// It's not safe to clean up with an active iterator, because that iterator could be pointing to an element that we want to delete.
- return;
+ return false;
}
graveyard_head = nullptr;
// Our graveyard list is now unreachable by any active iterators, detached from the main graveyard head and ready for deletion.
@@ -367,6 +378,17 @@ public:
tmp->deletion_fn(tmp->val);
memdelete_allocator<SafeListNode, A>(tmp);
}
+ return true;
+ }
+
+ ~SafeList() {
+#ifdef DEBUG_ENABLED
+ if (!maybe_cleanup()) {
+ ERR_PRINT("There are still iterators around when destructing a SafeList. Memory will be leaked. This is a bug.");
+ }
+#else
+ maybe_cleanup();
+#endif
}
};
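maybe_cleanup() now reports whether the graveyard could actually be reclaimed, and the new destructor uses that result to print an error in DEBUG_ENABLED builds when a SafeList is destroyed while iterators are still alive (the memory is then knowingly leaked rather than freed out from under them). A minimal sketch of that report-and-warn pattern (illustrative, not the engine class):

    #include <cstdio>
    #include <cstdlib>

    // Pattern: a cleanup routine that reports success, and a destructor that
    // checks the report so leaks caused by live iterators are at least logged.
    struct LeakySet {
        int active_iterator_count = 0;
        void *graveyard = nullptr;

        bool maybe_cleanup() {
            if (active_iterator_count != 0) {
                return false; // unsafe: an iterator may still point into the graveyard
            }
            std::free(graveyard);
            graveyard = nullptr;
            return true;
        }

        ~LeakySet() {
            if (!maybe_cleanup()) {
                std::fprintf(stderr, "destroyed with live iterators; memory leaked\n");
            }
        }
    };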
diff --git a/core/templates/self_list.h b/core/templates/self_list.h
index 7f2236fa3a..719b5f2e63 100644
--- a/core/templates/self_list.h
+++ b/core/templates/self_list.h
@@ -108,7 +108,7 @@ public:
private:
List *_root = nullptr;
- T *_self;
+ T *_self = nullptr;
SelfList<T> *_next = nullptr;
SelfList<T> *_prev = nullptr;
diff --git a/core/templates/set.h b/core/templates/set.h
index cdc6e8447d..a8a0a77712 100644
--- a/core/templates/set.h
+++ b/core/templates/set.h
@@ -328,7 +328,7 @@ private:
void _insert_rb_fix(Element *p_new_node) {
Element *node = p_new_node;
Element *nparent = node->parent;
- Element *ngrand_parent;
+ Element *ngrand_parent = nullptr;
while (nparent->color == RED) {
ngrand_parent = nparent->parent;
@@ -482,7 +482,7 @@ private:
Element *rp = ((p_node->left == _data._nil) || (p_node->right == _data._nil)) ? p_node : p_node->_next;
Element *node = (rp->left == _data._nil) ? rp->right : rp->left;
- Element *sibling;
+ Element *sibling = nullptr;
if (rp == rp->parent->left) {
rp->parent->left = node;
sibling = rp->parent->right;
diff --git a/core/templates/thread_work_pool.h b/core/templates/thread_work_pool.h
index 957af44f48..b0cebf04f1 100644
--- a/core/templates/thread_work_pool.h
+++ b/core/templates/thread_work_pool.h
@@ -52,7 +52,7 @@ class ThreadWorkPool {
C *instance;
M method;
U userdata;
- virtual void work() {
+ virtual void work() override {
while (true) {
uint32_t work_index = index->fetch_add(1, std::memory_order_relaxed);
if (work_index >= max_elements) {
@@ -68,7 +68,7 @@ class ThreadWorkPool {
Semaphore start;
Semaphore completed;
std::atomic<bool> exit;
- BaseWork *work;
+ BaseWork *work = nullptr;
};
ThreadData *threads = nullptr;
diff --git a/core/templates/vector.h b/core/templates/vector.h
index 0877e04e01..d87e76139b 100644
--- a/core/templates/vector.h
+++ b/core/templates/vector.h
@@ -97,24 +97,29 @@ public:
_FORCE_INLINE_ bool has(const T &p_val) const { return find(p_val) != -1; }
- template <class C>
- void sort_custom() {
+ void sort() {
+ sort_custom<_DefaultComparator<T>>();
+ }
+
+ template <class Comparator, bool Validate = SORT_ARRAY_VALIDATE_ENABLED, class... Args>
+ void sort_custom(Args &&...args) {
int len = _cowdata.size();
if (len == 0) {
return;
}
T *data = ptrw();
- SortArray<T, C> sorter;
+ SortArray<T, Comparator, Validate> sorter{ args... };
sorter.sort(data, len);
}
- void sort() {
- sort_custom<_DefaultComparator<T>>();
+ int bsearch(const T &p_value, bool p_before) {
+ return bsearch_custom<_DefaultComparator<T>>(p_value, p_before);
}
- int bsearch(const T &p_value, bool p_before) {
- SearchArray<T> search;
+ template <class Comparator, class Value, class... Args>
+ int bsearch_custom(const Value &p_value, bool p_before, Args &&...args) {
+ SearchArray<T, Comparator> search{ args... };
return search.bisect(ptrw(), size(), p_value, p_before);
}
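sort_custom() gains a Validate template flag and now forwards constructor arguments to the comparator, so stateful comparators become possible; bsearch_custom() gets the same treatment and also accepts a search value of a different type than T. A rough usage sketch assuming Godot's Vector<int> and a hypothetical stateful comparator:

    #include "core/templates/vector.h"

    // Hypothetical comparator: orders values by distance to a pivot.
    struct DistanceComparator {
        int pivot;
        DistanceComparator(int p_pivot) :
                pivot(p_pivot) {}
        bool operator()(int a, int b) const {
            int da = a > pivot ? a - pivot : pivot - a;
            int db = b > pivot ? b - pivot : pivot - b;
            return da < db;
        }
    };

    void sort_example() {
        Vector<int> v;
        v.push_back(10);
        v.push_back(3);
        v.push_back(7);

        v.sort();                             // default ascending order: 3, 7, 10

        // The extra argument is forwarded to DistanceComparator's constructor.
        v.sort_custom<DistanceComparator>(8); // nearest to 8 first: 7, 10, 3
    }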