Diffstat (limited to 'core/templates')
-rw-r--r--  core/templates/bin_sorted_array.h                             |   6
-rw-r--r--  core/templates/cowdata.h                                      |  45
-rw-r--r--  core/templates/hash_map.h                                     | 782
-rw-r--r--  core/templates/hash_set.h                                     | 476
-rw-r--r--  core/templates/hashfuncs.h                                    | 383
-rw-r--r--  core/templates/local_vector.h                                 |  20
-rw-r--r--  core/templates/lru.h                                          |   7
-rw-r--r--  core/templates/ordered_hash_map.h                             | 301
-rw-r--r--  core/templates/paged_allocator.h                              |  18
-rw-r--r--  core/templates/paged_array.h                                  |  12
-rw-r--r--  core/templates/pair.h                                         |  11
-rw-r--r--  core/templates/rb_map.h (renamed from core/templates/map.h)   |  38
-rw-r--r--  core/templates/rb_set.h (renamed from core/templates/set.h)   |  33
-rw-r--r--  core/templates/rid_owner.h                                    |  22
-rw-r--r--  core/templates/safe_refcount.h                                |   5
-rw-r--r--  core/templates/thread_work_pool.cpp                           |  81
-rw-r--r--  core/templates/thread_work_pool.h                             | 157
-rw-r--r--  core/templates/vector.h                                       |   6
18 files changed, 1393 insertions, 1010 deletions
diff --git a/core/templates/bin_sorted_array.h b/core/templates/bin_sorted_array.h
index 59ac4cdaa1..38beb9c04d 100644
--- a/core/templates/bin_sorted_array.h
+++ b/core/templates/bin_sorted_array.h
@@ -61,7 +61,7 @@ public:
}
uint64_t move(uint64_t p_idx, uint64_t p_bin) {
- ERR_FAIL_COND_V(p_idx >= array.size(), -1);
+ ERR_FAIL_UNSIGNED_INDEX_V(p_idx, array.size(), -1);
uint64_t current_bin = bin_limits.size() - 1;
while (p_idx > bin_limits[current_bin]) {
@@ -113,7 +113,7 @@ public:
}
void remove_at(uint64_t p_idx) {
- ERR_FAIL_COND(p_idx >= array.size());
+ ERR_FAIL_UNSIGNED_INDEX(p_idx, array.size());
uint64_t new_idx = move(p_idx, 0);
uint64_t swap_idx = array.size() - 1;
@@ -178,4 +178,4 @@ public:
}
};
-#endif //BIN_SORTED_ARRAY_H
+#endif // BIN_SORTED_ARRAY_H
diff --git a/core/templates/cowdata.h b/core/templates/cowdata.h
index f1ac32928f..f98b2308c9 100644
--- a/core/templates/cowdata.h
+++ b/core/templates/cowdata.h
@@ -36,6 +36,7 @@
#include "core/templates/safe_refcount.h"
#include <string.h>
+#include <type_traits>
template <class T>
class Vector;
@@ -158,6 +159,7 @@ public:
return _ptr[p_index];
}
+ template <bool p_ensure_zero = false>
Error resize(int p_size);
_FORCE_INLINE_ void remove_at(int p_index) {
@@ -183,6 +185,8 @@ public:
}
int find(const T &p_val, int p_from = 0) const;
+ int rfind(const T &p_val, int p_from = -1) const;
+ int count(const T &p_val) const;
_FORCE_INLINE_ CowData() {}
_FORCE_INLINE_ ~CowData();
@@ -202,7 +206,7 @@ void CowData<T>::_unref(void *p_data) {
}
// clean up
- if (!__has_trivial_destructor(T)) {
+ if (!std::is_trivially_destructible<T>::value) {
uint32_t *count = _get_size();
T *data = (T *)(count + 1);
@@ -237,7 +241,7 @@ uint32_t CowData<T>::_copy_on_write() {
T *_data = (T *)(mem_new);
// initialize new elements
- if (__has_trivial_copy(T)) {
+ if (std::is_trivially_copyable<T>::value) {
memcpy(mem_new, _ptr, current_size * sizeof(T));
} else {
@@ -255,6 +259,7 @@ uint32_t CowData<T>::_copy_on_write() {
}
template <class T>
+template <bool p_ensure_zero>
Error CowData<T>::resize(int p_size) {
ERR_FAIL_COND_V(p_size < 0, ERR_INVALID_PARAMETER);
@@ -300,16 +305,18 @@ Error CowData<T>::resize(int p_size) {
// construct the newly created elements
- if (!__has_trivial_constructor(T)) {
+ if (!std::is_trivially_constructible<T>::value) {
for (int i = *_get_size(); i < p_size; i++) {
memnew_placement(&_ptr[i], T);
}
+ } else if (p_ensure_zero) {
+ memset((void *)(_ptr + current_size), 0, (p_size - current_size) * sizeof(T));
}
*_get_size() = p_size;
} else if (p_size < current_size) {
- if (!__has_trivial_destructor(T)) {
+ if (!std::is_trivially_destructible<T>::value) {
// deinitialize no longer needed elements
for (uint32_t i = p_size; i < *_get_size(); i++) {
T *t = &_ptr[i];
@@ -350,6 +357,36 @@ int CowData<T>::find(const T &p_val, int p_from) const {
}
template <class T>
+int CowData<T>::rfind(const T &p_val, int p_from) const {
+ const int s = size();
+
+ if (p_from < 0) {
+ p_from = s + p_from;
+ }
+ if (p_from < 0 || p_from >= s) {
+ p_from = s - 1;
+ }
+
+ for (int i = p_from; i >= 0; i--) {
+ if (get(i) == p_val) {
+ return i;
+ }
+ }
+ return -1;
+}
+
+template <class T>
+int CowData<T>::count(const T &p_val) const {
+ int amount = 0;
+ for (int i = 0; i < size(); i++) {
+ if (get(i) == p_val) {
+ amount++;
+ }
+ }
+ return amount;
+}
+
+template <class T>
void CowData<T>::_ref(const CowData *p_from) {
_ref(*p_from);
}
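
The resize change above adds a p_ensure_zero template parameter: when it is true and the element type is trivially constructible, the newly grown region is zero-filled with memset instead of being left uninitialized, while rfind and count add reverse search and occurrence counting. A minimal sketch of the intended call pattern (illustrative only; CowData is normally used through Vector, and the variable names are made up):

    CowData<int> buf;
    buf.resize<true>(16);          // Grow to 16 zero-filled elements (trivial-type path).
    int first = buf.get(0);        // 0
    int last_zero = buf.rfind(0);  // 15 -- searches backwards from the end by default.
    int zeros = buf.count(0);      // 16
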
diff --git a/core/templates/hash_map.h b/core/templates/hash_map.h
index fa5677cc70..191f21a3dd 100644
--- a/core/templates/hash_map.h
+++ b/core/templates/hash_map.h
@@ -31,524 +31,560 @@
#ifndef HASH_MAP_H
#define HASH_MAP_H
-#include "core/error/error_macros.h"
#include "core/math/math_funcs.h"
#include "core/os/memory.h"
-#include "core/string/ustring.h"
#include "core/templates/hashfuncs.h"
-#include "core/templates/list.h"
+#include "core/templates/paged_allocator.h"
+#include "core/templates/pair.h"
/**
- * @class HashMap
+ * A HashMap implementation that uses open addressing with Robin Hood hashing.
+ * Robin Hood hashing swaps out entries that have a smaller probing distance
+ * than the to-be-inserted entry, which evens out the average probing distance
+ * and enables faster lookups. Backward shift deletion is employed to further
+ * improve the performance and to avoid infinite loops in rare cases.
*
- * Implementation of a standard Hashing HashMap, for quick lookups of Data associated with a Key.
- * The implementation provides hashers for the default types, if you need a special kind of hasher, provide
- * your own.
- * @param TKey Key, search is based on it, needs to be hasheable. It is unique in this container.
- * @param TData Data, data associated with the key
- * @param Hasher Hasher object, needs to provide a valid static hash function for TKey
- * @param Comparator comparator object, needs to be able to safely compare two TKey values.
- * It needs to ensure that x == x for any items inserted in the map. Bear in mind that nan != nan when implementing an equality check.
- * @param MIN_HASH_TABLE_POWER Miminum size of the hash table, as a power of two. You rarely need to change this parameter.
- * @param RELATIONSHIP Relationship at which the hash table is resized. if amount of elements is RELATIONSHIP
- * times bigger than the hash table, table is resized to solve this condition. if RELATIONSHIP is zero, table is always MIN_HASH_TABLE_POWER.
+ * Keys and values are stored in a doubly linked list, in insertion order. This
+ * adds a slight performance overhead on lookup, which can be mostly compensated
+ * for by using a paged allocator if required.
*
+ * The assignment operator copies the pairs from one map to the other.
*/
-template <class TKey, class TData, class Hasher = HashMapHasherDefault, class Comparator = HashMapComparatorDefault<TKey>, uint8_t MIN_HASH_TABLE_POWER = 3, uint8_t RELATIONSHIP = 8>
+template <class TKey, class TValue>
+struct HashMapElement {
+ HashMapElement *next = nullptr;
+ HashMapElement *prev = nullptr;
+ KeyValue<TKey, TValue> data;
+ HashMapElement() {}
+ HashMapElement(const TKey &p_key, const TValue &p_value) :
+ data(p_key, p_value) {}
+};
+
+template <class TKey, class TValue,
+ class Hasher = HashMapHasherDefault,
+ class Comparator = HashMapComparatorDefault<TKey>,
+ class Allocator = DefaultTypedAllocator<HashMapElement<TKey, TValue>>>
class HashMap {
public:
- struct Pair {
- TKey key;
- TData data;
+ const uint32_t MIN_CAPACITY_INDEX = 2; // Use a prime.
+ const float MAX_OCCUPANCY = 0.75;
+ const uint32_t EMPTY_HASH = 0;
- Pair(const TKey &p_key) :
- key(p_key),
- data() {}
- Pair(const TKey &p_key, const TData &p_data) :
- key(p_key),
- data(p_data) {
- }
- };
+private:
+ Allocator element_alloc;
+ HashMapElement<TKey, TValue> **elements = nullptr;
+ uint32_t *hashes = nullptr;
+ HashMapElement<TKey, TValue> *head_element = nullptr;
+ HashMapElement<TKey, TValue> *tail_element = nullptr;
- struct Element {
- private:
- friend class HashMap;
+ uint32_t capacity_index = 0;
+ uint32_t num_elements = 0;
- uint32_t hash = 0;
- Element *next = nullptr;
- Element() {}
- Pair pair;
+ _FORCE_INLINE_ uint32_t _hash(const TKey &p_key) const {
+ uint32_t hash = Hasher::hash(p_key);
- public:
- const TKey &key() const {
- return pair.key;
+ if (unlikely(hash == EMPTY_HASH)) {
+ hash = EMPTY_HASH + 1;
}
- TData &value() {
- return pair.data;
- }
+ return hash;
+ }
- const TData &value() const {
- return pair.value();
+ static _FORCE_INLINE_ uint32_t _get_probe_length(const uint32_t p_pos, const uint32_t p_hash, const uint32_t p_capacity, const uint64_t p_capacity_inv) {
+ const uint32_t original_pos = fastmod(p_hash, p_capacity_inv, p_capacity);
+ return fastmod(p_pos - original_pos + p_capacity, p_capacity_inv, p_capacity);
+ }
+
+ bool _lookup_pos(const TKey &p_key, uint32_t &r_pos) const {
+ if (elements == nullptr) {
+ return false; // Failed lookups, no elements
}
- Element(const TKey &p_key) :
- pair(p_key) {}
- Element(const Element &p_other) :
- hash(p_other.hash),
- pair(p_other.pair.key, p_other.pair.data) {}
- };
+ const uint32_t capacity = hash_table_size_primes[capacity_index];
+ const uint64_t capacity_inv = hash_table_size_primes_inv[capacity_index];
+ uint32_t hash = _hash(p_key);
+ uint32_t pos = fastmod(hash, capacity_inv, capacity);
+ uint32_t distance = 0;
-private:
- Element **hash_table = nullptr;
- uint8_t hash_table_power = 0;
- uint32_t elements = 0;
+ while (true) {
+ if (hashes[pos] == EMPTY_HASH) {
+ return false;
+ }
- void make_hash_table() {
- ERR_FAIL_COND(hash_table);
+ if (distance > _get_probe_length(pos, hashes[pos], capacity, capacity_inv)) {
+ return false;
+ }
- hash_table = memnew_arr(Element *, (1 << MIN_HASH_TABLE_POWER));
+ if (hashes[pos] == hash && Comparator::compare(elements[pos]->data.key, p_key)) {
+ r_pos = pos;
+ return true;
+ }
- hash_table_power = MIN_HASH_TABLE_POWER;
- elements = 0;
- for (int i = 0; i < (1 << MIN_HASH_TABLE_POWER); i++) {
- hash_table[i] = nullptr;
+ pos = fastmod((pos + 1), capacity_inv, capacity);
+ distance++;
}
}
- void erase_hash_table() {
- ERR_FAIL_COND_MSG(elements, "Cannot erase hash table if there are still elements inside.");
-
- memdelete_arr(hash_table);
- hash_table = nullptr;
- hash_table_power = 0;
- elements = 0;
- }
+ void _insert_with_hash(uint32_t p_hash, HashMapElement<TKey, TValue> *p_value) {
+ const uint32_t capacity = hash_table_size_primes[capacity_index];
+ const uint64_t capacity_inv = hash_table_size_primes_inv[capacity_index];
+ uint32_t hash = p_hash;
+ HashMapElement<TKey, TValue> *value = p_value;
+ uint32_t distance = 0;
+ uint32_t pos = fastmod(hash, capacity_inv, capacity);
- void check_hash_table() {
- int new_hash_table_power = -1;
+ while (true) {
+ if (hashes[pos] == EMPTY_HASH) {
+ elements[pos] = value;
+ hashes[pos] = hash;
- if ((int)elements > ((1 << hash_table_power) * RELATIONSHIP)) {
- /* rehash up */
- new_hash_table_power = hash_table_power + 1;
+ num_elements++;
- while ((int)elements > ((1 << new_hash_table_power) * RELATIONSHIP)) {
- new_hash_table_power++;
+ return;
}
- } else if ((hash_table_power > (int)MIN_HASH_TABLE_POWER) && ((int)elements < ((1 << (hash_table_power - 1)) * RELATIONSHIP))) {
- /* rehash down */
- new_hash_table_power = hash_table_power - 1;
-
- while ((int)elements < ((1 << (new_hash_table_power - 1)) * RELATIONSHIP)) {
- new_hash_table_power--;
+ // Not an empty slot, let's check the probing length of the existing one.
+ uint32_t existing_probe_len = _get_probe_length(pos, hashes[pos], capacity, capacity_inv);
+ if (existing_probe_len < distance) {
+ SWAP(hash, hashes[pos]);
+ SWAP(value, elements[pos]);
+ distance = existing_probe_len;
}
- if (new_hash_table_power < (int)MIN_HASH_TABLE_POWER) {
- new_hash_table_power = MIN_HASH_TABLE_POWER;
- }
+ pos = fastmod((pos + 1), capacity_inv, capacity);
+ distance++;
}
+ }
- if (new_hash_table_power == -1) {
- return;
- }
+ void _resize_and_rehash(uint32_t p_new_capacity_index) {
+ uint32_t old_capacity = hash_table_size_primes[capacity_index];
- Element **new_hash_table = memnew_arr(Element *, ((uint64_t)1 << new_hash_table_power));
- ERR_FAIL_COND_MSG(!new_hash_table, "Out of memory.");
+ // Capacity can't be 0.
+ capacity_index = MAX((uint32_t)MIN_CAPACITY_INDEX, p_new_capacity_index);
- for (int i = 0; i < (1 << new_hash_table_power); i++) {
- new_hash_table[i] = nullptr;
- }
+ uint32_t capacity = hash_table_size_primes[capacity_index];
- if (hash_table) {
- for (int i = 0; i < (1 << hash_table_power); i++) {
- while (hash_table[i]) {
- Element *se = hash_table[i];
- hash_table[i] = se->next;
- int new_pos = se->hash & ((1 << new_hash_table_power) - 1);
- se->next = new_hash_table[new_pos];
- new_hash_table[new_pos] = se;
- }
- }
+ HashMapElement<TKey, TValue> **old_elements = elements;
+ uint32_t *old_hashes = hashes;
- memdelete_arr(hash_table);
- }
- hash_table = new_hash_table;
- hash_table_power = new_hash_table_power;
- }
+ num_elements = 0;
+ hashes = reinterpret_cast<uint32_t *>(Memory::alloc_static(sizeof(uint32_t) * capacity));
+ elements = reinterpret_cast<HashMapElement<TKey, TValue> **>(Memory::alloc_static(sizeof(HashMapElement<TKey, TValue> *) * capacity));
- /* I want to have only one function.. */
- _FORCE_INLINE_ const Element *get_element(const TKey &p_key) const {
- uint32_t hash = Hasher::hash(p_key);
- uint32_t index = hash & ((1 << hash_table_power) - 1);
+ for (uint32_t i = 0; i < capacity; i++) {
+ hashes[i] = 0;
+ elements[i] = nullptr;
+ }
- Element *e = hash_table[index];
+ if (old_capacity == 0) {
+ // Nothing to do.
+ return;
+ }
- while (e) {
- /* checking hash first avoids comparing key, which may take longer */
- if (e->hash == hash && Comparator::compare(e->pair.key, p_key)) {
- /* the pair exists in this hashtable, so just update data */
- return e;
+ for (uint32_t i = 0; i < old_capacity; i++) {
+ if (old_hashes[i] == EMPTY_HASH) {
+ continue;
}
- e = e->next;
+ _insert_with_hash(old_hashes[i], old_elements[i]);
}
- return nullptr;
+ Memory::free_static(old_elements);
+ Memory::free_static(old_hashes);
}
- Element *create_element(const TKey &p_key) {
- /* if element doesn't exist, create it */
- Element *e = memnew(Element(p_key));
- ERR_FAIL_COND_V_MSG(!e, nullptr, "Out of memory.");
- uint32_t hash = Hasher::hash(p_key);
- uint32_t index = hash & ((1 << hash_table_power) - 1);
- e->next = hash_table[index];
- e->hash = hash;
-
- hash_table[index] = e;
- elements++;
+ _FORCE_INLINE_ HashMapElement<TKey, TValue> *_insert(const TKey &p_key, const TValue &p_value, bool p_front_insert = false) {
+ uint32_t capacity = hash_table_size_primes[capacity_index];
+ if (unlikely(elements == nullptr)) {
+ // Allocate on demand to save memory.
- return e;
- }
+ hashes = reinterpret_cast<uint32_t *>(Memory::alloc_static(sizeof(uint32_t) * capacity));
+ elements = reinterpret_cast<HashMapElement<TKey, TValue> **>(Memory::alloc_static(sizeof(HashMapElement<TKey, TValue> *) * capacity));
- void copy_from(const HashMap &p_t) {
- if (&p_t == this) {
- return; /* much less bother with that */
- }
-
- clear();
-
- if (!p_t.hash_table || p_t.hash_table_power == 0) {
- return; /* not copying from empty table */
+ for (uint32_t i = 0; i < capacity; i++) {
+ hashes[i] = EMPTY_HASH;
+ elements[i] = nullptr;
+ }
}
- hash_table = memnew_arr(Element *, (uint64_t)1 << p_t.hash_table_power);
- hash_table_power = p_t.hash_table_power;
- elements = p_t.elements;
+ uint32_t pos = 0;
+ bool exists = _lookup_pos(p_key, pos);
- for (int i = 0; i < (1 << p_t.hash_table_power); i++) {
- hash_table[i] = nullptr;
-
- const Element *e = p_t.hash_table[i];
-
- while (e) {
- Element *le = memnew(Element(*e)); /* local element */
+ if (exists) {
+ elements[pos]->data.value = p_value;
+ return elements[pos];
+ } else {
+ if (num_elements + 1 > MAX_OCCUPANCY * capacity) {
+ ERR_FAIL_COND_V_MSG(capacity_index + 1 == HASH_TABLE_SIZE_MAX, nullptr, "Hash table maximum capacity reached, aborting insertion.");
+ _resize_and_rehash(capacity_index + 1);
+ }
- /* add to list and reassign pointers */
- le->next = hash_table[i];
- hash_table[i] = le;
+ HashMapElement<TKey, TValue> *elem = element_alloc.new_allocation(HashMapElement<TKey, TValue>(p_key, p_value));
- e = e->next;
+ if (tail_element == nullptr) {
+ head_element = elem;
+ tail_element = elem;
+ } else if (p_front_insert) {
+ head_element->prev = elem;
+ elem->next = head_element;
+ head_element = elem;
+ } else {
+ tail_element->next = elem;
+ elem->prev = tail_element;
+ tail_element = elem;
}
+
+ uint32_t hash = _hash(p_key);
+ _insert_with_hash(hash, elem);
+ return elem;
}
}
public:
- Element *set(const TKey &p_key, const TData &p_data) {
- return set(Pair(p_key, p_data));
- }
+ _FORCE_INLINE_ uint32_t get_capacity() const { return hash_table_size_primes[capacity_index]; }
+ _FORCE_INLINE_ uint32_t size() const { return num_elements; }
- Element *set(const Pair &p_pair) {
- Element *e = nullptr;
- if (!hash_table) {
- make_hash_table(); // if no table, make one
- } else {
- e = const_cast<Element *>(get_element(p_pair.key));
- }
+ /* Standard Godot Container API */
- /* if we made it up to here, the pair doesn't exist, create and assign */
+ bool is_empty() const {
+ return num_elements == 0;
+ }
- if (!e) {
- e = create_element(p_pair.key);
- if (!e) {
- return nullptr;
- }
- check_hash_table(); // perform mantenience routine
+ void clear() {
+ if (elements == nullptr) {
+ return;
}
+ uint32_t capacity = hash_table_size_primes[capacity_index];
+ for (uint32_t i = 0; i < capacity; i++) {
+ if (hashes[i] == EMPTY_HASH) {
+ continue;
+ }
- e->pair.data = p_pair.data;
- return e;
- }
+ hashes[i] = EMPTY_HASH;
+ element_alloc.delete_allocation(elements[i]);
+ elements[i] = nullptr;
+ }
- bool has(const TKey &p_key) const {
- return getptr(p_key) != nullptr;
+ tail_element = nullptr;
+ head_element = nullptr;
+ num_elements = 0;
}
- /**
- * Get a key from data, return a const reference.
- * WARNING: this doesn't check errors, use either getptr and check nullptr, or check
- * first with has(key)
- */
-
- const TData &get(const TKey &p_key) const {
- const TData *res = getptr(p_key);
- CRASH_COND_MSG(!res, "Map key not found.");
- return *res;
+ TValue &get(const TKey &p_key) {
+ uint32_t pos = 0;
+ bool exists = _lookup_pos(p_key, pos);
+ CRASH_COND_MSG(!exists, "HashMap key not found.");
+ return elements[pos]->data.value;
}
- TData &get(const TKey &p_key) {
- TData *res = getptr(p_key);
- CRASH_COND_MSG(!res, "Map key not found.");
- return *res;
+ const TValue &get(const TKey &p_key) const {
+ uint32_t pos = 0;
+ bool exists = _lookup_pos(p_key, pos);
+ CRASH_COND_MSG(!exists, "HashMap key not found.");
+ return elements[pos]->data.value;
}
- /**
- * Same as get, except it can return nullptr when item was not found.
- * This is mainly used for speed purposes.
- */
+ const TValue *getptr(const TKey &p_key) const {
+ uint32_t pos = 0;
+ bool exists = _lookup_pos(p_key, pos);
- _FORCE_INLINE_ TData *getptr(const TKey &p_key) {
- if (unlikely(!hash_table)) {
- return nullptr;
+ if (exists) {
+ return &elements[pos]->data.value;
}
+ return nullptr;
+ }
- Element *e = const_cast<Element *>(get_element(p_key));
+ TValue *getptr(const TKey &p_key) {
+ uint32_t pos = 0;
+ bool exists = _lookup_pos(p_key, pos);
- if (e) {
- return &e->pair.data;
+ if (exists) {
+ return &elements[pos]->data.value;
}
-
return nullptr;
}
- _FORCE_INLINE_ const TData *getptr(const TKey &p_key) const {
- if (unlikely(!hash_table)) {
- return nullptr;
- }
+ _FORCE_INLINE_ bool has(const TKey &p_key) const {
+ uint32_t _pos = 0;
+ return _lookup_pos(p_key, _pos);
+ }
- const Element *e = const_cast<Element *>(get_element(p_key));
+ bool erase(const TKey &p_key) {
+ uint32_t pos = 0;
+ bool exists = _lookup_pos(p_key, pos);
- if (e) {
- return &e->pair.data;
+ if (!exists) {
+ return false;
}
- return nullptr;
- }
+ const uint32_t capacity = hash_table_size_primes[capacity_index];
+ const uint64_t capacity_inv = hash_table_size_primes_inv[capacity_index];
+ uint32_t next_pos = fastmod((pos + 1), capacity_inv, capacity);
+ while (hashes[next_pos] != EMPTY_HASH && _get_probe_length(next_pos, hashes[next_pos], capacity, capacity_inv) != 0) {
+ SWAP(hashes[next_pos], hashes[pos]);
+ SWAP(elements[next_pos], elements[pos]);
+ pos = next_pos;
+ next_pos = fastmod((pos + 1), capacity_inv, capacity);
+ }
- /**
- * Same as get, except it can return nullptr when item was not found.
- * This version is custom, will take a hash and a custom key (that should support operator==()
- */
+ hashes[pos] = EMPTY_HASH;
- template <class C>
- _FORCE_INLINE_ TData *custom_getptr(C p_custom_key, uint32_t p_custom_hash) {
- if (unlikely(!hash_table)) {
- return nullptr;
+ if (head_element == elements[pos]) {
+ head_element = elements[pos]->next;
}
- uint32_t hash = p_custom_hash;
- uint32_t index = hash & ((1 << hash_table_power) - 1);
-
- Element *e = hash_table[index];
+ if (tail_element == elements[pos]) {
+ tail_element = elements[pos]->prev;
+ }
- while (e) {
- /* checking hash first avoids comparing key, which may take longer */
- if (e->hash == hash && Comparator::compare(e->pair.key, p_custom_key)) {
- /* the pair exists in this hashtable, so just update data */
- return &e->pair.data;
- }
+ if (elements[pos]->prev) {
+ elements[pos]->prev->next = elements[pos]->next;
+ }
- e = e->next;
+ if (elements[pos]->next) {
+ elements[pos]->next->prev = elements[pos]->prev;
}
- return nullptr;
+ element_alloc.delete_allocation(elements[pos]);
+ elements[pos] = nullptr;
+
+ num_elements--;
+ return true;
}
- template <class C>
- _FORCE_INLINE_ const TData *custom_getptr(C p_custom_key, uint32_t p_custom_hash) const {
- if (unlikely(!hash_table)) {
- return nullptr;
+ // Reserves space for a number of elements, useful to avoid many resizes and rehashes.
+ // If adding a known (possibly large) number of elements at once, the requested capacity must be larger than the old capacity.
+ void reserve(uint32_t p_new_capacity) {
+ uint32_t new_index = capacity_index;
+
+ while (hash_table_size_primes[new_index] < p_new_capacity) {
+ ERR_FAIL_COND_MSG(new_index + 1 == (uint32_t)HASH_TABLE_SIZE_MAX, "Hash table maximum capacity reached, aborting reservation.");
+ new_index++;
}
- uint32_t hash = p_custom_hash;
- uint32_t index = hash & ((1 << hash_table_power) - 1);
+ if (new_index == capacity_index) {
+ return;
+ }
- const Element *e = hash_table[index];
+ if (elements == nullptr) {
+ capacity_index = new_index;
+ return; // Unallocated yet.
+ }
+ _resize_and_rehash(new_index);
+ }
- while (e) {
- /* checking hash first avoids comparing key, which may take longer */
- if (e->hash == hash && Comparator::compare(e->pair.key, p_custom_key)) {
- /* the pair exists in this hashtable, so just update data */
- return &e->pair.data;
- }
+ /** Iterator API **/
- e = e->next;
+ struct ConstIterator {
+ _FORCE_INLINE_ const KeyValue<TKey, TValue> &operator*() const {
+ return E->data;
+ }
+ _FORCE_INLINE_ const KeyValue<TKey, TValue> *operator->() const { return &E->data; }
+ _FORCE_INLINE_ ConstIterator &operator++() {
+ if (E) {
+ E = E->next;
+ }
+ return *this;
+ }
+ _FORCE_INLINE_ ConstIterator &operator--() {
+ if (E) {
+ E = E->prev;
+ }
+ return *this;
}
- return nullptr;
- }
+ _FORCE_INLINE_ bool operator==(const ConstIterator &b) const { return E == b.E; }
+ _FORCE_INLINE_ bool operator!=(const ConstIterator &b) const { return E != b.E; }
- /**
- * Erase an item, return true if erasing was successful
- */
+ _FORCE_INLINE_ explicit operator bool() const {
+ return E != nullptr;
+ }
- bool erase(const TKey &p_key) {
- if (unlikely(!hash_table)) {
- return false;
+ _FORCE_INLINE_ ConstIterator(const HashMapElement<TKey, TValue> *p_E) { E = p_E; }
+ _FORCE_INLINE_ ConstIterator() {}
+ _FORCE_INLINE_ ConstIterator(const ConstIterator &p_it) { E = p_it.E; }
+ _FORCE_INLINE_ void operator=(const ConstIterator &p_it) {
+ E = p_it.E;
}
- uint32_t hash = Hasher::hash(p_key);
- uint32_t index = hash & ((1 << hash_table_power) - 1);
-
- Element *e = hash_table[index];
- Element *p = nullptr;
- while (e) {
- /* checking hash first avoids comparing key, which may take longer */
- if (e->hash == hash && Comparator::compare(e->pair.key, p_key)) {
- if (p) {
- p->next = e->next;
- } else {
- //begin of list
- hash_table[index] = e->next;
- }
-
- memdelete(e);
- elements--;
-
- if (elements == 0) {
- erase_hash_table();
- } else {
- check_hash_table();
- }
- return true;
+ private:
+ const HashMapElement<TKey, TValue> *E = nullptr;
+ };
+
+ struct Iterator {
+ _FORCE_INLINE_ KeyValue<TKey, TValue> &operator*() const {
+ return E->data;
+ }
+ _FORCE_INLINE_ KeyValue<TKey, TValue> *operator->() const { return &E->data; }
+ _FORCE_INLINE_ Iterator &operator++() {
+ if (E) {
+ E = E->next;
+ }
+ return *this;
+ }
+ _FORCE_INLINE_ Iterator &operator--() {
+ if (E) {
+ E = E->prev;
}
+ return *this;
+ }
- p = e;
- e = e->next;
+ _FORCE_INLINE_ bool operator==(const Iterator &b) const { return E == b.E; }
+ _FORCE_INLINE_ bool operator!=(const Iterator &b) const { return E != b.E; }
+
+ _FORCE_INLINE_ explicit operator bool() const {
+ return E != nullptr;
}
- return false;
- }
+ _FORCE_INLINE_ Iterator(HashMapElement<TKey, TValue> *p_E) { E = p_E; }
+ _FORCE_INLINE_ Iterator() {}
+ _FORCE_INLINE_ Iterator(const Iterator &p_it) { E = p_it.E; }
+ _FORCE_INLINE_ void operator=(const Iterator &p_it) {
+ E = p_it.E;
+ }
+
+ operator ConstIterator() const {
+ return ConstIterator(E);
+ }
- inline const TData &operator[](const TKey &p_key) const { //constref
+ private:
+ HashMapElement<TKey, TValue> *E = nullptr;
+ };
- return get(p_key);
+ _FORCE_INLINE_ Iterator begin() {
+ return Iterator(head_element);
+ }
+ _FORCE_INLINE_ Iterator end() {
+ return Iterator(nullptr);
+ }
+ _FORCE_INLINE_ Iterator last() {
+ return Iterator(tail_element);
}
- inline TData &operator[](const TKey &p_key) { //assignment
- Element *e = nullptr;
- if (!hash_table) {
- make_hash_table(); // if no table, make one
- } else {
- e = const_cast<Element *>(get_element(p_key));
+ _FORCE_INLINE_ Iterator find(const TKey &p_key) {
+ uint32_t pos = 0;
+ bool exists = _lookup_pos(p_key, pos);
+ if (!exists) {
+ return end();
}
+ return Iterator(elements[pos]);
+ }
- /* if we made it up to here, the pair doesn't exist, create */
- if (!e) {
- e = create_element(p_key);
- CRASH_COND(!e);
- check_hash_table(); // perform mantenience routine
+ _FORCE_INLINE_ void remove(const Iterator &p_iter) {
+ if (p_iter) {
+ erase(p_iter->key);
}
+ }
- return e->pair.data;
+ _FORCE_INLINE_ ConstIterator begin() const {
+ return ConstIterator(head_element);
+ }
+ _FORCE_INLINE_ ConstIterator end() const {
+ return ConstIterator(nullptr);
+ }
+ _FORCE_INLINE_ ConstIterator last() const {
+ return ConstIterator(tail_element);
}
- /**
- * Get the next key to p_key, and the first key if p_key is null.
- * Returns a pointer to the next key if found, nullptr otherwise.
- * Adding/Removing elements while iterating will, of course, have unexpected results, don't do it.
- *
- * Example:
- *
- * const TKey *k=nullptr;
- *
- * while( (k=table.next(k)) ) {
- *
- * print( *k );
- * }
- *
- */
- const TKey *next(const TKey *p_key) const {
- if (unlikely(!hash_table)) {
- return nullptr;
+ _FORCE_INLINE_ ConstIterator find(const TKey &p_key) const {
+ uint32_t pos = 0;
+ bool exists = _lookup_pos(p_key, pos);
+ if (!exists) {
+ return end();
}
+ return ConstIterator(elements[pos]);
+ }
- if (!p_key) { /* get the first key */
-
- for (int i = 0; i < (1 << hash_table_power); i++) {
- if (hash_table[i]) {
- return &hash_table[i]->pair.key;
- }
- }
-
- } else { /* get the next key */
+ /* Indexing */
- const Element *e = get_element(*p_key);
- ERR_FAIL_COND_V_MSG(!e, nullptr, "Invalid key supplied.");
- if (e->next) {
- /* if there is a "next" in the list, return that */
- return &e->next->pair.key;
- } else {
- /* go to next elements */
- uint32_t index = e->hash & ((1 << hash_table_power) - 1);
- index++;
- for (int i = index; i < (1 << hash_table_power); i++) {
- if (hash_table[i]) {
- return &hash_table[i]->pair.key;
- }
- }
- }
+ const TValue &operator[](const TKey &p_key) const {
+ uint32_t pos = 0;
+ bool exists = _lookup_pos(p_key, pos);
+ CRASH_COND(!exists);
+ return elements[pos]->data.value;
+ }
- /* nothing found, was at end */
+ TValue &operator[](const TKey &p_key) {
+ uint32_t pos = 0;
+ bool exists = _lookup_pos(p_key, pos);
+ if (!exists) {
+ return _insert(p_key, TValue())->data.value;
+ } else {
+ return elements[pos]->data.value;
}
-
- return nullptr; /* nothing found */
}
- inline unsigned int size() const {
- return elements;
- }
+ /* Insert */
- inline bool is_empty() const {
- return elements == 0;
+ Iterator insert(const TKey &p_key, const TValue &p_value, bool p_front_insert = false) {
+ return Iterator(_insert(p_key, p_value, p_front_insert));
}
- void clear() {
- /* clean up */
- if (hash_table) {
- for (int i = 0; i < (1 << hash_table_power); i++) {
- while (hash_table[i]) {
- Element *e = hash_table[i];
- hash_table[i] = e->next;
- memdelete(e);
- }
- }
+ /* Constructors */
- memdelete_arr(hash_table);
+ HashMap(const HashMap &p_other) {
+ reserve(hash_table_size_primes[p_other.capacity_index]);
+
+ if (p_other.num_elements == 0) {
+ return;
}
- hash_table = nullptr;
- hash_table_power = 0;
- elements = 0;
+ for (const KeyValue<TKey, TValue> &E : p_other) {
+ insert(E.key, E.value);
+ }
}
- void operator=(const HashMap &p_table) {
- copy_from(p_table);
- }
+ void operator=(const HashMap &p_other) {
+ if (this == &p_other) {
+ return; // Ignore self assignment.
+ }
+ if (num_elements != 0) {
+ clear();
+ }
- void get_key_list(List<TKey> *r_keys) const {
- if (unlikely(!hash_table)) {
- return;
+ reserve(hash_table_size_primes[p_other.capacity_index]);
+
+ if (p_other.elements == nullptr) {
+ return; // Nothing to copy.
}
- for (int i = 0; i < (1 << hash_table_power); i++) {
- Element *e = hash_table[i];
- while (e) {
- r_keys->push_back(e->pair.key);
- e = e->next;
- }
+
+ for (const KeyValue<TKey, TValue> &E : p_other) {
+ insert(E.key, E.value);
}
}
- HashMap() {}
+ HashMap(uint32_t p_initial_capacity) {
+ // Capacity can't be 0.
+ capacity_index = 0;
+ reserve(p_initial_capacity);
+ }
+ HashMap() {
+ capacity_index = MIN_CAPACITY_INDEX;
+ }
- HashMap(const HashMap &p_table) {
- copy_from(p_table);
+ uint32_t debug_get_hash(uint32_t p_index) {
+ if (num_elements == 0) {
+ return 0;
+ }
+ ERR_FAIL_INDEX_V(p_index, get_capacity(), 0);
+ return hashes[p_index];
+ }
+ Iterator debug_get_element(uint32_t p_index) {
+ if (num_elements == 0) {
+ return Iterator();
+ }
+ ERR_FAIL_INDEX_V(p_index, get_capacity(), Iterator());
+ return Iterator(elements[p_index]);
}
~HashMap() {
clear();
+
+ if (elements != nullptr) {
+ Memory::free_static(elements);
+ Memory::free_static(hashes);
+ }
}
};
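
Since the header comment above describes the behavior only briefly, here is a minimal usage sketch of the rewritten, insertion-ordered HashMap (illustrative only; print_line and itos are the usual Godot helpers, everything else follows the API in this file):

    HashMap<String, int> scores;
    scores.insert("alice", 1);
    scores["bob"] = 2;               // operator[] default-inserts a missing key, then assigns.
    scores.insert("carol", 3, true); // p_front_insert == true puts the entry at the head.

    for (const KeyValue<String, int> &E : scores) {
        // Prints carol, alice, bob: insertion order, with carol front-inserted.
        print_line(E.key + ": " + itos(E.value));
    }

    scores.erase("alice");
    bool has_bob = scores.has("bob"); // true
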
diff --git a/core/templates/hash_set.h b/core/templates/hash_set.h
new file mode 100644
index 0000000000..7b3a5d46f8
--- /dev/null
+++ b/core/templates/hash_set.h
@@ -0,0 +1,476 @@
+/*************************************************************************/
+/* hash_set.h */
+/*************************************************************************/
+/* This file is part of: */
+/* GODOT ENGINE */
+/* https://godotengine.org */
+/*************************************************************************/
+/* Copyright (c) 2007-2022 Juan Linietsky, Ariel Manzur. */
+/* Copyright (c) 2014-2022 Godot Engine contributors (cf. AUTHORS.md). */
+/* */
+/* Permission is hereby granted, free of charge, to any person obtaining */
+/* a copy of this software and associated documentation files (the */
+/* "Software"), to deal in the Software without restriction, including */
+/* without limitation the rights to use, copy, modify, merge, publish, */
+/* distribute, sublicense, and/or sell copies of the Software, and to */
+/* permit persons to whom the Software is furnished to do so, subject to */
+/* the following conditions: */
+/* */
+/* The above copyright notice and this permission notice shall be */
+/* included in all copies or substantial portions of the Software. */
+/* */
+/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
+/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
+/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/
+/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
+/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
+/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
+/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
+/*************************************************************************/
+
+#ifndef HASH_SET_H
+#define HASH_SET_H
+
+#include "core/math/math_funcs.h"
+#include "core/os/memory.h"
+#include "core/templates/hash_map.h"
+#include "core/templates/hashfuncs.h"
+#include "core/templates/paged_allocator.h"
+
+/**
+ * Implementation of Set using a bidi indexed hash map.
+ * Use RBSet instead of this only if the following conditions are met:
+ *
+ * - You need to keep an iterator or const pointer to Key and you intend to add/remove elements in the meantime.
+ * - Iteration order does matter (via operator<)
+ *
+ */
+
+template <class TKey,
+ class Hasher = HashMapHasherDefault,
+ class Comparator = HashMapComparatorDefault<TKey>>
+class HashSet {
+public:
+ static constexpr uint32_t MIN_CAPACITY_INDEX = 2; // Use a prime.
+ static constexpr float MAX_OCCUPANCY = 0.75;
+ static constexpr uint32_t EMPTY_HASH = 0;
+
+private:
+ TKey *keys = nullptr;
+ uint32_t *hash_to_key = nullptr;
+ uint32_t *key_to_hash = nullptr;
+ uint32_t *hashes = nullptr;
+
+ uint32_t capacity_index = 0;
+ uint32_t num_elements = 0;
+
+ _FORCE_INLINE_ uint32_t _hash(const TKey &p_key) const {
+ uint32_t hash = Hasher::hash(p_key);
+
+ if (unlikely(hash == EMPTY_HASH)) {
+ hash = EMPTY_HASH + 1;
+ }
+
+ return hash;
+ }
+
+ static _FORCE_INLINE_ uint32_t _get_probe_length(const uint32_t p_pos, const uint32_t p_hash, const uint32_t p_capacity, const uint64_t p_capacity_inv) {
+ const uint32_t original_pos = fastmod(p_hash, p_capacity_inv, p_capacity);
+ return fastmod(p_pos - original_pos + p_capacity, p_capacity_inv, p_capacity);
+ }
+
+ bool _lookup_pos(const TKey &p_key, uint32_t &r_pos) const {
+ if (keys == nullptr) {
+ return false; // Failed lookups, no elements
+ }
+
+ const uint32_t capacity = hash_table_size_primes[capacity_index];
+ const uint64_t capacity_inv = hash_table_size_primes_inv[capacity_index];
+ uint32_t hash = _hash(p_key);
+ uint32_t pos = fastmod(hash, capacity_inv, capacity);
+ uint32_t distance = 0;
+
+ while (true) {
+ if (hashes[pos] == EMPTY_HASH) {
+ return false;
+ }
+
+ if (distance > _get_probe_length(pos, hashes[pos], capacity, capacity_inv)) {
+ return false;
+ }
+
+ if (hashes[pos] == hash && Comparator::compare(keys[hash_to_key[pos]], p_key)) {
+ r_pos = hash_to_key[pos];
+ return true;
+ }
+
+ pos = fastmod(pos + 1, capacity_inv, capacity);
+ distance++;
+ }
+ }
+
+ uint32_t _insert_with_hash(uint32_t p_hash, uint32_t p_index) {
+ const uint32_t capacity = hash_table_size_primes[capacity_index];
+ const uint64_t capacity_inv = hash_table_size_primes_inv[capacity_index];
+ uint32_t hash = p_hash;
+ uint32_t index = p_index;
+ uint32_t distance = 0;
+ uint32_t pos = fastmod(hash, capacity_inv, capacity);
+
+ while (true) {
+ if (hashes[pos] == EMPTY_HASH) {
+ hashes[pos] = hash;
+ key_to_hash[index] = pos;
+ hash_to_key[pos] = index;
+ return pos;
+ }
+
+ // Not an empty slot, let's check the probing length of the existing one.
+ uint32_t existing_probe_len = _get_probe_length(pos, hashes[pos], capacity, capacity_inv);
+ if (existing_probe_len < distance) {
+ key_to_hash[index] = pos;
+ SWAP(hash, hashes[pos]);
+ SWAP(index, hash_to_key[pos]);
+ distance = existing_probe_len;
+ }
+
+ pos = fastmod(pos + 1, capacity_inv, capacity);
+ distance++;
+ }
+ }
+
+ void _resize_and_rehash(uint32_t p_new_capacity_index) {
+ // Capacity can't be 0.
+ capacity_index = MAX((uint32_t)MIN_CAPACITY_INDEX, p_new_capacity_index);
+
+ uint32_t capacity = hash_table_size_primes[capacity_index];
+
+ uint32_t *old_hashes = hashes;
+ uint32_t *old_key_to_hash = key_to_hash;
+
+ hashes = reinterpret_cast<uint32_t *>(Memory::alloc_static(sizeof(uint32_t) * capacity));
+ keys = reinterpret_cast<TKey *>(Memory::realloc_static(keys, sizeof(TKey) * capacity));
+ key_to_hash = reinterpret_cast<uint32_t *>(Memory::alloc_static(sizeof(uint32_t) * capacity));
+ hash_to_key = reinterpret_cast<uint32_t *>(Memory::realloc_static(hash_to_key, sizeof(uint32_t) * capacity));
+
+ for (uint32_t i = 0; i < capacity; i++) {
+ hashes[i] = EMPTY_HASH;
+ }
+
+ for (uint32_t i = 0; i < num_elements; i++) {
+ uint32_t h = old_hashes[old_key_to_hash[i]];
+ _insert_with_hash(h, i);
+ }
+
+ Memory::free_static(old_hashes);
+ Memory::free_static(old_key_to_hash);
+ }
+
+ _FORCE_INLINE_ int32_t _insert(const TKey &p_key) {
+ uint32_t capacity = hash_table_size_primes[capacity_index];
+ if (unlikely(keys == nullptr)) {
+ // Allocate on demand to save memory.
+
+ hashes = reinterpret_cast<uint32_t *>(Memory::alloc_static(sizeof(uint32_t) * capacity));
+ keys = reinterpret_cast<TKey *>(Memory::alloc_static(sizeof(TKey) * capacity));
+ key_to_hash = reinterpret_cast<uint32_t *>(Memory::alloc_static(sizeof(uint32_t) * capacity));
+ hash_to_key = reinterpret_cast<uint32_t *>(Memory::alloc_static(sizeof(uint32_t) * capacity));
+
+ for (uint32_t i = 0; i < capacity; i++) {
+ hashes[i] = EMPTY_HASH;
+ }
+ }
+
+ uint32_t pos = 0;
+ bool exists = _lookup_pos(p_key, pos);
+
+ if (exists) {
+ return pos;
+ } else {
+ if (num_elements + 1 > MAX_OCCUPANCY * capacity) {
+ ERR_FAIL_COND_V_MSG(capacity_index + 1 == HASH_TABLE_SIZE_MAX, -1, "Hash table maximum capacity reached, aborting insertion.");
+ _resize_and_rehash(capacity_index + 1);
+ }
+
+ uint32_t hash = _hash(p_key);
+ memnew_placement(&keys[num_elements], TKey(p_key));
+ _insert_with_hash(hash, num_elements);
+ num_elements++;
+ return num_elements - 1;
+ }
+ }
+
+ void _init_from(const HashSet &p_other) {
+ capacity_index = p_other.capacity_index;
+ num_elements = p_other.num_elements;
+
+ if (p_other.num_elements == 0) {
+ return;
+ }
+
+ uint32_t capacity = hash_table_size_primes[capacity_index];
+
+ hashes = reinterpret_cast<uint32_t *>(Memory::alloc_static(sizeof(uint32_t) * capacity));
+ keys = reinterpret_cast<TKey *>(Memory::alloc_static(sizeof(TKey) * capacity));
+ key_to_hash = reinterpret_cast<uint32_t *>(Memory::alloc_static(sizeof(uint32_t) * capacity));
+ hash_to_key = reinterpret_cast<uint32_t *>(Memory::alloc_static(sizeof(uint32_t) * capacity));
+
+ for (uint32_t i = 0; i < num_elements; i++) {
+ memnew_placement(&keys[i], TKey(p_other.keys[i]));
+ key_to_hash[i] = p_other.key_to_hash[i];
+ }
+
+ for (uint32_t i = 0; i < capacity; i++) {
+ hashes[i] = p_other.hashes[i];
+ hash_to_key[i] = p_other.hash_to_key[i];
+ }
+ }
+
+public:
+ _FORCE_INLINE_ uint32_t get_capacity() const { return hash_table_size_primes[capacity_index]; }
+ _FORCE_INLINE_ uint32_t size() const { return num_elements; }
+
+ /* Standard Godot Container API */
+
+ bool is_empty() const {
+ return num_elements == 0;
+ }
+
+ void clear() {
+ if (keys == nullptr) {
+ return;
+ }
+ uint32_t capacity = hash_table_size_primes[capacity_index];
+ for (uint32_t i = 0; i < capacity; i++) {
+ hashes[i] = EMPTY_HASH;
+ }
+ for (uint32_t i = 0; i < num_elements; i++) {
+ keys[i].~TKey();
+ }
+
+ num_elements = 0;
+ }
+
+ _FORCE_INLINE_ bool has(const TKey &p_key) const {
+ uint32_t _pos = 0;
+ return _lookup_pos(p_key, _pos);
+ }
+
+ bool erase(const TKey &p_key) {
+ uint32_t pos = 0;
+ bool exists = _lookup_pos(p_key, pos);
+
+ if (!exists) {
+ return false;
+ }
+
+ uint32_t key_pos = pos;
+ pos = key_to_hash[pos]; //make hash pos
+
+ const uint32_t capacity = hash_table_size_primes[capacity_index];
+ const uint64_t capacity_inv = hash_table_size_primes_inv[capacity_index];
+ uint32_t next_pos = fastmod(pos + 1, capacity_inv, capacity);
+ while (hashes[next_pos] != EMPTY_HASH && _get_probe_length(next_pos, hashes[next_pos], capacity, capacity_inv) != 0) {
+ uint32_t kpos = hash_to_key[pos];
+ uint32_t kpos_next = hash_to_key[next_pos];
+ SWAP(key_to_hash[kpos], key_to_hash[kpos_next]);
+ SWAP(hashes[next_pos], hashes[pos]);
+ SWAP(hash_to_key[next_pos], hash_to_key[pos]);
+
+ pos = next_pos;
+ next_pos = fastmod(pos + 1, capacity_inv, capacity);
+ }
+
+ hashes[pos] = EMPTY_HASH;
+ keys[key_pos].~TKey();
+ num_elements--;
+ if (key_pos < num_elements) {
+ // Not the last key; move the last one here to keep the keys array contiguous.
+ memnew_placement(&keys[key_pos], TKey(keys[num_elements]));
+ keys[num_elements].~TKey();
+ key_to_hash[key_pos] = key_to_hash[num_elements];
+ hash_to_key[key_to_hash[num_elements]] = key_pos;
+ }
+
+ return true;
+ }
+
+ // Reserves space for a number of elements, useful to avoid many resizes and rehashes.
+ // If adding a known (possibly large) number of elements at once, the requested capacity must be larger than the old capacity.
+ void reserve(uint32_t p_new_capacity) {
+ uint32_t new_index = capacity_index;
+
+ while (hash_table_size_primes[new_index] < p_new_capacity) {
+ ERR_FAIL_COND_MSG(new_index + 1 == (uint32_t)HASH_TABLE_SIZE_MAX, "Hash table maximum capacity reached, aborting reservation.");
+ new_index++;
+ }
+
+ if (new_index == capacity_index) {
+ return;
+ }
+
+ if (keys == nullptr) {
+ capacity_index = new_index;
+ return; // Unallocated yet.
+ }
+ _resize_and_rehash(new_index);
+ }
+
+ /** Iterator API **/
+
+ struct Iterator {
+ _FORCE_INLINE_ const TKey &operator*() const {
+ return keys[index];
+ }
+ _FORCE_INLINE_ const TKey *operator->() const {
+ return &keys[index];
+ }
+ _FORCE_INLINE_ Iterator &operator++() {
+ index++;
+ if (index >= (int32_t)num_keys) {
+ index = -1;
+ keys = nullptr;
+ num_keys = 0;
+ }
+ return *this;
+ }
+ _FORCE_INLINE_ Iterator &operator--() {
+ index--;
+ if (index < 0) {
+ index = -1;
+ keys = nullptr;
+ num_keys = 0;
+ }
+ return *this;
+ }
+
+ _FORCE_INLINE_ bool operator==(const Iterator &b) const { return keys == b.keys && index == b.index; }
+ _FORCE_INLINE_ bool operator!=(const Iterator &b) const { return keys != b.keys || index != b.index; }
+
+ _FORCE_INLINE_ explicit operator bool() const {
+ return keys != nullptr;
+ }
+
+ _FORCE_INLINE_ Iterator(const TKey *p_keys, uint32_t p_num_keys, int32_t p_index = -1) {
+ keys = p_keys;
+ num_keys = p_num_keys;
+ index = p_index;
+ }
+ _FORCE_INLINE_ Iterator() {}
+ _FORCE_INLINE_ Iterator(const Iterator &p_it) {
+ keys = p_it.keys;
+ num_keys = p_it.num_keys;
+ index = p_it.index;
+ }
+ _FORCE_INLINE_ void operator=(const Iterator &p_it) {
+ keys = p_it.keys;
+ num_keys = p_it.num_keys;
+ index = p_it.index;
+ }
+
+ private:
+ const TKey *keys = nullptr;
+ uint32_t num_keys = 0;
+ int32_t index = -1;
+ };
+
+ _FORCE_INLINE_ Iterator begin() const {
+ return num_elements ? Iterator(keys, num_elements, 0) : Iterator();
+ }
+ _FORCE_INLINE_ Iterator end() const {
+ return Iterator();
+ }
+ _FORCE_INLINE_ Iterator last() const {
+ if (num_elements == 0) {
+ return Iterator();
+ }
+ return Iterator(keys, num_elements, num_elements - 1);
+ }
+
+ _FORCE_INLINE_ Iterator find(const TKey &p_key) const {
+ uint32_t pos = 0;
+ bool exists = _lookup_pos(p_key, pos);
+ if (!exists) {
+ return end();
+ }
+ return Iterator(keys, num_elements, pos);
+ }
+
+ _FORCE_INLINE_ void remove(const Iterator &p_iter) {
+ if (p_iter) {
+ erase(*p_iter);
+ }
+ }
+
+ /* Insert */
+
+ Iterator insert(const TKey &p_key) {
+ uint32_t pos = _insert(p_key);
+ return Iterator(keys, num_elements, pos);
+ }
+
+ /* Constructors */
+
+ HashSet(const HashSet &p_other) {
+ _init_from(p_other);
+ }
+
+ void operator=(const HashSet &p_other) {
+ if (this == &p_other) {
+ return; // Ignore self assignment.
+ }
+
+ clear();
+
+ if (keys != nullptr) {
+ Memory::free_static(keys);
+ Memory::free_static(key_to_hash);
+ Memory::free_static(hash_to_key);
+ Memory::free_static(hashes);
+ keys = nullptr;
+ hashes = nullptr;
+ hash_to_key = nullptr;
+ key_to_hash = nullptr;
+ }
+
+ _init_from(p_other);
+ }
+
+ HashSet(uint32_t p_initial_capacity) {
+ // Capacity can't be 0.
+ capacity_index = 0;
+ reserve(p_initial_capacity);
+ }
+ HashSet() {
+ capacity_index = MIN_CAPACITY_INDEX;
+ }
+
+ void reset() {
+ clear();
+
+ if (keys != nullptr) {
+ Memory::free_static(keys);
+ Memory::free_static(key_to_hash);
+ Memory::free_static(hash_to_key);
+ Memory::free_static(hashes);
+ keys = nullptr;
+ hashes = nullptr;
+ hash_to_key = nullptr;
+ key_to_hash = nullptr;
+ }
+ capacity_index = MIN_CAPACITY_INDEX;
+ }
+
+ ~HashSet() {
+ clear();
+
+ if (keys != nullptr) {
+ Memory::free_static(keys);
+ Memory::free_static(key_to_hash);
+ Memory::free_static(hash_to_key);
+ Memory::free_static(hashes);
+ }
+ }
+};
+
+#endif // HASH_SET_H
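
To complement the header comment, a minimal usage sketch of the new HashSet (illustrative only): keys live in a densely packed array, with key_to_hash/hash_to_key mapping between key slots and hash-table slots, so iteration follows insertion order until an erase moves the last key into the freed slot.

    HashSet<int> ids;
    ids.insert(10);
    ids.insert(20);
    ids.insert(30);

    for (const int &id : ids) {
        print_line(itos(id)); // Walks the packed keys array: 10, 20, 30.
    }

    ids.erase(10);              // 30 is moved into the freed slot; order becomes 30, 20.
    bool present = ids.has(20); // true
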
diff --git a/core/templates/hashfuncs.h b/core/templates/hashfuncs.h
index 2a129f97d5..d85cdf7adc 100644
--- a/core/templates/hashfuncs.h
+++ b/core/templates/hashfuncs.h
@@ -31,14 +31,24 @@
#ifndef HASHFUNCS_H
#define HASHFUNCS_H
+#include "core/math/aabb.h"
#include "core/math/math_defs.h"
#include "core/math/math_funcs.h"
+#include "core/math/rect2.h"
+#include "core/math/rect2i.h"
+#include "core/math/vector2.h"
+#include "core/math/vector2i.h"
+#include "core/math/vector3.h"
+#include "core/math/vector3i.h"
+#include "core/math/vector4.h"
+#include "core/math/vector4i.h"
#include "core/object/object_id.h"
#include "core/string/node_path.h"
#include "core/string/string_name.h"
#include "core/string/ustring.h"
#include "core/templates/rid.h"
#include "core/typedefs.h"
+
/**
* Hashing functions
*/
@@ -48,30 +58,30 @@
* @param C String
* @return 32-bits hashcode
*/
-static inline uint32_t hash_djb2(const char *p_cstr) {
+static _FORCE_INLINE_ uint32_t hash_djb2(const char *p_cstr) {
const unsigned char *chr = (const unsigned char *)p_cstr;
uint32_t hash = 5381;
uint32_t c;
while ((c = *chr++)) {
- hash = ((hash << 5) + hash) + c; /* hash * 33 + c */
+ hash = ((hash << 5) + hash) ^ c; /* hash * 33 ^ c */
}
return hash;
}
-static inline uint32_t hash_djb2_buffer(const uint8_t *p_buff, int p_len, uint32_t p_prev = 5381) {
+static _FORCE_INLINE_ uint32_t hash_djb2_buffer(const uint8_t *p_buff, int p_len, uint32_t p_prev = 5381) {
uint32_t hash = p_prev;
for (int i = 0; i < p_len; i++) {
- hash = ((hash << 5) + hash) + p_buff[i]; /* hash * 33 + c */
+ hash = ((hash << 5) + hash) ^ p_buff[i]; /* hash * 33 ^ c */
}
return hash;
}
-static inline uint32_t hash_djb2_one_32(uint32_t p_in, uint32_t p_prev = 5381) {
- return ((p_prev << 5) + p_prev) + p_in;
+static _FORCE_INLINE_ uint32_t hash_djb2_one_32(uint32_t p_in, uint32_t p_prev = 5381) {
+ return ((p_prev << 5) + p_prev) ^ p_in;
}
/**
@@ -81,7 +91,7 @@ static inline uint32_t hash_djb2_one_32(uint32_t p_in, uint32_t p_prev = 5381) {
* @param p_int - 64-bit unsigned integer key to be hashed
* @return unsigned 32-bit value representing hashcode
*/
-static inline uint32_t hash_one_uint64(const uint64_t p_int) {
+static _FORCE_INLINE_ uint32_t hash_one_uint64(const uint64_t p_int) {
uint64_t v = p_int;
v = (~v) + (v << 18); // v = (v << 18) - v - 1;
v = v ^ (v >> 31);
@@ -92,7 +102,134 @@ static inline uint32_t hash_one_uint64(const uint64_t p_int) {
return uint32_t(v);
}
-static inline uint32_t hash_djb2_one_float(double p_in, uint32_t p_prev = 5381) {
+#define HASH_MURMUR3_SEED 0x7F07C65
+// Murmurhash3 32-bit version.
+// All MurmurHash versions are public domain software, and the author disclaims all copyright to their code.
+
+static _FORCE_INLINE_ uint32_t hash_murmur3_one_32(uint32_t p_in, uint32_t p_seed = HASH_MURMUR3_SEED) {
+ p_in *= 0xcc9e2d51;
+ p_in = (p_in << 15) | (p_in >> 17);
+ p_in *= 0x1b873593;
+
+ p_seed ^= p_in;
+ p_seed = (p_seed << 13) | (p_seed >> 19);
+ p_seed = p_seed * 5 + 0xe6546b64;
+
+ return p_seed;
+}
+
+static _FORCE_INLINE_ uint32_t hash_murmur3_one_float(float p_in, uint32_t p_seed = HASH_MURMUR3_SEED) {
+ union {
+ float f;
+ uint32_t i;
+ } u;
+
+ // Normalize +/- 0.0 and NaN values so they hash the same.
+ if (p_in == 0.0f) {
+ u.f = 0.0;
+ } else if (Math::is_nan(p_in)) {
+ u.f = NAN;
+ } else {
+ u.f = p_in;
+ }
+
+ return hash_murmur3_one_32(u.i, p_seed);
+}
+
+static _FORCE_INLINE_ uint32_t hash_murmur3_one_64(uint64_t p_in, uint32_t p_seed = HASH_MURMUR3_SEED) {
+ p_seed = hash_murmur3_one_32(p_in & 0xFFFFFFFF, p_seed);
+ return hash_murmur3_one_32(p_in >> 32, p_seed);
+}
+
+static _FORCE_INLINE_ uint32_t hash_murmur3_one_double(double p_in, uint32_t p_seed = HASH_MURMUR3_SEED) {
+ union {
+ double d;
+ uint64_t i;
+ } u;
+
+ // Normalize +/- 0.0 and NaN values so they hash the same.
+ if (p_in == 0.0f) {
+ u.d = 0.0;
+ } else if (Math::is_nan(p_in)) {
+ u.d = NAN;
+ } else {
+ u.d = p_in;
+ }
+
+ return hash_murmur3_one_64(u.i, p_seed);
+}
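+
+// As a rough illustration of the normalization above (a sketch, assuming NAN from <cmath>),
+// all zero inputs hash identically and every NaN payload maps to a single shared hash:
+//
+//     hash_murmur3_one_float(0.0f) == hash_murmur3_one_float(-0.0f) // -0.0f == 0.0f, normalized to +0.0.
+//     hash_murmur3_one_float(NAN)                                   // Any NaN is replaced by NAN before hashing.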
+
+static _FORCE_INLINE_ uint32_t hash_murmur3_one_real(real_t p_in, uint32_t p_seed = HASH_MURMUR3_SEED) {
+#ifdef REAL_T_IS_DOUBLE
+ return hash_murmur3_one_double(p_in, p_seed);
+#else
+ return hash_murmur3_one_float(p_in, p_seed);
+#endif
+}
+
+static _FORCE_INLINE_ uint32_t hash_rotl32(uint32_t x, int8_t r) {
+ return (x << r) | (x >> (32 - r));
+}
+
+static _FORCE_INLINE_ uint32_t hash_fmix32(uint32_t h) {
+ h ^= h >> 16;
+ h *= 0x85ebca6b;
+ h ^= h >> 13;
+ h *= 0xc2b2ae35;
+ h ^= h >> 16;
+
+ return h;
+}
+
+static _FORCE_INLINE_ uint32_t hash_murmur3_buffer(const void *key, int length, const uint32_t seed = HASH_MURMUR3_SEED) {
+ // The default seed is a random prime number, although this is not required.
+ const uint8_t *data = (const uint8_t *)key;
+ const int nblocks = length / 4;
+
+ uint32_t h1 = seed;
+
+ const uint32_t c1 = 0xcc9e2d51;
+ const uint32_t c2 = 0x1b873593;
+
+ const uint32_t *blocks = (const uint32_t *)(data + nblocks * 4);
+
+ for (int i = -nblocks; i; i++) {
+ uint32_t k1 = blocks[i];
+
+ k1 *= c1;
+ k1 = hash_rotl32(k1, 15);
+ k1 *= c2;
+
+ h1 ^= k1;
+ h1 = hash_rotl32(h1, 13);
+ h1 = h1 * 5 + 0xe6546b64;
+ }
+
+ const uint8_t *tail = (const uint8_t *)(data + nblocks * 4);
+
+ uint32_t k1 = 0;
+
+ switch (length & 3) {
+ case 3:
+ k1 ^= tail[2] << 16;
+ [[fallthrough]];
+ case 2:
+ k1 ^= tail[1] << 8;
+ [[fallthrough]];
+ case 1:
+ k1 ^= tail[0];
+ k1 *= c1;
+ k1 = hash_rotl32(k1, 15);
+ k1 *= c2;
+ h1 ^= k1;
+ };
+
+ // Finalize with additional bit mixing.
+ h1 ^= length;
+ return hash_fmix32(h1);
+}
+
+static _FORCE_INLINE_ uint32_t hash_djb2_one_float(double p_in, uint32_t p_prev = 5381) {
union {
double d;
uint64_t i;
@@ -111,7 +248,7 @@ static inline uint32_t hash_djb2_one_float(double p_in, uint32_t p_prev = 5381)
}
template <class T>
-static inline uint32_t make_uint32_t(T p_in) {
+static _FORCE_INLINE_ uint32_t hash_make_uint32_t(T p_in) {
union {
T t;
uint32_t _u32;
@@ -121,7 +258,7 @@ static inline uint32_t make_uint32_t(T p_in) {
return _u._u32;
}
-static inline uint64_t hash_djb2_one_float_64(double p_in, uint64_t p_prev = 5381) {
+static _FORCE_INLINE_ uint64_t hash_djb2_one_float_64(double p_in, uint64_t p_prev = 5381) {
union {
double d;
uint64_t i;
@@ -139,12 +276,12 @@ static inline uint64_t hash_djb2_one_float_64(double p_in, uint64_t p_prev = 538
return ((p_prev << 5) + p_prev) + u.i;
}
-static inline uint64_t hash_djb2_one_64(uint64_t p_in, uint64_t p_prev = 5381) {
- return ((p_prev << 5) + p_prev) + p_in;
+static _FORCE_INLINE_ uint64_t hash_djb2_one_64(uint64_t p_in, uint64_t p_prev = 5381) {
+ return ((p_prev << 5) + p_prev) ^ p_in;
}
template <class T>
-static inline uint64_t make_uint64_t(T p_in) {
+static _FORCE_INLINE_ uint64_t hash_make_uint64_t(T p_in) {
union {
T t;
uint64_t _u64;
@@ -155,30 +292,96 @@ static inline uint64_t make_uint64_t(T p_in) {
return _u._u64;
}
+template <class T>
+class Ref;
+
struct HashMapHasherDefault {
+ // Generic hash function for any type.
+ template <class T>
+ static _FORCE_INLINE_ uint32_t hash(const T *p_pointer) { return hash_one_uint64((uint64_t)p_pointer); }
+
+ template <class T>
+ static _FORCE_INLINE_ uint32_t hash(const Ref<T> &p_ref) { return hash_one_uint64((uint64_t)p_ref.operator->()); }
+
static _FORCE_INLINE_ uint32_t hash(const String &p_string) { return p_string.hash(); }
static _FORCE_INLINE_ uint32_t hash(const char *p_cstr) { return hash_djb2(p_cstr); }
- static _FORCE_INLINE_ uint32_t hash(const uint64_t p_int) { return hash_one_uint64(p_int); }
- static _FORCE_INLINE_ uint32_t hash(const ObjectID &p_id) { return hash_one_uint64(p_id); }
-
- static _FORCE_INLINE_ uint32_t hash(const int64_t p_int) { return hash(uint64_t(p_int)); }
- static _FORCE_INLINE_ uint32_t hash(const float p_float) { return hash_djb2_one_float(p_float); }
- static _FORCE_INLINE_ uint32_t hash(const double p_double) { return hash_djb2_one_float(p_double); }
- static _FORCE_INLINE_ uint32_t hash(const uint32_t p_int) { return p_int; }
- static _FORCE_INLINE_ uint32_t hash(const int32_t p_int) { return (uint32_t)p_int; }
- static _FORCE_INLINE_ uint32_t hash(const uint16_t p_int) { return p_int; }
- static _FORCE_INLINE_ uint32_t hash(const int16_t p_int) { return (uint32_t)p_int; }
- static _FORCE_INLINE_ uint32_t hash(const uint8_t p_int) { return p_int; }
- static _FORCE_INLINE_ uint32_t hash(const int8_t p_int) { return (uint32_t)p_int; }
- static _FORCE_INLINE_ uint32_t hash(const wchar_t p_wchar) { return (uint32_t)p_wchar; }
- static _FORCE_INLINE_ uint32_t hash(const char16_t p_uchar) { return (uint32_t)p_uchar; }
- static _FORCE_INLINE_ uint32_t hash(const char32_t p_uchar) { return (uint32_t)p_uchar; }
+ static _FORCE_INLINE_ uint32_t hash(const wchar_t p_wchar) { return hash_fmix32(p_wchar); }
+ static _FORCE_INLINE_ uint32_t hash(const char16_t p_uchar) { return hash_fmix32(p_uchar); }
+ static _FORCE_INLINE_ uint32_t hash(const char32_t p_uchar) { return hash_fmix32(p_uchar); }
static _FORCE_INLINE_ uint32_t hash(const RID &p_rid) { return hash_one_uint64(p_rid.get_id()); }
-
static _FORCE_INLINE_ uint32_t hash(const StringName &p_string_name) { return p_string_name.hash(); }
static _FORCE_INLINE_ uint32_t hash(const NodePath &p_path) { return p_path.hash(); }
+ static _FORCE_INLINE_ uint32_t hash(const ObjectID &p_id) { return hash_one_uint64(p_id); }
- //static _FORCE_INLINE_ uint32_t hash(const void* p_ptr) { return uint32_t(uint64_t(p_ptr))*(0x9e3779b1L); }
+ static _FORCE_INLINE_ uint32_t hash(const uint64_t p_int) { return hash_one_uint64(p_int); }
+ static _FORCE_INLINE_ uint32_t hash(const int64_t p_int) { return hash_one_uint64(p_int); }
+ static _FORCE_INLINE_ uint32_t hash(const float p_float) { return hash_murmur3_one_float(p_float); }
+ static _FORCE_INLINE_ uint32_t hash(const double p_double) { return hash_murmur3_one_double(p_double); }
+ static _FORCE_INLINE_ uint32_t hash(const uint32_t p_int) { return hash_fmix32(p_int); }
+ static _FORCE_INLINE_ uint32_t hash(const int32_t p_int) { return hash_fmix32(p_int); }
+ static _FORCE_INLINE_ uint32_t hash(const uint16_t p_int) { return hash_fmix32(p_int); }
+ static _FORCE_INLINE_ uint32_t hash(const int16_t p_int) { return hash_fmix32(p_int); }
+ static _FORCE_INLINE_ uint32_t hash(const uint8_t p_int) { return hash_fmix32(p_int); }
+ static _FORCE_INLINE_ uint32_t hash(const int8_t p_int) { return hash_fmix32(p_int); }
+ static _FORCE_INLINE_ uint32_t hash(const Vector2i &p_vec) {
+ uint32_t h = hash_murmur3_one_32(p_vec.x);
+ h = hash_murmur3_one_32(p_vec.y, h);
+ return hash_fmix32(h);
+ }
+ static _FORCE_INLINE_ uint32_t hash(const Vector3i &p_vec) {
+ uint32_t h = hash_murmur3_one_32(p_vec.x);
+ h = hash_murmur3_one_32(p_vec.y, h);
+ h = hash_murmur3_one_32(p_vec.z, h);
+ return hash_fmix32(h);
+ }
+ static _FORCE_INLINE_ uint32_t hash(const Vector4i &p_vec) {
+ uint32_t h = hash_murmur3_one_32(p_vec.x);
+ h = hash_murmur3_one_32(p_vec.y, h);
+ h = hash_murmur3_one_32(p_vec.z, h);
+ h = hash_murmur3_one_32(p_vec.w, h);
+ return hash_fmix32(h);
+ }
+ static _FORCE_INLINE_ uint32_t hash(const Vector2 &p_vec) {
+ uint32_t h = hash_murmur3_one_real(p_vec.x);
+ h = hash_murmur3_one_real(p_vec.y, h);
+ return hash_fmix32(h);
+ }
+ static _FORCE_INLINE_ uint32_t hash(const Vector3 &p_vec) {
+ uint32_t h = hash_murmur3_one_real(p_vec.x);
+ h = hash_murmur3_one_real(p_vec.y, h);
+ h = hash_murmur3_one_real(p_vec.z, h);
+ return hash_fmix32(h);
+ }
+ static _FORCE_INLINE_ uint32_t hash(const Vector4 &p_vec) {
+ uint32_t h = hash_murmur3_one_real(p_vec.x);
+ h = hash_murmur3_one_real(p_vec.y, h);
+ h = hash_murmur3_one_real(p_vec.z, h);
+ h = hash_murmur3_one_real(p_vec.w, h);
+ return hash_fmix32(h);
+ }
+ static _FORCE_INLINE_ uint32_t hash(const Rect2i &p_rect) {
+ uint32_t h = hash_murmur3_one_32(p_rect.position.x);
+ h = hash_murmur3_one_32(p_rect.position.y, h);
+ h = hash_murmur3_one_32(p_rect.size.x, h);
+ h = hash_murmur3_one_32(p_rect.size.y, h);
+ return hash_fmix32(h);
+ }
+ static _FORCE_INLINE_ uint32_t hash(const Rect2 &p_rect) {
+ uint32_t h = hash_murmur3_one_real(p_rect.position.x);
+ h = hash_murmur3_one_real(p_rect.position.y, h);
+ h = hash_murmur3_one_real(p_rect.size.x, h);
+ h = hash_murmur3_one_real(p_rect.size.y, h);
+ return hash_fmix32(h);
+ }
+ static _FORCE_INLINE_ uint32_t hash(const AABB &p_aabb) {
+ uint32_t h = hash_murmur3_one_real(p_aabb.position.x);
+ h = hash_murmur3_one_real(p_aabb.position.y, h);
+ h = hash_murmur3_one_real(p_aabb.position.z, h);
+ h = hash_murmur3_one_real(p_aabb.size.x, h);
+ h = hash_murmur3_one_real(p_aabb.size.y, h);
+ h = hash_murmur3_one_real(p_aabb.size.z, h);
+ return hash_fmix32(h);
+ }
};
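The added overloads hash multi-component types by feeding each component through hash_murmur3_one_32 or hash_murmur3_one_real and finalizing with hash_fmix32. A user-defined key can follow the same recipe; a minimal sketch, where MyKey and MyKeyHasher are hypothetical names and not part of this patch:

// Hypothetical key type and hasher, following the same murmur3 + fmix32 pattern as above.
struct MyKey {
	int32_t id = 0;
	Vector2 pos;
	bool operator==(const MyKey &p_other) const { return id == p_other.id && pos == p_other.pos; }
};

struct MyKeyHasher {
	static _FORCE_INLINE_ uint32_t hash(const MyKey &p_key) {
		uint32_t h = hash_murmur3_one_32((uint32_t)p_key.id);
		h = hash_murmur3_one_real(p_key.pos.x, h);
		h = hash_murmur3_one_real(p_key.pos.y, h);
		return hash_fmix32(h); // Finalize once at the end, as the built-in overloads do.
	}
};

// Usage: HashMap<MyKey, int, MyKeyHasher> map;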
template <typename T>
@@ -186,14 +389,130 @@ struct HashMapComparatorDefault {
static bool compare(const T &p_lhs, const T &p_rhs) {
return p_lhs == p_rhs;
}
+};
- bool compare(const float &p_lhs, const float &p_rhs) {
+template <>
+struct HashMapComparatorDefault<float> {
+ static bool compare(const float &p_lhs, const float &p_rhs) {
return (p_lhs == p_rhs) || (Math::is_nan(p_lhs) && Math::is_nan(p_rhs));
}
+};
- bool compare(const double &p_lhs, const double &p_rhs) {
+template <>
+struct HashMapComparatorDefault<double> {
+ static bool compare(const double &p_lhs, const double &p_rhs) {
return (p_lhs == p_rhs) || (Math::is_nan(p_lhs) && Math::is_nan(p_rhs));
}
};
+template <>
+struct HashMapComparatorDefault<Vector2> {
+ static bool compare(const Vector2 &p_lhs, const Vector2 &p_rhs) {
+ return ((p_lhs.x == p_rhs.x) || (Math::is_nan(p_lhs.x) && Math::is_nan(p_rhs.x))) && ((p_lhs.y == p_rhs.y) || (Math::is_nan(p_lhs.y) && Math::is_nan(p_rhs.y)));
+ }
+};
+
+template <>
+struct HashMapComparatorDefault<Vector3> {
+ static bool compare(const Vector3 &p_lhs, const Vector3 &p_rhs) {
+ return ((p_lhs.x == p_rhs.x) || (Math::is_nan(p_lhs.x) && Math::is_nan(p_rhs.x))) && ((p_lhs.y == p_rhs.y) || (Math::is_nan(p_lhs.y) && Math::is_nan(p_rhs.y))) && ((p_lhs.z == p_rhs.z) || (Math::is_nan(p_lhs.z) && Math::is_nan(p_rhs.z)));
+ }
+};
+
+constexpr uint32_t HASH_TABLE_SIZE_MAX = 29;
+
+const uint32_t hash_table_size_primes[HASH_TABLE_SIZE_MAX] = {
+ 5,
+ 13,
+ 23,
+ 47,
+ 97,
+ 193,
+ 389,
+ 769,
+ 1543,
+ 3079,
+ 6151,
+ 12289,
+ 24593,
+ 49157,
+ 98317,
+ 196613,
+ 393241,
+ 786433,
+ 1572869,
+ 3145739,
+ 6291469,
+ 12582917,
+ 25165843,
+ 50331653,
+ 100663319,
+ 201326611,
+ 402653189,
+ 805306457,
+ 1610612741,
+};
+
+// Computed with elem_i = UINT64_C(0xFFFFFFFFFFFFFFFF) / d_i + 1, where d_i is the i-th element of the above array.
+const uint64_t hash_table_size_primes_inv[HASH_TABLE_SIZE_MAX] = {
+ 3689348814741910324,
+ 1418980313362273202,
+ 802032351030850071,
+ 392483916461905354,
+ 190172619316593316,
+ 95578984837873325,
+ 47420935922132524,
+ 23987963684927896,
+ 11955116055547344,
+ 5991147799191151,
+ 2998982941588287,
+ 1501077717772769,
+ 750081082979285,
+ 375261795343686,
+ 187625172388393,
+ 93822606204624,
+ 46909513691883,
+ 23456218233098,
+ 11728086747027,
+ 5864041509391,
+ 2932024948977,
+ 1466014921160,
+ 733007198436,
+ 366503839517,
+ 183251896093,
+ 91625960335,
+ 45812983922,
+ 22906489714,
+ 11453246088
+};
+
+/**
+ * Fastmod computes (n mod d) much faster than n % d, given the precomputed magic number c.
+ * The implementation of fastmod is based on the following paper by Daniel Lemire et al.
+ * Faster Remainder by Direct Computation: Applications to Compilers and Software Libraries
+ * https://arxiv.org/abs/1902.01961
+ */
+static _FORCE_INLINE_ uint32_t fastmod(const uint32_t n, const uint64_t c, const uint32_t d) {
+#if defined(_MSC_VER)
+ // Returns the upper 64 bits of the product of two 64-bit unsigned integers.
+ // This intrinsic function is required since MSVC does not support unsigned 128-bit integers.
+#if defined(_M_X64) || defined(_M_ARM64)
+ return __umulh(c * n, d);
+#else
+ // Fallback to the slower method for 32-bit platforms.
+ return n % d;
+#endif // _M_X64 || _M_ARM64
+#else
+#ifdef __SIZEOF_INT128__
+ // Prevent compiler warning, because we know what we are doing.
+ uint64_t lowbits = c * n;
+ __extension__ typedef unsigned __int128 uint128;
+ return static_cast<uint64_t>(((uint128)lowbits * d) >> 64);
+#else
+ // Fallback to the slower method if no 128-bit unsigned integer type is available.
+ return n % d;
+#endif // __SIZEOF_INT128__
+#endif // _MSC_VER
+}
+
#endif // HASHFUNCS_H
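The two tables pair each prime capacity with a precomputed 64-bit inverse so a bucket index can be derived without an integer division. A minimal sketch of how they are meant to combine with fastmod; the helper name bucket_index is made up here, and the real call sites live in the hash map and hash set headers:

// Hypothetical helper: maps a 32-bit hash to a bucket index without a division.
static _FORCE_INLINE_ uint32_t bucket_index(uint32_t p_hash, uint32_t p_capacity_index) {
	const uint32_t capacity = hash_table_size_primes[p_capacity_index];
	const uint64_t capacity_inv = hash_table_size_primes_inv[p_capacity_index];
	return fastmod(p_hash, capacity_inv, capacity); // Same result as p_hash % capacity, but cheaper.
}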
diff --git a/core/templates/local_vector.h b/core/templates/local_vector.h
index f4e0748c27..49690f2373 100644
--- a/core/templates/local_vector.h
+++ b/core/templates/local_vector.h
@@ -37,8 +37,11 @@
#include "core/templates/vector.h"
#include <initializer_list>
+#include <type_traits>
-template <class T, class U = uint32_t, bool force_trivial = false>
+// If tight, it grows strictly as much as needed.
+// Otherwise, it grows exponentially (the default and what you want in most cases).
+template <class T, class U = uint32_t, bool force_trivial = false, bool tight = false>
class LocalVector {
private:
U count = 0;
@@ -65,7 +68,7 @@ public:
CRASH_COND_MSG(!data, "Out of memory");
}
- if (!__has_trivial_constructor(T) && !force_trivial) {
+ if (!std::is_trivially_constructible<T>::value && !force_trivial) {
memnew_placement(&data[count++], T(p_elem));
} else {
data[count++] = p_elem;
@@ -78,7 +81,7 @@ public:
for (U i = p_index; i < count; i++) {
data[i] = data[i + 1];
}
- if (!__has_trivial_destructor(T) && !force_trivial) {
+ if (!std::is_trivially_destructible<T>::value && !force_trivial) {
data[count].~T();
}
}
@@ -91,7 +94,7 @@ public:
if (count > p_index) {
data[p_index] = data[count];
}
- if (!__has_trivial_destructor(T) && !force_trivial) {
+ if (!std::is_trivially_destructible<T>::value && !force_trivial) {
data[count].~T();
}
}
@@ -121,7 +124,7 @@ public:
_FORCE_INLINE_ bool is_empty() const { return count == 0; }
_FORCE_INLINE_ U get_capacity() const { return capacity; }
_FORCE_INLINE_ void reserve(U p_size) {
- p_size = nearest_power_of_2_templated(p_size);
+ p_size = tight ? p_size : nearest_power_of_2_templated(p_size);
if (p_size > capacity) {
capacity = p_size;
data = (T *)memrealloc(data, capacity * sizeof(T));
@@ -132,7 +135,7 @@ public:
_FORCE_INLINE_ U size() const { return count; }
void resize(U p_size) {
if (p_size < count) {
- if (!__has_trivial_destructor(T) && !force_trivial) {
+ if (!std::is_trivially_destructible<T>::value && !force_trivial) {
for (U i = p_size; i < count; i++) {
data[i].~T();
}
@@ -149,7 +152,7 @@ public:
data = (T *)memrealloc(data, capacity * sizeof(T));
CRASH_COND_MSG(!data, "Out of memory");
}
- if (!__has_trivial_constructor(T) && !force_trivial) {
+ if (!std::is_trivially_constructible<T>::value && !force_trivial) {
for (U i = count; i < p_size; i++) {
memnew_placement(&data[i], T);
}
@@ -262,4 +265,7 @@ public:
}
};
+template <class T, class U = uint32_t, bool force_trivial = false>
+using TightLocalVector = LocalVector<T, U, force_trivial, true>;
+
#endif // LOCAL_VECTOR_H
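With the new tight parameter, reserve() allocates exactly the requested count instead of rounding up to the next power of two. A small sketch of the difference, assuming the capacities noted in the comments:

// Illustrative only; the capacities follow from the reserve() change shown above.
void reserve_demo() {
	LocalVector<int> v;      // Default growth: reserve() rounds up to a power of two.
	TightLocalVector<int> t; // Tight alias: reserve() allocates exactly what was asked for.
	v.reserve(100); // Capacity becomes 128.
	t.reserve(100); // Capacity becomes 100.
}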
diff --git a/core/templates/lru.h b/core/templates/lru.h
index 48ba318b12..3a78de61db 100644
--- a/core/templates/lru.h
+++ b/core/templates/lru.h
@@ -35,7 +35,7 @@
#include "hash_map.h"
#include "list.h"
-template <class TKey, class TData>
+template <class TKey, class TData, class Hasher = HashMapHasherDefault, class Comparator = HashMapComparatorDefault<TKey>>
class LRUCache {
private:
struct Pair {
@@ -52,7 +52,7 @@ private:
typedef typename List<Pair>::Element *Element;
List<Pair> _list;
- HashMap<TKey, Element> _map;
+ HashMap<TKey, Element, Hasher, Comparator> _map;
size_t capacity;
public:
@@ -102,6 +102,7 @@ public:
}
_FORCE_INLINE_ size_t get_capacity() const { return capacity; }
+ _FORCE_INLINE_ size_t get_size() const { return _map.size(); }
void set_capacity(size_t p_capacity) {
if (capacity > 0) {
@@ -123,4 +124,4 @@ public:
}
};
-#endif
+#endif // LRU_H
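LRUCache now forwards a Hasher and Comparator to its internal HashMap and gains get_size(). A minimal sketch of the extended signature; the template arguments spelled out below are simply the defaults:

// Illustrative only; the explicit template arguments are the defaults.
LRUCache<String, int, HashMapHasherDefault, HashMapComparatorDefault<String>> cache;

void cache_demo() {
	cache.set_capacity(64);
	cache.insert("speed", 10);
	if (cache.has("speed")) {
		print_line(itos(cache.get("speed")));
	}
	print_line(itos(cache.get_size())); // New accessor: number of entries currently held.
}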
diff --git a/core/templates/ordered_hash_map.h b/core/templates/ordered_hash_map.h
deleted file mode 100644
index 3d1f3a08ec..0000000000
--- a/core/templates/ordered_hash_map.h
+++ /dev/null
@@ -1,301 +0,0 @@
-/*************************************************************************/
-/* ordered_hash_map.h */
-/*************************************************************************/
-/* This file is part of: */
-/* GODOT ENGINE */
-/* https://godotengine.org */
-/*************************************************************************/
-/* Copyright (c) 2007-2022 Juan Linietsky, Ariel Manzur. */
-/* Copyright (c) 2014-2022 Godot Engine contributors (cf. AUTHORS.md). */
-/* */
-/* Permission is hereby granted, free of charge, to any person obtaining */
-/* a copy of this software and associated documentation files (the */
-/* "Software"), to deal in the Software without restriction, including */
-/* without limitation the rights to use, copy, modify, merge, publish, */
-/* distribute, sublicense, and/or sell copies of the Software, and to */
-/* permit persons to whom the Software is furnished to do so, subject to */
-/* the following conditions: */
-/* */
-/* The above copyright notice and this permission notice shall be */
-/* included in all copies or substantial portions of the Software. */
-/* */
-/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
-/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
-/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/
-/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
-/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
-/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
-/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
-/*************************************************************************/
-
-#ifndef ORDERED_HASH_MAP_H
-#define ORDERED_HASH_MAP_H
-
-#include "core/templates/hash_map.h"
-#include "core/templates/list.h"
-#include "core/templates/pair.h"
-
-/**
- * A hash map which allows to iterate elements in insertion order.
- * Insertion, lookup, deletion have O(1) complexity.
- * The API aims to be consistent with Map rather than HashMap, because the
- * former is more frequently used and is more coherent with the rest of the
- * codebase.
- * Deletion during iteration is safe and will preserve the order.
- */
-template <class K, class V, class Hasher = HashMapHasherDefault, class Comparator = HashMapComparatorDefault<K>, uint8_t MIN_HASH_TABLE_POWER = 3, uint8_t RELATIONSHIP = 8>
-class OrderedHashMap {
- typedef List<Pair<const K *, V>> InternalList;
- typedef HashMap<K, typename InternalList::Element *, Hasher, Comparator, MIN_HASH_TABLE_POWER, RELATIONSHIP> InternalMap;
-
- InternalList list;
- InternalMap map;
-
-public:
- class Element {
- friend class OrderedHashMap<K, V, Hasher, Comparator, MIN_HASH_TABLE_POWER, RELATIONSHIP>;
-
- typename InternalList::Element *list_element = nullptr;
- typename InternalList::Element *prev_element = nullptr;
- typename InternalList::Element *next_element = nullptr;
-
- Element(typename InternalList::Element *p_element) {
- list_element = p_element;
-
- if (list_element) {
- next_element = list_element->next();
- prev_element = list_element->prev();
- }
- }
-
- public:
- _FORCE_INLINE_ Element() {}
-
- Element next() const {
- return Element(next_element);
- }
-
- Element prev() const {
- return Element(prev_element);
- }
-
- Element(const Element &other) :
- list_element(other.list_element),
- prev_element(other.prev_element),
- next_element(other.next_element) {
- }
-
- void operator=(const Element &other) {
- list_element = other.list_element;
- next_element = other.next_element;
- prev_element = other.prev_element;
- }
-
- _FORCE_INLINE_ bool operator==(const Element &p_other) const {
- return this->list_element == p_other.list_element;
- }
- _FORCE_INLINE_ bool operator!=(const Element &p_other) const {
- return this->list_element != p_other.list_element;
- }
-
- operator bool() const {
- return (list_element != nullptr);
- }
-
- const K &key() const {
- CRASH_COND(!list_element);
- return *(list_element->get().first);
- }
-
- V &value() {
- CRASH_COND(!list_element);
- return list_element->get().second;
- }
-
- const V &value() const {
- CRASH_COND(!list_element);
- return list_element->get().second;
- }
-
- V &get() {
- CRASH_COND(!list_element);
- return list_element->get().second;
- }
-
- const V &get() const {
- CRASH_COND(!list_element);
- return list_element->get().second;
- }
- };
-
- class ConstElement {
- friend class OrderedHashMap<K, V, Hasher, Comparator, MIN_HASH_TABLE_POWER, RELATIONSHIP>;
-
- const typename InternalList::Element *list_element = nullptr;
-
- ConstElement(const typename InternalList::Element *p_element) :
- list_element(p_element) {
- }
-
- public:
- _FORCE_INLINE_ ConstElement() {}
-
- ConstElement(const ConstElement &other) :
- list_element(other.list_element) {
- }
-
- void operator=(const ConstElement &other) {
- list_element = other.list_element;
- }
-
- ConstElement next() const {
- return ConstElement(list_element ? list_element->next() : nullptr);
- }
-
- ConstElement prev() const {
- return ConstElement(list_element ? list_element->prev() : nullptr);
- }
-
- _FORCE_INLINE_ bool operator==(const ConstElement &p_other) const {
- return this->list_element == p_other.list_element;
- }
- _FORCE_INLINE_ bool operator!=(const ConstElement &p_other) const {
- return this->list_element != p_other.list_element;
- }
-
- operator bool() const {
- return (list_element != nullptr);
- }
-
- const K &key() const {
- CRASH_COND(!list_element);
- return *(list_element->get().first);
- }
-
- const V &value() const {
- CRASH_COND(!list_element);
- return list_element->get().second;
- }
-
- const V &get() const {
- CRASH_COND(!list_element);
- return list_element->get().second;
- }
- };
-
- ConstElement find(const K &p_key) const {
- typename InternalList::Element *const *list_element = map.getptr(p_key);
- if (list_element) {
- return ConstElement(*list_element);
- }
- return ConstElement(nullptr);
- }
-
- Element find(const K &p_key) {
- typename InternalList::Element **list_element = map.getptr(p_key);
- if (list_element) {
- return Element(*list_element);
- }
- return Element(nullptr);
- }
-
- Element insert(const K &p_key, const V &p_value) {
- typename InternalList::Element **list_element = map.getptr(p_key);
- if (list_element) {
- (*list_element)->get().second = p_value;
- return Element(*list_element);
- }
- // Incorrectly set the first value of the pair with a value that will
- // be invalid as soon as we leave this function...
- typename InternalList::Element *new_element = list.push_back(Pair<const K *, V>(&p_key, p_value));
- // ...this is needed here in case the hashmap recursively reference itself...
- typename InternalMap::Element *e = map.set(p_key, new_element);
- // ...now we can set the right value !
- new_element->get().first = &e->key();
-
- return Element(new_element);
- }
-
- void erase(Element &p_element) {
- map.erase(p_element.key());
- list.erase(p_element.list_element);
- p_element.list_element = nullptr;
- }
-
- bool erase(const K &p_key) {
- typename InternalList::Element **list_element = map.getptr(p_key);
- if (list_element) {
- list.erase(*list_element);
- map.erase(p_key);
- return true;
- }
- return false;
- }
-
- inline bool has(const K &p_key) const {
- return map.has(p_key);
- }
-
- const V &operator[](const K &p_key) const {
- ConstElement e = find(p_key);
- CRASH_COND(!e);
- return e.value();
- }
-
- V &operator[](const K &p_key) {
- Element e = find(p_key);
- if (!e) {
- // consistent with Map behaviour
- e = insert(p_key, V());
- }
- return e.value();
- }
-
- inline Element front() {
- return Element(list.front());
- }
-
- inline Element back() {
- return Element(list.back());
- }
-
- inline ConstElement front() const {
- return ConstElement(list.front());
- }
-
- inline ConstElement back() const {
- return ConstElement(list.back());
- }
-
- inline bool is_empty() const { return list.is_empty(); }
- inline int size() const { return list.size(); }
-
- const void *id() const {
- return list.id();
- }
-
- void clear() {
- map.clear();
- list.clear();
- }
-
-private:
- void _copy_from(const OrderedHashMap &p_map) {
- for (ConstElement E = p_map.front(); E; E = E.next()) {
- insert(E.key(), E.value());
- }
- }
-
-public:
- void operator=(const OrderedHashMap &p_map) {
- _copy_from(p_map);
- }
-
- OrderedHashMap(const OrderedHashMap &p_map) {
- _copy_from(p_map);
- }
-
- _FORCE_INLINE_ OrderedHashMap() {}
-};
-
-#endif // ORDERED_HASH_MAP_H
diff --git a/core/templates/paged_allocator.h b/core/templates/paged_allocator.h
index b9067e2edd..43aab052fd 100644
--- a/core/templates/paged_allocator.h
+++ b/core/templates/paged_allocator.h
@@ -31,11 +31,14 @@
#ifndef PAGED_ALLOCATOR_H
#define PAGED_ALLOCATOR_H
+#include "core/core_globals.h"
#include "core/os/memory.h"
#include "core/os/spin_lock.h"
+#include "core/string/ustring.h"
#include "core/typedefs.h"
#include <type_traits>
+#include <typeinfo>
template <class T, bool thread_safe = false>
class PagedAllocator {
@@ -50,6 +53,10 @@ class PagedAllocator {
SpinLock spin_lock;
public:
+ enum {
+ DEFAULT_PAGE_SIZE = 4096
+ };
+
template <class... Args>
T *alloc(const Args &&...p_args) {
if (thread_safe) {
@@ -121,12 +128,19 @@ public:
page_shift = get_shift_from_power_of_2(page_size);
}
- PagedAllocator(uint32_t p_page_size = 4096) { // power of 2 recommended because of alignment with OS page sizes. Even if element is bigger, its still a multiple and get rounded amount of pages
+ // Power of 2 recommended because of alignment with OS page sizes.
+ // Even if the element is bigger, it is still a multiple and gets rounded up to a whole number of pages.
+ PagedAllocator(uint32_t p_page_size = DEFAULT_PAGE_SIZE) {
configure(p_page_size);
}
~PagedAllocator() {
- ERR_FAIL_COND_MSG(allocs_available < pages_allocated * page_size, "Pages in use exist at exit in PagedAllocator");
+ if (allocs_available < pages_allocated * page_size) {
+ if (CoreGlobals::leak_reporting_enabled) {
+ ERR_FAIL_COND_MSG(allocs_available < pages_allocated * page_size, String("Pages in use exist at exit in PagedAllocator: ") + String(typeid(T).name()));
+ }
+ return;
+ }
reset();
}
};
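The page size is now a named default and leaks at exit are reported with the element's type name. A minimal sketch, with Particle as a hypothetical element type:

// Particle is a hypothetical element type used only for illustration.
struct Particle {
	float position[3] = {};
	float velocity[3] = {};
};

PagedAllocator<Particle> particles; // Uses DEFAULT_PAGE_SIZE (4096 elements per page).
PagedAllocator<Particle, true> shared_particles(1024); // Thread-safe, with a smaller page size.

void allocator_demo() {
	Particle *p = particles.alloc();
	particles.free(p); // Anything still allocated at exit is now reported with typeid(T).name().
}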
diff --git a/core/templates/paged_array.h b/core/templates/paged_array.h
index 33d2757bec..f1ede556e6 100644
--- a/core/templates/paged_array.h
+++ b/core/templates/paged_array.h
@@ -35,6 +35,8 @@
#include "core/os/spin_lock.h"
#include "core/typedefs.h"
+#include <type_traits>
+
// PagedArray is used mainly for filling a very large array from multiple threads efficiently and without causing major fragmentation
// PagedArrayPool manages central page allocation in a thread-safe manner
@@ -197,7 +199,7 @@ public:
uint32_t page = count >> page_size_shift;
uint32_t offset = count & page_size_mask;
- if (!__has_trivial_constructor(T)) {
+ if (!std::is_trivially_constructible<T>::value) {
memnew_placement(&page_data[page][offset], T(p_value));
} else {
page_data[page][offset] = p_value;
@@ -209,7 +211,7 @@ public:
_FORCE_INLINE_ void pop_back() {
ERR_FAIL_COND(count == 0);
- if (!__has_trivial_destructor(T)) {
+ if (!std::is_trivially_destructible<T>::value) {
uint32_t page = (count - 1) >> page_size_shift;
uint32_t offset = (count - 1) & page_size_mask;
page_data[page][offset].~T();
@@ -226,7 +228,7 @@ public:
void clear() {
//destruct if needed
- if (!__has_trivial_destructor(T)) {
+ if (!std::is_trivially_destructible<T>::value) {
for (uint64_t i = 0; i < count; i++) {
uint32_t page = i >> page_size_shift;
uint32_t offset = i & page_size_mask;
@@ -309,13 +311,13 @@ public:
uint32_t to_copy = MIN(page_size - new_remainder, remainder);
for (uint32_t i = 0; i < to_copy; i++) {
- if (!__has_trivial_constructor(T)) {
+ if (!std::is_trivially_constructible<T>::value) {
memnew_placement(&dst_page[i + new_remainder], T(remainder_page[i + remainder - to_copy]));
} else {
dst_page[i + new_remainder] = remainder_page[i + remainder - to_copy];
}
- if (!__has_trivial_destructor(T)) {
+ if (!std::is_trivially_destructible<T>::value) {
remainder_page[i + remainder - to_copy].~T();
}
}
diff --git a/core/templates/pair.h b/core/templates/pair.h
index eb86e21b03..6d33213fe3 100644
--- a/core/templates/pair.h
+++ b/core/templates/pair.h
@@ -31,8 +31,8 @@
#ifndef PAIR_H
#define PAIR_H
+#include "core/templates/hashfuncs.h"
#include "core/typedefs.h"
-
template <class F, class S>
struct Pair {
F first;
@@ -69,6 +69,15 @@ struct PairSort {
}
};
+template <class F, class S>
+struct PairHash {
+ static uint32_t hash(const Pair<F, S> &P) {
+ uint64_t h1 = HashMapHasherDefault::hash(P.first);
+ uint64_t h2 = HashMapHasherDefault::hash(P.second);
+ return hash_one_uint64((h1 << 32) | h2);
+ }
+};
+
template <class K, class V>
struct KeyValue {
const K key;
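PairHash combines the default hashes of both members, which lets a Pair serve as a hash map key. A minimal usage sketch, with the key and value types chosen only for illustration:

// Illustrative only; the key and value types are arbitrary.
HashMap<Pair<int, StringName>, float, PairHash<int, StringName>> weights;

void pair_key_demo() {
	weights.insert(Pair<int, StringName>(2, StringName("run")), 0.5f);
}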
diff --git a/core/templates/map.h b/core/templates/rb_map.h
index c54da1dc03..3393e6dd3e 100644
--- a/core/templates/map.h
+++ b/core/templates/rb_map.h
@@ -1,5 +1,5 @@
/*************************************************************************/
-/* map.h */
+/* rb_map.h */
/*************************************************************************/
/* This file is part of: */
/* GODOT ENGINE */
@@ -28,8 +28,8 @@
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
/*************************************************************************/
-#ifndef MAP_H
-#define MAP_H
+#ifndef RB_MAP_H
+#define RB_MAP_H
#include "core/error/error_macros.h"
#include "core/os/memory.h"
@@ -39,7 +39,7 @@
// https://web.archive.org/web/20120507164830/https://web.mit.edu/~emin/www/source_code/red_black_tree/index.html
template <class K, class V, class C = Comparator<K>, class A = DefaultAllocator>
-class Map {
+class RBMap {
enum Color {
RED,
BLACK
@@ -49,7 +49,7 @@ class Map {
public:
class Element {
private:
- friend class Map<K, V, C, A>;
+ friend class RBMap<K, V, C, A>;
int color = RED;
Element *right = nullptr;
Element *left = nullptr;
@@ -111,7 +111,9 @@ public:
_FORCE_INLINE_ bool operator==(const Iterator &b) const { return E == b.E; }
_FORCE_INLINE_ bool operator!=(const Iterator &b) const { return E != b.E; }
-
+ explicit operator bool() const {
+ return E != nullptr;
+ }
Iterator(Element *p_E) { E = p_E; }
Iterator() {}
Iterator(const Iterator &p_it) { E = p_it.E; }
@@ -136,7 +138,9 @@ public:
_FORCE_INLINE_ bool operator==(const ConstIterator &b) const { return E == b.E; }
_FORCE_INLINE_ bool operator!=(const ConstIterator &b) const { return E != b.E; }
-
+ explicit operator bool() const {
+ return E != nullptr;
+ }
ConstIterator(const Element *p_E) { E = p_E; }
ConstIterator() {}
ConstIterator(const ConstIterator &p_it) { E = p_it.E; }
@@ -572,7 +576,7 @@ private:
memdelete_allocator<Element, A>(p_element);
}
- void _copy_from(const Map &p_map) {
+ void _copy_from(const RBMap &p_map) {
clear();
// not the fastest way, but safest to write.
for (Element *I = p_map.front(); I; I = I->next()) {
@@ -710,8 +714,12 @@ public:
return e;
}
- inline bool is_empty() const { return _data.size_cache == 0; }
- inline int size() const { return _data.size_cache; }
+ inline bool is_empty() const {
+ return _data.size_cache == 0;
+ }
+ inline int size() const {
+ return _data.size_cache;
+ }
int calculate_depth() const {
// used for debug mostly
@@ -735,19 +743,19 @@ public:
_data._free_root();
}
- void operator=(const Map &p_map) {
+ void operator=(const RBMap &p_map) {
_copy_from(p_map);
}
- Map(const Map &p_map) {
+ RBMap(const RBMap &p_map) {
_copy_from(p_map);
}
- _FORCE_INLINE_ Map() {}
+ _FORCE_INLINE_ RBMap() {}
- ~Map() {
+ ~RBMap() {
clear();
}
};
-#endif // MAP_H
+#endif // RB_MAP_H
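The rename from Map to RBMap keeps the red-black tree API intact, so call sites only swap the type name; the iterators additionally gain an explicit bool conversion. A minimal migration sketch:

// Illustrative only: the container behaves exactly as before.
RBMap<String, int> counters; // Was: Map<String, int>.

void rb_map_demo() {
	counters.insert("jump", 1);
	RBMap<String, int>::Element *E = counters.find("jump");
	if (E) {
		E->value()++;
	}
}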
diff --git a/core/templates/set.h b/core/templates/rb_set.h
index a8a0a77712..e87ea544fd 100644
--- a/core/templates/set.h
+++ b/core/templates/rb_set.h
@@ -1,5 +1,5 @@
/*************************************************************************/
-/* set.h */
+/* rb_set.h */
/*************************************************************************/
/* This file is part of: */
/* GODOT ENGINE */
@@ -28,8 +28,8 @@
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
/*************************************************************************/
-#ifndef SET_H
-#define SET_H
+#ifndef RB_SET_H
+#define RB_SET_H
#include "core/os/memory.h"
#include "core/typedefs.h"
@@ -38,7 +38,7 @@
// https://web.archive.org/web/20120507164830/https://web.mit.edu/~emin/www/source_code/red_black_tree/index.html
template <class T, class C = Comparator<T>, class A = DefaultAllocator>
-class Set {
+class RBSet {
enum Color {
RED,
BLACK
@@ -48,7 +48,7 @@ class Set {
public:
class Element {
private:
- friend class Set<T, C, A>;
+ friend class RBSet<T, C, A>;
int color = RED;
Element *right = nullptr;
Element *left = nullptr;
@@ -99,6 +99,7 @@ public:
_FORCE_INLINE_ bool operator==(const Iterator &b) const { return E == b.E; }
_FORCE_INLINE_ bool operator!=(const Iterator &b) const { return E != b.E; }
+ explicit operator bool() const { return E != nullptr; }
Iterator(Element *p_E) { E = p_E; }
Iterator() {}
Iterator(const Iterator &p_it) { E = p_it.E; }
@@ -128,6 +129,8 @@ public:
_FORCE_INLINE_ ConstIterator() {}
_FORCE_INLINE_ ConstIterator(const ConstIterator &p_it) { E = p_it.E; }
+ explicit operator bool() const { return E != nullptr; }
+
private:
const Element *E = nullptr;
};
@@ -554,7 +557,7 @@ private:
memdelete_allocator<Element, A>(p_element);
}
- void _copy_from(const Set &p_set) {
+ void _copy_from(const RBSet &p_set) {
clear();
// not the fastest way, but safest to write.
for (Element *I = p_set.front(); I; I = I->next()) {
@@ -661,8 +664,12 @@ public:
return e;
}
- inline bool is_empty() const { return _data.size_cache == 0; }
- inline int size() const { return _data.size_cache; }
+ inline bool is_empty() const {
+ return _data.size_cache == 0;
+ }
+ inline int size() const {
+ return _data.size_cache;
+ }
int calculate_depth() const {
// used for debug mostly
@@ -686,19 +693,19 @@ public:
_data._free_root();
}
- void operator=(const Set &p_set) {
+ void operator=(const RBSet &p_set) {
_copy_from(p_set);
}
- Set(const Set &p_set) {
+ RBSet(const RBSet &p_set) {
_copy_from(p_set);
}
- _FORCE_INLINE_ Set() {}
+ _FORCE_INLINE_ RBSet() {}
- ~Set() {
+ ~RBSet() {
clear();
}
};
-#endif // SET_H
+#endif // RB_SET_H
diff --git a/core/templates/rid_owner.h b/core/templates/rid_owner.h
index 95632cdec2..320faebe98 100644
--- a/core/templates/rid_owner.h
+++ b/core/templates/rid_owner.h
@@ -34,11 +34,11 @@
#include "core/os/memory.h"
#include "core/os/spin_lock.h"
#include "core/string/print_string.h"
+#include "core/templates/hash_set.h"
#include "core/templates/list.h"
#include "core/templates/oa_hash_map.h"
#include "core/templates/rid.h"
#include "core/templates/safe_refcount.h"
-#include "core/templates/set.h"
#include <stdio.h>
#include <typeinfo>
@@ -79,7 +79,7 @@ class RID_Alloc : public RID_AllocBase {
const char *description = nullptr;
- SpinLock spin_lock;
+ mutable SpinLock spin_lock;
_FORCE_INLINE_ RID _allocate_rid() {
if (THREAD_SAFE) {
@@ -220,7 +220,7 @@ public:
memnew_placement(mem, T(p_value));
}
- _FORCE_INLINE_ bool owns(const RID &p_rid) {
+ _FORCE_INLINE_ bool owns(const RID &p_rid) const {
if (THREAD_SAFE) {
spin_lock.lock();
}
@@ -292,7 +292,7 @@ public:
_FORCE_INLINE_ uint32_t get_rid_count() const {
return alloc_count;
}
- void get_owned_list(List<RID> *p_owned) {
+ void get_owned_list(List<RID> *p_owned) const {
if (THREAD_SAFE) {
spin_lock.lock();
}
@@ -308,7 +308,7 @@ public:
}
// Used for fast iteration over the elements or RIDs
- void fill_owned_buffer(RID *p_rid_buffer) {
+ void fill_owned_buffer(RID *p_rid_buffer) const {
if (THREAD_SAFE) {
spin_lock.lock();
}
@@ -402,7 +402,7 @@ public:
*ptr = p_new_ptr;
}
- _FORCE_INLINE_ bool owns(const RID &p_rid) {
+ _FORCE_INLINE_ bool owns(const RID &p_rid) const {
return alloc.owns(p_rid);
}
@@ -414,11 +414,11 @@ public:
return alloc.get_rid_count();
}
- _FORCE_INLINE_ void get_owned_list(List<RID> *p_owned) {
+ _FORCE_INLINE_ void get_owned_list(List<RID> *p_owned) const {
return alloc.get_owned_list(p_owned);
}
- void fill_owned_buffer(RID *p_rid_buffer) {
+ void fill_owned_buffer(RID *p_rid_buffer) const {
alloc.fill_owned_buffer(p_rid_buffer);
}
@@ -458,7 +458,7 @@ public:
return alloc.get_or_null(p_rid);
}
- _FORCE_INLINE_ bool owns(const RID &p_rid) {
+ _FORCE_INLINE_ bool owns(const RID &p_rid) const {
return alloc.owns(p_rid);
}
@@ -470,10 +470,10 @@ public:
return alloc.get_rid_count();
}
- _FORCE_INLINE_ void get_owned_list(List<RID> *p_owned) {
+ _FORCE_INLINE_ void get_owned_list(List<RID> *p_owned) const {
return alloc.get_owned_list(p_owned);
}
- void fill_owned_buffer(RID *p_rid_buffer) {
+ void fill_owned_buffer(RID *p_rid_buffer) const {
alloc.fill_owned_buffer(p_rid_buffer);
}
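owns(), get_owned_list() and fill_owned_buffer() are now const, backed by the mutable SpinLock, so read-only inspection can go through a const reference. A minimal sketch, with MyResource as a hypothetical stored type:

// MyResource is a hypothetical stored type.
struct MyResource {
	int size = 0;
};

void report_orphans(const RID_Owner<MyResource> &p_owner) {
	List<RID> owned;
	p_owner.get_owned_list(&owned); // Now legal through a const reference.
	print_line(vformat("RIDs still allocated: %d", owned.size()));
}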
diff --git a/core/templates/safe_refcount.h b/core/templates/safe_refcount.h
index 76f76be96a..1f6551762e 100644
--- a/core/templates/safe_refcount.h
+++ b/core/templates/safe_refcount.h
@@ -111,7 +111,8 @@ public:
if (tmp >= p_value) {
return tmp; // already greater, or equal
}
- if (value.compare_exchange_weak(tmp, p_value, std::memory_order_release)) {
+
+ if (value.compare_exchange_weak(tmp, p_value, std::memory_order_acq_rel)) {
return p_value;
}
}
@@ -123,7 +124,7 @@ public:
if (c == 0) {
return 0;
}
- if (value.compare_exchange_weak(c, c + 1, std::memory_order_release)) {
+ if (value.compare_exchange_weak(c, c + 1, std::memory_order_acq_rel)) {
return c + 1;
}
}
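The success ordering of these compare-exchange loops is strengthened from release to acq_rel, presumably so the read-modify-write also acquires the value published by other threads instead of only releasing its own store. A minimal sketch of the conditional-increment pattern with the stronger ordering, assuming <atomic> is in scope as it is in this header:

// Illustrative sketch of the conditional-increment pattern with acq_rel ordering.
static uint32_t increment_if_nonzero(std::atomic<uint32_t> &p_value) {
	uint32_t c = p_value.load(std::memory_order_acquire);
	while (c != 0) {
		if (p_value.compare_exchange_weak(c, c + 1, std::memory_order_acq_rel)) {
			return c + 1;
		}
	}
	return 0;
}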
diff --git a/core/templates/thread_work_pool.cpp b/core/templates/thread_work_pool.cpp
deleted file mode 100644
index a75fd06b9b..0000000000
--- a/core/templates/thread_work_pool.cpp
+++ /dev/null
@@ -1,81 +0,0 @@
-/*************************************************************************/
-/* thread_work_pool.cpp */
-/*************************************************************************/
-/* This file is part of: */
-/* GODOT ENGINE */
-/* https://godotengine.org */
-/*************************************************************************/
-/* Copyright (c) 2007-2022 Juan Linietsky, Ariel Manzur. */
-/* Copyright (c) 2014-2022 Godot Engine contributors (cf. AUTHORS.md). */
-/* */
-/* Permission is hereby granted, free of charge, to any person obtaining */
-/* a copy of this software and associated documentation files (the */
-/* "Software"), to deal in the Software without restriction, including */
-/* without limitation the rights to use, copy, modify, merge, publish, */
-/* distribute, sublicense, and/or sell copies of the Software, and to */
-/* permit persons to whom the Software is furnished to do so, subject to */
-/* the following conditions: */
-/* */
-/* The above copyright notice and this permission notice shall be */
-/* included in all copies or substantial portions of the Software. */
-/* */
-/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
-/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
-/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/
-/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
-/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
-/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
-/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
-/*************************************************************************/
-
-#include "thread_work_pool.h"
-
-#include "core/os/os.h"
-
-void ThreadWorkPool::_thread_function(void *p_user) {
- ThreadData *thread = static_cast<ThreadData *>(p_user);
- while (true) {
- thread->start.wait();
- if (thread->exit.load()) {
- break;
- }
- thread->work->work();
- thread->completed.post();
- }
-}
-
-void ThreadWorkPool::init(int p_thread_count) {
- ERR_FAIL_COND(threads != nullptr);
- if (p_thread_count < 0) {
- p_thread_count = OS::get_singleton()->get_default_thread_pool_size();
- }
-
- thread_count = p_thread_count;
- threads = memnew_arr(ThreadData, thread_count);
-
- for (uint32_t i = 0; i < thread_count; i++) {
- threads[i].exit.store(false);
- threads[i].thread.start(&ThreadWorkPool::_thread_function, &threads[i]);
- }
-}
-
-void ThreadWorkPool::finish() {
- if (threads == nullptr) {
- return;
- }
-
- for (uint32_t i = 0; i < thread_count; i++) {
- threads[i].exit.store(true);
- threads[i].start.post();
- }
- for (uint32_t i = 0; i < thread_count; i++) {
- threads[i].thread.wait_to_finish();
- }
-
- memdelete_arr(threads);
- threads = nullptr;
-}
-
-ThreadWorkPool::~ThreadWorkPool() {
- finish();
-}
diff --git a/core/templates/thread_work_pool.h b/core/templates/thread_work_pool.h
deleted file mode 100644
index b0cebf04f1..0000000000
--- a/core/templates/thread_work_pool.h
+++ /dev/null
@@ -1,157 +0,0 @@
-/*************************************************************************/
-/* thread_work_pool.h */
-/*************************************************************************/
-/* This file is part of: */
-/* GODOT ENGINE */
-/* https://godotengine.org */
-/*************************************************************************/
-/* Copyright (c) 2007-2022 Juan Linietsky, Ariel Manzur. */
-/* Copyright (c) 2014-2022 Godot Engine contributors (cf. AUTHORS.md). */
-/* */
-/* Permission is hereby granted, free of charge, to any person obtaining */
-/* a copy of this software and associated documentation files (the */
-/* "Software"), to deal in the Software without restriction, including */
-/* without limitation the rights to use, copy, modify, merge, publish, */
-/* distribute, sublicense, and/or sell copies of the Software, and to */
-/* permit persons to whom the Software is furnished to do so, subject to */
-/* the following conditions: */
-/* */
-/* The above copyright notice and this permission notice shall be */
-/* included in all copies or substantial portions of the Software. */
-/* */
-/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
-/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
-/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/
-/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
-/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
-/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
-/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
-/*************************************************************************/
-
-#ifndef THREAD_WORK_POOL_H
-#define THREAD_WORK_POOL_H
-
-#include "core/os/memory.h"
-#include "core/os/semaphore.h"
-#include "core/os/thread.h"
-
-#include <atomic>
-
-class ThreadWorkPool {
- std::atomic<uint32_t> index;
-
- struct BaseWork {
- std::atomic<uint32_t> *index = nullptr;
- uint32_t max_elements = 0;
- virtual void work() = 0;
- virtual ~BaseWork() = default;
- };
-
- template <class C, class M, class U>
- struct Work : public BaseWork {
- C *instance;
- M method;
- U userdata;
- virtual void work() override {
- while (true) {
- uint32_t work_index = index->fetch_add(1, std::memory_order_relaxed);
- if (work_index >= max_elements) {
- break;
- }
- (instance->*method)(work_index, userdata);
- }
- }
- };
-
- struct ThreadData {
- Thread thread;
- Semaphore start;
- Semaphore completed;
- std::atomic<bool> exit;
- BaseWork *work = nullptr;
- };
-
- ThreadData *threads = nullptr;
- uint32_t thread_count = 0;
- uint32_t threads_working = 0;
- BaseWork *current_work = nullptr;
-
- static void _thread_function(void *p_user);
-
-public:
- template <class C, class M, class U>
- void begin_work(uint32_t p_elements, C *p_instance, M p_method, U p_userdata) {
- ERR_FAIL_COND(!threads); //never initialized
- ERR_FAIL_COND(current_work != nullptr);
-
- index.store(0, std::memory_order_release);
-
- Work<C, M, U> *w = memnew((Work<C, M, U>));
- w->instance = p_instance;
- w->userdata = p_userdata;
- w->method = p_method;
- w->index = &index;
- w->max_elements = p_elements;
-
- current_work = w;
-
- threads_working = MIN(p_elements, thread_count);
-
- for (uint32_t i = 0; i < threads_working; i++) {
- threads[i].work = w;
- threads[i].start.post();
- }
- }
-
- bool is_working() const {
- return current_work != nullptr;
- }
-
- bool is_done_dispatching() const {
- ERR_FAIL_COND_V(current_work == nullptr, true);
- return index.load(std::memory_order_acquire) >= current_work->max_elements;
- }
-
- uint32_t get_work_index() const {
- ERR_FAIL_COND_V(current_work == nullptr, 0);
- uint32_t idx = index.load(std::memory_order_acquire);
- return MIN(idx, current_work->max_elements);
- }
-
- void end_work() {
- ERR_FAIL_COND(current_work == nullptr);
- for (uint32_t i = 0; i < threads_working; i++) {
- threads[i].completed.wait();
- threads[i].work = nullptr;
- }
-
- threads_working = 0;
- memdelete(current_work);
- current_work = nullptr;
- }
-
- template <class C, class M, class U>
- void do_work(uint32_t p_elements, C *p_instance, M p_method, U p_userdata) {
- switch (p_elements) {
- case 0:
- // Nothing to do, so do nothing.
- break;
- case 1:
- // No value in pushing the work to another thread if it's a single job
- // and we're going to wait for it to finish. Just run it right here.
- (p_instance->*p_method)(0, p_userdata);
- break;
- default:
- // Multiple jobs to do; commence threaded business.
- begin_work(p_elements, p_instance, p_method, p_userdata);
- end_work();
- }
- }
-
- _FORCE_INLINE_ int get_thread_count() const { return thread_count; }
- void init(int p_thread_count = -1);
- void finish();
- ~ThreadWorkPool();
-};
-
-#endif // THREAD_POOL_H
diff --git a/core/templates/vector.h b/core/templates/vector.h
index d87e76139b..51595a75f5 100644
--- a/core/templates/vector.h
+++ b/core/templates/vector.h
@@ -89,9 +89,12 @@ public:
_FORCE_INLINE_ void set(int p_index, const T &p_elem) { _cowdata.set(p_index, p_elem); }
_FORCE_INLINE_ int size() const { return _cowdata.size(); }
Error resize(int p_size) { return _cowdata.resize(p_size); }
+ Error resize_zeroed(int p_size) { return _cowdata.template resize<true>(p_size); }
_FORCE_INLINE_ const T &operator[](int p_index) const { return _cowdata.get(p_index); }
Error insert(int p_pos, T p_val) { return _cowdata.insert(p_pos, p_val); }
int find(const T &p_val, int p_from = 0) const { return _cowdata.find(p_val, p_from); }
+ int rfind(const T &p_val, int p_from = -1) const { return _cowdata.rfind(p_val, p_from); }
+ int count(const T &p_val) const { return _cowdata.count(p_val); }
void append_array(Vector<T> p_other);
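resize_zeroed() zero-fills the newly added elements, and rfind()/count() mirror the existing find(). A minimal sketch of the new helpers; the expected output is noted in the comments:

// Illustrative only.
void vector_demo() {
	Vector<int> v;
	v.resize_zeroed(8); // Like resize(), but the new elements are zero-filled.
	v.set(3, 7);
	v.push_back(7);
	print_line(itos(v.count(7))); // Prints 2.
	print_line(itos(v.rfind(7))); // Prints 8 (searches backwards from the end by default).
}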
@@ -143,6 +146,9 @@ public:
Vector<uint8_t> to_byte_array() const {
Vector<uint8_t> ret;
+ if (is_empty()) {
+ return ret;
+ }
ret.resize(size() * sizeof(T));
memcpy(ret.ptrw(), ptr(), sizeof(T) * size());
return ret;