Diffstat (limited to 'core/templates')
 core/templates/cowdata.h         |  21
 core/templates/rid_owner.cpp     |   2
 core/templates/rid_owner.h       |   4
 core/templates/safe_refcount.cpp | 161
 core/templates/safe_refcount.h   | 342
 5 files changed, 244 insertions(+), 286 deletions(-)
diff --git a/core/templates/cowdata.h b/core/templates/cowdata.h
index 4becb1be59..6077f5f107 100644
--- a/core/templates/cowdata.h
+++ b/core/templates/cowdata.h
@@ -45,6 +45,9 @@ class CharString;
 template <class T, class V>
 class VMap;
 
+// CowData is relying on this to be true
+static_assert(sizeof(SafeNumeric<uint32_t>) == sizeof(uint32_t));
+
 template <class T>
 class CowData {
 	template <class TV>
@@ -60,12 +63,12 @@ private:
 
 	// internal helpers
 
-	_FORCE_INLINE_ uint32_t *_get_refcount() const {
+	_FORCE_INLINE_ SafeNumeric<uint32_t> *_get_refcount() const {
 		if (!_ptr) {
 			return nullptr;
 		}
 
-		return reinterpret_cast<uint32_t *>(_ptr) - 2;
+		return reinterpret_cast<SafeNumeric<uint32_t> *>(_ptr) - 2;
 	}
 
 	_FORCE_INLINE_ uint32_t *_get_size() const {
@@ -192,9 +195,9 @@ void CowData<T>::_unref(void *p_data) {
 		return;
 	}
 
-	uint32_t *refc = _get_refcount();
+	SafeNumeric<uint32_t> *refc = _get_refcount();
 
-	if (atomic_decrement(refc) > 0) {
+	if (refc->decrement() > 0) {
 		return; // still in use
 	}
 	// clean up
@@ -219,15 +222,15 @@ void CowData<T>::_copy_on_write() {
 		return;
 	}
 
-	uint32_t *refc = _get_refcount();
+	SafeNumeric<uint32_t> *refc = _get_refcount();
 
-	if (unlikely(*refc > 1)) {
+	if (unlikely(refc->get() > 1)) {
 		/* in use by more than me */
 		uint32_t current_size = *_get_size();
 
 		uint32_t *mem_new = (uint32_t *)Memory::alloc_static(_get_alloc_size(current_size), true);
 
-		*(mem_new - 2) = 1; //refcount
+		reinterpret_cast<SafeNumeric<uint32_t> *>(mem_new - 2)->set(1); //refcount
 		*(mem_new - 1) = current_size; //size
 
 		T *_data = (T *)(mem_new);
@@ -278,7 +281,7 @@ Error CowData<T>::resize(int p_size) {
 		uint32_t *ptr = (uint32_t *)Memory::alloc_static(alloc_size, true);
 		ERR_FAIL_COND_V(!ptr, ERR_OUT_OF_MEMORY);
 		*(ptr - 1) = 0; //size, currently none
-		*(ptr - 2) = 1; //refcount
+		reinterpret_cast<SafeNumeric<uint32_t> *>(ptr - 2)->set(1); //refcount
 
 		_ptr = (T *)ptr;
 
@@ -359,7 +362,7 @@ void CowData<T>::_ref(const CowData &p_from) {
 		return; //nothing to do
 	}
 
-	if (atomic_conditional_increment(p_from._get_refcount()) > 0) { // could reference
+	if (p_from._get_refcount()->increment() > 0) { // could reference
 		_ptr = p_from._ptr;
 	}
 }
diff --git a/core/templates/rid_owner.cpp b/core/templates/rid_owner.cpp
index f75a2eb9df..56f39ab779 100644
--- a/core/templates/rid_owner.cpp
+++ b/core/templates/rid_owner.cpp
@@ -30,4 +30,4 @@
 
 #include "rid_owner.h"
 
-volatile uint64_t RID_AllocBase::base_id = 1;
+SafeNumeric<uint64_t> RID_AllocBase::base_id{ 1 };
diff --git a/core/templates/rid_owner.h b/core/templates/rid_owner.h
index 3edc73b1a9..c4aa93c394 100644
--- a/core/templates/rid_owner.h
+++ b/core/templates/rid_owner.h
@@ -44,7 +44,7 @@
 #include <typeinfo>
 
 class RID_AllocBase {
-	static volatile uint64_t base_id;
+	static SafeNumeric<uint64_t> base_id;
 
 protected:
 	static RID _make_from_id(uint64_t p_id) {
@@ -54,7 +54,7 @@ protected:
 	}
 
 	static uint64_t _gen_id() {
-		return atomic_increment(&base_id);
+		return base_id.increment();
 	}
 
 	static RID _gen_rid() {
diff --git a/core/templates/safe_refcount.cpp b/core/templates/safe_refcount.cpp
deleted file mode 100644
index a915ee662f..0000000000
--- a/core/templates/safe_refcount.cpp
+++ /dev/null
@@ -1,161 +0,0 @@
-/*************************************************************************/
-/*  safe_refcount.cpp                                                    */
-/*************************************************************************/
-/*                       This file is part of:                           */
-/*                           GODOT ENGINE                                */
-/*                      https://godotengine.org                          */
-/*************************************************************************/
-/* Copyright (c) 2007-2021 Juan Linietsky, Ariel Manzur.                 */
-/* Copyright (c) 2014-2021 Godot Engine contributors (cf. AUTHORS.md).   */
-/*                                                                       */
-/* Permission is hereby granted, free of charge, to any person obtaining */
-/* a copy of this software and associated documentation files (the       */
-/* "Software"), to deal in the Software without restriction, including   */
-/* without limitation the rights to use, copy, modify, merge, publish,   */
-/* distribute, sublicense, and/or sell copies of the Software, and to    */
-/* permit persons to whom the Software is furnished to do so, subject to */
-/* the following conditions:                                             */
-/*                                                                       */
-/* The above copyright notice and this permission notice shall be        */
-/* included in all copies or substantial portions of the Software.       */
-/*                                                                       */
-/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,       */
-/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF    */
-/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/
-/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY  */
-/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,  */
-/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE     */
-/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.                */
-/*************************************************************************/
-
-#include "safe_refcount.h"
-
-#if defined(_MSC_VER)
-
-/* Implementation for MSVC-Windows */
-
-// don't pollute my namespace!
-#include <windows.h>
-
-#define ATOMIC_CONDITIONAL_INCREMENT_BODY(m_pw, m_win_type, m_win_cmpxchg, m_cpp_type) \
-	/* try to increment until it actually works */                               \
-	/* taken from boost */                                                       \
-	while (true) {                                                               \
-		m_cpp_type tmp = static_cast<m_cpp_type const volatile &>(*(m_pw));      \
-		if (tmp == 0) {                                                          \
-			return 0; /* if zero, can't add to it anymore */                     \
-		}                                                                        \
-		if (m_win_cmpxchg((m_win_type volatile *)(m_pw), tmp + 1, tmp) == tmp) { \
-			return tmp + 1;                                                      \
-		}                                                                        \
-	}
-
-#define ATOMIC_EXCHANGE_IF_GREATER_BODY(m_pw, m_val, m_win_type, m_win_cmpxchg, m_cpp_type) \
-	while (true) {                                                             \
-		m_cpp_type tmp = static_cast<m_cpp_type const volatile &>(*(m_pw));    \
-		if (tmp >= m_val) {                                                    \
-			return tmp; /* already greater, or equal */                        \
-		}                                                                      \
-		if (m_win_cmpxchg((m_win_type volatile *)(m_pw), m_val, tmp) == tmp) { \
-			return m_val;                                                      \
-		}                                                                      \
-	}
-
-_ALWAYS_INLINE_ uint32_t _atomic_conditional_increment_impl(volatile uint32_t *pw) {
-	ATOMIC_CONDITIONAL_INCREMENT_BODY(pw, LONG, InterlockedCompareExchange, uint32_t);
-}
-
-_ALWAYS_INLINE_ uint32_t _atomic_decrement_impl(volatile uint32_t *pw) {
-	return InterlockedDecrement((LONG volatile *)pw);
-}
-
-_ALWAYS_INLINE_ uint32_t _atomic_increment_impl(volatile uint32_t *pw) {
-	return InterlockedIncrement((LONG volatile *)pw);
-}
-
-_ALWAYS_INLINE_ uint32_t _atomic_sub_impl(volatile uint32_t *pw, volatile uint32_t val) {
-	return InterlockedExchangeAdd((LONG volatile *)pw, -(int32_t)val) - val;
-}
-
-_ALWAYS_INLINE_ uint32_t _atomic_add_impl(volatile uint32_t *pw, volatile uint32_t val) {
-	return InterlockedAdd((LONG volatile *)pw, val);
-}
-
-_ALWAYS_INLINE_ uint32_t _atomic_exchange_if_greater_impl(volatile uint32_t *pw, volatile uint32_t val) {
-	ATOMIC_EXCHANGE_IF_GREATER_BODY(pw, val, LONG, InterlockedCompareExchange, uint32_t);
-}
-
-_ALWAYS_INLINE_ uint64_t _atomic_conditional_increment_impl(volatile uint64_t *pw) {
-	ATOMIC_CONDITIONAL_INCREMENT_BODY(pw, LONGLONG, InterlockedCompareExchange64, uint64_t);
-}
-
-_ALWAYS_INLINE_ uint64_t _atomic_decrement_impl(volatile uint64_t *pw) {
-	return InterlockedDecrement64((LONGLONG volatile *)pw);
-}
-
-_ALWAYS_INLINE_ uint64_t _atomic_increment_impl(volatile uint64_t *pw) {
-	return InterlockedIncrement64((LONGLONG volatile *)pw);
-}
-
-_ALWAYS_INLINE_ uint64_t _atomic_sub_impl(volatile uint64_t *pw, volatile uint64_t val) {
-	return InterlockedExchangeAdd64((LONGLONG volatile *)pw, -(int64_t)val) - val;
-}
-
-_ALWAYS_INLINE_ uint64_t _atomic_add_impl(volatile uint64_t *pw, volatile uint64_t val) {
-	return InterlockedAdd64((LONGLONG volatile *)pw, val);
-}
-
-_ALWAYS_INLINE_ uint64_t _atomic_exchange_if_greater_impl(volatile uint64_t *pw, volatile uint64_t val) {
-	ATOMIC_EXCHANGE_IF_GREATER_BODY(pw, val, LONGLONG, InterlockedCompareExchange64, uint64_t);
-}
-
-// The actual advertised functions; they'll call the right implementation
-
-uint32_t atomic_conditional_increment(volatile uint32_t *pw) {
-	return _atomic_conditional_increment_impl(pw);
-}
-
-uint32_t atomic_decrement(volatile uint32_t *pw) {
-	return _atomic_decrement_impl(pw);
-}
-
-uint32_t atomic_increment(volatile uint32_t *pw) {
-	return _atomic_increment_impl(pw);
-}
-
-uint32_t atomic_sub(volatile uint32_t *pw, volatile uint32_t val) {
-	return _atomic_sub_impl(pw, val);
-}
-
-uint32_t atomic_add(volatile uint32_t *pw, volatile uint32_t val) {
-	return _atomic_add_impl(pw, val);
-}
-
-uint32_t atomic_exchange_if_greater(volatile uint32_t *pw, volatile uint32_t val) {
-	return _atomic_exchange_if_greater_impl(pw, val);
-}
-
-uint64_t atomic_conditional_increment(volatile uint64_t *pw) {
-	return _atomic_conditional_increment_impl(pw);
-}
-
-uint64_t atomic_decrement(volatile uint64_t *pw) {
-	return _atomic_decrement_impl(pw);
-}
-
-uint64_t atomic_increment(volatile uint64_t *pw) {
-	return _atomic_increment_impl(pw);
-}
-
-uint64_t atomic_sub(volatile uint64_t *pw, volatile uint64_t val) {
-	return _atomic_sub_impl(pw, val);
-}
-
-uint64_t atomic_add(volatile uint64_t *pw, volatile uint64_t val) {
-	return _atomic_add_impl(pw, val);
-}
-
-uint64_t atomic_exchange_if_greater(volatile uint64_t *pw, volatile uint64_t val) {
-	return _atomic_exchange_if_greater_impl(pw, val);
-}
-
-#endif
diff --git a/core/templates/safe_refcount.h b/core/templates/safe_refcount.h
index d94444fad6..6aebc24ec3 100644
--- a/core/templates/safe_refcount.h
+++ b/core/templates/safe_refcount.h
@@ -31,167 +31,281 @@
 #ifndef SAFE_REFCOUNT_H
 #define SAFE_REFCOUNT_H
 
-#include "core/os/mutex.h"
 #include "core/typedefs.h"
-#include "platform_config.h"
 
-// Atomic functions, these are used for multithread safe reference counters!
+#if !defined(NO_THREADS)
 
-#ifdef NO_THREADS
+#include <atomic>
 
-/* Bogus implementation unaware of multiprocessing */
+// Design goals for these classes:
+// - No automatic conversions or arithmetic operators,
+//   to keep explicit the use of atomics everywhere.
+// - Using acquire-release semantics, even to set the first value.
+//   The first value may be set relaxedly in many cases, but adding the distinction
+//   between relaxed and unrelaxed operation to the interface would make it needlessly
+//   flexible. There's negligible waste in having release semantics for the initial
+//   value and, as an important benefit, you can be sure the value is properly synchronized
+//   even with threads that are already running.
 
 template <class T>
-static _ALWAYS_INLINE_ T atomic_conditional_increment(volatile T *pw) {
-	if (*pw == 0) {
-		return 0;
+class SafeNumeric {
+	std::atomic<T> value;
+
+public:
+	_ALWAYS_INLINE_ void set(T p_value) {
+		value.store(p_value, std::memory_order_release);
 	}
 
-	(*pw)++;
+	_ALWAYS_INLINE_ T get() const {
+		return value.load(std::memory_order_acquire);
+	}
 
-	return *pw;
-}
+	_ALWAYS_INLINE_ T increment() {
+		return value.fetch_add(1, std::memory_order_acq_rel) + 1;
+	}
 
-template <class T>
-static _ALWAYS_INLINE_ T atomic_decrement(volatile T *pw) {
-	(*pw)--;
+	// Returns the original value instead of the new one
+	_ALWAYS_INLINE_ T postincrement() {
+		return value.fetch_add(1, std::memory_order_acq_rel);
+	}
 
-	return *pw;
-}
+	_ALWAYS_INLINE_ T decrement() {
+		return value.fetch_sub(1, std::memory_order_acq_rel) - 1;
+	}
 
-template <class T>
-static _ALWAYS_INLINE_ T atomic_increment(volatile T *pw) {
-	(*pw)++;
+	// Returns the original value instead of the new one
+	_ALWAYS_INLINE_ T postdecrement() {
+		return value.fetch_sub(1, std::memory_order_acq_rel);
+	}
 
-	return *pw;
-}
+	_ALWAYS_INLINE_ T add(T p_value) {
+		return value.fetch_add(p_value, std::memory_order_acq_rel) + p_value;
+	}
 
-template <class T, class V>
-static _ALWAYS_INLINE_ T atomic_sub(volatile T *pw, volatile V val) {
-	(*pw) -= val;
+	// Returns the original value instead of the new one
+	_ALWAYS_INLINE_ T postadd(T p_value) {
+		return value.fetch_add(p_value, std::memory_order_acq_rel);
+	}
 
-	return *pw;
-}
+	_ALWAYS_INLINE_ T sub(T p_value) {
+		return value.fetch_sub(p_value, std::memory_order_acq_rel) - p_value;
+	}
 
-template <class T, class V>
-static _ALWAYS_INLINE_ T atomic_add(volatile T *pw, volatile V val) {
-	(*pw) += val;
+	// Returns the original value instead of the new one
+	_ALWAYS_INLINE_ T postsub(T p_value) {
+		return value.fetch_sub(p_value, std::memory_order_acq_rel);
+	}
 
-	return *pw;
-}
+	_ALWAYS_INLINE_ T exchange_if_greater(T p_value) {
+		while (true) {
+			T tmp = value.load(std::memory_order_acquire);
+			if (tmp >= p_value) {
+				return tmp; // already greater, or equal
+			}
+			if (value.compare_exchange_weak(tmp, p_value, std::memory_order_release)) {
+				return p_value;
+			}
+		}
+	}
 
-template <class T, class V>
-static _ALWAYS_INLINE_ T atomic_exchange_if_greater(volatile T *pw, volatile V val) {
-	if (val > *pw) {
-		*pw = val;
+	_ALWAYS_INLINE_ T conditional_increment() {
+		while (true) {
+			T c = value.load(std::memory_order_acquire);
+			if (c == 0) {
+				return 0;
+			}
+			if (value.compare_exchange_weak(c, c + 1, std::memory_order_release)) {
+				return c + 1;
+			}
+		}
 	}
 
-	return *pw;
-}
+	_ALWAYS_INLINE_ explicit SafeNumeric<T>(T p_value = static_cast<T>(0)) {
+		set(p_value);
+	}
+};
 
-#elif defined(__GNUC__)
+class SafeFlag {
+	std::atomic_bool flag;
 
-/* Implementation for GCC & Clang */
+public:
+	_ALWAYS_INLINE_ bool is_set() const {
+		return flag.load(std::memory_order_acquire);
+	}
 
-// GCC guarantees atomic intrinsics for sizes of 1, 2, 4 and 8 bytes.
-// Clang states it supports GCC atomic builtins.
+	_ALWAYS_INLINE_ void set() {
+		flag.store(true, std::memory_order_release);
+	}
 
-template <class T>
-static _ALWAYS_INLINE_ T atomic_conditional_increment(volatile T *pw) {
-	while (true) {
-		T tmp = static_cast<T const volatile &>(*pw);
-		if (tmp == 0) {
-			return 0; // if zero, can't add to it anymore
-		}
-		if (__sync_val_compare_and_swap(pw, tmp, tmp + 1) == tmp) {
-			return tmp + 1;
-		}
-	}
-}
+	_ALWAYS_INLINE_ void clear() {
+		flag.store(false, std::memory_order_release);
+	}
 
-template <class T>
-static _ALWAYS_INLINE_ T atomic_decrement(volatile T *pw) {
-	return __sync_sub_and_fetch(pw, 1);
-}
+	_ALWAYS_INLINE_ void set_to(bool p_value) {
+		flag.store(p_value, std::memory_order_release);
+	}
+
+	_ALWAYS_INLINE_ explicit SafeFlag(bool p_value = false) {
+		set_to(p_value);
+	}
+};
+
+class SafeRefCount {
+	SafeNumeric<uint32_t> count;
+
+public:
+	_ALWAYS_INLINE_ bool ref() { // true on success
+		return count.conditional_increment() != 0;
+	}
+
+	_ALWAYS_INLINE_ uint32_t refval() { // non-zero on success
+		return count.conditional_increment();
+	}
+
+	_ALWAYS_INLINE_ bool unref() { // true if must be disposed of
+		return count.decrement() == 0;
+	}
+
+	_ALWAYS_INLINE_ uint32_t unrefval() { // 0 if must be disposed of
+		return count.decrement();
+	}
+
+	_ALWAYS_INLINE_ uint32_t get() const {
+		return count.get();
+	}
+
+	_ALWAYS_INLINE_ void init(uint32_t p_value = 1) {
+		count.set(p_value);
+	}
+};
+
+#else
 
 template <class T>
-static _ALWAYS_INLINE_ T atomic_increment(volatile T *pw) {
-	return __sync_add_and_fetch(pw, 1);
-}
-
-template <class T, class V>
-static _ALWAYS_INLINE_ T atomic_sub(volatile T *pw, volatile V val) {
-	return __sync_sub_and_fetch(pw, val);
-}
-
-template <class T, class V>
-static _ALWAYS_INLINE_ T atomic_add(volatile T *pw, volatile V val) {
-	return __sync_add_and_fetch(pw, val);
-}
-
-template <class T, class V>
-static _ALWAYS_INLINE_ T atomic_exchange_if_greater(volatile T *pw, volatile V val) {
-	while (true) {
-		T tmp = static_cast<T const volatile &>(*pw);
-		if (tmp >= val) {
-			return tmp; // already greater, or equal
-		}
-		if (__sync_val_compare_and_swap(pw, tmp, val) == tmp) {
-			return val;
-		}
-	}
-}
+class SafeNumeric {
+protected:
+	T value;
+
+public:
+	_ALWAYS_INLINE_ void set(T p_value) {
+		value = p_value;
+	}
+
+	_ALWAYS_INLINE_ T get() const {
+		return value;
+	}
+
+	_ALWAYS_INLINE_ T increment() {
+		return ++value;
+	}
+
+	_ALWAYS_INLINE_ T postincrement() {
+		return value++;
+	}
+
+	_ALWAYS_INLINE_ T decrement() {
+		return --value;
+	}
+
+	_ALWAYS_INLINE_ T postdecrement() {
+		return value--;
+	}
+
+	_ALWAYS_INLINE_ T add(T p_value) {
+		return value += p_value;
+	}
+
+	_ALWAYS_INLINE_ T postadd(T p_value) {
+		T old = value;
+		value += p_value;
+		return old;
+	}
+
+	_ALWAYS_INLINE_ T sub(T p_value) {
+		return value -= p_value;
+	}
+
+	_ALWAYS_INLINE_ T postsub(T p_value) {
+		T old = value;
+		value -= p_value;
+		return old;
+	}
+
+	_ALWAYS_INLINE_ T exchange_if_greater(T p_value) {
+		if (value < p_value) {
+			value = p_value;
+		}
+		return value;
+	}
+
+	_ALWAYS_INLINE_ T conditional_increment() {
+		if (value == 0) {
+			return 0;
+		} else {
+			return ++value;
+		}
+	}
 
-#elif defined(_MSC_VER)
-
-// For MSVC use a separate compilation unit to prevent windows.h from polluting
-// the global namespace.
-uint32_t atomic_conditional_increment(volatile uint32_t *pw);
-uint32_t atomic_decrement(volatile uint32_t *pw);
-uint32_t atomic_increment(volatile uint32_t *pw);
-uint32_t atomic_sub(volatile uint32_t *pw, volatile uint32_t val);
-uint32_t atomic_add(volatile uint32_t *pw, volatile uint32_t val);
-uint32_t atomic_exchange_if_greater(volatile uint32_t *pw, volatile uint32_t val);
-
-uint64_t atomic_conditional_increment(volatile uint64_t *pw);
-uint64_t atomic_decrement(volatile uint64_t *pw);
-uint64_t atomic_increment(volatile uint64_t *pw);
-uint64_t atomic_sub(volatile uint64_t *pw, volatile uint64_t val);
-uint64_t atomic_add(volatile uint64_t *pw, volatile uint64_t val);
-uint64_t atomic_exchange_if_greater(volatile uint64_t *pw, volatile uint64_t val);
-#else
-//no threads supported?
-#error Must provide atomic functions for this platform or compiler!
-#endif
+	_ALWAYS_INLINE_ explicit SafeNumeric<T>(T p_value = static_cast<T>(0)) :
+			value(p_value) {
+	}
+};
 
-struct SafeRefCount {
-	uint32_t count = 0;
+class SafeFlag {
+protected:
+	bool flag;
 
 public:
-	// destroy() is called when weak_count_ drops to zero.
-
-	_ALWAYS_INLINE_ bool ref() { // true on success
+	_ALWAYS_INLINE_ bool is_set() const {
+		return flag;
+	}
 
-		return atomic_conditional_increment(&count) != 0;
+	_ALWAYS_INLINE_ void set() {
+		flag = true;
 	}
 
-	_ALWAYS_INLINE_ uint32_t refval() { // none-zero on success
+	_ALWAYS_INLINE_ void clear() {
+		flag = false;
+	}
 
-		return atomic_conditional_increment(&count);
+	_ALWAYS_INLINE_ void set_to(bool p_value) {
+		flag = p_value;
 	}
 
-	_ALWAYS_INLINE_ bool unref() { // true if must be disposed of
+	_ALWAYS_INLINE_ explicit SafeFlag(bool p_value = false) :
+			flag(p_value) {}
+};
+
+class SafeRefCount {
+	uint32_t count = 0;
 
-		return atomic_decrement(&count) == 0;
+public:
+	_ALWAYS_INLINE_ bool ref() { // true on success
+		if (count != 0) {
+			++count;
+			return true;
+		} else {
+			return false;
+		}
 	}
 
-	_ALWAYS_INLINE_ uint32_t unrefval() { // 0 if must be disposed of
+	_ALWAYS_INLINE_ uint32_t refval() { // non-zero on success
+		if (count != 0) {
+			return ++count;
+		} else {
+			return 0;
+		}
+	}
 
-		return atomic_decrement(&count);
+	_ALWAYS_INLINE_ bool unref() { // true if must be disposed of
+		return --count == 0;
 	}
 
-	_ALWAYS_INLINE_ uint32_t get() const { // nothrow
+	_ALWAYS_INLINE_ uint32_t unrefval() { // 0 if must be disposed of
+		return --count;
+	}
+
+	_ALWAYS_INLINE_ uint32_t get() const {
 		return count;
 	}
@@ -200,4 +314,6 @@ public:
 	}
 };
 
+#endif
+
 #endif // SAFE_REFCOUNT_H
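
The core of the change is visible in the safe_refcount.h hunks above: the free atomic_* functions become methods on SafeNumeric, a thin wrapper over std::atomic. Below is a minimal standalone sketch of the same pattern, for illustration only; it is not part of the patch, and SafeNumericSketch is a made-up name. The point it demonstrates: std::atomic's fetch_add/fetch_sub return the value *before* the update, so the wrapper adjusts by one to report the new value, preserving the contract of the old atomic_increment/atomic_decrement.

#include <atomic>
#include <cstdint>
#include <cstdio>
#include <thread>

template <class T>
class SafeNumericSketch {
	std::atomic<T> value;

public:
	void set(T p_value) { value.store(p_value, std::memory_order_release); }
	T get() const { return value.load(std::memory_order_acquire); }
	// fetch_add/fetch_sub return the pre-update value; adjust to report
	// the new one, as the patch's increment()/decrement() do.
	T increment() { return value.fetch_add(1, std::memory_order_acq_rel) + 1; }
	T decrement() { return value.fetch_sub(1, std::memory_order_acq_rel) - 1; }
	explicit SafeNumericSketch(T p_value = 0) { set(p_value); }
};

int main() {
	SafeNumericSketch<uint32_t> n(0);
	auto work = [&n] {
		for (int i = 0; i < 100000; i++) {
			n.increment(); // atomic read-modify-write: no lost updates
		}
	};
	std::thread a(work), b(work);
	a.join();
	b.join();
	printf("%u\n", n.get()); // always 200000; a plain uint32_t++ could lose increments
	return 0;
}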
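SafeRefCount::ref() is built on conditional_increment(), which refuses to move the counter from 0 back to 1: once the last unref() has dropped the count to zero, the object is already being disposed of, and reviving it would hand out a dangling reference. A small hypothetical sketch of that compare-and-swap loop, mirroring the one in the patch:

#include <atomic>
#include <cstdint>
#include <cstdio>

static std::atomic<uint32_t> count{ 1 };

uint32_t conditional_increment() {
	while (true) {
		uint32_t c = count.load(std::memory_order_acquire);
		if (c == 0) {
			return 0; // object is dead; the caller's ref() reports failure
		}
		// CAS retries if another thread changed the count in between.
		if (count.compare_exchange_weak(c, c + 1, std::memory_order_release)) {
			return c + 1;
		}
	}
}

int main() {
	printf("ref -> %u\n", conditional_increment()); // 2
	count.store(0); // simulate the last unref()
	printf("ref -> %u\n", conditional_increment()); // 0: resurrection refused
	return 0;
}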
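The static_assert added to cowdata.h guards the overlay trick in the cowdata.h hunks: CowData stores its refcount in a raw uint32_t slot two words before the element storage, then treats that slot as a SafeNumeric<uint32_t>, which is only sound if the wrapper adds no padding over its underlying integer. The sketch below (standalone, with the illustrative SafeNumericSketch type again) lays out the same header and relies on the same lock-free, no-padding assumption the engine makes:

#include <atomic>
#include <cstdint>
#include <cstdio>
#include <cstdlib>

template <class T>
struct SafeNumericSketch {
	std::atomic<T> value;
};
// The same guarantee the patch asserts: the wrapper is layout-compatible
// with a bare uint32_t slot.
static_assert(sizeof(SafeNumericSketch<uint32_t>) == sizeof(uint32_t));

int main() {
	// Header of two 32-bit slots, then room for 4 elements.
	uint32_t *mem = (uint32_t *)calloc(2 + 4, sizeof(uint32_t));
	uint32_t *_ptr = mem + 2; // CowData keeps a pointer past the header

	// Same cast the patch performs: slot -2 becomes the atomic refcount.
	auto *refc = reinterpret_cast<SafeNumericSketch<uint32_t> *>(_ptr - 2);
	refc->value.store(1, std::memory_order_release); // refcount = 1
	*(_ptr - 1) = 4; // size = 4, a plain (non-atomic) field

	printf("refcount=%u size=%u\n",
			refc->value.load(std::memory_order_acquire), *(_ptr - 1));
	free(mem);
	return 0;
}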