Diffstat (limited to 'core')
-rw-r--r-- | core/core_bind.cpp                 |  10
-rw-r--r-- | core/core_bind.h                   |   3
-rw-r--r-- | core/error/error_macros.h          |  14
-rw-r--r-- | core/io/ip.cpp                     |  24
-rw-r--r-- | core/object/object.cpp             |   2
-rw-r--r-- | core/object/object.h               |   3
-rw-r--r-- | core/object/reference.cpp          |   4
-rw-r--r-- | core/os/memory.cpp                 |  26
-rw-r--r-- | core/os/memory.h                   |   6
-rw-r--r-- | core/os/thread.cpp                 |   6
-rw-r--r-- | core/os/thread.h                   |   3
-rw-r--r-- | core/os/threaded_array_processor.h |   8
-rw-r--r-- | core/templates/cowdata.h           |  21
-rw-r--r-- | core/templates/rid_owner.cpp       |   2
-rw-r--r-- | core/templates/rid_owner.h         |   4
-rw-r--r-- | core/templates/safe_refcount.cpp   | 161
-rw-r--r-- | core/templates/safe_refcount.h     | 342
17 files changed, 302 insertions, 337 deletions
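Summary: this commit removes the per-compiler atomic_* free functions (GCC/Clang __sync builtins, the MSVC Interlocked* wrappers in safe_refcount.cpp, and the non-atomic NO_THREADS fallback) and replaces them, together with the volatile counters and flags at their call sites, with std::atomic-backed wrapper classes in core/templates/safe_refcount.h: SafeNumeric<T>, SafeFlag and SafeRefCount. The standalone sketch below is illustrative only and not part of the commit; SafeNumericSketch is a hypothetical stand-in built directly on std::atomic to mirror the acquire-release semantics of the new SafeNumeric<T> and the call-site migration (atomic_increment(&n) becomes n.increment(), atomic_add and atomic_exchange_if_greater become add() and exchange_if_greater()), using the high-water-mark pattern Memory::alloc_static switches to in the diff below.

// Illustrative sketch only (not Godot source): a minimal stand-in for the
// SafeNumeric<T> wrapper introduced by this commit, plus the usage pattern
// from Memory::alloc_static (add the allocation, then raise the high-water mark).
#include <atomic>
#include <cstdint>
#include <cstdio>

template <class T>
class SafeNumericSketch { // hypothetical name; the real class is SafeNumeric<T>
	std::atomic<T> value{ 0 };

public:
	T increment() { // returns the new value, like atomic_increment() did
		return value.fetch_add(1, std::memory_order_acq_rel) + 1;
	}
	T add(T p_value) { // returns the new value, like atomic_add() did
		return value.fetch_add(p_value, std::memory_order_acq_rel) + p_value;
	}
	T get() const {
		return value.load(std::memory_order_acquire);
	}
	T exchange_if_greater(T p_value) { // keeps the maximum value seen so far
		while (true) {
			T tmp = value.load(std::memory_order_acquire);
			if (tmp >= p_value) {
				return tmp; // already greater, or equal
			}
			if (value.compare_exchange_weak(tmp, p_value, std::memory_order_release)) {
				return p_value;
			}
		}
	}
};

int main() {
	SafeNumericSketch<uint64_t> alloc_count, mem_usage, max_usage;

	// Old call sites:              New call sites (this commit):
	//   atomic_increment(&n);  ->    n.increment();
	//   atomic_add(&n, v);     ->    n.add(v);
	alloc_count.increment();
	uint64_t new_usage = mem_usage.add(4096);
	max_usage.exchange_if_greater(new_usage); // high-water mark, as in Memory::alloc_static

	std::printf("allocs=%llu usage=%llu max=%llu\n",
			(unsigned long long)alloc_count.get(),
			(unsigned long long)mem_usage.get(),
			(unsigned long long)max_usage.get());
	return 0;
}

Like the real header, the sketch deliberately exposes no implicit conversions or arithmetic operators, so every atomic access stays explicit at the call site.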
diff --git a/core/core_bind.cpp b/core/core_bind.cpp index dd99c32fa8..fe49899aa2 100644 --- a/core/core_bind.cpp +++ b/core/core_bind.cpp @@ -1990,7 +1990,7 @@ void _Thread::_start_func(void *ud) { } Error _Thread::start(Object *p_instance, const StringName &p_method, const Variant &p_userdata, Priority p_priority) { - ERR_FAIL_COND_V_MSG(active, ERR_ALREADY_IN_USE, "Thread already started."); + ERR_FAIL_COND_V_MSG(active.is_set(), ERR_ALREADY_IN_USE, "Thread already started."); ERR_FAIL_COND_V(!p_instance, ERR_INVALID_PARAMETER); ERR_FAIL_COND_V(p_method == StringName(), ERR_INVALID_PARAMETER); ERR_FAIL_INDEX_V(p_priority, PRIORITY_MAX, ERR_INVALID_PARAMETER); @@ -1999,7 +1999,7 @@ Error _Thread::start(Object *p_instance, const StringName &p_method, const Varia target_method = p_method; target_instance = p_instance; userdata = p_userdata; - active = true; + active.set(); Ref<_Thread> *ud = memnew(Ref<_Thread>(this)); @@ -2015,14 +2015,14 @@ String _Thread::get_id() const { } bool _Thread::is_active() const { - return active; + return active.is_set(); } Variant _Thread::wait_to_finish() { - ERR_FAIL_COND_V_MSG(!active, Variant(), "Thread must be active to wait for its completion."); + ERR_FAIL_COND_V_MSG(!active.is_set(), Variant(), "Thread must be active to wait for its completion."); thread.wait_to_finish(); Variant r = ret; - active = false; + active.clear(); target_method = StringName(); target_instance = nullptr; userdata = Variant(); diff --git a/core/core_bind.h b/core/core_bind.h index 7f945a9314..46a7a94f2d 100644 --- a/core/core_bind.h +++ b/core/core_bind.h @@ -40,6 +40,7 @@ #include "core/os/os.h" #include "core/os/semaphore.h" #include "core/os/thread.h" +#include "core/templates/safe_refcount.h" class _ResourceLoader : public Object { GDCLASS(_ResourceLoader, Object); @@ -559,7 +560,7 @@ class _Thread : public Reference { protected: Variant ret; Variant userdata; - volatile bool active = false; + SafeFlag active; Object *target_instance = nullptr; StringName target_method; Thread thread; diff --git a/core/error/error_macros.h b/core/error/error_macros.h index 8eb6217ce8..f909a67d55 100644 --- a/core/error/error_macros.h +++ b/core/error/error_macros.h @@ -33,6 +33,8 @@ #include "core/typedefs.h" +#include "core/templates/safe_refcount.h" + class String; enum ErrorHandlerType { @@ -577,10 +579,10 @@ void _err_print_index_error(const char *p_function, const char *p_file, int p_li */ #define WARN_DEPRECATED \ if (true) { \ - static volatile bool warning_shown = false; \ - if (!warning_shown) { \ + static SafeFlag warning_shown; \ + if (!warning_shown.is_set()) { \ _err_print_error(FUNCTION_STR, __FILE__, __LINE__, "This method has been deprecated and will be removed in the future.", ERR_HANDLER_WARNING); \ - warning_shown = true; \ + warning_shown.set(); \ } \ } else \ ((void)0) @@ -590,10 +592,10 @@ void _err_print_index_error(const char *p_function, const char *p_file, int p_li */ #define WARN_DEPRECATED_MSG(m_msg) \ if (true) { \ - static volatile bool warning_shown = false; \ - if (!warning_shown) { \ + static SafeFlag warning_shown; \ + if (!warning_shown.is_set()) { \ _err_print_error(FUNCTION_STR, __FILE__, __LINE__, "This method has been deprecated and will be removed in the future.", DEBUG_STR(m_msg), ERR_HANDLER_WARNING); \ - warning_shown = true; \ + warning_shown.set(); \ } \ } else \ ((void)0) diff --git a/core/io/ip.cpp b/core/io/ip.cpp index df95785000..e1d9c19f10 100644 --- a/core/io/ip.cpp +++ b/core/io/ip.cpp @@ -40,13 +40,13 @@ 
VARIANT_ENUM_CAST(IP::ResolverStatus); struct _IP_ResolverPrivate { struct QueueItem { - volatile IP::ResolverStatus status; + SafeNumeric<IP::ResolverStatus> status; IP_Address response; String hostname; IP::Type type; void clear() { - status = IP::RESOLVER_STATUS_NONE; + status.set(IP::RESOLVER_STATUS_NONE); response = IP_Address(); type = IP::TYPE_NONE; hostname = ""; @@ -61,7 +61,7 @@ struct _IP_ResolverPrivate { IP::ResolverID find_empty_id() const { for (int i = 0; i < IP::RESOLVER_MAX_QUERIES; i++) { - if (queue[i].status == IP::RESOLVER_STATUS_NONE) { + if (queue[i].status.get() == IP::RESOLVER_STATUS_NONE) { return i; } } @@ -77,15 +77,15 @@ struct _IP_ResolverPrivate { void resolve_queues() { for (int i = 0; i < IP::RESOLVER_MAX_QUERIES; i++) { - if (queue[i].status != IP::RESOLVER_STATUS_WAITING) { + if (queue[i].status.get() != IP::RESOLVER_STATUS_WAITING) { continue; } queue[i].response = IP::get_singleton()->resolve_hostname(queue[i].hostname, queue[i].type); if (!queue[i].response.is_valid()) { - queue[i].status = IP::RESOLVER_STATUS_ERROR; + queue[i].status.set(IP::RESOLVER_STATUS_ERROR); } else { - queue[i].status = IP::RESOLVER_STATUS_DONE; + queue[i].status.set(IP::RESOLVER_STATUS_DONE); } } } @@ -137,10 +137,10 @@ IP::ResolverID IP::resolve_hostname_queue_item(const String &p_hostname, IP::Typ resolver->queue[id].type = p_type; if (resolver->cache.has(key) && resolver->cache[key].is_valid()) { resolver->queue[id].response = resolver->cache[key]; - resolver->queue[id].status = IP::RESOLVER_STATUS_DONE; + resolver->queue[id].status.set(IP::RESOLVER_STATUS_DONE); } else { resolver->queue[id].response = IP_Address(); - resolver->queue[id].status = IP::RESOLVER_STATUS_WAITING; + resolver->queue[id].status.set(IP::RESOLVER_STATUS_WAITING); if (resolver->thread.is_started()) { resolver->sem.post(); } else { @@ -156,12 +156,12 @@ IP::ResolverStatus IP::get_resolve_item_status(ResolverID p_id) const { MutexLock lock(resolver->mutex); - if (resolver->queue[p_id].status == IP::RESOLVER_STATUS_NONE) { + if (resolver->queue[p_id].status.get() == IP::RESOLVER_STATUS_NONE) { ERR_PRINT("Condition status == IP::RESOLVER_STATUS_NONE"); resolver->mutex.unlock(); return IP::RESOLVER_STATUS_NONE; } - return resolver->queue[p_id].status; + return resolver->queue[p_id].status.get(); } IP_Address IP::get_resolve_item_address(ResolverID p_id) const { @@ -169,7 +169,7 @@ IP_Address IP::get_resolve_item_address(ResolverID p_id) const { MutexLock lock(resolver->mutex); - if (resolver->queue[p_id].status != IP::RESOLVER_STATUS_DONE) { + if (resolver->queue[p_id].status.get() != IP::RESOLVER_STATUS_DONE) { ERR_PRINT("Resolve of '" + resolver->queue[p_id].hostname + "'' didn't complete yet."); resolver->mutex.unlock(); return IP_Address(); @@ -183,7 +183,7 @@ void IP::erase_resolve_item(ResolverID p_id) { MutexLock lock(resolver->mutex); - resolver->queue[p_id].status = IP::RESOLVER_STATUS_NONE; + resolver->queue[p_id].status.set(IP::RESOLVER_STATUS_NONE); } void IP::clear_cache(const String &p_hostname) { diff --git a/core/object/object.cpp b/core/object/object.cpp index 8f2eed3200..1a9cce49d8 100644 --- a/core/object/object.cpp +++ b/core/object/object.cpp @@ -1727,7 +1727,7 @@ void *Object::get_script_instance_binding(int p_script_language_index) { if (!_script_instance_bindings[p_script_language_index]) { void *script_data = ScriptServer::get_language(p_script_language_index)->alloc_instance_binding_data(this); if (script_data) { - atomic_increment(&instance_binding_count); + 
instance_binding_count.increment(); _script_instance_bindings[p_script_language_index] = script_data; } } diff --git a/core/object/object.h b/core/object/object.h index 5021fa47a1..029478873d 100644 --- a/core/object/object.h +++ b/core/object/object.h @@ -37,6 +37,7 @@ #include "core/templates/hash_map.h" #include "core/templates/list.h" #include "core/templates/map.h" +#include "core/templates/safe_refcount.h" #include "core/templates/set.h" #include "core/templates/vmap.h" #include "core/variant/callable_bind.h" @@ -486,7 +487,7 @@ private: friend class Reference; bool type_is_reference = false; - uint32_t instance_binding_count = 0; + SafeNumeric<uint32_t> instance_binding_count; void *_script_instance_bindings[MAX_SCRIPT_INSTANCE_BINDINGS]; Object(bool p_reference); diff --git a/core/object/reference.cpp b/core/object/reference.cpp index 71a52a9ba5..22e4e8a336 100644 --- a/core/object/reference.cpp +++ b/core/object/reference.cpp @@ -62,7 +62,7 @@ bool Reference::reference() { if (get_script_instance()) { get_script_instance()->refcount_incremented(); } - if (instance_binding_count > 0 && !ScriptServer::are_languages_finished()) { + if (instance_binding_count.get() > 0 && !ScriptServer::are_languages_finished()) { for (int i = 0; i < MAX_SCRIPT_INSTANCE_BINDINGS; i++) { if (_script_instance_bindings[i]) { ScriptServer::get_language(i)->refcount_incremented_instance_binding(this); @@ -83,7 +83,7 @@ bool Reference::unreference() { bool script_ret = get_script_instance()->refcount_decremented(); die = die && script_ret; } - if (instance_binding_count > 0 && !ScriptServer::are_languages_finished()) { + if (instance_binding_count.get() > 0 && !ScriptServer::are_languages_finished()) { for (int i = 0; i < MAX_SCRIPT_INSTANCE_BINDINGS; i++) { if (_script_instance_bindings[i]) { bool script_ret = ScriptServer::get_language(i)->refcount_decremented_instance_binding(this); diff --git a/core/os/memory.cpp b/core/os/memory.cpp index 14808c2ce6..5910cb0e7b 100644 --- a/core/os/memory.cpp +++ b/core/os/memory.cpp @@ -60,11 +60,11 @@ void operator delete(void *p_mem, void *p_pointer, size_t check, const char *p_d #endif #ifdef DEBUG_ENABLED -uint64_t Memory::mem_usage = 0; -uint64_t Memory::max_usage = 0; +SafeNumeric<uint64_t> Memory::mem_usage; +SafeNumeric<uint64_t> Memory::max_usage; #endif -uint64_t Memory::alloc_count = 0; +SafeNumeric<uint64_t> Memory::alloc_count; void *Memory::alloc_static(size_t p_bytes, bool p_pad_align) { #ifdef DEBUG_ENABLED @@ -77,7 +77,7 @@ void *Memory::alloc_static(size_t p_bytes, bool p_pad_align) { ERR_FAIL_COND_V(!mem, nullptr); - atomic_increment(&alloc_count); + alloc_count.increment(); if (prepad) { uint64_t *s = (uint64_t *)mem; @@ -86,8 +86,8 @@ void *Memory::alloc_static(size_t p_bytes, bool p_pad_align) { uint8_t *s8 = (uint8_t *)mem; #ifdef DEBUG_ENABLED - atomic_add(&mem_usage, p_bytes); - atomic_exchange_if_greater(&max_usage, mem_usage); + uint64_t new_mem_usage = mem_usage.add(p_bytes); + max_usage.exchange_if_greater(new_mem_usage); #endif return s8 + PAD_ALIGN; } else { @@ -114,10 +114,10 @@ void *Memory::realloc_static(void *p_memory, size_t p_bytes, bool p_pad_align) { #ifdef DEBUG_ENABLED if (p_bytes > *s) { - atomic_add(&mem_usage, p_bytes - *s); - atomic_exchange_if_greater(&max_usage, mem_usage); + uint64_t new_mem_usage = mem_usage.add(p_bytes - *s); + max_usage.exchange_if_greater(new_mem_usage); } else { - atomic_sub(&mem_usage, *s - p_bytes); + mem_usage.sub(*s - p_bytes); } #endif @@ -156,14 +156,14 @@ void Memory::free_static(void 
*p_ptr, bool p_pad_align) { bool prepad = p_pad_align; #endif - atomic_decrement(&alloc_count); + alloc_count.decrement(); if (prepad) { mem -= PAD_ALIGN; #ifdef DEBUG_ENABLED uint64_t *s = (uint64_t *)mem; - atomic_sub(&mem_usage, *s); + mem_usage.sub(*s); #endif free(mem); @@ -178,7 +178,7 @@ uint64_t Memory::get_mem_available() { uint64_t Memory::get_mem_usage() { #ifdef DEBUG_ENABLED - return mem_usage; + return mem_usage.get(); #else return 0; #endif @@ -186,7 +186,7 @@ uint64_t Memory::get_mem_usage() { uint64_t Memory::get_mem_max_usage() { #ifdef DEBUG_ENABLED - return max_usage; + return max_usage.get(); #else return 0; #endif diff --git a/core/os/memory.h b/core/os/memory.h index c2ae3f4ae7..10e678103d 100644 --- a/core/os/memory.h +++ b/core/os/memory.h @@ -43,11 +43,11 @@ class Memory { Memory(); #ifdef DEBUG_ENABLED - static uint64_t mem_usage; - static uint64_t max_usage; + static SafeNumeric<uint64_t> mem_usage; + static SafeNumeric<uint64_t> max_usage; #endif - static uint64_t alloc_count; + static SafeNumeric<uint64_t> alloc_count; public: static void *alloc_static(size_t p_bytes, bool p_pad_align = false); diff --git a/core/os/thread.cpp b/core/os/thread.cpp index 936e5d5500..88744eed63 100644 --- a/core/os/thread.cpp +++ b/core/os/thread.cpp @@ -34,13 +34,15 @@ #if !defined(NO_THREADS) +#include "core/templates/safe_refcount.h" + Error (*Thread::set_name_func)(const String &) = nullptr; void (*Thread::set_priority_func)(Thread::Priority) = nullptr; void (*Thread::init_func)() = nullptr; void (*Thread::term_func)() = nullptr; Thread::ID Thread::main_thread_id = 1; -Thread::ID Thread::last_thread_id = 1; +SafeNumeric<Thread::ID> Thread::last_thread_id{ 1 }; thread_local Thread::ID Thread::caller_id = 1; void Thread::_set_platform_funcs( @@ -79,7 +81,7 @@ void Thread::start(Thread::Callback p_callback, void *p_user, const Settings &p_ std::thread empty_thread; thread.swap(empty_thread); } - id = atomic_increment(&last_thread_id); + id = last_thread_id.increment(); std::thread new_thread(&Thread::callback, this, p_settings, p_callback, p_user); thread.swap(new_thread); } diff --git a/core/os/thread.h b/core/os/thread.h index b5449d2ed6..76f5be182e 100644 --- a/core/os/thread.h +++ b/core/os/thread.h @@ -34,6 +34,7 @@ #include "core/typedefs.h" #if !defined(NO_THREADS) +#include "core/templates/safe_refcount.h" #include <thread> #endif @@ -61,7 +62,7 @@ private: friend class Main; static ID main_thread_id; - static ID last_thread_id; + static SafeNumeric<Thread::ID> last_thread_id; ID id = 0; static thread_local ID caller_id; diff --git a/core/os/threaded_array_processor.h b/core/os/threaded_array_processor.h index 9538ac5957..4f270001d3 100644 --- a/core/os/threaded_array_processor.h +++ b/core/os/threaded_array_processor.h @@ -40,7 +40,7 @@ template <class C, class U> struct ThreadArrayProcessData { uint32_t elements; - uint32_t index; + SafeNumeric<uint32_t> index; C *instance; U userdata; void (C::*method)(uint32_t, U); @@ -56,7 +56,7 @@ template <class T> void process_array_thread(void *ud) { T &data = *(T *)ud; while (true) { - uint32_t index = atomic_increment(&data.index); + uint32_t index = data.index.increment(); if (index >= data.elements) { break; } @@ -70,9 +70,9 @@ void thread_process_array(uint32_t p_elements, C *p_instance, M p_method, U p_us data.method = p_method; data.instance = p_instance; data.userdata = p_userdata; - data.index = 0; + data.index.set(0); data.elements = p_elements; - data.process(data.index); //process first, let threads increment for 
next + data.process(0); //process first, let threads increment for next int thread_count = OS::get_singleton()->get_processor_count(); Thread *threads = memnew_arr(Thread, thread_count); diff --git a/core/templates/cowdata.h b/core/templates/cowdata.h index 4becb1be59..6077f5f107 100644 --- a/core/templates/cowdata.h +++ b/core/templates/cowdata.h @@ -45,6 +45,9 @@ class CharString; template <class T, class V> class VMap; +// CowData is relying on this to be true +static_assert(sizeof(SafeNumeric<uint32_t>) == sizeof(uint32_t)); + template <class T> class CowData { template <class TV> @@ -60,12 +63,12 @@ private: // internal helpers - _FORCE_INLINE_ uint32_t *_get_refcount() const { + _FORCE_INLINE_ SafeNumeric<uint32_t> *_get_refcount() const { if (!_ptr) { return nullptr; } - return reinterpret_cast<uint32_t *>(_ptr) - 2; + return reinterpret_cast<SafeNumeric<uint32_t> *>(_ptr) - 2; } _FORCE_INLINE_ uint32_t *_get_size() const { @@ -192,9 +195,9 @@ void CowData<T>::_unref(void *p_data) { return; } - uint32_t *refc = _get_refcount(); + SafeNumeric<uint32_t> *refc = _get_refcount(); - if (atomic_decrement(refc) > 0) { + if (refc->decrement() > 0) { return; // still in use } // clean up @@ -219,15 +222,15 @@ void CowData<T>::_copy_on_write() { return; } - uint32_t *refc = _get_refcount(); + SafeNumeric<uint32_t> *refc = _get_refcount(); - if (unlikely(*refc > 1)) { + if (unlikely(refc->get() > 1)) { /* in use by more than me */ uint32_t current_size = *_get_size(); uint32_t *mem_new = (uint32_t *)Memory::alloc_static(_get_alloc_size(current_size), true); - *(mem_new - 2) = 1; //refcount + reinterpret_cast<SafeNumeric<uint32_t> *>(mem_new - 2)->set(1); //refcount *(mem_new - 1) = current_size; //size T *_data = (T *)(mem_new); @@ -278,7 +281,7 @@ Error CowData<T>::resize(int p_size) { uint32_t *ptr = (uint32_t *)Memory::alloc_static(alloc_size, true); ERR_FAIL_COND_V(!ptr, ERR_OUT_OF_MEMORY); *(ptr - 1) = 0; //size, currently none - *(ptr - 2) = 1; //refcount + reinterpret_cast<SafeNumeric<uint32_t> *>(ptr - 2)->set(1); //refcount _ptr = (T *)ptr; @@ -359,7 +362,7 @@ void CowData<T>::_ref(const CowData &p_from) { return; //nothing to do } - if (atomic_conditional_increment(p_from._get_refcount()) > 0) { // could reference + if (p_from._get_refcount()->increment() > 0) { // could reference _ptr = p_from._ptr; } } diff --git a/core/templates/rid_owner.cpp b/core/templates/rid_owner.cpp index f75a2eb9df..56f39ab779 100644 --- a/core/templates/rid_owner.cpp +++ b/core/templates/rid_owner.cpp @@ -30,4 +30,4 @@ #include "rid_owner.h" -volatile uint64_t RID_AllocBase::base_id = 1; +SafeNumeric<uint64_t> RID_AllocBase::base_id{ 1 }; diff --git a/core/templates/rid_owner.h b/core/templates/rid_owner.h index 3edc73b1a9..c4aa93c394 100644 --- a/core/templates/rid_owner.h +++ b/core/templates/rid_owner.h @@ -44,7 +44,7 @@ #include <typeinfo> class RID_AllocBase { - static volatile uint64_t base_id; + static SafeNumeric<uint64_t> base_id; protected: static RID _make_from_id(uint64_t p_id) { @@ -54,7 +54,7 @@ protected: } static uint64_t _gen_id() { - return atomic_increment(&base_id); + return base_id.increment(); } static RID _gen_rid() { diff --git a/core/templates/safe_refcount.cpp b/core/templates/safe_refcount.cpp deleted file mode 100644 index a915ee662f..0000000000 --- a/core/templates/safe_refcount.cpp +++ /dev/null @@ -1,161 +0,0 @@ -/*************************************************************************/ -/* safe_refcount.cpp */ 
-/*************************************************************************/ -/* This file is part of: */ -/* GODOT ENGINE */ -/* https://godotengine.org */ -/*************************************************************************/ -/* Copyright (c) 2007-2021 Juan Linietsky, Ariel Manzur. */ -/* Copyright (c) 2014-2021 Godot Engine contributors (cf. AUTHORS.md). */ -/* */ -/* Permission is hereby granted, free of charge, to any person obtaining */ -/* a copy of this software and associated documentation files (the */ -/* "Software"), to deal in the Software without restriction, including */ -/* without limitation the rights to use, copy, modify, merge, publish, */ -/* distribute, sublicense, and/or sell copies of the Software, and to */ -/* permit persons to whom the Software is furnished to do so, subject to */ -/* the following conditions: */ -/* */ -/* The above copyright notice and this permission notice shall be */ -/* included in all copies or substantial portions of the Software. */ -/* */ -/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */ -/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */ -/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/ -/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */ -/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */ -/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */ -/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ -/*************************************************************************/ - -#include "safe_refcount.h" - -#if defined(_MSC_VER) - -/* Implementation for MSVC-Windows */ - -// don't pollute my namespace! -#include <windows.h> - -#define ATOMIC_CONDITIONAL_INCREMENT_BODY(m_pw, m_win_type, m_win_cmpxchg, m_cpp_type) \ - /* try to increment until it actually works */ \ - /* taken from boost */ \ - while (true) { \ - m_cpp_type tmp = static_cast<m_cpp_type const volatile &>(*(m_pw)); \ - if (tmp == 0) { \ - return 0; /* if zero, can't add to it anymore */ \ - } \ - if (m_win_cmpxchg((m_win_type volatile *)(m_pw), tmp + 1, tmp) == tmp) { \ - return tmp + 1; \ - } \ - } - -#define ATOMIC_EXCHANGE_IF_GREATER_BODY(m_pw, m_val, m_win_type, m_win_cmpxchg, m_cpp_type) \ - while (true) { \ - m_cpp_type tmp = static_cast<m_cpp_type const volatile &>(*(m_pw)); \ - if (tmp >= m_val) { \ - return tmp; /* already greater, or equal */ \ - } \ - if (m_win_cmpxchg((m_win_type volatile *)(m_pw), m_val, tmp) == tmp) { \ - return m_val; \ - } \ - } - -_ALWAYS_INLINE_ uint32_t _atomic_conditional_increment_impl(volatile uint32_t *pw) { - ATOMIC_CONDITIONAL_INCREMENT_BODY(pw, LONG, InterlockedCompareExchange, uint32_t); -} - -_ALWAYS_INLINE_ uint32_t _atomic_decrement_impl(volatile uint32_t *pw) { - return InterlockedDecrement((LONG volatile *)pw); -} - -_ALWAYS_INLINE_ uint32_t _atomic_increment_impl(volatile uint32_t *pw) { - return InterlockedIncrement((LONG volatile *)pw); -} - -_ALWAYS_INLINE_ uint32_t _atomic_sub_impl(volatile uint32_t *pw, volatile uint32_t val) { - return InterlockedExchangeAdd((LONG volatile *)pw, -(int32_t)val) - val; -} - -_ALWAYS_INLINE_ uint32_t _atomic_add_impl(volatile uint32_t *pw, volatile uint32_t val) { - return InterlockedAdd((LONG volatile *)pw, val); -} - -_ALWAYS_INLINE_ uint32_t _atomic_exchange_if_greater_impl(volatile uint32_t *pw, volatile uint32_t val) { - ATOMIC_EXCHANGE_IF_GREATER_BODY(pw, val, LONG, InterlockedCompareExchange, uint32_t); -} - -_ALWAYS_INLINE_ uint64_t 
_atomic_conditional_increment_impl(volatile uint64_t *pw) { - ATOMIC_CONDITIONAL_INCREMENT_BODY(pw, LONGLONG, InterlockedCompareExchange64, uint64_t); -} - -_ALWAYS_INLINE_ uint64_t _atomic_decrement_impl(volatile uint64_t *pw) { - return InterlockedDecrement64((LONGLONG volatile *)pw); -} - -_ALWAYS_INLINE_ uint64_t _atomic_increment_impl(volatile uint64_t *pw) { - return InterlockedIncrement64((LONGLONG volatile *)pw); -} - -_ALWAYS_INLINE_ uint64_t _atomic_sub_impl(volatile uint64_t *pw, volatile uint64_t val) { - return InterlockedExchangeAdd64((LONGLONG volatile *)pw, -(int64_t)val) - val; -} - -_ALWAYS_INLINE_ uint64_t _atomic_add_impl(volatile uint64_t *pw, volatile uint64_t val) { - return InterlockedAdd64((LONGLONG volatile *)pw, val); -} - -_ALWAYS_INLINE_ uint64_t _atomic_exchange_if_greater_impl(volatile uint64_t *pw, volatile uint64_t val) { - ATOMIC_EXCHANGE_IF_GREATER_BODY(pw, val, LONGLONG, InterlockedCompareExchange64, uint64_t); -} - -// The actual advertised functions; they'll call the right implementation - -uint32_t atomic_conditional_increment(volatile uint32_t *pw) { - return _atomic_conditional_increment_impl(pw); -} - -uint32_t atomic_decrement(volatile uint32_t *pw) { - return _atomic_decrement_impl(pw); -} - -uint32_t atomic_increment(volatile uint32_t *pw) { - return _atomic_increment_impl(pw); -} - -uint32_t atomic_sub(volatile uint32_t *pw, volatile uint32_t val) { - return _atomic_sub_impl(pw, val); -} - -uint32_t atomic_add(volatile uint32_t *pw, volatile uint32_t val) { - return _atomic_add_impl(pw, val); -} - -uint32_t atomic_exchange_if_greater(volatile uint32_t *pw, volatile uint32_t val) { - return _atomic_exchange_if_greater_impl(pw, val); -} - -uint64_t atomic_conditional_increment(volatile uint64_t *pw) { - return _atomic_conditional_increment_impl(pw); -} - -uint64_t atomic_decrement(volatile uint64_t *pw) { - return _atomic_decrement_impl(pw); -} - -uint64_t atomic_increment(volatile uint64_t *pw) { - return _atomic_increment_impl(pw); -} - -uint64_t atomic_sub(volatile uint64_t *pw, volatile uint64_t val) { - return _atomic_sub_impl(pw, val); -} - -uint64_t atomic_add(volatile uint64_t *pw, volatile uint64_t val) { - return _atomic_add_impl(pw, val); -} - -uint64_t atomic_exchange_if_greater(volatile uint64_t *pw, volatile uint64_t val) { - return _atomic_exchange_if_greater_impl(pw, val); -} -#endif diff --git a/core/templates/safe_refcount.h b/core/templates/safe_refcount.h index d94444fad6..6aebc24ec3 100644 --- a/core/templates/safe_refcount.h +++ b/core/templates/safe_refcount.h @@ -31,167 +31,281 @@ #ifndef SAFE_REFCOUNT_H #define SAFE_REFCOUNT_H -#include "core/os/mutex.h" #include "core/typedefs.h" -#include "platform_config.h" -// Atomic functions, these are used for multithread safe reference counters! +#if !defined(NO_THREADS) -#ifdef NO_THREADS +#include <atomic> -/* Bogus implementation unaware of multiprocessing */ +// Design goals for these classes: +// - No automatic conversions or arithmetic operators, +// to keep explicit the use of atomics everywhere. +// - Using acquire-release semantics, even to set the first value. +// The first value may be set relaxedly in many cases, but adding the distinction +// between relaxed and unrelaxed operation to the interface would make it needlessly +// flexible. There's negligible waste in having release semantics for the initial +// value and, as an important benefit, you can be sure the value is properly synchronized +// even with threads that are already running. 
template <class T> -static _ALWAYS_INLINE_ T atomic_conditional_increment(volatile T *pw) { - if (*pw == 0) { - return 0; +class SafeNumeric { + std::atomic<T> value; + +public: + _ALWAYS_INLINE_ void set(T p_value) { + value.store(p_value, std::memory_order_release); } - (*pw)++; + _ALWAYS_INLINE_ T get() const { + return value.load(std::memory_order_acquire); + } - return *pw; -} + _ALWAYS_INLINE_ T increment() { + return value.fetch_add(1, std::memory_order_acq_rel) + 1; + } -template <class T> -static _ALWAYS_INLINE_ T atomic_decrement(volatile T *pw) { - (*pw)--; + // Returns the original value instead of the new one + _ALWAYS_INLINE_ T postincrement() { + return value.fetch_add(1, std::memory_order_acq_rel); + } - return *pw; -} + _ALWAYS_INLINE_ T decrement() { + return value.fetch_sub(1, std::memory_order_acq_rel) - 1; + } -template <class T> -static _ALWAYS_INLINE_ T atomic_increment(volatile T *pw) { - (*pw)++; + // Returns the original value instead of the new one + _ALWAYS_INLINE_ T postdecrement() { + return value.fetch_sub(1, std::memory_order_acq_rel); + } - return *pw; -} + _ALWAYS_INLINE_ T add(T p_value) { + return value.fetch_add(p_value, std::memory_order_acq_rel) + p_value; + } -template <class T, class V> -static _ALWAYS_INLINE_ T atomic_sub(volatile T *pw, volatile V val) { - (*pw) -= val; + // Returns the original value instead of the new one + _ALWAYS_INLINE_ T postadd(T p_value) { + return value.fetch_add(p_value, std::memory_order_acq_rel); + } - return *pw; -} + _ALWAYS_INLINE_ T sub(T p_value) { + return value.fetch_sub(p_value, std::memory_order_acq_rel) - p_value; + } -template <class T, class V> -static _ALWAYS_INLINE_ T atomic_add(volatile T *pw, volatile V val) { - (*pw) += val; + // Returns the original value instead of the new one + _ALWAYS_INLINE_ T postsub(T p_value) { + return value.fetch_sub(p_value, std::memory_order_acq_rel); + } - return *pw; -} + _ALWAYS_INLINE_ T exchange_if_greater(T p_value) { + while (true) { + T tmp = value.load(std::memory_order_acquire); + if (tmp >= p_value) { + return tmp; // already greater, or equal + } + if (value.compare_exchange_weak(tmp, p_value, std::memory_order_release)) { + return p_value; + } + } + } -template <class T, class V> -static _ALWAYS_INLINE_ T atomic_exchange_if_greater(volatile T *pw, volatile V val) { - if (val > *pw) { - *pw = val; + _ALWAYS_INLINE_ T conditional_increment() { + while (true) { + T c = value.load(std::memory_order_acquire); + if (c == 0) { + return 0; + } + if (value.compare_exchange_weak(c, c + 1, std::memory_order_release)) { + return c + 1; + } + } } - return *pw; -} + _ALWAYS_INLINE_ explicit SafeNumeric<T>(T p_value = static_cast<T>(0)) { + set(p_value); + } +}; -#elif defined(__GNUC__) +class SafeFlag { + std::atomic_bool flag; -/* Implementation for GCC & Clang */ +public: + _ALWAYS_INLINE_ bool is_set() const { + return flag.load(std::memory_order_acquire); + } -// GCC guarantees atomic intrinsics for sizes of 1, 2, 4 and 8 bytes. -// Clang states it supports GCC atomic builtins. 
+ _ALWAYS_INLINE_ void set() { + flag.store(true, std::memory_order_release); + } -template <class T> -static _ALWAYS_INLINE_ T atomic_conditional_increment(volatile T *pw) { - while (true) { - T tmp = static_cast<T const volatile &>(*pw); - if (tmp == 0) { - return 0; // if zero, can't add to it anymore - } - if (__sync_val_compare_and_swap(pw, tmp, tmp + 1) == tmp) { - return tmp + 1; - } + _ALWAYS_INLINE_ void clear() { + flag.store(false, std::memory_order_release); } -} -template <class T> -static _ALWAYS_INLINE_ T atomic_decrement(volatile T *pw) { - return __sync_sub_and_fetch(pw, 1); -} + _ALWAYS_INLINE_ void set_to(bool p_value) { + flag.store(p_value, std::memory_order_release); + } + + _ALWAYS_INLINE_ explicit SafeFlag(bool p_value = false) { + set_to(p_value); + } +}; + +class SafeRefCount { + SafeNumeric<uint32_t> count; + +public: + _ALWAYS_INLINE_ bool ref() { // true on success + return count.conditional_increment() != 0; + } + + _ALWAYS_INLINE_ uint32_t refval() { // none-zero on success + return count.conditional_increment(); + } + + _ALWAYS_INLINE_ bool unref() { // true if must be disposed of + return count.decrement() == 0; + } + + _ALWAYS_INLINE_ uint32_t unrefval() { // 0 if must be disposed of + return count.decrement(); + } + + _ALWAYS_INLINE_ uint32_t get() const { + return count.get(); + } + + _ALWAYS_INLINE_ void init(uint32_t p_value = 1) { + count.set(p_value); + } +}; + +#else template <class T> -static _ALWAYS_INLINE_ T atomic_increment(volatile T *pw) { - return __sync_add_and_fetch(pw, 1); -} - -template <class T, class V> -static _ALWAYS_INLINE_ T atomic_sub(volatile T *pw, volatile V val) { - return __sync_sub_and_fetch(pw, val); -} - -template <class T, class V> -static _ALWAYS_INLINE_ T atomic_add(volatile T *pw, volatile V val) { - return __sync_add_and_fetch(pw, val); -} - -template <class T, class V> -static _ALWAYS_INLINE_ T atomic_exchange_if_greater(volatile T *pw, volatile V val) { - while (true) { - T tmp = static_cast<T const volatile &>(*pw); - if (tmp >= val) { - return tmp; // already greater, or equal +class SafeNumeric { +protected: + T value; + +public: + _ALWAYS_INLINE_ void set(T p_value) { + value = p_value; + } + + _ALWAYS_INLINE_ T get() const { + return value; + } + + _ALWAYS_INLINE_ T increment() { + return ++value; + } + + _ALWAYS_INLINE_ T postincrement() { + return value++; + } + + _ALWAYS_INLINE_ T decrement() { + return --value; + } + + _ALWAYS_INLINE_ T postdecrement() { + return value--; + } + + _ALWAYS_INLINE_ T add(T p_value) { + return value += p_value; + } + + _ALWAYS_INLINE_ T postadd(T p_value) { + T old = value; + value += p_value; + return old; + } + + _ALWAYS_INLINE_ T sub(T p_value) { + return value -= p_value; + } + + _ALWAYS_INLINE_ T postsub(T p_value) { + T old = value; + value -= p_value; + return old; + } + + _ALWAYS_INLINE_ T exchange_if_greater(T p_value) { + if (value < p_value) { + value = p_value; } - if (__sync_val_compare_and_swap(pw, tmp, val) == tmp) { - return val; + return value; + } + + _ALWAYS_INLINE_ T conditional_increment() { + if (value == 0) { + return 0; + } else { + return ++value; } } -} - -#elif defined(_MSC_VER) -// For MSVC use a separate compilation unit to prevent windows.h from polluting -// the global namespace.
-uint32_t atomic_conditional_increment(volatile uint32_t *pw); -uint32_t atomic_decrement(volatile uint32_t *pw); -uint32_t atomic_increment(volatile uint32_t *pw); -uint32_t atomic_sub(volatile uint32_t *pw, volatile uint32_t val); -uint32_t atomic_add(volatile uint32_t *pw, volatile uint32_t val); -uint32_t atomic_exchange_if_greater(volatile uint32_t *pw, volatile uint32_t val); - -uint64_t atomic_conditional_increment(volatile uint64_t *pw); -uint64_t atomic_decrement(volatile uint64_t *pw); -uint64_t atomic_increment(volatile uint64_t *pw); -uint64_t atomic_sub(volatile uint64_t *pw, volatile uint64_t val); -uint64_t atomic_add(volatile uint64_t *pw, volatile uint64_t val); -uint64_t atomic_exchange_if_greater(volatile uint64_t *pw, volatile uint64_t val); -#else -//no threads supported? -#error Must provide atomic functions for this platform or compiler! -#endif + _ALWAYS_INLINE_ explicit SafeNumeric<T>(T p_value = static_cast<T>(0)) : + value(p_value) { + } +}; -struct SafeRefCount { - uint32_t count = 0; +class SafeFlag { +protected: + bool flag; public: - // destroy() is called when weak_count_ drops to zero. - - _ALWAYS_INLINE_ bool ref() { // true on success + _ALWAYS_INLINE_ bool is_set() const { + return flag; + } - return atomic_conditional_increment(&count) != 0; + _ALWAYS_INLINE_ void set() { + flag = true; } - _ALWAYS_INLINE_ uint32_t refval() { // none-zero on success + _ALWAYS_INLINE_ void clear() { + flag = false; + } - return atomic_conditional_increment(&count); + _ALWAYS_INLINE_ void set_to(bool p_value) { + flag = p_value; } - _ALWAYS_INLINE_ bool unref() { // true if must be disposed of + _ALWAYS_INLINE_ explicit SafeFlag(bool p_value = false) : + flag(p_value) {} +}; + +class SafeRefCount { + uint32_t count = 0; - return atomic_decrement(&count) == 0; +public: + _ALWAYS_INLINE_ bool ref() { // true on success + if (count != 0) { + ++count; + return true; + } else { + return false; + } } - _ALWAYS_INLINE_ uint32_t unrefval() { // 0 if must be disposed of + _ALWAYS_INLINE_ uint32_t refval() { // none-zero on success + if (count != 0) { + return ++count; + } else { + return 0; + } + } - return atomic_decrement(&count); + _ALWAYS_INLINE_ bool unref() { // true if must be disposed of + return --count == 0; } - _ALWAYS_INLINE_ uint32_t get() const { // nothrow + _ALWAYS_INLINE_ uint32_t unrefval() { // 0 if must be disposed of + return --count; + } + _ALWAYS_INLINE_ uint32_t get() const { return count; } @@ -200,4 +314,6 @@ public: } }; +#endif + #endif // SAFE_REFCOUNT_H |
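For the boolean call sites the same migration applies: _Thread's volatile bool active and the WARN_DEPRECATED guards in error_macros.h become SafeFlag, whose set()/clear()/is_set() replace plain writes and reads (and, per the #else branch above, a non-atomic version with the same interface is used when NO_THREADS is defined). The sketch below is illustrative only and not Godot source; SafeFlagSketch is a hypothetical stand-in over std::atomic_bool that mirrors the start()/wait_to_finish() flow from core_bind.cpp.

// Illustrative sketch only (not Godot source): the SafeFlag pattern that
// replaces `volatile bool active` in _Thread.
#include <atomic>
#include <chrono>
#include <cstdio>
#include <thread>

class SafeFlagSketch { // hypothetical name; the real class is SafeFlag
	std::atomic_bool flag{ false };

public:
	bool is_set() const { return flag.load(std::memory_order_acquire); }
	void set() { flag.store(true, std::memory_order_release); }
	void clear() { flag.store(false, std::memory_order_release); }
};

int main() {
	SafeFlagSketch active;

	// start(): refuse to start twice, then mark the thread as active before launching it.
	if (active.is_set()) {
		std::fprintf(stderr, "Thread already started.\n");
		return 1;
	}
	active.set();

	std::thread worker([] {
		std::this_thread::sleep_for(std::chrono::milliseconds(10)); // simulated work
	});

	std::printf("active before join: %s\n", active.is_set() ? "true" : "false");

	// wait_to_finish(): join, then clear the flag so the object can be reused.
	worker.join();
	active.clear();
	std::printf("active after join: %s\n", active.is_set() ? "true" : "false");
	return 0;
}

Any C++11 compiler with the platform thread library will build this; the point is only that reads and writes of the flag become explicit, ordered operations instead of accesses to a volatile bool.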