Diffstat (limited to 'core/templates/safe_refcount.h')
-rw-r--r--	core/templates/safe_refcount.h	342
1 file changed, 229 insertions(+), 113 deletions(-)
diff --git a/core/templates/safe_refcount.h b/core/templates/safe_refcount.h
index d94444fad6..6aebc24ec3 100644
--- a/core/templates/safe_refcount.h
+++ b/core/templates/safe_refcount.h
@@ -31,167 +31,281 @@
#ifndef SAFE_REFCOUNT_H
#define SAFE_REFCOUNT_H
-#include "core/os/mutex.h"
#include "core/typedefs.h"
-#include "platform_config.h"
-// Atomic functions, these are used for multithread safe reference counters!
+#if !defined(NO_THREADS)
-#ifdef NO_THREADS
+#include <atomic>
-/* Bogus implementation unaware of multiprocessing */
+// Design goals for these classes:
+// - No automatic conversions or arithmetic operators,
+// to keep explicit the use of atomics everywhere.
+// - Using acquire-release semantics, even to set the first value.
+// The first value may be set relaxedly in many cases, but adding the distinction
+// between relaxed and unrelaxed operation to the interface would make it needlessly
+// flexible. There's negligible waste in having release semantics for the initial
+// value and, as an important benefit, you can be sure the value is properly synchronized
+// even with threads that are already running.
template <class T>
-static _ALWAYS_INLINE_ T atomic_conditional_increment(volatile T *pw) {
- if (*pw == 0) {
- return 0;
+class SafeNumeric {
+ std::atomic<T> value;
+
+public:
+ _ALWAYS_INLINE_ void set(T p_value) {
+ value.store(p_value, std::memory_order_release);
}
- (*pw)++;
+ _ALWAYS_INLINE_ T get() const {
+ return value.load(std::memory_order_acquire);
+ }
- return *pw;
-}
+ _ALWAYS_INLINE_ T increment() {
+ return value.fetch_add(1, std::memory_order_acq_rel) + 1;
+ }
-template <class T>
-static _ALWAYS_INLINE_ T atomic_decrement(volatile T *pw) {
- (*pw)--;
+ // Returns the original value instead of the new one
+ _ALWAYS_INLINE_ T postincrement() {
+ return value.fetch_add(1, std::memory_order_acq_rel);
+ }
- return *pw;
-}
+ _ALWAYS_INLINE_ T decrement() {
+ return value.fetch_sub(1, std::memory_order_acq_rel) - 1;
+ }
-template <class T>
-static _ALWAYS_INLINE_ T atomic_increment(volatile T *pw) {
- (*pw)++;
+ // Returns the original value instead of the new one
+ _ALWAYS_INLINE_ T postdecrement() {
+ return value.fetch_sub(1, std::memory_order_acq_rel);
+ }
- return *pw;
-}
+ _ALWAYS_INLINE_ T add(T p_value) {
+ return value.fetch_add(p_value, std::memory_order_acq_rel) + p_value;
+ }
-template <class T, class V>
-static _ALWAYS_INLINE_ T atomic_sub(volatile T *pw, volatile V val) {
- (*pw) -= val;
+ // Returns the original value instead of the new one
+ _ALWAYS_INLINE_ T postadd(T p_value) {
+ return value.fetch_add(p_value, std::memory_order_acq_rel);
+ }
- return *pw;
-}
+ _ALWAYS_INLINE_ T sub(T p_value) {
+ return value.fetch_sub(p_value, std::memory_order_acq_rel) - p_value;
+ }
-template <class T, class V>
-static _ALWAYS_INLINE_ T atomic_add(volatile T *pw, volatile V val) {
- (*pw) += val;
+ // Returns the original value instead of the new one
+ _ALWAYS_INLINE_ T postsub(T p_value) {
+ return value.fetch_sub(p_value, std::memory_order_acq_rel);
+ }
- return *pw;
-}
+ _ALWAYS_INLINE_ T exchange_if_greater(T p_value) {
+ while (true) {
+ T tmp = value.load(std::memory_order_acquire);
+ if (tmp >= p_value) {
+ return tmp; // already greater, or equal
+ }
+ if (value.compare_exchange_weak(tmp, p_value, std::memory_order_release)) {
+ return p_value;
+ }
+ }
+ }
-template <class T, class V>
-static _ALWAYS_INLINE_ T atomic_exchange_if_greater(volatile T *pw, volatile V val) {
- if (val > *pw) {
- *pw = val;
+ _ALWAYS_INLINE_ T conditional_increment() {
+ while (true) {
+ T c = value.load(std::memory_order_acquire);
+ if (c == 0) {
+ return 0;
+ }
+ if (value.compare_exchange_weak(c, c + 1, std::memory_order_release)) {
+ return c + 1;
+ }
+ }
}
- return *pw;
-}
+ _ALWAYS_INLINE_ explicit SafeNumeric<T>(T p_value = static_cast<T>(0)) {
+ set(p_value);
+ }
+};
-#elif defined(__GNUC__)
+class SafeFlag {
+ std::atomic_bool flag;
-/* Implementation for GCC & Clang */
+public:
+ _ALWAYS_INLINE_ bool is_set() const {
+ return flag.load(std::memory_order_acquire);
+ }
-// GCC guarantees atomic intrinsics for sizes of 1, 2, 4 and 8 bytes.
-// Clang states it supports GCC atomic builtins.
+ _ALWAYS_INLINE_ void set() {
+ flag.store(true, std::memory_order_release);
+ }
-template <class T>
-static _ALWAYS_INLINE_ T atomic_conditional_increment(volatile T *pw) {
- while (true) {
- T tmp = static_cast<T const volatile &>(*pw);
- if (tmp == 0) {
- return 0; // if zero, can't add to it anymore
- }
- if (__sync_val_compare_and_swap(pw, tmp, tmp + 1) == tmp) {
- return tmp + 1;
- }
+ _ALWAYS_INLINE_ void clear() {
+ flag.store(false, std::memory_order_release);
}
-}
-template <class T>
-static _ALWAYS_INLINE_ T atomic_decrement(volatile T *pw) {
- return __sync_sub_and_fetch(pw, 1);
-}
+ _ALWAYS_INLINE_ void set_to(bool p_value) {
+ flag.store(p_value, std::memory_order_release);
+ }
+
+ _ALWAYS_INLINE_ explicit SafeFlag(bool p_value = false) {
+ set_to(p_value);
+ }
+};
+
+class SafeRefCount {
+ SafeNumeric<uint32_t> count;
+
+public:
+ _ALWAYS_INLINE_ bool ref() { // true on success
+ return count.conditional_increment() != 0;
+ }
+
+	_ALWAYS_INLINE_ uint32_t refval() { // non-zero on success
+ return count.conditional_increment();
+ }
+
+ _ALWAYS_INLINE_ bool unref() { // true if must be disposed of
+ return count.decrement() == 0;
+ }
+
+ _ALWAYS_INLINE_ uint32_t unrefval() { // 0 if must be disposed of
+ return count.decrement();
+ }
+
+ _ALWAYS_INLINE_ uint32_t get() const {
+ return count.get();
+ }
+
+ _ALWAYS_INLINE_ void init(uint32_t p_value = 1) {
+ count.set(p_value);
+ }
+};
+
+#else
template <class T>
-static _ALWAYS_INLINE_ T atomic_increment(volatile T *pw) {
- return __sync_add_and_fetch(pw, 1);
-}
-
-template <class T, class V>
-static _ALWAYS_INLINE_ T atomic_sub(volatile T *pw, volatile V val) {
- return __sync_sub_and_fetch(pw, val);
-}
-
-template <class T, class V>
-static _ALWAYS_INLINE_ T atomic_add(volatile T *pw, volatile V val) {
- return __sync_add_and_fetch(pw, val);
-}
-
-template <class T, class V>
-static _ALWAYS_INLINE_ T atomic_exchange_if_greater(volatile T *pw, volatile V val) {
- while (true) {
- T tmp = static_cast<T const volatile &>(*pw);
- if (tmp >= val) {
- return tmp; // already greater, or equal
+class SafeNumeric {
+protected:
+ T value;
+
+public:
+ _ALWAYS_INLINE_ void set(T p_value) {
+ value = p_value;
+ }
+
+ _ALWAYS_INLINE_ T get() const {
+ return value;
+ }
+
+ _ALWAYS_INLINE_ T increment() {
+ return ++value;
+ }
+
+ _ALWAYS_INLINE_ T postincrement() {
+ return value++;
+ }
+
+ _ALWAYS_INLINE_ T decrement() {
+ return --value;
+ }
+
+ _ALWAYS_INLINE_ T postdecrement() {
+ return value--;
+ }
+
+ _ALWAYS_INLINE_ T add(T p_value) {
+ return value += p_value;
+ }
+
+ _ALWAYS_INLINE_ T postadd(T p_value) {
+ T old = value;
+ value += p_value;
+ return old;
+ }
+
+ _ALWAYS_INLINE_ T sub(T p_value) {
+ return value -= p_value;
+ }
+
+ _ALWAYS_INLINE_ T postsub(T p_value) {
+ T old = value;
+ value -= p_value;
+ return old;
+ }
+
+ _ALWAYS_INLINE_ T exchange_if_greater(T p_value) {
+ if (value < p_value) {
+ value = p_value;
}
- if (__sync_val_compare_and_swap(pw, tmp, val) == tmp) {
- return val;
+ return value;
+ }
+
+ _ALWAYS_INLINE_ T conditional_increment() {
+		if (value == 0) {
+ return 0;
+ } else {
+ return ++value;
}
}
-}
-
-#elif defined(_MSC_VER)
-// For MSVC use a separate compilation unit to prevent windows.h from polluting
-// the global namespace.
-uint32_t atomic_conditional_increment(volatile uint32_t *pw);
-uint32_t atomic_decrement(volatile uint32_t *pw);
-uint32_t atomic_increment(volatile uint32_t *pw);
-uint32_t atomic_sub(volatile uint32_t *pw, volatile uint32_t val);
-uint32_t atomic_add(volatile uint32_t *pw, volatile uint32_t val);
-uint32_t atomic_exchange_if_greater(volatile uint32_t *pw, volatile uint32_t val);
-
-uint64_t atomic_conditional_increment(volatile uint64_t *pw);
-uint64_t atomic_decrement(volatile uint64_t *pw);
-uint64_t atomic_increment(volatile uint64_t *pw);
-uint64_t atomic_sub(volatile uint64_t *pw, volatile uint64_t val);
-uint64_t atomic_add(volatile uint64_t *pw, volatile uint64_t val);
-uint64_t atomic_exchange_if_greater(volatile uint64_t *pw, volatile uint64_t val);
-#else
-//no threads supported?
-#error Must provide atomic functions for this platform or compiler!
-#endif
+ _ALWAYS_INLINE_ explicit SafeNumeric<T>(T p_value = static_cast<T>(0)) :
+ value(p_value) {
+ }
+};
-struct SafeRefCount {
- uint32_t count = 0;
+class SafeFlag {
+protected:
+ bool flag;
public:
- // destroy() is called when weak_count_ drops to zero.
-
- _ALWAYS_INLINE_ bool ref() { // true on success
+ _ALWAYS_INLINE_ bool is_set() const {
+ return flag;
+ }
- return atomic_conditional_increment(&count) != 0;
+ _ALWAYS_INLINE_ void set() {
+ flag = true;
}
- _ALWAYS_INLINE_ uint32_t refval() { // none-zero on success
+ _ALWAYS_INLINE_ void clear() {
+ flag = false;
+ }
- return atomic_conditional_increment(&count);
+ _ALWAYS_INLINE_ void set_to(bool p_value) {
+ flag = p_value;
}
- _ALWAYS_INLINE_ bool unref() { // true if must be disposed of
+ _ALWAYS_INLINE_ explicit SafeFlag(bool p_value = false) :
+ flag(p_value) {}
+};
+
+class SafeRefCount {
+ uint32_t count = 0;
- return atomic_decrement(&count) == 0;
+public:
+ _ALWAYS_INLINE_ bool ref() { // true on success
+ if (count != 0) {
+ ++count;
+ return true;
+ } else {
+ return false;
+ }
}
- _ALWAYS_INLINE_ uint32_t unrefval() { // 0 if must be disposed of
+	_ALWAYS_INLINE_ uint32_t refval() { // non-zero on success
+ if (count != 0) {
+ return ++count;
+ } else {
+ return 0;
+ }
+ }
- return atomic_decrement(&count);
+ _ALWAYS_INLINE_ bool unref() { // true if must be disposed of
+ return --count == 0;
}
- _ALWAYS_INLINE_ uint32_t get() const { // nothrow
+ _ALWAYS_INLINE_ uint32_t unrefval() { // 0 if must be disposed of
+ return --count;
+ }
+ _ALWAYS_INLINE_ uint32_t get() const {
return count;
}
@@ -200,4 +314,6 @@ public:
}
};
+#endif
+
#endif // SAFE_REFCOUNT_H
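
A minimal usage sketch (not part of this commit) illustrating the design goal stated in the new header comment: because set()/set_to() use release stores and get()/is_set() use acquire loads, a value published by one thread before raising a SafeFlag is guaranteed to be visible to another thread that observes the flag as set. The worker/main structure below is hypothetical and assumes a Godot build where this header and <thread> are available.

#include "core/templates/safe_refcount.h"

#include <thread>

static SafeNumeric<uint32_t> result; // hypothetical shared value
static SafeFlag done; // hypothetical completion flag

static void worker() {
	result.set(42); // release store of the payload
	done.set(); // release store; publishes `result` to readers
}

int main() {
	std::thread t(worker);
	while (!done.is_set()) { // acquire load; pairs with done.set()
		std::this_thread::yield();
	}
	uint32_t r = result.get(); // observes 42 once the flag is seen as set
	t.join();
	return r == 42 ? 0 : 1;
}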
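
Likewise, a hedged sketch of the intended ref()/unref() contract on the new SafeRefCount (the Shared struct and the retain()/release() helpers below are illustrative, not Godot API): ref() refuses to revive an object whose count has already reached zero, and exactly one caller of unref() sees true and becomes responsible for disposal.

#include "core/templates/safe_refcount.h"

struct Shared {
	SafeRefCount refcount;
	Shared() { refcount.init(); } // count starts at 1 for the creating owner
};

// Take an extra reference; returns false if the object is already dying
// (its count has reached zero), in which case it must not be used.
static bool retain(Shared *p_obj) {
	return p_obj->refcount.ref();
}

// Drop a reference; the single caller that sees unref() return true
// is the one that must dispose of the object.
static void release(Shared *p_obj) {
	if (p_obj->refcount.unref()) {
		delete p_obj;
	}
}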