author	Rémi Verschelde <rverschelde@gmail.com>	2019-09-01 12:19:44 +0200
committer	GitHub <noreply@github.com>	2019-09-01 12:19:44 +0200
commit	c8ae8f2fb187fc690764e576d5dcc31637b1109c (patch)
tree	2d5f11a469713c6b265591ab8e2d29854d0fa0cd
parent	5dd51a5b3b3da516c0e8bb2855d2772def037203 (diff)
Revert "Add __atomic_* operators support for atomic operations"
-rw-r--r--	core/safe_refcount.h	16
1 file changed, 8 insertions(+), 8 deletions(-)
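
For context (not part of the commit itself): the revert swaps the C11-style __atomic_* builtins back to the older __sync_* builtins. A minimal sketch of the two compare-and-swap forms on a plain counter, assuming GCC or Clang; the helper names below are illustrative and do not appear in the header:

#include <cstdint>

// Legacy builtin restored by this revert: returns the value *pw held before
// the operation, so the swap succeeded iff the return value equals `expected`.
static uint32_t cas_sync(volatile uint32_t *pw, uint32_t expected, uint32_t desired) {
	return __sync_val_compare_and_swap(pw, expected, desired);
}

// C11-style builtin the reverted change had introduced: returns true on
// success; on failure it writes the observed value back into `expected`.
static bool cas_atomic(volatile uint32_t *pw, uint32_t expected, uint32_t desired) {
	return __atomic_compare_exchange_n(pw, &expected, desired, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
}
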
diff --git a/core/safe_refcount.h b/core/safe_refcount.h
index 0b65ffb9ca..54f540b0c7 100644
--- a/core/safe_refcount.h
+++ b/core/safe_refcount.h
@@ -97,8 +97,8 @@ static _ALWAYS_INLINE_ T atomic_exchange_if_greater(volatile T *pw, volatile V v
/* Implementation for GCC & Clang */
-#include <stdbool.h>
-#include <atomic>
+// GCC guarantees atomic intrinsics for sizes of 1, 2, 4 and 8 bytes.
+// Clang states it supports GCC atomic builtins.
template <class T>
static _ALWAYS_INLINE_ T atomic_conditional_increment(volatile T *pw) {
@@ -107,7 +107,7 @@ static _ALWAYS_INLINE_ T atomic_conditional_increment(volatile T *pw) {
T tmp = static_cast<T const volatile &>(*pw);
if (tmp == 0)
return 0; // if zero, can't add to it anymore
- if (__atomic_compare_exchange_n(pw, &tmp, tmp + 1, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST) == true)
+ if (__sync_val_compare_and_swap(pw, tmp, tmp + 1) == tmp)
return tmp + 1;
}
}
@@ -115,25 +115,25 @@ static _ALWAYS_INLINE_ T atomic_conditional_increment(volatile T *pw) {
template <class T>
static _ALWAYS_INLINE_ T atomic_decrement(volatile T *pw) {
- return __atomic_sub_fetch(pw, 1, __ATOMIC_SEQ_CST);
+ return __sync_sub_and_fetch(pw, 1);
}
template <class T>
static _ALWAYS_INLINE_ T atomic_increment(volatile T *pw) {
- return __atomic_add_fetch(pw, 1, __ATOMIC_SEQ_CST);
+ return __sync_add_and_fetch(pw, 1);
}
template <class T, class V>
static _ALWAYS_INLINE_ T atomic_sub(volatile T *pw, volatile V val) {
- return __atomic_sub_fetch(pw, val, __ATOMIC_SEQ_CST);
+ return __sync_sub_and_fetch(pw, val);
}
template <class T, class V>
static _ALWAYS_INLINE_ T atomic_add(volatile T *pw, volatile V val) {
- return __atomic_add_fetch(pw, val, __ATOMIC_SEQ_CST);
+ return __sync_add_and_fetch(pw, val);
}
template <class T, class V>
@@ -143,7 +143,7 @@ static _ALWAYS_INLINE_ T atomic_exchange_if_greater(volatile T *pw, volatile V v
T tmp = static_cast<T const volatile &>(*pw);
if (tmp >= val)
return tmp; // already greater, or equal
- if (__atomic_compare_exchange_n(pw, &tmp, val, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST) == true)
+ if (__sync_val_compare_and_swap(pw, tmp, val) == tmp)
return val;
}
}
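
For illustration only (not part of the diff): atomic_conditional_increment above relies on a retry loop around the compare-and-swap, where the __sync_* builtin signals success by returning the same old value that was just read. A self-contained sketch of that pattern, assuming GCC or Clang and a 32-bit counter:

#include <cstdint>
#include <cstdio>

// Mirrors the retry-loop shape of atomic_conditional_increment in the hunk
// above: never revive a counter that has already reached zero.
static uint32_t conditional_increment(volatile uint32_t *pw) {
	while (true) {
		uint32_t tmp = *pw;
		if (tmp == 0)
			return 0; // if zero, can't add to it anymore
		// Succeeds only if *pw still holds tmp; otherwise another thread
		// changed it first, so re-read and retry.
		if (__sync_val_compare_and_swap(pw, tmp, tmp + 1) == tmp)
			return tmp + 1;
	}
}

int main() {
	volatile uint32_t rc = 1;
	printf("%u\n", (unsigned)conditional_increment(&rc)); // prints 2
	rc = 0;
	printf("%u\n", (unsigned)conditional_increment(&rc)); // prints 0, counter stays dead
	return 0;
}
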