Diffstat (limited to 'core/safe_refcount.h')
 core/safe_refcount.h | 50 +++++++++++++++++++++++++-------------------------
 1 file changed, 25 insertions(+), 25 deletions(-)
diff --git a/core/safe_refcount.h b/core/safe_refcount.h
index eff209c2db..36bcf5e576 100644
--- a/core/safe_refcount.h
+++ b/core/safe_refcount.h
@@ -44,7 +44,7 @@
 /* Bogus implementation unaware of multiprocessing */

 template <class T>
-static _ALWAYS_INLINE_ T atomic_conditional_increment(register T *pw) {
+static _ALWAYS_INLINE_ T atomic_conditional_increment(volatile T *pw) {

 	if (*pw == 0)
 		return 0;
@@ -55,7 +55,7 @@ static _ALWAYS_INLINE_ T atomic_conditional_increment(register T *pw) {
 }

 template <class T>
-static _ALWAYS_INLINE_ T atomic_decrement(register T *pw) {
+static _ALWAYS_INLINE_ T atomic_decrement(volatile T *pw) {

 	(*pw)--;

@@ -63,7 +63,7 @@ static _ALWAYS_INLINE_ T atomic_decrement(register T *pw) {
 }

 template <class T>
-static _ALWAYS_INLINE_ T atomic_increment(register T *pw) {
+static _ALWAYS_INLINE_ T atomic_increment(volatile T *pw) {

 	(*pw)++;

@@ -71,7 +71,7 @@ static _ALWAYS_INLINE_ T atomic_increment(register T *pw) {
 }

 template <class T, class V>
-static _ALWAYS_INLINE_ T atomic_sub(register T *pw, register V val) {
+static _ALWAYS_INLINE_ T atomic_sub(volatile T *pw, volatile V val) {

 	(*pw) -= val;

@@ -79,7 +79,7 @@ static _ALWAYS_INLINE_ T atomic_sub(register T *pw, register V val) {
 }

 template <class T, class V>
-static _ALWAYS_INLINE_ T atomic_add(register T *pw, register V val) {
+static _ALWAYS_INLINE_ T atomic_add(volatile T *pw, volatile V val) {

 	(*pw) += val;

@@ -87,7 +87,7 @@ static _ALWAYS_INLINE_ T atomic_add(register T *pw, register V val) {
 }

 template <class T, class V>
-static _ALWAYS_INLINE_ T atomic_exchange_if_greater(register T *pw, register V val) {
+static _ALWAYS_INLINE_ T atomic_exchange_if_greater(volatile T *pw, volatile V val) {

 	if (val > *pw)
 		*pw = val;
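
Editor's note: the change itself just swaps the deprecated register keyword (removed in C++17, and never meaningful on these parameters) for volatile across every signature. The shape of the API is worth a remark: atomic_conditional_increment() returns 0 when the counter is already 0 instead of incrementing unconditionally, so a caller can refuse to resurrect a refcount that has already died. A minimal sketch of a hypothetical caller; the struct and names below are illustrative, not from this header:

// Hypothetical caller, illustrative only.
#include "core/safe_refcount.h"
#include <stdint.h>

struct HypotheticalRefCount {
	uint32_t count;

	// Fails (returns false) if the count already reached 0,
	// i.e. the object is being destroyed and must not be revived.
	bool ref() {
		return atomic_conditional_increment(&count) != 0;
	}

	// True when this call released the last reference.
	bool unref() {
		return atomic_decrement(&count) == 0;
	}
};
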
@@ -103,7 +103,7 @@ static _ALWAYS_INLINE_ T atomic_exchange_if_greater(register T *pw, register V val) {
 // Clang states it supports GCC atomic builtins.

 template <class T>
-static _ALWAYS_INLINE_ T atomic_conditional_increment(register T *pw) {
+static _ALWAYS_INLINE_ T atomic_conditional_increment(volatile T *pw) {

 	while (true) {
 		T tmp = static_cast<T const volatile &>(*pw);
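
Editor's note: the hunk cuts off mid-loop; judging from the visible lines, the body follows the usual compare-and-swap retry pattern. A sketch of that shape, assuming GCC's __sync_val_compare_and_swap builtin (which returns the value *pw held before the attempted swap); this shows the pattern, not necessarily the file's exact code:

// CAS-loop sketch (assumed shape, not verbatim file contents).
template <class T>
static T conditional_increment_sketch(volatile T *pw) {
	while (true) {
		T tmp = static_cast<T const volatile &>(*pw); // snapshot current value
		if (tmp == 0)
			return 0; // already dead; caller must not revive it
		// If *pw still equals tmp, tmp + 1 is stored and the old value returned.
		if (__sync_val_compare_and_swap(pw, tmp, tmp + 1) == tmp)
			return tmp + 1; // we won the race
		// Otherwise another thread changed *pw first; reload and retry.
	}
}
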
@@ -115,31 +115,31 @@ static _ALWAYS_INLINE_ T atomic_conditional_increment(register T *pw) {
 }

 template <class T>
-static _ALWAYS_INLINE_ T atomic_decrement(register T *pw) {
+static _ALWAYS_INLINE_ T atomic_decrement(volatile T *pw) {

 	return __sync_sub_and_fetch(pw, 1);
 }

 template <class T>
-static _ALWAYS_INLINE_ T atomic_increment(register T *pw) {
+static _ALWAYS_INLINE_ T atomic_increment(volatile T *pw) {

 	return __sync_add_and_fetch(pw, 1);
 }

 template <class T, class V>
-static _ALWAYS_INLINE_ T atomic_sub(register T *pw, register V val) {
+static _ALWAYS_INLINE_ T atomic_sub(volatile T *pw, volatile V val) {

 	return __sync_sub_and_fetch(pw, val);
 }

 template <class T, class V>
-static _ALWAYS_INLINE_ T atomic_add(register T *pw, register V val) {
+static _ALWAYS_INLINE_ T atomic_add(volatile T *pw, volatile V val) {

 	return __sync_add_and_fetch(pw, val);
 }

 template <class T, class V>
-static _ALWAYS_INLINE_ T atomic_exchange_if_greater(register T *pw, register V val) {
+static _ALWAYS_INLINE_ T atomic_exchange_if_greater(volatile T *pw, volatile V val) {

 	while (true) {
 		T tmp = static_cast<T const volatile &>(*pw);
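
Editor's note: the __sync_*_and_fetch builtins used above are the legacy GCC atomics (predating the __atomic family): they apply the operation atomically with a full memory barrier and return the new value, which is exactly what a refcount needs, e.g. to test whether a decrement just hit zero. A minimal standalone demonstration:

// Standalone demo of the __sync builtins' return-new-value semantics.
// Compiles with GCC or Clang.
#include <stdint.h>
#include <stdio.h>

int main() {
	volatile uint32_t count = 1;
	uint32_t a = __sync_add_and_fetch(&count, 1); // atomic ++, returns 2
	uint32_t b = __sync_sub_and_fetch(&count, 1); // atomic --, returns 1
	uint32_t c = __sync_sub_and_fetch(&count, 1); // returns 0: last reference gone
	printf("%u %u %u\n", a, b, c); // prints: 2 1 0
	return 0;
}
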
@@ -153,19 +153,19 @@ static _ALWAYS_INLINE_ T atomic_exchange_if_greater(register T *pw, register V val) {
 #elif defined(_MSC_VER)
 // For MSVC use a separate compilation unit to prevent windows.h from polluting
 // the global namespace.
-uint32_t atomic_conditional_increment(register uint32_t *pw);
-uint32_t atomic_decrement(register uint32_t *pw);
-uint32_t atomic_increment(register uint32_t *pw);
-uint32_t atomic_sub(register uint32_t *pw, register uint32_t val);
-uint32_t atomic_add(register uint32_t *pw, register uint32_t val);
-uint32_t atomic_exchange_if_greater(register uint32_t *pw, register uint32_t val);
-
-uint64_t atomic_conditional_increment(register uint64_t *pw);
-uint64_t atomic_decrement(register uint64_t *pw);
-uint64_t atomic_increment(register uint64_t *pw);
-uint64_t atomic_sub(register uint64_t *pw, register uint64_t val);
-uint64_t atomic_add(register uint64_t *pw, register uint64_t val);
-uint64_t atomic_exchange_if_greater(register uint64_t *pw, register uint64_t val);
+uint32_t atomic_conditional_increment(volatile uint32_t *pw);
+uint32_t atomic_decrement(volatile uint32_t *pw);
+uint32_t atomic_increment(volatile uint32_t *pw);
+uint32_t atomic_sub(volatile uint32_t *pw, volatile uint32_t val);
+uint32_t atomic_add(volatile uint32_t *pw, volatile uint32_t val);
+uint32_t atomic_exchange_if_greater(volatile uint32_t *pw, volatile uint32_t val);
+
+uint64_t atomic_conditional_increment(volatile uint64_t *pw);
+uint64_t atomic_decrement(volatile uint64_t *pw);
+uint64_t atomic_increment(volatile uint64_t *pw);
+uint64_t atomic_sub(volatile uint64_t *pw, volatile uint64_t val);
+uint64_t atomic_add(volatile uint64_t *pw, volatile uint64_t val);
+uint64_t atomic_exchange_if_greater(volatile uint64_t *pw, volatile uint64_t val);

 #else
 //no threads supported?
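
Editor's note: on the MSVC branch the functions above are deliberately declared without bodies, so only one compilation unit has to include windows.h. A plausible sketch of part of that unit, assuming the Win32 Interlocked* intrinsics; the actual out-of-line definitions live elsewhere in the tree and may be organized differently:

// Hypothetical sketch of the separate MSVC compilation unit
// (assumed, not the actual source file).
#include "core/safe_refcount.h"

#include <windows.h>

uint32_t atomic_increment(volatile uint32_t *pw) {
	// InterlockedIncrement returns the post-increment value.
	return InterlockedIncrement((LONG volatile *)pw);
}

uint32_t atomic_decrement(volatile uint32_t *pw) {
	// InterlockedDecrement returns the post-decrement value.
	return InterlockedDecrement((LONG volatile *)pw);
}

uint32_t atomic_conditional_increment(volatile uint32_t *pw) {
	// Same CAS loop as the GCC path; InterlockedCompareExchange returns
	// the value *pw held before the attempted exchange.
	while (true) {
		uint32_t tmp = *pw;
		if (tmp == 0)
			return 0;
		if (InterlockedCompareExchange((LONG volatile *)pw, tmp + 1, tmp) == tmp)
			return tmp + 1;
	}
}
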