Diffstat (limited to 'core')
-rw-r--r--  core/image.cpp        21
-rw-r--r--  core/safe_refcount.h  16
2 files changed, 17 insertions(+), 20 deletions(-)
diff --git a/core/image.cpp b/core/image.cpp
index 5ce744f709..6211c06ebe 100644
--- a/core/image.cpp
+++ b/core/image.cpp
@@ -754,15 +754,14 @@ static void _scale_lanczos(const uint8_t *__restrict p_src, uint8_t *__restrict
for (int32_t buffer_x = 0; buffer_x < dst_width; buffer_x++) {
- float src_real_x = buffer_x * x_scale;
- int32_t src_x = src_real_x;
-
- int32_t start_x = MAX(0, src_x - half_kernel + 1);
- int32_t end_x = MIN(src_width - 1, src_x + half_kernel);
+ // The corresponding point on the source image
+ float src_x = (buffer_x + 0.5f) * x_scale; // Offset by 0.5 so it uses the pixel's center
+ int32_t start_x = MAX(0, int32_t(src_x) - half_kernel + 1);
+ int32_t end_x = MIN(src_width - 1, int32_t(src_x) + half_kernel);
// Create the kernel used by all the pixels of the column
for (int32_t target_x = start_x; target_x <= end_x; target_x++)
- kernel[target_x - start_x] = _lanczos((src_real_x - target_x) / scale_factor);
+ kernel[target_x - start_x] = _lanczos((target_x + 0.5f - src_x) / scale_factor);
for (int32_t buffer_y = 0; buffer_y < src_height; buffer_y++) {
@@ -805,14 +804,12 @@ static void _scale_lanczos(const uint8_t *__restrict p_src, uint8_t *__restrict
for (int32_t dst_y = 0; dst_y < dst_height; dst_y++) {
- float buffer_real_y = dst_y * y_scale;
- int32_t buffer_y = buffer_real_y;
-
- int32_t start_y = MAX(0, buffer_y - half_kernel + 1);
- int32_t end_y = MIN(src_height - 1, buffer_y + half_kernel);
+ float buffer_y = (dst_y + 0.5f) * y_scale;
+ int32_t start_y = MAX(0, int32_t(buffer_y) - half_kernel + 1);
+ int32_t end_y = MIN(src_height - 1, int32_t(buffer_y) + half_kernel);
for (int32_t target_y = start_y; target_y <= end_y; target_y++)
- kernel[target_y - start_y] = _lanczos((buffer_real_y - target_y) / scale_factor);
+ kernel[target_y - start_y] = _lanczos((target_y + 0.5f - buffer_y) / scale_factor);
for (int32_t dst_x = 0; dst_x < dst_width; dst_x++) {
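
Note on the core/image.cpp hunks above: the old code mapped a destination pixel to the source as buffer_x * x_scale, i.e. from the pixel's top-left corner, which skews the Lanczos window toward lower coordinates. The patch samples at the pixel's centre instead and measures each kernel weight from the centre of the source pixel (the sign flip in the kernel argument is harmless, since the Lanczos window is an even function). A minimal sketch of the centre-based mapping, with a hypothetical helper name and assuming x_scale = src_width / dst_width as the surrounding clamping code implies:

#include <cstdint>

// Hypothetical helper mirroring the patched mapping: the centre of destination
// column buffer_x, expressed in source-image coordinates.
static inline float dst_to_src_center(int32_t buffer_x, float x_scale) {
	return (buffer_x + 0.5f) * x_scale;
}

// Worked example for a 4 -> 2 downscale (src_width = 4, dst_width = 2, x_scale = 2.0):
//   old mapping  buffer_x * x_scale           -> 0.0, 2.0  (left-biased)
//   new mapping  (buffer_x + 0.5f) * x_scale  -> 1.0, 3.0  (centred between source pixels 0/1 and 2/3)
// The weight for source column target_x is then _lanczos((target_x + 0.5f - src_x) / scale_factor),
// i.e. the distance from that source pixel's centre to the sampled point.
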
diff --git a/core/safe_refcount.h b/core/safe_refcount.h
index 54f540b0c7..0b65ffb9ca 100644
--- a/core/safe_refcount.h
+++ b/core/safe_refcount.h
@@ -97,8 +97,8 @@ static _ALWAYS_INLINE_ T atomic_exchange_if_greater(volatile T *pw, volatile V v
/* Implementation for GCC & Clang */
-// GCC guarantees atomic intrinsics for sizes of 1, 2, 4 and 8 bytes.
-// Clang states it supports GCC atomic builtins.
+#include <stdbool.h>
+#include <atomic>
template <class T>
static _ALWAYS_INLINE_ T atomic_conditional_increment(volatile T *pw) {
@@ -107,7 +107,7 @@ static _ALWAYS_INLINE_ T atomic_conditional_increment(volatile T *pw) {
T tmp = static_cast<T const volatile &>(*pw);
if (tmp == 0)
return 0; // if zero, can't add to it anymore
- if (__sync_val_compare_and_swap(pw, tmp, tmp + 1) == tmp)
+ if (__atomic_compare_exchange_n(pw, &tmp, tmp + 1, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST) == true)
return tmp + 1;
}
}
@@ -115,25 +115,25 @@ static _ALWAYS_INLINE_ T atomic_conditional_increment(volatile T *pw) {
template <class T>
static _ALWAYS_INLINE_ T atomic_decrement(volatile T *pw) {
- return __sync_sub_and_fetch(pw, 1);
+ return __atomic_sub_fetch(pw, 1, __ATOMIC_SEQ_CST);
}
template <class T>
static _ALWAYS_INLINE_ T atomic_increment(volatile T *pw) {
- return __sync_add_and_fetch(pw, 1);
+ return __atomic_add_fetch(pw, 1, __ATOMIC_SEQ_CST);
}
template <class T, class V>
static _ALWAYS_INLINE_ T atomic_sub(volatile T *pw, volatile V val) {
- return __sync_sub_and_fetch(pw, val);
+ return __atomic_sub_fetch(pw, val, __ATOMIC_SEQ_CST);
}
template <class T, class V>
static _ALWAYS_INLINE_ T atomic_add(volatile T *pw, volatile V val) {
- return __sync_add_and_fetch(pw, val);
+ return __atomic_add_fetch(pw, val, __ATOMIC_SEQ_CST);
}
template <class T, class V>
@@ -143,7 +143,7 @@ static _ALWAYS_INLINE_ T atomic_exchange_if_greater(volatile T *pw, volatile V v
T tmp = static_cast<T const volatile &>(*pw);
if (tmp >= val)
return tmp; // already greater, or equal
- if (__sync_val_compare_and_swap(pw, tmp, val) == tmp)
+ if (__atomic_compare_exchange_n(pw, &tmp, val, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST) == true)
return val;
}
}
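
Note on the core/safe_refcount.h hunks above: the legacy __sync_* builtins are swapped for the __atomic_* family, which takes explicit memory-order arguments; __ATOMIC_SEQ_CST preserves the full-barrier behaviour of the old calls. The compare-and-swap also changes shape: __sync_val_compare_and_swap returned the previous value, whereas __atomic_compare_exchange_n returns a bool and, on failure, stores the observed value back into the expected operand (tmp); this is harmless here because both loops re-read *pw on the next iteration. A minimal standalone sketch of the same pattern, assuming a GCC/Clang toolchain and using a hypothetical name:

#include <cstdint>

// Hypothetical standalone version of the atomic_conditional_increment() pattern:
// increment *pw only while it is non-zero, returning the new value (or 0).
static inline uint32_t conditional_increment(volatile uint32_t *pw) {
	while (true) {
		uint32_t tmp = *pw;
		if (tmp == 0)
			return 0; // already released, don't revive it
		// Success: *pw was still tmp and is now tmp + 1.
		// Failure: tmp now holds the value actually seen; loop and retry.
		if (__atomic_compare_exchange_n(pw, &tmp, tmp + 1, false,
				__ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
			return tmp + 1;
	}
}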