blob: 1164210f23aabdaf4f8fef090cddf6bd74e8bac1 (
plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
|
// Copyright 2009-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
#include "platform.h"
#include "intrinsics.h"
#include "atomic.h"
namespace embree
{
/*! system mutex */
class MutexSys {
// the condition-variable implementation needs direct access to the
// underlying handle stored in 'mutex'
friend struct ConditionImplementation;
public:
MutexSys();   //!< creates the underlying platform mutex (defined out of line)
~MutexSys();  //!< destroys the underlying platform mutex
private:
// non-copyable: the raw handle must have exactly one owner
MutexSys (const MutexSys& other) DELETED; // do not implement
MutexSys& operator= (const MutexSys& other) DELETED; // do not implement
public:
void lock();     //!< acquires the mutex (presumably blocking -- definition not visible here)
bool try_lock(); //!< attempts to acquire; true on success -- confirm exact semantics in the .cpp
void unlock();   //!< releases the mutex
protected:
void* mutex;  //!< opaque handle to the platform mutex object
};
/*! spinning mutex */
class SpinLock
{
public:
/*! creates the spinlock in the unlocked state */
SpinLock ()
: flag(false) {}

/*! returns whether the lock is currently held.
 *  Fix: marked const (pure reader) so the state can be queried
 *  through const references. */
__forceinline bool isLocked() const {
return flag.load();
}

/*! acquires the lock, spinning until it becomes available */
__forceinline void lock()
{
while (true)
{
/* test-and-test-and-set: spin on plain loads first so contended
   waiters do not hammer the cache line with atomic writes */
while (flag.load())
{
_mm_pause();
_mm_pause();
}
bool expected = false;
if (flag.compare_exchange_strong(expected,true,std::memory_order_acquire))
break;
}
}

/*! attempts to acquire the lock without spinning; returns true on success */
__forceinline bool try_lock()
{
bool expected = false;
/* cheap load first to avoid the read-modify-write when clearly held */
if (flag.load() != expected) {
return false;
}
return flag.compare_exchange_strong(expected,true,std::memory_order_acquire);
}

/*! releases the lock */
__forceinline void unlock() {
flag.store(false,std::memory_order_release);
}

/*! spins until the lock is observed unlocked, without acquiring it.
 *  Note: by the time this returns another thread may already have
 *  re-acquired the lock. Fix: marked const (pure reader). */
__forceinline void wait_until_unlocked() const
{
while(flag.load())
{
_mm_pause();
_mm_pause();
}
}

public:
atomic<bool> flag;  //!< true while the lock is held
};
/*! safe mutex lock and unlock helper */
template<typename Mutex> class Lock {
public:
Lock (Mutex& mutex) : mutex(mutex), locked(true) { mutex.lock(); }
Lock (Mutex& mutex, bool locked) : mutex(mutex), locked(locked) {}
~Lock() { if (locked) mutex.unlock(); }
__forceinline void lock() { assert(!locked); locked = true; mutex.lock(); }
__forceinline bool isLocked() const { return locked; }
protected:
Mutex& mutex;
bool locked;
};
}
|