author     Rémi Verschelde <rverschelde@gmail.com>    2020-05-14 13:23:58 +0200
committer  Rémi Verschelde <rverschelde@gmail.com>    2020-05-14 16:54:55 +0200
commit     0be6d925dc3c6413bce7a3ccb49631b8e4a6e67a (patch)
tree       a27e497da7104dd0a64f98a04fa3067668735e91 /core/pool_allocator.cpp
parent     710b34b70227becdc652b4ae027fe0ac47409642 (diff)
Style: clang-format: Disable KeepEmptyLinesAtTheStartOfBlocks
Which means that reduz' beloved style, which we all became used to, will now be changed automatically to remove the first empty line of a block.

This makes us lean closer to 1TBS (the one true brace style) instead of hybridizing it with some Allman-inspired spacing.

There's still the case of braces around single-statement blocks that needs to be addressed; clang-format can't help with that, but clang-tidy may, if we agree about it.

Part of #33027.
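For context, the change is driven by a single option in the repository's .clang-format file. A minimal sketch of the relevant excerpt (BasedOnStyle is an illustrative placeholder here, not Godot's actual base configuration):

# .clang-format (illustrative excerpt)
# When set to false, clang-format deletes blank lines that immediately
# follow an opening brace, which is the mechanical change in every hunk below.
BasedOnStyle: LLVM
KeepEmptyLinesAtTheStartOfBlocks: false

For the single-statement braces question left open above, clang-tidy's readability-braces-around-statements check would be the usual candidate, if the project agrees to adopt it.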
Diffstat (limited to 'core/pool_allocator.cpp')
-rw-r--r--  core/pool_allocator.cpp  50
1 file changed, 0 insertions, 50 deletions
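Concretely, each hunk below is the same mechanical edit. A hypothetical before/after pair (not taken from this file) illustrating the effect:

// Before: blank line kept right after the opening brace.
bool is_full(int entry_count, int entry_max) {

	return entry_count == entry_max;
}

// After clang-format with KeepEmptyLinesAtTheStartOfBlocks set to false:
// the leading blank line is removed; the braces themselves are untouched.
bool is_full(int entry_count, int entry_max) {
	return entry_count == entry_max;
}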
diff --git a/core/pool_allocator.cpp b/core/pool_allocator.cpp
index 8fd67a47d2..20b5edd412 100644
--- a/core/pool_allocator.cpp
+++ b/core/pool_allocator.cpp
@@ -53,12 +53,10 @@ void PoolAllocator::mt_unlock() const {
}
bool PoolAllocator::get_free_entry(EntryArrayPos *p_pos) {
-
if (entry_count == entry_max)
return false;
for (int i = 0; i < entry_max; i++) {
-
if (entry_array[i].len == 0) {
*p_pos = i;
return true;
@@ -77,13 +75,11 @@ bool PoolAllocator::get_free_entry(EntryArrayPos *p_pos) {
* @return true if hole found, false if no hole found
*/
bool PoolAllocator::find_hole(EntryArrayPos *p_pos, int p_for_size) {
-
/* position where previous entry ends. Defaults to zero (beginning of pool) */
int prev_entry_end_pos = 0;
for (int i = 0; i < entry_count; i++) {
-
Entry &entry = entry_array[entry_indices[i]];
/* determine hole size to previous entry */
@@ -111,13 +107,11 @@ bool PoolAllocator::find_hole(EntryArrayPos *p_pos, int p_for_size) {
}
void PoolAllocator::compact(int p_up_to) {
-
uint32_t prev_entry_end_pos = 0;
if (p_up_to < 0)
p_up_to = entry_count;
for (int i = 0; i < p_up_to; i++) {
-
Entry &entry = entry_array[entry_indices[i]];
/* determine hole size to previous entry */
@@ -126,7 +120,6 @@ void PoolAllocator::compact(int p_up_to) {
/* if we can compact, do it */
if (hole_size > 0 && !entry.lock) {
-
COMPACT_CHUNK(entry, prev_entry_end_pos);
}
@@ -136,11 +129,9 @@ void PoolAllocator::compact(int p_up_to) {
}
void PoolAllocator::compact_up(int p_from) {
-
uint32_t next_entry_end_pos = pool_size; // - static_area_size;
for (int i = entry_count - 1; i >= p_from; i--) {
-
Entry &entry = entry_array[entry_indices[i]];
/* determine hole size to next entry */
@@ -149,7 +140,6 @@ void PoolAllocator::compact_up(int p_from) {
/* if we can compact, do it */
if (hole_size > 0 && !entry.lock) {
-
COMPACT_CHUNK(entry, (next_entry_end_pos - aligned(entry.len)));
}
@@ -159,13 +149,10 @@ void PoolAllocator::compact_up(int p_from) {
}
bool PoolAllocator::find_entry_index(EntryIndicesPos *p_map_pos, Entry *p_entry) {
-
EntryArrayPos entry_pos = entry_max;
for (int i = 0; i < entry_count; i++) {
-
if (&entry_array[entry_indices[i]] == p_entry) {
-
entry_pos = i;
break;
}
@@ -179,7 +166,6 @@ bool PoolAllocator::find_entry_index(EntryIndicesPos *p_map_pos, Entry *p_entry)
}
PoolAllocator::ID PoolAllocator::alloc(int p_size) {
-
ERR_FAIL_COND_V(p_size < 1, POOL_ALLOCATOR_INVALID_ID);
#ifdef DEBUG_ENABLED
if (p_size > free_mem)
@@ -221,7 +207,6 @@ PoolAllocator::ID PoolAllocator::alloc(int p_size) {
/* move all entry indices up, make room for this one */
for (int i = entry_count; i > new_entry_indices_pos; i--) {
-
entry_indices[i] = entry_indices[i - 1];
}
@@ -248,7 +233,6 @@ PoolAllocator::ID PoolAllocator::alloc(int p_size) {
}
PoolAllocator::Entry *PoolAllocator::get_entry(ID p_mem) {
-
unsigned int check = p_mem & CHECK_MASK;
int entry = p_mem >> CHECK_BITS;
ERR_FAIL_INDEX_V(entry, entry_max, nullptr);
@@ -259,7 +243,6 @@ PoolAllocator::Entry *PoolAllocator::get_entry(ID p_mem) {
}
const PoolAllocator::Entry *PoolAllocator::get_entry(ID p_mem) const {
-
unsigned int check = p_mem & CHECK_MASK;
int entry = p_mem >> CHECK_BITS;
ERR_FAIL_INDEX_V(entry, entry_max, nullptr);
@@ -270,7 +253,6 @@ const PoolAllocator::Entry *PoolAllocator::get_entry(ID p_mem) const {
}
void PoolAllocator::free(ID p_mem) {
-
mt_lock();
Entry *e = get_entry(p_mem);
if (!e) {
@@ -288,13 +270,11 @@ void PoolAllocator::free(ID p_mem) {
bool index_found = find_entry_index(&entry_indices_pos, e);
if (!index_found) {
-
mt_unlock();
ERR_FAIL_COND(!index_found);
}
for (int i = entry_indices_pos; i < (entry_count - 1); i++) {
-
entry_indices[i] = entry_indices[i + 1];
}
@@ -305,13 +285,11 @@ void PoolAllocator::free(ID p_mem) {
}
int PoolAllocator::get_size(ID p_mem) const {
-
int size;
mt_lock();
const Entry *e = get_entry(p_mem);
if (!e) {
-
mt_unlock();
ERR_PRINT("!e");
return 0;
@@ -325,7 +303,6 @@ int PoolAllocator::get_size(ID p_mem) const {
}
Error PoolAllocator::resize(ID p_mem, int p_new_size) {
-
mt_lock();
Entry *e = get_entry(p_mem);
@@ -342,12 +319,10 @@ Error PoolAllocator::resize(ID p_mem, int p_new_size) {
uint32_t alloc_size = aligned(p_new_size);
if ((uint32_t)aligned(e->len) == alloc_size) {
-
e->len = p_new_size;
mt_unlock();
return OK;
} else if (e->len > (uint32_t)p_new_size) {
-
free_mem += aligned(e->len);
free_mem -= alloc_size;
e->len = p_new_size;
@@ -368,7 +343,6 @@ Error PoolAllocator::resize(ID p_mem, int p_new_size) {
bool index_found = find_entry_index(&entry_indices_pos, e);
if (!index_found) {
-
mt_unlock();
ERR_FAIL_COND_V(!index_found, ERR_BUG);
}
@@ -423,13 +397,11 @@ Error PoolAllocator::resize(ID p_mem, int p_new_size) {
}
Error PoolAllocator::lock(ID p_mem) {
-
if (!needs_locking)
return OK;
mt_lock();
Entry *e = get_entry(p_mem);
if (!e) {
-
mt_unlock();
ERR_PRINT("!e");
return ERR_INVALID_PARAMETER;
@@ -440,14 +412,12 @@ Error PoolAllocator::lock(ID p_mem) {
}
bool PoolAllocator::is_locked(ID p_mem) const {
-
if (!needs_locking)
return false;
mt_lock();
const Entry *e = ((PoolAllocator *)(this))->get_entry(p_mem);
if (!e) {
-
mt_unlock();
ERR_PRINT("!e");
return false;
@@ -458,9 +428,7 @@ bool PoolAllocator::is_locked(ID p_mem) const {
}
const void *PoolAllocator::get(ID p_mem) const {
-
if (!needs_locking) {
-
const Entry *e = get_entry(p_mem);
ERR_FAIL_COND_V(!e, nullptr);
return &pool[e->pos];
@@ -470,19 +438,16 @@ const void *PoolAllocator::get(ID p_mem) const {
const Entry *e = get_entry(p_mem);
if (!e) {
-
mt_unlock();
ERR_FAIL_COND_V(!e, nullptr);
}
if (e->lock == 0) {
-
mt_unlock();
ERR_PRINT("e->lock == 0");
return nullptr;
}
if ((int)e->pos >= pool_size) {
-
mt_unlock();
ERR_PRINT("e->pos<0 || e->pos>=pool_size");
return nullptr;
@@ -495,9 +460,7 @@ const void *PoolAllocator::get(ID p_mem) const {
}
void *PoolAllocator::get(ID p_mem) {
-
if (!needs_locking) {
-
Entry *e = get_entry(p_mem);
ERR_FAIL_COND_V(!e, nullptr);
return &pool[e->pos];
@@ -507,12 +470,10 @@ void *PoolAllocator::get(ID p_mem) {
Entry *e = get_entry(p_mem);
if (!e) {
-
mt_unlock();
ERR_FAIL_COND_V(!e, nullptr);
}
if (e->lock == 0) {
-
//assert(0);
mt_unlock();
ERR_PRINT("e->lock == 0");
@@ -520,7 +481,6 @@ void *PoolAllocator::get(ID p_mem) {
}
if ((int)e->pos >= pool_size) {
-
mt_unlock();
ERR_PRINT("e->pos<0 || e->pos>=pool_size");
return nullptr;
@@ -532,7 +492,6 @@ void *PoolAllocator::get(ID p_mem) {
return ptr;
}
void PoolAllocator::unlock(ID p_mem) {
-
if (!needs_locking)
return;
mt_lock();
@@ -551,22 +510,18 @@ void PoolAllocator::unlock(ID p_mem) {
}
int PoolAllocator::get_used_mem() const {
-
return pool_size - free_mem;
}
int PoolAllocator::get_free_peak() {
-
return free_mem_peak;
}
int PoolAllocator::get_free_mem() {
-
return free_mem;
}
void PoolAllocator::create_pool(void *p_mem, int p_size, int p_max_entries) {
-
pool = (uint8_t *)p_mem;
pool_size = p_size;
@@ -582,7 +537,6 @@ void PoolAllocator::create_pool(void *p_mem, int p_size, int p_max_entries) {
}
PoolAllocator::PoolAllocator(int p_size, bool p_needs_locking, int p_max_entries) {
-
mem_ptr = memalloc(p_size);
ERR_FAIL_COND(!mem_ptr);
align = 1;
@@ -591,9 +545,7 @@ PoolAllocator::PoolAllocator(int p_size, bool p_needs_locking, int p_max_entries
}
PoolAllocator::PoolAllocator(void *p_mem, int p_size, int p_align, bool p_needs_locking, int p_max_entries) {
-
if (p_align > 1) {
-
uint8_t *mem8 = (uint8_t *)p_mem;
uint64_t ofs = (uint64_t)mem8;
if (ofs % p_align) {
@@ -611,7 +563,6 @@ PoolAllocator::PoolAllocator(void *p_mem, int p_size, int p_align, bool p_needs_
}
PoolAllocator::PoolAllocator(int p_align, int p_size, bool p_needs_locking, int p_max_entries) {
-
ERR_FAIL_COND(p_align < 1);
mem_ptr = Memory::alloc_static(p_size + p_align, true);
uint8_t *mem8 = (uint8_t *)mem_ptr;
@@ -624,7 +575,6 @@ PoolAllocator::PoolAllocator(int p_align, int p_size, bool p_needs_locking, int
}
PoolAllocator::~PoolAllocator() {
-
if (mem_ptr)
memfree(mem_ptr);