summaryrefslogtreecommitdiff
path: root/core/os
diff options
context:
space:
mode:
authorRémi Verschelde <rverschelde@gmail.com>2020-11-08 10:41:17 +0100
committerGitHub <noreply@github.com>2020-11-08 10:41:17 +0100
commit90edd839d5f67d4252923a720feeef6ad0d76107 (patch)
treea1dd3ae46bf575cb8296df38568dfce237c6ecd8 /core/os
parent30b6db99a99a94c64d906c1b828ff44f79a1bc75 (diff)
parent127458ed175c5aeac8dee7f09d23fae4c8928eb7 (diff)
Merge pull request #43385 from reduz/reorganize-core
Reorganize and clean up core/ directory
Diffstat (limited to 'core/os')
-rw-r--r--core/os/dir_access.cpp2
-rw-r--r--core/os/dir_access.h2
-rw-r--r--core/os/file_access.cpp2
-rw-r--r--core/os/file_access.h2
-rw-r--r--core/os/keyboard.h2
-rw-r--r--core/os/main_loop.cpp2
-rw-r--r--core/os/main_loop.h4
-rw-r--r--core/os/memory.cpp4
-rw-r--r--core/os/memory.h4
-rw-r--r--core/os/midi_driver.h2
-rw-r--r--core/os/mutex.h2
-rw-r--r--core/os/os.cpp2
-rw-r--r--core/os/os.h10
-rw-r--r--core/os/pool_allocator.cpp596
-rw-r--r--core/os/pool_allocator.h150
-rw-r--r--core/os/rw_lock.cpp2
-rw-r--r--core/os/rw_lock.h2
-rw-r--r--core/os/semaphore.h2
-rw-r--r--core/os/spin_lock.h51
-rw-r--r--core/os/thread.h2
-rw-r--r--core/os/threaded_array_processor.h2
21 files changed, 822 insertions, 25 deletions
diff --git a/core/os/dir_access.cpp b/core/os/dir_access.cpp
index 5e1cb8ea29..30b1b51b53 100644
--- a/core/os/dir_access.cpp
+++ b/core/os/dir_access.cpp
@@ -30,10 +30,10 @@
#include "dir_access.h"
+#include "core/config/project_settings.h"
#include "core/os/file_access.h"
#include "core/os/memory.h"
#include "core/os/os.h"
-#include "core/project_settings.h"
String DirAccess::_get_root_path() const {
switch (_access_type) {
diff --git a/core/os/dir_access.h b/core/os/dir_access.h
index 6bce9a4c12..0f4fa9b250 100644
--- a/core/os/dir_access.h
+++ b/core/os/dir_access.h
@@ -31,8 +31,8 @@
#ifndef DIR_ACCESS_H
#define DIR_ACCESS_H
+#include "core/string/ustring.h"
#include "core/typedefs.h"
-#include "core/ustring.h"
//@ TODO, excellent candidate for THREAD_SAFE MACRO, should go through all these and add THREAD_SAFE where it applies
class DirAccess {
diff --git a/core/os/file_access.cpp b/core/os/file_access.cpp
index 9dbb2952f7..ef3eb6800a 100644
--- a/core/os/file_access.cpp
+++ b/core/os/file_access.cpp
@@ -30,11 +30,11 @@
#include "file_access.h"
+#include "core/config/project_settings.h"
#include "core/crypto/crypto_core.h"
#include "core/io/file_access_pack.h"
#include "core/io/marshalls.h"
#include "core/os/os.h"
-#include "core/project_settings.h"
FileAccess::CreateFunc FileAccess::create_func[ACCESS_MAX] = { nullptr, nullptr };
diff --git a/core/os/file_access.h b/core/os/file_access.h
index 48b9ee4269..39b977a4d9 100644
--- a/core/os/file_access.h
+++ b/core/os/file_access.h
@@ -33,8 +33,8 @@
#include "core/math/math_defs.h"
#include "core/os/memory.h"
+#include "core/string/ustring.h"
#include "core/typedefs.h"
-#include "core/ustring.h"
/**
* Multi-Platform abstraction for accessing to files.
diff --git a/core/os/keyboard.h b/core/os/keyboard.h
index 5d11e6a378..92664aff8f 100644
--- a/core/os/keyboard.h
+++ b/core/os/keyboard.h
@@ -31,7 +31,7 @@
#ifndef KEYBOARD_H
#define KEYBOARD_H
-#include "core/ustring.h"
+#include "core/string/ustring.h"
/*
Special Key:
diff --git a/core/os/main_loop.cpp b/core/os/main_loop.cpp
index 6651fb80d7..d29bcd011f 100644
--- a/core/os/main_loop.cpp
+++ b/core/os/main_loop.cpp
@@ -30,7 +30,7 @@
#include "main_loop.h"
-#include "core/script_language.h"
+#include "core/object/script_language.h"
void MainLoop::_bind_methods() {
BIND_VMETHOD(MethodInfo("_initialize"));
diff --git a/core/os/main_loop.h b/core/os/main_loop.h
index 2c34cf193c..8c46ad9b6a 100644
--- a/core/os/main_loop.h
+++ b/core/os/main_loop.h
@@ -32,8 +32,8 @@
#define MAIN_LOOP_H
#include "core/input/input_event.h"
-#include "core/reference.h"
-#include "core/script_language.h"
+#include "core/object/reference.h"
+#include "core/object/script_language.h"
class MainLoop : public Object {
GDCLASS(MainLoop, Object);
diff --git a/core/os/memory.cpp b/core/os/memory.cpp
index 8457c52092..f2723d13f6 100644
--- a/core/os/memory.cpp
+++ b/core/os/memory.cpp
@@ -30,9 +30,9 @@
#include "memory.h"
-#include "core/error_macros.h"
+#include "core/error/error_macros.h"
#include "core/os/copymem.h"
-#include "core/safe_refcount.h"
+#include "core/templates/safe_refcount.h"
#include <stdio.h>
#include <stdlib.h>
diff --git a/core/os/memory.h b/core/os/memory.h
index 46ffb4124b..dee08d4de4 100644
--- a/core/os/memory.h
+++ b/core/os/memory.h
@@ -31,8 +31,8 @@
#ifndef MEMORY_H
#define MEMORY_H
-#include "core/error_macros.h"
-#include "core/safe_refcount.h"
+#include "core/error/error_macros.h"
+#include "core/templates/safe_refcount.h"
#include <stddef.h>
diff --git a/core/os/midi_driver.h b/core/os/midi_driver.h
index bc922e1fcf..f487b31d4c 100644
--- a/core/os/midi_driver.h
+++ b/core/os/midi_driver.h
@@ -32,7 +32,7 @@
#define MIDI_DRIVER_H
#include "core/typedefs.h"
-#include "core/variant.h"
+#include "core/variant/variant.h"
/**
* Multi-Platform abstraction for accessing to MIDI.
diff --git a/core/os/mutex.h b/core/os/mutex.h
index d42cbed821..778bdaba09 100644
--- a/core/os/mutex.h
+++ b/core/os/mutex.h
@@ -31,7 +31,7 @@
#ifndef MUTEX_H
#define MUTEX_H
-#include "core/error_list.h"
+#include "core/error/error_list.h"
#include "core/typedefs.h"
#if !defined(NO_THREADS)
diff --git a/core/os/os.cpp b/core/os/os.cpp
index 3a398316bd..552bf043bf 100644
--- a/core/os/os.cpp
+++ b/core/os/os.cpp
@@ -30,11 +30,11 @@
#include "os.h"
+#include "core/config/project_settings.h"
#include "core/input/input.h"
#include "core/os/dir_access.h"
#include "core/os/file_access.h"
#include "core/os/midi_driver.h"
-#include "core/project_settings.h"
#include "core/version_generated.gen.h"
#include "servers/audio_server.h"
diff --git a/core/os/os.h b/core/os/os.h
index 4c1d930107..a1e75b5ee9 100644
--- a/core/os/os.h
+++ b/core/os/os.h
@@ -31,13 +31,13 @@
#ifndef OS_H
#define OS_H
-#include "core/engine.h"
-#include "core/image.h"
+#include "core/config/engine.h"
+#include "core/io/image.h"
#include "core/io/logger.h"
-#include "core/list.h"
#include "core/os/main_loop.h"
-#include "core/ustring.h"
-#include "core/vector.h"
+#include "core/string/ustring.h"
+#include "core/templates/list.h"
+#include "core/templates/vector.h"
#include <stdarg.h>
diff --git a/core/os/pool_allocator.cpp b/core/os/pool_allocator.cpp
new file mode 100644
index 0000000000..52536ff45d
--- /dev/null
+++ b/core/os/pool_allocator.cpp
@@ -0,0 +1,596 @@
+/*************************************************************************/
+/* pool_allocator.cpp */
+/*************************************************************************/
+/* This file is part of: */
+/* GODOT ENGINE */
+/* https://godotengine.org */
+/*************************************************************************/
+/* Copyright (c) 2007-2020 Juan Linietsky, Ariel Manzur. */
+/* Copyright (c) 2014-2020 Godot Engine contributors (cf. AUTHORS.md). */
+/* */
+/* Permission is hereby granted, free of charge, to any person obtaining */
+/* a copy of this software and associated documentation files (the */
+/* "Software"), to deal in the Software without restriction, including */
+/* without limitation the rights to use, copy, modify, merge, publish, */
+/* distribute, sublicense, and/or sell copies of the Software, and to */
+/* permit persons to whom the Software is furnished to do so, subject to */
+/* the following conditions: */
+/* */
+/* The above copyright notice and this permission notice shall be */
+/* included in all copies or substantial portions of the Software. */
+/* */
+/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
+/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
+/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/
+/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
+/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
+/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
+/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
+/*************************************************************************/
+
+#include "pool_allocator.h"
+
+#include "core/error/error_macros.h"
+#include "core/os/copymem.h"
+#include "core/os/memory.h"
+#include "core/os/os.h"
+#include "core/string/print_string.h"
+
+#include <assert.h>
+
+#define COMPACT_CHUNK(m_entry, m_to_pos) \
+ do { \
+ void *_dst = &((unsigned char *)pool)[m_to_pos]; \
+ void *_src = &((unsigned char *)pool)[(m_entry).pos]; \
+ movemem(_dst, _src, aligned((m_entry).len)); \
+ (m_entry).pos = m_to_pos; \
+ } while (0);
+
+void PoolAllocator::mt_lock() const {
+}
+
+void PoolAllocator::mt_unlock() const {
+}
+
+bool PoolAllocator::get_free_entry(EntryArrayPos *p_pos) {
+ if (entry_count == entry_max) {
+ return false;
+ }
+
+ for (int i = 0; i < entry_max; i++) {
+ if (entry_array[i].len == 0) {
+ *p_pos = i;
+ return true;
+ }
+ }
+
+ ERR_PRINT("Out of memory Chunks!");
+
+ return false; //
+}
+
+/**
+ * Find a hole
+ * @param p_pos The hole is behind the block pointed by this variable upon return. if pos==entry_count, then allocate at end
+ * @param p_for_size hole size
+ * @return true if a hole was found, false if no hole was found
+ */
+bool PoolAllocator::find_hole(EntryArrayPos *p_pos, int p_for_size) {
+ /* position where previous entry ends. Defaults to zero (begin of pool) */
+
+ int prev_entry_end_pos = 0;
+
+ for (int i = 0; i < entry_count; i++) {
+ Entry &entry = entry_array[entry_indices[i]];
+
+ /* determine hole size to previous entry */
+
+ int hole_size = entry.pos - prev_entry_end_pos;
+
+ /* determine if what we want fits in that hole */
+ if (hole_size >= p_for_size) {
+ *p_pos = i;
+ return true;
+ }
+
+ /* prepare for next one */
+ prev_entry_end_pos = entry_end(entry);
+ }
+
+ /* No holes between entries, check at the end..*/
+
+ if ((pool_size - prev_entry_end_pos) >= p_for_size) {
+ *p_pos = entry_count;
+ return true;
+ }
+
+ return false;
+}
+
+void PoolAllocator::compact(int p_up_to) {
+ uint32_t prev_entry_end_pos = 0;
+
+ if (p_up_to < 0) {
+ p_up_to = entry_count;
+ }
+ for (int i = 0; i < p_up_to; i++) {
+ Entry &entry = entry_array[entry_indices[i]];
+
+ /* determine hole size to previous entry */
+
+ int hole_size = entry.pos - prev_entry_end_pos;
+
+ /* if we can compact, do it */
+ if (hole_size > 0 && !entry.lock) {
+ COMPACT_CHUNK(entry, prev_entry_end_pos);
+ }
+
+ /* prepare for next one */
+ prev_entry_end_pos = entry_end(entry);
+ }
+}
+
+void PoolAllocator::compact_up(int p_from) {
+ uint32_t next_entry_end_pos = pool_size; // - static_area_size;
+
+ for (int i = entry_count - 1; i >= p_from; i--) {
+ Entry &entry = entry_array[entry_indices[i]];
+
+ /* determine hole size to next entry */
+
+ int hole_size = next_entry_end_pos - (entry.pos + aligned(entry.len));
+
+ /* if we can compact, do it */
+ if (hole_size > 0 && !entry.lock) {
+ COMPACT_CHUNK(entry, (next_entry_end_pos - aligned(entry.len)));
+ }
+
+ /* prepare for next one */
+ next_entry_end_pos = entry.pos;
+ }
+}
+
+bool PoolAllocator::find_entry_index(EntryIndicesPos *p_map_pos, Entry *p_entry) {
+ EntryArrayPos entry_pos = entry_max;
+
+ for (int i = 0; i < entry_count; i++) {
+ if (&entry_array[entry_indices[i]] == p_entry) {
+ entry_pos = i;
+ break;
+ }
+ }
+
+ if (entry_pos == entry_max) {
+ return false;
+ }
+
+ *p_map_pos = entry_pos;
+ return true;
+}
+
+PoolAllocator::ID PoolAllocator::alloc(int p_size) {
+ ERR_FAIL_COND_V(p_size < 1, POOL_ALLOCATOR_INVALID_ID);
+#ifdef DEBUG_ENABLED
+ if (p_size > free_mem) {
+ OS::get_singleton()->debug_break();
+ }
+#endif
+ ERR_FAIL_COND_V(p_size > free_mem, POOL_ALLOCATOR_INVALID_ID);
+
+ mt_lock();
+
+ if (entry_count == entry_max) {
+ mt_unlock();
+ ERR_PRINT("entry_count==entry_max");
+ return POOL_ALLOCATOR_INVALID_ID;
+ }
+
+ int size_to_alloc = aligned(p_size);
+
+ EntryIndicesPos new_entry_indices_pos;
+
+ if (!find_hole(&new_entry_indices_pos, size_to_alloc)) {
+ /* No hole could be found, try compacting mem */
+ compact();
+ /* Then search again */
+
+ if (!find_hole(&new_entry_indices_pos, size_to_alloc)) {
+ mt_unlock();
+ ERR_FAIL_V_MSG(POOL_ALLOCATOR_INVALID_ID, "Memory can't be compacted further.");
+ }
+ }
+
+ EntryArrayPos new_entry_array_pos;
+
+ bool found_free_entry = get_free_entry(&new_entry_array_pos);
+
+ if (!found_free_entry) {
+ mt_unlock();
+ ERR_FAIL_V_MSG(POOL_ALLOCATOR_INVALID_ID, "No free entry found in PoolAllocator.");
+ }
+
+ /* move all entry indices up, make room for this one */
+ for (int i = entry_count; i > new_entry_indices_pos; i--) {
+ entry_indices[i] = entry_indices[i - 1];
+ }
+
+ entry_indices[new_entry_indices_pos] = new_entry_array_pos;
+
+ entry_count++;
+
+ Entry &entry = entry_array[entry_indices[new_entry_indices_pos]];
+
+ entry.len = p_size;
+ entry.pos = (new_entry_indices_pos == 0) ? 0 : entry_end(entry_array[entry_indices[new_entry_indices_pos - 1]]); //alloc either at beginning or end of previous
+ entry.lock = 0;
+ entry.check = (check_count++) & CHECK_MASK;
+ free_mem -= size_to_alloc;
+ if (free_mem < free_mem_peak) {
+ free_mem_peak = free_mem;
+ }
+
+ ID retval = (entry_indices[new_entry_indices_pos] << CHECK_BITS) | entry.check;
+ mt_unlock();
+
+ //ERR_FAIL_COND_V( (uintptr_t)get(retval)%align != 0, retval );
+
+ return retval;
+}
+
+PoolAllocator::Entry *PoolAllocator::get_entry(ID p_mem) {
+ unsigned int check = p_mem & CHECK_MASK;
+ int entry = p_mem >> CHECK_BITS;
+ ERR_FAIL_INDEX_V(entry, entry_max, nullptr);
+ ERR_FAIL_COND_V(entry_array[entry].check != check, nullptr);
+ ERR_FAIL_COND_V(entry_array[entry].len == 0, nullptr);
+
+ return &entry_array[entry];
+}
+
+const PoolAllocator::Entry *PoolAllocator::get_entry(ID p_mem) const {
+ unsigned int check = p_mem & CHECK_MASK;
+ int entry = p_mem >> CHECK_BITS;
+ ERR_FAIL_INDEX_V(entry, entry_max, nullptr);
+ ERR_FAIL_COND_V(entry_array[entry].check != check, nullptr);
+ ERR_FAIL_COND_V(entry_array[entry].len == 0, nullptr);
+
+ return &entry_array[entry];
+}
+
+void PoolAllocator::free(ID p_mem) {
+ mt_lock();
+ Entry *e = get_entry(p_mem);
+ if (!e) {
+ mt_unlock();
+ ERR_PRINT("!e");
+ return;
+ }
+ if (e->lock) {
+ mt_unlock();
+ ERR_PRINT("e->lock");
+ return;
+ }
+
+ EntryIndicesPos entry_indices_pos;
+
+ bool index_found = find_entry_index(&entry_indices_pos, e);
+ if (!index_found) {
+ mt_unlock();
+ ERR_FAIL_COND(!index_found);
+ }
+
+ for (int i = entry_indices_pos; i < (entry_count - 1); i++) {
+ entry_indices[i] = entry_indices[i + 1];
+ }
+
+ entry_count--;
+ free_mem += aligned(e->len);
+ e->clear();
+ mt_unlock();
+}
+
+int PoolAllocator::get_size(ID p_mem) const {
+ int size;
+ mt_lock();
+
+ const Entry *e = get_entry(p_mem);
+ if (!e) {
+ mt_unlock();
+ ERR_PRINT("!e");
+ return 0;
+ }
+
+ size = e->len;
+
+ mt_unlock();
+
+ return size;
+}
+
+Error PoolAllocator::resize(ID p_mem, int p_new_size) {
+ mt_lock();
+ Entry *e = get_entry(p_mem);
+
+ if (!e) {
+ mt_unlock();
+ ERR_FAIL_COND_V(!e, ERR_INVALID_PARAMETER);
+ }
+
+ if (needs_locking && e->lock) {
+ mt_unlock();
+ ERR_FAIL_COND_V(e->lock, ERR_ALREADY_IN_USE);
+ }
+
+ uint32_t alloc_size = aligned(p_new_size);
+
+ if ((uint32_t)aligned(e->len) == alloc_size) {
+ e->len = p_new_size;
+ mt_unlock();
+ return OK;
+ } else if (e->len > (uint32_t)p_new_size) {
+ free_mem += aligned(e->len);
+ free_mem -= alloc_size;
+ e->len = p_new_size;
+ mt_unlock();
+ return OK;
+ }
+
+ //p_new_size = align(p_new_size)
+ int _free = free_mem; // - static_area_size;
+
+ if (uint32_t(_free + aligned(e->len)) < alloc_size) {
+ mt_unlock();
+ ERR_FAIL_V(ERR_OUT_OF_MEMORY);
+ }
+
+ EntryIndicesPos entry_indices_pos;
+
+ bool index_found = find_entry_index(&entry_indices_pos, e);
+
+ if (!index_found) {
+ mt_unlock();
+ ERR_FAIL_COND_V(!index_found, ERR_BUG);
+ }
+
+ //no need to move stuff around, it fits before the next block
+ uint32_t next_pos;
+ if (entry_indices_pos + 1 == entry_count) {
+ next_pos = pool_size; // - static_area_size;
+ } else {
+ next_pos = entry_array[entry_indices[entry_indices_pos + 1]].pos;
+ }
+
+ if ((next_pos - e->pos) > alloc_size) {
+ free_mem += aligned(e->len);
+ e->len = p_new_size;
+ free_mem -= alloc_size;
+ mt_unlock();
+ return OK;
+ }
+ //it doesn't fit, compact around BEFORE current index (make room behind)
+
+ compact(entry_indices_pos + 1);
+
+ if ((next_pos - e->pos) > alloc_size) {
+ //now fits! hooray!
+ free_mem += aligned(e->len);
+ e->len = p_new_size;
+ free_mem -= alloc_size;
+ mt_unlock();
+ if (free_mem < free_mem_peak) {
+ free_mem_peak = free_mem;
+ }
+ return OK;
+ }
+
+ //STILL doesn't fit, compact around AFTER current index (make room after)
+
+ compact_up(entry_indices_pos + 1);
+
+ if ((entry_array[entry_indices[entry_indices_pos + 1]].pos - e->pos) > alloc_size) {
+ //now fits! hooray!
+ free_mem += aligned(e->len);
+ e->len = p_new_size;
+ free_mem -= alloc_size;
+ mt_unlock();
+ if (free_mem < free_mem_peak) {
+ free_mem_peak = free_mem;
+ }
+ return OK;
+ }
+
+ mt_unlock();
+ ERR_FAIL_V(ERR_OUT_OF_MEMORY);
+}
+
+Error PoolAllocator::lock(ID p_mem) {
+ if (!needs_locking) {
+ return OK;
+ }
+ mt_lock();
+ Entry *e = get_entry(p_mem);
+ if (!e) {
+ mt_unlock();
+ ERR_PRINT("!e");
+ return ERR_INVALID_PARAMETER;
+ }
+ e->lock++;
+ mt_unlock();
+ return OK;
+}
+
+bool PoolAllocator::is_locked(ID p_mem) const {
+ if (!needs_locking) {
+ return false;
+ }
+
+ mt_lock();
+ const Entry *e = ((PoolAllocator *)(this))->get_entry(p_mem);
+ if (!e) {
+ mt_unlock();
+ ERR_PRINT("!e");
+ return false;
+ }
+ bool locked = e->lock;
+ mt_unlock();
+ return locked;
+}
+
+const void *PoolAllocator::get(ID p_mem) const {
+ if (!needs_locking) {
+ const Entry *e = get_entry(p_mem);
+ ERR_FAIL_COND_V(!e, nullptr);
+ return &pool[e->pos];
+ }
+
+ mt_lock();
+ const Entry *e = get_entry(p_mem);
+
+ if (!e) {
+ mt_unlock();
+ ERR_FAIL_COND_V(!e, nullptr);
+ }
+ if (e->lock == 0) {
+ mt_unlock();
+ ERR_PRINT("e->lock == 0");
+ return nullptr;
+ }
+
+ if ((int)e->pos >= pool_size) {
+ mt_unlock();
+ ERR_PRINT("e->pos<0 || e->pos>=pool_size");
+ return nullptr;
+ }
+ const void *ptr = &pool[e->pos];
+
+ mt_unlock();
+
+ return ptr;
+}
+
+void *PoolAllocator::get(ID p_mem) {
+ if (!needs_locking) {
+ Entry *e = get_entry(p_mem);
+ ERR_FAIL_COND_V(!e, nullptr);
+ return &pool[e->pos];
+ }
+
+ mt_lock();
+ Entry *e = get_entry(p_mem);
+
+ if (!e) {
+ mt_unlock();
+ ERR_FAIL_COND_V(!e, nullptr);
+ }
+ if (e->lock == 0) {
+ //assert(0);
+ mt_unlock();
+ ERR_PRINT("e->lock == 0");
+ return nullptr;
+ }
+
+ if ((int)e->pos >= pool_size) {
+ mt_unlock();
+ ERR_PRINT("e->pos<0 || e->pos>=pool_size");
+ return nullptr;
+ }
+ void *ptr = &pool[e->pos];
+
+ mt_unlock();
+
+ return ptr;
+}
+
+void PoolAllocator::unlock(ID p_mem) {
+ if (!needs_locking) {
+ return;
+ }
+ mt_lock();
+ Entry *e = get_entry(p_mem);
+ if (!e) {
+ mt_unlock();
+ ERR_FAIL_COND(!e);
+ }
+ if (e->lock == 0) {
+ mt_unlock();
+ ERR_PRINT("e->lock == 0");
+ return;
+ }
+ e->lock--;
+ mt_unlock();
+}
+
+int PoolAllocator::get_used_mem() const {
+ return pool_size - free_mem;
+}
+
+int PoolAllocator::get_free_peak() {
+ return free_mem_peak;
+}
+
+int PoolAllocator::get_free_mem() {
+ return free_mem;
+}
+
+void PoolAllocator::create_pool(void *p_mem, int p_size, int p_max_entries) {
+ pool = (uint8_t *)p_mem;
+ pool_size = p_size;
+
+ entry_array = memnew_arr(Entry, p_max_entries);
+ entry_indices = memnew_arr(int, p_max_entries);
+ entry_max = p_max_entries;
+ entry_count = 0;
+
+ free_mem = p_size;
+ free_mem_peak = p_size;
+
+ check_count = 0;
+}
+
+PoolAllocator::PoolAllocator(int p_size, bool p_needs_locking, int p_max_entries) {
+ mem_ptr = memalloc(p_size);
+ ERR_FAIL_COND(!mem_ptr);
+ align = 1;
+ create_pool(mem_ptr, p_size, p_max_entries);
+ needs_locking = p_needs_locking;
+}
+
+PoolAllocator::PoolAllocator(void *p_mem, int p_size, int p_align, bool p_needs_locking, int p_max_entries) {
+ if (p_align > 1) {
+ uint8_t *mem8 = (uint8_t *)p_mem;
+ uint64_t ofs = (uint64_t)mem8;
+ if (ofs % p_align) {
+ int dif = p_align - (ofs % p_align);
+ mem8 += p_align - (ofs % p_align);
+ p_size -= dif;
+ p_mem = (void *)mem8;
+ }
+ }
+
+ create_pool(p_mem, p_size, p_max_entries);
+ needs_locking = p_needs_locking;
+ align = p_align;
+ mem_ptr = nullptr;
+}
+
+PoolAllocator::PoolAllocator(int p_align, int p_size, bool p_needs_locking, int p_max_entries) {
+ ERR_FAIL_COND(p_align < 1);
+ mem_ptr = Memory::alloc_static(p_size + p_align, true);
+ uint8_t *mem8 = (uint8_t *)mem_ptr;
+ uint64_t ofs = (uint64_t)mem8;
+ if (ofs % p_align) {
+ mem8 += p_align - (ofs % p_align);
+ }
+ create_pool(mem8, p_size, p_max_entries);
+ needs_locking = p_needs_locking;
+ align = p_align;
+}
+
+PoolAllocator::~PoolAllocator() {
+ if (mem_ptr) {
+ memfree(mem_ptr);
+ }
+
+ memdelete_arr(entry_array);
+ memdelete_arr(entry_indices);
+}
diff --git a/core/os/pool_allocator.h b/core/os/pool_allocator.h
new file mode 100644
index 0000000000..7d77af6266
--- /dev/null
+++ b/core/os/pool_allocator.h
@@ -0,0 +1,150 @@
+/*************************************************************************/
+/* pool_allocator.h */
+/*************************************************************************/
+/* This file is part of: */
+/* GODOT ENGINE */
+/* https://godotengine.org */
+/*************************************************************************/
+/* Copyright (c) 2007-2020 Juan Linietsky, Ariel Manzur. */
+/* Copyright (c) 2014-2020 Godot Engine contributors (cf. AUTHORS.md). */
+/* */
+/* Permission is hereby granted, free of charge, to any person obtaining */
+/* a copy of this software and associated documentation files (the */
+/* "Software"), to deal in the Software without restriction, including */
+/* without limitation the rights to use, copy, modify, merge, publish, */
+/* distribute, sublicense, and/or sell copies of the Software, and to */
+/* permit persons to whom the Software is furnished to do so, subject to */
+/* the following conditions: */
+/* */
+/* The above copyright notice and this permission notice shall be */
+/* included in all copies or substantial portions of the Software. */
+/* */
+/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
+/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
+/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/
+/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
+/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
+/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
+/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
+/*************************************************************************/
+
+#ifndef POOL_ALLOCATOR_H
+#define POOL_ALLOCATOR_H
+
+#include "core/typedefs.h"
+
+/**
+ @author Juan Linietsky <reduzio@gmail.com>
+ * Generic Pool Allocator.
+ * This is a generic memory pool allocator, with locking, compacting and alignment. (@TODO alignment)
+ * It is used as a standard way to manage allocation in a specific region of memory, such as texture memory,
+ * audio sample memory, or just any kind of memory overall.
+ * (@TODO) abstraction should be greater, because in many platforms, you need to manage a nonreachable memory.
+*/
+
+enum {
+
+ POOL_ALLOCATOR_INVALID_ID = -1 ///< default invalid value. use INVALID_ID( id ) to test
+};
+
+class PoolAllocator {
+public:
+ typedef int ID;
+
+private:
+ enum {
+ CHECK_BITS = 8,
+ CHECK_LEN = (1 << CHECK_BITS),
+ CHECK_MASK = CHECK_LEN - 1
+
+ };
+
+ struct Entry {
+ unsigned int pos = 0;
+ unsigned int len = 0;
+ unsigned int lock = 0;
+ unsigned int check = 0;
+
+ inline void clear() {
+ pos = 0;
+ len = 0;
+ lock = 0;
+ check = 0;
+ }
+ Entry() {}
+ };
+
+ typedef int EntryArrayPos;
+ typedef int EntryIndicesPos;
+
+ Entry *entry_array;
+ int *entry_indices;
+ int entry_max;
+ int entry_count;
+
+ uint8_t *pool;
+ void *mem_ptr;
+ int pool_size;
+
+ int free_mem;
+ int free_mem_peak;
+
+ unsigned int check_count;
+ int align;
+
+ bool needs_locking;
+
+ inline int entry_end(const Entry &p_entry) const {
+ return p_entry.pos + aligned(p_entry.len);
+ }
+ inline int aligned(int p_size) const {
+ int rem = p_size % align;
+ if (rem) {
+ p_size += align - rem;
+ }
+
+ return p_size;
+ }
+
+ void compact(int p_up_to = -1);
+ void compact_up(int p_from = 0);
+ bool get_free_entry(EntryArrayPos *p_pos);
+ bool find_hole(EntryArrayPos *p_pos, int p_for_size);
+ bool find_entry_index(EntryIndicesPos *p_map_pos, Entry *p_entry);
+ Entry *get_entry(ID p_mem);
+ const Entry *get_entry(ID p_mem) const;
+
+ void create_pool(void *p_mem, int p_size, int p_max_entries);
+
+protected:
+ virtual void mt_lock() const; ///< Reimplement for custom mt locking
+ virtual void mt_unlock() const; ///< Reimplement for custom mt locking
+
+public:
+ enum {
+ DEFAULT_MAX_ALLOCS = 4096,
+ };
+
+ ID alloc(int p_size); ///< Alloc memory, get an ID on success, POOL_ALLOCATOR_INVALID_ID on failure
+ void free(ID p_mem); ///< Free allocated memory
+ Error resize(ID p_mem, int p_new_size); ///< resize a memory chunk
+ int get_size(ID p_mem) const;
+
+ int get_free_mem(); ///< get free memory
+ int get_used_mem() const;
+ int get_free_peak(); ///< get peak (lowest) free memory observed
+
+ Error lock(ID p_mem); //@todo move this out
+ void *get(ID p_mem);
+ const void *get(ID p_mem) const;
+ void unlock(ID p_mem);
+ bool is_locked(ID p_mem) const;
+
+ PoolAllocator(int p_size, bool p_needs_locking = false, int p_max_entries = DEFAULT_MAX_ALLOCS);
+ PoolAllocator(void *p_mem, int p_size, int p_align = 1, bool p_needs_locking = false, int p_max_entries = DEFAULT_MAX_ALLOCS);
+ PoolAllocator(int p_align, int p_size, bool p_needs_locking = false, int p_max_entries = DEFAULT_MAX_ALLOCS);
+
+ virtual ~PoolAllocator();
+};
+
+#endif // POOL_ALLOCATOR_H
diff --git a/core/os/rw_lock.cpp b/core/os/rw_lock.cpp
index a668fe2b4c..669f05c6b0 100644
--- a/core/os/rw_lock.cpp
+++ b/core/os/rw_lock.cpp
@@ -30,7 +30,7 @@
#include "rw_lock.h"
-#include "core/error_macros.h"
+#include "core/error/error_macros.h"
#include <stddef.h>
diff --git a/core/os/rw_lock.h b/core/os/rw_lock.h
index 1035072cce..1190102a83 100644
--- a/core/os/rw_lock.h
+++ b/core/os/rw_lock.h
@@ -31,7 +31,7 @@
#ifndef RW_LOCK_H
#define RW_LOCK_H
-#include "core/error_list.h"
+#include "core/error/error_list.h"
class RWLock {
protected:
diff --git a/core/os/semaphore.h b/core/os/semaphore.h
index 077e04704b..b170cada3a 100644
--- a/core/os/semaphore.h
+++ b/core/os/semaphore.h
@@ -31,7 +31,7 @@
#ifndef SEMAPHORE_H
#define SEMAPHORE_H
-#include "core/error_list.h"
+#include "core/error/error_list.h"
#include "core/typedefs.h"
#if !defined(NO_THREADS)
diff --git a/core/os/spin_lock.h b/core/os/spin_lock.h
new file mode 100644
index 0000000000..1bb810bb29
--- /dev/null
+++ b/core/os/spin_lock.h
@@ -0,0 +1,51 @@
+/*************************************************************************/
+/* spin_lock.h */
+/*************************************************************************/
+/* This file is part of: */
+/* GODOT ENGINE */
+/* https://godotengine.org */
+/*************************************************************************/
+/* Copyright (c) 2007-2020 Juan Linietsky, Ariel Manzur. */
+/* Copyright (c) 2014-2020 Godot Engine contributors (cf. AUTHORS.md). */
+/* */
+/* Permission is hereby granted, free of charge, to any person obtaining */
+/* a copy of this software and associated documentation files (the */
+/* "Software"), to deal in the Software without restriction, including */
+/* without limitation the rights to use, copy, modify, merge, publish, */
+/* distribute, sublicense, and/or sell copies of the Software, and to */
+/* permit persons to whom the Software is furnished to do so, subject to */
+/* the following conditions: */
+/* */
+/* The above copyright notice and this permission notice shall be */
+/* included in all copies or substantial portions of the Software. */
+/* */
+/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
+/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
+/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/
+/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
+/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
+/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
+/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
+/*************************************************************************/
+
+#ifndef SPIN_LOCK_H
+#define SPIN_LOCK_H
+
+#include "core/typedefs.h"
+
+#include <atomic>
+
+class SpinLock {
+ std::atomic_flag locked = ATOMIC_FLAG_INIT;
+
+public:
+ _ALWAYS_INLINE_ void lock() {
+ while (locked.test_and_set(std::memory_order_acquire)) {
+ ;
+ }
+ }
+ _ALWAYS_INLINE_ void unlock() {
+ locked.clear(std::memory_order_release);
+ }
+};
+#endif // SPIN_LOCK_H
diff --git a/core/os/thread.h b/core/os/thread.h
index f761d4ca43..d68476e683 100644
--- a/core/os/thread.h
+++ b/core/os/thread.h
@@ -31,8 +31,8 @@
#ifndef THREAD_H
#define THREAD_H
+#include "core/string/ustring.h"
#include "core/typedefs.h"
-#include "core/ustring.h"
typedef void (*ThreadCreateCallback)(void *p_userdata);
diff --git a/core/os/threaded_array_processor.h b/core/os/threaded_array_processor.h
index d27399e4cc..ed141a5339 100644
--- a/core/os/threaded_array_processor.h
+++ b/core/os/threaded_array_processor.h
@@ -35,7 +35,7 @@
#include "core/os/os.h"
#include "core/os/thread.h"
#include "core/os/thread_safe.h"
-#include "core/safe_refcount.h"
+#include "core/templates/safe_refcount.h"
template <class C, class U>
struct ThreadArrayProcessData {