Diffstat (limited to 'core')
-rw-r--r--  core/SCsub | 19
-rw-r--r--  core/core_bind.cpp | 20
-rw-r--r--  core/core_bind.h | 8
-rw-r--r--  core/core_constants.cpp | 2
-rw-r--r--  core/input/input_event.cpp | 171
-rw-r--r--  core/input/input_event.h | 36
-rw-r--r--  core/io/file_access_compressed.cpp | 34
-rw-r--r--  core/io/file_access_compressed.h | 28
-rw-r--r--  core/io/file_access_encrypted.cpp | 41
-rw-r--r--  core/io/file_access_encrypted.h | 16
-rw-r--r--  core/io/file_access_memory.cpp | 21
-rw-r--r--  core/io/file_access_memory.h | 16
-rw-r--r--  core/io/file_access_network.cpp | 40
-rw-r--r--  core/io/file_access_network.h | 38
-rw-r--r--  core/io/file_access_pack.cpp | 36
-rw-r--r--  core/io/file_access_pack.h | 22
-rw-r--r--  core/io/file_access_zip.cpp | 58
-rw-r--r--  core/io/file_access_zip.h | 11
-rw-r--r--  core/io/pck_packer.cpp | 2
-rw-r--r--  core/io/resource_format_binary.cpp | 4
-rw-r--r--  core/io/zip_io.cpp | 2
-rw-r--r--  core/math/bvh.h | 695
-rw-r--r--  core/math/bvh_abb.h | 276
-rw-r--r--  core/math/bvh_cull.inc | 534
-rw-r--r--  core/math/bvh_debug.inc | 68
-rw-r--r--  core/math/bvh_integrity.inc | 42
-rw-r--r--  core/math/bvh_logic.inc | 230
-rw-r--r--  core/math/bvh_misc.inc | 55
-rw-r--r--  core/math/bvh_pair.inc | 62
-rw-r--r--  core/math/bvh_public.inc | 421
-rw-r--r--  core/math/bvh_refit.inc | 141
-rw-r--r--  core/math/bvh_split.inc | 294
-rw-r--r--  core/math/bvh_structs.inc | 181
-rw-r--r--  core/math/bvh_tree.h | 422
-rw-r--r--  core/math/rect2.h | 12
-rw-r--r--  core/math/vector2.h | 32
-rw-r--r--  core/math/vector3.cpp | 8
-rw-r--r--  core/math/vector3.h | 15
-rw-r--r--  core/os/dir_access.h | 2
-rw-r--r--  core/os/file_access.cpp | 20
-rw-r--r--  core/os/file_access.h | 12
-rw-r--r--  core/os/keyboard.cpp | 6
-rw-r--r--  core/os/keyboard.h | 2
-rw-r--r--  core/templates/pooled_list.h | 95
-rw-r--r--  core/variant/callable.h | 7
45 files changed, 3899 insertions, 358 deletions
diff --git a/core/SCsub b/core/SCsub
index 21829553a7..bdf8544840 100644
--- a/core/SCsub
+++ b/core/SCsub
@@ -12,25 +12,28 @@ import os
txt = "0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0"
if "SCRIPT_AES256_ENCRYPTION_KEY" in os.environ:
- e = os.environ["SCRIPT_AES256_ENCRYPTION_KEY"]
- txt = ""
+ key = os.environ["SCRIPT_AES256_ENCRYPTION_KEY"]
ec_valid = True
- if len(e) != 64:
+ if len(key) != 64:
ec_valid = False
else:
-
- for i in range(len(e) >> 1):
+ txt = ""
+ for i in range(len(key) >> 1):
if i > 0:
txt += ","
- txts = "0x" + e[i * 2 : i * 2 + 2]
+ txts = "0x" + key[i * 2 : i * 2 + 2]
try:
int(txts, 16)
except Exception:
ec_valid = False
txt += txts
if not ec_valid:
- txt = "0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0"
- print("Invalid AES256 encryption key, not 64 bits hex: " + e)
+ print("Error: Invalid AES256 encryption key, not 64 hexadecimal characters: '" + key + "'.")
+ print(
+ "Unset 'SCRIPT_AES256_ENCRYPTION_KEY' in your environment "
+ "or make sure that it contains exactly 64 hexadecimal characters."
+ )
+ Exit(255)
# NOTE: It is safe to generate this file here, since this is still executed serially
with open("script_encryption_key.gen.cpp", "w") as f:
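Note: the SCsub hunk above turns an invalid SCRIPT_AES256_ENCRYPTION_KEY from a silent fallback to an all-zero key into a hard build error. As a minimal, purely illustrative C++ sketch of the same rule (not part of the build script), the key must be exactly 64 hexadecimal characters, i.e. 32 bytes for AES-256:

    #include <cctype>
    #include <string>

    // Illustrative helper (not in the Godot codebase): true only for a string
    // of exactly 64 hexadecimal characters.
    static bool is_valid_aes256_hex_key(const std::string &key) {
        if (key.size() != 64) {
            return false;
        }
        for (char c : key) {
            if (!std::isxdigit(static_cast<unsigned char>(c))) {
                return false;
            }
        }
        return true;
    }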
diff --git a/core/core_bind.cpp b/core/core_bind.cpp
index 84d8d0d4d3..2f5d7cb081 100644
--- a/core/core_bind.cpp
+++ b/core/core_bind.cpp
@@ -1259,6 +1259,7 @@ String _File::get_path_absolute() const {
void _File::seek(int64_t p_position) {
ERR_FAIL_COND_MSG(!f, "File must be opened before use.");
+ ERR_FAIL_COND_MSG(p_position < 0, "Seek position must be a positive integer.");
f->seek(p_position);
}
@@ -1267,12 +1268,12 @@ void _File::seek_end(int64_t p_position) {
f->seek_end(p_position);
}
-int64_t _File::get_position() const {
+uint64_t _File::get_position() const {
ERR_FAIL_COND_V_MSG(!f, 0, "File must be opened before use.");
return f->get_position();
}
-int64_t _File::get_len() const {
+uint64_t _File::get_len() const {
ERR_FAIL_COND_V_MSG(!f, 0, "File must be opened before use.");
return f->get_len();
}
@@ -1317,7 +1318,7 @@ real_t _File::get_real() const {
return f->get_real();
}
-Vector<uint8_t> _File::get_buffer(int p_length) const {
+Vector<uint8_t> _File::get_buffer(int64_t p_length) const {
Vector<uint8_t> data;
ERR_FAIL_COND_V_MSG(!f, data, "File must be opened before use.");
@@ -1330,8 +1331,7 @@ Vector<uint8_t> _File::get_buffer(int p_length) const {
ERR_FAIL_COND_V_MSG(err != OK, data, "Can't resize data to " + itos(p_length) + " elements.");
uint8_t *w = data.ptrw();
- int len = f->get_buffer(&w[0], p_length);
- ERR_FAIL_COND_V(len < 0, Vector<uint8_t>());
+ int64_t len = f->get_buffer(&w[0], p_length);
if (len < p_length) {
data.resize(len);
@@ -1344,7 +1344,7 @@ String _File::get_as_text() const {
ERR_FAIL_COND_V_MSG(!f, String(), "File must be opened before use.");
String text;
- size_t original_pos = f->get_position();
+ uint64_t original_pos = f->get_position();
f->seek(0);
String l = get_line();
@@ -1473,7 +1473,7 @@ void _File::store_csv_line(const Vector<String> &p_values, const String &p_delim
void _File::store_buffer(const Vector<uint8_t> &p_buffer) {
ERR_FAIL_COND_MSG(!f, "File must be opened before use.");
- int len = p_buffer.size();
+ uint64_t len = p_buffer.size();
if (len == 0) {
return;
}
@@ -1721,9 +1721,9 @@ bool _Directory::dir_exists(String p_dir) {
return d->dir_exists(p_dir);
}
-int _Directory::get_space_left() {
- ERR_FAIL_COND_V_MSG(!is_open(), 0, "Directory must be opened before use.");
- return d->get_space_left() / 1024 * 1024; //return value in megabytes, given binding is int
+uint64_t _Directory::get_space_left() {
+ ERR_FAIL_COND_V_MSG(!d, 0, "Directory must be opened before use.");
+ return d->get_space_left() / 1024 * 1024; // Truncate to closest MiB.
}
Error _Directory::copy(String p_from, String p_to) {
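Note: the _File changes above widen positions and lengths to unsigned 64-bit and drop the now-impossible negative-length check from get_buffer(). A short usage sketch against this widened interface (assuming `f` is an already opened FileAccess-style object) shows the intended pattern for large files:

    // Read everything after the current position; uint64_t arithmetic means
    // files larger than 2 GiB no longer overflow the position/length types.
    uint64_t remaining = f->get_len() - f->get_position();
    Vector<uint8_t> data;
    data.resize(remaining);
    uint64_t read = f->get_buffer(data.ptrw(), remaining);
    if (read < remaining) {
        data.resize(read); // Short read: shrink to what was actually read.
    }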
diff --git a/core/core_bind.h b/core/core_bind.h
index 3920116ca4..8bd96d8268 100644
--- a/core/core_bind.h
+++ b/core/core_bind.h
@@ -390,8 +390,8 @@ public:
void seek(int64_t p_position); // Seek to a given position.
void seek_end(int64_t p_position = 0); // Seek from the end of file.
- int64_t get_position() const; // Get position in the file.
- int64_t get_len() const; // Get size of the file.
+ uint64_t get_position() const; // Get position in the file.
+ uint64_t get_len() const; // Get size of the file.
bool eof_reached() const; // Reading passed EOF.
@@ -406,7 +406,7 @@ public:
Variant get_var(bool p_allow_objects = false) const;
- Vector<uint8_t> get_buffer(int p_length) const; // Get an array of bytes.
+ Vector<uint8_t> get_buffer(int64_t p_length) const; // Get an array of bytes.
String get_line() const;
Vector<String> get_csv_line(const String &p_delim = ",") const;
String get_as_text() const;
@@ -486,7 +486,7 @@ public:
bool file_exists(String p_file);
bool dir_exists(String p_dir);
- int get_space_left();
+ uint64_t get_space_left();
Error copy(String p_from, String p_to);
Error rename(String p_from, String p_to);
diff --git a/core/core_constants.cpp b/core/core_constants.cpp
index f40928350a..a0a41015dc 100644
--- a/core/core_constants.cpp
+++ b/core/core_constants.cpp
@@ -161,7 +161,7 @@ void register_global_constants() {
BIND_CORE_ENUM_CONSTANT(KEY_PAGEUP);
BIND_CORE_ENUM_CONSTANT(KEY_PAGEDOWN);
BIND_CORE_ENUM_CONSTANT(KEY_SHIFT);
- BIND_CORE_ENUM_CONSTANT(KEY_CONTROL);
+ BIND_CORE_ENUM_CONSTANT(KEY_CTRL);
BIND_CORE_ENUM_CONSTANT(KEY_META);
BIND_CORE_ENUM_CONSTANT(KEY_ALT);
BIND_CORE_ENUM_CONSTANT(KEY_CAPSLOCK);
diff --git a/core/input/input_event.cpp b/core/input/input_event.cpp
index 46629d742e..6f063c217f 100644
--- a/core/input/input_event.cpp
+++ b/core/input/input_event.cpp
@@ -147,66 +147,66 @@ bool InputEventWithModifiers::is_storing_command() const {
return store_command;
}
-void InputEventWithModifiers::set_shift(bool p_enabled) {
- shift = p_enabled;
+void InputEventWithModifiers::set_shift_pressed(bool p_enabled) {
+ shift_pressed = p_enabled;
}
-bool InputEventWithModifiers::get_shift() const {
- return shift;
+bool InputEventWithModifiers::is_shift_pressed() const {
+ return shift_pressed;
}
-void InputEventWithModifiers::set_alt(bool p_enabled) {
- alt = p_enabled;
+void InputEventWithModifiers::set_alt_pressed(bool p_enabled) {
+ alt_pressed = p_enabled;
}
-bool InputEventWithModifiers::get_alt() const {
- return alt;
+bool InputEventWithModifiers::is_alt_pressed() const {
+ return alt_pressed;
}
-void InputEventWithModifiers::set_control(bool p_enabled) {
- control = p_enabled;
+void InputEventWithModifiers::set_ctrl_pressed(bool p_enabled) {
+ ctrl_pressed = p_enabled;
}
-bool InputEventWithModifiers::get_control() const {
- return control;
+bool InputEventWithModifiers::is_ctrl_pressed() const {
+ return ctrl_pressed;
}
-void InputEventWithModifiers::set_metakey(bool p_enabled) {
- meta = p_enabled;
+void InputEventWithModifiers::set_meta_pressed(bool p_enabled) {
+ meta_pressed = p_enabled;
}
-bool InputEventWithModifiers::get_metakey() const {
- return meta;
+bool InputEventWithModifiers::is_meta_pressed() const {
+ return meta_pressed;
}
-void InputEventWithModifiers::set_command(bool p_enabled) {
- command = p_enabled;
+void InputEventWithModifiers::set_command_pressed(bool p_enabled) {
+ command_pressed = p_enabled;
}
-bool InputEventWithModifiers::get_command() const {
- return command;
+bool InputEventWithModifiers::is_command_pressed() const {
+ return command_pressed;
}
void InputEventWithModifiers::set_modifiers_from_event(const InputEventWithModifiers *event) {
- set_alt(event->get_alt());
- set_shift(event->get_shift());
- set_control(event->get_control());
- set_metakey(event->get_metakey());
+ set_alt_pressed(event->is_alt_pressed());
+ set_shift_pressed(event->is_shift_pressed());
+ set_ctrl_pressed(event->is_ctrl_pressed());
+ set_meta_pressed(event->is_meta_pressed());
}
String InputEventWithModifiers::as_text() const {
Vector<String> mod_names;
- if (get_control()) {
- mod_names.push_back(find_keycode_name(KEY_CONTROL));
+ if (is_ctrl_pressed()) {
+ mod_names.push_back(find_keycode_name(KEY_CTRL));
}
- if (get_shift()) {
+ if (is_shift_pressed()) {
mod_names.push_back(find_keycode_name(KEY_SHIFT));
}
- if (get_alt()) {
+ if (is_alt_pressed()) {
mod_names.push_back(find_keycode_name(KEY_ALT));
}
- if (get_metakey()) {
+ if (is_meta_pressed()) {
mod_names.push_back(find_keycode_name(KEY_META));
}
@@ -225,27 +225,27 @@ void InputEventWithModifiers::_bind_methods() {
ClassDB::bind_method(D_METHOD("set_store_command", "enable"), &InputEventWithModifiers::set_store_command);
ClassDB::bind_method(D_METHOD("is_storing_command"), &InputEventWithModifiers::is_storing_command);
- ClassDB::bind_method(D_METHOD("set_alt", "enable"), &InputEventWithModifiers::set_alt);
- ClassDB::bind_method(D_METHOD("get_alt"), &InputEventWithModifiers::get_alt);
+ ClassDB::bind_method(D_METHOD("set_alt_pressed", "pressed"), &InputEventWithModifiers::set_alt_pressed);
+ ClassDB::bind_method(D_METHOD("is_alt_pressed"), &InputEventWithModifiers::is_alt_pressed);
- ClassDB::bind_method(D_METHOD("set_shift", "enable"), &InputEventWithModifiers::set_shift);
- ClassDB::bind_method(D_METHOD("get_shift"), &InputEventWithModifiers::get_shift);
+ ClassDB::bind_method(D_METHOD("set_shift_pressed", "pressed"), &InputEventWithModifiers::set_shift_pressed);
+ ClassDB::bind_method(D_METHOD("is_shift_pressed"), &InputEventWithModifiers::is_shift_pressed);
- ClassDB::bind_method(D_METHOD("set_control", "enable"), &InputEventWithModifiers::set_control);
- ClassDB::bind_method(D_METHOD("get_control"), &InputEventWithModifiers::get_control);
+ ClassDB::bind_method(D_METHOD("set_ctrl_pressed", "pressed"), &InputEventWithModifiers::set_ctrl_pressed);
+ ClassDB::bind_method(D_METHOD("is_ctrl_pressed"), &InputEventWithModifiers::is_ctrl_pressed);
- ClassDB::bind_method(D_METHOD("set_metakey", "enable"), &InputEventWithModifiers::set_metakey);
- ClassDB::bind_method(D_METHOD("get_metakey"), &InputEventWithModifiers::get_metakey);
+ ClassDB::bind_method(D_METHOD("set_meta_pressed", "pressed"), &InputEventWithModifiers::set_meta_pressed);
+ ClassDB::bind_method(D_METHOD("is_meta_pressed"), &InputEventWithModifiers::is_meta_pressed);
- ClassDB::bind_method(D_METHOD("set_command", "enable"), &InputEventWithModifiers::set_command);
- ClassDB::bind_method(D_METHOD("get_command"), &InputEventWithModifiers::get_command);
+ ClassDB::bind_method(D_METHOD("set_command_pressed", "pressed"), &InputEventWithModifiers::set_command_pressed);
+ ClassDB::bind_method(D_METHOD("is_command_pressed"), &InputEventWithModifiers::is_command_pressed);
ADD_PROPERTY(PropertyInfo(Variant::BOOL, "store_command"), "set_store_command", "is_storing_command");
- ADD_PROPERTY(PropertyInfo(Variant::BOOL, "alt"), "set_alt", "get_alt");
- ADD_PROPERTY(PropertyInfo(Variant::BOOL, "shift"), "set_shift", "get_shift");
- ADD_PROPERTY(PropertyInfo(Variant::BOOL, "control"), "set_control", "get_control");
- ADD_PROPERTY(PropertyInfo(Variant::BOOL, "meta"), "set_metakey", "get_metakey");
- ADD_PROPERTY(PropertyInfo(Variant::BOOL, "command"), "set_command", "get_command");
+ ADD_PROPERTY(PropertyInfo(Variant::BOOL, "alt_pressed"), "set_alt_pressed", "is_alt_pressed");
+ ADD_PROPERTY(PropertyInfo(Variant::BOOL, "shift_pressed"), "set_shift_pressed", "is_shift_pressed");
+ ADD_PROPERTY(PropertyInfo(Variant::BOOL, "ctrl_pressed"), "set_ctrl_pressed", "is_ctrl_pressed");
+ ADD_PROPERTY(PropertyInfo(Variant::BOOL, "meta_pressed"), "set_meta_pressed", "is_meta_pressed");
+ ADD_PROPERTY(PropertyInfo(Variant::BOOL, "command_pressed"), "set_command_pressed", "is_command_pressed");
}
void InputEventWithModifiers::_validate_property(PropertyInfo &property) const {
@@ -253,18 +253,18 @@ void InputEventWithModifiers::_validate_property(PropertyInfo &property) const {
// If we only want to Store "Command".
#ifdef APPLE_STYLE_KEYS
// Don't store "Meta" on Mac.
- if (property.name == "meta") {
+ if (property.name == "meta_pressed") {
property.usage ^= PROPERTY_USAGE_STORAGE;
}
#else
- // Don't store "Control".
- if (property.name == "control") {
+ // Don't store "Ctrl".
+ if (property.name == "ctrl_pressed") {
property.usage ^= PROPERTY_USAGE_STORAGE;
}
#endif
} else {
- // We don't want to store command, only control or meta (on mac).
- if (property.name == "command") {
+ // We don't want to store command, only ctrl or meta (on mac).
+ if (property.name == "command_pressed") {
property.usage ^= PROPERTY_USAGE_STORAGE;
}
}
@@ -314,16 +314,16 @@ bool InputEventKey::is_echo() const {
uint32_t InputEventKey::get_keycode_with_modifiers() const {
uint32_t sc = keycode;
- if (get_control()) {
+ if (is_ctrl_pressed()) {
sc |= KEY_MASK_CTRL;
}
- if (get_alt()) {
+ if (is_alt_pressed()) {
sc |= KEY_MASK_ALT;
}
- if (get_shift()) {
+ if (is_shift_pressed()) {
sc |= KEY_MASK_SHIFT;
}
- if (get_metakey()) {
+ if (is_meta_pressed()) {
sc |= KEY_MASK_META;
}
@@ -332,16 +332,16 @@ uint32_t InputEventKey::get_keycode_with_modifiers() const {
uint32_t InputEventKey::get_physical_keycode_with_modifiers() const {
uint32_t sc = physical_keycode;
- if (get_control()) {
+ if (is_ctrl_pressed()) {
sc |= KEY_MASK_CTRL;
}
- if (get_alt()) {
+ if (is_alt_pressed()) {
sc |= KEY_MASK_ALT;
}
- if (get_shift()) {
+ if (is_shift_pressed()) {
sc |= KEY_MASK_SHIFT;
}
- if (get_metakey()) {
+ if (is_meta_pressed()) {
sc |= KEY_MASK_META;
}
@@ -372,16 +372,16 @@ String InputEventKey::to_string() {
String kc = "";
String physical = "false";
if (keycode == 0) {
- kc = itos(physical_keycode) + " " + keycode_get_string(physical_keycode);
+ kc = itos(physical_keycode) + " (" + keycode_get_string(physical_keycode) + ")";
physical = "true";
} else {
- kc = itos(keycode) + " " + keycode_get_string(keycode);
+ kc = itos(keycode) + " (" + keycode_get_string(keycode) + ")";
}
String mods = InputEventWithModifiers::as_text();
- mods = mods == "" ? TTR("None") : mods;
+ mods = mods == "" ? TTR("none") : mods;
- return vformat("InputEventKey: keycode=%s mods=%s physical=%s pressed=%s echo=%s", kc, mods, physical, p, e);
+ return vformat("InputEventKey: keycode=%s, mods=%s, physical=%s, pressed=%s, echo=%s", kc, mods, physical, p, e);
}
Ref<InputEventKey> InputEventKey::create_reference(uint32_t p_keycode) {
@@ -391,19 +391,19 @@ Ref<InputEventKey> InputEventKey::create_reference(uint32_t p_keycode) {
ie->set_unicode(p_keycode & KEY_CODE_MASK);
if (p_keycode & KEY_MASK_SHIFT) {
- ie->set_shift(true);
+ ie->set_shift_pressed(true);
}
if (p_keycode & KEY_MASK_ALT) {
- ie->set_alt(true);
+ ie->set_alt_pressed(true);
}
if (p_keycode & KEY_MASK_CTRL) {
- ie->set_control(true);
+ ie->set_ctrl_pressed(true);
}
if (p_keycode & KEY_MASK_CMD) {
- ie->set_command(true);
+ ie->set_command_pressed(true);
}
if (p_keycode & KEY_MASK_META) {
- ie->set_metakey(true);
+ ie->set_meta_pressed(true);
}
return ie;
@@ -667,11 +667,11 @@ String InputEventMouseButton::to_string() {
}
String mods = InputEventWithModifiers::as_text();
- mods = mods == "" ? TTR("None") : mods;
+ mods = mods == "" ? TTR("none") : mods;
// Work around the fact vformat can only take 5 substitutions but 6 need to be passed.
- String index_and_mods = vformat("button_index=%s mods=%s", button_index, mods);
- return vformat("InputEventMouseButton: %s pressed=%s position=(%s) button_mask=%s double_click=%s", index_and_mods, p, String(get_position()), itos(get_button_mask()), d);
+ String index_and_mods = vformat("button_index=%s, mods=%s", button_index, mods);
+ return vformat("InputEventMouseButton: %s, pressed=%s, position=(%s), button_mask=%d, double_click=%s", index_and_mods, p, String(get_position()), get_button_mask(), d);
}
void InputEventMouseButton::_bind_methods() {
@@ -780,7 +780,9 @@ String InputEventMouseMotion::to_string() {
break;
}
- return "InputEventMouseMotion : button_mask=" + button_mask_string + ", position=(" + String(get_position()) + "), relative=(" + String(get_relative()) + "), speed=(" + String(get_speed()) + "), pressure=(" + rtos(get_pressure()) + "), tilt=(" + String(get_tilt()) + ")";
+ // Work around the fact vformat can only take 5 substitutions but 6 need to be passed.
+ String mask_and_position = vformat("button_mask=%s, position=(%s)", button_mask_string, String(get_position()));
+ return vformat("InputEventMouseMotion: %s, relative=(%s), speed=(%s), pressure=%.2f, tilt=(%s)", mask_and_position, String(get_relative()), String(get_speed()), get_pressure(), String(get_tilt()));
}
bool InputEventMouseMotion::accumulate(const Ref<InputEvent> &p_event) {
@@ -801,19 +803,19 @@ bool InputEventMouseMotion::accumulate(const Ref<InputEvent> &p_event) {
return false;
}
- if (get_shift() != motion->get_shift()) {
+ if (is_shift_pressed() != motion->is_shift_pressed()) {
return false;
}
- if (get_control() != motion->get_control()) {
+ if (is_ctrl_pressed() != motion->is_ctrl_pressed()) {
return false;
}
- if (get_alt() != motion->get_alt()) {
+ if (is_alt_pressed() != motion->is_alt_pressed()) {
return false;
}
- if (get_metakey() != motion->get_metakey()) {
+ if (is_meta_pressed() != motion->is_meta_pressed()) {
return false;
}
@@ -918,11 +920,11 @@ static const char *_joy_axis_descriptions[JOY_AXIS_MAX] = {
String InputEventJoypadMotion::as_text() const {
String desc = axis < JOY_AXIS_MAX ? RTR(_joy_axis_descriptions[axis]) : TTR("Unknown Joypad Axis");
- return vformat(TTR("Joypad Motion on Axis %s (%s) with Value %s"), itos(axis), desc, String(Variant(axis_value)));
+ return vformat(TTR("Joypad Motion on Axis %d (%s) with Value %.2f"), axis, desc, axis_value);
}
String InputEventJoypadMotion::to_string() {
- return "InputEventJoypadMotion : axis=" + itos(axis) + ", axis_value=" + String(Variant(axis_value));
+ return vformat("InputEventJoypadMotion: axis=%d, axis_value=%.2f", axis, axis_value);
}
void InputEventJoypadMotion::_bind_methods() {
@@ -1033,7 +1035,8 @@ String InputEventJoypadButton::as_text() const {
}
String InputEventJoypadButton::to_string() {
- return "InputEventJoypadButton : button_index=" + itos(button_index) + ", pressed=" + (pressed ? "true" : "false") + ", pressure=" + String(Variant(pressure));
+ String p = pressed ? "true" : "false";
+ return vformat("InputEventJoypadButton: button_index=%d, pressed=%s, pressure=%.2f", button_index, p, pressure);
}
Ref<InputEventJoypadButton> InputEventJoypadButton::create_reference(int p_btn_index) {
@@ -1104,7 +1107,8 @@ String InputEventScreenTouch::as_text() const {
}
String InputEventScreenTouch::to_string() {
- return "InputEventScreenTouch : index=" + itos(index) + ", pressed=" + (pressed ? "true" : "false") + ", position=(" + String(get_position()) + ")";
+ String p = pressed ? "true" : "false";
+ return vformat("InputEventScreenTouch: index=%d, pressed=%s, position=(%s)", index, p, String(get_position()));
}
void InputEventScreenTouch::_bind_methods() {
@@ -1177,7 +1181,7 @@ String InputEventScreenDrag::as_text() const {
}
String InputEventScreenDrag::to_string() {
- return "InputEventScreenDrag : index=" + itos(index) + ", position=(" + String(get_position()) + "), relative=(" + String(get_relative()) + "), speed=(" + String(get_speed()) + ")";
+ return vformat("InputEventScreenDrag: index=%d, position=(%s), relative=(%s), speed=(%s)", index, String(get_position()), String(get_relative()), String(get_speed()));
}
void InputEventScreenDrag::_bind_methods() {
@@ -1264,7 +1268,8 @@ String InputEventAction::as_text() const {
}
String InputEventAction::to_string() {
- return "InputEventAction : action=" + action + ", pressed=(" + (pressed ? "true" : "false");
+ String p = pressed ? "true" : "false";
+ return vformat("InputEventAction: action=\"%s\", pressed=%s", action, p);
}
void InputEventAction::_bind_methods() {
@@ -1331,7 +1336,7 @@ String InputEventMagnifyGesture::as_text() const {
}
String InputEventMagnifyGesture::to_string() {
- return "InputEventMagnifyGesture : factor=" + rtos(get_factor()) + ", position=(" + String(get_position()) + ")";
+ return vformat("InputEventMagnifyGesture: factor=%.2f, position=(%s)", factor, String(get_position()));
}
void InputEventMagnifyGesture::_bind_methods() {
@@ -1371,7 +1376,7 @@ String InputEventPanGesture::as_text() const {
}
String InputEventPanGesture::to_string() {
- return "InputEventPanGesture : delta=(" + String(get_delta()) + "), position=(" + String(get_position()) + ")";
+ return vformat("InputEventPanGesture: delta=(%s), position=(%s)", String(get_delta()), String(get_position()));
}
void InputEventPanGesture::_bind_methods() {
@@ -1452,7 +1457,7 @@ String InputEventMIDI::as_text() const {
}
String InputEventMIDI::to_string() {
- return vformat("InputEvenMIDI: channel=%s message=%s pitch=%s velocity=%s pressure=%s", itos(channel), itos(message), itos(pitch), itos(velocity), itos(pressure));
+ return vformat("InputEventMIDI: channel=%d, message=%d, pitch=%d, velocity=%d, pressure=%d", channel, message, pitch, velocity, pressure);
}
void InputEventMIDI::_bind_methods() {
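Note: the renames above replace get_shift()/get_control()/get_alt()/get_metakey()/get_command() with is_*_pressed() accessors and KEY_CONTROL with KEY_CTRL. A hedged sketch of handling a key event with the new names (MyNode, handle_key() and save() are hypothetical, not from this commit):

    void MyNode::handle_key(const Ref<InputEventKey> &key) {
        if (key->is_pressed() && !key->is_echo()) {
            // Ctrl+S with the renamed modifier accessor and keycode constant.
            if (key->is_ctrl_pressed() && key->get_keycode() == KEY_S) {
                save();
            }
        }
    }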
diff --git a/core/input/input_event.h b/core/input/input_event.h
index 5a33ee7b9c..eed0d79326 100644
--- a/core/input/input_event.h
+++ b/core/input/input_event.h
@@ -170,21 +170,21 @@ class InputEventWithModifiers : public InputEventFromWindow {
bool store_command = true;
- bool shift = false;
- bool alt = false;
+ bool shift_pressed = false;
+ bool alt_pressed = false;
#ifdef APPLE_STYLE_KEYS
union {
- bool command;
- bool meta = false; //< windows/mac key
+ bool command_pressed;
+ bool meta_pressed = false; //< windows/mac key
};
- bool control = false;
+ bool ctrl_pressed = false;
#else
union {
- bool command; //< windows/mac key
- bool control = false;
+ bool command_pressed; //< windows/mac key
+ bool ctrl_pressed = false;
};
- bool meta = false; //< windows/mac key
+ bool meta_pressed = false; //< windows/mac key
#endif
protected:
@@ -195,20 +195,20 @@ public:
void set_store_command(bool p_enabled);
bool is_storing_command() const;
- void set_shift(bool p_enabled);
- bool get_shift() const;
+ void set_shift_pressed(bool p_pressed);
+ bool is_shift_pressed() const;
- void set_alt(bool p_enabled);
- bool get_alt() const;
+ void set_alt_pressed(bool p_pressed);
+ bool is_alt_pressed() const;
- void set_control(bool p_enabled);
- bool get_control() const;
+ void set_ctrl_pressed(bool p_pressed);
+ bool is_ctrl_pressed() const;
- void set_metakey(bool p_enabled);
- bool get_metakey() const;
+ void set_meta_pressed(bool p_pressed);
+ bool is_meta_pressed() const;
- void set_command(bool p_enabled);
- bool get_command() const;
+ void set_command_pressed(bool p_pressed);
+ bool is_command_pressed() const;
void set_modifiers_from_event(const InputEventWithModifiers *event);
diff --git a/core/io/file_access_compressed.cpp b/core/io/file_access_compressed.cpp
index b2440629e3..efcaa80fc5 100644
--- a/core/io/file_access_compressed.cpp
+++ b/core/io/file_access_compressed.cpp
@@ -32,7 +32,7 @@
#include "core/string/print_string.h"
-void FileAccessCompressed::configure(const String &p_magic, Compression::Mode p_mode, int p_block_size) {
+void FileAccessCompressed::configure(const String &p_magic, Compression::Mode p_mode, uint32_t p_block_size) {
magic = p_magic.ascii().get_data();
if (magic.length() > 4) {
magic = magic.substr(0, 4);
@@ -67,10 +67,10 @@ Error FileAccessCompressed::open_after_magic(FileAccess *p_base) {
ERR_FAIL_V_MSG(ERR_FILE_CORRUPT, "Can't open compressed file '" + p_base->get_path() + "' with block size 0, it is corrupted.");
}
read_total = f->get_32();
- int bc = (read_total / block_size) + 1;
- int acc_ofs = f->get_position() + bc * 4;
- int max_bs = 0;
- for (int i = 0; i < bc; i++) {
+ uint32_t bc = (read_total / block_size) + 1;
+ uint64_t acc_ofs = f->get_position() + bc * 4;
+ uint32_t max_bs = 0;
+ for (uint32_t i = 0; i < bc; i++) {
ReadBlock rb;
rb.offset = acc_ofs;
rb.csize = f->get_32();
@@ -148,15 +148,15 @@ void FileAccessCompressed::close() {
f->store_32(cmode); //write compression mode 4
f->store_32(block_size); //write block size 4
f->store_32(write_max); //max amount of data written 4
- int bc = (write_max / block_size) + 1;
+ uint32_t bc = (write_max / block_size) + 1;
- for (int i = 0; i < bc; i++) {
+ for (uint32_t i = 0; i < bc; i++) {
f->store_32(0); //compressed sizes, will update later
}
Vector<int> block_sizes;
- for (int i = 0; i < bc; i++) {
- int bl = i == (bc - 1) ? write_max % block_size : block_size;
+ for (uint32_t i = 0; i < bc; i++) {
+ uint32_t bl = i == (bc - 1) ? write_max % block_size : block_size;
uint8_t *bp = &write_ptr[i * block_size];
Vector<uint8_t> cblock;
@@ -168,7 +168,7 @@ void FileAccessCompressed::close() {
}
f->seek(16); //ok write block sizes
- for (int i = 0; i < bc; i++) {
+ for (uint32_t i = 0; i < bc; i++) {
f->store_32(block_sizes[i]);
}
f->seek_end();
@@ -190,8 +190,9 @@ bool FileAccessCompressed::is_open() const {
return f != nullptr;
}
-void FileAccessCompressed::seek(size_t p_position) {
+void FileAccessCompressed::seek(uint64_t p_position) {
ERR_FAIL_COND_MSG(!f, "File must be opened before use.");
+
if (writing) {
ERR_FAIL_COND(p_position > write_max);
@@ -204,7 +205,7 @@ void FileAccessCompressed::seek(size_t p_position) {
} else {
at_end = false;
read_eof = false;
- int block_idx = p_position / block_size;
+ uint32_t block_idx = p_position / block_size;
if (block_idx != read_block) {
read_block = block_idx;
f->seek(read_blocks[read_block].offset);
@@ -227,7 +228,7 @@ void FileAccessCompressed::seek_end(int64_t p_position) {
}
}
-size_t FileAccessCompressed::get_position() const {
+uint64_t FileAccessCompressed::get_position() const {
ERR_FAIL_COND_V_MSG(!f, 0, "File must be opened before use.");
if (writing) {
return write_pos;
@@ -236,7 +237,7 @@ size_t FileAccessCompressed::get_position() const {
}
}
-size_t FileAccessCompressed::get_len() const {
+uint64_t FileAccessCompressed::get_len() const {
ERR_FAIL_COND_V_MSG(!f, 0, "File must be opened before use.");
if (writing) {
return write_max;
@@ -285,9 +286,8 @@ uint8_t FileAccessCompressed::get_8() const {
return ret;
}
-int FileAccessCompressed::get_buffer(uint8_t *p_dst, int p_length) const {
+uint64_t FileAccessCompressed::get_buffer(uint8_t *p_dst, uint64_t p_length) const {
ERR_FAIL_COND_V(!p_dst && p_length > 0, -1);
- ERR_FAIL_COND_V(p_length < 0, -1);
ERR_FAIL_COND_V_MSG(!f, -1, "File must be opened before use.");
ERR_FAIL_COND_V_MSG(writing, -1, "File has not been opened in read mode.");
@@ -296,7 +296,7 @@ int FileAccessCompressed::get_buffer(uint8_t *p_dst, int p_length) const {
return 0;
}
- for (int i = 0; i < p_length; i++) {
+ for (uint64_t i = 0; i < p_length; i++) {
p_dst[i] = read_ptr[read_pos];
read_pos++;
if (read_pos >= read_block_size) {
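Note: in the compressed-file reader above, the header stores the uncompressed size followed by one 32-bit compressed size per block, and the loop rebuilds the block offset table from those entries. A small arithmetic sketch of that layout (`total` and `header_end` stand in for the values read from the header and f->get_position() after it; names are illustrative):

    uint32_t block_count = (total / block_size) + 1;                      // As in open_after_magic().
    uint64_t first_block_offset = header_end + (uint64_t)block_count * 4; // 4 bytes per size entry.
    // Each block except the last holds block_size uncompressed bytes; the
    // last block holds total % block_size bytes.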
diff --git a/core/io/file_access_compressed.h b/core/io/file_access_compressed.h
index 118d05ea57..d8a81c2417 100644
--- a/core/io/file_access_compressed.h
+++ b/core/io/file_access_compressed.h
@@ -37,34 +37,34 @@
class FileAccessCompressed : public FileAccess {
Compression::Mode cmode = Compression::MODE_ZSTD;
bool writing = false;
- uint32_t write_pos = 0;
+ uint64_t write_pos = 0;
uint8_t *write_ptr = nullptr;
uint32_t write_buffer_size = 0;
- uint32_t write_max = 0;
+ uint64_t write_max = 0;
uint32_t block_size = 0;
mutable bool read_eof = false;
mutable bool at_end = false;
struct ReadBlock {
- int csize;
- int offset;
+ uint32_t csize;
+ uint64_t offset;
};
mutable Vector<uint8_t> comp_buffer;
uint8_t *read_ptr = nullptr;
- mutable int read_block = 0;
- int read_block_count = 0;
- mutable int read_block_size = 0;
- mutable int read_pos = 0;
+ mutable uint32_t read_block = 0;
+ uint32_t read_block_count = 0;
+ mutable uint32_t read_block_size = 0;
+ mutable uint64_t read_pos = 0;
Vector<ReadBlock> read_blocks;
- uint32_t read_total = 0;
+ uint64_t read_total = 0;
String magic = "GCMP";
mutable Vector<uint8_t> buffer;
FileAccess *f = nullptr;
public:
- void configure(const String &p_magic, Compression::Mode p_mode = Compression::MODE_ZSTD, int p_block_size = 4096);
+ void configure(const String &p_magic, Compression::Mode p_mode = Compression::MODE_ZSTD, uint32_t p_block_size = 4096);
Error open_after_magic(FileAccess *p_base);
@@ -72,15 +72,15 @@ public:
virtual void close(); ///< close a file
virtual bool is_open() const; ///< true when file is open
- virtual void seek(size_t p_position); ///< seek to a given position
+ virtual void seek(uint64_t p_position); ///< seek to a given position
virtual void seek_end(int64_t p_position = 0); ///< seek from the end of file
- virtual size_t get_position() const; ///< get position in the file
- virtual size_t get_len() const; ///< get size of the file
+ virtual uint64_t get_position() const; ///< get position in the file
+ virtual uint64_t get_len() const; ///< get size of the file
virtual bool eof_reached() const; ///< reading passed EOF
virtual uint8_t get_8() const; ///< get a byte
- virtual int get_buffer(uint8_t *p_dst, int p_length) const;
+ virtual uint64_t get_buffer(uint8_t *p_dst, uint64_t p_length) const;
virtual Error get_error() const; ///< get last error
diff --git a/core/io/file_access_encrypted.cpp b/core/io/file_access_encrypted.cpp
index 13377a3a25..9a6bee7348 100644
--- a/core/io/file_access_encrypted.cpp
+++ b/core/io/file_access_encrypted.cpp
@@ -70,13 +70,13 @@ Error FileAccessEncrypted::open_and_parse(FileAccess *p_base, const Vector<uint8
base = p_base->get_position();
ERR_FAIL_COND_V(p_base->get_len() < base + length, ERR_FILE_CORRUPT);
- uint32_t ds = length;
+ uint64_t ds = length;
if (ds % 16) {
ds += 16 - (ds % 16);
}
data.resize(ds);
- uint32_t blen = p_base->get_buffer(data.ptrw(), ds);
+ uint64_t blen = p_base->get_buffer(data.ptrw(), ds);
ERR_FAIL_COND_V(blen != ds, ERR_FILE_CORRUPT);
{
@@ -141,7 +141,7 @@ void FileAccessEncrypted::release() {
void FileAccessEncrypted::_release() {
if (writing) {
Vector<uint8_t> compressed;
- size_t len = data.size();
+ uint64_t len = data.size();
if (len % 16) {
len += 16 - (len % 16);
}
@@ -198,9 +198,9 @@ String FileAccessEncrypted::get_path_absolute() const {
}
}
-void FileAccessEncrypted::seek(size_t p_position) {
- if (p_position > (size_t)data.size()) {
- p_position = data.size();
+void FileAccessEncrypted::seek(uint64_t p_position) {
+ if (p_position > get_len()) {
+ p_position = get_len();
}
pos = p_position;
@@ -208,14 +208,14 @@ void FileAccessEncrypted::seek(size_t p_position) {
}
void FileAccessEncrypted::seek_end(int64_t p_position) {
- seek(data.size() + p_position);
+ seek(get_len() + p_position);
}
-size_t FileAccessEncrypted::get_position() const {
+uint64_t FileAccessEncrypted::get_position() const {
return pos;
}
-size_t FileAccessEncrypted::get_len() const {
+uint64_t FileAccessEncrypted::get_len() const {
return data.size();
}
@@ -225,7 +225,7 @@ bool FileAccessEncrypted::eof_reached() const {
uint8_t FileAccessEncrypted::get_8() const {
ERR_FAIL_COND_V_MSG(writing, 0, "File has not been opened in read mode.");
- if (pos >= data.size()) {
+ if (pos >= get_len()) {
eofed = true;
return 0;
}
@@ -235,13 +235,12 @@ uint8_t FileAccessEncrypted::get_8() const {
return b;
}
-int FileAccessEncrypted::get_buffer(uint8_t *p_dst, int p_length) const {
+uint64_t FileAccessEncrypted::get_buffer(uint8_t *p_dst, uint64_t p_length) const {
ERR_FAIL_COND_V(!p_dst && p_length > 0, -1);
- ERR_FAIL_COND_V(p_length < 0, -1);
ERR_FAIL_COND_V_MSG(writing, -1, "File has not been opened in read mode.");
- int to_copy = MIN(p_length, data.size() - pos);
- for (int i = 0; i < to_copy; i++) {
+ uint64_t to_copy = MIN(p_length, get_len() - pos);
+ for (uint64_t i = 0; i < to_copy; i++) {
p_dst[i] = data[pos++];
}
@@ -256,16 +255,16 @@ Error FileAccessEncrypted::get_error() const {
return eofed ? ERR_FILE_EOF : OK;
}
-void FileAccessEncrypted::store_buffer(const uint8_t *p_src, int p_length) {
+void FileAccessEncrypted::store_buffer(const uint8_t *p_src, uint64_t p_length) {
ERR_FAIL_COND_MSG(!writing, "File has not been opened in write mode.");
- if (pos < data.size()) {
- for (int i = 0; i < p_length; i++) {
+ if (pos < get_len()) {
+ for (uint64_t i = 0; i < p_length; i++) {
store_8(p_src[i]);
}
- } else if (pos == data.size()) {
+ } else if (pos == get_len()) {
data.resize(pos + p_length);
- for (int i = 0; i < p_length; i++) {
+ for (uint64_t i = 0; i < p_length; i++) {
data.write[pos + i] = p_src[i];
}
pos += p_length;
@@ -281,10 +280,10 @@ void FileAccessEncrypted::flush() {
void FileAccessEncrypted::store_8(uint8_t p_dest) {
ERR_FAIL_COND_MSG(!writing, "File has not been opened in write mode.");
- if (pos < data.size()) {
+ if (pos < get_len()) {
data.write[pos] = p_dest;
pos++;
- } else if (pos == data.size()) {
+ } else if (pos == get_len()) {
data.push_back(p_dest);
pos++;
}
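Note: the encrypted-file changes keep the AES block-size padding rule but move it to 64-bit arithmetic. As a sketch, the buffer length is rounded up to the next multiple of 16 bytes (the AES block size):

    static uint64_t pad_to_aes_block(uint64_t len) {
        if (len % 16) {
            len += 16 - (len % 16);
        }
        return len;
    }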
diff --git a/core/io/file_access_encrypted.h b/core/io/file_access_encrypted.h
index 969052d04f..8bea8c2585 100644
--- a/core/io/file_access_encrypted.h
+++ b/core/io/file_access_encrypted.h
@@ -47,10 +47,10 @@ private:
Vector<uint8_t> key;
bool writing = false;
FileAccess *file = nullptr;
- size_t base = 0;
- size_t length = 0;
+ uint64_t base = 0;
+ uint64_t length = 0;
Vector<uint8_t> data;
- mutable int pos = 0;
+ mutable uint64_t pos = 0;
mutable bool eofed = false;
bool use_magic = true;
@@ -68,21 +68,21 @@ public:
virtual String get_path() const; /// returns the path for the current open file
virtual String get_path_absolute() const; /// returns the absolute path for the current open file
- virtual void seek(size_t p_position); ///< seek to a given position
+ virtual void seek(uint64_t p_position); ///< seek to a given position
virtual void seek_end(int64_t p_position = 0); ///< seek from the end of file
- virtual size_t get_position() const; ///< get position in the file
- virtual size_t get_len() const; ///< get size of the file
+ virtual uint64_t get_position() const; ///< get position in the file
+ virtual uint64_t get_len() const; ///< get size of the file
virtual bool eof_reached() const; ///< reading passed EOF
virtual uint8_t get_8() const; ///< get a byte
- virtual int get_buffer(uint8_t *p_dst, int p_length) const;
+ virtual uint64_t get_buffer(uint8_t *p_dst, uint64_t p_length) const;
virtual Error get_error() const; ///< get last error
virtual void flush();
virtual void store_8(uint8_t p_dest); ///< store a byte
- virtual void store_buffer(const uint8_t *p_src, int p_length); ///< store an array of bytes
+ virtual void store_buffer(const uint8_t *p_src, uint64_t p_length); ///< store an array of bytes
virtual bool file_exists(const String &p_name); ///< return true if a file exists
diff --git a/core/io/file_access_memory.cpp b/core/io/file_access_memory.cpp
index af155a77a8..14e24d6668 100644
--- a/core/io/file_access_memory.cpp
+++ b/core/io/file_access_memory.cpp
@@ -71,7 +71,7 @@ bool FileAccessMemory::file_exists(const String &p_name) {
return files && (files->find(name) != nullptr);
}
-Error FileAccessMemory::open_custom(const uint8_t *p_data, int p_len) {
+Error FileAccessMemory::open_custom(const uint8_t *p_data, uint64_t p_len) {
data = (uint8_t *)p_data;
length = p_len;
pos = 0;
@@ -102,7 +102,7 @@ bool FileAccessMemory::is_open() const {
return data != nullptr;
}
-void FileAccessMemory::seek(size_t p_position) {
+void FileAccessMemory::seek(uint64_t p_position) {
ERR_FAIL_COND(!data);
pos = p_position;
}
@@ -112,12 +112,12 @@ void FileAccessMemory::seek_end(int64_t p_position) {
pos = length + p_position;
}
-size_t FileAccessMemory::get_position() const {
+uint64_t FileAccessMemory::get_position() const {
ERR_FAIL_COND_V(!data, 0);
return pos;
}
-size_t FileAccessMemory::get_len() const {
+uint64_t FileAccessMemory::get_len() const {
ERR_FAIL_COND_V(!data, 0);
return length;
}
@@ -136,13 +136,12 @@ uint8_t FileAccessMemory::get_8() const {
return ret;
}
-int FileAccessMemory::get_buffer(uint8_t *p_dst, int p_length) const {
+uint64_t FileAccessMemory::get_buffer(uint8_t *p_dst, uint64_t p_length) const {
ERR_FAIL_COND_V(!p_dst && p_length > 0, -1);
- ERR_FAIL_COND_V(p_length < 0, -1);
ERR_FAIL_COND_V(!data, -1);
- int left = length - pos;
- int read = MIN(p_length, left);
+ uint64_t left = length - pos;
+ uint64_t read = MIN(p_length, left);
if (read < p_length) {
WARN_PRINT("Reading less data than requested");
@@ -168,9 +167,9 @@ void FileAccessMemory::store_8(uint8_t p_byte) {
data[pos++] = p_byte;
}
-void FileAccessMemory::store_buffer(const uint8_t *p_src, int p_length) {
- int left = length - pos;
- int write = MIN(p_length, left);
+void FileAccessMemory::store_buffer(const uint8_t *p_src, uint64_t p_length) {
+ uint64_t left = length - pos;
+ uint64_t write = MIN(p_length, left);
if (write < p_length) {
WARN_PRINT("Writing less data than requested");
}
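Note: the in-memory file keeps the same clamping pattern for reads and writes, now with 64-bit lengths: operations are truncated to the bytes remaining after the cursor, with a warning on short transfers. Sketch (`requested` stands in for p_length; `length` and `pos` are the class fields):

    uint64_t left = length - pos;       // Bytes remaining after the cursor.
    uint64_t n = MIN(requested, left);  // Never run past the backing buffer.
    if (n < requested) {
        WARN_PRINT("Reading less data than requested");
    }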
diff --git a/core/io/file_access_memory.h b/core/io/file_access_memory.h
index 0e3b0ad7b1..cc589dc259 100644
--- a/core/io/file_access_memory.h
+++ b/core/io/file_access_memory.h
@@ -35,8 +35,8 @@
class FileAccessMemory : public FileAccess {
uint8_t *data = nullptr;
- int length = 0;
- mutable int pos = 0;
+ uint64_t length = 0;
+ mutable uint64_t pos = 0;
static FileAccess *create();
@@ -44,27 +44,27 @@ public:
static void register_file(String p_name, Vector<uint8_t> p_data);
static void cleanup();
- virtual Error open_custom(const uint8_t *p_data, int p_len); ///< open a file
+ virtual Error open_custom(const uint8_t *p_data, uint64_t p_len); ///< open a file
virtual Error _open(const String &p_path, int p_mode_flags); ///< open a file
virtual void close(); ///< close a file
virtual bool is_open() const; ///< true when file is open
- virtual void seek(size_t p_position); ///< seek to a given position
+ virtual void seek(uint64_t p_position); ///< seek to a given position
virtual void seek_end(int64_t p_position); ///< seek from the end of file
- virtual size_t get_position() const; ///< get position in the file
- virtual size_t get_len() const; ///< get size of the file
+ virtual uint64_t get_position() const; ///< get position in the file
+ virtual uint64_t get_len() const; ///< get size of the file
virtual bool eof_reached() const; ///< reading passed EOF
virtual uint8_t get_8() const; ///< get a byte
- virtual int get_buffer(uint8_t *p_dst, int p_length) const; ///< get an array of bytes
+ virtual uint64_t get_buffer(uint8_t *p_dst, uint64_t p_length) const; ///< get an array of bytes
virtual Error get_error() const; ///< get last error
virtual void flush();
virtual void store_8(uint8_t p_byte); ///< store a byte
- virtual void store_buffer(const uint8_t *p_src, int p_length); ///< store an array of bytes
+ virtual void store_buffer(const uint8_t *p_src, uint64_t p_length); ///< store an array of bytes
virtual bool file_exists(const String &p_name); ///< return true if a file exists
diff --git a/core/io/file_access_network.cpp b/core/io/file_access_network.cpp
index 4cc73bcd22..dedd5523ed 100644
--- a/core/io/file_access_network.cpp
+++ b/core/io/file_access_network.cpp
@@ -126,7 +126,7 @@ void FileAccessNetworkClient::_thread_func() {
if (status != OK) {
fa->_respond(0, Error(status));
} else {
- uint64_t len = get_64();
+ int64_t len = get_64();
fa->_respond(len, Error(status));
}
@@ -135,7 +135,7 @@ void FileAccessNetworkClient::_thread_func() {
} break;
case FileAccessNetwork::RESPONSE_DATA: {
int64_t offset = get_64();
- uint32_t len = get_32();
+ int32_t len = get_32();
Vector<uint8_t> block;
block.resize(len);
@@ -219,13 +219,13 @@ FileAccessNetworkClient::~FileAccessNetworkClient() {
thread.wait_to_finish();
}
-void FileAccessNetwork::_set_block(int p_offset, const Vector<uint8_t> &p_block) {
- int page = p_offset / page_size;
+void FileAccessNetwork::_set_block(uint64_t p_offset, const Vector<uint8_t> &p_block) {
+ int32_t page = p_offset / page_size;
ERR_FAIL_INDEX(page, pages.size());
if (page < pages.size() - 1) {
ERR_FAIL_COND(p_block.size() != page_size);
} else {
- ERR_FAIL_COND((p_block.size() != (int)(total_size % page_size)));
+ ERR_FAIL_COND((uint64_t)p_block.size() != total_size % page_size);
}
{
@@ -240,7 +240,7 @@ void FileAccessNetwork::_set_block(int p_offset, const Vector<uint8_t> &p_block)
}
}
-void FileAccessNetwork::_respond(size_t p_len, Error p_status) {
+void FileAccessNetwork::_respond(uint64_t p_len, Error p_status) {
DEBUG_PRINT("GOT RESPONSE - len: " + itos(p_len) + " status: " + itos(p_status));
response = p_status;
if (response != OK) {
@@ -248,7 +248,7 @@ void FileAccessNetwork::_respond(size_t p_len, Error p_status) {
}
opened = true;
total_size = p_len;
- int pc = ((total_size - 1) / page_size) + 1;
+ int32_t pc = ((total_size - 1) / page_size) + 1;
pages.resize(pc);
}
@@ -307,8 +307,9 @@ bool FileAccessNetwork::is_open() const {
return opened;
}
-void FileAccessNetwork::seek(size_t p_position) {
+void FileAccessNetwork::seek(uint64_t p_position) {
ERR_FAIL_COND_MSG(!opened, "File must be opened before use.");
+
eof_flag = p_position > total_size;
if (p_position >= total_size) {
@@ -322,12 +323,12 @@ void FileAccessNetwork::seek_end(int64_t p_position) {
seek(total_size + p_position);
}
-size_t FileAccessNetwork::get_position() const {
+uint64_t FileAccessNetwork::get_position() const {
ERR_FAIL_COND_V_MSG(!opened, 0, "File must be opened before use.");
return pos;
}
-size_t FileAccessNetwork::get_len() const {
+uint64_t FileAccessNetwork::get_len() const {
ERR_FAIL_COND_V_MSG(!opened, 0, "File must be opened before use.");
return total_size;
}
@@ -343,7 +344,7 @@ uint8_t FileAccessNetwork::get_8() const {
return v;
}
-void FileAccessNetwork::_queue_page(int p_page) const {
+void FileAccessNetwork::_queue_page(int32_t p_page) const {
if (p_page >= pages.size()) {
return;
}
@@ -354,7 +355,7 @@ void FileAccessNetwork::_queue_page(int p_page) const {
FileAccessNetworkClient::BlockRequest br;
br.id = id;
- br.offset = size_t(p_page) * page_size;
+ br.offset = (uint64_t)p_page * page_size;
br.size = page_size;
nc->block_requests.push_back(br);
pages.write[p_page].queued = true;
@@ -365,11 +366,9 @@ void FileAccessNetwork::_queue_page(int p_page) const {
}
}
-int FileAccessNetwork::get_buffer(uint8_t *p_dst, int p_length) const {
+uint64_t FileAccessNetwork::get_buffer(uint8_t *p_dst, uint64_t p_length) const {
ERR_FAIL_COND_V(!p_dst && p_length > 0, -1);
- ERR_FAIL_COND_V(p_length < 0, -1);
- //bool eof=false;
if (pos + p_length > total_size) {
eof_flag = true;
}
@@ -377,18 +376,16 @@ int FileAccessNetwork::get_buffer(uint8_t *p_dst, int p_length) const {
p_length = total_size - pos;
}
- //FileAccessNetworkClient *nc = FileAccessNetworkClient::singleton;
-
uint8_t *buff = last_page_buff;
- for (int i = 0; i < p_length; i++) {
- int page = pos / page_size;
+ for (uint64_t i = 0; i < p_length; i++) {
+ int32_t page = pos / page_size;
if (page != last_page) {
buffer_mutex.lock();
if (pages[page].buffer.is_empty()) {
waiting_on_page = page;
- for (int j = 0; j < read_ahead; j++) {
+ for (int32_t j = 0; j < read_ahead; j++) {
_queue_page(page + j);
}
buffer_mutex.unlock();
@@ -396,10 +393,9 @@ int FileAccessNetwork::get_buffer(uint8_t *p_dst, int p_length) const {
page_sem.wait();
DEBUG_PRINT("done");
} else {
- for (int j = 0; j < read_ahead; j++) {
+ for (int32_t j = 0; j < read_ahead; j++) {
_queue_page(page + j);
}
- //queue pages
buffer_mutex.unlock();
}
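Note: the network file reader maps byte positions to fixed-size pages and requests pages by 64-bit byte offset. A sketch of that paging arithmetic (assuming page_size is positive), matching the casts introduced above to avoid 32-bit overflow:

    int32_t page_for_position(uint64_t pos, int32_t page_size) {
        return (int32_t)(pos / (uint64_t)page_size);
    }

    uint64_t offset_for_page(int32_t page, int32_t page_size) {
        return (uint64_t)page * (uint64_t)page_size; // Cast before multiplying.
    }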
diff --git a/core/io/file_access_network.h b/core/io/file_access_network.h
index 1f5de3e5dd..4810cca195 100644
--- a/core/io/file_access_network.h
+++ b/core/io/file_access_network.h
@@ -40,9 +40,9 @@ class FileAccessNetwork;
class FileAccessNetworkClient {
struct BlockRequest {
- int id;
+ int32_t id;
uint64_t offset;
- int size;
+ int32_t size;
};
List<BlockRequest> block_requests;
@@ -54,17 +54,17 @@ class FileAccessNetworkClient {
Mutex blockrequest_mutex;
Map<int, FileAccessNetwork *> accesses;
Ref<StreamPeerTCP> client;
- int last_id = 0;
- int lockcount = 0;
+ int32_t last_id = 0;
+ int32_t lockcount = 0;
Vector<uint8_t> block;
void _thread_func();
static void _thread_func(void *s);
- void put_32(int p_32);
+ void put_32(int32_t p_32);
void put_64(int64_t p_64);
- int get_32();
+ int32_t get_32();
int64_t get_64();
void lock_mutex();
void unlock_mutex();
@@ -86,15 +86,15 @@ class FileAccessNetwork : public FileAccess {
Semaphore page_sem;
Mutex buffer_mutex;
bool opened = false;
- size_t total_size;
- mutable size_t pos = 0;
- int id;
+ uint64_t total_size;
+ mutable uint64_t pos = 0;
+ int32_t id;
mutable bool eof_flag = false;
- mutable int last_page = -1;
+ mutable int32_t last_page = -1;
mutable uint8_t *last_page_buff = nullptr;
- int page_size;
- int read_ahead;
+ int32_t page_size;
+ int32_t read_ahead;
mutable int waiting_on_page = -1;
@@ -110,9 +110,9 @@ class FileAccessNetwork : public FileAccess {
uint64_t exists_modtime;
friend class FileAccessNetworkClient;
- void _queue_page(int p_page) const;
- void _respond(size_t p_len, Error p_status);
- void _set_block(int p_offset, const Vector<uint8_t> &p_block);
+ void _queue_page(int32_t p_page) const;
+ void _respond(uint64_t p_len, Error p_status);
+ void _set_block(uint64_t p_offset, const Vector<uint8_t> &p_block);
public:
enum Command {
@@ -134,15 +134,15 @@ public:
virtual void close(); ///< close a file
virtual bool is_open() const; ///< true when file is open
- virtual void seek(size_t p_position); ///< seek to a given position
+ virtual void seek(uint64_t p_position); ///< seek to a given position
virtual void seek_end(int64_t p_position = 0); ///< seek from the end of file
- virtual size_t get_position() const; ///< get position in the file
- virtual size_t get_len() const; ///< get size of the file
+ virtual uint64_t get_position() const; ///< get position in the file
+ virtual uint64_t get_len() const; ///< get size of the file
virtual bool eof_reached() const; ///< reading passed EOF
virtual uint8_t get_8() const; ///< get a byte
- virtual int get_buffer(uint8_t *p_dst, int p_length) const;
+ virtual uint64_t get_buffer(uint8_t *p_dst, uint64_t p_length) const;
virtual Error get_error() const; ///< get last error
diff --git a/core/io/file_access_pack.cpp b/core/io/file_access_pack.cpp
index e24dc40166..3e1c51b733 100644
--- a/core/io/file_access_pack.cpp
+++ b/core/io/file_access_pack.cpp
@@ -36,7 +36,7 @@
#include <stdio.h>
-Error PackedData::add_pack(const String &p_path, bool p_replace_files, size_t p_offset) {
+Error PackedData::add_pack(const String &p_path, bool p_replace_files, uint64_t p_offset) {
for (int i = 0; i < sources.size(); i++) {
if (sources[i]->try_open_pack(p_path, p_replace_files, p_offset)) {
return OK;
@@ -46,17 +46,16 @@ Error PackedData::add_pack(const String &p_path, bool p_replace_files, size_t p_
return ERR_FILE_UNRECOGNIZED;
}
-void PackedData::add_path(const String &pkg_path, const String &path, uint64_t ofs, uint64_t size, const uint8_t *p_md5, PackSource *p_src, bool p_replace_files, bool p_encrypted) {
- PathMD5 pmd5(path.md5_buffer());
- //printf("adding path %s, %lli, %lli\n", path.utf8().get_data(), pmd5.a, pmd5.b);
+void PackedData::add_path(const String &p_pkg_path, const String &p_path, uint64_t p_ofs, uint64_t p_size, const uint8_t *p_md5, PackSource *p_src, bool p_replace_files, bool p_encrypted) {
+ PathMD5 pmd5(p_path.md5_buffer());
bool exists = files.has(pmd5);
PackedFile pf;
pf.encrypted = p_encrypted;
- pf.pack = pkg_path;
- pf.offset = ofs;
- pf.size = size;
+ pf.pack = p_pkg_path;
+ pf.offset = p_ofs;
+ pf.size = p_size;
for (int i = 0; i < 16; i++) {
pf.md5[i] = p_md5[i];
}
@@ -68,7 +67,7 @@ void PackedData::add_path(const String &pkg_path, const String &path, uint64_t o
if (!exists) {
//search for dir
- String p = path.replace_first("res://", "");
+ String p = p_path.replace_first("res://", "");
PackedDir *cd = root;
if (p.find("/") != -1) { //in a subdir
@@ -87,7 +86,7 @@ void PackedData::add_path(const String &pkg_path, const String &path, uint64_t o
}
}
}
- String filename = path.get_file();
+ String filename = p_path.get_file();
// Don't add as a file if the path points to a directory
if (!filename.is_empty()) {
cd->files.insert(filename);
@@ -126,7 +125,7 @@ PackedData::~PackedData() {
//////////////////////////////////////////////////////////////////
-bool PackedSourcePCK::try_open_pack(const String &p_path, bool p_replace_files, size_t p_offset) {
+bool PackedSourcePCK::try_open_pack(const String &p_path, bool p_replace_files, uint64_t p_offset) {
FileAccess *f = FileAccess::open(p_path, FileAccess::READ);
if (!f) {
return false;
@@ -261,7 +260,7 @@ bool FileAccessPack::is_open() const {
return f->is_open();
}
-void FileAccessPack::seek(size_t p_position) {
+void FileAccessPack::seek(uint64_t p_position) {
if (p_position > pf.size) {
eof = true;
} else {
@@ -276,11 +275,11 @@ void FileAccessPack::seek_end(int64_t p_position) {
seek(pf.size + p_position);
}
-size_t FileAccessPack::get_position() const {
+uint64_t FileAccessPack::get_position() const {
return pos;
}
-size_t FileAccessPack::get_len() const {
+uint64_t FileAccessPack::get_len() const {
return pf.size;
}
@@ -298,18 +297,17 @@ uint8_t FileAccessPack::get_8() const {
return f->get_8();
}
-int FileAccessPack::get_buffer(uint8_t *p_dst, int p_length) const {
+uint64_t FileAccessPack::get_buffer(uint8_t *p_dst, uint64_t p_length) const {
ERR_FAIL_COND_V(!p_dst && p_length > 0, -1);
- ERR_FAIL_COND_V(p_length < 0, -1);
if (eof) {
return 0;
}
- uint64_t to_read = p_length;
+ int64_t to_read = p_length;
if (to_read + pos > pf.size) {
eof = true;
- to_read = int64_t(pf.size) - int64_t(pos);
+ to_read = (int64_t)pf.size - (int64_t)pos;
}
pos += p_length;
@@ -342,7 +340,7 @@ void FileAccessPack::store_8(uint8_t p_dest) {
ERR_FAIL();
}
-void FileAccessPack::store_buffer(const uint8_t *p_src, int p_length) {
+void FileAccessPack::store_buffer(const uint8_t *p_src, uint64_t p_length) {
ERR_FAIL();
}
@@ -549,7 +547,7 @@ Error DirAccessPack::remove(String p_name) {
return ERR_UNAVAILABLE;
}
-size_t DirAccessPack::get_space_left() {
+uint64_t DirAccessPack::get_space_left() {
return 0;
}
diff --git a/core/io/file_access_pack.h b/core/io/file_access_pack.h
index 955108f455..e47c9ea543 100644
--- a/core/io/file_access_pack.h
+++ b/core/io/file_access_pack.h
@@ -112,13 +112,13 @@ private:
public:
void add_pack_source(PackSource *p_source);
- void add_path(const String &pkg_path, const String &path, uint64_t ofs, uint64_t size, const uint8_t *p_md5, PackSource *p_src, bool p_replace_files, bool p_encrypted = false); // for PackSource
+ void add_path(const String &p_pkg_path, const String &p_path, uint64_t p_ofs, uint64_t p_size, const uint8_t *p_md5, PackSource *p_src, bool p_replace_files, bool p_encrypted = false); // for PackSource
void set_disabled(bool p_disabled) { disabled = p_disabled; }
_FORCE_INLINE_ bool is_disabled() const { return disabled; }
static PackedData *get_singleton() { return singleton; }
- Error add_pack(const String &p_path, bool p_replace_files, size_t p_offset);
+ Error add_pack(const String &p_path, bool p_replace_files, uint64_t p_offset);
_FORCE_INLINE_ FileAccess *try_open_path(const String &p_path);
_FORCE_INLINE_ bool has_path(const String &p_path);
@@ -132,21 +132,21 @@ public:
class PackSource {
public:
- virtual bool try_open_pack(const String &p_path, bool p_replace_files, size_t p_offset) = 0;
+ virtual bool try_open_pack(const String &p_path, bool p_replace_files, uint64_t p_offset) = 0;
virtual FileAccess *get_file(const String &p_path, PackedData::PackedFile *p_file) = 0;
virtual ~PackSource() {}
};
class PackedSourcePCK : public PackSource {
public:
- virtual bool try_open_pack(const String &p_path, bool p_replace_files, size_t p_offset);
+ virtual bool try_open_pack(const String &p_path, bool p_replace_files, uint64_t p_offset);
virtual FileAccess *get_file(const String &p_path, PackedData::PackedFile *p_file);
};
class FileAccessPack : public FileAccess {
PackedData::PackedFile pf;
- mutable size_t pos;
+ mutable uint64_t pos;
mutable bool eof;
uint64_t off;
@@ -160,16 +160,16 @@ public:
virtual void close();
virtual bool is_open() const;
- virtual void seek(size_t p_position);
+ virtual void seek(uint64_t p_position);
virtual void seek_end(int64_t p_position = 0);
- virtual size_t get_position() const;
- virtual size_t get_len() const;
+ virtual uint64_t get_position() const;
+ virtual uint64_t get_len() const;
virtual bool eof_reached() const;
virtual uint8_t get_8() const;
- virtual int get_buffer(uint8_t *p_dst, int p_length) const;
+ virtual uint64_t get_buffer(uint8_t *p_dst, uint64_t p_length) const;
virtual void set_endian_swap(bool p_swap);
@@ -178,7 +178,7 @@ public:
virtual void flush();
virtual void store_8(uint8_t p_dest);
- virtual void store_buffer(const uint8_t *p_src, int p_length);
+ virtual void store_buffer(const uint8_t *p_src, uint64_t p_length);
virtual bool file_exists(const String &p_name);
@@ -243,7 +243,7 @@ public:
virtual Error rename(String p_from, String p_to);
virtual Error remove(String p_name);
- size_t get_space_left();
+ uint64_t get_space_left();
virtual String get_filesystem_type() const;
diff --git a/core/io/file_access_zip.cpp b/core/io/file_access_zip.cpp
index 397b577612..304e24ee90 100644
--- a/core/io/file_access_zip.cpp
+++ b/core/io/file_access_zip.cpp
@@ -43,14 +43,14 @@ static void *godot_open(void *data, const char *p_fname, int mode) {
return nullptr;
}
- FileAccess *f = (FileAccess *)data;
- f->open(p_fname, FileAccess::READ);
+ FileAccess *f = FileAccess::open(p_fname, FileAccess::READ);
+ ERR_FAIL_COND_V(!f, nullptr);
- return f->is_open() ? data : nullptr;
+ return f;
}
static uLong godot_read(void *data, void *fdata, void *buf, uLong size) {
- FileAccess *f = (FileAccess *)data;
+ FileAccess *f = (FileAccess *)fdata;
f->get_buffer((uint8_t *)buf, size);
return size;
}
@@ -60,14 +60,14 @@ static uLong godot_write(voidpf opaque, voidpf stream, const void *buf, uLong si
}
static long godot_tell(voidpf opaque, voidpf stream) {
- FileAccess *f = (FileAccess *)opaque;
+ FileAccess *f = (FileAccess *)stream;
return f->get_position();
}
static long godot_seek(voidpf opaque, voidpf stream, uLong offset, int origin) {
- FileAccess *f = (FileAccess *)opaque;
+ FileAccess *f = (FileAccess *)stream;
- int pos = offset;
+ uint64_t pos = offset;
switch (origin) {
case ZLIB_FILEFUNC_SEEK_CUR:
pos = f->get_position() + offset;
@@ -84,13 +84,17 @@ static long godot_seek(voidpf opaque, voidpf stream, uLong offset, int origin) {
}
static int godot_close(voidpf opaque, voidpf stream) {
- FileAccess *f = (FileAccess *)opaque;
- f->close();
+ FileAccess *f = (FileAccess *)stream;
+ if (f) {
+ f->close();
+ memdelete(f);
+ f = nullptr;
+ }
return 0;
}
static int godot_testerror(voidpf opaque, voidpf stream) {
- FileAccess *f = (FileAccess *)opaque;
+ FileAccess *f = (FileAccess *)stream;
return f->get_error() != OK ? 1 : 0;
}
@@ -105,23 +109,18 @@ static void godot_free(voidpf opaque, voidpf address) {
void ZipArchive::close_handle(unzFile p_file) const {
ERR_FAIL_COND_MSG(!p_file, "Cannot close a file if none is open.");
- FileAccess *f = (FileAccess *)unzGetOpaque(p_file);
unzCloseCurrentFile(p_file);
unzClose(p_file);
- memdelete(f);
}
unzFile ZipArchive::get_file_handle(String p_file) const {
ERR_FAIL_COND_V_MSG(!file_exists(p_file), nullptr, "File '" + p_file + " doesn't exist.");
File file = files[p_file];
- FileAccess *f = FileAccess::open(packages[file.package].filename, FileAccess::READ);
- ERR_FAIL_COND_V_MSG(!f, nullptr, "Cannot open file '" + packages[file.package].filename + "'.");
-
zlib_filefunc_def io;
memset(&io, 0, sizeof(io));
- io.opaque = f;
+ io.opaque = nullptr;
io.zopen_file = godot_open;
io.zread_file = godot_read;
io.zwrite_file = godot_write;
@@ -135,7 +134,7 @@ unzFile ZipArchive::get_file_handle(String p_file) const {
io.free_mem = godot_free;
unzFile pkg = unzOpen2(packages[file.package].filename.utf8().get_data(), &io);
- ERR_FAIL_COND_V(!pkg, nullptr);
+ ERR_FAIL_COND_V_MSG(!pkg, nullptr, "Cannot open file '" + packages[file.package].filename + "'.");
int unz_err = unzGoToFilePos(pkg, &file.file_pos);
if (unz_err != UNZ_OK || unzOpenCurrentFile(pkg) != UNZ_OK) {
unzClose(pkg);
@@ -145,8 +144,7 @@ unzFile ZipArchive::get_file_handle(String p_file) const {
return pkg;
}
-bool ZipArchive::try_open_pack(const String &p_path, bool p_replace_files, size_t p_offset = 0) {
- //printf("opening zip pack %s, %i, %i\n", p_name.utf8().get_data(), p_name.extension().nocasecmp_to("zip"), p_name.extension().nocasecmp_to("pcz"));
+bool ZipArchive::try_open_pack(const String &p_path, bool p_replace_files, uint64_t p_offset = 0) {
// load with offset feature only supported for PCK files
ERR_FAIL_COND_V_MSG(p_offset != 0, false, "Invalid PCK data. Note that loading files with a non-zero offset isn't supported with ZIP archives.");
@@ -155,12 +153,9 @@ bool ZipArchive::try_open_pack(const String &p_path, bool p_replace_files, size_
}
zlib_filefunc_def io;
+ memset(&io, 0, sizeof(io));
- FileAccess *fa = FileAccess::open(p_path, FileAccess::READ);
- if (!fa) {
- return false;
- }
- io.opaque = fa;
+ io.opaque = nullptr;
io.zopen_file = godot_open;
io.zread_file = godot_read;
io.zwrite_file = godot_write;
@@ -269,8 +264,9 @@ bool FileAccessZip::is_open() const {
return zfile != nullptr;
}
-void FileAccessZip::seek(size_t p_position) {
+void FileAccessZip::seek(uint64_t p_position) {
ERR_FAIL_COND(!zfile);
+
unzSeekCurrentFile(zfile, p_position);
}
@@ -279,12 +275,12 @@ void FileAccessZip::seek_end(int64_t p_position) {
unzSeekCurrentFile(zfile, get_len() + p_position);
}
-size_t FileAccessZip::get_position() const {
+uint64_t FileAccessZip::get_position() const {
ERR_FAIL_COND_V(!zfile, 0);
return unztell(zfile);
}
-size_t FileAccessZip::get_len() const {
+uint64_t FileAccessZip::get_len() const {
ERR_FAIL_COND_V(!zfile, 0);
return file_info.uncompressed_size;
}
@@ -301,17 +297,17 @@ uint8_t FileAccessZip::get_8() const {
return ret;
}
-int FileAccessZip::get_buffer(uint8_t *p_dst, int p_length) const {
+uint64_t FileAccessZip::get_buffer(uint8_t *p_dst, uint64_t p_length) const {
ERR_FAIL_COND_V(!p_dst && p_length > 0, -1);
- ERR_FAIL_COND_V(p_length < 0, -1);
ERR_FAIL_COND_V(!zfile, -1);
+
at_eof = unzeof(zfile);
if (at_eof) {
return 0;
}
- int read = unzReadCurrentFile(zfile, p_dst, p_length);
+ int64_t read = unzReadCurrentFile(zfile, p_dst, p_length);
ERR_FAIL_COND_V(read < 0, read);
- if (read < p_length) {
+ if ((uint64_t)read < p_length) {
at_eof = true;
}
return read;
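With this change each unzFile gets its own FileAccess: godot_open now opens the file itself and returns the handle as the minizip stream pointer, and the other callbacks read that handle from their stream argument instead of a single shared opaque pointer. A rough standalone sketch of the pattern, using FILE* in place of FileAccess (the IoFuncs struct and names are illustrative only, not minizip's actual API):

    #include <cstdint>
    #include <cstdio>

    // open() returns the per-stream handle; every other callback receives it
    // back as 'stream', so two archives opened at once keep separate cursors.
    struct IoFuncs {
        void *(*open_file)(void *opaque, const char *name);
        uint64_t (*read_file)(void *opaque, void *stream, void *buf, uint64_t size);
        int (*close_file)(void *opaque, void *stream);
    };

    static void *demo_open(void *, const char *name) {
        return fopen(name, "rb"); // the FILE* itself is the stream handle
    }

    static uint64_t demo_read(void *, void *stream, void *buf, uint64_t size) {
        return fread(buf, 1, (size_t)size, (FILE *)stream);
    }

    static int demo_close(void *, void *stream) {
        return stream ? fclose((FILE *)stream) : 0;
    }

    int main() {
        IoFuncs io = { demo_open, demo_read, demo_close };
        void *a = io.open_file(nullptr, "a.zip");
        void *b = io.open_file(nullptr, "b.zip"); // independent of 'a'
        char buf[16];
        if (a) { io.read_file(nullptr, a, buf, sizeof(buf)); io.close_file(nullptr, a); }
        if (b) { io.read_file(nullptr, b, buf, sizeof(buf)); io.close_file(nullptr, b); }
        return 0;
    }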
diff --git a/core/io/file_access_zip.h b/core/io/file_access_zip.h
index 8559f871ce..91bdaafb68 100644
--- a/core/io/file_access_zip.h
+++ b/core/io/file_access_zip.h
@@ -67,7 +67,7 @@ public:
bool file_exists(String p_name) const;
- virtual bool try_open_pack(const String &p_path, bool p_replace_files, size_t p_offset);
+ virtual bool try_open_pack(const String &p_path, bool p_replace_files, uint64_t p_offset);
FileAccess *get_file(const String &p_path, PackedData::PackedFile *p_file);
static ZipArchive *get_singleton();
@@ -87,20 +87,21 @@ public:
virtual void close(); ///< close a file
virtual bool is_open() const; ///< true when file is open
- virtual void seek(size_t p_position); ///< seek to a given position
+ virtual void seek(uint64_t p_position); ///< seek to a given position
virtual void seek_end(int64_t p_position = 0); ///< seek from the end of file
- virtual size_t get_position() const; ///< get position in the file
- virtual size_t get_len() const; ///< get size of the file
+ virtual uint64_t get_position() const; ///< get position in the file
+ virtual uint64_t get_len() const; ///< get size of the file
virtual bool eof_reached() const; ///< reading passed EOF
virtual uint8_t get_8() const; ///< get a byte
- virtual int get_buffer(uint8_t *p_dst, int p_length) const;
+ virtual uint64_t get_buffer(uint8_t *p_dst, uint64_t p_length) const;
virtual Error get_error() const; ///< get last error
virtual void flush();
virtual void store_8(uint8_t p_dest); ///< store a byte
+
virtual bool file_exists(const String &p_name); ///< return true if a file exists
virtual uint64_t _get_modified_time(const String &p_file) { return 0; } // todo
diff --git a/core/io/pck_packer.cpp b/core/io/pck_packer.cpp
index a0697ca18b..4fe22e57d8 100644
--- a/core/io/pck_packer.cpp
+++ b/core/io/pck_packer.cpp
@@ -236,7 +236,7 @@ Error PCKPacker::flush(bool p_verbose) {
}
while (to_write > 0) {
- int read = src->get_buffer(buf, MIN(to_write, buf_max));
+ uint64_t read = src->get_buffer(buf, MIN(to_write, buf_max));
ftmp->store_buffer(buf, read);
to_write -= read;
}
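The flush() loop above now stores the byte count returned by get_buffer() in a uint64_t, matching the new 64-bit FileAccess interface. A small sketch of the same chunked-copy shape using standard C streams (the buffer size and error handling are illustrative only):

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>

    // Copy 'to_write' bytes from src to dst in bounded chunks, with 64-bit counts.
    static bool copy_stream(FILE *src, FILE *dst, uint64_t to_write) {
        const uint64_t buf_max = 65536;
        uint8_t buf[65536];
        while (to_write > 0) {
            uint64_t chunk = std::min(to_write, buf_max);
            uint64_t read = fread(buf, 1, (size_t)chunk, src);
            if (read == 0) {
                return false; // short read: the source ended early
            }
            fwrite(buf, 1, (size_t)read, dst);
            to_write -= read;
        }
        return true;
    }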
diff --git a/core/io/resource_format_binary.cpp b/core/io/resource_format_binary.cpp
index c4eb2a20bb..50c9b2371a 100644
--- a/core/io/resource_format_binary.cpp
+++ b/core/io/resource_format_binary.cpp
@@ -1157,8 +1157,8 @@ Error ResourceFormatLoaderBinary::rename_dependencies(const String &p_path, cons
save_ustring(fw, get_ustring(f)); //type
- size_t md_ofs = f->get_position();
- size_t importmd_ofs = f->get_64();
+ uint64_t md_ofs = f->get_position();
+ uint64_t importmd_ofs = f->get_64();
fw->store_64(0); //metadata offset
for (int i = 0; i < 14; i++) {
diff --git a/core/io/zip_io.cpp b/core/io/zip_io.cpp
index fe46868dd0..e0e491dc85 100644
--- a/core/io/zip_io.cpp
+++ b/core/io/zip_io.cpp
@@ -68,7 +68,7 @@ long zipio_tell(voidpf opaque, voidpf stream) {
long zipio_seek(voidpf opaque, voidpf stream, uLong offset, int origin) {
FileAccess *f = *(FileAccess **)opaque;
- int pos = offset;
+ uint64_t pos = offset;
switch (origin) {
case ZLIB_FILEFUNC_SEEK_CUR:
pos = f->get_position() + offset;
diff --git a/core/math/bvh.h b/core/math/bvh.h
new file mode 100644
index 0000000000..cefbc9b0db
--- /dev/null
+++ b/core/math/bvh.h
@@ -0,0 +1,695 @@
+/*************************************************************************/
+/* bvh.h */
+/*************************************************************************/
+/* This file is part of: */
+/* GODOT ENGINE */
+/* https://godotengine.org */
+/*************************************************************************/
+/* Copyright (c) 2007-2021 Juan Linietsky, Ariel Manzur. */
+/* Copyright (c) 2014-2021 Godot Engine contributors (cf. AUTHORS.md). */
+/* */
+/* Permission is hereby granted, free of charge, to any person obtaining */
+/* a copy of this software and associated documentation files (the */
+/* "Software"), to deal in the Software without restriction, including */
+/* without limitation the rights to use, copy, modify, merge, publish, */
+/* distribute, sublicense, and/or sell copies of the Software, and to */
+/* permit persons to whom the Software is furnished to do so, subject to */
+/* the following conditions: */
+/* */
+/* The above copyright notice and this permission notice shall be */
+/* included in all copies or substantial portions of the Software. */
+/* */
+/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
+/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
+/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/
+/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
+/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
+/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
+/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
+/*************************************************************************/
+
+#ifndef BVH_H
+#define BVH_H
+
+// BVH
+// This class provides a wrapper around the BVH tree, which contains most of the functionality
+// for a dynamic BVH with templated leaf size.
+// However BVH also adds facilities for pairing, to maintain compatibility with Godot 3.2.
+// Pairing is a collision pairing system, on top of the basic BVH.
+
+// Some notes on the use of BVH / Octree from Godot 3.2.
+// This is not well explained elsewhere.
+// The rendering tree mask and types that are sent to the BVH are NOT layer masks.
+// They are INSTANCE_TYPES (defined in visual_server.h), e.g. MESH, MULTIMESH, PARTICLES etc.
+// Thus the lights do not cull by layer mask in the BVH.
+
+// Layer masks are implemented in the renderers as a later step, and light_cull_mask appears to be
+// implemented in GLES3 but not GLES2. Layer masks are not yet implemented for directional lights.
+
+#include "bvh_tree.h"
+
+#define BVHTREE_CLASS BVH_Tree<T, 2, MAX_ITEMS, USE_PAIRS, Bounds, Point>
+
+template <class T, bool USE_PAIRS = false, int MAX_ITEMS = 32, class Bounds = AABB, class Point = Vector3>
+class BVH_Manager {
+public:
+ // note we are using uint32_t instead of BVHHandle, losing type safety, but this
+ // is for compatibility with octree
+ typedef void *(*PairCallback)(void *, uint32_t, T *, int, uint32_t, T *, int);
+ typedef void (*UnpairCallback)(void *, uint32_t, T *, int, uint32_t, T *, int, void *);
+
+ // these 2 are crucial for fine tuning, and can be applied manually
+ // see the variable declarations for more info.
+ void params_set_node_expansion(real_t p_value) {
+ if (p_value >= 0.0) {
+ tree._node_expansion = p_value;
+ tree._auto_node_expansion = false;
+ } else {
+ tree._auto_node_expansion = true;
+ }
+ }
+
+ void params_set_pairing_expansion(real_t p_value) {
+ if (p_value >= 0.0) {
+ tree._pairing_expansion = p_value;
+ tree._auto_pairing_expansion = false;
+ } else {
+ tree._auto_pairing_expansion = true;
+ }
+ }
+
+ void set_pair_callback(PairCallback p_callback, void *p_userdata) {
+ pair_callback = p_callback;
+ pair_callback_userdata = p_userdata;
+ }
+ void set_unpair_callback(UnpairCallback p_callback, void *p_userdata) {
+ unpair_callback = p_callback;
+ unpair_callback_userdata = p_userdata;
+ }
+
+ BVHHandle create(T *p_userdata, bool p_active, const Bounds &p_aabb = Bounds(), int p_subindex = 0, bool p_pairable = false, uint32_t p_pairable_type = 0, uint32_t p_pairable_mask = 1) {
+		// not sure if it is absolutely necessary to flush collisions here. It will cost performance to do so
+		// instead of waiting for the next update, so only uncomment this if there are bugs.
+ if (USE_PAIRS) {
+ //_check_for_collisions();
+ }
+
+#ifdef TOOLS_ENABLED
+ if (!USE_PAIRS) {
+ if (p_pairable) {
+ WARN_PRINT_ONCE("creating pairable item in BVH with USE_PAIRS set to false");
+ }
+ }
+#endif
+
+ BVHHandle h = tree.item_add(p_userdata, p_active, p_aabb, p_subindex, p_pairable, p_pairable_type, p_pairable_mask);
+
+ if (USE_PAIRS) {
+ // for safety initialize the expanded AABB
+ Bounds &expanded_aabb = tree._pairs[h.id()].expanded_aabb;
+ expanded_aabb = p_aabb;
+ expanded_aabb.grow_by(tree._pairing_expansion);
+
+ // force a collision check no matter the AABB
+ if (p_active) {
+ _add_changed_item(h, p_aabb, false);
+ _check_for_collisions(true);
+ }
+ }
+
+ return h;
+ }
+
+ ////////////////////////////////////////////////////
+ // wrapper versions that use uint32_t instead of handle
+ // for backward compatibility. Less type safe
+ void move(uint32_t p_handle, const Bounds &p_aabb) {
+ BVHHandle h;
+ h.set(p_handle);
+ move(h, p_aabb);
+ }
+
+ void erase(uint32_t p_handle) {
+ BVHHandle h;
+ h.set(p_handle);
+ erase(h);
+ }
+
+ void force_collision_check(uint32_t p_handle) {
+ BVHHandle h;
+ h.set(p_handle);
+ force_collision_check(h);
+ }
+
+ bool activate(uint32_t p_handle, const Bounds &p_aabb, bool p_delay_collision_check = false) {
+ BVHHandle h;
+ h.set(p_handle);
+ return activate(h, p_aabb, p_delay_collision_check);
+ }
+
+ bool deactivate(uint32_t p_handle) {
+ BVHHandle h;
+ h.set(p_handle);
+ return deactivate(h);
+ }
+
+ void set_pairable(uint32_t p_handle, bool p_pairable, uint32_t p_pairable_type, uint32_t p_pairable_mask, bool p_force_collision_check = true) {
+ BVHHandle h;
+ h.set(p_handle);
+ set_pairable(h, p_pairable, p_pairable_type, p_pairable_mask, p_force_collision_check);
+ }
+
+ bool is_pairable(uint32_t p_handle) const {
+ BVHHandle h;
+ h.set(p_handle);
+ return item_is_pairable(h);
+ }
+ int get_subindex(uint32_t p_handle) const {
+ BVHHandle h;
+ h.set(p_handle);
+ return item_get_subindex(h);
+ }
+
+ T *get(uint32_t p_handle) const {
+ BVHHandle h;
+ h.set(p_handle);
+ return item_get_userdata(h);
+ }
+
+ ////////////////////////////////////////////////////
+
+ void move(BVHHandle p_handle, const Bounds &p_aabb) {
+ if (tree.item_move(p_handle, p_aabb)) {
+ if (USE_PAIRS) {
+ _add_changed_item(p_handle, p_aabb);
+ }
+ }
+ }
+
+ void erase(BVHHandle p_handle) {
+ // call unpair and remove all references to the item
+ // before deleting from the tree
+ if (USE_PAIRS) {
+ _remove_changed_item(p_handle);
+ }
+
+ tree.item_remove(p_handle);
+
+ _check_for_collisions(true);
+ }
+
+	// use in conjunction with activate if you have deferred the collision check, and
+	// set_pairable has never been called.
+	// (deferred collision checks are a workaround for the visual server, for historical reasons)
+ void force_collision_check(BVHHandle p_handle) {
+ if (USE_PAIRS) {
+ // the aabb should already be up to date in the BVH
+ Bounds aabb;
+ item_get_AABB(p_handle, aabb);
+
+ // add it as changed even if aabb not different
+ _add_changed_item(p_handle, aabb, false);
+
+ // force an immediate full collision check, much like calls to set_pairable
+ _check_for_collisions(true);
+ }
+ }
+
+ // these should be read as set_visible for render trees,
+ // but generically this makes items add or remove from the
+ // tree internally, to speed things up by ignoring inactive items
+ bool activate(BVHHandle p_handle, const Bounds &p_aabb, bool p_delay_collision_check = false) {
+ // sending the aabb here prevents the need for the BVH to maintain
+ // a redundant copy of the aabb.
+ // returns success
+ if (tree.item_activate(p_handle, p_aabb)) {
+ if (USE_PAIRS) {
+ // in the special case of the render tree, when setting visibility we are using the combination of
+				// activate then set_pairable. This would cause 2 sets of collision checks. For efficiency here we allow
+ // deferring to have a single collision check at the set_pairable call.
+ // Watch for bugs! This may cause bugs if set_pairable is not called.
+ if (!p_delay_collision_check) {
+ _add_changed_item(p_handle, p_aabb, false);
+
+ // force an immediate collision check, much like calls to set_pairable
+ _check_for_collisions(true);
+ }
+ }
+ return true;
+ }
+
+ return false;
+ }
+
+ bool deactivate(BVHHandle p_handle) {
+ // returns success
+ if (tree.item_deactivate(p_handle)) {
+ // call unpair and remove all references to the item
+ // before deleting from the tree
+ if (USE_PAIRS) {
+ _remove_changed_item(p_handle);
+
+ // force check for collisions, much like an erase was called
+ _check_for_collisions(true);
+ }
+ return true;
+ }
+
+ return false;
+ }
+
+ bool get_active(BVHHandle p_handle) const {
+ return tree.item_get_active(p_handle);
+ }
+
+ // call e.g. once per frame (this does a trickle optimize)
+ void update() {
+ tree.update();
+ _check_for_collisions();
+#ifdef BVH_INTEGRITY_CHECKS
+ tree.integrity_check_all();
+#endif
+ }
+
+ // this can be called more frequently than per frame if necessary
+ void update_collisions() {
+ _check_for_collisions();
+ }
+
+ // prefer calling this directly as type safe
+ void set_pairable(const BVHHandle &p_handle, bool p_pairable, uint32_t p_pairable_type, uint32_t p_pairable_mask, bool p_force_collision_check = true) {
+ // Returns true if the pairing state has changed.
+ bool state_changed = tree.item_set_pairable(p_handle, p_pairable, p_pairable_type, p_pairable_mask);
+
+ if (USE_PAIRS) {
+			// not sure if it is absolutely necessary to flush collisions here. It will cost performance to do so
+			// instead of waiting for the next update, so only uncomment this if there are bugs.
+ //_check_for_collisions();
+
+ if ((p_force_collision_check || state_changed) && get_active(p_handle)) {
+ // when the pairable state changes, we need to force a collision check because newly pairable
+ // items may be in collision, and unpairable items might move out of collision.
+ // We cannot depend on waiting for the next update, because that may come much later.
+ Bounds aabb;
+ item_get_AABB(p_handle, aabb);
+
+ // passing false disables the optimization which prevents collision checks if
+ // the aabb hasn't changed
+ _add_changed_item(p_handle, aabb, false);
+
+ // force an immediate collision check (probably just for this one item)
+ // but it must be a FULL collision check, also checking pairable state and masks.
+ // This is because AABB intersecting objects may have changed pairable state / mask
+ // such that they should no longer be paired. E.g. lights.
+ _check_for_collisions(true);
+ } // only if active
+ }
+ }
+
+ // cull tests
+ int cull_aabb(const Bounds &p_aabb, T **p_result_array, int p_result_max, int *p_subindex_array = nullptr, uint32_t p_mask = 0xFFFFFFFF) {
+ typename BVHTREE_CLASS::CullParams params;
+
+ params.result_count_overall = 0;
+ params.result_max = p_result_max;
+ params.result_array = p_result_array;
+ params.subindex_array = p_subindex_array;
+ params.mask = p_mask;
+ params.pairable_type = 0;
+ params.test_pairable_only = false;
+ params.abb.from(p_aabb);
+
+ tree.cull_aabb(params);
+
+ return params.result_count_overall;
+ }
+
+ int cull_segment(const Point &p_from, const Point &p_to, T **p_result_array, int p_result_max, int *p_subindex_array = nullptr, uint32_t p_mask = 0xFFFFFFFF) {
+ typename BVHTREE_CLASS::CullParams params;
+
+ params.result_count_overall = 0;
+ params.result_max = p_result_max;
+ params.result_array = p_result_array;
+ params.subindex_array = p_subindex_array;
+ params.mask = p_mask;
+ params.pairable_type = 0;
+
+ params.segment.from = p_from;
+ params.segment.to = p_to;
+
+ tree.cull_segment(params);
+
+ return params.result_count_overall;
+ }
+
+ int cull_point(const Point &p_point, T **p_result_array, int p_result_max, int *p_subindex_array = nullptr, uint32_t p_mask = 0xFFFFFFFF) {
+ typename BVHTREE_CLASS::CullParams params;
+
+ params.result_count_overall = 0;
+ params.result_max = p_result_max;
+ params.result_array = p_result_array;
+ params.subindex_array = p_subindex_array;
+ params.mask = p_mask;
+ params.pairable_type = 0;
+
+ params.point = p_point;
+
+ tree.cull_point(params);
+ return params.result_count_overall;
+ }
+
+ int cull_convex(const Vector<Plane> &p_convex, T **p_result_array, int p_result_max, uint32_t p_mask = 0xFFFFFFFF) {
+ if (!p_convex.size()) {
+ return 0;
+ }
+
+ Vector<Vector3> convex_points = Geometry3D::compute_convex_mesh_points(&p_convex[0], p_convex.size());
+ if (convex_points.size() == 0) {
+ return 0;
+ }
+
+ typename BVHTREE_CLASS::CullParams params;
+ params.result_count_overall = 0;
+ params.result_max = p_result_max;
+ params.result_array = p_result_array;
+ params.subindex_array = nullptr;
+ params.mask = p_mask;
+ params.pairable_type = 0;
+
+ params.hull.planes = &p_convex[0];
+ params.hull.num_planes = p_convex.size();
+ params.hull.points = &convex_points[0];
+ params.hull.num_points = convex_points.size();
+
+ tree.cull_convex(params);
+
+ return params.result_count_overall;
+ }
+
+private:
+ // do this after moving etc.
+ void _check_for_collisions(bool p_full_check = false) {
+ if (!changed_items.size()) {
+ // noop
+ return;
+ }
+
+ Bounds bb;
+
+ typename BVHTREE_CLASS::CullParams params;
+
+ params.result_count_overall = 0;
+ params.result_max = INT_MAX;
+ params.result_array = nullptr;
+ params.subindex_array = nullptr;
+ params.mask = 0xFFFFFFFF;
+ params.pairable_type = 0;
+
+ for (unsigned int n = 0; n < changed_items.size(); n++) {
+ const BVHHandle &h = changed_items[n];
+
+ // use the expanded aabb for pairing
+ const Bounds &expanded_aabb = tree._pairs[h.id()].expanded_aabb;
+ BVHABB_CLASS abb;
+ abb.from(expanded_aabb);
+
+ // find all the existing paired aabbs that are no longer
+ // paired, and send callbacks
+ _find_leavers(h, abb, p_full_check);
+
+ uint32_t changed_item_ref_id = h.id();
+
+ // set up the test from this item.
+ // this includes whether to test the non pairable tree,
+ // and the item mask.
+ tree.item_fill_cullparams(h, params);
+
+ params.abb = abb;
+
+ params.result_count_overall = 0; // might not be needed
+ tree.cull_aabb(params, false);
+
+ for (unsigned int i = 0; i < tree._cull_hits.size(); i++) {
+ uint32_t ref_id = tree._cull_hits[i];
+
+ // don't collide against ourself
+ if (ref_id == changed_item_ref_id) {
+ continue;
+ }
+
+#ifdef BVH_CHECKS
+ // if neither are pairable, they should ignore each other
+ // THIS SHOULD NEVER HAPPEN .. now we only test the pairable tree
+ // if the changed item is not pairable
+ CRASH_COND(params.test_pairable_only && !tree._extra[ref_id].pairable);
+#endif
+
+ // checkmasks is already done in the cull routine.
+ BVHHandle h_collidee;
+ h_collidee.set_id(ref_id);
+
+ // find NEW enterers, and send callbacks for them only
+ _collide(h, h_collidee);
+ }
+ }
+ _reset();
+ }
+
+public:
+ void item_get_AABB(BVHHandle p_handle, Bounds &r_aabb) {
+ BVHABB_CLASS abb;
+ tree.item_get_ABB(p_handle, abb);
+ abb.to(r_aabb);
+ }
+
+private:
+ // supplemental funcs
+ bool item_is_pairable(BVHHandle p_handle) const { return _get_extra(p_handle).pairable; }
+ T *item_get_userdata(BVHHandle p_handle) const { return _get_extra(p_handle).userdata; }
+ int item_get_subindex(BVHHandle p_handle) const { return _get_extra(p_handle).subindex; }
+
+ void _unpair(BVHHandle p_from, BVHHandle p_to) {
+ tree._handle_sort(p_from, p_to);
+
+ typename BVHTREE_CLASS::ItemExtra &exa = tree._extra[p_from.id()];
+ typename BVHTREE_CLASS::ItemExtra &exb = tree._extra[p_to.id()];
+
+ // if the userdata is the same, no collisions should occur
+ if ((exa.userdata == exb.userdata) && exa.userdata) {
+ return;
+ }
+
+ typename BVHTREE_CLASS::ItemPairs &pairs_from = tree._pairs[p_from.id()];
+ typename BVHTREE_CLASS::ItemPairs &pairs_to = tree._pairs[p_to.id()];
+
+ void *ud_from = pairs_from.remove_pair_to(p_to);
+ pairs_to.remove_pair_to(p_from);
+
+ // callback
+ if (unpair_callback) {
+ unpair_callback(pair_callback_userdata, p_from, exa.userdata, exa.subindex, p_to, exb.userdata, exb.subindex, ud_from);
+ }
+ }
+
+ // returns true if unpair
+ bool _find_leavers_process_pair(typename BVHTREE_CLASS::ItemPairs &p_pairs_from, const BVHABB_CLASS &p_abb_from, BVHHandle p_from, BVHHandle p_to, bool p_full_check) {
+ BVHABB_CLASS abb_to;
+ tree.item_get_ABB(p_to, abb_to);
+
+ // do they overlap?
+ if (p_abb_from.intersects(abb_to)) {
+ // the full check for pairable / non pairable and mask changes is extra expense
+ // this need not be done in most cases (for speed) except in the case where set_pairable is called
+ // where the masks etc of the objects in question may have changed
+ if (!p_full_check) {
+ return false;
+ }
+ const typename BVHTREE_CLASS::ItemExtra &exa = _get_extra(p_from);
+ const typename BVHTREE_CLASS::ItemExtra &exb = _get_extra(p_to);
+
+ // one of the two must be pairable to still pair
+ // if neither are pairable, we always unpair
+ if (exa.pairable || exb.pairable) {
+ // the masks must still be compatible to pair
+ // i.e. if there is a hit between the two, then they should stay paired
+ if (tree._cull_pairing_mask_test_hit(exa.pairable_mask, exa.pairable_type, exb.pairable_mask, exb.pairable_type)) {
+ return false;
+ }
+ }
+ }
+
+ _unpair(p_from, p_to);
+ return true;
+ }
+
+ // find all the existing paired aabbs that are no longer
+ // paired, and send callbacks
+ void _find_leavers(BVHHandle p_handle, const BVHABB_CLASS &expanded_abb_from, bool p_full_check) {
+ typename BVHTREE_CLASS::ItemPairs &p_from = tree._pairs[p_handle.id()];
+
+ BVHABB_CLASS abb_from = expanded_abb_from;
+
+ // remove from pairing list for every partner
+ for (unsigned int n = 0; n < p_from.extended_pairs.size(); n++) {
+ BVHHandle h_to = p_from.extended_pairs[n].handle;
+ if (_find_leavers_process_pair(p_from, abb_from, p_handle, h_to, p_full_check)) {
+ // we need to keep the counter n up to date if we deleted a pair
+ // as the number of items in p_from.extended_pairs will have decreased by 1
+ // and we don't want to miss an item
+ n--;
+ }
+ }
+ }
+
+ // find NEW enterers, and send callbacks for them only
+ // handle a and b
+ void _collide(BVHHandle p_ha, BVHHandle p_hb) {
+ // only have to do this oneway, lower ID then higher ID
+ tree._handle_sort(p_ha, p_hb);
+
+ const typename BVHTREE_CLASS::ItemExtra &exa = _get_extra(p_ha);
+ const typename BVHTREE_CLASS::ItemExtra &exb = _get_extra(p_hb);
+
+ // if the userdata is the same, no collisions should occur
+ if ((exa.userdata == exb.userdata) && exa.userdata) {
+ return;
+ }
+
+ typename BVHTREE_CLASS::ItemPairs &p_from = tree._pairs[p_ha.id()];
+ typename BVHTREE_CLASS::ItemPairs &p_to = tree._pairs[p_hb.id()];
+
+ // does this pair exist already?
+ // or only check the one with lower number of pairs for greater speed
+ if (p_from.num_pairs <= p_to.num_pairs) {
+ if (p_from.contains_pair_to(p_hb)) {
+ return;
+ }
+ } else {
+ if (p_to.contains_pair_to(p_ha)) {
+ return;
+ }
+ }
+
+ // callback
+ void *callback_userdata = nullptr;
+
+ if (pair_callback) {
+ callback_userdata = pair_callback(pair_callback_userdata, p_ha, exa.userdata, exa.subindex, p_hb, exb.userdata, exb.subindex);
+ }
+
+ // new pair! .. only really need to store the userdata on the lower handle, but both have storage so...
+ p_from.add_pair_to(p_hb, callback_userdata);
+ p_to.add_pair_to(p_ha, callback_userdata);
+ }
+
+ // if we remove an item, we need to immediately remove the pairs, to prevent reading the pair after deletion
+ void _remove_pairs_containing(BVHHandle p_handle) {
+ typename BVHTREE_CLASS::ItemPairs &p_from = tree._pairs[p_handle.id()];
+
+ // remove from pairing list for every partner.
+ // can't easily use a for loop here, because removing changes the size of the list
+ while (p_from.extended_pairs.size()) {
+ BVHHandle h_to = p_from.extended_pairs[0].handle;
+ _unpair(p_handle, h_to);
+ }
+ }
+
+private:
+ const typename BVHTREE_CLASS::ItemExtra &_get_extra(BVHHandle p_handle) const {
+ return tree._extra[p_handle.id()];
+ }
+ const typename BVHTREE_CLASS::ItemRef &_get_ref(BVHHandle p_handle) const {
+ return tree._refs[p_handle.id()];
+ }
+
+ void _reset() {
+ changed_items.clear();
+ _tick++;
+ }
+
+ void _add_changed_item(BVHHandle p_handle, const Bounds &aabb, bool p_check_aabb = true) {
+ // Note that non pairable items can pair with pairable,
+ // so all types must be added to the list
+
+ // aabb check with expanded aabb. This greatly decreases processing
+ // at the cost of slightly less accurate pairing checks
+ // Note this pairing AABB is separate from the AABB in the actual tree
+ Bounds &expanded_aabb = tree._pairs[p_handle.id()].expanded_aabb;
+
+ // passing p_check_aabb false disables the optimization which prevents collision checks if
+ // the aabb hasn't changed. This is needed where set_pairable has been called, but the position
+ // has not changed.
+ if (p_check_aabb && expanded_aabb.encloses(aabb)) {
+ return;
+ }
+
+ // ALWAYS update the new expanded aabb, even if already changed once
+ // this tick, because it is vital that the AABB is kept up to date
+ expanded_aabb = aabb;
+ expanded_aabb.grow_by(tree._pairing_expansion);
+
+		// this code is to ensure that changed items only appear once on the updated list.
+		// Collision checking them multiple times is not needed, and would just repeat the same work.
+ uint32_t &last_updated_tick = tree._extra[p_handle.id()].last_updated_tick;
+
+ if (last_updated_tick == _tick) {
+ return; // already on changed list
+ }
+
+ // mark as on list
+ last_updated_tick = _tick;
+
+ // add to the list
+ changed_items.push_back(p_handle);
+ }
+
+ void _remove_changed_item(BVHHandle p_handle) {
+ // Care has to be taken here for items that are deleted. The ref ID
+ // could be reused on the same tick for new items. This is probably
+ // rare but should be taken into consideration
+
+ // callbacks
+ _remove_pairs_containing(p_handle);
+
+ // remove from changed items (not very efficient yet)
+ for (int n = 0; n < (int)changed_items.size(); n++) {
+ if (changed_items[n] == p_handle) {
+ changed_items.remove_unordered(n);
+
+ // because we are using an unordered remove,
+ // the last changed item will now be at spot 'n',
+ // and we need to redo it, so we prevent moving on to
+ // the next n at the next for iteration.
+ n--;
+ }
+ }
+
+ // reset the last updated tick (may not be necessary but just in case)
+ tree._extra[p_handle.id()].last_updated_tick = 0;
+ }
+
+ PairCallback pair_callback;
+ UnpairCallback unpair_callback;
+ void *pair_callback_userdata;
+ void *unpair_callback_userdata;
+
+ BVHTREE_CLASS tree;
+
+ // for collision pairing,
+ // maintain a list of all items moved etc on each frame / tick
+ LocalVector<BVHHandle, uint32_t, true> changed_items;
+ uint32_t _tick;
+
+public:
+ BVH_Manager() {
+ _tick = 1; // start from 1 so items with 0 indicate never updated
+ pair_callback = nullptr;
+ unpair_callback = nullptr;
+ pair_callback_userdata = nullptr;
+ unpair_callback_userdata = nullptr;
+ }
+};
+
+#undef BVHTREE_CLASS
+
+#endif // BVH_H
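One detail worth calling out from _add_changed_item() above: each paired item keeps a grown copy of its bounds (expanded_aabb), and a move only queues the item for pair checking when the new bounds escape that grown copy. A toy 1D sketch of that test, with the engine's Bounds/AABB types replaced by a made-up Range1D:

    #include <cstdio>

    struct Range1D {
        float min, max;
        bool encloses(const Range1D &o) const { return o.min >= min && o.max <= max; }
        void grow_by(float amount) { min -= amount; max += amount; }
    };

    int main() {
        const float pairing_expansion = 0.1f;

        Range1D expanded = { 0.0f, 1.0f };
        expanded.grow_by(pairing_expansion); // stored, grown copy: [-0.1, 1.1]

        Range1D small_move = { 0.05f, 1.05f }; // still inside the grown copy
        Range1D big_move = { 0.5f, 1.5f };     // escapes it

        printf("small move needs pair check: %d\n", !expanded.encloses(small_move)); // 0
        printf("big move needs pair check:   %d\n", !expanded.encloses(big_move));   // 1
        return 0;
    }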
diff --git a/core/math/bvh_abb.h b/core/math/bvh_abb.h
new file mode 100644
index 0000000000..bd9a01a87e
--- /dev/null
+++ b/core/math/bvh_abb.h
@@ -0,0 +1,276 @@
+/*************************************************************************/
+/* bvh_abb.h */
+/*************************************************************************/
+/* This file is part of: */
+/* GODOT ENGINE */
+/* https://godotengine.org */
+/*************************************************************************/
+/* Copyright (c) 2007-2021 Juan Linietsky, Ariel Manzur. */
+/* Copyright (c) 2014-2021 Godot Engine contributors (cf. AUTHORS.md). */
+/* */
+/* Permission is hereby granted, free of charge, to any person obtaining */
+/* a copy of this software and associated documentation files (the */
+/* "Software"), to deal in the Software without restriction, including */
+/* without limitation the rights to use, copy, modify, merge, publish, */
+/* distribute, sublicense, and/or sell copies of the Software, and to */
+/* permit persons to whom the Software is furnished to do so, subject to */
+/* the following conditions: */
+/* */
+/* The above copyright notice and this permission notice shall be */
+/* included in all copies or substantial portions of the Software. */
+/* */
+/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
+/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
+/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/
+/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
+/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
+/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
+/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
+/*************************************************************************/
+
+#ifndef BVH_ABB_H
+#define BVH_ABB_H
+
+// special optimized version of axis aligned bounding box
+template <class Bounds = AABB, class Point = Vector3>
+struct BVH_ABB {
+ struct ConvexHull {
+ // convex hulls (optional)
+ const Plane *planes;
+ int num_planes;
+ const Vector3 *points;
+ int num_points;
+ };
+
+ struct Segment {
+ Point from;
+ Point to;
+ };
+
+ enum IntersectResult {
+ IR_MISS = 0,
+ IR_PARTIAL,
+ IR_FULL,
+ };
+
+ // we store mins with a negative value in order to test them with SIMD
+ Point min;
+ Point neg_max;
+
+ bool operator==(const BVH_ABB &o) const { return (min == o.min) && (neg_max == o.neg_max); }
+ bool operator!=(const BVH_ABB &o) const { return (*this == o) == false; }
+
+ void set(const Point &_min, const Point &_max) {
+ min = _min;
+ neg_max = -_max;
+ }
+
+ // to and from standard AABB
+ void from(const Bounds &p_aabb) {
+ min = p_aabb.position;
+ neg_max = -(p_aabb.position + p_aabb.size);
+ }
+
+ void to(Bounds &r_aabb) const {
+ r_aabb.position = min;
+ r_aabb.size = calculate_size();
+ }
+
+ void merge(const BVH_ABB &p_o) {
+ for (int axis = 0; axis < Point::AXIS_COUNT; ++axis) {
+ neg_max[axis] = MIN(neg_max[axis], p_o.neg_max[axis]);
+ min[axis] = MIN(min[axis], p_o.min[axis]);
+ }
+ }
+
+ Point calculate_size() const {
+ return -neg_max - min;
+ }
+
+ Point calculate_centre() const {
+ return Point((calculate_size() * 0.5) + min);
+ }
+
+ real_t get_proximity_to(const BVH_ABB &p_b) const {
+ const Point d = (min - neg_max) - (p_b.min - p_b.neg_max);
+ real_t proximity = 0.0;
+ for (int axis = 0; axis < Point::AXIS_COUNT; ++axis) {
+ proximity += Math::abs(d[axis]);
+ }
+ return proximity;
+ }
+
+ int select_by_proximity(const BVH_ABB &p_a, const BVH_ABB &p_b) const {
+ return (get_proximity_to(p_a) < get_proximity_to(p_b) ? 0 : 1);
+ }
+
+ uint32_t find_cutting_planes(const BVH_ABB::ConvexHull &p_hull, uint32_t *p_plane_ids) const {
+ uint32_t count = 0;
+
+ for (int n = 0; n < p_hull.num_planes; n++) {
+ const Plane &p = p_hull.planes[n];
+ if (intersects_plane(p)) {
+ p_plane_ids[count++] = n;
+ }
+ }
+
+ return count;
+ }
+
+ bool intersects_plane(const Plane &p_p) const {
+ Vector3 size = calculate_size();
+ Vector3 half_extents = size * 0.5;
+ Vector3 ofs = min + half_extents;
+
+ // forward side of plane?
+ Vector3 point_offset(
+ (p_p.normal.x < 0) ? -half_extents.x : half_extents.x,
+ (p_p.normal.y < 0) ? -half_extents.y : half_extents.y,
+ (p_p.normal.z < 0) ? -half_extents.z : half_extents.z);
+ Vector3 point = point_offset + ofs;
+
+ if (!p_p.is_point_over(point)) {
+ return false;
+ }
+
+ point = -point_offset + ofs;
+ if (p_p.is_point_over(point)) {
+ return false;
+ }
+
+ return true;
+ }
+
+ bool intersects_convex_optimized(const ConvexHull &p_hull, const uint32_t *p_plane_ids, uint32_t p_num_planes) const {
+ Vector3 size = calculate_size();
+ Vector3 half_extents = size * 0.5;
+ Vector3 ofs = min + half_extents;
+
+ for (unsigned int i = 0; i < p_num_planes; i++) {
+ const Plane &p = p_hull.planes[p_plane_ids[i]];
+ Vector3 point(
+ (p.normal.x > 0) ? -half_extents.x : half_extents.x,
+ (p.normal.y > 0) ? -half_extents.y : half_extents.y,
+ (p.normal.z > 0) ? -half_extents.z : half_extents.z);
+ point += ofs;
+ if (p.is_point_over(point)) {
+ return false;
+ }
+ }
+
+ return true;
+ }
+
+ bool intersects_convex_partial(const ConvexHull &p_hull) const {
+ Bounds bb;
+ to(bb);
+ return bb.intersects_convex_shape(p_hull.planes, p_hull.num_planes, p_hull.points, p_hull.num_points);
+ }
+
+ IntersectResult intersects_convex(const ConvexHull &p_hull) const {
+ if (intersects_convex_partial(p_hull)) {
+ // fully within? very important for tree checks
+ if (is_within_convex(p_hull)) {
+ return IR_FULL;
+ }
+
+ return IR_PARTIAL;
+ }
+
+ return IR_MISS;
+ }
+
+ bool is_within_convex(const ConvexHull &p_hull) const {
+ // use half extents routine
+ Bounds bb;
+ to(bb);
+ return bb.inside_convex_shape(p_hull.planes, p_hull.num_planes);
+ }
+
+ bool is_point_within_hull(const ConvexHull &p_hull, const Vector3 &p_pt) const {
+ for (int n = 0; n < p_hull.num_planes; n++) {
+ if (p_hull.planes[n].distance_to(p_pt) > 0.0f) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ bool intersects_segment(const Segment &p_s) const {
+ Bounds bb;
+ to(bb);
+ return bb.intersects_segment(p_s.from, p_s.to);
+ }
+
+ bool intersects_point(const Point &p_pt) const {
+ if (_any_lessthan(-p_pt, neg_max)) {
+ return false;
+ }
+ if (_any_lessthan(p_pt, min)) {
+ return false;
+ }
+ return true;
+ }
+
+ bool intersects(const BVH_ABB &p_o) const {
+ if (_any_morethan(p_o.min, -neg_max)) {
+ return false;
+ }
+ if (_any_morethan(min, -p_o.neg_max)) {
+ return false;
+ }
+ return true;
+ }
+
+ bool is_other_within(const BVH_ABB &p_o) const {
+ if (_any_lessthan(p_o.neg_max, neg_max)) {
+ return false;
+ }
+ if (_any_lessthan(p_o.min, min)) {
+ return false;
+ }
+ return true;
+ }
+
+ void grow(const Point &p_change) {
+ neg_max -= p_change;
+ min -= p_change;
+ }
+
+ void expand(real_t p_change) {
+ Point change;
+ change.set_all(p_change);
+ grow(change);
+ }
+
+ // Actually surface area metric.
+ float get_area() const {
+ Point d = calculate_size();
+ return 2.0f * (d.x * d.y + d.y * d.z + d.z * d.x);
+ }
+
+ void set_to_max_opposite_extents() {
+ neg_max.set_all(FLT_MAX);
+ min = neg_max;
+ }
+
+ bool _any_morethan(const Point &p_a, const Point &p_b) const {
+ for (int axis = 0; axis < Point::AXIS_COUNT; ++axis) {
+ if (p_a[axis] > p_b[axis]) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ bool _any_lessthan(const Point &p_a, const Point &p_b) const {
+ for (int axis = 0; axis < Point::AXIS_COUNT; ++axis) {
+ if (p_a[axis] < p_b[axis]) {
+ return true;
+ }
+ }
+ return false;
+ }
+};
+
+#endif // BVH_ABB_H
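The min / neg_max layout above stores the negated maximum so both bounds can be rejected with the same "any component greater than" comparison. A tiny 1D sketch showing that the negated form gives the usual AABB overlap result (Abb1D is a made-up stand-in for BVH_ABB):

    #include <cstdio>

    struct Abb1D {
        float min, neg_max; // neg_max == -max

        bool intersects(const Abb1D &o) const {
            if (o.min > -neg_max) { // other.min > this.max -> separated
                return false;
            }
            if (min > -o.neg_max) { // this.min > other.max -> separated
                return false;
            }
            return true;
        }
    };

    int main() {
        Abb1D a = { 0.0f, -2.0f }; // the interval [0, 2]
        Abb1D b = { 1.5f, -3.0f }; // [1.5, 3], overlaps a
        Abb1D c = { 2.5f, -4.0f }; // [2.5, 4], disjoint from a

        printf("a vs b: %d (expected 1)\n", a.intersects(b));
        printf("a vs c: %d (expected 0)\n", a.intersects(c));
        return 0;
    }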
diff --git a/core/math/bvh_cull.inc b/core/math/bvh_cull.inc
new file mode 100644
index 0000000000..cba8ea6cb3
--- /dev/null
+++ b/core/math/bvh_cull.inc
@@ -0,0 +1,534 @@
+public:
+// CullParams is a convenient way of passing a bunch
+// of arguments through the culling functions without
+// writing loads of code. Not all members are used by every cull check.
+struct CullParams {
+ int result_count_overall; // both trees
+ int result_count; // this tree only
+ int result_max;
+ T **result_array;
+ int *subindex_array;
+
+ // nobody truly understands how masks are intended to work.
+ uint32_t mask;
+ uint32_t pairable_type;
+
+ // optional components for different tests
+ Vector3 point;
+ BVHABB_CLASS abb;
+ typename BVHABB_CLASS::ConvexHull hull;
+ typename BVHABB_CLASS::Segment segment;
+
+ // when collision testing, non pairable moving items
+ // only need to be tested against the pairable tree.
+ // collisions with other non pairable items are irrelevant.
+ bool test_pairable_only;
+};
+
+private:
+void _cull_translate_hits(CullParams &p) {
+ int num_hits = _cull_hits.size();
+ int left = p.result_max - p.result_count_overall;
+
+ if (num_hits > left) {
+ num_hits = left;
+ }
+
+ int out_n = p.result_count_overall;
+
+ for (int n = 0; n < num_hits; n++) {
+ uint32_t ref_id = _cull_hits[n];
+
+ const ItemExtra &ex = _extra[ref_id];
+ p.result_array[out_n] = ex.userdata;
+
+ if (p.subindex_array) {
+ p.subindex_array[out_n] = ex.subindex;
+ }
+
+ out_n++;
+ }
+
+ p.result_count = num_hits;
+ p.result_count_overall += num_hits;
+}
+
+public:
+int cull_convex(CullParams &r_params, bool p_translate_hits = true) {
+ _cull_hits.clear();
+ r_params.result_count = 0;
+
+ for (int n = 0; n < NUM_TREES; n++) {
+ if (_root_node_id[n] == BVHCommon::INVALID) {
+ continue;
+ }
+
+ _cull_convex_iterative(_root_node_id[n], r_params);
+ }
+
+ if (p_translate_hits) {
+ _cull_translate_hits(r_params);
+ }
+
+ return r_params.result_count;
+}
+
+int cull_segment(CullParams &r_params, bool p_translate_hits = true) {
+ _cull_hits.clear();
+ r_params.result_count = 0;
+
+ for (int n = 0; n < NUM_TREES; n++) {
+ if (_root_node_id[n] == BVHCommon::INVALID) {
+ continue;
+ }
+
+ _cull_segment_iterative(_root_node_id[n], r_params);
+ }
+
+ if (p_translate_hits) {
+ _cull_translate_hits(r_params);
+ }
+
+ return r_params.result_count;
+}
+
+int cull_point(CullParams &r_params, bool p_translate_hits = true) {
+ _cull_hits.clear();
+ r_params.result_count = 0;
+
+ for (int n = 0; n < NUM_TREES; n++) {
+ if (_root_node_id[n] == BVHCommon::INVALID) {
+ continue;
+ }
+
+ _cull_point_iterative(_root_node_id[n], r_params);
+ }
+
+ if (p_translate_hits) {
+ _cull_translate_hits(r_params);
+ }
+
+ return r_params.result_count;
+}
+
+int cull_aabb(CullParams &r_params, bool p_translate_hits = true) {
+ _cull_hits.clear();
+ r_params.result_count = 0;
+
+ for (int n = 0; n < NUM_TREES; n++) {
+ if (_root_node_id[n] == BVHCommon::INVALID) {
+ continue;
+ }
+
+ if ((n == 0) && r_params.test_pairable_only) {
+ continue;
+ }
+
+ _cull_aabb_iterative(_root_node_id[n], r_params);
+ }
+
+ if (p_translate_hits) {
+ _cull_translate_hits(r_params);
+ }
+
+ return r_params.result_count;
+}
+
+bool _cull_hits_full(const CullParams &p) {
+ // instead of checking every hit, we can do a lazy check for this condition.
+	// it isn't a problem if we write too many _cull_hits, because only the first
+	// result_max of them will be translated and output. But we might as
+	// well stop our cull checks after the maximum has been reached.
+ return (int)_cull_hits.size() >= p.result_max;
+}
+
+// write this logic once for use in all routines
+// double check this as a possible source of bugs in future.
+bool _cull_pairing_mask_test_hit(uint32_t p_maskA, uint32_t p_typeA, uint32_t p_maskB, uint32_t p_typeB) const {
+ // double check this as a possible source of bugs in future.
+ bool A_match_B = p_maskA & p_typeB;
+
+ if (!A_match_B) {
+ bool B_match_A = p_maskB & p_typeA;
+ if (!B_match_A) {
+ return false;
+ }
+ }
+
+ return true;
+}
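Since the comments flag the mask test as a likely source of confusion, here is a small worked example of its symmetric rule: two items pair if either side's mask contains the other side's type bit. The bit values below are made up purely for illustration:

    #include <cstdint>
    #include <cstdio>

    int main() {
        const uint32_t TYPE_MESH = 1 << 0;
        const uint32_t TYPE_LIGHT = 1 << 1;

        // A light that wants to pair with meshes, and a mesh that ignores lights.
        uint32_t light_mask = TYPE_MESH, light_type = TYPE_LIGHT;
        uint32_t mesh_mask = 0, mesh_type = TYPE_MESH;

        bool light_sees_mesh = (light_mask & mesh_type) != 0; // true
        bool mesh_sees_light = (mesh_mask & light_type) != 0; // false

        // one direction matching is enough, mirroring _cull_pairing_mask_test_hit
        printf("pair: %d\n", light_sees_mesh || mesh_sees_light); // 1
        return 0;
    }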
+
+void _cull_hit(uint32_t p_ref_id, CullParams &p) {
+ // take into account masks etc
+ // this would be more efficient to do before plane checks,
+ // but done here for ease to get started
+ if (USE_PAIRS) {
+ const ItemExtra &ex = _extra[p_ref_id];
+
+ if (!_cull_pairing_mask_test_hit(p.mask, p.pairable_type, ex.pairable_mask, ex.pairable_type)) {
+ return;
+ }
+ }
+
+ _cull_hits.push_back(p_ref_id);
+}
+
+bool _cull_segment_iterative(uint32_t p_node_id, CullParams &r_params) {
+ // our function parameters to keep on a stack
+ struct CullSegParams {
+ uint32_t node_id;
+ };
+
+ // most of the iterative functionality is contained in this helper class
+ BVH_IterativeInfo<CullSegParams> ii;
+
+ // alloca must allocate the stack from this function, it cannot be allocated in the
+ // helper class
+ ii.stack = (CullSegParams *)alloca(ii.get_alloca_stacksize());
+
+ // seed the stack
+ ii.get_first()->node_id = p_node_id;
+
+ CullSegParams csp;
+
+ // while there are still more nodes on the stack
+ while (ii.pop(csp)) {
+ TNode &tnode = _nodes[csp.node_id];
+
+ if (tnode.is_leaf()) {
+ // lazy check for hits full up condition
+ if (_cull_hits_full(r_params)) {
+ return false;
+ }
+
+ TLeaf &leaf = _node_get_leaf(tnode);
+
+ // test children individually
+ for (int n = 0; n < leaf.num_items; n++) {
+ const BVHABB_CLASS &aabb = leaf.get_aabb(n);
+
+ if (aabb.intersects_segment(r_params.segment)) {
+ uint32_t child_id = leaf.get_item_ref_id(n);
+
+ // register hit
+ _cull_hit(child_id, r_params);
+ }
+ }
+ } else {
+ // test children individually
+ for (int n = 0; n < tnode.num_children; n++) {
+ uint32_t child_id = tnode.children[n];
+ const BVHABB_CLASS &child_abb = _nodes[child_id].aabb;
+
+ if (child_abb.intersects_segment(r_params.segment)) {
+ // add to the stack
+ CullSegParams *child = ii.request();
+ child->node_id = child_id;
+ }
+ }
+ }
+
+ } // while more nodes to pop
+
+ // true indicates results are not full
+ return true;
+}
+
+bool _cull_point_iterative(uint32_t p_node_id, CullParams &r_params) {
+ // our function parameters to keep on a stack
+ struct CullPointParams {
+ uint32_t node_id;
+ };
+
+ // most of the iterative functionality is contained in this helper class
+ BVH_IterativeInfo<CullPointParams> ii;
+
+ // alloca must allocate the stack from this function, it cannot be allocated in the
+ // helper class
+ ii.stack = (CullPointParams *)alloca(ii.get_alloca_stacksize());
+
+ // seed the stack
+ ii.get_first()->node_id = p_node_id;
+
+ CullPointParams cpp;
+
+ // while there are still more nodes on the stack
+ while (ii.pop(cpp)) {
+ TNode &tnode = _nodes[cpp.node_id];
+ // no hit with this node?
+ if (!tnode.aabb.intersects_point(r_params.point)) {
+ continue;
+ }
+
+ if (tnode.is_leaf()) {
+ // lazy check for hits full up condition
+ if (_cull_hits_full(r_params)) {
+ return false;
+ }
+
+ TLeaf &leaf = _node_get_leaf(tnode);
+
+ // test children individually
+ for (int n = 0; n < leaf.num_items; n++) {
+ if (leaf.get_aabb(n).intersects_point(r_params.point)) {
+ uint32_t child_id = leaf.get_item_ref_id(n);
+
+ // register hit
+ _cull_hit(child_id, r_params);
+ }
+ }
+ } else {
+ // test children individually
+ for (int n = 0; n < tnode.num_children; n++) {
+ uint32_t child_id = tnode.children[n];
+
+ // add to the stack
+ CullPointParams *child = ii.request();
+ child->node_id = child_id;
+ }
+ }
+
+ } // while more nodes to pop
+
+ // true indicates results are not full
+ return true;
+}
+
+bool _cull_aabb_iterative(uint32_t p_node_id, CullParams &r_params, bool p_fully_within = false) {
+ // our function parameters to keep on a stack
+ struct CullAABBParams {
+ uint32_t node_id;
+ bool fully_within;
+ };
+
+ // most of the iterative functionality is contained in this helper class
+ BVH_IterativeInfo<CullAABBParams> ii;
+
+ // alloca must allocate the stack from this function, it cannot be allocated in the
+ // helper class
+ ii.stack = (CullAABBParams *)alloca(ii.get_alloca_stacksize());
+
+ // seed the stack
+ ii.get_first()->node_id = p_node_id;
+ ii.get_first()->fully_within = p_fully_within;
+
+ CullAABBParams cap;
+
+ // while there are still more nodes on the stack
+ while (ii.pop(cap)) {
+ TNode &tnode = _nodes[cap.node_id];
+
+ if (tnode.is_leaf()) {
+ // lazy check for hits full up condition
+ if (_cull_hits_full(r_params)) {
+ return false;
+ }
+
+ TLeaf &leaf = _node_get_leaf(tnode);
+
+ // if fully within we can just add all items
+ // as long as they pass mask checks
+ if (cap.fully_within) {
+ for (int n = 0; n < leaf.num_items; n++) {
+ uint32_t child_id = leaf.get_item_ref_id(n);
+
+ // register hit
+ _cull_hit(child_id, r_params);
+ }
+ } else {
+ for (int n = 0; n < leaf.num_items; n++) {
+ const BVHABB_CLASS &aabb = leaf.get_aabb(n);
+
+ if (aabb.intersects(r_params.abb)) {
+ uint32_t child_id = leaf.get_item_ref_id(n);
+
+ // register hit
+ _cull_hit(child_id, r_params);
+ }
+ }
+ } // not fully within
+ } else {
+ if (!cap.fully_within) {
+ // test children individually
+ for (int n = 0; n < tnode.num_children; n++) {
+ uint32_t child_id = tnode.children[n];
+ const BVHABB_CLASS &child_abb = _nodes[child_id].aabb;
+
+ if (child_abb.intersects(r_params.abb)) {
+ // is the node totally within the aabb?
+ bool fully_within = r_params.abb.is_other_within(child_abb);
+
+ // add to the stack
+ CullAABBParams *child = ii.request();
+
+ // should always return valid child
+ child->node_id = child_id;
+ child->fully_within = fully_within;
+ }
+ }
+ } else {
+ for (int n = 0; n < tnode.num_children; n++) {
+ uint32_t child_id = tnode.children[n];
+
+ // add to the stack
+ CullAABBParams *child = ii.request();
+
+ // should always return valid child
+ child->node_id = child_id;
+ child->fully_within = true;
+ }
+ }
+ }
+
+ } // while more nodes to pop
+
+ // true indicates results are not full
+ return true;
+}
+
+// returns false if full up with results
+bool _cull_convex_iterative(uint32_t p_node_id, CullParams &r_params, bool p_fully_within = false) {
+ // our function parameters to keep on a stack
+ struct CullConvexParams {
+ uint32_t node_id;
+ bool fully_within;
+ };
+
+ // most of the iterative functionality is contained in this helper class
+ BVH_IterativeInfo<CullConvexParams> ii;
+
+ // alloca must allocate the stack from this function, it cannot be allocated in the
+ // helper class
+ ii.stack = (CullConvexParams *)alloca(ii.get_alloca_stacksize());
+
+ // seed the stack
+ ii.get_first()->node_id = p_node_id;
+ ii.get_first()->fully_within = p_fully_within;
+
+ // preallocate these as a once off to be reused
+ uint32_t max_planes = r_params.hull.num_planes;
+ uint32_t *plane_ids = (uint32_t *)alloca(sizeof(uint32_t) * max_planes);
+
+ CullConvexParams ccp;
+
+ // while there are still more nodes on the stack
+ while (ii.pop(ccp)) {
+ const TNode &tnode = _nodes[ccp.node_id];
+
+ if (!ccp.fully_within) {
+ typename BVHABB_CLASS::IntersectResult res = tnode.aabb.intersects_convex(r_params.hull);
+
+ switch (res) {
+ default: {
+ continue; // miss, just move on to the next node in the stack
+ } break;
+ case BVHABB_CLASS::IR_PARTIAL: {
+ } break;
+ case BVHABB_CLASS::IR_FULL: {
+ ccp.fully_within = true;
+ } break;
+ }
+
+ } // if not fully within already
+
+ if (tnode.is_leaf()) {
+ // lazy check for hits full up condition
+ if (_cull_hits_full(r_params)) {
+ return false;
+ }
+
+ const TLeaf &leaf = _node_get_leaf(tnode);
+
+ // if fully within, simply add all items to the result
+ // (taking into account masks)
+ if (ccp.fully_within) {
+ for (int n = 0; n < leaf.num_items; n++) {
+ uint32_t child_id = leaf.get_item_ref_id(n);
+
+ // register hit
+ _cull_hit(child_id, r_params);
+ }
+
+ } else {
+ // we can either use a naive check of all the planes against the AABB,
+ // or an optimized check, which finds in advance which of the planes can possibly
+ // cut the AABB, and only tests those. This can be much faster.
+#define BVH_CONVEX_CULL_OPTIMIZED
+#ifdef BVH_CONVEX_CULL_OPTIMIZED
+ // first find which planes cut the aabb
+ uint32_t num_planes = tnode.aabb.find_cutting_planes(r_params.hull, plane_ids);
+ BVH_ASSERT(num_planes <= max_planes);
+
+//#define BVH_CONVEX_CULL_OPTIMIZED_RIGOR_CHECK
+#ifdef BVH_CONVEX_CULL_OPTIMIZED_RIGOR_CHECK
+ // rigorous check
+ uint32_t results[MAX_ITEMS];
+ uint32_t num_results = 0;
+#endif
+
+ // test children individually
+ for (int n = 0; n < leaf.num_items; n++) {
+ //const Item &item = leaf.get_item(n);
+ const BVHABB_CLASS &aabb = leaf.get_aabb(n);
+
+ if (aabb.intersects_convex_optimized(r_params.hull, plane_ids, num_planes)) {
+ uint32_t child_id = leaf.get_item_ref_id(n);
+
+#ifdef BVH_CONVEX_CULL_OPTIMIZED_RIGOR_CHECK
+ results[num_results++] = child_id;
+#endif
+
+ // register hit
+ _cull_hit(child_id, r_params);
+ }
+ }
+
+#ifdef BVH_CONVEX_CULL_OPTIMIZED_RIGOR_CHECK
+ uint32_t test_count = 0;
+
+ for (int n = 0; n < leaf.num_items; n++) {
+ const BVHABB_CLASS &aabb = leaf.get_aabb(n);
+
+ if (aabb.intersects_convex_partial(r_params.hull)) {
+ uint32_t child_id = leaf.get_item_ref_id(n);
+
+ CRASH_COND(child_id != results[test_count++]);
+ CRASH_COND(test_count > num_results);
+ }
+ }
+#endif
+
+#else
+ // not BVH_CONVEX_CULL_OPTIMIZED
+ // test children individually
+ for (int n = 0; n < leaf.num_items; n++) {
+ const BVHABB_CLASS &aabb = leaf.get_aabb(n);
+
+ if (aabb.intersects_convex_partial(r_params.hull)) {
+ uint32_t child_id = leaf.get_item_ref_id(n);
+
+ // full up with results? exit early, no point in further testing
+ if (!_cull_hit(child_id, r_params))
+ return false;
+ }
+ }
+#endif // BVH_CONVEX_CULL_OPTIMIZED
+ } // if not fully within
+ } else {
+ for (int n = 0; n < tnode.num_children; n++) {
+ uint32_t child_id = tnode.children[n];
+
+ // add to the stack
+ CullConvexParams *child = ii.request();
+
+ // should always return valid child
+ child->node_id = child_id;
+ child->fully_within = ccp.fully_within;
+ }
+ }
+
+ } // while more nodes to pop
+
+ // true indicates results are not full
+ return true;
+}
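All of the _cull_*_iterative routines above share the same shape: instead of recursing into child nodes, they push child IDs onto a small explicit stack (sized with alloca by the caller via BVH_IterativeInfo) and loop until it is empty. A stripped-down sketch of that traversal pattern on a plain binary tree (node layout and the fixed stack depth are illustrative only):

    #include <cstdio>

    struct Node {
        int value;
        int children[2]; // -1 means no child
    };

    static void visit_iterative(const Node *nodes, int root) {
        int stack[64]; // the real code sizes its stack via alloca in the caller
        int depth = 0;
        stack[depth++] = root;

        while (depth > 0) {
            const Node &n = nodes[stack[--depth]]; // pop
            printf("visiting %d\n", n.value);

            for (int c = 0; c < 2; c++) {
                if (n.children[c] != -1) {
                    stack[depth++] = n.children[c]; // push instead of recursing
                }
            }
        }
    }

    int main() {
        // A three-node tree: node 0 has children 1 and 2.
        const Node nodes[3] = { { 10, { 1, 2 } }, { 20, { -1, -1 } }, { 30, { -1, -1 } } };
        visit_iterative(nodes, 0);
        return 0;
    }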
diff --git a/core/math/bvh_debug.inc b/core/math/bvh_debug.inc
new file mode 100644
index 0000000000..a97304334c
--- /dev/null
+++ b/core/math/bvh_debug.inc
@@ -0,0 +1,68 @@
+public:
+#ifdef BVH_VERBOSE
+void _debug_recursive_print_tree(int p_tree_id) const {
+ if (_root_node_id[p_tree_id] != BVHCommon::INVALID)
+ _debug_recursive_print_tree_node(_root_node_id[p_tree_id]);
+}
+
+String _debug_aabb_to_string(const BVHABB_CLASS &aabb) const {
+ String sz = "(";
+ sz += itos(aabb.min.x);
+ sz += " ~ ";
+ sz += itos(-aabb.neg_max.x);
+ sz += ") (";
+
+ sz += itos(aabb.min.y);
+ sz += " ~ ";
+ sz += itos(-aabb.neg_max.y);
+ sz += ") (";
+
+ sz += itos(aabb.min.z);
+ sz += " ~ ";
+ sz += itos(-aabb.neg_max.z);
+ sz += ") ";
+
+ Vector3 size = aabb.calculate_size();
+ float vol = size.x * size.y * size.z;
+ sz += "vol " + itos(vol);
+
+ return sz;
+}
+
+void _debug_recursive_print_tree_node(uint32_t p_node_id, int depth = 0) const {
+ const TNode &tnode = _nodes[p_node_id];
+
+ String sz = "";
+ for (int n = 0; n < depth; n++) {
+ sz += "\t";
+ }
+ sz += itos(p_node_id);
+
+ if (tnode.is_leaf()) {
+ sz += " L";
+ sz += itos(tnode.height) + " ";
+ const TLeaf &leaf = _node_get_leaf(tnode);
+
+ sz += "[";
+ for (int n = 0; n < leaf.num_items; n++) {
+ if (n)
+ sz += ", ";
+ sz += "r";
+ sz += itos(leaf.get_item_ref_id(n));
+ }
+ sz += "] ";
+ } else {
+ sz += " N";
+ sz += itos(tnode.height) + " ";
+ }
+
+ sz += _debug_aabb_to_string(tnode.aabb);
+ print_line(sz);
+
+ if (!tnode.is_leaf()) {
+ for (int n = 0; n < tnode.num_children; n++) {
+ _debug_recursive_print_tree_node(tnode.children[n], depth + 1);
+ }
+ }
+}
+#endif
diff --git a/core/math/bvh_integrity.inc b/core/math/bvh_integrity.inc
new file mode 100644
index 0000000000..02e9d30097
--- /dev/null
+++ b/core/math/bvh_integrity.inc
@@ -0,0 +1,42 @@
+void _integrity_check_all() {
+#ifdef BVH_INTEGRITY_CHECKS
+ for (int n = 0; n < NUM_TREES; n++) {
+ uint32_t root = _root_node_id[n];
+ if (root != BVHCommon::INVALID) {
+ _integrity_check_down(root);
+ }
+ }
+#endif
+}
+
+void _integrity_check_up(uint32_t p_node_id) {
+ TNode &node = _nodes[p_node_id];
+
+ BVHABB_CLASS abb = node.aabb;
+ node_update_aabb(node);
+
+ BVHABB_CLASS abb2 = node.aabb;
+ abb2.expand(-_node_expansion);
+
+ CRASH_COND(!abb.is_other_within(abb2));
+}
+
+void _integrity_check_down(uint32_t p_node_id) {
+ const TNode &node = _nodes[p_node_id];
+
+ if (node.is_leaf()) {
+ _integrity_check_up(p_node_id);
+ } else {
+ CRASH_COND(node.num_children != 2);
+
+ for (int n = 0; n < node.num_children; n++) {
+ uint32_t child_id = node.children[n];
+
+ // check the children parent pointers are correct
+ TNode &child = _nodes[child_id];
+ CRASH_COND(child.parent_id != p_node_id);
+
+ _integrity_check_down(child_id);
+ }
+ }
+}
diff --git a/core/math/bvh_logic.inc b/core/math/bvh_logic.inc
new file mode 100644
index 0000000000..d84c3f7830
--- /dev/null
+++ b/core/math/bvh_logic.inc
@@ -0,0 +1,230 @@
+
+// for slow incremental optimization, we will periodically remove each
+// item from the tree and reinsert, to give it a chance to find a better position
+void _logic_item_remove_and_reinsert(uint32_t p_ref_id) {
+ // get the reference
+ ItemRef &ref = _refs[p_ref_id];
+
+ // no need to optimize inactive items
+ if (!ref.is_active()) {
+ return;
+ }
+
+ // special case of debug draw
+ if (ref.item_id == BVHCommon::INVALID) {
+ return;
+ }
+
+ BVH_ASSERT(ref.tnode_id != BVHCommon::INVALID);
+
+	// some overly elaborate way to find out which tree the node is in!
+ BVHHandle temp_handle;
+ temp_handle.set_id(p_ref_id);
+ _current_tree = _handle_get_tree_id(temp_handle);
+
+ // remove and reinsert
+ BVHABB_CLASS abb;
+ node_remove_item(p_ref_id, &abb);
+
+ // we must choose where to add to tree
+ ref.tnode_id = _logic_choose_item_add_node(_root_node_id[_current_tree], abb);
+ _node_add_item(ref.tnode_id, p_ref_id, abb);
+
+ refit_upward_and_balance(ref.tnode_id);
+}
+
+// from randy gaul balance function
+BVHABB_CLASS _logic_abb_merge(const BVHABB_CLASS &a, const BVHABB_CLASS &b) {
+ BVHABB_CLASS c = a;
+ c.merge(b);
+ return c;
+}
+
+//--------------------------------------------------------------------------------------------------
+/**
+@file q3DynamicAABBTree.h
+@author Randy Gaul
+@date 10/10/2014
+ Copyright (c) 2014 Randy Gaul http://www.randygaul.net
+ This software is provided 'as-is', without any express or implied
+ warranty. In no event will the authors be held liable for any damages
+ arising from the use of this software.
+ Permission is granted to anyone to use this software for any purpose,
+ including commercial applications, and to alter it and redistribute it
+ freely, subject to the following restrictions:
+ 1. The origin of this software must not be misrepresented; you must not
+ claim that you wrote the original software. If you use this software
+ in a product, an acknowledgment in the product documentation would be
+ appreciated but is not required.
+ 2. Altered source versions must be plainly marked as such, and must not
+ be misrepresented as being the original software.
+ 3. This notice may not be removed or altered from any source distribution.
+*/
+//--------------------------------------------------------------------------------------------------
+
+// This function is based on the 'Balance' function from Randy Gaul's qu3e
+// https://github.com/RandyGaul/qu3e
+// It is MODIFIED from qu3e version.
+// This is the only function used (and _logic_abb_merge helper function).
+int32_t _logic_balance(int32_t iA) {
+ // return iA; // uncomment this to bypass balance
+
+ TNode *A = &_nodes[iA];
+
+ if (A->is_leaf() || A->height == 1) {
+ return iA;
+ }
+
+ /* A
+ / \
+ B C
+ / \ / \
+ D E F G
+ */
+
+ CRASH_COND(A->num_children != 2);
+ int32_t iB = A->children[0];
+ int32_t iC = A->children[1];
+ TNode *B = &_nodes[iB];
+ TNode *C = &_nodes[iC];
+
+ int32_t balance = C->height - B->height;
+
+ // C is higher, promote C
+ if (balance > 1) {
+ int32_t iF = C->children[0];
+ int32_t iG = C->children[1];
+ TNode *F = &_nodes[iF];
+ TNode *G = &_nodes[iG];
+
+ // make the grandparent point to C
+ if (A->parent_id != BVHCommon::INVALID) {
+ if (_nodes[A->parent_id].children[0] == iA) {
+ _nodes[A->parent_id].children[0] = iC;
+
+ } else {
+ _nodes[A->parent_id].children[1] = iC;
+ }
+ } else {
+ // check this .. seems dodgy
+ change_root_node(iC);
+ }
+
+ // Swap A and C
+ C->children[0] = iA;
+ C->parent_id = A->parent_id;
+ A->parent_id = iC;
+
+ // Finish rotation
+ if (F->height > G->height) {
+ C->children[1] = iF;
+ A->children[1] = iG;
+ G->parent_id = iA;
+ A->aabb = _logic_abb_merge(B->aabb, G->aabb);
+ C->aabb = _logic_abb_merge(A->aabb, F->aabb);
+
+ A->height = 1 + MAX(B->height, G->height);
+ C->height = 1 + MAX(A->height, F->height);
+ }
+
+ else {
+ C->children[1] = iG;
+ A->children[1] = iF;
+ F->parent_id = iA;
+ A->aabb = _logic_abb_merge(B->aabb, F->aabb);
+ C->aabb = _logic_abb_merge(A->aabb, G->aabb);
+
+ A->height = 1 + MAX(B->height, F->height);
+ C->height = 1 + MAX(A->height, G->height);
+ }
+
+ return iC;
+ }
+
+ // B is higher, promote B
+ else if (balance < -1) {
+ int32_t iD = B->children[0];
+ int32_t iE = B->children[1];
+ TNode *D = &_nodes[iD];
+ TNode *E = &_nodes[iE];
+
+ // make the grandparent point to B
+ if (A->parent_id != BVHCommon::INVALID) {
+ if (_nodes[A->parent_id].children[0] == iA) {
+ _nodes[A->parent_id].children[0] = iB;
+ } else {
+ _nodes[A->parent_id].children[1] = iB;
+ }
+ }
+
+ else {
+ // check this .. seems dodgy
+ change_root_node(iB);
+ }
+
+ // Swap A and B
+ B->children[1] = iA;
+ B->parent_id = A->parent_id;
+ A->parent_id = iB;
+
+ // Finish rotation
+ if (D->height > E->height) {
+ B->children[0] = iD;
+ A->children[0] = iE;
+ E->parent_id = iA;
+ A->aabb = _logic_abb_merge(C->aabb, E->aabb);
+ B->aabb = _logic_abb_merge(A->aabb, D->aabb);
+
+ A->height = 1 + MAX(C->height, E->height);
+ B->height = 1 + MAX(A->height, D->height);
+ }
+
+ else {
+ B->children[0] = iE;
+ A->children[0] = iD;
+ D->parent_id = iA;
+ A->aabb = _logic_abb_merge(C->aabb, D->aabb);
+ B->aabb = _logic_abb_merge(A->aabb, E->aabb);
+
+ A->height = 1 + MAX(C->height, D->height);
+ B->height = 1 + MAX(A->height, E->height);
+ }
+
+ return iB;
+ }
+
+ return iA;
+}
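+
+// Worked example of the balance test above (illustrative numbers only):
+//
+//	balance = C->height - B->height = 3 - 1 = 2   =>   balance > 1, so C is promoted
+//
+// The taller of C's children (F or G) stays under C, the shorter one becomes A's second
+// child, and the heights and AABBs of A and C are recomputed from their new children.
+// For this example the height difference at A drops from 2 to at most 1.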
+
+// either choose an existing node to add the item to, or create a new node and return that
+uint32_t _logic_choose_item_add_node(uint32_t p_node_id, const BVHABB_CLASS &p_aabb) {
+ while (true) {
+ BVH_ASSERT(p_node_id != BVHCommon::INVALID);
+ TNode &tnode = _nodes[p_node_id];
+
+ if (tnode.is_leaf()) {
+ // if a leaf, and not full, use this node to add to
+ if (!node_is_leaf_full(tnode)) {
+ return p_node_id;
+ }
+
+ // else split the leaf, and use one of the children to add to
+ return split_leaf(p_node_id, p_aabb);
+ }
+
+ // this should not happen, but is still happening in rare circumstances that need debugging.
+ // It is not that serious, but it would be nice to prevent. I think it only happens with the root node.
+ if (tnode.num_children == 1) {
+ WARN_PRINT_ONCE("BVH::recursive_choose_item_add_node, node with 1 child, recovering");
+ p_node_id = tnode.children[0];
+ } else {
+ BVH_ASSERT(tnode.num_children == 2);
+ TNode &childA = _nodes[tnode.children[0]];
+ TNode &childB = _nodes[tnode.children[1]];
+ int which = p_aabb.select_by_proximity(childA.aabb, childB.aabb);
+
+ p_node_id = tnode.children[which];
+ }
+ }
+}
diff --git a/core/math/bvh_misc.inc b/core/math/bvh_misc.inc
new file mode 100644
index 0000000000..71aa0e4fe0
--- /dev/null
+++ b/core/math/bvh_misc.inc
@@ -0,0 +1,55 @@
+
+int _handle_get_tree_id(BVHHandle p_handle) const {
+ if (USE_PAIRS) {
+ int tree = 0;
+ if (_extra[p_handle.id()].pairable) {
+ tree = 1;
+ }
+ return tree;
+ }
+ return 0;
+}
+
+public:
+void _handle_sort(BVHHandle &p_ha, BVHHandle &p_hb) const {
+ if (p_ha.id() > p_hb.id()) {
+ BVHHandle temp = p_hb;
+ p_hb = p_ha;
+ p_ha = temp;
+ }
+}
+
+private:
+void create_root_node(int p_tree) {
+ // if there is no root node, create one
+ if (_root_node_id[p_tree] == BVHCommon::INVALID) {
+ uint32_t root_node_id;
+ TNode *node = _nodes.request(root_node_id);
+ node->clear();
+ _root_node_id[p_tree] = root_node_id;
+
+ // make the root node a leaf
+ uint32_t leaf_id;
+ TLeaf *leaf = _leaves.request(leaf_id);
+ leaf->clear();
+ node->neg_leaf_id = -(int)leaf_id;
+ }
+}
+
+bool node_is_leaf_full(TNode &tnode) const {
+ const TLeaf &leaf = _node_get_leaf(tnode);
+ return leaf.is_full();
+}
+
+public:
+TLeaf &_node_get_leaf(TNode &tnode) {
+ BVH_ASSERT(tnode.is_leaf());
+ return _leaves[tnode.get_leaf_id()];
+}
+
+const TLeaf &_node_get_leaf(const TNode &tnode) const {
+ BVH_ASSERT(tnode.is_leaf());
+ return _leaves[tnode.get_leaf_id()];
+}
+
+private:
diff --git a/core/math/bvh_pair.inc b/core/math/bvh_pair.inc
new file mode 100644
index 0000000000..839db59a3a
--- /dev/null
+++ b/core/math/bvh_pair.inc
@@ -0,0 +1,62 @@
+public:
+// note .. maybe this can be attached to another node structure?
+// depends which works best for cache.
+struct ItemPairs {
+ struct Link {
+ void set(BVHHandle h, void *ud) {
+ handle = h;
+ userdata = ud;
+ }
+ BVHHandle handle;
+ void *userdata;
+ };
+
+ void clear() {
+ num_pairs = 0;
+ extended_pairs.reset();
+ expanded_aabb = Bounds();
+ }
+
+ Bounds expanded_aabb;
+
+ // TODO: maybe we can just use the size of the vector instead
+ int32_t num_pairs;
+ LocalVector<Link> extended_pairs;
+
+ void add_pair_to(BVHHandle h, void *p_userdata) {
+ Link temp;
+ temp.set(h, p_userdata);
+
+ extended_pairs.push_back(temp);
+ num_pairs++;
+ }
+
+ uint32_t find_pair_to(BVHHandle h) const {
+ for (int n = 0; n < num_pairs; n++) {
+ if (extended_pairs[n].handle == h) {
+ return n;
+ }
+ }
+ return BVHCommon::INVALID;
+ }
+
+ bool contains_pair_to(BVHHandle h) const {
+ return find_pair_to(h) != BVHCommon::INVALID;
+ }
+
+ // returns the removed pair's userdata, or nullptr if not found
+ void *remove_pair_to(BVHHandle h) {
+ void *userdata = nullptr;
+
+ for (int n = 0; n < num_pairs; n++) {
+ if (extended_pairs[n].handle == h) {
+ userdata = extended_pairs[n].userdata;
+ extended_pairs.remove_unordered(n);
+ num_pairs--;
+ break;
+ }
+ }
+
+ return userdata;
+ }
+};
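+
+// Illustrative usage of ItemPairs (a sketch only; 'other' and 'pair_userdata' are
+// hypothetical arguments supplied by the caller). The owner is expected to guard against
+// duplicates with contains_pair_to() before adding, and to retrieve the stored userdata
+// when unpairing:
+//
+//	void example_pair(ItemPairs &pairs, BVHHandle other, void *pair_userdata) {
+//		if (!pairs.contains_pair_to(other)) {
+//			pairs.add_pair_to(other, pair_userdata);
+//		}
+//	}
+//
+//	void example_unpair(ItemPairs &pairs, BVHHandle other) {
+//		void *ud = pairs.remove_pair_to(other); // nullptr if the pair was not present
+//		(void)ud;
+//	}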
diff --git a/core/math/bvh_public.inc b/core/math/bvh_public.inc
new file mode 100644
index 0000000000..f1b6d6b1bf
--- /dev/null
+++ b/core/math/bvh_public.inc
@@ -0,0 +1,421 @@
+public:
+BVHHandle item_add(T *p_userdata, bool p_active, const Bounds &p_aabb, int32_t p_subindex, bool p_pairable, uint32_t p_pairable_type, uint32_t p_pairable_mask, bool p_invisible = false) {
+#ifdef BVH_VERBOSE_TREE
+ VERBOSE_PRINT("\nitem_add BEFORE");
+ _debug_recursive_print_tree(0);
+ VERBOSE_PRINT("\n");
+#endif
+
+ BVHABB_CLASS abb;
+ abb.from(p_aabb);
+
+ // handle to be filled with the new item ref
+ BVHHandle handle;
+
+ // ref id easier to pass around than handle
+ uint32_t ref_id;
+
+ // this should never fail
+ ItemRef *ref = _refs.request(ref_id);
+
+ // the extra data should be a parallel list to the references
+ uint32_t extra_id;
+ ItemExtra *extra = _extra.request(extra_id);
+ BVH_ASSERT(extra_id == ref_id);
+
+ // pairs info
+ if (USE_PAIRS) {
+ uint32_t pairs_id;
+ ItemPairs *pairs = _pairs.request(pairs_id);
+ pairs->clear();
+ BVH_ASSERT(pairs_id == ref_id);
+ }
+
+ extra->subindex = p_subindex;
+ extra->userdata = p_userdata;
+ extra->last_updated_tick = 0;
+
+ // add an active reference to the list for slow incremental optimize
+ // this list must be kept in sync with the references as they are added or removed.
+ extra->active_ref_id = _active_refs.size();
+ _active_refs.push_back(ref_id);
+
+ if (USE_PAIRS) {
+ extra->pairable_mask = p_pairable_mask;
+ extra->pairable_type = p_pairable_type;
+ extra->pairable = p_pairable;
+ } else {
+ // just for safety, in case this gets queried etc
+ extra->pairable = 0;
+ p_pairable = false;
+ }
+
+ // assign to handle to return
+ handle.set_id(ref_id);
+
+ _current_tree = 0;
+ if (p_pairable) {
+ _current_tree = 1;
+ }
+
+ create_root_node(_current_tree);
+
+ // we must choose where to add to tree
+ if (p_active) {
+ ref->tnode_id = _logic_choose_item_add_node(_root_node_id[_current_tree], abb);
+
+ bool refit = _node_add_item(ref->tnode_id, ref_id, abb);
+
+ if (refit) {
+ // only need to refit from the parent
+ const TNode &add_node = _nodes[ref->tnode_id];
+ if (add_node.parent_id != BVHCommon::INVALID) {
+ refit_upward_and_balance(add_node.parent_id);
+ }
+ }
+ } else {
+ ref->set_inactive();
+ }
+
+#ifdef BVH_VERBOSE
+ // memory use
+ int mem = _refs.estimate_memory_use();
+ mem += _nodes.estimate_memory_use();
+
+ String sz = _debug_aabb_to_string(abb);
+ VERBOSE_PRINT("\titem_add [" + itos(ref_id) + "] " + itos(_refs.size()) + " refs,\t" + itos(_nodes.size()) + " nodes " + sz);
+ VERBOSE_PRINT("mem use : " + itos(mem) + ", num nodes : " + itos(_nodes.size()));
+
+#endif
+
+ return handle;
+}
+
+void _debug_print_refs() {
+#ifdef BVH_VERBOSE_TREE
+ print_line("refs.....");
+ for (int n = 0; n < _refs.size(); n++) {
+ const ItemRef &ref = _refs[n];
+ print_line("tnode_id " + itos(ref.tnode_id) + ", item_id " + itos(ref.item_id));
+ }
+
+#endif
+}
+
+// returns false if noop
+bool item_move(BVHHandle p_handle, const Bounds &p_aabb) {
+ uint32_t ref_id = p_handle.id();
+
+ // get the reference
+ ItemRef &ref = _refs[ref_id];
+ if (!ref.is_active()) {
+ return false;
+ }
+
+ BVHABB_CLASS abb;
+ abb.from(p_aabb);
+
+ BVH_ASSERT(ref.tnode_id != BVHCommon::INVALID);
+ TNode &tnode = _nodes[ref.tnode_id];
+
+ // does it fit within the current aabb?
+ if (tnode.aabb.is_other_within(abb)) {
+ // do nothing .. fast path .. not moved enough to need refit
+
+ // however we WILL update the exact aabb in the leaf, as this will be needed
+ // for accurate collision detection
+ TLeaf &leaf = _node_get_leaf(tnode);
+
+ BVHABB_CLASS &leaf_abb = leaf.get_aabb(ref.item_id);
+
+ // no change?
+ if (leaf_abb == abb) {
+ return false;
+ }
+
+ leaf_abb = abb;
+ _integrity_check_all();
+
+ return true;
+ }
+
+ _current_tree = _handle_get_tree_id(p_handle);
+
+ // remove and reinsert
+ node_remove_item(ref_id);
+
+ // we must choose where to add to tree
+ ref.tnode_id = _logic_choose_item_add_node(_root_node_id[_current_tree], abb);
+
+ // add to the tree
+ bool needs_refit = _node_add_item(ref.tnode_id, ref_id, abb);
+
+ // only need to refit from the PARENT
+ if (needs_refit) {
+ // only need to refit from the parent
+ const TNode &add_node = _nodes[ref.tnode_id];
+ if (add_node.parent_id != BVHCommon::INVALID) {
+ // not sure we need to rebalance all the time, this can be done less often
+ refit_upward(add_node.parent_id);
+ }
+ //refit_upward_and_balance(add_node.parent_id);
+ }
+
+ return true;
+}
+
+void item_remove(BVHHandle p_handle) {
+ uint32_t ref_id = p_handle.id();
+
+ _current_tree = _handle_get_tree_id(p_handle);
+
+ VERBOSE_PRINT("item_remove [" + itos(ref_id) + "] ");
+
+ ////////////////////////////////////////
+ // remove the active reference from the list for slow incremental optimize
+ // this list must be kept in sync with the references as they are added or removed.
+ uint32_t active_ref_id = _extra[ref_id].active_ref_id;
+ uint32_t ref_id_moved_back = _active_refs[_active_refs.size() - 1];
+
+ // swap back and decrement for fast unordered remove
+ _active_refs[active_ref_id] = ref_id_moved_back;
+ _active_refs.resize(_active_refs.size() - 1);
+
+ // keep the moved active reference up to date
+ _extra[ref_id_moved_back].active_ref_id = active_ref_id;
+ ////////////////////////////////////////
+
+ // remove the item from the node (only if active)
+ if (_refs[ref_id].is_active()) {
+ node_remove_item(ref_id);
+ }
+
+ // remove the item reference
+ _refs.free(ref_id);
+ _extra.free(ref_id);
+ if (USE_PAIRS) {
+ _pairs.free(ref_id);
+ }
+
+ // don't think refit_all is necessary?
+ //refit_all(_current_tree);
+
+#ifdef BVH_VERBOSE_TREE
+ _debug_recursive_print_tree(_current_tree);
+#endif
+}
+
+// returns success
+bool item_activate(BVHHandle p_handle, const Bounds &p_aabb) {
+ uint32_t ref_id = p_handle.id();
+ ItemRef &ref = _refs[ref_id];
+ if (ref.is_active()) {
+ // noop
+ return false;
+ }
+
+ // add to tree
+ BVHABB_CLASS abb;
+ abb.from(p_aabb);
+
+ _current_tree = _handle_get_tree_id(p_handle);
+
+ // we must choose where to add to tree
+ ref.tnode_id = _logic_choose_item_add_node(_root_node_id[_current_tree], abb);
+ _node_add_item(ref.tnode_id, ref_id, abb);
+
+ refit_upward_and_balance(ref.tnode_id);
+
+ return true;
+}
+
+// returns success
+bool item_deactivate(BVHHandle p_handle) {
+ uint32_t ref_id = p_handle.id();
+ ItemRef &ref = _refs[ref_id];
+ if (!ref.is_active()) {
+ // noop
+ return false;
+ }
+
+ // remove from tree
+ BVHABB_CLASS abb;
+ node_remove_item(ref_id, &abb);
+
+ // mark as inactive
+ ref.set_inactive();
+ return true;
+}
+
+bool item_get_active(BVHHandle p_handle) const {
+ uint32_t ref_id = p_handle.id();
+ const ItemRef &ref = _refs[ref_id];
+ return ref.is_active();
+}
+
+// during collision testing, we want to set the mask and pairable status for the item we are testing from
+void item_fill_cullparams(BVHHandle p_handle, CullParams &r_params) const {
+ uint32_t ref_id = p_handle.id();
+ const ItemExtra &extra = _extra[ref_id];
+
+ // when testing from a non-pairable item, we only want to test against pairable items
+ r_params.test_pairable_only = extra.pairable == 0;
+
+ // we take into account the mask of the item we are testing from
+ r_params.mask = extra.pairable_mask;
+ r_params.pairable_type = extra.pairable_type;
+}
+
+bool item_is_pairable(const BVHHandle &p_handle) {
+ uint32_t ref_id = p_handle.id();
+ const ItemExtra &extra = _extra[ref_id];
+ return extra.pairable != 0;
+}
+
+void item_get_ABB(const BVHHandle &p_handle, BVHABB_CLASS &r_abb) {
+ // change tree?
+ uint32_t ref_id = p_handle.id();
+ const ItemRef &ref = _refs[ref_id];
+
+ TNode &tnode = _nodes[ref.tnode_id];
+ TLeaf &leaf = _node_get_leaf(tnode);
+
+ r_abb = leaf.get_aabb(ref.item_id);
+}
+
+bool item_set_pairable(const BVHHandle &p_handle, bool p_pairable, uint32_t p_pairable_type, uint32_t p_pairable_mask) {
+ // change tree?
+ uint32_t ref_id = p_handle.id();
+
+ ItemExtra &ex = _extra[ref_id];
+ ItemRef &ref = _refs[ref_id];
+
+ bool active = ref.is_active();
+ bool pairable_changed = (ex.pairable != 0) != p_pairable;
+ bool state_changed = pairable_changed || (ex.pairable_type != p_pairable_type) || (ex.pairable_mask != p_pairable_mask);
+
+ ex.pairable_type = p_pairable_type;
+ ex.pairable_mask = p_pairable_mask;
+
+ if (active && pairable_changed) {
+ // record abb
+ TNode &tnode = _nodes[ref.tnode_id];
+ TLeaf &leaf = _node_get_leaf(tnode);
+ BVHABB_CLASS abb = leaf.get_aabb(ref.item_id);
+
+ // make sure current tree is correct prior to changing
+ _current_tree = _handle_get_tree_id(p_handle);
+
+ // remove from old tree
+ node_remove_item(ref_id);
+
+ // we must set the pairable AFTER getting the current tree
+ // because the pairable status determines which tree
+ ex.pairable = p_pairable;
+
+ // add to new tree
+ _current_tree = _handle_get_tree_id(p_handle);
+ create_root_node(_current_tree);
+
+ // we must choose where to add to tree
+ ref.tnode_id = _logic_choose_item_add_node(_root_node_id[_current_tree], abb);
+ bool needs_refit = _node_add_item(ref.tnode_id, ref_id, abb);
+
+ // only need to refit from the PARENT
+ if (needs_refit) {
+ // only need to refit from the parent
+ const TNode &add_node = _nodes[ref.tnode_id];
+ if (add_node.parent_id != BVHCommon::INVALID) {
+ refit_upward_and_balance(add_node.parent_id);
+ }
+ }
+ } else {
+ // always keep this up to date
+ ex.pairable = p_pairable;
+ }
+
+ return state_changed;
+}
+
+void incremental_optimize() {
+ // first update all aabbs as a one-off step...
+ // this is cheaper than doing it on each move, as each leaf may get touched multiple times
+ // in a frame.
+ for (int n = 0; n < NUM_TREES; n++) {
+ if (_root_node_id[n] != BVHCommon::INVALID) {
+ refit_branch(_root_node_id[n]);
+ }
+ }
+
+ // now do small section reinserting to get things moving
+ // gradually, and keep items in the right leaf
+ if (_current_active_ref >= _active_refs.size()) {
+ _current_active_ref = 0;
+ }
+
+ // special case
+ if (!_active_refs.size()) {
+ return;
+ }
+
+ uint32_t ref_id = _active_refs[_current_active_ref++];
+
+ _logic_item_remove_and_reinsert(ref_id);
+
+#ifdef BVH_VERBOSE
+ /*
+ // memory use
+ int mem_refs = _refs.estimate_memory_use();
+ int mem_nodes = _nodes.estimate_memory_use();
+ int mem_leaves = _leaves.estimate_memory_use();
+
+ String sz;
+ sz += "mem_refs : " + itos(mem_refs) + " ";
+ sz += "mem_nodes : " + itos(mem_nodes) + " ";
+ sz += "mem_leaves : " + itos(mem_leaves) + " ";
+ sz += ", num nodes : " + itos(_nodes.size());
+ print_line(sz);
+ */
+#endif
+}
+
+void update() {
+ incremental_optimize();
+
+ // keep the expansion values up to date with the world bound
+//#define BVH_ALLOW_AUTO_EXPANSION
+#ifdef BVH_ALLOW_AUTO_EXPANSION
+ if (_auto_node_expansion || _auto_pairing_expansion) {
+ BVHABB_CLASS world_bound;
+ world_bound.set_to_max_opposite_extents();
+
+ bool bound_valid = false;
+
+ for (int n = 0; n < NUM_TREES; n++) {
+ uint32_t node_id = _root_node_id[n];
+ if (node_id != BVHCommon::INVALID) {
+ world_bound.merge(_nodes[node_id].aabb);
+ bound_valid = true;
+ }
+ }
+
+ // if there are no nodes, do nothing, but if there are...
+ if (bound_valid) {
+ Bounds bb;
+ world_bound.to(bb);
+ real_t size = bb.get_longest_axis_size();
+
+ // automatic heuristic decision for best parameters.
+ // These can be overridden in project settings.
+
+ // these magic numbers are determined by experiment
+ if (_auto_node_expansion) {
+ _node_expansion = size * 0.025;
+ }
+ if (_auto_pairing_expansion) {
+ _pairing_expansion = size * 0.009;
+ }
+ }
+ }
+#endif
+}
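+
+// Worked example of the auto expansion heuristic above (illustrative numbers only):
+// with BVH_ALLOW_AUTO_EXPANSION defined and a world bound whose longest axis is 1000 units,
+//
+//	_node_expansion    = 1000 * 0.025 = 25.0   // node bounds are padded by 25 units
+//	_pairing_expansion = 1000 * 0.009 =  9.0   // pairs stay 'sticky' within 9 units
+//
+// so larger worlds tolerate larger per-frame movement before items need reinsertion.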
diff --git a/core/math/bvh_refit.inc b/core/math/bvh_refit.inc
new file mode 100644
index 0000000000..514c853ac5
--- /dev/null
+++ b/core/math/bvh_refit.inc
@@ -0,0 +1,141 @@
+void _debug_node_verify_bound(uint32_t p_node_id) {
+ TNode &node = _nodes[p_node_id];
+ BVHABB_CLASS abb_before = node.aabb;
+
+ node_update_aabb(node);
+
+ BVHABB_CLASS abb_after = node.aabb;
+ CRASH_COND(abb_before != abb_after);
+}
+
+void node_update_aabb(TNode &tnode) {
+ tnode.aabb.set_to_max_opposite_extents();
+ tnode.height = 0;
+
+ if (!tnode.is_leaf()) {
+ for (int n = 0; n < tnode.num_children; n++) {
+ uint32_t child_node_id = tnode.children[n];
+
+ // merge with child aabb
+ const TNode &tchild = _nodes[child_node_id];
+ tnode.aabb.merge(tchild.aabb);
+
+ // do heights at the same time
+ if (tchild.height > tnode.height) {
+ tnode.height = tchild.height;
+ }
+ }
+
+ // the height of a non-leaf is always one greater than that of its tallest child
+ tnode.height++;
+
+#ifdef BVH_CHECKS
+ if (!tnode.num_children) {
+ // the 'blank' aabb will screw up parent aabbs
+ WARN_PRINT("BVH_Tree::TNode no children, AABB is undefined");
+ }
+#endif
+ } else {
+ // leaf
+ const TLeaf &leaf = _node_get_leaf(tnode);
+
+ for (int n = 0; n < leaf.num_items; n++) {
+ tnode.aabb.merge(leaf.get_aabb(n));
+ }
+
+ // the leaf items are stored unexpanded; we expand only the node AABB
+ tnode.aabb.expand(_node_expansion);
+#ifdef BVH_CHECKS
+ if (!leaf.num_items) {
+ // the 'blank' aabb will screw up parent aabbs
+ WARN_PRINT("BVH_Tree::TLeaf no items, AABB is undefined");
+ }
+#endif
+ }
+}
+
+void refit_all(int p_tree_id) {
+ refit_downward(_root_node_id[p_tree_id]);
+}
+
+void refit_upward(uint32_t p_node_id) {
+ while (p_node_id != BVHCommon::INVALID) {
+ TNode &tnode = _nodes[p_node_id];
+ node_update_aabb(tnode);
+ p_node_id = tnode.parent_id;
+ }
+}
+
+void refit_upward_and_balance(uint32_t p_node_id) {
+ while (p_node_id != BVHCommon::INVALID) {
+ uint32_t before = p_node_id;
+ p_node_id = _logic_balance(p_node_id);
+
+ if (before != p_node_id) {
+ VERBOSE_PRINT("REBALANCED!");
+ }
+
+ TNode &tnode = _nodes[p_node_id];
+
+ // update overall aabb from the children
+ node_update_aabb(tnode);
+
+ p_node_id = tnode.parent_id;
+ }
+}
+
+void refit_downward(uint32_t p_node_id) {
+ TNode &tnode = _nodes[p_node_id];
+
+ // do children first
+ if (!tnode.is_leaf()) {
+ for (int n = 0; n < tnode.num_children; n++) {
+ refit_downward(tnode.children[n]);
+ }
+ }
+
+ node_update_aabb(tnode);
+}
+
+// go down to the leaves, then refit upward
+void refit_branch(uint32_t p_node_id) {
+ // our function parameters to keep on a stack
+ struct RefitParams {
+ uint32_t node_id;
+ };
+
+ // most of the iterative functionality is contained in this helper class
+ BVH_IterativeInfo<RefitParams> ii;
+
+ // alloca must allocate the stack from this function, it cannot be allocated in the
+ // helper class
+ ii.stack = (RefitParams *)alloca(ii.get_alloca_stacksize());
+
+ // seed the stack
+ ii.get_first()->node_id = p_node_id;
+
+ RefitParams rp;
+
+ // while there are still more nodes on the stack
+ while (ii.pop(rp)) {
+ TNode &tnode = _nodes[rp.node_id];
+
+ // do children first
+ if (!tnode.is_leaf()) {
+ for (int n = 0; n < tnode.num_children; n++) {
+ uint32_t child_id = tnode.children[n];
+
+ // add to the stack
+ RefitParams *child = ii.request();
+ child->node_id = child_id;
+ }
+ } else {
+ // leaf .. only refit upward if dirty
+ TLeaf &leaf = _node_get_leaf(tnode);
+ if (leaf.is_dirty()) {
+ leaf.set_dirty(false);
+ refit_upward(p_node_id);
+ }
+ }
+ } // while more nodes to pop
+}
diff --git a/core/math/bvh_split.inc b/core/math/bvh_split.inc
new file mode 100644
index 0000000000..3fcc4c7b10
--- /dev/null
+++ b/core/math/bvh_split.inc
@@ -0,0 +1,294 @@
+void _split_inform_references(uint32_t p_node_id) {
+ TNode &node = _nodes[p_node_id];
+ TLeaf &leaf = _node_get_leaf(node);
+
+ for (int n = 0; n < leaf.num_items; n++) {
+ uint32_t ref_id = leaf.get_item_ref_id(n);
+
+ ItemRef &ref = _refs[ref_id];
+ ref.tnode_id = p_node_id;
+ ref.item_id = n;
+ }
+}
+
+void _split_leaf_sort_groups_simple(int &num_a, int &num_b, uint16_t *group_a, uint16_t *group_b, const BVHABB_CLASS *temp_bounds, const BVHABB_CLASS full_bound) {
+ // special case for low leaf sizes .. should compile out statically
+ if (MAX_ITEMS < 4) {
+ uint32_t ind = group_a[0];
+
+ // add to b
+ group_b[num_b++] = ind;
+
+ // remove from a
+ group_a[0] = group_a[num_a - 1];
+ num_a--;
+ return;
+ }
+
+ Point centre = full_bound.calculate_centre();
+ Point size = full_bound.calculate_size();
+
+ int order[3];
+
+ order[0] = size.min_axis();
+ order[2] = size.max_axis();
+ order[1] = 3 - (order[0] + order[2]);
+
+ // simplest case, split on the longest axis
+ int split_axis = order[0];
+ for (int a = 0; a < num_a; a++) {
+ uint32_t ind = group_a[a];
+
+ if (temp_bounds[ind].min.coord[split_axis] > centre.coord[split_axis]) {
+ // add to b
+ group_b[num_b++] = ind;
+
+ // remove from a
+ group_a[a] = group_a[num_a - 1];
+ num_a--;
+
+ // do this one again, as it has been replaced
+ a--;
+ }
+ }
+
+ // detect when split on longest axis failed
+ int min_threshold = MAX_ITEMS / 4;
+ int min_group_size[3];
+ min_group_size[0] = MIN(num_a, num_b);
+ if (min_group_size[0] < min_threshold) {
+ // slow but sure .. first move everything back into a
+ for (int b = 0; b < num_b; b++) {
+ group_a[num_a++] = group_b[b];
+ }
+ num_b = 0;
+
+ // now calculate the best split
+ for (int axis = 1; axis < 3; axis++) {
+ split_axis = order[axis];
+ int count = 0;
+
+ for (int a = 0; a < num_a; a++) {
+ uint32_t ind = group_a[a];
+
+ if (temp_bounds[ind].min.coord[split_axis] > centre.coord[split_axis]) {
+ count++;
+ }
+ }
+
+ min_group_size[axis] = MIN(count, num_a - count);
+ } // for axis
+
+ // best axis
+ int best_axis = 0;
+ int best_min = min_group_size[0];
+ for (int axis = 1; axis < 3; axis++) {
+ if (min_group_size[axis] > best_min) {
+ best_min = min_group_size[axis];
+ best_axis = axis;
+ }
+ }
+
+ // now finally do the split
+ if (best_min > 0) {
+ split_axis = order[best_axis];
+
+ for (int a = 0; a < num_a; a++) {
+ uint32_t ind = group_a[a];
+
+ if (temp_bounds[ind].min.coord[split_axis] > centre.coord[split_axis]) {
+ // add to b
+ group_b[num_b++] = ind;
+
+ // remove from a
+ group_a[a] = group_a[num_a - 1];
+ num_a--;
+
+ // do this one again, as it has been replaced
+ a--;
+ }
+ }
+ } // if there was a split!
+ } // if the longest axis wasn't a good split
+
+ // special case, none crossed threshold
+ if (!num_b) {
+ uint32_t ind = group_a[0];
+
+ // add to b
+ group_b[num_b++] = ind;
+
+ // remove from a
+ group_a[0] = group_a[num_a - 1];
+ num_a--;
+ }
+ // opposite problem! :)
+ if (!num_a) {
+ uint32_t ind = group_b[0];
+
+ // add to a
+ group_a[num_a++] = ind;
+
+ // remove from b
+ group_b[0] = group_b[num_b - 1];
+ num_b--;
+ }
+}
+
+void _split_leaf_sort_groups(int &num_a, int &num_b, uint16_t *group_a, uint16_t *group_b, const BVHABB_CLASS *temp_bounds) {
+ BVHABB_CLASS groupb_aabb;
+ groupb_aabb.set_to_max_opposite_extents();
+ for (int n = 0; n < num_b; n++) {
+ int which = group_b[n];
+ groupb_aabb.merge(temp_bounds[which]);
+ }
+ BVHABB_CLASS groupb_aabb_new;
+
+ BVHABB_CLASS rest_aabb;
+
+ float best_size = FLT_MAX;
+ int best_candidate = -1;
+
+ // find most likely from a to move into b
+ for (int check = 0; check < num_a; check++) {
+ rest_aabb.set_to_max_opposite_extents();
+ groupb_aabb_new = groupb_aabb;
+
+ // find aabb of all the rest
+ for (int rest = 0; rest < num_a; rest++) {
+ if (rest == check) {
+ continue;
+ }
+
+ int which = group_a[rest];
+ rest_aabb.merge(temp_bounds[which]);
+ }
+
+ groupb_aabb_new.merge(temp_bounds[group_a[check]]);
+
+ // now compare the sizes
+ float size = groupb_aabb_new.get_area() + rest_aabb.get_area();
+ if (size < best_size) {
+ best_size = size;
+ best_candidate = check;
+ }
+ }
+
+ // we should now have the best, move it from group a to group b
+ group_b[num_b++] = group_a[best_candidate];
+
+ // remove best candidate from group a
+ num_a--;
+ group_a[best_candidate] = group_a[num_a];
+}
+
+uint32_t split_leaf(uint32_t p_node_id, const BVHABB_CLASS &p_added_item_aabb) {
+ return split_leaf_complex(p_node_id, p_added_item_aabb);
+}
+
+// p_added_item_aabb is the aabb of the newly inserted item
+uint32_t split_leaf_complex(uint32_t p_node_id, const BVHABB_CLASS &p_added_item_aabb) {
+ VERBOSE_PRINT("split_leaf");
+
+ // note the tnode before and AFTER splitting may be at a different address
+ // in memory because the vector could get relocated, so we need to fetch
+ // the tnode again after the split
+ BVH_ASSERT(_nodes[p_node_id].is_leaf());
+
+ // first create child leaf nodes
+ uint32_t *child_ids = (uint32_t *)alloca(sizeof(uint32_t) * MAX_CHILDREN);
+
+ for (int n = 0; n < MAX_CHILDREN; n++) {
+ // create node children
+ TNode *child_node = _nodes.request(child_ids[n]);
+
+ child_node->clear();
+
+ // back link to parent
+ child_node->parent_id = p_node_id;
+
+ // make each child a leaf node
+ node_make_leaf(child_ids[n]);
+ }
+
+ // don't get any leaves or nodes till AFTER the split
+ TNode &tnode = _nodes[p_node_id];
+ uint32_t orig_leaf_id = tnode.get_leaf_id();
+ const TLeaf &orig_leaf = _node_get_leaf(tnode);
+
+ // store the final child ids
+ for (int n = 0; n < MAX_CHILDREN; n++) {
+ tnode.children[n] = child_ids[n];
+ }
+
+ // mark as no longer a leaf node
+ tnode.num_children = MAX_CHILDREN;
+
+ // 2 groups, A and B, and assign children to each to split equally
+ int max_children = orig_leaf.num_items + 1; // plus 1 for the wildcard .. the item being added
+ //CRASH_COND(max_children > MAX_CHILDREN);
+
+ uint16_t *group_a = (uint16_t *)alloca(sizeof(uint16_t) * max_children);
+ uint16_t *group_b = (uint16_t *)alloca(sizeof(uint16_t) * max_children);
+
+ // we are copying the ABBs. This is ugly, but we need one extra for the inserted item...
+ BVHABB_CLASS *temp_bounds = (BVHABB_CLASS *)alloca(sizeof(BVHABB_CLASS) * max_children);
+
+ int num_a = max_children;
+ int num_b = 0;
+
+ // setup - start with all in group a
+ for (int n = 0; n < orig_leaf.num_items; n++) {
+ group_a[n] = n;
+ temp_bounds[n] = orig_leaf.get_aabb(n);
+ }
+ // wildcard
+ int wildcard = orig_leaf.num_items;
+
+ group_a[wildcard] = wildcard;
+ temp_bounds[wildcard] = p_added_item_aabb;
+
+ // we can choose here either an equal split, or just 1 in the new leaf
+ _split_leaf_sort_groups_simple(num_a, num_b, group_a, group_b, temp_bounds, tnode.aabb);
+
+ uint32_t wildcard_node = BVHCommon::INVALID;
+
+ // now there should be roughly equal numbers in both groups
+ for (int n = 0; n < num_a; n++) {
+ int which = group_a[n];
+
+ if (which != wildcard) {
+ const BVHABB_CLASS &source_item_aabb = orig_leaf.get_aabb(which);
+ uint32_t source_item_ref_id = orig_leaf.get_item_ref_id(which);
+ //const Item &source_item = orig_leaf.get_item(which);
+ _node_add_item(tnode.children[0], source_item_ref_id, source_item_aabb);
+ } else {
+ wildcard_node = tnode.children[0];
+ }
+ }
+ for (int n = 0; n < num_b; n++) {
+ int which = group_b[n];
+
+ if (which != wildcard) {
+ const BVHABB_CLASS &source_item_aabb = orig_leaf.get_aabb(which);
+ uint32_t source_item_ref_id = orig_leaf.get_item_ref_id(which);
+ //const Item &source_item = orig_leaf.get_item(which);
+ _node_add_item(tnode.children[1], source_item_ref_id, source_item_aabb);
+ } else {
+ wildcard_node = tnode.children[1];
+ }
+ }
+
+ // now remove all items from the parent and replace with the child nodes
+ _leaves.free(orig_leaf_id);
+
+ // we should keep the references up to date!
+ for (int n = 0; n < MAX_CHILDREN; n++) {
+ _split_inform_references(tnode.children[n]);
+ }
+
+ refit_upward(p_node_id);
+
+ BVH_ASSERT(wildcard_node != BVHCommon::INVALID);
+ return wildcard_node;
+}
diff --git a/core/math/bvh_structs.inc b/core/math/bvh_structs.inc
new file mode 100644
index 0000000000..4133ba6c10
--- /dev/null
+++ b/core/math/bvh_structs.inc
@@ -0,0 +1,181 @@
+
+public:
+struct ItemRef {
+ uint32_t tnode_id; // -1 is invalid
+ uint32_t item_id; // in the leaf
+
+ bool is_active() const { return tnode_id != BVHCommon::INACTIVE; }
+ void set_inactive() {
+ tnode_id = BVHCommon::INACTIVE;
+ item_id = BVHCommon::INACTIVE;
+ }
+};
+
+// extra info kept in a separate parallel list to the references,
+// as it is used less often, which keeps the cache more effective
+struct ItemExtra {
+ uint32_t last_updated_tick;
+ uint32_t pairable;
+ uint32_t pairable_mask;
+ uint32_t pairable_type;
+
+ int32_t subindex;
+
+ // the active references are kept in a separate list, so that we can slowly
+ // iterate through them over many frames for the slow incremental optimize;
+ // active_ref_id is this item's index in that list.
+ uint32_t active_ref_id;
+
+ T *userdata;
+};
+
+// this is an item OR a child node, depending on whether the owning node is a leaf
+struct Item {
+ BVHABB_CLASS aabb;
+ uint32_t item_ref_id;
+};
+
+// tree leaf
+struct TLeaf {
+ uint16_t num_items;
+
+private:
+ uint16_t dirty;
+ // separate data-oriented lists for faster SIMD traversal
+ uint32_t item_ref_ids[MAX_ITEMS];
+ BVHABB_CLASS aabbs[MAX_ITEMS];
+
+public:
+ // accessors
+ BVHABB_CLASS &get_aabb(uint32_t p_id) { return aabbs[p_id]; }
+ const BVHABB_CLASS &get_aabb(uint32_t p_id) const { return aabbs[p_id]; }
+
+ uint32_t &get_item_ref_id(uint32_t p_id) { return item_ref_ids[p_id]; }
+ const uint32_t &get_item_ref_id(uint32_t p_id) const { return item_ref_ids[p_id]; }
+
+ bool is_dirty() const { return dirty; }
+ void set_dirty(bool p) { dirty = p; }
+
+ void clear() {
+ num_items = 0;
+ set_dirty(true);
+ }
+ bool is_full() const { return num_items >= MAX_ITEMS; }
+
+ void remove_item_unordered(uint32_t p_id) {
+ BVH_ASSERT(p_id < num_items);
+ num_items--;
+ aabbs[p_id] = aabbs[num_items];
+ item_ref_ids[p_id] = item_ref_ids[num_items];
+ }
+
+ uint32_t request_item() {
+ if (num_items < MAX_ITEMS) {
+ uint32_t id = num_items;
+ num_items++;
+ return id;
+ }
+ return BVHCommon::INVALID;
+ }
+};
+
+// tree node
+struct TNode {
+ BVHABB_CLASS aabb;
+ // either number of children if positive
+ // or leaf id if negative (leaf id 0 is disallowed)
+ union {
+ int32_t num_children;
+ int32_t neg_leaf_id;
+ };
+ uint32_t parent_id; // or -1
+ uint16_t children[MAX_CHILDREN];
+
+ // height in the tree, where leaves are 0, and all above are 1+
+ // (i.e. one more than the height of the tallest child)
+ int32_t height;
+
+ bool is_leaf() const { return num_children < 0; }
+ void set_leaf_id(int id) { neg_leaf_id = -id; }
+ int get_leaf_id() const { return -neg_leaf_id; }
+
+ void clear() {
+ num_children = 0;
+ parent_id = BVHCommon::INVALID;
+ height = 0; // or -1 for testing
+
+ // for safety set to improbable value
+ aabb.set_to_max_opposite_extents();
+
+ // other members are not blanked for speed .. they may be uninitialized
+ }
+
+ bool is_full_of_children() const { return num_children >= MAX_CHILDREN; }
+
+ void remove_child_internal(uint32_t child_num) {
+ children[child_num] = children[num_children - 1];
+ num_children--;
+ }
+
+ int find_child(uint32_t p_child_node_id) {
+ BVH_ASSERT(!is_leaf());
+
+ for (int n = 0; n < num_children; n++) {
+ if (children[n] == p_child_node_id) {
+ return n;
+ }
+ }
+
+ // not found
+ return -1;
+ }
+};
+
+// instead of using linked list we maintain
+// item references (for quick lookup)
+PooledList<ItemRef, true> _refs;
+PooledList<ItemExtra, true> _extra;
+PooledList<ItemPairs> _pairs;
+
+// these 2 are not in sync .. nodes != leaves!
+PooledList<TNode, true> _nodes;
+PooledList<TLeaf, true> _leaves;
+
+// we can maintain an un-ordered list of which references are active,
+// in order to do a slow incremental optimize of the tree over each frame.
+// This will work best if dynamic objects and static objects are in a different tree.
+LocalVector<uint32_t, uint32_t, true> _active_refs;
+uint32_t _current_active_ref = 0;
+
+// instead of translating directly to the userdata output,
+// we keep an intermediate list of hits as reference IDs, which can be used
+// for pairing collision detection
+LocalVector<uint32_t, uint32_t, true> _cull_hits;
+
+// we now have multiple root nodes, allowing us to store
+// more than 1 tree. This can be more efficient, while sharing the same
+// common lists
+enum { NUM_TREES = 2 };
+
+// Tree 0 - Non pairable
+// Tree 1 - Pairable
+// This is more efficient because in physics we only need to check non-pairable items against the pairable tree.
+uint32_t _root_node_id[NUM_TREES];
+int _current_tree = 0;
+
+// these values may need tweaking according to the project
+// the bound of the world, and the average velocities of the objects
+
+// node expansion is important in the rendering tree
+// larger values give fewer re-insertions as items move...
+// but on the other hand overestimate the bounding boxes of nodes.
+// we can either use auto mode, where the expansion is based on the root node size, or specify manually
+real_t _node_expansion = 0.5;
+bool _auto_node_expansion = true;
+
+// pairing expansion important for physics pairing
+// larger values give more 'sticky' pairing, and are less likely to exhibit tunneling
+// we can either use auto mode, where the expansion is based on the root node size, or specify manually
+real_t _pairing_expansion = 0.1;
+bool _auto_pairing_expansion = true;
diff --git a/core/math/bvh_tree.h b/core/math/bvh_tree.h
new file mode 100644
index 0000000000..64c5f6e254
--- /dev/null
+++ b/core/math/bvh_tree.h
@@ -0,0 +1,422 @@
+/*************************************************************************/
+/* bvh_tree.h */
+/*************************************************************************/
+/* This file is part of: */
+/* GODOT ENGINE */
+/* https://godotengine.org */
+/*************************************************************************/
+/* Copyright (c) 2007-2021 Juan Linietsky, Ariel Manzur. */
+/* Copyright (c) 2014-2021 Godot Engine contributors (cf. AUTHORS.md). */
+/* */
+/* Permission is hereby granted, free of charge, to any person obtaining */
+/* a copy of this software and associated documentation files (the */
+/* "Software"), to deal in the Software without restriction, including */
+/* without limitation the rights to use, copy, modify, merge, publish, */
+/* distribute, sublicense, and/or sell copies of the Software, and to */
+/* permit persons to whom the Software is furnished to do so, subject to */
+/* the following conditions: */
+/* */
+/* The above copyright notice and this permission notice shall be */
+/* included in all copies or substantial portions of the Software. */
+/* */
+/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
+/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
+/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/
+/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
+/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
+/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
+/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
+/*************************************************************************/
+
+#ifndef BVH_TREE_H
+#define BVH_TREE_H
+
+// BVH Tree
+// This is an implementation of a dynamic BVH with templated leaf size.
+// This differs from most dynamic BVHs in that it can handle more than 1 object
+// in leaf nodes. This can make it far more efficient in certain circumstances.
+// It also means that the splitting logic etc. has to be completely different
+// from that of a simpler tree.
+// Note that MAX_CHILDREN should be fixed at 2 for now.
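+//
+// Illustrative instantiation (a sketch; 'MyObject', 'obj', 'aabb' and the leaf size of 4
+// are arbitrary examples, not values used elsewhere in the engine):
+//
+//	BVH_Tree<MyObject, 2, 4, true> tree;                            // pairs enabled, AABB / Vector3 bounds
+//	BVHHandle h = tree.item_add(&obj, true, aabb, 0, false, 0, 0);  // active, non-pairable item
+//	tree.item_move(h, new_aabb);
+//	tree.item_remove(h);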
+
+#include "core/math/aabb.h"
+#include "core/math/bvh_abb.h"
+#include "core/math/geometry_3d.h"
+#include "core/math/vector3.h"
+#include "core/string/print_string.h"
+#include "core/templates/local_vector.h"
+#include "core/templates/pooled_list.h"
+#include <limits.h>
+
+#define BVHABB_CLASS BVH_ABB<Bounds, Point>
+
+// never do these checks in release
+#if defined(TOOLS_ENABLED) && defined(DEBUG_ENABLED)
+//#define BVH_VERBOSE
+//#define BVH_VERBOSE_TREE
+
+//#define BVH_VERBOSE_FRAME
+//#define BVH_CHECKS
+//#define BVH_INTEGRITY_CHECKS
+#endif
+
+// debug only assert
+#ifdef BVH_CHECKS
+#define BVH_ASSERT(a) CRASH_COND((a) == false)
+#else
+#define BVH_ASSERT(a)
+#endif
+
+#ifdef BVH_VERBOSE
+#define VERBOSE_PRINT print_line
+#else
+#define VERBOSE_PRINT(a)
+#endif
+
+// really just a namespace
+struct BVHCommon {
+ // these could possibly also be the same constant,
+ // although this may be useful for debugging.
+ // or use zero for invalid and +1 based indices.
+ static const uint32_t INVALID = (0xffffffff);
+ static const uint32_t INACTIVE = (0xfffffffe);
+};
+
+// really a handle, can be anything
+// note that zero is a valid reference for the BVH .. this may involve using
+// a plus one based ID for clients that expect 0 to be invalid.
+struct BVHHandle {
+ // conversion operator
+ operator uint32_t() const { return _data; }
+ void set(uint32_t p_value) { _data = p_value; }
+
+ uint32_t _data;
+
+ void set_invalid() { _data = BVHCommon::INVALID; }
+ bool is_invalid() const { return _data == BVHCommon::INVALID; }
+ uint32_t id() const { return _data; }
+ void set_id(uint32_t p_id) { _data = p_id; }
+
+ bool operator==(const BVHHandle &p_h) const { return _data == p_h._data; }
+ bool operator!=(const BVHHandle &p_h) const { return (*this == p_h) == false; }
+};
+
+// helper class to make iterative versions of recursive functions
+template <class T>
+class BVH_IterativeInfo {
+public:
+ enum {
+ ALLOCA_STACK_SIZE = 128
+ };
+
+ int32_t depth = 1;
+ int32_t threshold = ALLOCA_STACK_SIZE - 2;
+ T *stack;
+ // only used on rare occasions when you run out of alloca memory
+ // because the tree is too unbalanced.
+ LocalVector<T> aux_stack;
+ int32_t get_alloca_stacksize() const { return ALLOCA_STACK_SIZE * sizeof(T); }
+
+ T *get_first() const {
+ return &stack[0];
+ }
+
+ // pop the last member of the stack, or return false
+ bool pop(T &r_value) {
+ if (!depth) {
+ return false;
+ }
+
+ depth--;
+ r_value = stack[depth];
+ return true;
+ }
+
+ // request new addition to stack
+ T *request() {
+ if (depth > threshold) {
+ if (aux_stack.is_empty()) {
+ aux_stack.resize(ALLOCA_STACK_SIZE * 2);
+ memcpy(aux_stack.ptr(), stack, get_alloca_stacksize());
+ } else {
+ aux_stack.resize(aux_stack.size() * 2);
+ }
+ stack = aux_stack.ptr();
+ threshold = aux_stack.size() - 2;
+ }
+ return &stack[depth++];
+ }
+};
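+
+// Illustrative usage pattern (a sketch mirroring how refit_branch in bvh_refit.inc drives
+// this helper; 'NodeData' and 'first_node_id' are hypothetical):
+//
+//	struct NodeData { uint32_t node_id; };
+//	BVH_IterativeInfo<NodeData> ii;
+//	ii.stack = (NodeData *)alloca(ii.get_alloca_stacksize()); // alloca in the caller, never in the helper
+//	ii.get_first()->node_id = first_node_id;                  // seed the stack (depth starts at 1)
+//	NodeData nd;
+//	while (ii.pop(nd)) {
+//		// process nd, pushing children via ii.request()
+//	}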
+
+template <class T, int MAX_CHILDREN, int MAX_ITEMS, bool USE_PAIRS = false, class Bounds = AABB, class Point = Vector3>
+class BVH_Tree {
+ friend class BVH;
+
+#include "bvh_pair.inc"
+#include "bvh_structs.inc"
+
+public:
+ BVH_Tree() {
+ for (int n = 0; n < NUM_TREES; n++) {
+ _root_node_id[n] = BVHCommon::INVALID;
+ }
+
+ // disallow zero leaf ids
+ // (as these ids are stored as negative numbers in the node)
+ uint32_t dummy_leaf_id;
+ _leaves.request(dummy_leaf_id);
+ }
+
+private:
+ bool node_add_child(uint32_t p_node_id, uint32_t p_child_node_id) {
+ TNode &tnode = _nodes[p_node_id];
+ if (tnode.is_full_of_children()) {
+ return false;
+ }
+
+ tnode.children[tnode.num_children] = p_child_node_id;
+ tnode.num_children += 1;
+
+ // back link in the child to the parent
+ TNode &tnode_child = _nodes[p_child_node_id];
+ tnode_child.parent_id = p_node_id;
+
+ return true;
+ }
+
+ void node_replace_child(uint32_t p_parent_id, uint32_t p_old_child_id, uint32_t p_new_child_id) {
+ TNode &parent = _nodes[p_parent_id];
+ BVH_ASSERT(!parent.is_leaf());
+
+ int child_num = parent.find_child(p_old_child_id);
+ BVH_ASSERT(child_num != BVHCommon::INVALID);
+ parent.children[child_num] = p_new_child_id;
+
+ TNode &new_child = _nodes[p_new_child_id];
+ new_child.parent_id = p_parent_id;
+ }
+
+ void node_remove_child(uint32_t p_parent_id, uint32_t p_child_id, bool p_prevent_sibling = false) {
+ TNode &parent = _nodes[p_parent_id];
+ BVH_ASSERT(!parent.is_leaf());
+
+ int child_num = parent.find_child(p_child_id);
+ BVH_ASSERT(child_num != BVHCommon::INVALID);
+
+ parent.remove_child_internal(child_num);
+
+ // no need to keep back references for children at the moment
+
+ uint32_t sibling_id; // always a node id (children store node ids, never leaf ids)
+ bool sibling_present = false;
+
+ // if there are more children, or this is the root node, don't try to delete
+ if (parent.num_children > 1) {
+ return;
+ }
+
+ // if there is 1 sibling, it can be moved to become a child of the grandparent
+ if (parent.num_children == 1) {
+ // else there is now a redundant node with one child, which can be removed
+ sibling_id = parent.children[0];
+ sibling_present = true;
+ }
+
+ // now there may be no children in this node .. in which case it can be deleted
+ // remove node if empty
+ // remove link from parent
+ uint32_t grandparent_id = parent.parent_id;
+
+ // special case for root node
+ if (grandparent_id == BVHCommon::INVALID) {
+ if (sibling_present) {
+ // change the root node
+ change_root_node(sibling_id);
+
+ // delete the old root node as no longer needed
+ _nodes.free(p_parent_id);
+ }
+
+ return;
+ }
+
+ if (sibling_present) {
+ node_replace_child(grandparent_id, p_parent_id, sibling_id);
+ } else {
+ node_remove_child(grandparent_id, p_parent_id, true);
+ }
+
+ // put the node on the free list to recycle
+ _nodes.free(p_parent_id);
+ }
+
+ // this relies on _current_tree being accurate
+ void change_root_node(uint32_t p_new_root_id) {
+ _root_node_id[_current_tree] = p_new_root_id;
+ TNode &root = _nodes[p_new_root_id];
+
+ // mark no parent
+ root.parent_id = BVHCommon::INVALID;
+ }
+
+ void node_make_leaf(uint32_t p_node_id) {
+ uint32_t child_leaf_id;
+ TLeaf *child_leaf = _leaves.request(child_leaf_id);
+ child_leaf->clear();
+
+ // zero is reserved at startup, to prevent this id being used
+ // (leaf ids are stored as negative values in the node, and a negated zero could not be distinguished from a node with no children)
+ BVH_ASSERT(child_leaf_id != 0);
+
+ TNode &node = _nodes[p_node_id];
+ node.neg_leaf_id = -(int)child_leaf_id;
+ }
+
+ void node_remove_item(uint32_t p_ref_id, BVHABB_CLASS *r_old_aabb = nullptr) {
+ // get the reference
+ ItemRef &ref = _refs[p_ref_id];
+ uint32_t owner_node_id = ref.tnode_id;
+
+ // debug draw special
+ // This may not be needed
+ if (owner_node_id == BVHCommon::INVALID) {
+ return;
+ }
+
+ TNode &tnode = _nodes[owner_node_id];
+ CRASH_COND(!tnode.is_leaf());
+
+ TLeaf &leaf = _node_get_leaf(tnode);
+
+ // if the aabb was not determining the corners of the node bound, then there is no need to refit!
+ // (optimization, as merging AABBs takes a lot of time)
+ const BVHABB_CLASS &old_aabb = leaf.get_aabb(ref.item_id);
+
+ // shrink a little to prevent using corner aabbs
+ // in order to miss the corners first we shrink by node_expansion
+ // (which is added to the overall bound of the leaf), then we also
+ // shrink by an epsilon, in order to miss out the very corner aabbs
+ // which are important in determining the bound. Any other aabb
+ // within this can be removed and not affect the overall bound.
+ BVHABB_CLASS node_bound = tnode.aabb;
+ node_bound.expand(-_node_expansion - 0.001f);
+ bool refit = true;
+
+ if (node_bound.is_other_within(old_aabb)) {
+ refit = false;
+ }
+
+ // record the old aabb if required (for incremental remove_and_reinsert)
+ if (r_old_aabb) {
+ *r_old_aabb = old_aabb;
+ }
+
+ leaf.remove_item_unordered(ref.item_id);
+
+ if (leaf.num_items) {
+ // the swapped item's reference has to be changed too, to point to the new item id
+ uint32_t swapped_ref_id = leaf.get_item_ref_id(ref.item_id);
+
+ ItemRef &swapped_ref = _refs[swapped_ref_id];
+
+ swapped_ref.item_id = ref.item_id;
+
+ // only have to refit if it is an edge item
+ // This is a VERY EXPENSIVE STEP
+ // we defer the refit updates until the update function is called once per frame
+ if (refit) {
+ leaf.set_dirty(true);
+ }
+ } else {
+ // remove node if empty
+ // remove link from parent
+ if (tnode.parent_id != BVHCommon::INVALID) {
+ // DANGER .. this can potentially end up with root node with 1 child ...
+ // we don't want this and must check for it
+
+ uint32_t parent_id = tnode.parent_id;
+
+ node_remove_child(parent_id, owner_node_id);
+ refit_upward(parent_id);
+
+ // put the node on the free list to recycle
+ _nodes.free(owner_node_id);
+ }
+
+ // else if no parent, it is the root node. Do not delete
+ }
+
+ ref.tnode_id = BVHCommon::INVALID;
+ ref.item_id = BVHCommon::INVALID; // unset
+ }
+
+ // returns true if a refit of the PARENT only is needed; the node's own AABB is calculated
+ // within this routine
+ bool _node_add_item(uint32_t p_node_id, uint32_t p_ref_id, const BVHABB_CLASS &p_aabb) {
+ ItemRef &ref = _refs[p_ref_id];
+ ref.tnode_id = p_node_id;
+
+ TNode &node = _nodes[p_node_id];
+ BVH_ASSERT(node.is_leaf());
+ TLeaf &leaf = _node_get_leaf(node);
+
+ // optimization - we only need to do a refit
+ // if the added item is changing the AABB of the node.
+ // in most cases it won't.
+ bool needs_refit = true;
+
+ // expand bound now
+ BVHABB_CLASS expanded = p_aabb;
+ expanded.expand(_node_expansion);
+
+ // the bound will only be valid if there is an item in there already
+ if (leaf.num_items) {
+ if (node.aabb.is_other_within(expanded)) {
+ // no change to node AABBs
+ needs_refit = false;
+ } else {
+ node.aabb.merge(expanded);
+ }
+ } else {
+ // bound of the node = the new aabb
+ node.aabb = expanded;
+ }
+
+ ref.item_id = leaf.request_item();
+ BVH_ASSERT(ref.item_id != BVHCommon::INVALID);
+
+ // set the aabb of the new item
+ leaf.get_aabb(ref.item_id) = p_aabb;
+
+ // back reference from the item to the item reference
+ leaf.get_item_ref_id(ref.item_id) = p_ref_id;
+
+ return needs_refit;
+ }
+
+ uint32_t _node_create_another_child(uint32_t p_node_id, const BVHABB_CLASS &p_aabb) {
+ uint32_t child_node_id;
+ TNode *child_node = _nodes.request(child_node_id);
+ child_node->clear();
+
+ // may not be necessary
+ child_node->aabb = p_aabb;
+
+ node_add_child(p_node_id, child_node_id);
+
+ return child_node_id;
+ }
+
+#include "bvh_cull.inc"
+#include "bvh_debug.inc"
+#include "bvh_integrity.inc"
+#include "bvh_logic.inc"
+#include "bvh_misc.inc"
+#include "bvh_public.inc"
+#include "bvh_refit.inc"
+#include "bvh_split.inc"
+};
+
+#undef VERBOSE_PRINT
+
+#endif // BVH_TREE_H
diff --git a/core/math/rect2.h b/core/math/rect2.h
index 512499bdb2..1dc027cf72 100644
--- a/core/math/rect2.h
+++ b/core/math/rect2.h
@@ -182,13 +182,17 @@ struct Rect2 {
inline Rect2 grow(real_t p_amount) const {
Rect2 g = *this;
- g.position.x -= p_amount;
- g.position.y -= p_amount;
- g.size.width += p_amount * 2;
- g.size.height += p_amount * 2;
+ g.grow_by(p_amount);
return g;
}
+ inline void grow_by(real_t p_amount) {
+ position.x -= p_amount;
+ position.y -= p_amount;
+ size.width += p_amount * 2;
+ size.height += p_amount * 2;
+ }
+
inline Rect2 grow_side(Side p_side, real_t p_amount) const {
Rect2 g = *this;
g = g.grow_individual((SIDE_LEFT == p_side) ? p_amount : 0,
diff --git a/core/math/vector2.h b/core/math/vector2.h
index edc6e3a3ef..6abe0f5ea9 100644
--- a/core/math/vector2.h
+++ b/core/math/vector2.h
@@ -37,18 +37,26 @@
struct Vector2i;
struct Vector2 {
+ static const int AXIS_COUNT = 2;
+
enum Axis {
AXIS_X,
AXIS_Y,
};
union {
- real_t x = 0;
- real_t width;
- };
- union {
- real_t y = 0;
- real_t height;
+ struct {
+ union {
+ real_t x;
+ real_t width;
+ };
+ union {
+ real_t y;
+ real_t height;
+ };
+ };
+
+ real_t coord[2] = { 0 };
};
_FORCE_INLINE_ real_t &operator[](int p_idx) {
@@ -58,6 +66,18 @@ struct Vector2 {
return p_idx ? y : x;
}
+ _FORCE_INLINE_ void set_all(real_t p_value) {
+ x = y = p_value;
+ }
+
+ _FORCE_INLINE_ int min_axis() const {
+ return x < y ? 0 : 1;
+ }
+
+ _FORCE_INLINE_ int max_axis() const {
+ return x < y ? 1 : 0;
+ }
+
void normalize();
Vector2 normalized() const;
bool is_normalized() const;
diff --git a/core/math/vector3.cpp b/core/math/vector3.cpp
index f0629d3db8..d4317d506c 100644
--- a/core/math/vector3.cpp
+++ b/core/math/vector3.cpp
@@ -52,14 +52,6 @@ real_t Vector3::get_axis(int p_axis) const {
return operator[](p_axis);
}
-int Vector3::min_axis() const {
- return x < y ? (x < z ? 0 : 2) : (y < z ? 1 : 2);
-}
-
-int Vector3::max_axis() const {
- return x < y ? (y < z ? 2 : 1) : (x < z ? 2 : 0);
-}
-
void Vector3::snap(Vector3 p_step) {
x = Math::snapped(x, p_step.x);
y = Math::snapped(y, p_step.y);
diff --git a/core/math/vector3.h b/core/math/vector3.h
index 377581bb45..b47c3cc916 100644
--- a/core/math/vector3.h
+++ b/core/math/vector3.h
@@ -38,6 +38,8 @@
class Basis;
struct Vector3 {
+ static const int AXIS_COUNT = 3;
+
enum Axis {
AXIS_X,
AXIS_Y,
@@ -65,8 +67,17 @@ struct Vector3 {
void set_axis(int p_axis, real_t p_value);
real_t get_axis(int p_axis) const;
- int min_axis() const;
- int max_axis() const;
+ _FORCE_INLINE_ void set_all(real_t p_value) {
+ x = y = z = p_value;
+ }
+
+ _FORCE_INLINE_ int min_axis() const {
+ return x < y ? (x < z ? 0 : 2) : (y < z ? 1 : 2);
+ }
+
+ _FORCE_INLINE_ int max_axis() const {
+ return x < y ? (y < z ? 2 : 1) : (x < z ? 2 : 0);
+ }
_FORCE_INLINE_ real_t length() const;
_FORCE_INLINE_ real_t length_squared() const;
diff --git a/core/os/dir_access.h b/core/os/dir_access.h
index 7f0bcd372d..ec738d30d5 100644
--- a/core/os/dir_access.h
+++ b/core/os/dir_access.h
@@ -87,7 +87,7 @@ public:
virtual bool is_readable(String p_dir) { return true; };
virtual bool is_writable(String p_dir) { return true; };
static bool exists(String p_dir);
- virtual size_t get_space_left() = 0;
+ virtual uint64_t get_space_left() = 0;
Error copy_dir(String p_from, String p_to, int p_chmod_flags = -1);
virtual Error copy(String p_from, String p_to, int p_chmod_flags = -1);
diff --git a/core/os/file_access.cpp b/core/os/file_access.cpp
index ad234c2d49..d00d0ac5bb 100644
--- a/core/os/file_access.cpp
+++ b/core/os/file_access.cpp
@@ -367,10 +367,10 @@ Vector<String> FileAccess::get_csv_line(const String &p_delim) const {
return strings;
}
-int FileAccess::get_buffer(uint8_t *p_dst, int p_length) const {
+uint64_t FileAccess::get_buffer(uint8_t *p_dst, uint64_t p_length) const {
ERR_FAIL_COND_V(!p_dst && p_length > 0, -1);
- ERR_FAIL_COND_V(p_length < 0, -1);
- int i = 0;
+
+ uint64_t i = 0;
for (i = 0; i < p_length && !eof_reached(); i++) {
p_dst[i] = get_8();
}
@@ -380,11 +380,11 @@ int FileAccess::get_buffer(uint8_t *p_dst, int p_length) const {
String FileAccess::get_as_utf8_string() const {
Vector<uint8_t> sourcef;
- int len = get_len();
+ uint64_t len = get_len();
sourcef.resize(len + 1);
uint8_t *w = sourcef.ptrw();
- int r = get_buffer(w, len);
+ uint64_t r = get_buffer(w, len);
ERR_FAIL_COND_V(r != len, String());
w[len] = 0;
@@ -550,8 +550,8 @@ void FileAccess::store_csv_line(const Vector<String> &p_values, const String &p_
store_line(line);
}
-void FileAccess::store_buffer(const uint8_t *p_src, int p_length) {
- for (int i = 0; i < p_length; i++) {
+void FileAccess::store_buffer(const uint8_t *p_src, uint64_t p_length) {
+ for (uint64_t i = 0; i < p_length; i++) {
store_8(p_src[i]);
}
}
@@ -601,7 +601,7 @@ String FileAccess::get_md5(const String &p_file) {
unsigned char step[32768];
while (true) {
- int br = f->get_buffer(step, 32768);
+ uint64_t br = f->get_buffer(step, 32768);
if (br > 0) {
ctx.update(step, br);
}
@@ -629,7 +629,7 @@ String FileAccess::get_multiple_md5(const Vector<String> &p_file) {
unsigned char step[32768];
while (true) {
- int br = f->get_buffer(step, 32768);
+ uint64_t br = f->get_buffer(step, 32768);
if (br > 0) {
ctx.update(step, br);
}
@@ -658,7 +658,7 @@ String FileAccess::get_sha256(const String &p_file) {
unsigned char step[32768];
while (true) {
- int br = f->get_buffer(step, 32768);
+ uint64_t br = f->get_buffer(step, 32768);
if (br > 0) {
ctx.update(step, br);
}
diff --git a/core/os/file_access.h b/core/os/file_access.h
index 1c78204c1d..f9749c0fd1 100644
--- a/core/os/file_access.h
+++ b/core/os/file_access.h
@@ -93,10 +93,10 @@ public:
virtual String get_path() const { return ""; } /// returns the path for the current open file
virtual String get_path_absolute() const { return ""; } /// returns the absolute path for the current open file
- virtual void seek(size_t p_position) = 0; ///< seek to a given position
- virtual void seek_end(int64_t p_position = 0) = 0; ///< seek from the end of file
- virtual size_t get_position() const = 0; ///< get position in the file
- virtual size_t get_len() const = 0; ///< get size of the file
+ virtual void seek(uint64_t p_position) = 0; ///< seek to a given position
+ virtual void seek_end(int64_t p_position = 0) = 0; ///< seek from the end of file with negative offset
+ virtual uint64_t get_position() const = 0; ///< get position in the file
+ virtual uint64_t get_len() const = 0; ///< get size of the file
virtual bool eof_reached() const = 0; ///< reading passed EOF
@@ -109,7 +109,7 @@ public:
virtual double get_double() const;
virtual real_t get_real() const;
- virtual int get_buffer(uint8_t *p_dst, int p_length) const; ///< get an array of bytes
+ virtual uint64_t get_buffer(uint8_t *p_dst, uint64_t p_length) const; ///< get an array of bytes
virtual String get_line() const;
virtual String get_token() const;
virtual Vector<String> get_csv_line(const String &p_delim = ",") const;
@@ -142,7 +142,7 @@ public:
virtual void store_pascal_string(const String &p_string);
virtual String get_pascal_string();
- virtual void store_buffer(const uint8_t *p_src, int p_length); ///< store an array of bytes
+ virtual void store_buffer(const uint8_t *p_src, uint64_t p_length); ///< store an array of bytes
virtual bool file_exists(const String &p_name) = 0; ///< return true if a file exists
diff --git a/core/os/keyboard.cpp b/core/os/keyboard.cpp
index 4b2cafd8fe..4c5f0b5220 100644
--- a/core/os/keyboard.cpp
+++ b/core/os/keyboard.cpp
@@ -60,7 +60,7 @@ static const _KeyCodeText _keycodes[] = {
{KEY_PAGEUP ,"PageUp"},
{KEY_PAGEDOWN ,"PageDown"},
{KEY_SHIFT ,"Shift"},
- {KEY_CONTROL ,"Control"},
+ {KEY_CTRL ,"Ctrl"},
#ifdef OSX_ENABLED
{KEY_META ,"Command"},
#else
@@ -314,7 +314,7 @@ bool keycode_has_unicode(uint32_t p_keycode) {
case KEY_PAGEUP:
case KEY_PAGEDOWN:
case KEY_SHIFT:
- case KEY_CONTROL:
+ case KEY_CTRL:
case KEY_META:
case KEY_ALT:
case KEY_CAPSLOCK:
@@ -401,7 +401,7 @@ String keycode_get_string(uint32_t p_code) {
codestr += "+";
}
if (p_code & KEY_MASK_CTRL) {
- codestr += find_keycode_name(KEY_CONTROL);
+ codestr += find_keycode_name(KEY_CTRL);
codestr += "+";
}
if (p_code & KEY_MASK_META) {
diff --git a/core/os/keyboard.h b/core/os/keyboard.h
index f6fe5fc070..33f9213c4e 100644
--- a/core/os/keyboard.h
+++ b/core/os/keyboard.h
@@ -68,7 +68,7 @@ enum Key {
KEY_PAGEUP = SPKEY | 0x13,
KEY_PAGEDOWN = SPKEY | 0x14,
KEY_SHIFT = SPKEY | 0x15,
- KEY_CONTROL = SPKEY | 0x16,
+ KEY_CTRL = SPKEY | 0x16,
KEY_META = SPKEY | 0x17,
KEY_ALT = SPKEY | 0x18,
KEY_CAPSLOCK = SPKEY | 0x19,
diff --git a/core/templates/pooled_list.h b/core/templates/pooled_list.h
new file mode 100644
index 0000000000..b4a6d2d1dd
--- /dev/null
+++ b/core/templates/pooled_list.h
@@ -0,0 +1,95 @@
+/*************************************************************************/
+/* pooled_list.h */
+/*************************************************************************/
+/* This file is part of: */
+/* GODOT ENGINE */
+/* https://godotengine.org */
+/*************************************************************************/
+/* Copyright (c) 2007-2021 Juan Linietsky, Ariel Manzur. */
+/* Copyright (c) 2014-2021 Godot Engine contributors (cf. AUTHORS.md). */
+/* */
+/* Permission is hereby granted, free of charge, to any person obtaining */
+/* a copy of this software and associated documentation files (the */
+/* "Software"), to deal in the Software without restriction, including */
+/* without limitation the rights to use, copy, modify, merge, publish, */
+/* distribute, sublicense, and/or sell copies of the Software, and to */
+/* permit persons to whom the Software is furnished to do so, subject to */
+/* the following conditions: */
+/* */
+/* The above copyright notice and this permission notice shall be */
+/* included in all copies or substantial portions of the Software. */
+/* */
+/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
+/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
+/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/
+/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
+/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
+/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
+/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
+/*************************************************************************/
+
+#pragma once
+
+// Simple template providing a pool with O(1) allocate and free.
+// The freelist could alternatively be a linked list placed within the unused elements
+// to use less memory; however, a separate freelist is probably more cache friendly.
+
+// NOTE: Take great care when using this with non-POD types. Construction and destruction
+// are done in the LocalVector, NOT as part of the pool. So requesting a new item does not
+// guarantee a constructor is run, and freeing an item does not guarantee a destructor.
+// You should generally clear an item explicitly after a request,
+// as it may contain 'leftovers' from a previous use.
+// This is by design, for fastest use in the BVH. If you want a more general pool
+// that does call constructors / destructors on request / free, it should probably be
+// a separate template.
+
+#include "core/templates/local_vector.h"
+
+template <class T, bool force_trivial = false>
+class PooledList {
+ LocalVector<T, uint32_t, force_trivial> list;
+ LocalVector<uint32_t, uint32_t, true> freelist;
+
+ // not all list members are necessarily used
+ int _used_size;
+
+public:
+ PooledList() {
+ _used_size = 0;
+ }
+
+ int estimate_memory_use() const {
+ return (list.size() * sizeof(T)) + (freelist.size() * sizeof(uint32_t));
+ }
+
+ const T &operator[](uint32_t p_index) const {
+ return list[p_index];
+ }
+ T &operator[](uint32_t p_index) {
+ return list[p_index];
+ }
+
+ int size() const { return _used_size; }
+
+ T *request(uint32_t &r_id) {
+ _used_size++;
+
+ if (freelist.size()) {
+ // pop from freelist
+ int new_size = freelist.size() - 1;
+ r_id = freelist[new_size];
+ freelist.resize(new_size);
+ return &list[r_id];
+ }
+
+ r_id = list.size();
+ list.resize(r_id + 1);
+ return &list[r_id];
+ }
+ void free(const uint32_t &p_id) {
+		// the id must be in range and must not already be on the freelist (only the range is checked here)
+ CRASH_COND(p_id >= list.size());
+ freelist.push_back(p_id);
+ _used_size--;
+ }
+};
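
A minimal usage sketch for the new PooledList, assuming a hypothetical Item element type; it follows the header comment's advice to clear a requested slot explicitly, since the pool intentionally skips construction and destruction:

#include "core/templates/pooled_list.h"

struct Item {
	uint32_t flags = 0;
};

void pooled_list_example() {
	PooledList<Item> pool;

	uint32_t id;
	Item *item = pool.request(id); // O(1): reuses a freelist slot or grows the backing LocalVector
	*item = Item(); // clear 'leftovers' manually; request() does not run a constructor

	item->flags = 42;
	// ... pool[id] stays valid (by index) until the slot is freed ...

	pool.free(id); // O(1): the index goes back onto the freelist for reuse
}
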
diff --git a/core/variant/callable.h b/core/variant/callable.h
index d91bebfa5f..20d0804292 100644
--- a/core/variant/callable.h
+++ b/core/variant/callable.h
@@ -44,9 +44,9 @@ class CallableCustom;
// is required. It is designed for the standard case (object and method)
// but can be optimized or customized.
+// Enforce 16 bytes with `alignas` to avoid arch-specific alignment issues on x86 vs armv7.
class Callable {
- //needs to be max 16 bytes in 64 bits
- StringName method;
+ alignas(8) StringName method;
union {
uint64_t object = 0;
CallableCustom *custom;
@@ -138,8 +138,9 @@ public:
// be put inside a Variant, but it is not
// used by the engine itself.
+// Enforce 16 bytes with `alignas` to avoid arch-specific alignment issues on x86 vs armv7.
class Signal {
- StringName name;
+ alignas(8) StringName name;
ObjectID object;
public:
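
The alignas(8) on the first member is what keeps Callable and Signal at a consistent 16 bytes: on 32-bit x86 the uint64_t in the union only requires 4-byte alignment, so without the attribute the struct would shrink to 12 bytes there while staying 16 on armv7 and 64-bit targets. A standalone sketch of that layout effect, using simplified stand-in types rather than the real StringName and ObjectID:

#include <cstdint>

// Stand-in for a pointer-sized engine type (illustration only).
struct FakeStringName {
	void *ptr = nullptr;
};

struct CallableLayout {
	alignas(8) FakeStringName method; // raises the struct's alignment to 8 everywhere
	union {
		uint64_t object;
		void *custom;
	};
};

// Without alignas, ILP32 x86 (where alignof(uint64_t) == 4) would pack this into
// 12 bytes while armv7 and 64-bit ABIs produce 16; the forced 8-byte alignment
// pads the tail so every target agrees on 16 bytes.
static_assert(sizeof(CallableLayout) == 16, "Callable-like layout must stay 16 bytes");
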