215 files changed, 7876 insertions, 6710 deletions
diff --git a/core/config/project_settings.cpp b/core/config/project_settings.cpp index 3a7fc828aa..c9b615fb0a 100644 --- a/core/config/project_settings.cpp +++ b/core/config/project_settings.cpp @@ -1240,6 +1240,9 @@ ProjectSettings::ProjectSettings() { custom_prop_info["rendering/driver/threads/thread_model"] = PropertyInfo(Variant::INT, "rendering/driver/threads/thread_model", PROPERTY_HINT_ENUM, "Single-Unsafe,Single-Safe,Multi-Threaded"); GLOBAL_DEF("physics/2d/run_on_separate_thread", false); GLOBAL_DEF("physics/3d/run_on_separate_thread", false); + // Required to make the project setting appear even if the physics engine is GodotPhysics, + // while also making it appear in the ProjectSettings class documentation. + GLOBAL_DEF("physics/3d/smooth_trimesh_collision", false); GLOBAL_DEF("debug/settings/profiler/max_functions", 16384); custom_prop_info["debug/settings/profiler/max_functions"] = PropertyInfo(Variant::INT, "debug/settings/profiler/max_functions", PROPERTY_HINT_RANGE, "128,65535,1"); diff --git a/core/extension/gdnative_interface.h b/core/extension/gdnative_interface.h index a2e0c7cc73..62934d1d73 100644 --- a/core/extension/gdnative_interface.h +++ b/core/extension/gdnative_interface.h @@ -460,8 +460,8 @@ typedef enum { GDNATIVE_INITIALIZATION_CORE, GDNATIVE_INITIALIZATION_SERVERS, GDNATIVE_INITIALIZATION_SCENE, - GDNATIVE_INITIALIZATION_EDITOR, GDNATIVE_INITIALIZATION_DRIVER, + GDNATIVE_INITIALIZATION_EDITOR, GDNATIVE_MAX_INITIALIZATION_LEVEL, } GDNativeInitializationLevel; diff --git a/core/extension/native_extension.cpp b/core/extension/native_extension.cpp index e1db99fe5d..325ccec6c4 100644 --- a/core/extension/native_extension.cpp +++ b/core/extension/native_extension.cpp @@ -325,6 +325,7 @@ void NativeExtension::_bind_methods() { BIND_ENUM_CONSTANT(INITIALIZATION_LEVEL_CORE); BIND_ENUM_CONSTANT(INITIALIZATION_LEVEL_SERVERS); BIND_ENUM_CONSTANT(INITIALIZATION_LEVEL_SCENE); + BIND_ENUM_CONSTANT(INITIALIZATION_LEVEL_DRIVER); BIND_ENUM_CONSTANT(INITIALIZATION_LEVEL_EDITOR); } diff --git a/core/extension/native_extension.h b/core/extension/native_extension.h index b98e4925d2..ebfedfb29a 100644 --- a/core/extension/native_extension.h +++ b/core/extension/native_extension.h @@ -71,8 +71,8 @@ public: INITIALIZATION_LEVEL_CORE, INITIALIZATION_LEVEL_SERVERS, INITIALIZATION_LEVEL_SCENE, - INITIALIZATION_LEVEL_EDITOR, INITIALIZATION_LEVEL_DRIVER, + INITIALIZATION_LEVEL_EDITOR, }; bool is_library_open() const; diff --git a/core/input/input_map.cpp b/core/input/input_map.cpp index 41083b4c47..ab94c00999 100644 --- a/core/input/input_map.cpp +++ b/core/input/input_map.cpp @@ -344,7 +344,7 @@ static const _BuiltinActionDisplayName _builtin_action_display_names[] = { { "ui_filedialog_refresh", TTRC("Refresh") }, { "ui_filedialog_show_hidden", TTRC("Show Hidden") }, { "ui_swap_input_direction ", TTRC("Swap Input Direction") }, - { "", TTRC("")} + { "", ""} /* clang-format on */ }; diff --git a/core/io/pck_packer.cpp b/core/io/pck_packer.cpp index 272ace3438..b3bf0cff2d 100644 --- a/core/io/pck_packer.cpp +++ b/core/io/pck_packer.cpp @@ -257,10 +257,7 @@ Error PCKPacker::flush(bool p_verbose) { count += 1; const int file_num = files.size(); if (p_verbose && (file_num > 0)) { - if (count % 100 == 0) { - printf("%i/%i (%.2f)\r", count, file_num, float(count) / file_num * 100); - fflush(stdout); - } + print_line(vformat("[%d/%d - %d%%] PCKPacker flush: %s -> %s", count, file_num, float(count) / file_num * 100, files[i].src_path, files[i].path)); } } diff --git a/core/math/basis.cpp 
b/core/math/basis.cpp index e34c1c1315..84f9d12bb1 100644 --- a/core/math/basis.cpp +++ b/core/math/basis.cpp @@ -37,7 +37,7 @@ (elements[row1][col1] * elements[row2][col2] - elements[row1][col2] * elements[row2][col1]) void Basis::from_z(const Vector3 &p_z) { - if (Math::abs(p_z.z) > Math_SQRT12) { + if (Math::abs(p_z.z) > (real_t)Math_SQRT12) { // choose p in y-z plane real_t a = p_z[1] * p_z[1] + p_z[2] * p_z[2]; real_t k = 1.0f / Math::sqrt(a); @@ -153,7 +153,7 @@ Basis Basis::diagonalize() { int ite = 0; Basis acc_rot; - while (off_matrix_norm_2 > CMP_EPSILON2 && ite++ < ite_max) { + while (off_matrix_norm_2 > (real_t)CMP_EPSILON2 && ite++ < ite_max) { real_t el01_2 = elements[0][1] * elements[0][1]; real_t el02_2 = elements[0][2] * elements[0][2]; real_t el12_2 = elements[1][2] * elements[1][2]; @@ -463,8 +463,8 @@ Vector3 Basis::get_euler(EulerOrder p_order) const { Vector3 euler; real_t sy = elements[0][2]; - if (sy < (1.0f - CMP_EPSILON)) { - if (sy > -(1.0f - CMP_EPSILON)) { + if (sy < (1.0f - (real_t)CMP_EPSILON)) { + if (sy > -(1.0f - (real_t)CMP_EPSILON)) { // is this a pure Y rotation? if (elements[1][0] == 0 && elements[0][1] == 0 && elements[1][2] == 0 && elements[2][1] == 0 && elements[1][1] == 1) { // return the simplest form (human friendlier in editor and scripts) @@ -498,8 +498,8 @@ Vector3 Basis::get_euler(EulerOrder p_order) const { Vector3 euler; real_t sz = elements[0][1]; - if (sz < (1.0f - CMP_EPSILON)) { - if (sz > -(1.0f - CMP_EPSILON)) { + if (sz < (1.0f - (real_t)CMP_EPSILON)) { + if (sz > -(1.0f - (real_t)CMP_EPSILON)) { euler.x = Math::atan2(elements[2][1], elements[1][1]); euler.y = Math::atan2(elements[0][2], elements[0][0]); euler.z = Math::asin(-sz); @@ -529,8 +529,8 @@ Vector3 Basis::get_euler(EulerOrder p_order) const { real_t m12 = elements[1][2]; - if (m12 < (1 - CMP_EPSILON)) { - if (m12 > -(1 - CMP_EPSILON)) { + if (m12 < (1 - (real_t)CMP_EPSILON)) { + if (m12 > -(1 - (real_t)CMP_EPSILON)) { // is this a pure X rotation? 
if (elements[1][0] == 0 && elements[0][1] == 0 && elements[0][2] == 0 && elements[2][0] == 0 && elements[0][0] == 1) { // return the simplest form (human friendlier in editor and scripts) @@ -565,8 +565,8 @@ Vector3 Basis::get_euler(EulerOrder p_order) const { Vector3 euler; real_t sz = elements[1][0]; - if (sz < (1.0f - CMP_EPSILON)) { - if (sz > -(1.0f - CMP_EPSILON)) { + if (sz < (1.0f - (real_t)CMP_EPSILON)) { + if (sz > -(1.0f - (real_t)CMP_EPSILON)) { euler.x = Math::atan2(-elements[1][2], elements[1][1]); euler.y = Math::atan2(-elements[2][0], elements[0][0]); euler.z = Math::asin(sz); @@ -593,8 +593,8 @@ Vector3 Basis::get_euler(EulerOrder p_order) const { // -cx*sy sx cx*cy Vector3 euler; real_t sx = elements[2][1]; - if (sx < (1.0f - CMP_EPSILON)) { - if (sx > -(1.0f - CMP_EPSILON)) { + if (sx < (1.0f - (real_t)CMP_EPSILON)) { + if (sx > -(1.0f - (real_t)CMP_EPSILON)) { euler.x = Math::asin(sx); euler.y = Math::atan2(-elements[2][0], elements[2][2]); euler.z = Math::atan2(-elements[0][1], elements[1][1]); @@ -621,8 +621,8 @@ Vector3 Basis::get_euler(EulerOrder p_order) const { // -sy cy*sx cy*cx Vector3 euler; real_t sy = elements[2][0]; - if (sy < (1.0f - CMP_EPSILON)) { - if (sy > -(1.0f - CMP_EPSILON)) { + if (sy < (1.0f - (real_t)CMP_EPSILON)) { + if (sy > -(1.0f - (real_t)CMP_EPSILON)) { euler.x = Math::atan2(elements[2][1], elements[2][2]); euler.y = Math::asin(-sy); euler.z = Math::atan2(elements[1][0], elements[0][0]); diff --git a/core/math/camera_matrix.cpp b/core/math/camera_matrix.cpp index f5d746ef0f..f4392c74b7 100644 --- a/core/math/camera_matrix.cpp +++ b/core/math/camera_matrix.cpp @@ -436,9 +436,7 @@ void CameraMatrix::invert() { int pvt_i[4], pvt_j[4]; /* Locations of pivot matrix */ real_t pvt_val; /* Value of current pivot element */ real_t hold; /* Temporary storage */ - real_t determinat; /* Determinant */ - - determinat = 1.0; + real_t determinant = 1.0f; for (k = 0; k < 4; k++) { /** Locate k'th pivot element **/ pvt_val = matrix[k][k]; /** Initialize for search **/ @@ -446,7 +444,7 @@ void CameraMatrix::invert() { pvt_j[k] = k; for (i = k; i < 4; i++) { for (j = k; j < 4; j++) { - if (Math::absd(matrix[i][j]) > Math::absd(pvt_val)) { + if (Math::abs(matrix[i][j]) > Math::abs(pvt_val)) { pvt_i[k] = i; pvt_j[k] = j; pvt_val = matrix[i][j]; @@ -455,9 +453,9 @@ void CameraMatrix::invert() { } /** Product of pivots, gives determinant when finished **/ - determinat *= pvt_val; - if (Math::absd(determinat) < 1e-7) { - return; //(false); /** Matrix is singular (zero determinant). **/ + determinant *= pvt_val; + if (Math::is_zero_approx(determinant)) { + return; /** Matrix is singular (zero determinant). 
**/ } /** "Interchange" rows (with sign change stuff) **/ diff --git a/core/math/color.h b/core/math/color.h index 429807e4a6..b90a0f33a2 100644 --- a/core/math/color.h +++ b/core/math/color.h @@ -138,7 +138,7 @@ struct _NO_DISCARD_ Color { float cMax = MAX(cRed, MAX(cGreen, cBlue)); - float expp = MAX(-B - 1.0f, floor(Math::log(cMax) / Math_LN2)) + 1.0f + B; + float expp = MAX(-B - 1.0f, floor(Math::log(cMax) / (real_t)Math_LN2)) + 1.0f + B; float sMax = (float)floor((cMax / Math::pow(2.0f, expp - B - N)) + 0.5f); diff --git a/core/math/face3.cpp b/core/math/face3.cpp index 9c968df19b..5bc1bc25e6 100644 --- a/core/math/face3.cpp +++ b/core/math/face3.cpp @@ -42,7 +42,7 @@ int Face3::split_by_plane(const Plane &p_plane, Face3 p_res[3], bool p_is_point_ int below_count = 0; for (int i = 0; i < 3; i++) { - if (p_plane.has_point(vertex[i], CMP_EPSILON)) { // point is in plane + if (p_plane.has_point(vertex[i], (real_t)CMP_EPSILON)) { // point is in plane ERR_FAIL_COND_V(above_count >= 4, 0); above[above_count++] = vertex[i]; @@ -117,7 +117,7 @@ bool Face3::intersects_segment(const Vector3 &p_from, const Vector3 &p_dir, Vect bool Face3::is_degenerate() const { Vector3 normal = vec3_cross(vertex[0] - vertex[1], vertex[0] - vertex[2]); - return (normal.length_squared() < CMP_EPSILON2); + return (normal.length_squared() < (real_t)CMP_EPSILON2); } Face3::Side Face3::get_side_of(const Face3 &p_face, ClockDirection p_clock_dir) const { diff --git a/core/math/geometry_2d.cpp b/core/math/geometry_2d.cpp index b1af91c49c..46b7d99b43 100644 --- a/core/math/geometry_2d.cpp +++ b/core/math/geometry_2d.cpp @@ -218,10 +218,10 @@ Vector<Vector<Point2>> Geometry2D::_polypaths_do_operation(PolyBooleanOperation // Need to scale points (Clipper's requirement for robust computation). for (int i = 0; i != p_polypath_a.size(); ++i) { - path_a << IntPoint(p_polypath_a[i].x * SCALE_FACTOR, p_polypath_a[i].y * SCALE_FACTOR); + path_a << IntPoint(p_polypath_a[i].x * (real_t)SCALE_FACTOR, p_polypath_a[i].y * (real_t)SCALE_FACTOR); } for (int i = 0; i != p_polypath_b.size(); ++i) { - path_b << IntPoint(p_polypath_b[i].x * SCALE_FACTOR, p_polypath_b[i].y * SCALE_FACTOR); + path_b << IntPoint(p_polypath_b[i].x * (real_t)SCALE_FACTOR, p_polypath_b[i].y * (real_t)SCALE_FACTOR); } Clipper clp; clp.AddPath(path_a, ptSubject, !is_a_open); // Forward compatible with Clipper 10.0.0. @@ -246,8 +246,8 @@ Vector<Vector<Point2>> Geometry2D::_polypaths_do_operation(PolyBooleanOperation for (Paths::size_type j = 0; j < scaled_path.size(); ++j) { polypath.push_back(Point2( - static_cast<real_t>(scaled_path[j].X) / SCALE_FACTOR, - static_cast<real_t>(scaled_path[j].Y) / SCALE_FACTOR)); + static_cast<real_t>(scaled_path[j].X) / (real_t)SCALE_FACTOR, + static_cast<real_t>(scaled_path[j].Y) / (real_t)SCALE_FACTOR)); } polypaths.push_back(polypath); } @@ -290,17 +290,17 @@ Vector<Vector<Point2>> Geometry2D::_polypath_offset(const Vector<Point2> &p_poly et = etOpenRound; break; } - ClipperOffset co(2.0, 0.25f * SCALE_FACTOR); // Defaults from ClipperOffset. + ClipperOffset co(2.0, 0.25f * (real_t)SCALE_FACTOR); // Defaults from ClipperOffset. Path path; // Need to scale points (Clipper's requirement for robust computation). 
for (int i = 0; i != p_polypath.size(); ++i) { - path << IntPoint(p_polypath[i].x * SCALE_FACTOR, p_polypath[i].y * SCALE_FACTOR); + path << IntPoint(p_polypath[i].x * (real_t)SCALE_FACTOR, p_polypath[i].y * (real_t)SCALE_FACTOR); } co.AddPath(path, jt, et); Paths paths; - co.Execute(paths, p_delta * SCALE_FACTOR); // Inflate/deflate. + co.Execute(paths, p_delta * (real_t)SCALE_FACTOR); // Inflate/deflate. // Have to scale points down now. Vector<Vector<Point2>> polypaths; @@ -312,8 +312,8 @@ Vector<Vector<Point2>> Geometry2D::_polypath_offset(const Vector<Point2> &p_poly for (Paths::size_type j = 0; j < scaled_path.size(); ++j) { polypath.push_back(Point2( - static_cast<real_t>(scaled_path[j].X) / SCALE_FACTOR, - static_cast<real_t>(scaled_path[j].Y) / SCALE_FACTOR)); + static_cast<real_t>(scaled_path[j].X) / (real_t)SCALE_FACTOR, + static_cast<real_t>(scaled_path[j].Y) / (real_t)SCALE_FACTOR)); } polypaths.push_back(polypath); } diff --git a/core/math/geometry_2d.h b/core/math/geometry_2d.h index 4fdb8ee36a..62786d69be 100644 --- a/core/math/geometry_2d.h +++ b/core/math/geometry_2d.h @@ -51,20 +51,20 @@ public: real_t f = d2.dot(r); real_t s, t; // Check if either or both segments degenerate into points. - if (a <= CMP_EPSILON && e <= CMP_EPSILON) { + if (a <= (real_t)CMP_EPSILON && e <= (real_t)CMP_EPSILON) { // Both segments degenerate into points. c1 = p1; c2 = p2; return Math::sqrt((c1 - c2).dot(c1 - c2)); } - if (a <= CMP_EPSILON) { + if (a <= (real_t)CMP_EPSILON) { // First segment degenerates into a point. s = 0.0; t = f / e; // s = 0 => t = (b*s + f) / e = f / e t = CLAMP(t, 0.0f, 1.0f); } else { real_t c = d1.dot(r); - if (e <= CMP_EPSILON) { + if (e <= (real_t)CMP_EPSILON) { // Second segment degenerates into a point. t = 0.0; s = CLAMP(-c / a, 0.0f, 1.0f); // t = 0 => s = (b*t - c) / a = -c / a @@ -185,7 +185,7 @@ public: D = Vector2(D.x * Bn.x + D.y * Bn.y, D.y * Bn.x - D.x * Bn.y); // Fail if C x B and D x B have the same sign (segments don't intersect). - if ((C.y < -CMP_EPSILON && D.y < -CMP_EPSILON) || (C.y > CMP_EPSILON && D.y > CMP_EPSILON)) { + if ((C.y < (real_t)-CMP_EPSILON && D.y < (real_t)-CMP_EPSILON) || (C.y > (real_t)CMP_EPSILON && D.y > (real_t)CMP_EPSILON)) { return false; } @@ -198,7 +198,7 @@ public: real_t ABpos = D.x + (C.x - D.x) * D.y / (D.y - C.y); // Fail if segment C-D crosses line A-B outside of segment A-B. - if (ABpos < 0 || ABpos > 1.0f) { + if ((ABpos < 0) || (ABpos > 1)) { return false; } diff --git a/core/math/geometry_3d.cpp b/core/math/geometry_3d.cpp index 7eeb37df46..bd22bffb1f 100644 --- a/core/math/geometry_3d.cpp +++ b/core/math/geometry_3d.cpp @@ -879,7 +879,7 @@ Vector<Vector3> Geometry3D::compute_convex_mesh_points(const Plane *p_planes, in for (int n = 0; n < p_plane_count; n++) { if (n != i && n != j && n != k) { real_t dp = p_planes[n].normal.dot(convex_shape_point); - if (dp - p_planes[n].d > CMP_EPSILON) { + if (dp - p_planes[n].d > (real_t)CMP_EPSILON) { excluded = true; break; } diff --git a/core/math/geometry_3d.h b/core/math/geometry_3d.h index 482c7ea604..59c56906f4 100644 --- a/core/math/geometry_3d.h +++ b/core/math/geometry_3d.h @@ -76,7 +76,7 @@ public: real_t tc, tN, tD = D; // tc = tN / tD, default tD = D >= 0 // Compute the line parameters of the two closest points. - if (D < CMP_EPSILON) { // The lines are almost parallel. + if (D < (real_t)CMP_EPSILON) { // The lines are almost parallel. sN = 0.0f; // Force using point P0 on segment S1 sD = 1.0f; // to prevent possible division by 0.0 later. 
tN = e; @@ -142,7 +142,7 @@ public: Vector3 s = p_from - p_v0; real_t u = f * s.dot(h); - if (u < 0.0f || u > 1.0f) { + if ((u < 0.0f) || (u > 1.0f)) { return false; } @@ -150,7 +150,7 @@ public: real_t v = f * p_dir.dot(q); - if (v < 0.0f || u + v > 1.0f) { + if ((v < 0.0f) || (u + v > 1.0f)) { return false; } @@ -183,7 +183,7 @@ public: Vector3 s = p_from - p_v0; real_t u = f * s.dot(h); - if (u < 0.0f || u > 1.0f) { + if ((u < 0.0f) || (u > 1.0f)) { return false; } @@ -191,7 +191,7 @@ public: real_t v = f * rel.dot(q); - if (v < 0.0f || u + v > 1.0f) { + if ((v < 0.0f) || (u + v > 1.0f)) { return false; } @@ -199,7 +199,7 @@ public: // the intersection point is on the line. real_t t = f * e2.dot(q); - if (t > CMP_EPSILON && t <= 1.0f) { // Ray intersection. + if (t > (real_t)CMP_EPSILON && t <= 1.0f) { // Ray intersection. if (r_res) { *r_res = p_from + rel * t; } @@ -213,7 +213,7 @@ public: Vector3 sphere_pos = p_sphere_pos - p_from; Vector3 rel = (p_to - p_from); real_t rel_l = rel.length(); - if (rel_l < CMP_EPSILON) { + if (rel_l < (real_t)CMP_EPSILON) { return false; // Both points are the same. } Vector3 normal = rel / rel_l; @@ -229,7 +229,7 @@ public: real_t inters_d2 = p_sphere_radius * p_sphere_radius - ray_distance * ray_distance; real_t inters_d = sphere_d; - if (inters_d2 >= CMP_EPSILON) { + if (inters_d2 >= (real_t)CMP_EPSILON) { inters_d -= Math::sqrt(inters_d2); } @@ -253,7 +253,7 @@ public: static inline bool segment_intersects_cylinder(const Vector3 &p_from, const Vector3 &p_to, real_t p_height, real_t p_radius, Vector3 *r_res = nullptr, Vector3 *r_norm = nullptr, int p_cylinder_axis = 2) { Vector3 rel = (p_to - p_from); real_t rel_l = rel.length(); - if (rel_l < CMP_EPSILON) { + if (rel_l < (real_t)CMP_EPSILON) { return false; // Both points are the same. } @@ -269,7 +269,7 @@ public: Vector3 axis_dir; - if (crs_l < CMP_EPSILON) { + if (crs_l < (real_t)CMP_EPSILON) { Vector3 side_axis; side_axis[(p_cylinder_axis + 1) % 3] = 1.0f; // Any side axis OK. axis_dir = side_axis; @@ -285,7 +285,7 @@ public: // Convert to 2D. real_t w2 = p_radius * p_radius - dist * dist; - if (w2 < CMP_EPSILON) { + if (w2 < (real_t)CMP_EPSILON) { return false; // Avoid numerical error. } Size2 size(Math::sqrt(w2), p_height * 0.5f); @@ -366,7 +366,7 @@ public: Vector3 rel = p_to - p_from; real_t rel_l = rel.length(); - if (rel_l < CMP_EPSILON) { + if (rel_l < (real_t)CMP_EPSILON) { return false; } @@ -379,7 +379,7 @@ public: real_t den = p.normal.dot(dir); - if (Math::abs(den) <= CMP_EPSILON) { + if (Math::abs(den) <= (real_t)CMP_EPSILON) { continue; // Ignore parallel plane. } @@ -564,11 +564,11 @@ public: for (int a = 0; a < polygon.size(); a++) { real_t dist = p_plane.distance_to(polygon[a]); - if (dist < -CMP_POINT_IN_PLANE_EPSILON) { + if (dist < (real_t)-CMP_POINT_IN_PLANE_EPSILON) { location_cache[a] = LOC_INSIDE; inside_count++; } else { - if (dist > CMP_POINT_IN_PLANE_EPSILON) { + if (dist > (real_t)CMP_POINT_IN_PLANE_EPSILON) { location_cache[a] = LOC_OUTSIDE; outside_count++; } else { diff --git a/core/math/math_funcs.h b/core/math/math_funcs.h index 47e5ab2709..8c0b87cf4a 100644 --- a/core/math/math_funcs.h +++ b/core/math/math_funcs.h @@ -64,7 +64,7 @@ public: static _ALWAYS_INLINE_ float sinc(float p_x) { return p_x == 0 ? 1 : ::sin(p_x) / p_x; } static _ALWAYS_INLINE_ double sinc(double p_x) { return p_x == 0 ? 
1 : ::sin(p_x) / p_x; } - static _ALWAYS_INLINE_ float sincn(float p_x) { return sinc(Math_PI * p_x); } + static _ALWAYS_INLINE_ float sincn(float p_x) { return sinc((float)Math_PI * p_x); } static _ALWAYS_INLINE_ double sincn(double p_x) { return sinc(Math_PI * p_x); } static _ALWAYS_INLINE_ double cosh(double p_x) { return ::cosh(p_x); } @@ -187,7 +187,7 @@ public: static _ALWAYS_INLINE_ double fposmod(double p_x, double p_y) { double value = Math::fmod(p_x, p_y); - if ((value < 0 && p_y > 0) || (value > 0 && p_y < 0)) { + if (((value < 0) && (p_y > 0)) || ((value > 0) && (p_y < 0))) { value += p_y; } value += 0.0; @@ -195,7 +195,7 @@ public: } static _ALWAYS_INLINE_ float fposmod(float p_x, float p_y) { float value = Math::fmod(p_x, p_y); - if ((value < 0 && p_y > 0) || (value > 0 && p_y < 0)) { + if (((value < 0) && (p_y > 0)) || ((value > 0) && (p_y < 0))) { value += p_y; } value += 0.0f; @@ -220,17 +220,17 @@ public: static _ALWAYS_INLINE_ int64_t posmod(int64_t p_x, int64_t p_y) { int64_t value = p_x % p_y; - if ((value < 0 && p_y > 0) || (value > 0 && p_y < 0)) { + if (((value < 0) && (p_y > 0)) || ((value > 0) && (p_y < 0))) { value += p_y; } return value; } static _ALWAYS_INLINE_ double deg2rad(double p_y) { return p_y * (Math_PI / 180.0); } - static _ALWAYS_INLINE_ float deg2rad(float p_y) { return p_y * (Math_PI / 180.0); } + static _ALWAYS_INLINE_ float deg2rad(float p_y) { return p_y * (float)(Math_PI / 180.0); } static _ALWAYS_INLINE_ double rad2deg(double p_y) { return p_y * (180.0 / Math_PI); } - static _ALWAYS_INLINE_ float rad2deg(float p_y) { return p_y * (180.0 / Math_PI); } + static _ALWAYS_INLINE_ float rad2deg(float p_y) { return p_y * (float)(180.0 / Math_PI); } static _ALWAYS_INLINE_ double lerp(double p_from, double p_to, double p_weight) { return p_from + (p_to - p_from) * p_weight; } static _ALWAYS_INLINE_ float lerp(float p_from, float p_to, float p_weight) { return p_from + (p_to - p_from) * p_weight; } @@ -285,10 +285,10 @@ public: static _ALWAYS_INLINE_ float move_toward(float p_from, float p_to, float p_delta) { return abs(p_to - p_from) <= p_delta ? p_to : p_from + SIGN(p_to - p_from) * p_delta; } static _ALWAYS_INLINE_ double linear2db(double p_linear) { return Math::log(p_linear) * 8.6858896380650365530225783783321; } - static _ALWAYS_INLINE_ float linear2db(float p_linear) { return Math::log(p_linear) * 8.6858896380650365530225783783321; } + static _ALWAYS_INLINE_ float linear2db(float p_linear) { return Math::log(p_linear) * (float)8.6858896380650365530225783783321; } static _ALWAYS_INLINE_ double db2linear(double p_db) { return Math::exp(p_db * 0.11512925464970228420089957273422); } - static _ALWAYS_INLINE_ float db2linear(float p_db) { return Math::exp(p_db * 0.11512925464970228420089957273422); } + static _ALWAYS_INLINE_ float db2linear(float p_db) { return Math::exp(p_db * (float)0.11512925464970228420089957273422); } static _ALWAYS_INLINE_ double round(double p_val) { return ::round(p_val); } static _ALWAYS_INLINE_ float round(float p_val) { return ::roundf(p_val); } @@ -345,9 +345,9 @@ public: return true; } // Then check for approximate equality. 
- float tolerance = CMP_EPSILON * abs(a); - if (tolerance < CMP_EPSILON) { - tolerance = CMP_EPSILON; + float tolerance = (float)CMP_EPSILON * abs(a); + if (tolerance < (float)CMP_EPSILON) { + tolerance = (float)CMP_EPSILON; } return abs(a - b) < tolerance; } @@ -362,7 +362,7 @@ public: } static _ALWAYS_INLINE_ bool is_zero_approx(float s) { - return abs(s) < CMP_EPSILON; + return abs(s) < (float)CMP_EPSILON; } static _ALWAYS_INLINE_ bool is_equal_approx(double a, double b) { diff --git a/core/math/plane.cpp b/core/math/plane.cpp index 0ce8aed51c..6881ad4014 100644 --- a/core/math/plane.cpp +++ b/core/math/plane.cpp @@ -106,7 +106,7 @@ bool Plane::intersects_ray(const Vector3 &p_from, const Vector3 &p_dir, Vector3 real_t dist = (normal.dot(p_from) - d) / den; //printf("dist is %i\n",dist); - if (dist > CMP_EPSILON) { //this is a ray, before the emitting pos (p_from) doesn't exist + if (dist > (real_t)CMP_EPSILON) { //this is a ray, before the emitting pos (p_from) doesn't exist return false; } @@ -129,7 +129,7 @@ bool Plane::intersects_segment(const Vector3 &p_begin, const Vector3 &p_end, Vec real_t dist = (normal.dot(p_begin) - d) / den; //printf("dist is %i\n",dist); - if (dist < -CMP_EPSILON || dist > (1.0f + CMP_EPSILON)) { + if (dist < (real_t)-CMP_EPSILON || dist > (1.0f + (real_t)CMP_EPSILON)) { return false; } diff --git a/core/math/quaternion.cpp b/core/math/quaternion.cpp index ade252d628..0a650a8578 100644 --- a/core/math/quaternion.cpp +++ b/core/math/quaternion.cpp @@ -129,7 +129,7 @@ Quaternion Quaternion::slerp(const Quaternion &p_to, const real_t &p_weight) con // calculate coefficients - if ((1.0f - cosom) > CMP_EPSILON) { + if ((1.0f - cosom) > (real_t)CMP_EPSILON) { // standard case (slerp) omega = Math::acos(cosom); sinom = Math::sin(omega); diff --git a/core/math/quaternion.h b/core/math/quaternion.h index f8a2c6456e..38729ac3df 100644 --- a/core/math/quaternion.h +++ b/core/math/quaternion.h @@ -145,7 +145,7 @@ struct _NO_DISCARD_ Quaternion { Vector3 c = v0.cross(v1); real_t d = v0.dot(v1); - if (d < -1.0f + CMP_EPSILON) { + if (d < -1.0f + (real_t)CMP_EPSILON) { x = 0; y = 1; z = 0; diff --git a/core/math/random_pcg.h b/core/math/random_pcg.h index 974dbbfc2e..65fcf67664 100644 --- a/core/math/random_pcg.h +++ b/core/math/random_pcg.h @@ -129,7 +129,7 @@ public: return p_mean + p_deviation * (cos(Math_TAU * randd()) * sqrt(-2.0 * log(randd()))); // Box-Muller transform } _FORCE_INLINE_ float randfn(float p_mean, float p_deviation) { - return p_mean + p_deviation * (cos(Math_TAU * randf()) * sqrt(-2.0 * log(randf()))); // Box-Muller transform + return p_mean + p_deviation * (cos((float)Math_TAU * randf()) * sqrt(-2.0 * log(randf()))); // Box-Muller transform } double random(double p_from, double p_to); diff --git a/core/math/transform_2d.cpp b/core/math/transform_2d.cpp index 55c1f06ff5..71953e4130 100644 --- a/core/math/transform_2d.cpp +++ b/core/math/transform_2d.cpp @@ -71,12 +71,12 @@ void Transform2D::rotate(const real_t p_phi) { real_t Transform2D::get_skew() const { real_t det = basis_determinant(); - return Math::acos(elements[0].normalized().dot(SIGN(det) * elements[1].normalized())) - Math_PI * 0.5f; + return Math::acos(elements[0].normalized().dot(SIGN(det) * elements[1].normalized())) - (real_t)Math_PI * 0.5f; } void Transform2D::set_skew(const real_t p_angle) { real_t det = basis_determinant(); - elements[1] = SIGN(det) * elements[0].rotated((Math_PI * 0.5f + p_angle)).normalized() * elements[1].length(); + elements[1] = SIGN(det) * 
elements[0].rotated(((real_t)Math_PI * 0.5f + p_angle)).normalized() * elements[1].length(); } real_t Transform2D::get_rotation() const { diff --git a/core/math/vector2.cpp b/core/math/vector2.cpp index ed4266b115..a27227905c 100644 --- a/core/math/vector2.cpp +++ b/core/math/vector2.cpp @@ -163,7 +163,7 @@ Vector2 Vector2::move_toward(const Vector2 &p_to, const real_t p_delta) const { Vector2 v = *this; Vector2 vd = p_to - v; real_t len = vd.length(); - return len <= p_delta || len < CMP_EPSILON ? p_to : v + vd / len * p_delta; + return len <= p_delta || len < (real_t)CMP_EPSILON ? p_to : v + vd / len * p_delta; } // slide returns the component of the vector along the given plane, specified by its normal vector. diff --git a/core/math/vector3.cpp b/core/math/vector3.cpp index 9b3902346e..87b2ac7104 100644 --- a/core/math/vector3.cpp +++ b/core/math/vector3.cpp @@ -97,7 +97,7 @@ Vector3 Vector3::move_toward(const Vector3 &p_to, const real_t p_delta) const { Vector3 v = *this; Vector3 vd = p_to - v; real_t len = vd.length(); - return len <= p_delta || len < CMP_EPSILON ? p_to : v + vd / len * p_delta; + return len <= p_delta || len < (real_t)CMP_EPSILON ? p_to : v + vd / len * p_delta; } Vector2 Vector3::octahedron_encode() const { diff --git a/core/object/undo_redo.cpp b/core/object/undo_redo.cpp index f72dec8edf..b78328fb42 100644 --- a/core/object/undo_redo.cpp +++ b/core/object/undo_redo.cpp @@ -34,6 +34,20 @@ #include "core/os/os.h" #include "core/templates/local_vector.h" +void UndoRedo::Operation::delete_reference() { + if (type != Operation::TYPE_REFERENCE) { + return; + } + if (ref.is_valid()) { + ref.unref(); + } else { + Object *obj = ObjectDB::get_instance(object); + if (obj) { + memdelete(obj); + } + } +} + void UndoRedo::_discard_redo() { if (current_action == actions.size() - 1) { return; @@ -41,16 +55,7 @@ void UndoRedo::_discard_redo() { for (int i = current_action + 1; i < actions.size(); i++) { for (Operation &E : actions.write[i].do_ops) { - if (E.type == Operation::TYPE_REFERENCE) { - if (E.ref.is_valid()) { - E.ref.unref(); - } else { - Object *obj = ObjectDB::get_instance(E.object); - if (obj) { - memdelete(obj); - } - } - } + E.delete_reference(); } //ERASE do data } @@ -97,13 +102,7 @@ void UndoRedo::create_action(const String &p_name, MergeMode p_mode) { for (unsigned int i = 0; i < to_remove.size(); i++) { List<Operation>::Element *E = to_remove[i]; // Delete all object references - if (E->get().type == Operation::TYPE_REFERENCE) { - Object *obj = ObjectDB::get_instance(E->get().object); - - if (obj) { - memdelete(obj); - } - } + E->get().delete_reference(); E->erase(); } } @@ -270,16 +269,7 @@ void UndoRedo::_pop_history_tail() { } for (Operation &E : actions.write[0].undo_ops) { - if (E.type == Operation::TYPE_REFERENCE) { - if (E.ref.is_valid()) { - E.ref.unref(); - } else { - Object *obj = ObjectDB::get_instance(E.object); - if (obj) { - memdelete(obj); - } - } - } + E.delete_reference(); } actions.remove_at(0); diff --git a/core/object/undo_redo.h b/core/object/undo_redo.h index 75639f8abf..5eede74e2d 100644 --- a/core/object/undo_redo.h +++ b/core/object/undo_redo.h @@ -66,6 +66,8 @@ private: ObjectID object; StringName name; Variant args[VARIANT_ARG_MAX]; + + void delete_reference(); }; struct Action { diff --git a/core/typedefs.h b/core/typedefs.h index 5929b5123b..2c32d102da 100644 --- a/core/typedefs.h +++ b/core/typedefs.h @@ -103,7 +103,7 @@ #endif #ifndef SIGN -#define SIGN(m_v) (((m_v) == 0) ? (0.0) : (((m_v) < 0) ? 
(-1.0) : (+1.0))) +#define SIGN(m_v) (((m_v) == 0) ? (0.0f) : (((m_v) < 0) ? (-1.0f) : (+1.0f))) #endif #ifndef MIN diff --git a/doc/classes/Area2D.xml b/doc/classes/Area2D.xml index ed3f873251..1eb74768f5 100644 --- a/doc/classes/Area2D.xml +++ b/doc/classes/Area2D.xml @@ -118,8 +118,8 @@ Emitted when one of another Area2D's [Shape2D]s enters one of this Area2D's [Shape2D]s. Requires [member monitoring] to be set to [code]true[/code]. [code]area_rid[/code] the [RID] of the other Area2D's [CollisionObject2D] used by the [PhysicsServer2D]. [code]area[/code] the other Area2D. - [code]area_shape_index[/code] the index of the [Shape2D] of the other Area2D used by the [PhysicsServer2D]. Get the [CollisionShape2D] node with [code]area.shape_owner_get_owner(area_shape_index)[/code]. - [code]local_shape_index[/code] the index of the [Shape2D] of this Area2D used by the [PhysicsServer2D]. Get the [CollisionShape2D] node with [code]self.shape_owner_get_owner(local_shape_index)[/code]. + [code]area_shape_index[/code] the index of the [Shape2D] of the other Area2D used by the [PhysicsServer2D]. Get the [CollisionShape2D] node with [code]area.shape_owner_get_owner(area.shape_find_owner(area_shape_index))[/code]. + [code]local_shape_index[/code] the index of the [Shape2D] of this Area2D used by the [PhysicsServer2D]. Get the [CollisionShape2D] node with [code]self.shape_owner_get_owner(self.shape_find_owner(local_shape_index))[/code]. </description> </signal> <signal name="area_shape_exited"> @@ -131,8 +131,8 @@ Emitted when one of another Area2D's [Shape2D]s exits one of this Area2D's [Shape2D]s. Requires [member monitoring] to be set to [code]true[/code]. [code]area_rid[/code] the [RID] of the other Area2D's [CollisionObject2D] used by the [PhysicsServer2D]. [code]area[/code] the other Area2D. - [code]area_shape_index[/code] the index of the [Shape2D] of the other Area2D used by the [PhysicsServer2D]. Get the [CollisionShape2D] node with [code]area.shape_owner_get_owner(area_shape_index)[/code]. - [code]local_shape_index[/code] the index of the [Shape2D] of this Area2D used by the [PhysicsServer2D]. Get the [CollisionShape2D] node with [code]self.shape_owner_get_owner(local_shape_index)[/code]. + [code]area_shape_index[/code] the index of the [Shape2D] of the other Area2D used by the [PhysicsServer2D]. Get the [CollisionShape2D] node with [code]area.shape_owner_get_owner(area.shape_find_owner(area_shape_index))[/code]. + [code]local_shape_index[/code] the index of the [Shape2D] of this Area2D used by the [PhysicsServer2D]. Get the [CollisionShape2D] node with [code]self.shape_owner_get_owner(self.shape_find_owner(local_shape_index))[/code]. </description> </signal> <signal name="body_entered"> @@ -158,8 +158,8 @@ Emitted when one of a [PhysicsBody2D] or [TileMap]'s [Shape2D]s enters one of this Area2D's [Shape2D]s. Requires [member monitoring] to be set to [code]true[/code]. [TileMap]s are detected if the [TileSet] has Collision [Shape2D]s. [code]body_rid[/code] the [RID] of the [PhysicsBody2D] or [TileSet]'s [CollisionObject2D] used by the [PhysicsServer2D]. [code]body[/code] the [Node], if it exists in the tree, of the [PhysicsBody2D] or [TileMap]. - [code]body_shape_index[/code] the index of the [Shape2D] of the [PhysicsBody2D] or [TileMap] used by the [PhysicsServer2D]. Get the [CollisionShape2D] node with [code]body.shape_owner_get_owner(body_shape_index)[/code]. - [code]local_shape_index[/code] the index of the [Shape2D] of this Area2D used by the [PhysicsServer2D]. 
Get the [CollisionShape2D] node with [code]self.shape_owner_get_owner(local_shape_index)[/code]. + [code]body_shape_index[/code] the index of the [Shape2D] of the [PhysicsBody2D] or [TileMap] used by the [PhysicsServer2D]. Get the [CollisionShape2D] node with [code]body.shape_owner_get_owner(body.shape_find_owner(body_shape_index))[/code]. + [code]local_shape_index[/code] the index of the [Shape2D] of this Area2D used by the [PhysicsServer2D]. Get the [CollisionShape2D] node with [code]self.shape_owner_get_owner(self.shape_find_owner(local_shape_index))[/code]. </description> </signal> <signal name="body_shape_exited"> @@ -171,8 +171,8 @@ Emitted when one of a [PhysicsBody2D] or [TileMap]'s [Shape2D]s exits one of this Area2D's [Shape2D]s. Requires [member monitoring] to be set to [code]true[/code]. [TileMap]s are detected if the [TileSet] has Collision [Shape2D]s. [code]body_rid[/code] the [RID] of the [PhysicsBody2D] or [TileSet]'s [CollisionObject2D] used by the [PhysicsServer2D]. [code]body[/code] the [Node], if it exists in the tree, of the [PhysicsBody2D] or [TileMap]. - [code]body_shape_index[/code] the index of the [Shape2D] of the [PhysicsBody2D] or [TileMap] used by the [PhysicsServer2D]. Get the [CollisionShape2D] node with [code]body.shape_owner_get_owner(body_shape_index)[/code]. - [code]local_shape_index[/code] the index of the [Shape2D] of this Area2D used by the [PhysicsServer2D]. Get the [CollisionShape2D] node with [code]self.shape_owner_get_owner(local_shape_index)[/code]. + [code]body_shape_index[/code] the index of the [Shape2D] of the [PhysicsBody2D] or [TileMap] used by the [PhysicsServer2D]. Get the [CollisionShape2D] node with [code]body.shape_owner_get_owner(body.shape_find_owner(body_shape_index))[/code]. + [code]local_shape_index[/code] the index of the [Shape2D] of this Area2D used by the [PhysicsServer2D]. Get the [CollisionShape2D] node with [code]self.shape_owner_get_owner(self.shape_find_owner(local_shape_index))[/code]. </description> </signal> </signals> diff --git a/doc/classes/Area3D.xml b/doc/classes/Area3D.xml index 3d893c1ae4..7d14fd825b 100644 --- a/doc/classes/Area3D.xml +++ b/doc/classes/Area3D.xml @@ -137,8 +137,8 @@ Emitted when one of another Area3D's [Shape3D]s enters one of this Area3D's [Shape3D]s. Requires [member monitoring] to be set to [code]true[/code]. [code]area_rid[/code] the [RID] of the other Area3D's [CollisionObject3D] used by the [PhysicsServer3D]. [code]area[/code] the other Area3D. - [code]area_shape_index[/code] the index of the [Shape3D] of the other Area3D used by the [PhysicsServer3D]. Get the [CollisionShape3D] node with [code]area.shape_owner_get_owner(area_shape_index)[/code]. - [code]local_shape_index[/code] the index of the [Shape3D] of this Area3D used by the [PhysicsServer3D]. Get the [CollisionShape3D] node with [code]self.shape_owner_get_owner(local_shape_index)[/code]. + [code]area_shape_index[/code] the index of the [Shape3D] of the other Area3D used by the [PhysicsServer3D]. Get the [CollisionShape3D] node with [code]area.shape_owner_get_owner(area.shape_find_owner(area_shape_index))[/code]. + [code]local_shape_index[/code] the index of the [Shape3D] of this Area3D used by the [PhysicsServer3D]. Get the [CollisionShape3D] node with [code]self.shape_owner_get_owner(self.shape_find_owner(local_shape_index))[/code]. </description> </signal> <signal name="area_shape_exited"> @@ -150,8 +150,8 @@ Emitted when one of another Area3D's [Shape3D]s exits one of this Area3D's [Shape3D]s. 
Requires [member monitoring] to be set to [code]true[/code]. [code]area_rid[/code] the [RID] of the other Area3D's [CollisionObject3D] used by the [PhysicsServer3D]. [code]area[/code] the other Area3D. - [code]area_shape_index[/code] the index of the [Shape3D] of the other Area3D used by the [PhysicsServer3D]. Get the [CollisionShape3D] node with [code]area.shape_owner_get_owner(area_shape_index)[/code]. - [code]local_shape_index[/code] the index of the [Shape3D] of this Area3D used by the [PhysicsServer3D]. Get the [CollisionShape3D] node with [code]self.shape_owner_get_owner(local_shape_index)[/code]. + [code]area_shape_index[/code] the index of the [Shape3D] of the other Area3D used by the [PhysicsServer3D]. Get the [CollisionShape3D] node with [code]area.shape_owner_get_owner(area.shape_find_owner(area_shape_index))[/code]. + [code]local_shape_index[/code] the index of the [Shape3D] of this Area3D used by the [PhysicsServer3D]. Get the [CollisionShape3D] node with [code]self.shape_owner_get_owner(self.shape_find_owner(local_shape_index))[/code]. </description> </signal> <signal name="body_entered"> @@ -177,8 +177,8 @@ Emitted when one of a [PhysicsBody3D] or [GridMap]'s [Shape3D]s enters one of this Area3D's [Shape3D]s. Requires [member monitoring] to be set to [code]true[/code]. [GridMap]s are detected if the [MeshLibrary] has Collision [Shape3D]s. [code]body_rid[/code] the [RID] of the [PhysicsBody3D] or [MeshLibrary]'s [CollisionObject3D] used by the [PhysicsServer3D]. [code]body[/code] the [Node], if it exists in the tree, of the [PhysicsBody3D] or [GridMap]. - [code]body_shape_index[/code] the index of the [Shape3D] of the [PhysicsBody3D] or [GridMap] used by the [PhysicsServer3D]. Get the [CollisionShape3D] node with [code]body.shape_owner_get_owner(body_shape_index)[/code]. - [code]local_shape_index[/code] the index of the [Shape3D] of this Area3D used by the [PhysicsServer3D]. Get the [CollisionShape3D] node with [code]self.shape_owner_get_owner(local_shape_index)[/code]. + [code]body_shape_index[/code] the index of the [Shape3D] of the [PhysicsBody3D] or [GridMap] used by the [PhysicsServer3D]. Get the [CollisionShape3D] node with [code]body.shape_owner_get_owner(body.shape_find_owner(body_shape_index))[/code]. + [code]local_shape_index[/code] the index of the [Shape3D] of this Area3D used by the [PhysicsServer3D]. Get the [CollisionShape3D] node with [code]self.shape_owner_get_owner(self.shape_find_owner(local_shape_index))[/code]. </description> </signal> <signal name="body_shape_exited"> @@ -190,8 +190,8 @@ Emitted when one of a [PhysicsBody3D] or [GridMap]'s [Shape3D]s enters one of this Area3D's [Shape3D]s. Requires [member monitoring] to be set to [code]true[/code]. [GridMap]s are detected if the [MeshLibrary] has Collision [Shape3D]s. [code]body_rid[/code] the [RID] of the [PhysicsBody3D] or [MeshLibrary]'s [CollisionObject3D] used by the [PhysicsServer3D]. [code]body[/code] the [Node], if it exists in the tree, of the [PhysicsBody3D] or [GridMap]. - [code]body_shape_index[/code] the index of the [Shape3D] of the [PhysicsBody3D] or [GridMap] used by the [PhysicsServer3D]. Get the [CollisionShape3D] node with [code]body.shape_owner_get_owner(body_shape_index)[/code]. - [code]local_shape_index[/code] the index of the [Shape3D] of this Area3D used by the [PhysicsServer3D]. Get the [CollisionShape3D] node with [code]self.shape_owner_get_owner(local_shape_index)[/code]. 
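As a minimal GDScript sketch of the corrected lookup described in the added lines below (the script attachment, signal connection and node names are assumed, not taken from the diff), the per-shape index is first mapped to a shape owner with shape_find_owner() and only then resolved to the CollisionShape3D node:

```gdscript
# Sketch only: setup and names are assumed, not part of the diff.
extends Area3D

func _ready():
    body_shape_entered.connect(_on_body_shape_entered)

func _on_body_shape_entered(body_rid, body, body_shape_index, local_shape_index):
    # Map the PhysicsServer3D shape index to a shape owner first,
    # then fetch the CollisionShape3D node that owns it.
    var body_shape_node = body.shape_owner_get_owner(body.shape_find_owner(body_shape_index))
    var local_shape_node = shape_owner_get_owner(shape_find_owner(local_shape_index))
    print(body_shape_node, " entered ", local_shape_node)
```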
+ [code]body_shape_index[/code] the index of the [Shape3D] of the [PhysicsBody3D] or [GridMap] used by the [PhysicsServer3D]. Get the [CollisionShape3D] node with [code]body.shape_owner_get_owner(body.shape_find_owner(body_shape_index))[/code]. + [code]local_shape_index[/code] the index of the [Shape3D] of this Area3D used by the [PhysicsServer3D]. Get the [CollisionShape3D] node with [code]self.shape_owner_get_owner(self.shape_find_owner(local_shape_index))[/code]. </description> </signal> </signals> diff --git a/doc/classes/DisplayServer.xml b/doc/classes/DisplayServer.xml index be8811d629..5a67170086 100644 --- a/doc/classes/DisplayServer.xml +++ b/doc/classes/DisplayServer.xml @@ -544,6 +544,12 @@ <description> </description> </method> + <method name="window_get_active_popup" qualifiers="const"> + <return type="int" /> + <description> + Returns the ID of the active popup window, or [constant INVALID_WINDOW_ID] if there is none. + </description> + </method> <method name="window_get_attached_instance_id" qualifiers="const"> <return type="int" /> <argument index="0" name="window_id" type="int" default="0" /> <description> </description> </method> @@ -592,6 +598,13 @@ [b]Note:[/b] This method is implemented on Android, Linux, macOS and Windows. </description> </method> + <method name="window_get_popup_safe_rect" qualifiers="const"> + <return type="Rect2i" /> + <argument index="0" name="window" type="int" /> + <description> + Returns the bounding box of the control or menu item that was used to open the popup window, in the screen coordinate system. + </description> + </method> <method name="window_get_position" qualifiers="const"> <return type="Vector2i" /> <argument index="0" name="window_id" type="int" default="0" /> <description> @@ -749,6 +762,14 @@ [b]Note:[/b] This method is implemented on Linux, macOS and Windows. </description> </method> + <method name="window_set_popup_safe_rect"> + <return type="void" /> + <argument index="0" name="window" type="int" /> + <argument index="1" name="rect" type="Rect2i" /> + <description> + Sets the bounding box of the control or menu item that was used to open the popup window, in the screen coordinate system. Clicking this area will not auto-close the popup. + </description> + </method> <method name="window_set_position"> <return type="void" /> <argument index="0" name="position" type="Vector2i" /> <description> @@ -930,16 +951,24 @@ Regardless of the platform, enabling fullscreen will change the window size to match the monitor's size. Therefore, make sure your project supports [url=$DOCS_URL/tutorials/rendering/multiple_resolutions.html]multiple resolutions[/url] when enabling fullscreen mode. </constant> <constant name="WINDOW_FLAG_RESIZE_DISABLED" value="0" enum="WindowFlags"> + Window can't be resized by dragging its resize grip. It's still possible to resize the window using [method window_set_size]. This flag is ignored for full-screen windows. </constant> <constant name="WINDOW_FLAG_BORDERLESS" value="1" enum="WindowFlags"> + Window does not have a native title bar or other decorations. This flag is ignored for full-screen windows. </constant> <constant name="WINDOW_FLAG_ALWAYS_ON_TOP" value="2" enum="WindowFlags"> + Window floats above other regular windows. This flag is ignored for full-screen windows. </constant> <constant name="WINDOW_FLAG_TRANSPARENT" value="3" enum="WindowFlags"> + Window will be destroyed with its transient parent and displayed on top of a non-exclusive full-screen parent window. Transient windows can't enter full-screen mode.
</constant> <constant name="WINDOW_FLAG_NO_FOCUS" value="4" enum="WindowFlags"> + Window can't be focused. A no-focus window ignores all input except mouse clicks. + </constant> + <constant name="WINDOW_FLAG_POPUP" value="5" enum="WindowFlags"> + Window is part of a menu or [OptionButton] dropdown. This flag can't be changed while the window is visible. An active popup window exclusively receives all input, without stealing focus from its parent. Popup windows are automatically closed when users click outside them, or when the user switches to another application. A popup window must have [constant WINDOW_FLAG_TRANSPARENT] set. </constant> - <constant name="WINDOW_FLAG_MAX" value="5" enum="WindowFlags"> + <constant name="WINDOW_FLAG_MAX" value="6" enum="WindowFlags"> </constant> <constant name="WINDOW_EVENT_MOUSE_ENTER" value="0" enum="WindowEvent"> </constant> diff --git a/doc/classes/FogVolume.xml b/doc/classes/FogVolume.xml index c869141ef6..d28a6a8783 100644 --- a/doc/classes/FogVolume.xml +++ b/doc/classes/FogVolume.xml @@ -12,6 +12,7 @@ <members> <member name="extents" type="Vector3" setter="set_extents" getter="get_extents" default="Vector3(1, 1, 1)"> Sets the size of the [FogVolume] when [member shape] is [constant RenderingServer.FOG_VOLUME_SHAPE_ELLIPSOID] or [constant RenderingServer.FOG_VOLUME_SHAPE_BOX]. + [b]Note:[/b] Thin fog volumes may appear to flicker when the camera moves or rotates. This can be alleviated by increasing [member ProjectSettings.rendering/environment/volumetric_fog/volume_depth] (at a performance cost) or by decreasing [member Environment.volumetric_fog_length] (at no performance cost, but at the cost of lower fog range). Alternatively, the [FogVolume] can be made thicker and use a lower density in the [member material]. </member> <member name="material" type="Material" setter="set_material" getter="get_material"> Sets the [Material] to be used by the [FogVolume]. Can be either a [FogMaterial] or a custom [ShaderMaterial]. diff --git a/doc/classes/ItemList.xml b/doc/classes/ItemList.xml index 875d8d27b2..8b564c01c9 100644 --- a/doc/classes/ItemList.xml +++ b/doc/classes/ItemList.xml @@ -401,7 +401,7 @@ <member name="select_mode" type="int" setter="set_select_mode" getter="get_select_mode" enum="ItemList.SelectMode" default="0"> Allows single or multiple item selection. See the [enum SelectMode] constants. </member> - <member name="text_overrun_behavior" type="int" setter="set_text_overrun_behavior" getter="get_text_overrun_behavior" enum="TextParagraph.OverrunBehavior" default="0"> + <member name="text_overrun_behavior" type="int" setter="set_text_overrun_behavior" getter="get_text_overrun_behavior" enum="TextParagraph.OverrunBehavior" default="3"> Sets the clipping behavior when the text exceeds an item's bounding rectangle. See [enum TextParagraph.OverrunBehavior] for a description of all modes. </member> </members> diff --git a/doc/classes/Line2D.xml b/doc/classes/Line2D.xml index 88574c0028..e2cc43bb75 100644 --- a/doc/classes/Line2D.xml +++ b/doc/classes/Line2D.xml @@ -121,10 +121,10 @@ Takes the left pixels of the texture and renders it over the whole line. </constant> <constant name="LINE_TEXTURE_TILE" value="1" enum="LineTextureMode"> - Tiles the texture over the line. The texture must be imported with [b]Repeat[/b] enabled for it to work properly. + Tiles the texture over the line. [member CanvasItem.texture_repeat] of the [Line2D] node must be [constant CanvasItem.TEXTURE_REPEAT_ENABLED] or [constant CanvasItem.TEXTURE_REPEAT_MIRROR] for it to work properly.
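A minimal GDScript sketch of the tiling requirement just described (the texture path is assumed, not part of the diff):

```gdscript
extends Node2D

func _ready():
    # Texture path is assumed; tiling only repeats correctly when the Line2D's
    # CanvasItem.texture_repeat is ENABLED (or MIRROR), as noted above.
    var line := Line2D.new()
    line.points = PackedVector2Array([Vector2.ZERO, Vector2(200, 0)])
    line.texture = load("res://dash_texture.png")
    line.texture_mode = Line2D.LINE_TEXTURE_TILE
    line.texture_repeat = CanvasItem.TEXTURE_REPEAT_ENABLED
    add_child(line)
```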
</constant> <constant name="LINE_TEXTURE_STRETCH" value="2" enum="LineTextureMode"> - Stretches the texture across the line. Import the texture with [b]Repeat[/b] disabled for best results. + Stretches the texture across the line. [member CanvasItem.texture_repeat] of the [Line2D] node must be [constant CanvasItem.TEXTURE_REPEAT_DISABLED] for best results. </constant> </constants> </class> diff --git a/doc/classes/NativeExtension.xml b/doc/classes/NativeExtension.xml index e5e11c1c95..ccdbb617ab 100644 --- a/doc/classes/NativeExtension.xml +++ b/doc/classes/NativeExtension.xml @@ -43,7 +43,9 @@ </constant> <constant name="INITIALIZATION_LEVEL_SCENE" value="2" enum="InitializationLevel"> </constant> - <constant name="INITIALIZATION_LEVEL_EDITOR" value="3" enum="InitializationLevel"> + <constant name="INITIALIZATION_LEVEL_DRIVER" value="3" enum="InitializationLevel"> + </constant> + <constant name="INITIALIZATION_LEVEL_EDITOR" value="4" enum="InitializationLevel"> </constant> </constants> </class> diff --git a/doc/classes/Popup.xml b/doc/classes/Popup.xml index 06efadb071..3fcf0a9b8f 100644 --- a/doc/classes/Popup.xml +++ b/doc/classes/Popup.xml @@ -4,15 +4,13 @@ Popup is a base window container for popup-like subwindows. </brief_description> <description> - Popup is a base window container for popup-like subwindows. It's a modal by default (see [member close_on_parent_focus]) and has helpers for custom popup behavior. + Popup is a base window container for popup-like subwindows. It's a modal by default (see [member popup_window]) and has helpers for custom popup behavior. </description> <tutorials> </tutorials> <members> <member name="borderless" type="bool" setter="set_flag" getter="get_flag" overrides="Window" default="true" /> - <member name="close_on_parent_focus" type="bool" setter="set_close_on_parent_focus" getter="get_close_on_parent_focus" default="true"> - If true, the [Popup] will close when its parent [Window] is focused. - </member> + <member name="popup_window" type="bool" setter="set_flag" getter="get_flag" overrides="Window" default="true" /> <member name="transient" type="bool" setter="set_transient" getter="is_transient" overrides="Window" default="true" /> <member name="unresizable" type="bool" setter="set_flag" getter="get_flag" overrides="Window" default="true" /> <member name="visible" type="bool" setter="set_visible" getter="is_visible" overrides="Window" default="false" /> diff --git a/doc/classes/ProjectSettings.xml b/doc/classes/ProjectSettings.xml index a3810bb575..be2c1ad372 100644 --- a/doc/classes/ProjectSettings.xml +++ b/doc/classes/ProjectSettings.xml @@ -1504,6 +1504,10 @@ <member name="physics/3d/sleep_threshold_linear" type="float" setter="" getter="" default="0.1"> Threshold linear velocity under which a 3D physics body will be considered inactive. See [constant PhysicsServer3D.SPACE_PARAM_BODY_LINEAR_VELOCITY_SLEEP_THRESHOLD]. </member> + <member name="physics/3d/smooth_trimesh_collision" type="bool" setter="" getter="" default="false"> + If [code]true[/code], smooths out collision with trimesh shapes ([ConcavePolygonShape3D]) by telling the Bullet physics engine to generate internal edge information for every trimesh shape created. + [b]Note:[/b] Only effective if [member physics/3d/physics_engine] is set to [code]Bullet[/code], [i]not[/i] [code]DEFAULT[/code] or [code]GodotPhysics[/code]. 
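A minimal sketch of flipping the new setting from an editor script; it is normally toggled in the Project Settings dialog, and per the note above it is only read by the Bullet backend:

```gdscript
@tool
extends EditorScript

func _run():
    # Only effective when the physics engine is Bullet, not GodotPhysics.
    ProjectSettings.set_setting("physics/3d/smooth_trimesh_collision", true)
    ProjectSettings.save()
```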
+ </member> <member name="physics/3d/solver/contact_max_allowed_penetration" type="float" setter="" getter="" default="0.01"> Maximum distance a shape can penetrate another shape before it is considered a collision. See [constant PhysicsServer3D.SPACE_PARAM_CONTACT_MAX_ALLOWED_PENETRATION]. </member> diff --git a/doc/classes/Rect2i.xml b/doc/classes/Rect2i.xml index 78ab4b9103..49fdd8e7e8 100644 --- a/doc/classes/Rect2i.xml +++ b/doc/classes/Rect2i.xml @@ -153,7 +153,6 @@ <argument index="0" name="b" type="Rect2i" /> <description> Returns [code]true[/code] if the [Rect2i] overlaps with [code]b[/code] (i.e. they have at least one point in common). - If [code]include_borders[/code] is [code]true[/code], they will also be considered overlapping if their borders touch, even without intersection. </description> </method> <method name="merge" qualifiers="const"> diff --git a/doc/classes/RenderingServer.xml b/doc/classes/RenderingServer.xml index 07e7ebc55b..ba3f5e10f5 100644 --- a/doc/classes/RenderingServer.xml +++ b/doc/classes/RenderingServer.xml @@ -4157,13 +4157,17 @@ <constant name="ENV_TONE_MAPPER_ACES" value="3" enum="EnvironmentToneMapper"> Use the ACES tonemapper. </constant> - <constant name="ENV_SSR_ROUGNESS_QUALITY_DISABLED" value="0" enum="EnvironmentSSRRoughnessQuality"> + <constant name="ENV_SSR_ROUGHNESS_QUALITY_DISABLED" value="0" enum="EnvironmentSSRRoughnessQuality"> + Lowest quality of roughness filter for screen-space reflections. Rough materials will not have blurrier screen-space reflections compared to smooth (non-rough) materials. This is the fastest option. </constant> - <constant name="ENV_SSR_ROUGNESS_QUALITY_LOW" value="1" enum="EnvironmentSSRRoughnessQuality"> + <constant name="ENV_SSR_ROUGHNESS_QUALITY_LOW" value="1" enum="EnvironmentSSRRoughnessQuality"> + Low quality of roughness filter for screen-space reflections. </constant> - <constant name="ENV_SSR_ROUGNESS_QUALITY_MEDIUM" value="2" enum="EnvironmentSSRRoughnessQuality"> + <constant name="ENV_SSR_ROUGHNESS_QUALITY_MEDIUM" value="2" enum="EnvironmentSSRRoughnessQuality"> + Medium quality of roughness filter for screen-space reflections. </constant> - <constant name="ENV_SSR_ROUGNESS_QUALITY_HIGH" value="3" enum="EnvironmentSSRRoughnessQuality"> + <constant name="ENV_SSR_ROUGHNESS_QUALITY_HIGH" value="3" enum="EnvironmentSSRRoughnessQuality"> + High quality of roughness filter for screen-space reflections. This is the slowest option. </constant> <constant name="ENV_SSAO_QUALITY_VERY_LOW" value="0" enum="EnvironmentSSAOQuality"> Lowest quality of screen-space ambient occlusion. diff --git a/doc/classes/RigidDynamicBody2D.xml b/doc/classes/RigidDynamicBody2D.xml index 696ad7a98e..087156989e 100644 --- a/doc/classes/RigidDynamicBody2D.xml +++ b/doc/classes/RigidDynamicBody2D.xml @@ -219,8 +219,8 @@ Emitted when one of this RigidDynamicBody2D's [Shape2D]s collides with another [PhysicsBody2D] or [TileMap]'s [Shape2D]s. Requires [member contact_monitor] to be set to [code]true[/code] and [member contacts_reported] to be set high enough to detect all the collisions. [TileMap]s are detected if the [TileSet] has Collision [Shape2D]s. [code]body_rid[/code] the [RID] of the other [PhysicsBody2D] or [TileSet]'s [CollisionObject2D] used by the [PhysicsServer2D]. [code]body[/code] the [Node], if it exists in the tree, of the other [PhysicsBody2D] or [TileMap]. - [code]body_shape_index[/code] the index of the [Shape2D] of the other [PhysicsBody2D] or [TileMap] used by the [PhysicsServer2D]. 
Get the [CollisionShape2D] node with [code]body.shape_owner_get_owner(body_shape_index)[/code]. - [code]local_shape_index[/code] the index of the [Shape2D] of this RigidDynamicBody2D used by the [PhysicsServer2D]. Get the [CollisionShape2D] node with [code]self.shape_owner_get_owner(local_shape_index)[/code]. + [code]body_shape_index[/code] the index of the [Shape2D] of the other [PhysicsBody2D] or [TileMap] used by the [PhysicsServer2D]. Get the [CollisionShape2D] node with [code]body.shape_owner_get_owner(body.shape_find_owner(body_shape_index))[/code]. + [code]local_shape_index[/code] the index of the [Shape2D] of this RigidDynamicBody2D used by the [PhysicsServer2D]. Get the [CollisionShape2D] node with [code]self.shape_owner_get_owner(self.shape_find_owner(local_shape_index))[/code]. </description> </signal> <signal name="body_shape_exited"> @@ -232,8 +232,8 @@ Emitted when the collision between one of this RigidDynamicBody2D's [Shape2D]s and another [PhysicsBody2D] or [TileMap]'s [Shape2D]s ends. Requires [member contact_monitor] to be set to [code]true[/code] and [member contacts_reported] to be set high enough to detect all the collisions. [TileMap]s are detected if the [TileSet] has Collision [Shape2D]s. [code]body_rid[/code] the [RID] of the other [PhysicsBody2D] or [TileSet]'s [CollisionObject2D] used by the [PhysicsServer2D]. [code]body[/code] the [Node], if it exists in the tree, of the other [PhysicsBody2D] or [TileMap]. - [code]body_shape_index[/code] the index of the [Shape2D] of the other [PhysicsBody2D] or [TileMap] used by the [PhysicsServer2D]. Get the [CollisionShape2D] node with [code]body.shape_owner_get_owner(body_shape_index)[/code]. - [code]local_shape_index[/code] the index of the [Shape2D] of this RigidDynamicBody2D used by the [PhysicsServer2D]. Get the [CollisionShape2D] node with [code]self.shape_owner_get_owner(local_shape_index)[/code]. + [code]body_shape_index[/code] the index of the [Shape2D] of the other [PhysicsBody2D] or [TileMap] used by the [PhysicsServer2D]. Get the [CollisionShape2D] node with [code]body.shape_owner_get_owner(body.shape_find_owner(body_shape_index))[/code]. + [code]local_shape_index[/code] the index of the [Shape2D] of this RigidDynamicBody2D used by the [PhysicsServer2D]. Get the [CollisionShape2D] node with [code]self.shape_owner_get_owner(self.shape_find_owner(local_shape_index))[/code]. </description> </signal> <signal name="sleeping_state_changed"> diff --git a/doc/classes/RigidDynamicBody3D.xml b/doc/classes/RigidDynamicBody3D.xml index 5fd53a7638..85cdcc7f8f 100644 --- a/doc/classes/RigidDynamicBody3D.xml +++ b/doc/classes/RigidDynamicBody3D.xml @@ -225,8 +225,8 @@ Emitted when one of this RigidDynamicBody3D's [Shape3D]s collides with another [PhysicsBody3D] or [GridMap]'s [Shape3D]s. Requires [member contact_monitor] to be set to [code]true[/code] and [member contacts_reported] to be set high enough to detect all the collisions. [GridMap]s are detected if the [MeshLibrary] has Collision [Shape3D]s. [code]body_rid[/code] the [RID] of the other [PhysicsBody3D] or [MeshLibrary]'s [CollisionObject3D] used by the [PhysicsServer3D]. [code]body[/code] the [Node], if it exists in the tree, of the other [PhysicsBody3D] or [GridMap]. - [code]body_shape_index[/code] the index of the [Shape3D] of the other [PhysicsBody3D] or [GridMap] used by the [PhysicsServer3D]. Get the [CollisionShape3D] node with [code]body.shape_owner_get_owner(body_shape_index)[/code]. 
- [code]local_shape_index[/code] the index of the [Shape3D] of this RigidDynamicBody3D used by the [PhysicsServer3D]. Get the [CollisionShape3D] node with [code]self.shape_owner_get_owner(local_shape_index)[/code]. + [code]body_shape_index[/code] the index of the [Shape3D] of the other [PhysicsBody3D] or [GridMap] used by the [PhysicsServer3D]. Get the [CollisionShape3D] node with [code]body.shape_owner_get_owner(body.shape_find_owner(body_shape_index))[/code]. + [code]local_shape_index[/code] the index of the [Shape3D] of this RigidDynamicBody3D used by the [PhysicsServer3D]. Get the [CollisionShape3D] node with [code]self.shape_owner_get_owner(self.shape_find_owner(local_shape_index))[/code]. [b]Note:[/b] Bullet physics cannot identify the shape index when using a [ConcavePolygonShape3D]. Don't use multiple [CollisionShape3D]s when using a [ConcavePolygonShape3D] with Bullet physics if you need shape indices. </description> </signal> @@ -239,8 +239,8 @@ Emitted when the collision between one of this RigidDynamicBody3D's [Shape3D]s and another [PhysicsBody3D] or [GridMap]'s [Shape3D]s ends. Requires [member contact_monitor] to be set to [code]true[/code] and [member contacts_reported] to be set high enough to detect all the collisions. [GridMap]s are detected if the [MeshLibrary] has Collision [Shape3D]s. [code]body_rid[/code] the [RID] of the other [PhysicsBody3D] or [MeshLibrary]'s [CollisionObject3D] used by the [PhysicsServer3D]. [GridMap]s are detected if the Meshes have [Shape3D]s. [code]body[/code] the [Node], if it exists in the tree, of the other [PhysicsBody3D] or [GridMap]. - [code]body_shape_index[/code] the index of the [Shape3D] of the other [PhysicsBody3D] or [GridMap] used by the [PhysicsServer3D]. Get the [CollisionShape3D] node with [code]body.shape_owner_get_owner(body_shape_index)[/code]. - [code]local_shape_index[/code] the index of the [Shape3D] of this RigidDynamicBody3D used by the [PhysicsServer3D]. Get the [CollisionShape3D] node with [code]self.shape_owner_get_owner(local_shape_index)[/code]. + [code]body_shape_index[/code] the index of the [Shape3D] of the other [PhysicsBody3D] or [GridMap] used by the [PhysicsServer3D]. Get the [CollisionShape3D] node with [code]body.shape_owner_get_owner(body.shape_find_owner(body_shape_index))[/code]. + [code]local_shape_index[/code] the index of the [Shape3D] of this RigidDynamicBody3D used by the [PhysicsServer3D]. Get the [CollisionShape3D] node with [code]self.shape_owner_get_owner(self.shape_find_owner(local_shape_index))[/code]. [b]Note:[/b] Bullet physics cannot identify the shape index when using a [ConcavePolygonShape3D]. Don't use multiple [CollisionShape3D]s when using a [ConcavePolygonShape3D] with Bullet physics if you need shape indices. </description> </signal> diff --git a/doc/classes/SceneTree.xml b/doc/classes/SceneTree.xml index 288c35f159..f3dfc727b0 100644 --- a/doc/classes/SceneTree.xml +++ b/doc/classes/SceneTree.xml @@ -232,13 +232,6 @@ </member> </members> <signals> - <signal name="files_dropped"> - <argument index="0" name="files" type="PackedStringArray" /> - <argument index="1" name="screen" type="int" /> - <description> - Emitted when files are dragged from the OS file manager and dropped in the game window. The arguments are a list of file paths and the identifier of the screen where the drag originated. 
- </description> - </signal> <signal name="node_added"> <argument index="0" name="node" type="Node" /> <description> diff --git a/doc/classes/SubViewport.xml b/doc/classes/SubViewport.xml index c439c1d016..b62c294f2c 100644 --- a/doc/classes/SubViewport.xml +++ b/doc/classes/SubViewport.xml @@ -4,6 +4,7 @@ Creates a sub-view into the screen. </brief_description> <description> + [SubViewport] is a [Viewport] that isn't a [Window], i.e. it doesn't draw anything by itself. To display something, [SubViewport]'s [member size] must be non-zero and it should be either put inside a [SubViewportContainer] or assigned to a [ViewportTexture]. </description> <tutorials> <link title="Using Viewports">$DOCS_URL/tutorials/rendering/viewports.html</link> diff --git a/doc/classes/SubViewportContainer.xml b/doc/classes/SubViewportContainer.xml index 050186a883..c8babb8f43 100644 --- a/doc/classes/SubViewportContainer.xml +++ b/doc/classes/SubViewportContainer.xml @@ -4,7 +4,7 @@ Control for holding [SubViewport]s. </brief_description> <description> - A [Container] node that holds a [SubViewport], automatically setting its size. + A [Container] node that holds a [SubViewport]. It uses the [SubViewport]'s size as minimum size, unless [member stretch] is enabled. [b]Note:[/b] Changing a SubViewportContainer's [member Control.rect_scale] will cause its contents to appear distorted. To change its visual size without causing distortion, adjust the node's margins instead (if it's not already in a container). [b]Note:[/b] The SubViewportContainer forwards mouse-enter and mouse-exit notifications to its sub-viewports. </description> @@ -12,7 +12,7 @@ </tutorials> <members> <member name="stretch" type="bool" setter="set_stretch" getter="is_stretch_enabled" default="false"> - If [code]true[/code], the sub-viewport will be scaled to the control's size. + If [code]true[/code], the sub-viewport will be automatically resized to the control's size. </member> <member name="stretch_shrink" type="int" setter="set_stretch_shrink" getter="get_stretch_shrink" default="1"> Divides the sub-viewport's effective resolution by this value while preserving its scale. This can be used to speed up rendering. diff --git a/doc/classes/TabBar.xml b/doc/classes/TabBar.xml index 40d6e9f26c..41e1e255ae 100644 --- a/doc/classes/TabBar.xml +++ b/doc/classes/TabBar.xml @@ -57,6 +57,13 @@ Returns the [Texture2D] for the tab at index [code]tab_idx[/code] or [code]null[/code] if the tab has no [Texture2D]. </description> </method> + <method name="get_tab_idx_at_point" qualifiers="const"> + <return type="int" /> + <argument index="0" name="point" type="Vector2" /> + <description> + Returns the index of the tab at local coordinates [code]point[/code]. Returns [code]-1[/code] if the point is outside the control boundaries or if there's no tab at the queried position. + </description> + </method> <method name="get_tab_language" qualifiers="const"> <return type="String" /> <argument index="0" name="tab_idx" type="int" /> diff --git a/doc/classes/TabContainer.xml b/doc/classes/TabContainer.xml index 3986983155..3f4ec81c95 100644 --- a/doc/classes/TabContainer.xml +++ b/doc/classes/TabContainer.xml @@ -43,20 +43,6 @@ Returns the number of tabs. </description> </method> - <method name="get_tab_disabled" qualifiers="const"> - <return type="bool" /> - <argument index="0" name="tab_idx" type="int" /> - <description> - Returns [code]true[/code] if the tab at index [code]tab_idx[/code] is disabled. 
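A minimal GDScript sketch of the [SubViewport] setup described in the new [SubViewport] description above (the node layout is assumed for illustration): the viewport needs a non-zero [member size] and its output must be consumed, here through its [ViewportTexture] shown by a [TextureRect].

extends Node

func _ready():
    var sub_viewport = SubViewport.new()
    sub_viewport.size = Vector2i(256, 256)
    add_child(sub_viewport)

    var label = Label.new()
    label.text = "Rendered inside a SubViewport"
    sub_viewport.add_child(label)

    # get_texture() returns the SubViewport's ViewportTexture.
    var texture_rect = TextureRect.new()
    texture_rect.texture = sub_viewport.get_texture()
    add_child(texture_rect)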
- </description> - </method> - <method name="get_tab_hidden" qualifiers="const"> - <return type="bool" /> - <argument index="0" name="tab_idx" type="int" /> - <description> - Returns [code]true[/code] if the tab at index [code]tab_idx[/code] is hidden. - </description> - </method> <method name="get_tab_icon" qualifiers="const"> <return type="Texture2D" /> <argument index="0" name="tab_idx" type="int" /> @@ -71,6 +57,13 @@ Returns the index of the tab at local coordinates [code]point[/code]. Returns [code]-1[/code] if the point is outside the control boundaries or if there's no tab at the queried position. </description> </method> + <method name="get_tab_idx_from_control" qualifiers="const"> + <return type="int" /> + <argument index="0" name="control" type="Control" /> + <description> + Returns the index of the tab tied to the given [code]control[/code]. The control must be a child of the [TabContainer]. + </description> + </method> <method name="get_tab_title" qualifiers="const"> <return type="String" /> <argument index="0" name="tab_idx" type="int" /> @@ -84,11 +77,25 @@ Returns the [TabContainer] rearrange group id. </description> </method> + <method name="is_tab_disabled" qualifiers="const"> + <return type="bool" /> + <argument index="0" name="tab_idx" type="int" /> + <description> + Returns [code]true[/code] if the tab at index [code]tab_idx[/code] is disabled. + </description> + </method> + <method name="is_tab_hidden" qualifiers="const"> + <return type="bool" /> + <argument index="0" name="tab_idx" type="int" /> + <description> + Returns [code]true[/code] if the tab at index [code]tab_idx[/code] is hidden. + </description> + </method> <method name="set_popup"> <return type="void" /> <argument index="0" name="popup" type="Node" /> <description> - If set on a [Popup] node instance, a popup menu icon appears in the top-right corner of the [TabContainer]. Clicking it will expand the [Popup] node. + If set on a [Popup] node instance, a popup menu icon appears in the top-right corner of the [TabContainer] (setting it to [code]null[/code] removes the icon). Clicking it will expand the [Popup] node. </description> </method> <method name="set_tab_disabled"> @@ -120,7 +127,7 @@ <argument index="0" name="tab_idx" type="int" /> <argument index="1" name="title" type="String" /> <description> - Sets a title for the tab at index [code]tab_idx[/code]. Tab titles default to the name of the indexed child node. + Sets a custom title for the tab at index [code]tab_idx[/code] (tab titles default to the name of the indexed child node). Set it to an empty string to revert to the child node's name. </description> </method> <method name="set_tabs_rearrange_group"> @@ -135,13 +142,17 @@ <member name="all_tabs_in_front" type="bool" setter="set_all_tabs_in_front" getter="is_all_tabs_in_front" default="false"> If [code]true[/code], all tabs are drawn in front of the panel. If [code]false[/code], inactive tabs are drawn behind the panel. </member> + <member name="clip_tabs" type="bool" setter="set_clip_tabs" getter="get_clip_tabs" default="true"> + If [code]true[/code], tabs overflowing this node's width will be hidden, displaying two navigation buttons instead. Otherwise, this node's minimum size is updated so that all tabs are visible. + </member> <member name="current_tab" type="int" setter="set_current_tab" getter="get_current_tab" default="0"> The current tab index. When set, this index's [Control] node's [code]visible[/code] property is set to [code]true[/code] and all others are set to [code]false[/code].
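A minimal GDScript sketch of the reworked [TabContainer] API documented above (the scene setup and names are assumed for illustration): tab state is addressed by tab index, and the index for a given child control comes from the new [method get_tab_idx_from_control] rather than the control's raw child index.

extends TabContainer

func _ready():
    var settings_panel = Panel.new()
    settings_panel.name = "Settings"
    add_child(settings_panel)

    var idx = get_tab_idx_from_control(settings_panel)
    set_tab_title(idx, "Project Settings")  # Overrides the default title (the child node's name).
    set_tab_disabled(idx, true)
    print(is_tab_disabled(idx))  # Prints "true".
    # Prints -1 if the cursor is not currently over a tab.
    print(get_tab_idx_at_point(get_local_mouse_position()))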
</member> <member name="drag_to_rearrange_enabled" type="bool" setter="set_drag_to_rearrange_enabled" getter="get_drag_to_rearrange_enabled" default="false"> If [code]true[/code], tabs can be rearranged with mouse drag. </member> - <member name="tab_alignment" type="int" setter="set_tab_alignment" getter="get_tab_alignment" enum="TabContainer.AlignmentMode" default="1"> + <member name="tab_alignment" type="int" setter="set_tab_alignment" getter="get_tab_alignment" enum="TabBar.AlignmentMode" default="1"> + Sets the position at which tabs will be placed. See [enum TabBar.AlignmentMode] for details. </member> <member name="tabs_visible" type="bool" setter="set_tabs_visible" getter="are_tabs_visible" default="true"> If [code]true[/code], tabs are visible. If [code]false[/code], tabs' content and titles are hidden. @@ -169,14 +180,6 @@ </description> </signal> </signals> - <constants> - <constant name="ALIGNMENT_LEFT" value="0" enum="AlignmentMode"> - </constant> - <constant name="ALIGNMENT_CENTER" value="1" enum="AlignmentMode"> - </constant> - <constant name="ALIGNMENT_RIGHT" value="2" enum="AlignmentMode"> - </constant> - </constants> <theme_items> <theme_item name="font_disabled_color" data_type="color" type="Color" default="Color(0.875, 0.875, 0.875, 0.5)"> Font color of disabled tabs. @@ -197,7 +200,8 @@ The size of the tab text outline. </theme_item> <theme_item name="side_margin" data_type="constant" type="int" default="8"> - The space at the left and right edges of the tab bar. + The space at the left or right edge of the tab bar, depending on the current [member tab_alignment]. + The margin is ignored with [code]ALIGNMENT_RIGHT[/code] if the tabs are clipped (see [member clip_tabs]) or a popup has been set (see [method set_popup]). The margin is always ignored with [code]ALIGNMENT_CENTER[/code]. </theme_item> <theme_item name="font" data_type="font" type="Font"> The font used to draw tab names. diff --git a/doc/classes/TileMap.xml b/doc/classes/TileMap.xml index e5ecff178b..9453bb9e2a 100644 --- a/doc/classes/TileMap.xml +++ b/doc/classes/TileMap.xml @@ -57,6 +57,14 @@ Clears all cells on the given layer. </description> </method> + <method name="erase_cell"> + <return type="void" /> + <argument index="0" name="layer" type="int" /> + <argument index="1" name="coords" type="Vector2i" /> + <description> + Erases the cell on layer [code]layer[/code] at coordinates [code]coords[/code]. + </description> + </method> <method name="fix_invalid_tiles"> <return type="void" /> <description> @@ -227,7 +235,7 @@ <argument index="1" name="coords" type="Vector2i" /> <argument index="2" name="source_id" type="int" default="-1" /> <argument index="3" name="atlas_coords" type="Vector2i" default="Vector2i(-1, -1)" /> - <argument index="4" name="alternative_tile" type="int" default="-1" /> + <argument index="4" name="alternative_tile" type="int" default="0" /> <description> Sets the tile identifiers for the cell on layer [code]layer[/code] at coordinates [code]coords[/code]. Each tile of the [TileSet] is identified using three parts: - The source identifier [code]source_id[/code] identifies a [TileSetSource] identifier.
See [method TileSet.set_source_id], diff --git a/doc/classes/Viewport.xml b/doc/classes/Viewport.xml index 531b09c6a0..ce61f51b9a 100644 --- a/doc/classes/Viewport.xml +++ b/doc/classes/Viewport.xml @@ -1,12 +1,11 @@ <?xml version="1.0" encoding="UTF-8" ?> <class name="Viewport" inherits="Node" version="4.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:noNamespaceSchemaLocation="../class.xsd"> <brief_description> - Creates a sub-view into the screen. + Base class for viewports. </brief_description> <description> A Viewport creates a different view into the screen, or a sub-view inside another viewport. Children 2D Nodes will display on it, and children Camera3D 3D nodes will render on it too. Optionally, a viewport can have its own 2D or 3D world, so they don't share what they draw with other viewports. - If a viewport is a child of a [SubViewportContainer], it will automatically take up its size, otherwise it must be set manually. Viewports can also choose to be audio listeners, so they generate positional audio depending on a 2D or 3D camera child of it. Also, viewports can be assigned to different screens in case the devices have multiple screens. Finally, viewports can also behave as render targets, in which case they will not be visible unless the associated texture is used to draw. diff --git a/doc/classes/Window.xml b/doc/classes/Window.xml index 912d3cb16c..9853f906bc 100644 --- a/doc/classes/Window.xml +++ b/doc/classes/Window.xml @@ -306,6 +306,8 @@ Sets the window's current mode. [b]Note:[/b] Fullscreen mode is not exclusive fullscreen on Windows and Linux. </member> + <member name="popup_window" type="bool" setter="set_flag" getter="get_flag" default="false"> + </member> <member name="position" type="Vector2i" setter="set_position" getter="get_position" default="Vector2i(0, 0)"> The window's position in pixels. </member> @@ -346,6 +348,7 @@ <signal name="files_dropped"> <argument index="0" name="files" type="PackedStringArray" /> <description> + Emitted when files are dragged from the OS file manager and dropped in the game window. The argument is a list of file paths. </description> </signal> <signal name="focus_entered"> @@ -416,7 +419,9 @@ </constant> <constant name="FLAG_NO_FOCUS" value="4" enum="Flags"> </constant> - <constant name="FLAG_MAX" value="5" enum="Flags"> + <constant name="FLAG_POPUP" value="5" enum="Flags"> + </constant> + <constant name="FLAG_MAX" value="6" enum="Flags"> </constant> <constant name="CONTENT_SCALE_MODE_DISABLED" value="0" enum="ContentScaleMode"> </constant> diff --git a/drivers/vulkan/SCsub b/drivers/vulkan/SCsub index 8fe75367a8..b6ceb1cdea 100644 --- a/drivers/vulkan/SCsub +++ b/drivers/vulkan/SCsub @@ -40,6 +40,9 @@ elif env["platform"] == "android": # Our current NDK version only provides old Vulkan headers, # so we have to limit VMA. env_thirdparty_vma.AppendUnique(CPPDEFINES=["VMA_VULKAN_VERSION=1000000"]) +elif env["platform"] == "osx" or env["platform"] == "iphone": + # MoltenVK supports only Vulkan 1.1 API, limit VMA to the same version.
+ env_thirdparty_vma.AppendUnique(CPPDEFINES=["VMA_VULKAN_VERSION=1001000"]) env_thirdparty_vma.add_source_files(thirdparty_obj, thirdparty_sources_vma) diff --git a/drivers/vulkan/rendering_device_vulkan.cpp b/drivers/vulkan/rendering_device_vulkan.cpp index cda8871f3e..9cc505cff1 100644 --- a/drivers/vulkan/rendering_device_vulkan.cpp +++ b/drivers/vulkan/rendering_device_vulkan.cpp @@ -1335,8 +1335,13 @@ Error RenderingDeviceVulkan::_buffer_allocate(Buffer *p_buffer, uint32_t p_size, allocInfo.requiredFlags = 0; allocInfo.preferredFlags = 0; allocInfo.memoryTypeBits = 0; - allocInfo.pool = p_size <= SMALL_ALLOCATION_MAX_SIZE ? small_allocs_pool : nullptr; + allocInfo.pool = nullptr; allocInfo.pUserData = nullptr; + if (p_size <= SMALL_ALLOCATION_MAX_SIZE) { + uint32_t mem_type_index = 0; + vmaFindMemoryTypeIndexForBufferInfo(allocator, &bufferInfo, &allocInfo, &mem_type_index); + allocInfo.pool = _find_or_create_small_allocs_pool(mem_type_index); + } VkResult err = vmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &p_buffer->buffer, &p_buffer->allocation, nullptr); ERR_FAIL_COND_V_MSG(err, ERR_CANT_CREATE, "Can't create buffer of size: " + itos(p_size) + ", error " + itos(err) + "."); @@ -1843,12 +1848,17 @@ RID RenderingDeviceVulkan::texture_create(const TextureFormat &p_format, const T VmaAllocationCreateInfo allocInfo; allocInfo.flags = 0; - allocInfo.pool = image_size <= SMALL_ALLOCATION_MAX_SIZE ? small_allocs_pool : nullptr; + allocInfo.pool = nullptr; allocInfo.usage = p_format.usage_bits & TEXTURE_USAGE_CPU_READ_BIT ? VMA_MEMORY_USAGE_CPU_ONLY : VMA_MEMORY_USAGE_GPU_ONLY; allocInfo.requiredFlags = 0; allocInfo.preferredFlags = 0; allocInfo.memoryTypeBits = 0; allocInfo.pUserData = nullptr; + if (image_size <= SMALL_ALLOCATION_MAX_SIZE) { + uint32_t mem_type_index = 0; + vmaFindMemoryTypeIndexForImageInfo(allocator, &image_create_info, &allocInfo, &mem_type_index); + allocInfo.pool = _find_or_create_small_allocs_pool(mem_type_index); + } Texture texture; @@ -4753,19 +4763,22 @@ Vector<uint8_t> RenderingDeviceVulkan::shader_compile_binary_from_spirv(const Ve for (uint32_t j = 0; j < sc_count; j++) { int32_t existing = -1; RenderingDeviceVulkanShaderBinarySpecializationConstant sconst; - sconst.constant_id = spec_constants[j]->constant_id; - switch (spec_constants[j]->constant_type) { + SpvReflectSpecializationConstant *spc = spec_constants[j]; + + sconst.constant_id = spc->constant_id; + sconst.int_value = 0.0; // clear previous value JIC + switch (spc->constant_type) { case SPV_REFLECT_SPECIALIZATION_CONSTANT_BOOL: { sconst.type = PIPELINE_SPECIALIZATION_CONSTANT_TYPE_BOOL; - sconst.bool_value = spec_constants[j]->default_value.int_bool_value != 0; + sconst.bool_value = spc->default_value.int_bool_value != 0; } break; case SPV_REFLECT_SPECIALIZATION_CONSTANT_INT: { sconst.type = PIPELINE_SPECIALIZATION_CONSTANT_TYPE_INT; - sconst.int_value = spec_constants[j]->default_value.int_bool_value; + sconst.int_value = spc->default_value.int_bool_value; } break; case SPV_REFLECT_SPECIALIZATION_CONSTANT_FLOAT: { sconst.type = PIPELINE_SPECIALIZATION_CONSTANT_TYPE_FLOAT; - sconst.float_value = spec_constants[j]->default_value.float_value; + sconst.float_value = spc->default_value.float_value; } break; } sconst.stage_flags = 1 << p_spirv[i].shader_stage; @@ -8714,6 +8727,30 @@ void RenderingDeviceVulkan::sync() { local_device_processing = false; } +VmaPool RenderingDeviceVulkan::_find_or_create_small_allocs_pool(uint32_t p_mem_type_index) { + if (small_allocs_pools.has(p_mem_type_index)) { 
+ return small_allocs_pools[p_mem_type_index]; + } + + print_verbose("Creating VMA small objects pool for memory type index " + itos(p_mem_type_index)); + + VmaPoolCreateInfo pci; + pci.memoryTypeIndex = p_mem_type_index; + pci.flags = 0; + pci.blockSize = 0; + pci.minBlockCount = 0; + pci.maxBlockCount = SIZE_MAX; + pci.priority = 0.5f; + pci.minAllocationAlignment = 0; + pci.pMemoryAllocateNext = nullptr; + VmaPool pool = VK_NULL_HANDLE; + VkResult res = vmaCreatePool(allocator, &pci, &pool); + small_allocs_pools[p_mem_type_index] = pool; // Don't try to create it again if failed the first time + ERR_FAIL_COND_V_MSG(res, pool, "vmaCreatePool failed with error " + itos(res) + "."); + + return pool; +} + void RenderingDeviceVulkan::_free_pending_resources(int p_frame) { //free in dependency usage order, so nothing weird happens //pipelines @@ -8834,9 +8871,9 @@ uint64_t RenderingDeviceVulkan::get_memory_usage(MemoryType p_type) const { } else if (p_type == MEMORY_TEXTURES) { return image_memory; } else { - VmaStats stats; - vmaCalculateStats(allocator, &stats); - return stats.total.usedBytes; + VmaTotalStatistics stats; + vmaCalculateStatistics(allocator, &stats); + return stats.total.statistics.allocationBytes; } } @@ -8935,18 +8972,6 @@ void RenderingDeviceVulkan::initialize(VulkanContext *p_context, bool p_local_de vmaCreateAllocator(&allocatorInfo, &allocator); } - { //create pool for small objects - VmaPoolCreateInfo pci; - pci.flags = VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT; - pci.blockSize = 0; - pci.minBlockCount = 0; - pci.maxBlockCount = SIZE_MAX; - pci.priority = 0.5f; - pci.minAllocationAlignment = 0; - pci.pMemoryAllocateNext = nullptr; - vmaCreatePool(allocator, &pci, &small_allocs_pool); - } - frames = memnew_arr(Frame, frame_count); frame = 0; //create setup and frame buffers @@ -9415,7 +9440,11 @@ void RenderingDeviceVulkan::finalize() { for (int i = 0; i < staging_buffer_blocks.size(); i++) { vmaDestroyBuffer(allocator, staging_buffer_blocks[i].buffer, staging_buffer_blocks[i].allocation); } - vmaDestroyPool(allocator, small_allocs_pool); + while (small_allocs_pools.size()) { + Map<uint32_t, VmaPool>::Element *E = small_allocs_pools.front(); + vmaDestroyPool(allocator, E->get()); + small_allocs_pools.erase(E); + } vmaDestroyAllocator(allocator); while (vertex_formats.size()) { diff --git a/drivers/vulkan/rendering_device_vulkan.h b/drivers/vulkan/rendering_device_vulkan.h index 3e4327667b..126f6f8ad0 100644 --- a/drivers/vulkan/rendering_device_vulkan.h +++ b/drivers/vulkan/rendering_device_vulkan.h @@ -1016,7 +1016,8 @@ class RenderingDeviceVulkan : public RenderingDevice { void _free_pending_resources(int p_frame); VmaAllocator allocator = nullptr; - VmaPool small_allocs_pool = nullptr; + Map<uint32_t, VmaPool> small_allocs_pools; + VmaPool _find_or_create_small_allocs_pool(uint32_t p_mem_type_index); VulkanContext *context = nullptr; diff --git a/editor/action_map_editor.cpp b/editor/action_map_editor.cpp index 3eab494761..92d53cc005 100644 --- a/editor/action_map_editor.cpp +++ b/editor/action_map_editor.cpp @@ -611,7 +611,7 @@ InputEventConfigurationDialog::InputEventConfigurationDialog() { add_child(main_vbox); tab_container = memnew(TabContainer); - tab_container->set_tab_alignment(TabContainer::ALIGNMENT_LEFT); + tab_container->set_tab_alignment(TabBar::ALIGNMENT_LEFT); tab_container->set_use_hidden_tabs_for_min_size(true); tab_container->set_v_size_flags(Control::SIZE_EXPAND_FILL); tab_container->connect("tab_selected", callable_mp(this, 
&InputEventConfigurationDialog::_tab_selected)); diff --git a/editor/debugger/editor_debugger_node.cpp b/editor/debugger/editor_debugger_node.cpp index 7c9a984b6a..d5a4f5d138 100644 --- a/editor/debugger/editor_debugger_node.cpp +++ b/editor/debugger/editor_debugger_node.cpp @@ -39,6 +39,7 @@ #include "editor/scene_tree_dock.h" #include "scene/gui/menu_button.h" #include "scene/gui/tab_container.h" +#include "scene/resources/packed_scene.h" template <typename Func> void _for_all(TabContainer *p_node, const Func &p_func) { @@ -60,7 +61,7 @@ EditorDebuggerNode::EditorDebuggerNode() { add_theme_constant_override("margin_right", -EditorNode::get_singleton()->get_gui_base()->get_theme_stylebox(SNAME("BottomPanelDebuggerOverride"), SNAME("EditorStyles"))->get_margin(SIDE_RIGHT)); tabs = memnew(TabContainer); - tabs->set_tab_alignment(TabContainer::ALIGNMENT_LEFT); + tabs->set_tab_alignment(TabBar::ALIGNMENT_LEFT); tabs->set_tabs_visible(false); tabs->connect("tab_changed", callable_mp(this, &EditorDebuggerNode::_debugger_changed)); add_child(tabs); @@ -141,11 +142,22 @@ void EditorDebuggerNode::_error_selected(const String &p_file, int p_line, int p } void EditorDebuggerNode::_text_editor_stack_goto(const ScriptEditorDebugger *p_debugger) { - const String file = p_debugger->get_stack_script_file(); + String file = p_debugger->get_stack_script_file(); if (file.is_empty()) { return; } - stack_script = ResourceLoader::load(file); + if (file.is_resource_file()) { + stack_script = ResourceLoader::load(file); + } else { + // If the script is built-in, it can be opened only if the scene is loaded in memory. + int i = file.find("::"); + int j = file.rfind("(", i); + if (j > -1) { // If the script is named, the string is "name (file)", so we need to extract the path. + file = file.substr(j + 1, file.find(")", i) - j - 1); + } + Ref<PackedScene> ps = ResourceLoader::load(file.get_slice("::", 0)); + stack_script = ResourceLoader::load(file); + } const int line = p_debugger->get_stack_script_line() - 1; emit_signal(SNAME("goto_script_line"), stack_script, line); emit_signal(SNAME("set_execution"), stack_script, line); diff --git a/editor/debugger/editor_visual_profiler.cpp b/editor/debugger/editor_visual_profiler.cpp index 2a1b0029d4..5df86c70fe 100644 --- a/editor/debugger/editor_visual_profiler.cpp +++ b/editor/debugger/editor_visual_profiler.cpp @@ -143,12 +143,12 @@ void EditorVisualProfiler::_item_selected() { } void EditorVisualProfiler::_update_plot() { - int w = graph->get_size().width; - int h = graph->get_size().height; + const int w = graph->get_size().width; + const int h = graph->get_size().height; bool reset_texture = false; - int desired_len = w * h * 4; + const int desired_len = w * h * 4; if (graph_image.size() != desired_len) { reset_texture = true; @@ -156,12 +156,13 @@ void EditorVisualProfiler::_update_plot() { } uint8_t *wr = graph_image.ptrw(); + const Color background_color = get_theme_color("dark_color_2", "Editor"); - //clear + // Clear the previous frame and set the background color. 
for (int i = 0; i < desired_len; i += 4) { - wr[i + 0] = 0; - wr[i + 1] = 0; - wr[i + 2] = 0; + wr[i + 0] = Math::fast_ftoi(background_color.r * 255); + wr[i + 1] = Math::fast_ftoi(background_color.g * 255); + wr[i + 2] = Math::fast_ftoi(background_color.b * 255); wr[i + 3] = 255; } @@ -259,9 +260,9 @@ void EditorVisualProfiler::_update_plot() { uint8_t r, g, b; if (column_cpu[j].a == 0) { - r = 0; - g = 0; - b = 0; + r = Math::fast_ftoi(background_color.r * 255); + g = Math::fast_ftoi(background_color.g * 255); + b = Math::fast_ftoi(background_color.b * 255); } else { r = CLAMP((column_cpu[j].r / column_cpu[j].a) * 255.0, 0, 255); g = CLAMP((column_cpu[j].g / column_cpu[j].a) * 255.0, 0, 255); @@ -279,9 +280,9 @@ void EditorVisualProfiler::_update_plot() { uint8_t r, g, b; if (column_gpu[j].a == 0) { - r = 0; - g = 0; - b = 0; + r = Math::fast_ftoi(background_color.r * 255); + g = Math::fast_ftoi(background_color.g * 255); + b = Math::fast_ftoi(background_color.b * 255); } else { r = CLAMP((column_gpu[j].r / column_gpu[j].a) * 255.0, 0, 255); g = CLAMP((column_gpu[j].g / column_gpu[j].a) * 255.0, 0, 255); @@ -440,8 +441,11 @@ void EditorVisualProfiler::_graph_tex_draw() { if (last_metric < 0) { return; } + Ref<Font> font = get_theme_font(SNAME("font"), SNAME("Label")); int font_size = get_theme_font_size(SNAME("font_size"), SNAME("Label")); + const Color color = get_theme_color(SNAME("font_color"), SNAME("Editor")); + if (seeking) { int max_frames = frame_metrics.size(); int frame = cursor_metric_edit->get_value() - (frame_metrics[last_metric].frame_number - max_frames + 1); @@ -451,10 +455,9 @@ void EditorVisualProfiler::_graph_tex_draw() { int half_width = graph->get_size().x / 2; int cur_x = frame * half_width / max_frames; - //cur_x /= 2.0; - graph->draw_line(Vector2(cur_x, 0), Vector2(cur_x, graph->get_size().y), Color(1, 1, 1, 0.8)); - graph->draw_line(Vector2(cur_x + half_width, 0), Vector2(cur_x + half_width, graph->get_size().y), Color(1, 1, 1, 0.8)); + graph->draw_line(Vector2(cur_x, 0), Vector2(cur_x, graph->get_size().y), color * Color(1, 1, 1)); + graph->draw_line(Vector2(cur_x + half_width, 0), Vector2(cur_x + half_width, graph->get_size().y), color * Color(1, 1, 1)); } if (graph_height_cpu > 0) { @@ -462,10 +465,10 @@ void EditorVisualProfiler::_graph_tex_draw() { int half_width = graph->get_size().x / 2; - graph->draw_line(Vector2(0, frame_y), Vector2(half_width, frame_y), Color(1, 1, 1, 0.3)); + graph->draw_line(Vector2(0, frame_y), Vector2(half_width, frame_y), color * Color(1, 1, 1, 0.5)); - String limit_str = String::num(graph_limit, 2); - graph->draw_string(font, Vector2(half_width - font->get_string_size(limit_str, font_size).x - 2, frame_y - 2), limit_str, HORIZONTAL_ALIGNMENT_LEFT, -1, font_size, Color(1, 1, 1, 0.6)); + const String limit_str = String::num(graph_limit, 2) + " ms"; + graph->draw_string(font, Vector2(half_width - font->get_string_size(limit_str, font_size).x - 2, frame_y - 2), limit_str, HORIZONTAL_ALIGNMENT_LEFT, -1, font_size, color * Color(1, 1, 1, 0.75)); } if (graph_height_gpu > 0) { @@ -473,14 +476,14 @@ void EditorVisualProfiler::_graph_tex_draw() { int half_width = graph->get_size().x / 2; - graph->draw_line(Vector2(half_width, frame_y), Vector2(graph->get_size().x, frame_y), Color(1, 1, 1, 0.3)); + graph->draw_line(Vector2(half_width, frame_y), Vector2(graph->get_size().x, frame_y), color * Color(1, 1, 1, 0.5)); - String limit_str = String::num(graph_limit, 2); - graph->draw_string(font, Vector2(half_width * 2 - 
font->get_string_size(limit_str, font_size).x - 2, frame_y - 2), limit_str, HORIZONTAL_ALIGNMENT_LEFT, -1, font_size, Color(1, 1, 1, 0.6)); + const String limit_str = String::num(graph_limit, 2) + " ms"; + graph->draw_string(font, Vector2(half_width * 2 - font->get_string_size(limit_str, font_size).x - 2, frame_y - 2), limit_str, HORIZONTAL_ALIGNMENT_LEFT, -1, font_size, color * Color(1, 1, 1, 0.75)); } - graph->draw_string(font, Vector2(font->get_string_size("X", font_size).x, font->get_ascent(font_size) + 2), "CPU:", HORIZONTAL_ALIGNMENT_LEFT, -1, font_size, Color(1, 1, 1, 0.8)); - graph->draw_string(font, Vector2(font->get_string_size("X", font_size).x + graph->get_size().width / 2, font->get_ascent(font_size) + 2), "GPU:", HORIZONTAL_ALIGNMENT_LEFT, -1, font_size, Color(1, 1, 1, 0.8)); + graph->draw_string(font, Vector2(font->get_string_size("X", font_size).x, font->get_ascent(font_size) + 2), "CPU:", HORIZONTAL_ALIGNMENT_LEFT, -1, font_size, color * Color(1, 1, 1)); + graph->draw_string(font, Vector2(font->get_string_size("X", font_size).x + graph->get_size().width / 2, font->get_ascent(font_size) + 2), "GPU:", HORIZONTAL_ALIGNMENT_LEFT, -1, font_size, color * Color(1, 1, 1)); } void EditorVisualProfiler::_graph_tex_mouse_exit() { diff --git a/editor/debugger/script_editor_debugger.cpp b/editor/debugger/script_editor_debugger.cpp index 645d7608f3..0b9631c816 100644 --- a/editor/debugger/script_editor_debugger.cpp +++ b/editor/debugger/script_editor_debugger.cpp @@ -135,15 +135,15 @@ void ScriptEditorDebugger::debug_continue() { void ScriptEditorDebugger::update_tabs() { if (error_count == 0 && warning_count == 0) { errors_tab->set_name(TTR("Errors")); - tabs->set_tab_icon(errors_tab->get_index(), Ref<Texture2D>()); + tabs->set_tab_icon(tabs->get_tab_idx_from_control(errors_tab), Ref<Texture2D>()); } else { errors_tab->set_name(TTR("Errors") + " (" + itos(error_count + warning_count) + ")"); if (error_count >= 1 && warning_count >= 1) { - tabs->set_tab_icon(errors_tab->get_index(), get_theme_icon(SNAME("ErrorWarning"), SNAME("EditorIcons"))); + tabs->set_tab_icon(tabs->get_tab_idx_from_control(errors_tab), get_theme_icon(SNAME("ErrorWarning"), SNAME("EditorIcons"))); } else if (error_count >= 1) { - tabs->set_tab_icon(errors_tab->get_index(), get_theme_icon(SNAME("Error"), SNAME("EditorIcons"))); + tabs->set_tab_icon(tabs->get_tab_idx_from_control(errors_tab), get_theme_icon(SNAME("Error"), SNAME("EditorIcons"))); } else { - tabs->set_tab_icon(errors_tab->get_index(), get_theme_icon(SNAME("Warning"), SNAME("EditorIcons"))); + tabs->set_tab_icon(tabs->get_tab_idx_from_control(errors_tab), get_theme_icon(SNAME("Warning"), SNAME("EditorIcons"))); } } } @@ -1658,7 +1658,7 @@ bool ScriptEditorDebugger::has_capture(const StringName &p_name) { ScriptEditorDebugger::ScriptEditorDebugger() { tabs = memnew(TabContainer); - tabs->set_tab_alignment(TabContainer::ALIGNMENT_LEFT); + tabs->set_tab_alignment(TabBar::ALIGNMENT_LEFT); tabs->add_theme_style_override("panel", EditorNode::get_singleton()->get_gui_base()->get_theme_stylebox(SNAME("DebuggerPanel"), SNAME("EditorStyles"))); tabs->connect("tab_changed", callable_mp(this, &ScriptEditorDebugger::_tab_changed)); diff --git a/editor/editor_file_dialog.cpp b/editor/editor_file_dialog.cpp index e6343100df..0fef4597be 100644 --- a/editor/editor_file_dialog.cpp +++ b/editor/editor_file_dialog.cpp @@ -394,7 +394,8 @@ void EditorFileDialog::_action_pressed() { return; } - String f = dir_access->get_current_dir().plus_file(file->get_text()); + String 
file_text = file->get_text(); + String f = file_text.is_absolute_path() ? file_text : dir_access->get_current_dir().plus_file(file_text); if ((mode == FILE_MODE_OPEN_ANY || mode == FILE_MODE_OPEN_FILE) && dir_access->file_exists(f)) { _save_to_recent(); diff --git a/editor/editor_help.cpp b/editor/editor_help.cpp index fe39f7acc9..39c8509148 100644 --- a/editor/editor_help.cpp +++ b/editor/editor_help.cpp @@ -477,9 +477,9 @@ void EditorHelp::_update_method_descriptions(const DocData::ClassDoc p_classdoc, class_desc->add_text(" "); class_desc->push_color(comment_color); if (p_classdoc.is_script_doc) { - class_desc->append_text(TTR("There is currently no description for this " + p_method_type + ".")); + class_desc->append_text(vformat(TTR("There is currently no description for this %s."), p_method_type)); } else { - class_desc->append_text(TTR("There is currently no description for this " + p_method_type + ". Please help us by [color=$color][url=$url]contributing one[/url][/color]!").replace("$url", CONTRIBUTE_URL).replace("$color", link_color_text)); + class_desc->append_text(vformat(TTR("There is currently no description for this %s. Please help us by [color=$color][url=$url]contributing one[/url][/color]!"), p_method_type).replace("$url", CONTRIBUTE_URL).replace("$color", link_color_text)); } class_desc->pop(); } diff --git a/editor/editor_node.cpp b/editor/editor_node.cpp index cda5e6b537..ddb9c93f04 100644 --- a/editor/editor_node.cpp +++ b/editor/editor_node.cpp @@ -313,6 +313,7 @@ void EditorNode::disambiguate_filenames(const Vector<String> p_full_paths, Vecto } } +// TODO: This REALLY should be done in a better way than replacing all tabs after almost EVERY action. void EditorNode::_update_scene_tabs() { bool show_rb = EditorSettings::get_singleton()->get("interface/scene_tabs/show_script_button"); @@ -330,6 +331,9 @@ void EditorNode::_update_scene_tabs() { disambiguate_filenames(full_path_names, disambiguated_scene_names); + // Workaround to ignore the tab_changed signal from the first added tab. + scene_tabs->disconnect("tab_changed", callable_mp(this, &EditorNode::_scene_tab_changed)); + scene_tabs->clear_tabs(); Ref<Texture2D> script_icon = gui_base->get_theme_icon(SNAME("Script"), SNAME("EditorIcons")); for (int i = 0; i < editor_data.get_edited_scene_count(); i++) { @@ -388,6 +392,9 @@ void EditorNode::_update_scene_tabs() { scene_tab_add->set_position(Point2(last_tab.position.x + last_tab.size.width + hsep, last_tab.position.y)); } } + + // Reconnect after everything is done. 
+ scene_tabs->connect("tab_changed", callable_mp(this, &EditorNode::_scene_tab_changed)); } void EditorNode::_version_control_menu_option(int p_idx) { @@ -4226,7 +4233,7 @@ void EditorNode::_dock_floating_close_request(Control *p_control) { p_control->get_parent()->remove_child(p_control); dock_slot[window_slot]->add_child(p_control); - dock_slot[window_slot]->move_child(p_control, MIN((int)window->get_meta("dock_index"), dock_slot[window_slot]->get_child_count())); + dock_slot[window_slot]->move_child(p_control, MIN((int)window->get_meta("dock_index"), dock_slot[window_slot]->get_tab_count())); dock_slot[window_slot]->set_current_tab(window->get_meta("dock_index")); window->queue_delete(); @@ -4466,13 +4473,13 @@ void EditorNode::_dock_select_draw() { if (i == dock_select_rect_over) { dock_select->draw_rect(r, used_selected); - } else if (dock_slot[i]->get_child_count() == 0) { + } else if (dock_slot[i]->get_tab_count() == 0) { dock_select->draw_rect(r, unused); } else { dock_select->draw_rect(r, used); } - for (int j = 0; j < MIN(3, dock_slot[i]->get_child_count()); j++) { + for (int j = 0; j < MIN(3, dock_slot[i]->get_tab_count()); j++) { int xofs = (r.size.width / 3) * j; Color c = used; if (i == dock_popup_selected && (dock_slot[i]->get_current_tab() > 3 || dock_slot[i]->get_current_tab() == j)) { @@ -4584,7 +4591,7 @@ void EditorNode::_update_dock_slots_visibility() { for (int i = 0; i < DOCK_SLOT_MAX; i++) { int tabs_visible = 0; for (int j = 0; j < dock_slot[i]->get_tab_count(); j++) { - if (!dock_slot[i]->get_tab_hidden(j)) { + if (!dock_slot[i]->is_tab_hidden(j)) { tabs_visible++; } } @@ -5648,11 +5655,11 @@ void EditorNode::_feature_profile_changed() { TabContainer *node_tabs = cast_to<TabContainer>(NodeDock::get_singleton()->get_parent()); TabContainer *fs_tabs = cast_to<TabContainer>(FileSystemDock::get_singleton()->get_parent()); if (profile.is_valid()) { - node_tabs->set_tab_hidden(NodeDock::get_singleton()->get_index(), profile->is_feature_disabled(EditorFeatureProfile::FEATURE_NODE_DOCK)); + node_tabs->set_tab_hidden(node_tabs->get_tab_idx_from_control(NodeDock::get_singleton()), profile->is_feature_disabled(EditorFeatureProfile::FEATURE_NODE_DOCK)); // The Import dock is useless without the FileSystem dock. Ensure the configuration is valid. 
bool fs_dock_disabled = profile->is_feature_disabled(EditorFeatureProfile::FEATURE_FILESYSTEM_DOCK); - fs_tabs->set_tab_hidden(FileSystemDock::get_singleton()->get_index(), fs_dock_disabled); - import_tabs->set_tab_hidden(ImportDock::get_singleton()->get_index(), fs_dock_disabled || profile->is_feature_disabled(EditorFeatureProfile::FEATURE_IMPORT_DOCK)); + fs_tabs->set_tab_hidden(fs_tabs->get_tab_idx_from_control(FileSystemDock::get_singleton()), fs_dock_disabled); + import_tabs->set_tab_hidden(import_tabs->get_tab_idx_from_control(ImportDock::get_singleton()), fs_dock_disabled || profile->is_feature_disabled(EditorFeatureProfile::FEATURE_IMPORT_DOCK)); main_editor_buttons[EDITOR_3D]->set_visible(!profile->is_feature_disabled(EditorFeatureProfile::FEATURE_3D)); main_editor_buttons[EDITOR_SCRIPT]->set_visible(!profile->is_feature_disabled(EditorFeatureProfile::FEATURE_SCRIPT)); @@ -5665,9 +5672,9 @@ void EditorNode::_feature_profile_changed() { _editor_select(EDITOR_2D); } } else { - import_tabs->set_tab_hidden(ImportDock::get_singleton()->get_index(), false); - node_tabs->set_tab_hidden(NodeDock::get_singleton()->get_index(), false); - fs_tabs->set_tab_hidden(FileSystemDock::get_singleton()->get_index(), false); + import_tabs->set_tab_hidden(import_tabs->get_tab_idx_from_control(ImportDock::get_singleton()), false); + node_tabs->set_tab_hidden(node_tabs->get_tab_idx_from_control(NodeDock::get_singleton()), false); + fs_tabs->set_tab_hidden(fs_tabs->get_tab_idx_from_control(FileSystemDock::get_singleton()), false); ImportDock::get_singleton()->set_visible(true); NodeDock::get_singleton()->set_visible(true); FileSystemDock::get_singleton()->set_visible(true); @@ -6205,7 +6212,7 @@ EditorNode::EditorNode() { dock_slot[i]->set_v_size_flags(Control::SIZE_EXPAND_FILL); dock_slot[i]->set_popup(dock_select_popup); dock_slot[i]->connect("pre_popup_pressed", callable_mp(this, &EditorNode::_dock_pre_popup), varray(i)); - dock_slot[i]->set_tab_alignment(TabContainer::ALIGNMENT_LEFT); + dock_slot[i]->set_tab_alignment(TabBar::ALIGNMENT_LEFT); dock_slot[i]->set_drag_to_rearrange_enabled(true); dock_slot[i]->set_tabs_rearrange_group(1); dock_slot[i]->connect("tab_changed", callable_mp(this, &EditorNode::_dock_tab_changed)); @@ -6714,23 +6721,23 @@ EditorNode::EditorNode() { // Scene: Top left dock_slot[DOCK_SLOT_LEFT_UR]->add_child(SceneTreeDock::get_singleton()); - dock_slot[DOCK_SLOT_LEFT_UR]->set_tab_title(SceneTreeDock::get_singleton()->get_index(), TTR("Scene")); + dock_slot[DOCK_SLOT_LEFT_UR]->set_tab_title(dock_slot[DOCK_SLOT_LEFT_UR]->get_tab_idx_from_control(SceneTreeDock::get_singleton()), TTR("Scene")); // Import: Top left, behind Scene dock_slot[DOCK_SLOT_LEFT_UR]->add_child(ImportDock::get_singleton()); - dock_slot[DOCK_SLOT_LEFT_UR]->set_tab_title(ImportDock::get_singleton()->get_index(), TTR("Import")); + dock_slot[DOCK_SLOT_LEFT_UR]->set_tab_title(dock_slot[DOCK_SLOT_LEFT_UR]->get_tab_idx_from_control(ImportDock::get_singleton()), TTR("Import")); // FileSystem: Bottom left dock_slot[DOCK_SLOT_LEFT_BR]->add_child(FileSystemDock::get_singleton()); - dock_slot[DOCK_SLOT_LEFT_BR]->set_tab_title(FileSystemDock::get_singleton()->get_index(), TTR("FileSystem")); + dock_slot[DOCK_SLOT_LEFT_BR]->set_tab_title(dock_slot[DOCK_SLOT_LEFT_BR]->get_tab_idx_from_control(FileSystemDock::get_singleton()), TTR("FileSystem")); // Inspector: Full height right dock_slot[DOCK_SLOT_RIGHT_UL]->add_child(InspectorDock::get_singleton()); - 
dock_slot[DOCK_SLOT_RIGHT_UL]->set_tab_title(InspectorDock::get_singleton()->get_index(), TTR("Inspector")); + dock_slot[DOCK_SLOT_RIGHT_UL]->set_tab_title(dock_slot[DOCK_SLOT_RIGHT_UL]->get_tab_idx_from_control(InspectorDock::get_singleton()), TTR("Inspector")); // Node: Full height right, behind Inspector dock_slot[DOCK_SLOT_RIGHT_UL]->add_child(NodeDock::get_singleton()); - dock_slot[DOCK_SLOT_RIGHT_UL]->set_tab_title(NodeDock::get_singleton()->get_index(), TTR("Node")); + dock_slot[DOCK_SLOT_RIGHT_UL]->set_tab_title(dock_slot[DOCK_SLOT_RIGHT_UL]->get_tab_idx_from_control(NodeDock::get_singleton()), TTR("Node")); // Hide unused dock slots and vsplits dock_slot[DOCK_SLOT_LEFT_UL]->hide(); diff --git a/editor/editor_settings_dialog.cpp b/editor/editor_settings_dialog.cpp index 18324f9971..75bba64e2e 100644 --- a/editor/editor_settings_dialog.cpp +++ b/editor/editor_settings_dialog.cpp @@ -662,7 +662,7 @@ EditorSettingsDialog::EditorSettingsDialog() { undo_redo = memnew(UndoRedo); tabs = memnew(TabContainer); - tabs->set_tab_alignment(TabContainer::ALIGNMENT_LEFT); + tabs->set_tab_alignment(TabBar::ALIGNMENT_LEFT); tabs->connect("tab_changed", callable_mp(this, &EditorSettingsDialog::_tabs_tab_changed)); add_child(tabs); diff --git a/editor/editor_themes.cpp b/editor/editor_themes.cpp index 05aa638a4b..cdf51dda20 100644 --- a/editor/editor_themes.cpp +++ b/editor/editor_themes.cpp @@ -411,9 +411,11 @@ Ref<Theme> create_editor_theme(const Ref<Theme> p_theme) { // Colors bool dark_theme = EditorSettings::get_singleton()->is_dark_theme(); - const Color dark_color_1 = base_color.lerp(Color(0, 0, 0, 1), contrast); - const Color dark_color_2 = base_color.lerp(Color(0, 0, 0, 1), contrast * 1.5); - const Color dark_color_3 = base_color.lerp(Color(0, 0, 0, 1), contrast * 2); + // Ensure base colors are in the 0..1 luminance range to avoid 8-bit integer overflow or text rendering issues. + // Some places in the editor use 8-bit integer colors. 
+ const Color dark_color_1 = base_color.lerp(Color(0, 0, 0, 1), contrast).clamp(); + const Color dark_color_2 = base_color.lerp(Color(0, 0, 0, 1), contrast * 1.5).clamp(); + const Color dark_color_3 = base_color.lerp(Color(0, 0, 0, 1), contrast * 2).clamp(); const Color background_color = dark_color_2; diff --git a/editor/import/resource_importer_scene.cpp b/editor/import/resource_importer_scene.cpp index 3eb12353b5..e7c605aaf0 100644 --- a/editor/import/resource_importer_scene.cpp +++ b/editor/import/resource_importer_scene.cpp @@ -1649,14 +1649,6 @@ void ResourceImporterScene::_generate_meshes(Node *p_node, const Dictionary &p_m } } - if (generate_lods) { - src_mesh_node->get_mesh()->generate_lods(merge_angle, split_angle); - } - - if (create_shadow_meshes) { - src_mesh_node->get_mesh()->create_shadow_mesh(); - } - if (bake_lightmaps) { Transform3D xf; Node3D *n = src_mesh_node; @@ -1689,6 +1681,14 @@ void ResourceImporterScene::_generate_meshes(Node *p_node, const Dictionary &p_m } } + if (generate_lods) { + src_mesh_node->get_mesh()->generate_lods(merge_angle, split_angle); + } + + if (create_shadow_meshes) { + src_mesh_node->get_mesh()->create_shadow_mesh(); + } + if (!save_to_file.is_empty()) { Ref<Mesh> existing = Ref<Resource>(ResourceCache::get(save_to_file)); if (existing.is_valid()) { diff --git a/editor/localization_editor.cpp b/editor/localization_editor.cpp index cd9986d527..a766650cd9 100644 --- a/editor/localization_editor.cpp +++ b/editor/localization_editor.cpp @@ -477,7 +477,7 @@ LocalizationEditor::LocalizationEditor() { localization_changed = "localization_changed"; TabContainer *translations = memnew(TabContainer); - translations->set_tab_alignment(TabContainer::ALIGNMENT_LEFT); + translations->set_tab_alignment(TabBar::ALIGNMENT_LEFT); translations->set_v_size_flags(Control::SIZE_EXPAND_FILL); add_child(translations); diff --git a/editor/plugins/node_3d_editor_plugin.h b/editor/plugins/node_3d_editor_plugin.h index 48423d1c83..5c0bfab34e 100644 --- a/editor/plugins/node_3d_editor_plugin.h +++ b/editor/plugins/node_3d_editor_plugin.h @@ -378,7 +378,7 @@ private: Node3DEditor *spatial_editor; Camera3D *previewing; - Camera3D *preview; + Camera3D *preview = nullptr; bool previewing_cinema; bool _is_node_locked(const Node *p_node); diff --git a/editor/plugins/script_editor_plugin.cpp b/editor/plugins/script_editor_plugin.cpp index bd4064708b..a4a2919034 100644 --- a/editor/plugins/script_editor_plugin.cpp +++ b/editor/plugins/script_editor_plugin.cpp @@ -407,8 +407,8 @@ void ScriptEditor::_breaked(bool p_breaked, bool p_can_debug) { return; } - for (int i = 0; i < tab_container->get_child_count(); i++) { - ScriptEditorBase *se = Object::cast_to<ScriptEditorBase>(tab_container->get_child(i)); + for (int i = 0; i < tab_container->get_tab_count(); i++) { + ScriptEditorBase *se = Object::cast_to<ScriptEditorBase>(tab_container->get_tab_control(i)); if (!se) { continue; } @@ -447,8 +447,8 @@ void ScriptEditor::_goto_script_line(REF p_script, int p_line) { void ScriptEditor::_set_execution(REF p_script, int p_line) { Ref<Script> script = Object::cast_to<Script>(*p_script); if (script.is_valid() && (script->has_source_code() || script->get_path().is_resource_file())) { - for (int i = 0; i < tab_container->get_child_count(); i++) { - ScriptEditorBase *se = Object::cast_to<ScriptEditorBase>(tab_container->get_child(i)); + for (int i = 0; i < tab_container->get_tab_count(); i++) { + ScriptEditorBase *se = Object::cast_to<ScriptEditorBase>(tab_container->get_tab_control(i)); if 
(!se) { continue; } @@ -463,8 +463,8 @@ void ScriptEditor::_set_execution(REF p_script, int p_line) { void ScriptEditor::_clear_execution(REF p_script) { Ref<Script> script = Object::cast_to<Script>(*p_script); if (script.is_valid() && (script->has_source_code() || script->get_path().is_resource_file())) { - for (int i = 0; i < tab_container->get_child_count(); i++) { - ScriptEditorBase *se = Object::cast_to<ScriptEditorBase>(tab_container->get_child(i)); + for (int i = 0; i < tab_container->get_tab_count(); i++) { + ScriptEditorBase *se = Object::cast_to<ScriptEditorBase>(tab_container->get_tab_control(i)); if (!se) { continue; } @@ -480,8 +480,8 @@ void ScriptEditor::_set_breakpoint(REF p_script, int p_line, bool p_enabled) { Ref<Script> script = Object::cast_to<Script>(*p_script); if (script.is_valid() && (script->has_source_code() || script->get_path().is_resource_file())) { // Update if open. - for (int i = 0; i < tab_container->get_child_count(); i++) { - ScriptEditorBase *se = Object::cast_to<ScriptEditorBase>(tab_container->get_child(i)); + for (int i = 0; i < tab_container->get_tab_count(); i++) { + ScriptEditorBase *se = Object::cast_to<ScriptEditorBase>(tab_container->get_tab_control(i)); if (se && se->get_edited_resource()->get_path() == script->get_path()) { se->set_breakpoint(p_line, p_enabled); return; @@ -509,8 +509,8 @@ void ScriptEditor::_set_breakpoint(REF p_script, int p_line, bool p_enabled) { } void ScriptEditor::_clear_breakpoints() { - for (int i = 0; i < tab_container->get_child_count(); i++) { - ScriptEditorBase *se = Object::cast_to<ScriptEditorBase>(tab_container->get_child(i)); + for (int i = 0; i < tab_container->get_tab_count(); i++) { + ScriptEditorBase *se = Object::cast_to<ScriptEditorBase>(tab_container->get_tab_control(i)); if (se) { se->clear_breakpoints(); } @@ -547,11 +547,11 @@ Array ScriptEditor::_get_cached_breakpoints_for_script(const String &p_path) con ScriptEditorBase *ScriptEditor::_get_current_editor() const { int selected = tab_container->get_current_tab(); - if (selected < 0 || selected >= tab_container->get_child_count()) { + if (selected < 0 || selected >= tab_container->get_tab_count()) { return nullptr; } - return Object::cast_to<ScriptEditorBase>(tab_container->get_child(selected)); + return Object::cast_to<ScriptEditorBase>(tab_container->get_tab_control(selected)); } void ScriptEditor::_update_history_arrows() { @@ -590,7 +590,7 @@ void ScriptEditor::_go_to_tab(int p_idx) { } } - Control *c = Object::cast_to<Control>(tab_container->get_child(p_idx)); + Control *c = Object::cast_to<Control>(tab_container->get_tab_control(p_idx)); if (!c) { return; } @@ -750,11 +750,11 @@ void ScriptEditor::_show_error_dialog(String p_path) { void ScriptEditor::_close_tab(int p_idx, bool p_save, bool p_history_back) { int selected = p_idx; - if (selected < 0 || selected >= tab_container->get_child_count()) { + if (selected < 0 || selected >= tab_container->get_tab_count()) { return; } - Node *tselected = tab_container->get_child(selected); + Node *tselected = tab_container->get_tab_control(selected); ScriptEditorBase *current = Object::cast_to<ScriptEditorBase>(tselected); if (current) { @@ -762,7 +762,7 @@ void ScriptEditor::_close_tab(int p_idx, bool p_save, bool p_history_back) { if (p_save && file.is_valid()) { // Do not try to save internal scripts, but prompt to save in-memory // scripts which are not saved to disk yet (have empty path). 
- if (file->is_built_in()) { + if (!file->is_built_in()) { save_current_script(); } } @@ -805,8 +805,8 @@ void ScriptEditor::_close_tab(int p_idx, bool p_save, bool p_history_back) { _save_editor_state(current); } memdelete(tselected); - if (idx >= tab_container->get_child_count()) { - idx = tab_container->get_child_count() - 1; + if (idx >= tab_container->get_tab_count()) { + idx = tab_container->get_tab_count() - 1; } if (idx >= 0) { if (history_pos >= 0) { @@ -836,9 +836,9 @@ void ScriptEditor::_close_discard_current_tab(const String &p_str) { } void ScriptEditor::_close_docs_tab() { - int child_count = tab_container->get_child_count(); + int child_count = tab_container->get_tab_count(); for (int i = child_count - 1; i >= 0; i--) { - EditorHelp *se = Object::cast_to<EditorHelp>(tab_container->get_child(i)); + EditorHelp *se = Object::cast_to<EditorHelp>(tab_container->get_tab_control(i)); if (se) { _close_tab(i, true, false); @@ -856,7 +856,7 @@ void ScriptEditor::_copy_script_path() { void ScriptEditor::_close_other_tabs() { int current_idx = tab_container->get_current_tab(); - for (int i = tab_container->get_child_count() - 1; i >= 0; i--) { + for (int i = tab_container->get_tab_count() - 1; i >= 0; i--) { if (i != current_idx) { script_close_queue.push_back(i); } @@ -865,7 +865,7 @@ void ScriptEditor::_close_other_tabs() { } void ScriptEditor::_close_all_tabs() { - for (int i = tab_container->get_child_count() - 1; i >= 0; i--) { + for (int i = tab_container->get_tab_count() - 1; i >= 0; i--) { script_close_queue.push_back(i); } _queue_close_tabs(); @@ -877,7 +877,7 @@ void ScriptEditor::_queue_close_tabs() { script_close_queue.pop_front(); tab_container->set_current_tab(idx); - ScriptEditorBase *se = Object::cast_to<ScriptEditorBase>(tab_container->get_child(idx)); + ScriptEditorBase *se = Object::cast_to<ScriptEditorBase>(tab_container->get_tab_control(idx)); if (se) { // Maybe there are unsaved changes. 
if (se->is_unsaved()) { @@ -900,8 +900,8 @@ void ScriptEditor::_ask_close_current_unsaved_tab(ScriptEditorBase *current) { void ScriptEditor::_resave_scripts(const String &p_str) { apply_scripts(); - for (int i = 0; i < tab_container->get_child_count(); i++) { - ScriptEditorBase *se = Object::cast_to<ScriptEditorBase>(tab_container->get_child(i)); + for (int i = 0; i < tab_container->get_tab_count(); i++) { + ScriptEditorBase *se = Object::cast_to<ScriptEditorBase>(tab_container->get_tab_control(i)); if (!se) { continue; } @@ -941,8 +941,8 @@ void ScriptEditor::_resave_scripts(const String &p_str) { } void ScriptEditor::_reload_scripts() { - for (int i = 0; i < tab_container->get_child_count(); i++) { - ScriptEditorBase *se = Object::cast_to<ScriptEditorBase>(tab_container->get_child(i)); + for (int i = 0; i < tab_container->get_tab_count(); i++) { + ScriptEditorBase *se = Object::cast_to<ScriptEditorBase>(tab_container->get_tab_control(i)); if (!se) { continue; } @@ -985,8 +985,8 @@ void ScriptEditor::_reload_scripts() { } void ScriptEditor::_res_saved_callback(const Ref<Resource> &p_res) { - for (int i = 0; i < tab_container->get_child_count(); i++) { - ScriptEditorBase *se = Object::cast_to<ScriptEditorBase>(tab_container->get_child(i)); + for (int i = 0; i < tab_container->get_tab_count(); i++) { + ScriptEditorBase *se = Object::cast_to<ScriptEditorBase>(tab_container->get_tab_control(i)); if (!se) { continue; } @@ -1004,8 +1004,8 @@ void ScriptEditor::_res_saved_callback(const Ref<Resource> &p_res) { void ScriptEditor::_scene_saved_callback(const String &p_path) { // If scene was saved, mark all built-in scripts from that scene as saved. - for (int i = 0; i < tab_container->get_child_count(); i++) { - ScriptEditorBase *se = Object::cast_to<ScriptEditorBase>(tab_container->get_child(i)); + for (int i = 0; i < tab_container->get_tab_count(); i++) { + ScriptEditorBase *se = Object::cast_to<ScriptEditorBase>(tab_container->get_tab_control(i)); if (!se) { continue; } @@ -1048,8 +1048,8 @@ bool ScriptEditor::_test_script_times_on_disk(RES p_for_script) { bool need_reload = false; bool use_autoreload = bool(EDITOR_DEF("text_editor/behavior/files/auto_reload_scripts_on_external_change", false)); - for (int i = 0; i < tab_container->get_child_count(); i++) { - ScriptEditorBase *se = Object::cast_to<ScriptEditorBase>(tab_container->get_child(i)); + for (int i = 0; i < tab_container->get_tab_count(); i++) { + ScriptEditorBase *se = Object::cast_to<ScriptEditorBase>(tab_container->get_tab_control(i)); if (se) { RES edited_res = se->get_edited_resource(); if (p_for_script.is_valid() && edited_res.is_valid() && p_for_script != edited_res) { @@ -1449,7 +1449,7 @@ void ScriptEditor::_menu_option(int p_option) { } } break; case WINDOW_MOVE_DOWN: { - if (tab_container->get_current_tab() < tab_container->get_child_count() - 1) { + if (tab_container->get_current_tab() < tab_container->get_tab_count() - 1) { tab_container->move_child(current, tab_container->get_current_tab() + 1); tab_container->set_current_tab(tab_container->get_current_tab() + 1); _update_script_names(); @@ -1495,7 +1495,7 @@ void ScriptEditor::_menu_option(int p_option) { } } break; case WINDOW_MOVE_DOWN: { - if (tab_container->get_current_tab() < tab_container->get_child_count() - 1) { + if (tab_container->get_current_tab() < tab_container->get_tab_count() - 1) { tab_container->move_child(help, tab_container->get_current_tab() + 1); tab_container->set_current_tab(tab_container->get_current_tab() + 1); _update_script_names(); @@ 
-1545,9 +1545,9 @@ void ScriptEditor::_show_save_theme_as_dialog() { } bool ScriptEditor::_has_docs_tab() const { - const int child_count = tab_container->get_child_count(); + const int child_count = tab_container->get_tab_count(); for (int i = 0; i < child_count; i++) { - if (Object::cast_to<EditorHelp>(tab_container->get_child(i))) { + if (Object::cast_to<EditorHelp>(tab_container->get_tab_control(i))) { return true; } } @@ -1555,9 +1555,9 @@ bool ScriptEditor::_has_docs_tab() const { } bool ScriptEditor::_has_script_tab() const { - const int child_count = tab_container->get_child_count(); + const int child_count = tab_container->get_tab_count(); for (int i = 0; i < child_count; i++) { - if (Object::cast_to<ScriptEditorBase>(tab_container->get_child(i))) { + if (Object::cast_to<ScriptEditorBase>(tab_container->get_tab_control(i))) { return true; } } @@ -1581,9 +1581,9 @@ void ScriptEditor::_prepare_file_menu() { menu->set_item_disabled(menu->get_item_index(WINDOW_PREV), history_pos <= 0); menu->set_item_disabled(menu->get_item_index(WINDOW_NEXT), history_pos >= history.size() - 1); - menu->set_item_disabled(menu->get_item_index(FILE_CLOSE), tab_container->get_child_count() < 1); - menu->set_item_disabled(menu->get_item_index(CLOSE_ALL), tab_container->get_child_count() < 1); - menu->set_item_disabled(menu->get_item_index(CLOSE_OTHER_TABS), tab_container->get_child_count() <= 1); + menu->set_item_disabled(menu->get_item_index(FILE_CLOSE), tab_container->get_tab_count() < 1); + menu->set_item_disabled(menu->get_item_index(CLOSE_ALL), tab_container->get_tab_count() < 1); + menu->set_item_disabled(menu->get_item_index(CLOSE_OTHER_TABS), tab_container->get_tab_count() <= 1); menu->set_item_disabled(menu->get_item_index(CLOSE_DOCS), !_has_docs_tab()); menu->set_item_disabled(menu->get_item_index(FILE_RUN), current_is_doc); @@ -1682,8 +1682,8 @@ bool ScriptEditor::can_take_away_focus() const { } void ScriptEditor::close_builtin_scripts_from_scene(const String &p_scene) { - for (int i = 0; i < tab_container->get_child_count(); i++) { - ScriptEditorBase *se = Object::cast_to<ScriptEditorBase>(tab_container->get_child(i)); + for (int i = 0; i < tab_container->get_tab_count(); i++) { + ScriptEditorBase *se = Object::cast_to<ScriptEditorBase>(tab_container->get_tab_control(i)); if (se) { Ref<Script> script = se->get_edited_resource(); @@ -1713,8 +1713,8 @@ void ScriptEditor::notify_script_changed(const Ref<Script> &p_script) { void ScriptEditor::get_breakpoints(List<String> *p_breakpoints) { Set<String> loaded_scripts; - for (int i = 0; i < tab_container->get_child_count(); i++) { - ScriptEditorBase *se = Object::cast_to<ScriptEditorBase>(tab_container->get_child(i)); + for (int i = 0; i < tab_container->get_tab_count(); i++) { + ScriptEditorBase *se = Object::cast_to<ScriptEditorBase>(tab_container->get_tab_control(i)); if (!se) { continue; } @@ -1766,7 +1766,7 @@ void ScriptEditor::_members_overview_selected(int p_idx) { } void ScriptEditor::_help_overview_selected(int p_idx) { - Node *current = tab_container->get_child(tab_container->get_current_tab()); + Node *current = tab_container->get_tab_control(tab_container->get_current_tab()); EditorHelp *se = Object::cast_to<EditorHelp>(current); if (!se) { return; @@ -1782,7 +1782,7 @@ void ScriptEditor::_script_selected(int p_idx) { } void ScriptEditor::ensure_select_current() { - if (tab_container->get_child_count() && tab_container->get_current_tab() >= 0) { + if (tab_container->get_tab_count() && tab_container->get_current_tab() >= 0) { 
ScriptEditorBase *se = _get_current_editor(); if (se) { se->enable_editor(); @@ -1893,12 +1893,12 @@ void ScriptEditor::_update_members_overview() { void ScriptEditor::_update_help_overview_visibility() { int selected = tab_container->get_current_tab(); - if (selected < 0 || selected >= tab_container->get_child_count()) { + if (selected < 0 || selected >= tab_container->get_tab_count()) { help_overview->set_visible(false); return; } - Node *current = tab_container->get_child(tab_container->get_current_tab()); + Node *current = tab_container->get_tab_control(tab_container->get_current_tab()); EditorHelp *se = Object::cast_to<EditorHelp>(current); if (!se) { help_overview->set_visible(false); @@ -1920,11 +1920,11 @@ void ScriptEditor::_update_help_overview() { help_overview->clear(); int selected = tab_container->get_current_tab(); - if (selected < 0 || selected >= tab_container->get_child_count()) { + if (selected < 0 || selected >= tab_container->get_tab_count()) { return; } - Node *current = tab_container->get_child(tab_container->get_current_tab()); + Node *current = tab_container->get_tab_control(tab_container->get_current_tab()); EditorHelp *se = Object::cast_to<EditorHelp>(current); if (!se) { return; @@ -1947,7 +1947,7 @@ void ScriptEditor::_update_script_colors() { for (int i = 0; i < script_list->get_item_count(); i++) { int c = script_list->get_item_metadata(i); - Node *n = tab_container->get_child(c); + Node *n = tab_container->get_tab_control(c); if (!n) { continue; } @@ -1990,8 +1990,8 @@ void ScriptEditor::_update_script_names() { Vector<_ScriptEditorItemData> sedata; - for (int i = 0; i < tab_container->get_child_count(); i++) { - ScriptEditorBase *se = Object::cast_to<ScriptEditorBase>(tab_container->get_child(i)); + for (int i = 0; i < tab_container->get_tab_count(); i++) { + ScriptEditorBase *se = Object::cast_to<ScriptEditorBase>(tab_container->get_tab_control(i)); if (se) { Ref<Texture2D> icon = se->get_theme_icon(); String path = se->get_edited_resource()->get_path(); @@ -2080,7 +2080,7 @@ void ScriptEditor::_update_script_names() { } } - EditorHelp *eh = Object::cast_to<EditorHelp>(tab_container->get_child(i)); + EditorHelp *eh = Object::cast_to<EditorHelp>(tab_container->get_tab_control(i)); if (eh) { String name = eh->get_class(); Ref<Texture2D> icon = get_theme_icon(SNAME("Help"), SNAME("EditorIcons")); @@ -2172,8 +2172,8 @@ void ScriptEditor::_update_script_names() { } void ScriptEditor::_update_script_connections() { - for (int i = 0; i < tab_container->get_child_count(); i++) { - ScriptTextEditor *ste = Object::cast_to<ScriptTextEditor>(tab_container->get_child(i)); + for (int i = 0; i < tab_container->get_tab_count(); i++) { + ScriptTextEditor *ste = Object::cast_to<ScriptTextEditor>(tab_container->get_tab_control(i)); if (!ste) { continue; } @@ -2322,8 +2322,8 @@ bool ScriptEditor::edit(const RES &p_resource, int p_line, int p_col, bool p_gra WARN_PRINT("Couldn't open external text editor, using internal"); } - for (int i = 0; i < tab_container->get_child_count(); i++) { - ScriptEditorBase *se = Object::cast_to<ScriptEditorBase>(tab_container->get_child(i)); + for (int i = 0; i < tab_container->get_tab_count(); i++) { + ScriptEditorBase *se = Object::cast_to<ScriptEditorBase>(tab_container->get_tab_control(i)); if (!se) { continue; } @@ -2498,8 +2498,8 @@ void ScriptEditor::save_current_script() { void ScriptEditor::save_all_scripts() { Vector<String> scenes_to_save; - for (int i = 0; i < tab_container->get_child_count(); i++) { - ScriptEditorBase *se = 
Object::cast_to<ScriptEditorBase>(tab_container->get_child(i)); + for (int i = 0; i < tab_container->get_tab_count(); i++) { + ScriptEditorBase *se = Object::cast_to<ScriptEditorBase>(tab_container->get_tab_control(i)); if (!se) { continue; } @@ -2574,8 +2574,8 @@ void ScriptEditor::save_all_scripts() { } void ScriptEditor::apply_scripts() const { - for (int i = 0; i < tab_container->get_child_count(); i++) { - ScriptEditorBase *se = Object::cast_to<ScriptEditorBase>(tab_container->get_child(i)); + for (int i = 0; i < tab_container->get_tab_count(); i++) { + ScriptEditorBase *se = Object::cast_to<ScriptEditorBase>(tab_container->get_tab_control(i)); if (!se) { continue; } @@ -2624,8 +2624,8 @@ RES ScriptEditor::open_file(const String &p_file) { } void ScriptEditor::_editor_stop() { - for (int i = 0; i < tab_container->get_child_count(); i++) { - ScriptEditorBase *se = Object::cast_to<ScriptEditorBase>(tab_container->get_child(i)); + for (int i = 0; i < tab_container->get_tab_count(); i++) { + ScriptEditorBase *se = Object::cast_to<ScriptEditorBase>(tab_container->get_tab_control(i)); if (!se) { continue; } @@ -2641,8 +2641,8 @@ void ScriptEditor::_add_callback(Object *p_obj, const String &p_function, const EditorNode::get_singleton()->push_item(script.ptr()); - for (int i = 0; i < tab_container->get_child_count(); i++) { - ScriptEditorBase *se = Object::cast_to<ScriptEditorBase>(tab_container->get_child(i)); + for (int i = 0; i < tab_container->get_tab_count(); i++) { + ScriptEditorBase *se = Object::cast_to<ScriptEditorBase>(tab_container->get_tab_control(i)); if (!se) { continue; } @@ -2712,8 +2712,8 @@ void ScriptEditor::_editor_settings_changed() { EditorSettings::get_singleton()->load_text_editor_theme(); } - for (int i = 0; i < tab_container->get_child_count(); i++) { - ScriptEditorBase *se = Object::cast_to<ScriptEditorBase>(tab_container->get_child(i)); + for (int i = 0; i < tab_container->get_tab_count(); i++) { + ScriptEditorBase *se = Object::cast_to<ScriptEditorBase>(tab_container->get_tab_control(i)); if (!se) { continue; } @@ -2751,8 +2751,8 @@ void ScriptEditor::_files_moved(const String &p_old_file, const String &p_new_fi } void ScriptEditor::_file_removed(const String &p_removed_file) { - for (int i = 0; i < tab_container->get_child_count(); i++) { - ScriptEditorBase *se = Object::cast_to<ScriptEditorBase>(tab_container->get_child(i)); + for (int i = 0; i < tab_container->get_tab_count(); i++) { + ScriptEditorBase *se = Object::cast_to<ScriptEditorBase>(tab_container->get_tab_control(i)); if (!se) { continue; } @@ -2815,11 +2815,11 @@ void ScriptEditor::_split_dragged(float) { } Variant ScriptEditor::get_drag_data_fw(const Point2 &p_point, Control *p_from) { - if (tab_container->get_child_count() == 0) { + if (tab_container->get_tab_count() == 0) { return Variant(); } - Node *cur_node = tab_container->get_child(tab_container->get_current_tab()); + Node *cur_node = tab_container->get_tab_control(tab_container->get_current_tab()); HBoxContainer *drag_preview = memnew(HBoxContainer); String preview_name = ""; @@ -2975,7 +2975,7 @@ void ScriptEditor::drop_data_fw(const Point2 &p_point, const Variant &p_data, Co if (script_list->get_item_count() > 0) { new_index = script_list->get_item_metadata(script_list->get_item_at_position(p_point)); } - int num_tabs_before = tab_container->get_child_count(); + int num_tabs_before = tab_container->get_tab_count(); for (int i = 0; i < files.size(); i++) { String file = files[i]; if (file.is_empty() || !FileAccess::exists(file)) { @@ 
-2988,11 +2988,11 @@ void ScriptEditor::drop_data_fw(const Point2 &p_point, const Variant &p_data, Co RES res = open_file(file); if (res.is_valid()) { - if (tab_container->get_child_count() > num_tabs_before) { - tab_container->move_child(tab_container->get_child(tab_container->get_child_count() - 1), new_index); - num_tabs_before = tab_container->get_child_count(); + if (tab_container->get_tab_count() > num_tabs_before) { + tab_container->move_child(tab_container->get_tab_control(tab_container->get_tab_count() - 1), new_index); + num_tabs_before = tab_container->get_tab_count(); } else { /* Maybe script was already open */ - tab_container->move_child(tab_container->get_child(tab_container->get_current_tab()), new_index); + tab_container->move_child(tab_container->get_tab_control(tab_container->get_current_tab()), new_index); } } } @@ -3081,11 +3081,11 @@ void ScriptEditor::_make_script_list_context_menu() { context_menu->clear(); int selected = tab_container->get_current_tab(); - if (selected < 0 || selected >= tab_container->get_child_count()) { + if (selected < 0 || selected >= tab_container->get_tab_count()) { return; } - ScriptEditorBase *se = Object::cast_to<ScriptEditorBase>(tab_container->get_child(selected)); + ScriptEditorBase *se = Object::cast_to<ScriptEditorBase>(tab_container->get_tab_control(selected)); if (se) { context_menu->add_shortcut(ED_GET_SHORTCUT("script_editor/save"), FILE_SAVE); context_menu->add_shortcut(ED_GET_SHORTCUT("script_editor/save_as"), FILE_SAVE_AS); @@ -3113,11 +3113,11 @@ void ScriptEditor::_make_script_list_context_menu() { context_menu->add_shortcut(ED_GET_SHORTCUT("script_editor/window_sort"), WINDOW_SORT); context_menu->add_shortcut(ED_GET_SHORTCUT("script_editor/toggle_scripts_panel"), TOGGLE_SCRIPTS_PANEL); - context_menu->set_item_disabled(context_menu->get_item_index(CLOSE_ALL), tab_container->get_child_count() <= 0); - context_menu->set_item_disabled(context_menu->get_item_index(CLOSE_OTHER_TABS), tab_container->get_child_count() <= 1); + context_menu->set_item_disabled(context_menu->get_item_index(CLOSE_ALL), tab_container->get_tab_count() <= 0); + context_menu->set_item_disabled(context_menu->get_item_index(CLOSE_OTHER_TABS), tab_container->get_tab_count() <= 1); context_menu->set_item_disabled(context_menu->get_item_index(WINDOW_MOVE_UP), tab_container->get_current_tab() <= 0); - context_menu->set_item_disabled(context_menu->get_item_index(WINDOW_MOVE_DOWN), tab_container->get_current_tab() >= tab_container->get_child_count() - 1); - context_menu->set_item_disabled(context_menu->get_item_index(WINDOW_SORT), tab_container->get_child_count() <= 1); + context_menu->set_item_disabled(context_menu->get_item_index(WINDOW_MOVE_DOWN), tab_container->get_current_tab() >= tab_container->get_tab_count() - 1); + context_menu->set_item_disabled(context_menu->get_item_index(WINDOW_SORT), tab_container->get_tab_count() <= 1); context_menu->set_position(get_screen_position() + get_local_mouse_position()); context_menu->reset_size(); @@ -3181,7 +3181,7 @@ void ScriptEditor::set_window_layout(Ref<ConfigFile> p_layout) { } if (!script_info.is_empty()) { - ScriptEditorBase *se = Object::cast_to<ScriptEditorBase>(tab_container->get_child(tab_container->get_tab_count() - 1)); + ScriptEditorBase *se = Object::cast_to<ScriptEditorBase>(tab_container->get_tab_control(tab_container->get_tab_count() - 1)); if (se) { se->set_edit_state(script_info["state"]); } @@ -3196,8 +3196,8 @@ void ScriptEditor::set_window_layout(Ref<ConfigFile> p_layout) { 
_help_class_open(path); } - for (int i = 0; i < tab_container->get_child_count(); i++) { - tab_container->get_child(i)->set_meta("__editor_pass", Variant()); + for (int i = 0; i < tab_container->get_tab_count(); i++) { + tab_container->get_tab_control(i)->set_meta("__editor_pass", Variant()); } if (p_layout->has_section_key("ScriptEditor", "script_split_offset")) { @@ -3237,8 +3237,8 @@ void ScriptEditor::get_window_layout(Ref<ConfigFile> p_layout) { Array scripts; Array helps; - for (int i = 0; i < tab_container->get_child_count(); i++) { - ScriptEditorBase *se = Object::cast_to<ScriptEditorBase>(tab_container->get_child(i)); + for (int i = 0; i < tab_container->get_tab_count(); i++) { + ScriptEditorBase *se = Object::cast_to<ScriptEditorBase>(tab_container->get_tab_control(i)); if (se) { String path = se->get_edited_resource()->get_path(); if (!path.is_resource_file()) { @@ -3249,7 +3249,7 @@ void ScriptEditor::get_window_layout(Ref<ConfigFile> p_layout) { scripts.push_back(path); } - EditorHelp *eh = Object::cast_to<EditorHelp>(tab_container->get_child(i)); + EditorHelp *eh = Object::cast_to<EditorHelp>(tab_container->get_tab_control(i)); if (eh) { helps.push_back(eh->get_class()); @@ -3270,8 +3270,8 @@ void ScriptEditor::_help_class_open(const String &p_class) { return; } - for (int i = 0; i < tab_container->get_child_count(); i++) { - EditorHelp *eh = Object::cast_to<EditorHelp>(tab_container->get_child(i)); + for (int i = 0; i < tab_container->get_tab_count(); i++) { + EditorHelp *eh = Object::cast_to<EditorHelp>(tab_container->get_tab_control(i)); if (eh && eh->get_class() == p_class) { _go_to_tab(i); @@ -3296,8 +3296,8 @@ void ScriptEditor::_help_class_open(const String &p_class) { void ScriptEditor::_help_class_goto(const String &p_desc) { String cname = p_desc.get_slice(":", 1); - for (int i = 0; i < tab_container->get_child_count(); i++) { - EditorHelp *eh = Object::cast_to<EditorHelp>(tab_container->get_child(i)); + for (int i = 0; i < tab_container->get_tab_count(); i++) { + EditorHelp *eh = Object::cast_to<EditorHelp>(tab_container->get_tab_control(i)); if (eh && eh->get_class() == cname) { _go_to_tab(i); @@ -3323,8 +3323,8 @@ void ScriptEditor::_help_class_goto(const String &p_desc) { void ScriptEditor::update_doc(const String &p_name) { ERR_FAIL_COND(!EditorHelp::get_doc_data()->has_doc(p_name)); - for (int i = 0; i < tab_container->get_child_count(); i++) { - EditorHelp *eh = Object::cast_to<EditorHelp>(tab_container->get_child(i)); + for (int i = 0; i < tab_container->get_tab_count(); i++) { + EditorHelp *eh = Object::cast_to<EditorHelp>(tab_container->get_tab_control(i)); if (eh && eh->get_class() == p_name) { eh->update_doc(); return; @@ -3333,10 +3333,10 @@ void ScriptEditor::update_doc(const String &p_name) { } void ScriptEditor::_update_selected_editor_menu() { - for (int i = 0; i < tab_container->get_child_count(); i++) { + for (int i = 0; i < tab_container->get_tab_count(); i++) { bool current = tab_container->get_current_tab() == i; - ScriptEditorBase *se = Object::cast_to<ScriptEditorBase>(tab_container->get_child(i)); + ScriptEditorBase *se = Object::cast_to<ScriptEditorBase>(tab_container->get_tab_control(i)); if (se && se->get_edit_menu()) { if (current) { se->get_edit_menu()->show(); @@ -3356,7 +3356,7 @@ void ScriptEditor::_update_selected_editor_menu() { script_search_menu->get_popup()->add_shortcut(ED_SHORTCUT("script_editor/find_in_files", TTR("Find in Files"), KeyModifierMask::CMD | KeyModifierMask::SHIFT | Key::F), SEARCH_IN_FILES); 
script_search_menu->show(); } else { - if (tab_container->get_child_count() == 0) { + if (tab_container->get_tab_count() == 0) { script_search_menu->get_popup()->add_shortcut(ED_SHORTCUT("script_editor/find_in_files", TTR("Find in Files"), KeyModifierMask::CMD | KeyModifierMask::SHIFT | Key::F), SEARCH_IN_FILES); script_search_menu->show(); } else { @@ -3416,8 +3416,8 @@ void ScriptEditor::_history_back() { Vector<Ref<Script>> ScriptEditor::get_open_scripts() const { Vector<Ref<Script>> out_scripts = Vector<Ref<Script>>(); - for (int i = 0; i < tab_container->get_child_count(); i++) { - ScriptEditorBase *se = Object::cast_to<ScriptEditorBase>(tab_container->get_child(i)); + for (int i = 0; i < tab_container->get_tab_count(); i++) { + ScriptEditorBase *se = Object::cast_to<ScriptEditorBase>(tab_container->get_tab_control(i)); if (!se) { continue; } @@ -3433,8 +3433,8 @@ Vector<Ref<Script>> ScriptEditor::get_open_scripts() const { Array ScriptEditor::_get_open_script_editors() const { Array script_editors; - for (int i = 0; i < tab_container->get_child_count(); i++) { - ScriptEditorBase *se = Object::cast_to<ScriptEditorBase>(tab_container->get_child(i)); + for (int i = 0; i < tab_container->get_tab_count(); i++) { + ScriptEditorBase *se = Object::cast_to<ScriptEditorBase>(tab_container->get_tab_control(i)); if (!se) { continue; } diff --git a/editor/plugins/sprite_2d_editor_plugin.cpp b/editor/plugins/sprite_2d_editor_plugin.cpp index 3489ac2c1e..6a63875324 100644 --- a/editor/plugins/sprite_2d_editor_plugin.cpp +++ b/editor/plugins/sprite_2d_editor_plugin.cpp @@ -122,8 +122,8 @@ void Sprite2DEditor::_menu_option(int p_option) { switch (p_option) { case MENU_OPTION_CONVERT_TO_MESH_2D: { - debug_uv_dialog->get_ok_button()->set_text(TTR("Create Mesh2D")); - debug_uv_dialog->set_title(TTR("Mesh2D Preview")); + debug_uv_dialog->get_ok_button()->set_text(TTR("Create MeshInstance2D")); + debug_uv_dialog->set_title(TTR("MeshInstance2D Preview")); _update_mesh_data(); debug_uv_dialog->popup_centered(); @@ -338,7 +338,7 @@ void Sprite2DEditor::_convert_to_mesh_2d_node() { mesh_instance->set_mesh(mesh); UndoRedo *ur = EditorNode::get_singleton()->get_undo_redo(); - ur->create_action(TTR("Convert to Mesh2D")); + ur->create_action(TTR("Convert to MeshInstance2D")); ur->add_do_method(SceneTreeDock::get_singleton(), "replace_node", node, mesh_instance, true, false); ur->add_do_reference(mesh_instance); ur->add_undo_method(SceneTreeDock::get_singleton(), "replace_node", mesh_instance, node, false, false); @@ -498,6 +498,20 @@ void Sprite2DEditor::_debug_uv_draw() { } } +void Sprite2DEditor::_notification(int p_what) { + switch (p_what) { + case NOTIFICATION_ENTER_TREE: + case NOTIFICATION_THEME_CHANGED: { + options->set_icon(get_theme_icon(SNAME("Sprite2D"), SNAME("EditorIcons"))); + + options->get_popup()->set_item_icon(MENU_OPTION_CONVERT_TO_MESH_2D, get_theme_icon(SNAME("MeshInstance2D"), SNAME("EditorIcons"))); + options->get_popup()->set_item_icon(MENU_OPTION_CONVERT_TO_POLYGON_2D, get_theme_icon(SNAME("Polygon2D"), SNAME("EditorIcons"))); + options->get_popup()->set_item_icon(MENU_OPTION_CREATE_COLLISION_POLY_2D, get_theme_icon(SNAME("CollisionPolygon2D"), SNAME("EditorIcons"))); + options->get_popup()->set_item_icon(MENU_OPTION_CREATE_LIGHT_OCCLUDER_2D, get_theme_icon(SNAME("LightOccluder2D"), SNAME("EditorIcons"))); + } break; + } +} + void Sprite2DEditor::_bind_methods() { ClassDB::bind_method("_add_as_sibling_or_child", &Sprite2DEditor::_add_as_sibling_or_child); } @@ -508,9 +522,8 @@ 
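// Illustrative sketch (not part of the commit): the script_editor.cpp hunks above consistently swap the
// generic Node child API (get_child_count() / get_child(i)) for the TabContainer tab API
// (get_tab_count() / get_tab_control(i)), which only counts controls that are actual tabs.
// A minimal example of the new iteration idiom, assuming a TabContainer *tab_container member like the
// one in ScriptEditor; the helper name count_open_script_tabs is hypothetical.
static int count_open_script_tabs(TabContainer *tab_container) {
	int count = 0;
	for (int i = 0; i < tab_container->get_tab_count(); i++) {
		// Internal (non-tab) children of the TabContainer are never returned here.
		if (Object::cast_to<ScriptEditorBase>(tab_container->get_tab_control(i))) {
			count++;
		}
	}
	return count;
}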
Sprite2DEditor::Sprite2DEditor() { CanvasItemEditor::get_singleton()->add_control_to_menu_panel(options); options->set_text(TTR("Sprite2D")); - options->set_icon(EditorNode::get_singleton()->get_gui_base()->get_theme_icon(SNAME("Sprite2D"), SNAME("EditorIcons"))); - options->get_popup()->add_item(TTR("Convert to Mesh2D"), MENU_OPTION_CONVERT_TO_MESH_2D); + options->get_popup()->add_item(TTR("Convert to MeshInstance2D"), MENU_OPTION_CONVERT_TO_MESH_2D); options->get_popup()->add_item(TTR("Convert to Polygon2D"), MENU_OPTION_CONVERT_TO_POLYGON_2D); options->get_popup()->add_item(TTR("Create CollisionPolygon2D Sibling"), MENU_OPTION_CREATE_COLLISION_POLY_2D); options->get_popup()->add_item(TTR("Create LightOccluder2D Sibling"), MENU_OPTION_CREATE_LIGHT_OCCLUDER_2D); @@ -522,8 +535,6 @@ Sprite2DEditor::Sprite2DEditor() { add_child(err_dialog); debug_uv_dialog = memnew(ConfirmationDialog); - debug_uv_dialog->get_ok_button()->set_text(TTR("Create Mesh2D")); - debug_uv_dialog->set_title(TTR("Mesh 2D Preview")); VBoxContainer *vb = memnew(VBoxContainer); debug_uv_dialog->add_child(vb); ScrollContainer *scroll = memnew(ScrollContainer); diff --git a/editor/plugins/sprite_2d_editor_plugin.h b/editor/plugins/sprite_2d_editor_plugin.h index 3e4cc17cdd..46953b0937 100644 --- a/editor/plugins/sprite_2d_editor_plugin.h +++ b/editor/plugins/sprite_2d_editor_plugin.h @@ -87,6 +87,7 @@ class Sprite2DEditor : public Control { protected: void _node_removed(Node *p_node); + void _notification(int p_what); static void _bind_methods(); public: diff --git a/editor/plugins/theme_editor_plugin.cpp b/editor/plugins/theme_editor_plugin.cpp index a03f036b72..74e3372730 100644 --- a/editor/plugins/theme_editor_plugin.cpp +++ b/editor/plugins/theme_editor_plugin.cpp @@ -1887,7 +1887,7 @@ ThemeItemEditorDialog::ThemeItemEditorDialog(ThemeTypeEditor *p_theme_type_edito theme_type_editor = p_theme_type_editor; tc = memnew(TabContainer); - tc->set_tab_alignment(TabContainer::ALIGNMENT_LEFT); + tc->set_tab_alignment(TabBar::ALIGNMENT_LEFT); add_child(tc); // Edit Items tab. diff --git a/editor/plugins/tiles/tile_map_editor.cpp b/editor/plugins/tiles/tile_map_editor.cpp index 8e3aa4f59d..4a0fc0b29f 100644 --- a/editor/plugins/tiles/tile_map_editor.cpp +++ b/editor/plugins/tiles/tile_map_editor.cpp @@ -4024,7 +4024,7 @@ TileMapEditor::TileMapEditor() { // Layer selector. 
layers_selection_popup = memnew(PopupMenu); layers_selection_popup->connect("id_pressed", callable_mp(this, &TileMapEditor::_layers_selection_id_pressed)); - layers_selection_popup->set_close_on_parent_focus(false); + layers_selection_popup->set_flag(Window::FLAG_POPUP, false); layers_selection_button = memnew(Button); layers_selection_button->set_toggle_mode(true); diff --git a/editor/plugins/tiles/tile_set_atlas_source_editor.cpp b/editor/plugins/tiles/tile_set_atlas_source_editor.cpp index ade591cde6..0c78a0f1c0 100644 --- a/editor/plugins/tiles/tile_set_atlas_source_editor.cpp +++ b/editor/plugins/tiles/tile_set_atlas_source_editor.cpp @@ -108,7 +108,7 @@ void TileSetAtlasSourceEditor::TileSetAtlasSourceProxyObject::_bind_methods() { ClassDB::bind_method(D_METHOD("set_id", "id"), &TileSetAtlasSourceEditor::TileSetAtlasSourceProxyObject::set_id); ClassDB::bind_method(D_METHOD("get_id"), &TileSetAtlasSourceEditor::TileSetAtlasSourceProxyObject::get_id); - ADD_PROPERTY(PropertyInfo(Variant::INT, "id"), "set_id", "get_id"); + ADD_PROPERTY(PropertyInfo(Variant::INT, "id", PROPERTY_HINT_RANGE, "0," + itos(INT_MAX) + ",1"), "set_id", "get_id"); ADD_SIGNAL(MethodInfo("changed", PropertyInfo(Variant::STRING, "what"))); } diff --git a/editor/plugins/version_control_editor_plugin.cpp b/editor/plugins/version_control_editor_plugin.cpp index b1d5b348c4..443d5975cd 100644 --- a/editor/plugins/version_control_editor_plugin.cpp +++ b/editor/plugins/version_control_editor_plugin.cpp @@ -329,7 +329,7 @@ void VersionControlEditorPlugin::register_editor() { if (!EditorVCSInterface::get_singleton()) { EditorNode::get_singleton()->add_control_to_dock(EditorNode::DOCK_SLOT_RIGHT_UL, version_commit_dock); TabContainer *dock_vbc = (TabContainer *)version_commit_dock->get_parent_control(); - dock_vbc->set_tab_title(version_commit_dock->get_index(), TTR("Commit")); + dock_vbc->set_tab_title(dock_vbc->get_tab_idx_from_control(version_commit_dock), TTR("Commit")); Button *vc = EditorNode::get_singleton()->add_bottom_panel_item(TTR("Version Control"), version_control_dock); set_version_control_tool_button(vc); diff --git a/editor/plugins/visual_shader_editor_plugin.cpp b/editor/plugins/visual_shader_editor_plugin.cpp index 446ad12104..a821faf6b3 100644 --- a/editor/plugins/visual_shader_editor_plugin.cpp +++ b/editor/plugins/visual_shader_editor_plugin.cpp @@ -4458,8 +4458,8 @@ VisualShaderEditor::VisualShaderEditor() { add_options.push_back(AddOption("ColorOp", "Color", "Common", "VisualShaderNodeColorOp", TTR("Color operator."), {}, VisualShaderNode::PORT_TYPE_VECTOR_3D)); add_options.push_back(AddOption("Grayscale", "Color", "Functions", "VisualShaderNodeColorFunc", TTR("Grayscale function."), { VisualShaderNodeColorFunc::FUNC_GRAYSCALE }, VisualShaderNode::PORT_TYPE_VECTOR_3D)); - add_options.push_back(AddOption("HSV2RGB", "Color", "Functions", "VisualShaderNodeVectorFunc", TTR("Converts HSV vector to RGB equivalent."), { VisualShaderNodeVectorFunc::FUNC_HSV2RGB }, VisualShaderNode::PORT_TYPE_VECTOR_3D)); - add_options.push_back(AddOption("RGB2HSV", "Color", "Functions", "VisualShaderNodeVectorFunc", TTR("Converts RGB vector to HSV equivalent."), { VisualShaderNodeVectorFunc::FUNC_RGB2HSV }, VisualShaderNode::PORT_TYPE_VECTOR_3D)); + add_options.push_back(AddOption("HSV2RGB", "Color", "Functions", "VisualShaderNodeVectorFunc", TTR("Converts HSV vector to RGB equivalent."), { VisualShaderNodeVectorFunc::FUNC_HSV2RGB, VisualShaderNodeVectorFunc::OP_TYPE_VECTOR_3D }, VisualShaderNode::PORT_TYPE_VECTOR_3D)); + 
add_options.push_back(AddOption("RGB2HSV", "Color", "Functions", "VisualShaderNodeVectorFunc", TTR("Converts RGB vector to HSV equivalent."), { VisualShaderNodeVectorFunc::FUNC_RGB2HSV, VisualShaderNodeVectorFunc::OP_TYPE_VECTOR_3D }, VisualShaderNode::PORT_TYPE_VECTOR_3D)); add_options.push_back(AddOption("Sepia", "Color", "Functions", "VisualShaderNodeColorFunc", TTR("Sepia function."), { VisualShaderNodeColorFunc::FUNC_SEPIA }, VisualShaderNode::PORT_TYPE_VECTOR_3D)); add_options.push_back(AddOption("Burn", "Color", "Operators", "VisualShaderNodeColorOp", TTR("Burn operator."), { VisualShaderNodeColorOp::OP_BURN }, VisualShaderNode::PORT_TYPE_VECTOR_3D)); diff --git a/editor/project_export.cpp b/editor/project_export.cpp index 55a4dc2c67..1a111bcbfc 100644 --- a/editor/project_export.cpp +++ b/editor/project_export.cpp @@ -1055,7 +1055,7 @@ ProjectExportDialog::ProjectExportDialog() { // Subsections. sections = memnew(TabContainer); - sections->set_tab_alignment(TabContainer::ALIGNMENT_LEFT); + sections->set_tab_alignment(TabBar::ALIGNMENT_LEFT); sections->set_use_hidden_tabs_for_min_size(true); settings_vb->add_child(sections); sections->set_v_size_flags(Control::SIZE_EXPAND_FILL); diff --git a/editor/project_manager.cpp b/editor/project_manager.cpp index 87d008d144..79aed36eeb 100644 --- a/editor/project_manager.cpp +++ b/editor/project_manager.cpp @@ -2566,7 +2566,7 @@ ProjectManager::ProjectManager() { tabs = memnew(TabContainer); center_box->add_child(tabs); tabs->set_anchors_and_offsets_preset(Control::PRESET_WIDE); - tabs->set_tab_alignment(TabContainer::ALIGNMENT_LEFT); + tabs->set_tab_alignment(TabBar::ALIGNMENT_LEFT); tabs->connect("tab_changed", callable_mp(this, &ProjectManager::_on_tab_changed)); HBoxContainer *projects_hb = memnew(HBoxContainer); diff --git a/editor/project_settings_editor.cpp b/editor/project_settings_editor.cpp index 03179733d5..4d708dc7e0 100644 --- a/editor/project_settings_editor.cpp +++ b/editor/project_settings_editor.cpp @@ -559,7 +559,7 @@ ProjectSettingsEditor::ProjectSettingsEditor(EditorData *p_data) { data = p_data; tab_container = memnew(TabContainer); - tab_container->set_tab_alignment(TabContainer::ALIGNMENT_LEFT); + tab_container->set_tab_alignment(TabBar::ALIGNMENT_LEFT); tab_container->set_use_hidden_tabs_for_min_size(true); add_child(tab_container); diff --git a/editor/rename_dialog.cpp b/editor/rename_dialog.cpp index 46751058d0..93c5b9ad4c 100644 --- a/editor/rename_dialog.cpp +++ b/editor/rename_dialog.cpp @@ -114,7 +114,7 @@ RenameDialog::RenameDialog(SceneTreeEditor *p_scene_tree_editor, UndoRedo *p_und vbc->add_child(cbut_collapse_features); tabc_features = memnew(TabContainer); - tabc_features->set_tab_alignment(TabContainer::ALIGNMENT_LEFT); + tabc_features->set_tab_alignment(TabBar::ALIGNMENT_LEFT); tabc_features->set_use_hidden_tabs_for_min_size(true); vbc->add_child(tabc_features); diff --git a/main/main.cpp b/main/main.cpp index 246a26025c..da79020bac 100644 --- a/main/main.cpp +++ b/main/main.cpp @@ -425,6 +425,7 @@ Error Main::test_setup() { ResourceLoader::load_path_remaps(); register_scene_types(); + register_driver_types(); #ifdef TOOLS_ENABLED ClassDB::set_current_api(ClassDB::API_EDITOR); @@ -435,7 +436,6 @@ Error Main::test_setup() { register_platform_apis(); register_module_types(); - register_driver_types(); // Theme needs modules to be initialized so that sub-resources can be loaded. 
initialize_theme(); @@ -458,13 +458,13 @@ void Main::test_cleanup() { ResourceLoader::remove_custom_loaders(); ResourceSaver::remove_custom_savers(); - unregister_driver_types(); #ifdef TOOLS_ENABLED EditorNode::unregister_editor_types(); #endif unregister_module_types(); unregister_platform_apis(); + unregister_driver_types(); unregister_scene_types(); unregister_server_types(); @@ -1890,6 +1890,10 @@ Error Main::setup2(Thread::ID p_main_tid_override) { register_scene_types(); + MAIN_PRINT("Main: Load Driver Types"); + + register_driver_types(); + #ifdef TOOLS_ENABLED ClassDB::set_current_api(ClassDB::API_EDITOR); EditorNode::register_editor_types(); @@ -1925,14 +1929,12 @@ Error Main::setup2(Thread::ID p_main_tid_override) { camera_server = CameraServer::create(); - MAIN_PRINT("Main: Load Physics, Drivers, Scripts"); + MAIN_PRINT("Main: Load Physics"); initialize_physics(); initialize_navigation_server(); register_server_singletons(); - register_driver_types(); - // This loads global classes, so it must happen before custom loaders and savers are registered ScriptServer::init_languages(); @@ -2816,8 +2818,6 @@ void Main::cleanup(bool p_force) { xr_server->set_primary_interface(Ref<XRInterface>()); } - unregister_driver_types(); - #ifdef TOOLS_ENABLED EditorNode::unregister_editor_types(); #endif @@ -2826,6 +2826,7 @@ void Main::cleanup(bool p_force) { unregister_module_types(); unregister_platform_apis(); + unregister_driver_types(); unregister_scene_types(); unregister_server_types(); diff --git a/misc/dist/ios_xcode/godot_ios.xcodeproj/project.pbxproj b/misc/dist/ios_xcode/godot_ios.xcodeproj/project.pbxproj index e2505de3bf..69899cbe8d 100644 --- a/misc/dist/ios_xcode/godot_ios.xcodeproj/project.pbxproj +++ b/misc/dist/ios_xcode/godot_ios.xcodeproj/project.pbxproj @@ -45,6 +45,7 @@ D0BCFE3418AEBDA2004A7AAE /* $binary.app */ = {isa = PBXFileReference; explicitFileType = wrapper.application; includeInIndex = 0; path = "$binary.app"; sourceTree = BUILT_PRODUCTS_DIR; }; D0BCFE4318AEBDA2004A7AAE /* $binary-Info.plist */ = {isa = PBXFileReference; lastKnownFileType = text.plist.xml; path = "$binary-Info.plist"; sourceTree = "<group>"; }; D0BCFE4518AEBDA2004A7AAE /* en */ = {isa = PBXFileReference; lastKnownFileType = text.plist.strings; name = en; path = en.lproj/InfoPlist.strings; sourceTree = "<group>"; }; + $pbx_locale_file_reference D0BCFE7718AEBFEB004A7AAE /* $binary.pck */ = {isa = PBXFileReference; lastKnownFileType = file; path = "$binary.pck"; sourceTree = "<group>"; }; $pbx_launch_screen_file_reference /* End PBXFileReference section */ @@ -207,6 +208,7 @@ isa = PBXVariantGroup; children = ( D0BCFE4518AEBDA2004A7AAE /* en */, + $pbx_locale_build_reference ); name = InfoPlist.strings; sourceTree = "<group>"; diff --git a/misc/dist/osx_template.app/Contents/Info.plist b/misc/dist/osx_template.app/Contents/Info.plist index a087550290..43399ec6ce 100644 --- a/misc/dist/osx_template.app/Contents/Info.plist +++ b/misc/dist/osx_template.app/Contents/Info.plist @@ -8,6 +8,8 @@ <string>$binary</string> <key>CFBundleName</key> <string>$name</string> + <key>CFBundleDisplayName</key> + <string>$name</string> <key>CFBundleGetInfoString</key> <string>$info</string> <key>CFBundleIconFile</key> diff --git a/modules/bullet/shape_bullet.cpp b/modules/bullet/shape_bullet.cpp index 77a583ad86..cf6bcb6c85 100644 --- a/modules/bullet/shape_bullet.cpp +++ b/modules/bullet/shape_bullet.cpp @@ -422,7 +422,7 @@ void ConcavePolygonShapeBullet::setup(Vector<Vector3> p_faces) { meshShape = 
bulletnew(btBvhTriangleMeshShape(shapeInterface, useQuantizedAabbCompression)); - if (GLOBAL_DEF("physics/3d/smooth_trimesh_collision", false)) { + if (GLOBAL_GET("physics/3d/smooth_trimesh_collision")) { btTriangleInfoMap *triangleInfoMap = new btTriangleInfoMap(); btGenerateInternalEdgeInfo(meshShape, triangleInfoMap); } diff --git a/modules/gdscript/doc_classes/@GDScript.xml b/modules/gdscript/doc_classes/@GDScript.xml index 4d6320d8c3..d9fab01dce 100644 --- a/modules/gdscript/doc_classes/@GDScript.xml +++ b/modules/gdscript/doc_classes/@GDScript.xml @@ -186,6 +186,7 @@ <description> Returns an array with the given range. Range can be 1 argument [code]N[/code] (0 to [code]N[/code] - 1), two arguments ([code]initial[/code], [code]final - 1[/code]) or three arguments ([code]initial[/code], [code]final - 1[/code], [code]increment[/code]). Returns an empty array if the range isn't valid (e.g. [code]range(2, 5, -1)[/code] or [code]range(5, 5, 1)[/code]). Returns an array with the given range. [code]range()[/code] can have 1 argument N ([code]0[/code] to [code]N - 1[/code]), two arguments ([code]initial[/code], [code]final - 1[/code]) or three arguments ([code]initial[/code], [code]final - 1[/code], [code]increment[/code]). [code]increment[/code] can be negative. If [code]increment[/code] is negative, [code]final - 1[/code] will become [code]final + 1[/code]. Also, the initial value must be greater than the final value for the loop to run. + [code]range()[/code] converts all arguments to [int] before processing. [codeblock] print(range(4)) print(range(2, 5)) @@ -211,6 +212,17 @@ 6 3 [/codeblock] + To iterate over [float], convert them in the loop. + [codeblock] + for i in range(3, 0, -1): + print(i / 10.0) + [/codeblock] + Output: + [codeblock] + 0.3 + 0.2 + 0.1 + [/codeblock] </description> </method> <method name="str" qualifiers="vararg"> diff --git a/modules/gdscript/gdscript.cpp b/modules/gdscript/gdscript.cpp index 58a788e255..8bf5fd1eda 100644 --- a/modules/gdscript/gdscript.cpp +++ b/modules/gdscript/gdscript.cpp @@ -796,7 +796,7 @@ void GDScript::_set_subclass_path(Ref<GDScript> &p_sc, const String &p_path) { String GDScript::_get_debug_path() const { if (is_built_in() && !get_name().is_empty()) { - return get_name() + " (" + get_path().get_slice("::", 0) + ")"; + return get_name() + " (" + get_path() + ")"; } else { return get_path(); } diff --git a/modules/gdscript/gdscript_analyzer.cpp b/modules/gdscript/gdscript_analyzer.cpp index 9a79f3d016..204dde4d6a 100644 --- a/modules/gdscript/gdscript_analyzer.cpp +++ b/modules/gdscript/gdscript_analyzer.cpp @@ -1251,7 +1251,7 @@ void GDScriptAnalyzer::resolve_for(GDScriptParser::ForNode *p_for) { bool list_resolved = false; // Optimize constant range() call to not allocate an array. - // Use int, Vector2, Vector3 instead, which also can be used as range iterators. + // Use int, Vector2i, Vector3i instead, which also can be used as range iterators.
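// Rough sketch of the idea behind the comment above (not the analyzer's actual code): a constant
// range(initial, final, increment) call can be packed into a Vector3i and iterated in place, so no
// intermediate Array is allocated. The helper name iterate_range_v3i is hypothetical.
static void iterate_range_v3i(const Vector3i &p_range) {
	// x = initial, y = final (exclusive), z = increment.
	if (p_range.z == 0) {
		return; // A zero increment would never terminate.
	}
	for (int i = p_range.x; p_range.z > 0 ? i < p_range.y : i > p_range.y; i += p_range.z) {
		print_line(itos(i)); // Visit each value of the range without building an array first.
	}
}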
if (p_for->list && p_for->list->type == GDScriptParser::Node::CALL) { GDScriptParser::CallNode *call = static_cast<GDScriptParser::CallNode *>(p_for->list); GDScriptParser::Node::Type callee_type = call->get_callee_type(); diff --git a/modules/gltf/gltf_document.cpp b/modules/gltf/gltf_document.cpp index 2c42879bd3..c70081a620 100644 --- a/modules/gltf/gltf_document.cpp +++ b/modules/gltf/gltf_document.cpp @@ -6470,8 +6470,8 @@ void GLTFDocument::_convert_animation(Ref<GLTFState> state, AnimationPlayer *ap, gltf_animation->get_tracks().insert(transform_track_i.key, track); } } - } else if (String(orig_track_path).contains(":blend_shapes/")) { - const Vector<String> node_suffix = String(orig_track_path).split(":blend_shapes/"); + } else if (String(orig_track_path).contains(":") && animation->track_get_type(track_i) == Animation::TYPE_BLEND_SHAPE) { + const Vector<String> node_suffix = String(orig_track_path).split(":"); const NodePath path = node_suffix[0]; const String suffix = node_suffix[1]; Node *node = ap->get_parent()->get_node_or_null(path); diff --git a/modules/gridmap/doc_classes/GridMap.xml b/modules/gridmap/doc_classes/GridMap.xml index 049d372671..407ce961c8 100644 --- a/modules/gridmap/doc_classes/GridMap.xml +++ b/modules/gridmap/doc_classes/GridMap.xml @@ -117,15 +117,6 @@ Optionally, the item's orientation can be passed. For valid orientation values, see [method Basis.get_orthogonal_index]. </description> </method> - <method name="set_clip"> - <return type="void" /> - <argument index="0" name="enabled" type="bool" /> - <argument index="1" name="clipabove" type="bool" default="true" /> - <argument index="2" name="floor" type="int" default="0" /> - <argument index="3" name="axis" type="int" enum="Vector3.Axis" default="0" /> - <description> - </description> - </method> <method name="set_collision_layer_value"> <return type="void" /> <argument index="0" name="layer_number" type="int" /> diff --git a/modules/gridmap/grid_map.cpp b/modules/gridmap/grid_map.cpp index 7c4d33ff17..02fe4d93de 100644 --- a/modules/gridmap/grid_map.cpp +++ b/modules/gridmap/grid_map.cpp @@ -874,8 +874,6 @@ void GridMap::_bind_methods() { ClassDB::bind_method(D_METHOD("set_center_z", "enable"), &GridMap::set_center_z); ClassDB::bind_method(D_METHOD("get_center_z"), &GridMap::get_center_z); - ClassDB::bind_method(D_METHOD("set_clip", "enabled", "clipabove", "floor", "axis"), &GridMap::set_clip, DEFVAL(true), DEFVAL(0), DEFVAL(Vector3::AXIS_X)); - ClassDB::bind_method(D_METHOD("clear"), &GridMap::clear); ClassDB::bind_method(D_METHOD("get_used_cells"), &GridMap::get_used_cells); @@ -909,28 +907,6 @@ void GridMap::_bind_methods() { ADD_SIGNAL(MethodInfo("cell_size_changed", PropertyInfo(Variant::VECTOR3, "cell_size"))); } -void GridMap::set_clip(bool p_enabled, bool p_clip_above, int p_floor, Vector3::Axis p_axis) { - if (!p_enabled && !clip) { - return; - } - if (clip && p_enabled && clip_floor == p_floor && p_clip_above == clip_above && p_axis == clip_axis) { - return; - } - - clip = p_enabled; - clip_floor = p_floor; - clip_axis = p_axis; - clip_above = p_clip_above; - - //make it all update - for (KeyValue<OctantKey, Octant *> &E : octant_map) { - Octant *g = E.value; - g->dirty = true; - } - awaiting_update = true; - _update_octants_callback(); -} - void GridMap::set_cell_scale(float p_scale) { cell_scale = p_scale; _recreate_octant_data(); diff --git a/modules/gridmap/grid_map.h b/modules/gridmap/grid_map.h index 83d5af1324..b09cabfe25 100644 --- a/modules/gridmap/grid_map.h +++ 
b/modules/gridmap/grid_map.h @@ -150,14 +150,8 @@ class GridMap : public Node3D { bool center_z = true; float cell_scale = 1.0; - bool clip = false; - bool clip_above = true; - int clip_floor = 0; - bool recreating_octants = false; - Vector3::Axis clip_axis = Vector3::AXIS_Z; - Ref<MeshLibrary> mesh_library; Map<OctantKey, Octant *> octant_map; @@ -260,8 +254,6 @@ public: Vector3i world_to_map(const Vector3 &p_world_position) const; Vector3 map_to_world(const Vector3i &p_map_position) const; - void set_clip(bool p_enabled, bool p_clip_above = true, int p_floor = 0, Vector3::Axis p_axis = Vector3::AXIS_X); - void set_cell_scale(float p_scale); float get_cell_scale() const; diff --git a/modules/gridmap/grid_map_editor_plugin.cpp b/modules/gridmap/grid_map_editor_plugin.cpp index 80856d37c2..a05905cbc3 100644 --- a/modules/gridmap/grid_map_editor_plugin.cpp +++ b/modules/gridmap/grid_map_editor_plugin.cpp @@ -63,17 +63,6 @@ void GridMapEditor::_menu_option(int p_option) { floor->set_value(floor->get_value() + 1); } break; - case MENU_OPTION_CLIP_DISABLED: - case MENU_OPTION_CLIP_ABOVE: - case MENU_OPTION_CLIP_BELOW: { - clip_mode = ClipMode(p_option - MENU_OPTION_CLIP_DISABLED); - for (int i = 0; i < 3; i++) { - int index = options->get_popup()->get_item_index(MENU_OPTION_CLIP_DISABLED + i); - options->get_popup()->set_item_checked(index, i == clip_mode); - } - - _update_clip(); - } break; case MENU_OPTION_X_AXIS: case MENU_OPTION_Y_AXIS: case MENU_OPTION_Z_AXIS: { @@ -98,7 +87,6 @@ void GridMapEditor::_menu_option(int p_option) { } edit_axis = Vector3::Axis(new_axis); update_grid(); - _update_clip(); } break; case MENU_OPTION_CURSOR_ROTATE_Y: { @@ -943,24 +931,12 @@ void GridMapEditor::edit(GridMap *p_gridmap) { set_process(true); - clip_mode = p_gridmap->has_meta("_editor_clip_") ? ClipMode(p_gridmap->get_meta("_editor_clip_").operator int()) : CLIP_DISABLED; - _draw_grids(node->get_cell_size()); update_grid(); - _update_clip(); node->connect("cell_size_changed", callable_mp(this, &GridMapEditor::_draw_grids)); } -void GridMapEditor::_update_clip() { - node->set_meta("_editor_clip_", clip_mode); - if (clip_mode == CLIP_DISABLED) { - node->set_clip(false); - } else { - node->set_clip(true, clip_mode == CLIP_ABOVE, edit_floor[edit_axis], edit_axis); - } -} - void GridMapEditor::update_grid() { grid_xform.origin.x -= 1; // Force update in hackish way. 
@@ -1147,7 +1123,6 @@ void GridMapEditor::_floor_changed(float p_value) { edit_floor[edit_axis] = p_value; node->set_meta("_editor_floor_", Vector3(edit_floor[0], edit_floor[1], edit_floor[2])); update_grid(); - _update_clip(); _update_selection_transform(); } @@ -1198,11 +1173,6 @@ GridMapEditor::GridMapEditor() { options->get_popup()->add_item(TTR("Previous Floor"), MENU_OPTION_PREV_LEVEL, Key::Q); options->get_popup()->add_item(TTR("Next Floor"), MENU_OPTION_NEXT_LEVEL, Key::E); options->get_popup()->add_separator(); - options->get_popup()->add_radio_check_item(TTR("Clip Disabled"), MENU_OPTION_CLIP_DISABLED); - options->get_popup()->set_item_checked(options->get_popup()->get_item_index(MENU_OPTION_CLIP_DISABLED), true); - options->get_popup()->add_radio_check_item(TTR("Clip Above"), MENU_OPTION_CLIP_ABOVE); - options->get_popup()->add_radio_check_item(TTR("Clip Below"), MENU_OPTION_CLIP_BELOW); - options->get_popup()->add_separator(); options->get_popup()->add_radio_check_item(TTR("Edit X Axis"), MENU_OPTION_X_AXIS, Key::Z); options->get_popup()->add_radio_check_item(TTR("Edit Y Axis"), MENU_OPTION_Y_AXIS, Key::X); options->get_popup()->add_radio_check_item(TTR("Edit Z Axis"), MENU_OPTION_Z_AXIS, Key::C); diff --git a/modules/gridmap/grid_map_editor_plugin.h b/modules/gridmap/grid_map_editor_plugin.h index a25f14becd..c44c4ca7e0 100644 --- a/modules/gridmap/grid_map_editor_plugin.h +++ b/modules/gridmap/grid_map_editor_plugin.h @@ -55,12 +55,6 @@ class GridMapEditor : public VBoxContainer { INPUT_PASTE, }; - enum ClipMode { - CLIP_DISABLED, - CLIP_ABOVE, - CLIP_BELOW - }; - enum DisplayMode { DISPLAY_THUMBNAIL, DISPLAY_LIST @@ -94,7 +88,6 @@ class GridMapEditor : public VBoxContainer { GridMap *node = nullptr; MeshLibrary *last_mesh_library; - ClipMode clip_mode = CLIP_DISABLED; Transform3D grid_xform; Transform3D edit_grid_xform; @@ -159,9 +152,6 @@ class GridMapEditor : public VBoxContainer { MENU_OPTION_NEXT_LEVEL, MENU_OPTION_PREV_LEVEL, MENU_OPTION_LOCK_VIEW, - MENU_OPTION_CLIP_DISABLED, - MENU_OPTION_CLIP_ABOVE, - MENU_OPTION_CLIP_BELOW, MENU_OPTION_X_AXIS, MENU_OPTION_Y_AXIS, MENU_OPTION_Z_AXIS, @@ -200,7 +190,6 @@ class GridMapEditor : public VBoxContainer { void _item_selected_cbk(int idx); void _update_cursor_transform(); void _update_cursor_instance(); - void _update_clip(); void _update_theme(); void _text_changed(const String &p_text); diff --git a/modules/mono/editor/bindings_generator.cpp b/modules/mono/editor/bindings_generator.cpp index 07128770b7..272283432d 100644 --- a/modules/mono/editor/bindings_generator.cpp +++ b/modules/mono/editor/bindings_generator.cpp @@ -278,12 +278,12 @@ String BindingsGenerator::bbcode_to_xml(const String &p_bbcode, const TypeInterf } else if (code_tag) { xml_output.append("["); pos = brk_pos + 1; - } else if (tag.begins_with("method ") || tag.begins_with("member ") || tag.begins_with("signal ") || tag.begins_with("enum ") || tag.begins_with("constant ")) { + } else if (tag.begins_with("method ") || tag.begins_with("member ") || tag.begins_with("signal ") || tag.begins_with("enum ") || tag.begins_with("constant ") || tag.begins_with("theme_item ")) { const int tag_end = tag.find(" "); const String link_tag = tag.substr(0, tag_end); const String link_target = tag.substr(tag_end + 1, tag.length()).lstrip(" "); - Vector<String> link_target_parts = link_target.split("."); + const Vector<String> link_target_parts = link_target.split("."); if (link_target_parts.size() <= 0 || link_target_parts.size() > 2) { ERR_PRINT("Invalid reference format: 
'" + tag + "'."); @@ -311,201 +311,18 @@ String BindingsGenerator::bbcode_to_xml(const String &p_bbcode, const TypeInterf } if (link_tag == "method") { - if (!target_itype || !target_itype->is_object_type) { - if (OS::get_singleton()->is_stdout_verbose()) { - if (target_itype) { - OS::get_singleton()->print("Cannot resolve method reference for non-Godot.Object type in documentation: %s\n", link_target.utf8().get_data()); - } else { - OS::get_singleton()->print("Cannot resolve type from method reference in documentation: %s\n", link_target.utf8().get_data()); - } - } - - // TODO Map what we can - xml_output.append("<c>"); - xml_output.append(link_target); - xml_output.append("</c>"); - } else { - const MethodInterface *target_imethod = target_itype->find_method_by_name(target_cname); - - if (target_imethod) { - xml_output.append("<see cref=\"" BINDINGS_NAMESPACE "."); - xml_output.append(target_itype->proxy_name); - xml_output.append("."); - xml_output.append(target_imethod->proxy_name); - xml_output.append("\"/>"); - } - } + _append_xml_method(xml_output, target_itype, target_cname, link_target, link_target_parts); } else if (link_tag == "member") { - if (!target_itype || !target_itype->is_object_type) { - if (OS::get_singleton()->is_stdout_verbose()) { - if (target_itype) { - OS::get_singleton()->print("Cannot resolve member reference for non-Godot.Object type in documentation: %s\n", link_target.utf8().get_data()); - } else { - OS::get_singleton()->print("Cannot resolve type from member reference in documentation: %s\n", link_target.utf8().get_data()); - } - } - - // TODO Map what we can - xml_output.append("<c>"); - xml_output.append(link_target); - xml_output.append("</c>"); - } else { - const PropertyInterface *target_iprop = target_itype->find_property_by_name(target_cname); - - if (target_iprop) { - xml_output.append("<see cref=\"" BINDINGS_NAMESPACE "."); - xml_output.append(target_itype->proxy_name); - xml_output.append("."); - xml_output.append(target_iprop->proxy_name); - xml_output.append("\"/>"); - } - } + _append_xml_member(xml_output, target_itype, target_cname, link_target, link_target_parts); } else if (link_tag == "signal") { - if (!target_itype || !target_itype->is_object_type) { - if (OS::get_singleton()->is_stdout_verbose()) { - if (target_itype) { - OS::get_singleton()->print("Cannot resolve signal reference for non-Godot.Object type in documentation: %s\n", link_target.utf8().get_data()); - } else { - OS::get_singleton()->print("Cannot resolve type from signal reference in documentation: %s\n", link_target.utf8().get_data()); - } - } - - // TODO Map what we can - xml_output.append("<c>"); - xml_output.append(link_target); - xml_output.append("</c>"); - } else { - const SignalInterface *target_isignal = target_itype->find_signal_by_name(target_cname); - - if (target_isignal) { - xml_output.append("<see cref=\"" BINDINGS_NAMESPACE "."); - xml_output.append(target_itype->proxy_name); - xml_output.append("."); - xml_output.append(target_isignal->proxy_name); - xml_output.append("\"/>"); - } else { - ERR_PRINT("Cannot resolve signal reference in documentation: '" + link_target + "'."); - - xml_output.append("<c>"); - xml_output.append(link_target); - xml_output.append("</c>"); - } - } + _append_xml_signal(xml_output, target_itype, target_cname, link_target, link_target_parts); } else if (link_tag == "enum") { - const StringName search_cname = !target_itype ? target_cname : StringName(target_itype->name + "." 
+ (String)target_cname); - - const Map<StringName, TypeInterface>::Element *enum_match = enum_types.find(search_cname); - - if (!enum_match && search_cname != target_cname) { - enum_match = enum_types.find(target_cname); - } - - if (enum_match) { - const TypeInterface &target_enum_itype = enum_match->value(); - - xml_output.append("<see cref=\"" BINDINGS_NAMESPACE "."); - xml_output.append(target_enum_itype.proxy_name); // Includes nesting class if any - xml_output.append("\"/>"); - } else { - ERR_PRINT("Cannot resolve enum reference in documentation: '" + link_target + "'."); - - xml_output.append("<c>"); - xml_output.append(link_target); - xml_output.append("</c>"); - } + _append_xml_enum(xml_output, target_itype, target_cname, link_target, link_target_parts); } else if (link_tag == "constant") { - if (!target_itype || !target_itype->is_object_type) { - if (OS::get_singleton()->is_stdout_verbose()) { - if (target_itype) { - OS::get_singleton()->print("Cannot resolve constant reference for non-Godot.Object type in documentation: %s\n", link_target.utf8().get_data()); - } else { - OS::get_singleton()->print("Cannot resolve type from constant reference in documentation: %s\n", link_target.utf8().get_data()); - } - } - - // TODO Map what we can - xml_output.append("<c>"); - xml_output.append(link_target); - xml_output.append("</c>"); - } else if (!target_itype && target_cname == name_cache.type_at_GlobalScope) { - const String target_name = (String)target_cname; - - // Try to find as a global constant - const ConstantInterface *target_iconst = find_constant_by_name(target_name, global_constants); - - if (target_iconst) { - // Found global constant - xml_output.append("<see cref=\"" BINDINGS_NAMESPACE "." BINDINGS_GLOBAL_SCOPE_CLASS "."); - xml_output.append(target_iconst->proxy_name); - xml_output.append("\"/>"); - } else { - // Try to find as global enum constant - const EnumInterface *target_ienum = nullptr; - - for (const EnumInterface &ienum : global_enums) { - target_ienum = &ienum; - target_iconst = find_constant_by_name(target_name, target_ienum->constants); - if (target_iconst) { - break; - } - } - - if (target_iconst) { - xml_output.append("<see cref=\"" BINDINGS_NAMESPACE "."); - xml_output.append(target_ienum->cname); - xml_output.append("."); - xml_output.append(target_iconst->proxy_name); - xml_output.append("\"/>"); - } else { - ERR_PRINT("Cannot resolve global constant reference in documentation: '" + link_target + "'."); - - xml_output.append("<c>"); - xml_output.append(link_target); - xml_output.append("</c>"); - } - } - } else { - const String target_name = (String)target_cname; - - // Try to find the constant in the current class - const ConstantInterface *target_iconst = find_constant_by_name(target_name, target_itype->constants); - - if (target_iconst) { - // Found constant in current class - xml_output.append("<see cref=\"" BINDINGS_NAMESPACE "."); - xml_output.append(target_itype->proxy_name); - xml_output.append("."); - xml_output.append(target_iconst->proxy_name); - xml_output.append("\"/>"); - } else { - // Try to find as enum constant in the current class - const EnumInterface *target_ienum = nullptr; - - for (const EnumInterface &ienum : target_itype->enums) { - target_ienum = &ienum; - target_iconst = find_constant_by_name(target_name, target_ienum->constants); - if (target_iconst) { - break; - } - } - - if (target_iconst) { - xml_output.append("<see cref=\"" BINDINGS_NAMESPACE "."); - xml_output.append(target_itype->proxy_name); - xml_output.append("."); - 
xml_output.append(target_ienum->cname); - xml_output.append("."); - xml_output.append(target_iconst->proxy_name); - xml_output.append("\"/>"); - } else { - ERR_PRINT("Cannot resolve constant reference in documentation: '" + link_target + "'."); - - xml_output.append("<c>"); - xml_output.append(link_target); - xml_output.append("</c>"); - } - } - } + _append_xml_constant(xml_output, target_itype, target_cname, link_target, link_target_parts); + } else if (link_tag == "theme_item") { + // We do not declare theme_items in any way in C#, so there is nothing to reference + _append_xml_undeclared(xml_output, link_target); } pos = brk_end + 1; @@ -670,6 +487,240 @@ String BindingsGenerator::bbcode_to_xml(const String &p_bbcode, const TypeInterf return xml_output.as_string(); } +void BindingsGenerator::_append_xml_method(StringBuilder &p_xml_output, const TypeInterface *p_target_itype, const StringName &p_target_cname, const String &p_link_target, const Vector<String> &p_link_target_parts) { + if (p_link_target_parts[0] == name_cache.type_at_GlobalScope) { + if (OS::get_singleton()->is_stdout_verbose()) { + OS::get_singleton()->print("Cannot resolve @GlobalScope method reference in documentation: %s\n", p_link_target.utf8().get_data()); + } + + // TODO Map what we can + _append_xml_undeclared(p_xml_output, p_link_target); + } else if (!p_target_itype || !p_target_itype->is_object_type) { + if (OS::get_singleton()->is_stdout_verbose()) { + if (p_target_itype) { + OS::get_singleton()->print("Cannot resolve method reference for non-Godot.Object type in documentation: %s\n", p_link_target.utf8().get_data()); + } else { + OS::get_singleton()->print("Cannot resolve type from method reference in documentation: %s\n", p_link_target.utf8().get_data()); + } + } + + // TODO Map what we can + _append_xml_undeclared(p_xml_output, p_link_target); + } else { + if (p_target_cname == "_init") { + // The _init method is not declared in C#, reference the constructor instead + p_xml_output.append("<see cref=\"" BINDINGS_NAMESPACE "."); + p_xml_output.append(p_target_itype->proxy_name); + p_xml_output.append("."); + p_xml_output.append(p_target_itype->proxy_name); + p_xml_output.append("()\"/>"); + } else { + const MethodInterface *target_imethod = p_target_itype->find_method_by_name(p_target_cname); + + if (target_imethod) { + p_xml_output.append("<see cref=\"" BINDINGS_NAMESPACE "."); + p_xml_output.append(p_target_itype->proxy_name); + p_xml_output.append("."); + p_xml_output.append(target_imethod->proxy_name); + p_xml_output.append("\"/>"); + } else { + ERR_PRINT("Cannot resolve method reference in documentation: '" + p_link_target + "'."); + _append_xml_undeclared(p_xml_output, p_link_target); + } + } + } +} + +void BindingsGenerator::_append_xml_member(StringBuilder &p_xml_output, const TypeInterface *p_target_itype, const StringName &p_target_cname, const String &p_link_target, const Vector<String> &p_link_target_parts) { + if (p_link_target.find("/") >= 0) { + // Properties with '/' (slash) in the name are not declared in C#, so there is nothing to reference. 
+ _append_xml_undeclared(p_xml_output, p_link_target); + } else if (!p_target_itype || !p_target_itype->is_object_type) { + if (OS::get_singleton()->is_stdout_verbose()) { + if (p_target_itype) { + OS::get_singleton()->print("Cannot resolve member reference for non-Godot.Object type in documentation: %s\n", p_link_target.utf8().get_data()); + } else { + OS::get_singleton()->print("Cannot resolve type from member reference in documentation: %s\n", p_link_target.utf8().get_data()); + } + } + + // TODO Map what we can + _append_xml_undeclared(p_xml_output, p_link_target); + } else { + const TypeInterface *current_itype = p_target_itype; + const PropertyInterface *target_iprop = nullptr; + + while (target_iprop == nullptr && current_itype != nullptr) { + target_iprop = current_itype->find_property_by_name(p_target_cname); + if (target_iprop == nullptr) { + current_itype = _get_type_or_null(TypeReference(current_itype->base_name)); + } + } + + if (target_iprop) { + p_xml_output.append("<see cref=\"" BINDINGS_NAMESPACE "."); + p_xml_output.append(current_itype->proxy_name); + p_xml_output.append("."); + p_xml_output.append(target_iprop->proxy_name); + p_xml_output.append("\"/>"); + } else { + ERR_PRINT("Cannot resolve member reference in documentation: '" + p_link_target + "'."); + _append_xml_undeclared(p_xml_output, p_link_target); + } + } +} + +void BindingsGenerator::_append_xml_signal(StringBuilder &p_xml_output, const TypeInterface *p_target_itype, const StringName &p_target_cname, const String &p_link_target, const Vector<String> &p_link_target_parts) { + if (!p_target_itype || !p_target_itype->is_object_type) { + if (OS::get_singleton()->is_stdout_verbose()) { + if (p_target_itype) { + OS::get_singleton()->print("Cannot resolve signal reference for non-Godot.Object type in documentation: %s\n", p_link_target.utf8().get_data()); + } else { + OS::get_singleton()->print("Cannot resolve type from signal reference in documentation: %s\n", p_link_target.utf8().get_data()); + } + } + + // TODO Map what we can + _append_xml_undeclared(p_xml_output, p_link_target); + } else { + const SignalInterface *target_isignal = p_target_itype->find_signal_by_name(p_target_cname); + + if (target_isignal) { + p_xml_output.append("<see cref=\"" BINDINGS_NAMESPACE "."); + p_xml_output.append(p_target_itype->proxy_name); + p_xml_output.append("."); + p_xml_output.append(target_isignal->proxy_name); + p_xml_output.append("\"/>"); + } else { + ERR_PRINT("Cannot resolve signal reference in documentation: '" + p_link_target + "'."); + _append_xml_undeclared(p_xml_output, p_link_target); + } + } +} + +void BindingsGenerator::_append_xml_enum(StringBuilder &p_xml_output, const TypeInterface *p_target_itype, const StringName &p_target_cname, const String &p_link_target, const Vector<String> &p_link_target_parts) { + const StringName search_cname = !p_target_itype ? p_target_cname : StringName(p_target_itype->name + "." 
+ (String)p_target_cname); + + const Map<StringName, TypeInterface>::Element *enum_match = enum_types.find(search_cname); + + if (!enum_match && search_cname != p_target_cname) { + enum_match = enum_types.find(p_target_cname); + } + + if (enum_match) { + const TypeInterface &target_enum_itype = enum_match->value(); + + p_xml_output.append("<see cref=\"" BINDINGS_NAMESPACE "."); + p_xml_output.append(target_enum_itype.proxy_name); // Includes nesting class if any + p_xml_output.append("\"/>"); + } else { + ERR_PRINT("Cannot resolve enum reference in documentation: '" + p_link_target + "'."); + _append_xml_undeclared(p_xml_output, p_link_target); + } +} + +void BindingsGenerator::_append_xml_constant(StringBuilder &p_xml_output, const TypeInterface *p_target_itype, const StringName &p_target_cname, const String &p_link_target, const Vector<String> &p_link_target_parts) { + if (p_link_target_parts[0] == name_cache.type_at_GlobalScope) { + _append_xml_constant_in_global_scope(p_xml_output, p_target_cname, p_link_target); + } else if (!p_target_itype || !p_target_itype->is_object_type) { + // Search in @GlobalScope as a last resort if no class was specified + if (p_link_target_parts.size() == 1) { + _append_xml_constant_in_global_scope(p_xml_output, p_target_cname, p_link_target); + return; + } + + if (OS::get_singleton()->is_stdout_verbose()) { + if (p_target_itype) { + OS::get_singleton()->print("Cannot resolve constant reference for non-Godot.Object type in documentation: %s\n", p_link_target.utf8().get_data()); + } else { + OS::get_singleton()->print("Cannot resolve type from constant reference in documentation: %s\n", p_link_target.utf8().get_data()); + } + } + + // TODO Map what we can + _append_xml_undeclared(p_xml_output, p_link_target); + } else { + // Try to find the constant in the current class + const ConstantInterface *target_iconst = find_constant_by_name(p_target_cname, p_target_itype->constants); + + if (target_iconst) { + // Found constant in current class + p_xml_output.append("<see cref=\"" BINDINGS_NAMESPACE "."); + p_xml_output.append(p_target_itype->proxy_name); + p_xml_output.append("."); + p_xml_output.append(target_iconst->proxy_name); + p_xml_output.append("\"/>"); + } else { + // Try to find as enum constant in the current class + const EnumInterface *target_ienum = nullptr; + + for (const EnumInterface &ienum : p_target_itype->enums) { + target_ienum = &ienum; + target_iconst = find_constant_by_name(p_target_cname, target_ienum->constants); + if (target_iconst) { + break; + } + } + + if (target_iconst) { + p_xml_output.append("<see cref=\"" BINDINGS_NAMESPACE "."); + p_xml_output.append(p_target_itype->proxy_name); + p_xml_output.append("."); + p_xml_output.append(target_ienum->cname); + p_xml_output.append("."); + p_xml_output.append(target_iconst->proxy_name); + p_xml_output.append("\"/>"); + } else if (p_link_target_parts.size() == 1) { + // Also search in @GlobalScope as a last resort if no class was specified + _append_xml_constant_in_global_scope(p_xml_output, p_target_cname, p_link_target); + } else { + ERR_PRINT("Cannot resolve constant reference in documentation: '" + p_link_target + "'."); + _append_xml_undeclared(p_xml_output, p_link_target); + } + } + } +} + +void BindingsGenerator::_append_xml_constant_in_global_scope(StringBuilder &p_xml_output, const String &p_target_cname, const String &p_link_target) { + // Try to find as a global constant + const ConstantInterface *target_iconst = find_constant_by_name(p_target_cname, global_constants); + + if 
(target_iconst) { + // Found global constant + p_xml_output.append("<see cref=\"" BINDINGS_NAMESPACE "." BINDINGS_GLOBAL_SCOPE_CLASS "."); + p_xml_output.append(target_iconst->proxy_name); + p_xml_output.append("\"/>"); + } else { + // Try to find as global enum constant + const EnumInterface *target_ienum = nullptr; + + for (const EnumInterface &ienum : global_enums) { + target_ienum = &ienum; + target_iconst = find_constant_by_name(p_target_cname, target_ienum->constants); + if (target_iconst) { + break; + } + } + + if (target_iconst) { + p_xml_output.append("<see cref=\"" BINDINGS_NAMESPACE "."); + p_xml_output.append(target_ienum->cname); + p_xml_output.append("."); + p_xml_output.append(target_iconst->proxy_name); + p_xml_output.append("\"/>"); + } else { + ERR_PRINT("Cannot resolve global constant reference in documentation: '" + p_link_target + "'."); + _append_xml_undeclared(p_xml_output, p_link_target); + } + } +} + +void BindingsGenerator::_append_xml_undeclared(StringBuilder &p_xml_output, const String &p_link_target) { + p_xml_output.append("<c>"); + p_xml_output.append(p_link_target); + p_xml_output.append("</c>"); +} + int BindingsGenerator::_determine_enum_prefix(const EnumInterface &p_ienum) { CRASH_COND(p_ienum.constants.is_empty()); diff --git a/modules/mono/editor/bindings_generator.h b/modules/mono/editor/bindings_generator.h index 5460f018f0..f601ffde2b 100644 --- a/modules/mono/editor/bindings_generator.h +++ b/modules/mono/editor/bindings_generator.h @@ -658,6 +658,14 @@ class BindingsGenerator { String bbcode_to_xml(const String &p_bbcode, const TypeInterface *p_itype); + void _append_xml_method(StringBuilder &p_xml_output, const TypeInterface *p_target_itype, const StringName &p_target_cname, const String &p_link_target, const Vector<String> &p_link_target_parts); + void _append_xml_member(StringBuilder &p_xml_output, const TypeInterface *p_target_itype, const StringName &p_target_cname, const String &p_link_target, const Vector<String> &p_link_target_parts); + void _append_xml_signal(StringBuilder &p_xml_output, const TypeInterface *p_target_itype, const StringName &p_target_cname, const String &p_link_target, const Vector<String> &p_link_target_parts); + void _append_xml_enum(StringBuilder &p_xml_output, const TypeInterface *p_target_itype, const StringName &p_target_cname, const String &p_link_target, const Vector<String> &p_link_target_parts); + void _append_xml_constant(StringBuilder &p_xml_output, const TypeInterface *p_target_itype, const StringName &p_target_cname, const String &p_link_target, const Vector<String> &p_link_target_parts); + void _append_xml_constant_in_global_scope(StringBuilder &p_xml_output, const String &p_target_cname, const String &p_link_target); + void _append_xml_undeclared(StringBuilder &p_xml_output, const String &p_link_target); + int _determine_enum_prefix(const EnumInterface &p_ienum); void _apply_prefix_to_enum_constants(EnumInterface &p_ienum, int p_prefix_length); diff --git a/modules/openxr/extensions/openxr_vulkan_extension.cpp b/modules/openxr/extensions/openxr_vulkan_extension.cpp index ba790500f9..c7c840fdf3 100644 --- a/modules/openxr/extensions/openxr_vulkan_extension.cpp +++ b/modules/openxr/extensions/openxr_vulkan_extension.cpp @@ -305,7 +305,7 @@ bool OpenXRVulkanExtension::get_swapchain_image_data(XrSwapchain p_swapchain, in for (uint64_t i = 0; i < swapchain_length; i++) { images[i].type = XR_TYPE_SWAPCHAIN_IMAGE_VULKAN_KHR; images[i].next = nullptr; - images[i].image = nullptr; + images[i].image = VK_NULL_HANDLE; 
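// Illustrative sketch (assumed example, not part of the commit): the fallback path added in the
// bindings_generator refactor above wraps any reference that has no C# declaration, such as a
// [theme_item ...] tag, in plain <c> tags instead of a <see cref=""/> link. The free function below
// mirrors _append_xml_undeclared() so the resulting XML is easy to see; its name is hypothetical.
static String undeclared_reference_to_xml(const String &p_link_target) {
	StringBuilder xml;
	xml.append("<c>");
	xml.append(p_link_target);
	xml.append("</c>");
	return xml.as_string(); // e.g. "font_color" becomes "<c>font_color</c>"
}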
} result = xrEnumerateSwapchainImages(p_swapchain, swapchain_length, &swapchain_length, (XrSwapchainImageBaseHeader *)images); @@ -325,10 +325,65 @@ bool OpenXRVulkanExtension::get_swapchain_image_data(XrSwapchain p_swapchain, in *r_swapchain_graphics_data = data; data->is_multiview = (p_array_size > 1); - RenderingDevice::DataFormat format = RenderingDevice::DATA_FORMAT_R8G8B8A8_SRGB; // TODO set this based on p_swapchain_format - RenderingDevice::TextureSamples samples = RenderingDevice::TEXTURE_SAMPLES_1; // TODO set this based on p_sample_count + RenderingDevice::DataFormat format = RenderingDevice::DATA_FORMAT_R8G8B8A8_SRGB; + RenderingDevice::TextureSamples samples = RenderingDevice::TEXTURE_SAMPLES_1; uint64_t usage_flags = RenderingDevice::TEXTURE_USAGE_SAMPLING_BIT | RenderingDevice::TEXTURE_USAGE_COLOR_ATTACHMENT_BIT; + switch (p_swapchain_format) { + case VK_FORMAT_R8G8B8A8_SRGB: + // Even though this is an sRGB framebuffer format we're using UNORM here. + // The reason here is because Godot does a linear to sRGB conversion while + // with the sRGB format, this conversion would be doubled by the hardware. + // This also means we're reading the values as is for our preview on screen. + // The OpenXR runtime however is still treating this as an sRGB format and + // will thus do an sRGB -> Linear conversion as expected. + // format = RenderingDevice::DATA_FORMAT_R8G8B8A8_SRGB; + format = RenderingDevice::DATA_FORMAT_R8G8B8A8_UNORM; + break; + case VK_FORMAT_B8G8R8A8_SRGB: + // format = RenderingDevice::DATA_FORMAT_B8G8R8A8_SRGB; + format = RenderingDevice::DATA_FORMAT_B8G8R8A8_UNORM; + break; + case VK_FORMAT_R8G8B8A8_UINT: + format = RenderingDevice::DATA_FORMAT_R8G8B8A8_UINT; + break; + case VK_FORMAT_B8G8R8A8_UINT: + format = RenderingDevice::DATA_FORMAT_B8G8R8A8_UINT; + break; + default: + // continue with our default value + print_line("Unsupported swapchain format ", p_swapchain_format); + break; + } + + switch (p_sample_count) { + case 1: + samples = RenderingDevice::TEXTURE_SAMPLES_1; + break; + case 2: + samples = RenderingDevice::TEXTURE_SAMPLES_2; + break; + case 4: + samples = RenderingDevice::TEXTURE_SAMPLES_4; + break; + case 8: + samples = RenderingDevice::TEXTURE_SAMPLES_8; + break; + case 16: + samples = RenderingDevice::TEXTURE_SAMPLES_16; + break; + case 32: + samples = RenderingDevice::TEXTURE_SAMPLES_32; + break; + case 64: + samples = RenderingDevice::TEXTURE_SAMPLES_64; + break; + default: + // continue with our default value + print_line("Unsupported sample count ", p_sample_count); + break; + } + Vector<RID> image_rids; Vector<RID> framebuffers; diff --git a/modules/openxr/openxr_api.cpp b/modules/openxr/openxr_api.cpp index 23fd0404d5..e3da214cc8 100644 --- a/modules/openxr/openxr_api.cpp +++ b/modules/openxr/openxr_api.cpp @@ -819,7 +819,7 @@ bool OpenXRAPI::create_swapchain(int64_t p_swapchain_format, uint32_t p_width, u return false; } - if (!graphics_extension->get_swapchain_image_data(new_swapchain, p_swapchain_format, p_width, p_height, p_array_size, p_array_size, r_swapchain_graphics_data)) { + if (!graphics_extension->get_swapchain_image_data(new_swapchain, p_swapchain_format, p_width, p_height, p_sample_count, p_array_size, r_swapchain_graphics_data)) { xrDestroySwapchain(new_swapchain); return false; } diff --git a/modules/text_server_adv/SCsub b/modules/text_server_adv/SCsub index 6a06619840..5e5c284b57 100644 --- a/modules/text_server_adv/SCsub +++ b/modules/text_server_adv/SCsub @@ -49,6 +49,7 @@ if env["builtin_harfbuzz"]: 
"src/hb-aat-map.cc", "src/hb-blob.cc", "src/hb-buffer-serialize.cc", + "src/hb-buffer-verify.cc", "src/hb-buffer.cc", "src/hb-common.cc", #'src/hb-coretext.cc', diff --git a/modules/visual_script/editor/visual_script_editor.cpp b/modules/visual_script/editor/visual_script_editor.cpp index ff4b6a1f4b..813902b54e 100644 --- a/modules/visual_script/editor/visual_script_editor.cpp +++ b/modules/visual_script/editor/visual_script_editor.cpp @@ -1102,6 +1102,7 @@ void VisualScriptEditor::_update_members() { List<StringName> var_names; script->get_variable_list(&var_names); + var_names.sort_custom<StringName::AlphCompare>(); for (const StringName &E : var_names) { TreeItem *ti = members->create_item(variables); diff --git a/platform/iphone/export/export_plugin.cpp b/platform/iphone/export/export_plugin.cpp index 69c6df8a38..2eaf5e47ac 100644 --- a/platform/iphone/export/export_plugin.cpp +++ b/platform/iphone/export/export_plugin.cpp @@ -389,6 +389,36 @@ void EditorExportPlatformIOS::_fix_config_file(const Ref<EditorExportPreset> &p_ String value = value_format.format(value_dictionary, "$_"); strnew += lines[i].replace("$launch_screen_background_color", value) + "\n"; + } else if (lines[i].find("$pbx_locale_file_reference") != -1) { + String locale_files; + Vector<String> translations = ProjectSettings::get_singleton()->get("internationalization/locale/translations"); + if (translations.size() > 0) { + int index = 0; + for (const String &E : translations) { + Ref<Translation> tr = ResourceLoader::load(E); + if (tr.is_valid()) { + String lang = tr->get_locale(); + locale_files += "D0BCFE4518AEBDA2004A" + itos(index).pad_zeros(4) + " /* " + lang + " */ = {isa = PBXFileReference; lastKnownFileType = text.plist.strings; name = " + lang + "; path = " + lang + ".lproj/InfoPlist.strings; sourceTree = \"<group>\"; };"; + } + index++; + } + } + strnew += lines[i].replace("$pbx_locale_file_reference", locale_files); + } else if (lines[i].find("$pbx_locale_build_reference") != -1) { + String locale_files; + Vector<String> translations = ProjectSettings::get_singleton()->get("internationalization/locale/translations"); + if (translations.size() > 0) { + int index = 0; + for (const String &E : translations) { + Ref<Translation> tr = ResourceLoader::load(E); + if (tr.is_valid()) { + String lang = tr->get_locale(); + locale_files += "D0BCFE4518AEBDA2004A" + itos(index).pad_zeros(4) + " /* " + lang + " */,"; + } + index++; + } + } + strnew += lines[i].replace("$pbx_locale_build_reference", locale_files); } else { strnew += lines[i] + "\n"; } @@ -1593,6 +1623,29 @@ Error EditorExportPlatformIOS::export_project(const Ref<EditorExportPreset> &p_p return ERR_FILE_NOT_FOUND; } + Vector<String> translations = ProjectSettings::get_singleton()->get("internationalization/locale/translations"); + if (translations.size() > 0) { + { + String fname = dest_dir + binary_name + "/en.lproj"; + tmp_app_path->make_dir_recursive(fname); + FileAccessRef f = FileAccess::open(fname + "/InfoPlist.strings", FileAccess::WRITE); + f->store_line("CFBundleDisplayName = \"" + ProjectSettings::get_singleton()->get("application/config/name").operator String() + "\";"); + } + + for (const String &E : translations) { + Ref<Translation> tr = ResourceLoader::load(E); + if (tr.is_valid()) { + String fname = dest_dir + binary_name + "/" + tr->get_locale() + ".lproj"; + tmp_app_path->make_dir_recursive(fname); + FileAccessRef f = FileAccess::open(fname + "/InfoPlist.strings", FileAccess::WRITE); + String prop = "application/config/name_" + 
tr->get_locale(); + if (ProjectSettings::get_singleton()->has_setting(prop)) { + f->store_line("CFBundleDisplayName = \"" + ProjectSettings::get_singleton()->get(prop).operator String() + "\";"); + } + } + } + } + // Copy project static libs to the project Vector<Ref<EditorExportPlugin>> export_plugins = EditorExport::get_singleton()->get_export_plugins(); for (int i = 0; i < export_plugins.size(); i++) { diff --git a/platform/iphone/godot_view.mm b/platform/iphone/godot_view.mm index da71312fc4..e48dd2e507 100644 --- a/platform/iphone/godot_view.mm +++ b/platform/iphone/godot_view.mm @@ -154,6 +154,8 @@ static const float earth_gravity = 9.80665; [self initTouches]; + self.multipleTouchEnabled = YES; + // Configure and start accelerometer if (!self.motionManager) { self.motionManager = [[CMMotionManager alloc] init]; diff --git a/platform/linuxbsd/display_server_x11.cpp b/platform/linuxbsd/display_server_x11.cpp index bca38d9f20..1ed4d8fb32 100644 --- a/platform/linuxbsd/display_server_x11.cpp +++ b/platform/linuxbsd/display_server_x11.cpp @@ -109,6 +109,15 @@ struct Hints { unsigned long status = 0; }; +static String get_atom_name(Display *p_disp, Atom p_atom) { + char *name = XGetAtomName(p_disp, p_atom); + ERR_FAIL_NULL_V_MSG(name, String(), "Atom is invalid."); + String ret; + ret.parse_utf8(name); + XFree(name); + return ret; +} + bool DisplayServerX11::has_feature(Feature p_feature) const { switch (p_feature) { case FEATURE_SUBWINDOWS: @@ -435,7 +444,7 @@ String DisplayServerX11::_clipboard_get_impl(Atom p_source, Window x11_window, A Window selection_owner = XGetSelectionOwner(x11_display, p_source); if (selection_owner == x11_window) { static const char *target_type = "PRIMARY"; - if (p_source != None && String(XGetAtomName(x11_display, p_source)) == target_type) { + if (p_source != None && get_atom_name(x11_display, p_source) == target_type) { return internal_clipboard_primary; } else { return internal_clipboard; @@ -1173,6 +1182,7 @@ void DisplayServerX11::show_window(WindowID p_id) { _THREAD_SAFE_METHOD_ const WindowData &wd = windows[p_id]; + popup_open(p_id); DEBUG_LOG_X11("show_window: %lu (%u) \n", wd.x11_window, p_id); @@ -1183,7 +1193,9 @@ void DisplayServerX11::delete_sub_window(WindowID p_id) { _THREAD_SAFE_METHOD_ ERR_FAIL_COND(!windows.has(p_id)); - ERR_FAIL_COND_MSG(p_id == MAIN_WINDOW_ID, "Main window can't be deleted"); //ma + ERR_FAIL_COND_MSG(p_id == MAIN_WINDOW_ID, "Main window can't be deleted"); + + popup_close(p_id); WindowData &wd = windows[p_id]; @@ -1458,8 +1470,8 @@ void DisplayServerX11::window_set_transient(WindowID p_window, WindowID p_parent // Set focus to parent sub window to avoid losing all focus when closing a nested sub-menu. // RevertToPointerRoot is used to make sure we don't lose all focus in case // a subwindow and its parent are both destroyed. 
- if (wd_window.menu_type && !wd_window.no_focus && wd_window.focused) { - if (!wd_parent.no_focus) { + if (!wd_window.no_focus && !wd_window.is_popup && wd_window.focused) { + if (!wd_parent.no_focus && !wd_window.is_popup) { XSetInputFocus(x11_display, wd_parent.x11_window, RevertToPointerRoot, CurrentTime); } } @@ -2073,6 +2085,18 @@ void DisplayServerX11::window_set_flag(WindowFlags p_flag, bool p_enabled, Windo case WINDOW_FLAG_TRANSPARENT: { //todo reimplement } break; + case WINDOW_FLAG_NO_FOCUS: { + wd.no_focus = p_enabled; + } break; + case WINDOW_FLAG_POPUP: { + XWindowAttributes xwa; + XSync(x11_display, False); + XGetWindowAttributes(x11_display, wd.x11_window, &xwa); + + ERR_FAIL_COND_MSG(p_window == MAIN_WINDOW_ID, "Main window can't be popup."); + ERR_FAIL_COND_MSG((xwa.map_state == IsViewable) && (wd.is_popup != p_enabled), "Popup flag can't be changed while the window is opened."); + wd.is_popup = p_enabled; + } break; default: { } } @@ -2114,6 +2138,12 @@ bool DisplayServerX11::window_get_flag(WindowFlags p_flag, WindowID p_window) co case WINDOW_FLAG_TRANSPARENT: { //todo reimplement } break; + case WINDOW_FLAG_NO_FOCUS: { + return wd.no_focus; + } break; + case WINDOW_FLAG_POPUP: { + return wd.is_popup; + } break; default: { } } @@ -2427,8 +2457,7 @@ String DisplayServerX11::keyboard_get_layout_language(int p_index) const { Atom names = kbd->names->symbols; if (names != None) { - char *name = XGetAtomName(x11_display, names); - Vector<String> info = String(name).split("+"); + Vector<String> info = get_atom_name(x11_display, names).split("+"); if (p_index >= 0 && p_index < _group_count) { if (p_index + 1 < info.size()) { ret = info[p_index + 1]; // Skip "pc" at the start and "inet"/"group" at the end of symbols. @@ -2438,7 +2467,6 @@ String DisplayServerX11::keyboard_get_layout_language(int p_index) const { } else { ERR_PRINT("Index " + itos(p_index) + "is out of bounds (" + itos(_group_count) + ")."); } - XFree(name); } XkbFreeKeyboard(kbd, 0, true); } @@ -2465,9 +2493,7 @@ String DisplayServerX11::keyboard_get_layout_name(int p_index) const { } if (p_index >= 0 && p_index < _group_count) { - char *full_name = XGetAtomName(x11_display, groups[p_index]); - ret.parse_utf8(full_name); - XFree(full_name); + ret = get_atom_name(x11_display, groups[p_index]); } else { ERR_PRINT("Index " + itos(p_index) + "is out of bounds (" + itos(_group_count) + ")."); } @@ -2530,7 +2556,7 @@ static Atom pick_target_from_list(Display *p_display, Atom *p_list, int p_count) for (int i = 0; i < p_count; i++) { Atom atom = p_list[i]; - if (atom != None && String(XGetAtomName(p_display, atom)) == target_type) { + if (atom != None && get_atom_name(p_display, atom) == target_type) { return atom; } } @@ -2539,15 +2565,15 @@ static Atom pick_target_from_atoms(Display *p_disp, Atom p_t1, Atom p_t2, Atom p_t3) { static const char *target_type = "text/uri-list"; - if (p_t1 != None && String(XGetAtomName(p_disp, p_t1)) == target_type) { + if (p_t1 != None && get_atom_name(p_disp, p_t1) == target_type) { return p_t1; } - if (p_t2 != None && String(XGetAtomName(p_disp, p_t2)) == target_type) { + if (p_t2 != None && get_atom_name(p_disp, p_t2) == target_type) { return p_t2; } - if (p_t3 != None && String(XGetAtomName(p_disp, p_t3)) == target_type) { + if (p_t3 != None && get_atom_name(p_disp, p_t3) == target_type) { return p_t3; } @@ -2869,7 +2895,7 @@ Atom DisplayServerX11::_process_selection_request_target(Atom p_target, Window p
// is the owner during a selection request. CharString clip; static const char *target_type = "PRIMARY"; - if (p_selection != None && String(XGetAtomName(x11_display, p_selection)) == target_type) { + if (p_selection != None && get_atom_name(x11_display, p_selection) == target_type) { clip = internal_clipboard_primary.utf8(); } else { clip = internal_clipboard.utf8(); @@ -3028,23 +3054,36 @@ void DisplayServerX11::_dispatch_input_event(const Ref<InputEvent> &p_event) { Variant ret; Callable::CallError ce; + { + List<WindowID>::Element *E = popup_list.front(); + if (E && Object::cast_to<InputEventKey>(*p_event)) { + // Redirect keyboard input to active popup. + if (windows.has(E->get())) { + Callable callable = windows[E->get()].input_event_callback; + if (callable.is_valid()) { + callable.call((const Variant **)&evp, 1, ret, ce); + } + } + return; + } + } + Ref<InputEventFromWindow> event_from_window = p_event; if (event_from_window.is_valid() && event_from_window->get_window_id() != INVALID_WINDOW_ID) { - //send to a window - ERR_FAIL_COND(!windows.has(event_from_window->get_window_id())); - Callable callable = windows[event_from_window->get_window_id()].input_event_callback; - if (callable.is_null()) { - return; + // Send to a single window. + if (windows.has(event_from_window->get_window_id())) { + Callable callable = windows[event_from_window->get_window_id()].input_event_callback; + if (callable.is_valid()) { + callable.call((const Variant **)&evp, 1, ret, ce); + } } - callable.call((const Variant **)&evp, 1, ret, ce); } else { - //send to all windows + // Send to all windows. for (KeyValue<WindowID, WindowData> &E : windows) { Callable callable = E.value.input_event_callback; - if (callable.is_null()) { - continue; + if (callable.is_valid()) { + callable.call((const Variant **)&evp, 1, ret, ce); } - callable.call((const Variant **)&evp, 1, ret, ce); } } } @@ -3136,6 +3175,108 @@ void DisplayServerX11::_check_pending_events(LocalVector<XEvent> &r_events) { } } +DisplayServer::WindowID DisplayServerX11::window_get_active_popup() const { + const List<WindowID>::Element *E = popup_list.back(); + if (E) { + return E->get(); + } else { + return INVALID_WINDOW_ID; + } +} + +void DisplayServerX11::window_set_popup_safe_rect(WindowID p_window, const Rect2i &p_rect) { + _THREAD_SAFE_METHOD_ + + ERR_FAIL_COND(!windows.has(p_window)); + WindowData &wd = windows[p_window]; + wd.parent_safe_rect = p_rect; +} + +Rect2i DisplayServerX11::window_get_popup_safe_rect(WindowID p_window) const { + _THREAD_SAFE_METHOD_ + + ERR_FAIL_COND_V(!windows.has(p_window), Rect2i()); + const WindowData &wd = windows[p_window]; + return wd.parent_safe_rect; +} + +void DisplayServerX11::popup_open(WindowID p_window) { + WindowData &wd = windows[p_window]; + if (wd.is_popup) { + // Close all popups, up to current popup parent, or every popup if new window is not transient. 
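+ // Illustrative sketch (an assumption for clarity, not part of this patch): a nested popup is
+ // expected to be flagged and chained by the caller so that popup_open() keeps its ancestor chain
+ // alive, roughly as follows (window IDs, flags and rect are placeholders):
+ //   WindowID submenu = create_sub_window(WINDOW_MODE_WINDOWED, VSYNC_ENABLED, WINDOW_FLAG_POPUP_BIT, Rect2i(100, 100, 200, 300));
+ //   window_set_transient(submenu, parent_popup_id); // Keeps `parent_popup_id` open when the submenu is shown.
+ //   show_window(submenu); // popup_open() then pushes `submenu` onto popup_list.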
+ List<WindowID>::Element *E = popup_list.back(); + while (E) { + if (wd.transient_parent != E->get() || wd.transient_parent == INVALID_WINDOW_ID) { + _send_window_event(windows[E->get()], DisplayServerX11::WINDOW_EVENT_CLOSE_REQUEST); + List<WindowID>::Element *F = E->prev(); + popup_list.erase(E); + E = F; + } else { + break; + } + } + + time_since_popup = OS::get_singleton()->get_ticks_msec(); + popup_list.push_back(p_window); + } +} + +void DisplayServerX11::popup_close(WindowID p_window) { + List<WindowID>::Element *E = popup_list.find(p_window); + while (E) { + _send_window_event(windows[E->get()], DisplayServerX11::WINDOW_EVENT_CLOSE_REQUEST); + List<WindowID>::Element *F = E->next(); + popup_list.erase(E); + E = F; + } +} + +void DisplayServerX11::mouse_process_popups() { + if (popup_list.is_empty()) { + return; + } + + uint64_t delta = OS::get_singleton()->get_ticks_msec() - time_since_popup; + if (delta < 250) { + return; + } + + int number_of_screens = XScreenCount(x11_display); + for (int i = 0; i < number_of_screens; i++) { + Window root, child; + int root_x, root_y, win_x, win_y; + unsigned int mask; + if (XQueryPointer(x11_display, XRootWindow(x11_display, i), &root, &child, &root_x, &root_y, &win_x, &win_y, &mask)) { + XWindowAttributes root_attrs; + XGetWindowAttributes(x11_display, root, &root_attrs); + Vector2i pos = Vector2i(root_attrs.x + root_x, root_attrs.y + root_y); + if ((pos != last_mouse_monitor_pos) || (mask != last_mouse_monitor_mask)) { + if (((mask & Button1Mask) || (mask & Button2Mask) || (mask & Button3Mask) || (mask & Button4Mask) || (mask & Button5Mask))) { + List<WindowID>::Element *E = popup_list.back(); + while (E) { + // Popup window area. + Rect2i win_rect = Rect2i(window_get_position(E->get()), window_get_size(E->get())); + // Area of the parent window, which is responsible for opening the sub-menu. + Rect2i safe_rect = window_get_popup_safe_rect(E->get()); + if (win_rect.has_point(pos)) { + break; + } else if (safe_rect != Rect2i() && safe_rect.has_point(pos)) { + break; + } else { + _send_window_event(windows[E->get()], DisplayServerX11::WINDOW_EVENT_CLOSE_REQUEST); + List<WindowID>::Element *F = E->prev(); + popup_list.erase(E); + E = F; + } + } + } + } + last_mouse_monitor_mask = mask; + last_mouse_monitor_pos = pos; + } + } +} + void DisplayServerX11::process_events() { _THREAD_SAFE_METHOD_ @@ -3144,6 +3285,8 @@ void DisplayServerX11::process_events() { ++frame; #endif + mouse_process_popups(); + if (app_focused) { //verify that one of the windows has focus, else send focus out notification bool focus_found = false; @@ -3374,7 +3517,7 @@ void DisplayServerX11::process_events() { // Set focus when menu window is started. // RevertToPointerRoot is used to make sure we don't lose all focus in case // a subwindow and its parent are both destroyed. - if (wd.menu_type && !wd.no_focus) { + if (!wd.no_focus && !wd.is_popup) { XSetInputFocus(x11_display, wd.x11_window, RevertToPointerRoot, CurrentTime); } } break; @@ -3520,7 +3663,7 @@ void DisplayServerX11::process_events() { // Set focus when menu window is re-used. // RevertToPointerRoot is used to make sure we don't lose all focus in case // a subwindow and its parent are both destroyed. - if (wd.menu_type && !wd.no_focus) { + if (!wd.no_focus && !wd.is_popup) { XSetInputFocus(x11_display, wd.x11_window, RevertToPointerRoot, CurrentTime); } @@ -3561,7 +3704,7 @@ void DisplayServerX11::process_events() { // Ensure window focus on click.
// RevertToPointerRoot is used to make sure we don't lose all focus in case // a subwindow and its parent are both destroyed. - if (!wd.no_focus) { + if (!wd.no_focus && !wd.is_popup) { XSetInputFocus(x11_display, wd.x11_window, RevertToPointerRoot, CurrentTime); } @@ -3776,6 +3919,7 @@ void DisplayServerX11::process_events() { Property p = _read_property(x11_display, windows[window_id].x11_window, XInternAtom(x11_display, "PRIMARY", 0)); Vector<String> files = String((char *)p.data).split("\n", false); + XFree(p.data); for (int i = 0; i < files.size(); i++) { files.write[i] = files[i].replace("file://", "").uri_decode().strip_edges(); } @@ -3818,6 +3962,7 @@ void DisplayServerX11::process_events() { if (more_than_3) { Property p = _read_property(x11_display, source, XInternAtom(x11_display, "XdndTypeList", False)); requested = pick_target_from_list(x11_display, (Atom *)p.data, p.nitems); + XFree(p.data); } else { requested = pick_target_from_atoms(x11_display, event.xclient.data.l[2], event.xclient.data.l[3], event.xclient.data.l[4]); } @@ -4121,21 +4266,20 @@ DisplayServerX11::WindowID DisplayServerX11::_create_window(WindowMode p_mode, V WindowID id = window_id_counter++; WindowData &wd = windows[id]; - if ((id != MAIN_WINDOW_ID) && (p_flags & WINDOW_FLAG_BORDERLESS_BIT)) { - wd.menu_type = true; - } - if (p_flags & WINDOW_FLAG_NO_FOCUS_BIT) { - wd.menu_type = true; wd.no_focus = true; } + if (p_flags & WINDOW_FLAG_POPUP_BIT) { + wd.is_popup = true; + } + // Setup for menu subwindows: // - override_redirect forces the WM not to interfere with the window, to avoid delays due to // handling decorations and placement. // On the other hand, focus changes need to be handled manually when this is set. // - save_under is a hint for the WM to keep the content of windows behind to avoid repaint. - if (wd.menu_type) { + if (wd.is_popup || wd.no_focus) { windowAttributes.override_redirect = True; windowAttributes.save_under = True; valuemask |= CWOverrideRedirect | CWSaveUnder; @@ -4146,7 +4290,7 @@ DisplayServerX11::WindowID DisplayServerX11::_create_window(WindowMode p_mode, V // Enable receiving notification when the window is initialized (MapNotify) // so the focus can be set at the right time. - if (wd.menu_type && !wd.no_focus) { + if (!wd.no_focus && !wd.is_popup) { XSelectInput(x11_display, wd.x11_window, StructureNotifyMask); } @@ -4243,7 +4387,7 @@ DisplayServerX11::WindowID DisplayServerX11::_create_window(WindowMode p_mode, V } } - if (wd.menu_type) { + if (wd.is_popup || wd.no_focus) { // Set Utility type to disable fade animations. 
Atom type_atom = XInternAtom(x11_display, "_NET_WM_WINDOW_TYPE_UTILITY", False); Atom wt_atom = XInternAtom(x11_display, "_NET_WM_WINDOW_TYPE", False); diff --git a/platform/linuxbsd/display_server_x11.h b/platform/linuxbsd/display_server_x11.h index 2d07361deb..63d32d939d 100644 --- a/platform/linuxbsd/display_server_x11.h +++ b/platform/linuxbsd/display_server_x11.h @@ -133,7 +133,6 @@ class DisplayServerX11 : public DisplayServer { ObjectID instance_id; - bool menu_type = false; bool no_focus = false; //better to guess on the fly, given WM can change it @@ -145,12 +144,21 @@ class DisplayServerX11 : public DisplayServer { Vector2i last_position_before_fs; bool focused = true; bool minimized = false; + bool is_popup = false; + + Rect2i parent_safe_rect; unsigned int focus_order = 0; }; Map<WindowID, WindowData> windows; + unsigned int last_mouse_monitor_mask = 0; + Vector2i last_mouse_monitor_pos; + uint64_t time_since_popup = 0; + + List<WindowID> popup_list; + WindowID last_focused_window = INVALID_WINDOW_ID; WindowID window_id_counter = MAIN_WINDOW_ID; @@ -283,6 +291,10 @@ protected: void _window_changed(XEvent *event); public: + void mouse_process_popups(); + void popup_open(WindowID p_window); + void popup_close(WindowID p_window); + virtual bool has_feature(Feature p_feature) const override; virtual String get_name() const override; @@ -317,6 +329,10 @@ public: virtual void show_window(WindowID p_id) override; virtual void delete_sub_window(WindowID p_id) override; + virtual WindowID window_get_active_popup() const override; + virtual void window_set_popup_safe_rect(WindowID p_window, const Rect2i &p_rect) override; + virtual Rect2i window_get_popup_safe_rect(WindowID p_window) const override; + virtual WindowID get_window_at_screen_position(const Point2i &p_position) const override; virtual int64_t window_get_native_handle(HandleType p_handle_type, WindowID p_window = MAIN_WINDOW_ID) const override; diff --git a/platform/osx/display_server_osx.h b/platform/osx/display_server_osx.h index 2b57983ca7..cc9ac162ea 100644 --- a/platform/osx/display_server_osx.h +++ b/platform/osx/display_server_osx.h @@ -104,8 +104,14 @@ public: bool borderless = false; bool resize_disabled = false; bool no_focus = false; + bool is_popup = false; + + Rect2i parent_safe_rect; }; + List<WindowID> popup_list; + uint64_t time_since_popup = 0; + private: #if defined(GLES3_ENABLED) GLManager_OSX *gl_manager = nullptr; @@ -154,6 +160,7 @@ private: float display_max_scale = 1.f; Point2i origin; bool displays_arrangement_dirty = true; + bool is_resizing = false; CursorShape cursor_shape = CURSOR_ARROW; NSCursor *cursors[CURSOR_MAX]; @@ -197,6 +204,11 @@ public: void push_to_key_event_buffer(const KeyEvent &p_event); void update_im_text(const Point2i &p_selection, const String &p_text); void set_last_focused_window(WindowID p_window); + void mouse_process_popups(bool p_close = false); + void popup_open(WindowID p_window); + void popup_close(WindowID p_window); + void set_is_resizing(bool p_is_resizing); + bool get_is_resizing() const; void window_update(WindowID p_window); void window_destroy(WindowID p_window); @@ -259,6 +271,10 @@ public: virtual void show_window(WindowID p_id) override; virtual void delete_sub_window(WindowID p_id) override; + virtual WindowID window_get_active_popup() const override; + virtual void window_set_popup_safe_rect(WindowID p_window, const Rect2i &p_rect) override; + virtual Rect2i window_get_popup_safe_rect(WindowID p_window) const override; + virtual void 
window_set_rect_changed_callback(const Callable &p_callable, WindowID p_window = MAIN_WINDOW_ID) override; virtual void window_set_window_event_callback(const Callable &p_callable, WindowID p_window = MAIN_WINDOW_ID) override; virtual void window_set_input_event_callback(const Callable &p_callable, WindowID p_window = MAIN_WINDOW_ID) override; diff --git a/platform/osx/display_server_osx.mm b/platform/osx/display_server_osx.mm index b2201eabbc..a4cd8f58bd 100644 --- a/platform/osx/display_server_osx.mm +++ b/platform/osx/display_server_osx.mm @@ -324,27 +324,39 @@ void DisplayServerOSX::_dispatch_input_event(const Ref<InputEvent> &p_event) { Variant ret; Callable::CallError ce; + { + List<WindowID>::Element *E = popup_list.front(); + if (E && Object::cast_to<InputEventKey>(*p_event)) { + // Redirect keyboard input to active popup. + if (windows.has(E->get())) { + Callable callable = windows[E->get()].input_event_callback; + if (callable.is_valid()) { + callable.call((const Variant **)&evp, 1, ret, ce); + } + } + in_dispatch_input_event = false; + return; + } + } + Ref<InputEventFromWindow> event_from_window = p_event; if (event_from_window.is_valid() && event_from_window->get_window_id() != INVALID_WINDOW_ID) { // Send to a window. if (windows.has(event_from_window->get_window_id())) { Callable callable = windows[event_from_window->get_window_id()].input_event_callback; - if (callable.is_null()) { - return; + if (callable.is_valid()) { + callable.call((const Variant **)&evp, 1, ret, ce); } - callable.call((const Variant **)&evp, 1, ret, ce); } } else { // Send to all windows. - for (Map<WindowID, WindowData>::Element *E = windows.front(); E; E = E->next()) { - Callable callable = E->get().input_event_callback; - if (callable.is_null()) { - continue; + for (KeyValue<WindowID, WindowData> &E : windows) { + Callable callable = E.value.input_event_callback; + if (callable.is_valid()) { + callable.call((const Variant **)&evp, 1, ret, ce); } - callable.call((const Variant **)&evp, 1, ret, ce); } } - in_dispatch_input_event = false; } } @@ -513,6 +525,9 @@ DisplayServerOSX::WindowData &DisplayServerOSX::get_window(WindowID p_window) { } void DisplayServerOSX::send_event(NSEvent *p_event) { + if ([p_event type] == NSEventTypeLeftMouseDown || [p_event type] == NSEventTypeRightMouseDown || [p_event type] == NSEventTypeOtherMouseDown) { + mouse_process_popups(); + } // Special case handling of command-period, which is traditionally a special // shortcut in macOS and doesn't arrive at our regular keyDown handler. 
if ([p_event type] == NSEventTypeKeyDown) { @@ -584,6 +599,14 @@ void DisplayServerOSX::set_last_focused_window(WindowID p_window) { last_focused_window = p_window; } +void DisplayServerOSX::set_is_resizing(bool p_is_resizing) { + is_resizing = p_is_resizing; +} + +bool DisplayServerOSX::get_is_resizing() const { + return is_resizing; +} + void DisplayServerOSX::window_update(WindowID p_window) { #if defined(GLES3_ENABLED) if (gl_manager) { @@ -1366,7 +1389,8 @@ DisplayServer::WindowID DisplayServerOSX::create_sub_window(WindowMode p_mode, V void DisplayServerOSX::show_window(WindowID p_id) { WindowData &wd = windows[p_id]; - if (wd.no_focus) { + popup_open(p_id); + if (wd.no_focus || wd.is_popup) { [wd.window_object orderFront:nil]; } else { [wd.window_object makeKeyAndOrderFront:nil]; } @@ -1809,7 +1833,7 @@ void DisplayServerOSX::window_set_flag(WindowFlags p_flag, bool p_enabled, Windo } _update_window_style(wd); if ([wd.window_object isVisible]) { - if (wd.no_focus) { + if (wd.no_focus || wd.is_popup) { [wd.window_object orderFront:nil]; } else { [wd.window_object makeKeyAndOrderFront:nil]; @@ -1838,6 +1862,11 @@ void DisplayServerOSX::window_set_flag(WindowFlags p_flag, bool p_enabled, Windo case WINDOW_FLAG_NO_FOCUS: { wd.no_focus = p_enabled; } break; + case WINDOW_FLAG_POPUP: { + ERR_FAIL_COND_MSG(p_window == MAIN_WINDOW_ID, "Main window can't be popup."); + ERR_FAIL_COND_MSG([wd.window_object isVisible] && (wd.is_popup != p_enabled), "Popup flag can't be changed while the window is opened."); + wd.is_popup = p_enabled; + } break; default: { } } @@ -1869,6 +1898,9 @@ bool DisplayServerOSX::window_get_flag(WindowFlags p_flag, WindowID p_window) co case WINDOW_FLAG_NO_FOCUS: { return wd.no_focus; } break; + case WINDOW_FLAG_POPUP: { + return wd.is_popup; + } break; default: { } } @@ -1888,7 +1920,7 @@ void DisplayServerOSX::window_move_to_foreground(WindowID p_window) { const WindowData &wd = windows[p_window]; [[NSApplication sharedApplication] activateIgnoringOtherApps:YES]; - if (wd.no_focus) { + if (wd.no_focus || wd.is_popup) { [wd.window_object orderFront:nil]; } else { [wd.window_object makeKeyAndOrderFront:nil]; @@ -2446,6 +2478,120 @@ void DisplayServerOSX::register_osx_driver() { register_create_function("osx", create_func, get_rendering_drivers_func); } +DisplayServer::WindowID DisplayServerOSX::window_get_active_popup() const { + const List<WindowID>::Element *E = popup_list.back(); + if (E) { + return E->get(); + } else { + return INVALID_WINDOW_ID; + } +} + +void DisplayServerOSX::window_set_popup_safe_rect(WindowID p_window, const Rect2i &p_rect) { + _THREAD_SAFE_METHOD_ + + ERR_FAIL_COND(!windows.has(p_window)); + WindowData &wd = windows[p_window]; + wd.parent_safe_rect = p_rect; +} + +Rect2i DisplayServerOSX::window_get_popup_safe_rect(WindowID p_window) const { + _THREAD_SAFE_METHOD_ + + ERR_FAIL_COND_V(!windows.has(p_window), Rect2i()); + const WindowData &wd = windows[p_window]; + return wd.parent_safe_rect; +} + +void DisplayServerOSX::popup_open(WindowID p_window) { + WindowData &wd = windows[p_window]; + if (wd.is_popup) { + bool was_empty = popup_list.is_empty(); + // Close all popups, up to current popup parent, or every popup if new window is not transient.
+ List<WindowID>::Element *E = popup_list.back(); + while (E) { + if (wd.transient_parent != E->get() || wd.transient_parent == INVALID_WINDOW_ID) { + send_window_event(windows[E->get()], DisplayServerOSX::WINDOW_EVENT_CLOSE_REQUEST); + List<WindowID>::Element *F = E->prev(); + popup_list.erase(E); + E = F; + } else { + break; + } + } + + if (was_empty && popup_list.is_empty()) { + // Inform OS that popup was opened, to close other native popups. + [[NSDistributedNotificationCenter defaultCenter] postNotificationName:@"com.apple.HIToolbox.beginMenuTrackingNotification" object:@"org.godotengine.godot.popup_window"]; + } + time_since_popup = OS::get_singleton()->get_ticks_msec(); + popup_list.push_back(p_window); + } +} + +void DisplayServerOSX::popup_close(WindowID p_window) { + bool was_empty = popup_list.is_empty(); + List<WindowID>::Element *E = popup_list.find(p_window); + while (E) { + send_window_event(windows[E->get()], DisplayServerOSX::WINDOW_EVENT_CLOSE_REQUEST); + List<WindowID>::Element *F = E->next(); + popup_list.erase(E); + E = F; + } + if (!was_empty && popup_list.is_empty()) { + // Inform OS that all popups are closed. + [[NSDistributedNotificationCenter defaultCenter] postNotificationName:@"com.apple.HIToolbox.endMenuTrackingNotification" object:@"org.godotengine.godot.popup_window"]; + } +} + +void DisplayServerOSX::mouse_process_popups(bool p_close) { + _THREAD_SAFE_METHOD_ + + bool was_empty = popup_list.is_empty(); + if (p_close) { + // Close all popups. + List<WindowID>::Element *E = popup_list.front(); + while (E) { + send_window_event(windows[E->get()], DisplayServerOSX::WINDOW_EVENT_CLOSE_REQUEST); + List<WindowID>::Element *F = E->next(); + popup_list.erase(E); + E = F; + } + if (!was_empty) { + // Inform OS that all popups are closed. + [[NSDistributedNotificationCenter defaultCenter] postNotificationName:@"com.apple.HIToolbox.endMenuTrackingNotification" object:@"org.godotengine.godot.popup_window"]; + } + } else { + uint64_t delta = OS::get_singleton()->get_ticks_msec() - time_since_popup; + if (delta < 250) { + return; + } + + Point2i pos = mouse_get_position(); + List<WindowID>::Element *E = popup_list.back(); + while (E) { + // Popup window area. + Rect2i win_rect = Rect2i(window_get_position(E->get()), window_get_size(E->get())); + // Area of the parent window, which is responsible for opening the sub-menu. + Rect2i safe_rect = window_get_popup_safe_rect(E->get()); + if (win_rect.has_point(pos)) { + break; + } else if (safe_rect != Rect2i() && safe_rect.has_point(pos)) { + break; + } else { + send_window_event(windows[E->get()], DisplayServerOSX::WINDOW_EVENT_CLOSE_REQUEST); + List<WindowID>::Element *F = E->prev(); + popup_list.erase(E); + E = F; + } + } + if (!was_empty && popup_list.is_empty()) { + // Inform OS that all popups are closed.
+ [[NSDistributedNotificationCenter defaultCenter] postNotificationName:@"com.apple.HIToolbox.endMenuTrackingNotification" object:@"org.godotengine.godot.popup_window"]; + } + } +} + DisplayServerOSX::DisplayServerOSX(const String &p_rendering_driver, WindowMode p_mode, VSyncMode p_vsync_mode, uint32_t p_flags, const Vector2i &p_resolution, Error &r_error) { Input::get_singleton()->set_event_dispatch_function(_dispatch_input_events); diff --git a/platform/osx/export/export_plugin.cpp b/platform/osx/export/export_plugin.cpp index 24b9bc02a2..4d0fc9add6 100644 --- a/platform/osx/export/export_plugin.cpp +++ b/platform/osx/export/export_plugin.cpp @@ -787,6 +787,7 @@ Error EditorExportPlatformOSX::export_project(const Ref<EditorExportPreset> &p_p String fname = tmp_app_path_name + "/Contents/Resources/en.lproj"; tmp_app_dir->make_dir_recursive(fname); FileAccessRef f = FileAccess::open(fname + "/InfoPlist.strings", FileAccess::WRITE); + f->store_line("CFBundleDisplayName = \"" + ProjectSettings::get_singleton()->get("application/config/name").operator String() + "\";"); } for (const String &E : translations) { @@ -795,6 +796,10 @@ Error EditorExportPlatformOSX::export_project(const Ref<EditorExportPreset> &p_p String fname = tmp_app_path_name + "/Contents/Resources/" + tr->get_locale() + ".lproj"; tmp_app_dir->make_dir_recursive(fname); FileAccessRef f = FileAccess::open(fname + "/InfoPlist.strings", FileAccess::WRITE); + String prop = "application/config/name_" + tr->get_locale(); + if (ProjectSettings::get_singleton()->has_setting(prop)) { + f->store_line("CFBundleDisplayName = \"" + ProjectSettings::get_singleton()->get(prop).operator String() + "\";"); + } } } } diff --git a/platform/osx/godot_application_delegate.mm b/platform/osx/godot_application_delegate.mm index be284ba543..dc82075c44 100644 --- a/platform/osx/godot_application_delegate.mm +++ b/platform/osx/godot_application_delegate.mm @@ -68,6 +68,10 @@ } - (void)applicationDidResignActive:(NSNotification *)notification { + DisplayServerOSX *ds = (DisplayServerOSX *)DisplayServer::get_singleton(); + if (ds) { + ds->mouse_process_popups(true); + } if (OS::get_singleton()->get_main_loop()) { OS::get_singleton()->get_main_loop()->notification(MainLoop::NOTIFICATION_APPLICATION_FOCUS_OUT); } diff --git a/platform/osx/godot_content_view.mm b/platform/osx/godot_content_view.mm index 76d9cfb081..e96f0a8098 100644 --- a/platform/osx/godot_content_view.mm +++ b/platform/osx/godot_content_view.mm @@ -281,7 +281,7 @@ } DisplayServerOSX::WindowData &wd = ds->get_window(window_id); - return !wd.no_focus; + return !wd.no_focus && !wd.is_popup; } - (BOOL)acceptsFirstResponder { diff --git a/platform/osx/godot_window.mm b/platform/osx/godot_window.mm index 772a2ddb9f..d43853a94b 100644 --- a/platform/osx/godot_window.mm +++ b/platform/osx/godot_window.mm @@ -52,7 +52,7 @@ } DisplayServerOSX::WindowData &wd = ds->get_window(window_id); - return !wd.no_focus; + return !wd.no_focus && !wd.is_popup; } - (BOOL)canBecomeMainWindow { @@ -63,7 +63,7 @@ } DisplayServerOSX::WindowData &wd = ds->get_window(window_id); - return !wd.no_focus; + return !wd.no_focus && !wd.is_popup; } @end diff --git a/platform/osx/godot_window_delegate.mm b/platform/osx/godot_window_delegate.mm index 1742be987d..9f49a6a4e9 100644 --- a/platform/osx/godot_window_delegate.mm +++ b/platform/osx/godot_window_delegate.mm @@ -54,6 +54,8 @@ return; } + ds->popup_close(window_id); + DisplayServerOSX::WindowData &wd = ds->get_window(window_id); while (wd.transient_children.size()) { 
ds->window_set_transient(wd.transient_children.front()->get(), DisplayServerOSX::INVALID_WINDOW_ID); @@ -147,6 +149,20 @@ } } +- (void)windowWillStartLiveResize:(NSNotification *)notification { + DisplayServerOSX *ds = (DisplayServerOSX *)DisplayServer::get_singleton(); + if (ds) { + ds->set_is_resizing(true); + } +} + +- (void)windowDidEndLiveResize:(NSNotification *)notification { + DisplayServerOSX *ds = (DisplayServerOSX *)DisplayServer::get_singleton(); + if (ds) { + ds->set_is_resizing(false); + } +} + - (void)windowDidResize:(NSNotification *)notification { DisplayServerOSX *ds = (DisplayServerOSX *)DisplayServer::get_singleton(); if (!ds || !ds->has_window(window_id)) { diff --git a/platform/osx/os_osx.mm b/platform/osx/os_osx.mm index 6700f8fe82..7e0cf9f9cc 100644 --- a/platform/osx/os_osx.mm +++ b/platform/osx/os_osx.mm @@ -58,7 +58,8 @@ _FORCE_INLINE_ String OS_OSX::get_framework_executable(const String &p_path) { void OS_OSX::pre_wait_observer_cb(CFRunLoopObserverRef p_observer, CFRunLoopActivity p_activiy, void *p_context) { // Prevent main loop from sleeping and redraw window during resize / modal popups. - if (get_singleton()->get_main_loop()) { + DisplayServerOSX *ds = (DisplayServerOSX *)DisplayServer::get_singleton(); + if (get_singleton()->get_main_loop() && ds && (get_singleton()->get_render_thread_mode() != RENDER_SEPARATE_THREAD || !ds->get_is_resizing())) { Main::force_redraw(); if (!Main::is_iterating()) { // Avoid cyclic loop. Main::iteration(); diff --git a/platform/windows/display_server_windows.cpp b/platform/windows/display_server_windows.cpp index 6b4b342389..163f5c350b 100644 --- a/platform/windows/display_server_windows.cpp +++ b/platform/windows/display_server_windows.cpp @@ -546,6 +546,9 @@ DisplayServer::WindowID DisplayServerWindows::create_sub_window(WindowMode p_mod if (p_flags & WINDOW_FLAG_NO_FOCUS_BIT) { wd.no_focus = true; } + if (p_flags & WINDOW_FLAG_POPUP_BIT) { + wd.is_popup = true; + } // Inherit icons from MAIN_WINDOW for all sub windows. HICON mainwindow_icon = (HICON)SendMessage(windows[MAIN_WINDOW_ID].hWnd, WM_GETICON, ICON_SMALL, 0); @@ -563,13 +566,14 @@ void DisplayServerWindows::show_window(WindowID p_id) { ERR_FAIL_COND(!windows.has(p_id)); WindowData &wd = windows[p_id]; + popup_open(p_id); if (p_id != MAIN_WINDOW_ID) { _update_window_style(p_id); } - ShowWindow(wd.hWnd, wd.no_focus ? SW_SHOWNOACTIVATE : SW_SHOW); // Show the window. - if (!wd.no_focus) { + ShowWindow(wd.hWnd, (wd.no_focus || wd.is_popup) ? SW_SHOWNOACTIVATE : SW_SHOW); // Show the window. + if (!wd.no_focus && !wd.is_popup) { SetForegroundWindow(wd.hWnd); // Slightly higher priority. SetFocus(wd.hWnd); // Set keyboard focus. 
} @@ -581,6 +585,8 @@ void DisplayServerWindows::delete_sub_window(WindowID p_window) { ERR_FAIL_COND(!windows.has(p_window)); ERR_FAIL_COND_MSG(p_window == MAIN_WINDOW_ID, "Main window cannot be deleted."); + popup_close(p_window); + WindowData &wd = windows[p_window]; while (wd.transient_children.size()) { @@ -1019,6 +1025,7 @@ void DisplayServerWindows::_get_window_style(bool p_main_window, bool p_fullscre r_style_ex = WS_EX_WINDOWEDGE; if (p_main_window) { r_style_ex |= WS_EX_APPWINDOW; + r_style |= WS_VISIBLE; } if (p_fullscreen || p_borderless) { @@ -1037,11 +1044,15 @@ void DisplayServerWindows::_get_window_style(bool p_main_window, bool p_fullscre r_style = WS_OVERLAPPED | WS_CAPTION | WS_SYSMENU; } } - r_style |= WS_VISIBLE; if (p_no_activate_focus) { r_style_ex |= WS_EX_TOPMOST | WS_EX_NOACTIVATE; } + + if (!p_borderless && !p_no_activate_focus) { + r_style |= WS_VISIBLE; + } + r_style |= WS_CLIPCHILDREN | WS_CLIPSIBLINGS; r_style_ex |= WS_EX_ACCEPTFILES; } @@ -1055,12 +1066,12 @@ void DisplayServerWindows::_update_window_style(WindowID p_window, bool p_repain DWORD style = 0; DWORD style_ex = 0; - _get_window_style(p_window == MAIN_WINDOW_ID, wd.fullscreen, wd.multiwindow_fs, wd.borderless, wd.resizable, wd.maximized, wd.no_focus, style, style_ex); + _get_window_style(p_window == MAIN_WINDOW_ID, wd.fullscreen, wd.multiwindow_fs, wd.borderless, wd.resizable, wd.maximized, wd.no_focus || wd.is_popup, style, style_ex); SetWindowLongPtr(wd.hWnd, GWL_STYLE, style); SetWindowLongPtr(wd.hWnd, GWL_EXSTYLE, style_ex); - SetWindowPos(wd.hWnd, wd.always_on_top ? HWND_TOPMOST : HWND_NOTOPMOST, 0, 0, 0, 0, SWP_FRAMECHANGED | SWP_NOMOVE | SWP_NOSIZE | (wd.no_focus ? SWP_NOACTIVATE : 0)); + SetWindowPos(wd.hWnd, wd.always_on_top ? HWND_TOPMOST : HWND_NOTOPMOST, 0, 0, 0, 0, SWP_FRAMECHANGED | SWP_NOMOVE | SWP_NOSIZE | ((wd.no_focus || wd.is_popup) ? SWP_NOACTIVATE : 0)); if (p_repaint) { RECT rect; @@ -1211,6 +1222,7 @@ void DisplayServerWindows::window_set_flag(WindowFlags p_flag, bool p_enabled, W wd.borderless = p_enabled; _update_window_style(p_window); _update_window_mouse_passthrough(p_window); + ShowWindow(wd.hWnd, (wd.no_focus || wd.is_popup) ? SW_SHOWNOACTIVATE : SW_SHOW); // Show the window. 
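+ // Note (editor assumption based on the _get_window_style() change above): the explicit
+ // ShowWindow() call appears to be needed because WS_VISIBLE is no longer applied
+ // unconditionally, so re-applying the style when toggling the borderless flag would
+ // otherwise leave an already visible window hidden.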
} break; case WINDOW_FLAG_ALWAYS_ON_TOP: { ERR_FAIL_COND_MSG(wd.transient_parent != INVALID_WINDOW_ID && p_enabled, "Transient windows can't become on top"); @@ -1224,6 +1236,11 @@ void DisplayServerWindows::window_set_flag(WindowFlags p_flag, bool p_enabled, W wd.no_focus = p_enabled; _update_window_style(p_window); } break; + case WINDOW_FLAG_POPUP: { + ERR_FAIL_COND_MSG(p_window == MAIN_WINDOW_ID, "Main window can't be popup."); + ERR_FAIL_COND_MSG(IsWindowVisible(wd.hWnd) && (wd.is_popup != p_enabled), "Popup flag can't be changed while the window is opened."); + wd.is_popup = p_enabled; + } break; case WINDOW_FLAG_MAX: break; } @@ -1250,6 +1267,9 @@ bool DisplayServerWindows::window_get_flag(WindowFlags p_flag, WindowID p_window case WINDOW_FLAG_NO_FOCUS: { return wd.no_focus; } break; + case WINDOW_FLAG_POPUP: { + return wd.is_popup; + } break; case WINDOW_FLAG_MAX: break; } @@ -1278,7 +1298,9 @@ void DisplayServerWindows::window_move_to_foreground(WindowID p_window) { ERR_FAIL_COND(!windows.has(p_window)); WindowData &wd = windows[p_window]; - SetForegroundWindow(wd.hWnd); + if (!wd.no_focus && !wd.is_popup) { + SetForegroundWindow(wd.hWnd); + } } bool DisplayServerWindows::window_can_draw(WindowID p_window) const { @@ -1989,33 +2011,145 @@ void DisplayServerWindows::_dispatch_input_event(const Ref<InputEvent> &p_event) Variant ret; Callable::CallError ce; + { + List<WindowID>::Element *E = popup_list.front(); + if (E && Object::cast_to<InputEventKey>(*p_event)) { + // Redirect keyboard input to active popup. + if (windows.has(E->get())) { + Callable callable = windows[E->get()].input_event_callback; + if (callable.is_valid()) { + callable.call((const Variant **)&evp, 1, ret, ce); + } + } + in_dispatch_input_event = false; + return; + } + } + Ref<InputEventFromWindow> event_from_window = p_event; if (event_from_window.is_valid() && event_from_window->get_window_id() != INVALID_WINDOW_ID) { // Send to a single window. - if (!windows.has(event_from_window->get_window_id())) { - in_dispatch_input_event = false; - ERR_FAIL_MSG("DisplayServerWindows: Invalid window id in input event."); - } - Callable callable = windows[event_from_window->get_window_id()].input_event_callback; - if (callable.is_null()) { - in_dispatch_input_event = false; - return; + if (windows.has(event_from_window->get_window_id())) { + Callable callable = windows[event_from_window->get_window_id()].input_event_callback; + if (callable.is_valid()) { + callable.call((const Variant **)&evp, 1, ret, ce); + } } - callable.call((const Variant **)&evp, 1, ret, ce); } else { // Send to all windows.
for (const KeyValue<WindowID, WindowData> &E : windows) { const Callable callable = E.value.input_event_callback; - if (callable.is_null()) { - continue; + if (callable.is_valid()) { + callable.call((const Variant **)&evp, 1, ret, ce); } - callable.call((const Variant **)&evp, 1, ret, ce); } } in_dispatch_input_event = false; } +LRESULT CALLBACK MouseProc(int code, WPARAM wParam, LPARAM lParam) { + DisplayServerWindows *ds_win = static_cast<DisplayServerWindows *>(DisplayServer::get_singleton()); + if (ds_win) { + return ds_win->MouseProc(code, wParam, lParam); + } else { + return ::CallNextHookEx(nullptr, code, wParam, lParam); + } +} + +DisplayServer::WindowID DisplayServerWindows::window_get_active_popup() const { + const List<WindowID>::Element *E = popup_list.back(); + if (E) { + return E->get(); + } else { + return INVALID_WINDOW_ID; + } +} + +void DisplayServerWindows::window_set_popup_safe_rect(WindowID p_window, const Rect2i &p_rect) { + _THREAD_SAFE_METHOD_ + + ERR_FAIL_COND(!windows.has(p_window)); + WindowData &wd = windows[p_window]; + wd.parent_safe_rect = p_rect; +} + +Rect2i DisplayServerWindows::window_get_popup_safe_rect(WindowID p_window) const { + _THREAD_SAFE_METHOD_ + + ERR_FAIL_COND_V(!windows.has(p_window), Rect2i()); + const WindowData &wd = windows[p_window]; + return wd.parent_safe_rect; +} + +void DisplayServerWindows::popup_open(WindowID p_window) { + WindowData &wd = windows[p_window]; + if (wd.is_popup) { + // Close all popups, up to current popup parent, or every popup if new window is not transient. + List<WindowID>::Element *E = popup_list.back(); + while (E) { + if (wd.transient_parent != E->get() || wd.transient_parent == INVALID_WINDOW_ID) { + _send_window_event(windows[E->get()], DisplayServerWindows::WINDOW_EVENT_CLOSE_REQUEST); + List<WindowID>::Element *F = E->prev(); + popup_list.erase(E); + E = F; + } else { + break; + } + } + + time_since_popup = OS::get_singleton()->get_ticks_msec(); + popup_list.push_back(p_window); + } +} + +void DisplayServerWindows::popup_close(WindowID p_window) { + List<WindowID>::Element *E = popup_list.find(p_window); + while (E) { + _send_window_event(windows[E->get()], DisplayServerWindows::WINDOW_EVENT_CLOSE_REQUEST); + List<WindowID>::Element *F = E->next(); + popup_list.erase(E); + E = F; + } +} + +LRESULT DisplayServerWindows::MouseProc(int code, WPARAM wParam, LPARAM lParam) { + _THREAD_SAFE_METHOD_ + uint64_t delta = OS::get_singleton()->get_ticks_msec() - time_since_popup; + if (delta > 250) { + switch (wParam) { + case WM_NCLBUTTONDOWN: + case WM_NCRBUTTONDOWN: + case WM_NCMBUTTONDOWN: + case WM_LBUTTONDOWN: + case WM_RBUTTONDOWN: + case WM_MBUTTONDOWN: { + MOUSEHOOKSTRUCT *ms = (MOUSEHOOKSTRUCT *)lParam; + Point2i pos = Point2i(ms->pt.x, ms->pt.y); + List<WindowID>::Element *E = popup_list.back(); + while (E) { + // Popup window area. + Rect2i win_rect = Rect2i(window_get_position(E->get()), window_get_size(E->get())); + // Area of the parent window, which is responsible for opening the sub-menu.
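+ // Illustrative sketch (an assumption, not part of this patch): the safe rect is expected to be
+ // registered by whatever spawns the sub-menu, so that clicks on the spawning item do not
+ // immediately close the popup, e.g. with placeholder values:
+ //   Rect2i parent_item_rect(parent_item_screen_pos, parent_item_size);
+ //   DisplayServer::get_singleton()->window_set_popup_safe_rect(submenu_window_id, parent_item_rect);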
+ Rect2i safe_rect = window_get_popup_safe_rect(E->get()); + if (win_rect.has_point(pos)) { + break; + } else if (safe_rect != Rect2i() && safe_rect.has_point(pos)) { + break; + } else { + _send_window_event(windows[E->get()], DisplayServerWindows::WINDOW_EVENT_CLOSE_REQUEST); + List<WindowID>::Element *F = E->prev(); + popup_list.erase(E); + E = F; + } + } + + } break; + } + } + return ::CallNextHookEx(mouse_monitor, code, wParam, lParam); +} + // Our default window procedure to handle processing of window-related system messages/events. // Also known as DefProc or DefWindowProc. // See: https://docs.microsoft.com/en-us/windows/win32/winmsg/window-procedures @@ -2048,6 +2182,13 @@ LRESULT DisplayServerWindows::WndProc(HWND hWnd, UINT uMsg, WPARAM wParam, LPARA // Process window messages. switch (uMsg) { + case WM_MOUSEACTIVATE: { + if (windows[window_id].no_focus) { + return MA_NOACTIVATEANDEAT; // Do not activate, and discard mouse messages. + } else if (windows[window_id].is_popup) { + return MA_NOACTIVATE; // Do not activate, but process mouse messages. + } + } break; case WM_SETFOCUS: { windows[window_id].window_has_focus = true; last_focused_window = window_id; @@ -2309,7 +2450,7 @@ LRESULT DisplayServerWindows::WndProc(HWND hWnd, UINT uMsg, WPARAM wParam, LPARA mm->set_relative(Vector2(mm->get_position() - Vector2(old_x, old_y))); old_x = mm->get_position().x; old_y = mm->get_position().y; - if (windows[window_id].window_has_focus) { + if (windows[window_id].window_has_focus || window_get_active_popup() == window_id) { Input::get_singleton()->parse_input_event(mm); } } @@ -2451,7 +2592,7 @@ LRESULT DisplayServerWindows::WndProc(HWND hWnd, UINT uMsg, WPARAM wParam, LPARA mm->set_relative(Vector2(mm->get_position() - Vector2(old_x, old_y))); old_x = mm->get_position().x; old_y = mm->get_position().y; - if (windows[window_id].window_has_focus) { + if (windows[window_id].window_has_focus || window_get_active_popup() == window_id) { Input::get_singleton()->parse_input_event(mm); } @@ -2551,7 +2692,7 @@ LRESULT DisplayServerWindows::WndProc(HWND hWnd, UINT uMsg, WPARAM wParam, LPARA mm->set_relative(Vector2(mm->get_position() - Vector2(old_x, old_y))); old_x = mm->get_position().x; old_y = mm->get_position().y; - if (windows[window_id].window_has_focus) { + if (windows[window_id].window_has_focus || window_get_active_popup() == window_id) { Input::get_singleton()->parse_input_event(mm); } @@ -3462,6 +3603,8 @@ DisplayServerWindows::DisplayServerWindows(const String &p_rendering_driver, Win } #endif + HHOOK mouse_monitor = SetWindowsHookEx(WH_MOUSE, ::MouseProc, nullptr, GetCurrentThreadId()); + Point2i window_position( (screen_get_size(0).width - p_resolution.width) / 2, (screen_get_size(0).height - p_resolution.height) / 2); @@ -3545,6 +3688,10 @@ DisplayServerWindows::~DisplayServerWindows() { cursors_cache.clear(); + if (mouse_monitor) { + UnhookWindowsHookEx(mouse_monitor); + } + if (user_proc) { SetWindowLongPtr(windows[MAIN_WINDOW_ID].hWnd, GWLP_WNDPROC, (LONG_PTR)user_proc); } diff --git a/platform/windows/display_server_windows.h b/platform/windows/display_server_windows.h index 7561f9bb77..a56a2b83ac 100644 --- a/platform/windows/display_server_windows.h +++ b/platform/windows/display_server_windows.h @@ -387,9 +387,15 @@ class DisplayServerWindows : public DisplayServer { WindowID transient_parent = INVALID_WINDOW_ID; Set<WindowID> transient_children; + + bool is_popup = false; + Rect2i parent_safe_rect; }; JoypadWindows *joypad; + HHOOK mouse_monitor = nullptr; + List<WindowID> 
popup_list; + uint64_t time_since_popup = 0; WindowID _create_window(WindowMode p_mode, VSyncMode p_vsync_mode, uint32_t p_flags, const Rect2i &p_rect); WindowID window_id_counter = MAIN_WINDOW_ID; @@ -440,6 +446,10 @@ class DisplayServerWindows : public DisplayServer { public: LRESULT WndProc(HWND hWnd, UINT uMsg, WPARAM wParam, LPARAM lParam); + LRESULT MouseProc(int code, WPARAM wParam, LPARAM lParam); + + void popup_open(WindowID p_window); + void popup_close(WindowID p_window); virtual bool has_feature(Feature p_feature) const override; virtual String get_name() const override; @@ -474,6 +484,10 @@ public: virtual void show_window(WindowID p_window) override; virtual void delete_sub_window(WindowID p_window) override; + virtual WindowID window_get_active_popup() const override; + virtual void window_set_popup_safe_rect(WindowID p_window, const Rect2i &p_rect) override; + virtual Rect2i window_get_popup_safe_rect(WindowID p_window) const override; + virtual int64_t window_get_native_handle(HandleType p_handle_type, WindowID p_window = MAIN_WINDOW_ID) const override; virtual WindowID get_window_at_screen_position(const Point2i &p_position) const override; diff --git a/scene/2d/animated_sprite_2d.cpp b/scene/2d/animated_sprite_2d.cpp index 2d05d46342..257e334873 100644 --- a/scene/2d/animated_sprite_2d.cpp +++ b/scene/2d/animated_sprite_2d.cpp @@ -158,14 +158,14 @@ void AnimatedSprite2D::_notification(int p_what) { return; } - double speed = frames->get_animation_speed(animation) * speed_scale; - if (speed == 0) { - return; //do nothing - } - double remaining = get_process_delta_time(); while (remaining) { + double speed = frames->get_animation_speed(animation) * speed_scale; + if (speed == 0) { + return; // Do nothing. + } + if (timeout <= 0) { timeout = _get_frame_duration(); diff --git a/scene/2d/joint_2d.cpp b/scene/2d/joint_2d.cpp index 0467c39746..c2773191ea 100644 --- a/scene/2d/joint_2d.cpp +++ b/scene/2d/joint_2d.cpp @@ -128,7 +128,7 @@ void Joint2D::set_node_a(const NodePath &p_node_a) { return; } - if (joint.is_valid()) { + if (is_configured()) { _disconnect_signals(); } @@ -145,7 +145,7 @@ void Joint2D::set_node_b(const NodePath &p_node_b) { return; } - if (joint.is_valid()) { + if (is_configured()) { _disconnect_signals(); } @@ -159,15 +159,18 @@ NodePath Joint2D::get_node_b() const { void Joint2D::_notification(int p_what) { switch (p_what) { - case NOTIFICATION_READY: { + case NOTIFICATION_POST_ENTER_TREE: { + if (is_configured()) { + _disconnect_signals(); + } _update_joint(); } break; case NOTIFICATION_EXIT_TREE: { - if (joint.is_valid()) { + if (is_configured()) { _disconnect_signals(); - _update_joint(true); } + _update_joint(true); } break; } } @@ -187,7 +190,9 @@ void Joint2D::set_exclude_nodes_from_collision(bool p_enable) { if (exclude_from_collision == p_enable) { return; } - + if (is_configured()) { + _disconnect_signals(); + } _update_joint(true); exclude_from_collision = p_enable; _update_joint(); diff --git a/scene/2d/tile_map.cpp b/scene/2d/tile_map.cpp index 0d50d7f8d6..db33e6561a 100644 --- a/scene/2d/tile_map.cpp +++ b/scene/2d/tile_map.cpp @@ -1997,6 +1997,10 @@ void TileMap::set_cell(int p_layer, const Vector2i &p_coords, int p_source_id, c } } +void TileMap::erase_cell(int p_layer, const Vector2i &p_coords) { + set_cell(p_layer, p_coords, TileSet::INVALID_SOURCE, TileSetSource::INVALID_ATLAS_COORDS, TileSetSource::INVALID_TILE_ALTERNATIVE); +} + int TileMap::get_cell_source_id(int p_layer, const Vector2i &p_coords, bool p_use_proxies) const { 
ERR_FAIL_INDEX_V(p_layer, (int)layers.size(), TileSet::INVALID_SOURCE); @@ -3622,7 +3626,8 @@ void TileMap::_bind_methods() { ClassDB::bind_method(D_METHOD("set_navigation_visibility_mode", "navigation_visibility_mode"), &TileMap::set_navigation_visibility_mode); ClassDB::bind_method(D_METHOD("get_navigation_visibility_mode"), &TileMap::get_navigation_visibility_mode); - ClassDB::bind_method(D_METHOD("set_cell", "layer", "coords", "source_id", "atlas_coords", "alternative_tile"), &TileMap::set_cell, DEFVAL(TileSet::INVALID_SOURCE), DEFVAL(TileSetSource::INVALID_ATLAS_COORDS), DEFVAL(TileSetSource::INVALID_TILE_ALTERNATIVE)); + ClassDB::bind_method(D_METHOD("set_cell", "layer", "coords", "source_id", "atlas_coords", "alternative_tile"), &TileMap::set_cell, DEFVAL(TileSet::INVALID_SOURCE), DEFVAL(TileSetSource::INVALID_ATLAS_COORDS), DEFVAL(0)); + ClassDB::bind_method(D_METHOD("erase_cell", "layer", "coords"), &TileMap::erase_cell); ClassDB::bind_method(D_METHOD("get_cell_source_id", "layer", "coords", "use_proxies"), &TileMap::get_cell_source_id); ClassDB::bind_method(D_METHOD("get_cell_atlas_coords", "layer", "coords", "use_proxies"), &TileMap::get_cell_atlas_coords); ClassDB::bind_method(D_METHOD("get_cell_alternative_tile", "layer", "coords", "use_proxies"), &TileMap::get_cell_alternative_tile); diff --git a/scene/2d/tile_map.h b/scene/2d/tile_map.h index 0da04bfeae..a0655dea2a 100644 --- a/scene/2d/tile_map.h +++ b/scene/2d/tile_map.h @@ -321,7 +321,8 @@ public: VisibilityMode get_navigation_visibility_mode(); // Cells accessors. - void set_cell(int p_layer, const Vector2i &p_coords, int p_source_id = -1, const Vector2i p_atlas_coords = TileSetSource::INVALID_ATLAS_COORDS, int p_alternative_tile = TileSetSource::INVALID_TILE_ALTERNATIVE); + void set_cell(int p_layer, const Vector2i &p_coords, int p_source_id = -1, const Vector2i p_atlas_coords = TileSetSource::INVALID_ATLAS_COORDS, int p_alternative_tile = 0); + void erase_cell(int p_layer, const Vector2i &p_coords); int get_cell_source_id(int p_layer, const Vector2i &p_coords, bool p_use_proxies = false) const; Vector2i get_cell_atlas_coords(int p_layer, const Vector2i &p_coords, bool p_use_proxies = false) const; int get_cell_alternative_tile(int p_layer, const Vector2i &p_coords, bool p_use_proxies = false) const; diff --git a/scene/3d/joint_3d.cpp b/scene/3d/joint_3d.cpp index 36abd0a5c5..ce7c0d8292 100644 --- a/scene/3d/joint_3d.cpp +++ b/scene/3d/joint_3d.cpp @@ -124,7 +124,7 @@ void Joint3D::set_node_a(const NodePath &p_node_a) { return; } - if (joint.is_valid()) { + if (is_configured()) { _disconnect_signals(); } @@ -141,7 +141,7 @@ void Joint3D::set_node_b(const NodePath &p_node_b) { return; } - if (joint.is_valid()) { + if (is_configured()) { _disconnect_signals(); } @@ -166,15 +166,18 @@ int Joint3D::get_solver_priority() const { void Joint3D::_notification(int p_what) { switch (p_what) { - case NOTIFICATION_READY: { + case NOTIFICATION_POST_ENTER_TREE: { + if (is_configured()) { + _disconnect_signals(); + } _update_joint(); } break; case NOTIFICATION_EXIT_TREE: { - if (joint.is_valid()) { + if (is_configured()) { _disconnect_signals(); - _update_joint(true); } + _update_joint(true); } break; } } @@ -183,6 +186,10 @@ void Joint3D::set_exclude_nodes_from_collision(bool p_enable) { if (exclude_from_collision == p_enable) { return; } + if (is_configured()) { + _disconnect_signals(); + } + _update_joint(true); exclude_from_collision = p_enable; _update_joint(); } diff --git a/scene/3d/sprite_3d.cpp b/scene/3d/sprite_3d.cpp index 
b9fb3e9287..ce281c79bc 100644 --- a/scene/3d/sprite_3d.cpp +++ b/scene/3d/sprite_3d.cpp @@ -1019,14 +1019,14 @@ void AnimatedSprite3D::_notification(int p_what) { return; } - float speed = frames->get_animation_speed(animation); - if (speed == 0) { - return; //do nothing - } - double remaining = get_process_delta_time(); while (remaining) { + double speed = frames->get_animation_speed(animation); + if (speed == 0) { + return; // Do nothing. + } + if (timeout <= 0) { timeout = 1.0 / speed; diff --git a/scene/gui/button.cpp b/scene/gui/button.cpp index 27e8b102be..724714b93b 100644 --- a/scene/gui/button.cpp +++ b/scene/gui/button.cpp @@ -583,8 +583,8 @@ void Button::_bind_methods() { Button::Button(const String &p_text) { text_buf.instantiate(); text_buf->set_flags(TextServer::BREAK_MANDATORY); - set_mouse_filter(MOUSE_FILTER_STOP); + set_text(p_text); } diff --git a/scene/gui/check_button.cpp b/scene/gui/check_button.cpp index 5e3131f8a0..527b0061ac 100644 --- a/scene/gui/check_button.cpp +++ b/scene/gui/check_button.cpp @@ -111,9 +111,12 @@ void CheckButton::_notification(int p_what) { } } -CheckButton::CheckButton() { +CheckButton::CheckButton(const String &p_text) : + Button(p_text) { set_toggle_mode(true); + set_text_alignment(HORIZONTAL_ALIGNMENT_LEFT); + if (is_layout_rtl()) { _set_internal_margin(SIDE_LEFT, get_icon_size().width); } else { diff --git a/scene/gui/check_button.h b/scene/gui/check_button.h index 9a72d04db2..7d4bb8bdfc 100644 --- a/scene/gui/check_button.h +++ b/scene/gui/check_button.h @@ -42,7 +42,7 @@ protected: void _notification(int p_what); public: - CheckButton(); + CheckButton(const String &p_text = String()); ~CheckButton(); }; diff --git a/scene/gui/color_picker.cpp b/scene/gui/color_picker.cpp index 3ea2a9795d..aa4391e9f1 100644 --- a/scene/gui/color_picker.cpp +++ b/scene/gui/color_picker.cpp @@ -1429,7 +1429,8 @@ void ColorPickerButton::_bind_methods() { ADD_PROPERTY(PropertyInfo(Variant::BOOL, "edit_alpha"), "set_edit_alpha", "is_editing_alpha"); } -ColorPickerButton::ColorPickerButton() { +ColorPickerButton::ColorPickerButton(const String &p_text) : + Button(p_text) { set_toggle_mode(true); } diff --git a/scene/gui/color_picker.h b/scene/gui/color_picker.h index d6067b1cf4..6f3e16009c 100644 --- a/scene/gui/color_picker.h +++ b/scene/gui/color_picker.h @@ -231,7 +231,7 @@ public: ColorPicker *get_picker(); PopupPanel *get_popup(); - ColorPickerButton(); + ColorPickerButton(const String &p_text = String()); }; VARIANT_ENUM_CAST(ColorPicker::PickerShapeType); diff --git a/scene/gui/file_dialog.cpp b/scene/gui/file_dialog.cpp index 79aaf5c511..678229683f 100644 --- a/scene/gui/file_dialog.cpp +++ b/scene/gui/file_dialog.cpp @@ -262,7 +262,8 @@ void FileDialog::_action_pressed() { return; } - String f = dir_access->get_current_dir().plus_file(file->get_text()); + String file_text = file->get_text(); + String f = file_text.is_absolute_path() ? 
file_text : dir_access->get_current_dir().plus_file(file_text); if ((mode == FILE_MODE_OPEN_ANY || mode == FILE_MODE_OPEN_FILE) && dir_access->file_exists(f)) { emit_signal(SNAME("file_selected"), f); diff --git a/scene/gui/item_list.cpp b/scene/gui/item_list.cpp index 9585b4d51d..e83524b06c 100644 --- a/scene/gui/item_list.cpp +++ b/scene/gui/item_list.cpp @@ -1241,7 +1241,7 @@ void ItemList::_notification(int p_what) { text_ofs.x = size.width - text_ofs.x - max_len; } - items.write[i].text_buf->set_width(max_len); + items.write[i].text_buf->set_width(width - text_ofs.x); if (rtl) { items.write[i].text_buf->set_alignment(HORIZONTAL_ALIGNMENT_RIGHT); @@ -1253,7 +1253,9 @@ void ItemList::_notification(int p_what) { items[i].text_buf->draw_outline(get_canvas_item(), text_ofs, outline_size, font_outline_color); } - items[i].text_buf->draw(get_canvas_item(), text_ofs, modulate); + if (width - text_ofs.x > 0) { + items[i].text_buf->draw(get_canvas_item(), text_ofs, modulate); + } } } diff --git a/scene/gui/item_list.h b/scene/gui/item_list.h index 77e910870f..96735678c1 100644 --- a/scene/gui/item_list.h +++ b/scene/gui/item_list.h @@ -99,7 +99,7 @@ private: SelectMode select_mode = SELECT_SINGLE; IconMode icon_mode = ICON_MODE_LEFT; VScrollBar *scroll_bar; - TextParagraph::OverrunBehavior text_overrun_behavior = TextParagraph::OVERRUN_NO_TRIMMING; + TextParagraph::OverrunBehavior text_overrun_behavior = TextParagraph::OVERRUN_TRIM_ELLIPSIS; uint64_t search_time_msec = 0; String search_string; diff --git a/scene/gui/line_edit.cpp b/scene/gui/line_edit.cpp index 883eb1a1ba..6ad296d7c7 100644 --- a/scene/gui/line_edit.cpp +++ b/scene/gui/line_edit.cpp @@ -847,7 +847,8 @@ void LineEdit::_notification(int p_what) { // Draw carets. ofs.x = x_ofs + scroll_offset; if (draw_caret || drag_caret_force_displayed) { - const int caret_width = get_theme_constant(SNAME("caret_width")) * get_theme_default_base_scale(); + // Prevent carets from disappearing at theme scales below 1.0 (if the caret width is 1). + const int caret_width = get_theme_constant(SNAME("caret_width")) * MAX(1, get_theme_default_base_scale()); if (ime_text.length() == 0) { // Normal caret. @@ -2436,7 +2437,7 @@ void LineEdit::_ensure_menu() { } } -LineEdit::LineEdit() { +LineEdit::LineEdit(const String &p_placeholder) { text_rid = TS->create_shaped_text(); _create_undo_state(); @@ -2451,6 +2452,8 @@ LineEdit::LineEdit() { caret_blink_timer->connect("timeout", callable_mp(this, &LineEdit::_toggle_draw_caret)); set_caret_blink_enabled(false); + set_placeholder(p_placeholder); + set_editable(true); // Initialise to opposite first, so we get past the early-out in set_editable. 
} diff --git a/scene/gui/line_edit.h b/scene/gui/line_edit.h index 1519c09d73..444c9a1c50 100644 --- a/scene/gui/line_edit.h +++ b/scene/gui/line_edit.h @@ -332,7 +332,7 @@ public: void show_virtual_keyboard(); - LineEdit(); + LineEdit(const String &p_placeholder = String()); ~LineEdit(); }; diff --git a/scene/gui/link_button.cpp b/scene/gui/link_button.cpp index 8f40f717c2..dc4f09d22d 100644 --- a/scene/gui/link_button.cpp +++ b/scene/gui/link_button.cpp @@ -317,8 +317,10 @@ void LinkButton::_bind_methods() { ADD_PROPERTY(PropertyInfo(Variant::ARRAY, "structured_text_bidi_override_options"), "set_structured_text_bidi_override_options", "get_structured_text_bidi_override_options"); } -LinkButton::LinkButton() { +LinkButton::LinkButton(const String &p_text) { text_buf.instantiate(); set_focus_mode(FOCUS_NONE); set_default_cursor_shape(CURSOR_POINTING_HAND); + + set_text(p_text); } diff --git a/scene/gui/link_button.h b/scene/gui/link_button.h index a455e866b1..f996558f32 100644 --- a/scene/gui/link_button.h +++ b/scene/gui/link_button.h @@ -90,7 +90,7 @@ public: void set_underline_mode(UnderlineMode p_underline_mode); UnderlineMode get_underline_mode() const; - LinkButton(); + LinkButton(const String &p_text = String()); }; VARIANT_ENUM_CAST(LinkButton::UnderlineMode); diff --git a/scene/gui/menu_button.cpp b/scene/gui/menu_button.cpp index 46d8a61ca1..c04690cdb3 100644 --- a/scene/gui/menu_button.cpp +++ b/scene/gui/menu_button.cpp @@ -227,7 +227,8 @@ void MenuButton::set_disable_shortcuts(bool p_disabled) { disable_shortcuts = p_disabled; } -MenuButton::MenuButton() { +MenuButton::MenuButton(const String &p_text) : + Button(p_text) { set_flat(true); set_toggle_mode(true); set_disable_shortcuts(false); diff --git a/scene/gui/menu_button.h b/scene/gui/menu_button.h index 3647a69d33..9cfb780255 100644 --- a/scene/gui/menu_button.h +++ b/scene/gui/menu_button.h @@ -67,7 +67,7 @@ public: void set_item_count(int p_count); int get_item_count() const; - MenuButton(); + MenuButton(const String &p_text = String()); ~MenuButton(); }; diff --git a/scene/gui/option_button.cpp b/scene/gui/option_button.cpp index 698d74843c..b3804e73d9 100644 --- a/scene/gui/option_button.cpp +++ b/scene/gui/option_button.cpp @@ -412,7 +412,8 @@ void OptionButton::_bind_methods() { ADD_SIGNAL(MethodInfo("item_focused", PropertyInfo(Variant::INT, "index"))); } -OptionButton::OptionButton() { +OptionButton::OptionButton(const String &p_text) : + Button(p_text) { set_toggle_mode(true); set_text_alignment(HORIZONTAL_ALIGNMENT_LEFT); if (is_layout_rtl()) { diff --git a/scene/gui/option_button.h b/scene/gui/option_button.h index adf2bb90ef..5352fe18a6 100644 --- a/scene/gui/option_button.h +++ b/scene/gui/option_button.h @@ -94,7 +94,7 @@ public: virtual void get_translatable_strings(List<String> *p_strings) const override; - OptionButton(); + OptionButton(const String &p_text = String()); ~OptionButton(); }; diff --git a/scene/gui/popup.cpp b/scene/gui/popup.cpp index 4a5dc57e36..24b91cd16a 100644 --- a/scene/gui/popup.cpp +++ b/scene/gui/popup.cpp @@ -42,26 +42,30 @@ void Popup::_input_from_window(const Ref<InputEvent> &p_event) { } void Popup::_initialize_visible_parents() { - visible_parents.clear(); - - Window *parent_window = this; - while (parent_window) { - parent_window = parent_window->get_parent_visible_window(); - if (parent_window) { - visible_parents.push_back(parent_window); - parent_window->connect("focus_entered", callable_mp(this, &Popup::_parent_focused)); - parent_window->connect("tree_exited", 
callable_mp(this, &Popup::_deinitialize_visible_parents)); + if (is_embedded()) { + visible_parents.clear(); + + Window *parent_window = this; + while (parent_window) { + parent_window = parent_window->get_parent_visible_window(); + if (parent_window) { + visible_parents.push_back(parent_window); + parent_window->connect("focus_entered", callable_mp(this, &Popup::_parent_focused)); + parent_window->connect("tree_exited", callable_mp(this, &Popup::_deinitialize_visible_parents)); + } } } } void Popup::_deinitialize_visible_parents() { - for (uint32_t i = 0; i < visible_parents.size(); ++i) { - visible_parents[i]->disconnect("focus_entered", callable_mp(this, &Popup::_parent_focused)); - visible_parents[i]->disconnect("tree_exited", callable_mp(this, &Popup::_deinitialize_visible_parents)); - } + if (is_embedded()) { + for (uint32_t i = 0; i < visible_parents.size(); ++i) { + visible_parents[i]->disconnect("focus_entered", callable_mp(this, &Popup::_parent_focused)); + visible_parents[i]->disconnect("tree_exited", callable_mp(this, &Popup::_deinitialize_visible_parents)); + } - visible_parents.clear(); + visible_parents.clear(); + } } void Popup::_notification(int p_what) { @@ -94,7 +98,7 @@ void Popup::_notification(int p_what) { } void Popup::_parent_focused() { - if (popped_up && close_on_parent_focus) { + if (popped_up && get_flag(FLAG_POPUP)) { _close_pressed(); } } @@ -111,19 +115,7 @@ void Popup::set_as_minsize() { set_size(get_contents_minimum_size()); } -void Popup::set_close_on_parent_focus(bool p_close) { - close_on_parent_focus = p_close; -} - -bool Popup::get_close_on_parent_focus() { - return close_on_parent_focus; -} - void Popup::_bind_methods() { - ClassDB::bind_method(D_METHOD("set_close_on_parent_focus", "close"), &Popup::set_close_on_parent_focus); - ClassDB::bind_method(D_METHOD("get_close_on_parent_focus"), &Popup::get_close_on_parent_focus); - ADD_PROPERTY(PropertyInfo(Variant::BOOL, "close_on_parent_focus"), "set_close_on_parent_focus", "get_close_on_parent_focus"); - ADD_SIGNAL(MethodInfo("popup_hide")); } @@ -184,6 +176,7 @@ Popup::Popup() { set_transient(true); set_flag(FLAG_BORDERLESS, true); set_flag(FLAG_RESIZE_DISABLED, true); + set_flag(FLAG_POPUP, true); connect("window_input", callable_mp(this, &Popup::_input_from_window)); } diff --git a/scene/gui/popup.h b/scene/gui/popup.h index 5678043b23..a3c56c9ff1 100644 --- a/scene/gui/popup.h +++ b/scene/gui/popup.h @@ -42,7 +42,6 @@ class Popup : public Window { LocalVector<Window *> visible_parents; bool popped_up = false; - bool close_on_parent_focus = true; void _input_from_window(const Ref<InputEvent> &p_event); @@ -61,9 +60,6 @@ protected: public: void set_as_minsize(); - void set_close_on_parent_focus(bool p_close); - bool get_close_on_parent_focus(); - Popup(); ~Popup(); }; diff --git a/scene/gui/popup_menu.cpp b/scene/gui/popup_menu.cpp index deca1451ee..af2edfa090 100644 --- a/scene/gui/popup_menu.cpp +++ b/scene/gui/popup_menu.cpp @@ -205,7 +205,6 @@ void PopupMenu::_activate_submenu(int p_over) { submenu_pos.x = this_pos.x - submenu_size.width; } - submenu_popup->set_close_on_parent_focus(false); submenu_popup->set_position(submenu_pos); PopupMenu *submenu_pum = Object::cast_to<PopupMenu>(submenu_popup); @@ -223,6 +222,11 @@ void PopupMenu::_activate_submenu(int p_over) { // Set autohide areas. 
+ Rect2 safe_area = this_rect; + safe_area.position.y += items[p_over]._ofs_cache + scroll_offset + style->get_offset().height - vsep / 2; + safe_area.size.y = items[p_over]._height_cache; + DisplayServer::get_singleton()->window_set_popup_safe_rect(submenu_popup->get_window_id(), safe_area); + // Make the position of the parent popup relative to submenu popup. this_rect.position = this_rect.position - submenu_pum->get_position(); diff --git a/scene/gui/rich_text_label.cpp b/scene/gui/rich_text_label.cpp index dd07831b83..1c9eb14a24 100644 --- a/scene/gui/rich_text_label.cpp +++ b/scene/gui/rich_text_label.cpp @@ -4712,7 +4712,7 @@ Dictionary RichTextLabel::parse_expressions_for_values(Vector<String> p_expressi return d; } -RichTextLabel::RichTextLabel() { +RichTextLabel::RichTextLabel(const String &p_text) { main = memnew(ItemFrame); main->index = 0; current = main; @@ -4734,6 +4734,8 @@ RichTextLabel::RichTextLabel() { vscroll->set_step(1); vscroll->hide(); + set_text(p_text); + set_clip_contents(true); } diff --git a/scene/gui/rich_text_label.h b/scene/gui/rich_text_label.h index 53c2046c8f..076b68a0da 100644 --- a/scene/gui/rich_text_label.h +++ b/scene/gui/rich_text_label.h @@ -620,7 +620,7 @@ public: void set_fixed_size_to_width(int p_width); virtual Size2 get_minimum_size() const override; - RichTextLabel(); + RichTextLabel(const String &p_text = String()); ~RichTextLabel(); }; diff --git a/scene/gui/tab_bar.cpp b/scene/gui/tab_bar.cpp index ce60da762f..0e088a44e5 100644 --- a/scene/gui/tab_bar.cpp +++ b/scene/gui/tab_bar.cpp @@ -552,10 +552,6 @@ void TabBar::set_current_tab(int p_current) { emit_signal(SNAME("tab_selected"), current); return; } - // Triggered by dragging a tab from another TabBar to the selected index, to ensure that tab_changed is emitted. - if (previous == -1) { - previous = current; - } emit_signal(SNAME("tab_selected"), current); @@ -954,9 +950,17 @@ void TabBar::add_tab(const String &p_str, const Ref<Texture2D> &p_icon) { } update(); update_minimum_size(); + + if (tabs.size() == 1 && is_inside_tree()) { + emit_signal(SNAME("tab_changed"), 0); + } } void TabBar::clear_tabs() { + if (tabs.is_empty()) { + return; + } + tabs.clear(); offset = 0; max_drawn_tab = 0; @@ -971,14 +975,16 @@ void TabBar::clear_tabs() { void TabBar::remove_tab(int p_idx) { ERR_FAIL_INDEX(p_idx, tabs.size()); tabs.remove_at(p_idx); - if (current >= p_idx) { + + bool is_tab_changing = current == p_idx && !tabs.is_empty(); + + if (current >= p_idx && current > 0) { current--; } - if (current < 0) { + if (tabs.is_empty()) { offset = 0; max_drawn_tab = 0; - current = 0; previous = 0; } else { offset = MIN(offset, tabs.size() - 1); @@ -986,7 +992,7 @@ void TabBar::remove_tab(int p_idx) { _update_cache(); _ensure_no_over_offset(); - if (scroll_to_selected && !tabs.is_empty()) { + if (scroll_to_selected) { ensure_tab_visible(current); } } @@ -994,15 +1000,18 @@ void TabBar::remove_tab(int p_idx) { update(); update_minimum_size(); notify_property_list_changed(); + + if (is_tab_changing && is_inside_tree()) { + emit_signal(SNAME("tab_changed"), current); + } } Variant TabBar::get_drag_data(const Point2 &p_point) { if (!drag_to_rearrange_enabled) { - return Variant(); + return Control::get_drag_data(p_point); // Allow stuff like TabContainer to override it. 
} int tab_over = get_tab_idx_at_point(p_point); - if (tab_over < 0) { return Variant(); } @@ -1025,12 +1034,13 @@ Variant TabBar::get_drag_data(const Point2 &p_point) { drag_data["type"] = "tab_element"; drag_data["tab_element"] = tab_over; drag_data["from_path"] = get_path(); + return drag_data; } bool TabBar::can_drop_data(const Point2 &p_point, const Variant &p_data) const { if (!drag_to_rearrange_enabled) { - return false; + return Control::can_drop_data(p_point, p_data); // Allow stuff like TabContainer to override it. } Dictionary d = p_data; @@ -1052,16 +1062,16 @@ bool TabBar::can_drop_data(const Point2 &p_point, const Variant &p_data) const { } } } + return false; } void TabBar::drop_data(const Point2 &p_point, const Variant &p_data) { if (!drag_to_rearrange_enabled) { + Control::drop_data(p_point, p_data); // Allow stuff like TabContainer to override it. return; } - int hover_now = get_tab_idx_at_point(p_point); - Dictionary d = p_data; if (!d.has("type")) { return; @@ -1069,6 +1079,7 @@ void TabBar::drop_data(const Point2 &p_point, const Variant &p_data) { if (String(d["type"]) == "tab_element") { int tab_from_id = d["tab_element"]; + int hover_now = get_tab_idx_at_point(p_point); NodePath from_path = d["from_path"]; NodePath to_path = get_path(); @@ -1096,15 +1107,25 @@ void TabBar::drop_data(const Point2 &p_point, const Variant &p_data) { hover_now = get_tab_count(); } - // Workaround to ensure that tab_changed is emitted. - if (current == hover_now) { - current = -1; + from_tabs->remove_tab(tab_from_id); + tabs.insert(hover_now, moving_tab); + + if (tabs.size() > 1) { + if (current >= hover_now) { + current++; + } + if (previous >= hover_now) { + previous++; + } } - tabs.insert(hover_now, moving_tab); - from_tabs->remove_tab(tab_from_id); set_current_tab(hover_now); update_minimum_size(); + + if (tabs.size() == 1) { + emit_signal(SNAME("tab_selected"), 0); + emit_signal(SNAME("tab_changed"), 0); + } } } } @@ -1157,17 +1178,33 @@ bool TabBar::get_clip_tabs() const { return clip_tabs; } -void TabBar::move_tab(int from, int to) { - if (from == to) { +void TabBar::move_tab(int p_from, int p_to) { + if (p_from == p_to) { return; } - ERR_FAIL_INDEX(from, tabs.size()); - ERR_FAIL_INDEX(to, tabs.size()); + ERR_FAIL_INDEX(p_from, tabs.size()); + ERR_FAIL_INDEX(p_to, tabs.size()); + + Tab tab_from = tabs[p_from]; + tabs.remove_at(p_from); + tabs.insert(p_to, tab_from); - Tab tab_from = tabs[from]; - tabs.remove_at(from); - tabs.insert(to, tab_from); + if (current == p_from) { + current = p_to; + } else if (current > p_from && current <= p_to) { + current--; + } else if (current < p_from && current >= p_to) { + current++; + } + + if (previous == p_from) { + previous = p_to; + } else if (previous > p_from && previous >= p_to) { + previous--; + } else if (previous < p_from && previous <= p_to) { + previous++; + } _update_cache(); _ensure_no_over_offset(); @@ -1466,6 +1503,7 @@ void TabBar::_bind_methods() { ClassDB::bind_method(D_METHOD("is_tab_hidden", "tab_idx"), &TabBar::is_tab_hidden); ClassDB::bind_method(D_METHOD("remove_tab", "tab_idx"), &TabBar::remove_tab); ClassDB::bind_method(D_METHOD("add_tab", "title", "icon"), &TabBar::add_tab, DEFVAL(""), DEFVAL(Ref<Texture2D>())); + ClassDB::bind_method(D_METHOD("get_tab_idx_at_point", "point"), &TabBar::get_tab_idx_at_point); ClassDB::bind_method(D_METHOD("set_tab_alignment", "alignment"), &TabBar::set_tab_alignment); ClassDB::bind_method(D_METHOD("get_tab_alignment"), &TabBar::get_tab_alignment); 
ClassDB::bind_method(D_METHOD("set_clip_tabs", "clip_tabs"), &TabBar::set_clip_tabs); diff --git a/scene/gui/tab_bar.h b/scene/gui/tab_bar.h index b428538570..82ae8bce3f 100644 --- a/scene/gui/tab_bar.h +++ b/scene/gui/tab_bar.h @@ -126,7 +126,6 @@ protected: Variant get_drag_data(const Point2 &p_point) override; bool can_drop_data(const Point2 &p_point, const Variant &p_data) const override; void drop_data(const Point2 &p_point, const Variant &p_data) override; - int get_tab_idx_at_point(const Point2 &p_point) const; public: void add_tab(const String &p_str = "", const Ref<Texture2D> &p_icon = Ref<Texture2D>()); @@ -156,13 +155,15 @@ public: void set_tab_button_icon(int p_tab, const Ref<Texture2D> &p_icon); Ref<Texture2D> get_tab_button_icon(int p_tab) const; + int get_tab_idx_at_point(const Point2 &p_point) const; + void set_tab_alignment(AlignmentMode p_alignment); AlignmentMode get_tab_alignment() const; void set_clip_tabs(bool p_clip_tabs); bool get_clip_tabs() const; - void move_tab(int from, int to); + void move_tab(int p_from, int p_to); void set_tab_close_display_policy(CloseButtonDisplayPolicy p_policy); CloseButtonDisplayPolicy get_tab_close_display_policy() const; diff --git a/scene/gui/tab_container.cpp b/scene/gui/tab_container.cpp index 31a5e41086..9953637e4f 100644 --- a/scene/gui/tab_container.cpp +++ b/scene/gui/tab_container.cpp @@ -30,44 +30,17 @@ #include "tab_container.h" -#include "core/object/message_queue.h" -#include "core/string/translation.h" #include "scene/gui/box_container.h" #include "scene/gui/label.h" #include "scene/gui/texture_rect.h" int TabContainer::_get_top_margin() const { - if (!tabs_visible) { - return 0; + int height = 0; + if (tabs_visible && get_tab_count() > 0) { + height = tab_bar->get_minimum_size().height; } - // Respect the minimum tab height. - Ref<StyleBox> tab_unselected = get_theme_stylebox(SNAME("tab_unselected")); - Ref<StyleBox> tab_selected = get_theme_stylebox(SNAME("tab_selected")); - Ref<StyleBox> tab_disabled = get_theme_stylebox(SNAME("tab_disabled")); - - int tab_height = MAX(MAX(tab_unselected->get_minimum_size().height, tab_selected->get_minimum_size().height), tab_disabled->get_minimum_size().height); - - // Font height or higher icon wins. - int content_height = 0; - - Vector<Control *> tabs = _get_tabs(); - for (int i = 0; i < tabs.size(); i++) { - content_height = MAX(content_height, text_buf[i]->get_size().y); - - Control *c = tabs[i]; - if (!c->has_meta("_tab_icon")) { - continue; - } - - Ref<Texture2D> tex = c->get_meta("_tab_icon"); - if (!tex.is_valid()) { - continue; - } - content_height = MAX(content_height, tex->get_size().height); - } - - return tab_height + content_height; + return height; } void TabContainer::gui_input(const Ref<InputEvent> &p_event) { @@ -113,77 +86,6 @@ void TabContainer::gui_input(const Ref<InputEvent> &p_event) { return; } } - - // Do not activate tabs when tabs is empty. - if (get_tab_count() == 0) { - return; - } - - Vector<Control *> tabs = _get_tabs(); - - // Handle navigation buttons. 
- if (buttons_visible_cache) { - int popup_ofs = 0; - if (popup) { - popup_ofs = menu->get_width(); - } - - Ref<Texture2D> increment = get_theme_icon(SNAME("increment")); - Ref<Texture2D> decrement = get_theme_icon(SNAME("decrement")); - if (is_layout_rtl()) { - if (pos.x < popup_ofs + decrement->get_width()) { - if (last_tab_cache < tabs.size() - 1) { - first_tab_cache += 1; - update(); - } - return; - } else if (pos.x < popup_ofs + increment->get_width() + decrement->get_width()) { - if (first_tab_cache > 0) { - first_tab_cache -= 1; - update(); - } - return; - } - } else { - if (pos.x > size.width - increment->get_width() - popup_ofs && pos.x) { - if (last_tab_cache < tabs.size() - 1) { - first_tab_cache += 1; - update(); - } - return; - } else if (pos.x > size.width - increment->get_width() - decrement->get_width() - popup_ofs) { - if (first_tab_cache > 0) { - first_tab_cache -= 1; - update(); - } - return; - } - } - } - - // Activate the clicked tab. - if (is_layout_rtl()) { - pos.x = size.width - pos.x; - } - - if (pos.x < tabs_ofs_cache) { - return; - } - - pos.x -= tabs_ofs_cache; - for (int i = first_tab_cache; i <= last_tab_cache; i++) { - if (get_tab_hidden(i)) { - continue; - } - int tab_width = _get_tab_width(i); - if (pos.x < tab_width) { - if (!get_tab_disabled(i)) { - set_current_tab(i); - } - break; - } - pos.x -= tab_width; - } } Ref<InputEventMouseMotion> mm = p_event; @@ -194,9 +96,8 @@ void TabContainer::gui_input(const Ref<InputEvent> &p_event) { // Mouse must be on tabs in the tab header area. if (pos.y > _get_top_margin()) { - if (menu_hovered || highlight_arrow > -1) { + if (menu_hovered) { menu_hovered = false; - highlight_arrow = -1; update(); } return; @@ -208,7 +109,6 @@ void TabContainer::gui_input(const Ref<InputEvent> &p_event) { if (pos.x <= menu->get_width()) { if (!menu_hovered) { menu_hovered = true; - highlight_arrow = -1; update(); return; } @@ -220,7 +120,6 @@ void TabContainer::gui_input(const Ref<InputEvent> &p_event) { if (pos.x >= size.width - menu->get_width()) { if (!menu_hovered) { menu_hovered = true; - highlight_arrow = -1; update(); return; } @@ -234,102 +133,19 @@ void TabContainer::gui_input(const Ref<InputEvent> &p_event) { return; } } - - // Do not activate tabs when tabs is empty. 
- if ((get_tab_count() == 0 || !buttons_visible_cache) && menu_hovered) { - highlight_arrow = -1; - update(); - return; - } - - int popup_ofs = 0; - if (popup) { - popup_ofs = menu->get_width(); - } - - Ref<Texture2D> increment = get_theme_icon(SNAME("increment")); - Ref<Texture2D> decrement = get_theme_icon(SNAME("decrement")); - - if (is_layout_rtl()) { - if (pos.x <= popup_ofs + decrement->get_width()) { - if (highlight_arrow != 1) { - highlight_arrow = 1; - update(); - } - } else if (pos.x <= popup_ofs + increment->get_width() + decrement->get_width()) { - if (highlight_arrow != 0) { - highlight_arrow = 0; - update(); - } - } else if (highlight_arrow > -1) { - highlight_arrow = -1; - update(); - } - } else { - if (pos.x >= size.width - increment->get_width() - popup_ofs) { - if (highlight_arrow != 1) { - highlight_arrow = 1; - update(); - } - } else if (pos.x >= size.width - increment->get_width() - decrement->get_width() - popup_ofs) { - if (highlight_arrow != 0) { - highlight_arrow = 0; - update(); - } - } else if (highlight_arrow > -1) { - highlight_arrow = -1; - update(); - } - } } } void TabContainer::_notification(int p_what) { switch (p_what) { + case NOTIFICATION_READY: case NOTIFICATION_RESIZED: { - Vector<Control *> tabs = _get_tabs(); - int side_margin = get_theme_constant(SNAME("side_margin")); - Ref<Texture2D> menu = get_theme_icon(SNAME("menu")); - Ref<Texture2D> increment = get_theme_icon(SNAME("increment")); - Ref<Texture2D> decrement = get_theme_icon(SNAME("decrement")); - int header_width = get_size().width - side_margin * 2; - - // Find the width of the header area. - Popup *popup = get_popup(); - if (popup) { - header_width -= menu->get_width(); - } - if (buttons_visible_cache) { - header_width -= increment->get_width() + decrement->get_width(); - } - if (popup || buttons_visible_cache) { - header_width += side_margin; - } - - // Find the width of all tabs after first_tab_cache. - int all_tabs_width = 0; - for (int i = first_tab_cache; i < tabs.size(); i++) { - int tab_width = _get_tab_width(i); - all_tabs_width += tab_width; - } - - // Check if tabs before first_tab_cache would fit into the header area. - for (int i = first_tab_cache - 1; i >= 0; i--) { - int tab_width = _get_tab_width(i); - - if (all_tabs_width + tab_width > header_width) { - break; - } - - all_tabs_width += tab_width; - first_tab_cache--; - } + _update_margins(); } break; case NOTIFICATION_DRAW: { RID canvas = get_canvas_item(); Size2 size = get_size(); - bool rtl = is_layout_rtl(); // Draw only the tab area if the header is hidden. 
Ref<StyleBox> panel = get_theme_stylebox(SNAME("panel")); @@ -338,481 +154,171 @@ void TabContainer::_notification(int p_what) { return; } - Vector<Control *> tabs = _get_tabs(); - Ref<StyleBox> tab_unselected = get_theme_stylebox(SNAME("tab_unselected")); - Ref<StyleBox> tab_selected = get_theme_stylebox(SNAME("tab_selected")); - Ref<StyleBox> tab_disabled = get_theme_stylebox(SNAME("tab_disabled")); - Ref<Texture2D> increment = get_theme_icon(SNAME("increment")); - Ref<Texture2D> increment_hl = get_theme_icon(SNAME("increment_highlight")); - Ref<Texture2D> decrement = get_theme_icon(SNAME("decrement")); - Ref<Texture2D> decrement_hl = get_theme_icon(SNAME("decrement_highlight")); - Ref<Texture2D> menu = get_theme_icon(SNAME("menu")); - Ref<Texture2D> menu_hl = get_theme_icon(SNAME("menu_highlight")); - Color font_selected_color = get_theme_color(SNAME("font_selected_color")); - Color font_unselected_color = get_theme_color(SNAME("font_unselected_color")); - Color font_disabled_color = get_theme_color(SNAME("font_disabled_color")); - int side_margin = get_theme_constant(SNAME("side_margin")); - - // Find out start and width of the header area. - int header_x = side_margin; - int header_width = size.width - side_margin * 2; int header_height = _get_top_margin(); - Popup *popup = get_popup(); - if (popup) { - header_width -= menu->get_width(); - } - - // Check if all tabs would fit into the header area. - int all_tabs_width = 0; - for (int i = 0; i < tabs.size(); i++) { - if (get_tab_hidden(i)) { - continue; - } - int tab_width = _get_tab_width(i); - all_tabs_width += tab_width; - - if (all_tabs_width > header_width) { - // Not all tabs are visible at the same time - reserve space for navigation buttons. - buttons_visible_cache = true; - header_width -= decrement->get_width() + increment->get_width(); - break; - } else { - buttons_visible_cache = false; - } - } - // With buttons, a right side margin does not need to be respected. - if (popup || buttons_visible_cache) { - header_width += side_margin; - } - - if (!buttons_visible_cache) { - first_tab_cache = 0; - } - - // Go through the visible tabs to find the width they occupy. - all_tabs_width = 0; - Vector<int> tab_widths; - for (int i = first_tab_cache; i < tabs.size(); i++) { - if (get_tab_hidden(i)) { - tab_widths.push_back(0); - continue; - } - int tab_width = _get_tab_width(i); - if (all_tabs_width + tab_width > header_width && tab_widths.size() > 0) { - break; - } - all_tabs_width += tab_width; - tab_widths.push_back(tab_width); - } - - // Find the offset at which to draw tabs, according to the alignment. - switch (alignment) { - case ALIGNMENT_LEFT: - tabs_ofs_cache = header_x; - break; - case ALIGNMENT_CENTER: - tabs_ofs_cache = header_x + (header_width / 2) - (all_tabs_width / 2); - break; - case ALIGNMENT_RIGHT: - tabs_ofs_cache = header_x + header_width - all_tabs_width; - break; - } - - if (all_tabs_in_front) { - // Draw the tab area. 
- panel->draw(canvas, Rect2(0, header_height, size.width, size.height - header_height)); - } - - // Draw unselected tabs in back - int x = 0; - int x_current = 0; - int index = 0; - for (int i = 0; i < tab_widths.size(); i++) { - index = i + first_tab_cache; - if (get_tab_hidden(index)) { - continue; - } - - int tab_width = tab_widths[i]; - if (index == current) { - x_current = x; - } else if (get_tab_disabled(index)) { - if (rtl) { - _draw_tab(tab_disabled, font_disabled_color, index, size.width - (tabs_ofs_cache + x) - tab_width); - } else { - _draw_tab(tab_disabled, font_disabled_color, index, tabs_ofs_cache + x); - } - } else { - if (rtl) { - _draw_tab(tab_unselected, font_unselected_color, index, size.width - (tabs_ofs_cache + x) - tab_width); - } else { - _draw_tab(tab_unselected, font_unselected_color, index, tabs_ofs_cache + x); - } - } - x += tab_width; - last_tab_cache = index; - } + panel->draw(canvas, Rect2(0, header_height, size.width, size.height - header_height)); - if (!all_tabs_in_front) { - // Draw the tab area. - panel->draw(canvas, Rect2(0, header_height, size.width, size.height - header_height)); - } + // Draw the popup menu. + if (get_popup()) { + Ref<Texture2D> menu = get_theme_icon(SNAME("menu")); + Ref<Texture2D> menu_hl = get_theme_icon(SNAME("menu_highlight")); - // Draw selected tab in front. Only draw selected tab when it's in visible range. - if (tabs.size() > 0 && current - first_tab_cache < tab_widths.size() && current >= first_tab_cache) { - Ref<StyleBox> current_style_box = get_tab_disabled(current) ? tab_disabled : tab_selected; - if (rtl) { - _draw_tab(current_style_box, font_selected_color, current, size.width - (tabs_ofs_cache + x_current) - tab_widths[current]); - } else { - _draw_tab(current_style_box, font_selected_color, current, tabs_ofs_cache + x_current); - } - } + int x = is_layout_rtl() ? 0 : get_size().width - menu->get_width(); - // Draw the popup menu. - if (rtl) { - x = 0; - } else { - x = get_size().width; - } - if (popup) { - if (!rtl) { - x -= menu->get_width(); - } if (menu_hovered) { menu_hl->draw(get_canvas_item(), Size2(x, (header_height - menu_hl->get_height()) / 2)); } else { menu->draw(get_canvas_item(), Size2(x, (header_height - menu->get_height()) / 2)); } - if (rtl) { - x += menu->get_width(); - } - } - - // Draw the navigation buttons. - if (buttons_visible_cache) { - if (rtl) { - if (last_tab_cache < tabs.size() - 1) { - draw_texture(highlight_arrow == 1 ? decrement_hl : decrement, Point2(x, (header_height - increment->get_height()) / 2)); - } else { - draw_texture(decrement, Point2(x, (header_height - increment->get_height()) / 2), Color(1, 1, 1, 0.5)); - } - x += increment->get_width(); - - if (first_tab_cache > 0) { - draw_texture(highlight_arrow == 0 ? increment_hl : increment, Point2(x, (header_height - decrement->get_height()) / 2)); - } else { - draw_texture(increment, Point2(x, (header_height - decrement->get_height()) / 2), Color(1, 1, 1, 0.5)); - } - x += decrement->get_width(); - } else { - x -= increment->get_width(); - if (last_tab_cache < tabs.size() - 1) { - draw_texture(highlight_arrow == 1 ? increment_hl : increment, Point2(x, (header_height - increment->get_height()) / 2)); - } else { - draw_texture(increment, Point2(x, (header_height - increment->get_height()) / 2), Color(1, 1, 1, 0.5)); - } - - x -= decrement->get_width(); - if (first_tab_cache > 0) { - draw_texture(highlight_arrow == 0 ? 
decrement_hl : decrement, Point2(x, (header_height - decrement->get_height()) / 2)); - } else { - draw_texture(decrement, Point2(x, (header_height - decrement->get_height()) / 2), Color(1, 1, 1, 0.5)); - } - } } } break; case NOTIFICATION_TRANSLATION_CHANGED: case NOTIFICATION_LAYOUT_DIRECTION_CHANGED: case NOTIFICATION_THEME_CHANGED: { - Vector<Control *> tabs = _get_tabs(); - for (int i = 0; i < tabs.size(); i++) { - text_buf.write[i]->clear(); - } - _theme_changing = true; + theme_changing = true; call_deferred(SNAME("_on_theme_changed")); // Wait until all changed theme. } break; } } -void TabContainer::_draw_tab(Ref<StyleBox> &p_tab_style, Color &p_font_color, int p_index, float p_x) { - Control *control = get_tab_control(p_index); - RID canvas = get_canvas_item(); - Color font_outline_color = get_theme_color(SNAME("font_outline_color")); - int outline_size = get_theme_constant(SNAME("outline_size")); - int icon_text_distance = get_theme_constant(SNAME("icon_separation")); - int tab_width = _get_tab_width(p_index); - int header_height = _get_top_margin(); - - // Draw the tab background. - Rect2 tab_rect(p_x, 0, tab_width, header_height); - p_tab_style->draw(canvas, tab_rect); - - // Draw the tab contents. - String text = control->has_meta("_tab_name") ? String(atr(String(control->get_meta("_tab_name")))) : String(atr(control->get_name())); - - int x_content = tab_rect.position.x + p_tab_style->get_margin(SIDE_LEFT); - int top_margin = p_tab_style->get_margin(SIDE_TOP); - int y_center = top_margin + (tab_rect.size.y - p_tab_style->get_minimum_size().y) / 2; - - // Draw the tab icon. - if (control->has_meta("_tab_icon")) { - Ref<Texture2D> icon = control->get_meta("_tab_icon"); - if (icon.is_valid()) { - int y = y_center - (icon->get_height() / 2); - icon->draw(canvas, Point2i(x_content, y)); - if (!text.is_empty()) { - x_content += icon->get_width() + icon_text_distance; - } - } - } - - // Draw the tab text. - Point2i text_pos(x_content, y_center - text_buf[p_index]->get_size().y / 2); - if (outline_size > 0 && font_outline_color.a > 0) { - text_buf[p_index]->draw_outline(canvas, text_pos, outline_size, font_outline_color); - } - text_buf[p_index]->draw(canvas, text_pos, p_font_color); -} - -void TabContainer::_refresh_texts() { - text_buf.clear(); - Vector<Control *> tabs = _get_tabs(); - bool rtl = is_layout_rtl(); - Ref<Font> font = get_theme_font(SNAME("font")); - int font_size = get_theme_font_size(SNAME("font_size")); - for (int i = 0; i < tabs.size(); i++) { - Control *control = Object::cast_to<Control>(tabs[i]); - String text = control->has_meta("_tab_name") ? String(atr(String(control->get_meta("_tab_name")))) : String(atr(control->get_name())); - - Ref<TextLine> name; - name.instantiate(); - name->set_direction(rtl ? 
TextServer::DIRECTION_RTL : TextServer::DIRECTION_LTR); - name->add_string(text, font, font_size, Dictionary(), TranslationServer::get_singleton()->get_tool_locale()); - text_buf.push_back(name); - } -} - void TabContainer::_on_theme_changed() { - if (!_theme_changing) { + if (!theme_changing) { return; } - _refresh_texts(); - - update_minimum_size(); + tab_bar->add_theme_style_override(SNAME("tab_unselected"), get_theme_stylebox(SNAME("tab_unselected"))); + tab_bar->add_theme_style_override(SNAME("tab_selected"), get_theme_stylebox(SNAME("tab_selected"))); + tab_bar->add_theme_style_override(SNAME("tab_disabled"), get_theme_stylebox(SNAME("tab_disabled"))); + tab_bar->add_theme_icon_override(SNAME("increment"), get_theme_icon(SNAME("increment"))); + tab_bar->add_theme_icon_override(SNAME("increment_highlight"), get_theme_icon(SNAME("increment_highlight"))); + tab_bar->add_theme_icon_override(SNAME("decrement"), get_theme_icon(SNAME("decrement"))); + tab_bar->add_theme_icon_override(SNAME("decrement_highlight"), get_theme_icon(SNAME("decrement_highlight"))); + tab_bar->add_theme_color_override(SNAME("font_selected_color"), get_theme_color(SNAME("font_selected_color"))); + tab_bar->add_theme_color_override(SNAME("font_unselected_color"), get_theme_color(SNAME("font_unselected_color"))); + tab_bar->add_theme_color_override(SNAME("font_disabled_color"), get_theme_color(SNAME("font_disabled_color"))); + tab_bar->add_theme_color_override(SNAME("font_outline_color"), get_theme_color(SNAME("font_outline_color"))); + tab_bar->add_theme_font_override(SNAME("font"), get_theme_font(SNAME("font"))); + tab_bar->add_theme_constant_override(SNAME("font_size"), get_theme_constant(SNAME("font_size"))); + tab_bar->add_theme_constant_override(SNAME("icon_separation"), get_theme_constant(SNAME("icon_separation"))); + tab_bar->add_theme_constant_override(SNAME("outline_size"), get_theme_constant(SNAME("outline_size"))); + + _update_margins(); if (get_tab_count() > 0) { _repaint(); - update(); + } else { + update_minimum_size(); } - _theme_changing = false; + update(); + + theme_changing = false; } void TabContainer::_repaint() { Ref<StyleBox> sb = get_theme_stylebox(SNAME("panel")); - Vector<Control *> tabs = _get_tabs(); - for (int i = 0; i < tabs.size(); i++) { - Control *c = tabs[i]; + Vector<Control *> controls = _get_tab_controls(); + int current = get_current_tab(); + + for (int i = 0; i < controls.size(); i++) { + Control *c = controls[i]; + if (i == current) { c->show(); c->set_anchors_and_offsets_preset(Control::PRESET_WIDE); + if (tabs_visible) { c->set_offset(SIDE_TOP, _get_top_margin()); } + c->set_offset(SIDE_TOP, c->get_offset(SIDE_TOP) + sb->get_margin(SIDE_TOP)); c->set_offset(SIDE_LEFT, c->get_offset(SIDE_LEFT) + sb->get_margin(SIDE_LEFT)); c->set_offset(SIDE_RIGHT, c->get_offset(SIDE_RIGHT) - sb->get_margin(SIDE_RIGHT)); c->set_offset(SIDE_BOTTOM, c->get_offset(SIDE_BOTTOM) - sb->get_margin(SIDE_BOTTOM)); - } else { c->hide(); } } -} - -void TabContainer::_on_mouse_exited() { - if (menu_hovered || highlight_arrow > -1) { - menu_hovered = false; - highlight_arrow = -1; - update(); - } -} - -int TabContainer::_get_tab_width(int p_index) const { - ERR_FAIL_INDEX_V(p_index, get_tab_count(), 0); - Control *control = get_tab_control(p_index); - if (!control || get_tab_hidden(p_index)) { - return 0; - } - - // Get the width of the text displayed on the tab. 
- Ref<Font> font = get_theme_font(SNAME("font")); - int font_size = get_theme_font_size(SNAME("font_size")); - String text = control->has_meta("_tab_name") ? String(atr(String(control->get_meta("_tab_name")))) : String(atr(control->get_name())); - int width = font->get_string_size(text, font_size).width; - - // Add space for a tab icon. - if (control->has_meta("_tab_icon")) { - Ref<Texture2D> icon = control->get_meta("_tab_icon"); - if (icon.is_valid()) { - width += icon->get_width(); - if (!text.is_empty()) { - width += get_theme_constant(SNAME("icon_separation")); - } - } - } - - // Respect a minimum size. - Ref<StyleBox> tab_unselected = get_theme_stylebox(SNAME("tab_unselected")); - Ref<StyleBox> tab_selected = get_theme_stylebox(SNAME("tab_selected")); - Ref<StyleBox> tab_disabled = get_theme_stylebox(SNAME("tab_disabled")); - if (get_tab_disabled(p_index)) { - width += tab_disabled->get_minimum_size().width; - } else if (p_index == current) { - width += tab_selected->get_minimum_size().width; - } else { - width += tab_unselected->get_minimum_size().width; - } - return width; + update_minimum_size(); } -Vector<Control *> TabContainer::_get_tabs() const { - Vector<Control *> controls; - for (int i = 0; i < get_child_count(); i++) { - Control *control = Object::cast_to<Control>(get_child(i)); - if (!control || control->is_set_as_top_level()) { - continue; - } - - controls.push_back(control); - } - return controls; -} +void TabContainer::_update_margins() { + int menu_width = get_theme_icon(SNAME("menu"))->get_width(); + int side_margin = get_theme_constant(SNAME("side_margin")); -void TabContainer::_child_renamed_callback() { - _refresh_texts(); - update(); -} + // Directly check for validity, to avoid errors when quitting. + bool has_popup = popup_obj_id.is_valid(); -void TabContainer::add_child_notify(Node *p_child) { - Container::add_child_notify(p_child); + if (get_tab_count() == 0) { + tab_bar->set_offset(SIDE_LEFT, 0); + tab_bar->set_offset(SIDE_RIGHT, has_popup ? -menu_width : 0); - Control *c = Object::cast_to<Control>(p_child); - if (!c || c->is_set_as_top_level()) { return; } - _refresh_texts(); - call_deferred(SNAME("_repaint")); - update(); - - bool first = (_get_tabs().size() == 1); - if (first) { - current = 0; - previous = 0; - } - - p_child->connect("renamed", callable_mp(this, &TabContainer::_child_renamed_callback)); - if (first && is_inside_tree()) { - emit_signal(SNAME("tab_changed"), current); - } -} - -void TabContainer::move_child_notify(Node *p_child) { - Container::move_child_notify(p_child); + switch (get_tab_alignment()) { + case TabBar::ALIGNMENT_LEFT: { + tab_bar->set_offset(SIDE_LEFT, side_margin); + tab_bar->set_offset(SIDE_RIGHT, has_popup ? -menu_width : 0); + } break; - Control *c = Object::cast_to<Control>(p_child); - if (!c || c->is_set_as_top_level()) { - return; - } + case TabBar::ALIGNMENT_CENTER: { + tab_bar->set_offset(SIDE_LEFT, 0); + tab_bar->set_offset(SIDE_RIGHT, has_popup ? 
-menu_width : 0); + } break; - _update_current_tab(); - update(); -} + case TabBar::ALIGNMENT_RIGHT: { + tab_bar->set_offset(SIDE_LEFT, 0); -int TabContainer::get_tab_count() const { - return _get_tabs().size(); -} - -void TabContainer::set_current_tab(int p_current) { - ERR_FAIL_INDEX(p_current, get_tab_count()); + if (has_popup) { + tab_bar->set_offset(SIDE_RIGHT, -menu_width); + return; + } - int pending_previous = current; - current = p_current; + int first_tab_pos = tab_bar->get_tab_rect(0).position.x; + Rect2 last_tab_rect = tab_bar->get_tab_rect(get_tab_count() - 1); + int total_tabs_width = last_tab_rect.position.x - first_tab_pos + last_tab_rect.size.width; - _repaint(); + // Calculate if all the tabs would still fit if the margin was present. + if (get_clip_tabs() && (tab_bar->get_offset_buttons_visible() || (get_tab_count() > 1 && (total_tabs_width + side_margin) > get_size().width))) { + tab_bar->set_offset(SIDE_RIGHT, has_popup ? -menu_width : 0); + } else { + tab_bar->set_offset(SIDE_RIGHT, -side_margin); + } + } break; - if (pending_previous == current) { - emit_signal(SNAME("tab_selected"), current); - } else { - previous = pending_previous; - emit_signal(SNAME("tab_selected"), current); - emit_signal(SNAME("tab_changed"), current); + case TabBar::ALIGNMENT_MAX: + break; // Can't happen, but silences warning. } - - update(); -} - -int TabContainer::get_current_tab() const { - return current; } -int TabContainer::get_previous_tab() const { - return previous; -} - -Control *TabContainer::get_tab_control(int p_idx) const { - Vector<Control *> tabs = _get_tabs(); - if (p_idx >= 0 && p_idx < tabs.size()) { - return tabs[p_idx]; - } else { - return nullptr; +void TabContainer::_on_mouse_exited() { + if (menu_hovered) { + menu_hovered = false; + update(); } } -Control *TabContainer::get_current_tab_control() const { - return get_tab_control(current); -} - -void TabContainer::remove_child_notify(Node *p_child) { - Container::remove_child_notify(p_child); +Vector<Control *> TabContainer::_get_tab_controls() const { + Vector<Control *> controls; + for (int i = 0; i < get_child_count(); i++) { + Control *control = Object::cast_to<Control>(get_child(i)); + if (!control || control->is_set_as_top_level() || control == tab_bar) { + continue; + } - Control *c = Object::cast_to<Control>(p_child); - if (!c || c->is_set_as_top_level()) { - return; + controls.push_back(control); } - // Defer the call because tab is not yet removed (remove_child_notify is called right before p_child is actually removed). 
- call_deferred(SNAME("_update_current_tab")); - - p_child->disconnect("renamed", callable_mp(this, &TabContainer::_child_renamed_callback)); - - update(); -} - -void TabContainer::_update_current_tab() { - _refresh_texts(); - - int tc = get_tab_count(); - if (current >= tc) { - current = tc - 1; - } - if (current < 0) { - current = 0; - } else { - set_current_tab(current); - } + return controls; } -Variant TabContainer::get_drag_data(const Point2 &p_point) { +Variant TabContainer::_get_drag_data_fw(const Point2 &p_point, Control *p_from_control) { if (!drag_to_rearrange_enabled) { return Variant(); } int tab_over = get_tab_idx_at_point(p_point); - if (tab_over < 0) { return Variant(); } @@ -825,18 +331,20 @@ Variant TabContainer::get_drag_data(const Point2 &p_point) { tf->set_texture(icon); drag_preview->add_child(tf); } + Label *label = memnew(Label(get_tab_title(tab_over))); - drag_preview->add_child(label); set_drag_preview(drag_preview); + drag_preview->add_child(label); Dictionary drag_data; drag_data["type"] = "tabc_element"; drag_data["tabc_element"] = tab_over; drag_data["from_path"] = get_path(); + return drag_data; } -bool TabContainer::can_drop_data(const Point2 &p_point, const Variant &p_data) const { +bool TabContainer::_can_drop_data_fw(const Point2 &p_point, const Variant &p_data, Control *p_from_control) const { if (!drag_to_rearrange_enabled) { return false; } @@ -852,7 +360,7 @@ bool TabContainer::can_drop_data(const Point2 &p_point, const Variant &p_data) c if (from_path == to_path) { return true; } else if (get_tabs_rearrange_group() != -1) { - // drag and drop between other TabContainers + // Drag and drop between other TabContainers. Node *from_node = get_node(from_path); TabContainer *from_tabc = Object::cast_to<TabContainer>(from_node); if (from_tabc && from_tabc->get_tabs_rearrange_group() == get_tabs_rearrange_group()) { @@ -860,10 +368,11 @@ bool TabContainer::can_drop_data(const Point2 &p_point, const Variant &p_data) c } } } + return false; } -void TabContainer::drop_data(const Point2 &p_point, const Variant &p_data) { +void TabContainer::_drop_data_fw(const Point2 &p_point, const Variant &p_data, Control *p_from_control) { if (!drag_to_rearrange_enabled) { return; } @@ -883,85 +392,192 @@ void TabContainer::drop_data(const Point2 &p_point, const Variant &p_data) { if (hover_now < 0) { hover_now = get_tab_count() - 1; } - move_child(get_tab_control(tab_from_id), get_tab_control(hover_now)->get_index()); + + move_child(get_tab_control(tab_from_id), get_tab_control(hover_now)->get_index(false)); set_current_tab(hover_now); } else if (get_tabs_rearrange_group() != -1) { - // drag and drop between TabContainers + // Drag and drop between TabContainers. 
Node *from_node = get_node(from_path); TabContainer *from_tabc = Object::cast_to<TabContainer>(from_node); if (from_tabc && from_tabc->get_tabs_rearrange_group() == get_tabs_rearrange_group()) { Control *moving_tabc = from_tabc->get_tab_control(tab_from_id); from_tabc->remove_child(moving_tabc); - add_child(moving_tabc, false, INTERNAL_MODE_FRONT); + add_child(moving_tabc, true); + if (hover_now < 0) { hover_now = get_tab_count() - 1; } - move_child(moving_tabc, get_tab_control(hover_now)->get_index()); + + move_child(moving_tabc, get_tab_control(hover_now)->get_index(false)); + set_current_tab(hover_now); - emit_signal(SNAME("tab_changed"), hover_now); } } } - update(); } -int TabContainer::get_tab_idx_at_point(const Point2 &p_point) const { - if (get_tab_count() == 0) { - return -1; +void TabContainer::_on_tab_changed(int p_tab) { + call_deferred(SNAME("_repaint")); + + emit_signal(SNAME("tab_changed"), p_tab); +} + +void TabContainer::_on_tab_selected(int p_tab) { + if (p_tab != get_previous_tab()) { + call_deferred(SNAME("_repaint")); } - // must be on tabs in the tab header area. - if (p_point.y > _get_top_margin()) { - return -1; + emit_signal(SNAME("tab_selected"), p_tab); +} + +void TabContainer::_child_renamed_callback() { + Vector<Control *> controls = _get_tab_controls(); + for (int i = 0; i < controls.size(); i++) { + if (!controls[i]->has_meta("_tab_name") && String(controls[i]->get_name()) != get_tab_title(i)) { + tab_bar->set_tab_title(i, controls[i]->get_name()); + return; + } } +} - Size2 size = get_size(); - int button_ofs = 0; - int px = p_point.x; +void TabContainer::add_child_notify(Node *p_child) { + if (p_child == tab_bar) { + return; + } + + Container::add_child_notify(p_child); - if (is_layout_rtl()) { - px = size.width - px; + Control *c = Object::cast_to<Control>(p_child); + if (!c || c->is_set_as_top_level()) { + return; } + c->hide(); + + tab_bar->add_tab(p_child->get_name()); - if (px < tabs_ofs_cache) { - return -1; + _update_margins(); + + p_child->connect("renamed", callable_mp(this, &TabContainer::_child_renamed_callback)); + + // TabBar won't emit the "tab_changed" signal when not inside the tree. + if (!is_inside_tree()) { + call_deferred("_repaint"); } +} - Popup *popup = get_popup(); - if (popup) { - Ref<Texture2D> menu = get_theme_icon(SNAME("menu")); - button_ofs += menu->get_width(); +void TabContainer::move_child_notify(Node *p_child) { + if (p_child == tab_bar) { + return; } - if (buttons_visible_cache) { - Ref<Texture2D> increment = get_theme_icon(SNAME("increment")); - Ref<Texture2D> decrement = get_theme_icon(SNAME("decrement")); - button_ofs += increment->get_width() + decrement->get_width(); + + Container::move_child_notify(p_child); + + Control *c = Object::cast_to<Control>(p_child); + if (c && !c->is_set_as_top_level()) { + int old_idx = -1; + String tab_name = c->has_meta("_tab_name") ? String(c->get_meta("_tab_name")) : String(c->get_name()); + + // Find the previous tab index of the control. 
+ for (int i = 0; i < get_tab_count(); i++) { + if (get_tab_title(i) == tab_name) { + old_idx = i; + break; + } + } + + tab_bar->move_tab(old_idx, get_tab_idx_from_control(c)); } - if (px > size.width - button_ofs) { - return -1; +} + +void TabContainer::remove_child_notify(Node *p_child) { + if (p_child == tab_bar) { + return; } - // get the tab at the point - Vector<Control *> tabs = _get_tabs(); - px -= tabs_ofs_cache; - for (int i = first_tab_cache; i <= last_tab_cache; i++) { - int tab_width = _get_tab_width(i); - if (px < tab_width) { + Container::remove_child_notify(p_child); + + Control *c = Object::cast_to<Control>(p_child); + if (!c || c->is_set_as_top_level()) { + return; + } + + tab_bar->remove_tab(get_tab_idx_from_control(c)); + + _update_margins(); + + if (p_child->has_meta("_tab_name")) { + p_child->remove_meta("_tab_name"); + } + p_child->disconnect("renamed", callable_mp(this, &TabContainer::_child_renamed_callback)); + + // TabBar won't emit the "tab_changed" signal when not inside the tree. + if (!is_inside_tree()) { + call_deferred("_repaint"); + } +} + +int TabContainer::get_tab_count() const { + return tab_bar->get_tab_count(); +} + +void TabContainer::set_current_tab(int p_current) { + tab_bar->set_current_tab(p_current); +} + +int TabContainer::get_current_tab() const { + return tab_bar->get_current_tab(); +} + +int TabContainer::get_previous_tab() const { + return tab_bar->get_previous_tab(); +} + +Control *TabContainer::get_tab_control(int p_idx) const { + Vector<Control *> controls = _get_tab_controls(); + if (p_idx >= 0 && p_idx < controls.size()) { + return controls[p_idx]; + } else { + return nullptr; + } +} + +Control *TabContainer::get_current_tab_control() const { + return get_tab_control(tab_bar->get_current_tab()); +} + +int TabContainer::get_tab_idx_at_point(const Point2 &p_point) const { + return tab_bar->get_tab_idx_at_point(p_point); +} + +int TabContainer::get_tab_idx_from_control(Control *p_child) const { + ERR_FAIL_NULL_V(p_child, -1); + ERR_FAIL_COND_V(p_child->get_parent() != this, -1); + + Vector<Control *> controls = _get_tab_controls(); + for (int i = 0; i < controls.size(); i++) { + if (controls[i] == p_child) { return i; } - px -= tab_width; } + return -1; } -void TabContainer::set_tab_alignment(AlignmentMode p_alignment) { - ERR_FAIL_INDEX(p_alignment, 3); - alignment = p_alignment; - update(); +void TabContainer::set_tab_alignment(TabBar::AlignmentMode p_alignment) { + tab_bar->set_tab_alignment(p_alignment); + _update_margins(); } -TabContainer::AlignmentMode TabContainer::get_tab_alignment() const { - return alignment; +TabBar::AlignmentMode TabContainer::get_tab_alignment() const { + return tab_bar->get_tab_alignment(); +} + +void TabContainer::set_clip_tabs(bool p_clip_tabs) { + tab_bar->set_clip_tabs(p_clip_tabs); +} + +bool TabContainer::get_clip_tabs() const { + return tab_bar->get_clip_tabs(); } void TabContainer::set_tabs_visible(bool p_visible) { @@ -970,11 +586,12 @@ void TabContainer::set_tabs_visible(bool p_visible) { } tabs_visible = p_visible; + tab_bar->set_visible(tabs_visible); - Vector<Control *> tabs = _get_tabs(); - for (int i = 0; i < tabs.size(); i++) { - Control *c = tabs[i]; - if (p_visible) { + Vector<Control *> controls = _get_tab_controls(); + for (int i = 0; i < controls.size(); i++) { + Control *c = controls[i]; + if (tabs_visible) { c->set_offset(SIDE_TOP, _get_top_margin()); } else { c->set_offset(SIDE_TOP, 0); @@ -996,7 +613,8 @@ void TabContainer::set_all_tabs_in_front(bool p_in_front) { all_tabs_in_front 
= p_in_front; - update(); + remove_child(tab_bar); + add_child(tab_bar, false, all_tabs_in_front ? INTERNAL_MODE_BACK : INTERNAL_MODE_FRONT); } bool TabContainer::is_all_tabs_in_front() const { @@ -1006,95 +624,61 @@ bool TabContainer::is_all_tabs_in_front() const { void TabContainer::set_tab_title(int p_tab, const String &p_title) { Control *child = get_tab_control(p_tab); ERR_FAIL_COND(!child); - child->set_meta("_tab_name", p_title); - _refresh_texts(); - update(); -} -String TabContainer::get_tab_title(int p_tab) const { - Control *child = get_tab_control(p_tab); - ERR_FAIL_COND_V(!child, ""); - if (child->has_meta("_tab_name")) { - return child->get_meta("_tab_name"); + if (p_title.is_empty()) { + tab_bar->set_tab_title(p_tab, String(child->get_name())); + + if (child->has_meta("_tab_name")) { + child->remove_meta("_tab_name"); + } } else { - return child->get_name(); + tab_bar->set_tab_title(p_tab, p_title); + child->set_meta("_tab_name", p_title); } } +String TabContainer::get_tab_title(int p_tab) const { + return tab_bar->get_tab_title(p_tab); +} + void TabContainer::set_tab_icon(int p_tab, const Ref<Texture2D> &p_icon) { - Control *child = get_tab_control(p_tab); - ERR_FAIL_COND(!child); - child->set_meta("_tab_icon", p_icon); - update(); + tab_bar->set_tab_icon(p_tab, p_icon); } Ref<Texture2D> TabContainer::get_tab_icon(int p_tab) const { - Control *child = get_tab_control(p_tab); - ERR_FAIL_COND_V(!child, Ref<Texture2D>()); - if (child->has_meta("_tab_icon")) { - return child->get_meta("_tab_icon"); - } else { - return Ref<Texture2D>(); - } + return tab_bar->get_tab_icon(p_tab); } void TabContainer::set_tab_disabled(int p_tab, bool p_disabled) { - Control *child = get_tab_control(p_tab); - ERR_FAIL_COND(!child); - child->set_meta("_tab_disabled", p_disabled); - update(); + tab_bar->set_tab_disabled(p_tab, p_disabled); } -bool TabContainer::get_tab_disabled(int p_tab) const { - Control *child = get_tab_control(p_tab); - ERR_FAIL_COND_V(!child, false); - if (child->has_meta("_tab_disabled")) { - return child->get_meta("_tab_disabled"); - } else { - return false; - } +bool TabContainer::is_tab_disabled(int p_tab) const { + return tab_bar->is_tab_disabled(p_tab); } void TabContainer::set_tab_hidden(int p_tab, bool p_hidden) { Control *child = get_tab_control(p_tab); ERR_FAIL_COND(!child); - child->set_meta("_tab_hidden", p_hidden); - update(); - for (int i = 0; i < get_tab_count(); i++) { - int try_tab = (p_tab + 1 + i) % get_tab_count(); - if (get_tab_disabled(try_tab) || get_tab_hidden(try_tab)) { - continue; - } - - set_current_tab(try_tab); - return; - } - //assumed no other tab can be switched to, just hide + tab_bar->set_tab_hidden(p_tab, p_hidden); child->hide(); } -bool TabContainer::get_tab_hidden(int p_tab) const { - Control *child = get_tab_control(p_tab); - ERR_FAIL_COND_V(!child, false); - if (child->has_meta("_tab_hidden")) { - return child->get_meta("_tab_hidden"); - } else { - return false; - } +bool TabContainer::is_tab_hidden(int p_tab) const { + return tab_bar->is_tab_hidden(p_tab); } void TabContainer::get_translatable_strings(List<String> *p_strings) const { - Vector<Control *> tabs = _get_tabs(); - for (int i = 0; i < tabs.size(); i++) { - Control *c = tabs[i]; + Vector<Control *> controls = _get_tab_controls(); + for (int i = 0; i < controls.size(); i++) { + Control *c = controls[i]; if (!c->has_meta("_tab_name")) { continue; } String name = c->get_meta("_tab_name"); - if (!name.is_empty()) { p_strings->push_back(name); } @@ -1104,9 +688,26 @@ void 
TabContainer::get_translatable_strings(List<String> *p_strings) const { Size2 TabContainer::get_minimum_size() const { Size2 ms; - Vector<Control *> tabs = _get_tabs(); - for (int i = 0; i < tabs.size(); i++) { - Control *c = tabs[i]; + if (tabs_visible) { + ms = tab_bar->get_minimum_size(); + + if (!get_clip_tabs()) { + if (get_popup()) { + ms.x += get_theme_icon(SNAME("menu"))->get_width(); + } + + int side_margin = get_theme_constant(SNAME("side_margin")); + if (side_margin > 0 && get_tab_alignment() != TabBar::ALIGNMENT_CENTER && + (get_tab_alignment() != TabBar::ALIGNMENT_RIGHT || !get_popup())) { + ms.x += side_margin; + } + } + } + + Vector<Control *> controls = _get_tab_controls(); + int max_control_height = 0; + for (int i = 0; i < controls.size(); i++) { + Control *c = controls[i]; if (!c->is_visible_in_tree() && !use_hidden_tabs_for_min_size) { continue; @@ -1114,29 +715,28 @@ Size2 TabContainer::get_minimum_size() const { Size2 cms = c->get_combined_minimum_size(); ms.x = MAX(ms.x, cms.x); - ms.y = MAX(ms.y, cms.y); + max_control_height = MAX(max_control_height, cms.y); } + ms.y += max_control_height; - Ref<StyleBox> tab_unselected = get_theme_stylebox(SNAME("tab_unselected")); - Ref<StyleBox> tab_selected = get_theme_stylebox(SNAME("tab_selected")); - Ref<StyleBox> tab_disabled = get_theme_stylebox(SNAME("tab_disabled")); - - if (tabs_visible) { - ms.y += MAX(MAX(tab_unselected->get_minimum_size().y, tab_selected->get_minimum_size().y), tab_disabled->get_minimum_size().y); - ms.y += _get_top_margin(); - } - - Ref<StyleBox> sb = get_theme_stylebox(SNAME("panel")); - ms += sb->get_minimum_size(); + Size2 panel_ms = get_theme_stylebox(SNAME("panel"))->get_minimum_size(); + ms.x = MAX(ms.x, panel_ms.x); + ms.y += panel_ms.y; return ms; } void TabContainer::set_popup(Node *p_popup) { - ERR_FAIL_NULL(p_popup); + bool had_popup = get_popup(); + Popup *popup = Object::cast_to<Popup>(p_popup); popup_obj_id = popup ? 
popup->get_instance_id() : ObjectID(); - update(); + + if (had_popup != bool(popup)) { + update(); + _update_margins(); + update_minimum_size(); + } } Popup *TabContainer::get_popup() const { @@ -1151,6 +751,7 @@ Popup *TabContainer::get_popup() const { popup_obj_id = ObjectID(); } } + return nullptr; } @@ -1163,15 +764,16 @@ bool TabContainer::get_drag_to_rearrange_enabled() const { } void TabContainer::set_tabs_rearrange_group(int p_group_id) { - tabs_rearrange_group = p_group_id; + tab_bar->set_tabs_rearrange_group(p_group_id); } int TabContainer::get_tabs_rearrange_group() const { - return tabs_rearrange_group; + return tab_bar->get_tabs_rearrange_group(); } void TabContainer::set_use_hidden_tabs_for_min_size(bool p_use_hidden_tabs) { use_hidden_tabs_for_min_size = p_use_hidden_tabs; + update_minimum_size(); } bool TabContainer::get_use_hidden_tabs_for_min_size() const { @@ -1195,6 +797,8 @@ void TabContainer::_bind_methods() { ClassDB::bind_method(D_METHOD("get_tab_control", "tab_idx"), &TabContainer::get_tab_control); ClassDB::bind_method(D_METHOD("set_tab_alignment", "alignment"), &TabContainer::set_tab_alignment); ClassDB::bind_method(D_METHOD("get_tab_alignment"), &TabContainer::get_tab_alignment); + ClassDB::bind_method(D_METHOD("set_clip_tabs", "clip_tabs"), &TabContainer::set_clip_tabs); + ClassDB::bind_method(D_METHOD("get_clip_tabs"), &TabContainer::get_clip_tabs); ClassDB::bind_method(D_METHOD("set_tabs_visible", "visible"), &TabContainer::set_tabs_visible); ClassDB::bind_method(D_METHOD("are_tabs_visible"), &TabContainer::are_tabs_visible); ClassDB::bind_method(D_METHOD("set_all_tabs_in_front", "is_front"), &TabContainer::set_all_tabs_in_front); @@ -1204,23 +808,25 @@ void TabContainer::_bind_methods() { ClassDB::bind_method(D_METHOD("set_tab_icon", "tab_idx", "icon"), &TabContainer::set_tab_icon); ClassDB::bind_method(D_METHOD("get_tab_icon", "tab_idx"), &TabContainer::get_tab_icon); ClassDB::bind_method(D_METHOD("set_tab_disabled", "tab_idx", "disabled"), &TabContainer::set_tab_disabled); - ClassDB::bind_method(D_METHOD("get_tab_disabled", "tab_idx"), &TabContainer::get_tab_disabled); + ClassDB::bind_method(D_METHOD("is_tab_disabled", "tab_idx"), &TabContainer::is_tab_disabled); ClassDB::bind_method(D_METHOD("set_tab_hidden", "tab_idx", "hidden"), &TabContainer::set_tab_hidden); - ClassDB::bind_method(D_METHOD("get_tab_hidden", "tab_idx"), &TabContainer::get_tab_hidden); + ClassDB::bind_method(D_METHOD("is_tab_hidden", "tab_idx"), &TabContainer::is_tab_hidden); ClassDB::bind_method(D_METHOD("get_tab_idx_at_point", "point"), &TabContainer::get_tab_idx_at_point); + ClassDB::bind_method(D_METHOD("get_tab_idx_from_control", "control"), &TabContainer::get_tab_idx_from_control); ClassDB::bind_method(D_METHOD("set_popup", "popup"), &TabContainer::set_popup); ClassDB::bind_method(D_METHOD("get_popup"), &TabContainer::get_popup); ClassDB::bind_method(D_METHOD("set_drag_to_rearrange_enabled", "enabled"), &TabContainer::set_drag_to_rearrange_enabled); ClassDB::bind_method(D_METHOD("get_drag_to_rearrange_enabled"), &TabContainer::get_drag_to_rearrange_enabled); ClassDB::bind_method(D_METHOD("set_tabs_rearrange_group", "group_id"), &TabContainer::set_tabs_rearrange_group); ClassDB::bind_method(D_METHOD("get_tabs_rearrange_group"), &TabContainer::get_tabs_rearrange_group); - ClassDB::bind_method(D_METHOD("set_use_hidden_tabs_for_min_size", "enabled"), &TabContainer::set_use_hidden_tabs_for_min_size); ClassDB::bind_method(D_METHOD("get_use_hidden_tabs_for_min_size"), 
&TabContainer::get_use_hidden_tabs_for_min_size); ClassDB::bind_method(D_METHOD("_repaint"), &TabContainer::_repaint); ClassDB::bind_method(D_METHOD("_on_theme_changed"), &TabContainer::_on_theme_changed); - ClassDB::bind_method(D_METHOD("_update_current_tab"), &TabContainer::_update_current_tab); + ClassDB::bind_method(D_METHOD("_get_drag_data_fw"), &TabContainer::_get_drag_data_fw); + ClassDB::bind_method(D_METHOD("_can_drop_data_fw"), &TabContainer::_can_drop_data_fw); + ClassDB::bind_method(D_METHOD("_drop_data_fw"), &TabContainer::_drop_data_fw); ADD_SIGNAL(MethodInfo("tab_changed", PropertyInfo(Variant::INT, "tab"))); ADD_SIGNAL(MethodInfo("tab_selected", PropertyInfo(Variant::INT, "tab"))); @@ -1228,16 +834,20 @@ void TabContainer::_bind_methods() { ADD_PROPERTY(PropertyInfo(Variant::INT, "tab_alignment", PROPERTY_HINT_ENUM, "Left,Center,Right"), "set_tab_alignment", "get_tab_alignment"); ADD_PROPERTY(PropertyInfo(Variant::INT, "current_tab", PROPERTY_HINT_RANGE, "-1,4096,1", PROPERTY_USAGE_EDITOR), "set_current_tab", "get_current_tab"); + ADD_PROPERTY(PropertyInfo(Variant::BOOL, "clip_tabs"), "set_clip_tabs", "get_clip_tabs"); ADD_PROPERTY(PropertyInfo(Variant::BOOL, "tabs_visible"), "set_tabs_visible", "are_tabs_visible"); ADD_PROPERTY(PropertyInfo(Variant::BOOL, "all_tabs_in_front"), "set_all_tabs_in_front", "is_all_tabs_in_front"); ADD_PROPERTY(PropertyInfo(Variant::BOOL, "drag_to_rearrange_enabled"), "set_drag_to_rearrange_enabled", "get_drag_to_rearrange_enabled"); ADD_PROPERTY(PropertyInfo(Variant::BOOL, "use_hidden_tabs_for_min_size"), "set_use_hidden_tabs_for_min_size", "get_use_hidden_tabs_for_min_size"); - - BIND_ENUM_CONSTANT(ALIGNMENT_LEFT); - BIND_ENUM_CONSTANT(ALIGNMENT_CENTER); - BIND_ENUM_CONSTANT(ALIGNMENT_RIGHT); } TabContainer::TabContainer() { + tab_bar = memnew(TabBar); + tab_bar->set_drag_forwarding(this); + add_child(tab_bar, false, INTERNAL_MODE_FRONT); + tab_bar->set_anchors_and_offsets_preset(Control::PRESET_TOP_WIDE); + tab_bar->connect("tab_changed", callable_mp(this, &TabContainer::_on_tab_changed)); + tab_bar->connect("tab_selected", callable_mp(this, &TabContainer::_on_tab_selected)); + connect("mouse_exited", callable_mp(this, &TabContainer::_on_mouse_exited)); } diff --git a/scene/gui/tab_container.h b/scene/gui/tab_container.h index ee1b3fea51..1322f08206 100644 --- a/scene/gui/tab_container.h +++ b/scene/gui/tab_container.h @@ -33,46 +33,32 @@ #include "scene/gui/container.h" #include "scene/gui/popup.h" -#include "scene/resources/text_line.h" +#include "scene/gui/tab_bar.h" class TabContainer : public Container { GDCLASS(TabContainer, Container); -public: - enum AlignmentMode { - ALIGNMENT_LEFT, - ALIGNMENT_CENTER, - ALIGNMENT_RIGHT, - }; - -private: - int first_tab_cache = 0; - int tabs_ofs_cache = 0; - int last_tab_cache = 0; - int current = 0; - int previous = 0; + TabBar *tab_bar; bool tabs_visible = true; bool all_tabs_in_front = false; - bool buttons_visible_cache = false; bool menu_hovered = false; - int highlight_arrow = -1; - AlignmentMode alignment = ALIGNMENT_CENTER; - int _get_top_margin() const; mutable ObjectID popup_obj_id; bool drag_to_rearrange_enabled = false; bool use_hidden_tabs_for_min_size = false; - int tabs_rearrange_group = -1; + bool theme_changing = false; - Vector<Ref<TextLine>> text_buf; - Vector<Control *> _get_tabs() const; - int _get_tab_width(int p_index) const; - bool _theme_changing = false; + int _get_top_margin() const; + Vector<Control *> _get_tab_controls() const; void _on_theme_changed(); void _repaint(); + 
void _update_margins(); void _on_mouse_exited(); - void _update_current_tab(); - void _draw_tab(Ref<StyleBox> &p_tab_style, Color &p_font_color, int p_index, float p_x); - void _refresh_texts(); + void _on_tab_changed(int p_tab); + void _on_tab_selected(int p_tab); + + Variant _get_drag_data_fw(const Point2 &p_point, Control *p_from_control); + bool _can_drop_data_fw(const Point2 &p_point, const Variant &p_data, Control *p_from_control) const; + void _drop_data_fw(const Point2 &p_point, const Variant &p_data, Control *p_from_control); protected: void _child_renamed_callback(); @@ -81,17 +67,17 @@ protected: virtual void add_child_notify(Node *p_child) override; virtual void move_child_notify(Node *p_child) override; virtual void remove_child_notify(Node *p_child) override; + static void _bind_methods(); - Variant get_drag_data(const Point2 &p_point) override; - bool can_drop_data(const Point2 &p_point, const Variant &p_data) const override; - void drop_data(const Point2 &p_point, const Variant &p_data) override; +public: int get_tab_idx_at_point(const Point2 &p_point) const; + int get_tab_idx_from_control(Control *p_child) const; - static void _bind_methods(); + void set_tab_alignment(TabBar::AlignmentMode p_alignment); + TabBar::AlignmentMode get_tab_alignment() const; -public: - void set_tab_alignment(AlignmentMode p_alignment); - AlignmentMode get_tab_alignment() const; + void set_clip_tabs(bool p_clip_tabs); + bool get_clip_tabs() const; void set_tabs_visible(bool p_visible); bool are_tabs_visible() const; @@ -106,10 +92,10 @@ public: Ref<Texture2D> get_tab_icon(int p_tab) const; void set_tab_disabled(int p_tab, bool p_disabled); - bool get_tab_disabled(int p_tab) const; + bool is_tab_disabled(int p_tab) const; void set_tab_hidden(int p_tab, bool p_hidden); - bool get_tab_hidden(int p_tab) const; + bool is_tab_hidden(int p_tab) const; int get_tab_count() const; void set_current_tab(int p_current); @@ -139,6 +125,4 @@ public: TabContainer(); }; -VARIANT_ENUM_CAST(TabContainer::AlignmentMode); - #endif // TAB_CONTAINER_H diff --git a/scene/gui/text_edit.cpp b/scene/gui/text_edit.cpp index 5a3c622c86..05fda7128c 100644 --- a/scene/gui/text_edit.cpp +++ b/scene/gui/text_edit.cpp @@ -1283,7 +1283,8 @@ void TextEdit::_notification(int p_what) { } // Carets. - const int caret_width = get_theme_constant(SNAME("caret_width")) * get_theme_default_base_scale(); + // Prevent carets from disappearing at theme scales below 1.0 (if the caret width is 1). + const int caret_width = get_theme_constant(SNAME("caret_width")) * MAX(1, get_theme_default_base_scale()); if (!clipped && caret.line == line && line_wrap_index == caret_wrap_index) { caret.draw_pos.y = ofs_y + ldata->get_line_descent(line_wrap_index); @@ -2374,7 +2375,7 @@ void TextEdit::_do_backspace(bool p_word, bool p_all_to_left) { if (p_all_to_left) { int caret_current_column = caret.column; - caret.column = 0; + set_caret_column(0); _remove_text(caret.line, 0, caret.line, caret_current_column); return; } @@ -2920,15 +2921,20 @@ void TextEdit::_clear() { end_complex_operation(); return; } + // Cannot merge with above, as we are not part of the tree on creation. 
+ int old_text_size = text.size(); + clear_undo_history(); text.clear(); - caret.column = 0; - caret.line = 0; + set_caret_line(0, false); + set_caret_column(0); caret.x_ofs = 0; caret.line_ofs = 0; caret.wrap_ofs = 0; caret.last_fit_x = 0; selection.active = false; + + emit_signal(SNAME("lines_edited_from"), old_text_size, 0); } void TextEdit::set_text(const String &p_text) { @@ -2987,14 +2993,16 @@ void TextEdit::set_line(int p_line, const String &p_new_text) { if (p_line < 0 || p_line >= text.size()) { return; } + begin_complex_operation(); _remove_text(p_line, 0, p_line, text[p_line].length()); _insert_text(p_line, 0, p_new_text); - if (caret.line == p_line) { - caret.column = MIN(caret.column, p_new_text.length()); + if (caret.line == p_line && caret.column > p_new_text.length()) { + set_caret_column(MIN(caret.column, p_new_text.length()), false); } if (has_selection() && p_line == selection.to_line && selection.to_column > text[p_line].length()) { selection.to_column = text[p_line].length(); } + end_complex_operation(); } String TextEdit::get_line(int p_line) const { @@ -3049,8 +3057,10 @@ void TextEdit::swap_lines(int p_from_line, int p_to_line) { String tmp = get_line(p_from_line); String tmp2 = get_line(p_to_line); + begin_complex_operation(); set_line(p_to_line, tmp); set_line(p_from_line, tmp2); + end_complex_operation(); } void TextEdit::insert_line_at(int p_at, const String &p_text) { @@ -3059,7 +3069,7 @@ void TextEdit::insert_line_at(int p_at, const String &p_text) { _insert_text(p_at, 0, p_text + "\n"); if (caret.line >= p_at) { // offset caret when located after inserted line - ++caret.line; + set_caret_line(caret.line + 1, false); } if (has_selection()) { if (selection.from_line >= p_at) { @@ -3964,6 +3974,7 @@ void TextEdit::set_caret_line(int p_line, bool p_adjust_viewport, bool p_can_be_ } } } + bool caret_moved = caret.line != p_line; caret.line = p_line; int n_col = _get_char_pos_for_line(caret.last_fit_x, p_line, p_wrap_index); @@ -3977,15 +3988,16 @@ void TextEdit::set_caret_line(int p_line, bool p_adjust_viewport, bool p_can_be_ n_col -= 1; } } + caret_moved = (caret_moved || caret.column != n_col); caret.column = n_col; - if (p_adjust_viewport) { + if (is_inside_tree() && p_adjust_viewport) { adjust_viewport_to_caret(); } setting_caret_line = false; - if (!caret_pos_dirty) { + if (caret_moved && !caret_pos_dirty) { if (is_inside_tree()) { MessageQueue::get_singleton()->push_call(this, "_emit_caret_changed"); } @@ -4002,6 +4014,7 @@ void TextEdit::set_caret_column(int p_col, bool p_adjust_viewport) { p_col = 0; } + bool caret_moved = caret.column != p_col; caret.column = p_col; if (caret.column > get_line(caret.line).length()) { caret.column = get_line(caret.line).length(); @@ -4009,11 +4022,11 @@ void TextEdit::set_caret_column(int p_col, bool p_adjust_viewport) { caret.last_fit_x = _get_column_x_offset_for_line(caret.column, caret.line); - if (p_adjust_viewport) { + if (is_inside_tree() && p_adjust_viewport) { adjust_viewport_to_caret(); } - if (!caret_pos_dirty) { + if (caret_moved && !caret_pos_dirty) { if (is_inside_tree()) { MessageQueue::get_singleton()->push_call(this, "_emit_caret_changed"); } @@ -5651,8 +5664,10 @@ void TextEdit::_generate_context_menu() { if (editable) { menu->add_item(RTR("Paste"), MENU_PASTE, is_shortcut_keys_enabled() ? 
_get_menu_action_accelerator("ui_paste") : Key::NONE); } - menu->add_separator(); - if (is_selecting_enabled()) { + if (selecting_enabled || editable) { + menu->add_separator(); + } + if (selecting_enabled) { menu->add_item(RTR("Select All"), MENU_SELECT_ALL, is_shortcut_keys_enabled() ? _get_menu_action_accelerator("ui_text_select_all") : Key::NONE); } if (editable) { @@ -6566,7 +6581,7 @@ void TextEdit::_base_remove_text(int p_from_line, int p_from_column, int p_to_li emit_signal(SNAME("lines_edited_from"), p_to_line, p_from_line); } -TextEdit::TextEdit() { +TextEdit::TextEdit(const String &p_placeholder) { placeholder_data_buf.instantiate(); clear(); @@ -6608,5 +6623,7 @@ TextEdit::TextEdit() { undo_stack_max_size = GLOBAL_GET("gui/common/text_edit_undo_stack_max_size"); + set_placeholder(p_placeholder); + set_editable(true); } diff --git a/scene/gui/text_edit.h b/scene/gui/text_edit.h index 83a63ae40a..6deaf76e5e 100644 --- a/scene/gui/text_edit.h +++ b/scene/gui/text_edit.h @@ -940,7 +940,7 @@ public: void set_draw_spaces(bool p_enabled); bool is_drawing_spaces() const; - TextEdit(); + TextEdit(const String &p_placeholder = String()); }; VARIANT_ENUM_CAST(TextEdit::CaretType); diff --git a/scene/gui/tree.cpp b/scene/gui/tree.cpp index 73cf2b9c6e..5afc37061b 100644 --- a/scene/gui/tree.cpp +++ b/scene/gui/tree.cpp @@ -3286,8 +3286,21 @@ void Tree::gui_input(const Ref<InputEvent> &p_event) { } else { Rect2 rect = get_selected()->get_meta("__focus_rect"); Point2 mpos = b->get_position(); + int icon_size_x = 0; + Ref<Texture2D> icon = get_selected()->get_icon(selected_col); + if (icon.is_valid()) { + Rect2i icon_region = get_selected()->get_icon_region(selected_col); + if (icon_region == Rect2i()) { + icon_size_x = icon->get_width(); + } else { + icon_size_x = icon_region.size.width; + } + } + // Icon is treated as if it is outside of the rect so that double clicking on it will emit the item_double_clicked signal. if (rtl) { - mpos.x = get_size().width - mpos.x; + mpos.x = get_size().width - (mpos.x + icon_size_x); + } else { + mpos.x -= icon_size_x; } if (rect.has_point(mpos)) { if (!edit_selected()) { diff --git a/scene/main/canvas_item.cpp b/scene/main/canvas_item.cpp index 26b67b763c..d2f5b52dbf 100644 --- a/scene/main/canvas_item.cpp +++ b/scene/main/canvas_item.cpp @@ -59,35 +59,16 @@ bool CanvasItem::is_visible_in_tree() const { return visible && parent_visible_in_tree; } -void CanvasItem::_propagate_visibility_changed(bool p_visible, bool p_is_source) { - if (p_visible && first_draw) { // Avoid propagating it twice. - first_draw = false; - } - if (!p_is_source) { - parent_visible_in_tree = p_visible; - } - notification(NOTIFICATION_VISIBILITY_CHANGED); - - if (visible && p_visible) { - update(); - } else if (!p_visible && (visible || p_is_source)) { - emit_signal(SceneStringNames::get_singleton()->hidden); +void CanvasItem::_propagate_visibility_changed(bool p_parent_visible_in_tree) { + parent_visible_in_tree = p_parent_visible_in_tree; + if (!visible) { + return; } - _block(); - - for (int i = 0; i < get_child_count(); i++) { - CanvasItem *c = Object::cast_to<CanvasItem>(get_child(i)); - - if (c) { // Should the top_levels stop propagation? I think so, but... - if (c->visible) { - c->_propagate_visibility_changed(p_visible); - } else { - c->parent_visible_in_tree = p_visible; - } - } + if (p_parent_visible_in_tree && first_draw) { // Avoid propagating it twice. 
+ first_draw = false; } - _unblock(); + _handle_visibility_change(p_parent_visible_in_tree); } void CanvasItem::set_visible(bool p_visible) { @@ -96,14 +77,34 @@ void CanvasItem::set_visible(bool p_visible) { } visible = p_visible; - RenderingServer::get_singleton()->canvas_item_set_visible(canvas_item, p_visible); if (!parent_visible_in_tree) { notification(NOTIFICATION_VISIBILITY_CHANGED); return; } - _propagate_visibility_changed(p_visible, true); + _handle_visibility_change(p_visible); +} + +void CanvasItem::_handle_visibility_change(bool p_visible) { + RenderingServer::get_singleton()->canvas_item_set_visible(canvas_item, p_visible); + notification(NOTIFICATION_VISIBILITY_CHANGED); + + if (p_visible) { + update(); + } else { + emit_signal(SceneStringNames::get_singleton()->hidden); + } + + _block(); + for (int i = 0; i < get_child_count(); i++) { + CanvasItem *c = Object::cast_to<CanvasItem>(get_child(i)); + + if (c) { // Should the top_levels stop propagation? I think so, but... + c->_propagate_visibility_changed(p_visible); + } + } + _unblock(); } void CanvasItem::show() { diff --git a/scene/main/canvas_item.h b/scene/main/canvas_item.h index c0558b6be2..1b2c188fc0 100644 --- a/scene/main/canvas_item.h +++ b/scene/main/canvas_item.h @@ -108,7 +108,8 @@ private: void _top_level_raise_self(); - void _propagate_visibility_changed(bool p_visible, bool p_is_source = false); + void _propagate_visibility_changed(bool p_parent_visible_in_tree); + void _handle_visibility_change(bool p_visible); void _update_callback(); diff --git a/scene/main/canvas_layer.cpp b/scene/main/canvas_layer.cpp index be24620904..7aa4d391f8 100644 --- a/scene/main/canvas_layer.cpp +++ b/scene/main/canvas_layer.cpp @@ -58,11 +58,7 @@ void CanvasLayer::set_visible(bool p_visible) { if (c) { RenderingServer::get_singleton()->canvas_item_set_visible(c->get_canvas_item(), p_visible && c->is_visible()); - if (c->is_visible()) { - c->_propagate_visibility_changed(p_visible); - } else { - c->parent_visible_in_tree = p_visible; - } + c->_propagate_visibility_changed(p_visible); } } } diff --git a/scene/main/scene_tree.cpp b/scene/main/scene_tree.cpp index 2b4d7d8331..69e7472cf2 100644 --- a/scene/main/scene_tree.cpp +++ b/scene/main/scene_tree.cpp @@ -1256,8 +1256,6 @@ void SceneTree::_bind_methods() { ADD_SIGNAL(MethodInfo("process_frame")); ADD_SIGNAL(MethodInfo("physics_frame")); - ADD_SIGNAL(MethodInfo("files_dropped", PropertyInfo(Variant::PACKED_STRING_ARRAY, "files"), PropertyInfo(Variant::INT, "screen"))); - BIND_ENUM_CONSTANT(GROUP_CALL_DEFAULT); BIND_ENUM_CONSTANT(GROUP_CALL_REVERSE); BIND_ENUM_CONSTANT(GROUP_CALL_REALTIME); diff --git a/scene/main/viewport.cpp b/scene/main/viewport.cpp index ca817b17bc..de6aa2b139 100644 --- a/scene/main/viewport.cpp +++ b/scene/main/viewport.cpp @@ -1239,6 +1239,7 @@ void Viewport::_gui_show_tooltip() { panel->set_transient(true); panel->set_flag(Window::FLAG_NO_FOCUS, true); + panel->set_flag(Window::FLAG_POPUP, false); panel->set_wrap_controls(true); panel->add_child(base_tooltip); @@ -1268,7 +1269,10 @@ void Viewport::_gui_show_tooltip() { gui.tooltip_popup->set_position(r.position); gui.tooltip_popup->set_size(r.size); - gui.tooltip_popup->show(); + DisplayServer::WindowID active_popup = DisplayServer::get_singleton()->window_get_active_popup(); + if (active_popup == DisplayServer::INVALID_WINDOW_ID || active_popup == window->get_window_id()) { + gui.tooltip_popup->show(); + } gui.tooltip_popup->child_controls_changed(); } diff --git a/scene/main/window.cpp 
b/scene/main/window.cpp index 0ce556d36c..6837fcae21 100644 --- a/scene/main/window.cpp +++ b/scene/main/window.cpp @@ -436,8 +436,12 @@ void Window::set_visible(bool p_visible) { //update transient exclusive if (transient_parent) { if (exclusive && visible) { - ERR_FAIL_COND_MSG(transient_parent->exclusive_child && transient_parent->exclusive_child != this, "Transient parent has another exclusive child."); - transient_parent->exclusive_child = this; +#ifdef TOOLS_ENABLED + if (!(Engine::get_singleton()->is_editor_hint() && get_tree()->get_edited_scene_root() && get_tree()->get_edited_scene_root()->is_ancestor_of(this))) { + ERR_FAIL_COND_MSG(transient_parent->exclusive_child && transient_parent->exclusive_child != this, "Transient parent has another exclusive child."); + transient_parent->exclusive_child = this; + } +#endif } else { if (transient_parent->exclusive_child == this) { transient_parent->exclusive_child = nullptr; @@ -951,7 +955,7 @@ bool Window::_can_consume_input_events() const { void Window::_window_input(const Ref<InputEvent> &p_ev) { if (EngineDebugger::is_active()) { - //quit from game window using F8 + // Quit from game window using F8. Ref<InputEventKey> k = p_ev; if (k.is_valid() && k->is_pressed() && !k->is_echo() && k->get_keycode() == Key::F8) { EngineDebugger::get_singleton()->send_message("request_quit", Array()); @@ -959,15 +963,7 @@ void Window::_window_input(const Ref<InputEvent> &p_ev) { } if (exclusive_child != nullptr) { - /* - Window *focus_target = exclusive_child; - focus_target->grab_focus(); - while (focus_target->exclusive_child != nullptr) { - focus_target = focus_target->exclusive_child; - focus_target->grab_focus(); - }*/ - - if (!is_embedding_subwindows()) { //not embedding, no need for event + if (!is_embedding_subwindows()) { // Not embedding, no need for event. 
return; } } @@ -1587,6 +1583,7 @@ void Window::_bind_methods() { ADD_PROPERTYI(PropertyInfo(Variant::BOOL, "always_on_top"), "set_flag", "get_flag", FLAG_ALWAYS_ON_TOP); ADD_PROPERTYI(PropertyInfo(Variant::BOOL, "transparent"), "set_flag", "get_flag", FLAG_TRANSPARENT); ADD_PROPERTYI(PropertyInfo(Variant::BOOL, "unfocusable"), "set_flag", "get_flag", FLAG_NO_FOCUS); + ADD_PROPERTYI(PropertyInfo(Variant::BOOL, "popup_window"), "set_flag", "get_flag", FLAG_POPUP); ADD_GROUP("Limits", ""); ADD_PROPERTY(PropertyInfo(Variant::VECTOR2I, "min_size"), "set_min_size", "get_min_size"); @@ -1630,6 +1627,7 @@ void Window::_bind_methods() { BIND_ENUM_CONSTANT(FLAG_ALWAYS_ON_TOP); BIND_ENUM_CONSTANT(FLAG_TRANSPARENT); BIND_ENUM_CONSTANT(FLAG_NO_FOCUS); + BIND_ENUM_CONSTANT(FLAG_POPUP); BIND_ENUM_CONSTANT(FLAG_MAX); BIND_ENUM_CONSTANT(CONTENT_SCALE_MODE_DISABLED); diff --git a/scene/main/window.h b/scene/main/window.h index f37689f905..3d8e337b4a 100644 --- a/scene/main/window.h +++ b/scene/main/window.h @@ -55,6 +55,7 @@ public: FLAG_ALWAYS_ON_TOP = DisplayServer::WINDOW_FLAG_ALWAYS_ON_TOP, FLAG_TRANSPARENT = DisplayServer::WINDOW_FLAG_TRANSPARENT, FLAG_NO_FOCUS = DisplayServer::WINDOW_FLAG_NO_FOCUS, + FLAG_POPUP = DisplayServer::WINDOW_FLAG_POPUP, FLAG_MAX = DisplayServer::WINDOW_FLAG_MAX, }; diff --git a/scene/resources/importer_mesh.cpp b/scene/resources/importer_mesh.cpp index a27da11f8d..30deb5ccd5 100644 --- a/scene/resources/importer_mesh.cpp +++ b/scene/resources/importer_mesh.cpp @@ -275,6 +275,7 @@ void ImporterMesh::generate_lods(float p_normal_merge_angle, float p_normal_spli PackedInt32Array indices = surfaces[i].arrays[RS::ARRAY_INDEX]; Vector<Vector3> normals = surfaces[i].arrays[RS::ARRAY_NORMAL]; Vector<Vector2> uvs = surfaces[i].arrays[RS::ARRAY_TEX_UV]; + Vector<Vector2> uv2s = surfaces[i].arrays[RS::ARRAY_TEX_UV2]; unsigned int index_count = indices.size(); unsigned int vertex_count = vertices.size(); @@ -313,6 +314,7 @@ void ImporterMesh::generate_lods(float p_normal_merge_angle, float p_normal_spli LocalVector<Vector3> merged_normals; LocalVector<int> merged_normals_counts; const Vector2 *uvs_ptr = uvs.ptr(); + const Vector2 *uv2s_ptr = uv2s.ptr(); for (unsigned int j = 0; j < vertex_count; j++) { const Vector3 &v = vertices_ptr[j]; @@ -327,8 +329,10 @@ void ImporterMesh::generate_lods(float p_normal_merge_angle, float p_normal_spli for (unsigned int k = 0; k < close_verts.size(); k++) { const Pair<int, int> &idx = close_verts[k]; - // TODO check more attributes? 
- if ((!uvs_ptr || uvs_ptr[j].distance_squared_to(uvs_ptr[idx.second]) < CMP_EPSILON2) && normals[idx.second].dot(n) > normal_merge_threshold) { + bool is_uvs_close = (!uvs_ptr || uvs_ptr[j].distance_squared_to(uvs_ptr[idx.second]) < CMP_EPSILON2); + bool is_uv2s_close = (!uv2s_ptr || uv2s_ptr[j].distance_squared_to(uv2s_ptr[idx.second]) < CMP_EPSILON2); + bool is_normals_close = normals[idx.second].dot(n) > normal_merge_threshold; + if (is_uvs_close && is_uv2s_close && is_normals_close) { vertex_remap.push_back(idx.first); merged_normals[idx.first] += normals[idx.second]; merged_normals_counts[idx.first]++; diff --git a/scene/resources/resource_format_text.cpp b/scene/resources/resource_format_text.cpp index c03faa2c2d..d9ac967699 100644 --- a/scene/resources/resource_format_text.cpp +++ b/scene/resources/resource_format_text.cpp @@ -153,7 +153,7 @@ Error ResourceLoaderText::_parse_ext_resource(VariantParser::Stream *p_stream, R RES res = ResourceLoader::load_threaded_get(path); if (res.is_null()) { if (ResourceLoader::get_abort_on_missing_resources()) { - error = ERR_FILE_CORRUPT; + error = ERR_FILE_MISSING_DEPENDENCIES; error_text = "[ext_resource] referenced nonexistent resource at: " + path; _printerr(); return error; @@ -165,7 +165,7 @@ Error ResourceLoaderText::_parse_ext_resource(VariantParser::Stream *p_stream, R r_res = res; } } else { - error = ERR_FILE_CORRUPT; + error = ERR_FILE_MISSING_DEPENDENCIES; error_text = "[ext_resource] referenced non-loaded resource at: " + path; _printerr(); return error; @@ -265,7 +265,9 @@ Ref<PackedScene> ResourceLoaderText::_parse_node_tag(VariantParser::ResourcePars error = VariantParser::parse_tag_assign_eof(&stream, lines, error_text, next_tag, assign, value, &parser); if (error) { - if (error != ERR_FILE_EOF) { + if (error == ERR_FILE_MISSING_DEPENDENCIES) { + // Resource loading error, just skip it. 
+ } else if (error != ERR_FILE_EOF) { _printerr(); return Ref<PackedScene>(); } else { diff --git a/servers/audio/effects/audio_effect_pitch_shift.cpp b/servers/audio/effects/audio_effect_pitch_shift.cpp index ba2d257c0a..3c53887931 100644 --- a/servers/audio/effects/audio_effect_pitch_shift.cpp +++ b/servers/audio/effects/audio_effect_pitch_shift.cpp @@ -74,7 +74,7 @@ * *****************************************************************************/ -void SMBPitchShift::PitchShift(float pitchShift, int64_t numSampsToProcess, int64_t fftFrameSize, int64_t osamp, float sampleRate, float *indata, float *outdata,int stride) { +void SMBPitchShift::PitchShift(float pitchShift, long numSampsToProcess, long fftFrameSize, long osamp, float sampleRate, float *indata, float *outdata,int stride) { /* @@ -85,32 +85,19 @@ void SMBPitchShift::PitchShift(float pitchShift, int64_t numSampsToProcess, int6 */ double magn, phase, tmp, window, real, imag; - double freqPerBin, expct, reciprocalFftFrameSize; - int64_t i,k, qpd, index, inFifoLatency, stepSize, fftFrameSize2; + double freqPerBin, expct; + long i,k, qpd, index, inFifoLatency, stepSize, fftFrameSize2; /* set up some handy variables */ fftFrameSize2 = fftFrameSize/2; - reciprocalFftFrameSize = 1./fftFrameSize; stepSize = fftFrameSize/osamp; - freqPerBin = reciprocalFftFrameSize * sampleRate; - expct = Math_TAU * reciprocalFftFrameSize * stepSize; + freqPerBin = sampleRate/(double)fftFrameSize; + expct = 2.*Math_PI*(double)stepSize/(double)fftFrameSize; inFifoLatency = fftFrameSize-stepSize; - if (gRover == 0) { - gRover = inFifoLatency; - } + if (gRover == 0) { gRover = inFifoLatency; +} - // If pitchShift changes clear arrays to prevent some artifacts and quality loss. - if (lastPitchShift != pitchShift) { - lastPitchShift = pitchShift; - memset(gInFIFO, 0, MAX_FRAME_LENGTH * sizeof(float)); - memset(gOutFIFO, 0, MAX_FRAME_LENGTH * sizeof(float)); - memset(gFFTworksp, 0, 2 * MAX_FRAME_LENGTH * sizeof(double)); - memset(gLastPhase, 0, (MAX_FRAME_LENGTH / 2 + 1) * sizeof(double)); - memset(gSumPhase, 0, (MAX_FRAME_LENGTH / 2 + 1) * sizeof(double)); - memset(gOutputAccum, 0, 2 * MAX_FRAME_LENGTH * sizeof(double)); - memset(gAnaFreq, 0, MAX_FRAME_LENGTH * sizeof(double)); - memset(gAnaMagn, 0, MAX_FRAME_LENGTH * sizeof(double)); - } + /* initialize our static arrays */ /* main processing loop */ for (i = 0; i < numSampsToProcess; i++){ @@ -125,7 +112,7 @@ void SMBPitchShift::PitchShift(float pitchShift, int64_t numSampsToProcess, int6 /* do windowing and re,im interleave */ for (k = 0; k < fftFrameSize;k++) { - window = -.5*cos(Math_TAU * reciprocalFftFrameSize * k)+.5; + window = -.5*cos(2.*Math_PI*(double)k/(double)fftFrameSize)+.5; gFFTworksp[2*k] = gInFIFO[k] * window; gFFTworksp[2*k+1] = 0.; } @@ -137,7 +124,6 @@ void SMBPitchShift::PitchShift(float pitchShift, int64_t numSampsToProcess, int6 /* this is the analysis step */ for (k = 0; k <= fftFrameSize2; k++) { - /* de-interlace FFT buffer */ real = gFFTworksp[2*k]; imag = gFFTworksp[2*k+1]; @@ -155,15 +141,13 @@ void SMBPitchShift::PitchShift(float pitchShift, int64_t numSampsToProcess, int6 /* map delta phase into +/- Pi interval */ qpd = tmp/Math_PI; - if (qpd >= 0) { - qpd += qpd&1; - } else { - qpd -= qpd&1; - } + if (qpd >= 0) { qpd += qpd&1; + } else { qpd -= qpd&1; +} tmp -= Math_PI*(double)qpd; /* get deviation from bin frequency from the +/- Pi interval */ - tmp = osamp*tmp/Math_TAU; + tmp = osamp*tmp/(2.*Math_PI); /* compute the k-th partials' true frequency */ tmp = (double)k*freqPerBin 
+ tmp*freqPerBin; @@ -176,8 +160,8 @@ void SMBPitchShift::PitchShift(float pitchShift, int64_t numSampsToProcess, int6 /* ***************** PROCESSING ******************* */ /* this does the actual pitch shifting */ - memset(gSynMagn, 0, fftFrameSize*sizeof(double)); - memset(gSynFreq, 0, fftFrameSize*sizeof(double)); + memset(gSynMagn, 0, fftFrameSize*sizeof(float)); + memset(gSynFreq, 0, fftFrameSize*sizeof(float)); for (k = 0; k <= fftFrameSize2; k++) { index = k*pitchShift; if (index <= fftFrameSize2) { @@ -200,7 +184,7 @@ void SMBPitchShift::PitchShift(float pitchShift, int64_t numSampsToProcess, int6 tmp /= freqPerBin; /* take osamp into account */ - tmp = Math_TAU*tmp/osamp; + tmp = 2.*Math_PI*tmp/osamp; /* add the overlap phase advance back in */ tmp += (double)k*expct; @@ -215,35 +199,33 @@ void SMBPitchShift::PitchShift(float pitchShift, int64_t numSampsToProcess, int6 } /* zero negative frequencies */ - for (k = fftFrameSize+2; k < 2*MAX_FRAME_LENGTH; k++) { - gFFTworksp[k] = 0.; - } + for (k = fftFrameSize+2; k < 2*fftFrameSize; k++) { gFFTworksp[k] = 0.; +} /* do inverse transform */ smbFft(gFFTworksp, fftFrameSize, 1); /* do windowing and add to output accumulator */ for(k=0; k < fftFrameSize; k++) { - window = -.5*cos(Math_TAU * reciprocalFftFrameSize * k)+.5; + window = -.5*cos(2.*Math_PI*(double)k/(double)fftFrameSize)+.5; gOutputAccum[k] += 2.*window*gFFTworksp[2*k]/(fftFrameSize2*osamp); } - for (k = 0; k < stepSize; k++) { - gOutFIFO[k] = gOutputAccum[k]; - } + for (k = 0; k < stepSize; k++) { gOutFIFO[k] = gOutputAccum[k]; +} /* shift accumulator */ - memmove(gOutputAccum, gOutputAccum+stepSize, fftFrameSize*sizeof(double)); + memmove(gOutputAccum, gOutputAccum+stepSize, fftFrameSize*sizeof(float)); /* move input FIFO */ - for (k = 0; k < inFifoLatency; k++) { - gInFIFO[k] = gInFIFO[k+stepSize]; - } + for (k = 0; k < inFifoLatency; k++) { gInFIFO[k] = gInFIFO[k+stepSize]; +} } } } -void SMBPitchShift::smbFft(double *fftBuffer, int64_t fftFrameSize, int64_t sign) + +void SMBPitchShift::smbFft(float *fftBuffer, long fftFrameSize, long sign) /* FFT routine, (C)1996 S.M.Bernsee. Sign = -1 is FFT, 1 is iFFT (inverse) Fills fftBuffer[0...2*fftFrameSize-1] with the Fourier transform of the @@ -256,16 +238,14 @@ void SMBPitchShift::smbFft(double *fftBuffer, int64_t fftFrameSize, int64_t sign of the frequencies of interest is in fftBuffer[0...fftFrameSize]. */ { - double wr, wi, arg, *p1, *p2, temp; - double tr, ti, ur, ui, *p1r, *p1i, *p2r, *p2i; - int64_t i, bitm, j, le, le2, k, logN; - logN = (int64_t)(log(fftFrameSize) / log(2.) + .5); + float wr, wi, arg, *p1, *p2, temp; + float tr, ti, ur, ui, *p1r, *p1i, *p2r, *p2i; + long i, bitm, j, le, le2, k; for (i = 2; i < 2*fftFrameSize-2; i += 2) { for (bitm = 2, j = 0; bitm < 2*fftFrameSize; bitm <<= 1) { - if (i & bitm) { - j++; - } + if (i & bitm) { j++; +} j <<= 1; } if (i < j) { @@ -275,8 +255,7 @@ void SMBPitchShift::smbFft(double *fftBuffer, int64_t fftFrameSize, int64_t sign *p1 = *p2; *p2 = temp; } } - - for (k = 0, le = 2; k < logN; k++) { + for (k = 0, le = 2; k < (long)(log((double)fftFrameSize)/log(2.)+.5); k++) { le <<= 1; le2 = le>>1; ur = 1.0; @@ -309,14 +288,6 @@ void SMBPitchShift::smbFft(double *fftBuffer, int64_t fftFrameSize, int64_t sign void AudioEffectPitchShiftInstance::process(const AudioFrame *p_src_frames, AudioFrame *p_dst_frames, int p_frame_count) { float sample_rate = AudioServer::get_singleton()->get_mix_rate(); - // For pitch_scale 1.0 it's cheaper to just pass samples without processing them. 
- if (Math::is_equal_approx(base->pitch_scale, 1.0f)) { - for (int i = 0; i < p_frame_count; i++) { - p_dst_frames[i] = p_src_frames[i]; - } - return; - } - float *in_l = (float *)p_src_frames; float *in_r = in_l + 1; @@ -390,4 +361,7 @@ AudioEffectPitchShift::AudioEffectPitchShift() { pitch_scale = 1.0; oversampling = 4; fft_size = FFT_SIZE_2048; + wet = 0.0; + dry = 0.0; + filter = false; } diff --git a/servers/audio/effects/audio_effect_pitch_shift.h b/servers/audio/effects/audio_effect_pitch_shift.h index 23da61bb32..0478d05ceb 100644 --- a/servers/audio/effects/audio_effect_pitch_shift.h +++ b/servers/audio/effects/audio_effect_pitch_shift.h @@ -40,33 +40,31 @@ class SMBPitchShift { float gInFIFO[MAX_FRAME_LENGTH]; float gOutFIFO[MAX_FRAME_LENGTH]; - double gFFTworksp[2 * MAX_FRAME_LENGTH]; - double gLastPhase[MAX_FRAME_LENGTH / 2 + 1]; - double gSumPhase[MAX_FRAME_LENGTH / 2 + 1]; - double gOutputAccum[2 * MAX_FRAME_LENGTH]; - double gAnaFreq[MAX_FRAME_LENGTH]; - double gAnaMagn[MAX_FRAME_LENGTH]; - double gSynFreq[MAX_FRAME_LENGTH]; - double gSynMagn[MAX_FRAME_LENGTH]; - int64_t gRover; - float lastPitchShift; - - void smbFft(double *fftBuffer, int64_t fftFrameSize, int64_t sign); + float gFFTworksp[2 * MAX_FRAME_LENGTH]; + float gLastPhase[MAX_FRAME_LENGTH / 2 + 1]; + float gSumPhase[MAX_FRAME_LENGTH / 2 + 1]; + float gOutputAccum[2 * MAX_FRAME_LENGTH]; + float gAnaFreq[MAX_FRAME_LENGTH]; + float gAnaMagn[MAX_FRAME_LENGTH]; + float gSynFreq[MAX_FRAME_LENGTH]; + float gSynMagn[MAX_FRAME_LENGTH]; + long gRover; + + void smbFft(float *fftBuffer, long fftFrameSize, long sign); public: - void PitchShift(float pitchShift, int64_t numSampsToProcess, int64_t fftFrameSize, int64_t osamp, float sampleRate, float *indata, float *outdata, int stride); + void PitchShift(float pitchShift, long numSampsToProcess, long fftFrameSize, long osamp, float sampleRate, float *indata, float *outdata, int stride); SMBPitchShift() { gRover = 0; memset(gInFIFO, 0, MAX_FRAME_LENGTH * sizeof(float)); memset(gOutFIFO, 0, MAX_FRAME_LENGTH * sizeof(float)); - memset(gFFTworksp, 0, 2 * MAX_FRAME_LENGTH * sizeof(double)); - memset(gLastPhase, 0, (MAX_FRAME_LENGTH / 2 + 1) * sizeof(double)); - memset(gSumPhase, 0, (MAX_FRAME_LENGTH / 2 + 1) * sizeof(double)); - memset(gOutputAccum, 0, 2 * MAX_FRAME_LENGTH * sizeof(double)); - memset(gAnaFreq, 0, MAX_FRAME_LENGTH * sizeof(double)); - memset(gAnaMagn, 0, MAX_FRAME_LENGTH * sizeof(double)); - lastPitchShift = 1.0; + memset(gFFTworksp, 0, 2 * MAX_FRAME_LENGTH * sizeof(float)); + memset(gLastPhase, 0, (MAX_FRAME_LENGTH / 2 + 1) * sizeof(float)); + memset(gSumPhase, 0, (MAX_FRAME_LENGTH / 2 + 1) * sizeof(float)); + memset(gOutputAccum, 0, 2 * MAX_FRAME_LENGTH * sizeof(float)); + memset(gAnaFreq, 0, MAX_FRAME_LENGTH * sizeof(float)); + memset(gAnaMagn, 0, MAX_FRAME_LENGTH * sizeof(float)); } }; @@ -103,6 +101,9 @@ public: float pitch_scale; int oversampling; FFTSize fft_size; + float wet; + float dry; + bool filter; protected: static void _bind_methods(); diff --git a/servers/display_server.cpp b/servers/display_server.cpp index 58a51e3aea..819c151087 100644 --- a/servers/display_server.cpp +++ b/servers/display_server.cpp @@ -399,6 +399,9 @@ void DisplayServer::_bind_methods() { ClassDB::bind_method(D_METHOD("delete_sub_window", "window_id"), &DisplayServer::delete_sub_window); ClassDB::bind_method(D_METHOD("window_get_native_handle", "handle_type", "window_id"), &DisplayServer::window_get_native_handle, DEFVAL(MAIN_WINDOW_ID)); + 
ClassDB::bind_method(D_METHOD("window_get_active_popup"), &DisplayServer::window_get_active_popup); + ClassDB::bind_method(D_METHOD("window_set_popup_safe_rect", "window", "rect"), &DisplayServer::window_set_popup_safe_rect); + ClassDB::bind_method(D_METHOD("window_get_popup_safe_rect", "window"), &DisplayServer::window_get_popup_safe_rect); ClassDB::bind_method(D_METHOD("window_set_title", "title", "window_id"), &DisplayServer::window_set_title, DEFVAL(MAIN_WINDOW_ID)); ClassDB::bind_method(D_METHOD("window_set_mouse_passthrough", "region", "window_id"), &DisplayServer::window_set_mouse_passthrough, DEFVAL(MAIN_WINDOW_ID)); @@ -552,6 +555,7 @@ void DisplayServer::_bind_methods() { BIND_ENUM_CONSTANT(WINDOW_FLAG_ALWAYS_ON_TOP); BIND_ENUM_CONSTANT(WINDOW_FLAG_TRANSPARENT); BIND_ENUM_CONSTANT(WINDOW_FLAG_NO_FOCUS); + BIND_ENUM_CONSTANT(WINDOW_FLAG_POPUP); BIND_ENUM_CONSTANT(WINDOW_FLAG_MAX); BIND_ENUM_CONSTANT(WINDOW_EVENT_MOUSE_ENTER); diff --git a/servers/display_server.h b/servers/display_server.h index 81ac551f57..67dbab0924 100644 --- a/servers/display_server.h +++ b/servers/display_server.h @@ -226,6 +226,7 @@ public: WINDOW_FLAG_ALWAYS_ON_TOP, WINDOW_FLAG_TRANSPARENT, WINDOW_FLAG_NO_FOCUS, + WINDOW_FLAG_POPUP, WINDOW_FLAG_MAX, }; @@ -235,13 +236,18 @@ public: WINDOW_FLAG_BORDERLESS_BIT = (1 << WINDOW_FLAG_BORDERLESS), WINDOW_FLAG_ALWAYS_ON_TOP_BIT = (1 << WINDOW_FLAG_ALWAYS_ON_TOP), WINDOW_FLAG_TRANSPARENT_BIT = (1 << WINDOW_FLAG_TRANSPARENT), - WINDOW_FLAG_NO_FOCUS_BIT = (1 << WINDOW_FLAG_NO_FOCUS) + WINDOW_FLAG_NO_FOCUS_BIT = (1 << WINDOW_FLAG_NO_FOCUS), + WINDOW_FLAG_POPUP_BIT = (1 << WINDOW_FLAG_POPUP), }; virtual WindowID create_sub_window(WindowMode p_mode, VSyncMode p_vsync_mode, uint32_t p_flags, const Rect2i &p_rect = Rect2i()); virtual void show_window(WindowID p_id); virtual void delete_sub_window(WindowID p_id); + virtual WindowID window_get_active_popup() const { return INVALID_WINDOW_ID; }; + virtual void window_set_popup_safe_rect(WindowID p_window, const Rect2i &p_rect){}; + virtual Rect2i window_get_popup_safe_rect(WindowID p_window) const { return Rect2i(); }; + virtual int64_t window_get_native_handle(HandleType p_handle_type, WindowID p_window = MAIN_WINDOW_ID) const; virtual WindowID get_window_at_screen_position(const Point2i &p_position) const = 0; diff --git a/servers/rendering/renderer_rd/effects_rd.cpp b/servers/rendering/renderer_rd/effects_rd.cpp index 7883a2d816..fe3863fec7 100644 --- a/servers/rendering/renderer_rd/effects_rd.cpp +++ b/servers/rendering/renderer_rd/effects_rd.cpp @@ -646,13 +646,13 @@ void EffectsRD::screen_space_reflection(RID p_diffuse, RID p_normal_roughness, R ssr.push_constant.metallic_mask[3] = CLAMP(p_metallic_mask.a * 255.0, 0, 255); store_camera(p_camera, ssr.push_constant.projection); - RD::get_singleton()->compute_list_bind_compute_pipeline(compute_list, ssr.pipelines[(p_roughness_quality != RS::ENV_SSR_ROUGNESS_QUALITY_DISABLED) ? SCREEN_SPACE_REFLECTION_ROUGH : SCREEN_SPACE_REFLECTION_NORMAL]); + RD::get_singleton()->compute_list_bind_compute_pipeline(compute_list, ssr.pipelines[(p_roughness_quality != RS::ENV_SSR_ROUGHNESS_QUALITY_DISABLED) ? 
SCREEN_SPACE_REFLECTION_ROUGH : SCREEN_SPACE_REFLECTION_NORMAL]); RD::get_singleton()->compute_list_set_push_constant(compute_list, &ssr.push_constant, sizeof(ScreenSpaceReflectionPushConstant)); RD::get_singleton()->compute_list_bind_uniform_set(compute_list, _get_compute_uniform_set_from_image_pair(p_output_blur, p_scale_depth), 0); - if (p_roughness_quality != RS::ENV_SSR_ROUGNESS_QUALITY_DISABLED) { + if (p_roughness_quality != RS::ENV_SSR_ROUGHNESS_QUALITY_DISABLED) { RD::get_singleton()->compute_list_bind_uniform_set(compute_list, _get_compute_uniform_set_from_image_pair(p_output, p_blur_radius), 1); } else { RD::get_singleton()->compute_list_bind_uniform_set(compute_list, _get_uniform_set_from_image(p_output), 1); @@ -663,7 +663,7 @@ void EffectsRD::screen_space_reflection(RID p_diffuse, RID p_normal_roughness, R RD::get_singleton()->compute_list_dispatch_threads(compute_list, p_screen_size.width, p_screen_size.height, 1); } - if (p_roughness_quality != RS::ENV_SSR_ROUGNESS_QUALITY_DISABLED) { + if (p_roughness_quality != RS::ENV_SSR_ROUGHNESS_QUALITY_DISABLED) { //blur RD::get_singleton()->compute_list_add_barrier(compute_list); @@ -675,10 +675,10 @@ void EffectsRD::screen_space_reflection(RID p_diffuse, RID p_normal_roughness, R ssr_filter.push_constant.proj_info[2] = (1.0f - p_camera.matrix[0][2]) / p_camera.matrix[0][0]; ssr_filter.push_constant.proj_info[3] = (1.0f + p_camera.matrix[1][2]) / p_camera.matrix[1][1]; ssr_filter.push_constant.vertical = 0; - if (p_roughness_quality == RS::ENV_SSR_ROUGNESS_QUALITY_LOW) { + if (p_roughness_quality == RS::ENV_SSR_ROUGHNESS_QUALITY_LOW) { ssr_filter.push_constant.steps = p_max_steps / 3; ssr_filter.push_constant.increment = 3; - } else if (p_roughness_quality == RS::ENV_SSR_ROUGNESS_QUALITY_MEDIUM) { + } else if (p_roughness_quality == RS::ENV_SSR_ROUGHNESS_QUALITY_MEDIUM) { ssr_filter.push_constant.steps = p_max_steps / 2; ssr_filter.push_constant.increment = 2; } else { diff --git a/servers/rendering/renderer_rd/renderer_scene_render_rd.cpp b/servers/rendering/renderer_rd/renderer_scene_render_rd.cpp index 92f1286d52..8814822cda 100644 --- a/servers/rendering/renderer_rd/renderer_scene_render_rd.cpp +++ b/servers/rendering/renderer_rd/renderer_scene_render_rd.cpp @@ -1980,7 +1980,7 @@ void RendererSceneRenderRD::_process_ssr(RID p_render_buffers, RID p_dest_frameb rb->ssr.normal_scaled = RD::get_singleton()->texture_create(tf, RD::TextureView()); } - if (ssr_roughness_quality != RS::ENV_SSR_ROUGNESS_QUALITY_DISABLED && !rb->ssr.blur_radius[0].is_valid()) { + if (ssr_roughness_quality != RS::ENV_SSR_ROUGHNESS_QUALITY_DISABLED && !rb->ssr.blur_radius[0].is_valid()) { RD::TextureFormat tf; tf.format = RD::DATA_FORMAT_R8_UNORM; tf.width = rb->internal_width / 2; diff --git a/servers/rendering/renderer_rd/renderer_scene_render_rd.h b/servers/rendering/renderer_rd/renderer_scene_render_rd.h index 09c828ba37..47bc0af1db 100644 --- a/servers/rendering/renderer_rd/renderer_scene_render_rd.h +++ b/servers/rendering/renderer_rd/renderer_scene_render_rd.h @@ -428,7 +428,7 @@ private: bool glow_bicubic_upscale = false; bool glow_high_quality = false; - RS::EnvironmentSSRRoughnessQuality ssr_roughness_quality = RS::ENV_SSR_ROUGNESS_QUALITY_LOW; + RS::EnvironmentSSRRoughnessQuality ssr_roughness_quality = RS::ENV_SSR_ROUGHNESS_QUALITY_LOW; mutable RID_Owner<RendererSceneEnvironmentRD, true> environment_owner; diff --git a/servers/rendering_server.cpp b/servers/rendering_server.cpp index e07feaab43..39a0acb2bd 100644 --- 
a/servers/rendering_server.cpp +++ b/servers/rendering_server.cpp @@ -2372,10 +2372,10 @@ void RenderingServer::_bind_methods() { BIND_ENUM_CONSTANT(ENV_TONE_MAPPER_FILMIC); BIND_ENUM_CONSTANT(ENV_TONE_MAPPER_ACES); - BIND_ENUM_CONSTANT(ENV_SSR_ROUGNESS_QUALITY_DISABLED); - BIND_ENUM_CONSTANT(ENV_SSR_ROUGNESS_QUALITY_LOW); - BIND_ENUM_CONSTANT(ENV_SSR_ROUGNESS_QUALITY_MEDIUM); - BIND_ENUM_CONSTANT(ENV_SSR_ROUGNESS_QUALITY_HIGH); + BIND_ENUM_CONSTANT(ENV_SSR_ROUGHNESS_QUALITY_DISABLED); + BIND_ENUM_CONSTANT(ENV_SSR_ROUGHNESS_QUALITY_LOW); + BIND_ENUM_CONSTANT(ENV_SSR_ROUGHNESS_QUALITY_MEDIUM); + BIND_ENUM_CONSTANT(ENV_SSR_ROUGHNESS_QUALITY_HIGH); BIND_ENUM_CONSTANT(ENV_SSAO_QUALITY_VERY_LOW); BIND_ENUM_CONSTANT(ENV_SSAO_QUALITY_LOW); diff --git a/servers/rendering_server.h b/servers/rendering_server.h index 71460c2d5d..533c000166 100644 --- a/servers/rendering_server.h +++ b/servers/rendering_server.h @@ -1010,10 +1010,10 @@ public: virtual void environment_set_ssr(RID p_env, bool p_enable, int p_max_steps, float p_fade_in, float p_fade_out, float p_depth_tolerance) = 0; enum EnvironmentSSRRoughnessQuality { - ENV_SSR_ROUGNESS_QUALITY_DISABLED, - ENV_SSR_ROUGNESS_QUALITY_LOW, - ENV_SSR_ROUGNESS_QUALITY_MEDIUM, - ENV_SSR_ROUGNESS_QUALITY_HIGH, + ENV_SSR_ROUGHNESS_QUALITY_DISABLED, + ENV_SSR_ROUGHNESS_QUALITY_LOW, + ENV_SSR_ROUGHNESS_QUALITY_MEDIUM, + ENV_SSR_ROUGHNESS_QUALITY_HIGH, }; virtual void environment_set_ssr_roughness_quality(EnvironmentSSRRoughnessQuality p_quality) = 0; diff --git a/thirdparty/README.md b/thirdparty/README.md index f204367b8b..d591d2cbd8 100644 --- a/thirdparty/README.md +++ b/thirdparty/README.md @@ -206,7 +206,7 @@ Files extracted from upstream source: ## harfbuzz - Upstream: https://github.com/harfbuzz/harfbuzz -- Version: 3.3.2 (ac46c3248e8b0316235943175c4d4a11c24dd4a9, 2022) +- Version: 4.0.0 (8d1b000a3edc90c12267b836b4ef3f81c0e53edc, 2022) - License: MIT Files extracted from upstream source: @@ -454,7 +454,7 @@ Collection of single-file libraries used in Godot components. * License: Public Domain or MIT - `stb_rect_pack.h` * Upstream: https://github.com/nothings/stb - * Version: 1.00 (2bb4a0accd4003c1db4c24533981e01b1adfd656, 2019) + * Version: 1.01 (af1a5bc352164740c1cc1354942b1c6b72eacb8a, 2021) * License: Public Domain or Unlicense or MIT - `yuv2rgb.h` * Upstream: http://wss.co.uk/pinknoise/yuv2rgb/ (to check) @@ -690,10 +690,10 @@ Files extracted from upstream source: SDK release: https://github.com/KhronosGroup/Vulkan-ValidationLayers/blob/master/layers/generated/vk_enum_string_helper.h `vk_mem_alloc.h` is taken from https://github.com/GPUOpen-LibrariesAndSDKs/VulkanMemoryAllocator -Version: 3.0.0-development (2022-02-08), commit `a1895bc76547370564d604faa27e0b73de747df1` +Version: 3.0.0-development (2022-02-24), commit `dc3f6bb9159df22ceed69c7765ddfb4fbb1b6ed0` `vk_mem_alloc.cpp` is a Godot file and should be preserved on updates. -Patches in the `patches` directory should be re-applied after updates (order must be followed among the number-prefixed ones). +Patches in the `patches` directory should be re-applied after updates. 
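The ENV_SSR_ROUGNESS_QUALITY_* constants above are renamed to ENV_SSR_ROUGHNESS_QUALITY_*, so any engine-side caller has to switch to the corrected spelling. A minimal sketch of such a call, assuming the usual RenderingServer singleton access; the quality level chosen here is arbitrary and this snippet is not part of the patch:

    // Sketch only: request the renamed medium roughness quality for screen-space reflections.
    RenderingServer::get_singleton()->environment_set_ssr_roughness_quality(
            RenderingServer::ENV_SSR_ROUGHNESS_QUALITY_MEDIUM);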
## wslay diff --git a/thirdparty/harfbuzz/src/hb-algs.hh b/thirdparty/harfbuzz/src/hb-algs.hh index 3a3ab08046..c40a55cd1f 100644 --- a/thirdparty/harfbuzz/src/hb-algs.hh +++ b/thirdparty/harfbuzz/src/hb-algs.hh @@ -498,7 +498,7 @@ struct hb_pair_t template <typename Q1, typename Q2, hb_enable_if (hb_is_convertible (T1, Q1) && - hb_is_convertible (T2, T2))> + hb_is_convertible (T2, Q2))> operator hb_pair_t<Q1, Q2> () { return hb_pair_t<Q1, Q2> (first, second); } hb_pair_t<T1, T2> reverse () const diff --git a/thirdparty/harfbuzz/src/hb-buffer-verify.cc b/thirdparty/harfbuzz/src/hb-buffer-verify.cc new file mode 100644 index 0000000000..dea2c11c35 --- /dev/null +++ b/thirdparty/harfbuzz/src/hb-buffer-verify.cc @@ -0,0 +1,422 @@ +/* + * Copyright © 2022 Behdad Esfahbod + * + * This is part of HarfBuzz, a text shaping library. + * + * Permission is hereby granted, without written agreement and without + * license or royalty fees, to use, copy, modify, and distribute this + * software and its documentation for any purpose, provided that the + * above copyright notice and the following two paragraphs appear in + * all copies of this software. + * + * IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE TO ANY PARTY FOR + * DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES + * ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN + * IF THE COPYRIGHT HOLDER HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH + * DAMAGE. + * + * THE COPYRIGHT HOLDER SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, + * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND + * FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS + * ON AN "AS IS" BASIS, AND THE COPYRIGHT HOLDER HAS NO OBLIGATION TO + * PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS. + * + * Google Author(s): Behdad Esfahbod + */ + +#include "hb.hh" + +#ifndef HB_NO_BUFFER_VERIFY + +#include "hb-buffer.hh" + + +#define BUFFER_VERIFY_ERROR "buffer verify error: " +static inline void +buffer_verify_error (hb_buffer_t *buffer, + hb_font_t *font, + const char *fmt, + ...) HB_PRINTF_FUNC(3, 4); + +static inline void +buffer_verify_error (hb_buffer_t *buffer, + hb_font_t *font, + const char *fmt, + ...) +{ + va_list ap; + va_start (ap, fmt); + if (buffer->messaging ()) + { + buffer->message_impl (font, fmt, ap); + } + else + { + fprintf (stderr, "harfbuzz "); + vfprintf (stderr, fmt, ap); + fprintf (stderr, "\n"); + } + va_end (ap); +} + +static bool +buffer_verify_monotone (hb_buffer_t *buffer, + hb_font_t *font) +{ + /* Check that clusters are monotone. 
*/ + if (buffer->cluster_level == HB_BUFFER_CLUSTER_LEVEL_MONOTONE_GRAPHEMES || + buffer->cluster_level == HB_BUFFER_CLUSTER_LEVEL_MONOTONE_CHARACTERS) + { + bool is_forward = HB_DIRECTION_IS_FORWARD (hb_buffer_get_direction (buffer)); + + unsigned int num_glyphs; + hb_glyph_info_t *info = hb_buffer_get_glyph_infos (buffer, &num_glyphs); + + for (unsigned int i = 1; i < num_glyphs; i++) + if (info[i-1].cluster != info[i].cluster && + (info[i-1].cluster < info[i].cluster) != is_forward) + { + buffer_verify_error (buffer, font, BUFFER_VERIFY_ERROR "clusters are not monotone."); + return false; + } + } + + return true; +} + +static bool +buffer_verify_unsafe_to_break (hb_buffer_t *buffer, + hb_buffer_t *text_buffer, + hb_font_t *font, + const hb_feature_t *features, + unsigned int num_features, + const char * const *shapers) +{ + if (buffer->cluster_level != HB_BUFFER_CLUSTER_LEVEL_MONOTONE_GRAPHEMES && + buffer->cluster_level != HB_BUFFER_CLUSTER_LEVEL_MONOTONE_CHARACTERS) + { + /* Cannot perform this check without monotone clusters. */ + return true; + } + + /* Check that breaking up shaping at safe-to-break is indeed safe. */ + + hb_buffer_t *fragment = hb_buffer_create_similar (buffer); + hb_buffer_set_flags (fragment, hb_buffer_get_flags (fragment) & ~HB_BUFFER_FLAG_VERIFY); + hb_buffer_t *reconstruction = hb_buffer_create_similar (buffer); + hb_buffer_set_flags (reconstruction, hb_buffer_get_flags (reconstruction) & ~HB_BUFFER_FLAG_VERIFY); + + unsigned int num_glyphs; + hb_glyph_info_t *info = hb_buffer_get_glyph_infos (buffer, &num_glyphs); + + unsigned int num_chars; + hb_glyph_info_t *text = hb_buffer_get_glyph_infos (text_buffer, &num_chars); + + /* Chop text and shape fragments. */ + bool forward = HB_DIRECTION_IS_FORWARD (hb_buffer_get_direction (buffer)); + unsigned int start = 0; + unsigned int text_start = forward ? 0 : num_chars; + unsigned int text_end = text_start; + for (unsigned int end = 1; end < num_glyphs + 1; end++) + { + if (end < num_glyphs && + (info[end].cluster == info[end-1].cluster || + info[end-(forward?0:1)].mask & HB_GLYPH_FLAG_UNSAFE_TO_BREAK)) + continue; + + /* Shape segment corresponding to glyphs start..end. 
*/ + if (end == num_glyphs) + { + if (forward) + text_end = num_chars; + else + text_start = 0; + } + else + { + if (forward) + { + unsigned int cluster = info[end].cluster; + while (text_end < num_chars && text[text_end].cluster < cluster) + text_end++; + } + else + { + unsigned int cluster = info[end - 1].cluster; + while (text_start && text[text_start - 1].cluster >= cluster) + text_start--; + } + } + assert (text_start < text_end); + + if (0) + printf("start %d end %d text start %d end %d\n", start, end, text_start, text_end); + + hb_buffer_clear_contents (fragment); + + hb_buffer_flags_t flags = hb_buffer_get_flags (fragment); + if (0 < text_start) + flags = (hb_buffer_flags_t) (flags & ~HB_BUFFER_FLAG_BOT); + if (text_end < num_chars) + flags = (hb_buffer_flags_t) (flags & ~HB_BUFFER_FLAG_EOT); + hb_buffer_set_flags (fragment, flags); + + hb_buffer_append (fragment, text_buffer, text_start, text_end); + if (!hb_shape_full (font, fragment, features, num_features, shapers)) + { + buffer_verify_error (buffer, font, BUFFER_VERIFY_ERROR "shaping failed while shaping fragment."); + hb_buffer_destroy (reconstruction); + hb_buffer_destroy (fragment); + return false; + } + hb_buffer_append (reconstruction, fragment, 0, -1); + + start = end; + if (forward) + text_start = text_end; + else + text_end = text_start; + } + + bool ret = true; + hb_buffer_diff_flags_t diff = hb_buffer_diff (reconstruction, buffer, (hb_codepoint_t) -1, 0); + if (diff) + { + buffer_verify_error (buffer, font, BUFFER_VERIFY_ERROR "unsafe-to-break test failed."); + ret = false; + + /* Return the reconstructed result instead so it can be inspected. */ + hb_buffer_set_length (buffer, 0); + hb_buffer_append (buffer, reconstruction, 0, -1); + } + + hb_buffer_destroy (reconstruction); + hb_buffer_destroy (fragment); + + return ret; +} + +static bool +buffer_verify_unsafe_to_concat (hb_buffer_t *buffer, + hb_buffer_t *text_buffer, + hb_font_t *font, + const hb_feature_t *features, + unsigned int num_features, + const char * const *shapers) +{ + if (buffer->cluster_level != HB_BUFFER_CLUSTER_LEVEL_MONOTONE_GRAPHEMES && + buffer->cluster_level != HB_BUFFER_CLUSTER_LEVEL_MONOTONE_CHARACTERS) + { + /* Cannot perform this check without monotone clusters. */ + return true; + } + + /* Check that shuffling up text before shaping at safe-to-concat points + * is indeed safe. */ + + /* This is what we do: + * + * 1. We shape text once. Then segment the text at all the safe-to-concat + * points; + * + * 2. Then we create two buffers, one containing all the even segments and + * one all the odd segments. + * + * 3. Because all these segments were safe-to-concat at both ends, we + * expect that concatenating them and shaping should NOT change the + * shaping results of each segment. As such, we expect that after + * shaping the two buffers, we still get cluster boundaries at the + * segment boundaries, and that those all are safe-to-concat points. + * Moreover, that there are NOT any safe-to-concat points within the + * segments. + * + * 4. Finally, we reconstruct the shaping results of the original text by + * simply interleaving the shaping results of the segments from the two + * buffers, and assert that the total shaping results is the same as + * the one from original buffer in step 1. 
+ */ + + hb_buffer_t *fragments[2] {hb_buffer_create_similar (buffer), + hb_buffer_create_similar (buffer)}; + hb_buffer_set_flags (fragments[0], hb_buffer_get_flags (fragments[0]) & ~HB_BUFFER_FLAG_VERIFY); + hb_buffer_set_flags (fragments[1], hb_buffer_get_flags (fragments[1]) & ~HB_BUFFER_FLAG_VERIFY); + hb_buffer_t *reconstruction = hb_buffer_create_similar (buffer); + hb_buffer_set_flags (reconstruction, hb_buffer_get_flags (reconstruction) & ~HB_BUFFER_FLAG_VERIFY); + hb_segment_properties_t props; + hb_buffer_get_segment_properties (buffer, &props); + hb_buffer_set_segment_properties (fragments[0], &props); + hb_buffer_set_segment_properties (fragments[1], &props); + hb_buffer_set_segment_properties (reconstruction, &props); + + unsigned num_glyphs; + hb_glyph_info_t *info = hb_buffer_get_glyph_infos (buffer, &num_glyphs); + + unsigned num_chars; + hb_glyph_info_t *text = hb_buffer_get_glyph_infos (text_buffer, &num_chars); + + bool forward = HB_DIRECTION_IS_FORWARD (hb_buffer_get_direction (buffer)); + + if (!forward) + hb_buffer_reverse (buffer); + + /* + * Split text into segments and collect into to fragment streams. + */ + { + unsigned fragment_idx = 0; + unsigned start = 0; + unsigned text_start = 0; + unsigned text_end = 0; + for (unsigned end = 1; end < num_glyphs + 1; end++) + { + if (end < num_glyphs && + (info[end].cluster == info[end-1].cluster || + info[end].mask & HB_GLYPH_FLAG_UNSAFE_TO_CONCAT)) + continue; + + /* Accumulate segment corresponding to glyphs start..end. */ + if (end == num_glyphs) + text_end = num_chars; + else + { + unsigned cluster = info[end].cluster; + while (text_end < num_chars && text[text_end].cluster < cluster) + text_end++; + } + assert (text_start < text_end); + + if (0) + printf("start %d end %d text start %d end %d\n", start, end, text_start, text_end); + +#if 0 + hb_buffer_flags_t flags = hb_buffer_get_flags (fragment); + if (0 < text_start) + flags = (hb_buffer_flags_t) (flags & ~HB_BUFFER_FLAG_BOT); + if (text_end < num_chars) + flags = (hb_buffer_flags_t) (flags & ~HB_BUFFER_FLAG_EOT); + hb_buffer_set_flags (fragment, flags); +#endif + + hb_buffer_append (fragments[fragment_idx], text_buffer, text_start, text_end); + + start = end; + text_start = text_end; + fragment_idx = 1 - fragment_idx; + } + } + + bool ret = true; + hb_buffer_diff_flags_t diff; + + /* + * Shape the two fragment streams. + */ + if (!hb_shape_full (font, fragments[0], features, num_features, shapers)) + { + buffer_verify_error (buffer, font, BUFFER_VERIFY_ERROR "shaping failed while shaping fragment."); + ret = false; + goto out; + } + if (!hb_shape_full (font, fragments[1], features, num_features, shapers)) + { + buffer_verify_error (buffer, font, BUFFER_VERIFY_ERROR "shaping failed while shaping fragment."); + ret = false; + goto out; + } + + if (!forward) + { + hb_buffer_reverse (fragments[0]); + hb_buffer_reverse (fragments[1]); + } + + /* + * Reconstruct results. 
+ */ + { + unsigned fragment_idx = 0; + unsigned fragment_start[2] {0, 0}; + unsigned fragment_num_glyphs[2]; + hb_glyph_info_t *fragment_info[2]; + for (unsigned i = 0; i < 2; i++) + fragment_info[i] = hb_buffer_get_glyph_infos (fragments[i], &fragment_num_glyphs[i]); + while (fragment_start[0] < fragment_num_glyphs[0] || + fragment_start[1] < fragment_num_glyphs[1]) + { + unsigned fragment_end = fragment_start[fragment_idx] + 1; + while (fragment_end < fragment_num_glyphs[fragment_idx] && + (fragment_info[fragment_idx][fragment_end].cluster == fragment_info[fragment_idx][fragment_end - 1].cluster || + fragment_info[fragment_idx][fragment_end].mask & HB_GLYPH_FLAG_UNSAFE_TO_CONCAT)) + fragment_end++; + + hb_buffer_append (reconstruction, fragments[fragment_idx], fragment_start[fragment_idx], fragment_end); + + fragment_start[fragment_idx] = fragment_end; + fragment_idx = 1 - fragment_idx; + } + } + + if (!forward) + { + hb_buffer_reverse (buffer); + hb_buffer_reverse (reconstruction); + } + + /* + * Diff results. + */ + diff = hb_buffer_diff (reconstruction, buffer, (hb_codepoint_t) -1, 0); + if (diff) + { + buffer_verify_error (buffer, font, BUFFER_VERIFY_ERROR "unsafe-to-concat test failed."); + ret = false; + + /* Return the reconstructed result instead so it can be inspected. */ + hb_buffer_set_length (buffer, 0); + hb_buffer_append (buffer, reconstruction, 0, -1); + } + + +out: + hb_buffer_destroy (reconstruction); + hb_buffer_destroy (fragments[0]); + hb_buffer_destroy (fragments[1]); + + return ret; +} + +bool +hb_buffer_t::verify (hb_buffer_t *text_buffer, + hb_font_t *font, + const hb_feature_t *features, + unsigned int num_features, + const char * const *shapers) +{ + bool ret = true; + if (!buffer_verify_monotone (this, font)) + ret = false; + if (!buffer_verify_unsafe_to_break (this, text_buffer, font, features, num_features, shapers)) + ret = false; + if ((flags & HB_BUFFER_FLAG_PRODUCE_UNSAFE_TO_CONCAT) != 0 && + !buffer_verify_unsafe_to_concat (this, text_buffer, font, features, num_features, shapers)) + ret = false; + if (!ret) + { + unsigned len = text_buffer->len; + hb_vector_t<char> bytes; + if (likely (bytes.resize (len * 10 + 16))) + { + hb_buffer_serialize_unicode (text_buffer, + 0, len, + bytes.arrayZ, bytes.length, + &len, + HB_BUFFER_SERIALIZE_FORMAT_TEXT, + HB_BUFFER_SERIALIZE_FLAG_NO_CLUSTERS); + buffer_verify_error (this, font, BUFFER_VERIFY_ERROR "text was: %s.", bytes.arrayZ); + } + } + return ret; +} + + +#endif diff --git a/thirdparty/harfbuzz/src/hb-buffer.cc b/thirdparty/harfbuzz/src/hb-buffer.cc index e50afcb203..d36fcfde39 100644 --- a/thirdparty/harfbuzz/src/hb-buffer.cc +++ b/thirdparty/harfbuzz/src/hb-buffer.cc @@ -1789,7 +1789,7 @@ hb_buffer_add_codepoints (hb_buffer_t *buffer, **/ HB_EXTERN void hb_buffer_append (hb_buffer_t *buffer, - hb_buffer_t *source, + const hb_buffer_t *source, unsigned int start, unsigned int end) { diff --git a/thirdparty/harfbuzz/src/hb-buffer.h b/thirdparty/harfbuzz/src/hb-buffer.h index 9fbd7b1ec3..ece7d2d8cf 100644 --- a/thirdparty/harfbuzz/src/hb-buffer.h +++ b/thirdparty/harfbuzz/src/hb-buffer.h @@ -137,7 +137,11 @@ typedef struct hb_glyph_info_t { * clusters. * The #HB_GLYPH_FLAG_UNSAFE_TO_BREAK flag will * always imply this flag. - * Since: 3.3.0 + * To use this flag, you must enable the buffer flag + * @HB_BUFFER_FLAG_PRODUCE_UNSAFE_TO_CONCAT during + * shaping, otherwise the buffer flag will not be + * reliably produced. + * Since: 4.0.0 * @HB_GLYPH_FLAG_DEFINED: All the currently defined flags. 
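The two new buffer flags are what gate all of the verification code above: HB_BUFFER_FLAG_VERIFY asks the shaping calls to run the verify path on the result, and HB_BUFFER_FLAG_PRODUCE_UNSAFE_TO_CONCAT additionally enables the unsafe-to-concat flag production and its test. A minimal usage sketch, assuming an already-created hb_font_t; the reporting callback and function names are illustrative only:

#include <hb.h>
#include <stdio.h>

static hb_bool_t
report_verify_message (hb_buffer_t *buf, hb_font_t *font,
                       const char *message, void *user_data)
{
  /* Verification failures arrive here as buffer messages. */
  fprintf (stderr, "harfbuzz: %s\n", message);
  return true; /* keep receiving messages */
}

static void
shape_with_verification (hb_font_t *font, const char *utf8_text)
{
  hb_buffer_t *buf = hb_buffer_create ();
  hb_buffer_add_utf8 (buf, utf8_text, -1, 0, -1);
  hb_buffer_guess_segment_properties (buf);

  /* Opt into the verify path and into unsafe-to-concat production. */
  hb_buffer_set_flags (buf, (hb_buffer_flags_t)
                       (hb_buffer_get_flags (buf) |
                        HB_BUFFER_FLAG_VERIFY |
                        HB_BUFFER_FLAG_PRODUCE_UNSAFE_TO_CONCAT));
  hb_buffer_set_message_func (buf, report_verify_message, NULL, NULL);

  hb_shape (font, buf, NULL, 0);
  hb_buffer_destroy (buf);
}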
* * Flags for #hb_glyph_info_t. @@ -356,7 +360,19 @@ hb_buffer_guess_segment_properties (hb_buffer_t *buffer); * @HB_BUFFER_FLAG_DO_NOT_INSERT_DOTTED_CIRCLE: * flag indicating that a dotted circle should * not be inserted in the rendering of incorrect - * character sequences (such at <0905 093E>). Since: 2.4 + * character sequences (such at <0905 093E>). Since: 2.4.0 + * @HB_BUFFER_FLAG_VERIFY: + * flag indicating that the hb_shape() call and its variants + * should perform various verification processes on the results + * of the shaping operation on the buffer. If the verification + * fails, then either a buffer message is sent, if a message + * handler is installed on the buffer, or a message is written + * to standard error. In either case, the shaping result might + * be modified to show the failed output. Since: 3.4.0 + * @HB_BUFFER_FLAG_PRODUCE_UNSAFE_TO_CONCAT: + * flag indicating that the @HB_GLYPH_FLAG_UNSAFE_TO_CONCAT + * glyph-flag should be produced by the shaper. By default + * it will not be produced since it incurs a cost. Since: 4.0.0 * * Flags for #hb_buffer_t. * @@ -368,7 +384,9 @@ typedef enum { /*< flags >*/ HB_BUFFER_FLAG_EOT = 0x00000002u, /* End-of-text */ HB_BUFFER_FLAG_PRESERVE_DEFAULT_IGNORABLES = 0x00000004u, HB_BUFFER_FLAG_REMOVE_DEFAULT_IGNORABLES = 0x00000008u, - HB_BUFFER_FLAG_DO_NOT_INSERT_DOTTED_CIRCLE = 0x00000010u + HB_BUFFER_FLAG_DO_NOT_INSERT_DOTTED_CIRCLE = 0x00000010u, + HB_BUFFER_FLAG_VERIFY = 0x00000020u, + HB_BUFFER_FLAG_PRODUCE_UNSAFE_TO_CONCAT = 0x00000040u } hb_buffer_flags_t; HB_EXTERN void @@ -522,7 +540,7 @@ hb_buffer_add_codepoints (hb_buffer_t *buffer, HB_EXTERN void hb_buffer_append (hb_buffer_t *buffer, - hb_buffer_t *source, + const hb_buffer_t *source, unsigned int start, unsigned int end); @@ -619,24 +637,24 @@ hb_buffer_serialize_glyphs (hb_buffer_t *buffer, HB_EXTERN unsigned int hb_buffer_serialize_unicode (hb_buffer_t *buffer, - unsigned int start, - unsigned int end, - char *buf, - unsigned int buf_size, - unsigned int *buf_consumed, - hb_buffer_serialize_format_t format, - hb_buffer_serialize_flags_t flags); + unsigned int start, + unsigned int end, + char *buf, + unsigned int buf_size, + unsigned int *buf_consumed, + hb_buffer_serialize_format_t format, + hb_buffer_serialize_flags_t flags); HB_EXTERN unsigned int hb_buffer_serialize (hb_buffer_t *buffer, - unsigned int start, - unsigned int end, - char *buf, - unsigned int buf_size, - unsigned int *buf_consumed, - hb_font_t *font, - hb_buffer_serialize_format_t format, - hb_buffer_serialize_flags_t flags); + unsigned int start, + unsigned int end, + char *buf, + unsigned int buf_size, + unsigned int *buf_consumed, + hb_font_t *font, + hb_buffer_serialize_format_t format, + hb_buffer_serialize_flags_t flags); HB_EXTERN hb_bool_t hb_buffer_deserialize_glyphs (hb_buffer_t *buffer, @@ -648,10 +666,10 @@ hb_buffer_deserialize_glyphs (hb_buffer_t *buffer, HB_EXTERN hb_bool_t hb_buffer_deserialize_unicode (hb_buffer_t *buffer, - const char *buf, - int buf_len, - const char **end_ptr, - hb_buffer_serialize_format_t format); + const char *buf, + int buf_len, + const char **end_ptr, + hb_buffer_serialize_format_t format); diff --git a/thirdparty/harfbuzz/src/hb-buffer.hh b/thirdparty/harfbuzz/src/hb-buffer.hh index ac45f090a5..bc6992905e 100644 --- a/thirdparty/harfbuzz/src/hb-buffer.hh +++ b/thirdparty/harfbuzz/src/hb-buffer.hh @@ -212,6 +212,20 @@ struct hb_buffer_t HB_INTERNAL void enter (); HB_INTERNAL void leave (); +#ifndef HB_NO_BUFFER_VERIFY + HB_INTERNAL +#endif + bool verify 
(hb_buffer_t *text_buffer, + hb_font_t *font, + const hb_feature_t *features, + unsigned int num_features, + const char * const *shapers) +#ifndef HB_NO_BUFFER_VERIFY + ; +#else + { return true; } +#endif + unsigned int backtrack_len () const { return have_output ? out_len : idx; } unsigned int lookahead_len () const { return len - idx; } uint8_t next_serial () { return ++serial ? serial : ++serial; } @@ -446,6 +460,8 @@ struct hb_buffer_t } void unsafe_to_concat (unsigned int start = 0, unsigned int end = -1) { + if (likely ((flags & HB_BUFFER_FLAG_PRODUCE_UNSAFE_TO_CONCAT) == 0)) + return; _set_glyph_flags (HB_GLYPH_FLAG_UNSAFE_TO_CONCAT, start, end, true); @@ -458,6 +474,8 @@ struct hb_buffer_t } void unsafe_to_concat_from_outbuffer (unsigned int start = 0, unsigned int end = -1) { + if (likely ((flags & HB_BUFFER_FLAG_PRODUCE_UNSAFE_TO_CONCAT) == 0)) + return; _set_glyph_flags (HB_GLYPH_FLAG_UNSAFE_TO_CONCAT, start, end, false, true); diff --git a/thirdparty/harfbuzz/src/hb-common.cc b/thirdparty/harfbuzz/src/hb-common.cc index 249a8a8010..41229b9183 100644 --- a/thirdparty/harfbuzz/src/hb-common.cc +++ b/thirdparty/harfbuzz/src/hb-common.cc @@ -1065,7 +1065,7 @@ hb_variation_from_string (const char *str, int len, static inline void free_static_C_locale (); static struct hb_C_locale_lazy_loader_t : hb_lazy_loader_t<hb_remove_pointer<hb_locale_t>, - hb_C_locale_lazy_loader_t> + hb_C_locale_lazy_loader_t> { static hb_locale_t create () { diff --git a/thirdparty/harfbuzz/src/hb-common.h b/thirdparty/harfbuzz/src/hb-common.h index 0384117a4d..7b897a6c51 100644 --- a/thirdparty/harfbuzz/src/hb-common.h +++ b/thirdparty/harfbuzz/src/hb-common.h @@ -130,6 +130,16 @@ typedef union _hb_var_int_t { int8_t i8[4]; } hb_var_int_t; +typedef union _hb_var_num_t { + float f; + uint32_t u32; + int32_t i32; + uint16_t u16[2]; + int16_t i16[2]; + uint8_t u8[4]; + int8_t i8[4]; +} hb_var_num_t; + /* hb_tag_t */ @@ -481,6 +491,7 @@ hb_language_get_default (void); * @HB_SCRIPT_TANGSA: `Tnsa`, Since: 3.0.0 * @HB_SCRIPT_TOTO: `Toto`, Since: 3.0.0 * @HB_SCRIPT_VITHKUQI: `Vith`, Since: 3.0.0 + * @HB_SCRIPT_MATH: `Zmth`, Since: 3.4.0 * @HB_SCRIPT_INVALID: No script set * * Data type for scripts. Each #hb_script_t's value is an #hb_tag_t corresponding @@ -697,6 +708,11 @@ typedef enum HB_SCRIPT_TOTO = HB_TAG ('T','o','t','o'), /*14.0*/ HB_SCRIPT_VITHKUQI = HB_TAG ('V','i','t','h'), /*14.0*/ + /* + * Since 3.4.0 + */ + HB_SCRIPT_MATH = HB_TAG ('Z','m','t','h'), + /* No script set. 
*/ HB_SCRIPT_INVALID = HB_TAG_NONE, diff --git a/thirdparty/harfbuzz/src/hb-config.hh b/thirdparty/harfbuzz/src/hb-config.hh index 7d00d9088a..4b46dea938 100644 --- a/thirdparty/harfbuzz/src/hb-config.hh +++ b/thirdparty/harfbuzz/src/hb-config.hh @@ -55,6 +55,7 @@ #define HB_NO_ATEXIT #define HB_NO_BUFFER_MESSAGE #define HB_NO_BUFFER_SERIALIZE +#define HB_NO_BUFFER_VERIFY #define HB_NO_BITMAP #define HB_NO_CFF #define HB_NO_COLOR @@ -84,6 +85,7 @@ #ifdef HB_MINI #define HB_NO_AAT #define HB_NO_LEGACY +#define HB_NO_BORING_EXPANSION #endif #if defined(HAVE_CONFIG_OVERRIDE_H) || defined(HB_CONFIG_OVERRIDE_H) diff --git a/thirdparty/harfbuzz/src/hb-draw.cc b/thirdparty/harfbuzz/src/hb-draw.cc index c0af6ce013..b31019b07e 100644 --- a/thirdparty/harfbuzz/src/hb-draw.cc +++ b/thirdparty/harfbuzz/src/hb-draw.cc @@ -25,237 +25,313 @@ #include "hb.hh" #ifndef HB_NO_DRAW -#ifdef HB_EXPERIMENTAL_API #include "hb-draw.hh" -#include "hb-ot.h" -#include "hb-ot-glyf-table.hh" -#include "hb-ot-cff1-table.hh" -#include "hb-ot-cff2-table.hh" /** - * hb_draw_funcs_set_move_to_func: - * @funcs: draw functions object - * @move_to: move-to callback + * SECTION:hb-draw + * @title: hb-draw + * @short_description: Glyph drawing + * @include: hb.h * - * Sets move-to callback to the draw functions object. - * - * Since: EXPERIMENTAL + * Functions for drawing (extracting) glyph shapes. **/ -void -hb_draw_funcs_set_move_to_func (hb_draw_funcs_t *funcs, - hb_draw_move_to_func_t move_to) + +static void +hb_draw_move_to_nil (hb_draw_funcs_t *dfuncs HB_UNUSED, void *draw_data HB_UNUSED, + hb_draw_state_t *st HB_UNUSED, + float to_x HB_UNUSED, float to_y HB_UNUSED, + void *user_data HB_UNUSED) {} + +static void +hb_draw_line_to_nil (hb_draw_funcs_t *dfuncs HB_UNUSED, void *draw_data HB_UNUSED, + hb_draw_state_t *st HB_UNUSED, + float to_x HB_UNUSED, float to_y HB_UNUSED, + void *user_data HB_UNUSED) {} + +static void +hb_draw_quadratic_to_nil (hb_draw_funcs_t *dfuncs, void *draw_data, + hb_draw_state_t *st, + float control_x, float control_y, + float to_x, float to_y, + void *user_data HB_UNUSED) { - if (unlikely (hb_object_is_immutable (funcs))) return; - funcs->move_to = move_to; + dfuncs->emit_cubic_to (draw_data, *st, + (st->current_x + 2.f * control_x) / 3.f, + (st->current_y + 2.f * control_y) / 3.f, + (to_x + 2.f * control_x) / 3.f, + (to_y + 2.f * control_y) / 3.f, + to_x, to_y); +} + +static void +hb_draw_cubic_to_nil (hb_draw_funcs_t *dfuncs HB_UNUSED, void *draw_data HB_UNUSED, + hb_draw_state_t *st HB_UNUSED, + float control1_x HB_UNUSED, float control1_y HB_UNUSED, + float control2_x HB_UNUSED, float control2_y HB_UNUSED, + float to_x HB_UNUSED, float to_y HB_UNUSED, + void *user_data HB_UNUSED) {} + +static void +hb_draw_close_path_nil (hb_draw_funcs_t *dfuncs HB_UNUSED, void *draw_data HB_UNUSED, + hb_draw_state_t *st HB_UNUSED, + void *user_data HB_UNUSED) {} + + +#define HB_DRAW_FUNC_IMPLEMENT(name) \ + \ +void \ +hb_draw_funcs_set_##name##_func (hb_draw_funcs_t *dfuncs, \ + hb_draw_##name##_func_t func, \ + void *user_data, \ + hb_destroy_func_t destroy) \ +{ \ + if (hb_object_is_immutable (dfuncs)) \ + return; \ + \ + if (dfuncs->destroy.name) \ + dfuncs->destroy.name (dfuncs->user_data.name); \ + \ + if (func) { \ + dfuncs->func.name = func; \ + dfuncs->user_data.name = user_data; \ + dfuncs->destroy.name = destroy; \ + } else { \ + dfuncs->func.name = hb_draw_##name##_nil; \ + dfuncs->user_data.name = nullptr; \ + dfuncs->destroy.name = nullptr; \ + } \ } +HB_DRAW_FUNCS_IMPLEMENT_CALLBACKS +#undef 
HB_DRAW_FUNC_IMPLEMENT + /** - * hb_draw_funcs_set_line_to_func: - * @funcs: draw functions object - * @line_to: line-to callback + * hb_draw_funcs_create: (Xconstructor) + * + * Creates a new draw callbacks object. * - * Sets line-to callback to the draw functions object. + * Return value: (transfer full): + * A newly allocated #hb_draw_funcs_t with a reference count of 1. The initial + * reference count should be released with hb_draw_funcs_destroy when you are + * done using the #hb_draw_funcs_t. This function never returns %NULL. If + * memory cannot be allocated, a special singleton #hb_draw_funcs_t object will + * be returned. * - * Since: EXPERIMENTAL + * Since: 4.0.0 **/ -void -hb_draw_funcs_set_line_to_func (hb_draw_funcs_t *funcs, - hb_draw_line_to_func_t line_to) +hb_draw_funcs_t * +hb_draw_funcs_create () { - if (unlikely (hb_object_is_immutable (funcs))) return; - funcs->line_to = line_to; + hb_draw_funcs_t *dfuncs; + if (unlikely (!(dfuncs = hb_object_create<hb_draw_funcs_t> ()))) + return const_cast<hb_draw_funcs_t *> (&Null (hb_draw_funcs_t)); + + dfuncs->func = Null (hb_draw_funcs_t).func; + + return dfuncs; } +DEFINE_NULL_INSTANCE (hb_draw_funcs_t) = +{ + HB_OBJECT_HEADER_STATIC, + + { +#define HB_DRAW_FUNC_IMPLEMENT(name) hb_draw_##name##_nil, + HB_DRAW_FUNCS_IMPLEMENT_CALLBACKS +#undef HB_DRAW_FUNC_IMPLEMENT + } +}; + + /** - * hb_draw_funcs_set_quadratic_to_func: - * @funcs: draw functions object - * @move_to: quadratic-to callback + * hb_draw_funcs_reference: (skip) + * @dfuncs: draw functions + * + * Increases the reference count on @dfuncs by one. This prevents @buffer from + * being destroyed until a matching call to hb_draw_funcs_destroy() is made. * - * Sets quadratic-to callback to the draw functions object. + * Return value: (transfer full): + * The referenced #hb_draw_funcs_t. * - * Since: EXPERIMENTAL + * Since: 4.0.0 **/ -void -hb_draw_funcs_set_quadratic_to_func (hb_draw_funcs_t *funcs, - hb_draw_quadratic_to_func_t quadratic_to) +hb_draw_funcs_t * +hb_draw_funcs_reference (hb_draw_funcs_t *dfuncs) { - if (unlikely (hb_object_is_immutable (funcs))) return; - funcs->quadratic_to = quadratic_to; - funcs->is_quadratic_to_set = true; + return hb_object_reference (dfuncs); } /** - * hb_draw_funcs_set_cubic_to_func: - * @funcs: draw functions - * @cubic_to: cubic-to callback + * hb_draw_funcs_destroy: (skip) + * @dfuncs: draw functions * - * Sets cubic-to callback to the draw functions object. + * Deallocate the @dfuncs. + * Decreases the reference count on @dfuncs by one. If the result is zero, then + * @dfuncs and all associated resources are freed. See hb_draw_funcs_reference(). * - * Since: EXPERIMENTAL + * Since: 4.0.0 **/ void -hb_draw_funcs_set_cubic_to_func (hb_draw_funcs_t *funcs, - hb_draw_cubic_to_func_t cubic_to) +hb_draw_funcs_destroy (hb_draw_funcs_t *dfuncs) { - if (unlikely (hb_object_is_immutable (funcs))) return; - funcs->cubic_to = cubic_to; + if (!hb_object_destroy (dfuncs)) return; + +#define HB_DRAW_FUNC_IMPLEMENT(name) \ + if (dfuncs->destroy.name) dfuncs->destroy.name (dfuncs->user_data.name); + HB_DRAW_FUNCS_IMPLEMENT_CALLBACKS +#undef HB_DRAW_FUNC_IMPLEMENT + + + hb_free (dfuncs); } /** - * hb_draw_funcs_set_close_path_func: - * @funcs: draw functions object - * @close_path: close-path callback + * hb_draw_funcs_make_immutable: + * @dfuncs: draw functions * - * Sets close-path callback to the draw functions object. + * Makes @dfuncs object immutable. 
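The new hb_draw_funcs_t object follows the usual HarfBuzz reference-counting lifecycle documented above. A short sketch under that assumption; the function name is hypothetical:

#include <hb.h>

static void
draw_funcs_lifecycle_example (void)
{
  hb_draw_funcs_t *dfuncs = hb_draw_funcs_create ();

  /* ... install callbacks with hb_draw_funcs_set_*_func () here ... */

  hb_draw_funcs_make_immutable (dfuncs);        /* freeze before sharing across threads */

  hb_draw_funcs_t *extra = hb_draw_funcs_reference (dfuncs); /* refcount 2 */
  hb_draw_funcs_destroy (extra);                /* back to refcount 1 */
  hb_draw_funcs_destroy (dfuncs);               /* refcount 0: object freed */
}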
* - * Since: EXPERIMENTAL + * Since: 4.0.0 **/ void -hb_draw_funcs_set_close_path_func (hb_draw_funcs_t *funcs, - hb_draw_close_path_func_t close_path) +hb_draw_funcs_make_immutable (hb_draw_funcs_t *dfuncs) { - if (unlikely (hb_object_is_immutable (funcs))) return; - funcs->close_path = close_path; -} - -static void -_move_to_nil (hb_position_t to_x HB_UNUSED, hb_position_t to_y HB_UNUSED, void *user_data HB_UNUSED) {} - -static void -_line_to_nil (hb_position_t to_x HB_UNUSED, hb_position_t to_y HB_UNUSED, void *user_data HB_UNUSED) {} - -static void -_quadratic_to_nil (hb_position_t control_x HB_UNUSED, hb_position_t control_y HB_UNUSED, - hb_position_t to_x HB_UNUSED, hb_position_t to_y HB_UNUSED, - void *user_data HB_UNUSED) {} - -static void -_cubic_to_nil (hb_position_t control1_x HB_UNUSED, hb_position_t control1_y HB_UNUSED, - hb_position_t control2_x HB_UNUSED, hb_position_t control2_y HB_UNUSED, - hb_position_t to_x HB_UNUSED, hb_position_t to_y HB_UNUSED, - void *user_data HB_UNUSED) {} + if (hb_object_is_immutable (dfuncs)) + return; -static void -_close_path_nil (void *user_data HB_UNUSED) {} + hb_object_make_immutable (dfuncs); +} /** - * hb_draw_funcs_create: + * hb_draw_funcs_is_immutable: + * @dfuncs: draw functions * - * Creates a new draw callbacks object. + * Checks whether @dfuncs is immutable. + * + * Return value: %true if @dfuncs is immutable, %false otherwise * - * Since: EXPERIMENTAL + * Since: 4.0.0 **/ -hb_draw_funcs_t * -hb_draw_funcs_create () +hb_bool_t +hb_draw_funcs_is_immutable (hb_draw_funcs_t *dfuncs) { - hb_draw_funcs_t *funcs; - if (unlikely (!(funcs = hb_object_create<hb_draw_funcs_t> ()))) - return const_cast<hb_draw_funcs_t *> (&Null (hb_draw_funcs_t)); - - funcs->move_to = (hb_draw_move_to_func_t) _move_to_nil; - funcs->line_to = (hb_draw_line_to_func_t) _line_to_nil; - funcs->quadratic_to = (hb_draw_quadratic_to_func_t) _quadratic_to_nil; - funcs->is_quadratic_to_set = false; - funcs->cubic_to = (hb_draw_cubic_to_func_t) _cubic_to_nil; - funcs->close_path = (hb_draw_close_path_func_t) _close_path_nil; - return funcs; + return hb_object_is_immutable (dfuncs); } + /** - * hb_draw_funcs_reference: - * @funcs: draw functions + * hb_draw_move_to: + * @dfuncs: draw functions + * @draw_data: associated draw data passed by the caller + * @st: current draw state + * @to_x: X component of target point + * @to_y: Y component of target point * - * Add to callbacks object refcount. + * Perform a "move-to" draw operation. * - * Returns: The same object. - * Since: EXPERIMENTAL + * Since: 4.0.0 **/ -hb_draw_funcs_t * -hb_draw_funcs_reference (hb_draw_funcs_t *funcs) +void +hb_draw_move_to (hb_draw_funcs_t *dfuncs, void *draw_data, + hb_draw_state_t *st, + float to_x, float to_y) { - return hb_object_reference (funcs); + dfuncs->move_to (draw_data, *st, + to_x, to_y); } /** - * hb_draw_funcs_destroy: - * @funcs: draw functions + * hb_draw_line_to: + * @dfuncs: draw functions + * @draw_data: associated draw data passed by the caller + * @st: current draw state + * @to_x: X component of target point + * @to_y: Y component of target point * - * Decreases refcount of callbacks object and deletes the object if it reaches - * to zero. + * Perform a "line-to" draw operation. 
* - * Since: EXPERIMENTAL + * Since: 4.0.0 **/ void -hb_draw_funcs_destroy (hb_draw_funcs_t *funcs) +hb_draw_line_to (hb_draw_funcs_t *dfuncs, void *draw_data, + hb_draw_state_t *st, + float to_x, float to_y) { - if (!hb_object_destroy (funcs)) return; - - hb_free (funcs); + dfuncs->line_to (draw_data, *st, + to_x, to_y); } /** - * hb_draw_funcs_make_immutable: - * @funcs: draw functions + * hb_draw_quadratic_to: + * @dfuncs: draw functions + * @draw_data: associated draw data passed by the caller + * @st: current draw state + * @control_x: X component of control point + * @control_y: Y component of control point + * @to_x: X component of target point + * @to_y: Y component of target point * - * Makes funcs object immutable. + * Perform a "quadratic-to" draw operation. * - * Since: EXPERIMENTAL + * Since: 4.0.0 **/ void -hb_draw_funcs_make_immutable (hb_draw_funcs_t *funcs) +hb_draw_quadratic_to (hb_draw_funcs_t *dfuncs, void *draw_data, + hb_draw_state_t *st, + float control_x, float control_y, + float to_x, float to_y) { - if (hb_object_is_immutable (funcs)) - return; - - hb_object_make_immutable (funcs); + dfuncs->quadratic_to (draw_data, *st, + control_x, control_y, + to_x, to_y); } /** - * hb_draw_funcs_is_immutable: - * @funcs: draw functions + * hb_draw_cubic_to: + * @dfuncs: draw functions + * @draw_data: associated draw data passed by the caller + * @st: current draw state + * @control1_x: X component of first control point + * @control1_y: Y component of first control point + * @control2_x: X component of second control point + * @control2_y: Y component of second control point + * @to_x: X component of target point + * @to_y: Y component of target point * - * Checks whether funcs is immutable. + * Perform a "cubic-to" draw operation. * - * Returns: If is immutable. - * Since: EXPERIMENTAL + * Since: 4.0.0 **/ -hb_bool_t -hb_draw_funcs_is_immutable (hb_draw_funcs_t *funcs) +void +hb_draw_cubic_to (hb_draw_funcs_t *dfuncs, void *draw_data, + hb_draw_state_t *st, + float control1_x, float control1_y, + float control2_x, float control2_y, + float to_x, float to_y) { - return hb_object_is_immutable (funcs); + dfuncs->cubic_to (draw_data, *st, + control1_x, control1_y, + control2_x, control2_y, + to_x, to_y); } /** - * hb_font_draw_glyph: - * @font: a font object - * @glyph: a glyph id - * @funcs: draw callbacks object - * @user_data: parameter you like be passed to the callbacks when are called + * hb_draw_close_path: + * @dfuncs: draw functions + * @draw_data: associated draw data passed by the caller + * @st: current draw state * - * Draw a glyph. + * Perform a "close-path" draw operation. * - * Returns: Whether the font had the glyph and the operation completed successfully. 
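The quadratic fallback shown earlier (hb_draw_quadratic_to_nil) relies on standard Bezier degree elevation: a quadratic curve with start point P0, control Q and end point P2 is exactly the cubic whose control points are C1 = (P0 + 2Q)/3 and C2 = (P2 + 2Q)/3, which is where the (current + 2 * control) / 3 expressions come from. A stand-alone illustration, not part of the patch:

/* Hypothetical helper: convert one quadratic segment to the equivalent cubic. */
static void
quadratic_as_cubic (float p0x, float p0y,   /* current point P0 */
                    float qx,  float qy,    /* quadratic control Q */
                    float p2x, float p2y,   /* end point P2 */
                    float *c1x, float *c1y, /* out: cubic control C1 */
                    float *c2x, float *c2y) /* out: cubic control C2 */
{
  /* Degree elevation: C1 = P0 + 2/3 (Q - P0), C2 = P2 + 2/3 (Q - P2). */
  *c1x = (p0x + 2.f * qx) / 3.f;
  *c1y = (p0y + 2.f * qy) / 3.f;
  *c2x = (p2x + 2.f * qx) / 3.f;
  *c2y = (p2y + 2.f * qy) / 3.f;
}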
- * Since: EXPERIMENTAL + * Since: 4.0.0 **/ -hb_bool_t -hb_font_draw_glyph (hb_font_t *font, hb_codepoint_t glyph, - const hb_draw_funcs_t *funcs, - void *user_data) +void +hb_draw_close_path (hb_draw_funcs_t *dfuncs, void *draw_data, + hb_draw_state_t *st) { - if (unlikely (funcs == &Null (hb_draw_funcs_t) || - glyph >= font->face->get_num_glyphs ())) - return false; - - draw_helper_t draw_helper (funcs, user_data); - if (font->face->table.glyf->get_path (font, glyph, draw_helper)) return true; -#ifndef HB_NO_CFF - if (font->face->table.cff1->get_path (font, glyph, draw_helper)) return true; - if (font->face->table.cff2->get_path (font, glyph, draw_helper)) return true; -#endif - - return false; + dfuncs->close_path (draw_data, *st); } -#endif + #endif diff --git a/thirdparty/harfbuzz/src/hb-draw.h b/thirdparty/harfbuzz/src/hb-draw.h index f82cc34842..c45a53212a 100644 --- a/thirdparty/harfbuzz/src/hb-draw.h +++ b/thirdparty/harfbuzz/src/hb-draw.h @@ -33,65 +33,292 @@ HB_BEGIN_DECLS -#ifdef HB_EXPERIMENTAL_API -typedef void (*hb_draw_move_to_func_t) (hb_position_t to_x, hb_position_t to_y, void *user_data); -typedef void (*hb_draw_line_to_func_t) (hb_position_t to_x, hb_position_t to_y, void *user_data); -typedef void (*hb_draw_quadratic_to_func_t) (hb_position_t control_x, hb_position_t control_y, - hb_position_t to_x, hb_position_t to_y, - void *user_data); -typedef void (*hb_draw_cubic_to_func_t) (hb_position_t control1_x, hb_position_t control1_y, - hb_position_t control2_x, hb_position_t control2_y, - hb_position_t to_x, hb_position_t to_y, - void *user_data); -typedef void (*hb_draw_close_path_func_t) (void *user_data); + +/** + * hb_draw_state_t + * @path_open: Whether there is an open path + * @path_start_x: X component of the start of current path + * @path_start_y: Y component of the start of current path + * @current_x: X component of current point + * @current_y: Y component of current point + * + * Current drawing state. + * + * Since: 4.0.0 + **/ +typedef struct hb_draw_state_t { + hb_bool_t path_open; + + float path_start_x; + float path_start_y; + + float current_x; + float current_y; + + /*< private >*/ + hb_var_num_t reserved1; + hb_var_num_t reserved2; + hb_var_num_t reserved3; + hb_var_num_t reserved4; + hb_var_num_t reserved5; + hb_var_num_t reserved6; + hb_var_num_t reserved7; +} hb_draw_state_t; + +/** + * HB_DRAW_STATE_DEFAULT: + * + * The default #hb_draw_state_t at the start of glyph drawing. + */ +#define HB_DRAW_STATE_DEFAULT {0, 0.f, 0.f, 0.f, 0.f, {0.}, {0.}, {0.}} + /** * hb_draw_funcs_t: * * Glyph draw callbacks. * - * _move_to, _line_to and _cubic_to calls are necessary to be defined but we - * translate _quadratic_to calls to _cubic_to if the callback isn't defined. + * #hb_draw_move_to_func_t, #hb_draw_line_to_func_t and + * #hb_draw_cubic_to_func_t calls are necessary to be defined but we translate + * #hb_draw_quadratic_to_func_t calls to #hb_draw_cubic_to_func_t if the + * callback isn't defined. * - * Since: EXPERIMENTAL + * Since: 4.0.0 **/ + typedef struct hb_draw_funcs_t hb_draw_funcs_t; + +/** + * hb_draw_move_to_func_t: + * @dfuncs: draw functions object + * @draw_data: The data accompanying the draw functions + * @st: current draw state + * @to_x: X component of target point + * @to_y: Y component of target point + * @user_data: User data pointer passed by the caller + * + * A virtual method for the #hb_draw_funcs_t to perform a "move-to" draw + * operation. 
+ * + * Since: 4.0.0 + * + **/ +typedef void (*hb_draw_move_to_func_t) (hb_draw_funcs_t *dfuncs, void *draw_data, + hb_draw_state_t *st, + float to_x, float to_y, + void *user_data); + +/** + * hb_draw_line_to_func_t: + * @dfuncs: draw functions object + * @draw_data: The data accompanying the draw functions + * @st: current draw state + * @to_x: X component of target point + * @to_y: Y component of target point + * @user_data: User data pointer passed by the caller + * + * A virtual method for the #hb_draw_funcs_t to perform a "line-to" draw + * operation. + * + * Since: 4.0.0 + * + **/ +typedef void (*hb_draw_line_to_func_t) (hb_draw_funcs_t *dfuncs, void *draw_data, + hb_draw_state_t *st, + float to_x, float to_y, + void *user_data); + +/** + * hb_draw_quadratic_to_func_t: + * @dfuncs: draw functions object + * @draw_data: The data accompanying the draw functions + * @st: current draw state + * @control_x: X component of control point + * @control_y: Y component of control point + * @to_x: X component of target point + * @to_y: Y component of target point + * @user_data: User data pointer passed by the caller + * + * A virtual method for the #hb_draw_funcs_t to perform a "quadratic-to" draw + * operation. + * + * Since: 4.0.0 + * + **/ +typedef void (*hb_draw_quadratic_to_func_t) (hb_draw_funcs_t *dfuncs, void *draw_data, + hb_draw_state_t *st, + float control_x, float control_y, + float to_x, float to_y, + void *user_data); + +/** + * hb_draw_cubic_to_func_t: + * @dfuncs: draw functions object + * @draw_data: The data accompanying the draw functions + * @st: current draw state + * @control1_x: X component of first control point + * @control1_y: Y component of first control point + * @control2_x: X component of second control point + * @control2_y: Y component of second control point + * @to_x: X component of target point + * @to_y: Y component of target point + * @user_data: User data pointer passed by the caller + * + * A virtual method for the #hb_draw_funcs_t to perform a "cubic-to" draw + * operation. + * + * Since: 4.0.0 + * + **/ +typedef void (*hb_draw_cubic_to_func_t) (hb_draw_funcs_t *dfuncs, void *draw_data, + hb_draw_state_t *st, + float control1_x, float control1_y, + float control2_x, float control2_y, + float to_x, float to_y, + void *user_data); + +/** + * hb_draw_close_path_func_t: + * @dfuncs: draw functions object + * @draw_data: The data accompanying the draw functions + * @st: current draw state + * @user_data: User data pointer passed by the caller + * + * A virtual method for the #hb_draw_funcs_t to perform a "close-path" draw + * operation. + * + * Since: 4.0.0 + * + **/ +typedef void (*hb_draw_close_path_func_t) (hb_draw_funcs_t *dfuncs, void *draw_data, + hb_draw_state_t *st, + void *user_data); + +/** + * hb_draw_funcs_set_move_to_func: + * @dfuncs: draw functions object + * @func: (closure user_data) (destroy destroy) (scope notified): move-to callback + * @user_data: Data to pass to @func + * @destroy: (nullable): The function to call when @user_data is not needed anymore + * + * Sets move-to callback to the draw functions object. 
+ * + * Since: 4.0.0 + **/ HB_EXTERN void -hb_draw_funcs_set_move_to_func (hb_draw_funcs_t *funcs, - hb_draw_move_to_func_t move_to); +hb_draw_funcs_set_move_to_func (hb_draw_funcs_t *dfuncs, + hb_draw_move_to_func_t func, + void *user_data, hb_destroy_func_t destroy); +/** + * hb_draw_funcs_set_line_to_func: + * @dfuncs: draw functions object + * @func: (closure user_data) (destroy destroy) (scope notified): line-to callback + * @user_data: Data to pass to @func + * @destroy: (nullable): The function to call when @user_data is not needed anymore + * + * Sets line-to callback to the draw functions object. + * + * Since: 4.0.0 + **/ HB_EXTERN void -hb_draw_funcs_set_line_to_func (hb_draw_funcs_t *funcs, - hb_draw_line_to_func_t line_to); +hb_draw_funcs_set_line_to_func (hb_draw_funcs_t *dfuncs, + hb_draw_line_to_func_t func, + void *user_data, hb_destroy_func_t destroy); +/** + * hb_draw_funcs_set_quadratic_to_func: + * @dfuncs: draw functions object + * @func: (closure user_data) (destroy destroy) (scope notified): quadratic-to callback + * @user_data: Data to pass to @func + * @destroy: (nullable): The function to call when @user_data is not needed anymore + * + * Sets quadratic-to callback to the draw functions object. + * + * Since: 4.0.0 + **/ HB_EXTERN void -hb_draw_funcs_set_quadratic_to_func (hb_draw_funcs_t *funcs, - hb_draw_quadratic_to_func_t quadratic_to); +hb_draw_funcs_set_quadratic_to_func (hb_draw_funcs_t *dfuncs, + hb_draw_quadratic_to_func_t func, + void *user_data, hb_destroy_func_t destroy); +/** + * hb_draw_funcs_set_cubic_to_func: + * @dfuncs: draw functions + * @func: (closure user_data) (destroy destroy) (scope notified): cubic-to callback + * @user_data: Data to pass to @func + * @destroy: (nullable): The function to call when @user_data is not needed anymore + * + * Sets cubic-to callback to the draw functions object. + * + * Since: 4.0.0 + **/ HB_EXTERN void -hb_draw_funcs_set_cubic_to_func (hb_draw_funcs_t *funcs, - hb_draw_cubic_to_func_t cubic_to); +hb_draw_funcs_set_cubic_to_func (hb_draw_funcs_t *dfuncs, + hb_draw_cubic_to_func_t func, + void *user_data, hb_destroy_func_t destroy); +/** + * hb_draw_funcs_set_close_path_func: + * @dfuncs: draw functions object + * @func: (closure user_data) (destroy destroy) (scope notified): close-path callback + * @user_data: Data to pass to @func + * @destroy: (nullable): The function to call when @user_data is not needed anymore + * + * Sets close-path callback to the draw functions object. 
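Putting the setters together: a hedged sketch of a client that renders outlines as SVG-style path data. All names below are illustrative; draw_data carries a FILE *. No quadratic callback is installed, so quadratic segments are converted and delivered through the cubic callback, as described for hb_draw_funcs_t above.

#include <hb.h>
#include <stdio.h>

static void
svg_move_to (hb_draw_funcs_t *dfuncs, void *draw_data, hb_draw_state_t *st,
             float to_x, float to_y, void *user_data)
{ fprintf ((FILE *) draw_data, "M%g,%g", to_x, to_y); }

static void
svg_line_to (hb_draw_funcs_t *dfuncs, void *draw_data, hb_draw_state_t *st,
             float to_x, float to_y, void *user_data)
{ fprintf ((FILE *) draw_data, "L%g,%g", to_x, to_y); }

static void
svg_cubic_to (hb_draw_funcs_t *dfuncs, void *draw_data, hb_draw_state_t *st,
              float c1x, float c1y, float c2x, float c2y,
              float to_x, float to_y, void *user_data)
{ fprintf ((FILE *) draw_data, "C%g,%g %g,%g %g,%g", c1x, c1y, c2x, c2y, to_x, to_y); }

static void
svg_close_path (hb_draw_funcs_t *dfuncs, void *draw_data, hb_draw_state_t *st,
                void *user_data)
{ fprintf ((FILE *) draw_data, "Z"); }

static hb_draw_funcs_t *
make_svg_draw_funcs (void)
{
  hb_draw_funcs_t *dfuncs = hb_draw_funcs_create ();
  hb_draw_funcs_set_move_to_func (dfuncs, svg_move_to, NULL, NULL);
  hb_draw_funcs_set_line_to_func (dfuncs, svg_line_to, NULL, NULL);
  /* Quadratic segments fall back to svg_cubic_to via the nil handler. */
  hb_draw_funcs_set_cubic_to_func (dfuncs, svg_cubic_to, NULL, NULL);
  hb_draw_funcs_set_close_path_func (dfuncs, svg_close_path, NULL, NULL);
  hb_draw_funcs_make_immutable (dfuncs);
  return dfuncs;
}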
+ * + * Since: 4.0.0 + **/ HB_EXTERN void -hb_draw_funcs_set_close_path_func (hb_draw_funcs_t *funcs, - hb_draw_close_path_func_t close_path); +hb_draw_funcs_set_close_path_func (hb_draw_funcs_t *dfuncs, + hb_draw_close_path_func_t func, + void *user_data, hb_destroy_func_t destroy); + HB_EXTERN hb_draw_funcs_t * hb_draw_funcs_create (void); HB_EXTERN hb_draw_funcs_t * -hb_draw_funcs_reference (hb_draw_funcs_t *funcs); +hb_draw_funcs_reference (hb_draw_funcs_t *dfuncs); HB_EXTERN void -hb_draw_funcs_destroy (hb_draw_funcs_t *funcs); +hb_draw_funcs_destroy (hb_draw_funcs_t *dfuncs); HB_EXTERN void -hb_draw_funcs_make_immutable (hb_draw_funcs_t *funcs); +hb_draw_funcs_make_immutable (hb_draw_funcs_t *dfuncs); HB_EXTERN hb_bool_t -hb_draw_funcs_is_immutable (hb_draw_funcs_t *funcs); -#endif +hb_draw_funcs_is_immutable (hb_draw_funcs_t *dfuncs); + + +HB_EXTERN void +hb_draw_move_to (hb_draw_funcs_t *dfuncs, void *draw_data, + hb_draw_state_t *st, + float to_x, float to_y); + +HB_EXTERN void +hb_draw_line_to (hb_draw_funcs_t *dfuncs, void *draw_data, + hb_draw_state_t *st, + float to_x, float to_y); + +HB_EXTERN void +hb_draw_quadratic_to (hb_draw_funcs_t *dfuncs, void *draw_data, + hb_draw_state_t *st, + float control_x, float control_y, + float to_x, float to_y); + +HB_EXTERN void +hb_draw_cubic_to (hb_draw_funcs_t *dfuncs, void *draw_data, + hb_draw_state_t *st, + float control1_x, float control1_y, + float control2_x, float control2_y, + float to_x, float to_y); + +HB_EXTERN void +hb_draw_close_path (hb_draw_funcs_t *dfuncs, void *draw_data, + hb_draw_state_t *st); + HB_END_DECLS diff --git a/thirdparty/harfbuzz/src/hb-draw.hh b/thirdparty/harfbuzz/src/hb-draw.hh index 2aa0a5b4db..28bc9218e1 100644 --- a/thirdparty/harfbuzz/src/hb-draw.hh +++ b/thirdparty/harfbuzz/src/hb-draw.hh @@ -27,113 +27,205 @@ #include "hb.hh" -#ifdef HB_EXPERIMENTAL_API -struct hb_draw_funcs_t -{ - hb_object_header_t header; - hb_draw_move_to_func_t move_to; - hb_draw_line_to_func_t line_to; - hb_draw_quadratic_to_func_t quadratic_to; - bool is_quadratic_to_set; - hb_draw_cubic_to_func_t cubic_to; - hb_draw_close_path_func_t close_path; -}; +/* + * hb_draw_funcs_t + */ + +#define HB_DRAW_FUNCS_IMPLEMENT_CALLBACKS \ + HB_DRAW_FUNC_IMPLEMENT (move_to) \ + HB_DRAW_FUNC_IMPLEMENT (line_to) \ + HB_DRAW_FUNC_IMPLEMENT (quadratic_to) \ + HB_DRAW_FUNC_IMPLEMENT (cubic_to) \ + HB_DRAW_FUNC_IMPLEMENT (close_path) \ + /* ^--- Add new callbacks here */ -struct draw_helper_t +struct hb_draw_funcs_t { - draw_helper_t (const hb_draw_funcs_t *funcs_, void *user_data_) - { - funcs = funcs_; - user_data = user_data_; - path_open = false; - path_start_x = current_x = path_start_y = current_y = 0; - } - ~draw_helper_t () { end_path (); } + hb_object_header_t header; - void move_to (hb_position_t x, hb_position_t y) + struct { +#define HB_DRAW_FUNC_IMPLEMENT(name) hb_draw_##name##_func_t name; + HB_DRAW_FUNCS_IMPLEMENT_CALLBACKS +#undef HB_DRAW_FUNC_IMPLEMENT + } func; + + struct { +#define HB_DRAW_FUNC_IMPLEMENT(name) void *name; + HB_DRAW_FUNCS_IMPLEMENT_CALLBACKS +#undef HB_DRAW_FUNC_IMPLEMENT + } user_data; + + struct { +#define HB_DRAW_FUNC_IMPLEMENT(name) hb_destroy_func_t name; + HB_DRAW_FUNCS_IMPLEMENT_CALLBACKS +#undef HB_DRAW_FUNC_IMPLEMENT + } destroy; + + void emit_move_to (void *draw_data, hb_draw_state_t &st, + float to_x, float to_y) + { func.move_to (this, draw_data, &st, + to_x, to_y, + user_data.move_to); } + void emit_line_to (void *draw_data, hb_draw_state_t &st, + float to_x, float to_y) + { func.line_to (this, 
draw_data, &st, + to_x, to_y, + user_data.line_to); } + void emit_quadratic_to (void *draw_data, hb_draw_state_t &st, + float control_x, float control_y, + float to_x, float to_y) + { func.quadratic_to (this, draw_data, &st, + control_x, control_y, + to_x, to_y, + user_data.quadratic_to); } + void emit_cubic_to (void *draw_data, hb_draw_state_t &st, + float control1_x, float control1_y, + float control2_x, float control2_y, + float to_x, float to_y) + { func.cubic_to (this, draw_data, &st, + control1_x, control1_y, + control2_x, control2_y, + to_x, to_y, + user_data.cubic_to); } + void emit_close_path (void *draw_data, hb_draw_state_t &st) + { func.close_path (this, draw_data, &st, + user_data.close_path); } + + + void move_to (void *draw_data, hb_draw_state_t &st, + float to_x, float to_y) { - if (path_open) end_path (); - current_x = path_start_x = x; - current_y = path_start_y = y; + if (st.path_open) close_path (draw_data, st); + st.current_x = to_x; + st.current_y = to_y; } - void line_to (hb_position_t x, hb_position_t y) + void line_to (void *draw_data, hb_draw_state_t &st, + float to_x, float to_y) { - if (equal_to_current (x, y)) return; - if (!path_open) start_path (); - funcs->line_to (x, y, user_data); - current_x = x; - current_y = y; + if (!st.path_open) start_path (draw_data, st); + emit_line_to (draw_data, st, to_x, to_y); + st.current_x = to_x; + st.current_y = to_y; } void - quadratic_to (hb_position_t control_x, hb_position_t control_y, - hb_position_t to_x, hb_position_t to_y) + quadratic_to (void *draw_data, hb_draw_state_t &st, + float control_x, float control_y, + float to_x, float to_y) { - if (equal_to_current (control_x, control_y) && equal_to_current (to_x, to_y)) - return; - if (!path_open) start_path (); - if (funcs->is_quadratic_to_set) - funcs->quadratic_to (control_x, control_y, to_x, to_y, user_data); - else - funcs->cubic_to (roundf ((current_x + 2.f * control_x) / 3.f), - roundf ((current_y + 2.f * control_y) / 3.f), - roundf ((to_x + 2.f * control_x) / 3.f), - roundf ((to_y + 2.f * control_y) / 3.f), - to_x, to_y, user_data); - current_x = to_x; - current_y = to_y; + if (!st.path_open) start_path (draw_data, st); + emit_quadratic_to (draw_data, st, control_x, control_y, to_x, to_y); + st.current_x = to_x; + st.current_y = to_y; } void - cubic_to (hb_position_t control1_x, hb_position_t control1_y, - hb_position_t control2_x, hb_position_t control2_y, - hb_position_t to_x, hb_position_t to_y) + cubic_to (void *draw_data, hb_draw_state_t &st, + float control1_x, float control1_y, + float control2_x, float control2_y, + float to_x, float to_y) { - if (equal_to_current (control1_x, control1_y) && - equal_to_current (control2_x, control2_y) && - equal_to_current (to_x, to_y)) - return; - if (!path_open) start_path (); - funcs->cubic_to (control1_x, control1_y, control2_x, control2_y, to_x, to_y, user_data); - current_x = to_x; - current_y = to_y; + if (!st.path_open) start_path (draw_data, st); + emit_cubic_to (draw_data, st, control1_x, control1_y, control2_x, control2_y, to_x, to_y); + st.current_x = to_x; + st.current_y = to_y; } - void end_path () + void + close_path (void *draw_data, hb_draw_state_t &st) { - if (path_open) + if (st.path_open) { - if ((path_start_x != current_x) || (path_start_y != current_y)) - funcs->line_to (path_start_x, path_start_y, user_data); - funcs->close_path (user_data); + if ((st.path_start_x != st.current_x) || (st.path_start_y != st.current_y)) + emit_line_to (draw_data, st, st.path_start_x, st.path_start_y); + 
emit_close_path (draw_data, st); } - path_open = false; - path_start_x = current_x = path_start_y = current_y = 0; + st.path_open = false; + st.path_start_x = st.current_x = st.path_start_y = st.current_y = 0; } protected: - bool equal_to_current (hb_position_t x, hb_position_t y) - { return current_x == x && current_y == y; } - void start_path () + void start_path (void *draw_data, hb_draw_state_t &st) { - if (path_open) end_path (); - path_open = true; - funcs->move_to (path_start_x, path_start_y, user_data); + assert (!st.path_open); + emit_move_to (draw_data, st, st.current_x, st.current_y); + st.path_open = true; + st.path_start_x = st.current_x; + st.path_start_y = st.current_y; } +}; +DECLARE_NULL_INSTANCE (hb_draw_funcs_t); - hb_position_t path_start_x; - hb_position_t path_start_y; +struct hb_draw_session_t +{ + hb_draw_session_t (hb_draw_funcs_t *funcs_, void *draw_data_, float slant_ = 0.f) + : slant {slant_}, not_slanted {slant == 0.f}, + funcs {funcs_}, draw_data {draw_data_}, st HB_DRAW_STATE_DEFAULT + {} - hb_position_t current_x; - hb_position_t current_y; + ~hb_draw_session_t () { close_path (); } - bool path_open; - const hb_draw_funcs_t *funcs; - void *user_data; + void move_to (float to_x, float to_y) + { + if (likely (not_slanted)) + funcs->move_to (draw_data, st, + to_x, to_y); + else + funcs->move_to (draw_data, st, + to_x + to_y * slant, to_y); + } + void line_to (float to_x, float to_y) + { + if (likely (not_slanted)) + funcs->line_to (draw_data, st, + to_x, to_y); + else + funcs->line_to (draw_data, st, + to_x + to_y * slant, to_y); + } + void + quadratic_to (float control_x, float control_y, + float to_x, float to_y) + { + if (likely (not_slanted)) + funcs->quadratic_to (draw_data, st, + control_x, control_y, + to_x, to_y); + else + funcs->quadratic_to (draw_data, st, + control_x + control_y * slant, control_y, + to_x + to_y * slant, to_y); + } + void + cubic_to (float control1_x, float control1_y, + float control2_x, float control2_y, + float to_x, float to_y) + { + if (likely (not_slanted)) + funcs->cubic_to (draw_data, st, + control1_x, control1_y, + control2_x, control2_y, + to_x, to_y); + else + funcs->cubic_to (draw_data, st, + control1_x + control1_y * slant, control1_y, + control2_x + control2_y * slant, control2_y, + to_x + to_y * slant, to_y); + } + void close_path () + { + funcs->close_path (draw_data, st); + } + + protected: + float slant; + bool not_slanted; + hb_draw_funcs_t *funcs; + void *draw_data; + hb_draw_state_t st; }; -#endif #endif /* HB_DRAW_HH */ diff --git a/thirdparty/harfbuzz/src/hb-font.cc b/thirdparty/harfbuzz/src/hb-font.cc index 350fcac139..db05f017a5 100644 --- a/thirdparty/harfbuzz/src/hb-font.cc +++ b/thirdparty/harfbuzz/src/hb-font.cc @@ -29,6 +29,7 @@ #include "hb.hh" #include "hb-font.hh" +#include "hb-draw.hh" #include "hb-machinery.hh" #include "hb-ot.h" @@ -501,6 +502,136 @@ hb_font_get_glyph_from_name_default (hb_font_t *font, return font->parent->get_glyph_from_name (name, len, glyph); } +static void +hb_font_get_glyph_shape_nil (hb_font_t *font HB_UNUSED, + void *font_data HB_UNUSED, + hb_codepoint_t glyph, + hb_draw_funcs_t *draw_funcs, + void *draw_data, + void *user_data HB_UNUSED) +{ +} + + +typedef struct hb_font_get_glyph_shape_default_adaptor_t { + hb_draw_funcs_t *draw_funcs; + void *draw_data; + float x_scale; + float y_scale; +} hb_font_get_glyph_shape_default_adaptor_t; + +static void +hb_draw_move_to_default (hb_draw_funcs_t *dfuncs HB_UNUSED, + void *draw_data, + hb_draw_state_t *st, + float to_x, float 
to_y, + void *user_data HB_UNUSED) +{ + hb_font_get_glyph_shape_default_adaptor_t *adaptor = (hb_font_get_glyph_shape_default_adaptor_t *) draw_data; + float x_scale = adaptor->x_scale; + float y_scale = adaptor->y_scale; + + adaptor->draw_funcs->emit_move_to (adaptor->draw_data, *st, + x_scale * to_x, y_scale * to_y); +} + +static void +hb_draw_line_to_default (hb_draw_funcs_t *dfuncs HB_UNUSED, void *draw_data, + hb_draw_state_t *st, + float to_x, float to_y, + void *user_data HB_UNUSED) +{ + hb_font_get_glyph_shape_default_adaptor_t *adaptor = (hb_font_get_glyph_shape_default_adaptor_t *) draw_data; + float x_scale = adaptor->x_scale; + float y_scale = adaptor->y_scale; + + st->current_x *= x_scale; + st->current_y *= y_scale; + + adaptor->draw_funcs->emit_line_to (adaptor->draw_data, *st, + x_scale * to_x, y_scale * to_y); +} + +static void +hb_draw_quadratic_to_default (hb_draw_funcs_t *dfuncs HB_UNUSED, void *draw_data, + hb_draw_state_t *st, + float control_x, float control_y, + float to_x, float to_y, + void *user_data HB_UNUSED) +{ + hb_font_get_glyph_shape_default_adaptor_t *adaptor = (hb_font_get_glyph_shape_default_adaptor_t *) draw_data; + float x_scale = adaptor->x_scale; + float y_scale = adaptor->y_scale; + + st->current_x *= x_scale; + st->current_y *= y_scale; + + adaptor->draw_funcs->emit_quadratic_to (adaptor->draw_data, *st, + x_scale * control_x, y_scale * control_y, + x_scale * to_x, y_scale * to_y); +} + +static void +hb_draw_cubic_to_default (hb_draw_funcs_t *dfuncs HB_UNUSED, void *draw_data, + hb_draw_state_t *st, + float control1_x, float control1_y, + float control2_x, float control2_y, + float to_x, float to_y, + void *user_data HB_UNUSED) +{ + hb_font_get_glyph_shape_default_adaptor_t *adaptor = (hb_font_get_glyph_shape_default_adaptor_t *) draw_data; + float x_scale = adaptor->x_scale; + float y_scale = adaptor->y_scale; + + st->current_x *= x_scale; + st->current_y *= y_scale; + + adaptor->draw_funcs->emit_cubic_to (adaptor->draw_data, *st, + x_scale * control1_x, y_scale * control1_y, + x_scale * control2_x, y_scale * control2_y, + x_scale * to_x, y_scale * to_y); +} + +static void +hb_draw_close_path_default (hb_draw_funcs_t *dfuncs HB_UNUSED, void *draw_data, + hb_draw_state_t *st, + void *user_data HB_UNUSED) +{ + hb_font_get_glyph_shape_default_adaptor_t *adaptor = (hb_font_get_glyph_shape_default_adaptor_t *) draw_data; + + adaptor->draw_funcs->emit_close_path (adaptor->draw_data, *st); +} + +static const hb_draw_funcs_t _hb_draw_funcs_default = { + HB_OBJECT_HEADER_STATIC, + + { +#define HB_DRAW_FUNC_IMPLEMENT(name) hb_draw_##name##_default, + HB_DRAW_FUNCS_IMPLEMENT_CALLBACKS +#undef HB_DRAW_FUNC_IMPLEMENT + } +}; + +static void +hb_font_get_glyph_shape_default (hb_font_t *font, + void *font_data HB_UNUSED, + hb_codepoint_t glyph, + hb_draw_funcs_t *draw_funcs, + void *draw_data, + void *user_data HB_UNUSED) +{ + hb_font_get_glyph_shape_default_adaptor_t adaptor = { + draw_funcs, + draw_data, + (float) font->x_scale / (float) font->parent->x_scale, + (float) font->y_scale / (float) font->parent->y_scale + }; + + font->parent->get_glyph_shape (glyph, + const_cast<hb_draw_funcs_t *> (&_hb_draw_funcs_default), + &adaptor); +} + DEFINE_NULL_INSTANCE (hb_font_funcs_t) = { HB_OBJECT_HEADER_STATIC, @@ -1168,6 +1299,26 @@ hb_font_get_glyph_from_name (hb_font_t *font, return font->get_glyph_from_name (name, len, glyph); } +/** + * hb_font_get_glyph_shape: + * @font: #hb_font_t to work upon + * @glyph: : The glyph ID + * @dfuncs: #hb_draw_funcs_t to draw to + 
* @draw_data: User data to pass to draw callbacks + * + * Fetches the glyph shape that corresponds to a glyph in the specified @font. + * The shape is returned by way of calls to the callsbacks of the @dfuncs + * objects, with @draw_data passed to them. + * + * Since: 4.0.0 + **/ +void +hb_font_get_glyph_shape (hb_font_t *font, + hb_codepoint_t glyph, + hb_draw_funcs_t *dfuncs, void *draw_data) +{ + font->get_glyph_shape (glyph, dfuncs, draw_data); +} /* A bit higher-level, and with fallback */ @@ -1190,7 +1341,7 @@ hb_font_get_extents_for_direction (hb_font_t *font, hb_direction_t direction, hb_font_extents_t *extents) { - return font->get_extents_for_direction (direction, extents); + font->get_extents_for_direction (direction, extents); } /** * hb_font_get_glyph_advance_for_direction: @@ -1215,7 +1366,7 @@ hb_font_get_glyph_advance_for_direction (hb_font_t *font, hb_position_t *x, hb_position_t *y) { - return font->get_glyph_advance_for_direction (glyph, direction, x, y); + font->get_glyph_advance_for_direction (glyph, direction, x, y); } /** * hb_font_get_glyph_advances_for_direction: @@ -2044,12 +2195,16 @@ hb_font_get_ptem (hb_font_t *font) * @slant: synthetic slant value. * * Sets the "synthetic slant" of a font. By default is zero. - * Synthetic slant is the graphical skew that the renderer - * applies to the font at rendering time. + * Synthetic slant is the graphical skew applied to the font + * at rendering time. * * HarfBuzz needs to know this value to adjust shaping results, * metrics, and style values to match the slanted rendering. * + * <note>Note: The glyph shape fetched via the + * hb_font_get_glyph_shape() is slanted to reflect this value + * as well.</note> + * * <note>Note: The slant value is a ratio. For example, a * 20% slant would be represented as a 0.2 value.</note> * diff --git a/thirdparty/harfbuzz/src/hb-font.h b/thirdparty/harfbuzz/src/hb-font.h index a3bbb2e37b..9548857535 100644 --- a/thirdparty/harfbuzz/src/hb-font.h +++ b/thirdparty/harfbuzz/src/hb-font.h @@ -511,6 +511,25 @@ typedef hb_bool_t (*hb_font_get_glyph_from_name_func_t) (hb_font_t *font, void * hb_codepoint_t *glyph, void *user_data); +/** + * hb_font_get_glyph_shape_func_t: + * @font: #hb_font_t to work upon + * @font_data: @font user data pointer + * @glyph: The glyph ID to query + * @draw_funcs: The draw functions to send the shape data to + * @draw_data: The data accompanying the draw functions + * @user_data: User data pointer passed by the caller + * + * A virtual method for the #hb_font_funcs_t of an #hb_font_t object. + * + * Since: 4.0.0 + * + **/ +typedef void (*hb_font_get_glyph_shape_func_t) (hb_font_t *font, void *font_data, + hb_codepoint_t glyph, + hb_draw_funcs_t *draw_funcs, void *draw_data, + void *user_data); + /* func setters */ @@ -770,6 +789,22 @@ hb_font_funcs_set_glyph_from_name_func (hb_font_funcs_t *ffuncs, hb_font_get_glyph_from_name_func_t func, void *user_data, hb_destroy_func_t destroy); +/** + * hb_font_funcs_set_glyph_shape_func: + * @ffuncs: A font-function structure + * @func: (closure user_data) (destroy destroy) (scope notified): The callback function to assign + * @user_data: Data to pass to @func + * @destroy: (nullable): The function to call when @user_data is not needed anymore + * + * Sets the implementation function for #hb_font_get_glyph_shape_func_t. 
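With a glyph-shape implementation installed in the font functions (the FreeType one is added later in this patch), client code pulls an outline through the single public entry point. A sketch, assuming an existing hb_font_t and reusing the illustrative make_svg_draw_funcs() helper from the earlier sketch:

#include <hb.h>
#include <stdio.h>

/* Hypothetical helper: print one glyph's outline as SVG path commands. */
static void
dump_glyph_outline (hb_font_t *font, hb_codepoint_t unicode)
{
  hb_codepoint_t gid;
  if (!hb_font_get_nominal_glyph (font, unicode, &gid))
    return;                                   /* no glyph for this codepoint */

  hb_draw_funcs_t *dfuncs = make_svg_draw_funcs ();
  hb_font_get_glyph_shape (font, gid, dfuncs, stdout); /* callbacks emit the path */
  fputc ('\n', stdout);
  hb_draw_funcs_destroy (dfuncs);
}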
+ * + * Since: 4.0.0 + **/ +HB_EXTERN void +hb_font_funcs_set_glyph_shape_func (hb_font_funcs_t *ffuncs, + hb_font_get_glyph_shape_func_t func, + void *user_data, hb_destroy_func_t destroy); + /* func dispatch */ HB_EXTERN hb_bool_t @@ -850,6 +885,11 @@ hb_font_get_glyph_from_name (hb_font_t *font, const char *name, int len, /* -1 means nul-terminated */ hb_codepoint_t *glyph); +HB_EXTERN void +hb_font_get_glyph_shape (hb_font_t *font, + hb_codepoint_t glyph, + hb_draw_funcs_t *dfuncs, void *draw_data); + /* high-level funcs, with fallback */ @@ -1056,11 +1096,6 @@ HB_EXTERN void hb_font_set_var_named_instance (hb_font_t *font, unsigned instance_index); -#ifdef HB_EXPERIMENTAL_API -HB_EXTERN hb_bool_t -hb_font_draw_glyph (hb_font_t *font, hb_codepoint_t glyph, - const hb_draw_funcs_t *funcs, void *user_data); -#endif HB_END_DECLS diff --git a/thirdparty/harfbuzz/src/hb-font.hh b/thirdparty/harfbuzz/src/hb-font.hh index 0d73589e8c..70311b4a85 100644 --- a/thirdparty/harfbuzz/src/hb-font.hh +++ b/thirdparty/harfbuzz/src/hb-font.hh @@ -57,6 +57,7 @@ HB_FONT_FUNC_IMPLEMENT (glyph_contour_point) \ HB_FONT_FUNC_IMPLEMENT (glyph_name) \ HB_FONT_FUNC_IMPLEMENT (glyph_from_name) \ + HB_FONT_FUNC_IMPLEMENT (glyph_shape) \ /* ^--- Add new callbacks here */ struct hb_font_funcs_t @@ -140,6 +141,8 @@ struct hb_font_t hb_position_t em_scalef_y (float v) { return em_scalef (v, y_scale); } float em_fscale_x (int16_t v) { return em_fscale (v, x_scale); } float em_fscale_y (int16_t v) { return em_fscale (v, y_scale); } + float em_fscalef_x (float v) { return em_fscalef (v, x_scale); } + float em_fscalef_y (float v) { return em_fscalef (v, y_scale); } hb_position_t em_scale_dir (int16_t v, hb_direction_t direction) { return em_mult (v, dir_mult (direction)); } @@ -373,6 +376,15 @@ struct hb_font_t klass->user_data.glyph_from_name); } + void get_glyph_shape (hb_codepoint_t glyph, + hb_draw_funcs_t *draw_funcs, void *draw_data) + { + klass->get.f.glyph_shape (this, user_data, + glyph, + draw_funcs, draw_data, + klass->user_data.glyph_shape); + } + /* A bit higher-level, and with fallback */ @@ -625,7 +637,9 @@ struct hb_font_t hb_position_t em_mult (int16_t v, int64_t mult) { return (hb_position_t) ((v * mult + 32768) >> 16); } hb_position_t em_scalef (float v, int scale) - { return (hb_position_t) roundf (v * scale / face->get_upem ()); } + { return (hb_position_t) roundf (em_fscalef (v, scale)); } + float em_fscalef (float v, int scale) + { return v * scale / face->get_upem (); } float em_fscale (int16_t v, int scale) { return (float) v * scale / face->get_upem (); } }; diff --git a/thirdparty/harfbuzz/src/hb-ft.cc b/thirdparty/harfbuzz/src/hb-ft.cc index 67691e3ff3..40311e1b91 100644 --- a/thirdparty/harfbuzz/src/hb-ft.cc +++ b/thirdparty/harfbuzz/src/hb-ft.cc @@ -33,12 +33,14 @@ #include "hb-ft.h" +#include "hb-draw.hh" #include "hb-font.hh" #include "hb-machinery.hh" #include "hb-cache.hh" #include FT_ADVANCES_H #include FT_MULTIPLE_MASTERS_H +#include FT_OUTLINE_H #include FT_TRUETYPE_TABLES_H @@ -565,6 +567,82 @@ hb_ft_get_font_h_extents (hb_font_t *font HB_UNUSED, return true; } +#ifndef HB_NO_DRAW + +static int +_hb_ft_move_to (const FT_Vector *to, + hb_draw_session_t *drawing) +{ + drawing->move_to (to->x, to->y); + return FT_Err_Ok; +} + +static int +_hb_ft_line_to (const FT_Vector *to, + hb_draw_session_t *drawing) +{ + drawing->line_to (to->x, to->y); + return FT_Err_Ok; +} + +static int +_hb_ft_conic_to (const FT_Vector *control, + const FT_Vector *to, + hb_draw_session_t *drawing) +{ + 
drawing->quadratic_to (control->x, control->y, + to->x, to->y); + return FT_Err_Ok; +} + +static int +_hb_ft_cubic_to (const FT_Vector *control1, + const FT_Vector *control2, + const FT_Vector *to, + hb_draw_session_t *drawing) +{ + drawing->cubic_to (control1->x, control1->y, + control2->x, control2->y, + to->x, to->y); + return FT_Err_Ok; +} + +static void +hb_ft_get_glyph_shape (hb_font_t *font HB_UNUSED, + void *font_data, + hb_codepoint_t glyph, + hb_draw_funcs_t *draw_funcs, void *draw_data, + void *user_data HB_UNUSED) +{ + const hb_ft_font_t *ft_font = (const hb_ft_font_t *) font_data; + hb_lock_t lock (ft_font->lock); + FT_Face ft_face = ft_font->ft_face; + + if (unlikely (FT_Load_Glyph (ft_face, glyph, + FT_LOAD_NO_BITMAP | ft_font->load_flags))) + return; + + if (ft_face->glyph->format != FT_GLYPH_FORMAT_OUTLINE) + return; + + const FT_Outline_Funcs outline_funcs = { + (FT_Outline_MoveToFunc) _hb_ft_move_to, + (FT_Outline_LineToFunc) _hb_ft_line_to, + (FT_Outline_ConicToFunc) _hb_ft_conic_to, + (FT_Outline_CubicToFunc) _hb_ft_cubic_to, + 0, /* shift */ + 0, /* delta */ + }; + + hb_draw_session_t draw_session (draw_funcs, draw_data, font->slant_xy); + + FT_Outline_Decompose (&ft_face->glyph->outline, + &outline_funcs, + &draw_session); +} +#endif + + static inline void free_static_ft_funcs (); static struct hb_ft_font_funcs_lazy_loader_t : hb_font_funcs_lazy_loader_t<hb_ft_font_funcs_lazy_loader_t> @@ -596,6 +674,10 @@ static struct hb_ft_font_funcs_lazy_loader_t : hb_font_funcs_lazy_loader_t<hb_ft hb_font_funcs_set_glyph_name_func (funcs, hb_ft_get_glyph_name, nullptr, nullptr); hb_font_funcs_set_glyph_from_name_func (funcs, hb_ft_get_glyph_from_name, nullptr, nullptr); +#ifndef HB_NO_DRAW + hb_font_funcs_set_glyph_shape_func (funcs, hb_ft_get_glyph_shape, nullptr, nullptr); +#endif + hb_font_funcs_make_immutable (funcs); hb_atexit (free_static_ft_funcs); diff --git a/thirdparty/harfbuzz/src/hb-gobject-structs.cc b/thirdparty/harfbuzz/src/hb-gobject-structs.cc index 540b11f911..ef13f1e966 100644 --- a/thirdparty/harfbuzz/src/hb-gobject-structs.cc +++ b/thirdparty/harfbuzz/src/hb-gobject-structs.cc @@ -90,6 +90,7 @@ hb_gobject_##name##_get_type () \ HB_DEFINE_OBJECT_TYPE (buffer) HB_DEFINE_OBJECT_TYPE (blob) +HB_DEFINE_OBJECT_TYPE (draw_funcs) HB_DEFINE_OBJECT_TYPE (face) HB_DEFINE_OBJECT_TYPE (font) HB_DEFINE_OBJECT_TYPE (font_funcs) diff --git a/thirdparty/harfbuzz/src/hb-gobject-structs.h b/thirdparty/harfbuzz/src/hb-gobject-structs.h index 63467f80df..3914a2431a 100644 --- a/thirdparty/harfbuzz/src/hb-gobject-structs.h +++ b/thirdparty/harfbuzz/src/hb-gobject-structs.h @@ -49,6 +49,10 @@ hb_gobject_buffer_get_type (void); #define HB_GOBJECT_TYPE_BUFFER (hb_gobject_buffer_get_type ()) HB_EXTERN GType +hb_gobject_draw_funcs_get_type (void); +#define HB_GOBJECT_TYPE_DRAW_FUNCS (hb_gobject_draw_funcs_get_type ()) + +HB_EXTERN GType hb_gobject_face_get_type (void); #define HB_GOBJECT_TYPE_FACE (hb_gobject_face_get_type ()) diff --git a/thirdparty/harfbuzz/src/hb-machinery.hh b/thirdparty/harfbuzz/src/hb-machinery.hh index 5046ac1933..e52a6a4124 100644 --- a/thirdparty/harfbuzz/src/hb-machinery.hh +++ b/thirdparty/harfbuzz/src/hb-machinery.hh @@ -194,7 +194,8 @@ struct hb_lazy_loader_t : hb_data_wrapper_t<Data, WheresData> } const Returned * operator -> () const { return get (); } - const Returned & operator * () const { return *get (); } + template <typename U = Returned, hb_enable_if (!hb_is_same (U, void))> + const U & operator * () const { return *get (); } explicit operator 
bool () const { return get_stored () != Funcs::get_null (); } template <typename C> operator const C * () const { return get (); } @@ -272,14 +273,19 @@ struct hb_face_lazy_loader_t : hb_lazy_loader_t<T, hb_face_lazy_loader_t<T, WheresFace>, hb_face_t, WheresFace> {}; -template <typename T, unsigned int WheresFace> +template <typename T, unsigned int WheresFace, bool core=false> struct hb_table_lazy_loader_t : hb_lazy_loader_t<T, - hb_table_lazy_loader_t<T, WheresFace>, + hb_table_lazy_loader_t<T, WheresFace, core>, hb_face_t, WheresFace, hb_blob_t> { static hb_blob_t *create (hb_face_t *face) - { return hb_sanitize_context_t ().reference_table<T> (face); } + { + auto c = hb_sanitize_context_t (); + if (core) + c.set_num_glyphs (0); // So we don't recurse ad infinitum... + return c.reference_table<T> (face); + } static void destroy (hb_blob_t *p) { hb_blob_destroy (p); } static const hb_blob_t *get_null () diff --git a/thirdparty/harfbuzz/src/hb-ot-cff1-table.cc b/thirdparty/harfbuzz/src/hb-ot-cff1-table.cc index 3298fa35ae..df4554ac00 100644 --- a/thirdparty/harfbuzz/src/hb-ot-cff1-table.cc +++ b/thirdparty/harfbuzz/src/hb-ot-cff1-table.cc @@ -442,13 +442,12 @@ bool OT::cff1::accelerator_t::get_extents (hb_font_t *font, hb_codepoint_t glyph return true; } -#ifdef HB_EXPERIMENTAL_API struct cff1_path_param_t { cff1_path_param_t (const OT::cff1::accelerator_t *cff_, hb_font_t *font_, - draw_helper_t &draw_helper_, point_t *delta_) + hb_draw_session_t &draw_session_, point_t *delta_) { - draw_helper = &draw_helper_; + draw_session = &draw_session_; cff = cff_; font = font_; delta = delta_; @@ -458,14 +457,14 @@ struct cff1_path_param_t { point_t point = p; if (delta) point.move (*delta); - draw_helper->move_to (font->em_scalef_x (point.x.to_real ()), font->em_scalef_y (point.y.to_real ())); + draw_session->move_to (font->em_fscalef_x (point.x.to_real ()), font->em_fscalef_y (point.y.to_real ())); } void line_to (const point_t &p) { point_t point = p; if (delta) point.move (*delta); - draw_helper->line_to (font->em_scalef_x (point.x.to_real ()), font->em_scalef_y (point.y.to_real ())); + draw_session->line_to (font->em_fscalef_x (point.x.to_real ()), font->em_fscalef_y (point.y.to_real ())); } void cubic_to (const point_t &p1, const point_t &p2, const point_t &p3) @@ -477,15 +476,15 @@ struct cff1_path_param_t point2.move (*delta); point3.move (*delta); } - draw_helper->cubic_to (font->em_scalef_x (point1.x.to_real ()), font->em_scalef_y (point1.y.to_real ()), - font->em_scalef_x (point2.x.to_real ()), font->em_scalef_y (point2.y.to_real ()), - font->em_scalef_x (point3.x.to_real ()), font->em_scalef_y (point3.y.to_real ())); + draw_session->cubic_to (font->em_fscalef_x (point1.x.to_real ()), font->em_fscalef_y (point1.y.to_real ()), + font->em_fscalef_x (point2.x.to_real ()), font->em_fscalef_y (point2.y.to_real ()), + font->em_fscalef_x (point3.x.to_real ()), font->em_fscalef_y (point3.y.to_real ())); } - void end_path () { draw_helper->end_path (); } + void end_path () { draw_session->close_path (); } hb_font_t *font; - draw_helper_t *draw_helper; + hb_draw_session_t *draw_session; point_t *delta; const OT::cff1::accelerator_t *cff; @@ -513,7 +512,7 @@ struct cff1_path_procs_path_t : path_procs_t<cff1_path_procs_path_t, cff1_cs_int }; static bool _get_path (const OT::cff1::accelerator_t *cff, hb_font_t *font, hb_codepoint_t glyph, - draw_helper_t &draw_helper, bool in_seac = false, point_t *delta = nullptr); + hb_draw_session_t &draw_session, bool in_seac = false, point_t *delta = 
nullptr); struct cff1_cs_opset_path_t : cff1_cs_opset_t<cff1_cs_opset_path_t, cff1_path_param_t, cff1_path_procs_path_t> { @@ -530,14 +529,14 @@ struct cff1_cs_opset_path_t : cff1_cs_opset_t<cff1_cs_opset_path_t, cff1_path_pa hb_codepoint_t accent = param.cff->std_code_to_glyph (env.argStack[n-1].to_int ()); if (unlikely (!(!env.in_seac && base && accent - && _get_path (param.cff, param.font, base, *param.draw_helper, true) - && _get_path (param.cff, param.font, accent, *param.draw_helper, true, &delta)))) + && _get_path (param.cff, param.font, base, *param.draw_session, true) + && _get_path (param.cff, param.font, accent, *param.draw_session, true, &delta)))) env.set_error (); } }; bool _get_path (const OT::cff1::accelerator_t *cff, hb_font_t *font, hb_codepoint_t glyph, - draw_helper_t &draw_helper, bool in_seac, point_t *delta) + hb_draw_session_t &draw_session, bool in_seac, point_t *delta) { if (unlikely (!cff->is_valid () || (glyph >= cff->num_glyphs))) return false; @@ -546,7 +545,7 @@ bool _get_path (const OT::cff1::accelerator_t *cff, hb_font_t *font, hb_codepoin const byte_str_t str = (*cff->charStrings)[glyph]; interp.env.init (str, *cff, fd); interp.env.set_in_seac (in_seac); - cff1_path_param_t param (cff, font, draw_helper, delta); + cff1_path_param_t param (cff, font, draw_session, delta); if (unlikely (!interp.interpret (param))) return false; /* Let's end the path specially since it is called inside seac also */ @@ -555,16 +554,15 @@ bool _get_path (const OT::cff1::accelerator_t *cff, hb_font_t *font, hb_codepoin return true; } -bool OT::cff1::accelerator_t::get_path (hb_font_t *font, hb_codepoint_t glyph, draw_helper_t &draw_helper) const +bool OT::cff1::accelerator_t::get_path (hb_font_t *font, hb_codepoint_t glyph, hb_draw_session_t &draw_session) const { #ifdef HB_NO_OT_FONT_CFF /* XXX Remove check when this code moves to .hh file. 
*/ return true; #endif - return _get_path (this, font, glyph, draw_helper); + return _get_path (this, font, glyph, draw_session); } -#endif struct get_seac_param_t { diff --git a/thirdparty/harfbuzz/src/hb-ot-cff1-table.hh b/thirdparty/harfbuzz/src/hb-ot-cff1-table.hh index 6fb59315c9..542e3f4de3 100644 --- a/thirdparty/harfbuzz/src/hb-ot-cff1-table.hh +++ b/thirdparty/harfbuzz/src/hb-ot-cff1-table.hh @@ -1347,9 +1347,7 @@ struct cff1 HB_INTERNAL bool get_extents (hb_font_t *font, hb_codepoint_t glyph, hb_glyph_extents_t *extents) const; HB_INTERNAL bool get_seac_components (hb_codepoint_t glyph, hb_codepoint_t *base, hb_codepoint_t *accent) const; -#ifdef HB_EXPERIMENTAL_API - HB_INTERNAL bool get_path (hb_font_t *font, hb_codepoint_t glyph, draw_helper_t &draw_helper) const; -#endif + HB_INTERNAL bool get_path (hb_font_t *font, hb_codepoint_t glyph, hb_draw_session_t &draw_session) const; private: struct gname_t diff --git a/thirdparty/harfbuzz/src/hb-ot-cff2-table.cc b/thirdparty/harfbuzz/src/hb-ot-cff2-table.cc index 879b7cdb23..817fe064ce 100644 --- a/thirdparty/harfbuzz/src/hb-ot-cff2-table.cc +++ b/thirdparty/harfbuzz/src/hb-ot-cff2-table.cc @@ -143,30 +143,29 @@ bool OT::cff2::accelerator_t::get_extents (hb_font_t *font, return true; } -#ifdef HB_EXPERIMENTAL_API struct cff2_path_param_t { - cff2_path_param_t (hb_font_t *font_, draw_helper_t &draw_helper_) + cff2_path_param_t (hb_font_t *font_, hb_draw_session_t &draw_session_) { - draw_helper = &draw_helper_; + draw_session = &draw_session_; font = font_; } void move_to (const point_t &p) - { draw_helper->move_to (font->em_scalef_x (p.x.to_real ()), font->em_scalef_y (p.y.to_real ())); } + { draw_session->move_to (font->em_fscalef_x (p.x.to_real ()), font->em_fscalef_y (p.y.to_real ())); } void line_to (const point_t &p) - { draw_helper->line_to (font->em_scalef_x (p.x.to_real ()), font->em_scalef_y (p.y.to_real ())); } + { draw_session->line_to (font->em_fscalef_x (p.x.to_real ()), font->em_fscalef_y (p.y.to_real ())); } void cubic_to (const point_t &p1, const point_t &p2, const point_t &p3) { - draw_helper->cubic_to (font->em_scalef_x (p1.x.to_real ()), font->em_scalef_y (p1.y.to_real ()), - font->em_scalef_x (p2.x.to_real ()), font->em_scalef_y (p2.y.to_real ()), - font->em_scalef_x (p3.x.to_real ()), font->em_scalef_y (p3.y.to_real ())); + draw_session->cubic_to (font->em_fscalef_x (p1.x.to_real ()), font->em_fscalef_y (p1.y.to_real ()), + font->em_fscalef_x (p2.x.to_real ()), font->em_fscalef_y (p2.y.to_real ()), + font->em_fscalef_x (p3.x.to_real ()), font->em_fscalef_y (p3.y.to_real ())); } protected: - draw_helper_t *draw_helper; + hb_draw_session_t *draw_session; hb_font_t *font; }; @@ -193,7 +192,7 @@ struct cff2_path_procs_path_t : path_procs_t<cff2_path_procs_path_t, cff2_cs_int struct cff2_cs_opset_path_t : cff2_cs_opset_t<cff2_cs_opset_path_t, cff2_path_param_t, cff2_path_procs_path_t> {}; -bool OT::cff2::accelerator_t::get_path (hb_font_t *font, hb_codepoint_t glyph, draw_helper_t &draw_helper) const +bool OT::cff2::accelerator_t::get_path (hb_font_t *font, hb_codepoint_t glyph, hb_draw_session_t &draw_session) const { #ifdef HB_NO_OT_FONT_CFF /* XXX Remove check when this code moves to .hh file. 
*/ @@ -206,10 +205,9 @@ bool OT::cff2::accelerator_t::get_path (hb_font_t *font, hb_codepoint_t glyph, d cff2_cs_interpreter_t<cff2_cs_opset_path_t, cff2_path_param_t> interp; const byte_str_t str = (*charStrings)[glyph]; interp.env.init (str, *this, fd, font->coords, font->num_coords); - cff2_path_param_t param (font, draw_helper); + cff2_path_param_t param (font, draw_session); if (unlikely (!interp.interpret (param))) return false; return true; } -#endif #endif diff --git a/thirdparty/harfbuzz/src/hb-ot-cff2-table.hh b/thirdparty/harfbuzz/src/hb-ot-cff2-table.hh index 6e1b01c8fe..b77e7f53fa 100644 --- a/thirdparty/harfbuzz/src/hb-ot-cff2-table.hh +++ b/thirdparty/harfbuzz/src/hb-ot-cff2-table.hh @@ -515,9 +515,7 @@ struct cff2 HB_INTERNAL bool get_extents (hb_font_t *font, hb_codepoint_t glyph, hb_glyph_extents_t *extents) const; -#ifdef HB_EXPERIMENTAL_API - HB_INTERNAL bool get_path (hb_font_t *font, hb_codepoint_t glyph, draw_helper_t &draw_helper) const; -#endif + HB_INTERNAL bool get_path (hb_font_t *font, hb_codepoint_t glyph, hb_draw_session_t &draw_session) const; }; typedef accelerator_templ_t<cff2_private_dict_opset_subset_t, cff2_private_dict_values_subset_t> accelerator_subset_t; diff --git a/thirdparty/harfbuzz/src/hb-ot-deprecated.h b/thirdparty/harfbuzz/src/hb-ot-deprecated.h index ce6b6fef11..5192ff73e3 100644 --- a/thirdparty/harfbuzz/src/hb-ot-deprecated.h +++ b/thirdparty/harfbuzz/src/hb-ot-deprecated.h @@ -50,6 +50,21 @@ HB_BEGIN_DECLS */ #define HB_MATH_GLYPH_PART_FLAG_EXTENDER HB_OT_MATH_GLYPH_PART_FLAG_EXTENDER +/* https://github.com/harfbuzz/harfbuzz/pull/3417 */ +/** + * HB_OT_MATH_SCRIPT: + * + * Use #HB_SCRIPT_MATH or #HB_OT_TAG_MATH_SCRIPT instead. + * + * <note>Previous versions of this documentation recommended passing + * #HB_OT_MATH_SCRIPT to hb_buffer_set_script() to enable math shaping, but this + * usage is no longer supported. Use #HB_SCRIPT_MATH instead.</note> + * + * Since: 1.3.3 + * Deprecated: 3.4.0 + */ +#define HB_OT_MATH_SCRIPT HB_OT_TAG_MATH_SCRIPT + /* Like hb_ot_layout_table_find_script, but takes zero-terminated array of scripts to test */ HB_EXTERN HB_DEPRECATED_FOR (hb_ot_layout_table_select_script) hb_bool_t diff --git a/thirdparty/harfbuzz/src/hb-ot-face-table-list.hh b/thirdparty/harfbuzz/src/hb-ot-face-table-list.hh index eff09838af..c05034b3bb 100644 --- a/thirdparty/harfbuzz/src/hb-ot-face-table-list.hh +++ b/thirdparty/harfbuzz/src/hb-ot-face-table-list.hh @@ -32,6 +32,11 @@ #define HB_OT_FACE_TABLE_LIST_HH #endif /* HB_OT_FACE_TABLE_LIST_HH */ /* Dummy header guards */ +#ifndef HB_OT_CORE_TABLE +#define HB_OT_CORE_TABLE(Namespace, Type) HB_OT_TABLE (Namespace, Type) +#define _HB_OT_CORE_TABLE_UNDEF +#endif + #ifndef HB_OT_ACCELERATOR #define HB_OT_ACCELERATOR(Namespace, Type) HB_OT_TABLE (Namespace, Type) #define _HB_OT_ACCELERATOR_UNDEF @@ -46,7 +51,8 @@ /* OpenType fundamentals. */ -HB_OT_TABLE (OT, head) +HB_OT_CORE_TABLE (OT, head) +HB_OT_CORE_TABLE (OT, maxp) #if !defined(HB_NO_FACE_COLLECT_UNICODES) || !defined(HB_NO_OT_FONT) HB_OT_ACCELERATOR (OT, cmap) #endif @@ -74,6 +80,7 @@ HB_OT_TABLE (OT, VORG) #endif /* TrueType outlines. */ +HB_OT_CORE_TABLE (OT, loca) // Also used to determine number of glyphs HB_OT_ACCELERATOR (OT, glyf) /* CFF outlines. 
*/ @@ -138,3 +145,7 @@ HB_OT_TABLE (OT, MATH) #ifdef _HB_OT_ACCELERATOR_UNDEF #undef HB_OT_ACCELERATOR #endif + +#ifdef _HB_OT_CORE_TABLE_UNDEF +#undef HB_OT_CORE_TABLE +#endif diff --git a/thirdparty/harfbuzz/src/hb-ot-face.hh b/thirdparty/harfbuzz/src/hb-ot-face.hh index e24d380bca..415dae8e20 100644 --- a/thirdparty/harfbuzz/src/hb-ot-face.hh +++ b/thirdparty/harfbuzz/src/hb-ot-face.hh @@ -63,10 +63,13 @@ struct hb_ot_face_t hb_face_t *face; /* MUST be JUST before the lazy loaders. */ #define HB_OT_TABLE(Namespace, Type) \ hb_table_lazy_loader_t<Namespace::Type, HB_OT_TABLE_ORDER (Namespace, Type)> Type; +#define HB_OT_CORE_TABLE(Namespace, Type) \ + hb_table_lazy_loader_t<Namespace::Type, HB_OT_TABLE_ORDER (Namespace, Type), true> Type; #define HB_OT_ACCELERATOR(Namespace, Type) \ hb_face_lazy_loader_t<Namespace::Type##_accelerator_t, HB_OT_TABLE_ORDER (Namespace, Type)> Type; #include "hb-ot-face-table-list.hh" #undef HB_OT_ACCELERATOR +#undef HB_OT_CORE_TABLE #undef HB_OT_TABLE }; diff --git a/thirdparty/harfbuzz/src/hb-ot-font.cc b/thirdparty/harfbuzz/src/hb-ot-font.cc index 9f0359a773..77d3f639db 100644 --- a/thirdparty/harfbuzz/src/hb-ot-font.cc +++ b/thirdparty/harfbuzz/src/hb-ot-font.cc @@ -257,6 +257,23 @@ hb_ot_get_font_v_extents (hb_font_t *font, } #endif +#ifndef HB_NO_DRAW +static void +hb_ot_get_glyph_shape (hb_font_t *font, + void *font_data HB_UNUSED, + hb_codepoint_t glyph, + hb_draw_funcs_t *draw_funcs, void *draw_data, + void *user_data) +{ + hb_draw_session_t draw_session (draw_funcs, draw_data, font->slant_xy); + if (font->face->table.glyf->get_path (font, glyph, draw_session)) return; +#ifndef HB_NO_CFF + if (font->face->table.cff1->get_path (font, glyph, draw_session)) return; + if (font->face->table.cff2->get_path (font, glyph, draw_session)) return; +#endif +} +#endif + static inline void free_static_ot_funcs (); static struct hb_ot_font_funcs_lazy_loader_t : hb_font_funcs_lazy_loader_t<hb_ot_font_funcs_lazy_loader_t> @@ -279,6 +296,10 @@ static struct hb_ot_font_funcs_lazy_loader_t : hb_font_funcs_lazy_loader_t<hb_ot hb_font_funcs_set_glyph_v_origin_func (funcs, hb_ot_get_glyph_v_origin, nullptr, nullptr); #endif +#ifndef HB_NO_DRAW + hb_font_funcs_set_glyph_shape_func (funcs, hb_ot_get_glyph_shape, nullptr, nullptr); +#endif + hb_font_funcs_set_glyph_extents_func (funcs, hb_ot_get_glyph_extents, nullptr, nullptr); //hb_font_funcs_set_glyph_contour_point_func (funcs, hb_ot_get_glyph_contour_point, nullptr, nullptr); diff --git a/thirdparty/harfbuzz/src/hb-ot-glyf-table.hh b/thirdparty/harfbuzz/src/hb-ot-glyf-table.hh index 87a7d800c1..066e152da3 100644 --- a/thirdparty/harfbuzz/src/hb-ot-glyf-table.hh +++ b/thirdparty/harfbuzz/src/hb-ot-glyf-table.hh @@ -936,7 +936,7 @@ struct glyf return; short_offset = 0 == head.indexToLocFormat; - loca_table = hb_sanitize_context_t ().reference_table<loca> (face); + loca_table = face->table.loca.get_blob (); // Needs no destruct! 
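
The hunks above promote the glyph-outline ("shape") API out of HB_EXPERIMENTAL_API: hb_font_funcs_set_glyph_shape_func()/hb_font_get_glyph_shape() in hb-font.h, a FreeType implementation built on FT_Outline_Decompose, and hb_draw_session_t plumbing through the CFF1/CFF2 and glyf accelerators. A minimal caller-side sketch follows, assuming the hb-draw.h entry points that ship with the same update (hb_draw_funcs_create() and the *_to_func setters); dump_glyph_path() and the callback names are illustrative only, not part of the patch.

#include <stdio.h>
#include <hb.h>

/* Outline segments arrive as float coordinates in the font's scale space. */
static void move_to_cb (hb_draw_funcs_t *dfuncs, void *draw_data, hb_draw_state_t *st,
                        float to_x, float to_y, void *user_data)
{ printf ("M%g,%g ", to_x, to_y); }

static void line_to_cb (hb_draw_funcs_t *dfuncs, void *draw_data, hb_draw_state_t *st,
                        float to_x, float to_y, void *user_data)
{ printf ("L%g,%g ", to_x, to_y); }

static void quad_to_cb (hb_draw_funcs_t *dfuncs, void *draw_data, hb_draw_state_t *st,
                        float cx, float cy, float to_x, float to_y, void *user_data)
{ printf ("Q%g,%g %g,%g ", cx, cy, to_x, to_y); }

static void cubic_to_cb (hb_draw_funcs_t *dfuncs, void *draw_data, hb_draw_state_t *st,
                         float c1x, float c1y, float c2x, float c2y,
                         float to_x, float to_y, void *user_data)
{ printf ("C%g,%g %g,%g %g,%g ", c1x, c1y, c2x, c2y, to_x, to_y); }

static void close_path_cb (hb_draw_funcs_t *dfuncs, void *draw_data, hb_draw_state_t *st,
                           void *user_data)
{ printf ("Z "); }

/* Dump one glyph outline as SVG-style path commands. */
static void dump_glyph_path (hb_font_t *font, hb_codepoint_t glyph)
{
  hb_draw_funcs_t *dfuncs = hb_draw_funcs_create ();
  hb_draw_funcs_set_move_to_func (dfuncs, move_to_cb, NULL, NULL);
  hb_draw_funcs_set_line_to_func (dfuncs, line_to_cb, NULL, NULL);
  hb_draw_funcs_set_quadratic_to_func (dfuncs, quad_to_cb, NULL, NULL);
  hb_draw_funcs_set_cubic_to_func (dfuncs, cubic_to_cb, NULL, NULL);
  hb_draw_funcs_set_close_path_func (dfuncs, close_path_cb, NULL, NULL);

  hb_font_get_glyph_shape (font, glyph, dfuncs, NULL); /* invokes the callbacks per segment */
  printf ("\n");

  hb_draw_funcs_destroy (dfuncs);
}
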
glyf_table = hb_sanitize_context_t ().reference_table<glyf> (face); #ifndef HB_NO_VAR gvar = face->table.gvar; @@ -951,7 +951,6 @@ struct glyf } ~accelerator_t () { - loca_table.destroy (); glyf_table.destroy (); } @@ -1152,11 +1151,10 @@ struct glyf return operation_count; } -#ifdef HB_EXPERIMENTAL_API struct path_builder_t { hb_font_t *font; - draw_helper_t *draw_helper; + hb_draw_session_t *draw_session; struct optional_point_t { @@ -1171,10 +1169,10 @@ struct glyf { return optional_point_t (x + t * (p.x - x), y + t * (p.y - y)); } } first_oncurve, first_offcurve, last_offcurve; - path_builder_t (hb_font_t *font_, draw_helper_t &draw_helper_) + path_builder_t (hb_font_t *font_, hb_draw_session_t &draw_session_) { font = font_; - draw_helper = &draw_helper_; + draw_session = &draw_session_; first_oncurve = first_offcurve = last_offcurve = optional_point_t (); } @@ -1184,10 +1182,6 @@ struct glyf * https://stackoverflow.com/a/20772557 */ void consume_point (const contour_point_t &point) { - /* Skip empty contours */ - if (unlikely (point.is_end_point && !first_oncurve.has_data && !first_offcurve.has_data)) - return; - bool is_on_curve = point.flag & Glyph::FLAG_ON_CURVE; optional_point_t p (point.x, point.y); if (!first_oncurve.has_data) @@ -1195,7 +1189,7 @@ struct glyf if (is_on_curve) { first_oncurve = p; - draw_helper->move_to (font->em_scalef_x (p.x), font->em_scalef_y (p.y)); + draw_session->move_to (font->em_fscalef_x (p.x), font->em_fscalef_y (p.y)); } else { @@ -1204,7 +1198,7 @@ struct glyf optional_point_t mid = first_offcurve.lerp (p, .5f); first_oncurve = mid; last_offcurve = p; - draw_helper->move_to (font->em_scalef_x (mid.x), font->em_scalef_y (mid.y)); + draw_session->move_to (font->em_fscalef_x (mid.x), font->em_fscalef_y (mid.y)); } else first_offcurve = p; @@ -1216,22 +1210,22 @@ struct glyf { if (is_on_curve) { - draw_helper->quadratic_to (font->em_scalef_x (last_offcurve.x), font->em_scalef_y (last_offcurve.y), - font->em_scalef_x (p.x), font->em_scalef_y (p.y)); + draw_session->quadratic_to (font->em_fscalef_x (last_offcurve.x), font->em_fscalef_y (last_offcurve.y), + font->em_fscalef_x (p.x), font->em_fscalef_y (p.y)); last_offcurve = optional_point_t (); } else { optional_point_t mid = last_offcurve.lerp (p, .5f); - draw_helper->quadratic_to (font->em_scalef_x (last_offcurve.x), font->em_scalef_y (last_offcurve.y), - font->em_scalef_x (mid.x), font->em_scalef_y (mid.y)); + draw_session->quadratic_to (font->em_fscalef_x (last_offcurve.x), font->em_fscalef_y (last_offcurve.y), + font->em_fscalef_x (mid.x), font->em_fscalef_y (mid.y)); last_offcurve = p; } } else { if (is_on_curve) - draw_helper->line_to (font->em_scalef_x (p.x), font->em_scalef_y (p.y)); + draw_session->line_to (font->em_fscalef_x (p.x), font->em_fscalef_y (p.y)); else last_offcurve = p; } @@ -1242,24 +1236,30 @@ struct glyf if (first_offcurve.has_data && last_offcurve.has_data) { optional_point_t mid = last_offcurve.lerp (first_offcurve, .5f); - draw_helper->quadratic_to (font->em_scalef_x (last_offcurve.x), font->em_scalef_y (last_offcurve.y), - font->em_scalef_x (mid.x), font->em_scalef_y (mid.y)); + draw_session->quadratic_to (font->em_fscalef_x (last_offcurve.x), font->em_fscalef_y (last_offcurve.y), + font->em_fscalef_x (mid.x), font->em_fscalef_y (mid.y)); last_offcurve = optional_point_t (); /* now check the rest */ } if (first_offcurve.has_data && first_oncurve.has_data) - draw_helper->quadratic_to (font->em_scalef_x (first_offcurve.x), font->em_scalef_y (first_offcurve.y), - 
font->em_scalef_x (first_oncurve.x), font->em_scalef_y (first_oncurve.y)); + draw_session->quadratic_to (font->em_fscalef_x (first_offcurve.x), font->em_fscalef_y (first_offcurve.y), + font->em_fscalef_x (first_oncurve.x), font->em_fscalef_y (first_oncurve.y)); else if (last_offcurve.has_data && first_oncurve.has_data) - draw_helper->quadratic_to (font->em_scalef_x (last_offcurve.x), font->em_scalef_y (last_offcurve.y), - font->em_scalef_x (first_oncurve.x), font->em_scalef_y (first_oncurve.y)); + draw_session->quadratic_to (font->em_fscalef_x (last_offcurve.x), font->em_fscalef_y (last_offcurve.y), + font->em_fscalef_x (first_oncurve.x), font->em_fscalef_y (first_oncurve.y)); else if (first_oncurve.has_data) - draw_helper->line_to (font->em_scalef_x (first_oncurve.x), font->em_scalef_y (first_oncurve.y)); + draw_session->line_to (font->em_fscalef_x (first_oncurve.x), font->em_fscalef_y (first_oncurve.y)); + else if (first_offcurve.has_data) + { + float x = font->em_fscalef_x (first_offcurve.x), y = font->em_fscalef_x (first_offcurve.y); + draw_session->move_to (x, y); + draw_session->quadratic_to (x, y, x, y); + } /* Getting ready for the next contour */ first_oncurve = first_offcurve = last_offcurve = optional_point_t (); - draw_helper->end_path (); + draw_session->close_path (); } } void points_end () {} @@ -1269,9 +1269,8 @@ struct glyf }; bool - get_path (hb_font_t *font, hb_codepoint_t gid, draw_helper_t &draw_helper) const - { return get_points (font, gid, path_builder_t (font, draw_helper)); } -#endif + get_path (hb_font_t *font, hb_codepoint_t gid, hb_draw_session_t &draw_session) const + { return get_points (font, gid, path_builder_t (font, draw_session)); } #ifndef HB_NO_VAR const gvar_accelerator_t *gvar; diff --git a/thirdparty/harfbuzz/src/hb-ot-hmtx-table.hh b/thirdparty/harfbuzz/src/hb-ot-hmtx-table.hh index 36bffa70a5..7487e40e6d 100644 --- a/thirdparty/harfbuzz/src/hb-ot-hmtx-table.hh +++ b/thirdparty/harfbuzz/src/hb-ot-hmtx-table.hh @@ -28,6 +28,7 @@ #define HB_OT_HMTX_TABLE_HH #include "hb-open-type.hh" +#include "hb-ot-maxp-table.hh" #include "hb-ot-hhea-table.hh" #include "hb-ot-var-hvar-table.hh" #include "hb-ot-metrics.hh" @@ -98,12 +99,12 @@ struct hmtxvmtx hb_requires (hb_is_iterator (Iterator))> void serialize (hb_serialize_context_t *c, Iterator it, - unsigned num_advances) + unsigned num_long_metrics) { unsigned idx = 0; for (auto _ : it) { - if (idx < num_advances) + if (idx < num_long_metrics) { LongMetric lm; lm.advance = _.first; @@ -128,7 +129,19 @@ struct hmtxvmtx if (unlikely (!table_prime)) return_trace (false); accelerator_t _mtx (c->plan->source); - unsigned num_advances = _mtx.num_advances_for_subset (c->plan); + unsigned num_long_metrics; + { + /* Determine num_long_metrics to encode. */ + auto& plan = c->plan; + num_long_metrics = plan->num_output_glyphs (); + hb_codepoint_t old_gid = 0; + unsigned int last_advance = plan->old_gid_for_new_gid (num_long_metrics - 1, &old_gid) ? _mtx.get_advance (old_gid) : 0; + while (num_long_metrics > 1 && + last_advance == (plan->old_gid_for_new_gid (num_long_metrics - 2, &old_gid) ? 
_mtx.get_advance (old_gid) : 0)) + { + num_long_metrics--; + } + } auto it = + hb_range (c->plan->num_output_glyphs ()) @@ -141,13 +154,13 @@ struct hmtxvmtx }) ; - table_prime->serialize (c->serializer, it, num_advances); + table_prime->serialize (c->serializer, it, num_long_metrics); if (unlikely (c->serializer->in_error ())) return_trace (false); // Amend header num hmetrics - if (unlikely (!subset_update_header (c->plan, num_advances))) + if (unlikely (!subset_update_header (c->plan, num_long_metrics))) return_trace (false); return_trace (true); @@ -160,35 +173,46 @@ struct hmtxvmtx accelerator_t (hb_face_t *face, unsigned int default_advance_ = 0) { + table = hb_sanitize_context_t ().reference_table<hmtxvmtx> (face, T::tableTag); + var_table = hb_sanitize_context_t ().reference_table<HVARVVAR> (face, T::variationsTag); + default_advance = default_advance_ ? default_advance_ : hb_face_get_upem (face); - num_advances = T::is_horizontal ? - face->table.hhea->numberOfLongMetrics : + /* Populate count variables and sort them out as we go */ + + unsigned int len = table.get_length (); + if (len & 1) + len--; + + num_long_metrics = T::is_horizontal ? + face->table.hhea->numberOfLongMetrics : #ifndef HB_NO_VERTICAL - face->table.vhea->numberOfLongMetrics + face->table.vhea->numberOfLongMetrics #else - 0 + 0 #endif - ; + ; + if (unlikely (num_long_metrics * 4 > len)) + num_long_metrics = len / 4; + len -= num_long_metrics * 4; - table = hb_sanitize_context_t ().reference_table<hmtxvmtx> (face, T::tableTag); + num_bearings = face->table.maxp->get_num_glyphs (); - /* Cap num_metrics() and num_advances() based on table length. */ - unsigned int len = table.get_length (); - if (unlikely (num_advances * 4 > len)) - num_advances = len / 4; - num_metrics = num_advances + (len - 4 * num_advances) / 2; + if (unlikely (num_bearings < num_long_metrics)) + num_bearings = num_long_metrics; + if (unlikely ((num_bearings - num_long_metrics) * 2 > len)) + num_bearings = num_long_metrics + len / 2; + len -= (num_bearings - num_long_metrics) * 2; - /* We MUST set num_metrics to zero if num_advances is zero. + /* We MUST set num_bearings to zero if num_long_metrics is zero. * Our get_advance() depends on that. 
*/ - if (unlikely (!num_advances)) - { - num_metrics = num_advances = 0; - table.destroy (); - table = hb_blob_get_empty (); - } + if (unlikely (!num_long_metrics)) + num_bearings = num_long_metrics = 0; - var_table = hb_sanitize_context_t ().reference_table<HVARVVAR> (face, T::variationsTag); + num_advances = num_bearings + len / 2; + num_glyphs = face->get_num_glyphs (); + if (num_glyphs < num_advances) + num_glyphs = num_advances; } ~accelerator_t () { @@ -198,14 +222,14 @@ struct hmtxvmtx int get_side_bearing (hb_codepoint_t glyph) const { - if (glyph < num_advances) + if (glyph < num_long_metrics) return table->longMetricZ[glyph].sb; - if (unlikely (glyph >= num_metrics)) + if (unlikely (glyph >= num_bearings)) return 0; - const FWORD *bearings = (const FWORD *) &table->longMetricZ[num_advances]; - return bearings[glyph - num_advances]; + const FWORD *bearings = (const FWORD *) &table->longMetricZ[num_long_metrics]; + return bearings[glyph - num_long_metrics]; } int get_side_bearing (hb_font_t *font, hb_codepoint_t glyph) const @@ -213,7 +237,7 @@ struct hmtxvmtx int side_bearing = get_side_bearing (glyph); #ifndef HB_NO_VAR - if (unlikely (glyph >= num_metrics) || !font->num_coords) + if (unlikely (glyph >= num_bearings) || !font->num_coords) return side_bearing; if (var_table.get_length ()) @@ -227,18 +251,35 @@ struct hmtxvmtx unsigned int get_advance (hb_codepoint_t glyph) const { - if (unlikely (glyph >= num_metrics)) - { - /* If num_metrics is zero, it means we don't have the metrics table - * for this direction: return default advance. Otherwise, it means that the - * glyph index is out of bound: return zero. */ - if (num_metrics) - return 0; - else - return default_advance; - } + /* OpenType case. */ + if (glyph < num_bearings) + return table->longMetricZ[hb_min (glyph, (uint32_t) num_long_metrics - 1)].advance; + + /* If num_advances is zero, it means we don't have the metrics table + * for this direction: return default advance. Otherwise, there's a + * well-defined answer. 
*/ + if (unlikely (!num_advances)) + return default_advance; - return table->longMetricZ[hb_min (glyph, (uint32_t) num_advances - 1)].advance; +#ifdef HB_NO_BORING_EXPANSION + return 0; +#endif + + if (unlikely (glyph >= num_glyphs)) + return 0; + + /* num_bearings <= glyph < num_glyphs; + * num_bearings <= num_advances */ + + /* TODO Optimize */ + + if (num_bearings == num_advances) + return get_advance (num_bearings - 1); + + const FWORD *bearings = (const FWORD *) &table->longMetricZ[num_long_metrics]; + const UFWORD *advances = (const UFWORD *) &bearings[num_bearings - num_long_metrics]; + + return advances[hb_min (glyph - num_bearings, num_advances - num_bearings - 1)]; } unsigned int get_advance (hb_codepoint_t glyph, @@ -247,7 +288,7 @@ struct hmtxvmtx unsigned int advance = get_advance (glyph); #ifndef HB_NO_VAR - if (unlikely (glyph >= num_metrics) || !font->num_coords) + if (unlikely (glyph >= num_bearings) || !font->num_coords) return advance; if (var_table.get_length ()) @@ -259,35 +300,13 @@ struct hmtxvmtx #endif } - unsigned int num_advances_for_subset (const hb_subset_plan_t *plan) const - { - unsigned int num_advances = plan->num_output_glyphs (); - unsigned int last_advance = _advance_for_new_gid (plan, - num_advances - 1); - while (num_advances > 1 && - last_advance == _advance_for_new_gid (plan, - num_advances - 2)) - { - num_advances--; - } - - return num_advances; - } - - private: - unsigned int _advance_for_new_gid (const hb_subset_plan_t *plan, - hb_codepoint_t new_gid) const - { - hb_codepoint_t old_gid; - if (!plan->old_gid_for_new_gid (new_gid, &old_gid)) - return 0; - - return get_advance (old_gid); - } - protected: - unsigned int num_metrics; - unsigned int num_advances; + // 0 <= num_long_metrics <= num_bearings <= num_advances <= num_glyphs + unsigned num_long_metrics; + unsigned num_bearings; + unsigned num_advances; + unsigned num_glyphs; + unsigned int default_advance; private: @@ -319,6 +338,8 @@ struct hmtxvmtx * the end. This allows a monospaced * font to vary the side bearing * values for each glyph. */ +/*UnsizedArrayOf<UFWORD>advancesX;*/ + /* TODO Document. */ public: DEFINE_SIZE_ARRAY (0, longMetricZ); }; diff --git a/thirdparty/harfbuzz/src/hb-ot-layout.cc b/thirdparty/harfbuzz/src/hb-ot-layout.cc index a599eea6e9..07bbe3bc84 100644 --- a/thirdparty/harfbuzz/src/hb-ot-layout.cc +++ b/thirdparty/harfbuzz/src/hb-ot-layout.cc @@ -361,6 +361,13 @@ hb_ot_layout_get_attach_points (hb_face_t *face, * Fetches a list of the caret positions defined for a ligature glyph in the GDEF * table of the font. The list returned will begin at the offset provided. * + * Note that a ligature that is formed from n characters will have n-1 + * caret positions. The first character is not represented in the array, + * since its caret position is the glyph position. + * + * The positions returned by this function are 'unshaped', and will have to + * be fixed up for kerning that may be applied to the ligature glyph. + * * Return value: Total number of ligature caret positions for @glyph. * **/ @@ -1960,13 +1967,84 @@ hb_ot_layout_substitute_lookup (OT::hb_ot_apply_context_t *c, #ifndef HB_NO_BASE /** + * hb_ot_layout_get_horizontal_baseline_tag_for_script: + * @script: a script tag. + * + * Fetches the dominant horizontal baseline tag used by @script. + * + * Return value: dominant baseline tag for the @script. 
+ * + * Since: 4.0.0 + **/ +hb_ot_layout_baseline_tag_t +hb_ot_layout_get_horizontal_baseline_tag_for_script (hb_script_t script) +{ + /* Keep in sync with hb_ot_layout_get_baseline_with_fallback */ + switch ((int) script) + { + /* Unicode-1.1 additions */ + case HB_SCRIPT_BENGALI: + case HB_SCRIPT_DEVANAGARI: + case HB_SCRIPT_GUJARATI: + case HB_SCRIPT_GURMUKHI: + /* Unicode-2.0 additions */ + case HB_SCRIPT_TIBETAN: + /* Unicode-4.0 additions */ + case HB_SCRIPT_LIMBU: + /* Unicode-4.1 additions */ + case HB_SCRIPT_SYLOTI_NAGRI: + /* Unicode-5.0 additions */ + case HB_SCRIPT_PHAGS_PA: + /* Unicode-5.2 additions */ + case HB_SCRIPT_MEETEI_MAYEK: + /* Unicode-6.1 additions */ + case HB_SCRIPT_SHARADA: + case HB_SCRIPT_TAKRI: + /* Unicode-7.0 additions */ + case HB_SCRIPT_MODI: + case HB_SCRIPT_SIDDHAM: + case HB_SCRIPT_TIRHUTA: + /* Unicode-9.0 additions */ + case HB_SCRIPT_MARCHEN: + case HB_SCRIPT_NEWA: + /* Unicode-10.0 additions */ + case HB_SCRIPT_SOYOMBO: + case HB_SCRIPT_ZANABAZAR_SQUARE: + /* Unicode-11.0 additions */ + case HB_SCRIPT_DOGRA: + case HB_SCRIPT_GUNJALA_GONDI: + /* Unicode-12.0 additions */ + case HB_SCRIPT_NANDINAGARI: + return HB_OT_LAYOUT_BASELINE_TAG_HANGING; + + /* Unicode-1.1 additions */ + case HB_SCRIPT_HANGUL: + case HB_SCRIPT_HAN: + case HB_SCRIPT_HIRAGANA: + case HB_SCRIPT_KATAKANA: + /* Unicode-3.0 additions */ + case HB_SCRIPT_BOPOMOFO: + /* Unicode-9.0 additions */ + case HB_SCRIPT_TANGUT: + /* Unicode-10.0 additions */ + case HB_SCRIPT_NUSHU: + /* Unicode-13.0 additions */ + case HB_SCRIPT_KHITAN_SMALL_SCRIPT: + return HB_OT_LAYOUT_BASELINE_TAG_IDEO_FACE_BOTTOM_OR_LEFT; + + default: + return HB_OT_LAYOUT_BASELINE_TAG_ROMAN; + } +} + +/** * hb_ot_layout_get_baseline: * @font: a font * @baseline_tag: a baseline tag * @direction: text direction. * @script_tag: script tag. * @language_tag: language tag, currently unused. - * @coord: (out): baseline value if found. + * @coord: (out) (nullable): baseline value if found. * * Fetches a baseline value from the face. * @@ -1989,6 +2067,227 @@ hb_ot_layout_get_baseline (hb_font_t *font, return result; } + +/** + * hb_ot_layout_get_baseline_with_fallback: + * @font: a font + * @baseline_tag: a baseline tag + * @direction: text direction. + * @script_tag: script tag. + * @language_tag: language tag, currently unused. + * @coord: (out): baseline value if found. + * + * Fetches a baseline value from the face, and synthesizes + * it if the font does not have it. + * + * Since: 4.0.0 + **/ +void +hb_ot_layout_get_baseline_with_fallback (hb_font_t *font, + hb_ot_layout_baseline_tag_t baseline_tag, + hb_direction_t direction, + hb_tag_t script_tag, + hb_tag_t language_tag, + hb_position_t *coord /* OUT */) +{ + if (hb_ot_layout_get_baseline (font, + baseline_tag, + direction, + script_tag, + language_tag, + coord)) + return; + + /* Synthesize missing baselines. + * See https://www.w3.org/TR/css-inline-3/#baseline-synthesis-fonts + */ + switch (baseline_tag) + { + case HB_OT_LAYOUT_BASELINE_TAG_ROMAN: + *coord = 0; // FIXME origin ? 
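
hb_ot_layout_get_baseline_with_fallback() above never reports failure: when the BASE table lacks the requested baseline it synthesizes one following the CSS Inline Layout rules cited in the comment. Paired with the new hb_ot_layout_get_horizontal_baseline_tag_for_script(), a caller can pick and resolve the dominant baseline for a script in a few lines. A sketch under the assumption that the OpenType script tag is obtained via hb_ot_tags_from_script_and_language(); dominant_baseline() and the variable names are illustrative.

#include <hb.h>
#include <hb-ot.h>

/* Resolve the dominant horizontal baseline for a script, synthesized if BASE is absent. */
static hb_position_t dominant_baseline (hb_font_t *font, hb_script_t script)
{
  hb_tag_t script_tags[HB_OT_MAX_TAGS_PER_SCRIPT];
  hb_tag_t language_tags[HB_OT_MAX_TAGS_PER_LANGUAGE];
  unsigned int n_scripts = HB_OT_MAX_TAGS_PER_SCRIPT;
  unsigned int n_languages = HB_OT_MAX_TAGS_PER_LANGUAGE;
  hb_ot_tags_from_script_and_language (script, HB_LANGUAGE_INVALID,
                                       &n_scripts, script_tags,
                                       &n_languages, language_tags);

  /* e.g. HANGING for Devanagari, IDEO_FACE_BOTTOM_OR_LEFT for Han */
  hb_ot_layout_baseline_tag_t tag =
    hb_ot_layout_get_horizontal_baseline_tag_for_script (script);

  hb_position_t coord = 0;
  hb_ot_layout_get_baseline_with_fallback (font, tag, HB_DIRECTION_LTR,
                                           n_scripts ? script_tags[0] : HB_OT_TAG_DEFAULT_SCRIPT,
                                           HB_TAG_NONE /* language tag currently unused */,
                                           &coord);
  return coord; /* in the font's scaled units (see hb_font_set_scale) */
}
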
+ break; + + case HB_OT_LAYOUT_BASELINE_TAG_MATH: + { + hb_codepoint_t glyph; + hb_glyph_extents_t extents; + if (HB_DIRECTION_IS_HORIZONTAL (direction) && + (hb_font_get_nominal_glyph (font, 0x2212u, &glyph) || + hb_font_get_nominal_glyph (font, '-', &glyph)) && + hb_font_get_glyph_extents (font, glyph, &extents)) + { + *coord = extents.y_bearing + extents.height / 2; + } + else + { + hb_position_t x_height = 0; + hb_ot_metrics_get_position (font, HB_OT_METRICS_TAG_X_HEIGHT, &x_height); + *coord = x_height / 2; + } + } + break; + + case HB_OT_LAYOUT_BASELINE_TAG_IDEO_FACE_TOP_OR_RIGHT: + case HB_OT_LAYOUT_BASELINE_TAG_IDEO_FACE_BOTTOM_OR_LEFT: + { + hb_position_t embox_top, embox_bottom; + + hb_ot_layout_get_baseline_with_fallback (font, + HB_OT_LAYOUT_BASELINE_TAG_IDEO_EMBOX_TOP_OR_RIGHT, + direction, + script_tag, + language_tag, + &embox_top); + hb_ot_layout_get_baseline_with_fallback (font, + HB_OT_LAYOUT_BASELINE_TAG_IDEO_EMBOX_BOTTOM_OR_LEFT, + direction, + script_tag, + language_tag, + &embox_bottom); + + if (baseline_tag == HB_OT_LAYOUT_BASELINE_TAG_IDEO_FACE_TOP_OR_RIGHT) + *coord = embox_top + (embox_bottom - embox_top) / 10; + else + *coord = embox_bottom + (embox_top - embox_bottom) / 10; + } + break; + + case HB_OT_LAYOUT_BASELINE_TAG_IDEO_EMBOX_TOP_OR_RIGHT: + if (hb_ot_layout_get_baseline (font, + HB_OT_LAYOUT_BASELINE_TAG_IDEO_EMBOX_BOTTOM_OR_LEFT, + direction, + script_tag, + language_tag, + coord)) + *coord += HB_DIRECTION_IS_HORIZONTAL (direction) ? font->y_scale : font->x_scale; + else + { + hb_font_extents_t font_extents; + hb_font_get_extents_for_direction (font, direction, &font_extents); + *coord = font_extents.ascender; + } + break; + + case HB_OT_LAYOUT_BASELINE_TAG_IDEO_EMBOX_BOTTOM_OR_LEFT: + if (hb_ot_layout_get_baseline (font, + HB_OT_LAYOUT_BASELINE_TAG_IDEO_EMBOX_TOP_OR_RIGHT, + direction, + script_tag, + language_tag, + coord)) + *coord -= HB_DIRECTION_IS_HORIZONTAL (direction) ? 
font->y_scale : font->x_scale; + else + { + hb_font_extents_t font_extents; + hb_font_get_extents_for_direction (font, direction, &font_extents); + *coord = font_extents.descender; + } + break; + + case HB_OT_LAYOUT_BASELINE_TAG_HANGING: + if (HB_DIRECTION_IS_HORIZONTAL (direction)) + { + hb_codepoint_t ch; + hb_codepoint_t glyph; + hb_glyph_extents_t extents; + + /* Keep in sync with hb_ot_layout_get_horizontal_baseline_for_script */ + switch ((int) script_tag) + { + /* Unicode-1.1 additions */ + case HB_SCRIPT_BENGALI: ch = 0x0995u; break; + case HB_SCRIPT_DEVANAGARI: ch = 0x0915u; break; + case HB_SCRIPT_GUJARATI: ch = 0x0a95u; break; + case HB_SCRIPT_GURMUKHI: ch = 0x0a15u; break; + /* Unicode-2.0 additions */ + case HB_SCRIPT_TIBETAN: ch = 0x0f40u; break; + /* Unicode-4.0 additions */ + case HB_SCRIPT_LIMBU: ch = 0x1901u; break; + /* Unicode-4.1 additions */ + case HB_SCRIPT_SYLOTI_NAGRI: ch = 0xa807u; break; + /* Unicode-5.0 additions */ + case HB_SCRIPT_PHAGS_PA: ch = 0xa840u; break; + /* Unicode-5.2 additions */ + case HB_SCRIPT_MEETEI_MAYEK: ch = 0xabc0u; break; + /* Unicode-6.1 additions */ + case HB_SCRIPT_SHARADA: ch = 0x11191u; break; + case HB_SCRIPT_TAKRI: ch = 0x1168cu; break; + /* Unicode-7.0 additions */ + case HB_SCRIPT_MODI: ch = 0x1160eu;break; + case HB_SCRIPT_SIDDHAM: ch = 0x11590u; break; + case HB_SCRIPT_TIRHUTA: ch = 0x1148fu; break; + /* Unicode-9.0 additions */ + case HB_SCRIPT_MARCHEN: ch = 0x11c72u; break; + case HB_SCRIPT_NEWA: ch = 0x1140eu; break; + /* Unicode-10.0 additions */ + case HB_SCRIPT_SOYOMBO: ch = 0x11a5cu; break; + case HB_SCRIPT_ZANABAZAR_SQUARE: ch = 0x11a0bu; break; + /* Unicode-11.0 additions */ + case HB_SCRIPT_DOGRA: ch = 0x1180au; break; + case HB_SCRIPT_GUNJALA_GONDI: ch = 0x11d6cu; break; + /* Unicode-12.0 additions */ + case HB_SCRIPT_NANDINAGARI: ch = 0x119b0u; break; + default: ch = 0; break; + } + + if (ch && + hb_font_get_nominal_glyph (font, ch, &glyph) && + hb_font_get_glyph_extents (font, glyph, &extents)) + *coord = extents.y_bearing; + else + *coord = font->y_scale * 6 / 10; // FIXME makes assumptions about origin + } + else + *coord = font->x_scale * 6 / 10; // FIXME makes assumptions about origin + break; + + case HB_OT_LAYOUT_BASELINE_TAG_IDEO_EMBOX_CENTRAL: + { + hb_position_t top, bottom; + hb_ot_layout_get_baseline_with_fallback (font, + HB_OT_LAYOUT_BASELINE_TAG_IDEO_EMBOX_TOP_OR_RIGHT, + direction, + script_tag, + language_tag, + &top); + hb_ot_layout_get_baseline_with_fallback (font, + HB_OT_LAYOUT_BASELINE_TAG_IDEO_EMBOX_BOTTOM_OR_LEFT, + direction, + script_tag, + language_tag, + &bottom); + *coord = (top + bottom) / 2; + + } + break; + + case HB_OT_LAYOUT_BASELINE_TAG_IDEO_FACE_CENTRAL: + { + hb_position_t top, bottom; + hb_ot_layout_get_baseline_with_fallback (font, + HB_OT_LAYOUT_BASELINE_TAG_IDEO_FACE_TOP_OR_RIGHT, + direction, + script_tag, + language_tag, + &top); + hb_ot_layout_get_baseline_with_fallback (font, + HB_OT_LAYOUT_BASELINE_TAG_IDEO_FACE_BOTTOM_OR_LEFT, + direction, + script_tag, + language_tag, + &bottom); + *coord = (top + bottom) / 2; + + } + break; + + case _HB_OT_LAYOUT_BASELINE_TAG_MAX_VALUE: + default: + *coord = 0; + break; + } +} + #endif diff --git a/thirdparty/harfbuzz/src/hb-ot-layout.h b/thirdparty/harfbuzz/src/hb-ot-layout.h index d47ba0fc92..4edddd9e0d 100644 --- a/thirdparty/harfbuzz/src/hb-ot-layout.h +++ b/thirdparty/harfbuzz/src/hb-ot-layout.h @@ -332,31 +332,6 @@ hb_ot_layout_lookup_collect_glyphs (hb_face_t *face, hb_set_t *glyphs_after, /* OUT. 
May be NULL */ hb_set_t *glyphs_output /* OUT. May be NULL */); -#ifdef HB_NOT_IMPLEMENTED -typedef struct -{ - const hb_codepoint_t *before, - unsigned int before_length, - const hb_codepoint_t *input, - unsigned int input_length, - const hb_codepoint_t *after, - unsigned int after_length, -} hb_ot_layout_glyph_sequence_t; - -typedef hb_bool_t -(*hb_ot_layout_glyph_sequence_func_t) (hb_font_t *font, - hb_tag_t table_tag, - unsigned int lookup_index, - const hb_ot_layout_glyph_sequence_t *sequence, - void *user_data); - -HB_EXTERN void -Xhb_ot_layout_lookup_enumerate_sequences (hb_face_t *face, - hb_tag_t table_tag, - unsigned int lookup_index, - hb_ot_layout_glyph_sequence_func_t callback, - void *user_data); -#endif /* Variations support */ @@ -411,19 +386,6 @@ hb_ot_layout_lookups_substitute_closure (hb_face_t *face, hb_set_t *glyphs); -#ifdef HB_NOT_IMPLEMENTED -/* Note: You better have GDEF when using this API, or marks won't do much. */ -HB_EXTERN hb_bool_t -Xhb_ot_layout_lookup_substitute (hb_font_t *font, - unsigned int lookup_index, - const hb_ot_layout_glyph_sequence_t *sequence, - unsigned int out_size, - hb_codepoint_t *glyphs_out, /* OUT */ - unsigned int *clusters_out, /* OUT */ - unsigned int *out_length /* OUT */); -#endif - - /* * GPOS */ @@ -431,15 +393,6 @@ Xhb_ot_layout_lookup_substitute (hb_font_t *font, HB_EXTERN hb_bool_t hb_ot_layout_has_positioning (hb_face_t *face); -#ifdef HB_NOT_IMPLEMENTED -/* Note: You better have GDEF when using this API, or marks won't do much. */ -HB_EXTERN hb_bool_t -Xhb_ot_layout_lookup_position (hb_font_t *font, - unsigned int lookup_index, - const hb_ot_layout_glyph_sequence_t *sequence, - hb_glyph_position_t *positions /* IN / OUT */); -#endif - /* Optical 'size' feature info. Returns true if found. * https://docs.microsoft.com/en-us/typography/opentype/spec/features_pt#size */ HB_EXTERN hb_bool_t @@ -487,9 +440,11 @@ hb_ot_layout_feature_get_characters (hb_face_t *face, * if the direction is horizontal or vertical, respectively. * @HB_OT_LAYOUT_BASELINE_TAG_IDEO_FACE_TOP_OR_RIGHT: Ideographic character face top or right edge, * if the direction is horizontal or vertical, respectively. + * @HB_OT_LAYOUT_BASELINE_TAG_IDEO_FACE_CENTRAL: The center of the ideographic character face. Since: 4.0.0 * @HB_OT_LAYOUT_BASELINE_TAG_IDEO_EMBOX_BOTTOM_OR_LEFT: Ideographic em-box bottom or left edge, * if the direction is horizontal or vertical, respectively. * @HB_OT_LAYOUT_BASELINE_TAG_IDEO_EMBOX_TOP_OR_RIGHT: Ideographic em-box top or right edge baseline, + * @HB_OT_LAYOUT_BASELINE_TAG_IDEO_EMBOX_CENTRAL: The center of the ideographic em-box. Since: 4.0.0 * if the direction is horizontal or vertical, respectively. * @HB_OT_LAYOUT_BASELINE_TAG_MATH: The baseline about which mathematical characters are centered. * In vertical writing mode when mathematical characters rotated 90 degrees clockwise, are centered. 
@@ -503,14 +458,19 @@ typedef enum { HB_OT_LAYOUT_BASELINE_TAG_HANGING = HB_TAG ('h','a','n','g'), HB_OT_LAYOUT_BASELINE_TAG_IDEO_FACE_BOTTOM_OR_LEFT = HB_TAG ('i','c','f','b'), HB_OT_LAYOUT_BASELINE_TAG_IDEO_FACE_TOP_OR_RIGHT = HB_TAG ('i','c','f','t'), + HB_OT_LAYOUT_BASELINE_TAG_IDEO_FACE_CENTRAL = HB_TAG ('I','c','f','c'), HB_OT_LAYOUT_BASELINE_TAG_IDEO_EMBOX_BOTTOM_OR_LEFT = HB_TAG ('i','d','e','o'), HB_OT_LAYOUT_BASELINE_TAG_IDEO_EMBOX_TOP_OR_RIGHT = HB_TAG ('i','d','t','p'), + HB_OT_LAYOUT_BASELINE_TAG_IDEO_EMBOX_CENTRAL = HB_TAG ('I','d','c','e'), HB_OT_LAYOUT_BASELINE_TAG_MATH = HB_TAG ('m','a','t','h'), /*< private >*/ _HB_OT_LAYOUT_BASELINE_TAG_MAX_VALUE = HB_TAG_MAX_SIGNED /*< skip >*/ } hb_ot_layout_baseline_tag_t; +HB_EXTERN hb_ot_layout_baseline_tag_t +hb_ot_layout_get_horizontal_baseline_tag_for_script (hb_script_t script); + HB_EXTERN hb_bool_t hb_ot_layout_get_baseline (hb_font_t *font, hb_ot_layout_baseline_tag_t baseline_tag, @@ -519,6 +479,14 @@ hb_ot_layout_get_baseline (hb_font_t *font, hb_tag_t language_tag, hb_position_t *coord /* OUT. May be NULL. */); +HB_EXTERN void +hb_ot_layout_get_baseline_with_fallback (hb_font_t *font, + hb_ot_layout_baseline_tag_t baseline_tag, + hb_direction_t direction, + hb_tag_t script_tag, + hb_tag_t language_tag, + hb_position_t *coord /* OUT */); + HB_END_DECLS #endif /* HB_OT_LAYOUT_H */ diff --git a/thirdparty/harfbuzz/src/hb-ot-math-table.hh b/thirdparty/harfbuzz/src/hb-ot-math-table.hh index 8d0b4317c3..d834d94371 100644 --- a/thirdparty/harfbuzz/src/hb-ot-math-table.hh +++ b/thirdparty/harfbuzz/src/hb-ot-math-table.hh @@ -369,6 +369,37 @@ struct MathKern return kernValue[i].get_x_value (font, this); } + unsigned int get_entries (unsigned int start_offset, + unsigned int *entries_count, /* IN/OUT */ + hb_ot_math_kern_entry_t *kern_entries, /* OUT */ + hb_font_t *font) const + { + const MathValueRecord* correctionHeight = mathValueRecordsZ.arrayZ; + const MathValueRecord* kernValue = mathValueRecordsZ.arrayZ + heightCount; + const unsigned int entriesCount = heightCount + 1; + + if (entries_count) + { + unsigned int start = hb_min (start_offset, entriesCount); + unsigned int end = hb_min (start + *entries_count, entriesCount); + *entries_count = end - start; + + for (unsigned int i = 0; i < *entries_count; i++) { + unsigned int j = start + i; + + hb_position_t max_height; + if (j == heightCount) { + max_height = INT32_MAX; + } else { + max_height = correctionHeight[j].get_y_value (font, this); + } + + kern_entries[i] = {max_height, kernValue[j].get_x_value (font, this)}; + } + } + return entriesCount; + } + protected: HBUINT16 heightCount; UnsizedArrayOf<MathValueRecord> @@ -423,6 +454,24 @@ struct MathKernInfoRecord return (base+mathKern[idx]).get_value (correction_height, font); } + unsigned int get_kernings (hb_ot_math_kern_t kern, + unsigned int start_offset, + unsigned int *entries_count, /* IN/OUT */ + hb_ot_math_kern_entry_t *kern_entries, /* OUT */ + hb_font_t *font, + const void *base) const + { + unsigned int idx = kern; + if (unlikely (idx >= ARRAY_LENGTH (mathKern)) || !mathKern[idx]) { + if (entries_count) *entries_count = 0; + return 0; + } + return (base+mathKern[idx]).get_entries (start_offset, + entries_count, + kern_entries, + font); + } + protected: /* Offset to MathKern table for each corner - * from the beginning of MathKernInfo table. May be NULL. 
*/ @@ -473,6 +522,22 @@ struct MathKernInfo return mathKernInfoRecords[index].get_kerning (kern, correction_height, font, this); } + unsigned int get_kernings (hb_codepoint_t glyph, + hb_ot_math_kern_t kern, + unsigned int start_offset, + unsigned int *entries_count, /* IN/OUT */ + hb_ot_math_kern_entry_t *kern_entries, /* OUT */ + hb_font_t *font) const + { + unsigned int index = (this+mathKernCoverage).get_coverage (glyph); + return mathKernInfoRecords[index].get_kernings (kern, + start_offset, + entries_count, + kern_entries, + font, + this); + } + protected: Offset16To<Coverage> mathKernCoverage; @@ -545,6 +610,19 @@ struct MathGlyphInfo hb_font_t *font) const { return (this+mathKernInfo).get_kerning (glyph, kern, correction_height, font); } + hb_position_t get_kernings (hb_codepoint_t glyph, + hb_ot_math_kern_t kern, + unsigned int start_offset, + unsigned int *entries_count, /* IN/OUT */ + hb_ot_math_kern_entry_t *kern_entries, /* OUT */ + hb_font_t *font) const + { return (this+mathKernInfo).get_kernings (glyph, + kern, + start_offset, + entries_count, + kern_entries, + font); } + protected: /* Offset to MathItalicsCorrectionInfo table - * from the beginning of MathGlyphInfo table. */ diff --git a/thirdparty/harfbuzz/src/hb-ot-math.cc b/thirdparty/harfbuzz/src/hb-ot-math.cc index 5781d25f2a..f44ac35849 100644 --- a/thirdparty/harfbuzz/src/hb-ot-math.cc +++ b/thirdparty/harfbuzz/src/hb-ot-math.cc @@ -185,6 +185,51 @@ hb_ot_math_get_glyph_kerning (hb_font_t *font, } /** + * hb_ot_math_get_glyph_kernings: + * @font: #hb_font_t to work upon + * @glyph: The glyph index from which to retrieve the kernings + * @kern: The #hb_ot_math_kern_t from which to retrieve the kernings + * @start_offset: offset of the first kern entry to retrieve + * @entries_count: (inout) (optional): Input = the maximum number of kern entries to return; + * Output = the actual number of kern entries returned + * @kern_entries: (out caller-allocates) (array length=entries_count): array of kern entries returned + * + * Fetches the raw MathKern (cut-in) data for the specified font, glyph index, + * and @kern. The corresponding list of kern values and correction heights is + * returned as a list of #hb_ot_math_kern_entry_t structs. + * + * See also #hb_ot_math_get_glyph_kerning, which handles selecting the + * appropriate kern value for a given correction height. + * + * <note>For a glyph with @n defined kern values (where @n > 0), there are only + * @n−1 defined correction heights, as each correction height defines a boundary + * past which the next kern value should be selected. 
Therefore, only the + * #hb_ot_math_kern_entry_t.kern_value of the uppermost #hb_ot_math_kern_entry_t + * actually comes from the font; its corresponding + * #hb_ot_math_kern_entry_t.max_correction_height is always set to + * <code>INT32_MAX</code>.</note> + * + * Return value: the total number of kern values available or zero + * + * Since: 3.4.0 + **/ +unsigned int +hb_ot_math_get_glyph_kernings (hb_font_t *font, + hb_codepoint_t glyph, + hb_ot_math_kern_t kern, + unsigned int start_offset, + unsigned int *entries_count, /* IN/OUT */ + hb_ot_math_kern_entry_t *kern_entries /* OUT */) +{ + return font->face->table.MATH->get_glyph_info().get_kernings (glyph, + kern, + start_offset, + entries_count, + kern_entries, + font); +} + +/** * hb_ot_math_get_glyph_variants: * @font: #hb_font_t to work upon * @glyph: The index of the glyph to stretch diff --git a/thirdparty/harfbuzz/src/hb-ot-math.h b/thirdparty/harfbuzz/src/hb-ot-math.h index d3ffa19d85..1378a0639a 100644 --- a/thirdparty/harfbuzz/src/hb-ot-math.h +++ b/thirdparty/harfbuzz/src/hb-ot-math.h @@ -50,14 +50,18 @@ HB_BEGIN_DECLS #define HB_OT_TAG_MATH HB_TAG('M','A','T','H') /** - * HB_OT_MATH_SCRIPT: + * HB_OT_TAG_MATH_SCRIPT: * - * OpenType script tag for math shaping, for use with - * Use with hb_buffer_set_script(). + * OpenType script tag, `math`, for features specific to math shaping. * - * Since: 1.3.3 + * <note>#HB_OT_TAG_MATH_SCRIPT is not a valid #hb_script_t and should only be + * used with functions that accept raw OpenType script tags, such as + * #hb_ot_layout_collect_features. In other cases, #HB_SCRIPT_MATH should be + * used instead.</note> + * + * Since: 3.4.0 */ -#define HB_OT_MATH_SCRIPT HB_TAG('m','a','t','h') +#define HB_OT_TAG_MATH_SCRIPT HB_TAG('m','a','t','h') /* Types */ @@ -205,6 +209,20 @@ typedef enum { } hb_ot_math_kern_t; /** + * hb_ot_math_kern_entry_t: + * @max_correction_height: The maximum height at which this entry should be used + * @kern_value: The kern value of the entry + * + * Data type to hold math kerning (cut-in) information for a glyph. + * + * Since: 3.4.0 + */ +typedef struct { + hb_position_t max_correction_height; + hb_position_t kern_value; +} hb_ot_math_kern_entry_t; + +/** * hb_ot_math_glyph_variant_t: * @glyph: The glyph index of the variant * @advance: The advance width of the variant @@ -281,6 +299,14 @@ hb_ot_math_get_glyph_kerning (hb_font_t *font, hb_position_t correction_height); HB_EXTERN unsigned int +hb_ot_math_get_glyph_kernings (hb_font_t *font, + hb_codepoint_t glyph, + hb_ot_math_kern_t kern, + unsigned int start_offset, + unsigned int *entries_count, /* IN/OUT */ + hb_ot_math_kern_entry_t *kern_entries /* OUT */); + +HB_EXTERN unsigned int hb_ot_math_get_glyph_variants (hb_font_t *font, hb_codepoint_t glyph, hb_direction_t direction, diff --git a/thirdparty/harfbuzz/src/hb-ot-metrics.cc b/thirdparty/harfbuzz/src/hb-ot-metrics.cc index 103808cf91..43c3cbd41f 100644 --- a/thirdparty/harfbuzz/src/hb-ot-metrics.cc +++ b/thirdparty/harfbuzz/src/hb-ot-metrics.cc @@ -238,6 +238,145 @@ hb_ot_metrics_get_position (hb_font_t *font, } } +/** + * hb_ot_metrics_get_position_with_fallback: + * @font: an #hb_font_t object. + * @metrics_tag: tag of metrics value you like to fetch. + * @position: (out) (optional): result of metrics value from the font. + * + * Fetches metrics value corresponding to @metrics_tag from @font, + * and synthesizes a value if it the value is missing in the font. 
+ * + * Since: 4.0.0 + **/ +void +hb_ot_metrics_get_position_with_fallback (hb_font_t *font, + hb_ot_metrics_tag_t metrics_tag, + hb_position_t *position /* OUT */) +{ + hb_font_extents_t font_extents; + hb_codepoint_t glyph; + hb_glyph_extents_t extents; + + if (hb_ot_metrics_get_position (font, metrics_tag, position)) + { + if ((metrics_tag != HB_OT_METRICS_TAG_STRIKEOUT_SIZE && + metrics_tag != HB_OT_METRICS_TAG_UNDERLINE_SIZE) || + *position != 0) + return; + } + + switch (metrics_tag) + { + case HB_OT_METRICS_TAG_HORIZONTAL_ASCENDER: + case HB_OT_METRICS_TAG_HORIZONTAL_CLIPPING_ASCENT: + hb_font_get_extents_for_direction (font, HB_DIRECTION_LTR, &font_extents); + *position = font_extents.ascender; + break; + + case HB_OT_METRICS_TAG_VERTICAL_ASCENDER: + hb_font_get_extents_for_direction (font, HB_DIRECTION_TTB, &font_extents); + *position = font_extents.ascender; + break; + + case HB_OT_METRICS_TAG_HORIZONTAL_DESCENDER: + case HB_OT_METRICS_TAG_HORIZONTAL_CLIPPING_DESCENT: + hb_font_get_extents_for_direction (font, HB_DIRECTION_LTR, &font_extents); + *position = font_extents.descender; + break; + + case HB_OT_METRICS_TAG_VERTICAL_DESCENDER: + hb_font_get_extents_for_direction (font, HB_DIRECTION_TTB, &font_extents); + *position = font_extents.ascender; + break; + + case HB_OT_METRICS_TAG_HORIZONTAL_LINE_GAP: + hb_font_get_extents_for_direction (font, HB_DIRECTION_LTR, &font_extents); + *position = font_extents.line_gap; + break; + + case HB_OT_METRICS_TAG_VERTICAL_LINE_GAP: + hb_font_get_extents_for_direction (font, HB_DIRECTION_TTB, &font_extents); + *position = font_extents.line_gap; + break; + + case HB_OT_METRICS_TAG_HORIZONTAL_CARET_RISE: + case HB_OT_METRICS_TAG_VERTICAL_CARET_RISE: + *position = 1; + break; + + case HB_OT_METRICS_TAG_HORIZONTAL_CARET_RUN: + case HB_OT_METRICS_TAG_VERTICAL_CARET_RUN: + *position = 0; + break; + + case HB_OT_METRICS_TAG_HORIZONTAL_CARET_OFFSET: + case HB_OT_METRICS_TAG_VERTICAL_CARET_OFFSET: + *position = 0; + break; + + case HB_OT_METRICS_TAG_X_HEIGHT: + if (hb_font_get_nominal_glyph (font, 'o', &glyph) && + hb_font_get_glyph_extents (font, glyph, &extents)) + *position = extents.height + 2 * extents.y_bearing; + else + *position = font->y_scale / 2; + break; + + case HB_OT_METRICS_TAG_CAP_HEIGHT: + if (hb_font_get_nominal_glyph (font, 'O', &glyph) && + hb_font_get_glyph_extents (font, glyph, &extents)) + *position = extents.height + 2 * extents.y_bearing; + else + *position = font->y_scale * 2 / 3; + break; + + case HB_OT_METRICS_TAG_STRIKEOUT_SIZE: + case HB_OT_METRICS_TAG_UNDERLINE_SIZE: + *position = font->y_scale / 18; + break; + + case HB_OT_METRICS_TAG_STRIKEOUT_OFFSET: + { + hb_position_t ascender; + hb_ot_metrics_get_position_with_fallback (font, + HB_OT_METRICS_TAG_HORIZONTAL_ASCENDER, + &ascender); + *position = ascender / 2; + } + break; + + case HB_OT_METRICS_TAG_UNDERLINE_OFFSET: + *position = - font->y_scale / 18; + break; + + case HB_OT_METRICS_TAG_SUBSCRIPT_EM_X_SIZE: + case HB_OT_METRICS_TAG_SUPERSCRIPT_EM_X_SIZE: + *position = font->x_scale * 10 / 12; + break; + + case HB_OT_METRICS_TAG_SUBSCRIPT_EM_Y_SIZE: + case HB_OT_METRICS_TAG_SUPERSCRIPT_EM_Y_SIZE: + *position = font->y_scale * 10 / 12; + break; + + case HB_OT_METRICS_TAG_SUBSCRIPT_EM_X_OFFSET: + case HB_OT_METRICS_TAG_SUPERSCRIPT_EM_X_OFFSET: + *position = 0; + break; + + case HB_OT_METRICS_TAG_SUBSCRIPT_EM_Y_OFFSET: + case HB_OT_METRICS_TAG_SUPERSCRIPT_EM_Y_OFFSET: + *position = font->y_scale / 5; + break; + + case _HB_OT_METRICS_TAG_MAX_VALUE: + default: + *position = 
0; + break; + } +} + #ifndef HB_NO_VAR /** * hb_ot_metrics_get_variation: diff --git a/thirdparty/harfbuzz/src/hb-ot-metrics.h b/thirdparty/harfbuzz/src/hb-ot-metrics.h index 5841fc8b0f..30de500088 100644 --- a/thirdparty/harfbuzz/src/hb-ot-metrics.h +++ b/thirdparty/harfbuzz/src/hb-ot-metrics.h @@ -110,6 +110,11 @@ hb_ot_metrics_get_position (hb_font_t *font, hb_ot_metrics_tag_t metrics_tag, hb_position_t *position /* OUT. May be NULL. */); +HB_EXTERN void +hb_ot_metrics_get_position_with_fallback (hb_font_t *font, + hb_ot_metrics_tag_t metrics_tag, + hb_position_t *position /* OUT */); + HB_EXTERN float hb_ot_metrics_get_variation (hb_font_t *font, hb_ot_metrics_tag_t metrics_tag); diff --git a/thirdparty/harfbuzz/src/hb-ot-name.cc b/thirdparty/harfbuzz/src/hb-ot-name.cc index eff46ef227..c35ac5b3dc 100644 --- a/thirdparty/harfbuzz/src/hb-ot-name.cc +++ b/thirdparty/harfbuzz/src/hb-ot-name.cc @@ -52,7 +52,7 @@ * array is owned by the @face and should not be modified. It can be * used as long as @face is alive. * - * Returns: (out) (transfer none) (array length=num_entries): Array of available name entries. + * Returns: (transfer none) (array length=num_entries): Array of available name entries. * Since: 2.1.0 **/ const hb_ot_name_entry_t * diff --git a/thirdparty/harfbuzz/src/hb-ot-shape-complex-arabic.cc b/thirdparty/harfbuzz/src/hb-ot-shape-complex-arabic.cc index 2298aa92f2..224f8b842e 100644 --- a/thirdparty/harfbuzz/src/hb-ot-shape-complex-arabic.cc +++ b/thirdparty/harfbuzz/src/hb-ot-shape-complex-arabic.cc @@ -635,6 +635,11 @@ modifier_combining_marks[] = 0x06E3u, /* ARABIC SMALL LOW SEEN */ 0x06E7u, /* ARABIC SMALL HIGH YEH */ 0x06E8u, /* ARABIC SMALL HIGH NOON */ + 0x08CAu, /* ARABIC SMALL HIGH FARSI YEH */ + 0x08CBu, /* ARABIC SMALL HIGH YEH BARREE WITH TWO DOTS BELOW */ + 0x08CDu, /* ARABIC SMALL HIGH ZAH */ + 0x08CEu, /* ARABIC LARGE ROUND DOT ABOVE */ + 0x08CFu, /* ARABIC LARGE ROUND DOT BELOW */ 0x08D3u, /* ARABIC SMALL LOW WAW */ 0x08F3u, /* ARABIC SMALL HIGH WAW */ }; diff --git a/thirdparty/harfbuzz/src/hb-ot-shape-complex-use-machine.hh b/thirdparty/harfbuzz/src/hb-ot-shape-complex-use-machine.hh index c3920b2cc6..fb9c60cce9 100644 --- a/thirdparty/harfbuzz/src/hb-ot-shape-complex-use-machine.hh +++ b/thirdparty/harfbuzz/src/hb-ot-shape-complex-use-machine.hh @@ -385,7 +385,9 @@ struct machine_index_t : typename Iter::item_t> { machine_index_t (const Iter& it) : it (it) {} - machine_index_t (const machine_index_t& o) : it (o.it) {} + machine_index_t (const machine_index_t& o) : hb_iter_with_fallback_t<machine_index_t<Iter>, + typename Iter::item_t> (), + it (o.it) {} static constexpr bool is_random_access_iterator = Iter::is_random_access_iterator; static constexpr bool is_sorted_iterator = Iter::is_sorted_iterator; diff --git a/thirdparty/harfbuzz/src/hb-ot-shape-fallback.cc b/thirdparty/harfbuzz/src/hb-ot-shape-fallback.cc index 671f30327f..b2eedb027b 100644 --- a/thirdparty/harfbuzz/src/hb-ot-shape-fallback.cc +++ b/thirdparty/harfbuzz/src/hb-ot-shape-fallback.cc @@ -497,14 +497,14 @@ _hb_ot_shape_fallback_kern (const hb_ot_shape_plan_t *plan, #endif #ifndef HB_DISABLE_DEPRECATED - if (!buffer->message (font, "start fallback kern")) - return; - if (HB_DIRECTION_IS_HORIZONTAL (buffer->props.direction) ? 
!font->has_glyph_h_kerning_func () : !font->has_glyph_v_kerning_func ()) return; + if (!buffer->message (font, "start fallback kern")) + return; + bool reverse = HB_DIRECTION_IS_BACKWARD (buffer->props.direction); if (reverse) diff --git a/thirdparty/harfbuzz/src/hb-ot-tag.cc b/thirdparty/harfbuzz/src/hb-ot-tag.cc index 1837063af8..f50be97ad3 100644 --- a/thirdparty/harfbuzz/src/hb-ot-tag.cc +++ b/thirdparty/harfbuzz/src/hb-ot-tag.cc @@ -41,6 +41,7 @@ hb_ot_old_tag_from_script (hb_script_t script) switch ((hb_tag_t) script) { case HB_SCRIPT_INVALID: return HB_OT_TAG_DEFAULT_SCRIPT; + case HB_SCRIPT_MATH: return HB_OT_TAG_MATH_SCRIPT; /* KATAKANA and HIRAGANA both map to 'kana' */ case HB_SCRIPT_HIRAGANA: return HB_TAG('k','a','n','a'); @@ -63,6 +64,8 @@ hb_ot_old_tag_to_script (hb_tag_t tag) { if (unlikely (tag == HB_OT_TAG_DEFAULT_SCRIPT)) return HB_SCRIPT_INVALID; + if (unlikely (tag == HB_OT_TAG_MATH_SCRIPT)) + return HB_SCRIPT_MATH; /* This side of the conversion is fully algorithmic. */ diff --git a/thirdparty/harfbuzz/src/hb-shape.cc b/thirdparty/harfbuzz/src/hb-shape.cc index c1f619c81c..3407e1af42 100644 --- a/thirdparty/harfbuzz/src/hb-shape.cc +++ b/thirdparty/harfbuzz/src/hb-shape.cc @@ -126,6 +126,13 @@ hb_shape_full (hb_font_t *font, unsigned int num_features, const char * const *shaper_list) { + hb_buffer_t *text_buffer = nullptr; + if (buffer->flags & HB_BUFFER_FLAG_VERIFY) + { + text_buffer = hb_buffer_create (); + hb_buffer_append (text_buffer, buffer, 0, -1); + } + hb_shape_plan_t *shape_plan = hb_shape_plan_create_cached2 (font->face, &buffer->props, features, num_features, font->coords, font->num_coords, @@ -133,6 +140,17 @@ hb_shape_full (hb_font_t *font, hb_bool_t res = hb_shape_plan_execute (shape_plan, font, buffer, features, num_features); hb_shape_plan_destroy (shape_plan); + if (text_buffer) + { + if (res && !buffer->verify (text_buffer, + font, + features, + num_features, + shaper_list)) + res = false; + hb_buffer_destroy (text_buffer); + } + return res; } diff --git a/thirdparty/harfbuzz/src/hb-static.cc b/thirdparty/harfbuzz/src/hb-static.cc index ec4b241470..bd698814e8 100644 --- a/thirdparty/harfbuzz/src/hb-static.cc +++ b/thirdparty/harfbuzz/src/hb-static.cc @@ -33,6 +33,7 @@ #include "hb-aat-layout-feat-table.hh" #include "hb-ot-layout-common.hh" #include "hb-ot-cmap-table.hh" +#include "hb-ot-glyf-table.hh" #include "hb-ot-head-table.hh" #include "hb-ot-maxp-table.hh" @@ -55,17 +56,41 @@ const unsigned char _hb_Null_AAT_Lookup[2] = {0xFF, 0xFF}; /* hb_face_t */ +static inline unsigned +load_num_glyphs_from_loca (const hb_face_t *face) +{ + unsigned ret = 0; + + unsigned indexToLocFormat = face->table.head->indexToLocFormat; + + if (indexToLocFormat <= 1) + { + bool short_offset = 0 == indexToLocFormat; + hb_blob_t *loca_blob = face->table.loca.get_blob (); + ret = hb_max (1u, loca_blob->length / (short_offset ? 2 : 4)) - 1; + } + + return ret; +} + +static inline unsigned +load_num_glyphs_from_maxp (const hb_face_t *face) +{ + return face->table.maxp->get_num_glyphs (); +} + unsigned int hb_face_t::load_num_glyphs () const { - hb_sanitize_context_t c = hb_sanitize_context_t (); - c.set_num_glyphs (0); /* So we don't recurse ad infinitum. 
*/ - hb_blob_t *maxp_blob = c.reference_table<OT::maxp> (this); - const OT::maxp *maxp_table = maxp_blob->as<OT::maxp> (); + unsigned ret = 0; + +#ifndef HB_NO_BORING_EXPANSION + ret = hb_max (ret, load_num_glyphs_from_loca (this)); +#endif + + ret = hb_max (ret, load_num_glyphs_from_maxp (this)); - unsigned int ret = maxp_table->get_num_glyphs (); num_glyphs.set_relaxed (ret); - hb_blob_destroy (maxp_blob); return ret; } diff --git a/thirdparty/harfbuzz/src/hb-style.cc b/thirdparty/harfbuzz/src/hb-style.cc index c0c5c4832c..c7d7d713c2 100644 --- a/thirdparty/harfbuzz/src/hb-style.cc +++ b/thirdparty/harfbuzz/src/hb-style.cc @@ -46,13 +46,13 @@ static inline float _hb_angle_to_ratio (float a) { - return tanf (a * float (M_PI / 180.)); + return tanf (a * float (-M_PI / 180.)); } static inline float _hb_ratio_to_angle (float r) { - return atanf (r) * float (180. / M_PI); + return atanf (r) * float (-180. / M_PI); } /** @@ -72,8 +72,7 @@ float hb_style_get_value (hb_font_t *font, hb_style_tag_t style_tag) { if (unlikely (style_tag == HB_STYLE_TAG_SLANT_RATIO)) - return _hb_angle_to_ratio (hb_style_get_value (font, HB_STYLE_TAG_SLANT_ANGLE)) - + font->slant; + return _hb_angle_to_ratio (hb_style_get_value (font, HB_STYLE_TAG_SLANT_ANGLE)); hb_face_t *face = font->face; diff --git a/thirdparty/harfbuzz/src/hb-style.h b/thirdparty/harfbuzz/src/hb-style.h index 30a6f2b878..d17d2daa5f 100644 --- a/thirdparty/harfbuzz/src/hb-style.h +++ b/thirdparty/harfbuzz/src/hb-style.h @@ -43,8 +43,10 @@ HB_BEGIN_DECLS * @HB_STYLE_TAG_SLANT_ANGLE: Used to vary between upright and slanted text. Values * must be greater than -90 and less than +90. Values can be interpreted as * the angle, in counter-clockwise degrees, of oblique slant from whatever the - * designer considers to be upright for that font design. + * designer considers to be upright for that font design. Typical right-leaning + * Italic fonts have a negative slant angle (typically around -12) * @HB_STYLE_TAG_SLANT_RATIO: same as @HB_STYLE_TAG_SLANT_ANGLE expression as ratio. + * Typical right-leaning Italic fonts have a positive slant ratio (typically around 0.2) * @HB_STYLE_TAG_WIDTH: Used to vary width of text from narrower to wider. * Non-zero. Values can be interpreted as a percentage of whatever the font * designer considers “normal width” for that font design. diff --git a/thirdparty/harfbuzz/src/hb-subset-plan.cc b/thirdparty/harfbuzz/src/hb-subset-plan.cc index af4fcb8137..4481758415 100644 --- a/thirdparty/harfbuzz/src/hb-subset-plan.cc +++ b/thirdparty/harfbuzz/src/hb-subset-plan.cc @@ -111,7 +111,7 @@ static void _collect_layout_indices (hb_face_t *face, retain_all_features = false; continue; } - + if (visited_features.has (tag)) continue; @@ -249,9 +249,9 @@ static void _colr_closure (hb_face_t *face, hb_set_t glyphset_colrv0; for (hb_codepoint_t gid : glyphs_colred->iter ()) colr.closure_glyphs (gid, &glyphset_colrv0); - + glyphs_colred->union_ (glyphset_colrv0); - + //closure for COLRv1 colr.closure_forV1 (glyphs_colred, &layer_indices, &palette_indices); } while (iteration_count++ <= HB_CLOSURE_MAX_STAGES && @@ -458,7 +458,7 @@ _nameid_closure (hb_face_t *face, } /** - * hb_subset_plan_create: + * hb_subset_plan_create_or_fail: * @face: font face to create the plan for. * @input: a #hb_subset_input_t input. * @@ -467,17 +467,18 @@ _nameid_closure (hb_face_t *face, * which tables and glyphs should be retained. * * Return value: (transfer full): New subset plan. Destroy with - * hb_subset_plan_destroy(). + * hb_subset_plan_destroy(). 
If there is a failure creating the plan + * nullptr will be returned. * - * Since: 1.7.5 + * Since: 4.0.0 **/ hb_subset_plan_t * -hb_subset_plan_create (hb_face_t *face, - const hb_subset_input_t *input) +hb_subset_plan_create_or_fail (hb_face_t *face, + const hb_subset_input_t *input) { hb_subset_plan_t *plan; if (unlikely (!(plan = hb_object_create<hb_subset_plan_t> ()))) - return const_cast<hb_subset_plan_t *> (&Null (hb_subset_plan_t)); + return nullptr; plan->successful = true; plan->flags = input->flags; @@ -514,8 +515,9 @@ hb_subset_plan_create (hb_face_t *face, plan->layout_variation_indices = hb_set_create (); plan->layout_variation_idx_map = hb_map_create (); - if (plan->in_error ()) { - return plan; + if (unlikely (plan->in_error ())) { + hb_subset_plan_destroy (plan); + return nullptr; } _populate_unicodes_to_retain (input->sets.unicodes, input->sets.glyphs, plan); @@ -532,6 +534,10 @@ hb_subset_plan_create (hb_face_t *face, plan->reverse_glyph_map, &plan->_num_output_glyphs); + if (unlikely (plan->in_error ())) { + hb_subset_plan_destroy (plan); + return nullptr; + } return plan; } @@ -542,7 +548,7 @@ hb_subset_plan_create (hb_face_t *face, * Decreases the reference count on @plan, and if it reaches zero, destroys * @plan, freeing all memory. * - * Since: 1.7.5 + * Since: 4.0.0 **/ void hb_subset_plan_destroy (hb_subset_plan_t *plan) @@ -596,3 +602,116 @@ hb_subset_plan_destroy (hb_subset_plan_t *plan) hb_free (plan); } + +/** + * hb_subset_plan_old_to_new_glyph_mapping: + * @plan: a subsetting plan. + * + * Returns the mapping between glyphs in the original font to glyphs in the + * subset that will be produced by @plan + * + * Return value: (transfer none): + * A pointer to the #hb_map_t of the mapping. + * + * Since: 4.0.0 + **/ +const hb_map_t* +hb_subset_plan_old_to_new_glyph_mapping (const hb_subset_plan_t *plan) +{ + return plan->glyph_map; +} + +/** + * hb_subset_plan_new_to_old_glyph_mapping: + * @plan: a subsetting plan. + * + * Returns the mapping between glyphs in the subset that will be produced by + * @plan and the glyph in the original font. + * + * Return value: (transfer none): + * A pointer to the #hb_map_t of the mapping. + * + * Since: 4.0.0 + **/ +const hb_map_t* +hb_subset_plan_new_to_old_glyph_mapping (const hb_subset_plan_t *plan) +{ + return plan->reverse_glyph_map; +} + +/** + * hb_subset_plan_unicode_to_old_glyph_mapping: + * @plan: a subsetting plan. + * + * Returns the mapping between codepoints in the original font and the + * associated glyph id in the original font. + * + * Return value: (transfer none): + * A pointer to the #hb_map_t of the mapping. + * + * Since: 4.0.0 + **/ +const hb_map_t* +hb_subset_plan_unicode_to_old_glyph_mapping (const hb_subset_plan_t *plan) +{ + return plan->codepoint_to_glyph; +} + +/** + * hb_subset_plan_reference: (skip) + * @plan: a #hb_subset_plan_t object. + * + * Increases the reference count on @plan. + * + * Return value: @plan. + * + * Since: 4.0.0 + **/ +hb_subset_plan_t * +hb_subset_plan_reference (hb_subset_plan_t *plan) +{ + return hb_object_reference (plan); +} + +/** + * hb_subset_plan_set_user_data: (skip) + * @plan: a #hb_subset_plan_t object. + * @key: The user-data key to set + * @data: A pointer to the user data + * @destroy: (nullable): A callback to call when @data is not needed anymore + * @replace: Whether to replace an existing data with the same key + * + * Attaches a user-data key/data pair to the given subset plan object. 
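// Editor's note: a minimal usage sketch of the subset-plan API documented in this hunk; it is
// not part of the upstream sources. It assumes the HarfBuzz 4.0.0 headers (hb.h, hb-subset.h);
// the helper name and the font-path parameter are placeholders, and error handling is abbreviated.
#include <hb.h>
#include <hb-subset.h>

static hb_face_t *
subset_to_single_char (const char *font_path, hb_codepoint_t cp)
{
  hb_blob_t *blob = hb_blob_create_from_file (font_path);
  hb_face_t *face = hb_face_create (blob, 0);
  hb_blob_destroy (blob);

  hb_subset_input_t *input = hb_subset_input_create_or_fail ();
  hb_set_add (hb_subset_input_unicode_set (input), cp);       // retain a single codepoint

  // Creating the plan explicitly (instead of calling hb_subset_or_fail()) exposes the
  // glyph remapping before the subset face is actually built.
  hb_face_t *subset = NULL;
  hb_subset_plan_t *plan = hb_subset_plan_create_or_fail (face, input);
  if (plan)
  {
    const hb_map_t *old_to_new = hb_subset_plan_old_to_new_glyph_mapping (plan);
    (void) old_to_new;                                        // query with hb_map_get (old_to_new, gid)

    subset = hb_subset_plan_execute_or_fail (plan);           // nullptr if subsetting failed
    hb_subset_plan_destroy (plan);
  }

  hb_subset_input_destroy (input);
  hb_face_destroy (face);
  return subset;
}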
+ * + * Return value: %true if success, %false otherwise + * + * Since: 4.0.0 + **/ +hb_bool_t +hb_subset_plan_set_user_data (hb_subset_plan_t *plan, + hb_user_data_key_t *key, + void *data, + hb_destroy_func_t destroy, + hb_bool_t replace) +{ + return hb_object_set_user_data (plan, key, data, destroy, replace); +} + +/** + * hb_subset_plan_get_user_data: (skip) + * @plan: a #hb_subset_plan_t object. + * @key: The user-data key to query + * + * Fetches the user data associated with the specified key, + * attached to the specified subset plan object. + * + * Return value: (transfer none): A pointer to the user data + * + * Since: 4.0.0 + **/ +void * +hb_subset_plan_get_user_data (const hb_subset_plan_t *plan, + hb_user_data_key_t *key) +{ + return hb_object_get_user_data (plan, key); +} diff --git a/thirdparty/harfbuzz/src/hb-subset-plan.hh b/thirdparty/harfbuzz/src/hb-subset-plan.hh index b9244e5cb2..ab2c4c302c 100644 --- a/thirdparty/harfbuzz/src/hb-subset-plan.hh +++ b/thirdparty/harfbuzz/src/hb-subset-plan.hh @@ -198,13 +198,4 @@ struct hb_subset_plan_t } }; -typedef struct hb_subset_plan_t hb_subset_plan_t; - -HB_INTERNAL hb_subset_plan_t * -hb_subset_plan_create (hb_face_t *face, - const hb_subset_input_t *input); - -HB_INTERNAL void -hb_subset_plan_destroy (hb_subset_plan_t *plan); - #endif /* HB_SUBSET_PLAN_HH */ diff --git a/thirdparty/harfbuzz/src/hb-subset.cc b/thirdparty/harfbuzz/src/hb-subset.cc index bb46e5b97f..aa8f2c6fb0 100644 --- a/thirdparty/harfbuzz/src/hb-subset.cc +++ b/thirdparty/harfbuzz/src/hb-subset.cc @@ -343,9 +343,33 @@ hb_subset_or_fail (hb_face_t *source, const hb_subset_input_t *input) { if (unlikely (!input || !source)) return hb_face_get_empty (); - hb_subset_plan_t *plan = hb_subset_plan_create (source, input); - if (unlikely (plan->in_error ())) { - hb_subset_plan_destroy (plan); + hb_subset_plan_t *plan = hb_subset_plan_create_or_fail (source, input); + if (unlikely (!plan)) { + return nullptr; + } + + hb_face_t * result = hb_subset_plan_execute_or_fail (plan); + hb_subset_plan_destroy (plan); + return result; +} + + +/** + * hb_subset_plan_execute_or_fail: + * @plan: a subsetting plan. + * + * Executes the provided subsetting @plan. + * + * Return value: + * on success returns a reference to generated font subset. If the subsetting operation fails + * returns nullptr. + * + * Since: 4.0.0 + **/ +hb_face_t * +hb_subset_plan_execute_or_fail (hb_subset_plan_t *plan) +{ + if (unlikely (!plan || plan->in_error ())) { return nullptr; } @@ -353,7 +377,7 @@ hb_subset_or_fail (hb_face_t *source, const hb_subset_input_t *input) bool success = true; hb_tag_t table_tags[32]; unsigned offset = 0, num_tables = ARRAY_LENGTH (table_tags); - while ((hb_face_get_table_tags (source, offset, &num_tables, table_tags), num_tables)) + while ((hb_face_get_table_tags (plan->source, offset, &num_tables, table_tags), num_tables)) { for (unsigned i = 0; i < num_tables; ++i) { @@ -367,8 +391,5 @@ hb_subset_or_fail (hb_face_t *source, const hb_subset_input_t *input) } end: - hb_face_t *result = success ? hb_face_reference (plan->dest) : nullptr; - - hb_subset_plan_destroy (plan); - return result; + return success ? 
hb_face_reference (plan->dest) : nullptr; } diff --git a/thirdparty/harfbuzz/src/hb-subset.h b/thirdparty/harfbuzz/src/hb-subset.h index 1c65a4da1c..a2799d91e8 100644 --- a/thirdparty/harfbuzz/src/hb-subset.h +++ b/thirdparty/harfbuzz/src/hb-subset.h @@ -40,6 +40,15 @@ HB_BEGIN_DECLS typedef struct hb_subset_input_t hb_subset_input_t; /** + * hb_subset_plan_t: + * + * Contains information about how the subset operation will be executed. + * Such as mappings from the old glyph ids to the new ones in the subset. + */ + +typedef struct hb_subset_plan_t hb_subset_plan_t; + +/** * hb_subset_flags_t: * @HB_SUBSET_FLAGS_DEFAULT: all flags at their default value of false. * @HB_SUBSET_FLAGS_NO_HINTING: If set hinting instructions will be dropped in @@ -124,7 +133,7 @@ hb_subset_input_set_user_data (hb_subset_input_t *input, HB_EXTERN void * hb_subset_input_get_user_data (const hb_subset_input_t *input, - hb_user_data_key_t *key); + hb_user_data_key_t *key); HB_EXTERN hb_set_t * hb_subset_input_unicode_set (hb_subset_input_t *input); @@ -145,6 +154,41 @@ hb_subset_input_set_flags (hb_subset_input_t *input, HB_EXTERN hb_face_t * hb_subset_or_fail (hb_face_t *source, const hb_subset_input_t *input); +HB_EXTERN hb_face_t * +hb_subset_plan_execute_or_fail (hb_subset_plan_t *plan); + +HB_EXTERN hb_subset_plan_t * +hb_subset_plan_create_or_fail (hb_face_t *face, + const hb_subset_input_t *input); + +HB_EXTERN void +hb_subset_plan_destroy (hb_subset_plan_t *plan); + +HB_EXTERN const hb_map_t* +hb_subset_plan_old_to_new_glyph_mapping (const hb_subset_plan_t *plan); + +HB_EXTERN const hb_map_t* +hb_subset_plan_new_to_old_glyph_mapping (const hb_subset_plan_t *plan); + +HB_EXTERN const hb_map_t* +hb_subset_plan_unicode_to_old_glyph_mapping (const hb_subset_plan_t *plan); + + +HB_EXTERN hb_subset_plan_t * +hb_subset_plan_reference (hb_subset_plan_t *plan); + +HB_EXTERN hb_bool_t +hb_subset_plan_set_user_data (hb_subset_plan_t *plan, + hb_user_data_key_t *key, + void *data, + hb_destroy_func_t destroy, + hb_bool_t replace); + +HB_EXTERN void * +hb_subset_plan_get_user_data (const hb_subset_plan_t *plan, + hb_user_data_key_t *key); + + HB_END_DECLS #endif /* HB_SUBSET_H */ diff --git a/thirdparty/harfbuzz/src/hb-version.h b/thirdparty/harfbuzz/src/hb-version.h index 493a09f8cf..dd2c5288cc 100644 --- a/thirdparty/harfbuzz/src/hb-version.h +++ b/thirdparty/harfbuzz/src/hb-version.h @@ -41,26 +41,26 @@ HB_BEGIN_DECLS * * The major component of the library version available at compile-time. */ -#define HB_VERSION_MAJOR 3 +#define HB_VERSION_MAJOR 4 /** * HB_VERSION_MINOR: * * The minor component of the library version available at compile-time. */ -#define HB_VERSION_MINOR 3 +#define HB_VERSION_MINOR 0 /** * HB_VERSION_MICRO: * * The micro component of the library version available at compile-time. */ -#define HB_VERSION_MICRO 2 +#define HB_VERSION_MICRO 0 /** * HB_VERSION_STRING: * * A string literal containing the library version available at compile-time. */ -#define HB_VERSION_STRING "3.3.2" +#define HB_VERSION_STRING "4.0.0" /** * HB_VERSION_ATLEAST: diff --git a/thirdparty/misc/stb_rect_pack.h b/thirdparty/misc/stb_rect_pack.h index 5c848de0e7..6a633ce666 100644 --- a/thirdparty/misc/stb_rect_pack.h +++ b/thirdparty/misc/stb_rect_pack.h @@ -1,9 +1,15 @@ -// stb_rect_pack.h - v1.00 - public domain - rectangle packing +// stb_rect_pack.h - v1.01 - public domain - rectangle packing // Sean Barrett 2014 // // Useful for e.g. packing rectangular textures into an atlas. // Does not do rotation. 
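// Editor's note: a stand-alone sketch of the stb_rect_pack API updated below; it is not part of
// the upstream header. With v1.01 coordinates are always 32-bit ints, so targets wider than
// 65535 no longer need STBRP_LARGE_RECTS. The function name and sizes here are arbitrary.
#define STB_RECT_PACK_IMPLEMENTATION
#include "stb_rect_pack.h"

static int pack_two_rects (void)
{
  stbrp_context ctx;
  stbrp_node nodes[1024];                        // recommended: at least as many nodes as the target width
  stbrp_rect rects[2] = {
    { 0, 256, 128, 0, 0, 0 },                    // fields: id, w, h, x, y, was_packed
    { 1, 640, 480, 0, 0, 0 },
  };

  stbrp_init_target (&ctx, 1024, 1024, nodes, 1024);
  int all_packed = stbrp_pack_rects (&ctx, rects, 2);   // returns 1 when every rect was placed

  // rects[i].x / rects[i].y now hold packed positions; was_packed is non-zero for rects that fit.
  return all_packed;
}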
// +// Before #including, +// +// #define STB_RECT_PACK_IMPLEMENTATION +// +// in the file that you want to have the implementation. +// // Not necessarily the awesomest packing method, but better than // the totally naive one in stb_truetype (which is primarily what // this is meant to replace). @@ -35,6 +41,7 @@ // // Version history: // +// 1.01 (2021-07-11) always use large rect mode, expose STBRP__MAXVAL in public section // 1.00 (2019-02-25) avoid small space waste; gracefully fail too-wide rectangles // 0.99 (2019-02-07) warning fixes // 0.11 (2017-03-03) return packing success/fail result @@ -75,11 +82,10 @@ typedef struct stbrp_context stbrp_context; typedef struct stbrp_node stbrp_node; typedef struct stbrp_rect stbrp_rect; -#ifdef STBRP_LARGE_RECTS typedef int stbrp_coord; -#else -typedef unsigned short stbrp_coord; -#endif + +#define STBRP__MAXVAL 0x7fffffff +// Mostly for internal use, but this is the maximum supported coordinate value. STBRP_DEF int stbrp_pack_rects (stbrp_context *context, stbrp_rect *rects, int num_rects); // Assign packed locations to rectangles. The rectangles are of type @@ -209,8 +215,10 @@ struct stbrp_context #ifdef _MSC_VER #define STBRP__NOTUSED(v) (void)(v) +#define STBRP__CDECL __cdecl #else #define STBRP__NOTUSED(v) (void)sizeof(v) +#define STBRP__CDECL #endif enum @@ -253,9 +261,6 @@ STBRP_DEF void stbrp_setup_allow_out_of_mem(stbrp_context *context, int allow_ou STBRP_DEF void stbrp_init_target(stbrp_context *context, int width, int height, stbrp_node *nodes, int num_nodes) { int i; -#ifndef STBRP_LARGE_RECTS - STBRP_ASSERT(width <= 0xffff && height <= 0xffff); -#endif for (i=0; i < num_nodes-1; ++i) nodes[i].next = &nodes[i+1]; @@ -274,11 +279,7 @@ STBRP_DEF void stbrp_init_target(stbrp_context *context, int width, int height, context->extra[0].y = 0; context->extra[0].next = &context->extra[1]; context->extra[1].x = (stbrp_coord) width; -#ifdef STBRP_LARGE_RECTS context->extra[1].y = (1<<30); -#else - context->extra[1].y = 65535; -#endif context->extra[1].next = NULL; } @@ -520,7 +521,7 @@ static stbrp__findresult stbrp__skyline_pack_rectangle(stbrp_context *context, i return res; } -static int rect_height_compare(const void *a, const void *b) +static int STBRP__CDECL rect_height_compare(const void *a, const void *b) { const stbrp_rect *p = (const stbrp_rect *) a; const stbrp_rect *q = (const stbrp_rect *) b; @@ -531,19 +532,13 @@ static int rect_height_compare(const void *a, const void *b) return (p->w > q->w) ? -1 : (p->w < q->w); } -static int rect_original_order(const void *a, const void *b) +static int STBRP__CDECL rect_original_order(const void *a, const void *b) { const stbrp_rect *p = (const stbrp_rect *) a; const stbrp_rect *q = (const stbrp_rect *) b; return (p->was_packed < q->was_packed) ? 
-1 : (p->was_packed > q->was_packed); } -#ifdef STBRP_LARGE_RECTS -#define STBRP__MAXVAL 0xffffffff -#else -#define STBRP__MAXVAL 0xffff -#endif - STBRP_DEF int stbrp_pack_rects(stbrp_context *context, stbrp_rect *rects, int num_rects) { int i, all_rects_packed = 1; diff --git a/thirdparty/volk/volk.c b/thirdparty/volk/volk.c index bb8b928326..c3383ee41d 100644 --- a/thirdparty/volk/volk.c +++ b/thirdparty/volk/volk.c @@ -176,6 +176,9 @@ static void volkGenLoadInstance(void* context, PFN_vkVoidFunction (*load)(void*, vkGetPhysicalDeviceQueueFamilyProperties2 = (PFN_vkGetPhysicalDeviceQueueFamilyProperties2)load(context, "vkGetPhysicalDeviceQueueFamilyProperties2"); vkGetPhysicalDeviceSparseImageFormatProperties2 = (PFN_vkGetPhysicalDeviceSparseImageFormatProperties2)load(context, "vkGetPhysicalDeviceSparseImageFormatProperties2"); #endif /* defined(VK_VERSION_1_1) */ +#if defined(VK_VERSION_1_3) + vkGetPhysicalDeviceToolProperties = (PFN_vkGetPhysicalDeviceToolProperties)load(context, "vkGetPhysicalDeviceToolProperties"); +#endif /* defined(VK_VERSION_1_3) */ #if defined(VK_EXT_acquire_drm_display) vkAcquireDrmDisplayEXT = (PFN_vkAcquireDrmDisplayEXT)load(context, "vkAcquireDrmDisplayEXT"); vkGetDrmDisplayEXT = (PFN_vkGetDrmDisplayEXT)load(context, "vkGetDrmDisplayEXT"); @@ -503,6 +506,44 @@ static void volkGenLoadDevice(void* context, PFN_vkVoidFunction (*load)(void*, c vkSignalSemaphore = (PFN_vkSignalSemaphore)load(context, "vkSignalSemaphore"); vkWaitSemaphores = (PFN_vkWaitSemaphores)load(context, "vkWaitSemaphores"); #endif /* defined(VK_VERSION_1_2) */ +#if defined(VK_VERSION_1_3) + vkCmdBeginRendering = (PFN_vkCmdBeginRendering)load(context, "vkCmdBeginRendering"); + vkCmdBindVertexBuffers2 = (PFN_vkCmdBindVertexBuffers2)load(context, "vkCmdBindVertexBuffers2"); + vkCmdBlitImage2 = (PFN_vkCmdBlitImage2)load(context, "vkCmdBlitImage2"); + vkCmdCopyBuffer2 = (PFN_vkCmdCopyBuffer2)load(context, "vkCmdCopyBuffer2"); + vkCmdCopyBufferToImage2 = (PFN_vkCmdCopyBufferToImage2)load(context, "vkCmdCopyBufferToImage2"); + vkCmdCopyImage2 = (PFN_vkCmdCopyImage2)load(context, "vkCmdCopyImage2"); + vkCmdCopyImageToBuffer2 = (PFN_vkCmdCopyImageToBuffer2)load(context, "vkCmdCopyImageToBuffer2"); + vkCmdEndRendering = (PFN_vkCmdEndRendering)load(context, "vkCmdEndRendering"); + vkCmdPipelineBarrier2 = (PFN_vkCmdPipelineBarrier2)load(context, "vkCmdPipelineBarrier2"); + vkCmdResetEvent2 = (PFN_vkCmdResetEvent2)load(context, "vkCmdResetEvent2"); + vkCmdResolveImage2 = (PFN_vkCmdResolveImage2)load(context, "vkCmdResolveImage2"); + vkCmdSetCullMode = (PFN_vkCmdSetCullMode)load(context, "vkCmdSetCullMode"); + vkCmdSetDepthBiasEnable = (PFN_vkCmdSetDepthBiasEnable)load(context, "vkCmdSetDepthBiasEnable"); + vkCmdSetDepthBoundsTestEnable = (PFN_vkCmdSetDepthBoundsTestEnable)load(context, "vkCmdSetDepthBoundsTestEnable"); + vkCmdSetDepthCompareOp = (PFN_vkCmdSetDepthCompareOp)load(context, "vkCmdSetDepthCompareOp"); + vkCmdSetDepthTestEnable = (PFN_vkCmdSetDepthTestEnable)load(context, "vkCmdSetDepthTestEnable"); + vkCmdSetDepthWriteEnable = (PFN_vkCmdSetDepthWriteEnable)load(context, "vkCmdSetDepthWriteEnable"); + vkCmdSetEvent2 = (PFN_vkCmdSetEvent2)load(context, "vkCmdSetEvent2"); + vkCmdSetFrontFace = (PFN_vkCmdSetFrontFace)load(context, "vkCmdSetFrontFace"); + vkCmdSetPrimitiveRestartEnable = (PFN_vkCmdSetPrimitiveRestartEnable)load(context, "vkCmdSetPrimitiveRestartEnable"); + vkCmdSetPrimitiveTopology = (PFN_vkCmdSetPrimitiveTopology)load(context, "vkCmdSetPrimitiveTopology"); + 
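// Editor's note: an illustrative bootstrap sequence, not part of volk.c, showing how the
// Vulkan 1.3 entry points loaded here become callable. The helper name and parameters are
// placeholders; only the volk*/vk* calls are real API, and error handling is abbreviated.
#include "volk.h"

static VkResult init_vulkan_loader (const VkInstanceCreateInfo *instance_info,
                                    const VkDeviceCreateInfo *device_info,
                                    VkInstance *out_instance, VkDevice *out_device)
{
  VkResult res = volkInitialize ();              // locates vkGetInstanceProcAddr in the driver
  if (res != VK_SUCCESS)
    return res;

  res = vkCreateInstance (instance_info, NULL, out_instance);
  if (res != VK_SUCCESS)
    return res;
  volkLoadInstance (*out_instance);              // fills instance-level pointers

  uint32_t count = 1;
  VkPhysicalDevice physical_device;
  vkEnumeratePhysicalDevices (*out_instance, &count, &physical_device);  // first device only, for brevity

  res = vkCreateDevice (physical_device, device_info, NULL, out_device);
  if (res != VK_SUCCESS)
    return res;
  volkLoadDevice (*out_device);                  // fills vkCmdBeginRendering, vkQueueSubmit2, ...

  return VK_SUCCESS;
}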
vkCmdSetRasterizerDiscardEnable = (PFN_vkCmdSetRasterizerDiscardEnable)load(context, "vkCmdSetRasterizerDiscardEnable"); + vkCmdSetScissorWithCount = (PFN_vkCmdSetScissorWithCount)load(context, "vkCmdSetScissorWithCount"); + vkCmdSetStencilOp = (PFN_vkCmdSetStencilOp)load(context, "vkCmdSetStencilOp"); + vkCmdSetStencilTestEnable = (PFN_vkCmdSetStencilTestEnable)load(context, "vkCmdSetStencilTestEnable"); + vkCmdSetViewportWithCount = (PFN_vkCmdSetViewportWithCount)load(context, "vkCmdSetViewportWithCount"); + vkCmdWaitEvents2 = (PFN_vkCmdWaitEvents2)load(context, "vkCmdWaitEvents2"); + vkCmdWriteTimestamp2 = (PFN_vkCmdWriteTimestamp2)load(context, "vkCmdWriteTimestamp2"); + vkCreatePrivateDataSlot = (PFN_vkCreatePrivateDataSlot)load(context, "vkCreatePrivateDataSlot"); + vkDestroyPrivateDataSlot = (PFN_vkDestroyPrivateDataSlot)load(context, "vkDestroyPrivateDataSlot"); + vkGetDeviceBufferMemoryRequirements = (PFN_vkGetDeviceBufferMemoryRequirements)load(context, "vkGetDeviceBufferMemoryRequirements"); + vkGetDeviceImageMemoryRequirements = (PFN_vkGetDeviceImageMemoryRequirements)load(context, "vkGetDeviceImageMemoryRequirements"); + vkGetDeviceImageSparseMemoryRequirements = (PFN_vkGetDeviceImageSparseMemoryRequirements)load(context, "vkGetDeviceImageSparseMemoryRequirements"); + vkGetPrivateData = (PFN_vkGetPrivateData)load(context, "vkGetPrivateData"); + vkQueueSubmit2 = (PFN_vkQueueSubmit2)load(context, "vkQueueSubmit2"); + vkSetPrivateData = (PFN_vkSetPrivateData)load(context, "vkSetPrivateData"); +#endif /* defined(VK_VERSION_1_3) */ #if defined(VK_AMD_buffer_marker) vkCmdWriteBufferMarkerAMD = (PFN_vkCmdWriteBufferMarkerAMD)load(context, "vkCmdWriteBufferMarkerAMD"); #endif /* defined(VK_AMD_buffer_marker) */ @@ -593,6 +634,9 @@ static void volkGenLoadDevice(void* context, PFN_vkVoidFunction (*load)(void*, c vkCmdDrawMultiEXT = (PFN_vkCmdDrawMultiEXT)load(context, "vkCmdDrawMultiEXT"); vkCmdDrawMultiIndexedEXT = (PFN_vkCmdDrawMultiIndexedEXT)load(context, "vkCmdDrawMultiIndexedEXT"); #endif /* defined(VK_EXT_multi_draw) */ +#if defined(VK_EXT_pageable_device_local_memory) + vkSetDeviceMemoryPriorityEXT = (PFN_vkSetDeviceMemoryPriorityEXT)load(context, "vkSetDeviceMemoryPriorityEXT"); +#endif /* defined(VK_EXT_pageable_device_local_memory) */ #if defined(VK_EXT_private_data) vkCreatePrivateDataSlotEXT = (PFN_vkCreatePrivateDataSlotEXT)load(context, "vkCreatePrivateDataSlotEXT"); vkDestroyPrivateDataSlotEXT = (PFN_vkDestroyPrivateDataSlotEXT)load(context, "vkDestroyPrivateDataSlotEXT"); @@ -619,6 +663,13 @@ static void volkGenLoadDevice(void* context, PFN_vkVoidFunction (*load)(void*, c #if defined(VK_EXT_vertex_input_dynamic_state) vkCmdSetVertexInputEXT = (PFN_vkCmdSetVertexInputEXT)load(context, "vkCmdSetVertexInputEXT"); #endif /* defined(VK_EXT_vertex_input_dynamic_state) */ +#if defined(VK_FUCHSIA_buffer_collection) + vkCreateBufferCollectionFUCHSIA = (PFN_vkCreateBufferCollectionFUCHSIA)load(context, "vkCreateBufferCollectionFUCHSIA"); + vkDestroyBufferCollectionFUCHSIA = (PFN_vkDestroyBufferCollectionFUCHSIA)load(context, "vkDestroyBufferCollectionFUCHSIA"); + vkGetBufferCollectionPropertiesFUCHSIA = (PFN_vkGetBufferCollectionPropertiesFUCHSIA)load(context, "vkGetBufferCollectionPropertiesFUCHSIA"); + vkSetBufferCollectionBufferConstraintsFUCHSIA = (PFN_vkSetBufferCollectionBufferConstraintsFUCHSIA)load(context, "vkSetBufferCollectionBufferConstraintsFUCHSIA"); + vkSetBufferCollectionImageConstraintsFUCHSIA = (PFN_vkSetBufferCollectionImageConstraintsFUCHSIA)load(context, 
"vkSetBufferCollectionImageConstraintsFUCHSIA"); +#endif /* defined(VK_FUCHSIA_buffer_collection) */ #if defined(VK_FUCHSIA_external_memory) vkGetMemoryZirconHandleFUCHSIA = (PFN_vkGetMemoryZirconHandleFUCHSIA)load(context, "vkGetMemoryZirconHandleFUCHSIA"); vkGetMemoryZirconHandlePropertiesFUCHSIA = (PFN_vkGetMemoryZirconHandlePropertiesFUCHSIA)load(context, "vkGetMemoryZirconHandlePropertiesFUCHSIA"); @@ -714,6 +765,10 @@ static void volkGenLoadDevice(void* context, PFN_vkVoidFunction (*load)(void*, c vkCmdDrawIndexedIndirectCountKHR = (PFN_vkCmdDrawIndexedIndirectCountKHR)load(context, "vkCmdDrawIndexedIndirectCountKHR"); vkCmdDrawIndirectCountKHR = (PFN_vkCmdDrawIndirectCountKHR)load(context, "vkCmdDrawIndirectCountKHR"); #endif /* defined(VK_KHR_draw_indirect_count) */ +#if defined(VK_KHR_dynamic_rendering) + vkCmdBeginRenderingKHR = (PFN_vkCmdBeginRenderingKHR)load(context, "vkCmdBeginRenderingKHR"); + vkCmdEndRenderingKHR = (PFN_vkCmdEndRenderingKHR)load(context, "vkCmdEndRenderingKHR"); +#endif /* defined(VK_KHR_dynamic_rendering) */ #if defined(VK_KHR_external_fence_fd) vkGetFenceFdKHR = (PFN_vkGetFenceFdKHR)load(context, "vkGetFenceFdKHR"); vkImportFenceFdKHR = (PFN_vkImportFenceFdKHR)load(context, "vkImportFenceFdKHR"); @@ -752,6 +807,11 @@ static void volkGenLoadDevice(void* context, PFN_vkVoidFunction (*load)(void*, c #if defined(VK_KHR_maintenance3) vkGetDescriptorSetLayoutSupportKHR = (PFN_vkGetDescriptorSetLayoutSupportKHR)load(context, "vkGetDescriptorSetLayoutSupportKHR"); #endif /* defined(VK_KHR_maintenance3) */ +#if defined(VK_KHR_maintenance4) + vkGetDeviceBufferMemoryRequirementsKHR = (PFN_vkGetDeviceBufferMemoryRequirementsKHR)load(context, "vkGetDeviceBufferMemoryRequirementsKHR"); + vkGetDeviceImageMemoryRequirementsKHR = (PFN_vkGetDeviceImageMemoryRequirementsKHR)load(context, "vkGetDeviceImageMemoryRequirementsKHR"); + vkGetDeviceImageSparseMemoryRequirementsKHR = (PFN_vkGetDeviceImageSparseMemoryRequirementsKHR)load(context, "vkGetDeviceImageSparseMemoryRequirementsKHR"); +#endif /* defined(VK_KHR_maintenance4) */ #if defined(VK_KHR_performance_query) vkAcquireProfilingLockKHR = (PFN_vkAcquireProfilingLockKHR)load(context, "vkAcquireProfilingLockKHR"); vkReleaseProfilingLockKHR = (PFN_vkReleaseProfilingLockKHR)load(context, "vkReleaseProfilingLockKHR"); @@ -1063,6 +1123,44 @@ static void volkGenLoadDeviceTable(struct VolkDeviceTable* table, void* context, table->vkSignalSemaphore = (PFN_vkSignalSemaphore)load(context, "vkSignalSemaphore"); table->vkWaitSemaphores = (PFN_vkWaitSemaphores)load(context, "vkWaitSemaphores"); #endif /* defined(VK_VERSION_1_2) */ +#if defined(VK_VERSION_1_3) + table->vkCmdBeginRendering = (PFN_vkCmdBeginRendering)load(context, "vkCmdBeginRendering"); + table->vkCmdBindVertexBuffers2 = (PFN_vkCmdBindVertexBuffers2)load(context, "vkCmdBindVertexBuffers2"); + table->vkCmdBlitImage2 = (PFN_vkCmdBlitImage2)load(context, "vkCmdBlitImage2"); + table->vkCmdCopyBuffer2 = (PFN_vkCmdCopyBuffer2)load(context, "vkCmdCopyBuffer2"); + table->vkCmdCopyBufferToImage2 = (PFN_vkCmdCopyBufferToImage2)load(context, "vkCmdCopyBufferToImage2"); + table->vkCmdCopyImage2 = (PFN_vkCmdCopyImage2)load(context, "vkCmdCopyImage2"); + table->vkCmdCopyImageToBuffer2 = (PFN_vkCmdCopyImageToBuffer2)load(context, "vkCmdCopyImageToBuffer2"); + table->vkCmdEndRendering = (PFN_vkCmdEndRendering)load(context, "vkCmdEndRendering"); + table->vkCmdPipelineBarrier2 = (PFN_vkCmdPipelineBarrier2)load(context, "vkCmdPipelineBarrier2"); + table->vkCmdResetEvent2 = 
(PFN_vkCmdResetEvent2)load(context, "vkCmdResetEvent2"); + table->vkCmdResolveImage2 = (PFN_vkCmdResolveImage2)load(context, "vkCmdResolveImage2"); + table->vkCmdSetCullMode = (PFN_vkCmdSetCullMode)load(context, "vkCmdSetCullMode"); + table->vkCmdSetDepthBiasEnable = (PFN_vkCmdSetDepthBiasEnable)load(context, "vkCmdSetDepthBiasEnable"); + table->vkCmdSetDepthBoundsTestEnable = (PFN_vkCmdSetDepthBoundsTestEnable)load(context, "vkCmdSetDepthBoundsTestEnable"); + table->vkCmdSetDepthCompareOp = (PFN_vkCmdSetDepthCompareOp)load(context, "vkCmdSetDepthCompareOp"); + table->vkCmdSetDepthTestEnable = (PFN_vkCmdSetDepthTestEnable)load(context, "vkCmdSetDepthTestEnable"); + table->vkCmdSetDepthWriteEnable = (PFN_vkCmdSetDepthWriteEnable)load(context, "vkCmdSetDepthWriteEnable"); + table->vkCmdSetEvent2 = (PFN_vkCmdSetEvent2)load(context, "vkCmdSetEvent2"); + table->vkCmdSetFrontFace = (PFN_vkCmdSetFrontFace)load(context, "vkCmdSetFrontFace"); + table->vkCmdSetPrimitiveRestartEnable = (PFN_vkCmdSetPrimitiveRestartEnable)load(context, "vkCmdSetPrimitiveRestartEnable"); + table->vkCmdSetPrimitiveTopology = (PFN_vkCmdSetPrimitiveTopology)load(context, "vkCmdSetPrimitiveTopology"); + table->vkCmdSetRasterizerDiscardEnable = (PFN_vkCmdSetRasterizerDiscardEnable)load(context, "vkCmdSetRasterizerDiscardEnable"); + table->vkCmdSetScissorWithCount = (PFN_vkCmdSetScissorWithCount)load(context, "vkCmdSetScissorWithCount"); + table->vkCmdSetStencilOp = (PFN_vkCmdSetStencilOp)load(context, "vkCmdSetStencilOp"); + table->vkCmdSetStencilTestEnable = (PFN_vkCmdSetStencilTestEnable)load(context, "vkCmdSetStencilTestEnable"); + table->vkCmdSetViewportWithCount = (PFN_vkCmdSetViewportWithCount)load(context, "vkCmdSetViewportWithCount"); + table->vkCmdWaitEvents2 = (PFN_vkCmdWaitEvents2)load(context, "vkCmdWaitEvents2"); + table->vkCmdWriteTimestamp2 = (PFN_vkCmdWriteTimestamp2)load(context, "vkCmdWriteTimestamp2"); + table->vkCreatePrivateDataSlot = (PFN_vkCreatePrivateDataSlot)load(context, "vkCreatePrivateDataSlot"); + table->vkDestroyPrivateDataSlot = (PFN_vkDestroyPrivateDataSlot)load(context, "vkDestroyPrivateDataSlot"); + table->vkGetDeviceBufferMemoryRequirements = (PFN_vkGetDeviceBufferMemoryRequirements)load(context, "vkGetDeviceBufferMemoryRequirements"); + table->vkGetDeviceImageMemoryRequirements = (PFN_vkGetDeviceImageMemoryRequirements)load(context, "vkGetDeviceImageMemoryRequirements"); + table->vkGetDeviceImageSparseMemoryRequirements = (PFN_vkGetDeviceImageSparseMemoryRequirements)load(context, "vkGetDeviceImageSparseMemoryRequirements"); + table->vkGetPrivateData = (PFN_vkGetPrivateData)load(context, "vkGetPrivateData"); + table->vkQueueSubmit2 = (PFN_vkQueueSubmit2)load(context, "vkQueueSubmit2"); + table->vkSetPrivateData = (PFN_vkSetPrivateData)load(context, "vkSetPrivateData"); +#endif /* defined(VK_VERSION_1_3) */ #if defined(VK_AMD_buffer_marker) table->vkCmdWriteBufferMarkerAMD = (PFN_vkCmdWriteBufferMarkerAMD)load(context, "vkCmdWriteBufferMarkerAMD"); #endif /* defined(VK_AMD_buffer_marker) */ @@ -1153,6 +1251,9 @@ static void volkGenLoadDeviceTable(struct VolkDeviceTable* table, void* context, table->vkCmdDrawMultiEXT = (PFN_vkCmdDrawMultiEXT)load(context, "vkCmdDrawMultiEXT"); table->vkCmdDrawMultiIndexedEXT = (PFN_vkCmdDrawMultiIndexedEXT)load(context, "vkCmdDrawMultiIndexedEXT"); #endif /* defined(VK_EXT_multi_draw) */ +#if defined(VK_EXT_pageable_device_local_memory) + table->vkSetDeviceMemoryPriorityEXT = (PFN_vkSetDeviceMemoryPriorityEXT)load(context, "vkSetDeviceMemoryPriorityEXT"); 
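// Editor's note: hypothetical use of the per-device table that volkGenLoadDeviceTable() fills;
// useful when several VkDevice objects coexist and the global pointers cannot be shared.
// Only the volk and Vulkan names are real API; the helper itself is a placeholder.
#include "volk.h"

static void record_barrier_with_table (VkDevice device, VkCommandBuffer cmd,
                                       const VkDependencyInfo *dep)
{
  struct VolkDeviceTable table;
  volkLoadDeviceTable (&table, device);          // calls below bypass the global function pointers

  if (table.vkCmdPipelineBarrier2)               // left NULL when the device lacks Vulkan 1.3
    table.vkCmdPipelineBarrier2 (cmd, dep);
}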
+#endif /* defined(VK_EXT_pageable_device_local_memory) */ #if defined(VK_EXT_private_data) table->vkCreatePrivateDataSlotEXT = (PFN_vkCreatePrivateDataSlotEXT)load(context, "vkCreatePrivateDataSlotEXT"); table->vkDestroyPrivateDataSlotEXT = (PFN_vkDestroyPrivateDataSlotEXT)load(context, "vkDestroyPrivateDataSlotEXT"); @@ -1179,6 +1280,13 @@ static void volkGenLoadDeviceTable(struct VolkDeviceTable* table, void* context, #if defined(VK_EXT_vertex_input_dynamic_state) table->vkCmdSetVertexInputEXT = (PFN_vkCmdSetVertexInputEXT)load(context, "vkCmdSetVertexInputEXT"); #endif /* defined(VK_EXT_vertex_input_dynamic_state) */ +#if defined(VK_FUCHSIA_buffer_collection) + table->vkCreateBufferCollectionFUCHSIA = (PFN_vkCreateBufferCollectionFUCHSIA)load(context, "vkCreateBufferCollectionFUCHSIA"); + table->vkDestroyBufferCollectionFUCHSIA = (PFN_vkDestroyBufferCollectionFUCHSIA)load(context, "vkDestroyBufferCollectionFUCHSIA"); + table->vkGetBufferCollectionPropertiesFUCHSIA = (PFN_vkGetBufferCollectionPropertiesFUCHSIA)load(context, "vkGetBufferCollectionPropertiesFUCHSIA"); + table->vkSetBufferCollectionBufferConstraintsFUCHSIA = (PFN_vkSetBufferCollectionBufferConstraintsFUCHSIA)load(context, "vkSetBufferCollectionBufferConstraintsFUCHSIA"); + table->vkSetBufferCollectionImageConstraintsFUCHSIA = (PFN_vkSetBufferCollectionImageConstraintsFUCHSIA)load(context, "vkSetBufferCollectionImageConstraintsFUCHSIA"); +#endif /* defined(VK_FUCHSIA_buffer_collection) */ #if defined(VK_FUCHSIA_external_memory) table->vkGetMemoryZirconHandleFUCHSIA = (PFN_vkGetMemoryZirconHandleFUCHSIA)load(context, "vkGetMemoryZirconHandleFUCHSIA"); table->vkGetMemoryZirconHandlePropertiesFUCHSIA = (PFN_vkGetMemoryZirconHandlePropertiesFUCHSIA)load(context, "vkGetMemoryZirconHandlePropertiesFUCHSIA"); @@ -1274,6 +1382,10 @@ static void volkGenLoadDeviceTable(struct VolkDeviceTable* table, void* context, table->vkCmdDrawIndexedIndirectCountKHR = (PFN_vkCmdDrawIndexedIndirectCountKHR)load(context, "vkCmdDrawIndexedIndirectCountKHR"); table->vkCmdDrawIndirectCountKHR = (PFN_vkCmdDrawIndirectCountKHR)load(context, "vkCmdDrawIndirectCountKHR"); #endif /* defined(VK_KHR_draw_indirect_count) */ +#if defined(VK_KHR_dynamic_rendering) + table->vkCmdBeginRenderingKHR = (PFN_vkCmdBeginRenderingKHR)load(context, "vkCmdBeginRenderingKHR"); + table->vkCmdEndRenderingKHR = (PFN_vkCmdEndRenderingKHR)load(context, "vkCmdEndRenderingKHR"); +#endif /* defined(VK_KHR_dynamic_rendering) */ #if defined(VK_KHR_external_fence_fd) table->vkGetFenceFdKHR = (PFN_vkGetFenceFdKHR)load(context, "vkGetFenceFdKHR"); table->vkImportFenceFdKHR = (PFN_vkImportFenceFdKHR)load(context, "vkImportFenceFdKHR"); @@ -1312,6 +1424,11 @@ static void volkGenLoadDeviceTable(struct VolkDeviceTable* table, void* context, #if defined(VK_KHR_maintenance3) table->vkGetDescriptorSetLayoutSupportKHR = (PFN_vkGetDescriptorSetLayoutSupportKHR)load(context, "vkGetDescriptorSetLayoutSupportKHR"); #endif /* defined(VK_KHR_maintenance3) */ +#if defined(VK_KHR_maintenance4) + table->vkGetDeviceBufferMemoryRequirementsKHR = (PFN_vkGetDeviceBufferMemoryRequirementsKHR)load(context, "vkGetDeviceBufferMemoryRequirementsKHR"); + table->vkGetDeviceImageMemoryRequirementsKHR = (PFN_vkGetDeviceImageMemoryRequirementsKHR)load(context, "vkGetDeviceImageMemoryRequirementsKHR"); + table->vkGetDeviceImageSparseMemoryRequirementsKHR = (PFN_vkGetDeviceImageSparseMemoryRequirementsKHR)load(context, "vkGetDeviceImageSparseMemoryRequirementsKHR"); +#endif /* defined(VK_KHR_maintenance4) */ #if 
defined(VK_KHR_performance_query) table->vkAcquireProfilingLockKHR = (PFN_vkAcquireProfilingLockKHR)load(context, "vkAcquireProfilingLockKHR"); table->vkReleaseProfilingLockKHR = (PFN_vkReleaseProfilingLockKHR)load(context, "vkReleaseProfilingLockKHR"); @@ -1658,6 +1775,45 @@ PFN_vkResetQueryPool vkResetQueryPool; PFN_vkSignalSemaphore vkSignalSemaphore; PFN_vkWaitSemaphores vkWaitSemaphores; #endif /* defined(VK_VERSION_1_2) */ +#if defined(VK_VERSION_1_3) +PFN_vkCmdBeginRendering vkCmdBeginRendering; +PFN_vkCmdBindVertexBuffers2 vkCmdBindVertexBuffers2; +PFN_vkCmdBlitImage2 vkCmdBlitImage2; +PFN_vkCmdCopyBuffer2 vkCmdCopyBuffer2; +PFN_vkCmdCopyBufferToImage2 vkCmdCopyBufferToImage2; +PFN_vkCmdCopyImage2 vkCmdCopyImage2; +PFN_vkCmdCopyImageToBuffer2 vkCmdCopyImageToBuffer2; +PFN_vkCmdEndRendering vkCmdEndRendering; +PFN_vkCmdPipelineBarrier2 vkCmdPipelineBarrier2; +PFN_vkCmdResetEvent2 vkCmdResetEvent2; +PFN_vkCmdResolveImage2 vkCmdResolveImage2; +PFN_vkCmdSetCullMode vkCmdSetCullMode; +PFN_vkCmdSetDepthBiasEnable vkCmdSetDepthBiasEnable; +PFN_vkCmdSetDepthBoundsTestEnable vkCmdSetDepthBoundsTestEnable; +PFN_vkCmdSetDepthCompareOp vkCmdSetDepthCompareOp; +PFN_vkCmdSetDepthTestEnable vkCmdSetDepthTestEnable; +PFN_vkCmdSetDepthWriteEnable vkCmdSetDepthWriteEnable; +PFN_vkCmdSetEvent2 vkCmdSetEvent2; +PFN_vkCmdSetFrontFace vkCmdSetFrontFace; +PFN_vkCmdSetPrimitiveRestartEnable vkCmdSetPrimitiveRestartEnable; +PFN_vkCmdSetPrimitiveTopology vkCmdSetPrimitiveTopology; +PFN_vkCmdSetRasterizerDiscardEnable vkCmdSetRasterizerDiscardEnable; +PFN_vkCmdSetScissorWithCount vkCmdSetScissorWithCount; +PFN_vkCmdSetStencilOp vkCmdSetStencilOp; +PFN_vkCmdSetStencilTestEnable vkCmdSetStencilTestEnable; +PFN_vkCmdSetViewportWithCount vkCmdSetViewportWithCount; +PFN_vkCmdWaitEvents2 vkCmdWaitEvents2; +PFN_vkCmdWriteTimestamp2 vkCmdWriteTimestamp2; +PFN_vkCreatePrivateDataSlot vkCreatePrivateDataSlot; +PFN_vkDestroyPrivateDataSlot vkDestroyPrivateDataSlot; +PFN_vkGetDeviceBufferMemoryRequirements vkGetDeviceBufferMemoryRequirements; +PFN_vkGetDeviceImageMemoryRequirements vkGetDeviceImageMemoryRequirements; +PFN_vkGetDeviceImageSparseMemoryRequirements vkGetDeviceImageSparseMemoryRequirements; +PFN_vkGetPhysicalDeviceToolProperties vkGetPhysicalDeviceToolProperties; +PFN_vkGetPrivateData vkGetPrivateData; +PFN_vkQueueSubmit2 vkQueueSubmit2; +PFN_vkSetPrivateData vkSetPrivateData; +#endif /* defined(VK_VERSION_1_3) */ #if defined(VK_AMD_buffer_marker) PFN_vkCmdWriteBufferMarkerAMD vkCmdWriteBufferMarkerAMD; #endif /* defined(VK_AMD_buffer_marker) */ @@ -1792,6 +1948,9 @@ PFN_vkCreateMetalSurfaceEXT vkCreateMetalSurfaceEXT; PFN_vkCmdDrawMultiEXT vkCmdDrawMultiEXT; PFN_vkCmdDrawMultiIndexedEXT vkCmdDrawMultiIndexedEXT; #endif /* defined(VK_EXT_multi_draw) */ +#if defined(VK_EXT_pageable_device_local_memory) +PFN_vkSetDeviceMemoryPriorityEXT vkSetDeviceMemoryPriorityEXT; +#endif /* defined(VK_EXT_pageable_device_local_memory) */ #if defined(VK_EXT_private_data) PFN_vkCreatePrivateDataSlotEXT vkCreatePrivateDataSlotEXT; PFN_vkDestroyPrivateDataSlotEXT vkDestroyPrivateDataSlotEXT; @@ -1822,6 +1981,13 @@ PFN_vkMergeValidationCachesEXT vkMergeValidationCachesEXT; #if defined(VK_EXT_vertex_input_dynamic_state) PFN_vkCmdSetVertexInputEXT vkCmdSetVertexInputEXT; #endif /* defined(VK_EXT_vertex_input_dynamic_state) */ +#if defined(VK_FUCHSIA_buffer_collection) +PFN_vkCreateBufferCollectionFUCHSIA vkCreateBufferCollectionFUCHSIA; +PFN_vkDestroyBufferCollectionFUCHSIA vkDestroyBufferCollectionFUCHSIA; 
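// Editor's note: a sketch, not upstream code, of the usual pattern for the optional entry points
// declared here: volk leaves anything the driver does not expose as NULL, so features are gated
// at run time. The helper name is a placeholder.
#include "volk.h"

static void begin_rendering_compat (VkCommandBuffer cmd, const VkRenderingInfo *info)
{
  if (vkCmdBeginRendering)                       // core Vulkan 1.3
    vkCmdBeginRendering (cmd, info);
  else if (vkCmdBeginRenderingKHR)               // VK_KHR_dynamic_rendering fallback
    vkCmdBeginRenderingKHR (cmd, info);
  // otherwise dynamic rendering is unavailable and a classic render pass is required
}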
+PFN_vkGetBufferCollectionPropertiesFUCHSIA vkGetBufferCollectionPropertiesFUCHSIA; +PFN_vkSetBufferCollectionBufferConstraintsFUCHSIA vkSetBufferCollectionBufferConstraintsFUCHSIA; +PFN_vkSetBufferCollectionImageConstraintsFUCHSIA vkSetBufferCollectionImageConstraintsFUCHSIA; +#endif /* defined(VK_FUCHSIA_buffer_collection) */ #if defined(VK_FUCHSIA_external_memory) PFN_vkGetMemoryZirconHandleFUCHSIA vkGetMemoryZirconHandleFUCHSIA; PFN_vkGetMemoryZirconHandlePropertiesFUCHSIA vkGetMemoryZirconHandlePropertiesFUCHSIA; @@ -1938,6 +2104,10 @@ PFN_vkCreateSharedSwapchainsKHR vkCreateSharedSwapchainsKHR; PFN_vkCmdDrawIndexedIndirectCountKHR vkCmdDrawIndexedIndirectCountKHR; PFN_vkCmdDrawIndirectCountKHR vkCmdDrawIndirectCountKHR; #endif /* defined(VK_KHR_draw_indirect_count) */ +#if defined(VK_KHR_dynamic_rendering) +PFN_vkCmdBeginRenderingKHR vkCmdBeginRenderingKHR; +PFN_vkCmdEndRenderingKHR vkCmdEndRenderingKHR; +#endif /* defined(VK_KHR_dynamic_rendering) */ #if defined(VK_KHR_external_fence_capabilities) PFN_vkGetPhysicalDeviceExternalFencePropertiesKHR vkGetPhysicalDeviceExternalFencePropertiesKHR; #endif /* defined(VK_KHR_external_fence_capabilities) */ @@ -2005,6 +2175,11 @@ PFN_vkTrimCommandPoolKHR vkTrimCommandPoolKHR; #if defined(VK_KHR_maintenance3) PFN_vkGetDescriptorSetLayoutSupportKHR vkGetDescriptorSetLayoutSupportKHR; #endif /* defined(VK_KHR_maintenance3) */ +#if defined(VK_KHR_maintenance4) +PFN_vkGetDeviceBufferMemoryRequirementsKHR vkGetDeviceBufferMemoryRequirementsKHR; +PFN_vkGetDeviceImageMemoryRequirementsKHR vkGetDeviceImageMemoryRequirementsKHR; +PFN_vkGetDeviceImageSparseMemoryRequirementsKHR vkGetDeviceImageSparseMemoryRequirementsKHR; +#endif /* defined(VK_KHR_maintenance4) */ #if defined(VK_KHR_performance_query) PFN_vkAcquireProfilingLockKHR vkAcquireProfilingLockKHR; PFN_vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR; diff --git a/thirdparty/volk/volk.h b/thirdparty/volk/volk.h index 2e292ca114..cdeedfc5ff 100644 --- a/thirdparty/volk/volk.h +++ b/thirdparty/volk/volk.h @@ -15,7 +15,7 @@ #endif /* VOLK_GENERATE_VERSION_DEFINE */ -#define VOLK_HEADER_VERSION 190 +#define VOLK_HEADER_VERSION 204 /* VOLK_GENERATE_VERSION_DEFINE */ #ifndef VK_NO_PROTOTYPES @@ -285,6 +285,44 @@ struct VolkDeviceTable PFN_vkSignalSemaphore vkSignalSemaphore; PFN_vkWaitSemaphores vkWaitSemaphores; #endif /* defined(VK_VERSION_1_2) */ +#if defined(VK_VERSION_1_3) + PFN_vkCmdBeginRendering vkCmdBeginRendering; + PFN_vkCmdBindVertexBuffers2 vkCmdBindVertexBuffers2; + PFN_vkCmdBlitImage2 vkCmdBlitImage2; + PFN_vkCmdCopyBuffer2 vkCmdCopyBuffer2; + PFN_vkCmdCopyBufferToImage2 vkCmdCopyBufferToImage2; + PFN_vkCmdCopyImage2 vkCmdCopyImage2; + PFN_vkCmdCopyImageToBuffer2 vkCmdCopyImageToBuffer2; + PFN_vkCmdEndRendering vkCmdEndRendering; + PFN_vkCmdPipelineBarrier2 vkCmdPipelineBarrier2; + PFN_vkCmdResetEvent2 vkCmdResetEvent2; + PFN_vkCmdResolveImage2 vkCmdResolveImage2; + PFN_vkCmdSetCullMode vkCmdSetCullMode; + PFN_vkCmdSetDepthBiasEnable vkCmdSetDepthBiasEnable; + PFN_vkCmdSetDepthBoundsTestEnable vkCmdSetDepthBoundsTestEnable; + PFN_vkCmdSetDepthCompareOp vkCmdSetDepthCompareOp; + PFN_vkCmdSetDepthTestEnable vkCmdSetDepthTestEnable; + PFN_vkCmdSetDepthWriteEnable vkCmdSetDepthWriteEnable; + PFN_vkCmdSetEvent2 vkCmdSetEvent2; + PFN_vkCmdSetFrontFace vkCmdSetFrontFace; + PFN_vkCmdSetPrimitiveRestartEnable vkCmdSetPrimitiveRestartEnable; + PFN_vkCmdSetPrimitiveTopology vkCmdSetPrimitiveTopology; + 
PFN_vkCmdSetRasterizerDiscardEnable vkCmdSetRasterizerDiscardEnable; + PFN_vkCmdSetScissorWithCount vkCmdSetScissorWithCount; + PFN_vkCmdSetStencilOp vkCmdSetStencilOp; + PFN_vkCmdSetStencilTestEnable vkCmdSetStencilTestEnable; + PFN_vkCmdSetViewportWithCount vkCmdSetViewportWithCount; + PFN_vkCmdWaitEvents2 vkCmdWaitEvents2; + PFN_vkCmdWriteTimestamp2 vkCmdWriteTimestamp2; + PFN_vkCreatePrivateDataSlot vkCreatePrivateDataSlot; + PFN_vkDestroyPrivateDataSlot vkDestroyPrivateDataSlot; + PFN_vkGetDeviceBufferMemoryRequirements vkGetDeviceBufferMemoryRequirements; + PFN_vkGetDeviceImageMemoryRequirements vkGetDeviceImageMemoryRequirements; + PFN_vkGetDeviceImageSparseMemoryRequirements vkGetDeviceImageSparseMemoryRequirements; + PFN_vkGetPrivateData vkGetPrivateData; + PFN_vkQueueSubmit2 vkQueueSubmit2; + PFN_vkSetPrivateData vkSetPrivateData; +#endif /* defined(VK_VERSION_1_3) */ #if defined(VK_AMD_buffer_marker) PFN_vkCmdWriteBufferMarkerAMD vkCmdWriteBufferMarkerAMD; #endif /* defined(VK_AMD_buffer_marker) */ @@ -375,6 +413,9 @@ struct VolkDeviceTable PFN_vkCmdDrawMultiEXT vkCmdDrawMultiEXT; PFN_vkCmdDrawMultiIndexedEXT vkCmdDrawMultiIndexedEXT; #endif /* defined(VK_EXT_multi_draw) */ +#if defined(VK_EXT_pageable_device_local_memory) + PFN_vkSetDeviceMemoryPriorityEXT vkSetDeviceMemoryPriorityEXT; +#endif /* defined(VK_EXT_pageable_device_local_memory) */ #if defined(VK_EXT_private_data) PFN_vkCreatePrivateDataSlotEXT vkCreatePrivateDataSlotEXT; PFN_vkDestroyPrivateDataSlotEXT vkDestroyPrivateDataSlotEXT; @@ -401,6 +442,13 @@ struct VolkDeviceTable #if defined(VK_EXT_vertex_input_dynamic_state) PFN_vkCmdSetVertexInputEXT vkCmdSetVertexInputEXT; #endif /* defined(VK_EXT_vertex_input_dynamic_state) */ +#if defined(VK_FUCHSIA_buffer_collection) + PFN_vkCreateBufferCollectionFUCHSIA vkCreateBufferCollectionFUCHSIA; + PFN_vkDestroyBufferCollectionFUCHSIA vkDestroyBufferCollectionFUCHSIA; + PFN_vkGetBufferCollectionPropertiesFUCHSIA vkGetBufferCollectionPropertiesFUCHSIA; + PFN_vkSetBufferCollectionBufferConstraintsFUCHSIA vkSetBufferCollectionBufferConstraintsFUCHSIA; + PFN_vkSetBufferCollectionImageConstraintsFUCHSIA vkSetBufferCollectionImageConstraintsFUCHSIA; +#endif /* defined(VK_FUCHSIA_buffer_collection) */ #if defined(VK_FUCHSIA_external_memory) PFN_vkGetMemoryZirconHandleFUCHSIA vkGetMemoryZirconHandleFUCHSIA; PFN_vkGetMemoryZirconHandlePropertiesFUCHSIA vkGetMemoryZirconHandlePropertiesFUCHSIA; @@ -496,6 +544,10 @@ struct VolkDeviceTable PFN_vkCmdDrawIndexedIndirectCountKHR vkCmdDrawIndexedIndirectCountKHR; PFN_vkCmdDrawIndirectCountKHR vkCmdDrawIndirectCountKHR; #endif /* defined(VK_KHR_draw_indirect_count) */ +#if defined(VK_KHR_dynamic_rendering) + PFN_vkCmdBeginRenderingKHR vkCmdBeginRenderingKHR; + PFN_vkCmdEndRenderingKHR vkCmdEndRenderingKHR; +#endif /* defined(VK_KHR_dynamic_rendering) */ #if defined(VK_KHR_external_fence_fd) PFN_vkGetFenceFdKHR vkGetFenceFdKHR; PFN_vkImportFenceFdKHR vkImportFenceFdKHR; @@ -534,6 +586,11 @@ struct VolkDeviceTable #if defined(VK_KHR_maintenance3) PFN_vkGetDescriptorSetLayoutSupportKHR vkGetDescriptorSetLayoutSupportKHR; #endif /* defined(VK_KHR_maintenance3) */ +#if defined(VK_KHR_maintenance4) + PFN_vkGetDeviceBufferMemoryRequirementsKHR vkGetDeviceBufferMemoryRequirementsKHR; + PFN_vkGetDeviceImageMemoryRequirementsKHR vkGetDeviceImageMemoryRequirementsKHR; + PFN_vkGetDeviceImageSparseMemoryRequirementsKHR vkGetDeviceImageSparseMemoryRequirementsKHR; +#endif /* defined(VK_KHR_maintenance4) */ #if defined(VK_KHR_performance_query) 
PFN_vkAcquireProfilingLockKHR vkAcquireProfilingLockKHR; PFN_vkReleaseProfilingLockKHR vkReleaseProfilingLockKHR; @@ -872,6 +929,45 @@ extern PFN_vkResetQueryPool vkResetQueryPool; extern PFN_vkSignalSemaphore vkSignalSemaphore; extern PFN_vkWaitSemaphores vkWaitSemaphores; #endif /* defined(VK_VERSION_1_2) */ +#if defined(VK_VERSION_1_3) +extern PFN_vkCmdBeginRendering vkCmdBeginRendering; +extern PFN_vkCmdBindVertexBuffers2 vkCmdBindVertexBuffers2; +extern PFN_vkCmdBlitImage2 vkCmdBlitImage2; +extern PFN_vkCmdCopyBuffer2 vkCmdCopyBuffer2; +extern PFN_vkCmdCopyBufferToImage2 vkCmdCopyBufferToImage2; +extern PFN_vkCmdCopyImage2 vkCmdCopyImage2; +extern PFN_vkCmdCopyImageToBuffer2 vkCmdCopyImageToBuffer2; +extern PFN_vkCmdEndRendering vkCmdEndRendering; +extern PFN_vkCmdPipelineBarrier2 vkCmdPipelineBarrier2; +extern PFN_vkCmdResetEvent2 vkCmdResetEvent2; +extern PFN_vkCmdResolveImage2 vkCmdResolveImage2; +extern PFN_vkCmdSetCullMode vkCmdSetCullMode; +extern PFN_vkCmdSetDepthBiasEnable vkCmdSetDepthBiasEnable; +extern PFN_vkCmdSetDepthBoundsTestEnable vkCmdSetDepthBoundsTestEnable; +extern PFN_vkCmdSetDepthCompareOp vkCmdSetDepthCompareOp; +extern PFN_vkCmdSetDepthTestEnable vkCmdSetDepthTestEnable; +extern PFN_vkCmdSetDepthWriteEnable vkCmdSetDepthWriteEnable; +extern PFN_vkCmdSetEvent2 vkCmdSetEvent2; +extern PFN_vkCmdSetFrontFace vkCmdSetFrontFace; +extern PFN_vkCmdSetPrimitiveRestartEnable vkCmdSetPrimitiveRestartEnable; +extern PFN_vkCmdSetPrimitiveTopology vkCmdSetPrimitiveTopology; +extern PFN_vkCmdSetRasterizerDiscardEnable vkCmdSetRasterizerDiscardEnable; +extern PFN_vkCmdSetScissorWithCount vkCmdSetScissorWithCount; +extern PFN_vkCmdSetStencilOp vkCmdSetStencilOp; +extern PFN_vkCmdSetStencilTestEnable vkCmdSetStencilTestEnable; +extern PFN_vkCmdSetViewportWithCount vkCmdSetViewportWithCount; +extern PFN_vkCmdWaitEvents2 vkCmdWaitEvents2; +extern PFN_vkCmdWriteTimestamp2 vkCmdWriteTimestamp2; +extern PFN_vkCreatePrivateDataSlot vkCreatePrivateDataSlot; +extern PFN_vkDestroyPrivateDataSlot vkDestroyPrivateDataSlot; +extern PFN_vkGetDeviceBufferMemoryRequirements vkGetDeviceBufferMemoryRequirements; +extern PFN_vkGetDeviceImageMemoryRequirements vkGetDeviceImageMemoryRequirements; +extern PFN_vkGetDeviceImageSparseMemoryRequirements vkGetDeviceImageSparseMemoryRequirements; +extern PFN_vkGetPhysicalDeviceToolProperties vkGetPhysicalDeviceToolProperties; +extern PFN_vkGetPrivateData vkGetPrivateData; +extern PFN_vkQueueSubmit2 vkQueueSubmit2; +extern PFN_vkSetPrivateData vkSetPrivateData; +#endif /* defined(VK_VERSION_1_3) */ #if defined(VK_AMD_buffer_marker) extern PFN_vkCmdWriteBufferMarkerAMD vkCmdWriteBufferMarkerAMD; #endif /* defined(VK_AMD_buffer_marker) */ @@ -1006,6 +1102,9 @@ extern PFN_vkCreateMetalSurfaceEXT vkCreateMetalSurfaceEXT; extern PFN_vkCmdDrawMultiEXT vkCmdDrawMultiEXT; extern PFN_vkCmdDrawMultiIndexedEXT vkCmdDrawMultiIndexedEXT; #endif /* defined(VK_EXT_multi_draw) */ +#if defined(VK_EXT_pageable_device_local_memory) +extern PFN_vkSetDeviceMemoryPriorityEXT vkSetDeviceMemoryPriorityEXT; +#endif /* defined(VK_EXT_pageable_device_local_memory) */ #if defined(VK_EXT_private_data) extern PFN_vkCreatePrivateDataSlotEXT vkCreatePrivateDataSlotEXT; extern PFN_vkDestroyPrivateDataSlotEXT vkDestroyPrivateDataSlotEXT; @@ -1036,6 +1135,13 @@ extern PFN_vkMergeValidationCachesEXT vkMergeValidationCachesEXT; #if defined(VK_EXT_vertex_input_dynamic_state) extern PFN_vkCmdSetVertexInputEXT vkCmdSetVertexInputEXT; #endif /* defined(VK_EXT_vertex_input_dynamic_state) */ +#if 
defined(VK_FUCHSIA_buffer_collection) +extern PFN_vkCreateBufferCollectionFUCHSIA vkCreateBufferCollectionFUCHSIA; +extern PFN_vkDestroyBufferCollectionFUCHSIA vkDestroyBufferCollectionFUCHSIA; +extern PFN_vkGetBufferCollectionPropertiesFUCHSIA vkGetBufferCollectionPropertiesFUCHSIA; +extern PFN_vkSetBufferCollectionBufferConstraintsFUCHSIA vkSetBufferCollectionBufferConstraintsFUCHSIA; +extern PFN_vkSetBufferCollectionImageConstraintsFUCHSIA vkSetBufferCollectionImageConstraintsFUCHSIA; +#endif /* defined(VK_FUCHSIA_buffer_collection) */ #if defined(VK_FUCHSIA_external_memory) extern PFN_vkGetMemoryZirconHandleFUCHSIA vkGetMemoryZirconHandleFUCHSIA; extern PFN_vkGetMemoryZirconHandlePropertiesFUCHSIA vkGetMemoryZirconHandlePropertiesFUCHSIA; @@ -1152,6 +1258,10 @@ extern PFN_vkCreateSharedSwapchainsKHR vkCreateSharedSwapchainsKHR; extern PFN_vkCmdDrawIndexedIndirectCountKHR vkCmdDrawIndexedIndirectCountKHR; extern PFN_vkCmdDrawIndirectCountKHR vkCmdDrawIndirectCountKHR; #endif /* defined(VK_KHR_draw_indirect_count) */ +#if defined(VK_KHR_dynamic_rendering) +extern PFN_vkCmdBeginRenderingKHR vkCmdBeginRenderingKHR; +extern PFN_vkCmdEndRenderingKHR vkCmdEndRenderingKHR; +#endif /* defined(VK_KHR_dynamic_rendering) */ #if defined(VK_KHR_external_fence_capabilities) extern PFN_vkGetPhysicalDeviceExternalFencePropertiesKHR vkGetPhysicalDeviceExternalFencePropertiesKHR; #endif /* defined(VK_KHR_external_fence_capabilities) */ @@ -1219,6 +1329,11 @@ extern PFN_vkTrimCommandPoolKHR vkTrimCommandPoolKHR; #if defined(VK_KHR_maintenance3) extern PFN_vkGetDescriptorSetLayoutSupportKHR vkGetDescriptorSetLayoutSupportKHR; #endif /* defined(VK_KHR_maintenance3) */ +#if defined(VK_KHR_maintenance4) +extern PFN_vkGetDeviceBufferMemoryRequirementsKHR vkGetDeviceBufferMemoryRequirementsKHR; +extern PFN_vkGetDeviceImageMemoryRequirementsKHR vkGetDeviceImageMemoryRequirementsKHR; +extern PFN_vkGetDeviceImageSparseMemoryRequirementsKHR vkGetDeviceImageSparseMemoryRequirementsKHR; +#endif /* defined(VK_KHR_maintenance4) */ #if defined(VK_KHR_performance_query) extern PFN_vkAcquireProfilingLockKHR vkAcquireProfilingLockKHR; extern PFN_vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR; diff --git a/thirdparty/vulkan/patches/01-VMA-fix-nullability.patch b/thirdparty/vulkan/patches/01-VMA-fix-nullability.patch deleted file mode 100644 index 7deada97b0..0000000000 --- a/thirdparty/vulkan/patches/01-VMA-fix-nullability.patch +++ /dev/null @@ -1,80 +0,0 @@ -diff --git a/thirdparty/vulkan/vk_mem_alloc.h b/thirdparty/vulkan/vk_mem_alloc.h -index 52b403bede..d88c305a7c 100644 ---- a/thirdparty/vulkan/vk_mem_alloc.h -+++ b/thirdparty/vulkan/vk_mem_alloc.h -@@ -2366,7 +2366,7 @@ VMA_CALL_PRE VkBool32 VMA_CALL_POST vmaIsVirtualBlockEmpty( - */ - VMA_CALL_PRE void VMA_CALL_POST vmaGetVirtualAllocationInfo( - VmaVirtualBlock VMA_NOT_NULL virtualBlock, -- VmaVirtualAllocation allocation, VmaVirtualAllocationInfo* VMA_NOT_NULL pVirtualAllocInfo); -+ VmaVirtualAllocation VMA_NOT_NULL_NON_DISPATCHABLE allocation, VmaVirtualAllocationInfo* VMA_NOT_NULL pVirtualAllocInfo); - - /** \brief Allocates new virtual allocation inside given #VmaVirtualBlock. 
- -diff --git a/thirdparty/vulkan/vk_mem_alloc.h b/thirdparty/vulkan/vk_mem_alloc.h -index d1138a7bc8..74c66b9789 100644 ---- a/thirdparty/vulkan/vk_mem_alloc.h -+++ b/thirdparty/vulkan/vk_mem_alloc.h -@@ -2386,7 +2386,7 @@ If the allocation fails due to not enough free space available, `VK_ERROR_OUT_OF - VMA_CALL_PRE VkResult VMA_CALL_POST vmaVirtualAllocate( - VmaVirtualBlock VMA_NOT_NULL virtualBlock, - const VmaVirtualAllocationCreateInfo* VMA_NOT_NULL pCreateInfo, -- VmaVirtualAllocation* VMA_NOT_NULL pAllocation, -+ VmaVirtualAllocation VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pAllocation, - VkDeviceSize* VMA_NULLABLE pOffset); - - /** \brief Frees virtual allocation inside given #VmaVirtualBlock. -@@ -2391,7 +2391,7 @@ It is correct to call this function with `allocation == VK_NULL_HANDLE` - it doe - */ - VMA_CALL_PRE void VMA_CALL_POST vmaVirtualFree( - VmaVirtualBlock VMA_NOT_NULL virtualBlock, -- VmaVirtualAllocation allocation); -+ VmaVirtualAllocation VMA_NULLABLE_NON_DISPATCHABLE allocation); - - /** \brief Frees all virtual allocations inside given #VmaVirtualBlock. - -@@ -2408,7 +2408,7 @@ VMA_CALL_PRE void VMA_CALL_POST vmaClearVirtualBlock( - */ - VMA_CALL_PRE void VMA_CALL_POST vmaSetVirtualAllocationUserData( - VmaVirtualBlock VMA_NOT_NULL virtualBlock, -- VmaVirtualAllocation allocation, -+ VmaVirtualAllocation VMA_NOT_NULL_NON_DISPATCHABLE allocation, - void* VMA_NULLABLE pUserData); - - /** \brief Calculates and returns statistics about virtual allocations and memory usage in given #VmaVirtualBlock. -@@ -17835,7 +17835,7 @@ VMA_CALL_PRE VkBool32 VMA_CALL_POST vmaIsVirtualBlockEmpty(VmaVirtualBlock VMA_N - } - - VMA_CALL_PRE void VMA_CALL_POST vmaGetVirtualAllocationInfo(VmaVirtualBlock VMA_NOT_NULL virtualBlock, -- VmaVirtualAllocation allocation, VmaVirtualAllocationInfo* VMA_NOT_NULL pVirtualAllocInfo) -+ VmaVirtualAllocation VMA_NOT_NULL_NON_DISPATCHABLE allocation, VmaVirtualAllocationInfo* VMA_NOT_NULL pVirtualAllocInfo) - { - VMA_ASSERT(virtualBlock != VK_NULL_HANDLE && pVirtualAllocInfo != VMA_NULL); - VMA_DEBUG_LOG("vmaGetVirtualAllocationInfo"); -@@ -17853,7 +17853,7 @@ VMA_CALL_PRE VkResult VMA_CALL_POST vmaVirtualAllocate(VmaVirtualBlock VMA_NOT_N - return virtualBlock->Allocate(*pCreateInfo, *pAllocation, pOffset); - } - --VMA_CALL_PRE void VMA_CALL_POST vmaVirtualFree(VmaVirtualBlock VMA_NOT_NULL virtualBlock, VmaVirtualAllocation allocation) -+VMA_CALL_PRE void VMA_CALL_POST vmaVirtualFree(VmaVirtualBlock VMA_NOT_NULL virtualBlock, VmaVirtualAllocation VMA_NULLABLE_NON_DISPATCHABLE allocation) - { - if(allocation != VK_NULL_HANDLE) - { -@@ -17873,7 +17873,7 @@ VMA_CALL_PRE void VMA_CALL_POST vmaClearVirtualBlock(VmaVirtualBlock VMA_NOT_NUL - } - - VMA_CALL_PRE void VMA_CALL_POST vmaSetVirtualAllocationUserData(VmaVirtualBlock VMA_NOT_NULL virtualBlock, -- VmaVirtualAllocation allocation, void* VMA_NULLABLE pUserData) -+ VmaVirtualAllocation VMA_NOT_NULL_NON_DISPATCHABLE allocation, void* VMA_NULLABLE pUserData) - { - VMA_ASSERT(virtualBlock != VK_NULL_HANDLE); - VMA_DEBUG_LOG("vmaSetVirtualAllocationUserData"); -@@ -17848,7 +17848,7 @@ VMA_CALL_PRE void VMA_CALL_POST vmaGetVirtualAllocationInfo(VmaVirtualBlock VMA_ - } - - VMA_CALL_PRE VkResult VMA_CALL_POST vmaVirtualAllocate(VmaVirtualBlock VMA_NOT_NULL virtualBlock, -- const VmaVirtualAllocationCreateInfo* VMA_NOT_NULL pCreateInfo, VmaVirtualAllocation* VMA_NOT_NULL pAllocation, -+ const VmaVirtualAllocationCreateInfo* VMA_NOT_NULL pCreateInfo, VmaVirtualAllocation VMA_NULLABLE_NON_DISPATCHABLE* 
VMA_NOT_NULL pAllocation, - VkDeviceSize* VMA_NULLABLE pOffset) - { - VMA_ASSERT(virtualBlock != VK_NULL_HANDLE && pCreateInfo != VMA_NULL && pAllocation != VMA_NULL); diff --git a/thirdparty/vulkan/patches/03-VMA-universal-pools.patch b/thirdparty/vulkan/patches/03-VMA-universal-pools.patch deleted file mode 100644 index a5de3aaace..0000000000 --- a/thirdparty/vulkan/patches/03-VMA-universal-pools.patch +++ /dev/null @@ -1,567 +0,0 @@ -diff --git a/thirdparty/vulkan/vk_mem_alloc.h b/thirdparty/vulkan/vk_mem_alloc.h -index 74c66b9789..89e00e6326 100644 ---- a/thirdparty/vulkan/vk_mem_alloc.h -+++ b/thirdparty/vulkan/vk_mem_alloc.h -@@ -1127,31 +1127,26 @@ typedef struct VmaAllocationCreateInfo - /** \brief Intended usage of memory. - - You can leave #VMA_MEMORY_USAGE_UNKNOWN if you specify memory requirements in other way. \n -- If `pool` is not null, this member is ignored. - */ - VmaMemoryUsage usage; - /** \brief Flags that must be set in a Memory Type chosen for an allocation. - -- Leave 0 if you specify memory requirements in other way. \n -- If `pool` is not null, this member is ignored.*/ -+ Leave 0 if you specify memory requirements in other way.*/ - VkMemoryPropertyFlags requiredFlags; - /** \brief Flags that preferably should be set in a memory type chosen for an allocation. - -- Set to 0 if no additional flags are preferred. \n -- If `pool` is not null, this member is ignored. */ -+ Set to 0 if no additional flags are preferred.*/ - VkMemoryPropertyFlags preferredFlags; - /** \brief Bitmask containing one bit set for every memory type acceptable for this allocation. - - Value 0 is equivalent to `UINT32_MAX` - it means any memory type is accepted if - it meets other requirements specified by this structure, with no further - restrictions on memory type index. \n -- If `pool` is not null, this member is ignored. - */ - uint32_t memoryTypeBits; - /** \brief Pool that this allocation should be created in. - -- Leave `VK_NULL_HANDLE` to allocate from default pool. If not null, members: -- `usage`, `requiredFlags`, `preferredFlags`, `memoryTypeBits` are ignored. -+ Leave `VK_NULL_HANDLE` to allocate from default pool. - */ - VmaPool VMA_NULLABLE pool; - /** \brief Custom general-purpose pointer that will be stored in #VmaAllocation, can be read as VmaAllocationInfo::pUserData and changed using vmaSetAllocationUserData(). -@@ -1173,9 +1168,6 @@ typedef struct VmaAllocationCreateInfo - /// Describes parameter of created #VmaPool. - typedef struct VmaPoolCreateInfo - { -- /** \brief Vulkan memory type index to allocate this pool from. -- */ -- uint32_t memoryTypeIndex; - /** \brief Use combination of #VmaPoolCreateFlagBits. 
- */ - VmaPoolCreateFlags flags; -@@ -10904,13 +10896,12 @@ struct VmaPool_T - friend struct VmaPoolListItemTraits; - VMA_CLASS_NO_COPY(VmaPool_T) - public: -- VmaBlockVector m_BlockVector; -- VmaDedicatedAllocationList m_DedicatedAllocations; -+ VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES]; -+ VmaDedicatedAllocationList m_DedicatedAllocations[VK_MAX_MEMORY_TYPES]; - - VmaPool_T( - VmaAllocator hAllocator, -- const VmaPoolCreateInfo& createInfo, -- VkDeviceSize preferredBlockSize); -+ const VmaPoolCreateInfo& createInfo); - ~VmaPool_T(); - - uint32_t GetId() const { return m_Id; } -@@ -10924,6 +10915,7 @@ public: - #endif - - private: -+ const VmaAllocator m_hAllocator; - uint32_t m_Id; - char* m_Name; - VmaPool_T* m_PrevPool = VMA_NULL; -@@ -11405,8 +11397,10 @@ private: - - void ValidateVulkanFunctions(); - -+public: // I'm sorry - VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex); - -+private: - VkResult AllocateMemoryOfType( - VmaPool pool, - VkDeviceSize size, -@@ -14176,30 +14170,36 @@ void VmaDefragmentationContext_T::AddPools(uint32_t poolCount, const VmaPool* pP - { - VmaPool pool = pPools[poolIndex]; - VMA_ASSERT(pool); -- // Pools with algorithm other than default are not defragmented. -- if (pool->m_BlockVector.GetAlgorithm() == 0) -+ for(uint32_t memTypeIndex = 0; memTypeIndex < m_hAllocator->GetMemoryTypeCount(); ++memTypeIndex) - { -- VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL; -- -- for (size_t i = m_CustomPoolContexts.size(); i--; ) -+ if(pool->m_pBlockVectors[memTypeIndex]) - { -- if (m_CustomPoolContexts[i]->GetCustomPool() == pool) -+ // Pools with algorithm other than default are not defragmented. -+ if (pool->m_pBlockVectors[memTypeIndex]->GetAlgorithm() == 0) - { -- pBlockVectorDefragCtx = m_CustomPoolContexts[i]; -- break; -- } -- } -+ VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL; - -- if (!pBlockVectorDefragCtx) -- { -- pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)( -- m_hAllocator, -- pool, -- &pool->m_BlockVector); -- m_CustomPoolContexts.push_back(pBlockVectorDefragCtx); -- } -+ for (size_t i = m_CustomPoolContexts.size(); i--; ) -+ { -+ if (m_CustomPoolContexts[i]->GetCustomPool() == pool) -+ { -+ pBlockVectorDefragCtx = m_CustomPoolContexts[i]; -+ break; -+ } -+ } -+ -+ if (!pBlockVectorDefragCtx) -+ { -+ pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)( -+ m_hAllocator, -+ pool, -+ pool->m_pBlockVectors[memTypeIndex]); -+ m_CustomPoolContexts.push_back(pBlockVectorDefragCtx); -+ } - -- pBlockVectorDefragCtx->AddAll(); -+ pBlockVectorDefragCtx->AddAll(); -+ } -+ } - } - } - } -@@ -14214,6 +14214,7 @@ void VmaDefragmentationContext_T::AddAllocations( - { - const VmaAllocation hAlloc = pAllocations[allocIndex]; - VMA_ASSERT(hAlloc); -+ const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex(); - // DedicatedAlloc cannot be defragmented. - if (hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) - { -@@ -14224,7 +14225,7 @@ void VmaDefragmentationContext_T::AddAllocations( - if (hAllocPool != VK_NULL_HANDLE) - { - // Pools with algorithm other than default are not defragmented. 
-- if (hAllocPool->m_BlockVector.GetAlgorithm() == 0) -+ if (hAllocPool->m_pBlockVectors[memTypeIndex]->GetAlgorithm() == 0) - { - for (size_t i = m_CustomPoolContexts.size(); i--; ) - { -@@ -14239,7 +14240,7 @@ void VmaDefragmentationContext_T::AddAllocations( - pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)( - m_hAllocator, - hAllocPool, -- &hAllocPool->m_BlockVector); -+ hAllocPool->m_pBlockVectors[memTypeIndex]); - m_CustomPoolContexts.push_back(pBlockVectorDefragCtx); - } - } -@@ -14247,7 +14248,6 @@ void VmaDefragmentationContext_T::AddAllocations( - // This allocation belongs to default pool. - else - { -- const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex(); - pBlockVectorDefragCtx = m_DefaultPoolContexts[memTypeIndex]; - if (!pBlockVectorDefragCtx) - { -@@ -14481,41 +14481,61 @@ VkResult VmaDefragmentationContext_T::DefragmentPassEnd() - #ifndef _VMA_POOL_T_FUNCTIONS - VmaPool_T::VmaPool_T( - VmaAllocator hAllocator, -- const VmaPoolCreateInfo& createInfo, -- VkDeviceSize preferredBlockSize) -- : m_BlockVector( -- hAllocator, -- this, // hParentPool -- createInfo.memoryTypeIndex, -- createInfo.blockSize != 0 ? createInfo.blockSize : preferredBlockSize, -- createInfo.minBlockCount, -- createInfo.maxBlockCount, -- (createInfo.flags& VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(), -- createInfo.blockSize != 0, // explicitBlockSize -- createInfo.flags & VMA_POOL_CREATE_ALGORITHM_MASK, // algorithm -- createInfo.priority, -- VMA_MAX(hAllocator->GetMemoryTypeMinAlignment(createInfo.memoryTypeIndex), createInfo.minAllocationAlignment), -- createInfo.pMemoryAllocateNext), -+ const VmaPoolCreateInfo& createInfo) : -+ m_hAllocator(hAllocator), -+ m_pBlockVectors{}, - m_Id(0), -- m_Name(VMA_NULL) {} -+ m_Name(VMA_NULL) -+{ -+ for(uint32_t memTypeIndex = 0; memTypeIndex < hAllocator->GetMemoryTypeCount(); ++memTypeIndex) -+ { -+ // Create only supported types -+ if((hAllocator->GetGlobalMemoryTypeBits() & (1u << memTypeIndex)) != 0) -+ { -+ m_pBlockVectors[memTypeIndex] = vma_new(hAllocator, VmaBlockVector)( -+ hAllocator, -+ this, // hParentPool -+ memTypeIndex, -+ createInfo.blockSize != 0 ? createInfo.blockSize : hAllocator->CalcPreferredBlockSize(memTypeIndex), -+ createInfo.minBlockCount, -+ createInfo.maxBlockCount, -+ (createInfo.flags& VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 
1 : hAllocator->GetBufferImageGranularity(), -+ false, // explicitBlockSize -+ createInfo.flags & VMA_POOL_CREATE_ALGORITHM_MASK, // algorithm -+ createInfo.priority, -+ VMA_MAX(hAllocator->GetMemoryTypeMinAlignment(memTypeIndex), createInfo.minAllocationAlignment), -+ createInfo.pMemoryAllocateNext); -+ } -+ } -+} - - VmaPool_T::~VmaPool_T() - { - VMA_ASSERT(m_PrevPool == VMA_NULL && m_NextPool == VMA_NULL); -+ for(uint32_t memTypeIndex = 0; memTypeIndex < m_hAllocator->GetMemoryTypeCount(); ++memTypeIndex) -+ { -+ vma_delete(m_hAllocator, m_pBlockVectors[memTypeIndex]); -+ } - } - - void VmaPool_T::SetName(const char* pName) - { -- const VkAllocationCallbacks* allocs = m_BlockVector.GetAllocator()->GetAllocationCallbacks(); -- VmaFreeString(allocs, m_Name); -- -- if (pName != VMA_NULL) -- { -- m_Name = VmaCreateStringCopy(allocs, pName); -- } -- else -+ for(uint32_t memTypeIndex = 0; memTypeIndex < m_hAllocator->GetMemoryTypeCount(); ++memTypeIndex) - { -- m_Name = VMA_NULL; -+ if(m_pBlockVectors[memTypeIndex]) -+ { -+ const VkAllocationCallbacks* allocs = m_pBlockVectors[memTypeIndex]->GetAllocator()->GetAllocationCallbacks(); -+ VmaFreeString(allocs, m_Name); -+ -+ if (pName != VMA_NULL) -+ { -+ m_Name = VmaCreateStringCopy(allocs, pName); -+ } -+ else -+ { -+ m_Name = VMA_NULL; -+ } -+ } - } - } - #endif // _VMA_POOL_T_FUNCTIONS -@@ -15377,15 +15397,22 @@ VkResult VmaAllocator_T::CalcAllocationParams( - inoutCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT; - } - -- if(inoutCreateInfo.pool != VK_NULL_HANDLE) -+ if(inoutCreateInfo.pool != VK_NULL_HANDLE && (inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0) - { -- if(inoutCreateInfo.pool->m_BlockVector.HasExplicitBlockSize() && -- (inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0) -+ // Assuming here every block has the same block size and priority. -+ for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex) - { -- VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT while current custom pool doesn't support dedicated allocations."); -- return VK_ERROR_FEATURE_NOT_PRESENT; -+ if(inoutCreateInfo.pool->m_pBlockVectors[memTypeIndex]) -+ { -+ if(inoutCreateInfo.pool->m_pBlockVectors[memTypeIndex]->HasExplicitBlockSize()) -+ { -+ VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT while current custom pool doesn't support dedicated allocations."); -+ return VK_ERROR_FEATURE_NOT_PRESENT; -+ } -+ inoutCreateInfo.priority = inoutCreateInfo.pool->m_pBlockVectors[memTypeIndex]->GetPriority(); -+ break; -+ } - } -- inoutCreateInfo.priority = inoutCreateInfo.pool->m_BlockVector.GetPriority(); - } - - if((inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 && -@@ -15429,67 +15456,46 @@ VkResult VmaAllocator_T::AllocateMemory( - if(res != VK_SUCCESS) - return res; - -- if(createInfoFinal.pool != VK_NULL_HANDLE) -+ // Bit mask of memory Vulkan types acceptable for this allocation. -+ uint32_t memoryTypeBits = vkMemReq.memoryTypeBits; -+ uint32_t memTypeIndex = UINT32_MAX; -+ res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfoFinal, &memTypeIndex); -+ // Can't find any single memory type matching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT. -+ if(res != VK_SUCCESS) -+ return res; -+ do - { -- VmaBlockVector& blockVector = createInfoFinal.pool->m_BlockVector; -- return AllocateMemoryOfType( -+ VmaBlockVector* blockVector = createInfoFinal.pool == VK_NULL_HANDLE ? 
m_pBlockVectors[memTypeIndex] : createInfoFinal.pool->m_pBlockVectors[memTypeIndex]; -+ VMA_ASSERT(blockVector && "Trying to use unsupported memory type!"); -+ VmaDedicatedAllocationList& dedicatedAllocations = createInfoFinal.pool == VK_NULL_HANDLE ? m_DedicatedAllocations[memTypeIndex] : createInfoFinal.pool->m_DedicatedAllocations[memTypeIndex]; -+ res = AllocateMemoryOfType( - createInfoFinal.pool, - vkMemReq.size, - vkMemReq.alignment, -- prefersDedicatedAllocation, -+ requiresDedicatedAllocation || prefersDedicatedAllocation, - dedicatedBuffer, - dedicatedBufferUsage, - dedicatedImage, - createInfoFinal, -- blockVector.GetMemoryTypeIndex(), -+ memTypeIndex, - suballocType, -- createInfoFinal.pool->m_DedicatedAllocations, -- blockVector, -+ dedicatedAllocations, -+ *blockVector, - allocationCount, - pAllocations); -- } -- else -- { -- // Bit mask of memory Vulkan types acceptable for this allocation. -- uint32_t memoryTypeBits = vkMemReq.memoryTypeBits; -- uint32_t memTypeIndex = UINT32_MAX; -- res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfoFinal, &memTypeIndex); -- // Can't find any single memory type matching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT. -- if(res != VK_SUCCESS) -- return res; -- do -- { -- VmaBlockVector* blockVector = m_pBlockVectors[memTypeIndex]; -- VMA_ASSERT(blockVector && "Trying to use unsupported memory type!"); -- res = AllocateMemoryOfType( -- VK_NULL_HANDLE, -- vkMemReq.size, -- vkMemReq.alignment, -- requiresDedicatedAllocation || prefersDedicatedAllocation, -- dedicatedBuffer, -- dedicatedBufferUsage, -- dedicatedImage, -- createInfoFinal, -- memTypeIndex, -- suballocType, -- m_DedicatedAllocations[memTypeIndex], -- *blockVector, -- allocationCount, -- pAllocations); -- // Allocation succeeded -- if(res == VK_SUCCESS) -- return VK_SUCCESS; -+ // Allocation succeeded -+ if(res == VK_SUCCESS) -+ return VK_SUCCESS; - -- // Remove old memTypeIndex from list of possibilities. -- memoryTypeBits &= ~(1u << memTypeIndex); -- // Find alternative memTypeIndex. -- res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfoFinal, &memTypeIndex); -- } while(res == VK_SUCCESS); -+ // Remove old memTypeIndex from list of possibilities. -+ memoryTypeBits &= ~(1u << memTypeIndex); -+ // Find alternative memTypeIndex. -+ res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfoFinal, &memTypeIndex); -+ } while(res == VK_SUCCESS); - -- // No other matching memory type index could be found. -- // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once. -- return VK_ERROR_OUT_OF_DEVICE_MEMORY; -- } -+ // No other matching memory type index could be found. -+ // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once. 
-+ return VK_ERROR_OUT_OF_DEVICE_MEMORY; - } - - void VmaAllocator_T::FreeMemory( -@@ -15515,16 +15521,16 @@ void VmaAllocator_T::FreeMemory( - { - VmaBlockVector* pBlockVector = VMA_NULL; - VmaPool hPool = allocation->GetParentPool(); -+ const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex(); - if(hPool != VK_NULL_HANDLE) - { -- pBlockVector = &hPool->m_BlockVector; -+ pBlockVector = hPool->m_pBlockVectors[memTypeIndex]; - } - else - { -- const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex(); - pBlockVector = m_pBlockVectors[memTypeIndex]; -- VMA_ASSERT(pBlockVector && "Trying to free memory of unsupported type!"); - } -+ VMA_ASSERT(pBlockVector && "Trying to free memory of unsupported type!"); - pBlockVector->Free(allocation); - } - break; -@@ -15564,11 +15570,17 @@ void VmaAllocator_T::CalculateStats(VmaStats* pStats) - VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex); - for(VmaPool pool = m_Pools.Front(); pool != VMA_NULL; pool = m_Pools.GetNext(pool)) - { -- VmaBlockVector& blockVector = pool->m_BlockVector; -- blockVector.AddStats(pStats); -- const uint32_t memTypeIndex = blockVector.GetMemoryTypeIndex(); -- const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex); -- pool->m_DedicatedAllocations.AddStats(pStats, memTypeIndex, memHeapIndex); -+ for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex) -+ { -+ if (pool->m_pBlockVectors[memTypeIndex]) -+ { -+ VmaBlockVector& blockVector = *pool->m_pBlockVectors[memTypeIndex]; -+ blockVector.AddStats(pStats); -+ const uint32_t memTypeIndex = blockVector.GetMemoryTypeIndex(); -+ const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex); -+ pool->m_DedicatedAllocations[memTypeIndex].AddStats(pStats, memTypeIndex, memHeapIndex); -+ } -+ } - } - } - -@@ -15720,27 +15732,26 @@ VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPoo - { - return VK_ERROR_INITIALIZATION_FAILED; - } -- // Memory type index out of range or forbidden. -- if(pCreateInfo->memoryTypeIndex >= GetMemoryTypeCount() || -- ((1u << pCreateInfo->memoryTypeIndex) & m_GlobalMemoryTypeBits) == 0) -- { -- return VK_ERROR_FEATURE_NOT_PRESENT; -- } - if(newCreateInfo.minAllocationAlignment > 0) - { - VMA_ASSERT(VmaIsPow2(newCreateInfo.minAllocationAlignment)); - } - -- const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex); -- -- *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo, preferredBlockSize); -+ *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo); - -- VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks(); -- if(res != VK_SUCCESS) -+ for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex) - { -- vma_delete(this, *pPool); -- *pPool = VMA_NULL; -- return res; -+ // Create only supported types -+ if((m_GlobalMemoryTypeBits & (1u << memTypeIndex)) != 0) -+ { -+ VkResult res = (*pPool)->m_pBlockVectors[memTypeIndex]->CreateMinBlocks(); -+ if(res != VK_SUCCESS) -+ { -+ vma_delete(this, *pPool); -+ *pPool = VMA_NULL; -+ return res; -+ } -+ } - } - - // Add to m_Pools. 
-@@ -15772,8 +15783,14 @@ void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats) - pPoolStats->unusedRangeCount = 0; - pPoolStats->blockCount = 0; - -- pool->m_BlockVector.AddPoolStats(pPoolStats); -- pool->m_DedicatedAllocations.AddPoolStats(pPoolStats); -+ for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex) -+ { -+ if((m_GlobalMemoryTypeBits & (1u << memTypeIndex)) != 0) -+ { -+ pool->m_pBlockVectors[memTypeIndex]->AddPoolStats(pPoolStats); -+ pool->m_DedicatedAllocations[memTypeIndex].AddPoolStats(pPoolStats); -+ } -+ } - } - - void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex) -@@ -15790,7 +15807,13 @@ void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex) - - VkResult VmaAllocator_T::CheckPoolCorruption(VmaPool hPool) - { -- return hPool->m_BlockVector.CheckCorruption(); -+ for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex) -+ { -+ if((m_GlobalMemoryTypeBits & (1u << memTypeIndex)) != 0) -+ { -+ return hPool->m_pBlockVectors[memTypeIndex]->CheckCorruption(); -+ } -+ } - } - - VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits) -@@ -15822,18 +15845,21 @@ VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits) - VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex); - for(VmaPool pool = m_Pools.Front(); pool != VMA_NULL; pool = m_Pools.GetNext(pool)) - { -- if(((1u << pool->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0) -+ for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex) - { -- VkResult localRes = pool->m_BlockVector.CheckCorruption(); -- switch(localRes) -+ if(pool->m_pBlockVectors[memTypeIndex] && ((1u << memTypeIndex) & memoryTypeBits) != 0) - { -- case VK_ERROR_FEATURE_NOT_PRESENT: -- break; -- case VK_SUCCESS: -- finalRes = VK_SUCCESS; -- break; -- default: -- return localRes; -+ VkResult localRes = pool->m_pBlockVectors[memTypeIndex]->CheckCorruption(); -+ switch(localRes) -+ { -+ case VK_ERROR_FEATURE_NOT_PRESENT: -+ break; -+ case VK_SUCCESS: -+ finalRes = VK_SUCCESS; -+ break; -+ default: -+ return localRes; -+ } - } - } - } -@@ -16155,7 +16181,7 @@ void VmaAllocator_T::FreeDedicatedMemory(const VmaAllocation allocation) - else - { - // Custom pool -- parentPool->m_DedicatedAllocations.Unregister(allocation); -+ parentPool->m_DedicatedAllocations[memTypeIndex].Unregister(allocation); - } - - VkDeviceMemory hMemory = allocation->GetMemory(); -@@ -16430,12 +16456,18 @@ void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json) - json.EndString(); - - json.BeginObject(); -- pool->m_BlockVector.PrintDetailedMap(json); -- -- if (!pool->m_DedicatedAllocations.IsEmpty()) -+ for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex) - { -- json.WriteString("DedicatedAllocations"); -- pool->m_DedicatedAllocations.BuildStatsString(json); -+ if (pool->m_pBlockVectors[memTypeIndex]) -+ { -+ pool->m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json); -+ } -+ -+ if (!pool->m_DedicatedAllocations[memTypeIndex].IsEmpty()) -+ { -+ json.WriteString("DedicatedAllocations"); -+ pool->m_DedicatedAllocations->BuildStatsString(json); -+ } - } - json.EndObject(); - } diff --git a/thirdparty/vulkan/patches/02-VMA-use-volk.patch b/thirdparty/vulkan/patches/VMA-use-volk.patch index 1b6e0f04b8..1b6e0f04b8 100644 --- a/thirdparty/vulkan/patches/02-VMA-use-volk.patch +++ b/thirdparty/vulkan/patches/VMA-use-volk.patch diff --git a/thirdparty/vulkan/vk_mem_alloc.h b/thirdparty/vulkan/vk_mem_alloc.h index 
89e00e6326..6618f1d1f0 100644 --- a/thirdparty/vulkan/vk_mem_alloc.h +++ b/thirdparty/vulkan/vk_mem_alloc.h @@ -1,5 +1,5 @@ // -/// Copyright (c) 2017-2022 Advanced Micro Devices, Inc. All rights reserved. +// Copyright (c) 2017-2022 Advanced Micro Devices, Inc. All rights reserved. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal @@ -49,7 +49,6 @@ License: MIT - [Mapping functions](@ref memory_mapping_mapping_functions) - [Persistently mapped memory](@ref memory_mapping_persistently_mapped_memory) - [Cache flush and invalidate](@ref memory_mapping_cache_control) - - [Finding out if memory is mappable](@ref memory_mapping_finding_if_memory_mappable) - \subpage staying_within_budget - [Querying for budget](@ref staying_within_budget_querying_for_budget) - [Controlling memory usage](@ref staying_within_budget_controlling_memory_usage) @@ -61,12 +60,7 @@ License: MIT - [Stack](@ref linear_algorithm_stack) - [Double stack](@ref linear_algorithm_double_stack) - [Ring buffer](@ref linear_algorithm_ring_buffer) - - [Buddy allocation algorithm](@ref buddy_algorithm) - \subpage defragmentation - - [Defragmenting CPU memory](@ref defragmentation_cpu) - - [Defragmenting GPU memory](@ref defragmentation_gpu) - - [Additional notes](@ref defragmentation_additional_notes) - - [Writing custom allocation algorithm](@ref defragmentation_custom_algorithm) - \subpage statistics - [Numeric statistics](@ref statistics_numeric_statistics) - [JSON dump](@ref statistics_json_dump) @@ -80,17 +74,19 @@ License: MIT - [Corruption detection](@ref debugging_memory_usage_corruption_detection) - \subpage opengl_interop - \subpage usage_patterns - - [Common mistakes](@ref usage_patterns_common_mistakes) - - [Simple patterns](@ref usage_patterns_simple) - - [Advanced patterns](@ref usage_patterns_advanced) + - [GPU-only resource](@ref usage_patterns_gpu_only) + - [Staging copy for upload](@ref usage_patterns_staging_copy_upload) + - [Readback](@ref usage_patterns_readback) + - [Advanced data uploading](@ref usage_patterns_advanced_data_uploading) + - [Other use cases](@ref usage_patterns_other_use_cases) - \subpage configuration - [Pointers to Vulkan functions](@ref config_Vulkan_functions) - [Custom host memory allocator](@ref custom_memory_allocator) - [Device memory allocation callbacks](@ref allocation_callbacks) - [Device heap memory limit](@ref heap_memory_limit) - - \subpage vk_khr_dedicated_allocation - - \subpage enabling_buffer_device_address - - \subpage vk_amd_device_coherent_memory +- \subpage vk_khr_dedicated_allocation +- \subpage enabling_buffer_device_address +- \subpage vk_amd_device_coherent_memory - \subpage general_considerations - [Thread safety](@ref general_considerations_thread_safety) - [Validation layer warnings](@ref general_considerations_validation_layer_warnings) @@ -99,8 +95,8 @@ License: MIT \section main_see_also See also -- [Product page on GPUOpen](https://gpuopen.com/gaming-product/vulkan-memory-allocator/) -- [Source repository on GitHub](https://github.com/GPUOpen-LibrariesAndSDKs/VulkanMemoryAllocator) +- [**Product page on GPUOpen**](https://gpuopen.com/gaming-product/vulkan-memory-allocator/) +- [**Source repository on GitHub**](https://github.com/GPUOpen-LibrariesAndSDKs/VulkanMemoryAllocator) \defgroup group_init Library initialization @@ -119,6 +115,7 @@ for user-defined purpose without allocating any real GPU memory. 
\defgroup group_stats Statistics \brief API elements that query current status of the allocator, from memory usage, budget, to full dump of the internal state in JSON format. +See documentation chapter: \ref statistics. */ @@ -178,13 +175,6 @@ extern "C" { #endif // #if VMA_VULKAN_VERSION >= 1001000 #endif // #if defined(__ANDROID__) && VMA_STATIC_VULKAN_FUNCTIONS && VK_NO_PROTOTYPES -#if !defined(VK_VERSION_1_2) - // This one is tricky. Vulkan specification defines this code as available since - // Vulkan 1.0, but doesn't actually define it in Vulkan SDK earlier than 1.2.131. - // See pull request #207. - #define VK_ERROR_UNKNOWN ((VkResult)-13) -#endif - #if !defined(VMA_DEDICATED_ALLOCATION) #if VK_KHR_get_memory_requirements2 && VK_KHR_dedicated_allocation #define VMA_DEDICATED_ALLOCATION 1 @@ -307,9 +297,9 @@ extern "C" { //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// -// +// // INTERFACE -// +// //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// @@ -453,56 +443,33 @@ typedef enum VmaMemoryUsage Use other members of VmaAllocationCreateInfo to specify your requirements. */ VMA_MEMORY_USAGE_UNKNOWN = 0, - /** Memory will be used on device only, so fast access from the device is preferred. - It usually means device-local GPU (video) memory. - No need to be mappable on host. - It is roughly equivalent of `D3D12_HEAP_TYPE_DEFAULT`. - - Usage: - - - Resources written and read by device, e.g. images used as attachments. - - Resources transferred from host once (immutable) or infrequently and read by - device multiple times, e.g. textures to be sampled, vertex buffers, uniform - (constant) buffers, and majority of other types of resources used on GPU. - - Allocation may still end up in `HOST_VISIBLE` memory on some implementations. - In such case, you are free to map it. - You can use #VMA_ALLOCATION_CREATE_MAPPED_BIT with this usage type. + /** + \deprecated Obsolete, preserved for backward compatibility. + Prefers `VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT`. */ VMA_MEMORY_USAGE_GPU_ONLY = 1, - /** Memory will be mappable on host. - It usually means CPU (system) memory. - Guarantees to be `HOST_VISIBLE` and `HOST_COHERENT`. - CPU access is typically uncached. Writes may be write-combined. - Resources created in this pool may still be accessible to the device, but access to them can be slow. - It is roughly equivalent of `D3D12_HEAP_TYPE_UPLOAD`. - - Usage: Staging copy of resources used as transfer source. + /** + \deprecated Obsolete, preserved for backward compatibility. + Guarantees `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT` and `VK_MEMORY_PROPERTY_HOST_COHERENT_BIT`. */ VMA_MEMORY_USAGE_CPU_ONLY = 2, /** - Memory that is both mappable on host (guarantees to be `HOST_VISIBLE`) and preferably fast to access by GPU. - CPU access is typically uncached. Writes may be write-combined. - - Usage: Resources written frequently by host (dynamic), read by device. E.g. textures (with LINEAR layout), vertex buffers, uniform buffers updated every frame or every draw call. + \deprecated Obsolete, preserved for backward compatibility. + Guarantees `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT`, prefers `VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT`. */ VMA_MEMORY_USAGE_CPU_TO_GPU = 3, - /** Memory mappable on host (guarantees to be `HOST_VISIBLE`) and cached. 
- It is roughly equivalent of `D3D12_HEAP_TYPE_READBACK`. - - Usage: - - - Resources written by device, read by host - results of some computations, e.g. screen capture, average scene luminance for HDR tone mapping. - - Any resources read or accessed randomly on host, e.g. CPU-side copy of vertex buffer used as source of transfer, but also used for collision detection. + /** + \deprecated Obsolete, preserved for backward compatibility. + Guarantees `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT`, prefers `VK_MEMORY_PROPERTY_HOST_CACHED_BIT`. */ VMA_MEMORY_USAGE_GPU_TO_CPU = 4, - /** CPU memory - memory that is preferably not `DEVICE_LOCAL`, but also not guaranteed to be `HOST_VISIBLE`. - - Usage: Staging copy of resources moved from GPU memory to CPU memory as part - of custom paging/residency mechanism, to be moved back to GPU memory when needed. + /** + \deprecated Obsolete, preserved for backward compatibility. + Prefers not `VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT`. */ VMA_MEMORY_USAGE_CPU_COPY = 5, - /** Lazily allocated GPU memory having `VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT`. + /** + Lazily allocated GPU memory having `VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT`. Exists mostly on mobile platforms. Using it on desktop PC or other GPUs with no such memory type present will fail the allocation. Usage: Memory for transient attachment images (color attachments, depth attachments etc.), created with `VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT`. @@ -510,6 +477,43 @@ typedef enum VmaMemoryUsage Allocations with this usage are always created as dedicated - it implies #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT. */ VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED = 6, + /** + Selects best memory type automatically. + This flag is recommended for most common use cases. + + When using this flag, if you want to map the allocation (using vmaMapMemory() or #VMA_ALLOCATION_CREATE_MAPPED_BIT), + you must pass one of the flags: #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT + in VmaAllocationCreateInfo::flags. + + It can be used only with functions that let the library know `VkBufferCreateInfo` or `VkImageCreateInfo`, e.g. + vmaCreateBuffer(), vmaCreateImage(), vmaFindMemoryTypeIndexForBufferInfo(), vmaFindMemoryTypeIndexForImageInfo() + and not with generic memory allocation functions. + */ + VMA_MEMORY_USAGE_AUTO = 7, + /** + Selects best memory type automatically with preference for GPU (device) memory. + + When using this flag, if you want to map the allocation (using vmaMapMemory() or #VMA_ALLOCATION_CREATE_MAPPED_BIT), + you must pass one of the flags: #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT + in VmaAllocationCreateInfo::flags. + + It can be used only with functions that let the library know `VkBufferCreateInfo` or `VkImageCreateInfo`, e.g. + vmaCreateBuffer(), vmaCreateImage(), vmaFindMemoryTypeIndexForBufferInfo(), vmaFindMemoryTypeIndexForImageInfo() + and not with generic memory allocation functions. + */ + VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE = 8, + /** + Selects best memory type automatically with preference for CPU (host) memory. + + When using this flag, if you want to map the allocation (using vmaMapMemory() or #VMA_ALLOCATION_CREATE_MAPPED_BIT), + you must pass one of the flags: #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT + in VmaAllocationCreateInfo::flags. 
+ + It can be used only with functions that let the library know `VkBufferCreateInfo` or `VkImageCreateInfo`, e.g. + vmaCreateBuffer(), vmaCreateImage(), vmaFindMemoryTypeIndexForBufferInfo(), vmaFindMemoryTypeIndexForImageInfo() + and not with generic memory allocation functions. + */ + VMA_MEMORY_USAGE_AUTO_PREFER_HOST = 9, VMA_MEMORY_USAGE_MAX_ENUM = 0x7FFFFFFF } VmaMemoryUsage; @@ -570,11 +574,51 @@ typedef enum VmaAllocationCreateFlagBits */ VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT = 0x00000100, /** \brief Set this flag if the allocated memory will have aliasing resources. - * + Usage of this flag prevents supplying `VkMemoryDedicatedAllocateInfoKHR` when #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT is specified. Otherwise created dedicated memory will not be suitable for aliasing resources, resulting in Vulkan Validation Layer errors. */ VMA_ALLOCATION_CREATE_CAN_ALIAS_BIT = 0x00000200, + /** + Requests possibility to map the allocation (using vmaMapMemory() or #VMA_ALLOCATION_CREATE_MAPPED_BIT). + + - If you use #VMA_MEMORY_USAGE_AUTO or other `VMA_MEMORY_USAGE_AUTO*` value, + you must use this flag to be able to map the allocation. Otherwise, mapping is incorrect. + - If you use other value of #VmaMemoryUsage, this flag is ignored and mapping is always possible in memory types that are `HOST_VISIBLE`. + This includes allocations created in \ref custom_memory_pools. + + Declares that mapped memory will only be written sequentially, e.g. using `memcpy()` or a loop writing number-by-number, + never read or accessed randomly, so a memory type can be selected that is uncached and write-combined. + + \warning Violating this declaration may work correctly, but will likely be very slow. + Watch out for implicit reads introduces by doing e.g. `pMappedData[i] += x;` + Better prepare your data in a local variable and `memcpy()` it to the mapped pointer all at once. + */ + VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT = 0x00000400, + /** + Requests possibility to map the allocation (using vmaMapMemory() or #VMA_ALLOCATION_CREATE_MAPPED_BIT). + + - If you use #VMA_MEMORY_USAGE_AUTO or other `VMA_MEMORY_USAGE_AUTO*` value, + you must use this flag to be able to map the allocation. Otherwise, mapping is incorrect. + - If you use other value of #VmaMemoryUsage, this flag is ignored and mapping is always possible in memory types that are `HOST_VISIBLE`. + This includes allocations created in \ref custom_memory_pools. + + Declares that mapped memory can be read, written, and accessed in random order, + so a `HOST_CACHED` memory type is preferred. + */ + VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT = 0x00000800, + /** + Together with #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT, + it says that despite request for host access, a not-`HOST_VISIBLE` memory type can be selected + if it may improve performance. + + By using this flag, you declare that you will check if the allocation ended up in a `HOST_VISIBLE` memory type + (e.g. using vmaGetAllocationMemoryProperties()) and if not, you will create some "staging" buffer and + issue an explicit transfer to write/read your data. + To prepare for this possibility, don't forget to add appropriate flags like + `VK_BUFFER_USAGE_TRANSFER_DST_BIT`, `VK_BUFFER_USAGE_TRANSFER_SRC_BIT` to the parameters of created buffer or image. 
+ */ + VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT = 0x00001000, /** Allocation strategy that chooses smallest possible free range for the allocation to minimize memory usage and fragmentation, possibly at the expense of allocation time. */ @@ -584,6 +628,11 @@ typedef enum VmaAllocationCreateFlagBits to minimize allocation time, possibly at the expense of allocation quality. */ VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT = 0x00020000, + /** Allocation strategy that chooses always the lowest offset in available space. + This is not the most efficient strategy but achieves highly packed data. + Used internally by defragmentation, not recomended in typical usage. + */ + VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT = 0x00040000, /** Alias to #VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT. */ VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT = VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT, @@ -594,7 +643,8 @@ typedef enum VmaAllocationCreateFlagBits */ VMA_ALLOCATION_CREATE_STRATEGY_MASK = VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT | - VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT, + VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT | + VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT, VMA_ALLOCATION_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF } VmaAllocationCreateFlagBits; @@ -635,48 +685,60 @@ typedef enum VmaPoolCreateFlagBits */ VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT = 0x00000004, - /** \brief Enables alternative, buddy allocation algorithm in this pool. - - It operates on a tree of blocks, each having size that is a power of two and - a half of its parent's size. Comparing to default algorithm, this one provides - faster allocation and deallocation and decreased external fragmentation, - at the expense of more memory wasted (internal fragmentation). - For details, see documentation chapter \ref buddy_algorithm. - */ - VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT = 0x00000008, - - /** \brief Enables alternative, Two-Level Segregated Fit (TLSF) allocation algorithm in this pool. - - This algorithm is based on 2-level lists dividing address space into smaller - chunks. The first level is aligned to power of two which serves as buckets for requested - memory to fall into, and the second level is lineary subdivided into lists of free memory. - This algorithm aims to achieve bounded response time even in the worst case scenario. - Allocation time can be sometimes slightly longer than compared to other algorithms - but in return the application can avoid stalls in case of fragmentation, giving - predictable results, suitable for real-time use cases. - */ - VMA_POOL_CREATE_TLSF_ALGORITHM_BIT = 0x00000010, - /** Bit mask to extract only `ALGORITHM` bits from entire set of flags. */ VMA_POOL_CREATE_ALGORITHM_MASK = - VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT | - VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT | - VMA_POOL_CREATE_TLSF_ALGORITHM_BIT, + VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT, VMA_POOL_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF } VmaPoolCreateFlagBits; /// Flags to be passed as VmaPoolCreateInfo::flags. See #VmaPoolCreateFlagBits. typedef VkFlags VmaPoolCreateFlags; -/// Flags to be used in vmaDefragmentationBegin(). None at the moment. Reserved for future use. +/// Flags to be passed as VmaDefragmentationInfo::flags. typedef enum VmaDefragmentationFlagBits { - VMA_DEFRAGMENTATION_FLAG_INCREMENTAL = 0x1, + /* \brief Use simple but fast algorithm for defragmentation. + May not achieve best results but will require least time to compute and least allocations to copy. 
+ */ + VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FAST_BIT = 0x1, + /* \brief Default defragmentation algorithm, applied also when no `ALGORITHM` flag is specified. + Offers a balance between defragmentation quality and the amount of allocations and bytes that need to be moved. + */ + VMA_DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED_BIT = 0x2, + /* \brief Perform full defragmentation of memory. + Can result in notably more time to compute and allocations to copy, but will achieve best memory packing. + */ + VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FULL_BIT = 0x4, + /** \brief Use the most roboust algorithm at the cost of time to compute and number of copies to make. + Only available when bufferImageGranularity is greater than 1, since it aims to reduce + alignment issues between different types of resources. + Otherwise falls back to same behavior as #VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FULL_BIT. + */ + VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT = 0x8, + + /// A bit mask to extract only `ALGORITHM` bits from entire set of flags. + VMA_DEFRAGMENTATION_FLAG_ALGORITHM_MASK = + VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FAST_BIT | + VMA_DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED_BIT | + VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FULL_BIT | + VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT, + VMA_DEFRAGMENTATION_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF } VmaDefragmentationFlagBits; typedef VkFlags VmaDefragmentationFlags; +/// Operation performed on single defragmentation move. +typedef enum VmaDefragmentationMoveOperation +{ + /// Buffer/image has been recreated at `dstMemory` + `dstOffset`, data has been copied, old buffer/image has been destroyed. `srcAllocation` should be changed to point to the new place. This is the default value set by vmaBeginDefragmentationPass(). + VMA_DEFRAGMENTATION_MOVE_OPERATION_COPY = 0, + /// Set this value if you cannot move the allocation. New place reserved `dstMemory` + `dstOffset` will be freed. `srcAllocation` will remain unchanged. + VMA_DEFRAGMENTATION_MOVE_OPERATION_IGNORE = 1, + /// Set this value if you decide to abandon the allocation and you destroyed the buffer/image. New place reserved `dstMemory` + `dstOffset` will be freed, along with `srcAllocation`. + VMA_DEFRAGMENTATION_MOVE_OPERATION_DESTROY = 2, +} VmaDefragmentationMoveOperation; + /** @} */ /** @@ -700,34 +762,10 @@ typedef enum VmaVirtualBlockCreateFlagBits */ VMA_VIRTUAL_BLOCK_CREATE_LINEAR_ALGORITHM_BIT = 0x00000001, - /** \brief Enables alternative, buddy allocation algorithm in this virtual block. - - It operates on a tree of blocks, each having size that is a power of two and - a half of its parent's size. Comparing to default algorithm, this one provides - faster allocation and deallocation and decreased external fragmentation, - at the expense of more memory wasted (internal fragmentation). - For details, see documentation chapter \ref buddy_algorithm. - */ - VMA_VIRTUAL_BLOCK_CREATE_BUDDY_ALGORITHM_BIT = 0x00000002, - - /** \brief Enables alternative, TLSF allocation algorithm in virtual block. - - This algorithm is based on 2-level lists dividing address space into smaller - chunks. The first level is aligned to power of two which serves as buckets for requested - memory to fall into, and the second level is lineary subdivided into lists of free memory. - This algorithm aims to achieve bounded response time even in the worst case scenario. 
- Allocation time can be sometimes slightly longer than compared to other algorithms - but in return the application can avoid stalls in case of fragmentation, giving - predictable results, suitable for real-time use cases. - */ - VMA_VIRTUAL_BLOCK_CREATE_TLSF_ALGORITHM_BIT = 0x00000004, - /** \brief Bit mask to extract only `ALGORITHM` bits from entire set of flags. */ VMA_VIRTUAL_BLOCK_CREATE_ALGORITHM_MASK = - VMA_VIRTUAL_BLOCK_CREATE_LINEAR_ALGORITHM_BIT | - VMA_VIRTUAL_BLOCK_CREATE_BUDDY_ALGORITHM_BIT | - VMA_VIRTUAL_BLOCK_CREATE_TLSF_ALGORITHM_BIT, + VMA_VIRTUAL_BLOCK_CREATE_LINEAR_ALGORITHM_BIT, VMA_VIRTUAL_BLOCK_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF } VmaVirtualBlockCreateFlagBits; @@ -748,6 +786,10 @@ typedef enum VmaVirtualAllocationCreateFlagBits /** \brief Allocation strategy that tries to minimize allocation time. */ VMA_VIRTUAL_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT = VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT, + /** Allocation strategy that chooses always the lowest offset in available space. + This is not the most efficient strategy but achieves highly packed data. + */ + VMA_VIRTUAL_ALLOCATION_CREATE_STRATEGY_PACKED_BIT = VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT , /** \brief A bit mask to extract only `STRATEGY` bits from entire set of flags. These strategy flags are binary compatible with equivalent flags in #VmaAllocationCreateFlagBits. @@ -821,10 +863,10 @@ returned structure VmaAllocationInfo. VK_DEFINE_HANDLE(VmaAllocation) /** \struct VmaDefragmentationContext -\brief Represents Opaque object that represents started defragmentation process. +\brief An opaque object that represents started defragmentation process. -Fill structure #VmaDefragmentationInfo2 and call function vmaDefragmentationBegin() to create it. -Call function vmaDefragmentationEnd() to destroy it. +Fill structure #VmaDefragmentationInfo and call function vmaBeginDefragmentation() to create it. +Call function vmaEndDefragmentation() to destroy it. */ VK_DEFINE_HANDLE(VmaDefragmentationContext) @@ -943,6 +985,12 @@ typedef struct VmaVulkanFunctions #if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000 PFN_vkGetPhysicalDeviceMemoryProperties2KHR VMA_NULLABLE vkGetPhysicalDeviceMemoryProperties2KHR; #endif +#if VMA_VULKAN_VERSION >= 1003000 + /// Fetch from "vkGetDeviceBufferMemoryRequirements" on Vulkan >= 1.3, but you can also fetch it from "vkGetDeviceBufferMemoryRequirementsKHR" if you enabled extension VK_KHR_maintenance4. + PFN_vkGetDeviceBufferMemoryRequirements VMA_NULLABLE vkGetDeviceBufferMemoryRequirements; + /// Fetch from "vkGetDeviceImageMemoryRequirements" on Vulkan >= 1.3, but you can also fetch it from "vkGetDeviceImageMemoryRequirementsKHR" if you enabled extension VK_KHR_maintenance4. + PFN_vkGetDeviceImageMemoryRequirements VMA_NULLABLE vkGetDeviceImageMemoryRequirements; +#endif } VmaVulkanFunctions; /// Description of a Allocator to be created. @@ -1051,59 +1099,102 @@ typedef struct VmaAllocatorInfo @{ */ -/// Calculated statistics of memory usage in entire allocator. -typedef struct VmaStatInfo +/** \brief Calculated statistics of memory usage e.g. in a specific memory type, heap, custom pool, or total. + +These are fast to calculate. +See functions: vmaGetHeapBudgets(), vmaGetPoolStatistics(). +*/ +typedef struct VmaStatistics { - /// Number of `VkDeviceMemory` Vulkan memory blocks allocated. + /** \brief Number of `VkDeviceMemory` objects - Vulkan memory blocks allocated. + */ uint32_t blockCount; - /// Number of #VmaAllocation allocation objects allocated. 
+ /** \brief Number of #VmaAllocation objects allocated. + + Dedicated allocations have their own blocks, so each one adds 1 to `allocationCount` as well as `blockCount`. + */ uint32_t allocationCount; + /** \brief Number of bytes allocated in `VkDeviceMemory` blocks. + + \note To avoid confusion, please be aware that what Vulkan calls an "allocation" - a whole `VkDeviceMemory` object + (e.g. as in `VkPhysicalDeviceLimits::maxMemoryAllocationCount`) is called a "block" in VMA, while VMA calls + "allocation" a #VmaAllocation object that represents a memory region sub-allocated from such block, usually for a single buffer or image. + */ + VkDeviceSize blockBytes; + /** \brief Total number of bytes occupied by all #VmaAllocation objects. + + Always less or equal than `blockBytes`. + Difference `(blockBytes - allocationBytes)` is the amount of memory allocated from Vulkan + but unused by any #VmaAllocation. + */ + VkDeviceSize allocationBytes; +} VmaStatistics; + +/** \brief More detailed statistics than #VmaStatistics. + +These are slower to calculate. Use for debugging purposes. +See functions: vmaCalculateStatistics(), vmaCalculatePoolStatistics(). + +Previous version of the statistics API provided averages, but they have been removed +because they can be easily calculated as: + +\code +VkDeviceSize allocationSizeAvg = detailedStats.statistics.allocationBytes / detailedStats.statistics.allocationCount; +VkDeviceSize unusedBytes = detailedStats.statistics.blockBytes - detailedStats.statistics.allocationBytes; +VkDeviceSize unusedRangeSizeAvg = unusedBytes / detailedStats.unusedRangeCount; +\endcode +*/ +typedef struct VmaDetailedStatistics +{ + /// Basic statistics. + VmaStatistics statistics; /// Number of free ranges of memory between allocations. uint32_t unusedRangeCount; - /// Total number of bytes occupied by all allocations. - VkDeviceSize usedBytes; - /// Total number of bytes occupied by unused ranges. - VkDeviceSize unusedBytes; - VkDeviceSize allocationSizeMin, allocationSizeAvg, allocationSizeMax; - VkDeviceSize unusedRangeSizeMin, unusedRangeSizeAvg, unusedRangeSizeMax; -} VmaStatInfo; - -/// General statistics from current state of Allocator. -typedef struct VmaStats -{ - VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES]; - VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS]; - VmaStatInfo total; -} VmaStats; - -/// Statistics of current memory usage and available budget, in bytes, for specific memory heap. -typedef struct VmaBudget + /// Smallest allocation size. `VK_WHOLE_SIZE` if there are 0 allocations. + VkDeviceSize allocationSizeMin; + /// Largest allocation size. 0 if there are 0 allocations. + VkDeviceSize allocationSizeMax; + /// Smallest empty range size. `VK_WHOLE_SIZE` if there are 0 empty ranges. + VkDeviceSize unusedRangeSizeMin; + /// Largest empty range size. 0 if there are 0 empty ranges. + VkDeviceSize unusedRangeSizeMax; +} VmaDetailedStatistics; + +/** \brief General statistics from current state of the Allocator - +total memory usage across all memory heaps and types. + +These are slower to calculate. Use for debugging purposes. +See function vmaCalculateStatistics(). +*/ +typedef struct VmaTotalStatistics { - /** \brief Sum size of all `VkDeviceMemory` blocks allocated from particular heap, in bytes. 
- */ - VkDeviceSize blockBytes; + VmaDetailedStatistics memoryType[VK_MAX_MEMORY_TYPES]; + VmaDetailedStatistics memoryHeap[VK_MAX_MEMORY_HEAPS]; + VmaDetailedStatistics total; +} VmaTotalStatistics; - /** \brief Sum size of all allocations created in particular heap, in bytes. +/** \brief Statistics of current memory usage and available budget for a specific memory heap. - Usually less or equal than `blockBytes`. - Difference `blockBytes - allocationBytes` is the amount of memory allocated but unused - - available for new allocations or wasted due to fragmentation. +These are fast to calculate. +See function vmaGetHeapBudgets(). +*/ +typedef struct VmaBudget +{ + /** \brief Statistics fetched from the library. */ - VkDeviceSize allocationBytes; - + VmaStatistics statistics; /** \brief Estimated current memory usage of the program, in bytes. - Fetched from system using `VK_EXT_memory_budget` extension if enabled. + Fetched from system using VK_EXT_memory_budget extension if enabled. - It might be different than `blockBytes` (usually higher) due to additional implicit objects + It might be different than `statistics.blockBytes` (usually higher) due to additional implicit objects also occupying the memory, like swapchain, pipelines, descriptor heaps, command buffers, or `VkDeviceMemory` blocks allocated outside of this library, if any. */ VkDeviceSize usage; - /** \brief Estimated amount of memory available to the program, in bytes. - Fetched from system using `VK_EXT_memory_budget` extension if enabled. + Fetched from system using VK_EXT_memory_budget extension if enabled. It might be different (most probably smaller) than `VkMemoryHeap::size[heapIndex]` due to factors external to the program, like other programs also consuming system resources. @@ -1127,26 +1218,31 @@ typedef struct VmaAllocationCreateInfo /** \brief Intended usage of memory. You can leave #VMA_MEMORY_USAGE_UNKNOWN if you specify memory requirements in other way. \n + If `pool` is not null, this member is ignored. */ VmaMemoryUsage usage; /** \brief Flags that must be set in a Memory Type chosen for an allocation. - Leave 0 if you specify memory requirements in other way.*/ + Leave 0 if you specify memory requirements in other way. \n + If `pool` is not null, this member is ignored.*/ VkMemoryPropertyFlags requiredFlags; /** \brief Flags that preferably should be set in a memory type chosen for an allocation. - Set to 0 if no additional flags are preferred.*/ + Set to 0 if no additional flags are preferred. \n + If `pool` is not null, this member is ignored. */ VkMemoryPropertyFlags preferredFlags; /** \brief Bitmask containing one bit set for every memory type acceptable for this allocation. Value 0 is equivalent to `UINT32_MAX` - it means any memory type is accepted if it meets other requirements specified by this structure, with no further restrictions on memory type index. \n + If `pool` is not null, this member is ignored. */ uint32_t memoryTypeBits; /** \brief Pool that this allocation should be created in. - Leave `VK_NULL_HANDLE` to allocate from default pool. + Leave `VK_NULL_HANDLE` to allocate from default pool. If not null, members: + `usage`, `requiredFlags`, `preferredFlags`, `memoryTypeBits` are ignored. */ VmaPool VMA_NULLABLE pool; /** \brief Custom general-purpose pointer that will be stored in #VmaAllocation, can be read as VmaAllocationInfo::pUserData and changed using vmaSetAllocationUserData(). @@ -1168,6 +1264,9 @@ typedef struct VmaAllocationCreateInfo /// Describes parameter of created #VmaPool. 
typedef struct VmaPoolCreateInfo { + /** \brief Vulkan memory type index to allocate this pool from. + */ + uint32_t memoryTypeIndex; /** \brief Use combination of #VmaPoolCreateFlagBits. */ VmaPoolCreateFlags flags; @@ -1222,33 +1321,6 @@ typedef struct VmaPoolCreateInfo /** @} */ /** -\addtogroup group_stats -@{ -*/ - -/// Describes parameter of existing #VmaPool. -typedef struct VmaPoolStats -{ - /** \brief Total amount of `VkDeviceMemory` allocated from Vulkan for this pool, in bytes. - */ - VkDeviceSize size; - /** \brief Total number of bytes in the pool not used by any #VmaAllocation. - */ - VkDeviceSize unusedSize; - /** \brief Number of #VmaAllocation objects created from this pool that were not destroyed. - */ - size_t allocationCount; - /** \brief Number of continuous memory ranges in the pool not used by any #VmaAllocation. - */ - size_t unusedRangeCount; - /** \brief Number of `VkDeviceMemory` blocks allocated for this pool. - */ - size_t blockCount; -} VmaPoolStats; - -/** @} */ - -/** \addtogroup group_alloc @{ */ @@ -1307,116 +1379,79 @@ typedef struct VmaAllocationInfo /** \brief Parameters for defragmentation. -To be used with function vmaDefragmentationBegin(). +To be used with function vmaBeginDefragmentation(). */ -typedef struct VmaDefragmentationInfo2 +typedef struct VmaDefragmentationInfo { - /** \brief Reserved for future use. Should be 0. - */ + /// \brief Use combination of #VmaDefragmentationFlagBits. VmaDefragmentationFlags flags; - /** \brief Number of allocations in `pAllocations` array. - */ - uint32_t allocationCount; - /** \brief Pointer to array of allocations that can be defragmented. - - The array should have `allocationCount` elements. - The array should not contain nulls. - Elements in the array should be unique - same allocation cannot occur twice. - All allocations not present in this array are considered non-moveable during this defragmentation. - */ - const VmaAllocation VMA_NOT_NULL* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) pAllocations; - /** \brief Optional, output. Pointer to array that will be filled with information whether the allocation at certain index has been changed during defragmentation. - - The array should have `allocationCount` elements. - You can pass null if you are not interested in this information. - */ - VkBool32* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) pAllocationsChanged; - /** \brief Numer of pools in `pPools` array. - */ - uint32_t poolCount; - /** \brief Either null or pointer to array of pools to be defragmented. - - All the allocations in the specified pools can be moved during defragmentation - and there is no way to check if they were really moved as in `pAllocationsChanged`, - so you must query all the allocations in all these pools for new `VkDeviceMemory` - and offset using vmaGetAllocationInfo() if you might need to recreate buffers - and images bound to them. + /** \brief Custom pool to be defragmented. - The array should have `poolCount` elements. - The array should not contain nulls. - Elements in the array should be unique - same pool cannot occur twice. - - Using this array is equivalent to specifying all allocations from the pools in `pAllocations`. - It might be more efficient. - */ - const VmaPool VMA_NOT_NULL* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(poolCount) pPools; - /** \brief Maximum total numbers of bytes that can be copied while moving allocations to different places using transfers on CPU side, like `memcpy()`, `memmove()`. - - `VK_WHOLE_SIZE` means no limit. 
- */ - VkDeviceSize maxCpuBytesToMove; - /** \brief Maximum number of allocations that can be moved to a different place using transfers on CPU side, like `memcpy()`, `memmove()`. - - `UINT32_MAX` means no limit. + If null then default pools will undergo defragmentation process. */ - uint32_t maxCpuAllocationsToMove; - /** \brief Maximum total numbers of bytes that can be copied while moving allocations to different places using transfers on GPU side, posted to `commandBuffer`. + VmaPool VMA_NULLABLE pool; + /** \brief Maximum numbers of bytes that can be copied during single pass, while moving allocations to different places. - `VK_WHOLE_SIZE` means no limit. + `0` means no limit. */ - VkDeviceSize maxGpuBytesToMove; - /** \brief Maximum number of allocations that can be moved to a different place using transfers on GPU side, posted to `commandBuffer`. + VkDeviceSize maxBytesPerPass; + /** \brief Maximum number of allocations that can be moved during single pass to a different place. - `UINT32_MAX` means no limit. + `0` means no limit. */ - uint32_t maxGpuAllocationsToMove; - /** \brief Optional. Command buffer where GPU copy commands will be posted. - - If not null, it must be a valid command buffer handle that supports Transfer queue type. - It must be in the recording state and outside of a render pass instance. - You need to submit it and make sure it finished execution before calling vmaDefragmentationEnd(). - - Passing null means that only CPU defragmentation will be performed. - */ - VkCommandBuffer VMA_NULLABLE commandBuffer; -} VmaDefragmentationInfo2; + uint32_t maxAllocationsPerPass; +} VmaDefragmentationInfo; -typedef struct VmaDefragmentationPassMoveInfo +/// Single move of an allocation to be done for defragmentation. +typedef struct VmaDefragmentationMove { - VmaAllocation VMA_NOT_NULL allocation; - VkDeviceMemory VMA_NOT_NULL_NON_DISPATCHABLE memory; - VkDeviceSize offset; -} VmaDefragmentationPassMoveInfo; + /// Operation to be performed on the allocation by vmaEndDefragmentationPass(). Default value is #VMA_DEFRAGMENTATION_MOVE_OPERATION_COPY. You can modify it. + VmaDefragmentationMoveOperation operation; + /// Allocation that should be moved. + VmaAllocation VMA_NOT_NULL srcAllocation; + /// Destination memory block where the allocation should be moved. + VkDeviceMemory VMA_NOT_NULL_NON_DISPATCHABLE dstMemory; + /// Destination offset where the allocation should be moved. + VkDeviceSize dstOffset; + /// Internal data used by VMA. Do not use or modify! + void* VMA_NOT_NULL internalData; +} VmaDefragmentationMove; /** \brief Parameters for incremental defragmentation steps. To be used with function vmaBeginDefragmentationPass(). */ -typedef struct VmaDefragmentationPassInfo +typedef struct VmaDefragmentationPassMoveInfo { + /// Number of elements in the `pMoves` array. uint32_t moveCount; - VmaDefragmentationPassMoveInfo* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(moveCount) pMoves; -} VmaDefragmentationPassInfo; + /** \brief Array of moves to be performed by the user in the current defragmentation pass. + + Pointer to an array of `moveCount` elements, owned by VMA, created in vmaBeginDefragmentationPass(), destroyed in vmaEndDefragmentationPass(). -/** \brief Deprecated. Optional configuration parameters to be passed to function vmaDefragment(). + For each element, you should: + + 1. Create a new buffer/image in the place pointed by VmaDefragmentationMove::dstMemory + VmaDefragmentationMove::dstOffset. + 2. Copy data from the VmaDefragmentationMove::srcAllocation e.g. 
using `vkCmdCopyBuffer`, `vkCmdCopyImage`. + 3. Make sure these commands finished executing on the GPU. + 4. Destroy the old buffer/image. + + Only then you can finish defragmentation pass by calling vmaEndDefragmentationPass(). + After this call, the allocation will point to the new place in memory. -\deprecated This is a part of the old interface. It is recommended to use structure #VmaDefragmentationInfo2 and function vmaDefragmentationBegin() instead. -*/ -typedef struct VmaDefragmentationInfo -{ - /** \brief Maximum total numbers of bytes that can be copied while moving allocations to different places. + Alternatively, if you cannot move specific allocation, you can set VmaDefragmentationMove::operation to #VMA_DEFRAGMENTATION_MOVE_OPERATION_IGNORE. - Default is `VK_WHOLE_SIZE`, which means no limit. - */ - VkDeviceSize maxBytesToMove; - /** \brief Maximum number of allocations that can be moved to different place. + Alternatively, if you decide you want to completely remove the allocation: - Default is `UINT32_MAX`, which means no limit. + 1. Destroy its buffer/image. + 2. Set VmaDefragmentationMove::operation to #VMA_DEFRAGMENTATION_MOVE_OPERATION_DESTROY. + + Then, after vmaEndDefragmentationPass() the allocation will be freed. */ - uint32_t maxAllocationsToMove; -} VmaDefragmentationInfo; + VmaDefragmentationMove* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(moveCount) pMoves; +} VmaDefragmentationPassMoveInfo; -/// Statistics returned by function vmaDefragment(). +/// Statistics returned for defragmentation process in function vmaEndDefragmentation(). typedef struct VmaDefragmentationStats { /// Total number of bytes that have been copied while moving allocations to different places. @@ -1484,7 +1519,7 @@ typedef struct VmaVirtualAllocationCreateInfo typedef struct VmaVirtualAllocationInfo { /** \brief Offset of the allocation. - + Offset at which the allocation was made. */ VkDeviceSize offset; @@ -1572,23 +1607,24 @@ VMA_CALL_PRE void VMA_CALL_POST vmaSetCurrentFrameIndex( /** \brief Retrieves statistics from current state of the Allocator. This function is called "calculate" not "get" because it has to traverse all -internal data structures, so it may be quite slow. For faster but more brief statistics -suitable to be called every frame or every allocation, use vmaGetHeapBudgets(). +internal data structures, so it may be quite slow. Use it for debugging purposes. +For faster but more brief statistics suitable to be called every frame or every allocation, +use vmaGetHeapBudgets(). Note that when using allocator from multiple threads, returned information may immediately become outdated. */ -VMA_CALL_PRE void VMA_CALL_POST vmaCalculateStats( +VMA_CALL_PRE void VMA_CALL_POST vmaCalculateStatistics( VmaAllocator VMA_NOT_NULL allocator, - VmaStats* VMA_NOT_NULL pStats); + VmaTotalStatistics* VMA_NOT_NULL pStats); -/** \brief Retrieves information about current memory budget for all memory heaps. +/** \brief Retrieves information about current memory usage and budget for all memory heaps. \param allocator \param[out] pBudgets Must point to array with number of elements at least equal to number of memory heaps in physical device used. This function is called "get" not "calculate" because it is very fast, suitable to be called -every frame or every allocation. For more detailed statistics use vmaCalculateStats(). +every frame or every allocation. For more detailed statistics use vmaCalculateStatistics(). 
Note that when using allocator from multiple threads, returned information may immediately become outdated. @@ -1631,12 +1667,6 @@ VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndex( It can be useful e.g. to determine value to be used as VmaPoolCreateInfo::memoryTypeIndex. It internally creates a temporary, dummy buffer that never has memory bound. -It is just a convenience function, equivalent to calling: - -- `vkCreateBuffer` -- `vkGetBufferMemoryRequirements` -- `vmaFindMemoryTypeIndex` -- `vkDestroyBuffer` */ VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForBufferInfo( VmaAllocator VMA_NOT_NULL allocator, @@ -1649,12 +1679,6 @@ VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForBufferInfo( It can be useful e.g. to determine value to be used as VmaPoolCreateInfo::memoryTypeIndex. It internally creates a temporary, dummy image that never has memory bound. -It is just a convenience function, equivalent to calling: - -- `vkCreateImage` -- `vkGetImageMemoryRequirements` -- `vmaFindMemoryTypeIndex` -- `vkDestroyImage` */ VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForImageInfo( VmaAllocator VMA_NOT_NULL allocator, @@ -1692,10 +1716,21 @@ VMA_CALL_PRE void VMA_CALL_POST vmaDestroyPool( \param pool Pool object. \param[out] pPoolStats Statistics of specified pool. */ -VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolStats( +VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolStatistics( VmaAllocator VMA_NOT_NULL allocator, VmaPool VMA_NOT_NULL pool, - VmaPoolStats* VMA_NOT_NULL pPoolStats); + VmaStatistics* VMA_NOT_NULL pPoolStats); + +/** \brief Retrieves detailed statistics of existing #VmaPool object. + +\param allocator Allocator object. +\param pool Pool object. +\param[out] pPoolStats Statistics of specified pool. +*/ +VMA_CALL_PRE void VMA_CALL_POST vmaCalculatePoolStatistics( + VmaAllocator VMA_NOT_NULL allocator, + VmaPool VMA_NOT_NULL pool, + VmaDetailedStatistics* VMA_NOT_NULL pPoolStats); /** @} */ @@ -2056,103 +2091,66 @@ VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckCorruption( \param allocator Allocator object. \param pInfo Structure filled with parameters of defragmentation. -\param[out] pStats Optional. Statistics of defragmentation. You can pass null if you are not interested in this information. -\param[out] pContext Context object that must be passed to vmaDefragmentationEnd() to finish defragmentation. -\return `VK_SUCCESS` and `*pContext == null` if defragmentation finished within this function call. `VK_NOT_READY` and `*pContext != null` if defragmentation has been started and you need to call vmaDefragmentationEnd() to finish it. Negative value in case of error. - -Use this function instead of old, deprecated vmaDefragment(). - -Warning! Between the call to vmaDefragmentationBegin() and vmaDefragmentationEnd(): - -- You should not use any of allocations passed as `pInfo->pAllocations` or - any allocations that belong to pools passed as `pInfo->pPools`, - including calling vmaGetAllocationInfo(), or access - their data. -- Some mutexes protecting internal data structures may be locked, so trying to - make or free any allocations, bind buffers or images, map memory, or launch - another simultaneous defragmentation in between may cause stall (when done on - another thread) or deadlock (when done on the same thread), unless you are - 100% sure that defragmented allocations are in different pools. -- Information returned via `pStats` and `pInfo->pAllocationsChanged` are undefined. - They become valid after call to vmaDefragmentationEnd(). 
-- If `pInfo->commandBuffer` is not null, you must submit that command buffer - and make sure it finished execution before calling vmaDefragmentationEnd(). - -For more information and important limitations regarding defragmentation, see documentation chapter: +\param[out] pContext Context object that must be passed to vmaEndDefragmentation() to finish defragmentation. + +For more information about defragmentation, see documentation chapter: [Defragmentation](@ref defragmentation). */ -VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragmentationBegin( +VMA_CALL_PRE VkResult VMA_CALL_POST vmaBeginDefragmentation( VmaAllocator VMA_NOT_NULL allocator, - const VmaDefragmentationInfo2* VMA_NOT_NULL pInfo, - VmaDefragmentationStats* VMA_NULLABLE pStats, + const VmaDefragmentationInfo* VMA_NOT_NULL pInfo, VmaDefragmentationContext VMA_NULLABLE* VMA_NOT_NULL pContext); /** \brief Ends defragmentation process. -Use this function to finish defragmentation started by vmaDefragmentationBegin(). -It is safe to pass `context == null`. The function then does nothing. +\param allocator Allocator object. +\param context Context object that has been created by vmaBeginDefragmentation(). +\param[out] pStats Optional stats for the defragmentation. Can be null. + +Use this function to finish defragmentation started by vmaBeginDefragmentation(). */ -VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragmentationEnd( +VMA_CALL_PRE VkResult VMA_CALL_POST vmaEndDefragmentation( VmaAllocator VMA_NOT_NULL allocator, - VmaDefragmentationContext VMA_NULLABLE context); + VmaDefragmentationContext VMA_NOT_NULL context, + VmaDefragmentationStats* VMA_NULLABLE pStats); + +/** \brief Starts single defragmentation pass. +\param allocator Allocator object. +\param context Context object that has been created by vmaBeginDefragmentation(). +\param[out] pPassInfo Computed informations for current pass. +\returns +- `VK_SUCCESS` if no more moves are possible. Then you can omit call to vmaEndDefragmentationPass() and simply end whole defragmentation. +- `VK_INCOMPLETE` if there are pending moves returned in `pPassInfo`. You need to perform them, call vmaEndDefragmentationPass(), + and then preferably try another pass with vmaBeginDefragmentationPass(). +*/ VMA_CALL_PRE VkResult VMA_CALL_POST vmaBeginDefragmentationPass( VmaAllocator VMA_NOT_NULL allocator, - VmaDefragmentationContext VMA_NULLABLE context, - VmaDefragmentationPassInfo* VMA_NOT_NULL pInfo); + VmaDefragmentationContext VMA_NOT_NULL context, + VmaDefragmentationPassMoveInfo* VMA_NOT_NULL pPassInfo); -VMA_CALL_PRE VkResult VMA_CALL_POST vmaEndDefragmentationPass( - VmaAllocator VMA_NOT_NULL allocator, - VmaDefragmentationContext VMA_NULLABLE context); +/** \brief Ends single defragmentation pass. -/** \brief Deprecated. Compacts memory by moving allocations. +\param allocator Allocator object. +\param context Context object that has been created by vmaBeginDefragmentation(). +\param pPassInfo Computed informations for current pass filled by vmaBeginDefragmentationPass() and possibly modified by you. -\param allocator -\param pAllocations Array of allocations that can be moved during this compation. -\param allocationCount Number of elements in pAllocations and pAllocationsChanged arrays. -\param[out] pAllocationsChanged Array of boolean values that will indicate whether matching allocation in pAllocations array has been moved. This parameter is optional. Pass null if you don't need this information. -\param pDefragmentationInfo Configuration parameters. 
Optional - pass null to use default values. -\param[out] pDefragmentationStats Statistics returned by the function. Optional - pass null if you don't need this information. -\return `VK_SUCCESS` if completed, negative error code in case of error. - -\deprecated This is a part of the old interface. It is recommended to use structure #VmaDefragmentationInfo2 and function vmaDefragmentationBegin() instead. - -This function works by moving allocations to different places (different -`VkDeviceMemory` objects and/or different offsets) in order to optimize memory -usage. Only allocations that are in `pAllocations` array can be moved. All other -allocations are considered nonmovable in this call. Basic rules: - -- Only allocations made in memory types that have - `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT` and `VK_MEMORY_PROPERTY_HOST_COHERENT_BIT` - flags can be compacted. You may pass other allocations but it makes no sense - - these will never be moved. -- Custom pools created with #VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT or - #VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT flag are not defragmented. Allocations - passed to this function that come from such pools are ignored. -- Allocations created with #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT or - created as dedicated allocations for any other reason are also ignored. -- Both allocations made with or without #VMA_ALLOCATION_CREATE_MAPPED_BIT - flag can be compacted. If not persistently mapped, memory will be mapped - temporarily inside this function if needed. -- You must not pass same #VmaAllocation object multiple times in `pAllocations` array. - -The function also frees empty `VkDeviceMemory` blocks. - -Warning: This function may be time-consuming, so you shouldn't call it too often -(like after every resource creation/destruction). -You can call it on special occasions (like when reloading a game level or -when you just destroyed a lot of objects). Calling it every frame may be OK, but -you should measure that on your platform. - -For more information, see [Defragmentation](@ref defragmentation) chapter. +Returns `VK_SUCCESS` if no more moves are possible or `VK_INCOMPLETE` if more defragmentations are possible. + +Ends incremental defragmentation pass and commits all defragmentation moves from `pPassInfo`. +After this call: + +- Allocations at `pPassInfo[i].srcAllocation` that had `pPassInfo[i].operation ==` #VMA_DEFRAGMENTATION_MOVE_OPERATION_COPY + (which is the default) will be pointing to the new destination place. +- Allocation at `pPassInfo[i].srcAllocation` that had `pPassInfo[i].operation ==` #VMA_DEFRAGMENTATION_MOVE_OPERATION_DESTROY + will be freed. + +If no more moves are possible you can end whole defragmentation. */ -VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragment( +VMA_CALL_PRE VkResult VMA_CALL_POST vmaEndDefragmentationPass( VmaAllocator VMA_NOT_NULL allocator, - const VmaAllocation VMA_NOT_NULL* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pAllocations, - size_t allocationCount, - VkBool32* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) pAllocationsChanged, - const VmaDefragmentationInfo* VMA_NULLABLE pDefragmentationInfo, - VmaDefragmentationStats* VMA_NULLABLE pDefragmentationStats); + VmaDefragmentationContext VMA_NOT_NULL context, + VmaDefragmentationPassMoveInfo* VMA_NOT_NULL pPassInfo); /** \brief Binds buffer to allocation. 
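
Taken together, vmaBeginDefragmentation(), vmaBeginDefragmentationPass(), vmaEndDefragmentationPass() and vmaEndDefragmentation() replace the old vmaDefragmentationBegin()/vmaDefragmentationEnd()/vmaDefragment() interface with a pass-based loop. The following is a minimal sketch of driving that loop under stated assumptions: it is not code from this patch, the wrapper name DefragmentDefaultPools and its `allocator` parameter are hypothetical, `flags` is left at zero, and the application-specific recreate/copy of the resources bound to each moved allocation is reduced to comments.

// A sketch of the pass-based defragmentation loop declared above.
// Assumes #include "vk_mem_alloc.h" and a valid, already created VmaAllocator.
static void DefragmentDefaultPools(VmaAllocator allocator)
{
    VmaDefragmentationInfo defragInfo = {}; // flags = 0, pool = null (default pools), no per-pass limits

    VmaDefragmentationContext defragCtx;
    if (vmaBeginDefragmentation(allocator, &defragInfo, &defragCtx) != VK_SUCCESS)
        return;

    for (;;)
    {
        VmaDefragmentationPassMoveInfo pass = {};
        VkResult res = vmaBeginDefragmentationPass(allocator, defragCtx, &pass);
        if (res == VK_SUCCESS)
            break; // No more moves are possible - the whole process can end.

        // res == VK_INCOMPLETE: handle every move in pass.pMoves[0..pass.moveCount).
        for (uint32_t i = 0; i < pass.moveCount; ++i)
        {
            VmaDefragmentationMove& move = pass.pMoves[i];
            // Application-specific work: create the replacement buffer/image at
            // move.dstMemory + move.dstOffset, copy from move.srcAllocation
            // (e.g. vkCmdCopyBuffer), wait for the copy to finish on the GPU,
            // then destroy the old resource. Alternatively:
            //   move.operation = VMA_DEFRAGMENTATION_MOVE_OPERATION_IGNORE;  // cannot move this one now
            //   move.operation = VMA_DEFRAGMENTATION_MOVE_OPERATION_DESTROY; // resource destroyed, free its memory
            (void)move;
        }

        res = vmaEndDefragmentationPass(allocator, defragCtx, &pass);
        if (res == VK_SUCCESS)
            break; // All moves committed and nothing is left to defragment.
        // VK_INCOMPLETE: another pass may still improve the layout.
    }

    VmaDefragmentationStats stats = {};
    vmaEndDefragmentation(allocator, defragCtx, &stats); // pStats may also be null
}

Per the declarations above, VmaDefragmentationInfo::maxBytesPerPass and maxAllocationsPerPass (0 = no limit) bound how much work a single pass may request, so a caller that sets them can spread the loop body across frames instead of running it to completion in one go.
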
@@ -2408,10 +2406,21 @@ VMA_CALL_PRE void VMA_CALL_POST vmaSetVirtualAllocationUserData( void* VMA_NULLABLE pUserData); /** \brief Calculates and returns statistics about virtual allocations and memory usage in given #VmaVirtualBlock. + +This function is fast to call. For more detailed statistics, see vmaCalculateVirtualBlockStatistics(). +*/ +VMA_CALL_PRE void VMA_CALL_POST vmaGetVirtualBlockStatistics( + VmaVirtualBlock VMA_NOT_NULL virtualBlock, + VmaStatistics* VMA_NOT_NULL pStats); + +/** \brief Calculates and returns detailed statistics about virtual allocations and memory usage in given #VmaVirtualBlock. + +This function is slow to call. Use for debugging purposes. +For less detailed statistics, see vmaGetVirtualBlockStatistics(). */ -VMA_CALL_PRE void VMA_CALL_POST vmaCalculateVirtualBlockStats( +VMA_CALL_PRE void VMA_CALL_POST vmaCalculateVirtualBlockStatistics( VmaVirtualBlock VMA_NOT_NULL virtualBlock, - VmaStatInfo* VMA_NOT_NULL pStatInfo); + VmaDetailedStatistics* VMA_NOT_NULL pStats); /** @} */ @@ -2424,7 +2433,7 @@ VMA_CALL_PRE void VMA_CALL_POST vmaCalculateVirtualBlockStats( /** \brief Builds and returns a null-terminated string in JSON format with information about given #VmaVirtualBlock. \param virtualBlock Virtual block. \param[out] ppStatsString Returned string. -\param detailedMap Pass `VK_FALSE` to only obtain statistics as returned by vmaCalculateVirtualBlockStats(). Pass `VK_TRUE` to also obtain full list of allocations and free spaces. +\param detailedMap Pass `VK_FALSE` to only obtain statistics as returned by vmaCalculateVirtualBlockStatistics(). Pass `VK_TRUE` to also obtain full list of allocations and free spaces. Returned string must be freed using vmaFreeVirtualBlockStatsString(). */ @@ -2466,9 +2475,9 @@ VMA_CALL_PRE void VMA_CALL_POST vmaFreeStatsString( //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// -// +// // IMPLEMENTATION -// +// //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// @@ -2485,6 +2494,10 @@ VMA_CALL_PRE void VMA_CALL_POST vmaFreeStatsString( #include <cstring> #include <utility> +#ifdef _MSC_VER + #include <intrin.h> // For functions like __popcnt, _BitScanForward etc. +#endif + /******************************************************************************* CONFIGURATION SECTION @@ -2886,6 +2899,17 @@ If providing your own implementation, you need to implement a subset of std::ato #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024) #endif +/* +Mapping hysteresis is a logic that launches when vmaMapMemory/vmaUnmapMemory is called +or a persistently mapped allocation is created and destroyed several times in a row. +It keeps additional +1 mapping of a device memory block to prevent calling actual +vkMapMemory/vkUnmapMemory too many times, which may improve performance and help +tools like RenderDOc. 
+*/ +#ifndef VMA_MAPPING_HYSTERESIS_ENABLED + #define VMA_MAPPING_HYSTERESIS_ENABLED 1 +#endif + #ifndef VMA_CLASS_NO_COPY #define VMA_CLASS_NO_COPY(className) \ private: \ @@ -2913,10 +2937,17 @@ static const uint32_t VMA_CORRUPTION_DETECTION_MAGIC_VALUE = 0x7F84E666; static const uint32_t VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY = 0x00000040; static const uint32_t VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY = 0x00000080; static const uint32_t VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_COPY = 0x00020000; +static const uint32_t VK_IMAGE_CREATE_DISJOINT_BIT_COPY = 0x00000200; +static const int32_t VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT_COPY = 1000158000; static const uint32_t VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET = 0x10000000u; static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32; static const uint32_t VMA_VENDOR_ID_AMD = 4098; +// This one is tricky. Vulkan specification defines this code as available since +// Vulkan 1.0, but doesn't actually define it in Vulkan SDK earlier than 1.2.131. +// See pull request #207. +#define VK_ERROR_UNKNOWN_COPY ((VkResult)-13) + #if VMA_STATS_STRING_ENABLED // Correspond to values of enum VmaSuballocationType. @@ -3032,23 +3063,13 @@ typedef VmaList<VmaSuballocation, VmaStlAllocator<VmaSuballocation>> VmaSuballoc struct VmaAllocationRequest; class VmaBlockMetadata; -class VmaBlockMetadata_Generic; class VmaBlockMetadata_Linear; -class VmaBlockMetadata_Buddy; class VmaBlockMetadata_TLSF; class VmaBlockVector; -struct VmaDefragmentationMove; -class VmaDefragmentationAlgorithm; -class VmaDefragmentationAlgorithm_Generic; -class VmaDefragmentationAlgorithm_Fast; - struct VmaPoolListItemTraits; -struct VmaBlockDefragmentationContext; -class VmaBlockVectorDefragmentationContext; - struct VmaCurrentBudgetData; class VmaAllocationObjectAllocator; @@ -3056,7 +3077,7 @@ class VmaAllocationObjectAllocator; #endif // _VMA_FORWARD_DECLARATIONS -#ifndef _VMA_FUNCTIONS +#ifndef _VMA_FUNCTIONS // Returns number of bits set to 1 in (v). static inline uint32_t VmaCountBitsSet(uint32_t v) { @@ -3267,12 +3288,8 @@ static const char* VmaAlgorithmToStr(uint32_t algorithm) { case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT: return "Linear"; - case VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT: - return "Buddy"; - case VMA_POOL_CREATE_TLSF_ALGORITHM_BIT: - return "TLSF"; case 0: - return "Default"; + return "TLSF"; default: VMA_ASSERT(0); return ""; @@ -3497,6 +3514,150 @@ static inline void VmaPnextChainPushFront(MainT* mainStruct, NewT* newStruct) mainStruct->pNext = newStruct; } +// This is the main algorithm that guides the selection of a memory type best for an allocation - +// converts usage to required/preferred/not preferred flags. +static bool FindMemoryPreferences( + bool isIntegratedGPU, + const VmaAllocationCreateInfo& allocCreateInfo, + VkFlags bufImgUsage, // VkBufferCreateInfo::usage or VkImageCreateInfo::usage. UINT32_MAX if unknown. 
+ VkMemoryPropertyFlags& outRequiredFlags, + VkMemoryPropertyFlags& outPreferredFlags, + VkMemoryPropertyFlags& outNotPreferredFlags) +{ + outRequiredFlags = allocCreateInfo.requiredFlags; + outPreferredFlags = allocCreateInfo.preferredFlags; + outNotPreferredFlags = 0; + + switch(allocCreateInfo.usage) + { + case VMA_MEMORY_USAGE_UNKNOWN: + break; + case VMA_MEMORY_USAGE_GPU_ONLY: + if(!isIntegratedGPU || (outPreferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0) + { + outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT; + } + break; + case VMA_MEMORY_USAGE_CPU_ONLY: + outRequiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT; + break; + case VMA_MEMORY_USAGE_CPU_TO_GPU: + outRequiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT; + if(!isIntegratedGPU || (outPreferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0) + { + outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT; + } + break; + case VMA_MEMORY_USAGE_GPU_TO_CPU: + outRequiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT; + outPreferredFlags |= VK_MEMORY_PROPERTY_HOST_CACHED_BIT; + break; + case VMA_MEMORY_USAGE_CPU_COPY: + outNotPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT; + break; + case VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED: + outRequiredFlags |= VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT; + break; + case VMA_MEMORY_USAGE_AUTO: + case VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE: + case VMA_MEMORY_USAGE_AUTO_PREFER_HOST: + { + if(bufImgUsage == UINT32_MAX) + { + VMA_ASSERT(0 && "VMA_MEMORY_USAGE_AUTO* values can only be used with functions like vmaCreateBuffer, vmaCreateImage so that the details of the created resource are known."); + return false; + } + // This relies on values of VK_IMAGE_USAGE_TRANSFER* being the same VK_BUFFER_IMAGE_TRANSFER*. + const bool deviceAccess = (bufImgUsage & ~(VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_TRANSFER_SRC_BIT)) != 0; + const bool hostAccessSequentialWrite = (allocCreateInfo.flags & VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT) != 0; + const bool hostAccessRandom = (allocCreateInfo.flags & VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT) != 0; + const bool hostAccessAllowTransferInstead = (allocCreateInfo.flags & VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT) != 0; + const bool preferDevice = allocCreateInfo.usage == VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE; + const bool preferHost = allocCreateInfo.usage == VMA_MEMORY_USAGE_AUTO_PREFER_HOST; + + // CPU random access - e.g. a buffer written to or transferred from GPU to read back on CPU. + if(hostAccessRandom) + { + if(!isIntegratedGPU && deviceAccess && hostAccessAllowTransferInstead && !preferHost) + { + // Nice if it will end up in HOST_VISIBLE, but more importantly prefer DEVICE_LOCAL. + // Omitting HOST_VISIBLE here is intentional. + // In case there is DEVICE_LOCAL | HOST_VISIBLE | HOST_CACHED, it will pick that one. + // Otherwise, this will give same weight to DEVICE_LOCAL as HOST_VISIBLE | HOST_CACHED and select the former if occurs first on the list. + outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT; + } + else + { + // Always CPU memory, cached. + outRequiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT; + } + } + // CPU sequential write - may be CPU or host-visible GPU memory, uncached and write-combined. + else if(hostAccessSequentialWrite) + { + // Want uncached and write-combined. 
+ outNotPreferredFlags |= VK_MEMORY_PROPERTY_HOST_CACHED_BIT; + + if(!isIntegratedGPU && deviceAccess && hostAccessAllowTransferInstead && !preferHost) + { + outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT | VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT; + } + else + { + outRequiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT; + // Direct GPU access, CPU sequential write (e.g. a dynamic uniform buffer updated every frame) + if(deviceAccess) + { + // Could go to CPU memory or GPU BAR/unified. Up to the user to decide. If no preference, choose GPU memory. + if(preferHost) + outNotPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT; + else + outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT; + } + // GPU no direct access, CPU sequential write (e.g. an upload buffer to be transferred to the GPU) + else + { + // Could go to CPU memory or GPU BAR/unified. Up to the user to decide. If no preference, choose CPU memory. + if(preferDevice) + outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT; + else + outNotPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT; + } + } + } + // No CPU access + else + { + // GPU access, no CPU access (e.g. a color attachment image) - prefer GPU memory + if(deviceAccess) + { + outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT; + } + // No direct GPU access, no CPU access, just transfers. + // It may be staging copy intended for e.g. preserving image for next frame (then better GPU memory) or + // a "swap file" copy to free some GPU memory (then better CPU memory). + // Up to the user to decide. If no preferece, assume the former and choose GPU memory. + if(preferHost) + outNotPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT; + else + outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT; + } + break; + } + default: + VMA_ASSERT(0); + } + + // Avoid DEVICE_COHERENT unless explicitly requested. + if(((allocCreateInfo.requiredFlags | allocCreateInfo.preferredFlags) & + (VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY | VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY)) == 0) + { + outNotPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY; + } + + return true; +} + //////////////////////////////////////////////////////////////////////////////// // Memory allocation @@ -3635,65 +3796,60 @@ bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& } #endif // _VMA_FUNCTIONS -#ifndef _VMA_STAT_INFO_FUNCTIONS -static void VmaInitStatInfo(VmaStatInfo& outInfo) +#ifndef _VMA_STATISTICS_FUNCTIONS + +static void VmaClearStatistics(VmaStatistics& outStats) { - memset(&outInfo, 0, sizeof(outInfo)); - outInfo.allocationSizeMin = UINT64_MAX; - outInfo.unusedRangeSizeMin = UINT64_MAX; + outStats.blockCount = 0; + outStats.allocationCount = 0; + outStats.blockBytes = 0; + outStats.allocationBytes = 0; } -// Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo. 
-static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo) +static void VmaAddStatistics(VmaStatistics& inoutStats, const VmaStatistics& src) { - inoutInfo.blockCount += srcInfo.blockCount; - inoutInfo.allocationCount += srcInfo.allocationCount; - inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount; - inoutInfo.usedBytes += srcInfo.usedBytes; - inoutInfo.unusedBytes += srcInfo.unusedBytes; - inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin); - inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax); - inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin); - inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax); + inoutStats.blockCount += src.blockCount; + inoutStats.allocationCount += src.allocationCount; + inoutStats.blockBytes += src.blockBytes; + inoutStats.allocationBytes += src.allocationBytes; } -static void VmaAddStatInfoAllocation(VmaStatInfo& inoutInfo, VkDeviceSize size) +static void VmaClearDetailedStatistics(VmaDetailedStatistics& outStats) { - ++inoutInfo.allocationCount; - inoutInfo.usedBytes += size; - if (size < inoutInfo.allocationSizeMin) - { - inoutInfo.allocationSizeMin = size; - } - if (size > inoutInfo.allocationSizeMax) - { - inoutInfo.allocationSizeMax = size; - } + VmaClearStatistics(outStats.statistics); + outStats.unusedRangeCount = 0; + outStats.allocationSizeMin = VK_WHOLE_SIZE; + outStats.allocationSizeMax = 0; + outStats.unusedRangeSizeMin = VK_WHOLE_SIZE; + outStats.unusedRangeSizeMax = 0; } -static void VmaAddStatInfoUnusedRange(VmaStatInfo& inoutInfo, VkDeviceSize size) +static void VmaAddDetailedStatisticsAllocation(VmaDetailedStatistics& inoutStats, VkDeviceSize size) { - ++inoutInfo.unusedRangeCount; - inoutInfo.unusedBytes += size; - if (size < inoutInfo.unusedRangeSizeMin) - { - inoutInfo.unusedRangeSizeMin = size; - } - if (size > inoutInfo.unusedRangeSizeMax) - { - inoutInfo.unusedRangeSizeMax = size; - } + inoutStats.statistics.allocationCount++; + inoutStats.statistics.allocationBytes += size; + inoutStats.allocationSizeMin = VMA_MIN(inoutStats.allocationSizeMin, size); + inoutStats.allocationSizeMax = VMA_MAX(inoutStats.allocationSizeMax, size); +} + +static void VmaAddDetailedStatisticsUnusedRange(VmaDetailedStatistics& inoutStats, VkDeviceSize size) +{ + inoutStats.unusedRangeCount++; + inoutStats.unusedRangeSizeMin = VMA_MIN(inoutStats.unusedRangeSizeMin, size); + inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, size); } -static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo) +static void VmaAddDetailedStatistics(VmaDetailedStatistics& inoutStats, const VmaDetailedStatistics& src) { - inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ? - VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0; - inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ? 
- VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0; + VmaAddStatistics(inoutStats.statistics, src.statistics); + inoutStats.unusedRangeCount += src.unusedRangeCount; + inoutStats.allocationSizeMin = VMA_MIN(inoutStats.allocationSizeMin, src.allocationSizeMin); + inoutStats.allocationSizeMax = VMA_MAX(inoutStats.allocationSizeMax, src.allocationSizeMax); + inoutStats.unusedRangeSizeMin = VMA_MIN(inoutStats.unusedRangeSizeMin, src.unusedRangeSizeMin); + inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, src.unusedRangeSizeMax); } -#endif // _VMA_STAT_INFO_FUNCTIONS +#endif // _VMA_STATISTICS_FUNCTIONS #ifndef _VMA_MUTEX_LOCK // Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope). @@ -4089,8 +4245,9 @@ VmaSmallVector<T, AllocatorT, N>::VmaSmallVector(size_t count, const AllocatorT& template<typename T, typename AllocatorT, size_t N> void VmaSmallVector<T, AllocatorT, N>::push_back(const T& src) { - resize(m_Count + 1); - data()[m_Count] = src; + const size_t newIndex = size(); + resize(newIndex + 1); + data()[newIndex] = src; } template<typename T, typename AllocatorT, size_t N> @@ -4589,6 +4746,7 @@ public: class iterator { + friend class const_iterator; friend class VmaList<T, AllocatorT>; public: iterator() : m_pList(VMA_NULL), m_pItem(VMA_NULL) {} @@ -4614,6 +4772,7 @@ public: }; class reverse_iterator { + friend class const_reverse_iterator; friend class VmaList<T, AllocatorT>; public: reverse_iterator() : m_pList(VMA_NULL), m_pItem(VMA_NULL) {} @@ -4645,6 +4804,8 @@ public: const_iterator(const iterator& src) : m_pList(src.m_pList), m_pItem(src.m_pItem) {} const_iterator(const reverse_iterator& src) : m_pList(src.m_pList), m_pItem(src.m_pItem) {} + iterator drop_const() { return { const_cast<VmaRawList<T>*>(m_pList), const_cast<VmaListItem<T>*>(m_pItem) }; } + const T& operator*() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return m_pItem->Value; } const T* operator->() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return &m_pItem->Value; } @@ -4671,6 +4832,8 @@ public: const_reverse_iterator(const reverse_iterator& src) : m_pList(src.m_pList), m_pItem(src.m_pItem) {} const_reverse_iterator(const iterator& src) : m_pList(src.m_pList), m_pItem(src.m_pItem) {} + reverse_iterator drop_const() { return { const_cast<VmaRawList<T>*>(m_pList), const_cast<VmaListItem<T>*>(m_pItem) }; } + const T& operator*() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return m_pItem->Value; } const T* operator->() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return &m_pItem->Value; } @@ -4707,8 +4870,8 @@ public: reverse_iterator rbegin() { return reverse_iterator(&m_RawList, m_RawList.Back()); } reverse_iterator rend() { return reverse_iterator(&m_RawList, VMA_NULL); } - const_reverse_iterator crbegin() { return const_reverse_iterator(&m_RawList, m_RawList.Back()); } - const_reverse_iterator crend() { return const_reverse_iterator(&m_RawList, VMA_NULL); } + const_reverse_iterator crbegin() const { return const_reverse_iterator(&m_RawList, m_RawList.Back()); } + const_reverse_iterator crend() const { return const_reverse_iterator(&m_RawList, VMA_NULL); } const_reverse_iterator rbegin() const { return crbegin(); } const_reverse_iterator rend() const { return crend(); } @@ -4813,7 +4976,7 @@ public: VmaIntrusiveLinkedList& operator=(VmaIntrusiveLinkedList&& src); VmaIntrusiveLinkedList& operator=(const VmaIntrusiveLinkedList&) = delete; ~VmaIntrusiveLinkedList() { VMA_HEAVY_ASSERT(IsEmpty()); } - + size_t 
GetCount() const { return m_Count; } bool IsEmpty() const { return m_Count == 0; } ItemType* Front() { return m_Front; } @@ -5077,13 +5240,14 @@ public: iterator begin() { return m_Vector.begin(); } iterator end() { return m_Vector.end(); } + size_t size() { return m_Vector.size(); } void insert(const PairType& pair); iterator find(const KeyT& key); void erase(iterator it); private: - VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector; + VmaVector< PairType, VmaStlAllocator<PairType>> m_Vector; }; #ifndef _VMA_MAP_FUNCTIONS @@ -5224,7 +5388,7 @@ public: // Writes a string value inside "". // pStr can contain any ANSI characters, including '"', new line etc. - they will be properly escaped. void WriteString(const char* pStr); - + // Begins writing a string value. // Call BeginString, ContinueString, ContinueString, ..., EndString instead of // WriteString to conveniently build the string content incrementally, made of @@ -5503,33 +5667,31 @@ void VmaJsonWriter::WriteIndent(bool oneLess) } #endif // _VMA_JSON_WRITER_FUNCTIONS -static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat) +static void VmaPrintDetailedStatistics(VmaJsonWriter& json, const VmaDetailedStatistics& stat) { json.BeginObject(); - json.WriteString("Blocks"); - json.WriteNumber(stat.blockCount); + json.WriteString("BlockCount"); + json.WriteNumber(stat.statistics.blockCount); - json.WriteString("Allocations"); - json.WriteNumber(stat.allocationCount); + json.WriteString("AllocationCount"); + json.WriteNumber(stat.statistics.allocationCount); - json.WriteString("UnusedRanges"); + json.WriteString("UnusedRangeCount"); json.WriteNumber(stat.unusedRangeCount); - json.WriteString("UsedBytes"); - json.WriteNumber(stat.usedBytes); + json.WriteString("BlockBytes"); + json.WriteNumber(stat.statistics.blockBytes); - json.WriteString("UnusedBytes"); - json.WriteNumber(stat.unusedBytes); + json.WriteString("AllocationBytes"); + json.WriteNumber(stat.statistics.allocationBytes); - if (stat.allocationCount > 1) + if (stat.statistics.allocationCount > 1) { json.WriteString("AllocationSize"); json.BeginObject(true); json.WriteString("Min"); json.WriteNumber(stat.allocationSizeMin); - json.WriteString("Avg"); - json.WriteNumber(stat.allocationSizeAvg); json.WriteString("Max"); json.WriteNumber(stat.allocationSizeMax); json.EndObject(); @@ -5541,8 +5703,6 @@ static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat) json.BeginObject(true); json.WriteString("Min"); json.WriteNumber(stat.unusedRangeSizeMin); - json.WriteString("Avg"); - json.WriteNumber(stat.unusedRangeSizeAvg); json.WriteString("Max"); json.WriteNumber(stat.unusedRangeSizeMax); json.EndObject(); @@ -5552,12 +5712,109 @@ static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat) } #endif // _VMA_JSON_WRITER +#ifndef _VMA_MAPPING_HYSTERESIS + +class VmaMappingHysteresis +{ + VMA_CLASS_NO_COPY(VmaMappingHysteresis) +public: + VmaMappingHysteresis() = default; + + uint32_t GetExtraMapping() const { return m_ExtraMapping; } + + // Call when Map was called. + // Returns true if switched to extra +1 mapping reference count. + bool PostMap() + { +#if VMA_MAPPING_HYSTERESIS_ENABLED + if(m_ExtraMapping == 0) + { + ++m_MajorCounter; + if(m_MajorCounter >= COUNTER_MIN_EXTRA_MAPPING) + { + m_ExtraMapping = 1; + m_MajorCounter = 0; + m_MinorCounter = 0; + return true; + } + } + else // m_ExtraMapping == 1 + PostMinorCounter(); +#endif // #if VMA_MAPPING_HYSTERESIS_ENABLED + return false; + } + + // Call when Unmap was called. 
+ void PostUnmap() + { +#if VMA_MAPPING_HYSTERESIS_ENABLED + if(m_ExtraMapping == 0) + ++m_MajorCounter; + else // m_ExtraMapping == 1 + PostMinorCounter(); +#endif // #if VMA_MAPPING_HYSTERESIS_ENABLED + } + + // Call when allocation was made from the memory block. + void PostAlloc() + { +#if VMA_MAPPING_HYSTERESIS_ENABLED + if(m_ExtraMapping == 1) + ++m_MajorCounter; + else // m_ExtraMapping == 0 + PostMinorCounter(); +#endif // #if VMA_MAPPING_HYSTERESIS_ENABLED + } + + // Call when allocation was freed from the memory block. + // Returns true if switched to extra -1 mapping reference count. + bool PostFree() + { +#if VMA_MAPPING_HYSTERESIS_ENABLED + if(m_ExtraMapping == 1) + { + ++m_MajorCounter; + if(m_MajorCounter >= COUNTER_MIN_EXTRA_MAPPING && + m_MajorCounter > m_MinorCounter + 1) + { + m_ExtraMapping = 0; + m_MajorCounter = 0; + m_MinorCounter = 0; + return true; + } + } + else // m_ExtraMapping == 0 + PostMinorCounter(); +#endif // #if VMA_MAPPING_HYSTERESIS_ENABLED + return false; + } + +private: + static const int32_t COUNTER_MIN_EXTRA_MAPPING = 7; + + uint32_t m_MinorCounter = 0; + uint32_t m_MajorCounter = 0; + uint32_t m_ExtraMapping = 0; // 0 or 1. + + void PostMinorCounter() + { + if(m_MinorCounter < m_MajorCounter) + ++m_MinorCounter; + else if(m_MajorCounter > 0) + --m_MajorCounter, --m_MinorCounter; + } +}; + +#endif // _VMA_MAPPING_HYSTERESIS + #ifndef _VMA_DEVICE_MEMORY_BLOCK /* Represents a single block of device memory (`VkDeviceMemory`) with all the data about its regions (aka suballocations, #VmaAllocation), assigned and free. -Thread-safety: This class must be externally synchronized. +Thread-safety: +- Access to m_pMetadata must be externally synchronized. +- Map, Unmap, Bind* are synchronized internally. */ class VmaDeviceMemoryBlock { @@ -5586,6 +5843,12 @@ public: uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; } uint32_t GetId() const { return m_Id; } void* GetMappedData() const { return m_pMappedData; } + uint32_t GetMapRefCount() const { return m_MapCount; } + + // Call when allocation/free was made from m_pMetadata. + // Used for m_MappingHysteresis. + void PostAlloc() { m_MappingHysteresis.PostAlloc(); } + void PostFree(VmaAllocator hAllocator); // Validates all data structures inside this object. If not valid, returns false. bool Validate() const; @@ -5622,7 +5885,8 @@ private: Also protects m_MapCount, m_pMappedData. Allocations, deallocations, any change in m_pMetadata is protected by parent's VmaBlockVector::m_Mutex. */ - VMA_MUTEX m_Mutex; + VMA_MUTEX m_MapAndBindMutex; + VmaMappingHysteresis m_MappingHysteresis; uint32_t m_MapCount; void* m_pMappedData; }; @@ -5633,9 +5897,12 @@ struct VmaAllocation_T { friend struct VmaDedicatedAllocationListItemTraits; - static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80; - - enum FLAGS { FLAG_USER_DATA_STRING = 0x01 }; + enum FLAGS + { + FLAG_USER_DATA_STRING = 0x01, + FLAG_PERSISTENT_MAP = 0x02, + FLAG_MAPPING_ALLOWED = 0x04, + }; public: enum ALLOCATION_TYPE @@ -5646,7 +5913,7 @@ public: }; // This struct is allocated using VmaPoolAllocator. 
- VmaAllocation_T(bool userDataString); + VmaAllocation_T(bool userDataString, bool mappingAllowed); ~VmaAllocation_T(); void InitBlockAllocation( @@ -5675,19 +5942,17 @@ public: VmaDeviceMemoryBlock* GetBlock() const { VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK); return m_BlockAllocation.m_Block; } uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; } - bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; } + bool IsPersistentMap() const { return (m_Flags & FLAG_PERSISTENT_MAP) != 0; } + bool IsMappingAllowed() const { return (m_Flags & FLAG_MAPPING_ALLOWED) != 0; } void SetUserData(VmaAllocator hAllocator, void* pUserData); - void ChangeBlockAllocation(VmaAllocator hAllocator, VmaDeviceMemoryBlock* block, VmaAllocHandle allocHandle); - void ChangeAllocHandle(VmaAllocHandle newAllocHandle); + void SwapBlockAllocation(VmaAllocation allocation); VmaAllocHandle GetAllocHandle() const; VkDeviceSize GetOffset() const; VmaPool GetParentPool() const; VkDeviceMemory GetMemory() const; void* GetMappedData() const; - void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo); - void BlockAllocMap(); void BlockAllocUnmap(); VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData); @@ -5730,8 +5995,7 @@ private: uint32_t m_MemoryTypeIndex; uint8_t m_Type; // ALLOCATION_TYPE uint8_t m_SuballocationType; // VmaSuballocationType - // Bit 0x80 is set when allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT. - // Bits with mask 0x7F are reference counter for vmaMapMemory()/vmaUnmapMemory(). + // Reference counter for vmaMapMemory()/vmaUnmapMemory(). uint8_t m_MapCount; uint8_t m_Flags; // enum FLAGS #if VMA_STATS_STRING_ENABLED @@ -5784,8 +6048,8 @@ public: void Init(bool useMutex) { m_UseMutex = useMutex; } bool Validate(); - void AddStats(VmaStats* stats, uint32_t memTypeIndex, uint32_t memHeapIndex); - void AddPoolStats(VmaPoolStats* stats); + void AddDetailedStatistics(VmaDetailedStatistics& inoutStats); + void AddStatistics(VmaStatistics& inoutStats); #if VMA_STATS_STRING_ENABLED // Writes JSON array with the list of allocations. 
void BuildStatsString(VmaJsonWriter& json); @@ -5830,31 +6094,30 @@ bool VmaDedicatedAllocationList::Validate() return true; } -void VmaDedicatedAllocationList::AddStats(VmaStats* stats, uint32_t memTypeIndex, uint32_t memHeapIndex) +void VmaDedicatedAllocationList::AddDetailedStatistics(VmaDetailedStatistics& inoutStats) { - VmaMutexLockRead lock(m_Mutex, m_UseMutex); - for (VmaAllocation alloc = m_AllocationList.Front(); - alloc != VMA_NULL; alloc = m_AllocationList.GetNext(alloc)) + for(auto* item = m_AllocationList.Front(); item != nullptr; item = DedicatedAllocationLinkedList::GetNext(item)) { - VmaStatInfo allocationStatInfo; - alloc->DedicatedAllocCalcStatsInfo(allocationStatInfo); - VmaAddStatInfo(stats->total, allocationStatInfo); - VmaAddStatInfo(stats->memoryType[memTypeIndex], allocationStatInfo); - VmaAddStatInfo(stats->memoryHeap[memHeapIndex], allocationStatInfo); + const VkDeviceSize size = item->GetSize(); + inoutStats.statistics.blockCount++; + inoutStats.statistics.blockBytes += size; + VmaAddDetailedStatisticsAllocation(inoutStats, item->GetSize()); } } -void VmaDedicatedAllocationList::AddPoolStats(VmaPoolStats* stats) +void VmaDedicatedAllocationList::AddStatistics(VmaStatistics& inoutStats) { VmaMutexLockRead lock(m_Mutex, m_UseMutex); - const size_t allocCount = m_AllocationList.GetCount(); - stats->allocationCount += allocCount; - stats->blockCount += allocCount; + const uint32_t allocCount = (uint32_t)m_AllocationList.GetCount(); + inoutStats.blockCount += allocCount; + inoutStats.allocationCount += allocCount; for(auto* item = m_AllocationList.Front(); item != nullptr; item = DedicatedAllocationLinkedList::GetNext(item)) { - stats->size += item->GetSize(); + const VkDeviceSize size = item->GetSize(); + inoutStats.blockBytes += size; + inoutStats.allocationBytes += size; } } @@ -5981,14 +6244,18 @@ public: virtual bool IsEmpty() const = 0; virtual void GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo) = 0; virtual VkDeviceSize GetAllocationOffset(VmaAllocHandle allocHandle) const = 0; + virtual void* GetAllocationUserData(VmaAllocHandle allocHandle) const = 0; + + virtual VmaAllocHandle GetAllocationListBegin() const = 0; + virtual VmaAllocHandle GetNextAllocation(VmaAllocHandle prevAlloc) const = 0; - // Must set blockCount to 1. - virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const = 0; // Shouldn't modify blockCount. - virtual void AddPoolStats(VmaPoolStats& inoutStats) const = 0; + virtual void AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const = 0; + virtual void AddStatistics(VmaStatistics& inoutStats) const = 0; #if VMA_STATS_STRING_ENABLED - virtual void PrintDetailedMap(class VmaJsonWriter& json) const = 0; + // mapRefCount == UINT32_MAX means unspecified. + virtual void PrintDetailedMap(class VmaJsonWriter& json, uint32_t mapRefCount) const = 0; #endif // Tries to find a place for suballocation with given parameters inside this block. @@ -6028,10 +6295,12 @@ protected: void DebugLogAllocation(VkDeviceSize offset, VkDeviceSize size, void* userData) const; #if VMA_STATS_STRING_ENABLED + // mapRefCount == UINT32_MAX means unspecified. 
void PrintDetailedMap_Begin(class VmaJsonWriter& json, VkDeviceSize unusedBytes, size_t allocationCount, - size_t unusedRangeCount) const; + size_t unusedRangeCount, + uint32_t mapRefCount) const; void PrintDetailedMap_Allocation(class VmaJsonWriter& json, VkDeviceSize offset, VkDeviceSize size, void* userData) const; void PrintDetailedMap_UnusedRange(class VmaJsonWriter& json, @@ -6098,12 +6367,12 @@ void VmaBlockMetadata::DebugLogAllocation(VkDeviceSize offset, VkDeviceSize size } #endif // VMA_STATS_STRING_ENABLED } - + } #if VMA_STATS_STRING_ENABLED void VmaBlockMetadata::PrintDetailedMap_Begin(class VmaJsonWriter& json, - VkDeviceSize unusedBytes, size_t allocationCount, size_t unusedRangeCount) const + VkDeviceSize unusedBytes, size_t allocationCount, size_t unusedRangeCount, uint32_t mapRefCount) const { json.BeginObject(); @@ -6119,6 +6388,12 @@ void VmaBlockMetadata::PrintDetailedMap_Begin(class VmaJsonWriter& json, json.WriteString("UnusedRanges"); json.WriteNumber((uint64_t)unusedRangeCount); + if(mapRefCount != UINT32_MAX) + { + json.WriteString("MapRefCount"); + json.WriteNumber(mapRefCount); + } + json.WriteString("Suballocations"); json.BeginArray(); } @@ -6408,7 +6683,7 @@ uint32_t VmaBlockBufferImageGranularity::OffsetToPageIndex(VkDeviceSize offset) void VmaBlockBufferImageGranularity::AllocPage(RegionInfo& page, uint8_t allocType) { // When current alloc type is free then it can be overriden by new type - if (page.allocCount == 0 || page.allocCount > 0 && page.allocType == VMA_SUBALLOCATION_TYPE_FREE) + if (page.allocCount == 0 || (page.allocCount > 0 && page.allocType == VMA_SUBALLOCATION_TYPE_FREE)) page.allocType = allocType; ++page.allocCount; @@ -6416,6 +6691,7 @@ void VmaBlockBufferImageGranularity::AllocPage(RegionInfo& page, uint8_t allocTy #endif // _VMA_BLOCK_BUFFER_IMAGE_GRANULARITY_FUNCTIONS #endif // _VMA_BLOCK_BUFFER_IMAGE_GRANULARITY +#if 0 #ifndef _VMA_BLOCK_METADATA_GENERIC class VmaBlockMetadata_Generic : public VmaBlockMetadata { @@ -6436,11 +6712,11 @@ public: void Init(VkDeviceSize size) override; bool Validate() const override; - void CalcAllocationStatInfo(VmaStatInfo& outInfo) const override; - void AddPoolStats(VmaPoolStats& inoutStats) const override; + void AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const override; + void AddStatistics(VmaStatistics& inoutStats) const override; #if VMA_STATS_STRING_ENABLED - void PrintDetailedMap(class VmaJsonWriter& json) const override; + void PrintDetailedMap(class VmaJsonWriter& json, uint32_t mapRefCount) const override; #endif bool CreateAllocationRequest( @@ -6459,15 +6735,13 @@ public: void* userData) override; void GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo) override; + void* GetAllocationUserData(VmaAllocHandle allocHandle) const override; + VmaAllocHandle GetAllocationListBegin() const override; + VmaAllocHandle GetNextAllocation(VmaAllocHandle prevAlloc) const override; void Clear() override; void SetAllocationUserData(VmaAllocHandle allocHandle, void* userData) override; void DebugLogAllAllocations() const override; - // For defragmentation - bool IsBufferImageGranularityConflictPossible( - VkDeviceSize bufferImageGranularity, - VmaSuballocationType& inOutPrevSuballocType) const; - private: uint32_t m_FreeCount; VkDeviceSize m_SumFreeSize; @@ -6477,7 +6751,7 @@ private: VkDeviceSize AlignAllocationSize(VkDeviceSize size) const { return IsVirtual() ? 
size : VmaAlignUp(size, (VkDeviceSize)16); } - VmaSuballocationList::iterator FindAtOffset(VkDeviceSize offset); + VmaSuballocationList::iterator FindAtOffset(VkDeviceSize offset) const; bool ValidateFreeSuballocationList() const; // Checks if requested suballocation with given parameters can be placed in given pFreeSuballocItem. @@ -6612,42 +6886,37 @@ bool VmaBlockMetadata_Generic::Validate() const return true; } -void VmaBlockMetadata_Generic::CalcAllocationStatInfo(VmaStatInfo& outInfo) const +void VmaBlockMetadata_Generic::AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const { const uint32_t rangeCount = (uint32_t)m_Suballocations.size(); - VmaInitStatInfo(outInfo); - outInfo.blockCount = 1; + inoutStats.statistics.blockCount++; + inoutStats.statistics.blockBytes += GetSize(); for (const auto& suballoc : m_Suballocations) { if (suballoc.type != VMA_SUBALLOCATION_TYPE_FREE) - { - VmaAddStatInfoAllocation(outInfo, suballoc.size); - } + VmaAddDetailedStatisticsAllocation(inoutStats, suballoc.size); else - { - VmaAddStatInfoUnusedRange(outInfo, suballoc.size); - } + VmaAddDetailedStatisticsUnusedRange(inoutStats, suballoc.size); } } -void VmaBlockMetadata_Generic::AddPoolStats(VmaPoolStats& inoutStats) const +void VmaBlockMetadata_Generic::AddStatistics(VmaStatistics& inoutStats) const { - const uint32_t rangeCount = (uint32_t)m_Suballocations.size(); - - inoutStats.size += GetSize(); - inoutStats.unusedSize += m_SumFreeSize; - inoutStats.allocationCount += rangeCount - m_FreeCount; - inoutStats.unusedRangeCount += m_FreeCount; + inoutStats.blockCount++; + inoutStats.allocationCount += (uint32_t)m_Suballocations.size() - m_FreeCount; + inoutStats.blockBytes += GetSize(); + inoutStats.allocationBytes += GetSize() - m_SumFreeSize; } #if VMA_STATS_STRING_ENABLED -void VmaBlockMetadata_Generic::PrintDetailedMap(class VmaJsonWriter& json) const +void VmaBlockMetadata_Generic::PrintDetailedMap(class VmaJsonWriter& json, uint32_t mapRefCount) const { PrintDetailedMap_Begin(json, m_SumFreeSize, // unusedBytes m_Suballocations.size() - (size_t)m_FreeCount, // allocationCount - m_FreeCount); // unusedRangeCount + m_FreeCount, // unusedRangeCount + mapRefCount); for (const auto& suballoc : m_Suballocations) { @@ -6740,7 +7009,7 @@ bool VmaBlockMetadata_Generic::CreateAllocationRequest( } else { - VMA_ASSERT(strategy == VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT); + VMA_ASSERT(strategy & (VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT | VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT )); // Search staring from biggest suballocations. 
for (size_t index = freeSuballocCount; index--; ) { @@ -6770,7 +7039,7 @@ VkResult VmaBlockMetadata_Generic::CheckCorruption(const void* pBlockData) if (!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size)) { VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!"); - return VK_ERROR_UNKNOWN; + return VK_ERROR_UNKNOWN_COPY; } } } @@ -6851,6 +7120,37 @@ void VmaBlockMetadata_Generic::GetAllocationInfo(VmaAllocHandle allocHandle, Vma outInfo.pUserData = suballoc.userData; } +void* VmaBlockMetadata_Generic::GetAllocationUserData(VmaAllocHandle allocHandle) const +{ + return FindAtOffset((VkDeviceSize)allocHandle - 1)->userData; +} + +VmaAllocHandle VmaBlockMetadata_Generic::GetAllocationListBegin() const +{ + if (IsEmpty()) + return VK_NULL_HANDLE; + + for (const auto& suballoc : m_Suballocations) + { + if (suballoc.type != VMA_SUBALLOCATION_TYPE_FREE) + return (VmaAllocHandle)(suballoc.offset + 1); + } + VMA_ASSERT(false && "Should contain at least 1 allocation!"); + return VK_NULL_HANDLE; +} + +VmaAllocHandle VmaBlockMetadata_Generic::GetNextAllocation(VmaAllocHandle prevAlloc) const +{ + VmaSuballocationList::const_iterator prev = FindAtOffset((VkDeviceSize)prevAlloc - 1); + + for (VmaSuballocationList::const_iterator it = ++prev; it != m_Suballocations.end(); ++it) + { + if (it->type != VMA_SUBALLOCATION_TYPE_FREE) + return (VmaAllocHandle)(it->offset + 1); + } + return VK_NULL_HANDLE; +} + void VmaBlockMetadata_Generic::Clear() { const VkDeviceSize size = GetSize(); @@ -6885,15 +7185,15 @@ void VmaBlockMetadata_Generic::DebugLogAllAllocations() const } } -VmaSuballocationList::iterator VmaBlockMetadata_Generic::FindAtOffset(VkDeviceSize offset) +VmaSuballocationList::iterator VmaBlockMetadata_Generic::FindAtOffset(VkDeviceSize offset) const { VMA_HEAVY_ASSERT(!m_Suballocations.empty()); const VkDeviceSize last = m_Suballocations.rbegin()->offset; if (last == offset) - return m_Suballocations.rbegin(); + return m_Suballocations.rbegin().drop_const(); const VkDeviceSize first = m_Suballocations.begin()->offset; if (first == offset) - return m_Suballocations.begin(); + return m_Suballocations.begin().drop_const(); const size_t suballocCount = m_Suballocations.size(); const VkDeviceSize step = (last - first + m_Suballocations.begin()->size) / suballocCount; @@ -6903,12 +7203,11 @@ VmaSuballocationList::iterator VmaBlockMetadata_Generic::FindAtOffset(VkDeviceSi suballocItem != end; ++suballocItem) { - VmaSuballocation& suballoc = *suballocItem; - if (suballoc.offset == offset) - return suballocItem; + if (suballocItem->offset == offset) + return suballocItem.drop_const(); } VMA_ASSERT(false && "Not found!"); - return m_Suballocations.end(); + return m_Suballocations.end().drop_const(); }; // If requested offset is closer to the end of range, search from the end if (offset - first > suballocCount * step / 2) @@ -7152,37 +7451,9 @@ void VmaBlockMetadata_Generic::UnregisterFreeSuballocation(VmaSuballocationList: //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList()); } - -bool VmaBlockMetadata_Generic::IsBufferImageGranularityConflictPossible( - VkDeviceSize bufferImageGranularity, - VmaSuballocationType& inOutPrevSuballocType) const -{ - if (bufferImageGranularity == 1 || IsEmpty() || IsVirtual()) - { - return false; - } - - VkDeviceSize minAlignment = VK_WHOLE_SIZE; - bool typeConflictFound = false; - for (const auto& suballoc : m_Suballocations) - { - const VmaSuballocationType suballocType = suballoc.type; - if (suballocType != VMA_SUBALLOCATION_TYPE_FREE) - { - 
VmaAllocation const alloc = (VmaAllocation)suballoc.userData; - minAlignment = VMA_MIN(minAlignment, alloc->GetAlignment()); - if (VmaIsBufferImageGranularityConflict(inOutPrevSuballocType, suballocType)) - { - typeConflictFound = true; - } - inOutPrevSuballocType = suballocType; - } - } - - return typeConflictFound || minAlignment >= bufferImageGranularity; -} #endif // _VMA_BLOCK_METADATA_GENERIC_FUNCTIONS #endif // _VMA_BLOCK_METADATA_GENERIC +#endif // #if 0 #ifndef _VMA_BLOCK_METADATA_LINEAR /* @@ -7279,11 +7550,11 @@ public: bool Validate() const override; size_t GetAllocationCount() const override; - void CalcAllocationStatInfo(VmaStatInfo& outInfo) const override; - void AddPoolStats(VmaPoolStats& inoutStats) const override; + void AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const override; + void AddStatistics(VmaStatistics& inoutStats) const override; #if VMA_STATS_STRING_ENABLED - void PrintDetailedMap(class VmaJsonWriter& json) const override; + void PrintDetailedMap(class VmaJsonWriter& json, uint32_t mapRefCount) const override; #endif bool CreateAllocationRequest( @@ -7303,6 +7574,9 @@ public: void Free(VmaAllocHandle allocHandle) override; void GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo) override; + void* GetAllocationUserData(VmaAllocHandle allocHandle) const override; + VmaAllocHandle GetAllocationListBegin() const override; + VmaAllocHandle GetNextAllocation(VmaAllocHandle prevAlloc) const override; void Clear() override; void SetAllocationUserData(VmaAllocHandle allocHandle, void* userData) override; void DebugLogAllAllocations() const override; @@ -7349,7 +7623,7 @@ private: const SuballocationVectorType& AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; } const SuballocationVectorType& AccessSuballocations2nd() const { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; } - VmaSuballocation& FindSuballocation(VkDeviceSize offset); + VmaSuballocation& FindSuballocation(VkDeviceSize offset) const; bool ShouldCompact1st() const; void CleanupAfterFree(); @@ -7541,7 +7815,7 @@ size_t VmaBlockMetadata_Linear::GetAllocationCount() const AccessSuballocations2nd().size() - m_2ndNullItemsCount; } -void VmaBlockMetadata_Linear::CalcAllocationStatInfo(VmaStatInfo& outInfo) const +void VmaBlockMetadata_Linear::AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const { const VkDeviceSize size = GetSize(); const SuballocationVectorType& suballocations1st = AccessSuballocations1st(); @@ -7549,8 +7823,8 @@ void VmaBlockMetadata_Linear::CalcAllocationStatInfo(VmaStatInfo& outInfo) const const size_t suballoc1stCount = suballocations1st.size(); const size_t suballoc2ndCount = suballocations2nd.size(); - VmaInitStatInfo(outInfo); - outInfo.blockCount = 1; + inoutStats.statistics.blockCount++; + inoutStats.statistics.blockBytes += size; VkDeviceSize lastOffset = 0; @@ -7577,12 +7851,12 @@ void VmaBlockMetadata_Linear::CalcAllocationStatInfo(VmaStatInfo& outInfo) const { // There is free space from lastOffset to suballoc.offset. const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset; - VmaAddStatInfoUnusedRange(outInfo, unusedRangeSize); + VmaAddDetailedStatisticsUnusedRange(inoutStats, unusedRangeSize); } // 2. Process this allocation. // There is allocation with suballoc.offset, suballoc.size. - VmaAddStatInfoAllocation(outInfo, suballoc.size); + VmaAddDetailedStatisticsAllocation(inoutStats, suballoc.size); // 3. Prepare for next iteration. 
lastOffset = suballoc.offset + suballoc.size; @@ -7595,7 +7869,7 @@ void VmaBlockMetadata_Linear::CalcAllocationStatInfo(VmaStatInfo& outInfo) const if (lastOffset < freeSpace2ndTo1stEnd) { const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset; - VmaAddStatInfoUnusedRange(outInfo, unusedRangeSize); + VmaAddDetailedStatisticsUnusedRange(inoutStats, unusedRangeSize); } // End of loop. @@ -7626,12 +7900,12 @@ void VmaBlockMetadata_Linear::CalcAllocationStatInfo(VmaStatInfo& outInfo) const { // There is free space from lastOffset to suballoc.offset. const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset; - VmaAddStatInfoUnusedRange(outInfo, unusedRangeSize); + VmaAddDetailedStatisticsUnusedRange(inoutStats, unusedRangeSize); } // 2. Process this allocation. // There is allocation with suballoc.offset, suballoc.size. - VmaAddStatInfoAllocation(outInfo, suballoc.size); + VmaAddDetailedStatisticsAllocation(inoutStats, suballoc.size); // 3. Prepare for next iteration. lastOffset = suballoc.offset + suballoc.size; @@ -7644,7 +7918,7 @@ void VmaBlockMetadata_Linear::CalcAllocationStatInfo(VmaStatInfo& outInfo) const if (lastOffset < freeSpace1stTo2ndEnd) { const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset; - VmaAddStatInfoUnusedRange(outInfo, unusedRangeSize); + VmaAddDetailedStatisticsUnusedRange(inoutStats, unusedRangeSize); } // End of loop. @@ -7674,12 +7948,12 @@ void VmaBlockMetadata_Linear::CalcAllocationStatInfo(VmaStatInfo& outInfo) const { // There is free space from lastOffset to suballoc.offset. const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset; - VmaAddStatInfoUnusedRange(outInfo, unusedRangeSize); + VmaAddDetailedStatisticsUnusedRange(inoutStats, unusedRangeSize); } // 2. Process this allocation. // There is allocation with suballoc.offset, suballoc.size. - VmaAddStatInfoAllocation(outInfo, suballoc.size); + VmaAddDetailedStatisticsAllocation(inoutStats, suballoc.size); // 3. Prepare for next iteration. lastOffset = suballoc.offset + suballoc.size; @@ -7692,7 +7966,7 @@ void VmaBlockMetadata_Linear::CalcAllocationStatInfo(VmaStatInfo& outInfo) const if (lastOffset < size) { const VkDeviceSize unusedRangeSize = size - lastOffset; - VmaAddStatInfoUnusedRange(outInfo, unusedRangeSize); + VmaAddDetailedStatisticsUnusedRange(inoutStats, unusedRangeSize); } // End of loop. @@ -7700,11 +7974,9 @@ void VmaBlockMetadata_Linear::CalcAllocationStatInfo(VmaStatInfo& outInfo) const } } } - - outInfo.unusedBytes = size - outInfo.usedBytes; } -void VmaBlockMetadata_Linear::AddPoolStats(VmaPoolStats& inoutStats) const +void VmaBlockMetadata_Linear::AddStatistics(VmaStatistics& inoutStats) const { const SuballocationVectorType& suballocations1st = AccessSuballocations1st(); const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); @@ -7712,7 +7984,9 @@ void VmaBlockMetadata_Linear::AddPoolStats(VmaPoolStats& inoutStats) const const size_t suballoc1stCount = suballocations1st.size(); const size_t suballoc2ndCount = suballocations2nd.size(); - inoutStats.size += size; + inoutStats.blockCount++; + inoutStats.blockBytes += size; + inoutStats.allocationBytes += size - m_SumFreeSize; VkDeviceSize lastOffset = 0; @@ -7739,8 +8013,6 @@ void VmaBlockMetadata_Linear::AddPoolStats(VmaPoolStats& inoutStats) const { // There is free space from lastOffset to suballoc.offset. const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset; - inoutStats.unusedSize += unusedRangeSize; - ++inoutStats.unusedRangeCount; } // 2. 
Process this allocation. @@ -7758,8 +8030,6 @@ void VmaBlockMetadata_Linear::AddPoolStats(VmaPoolStats& inoutStats) const { // There is free space from lastOffset to freeSpace2ndTo1stEnd. const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset; - inoutStats.unusedSize += unusedRangeSize; - ++inoutStats.unusedRangeCount; } // End of loop. @@ -7790,8 +8060,6 @@ void VmaBlockMetadata_Linear::AddPoolStats(VmaPoolStats& inoutStats) const { // There is free space from lastOffset to suballoc.offset. const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset; - inoutStats.unusedSize += unusedRangeSize; - ++inoutStats.unusedRangeCount; } // 2. Process this allocation. @@ -7809,8 +8077,6 @@ void VmaBlockMetadata_Linear::AddPoolStats(VmaPoolStats& inoutStats) const { // There is free space from lastOffset to freeSpace1stTo2ndEnd. const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset; - inoutStats.unusedSize += unusedRangeSize; - ++inoutStats.unusedRangeCount; } // End of loop. @@ -7840,8 +8106,6 @@ void VmaBlockMetadata_Linear::AddPoolStats(VmaPoolStats& inoutStats) const { // There is free space from lastOffset to suballoc.offset. const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset; - inoutStats.unusedSize += unusedRangeSize; - ++inoutStats.unusedRangeCount; } // 2. Process this allocation. @@ -7859,8 +8123,6 @@ void VmaBlockMetadata_Linear::AddPoolStats(VmaPoolStats& inoutStats) const { // There is free space from lastOffset to size. const VkDeviceSize unusedRangeSize = size - lastOffset; - inoutStats.unusedSize += unusedRangeSize; - ++inoutStats.unusedRangeCount; } // End of loop. @@ -7871,7 +8133,7 @@ void VmaBlockMetadata_Linear::AddPoolStats(VmaPoolStats& inoutStats) const } #if VMA_STATS_STRING_ENABLED -void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const +void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json, uint32_t mapRefCount) const { const VkDeviceSize size = GetSize(); const SuballocationVectorType& suballocations1st = AccessSuballocations1st(); @@ -8033,7 +8295,7 @@ void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const } const VkDeviceSize unusedBytes = size - usedBytes; - PrintDetailedMap_Begin(json, unusedBytes, alloc1stCount + alloc2ndCount, unusedRangeCount); + PrintDetailedMap_Begin(json, unusedBytes, alloc1stCount + alloc2ndCount, unusedRangeCount, mapRefCount); // SECOND PASS lastOffset = 0; @@ -8219,7 +8481,7 @@ VkResult VmaBlockMetadata_Linear::CheckCorruption(const void* pBlockData) if (!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size)) { VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!"); - return VK_ERROR_UNKNOWN; + return VK_ERROR_UNKNOWN_COPY; } } } @@ -8233,7 +8495,7 @@ VkResult VmaBlockMetadata_Linear::CheckCorruption(const void* pBlockData) if (!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size)) { VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!"); - return VK_ERROR_UNKNOWN; + return VK_ERROR_UNKNOWN_COPY; } } } @@ -8405,6 +8667,25 @@ void VmaBlockMetadata_Linear::GetAllocationInfo(VmaAllocHandle allocHandle, VmaV outInfo.pUserData = suballoc.userData; } +void* VmaBlockMetadata_Linear::GetAllocationUserData(VmaAllocHandle allocHandle) const +{ + return FindSuballocation((VkDeviceSize)allocHandle - 1).userData; +} + +VmaAllocHandle VmaBlockMetadata_Linear::GetAllocationListBegin() const +{ + // Function only used for defragmentation, which is disabled for this 
algorithm + VMA_ASSERT(0); + return VK_NULL_HANDLE; +} + +VmaAllocHandle VmaBlockMetadata_Linear::GetNextAllocation(VmaAllocHandle prevAlloc) const +{ + // Function only used for defragmentation, which is disabled for this algorithm + VMA_ASSERT(0); + return VK_NULL_HANDLE; +} + void VmaBlockMetadata_Linear::Clear() { m_SumFreeSize = GetSize(); @@ -8436,10 +8717,10 @@ void VmaBlockMetadata_Linear::DebugLogAllAllocations() const DebugLogAllocation(it->offset, it->size, it->userData); } -VmaSuballocation& VmaBlockMetadata_Linear::FindSuballocation(VkDeviceSize offset) +VmaSuballocation& VmaBlockMetadata_Linear::FindSuballocation(VkDeviceSize offset) const { - SuballocationVectorType& suballocations1st = AccessSuballocations1st(); - SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); + const SuballocationVectorType& suballocations1st = AccessSuballocations1st(); + const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); VmaSuballocation refSuballoc; refSuballoc.offset = offset; @@ -8447,31 +8728,31 @@ VmaSuballocation& VmaBlockMetadata_Linear::FindSuballocation(VkDeviceSize offset // Item from the 1st vector. { - const SuballocationVectorType::iterator it = VmaBinaryFindSorted( + SuballocationVectorType::const_iterator it = VmaBinaryFindSorted( suballocations1st.begin() + m_1stNullItemsBeginCount, suballocations1st.end(), refSuballoc, VmaSuballocationOffsetLess()); if (it != suballocations1st.end()) { - return *it; + return const_cast<VmaSuballocation&>(*it); } } if (m_2ndVectorMode != SECOND_VECTOR_EMPTY) { // Rest of members stays uninitialized intentionally for better performance. - const SuballocationVectorType::iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ? + SuballocationVectorType::const_iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ? VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetLess()) : VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetGreater()); if (it != suballocations2nd.end()) { - return *it; + return const_cast<VmaSuballocation&>(*it); } } VMA_ASSERT(0 && "Allocation not found in linear allocator!"); - return suballocations1st.back(); // Should never occur. + return const_cast<VmaSuballocation&>(suballocations1st.back()); // Should never occur. } bool VmaBlockMetadata_Linear::ShouldCompact1st() const @@ -8882,6 +9163,7 @@ bool VmaBlockMetadata_Linear::CreateAllocationRequest_UpperAddress( #endif // _VMA_BLOCK_METADATA_LINEAR_FUNCTIONS #endif // _VMA_BLOCK_METADATA_LINEAR +#if 0 #ifndef _VMA_BLOCK_METADATA_BUDDY /* - GetSize() is the original size of allocated memory block. 
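The new GetAllocationUserData / GetAllocationListBegin / GetNextAllocation trio gives the reworked defragmentation code one uniform way to walk the live allocations of any metadata object: the Generic and Linear metadata encode a handle as suballocation offset + 1, TLSF (further below) hands out Block pointers, and Linear and Buddy simply assert because defragmentation is disabled for those algorithms. A minimal sketch of the intended walk, assuming `metadata` points at any VmaBlockMetadata implementation and that, as in the Generic code above, each suballocation's userData stores the owning VmaAllocation:

    // Hypothetical sketch, not part of this patch: handle-based iteration over
    // live allocations; VK_NULL_HANDLE terminates the walk.
    for (VmaAllocHandle handle = metadata->GetAllocationListBegin();
        handle != VK_NULL_HANDLE;
        handle = metadata->GetNextAllocation(handle))
    {
        // For real (non-virtual) blocks the stored user data is the allocation itself.
        VmaAllocation alloc = (VmaAllocation)metadata->GetAllocationUserData(handle);
        // ...inspect alloc and decide whether it is worth moving...
    }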
@@ -8912,11 +9194,11 @@ public: void Init(VkDeviceSize size) override; bool Validate() const override; - void CalcAllocationStatInfo(VmaStatInfo& outInfo) const override; - void AddPoolStats(VmaPoolStats& inoutStats) const override; + void AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const override; + void AddStatistics(VmaStatistics& inoutStats) const override; #if VMA_STATS_STRING_ENABLED - void PrintDetailedMap(class VmaJsonWriter& json) const override; + void PrintDetailedMap(class VmaJsonWriter& json, uint32_t mapRefCount) const override; #endif bool CreateAllocationRequest( @@ -8934,6 +9216,9 @@ public: void Free(VmaAllocHandle allocHandle) override; void GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo) override; + void* GetAllocationUserData(VmaAllocHandle allocHandle) const override; + VmaAllocHandle GetAllocationListBegin() const override; + VmaAllocHandle GetNextAllocation(VmaAllocHandle prevAlloc) const override; void Clear() override; void SetAllocationUserData(VmaAllocHandle allocHandle, void* userData) override; @@ -9007,11 +9292,11 @@ private: } return VmaNextPow2(size); } - Node* FindAllocationNode(VkDeviceSize offset, uint32_t& outLevel); + Node* FindAllocationNode(VkDeviceSize offset, uint32_t& outLevel) const; void DeleteNodeChildren(Node* node); bool ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const; uint32_t AllocSizeToLevel(VkDeviceSize allocSize) const; - void CalcAllocationStatInfoNode(VmaStatInfo& inoutInfo, const Node* node, VkDeviceSize levelNodeSize) const; + void AddNodeToDetailedStatistics(VmaDetailedStatistics& inoutStats, const Node* node, VkDeviceSize levelNodeSize) const; // Adds node to the front of FreeList at given level. // node->type must be FREE. // node->free.prev, next can be undefined. 
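Across these hunks the old CalcAllocationStatInfo / AddPoolStats pair becomes AddDetailedStatistics / AddStatistics, and the new functions no longer reset their output: the caller zeroes the struct once and every metadata object adds its own numbers. A minimal sketch of that clear-once, add-many pattern, assuming the VmaClearDetailedStatistics / VmaClearStatistics helpers used elsewhere in this file and an array of metadata pointers named blocks:

    // Hypothetical sketch, not part of this patch: accumulating statistics
    // over several metadata objects with the renamed interface.
    VmaDetailedStatistics detailed;
    VmaClearDetailedStatistics(detailed);           // zero every counter once
    VmaStatistics brief;
    VmaClearStatistics(brief);
    for (size_t i = 0; i < blockCount; ++i)
    {
        blocks[i]->AddDetailedStatistics(detailed); // full data incl. unused ranges
        blocks[i]->AddStatistics(brief);            // cheap counters only
    }
    // detailed.statistics.blockBytes, detailed.unusedRangeCount, brief.allocationBytes,
    // etc. now hold the totals across all blocks.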
@@ -9115,46 +9400,39 @@ bool VmaBlockMetadata_Buddy::Validate() const return true; } -void VmaBlockMetadata_Buddy::CalcAllocationStatInfo(VmaStatInfo& outInfo) const +void VmaBlockMetadata_Buddy::AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const { - VmaInitStatInfo(outInfo); - outInfo.blockCount = 1; + inoutStats.statistics.blockCount++; + inoutStats.statistics.blockBytes += GetSize(); - CalcAllocationStatInfoNode(outInfo, m_Root, LevelToNodeSize(0)); + AddNodeToDetailedStatistics(inoutStats, m_Root, LevelToNodeSize(0)); const VkDeviceSize unusableSize = GetUnusableSize(); if (unusableSize > 0) - { - VmaAddStatInfoUnusedRange(outInfo, unusableSize); - } + VmaAddDetailedStatisticsUnusedRange(inoutStats, unusableSize); } -void VmaBlockMetadata_Buddy::AddPoolStats(VmaPoolStats& inoutStats) const +void VmaBlockMetadata_Buddy::AddStatistics(VmaStatistics& inoutStats) const { - const VkDeviceSize unusableSize = GetUnusableSize(); - - inoutStats.size += GetSize(); - inoutStats.unusedSize += m_SumFreeSize + unusableSize; - inoutStats.allocationCount += m_AllocationCount; - inoutStats.unusedRangeCount += m_FreeCount; - - if (unusableSize > 0) - { - ++inoutStats.unusedRangeCount; - } + inoutStats.blockCount++; + inoutStats.allocationCount += (uint32_t)m_AllocationCount; + inoutStats.blockBytes += GetSize(); + inoutStats.allocationBytes += GetSize() - m_SumFreeSize; } #if VMA_STATS_STRING_ENABLED -void VmaBlockMetadata_Buddy::PrintDetailedMap(class VmaJsonWriter& json) const +void VmaBlockMetadata_Buddy::PrintDetailedMap(class VmaJsonWriter& json, uint32_t mapRefCount) const { - VmaStatInfo stat; - CalcAllocationStatInfo(stat); + VmaDetailedStatistics stats; + VmaClearDetailedStatistics(stats); + AddDetailedStatistics(stats); PrintDetailedMap_Begin( json, - stat.unusedBytes, - stat.allocationCount, - stat.unusedRangeCount); + stats.statistics.blockBytes - stats.statistics.allocationBytes, + stats.statistics.allocationCount, + stats.unusedRangeCount, + mapRefCount); PrintDetailedMapNode(json, m_Root, LevelToNodeSize(0)); @@ -9302,6 +9580,25 @@ void VmaBlockMetadata_Buddy::GetAllocationInfo(VmaAllocHandle allocHandle, VmaVi outInfo.pUserData = node->allocation.userData; } +void* VmaBlockMetadata_Buddy::GetAllocationUserData(VmaAllocHandle allocHandle) const +{ + uint32_t level = 0; + const Node* const node = FindAllocationNode((VkDeviceSize)allocHandle - 1, level); + return node->allocation.userData; +} + +VmaAllocHandle VmaBlockMetadata_Buddy::GetAllocationListBegin() const +{ + // Function only used for defragmentation, which is disabled for this algorithm + return VK_NULL_HANDLE; +} + +VmaAllocHandle VmaBlockMetadata_Buddy::GetNextAllocation(VmaAllocHandle prevAlloc) const +{ + // Function only used for defragmentation, which is disabled for this algorithm + return VK_NULL_HANDLE; +} + void VmaBlockMetadata_Buddy::DeleteNodeChildren(Node* node) { if (node->type == Node::TYPE_SPLIT) @@ -9330,7 +9627,7 @@ void VmaBlockMetadata_Buddy::SetAllocationUserData(VmaAllocHandle allocHandle, v node->allocation.userData = userData; } -VmaBlockMetadata_Buddy::Node* VmaBlockMetadata_Buddy::FindAllocationNode(VkDeviceSize offset, uint32_t& outLevel) +VmaBlockMetadata_Buddy::Node* VmaBlockMetadata_Buddy::FindAllocationNode(VkDeviceSize offset, uint32_t& outLevel) const { Node* node = m_Root; VkDeviceSize nodeOffset = 0; @@ -9446,23 +9743,23 @@ void VmaBlockMetadata_Buddy::Free(VmaAllocHandle allocHandle) AddToFreeListFront(level, node); } -void 
VmaBlockMetadata_Buddy::CalcAllocationStatInfoNode(VmaStatInfo& inoutInfo, const Node* node, VkDeviceSize levelNodeSize) const +void VmaBlockMetadata_Buddy::AddNodeToDetailedStatistics(VmaDetailedStatistics& inoutStats, const Node* node, VkDeviceSize levelNodeSize) const { switch (node->type) { case Node::TYPE_FREE: - VmaAddStatInfoUnusedRange(inoutInfo, levelNodeSize); + VmaAddDetailedStatisticsUnusedRange(inoutStats, levelNodeSize); break; case Node::TYPE_ALLOCATION: - VmaAddStatInfoAllocation(inoutInfo, levelNodeSize); + VmaAddDetailedStatisticsAllocation(inoutStats, levelNodeSize); break; case Node::TYPE_SPLIT: { const VkDeviceSize childrenNodeSize = levelNodeSize / 2; const Node* const leftChild = node->split.leftChild; - CalcAllocationStatInfoNode(inoutInfo, leftChild, childrenNodeSize); + AddNodeToDetailedStatistics(inoutStats, leftChild, childrenNodeSize); const Node* const rightChild = leftChild->buddy; - CalcAllocationStatInfoNode(inoutInfo, rightChild, childrenNodeSize); + AddNodeToDetailedStatistics(inoutStats, rightChild, childrenNodeSize); } break; default: @@ -9527,6 +9824,8 @@ void VmaBlockMetadata_Buddy::DebugLogAllAllocationNode(Node* node, uint32_t leve { switch (node->type) { + case Node::TYPE_FREE: + break; case Node::TYPE_ALLOCATION: DebugLogAllocation(node->offset, LevelToNodeSize(level), node->allocation.userData); break; @@ -9569,6 +9868,7 @@ void VmaBlockMetadata_Buddy::PrintDetailedMapNode(class VmaJsonWriter& json, con #endif // VMA_STATS_STRING_ENABLED #endif // _VMA_BLOCK_METADATA_BUDDY_FUNCTIONS #endif // _VMA_BLOCK_METADATA_BUDDY +#endif // #if 0 #ifndef _VMA_BLOCK_METADATA_TLSF // To not search current larger region if first allocation won't succeed and skip to smaller range @@ -9591,11 +9891,11 @@ public: void Init(VkDeviceSize size) override; bool Validate() const override; - void CalcAllocationStatInfo(VmaStatInfo& outInfo) const override; - void AddPoolStats(VmaPoolStats& inoutStats) const override; + void AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const override; + void AddStatistics(VmaStatistics& inoutStats) const override; #if VMA_STATS_STRING_ENABLED - void PrintDetailedMap(class VmaJsonWriter& json) const override; + void PrintDetailedMap(class VmaJsonWriter& json, uint32_t mapRefCount) const override; #endif bool CreateAllocationRequest( @@ -9614,6 +9914,9 @@ public: void Free(VmaAllocHandle allocHandle) override; void GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo) override; + void* GetAllocationUserData(VmaAllocHandle allocHandle) const override; + VmaAllocHandle GetAllocationListBegin() const override; + VmaAllocHandle GetNextAllocation(VmaAllocHandle prevAlloc) const override; void Clear() override; void SetAllocationUserData(VmaAllocHandle allocHandle, void* userData) override; void DebugLogAllAllocations() const override; @@ -9839,34 +10142,32 @@ bool VmaBlockMetadata_TLSF::Validate() const return true; } -void VmaBlockMetadata_TLSF::CalcAllocationStatInfo(VmaStatInfo& outInfo) const +void VmaBlockMetadata_TLSF::AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const { - VmaInitStatInfo(outInfo); - outInfo.blockCount = 1; + inoutStats.statistics.blockCount++; + inoutStats.statistics.blockBytes += GetSize(); if (m_NullBlock->size > 0) - VmaAddStatInfoUnusedRange(outInfo, m_NullBlock->size); + VmaAddDetailedStatisticsUnusedRange(inoutStats, m_NullBlock->size); for (Block* block = m_NullBlock->prevPhysical; block != VMA_NULL; block = block->prevPhysical) { if (block->IsFree()) - 
VmaAddStatInfoUnusedRange(outInfo, block->size); + VmaAddDetailedStatisticsUnusedRange(inoutStats, block->size); else - VmaAddStatInfoAllocation(outInfo, block->size); + VmaAddDetailedStatisticsAllocation(inoutStats, block->size); } } -void VmaBlockMetadata_TLSF::AddPoolStats(VmaPoolStats& inoutStats) const +void VmaBlockMetadata_TLSF::AddStatistics(VmaStatistics& inoutStats) const { - inoutStats.size += GetSize(); - inoutStats.unusedSize += GetSumFreeSize(); - inoutStats.allocationCount += m_AllocCount; - inoutStats.unusedRangeCount += m_BlocksFreeCount; - if(m_NullBlock->size > 0) - ++inoutStats.unusedRangeCount; + inoutStats.blockCount++; + inoutStats.allocationCount += (uint32_t)m_AllocCount; + inoutStats.blockBytes += GetSize(); + inoutStats.allocationBytes += GetSize() - GetSumFreeSize(); } #if VMA_STATS_STRING_ENABLED -void VmaBlockMetadata_TLSF::PrintDetailedMap(class VmaJsonWriter& json) const +void VmaBlockMetadata_TLSF::PrintDetailedMap(class VmaJsonWriter& json, uint32_t mapRefCount) const { size_t blockCount = m_AllocCount + m_BlocksFreeCount; VmaStlAllocator<Block*> allocator(GetAllocationCallbacks()); @@ -9879,13 +10180,16 @@ void VmaBlockMetadata_TLSF::PrintDetailedMap(class VmaJsonWriter& json) const } VMA_ASSERT(i == 0); - VmaStatInfo stat; - CalcAllocationStatInfo(stat); + VmaDetailedStatistics stats; + VmaClearDetailedStatistics(stats); + AddDetailedStatistics(stats); - PrintDetailedMap_Begin(json, - stat.unusedBytes, - stat.allocationCount, - stat.unusedRangeCount); + PrintDetailedMap_Begin( + json, + stats.statistics.blockBytes - stats.statistics.allocationBytes, + stats.statistics.allocationCount, + stats.unusedRangeCount, + mapRefCount); for (; i < blockCount; ++i) { @@ -9996,6 +10300,33 @@ bool VmaBlockMetadata_TLSF::CreateAllocationRequest( nextListBlock = nextListBlock->NextFree(); } } + else if (strategy & VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT ) + { + // Perform search from the start + VmaStlAllocator<Block*> allocator(GetAllocationCallbacks()); + VmaVector<Block*, VmaStlAllocator<Block*>> blockList(m_BlocksFreeCount, allocator); + + size_t i = m_BlocksFreeCount; + for (Block* block = m_NullBlock->prevPhysical; block != VMA_NULL; block = block->prevPhysical) + { + if (block->IsFree() && block->size >= allocSize) + blockList[--i] = block; + } + + for (; i < m_BlocksFreeCount; ++i) + { + Block& block = *blockList[i]; + if (CheckBlock(block, GetListIndex(block.size), allocSize, allocAlignment, allocType, pAllocationRequest)) + return true; + } + + // If failed check null block + if (CheckBlock(*m_NullBlock, m_ListsCount, allocSize, allocAlignment, allocType, pAllocationRequest)) + return true; + + // Whole range searched, no more memory + return false; + } else { // Check larger bucket @@ -10046,7 +10377,7 @@ VkResult VmaBlockMetadata_TLSF::CheckCorruption(const void* pBlockData) if (!VmaValidateMagicValue(pBlockData, block->offset + block->size)) { VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!"); - return VK_ERROR_UNKNOWN; + return VK_ERROR_UNKNOWN_COPY; } } } @@ -10228,6 +10559,40 @@ void VmaBlockMetadata_TLSF::GetAllocationInfo(VmaAllocHandle allocHandle, VmaVir outInfo.pUserData = block->UserData(); } +void* VmaBlockMetadata_TLSF::GetAllocationUserData(VmaAllocHandle allocHandle) const +{ + Block* block = (Block*)allocHandle; + VMA_ASSERT(!block->IsFree() && "Cannot get user data for free block!"); + return block->UserData(); +} + +VmaAllocHandle VmaBlockMetadata_TLSF::GetAllocationListBegin() const +{ + if (m_AllocCount == 0) 
+ return VK_NULL_HANDLE; + + for (Block* block = m_NullBlock->prevPhysical; block; block = block->prevPhysical) + { + if (!block->IsFree()) + return (VmaAllocHandle)block; + } + VMA_ASSERT(false && "If m_AllocCount > 0 then should find any allocation!"); + return VK_NULL_HANDLE; +} + +VmaAllocHandle VmaBlockMetadata_TLSF::GetNextAllocation(VmaAllocHandle prevAlloc) const +{ + Block* startBlock = (Block*)prevAlloc; + VMA_ASSERT(!startBlock->IsFree() && "Incorrect block!"); + + for (Block* block = startBlock->prevPhysical; block; block = block->prevPhysical) + { + if (!block->IsFree()) + return (VmaAllocHandle)block; + } + return VK_NULL_HANDLE; +} + void VmaBlockMetadata_TLSF::Clear() { m_AllocCount = 0; @@ -10440,7 +10805,7 @@ Synchronized internally with a mutex. */ class VmaBlockVector { - friend class VmaDefragmentationAlgorithm_Generic; + friend struct VmaDefragmentationContext_T; VMA_CLASS_NO_COPY(VmaBlockVector) public: VmaBlockVector( @@ -10468,9 +10833,14 @@ public: bool HasExplicitBlockSize() const { return m_ExplicitBlockSize; } float GetPriority() const { return m_Priority; } void* const GetAllocationNextPtr() const { return m_pMemoryAllocateNext; } + // To be used only while the m_Mutex is locked. Used during defragmentation. + size_t GetBlockCount() const { return m_Blocks.size(); } + // To be used only while the m_Mutex is locked. Used during defragmentation. + VmaDeviceMemoryBlock* GetBlock(size_t index) const { return m_Blocks[index]; } VkResult CreateMinBlocks(); - void AddPoolStats(VmaPoolStats* pStats); + void AddStatistics(VmaStatistics& inoutStats); + void AddDetailedStatistics(VmaDetailedStatistics& inoutStats); bool IsEmpty(); bool IsCorruptionDetectionEnabled() const; @@ -10482,9 +10852,7 @@ public: size_t allocationCount, VmaAllocation* pAllocations); - void Free(const VmaAllocation hAllocation); - // Adds statistics of this BlockVector to pStats. - void AddStats(VmaStats* pStats); + void Free(const VmaAllocation hAllocation, bool incrementalSort = true); #if VMA_STATS_STRING_ENABLED void PrintDetailedMap(class VmaJsonWriter& json); @@ -10492,34 +10860,6 @@ public: VkResult CheckCorruption(); - // Saves results in pCtx->res. - void Defragment( - class VmaBlockVectorDefragmentationContext* pCtx, - VmaDefragmentationStats* pStats, VmaDefragmentationFlags flags, - VkDeviceSize& maxCpuBytesToMove, uint32_t& maxCpuAllocationsToMove, - VkDeviceSize& maxGpuBytesToMove, uint32_t& maxGpuAllocationsToMove, - VkCommandBuffer commandBuffer); - void DefragmentationEnd( - class VmaBlockVectorDefragmentationContext* pCtx, - uint32_t flags, - VmaDefragmentationStats* pStats); - - uint32_t ProcessDefragmentations( - class VmaBlockVectorDefragmentationContext* pCtx, - VmaDefragmentationPassMoveInfo* pMove, uint32_t maxMoves); - - void CommitDefragmentations( - class VmaBlockVectorDefragmentationContext* pCtx, - VmaDefragmentationStats* pStats); - - //////////////////////////////////////////////////////////////////////////////// - // To be used only while the m_Mutex is locked. Used during defragmentation. 
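GetBlockCount() and GetBlock() move into the public section of VmaBlockVector so that the new VmaDefragmentationContext_T, now a friend of the class, can enumerate blocks directly; as the comment notes, they are only meaningful while m_Mutex is held, and the accessors do not enforce that themselves. A minimal sketch of the expected use, assuming the caller already holds the vector's lock:

    // Hypothetical sketch, not part of this patch: enumerating the blocks of a
    // VmaBlockVector during defragmentation. The caller must already hold m_Mutex.
    for (size_t blockIndex = 0; blockIndex < vector.GetBlockCount(); ++blockIndex)
    {
        VmaDeviceMemoryBlock* const block = vector.GetBlock(blockIndex);
        // block->m_pMetadata can then be walked with GetAllocationListBegin() /
        // GetNextAllocation() to collect candidate moves within this block.
    }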
- - size_t GetBlockCount() const { return m_Blocks.size(); } - VmaDeviceMemoryBlock* GetBlock(size_t index) const { return m_Blocks[index]; } - size_t CalcAllocationCount() const; - bool IsBufferImageGranularityConflictPossible() const; - private: const VmaAllocator m_hAllocator; const VmaPool m_hParentPool; @@ -10535,9 +10875,6 @@ private: void* const m_pMemoryAllocateNext; VMA_RW_MUTEX m_Mutex; - /* There can be at most one allocation that is completely empty (except when minBlockCount > 0) - - a hysteresis to avoid pessimistic case of alternating creation and destruction of a VkDeviceMemory. */ - bool m_HasEmptyBlock; // Incrementally sorted by sumFreeSize, ascending. VmaVector<VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*>> m_Blocks; uint32_t m_NextBlockId; @@ -10548,6 +10885,7 @@ private: // Performs single step in sorting m_Blocks. They may not be fully sorted // after this call. void IncrementallySortBlocks(); + void SortByFreeSize(); VkResult AllocatePage( VkDeviceSize size, @@ -10566,327 +10904,90 @@ private: uint32_t strategy, VmaAllocation* pAllocation); - VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex); - // Saves result to pCtx->res. - void ApplyDefragmentationMovesCpu( - VmaBlockVectorDefragmentationContext* pDefragCtx, - const VmaVector<VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove>>& moves); - // Saves result to pCtx->res. - void ApplyDefragmentationMovesGpu( - VmaBlockVectorDefragmentationContext* pDefragCtx, - VmaVector<VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove>>& moves, - VkCommandBuffer commandBuffer); + VkResult CommitAllocationRequest( + VmaAllocationRequest& allocRequest, + VmaDeviceMemoryBlock* pBlock, + VkDeviceSize alignment, + VmaAllocationCreateFlags allocFlags, + void* pUserData, + VmaSuballocationType suballocType, + VmaAllocation* pAllocation); - /* - Used during defragmentation. pDefragmentationStats is optional. It is in/out - - updated with new data. - */ - void FreeEmptyBlocks(VmaDefragmentationStats* pDefragmentationStats); - void UpdateHasEmptyBlock(); + VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex); + bool HasEmptyBlock(); }; #endif // _VMA_BLOCK_VECTOR -#ifndef _VMA_DEFRAGMENTATION_ALGORITHM -struct VmaDefragmentationMove -{ - size_t srcBlockIndex; - size_t dstBlockIndex; - VkDeviceSize srcOffset; - VkDeviceSize dstOffset; - VmaAllocHandle dstHandle; - VkDeviceSize size; - VmaAllocation hAllocation; - VmaDeviceMemoryBlock* pSrcBlock; - VmaDeviceMemoryBlock* pDstBlock; -}; - -/* -Performs defragmentation: - -- Updates `pBlockVector->m_pMetadata`. -- Updates allocations by calling ChangeBlockAllocation() or ChangeOffset(). -- Does not move actual data, only returns requested moves as `moves`. 
-*/ -class VmaDefragmentationAlgorithm -{ - VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm) -public: - VmaDefragmentationAlgorithm( - VmaAllocator hAllocator, - VmaBlockVector* pBlockVector) - : m_hAllocator(hAllocator), - m_pBlockVector(pBlockVector) {} - virtual ~VmaDefragmentationAlgorithm() = default; - - virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) = 0; - virtual void AddAll() = 0; - - virtual VkResult Defragment( - VmaVector<VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove>>& moves, - VkDeviceSize maxBytesToMove, - uint32_t maxAllocationsToMove, - VmaDefragmentationFlags flags) = 0; - - virtual VkDeviceSize GetBytesMoved() const = 0; - virtual uint32_t GetAllocationsMoved() const = 0; - -protected: - struct AllocationInfo - { - VmaAllocation m_hAllocation; - VkBool32* m_pChanged; - - AllocationInfo() : m_hAllocation(VK_NULL_HANDLE), m_pChanged(VMA_NULL) {} - AllocationInfo(VmaAllocation hAlloc, VkBool32* pChanged) : m_hAllocation(hAlloc), m_pChanged(pChanged) {} - }; - - VmaAllocator const m_hAllocator; - VmaBlockVector* const m_pBlockVector; -}; - -#endif // _VMA_DEFRAGMENTATION_ALGORITHM - -#ifndef _VMA_DEFRAGMENTATION_ALGORITHM_GENERIC -class VmaDefragmentationAlgorithm_Generic : public VmaDefragmentationAlgorithm -{ - VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm_Generic) -public: - VmaDefragmentationAlgorithm_Generic( - VmaAllocator hAllocator, - VmaBlockVector* pBlockVector, - bool overlappingMoveSupported); - virtual ~VmaDefragmentationAlgorithm_Generic(); - - virtual void AddAll() { m_AllAllocations = true; } - virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; } - virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; } - - virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged); - virtual VkResult Defragment( - VmaVector<VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove>>& moves, - VkDeviceSize maxBytesToMove, - uint32_t maxAllocationsToMove, - VmaDefragmentationFlags flags); - -private: - struct AllocationInfoSizeGreater - { - bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const; - }; - struct AllocationInfoOffsetGreater - { - bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const; - }; - struct BlockInfo - { - size_t m_OriginalBlockIndex; - VmaDeviceMemoryBlock* m_pBlock; - bool m_HasNonMovableAllocations; - VmaVector<AllocationInfo, VmaStlAllocator<AllocationInfo>> m_Allocations; - - BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks); - - void CalcHasNonMovableAllocations(); - void SortAllocationsBySizeDescending(); - void SortAllocationsByOffsetDescending(); - }; - struct BlockPointerLess - { - bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const; - bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const; - }; - // 1. Blocks with some non-movable allocations go first. - // 2. Blocks with smaller sumFreeSize go first. 
- struct BlockInfoCompareMoveDestination - { - bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const; - }; - typedef VmaVector<BlockInfo*, VmaStlAllocator<BlockInfo*>> BlockInfoVector; - - BlockInfoVector m_Blocks; - uint32_t m_AllocationCount; - bool m_AllAllocations; - VkDeviceSize m_BytesMoved; - uint32_t m_AllocationsMoved; - - static bool MoveMakesSense( - size_t dstBlockIndex, VkDeviceSize dstOffset, - size_t srcBlockIndex, VkDeviceSize srcOffset); - - size_t CalcBlocksWithNonMovableCount() const; - VkResult DefragmentRound( - VmaVector<VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove>>& moves, - VkDeviceSize maxBytesToMove, - uint32_t maxAllocationsToMove, - bool freeOldAllocations); -}; -#endif // _VMA_DEFRAGMENTATION_ALGORITHM_GENERIC - -#ifndef _VMA_DEFRAGMENTATION_ALGORITHM_FAST -class VmaDefragmentationAlgorithm_Fast : public VmaDefragmentationAlgorithm +#ifndef _VMA_DEFRAGMENTATION_CONTEXT +struct VmaDefragmentationContext_T { - VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm_Fast) + VMA_CLASS_NO_COPY(VmaDefragmentationContext_T) public: - VmaDefragmentationAlgorithm_Fast( + VmaDefragmentationContext_T( VmaAllocator hAllocator, - VmaBlockVector* pBlockVector, - bool overlappingMoveSupported); - virtual ~VmaDefragmentationAlgorithm_Fast() = default; + const VmaDefragmentationInfo& info); + ~VmaDefragmentationContext_T(); - virtual void AddAll() { m_AllAllocations = true; } - virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; } - virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; } - virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) { ++m_AllocationCount; } + void GetStats(VmaDefragmentationStats& outStats) { outStats = m_Stats; } - virtual VkResult Defragment( - VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves, - VkDeviceSize maxBytesToMove, - uint32_t maxAllocationsToMove, - VmaDefragmentationFlags flags); + VkResult DefragmentPassBegin(VmaDefragmentationPassMoveInfo& moveInfo); + VkResult DefragmentPassEnd(VmaDefragmentationPassMoveInfo& moveInfo); private: - struct BlockInfo + struct ImmovableBlock { - size_t origBlockIndex; + uint32_t vectorIndex; + VmaDeviceMemoryBlock* block; }; - class FreeSpaceDatabase + struct StateExtensive { - public: - FreeSpaceDatabase(); - - void Register(size_t blockInfoIndex, VkDeviceSize offset, VkDeviceSize size); - bool Fetch(VkDeviceSize alignment, VkDeviceSize size, - size_t& outBlockInfoIndex, VkDeviceSize& outDstOffset); - - private: - static const size_t MAX_COUNT = 4; - - struct FreeSpace + enum class Operation : uint8_t { - size_t blockInfoIndex; // SIZE_MAX means this structure is invalid. 
- VkDeviceSize offset; - VkDeviceSize size; - } m_FreeSpaces[MAX_COUNT]; - }; - - const bool m_OverlappingMoveSupported; - - uint32_t m_AllocationCount; - bool m_AllAllocations; - VkDeviceSize m_BytesMoved; - uint32_t m_AllocationsMoved; - - VmaVector<BlockInfo, VmaStlAllocator<BlockInfo>> m_BlockInfos; - - void PreprocessMetadata(); - void PostprocessMetadata(); - void InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc); -}; -#endif // _VMA_DEFRAGMENTATION_ALGORITHM_FAST + FindFreeBlockBuffer, FindFreeBlockTexture, FindFreeBlockAll, + MoveBuffers, MoveTextures, MoveAll, + Cleanup, Done + }; -#ifndef _VMA_BLOCK_VECTOR_DEFRAGMENTATION_CONTEXT -struct VmaBlockDefragmentationContext -{ - enum BLOCK_FLAG - { - BLOCK_FLAG_USED = 0x00000001, + Operation operation = Operation::FindFreeBlockTexture; + size_t firstFreeBlock = SIZE_MAX; }; - uint32_t flags; - VkBuffer hBuffer; -}; - -class VmaBlockVectorDefragmentationContext -{ - VMA_CLASS_NO_COPY(VmaBlockVectorDefragmentationContext) -public: - VkResult res; - bool mutexLocked; - VmaVector<VmaBlockDefragmentationContext, VmaStlAllocator<VmaBlockDefragmentationContext>> blockContexts; - VmaVector<VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove>> defragmentationMoves; - uint32_t defragmentationMovesProcessed; - uint32_t defragmentationMovesCommitted; - bool hasDefragmentationPlan; - - VmaBlockVectorDefragmentationContext( - VmaAllocator hAllocator, - VmaPool hCustomPool, // Optional. - VmaBlockVector* pBlockVector); - ~VmaBlockVectorDefragmentationContext(); - - VmaPool GetCustomPool() const { return m_hCustomPool; } - VmaBlockVector* GetBlockVector() const { return m_pBlockVector; } - VmaDefragmentationAlgorithm* GetAlgorithm() const { return m_pAlgorithm; } - void AddAll() { m_AllAllocations = true; } - - void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged); - void Begin(bool overlappingMoveSupported, VmaDefragmentationFlags flags); - -private: - struct AllocInfo + struct MoveAllocationData { - VmaAllocation hAlloc; - VkBool32* pChanged; + VkDeviceSize size; + VkDeviceSize alignment; + VmaSuballocationType type; + VmaAllocationCreateFlags flags; + VmaDefragmentationMove move = {}; }; - const VmaAllocator m_hAllocator; - // Null if not from custom pool. - const VmaPool m_hCustomPool; - // Redundant, for convenience not to fetch from m_hCustomPool->m_BlockVector or m_hAllocator->m_pBlockVectors. - VmaBlockVector* const m_pBlockVector; - // Owner of this object. - VmaDefragmentationAlgorithm* m_pAlgorithm; - // Used between constructor and Begin. - VmaVector<AllocInfo, VmaStlAllocator<AllocInfo>> m_Allocations; - bool m_AllAllocations; -}; -#endif // _VMA_BLOCK_VECTOR_DEFRAGMENTATION_CONTEXT - -#ifndef _VMA_DEFRAGMENTATION_CONTEXT -struct VmaDefragmentationContext_T -{ -private: - VMA_CLASS_NO_COPY(VmaDefragmentationContext_T) -public: - VmaDefragmentationContext_T( - VmaAllocator hAllocator, - uint32_t flags, - VmaDefragmentationStats* pStats); - ~VmaDefragmentationContext_T(); - - void AddPools(uint32_t poolCount, const VmaPool* pPools); - void AddAllocations( - uint32_t allocationCount, - const VmaAllocation* pAllocations, - VkBool32* pAllocationsChanged); - - /* - Returns: - - `VK_SUCCESS` if succeeded and object can be destroyed immediately. - - `VK_NOT_READY` if succeeded but the object must remain alive until vmaDefragmentationEnd(). - - Negative value if error occurred and object can be destroyed immediately. 
- */ - VkResult Defragment( - VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove, - VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove, - VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats, VmaDefragmentationFlags flags); - - VkResult DefragmentPassBegin(VmaDefragmentationPassInfo* pInfo); - VkResult DefragmentPassEnd(); - -private: - const VmaAllocator m_hAllocator; - const uint32_t m_Flags; - VmaDefragmentationStats* const m_pStats; - - VkDeviceSize m_MaxCpuBytesToMove; - uint32_t m_MaxCpuAllocationsToMove; - VkDeviceSize m_MaxGpuBytesToMove; - uint32_t m_MaxGpuAllocationsToMove; - - // Owner of these objects. - VmaBlockVectorDefragmentationContext* m_DefaultPoolContexts[VK_MAX_MEMORY_TYPES]; - // Owner of these objects. - VmaVector<VmaBlockVectorDefragmentationContext*, VmaStlAllocator<VmaBlockVectorDefragmentationContext*>> m_CustomPoolContexts; + const VkDeviceSize m_MaxPassBytes; + const uint32_t m_MaxPassAllocations; + + VmaStlAllocator<VmaDefragmentationMove> m_MoveAllocator; + VmaVector<VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove>> m_Moves; + + uint32_t m_Algorithm; + uint32_t m_BlockVectorCount; + VmaBlockVector* m_PoolBlockVector; + VmaBlockVector** m_pBlockVectors; + size_t m_ImmovableBlockCount = 0; + VmaDefragmentationStats m_Stats = { 0 }; + void* m_AlgorithmState = VMA_NULL; + + static MoveAllocationData GetMoveData(VmaAllocHandle handle, VmaBlockMetadata* metadata); + bool IncrementCounters(uint32_t& allocations, VkDeviceSize bytes); + bool ReallocWithinBlock(VmaBlockVector& vector, VmaDeviceMemoryBlock* block); + bool AllocInOtherBlock(size_t start, size_t end, MoveAllocationData& data, VmaBlockVector& vector); + + bool ComputeDefragmentation(VmaBlockVector& vector, size_t index); + bool ComputeDefragmentation_Fast(VmaBlockVector& vector); + bool ComputeDefragmentation_Balanced(VmaBlockVector& vector); + bool ComputeDefragmentation_Full(VmaBlockVector& vector); + bool ComputeDefragmentation_Extensive(VmaBlockVector& vector, size_t index); + + bool MoveDataToFreeBlocks(VmaSuballocationType currentType, + VmaBlockVector& vector, size_t firstFreeBlock, + bool& texturePresent, bool& bufferPresent, bool& otherPresent); }; #endif // _VMA_DEFRAGMENTATION_CONTEXT @@ -10896,12 +10997,13 @@ struct VmaPool_T friend struct VmaPoolListItemTraits; VMA_CLASS_NO_COPY(VmaPool_T) public: - VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES]; - VmaDedicatedAllocationList m_DedicatedAllocations[VK_MAX_MEMORY_TYPES]; + VmaBlockVector m_BlockVector; + VmaDedicatedAllocationList m_DedicatedAllocations; VmaPool_T( VmaAllocator hAllocator, - const VmaPoolCreateInfo& createInfo); + const VmaPoolCreateInfo& createInfo, + VkDeviceSize preferredBlockSize); ~VmaPool_T(); uint32_t GetId() const { return m_Id; } @@ -10915,7 +11017,6 @@ public: #endif private: - const VmaAllocator m_hAllocator; uint32_t m_Id; char* m_Name; VmaPool_T* m_PrevPool = VMA_NULL; @@ -10936,6 +11037,8 @@ struct VmaPoolListItemTraits #ifndef _VMA_CURRENT_BUDGET_DATA struct VmaCurrentBudgetData { + VMA_ATOMIC_UINT32 m_BlockCount[VK_MAX_MEMORY_HEAPS]; + VMA_ATOMIC_UINT32 m_AllocationCount[VK_MAX_MEMORY_HEAPS]; VMA_ATOMIC_UINT64 m_BlockBytes[VK_MAX_MEMORY_HEAPS]; VMA_ATOMIC_UINT64 m_AllocationBytes[VK_MAX_MEMORY_HEAPS]; @@ -10958,6 +11061,8 @@ VmaCurrentBudgetData::VmaCurrentBudgetData() { for (uint32_t heapIndex = 0; heapIndex < VK_MAX_MEMORY_HEAPS; ++heapIndex) { + m_BlockCount[heapIndex] = 0; + m_AllocationCount[heapIndex] = 0; m_BlockBytes[heapIndex] = 0; 
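The old multi-layer defragmentation machinery (the generic and fast algorithm classes plus the per-vector and top-level contexts removed above) collapses into the single VmaDefragmentationContext_T, which exposes an incremental, pass-based flow: begin a pass, let the caller perform the reported moves, end the pass, repeat. A minimal sketch of that loop, assuming the return convention of the released VMA 3.0 API (both calls return VK_SUCCESS once nothing is left to do); the fields of VmaDefragmentationPassMoveInfo are not visible in this hunk, so processing of the moves is left abstract:

    // Hypothetical sketch, not part of this patch: driving the pass-based
    // context. `ctx` is a VmaDefragmentationContext_T* created elsewhere.
    VmaDefragmentationPassMoveInfo passInfo = {};
    for (;;)
    {
        VkResult res = ctx->DefragmentPassBegin(passInfo);
        if (res == VK_SUCCESS)
            break;                          // nothing (more) to move
        // ...copy the data described by passInfo to its new locations...
        res = ctx->DefragmentPassEnd(passInfo);
        if (res == VK_SUCCESS)
            break;                          // defragmentation finished
    }
    VmaDefragmentationStats stats = {};
    ctx->GetStats(stats);                   // totals accumulated in m_Stats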
m_AllocationBytes[heapIndex] = 0; #if VMA_MEMORY_BUDGET @@ -10975,6 +11080,7 @@ VmaCurrentBudgetData::VmaCurrentBudgetData() void VmaCurrentBudgetData::AddAllocation(uint32_t heapIndex, VkDeviceSize allocationSize) { m_AllocationBytes[heapIndex] += allocationSize; + ++m_AllocationCount[heapIndex]; #if VMA_MEMORY_BUDGET ++m_OperationsSinceBudgetFetch; #endif @@ -10984,6 +11090,8 @@ void VmaCurrentBudgetData::RemoveAllocation(uint32_t heapIndex, VkDeviceSize all { VMA_ASSERT(m_AllocationBytes[heapIndex] >= allocationSize); m_AllocationBytes[heapIndex] -= allocationSize; + VMA_ASSERT(m_AllocationCount[heapIndex] > 0); + --m_AllocationCount[heapIndex]; #if VMA_MEMORY_BUDGET ++m_OperationsSinceBudgetFetch; #endif @@ -11045,7 +11153,8 @@ public: void GetAllocationInfo(VmaVirtualAllocation allocation, VmaVirtualAllocationInfo& outInfo); VkResult Allocate(const VmaVirtualAllocationCreateInfo& createInfo, VmaVirtualAllocation& outAllocation, VkDeviceSize* outOffset); - void CalculateStats(VmaStatInfo& outStatInfo) const; + void GetStatistics(VmaStatistics& outStats) const; + void CalculateDetailedStatistics(VmaDetailedStatistics& outStats) const; #if VMA_STATS_STRING_ENABLED void BuildStatsString(bool detailedMap, VmaStringBuilder& sb) const; #endif @@ -11062,20 +11171,14 @@ VmaVirtualBlock_T::VmaVirtualBlock_T(const VmaVirtualBlockCreateInfo& createInfo const uint32_t algorithm = createInfo.flags & VMA_VIRTUAL_BLOCK_CREATE_ALGORITHM_MASK; switch (algorithm) { + default: + VMA_ASSERT(0); case 0: - m_Metadata = vma_new(GetAllocationCallbacks(), VmaBlockMetadata_Generic)(VK_NULL_HANDLE, 1, true); - break; - case VMA_VIRTUAL_BLOCK_CREATE_BUDDY_ALGORITHM_BIT: - m_Metadata = vma_new(GetAllocationCallbacks(), VmaBlockMetadata_Buddy)(VK_NULL_HANDLE, 1, true); + m_Metadata = vma_new(GetAllocationCallbacks(), VmaBlockMetadata_TLSF)(VK_NULL_HANDLE, 1, true); break; case VMA_VIRTUAL_BLOCK_CREATE_LINEAR_ALGORITHM_BIT: m_Metadata = vma_new(GetAllocationCallbacks(), VmaBlockMetadata_Linear)(VK_NULL_HANDLE, 1, true); break; - case VMA_VIRTUAL_BLOCK_CREATE_TLSF_ALGORITHM_BIT: - m_Metadata = vma_new(GetAllocationCallbacks(), VmaBlockMetadata_TLSF)(VK_NULL_HANDLE, 1, true); - break; - default: - VMA_ASSERT(0); } m_Metadata->Init(createInfo.size); @@ -11129,10 +11232,16 @@ VkResult VmaVirtualBlock_T::Allocate(const VmaVirtualAllocationCreateInfo& creat return VK_ERROR_OUT_OF_DEVICE_MEMORY; } -void VmaVirtualBlock_T::CalculateStats(VmaStatInfo& outStatInfo) const +void VmaVirtualBlock_T::GetStatistics(VmaStatistics& outStats) const +{ + VmaClearStatistics(outStats); + m_Metadata->AddStatistics(outStats); +} + +void VmaVirtualBlock_T::CalculateDetailedStatistics(VmaDetailedStatistics& outStats) const { - m_Metadata->CalcAllocationStatInfo(outStatInfo); - VmaPostprocessCalcStatInfo(outStatInfo); + VmaClearDetailedStatistics(outStats); + m_Metadata->AddDetailedStatistics(outStats); } #if VMA_STATS_STRING_ENABLED @@ -11141,16 +11250,17 @@ void VmaVirtualBlock_T::BuildStatsString(bool detailedMap, VmaStringBuilder& sb) VmaJsonWriter json(GetAllocationCallbacks(), sb); json.BeginObject(); - VmaStatInfo stat = {}; - CalculateStats(stat); + VmaDetailedStatistics stats; + CalculateDetailedStatistics(stats); json.WriteString("Stats"); - VmaPrintStatInfo(json, stat); + VmaPrintDetailedStatistics(json, stats); if (detailedMap) { json.WriteString("Details"); - m_Metadata->PrintDetailedMap(json); + m_Metadata->PrintDetailedMap(json, + UINT32_MAX); // mapRefCount } json.EndObject(); @@ -11159,6 +11269,7 @@ void 
VmaVirtualBlock_T::BuildStatsString(bool detailedMap, VmaStringBuilder& sb) #endif // _VMA_VIRTUAL_BLOCK_T_FUNCTIONS #endif // _VMA_VIRTUAL_BLOCK_T + // Main allocator object. struct VmaAllocator_T { @@ -11253,6 +11364,11 @@ public: VkMemoryRequirements& memReq, bool& requiresDedicatedAllocation, bool& prefersDedicatedAllocation) const; + VkResult FindMemoryTypeIndex( + uint32_t memoryTypeBits, + const VmaAllocationCreateInfo* pAllocationCreateInfo, + VkFlags bufImgUsage, // VkBufferCreateInfo::usage or VkImageCreateInfo::usage. UINT32_MAX if unknown. + uint32_t* pMemoryTypeIndex) const; // Main allocation function. VkResult AllocateMemory( @@ -11260,8 +11376,8 @@ public: bool requiresDedicatedAllocation, bool prefersDedicatedAllocation, VkBuffer dedicatedBuffer, - VkBufferUsageFlags dedicatedBufferUsage, // UINT32_MAX when unknown. VkImage dedicatedImage, + VkFlags dedicatedBufferImageUsage, // UINT32_MAX if unknown. const VmaAllocationCreateInfo& createInfo, VmaSuballocationType suballocType, size_t allocationCount, @@ -11272,7 +11388,7 @@ public: size_t allocationCount, const VmaAllocation* pAllocations); - void CalculateStats(VmaStats* pStats); + void CalculateStatistics(VmaTotalStatistics* pStats); void GetHeapBudgets( VmaBudget* outBudgets, uint32_t firstHeap, uint32_t heapCount); @@ -11281,24 +11397,12 @@ public: void PrintDetailedMap(class VmaJsonWriter& json); #endif - VkResult DefragmentationBegin( - const VmaDefragmentationInfo2& info, - VmaDefragmentationStats* pStats, - VmaDefragmentationContext* pContext); - VkResult DefragmentationEnd( - VmaDefragmentationContext context); - - VkResult DefragmentationPassBegin( - VmaDefragmentationPassInfo* pInfo, - VmaDefragmentationContext context); - VkResult DefragmentationPassEnd( - VmaDefragmentationContext context); - void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo); VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool); void DestroyPool(VmaPool pool); - void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats); + void GetPoolStatistics(VmaPool pool, VmaStatistics* pPoolStats); + void CalculatePoolStatistics(VmaPool pool, VmaDetailedStatistics* pPoolStats); void SetCurrentFrameIndex(uint32_t frameIndex); uint32_t GetCurrentFrameIndex() const { return m_CurrentFrameIndex.load(); } @@ -11397,18 +11501,16 @@ private: void ValidateVulkanFunctions(); -public: // I'm sorry VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex); -private: VkResult AllocateMemoryOfType( VmaPool pool, VkDeviceSize size, VkDeviceSize alignment, bool dedicatedPreferred, VkBuffer dedicatedBuffer, - VkBufferUsageFlags dedicatedBufferUsage, VkImage dedicatedImage, + VkFlags dedicatedBufferImageUsage, const VmaAllocationCreateInfo& createInfo, uint32_t memTypeIndex, VmaSuballocationType suballocType, @@ -11426,6 +11528,7 @@ private: const VkMemoryAllocateInfo& allocInfo, bool map, bool isUserDataString, + bool isMappingAllowed, void* pUserData, VmaAllocation* pAllocation); @@ -11438,12 +11541,13 @@ private: uint32_t memTypeIndex, bool map, bool isUserDataString, + bool isMappingAllowed, bool canAliasMemory, void* pUserData, float priority, VkBuffer dedicatedBuffer, - VkBufferUsageFlags dedicatedBufferUsage, VkImage dedicatedImage, + VkFlags dedicatedBufferImageUsage, size_t allocationCount, VmaAllocation* pAllocations, const void* pNextChain = nullptr); @@ -11561,19 +11665,11 @@ void VmaDeviceMemoryBlock::Init( m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Linear)(hAllocator->GetAllocationCallbacks(), 
bufferImageGranularity, false); // isVirtual break; - case VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT: - m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Buddy)(hAllocator->GetAllocationCallbacks(), - bufferImageGranularity, false); // isVirtual - break; - case VMA_POOL_CREATE_TLSF_ALGORITHM_BIT: - m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_TLSF)(hAllocator->GetAllocationCallbacks(), - bufferImageGranularity, false); // isVirtual - break; default: VMA_ASSERT(0); // Fall-through. case 0: - m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Generic)(hAllocator->GetAllocationCallbacks(), + m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_TLSF)(hAllocator->GetAllocationCallbacks(), bufferImageGranularity, false); // isVirtual } m_pMetadata->Init(newSize); @@ -11596,6 +11692,19 @@ void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator) m_pMetadata = VMA_NULL; } +void VmaDeviceMemoryBlock::PostFree(VmaAllocator hAllocator) +{ + if(m_MappingHysteresis.PostFree()) + { + VMA_ASSERT(m_MappingHysteresis.GetExtraMapping() == 0); + if (m_MapCount == 0) + { + m_pMappedData = VMA_NULL; + (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory); + } + } +} + bool VmaDeviceMemoryBlock::Validate() const { VMA_VALIDATE((m_hMemory != VK_NULL_HANDLE) && @@ -11627,8 +11736,10 @@ VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void return VK_SUCCESS; } - VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex); - if (m_MapCount != 0) + VmaMutexLock lock(m_MapAndBindMutex, hAllocator->m_UseMutex); + const uint32_t oldTotalMapCount = m_MapCount + m_MappingHysteresis.GetExtraMapping(); + m_MappingHysteresis.PostMap(); + if (oldTotalMapCount != 0) { m_MapCount += count; VMA_ASSERT(m_pMappedData != VMA_NULL); @@ -11666,15 +11777,17 @@ void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count) return; } - VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex); + VmaMutexLock lock(m_MapAndBindMutex, hAllocator->m_UseMutex); if (m_MapCount >= count) { m_MapCount -= count; - if (m_MapCount == 0) + const uint32_t totalMapCount = m_MapCount + m_MappingHysteresis.GetExtraMapping(); + if (totalMapCount == 0) { m_pMappedData = VMA_NULL; (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory); } + m_MappingHysteresis.PostUnmap(); } else { @@ -11732,7 +11845,7 @@ VkResult VmaDeviceMemoryBlock::BindBufferMemory( "Invalid allocationLocalOffset. Did you forget that this offset is relative to the beginning of the allocation, not the whole memory block?"); const VkDeviceSize memoryOffset = hAllocation->GetOffset() + allocationLocalOffset; // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads. - VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex); + VmaMutexLock lock(m_MapAndBindMutex, hAllocator->m_UseMutex); return hAllocator->BindVulkanBuffer(m_hMemory, memoryOffset, hBuffer, pNext); } @@ -11749,13 +11862,13 @@ VkResult VmaDeviceMemoryBlock::BindImageMemory( "Invalid allocationLocalOffset. Did you forget that this offset is relative to the beginning of the allocation, not the whole memory block?"); const VkDeviceSize memoryOffset = hAllocation->GetOffset() + allocationLocalOffset; // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads. 
- VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex); + VmaMutexLock lock(m_MapAndBindMutex, hAllocator->m_UseMutex); return hAllocator->BindVulkanImage(m_hMemory, memoryOffset, hImage, pNext); } #endif // _VMA_DEVICE_MEMORY_BLOCK_FUNCTIONS #ifndef _VMA_ALLOCATION_T_FUNCTIONS -VmaAllocation_T::VmaAllocation_T(bool userDataString) +VmaAllocation_T::VmaAllocation_T(bool userDataString, bool mappingAllowed) : m_Alignment{ 1 }, m_Size{ 0 }, m_pUserData{ VMA_NULL }, @@ -11763,8 +11876,13 @@ VmaAllocation_T::VmaAllocation_T(bool userDataString) m_Type{ (uint8_t)ALLOCATION_TYPE_NONE }, m_SuballocationType{ (uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN }, m_MapCount{ 0 }, - m_Flags{ userDataString ? (uint8_t)FLAG_USER_DATA_STRING : (uint8_t)0 } + m_Flags{ 0 } { + if(userDataString) + m_Flags |= (uint8_t)FLAG_USER_DATA_STRING; + if(mappingAllowed) + m_Flags |= (uint8_t)FLAG_MAPPING_ALLOWED; + #if VMA_STATS_STRING_ENABLED m_BufferImageUsage = 0; #endif @@ -11772,10 +11890,10 @@ VmaAllocation_T::VmaAllocation_T(bool userDataString) VmaAllocation_T::~VmaAllocation_T() { - VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction."); + VMA_ASSERT(m_MapCount == 0 && "Allocation was not unmapped before destruction."); // Check if owned string was freed. - VMA_ASSERT(m_pUserData == VMA_NULL); + VMA_ASSERT((IsUserDataString() && m_pUserData == VMA_NULL) || !IsUserDataString()); } void VmaAllocation_T::InitBlockAllocation( @@ -11793,7 +11911,11 @@ void VmaAllocation_T::InitBlockAllocation( m_Alignment = alignment; m_Size = size; m_MemoryTypeIndex = memoryTypeIndex; - m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0; + if(mapped) + { + VMA_ASSERT(IsMappingAllowed() && "Mapping is not allowed on this allocation! Please use one of the new VMA_ALLOCATION_CREATE_HOST_ACCESS_* flags when creating it."); + m_Flags |= (uint8_t)FLAG_PERSISTENT_MAP; + } m_SuballocationType = (uint8_t)suballocationType; m_BlockAllocation.m_Block = block; m_BlockAllocation.m_AllocHandle = allocHandle; @@ -11814,7 +11936,11 @@ void VmaAllocation_T::InitDedicatedAllocation( m_Size = size; m_MemoryTypeIndex = memoryTypeIndex; m_SuballocationType = (uint8_t)suballocationType; - m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0; + if(pMappedData != VMA_NULL) + { + VMA_ASSERT(IsMappingAllowed() && "Mapping is not allowed on this allocation! Please use one of the new VMA_ALLOCATION_CREATE_HOST_ACCESS_* flags when creating it."); + m_Flags |= (uint8_t)FLAG_PERSISTENT_MAP; + } m_DedicatedAllocation.m_hParentPool = hParentPool; m_DedicatedAllocation.m_hMemory = hMemory; m_DedicatedAllocation.m_pMappedData = pMappedData; @@ -11841,32 +11967,19 @@ void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData) } } -void VmaAllocation_T::ChangeBlockAllocation( - VmaAllocator hAllocator, - VmaDeviceMemoryBlock* block, - VmaAllocHandle allocHandle) +void VmaAllocation_T::SwapBlockAllocation(VmaAllocation allocation) { - VMA_ASSERT(block != VMA_NULL); + VMA_ASSERT(allocation != VMA_NULL); VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK); + VMA_ASSERT(allocation->m_Type == ALLOCATION_TYPE_BLOCK); - // Move mapping reference counter from old block to new block. 
- if (block != m_BlockAllocation.m_Block) - { - uint32_t mapRefCount = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP; - if (IsPersistentMap()) - ++mapRefCount; - m_BlockAllocation.m_Block->Unmap(hAllocator, mapRefCount); - block->Map(hAllocator, mapRefCount, VMA_NULL); - } + m_BlockAllocation.m_Block->m_pMetadata->SetAllocationUserData(m_BlockAllocation.m_AllocHandle, allocation); + VMA_SWAP(m_BlockAllocation, allocation->m_BlockAllocation); + m_BlockAllocation.m_Block->m_pMetadata->SetAllocationUserData(m_BlockAllocation.m_AllocHandle, this); - m_BlockAllocation.m_Block = block; - m_BlockAllocation.m_AllocHandle = allocHandle; -} - -void VmaAllocation_T::ChangeAllocHandle(VmaAllocHandle newAllocHandle) -{ - VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK); - m_BlockAllocation.m_AllocHandle = newAllocHandle; +#if VMA_STATS_STRING_ENABLED + VMA_SWAP(m_BufferImageUsage, allocation->m_BufferImageUsage); +#endif } VmaAllocHandle VmaAllocation_T::GetAllocHandle() const @@ -11930,7 +12043,7 @@ void* VmaAllocation_T::GetMappedData() const switch (m_Type) { case ALLOCATION_TYPE_BLOCK: - if (m_MapCount != 0) + if (m_MapCount != 0 || IsPersistentMap()) { void* pBlockData = m_BlockAllocation.m_Block->GetMappedData(); VMA_ASSERT(pBlockData != VMA_NULL); @@ -11942,7 +12055,7 @@ void* VmaAllocation_T::GetMappedData() const } break; case ALLOCATION_TYPE_DEDICATED: - VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0)); + VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0 || IsPersistentMap())); return m_DedicatedAllocation.m_pMappedData; default: VMA_ASSERT(0); @@ -11950,24 +12063,12 @@ void* VmaAllocation_T::GetMappedData() const } } -void VmaAllocation_T::DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo) -{ - VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED); - outInfo.blockCount = 1; - outInfo.allocationCount = 1; - outInfo.unusedRangeCount = 0; - outInfo.usedBytes = m_Size; - outInfo.unusedBytes = 0; - outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size; - outInfo.unusedRangeSizeMin = UINT64_MAX; - outInfo.unusedRangeSizeMax = 0; -} - void VmaAllocation_T::BlockAllocMap() { VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK); + VMA_ASSERT(IsMappingAllowed() && "Mapping is not allowed on this allocation! Please use one of the new VMA_ALLOCATION_CREATE_HOST_ACCESS_* flags when creating it."); - if ((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F) + if (m_MapCount < 0xFF) { ++m_MapCount; } @@ -11981,7 +12082,7 @@ void VmaAllocation_T::BlockAllocUnmap() { VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK); - if ((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0) + if (m_MapCount > 0) { --m_MapCount; } @@ -11994,10 +12095,11 @@ void VmaAllocation_T::BlockAllocUnmap() VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData) { VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED); + VMA_ASSERT(IsMappingAllowed() && "Mapping is not allowed on this allocation! 
Please use one of the new VMA_ALLOCATION_CREATE_HOST_ACCESS_* flags when creating it."); - if (m_MapCount != 0) + if (m_MapCount != 0 || IsPersistentMap()) { - if ((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F) + if (m_MapCount < 0xFF) { VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL); *ppData = m_DedicatedAllocation.m_pMappedData; @@ -12032,10 +12134,10 @@ void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator) { VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED); - if ((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0) + if (m_MapCount > 0) { --m_MapCount; - if (m_MapCount == 0) + if (m_MapCount == 0 && !IsPersistentMap()) { m_DedicatedAllocation.m_pMappedData = VMA_NULL; (*hAllocator->GetVulkanFunctions().vkUnmapMemory)( @@ -12121,7 +12223,6 @@ VmaBlockVector::VmaBlockVector( m_Priority(priority), m_MinAllocationAlignment(minAllocationAlignment), m_pMemoryAllocateNext(pMemoryAllocateNext), - m_HasEmptyBlock(false), m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())), m_NextBlockId(0) {} @@ -12147,19 +12248,31 @@ VkResult VmaBlockVector::CreateMinBlocks() return VK_SUCCESS; } -void VmaBlockVector::AddPoolStats(VmaPoolStats* pStats) +void VmaBlockVector::AddStatistics(VmaStatistics& inoutStats) { VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex); const size_t blockCount = m_Blocks.size(); - pStats->blockCount += blockCount; + for (uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex) + { + const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex]; + VMA_ASSERT(pBlock); + VMA_HEAVY_ASSERT(pBlock->Validate()); + pBlock->m_pMetadata->AddStatistics(inoutStats); + } +} + +void VmaBlockVector::AddDetailedStatistics(VmaDetailedStatistics& inoutStats) +{ + VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex); + const size_t blockCount = m_Blocks.size(); for (uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex) { const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex]; VMA_ASSERT(pBlock); VMA_HEAVY_ASSERT(pBlock->Validate()); - pBlock->m_pMetadata->AddPoolStats(*pStats); + pBlock->m_pMetadata->AddDetailedStatistics(inoutStats); } } @@ -12217,14 +12330,8 @@ VkResult VmaBlockVector::Allocate( if (res != VK_SUCCESS) { // Free all already created allocations. 
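// [Editor's sketch, not part of the patch] The assertions added above mean an allocation may only
// be mapped if it was created with one of the VMA_ALLOCATION_CREATE_HOST_ACCESS_* flags. Assuming
// the VMA 3.x public API (vmaCreateBuffer, VMA_MEMORY_USAGE_AUTO - declared elsewhere in this
// header, not in this hunk), a caller would now request a mappable buffer roughly like this:
// (assumes #include "vk_mem_alloc.h")
VkResult CreateMappableStagingBuffer(VmaAllocator allocator, VkDeviceSize size,
                                     VkBuffer* outBuffer, VmaAllocation* outAllocation)
{
    VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufferInfo.size = size;
    bufferInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;

    VmaAllocationCreateInfo allocInfo = {};
    allocInfo.usage = VMA_MEMORY_USAGE_AUTO;
    // Without HOST_ACCESS_SEQUENTIAL_WRITE (or _RANDOM), a later map would trip the
    // "Mapping is not allowed on this allocation" assertion added in this hunk.
    allocInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT |
                      VMA_ALLOCATION_CREATE_MAPPED_BIT; // persistently mapped

    return vmaCreateBuffer(allocator, &bufferInfo, &allocInfo, outBuffer, outAllocation, nullptr);
}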
- const uint32_t heapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex); while (allocIndex--) - { - VmaAllocation_T* const alloc = pAllocations[allocIndex]; - const VkDeviceSize allocSize = alloc->GetSize(); - Free(alloc); - m_hAllocator->m_Budget.RemoveAllocation(heapIndex, allocSize); - } + Free(pAllocations[allocIndex]); memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount); } @@ -12239,8 +12346,6 @@ VkResult VmaBlockVector::AllocatePage( VmaAllocation* pAllocation) { const bool isUpperAddress = (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0; - const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0; - const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0; VkDeviceSize freeMemory; { @@ -12280,17 +12385,11 @@ VkResult VmaBlockVector::AllocatePage( VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks.back(); VMA_ASSERT(pCurrBlock); VkResult res = AllocateFromBlock( - pCurrBlock, - size, - alignment, - createInfo.flags, - createInfo.pUserData, - suballocType, - strategy, - pAllocation); + pCurrBlock, size, alignment, createInfo.flags, createInfo.pUserData, suballocType, strategy, pAllocation); if (res == VK_SUCCESS) { VMA_DEBUG_LOG(" Returned from last block #%u", pCurrBlock->GetId()); + IncrementallySortBlocks(); return VK_SUCCESS; } } @@ -12299,24 +12398,55 @@ VkResult VmaBlockVector::AllocatePage( { if (strategy != VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT) // MIN_MEMORY or default { - // Forward order in m_Blocks - prefer blocks with smallest amount of free space. - for (size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex) + const bool isHostVisible = + (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0; + if(isHostVisible) { - VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex]; - VMA_ASSERT(pCurrBlock); - VkResult res = AllocateFromBlock( - pCurrBlock, - size, - alignment, - createInfo.flags, - createInfo.pUserData, - suballocType, - strategy, - pAllocation); - if (res == VK_SUCCESS) + const bool isMappingAllowed = (createInfo.flags & + (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != 0; + /* + For non-mappable allocations, check blocks that are not mapped first. + For mappable allocations, check blocks that are already mapped first. + This way, having many blocks, we will separate mappable and non-mappable allocations, + hopefully limiting the number of blocks that are mapped, which will help tools like RenderDoc. + */ + for(size_t mappingI = 0; mappingI < 2; ++mappingI) { - VMA_DEBUG_LOG(" Returned from existing block #%u", pCurrBlock->GetId()); - return VK_SUCCESS; + // Forward order in m_Blocks - prefer blocks with smallest amount of free space. 
+ for (size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex) + { + VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex]; + VMA_ASSERT(pCurrBlock); + const bool isBlockMapped = pCurrBlock->GetMappedData() != VMA_NULL; + if((mappingI == 0) == (isMappingAllowed == isBlockMapped)) + { + VkResult res = AllocateFromBlock( + pCurrBlock, size, alignment, createInfo.flags, createInfo.pUserData, suballocType, strategy, pAllocation); + if (res == VK_SUCCESS) + { + VMA_DEBUG_LOG(" Returned from existing block #%u", pCurrBlock->GetId()); + IncrementallySortBlocks(); + return VK_SUCCESS; + } + } + } + } + } + else + { + // Forward order in m_Blocks - prefer blocks with smallest amount of free space. + for (size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex) + { + VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex]; + VMA_ASSERT(pCurrBlock); + VkResult res = AllocateFromBlock( + pCurrBlock, size, alignment, createInfo.flags, createInfo.pUserData, suballocType, strategy, pAllocation); + if (res == VK_SUCCESS) + { + VMA_DEBUG_LOG(" Returned from existing block #%u", pCurrBlock->GetId()); + IncrementallySortBlocks(); + return VK_SUCCESS; + } } } } @@ -12327,18 +12457,11 @@ VkResult VmaBlockVector::AllocatePage( { VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex]; VMA_ASSERT(pCurrBlock); - VkResult res = AllocateFromBlock( - pCurrBlock, - size, - alignment, - createInfo.flags, - createInfo.pUserData, - suballocType, - strategy, - pAllocation); + VkResult res = AllocateFromBlock(pCurrBlock, size, alignment, createInfo.flags, createInfo.pUserData, suballocType, strategy, pAllocation); if (res == VK_SUCCESS) { VMA_DEBUG_LOG(" Returned from existing block #%u", pCurrBlock->GetId()); + IncrementallySortBlocks(); return VK_SUCCESS; } } @@ -12401,17 +12524,11 @@ VkResult VmaBlockVector::AllocatePage( VMA_ASSERT(pBlock->m_pMetadata->GetSize() >= size); res = AllocateFromBlock( - pBlock, - size, - alignment, - createInfo.flags, - createInfo.pUserData, - suballocType, - strategy, - pAllocation); + pBlock, size, alignment, createInfo.flags, createInfo.pUserData, suballocType, strategy, pAllocation); if (res == VK_SUCCESS) { VMA_DEBUG_LOG(" Created new block #%u Size=%llu", pBlock->GetId(), newBlockSize); + IncrementallySortBlocks(); return VK_SUCCESS; } else @@ -12426,7 +12543,8 @@ VkResult VmaBlockVector::AllocatePage( } void VmaBlockVector::Free( - const VmaAllocation hAllocation) + const VmaAllocation hAllocation, + bool incrementalSort) { VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL; @@ -12455,7 +12573,9 @@ void VmaBlockVector::Free( pBlock->Unmap(m_hAllocator, 1); } + const bool hadEmptyBlockBeforeFree = HasEmptyBlock(); pBlock->m_pMetadata->Free(hAllocation->GetAllocHandle()); + pBlock->PostFree(m_hAllocator); VMA_HEAVY_ASSERT(pBlock->Validate()); VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", m_MemoryTypeIndex); @@ -12464,17 +12584,17 @@ void VmaBlockVector::Free( // pBlock became empty after this deallocation. if (pBlock->m_pMetadata->IsEmpty()) { - // Already has empty block. We don't want to have two, so delete this one. - if ((m_HasEmptyBlock || budgetExceeded) && canDeleteBlock) + // Already had empty block. We don't want to have two, so delete this one. + if ((hadEmptyBlockBeforeFree || budgetExceeded) && canDeleteBlock) { pBlockToDelete = pBlock; Remove(pBlock); } - // else: We now have an empty block - leave it. + // else: We now have one empty block - leave it. A hysteresis to avoid allocating whole block back and forth. 
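// [Editor's sketch, not part of the patch] The branch above implements a simple hysteresis in
// Free(): at most one empty VkDeviceMemory block is kept alive, so alternating allocate/free of a
// single resource does not repeatedly create and destroy a whole device memory block. The
// decision, condensed into a standalone helper with illustrative names:
bool ShouldDeleteEmptiedBlock(bool hadEmptyBlockBeforeFree,
                              bool budgetExceeded,
                              bool canDeleteBlock /* still above the pool's min block count */)
{
    // Keep the first empty block as a reserve; delete a second one (or any empty block while
    // over budget), provided the minimum block count allows it.
    return (hadEmptyBlockBeforeFree || budgetExceeded) && canDeleteBlock;
}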
} // pBlock didn't become empty, but we have another empty block - find and free that one. // (This is optional, heuristics.) - else if (m_HasEmptyBlock && canDeleteBlock) + else if (hadEmptyBlockBeforeFree && canDeleteBlock) { VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back(); if (pLastBlock->m_pMetadata->IsEmpty()) @@ -12484,8 +12604,8 @@ void VmaBlockVector::Free( } } - UpdateHasEmptyBlock(); - IncrementallySortBlocks(); + if (incrementalSort) + IncrementallySortBlocks(); } // Destruction of a free block. Deferred until this point, outside of mutex @@ -12496,6 +12616,9 @@ void VmaBlockVector::Free( pBlockToDelete->Destroy(m_hAllocator); vma_delete(m_hAllocator, pBlockToDelete); } + + m_hAllocator->m_Budget.RemoveAllocation(m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex), hAllocation->GetSize()); + m_hAllocator->m_AllocationObjectAllocator.Free(hAllocation); } VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const @@ -12541,6 +12664,15 @@ void VmaBlockVector::IncrementallySortBlocks() } } +void VmaBlockVector::SortByFreeSize() +{ + VMA_SORT(m_Blocks.begin(), m_Blocks.end(), + [](auto* b1, auto* b2) + { + return b1->m_pMetadata->GetSumFreeSize() < b2->m_pMetadata->GetSumFreeSize(); + }); +} + VkResult VmaBlockVector::AllocateFromBlock( VmaDeviceMemoryBlock* pBlock, VkDeviceSize size, @@ -12552,8 +12684,6 @@ VkResult VmaBlockVector::AllocateFromBlock( VmaAllocation* pAllocation) { const bool isUpperAddress = (allocFlags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0; - const bool mapped = (allocFlags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0; - const bool isUserDataString = (allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0; VmaAllocationRequest currRequest = {}; if (pBlock->m_pMetadata->CreateAllocationRequest( @@ -12564,42 +12694,59 @@ VkResult VmaBlockVector::AllocateFromBlock( strategy, &currRequest)) { - // Allocate from pCurrBlock. - if (mapped) - { - VkResult res = pBlock->Map(m_hAllocator, 1, VMA_NULL); - if (res != VK_SUCCESS) - { - return res; - } - } + return CommitAllocationRequest(currRequest, pBlock, alignment, allocFlags, pUserData, suballocType, pAllocation); + } + return VK_ERROR_OUT_OF_DEVICE_MEMORY; +} - *pAllocation = m_hAllocator->m_AllocationObjectAllocator.Allocate(isUserDataString); - pBlock->m_pMetadata->Alloc(currRequest, suballocType, *pAllocation); - UpdateHasEmptyBlock(); - (*pAllocation)->InitBlockAllocation( - pBlock, - currRequest.allocHandle, - alignment, - currRequest.size, // Not size, as actual allocation size may be larger than requested! 
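// [Editor's sketch, not part of the patch] Both IncrementallySortBlocks(), now called after every
// successful allocation above, and the new SortByFreeSize() keep m_Blocks ordered by ascending
// free space, so the forward search fills the fullest blocks first. The incremental variant does
// a single adjacent swap per call, which keeps an almost-sorted vector sorted without a full sort
// on the hot path. A standalone illustration with simplified types:
#include <cstdint>
#include <utility>
#include <vector>

struct BlockSketch { uint64_t sumFreeSize; };

void IncrementallySortSketch(std::vector<BlockSketch*>& blocks)
{
    for (size_t i = 1; i < blocks.size(); ++i)
    {
        if (blocks[i - 1]->sumFreeSize > blocks[i]->sumFreeSize)
        {
            std::swap(blocks[i - 1], blocks[i]);
            return; // one swap per call is enough to converge over repeated allocations
        }
    }
}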
- m_MemoryTypeIndex, - suballocType, - mapped); - VMA_HEAVY_ASSERT(pBlock->Validate()); - (*pAllocation)->SetUserData(m_hAllocator, pUserData); - m_hAllocator->m_Budget.AddAllocation(m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex), currRequest.size); - if (VMA_DEBUG_INITIALIZE_ALLOCATIONS) - { - m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED); - } - if (IsCorruptionDetectionEnabled()) +VkResult VmaBlockVector::CommitAllocationRequest( + VmaAllocationRequest& allocRequest, + VmaDeviceMemoryBlock* pBlock, + VkDeviceSize alignment, + VmaAllocationCreateFlags allocFlags, + void* pUserData, + VmaSuballocationType suballocType, + VmaAllocation* pAllocation) +{ + const bool mapped = (allocFlags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0; + const bool isUserDataString = (allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0; + const bool isMappingAllowed = (allocFlags & + (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != 0; + + pBlock->PostAlloc(); + // Allocate from pCurrBlock. + if (mapped) + { + VkResult res = pBlock->Map(m_hAllocator, 1, VMA_NULL); + if (res != VK_SUCCESS) { - VkResult res = pBlock->WriteMagicValueAfterAllocation(m_hAllocator, (*pAllocation)->GetOffset(), currRequest.size); - VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value."); + return res; } - return VK_SUCCESS; } - return VK_ERROR_OUT_OF_DEVICE_MEMORY; + + *pAllocation = m_hAllocator->m_AllocationObjectAllocator.Allocate(isUserDataString, isMappingAllowed); + pBlock->m_pMetadata->Alloc(allocRequest, suballocType, *pAllocation); + (*pAllocation)->InitBlockAllocation( + pBlock, + allocRequest.allocHandle, + alignment, + allocRequest.size, // Not size, as actual allocation size may be larger than requested! + m_MemoryTypeIndex, + suballocType, + mapped); + VMA_HEAVY_ASSERT(pBlock->Validate()); + (*pAllocation)->SetUserData(m_hAllocator, pUserData); + m_hAllocator->m_Budget.AddAllocation(m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex), allocRequest.size); + if (VMA_DEBUG_INITIALIZE_ALLOCATIONS) + { + m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED); + } + if (IsCorruptionDetectionEnabled()) + { + VkResult res = pBlock->WriteMagicValueAfterAllocation(m_hAllocator, (*pAllocation)->GetOffset(), allocRequest.size); + VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value."); + } + return VK_SUCCESS; } VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex) @@ -12668,241 +12815,17 @@ VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIn return VK_SUCCESS; } -void VmaBlockVector::ApplyDefragmentationMovesCpu( - VmaBlockVectorDefragmentationContext* pDefragCtx, - const VmaVector<VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove>>& moves) +bool VmaBlockVector::HasEmptyBlock() { - const size_t blockCount = m_Blocks.size(); - const bool isNonCoherent = m_hAllocator->IsMemoryTypeNonCoherent(m_MemoryTypeIndex); - - enum BLOCK_FLAG - { - BLOCK_FLAG_USED = 0x00000001, - BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION = 0x00000002, - }; - - struct BlockInfo - { - uint32_t flags; - void* pMappedData; - }; - VmaVector< BlockInfo, VmaStlAllocator<BlockInfo> > - blockInfo(blockCount, BlockInfo(), VmaStlAllocator<BlockInfo>(m_hAllocator->GetAllocationCallbacks())); - memset(blockInfo.data(), 0, blockCount * sizeof(BlockInfo)); - - // Go over all moves. 
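// [Editor's sketch, not part of the patch] CommitAllocationRequest() above preserves the two debug
// facilities of the old allocation path: filling new allocations with a byte pattern and, when
// corruption detection is enabled, writing a magic value into the margin just past the allocation
// so a later check can spot overruns. A minimal standalone version of the guard idea
// (the constant is illustrative, not necessarily VMA's):
#include <cstdint>
#include <cstring>

static const uint32_t kGuardMagic = 0x7F84E666u;

void WriteGuard(void* blockMappedData, uint64_t allocEndOffset)
{
    std::memcpy(static_cast<char*>(blockMappedData) + allocEndOffset,
                &kGuardMagic, sizeof(kGuardMagic));
}

bool GuardIntact(const void* blockMappedData, uint64_t allocEndOffset)
{
    uint32_t value = 0;
    std::memcpy(&value, static_cast<const char*>(blockMappedData) + allocEndOffset, sizeof(value));
    return value == kGuardMagic; // false => something wrote past the end of the allocation
}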
Mark blocks that are used with BLOCK_FLAG_USED. - const size_t moveCount = moves.size(); - for (size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex) - { - const VmaDefragmentationMove& move = moves[moveIndex]; - blockInfo[move.srcBlockIndex].flags |= BLOCK_FLAG_USED; - blockInfo[move.dstBlockIndex].flags |= BLOCK_FLAG_USED; - } - - VMA_ASSERT(pDefragCtx->res == VK_SUCCESS); - - // Go over all blocks. Get mapped pointer or map if necessary. - for (size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex) - { - BlockInfo& currBlockInfo = blockInfo[blockIndex]; - VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex]; - if ((currBlockInfo.flags & BLOCK_FLAG_USED) != 0) - { - currBlockInfo.pMappedData = pBlock->GetMappedData(); - // It is not originally mapped - map it. - if (currBlockInfo.pMappedData == VMA_NULL) - { - pDefragCtx->res = pBlock->Map(m_hAllocator, 1, &currBlockInfo.pMappedData); - if (pDefragCtx->res == VK_SUCCESS) - { - currBlockInfo.flags |= BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION; - } - } - } - } - - // Go over all moves. Do actual data transfer. - if (pDefragCtx->res == VK_SUCCESS) - { - const VkDeviceSize nonCoherentAtomSize = m_hAllocator->m_PhysicalDeviceProperties.limits.nonCoherentAtomSize; - VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE }; - - for (size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex) - { - const VmaDefragmentationMove& move = moves[moveIndex]; - - const BlockInfo& srcBlockInfo = blockInfo[move.srcBlockIndex]; - const BlockInfo& dstBlockInfo = blockInfo[move.dstBlockIndex]; - - VMA_ASSERT(srcBlockInfo.pMappedData && dstBlockInfo.pMappedData); - - // Invalidate source. - if (isNonCoherent) - { - VmaDeviceMemoryBlock* const pSrcBlock = m_Blocks[move.srcBlockIndex]; - memRange.memory = pSrcBlock->GetDeviceMemory(); - memRange.offset = VmaAlignDown(move.srcOffset, nonCoherentAtomSize); - memRange.size = VMA_MIN( - VmaAlignUp(move.size + (move.srcOffset - memRange.offset), nonCoherentAtomSize), - pSrcBlock->m_pMetadata->GetSize() - memRange.offset); - (*m_hAllocator->GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange); - } - - // THE PLACE WHERE ACTUAL DATA COPY HAPPENS. - memmove( - reinterpret_cast<char*>(dstBlockInfo.pMappedData) + move.dstOffset, - reinterpret_cast<char*>(srcBlockInfo.pMappedData) + move.srcOffset, - static_cast<size_t>(move.size)); - - if (IsCorruptionDetectionEnabled()) - { - VmaWriteMagicValue(dstBlockInfo.pMappedData, move.dstOffset + move.size); - } - - // Flush destination. - if (isNonCoherent) - { - VmaDeviceMemoryBlock* const pDstBlock = m_Blocks[move.dstBlockIndex]; - memRange.memory = pDstBlock->GetDeviceMemory(); - memRange.offset = VmaAlignDown(move.dstOffset, nonCoherentAtomSize); - memRange.size = VMA_MIN( - VmaAlignUp(move.size + (move.dstOffset - memRange.offset), nonCoherentAtomSize), - pDstBlock->m_pMetadata->GetSize() - memRange.offset); - (*m_hAllocator->GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange); - } - } - } - - // Go over all blocks in reverse order. Unmap those that were mapped just for defragmentation. - // Regardless of pCtx->res == VK_SUCCESS. 
- for (size_t blockIndex = blockCount; blockIndex--; ) - { - const BlockInfo& currBlockInfo = blockInfo[blockIndex]; - if ((currBlockInfo.flags & BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION) != 0) - { - VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex]; - pBlock->Unmap(m_hAllocator, 1); - } - } -} - -void VmaBlockVector::ApplyDefragmentationMovesGpu( - VmaBlockVectorDefragmentationContext* pDefragCtx, - VmaVector<VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove>>& moves, - VkCommandBuffer commandBuffer) -{ - const size_t blockCount = m_Blocks.size(); - - pDefragCtx->blockContexts.resize(blockCount); - memset(pDefragCtx->blockContexts.data(), 0, blockCount * sizeof(VmaBlockDefragmentationContext)); - - // Go over all moves. Mark blocks that are used with BLOCK_FLAG_USED. - const size_t moveCount = moves.size(); - for (size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex) - { - const VmaDefragmentationMove& move = moves[moveIndex]; - - //if(move.type == VMA_ALLOCATION_TYPE_UNKNOWN) - { - // Old school move still require us to map the whole block - pDefragCtx->blockContexts[move.srcBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED; - pDefragCtx->blockContexts[move.dstBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED; - } - } - - VMA_ASSERT(pDefragCtx->res == VK_SUCCESS); - - // Go over all blocks. Create and bind buffer for whole block if necessary. - { - VkBufferCreateInfo bufCreateInfo; - VmaFillGpuDefragmentationBufferCreateInfo(bufCreateInfo); - - for (size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex) - { - VmaBlockDefragmentationContext& currBlockCtx = pDefragCtx->blockContexts[blockIndex]; - VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex]; - if ((currBlockCtx.flags & VmaBlockDefragmentationContext::BLOCK_FLAG_USED) != 0) - { - bufCreateInfo.size = pBlock->m_pMetadata->GetSize(); - pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkCreateBuffer)( - m_hAllocator->m_hDevice, &bufCreateInfo, m_hAllocator->GetAllocationCallbacks(), &currBlockCtx.hBuffer); - if (pDefragCtx->res == VK_SUCCESS) - { - pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkBindBufferMemory)( - m_hAllocator->m_hDevice, currBlockCtx.hBuffer, pBlock->GetDeviceMemory(), 0); - } - } - } - } - - // Go over all moves. Post data transfer commands to command buffer. - if (pDefragCtx->res == VK_SUCCESS) - { - for (size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex) - { - const VmaDefragmentationMove& move = moves[moveIndex]; - - const VmaBlockDefragmentationContext& srcBlockCtx = pDefragCtx->blockContexts[move.srcBlockIndex]; - const VmaBlockDefragmentationContext& dstBlockCtx = pDefragCtx->blockContexts[move.dstBlockIndex]; - - VMA_ASSERT(srcBlockCtx.hBuffer && dstBlockCtx.hBuffer); - - VkBufferCopy region = { - move.srcOffset, - move.dstOffset, - move.size }; - (*m_hAllocator->GetVulkanFunctions().vkCmdCopyBuffer)( - commandBuffer, srcBlockCtx.hBuffer, dstBlockCtx.hBuffer, 1, ®ion); - } - } - - // Save buffers to defrag context for later destruction. 
- if (pDefragCtx->res == VK_SUCCESS && moveCount > 0) - { - pDefragCtx->res = VK_NOT_READY; - } -} - -void VmaBlockVector::FreeEmptyBlocks(VmaDefragmentationStats* pDefragmentationStats) -{ - for (size_t blockIndex = m_Blocks.size(); blockIndex--; ) - { - VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex]; - if (pBlock->m_pMetadata->IsEmpty()) - { - if (m_Blocks.size() > m_MinBlockCount) - { - if (pDefragmentationStats != VMA_NULL) - { - ++pDefragmentationStats->deviceMemoryBlocksFreed; - pDefragmentationStats->bytesFreed += pBlock->m_pMetadata->GetSize(); - } - - VmaVectorRemove(m_Blocks, blockIndex); - pBlock->Destroy(m_hAllocator); - vma_delete(m_hAllocator, pBlock); - } - else - { - break; - } - } - } - UpdateHasEmptyBlock(); -} - -void VmaBlockVector::UpdateHasEmptyBlock() -{ - m_HasEmptyBlock = false; for (size_t index = 0, count = m_Blocks.size(); index < count; ++index) { VmaDeviceMemoryBlock* const pBlock = m_Blocks[index]; if (pBlock->m_pMetadata->IsEmpty()) { - m_HasEmptyBlock = true; - break; + return true; } } + return false; } #if VMA_STATS_STRING_ENABLED @@ -12961,234 +12884,12 @@ void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json) json.ContinueString(m_Blocks[i]->GetId()); json.EndString(); - m_Blocks[i]->m_pMetadata->PrintDetailedMap(json); + m_Blocks[i]->m_pMetadata->PrintDetailedMap(json, m_Blocks[i]->GetMapRefCount()); } json.EndObject(); } #endif // VMA_STATS_STRING_ENABLED -void VmaBlockVector::Defragment( - class VmaBlockVectorDefragmentationContext* pCtx, - VmaDefragmentationStats* pStats, VmaDefragmentationFlags flags, - VkDeviceSize& maxCpuBytesToMove, uint32_t& maxCpuAllocationsToMove, - VkDeviceSize& maxGpuBytesToMove, uint32_t& maxGpuAllocationsToMove, - VkCommandBuffer commandBuffer) -{ - pCtx->res = VK_SUCCESS; - - const VkMemoryPropertyFlags memPropFlags = - m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags; - const bool isHostVisible = (memPropFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0; - - const bool canDefragmentOnCpu = maxCpuBytesToMove > 0 && maxCpuAllocationsToMove > 0 && - isHostVisible; - const bool canDefragmentOnGpu = maxGpuBytesToMove > 0 && maxGpuAllocationsToMove > 0 && - !IsCorruptionDetectionEnabled() && - ((1u << m_MemoryTypeIndex) & m_hAllocator->GetGpuDefragmentationMemoryTypeBits()) != 0; - - // There are options to defragment this memory type. - if (canDefragmentOnCpu || canDefragmentOnGpu) - { - bool defragmentOnGpu; - // There is only one option to defragment this memory type. - if (canDefragmentOnGpu != canDefragmentOnCpu) - { - defragmentOnGpu = canDefragmentOnGpu; - } - // Both options are available: Heuristics to choose the best one. - else - { - defragmentOnGpu = (memPropFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0 || - m_hAllocator->IsIntegratedGpu(); - } - - bool overlappingMoveSupported = !defragmentOnGpu; - - if (m_hAllocator->m_UseMutex) - { - if (flags & VMA_DEFRAGMENTATION_FLAG_INCREMENTAL) - { - if (!m_Mutex.TryLockWrite()) - { - pCtx->res = VK_ERROR_INITIALIZATION_FAILED; - return; - } - } - else - { - m_Mutex.LockWrite(); - pCtx->mutexLocked = true; - } - } - - pCtx->Begin(overlappingMoveSupported, flags); - - // Defragment. - - const VkDeviceSize maxBytesToMove = defragmentOnGpu ? maxGpuBytesToMove : maxCpuBytesToMove; - const uint32_t maxAllocationsToMove = defragmentOnGpu ? 
maxGpuAllocationsToMove : maxCpuAllocationsToMove; - VmaDefragmentationAlgorithm* algo = pCtx->GetAlgorithm(); - pCtx->res = algo->Defragment(pCtx->defragmentationMoves, maxBytesToMove, maxAllocationsToMove, flags); - - // Accumulate statistics. - if (pStats != VMA_NULL) - { - const VkDeviceSize bytesMoved = algo->GetBytesMoved(); - const uint32_t allocationsMoved = algo->GetAllocationsMoved(); - pStats->bytesMoved += bytesMoved; - pStats->allocationsMoved += allocationsMoved; - VMA_ASSERT(bytesMoved <= maxBytesToMove); - VMA_ASSERT(allocationsMoved <= maxAllocationsToMove); - if (defragmentOnGpu) - { - maxGpuBytesToMove -= bytesMoved; - maxGpuAllocationsToMove -= allocationsMoved; - } - else - { - maxCpuBytesToMove -= bytesMoved; - maxCpuAllocationsToMove -= allocationsMoved; - } - } - - if (flags & VMA_DEFRAGMENTATION_FLAG_INCREMENTAL) - { - if (m_hAllocator->m_UseMutex) - m_Mutex.UnlockWrite(); - - if (pCtx->res >= VK_SUCCESS && !pCtx->defragmentationMoves.empty()) - pCtx->res = VK_NOT_READY; - - return; - } - - if (pCtx->res >= VK_SUCCESS) - { - if (defragmentOnGpu) - { - ApplyDefragmentationMovesGpu(pCtx, pCtx->defragmentationMoves, commandBuffer); - } - else - { - ApplyDefragmentationMovesCpu(pCtx, pCtx->defragmentationMoves); - } - } - } -} - -void VmaBlockVector::DefragmentationEnd( - class VmaBlockVectorDefragmentationContext* pCtx, - uint32_t flags, - VmaDefragmentationStats* pStats) -{ - if (flags & VMA_DEFRAGMENTATION_FLAG_INCREMENTAL && m_hAllocator->m_UseMutex) - { - VMA_ASSERT(pCtx->mutexLocked == false); - - // Incremental defragmentation doesn't hold the lock, so when we enter here we don't actually have any - // lock protecting us. Since we mutate state here, we have to take the lock out now - m_Mutex.LockWrite(); - pCtx->mutexLocked = true; - } - - // If the mutex isn't locked we didn't do any work and there is nothing to delete. - if (pCtx->mutexLocked || !m_hAllocator->m_UseMutex) - { - // Destroy buffers. 
- for (size_t blockIndex = pCtx->blockContexts.size(); blockIndex--;) - { - VmaBlockDefragmentationContext& blockCtx = pCtx->blockContexts[blockIndex]; - if (blockCtx.hBuffer) - { - (*m_hAllocator->GetVulkanFunctions().vkDestroyBuffer)(m_hAllocator->m_hDevice, blockCtx.hBuffer, m_hAllocator->GetAllocationCallbacks()); - } - } - - if (pCtx->res >= VK_SUCCESS) - { - FreeEmptyBlocks(pStats); - } - } - - if (pCtx->mutexLocked) - { - VMA_ASSERT(m_hAllocator->m_UseMutex); - m_Mutex.UnlockWrite(); - } -} - -uint32_t VmaBlockVector::ProcessDefragmentations( - class VmaBlockVectorDefragmentationContext* pCtx, - VmaDefragmentationPassMoveInfo* pMove, uint32_t maxMoves) -{ - VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex); - - const uint32_t moveCount = VMA_MIN(uint32_t(pCtx->defragmentationMoves.size()) - pCtx->defragmentationMovesProcessed, maxMoves); - - for (uint32_t i = 0; i < moveCount; ++i) - { - VmaDefragmentationMove& move = pCtx->defragmentationMoves[pCtx->defragmentationMovesProcessed + i]; - - pMove->allocation = move.hAllocation; - pMove->memory = move.pDstBlock->GetDeviceMemory(); - pMove->offset = move.dstOffset; - - ++pMove; - } - - pCtx->defragmentationMovesProcessed += moveCount; - - return moveCount; -} - -void VmaBlockVector::CommitDefragmentations( - class VmaBlockVectorDefragmentationContext* pCtx, - VmaDefragmentationStats* pStats) -{ - VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex); - - for (uint32_t i = pCtx->defragmentationMovesCommitted; i < pCtx->defragmentationMovesProcessed; ++i) - { - const VmaDefragmentationMove& move = pCtx->defragmentationMoves[i]; - - move.pSrcBlock->m_pMetadata->Free(move.hAllocation->GetAllocHandle()); - move.hAllocation->ChangeBlockAllocation(m_hAllocator, move.pDstBlock, move.dstHandle); - } - - pCtx->defragmentationMovesCommitted = pCtx->defragmentationMovesProcessed; - FreeEmptyBlocks(pStats); -} - -size_t VmaBlockVector::CalcAllocationCount() const -{ - size_t result = 0; - for (size_t i = 0; i < m_Blocks.size(); ++i) - { - result += m_Blocks[i]->m_pMetadata->GetAllocationCount(); - } - return result; -} - -bool VmaBlockVector::IsBufferImageGranularityConflictPossible() const -{ - if (m_BufferImageGranularity == 1) - { - return false; - } - VmaSuballocationType lastSuballocType = VMA_SUBALLOCATION_TYPE_FREE; - for (size_t i = 0, count = m_Blocks.size(); i < count; ++i) - { - VmaDeviceMemoryBlock* const pBlock = m_Blocks[i]; - VMA_ASSERT(m_Algorithm == 0); - VmaBlockMetadata_Generic* const pMetadata = (VmaBlockMetadata_Generic*)pBlock->m_pMetadata; - if (pMetadata->IsBufferImageGranularityConflictPossible(m_BufferImageGranularity, lastSuballocType)) - { - return true; - } - } - return false; -} - VkResult VmaBlockVector::CheckCorruption() { if (!IsCorruptionDetectionEnabled()) @@ -13210,1332 +12911,827 @@ VkResult VmaBlockVector::CheckCorruption() return VK_SUCCESS; } -void VmaBlockVector::AddStats(VmaStats* pStats) -{ - const uint32_t memTypeIndex = m_MemoryTypeIndex; - const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex); - - VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex); - - for (uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex) - { - const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex]; - VMA_ASSERT(pBlock); - VMA_HEAVY_ASSERT(pBlock->Validate()); - VmaStatInfo allocationStatInfo; - pBlock->m_pMetadata->CalcAllocationStatInfo(allocationStatInfo); - VmaAddStatInfo(pStats->total, allocationStatInfo); - 
VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo); - VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo); - } -} #endif // _VMA_BLOCK_VECTOR_FUNCTIONS -#ifndef _VMA_DEFRAGMENTATION_ALGORITHM_GENERIC_FUNCTIONS -VmaDefragmentationAlgorithm_Generic::VmaDefragmentationAlgorithm_Generic( +#ifndef _VMA_DEFRAGMENTATION_CONTEXT_FUNCTIONS +VmaDefragmentationContext_T::VmaDefragmentationContext_T( VmaAllocator hAllocator, - VmaBlockVector* pBlockVector, - bool overlappingMoveSupported) - : VmaDefragmentationAlgorithm(hAllocator, pBlockVector), - m_AllocationCount(0), - m_AllAllocations(false), - m_BytesMoved(0), - m_AllocationsMoved(0), - m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks())) -{ - // Create block info for each block. - const size_t blockCount = m_pBlockVector->m_Blocks.size(); - for (size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex) - { - BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks()); - pBlockInfo->m_OriginalBlockIndex = blockIndex; - pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex]; - m_Blocks.push_back(pBlockInfo); - } - - // Sort them by m_pBlock pointer value. - VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess()); -} - -VmaDefragmentationAlgorithm_Generic::~VmaDefragmentationAlgorithm_Generic() + const VmaDefragmentationInfo& info) + : m_MaxPassBytes(info.maxBytesPerPass == 0 ? VK_WHOLE_SIZE : info.maxBytesPerPass), + m_MaxPassAllocations(info.maxAllocationsPerPass == 0 ? UINT32_MAX : info.maxAllocationsPerPass), + m_MoveAllocator(hAllocator->GetAllocationCallbacks()), + m_Moves(m_MoveAllocator) { - for (size_t i = m_Blocks.size(); i--; ) - { - vma_delete(m_hAllocator, m_Blocks[i]); - } -} + m_Algorithm = info.flags & VMA_DEFRAGMENTATION_FLAG_ALGORITHM_MASK; -void VmaDefragmentationAlgorithm_Generic::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) -{ - VmaDeviceMemoryBlock* pBlock = hAlloc->GetBlock(); - BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess()); - if (it != m_Blocks.end() && (*it)->m_pBlock == pBlock) + if (info.pool != VMA_NULL) { - AllocationInfo allocInfo = AllocationInfo(hAlloc, pChanged); - (*it)->m_Allocations.push_back(allocInfo); + m_BlockVectorCount = 1; + m_PoolBlockVector = &info.pool->m_BlockVector; + m_pBlockVectors = &m_PoolBlockVector; + m_PoolBlockVector->SortByFreeSize(); } else { - VMA_ASSERT(0); - } - - ++m_AllocationCount; -} - -VkResult VmaDefragmentationAlgorithm_Generic::DefragmentRound( - VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves, - VkDeviceSize maxBytesToMove, - uint32_t maxAllocationsToMove, - bool freeOldAllocations) -{ - if (m_Blocks.empty()) - { - return VK_SUCCESS; - } - - // This is a choice based on research. - // Option 1: - uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT; - // Option 2: - //uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT; - - size_t srcBlockMinIndex = 0; - // When FAST_ALGORITHM, move allocations from only last out of blocks that contain non-movable allocations. 
- /* - if(m_AlgorithmFlags & VMA_DEFRAGMENTATION_FAST_ALGORITHM_BIT) - { - const size_t blocksWithNonMovableCount = CalcBlocksWithNonMovableCount(); - if(blocksWithNonMovableCount > 0) + m_BlockVectorCount = hAllocator->GetMemoryTypeCount(); + m_PoolBlockVector = VMA_NULL; + m_pBlockVectors = hAllocator->m_pBlockVectors; + for (uint32_t i = 0; i < m_BlockVectorCount; ++i) { - srcBlockMinIndex = blocksWithNonMovableCount - 1; + VmaBlockVector* vector = m_pBlockVectors[i]; + if (vector != VMA_NULL) + vector->SortByFreeSize(); } } - */ - - size_t srcBlockIndex = m_Blocks.size() - 1; - size_t srcAllocIndex = SIZE_MAX; - for (;;) + + switch (m_Algorithm) { - // 1. Find next allocation to move. - // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source". - // 1.2. Then start from last to first m_Allocations. - while (srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size()) - { - if (m_Blocks[srcBlockIndex]->m_Allocations.empty()) - { - // Finished: no more allocations to process. - if (srcBlockIndex == srcBlockMinIndex) - { - return VK_SUCCESS; - } - else - { - --srcBlockIndex; - srcAllocIndex = SIZE_MAX; - } - } - else - { - srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1; - } - } - - BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex]; - AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex]; - - const VkDeviceSize size = allocInfo.m_hAllocation->GetSize(); - const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset(); - const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment(); - const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType(); - - // 2. Try to find new place for this allocation in preceding or current block. - for (size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex) - { - BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex]; - VmaBlockMetadata* pMetadata = pDstBlockInfo->m_pBlock->m_pMetadata; - VmaAllocationRequest dstAllocRequest; - if (pMetadata->CreateAllocationRequest( - size, - alignment, - false, // upperAddress - suballocType, - strategy, - &dstAllocRequest) && - MoveMakesSense( - dstBlockIndex, pMetadata->GetAllocationOffset(dstAllocRequest.allocHandle), srcBlockIndex, srcOffset)) - { - // Reached limit on number of allocations or bytes to move. 
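// [Editor's sketch, not part of the patch] The new VmaDefragmentationContext_T constructor above
// reads VmaDefragmentationInfo as follows: pool == VMA_NULL means "defragment every block vector
// of the allocator", maxBytesPerPass / maxAllocationsPerPass == 0 mean "unlimited", and the low
// bits of flags select the algorithm. Assuming the matching public entry point
// vmaBeginDefragmentation (declared elsewhere in this header, not in this hunk), a caller would
// configure it roughly like this:
VmaDefragmentationContext BeginIncrementalDefrag(VmaAllocator allocator, VmaPool poolOrNull)
{
    VmaDefragmentationInfo defragInfo = {};
    defragInfo.flags = VMA_DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED_BIT; // default-quality algorithm
    defragInfo.pool = poolOrNull;              // null => all default memory pools
    defragInfo.maxBytesPerPass = 64ull << 20;  // cap copy work per pass (0 = unlimited)
    defragInfo.maxAllocationsPerPass = 256;    // cap moves per pass (0 = unlimited)

    VmaDefragmentationContext ctx = nullptr;
    return vmaBeginDefragmentation(allocator, &defragInfo, &ctx) == VK_SUCCESS ? ctx : nullptr;
}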
- if ((m_AllocationsMoved + 1 > maxAllocationsToMove) || - (m_BytesMoved + size > maxBytesToMove)) - { - return VK_SUCCESS; - } - - VmaDefragmentationMove move = {}; - move.srcBlockIndex = pSrcBlockInfo->m_OriginalBlockIndex; - move.dstBlockIndex = pDstBlockInfo->m_OriginalBlockIndex; - move.srcOffset = srcOffset; - move.dstOffset = pMetadata->GetAllocationOffset(dstAllocRequest.allocHandle); - move.size = size; - move.hAllocation = allocInfo.m_hAllocation; - move.pSrcBlock = pSrcBlockInfo->m_pBlock; - move.pDstBlock = pDstBlockInfo->m_pBlock; - move.dstHandle = dstAllocRequest.allocHandle; - - moves.push_back(move); - - pDstBlockInfo->m_pBlock->m_pMetadata->Alloc(dstAllocRequest, suballocType, allocInfo.m_hAllocation); - - if (freeOldAllocations) - { - pSrcBlockInfo->m_pBlock->m_pMetadata->Free(allocInfo.m_hAllocation->GetAllocHandle()); - allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.allocHandle); - } - - if (allocInfo.m_pChanged != VMA_NULL) - { - *allocInfo.m_pChanged = VK_TRUE; - } - - ++m_AllocationsMoved; - m_BytesMoved += size; - - VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex); - - break; - } - } - - // If not processed, this allocInfo remains in pBlockInfo->m_Allocations for next round. - - if (srcAllocIndex > 0) + case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT: + { + if (hAllocator->GetBufferImageGranularity() > 1) { - --srcAllocIndex; + m_AlgorithmState = vma_new_array(hAllocator, StateExtensive, m_BlockVectorCount); } - else - { - if (srcBlockIndex > 0) - { - --srcBlockIndex; - srcAllocIndex = SIZE_MAX; - } - else - { - return VK_SUCCESS; - } - } - } -} - -bool VmaDefragmentationAlgorithm_Generic::AllocationInfoSizeGreater::operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const -{ - return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize(); -} - -bool VmaDefragmentationAlgorithm_Generic::AllocationInfoOffsetGreater::operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const -{ - return lhs.m_hAllocation->GetOffset() > rhs.m_hAllocation->GetOffset(); -} - -VmaDefragmentationAlgorithm_Generic::BlockInfo::BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) - : m_OriginalBlockIndex(SIZE_MAX), - m_pBlock(VMA_NULL), - m_HasNonMovableAllocations(true), - m_Allocations(pAllocationCallbacks) {} - -void VmaDefragmentationAlgorithm_Generic::BlockInfo::CalcHasNonMovableAllocations() -{ - const size_t blockAllocCount = m_pBlock->m_pMetadata->GetAllocationCount(); - const size_t defragmentAllocCount = m_Allocations.size(); - m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount; -} - -void VmaDefragmentationAlgorithm_Generic::BlockInfo::SortAllocationsBySizeDescending() -{ - VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater()); -} - -void VmaDefragmentationAlgorithm_Generic::BlockInfo::SortAllocationsByOffsetDescending() -{ - VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoOffsetGreater()); -} - -bool VmaDefragmentationAlgorithm_Generic::BlockPointerLess::operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const -{ - return pLhsBlockInfo->m_pBlock < pRhsBlock; -} -bool VmaDefragmentationAlgorithm_Generic::BlockPointerLess::operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const -{ - return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock; -} - -bool VmaDefragmentationAlgorithm_Generic::BlockInfoCompareMoveDestination::operator()(const BlockInfo* 
pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const -{ - if (pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations) - { - return true; - } - if (!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations) - { - return false; - } - if (pLhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize()) - { - return true; - } - return false; -} - -bool VmaDefragmentationAlgorithm_Generic::MoveMakesSense( - size_t dstBlockIndex, VkDeviceSize dstOffset, - size_t srcBlockIndex, VkDeviceSize srcOffset) -{ - if (dstBlockIndex < srcBlockIndex) - { - return true; - } - if (dstBlockIndex > srcBlockIndex) - { - return false; + break; } - if (dstOffset < srcOffset) - { - return true; } - return false; } -size_t VmaDefragmentationAlgorithm_Generic::CalcBlocksWithNonMovableCount() const +VmaDefragmentationContext_T::~VmaDefragmentationContext_T() { - size_t result = 0; - for (size_t i = 0; i < m_Blocks.size(); ++i) + if (m_AlgorithmState) { - if (m_Blocks[i]->m_HasNonMovableAllocations) + switch (m_Algorithm) { - ++result; + case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT: + vma_delete_array(m_MoveAllocator.m_pCallbacks, reinterpret_cast<StateExtensive*>(m_AlgorithmState), m_BlockVectorCount); + break; + default: + VMA_ASSERT(0); } } - return result; } -VkResult VmaDefragmentationAlgorithm_Generic::Defragment( - VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves, - VkDeviceSize maxBytesToMove, - uint32_t maxAllocationsToMove, - VmaDefragmentationFlags flags) +VkResult VmaDefragmentationContext_T::DefragmentPassBegin(VmaDefragmentationPassMoveInfo& moveInfo) { - if (!m_AllAllocations && m_AllocationCount == 0) + if (m_PoolBlockVector != VMA_NULL) { - return VK_SUCCESS; + if (m_PoolBlockVector->GetBlockCount() > 1) + ComputeDefragmentation(*m_PoolBlockVector, 0); + else if (m_PoolBlockVector->GetBlockCount() == 1) + ReallocWithinBlock(*m_PoolBlockVector, m_PoolBlockVector->GetBlock(0)); } - - const size_t blockCount = m_Blocks.size(); - for (size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex) + else { - BlockInfo* pBlockInfo = m_Blocks[blockIndex]; - - if (m_AllAllocations) + for (uint32_t i = 0; i < m_BlockVectorCount; ++i) { - VmaBlockMetadata_Generic* pMetadata = (VmaBlockMetadata_Generic*)pBlockInfo->m_pBlock->m_pMetadata; - VMA_ASSERT(!pMetadata->IsVirtual()); - for (VmaSuballocationList::const_iterator it = pMetadata->m_Suballocations.begin(); - it != pMetadata->m_Suballocations.end(); - ++it) + if (m_pBlockVectors[i] != VMA_NULL) { - if (it->type != VMA_SUBALLOCATION_TYPE_FREE) + if (m_pBlockVectors[i]->GetBlockCount() > 1) { - AllocationInfo allocInfo = AllocationInfo((VmaAllocation)it->userData, VMA_NULL); - pBlockInfo->m_Allocations.push_back(allocInfo); + if (ComputeDefragmentation(*m_pBlockVectors[i], i)) + break; + } + else if (m_pBlockVectors[i]->GetBlockCount() == 1) + { + if (ReallocWithinBlock(*m_pBlockVectors[i], m_pBlockVectors[i]->GetBlock(0))) + break; } } } - - pBlockInfo->CalcHasNonMovableAllocations(); - - // This is a choice based on research. - // Option 1: - pBlockInfo->SortAllocationsByOffsetDescending(); - // Option 2: - //pBlockInfo->SortAllocationsBySizeDescending(); } - // Sort m_Blocks this time by the main criterium, from most "destination" to most "source" blocks. - VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination()); - - // This is a choice based on research. 
- const uint32_t roundCount = 2; - - // Execute defragmentation rounds (the main part). - VkResult result = VK_SUCCESS; - for (uint32_t round = 0; (round < roundCount) && (result == VK_SUCCESS); ++round) + moveInfo.moveCount = static_cast<uint32_t>(m_Moves.size()); + if (moveInfo.moveCount > 0) { - result = DefragmentRound(moves, maxBytesToMove, maxAllocationsToMove, !(flags & VMA_DEFRAGMENTATION_FLAG_INCREMENTAL)); + moveInfo.pMoves = m_Moves.data(); + return VK_INCOMPLETE; } - return result; -} -#endif // _VMA_DEFRAGMENTATION_ALGORITHM_GENERIC_FUNCTIONS - -#ifndef _VMA_DEFRAGMENTATION_ALGORITHM_FAST_FUNCTIONS -VmaDefragmentationAlgorithm_Fast::VmaDefragmentationAlgorithm_Fast( - VmaAllocator hAllocator, - VmaBlockVector* pBlockVector, - bool overlappingMoveSupported) - : VmaDefragmentationAlgorithm(hAllocator, pBlockVector), - m_OverlappingMoveSupported(overlappingMoveSupported), - m_AllocationCount(0), - m_AllAllocations(false), - m_BytesMoved(0), - m_AllocationsMoved(0), - m_BlockInfos(VmaStlAllocator<BlockInfo>(hAllocator->GetAllocationCallbacks())) -{ - VMA_ASSERT(VMA_DEBUG_MARGIN == 0); + moveInfo.pMoves = VMA_NULL; + return VK_SUCCESS; } -VkResult VmaDefragmentationAlgorithm_Fast::Defragment( - VmaVector<VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove>>& moves, - VkDeviceSize maxBytesToMove, - uint32_t maxAllocationsToMove, - VmaDefragmentationFlags flags) +VkResult VmaDefragmentationContext_T::DefragmentPassEnd(VmaDefragmentationPassMoveInfo& moveInfo) { - VMA_ASSERT(m_AllAllocations || m_pBlockVector->CalcAllocationCount() == m_AllocationCount); - - const size_t blockCount = m_pBlockVector->GetBlockCount(); - if (blockCount == 0 || maxBytesToMove == 0 || maxAllocationsToMove == 0) - { - return VK_SUCCESS; - } - - PreprocessMetadata(); - - // Sort blocks in order from most destination. + VMA_ASSERT(moveInfo.moveCount > 0 ? 
moveInfo.pMoves != VMA_NULL : true); - m_BlockInfos.resize(blockCount); - for (size_t i = 0; i < blockCount; ++i) + VkResult result = VK_SUCCESS; + VmaVector<ImmovableBlock, VmaStlAllocator<ImmovableBlock>> immovableBlocks(VmaStlAllocator<ImmovableBlock>(m_MoveAllocator.m_pCallbacks)); + for (uint32_t i = 0; i < moveInfo.moveCount; ++i) { - m_BlockInfos[i].origBlockIndex = i; - } - - VMA_SORT(m_BlockInfos.begin(), m_BlockInfos.end(), [this](const BlockInfo& lhs, const BlockInfo& rhs) -> bool { - return m_pBlockVector->GetBlock(lhs.origBlockIndex)->m_pMetadata->GetSumFreeSize() < - m_pBlockVector->GetBlock(rhs.origBlockIndex)->m_pMetadata->GetSumFreeSize(); - }); + VmaDefragmentationMove& move = moveInfo.pMoves[i]; + size_t prevCount = 0, currentCount = 0; + VkDeviceSize freedBlockSize = 0; - // THE MAIN ALGORITHM - - FreeSpaceDatabase freeSpaceDb; + uint32_t vectorIndex; + VmaBlockVector* vector; + if (m_PoolBlockVector != VMA_NULL) + { + vectorIndex = 0; + vector = m_PoolBlockVector; + } + else + { + vectorIndex = move.srcAllocation->GetMemoryTypeIndex(); + vector = m_pBlockVectors[vectorIndex]; + VMA_ASSERT(vector != VMA_NULL); + } - size_t dstBlockInfoIndex = 0; - size_t dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex; - VmaDeviceMemoryBlock* pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex); - VmaBlockMetadata_Generic* pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata; - VkDeviceSize dstBlockSize = pDstMetadata->GetSize(); - VkDeviceSize dstOffset = 0; + VmaAllocation dst = reinterpret_cast<VmaAllocation>(move.internalData); + switch (move.operation) + { + case VMA_DEFRAGMENTATION_MOVE_OPERATION_COPY: + { + move.srcAllocation->SwapBlockAllocation(dst); + prevCount = vector->GetBlockCount(); + freedBlockSize = dst->GetBlock()->m_pMetadata->GetSize(); + vector->Free(dst, false); + currentCount = vector->GetBlockCount(); - bool end = false; - for (size_t srcBlockInfoIndex = 0; !end && srcBlockInfoIndex < blockCount; ++srcBlockInfoIndex) - { - const size_t srcOrigBlockIndex = m_BlockInfos[srcBlockInfoIndex].origBlockIndex; - VmaDeviceMemoryBlock* const pSrcBlock = m_pBlockVector->GetBlock(srcOrigBlockIndex); - VmaBlockMetadata_Generic* const pSrcMetadata = (VmaBlockMetadata_Generic*)pSrcBlock->m_pMetadata; - for (VmaSuballocationList::iterator srcSuballocIt = pSrcMetadata->m_Suballocations.begin(); - !end && srcSuballocIt != pSrcMetadata->m_Suballocations.end(); ) + result = VK_INCOMPLETE; + break; + } + case VMA_DEFRAGMENTATION_MOVE_OPERATION_IGNORE: { - VmaAllocation const pAlloc = (VmaAllocation)srcSuballocIt->userData; - const VkDeviceSize srcAllocAlignment = pAlloc->GetAlignment(); - const VkDeviceSize srcAllocSize = srcSuballocIt->size; - if (m_AllocationsMoved == maxAllocationsToMove || - m_BytesMoved + srcAllocSize > maxBytesToMove) - { - end = true; - break; - } - const VkDeviceSize srcAllocOffset = srcSuballocIt->offset; - - VmaDefragmentationMove move = {}; - // Try to place it in one of free spaces from the database. 
- size_t freeSpaceInfoIndex; - VkDeviceSize dstAllocOffset; - if (freeSpaceDb.Fetch(srcAllocAlignment, srcAllocSize, - freeSpaceInfoIndex, dstAllocOffset)) - { - size_t freeSpaceOrigBlockIndex = m_BlockInfos[freeSpaceInfoIndex].origBlockIndex; - VmaDeviceMemoryBlock* pFreeSpaceBlock = m_pBlockVector->GetBlock(freeSpaceOrigBlockIndex); - VmaBlockMetadata_Generic* pFreeSpaceMetadata = (VmaBlockMetadata_Generic*)pFreeSpaceBlock->m_pMetadata; + m_Stats.bytesMoved -= move.srcAllocation->GetSize(); + vector->Free(dst, false); - // Same block - if (freeSpaceInfoIndex == srcBlockInfoIndex) + VmaDeviceMemoryBlock* newBlock = move.srcAllocation->GetBlock(); + bool notPresent = true; + for (const ImmovableBlock& block : immovableBlocks) + { + if (block.block == newBlock) { - VMA_ASSERT(dstAllocOffset <= srcAllocOffset); - - // MOVE OPTION 1: Move the allocation inside the same block by decreasing offset. - - VmaSuballocation suballoc = *srcSuballocIt; - suballoc.offset = dstAllocOffset; - ((VmaAllocation)(suballoc.userData))->ChangeAllocHandle((VmaAllocHandle)(dstAllocOffset + 1)); - m_BytesMoved += srcAllocSize; - ++m_AllocationsMoved; - - VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt; - ++nextSuballocIt; - pSrcMetadata->m_Suballocations.erase(srcSuballocIt); - srcSuballocIt = nextSuballocIt; - - InsertSuballoc(pFreeSpaceMetadata, suballoc); - - move.srcBlockIndex = srcOrigBlockIndex; - move.dstBlockIndex = freeSpaceOrigBlockIndex; - move.srcOffset = srcAllocOffset; - move.dstOffset = dstAllocOffset; - move.dstHandle = (VmaAllocHandle)(dstAllocOffset + 1); - move.size = srcAllocSize; - - moves.push_back(move); + notPresent = false; + break; } - // Different block - else - { - // MOVE OPTION 2: Move the allocation to a different block. - - VMA_ASSERT(freeSpaceInfoIndex < srcBlockInfoIndex); - - VmaSuballocation suballoc = *srcSuballocIt; - suballoc.offset = dstAllocOffset; - ((VmaAllocation)(suballoc.userData))->ChangeBlockAllocation(m_hAllocator, pFreeSpaceBlock, (VmaAllocHandle)(dstAllocOffset + 1)); - m_BytesMoved += srcAllocSize; - ++m_AllocationsMoved; + } + if (notPresent) + immovableBlocks.push_back({ vectorIndex, newBlock }); + break; + } + case VMA_DEFRAGMENTATION_MOVE_OPERATION_DESTROY: + { + prevCount = vector->GetBlockCount(); + freedBlockSize = move.srcAllocation->GetBlock()->m_pMetadata->GetSize(); + vector->Free(move.srcAllocation, false); + currentCount = vector->GetBlockCount(); + freedBlockSize *= prevCount - currentCount; - VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt; - ++nextSuballocIt; - pSrcMetadata->m_Suballocations.erase(srcSuballocIt); - srcSuballocIt = nextSuballocIt; + VkDeviceSize dstBlockSize = dst->GetBlock()->m_pMetadata->GetSize(); + vector->Free(dst, false); + freedBlockSize += dstBlockSize * (currentCount - vector->GetBlockCount()); + currentCount = vector->GetBlockCount(); - InsertSuballoc(pFreeSpaceMetadata, suballoc); + result = VK_INCOMPLETE; + break; + } + default: + VMA_ASSERT(0); + } - move.srcBlockIndex = srcOrigBlockIndex; - move.dstBlockIndex = freeSpaceOrigBlockIndex; - move.srcOffset = srcAllocOffset; - move.dstOffset = dstAllocOffset; - move.dstHandle = (VmaAllocHandle)(dstAllocOffset + 1); - move.size = srcAllocSize; + if (prevCount > currentCount) + { + size_t freedBlocks = prevCount - currentCount; + m_Stats.deviceMemoryBlocksFreed += static_cast<uint32_t>(freedBlocks); + m_Stats.bytesFreed += freedBlockSize; + } - moves.push_back(move); - } - } - else + switch (m_Algorithm) + { + case 
VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT: + { + if (m_AlgorithmState != VMA_NULL) { - dstAllocOffset = VmaAlignUp(dstOffset, srcAllocAlignment); - - // If the allocation doesn't fit before the end of dstBlock, forward to next block. - while (dstBlockInfoIndex < srcBlockInfoIndex && - dstAllocOffset + srcAllocSize > dstBlockSize) + // Avoid unnecessary tries to allocate when new free block is avaiable + StateExtensive& state = reinterpret_cast<StateExtensive*>(m_AlgorithmState)[vectorIndex]; + if (state.firstFreeBlock != SIZE_MAX) { - // But before that, register remaining free space at the end of dst block. - freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, dstBlockSize - dstOffset); - - ++dstBlockInfoIndex; - dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex; - pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex); - pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata; - dstBlockSize = pDstMetadata->GetSize(); - dstOffset = 0; - dstAllocOffset = 0; + state.firstFreeBlock -= prevCount - currentCount; + if (state.firstFreeBlock != 0) + state.firstFreeBlock -= vector->GetBlock(state.firstFreeBlock - 1)->m_pMetadata->IsEmpty(); } + } + } + } + } + moveInfo.moveCount = 0; + moveInfo.pMoves = VMA_NULL; + m_Moves.clear(); - // Same block - if (dstBlockInfoIndex == srcBlockInfoIndex) + // Move blocks with immovable allocations according to algorithm + if (immovableBlocks.size() > 0) + { + switch (m_Algorithm) + { + case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT: + { + if (m_AlgorithmState != VMA_NULL) + { + bool swapped = false; + // Move to the start of free blocks range + for (const ImmovableBlock& block : immovableBlocks) { - VMA_ASSERT(dstAllocOffset <= srcAllocOffset); - - const bool overlap = dstAllocOffset + srcAllocSize > srcAllocOffset; - - bool skipOver = overlap; - if (overlap && m_OverlappingMoveSupported && dstAllocOffset < srcAllocOffset) - { - // If destination and source place overlap, skip if it would move it - // by only < 1/64 of its size. - skipOver = (srcAllocOffset - dstAllocOffset) * 64 < srcAllocSize; - } - - if (skipOver) - { - freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, srcAllocOffset - dstOffset); - - dstOffset = srcAllocOffset + srcAllocSize; - ++srcSuballocIt; - } - // MOVE OPTION 1: Move the allocation inside the same block by decreasing offset. 
- else + StateExtensive& state = reinterpret_cast<StateExtensive*>(m_AlgorithmState)[block.vectorIndex]; + if (state.operation != StateExtensive::Operation::Cleanup) { - srcSuballocIt->offset = dstAllocOffset; - ((VmaAllocation)(srcSuballocIt->userData))->ChangeAllocHandle((VmaAllocHandle)(dstAllocOffset + 1)); - dstOffset = dstAllocOffset + srcAllocSize; - m_BytesMoved += srcAllocSize; - ++m_AllocationsMoved; - ++srcSuballocIt; - - move.srcBlockIndex = srcOrigBlockIndex; - move.dstBlockIndex = dstOrigBlockIndex; - move.srcOffset = srcAllocOffset; - move.dstOffset = dstAllocOffset; - move.dstHandle = (VmaAllocHandle)(dstAllocOffset + 1); - move.size = srcAllocSize; - - moves.push_back(move); + VmaBlockVector* vector = m_pBlockVectors[block.vectorIndex]; + for (size_t i = 0, count = vector->GetBlockCount() - m_ImmovableBlockCount; i < count; ++i) + { + if (vector->GetBlock(i) == block.block) + { + VMA_SWAP(vector->m_Blocks[i], vector->m_Blocks[vector->GetBlockCount() - ++m_ImmovableBlockCount]); + if (state.firstFreeBlock != SIZE_MAX) + { + if (i < state.firstFreeBlock - 1) + { + VMA_SWAP(vector->m_Blocks[i], vector->m_Blocks[--state.firstFreeBlock]); + } + } + swapped = true; + break; + } + } } } - // Different block - else + if (swapped) + result = VK_INCOMPLETE; + break; + } + } + default: + { + // Move to the begining + for (const ImmovableBlock& block : immovableBlocks) + { + VmaBlockVector* vector = m_pBlockVectors[block.vectorIndex]; + for (size_t i = m_ImmovableBlockCount; vector->GetBlockCount(); ++i) { - // MOVE OPTION 2: Move the allocation to a different block. - - VMA_ASSERT(dstBlockInfoIndex < srcBlockInfoIndex); - VMA_ASSERT(dstAllocOffset + srcAllocSize <= dstBlockSize); - - VmaSuballocation suballoc = *srcSuballocIt; - suballoc.offset = dstAllocOffset; - ((VmaAllocation)(suballoc.userData))->ChangeBlockAllocation(m_hAllocator, pDstBlock, (VmaAllocHandle)(dstAllocOffset + 1)); - dstOffset = dstAllocOffset + srcAllocSize; - m_BytesMoved += srcAllocSize; - ++m_AllocationsMoved; - - VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt; - ++nextSuballocIt; - pSrcMetadata->m_Suballocations.erase(srcSuballocIt); - srcSuballocIt = nextSuballocIt; - - pDstMetadata->m_Suballocations.push_back(suballoc); - - move.srcBlockIndex = srcOrigBlockIndex; - move.dstBlockIndex = dstOrigBlockIndex; - move.srcOffset = srcAllocOffset; - move.dstOffset = dstAllocOffset; - move.dstHandle = (VmaAllocHandle)(dstAllocOffset + 1); - move.size = srcAllocSize; - - moves.push_back(move); + if (vector->GetBlock(i) == block.block) + { + VMA_SWAP(vector->m_Blocks[i], vector->m_Blocks[m_ImmovableBlockCount++]); + break; + } } } + break; + } } } - - m_BlockInfos.clear(); - - PostprocessMetadata(); - - return VK_SUCCESS; + return result; } -VmaDefragmentationAlgorithm_Fast::FreeSpaceDatabase::FreeSpaceDatabase() +bool VmaDefragmentationContext_T::ComputeDefragmentation(VmaBlockVector& vector, size_t index) { - FreeSpace s = {}; - s.blockInfoIndex = SIZE_MAX; - for (size_t i = 0; i < MAX_COUNT; ++i) + switch (m_Algorithm) { - m_FreeSpaces[i] = s; + case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FAST_BIT: + return ComputeDefragmentation_Fast(vector); + default: // Default algoritm + case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED_BIT: + return ComputeDefragmentation_Balanced(vector); + case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FULL_BIT: + return ComputeDefragmentation_Full(vector); + case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT: + return ComputeDefragmentation_Extensive(vector, index); } } -void 
VmaDefragmentationAlgorithm_Fast::FreeSpaceDatabase::Register(size_t blockInfoIndex, VkDeviceSize offset, VkDeviceSize size) +VmaDefragmentationContext_T::MoveAllocationData VmaDefragmentationContext_T::GetMoveData( + VmaAllocHandle handle, VmaBlockMetadata* metadata) { - // Find first invalid or the smallest structure. - size_t bestIndex = SIZE_MAX; - for (size_t i = 0; i < MAX_COUNT; ++i) - { - // Empty structure. - if (m_FreeSpaces[i].blockInfoIndex == SIZE_MAX) - { - bestIndex = i; - break; - } - if (m_FreeSpaces[i].size < size && - (bestIndex == SIZE_MAX || m_FreeSpaces[bestIndex].size > m_FreeSpaces[i].size)) - { - bestIndex = i; - } - } + MoveAllocationData moveData; + moveData.move.srcAllocation = (VmaAllocation)metadata->GetAllocationUserData(handle); + moveData.size = moveData.move.srcAllocation->GetSize(); + moveData.alignment = moveData.move.srcAllocation->GetAlignment(); + moveData.type = moveData.move.srcAllocation->GetSuballocationType(); + moveData.flags = 0; - if (bestIndex != SIZE_MAX) - { - m_FreeSpaces[bestIndex].blockInfoIndex = blockInfoIndex; - m_FreeSpaces[bestIndex].offset = offset; - m_FreeSpaces[bestIndex].size = size; - } + if (moveData.move.srcAllocation->IsPersistentMap()) + moveData.flags |= VMA_ALLOCATION_CREATE_MAPPED_BIT; + if (moveData.move.srcAllocation->IsMappingAllowed()) + moveData.flags |= VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT; + + return moveData; } -bool VmaDefragmentationAlgorithm_Fast::FreeSpaceDatabase::Fetch(VkDeviceSize alignment, VkDeviceSize size, - size_t& outBlockInfoIndex, VkDeviceSize& outDstOffset) +bool VmaDefragmentationContext_T::IncrementCounters(uint32_t& allocations, VkDeviceSize bytes) { - size_t bestIndex = SIZE_MAX; - VkDeviceSize bestFreeSpaceAfter = 0; - for (size_t i = 0; i < MAX_COUNT; ++i) + if (++allocations >= m_MaxPassAllocations || bytes >= m_MaxPassBytes) { - // Structure is valid. - if (m_FreeSpaces[i].blockInfoIndex != SIZE_MAX) - { - const VkDeviceSize dstOffset = VmaAlignUp(m_FreeSpaces[i].offset, alignment); - // Allocation fits into this structure. - if (dstOffset + size <= m_FreeSpaces[i].offset + m_FreeSpaces[i].size) - { - const VkDeviceSize freeSpaceAfter = (m_FreeSpaces[i].offset + m_FreeSpaces[i].size) - - (dstOffset + size); - if (bestIndex == SIZE_MAX || freeSpaceAfter > bestFreeSpaceAfter) - { - bestIndex = i; - bestFreeSpaceAfter = freeSpaceAfter; - } - } - } - } - - if (bestIndex != SIZE_MAX) - { - outBlockInfoIndex = m_FreeSpaces[bestIndex].blockInfoIndex; - outDstOffset = VmaAlignUp(m_FreeSpaces[bestIndex].offset, alignment); - - // Leave this structure for remaining empty space. 
- const VkDeviceSize alignmentPlusSize = (outDstOffset - m_FreeSpaces[bestIndex].offset) + size; - m_FreeSpaces[bestIndex].offset += alignmentPlusSize; - m_FreeSpaces[bestIndex].size -= alignmentPlusSize; - + m_Stats.bytesMoved += bytes; + m_Stats.allocationsMoved += allocations; return true; } - return false; } -void VmaDefragmentationAlgorithm_Fast::PreprocessMetadata() +bool VmaDefragmentationContext_T::ReallocWithinBlock(VmaBlockVector& vector, VmaDeviceMemoryBlock* block) { - const size_t blockCount = m_pBlockVector->GetBlockCount(); - for (size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex) - { - VmaBlockMetadata_Generic* const pMetadata = - (VmaBlockMetadata_Generic*)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata; - pMetadata->m_FreeCount = 0; - pMetadata->m_SumFreeSize = pMetadata->GetSize(); - pMetadata->m_FreeSuballocationsBySize.clear(); - for (VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin(); - it != pMetadata->m_Suballocations.end(); ) - { - if (it->type == VMA_SUBALLOCATION_TYPE_FREE) - { - VmaSuballocationList::iterator nextIt = it; - ++nextIt; - pMetadata->m_Suballocations.erase(it); - it = nextIt; - } - else - { - ++it; - } - } - } -} + VkDeviceSize currentBytesMoved = 0; + uint32_t currentAllocsMoved = 0; + VmaBlockMetadata* metadata = block->m_pMetadata; -void VmaDefragmentationAlgorithm_Fast::PostprocessMetadata() -{ - const size_t blockCount = m_pBlockVector->GetBlockCount(); - for (size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex) + for (VmaAllocHandle handle = metadata->GetAllocationListBegin(); + handle != VK_NULL_HANDLE; + handle = metadata->GetNextAllocation(handle)) { - VmaBlockMetadata_Generic* const pMetadata = - (VmaBlockMetadata_Generic*)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata; - const VkDeviceSize blockSize = pMetadata->GetSize(); - - // No allocations in this block - entire area is free. - if (pMetadata->m_Suballocations.empty()) - { - pMetadata->m_FreeCount = 1; - //pMetadata->m_SumFreeSize is already set to blockSize. - VmaSuballocation suballoc = { - 0, // offset - blockSize, // size - VMA_NULL, // hAllocation - VMA_SUBALLOCATION_TYPE_FREE }; - pMetadata->m_Suballocations.push_back(suballoc); - pMetadata->RegisterFreeSuballocation(pMetadata->m_Suballocations.begin()); - } - // There are some allocations in this block. - else - { - VkDeviceSize offset = 0; - VmaSuballocationList::iterator it; - for (it = pMetadata->m_Suballocations.begin(); - it != pMetadata->m_Suballocations.end(); - ++it) + MoveAllocationData moveData = GetMoveData(handle, metadata); + // Ignore newly created allocations by defragmentation algorithm + if (moveData.move.srcAllocation->GetUserData() == this) + continue; + VmaAllocation& dst = reinterpret_cast<VmaAllocation&>(moveData.move.internalData); + + VkDeviceSize offset = moveData.move.srcAllocation->GetOffset(); + if (offset != 0 && metadata->GetSumFreeSize() >= moveData.size) + { + VmaAllocationRequest request = {}; + if (metadata->CreateAllocationRequest( + moveData.size, + moveData.alignment, + false, + moveData.type, + VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT, + &request)) { - VMA_ASSERT(it->type != VMA_SUBALLOCATION_TYPE_FREE); - VMA_ASSERT(it->offset >= offset); - - // Need to insert preceding free space. 
- if (it->offset > offset) + if (metadata->GetAllocationOffset(request.allocHandle) < offset) { - ++pMetadata->m_FreeCount; - const VkDeviceSize freeSize = it->offset - offset; - VmaSuballocation suballoc = { - offset, // offset - freeSize, // size - VMA_NULL, // hAllocation - VMA_SUBALLOCATION_TYPE_FREE }; - VmaSuballocationList::iterator precedingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc); - pMetadata->m_FreeSuballocationsBySize.push_back(precedingFreeIt); - } - - pMetadata->m_SumFreeSize -= it->size; - offset = it->offset + it->size; - } + if (vector.CommitAllocationRequest( + request, + block, + moveData.alignment, + moveData.flags, + this, + moveData.type, + &dst) == VK_SUCCESS) + { + moveData.move.dstMemory = dst->GetMemory(); + moveData.move.dstOffset = dst->GetOffset(); + m_Moves.push_back(moveData.move); + currentBytesMoved += moveData.size; - // Need to insert trailing free space. - if (offset < blockSize) - { - ++pMetadata->m_FreeCount; - const VkDeviceSize freeSize = blockSize - offset; - VmaSuballocation suballoc = { - offset, // offset - freeSize, // size - VMA_NULL, // hAllocation - VMA_SUBALLOCATION_TYPE_FREE }; - VMA_ASSERT(it == pMetadata->m_Suballocations.end()); - VmaSuballocationList::iterator trailingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc); - pMetadata->m_FreeSuballocationsBySize.push_back(trailingFreeIt); + if (IncrementCounters(currentAllocsMoved, currentBytesMoved)) + return true; + } + } } - - VMA_SORT( - pMetadata->m_FreeSuballocationsBySize.begin(), - pMetadata->m_FreeSuballocationsBySize.end(), - VmaSuballocationItemSizeLess()); } - - VMA_HEAVY_ASSERT(pMetadata->Validate()); } + + m_Stats.bytesMoved += currentBytesMoved; + m_Stats.allocationsMoved += currentAllocsMoved; + return false; } -void VmaDefragmentationAlgorithm_Fast::InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc) +bool VmaDefragmentationContext_T::AllocInOtherBlock(size_t start, size_t end, MoveAllocationData& data, VmaBlockVector& vector) { - VmaSuballocationList& suballocs = pMetadata->m_Suballocations; - VmaSuballocationList::iterator elementAfter; - const VkDeviceSize last = suballocs.rbegin()->offset; - const VkDeviceSize first = suballocs.begin()->offset; + VkDeviceSize currentBytesMoved = 0; + uint32_t currentAllocsMoved = 0; + VmaAllocation& dst = reinterpret_cast<VmaAllocation&>(data.move.internalData); - if (last <= suballoc.offset) - elementAfter = suballocs.end(); - else if (first >= suballoc.offset) - elementAfter = suballocs.begin(); - else + for (; start < end; ++start) { - const size_t suballocCount = suballocs.size(); - const VkDeviceSize step = (last - first + suballocs.begin()->size) / suballocCount; - // If offset to be inserted is closer to the end of range, search from the end - if ((suballoc.offset - first) / step > suballocCount / 2) + VmaDeviceMemoryBlock* dstBlock = vector.GetBlock(start); + if (dstBlock->m_pMetadata->GetSumFreeSize() >= data.size) { - elementAfter = suballocs.begin(); - for (VmaSuballocationList::reverse_iterator suballocItem = ++suballocs.rbegin(); - suballocItem != suballocs.rend(); - ++suballocItem) - { - if (suballocItem->offset <= suballoc.offset) - { - elementAfter = --suballocItem; - break; - } - } - } - else - { - elementAfter = suballocs.end(); - for (VmaSuballocationList::iterator suballocItem = ++suballocs.begin(); - suballocItem != suballocs.end(); - ++suballocItem) + if (vector.AllocateFromBlock(dstBlock, + data.size, + data.alignment, + data.flags, + this, + data.type, + 0, 
+ &dst) == VK_SUCCESS) { - if (suballocItem->offset >= suballoc.offset) - { - elementAfter = suballocItem; - break; - } + data.move.dstMemory = dst->GetMemory(); + data.move.dstOffset = dst->GetOffset(); + m_Moves.push_back(data.move); + currentBytesMoved += data.size; + + if (IncrementCounters(currentAllocsMoved, currentBytesMoved)) + return true; + break; } } } - pMetadata->m_Suballocations.insert(elementAfter, suballoc); -} -#endif // _VMA_DEFRAGMENTATION_ALGORITHM_FAST_FUNCTIONS -#ifndef _VMA_BLOCK_VECTOR_DEFRAGMENTATION_CONTEXT_FUNCTIONS -VmaBlockVectorDefragmentationContext::VmaBlockVectorDefragmentationContext( - VmaAllocator hAllocator, - VmaPool hCustomPool, - VmaBlockVector* pBlockVector) - : res(VK_SUCCESS), - mutexLocked(false), - blockContexts(VmaStlAllocator<VmaBlockDefragmentationContext>(hAllocator->GetAllocationCallbacks())), - defragmentationMoves(VmaStlAllocator<VmaDefragmentationMove>(hAllocator->GetAllocationCallbacks())), - defragmentationMovesProcessed(0), - defragmentationMovesCommitted(0), - hasDefragmentationPlan(0), - m_hAllocator(hAllocator), - m_hCustomPool(hCustomPool), - m_pBlockVector(pBlockVector), - m_pAlgorithm(VMA_NULL), - m_Allocations(VmaStlAllocator<AllocInfo>(hAllocator->GetAllocationCallbacks())), - m_AllAllocations(false) {} - -VmaBlockVectorDefragmentationContext::~VmaBlockVectorDefragmentationContext() -{ - vma_delete(m_hAllocator, m_pAlgorithm); -} - -void VmaBlockVectorDefragmentationContext::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) -{ - AllocInfo info = { hAlloc, pChanged }; - m_Allocations.push_back(info); + m_Stats.bytesMoved += currentBytesMoved; + m_Stats.allocationsMoved += currentAllocsMoved; + return false; } -void VmaBlockVectorDefragmentationContext::Begin(bool overlappingMoveSupported, VmaDefragmentationFlags flags) +bool VmaDefragmentationContext_T::ComputeDefragmentation_Fast(VmaBlockVector& vector) { - const bool allAllocations = m_AllAllocations || - m_Allocations.size() == m_pBlockVector->CalcAllocationCount(); + // Move only between blocks - /******************************** - HERE IS THE CHOICE OF DEFRAGMENTATION ALGORITHM. - ********************************/ - - /* - Fast algorithm is supported only when certain criteria are met: - - VMA_DEBUG_MARGIN is 0. - - All allocations in this block vector are movable. - - There is no possibility of image/buffer granularity conflict. 
- - The defragmentation is not incremental - */ - if (VMA_DEBUG_MARGIN == 0 && - allAllocations && - !m_pBlockVector->IsBufferImageGranularityConflictPossible() && - !(flags & VMA_DEFRAGMENTATION_FLAG_INCREMENTAL)) + // Go through allocations in last blocks and try to fit them inside first ones + for (size_t i = vector.GetBlockCount() - 1; i > m_ImmovableBlockCount; --i) { - m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Fast)( - m_hAllocator, m_pBlockVector, overlappingMoveSupported); - } - else - { - m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Generic)( - m_hAllocator, m_pBlockVector, overlappingMoveSupported); - } + VmaBlockMetadata* metadata = vector.GetBlock(i)->m_pMetadata; - if (allAllocations) - { - m_pAlgorithm->AddAll(); - } - else - { - for (size_t i = 0, count = m_Allocations.size(); i < count; ++i) + for (VmaAllocHandle handle = metadata->GetAllocationListBegin(); + handle != VK_NULL_HANDLE; + handle = metadata->GetNextAllocation(handle)) { - m_pAlgorithm->AddAllocation(m_Allocations[i].hAlloc, m_Allocations[i].pChanged); + MoveAllocationData moveData = GetMoveData(handle, metadata); + // Ignore newly created allocations by defragmentation algorithm + if (moveData.move.srcAllocation->GetUserData() == this) + continue; + + // Check all previous blocks for free space + if (AllocInOtherBlock(0, i, moveData, vector)) + return true; } } + return false; } -#endif // _VMA_BLOCK_VECTOR_DEFRAGMENTATION_CONTEXT_FUNCTIONS -#ifndef _VMA_DEFRAGMENTATION_CONTEXT_FUNCTIONS -VmaDefragmentationContext_T::VmaDefragmentationContext_T( - VmaAllocator hAllocator, - uint32_t flags, - VmaDefragmentationStats* pStats) - : m_hAllocator(hAllocator), - m_Flags(flags), - m_pStats(pStats), - m_CustomPoolContexts(VmaStlAllocator<VmaBlockVectorDefragmentationContext*>(hAllocator->GetAllocationCallbacks())) +bool VmaDefragmentationContext_T::ComputeDefragmentation_Balanced(VmaBlockVector& vector) { - memset(m_DefaultPoolContexts, 0, sizeof(m_DefaultPoolContexts)); -} + // Go over every allocation and try to fit it in previous blocks at lowest offsets, + // if not possible: realloc within single block to minimize offset (exclude offset == 0), + // but only if there are noticable gaps between them (some heuristic, ex. 
average size of allocation in block) -VmaDefragmentationContext_T::~VmaDefragmentationContext_T() -{ - for (size_t i = m_CustomPoolContexts.size(); i--; ) - { - VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[i]; - pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_Flags, m_pStats); - vma_delete(m_hAllocator, pBlockVectorCtx); - } - for (size_t i = m_hAllocator->m_MemProps.memoryTypeCount; i--; ) - { - VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[i]; - if (pBlockVectorCtx) - { - pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_Flags, m_pStats); - vma_delete(m_hAllocator, pBlockVectorCtx); - } - } -} + VkDeviceSize currentBytesMoved = 0; + uint32_t currentAllocsMoved = 0; -void VmaDefragmentationContext_T::AddPools(uint32_t poolCount, const VmaPool* pPools) -{ - for (uint32_t poolIndex = 0; poolIndex < poolCount; ++poolIndex) + for (size_t i = vector.GetBlockCount() - 1; i > m_ImmovableBlockCount; --i) { - VmaPool pool = pPools[poolIndex]; - VMA_ASSERT(pool); - for(uint32_t memTypeIndex = 0; memTypeIndex < m_hAllocator->GetMemoryTypeCount(); ++memTypeIndex) + VmaDeviceMemoryBlock* block = vector.GetBlock(i); + VmaBlockMetadata* metadata = block->m_pMetadata; + + for (VmaAllocHandle handle = metadata->GetAllocationListBegin(); + handle != VK_NULL_HANDLE; + handle = metadata->GetNextAllocation(handle)) { - if(pool->m_pBlockVectors[memTypeIndex]) + MoveAllocationData moveData = GetMoveData(handle, metadata); + // Ignore newly created allocations by defragmentation algorithm + if (moveData.move.srcAllocation->GetUserData() == this) + continue; + + // Check all previous blocks for free space + const size_t prevMoveCount = m_Moves.size(); + if (AllocInOtherBlock(0, i, moveData, vector)) + return true; + + // If no room found then realloc within block for lower offset + VkDeviceSize offset = moveData.move.srcAllocation->GetOffset(); + if (prevMoveCount == m_Moves.size() && offset != 0 && metadata->GetSumFreeSize() >= moveData.size) { - // Pools with algorithm other than default are not defragmented. 
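The dispatch in ComputeDefragmentation() above selects one of four strategies from the VMA_DEFRAGMENTATION_FLAG_ALGORITHM_* bits, falling back to the balanced algorithm when no bit is set. A minimal illustrative sketch of how a caller would pick a strategy, assuming the VmaDefragmentationInfo struct of the released VMA 3.0 API (its exact shape at this revision may differ):

    VmaDefragmentationInfo defragInfo = {};
    // Leave flags at 0 for the balanced default, or pick one of the bits handled above:
    //   VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FAST_BIT      - only moves allocations between existing blocks
    //   VMA_DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED_BIT  - also compacts within a block when gaps are noticeable
    //   VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FULL_BIT      - always compacts within a block toward lower offsets
    //   VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT - frees whole blocks and repacks resources type by type
    defragInfo.flags = VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FULL_BIT;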
- if (pool->m_pBlockVectors[memTypeIndex]->GetAlgorithm() == 0) + VmaAllocationRequest request = {}; + if (metadata->CreateAllocationRequest( + moveData.size, + moveData.alignment, + false, + moveData.type, + VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT, + &request)) { - VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL; - - for (size_t i = m_CustomPoolContexts.size(); i--; ) + if (metadata->GetAllocationOffset(request.allocHandle) < offset) { - if (m_CustomPoolContexts[i]->GetCustomPool() == pool) + VmaAllocation& dst = reinterpret_cast<VmaAllocation&>(moveData.move.internalData); + if (vector.CommitAllocationRequest( + request, + block, + moveData.alignment, + moveData.flags, + this, + moveData.type, + &dst) == VK_SUCCESS) { - pBlockVectorDefragCtx = m_CustomPoolContexts[i]; - break; - } - } + moveData.move.dstMemory = dst->GetMemory(); + moveData.move.dstOffset = dst->GetOffset(); + m_Moves.push_back(moveData.move); + currentBytesMoved += moveData.size; - if (!pBlockVectorDefragCtx) - { - pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)( - m_hAllocator, - pool, - pool->m_pBlockVectors[memTypeIndex]); - m_CustomPoolContexts.push_back(pBlockVectorDefragCtx); + if (IncrementCounters(currentAllocsMoved, currentBytesMoved)) + return true; + } } - - pBlockVectorDefragCtx->AddAll(); } } } } + + m_Stats.bytesMoved += currentBytesMoved; + m_Stats.allocationsMoved += currentAllocsMoved; + return false; } -void VmaDefragmentationContext_T::AddAllocations( - uint32_t allocationCount, - const VmaAllocation* pAllocations, - VkBool32* pAllocationsChanged) +bool VmaDefragmentationContext_T::ComputeDefragmentation_Full(VmaBlockVector& vector) { - // Dispatch pAllocations among defragmentators. Create them when necessary. - for (uint32_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex) + // Go over every allocation and try to fit it in previous blocks at lowest offsets, + // if not possible: realloc within single block to minimize offset (exclude offset == 0) + + VkDeviceSize currentBytesMoved = 0; + uint32_t currentAllocsMoved = 0; + + for (size_t i = vector.GetBlockCount() - 1; i > m_ImmovableBlockCount; --i) { - const VmaAllocation hAlloc = pAllocations[allocIndex]; - VMA_ASSERT(hAlloc); - const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex(); - // DedicatedAlloc cannot be defragmented. - if (hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) + VmaDeviceMemoryBlock* block = vector.GetBlock(i); + VmaBlockMetadata* metadata = block->m_pMetadata; + + for (VmaAllocHandle handle = metadata->GetAllocationListBegin(); + handle != VK_NULL_HANDLE; + handle = metadata->GetNextAllocation(handle)) { - VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL; + MoveAllocationData moveData = GetMoveData(handle, metadata); + // Ignore newly created allocations by defragmentation algorithm + if (moveData.move.srcAllocation->GetUserData() == this) + continue; - const VmaPool hAllocPool = hAlloc->GetBlock()->GetParentPool(); - // This allocation belongs to custom pool. 
- if (hAllocPool != VK_NULL_HANDLE) + // Check all previous blocks for free space + const size_t prevMoveCount = m_Moves.size(); + if (AllocInOtherBlock(0, i, moveData, vector)) + return true; + + // If no room found then realloc within block for lower offset + VkDeviceSize offset = moveData.move.srcAllocation->GetOffset(); + if (prevMoveCount == m_Moves.size() && offset != 0 && metadata->GetSumFreeSize() >= moveData.size) { - // Pools with algorithm other than default are not defragmented. - if (hAllocPool->m_pBlockVectors[memTypeIndex]->GetAlgorithm() == 0) + VmaAllocationRequest request = {}; + if (metadata->CreateAllocationRequest( + moveData.size, + moveData.alignment, + false, + moveData.type, + VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT, + &request)) { - for (size_t i = m_CustomPoolContexts.size(); i--; ) + if (metadata->GetAllocationOffset(request.allocHandle) < offset) { - if (m_CustomPoolContexts[i]->GetCustomPool() == hAllocPool) + VmaAllocation& dst = reinterpret_cast<VmaAllocation&>(moveData.move.internalData); + if (vector.CommitAllocationRequest( + request, + block, + moveData.alignment, + moveData.flags, + this, + moveData.type, + &dst) == VK_SUCCESS) { - pBlockVectorDefragCtx = m_CustomPoolContexts[i]; - break; + moveData.move.dstMemory = dst->GetMemory(); + moveData.move.dstOffset = dst->GetOffset(); + m_Moves.push_back(moveData.move); + currentBytesMoved += moveData.size; + + if (IncrementCounters(currentAllocsMoved, currentBytesMoved)) + return true; } } - if (!pBlockVectorDefragCtx) - { - pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)( - m_hAllocator, - hAllocPool, - hAllocPool->m_pBlockVectors[memTypeIndex]); - m_CustomPoolContexts.push_back(pBlockVectorDefragCtx); - } } } - // This allocation belongs to default pool. - else - { - pBlockVectorDefragCtx = m_DefaultPoolContexts[memTypeIndex]; - if (!pBlockVectorDefragCtx) - { - VMA_ASSERT(m_hAllocator->m_pBlockVectors[memTypeIndex] && "Trying to use unsupported memory type!"); - - pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)( - m_hAllocator, - VMA_NULL, // hCustomPool - m_hAllocator->m_pBlockVectors[memTypeIndex]); - m_DefaultPoolContexts[memTypeIndex] = pBlockVectorDefragCtx; - } - } - - if (pBlockVectorDefragCtx) - { - VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ? 
- &pAllocationsChanged[allocIndex] : VMA_NULL; - pBlockVectorDefragCtx->AddAllocation(hAlloc, pChanged); - } } } + + m_Stats.bytesMoved += currentBytesMoved; + m_Stats.allocationsMoved += currentAllocsMoved; + return false; } -VkResult VmaDefragmentationContext_T::Defragment( - VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove, - VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove, - VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats, VmaDefragmentationFlags flags) +bool VmaDefragmentationContext_T::ComputeDefragmentation_Extensive(VmaBlockVector& vector, size_t index) { - if (pStats) - { - memset(pStats, 0, sizeof(VmaDefragmentationStats)); - } + // First free single block, then populate it to the brim, then free another block, and so on - if (flags & VMA_DEFRAGMENTATION_FLAG_INCREMENTAL) - { - // For incremental defragmetnations, we just earmark how much we can move - // The real meat is in the defragmentation steps - m_MaxCpuBytesToMove = maxCpuBytesToMove; - m_MaxCpuAllocationsToMove = maxCpuAllocationsToMove; + // Fallback to previous algorithm since without granularity conflicts it can achieve max packing + if (vector.m_BufferImageGranularity == 1) + return ComputeDefragmentation_Full(vector); - m_MaxGpuBytesToMove = maxGpuBytesToMove; - m_MaxGpuAllocationsToMove = maxGpuAllocationsToMove; + VMA_ASSERT(m_AlgorithmState != VMA_NULL); - if (m_MaxCpuBytesToMove == 0 && m_MaxCpuAllocationsToMove == 0 && - m_MaxGpuBytesToMove == 0 && m_MaxGpuAllocationsToMove == 0) - return VK_SUCCESS; + StateExtensive& vectorState = reinterpret_cast<StateExtensive*>(m_AlgorithmState)[index]; - return VK_NOT_READY; - } - - if (commandBuffer == VK_NULL_HANDLE) + bool texturePresent = false, bufferPresent = false, otherPresent = false; + switch (vectorState.operation) { - maxGpuBytesToMove = 0; - maxGpuAllocationsToMove = 0; - } + case StateExtensive::Operation::Done: // Vector defragmented + return false; + case StateExtensive::Operation::FindFreeBlockBuffer: + case StateExtensive::Operation::FindFreeBlockTexture: + case StateExtensive::Operation::FindFreeBlockAll: + { + // No free blocks, have to clear last one + size_t last = (vectorState.firstFreeBlock == SIZE_MAX ? vector.GetBlockCount() : vectorState.firstFreeBlock) - 1; + VmaBlockMetadata* freeMetadata = vector.GetBlock(last)->m_pMetadata; - VkResult res = VK_SUCCESS; + const size_t prevMoveCount = m_Moves.size(); + for (VmaAllocHandle handle = freeMetadata->GetAllocationListBegin(); + handle != VK_NULL_HANDLE; + handle = freeMetadata->GetNextAllocation(handle)) + { + MoveAllocationData moveData = GetMoveData(handle, freeMetadata); - // Process default pools. 
- for (uint32_t memTypeIndex = 0; - memTypeIndex < m_hAllocator->GetMemoryTypeCount() && res >= VK_SUCCESS; - ++memTypeIndex) - { - VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[memTypeIndex]; - if (pBlockVectorCtx) - { - VMA_ASSERT(pBlockVectorCtx->GetBlockVector()); - pBlockVectorCtx->GetBlockVector()->Defragment( - pBlockVectorCtx, - pStats, flags, - maxCpuBytesToMove, maxCpuAllocationsToMove, - maxGpuBytesToMove, maxGpuAllocationsToMove, - commandBuffer); - if (pBlockVectorCtx->res != VK_SUCCESS) + // Check all previous blocks for free space + if (AllocInOtherBlock(0, last, moveData, vector)) { - res = pBlockVectorCtx->res; + // Full clear performed already + if (prevMoveCount != m_Moves.size() && freeMetadata->GetNextAllocation(handle) == VK_NULL_HANDLE) + reinterpret_cast<size_t*>(m_AlgorithmState)[index] = last; + return true; } } - } - // Process custom pools. - for (size_t customCtxIndex = 0, customCtxCount = m_CustomPoolContexts.size(); - customCtxIndex < customCtxCount && res >= VK_SUCCESS; - ++customCtxIndex) - { - VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[customCtxIndex]; - VMA_ASSERT(pBlockVectorCtx && pBlockVectorCtx->GetBlockVector()); - pBlockVectorCtx->GetBlockVector()->Defragment( - pBlockVectorCtx, - pStats, flags, - maxCpuBytesToMove, maxCpuAllocationsToMove, - maxGpuBytesToMove, maxGpuAllocationsToMove, - commandBuffer); - if (pBlockVectorCtx->res != VK_SUCCESS) + if (prevMoveCount == m_Moves.size()) { - res = pBlockVectorCtx->res; + // Cannot perform full clear, have to move data in other blocks around + if (last != 0) + { + for (size_t i = last - 1; i; --i) + { + if (ReallocWithinBlock(vector, vector.GetBlock(i))) + return true; + } + } + + if (prevMoveCount == m_Moves.size()) + { + // No possible reallocs within blocks, try to move them around fast + return ComputeDefragmentation_Fast(vector); + } + } + else + { + switch (vectorState.operation) + { + case StateExtensive::Operation::FindFreeBlockBuffer: + vectorState.operation = StateExtensive::Operation::MoveBuffers; + break; + default: + VMA_ASSERT(0); + case StateExtensive::Operation::FindFreeBlockTexture: + vectorState.operation = StateExtensive::Operation::MoveTextures; + break; + case StateExtensive::Operation::FindFreeBlockAll: + vectorState.operation = StateExtensive::Operation::MoveAll; + break; + } + vectorState.firstFreeBlock = last; + // Nothing done, block found without reallocations, can perform another reallocs in same pass + if (prevMoveCount == m_Moves.size()) + return ComputeDefragmentation_Extensive(vector, index); } + break; } - - return res; -} - -VkResult VmaDefragmentationContext_T::DefragmentPassBegin(VmaDefragmentationPassInfo* pInfo) -{ - VmaDefragmentationPassMoveInfo* pCurrentMove = pInfo->pMoves; - uint32_t movesLeft = pInfo->moveCount; - - // Process default pools. 
- for (uint32_t memTypeIndex = 0; - memTypeIndex < m_hAllocator->GetMemoryTypeCount(); - ++memTypeIndex) + case StateExtensive::Operation::MoveTextures: { - VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[memTypeIndex]; - if (pBlockVectorCtx) + if (MoveDataToFreeBlocks(VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL, vector, + vectorState.firstFreeBlock, texturePresent, bufferPresent, otherPresent)) { - VMA_ASSERT(pBlockVectorCtx->GetBlockVector()); - - if (!pBlockVectorCtx->hasDefragmentationPlan) + if (texturePresent) { - pBlockVectorCtx->GetBlockVector()->Defragment( - pBlockVectorCtx, - m_pStats, m_Flags, - m_MaxCpuBytesToMove, m_MaxCpuAllocationsToMove, - m_MaxGpuBytesToMove, m_MaxGpuAllocationsToMove, - VK_NULL_HANDLE); - - if (pBlockVectorCtx->res < VK_SUCCESS) - continue; - - pBlockVectorCtx->hasDefragmentationPlan = true; + vectorState.operation = StateExtensive::Operation::FindFreeBlockTexture; + return ComputeDefragmentation_Extensive(vector, index); } - const uint32_t processed = pBlockVectorCtx->GetBlockVector()->ProcessDefragmentations( - pBlockVectorCtx, - pCurrentMove, movesLeft); + if (!bufferPresent && !otherPresent) + { + vectorState.operation = StateExtensive::Operation::Cleanup; + break; + } - movesLeft -= processed; - pCurrentMove += processed; + // No more textures to move, check buffers + vectorState.operation = StateExtensive::Operation::MoveBuffers; + bufferPresent = false; + otherPresent = false; } + else + break; } - - // Process custom pools. - for (size_t customCtxIndex = 0, customCtxCount = m_CustomPoolContexts.size(); - customCtxIndex < customCtxCount; - ++customCtxIndex) + case StateExtensive::Operation::MoveBuffers: { - VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[customCtxIndex]; - VMA_ASSERT(pBlockVectorCtx && pBlockVectorCtx->GetBlockVector()); - - if (!pBlockVectorCtx->hasDefragmentationPlan) + if (MoveDataToFreeBlocks(VMA_SUBALLOCATION_TYPE_BUFFER, vector, + vectorState.firstFreeBlock, texturePresent, bufferPresent, otherPresent)) { - pBlockVectorCtx->GetBlockVector()->Defragment( - pBlockVectorCtx, - m_pStats, m_Flags, - m_MaxCpuBytesToMove, m_MaxCpuAllocationsToMove, - m_MaxGpuBytesToMove, m_MaxGpuAllocationsToMove, - VK_NULL_HANDLE); + if (bufferPresent) + { + vectorState.operation = StateExtensive::Operation::FindFreeBlockBuffer; + return ComputeDefragmentation_Extensive(vector, index); + } - if (pBlockVectorCtx->res < VK_SUCCESS) - continue; + if (!otherPresent) + { + vectorState.operation = StateExtensive::Operation::Cleanup; + break; + } - pBlockVectorCtx->hasDefragmentationPlan = true; + // No more buffers to move, check all others + vectorState.operation = StateExtensive::Operation::MoveAll; + otherPresent = false; } - - const uint32_t processed = pBlockVectorCtx->GetBlockVector()->ProcessDefragmentations( - pBlockVectorCtx, - pCurrentMove, movesLeft); - - movesLeft -= processed; - pCurrentMove += processed; + else + break; } - - pInfo->moveCount = pInfo->moveCount - movesLeft; - - return VK_SUCCESS; -} - -VkResult VmaDefragmentationContext_T::DefragmentPassEnd() -{ - VkResult res = VK_SUCCESS; - - // Process default pools. 
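The removed DefragmentPassBegin()/DefragmentPassEnd() belonged to the old VmaDefragmentationInfo2 flow; the new context is instead driven pass by pass from the public entry points. A rough usage sketch, assuming the vmaBeginDefragmentation / vmaBeginDefragmentationPass / vmaEndDefragmentationPass / vmaEndDefragmentation names and return-value conventions of the released VMA 3.0 headers (which may not match this intermediate revision exactly), with allocator being an already created VmaAllocator:

    VmaDefragmentationContext defragCtx = VK_NULL_HANDLE;
    VmaDefragmentationInfo defragInfo = {}; // flags, pool and per-pass limits left at defaults
    if (vmaBeginDefragmentation(allocator, &defragInfo, &defragCtx) == VK_SUCCESS)
    {
        for (;;)
        {
            VmaDefragmentationPassMoveInfo pass = {};
            if (vmaBeginDefragmentationPass(allocator, defragCtx, &pass) == VK_SUCCESS)
                break; // nothing left to move
            // Copy the data for pass.pMoves[0 .. pass.moveCount) and wait for the GPU here.
            if (vmaEndDefragmentationPass(allocator, defragCtx, &pass) == VK_SUCCESS)
                break; // finished
        }
        VmaDefragmentationStats stats = {};
        vmaEndDefragmentation(allocator, defragCtx, &stats);
    }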
- for (uint32_t memTypeIndex = 0; - memTypeIndex < m_hAllocator->GetMemoryTypeCount(); - ++memTypeIndex) + case StateExtensive::Operation::MoveAll: { - VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[memTypeIndex]; - if (pBlockVectorCtx) + if (MoveDataToFreeBlocks(VMA_SUBALLOCATION_TYPE_FREE, vector, + vectorState.firstFreeBlock, texturePresent, bufferPresent, otherPresent)) { - VMA_ASSERT(pBlockVectorCtx->GetBlockVector()); - - if (!pBlockVectorCtx->hasDefragmentationPlan) + if (otherPresent) { - res = VK_NOT_READY; - continue; + vectorState.operation = StateExtensive::Operation::FindFreeBlockBuffer; + return ComputeDefragmentation_Extensive(vector, index); } + // Everything moved + vectorState.operation = StateExtensive::Operation::Cleanup; + } + break; + } + } - pBlockVectorCtx->GetBlockVector()->CommitDefragmentations( - pBlockVectorCtx, m_pStats); - - if (pBlockVectorCtx->defragmentationMoves.size() != pBlockVectorCtx->defragmentationMovesCommitted) - res = VK_NOT_READY; + if (vectorState.operation == StateExtensive::Operation::Cleanup) + { + // All other work done, pack data in blocks even tighter if possible + const size_t prevMoveCount = m_Moves.size(); + for (size_t i = 0; i < vector.GetBlockCount(); ++i) + { + if (ReallocWithinBlock(vector, vector.GetBlock(i))) + return true; } + + if (prevMoveCount == m_Moves.size()) + vectorState.operation = StateExtensive::Operation::Done; } + return false; +} - // Process custom pools. - for (size_t customCtxIndex = 0, customCtxCount = m_CustomPoolContexts.size(); - customCtxIndex < customCtxCount; - ++customCtxIndex) +bool VmaDefragmentationContext_T::MoveDataToFreeBlocks(VmaSuballocationType currentType, + VmaBlockVector& vector, size_t firstFreeBlock, + bool& texturePresent, bool& bufferPresent, bool& otherPresent) +{ + const size_t prevMoveCount = m_Moves.size(); + for (size_t i = firstFreeBlock ; i;) { - VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[customCtxIndex]; - VMA_ASSERT(pBlockVectorCtx && pBlockVectorCtx->GetBlockVector()); + VmaDeviceMemoryBlock* block = vector.GetBlock(--i); + VmaBlockMetadata* metadata = block->m_pMetadata; - if (!pBlockVectorCtx->hasDefragmentationPlan) + for (VmaAllocHandle handle = metadata->GetAllocationListBegin(); + handle != VK_NULL_HANDLE; + handle = metadata->GetNextAllocation(handle)) { - res = VK_NOT_READY; - continue; - } + MoveAllocationData moveData = GetMoveData(handle, metadata); + // Ignore newly created allocations by defragmentation algorithm + if (moveData.move.srcAllocation->GetUserData() == this) + continue; - pBlockVectorCtx->GetBlockVector()->CommitDefragmentations( - pBlockVectorCtx, m_pStats); + // Move only single type of resources at once + if (!VmaIsBufferImageGranularityConflict(moveData.type, currentType)) + { + // Try to fit allocation into free blocks + if (AllocInOtherBlock(firstFreeBlock, vector.GetBlockCount(), moveData, vector)) + return false; + } - if (pBlockVectorCtx->defragmentationMoves.size() != pBlockVectorCtx->defragmentationMovesCommitted) - res = VK_NOT_READY; + if (!VmaIsBufferImageGranularityConflict(moveData.type, VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL)) + texturePresent = true; + else if (!VmaIsBufferImageGranularityConflict(moveData.type, VMA_SUBALLOCATION_TYPE_BUFFER)) + bufferPresent = true; + else + otherPresent = true; + } } - - return res; + return prevMoveCount == m_Moves.size(); } #endif // _VMA_DEFRAGMENTATION_CONTEXT_FUNCTIONS #ifndef _VMA_POOL_T_FUNCTIONS VmaPool_T::VmaPool_T( VmaAllocator 
hAllocator, - const VmaPoolCreateInfo& createInfo) : - m_hAllocator(hAllocator), - m_pBlockVectors{}, + const VmaPoolCreateInfo& createInfo, + VkDeviceSize preferredBlockSize) + : m_BlockVector( + hAllocator, + this, // hParentPool + createInfo.memoryTypeIndex, + createInfo.blockSize != 0 ? createInfo.blockSize : preferredBlockSize, + createInfo.minBlockCount, + createInfo.maxBlockCount, + (createInfo.flags& VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(), + createInfo.blockSize != 0, // explicitBlockSize + createInfo.flags & VMA_POOL_CREATE_ALGORITHM_MASK, // algorithm + createInfo.priority, + VMA_MAX(hAllocator->GetMemoryTypeMinAlignment(createInfo.memoryTypeIndex), createInfo.minAllocationAlignment), + createInfo.pMemoryAllocateNext), m_Id(0), - m_Name(VMA_NULL) -{ - for(uint32_t memTypeIndex = 0; memTypeIndex < hAllocator->GetMemoryTypeCount(); ++memTypeIndex) - { - // Create only supported types - if((hAllocator->GetGlobalMemoryTypeBits() & (1u << memTypeIndex)) != 0) - { - m_pBlockVectors[memTypeIndex] = vma_new(hAllocator, VmaBlockVector)( - hAllocator, - this, // hParentPool - memTypeIndex, - createInfo.blockSize != 0 ? createInfo.blockSize : hAllocator->CalcPreferredBlockSize(memTypeIndex), - createInfo.minBlockCount, - createInfo.maxBlockCount, - (createInfo.flags& VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(), - false, // explicitBlockSize - createInfo.flags & VMA_POOL_CREATE_ALGORITHM_MASK, // algorithm - createInfo.priority, - VMA_MAX(hAllocator->GetMemoryTypeMinAlignment(memTypeIndex), createInfo.minAllocationAlignment), - createInfo.pMemoryAllocateNext); - } - } -} + m_Name(VMA_NULL) {} VmaPool_T::~VmaPool_T() { VMA_ASSERT(m_PrevPool == VMA_NULL && m_NextPool == VMA_NULL); - for(uint32_t memTypeIndex = 0; memTypeIndex < m_hAllocator->GetMemoryTypeCount(); ++memTypeIndex) - { - vma_delete(m_hAllocator, m_pBlockVectors[memTypeIndex]); - } } void VmaPool_T::SetName(const char* pName) { - for(uint32_t memTypeIndex = 0; memTypeIndex < m_hAllocator->GetMemoryTypeCount(); ++memTypeIndex) - { - if(m_pBlockVectors[memTypeIndex]) - { - const VkAllocationCallbacks* allocs = m_pBlockVectors[memTypeIndex]->GetAllocator()->GetAllocationCallbacks(); - VmaFreeString(allocs, m_Name); + const VkAllocationCallbacks* allocs = m_BlockVector.GetAllocator()->GetAllocationCallbacks(); + VmaFreeString(allocs, m_Name); - if (pName != VMA_NULL) - { - m_Name = VmaCreateStringCopy(allocs, pName); - } - else - { - m_Name = VMA_NULL; - } - } + if (pName != VMA_NULL) + { + m_Name = VmaCreateStringCopy(allocs, pName); + } + else + { + m_Name = VMA_NULL; } } #endif // _VMA_POOL_T_FUNCTIONS @@ -14784,6 +13980,14 @@ void VmaAllocator_T::ImportVulkanFunctions_Static() m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties2KHR = (PFN_vkGetPhysicalDeviceMemoryProperties2)vkGetPhysicalDeviceMemoryProperties2; } #endif + +#if VMA_VULKAN_VERSION >= 1003000 + if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 3, 0)) + { + m_VulkanFunctions.vkGetDeviceBufferMemoryRequirements = (PFN_vkGetDeviceBufferMemoryRequirements)vkGetDeviceBufferMemoryRequirements; + m_VulkanFunctions.vkGetDeviceImageMemoryRequirements = (PFN_vkGetDeviceImageMemoryRequirements)vkGetDeviceImageMemoryRequirements; + } +#endif } #endif // VMA_STATIC_VULKAN_FUNCTIONS == 1 @@ -14829,6 +14033,11 @@ void VmaAllocator_T::ImportVulkanFunctions_Custom(const VmaVulkanFunctions* pVul VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties2KHR); #endif 
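With the allocator now importing vkGetDeviceBufferMemoryRequirements and vkGetDeviceImageMemoryRequirements for Vulkan 1.3, an application that supplies its own VmaVulkanFunctions table presumably has to provide these two entries as well, or the new ValidateVulkanFunctions() checks below will fire. A minimal sketch, assuming statically linked Vulkan 1.3 entry points:

    VmaVulkanFunctions funcs = {};
    // ...all previously required members filled as before...
    funcs.vkGetDeviceBufferMemoryRequirements = vkGetDeviceBufferMemoryRequirements;
    funcs.vkGetDeviceImageMemoryRequirements = vkGetDeviceImageMemoryRequirements;

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.vulkanApiVersion = VK_API_VERSION_1_3;
    allocatorInfo.pVulkanFunctions = &funcs;
    // physicalDevice, device and instance are set as usual before calling vmaCreateAllocator().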
+#if VMA_VULKAN_VERSION >= 1003000 + VMA_COPY_IF_NOT_NULL(vkGetDeviceBufferMemoryRequirements); + VMA_COPY_IF_NOT_NULL(vkGetDeviceImageMemoryRequirements); +#endif + #undef VMA_COPY_IF_NOT_NULL } @@ -14902,6 +14111,14 @@ void VmaAllocator_T::ImportVulkanFunctions_Dynamic() } #endif // #if VMA_MEMORY_BUDGET +#if VMA_VULKAN_VERSION >= 1003000 + if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 3, 0)) + { + VMA_FETCH_DEVICE_FUNC(vkGetDeviceBufferMemoryRequirements, PFN_vkGetDeviceBufferMemoryRequirements, "vkGetDeviceBufferMemoryRequirements"); + VMA_FETCH_DEVICE_FUNC(vkGetDeviceImageMemoryRequirements, PFN_vkGetDeviceImageMemoryRequirements, "vkGetDeviceImageMemoryRequirements"); + } +#endif + #undef VMA_FETCH_DEVICE_FUNC #undef VMA_FETCH_INSTANCE_FUNC } @@ -14950,6 +14167,14 @@ void VmaAllocator_T::ValidateVulkanFunctions() VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties2KHR != VMA_NULL); } #endif + +#if VMA_VULKAN_VERSION >= 1003000 + if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 3, 0)) + { + VMA_ASSERT(m_VulkanFunctions.vkGetDeviceBufferMemoryRequirements != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkGetDeviceImageMemoryRequirements != VMA_NULL); + } +#endif } VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex) @@ -14966,8 +14191,8 @@ VkResult VmaAllocator_T::AllocateMemoryOfType( VkDeviceSize alignment, bool dedicatedPreferred, VkBuffer dedicatedBuffer, - VkBufferUsageFlags dedicatedBufferUsage, VkImage dedicatedImage, + VkFlags dedicatedBufferImageUsage, const VmaAllocationCreateInfo& createInfo, uint32_t memTypeIndex, VmaSuballocationType suballocType, @@ -14998,12 +14223,14 @@ VkResult VmaAllocator_T::AllocateMemoryOfType( memTypeIndex, (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0, (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0, + (finalCreateInfo.flags & + (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != 0, (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_CAN_ALIAS_BIT) != 0, finalCreateInfo.pUserData, finalCreateInfo.priority, dedicatedBuffer, - dedicatedBufferUsage, dedicatedImage, + dedicatedBufferImageUsage, allocationCount, pAllocations, blockVector.GetAllocationNextPtr()); @@ -15039,12 +14266,14 @@ VkResult VmaAllocator_T::AllocateMemoryOfType( memTypeIndex, (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0, (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0, + (finalCreateInfo.flags & + (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != 0, (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_CAN_ALIAS_BIT) != 0, finalCreateInfo.pUserData, finalCreateInfo.priority, dedicatedBuffer, - dedicatedBufferUsage, dedicatedImage, + dedicatedBufferImageUsage, allocationCount, pAllocations, blockVector.GetAllocationNextPtr()); @@ -15078,12 +14307,14 @@ VkResult VmaAllocator_T::AllocateMemoryOfType( memTypeIndex, (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0, (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0, + (finalCreateInfo.flags & + (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != 0, (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_CAN_ALIAS_BIT) != 0, finalCreateInfo.pUserData, finalCreateInfo.priority, dedicatedBuffer, - dedicatedBufferUsage, dedicatedImage, + dedicatedBufferImageUsage, allocationCount, pAllocations, 
blockVector.GetAllocationNextPtr()); @@ -15108,12 +14339,13 @@ VkResult VmaAllocator_T::AllocateDedicatedMemory( uint32_t memTypeIndex, bool map, bool isUserDataString, + bool isMappingAllowed, bool canAliasMemory, void* pUserData, float priority, VkBuffer dedicatedBuffer, - VkBufferUsageFlags dedicatedBufferUsage, VkImage dedicatedImage, + VkFlags dedicatedBufferImageUsage, size_t allocationCount, VmaAllocation* pAllocations, const void* pNextChain) @@ -15153,8 +14385,8 @@ VkResult VmaAllocator_T::AllocateDedicatedMemory( bool canContainBufferWithDeviceAddress = true; if(dedicatedBuffer != VK_NULL_HANDLE) { - canContainBufferWithDeviceAddress = dedicatedBufferUsage == UINT32_MAX || // Usage flags unknown - (dedicatedBufferUsage & VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_EXT) != 0; + canContainBufferWithDeviceAddress = dedicatedBufferImageUsage == UINT32_MAX || // Usage flags unknown + (dedicatedBufferImageUsage & VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_EXT) != 0; } else if(dedicatedImage != VK_NULL_HANDLE) { @@ -15199,6 +14431,7 @@ VkResult VmaAllocator_T::AllocateDedicatedMemory( allocInfo, map, isUserDataString, + isMappingAllowed, pUserData, pAllocations + allocIndex); if(res != VK_SUCCESS) @@ -15235,7 +14468,6 @@ VkResult VmaAllocator_T::AllocateDedicatedMemory( FreeVulkanMemory(memTypeIndex, currAlloc->GetSize(), hMemory); m_Budget.RemoveAllocation(MemoryTypeIndexToHeapIndex(memTypeIndex), currAlloc->GetSize()); - currAlloc->SetUserData(this, VMA_NULL); m_AllocationObjectAllocator.Free(currAlloc); } @@ -15253,6 +14485,7 @@ VkResult VmaAllocator_T::AllocateDedicatedMemoryPage( const VkMemoryAllocateInfo& allocInfo, bool map, bool isUserDataString, + bool isMappingAllowed, void* pUserData, VmaAllocation* pAllocation) { @@ -15282,7 +14515,7 @@ VkResult VmaAllocator_T::AllocateDedicatedMemoryPage( } } - *pAllocation = m_AllocationObjectAllocator.Allocate(isUserDataString); + *pAllocation = m_AllocationObjectAllocator.Allocate(isUserDataString, isMappingAllowed); (*pAllocation)->InitDedicatedAllocation(pool, memTypeIndex, hMemory, suballocType, pMappedData, size); (*pAllocation)->SetUserData(this, pUserData); m_Budget.AddAllocation(MemoryTypeIndexToHeapIndex(memTypeIndex), size); @@ -15358,6 +14591,62 @@ void VmaAllocator_T::GetImageMemoryRequirements( } } +VkResult VmaAllocator_T::FindMemoryTypeIndex( + uint32_t memoryTypeBits, + const VmaAllocationCreateInfo* pAllocationCreateInfo, + VkFlags bufImgUsage, + uint32_t* pMemoryTypeIndex) const +{ + memoryTypeBits &= GetGlobalMemoryTypeBits(); + + if(pAllocationCreateInfo->memoryTypeBits != 0) + { + memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits; + } + + VkMemoryPropertyFlags requiredFlags = 0, preferredFlags = 0, notPreferredFlags = 0; + if(!FindMemoryPreferences( + IsIntegratedGpu(), + *pAllocationCreateInfo, + bufImgUsage, + requiredFlags, preferredFlags, notPreferredFlags)) + { + return VK_ERROR_FEATURE_NOT_PRESENT; + } + + *pMemoryTypeIndex = UINT32_MAX; + uint32_t minCost = UINT32_MAX; + for(uint32_t memTypeIndex = 0, memTypeBit = 1; + memTypeIndex < GetMemoryTypeCount(); + ++memTypeIndex, memTypeBit <<= 1) + { + // This memory type is acceptable according to memoryTypeBits bitmask. + if((memTypeBit & memoryTypeBits) != 0) + { + const VkMemoryPropertyFlags currFlags = + m_MemProps.memoryTypes[memTypeIndex].propertyFlags; + // This memory type contains requiredFlags. + if((requiredFlags & ~currFlags) == 0) + { + // Calculate cost as number of bits from preferredFlags not present in this memory type. 
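As a purely illustrative walk-through of the cost metric used here (flag values chosen arbitrarily):

    // preferredFlags    = HOST_VISIBLE | HOST_COHERENT
    // notPreferredFlags = DEVICE_LOCAL
    // candidate currFlags = HOST_VISIBLE | HOST_CACHED:
    //     VmaCountBitsSet(preferredFlags & ~currFlags)   = 1   (HOST_COHERENT missing)
    //   + VmaCountBitsSet(currFlags & notPreferredFlags) = 0
    //   -> cost 1, kept only until a cheaper type is found
    // candidate currFlags = HOST_VISIBLE | HOST_COHERENT -> cost 0, returned immediately.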
+ uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags) + + VmaCountBitsSet(currFlags & notPreferredFlags); + // Remember memory type with lowest cost. + if(currCost < minCost) + { + *pMemoryTypeIndex = memTypeIndex; + if(currCost == 0) + { + return VK_SUCCESS; + } + minCost = currCost; + } + } + } + } + return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT; +} + VkResult VmaAllocator_T::CalcMemTypeParams( VmaAllocationCreateInfo& inoutCreateInfo, uint32_t memTypeIndex, @@ -15390,29 +14679,38 @@ VkResult VmaAllocator_T::CalcAllocationParams( bool dedicatedRequired, bool dedicatedPreferred) { + VMA_ASSERT((inoutCreateInfo.flags & + (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != + (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT) && + "Specifying both flags VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT and VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT is incorrect."); + VMA_ASSERT((((inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT) == 0 || + (inoutCreateInfo.flags & (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != 0)) && + "Specifying VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT requires also VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT."); + if(inoutCreateInfo.usage == VMA_MEMORY_USAGE_AUTO || inoutCreateInfo.usage == VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE || inoutCreateInfo.usage == VMA_MEMORY_USAGE_AUTO_PREFER_HOST) + { + if((inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0) + { + VMA_ASSERT((inoutCreateInfo.flags & (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != 0 && + "When using VMA_ALLOCATION_CREATE_MAPPED_BIT and usage = VMA_MEMORY_USAGE_AUTO*, you must also specify VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT."); + } + } + + // If memory is lazily allocated, it should be always dedicated. if(dedicatedRequired || - // If memory is lazily allocated, it should be always dedicated. inoutCreateInfo.usage == VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED) { inoutCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT; } - if(inoutCreateInfo.pool != VK_NULL_HANDLE && (inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0) + if(inoutCreateInfo.pool != VK_NULL_HANDLE) { - // Assuming here every block has the same block size and priority. 
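The assertions above encode the new contract: at most one of the HOST_ACCESS bits may be set, VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT requires one of them, and VMA_ALLOCATION_CREATE_MAPPED_BIT combined with a VMA_MEMORY_USAGE_AUTO* usage requires one of them too. A minimal conforming allocation, sketched with arbitrary buffer parameters and an existing VmaAllocator allocator:

    VkBufferCreateInfo bufInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufInfo.size = 64 * 1024;
    bufInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;

    VmaAllocationCreateInfo allocInfo = {};
    allocInfo.usage = VMA_MEMORY_USAGE_AUTO;
    allocInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT |
                      VMA_ALLOCATION_CREATE_MAPPED_BIT; // MAPPED alone would trip the assert above

    VkBuffer buffer = VK_NULL_HANDLE;
    VmaAllocation allocation = VK_NULL_HANDLE;
    VmaAllocationInfo allocResult = {};
    vmaCreateBuffer(allocator, &bufInfo, &allocInfo, &buffer, &allocation, &allocResult);
    // allocResult.pMappedData stays valid thanks to VMA_ALLOCATION_CREATE_MAPPED_BIT.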
- for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex) + if(inoutCreateInfo.pool->m_BlockVector.HasExplicitBlockSize() && + (inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0) { - if(inoutCreateInfo.pool->m_pBlockVectors[memTypeIndex]) - { - if(inoutCreateInfo.pool->m_pBlockVectors[memTypeIndex]->HasExplicitBlockSize()) - { - VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT while current custom pool doesn't support dedicated allocations."); - return VK_ERROR_FEATURE_NOT_PRESENT; - } - inoutCreateInfo.priority = inoutCreateInfo.pool->m_pBlockVectors[memTypeIndex]->GetPriority(); - break; - } + VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT while current custom pool doesn't support dedicated allocations."); + return VK_ERROR_FEATURE_NOT_PRESENT; } + inoutCreateInfo.priority = inoutCreateInfo.pool->m_BlockVector.GetPriority(); } if((inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 && @@ -15427,6 +14725,21 @@ VkResult VmaAllocator_T::CalcAllocationParams( { inoutCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT; } + + // Non-auto USAGE values imply HOST_ACCESS flags. + // And so does VMA_MEMORY_USAGE_UNKNOWN because it is used with custom pools. + // Which specific flag is used doesn't matter. They change things only when used with VMA_MEMORY_USAGE_AUTO*. + // Otherwise they just protect from assert on mapping. + if(inoutCreateInfo.usage != VMA_MEMORY_USAGE_AUTO && + inoutCreateInfo.usage != VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE && + inoutCreateInfo.usage != VMA_MEMORY_USAGE_AUTO_PREFER_HOST) + { + if((inoutCreateInfo.flags & (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) == 0) + { + inoutCreateInfo.flags |= VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT; + } + } + return VK_SUCCESS; } @@ -15435,8 +14748,8 @@ VkResult VmaAllocator_T::AllocateMemory( bool requiresDedicatedAllocation, bool prefersDedicatedAllocation, VkBuffer dedicatedBuffer, - VkBufferUsageFlags dedicatedBufferUsage, VkImage dedicatedImage, + VkFlags dedicatedBufferImageUsage, const VmaAllocationCreateInfo& createInfo, VmaSuballocationType suballocType, size_t allocationCount, @@ -15456,46 +14769,67 @@ VkResult VmaAllocator_T::AllocateMemory( if(res != VK_SUCCESS) return res; - // Bit mask of memory Vulkan types acceptable for this allocation. - uint32_t memoryTypeBits = vkMemReq.memoryTypeBits; - uint32_t memTypeIndex = UINT32_MAX; - res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfoFinal, &memTypeIndex); - // Can't find any single memory type matching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT. - if(res != VK_SUCCESS) - return res; - do + if(createInfoFinal.pool != VK_NULL_HANDLE) { - VmaBlockVector* blockVector = createInfoFinal.pool == VK_NULL_HANDLE ? m_pBlockVectors[memTypeIndex] : createInfoFinal.pool->m_pBlockVectors[memTypeIndex]; - VMA_ASSERT(blockVector && "Trying to use unsupported memory type!"); - VmaDedicatedAllocationList& dedicatedAllocations = createInfoFinal.pool == VK_NULL_HANDLE ? 
m_DedicatedAllocations[memTypeIndex] : createInfoFinal.pool->m_DedicatedAllocations[memTypeIndex]; - res = AllocateMemoryOfType( + VmaBlockVector& blockVector = createInfoFinal.pool->m_BlockVector; + return AllocateMemoryOfType( createInfoFinal.pool, vkMemReq.size, vkMemReq.alignment, - requiresDedicatedAllocation || prefersDedicatedAllocation, + prefersDedicatedAllocation, dedicatedBuffer, - dedicatedBufferUsage, dedicatedImage, + dedicatedBufferImageUsage, createInfoFinal, - memTypeIndex, + blockVector.GetMemoryTypeIndex(), suballocType, - dedicatedAllocations, - *blockVector, + createInfoFinal.pool->m_DedicatedAllocations, + blockVector, allocationCount, pAllocations); - // Allocation succeeded - if(res == VK_SUCCESS) - return VK_SUCCESS; + } + else + { + // Bit mask of memory Vulkan types acceptable for this allocation. + uint32_t memoryTypeBits = vkMemReq.memoryTypeBits; + uint32_t memTypeIndex = UINT32_MAX; + res = FindMemoryTypeIndex(memoryTypeBits, &createInfoFinal, dedicatedBufferImageUsage, &memTypeIndex); + // Can't find any single memory type matching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT. + if(res != VK_SUCCESS) + return res; + do + { + VmaBlockVector* blockVector = m_pBlockVectors[memTypeIndex]; + VMA_ASSERT(blockVector && "Trying to use unsupported memory type!"); + res = AllocateMemoryOfType( + VK_NULL_HANDLE, + vkMemReq.size, + vkMemReq.alignment, + requiresDedicatedAllocation || prefersDedicatedAllocation, + dedicatedBuffer, + dedicatedImage, + dedicatedBufferImageUsage, + createInfoFinal, + memTypeIndex, + suballocType, + m_DedicatedAllocations[memTypeIndex], + *blockVector, + allocationCount, + pAllocations); + // Allocation succeeded + if(res == VK_SUCCESS) + return VK_SUCCESS; - // Remove old memTypeIndex from list of possibilities. - memoryTypeBits &= ~(1u << memTypeIndex); - // Find alternative memTypeIndex. - res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfoFinal, &memTypeIndex); - } while(res == VK_SUCCESS); + // Remove old memTypeIndex from list of possibilities. + memoryTypeBits &= ~(1u << memTypeIndex); + // Find alternative memTypeIndex. + res = FindMemoryTypeIndex(memoryTypeBits, &createInfoFinal, dedicatedBufferImageUsage, &memTypeIndex); + } while(res == VK_SUCCESS); - // No other matching memory type index could be found. - // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once. - return VK_ERROR_OUT_OF_DEVICE_MEMORY; + // No other matching memory type index could be found. + // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once. 
+ return VK_ERROR_OUT_OF_DEVICE_MEMORY; + } } void VmaAllocator_T::FreeMemory( @@ -15521,16 +14855,16 @@ void VmaAllocator_T::FreeMemory( { VmaBlockVector* pBlockVector = VMA_NULL; VmaPool hPool = allocation->GetParentPool(); - const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex(); if(hPool != VK_NULL_HANDLE) { - pBlockVector = hPool->m_pBlockVectors[memTypeIndex]; + pBlockVector = &hPool->m_BlockVector; } else { + const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex(); pBlockVector = m_pBlockVectors[memTypeIndex]; + VMA_ASSERT(pBlockVector && "Trying to free memory of unsupported type!"); } - VMA_ASSERT(pBlockVector && "Trying to free memory of unsupported type!"); pBlockVector->Free(allocation); } break; @@ -15540,29 +14874,25 @@ void VmaAllocator_T::FreeMemory( default: VMA_ASSERT(0); } - - m_Budget.RemoveAllocation(MemoryTypeIndexToHeapIndex(allocation->GetMemoryTypeIndex()), allocation->GetSize()); - allocation->SetUserData(this, VMA_NULL); - m_AllocationObjectAllocator.Free(allocation); } } } -void VmaAllocator_T::CalculateStats(VmaStats* pStats) +void VmaAllocator_T::CalculateStatistics(VmaTotalStatistics* pStats) { // Initialize. - VmaInitStatInfo(pStats->total); - for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i) - VmaInitStatInfo(pStats->memoryType[i]); - for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i) - VmaInitStatInfo(pStats->memoryHeap[i]); + VmaClearDetailedStatistics(pStats->total); + for(uint32_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i) + VmaClearDetailedStatistics(pStats->memoryType[i]); + for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i) + VmaClearDetailedStatistics(pStats->memoryHeap[i]); // Process default pools. for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex) { VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex]; if (pBlockVector != VMA_NULL) - pBlockVector->AddStats(pStats); + pBlockVector->AddDetailedStatistics(pStats->memoryType[memTypeIndex]); } // Process custom pools. @@ -15570,33 +14900,34 @@ void VmaAllocator_T::CalculateStats(VmaStats* pStats) VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex); for(VmaPool pool = m_Pools.Front(); pool != VMA_NULL; pool = m_Pools.GetNext(pool)) { - for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex) - { - if (pool->m_pBlockVectors[memTypeIndex]) - { - VmaBlockVector& blockVector = *pool->m_pBlockVectors[memTypeIndex]; - blockVector.AddStats(pStats); - const uint32_t memTypeIndex = blockVector.GetMemoryTypeIndex(); - const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex); - pool->m_DedicatedAllocations[memTypeIndex].AddStats(pStats, memTypeIndex, memHeapIndex); - } - } + VmaBlockVector& blockVector = pool->m_BlockVector; + const uint32_t memTypeIndex = blockVector.GetMemoryTypeIndex(); + blockVector.AddDetailedStatistics(pStats->memoryType[memTypeIndex]); + pool->m_DedicatedAllocations.AddDetailedStatistics(pStats->memoryType[memTypeIndex]); } } // Process dedicated allocations. for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex) { - const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex); - m_DedicatedAllocations[memTypeIndex].AddStats(pStats, memTypeIndex, memHeapIndex); + m_DedicatedAllocations[memTypeIndex].AddDetailedStatistics(pStats->memoryType[memTypeIndex]); + } + + // Sum from memory types to memory heaps. 
+ for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex) + { + const uint32_t memHeapIndex = m_MemProps.memoryTypes[memTypeIndex].heapIndex; + VmaAddDetailedStatistics(pStats->memoryHeap[memHeapIndex], pStats->memoryType[memTypeIndex]); } - // Postprocess. - VmaPostprocessCalcStatInfo(pStats->total); - for(size_t i = 0; i < GetMemoryTypeCount(); ++i) - VmaPostprocessCalcStatInfo(pStats->memoryType[i]); - for(size_t i = 0; i < GetMemoryHeapCount(); ++i) - VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]); + // Sum from memory heaps to total. + for(uint32_t memHeapIndex = 0; memHeapIndex < GetMemoryHeapCount(); ++memHeapIndex) + VmaAddDetailedStatistics(pStats->total, pStats->memoryHeap[memHeapIndex]); + + VMA_ASSERT(pStats->total.statistics.allocationCount == 0 || + pStats->total.allocationSizeMax >= pStats->total.allocationSizeMin); + VMA_ASSERT(pStats->total.unusedRangeCount == 0 || + pStats->total.unusedRangeSizeMax >= pStats->total.unusedRangeSizeMin); } void VmaAllocator_T::GetHeapBudgets(VmaBudget* outBudgets, uint32_t firstHeap, uint32_t heapCount) @@ -15611,13 +14942,15 @@ void VmaAllocator_T::GetHeapBudgets(VmaBudget* outBudgets, uint32_t firstHeap, u { const uint32_t heapIndex = firstHeap + i; - outBudgets->blockBytes = m_Budget.m_BlockBytes[heapIndex]; - outBudgets->allocationBytes = m_Budget.m_AllocationBytes[heapIndex]; + outBudgets->statistics.blockCount = m_Budget.m_BlockCount[heapIndex]; + outBudgets->statistics.allocationCount = m_Budget.m_AllocationCount[heapIndex]; + outBudgets->statistics.blockBytes = m_Budget.m_BlockBytes[heapIndex]; + outBudgets->statistics.allocationBytes = m_Budget.m_AllocationBytes[heapIndex]; - if(m_Budget.m_VulkanUsage[heapIndex] + outBudgets->blockBytes > m_Budget.m_BlockBytesAtBudgetFetch[heapIndex]) + if(m_Budget.m_VulkanUsage[heapIndex] + outBudgets->statistics.blockBytes > m_Budget.m_BlockBytesAtBudgetFetch[heapIndex]) { outBudgets->usage = m_Budget.m_VulkanUsage[heapIndex] + - outBudgets->blockBytes - m_Budget.m_BlockBytesAtBudgetFetch[heapIndex]; + outBudgets->statistics.blockBytes - m_Budget.m_BlockBytesAtBudgetFetch[heapIndex]; } else { @@ -15642,66 +14975,17 @@ void VmaAllocator_T::GetHeapBudgets(VmaBudget* outBudgets, uint32_t firstHeap, u { const uint32_t heapIndex = firstHeap + i; - outBudgets->blockBytes = m_Budget.m_BlockBytes[heapIndex]; - outBudgets->allocationBytes = m_Budget.m_AllocationBytes[heapIndex]; + outBudgets->statistics.blockCount = m_Budget.m_BlockCount[heapIndex]; + outBudgets->statistics.allocationCount = m_Budget.m_AllocationCount[heapIndex]; + outBudgets->statistics.blockBytes = m_Budget.m_BlockBytes[heapIndex]; + outBudgets->statistics.allocationBytes = m_Budget.m_AllocationBytes[heapIndex]; - outBudgets->usage = outBudgets->blockBytes; + outBudgets->usage = outBudgets->statistics.blockBytes; outBudgets->budget = m_MemProps.memoryHeaps[heapIndex].size * 8 / 10; // 80% heuristics. 
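GetHeapBudgets() now reports full VmaStatistics per heap and, when VK_EXT_memory_budget data is unavailable, estimates usage from blockBytes and budget as 80% of the heap size. A short query sketch built only on entry points visible in this patch plus vmaGetMemoryProperties(), again given an existing allocator:

    VmaBudget budgets[VK_MAX_MEMORY_HEAPS] = {};
    vmaGetHeapBudgets(allocator, budgets);

    const VkPhysicalDeviceMemoryProperties* memProps = nullptr;
    vmaGetMemoryProperties(allocator, &memProps);
    for (uint32_t heapIndex = 0; heapIndex < memProps->memoryHeapCount; ++heapIndex)
    {
        // With VK_EXT_memory_budget the driver numbers are used; otherwise usage falls back to
        // statistics.blockBytes and budget to 80% of the heap size, as computed above.
        if (budgets[heapIndex].usage > budgets[heapIndex].budget * 9 / 10)
        {
            // Close to the budget: avoid creating new blocks in this heap.
        }
    }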
} } } -VkResult VmaAllocator_T::DefragmentationBegin( - const VmaDefragmentationInfo2& info, - VmaDefragmentationStats* pStats, - VmaDefragmentationContext* pContext) -{ - if(info.pAllocationsChanged != VMA_NULL) - { - memset(info.pAllocationsChanged, 0, info.allocationCount * sizeof(VkBool32)); - } - - *pContext = vma_new(this, VmaDefragmentationContext_T)( - this, info.flags, pStats); - - (*pContext)->AddPools(info.poolCount, info.pPools); - (*pContext)->AddAllocations( - info.allocationCount, info.pAllocations, info.pAllocationsChanged); - - VkResult res = (*pContext)->Defragment( - info.maxCpuBytesToMove, info.maxCpuAllocationsToMove, - info.maxGpuBytesToMove, info.maxGpuAllocationsToMove, - info.commandBuffer, pStats, info.flags); - - if(res != VK_NOT_READY) - { - vma_delete(this, *pContext); - *pContext = VMA_NULL; - } - - return res; -} - -VkResult VmaAllocator_T::DefragmentationEnd( - VmaDefragmentationContext context) -{ - vma_delete(this, context); - return VK_SUCCESS; -} - -VkResult VmaAllocator_T::DefragmentationPassBegin( - VmaDefragmentationPassInfo* pInfo, - VmaDefragmentationContext context) -{ - return context->DefragmentPassBegin(pInfo); -} - -VkResult VmaAllocator_T::DefragmentationPassEnd( - VmaDefragmentationContext context) -{ - return context->DefragmentPassEnd(); -} - void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo) { pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex(); @@ -15732,26 +15016,27 @@ VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPoo { return VK_ERROR_INITIALIZATION_FAILED; } + // Memory type index out of range or forbidden. + if(pCreateInfo->memoryTypeIndex >= GetMemoryTypeCount() || + ((1u << pCreateInfo->memoryTypeIndex) & m_GlobalMemoryTypeBits) == 0) + { + return VK_ERROR_FEATURE_NOT_PRESENT; + } if(newCreateInfo.minAllocationAlignment > 0) { VMA_ASSERT(VmaIsPow2(newCreateInfo.minAllocationAlignment)); } - *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo); + const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex); - for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex) + *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo, preferredBlockSize); + + VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks(); + if(res != VK_SUCCESS) { - // Create only supported types - if((m_GlobalMemoryTypeBits & (1u << memTypeIndex)) != 0) - { - VkResult res = (*pPool)->m_pBlockVectors[memTypeIndex]->CreateMinBlocks(); - if(res != VK_SUCCESS) - { - vma_delete(this, *pPool); - *pPool = VMA_NULL; - return res; - } - } + vma_delete(this, *pPool); + *pPool = VMA_NULL; + return res; } // Add to m_Pools. 
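Since a pool now wraps a single VmaBlockVector for exactly one memory type, and CreatePool() rejects an out-of-range or globally disabled memoryTypeIndex up front, the caller chooses the memory type before creating the pool. A sketch using vmaFindMemoryTypeIndexForBufferInfo() with example buffer parameters:

    VkBufferCreateInfo exampleBufInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    exampleBufInfo.size = 1024;
    exampleBufInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;

    VmaAllocationCreateInfo sampleAllocInfo = {};
    sampleAllocInfo.usage = VMA_MEMORY_USAGE_AUTO;

    uint32_t memTypeIndex = 0;
    vmaFindMemoryTypeIndexForBufferInfo(allocator, &exampleBufInfo, &sampleAllocInfo, &memTypeIndex);

    VmaPoolCreateInfo poolInfo = {};
    poolInfo.memoryTypeIndex = memTypeIndex;
    poolInfo.blockSize = 0; // 0 = use the allocator's preferred block size, as in CreatePool above
    VmaPool pool = VK_NULL_HANDLE;
    vmaCreatePool(allocator, &poolInfo, &pool);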
@@ -15775,22 +15060,18 @@ void VmaAllocator_T::DestroyPool(VmaPool pool) vma_delete(this, pool); } -void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats) +void VmaAllocator_T::GetPoolStatistics(VmaPool pool, VmaStatistics* pPoolStats) { - pPoolStats->size = 0; - pPoolStats->unusedSize = 0; - pPoolStats->allocationCount = 0; - pPoolStats->unusedRangeCount = 0; - pPoolStats->blockCount = 0; + VmaClearStatistics(*pPoolStats); + pool->m_BlockVector.AddStatistics(*pPoolStats); + pool->m_DedicatedAllocations.AddStatistics(*pPoolStats); +} - for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex) - { - if((m_GlobalMemoryTypeBits & (1u << memTypeIndex)) != 0) - { - pool->m_pBlockVectors[memTypeIndex]->AddPoolStats(pPoolStats); - pool->m_DedicatedAllocations[memTypeIndex].AddPoolStats(pPoolStats); - } - } +void VmaAllocator_T::CalculatePoolStatistics(VmaPool pool, VmaDetailedStatistics* pPoolStats) +{ + VmaClearDetailedStatistics(*pPoolStats); + pool->m_BlockVector.AddDetailedStatistics(*pPoolStats); + pool->m_DedicatedAllocations.AddDetailedStatistics(*pPoolStats); } void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex) @@ -15807,13 +15088,7 @@ void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex) VkResult VmaAllocator_T::CheckPoolCorruption(VmaPool hPool) { - for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex) - { - if((m_GlobalMemoryTypeBits & (1u << memTypeIndex)) != 0) - { - return hPool->m_pBlockVectors[memTypeIndex]->CheckCorruption(); - } - } + return hPool->m_BlockVector.CheckCorruption(); } VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits) @@ -15845,21 +15120,18 @@ VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits) VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex); for(VmaPool pool = m_Pools.Front(); pool != VMA_NULL; pool = m_Pools.GetNext(pool)) { - for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex) + if(((1u << pool->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0) { - if(pool->m_pBlockVectors[memTypeIndex] && ((1u << memTypeIndex) & memoryTypeBits) != 0) + VkResult localRes = pool->m_BlockVector.CheckCorruption(); + switch(localRes) { - VkResult localRes = pool->m_pBlockVectors[memTypeIndex]->CheckCorruption(); - switch(localRes) - { - case VK_ERROR_FEATURE_NOT_PRESENT: - break; - case VK_SUCCESS: - finalRes = VK_SUCCESS; - break; - default: - return localRes; - } + case VK_ERROR_FEATURE_NOT_PRESENT: + break; + case VK_SUCCESS: + finalRes = VK_SUCCESS; + break; + default: + return localRes; } } } @@ -15903,6 +15175,7 @@ VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAlloc { m_Budget.m_BlockBytes[heapIndex] += pAllocateInfo->allocationSize; } + ++m_Budget.m_BlockCount[heapIndex]; // VULKAN CALL vkAllocateMemory. VkResult res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory); @@ -15923,6 +15196,7 @@ VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAlloc } else { + --m_Budget.m_BlockCount[heapIndex]; m_Budget.m_BlockBytes[heapIndex] -= pAllocateInfo->allocationSize; } @@ -15940,7 +15214,9 @@ void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, Vk // VULKAN CALL vkFreeMemory. 
(*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks()); - m_Budget.m_BlockBytes[MemoryTypeIndexToHeapIndex(memoryType)] -= size; + const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType); + --m_Budget.m_BlockCount[heapIndex]; + m_Budget.m_BlockBytes[heapIndex] -= size; --m_DeviceMemoryCount; } @@ -16181,7 +15457,7 @@ void VmaAllocator_T::FreeDedicatedMemory(const VmaAllocation allocation) else { // Custom pool - parentPool->m_DedicatedAllocations[memTypeIndex].Unregister(allocation); + parentPool->m_DedicatedAllocations.Unregister(allocation); } VkDeviceMemory hMemory = allocation->GetMemory(); @@ -16198,6 +15474,9 @@ void VmaAllocator_T::FreeDedicatedMemory(const VmaAllocation allocation) FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory); + m_Budget.RemoveAllocation(MemoryTypeIndexToHeapIndex(allocation->GetMemoryTypeIndex()), allocation->GetSize()); + m_AllocationObjectAllocator.Free(allocation); + VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex); } @@ -16456,18 +15735,12 @@ void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json) json.EndString(); json.BeginObject(); - for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex) - { - if (pool->m_pBlockVectors[memTypeIndex]) - { - pool->m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json); - } + pool->m_BlockVector.PrintDetailedMap(json); - if (!pool->m_DedicatedAllocations[memTypeIndex].IsEmpty()) - { - json.WriteString("DedicatedAllocations"); - pool->m_DedicatedAllocations->BuildStatsString(json); - } + if (!pool->m_DedicatedAllocations.IsEmpty()) + { + json.WriteString("DedicatedAllocations"); + pool->m_DedicatedAllocations.BuildStatsString(json); } json.EndObject(); } @@ -16554,13 +15827,13 @@ VMA_CALL_PRE void VMA_CALL_POST vmaSetCurrentFrameIndex( allocator->SetCurrentFrameIndex(frameIndex); } -VMA_CALL_PRE void VMA_CALL_POST vmaCalculateStats( +VMA_CALL_PRE void VMA_CALL_POST vmaCalculateStatistics( VmaAllocator allocator, - VmaStats* pStats) + VmaTotalStatistics* pStats) { VMA_ASSERT(allocator && pStats); VMA_DEBUG_GLOBAL_MUTEX_LOCK - allocator->CalculateStats(pStats); + allocator->CalculateStatistics(pStats); } VMA_CALL_PRE void VMA_CALL_POST vmaGetHeapBudgets( @@ -16590,11 +15863,11 @@ VMA_CALL_PRE void VMA_CALL_POST vmaBuildStatsString( VmaBudget budgets[VK_MAX_MEMORY_HEAPS]; allocator->GetHeapBudgets(budgets, 0, allocator->GetMemoryHeapCount()); - VmaStats stats; - allocator->CalculateStats(&stats); + VmaTotalStatistics stats; + allocator->CalculateStatistics(&stats); json.WriteString("Total"); - VmaPrintStatInfo(json, stats.total); + VmaPrintDetailedStatistics(json, stats.total); for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex) { @@ -16618,9 +15891,13 @@ VMA_CALL_PRE void VMA_CALL_POST vmaBuildStatsString( json.BeginObject(); { json.WriteString("BlockBytes"); - json.WriteNumber(budgets[heapIndex].blockBytes); + json.WriteNumber(budgets[heapIndex].statistics.blockBytes); json.WriteString("AllocationBytes"); - json.WriteNumber(budgets[heapIndex].allocationBytes); + json.WriteNumber(budgets[heapIndex].statistics.allocationBytes); + json.WriteString("BlockCount"); + json.WriteNumber(budgets[heapIndex].statistics.blockCount); + json.WriteString("AllocationCount"); + json.WriteNumber(budgets[heapIndex].statistics.allocationCount); json.WriteString("Usage"); json.WriteNumber(budgets[heapIndex].usage); json.WriteString("Budget"); @@ -16628,10 +15905,10 @@ VMA_CALL_PRE void VMA_CALL_POST 
vmaBuildStatsString( } json.EndObject(); - if(stats.memoryHeap[heapIndex].blockCount > 0) + if(stats.memoryHeap[heapIndex].statistics.blockCount > 0) { json.WriteString("Stats"); - VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]); + VmaPrintDetailedStatistics(json, stats.memoryHeap[heapIndex]); } for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex) @@ -16685,10 +15962,10 @@ VMA_CALL_PRE void VMA_CALL_POST vmaBuildStatsString( #endif // #if VK_AMD_device_coherent_memory json.EndArray(); - if(stats.memoryType[typeIndex].blockCount > 0) + if(stats.memoryType[typeIndex].statistics.blockCount > 0) { json.WriteString("Stats"); - VmaPrintStatInfo(json, stats.memoryType[typeIndex]); + VmaPrintDetailedStatistics(json, stats.memoryType[typeIndex]); } json.EndObject(); @@ -16734,91 +16011,7 @@ VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndex( VMA_ASSERT(pAllocationCreateInfo != VMA_NULL); VMA_ASSERT(pMemoryTypeIndex != VMA_NULL); - memoryTypeBits &= allocator->GetGlobalMemoryTypeBits(); - - if(pAllocationCreateInfo->memoryTypeBits != 0) - { - memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits; - } - - uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags; - uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags; - uint32_t notPreferredFlags = 0; - - // Convert usage to requiredFlags and preferredFlags. - switch(pAllocationCreateInfo->usage) - { - case VMA_MEMORY_USAGE_UNKNOWN: - break; - case VMA_MEMORY_USAGE_GPU_ONLY: - if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0) - { - preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT; - } - break; - case VMA_MEMORY_USAGE_CPU_ONLY: - requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT; - break; - case VMA_MEMORY_USAGE_CPU_TO_GPU: - requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT; - if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0) - { - preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT; - } - break; - case VMA_MEMORY_USAGE_GPU_TO_CPU: - requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT; - preferredFlags |= VK_MEMORY_PROPERTY_HOST_CACHED_BIT; - break; - case VMA_MEMORY_USAGE_CPU_COPY: - notPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT; - break; - case VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED: - requiredFlags |= VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT; - break; - default: - VMA_ASSERT(0); - break; - } - - // Avoid DEVICE_COHERENT unless explicitly requested. - if(((pAllocationCreateInfo->requiredFlags | pAllocationCreateInfo->preferredFlags) & - (VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY | VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY)) == 0) - { - notPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY; - } - - *pMemoryTypeIndex = UINT32_MAX; - uint32_t minCost = UINT32_MAX; - for(uint32_t memTypeIndex = 0, memTypeBit = 1; - memTypeIndex < allocator->GetMemoryTypeCount(); - ++memTypeIndex, memTypeBit <<= 1) - { - // This memory type is acceptable according to memoryTypeBits bitmask. - if((memTypeBit & memoryTypeBits) != 0) - { - const VkMemoryPropertyFlags currFlags = - allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags; - // This memory type contains requiredFlags. - if((requiredFlags & ~currFlags) == 0) - { - // Calculate cost as number of bits from preferredFlags not present in this memory type. 
- uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags) + - VmaCountBitsSet(currFlags & notPreferredFlags); - // Remember memory type with lowest cost. - if(currCost < minCost) - { - *pMemoryTypeIndex = memTypeIndex; - if(currCost == 0) - { - return VK_SUCCESS; - } - minCost = currCost; - } - } - } - } - return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT; + return allocator->FindMemoryTypeIndex(memoryTypeBits, pAllocationCreateInfo, UINT32_MAX, pMemoryTypeIndex); } VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForBufferInfo( @@ -16833,24 +16026,40 @@ VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForBufferInfo( VMA_ASSERT(pMemoryTypeIndex != VMA_NULL); const VkDevice hDev = allocator->m_hDevice; - VkBuffer hBuffer = VK_NULL_HANDLE; const VmaVulkanFunctions* funcs = &allocator->GetVulkanFunctions(); - VkResult res = funcs->vkCreateBuffer( - hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer); - if(res == VK_SUCCESS) + VkResult res; + +#if VMA_VULKAN_VERSION >= 1003000 + if(funcs->vkGetDeviceBufferMemoryRequirements) { - VkMemoryRequirements memReq = {}; - funcs->vkGetBufferMemoryRequirements( - hDev, hBuffer, &memReq); + // Can query straight from VkBufferCreateInfo :) + VkDeviceBufferMemoryRequirements devBufMemReq = {VK_STRUCTURE_TYPE_DEVICE_BUFFER_MEMORY_REQUIREMENTS}; + devBufMemReq.pCreateInfo = pBufferCreateInfo; - res = vmaFindMemoryTypeIndex( - allocator, - memReq.memoryTypeBits, - pAllocationCreateInfo, - pMemoryTypeIndex); + VkMemoryRequirements2 memReq = {VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2}; + (*funcs->vkGetDeviceBufferMemoryRequirements)(hDev, &devBufMemReq, &memReq); - funcs->vkDestroyBuffer( - hDev, hBuffer, allocator->GetAllocationCallbacks()); + res = allocator->FindMemoryTypeIndex( + memReq.memoryRequirements.memoryTypeBits, pAllocationCreateInfo, pBufferCreateInfo->usage, pMemoryTypeIndex); + } + else +#endif // #if VMA_VULKAN_VERSION >= 1003000 + { + // Must create a dummy buffer to query :( + VkBuffer hBuffer = VK_NULL_HANDLE; + res = funcs->vkCreateBuffer( + hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer); + if(res == VK_SUCCESS) + { + VkMemoryRequirements memReq = {}; + funcs->vkGetBufferMemoryRequirements(hDev, hBuffer, &memReq); + + res = allocator->FindMemoryTypeIndex( + memReq.memoryTypeBits, pAllocationCreateInfo, pBufferCreateInfo->usage, pMemoryTypeIndex); + + funcs->vkDestroyBuffer( + hDev, hBuffer, allocator->GetAllocationCallbacks()); + } } return res; } @@ -16867,24 +16076,42 @@ VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForImageInfo( VMA_ASSERT(pMemoryTypeIndex != VMA_NULL); const VkDevice hDev = allocator->m_hDevice; - VkImage hImage = VK_NULL_HANDLE; const VmaVulkanFunctions* funcs = &allocator->GetVulkanFunctions(); - VkResult res = funcs->vkCreateImage( - hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage); - if(res == VK_SUCCESS) + VkResult res; + +#if VMA_VULKAN_VERSION >= 1003000 + if(funcs->vkGetDeviceImageMemoryRequirements) + { + // Can query straight from VkImageCreateInfo :) + VkDeviceImageMemoryRequirements devImgMemReq = {VK_STRUCTURE_TYPE_DEVICE_IMAGE_MEMORY_REQUIREMENTS}; + devImgMemReq.pCreateInfo = pImageCreateInfo; + VMA_ASSERT(pImageCreateInfo->tiling != VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT_COPY && (pImageCreateInfo->flags & VK_IMAGE_CREATE_DISJOINT_BIT_COPY) == 0 && + "Cannot use this VkImageCreateInfo with vmaFindMemoryTypeIndexForImageInfo as I don't know what to pass as 
VkDeviceImageMemoryRequirements::planeAspect."); + + VkMemoryRequirements2 memReq = {VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2}; + (*funcs->vkGetDeviceImageMemoryRequirements)(hDev, &devImgMemReq, &memReq); + + res = allocator->FindMemoryTypeIndex( + memReq.memoryRequirements.memoryTypeBits, pAllocationCreateInfo, pImageCreateInfo->usage, pMemoryTypeIndex); + } + else +#endif // #if VMA_VULKAN_VERSION >= 1003000 { - VkMemoryRequirements memReq = {}; - funcs->vkGetImageMemoryRequirements( - hDev, hImage, &memReq); + // Must create a dummy image to query :( + VkImage hImage = VK_NULL_HANDLE; + res = funcs->vkCreateImage( + hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage); + if(res == VK_SUCCESS) + { + VkMemoryRequirements memReq = {}; + funcs->vkGetImageMemoryRequirements(hDev, hImage, &memReq); - res = vmaFindMemoryTypeIndex( - allocator, - memReq.memoryTypeBits, - pAllocationCreateInfo, - pMemoryTypeIndex); + res = allocator->FindMemoryTypeIndex( + memReq.memoryTypeBits, pAllocationCreateInfo, pImageCreateInfo->usage, pMemoryTypeIndex); - funcs->vkDestroyImage( - hDev, hImage, allocator->GetAllocationCallbacks()); + funcs->vkDestroyImage( + hDev, hImage, allocator->GetAllocationCallbacks()); + } } return res; } @@ -16921,16 +16148,28 @@ VMA_CALL_PRE void VMA_CALL_POST vmaDestroyPool( allocator->DestroyPool(pool); } -VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolStats( +VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolStatistics( + VmaAllocator allocator, + VmaPool pool, + VmaStatistics* pPoolStats) +{ + VMA_ASSERT(allocator && pool && pPoolStats); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + allocator->GetPoolStatistics(pool, pPoolStats); +} + +VMA_CALL_PRE void VMA_CALL_POST vmaCalculatePoolStatistics( VmaAllocator allocator, VmaPool pool, - VmaPoolStats* pPoolStats) + VmaDetailedStatistics* pPoolStats) { VMA_ASSERT(allocator && pool && pPoolStats); VMA_DEBUG_GLOBAL_MUTEX_LOCK - allocator->GetPoolStats(pool, pPoolStats); + allocator->CalculatePoolStatistics(pool, pPoolStats); } VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool) @@ -16990,8 +16229,8 @@ VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemory( false, // requiresDedicatedAllocation false, // prefersDedicatedAllocation VK_NULL_HANDLE, // dedicatedBuffer - UINT32_MAX, // dedicatedBufferUsage VK_NULL_HANDLE, // dedicatedImage + UINT32_MAX, // dedicatedBufferImageUsage *pCreateInfo, VMA_SUBALLOCATION_TYPE_UNKNOWN, 1, // allocationCount @@ -17029,8 +16268,8 @@ VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryPages( false, // requiresDedicatedAllocation false, // prefersDedicatedAllocation VK_NULL_HANDLE, // dedicatedBuffer - UINT32_MAX, // dedicatedBufferUsage VK_NULL_HANDLE, // dedicatedImage + UINT32_MAX, // dedicatedBufferImageUsage *pCreateInfo, VMA_SUBALLOCATION_TYPE_UNKNOWN, allocationCount, @@ -17072,8 +16311,8 @@ VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForBuffer( requiresDedicatedAllocation, prefersDedicatedAllocation, buffer, // dedicatedBuffer - UINT32_MAX, // dedicatedBufferUsage VK_NULL_HANDLE, // dedicatedImage + UINT32_MAX, // dedicatedBufferImageUsage *pCreateInfo, VMA_SUBALLOCATION_TYPE_BUFFER, 1, // allocationCount @@ -17111,8 +16350,8 @@ VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForImage( requiresDedicatedAllocation, prefersDedicatedAllocation, VK_NULL_HANDLE, // dedicatedBuffer - UINT32_MAX, // dedicatedBufferUsage image, // dedicatedImage + UINT32_MAX, // dedicatedBufferImageUsage *pCreateInfo, VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN, 1, // 
allocationCount @@ -17319,123 +16558,64 @@ VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckCorruption( return allocator->CheckCorruption(memoryTypeBits); } -VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragment( +VMA_CALL_PRE VkResult VMA_CALL_POST vmaBeginDefragmentation( VmaAllocator allocator, - const VmaAllocation* pAllocations, - size_t allocationCount, - VkBool32* pAllocationsChanged, - const VmaDefragmentationInfo *pDefragmentationInfo, - VmaDefragmentationStats* pDefragmentationStats) -{ - // Deprecated interface, reimplemented using new one. - - VmaDefragmentationInfo2 info2 = {}; - info2.allocationCount = (uint32_t)allocationCount; - info2.pAllocations = pAllocations; - info2.pAllocationsChanged = pAllocationsChanged; - if(pDefragmentationInfo != VMA_NULL) - { - info2.maxCpuAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove; - info2.maxCpuBytesToMove = pDefragmentationInfo->maxBytesToMove; - } - else - { - info2.maxCpuAllocationsToMove = UINT32_MAX; - info2.maxCpuBytesToMove = VK_WHOLE_SIZE; - } - // info2.flags, maxGpuAllocationsToMove, maxGpuBytesToMove, commandBuffer deliberately left zero. - - VmaDefragmentationContext ctx; - VkResult res = vmaDefragmentationBegin(allocator, &info2, pDefragmentationStats, &ctx); - if(res == VK_NOT_READY) - { - res = vmaDefragmentationEnd( allocator, ctx); - } - return res; -} - -VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragmentationBegin( - VmaAllocator allocator, - const VmaDefragmentationInfo2* pInfo, - VmaDefragmentationStats* pStats, - VmaDefragmentationContext *pContext) + const VmaDefragmentationInfo* pInfo, + VmaDefragmentationContext* pContext) { VMA_ASSERT(allocator && pInfo && pContext); - // Degenerate case: Nothing to defragment. - if(pInfo->allocationCount == 0 && pInfo->poolCount == 0) - { - return VK_SUCCESS; - } - - VMA_ASSERT(pInfo->allocationCount == 0 || pInfo->pAllocations != VMA_NULL); - VMA_ASSERT(pInfo->poolCount == 0 || pInfo->pPools != VMA_NULL); - VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->allocationCount, pInfo->pAllocations)); - VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->poolCount, pInfo->pPools)); - - VMA_DEBUG_LOG("vmaDefragmentationBegin"); + VMA_DEBUG_LOG("vmaBeginDefragmentation"); VMA_DEBUG_GLOBAL_MUTEX_LOCK - VkResult res = allocator->DefragmentationBegin(*pInfo, pStats, pContext); - - return res; + *pContext = vma_new(allocator, VmaDefragmentationContext_T)(allocator, *pInfo); + return VK_SUCCESS; } -VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragmentationEnd( +VMA_CALL_PRE VkResult VMA_CALL_POST vmaEndDefragmentation( VmaAllocator allocator, - VmaDefragmentationContext context) + VmaDefragmentationContext context, + VmaDefragmentationStats* pStats) { - VMA_ASSERT(allocator); + VMA_ASSERT(allocator && context); - VMA_DEBUG_LOG("vmaDefragmentationEnd"); + VMA_DEBUG_LOG("vmaEndDefragmentation"); - if(context != VK_NULL_HANDLE) - { - VMA_DEBUG_GLOBAL_MUTEX_LOCK - return allocator->DefragmentationEnd(context); - } - else - { - return VK_SUCCESS; - } + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + if (pStats) + context->GetStats(*pStats); + vma_delete(allocator, context); + return VK_SUCCESS; } VMA_CALL_PRE VkResult VMA_CALL_POST vmaBeginDefragmentationPass( - VmaAllocator allocator, - VmaDefragmentationContext context, - VmaDefragmentationPassInfo* pInfo - ) + VmaAllocator VMA_NOT_NULL allocator, + VmaDefragmentationContext VMA_NOT_NULL context, + VmaDefragmentationPassMoveInfo* VMA_NOT_NULL pPassInfo) { - VMA_ASSERT(allocator); - VMA_ASSERT(pInfo); + VMA_ASSERT(context && pPassInfo); 
VMA_DEBUG_LOG("vmaBeginDefragmentationPass"); VMA_DEBUG_GLOBAL_MUTEX_LOCK - if(context == VK_NULL_HANDLE) - { - pInfo->moveCount = 0; - return VK_SUCCESS; - } - - return allocator->DefragmentationPassBegin(pInfo, context); + return context->DefragmentPassBegin(*pPassInfo); } VMA_CALL_PRE VkResult VMA_CALL_POST vmaEndDefragmentationPass( - VmaAllocator allocator, - VmaDefragmentationContext context) + VmaAllocator VMA_NOT_NULL allocator, + VmaDefragmentationContext VMA_NOT_NULL context, + VmaDefragmentationPassMoveInfo* VMA_NOT_NULL pPassInfo) { - VMA_ASSERT(allocator); + VMA_ASSERT(context && pPassInfo); VMA_DEBUG_LOG("vmaEndDefragmentationPass"); - VMA_DEBUG_GLOBAL_MUTEX_LOCK - if(context == VK_NULL_HANDLE) - return VK_SUCCESS; + VMA_DEBUG_GLOBAL_MUTEX_LOCK - return allocator->DefragmentationPassEnd(context); + return context->DefragmentPassEnd(*pPassInfo); } VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory( @@ -17547,8 +16727,8 @@ VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBuffer( requiresDedicatedAllocation, prefersDedicatedAllocation, *pBuffer, // dedicatedBuffer - pBufferCreateInfo->usage, // dedicatedBufferUsage VK_NULL_HANDLE, // dedicatedImage + pBufferCreateInfo->usage, // dedicatedBufferImageUsage *pAllocationCreateInfo, VMA_SUBALLOCATION_TYPE_BUFFER, 1, // allocationCount @@ -17642,8 +16822,8 @@ VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBufferWithAlignment( requiresDedicatedAllocation, prefersDedicatedAllocation, *pBuffer, // dedicatedBuffer - pBufferCreateInfo->usage, // dedicatedBufferUsage VK_NULL_HANDLE, // dedicatedImage + pBufferCreateInfo->usage, // dedicatedBufferImageUsage *pAllocationCreateInfo, VMA_SUBALLOCATION_TYPE_BUFFER, 1, // allocationCount @@ -17763,8 +16943,8 @@ VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateImage( requiresDedicatedAllocation, prefersDedicatedAllocation, VK_NULL_HANDLE, // dedicatedBuffer - UINT32_MAX, // dedicatedBufferUsage *pImage, // dedicatedImage + pImageCreateInfo->usage, // dedicatedBufferImageUsage *pAllocationCreateInfo, suballocType, 1, // allocationCount @@ -17917,13 +17097,22 @@ VMA_CALL_PRE void VMA_CALL_POST vmaSetVirtualAllocationUserData(VmaVirtualBlock virtualBlock->SetAllocationUserData(allocation, pUserData); } -VMA_CALL_PRE void VMA_CALL_POST vmaCalculateVirtualBlockStats(VmaVirtualBlock VMA_NOT_NULL virtualBlock, - VmaStatInfo* VMA_NOT_NULL pStatInfo) +VMA_CALL_PRE void VMA_CALL_POST vmaGetVirtualBlockStatistics(VmaVirtualBlock VMA_NOT_NULL virtualBlock, + VmaStatistics* VMA_NOT_NULL pStats) { - VMA_ASSERT(virtualBlock != VK_NULL_HANDLE && pStatInfo != VMA_NULL); - VMA_DEBUG_LOG("vmaCalculateVirtualBlockStats"); + VMA_ASSERT(virtualBlock != VK_NULL_HANDLE && pStats != VMA_NULL); + VMA_DEBUG_LOG("vmaGetVirtualBlockStatistics"); VMA_DEBUG_GLOBAL_MUTEX_LOCK; - virtualBlock->CalculateStats(*pStatInfo); + virtualBlock->GetStatistics(*pStats); +} + +VMA_CALL_PRE void VMA_CALL_POST vmaCalculateVirtualBlockStatistics(VmaVirtualBlock VMA_NOT_NULL virtualBlock, + VmaDetailedStatistics* VMA_NOT_NULL pStats) +{ + VMA_ASSERT(virtualBlock != VK_NULL_HANDLE && pStats != VMA_NULL); + VMA_DEBUG_LOG("vmaCalculateVirtualBlockStatistics"); + VMA_DEBUG_GLOBAL_MUTEX_LOCK; + virtualBlock->CalculateDetailedStatistics(*pStats); } #if VMA_STATS_STRING_ENABLED @@ -18054,7 +17243,7 @@ bufferInfo.size = 65536; bufferInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT; VmaAllocationCreateInfo allocInfo = {}; -allocInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY; +allocInfo.usage = VMA_MEMORY_USAGE_AUTO; VkBuffer buffer; 
VmaAllocation allocation; @@ -18078,8 +17267,8 @@ appropriate members of VmaAllocationCreateInfo structure, as described below. You can also combine multiple methods. -# If you just want to find memory type index that meets your requirements, you - can use function: vmaFindMemoryTypeIndex(), vmaFindMemoryTypeIndexForBufferInfo(), - vmaFindMemoryTypeIndexForImageInfo(). + can use function: vmaFindMemoryTypeIndexForBufferInfo(), + vmaFindMemoryTypeIndexForImageInfo(), vmaFindMemoryTypeIndex(). -# If you want to allocate a region of device memory without association with any specific image or buffer, you can use function vmaAllocateMemory(). Usage of this function is not recommended and usually not needed. @@ -18090,9 +17279,10 @@ You can also combine multiple methods. vmaAllocateMemoryForBuffer(), vmaAllocateMemoryForImage(). For binding you should use functions: vmaBindBufferMemory(), vmaBindImageMemory() or their extended versions: vmaBindBufferMemory2(), vmaBindImageMemory2(). --# If you want to create a buffer or an image, allocate memory for it and bind +-# **This is the easiest and recommended way to use this library:** + If you want to create a buffer or an image, allocate memory for it and bind them together, all in one call, you can use function vmaCreateBuffer(), - vmaCreateImage(). This is the easiest and recommended way to use this library. + vmaCreateImage(). When using 3. or 4., the library internally queries Vulkan for memory types supported for that buffer or image (function `vkGetBufferMemoryRequirements()`) @@ -18110,11 +17300,12 @@ It is valid, although not very useful. The easiest way to specify memory requirements is to fill member VmaAllocationCreateInfo::usage using one of the values of enum #VmaMemoryUsage. It defines high level, common usage types. -For more details, see description of this enum. +Since version 3 of the library, it is recommended to use #VMA_MEMORY_USAGE_AUTO to let it select best memory type for your resource automatically. For example, if you want to create a uniform buffer that will be filled using -transfer only once or infrequently and used for rendering every frame, you can -do it using following code: +transfer only once or infrequently and then used for rendering every frame as a uniform buffer, you can +do it using following code. The buffer will most likely end up in a memory type with +`VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT` to be fast to access by the GPU device. \code VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO }; @@ -18122,13 +17313,56 @@ bufferInfo.size = 65536; bufferInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT; VmaAllocationCreateInfo allocInfo = {}; -allocInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY; +allocInfo.usage = VMA_MEMORY_USAGE_AUTO; VkBuffer buffer; VmaAllocation allocation; vmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &buffer, &allocation, nullptr); \endcode +If you have a preference for putting the resource in GPU (device) memory or CPU (host) memory +on systems with discrete graphics card that have the memories separate, you can use +#VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE or #VMA_MEMORY_USAGE_AUTO_PREFER_HOST. + +When using `VMA_MEMORY_USAGE_AUTO*` while you want to map the allocated memory, +you also need to specify one of the host access flags: +#VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT. 
+This will help the library decide about preferred memory type to ensure it has `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT`
+so you can map it.
+
+For example, a staging buffer that will be filled via mapped pointer and then
+used as a source of transfer to the buffer described previously can be created like this.
+It will likely end up in a memory type that is `HOST_VISIBLE` and `HOST_COHERENT`
+but not `HOST_CACHED` (meaning uncached, write-combined) and not `DEVICE_LOCAL` (meaning system RAM).
+
+\code
+VkBufferCreateInfo stagingBufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
+stagingBufferInfo.size = 65536;
+stagingBufferInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
+
+VmaAllocationCreateInfo stagingAllocInfo = {};
+stagingAllocInfo.usage = VMA_MEMORY_USAGE_AUTO;
+stagingAllocInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT;
+
+VkBuffer stagingBuffer;
+VmaAllocation stagingAllocation;
+vmaCreateBuffer(allocator, &stagingBufferInfo, &stagingAllocInfo, &stagingBuffer, &stagingAllocation, nullptr);
+\endcode
+
+For more examples of creating different kinds of resources, see chapter \ref usage_patterns.
+
+Usage values `VMA_MEMORY_USAGE_AUTO*` are legal to use only when the library knows
+about the resource being created by having `VkBufferCreateInfo` / `VkImageCreateInfo` passed,
+so they work with functions like: vmaCreateBuffer(), vmaCreateImage(), vmaFindMemoryTypeIndexForBufferInfo() etc.
+If you allocate raw memory using function vmaAllocateMemory(), you have to use other means of selecting
+memory type, as described below.
+
+\note
+Old usage values (`VMA_MEMORY_USAGE_GPU_ONLY`, `VMA_MEMORY_USAGE_CPU_ONLY`,
+`VMA_MEMORY_USAGE_CPU_TO_GPU`, `VMA_MEMORY_USAGE_GPU_TO_CPU`, `VMA_MEMORY_USAGE_CPU_COPY`)
+are still available and work the same way as in previous versions of the library
+for backward compatibility, but they are not recommended.
+
 \section choosing_memory_type_required_preferred_flags Required and preferred flags

 You can specify more detailed requirements by filling members
@@ -18142,7 +17376,7 @@ use following code:

 VmaAllocationCreateInfo allocInfo = {};
 allocInfo.requiredFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
 allocInfo.preferredFlags = VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
-allocInfo.flags = VMA_ALLOCATION_CREATE_MAPPED_BIT;
+allocInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT | VMA_ALLOCATION_CREATE_MAPPED_BIT;

 VkBuffer buffer;
 VmaAllocation allocation;
@@ -18152,8 +17386,8 @@ vmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &buffer, &allocation, nullpt

 A memory type is chosen that has all the required flags and as many preferred
 flags set as possible.
-If you use VmaAllocationCreateInfo::usage, it is just internally converted to
-a set of required and preferred flags.
+Value passed in VmaAllocationCreateInfo::usage is internally converted to a set of required and preferred flags,
+plus some extra "magic" (heuristics).

 \section choosing_memory_type_explicit_memory_types Explicit memory types

@@ -18220,6 +17454,13 @@ Mapping the same `VkDeviceMemory` block multiple times is illegal - only one map
This includes mapping disjoint regions. Mapping is not reference-counted internally by Vulkan.
Because of this, Vulkan Memory Allocator provides following facilities:

+\note If you want to be able to map an allocation, you need to specify one of the flags
+#VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT
+in VmaAllocationCreateInfo::flags.
These flags are required for an allocation to be mappable +when using #VMA_MEMORY_USAGE_AUTO or other `VMA_MEMORY_USAGE_AUTO*` enum values. +For other usage values they are ignored and every such allocation made in `HOST_VISIBLE` memory type is mappable, +but they can still be used for consistency. + \section memory_mapping_mapping_functions Mapping functions The library provides following functions for mapping of a specific #VmaAllocation: vmaMapMemory(), vmaUnmapMemory(). @@ -18232,16 +17473,15 @@ Example: \code // Having these objects initialized: - struct ConstantBuffer { ... }; -ConstantBuffer constantBufferData; +ConstantBuffer constantBufferData = ... -VmaAllocator allocator; -VkBuffer constantBuffer; -VmaAllocation constantBufferAllocation; +VmaAllocator allocator = ... +VkBuffer constantBuffer = ... +VmaAllocation constantBufferAllocation = ... // You can map and fill your buffer using following code: @@ -18278,8 +17518,9 @@ bufCreateInfo.size = sizeof(ConstantBuffer); bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT; VmaAllocationCreateInfo allocCreateInfo = {}; -allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY; -allocCreateInfo.flags = VMA_ALLOCATION_CREATE_MAPPED_BIT; +allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO; +allocCreateInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | + VMA_ALLOCATION_CREATE_MAPPED_BIT; VkBuffer buf; VmaAllocation alloc; @@ -18290,18 +17531,12 @@ vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allo memcpy(allocInfo.pMappedData, &constantBufferData, sizeof(constantBufferData)); \endcode -There are some exceptions though, when you should consider mapping memory only for a short period of time: - -- When operating system is Windows 7 or 8.x (Windows 10 is not affected because it uses WDDM2), - device is discrete AMD GPU, - and memory type is the special 256 MiB pool of `DEVICE_LOCAL + HOST_VISIBLE` memory - (selected when you use #VMA_MEMORY_USAGE_CPU_TO_GPU), - then whenever a memory block allocated from this memory type stays mapped - for the time of any call to `vkQueueSubmit()` or `vkQueuePresentKHR()`, this - block is migrated by WDDM to system RAM, which degrades performance. It doesn't - matter if that particular memory block is actually used by the command buffer - being submitted. -- Keeping many large memory blocks mapped may impact performance or stability of some debugging tools. +\note #VMA_ALLOCATION_CREATE_MAPPED_BIT by itself doesn't guarantee that the allocation will end up +in a mappable memory type. +For this, you need to also specify #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or +#VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT. +#VMA_ALLOCATION_CREATE_MAPPED_BIT only guarantees that if the memory is `HOST_VISIBLE`, the allocation will be mapped on creation. +For an example of how to make use of this fact, see section \ref usage_patterns_advanced_data_uploading. \section memory_mapping_cache_control Cache flush and invalidate @@ -18322,86 +17557,9 @@ In any memory type that is `HOST_VISIBLE` but not `HOST_COHERENT`, all allocatio within blocks are aligned to this value, so their offsets are always multiply of `nonCoherentAtomSize` and two different allocations never share same "line" of this size. -Please note that memory allocated with #VMA_MEMORY_USAGE_CPU_ONLY is guaranteed to be `HOST_COHERENT`. 
- -Also, Windows drivers from all 3 **PC** GPU vendors (AMD, Intel, NVIDIA) +Also, Windows drivers from all 3 PC GPU vendors (AMD, Intel, NVIDIA) currently provide `HOST_COHERENT` flag on all memory types that are -`HOST_VISIBLE`, so on this platform you may not need to bother. - -\section memory_mapping_finding_if_memory_mappable Finding out if memory is mappable - -It may happen that your allocation ends up in memory that is `HOST_VISIBLE` (available for mapping) -despite it wasn't explicitly requested. -For example, application may work on integrated graphics with unified memory (like Intel) or -allocation from video memory might have failed, so the library chose system memory as fallback. - -You can detect this case and map such allocation to access its memory on CPU directly, -instead of launching a transfer operation. -In order to do that: call vmaGetAllocationMemoryProperties() -and look for `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT` flag. - -\code -VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO }; -bufCreateInfo.size = sizeof(ConstantBuffer); -bufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT; - -VmaAllocationCreateInfo allocCreateInfo = {}; -allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY; -allocCreateInfo.preferredFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT; - -VkBuffer buf; -VmaAllocation alloc; -vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, nullptr); - -VkMemoryPropertyFlags memFlags; -vmaGetAllocationMemoryProperties(allocator, alloc, &memFlags); -if((memFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0) -{ - // Allocation ended up in mappable memory. You can map it and access it directly. - void* mappedData; - vmaMapMemory(allocator, alloc, &mappedData); - memcpy(mappedData, &constantBufferData, sizeof(constantBufferData)); - vmaUnmapMemory(allocator, alloc); -} -else -{ - // Allocation ended up in non-mappable memory. - // You need to create CPU-side buffer in VMA_MEMORY_USAGE_CPU_ONLY and make a transfer. -} -\endcode - -You can even use #VMA_ALLOCATION_CREATE_MAPPED_BIT flag while creating allocations -that are not necessarily `HOST_VISIBLE` (e.g. using #VMA_MEMORY_USAGE_GPU_ONLY). -If the allocation ends up in memory type that is `HOST_VISIBLE`, it will be persistently mapped and you can use it directly. -If not, the flag is just ignored. -Example: - -\code -VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO }; -bufCreateInfo.size = sizeof(ConstantBuffer); -bufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT; - -VmaAllocationCreateInfo allocCreateInfo = {}; -allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY; -allocCreateInfo.flags = VMA_ALLOCATION_CREATE_MAPPED_BIT; - -VkBuffer buf; -VmaAllocation alloc; -VmaAllocationInfo allocInfo; -vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo); - -if(allocInfo.pMappedData != nullptr) -{ - // Allocation ended up in mappable memory. - // It is persistently mapped. You can access it directly. - memcpy(allocInfo.pMappedData, &constantBufferData, sizeof(constantBufferData)); -} -else -{ - // Allocation ended up in non-mappable memory. - // You need to create CPU-side buffer in VMA_MEMORY_USAGE_CPU_ONLY and make a transfer. -} -\endcode +`HOST_VISIBLE`, so on PC you may not need to bother. 
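To make the cache-control rules above concrete, here is a minimal editorial sketch (not part of the original patch). It assumes an existing `allocator`, an allocation `alloc` created with #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT, and a host-side object `myData`; it writes through a mapped pointer and then flushes the written range, which is required on non-coherent memory types and harmless on `HOST_COHERENT` ones.

\code
// Hypothetical names: allocator, alloc and myData are assumed to already exist.
void* mappedData = nullptr;
if(vmaMapMemory(allocator, alloc, &mappedData) == VK_SUCCESS)
{
    memcpy(mappedData, &myData, sizeof(myData));
    // Offset and size are relative to the beginning of the allocation.
    vmaFlushAllocation(allocator, alloc, 0, sizeof(myData));
    vmaUnmapMemory(allocator, alloc);
}
\endcode

For reading back data written by the GPU, vmaInvalidateAllocation() would be used symmetrically, before the `memcpy()` in the opposite direction.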
\page staying_within_budget Staying within budget

@@ -18426,8 +17584,8 @@ To query for current memory usage and available budget, use function vmaGetHeapB
 Returned structure #VmaBudget contains quantities expressed in bytes, per Vulkan memory heap.

 Please note that this function returns different information and works faster than
-vmaCalculateStats(). vmaGetHeapBudgets() can be called every frame or even before every
-allocation, while vmaCalculateStats() is intended to be used rarely,
+vmaCalculateStatistics(). vmaGetHeapBudgets() can be called every frame or even before every
+allocation, while vmaCalculateStatistics() is intended to be used rarely,
 only to obtain statistical information, e.g. for debugging purposes.

 It is recommended to use <b>VK_EXT_memory_budget</b> device extension to obtain information
@@ -18457,20 +17615,27 @@ budget, by default the library still tries to create it, leaving it to the Vulka
 implementation whether the allocation succeeds or fails. You can change this behavior
 by using #VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT flag. With it, the allocation is
 not made if it would exceed the budget or if the budget is already exceeded.
-The allocation then fails with `VK_ERROR_OUT_OF_DEVICE_MEMORY`.
+VMA then tries to make the allocation from the next eligible Vulkan memory type.
+If all of them fail, the call then fails with `VK_ERROR_OUT_OF_DEVICE_MEMORY`.
 Example usage pattern may be to pass the #VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT flag
 when creating resources that are not essential for the application (e.g. the texture
 of a specific object) and not to pass it when creating critically important resources
 (e.g. render targets).

+On AMD graphics cards there is a custom vendor extension available: <b>VK_AMD_memory_overallocation_behavior</b>
+that allows controlling the behavior of the Vulkan implementation in out-of-memory cases -
+whether it should fail with an error code or still allow the allocation.
+Usage of this extension involves only passing an extra structure on Vulkan device creation,
+so it is out of scope of this library.
+
 Finally, you can also use #VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT flag to make sure
 a new allocation is created only when it fits inside one of the existing memory blocks.
 If it would require to allocate a new block, if fails instead with `VK_ERROR_OUT_OF_DEVICE_MEMORY`.
 This also ensures that the function call is very fast because it never goes to Vulkan
 to obtain a new block.

-Please note that creating \ref custom_memory_pools with VmaPoolCreateInfo::minBlockCount
-set to more than 0 will try to allocate memory blocks without checking whether they
+\note Creating \ref custom_memory_pools with VmaPoolCreateInfo::minBlockCount
+set to more than 0 will currently try to allocate memory blocks without checking whether they
 fit within budget.


@@ -18544,7 +17709,7 @@ finalMemReq.memoryTypeBits = img1MemReq.memoryTypeBits & img2MemReq.memoryTypeBi

// Validate if(finalMemReq.memoryTypeBits != 0)

 VmaAllocationCreateInfo allocCreateInfo = {};
-allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
+allocCreateInfo.preferredFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;

 VmaAllocation alloc;
 res = vmaAllocateMemory(allocator, &finalMemReq, &allocCreateInfo, &alloc, nullptr);
@@ -18573,7 +17738,7 @@ See chapter 11.8. "Memory Aliasing" of Vulkan specification or `VK_IMAGE_CREATE_

- You can create more complex layout where different images and buffers are bound at different offsets inside one large allocation.
For example, one can imagine a big texture used in some render passes, aliasing with a set of many small buffers -used between in some further passes. To bind a resource at non-zero offset of an allocation, +used between in some further passes. To bind a resource at non-zero offset in an allocation, use vmaBindBufferMemory2() / vmaBindImageMemory2(). - Before allocating memory for the resources you want to alias, check `memoryTypeBits` returned in memory requirements of each resource to make sure the bits overlap. @@ -18598,6 +17763,7 @@ It can be useful if you want to: - Reserve minimum or fixed amount of Vulkan memory always preallocated for that pool. - Use extra parameters for a set of your allocations that are available in #VmaPoolCreateInfo but not in #VmaAllocationCreateInfo - e.g., custom minimum alignment, custom `pNext` chain. +- Perform defragmentation on a specific subset of your allocations. To use custom memory pools: @@ -18644,6 +17810,14 @@ It is supported only when VmaPoolCreateInfo::blockSize = 0. To use this feature, set VmaAllocationCreateInfo::pool to the pointer to your custom pool and VmaAllocationCreateInfo::flags to #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT. +\note Excessive use of custom pools is a common mistake when using this library. +Custom pools may be useful for special purposes - when you want to +keep certain type of resources separate e.g. to reserve minimum amount of memory +for them or limit maximum amount of memory they can occupy. For most +resources this is not needed and so it is not recommended to create #VmaPool +objects and allocations out of them. Allocating from the default pool is sufficient. + + \section custom_memory_pools_MemTypeIndex Choosing memory type index When creating a pool, you must explicitly specify memory type index. @@ -18654,11 +17828,11 @@ that you are going to create in that pool. \code VkBufferCreateInfo exampleBufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO }; -exampleBufCreateInfo.size = 1024; // Whatever. -exampleBufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT; // Change if needed. +exampleBufCreateInfo.size = 1024; // Doesn't matter +exampleBufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT; VmaAllocationCreateInfo allocCreateInfo = {}; -allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY; // Change if needed. +allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO; uint32_t memTypeIndex; vmaFindMemoryTypeIndexForBufferInfo(allocator, &exampleBufCreateInfo, &allocCreateInfo, &memTypeIndex); @@ -18759,39 +17933,6 @@ you can achieve behavior of a ring buffer / queue. Ring buffer is available only in pools with one memory block - VmaPoolCreateInfo::maxBlockCount must be 1. Otherwise behavior is undefined. -\section buddy_algorithm Buddy allocation algorithm - -There is another allocation algorithm that can be used with custom pools, called -"buddy". Its internal data structure is based on a binary tree of blocks, each having -size that is a power of two and a half of its parent's size. When you want to -allocate memory of certain size, a free node in the tree is located. If it is too -large, it is recursively split into two halves (called "buddies"). However, if -requested allocation size is not a power of two, the size of the allocation is -aligned up to the nearest power of two and the remaining space is wasted. When -two buddy nodes become free, they are merged back into one larger node. 
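Continuing the memory-type-index example above, the sketch below (an editorial addition, not part of the original patch) shows how the found `memTypeIndex` might then be used: fill #VmaPoolCreateInfo, create the pool, allocate a buffer out of it by pointing VmaAllocationCreateInfo::pool at it, and finally destroy both. The buffer parameters are illustrative only.

\code
// Assumes `allocator` and `memTypeIndex` from the previous example.
VmaPoolCreateInfo poolCreateInfo = {};
poolCreateInfo.memoryTypeIndex = memTypeIndex;
poolCreateInfo.blockSize = 0;     // 0 = use the library's default block size.
poolCreateInfo.minBlockCount = 0;
poolCreateInfo.maxBlockCount = 0; // 0 = no limit on the number of blocks.

VmaPool pool;
VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
// Check res...

VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
bufCreateInfo.size = 1024;
bufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.pool = pool; // Allocate from the custom pool instead of a default one.

VkBuffer buf;
VmaAllocation alloc;
res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, nullptr);
// Check res...

// ... use the buffer ...

vmaDestroyBuffer(allocator, buf, alloc);
vmaDestroyPool(allocator, pool);
\endcode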
- - - -The advantage of buddy allocation algorithm over default algorithm is faster -allocation and deallocation, as well as smaller external fragmentation. The -disadvantage is more wasted space (internal fragmentation). -For more information, please search the Internet for "Buddy memory allocation" - -sources that describe this concept in general. - -To use buddy allocation algorithm with a custom pool, add flag -#VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT to VmaPoolCreateInfo::flags while creating -#VmaPool object. - -Several limitations apply to pools that use buddy algorithm: - -- It is recommended to use VmaPoolCreateInfo::blockSize that is a power of two. - Otherwise, only largest power of two smaller than the size is used for - allocations. The remaining space always stays unused. -- [Margins](@ref debugging_memory_usage_margins) and - [corruption detection](@ref debugging_memory_usage_corruption_detection) - don't work in such pools. -- [Defragmentation](@ref defragmentation) doesn't work with allocations made from - such pool. \page defragmentation Defragmentation @@ -18801,236 +17942,114 @@ to find a continuous range of free memory for a new allocation despite there is enough free space, just scattered across many small free ranges between existing allocations. -To mitigate this problem, you can use defragmentation feature: -structure #VmaDefragmentationInfo2, function vmaDefragmentationBegin(), vmaDefragmentationEnd(). -Given set of allocations, -this function can move them to compact used memory, ensure more continuous free -space and possibly also free some `VkDeviceMemory` blocks. - -What the defragmentation does is: - -- Updates #VmaAllocation objects to point to new `VkDeviceMemory` and offset. - After allocation has been moved, its VmaAllocationInfo::deviceMemory and/or - VmaAllocationInfo::offset changes. You must query them again using - vmaGetAllocationInfo() if you need them. -- Moves actual data in memory. - -What it doesn't do, so you need to do it yourself: - -- Recreate buffers and images that were bound to allocations that were defragmented and - bind them with their new places in memory. - You must use `vkDestroyBuffer()`, `vkDestroyImage()`, - `vkCreateBuffer()`, `vkCreateImage()`, vmaBindBufferMemory(), vmaBindImageMemory() - for that purpose and NOT vmaDestroyBuffer(), - vmaDestroyImage(), vmaCreateBuffer(), vmaCreateImage(), because you don't need to - destroy or create allocation objects! -- Recreate views and update descriptors that point to these buffers and images. - -\section defragmentation_cpu Defragmenting CPU memory - -Following example demonstrates how you can run defragmentation on CPU. -Only allocations created in memory types that are `HOST_VISIBLE` can be defragmented. -Others are ignored. - -The way it works is: - -- It temporarily maps entire memory blocks when necessary. -- It moves data using `memmove()` function. - -\code -// Given following variables already initialized: -VkDevice device; -VmaAllocator allocator; -std::vector<VkBuffer> buffers; -std::vector<VmaAllocation> allocations; - - -const uint32_t allocCount = (uint32_t)allocations.size(); -std::vector<VkBool32> allocationsChanged(allocCount); - -VmaDefragmentationInfo2 defragInfo = {}; -defragInfo.allocationCount = allocCount; -defragInfo.pAllocations = allocations.data(); -defragInfo.pAllocationsChanged = allocationsChanged.data(); -defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE; // No limit. -defragInfo.maxCpuAllocationsToMove = UINT32_MAX; // No limit. 
- -VmaDefragmentationContext defragCtx; -vmaDefragmentationBegin(allocator, &defragInfo, nullptr, &defragCtx); -vmaDefragmentationEnd(allocator, defragCtx); - -for(uint32_t i = 0; i < allocCount; ++i) -{ - if(allocationsChanged[i]) - { - // Destroy buffer that is immutably bound to memory region which is no longer valid. - vkDestroyBuffer(device, buffers[i], nullptr); - - // Create new buffer with same parameters. - VkBufferCreateInfo bufferInfo = ...; - vkCreateBuffer(device, &bufferInfo, nullptr, &buffers[i]); - - // You can make dummy call to vkGetBufferMemoryRequirements here to silence validation layer warning. - - // Bind new buffer to new memory region. Data contained in it is already moved. - VmaAllocationInfo allocInfo; - vmaGetAllocationInfo(allocator, allocations[i], &allocInfo); - vmaBindBufferMemory(allocator, allocations[i], buffers[i]); - } -} -\endcode - -Setting VmaDefragmentationInfo2::pAllocationsChanged is optional. -This output array tells whether particular allocation in VmaDefragmentationInfo2::pAllocations at the same index -has been modified during defragmentation. -You can pass null, but you then need to query every allocation passed to defragmentation -for new parameters using vmaGetAllocationInfo() if you might need to recreate and rebind a buffer or image associated with it. - -If you use [Custom memory pools](@ref choosing_memory_type_custom_memory_pools), -you can fill VmaDefragmentationInfo2::poolCount and VmaDefragmentationInfo2::pPools -instead of VmaDefragmentationInfo2::allocationCount and VmaDefragmentationInfo2::pAllocations -to defragment all allocations in given pools. -You cannot use VmaDefragmentationInfo2::pAllocationsChanged in that case. -You can also combine both methods. - -\section defragmentation_gpu Defragmenting GPU memory - -It is also possible to defragment allocations created in memory types that are not `HOST_VISIBLE`. -To do that, you need to pass a command buffer that meets requirements as described in -VmaDefragmentationInfo2::commandBuffer. The way it works is: - -- It creates temporary buffers and binds them to entire memory blocks when necessary. -- It issues `vkCmdCopyBuffer()` to passed command buffer. +To mitigate this problem, you can use defragmentation feature. +It doesn't happen automatically though and needs your cooperation, +because VMA is a low level library that only allocates memory. +It cannot recreate buffers and images in a new place as it doesn't remember the contents of `VkBufferCreateInfo` / `VkImageCreateInfo` structures. +It cannot copy their contents as it doesn't record any commands to a command buffer. Example: \code -// Given following variables already initialized: -VkDevice device; -VmaAllocator allocator; -VkCommandBuffer commandBuffer; -std::vector<VkBuffer> buffers; -std::vector<VmaAllocation> allocations; - - -const uint32_t allocCount = (uint32_t)allocations.size(); -std::vector<VkBool32> allocationsChanged(allocCount); - -VkCommandBufferBeginInfo cmdBufBeginInfo = ...; -vkBeginCommandBuffer(commandBuffer, &cmdBufBeginInfo); - -VmaDefragmentationInfo2 defragInfo = {}; -defragInfo.allocationCount = allocCount; -defragInfo.pAllocations = allocations.data(); -defragInfo.pAllocationsChanged = allocationsChanged.data(); -defragInfo.maxGpuBytesToMove = VK_WHOLE_SIZE; // Notice it is "GPU" this time. -defragInfo.maxGpuAllocationsToMove = UINT32_MAX; // Notice it is "GPU" this time. 
-defragInfo.commandBuffer = commandBuffer; +VmaDefragmentationInfo defragInfo = {}; +defragInfo.pool = myPool; +defragInfo.flags = VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FAST_BIT; VmaDefragmentationContext defragCtx; -vmaDefragmentationBegin(allocator, &defragInfo, nullptr, &defragCtx); - -vkEndCommandBuffer(commandBuffer); +VkResult res = vmaBeginDefragmentation(allocator, &defragInfo, &defragCtx); +// Check res... -// Submit commandBuffer. -// Wait for a fence that ensures commandBuffer execution finished. - -vmaDefragmentationEnd(allocator, defragCtx); - -for(uint32_t i = 0; i < allocCount; ++i) +for(;;) { - if(allocationsChanged[i]) + VmaDefragmentationPassMoveInfo pass; + res = vmaBeginDefragmentationPass(allocator, defragCtx, &pass); + if(res == VK_SUCCESS) + break; + else if(res == VK_INCOMPLETE) { - // Destroy buffer that is immutably bound to memory region which is no longer valid. - vkDestroyBuffer(device, buffers[i], nullptr); - - // Create new buffer with same parameters. - VkBufferCreateInfo bufferInfo = ...; - vkCreateBuffer(device, &bufferInfo, nullptr, &buffers[i]); - - // You can make dummy call to vkGetBufferMemoryRequirements here to silence validation layer warning. - - // Bind new buffer to new memory region. Data contained in it is already moved. - VmaAllocationInfo allocInfo; - vmaGetAllocationInfo(allocator, allocations[i], &allocInfo); - vmaBindBufferMemory(allocator, allocations[i], buffers[i]); + for(uint32_t i = 0; i < pass.moveCount; ++i) + { + //- Inspect pass.pMoves[i].srcAllocation, identify what buffer or image it represents. + //- Recreate this buffer or image at pass.pMoves[i].dstMemory, pass.pMoves[i].dstOffset. + //- Issue a vkCmdCopyBuffer/vkCmdCopyImage to copy its content to the new place. + } + //- Make sure the copy commands finished executing. + //- Update appropriate descriptors to point to the new places. + res = vmaEndDefragmentationPass(allocator, defragCtx, &pass); + if(res == VK_SUCCESS) + break; + else if(res != VK_INCOMPLETE) + // Handle error... } + else + // Handle error... } -\endcode - -You can combine these two methods by specifying non-zero `maxGpu*` as well as `maxCpu*` parameters. -The library automatically chooses best method to defragment each memory pool. - -You may try not to block your entire program to wait until defragmentation finishes, -but do it in the background, as long as you carefully fullfill requirements described -in function vmaDefragmentationBegin(). -\section defragmentation_additional_notes Additional notes - -It is only legal to defragment allocations bound to: - -- buffers -- images created with `VK_IMAGE_CREATE_ALIAS_BIT`, `VK_IMAGE_TILING_LINEAR`, and - being currently in `VK_IMAGE_LAYOUT_GENERAL` or `VK_IMAGE_LAYOUT_PREINITIALIZED`. - -Defragmentation of images created with `VK_IMAGE_TILING_OPTIMAL` or in any other -layout may give undefined results. - -If you defragment allocations bound to images, new images to be bound to new -memory region after defragmentation should be created with `VK_IMAGE_LAYOUT_PREINITIALIZED` -and then transitioned to their original layout from before defragmentation if -needed using an image memory barrier. +vmaEndDefragmentation(allocator, defragCtx, nullptr); +\endcode -While using defragmentation, you may experience validation layer warnings, which you just need to ignore. -See [Validation layer warnings](@ref general_considerations_validation_layer_warnings). 
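As a complement to the example above, here is one possible shape of the pass-loop body for the simple case where every moved allocation is a buffer. This is an editorial sketch, not part of the original patch: `device`, `commandBuffer` and the `MyBuffer` bookkeeping struct (retrieved from the allocation's `pUserData`) are hypothetical, and error handling, images and synchronization are omitted.

\code
for(uint32_t i = 0; i < pass.moveCount; ++i)
{
    VmaAllocationInfo srcInfo;
    vmaGetAllocationInfo(allocator, pass.pMoves[i].srcAllocation, &srcInfo);
    MyBuffer* myBuf = (MyBuffer*)srcInfo.pUserData; // Hypothetical user-side bookkeeping.

    // Recreate the buffer with the same parameters as the original one.
    VkBuffer newBuf;
    vkCreateBuffer(device, &myBuf->createInfo, nullptr, &newBuf);

    // Bind it at the destination chosen by the defragmentation algorithm.
    vkBindBufferMemory(device, newBuf, pass.pMoves[i].dstMemory, pass.pMoves[i].dstOffset);

    // Record the copy; it must finish executing before vmaEndDefragmentationPass().
    VkBufferCopy region = { 0, 0, myBuf->createInfo.size };
    vkCmdCopyBuffer(commandBuffer, myBuf->buffer, newBuf, 1, &region);

    myBuf->oldBuffer = myBuf->buffer; // Destroy once the copy has completed.
    myBuf->buffer = newBuf;
}
\endcode

Destroying the old `VkBuffer` handles and updating any descriptors that point at them remains the application's responsibility, as described in the surrounding text.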
+You can defragment a specific custom pool by setting VmaDefragmentationInfo::pool +(like in the example above) or all the default pools by setting this member to null. -Please don't expect memory to be fully compacted after defragmentation. -Algorithms inside are based on some heuristics that try to maximize number of Vulkan -memory blocks to make totally empty to release them, as well as to maximize continuous -empty space inside remaining blocks, while minimizing the number and size of allocations that -need to be moved. Some fragmentation may still remain - this is normal. +Unlike in previous iterations of the defragmentation API, there is no list of "movable" allocations passed as a parameter. +Defragmentation algorithm tries to move all suitable allocations. +You can, however, refuse to move some of them inside a defragmentation pass, by setting +`pass.pMoves[i].operation` to #VMA_DEFRAGMENTATION_MOVE_OPERATION_IGNORE. +However, this is not recommended and may result in suboptimal packing of the allocations after defragmentation. +If you cannot ensure any allocation can be moved, it is better to keep movable allocations separate in a custom pool. -\section defragmentation_custom_algorithm Writing custom defragmentation algorithm +You can also decide to destroy an allocation instead of moving it. +You should then set `pass.pMoves[i].operation` to #VMA_DEFRAGMENTATION_MOVE_OPERATION_DESTROY. -If you want to implement your own, custom defragmentation algorithm, -there is infrastructure prepared for that, -but it is not exposed through the library API - you need to hack its source code. -Here are steps needed to do this: +You can perform the defragmentation incrementally to limit the number of allocations and bytes to be moved +in each pass, e.g. to call it in sync with render frames and not to experience too big hitches. +See members: VmaDefragmentationInfo::maxBytesPerPass, VmaDefragmentationInfo::maxAllocationsPerPass. --# Main thing you need to do is to define your own class derived from base abstract - class `VmaDefragmentationAlgorithm` and implement your version of its pure virtual methods. - See definition and comments of this class for details. --# Your code needs to interact with device memory block metadata. - If you need more access to its data than it is provided by its public interface, - declare your new class as a friend class e.g. in class `VmaBlockMetadata_Generic`. --# If you want to create a flag that would enable your algorithm or pass some additional - flags to configure it, add them to `VmaDefragmentationFlagBits` and use them in - VmaDefragmentationInfo2::flags. --# Modify function `VmaBlockVectorDefragmentationContext::Begin` to create object - of your new class whenever needed. +It is also safe to perform the defragmentation asynchronously to render frames and other Vulkan and VMA +usage, possibly from multiple threads, with the exception that allocations +returned in VmaDefragmentationPassMoveInfo::pMoves shouldn't be destroyed until the defragmentation pass is ended. \page statistics Statistics -This library contains functions that return information about its internal state, +This library contains several functions that return information about its internal state, especially the amount of memory allocated from Vulkan. -Please keep in mind that these functions need to traverse all internal data structures -to gather these information, so they may be quite time-consuming. -Don't call them too often. 
\section statistics_numeric_statistics Numeric statistics
-You can query for overall statistics of the allocator using function vmaCalculateStats().
-Information are returned using structure #VmaStats.
-It contains #VmaStatInfo - number of allocated blocks, number of allocations
-(occupied ranges in these blocks), number of unused (free) ranges in these blocks,
-number of bytes used and unused (but still allocated from Vulkan) and other information.
-They are summed across memory heaps, memory types and total for whole allocator.
+If you need to obtain basic statistics about memory usage per heap, together with current budget,
+you can call function vmaGetHeapBudgets() and inspect structure #VmaBudget.
+This is useful to keep track of memory usage and stay within budget
+(see also \ref staying_within_budget).
+Example:
-You can query for statistics of a custom pool using function vmaGetPoolStats().
-Information are returned using structure #VmaPoolStats.
+\code
+uint32_t heapIndex = ...
+
+VmaBudget budgets[VK_MAX_MEMORY_HEAPS];
+vmaGetHeapBudgets(allocator, budgets);
+
+printf("My heap currently has %u allocations taking %llu B,\n",
+    budgets[heapIndex].statistics.allocationCount,
+    budgets[heapIndex].statistics.allocationBytes);
+printf("allocated out of %u Vulkan device memory blocks taking %llu B,\n",
+    budgets[heapIndex].statistics.blockCount,
+    budgets[heapIndex].statistics.blockBytes);
+printf("Vulkan reports total usage %llu B with budget %llu B.\n",
+    budgets[heapIndex].usage,
+    budgets[heapIndex].budget);
+\endcode
+
+You can query for more detailed statistics per memory heap, type, and totals,
+including minimum and maximum allocation size and unused range size,
+by calling function vmaCalculateStatistics() and inspecting structure #VmaTotalStatistics.
+This function is slower though, as it has to traverse all the internal data structures,
+so it should be used only for debugging purposes.
-You can query for information about specific allocation using function vmaGetAllocationInfo().
+You can query for statistics of a custom pool using function vmaGetPoolStatistics()
+or vmaCalculatePoolStatistics().
+
+You can query for information about a specific allocation using function vmaGetAllocationInfo().
It fills structure #VmaAllocationInfo.
\section statistics_json_dump JSON dump
@@ -19047,7 +18066,7 @@ The format of this JSON string is not part of official documentation of the libr
but it will not change in backward-incompatible way without increasing library
major version number and appropriate mention in changelog.
-The JSON string contains all the data that can be obtained using vmaCalculateStats().
+The JSON string contains all the data that can be obtained using vmaCalculateStatistics().
It can also contain detailed map of allocated memory blocks and their regions - free and occupied by allocations.
This allows e.g. to visualize the memory or assess fragmentation.
@@ -19064,18 +18083,17 @@ some handle, index, key, ordinal number or any other value that would associate
the allocation with your custom metadata.
\code
-VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
-// Fill bufferInfo...
+VkBufferCreateInfo bufCreateInfo = ...
MyBufferMetadata* pMetadata = CreateBufferMetadata(); VmaAllocationCreateInfo allocCreateInfo = {}; -allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY; +allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO; allocCreateInfo.pUserData = pMetadata; VkBuffer buffer; VmaAllocation allocation; -vmaCreateBuffer(allocator, &bufferInfo, &allocCreateInfo, &buffer, &allocation, nullptr); +vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buffer, &allocation, nullptr); \endcode The pointer may be later retrieved as VmaAllocationInfo::pUserData: @@ -19089,7 +18107,7 @@ MyBufferMetadata* pMetadata = (MyBufferMetadata*)allocInfo.pUserData; It can also be changed using function vmaSetAllocationUserData(). Values of (non-zero) allocations' `pUserData` are printed in JSON report created by -vmaBuildStatsString(), in hexadecimal form. +vmaBuildStatsString() in hexadecimal form. \section allocation_names Allocation names @@ -19097,19 +18115,18 @@ There is alternative mode available where `pUserData` pointer is used to point t a null-terminated string, giving a name to the allocation. To use this mode, set #VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT flag in VmaAllocationCreateInfo::flags. Then `pUserData` passed as VmaAllocationCreateInfo::pUserData or argument to -vmaSetAllocationUserData() must be either null or pointer to a null-terminated string. +vmaSetAllocationUserData() must be either null or a pointer to a null-terminated string. The library creates internal copy of the string, so the pointer you pass doesn't need to be valid for whole lifetime of the allocation. You can free it after the call. \code -VkImageCreateInfo imageInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO }; -// Fill imageInfo... +VkImageCreateInfo imageInfo = ... std::string imageName = "Texture: "; imageName += fileName; VmaAllocationCreateInfo allocCreateInfo = {}; -allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY; +allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO; allocCreateInfo.flags = VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT; allocCreateInfo.pUserData = imageName.c_str(); @@ -19265,15 +18282,17 @@ It might be more convenient, but you need to make sure to use this new unit cons \section virtual_allocator_statistics Statistics -You can obtain statistics of a virtual block using vmaCalculateVirtualBlockStats(). -The function fills structure #VmaStatInfo - same as used by the normal Vulkan memory allocator. +You can obtain statistics of a virtual block using vmaGetVirtualBlockStatistics() +(to get brief statistics that are fast to calculate) +or vmaCalculateVirtualBlockStatistics() (to get more detailed statistics, slower to calculate). +The functions fill structures #VmaStatistics, #VmaDetailedStatistics respectively - same as used by the normal Vulkan memory allocator. Example: \code -VmaStatInfo statInfo; -vmaCalculateVirtualBlockStats(block, &statInfo); +VmaStatistics stats; +vmaGetVirtualBlockStatistics(block, &stats); printf("My virtual block has %llu bytes used by %u virtual allocations\n", - statInfo.usedBytes, statInfo.allocationCount); + stats.allocationBytes, stats.allocationCount); \endcode You can also request a full list of allocations and free regions as a string in JSON format by calling @@ -19353,7 +18372,6 @@ allocations, which have their own memory block of specific size. It is thus not applied to allocations made using #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT flag or those automatically decided to put into dedicated allocations, e.g. 
due to its large size or recommended by VK_KHR_dedicated_allocation extension. -Margins are also not active in custom pools created with #VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT flag. Margins appear in [JSON dump](@ref statistics_json_dump) as part of free space. @@ -19439,33 +18457,14 @@ Contrary to Direct3D 12, Vulkan doesn't have a concept of alignment of the entir \page usage_patterns Recommended usage patterns +Vulkan gives great flexibility in memory allocation. +This chapter shows the most common patterns. + See also slides from talk: [Sawicki, Adam. Advanced Graphics Techniques Tutorial: Memory management in Vulkan and DX12. Game Developers Conference, 2018](https://www.gdcvault.com/play/1025458/Advanced-Graphics-Techniques-Tutorial-New) -\section usage_patterns_common_mistakes Common mistakes - -<b>Use of CPU_TO_GPU instead of CPU_ONLY memory</b> - -#VMA_MEMORY_USAGE_CPU_TO_GPU is recommended only for resources that will be -mapped and written by the CPU, as well as read directly by the GPU - like some -buffers or textures updated every frame (dynamic). If you create a staging copy -of a resource to be written by CPU and then used as a source of transfer to -another resource placed in the GPU memory, that staging resource should be -created with #VMA_MEMORY_USAGE_CPU_ONLY. Please read the descriptions of these -enums carefully for details. - -<b>Unnecessary use of custom pools</b> - -\ref custom_memory_pools may be useful for special purposes - when you want to -keep certain type of resources separate e.g. to reserve minimum amount of memory -for them or limit maximum amount of memory they can occupy. For most -resources this is not needed and so it is not recommended to create #VmaPool -objects and allocations out of them. Allocating from the default pool is sufficient. - -\section usage_patterns_simple Simple patterns - -\subsection usage_patterns_simple_render_targets Render targets +\section usage_patterns_gpu_only GPU-only resource <b>When:</b> Any resources that you frequently write and read on GPU, @@ -19473,123 +18472,216 @@ e.g. images used as color attachments (aka "render targets"), depth-stencil atta images/buffers used as storage image/buffer (aka "Unordered Access View (UAV)"). <b>What to do:</b> -Create them in video memory that is fastest to access from GPU using -#VMA_MEMORY_USAGE_GPU_ONLY. +Let the library select the optimal memory type, which will likely have `VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT`. -Consider using [VK_KHR_dedicated_allocation](@ref vk_khr_dedicated_allocation) extension -and/or manually creating them as dedicated allocations using #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT, -especially if they are large or if you plan to destroy and recreate them e.g. when -display resolution changes. 
+\code
+VkImageCreateInfo imgCreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
+imgCreateInfo.imageType = VK_IMAGE_TYPE_2D;
+imgCreateInfo.extent.width = 3840;
+imgCreateInfo.extent.height = 2160;
+imgCreateInfo.extent.depth = 1;
+imgCreateInfo.mipLevels = 1;
+imgCreateInfo.arrayLayers = 1;
+imgCreateInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
+imgCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
+imgCreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
+imgCreateInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
+imgCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;
+
+VmaAllocationCreateInfo allocCreateInfo = {};
+allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
+allocCreateInfo.flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
+
+VkImage img;
+VmaAllocation alloc;
+vmaCreateImage(allocator, &imgCreateInfo, &allocCreateInfo, &img, &alloc, nullptr);
+\endcode
+
+<b>Also consider:</b>
+Creating them as dedicated allocations using #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT can be beneficial,
+especially if they are large or if you plan to destroy and recreate them with different sizes
+e.g. when display resolution changes.
Prefer to create such resources first and all other GPU resources (like textures and vertex buffers) later.
-\subsection usage_patterns_simple_immutable_resources Immutable resources
+
+\section usage_patterns_staging_copy_upload Staging copy for upload
<b>When:</b>
-Any resources that you fill on CPU only once (aka "immutable") or infrequently
-and then read frequently on GPU,
-e.g. textures, vertex and index buffers, constant buffers that don't change often.
+A "staging" buffer that you want to map and fill from CPU code, then use as a source of transfer
+to some GPU resource.
<b>What to do:</b>
-Create them in video memory that is fastest to access from GPU using
-#VMA_MEMORY_USAGE_GPU_ONLY.
+Use flag #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT.
+Let the library select the optimal memory type, which will always have `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT`.
-To initialize content of such resource, create a CPU-side (aka "staging") copy of it
-in system memory - #VMA_MEMORY_USAGE_CPU_ONLY, map it, fill it,
-and submit a transfer from it to the GPU resource.
-You can keep the staging copy if you need it for another upload transfer in the future.
-If you don't, you can destroy it or reuse this buffer for uploading different resource
-after the transfer finishes.
+\code
+VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
+bufCreateInfo.size = 65536;
+bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
-Prefer to create just buffers in system memory rather than images, even for uploading textures.
-Use `vkCmdCopyBufferToImage()`.
-Dont use images with `VK_IMAGE_TILING_LINEAR`.
+VmaAllocationCreateInfo allocCreateInfo = {};
+allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
+allocCreateInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT |
+    VMA_ALLOCATION_CREATE_MAPPED_BIT;
+
+VkBuffer buf;
+VmaAllocation alloc;
+VmaAllocationInfo allocInfo;
+vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
-\subsection usage_patterns_dynamic_resources Dynamic resources
+...
-<b>When:</b>
-Any resources that change frequently (aka "dynamic"), e.g. every frame or every draw call,
-written on CPU, read on GPU.
+memcpy(allocInfo.pMappedData, myData, myDataSize);
+\endcode
-<b>What to do:</b>
-Create them using #VMA_MEMORY_USAGE_CPU_TO_GPU.
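+For illustration, a minimal sketch of the transfer that typically follows the `memcpy()` above
+(`gpuBuf` is a hypothetical device-local destination buffer created with
+`VK_BUFFER_USAGE_TRANSFER_DST_BIT`, and `cmdBuf` a command buffer currently being recorded):
+
+\code
+VkBufferCopy copyRegion = { 0, 0, myDataSize }; // srcOffset, dstOffset, size
+vkCmdCopyBuffer(cmdBuf, buf, gpuBuf, 1, &copyRegion);
+// Submit cmdBuf and wait for it to finish (e.g. on a fence)
+// before destroying or reusing the staging buffer `buf`.
+\endcode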
-You can map it and write to it directly on CPU, as well as read from it on GPU.
+<b>Also consider:</b>
+You can map the allocation using vmaMapMemory() or you can create it as persistently mapped
+using #VMA_ALLOCATION_CREATE_MAPPED_BIT, as in the example above.
-This is a more complex situation. Different solutions are possible,
-and the best one depends on specific GPU type, but you can use this simple approach for the start.
-Prefer to write to such resource sequentially (e.g. using `memcpy`).
-Don't perform random access or any reads from it on CPU, as it may be very slow.
-Also note that textures written directly from the host through a mapped pointer need to be in LINEAR not OPTIMAL layout.
-\subsection usage_patterns_readback Readback
+\section usage_patterns_readback Readback
<b>When:</b>
-Resources that contain data written by GPU that you want to read back on CPU,
+Buffers for data written by or transferred from the GPU that you want to read back on the CPU,
e.g. results of some computations.
<b>What to do:</b>
-Create them using #VMA_MEMORY_USAGE_GPU_TO_CPU.
-You can write to them directly on GPU, as well as map and read them on CPU.
-
-\section usage_patterns_advanced Advanced patterns
+Use flag #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT.
+Let the library select the optimal memory type, which will always have `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT`
+and `VK_MEMORY_PROPERTY_HOST_CACHED_BIT`.
-\subsection usage_patterns_integrated_graphics Detecting integrated graphics
-
-You can support integrated graphics (like Intel HD Graphics, AMD APU) better
-by detecting it in Vulkan.
-To do it, call `vkGetPhysicalDeviceProperties()`, inspect
-`VkPhysicalDeviceProperties::deviceType` and look for `VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU`.
-When you find it, you can assume that memory is unified and all memory types are comparably fast
-to access from GPU, regardless of `VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT`.
+\code
+VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
+bufCreateInfo.size = 65536;
+bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT;
-You can then sum up sizes of all available memory heaps and treat them as useful for
-your GPU resources, instead of only `DEVICE_LOCAL` ones.
-You can also prefer to create your resources in memory types that are `HOST_VISIBLE` to map them
-directly instead of submitting explicit transfer (see below).
+VmaAllocationCreateInfo allocCreateInfo = {};
+allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
+allocCreateInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT |
+    VMA_ALLOCATION_CREATE_MAPPED_BIT;
-\subsection usage_patterns_direct_vs_transfer Direct access versus transfer
+VkBuffer buf;
+VmaAllocation alloc;
+VmaAllocationInfo allocInfo;
+vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
-For resources that you frequently write on CPU and read on GPU, many solutions are possible:
+...
--# Create one copy in video memory using #VMA_MEMORY_USAGE_GPU_ONLY,
-  second copy in system memory using #VMA_MEMORY_USAGE_CPU_ONLY and submit explicit transfer each time.
--# Create just a single copy using #VMA_MEMORY_USAGE_CPU_TO_GPU, map it and fill it on CPU,
-  read it directly on GPU.
--# Create just a single copy using #VMA_MEMORY_USAGE_CPU_ONLY, map it and fill it on CPU,
-  read it directly on GPU.
+const float* downloadedData = (const float*)allocInfo.pMappedData;
+\endcode
-Which solution is the most efficient depends on your resource and especially on the GPU.
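+If the memory type selected for this allocation happens not to be `HOST_COHERENT`, the mapped data
+should be invalidated before reading it on the CPU. A minimal sketch (the call is expected to be
+harmless for `HOST_COHERENT` memory, where it should amount to a no-op):
+
+\code
+// Make GPU writes visible to the CPU before reading through the mapped pointer.
+vmaInvalidateAllocation(allocator, alloc, 0, VK_WHOLE_SIZE);
+\endcode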
-It is best to measure it and then make the decision.
-Some general recommendations:
-- On integrated graphics use (2) or (3) to avoid unnecessary time and memory overhead
-  related to using a second copy and making transfer.
-- For small resources (e.g. constant buffers) use (2).
-  Discrete AMD cards have special 256 MiB pool of video memory that is directly mappable.
-  Even if the resource ends up in system memory, its data may be cached on GPU after first
-  fetch over PCIe bus.
-- For larger resources (e.g. textures), decide between (1) and (2).
-  You may want to differentiate NVIDIA and AMD, e.g. by looking for memory type that is
-  both `DEVICE_LOCAL` and `HOST_VISIBLE`. When you find it, use (2), otherwise use (1).
+\section usage_patterns_advanced_data_uploading Advanced data uploading
+
+For resources that you frequently write on CPU via mapped pointer and
+frequently read on GPU e.g. as a uniform buffer (also called "dynamic"), multiple options are possible:
+
+-# The easiest solution is to have one copy of the resource in `HOST_VISIBLE` memory,
+   even if it means system RAM (not `DEVICE_LOCAL`) on systems with a discrete graphics card,
+   and make the device reach out to that resource directly.
+   - Reads performed by the device will then go through PCI Express bus.
+     The performance of this access may be limited, but it may be fine depending on the size
+     of this resource (whether it is small enough to quickly end up in GPU cache) and the sparsity
+     of access.
+-# On systems with unified memory (e.g. AMD APU or Intel integrated graphics, mobile chips),
+   a memory type may be available that is both `HOST_VISIBLE` (available for mapping) and `DEVICE_LOCAL`
+   (fast to access from the GPU). Then, it is likely the best choice for such type of resource.
+-# Systems with a discrete graphics card and separate video memory may or may not expose
+   a memory type that is both `HOST_VISIBLE` and `DEVICE_LOCAL`, also known as Base Address Register (BAR).
+   If they do, it represents a piece of VRAM (or entire VRAM, if ReBAR is enabled in the motherboard BIOS)
+   that is available to CPU for mapping.
+   - Writes performed by the host to that memory go through PCI Express bus.
+     The performance of these writes may be limited, but it may be fine, especially on PCIe 4.0,
+     as long as rules of using uncached and write-combined memory are followed - only sequential writes and no reads.
+-# Finally, you may need or prefer to create a separate copy of the resource in `DEVICE_LOCAL` memory,
+   a separate "staging" copy in `HOST_VISIBLE` memory and perform an explicit transfer command between them.
+
+Thankfully, VMA offers an aid to create and use such resources in the way optimal
+for the current Vulkan device. To help the library make the best choice,
+use flag #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT together with
+#VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT.
+It will then prefer a memory type that is both `DEVICE_LOCAL` and `HOST_VISIBLE` (integrated memory or BAR),
+but if no such memory type is available or allocation from it fails
+(PC graphics cards have only 256 MB of BAR by default, unless ReBAR is supported and enabled in BIOS),
+it will fall back to `DEVICE_LOCAL` memory for fast GPU access.
+It is then up to you to detect that the allocation ended up in a memory type that is not `HOST_VISIBLE`,
+so you need to create another "staging" allocation and perform explicit transfers.
-Similarly, for resources that you frequently write on GPU and read on CPU, multiple
-solutions are possible:
+\code
+VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
+bufCreateInfo.size = 65536;
+bufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
+
+VmaAllocationCreateInfo allocCreateInfo = {};
+allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
+allocCreateInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT |
+    VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT |
+    VMA_ALLOCATION_CREATE_MAPPED_BIT;
+
+VkBuffer buf;
+VmaAllocation alloc;
+VmaAllocationInfo allocInfo;
+vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
--# Create one copy in video memory using #VMA_MEMORY_USAGE_GPU_ONLY,
-  second copy in system memory using #VMA_MEMORY_USAGE_GPU_TO_CPU and submit explicit tranfer each time.
--# Create just single copy using #VMA_MEMORY_USAGE_GPU_TO_CPU, write to it directly on GPU,
-  map it and read it on CPU.
+VkMemoryPropertyFlags memPropFlags;
+vmaGetAllocationMemoryProperties(allocator, alloc, &memPropFlags);
-You should take some measurements to decide which option is faster in case of your specific
-resource.
+if(memPropFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT)
+{
+    // Allocation ended up in a mappable memory and is already mapped - write to it directly.
-Note that textures accessed directly from the host through a mapped pointer need to be in LINEAR layout,
-which may slow down their usage on the device.
-Textures accessed only by the device and transfer operations can use OPTIMAL layout.
+    // [Executed in runtime]:
+    memcpy(allocInfo.pMappedData, myData, myDataSize);
+}
+else
+{
+    // Allocation ended up in a non-mappable memory - need to transfer.
+    VkBufferCreateInfo stagingBufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
+    stagingBufCreateInfo.size = 65536;
+    stagingBufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
+
+    VmaAllocationCreateInfo stagingAllocCreateInfo = {};
+    stagingAllocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
+    stagingAllocCreateInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT |
+        VMA_ALLOCATION_CREATE_MAPPED_BIT;
+
+    VkBuffer stagingBuf;
+    VmaAllocation stagingAlloc;
+    VmaAllocationInfo stagingAllocInfo;
+    vmaCreateBuffer(allocator, &stagingBufCreateInfo, &stagingAllocCreateInfo,
+        &stagingBuf, &stagingAlloc, &stagingAllocInfo);
+
+    // [Executed in runtime]:
+    memcpy(stagingAllocInfo.pMappedData, myData, myDataSize);
+    VkBufferCopy bufCopy = {
+        0, // srcOffset
+        0, // dstOffset
+        myDataSize }; // size
+    vkCmdCopyBuffer(cmdBuf, stagingBuf, buf, 1, &bufCopy);
+}
+\endcode
-If you don't want to specialize your code for specific types of GPUs, you can still make
-an simple optimization for cases when your resource ends up in mappable memory to use it
-directly in this case instead of creating CPU-side staging copy.
-For details see [Finding out if memory is mappable](@ref memory_mapping_finding_if_memory_mappable).
+\section usage_patterns_other_use_cases Other use cases
+
+Here are some other, less obvious use cases and their recommended settings:
+
+- An image that is used only as transfer source and destination, but it should stay on the device,
+  as it is used to temporarily store a copy of some texture, e.g. from the current to the next frame,
+  for temporal antialiasing or other temporal effects.
+  - Use `VkImageCreateInfo::usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT`
+  - Use VmaAllocationCreateInfo::usage = #VMA_MEMORY_USAGE_AUTO
+- An image that is used only as transfer source and destination, but it should be placed
+  in the system RAM even though it doesn't need to be mapped, because it serves as a "swap" copy to evict
+  least recently used textures from VRAM.
+  - Use `VkImageCreateInfo::usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT`
+  - Use VmaAllocationCreateInfo::usage = #VMA_MEMORY_USAGE_AUTO_PREFER_HOST,
+    as VMA needs a hint here to differentiate from the previous case.
+- A buffer that you want to map and write from the CPU, directly read from the GPU
+  (e.g. as a uniform or vertex buffer), but you have a clear preference to place it in device or
+  host memory due to its large size.
+  - Use `VkBufferCreateInfo::usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT`
+  - Use VmaAllocationCreateInfo::usage = #VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE or #VMA_MEMORY_USAGE_AUTO_PREFER_HOST
+  - Use VmaAllocationCreateInfo::flags = #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT
\page configuration Configuration
@@ -19628,6 +18720,8 @@ by using a helper library like [volk](https://github.com/zeux/volk).
Third, VMA tries to fetch remaining pointers that are still null by calling
`vkGetInstanceProcAddr` and `vkGetDeviceProcAddr` on its own.
+You only need to fill in VmaVulkanFunctions::vkGetInstanceProcAddr and VmaVulkanFunctions::vkGetDeviceProcAddr.
+Other pointers will be fetched automatically.
If you want to disable this feature, set configuration macro: `#define VMA_DYNAMIC_VULKAN_FUNCTIONS 0`.
Finally, all the function pointers required by the library (considering selected
@@ -19652,7 +18746,7 @@ VmaAllocatorCreateInfo::pDeviceMemoryCallbacks.
\section heap_memory_limit Device heap memory limit
When device memory of certain heap runs out of free space, new allocations may
-fail (returning error code) or they may succeed, silently pushing some existing
+fail (returning an error code) or they may succeed, silently pushing some existing
memory blocks from GPU VRAM to system RAM (which degrades performance). This behavior is implementation-dependent - it depends on GPU vendor and graphics driver.
@@ -19673,10 +18767,14 @@ VK_KHR_dedicated_allocation is a Vulkan extension which can be used to improve
performance on some GPUs. It augments Vulkan API with possibility to query
driver whether it prefers particular buffer or image to have its own, dedicated
allocation (separate `VkDeviceMemory` block) for better efficiency - to be able
-to do some internal optimizations.
+to do some internal optimizations. The extension is supported by this library.
+It will be used automatically when enabled.
+
+It has been promoted to core Vulkan 1.1, so if you use an eligible Vulkan version
+and inform VMA about it by setting VmaAllocatorCreateInfo::vulkanApiVersion,
+you are all set.
-The extension is supported by this library. It will be used automatically when
-enabled. To enable it:
+Otherwise, if you want to use it as an extension:
1 . When creating Vulkan device, check if following 2 device extensions are supported (call `vkEnumerateDeviceExtensionProperties()`).
If yes, enable them (fill `VkDeviceCreateInfo::ppEnabledExtensionNames`).
@@ -19688,7 +18786,7 @@ If you enabled these extensions:
2 .
Use #VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT flag when creating -your #VmaAllocator`to inform the library that you enabled required extensions +your #VmaAllocator to inform the library that you enabled required extensions and you want the library to use them. \code @@ -19703,7 +18801,7 @@ buffer using vmaCreateBuffer() or image using vmaCreateImage(). When using the extension together with Vulkan Validation Layer, you will receive warnings like this: - vkBindBufferMemory(): Binding memory to buffer 0x33 but vkGetBufferMemoryRequirements() has not been called on that buffer. +_vkBindBufferMemory(): Binding memory to buffer 0x33 but vkGetBufferMemoryRequirements() has not been called on that buffer._ It is OK, you should just ignore it. It happens because you use function `vkGetBufferMemoryRequirements2KHR()` instead of standard @@ -19760,7 +18858,7 @@ out of the special `DEVICE_COHERENT` and `DEVICE_UNCACHED` memory types on eligi devices. There are multiple ways to do it, for example: - You can request or prefer to allocate out of such memory types by adding - `VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD` to VmaAllocationCreateInfo::requiredFlags + `VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD` to VmaAllocationCreateInfo::requiredFlags or VmaAllocationCreateInfo::preferredFlags. Those flags can be freely mixed with other ways of \ref choosing_memory_type, like setting VmaAllocationCreateInfo::usage. - If you manually found memory type index to use for this purpose, force allocation @@ -19778,7 +18876,7 @@ accompanying this library. Device extension VK_KHR_buffer_device_address allow to fetch raw GPU pointer to a buffer and pass it for usage in a shader code. -It is promoted to core Vulkan 1.2. +It has been promoted to core Vulkan 1.2. If you want to use this feature in connection with VMA, follow these steps: @@ -19839,7 +18937,7 @@ accompanying this library. you must not call vmaGetAllocationInfo() and vmaMapMemory() from different threads at the same time if you pass the same #VmaAllocation object to these functions. -- #VmaVirtualBlock is also not safe to be used from multiple threads simultaneously. +- #VmaVirtualBlock is not safe to be used from multiple threads simultaneously. \section general_considerations_validation_layer_warnings Validation layer warnings @@ -19863,7 +18961,7 @@ The library uses following algorithm for allocation, in order: -# Try to find free range of memory in existing blocks. -# If failed, try to create a new block of `VkDeviceMemory`, with preferred block size. --# If failed, try to create such block with size/2, size/4, size/8. +-# If failed, try to create such block with size / 2, size / 4, size / 8. -# If failed, try to allocate separate `VkDeviceMemory` for this allocation, just like when you use #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT. -# If failed, choose other memory type that meets the requirements specified in @@ -19874,28 +18972,29 @@ The library uses following algorithm for allocation, in order: Features deliberately excluded from the scope of this library: -- **Data transfer.** Uploading (streaming) and downloading data of buffers and images - between CPU and GPU memory and related synchronization is responsibility of the user. - Defining some "texture" object that would automatically stream its data from a - staging copy in CPU memory to GPU memory would rather be a feature of another, - higher-level library implemented on top of VMA. 
-- **Recreation of buffers and images.** Although the library has functions for - buffer and image creation (vmaCreateBuffer(), vmaCreateImage()), you need to - recreate these objects yourself after defragmentation. That is because the big - structures `VkBufferCreateInfo`, `VkImageCreateInfo` are not stored in - #VmaAllocation object. -- **Handling CPU memory allocation failures.** When dynamically creating small C++ - objects in CPU memory (not Vulkan memory), allocation failures are not checked - and handled gracefully, because that would complicate code significantly and - is usually not needed in desktop PC applications anyway. - Success of an allocation is just checked with an assert. -- **Code free of any compiler warnings.** Maintaining the library to compile and - work correctly on so many different platforms is hard enough. Being free of - any warnings, on any version of any compiler, is simply not feasible. - There are many preprocessor macros that make some variables unused, function parameters unreferenced, - or conditional expressions constant in some configurations. - The code of this library should not be bigger or more complicated just to silence these warnings. - It is recommended to disable such warnings instead. -- This is a C++ library with C interface. **Bindings or ports to any other programming languages** are welcome as external projects but - are not going to be included into this repository. +-# **Data transfer.** Uploading (streaming) and downloading data of buffers and images + between CPU and GPU memory and related synchronization is responsibility of the user. + Defining some "texture" object that would automatically stream its data from a + staging copy in CPU memory to GPU memory would rather be a feature of another, + higher-level library implemented on top of VMA. + VMA doesn't record any commands to a `VkCommandBuffer`. It just allocates memory. +-# **Recreation of buffers and images.** Although the library has functions for + buffer and image creation: vmaCreateBuffer(), vmaCreateImage(), you need to + recreate these objects yourself after defragmentation. That is because the big + structures `VkBufferCreateInfo`, `VkImageCreateInfo` are not stored in + #VmaAllocation object. +-# **Handling CPU memory allocation failures.** When dynamically creating small C++ + objects in CPU memory (not Vulkan memory), allocation failures are not checked + and handled gracefully, because that would complicate code significantly and + is usually not needed in desktop PC applications anyway. + Success of an allocation is just checked with an assert. +-# **Code free of any compiler warnings.** Maintaining the library to compile and + work correctly on so many different platforms is hard enough. Being free of + any warnings, on any version of any compiler, is simply not feasible. + There are many preprocessor macros that make some variables unused, function parameters unreferenced, + or conditional expressions constant in some configurations. + The code of this library should not be bigger or more complicated just to silence these warnings. + It is recommended to disable such warnings instead. +-# This is a C++ library with C interface. **Bindings or ports to any other programming languages** are welcome as external projects but + are not going to be included into this repository. */ |