Diffstat (limited to 'thirdparty')
-rw-r--r--  thirdparty/README.md | 23
-rw-r--r--  thirdparty/embree/include/embree3/rtcore_config.h | 6
-rw-r--r--  thirdparty/embree/kernels/bvh/bvh_intersector_hybrid.cpp | 917
-rw-r--r--  thirdparty/embree/kernels/bvh/bvh_intersector_hybrid4_bvh4.cpp | 59
-rw-r--r--  thirdparty/embree/kernels/bvh/bvh_intersector_stream.cpp | 528
-rw-r--r--  thirdparty/embree/kernels/bvh/bvh_intersector_stream_bvh4.cpp | 36
-rw-r--r--  thirdparty/embree/kernels/bvh/bvh_intersector_stream_filters.cpp | 657
-rw-r--r--  thirdparty/embree/kernels/config.h | 2
-rw-r--r--  thirdparty/embree/kernels/hash.h | 2
-rw-r--r--  thirdparty/misc/stb_vorbis.c | 5563
-rw-r--r--  thirdparty/misc/stb_vorbis.h | 2
-rw-r--r--  thirdparty/msdfgen/CHANGELOG.md | 82
-rw-r--r--  thirdparty/msdfgen/LICENSE.txt | 21
-rw-r--r--  thirdparty/msdfgen/core/Bitmap.h | 50
-rw-r--r--  thirdparty/msdfgen/core/Bitmap.hpp | 117
-rw-r--r--  thirdparty/msdfgen/core/BitmapRef.hpp | 43
-rw-r--r--  thirdparty/msdfgen/core/Contour.cpp | 90
-rw-r--r--  thirdparty/msdfgen/core/Contour.h | 34
-rw-r--r--  thirdparty/msdfgen/core/EdgeColor.h | 18
-rw-r--r--  thirdparty/msdfgen/core/EdgeHolder.cpp | 77
-rw-r--r--  thirdparty/msdfgen/core/EdgeHolder.h | 41
-rw-r--r--  thirdparty/msdfgen/core/MSDFErrorCorrection.cpp | 495
-rw-r--r--  thirdparty/msdfgen/core/MSDFErrorCorrection.h | 56
-rw-r--r--  thirdparty/msdfgen/core/Projection.cpp | 42
-rw-r--r--  thirdparty/msdfgen/core/Projection.h | 37
-rw-r--r--  thirdparty/msdfgen/core/Scanline.cpp | 125
-rw-r--r--  thirdparty/msdfgen/core/Scanline.h | 55
-rw-r--r--  thirdparty/msdfgen/core/Shape.cpp | 183
-rw-r--r--  thirdparty/msdfgen/core/Shape.h | 55
-rw-r--r--  thirdparty/msdfgen/core/ShapeDistanceFinder.h | 37
-rw-r--r--  thirdparty/msdfgen/core/ShapeDistanceFinder.hpp | 56
-rw-r--r--  thirdparty/msdfgen/core/SignedDistance.cpp | 30
-rw-r--r--  thirdparty/msdfgen/core/SignedDistance.h | 25
-rw-r--r--  thirdparty/msdfgen/core/Vector2.cpp | 146
-rw-r--r--  thirdparty/msdfgen/core/Vector2.h | 66
-rw-r--r--  thirdparty/msdfgen/core/arithmetics.hpp | 63
-rw-r--r--  thirdparty/msdfgen/core/bitmap-interpolation.hpp | 25
-rw-r--r--  thirdparty/msdfgen/core/contour-combiners.cpp | 133
-rw-r--r--  thirdparty/msdfgen/core/contour-combiners.h | 47
-rw-r--r--  thirdparty/msdfgen/core/edge-coloring.cpp | 499
-rw-r--r--  thirdparty/msdfgen/core/edge-coloring.h | 29
-rw-r--r--  thirdparty/msdfgen/core/edge-segments.cpp | 504
-rw-r--r--  thirdparty/msdfgen/core/edge-segments.h | 122
-rw-r--r--  thirdparty/msdfgen/core/edge-selectors.cpp | 261
-rw-r--r--  thirdparty/msdfgen/core/edge-selectors.h | 117
-rw-r--r--  thirdparty/msdfgen/core/equation-solver.cpp | 77
-rw-r--r--  thirdparty/msdfgen/core/equation-solver.h | 12
-rw-r--r--  thirdparty/msdfgen/core/generator-config.h | 63
-rw-r--r--  thirdparty/msdfgen/core/msdf-error-correction.cpp | 154
-rw-r--r--  thirdparty/msdfgen/core/msdf-error-correction.h | 28
-rw-r--r--  thirdparty/msdfgen/core/msdfgen.cpp | 288
-rw-r--r--  thirdparty/msdfgen/core/pixel-conversion.hpp | 18
-rw-r--r--  thirdparty/msdfgen/core/rasterization.cpp | 115
-rw-r--r--  thirdparty/msdfgen/core/rasterization.h | 25
-rw-r--r--  thirdparty/msdfgen/core/render-sdf.cpp | 108
-rw-r--r--  thirdparty/msdfgen/core/render-sdf.h | 22
-rw-r--r--  thirdparty/msdfgen/core/save-bmp.cpp | 169
-rw-r--r--  thirdparty/msdfgen/core/save-bmp.h | 16
-rw-r--r--  thirdparty/msdfgen/core/save-tiff.cpp | 190
-rw-r--r--  thirdparty/msdfgen/core/save-tiff.h | 13
-rw-r--r--  thirdparty/msdfgen/core/sdf-error-estimation.cpp | 192
-rw-r--r--  thirdparty/msdfgen/core/sdf-error-estimation.h | 30
-rw-r--r--  thirdparty/msdfgen/core/shape-description.cpp | 284
-rw-r--r--  thirdparty/msdfgen/core/shape-description.h | 16
-rw-r--r--  thirdparty/msdfgen/msdfgen.h | 65
-rw-r--r--  thirdparty/spirv-reflect/patches/specialization-constants.patch | 71
-rw-r--r--  thirdparty/spirv-reflect/spirv_reflect.c | 519
-rw-r--r--  thirdparty/spirv-reflect/spirv_reflect.h | 63
-rw-r--r--  thirdparty/vulkan/patches/VMA-assert-remove.patch | 29
-rw-r--r--  thirdparty/vulkan/vk_mem_alloc.h | 3869
70 files changed, 11180 insertions(+), 6832 deletions(-)
diff --git a/thirdparty/README.md b/thirdparty/README.md
index ebf9c5ac0e..3b5ec77b73 100644
--- a/thirdparty/README.md
+++ b/thirdparty/README.md
@@ -472,16 +472,25 @@ Collection of single-file libraries used in Godot components.
* Upstream: https://github.com/nothings/stb
* Version: 1.00 (2bb4a0accd4003c1db4c24533981e01b1adfd656, 2019)
* License: Public Domain or Unlicense or MIT
-- `stb_vorbis.c`
- * Upstream: https://github.com/nothings/stb
- * Version: 1.20 (314d0a6f9af5af27e585336eecea333e95c5a2d8, 2020)
- * License: Public Domain or Unlicense or MIT
- `yuv2rgb.h`
* Upstream: http://wss.co.uk/pinknoise/yuv2rgb/ (to check)
* Version: ?
* License: BSD

+## msdfgen
+
+- Upstream: https://github.com/Chlumsky/msdfgen
+- Version: 1.9.1 (1b3b6b985094e6f12751177490add3ad11dd91a9, 2021)
+- License: MIT
+
+Files extracted from the upstream source:
+
+- `msdfgen.h`
+- Files in `core/` folder.
+- `LICENSE.txt` and `CHANGELOG.md`
+
+
## nanosvg

- Upstream: https://github.com/memononen/nanosvg
@@ -602,7 +611,7 @@ Godot. Please check the file to know what's new.
## spirv-reflect

- Upstream: https://github.com/KhronosGroup/SPIRV-Reflect
-- Version: git (c0ce03a43ca77fedb5abfd1976ae2fd0eeb0e611, 2021)
+- Version: git (272e050728de8d4a4ce9e7101c1244e6ff56e5b0, 2021)
- License: Apache 2.0

Files extracted from upstream source:
@@ -692,8 +701,8 @@ Files extracted from upstream source:
SDK release: https://github.com/KhronosGroup/Vulkan-ValidationLayers/blob/master/layers/generated/vk_enum_string_helper.h

`vk_mem_alloc.h` is taken from https://github.com/GPUOpen-LibrariesAndSDKs/VulkanMemoryAllocator
-Version: 2.3.0 (2019)
-`vk_mem_alloc.cpp` and `android/vk_mem_alloc.cpp` are Godot files and should be preserved on updates.
+Version: 3.0.0-development (2021-06-21), branch `feature-small-buffers`, commit `cfea2f72851f9ee4a399769f18865047b83711f1`
+`vk_mem_alloc.cpp` is a Godot file and should be preserved on updates.

Patches in the `patches` directory should be re-applied after updates.
diff --git a/thirdparty/embree/include/embree3/rtcore_config.h b/thirdparty/embree/include/embree3/rtcore_config.h
index 3a9819c9f1..62b7b6f4dc 100644
--- a/thirdparty/embree/include/embree3/rtcore_config.h
+++ b/thirdparty/embree/include/embree3/rtcore_config.h
@@ -6,9 +6,9 @@
#define RTC_VERSION_MAJOR 3
#define RTC_VERSION_MINOR 13
-#define RTC_VERSION_PATCH 0
-#define RTC_VERSION 31300
-#define RTC_VERSION_STRING "3.13.0"
+#define RTC_VERSION_PATCH 1
+#define RTC_VERSION 31301
+#define RTC_VERSION_STRING "3.13.1"
#define RTC_MAX_INSTANCE_LEVEL_COUNT 1
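
The packed `RTC_VERSION` value encodes MAJOR*10000 + MINOR*100 + PATCH, so the 3.13.0 to 3.13.1 bump above yields 31301. A minimal compile-time check of that encoding — an illustrative sketch, not part of the diff, assuming the embree3 headers are on the include path:

// Illustrative only: verifies the packed-version encoding shown in the hunk above.
#include <embree3/rtcore_config.h>

static_assert(RTC_VERSION == RTC_VERSION_MAJOR * 10000
                           + RTC_VERSION_MINOR * 100
                           + RTC_VERSION_PATCH,
              "RTC_VERSION must equal MAJOR*10000 + MINOR*100 + PATCH");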
diff --git a/thirdparty/embree/kernels/bvh/bvh_intersector_hybrid.cpp b/thirdparty/embree/kernels/bvh/bvh_intersector_hybrid.cpp
new file mode 100644
index 0000000000..6e9a5a538e
--- /dev/null
+++ b/thirdparty/embree/kernels/bvh/bvh_intersector_hybrid.cpp
@@ -0,0 +1,917 @@
+// Copyright 2009-2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+
+#include "bvh_intersector_hybrid.h"
+#include "bvh_traverser1.h"
+#include "node_intersector1.h"
+#include "node_intersector_packet.h"
+
+#include "../geometry/intersector_iterators.h"
+#include "../geometry/triangle_intersector.h"
+#include "../geometry/trianglev_intersector.h"
+#include "../geometry/trianglev_mb_intersector.h"
+#include "../geometry/trianglei_intersector.h"
+#include "../geometry/quadv_intersector.h"
+#include "../geometry/quadi_intersector.h"
+#include "../geometry/curveNv_intersector.h"
+#include "../geometry/curveNi_intersector.h"
+#include "../geometry/curveNi_mb_intersector.h"
+#include "../geometry/linei_intersector.h"
+#include "../geometry/subdivpatch1_intersector.h"
+#include "../geometry/object_intersector.h"
+#include "../geometry/instance_intersector.h"
+#include "../geometry/subgrid_intersector.h"
+#include "../geometry/subgrid_mb_intersector.h"
+#include "../geometry/curve_intersector_virtual.h"
+
+#define SWITCH_DURING_DOWN_TRAVERSAL 1
+#define FORCE_SINGLE_MODE 0
+
+#define ENABLE_FAST_COHERENT_CODEPATHS 1
+
+namespace embree
+{
+ namespace isa
+ {
+ template<int N, int K, int types, bool robust, typename PrimitiveIntersectorK, bool single>
+ void BVHNIntersectorKHybrid<N, K, types, robust, PrimitiveIntersectorK, single>::intersect1(Accel::Intersectors* This,
+ const BVH* bvh,
+ NodeRef root,
+ size_t k,
+ Precalculations& pre,
+ RayHitK<K>& ray,
+ const TravRayK<K, robust>& tray,
+ IntersectContext* context)
+ {
+ /* stack state */
+ StackItemT<NodeRef> stack[stackSizeSingle]; // stack of nodes
+ StackItemT<NodeRef>* stackPtr = stack + 1; // current stack pointer
+ StackItemT<NodeRef>* stackEnd = stack + stackSizeSingle;
+ stack[0].ptr = root;
+ stack[0].dist = neg_inf;
+
+ /* load the ray into SIMD registers */
+ TravRay<N,robust> tray1;
+ tray1.template init<K>(k, tray.org, tray.dir, tray.rdir, tray.nearXYZ, tray.tnear[k], tray.tfar[k]);
+
+ /* pop loop */
+ while (true) pop:
+ {
+ /* pop next node */
+ if (unlikely(stackPtr == stack)) break;
+ stackPtr--;
+ NodeRef cur = NodeRef(stackPtr->ptr);
+
+ /* if popped node is too far, pop next one */
+ if (unlikely(*(float*)&stackPtr->dist > ray.tfar[k]))
+ continue;
+
+ /* downtraversal loop */
+ while (true)
+ {
+ /* intersect node */
+ size_t mask; vfloat<N> tNear;
+ STAT3(normal.trav_nodes, 1, 1, 1);
+ bool nodeIntersected = BVHNNodeIntersector1<N, types, robust>::intersect(cur, tray1, ray.time()[k], tNear, mask);
+ if (unlikely(!nodeIntersected)) { STAT3(normal.trav_nodes,-1,-1,-1); break; }
+
+ /* if no child is hit, pop next node */
+ if (unlikely(mask == 0))
+ goto pop;
+
+ /* select next child and push other children */
+ BVHNNodeTraverser1Hit<N, types>::traverseClosestHit(cur, mask, tNear, stackPtr, stackEnd);
+ }
+
+ /* this is a leaf node */
+ assert(cur != BVH::emptyNode);
+ STAT3(normal.trav_leaves, 1, 1, 1);
+ size_t num; Primitive* prim = (Primitive*)cur.leaf(num);
+
+ size_t lazy_node = 0;
+ PrimitiveIntersectorK::intersect(This, pre, ray, k, context, prim, num, tray1, lazy_node);
+
+ tray1.tfar = ray.tfar[k];
+
+ if (unlikely(lazy_node)) {
+ stackPtr->ptr = lazy_node;
+ stackPtr->dist = neg_inf;
+ stackPtr++;
+ }
+ }
+ }
+
+ template<int N, int K, int types, bool robust, typename PrimitiveIntersectorK, bool single>
+ void BVHNIntersectorKHybrid<N, K, types, robust, PrimitiveIntersectorK, single>::intersect(vint<K>* __restrict__ valid_i,
+ Accel::Intersectors* __restrict__ This,
+ RayHitK<K>& __restrict__ ray,
+ IntersectContext* __restrict__ context)
+ {
+ BVH* __restrict__ bvh = (BVH*)This->ptr;
+
+ /* we may traverse an empty BVH in case all geometry was invalid */
+ if (bvh->root == BVH::emptyNode)
+ return;
+
+#if ENABLE_FAST_COHERENT_CODEPATHS == 1
+ assert(context);
+ if (unlikely(types == BVH_AN1 && context->user && context->isCoherent()))
+ {
+ intersectCoherent(valid_i, This, ray, context);
+ return;
+ }
+#endif
+
+ /* filter out invalid rays */
+ vbool<K> valid = *valid_i == -1;
+#if defined(EMBREE_IGNORE_INVALID_RAYS)
+ valid &= ray.valid();
+#endif
+
+ /* return if there are no valid rays */
+ size_t valid_bits = movemask(valid);
+
+#if defined(__AVX__)
+ STAT3(normal.trav_hit_boxes[popcnt(movemask(valid))], 1, 1, 1);
+#endif
+
+ if (unlikely(valid_bits == 0)) return;
+
+ /* verify correct input */
+ assert(all(valid, ray.valid()));
+ assert(all(valid, ray.tnear() >= 0.0f));
+ assert(!(types & BVH_MB) || all(valid, (ray.time() >= 0.0f) & (ray.time() <= 1.0f)));
+ Precalculations pre(valid, ray);
+
+ /* load ray */
+ TravRayK<K, robust> tray(ray.org, ray.dir, single ? N : 0);
+ const vfloat<K> org_ray_tnear = max(ray.tnear(), 0.0f);
+ const vfloat<K> org_ray_tfar = max(ray.tfar , 0.0f);
+
+ if (single)
+ {
+ tray.tnear = select(valid, org_ray_tnear, vfloat<K>(pos_inf));
+ tray.tfar = select(valid, org_ray_tfar , vfloat<K>(neg_inf));
+
+ for (; valid_bits!=0; ) {
+ const size_t i = bscf(valid_bits);
+ intersect1(This, bvh, bvh->root, i, pre, ray, tray, context);
+ }
+ return;
+ }
+
+ /* determine switch threshold based on flags */
+ const size_t switchThreshold = (context->user && context->isCoherent()) ? 2 : switchThresholdIncoherent;
+
+ vint<K> octant = ray.octant();
+ octant = select(valid, octant, vint<K>(0xffffffff));
+
+ /* test whether we have ray with opposing direction signs in the packet */
+ bool split = false;
+ {
+ size_t bits = valid_bits;
+ vbool<K> vsplit( false );
+ do
+ {
+ const size_t valid_index = bsf(bits);
+ vbool<K> octant_valid = octant[valid_index] == octant;
+ bits &= ~(size_t)movemask(octant_valid);
+ vsplit |= vint<K>(octant[valid_index]) == (octant^vint<K>(0x7));
+ } while (bits);
+ if (any(vsplit)) split = true;
+ }
+
+ do
+ {
+ const size_t valid_index = bsf(valid_bits);
+ const vint<K> diff_octant = vint<K>(octant[valid_index])^octant;
+ const vint<K> count_diff_octant = \
+ ((diff_octant >> 2) & 1) +
+ ((diff_octant >> 1) & 1) +
+ ((diff_octant >> 0) & 1);
+
+ vbool<K> octant_valid = (count_diff_octant <= 1) & (octant != vint<K>(0xffffffff));
+ if (!single || !split) octant_valid = valid; // deactivate octant sorting in pure chunk mode, otherwise instance traversal performance goes down
+
+
+ octant = select(octant_valid,vint<K>(0xffffffff),octant);
+ valid_bits &= ~(size_t)movemask(octant_valid);
+
+ tray.tnear = select(octant_valid, org_ray_tnear, vfloat<K>(pos_inf));
+ tray.tfar = select(octant_valid, org_ray_tfar , vfloat<K>(neg_inf));
+
+ /* allocate stack and push root node */
+ vfloat<K> stack_near[stackSizeChunk];
+ NodeRef stack_node[stackSizeChunk];
+ stack_node[0] = BVH::invalidNode;
+ stack_near[0] = inf;
+ stack_node[1] = bvh->root;
+ stack_near[1] = tray.tnear;
+ NodeRef* stackEnd MAYBE_UNUSED = stack_node+stackSizeChunk;
+ NodeRef* __restrict__ sptr_node = stack_node + 2;
+ vfloat<K>* __restrict__ sptr_near = stack_near + 2;
+
+ while (1) pop:
+ {
+ /* pop next node from stack */
+ assert(sptr_node > stack_node);
+ sptr_node--;
+ sptr_near--;
+ NodeRef cur = *sptr_node;
+ if (unlikely(cur == BVH::invalidNode)) {
+ assert(sptr_node == stack_node);
+ break;
+ }
+
+ /* cull node if behind closest hit point */
+ vfloat<K> curDist = *sptr_near;
+ const vbool<K> active = curDist < tray.tfar;
+ if (unlikely(none(active)))
+ continue;
+
+ /* switch to single ray traversal */
+#if (!defined(__WIN32__) || defined(__X86_64__)) && defined(__SSE4_2__)
+#if FORCE_SINGLE_MODE == 0
+ if (single)
+#endif
+ {
+ size_t bits = movemask(active);
+#if FORCE_SINGLE_MODE == 0
+ if (unlikely(popcnt(bits) <= switchThreshold))
+#endif
+ {
+ for (; bits!=0; ) {
+ const size_t i = bscf(bits);
+ intersect1(This, bvh, cur, i, pre, ray, tray, context);
+ }
+ tray.tfar = min(tray.tfar, ray.tfar);
+ continue;
+ }
+ }
+#endif
+ while (likely(!cur.isLeaf()))
+ {
+ /* process nodes */
+ const vbool<K> valid_node = tray.tfar > curDist;
+ STAT3(normal.trav_nodes, 1, popcnt(valid_node), K);
+ const NodeRef nodeRef = cur;
+ const BaseNode* __restrict__ const node = nodeRef.baseNode();
+
+ /* set cur to invalid */
+ cur = BVH::emptyNode;
+ curDist = pos_inf;
+
+ size_t num_child_hits = 0;
+
+ for (unsigned i = 0; i < N; i++)
+ {
+ const NodeRef child = node->children[i];
+ if (unlikely(child == BVH::emptyNode)) break;
+ vfloat<K> lnearP;
+ vbool<K> lhit = valid_node;
+ BVHNNodeIntersectorK<N, K, types, robust>::intersect(nodeRef, i, tray, ray.time(), lnearP, lhit);
+
+ /* if we hit the child we choose to continue with that child if it
+ is closer than the current next child, or we push it onto the stack */
+ if (likely(any(lhit)))
+ {
+ assert(sptr_node < stackEnd);
+ assert(child != BVH::emptyNode);
+ const vfloat<K> childDist = select(lhit, lnearP, inf);
+ /* push cur node onto stack and continue with hit child */
+ if (any(childDist < curDist))
+ {
+ if (likely(cur != BVH::emptyNode)) {
+ num_child_hits++;
+ *sptr_node = cur; sptr_node++;
+ *sptr_near = curDist; sptr_near++;
+ }
+ curDist = childDist;
+ cur = child;
+ }
+
+ /* push hit child onto stack */
+ else {
+ num_child_hits++;
+ *sptr_node = child; sptr_node++;
+ *sptr_near = childDist; sptr_near++;
+ }
+ }
+ }
+
+#if defined(__AVX__)
+ //STAT3(normal.trav_hit_boxes[num_child_hits], 1, 1, 1);
+#endif
+
+ if (unlikely(cur == BVH::emptyNode))
+ goto pop;
+
+ /* improved distance sorting for 3 or more hits */
+ if (unlikely(num_child_hits >= 2))
+ {
+ if (any(sptr_near[-2] < sptr_near[-1]))
+ {
+ std::swap(sptr_near[-2],sptr_near[-1]);
+ std::swap(sptr_node[-2],sptr_node[-1]);
+ }
+ if (unlikely(num_child_hits >= 3))
+ {
+ if (any(sptr_near[-3] < sptr_near[-1]))
+ {
+ std::swap(sptr_near[-3],sptr_near[-1]);
+ std::swap(sptr_node[-3],sptr_node[-1]);
+ }
+ if (any(sptr_near[-3] < sptr_near[-2]))
+ {
+ std::swap(sptr_near[-3],sptr_near[-2]);
+ std::swap(sptr_node[-3],sptr_node[-2]);
+ }
+ }
+ }
+
+#if SWITCH_DURING_DOWN_TRAVERSAL == 1
+ if (single)
+ {
+ // seems to be the best place for testing utilization
+ if (unlikely(popcnt(tray.tfar > curDist) <= switchThreshold))
+ {
+ *sptr_node++ = cur;
+ *sptr_near++ = curDist;
+ goto pop;
+ }
+ }
+#endif
+ }
+
+ /* return if stack is empty */
+ if (unlikely(cur == BVH::invalidNode)) {
+ assert(sptr_node == stack_node);
+ break;
+ }
+
+ /* intersect leaf */
+ assert(cur != BVH::emptyNode);
+ const vbool<K> valid_leaf = tray.tfar > curDist;
+ STAT3(normal.trav_leaves, 1, popcnt(valid_leaf), K);
+ if (unlikely(none(valid_leaf))) continue;
+ size_t items; const Primitive* prim = (Primitive*)cur.leaf(items);
+
+ size_t lazy_node = 0;
+ PrimitiveIntersectorK::intersect(valid_leaf, This, pre, ray, context, prim, items, tray, lazy_node);
+ tray.tfar = select(valid_leaf, ray.tfar, tray.tfar);
+
+ if (unlikely(lazy_node)) {
+ *sptr_node = lazy_node; sptr_node++;
+ *sptr_near = neg_inf; sptr_near++;
+ }
+ }
+ } while(valid_bits);
+ }
+
+
+ template<int N, int K, int types, bool robust, typename PrimitiveIntersectorK, bool single>
+ void BVHNIntersectorKHybrid<N, K, types, robust, PrimitiveIntersectorK, single>::intersectCoherent(vint<K>* __restrict__ valid_i,
+ Accel::Intersectors* __restrict__ This,
+ RayHitK<K>& __restrict__ ray,
+ IntersectContext* context)
+ {
+ BVH* __restrict__ bvh = (BVH*)This->ptr;
+
+ /* filter out invalid rays */
+ vbool<K> valid = *valid_i == -1;
+#if defined(EMBREE_IGNORE_INVALID_RAYS)
+ valid &= ray.valid();
+#endif
+
+ /* return if there are no valid rays */
+ size_t valid_bits = movemask(valid);
+ if (unlikely(valid_bits == 0)) return;
+
+ /* verify correct input */
+ assert(all(valid, ray.valid()));
+ assert(all(valid, ray.tnear() >= 0.0f));
+ assert(!(types & BVH_MB) || all(valid, (ray.time() >= 0.0f) & (ray.time() <= 1.0f)));
+ Precalculations pre(valid, ray);
+
+ /* load ray */
+ TravRayK<K, robust> tray(ray.org, ray.dir, single ? N : 0);
+ const vfloat<K> org_ray_tnear = max(ray.tnear(), 0.0f);
+ const vfloat<K> org_ray_tfar = max(ray.tfar , 0.0f);
+
+ vint<K> octant = ray.octant();
+ octant = select(valid, octant, vint<K>(0xffffffff));
+
+ do
+ {
+ const size_t valid_index = bsf(valid_bits);
+ const vbool<K> octant_valid = octant[valid_index] == octant;
+ valid_bits &= ~(size_t)movemask(octant_valid);
+
+ tray.tnear = select(octant_valid, org_ray_tnear, vfloat<K>(pos_inf));
+ tray.tfar = select(octant_valid, org_ray_tfar , vfloat<K>(neg_inf));
+
+ Frustum<robust> frustum;
+ frustum.template init<K>(octant_valid, tray.org, tray.rdir, tray.tnear, tray.tfar, N);
+
+ StackItemT<NodeRef> stack[stackSizeSingle]; // stack of nodes
+ StackItemT<NodeRef>* stackPtr = stack + 1; // current stack pointer
+ stack[0].ptr = bvh->root;
+ stack[0].dist = neg_inf;
+
+ while (1) pop:
+ {
+ /* pop next node from stack */
+ if (unlikely(stackPtr == stack)) break;
+
+ stackPtr--;
+ NodeRef cur = NodeRef(stackPtr->ptr);
+
+ /* cull node if behind closest hit point */
+ vfloat<K> curDist = *(float*)&stackPtr->dist;
+ const vbool<K> active = curDist < tray.tfar;
+ if (unlikely(none(active))) continue;
+
+ while (likely(!cur.isLeaf()))
+ {
+ /* process nodes */
+ //STAT3(normal.trav_nodes, 1, popcnt(valid_node), K);
+ const NodeRef nodeRef = cur;
+ const AABBNode* __restrict__ const node = nodeRef.getAABBNode();
+
+ vfloat<N> fmin;
+ size_t m_frustum_node = intersectNodeFrustum<N>(node, frustum, fmin);
+
+ if (unlikely(!m_frustum_node)) goto pop;
+ cur = BVH::emptyNode;
+ curDist = pos_inf;
+
+#if defined(__AVX__)
+ //STAT3(normal.trav_hit_boxes[popcnt(m_frustum_node)], 1, 1, 1);
+#endif
+ size_t num_child_hits = 0;
+ do {
+ const size_t i = bscf(m_frustum_node);
+ vfloat<K> lnearP;
+ vbool<K> lhit = false; // motion blur is not supported, so the initial value will be ignored
+ STAT3(normal.trav_nodes, 1, 1, 1);
+ BVHNNodeIntersectorK<N, K, types, robust>::intersect(nodeRef, i, tray, ray.time(), lnearP, lhit);
+
+ if (likely(any(lhit)))
+ {
+ const vfloat<K> childDist = fmin[i];
+ const NodeRef child = node->child(i);
+ BVHN<N>::prefetch(child);
+ if (any(childDist < curDist))
+ {
+ if (likely(cur != BVH::emptyNode)) {
+ num_child_hits++;
+ stackPtr->ptr = cur;
+ *(float*)&stackPtr->dist = toScalar(curDist);
+ stackPtr++;
+ }
+ curDist = childDist;
+ cur = child;
+ }
+ /* push hit child onto stack */
+ else {
+ num_child_hits++;
+ stackPtr->ptr = child;
+ *(float*)&stackPtr->dist = toScalar(childDist);
+ stackPtr++;
+ }
+ }
+ } while(m_frustum_node);
+
+ if (unlikely(cur == BVH::emptyNode)) goto pop;
+
+ /* improved distance sorting for 3 or more hits */
+ if (unlikely(num_child_hits >= 2))
+ {
+ if (stackPtr[-2].dist < stackPtr[-1].dist)
+ std::swap(stackPtr[-2],stackPtr[-1]);
+ if (unlikely(num_child_hits >= 3))
+ {
+ if (stackPtr[-3].dist < stackPtr[-1].dist)
+ std::swap(stackPtr[-3],stackPtr[-1]);
+ if (stackPtr[-3].dist < stackPtr[-2].dist)
+ std::swap(stackPtr[-3],stackPtr[-2]);
+ }
+ }
+ }
+
+ /* intersect leaf */
+ assert(cur != BVH::invalidNode);
+ assert(cur != BVH::emptyNode);
+ const vbool<K> valid_leaf = tray.tfar > curDist;
+ STAT3(normal.trav_leaves, 1, popcnt(valid_leaf), K);
+ if (unlikely(none(valid_leaf))) continue;
+ size_t items; const Primitive* prim = (Primitive*)cur.leaf(items);
+
+ size_t lazy_node = 0;
+ PrimitiveIntersectorK::intersect(valid_leaf, This, pre, ray, context, prim, items, tray, lazy_node);
+
+ /* reduce max distance interval on successful intersection */
+ if (likely(any((ray.tfar < tray.tfar) & valid_leaf)))
+ {
+ tray.tfar = select(valid_leaf, ray.tfar, tray.tfar);
+ frustum.template updateMaxDist<K>(tray.tfar);
+ }
+
+ if (unlikely(lazy_node)) {
+ stackPtr->ptr = lazy_node;
+ stackPtr->dist = neg_inf;
+ stackPtr++;
+ }
+ }
+
+ } while(valid_bits);
+ }
+
+ // ===================================================================================================================================================================
+ // ===================================================================================================================================================================
+ // ===================================================================================================================================================================
+
+ template<int N, int K, int types, bool robust, typename PrimitiveIntersectorK, bool single>
+ bool BVHNIntersectorKHybrid<N, K, types, robust, PrimitiveIntersectorK, single>::occluded1(Accel::Intersectors* This,
+ const BVH* bvh,
+ NodeRef root,
+ size_t k,
+ Precalculations& pre,
+ RayK<K>& ray,
+ const TravRayK<K, robust>& tray,
+ IntersectContext* context)
+ {
+ /* stack state */
+ NodeRef stack[stackSizeSingle]; // stack of nodes that still need to get traversed
+ NodeRef* stackPtr = stack+1; // current stack pointer
+ NodeRef* stackEnd = stack+stackSizeSingle;
+ stack[0] = root;
+
+ /* load the ray into SIMD registers */
+ TravRay<N,robust> tray1;
+ tray1.template init<K>(k, tray.org, tray.dir, tray.rdir, tray.nearXYZ, tray.tnear[k], tray.tfar[k]);
+
+ /* pop loop */
+ while (true) pop:
+ {
+ /* pop next node */
+ if (unlikely(stackPtr == stack)) break;
+ stackPtr--;
+ NodeRef cur = (NodeRef)*stackPtr;
+
+ /* downtraversal loop */
+ while (true)
+ {
+ /* intersect node */
+ size_t mask; vfloat<N> tNear;
+ STAT3(shadow.trav_nodes, 1, 1, 1);
+ bool nodeIntersected = BVHNNodeIntersector1<N, types, robust>::intersect(cur, tray1, ray.time()[k], tNear, mask);
+ if (unlikely(!nodeIntersected)) { STAT3(shadow.trav_nodes,-1,-1,-1); break; }
+
+ /* if no child is hit, pop next node */
+ if (unlikely(mask == 0))
+ goto pop;
+
+ /* select next child and push other children */
+ BVHNNodeTraverser1Hit<N, types>::traverseAnyHit(cur, mask, tNear, stackPtr, stackEnd);
+ }
+
+ /* this is a leaf node */
+ assert(cur != BVH::emptyNode);
+ STAT3(shadow.trav_leaves, 1, 1, 1);
+ size_t num; Primitive* prim = (Primitive*)cur.leaf(num);
+
+ size_t lazy_node = 0;
+ if (PrimitiveIntersectorK::occluded(This, pre, ray, k, context, prim, num, tray1, lazy_node)) {
+ ray.tfar[k] = neg_inf;
+ return true;
+ }
+
+ if (unlikely(lazy_node)) {
+ *stackPtr = lazy_node;
+ stackPtr++;
+ }
+ }
+ return false;
+ }
+
+ template<int N, int K, int types, bool robust, typename PrimitiveIntersectorK, bool single>
+ void BVHNIntersectorKHybrid<N, K, types, robust, PrimitiveIntersectorK, single>::occluded(vint<K>* __restrict__ valid_i,
+ Accel::Intersectors* __restrict__ This,
+ RayK<K>& __restrict__ ray,
+ IntersectContext* context)
+ {
+ BVH* __restrict__ bvh = (BVH*)This->ptr;
+
+ /* we may traverse an empty BVH in case all geometry was invalid */
+ if (bvh->root == BVH::emptyNode)
+ return;
+
+#if ENABLE_FAST_COHERENT_CODEPATHS == 1
+ assert(context);
+ if (unlikely(types == BVH_AN1 && context->user && context->isCoherent()))
+ {
+ occludedCoherent(valid_i, This, ray, context);
+ return;
+ }
+#endif
+
+ /* filter out already occluded and invalid rays */
+ vbool<K> valid = (*valid_i == -1) & (ray.tfar >= 0.0f);
+#if defined(EMBREE_IGNORE_INVALID_RAYS)
+ valid &= ray.valid();
+#endif
+
+ /* return if there are no valid rays */
+ const size_t valid_bits = movemask(valid);
+ if (unlikely(valid_bits == 0)) return;
+
+ /* verify correct input */
+ assert(all(valid, ray.valid()));
+ assert(all(valid, ray.tnear() >= 0.0f));
+ assert(!(types & BVH_MB) || all(valid, (ray.time() >= 0.0f) & (ray.time() <= 1.0f)));
+ Precalculations pre(valid, ray);
+
+ /* load ray */
+ TravRayK<K, robust> tray(ray.org, ray.dir, single ? N : 0);
+ const vfloat<K> org_ray_tnear = max(ray.tnear(), 0.0f);
+ const vfloat<K> org_ray_tfar = max(ray.tfar , 0.0f);
+
+ tray.tnear = select(valid, org_ray_tnear, vfloat<K>(pos_inf));
+ tray.tfar = select(valid, org_ray_tfar , vfloat<K>(neg_inf));
+
+ vbool<K> terminated = !valid;
+ const vfloat<K> inf = vfloat<K>(pos_inf);
+
+ /* determine switch threshold based on flags */
+ const size_t switchThreshold = (context->user && context->isCoherent()) ? 2 : switchThresholdIncoherent;
+
+ /* allocate stack and push root node */
+ vfloat<K> stack_near[stackSizeChunk];
+ NodeRef stack_node[stackSizeChunk];
+ stack_node[0] = BVH::invalidNode;
+ stack_near[0] = inf;
+ stack_node[1] = bvh->root;
+ stack_near[1] = tray.tnear;
+ NodeRef* stackEnd MAYBE_UNUSED = stack_node+stackSizeChunk;
+ NodeRef* __restrict__ sptr_node = stack_node + 2;
+ vfloat<K>* __restrict__ sptr_near = stack_near + 2;
+
+ while (1) pop:
+ {
+ /* pop next node from stack */
+ assert(sptr_node > stack_node);
+ sptr_node--;
+ sptr_near--;
+ NodeRef cur = *sptr_node;
+ if (unlikely(cur == BVH::invalidNode)) {
+ assert(sptr_node == stack_node);
+ break;
+ }
+
+ /* cull node if behind closest hit point */
+ vfloat<K> curDist = *sptr_near;
+ const vbool<K> active = curDist < tray.tfar;
+ if (unlikely(none(active)))
+ continue;
+
+ /* switch to single ray traversal */
+#if (!defined(__WIN32__) || defined(__X86_64__)) && defined(__SSE4_2__)
+#if FORCE_SINGLE_MODE == 0
+ if (single)
+#endif
+ {
+ size_t bits = movemask(active);
+#if FORCE_SINGLE_MODE == 0
+ if (unlikely(popcnt(bits) <= switchThreshold))
+#endif
+ {
+ for (; bits!=0; ) {
+ const size_t i = bscf(bits);
+ if (occluded1(This, bvh, cur, i, pre, ray, tray, context))
+ set(terminated, i);
+ }
+ if (all(terminated)) break;
+ tray.tfar = select(terminated, vfloat<K>(neg_inf), tray.tfar);
+ continue;
+ }
+ }
+#endif
+
+ while (likely(!cur.isLeaf()))
+ {
+ /* process nodes */
+ const vbool<K> valid_node = tray.tfar > curDist;
+ STAT3(shadow.trav_nodes, 1, popcnt(valid_node), K);
+ const NodeRef nodeRef = cur;
+ const BaseNode* __restrict__ const node = nodeRef.baseNode();
+
+ /* set cur to invalid */
+ cur = BVH::emptyNode;
+ curDist = pos_inf;
+
+ for (unsigned i = 0; i < N; i++)
+ {
+ const NodeRef child = node->children[i];
+ if (unlikely(child == BVH::emptyNode)) break;
+ vfloat<K> lnearP;
+ vbool<K> lhit = valid_node;
+ BVHNNodeIntersectorK<N, K, types, robust>::intersect(nodeRef, i, tray, ray.time(), lnearP, lhit);
+
+ /* if we hit the child we push the previously hit node onto the stack, and continue with the currently hit child */
+ if (likely(any(lhit)))
+ {
+ assert(sptr_node < stackEnd);
+ assert(child != BVH::emptyNode);
+ const vfloat<K> childDist = select(lhit, lnearP, inf);
+
+ /* push 'cur' node onto stack and continue with hit child */
+ if (likely(cur != BVH::emptyNode)) {
+ *sptr_node = cur; sptr_node++;
+ *sptr_near = curDist; sptr_near++;
+ }
+ curDist = childDist;
+ cur = child;
+ }
+ }
+ if (unlikely(cur == BVH::emptyNode))
+ goto pop;
+
+#if SWITCH_DURING_DOWN_TRAVERSAL == 1
+ if (single)
+ {
+ // seems to be the best place for testing utilization
+ if (unlikely(popcnt(tray.tfar > curDist) <= switchThreshold))
+ {
+ *sptr_node++ = cur;
+ *sptr_near++ = curDist;
+ goto pop;
+ }
+ }
+#endif
+ }
+
+ /* return if stack is empty */
+ if (unlikely(cur == BVH::invalidNode)) {
+ assert(sptr_node == stack_node);
+ break;
+ }
+
+
+ /* intersect leaf */
+ assert(cur != BVH::emptyNode);
+ const vbool<K> valid_leaf = tray.tfar > curDist;
+ STAT3(shadow.trav_leaves, 1, popcnt(valid_leaf), K);
+ if (unlikely(none(valid_leaf))) continue;
+ size_t items; const Primitive* prim = (Primitive*) cur.leaf(items);
+
+ size_t lazy_node = 0;
+ terminated |= PrimitiveIntersectorK::occluded(!terminated, This, pre, ray, context, prim, items, tray, lazy_node);
+ if (all(terminated)) break;
+ tray.tfar = select(terminated, vfloat<K>(neg_inf), tray.tfar); // ignore node intersections for terminated rays
+
+ if (unlikely(lazy_node)) {
+ *sptr_node = lazy_node; sptr_node++;
+ *sptr_near = neg_inf; sptr_near++;
+ }
+ }
+
+ vfloat<K>::store(valid & terminated, &ray.tfar, neg_inf);
+ }
+
+
+ template<int N, int K, int types, bool robust, typename PrimitiveIntersectorK, bool single>
+ void BVHNIntersectorKHybrid<N, K, types, robust, PrimitiveIntersectorK, single>::occludedCoherent(vint<K>* __restrict__ valid_i,
+ Accel::Intersectors* __restrict__ This,
+ RayK<K>& __restrict__ ray,
+ IntersectContext* context)
+ {
+ BVH* __restrict__ bvh = (BVH*)This->ptr;
+
+ /* filter out invalid rays */
+ vbool<K> valid = *valid_i == -1;
+#if defined(EMBREE_IGNORE_INVALID_RAYS)
+ valid &= ray.valid();
+#endif
+
+ /* return if there are no valid rays */
+ size_t valid_bits = movemask(valid);
+ if (unlikely(valid_bits == 0)) return;
+
+ /* verify correct input */
+ assert(all(valid, ray.valid()));
+ assert(all(valid, ray.tnear() >= 0.0f));
+ assert(!(types & BVH_MB) || all(valid, (ray.time() >= 0.0f) & (ray.time() <= 1.0f)));
+ Precalculations pre(valid,ray);
+
+ /* load ray */
+ TravRayK<K, robust> tray(ray.org, ray.dir, single ? N : 0);
+ const vfloat<K> org_ray_tnear = max(ray.tnear(), 0.0f);
+ const vfloat<K> org_ray_tfar = max(ray.tfar , 0.0f);
+
+ vbool<K> terminated = !valid;
+
+ vint<K> octant = ray.octant();
+ octant = select(valid, octant, vint<K>(0xffffffff));
+
+ do
+ {
+ const size_t valid_index = bsf(valid_bits);
+ vbool<K> octant_valid = octant[valid_index] == octant;
+ valid_bits &= ~(size_t)movemask(octant_valid);
+
+ tray.tnear = select(octant_valid, org_ray_tnear, vfloat<K>(pos_inf));
+ tray.tfar = select(octant_valid, org_ray_tfar, vfloat<K>(neg_inf));
+
+ Frustum<robust> frustum;
+ frustum.template init<K>(octant_valid, tray.org, tray.rdir, tray.tnear, tray.tfar, N);
+
+ StackItemMaskT<NodeRef> stack[stackSizeSingle]; // stack of nodes
+ StackItemMaskT<NodeRef>* stackPtr = stack + 1; // current stack pointer
+ stack[0].ptr = bvh->root;
+ stack[0].mask = movemask(octant_valid);
+
+ while (1) pop:
+ {
+ /* pop next node from stack */
+ if (unlikely(stackPtr == stack)) break;
+
+ stackPtr--;
+ NodeRef cur = NodeRef(stackPtr->ptr);
+
+ /* cull node if its active rays have all been terminated */
+ size_t m_active = (size_t)stackPtr->mask & (~(size_t)movemask(terminated));
+
+ if (unlikely(m_active == 0)) continue;
+
+ while (likely(!cur.isLeaf()))
+ {
+ /* process nodes */
+ //STAT3(normal.trav_nodes, 1, popcnt(valid_node), K);
+ const NodeRef nodeRef = cur;
+ const AABBNode* __restrict__ const node = nodeRef.getAABBNode();
+
+ vfloat<N> fmin;
+ size_t m_frustum_node = intersectNodeFrustum<N>(node, frustum, fmin);
+
+ if (unlikely(!m_frustum_node)) goto pop;
+ cur = BVH::emptyNode;
+ m_active = 0;
+
+#if defined(__AVX__)
+ //STAT3(normal.trav_hit_boxes[popcnt(m_frustum_node)], 1, 1, 1);
+#endif
+ size_t num_child_hits = 0;
+ do {
+ const size_t i = bscf(m_frustum_node);
+ vfloat<K> lnearP;
+ vbool<K> lhit = false; // motion blur is not supported, so the initial value will be ignored
+ STAT3(normal.trav_nodes, 1, 1, 1);
+ BVHNNodeIntersectorK<N, K, types, robust>::intersect(nodeRef, i, tray, ray.time(), lnearP, lhit);
+
+ if (likely(any(lhit)))
+ {
+ const NodeRef child = node->child(i);
+ assert(child != BVH::emptyNode);
+ BVHN<N>::prefetch(child);
+ if (likely(cur != BVH::emptyNode)) {
+ num_child_hits++;
+ stackPtr->ptr = cur;
+ stackPtr->mask = m_active;
+ stackPtr++;
+ }
+ cur = child;
+ m_active = movemask(lhit);
+ }
+ } while(m_frustum_node);
+
+ if (unlikely(cur == BVH::emptyNode)) goto pop;
+ }
+
+ /* intersect leaf */
+ assert(cur != BVH::invalidNode);
+ assert(cur != BVH::emptyNode);
+#if defined(__AVX__)
+ STAT3(normal.trav_leaves, 1, popcnt(m_active), K);
+#endif
+ if (unlikely(!m_active)) continue;
+ size_t items; const Primitive* prim = (Primitive*)cur.leaf(items);
+
+ size_t lazy_node = 0;
+ terminated |= PrimitiveIntersectorK::occluded(!terminated, This, pre, ray, context, prim, items, tray, lazy_node);
+ octant_valid &= !terminated;
+ if (unlikely(none(octant_valid))) break;
+ tray.tfar = select(terminated, vfloat<K>(neg_inf), tray.tfar); // ignore node intersections for terminated rays
+
+ if (unlikely(lazy_node)) {
+ stackPtr->ptr = lazy_node;
+ stackPtr->mask = movemask(octant_valid);
+ stackPtr++;
+ }
+ }
+ } while(valid_bits);
+
+ vfloat<K>::store(valid & terminated, &ray.tfar, neg_inf);
+ }
+ }
+}
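
The heart of the hybrid kernel above is its utilization test: packet traversal continues while enough SIMD lanes stay active, and once the popcount of the active mask drops to the switch threshold (`switchThresholdIncoherent` in the code), the remaining rays are peeled off into the scalar `intersect1`/`occluded1` paths. A standalone sketch of that switch, with hypothetical `traverseSingle`/`traversePacket` helpers standing in for the embree kernels:

// Sketch of the hybrid packet/single-ray switch, assuming a 32-bit lane mask
// and GCC/Clang builtins. traverseSingle()/traversePacket() are hypothetical
// stand-ins, not embree API.
#include <cstdint>

void traverseSingle(int lane);            // hypothetical scalar kernel
void traversePacket(uint32_t activeMask); // hypothetical SIMD packet kernel

constexpr int kSwitchThreshold = 3; // embree tunes this per ISA

void traverseHybrid(uint32_t activeMask) {
    if (__builtin_popcount(activeMask) <= kSwitchThreshold) {
        while (activeMask) {                              // peel off lanes one by one
            const int lane = __builtin_ctz(activeMask);   // like bscf: lowest set bit
            activeMask &= activeMask - 1;                 // clear that bit
            traverseSingle(lane);                         // scalar path
        }
    } else {
        traversePacket(activeMask);                       // packet path
    }
}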
diff --git a/thirdparty/embree/kernels/bvh/bvh_intersector_hybrid4_bvh4.cpp b/thirdparty/embree/kernels/bvh/bvh_intersector_hybrid4_bvh4.cpp
new file mode 100644
index 0000000000..2137da6a25
--- /dev/null
+++ b/thirdparty/embree/kernels/bvh/bvh_intersector_hybrid4_bvh4.cpp
@@ -0,0 +1,59 @@
+// Copyright 2009-2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+
+#include "bvh_intersector_hybrid.cpp"
+
+namespace embree
+{
+ namespace isa
+ {
+ ////////////////////////////////////////////////////////////////////////////////
+ /// BVH4Intersector4 Definitions
+ ////////////////////////////////////////////////////////////////////////////////
+
+ IF_ENABLED_TRIS(DEFINE_INTERSECTOR4(BVH4Triangle4Intersector4HybridMoeller, BVHNIntersectorKHybrid<4 COMMA 4 COMMA BVH_AN1 COMMA false COMMA ArrayIntersectorK_1<4 COMMA TriangleMIntersectorKMoeller <4 COMMA 4 COMMA true> > >));
+ IF_ENABLED_TRIS(DEFINE_INTERSECTOR4(BVH4Triangle4Intersector4HybridMoellerNoFilter, BVHNIntersectorKHybrid<4 COMMA 4 COMMA BVH_AN1 COMMA false COMMA ArrayIntersectorK_1<4 COMMA TriangleMIntersectorKMoeller <4 COMMA 4 COMMA false> > >));
+ IF_ENABLED_TRIS(DEFINE_INTERSECTOR4(BVH4Triangle4iIntersector4HybridMoeller, BVHNIntersectorKHybrid<4 COMMA 4 COMMA BVH_AN1 COMMA false COMMA ArrayIntersectorK_1<4 COMMA TriangleMiIntersectorKMoeller <4 COMMA 4 COMMA true> > >));
+ IF_ENABLED_TRIS(DEFINE_INTERSECTOR4(BVH4Triangle4vIntersector4HybridPluecker, BVHNIntersectorKHybrid<4 COMMA 4 COMMA BVH_AN1 COMMA true COMMA ArrayIntersectorK_1<4 COMMA TriangleMvIntersectorKPluecker<4 COMMA 4 COMMA true> > >));
+ IF_ENABLED_TRIS(DEFINE_INTERSECTOR4(BVH4Triangle4iIntersector4HybridPluecker, BVHNIntersectorKHybrid<4 COMMA 4 COMMA BVH_AN1 COMMA true COMMA ArrayIntersectorK_1<4 COMMA TriangleMiIntersectorKPluecker<4 COMMA 4 COMMA true> > >));
+
+ IF_ENABLED_TRIS(DEFINE_INTERSECTOR4(BVH4Triangle4vMBIntersector4HybridMoeller, BVHNIntersectorKHybrid<4 COMMA 4 COMMA BVH_AN2_AN4D COMMA false COMMA ArrayIntersectorK_1<4 COMMA TriangleMvMBIntersectorKMoeller <4 COMMA 4 COMMA true> > >));
+ IF_ENABLED_TRIS(DEFINE_INTERSECTOR4(BVH4Triangle4iMBIntersector4HybridMoeller, BVHNIntersectorKHybrid<4 COMMA 4 COMMA BVH_AN2_AN4D COMMA false COMMA ArrayIntersectorK_1<4 COMMA TriangleMiMBIntersectorKMoeller <4 COMMA 4 COMMA true> > >));
+ IF_ENABLED_TRIS(DEFINE_INTERSECTOR4(BVH4Triangle4vMBIntersector4HybridPluecker, BVHNIntersectorKHybrid<4 COMMA 4 COMMA BVH_AN2_AN4D COMMA true COMMA ArrayIntersectorK_1<4 COMMA TriangleMvMBIntersectorKPluecker<4 COMMA 4 COMMA true> > >));
+ IF_ENABLED_TRIS(DEFINE_INTERSECTOR4(BVH4Triangle4iMBIntersector4HybridPluecker, BVHNIntersectorKHybrid<4 COMMA 4 COMMA BVH_AN2_AN4D COMMA true COMMA ArrayIntersectorK_1<4 COMMA TriangleMiMBIntersectorKPluecker<4 COMMA 4 COMMA true> > >));
+
+ IF_ENABLED_QUADS(DEFINE_INTERSECTOR4(BVH4Quad4vIntersector4HybridMoeller, BVHNIntersectorKHybrid<4 COMMA 4 COMMA BVH_AN1 COMMA false COMMA ArrayIntersectorK_1<4 COMMA QuadMvIntersectorKMoeller <4 COMMA 4 COMMA true > > >));
+ IF_ENABLED_QUADS(DEFINE_INTERSECTOR4(BVH4Quad4vIntersector4HybridMoellerNoFilter,BVHNIntersectorKHybrid<4 COMMA 4 COMMA BVH_AN1 COMMA false COMMA ArrayIntersectorK_1<4 COMMA QuadMvIntersectorKMoeller <4 COMMA 4 COMMA false> > >));
+ IF_ENABLED_QUADS(DEFINE_INTERSECTOR4(BVH4Quad4iIntersector4HybridMoeller, BVHNIntersectorKHybrid<4 COMMA 4 COMMA BVH_AN1 COMMA false COMMA ArrayIntersectorK_1<4 COMMA QuadMiIntersectorKMoeller <4 COMMA 4 COMMA true > > >));
+ IF_ENABLED_QUADS(DEFINE_INTERSECTOR4(BVH4Quad4vIntersector4HybridPluecker, BVHNIntersectorKHybrid<4 COMMA 4 COMMA BVH_AN1 COMMA true COMMA ArrayIntersectorK_1<4 COMMA QuadMvIntersectorKPluecker<4 COMMA 4 COMMA true > > >));
+ IF_ENABLED_QUADS(DEFINE_INTERSECTOR4(BVH4Quad4iIntersector4HybridPluecker, BVHNIntersectorKHybrid<4 COMMA 4 COMMA BVH_AN1 COMMA true COMMA ArrayIntersectorK_1<4 COMMA QuadMiIntersectorKPluecker<4 COMMA 4 COMMA true > > >));
+
+ IF_ENABLED_QUADS(DEFINE_INTERSECTOR4(BVH4Quad4iMBIntersector4HybridMoeller, BVHNIntersectorKHybrid<4 COMMA 4 COMMA BVH_AN2_AN4D COMMA false COMMA ArrayIntersectorK_1<4 COMMA QuadMiMBIntersectorKMoeller <4 COMMA 4 COMMA true > > >));
+ IF_ENABLED_QUADS(DEFINE_INTERSECTOR4(BVH4Quad4iMBIntersector4HybridPluecker,BVHNIntersectorKHybrid<4 COMMA 4 COMMA BVH_AN2_AN4D COMMA true COMMA ArrayIntersectorK_1<4 COMMA QuadMiMBIntersectorKPluecker<4 COMMA 4 COMMA true > > >));
+
+ IF_ENABLED_CURVES_OR_POINTS(DEFINE_INTERSECTOR4(BVH4OBBVirtualCurveIntersector4Hybrid, BVHNIntersectorKHybrid<4 COMMA 4 COMMA BVH_AN1_UN1 COMMA false COMMA VirtualCurveIntersectorK<4> >));
+ IF_ENABLED_CURVES_OR_POINTS(DEFINE_INTERSECTOR4(BVH4OBBVirtualCurveIntersector4HybridMB,BVHNIntersectorKHybrid<4 COMMA 4 COMMA BVH_AN2_AN4D_UN2 COMMA false COMMA VirtualCurveIntersectorK<4> >));
+
+ IF_ENABLED_CURVES_OR_POINTS(DEFINE_INTERSECTOR4(BVH4OBBVirtualCurveIntersectorRobust4Hybrid, BVHNIntersectorKHybrid<4 COMMA 4 COMMA BVH_AN1_UN1 COMMA true COMMA VirtualCurveIntersectorK<4> >));
+ IF_ENABLED_CURVES_OR_POINTS(DEFINE_INTERSECTOR4(BVH4OBBVirtualCurveIntersectorRobust4HybridMB,BVHNIntersectorKHybrid<4 COMMA 4 COMMA BVH_AN2_AN4D_UN2 COMMA true COMMA VirtualCurveIntersectorK<4> >));
+
+ //IF_ENABLED_SUBDIV(DEFINE_INTERSECTOR4(BVH4SubdivPatch1Intersector4, BVHNIntersectorKHybrid<4 COMMA 4 COMMA BVH_AN1 COMMA true COMMA SubdivPatch1Intersector4>));
+ IF_ENABLED_SUBDIV(DEFINE_INTERSECTOR4(BVH4SubdivPatch1Intersector4, BVHNIntersectorKHybrid<4 COMMA 4 COMMA BVH_AN1 COMMA true COMMA SubdivPatch1Intersector4>));
+ IF_ENABLED_SUBDIV(DEFINE_INTERSECTOR4(BVH4SubdivPatch1MBIntersector4, BVHNIntersectorKHybrid<4 COMMA 4 COMMA BVH_AN2_AN4D COMMA false COMMA SubdivPatch1MBIntersector4>));
+ //IF_ENABLED_SUBDIV(DEFINE_INTERSECTOR4(BVH4SubdivPatch1MBIntersector4, BVHNIntersectorKHybrid<4 COMMA 4 COMMA BVH_AN2_AN4D COMMA false COMMA SubdivPatch1MBIntersector4>));
+
+ IF_ENABLED_USER(DEFINE_INTERSECTOR4(BVH4VirtualIntersector4Chunk, BVHNIntersectorKChunk<4 COMMA 4 COMMA BVH_AN1 COMMA false COMMA ArrayIntersectorK_1<4 COMMA ObjectIntersector4> >));
+ IF_ENABLED_USER(DEFINE_INTERSECTOR4(BVH4VirtualMBIntersector4Chunk, BVHNIntersectorKChunk<4 COMMA 4 COMMA BVH_AN2_AN4D COMMA false COMMA ArrayIntersectorK_1<4 COMMA ObjectIntersector4MB> >));
+
+ IF_ENABLED_INSTANCE(DEFINE_INTERSECTOR4(BVH4InstanceIntersector4Chunk, BVHNIntersectorKChunk<4 COMMA 4 COMMA BVH_AN1 COMMA false COMMA ArrayIntersectorK_1<4 COMMA InstanceIntersectorK<4>> >));
+ IF_ENABLED_INSTANCE(DEFINE_INTERSECTOR4(BVH4InstanceMBIntersector4Chunk, BVHNIntersectorKChunk<4 COMMA 4 COMMA BVH_AN2_AN4D COMMA false COMMA ArrayIntersectorK_1<4 COMMA InstanceIntersectorKMB<4>> >));
+
+ IF_ENABLED_GRIDS(DEFINE_INTERSECTOR4(BVH4GridIntersector4HybridMoeller, BVHNIntersectorKHybrid<4 COMMA 4 COMMA BVH_AN1 COMMA false COMMA SubGridIntersectorKMoeller <4 COMMA 4 COMMA true> >));
+ //IF_ENABLED_GRIDS(DEFINE_INTERSECTOR4(BVH4GridIntersector4HybridMoeller, BVHNIntersectorKChunk<4 COMMA 4 COMMA BVH_AN1 COMMA false COMMA SubGridIntersectorKMoeller <4 COMMA 4 COMMA true> >));
+
+ IF_ENABLED_GRIDS(DEFINE_INTERSECTOR4(BVH4GridMBIntersector4HybridMoeller, BVHNIntersectorKHybrid<4 COMMA 4 COMMA BVH_AN2_AN4D COMMA true COMMA SubGridMBIntersectorKPluecker <4 COMMA 4 COMMA true> >));
+ IF_ENABLED_GRIDS(DEFINE_INTERSECTOR4(BVH4GridIntersector4HybridPluecker, BVHNIntersectorKHybrid<4 COMMA 4 COMMA BVH_AN1 COMMA true COMMA SubGridIntersectorKPluecker <4 COMMA 4 COMMA true> >));
+
+ }
+}
+
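The instantiation lists above rely on the `COMMA` token: the preprocessor splits macro arguments at every bare comma, so template argument lists such as `BVHNIntersectorKHybrid<4, 4, BVH_AN1, ...>` are written with `COMMA` to travel through `DEFINE_INTERSECTOR4` as a single argument. A minimal sketch of the pattern (hypothetical macro and template names; embree's macro expands to symbol-export boilerplate rather than a plain typedef):

// Minimal sketch of the COMMA trick used by the DEFINE_INTERSECTOR4 lists above.
#include <type_traits>

template<int N, int K, bool robust>
struct SomeIntersector {}; // hypothetical stand-in for an intersector template

#define COMMA ,
#define DEFINE_ALIAS(name, type) typedef type name;

// Without COMMA, the preprocessor would split the template argument list and
// complain that DEFINE_ALIAS received four arguments instead of two.
DEFINE_ALIAS(MyIntersector4, SomeIntersector<4 COMMA 4 COMMA true>)

static_assert(std::is_same<MyIntersector4, SomeIntersector<4, 4, true>>::value,
              "COMMA expands to ',' after argument substitution");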
diff --git a/thirdparty/embree/kernels/bvh/bvh_intersector_stream.cpp b/thirdparty/embree/kernels/bvh/bvh_intersector_stream.cpp
new file mode 100644
index 0000000000..4a74d8468d
--- /dev/null
+++ b/thirdparty/embree/kernels/bvh/bvh_intersector_stream.cpp
@@ -0,0 +1,528 @@
+// Copyright 2009-2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+
+#include "bvh_intersector_stream.h"
+
+#include "../geometry/intersector_iterators.h"
+#include "../geometry/triangle_intersector.h"
+#include "../geometry/trianglev_intersector.h"
+#include "../geometry/trianglev_mb_intersector.h"
+#include "../geometry/trianglei_intersector.h"
+#include "../geometry/quadv_intersector.h"
+#include "../geometry/quadi_intersector.h"
+#include "../geometry/linei_intersector.h"
+#include "../geometry/subdivpatch1_intersector.h"
+#include "../geometry/object_intersector.h"
+#include "../geometry/instance_intersector.h"
+
+#include "../common/scene.h"
+#include <bitset>
+
+namespace embree
+{
+ namespace isa
+ {
+ __aligned(64) static const int shiftTable[32] = {
+ (int)1 << 0, (int)1 << 1, (int)1 << 2, (int)1 << 3, (int)1 << 4, (int)1 << 5, (int)1 << 6, (int)1 << 7,
+ (int)1 << 8, (int)1 << 9, (int)1 << 10, (int)1 << 11, (int)1 << 12, (int)1 << 13, (int)1 << 14, (int)1 << 15,
+ (int)1 << 16, (int)1 << 17, (int)1 << 18, (int)1 << 19, (int)1 << 20, (int)1 << 21, (int)1 << 22, (int)1 << 23,
+ (int)1 << 24, (int)1 << 25, (int)1 << 26, (int)1 << 27, (int)1 << 28, (int)1 << 29, (int)1 << 30, (int)1 << 31
+ };
+
+ template<int N, int types, bool robust, typename PrimitiveIntersector>
+ __forceinline void BVHNIntersectorStream<N, types, robust, PrimitiveIntersector>::intersect(Accel::Intersectors* __restrict__ This,
+ RayHitN** inputPackets,
+ size_t numOctantRays,
+ IntersectContext* context)
+ {
+ /* we may traverse an empty BVH in case all geometry was invalid */
+ BVH* __restrict__ bvh = (BVH*) This->ptr;
+ if (bvh->root == BVH::emptyNode)
+ return;
+
+ // Only the coherent code path is implemented
+ assert(context->isCoherent());
+ intersectCoherent(This, (RayHitK<VSIZEL>**)inputPackets, numOctantRays, context);
+ }
+
+ template<int N, int types, bool robust, typename PrimitiveIntersector>
+ template<int K>
+ __forceinline void BVHNIntersectorStream<N, types, robust, PrimitiveIntersector>::intersectCoherent(Accel::Intersectors* __restrict__ This,
+ RayHitK<K>** inputPackets,
+ size_t numOctantRays,
+ IntersectContext* context)
+ {
+ assert(context->isCoherent());
+
+ BVH* __restrict__ bvh = (BVH*) This->ptr;
+ __aligned(64) StackItemMaskCoherent stack[stackSizeSingle]; // stack of nodes
+ assert(numOctantRays <= MAX_INTERNAL_STREAM_SIZE);
+
+ __aligned(64) TravRayKStream<K, robust> packets[MAX_INTERNAL_STREAM_SIZE/K];
+ __aligned(64) Frustum<robust> frustum;
+
+ bool commonOctant = true;
+ const size_t m_active = initPacketsAndFrustum((RayK<K>**)inputPackets, numOctantRays, packets, frustum, commonOctant);
+ if (unlikely(m_active == 0)) return;
+
+ /* case of non-common origin */
+ if (unlikely(!commonOctant))
+ {
+ const size_t numPackets = (numOctantRays+K-1)/K;
+ for (size_t i = 0; i < numPackets; i++)
+ This->intersect(inputPackets[i]->tnear() <= inputPackets[i]->tfar, *inputPackets[i], context);
+ return;
+ }
+
+ stack[0].mask = m_active;
+ stack[0].parent = 0;
+ stack[0].child = bvh->root;
+
+ ///////////////////////////////////////////////////////////////////////////////////
+ ///////////////////////////////////////////////////////////////////////////////////
+ ///////////////////////////////////////////////////////////////////////////////////
+
+ StackItemMaskCoherent* stackPtr = stack + 1;
+
+ while (1) pop:
+ {
+ if (unlikely(stackPtr == stack)) break;
+
+ STAT3(normal.trav_stack_pop,1,1,1);
+ stackPtr--;
+ /*! pop next node */
+ NodeRef cur = NodeRef(stackPtr->child);
+ size_t m_trav_active = stackPtr->mask;
+ assert(m_trav_active);
+ NodeRef parent = stackPtr->parent;
+
+ while (1)
+ {
+ if (unlikely(cur.isLeaf())) break;
+ const AABBNode* __restrict__ const node = cur.getAABBNode();
+ parent = cur;
+
+ __aligned(64) size_t maskK[N];
+ for (size_t i = 0; i < N; i++)
+ maskK[i] = m_trav_active;
+ vfloat<N> dist;
+ const size_t m_node_hit = traverseCoherentStream(m_trav_active, packets, node, frustum, maskK, dist);
+ if (unlikely(m_node_hit == 0)) goto pop;
+
+ BVHNNodeTraverserStreamHitCoherent<N, types>::traverseClosestHit(cur, m_trav_active, vbool<N>((int)m_node_hit), dist, (size_t*)maskK, stackPtr);
+ assert(m_trav_active);
+ }
+
+ /* non-root and leaf => full culling test for all rays */
+ if (unlikely(parent != 0 && cur.isLeaf()))
+ {
+ const AABBNode* __restrict__ const node = parent.getAABBNode();
+ size_t boxID = 0xff;
+ for (size_t i = 0; i < N; i++)
+ if (node->child(i) == cur) { boxID = i; break; }
+ assert(boxID < N);
+ assert(cur == node->child(boxID));
+ m_trav_active = intersectAABBNodePacket(m_trav_active, packets, node, boxID, frustum.nf);
+ }
+
+ /*! this is a leaf node */
+ assert(cur != BVH::emptyNode);
+ STAT3(normal.trav_leaves, 1, 1, 1);
+ size_t num; PrimitiveK<K>* prim = (PrimitiveK<K>*)cur.leaf(num);
+
+ size_t bits = m_trav_active;
+
+ /*! intersect stream of rays with all primitives */
+ size_t lazy_node = 0;
+#if defined(__SSE4_2__)
+ STAT_USER(1,(popcnt(bits)+K-1)/K*4);
+#endif
+ while(bits)
+ {
+ size_t i = bsf(bits) / K;
+ const size_t m_isec = ((((size_t)1 << K)-1) << (i*K));
+ assert(m_isec & bits);
+ bits &= ~m_isec;
+
+ TravRayKStream<K, robust>& p = packets[i];
+ vbool<K> m_valid = p.tnear <= p.tfar;
+ PrimitiveIntersectorK<K>::intersectK(m_valid, This, *inputPackets[i], context, prim, num, lazy_node);
+ p.tfar = min(p.tfar, inputPackets[i]->tfar);
+ };
+
+ } // traversal + intersection
+ }
+
+ template<int N, int types, bool robust, typename PrimitiveIntersector>
+ __forceinline void BVHNIntersectorStream<N, types, robust, PrimitiveIntersector>::occluded(Accel::Intersectors* __restrict__ This,
+ RayN** inputPackets,
+ size_t numOctantRays,
+ IntersectContext* context)
+ {
+ /* we may traverse an empty BVH in case all geometry was invalid */
+ BVH* __restrict__ bvh = (BVH*) This->ptr;
+ if (bvh->root == BVH::emptyNode)
+ return;
+
+ if (unlikely(context->isCoherent()))
+ occludedCoherent(This, (RayK<VSIZEL>**)inputPackets, numOctantRays, context);
+ else
+ occludedIncoherent(This, (RayK<VSIZEX>**)inputPackets, numOctantRays, context);
+ }
+
+ template<int N, int types, bool robust, typename PrimitiveIntersector>
+ template<int K>
+ __noinline void BVHNIntersectorStream<N, types, robust, PrimitiveIntersector>::occludedCoherent(Accel::Intersectors* __restrict__ This,
+ RayK<K>** inputPackets,
+ size_t numOctantRays,
+ IntersectContext* context)
+ {
+ assert(context->isCoherent());
+
+ BVH* __restrict__ bvh = (BVH*)This->ptr;
+ __aligned(64) StackItemMaskCoherent stack[stackSizeSingle]; // stack of nodes
+ assert(numOctantRays <= MAX_INTERNAL_STREAM_SIZE);
+
+ /* inactive rays should have been filtered out before */
+ __aligned(64) TravRayKStream<K, robust> packets[MAX_INTERNAL_STREAM_SIZE/K];
+ __aligned(64) Frustum<robust> frustum;
+
+ bool commonOctant = true;
+ size_t m_active = initPacketsAndFrustum(inputPackets, numOctantRays, packets, frustum, commonOctant);
+
+ /* valid rays */
+ if (unlikely(m_active == 0)) return;
+
+ /* case of non-common origin */
+ if (unlikely(!commonOctant))
+ {
+ const size_t numPackets = (numOctantRays+K-1)/K;
+ for (size_t i = 0; i < numPackets; i++)
+ This->occluded(inputPackets[i]->tnear() <= inputPackets[i]->tfar, *inputPackets[i], context);
+ return;
+ }
+
+ stack[0].mask = m_active;
+ stack[0].parent = 0;
+ stack[0].child = bvh->root;
+
+ ///////////////////////////////////////////////////////////////////////////////////
+ ///////////////////////////////////////////////////////////////////////////////////
+ ///////////////////////////////////////////////////////////////////////////////////
+
+ StackItemMaskCoherent* stackPtr = stack + 1;
+
+ while (1) pop:
+ {
+ if (unlikely(stackPtr == stack)) break;
+
+ STAT3(normal.trav_stack_pop,1,1,1);
+ stackPtr--;
+ /*! pop next node */
+ NodeRef cur = NodeRef(stackPtr->child);
+ size_t m_trav_active = stackPtr->mask & m_active;
+ if (unlikely(!m_trav_active)) continue;
+ assert(m_trav_active);
+ NodeRef parent = stackPtr->parent;
+
+ while (1)
+ {
+ if (unlikely(cur.isLeaf())) break;
+ const AABBNode* __restrict__ const node = cur.getAABBNode();
+ parent = cur;
+
+ __aligned(64) size_t maskK[N];
+ for (size_t i = 0; i < N; i++)
+ maskK[i] = m_trav_active;
+
+ vfloat<N> dist;
+ const size_t m_node_hit = traverseCoherentStream(m_trav_active, packets, node, frustum, maskK, dist);
+ if (unlikely(m_node_hit == 0)) goto pop;
+
+ BVHNNodeTraverserStreamHitCoherent<N, types>::traverseAnyHit(cur, m_trav_active, vbool<N>((int)m_node_hit), (size_t*)maskK, stackPtr);
+ assert(m_trav_active);
+ }
+
+ /* non-root and leaf => full culling test for all rays */
+ if (unlikely(parent != 0 && cur.isLeaf()))
+ {
+ const AABBNode* __restrict__ const node = parent.getAABBNode();
+ size_t boxID = 0xff;
+ for (size_t i = 0; i < N; i++)
+ if (node->child(i) == cur) { boxID = i; break; }
+ assert(boxID < N);
+ assert(cur == node->child(boxID));
+ m_trav_active = intersectAABBNodePacket(m_trav_active, packets, node, boxID, frustum.nf);
+ }
+
+ /*! this is a leaf node */
+ assert(cur != BVH::emptyNode);
+ STAT3(normal.trav_leaves, 1, 1, 1);
+ size_t num; PrimitiveK<K>* prim = (PrimitiveK<K>*)cur.leaf(num);
+
+ size_t bits = m_trav_active & m_active;
+ /*! intersect stream of rays with all primitives */
+ size_t lazy_node = 0;
+#if defined(__SSE4_2__)
+ STAT_USER(1,(popcnt(bits)+K-1)/K*4);
+#endif
+ while (bits)
+ {
+ size_t i = bsf(bits) / K;
+ const size_t m_isec = ((((size_t)1 << K)-1) << (i*K));
+ assert(m_isec & bits);
+ bits &= ~m_isec;
+ TravRayKStream<K, robust>& p = packets[i];
+ vbool<K> m_valid = p.tnear <= p.tfar;
+ vbool<K> m_hit = PrimitiveIntersectorK<K>::occludedK(m_valid, This, *inputPackets[i], context, prim, num, lazy_node);
+ inputPackets[i]->tfar = select(m_hit & m_valid, vfloat<K>(neg_inf), inputPackets[i]->tfar);
+ m_active &= ~((size_t)movemask(m_hit) << (i*K));
+ }
+
+ } // traversal + intersection
+ }
+
+
+ template<int N, int types, bool robust, typename PrimitiveIntersector>
+ template<int K>
+ __forceinline void BVHNIntersectorStream<N, types, robust, PrimitiveIntersector>::occludedIncoherent(Accel::Intersectors* __restrict__ This,
+ RayK<K>** inputPackets,
+ size_t numOctantRays,
+ IntersectContext* context)
+ {
+ assert(!context->isCoherent());
+ assert(types & BVH_FLAG_ALIGNED_NODE);
+
+ __aligned(64) TravRayKStream<K,robust> packet[MAX_INTERNAL_STREAM_SIZE/K];
+
+ assert(numOctantRays <= 32);
+ const size_t numPackets = (numOctantRays+K-1)/K;
+ size_t m_active = 0;
+ for (size_t i = 0; i < numPackets; i++)
+ {
+ const vfloat<K> tnear = inputPackets[i]->tnear();
+ const vfloat<K> tfar = inputPackets[i]->tfar;
+ vbool<K> m_valid = (tnear <= tfar) & (tnear >= 0.0f);
+ m_active |= (size_t)movemask(m_valid) << (K*i);
+ const Vec3vf<K>& org = inputPackets[i]->org;
+ const Vec3vf<K>& dir = inputPackets[i]->dir;
+ vfloat<K> packet_min_dist = max(tnear, 0.0f);
+ vfloat<K> packet_max_dist = select(m_valid, tfar, neg_inf);
+ new (&packet[i]) TravRayKStream<K,robust>(org, dir, packet_min_dist, packet_max_dist);
+ }
+
+ BVH* __restrict__ bvh = (BVH*)This->ptr;
+
+ StackItemMaskT<NodeRef> stack[stackSizeSingle]; // stack of nodes
+ StackItemMaskT<NodeRef>* stackPtr = stack + 1; // current stack pointer
+ stack[0].ptr = bvh->root;
+ stack[0].mask = m_active;
+
+ size_t terminated = ~m_active;
+
+ /* near/far offsets based on first ray */
+ const NearFarPrecalculations nf(Vec3fa(packet[0].rdir.x[0], packet[0].rdir.y[0], packet[0].rdir.z[0]), N);
+
+ while (1) pop:
+ {
+ if (unlikely(stackPtr == stack)) break;
+ STAT3(shadow.trav_stack_pop,1,1,1);
+ stackPtr--;
+ NodeRef cur = NodeRef(stackPtr->ptr);
+ size_t cur_mask = stackPtr->mask & (~terminated);
+ if (unlikely(cur_mask == 0)) continue;
+
+ while (true)
+ {
+ /*! stop if we found a leaf node */
+ if (unlikely(cur.isLeaf())) break;
+ const AABBNode* __restrict__ const node = cur.getAABBNode();
+
+ const vint<N> vmask = traverseIncoherentStream(cur_mask, packet, node, nf, shiftTable);
+
+ size_t mask = movemask(vmask != vint<N>(zero));
+ if (unlikely(mask == 0)) goto pop;
+
+ __aligned(64) unsigned int child_mask[N];
+ vint<N>::storeu(child_mask, vmask); // this explicit store here causes much better code generation
+
+ /*! one child is hit, continue with that child */
+ size_t r = bscf(mask);
+ assert(r < N);
+ cur = node->child(r);
+ BVHN<N>::prefetch(cur,types);
+ cur_mask = child_mask[r];
+
+ /* simple in order sequence */
+ assert(cur != BVH::emptyNode);
+ if (likely(mask == 0)) continue;
+ stackPtr->ptr = cur;
+ stackPtr->mask = cur_mask;
+ stackPtr++;
+
+ for (; ;)
+ {
+ r = bscf(mask);
+ assert(r < N);
+
+ cur = node->child(r);
+ BVHN<N>::prefetch(cur,types);
+ cur_mask = child_mask[r];
+ assert(cur != BVH::emptyNode);
+ if (likely(mask == 0)) break;
+ stackPtr->ptr = cur;
+ stackPtr->mask = cur_mask;
+ stackPtr++;
+ }
+ }
+
+ /*! this is a leaf node */
+ assert(cur != BVH::emptyNode);
+ STAT3(shadow.trav_leaves,1,1,1);
+ size_t num; PrimitiveK<K>* prim = (PrimitiveK<K>*)cur.leaf(num);
+
+ size_t bits = cur_mask;
+ size_t lazy_node = 0;
+
+ for (; bits != 0;)
+ {
+ const size_t rayID = bscf(bits);
+
+ RayK<K> &ray = *inputPackets[rayID / K];
+ const size_t k = rayID % K;
+ if (PrimitiveIntersectorK<K>::occluded(This, ray, k, context, prim, num, lazy_node))
+ {
+ ray.tfar[k] = neg_inf;
+ terminated |= (size_t)1 << rayID;
+ }
+
+ /* lazy node */
+ if (unlikely(lazy_node))
+ {
+ stackPtr->ptr = lazy_node;
+ stackPtr->mask = cur_mask;
+ stackPtr++;
+ }
+ }
+
+ if (unlikely(terminated == (size_t)-1)) break;
+ }
+ }
+
+ ////////////////////////////////////////////////////////////////////////////////
+ /// ArrayIntersectorKStream Definitions
+ ////////////////////////////////////////////////////////////////////////////////
+
+ template<bool filter>
+ struct Triangle4IntersectorStreamMoeller {
+ template<int K> using Type = ArrayIntersectorKStream<K,TriangleMIntersectorKMoeller<4 COMMA K COMMA true>>;
+ };
+
+ template<bool filter>
+ struct Triangle4vIntersectorStreamPluecker {
+ template<int K> using Type = ArrayIntersectorKStream<K,TriangleMvIntersectorKPluecker<4 COMMA K COMMA true>>;
+ };
+
+ template<bool filter>
+ struct Triangle4iIntersectorStreamMoeller {
+ template<int K> using Type = ArrayIntersectorKStream<K,TriangleMiIntersectorKMoeller<4 COMMA K COMMA true>>;
+ };
+
+ template<bool filter>
+ struct Triangle4iIntersectorStreamPluecker {
+ template<int K> using Type = ArrayIntersectorKStream<K,TriangleMiIntersectorKPluecker<4 COMMA K COMMA true>>;
+ };
+
+ template<bool filter>
+ struct Quad4vIntersectorStreamMoeller {
+ template<int K> using Type = ArrayIntersectorKStream<K,QuadMvIntersectorKMoeller<4 COMMA K COMMA true>>;
+ };
+
+ template<bool filter>
+ struct Quad4iIntersectorStreamMoeller {
+ template<int K> using Type = ArrayIntersectorKStream<K,QuadMiIntersectorKMoeller<4 COMMA K COMMA true>>;
+ };
+
+ template<bool filter>
+ struct Quad4vIntersectorStreamPluecker {
+ template<int K> using Type = ArrayIntersectorKStream<K,QuadMvIntersectorKPluecker<4 COMMA K COMMA true>>;
+ };
+
+ template<bool filter>
+ struct Quad4iIntersectorStreamPluecker {
+ template<int K> using Type = ArrayIntersectorKStream<K,QuadMiIntersectorKPluecker<4 COMMA K COMMA true>>;
+ };
+
+ struct ObjectIntersectorStream {
+ template<int K> using Type = ArrayIntersectorKStream<K,ObjectIntersectorK<K COMMA false>>;
+ };
+
+ struct InstanceIntersectorStream {
+ template<int K> using Type = ArrayIntersectorKStream<K,InstanceIntersectorK<K>>;
+ };
+
+ // =====================================================================================================
+ // =====================================================================================================
+ // =====================================================================================================
+
+ template<int N>
+ void BVHNIntersectorStreamPacketFallback<N>::intersect(Accel::Intersectors* __restrict__ This,
+ RayHitN** inputRays,
+ size_t numTotalRays,
+ IntersectContext* context)
+ {
+ if (unlikely(context->isCoherent()))
+ intersectK(This, (RayHitK<VSIZEL>**)inputRays, numTotalRays, context);
+ else
+ intersectK(This, (RayHitK<VSIZEX>**)inputRays, numTotalRays, context);
+ }
+
+ template<int N>
+ void BVHNIntersectorStreamPacketFallback<N>::occluded(Accel::Intersectors* __restrict__ This,
+ RayN** inputRays,
+ size_t numTotalRays,
+ IntersectContext* context)
+ {
+ if (unlikely(context->isCoherent()))
+ occludedK(This, (RayK<VSIZEL>**)inputRays, numTotalRays, context);
+ else
+ occludedK(This, (RayK<VSIZEX>**)inputRays, numTotalRays, context);
+ }
+
+ template<int N>
+ template<int K>
+ __noinline void BVHNIntersectorStreamPacketFallback<N>::intersectK(Accel::Intersectors* __restrict__ This,
+ RayHitK<K>** inputRays,
+ size_t numTotalRays,
+ IntersectContext* context)
+ {
+ /* fallback to packets */
+ for (size_t i = 0; i < numTotalRays; i += K)
+ {
+ const vint<K> vi = vint<K>(int(i)) + vint<K>(step);
+ vbool<K> valid = vi < vint<K>(int(numTotalRays));
+ RayHitK<K>& ray = *(inputRays[i / K]);
+ valid &= ray.tnear() <= ray.tfar;
+ This->intersect(valid, ray, context);
+ }
+ }
+
+ template<int N>
+ template<int K>
+ __noinline void BVHNIntersectorStreamPacketFallback<N>::occludedK(Accel::Intersectors* __restrict__ This,
+ RayK<K>** inputRays,
+ size_t numTotalRays,
+ IntersectContext* context)
+ {
+ /* fallback to packets */
+ for (size_t i = 0; i < numTotalRays; i += K)
+ {
+ const vint<K> vi = vint<K>(int(i)) + vint<K>(step);
+ vbool<K> valid = vi < vint<K>(int(numTotalRays));
+ RayK<K>& ray = *(inputRays[i / K]);
+ valid &= ray.tnear() <= ray.tfar;
+ This->occluded(valid, ray, context);
+ }
+ }
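+
+ /* Editor's sketch (not part of the original file): both fallbacks above walk
+    the stream in packets of K rays and mask off the tail lanes when the
+    stream length is not a multiple of K. The per-lane validity test, written
+    out with scalars (names here are illustrative): */
+ static inline bool packetLaneValidSketch(size_t i, size_t lane, size_t numTotalRays)
+ {
+   // lane 'lane' of the packet starting at ray 'i' is live only while
+   // i + lane still addresses a ray of the stream -- exactly what the
+   // per-lane compare 'vi < vint<K>(int(numTotalRays))' computes above
+   return i + lane < numTotalRays;
+ }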
+ }
+}
diff --git a/thirdparty/embree/kernels/bvh/bvh_intersector_stream_bvh4.cpp b/thirdparty/embree/kernels/bvh/bvh_intersector_stream_bvh4.cpp
new file mode 100644
index 0000000000..c3e5f137b8
--- /dev/null
+++ b/thirdparty/embree/kernels/bvh/bvh_intersector_stream_bvh4.cpp
@@ -0,0 +1,36 @@
+// Copyright 2009-2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+
+#include "bvh_intersector_stream.cpp"
+
+namespace embree
+{
+ namespace isa
+ {
+
+ ////////////////////////////////////////////////////////////////////////////////
+ /// General BVHIntersectorStreamPacketFallback Intersector
+ ////////////////////////////////////////////////////////////////////////////////
+
+ DEFINE_INTERSECTORN(BVH4IntersectorStreamPacketFallback,BVHNIntersectorStreamPacketFallback<4>);
+
+ ////////////////////////////////////////////////////////////////////////////////
+ /// BVH4IntersectorStream Definitions
+ ////////////////////////////////////////////////////////////////////////////////
+
+ IF_ENABLED_TRIS(DEFINE_INTERSECTORN(BVH4Triangle4iIntersectorStreamMoeller, BVHNIntersectorStream<4 COMMA BVH_AN1 COMMA false COMMA Triangle4iIntersectorStreamMoeller<true>>));
+ IF_ENABLED_TRIS(DEFINE_INTERSECTORN(BVH4Triangle4vIntersectorStreamPluecker, BVHNIntersectorStream<4 COMMA BVH_AN1 COMMA true COMMA Triangle4vIntersectorStreamPluecker<true>>));
+ IF_ENABLED_TRIS(DEFINE_INTERSECTORN(BVH4Triangle4iIntersectorStreamPluecker, BVHNIntersectorStream<4 COMMA BVH_AN1 COMMA true COMMA Triangle4iIntersectorStreamPluecker<true>>));
+ IF_ENABLED_TRIS(DEFINE_INTERSECTORN(BVH4Triangle4IntersectorStreamMoeller, BVHNIntersectorStream<4 COMMA BVH_AN1 COMMA false COMMA Triangle4IntersectorStreamMoeller<true>>));
+ IF_ENABLED_TRIS(DEFINE_INTERSECTORN(BVH4Triangle4IntersectorStreamMoellerNoFilter, BVHNIntersectorStream<4 COMMA BVH_AN1 COMMA false COMMA Triangle4IntersectorStreamMoeller<false>>));
+
+ IF_ENABLED_QUADS(DEFINE_INTERSECTORN(BVH4Quad4vIntersectorStreamMoeller, BVHNIntersectorStream<4 COMMA BVH_AN1 COMMA false COMMA Quad4vIntersectorStreamMoeller<true>>));
+ IF_ENABLED_QUADS(DEFINE_INTERSECTORN(BVH4Quad4vIntersectorStreamMoellerNoFilter,BVHNIntersectorStream<4 COMMA BVH_AN1 COMMA false COMMA Quad4vIntersectorStreamMoeller<false>>));
+ IF_ENABLED_QUADS(DEFINE_INTERSECTORN(BVH4Quad4iIntersectorStreamMoeller, BVHNIntersectorStream<4 COMMA BVH_AN1 COMMA false COMMA Quad4iIntersectorStreamMoeller<true>>));
+ IF_ENABLED_QUADS(DEFINE_INTERSECTORN(BVH4Quad4vIntersectorStreamPluecker, BVHNIntersectorStream<4 COMMA BVH_AN1 COMMA true COMMA Quad4vIntersectorStreamPluecker<true>>));
+ IF_ENABLED_QUADS(DEFINE_INTERSECTORN(BVH4Quad4iIntersectorStreamPluecker, BVHNIntersectorStream<4 COMMA BVH_AN1 COMMA true COMMA Quad4iIntersectorStreamPluecker<true>>));
+
+ IF_ENABLED_USER(DEFINE_INTERSECTORN(BVH4VirtualIntersectorStream,BVHNIntersectorStream<4 COMMA BVH_AN1 COMMA false COMMA ObjectIntersectorStream>));
+ IF_ENABLED_INSTANCE(DEFINE_INTERSECTORN(BVH4InstanceIntersectorStream,BVHNIntersectorStream<4 COMMA BVH_AN1 COMMA false COMMA InstanceIntersectorStream>));
+ }
+}
diff --git a/thirdparty/embree/kernels/bvh/bvh_intersector_stream_filters.cpp b/thirdparty/embree/kernels/bvh/bvh_intersector_stream_filters.cpp
new file mode 100644
index 0000000000..b858eb163f
--- /dev/null
+++ b/thirdparty/embree/kernels/bvh/bvh_intersector_stream_filters.cpp
@@ -0,0 +1,657 @@
+// Copyright 2009-2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+
+#include "bvh_intersector_stream_filters.h"
+#include "bvh_intersector_stream.h"
+
+namespace embree
+{
+ namespace isa
+ {
+ template<int K, bool intersect>
+ __noinline void RayStreamFilter::filterAOS(Scene* scene, void* _rayN, size_t N, size_t stride, IntersectContext* context)
+ {
+ RayStreamAOS rayN(_rayN);
+
+ /* use fast path for coherent ray mode */
+ if (unlikely(context->isCoherent()))
+ {
+ __aligned(64) RayTypeK<K, intersect> rays[MAX_INTERNAL_STREAM_SIZE / K];
+ __aligned(64) RayTypeK<K, intersect>* rayPtrs[MAX_INTERNAL_STREAM_SIZE / K];
+
+ for (size_t i = 0; i < N; i += MAX_INTERNAL_STREAM_SIZE)
+ {
+ const size_t size = min(N - i, MAX_INTERNAL_STREAM_SIZE);
+
+ /* convert from AOS to SOA */
+ for (size_t j = 0; j < size; j += K)
+ {
+ const vint<K> vij = vint<K>(int(i+j)) + vint<K>(step);
+ const vbool<K> valid = vij < vint<K>(int(N));
+ const vint<K> offset = vij * int(stride);
+ const size_t packetIndex = j / K;
+
+ RayTypeK<K, intersect> ray = rayN.getRayByOffset<K>(valid, offset);
+ ray.tnear() = select(valid, ray.tnear(), zero);
+ ray.tfar = select(valid, ray.tfar, neg_inf);
+
+ rays[packetIndex] = ray;
+ rayPtrs[packetIndex] = &rays[packetIndex]; // rayPtrs might get reordered for occludedN
+ }
+
+ /* trace stream */
+ scene->intersectors.intersectN(rayPtrs, size, context);
+
+ /* convert from SOA to AOS */
+ for (size_t j = 0; j < size; j += K)
+ {
+ const vint<K> vij = vint<K>(int(i+j)) + vint<K>(step);
+ const vbool<K> valid = vij < vint<K>(int(N));
+ const vint<K> offset = vij * int(stride);
+ const size_t packetIndex = j / K;
+ rayN.setHitByOffset(valid, offset, rays[packetIndex]);
+ }
+ }
+ }
+ else if (unlikely(!intersect))
+ {
+ /* octant sorting for occlusion rays */
+ __aligned(64) unsigned int octants[8][MAX_INTERNAL_STREAM_SIZE];
+ __aligned(64) RayK<K> rays[MAX_INTERNAL_STREAM_SIZE / K];
+ __aligned(64) RayK<K>* rayPtrs[MAX_INTERNAL_STREAM_SIZE / K];
+
+ unsigned int raysInOctant[8];
+ for (unsigned int i = 0; i < 8; i++)
+ raysInOctant[i] = 0;
+ size_t inputRayID = 0;
+
+ for (;;)
+ {
+ int curOctant = -1;
+
+ /* sort rays into octants */
+ for (; inputRayID < N;)
+ {
+ const Ray& ray = rayN.getRayByOffset(inputRayID * stride);
+
+ /* skip invalid rays */
+ if (unlikely(ray.tnear() > ray.tfar || ray.tfar < 0.0f)) { inputRayID++; continue; } // ignore invalid or already occluded rays
+#if defined(EMBREE_IGNORE_INVALID_RAYS)
+ if (unlikely(!ray.valid())) { inputRayID++; continue; }
+#endif
+
+ const unsigned int octantID = movemask(vfloat4(Vec3fa(ray.dir)) < 0.0f) & 0x7;
+
+ assert(octantID < 8);
+ octants[octantID][raysInOctant[octantID]++] = (unsigned int)inputRayID;
+ inputRayID++;
+ if (unlikely(raysInOctant[octantID] == MAX_INTERNAL_STREAM_SIZE))
+ {
+ curOctant = octantID;
+ break;
+ }
+ }
+
+ /* need to flush rays in octant? */
+ if (unlikely(curOctant == -1))
+ {
+ for (unsigned int i = 0; i < 8; i++)
+ if (raysInOctant[i]) { curOctant = i; break; }
+ }
+
+ /* all rays traced? */
+ if (unlikely(curOctant == -1))
+ break;
+
+ unsigned int* const rayIDs = &octants[curOctant][0];
+ const unsigned int numOctantRays = raysInOctant[curOctant];
+ assert(numOctantRays);
+
+ for (unsigned int j = 0; j < numOctantRays; j += K)
+ {
+ const vint<K> vi = vint<K>(int(j)) + vint<K>(step);
+ const vbool<K> valid = vi < vint<K>(int(numOctantRays));
+ const vint<K> offset = *(vint<K>*)&rayIDs[j] * int(stride);
+ RayK<K>& ray = rays[j/K];
+ rayPtrs[j/K] = &ray;
+ ray = rayN.getRayByOffset<K>(valid, offset);
+ ray.tnear() = select(valid, ray.tnear(), zero);
+ ray.tfar = select(valid, ray.tfar, neg_inf);
+ }
+
+ scene->intersectors.occludedN(rayPtrs, numOctantRays, context);
+
+ for (unsigned int j = 0; j < numOctantRays; j += K)
+ {
+ const vint<K> vi = vint<K>(int(j)) + vint<K>(step);
+ const vbool<K> valid = vi < vint<K>(int(numOctantRays));
+ const vint<K> offset = *(vint<K>*)&rayIDs[j] * int(stride);
+ rayN.setHitByOffset<K>(valid, offset, rays[j/K]);
+ }
+
+ raysInOctant[curOctant] = 0;
+ }
+ }
+ else
+ {
+ /* fallback to packets */
+ for (size_t i = 0; i < N; i += K)
+ {
+ const vint<K> vi = vint<K>(int(i)) + vint<K>(step);
+ vbool<K> valid = vi < vint<K>(int(N));
+ const vint<K> offset = vi * int(stride);
+
+ RayTypeK<K, intersect> ray = rayN.getRayByOffset<K>(valid, offset);
+ valid &= ray.tnear() <= ray.tfar;
+
+ scene->intersectors.intersect(valid, ray, context);
+
+ rayN.setHitByOffset<K>(valid, offset, ray);
+ }
+ }
+ }
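+
+ /* Editor's sketch (not part of the original file): the occlusion path above
+    bins rays by the octant of their direction so that rays traced together
+    traverse the BVH in a similar order. The octant id packs the direction
+    sign bits, which is what movemask(vfloat4(dir) < 0.0f) & 0x7 yields
+    (x -> bit 0, y -> bit 1, z -> bit 2): */
+ static inline unsigned int octantIDSketch(float dx, float dy, float dz)
+ {
+   return (dx < 0.0f ? 1u : 0u)   // bit 0: ray points toward -x
+        | (dy < 0.0f ? 2u : 0u)   // bit 1: ray points toward -y
+        | (dz < 0.0f ? 4u : 0u);  // bit 2: ray points toward -z
+ }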
+
+ template<int K, bool intersect>
+ __noinline void RayStreamFilter::filterAOP(Scene* scene, void** _rayN, size_t N, IntersectContext* context)
+ {
+ RayStreamAOP rayN(_rayN);
+
+ /* use fast path for coherent ray mode */
+ if (unlikely(context->isCoherent()))
+ {
+ __aligned(64) RayTypeK<K, intersect> rays[MAX_INTERNAL_STREAM_SIZE / K];
+ __aligned(64) RayTypeK<K, intersect>* rayPtrs[MAX_INTERNAL_STREAM_SIZE / K];
+
+ for (size_t i = 0; i < N; i += MAX_INTERNAL_STREAM_SIZE)
+ {
+ const size_t size = min(N - i, MAX_INTERNAL_STREAM_SIZE);
+
+ /* convert from AOP to SOA */
+ for (size_t j = 0; j < size; j += K)
+ {
+ const vint<K> vij = vint<K>(int(i+j)) + vint<K>(step);
+ const vbool<K> valid = vij < vint<K>(int(N));
+ const size_t packetIndex = j / K;
+
+ RayTypeK<K, intersect> ray = rayN.getRayByIndex<K>(valid, vij);
+ ray.tnear() = select(valid, ray.tnear(), zero);
+ ray.tfar = select(valid, ray.tfar, neg_inf);
+
+ rays[packetIndex] = ray;
+ rayPtrs[packetIndex] = &rays[packetIndex]; // rayPtrs might get reordered for occludedN
+ }
+
+ /* trace stream */
+ scene->intersectors.intersectN(rayPtrs, size, context);
+
+ /* convert from SOA to AOP */
+ for (size_t j = 0; j < size; j += K)
+ {
+ const vint<K> vij = vint<K>(int(i+j)) + vint<K>(step);
+ const vbool<K> valid = vij < vint<K>(int(N));
+ const size_t packetIndex = j / K;
+
+ rayN.setHitByIndex<K>(valid, vij, rays[packetIndex]);
+ }
+ }
+ }
+ else if (unlikely(!intersect))
+ {
+ /* octant sorting for occlusion rays */
+ __aligned(64) unsigned int octants[8][MAX_INTERNAL_STREAM_SIZE];
+ __aligned(64) RayK<K> rays[MAX_INTERNAL_STREAM_SIZE / K];
+ __aligned(64) RayK<K>* rayPtrs[MAX_INTERNAL_STREAM_SIZE / K];
+
+ unsigned int raysInOctant[8];
+ for (unsigned int i = 0; i < 8; i++)
+ raysInOctant[i] = 0;
+ size_t inputRayID = 0;
+
+ for (;;)
+ {
+ int curOctant = -1;
+
+ /* sort rays into octants */
+ for (; inputRayID < N;)
+ {
+ const Ray& ray = rayN.getRayByIndex(inputRayID);
+
+ /* skip invalid rays */
+ if (unlikely(ray.tnear() > ray.tfar || ray.tfar < 0.0f)) { inputRayID++; continue; } // ignore invalid or already occluded rays
+#if defined(EMBREE_IGNORE_INVALID_RAYS)
+ if (unlikely(!ray.valid())) { inputRayID++; continue; }
+#endif
+
+ const unsigned int octantID = movemask(lt_mask(ray.dir,Vec3fa(0.0f)));
+
+ assert(octantID < 8);
+ octants[octantID][raysInOctant[octantID]++] = (unsigned int)inputRayID;
+ inputRayID++;
+ if (unlikely(raysInOctant[octantID] == MAX_INTERNAL_STREAM_SIZE))
+ {
+ curOctant = octantID;
+ break;
+ }
+ }
+
+ /* need to flush rays in octant? */
+ if (unlikely(curOctant == -1))
+ {
+ for (unsigned int i = 0; i < 8; i++)
+ if (raysInOctant[i]) { curOctant = i; break; }
+ }
+
+ /* all rays traced? */
+ if (unlikely(curOctant == -1))
+ break;
+
+ unsigned int* const rayIDs = &octants[curOctant][0];
+ const unsigned int numOctantRays = raysInOctant[curOctant];
+ assert(numOctantRays);
+
+ for (unsigned int j = 0; j < numOctantRays; j += K)
+ {
+ const vint<K> vi = vint<K>(int(j)) + vint<K>(step);
+ const vbool<K> valid = vi < vint<K>(int(numOctantRays));
+ const vint<K> index = *(vint<K>*)&rayIDs[j];
+ RayK<K>& ray = rays[j/K];
+ rayPtrs[j/K] = &ray;
+ ray = rayN.getRayByIndex<K>(valid, index);
+ ray.tnear() = select(valid, ray.tnear(), zero);
+ ray.tfar = select(valid, ray.tfar, neg_inf);
+ }
+
+ scene->intersectors.occludedN(rayPtrs, numOctantRays, context);
+
+ for (unsigned int j = 0; j < numOctantRays; j += K)
+ {
+ const vint<K> vi = vint<K>(int(j)) + vint<K>(step);
+ const vbool<K> valid = vi < vint<K>(int(numOctantRays));
+ const vint<K> index = *(vint<K>*)&rayIDs[j];
+ rayN.setHitByIndex<K>(valid, index, rays[j/K]);
+ }
+
+ raysInOctant[curOctant] = 0;
+ }
+ }
+ else
+ {
+ /* fallback to packets */
+ for (size_t i = 0; i < N; i += K)
+ {
+ const vint<K> vi = vint<K>(int(i)) + vint<K>(step);
+ vbool<K> valid = vi < vint<K>(int(N));
+
+ RayTypeK<K, intersect> ray = rayN.getRayByIndex<K>(valid, vi);
+ valid &= ray.tnear() <= ray.tfar;
+
+ scene->intersectors.intersect(valid, ray, context);
+
+ rayN.setHitByIndex<K>(valid, vi, ray);
+ }
+ }
+ }
+
+ template<int K, bool intersect>
+ __noinline void RayStreamFilter::filterSOA(Scene* scene, char* rayData, size_t N, size_t numPackets, size_t stride, IntersectContext* context)
+ {
+ const size_t rayDataAlignment = (size_t)rayData % (K*sizeof(float));
+ const size_t offsetAlignment = (size_t)stride % (K*sizeof(float));
+
+ /* fast path for packets with the correct width and data alignment */
+ if (likely(N == K &&
+ !rayDataAlignment &&
+ !offsetAlignment))
+ {
+ if (unlikely(context->isCoherent()))
+ {
+ __aligned(64) RayTypeK<K, intersect>* rayPtrs[MAX_INTERNAL_STREAM_SIZE / K];
+
+ size_t packetIndex = 0;
+ for (size_t i = 0; i < numPackets; i++)
+ {
+ const size_t offset = i * stride;
+ RayTypeK<K, intersect>& ray = *(RayTypeK<K, intersect>*)(rayData + offset);
+ rayPtrs[packetIndex++] = &ray;
+
+ /* trace as stream */
+ if (unlikely(packetIndex == MAX_INTERNAL_STREAM_SIZE / K))
+ {
+ const size_t size = packetIndex*K;
+ scene->intersectors.intersectN(rayPtrs, size, context);
+ packetIndex = 0;
+ }
+ }
+
+ /* flush remaining packets */
+ if (unlikely(packetIndex > 0))
+ {
+ const size_t size = packetIndex*K;
+ scene->intersectors.intersectN(rayPtrs, size, context);
+ }
+ }
+ else if (unlikely(!intersect))
+ {
+ /* octant sorting for occlusion rays */
+ RayStreamSOA rayN(rayData, K);
+
+ __aligned(64) unsigned int octants[8][MAX_INTERNAL_STREAM_SIZE];
+ __aligned(64) RayK<K> rays[MAX_INTERNAL_STREAM_SIZE / K];
+ __aligned(64) RayK<K>* rayPtrs[MAX_INTERNAL_STREAM_SIZE / K];
+
+ unsigned int raysInOctant[8];
+ for (unsigned int i = 0; i < 8; i++)
+ raysInOctant[i] = 0;
+ size_t inputRayID = 0;
+
+ for (;;)
+ {
+ int curOctant = -1;
+
+ /* sort rays into octants */
+ for (; inputRayID < N*numPackets;)
+ {
+ const size_t offset = (inputRayID / K) * stride + (inputRayID % K) * sizeof(float);
+
+ /* skip invalid rays */
+ if (unlikely(!rayN.isValidByOffset(offset))) { inputRayID++; continue; } // ignore invalid or already occluded rays
+ #if defined(EMBREE_IGNORE_INVALID_RAYS)
+ __aligned(64) Ray ray = rayN.getRayByOffset(offset);
+ if (unlikely(!ray.valid())) { inputRayID++; continue; }
+ #endif
+
+ const unsigned int octantID = (unsigned int)rayN.getOctantByOffset(offset);
+
+ assert(octantID < 8);
+ octants[octantID][raysInOctant[octantID]++] = (unsigned int)offset;
+ inputRayID++;
+ if (unlikely(raysInOctant[octantID] == MAX_INTERNAL_STREAM_SIZE))
+ {
+ curOctant = octantID;
+ break;
+ }
+ }
+
+ /* need to flush rays in octant? */
+ if (unlikely(curOctant == -1))
+ {
+ for (unsigned int i = 0; i < 8; i++)
+ if (raysInOctant[i]) { curOctant = i; break; }
+ }
+
+ /* all rays traced? */
+ if (unlikely(curOctant == -1))
+ break;
+
+ unsigned int* const rayOffsets = &octants[curOctant][0];
+ const unsigned int numOctantRays = raysInOctant[curOctant];
+ assert(numOctantRays);
+
+ for (unsigned int j = 0; j < numOctantRays; j += K)
+ {
+ const vint<K> vi = vint<K>(int(j)) + vint<K>(step);
+ const vbool<K> valid = vi < vint<K>(int(numOctantRays));
+ const vint<K> offset = *(vint<K>*)&rayOffsets[j];
+ RayK<K>& ray = rays[j/K];
+ rayPtrs[j/K] = &ray;
+ ray = rayN.getRayByOffset<K>(valid, offset);
+ ray.tnear() = select(valid, ray.tnear(), zero);
+ ray.tfar = select(valid, ray.tfar, neg_inf);
+ }
+
+ scene->intersectors.occludedN(rayPtrs, numOctantRays, context);
+
+ for (unsigned int j = 0; j < numOctantRays; j += K)
+ {
+ const vint<K> vi = vint<K>(int(j)) + vint<K>(step);
+ const vbool<K> valid = vi < vint<K>(int(numOctantRays));
+ const vint<K> offset = *(vint<K>*)&rayOffsets[j];
+ rayN.setHitByOffset(valid, offset, rays[j/K]);
+ }
+ raysInOctant[curOctant] = 0;
+ }
+ }
+ else
+ {
+ /* fallback to packets */
+ for (size_t i = 0; i < numPackets; i++)
+ {
+ const size_t offset = i * stride;
+ RayTypeK<K, intersect>& ray = *(RayTypeK<K, intersect>*)(rayData + offset);
+ const vbool<K> valid = ray.tnear() <= ray.tfar;
+
+ scene->intersectors.intersect(valid, ray, context);
+ }
+ }
+ }
+ else
+ {
+ /* fallback to packets for arbitrary packet size and alignment */
+ for (size_t i = 0; i < numPackets; i++)
+ {
+ const size_t offsetN = i * stride;
+ RayStreamSOA rayN(rayData + offsetN, N);
+
+ for (size_t j = 0; j < N; j += K)
+ {
+ const size_t offset = j * sizeof(float);
+ vbool<K> valid = (vint<K>(int(j)) + vint<K>(step)) < vint<K>(int(N));
+ RayTypeK<K, intersect> ray = rayN.getRayByOffset<K>(valid, offset);
+ valid &= ray.tnear() <= ray.tfar;
+
+ scene->intersectors.intersect(valid, ray, context);
+
+ rayN.setHitByOffset(valid, offset, ray);
+ }
+ }
+ }
+ }
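+
+ /* Editor's sketch (not part of the original file): in the SOA layout each
+    packet of K rays stores its fields as K-wide arrays, with consecutive
+    packets 'stride' bytes apart. The byte offset of a single ray's lane, as
+    computed in the octant-sorting loop above: */
+ static inline size_t soaLaneOffsetSketch(size_t inputRayID, size_t K, size_t stride)
+ {
+   const size_t packet = inputRayID / K; // which K-wide packet holds the ray
+   const size_t lane   = inputRayID % K; // which lane inside that packet
+   return packet * stride + lane * sizeof(float); // start of the lane's first field
+ }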
+
+ template<int K, bool intersect>
+ __noinline void RayStreamFilter::filterSOP(Scene* scene, const void* _rayN, size_t N, IntersectContext* context)
+ {
+ RayStreamSOP& rayN = *(RayStreamSOP*)_rayN;
+
+ /* use fast path for coherent ray mode */
+ if (unlikely(context->isCoherent()))
+ {
+ __aligned(64) RayTypeK<K, intersect> rays[MAX_INTERNAL_STREAM_SIZE / K];
+ __aligned(64) RayTypeK<K, intersect>* rayPtrs[MAX_INTERNAL_STREAM_SIZE / K];
+
+ for (size_t i = 0; i < N; i += MAX_INTERNAL_STREAM_SIZE)
+ {
+ const size_t size = min(N - i, MAX_INTERNAL_STREAM_SIZE);
+
+ /* convert from SOP to SOA */
+ for (size_t j = 0; j < size; j += K)
+ {
+ const vint<K> vij = vint<K>(int(i+j)) + vint<K>(step);
+ const vbool<K> valid = vij < vint<K>(int(N));
+ const size_t offset = (i+j) * sizeof(float);
+ const size_t packetIndex = j / K;
+
+ RayTypeK<K, intersect> ray = rayN.getRayByOffset<K>(valid, offset);
+ ray.tnear() = select(valid, ray.tnear(), zero);
+ ray.tfar = select(valid, ray.tfar, neg_inf);
+
+ rays[packetIndex] = ray;
+ rayPtrs[packetIndex] = &rays[packetIndex]; // rayPtrs might get reordered for occludedN
+ }
+
+ /* trace stream */
+ scene->intersectors.intersectN(rayPtrs, size, context);
+
+ /* convert from SOA to SOP */
+ for (size_t j = 0; j < size; j += K)
+ {
+ const vint<K> vij = vint<K>(int(i+j)) + vint<K>(step);
+ const vbool<K> valid = vij < vint<K>(int(N));
+ const size_t offset = (i+j) * sizeof(float);
+ const size_t packetIndex = j / K;
+
+ rayN.setHitByOffset(valid, offset, rays[packetIndex]);
+ }
+ }
+ }
+ else if (unlikely(!intersect))
+ {
+ /* octant sorting for occlusion rays */
+ __aligned(64) unsigned int octants[8][MAX_INTERNAL_STREAM_SIZE];
+ __aligned(64) RayK<K> rays[MAX_INTERNAL_STREAM_SIZE / K];
+ __aligned(64) RayK<K>* rayPtrs[MAX_INTERNAL_STREAM_SIZE / K];
+
+ unsigned int raysInOctant[8];
+ for (unsigned int i = 0; i < 8; i++)
+ raysInOctant[i] = 0;
+ size_t inputRayID = 0;
+
+ for (;;)
+ {
+ int curOctant = -1;
+
+ /* sort rays into octants */
+ for (; inputRayID < N;)
+ {
+ const size_t offset = inputRayID * sizeof(float);
+ /* skip invalid rays */
+ if (unlikely(!rayN.isValidByOffset(offset))) { inputRayID++; continue; } // ignore invalid or already occluded rays
+#if defined(EMBREE_IGNORE_INVALID_RAYS)
+ __aligned(64) Ray ray = rayN.getRayByOffset(offset);
+ if (unlikely(!ray.valid())) { inputRayID++; continue; }
+#endif
+
+ const unsigned int octantID = (unsigned int)rayN.getOctantByOffset(offset);
+
+ assert(octantID < 8);
+ octants[octantID][raysInOctant[octantID]++] = (unsigned int)offset;
+ inputRayID++;
+ if (unlikely(raysInOctant[octantID] == MAX_INTERNAL_STREAM_SIZE))
+ {
+ curOctant = octantID;
+ break;
+ }
+ }
+
+ /* need to flush rays in octant? */
+ if (unlikely(curOctant == -1))
+ {
+ for (unsigned int i = 0; i < 8; i++)
+ if (raysInOctant[i]) { curOctant = i; break; }
+ }
+
+ /* all rays traced? */
+ if (unlikely(curOctant == -1))
+ break;
+
+ unsigned int* const rayOffsets = &octants[curOctant][0];
+ const unsigned int numOctantRays = raysInOctant[curOctant];
+ assert(numOctantRays);
+
+ for (unsigned int j = 0; j < numOctantRays; j += K)
+ {
+ const vint<K> vi = vint<K>(int(j)) + vint<K>(step);
+ const vbool<K> valid = vi < vint<K>(int(numOctantRays));
+ const vint<K> offset = *(vint<K>*)&rayOffsets[j];
+ RayK<K>& ray = rays[j/K];
+ rayPtrs[j/K] = &ray;
+ ray = rayN.getRayByOffset<K>(valid, offset);
+ ray.tnear() = select(valid, ray.tnear(), zero);
+ ray.tfar = select(valid, ray.tfar, neg_inf);
+ }
+
+ scene->intersectors.occludedN(rayPtrs, numOctantRays, context);
+
+ for (unsigned int j = 0; j < numOctantRays; j += K)
+ {
+ const vint<K> vi = vint<K>(int(j)) + vint<K>(step);
+ const vbool<K> valid = vi < vint<K>(int(numOctantRays));
+ const vint<K> offset = *(vint<K>*)&rayOffsets[j];
+ rayN.setHitByOffset(valid, offset, rays[j/K]);
+ }
+
+ raysInOctant[curOctant] = 0;
+ }
+ }
+ else
+ {
+ /* fallback to packets */
+ for (size_t i = 0; i < N; i += K)
+ {
+ const vint<K> vi = vint<K>(int(i)) + vint<K>(step);
+ vbool<K> valid = vi < vint<K>(int(N));
+ const size_t offset = i * sizeof(float);
+
+ RayTypeK<K, intersect> ray = rayN.getRayByOffset<K>(valid, offset);
+ valid &= ray.tnear() <= ray.tfar;
+
+ scene->intersectors.intersect(valid, ray, context);
+
+ rayN.setHitByOffset(valid, offset, ray);
+ }
+ }
+ }
+
+
+ void RayStreamFilter::intersectAOS(Scene* scene, RTCRayHit* _rayN, size_t N, size_t stride, IntersectContext* context) {
+ if (unlikely(context->isCoherent()))
+ filterAOS<VSIZEL, true>(scene, _rayN, N, stride, context);
+ else
+ filterAOS<VSIZEX, true>(scene, _rayN, N, stride, context);
+ }
+
+ void RayStreamFilter::occludedAOS(Scene* scene, RTCRay* _rayN, size_t N, size_t stride, IntersectContext* context) {
+ if (unlikely(context->isCoherent()))
+ filterAOS<VSIZEL, false>(scene, _rayN, N, stride, context);
+ else
+ filterAOS<VSIZEX, false>(scene, _rayN, N, stride, context);
+ }
+
+ void RayStreamFilter::intersectAOP(Scene* scene, RTCRayHit** _rayN, size_t N, IntersectContext* context) {
+ if (unlikely(context->isCoherent()))
+ filterAOP<VSIZEL, true>(scene, (void**)_rayN, N, context);
+ else
+ filterAOP<VSIZEX, true>(scene, (void**)_rayN, N, context);
+ }
+
+ void RayStreamFilter::occludedAOP(Scene* scene, RTCRay** _rayN, size_t N, IntersectContext* context) {
+ if (unlikely(context->isCoherent()))
+ filterAOP<VSIZEL, false>(scene, (void**)_rayN, N, context);
+ else
+ filterAOP<VSIZEX, false>(scene, (void**)_rayN, N, context);
+ }
+
+ void RayStreamFilter::intersectSOA(Scene* scene, char* rayData, size_t N, size_t numPackets, size_t stride, IntersectContext* context) {
+ if (unlikely(context->isCoherent()))
+ filterSOA<VSIZEL, true>(scene, rayData, N, numPackets, stride, context);
+ else
+ filterSOA<VSIZEX, true>(scene, rayData, N, numPackets, stride, context);
+ }
+
+ void RayStreamFilter::occludedSOA(Scene* scene, char* rayData, size_t N, size_t numPackets, size_t stride, IntersectContext* context) {
+ if (unlikely(context->isCoherent()))
+ filterSOA<VSIZEL, false>(scene, rayData, N, numPackets, stride, context);
+ else
+ filterSOA<VSIZEX, false>(scene, rayData, N, numPackets, stride, context);
+ }
+
+ void RayStreamFilter::intersectSOP(Scene* scene, const RTCRayHitNp* _rayN, size_t N, IntersectContext* context) {
+ if (unlikely(context->isCoherent()))
+ filterSOP<VSIZEL, true>(scene, _rayN, N, context);
+ else
+ filterSOP<VSIZEX, true>(scene, _rayN, N, context);
+ }
+
+ void RayStreamFilter::occludedSOP(Scene* scene, const RTCRayNp* _rayN, size_t N, IntersectContext* context) {
+ if (unlikely(context->isCoherent()))
+ filterSOP<VSIZEL, false>(scene, _rayN, N, context);
+ else
+ filterSOP<VSIZEX, false>(scene, _rayN, N, context);
+ }
+
+
+ RayStreamFilterFuncs rayStreamFilterFuncs() {
+ return RayStreamFilterFuncs(RayStreamFilter::intersectAOS, RayStreamFilter::intersectAOP, RayStreamFilter::intersectSOA, RayStreamFilter::intersectSOP,
+ RayStreamFilter::occludedAOS, RayStreamFilter::occludedAOP, RayStreamFilter::occludedSOA, RayStreamFilter::occludedSOP);
+ }
+ };
+};
diff --git a/thirdparty/embree/kernels/config.h b/thirdparty/embree/kernels/config.h
index 80a8ab2a56..2bf7e93587 100644
--- a/thirdparty/embree/kernels/config.h
+++ b/thirdparty/embree/kernels/config.h
@@ -16,7 +16,7 @@
/* #undef EMBREE_GEOMETRY_INSTANCE */
/* #undef EMBREE_GEOMETRY_GRID */
/* #undef EMBREE_GEOMETRY_POINT */
-/* #undef EMBREE_RAY_PACKETS */
+#define EMBREE_RAY_PACKETS
/* #undef EMBREE_COMPACT_POLYS */
#define EMBREE_CURVE_SELF_INTERSECTION_AVOIDANCE_FACTOR 2.0
diff --git a/thirdparty/embree/kernels/hash.h b/thirdparty/embree/kernels/hash.h
index 10f315cee7..470e15f03e 100644
--- a/thirdparty/embree/kernels/hash.h
+++ b/thirdparty/embree/kernels/hash.h
@@ -2,4 +2,4 @@
// Copyright 2009-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
-#define RTC_HASH "7c53133eb21424f7f0ae1e25bf357e358feaf6ab"
+#define RTC_HASH "12b99393438a4cc9e478e33459eed78bec6233fd"
diff --git a/thirdparty/misc/stb_vorbis.c b/thirdparty/misc/stb_vorbis.c
deleted file mode 100644
index a8cbfa6c23..0000000000
--- a/thirdparty/misc/stb_vorbis.c
+++ /dev/null
@@ -1,5563 +0,0 @@
-// Ogg Vorbis audio decoder - v1.20 - public domain
-// http://nothings.org/stb_vorbis/
-//
-// Original version written by Sean Barrett in 2007.
-//
-// Originally sponsored by RAD Game Tools. Seeking implementation
-// sponsored by Phillip Bennefall, Marc Andersen, Aaron Baker,
-// Elias Software, Aras Pranckevicius, and Sean Barrett.
-//
-// LICENSE
-//
-// See end of file for license information.
-//
-// Limitations:
-//
-// - floor 0 not supported (used in old ogg vorbis files pre-2004)
-// - lossless sample-truncation at beginning ignored
-// - cannot concatenate multiple vorbis streams
-// - sample positions are 32-bit, limiting seekable 192 kHz
-// files to around 6 hours (Ogg supports 64-bit)
-//
-// Feature contributors:
-// Dougall Johnson (sample-exact seeking)
-//
-// Bugfix/warning contributors:
-// Terje Mathisen Niklas Frykholm Andy Hill
-// Casey Muratori John Bolton Gargaj
-// Laurent Gomila Marc LeBlanc Ronny Chevalier
-// Bernhard Wodo Evan Balster github:alxprd
-// Tom Beaumont Ingo Leitgeb Nicolas Guillemot
-// Phillip Bennefall Rohit Thiago Goulart
-// github:manxorist saga musix github:infatum
-// Timur Gagiev Maxwell Koo Peter Waller
-// github:audinowho Dougall Johnson David Reid
-// github:Clownacy Pedro J. Estebanez Remi Verschelde
-//
-// Partial history:
-// 1.20 - 2020-07-11 - several small fixes
-// 1.19 - 2020-02-05 - warnings
-// 1.18 - 2020-02-02 - fix seek bugs; parse header comments; misc warnings etc.
-// 1.17 - 2019-07-08 - fix CVE-2019-13217..CVE-2019-13223 (by ForAllSecure)
-// 1.16 - 2019-03-04 - fix warnings
-// 1.15 - 2019-02-07 - explicit failure if Ogg Skeleton data is found
-// 1.14 - 2018-02-11 - delete bogus dealloca usage
-// 1.13 - 2018-01-29 - fix truncation of last frame (hopefully)
-// 1.12 - 2017-11-21 - limit residue begin/end to blocksize/2 to avoid large temp allocs in bad/corrupt files
-// 1.11 - 2017-07-23 - fix MinGW compilation
-// 1.10 - 2017-03-03 - more robust seeking; fix negative ilog(); clear error in open_memory
-// 1.09 - 2016-04-04 - back out 'truncation of last frame' fix from previous version
-// 1.08 - 2016-04-02 - warnings; setup memory leaks; truncation of last frame
-// 1.07 - 2015-01-16 - fixes for crashes on invalid files; warning fixes; const
-// 1.06 - 2015-08-31 - full, correct support for seeking API (Dougall Johnson)
-// some crash fixes when out of memory or with corrupt files
-// fix some inappropriately signed shifts
-// 1.05 - 2015-04-19 - don't define __forceinline if it's redundant
-// 1.04 - 2014-08-27 - fix missing const-correct case in API
-// 1.03 - 2014-08-07 - warning fixes
-// 1.02 - 2014-07-09 - declare qsort comparison as explicitly _cdecl in Windows
-// 1.01 - 2014-06-18 - fix stb_vorbis_get_samples_float (interleaved was correct)
-// 1.0 - 2014-05-26 - fix memory leaks; fix warnings; fix bugs in >2-channel;
-// (API change) report sample rate for decode-full-file funcs
-//
-// See end of file for full version history.
-
-
-//////////////////////////////////////////////////////////////////////////////
-//
-// HEADER BEGINS HERE
-//
-
-#ifndef STB_VORBIS_INCLUDE_STB_VORBIS_H
-#define STB_VORBIS_INCLUDE_STB_VORBIS_H
-
-#if defined(STB_VORBIS_NO_CRT) && !defined(STB_VORBIS_NO_STDIO)
-#define STB_VORBIS_NO_STDIO 1
-#endif
-
-#ifndef STB_VORBIS_NO_STDIO
-#include <stdio.h>
-#endif
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/////////// THREAD SAFETY
-
-// Individual stb_vorbis* handles are not thread-safe; you cannot decode from
-// them from multiple threads at the same time. However, you can have multiple
-// stb_vorbis* handles and decode from them independently in multiple threads.
-
-
-/////////// MEMORY ALLOCATION
-
-// normally stb_vorbis uses malloc() to allocate memory at startup,
-// and alloca() to allocate temporary memory during a frame on the
-// stack. (Memory consumption will depend on the amount of setup
-// data in the file and how you set the compile flags for speed
-// vs. size. In my test files the maximal-size usage is ~150KB.)
-//
-// You can modify the wrapper functions in the source (setup_malloc,
-// setup_temp_malloc, temp_malloc) to change this behavior, or you
-// can use a simpler allocation model: you pass in a buffer from
-// which stb_vorbis will allocate _all_ its memory (including the
-// temp memory). "open" may fail with a VORBIS_outofmem if you
-// do not pass in enough data; there is no way to determine how
-// much you do need except to succeed (at which point you can
-// query get_info to find the exact amount required. yes I know
-// this is lame).
-//
-// If you pass in a non-NULL buffer of the type below, allocation
-// will occur from it as described above. Otherwise just pass NULL
-// to use malloc()/alloca()
-
-typedef struct
-{
- char *alloc_buffer;
- int alloc_buffer_length_in_bytes;
-} stb_vorbis_alloc;
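-
-// For example (editor's sketch, not from the original file; the 4MB arena
-// size is an arbitrary assumption, and data/len are your whole in-memory file):
-//
-//    static char arena[4 << 20];
-//    stb_vorbis_alloc a;
-//    a.alloc_buffer = arena;
-//    a.alloc_buffer_length_in_bytes = (int) sizeof(arena);
-//    int error = 0;
-//    stb_vorbis *v = stb_vorbis_open_memory(data, len, &error, &a);
-//    // v == NULL with error == VORBIS_outofmem means the arena is too small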
-
-
-/////////// FUNCTIONS USEABLE WITH ALL INPUT MODES
-
-typedef struct stb_vorbis stb_vorbis;
-
-typedef struct
-{
- unsigned int sample_rate;
- int channels;
-
- unsigned int setup_memory_required;
- unsigned int setup_temp_memory_required;
- unsigned int temp_memory_required;
-
- int max_frame_size;
-} stb_vorbis_info;
-
-typedef struct
-{
- char *vendor;
-
- int comment_list_length;
- char **comment_list;
-} stb_vorbis_comment;
-
-// get general information about the file
-extern stb_vorbis_info stb_vorbis_get_info(stb_vorbis *f);
-
-// get ogg comments
-extern stb_vorbis_comment stb_vorbis_get_comment(stb_vorbis *f);
-
-// get the last error detected (clears it, too)
-extern int stb_vorbis_get_error(stb_vorbis *f);
-
-// close an ogg vorbis file and free all memory in use
-extern void stb_vorbis_close(stb_vorbis *f);
-
-// this function returns the offset (in samples) from the beginning of the
-// file that will be returned by the next decode, if it is known, or -1
-// otherwise. after a flush_pushdata() call, this may take a while before
-// it becomes valid again.
-// NOT WORKING YET after a seek with PULLDATA API
-extern int stb_vorbis_get_sample_offset(stb_vorbis *f);
-
-// returns the current seek point within the file, or offset from the beginning
-// of the memory buffer. In pushdata mode it returns 0.
-extern unsigned int stb_vorbis_get_file_offset(stb_vorbis *f);
-
-/////////// PUSHDATA API
-
-#ifndef STB_VORBIS_NO_PUSHDATA_API
-
-// this API allows you to get blocks of data from any source and hand
-// them to stb_vorbis. you have to buffer them; stb_vorbis will tell
-// you how much it used, and you have to give it the rest next time;
-// and stb_vorbis may not have enough data to work with and you will
-// need to give it the same data again PLUS more. Note that the Vorbis
-// specification does not bound the size of an individual frame.
-
-extern stb_vorbis *stb_vorbis_open_pushdata(
- const unsigned char * datablock, int datablock_length_in_bytes,
- int *datablock_memory_consumed_in_bytes,
- int *error,
- const stb_vorbis_alloc *alloc_buffer);
-// create a vorbis decoder by passing in the initial data block containing
-// the ogg&vorbis headers (you don't need to parse them, just provide
-// the first N bytes of the file--you're told if it's not enough, see below)
-// on success, returns an stb_vorbis *, does not set error, returns the amount of
-// data parsed/consumed on this call in *datablock_memory_consumed_in_bytes;
-// on failure, returns NULL and sets *error, and does not change *datablock_memory_consumed
-// if returns NULL and *error is VORBIS_need_more_data, then the input block was
-// incomplete and you need to pass in a larger block from the start of the file
-
-extern int stb_vorbis_decode_frame_pushdata(
- stb_vorbis *f,
- const unsigned char *datablock, int datablock_length_in_bytes,
- int *channels, // place to write number of float * buffers
- float ***output, // place to write float ** array of float * buffers
- int *samples // place to write number of output samples
- );
-// decode a frame of audio sample data if possible from the passed-in data block
-//
-// return value: number of bytes we used from datablock
-//
-// possible cases:
-// 0 bytes used, 0 samples output (need more data)
-// N bytes used, 0 samples output (resynching the stream, keep going)
-// N bytes used, M samples output (one frame of data)
-// note that after opening a file, you will ALWAYS get one N-bytes, 0-sample
-// frame, because Vorbis always "discards" the first frame.
-//
-// Note that on resynch, stb_vorbis will rarely consume all of the buffer,
-// instead only datablock_length_in_bytes-3 or less. This is because it wants
-// to avoid missing parts of a page header if they cross a datablock boundary,
-// without writing state-machiney code to record a partial detection.
-//
-// The number of channels returned is stored in *channels (which can be
-// NULL--it is always the same as the number of channels reported by
-// get_info). *output will contain an array of float* buffers, one per
-// channel. In other words, (*output)[0][0] contains the first sample from
-// the first channel, and (*output)[1][0] contains the first sample from
-// the second channel.
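-//
-// An illustrative consumption loop (editor's sketch, not from the original
-// file; 'v' comes from stb_vorbis_open_pushdata(), and buffer refilling and
-// compaction are left as assumptions):
-//
-//    int chans, n;
-//    float **outputs;
-//    for (;;) {
-//       int used = stb_vorbis_decode_frame_pushdata(v, buf, buf_len, &chans, &outputs, &n);
-//       if (used == 0) { /* need more data: append more bytes to buf */ continue; }
-//       /* drop the first 'used' bytes of buf before the next call */
-//       if (n > 0) { /* outputs[c][0..n-1] holds this frame for channel c */ }
-//    }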
-
-extern void stb_vorbis_flush_pushdata(stb_vorbis *f);
-// inform stb_vorbis that your next datablock will not be contiguous with
-// previous ones (e.g. you've seeked in the data); future attempts to decode
-// frames will cause stb_vorbis to resynchronize (as noted above), and
-// once it sees a valid Ogg page (typically 4-8KB, as large as 64KB), it
-// will begin decoding the _next_ frame.
-//
-// if you want to seek using pushdata, you need to seek in your file, then
-// call stb_vorbis_flush_pushdata(), then start decoding again; once
-// decoding returns data, call stb_vorbis_get_sample_offset, and
-// if you don't like the result, seek your file again and repeat.
-#endif
-
-
-////////// PULLING INPUT API
-
-#ifndef STB_VORBIS_NO_PULLDATA_API
-// This API assumes stb_vorbis is allowed to pull data from a source--
-// either a block of memory containing the _entire_ vorbis stream, or a
-// FILE * that you or it create, or possibly some other reading mechanism
-// if you go modify the source to replace the FILE * case with some kind
-// of callback to your code. (But if you don't support seeking, you may
-// just want to go ahead and use pushdata.)
-
-#if !defined(STB_VORBIS_NO_STDIO) && !defined(STB_VORBIS_NO_INTEGER_CONVERSION)
-extern int stb_vorbis_decode_filename(const char *filename, int *channels, int *sample_rate, short **output);
-#endif
-#if !defined(STB_VORBIS_NO_INTEGER_CONVERSION)
-extern int stb_vorbis_decode_memory(const unsigned char *mem, int len, int *channels, int *sample_rate, short **output);
-#endif
-// decode an entire file and output the data interleaved into a malloc()ed
-// buffer stored in *output. The return value is the number of samples
-// decoded, or -1 if the file could not be opened or was not an ogg vorbis file.
-// When you're done with it, just free() the pointer returned in *output.
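-//
-// Typical whole-file use (editor's sketch, not from the original file):
-//
-//    int chans, rate;
-//    short *pcm;
-//    int n = stb_vorbis_decode_filename("clip.ogg", &chans, &rate, &pcm);
-//    if (n < 0) { /* open failed or not an ogg vorbis file */ }
-//    else { /* pcm holds n samples per channel, interleaved */ free(pcm); }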
-
-extern stb_vorbis * stb_vorbis_open_memory(const unsigned char *data, int len,
- int *error, const stb_vorbis_alloc *alloc_buffer);
-// create an ogg vorbis decoder from an ogg vorbis stream in memory (note
-// this must be the entire stream!). on failure, returns NULL and sets *error
-
-#ifndef STB_VORBIS_NO_STDIO
-extern stb_vorbis * stb_vorbis_open_filename(const char *filename,
- int *error, const stb_vorbis_alloc *alloc_buffer);
-// create an ogg vorbis decoder from a filename via fopen(). on failure,
-// returns NULL and sets *error (possibly to VORBIS_file_open_failure).
-
-extern stb_vorbis * stb_vorbis_open_file(FILE *f, int close_handle_on_close,
- int *error, const stb_vorbis_alloc *alloc_buffer);
-// create an ogg vorbis decoder from an open FILE *, looking for a stream at
-// the _current_ seek point (ftell). on failure, returns NULL and sets *error.
-// note that stb_vorbis must "own" this stream; if you seek it in between
-// calls to stb_vorbis, it will become confused. Moreover, if you attempt to
-// perform stb_vorbis_seek_*() operations on this file, it will assume it
-// owns the _entire_ rest of the file after the start point. Use the next
-// function, stb_vorbis_open_file_section(), to limit it.
-
-extern stb_vorbis * stb_vorbis_open_file_section(FILE *f, int close_handle_on_close,
- int *error, const stb_vorbis_alloc *alloc_buffer, unsigned int len);
-// create an ogg vorbis decoder from an open FILE *, looking for a stream at
-// the _current_ seek point (ftell); the stream will be of length 'len' bytes.
-// on failure, returns NULL and sets *error. note that stb_vorbis must "own"
-// this stream; if you seek it in between calls to stb_vorbis, it will become
-// confused.
-#endif
-
-extern int stb_vorbis_seek_frame(stb_vorbis *f, unsigned int sample_number);
-extern int stb_vorbis_seek(stb_vorbis *f, unsigned int sample_number);
-// these functions seek in the Vorbis file to (approximately) 'sample_number'.
-// after calling seek_frame(), the next call to get_frame_*() will include
-// the specified sample. after calling stb_vorbis_seek(), the next call to
-// stb_vorbis_get_samples_* will start with the specified sample. If you
-// do not need to seek to EXACTLY the target sample when using get_samples_*,
-// you can also use seek_frame().
-
-extern int stb_vorbis_seek_start(stb_vorbis *f);
-// this function is equivalent to stb_vorbis_seek(f,0)
-
-extern unsigned int stb_vorbis_stream_length_in_samples(stb_vorbis *f);
-extern float stb_vorbis_stream_length_in_seconds(stb_vorbis *f);
-// these functions return the total length of the vorbis stream
-
-extern int stb_vorbis_get_frame_float(stb_vorbis *f, int *channels, float ***output);
-// decode the next frame and return the number of samples. the number of
-// channels returned is stored in *channels (which can be NULL--it is always
-// the same as the number of channels reported by get_info). *output will
-// contain an array of float* buffers, one per channel. These outputs will
-// be overwritten on the next call to stb_vorbis_get_frame_*.
-//
-// You generally should not intermix calls to stb_vorbis_get_frame_*()
-// and stb_vorbis_get_samples_*(), since the latter calls the former.
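-//
-// A frame-by-frame pull loop (editor's sketch, not from the original file;
-// 'v' comes from one of the open functions above):
-//
-//    float **frame;
-//    int chans, n;
-//    while ((n = stb_vorbis_get_frame_float(v, &chans, &frame)) > 0) {
-//       /* frame[c][0..n-1] is channel c; valid until the next get_frame_* call */
-//    }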
-
-#ifndef STB_VORBIS_NO_INTEGER_CONVERSION
-extern int stb_vorbis_get_frame_short_interleaved(stb_vorbis *f, int num_c, short *buffer, int num_shorts);
-extern int stb_vorbis_get_frame_short (stb_vorbis *f, int num_c, short **buffer, int num_samples);
-#endif
-// decode the next frame and return the number of *samples* per channel.
-// Note that for interleaved data, you pass in the number of shorts (the
-// size of your array), but the return value is the number of samples per
-// channel, not the total number of samples.
-//
-// The data is coerced to the number of channels you request according to the
-// channel coercion rules (see below). You must pass in the size of your
-// buffer(s) so that stb_vorbis will not overwrite the end of the buffer.
-// The maximum buffer size needed can be gotten from get_info(); however,
-// the Vorbis I specification implies an absolute maximum of 4096 samples
-// per channel.
-
-// Channel coercion rules:
-// Let M be the number of channels requested, and N the number of channels present,
-// and Cn be the nth channel; let stereo L be the sum of all L and center channels,
-// and stereo R be the sum of all R and center channels (channel assignment from the
-// vorbis spec).
-// M N output
-// 1 k sum(Ck) for all k
-// 2 * stereo L, stereo R
-// k l k > l, the first l channels, then 0s
-// k l k <= l, the first k channels
-// Note that this is not _good_ surround etc. mixing at all! It's just so
-// you get something useful.
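-//
-// Worked example (editor's note, not from the original file): requesting
-// stereo from a mono file hits the "2 *" row, so the single channel lands in
-// both outputs; requesting 6 channels from a stereo file hits the "k > l"
-// row, so channels 2..5 are zero-filled.
-//
-//    short buf[2*4096]; // 4096 = max samples per channel per the spec note above
-//    int n = stb_vorbis_get_frame_short_interleaved(v, 2, buf, 2*4096);
-//    /* n samples per channel, coerced to stereo regardless of the source */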
-
-extern int stb_vorbis_get_samples_float_interleaved(stb_vorbis *f, int channels, float *buffer, int num_floats);
-extern int stb_vorbis_get_samples_float(stb_vorbis *f, int channels, float **buffer, int num_samples);
-// gets num_samples samples, not necessarily on a frame boundary--this requires
-// buffering so you have to supply the buffers. DOES NOT APPLY THE COERCION RULES.
-// Returns the number of samples stored per channel; it may be less than requested
-// at the end of the file. If there are no more samples in the file, returns 0.
-
-#ifndef STB_VORBIS_NO_INTEGER_CONVERSION
-extern int stb_vorbis_get_samples_short_interleaved(stb_vorbis *f, int channels, short *buffer, int num_shorts);
-extern int stb_vorbis_get_samples_short(stb_vorbis *f, int channels, short **buffer, int num_samples);
-#endif
-// gets num_samples samples, not necessarily on a frame boundary--this requires
-// buffering so you have to supply the buffers. Applies the coercion rules above
-// to produce 'channels' channels. Returns the number of samples stored per channel;
-// it may be less than requested at the end of the file. If there are no more
-// samples in the file, returns 0.
-
-#endif
-
-//////// ERROR CODES
-
-enum STBVorbisError
-{
- VORBIS__no_error,
-
- VORBIS_need_more_data=1, // not a real error
-
- VORBIS_invalid_api_mixing, // can't mix API modes
- VORBIS_outofmem, // not enough memory
- VORBIS_feature_not_supported, // uses floor 0
- VORBIS_too_many_channels, // STB_VORBIS_MAX_CHANNELS is too small
- VORBIS_file_open_failure, // fopen() failed
- VORBIS_seek_without_length, // can't seek in unknown-length file
-
- VORBIS_unexpected_eof=10, // file is truncated?
- VORBIS_seek_invalid, // seek past EOF
-
- // decoding errors (corrupt/invalid stream) -- you probably
- // don't care about the exact details of these
-
- // vorbis errors:
- VORBIS_invalid_setup=20,
- VORBIS_invalid_stream,
-
- // ogg errors:
- VORBIS_missing_capture_pattern=30,
- VORBIS_invalid_stream_structure_version,
- VORBIS_continued_packet_flag_invalid,
- VORBIS_incorrect_stream_serial_number,
- VORBIS_invalid_first_page,
- VORBIS_bad_packet_type,
- VORBIS_cant_find_last_page,
- VORBIS_seek_failed,
- VORBIS_ogg_skeleton_not_supported
-};
-
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif // STB_VORBIS_INCLUDE_STB_VORBIS_H
-//
-// HEADER ENDS HERE
-//
-//////////////////////////////////////////////////////////////////////////////
-
-#ifndef STB_VORBIS_HEADER_ONLY
-
-// global configuration settings (e.g. set these in the project/makefile),
-// or just set them in this file at the top (although ideally the first few
-// should be visible when the header file is compiled too, although it's not
-// crucial)
-
-// STB_VORBIS_NO_PUSHDATA_API
-// does not compile the code for the various stb_vorbis_*_pushdata()
-// functions
-// #define STB_VORBIS_NO_PUSHDATA_API
-
-// STB_VORBIS_NO_PULLDATA_API
-// does not compile the code for the non-pushdata APIs
-// #define STB_VORBIS_NO_PULLDATA_API
-
-// STB_VORBIS_NO_STDIO
-// does not compile the code for the APIs that use FILE *s internally
-// or externally (implied by STB_VORBIS_NO_PULLDATA_API)
-// #define STB_VORBIS_NO_STDIO
-
-// STB_VORBIS_NO_INTEGER_CONVERSION
-// does not compile the code for converting audio sample data from
-// float to integer (implied by STB_VORBIS_NO_PULLDATA_API)
-// #define STB_VORBIS_NO_INTEGER_CONVERSION
-
-// STB_VORBIS_NO_FAST_SCALED_FLOAT
-//     does not use the fast float-to-int trick that accelerates float-to-int
-//     conversion on most platforms; the trick requires endianness to be defined correctly.
-//#define STB_VORBIS_NO_FAST_SCALED_FLOAT
-
-
-// STB_VORBIS_MAX_CHANNELS [number]
-// globally define this to the maximum number of channels you need.
-// The spec does not put a restriction on channels except that
-// the count is stored in a byte, so 255 is the hard limit.
-// Reducing this saves about 16 bytes per value, so using 16 saves
-// (255-16)*16 or around 4KB. Plus any other memory usage
-// I forgot to account for. Can probably go as low as 8 (7.1 audio),
-// 6 (5.1 audio), or 2 (stereo only).
-#ifndef STB_VORBIS_MAX_CHANNELS
-#define STB_VORBIS_MAX_CHANNELS 16 // enough for anyone?
-#endif
-
-// STB_VORBIS_PUSHDATA_CRC_COUNT [number]
-// after a flush_pushdata(), stb_vorbis begins scanning for the
-// next valid page, without backtracking. when it finds something
-// that looks like a page, it streams through it and verifies its
-// CRC32. Should that validation fail, it keeps scanning. But it's
-// possible that _while_ streaming through to check the CRC32 of
-// one candidate page, it sees another candidate page. This #define
-// determines how many "overlapping" candidate pages it can search
-// at once. Note that "real" pages are typically ~4KB to ~8KB, whereas
-// garbage pages could be as big as 64KB, but probably average ~16KB.
-// We don't want to hose ourselves by scanning an apparent 64KB page and
-// missing a ton of real ones in the interim; hence the minimum of 2.
-#ifndef STB_VORBIS_PUSHDATA_CRC_COUNT
-#define STB_VORBIS_PUSHDATA_CRC_COUNT 4
-#endif
-
-// STB_VORBIS_FAST_HUFFMAN_LENGTH [number]
-// sets the log size of the huffman-acceleration table. Maximum
-// supported value is 24. With larger numbers, more decodings are O(1),
-// but the larger table worsens cache behavior, so you'll have
-// to probe (and try multiple ogg vorbis files) to find the sweet spot.
-#ifndef STB_VORBIS_FAST_HUFFMAN_LENGTH
-#define STB_VORBIS_FAST_HUFFMAN_LENGTH 10
-#endif
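-
-// For scale (editor's note, not from the original file): the table costs
-// (1 << STB_VORBIS_FAST_HUFFMAN_LENGTH) * sizeof(entry) bytes per codebook,
-// so the default of 10 with the 16-bit entries selected below is
-// (1 << 10) * 2 = 2KB, while the maximum of 24 would be (1 << 24) * 2 = 32MB.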
-
-// STB_VORBIS_FAST_BINARY_LENGTH [number]
-// sets the log size of the binary-search acceleration table. this
-// is used in similar fashion to the fast-huffman size to set initial
-// parameters for the binary search
-
-// STB_VORBIS_FAST_HUFFMAN_INT
-// The fast huffman tables are much more efficient if they can be
-// stored as 16-bit results instead of 32-bit results. This restricts
-// the codebooks to having only 65535 possible outcomes, though.
-// (At least, accelerated by the huffman table.)
-#ifndef STB_VORBIS_FAST_HUFFMAN_INT
-#define STB_VORBIS_FAST_HUFFMAN_SHORT
-#endif
-
-// STB_VORBIS_NO_HUFFMAN_BINARY_SEARCH
-// If the 'fast huffman' search doesn't succeed, then stb_vorbis falls
-// back on binary searching for the correct one. This requires storing
-// extra tables with the huffman codes in sorted order. Defining this
-// symbol saves that space at the cost of speed by forcing a linear
-// search in the non-fast case, except for "sparse" codebooks.
-// #define STB_VORBIS_NO_HUFFMAN_BINARY_SEARCH
-
-// STB_VORBIS_DIVIDES_IN_RESIDUE
-// by default stb_vorbis precomputes the result of the scalar residue
-// decoding that would otherwise require a divide per chunk. You can trade
-// that space back for time by defining this symbol.
-// #define STB_VORBIS_DIVIDES_IN_RESIDUE
-
-// STB_VORBIS_DIVIDES_IN_CODEBOOK
-// vorbis VQ codebooks can be encoded two ways: with every case explicitly
-// stored, or with all elements being chosen from a small range of values,
-// and all values possible in all elements. By default, stb_vorbis expands
-// this latter kind out to look like the former kind for ease of decoding,
-// because otherwise an integer divide-per-vector-element is required to
-// unpack the index. If you define STB_VORBIS_DIVIDES_IN_CODEBOOK, you can
-// trade off storage for speed.
-//#define STB_VORBIS_DIVIDES_IN_CODEBOOK
-
-#ifdef STB_VORBIS_CODEBOOK_SHORTS
-#error "STB_VORBIS_CODEBOOK_SHORTS is no longer supported as it produced incorrect results for some input formats"
-#endif
-
-// STB_VORBIS_DIVIDE_TABLE
-// this replaces small integer divides in the floor decode loop with
-// table lookups. made less than 1% difference, so disabled by default.
-
-// STB_VORBIS_NO_INLINE_DECODE
-// disables the inlining of the scalar codebook fast-huffman decode.
-// might save a little codespace; useful for debugging
-// #define STB_VORBIS_NO_INLINE_DECODE
-
-// STB_VORBIS_NO_DEFER_FLOOR
-// Normally we only decode the floor without synthesizing the actual
-// full curve. We can instead synthesize the curve immediately. This
-// requires more memory and is very likely slower, so I don't think
-// you'd ever want to do it except for debugging.
-// #define STB_VORBIS_NO_DEFER_FLOOR
-
-
-
-
-//////////////////////////////////////////////////////////////////////////////
-
-#ifdef STB_VORBIS_NO_PULLDATA_API
- #define STB_VORBIS_NO_INTEGER_CONVERSION
- #define STB_VORBIS_NO_STDIO
-#endif
-
-#if defined(STB_VORBIS_NO_CRT) && !defined(STB_VORBIS_NO_STDIO)
- #define STB_VORBIS_NO_STDIO 1
-#endif
-
-#ifndef STB_VORBIS_NO_INTEGER_CONVERSION
-#ifndef STB_VORBIS_NO_FAST_SCALED_FLOAT
-
- // only need endianness for fast-float-to-int, which we don't
- // use for pushdata
-
- #ifndef STB_VORBIS_BIG_ENDIAN
- #define STB_VORBIS_ENDIAN 0
- #else
- #define STB_VORBIS_ENDIAN 1
- #endif
-
-#endif
-#endif
-
-
-#ifndef STB_VORBIS_NO_STDIO
-#include <stdio.h>
-#endif
-
-#ifndef STB_VORBIS_NO_CRT
- #include <stdlib.h>
- #include <string.h>
- #include <assert.h>
- #include <math.h>
-
- // find definition of alloca if it's not in stdlib.h:
- #if defined(_MSC_VER) || defined(__MINGW32__)
- #include <malloc.h>
- #endif
- #if defined(__linux__) || defined(__linux) || defined(__EMSCRIPTEN__) || defined(__NEWLIB__)
- #include <alloca.h>
- #endif
-#else // STB_VORBIS_NO_CRT
- #define NULL 0
- #define malloc(s) 0
- #define free(s) ((void) 0)
- #define realloc(s) 0
-#endif // STB_VORBIS_NO_CRT
-
-#include <limits.h>
-
-#ifdef __MINGW32__
- // eff you mingw:
- // "fixed":
- // http://sourceforge.net/p/mingw-w64/mailman/message/32882927/
- // "no that broke the build, reverted, who cares about C":
- // http://sourceforge.net/p/mingw-w64/mailman/message/32890381/
- #ifdef __forceinline
- #undef __forceinline
- #endif
- #define __forceinline
- #ifndef alloca
- #define alloca __builtin_alloca
- #endif
-#elif !defined(_MSC_VER)
- #if __GNUC__
- #define __forceinline inline
- #else
- #define __forceinline
- #endif
-#endif
-
-#if STB_VORBIS_MAX_CHANNELS > 256
-#error "Value of STB_VORBIS_MAX_CHANNELS outside of allowed range"
-#endif
-
-#if STB_VORBIS_FAST_HUFFMAN_LENGTH > 24
-#error "Value of STB_VORBIS_FAST_HUFFMAN_LENGTH outside of allowed range"
-#endif
-
-
-#if 0
-#include <crtdbg.h>
-#define CHECK(f) _CrtIsValidHeapPointer(f->channel_buffers[1])
-#else
-#define CHECK(f) ((void) 0)
-#endif
-
-#define MAX_BLOCKSIZE_LOG 13 // from specification
-#define MAX_BLOCKSIZE (1 << MAX_BLOCKSIZE_LOG)
-
-
-typedef unsigned char uint8;
-typedef signed char int8;
-typedef unsigned short uint16;
-typedef signed short int16;
-typedef unsigned int uint32;
-typedef signed int int32;
-
-#ifndef TRUE
-#define TRUE 1
-#define FALSE 0
-#endif
-
-typedef float codetype;
-
-// @NOTE
-//
-// Some arrays below are tagged "//varies", which means it's actually
-// a variable-sized piece of data, but rather than malloc it I assume it's
-// small enough that it's better to just allocate it all together with the
-// main thing
-//
-// Most of the variables are specified with the smallest size I could pack
-// them into. It might give better performance to make them all full-sized
-// integers. It should be safe to freely rearrange the structures or change
-// the sizes larger--nothing relies on silently truncating etc., nor the
-// order of variables.
-
-#define FAST_HUFFMAN_TABLE_SIZE (1 << STB_VORBIS_FAST_HUFFMAN_LENGTH)
-#define FAST_HUFFMAN_TABLE_MASK (FAST_HUFFMAN_TABLE_SIZE - 1)
-
-typedef struct
-{
- int dimensions, entries;
- uint8 *codeword_lengths;
- float minimum_value;
- float delta_value;
- uint8 value_bits;
- uint8 lookup_type;
- uint8 sequence_p;
- uint8 sparse;
- uint32 lookup_values;
- codetype *multiplicands;
- uint32 *codewords;
- #ifdef STB_VORBIS_FAST_HUFFMAN_SHORT
- int16 fast_huffman[FAST_HUFFMAN_TABLE_SIZE];
- #else
- int32 fast_huffman[FAST_HUFFMAN_TABLE_SIZE];
- #endif
- uint32 *sorted_codewords;
- int *sorted_values;
- int sorted_entries;
-} Codebook;
-
-typedef struct
-{
- uint8 order;
- uint16 rate;
- uint16 bark_map_size;
- uint8 amplitude_bits;
- uint8 amplitude_offset;
- uint8 number_of_books;
- uint8 book_list[16]; // varies
-} Floor0;
-
-typedef struct
-{
- uint8 partitions;
- uint8 partition_class_list[32]; // varies
- uint8 class_dimensions[16]; // varies
- uint8 class_subclasses[16]; // varies
- uint8 class_masterbooks[16]; // varies
- int16 subclass_books[16][8]; // varies
- uint16 Xlist[31*8+2]; // varies
- uint8 sorted_order[31*8+2];
- uint8 neighbors[31*8+2][2];
- uint8 floor1_multiplier;
- uint8 rangebits;
- int values;
-} Floor1;
-
-typedef union
-{
- Floor0 floor0;
- Floor1 floor1;
-} Floor;
-
-typedef struct
-{
- uint32 begin, end;
- uint32 part_size;
- uint8 classifications;
- uint8 classbook;
- uint8 **classdata;
- int16 (*residue_books)[8];
-} Residue;
-
-typedef struct
-{
- uint8 magnitude;
- uint8 angle;
- uint8 mux;
-} MappingChannel;
-
-typedef struct
-{
- uint16 coupling_steps;
- MappingChannel *chan;
- uint8 submaps;
- uint8 submap_floor[15]; // varies
- uint8 submap_residue[15]; // varies
-} Mapping;
-
-typedef struct
-{
- uint8 blockflag;
- uint8 mapping;
- uint16 windowtype;
- uint16 transformtype;
-} Mode;
-
-typedef struct
-{
- uint32 goal_crc; // expected crc if match
- int bytes_left; // bytes left in packet
- uint32 crc_so_far; // running crc
- int bytes_done; // bytes processed in _current_ chunk
- uint32 sample_loc; // granule pos encoded in page
-} CRCscan;
-
-typedef struct
-{
- uint32 page_start, page_end;
- uint32 last_decoded_sample;
-} ProbedPage;
-
-struct stb_vorbis
-{
- // user-accessible info
- unsigned int sample_rate;
- int channels;
-
- unsigned int setup_memory_required;
- unsigned int temp_memory_required;
- unsigned int setup_temp_memory_required;
-
- char *vendor;
- int comment_list_length;
- char **comment_list;
-
- // input config
-#ifndef STB_VORBIS_NO_STDIO
- FILE *f;
- uint32 f_start;
- int close_on_free;
-#endif
-
- uint8 *stream;
- uint8 *stream_start;
- uint8 *stream_end;
-
- uint32 stream_len;
-
- uint8 push_mode;
-
- // the page to seek to when seeking to start, may be zero
- uint32 first_audio_page_offset;
-
- // p_first is the page on which the first audio packet ends
- // (but not necessarily the page on which it starts)
- ProbedPage p_first, p_last;
-
- // memory management
- stb_vorbis_alloc alloc;
- int setup_offset;
- int temp_offset;
-
- // run-time results
- int eof;
- enum STBVorbisError error;
-
- // user-useful data
-
- // header info
- int blocksize[2];
- int blocksize_0, blocksize_1;
- int codebook_count;
- Codebook *codebooks;
- int floor_count;
- uint16 floor_types[64]; // varies
- Floor *floor_config;
- int residue_count;
- uint16 residue_types[64]; // varies
- Residue *residue_config;
- int mapping_count;
- Mapping *mapping;
- int mode_count;
- Mode mode_config[64]; // varies
-
- uint32 total_samples;
-
- // decode buffer
- float *channel_buffers[STB_VORBIS_MAX_CHANNELS];
- float *outputs [STB_VORBIS_MAX_CHANNELS];
-
- float *previous_window[STB_VORBIS_MAX_CHANNELS];
- int previous_length;
-
- #ifndef STB_VORBIS_NO_DEFER_FLOOR
- int16 *finalY[STB_VORBIS_MAX_CHANNELS];
- #else
- float *floor_buffers[STB_VORBIS_MAX_CHANNELS];
- #endif
-
- uint32 current_loc; // sample location of next frame to decode
- int current_loc_valid;
-
- // per-blocksize precomputed data
-
- // twiddle factors
- float *A[2],*B[2],*C[2];
- float *window[2];
- uint16 *bit_reverse[2];
-
- // current page/packet/segment streaming info
- uint32 serial; // stream serial number for verification
- int last_page;
- int segment_count;
- uint8 segments[255];
- uint8 page_flag;
- uint8 bytes_in_seg;
- uint8 first_decode;
- int next_seg;
- int last_seg; // flag that we're on the last segment
- int last_seg_which; // what was the segment number of the last seg?
- uint32 acc;
- int valid_bits;
- int packet_bytes;
- int end_seg_with_known_loc;
- uint32 known_loc_for_packet;
- int discard_samples_deferred;
- uint32 samples_output;
-
- // push mode scanning
- int page_crc_tests; // only in push_mode: number of tests active; -1 if not searching
-#ifndef STB_VORBIS_NO_PUSHDATA_API
- CRCscan scan[STB_VORBIS_PUSHDATA_CRC_COUNT];
-#endif
-
- // sample-access
- int channel_buffer_start;
- int channel_buffer_end;
-};
-
-#if defined(STB_VORBIS_NO_PUSHDATA_API)
- #define IS_PUSH_MODE(f) FALSE
-#elif defined(STB_VORBIS_NO_PULLDATA_API)
- #define IS_PUSH_MODE(f) TRUE
-#else
- #define IS_PUSH_MODE(f) ((f)->push_mode)
-#endif
-
-typedef struct stb_vorbis vorb;
-
-static int error(vorb *f, enum STBVorbisError e)
-{
- f->error = e;
- if (!f->eof && e != VORBIS_need_more_data) {
- f->error=e; // breakpoint for debugging
- }
- return 0;
-}
-
-
-// these functions are used for allocating temporary memory
-// while decoding. if you can afford the stack space, use
-// alloca(); otherwise, provide a temp buffer and it will
-// allocate out of that.
-
-#define array_size_required(count,size) ((count)*(sizeof(void *)+(size)))
-
-#define temp_alloc(f,size) (f->alloc.alloc_buffer ? setup_temp_malloc(f,size) : alloca(size))
-#define temp_free(f,p) (void)0
-#define temp_alloc_save(f) ((f)->temp_offset)
-#define temp_alloc_restore(f,p) ((f)->temp_offset = (p))
-
-#define temp_block_array(f,count,size) make_block_array(temp_alloc(f,array_size_required(count,size)), count, size)
-
-// given a sufficiently large block of memory, make an array of pointers to subblocks of it
-static void *make_block_array(void *mem, int count, int size)
-{
- int i;
- void ** p = (void **) mem;
- char *q = (char *) (p + count);
- for (i=0; i < count; ++i) {
- p[i] = q;
- q += size;
- }
- return p;
-}
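-
-// Illustration only (not part of the decoder): for count=2 and size=16,
-// make_block_array lays the single allocation out as
-//    [ptr0][ptr1][16 bytes for ptr0][16 bytes for ptr1]
-// so one block serves as a 2-entry array of 16-byte buffers. A hypothetical
-// use, assuming mem is at least array_size_required(2,16) bytes:
-#if 0
-static void example_block_array(void *mem)
-{
-   float **chan = (float **) make_block_array(mem, 2, 16);
-   chan[0][0] = 1.0f; // writes into the first 16-byte subblock
-   chan[1][0] = 2.0f; // writes into the second 16-byte subblock
-}
-#endif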
-
-static void *setup_malloc(vorb *f, int sz)
-{
- sz = (sz+7) & ~7; // round up to nearest 8 for alignment of future allocs.
- f->setup_memory_required += sz;
- if (f->alloc.alloc_buffer) {
- void *p = (char *) f->alloc.alloc_buffer + f->setup_offset;
- if (f->setup_offset + sz > f->temp_offset) return NULL;
- f->setup_offset += sz;
- return p;
- }
- return sz ? malloc(sz) : NULL;
-}
-
-static void setup_free(vorb *f, void *p)
-{
- if (f->alloc.alloc_buffer) return; // do nothing; setup mem is a stack
- free(p);
-}
-
-static void *setup_temp_malloc(vorb *f, int sz)
-{
- sz = (sz+7) & ~7; // round up to nearest 8 for alignment of future allocs.
- if (f->alloc.alloc_buffer) {
- if (f->temp_offset - sz < f->setup_offset) return NULL;
- f->temp_offset -= sz;
- return (char *) f->alloc.alloc_buffer + f->temp_offset;
- }
- return malloc(sz);
-}
-
-static void setup_temp_free(vorb *f, void *p, int sz)
-{
- if (f->alloc.alloc_buffer) {
- f->temp_offset += (sz+7)&~7;
- return;
- }
- free(p);
-}
-
-#define CRC32_POLY 0x04c11db7 // from spec
-
-static uint32 crc_table[256];
-static void crc32_init(void)
-{
- int i,j;
- uint32 s;
- for(i=0; i < 256; i++) {
- for (s=(uint32) i << 24, j=0; j < 8; ++j)
- s = (s << 1) ^ (s >= (1U<<31) ? CRC32_POLY : 0);
- crc_table[i] = s;
- }
-}
-
-static __forceinline uint32 crc32_update(uint32 crc, uint8 byte)
-{
- return (crc << 8) ^ crc_table[byte ^ (crc >> 24)];
-}
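-
-// Sketch (illustration only): this is the MSB-first CRC-32 used by Ogg
-// pages -- initial value 0, polynomial 0x04c11db7, no reflection and no
-// final xor -- applied to a whole page with its CRC field zeroed.
-// A hypothetical helper over a buffer:
-#if 0
-static uint32 crc32_buffer(const uint8 *data, int len)
-{
-   uint32 crc = 0;
-   int i;
-   for (i=0; i < len; ++i)
-      crc = crc32_update(crc, data[i]);
-   return crc;
-}
-#endif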
-
-
-// used in setup, and for huffman codes that don't go through the fast path
-static unsigned int bit_reverse(unsigned int n)
-{
- n = ((n & 0xAAAAAAAA) >> 1) | ((n & 0x55555555) << 1);
- n = ((n & 0xCCCCCCCC) >> 2) | ((n & 0x33333333) << 2);
- n = ((n & 0xF0F0F0F0) >> 4) | ((n & 0x0F0F0F0F) << 4);
- n = ((n & 0xFF00FF00) >> 8) | ((n & 0x00FF00FF) << 8);
- return (n >> 16) | (n << 16);
-}
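-
-// Worked example: each pass swaps progressively larger bit groups, so
-// bit_reverse(0x00000001) == 0x80000000 and bit_reverse(0x0000F000) == 0x000F0000.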
-
-static float square(float x)
-{
- return x*x;
-}
-
-// this is a weird definition of log2() for which log2(1) = 1, log2(2) = 2, log2(4) = 3
-// as required by the specification. fast(?) implementation from stb.h
-// @OPTIMIZE: called multiple times per-packet with "constants"; move to setup
-static int ilog(int32 n)
-{
- static signed char log2_4[16] = { 0,1,2,2,3,3,3,3,4,4,4,4,4,4,4,4 };
-
-   if (n < 0) return 0; // negative n returns 0
-
- // 2 compares if n < 16, 3 compares otherwise (4 if signed or n > 1<<29)
- if (n < (1 << 14))
- if (n < (1 << 4)) return 0 + log2_4[n ];
- else if (n < (1 << 9)) return 5 + log2_4[n >> 5];
- else return 10 + log2_4[n >> 10];
- else if (n < (1 << 24))
- if (n < (1 << 19)) return 15 + log2_4[n >> 15];
- else return 20 + log2_4[n >> 20];
- else if (n < (1 << 29)) return 25 + log2_4[n >> 25];
- else return 30 + log2_4[n >> 30];
-}
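-
-// Worked examples of the off-by-one definition above:
-//    ilog(0) == 0, ilog(1) == 1, ilog(2) == 2, ilog(3) == 2, ilog(4) == 3
-// i.e. for n >= 1, ilog(n) == floor(log2(n)) + 1, the number of bits
-// needed to represent n.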
-
-#ifndef M_PI
- #define M_PI 3.14159265358979323846264f // from CRC
-#endif
-
-// code length assigned to a value with no huffman encoding
-#define NO_CODE 255
-
-/////////////////////// LEAF SETUP FUNCTIONS //////////////////////////
-//
-// these functions are only called at setup, and only a few times
-// per file
-
-static float float32_unpack(uint32 x)
-{
- // from the specification
- uint32 mantissa = x & 0x1fffff;
- uint32 sign = x & 0x80000000;
- uint32 exp = (x & 0x7fe00000) >> 21;
- double res = sign ? -(double)mantissa : (double)mantissa;
- return (float) ldexp((float)res, exp-788);
-}
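-
-// Bit layout per the specification: bit 31 = sign, bits 30..21 = biased
-// exponent, bits 20..0 = integer mantissa; value = sign * mantissa * 2^(exp-788).
-// Worked example: 0x62800001 has sign 0, exp 788, mantissa 1, so it
-// unpacks to 1.0f.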
-
-
-// zlib & jpeg huffman tables assume that the output symbols
-// can either be arbitrarily arranged, or have monotonically
-// increasing frequencies--they rely on the lengths being sorted;
-// this makes for a very simple generation algorithm.
-// vorbis allows a huffman table with non-sorted lengths. This
-// requires a more sophisticated construction, since symbols in
-// order do not map to huffman codes "in order".
-static void add_entry(Codebook *c, uint32 huff_code, int symbol, int count, int len, uint32 *values)
-{
- if (!c->sparse) {
- c->codewords [symbol] = huff_code;
- } else {
- c->codewords [count] = huff_code;
- c->codeword_lengths[count] = len;
- values [count] = symbol;
- }
-}
-
-static int compute_codewords(Codebook *c, uint8 *len, int n, uint32 *values)
-{
- int i,k,m=0;
- uint32 available[32];
-
- memset(available, 0, sizeof(available));
- // find the first entry
- for (k=0; k < n; ++k) if (len[k] < NO_CODE) break;
- if (k == n) { assert(c->sorted_entries == 0); return TRUE; }
- // add to the list
- add_entry(c, 0, k, m++, len[k], values);
- // add all available leaves
- for (i=1; i <= len[k]; ++i)
- available[i] = 1U << (32-i);
- // note that the above code treats the first case specially,
- // but it's really the same as the following code, so they
- // could probably be combined (except the initial code is 0,
- // and I use 0 in available[] to mean 'empty')
- for (i=k+1; i < n; ++i) {
- uint32 res;
- int z = len[i], y;
- if (z == NO_CODE) continue;
- // find lowest available leaf (should always be earliest,
- // which is what the specification calls for)
- // note that this property, and the fact we can never have
- // more than one free leaf at a given level, isn't totally
- // trivial to prove, but it seems true and the assert never
- // fires, so!
- while (z > 0 && !available[z]) --z;
- if (z == 0) { return FALSE; }
- res = available[z];
- assert(z >= 0 && z < 32);
- available[z] = 0;
- add_entry(c, bit_reverse(res), i, m++, len[i], values);
- // propagate availability up the tree
- if (z != len[i]) {
- assert(len[i] >= 0 && len[i] < 32);
- for (y=len[i]; y > z; --y) {
- assert(available[y] == 0);
- available[y] = res + (1 << (32-y));
- }
- }
- }
- return TRUE;
-}
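-
-// Worked example: lengths {2,1,2} yield codewords 00, 1, 01 -- entry 0
-// takes the all-zero code, entry 1 takes the lowest available 1-bit leaf,
-// and entry 2 takes the remaining 2-bit leaf. Codewords are stored
-// bit-reversed so the decoder can consume bits LSB-first.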
-
-// accelerated huffman table allows fast O(1) match of all symbols
-// of length <= STB_VORBIS_FAST_HUFFMAN_LENGTH
-static void compute_accelerated_huffman(Codebook *c)
-{
- int i, len;
- for (i=0; i < FAST_HUFFMAN_TABLE_SIZE; ++i)
- c->fast_huffman[i] = -1;
-
- len = c->sparse ? c->sorted_entries : c->entries;
- #ifdef STB_VORBIS_FAST_HUFFMAN_SHORT
- if (len > 32767) len = 32767; // largest possible value we can encode!
- #endif
- for (i=0; i < len; ++i) {
- if (c->codeword_lengths[i] <= STB_VORBIS_FAST_HUFFMAN_LENGTH) {
- uint32 z = c->sparse ? bit_reverse(c->sorted_codewords[i]) : c->codewords[i];
- // set table entries for all bit combinations in the higher bits
- while (z < FAST_HUFFMAN_TABLE_SIZE) {
- c->fast_huffman[z] = i;
- z += 1 << c->codeword_lengths[i];
- }
- }
- }
-}
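-
-// Example of the table fill above: a 2-bit codeword occupies every index
-// whose low 2 bits match it, i.e. indices z, z + 4, z + 8, ... up to
-// FAST_HUFFMAN_TABLE_SIZE, so one masked lookup of the accumulator's low
-// STB_VORBIS_FAST_HUFFMAN_LENGTH bits finds the symbol in O(1).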
-
-#ifdef _MSC_VER
-#define STBV_CDECL __cdecl
-#else
-#define STBV_CDECL
-#endif
-
-static int STBV_CDECL uint32_compare(const void *p, const void *q)
-{
- uint32 x = * (uint32 *) p;
- uint32 y = * (uint32 *) q;
- return x < y ? -1 : x > y;
-}
-
-static int include_in_sort(Codebook *c, uint8 len)
-{
- if (c->sparse) { assert(len != NO_CODE); return TRUE; }
- if (len == NO_CODE) return FALSE;
- if (len > STB_VORBIS_FAST_HUFFMAN_LENGTH) return TRUE;
- return FALSE;
-}
-
-// if the fast table above doesn't work, we want to binary
-// search them... need to reverse the bits
-static void compute_sorted_huffman(Codebook *c, uint8 *lengths, uint32 *values)
-{
- int i, len;
- // build a list of all the entries
- // OPTIMIZATION: don't include the short ones, since they'll be caught by FAST_HUFFMAN.
- // this is kind of a frivolous optimization--I don't see any performance improvement,
- // but it's like 4 extra lines of code, so.
- if (!c->sparse) {
- int k = 0;
- for (i=0; i < c->entries; ++i)
- if (include_in_sort(c, lengths[i]))
- c->sorted_codewords[k++] = bit_reverse(c->codewords[i]);
- assert(k == c->sorted_entries);
- } else {
- for (i=0; i < c->sorted_entries; ++i)
- c->sorted_codewords[i] = bit_reverse(c->codewords[i]);
- }
-
- qsort(c->sorted_codewords, c->sorted_entries, sizeof(c->sorted_codewords[0]), uint32_compare);
- c->sorted_codewords[c->sorted_entries] = 0xffffffff;
-
- len = c->sparse ? c->sorted_entries : c->entries;
- // now we need to indicate how they correspond; we could either
- // #1: sort a different data structure that says who they correspond to
- // #2: for each sorted entry, search the original list to find who corresponds
- // #3: for each original entry, find the sorted entry
- // #1 requires extra storage, #2 is slow, #3 can use binary search!
- for (i=0; i < len; ++i) {
- int huff_len = c->sparse ? lengths[values[i]] : lengths[i];
- if (include_in_sort(c,huff_len)) {
- uint32 code = bit_reverse(c->codewords[i]);
- int x=0, n=c->sorted_entries;
- while (n > 1) {
- // invariant: sc[x] <= code < sc[x+n]
- int m = x + (n >> 1);
- if (c->sorted_codewords[m] <= code) {
- x = m;
- n -= (n>>1);
- } else {
- n >>= 1;
- }
- }
- assert(c->sorted_codewords[x] == code);
- if (c->sparse) {
- c->sorted_values[x] = values[i];
- c->codeword_lengths[x] = huff_len;
- } else {
- c->sorted_values[x] = i;
- }
- }
- }
-}
-
-// only run while parsing the header (3 times)
-static int vorbis_validate(uint8 *data)
-{
- static uint8 vorbis[6] = { 'v', 'o', 'r', 'b', 'i', 's' };
- return memcmp(data, vorbis, 6) == 0;
-}
-
-// called from setup only, once per code book
-// (formula implied by specification)
-static int lookup1_values(int entries, int dim)
-{
- int r = (int) floor(exp((float) log((float) entries) / dim));
- if ((int) floor(pow((float) r+1, dim)) <= entries) // (int) cast for MinGW warning;
- ++r; // floor() to avoid _ftol() when non-CRT
- if (pow((float) r+1, dim) <= entries)
- return -1;
- if ((int) floor(pow((float) r, dim)) > entries)
- return -1;
- return r;
-}
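-
-// In other words, lookup1_values returns the largest integer r with
-// r^dim <= entries (e.g. entries=10, dim=2 gives r=3); it is the lattice
-// side length for lookup type 1. The -1 paths guard against inconsistent
-// results from floating-point rounding.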
-
-// called twice per file
-static void compute_twiddle_factors(int n, float *A, float *B, float *C)
-{
- int n4 = n >> 2, n8 = n >> 3;
- int k,k2;
-
- for (k=k2=0; k < n4; ++k,k2+=2) {
- A[k2 ] = (float) cos(4*k*M_PI/n);
- A[k2+1] = (float) -sin(4*k*M_PI/n);
- B[k2 ] = (float) cos((k2+1)*M_PI/n/2) * 0.5f;
- B[k2+1] = (float) sin((k2+1)*M_PI/n/2) * 0.5f;
- }
- for (k=k2=0; k < n8; ++k,k2+=2) {
- C[k2 ] = (float) cos(2*(k2+1)*M_PI/n);
- C[k2+1] = (float) -sin(2*(k2+1)*M_PI/n);
- }
-}
-
-static void compute_window(int n, float *window)
-{
- int n2 = n >> 1, i;
- for (i=0; i < n2; ++i)
- window[i] = (float) sin(0.5 * M_PI * square((float) sin((i - 0 + 0.5) / n2 * 0.5 * M_PI)));
-}
-
-static void compute_bitreverse(int n, uint16 *rev)
-{
- int ld = ilog(n) - 1; // ilog is off-by-one from normal definitions
- int i, n8 = n >> 3;
- for (i=0; i < n8; ++i)
- rev[i] = (bit_reverse(i) >> (32-ld+3)) << 2;
-}
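-
-// The table above stores, for each of the n/8 entries, the bit-reversed
-// index pre-multiplied by 4, because the IMDCT's step 4/5 consumes the
-// data four floats at a time (see inverse_mdct below).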
-
-static int init_blocksize(vorb *f, int b, int n)
-{
- int n2 = n >> 1, n4 = n >> 2, n8 = n >> 3;
- f->A[b] = (float *) setup_malloc(f, sizeof(float) * n2);
- f->B[b] = (float *) setup_malloc(f, sizeof(float) * n2);
- f->C[b] = (float *) setup_malloc(f, sizeof(float) * n4);
- if (!f->A[b] || !f->B[b] || !f->C[b]) return error(f, VORBIS_outofmem);
- compute_twiddle_factors(n, f->A[b], f->B[b], f->C[b]);
- f->window[b] = (float *) setup_malloc(f, sizeof(float) * n2);
- if (!f->window[b]) return error(f, VORBIS_outofmem);
- compute_window(n, f->window[b]);
- f->bit_reverse[b] = (uint16 *) setup_malloc(f, sizeof(uint16) * n8);
- if (!f->bit_reverse[b]) return error(f, VORBIS_outofmem);
- compute_bitreverse(n, f->bit_reverse[b]);
- return TRUE;
-}
-
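-// among x[0..n-1], find the index of the largest value below x[n] (*plow)
-// and of the smallest value above x[n] (*phigh); used during floor1 setup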
-static void neighbors(uint16 *x, int n, int *plow, int *phigh)
-{
- int low = -1;
- int high = 65536;
- int i;
- for (i=0; i < n; ++i) {
- if (x[i] > low && x[i] < x[n]) { *plow = i; low = x[i]; }
- if (x[i] < high && x[i] > x[n]) { *phigh = i; high = x[i]; }
- }
-}
-
-// this struct has been repurposed so its second field is now the original index ('id') rather than a y value
-typedef struct
-{
- uint16 x,id;
-} stbv__floor_ordering;
-
-static int STBV_CDECL point_compare(const void *p, const void *q)
-{
- stbv__floor_ordering *a = (stbv__floor_ordering *) p;
- stbv__floor_ordering *b = (stbv__floor_ordering *) q;
- return a->x < b->x ? -1 : a->x > b->x;
-}
-
-//
-/////////////////////// END LEAF SETUP FUNCTIONS //////////////////////////
-
-
-#if defined(STB_VORBIS_NO_STDIO)
- #define USE_MEMORY(z) TRUE
-#else
- #define USE_MEMORY(z) ((z)->stream)
-#endif
-
-static uint8 get8(vorb *z)
-{
- if (USE_MEMORY(z)) {
- if (z->stream >= z->stream_end) { z->eof = TRUE; return 0; }
- return *z->stream++;
- }
-
- #ifndef STB_VORBIS_NO_STDIO
- {
- int c = fgetc(z->f);
- if (c == EOF) { z->eof = TRUE; return 0; }
- return c;
- }
- #endif
-}
-
-static uint32 get32(vorb *f)
-{
- uint32 x;
- x = get8(f);
- x += get8(f) << 8;
- x += get8(f) << 16;
- x += (uint32) get8(f) << 24;
- return x;
-}
-
-static int getn(vorb *z, uint8 *data, int n)
-{
- if (USE_MEMORY(z)) {
- if (z->stream+n > z->stream_end) { z->eof = 1; return 0; }
- memcpy(data, z->stream, n);
- z->stream += n;
- return 1;
- }
-
- #ifndef STB_VORBIS_NO_STDIO
- if (fread(data, n, 1, z->f) == 1)
- return 1;
- else {
- z->eof = 1;
- return 0;
- }
- #endif
-}
-
-static void skip(vorb *z, int n)
-{
- if (USE_MEMORY(z)) {
- z->stream += n;
- if (z->stream >= z->stream_end) z->eof = 1;
- return;
- }
- #ifndef STB_VORBIS_NO_STDIO
- {
- long x = ftell(z->f);
- fseek(z->f, x+n, SEEK_SET);
- }
- #endif
-}
-
-static int set_file_offset(stb_vorbis *f, unsigned int loc)
-{
- #ifndef STB_VORBIS_NO_PUSHDATA_API
- if (f->push_mode) return 0;
- #endif
- f->eof = 0;
- if (USE_MEMORY(f)) {
- if (f->stream_start + loc >= f->stream_end || f->stream_start + loc < f->stream_start) {
- f->stream = f->stream_end;
- f->eof = 1;
- return 0;
- } else {
- f->stream = f->stream_start + loc;
- return 1;
- }
- }
- #ifndef STB_VORBIS_NO_STDIO
- if (loc + f->f_start < loc || loc >= 0x80000000) {
- loc = 0x7fffffff;
- f->eof = 1;
- } else {
- loc += f->f_start;
- }
- if (!fseek(f->f, loc, SEEK_SET))
- return 1;
- f->eof = 1;
- fseek(f->f, f->f_start, SEEK_END);
- return 0;
- #endif
-}
-
-
-static uint8 ogg_page_header[4] = { 0x4f, 0x67, 0x67, 0x53 };
-
-static int capture_pattern(vorb *f)
-{
- if (0x4f != get8(f)) return FALSE;
- if (0x67 != get8(f)) return FALSE;
- if (0x67 != get8(f)) return FALSE;
- if (0x53 != get8(f)) return FALSE;
- return TRUE;
-}
-
-#define PAGEFLAG_continued_packet 1
-#define PAGEFLAG_first_page 2
-#define PAGEFLAG_last_page 4
-
-static int start_page_no_capturepattern(vorb *f)
-{
- uint32 loc0,loc1,n;
- if (f->first_decode && !IS_PUSH_MODE(f)) {
- f->p_first.page_start = stb_vorbis_get_file_offset(f) - 4;
- }
- // stream structure version
- if (0 != get8(f)) return error(f, VORBIS_invalid_stream_structure_version);
- // header flag
- f->page_flag = get8(f);
- // absolute granule position
- loc0 = get32(f);
- loc1 = get32(f);
- // @TODO: validate loc0,loc1 as valid positions?
- // stream serial number -- vorbis doesn't interleave, so discard
- get32(f);
- //if (f->serial != get32(f)) return error(f, VORBIS_incorrect_stream_serial_number);
- // page sequence number
- n = get32(f);
- f->last_page = n;
- // CRC32
- get32(f);
- // page_segments
- f->segment_count = get8(f);
- if (!getn(f, f->segments, f->segment_count))
- return error(f, VORBIS_unexpected_eof);
-   // assume we _don't_ know the sample position of any segments
- f->end_seg_with_known_loc = -2;
- if (loc0 != ~0U || loc1 != ~0U) {
- int i;
- // determine which packet is the last one that will complete
- for (i=f->segment_count-1; i >= 0; --i)
- if (f->segments[i] < 255)
- break;
- // 'i' is now the index of the _last_ segment of a packet that ends
- if (i >= 0) {
- f->end_seg_with_known_loc = i;
- f->known_loc_for_packet = loc0;
- }
- }
- if (f->first_decode) {
- int i,len;
- len = 0;
- for (i=0; i < f->segment_count; ++i)
- len += f->segments[i];
- len += 27 + f->segment_count;
- f->p_first.page_end = f->p_first.page_start + len;
- f->p_first.last_decoded_sample = loc0;
- }
- f->next_seg = 0;
- return TRUE;
-}
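-
-// For reference, the fields read above follow the fixed 27-byte Ogg page
-// header: "OggS" (4), version (1), flags (1), granule position (8),
-// serial (4), sequence (4), CRC (4), segment count (1), then the segment
-// table itself -- hence the "27 + segment_count" page length computed above.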
-
-static int start_page(vorb *f)
-{
- if (!capture_pattern(f)) return error(f, VORBIS_missing_capture_pattern);
- return start_page_no_capturepattern(f);
-}
-
-static int start_packet(vorb *f)
-{
- while (f->next_seg == -1) {
- if (!start_page(f)) return FALSE;
- if (f->page_flag & PAGEFLAG_continued_packet)
- return error(f, VORBIS_continued_packet_flag_invalid);
- }
- f->last_seg = FALSE;
- f->valid_bits = 0;
- f->packet_bytes = 0;
- f->bytes_in_seg = 0;
- // f->next_seg is now valid
- return TRUE;
-}
-
-static int maybe_start_packet(vorb *f)
-{
- if (f->next_seg == -1) {
- int x = get8(f);
- if (f->eof) return FALSE; // EOF at page boundary is not an error!
- if (0x4f != x ) return error(f, VORBIS_missing_capture_pattern);
- if (0x67 != get8(f)) return error(f, VORBIS_missing_capture_pattern);
- if (0x67 != get8(f)) return error(f, VORBIS_missing_capture_pattern);
- if (0x53 != get8(f)) return error(f, VORBIS_missing_capture_pattern);
- if (!start_page_no_capturepattern(f)) return FALSE;
- if (f->page_flag & PAGEFLAG_continued_packet) {
- // set up enough state that we can read this packet if we want,
- // e.g. during recovery
- f->last_seg = FALSE;
- f->bytes_in_seg = 0;
- return error(f, VORBIS_continued_packet_flag_invalid);
- }
- }
- return start_packet(f);
-}
-
-static int next_segment(vorb *f)
-{
- int len;
- if (f->last_seg) return 0;
- if (f->next_seg == -1) {
- f->last_seg_which = f->segment_count-1; // in case start_page fails
- if (!start_page(f)) { f->last_seg = 1; return 0; }
- if (!(f->page_flag & PAGEFLAG_continued_packet)) return error(f, VORBIS_continued_packet_flag_invalid);
- }
- len = f->segments[f->next_seg++];
- if (len < 255) {
- f->last_seg = TRUE;
- f->last_seg_which = f->next_seg-1;
- }
- if (f->next_seg >= f->segment_count)
- f->next_seg = -1;
- assert(f->bytes_in_seg == 0);
- f->bytes_in_seg = len;
- return len;
-}
-
-#define EOP (-1)
-#define INVALID_BITS (-1)
-
-static int get8_packet_raw(vorb *f)
-{
- if (!f->bytes_in_seg) { // CLANG!
- if (f->last_seg) return EOP;
- else if (!next_segment(f)) return EOP;
- }
- assert(f->bytes_in_seg > 0);
- --f->bytes_in_seg;
- ++f->packet_bytes;
- return get8(f);
-}
-
-static int get8_packet(vorb *f)
-{
- int x = get8_packet_raw(f);
- f->valid_bits = 0;
- return x;
-}
-
-static int get32_packet(vorb *f)
-{
- uint32 x;
- x = get8_packet(f);
- x += get8_packet(f) << 8;
- x += get8_packet(f) << 16;
- x += (uint32) get8_packet(f) << 24;
- return x;
-}
-
-static void flush_packet(vorb *f)
-{
- while (get8_packet_raw(f) != EOP);
-}
-
-// @OPTIMIZE: this is the secondary bit decoder, so it's probably not as important
-// as the huffman decoder?
-static uint32 get_bits(vorb *f, int n)
-{
- uint32 z;
-
- if (f->valid_bits < 0) return 0;
- if (f->valid_bits < n) {
- if (n > 24) {
- // the accumulator technique below would not work correctly in this case
- z = get_bits(f, 24);
- z += get_bits(f, n-24) << 24;
- return z;
- }
- if (f->valid_bits == 0) f->acc = 0;
- while (f->valid_bits < n) {
- int z = get8_packet_raw(f);
- if (z == EOP) {
- f->valid_bits = INVALID_BITS;
- return 0;
- }
- f->acc += z << f->valid_bits;
- f->valid_bits += 8;
- }
- }
-
- assert(f->valid_bits >= n);
- z = f->acc & ((1 << n)-1);
- f->acc >>= n;
- f->valid_bits -= n;
- return z;
-}
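-
-// Bits are consumed LSB-first out of the accumulator. Worked example:
-// after buffering the byte 0xB2 (10110010), get_bits(f,3) returns 0x2
-// (binary 010, the low three bits) and leaves 10110 pending.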
-
-// @OPTIMIZE: primary accumulator for huffman
-// expand the buffer to as many bits as possible without reading off end of packet
-// it might be nice to allow f->valid_bits and f->acc to be stored in registers,
-// e.g. cache them locally and decode locally
-static __forceinline void prep_huffman(vorb *f)
-{
- if (f->valid_bits <= 24) {
- if (f->valid_bits == 0) f->acc = 0;
- do {
- int z;
- if (f->last_seg && !f->bytes_in_seg) return;
- z = get8_packet_raw(f);
- if (z == EOP) return;
- f->acc += (unsigned) z << f->valid_bits;
- f->valid_bits += 8;
- } while (f->valid_bits <= 24);
- }
-}
-
-enum
-{
- VORBIS_packet_id = 1,
- VORBIS_packet_comment = 3,
- VORBIS_packet_setup = 5
-};
-
-static int codebook_decode_scalar_raw(vorb *f, Codebook *c)
-{
- int i;
- prep_huffman(f);
-
- if (c->codewords == NULL && c->sorted_codewords == NULL)
- return -1;
-
- // cases to use binary search: sorted_codewords && !c->codewords
- // sorted_codewords && c->entries > 8
- if (c->entries > 8 ? c->sorted_codewords!=NULL : !c->codewords) {
- // binary search
- uint32 code = bit_reverse(f->acc);
- int x=0, n=c->sorted_entries, len;
-
- while (n > 1) {
- // invariant: sc[x] <= code < sc[x+n]
- int m = x + (n >> 1);
- if (c->sorted_codewords[m] <= code) {
- x = m;
- n -= (n>>1);
- } else {
- n >>= 1;
- }
- }
- // x is now the sorted index
- if (!c->sparse) x = c->sorted_values[x];
- // x is now sorted index if sparse, or symbol otherwise
- len = c->codeword_lengths[x];
- if (f->valid_bits >= len) {
- f->acc >>= len;
- f->valid_bits -= len;
- return x;
- }
-
- f->valid_bits = 0;
- return -1;
- }
-
- // if small, linear search
- assert(!c->sparse);
- for (i=0; i < c->entries; ++i) {
- if (c->codeword_lengths[i] == NO_CODE) continue;
- if (c->codewords[i] == (f->acc & ((1 << c->codeword_lengths[i])-1))) {
- if (f->valid_bits >= c->codeword_lengths[i]) {
- f->acc >>= c->codeword_lengths[i];
- f->valid_bits -= c->codeword_lengths[i];
- return i;
- }
- f->valid_bits = 0;
- return -1;
- }
- }
-
- error(f, VORBIS_invalid_stream);
- f->valid_bits = 0;
- return -1;
-}
-
-#ifndef STB_VORBIS_NO_INLINE_DECODE
-
-#define DECODE_RAW(var, f,c) \
- if (f->valid_bits < STB_VORBIS_FAST_HUFFMAN_LENGTH) \
- prep_huffman(f); \
- var = f->acc & FAST_HUFFMAN_TABLE_MASK; \
- var = c->fast_huffman[var]; \
- if (var >= 0) { \
- int n = c->codeword_lengths[var]; \
- f->acc >>= n; \
- f->valid_bits -= n; \
- if (f->valid_bits < 0) { f->valid_bits = 0; var = -1; } \
- } else { \
- var = codebook_decode_scalar_raw(f,c); \
- }
-
-#else
-
-static int codebook_decode_scalar(vorb *f, Codebook *c)
-{
- int i;
- if (f->valid_bits < STB_VORBIS_FAST_HUFFMAN_LENGTH)
- prep_huffman(f);
- // fast huffman table lookup
- i = f->acc & FAST_HUFFMAN_TABLE_MASK;
- i = c->fast_huffman[i];
- if (i >= 0) {
- f->acc >>= c->codeword_lengths[i];
- f->valid_bits -= c->codeword_lengths[i];
- if (f->valid_bits < 0) { f->valid_bits = 0; return -1; }
- return i;
- }
- return codebook_decode_scalar_raw(f,c);
-}
-
-#define DECODE_RAW(var,f,c) var = codebook_decode_scalar(f,c);
-
-#endif
-
-#define DECODE(var,f,c) \
- DECODE_RAW(var,f,c) \
- if (c->sparse) var = c->sorted_values[var];
-
-#ifndef STB_VORBIS_DIVIDES_IN_CODEBOOK
- #define DECODE_VQ(var,f,c) DECODE_RAW(var,f,c)
-#else
- #define DECODE_VQ(var,f,c) DECODE(var,f,c)
-#endif
-
-// CODEBOOK_ELEMENT_FAST is an optimization for the CODEBOOK_FLOATS case
-// where we avoid one addition
-#define CODEBOOK_ELEMENT(c,off) (c->multiplicands[off])
-#define CODEBOOK_ELEMENT_FAST(c,off) (c->multiplicands[off])
-#define CODEBOOK_ELEMENT_BASE(c) (0)
-
-static int codebook_decode_start(vorb *f, Codebook *c)
-{
- int z = -1;
-
- // type 0 is only legal in a scalar context
- if (c->lookup_type == 0)
- error(f, VORBIS_invalid_stream);
- else {
- DECODE_VQ(z,f,c);
- if (c->sparse) assert(z < c->sorted_entries);
- if (z < 0) { // check for EOP
- if (!f->bytes_in_seg)
- if (f->last_seg)
- return z;
- error(f, VORBIS_invalid_stream);
- }
- }
- return z;
-}
-
-static int codebook_decode(vorb *f, Codebook *c, float *output, int len)
-{
- int i,z = codebook_decode_start(f,c);
- if (z < 0) return FALSE;
- if (len > c->dimensions) len = c->dimensions;
-
-#ifdef STB_VORBIS_DIVIDES_IN_CODEBOOK
- if (c->lookup_type == 1) {
- float last = CODEBOOK_ELEMENT_BASE(c);
- int div = 1;
- for (i=0; i < len; ++i) {
- int off = (z / div) % c->lookup_values;
- float val = CODEBOOK_ELEMENT_FAST(c,off) + last;
- output[i] += val;
- if (c->sequence_p) last = val + c->minimum_value;
- div *= c->lookup_values;
- }
- return TRUE;
- }
-#endif
-
- z *= c->dimensions;
- if (c->sequence_p) {
- float last = CODEBOOK_ELEMENT_BASE(c);
- for (i=0; i < len; ++i) {
- float val = CODEBOOK_ELEMENT_FAST(c,z+i) + last;
- output[i] += val;
- last = val + c->minimum_value;
- }
- } else {
- float last = CODEBOOK_ELEMENT_BASE(c);
- for (i=0; i < len; ++i) {
- output[i] += CODEBOOK_ELEMENT_FAST(c,z+i) + last;
- }
- }
-
- return TRUE;
-}
-
-static int codebook_decode_step(vorb *f, Codebook *c, float *output, int len, int step)
-{
- int i,z = codebook_decode_start(f,c);
- float last = CODEBOOK_ELEMENT_BASE(c);
- if (z < 0) return FALSE;
- if (len > c->dimensions) len = c->dimensions;
-
-#ifdef STB_VORBIS_DIVIDES_IN_CODEBOOK
- if (c->lookup_type == 1) {
- int div = 1;
- for (i=0; i < len; ++i) {
- int off = (z / div) % c->lookup_values;
- float val = CODEBOOK_ELEMENT_FAST(c,off) + last;
- output[i*step] += val;
- if (c->sequence_p) last = val;
- div *= c->lookup_values;
- }
- return TRUE;
- }
-#endif
-
- z *= c->dimensions;
- for (i=0; i < len; ++i) {
- float val = CODEBOOK_ELEMENT_FAST(c,z+i) + last;
- output[i*step] += val;
- if (c->sequence_p) last = val;
- }
-
- return TRUE;
-}
-
-static int codebook_decode_deinterleave_repeat(vorb *f, Codebook *c, float **outputs, int ch, int *c_inter_p, int *p_inter_p, int len, int total_decode)
-{
- int c_inter = *c_inter_p;
- int p_inter = *p_inter_p;
- int i,z, effective = c->dimensions;
-
- // type 0 is only legal in a scalar context
- if (c->lookup_type == 0) return error(f, VORBIS_invalid_stream);
-
- while (total_decode > 0) {
- float last = CODEBOOK_ELEMENT_BASE(c);
- DECODE_VQ(z,f,c);
- #ifndef STB_VORBIS_DIVIDES_IN_CODEBOOK
- assert(!c->sparse || z < c->sorted_entries);
- #endif
- if (z < 0) {
- if (!f->bytes_in_seg)
- if (f->last_seg) return FALSE;
- return error(f, VORBIS_invalid_stream);
- }
-
- // if this will take us off the end of the buffers, stop short!
- // we check by computing the length of the virtual interleaved
- // buffer (len*ch), our current offset within it (p_inter*ch)+(c_inter),
- // and the length we'll be using (effective)
- if (c_inter + p_inter*ch + effective > len * ch) {
-         effective = len*ch - (p_inter*ch + c_inter); // clamp to the space left past the current offset
- }
-
- #ifdef STB_VORBIS_DIVIDES_IN_CODEBOOK
- if (c->lookup_type == 1) {
- int div = 1;
- for (i=0; i < effective; ++i) {
- int off = (z / div) % c->lookup_values;
- float val = CODEBOOK_ELEMENT_FAST(c,off) + last;
- if (outputs[c_inter])
- outputs[c_inter][p_inter] += val;
- if (++c_inter == ch) { c_inter = 0; ++p_inter; }
- if (c->sequence_p) last = val;
- div *= c->lookup_values;
- }
- } else
- #endif
- {
- z *= c->dimensions;
- if (c->sequence_p) {
- for (i=0; i < effective; ++i) {
- float val = CODEBOOK_ELEMENT_FAST(c,z+i) + last;
- if (outputs[c_inter])
- outputs[c_inter][p_inter] += val;
- if (++c_inter == ch) { c_inter = 0; ++p_inter; }
- last = val;
- }
- } else {
- for (i=0; i < effective; ++i) {
- float val = CODEBOOK_ELEMENT_FAST(c,z+i) + last;
- if (outputs[c_inter])
- outputs[c_inter][p_inter] += val;
- if (++c_inter == ch) { c_inter = 0; ++p_inter; }
- }
- }
- }
-
- total_decode -= effective;
- }
- *c_inter_p = c_inter;
- *p_inter_p = p_inter;
- return TRUE;
-}
-
-static int predict_point(int x, int x0, int x1, int y0, int y1)
-{
- int dy = y1 - y0;
- int adx = x1 - x0;
- // @OPTIMIZE: force int division to round in the right direction... is this necessary on x86?
- int err = abs(dy) * (x - x0);
- int off = err / adx;
- return dy < 0 ? y0 - off : y0 + off;
-}
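-
-// Worked example: predict_point(2, 0, 4, 10, 2) computes dy=-8, adx=4,
-// err=16, off=4, and since dy<0 returns 10-4 = 6 -- the integer point on
-// the line from (0,10) to (4,2) at x=2.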
-
-// the following table is block-copied from the specification
-static float inverse_db_table[256] =
-{
- 1.0649863e-07f, 1.1341951e-07f, 1.2079015e-07f, 1.2863978e-07f,
- 1.3699951e-07f, 1.4590251e-07f, 1.5538408e-07f, 1.6548181e-07f,
- 1.7623575e-07f, 1.8768855e-07f, 1.9988561e-07f, 2.1287530e-07f,
- 2.2670913e-07f, 2.4144197e-07f, 2.5713223e-07f, 2.7384213e-07f,
- 2.9163793e-07f, 3.1059021e-07f, 3.3077411e-07f, 3.5226968e-07f,
- 3.7516214e-07f, 3.9954229e-07f, 4.2550680e-07f, 4.5315863e-07f,
- 4.8260743e-07f, 5.1396998e-07f, 5.4737065e-07f, 5.8294187e-07f,
- 6.2082472e-07f, 6.6116941e-07f, 7.0413592e-07f, 7.4989464e-07f,
- 7.9862701e-07f, 8.5052630e-07f, 9.0579828e-07f, 9.6466216e-07f,
- 1.0273513e-06f, 1.0941144e-06f, 1.1652161e-06f, 1.2409384e-06f,
- 1.3215816e-06f, 1.4074654e-06f, 1.4989305e-06f, 1.5963394e-06f,
- 1.7000785e-06f, 1.8105592e-06f, 1.9282195e-06f, 2.0535261e-06f,
- 2.1869758e-06f, 2.3290978e-06f, 2.4804557e-06f, 2.6416497e-06f,
- 2.8133190e-06f, 2.9961443e-06f, 3.1908506e-06f, 3.3982101e-06f,
- 3.6190449e-06f, 3.8542308e-06f, 4.1047004e-06f, 4.3714470e-06f,
- 4.6555282e-06f, 4.9580707e-06f, 5.2802740e-06f, 5.6234160e-06f,
- 5.9888572e-06f, 6.3780469e-06f, 6.7925283e-06f, 7.2339451e-06f,
- 7.7040476e-06f, 8.2047000e-06f, 8.7378876e-06f, 9.3057248e-06f,
- 9.9104632e-06f, 1.0554501e-05f, 1.1240392e-05f, 1.1970856e-05f,
- 1.2748789e-05f, 1.3577278e-05f, 1.4459606e-05f, 1.5399272e-05f,
- 1.6400004e-05f, 1.7465768e-05f, 1.8600792e-05f, 1.9809576e-05f,
- 2.1096914e-05f, 2.2467911e-05f, 2.3928002e-05f, 2.5482978e-05f,
- 2.7139006e-05f, 2.8902651e-05f, 3.0780908e-05f, 3.2781225e-05f,
- 3.4911534e-05f, 3.7180282e-05f, 3.9596466e-05f, 4.2169667e-05f,
- 4.4910090e-05f, 4.7828601e-05f, 5.0936773e-05f, 5.4246931e-05f,
- 5.7772202e-05f, 6.1526565e-05f, 6.5524908e-05f, 6.9783085e-05f,
- 7.4317983e-05f, 7.9147585e-05f, 8.4291040e-05f, 8.9768747e-05f,
- 9.5602426e-05f, 0.00010181521f, 0.00010843174f, 0.00011547824f,
- 0.00012298267f, 0.00013097477f, 0.00013948625f, 0.00014855085f,
- 0.00015820453f, 0.00016848555f, 0.00017943469f, 0.00019109536f,
- 0.00020351382f, 0.00021673929f, 0.00023082423f, 0.00024582449f,
- 0.00026179955f, 0.00027881276f, 0.00029693158f, 0.00031622787f,
- 0.00033677814f, 0.00035866388f, 0.00038197188f, 0.00040679456f,
- 0.00043323036f, 0.00046138411f, 0.00049136745f, 0.00052329927f,
- 0.00055730621f, 0.00059352311f, 0.00063209358f, 0.00067317058f,
- 0.00071691700f, 0.00076350630f, 0.00081312324f, 0.00086596457f,
- 0.00092223983f, 0.00098217216f, 0.0010459992f, 0.0011139742f,
- 0.0011863665f, 0.0012634633f, 0.0013455702f, 0.0014330129f,
- 0.0015261382f, 0.0016253153f, 0.0017309374f, 0.0018434235f,
- 0.0019632195f, 0.0020908006f, 0.0022266726f, 0.0023713743f,
- 0.0025254795f, 0.0026895994f, 0.0028643847f, 0.0030505286f,
- 0.0032487691f, 0.0034598925f, 0.0036847358f, 0.0039241906f,
- 0.0041792066f, 0.0044507950f, 0.0047400328f, 0.0050480668f,
- 0.0053761186f, 0.0057254891f, 0.0060975636f, 0.0064938176f,
- 0.0069158225f, 0.0073652516f, 0.0078438871f, 0.0083536271f,
- 0.0088964928f, 0.009474637f, 0.010090352f, 0.010746080f,
- 0.011444421f, 0.012188144f, 0.012980198f, 0.013823725f,
- 0.014722068f, 0.015678791f, 0.016697687f, 0.017782797f,
- 0.018938423f, 0.020169149f, 0.021479854f, 0.022875735f,
- 0.024362330f, 0.025945531f, 0.027631618f, 0.029427276f,
- 0.031339626f, 0.033376252f, 0.035545228f, 0.037855157f,
- 0.040315199f, 0.042935108f, 0.045725273f, 0.048696758f,
- 0.051861348f, 0.055231591f, 0.058820850f, 0.062643361f,
- 0.066714279f, 0.071049749f, 0.075666962f, 0.080584227f,
- 0.085821044f, 0.091398179f, 0.097337747f, 0.10366330f,
- 0.11039993f, 0.11757434f, 0.12521498f, 0.13335215f,
- 0.14201813f, 0.15124727f, 0.16107617f, 0.17154380f,
- 0.18269168f, 0.19456402f, 0.20720788f, 0.22067342f,
- 0.23501402f, 0.25028656f, 0.26655159f, 0.28387361f,
- 0.30232132f, 0.32196786f, 0.34289114f, 0.36517414f,
- 0.38890521f, 0.41417847f, 0.44109412f, 0.46975890f,
- 0.50028648f, 0.53279791f, 0.56742212f, 0.60429640f,
- 0.64356699f, 0.68538959f, 0.72993007f, 0.77736504f,
- 0.82788260f, 0.88168307f, 0.9389798f, 1.0f
-};
-
-
-// @OPTIMIZE: if you want to replace this bresenham line-drawing routine,
-// note that you must produce bit-identical output to decode correctly;
-// this specific sequence of operations is specified in the spec (it's
-// drawing integer-quantized frequency-space lines that the encoder
-// expects to be exactly the same)
-// ... also, isn't the whole point of Bresenham's algorithm to NOT
-// have to divide in the setup? sigh.
-#ifndef STB_VORBIS_NO_DEFER_FLOOR
-#define LINE_OP(a,b) a *= b
-#else
-#define LINE_OP(a,b) a = b
-#endif
-
-#ifdef STB_VORBIS_DIVIDE_TABLE
-#define DIVTAB_NUMER 32
-#define DIVTAB_DENOM 64
-int8 integer_divide_table[DIVTAB_NUMER][DIVTAB_DENOM]; // 2KB
-#endif
-
-static __forceinline void draw_line(float *output, int x0, int y0, int x1, int y1, int n)
-{
- int dy = y1 - y0;
- int adx = x1 - x0;
- int ady = abs(dy);
- int base;
- int x=x0,y=y0;
- int err = 0;
- int sy;
-
-#ifdef STB_VORBIS_DIVIDE_TABLE
- if (adx < DIVTAB_DENOM && ady < DIVTAB_NUMER) {
- if (dy < 0) {
- base = -integer_divide_table[ady][adx];
- sy = base-1;
- } else {
- base = integer_divide_table[ady][adx];
- sy = base+1;
- }
- } else {
- base = dy / adx;
- if (dy < 0)
- sy = base - 1;
- else
- sy = base+1;
- }
-#else
- base = dy / adx;
- if (dy < 0)
- sy = base - 1;
- else
- sy = base+1;
-#endif
- ady -= abs(base) * adx;
- if (x1 > n) x1 = n;
- if (x < x1) {
- LINE_OP(output[x], inverse_db_table[y&255]);
- for (++x; x < x1; ++x) {
- err += ady;
- if (err >= adx) {
- err -= adx;
- y += sy;
- } else
- y += base;
- LINE_OP(output[x], inverse_db_table[y&255]);
- }
- }
-}
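-
-// Note on the arithmetic above: base is the truncated slope dy/adx and
-// ady is reduced to the remainder, so err accumulates the fractional part
-// and y steps by sy (base +/- 1) exactly when it overflows -- producing
-// the same integer line as the specification's render_line.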
-
-static int residue_decode(vorb *f, Codebook *book, float *target, int offset, int n, int rtype)
-{
- int k;
- if (rtype == 0) {
- int step = n / book->dimensions;
- for (k=0; k < step; ++k)
- if (!codebook_decode_step(f, book, target+offset+k, n-offset-k, step))
- return FALSE;
- } else {
- for (k=0; k < n; ) {
- if (!codebook_decode(f, book, target+offset, n-k))
- return FALSE;
- k += book->dimensions;
- offset += book->dimensions;
- }
- }
- return TRUE;
-}
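-
-// Residue format note: rtype 0 decodes each codeword across the vector at
-// a fixed stride (the spec's interleaved layout), while types 1 and 2
-// decode contiguous runs of book->dimensions values.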
-
-// n is 1/2 of the blocksize --
-// specification: "Correct per-vector decode length is [n]/2"
-static void decode_residue(vorb *f, float *residue_buffers[], int ch, int n, int rn, uint8 *do_not_decode)
-{
- int i,j,pass;
- Residue *r = f->residue_config + rn;
- int rtype = f->residue_types[rn];
- int c = r->classbook;
- int classwords = f->codebooks[c].dimensions;
- unsigned int actual_size = rtype == 2 ? n*2 : n;
- unsigned int limit_r_begin = (r->begin < actual_size ? r->begin : actual_size);
- unsigned int limit_r_end = (r->end < actual_size ? r->end : actual_size);
- int n_read = limit_r_end - limit_r_begin;
- int part_read = n_read / r->part_size;
- int temp_alloc_point = temp_alloc_save(f);
- #ifndef STB_VORBIS_DIVIDES_IN_RESIDUE
- uint8 ***part_classdata = (uint8 ***) temp_block_array(f,f->channels, part_read * sizeof(**part_classdata));
- #else
- int **classifications = (int **) temp_block_array(f,f->channels, part_read * sizeof(**classifications));
- #endif
-
- CHECK(f);
-
- for (i=0; i < ch; ++i)
- if (!do_not_decode[i])
- memset(residue_buffers[i], 0, sizeof(float) * n);
-
- if (rtype == 2 && ch != 1) {
- for (j=0; j < ch; ++j)
- if (!do_not_decode[j])
- break;
- if (j == ch)
- goto done;
-
- for (pass=0; pass < 8; ++pass) {
- int pcount = 0, class_set = 0;
- if (ch == 2) {
- while (pcount < part_read) {
- int z = r->begin + pcount*r->part_size;
- int c_inter = (z & 1), p_inter = z>>1;
- if (pass == 0) {
- Codebook *c = f->codebooks+r->classbook;
- int q;
- DECODE(q,f,c);
- if (q == EOP) goto done;
- #ifndef STB_VORBIS_DIVIDES_IN_RESIDUE
- part_classdata[0][class_set] = r->classdata[q];
- #else
- for (i=classwords-1; i >= 0; --i) {
- classifications[0][i+pcount] = q % r->classifications;
- q /= r->classifications;
- }
- #endif
- }
- for (i=0; i < classwords && pcount < part_read; ++i, ++pcount) {
- int z = r->begin + pcount*r->part_size;
- #ifndef STB_VORBIS_DIVIDES_IN_RESIDUE
- int c = part_classdata[0][class_set][i];
- #else
- int c = classifications[0][pcount];
- #endif
- int b = r->residue_books[c][pass];
- if (b >= 0) {
- Codebook *book = f->codebooks + b;
- #ifdef STB_VORBIS_DIVIDES_IN_CODEBOOK
- if (!codebook_decode_deinterleave_repeat(f, book, residue_buffers, ch, &c_inter, &p_inter, n, r->part_size))
- goto done;
- #else
- // saves 1%
- if (!codebook_decode_deinterleave_repeat(f, book, residue_buffers, ch, &c_inter, &p_inter, n, r->part_size))
- goto done;
- #endif
- } else {
- z += r->part_size;
- c_inter = z & 1;
- p_inter = z >> 1;
- }
- }
- #ifndef STB_VORBIS_DIVIDES_IN_RESIDUE
- ++class_set;
- #endif
- }
- } else if (ch > 2) {
- while (pcount < part_read) {
- int z = r->begin + pcount*r->part_size;
- int c_inter = z % ch, p_inter = z/ch;
- if (pass == 0) {
- Codebook *c = f->codebooks+r->classbook;
- int q;
- DECODE(q,f,c);
- if (q == EOP) goto done;
- #ifndef STB_VORBIS_DIVIDES_IN_RESIDUE
- part_classdata[0][class_set] = r->classdata[q];
- #else
- for (i=classwords-1; i >= 0; --i) {
- classifications[0][i+pcount] = q % r->classifications;
- q /= r->classifications;
- }
- #endif
- }
- for (i=0; i < classwords && pcount < part_read; ++i, ++pcount) {
- int z = r->begin + pcount*r->part_size;
- #ifndef STB_VORBIS_DIVIDES_IN_RESIDUE
- int c = part_classdata[0][class_set][i];
- #else
- int c = classifications[0][pcount];
- #endif
- int b = r->residue_books[c][pass];
- if (b >= 0) {
- Codebook *book = f->codebooks + b;
- if (!codebook_decode_deinterleave_repeat(f, book, residue_buffers, ch, &c_inter, &p_inter, n, r->part_size))
- goto done;
- } else {
- z += r->part_size;
- c_inter = z % ch;
- p_inter = z / ch;
- }
- }
- #ifndef STB_VORBIS_DIVIDES_IN_RESIDUE
- ++class_set;
- #endif
- }
- }
- }
- goto done;
- }
- CHECK(f);
-
- for (pass=0; pass < 8; ++pass) {
- int pcount = 0, class_set=0;
- while (pcount < part_read) {
- if (pass == 0) {
- for (j=0; j < ch; ++j) {
- if (!do_not_decode[j]) {
- Codebook *c = f->codebooks+r->classbook;
- int temp;
- DECODE(temp,f,c);
- if (temp == EOP) goto done;
- #ifndef STB_VORBIS_DIVIDES_IN_RESIDUE
- part_classdata[j][class_set] = r->classdata[temp];
- #else
- for (i=classwords-1; i >= 0; --i) {
- classifications[j][i+pcount] = temp % r->classifications;
- temp /= r->classifications;
- }
- #endif
- }
- }
- }
- for (i=0; i < classwords && pcount < part_read; ++i, ++pcount) {
- for (j=0; j < ch; ++j) {
- if (!do_not_decode[j]) {
- #ifndef STB_VORBIS_DIVIDES_IN_RESIDUE
- int c = part_classdata[j][class_set][i];
- #else
- int c = classifications[j][pcount];
- #endif
- int b = r->residue_books[c][pass];
- if (b >= 0) {
- float *target = residue_buffers[j];
- int offset = r->begin + pcount * r->part_size;
- int n = r->part_size;
- Codebook *book = f->codebooks + b;
- if (!residue_decode(f, book, target, offset, n, rtype))
- goto done;
- }
- }
- }
- }
- #ifndef STB_VORBIS_DIVIDES_IN_RESIDUE
- ++class_set;
- #endif
- }
- }
- done:
- CHECK(f);
- #ifndef STB_VORBIS_DIVIDES_IN_RESIDUE
- temp_free(f,part_classdata);
- #else
- temp_free(f,classifications);
- #endif
- temp_alloc_restore(f,temp_alloc_point);
-}
-
-
-#if 0
-// slow way for debugging
-void inverse_mdct_slow(float *buffer, int n)
-{
- int i,j;
- int n2 = n >> 1;
- float *x = (float *) malloc(sizeof(*x) * n2);
- memcpy(x, buffer, sizeof(*x) * n2);
- for (i=0; i < n; ++i) {
- float acc = 0;
- for (j=0; j < n2; ++j)
- // formula from paper:
- //acc += n/4.0f * x[j] * (float) cos(M_PI / 2 / n * (2 * i + 1 + n/2.0)*(2*j+1));
- // formula from wikipedia
- //acc += 2.0f / n2 * x[j] * (float) cos(M_PI/n2 * (i + 0.5 + n2/2)*(j + 0.5));
- // these are equivalent, except the formula from the paper inverts the multiplier!
- // however, what actually works is NO MULTIPLIER!?!
- //acc += 64 * 2.0f / n2 * x[j] * (float) cos(M_PI/n2 * (i + 0.5 + n2/2)*(j + 0.5));
- acc += x[j] * (float) cos(M_PI / 2 / n * (2 * i + 1 + n/2.0)*(2*j+1));
- buffer[i] = acc;
- }
- free(x);
-}
-#elif 0
-// same as above, but just barely able to run in real time on modern machines
-void inverse_mdct_slow(float *buffer, int n, vorb *f, int blocktype)
-{
- float mcos[16384];
- int i,j;
- int n2 = n >> 1, nmask = (n << 2) -1;
- float *x = (float *) malloc(sizeof(*x) * n2);
- memcpy(x, buffer, sizeof(*x) * n2);
- for (i=0; i < 4*n; ++i)
- mcos[i] = (float) cos(M_PI / 2 * i / n);
-
- for (i=0; i < n; ++i) {
- float acc = 0;
- for (j=0; j < n2; ++j)
- acc += x[j] * mcos[(2 * i + 1 + n2)*(2*j+1) & nmask];
- buffer[i] = acc;
- }
- free(x);
-}
-#elif 0
-// transform to use a slow dct-iv; this is STILL basically trivial,
-// but only requires half as many ops
-void dct_iv_slow(float *buffer, int n)
-{
- float mcos[16384];
- float x[2048];
- int i,j;
- int n2 = n >> 1, nmask = (n << 3) - 1;
- memcpy(x, buffer, sizeof(*x) * n);
- for (i=0; i < 8*n; ++i)
- mcos[i] = (float) cos(M_PI / 4 * i / n);
- for (i=0; i < n; ++i) {
- float acc = 0;
- for (j=0; j < n; ++j)
- acc += x[j] * mcos[((2 * i + 1)*(2*j+1)) & nmask];
- buffer[i] = acc;
- }
-}
-
-void inverse_mdct_slow(float *buffer, int n, vorb *f, int blocktype)
-{
- int i, n4 = n >> 2, n2 = n >> 1, n3_4 = n - n4;
- float temp[4096];
-
- memcpy(temp, buffer, n2 * sizeof(float));
- dct_iv_slow(temp, n2); // returns -c'-d, a-b'
-
- for (i=0; i < n4 ; ++i) buffer[i] = temp[i+n4]; // a-b'
- for ( ; i < n3_4; ++i) buffer[i] = -temp[n3_4 - i - 1]; // b-a', c+d'
- for ( ; i < n ; ++i) buffer[i] = -temp[i - n3_4]; // c'+d
-}
-#endif
-
-#ifndef LIBVORBIS_MDCT
-#define LIBVORBIS_MDCT 0
-#endif
-
-#if LIBVORBIS_MDCT
-// directly call the vorbis MDCT using an interface documented
-// by Jeff Roberts... useful for performance comparison
-typedef struct
-{
- int n;
- int log2n;
-
- float *trig;
- int *bitrev;
-
- float scale;
-} mdct_lookup;
-
-extern void mdct_init(mdct_lookup *lookup, int n);
-extern void mdct_clear(mdct_lookup *l);
-extern void mdct_backward(mdct_lookup *init, float *in, float *out);
-
-mdct_lookup M1,M2;
-
-void inverse_mdct(float *buffer, int n, vorb *f, int blocktype)
-{
- mdct_lookup *M;
- if (M1.n == n) M = &M1;
- else if (M2.n == n) M = &M2;
- else if (M1.n == 0) { mdct_init(&M1, n); M = &M1; }
- else {
- if (M2.n) __asm int 3;
- mdct_init(&M2, n);
- M = &M2;
- }
-
- mdct_backward(M, buffer, buffer);
-}
-#endif
-
-
-// the following were split out into separate functions while optimizing;
-// they could be pushed back up but eh. __forceinline showed no change;
-// they're probably already being inlined.
-static void imdct_step3_iter0_loop(int n, float *e, int i_off, int k_off, float *A)
-{
- float *ee0 = e + i_off;
- float *ee2 = ee0 + k_off;
- int i;
-
- assert((n & 3) == 0);
- for (i=(n>>2); i > 0; --i) {
- float k00_20, k01_21;
- k00_20 = ee0[ 0] - ee2[ 0];
- k01_21 = ee0[-1] - ee2[-1];
- ee0[ 0] += ee2[ 0];//ee0[ 0] = ee0[ 0] + ee2[ 0];
- ee0[-1] += ee2[-1];//ee0[-1] = ee0[-1] + ee2[-1];
- ee2[ 0] = k00_20 * A[0] - k01_21 * A[1];
- ee2[-1] = k01_21 * A[0] + k00_20 * A[1];
- A += 8;
-
- k00_20 = ee0[-2] - ee2[-2];
- k01_21 = ee0[-3] - ee2[-3];
- ee0[-2] += ee2[-2];//ee0[-2] = ee0[-2] + ee2[-2];
- ee0[-3] += ee2[-3];//ee0[-3] = ee0[-3] + ee2[-3];
- ee2[-2] = k00_20 * A[0] - k01_21 * A[1];
- ee2[-3] = k01_21 * A[0] + k00_20 * A[1];
- A += 8;
-
- k00_20 = ee0[-4] - ee2[-4];
- k01_21 = ee0[-5] - ee2[-5];
- ee0[-4] += ee2[-4];//ee0[-4] = ee0[-4] + ee2[-4];
- ee0[-5] += ee2[-5];//ee0[-5] = ee0[-5] + ee2[-5];
- ee2[-4] = k00_20 * A[0] - k01_21 * A[1];
- ee2[-5] = k01_21 * A[0] + k00_20 * A[1];
- A += 8;
-
- k00_20 = ee0[-6] - ee2[-6];
- k01_21 = ee0[-7] - ee2[-7];
- ee0[-6] += ee2[-6];//ee0[-6] = ee0[-6] + ee2[-6];
- ee0[-7] += ee2[-7];//ee0[-7] = ee0[-7] + ee2[-7];
- ee2[-6] = k00_20 * A[0] - k01_21 * A[1];
- ee2[-7] = k01_21 * A[0] + k00_20 * A[1];
- A += 8;
- ee0 -= 8;
- ee2 -= 8;
- }
-}
-
-static void imdct_step3_inner_r_loop(int lim, float *e, int d0, int k_off, float *A, int k1)
-{
- int i;
- float k00_20, k01_21;
-
- float *e0 = e + d0;
- float *e2 = e0 + k_off;
-
- for (i=lim >> 2; i > 0; --i) {
- k00_20 = e0[-0] - e2[-0];
- k01_21 = e0[-1] - e2[-1];
- e0[-0] += e2[-0];//e0[-0] = e0[-0] + e2[-0];
- e0[-1] += e2[-1];//e0[-1] = e0[-1] + e2[-1];
- e2[-0] = (k00_20)*A[0] - (k01_21) * A[1];
- e2[-1] = (k01_21)*A[0] + (k00_20) * A[1];
-
- A += k1;
-
- k00_20 = e0[-2] - e2[-2];
- k01_21 = e0[-3] - e2[-3];
- e0[-2] += e2[-2];//e0[-2] = e0[-2] + e2[-2];
- e0[-3] += e2[-3];//e0[-3] = e0[-3] + e2[-3];
- e2[-2] = (k00_20)*A[0] - (k01_21) * A[1];
- e2[-3] = (k01_21)*A[0] + (k00_20) * A[1];
-
- A += k1;
-
- k00_20 = e0[-4] - e2[-4];
- k01_21 = e0[-5] - e2[-5];
- e0[-4] += e2[-4];//e0[-4] = e0[-4] + e2[-4];
- e0[-5] += e2[-5];//e0[-5] = e0[-5] + e2[-5];
- e2[-4] = (k00_20)*A[0] - (k01_21) * A[1];
- e2[-5] = (k01_21)*A[0] + (k00_20) * A[1];
-
- A += k1;
-
- k00_20 = e0[-6] - e2[-6];
- k01_21 = e0[-7] - e2[-7];
- e0[-6] += e2[-6];//e0[-6] = e0[-6] + e2[-6];
- e0[-7] += e2[-7];//e0[-7] = e0[-7] + e2[-7];
- e2[-6] = (k00_20)*A[0] - (k01_21) * A[1];
- e2[-7] = (k01_21)*A[0] + (k00_20) * A[1];
-
- e0 -= 8;
- e2 -= 8;
-
- A += k1;
- }
-}
-
-static void imdct_step3_inner_s_loop(int n, float *e, int i_off, int k_off, float *A, int a_off, int k0)
-{
- int i;
- float A0 = A[0];
- float A1 = A[0+1];
- float A2 = A[0+a_off];
- float A3 = A[0+a_off+1];
- float A4 = A[0+a_off*2+0];
- float A5 = A[0+a_off*2+1];
- float A6 = A[0+a_off*3+0];
- float A7 = A[0+a_off*3+1];
-
- float k00,k11;
-
- float *ee0 = e +i_off;
- float *ee2 = ee0+k_off;
-
- for (i=n; i > 0; --i) {
- k00 = ee0[ 0] - ee2[ 0];
- k11 = ee0[-1] - ee2[-1];
- ee0[ 0] = ee0[ 0] + ee2[ 0];
- ee0[-1] = ee0[-1] + ee2[-1];
- ee2[ 0] = (k00) * A0 - (k11) * A1;
- ee2[-1] = (k11) * A0 + (k00) * A1;
-
- k00 = ee0[-2] - ee2[-2];
- k11 = ee0[-3] - ee2[-3];
- ee0[-2] = ee0[-2] + ee2[-2];
- ee0[-3] = ee0[-3] + ee2[-3];
- ee2[-2] = (k00) * A2 - (k11) * A3;
- ee2[-3] = (k11) * A2 + (k00) * A3;
-
- k00 = ee0[-4] - ee2[-4];
- k11 = ee0[-5] - ee2[-5];
- ee0[-4] = ee0[-4] + ee2[-4];
- ee0[-5] = ee0[-5] + ee2[-5];
- ee2[-4] = (k00) * A4 - (k11) * A5;
- ee2[-5] = (k11) * A4 + (k00) * A5;
-
- k00 = ee0[-6] - ee2[-6];
- k11 = ee0[-7] - ee2[-7];
- ee0[-6] = ee0[-6] + ee2[-6];
- ee0[-7] = ee0[-7] + ee2[-7];
- ee2[-6] = (k00) * A6 - (k11) * A7;
- ee2[-7] = (k11) * A6 + (k00) * A7;
-
- ee0 -= k0;
- ee2 -= k0;
- }
-}
-
-static __forceinline void iter_54(float *z)
-{
- float k00,k11,k22,k33;
- float y0,y1,y2,y3;
-
- k00 = z[ 0] - z[-4];
- y0 = z[ 0] + z[-4];
- y2 = z[-2] + z[-6];
- k22 = z[-2] - z[-6];
-
- z[-0] = y0 + y2; // z0 + z4 + z2 + z6
- z[-2] = y0 - y2; // z0 + z4 - z2 - z6
-
- // done with y0,y2
-
- k33 = z[-3] - z[-7];
-
- z[-4] = k00 + k33; // z0 - z4 + z3 - z7
- z[-6] = k00 - k33; // z0 - z4 - z3 + z7
-
- // done with k33
-
- k11 = z[-1] - z[-5];
- y1 = z[-1] + z[-5];
- y3 = z[-3] + z[-7];
-
- z[-1] = y1 + y3; // z1 + z5 + z3 + z7
- z[-3] = y1 - y3; // z1 + z5 - z3 - z7
- z[-5] = k11 - k22; // z1 - z5 + z2 - z6
- z[-7] = k11 + k22; // z1 - z5 - z2 + z6
-}
-
-static void imdct_step3_inner_s_loop_ld654(int n, float *e, int i_off, float *A, int base_n)
-{
- int a_off = base_n >> 3;
- float A2 = A[0+a_off];
- float *z = e + i_off;
- float *base = z - 16 * n;
-
- while (z > base) {
- float k00,k11;
-
- k00 = z[-0] - z[-8];
- k11 = z[-1] - z[-9];
- z[-0] = z[-0] + z[-8];
- z[-1] = z[-1] + z[-9];
- z[-8] = k00;
- z[-9] = k11 ;
-
- k00 = z[ -2] - z[-10];
- k11 = z[ -3] - z[-11];
- z[ -2] = z[ -2] + z[-10];
- z[ -3] = z[ -3] + z[-11];
- z[-10] = (k00+k11) * A2;
- z[-11] = (k11-k00) * A2;
-
- k00 = z[-12] - z[ -4]; // reverse to avoid a unary negation
- k11 = z[ -5] - z[-13];
- z[ -4] = z[ -4] + z[-12];
- z[ -5] = z[ -5] + z[-13];
- z[-12] = k11;
- z[-13] = k00;
-
- k00 = z[-14] - z[ -6]; // reverse to avoid a unary negation
- k11 = z[ -7] - z[-15];
- z[ -6] = z[ -6] + z[-14];
- z[ -7] = z[ -7] + z[-15];
- z[-14] = (k00+k11) * A2;
- z[-15] = (k00-k11) * A2;
-
- iter_54(z);
- iter_54(z-8);
- z -= 16;
- }
-}
-
-static void inverse_mdct(float *buffer, int n, vorb *f, int blocktype)
-{
- int n2 = n >> 1, n4 = n >> 2, n8 = n >> 3, l;
- int ld;
- // @OPTIMIZE: reduce register pressure by using fewer variables?
- int save_point = temp_alloc_save(f);
- float *buf2 = (float *) temp_alloc(f, n2 * sizeof(*buf2));
- float *u=NULL,*v=NULL;
- // twiddle factors
- float *A = f->A[blocktype];
-
- // IMDCT algorithm from "The use of multirate filter banks for coding of high quality digital audio"
- // See notes about bugs in that paper in less-optimal implementation 'inverse_mdct_old' after this function.
-
- // kernel from paper
-
-
- // merged:
- // copy and reflect spectral data
- // step 0
-
- // note that it turns out that the items added together during
- // this step are, in fact, being added to themselves (as reflected
- // by step 0). inexplicable inefficiency! this became obvious
- // once I combined the passes.
-
- // so there's a missing 'times 2' here (for adding X to itself).
- // this propagates through linearly to the end, where the numbers
- // are 1/2 too small, and need to be compensated for.
-
- {
- float *d,*e, *AA, *e_stop;
- d = &buf2[n2-2];
- AA = A;
- e = &buffer[0];
- e_stop = &buffer[n2];
- while (e != e_stop) {
- d[1] = (e[0] * AA[0] - e[2]*AA[1]);
- d[0] = (e[0] * AA[1] + e[2]*AA[0]);
- d -= 2;
- AA += 2;
- e += 4;
- }
-
- e = &buffer[n2-3];
- while (d >= buf2) {
- d[1] = (-e[2] * AA[0] - -e[0]*AA[1]);
- d[0] = (-e[2] * AA[1] + -e[0]*AA[0]);
- d -= 2;
- AA += 2;
- e -= 4;
- }
- }
-
- // now we use symbolic names for these, so that we can
- // possibly swap their meaning as we change which operations
- // are in place
-
- u = buffer;
- v = buf2;
-
- // step 2 (paper output is w, now u)
- // this could be in place, but the data ends up in the wrong
- // place... _somebody_'s got to swap it, so this is nominated
- {
- float *AA = &A[n2-8];
- float *d0,*d1, *e0, *e1;
-
- e0 = &v[n4];
- e1 = &v[0];
-
- d0 = &u[n4];
- d1 = &u[0];
-
- while (AA >= A) {
- float v40_20, v41_21;
-
- v41_21 = e0[1] - e1[1];
- v40_20 = e0[0] - e1[0];
- d0[1] = e0[1] + e1[1];
- d0[0] = e0[0] + e1[0];
- d1[1] = v41_21*AA[4] - v40_20*AA[5];
- d1[0] = v40_20*AA[4] + v41_21*AA[5];
-
- v41_21 = e0[3] - e1[3];
- v40_20 = e0[2] - e1[2];
- d0[3] = e0[3] + e1[3];
- d0[2] = e0[2] + e1[2];
- d1[3] = v41_21*AA[0] - v40_20*AA[1];
- d1[2] = v40_20*AA[0] + v41_21*AA[1];
-
- AA -= 8;
-
- d0 += 4;
- d1 += 4;
- e0 += 4;
- e1 += 4;
- }
- }
-
- // step 3
- ld = ilog(n) - 1; // ilog is off-by-one from normal definitions
-
- // optimized step 3:
-
- // the original step3 loop can be nested r inside s or s inside r;
- // it's written originally as s inside r, but this is dumb when r
- // iterates many times, and s few. So I have two copies of it and
- // switch between them halfway.
-
- // this is iteration 0 of step 3
- imdct_step3_iter0_loop(n >> 4, u, n2-1-n4*0, -(n >> 3), A);
- imdct_step3_iter0_loop(n >> 4, u, n2-1-n4*1, -(n >> 3), A);
-
- // this is iteration 1 of step 3
- imdct_step3_inner_r_loop(n >> 5, u, n2-1 - n8*0, -(n >> 4), A, 16);
- imdct_step3_inner_r_loop(n >> 5, u, n2-1 - n8*1, -(n >> 4), A, 16);
- imdct_step3_inner_r_loop(n >> 5, u, n2-1 - n8*2, -(n >> 4), A, 16);
- imdct_step3_inner_r_loop(n >> 5, u, n2-1 - n8*3, -(n >> 4), A, 16);
-
- l=2;
- for (; l < (ld-3)>>1; ++l) {
- int k0 = n >> (l+2), k0_2 = k0>>1;
- int lim = 1 << (l+1);
- int i;
- for (i=0; i < lim; ++i)
- imdct_step3_inner_r_loop(n >> (l+4), u, n2-1 - k0*i, -k0_2, A, 1 << (l+3));
- }
-
- for (; l < ld-6; ++l) {
- int k0 = n >> (l+2), k1 = 1 << (l+3), k0_2 = k0>>1;
- int rlim = n >> (l+6), r;
- int lim = 1 << (l+1);
- int i_off;
- float *A0 = A;
- i_off = n2-1;
- for (r=rlim; r > 0; --r) {
- imdct_step3_inner_s_loop(lim, u, i_off, -k0_2, A0, k1, k0);
- A0 += k1*4;
- i_off -= 8;
- }
- }
-
- // iterations with count:
- // ld-6,-5,-4 all interleaved together
- // the big win comes from getting rid of needless flops
- // due to the constants on pass 5 & 4 being all 1 and 0;
- // combining them to be simultaneous to improve cache made little difference
- imdct_step3_inner_s_loop_ld654(n >> 5, u, n2-1, A, n);
-
- // output is u
-
- // step 4, 5, and 6
- // cannot be in-place because of step 5
- {
- uint16 *bitrev = f->bit_reverse[blocktype];
- // weirdly, I'd have thought reading sequentially and writing
- // erratically would have been better than vice-versa, but in
- // fact that's not what my testing showed. (That is, with
- // j = bitreverse(i), do you read i and write j, or read j and write i.)
-
- float *d0 = &v[n4-4];
- float *d1 = &v[n2-4];
- while (d0 >= v) {
- int k4;
-
- k4 = bitrev[0];
- d1[3] = u[k4+0];
- d1[2] = u[k4+1];
- d0[3] = u[k4+2];
- d0[2] = u[k4+3];
-
- k4 = bitrev[1];
- d1[1] = u[k4+0];
- d1[0] = u[k4+1];
- d0[1] = u[k4+2];
- d0[0] = u[k4+3];
-
- d0 -= 4;
- d1 -= 4;
- bitrev += 2;
- }
- }
- // (paper output is u, now v)
-
-
- // data must be in buf2
- assert(v == buf2);
-
- // step 7 (paper output is v, now v)
- // this is now in place
- {
- float *C = f->C[blocktype];
- float *d, *e;
-
- d = v;
- e = v + n2 - 4;
-
- while (d < e) {
- float a02,a11,b0,b1,b2,b3;
-
- a02 = d[0] - e[2];
- a11 = d[1] + e[3];
-
- b0 = C[1]*a02 + C[0]*a11;
- b1 = C[1]*a11 - C[0]*a02;
-
- b2 = d[0] + e[ 2];
- b3 = d[1] - e[ 3];
-
- d[0] = b2 + b0;
- d[1] = b3 + b1;
- e[2] = b2 - b0;
- e[3] = b1 - b3;
-
- a02 = d[2] - e[0];
- a11 = d[3] + e[1];
-
- b0 = C[3]*a02 + C[2]*a11;
- b1 = C[3]*a11 - C[2]*a02;
-
- b2 = d[2] + e[ 0];
- b3 = d[3] - e[ 1];
-
- d[2] = b2 + b0;
- d[3] = b3 + b1;
- e[0] = b2 - b0;
- e[1] = b1 - b3;
-
- C += 4;
- d += 4;
- e -= 4;
- }
- }
-
- // data must be in buf2
-
-
- // step 8+decode (paper output is X, now buffer)
- // this generates pairs of data a la 8 and pushes them directly through
- // the decode kernel (pushing rather than pulling) to avoid having
- // to make another pass later
-
- // this cannot POSSIBLY be in place, so we refer to the buffers directly
-
- {
- float *d0,*d1,*d2,*d3;
-
- float *B = f->B[blocktype] + n2 - 8;
- float *e = buf2 + n2 - 8;
- d0 = &buffer[0];
- d1 = &buffer[n2-4];
- d2 = &buffer[n2];
- d3 = &buffer[n-4];
- while (e >= v) {
- float p0,p1,p2,p3;
-
- p3 = e[6]*B[7] - e[7]*B[6];
- p2 = -e[6]*B[6] - e[7]*B[7];
-
- d0[0] = p3;
- d1[3] = - p3;
- d2[0] = p2;
- d3[3] = p2;
-
- p1 = e[4]*B[5] - e[5]*B[4];
- p0 = -e[4]*B[4] - e[5]*B[5];
-
- d0[1] = p1;
- d1[2] = - p1;
- d2[1] = p0;
- d3[2] = p0;
-
- p3 = e[2]*B[3] - e[3]*B[2];
- p2 = -e[2]*B[2] - e[3]*B[3];
-
- d0[2] = p3;
- d1[1] = - p3;
- d2[2] = p2;
- d3[1] = p2;
-
- p1 = e[0]*B[1] - e[1]*B[0];
- p0 = -e[0]*B[0] - e[1]*B[1];
-
- d0[3] = p1;
- d1[0] = - p1;
- d2[3] = p0;
- d3[0] = p0;
-
- B -= 8;
- e -= 8;
- d0 += 4;
- d2 += 4;
- d1 -= 4;
- d3 -= 4;
- }
- }
-
- temp_free(f,buf2);
- temp_alloc_restore(f,save_point);
-}
-
-#if 0
-// this is the original version of the above code, if you want to optimize it from scratch
-void inverse_mdct_naive(float *buffer, int n)
-{
- float s;
- float A[1 << 12], B[1 << 12], C[1 << 11];
- int i,k,k2,k4, n2 = n >> 1, n4 = n >> 2, n8 = n >> 3, l;
- int n3_4 = n - n4, ld;
- // how can they claim this only uses N words?!
- // oh, because they're only used sparsely, whoops
- float u[1 << 13], X[1 << 13], v[1 << 13], w[1 << 13];
- // set up twiddle factors
-
- for (k=k2=0; k < n4; ++k,k2+=2) {
- A[k2 ] = (float) cos(4*k*M_PI/n);
- A[k2+1] = (float) -sin(4*k*M_PI/n);
- B[k2 ] = (float) cos((k2+1)*M_PI/n/2);
- B[k2+1] = (float) sin((k2+1)*M_PI/n/2);
- }
- for (k=k2=0; k < n8; ++k,k2+=2) {
- C[k2 ] = (float) cos(2*(k2+1)*M_PI/n);
- C[k2+1] = (float) -sin(2*(k2+1)*M_PI/n);
- }
-
- // IMDCT algorithm from "The use of multirate filter banks for coding of high quality digital audio"
- // Note there are bugs in that pseudocode, presumably due to them attempting
- // to rename the arrays nicely rather than representing the way their actual
- // implementation bounces buffers back and forth. As a result, even in the
- // "some formulars corrected" version, a direct implementation fails. These
- // are noted below as "paper bug".
-
- // copy and reflect spectral data
- for (k=0; k < n2; ++k) u[k] = buffer[k];
- for ( ; k < n ; ++k) u[k] = -buffer[n - k - 1];
- // kernel from paper
- // step 1
- for (k=k2=k4=0; k < n4; k+=1, k2+=2, k4+=4) {
- v[n-k4-1] = (u[k4] - u[n-k4-1]) * A[k2] - (u[k4+2] - u[n-k4-3])*A[k2+1];
- v[n-k4-3] = (u[k4] - u[n-k4-1]) * A[k2+1] + (u[k4+2] - u[n-k4-3])*A[k2];
- }
- // step 2
- for (k=k4=0; k < n8; k+=1, k4+=4) {
- w[n2+3+k4] = v[n2+3+k4] + v[k4+3];
- w[n2+1+k4] = v[n2+1+k4] + v[k4+1];
- w[k4+3] = (v[n2+3+k4] - v[k4+3])*A[n2-4-k4] - (v[n2+1+k4]-v[k4+1])*A[n2-3-k4];
- w[k4+1] = (v[n2+1+k4] - v[k4+1])*A[n2-4-k4] + (v[n2+3+k4]-v[k4+3])*A[n2-3-k4];
- }
- // step 3
- ld = ilog(n) - 1; // ilog is off-by-one from normal definitions
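- // (stb_vorbis's ilog returns the bit count, e.g. ilog(1)==1, so for
- //  the power-of-two n used here, ld == log2(n))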
- for (l=0; l < ld-3; ++l) {
- int k0 = n >> (l+2), k1 = 1 << (l+3);
- int rlim = n >> (l+4), r4, r;
- int s2lim = 1 << (l+2), s2;
- for (r=r4=0; r < rlim; r4+=4,++r) {
- for (s2=0; s2 < s2lim; s2+=2) {
- u[n-1-k0*s2-r4] = w[n-1-k0*s2-r4] + w[n-1-k0*(s2+1)-r4];
- u[n-3-k0*s2-r4] = w[n-3-k0*s2-r4] + w[n-3-k0*(s2+1)-r4];
- u[n-1-k0*(s2+1)-r4] = (w[n-1-k0*s2-r4] - w[n-1-k0*(s2+1)-r4]) * A[r*k1]
- - (w[n-3-k0*s2-r4] - w[n-3-k0*(s2+1)-r4]) * A[r*k1+1];
- u[n-3-k0*(s2+1)-r4] = (w[n-3-k0*s2-r4] - w[n-3-k0*(s2+1)-r4]) * A[r*k1]
- + (w[n-1-k0*s2-r4] - w[n-1-k0*(s2+1)-r4]) * A[r*k1+1];
- }
- }
- if (l+1 < ld-3) {
- // paper bug: ping-ponging of u&w here is omitted
- memcpy(w, u, sizeof(u));
- }
- }
-
- // step 4
- for (i=0; i < n8; ++i) {
- int j = bit_reverse(i) >> (32-ld+3);
- assert(j < n8);
- if (i == j) {
- // paper bug: original code probably swapped in place; if copying,
- // need to directly copy in this case
- int i8 = i << 3;
- v[i8+1] = u[i8+1];
- v[i8+3] = u[i8+3];
- v[i8+5] = u[i8+5];
- v[i8+7] = u[i8+7];
- } else if (i < j) {
- int i8 = i << 3, j8 = j << 3;
- v[j8+1] = u[i8+1], v[i8+1] = u[j8 + 1];
- v[j8+3] = u[i8+3], v[i8+3] = u[j8 + 3];
- v[j8+5] = u[i8+5], v[i8+5] = u[j8 + 5];
- v[j8+7] = u[i8+7], v[i8+7] = u[j8 + 7];
- }
- }
- // step 5
- for (k=0; k < n2; ++k) {
- w[k] = v[k*2+1];
- }
- // step 6
- for (k=k2=k4=0; k < n8; ++k, k2 += 2, k4 += 4) {
- u[n-1-k2] = w[k4];
- u[n-2-k2] = w[k4+1];
- u[n3_4 - 1 - k2] = w[k4+2];
- u[n3_4 - 2 - k2] = w[k4+3];
- }
- // step 7
- for (k=k2=0; k < n8; ++k, k2 += 2) {
- v[n2 + k2 ] = ( u[n2 + k2] + u[n-2-k2] + C[k2+1]*(u[n2+k2]-u[n-2-k2]) + C[k2]*(u[n2+k2+1]+u[n-2-k2+1]))/2;
- v[n-2 - k2] = ( u[n2 + k2] + u[n-2-k2] - C[k2+1]*(u[n2+k2]-u[n-2-k2]) - C[k2]*(u[n2+k2+1]+u[n-2-k2+1]))/2;
- v[n2+1+ k2] = ( u[n2+1+k2] - u[n-1-k2] + C[k2+1]*(u[n2+1+k2]+u[n-1-k2]) - C[k2]*(u[n2+k2]-u[n-2-k2]))/2;
- v[n-1 - k2] = (-u[n2+1+k2] + u[n-1-k2] + C[k2+1]*(u[n2+1+k2]+u[n-1-k2]) - C[k2]*(u[n2+k2]-u[n-2-k2]))/2;
- }
- // step 8
- for (k=k2=0; k < n4; ++k,k2 += 2) {
- X[k] = v[k2+n2]*B[k2 ] + v[k2+1+n2]*B[k2+1];
- X[n2-1-k] = v[k2+n2]*B[k2+1] - v[k2+1+n2]*B[k2 ];
- }
-
- // decode kernel to output
- // the following value was determined experimentally
- // (by first figuring out what made inverse_mdct_slow work, then matching that here)
- // (probably the vorbis encoder premultiplies by n or n/2 to save it on the decoder?)
- s = 0.5; // theoretically would be n4
-
- // [[[ note! the s value of 0.5 is compensated for by the B[] in the current code,
- // so it needs to use the "old" B values to behave correctly, or else
- // set s to 1.0 ]]]
- for (i=0; i < n4 ; ++i) buffer[i] = s * X[i+n4];
- for ( ; i < n3_4; ++i) buffer[i] = -s * X[n3_4 - i - 1];
- for ( ; i < n ; ++i) buffer[i] = -s * X[i - n3_4];
-}
-#endif
-
-static float *get_window(vorb *f, int len)
-{
- len <<= 1;
- if (len == f->blocksize_0) return f->window[0];
- if (len == f->blocksize_1) return f->window[1];
- return NULL;
-}
-
-#ifndef STB_VORBIS_NO_DEFER_FLOOR
-typedef int16 YTYPE;
-#else
-typedef int YTYPE;
-#endif
-static int do_floor(vorb *f, Mapping *map, int i, int n, float *target, YTYPE *finalY, uint8 *step2_flag)
-{
- int n2 = n >> 1;
- int s = map->chan[i].mux, floor;
- floor = map->submap_floor[s];
- if (f->floor_types[floor] == 0) {
- return error(f, VORBIS_invalid_stream);
- } else {
- Floor1 *g = &f->floor_config[floor].floor1;
- int j,q;
- int lx = 0, ly = finalY[0] * g->floor1_multiplier;
- for (q=1; q < g->values; ++q) {
- j = g->sorted_order[q];
- #ifndef STB_VORBIS_NO_DEFER_FLOOR
- if (finalY[j] >= 0)
- #else
- if (step2_flag[j])
- #endif
- {
- int hy = finalY[j] * g->floor1_multiplier;
- int hx = g->Xlist[j];
- if (lx != hx)
- draw_line(target, lx,ly, hx,hy, n2);
- CHECK(f);
- lx = hx, ly = hy;
- }
- }
- if (lx < n2) {
- // optimization of: draw_line(target, lx,ly, n,ly, n2);
- for (j=lx; j < n2; ++j)
- LINE_OP(target[j], inverse_db_table[ly]);
- CHECK(f);
- }
- }
- return TRUE;
-}
-
-// The meaning of "left" and "right"
-//
-// For a given frame:
-// we compute samples from 0..n
-// window_center is n/2
-// we'll window and mix the samples from left_start to left_end with data from the previous frame
-// all of the samples from left_end to right_start can be output without mixing; however,
-// this interval is 0-length except when transitioning between short and long frames
-// all of the samples from right_start to right_end need to be mixed with the next frame,
-// which we don't have, so those get saved in a buffer
-// frame N's right_end-right_start, the number of samples to mix with the next frame,
-// has to be the same as frame N+1's left_end-left_start (which they are by
-// construction)
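-//
-// schematically, for one frame of n samples:
-//
-//      0 ... left_start ... left_end ... right_start ... right_end ... n
-//            [mix with previous][output as-is][save for next frame]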
-
-static int vorbis_decode_initial(vorb *f, int *p_left_start, int *p_left_end, int *p_right_start, int *p_right_end, int *mode)
-{
- Mode *m;
- int i, n, prev, next, window_center;
- f->channel_buffer_start = f->channel_buffer_end = 0;
-
- retry:
- if (f->eof) return FALSE;
- if (!maybe_start_packet(f))
- return FALSE;
- // check packet type
- if (get_bits(f,1) != 0) {
- if (IS_PUSH_MODE(f))
- return error(f,VORBIS_bad_packet_type);
- while (EOP != get8_packet(f));
- goto retry;
- }
-
- if (f->alloc.alloc_buffer)
- assert(f->alloc.alloc_buffer_length_in_bytes == f->temp_offset);
-
- i = get_bits(f, ilog(f->mode_count-1));
- if (i == EOP) return FALSE;
- if (i >= f->mode_count) return FALSE;
- *mode = i;
- m = f->mode_config + i;
- if (m->blockflag) {
- n = f->blocksize_1;
- prev = get_bits(f,1);
- next = get_bits(f,1);
- } else {
- prev = next = 0;
- n = f->blocksize_0;
- }
-
-// WINDOWING
-
- window_center = n >> 1;
- if (m->blockflag && !prev) {
- *p_left_start = (n - f->blocksize_0) >> 2;
- *p_left_end = (n + f->blocksize_0) >> 2;
- } else {
- *p_left_start = 0;
- *p_left_end = window_center;
- }
- if (m->blockflag && !next) {
- *p_right_start = (n*3 - f->blocksize_0) >> 2;
- *p_right_end = (n*3 + f->blocksize_0) >> 2;
- } else {
- *p_right_start = window_center;
- *p_right_end = n;
- }
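- // illustrative numbers (not from any stream): with blocksize_0=256 and
- // n=blocksize_1=2048, a long block after a short one gets left_start=448,
- // left_end=576; a long block before a short one gets right_start=1472,
- // right_end=1600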
-
- return TRUE;
-}
-
-static int vorbis_decode_packet_rest(vorb *f, int *len, Mode *m, int left_start, int left_end, int right_start, int right_end, int *p_left)
-{
- Mapping *map;
- int i,j,k,n,n2;
- int zero_channel[256];
- int really_zero_channel[256];
-
-// WINDOWING
-
- n = f->blocksize[m->blockflag];
- map = &f->mapping[m->mapping];
-
-// FLOORS
- n2 = n >> 1;
-
- CHECK(f);
-
- for (i=0; i < f->channels; ++i) {
- int s = map->chan[i].mux, floor;
- zero_channel[i] = FALSE;
- floor = map->submap_floor[s];
- if (f->floor_types[floor] == 0) {
- return error(f, VORBIS_invalid_stream);
- } else {
- Floor1 *g = &f->floor_config[floor].floor1;
- if (get_bits(f, 1)) {
- short *finalY;
- uint8 step2_flag[256];
- static int range_list[4] = { 256, 128, 86, 64 };
- int range = range_list[g->floor1_multiplier-1];
- int offset = 2;
- finalY = f->finalY[i];
- finalY[0] = get_bits(f, ilog(range)-1);
- finalY[1] = get_bits(f, ilog(range)-1);
- for (j=0; j < g->partitions; ++j) {
- int pclass = g->partition_class_list[j];
- int cdim = g->class_dimensions[pclass];
- int cbits = g->class_subclasses[pclass];
- int csub = (1 << cbits)-1;
- int cval = 0;
- if (cbits) {
- Codebook *c = f->codebooks + g->class_masterbooks[pclass];
- DECODE(cval,f,c);
- }
- for (k=0; k < cdim; ++k) {
- int book = g->subclass_books[pclass][cval & csub];
- cval = cval >> cbits;
- if (book >= 0) {
- int temp;
- Codebook *c = f->codebooks + book;
- DECODE(temp,f,c);
- finalY[offset++] = temp;
- } else
- finalY[offset++] = 0;
- }
- }
- if (f->valid_bits == INVALID_BITS) goto error; // behavior according to spec
- step2_flag[0] = step2_flag[1] = 1;
- for (j=2; j < g->values; ++j) {
- int low, high, pred, highroom, lowroom, room, val;
- low = g->neighbors[j][0];
- high = g->neighbors[j][1];
- //neighbors(g->Xlist, j, &low, &high);
- pred = predict_point(g->Xlist[j], g->Xlist[low], g->Xlist[high], finalY[low], finalY[high]);
- val = finalY[j];
- highroom = range - pred;
- lowroom = pred;
- if (highroom < lowroom)
- room = highroom * 2;
- else
- room = lowroom * 2;
- if (val) {
- step2_flag[low] = step2_flag[high] = 1;
- step2_flag[j] = 1;
- if (val >= room)
- if (highroom > lowroom)
- finalY[j] = val - lowroom + pred;
- else
- finalY[j] = pred - val + highroom - 1;
- else
- if (val & 1)
- finalY[j] = pred - ((val+1)>>1);
- else
- finalY[j] = pred + (val>>1);
- } else {
- step2_flag[j] = 0;
- finalY[j] = pred;
- }
- }
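- // the loop above turns the decoded offsets into absolute floor values:
- // small offsets zig-zag around the interpolated prediction (odd below,
- // even above), larger ones extend toward whichever side has more
- // headroom. illustrative: pred=50, range=256 -> lowroom=50, room=100;
- // val=7 gives finalY[j]=46, val=120 gives finalY[j]=120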
-
-#ifdef STB_VORBIS_NO_DEFER_FLOOR
- do_floor(f, map, i, n, f->floor_buffers[i], finalY, step2_flag);
-#else
- // defer final floor computation until _after_ residue
- for (j=0; j < g->values; ++j) {
- if (!step2_flag[j])
- finalY[j] = -1;
- }
-#endif
- } else {
- error:
- zero_channel[i] = TRUE;
- }
- // So we just defer everything else to later
-
- // at this point this channel's floor has been decoded (or deferred)
- }
- }
- CHECK(f);
- // at this point we've decoded all floors
-
- if (f->alloc.alloc_buffer)
- assert(f->alloc.alloc_buffer_length_in_bytes == f->temp_offset);
-
- // re-enable coupled channels if necessary
- memcpy(really_zero_channel, zero_channel, sizeof(really_zero_channel[0]) * f->channels);
- for (i=0; i < map->coupling_steps; ++i)
- if (!zero_channel[map->chan[i].magnitude] || !zero_channel[map->chan[i].angle]) {
- zero_channel[map->chan[i].magnitude] = zero_channel[map->chan[i].angle] = FALSE;
- }
-
- CHECK(f);
-// RESIDUE DECODE
- for (i=0; i < map->submaps; ++i) {
- float *residue_buffers[STB_VORBIS_MAX_CHANNELS];
- int r;
- uint8 do_not_decode[256];
- int ch = 0;
- for (j=0; j < f->channels; ++j) {
- if (map->chan[j].mux == i) {
- if (zero_channel[j]) {
- do_not_decode[ch] = TRUE;
- residue_buffers[ch] = NULL;
- } else {
- do_not_decode[ch] = FALSE;
- residue_buffers[ch] = f->channel_buffers[j];
- }
- ++ch;
- }
- }
- r = map->submap_residue[i];
- decode_residue(f, residue_buffers, ch, n2, r, do_not_decode);
- }
-
- if (f->alloc.alloc_buffer)
- assert(f->alloc.alloc_buffer_length_in_bytes == f->temp_offset);
- CHECK(f);
-
-// INVERSE COUPLING
- for (i = map->coupling_steps-1; i >= 0; --i) {
- int n2 = n >> 1;
- float *m = f->channel_buffers[map->chan[i].magnitude];
- float *a = f->channel_buffers[map->chan[i].angle ];
- for (j=0; j < n2; ++j) {
- float a2,m2;
- if (m[j] > 0)
- if (a[j] > 0)
- m2 = m[j], a2 = m[j] - a[j];
- else
- a2 = m[j], m2 = m[j] + a[j];
- else
- if (a[j] > 0)
- m2 = m[j], a2 = m[j] + a[j];
- else
- a2 = m[j], m2 = m[j] - a[j];
- m[j] = m2;
- a[j] = a2;
- }
- }
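- // e.g. (illustrative) magnitude=5, angle=2 decodes to the pair (5,3),
- // and magnitude=5, angle=-2 to (3,5); the angle channel holds a signed
- // offset against the magnitude channel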
- CHECK(f);
-
- // finish decoding the floors
-#ifndef STB_VORBIS_NO_DEFER_FLOOR
- for (i=0; i < f->channels; ++i) {
- if (really_zero_channel[i]) {
- memset(f->channel_buffers[i], 0, sizeof(*f->channel_buffers[i]) * n2);
- } else {
- do_floor(f, map, i, n, f->channel_buffers[i], f->finalY[i], NULL);
- }
- }
-#else
- for (i=0; i < f->channels; ++i) {
- if (really_zero_channel[i]) {
- memset(f->channel_buffers[i], 0, sizeof(*f->channel_buffers[i]) * n2);
- } else {
- for (j=0; j < n2; ++j)
- f->channel_buffers[i][j] *= f->floor_buffers[i][j];
- }
- }
-#endif
-
-// INVERSE MDCT
- CHECK(f);
- for (i=0; i < f->channels; ++i)
- inverse_mdct(f->channel_buffers[i], n, f, m->blockflag);
- CHECK(f);
-
- // this shouldn't be necessary, unless we exited on an error
- // and want to flush to get to the next packet
- flush_packet(f);
-
- if (f->first_decode) {
- // assume we start so first non-discarded sample is sample 0
- // this isn't to spec, but spec would require us to read ahead
- // and decode the size of all current frames--could be done,
- // but presumably it's not a commonly used feature
- f->current_loc = -n2; // start of first frame is positioned for discard
- // we might have to discard samples "from" the next frame too,
- // if we're lapping a large block then a small one at the start?
- f->discard_samples_deferred = n - right_end;
- f->current_loc_valid = TRUE;
- f->first_decode = FALSE;
- } else if (f->discard_samples_deferred) {
- if (f->discard_samples_deferred >= right_start - left_start) {
- f->discard_samples_deferred -= (right_start - left_start);
- left_start = right_start;
- *p_left = left_start;
- } else {
- left_start += f->discard_samples_deferred;
- *p_left = left_start;
- f->discard_samples_deferred = 0;
- }
- } else if (f->previous_length == 0 && f->current_loc_valid) {
- // we're recovering from a seek... that means we're going to discard
- // the samples from this packet even though we know our position from
- // the last page header, so we need to update the position based on
- // the discarded samples here
- // but wait, the code below is going to add this in itself even
- // on a discard, so we don't need to do it here...
- }
-
- // check if we have ogg information about the sample # for this packet
- if (f->last_seg_which == f->end_seg_with_known_loc) {
- // if we have a valid current loc, and this is final:
- if (f->current_loc_valid && (f->page_flag & PAGEFLAG_last_page)) {
- uint32 current_end = f->known_loc_for_packet;
- // then let's infer the size of the (probably) short final frame
- if (current_end < f->current_loc + (right_end-left_start)) {
- if (current_end < f->current_loc) {
- // negative truncation, that's impossible!
- *len = 0;
- } else {
- *len = current_end - f->current_loc;
- }
- *len += left_start; // this doesn't seem right, but has no ill effect on my test files
- if (*len > right_end) *len = right_end; // this should never happen
- f->current_loc += *len;
- return TRUE;
- }
- }
- // otherwise, just set our sample loc
- // guess that the ogg granule pos refers to the _middle_ of the
- // last frame?
- // set f->current_loc to the position of left_start
- f->current_loc = f->known_loc_for_packet - (n2-left_start);
- f->current_loc_valid = TRUE;
- }
- if (f->current_loc_valid)
- f->current_loc += (right_start - left_start);
-
- if (f->alloc.alloc_buffer)
- assert(f->alloc.alloc_buffer_length_in_bytes == f->temp_offset);
- *len = right_end; // ignore samples after the window goes to 0
- CHECK(f);
-
- return TRUE;
-}
-
-static int vorbis_decode_packet(vorb *f, int *len, int *p_left, int *p_right)
-{
- int mode, left_end, right_end;
- if (!vorbis_decode_initial(f, p_left, &left_end, p_right, &right_end, &mode)) return 0;
- return vorbis_decode_packet_rest(f, len, f->mode_config + mode, *p_left, left_end, *p_right, right_end, p_left);
-}
-
-static int vorbis_finish_frame(stb_vorbis *f, int len, int left, int right)
-{
- int prev,i,j;
- // we use right&left (the start of the right- and left-window sin()-regions)
- // to determine how much to return, rather than inferring from the rules
- // (same result, clearer code); 'left' indicates where our sin() window
- // starts, therefore where the previous window's right edge starts, and
- // therefore where to start mixing from the previous buffer. 'right'
- // indicates where our sin() ending-window starts, therefore that's where
- // we start saving, and where our returned-data ends.
-
- // mixin from previous window
- if (f->previous_length) {
- int i,j, n = f->previous_length;
- float *w = get_window(f, n);
- if (w == NULL) return 0;
- for (i=0; i < f->channels; ++i) {
- for (j=0; j < n; ++j)
- f->channel_buffers[i][left+j] =
- f->channel_buffers[i][left+j]*w[ j] +
- f->previous_window[i][ j]*w[n-1-j];
- }
- }
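- // (the vorbis window shapes are chosen so that, combined with the
- // encoder-side windowing, the two lapped contributions above sum back
- // to the original signal -- the standard MDCT/TDAC lapping condition)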
-
- prev = f->previous_length;
-
- // last half of this data becomes previous window
- f->previous_length = len - right;
-
- // @OPTIMIZE: could avoid this copy by double-buffering the
- // output (flipping previous_window with channel_buffers), but
- // then previous_window would have to be 2x as large, and
- // channel_buffers couldn't be temp mem (although they're NOT
- // currently temp mem, they could be (unless we want to level
- // performance by spreading out the computation))
- for (i=0; i < f->channels; ++i)
- for (j=0; right+j < len; ++j)
- f->previous_window[i][j] = f->channel_buffers[i][right+j];
-
- if (!prev)
- // there was no previous packet, so this data isn't valid...
- // this isn't entirely true, only the would-have-overlapped data
- // isn't valid, but this seems to be what the spec requires
- return 0;
-
- // truncate a short frame
- if (len < right) right = len;
-
- f->samples_output += right-left;
-
- return right - left;
-}
-
-static int vorbis_pump_first_frame(stb_vorbis *f)
-{
- int len, right, left, res;
- res = vorbis_decode_packet(f, &len, &left, &right);
- if (res)
- vorbis_finish_frame(f, len, left, right);
- return res;
-}
-
-#ifndef STB_VORBIS_NO_PUSHDATA_API
-static int is_whole_packet_present(stb_vorbis *f)
-{
- // make sure that we have the packet available before continuing...
- // this requires a full ogg parse, but we know we can fetch from f->stream
-
- // instead of coding this out explicitly, we could save the current read state,
- // read the next packet with get8() until end-of-packet, check f->eof, then
- // reset the state? but that would be slower, esp. since we'd have over 256 bytes
- // of state to restore (primarily the page segment table)
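- // reminder of ogg lacing: a packet is coded as lacing values that are
- // all 255 except the last, e.g. a 600-byte packet is 255,255,90; a
- // packet that's an exact multiple of 255 ends with a lacing value of 0,
- // and a page whose last lacing value is 255 continues on the next page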
-
- int s = f->next_seg, first = TRUE;
- uint8 *p = f->stream;
-
- if (s != -1) { // if we're not starting the packet with a 'continue on next page' flag
- for (; s < f->segment_count; ++s) {
- p += f->segments[s];
- if (f->segments[s] < 255) // stop at first short segment
- break;
- }
- // either the packet ends within this page, or it continues onto the next...
- if (s == f->segment_count)
- s = -1; // set 'crosses page' flag
- if (p > f->stream_end) return error(f, VORBIS_need_more_data);
- first = FALSE;
- }
- for (; s == -1;) {
- uint8 *q;
- int n;
-
- // check that we have the page header ready
- if (p + 26 >= f->stream_end) return error(f, VORBIS_need_more_data);
- // validate the page
- if (memcmp(p, ogg_page_header, 4)) return error(f, VORBIS_invalid_stream);
- if (p[4] != 0) return error(f, VORBIS_invalid_stream);
- if (first) { // the first segment must NOT have 'continued_packet', later ones MUST
- if (f->previous_length)
- if ((p[5] & PAGEFLAG_continued_packet)) return error(f, VORBIS_invalid_stream);
- // if no previous length, we're resynching, so we can come in on a continued-packet,
- // which we'll just drop
- } else {
- if (!(p[5] & PAGEFLAG_continued_packet)) return error(f, VORBIS_invalid_stream);
- }
- n = p[26]; // segment count
- q = p+27; // q points to segment table
- p = q + n; // advance past header
- // make sure we've read the segment table
- if (p > f->stream_end) return error(f, VORBIS_need_more_data);
- for (s=0; s < n; ++s) {
- p += q[s];
- if (q[s] < 255)
- break;
- }
- if (s == n)
- s = -1; // set 'crosses page' flag
- if (p > f->stream_end) return error(f, VORBIS_need_more_data);
- first = FALSE;
- }
- return TRUE;
-}
-#endif // !STB_VORBIS_NO_PUSHDATA_API
-
-static int start_decoder(vorb *f)
-{
- uint8 header[6], x,y;
- int len,i,j,k, max_submaps = 0;
- int longest_floorlist=0;
-
- // first page, first packet
- f->first_decode = TRUE;
-
- if (!start_page(f)) return FALSE;
- // validate page flag
- if (!(f->page_flag & PAGEFLAG_first_page)) return error(f, VORBIS_invalid_first_page);
- if (f->page_flag & PAGEFLAG_last_page) return error(f, VORBIS_invalid_first_page);
- if (f->page_flag & PAGEFLAG_continued_packet) return error(f, VORBIS_invalid_first_page);
- // check for expected packet length
- if (f->segment_count != 1) return error(f, VORBIS_invalid_first_page);
- if (f->segments[0] != 30) {
- // check for the Ogg skeleton fishead identifying header to refine our error
- if (f->segments[0] == 64 &&
- getn(f, header, 6) &&
- header[0] == 'f' &&
- header[1] == 'i' &&
- header[2] == 's' &&
- header[3] == 'h' &&
- header[4] == 'e' &&
- header[5] == 'a' &&
- get8(f) == 'd' &&
- get8(f) == '\0') return error(f, VORBIS_ogg_skeleton_not_supported);
- else
- return error(f, VORBIS_invalid_first_page);
- }
-
- // read packet
- // check packet header
- if (get8(f) != VORBIS_packet_id) return error(f, VORBIS_invalid_first_page);
- if (!getn(f, header, 6)) return error(f, VORBIS_unexpected_eof);
- if (!vorbis_validate(header)) return error(f, VORBIS_invalid_first_page);
- // vorbis_version
- if (get32(f) != 0) return error(f, VORBIS_invalid_first_page);
- f->channels = get8(f); if (!f->channels) return error(f, VORBIS_invalid_first_page);
- if (f->channels > STB_VORBIS_MAX_CHANNELS) return error(f, VORBIS_too_many_channels);
- f->sample_rate = get32(f); if (!f->sample_rate) return error(f, VORBIS_invalid_first_page);
- get32(f); // bitrate_maximum
- get32(f); // bitrate_nominal
- get32(f); // bitrate_minimum
- x = get8(f);
- {
- int log0,log1;
- log0 = x & 15;
- log1 = x >> 4;
- f->blocksize_0 = 1 << log0;
- f->blocksize_1 = 1 << log1;
- if (log0 < 6 || log0 > 13) return error(f, VORBIS_invalid_setup);
- if (log1 < 6 || log1 > 13) return error(f, VORBIS_invalid_setup);
- if (log0 > log1) return error(f, VORBIS_invalid_setup);
- }
-
- // framing_flag
- x = get8(f);
- if (!(x & 1)) return error(f, VORBIS_invalid_first_page);
-
- // second packet!
- if (!start_page(f)) return FALSE;
-
- if (!start_packet(f)) return FALSE;
-
- if (!next_segment(f)) return FALSE;
-
- if (get8_packet(f) != VORBIS_packet_comment) return error(f, VORBIS_invalid_setup);
- for (i=0; i < 6; ++i) header[i] = get8_packet(f);
- if (!vorbis_validate(header)) return error(f, VORBIS_invalid_setup);
- // vendor string
- len = get32_packet(f);
- f->vendor = (char*)setup_malloc(f, sizeof(char) * (len+1));
- if (f->vendor == NULL) return error(f, VORBIS_outofmem);
- for(i=0; i < len; ++i) {
- f->vendor[i] = get8_packet(f);
- }
- f->vendor[len] = (char)'\0';
- // user comments
- f->comment_list_length = get32_packet(f);
- f->comment_list = (char**)setup_malloc(f, sizeof(char*) * (f->comment_list_length));
- if (f->comment_list == NULL) return error(f, VORBIS_outofmem);
-
- for(i=0; i < f->comment_list_length; ++i) {
- len = get32_packet(f);
- f->comment_list[i] = (char*)setup_malloc(f, sizeof(char) * (len+1));
- if (f->comment_list[i] == NULL) return error(f, VORBIS_outofmem);
-
- for(j=0; j < len; ++j) {
- f->comment_list[i][j] = get8_packet(f);
- }
- f->comment_list[i][len] = (char)'\0';
- }
-
- // framing_flag
- x = get8_packet(f);
- if (!(x & 1)) return error(f, VORBIS_invalid_setup);
-
-
- skip(f, f->bytes_in_seg);
- f->bytes_in_seg = 0;
-
- do {
- len = next_segment(f);
- skip(f, len);
- f->bytes_in_seg = 0;
- } while (len);
-
- // third packet!
- if (!start_packet(f)) return FALSE;
-
- #ifndef STB_VORBIS_NO_PUSHDATA_API
- if (IS_PUSH_MODE(f)) {
- if (!is_whole_packet_present(f)) {
- // convert error in ogg header to right type
- if (f->error == VORBIS_invalid_stream)
- f->error = VORBIS_invalid_setup;
- return FALSE;
- }
- }
- #endif
-
- crc32_init(); // always init it, to avoid multithread race conditions
-
- if (get8_packet(f) != VORBIS_packet_setup) return error(f, VORBIS_invalid_setup);
- for (i=0; i < 6; ++i) header[i] = get8_packet(f);
- if (!vorbis_validate(header)) return error(f, VORBIS_invalid_setup);
-
- // codebooks
-
- f->codebook_count = get_bits(f,8) + 1;
- f->codebooks = (Codebook *) setup_malloc(f, sizeof(*f->codebooks) * f->codebook_count);
- if (f->codebooks == NULL) return error(f, VORBIS_outofmem);
- memset(f->codebooks, 0, sizeof(*f->codebooks) * f->codebook_count);
- for (i=0; i < f->codebook_count; ++i) {
- uint32 *values;
- int ordered, sorted_count;
- int total=0;
- uint8 *lengths;
- Codebook *c = f->codebooks+i;
- CHECK(f);
- x = get_bits(f, 8); if (x != 0x42) return error(f, VORBIS_invalid_setup);
- x = get_bits(f, 8); if (x != 0x43) return error(f, VORBIS_invalid_setup);
- x = get_bits(f, 8); if (x != 0x56) return error(f, VORBIS_invalid_setup);
- x = get_bits(f, 8);
- c->dimensions = (get_bits(f, 8)<<8) + x;
- x = get_bits(f, 8);
- y = get_bits(f, 8);
- c->entries = (get_bits(f, 8)<<16) + (y<<8) + x;
- ordered = get_bits(f,1);
- c->sparse = ordered ? 0 : get_bits(f,1);
-
- if (c->dimensions == 0 && c->entries != 0) return error(f, VORBIS_invalid_setup);
-
- if (c->sparse)
- lengths = (uint8 *) setup_temp_malloc(f, c->entries);
- else
- lengths = c->codeword_lengths = (uint8 *) setup_malloc(f, c->entries);
-
- if (!lengths) return error(f, VORBIS_outofmem);
-
- if (ordered) {
- int current_entry = 0;
- int current_length = get_bits(f,5) + 1;
- while (current_entry < c->entries) {
- int limit = c->entries - current_entry;
- int n = get_bits(f, ilog(limit));
- if (current_length >= 32) return error(f, VORBIS_invalid_setup);
- if (current_entry + n > (int) c->entries) { return error(f, VORBIS_invalid_setup); }
- memset(lengths + current_entry, current_length, n);
- current_entry += n;
- ++current_length;
- }
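- // (illustrative: with entries==4, an initial length of 1, and counts
- // 1,1,2 read from the stream, the loop above assigns lengths {1,2,3,3},
- // a complete prefix code)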
- } else {
- for (j=0; j < c->entries; ++j) {
- int present = c->sparse ? get_bits(f,1) : 1;
- if (present) {
- lengths[j] = get_bits(f, 5) + 1;
- ++total;
- if (lengths[j] == 32)
- return error(f, VORBIS_invalid_setup);
- } else {
- lengths[j] = NO_CODE;
- }
- }
- }
-
- if (c->sparse && total >= c->entries >> 2) {
- // convert sparse items to non-sparse!
- if (c->entries > (int) f->setup_temp_memory_required)
- f->setup_temp_memory_required = c->entries;
-
- c->codeword_lengths = (uint8 *) setup_malloc(f, c->entries);
- if (c->codeword_lengths == NULL) return error(f, VORBIS_outofmem);
- memcpy(c->codeword_lengths, lengths, c->entries);
- setup_temp_free(f, lengths, c->entries); // note this is only safe if there have been no intervening temp mallocs!
- lengths = c->codeword_lengths;
- c->sparse = 0;
- }
-
- // compute the size of the sorted tables
- if (c->sparse) {
- sorted_count = total;
- } else {
- sorted_count = 0;
- #ifndef STB_VORBIS_NO_HUFFMAN_BINARY_SEARCH
- for (j=0; j < c->entries; ++j)
- if (lengths[j] > STB_VORBIS_FAST_HUFFMAN_LENGTH && lengths[j] != NO_CODE)
- ++sorted_count;
- #endif
- }
-
- c->sorted_entries = sorted_count;
- values = NULL;
-
- CHECK(f);
- if (!c->sparse) {
- c->codewords = (uint32 *) setup_malloc(f, sizeof(c->codewords[0]) * c->entries);
- if (!c->codewords) return error(f, VORBIS_outofmem);
- } else {
- unsigned int size;
- if (c->sorted_entries) {
- c->codeword_lengths = (uint8 *) setup_malloc(f, c->sorted_entries);
- if (!c->codeword_lengths) return error(f, VORBIS_outofmem);
- c->codewords = (uint32 *) setup_temp_malloc(f, sizeof(*c->codewords) * c->sorted_entries);
- if (!c->codewords) return error(f, VORBIS_outofmem);
- values = (uint32 *) setup_temp_malloc(f, sizeof(*values) * c->sorted_entries);
- if (!values) return error(f, VORBIS_outofmem);
- }
- size = c->entries + (sizeof(*c->codewords) + sizeof(*values)) * c->sorted_entries;
- if (size > f->setup_temp_memory_required)
- f->setup_temp_memory_required = size;
- }
-
- if (!compute_codewords(c, lengths, c->entries, values)) {
- if (c->sparse) setup_temp_free(f, values, 0);
- return error(f, VORBIS_invalid_setup);
- }
-
- if (c->sorted_entries) {
- // allocate an extra slot for sentinels
- c->sorted_codewords = (uint32 *) setup_malloc(f, sizeof(*c->sorted_codewords) * (c->sorted_entries+1));
- if (c->sorted_codewords == NULL) return error(f, VORBIS_outofmem);
- // allocate an extra slot at the front so that c->sorted_values[-1] is
- // defined, letting us catch that case without an extra if
- c->sorted_values = ( int *) setup_malloc(f, sizeof(*c->sorted_values ) * (c->sorted_entries+1));
- if (c->sorted_values == NULL) return error(f, VORBIS_outofmem);
- ++c->sorted_values;
- c->sorted_values[-1] = -1;
- compute_sorted_huffman(c, lengths, values);
- }
-
- if (c->sparse) {
- setup_temp_free(f, values, sizeof(*values)*c->sorted_entries);
- setup_temp_free(f, c->codewords, sizeof(*c->codewords)*c->sorted_entries);
- setup_temp_free(f, lengths, c->entries);
- c->codewords = NULL;
- }
-
- compute_accelerated_huffman(c);
-
- CHECK(f);
- c->lookup_type = get_bits(f, 4);
- if (c->lookup_type > 2) return error(f, VORBIS_invalid_setup);
- if (c->lookup_type > 0) {
- uint16 *mults;
- c->minimum_value = float32_unpack(get_bits(f, 32));
- c->delta_value = float32_unpack(get_bits(f, 32));
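- // (float32_unpack decodes vorbis's packed float layout: 21-bit
- // mantissa, 10-bit biased exponent, sign bit; the value is
- // +/-mantissa * 2^(exponent-788))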
- c->value_bits = get_bits(f, 4)+1;
- c->sequence_p = get_bits(f,1);
- if (c->lookup_type == 1) {
- int values = lookup1_values(c->entries, c->dimensions);
- if (values < 0) return error(f, VORBIS_invalid_setup);
- c->lookup_values = (uint32) values;
- } else {
- c->lookup_values = c->entries * c->dimensions;
- }
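- // lookup type 1 shares a small lattice: lookup_values is the largest
- // integer v with v^dimensions <= entries, and each vector component
- // indexes into those v scalars; type 2 stores entries*dimensions
- // scalars directly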
- if (c->lookup_values == 0) return error(f, VORBIS_invalid_setup);
- mults = (uint16 *) setup_temp_malloc(f, sizeof(mults[0]) * c->lookup_values);
- if (mults == NULL) return error(f, VORBIS_outofmem);
- for (j=0; j < (int) c->lookup_values; ++j) {
- int q = get_bits(f, c->value_bits);
- if (q == EOP) { setup_temp_free(f,mults,sizeof(mults[0])*c->lookup_values); return error(f, VORBIS_invalid_setup); }
- mults[j] = q;
- }
-
-#ifndef STB_VORBIS_DIVIDES_IN_CODEBOOK
- if (c->lookup_type == 1) {
- int len, sparse = c->sparse;
- float last=0;
- // pre-expand the lookup1-style multiplicands, to avoid a divide in the inner loop
- if (sparse) {
- if (c->sorted_entries == 0) goto skip;
- c->multiplicands = (codetype *) setup_malloc(f, sizeof(c->multiplicands[0]) * c->sorted_entries * c->dimensions);
- } else
- c->multiplicands = (codetype *) setup_malloc(f, sizeof(c->multiplicands[0]) * c->entries * c->dimensions);
- if (c->multiplicands == NULL) { setup_temp_free(f,mults,sizeof(mults[0])*c->lookup_values); return error(f, VORBIS_outofmem); }
- len = sparse ? c->sorted_entries : c->entries;
- for (j=0; j < len; ++j) {
- unsigned int z = sparse ? c->sorted_values[j] : j;
- unsigned int div=1;
- for (k=0; k < c->dimensions; ++k) {
- int off = (z / div) % c->lookup_values;
- float val = mults[off]*c->delta_value + c->minimum_value + last;
- c->multiplicands[j*c->dimensions + k] = val;
- if (c->sequence_p)
- last = val;
- if (k+1 < c->dimensions) {
- if (div > UINT_MAX / (unsigned int) c->lookup_values) {
- setup_temp_free(f, mults,sizeof(mults[0])*c->lookup_values);
- return error(f, VORBIS_invalid_setup);
- }
- div *= c->lookup_values;
- }
- }
- }
- c->lookup_type = 2;
- }
- else
-#endif
- {
- float last=0;
- CHECK(f);
- c->multiplicands = (codetype *) setup_malloc(f, sizeof(c->multiplicands[0]) * c->lookup_values);
- if (c->multiplicands == NULL) { setup_temp_free(f, mults,sizeof(mults[0])*c->lookup_values); return error(f, VORBIS_outofmem); }
- for (j=0; j < (int) c->lookup_values; ++j) {
- float val = mults[j] * c->delta_value + c->minimum_value + last;
- c->multiplicands[j] = val;
- if (c->sequence_p)
- last = val;
- }
- }
-#ifndef STB_VORBIS_DIVIDES_IN_CODEBOOK
- skip:;
-#endif
- setup_temp_free(f, mults, sizeof(mults[0])*c->lookup_values);
-
- CHECK(f);
- }
- CHECK(f);
- }
-
- // time domain transforms (not used)
-
- x = get_bits(f, 6) + 1;
- for (i=0; i < x; ++i) {
- uint32 z = get_bits(f, 16);
- if (z != 0) return error(f, VORBIS_invalid_setup);
- }
-
- // Floors
- f->floor_count = get_bits(f, 6)+1;
- f->floor_config = (Floor *) setup_malloc(f, f->floor_count * sizeof(*f->floor_config));
- if (f->floor_config == NULL) return error(f, VORBIS_outofmem);
- for (i=0; i < f->floor_count; ++i) {
- f->floor_types[i] = get_bits(f, 16);
- if (f->floor_types[i] > 1) return error(f, VORBIS_invalid_setup);
- if (f->floor_types[i] == 0) {
- Floor0 *g = &f->floor_config[i].floor0;
- g->order = get_bits(f,8);
- g->rate = get_bits(f,16);
- g->bark_map_size = get_bits(f,16);
- g->amplitude_bits = get_bits(f,6);
- g->amplitude_offset = get_bits(f,8);
- g->number_of_books = get_bits(f,4) + 1;
- for (j=0; j < g->number_of_books; ++j)
- g->book_list[j] = get_bits(f,8);
- return error(f, VORBIS_feature_not_supported);
- } else {
- stbv__floor_ordering p[31*8+2];
- Floor1 *g = &f->floor_config[i].floor1;
- int max_class = -1;
- g->partitions = get_bits(f, 5);
- for (j=0; j < g->partitions; ++j) {
- g->partition_class_list[j] = get_bits(f, 4);
- if (g->partition_class_list[j] > max_class)
- max_class = g->partition_class_list[j];
- }
- for (j=0; j <= max_class; ++j) {
- g->class_dimensions[j] = get_bits(f, 3)+1;
- g->class_subclasses[j] = get_bits(f, 2);
- if (g->class_subclasses[j]) {
- g->class_masterbooks[j] = get_bits(f, 8);
- if (g->class_masterbooks[j] >= f->codebook_count) return error(f, VORBIS_invalid_setup);
- }
- for (k=0; k < 1 << g->class_subclasses[j]; ++k) {
- g->subclass_books[j][k] = get_bits(f,8)-1;
- if (g->subclass_books[j][k] >= f->codebook_count) return error(f, VORBIS_invalid_setup);
- }
- }
- g->floor1_multiplier = get_bits(f,2)+1;
- g->rangebits = get_bits(f,4);
- g->Xlist[0] = 0;
- g->Xlist[1] = 1 << g->rangebits;
- g->values = 2;
- for (j=0; j < g->partitions; ++j) {
- int c = g->partition_class_list[j];
- for (k=0; k < g->class_dimensions[c]; ++k) {
- g->Xlist[g->values] = get_bits(f, g->rangebits);
- ++g->values;
- }
- }
- // precompute the sorting
- for (j=0; j < g->values; ++j) {
- p[j].x = g->Xlist[j];
- p[j].id = j;
- }
- qsort(p, g->values, sizeof(p[0]), point_compare);
- for (j=0; j < g->values-1; ++j)
- if (p[j].x == p[j+1].x)
- return error(f, VORBIS_invalid_setup);
- for (j=0; j < g->values; ++j)
- g->sorted_order[j] = (uint8) p[j].id;
- // precompute the neighbors
- for (j=2; j < g->values; ++j) {
- int low = 0,hi = 0;
- neighbors(g->Xlist, j, &low,&hi);
- g->neighbors[j][0] = low;
- g->neighbors[j][1] = hi;
- }
-
- if (g->values > longest_floorlist)
- longest_floorlist = g->values;
- }
- }
-
- // Residue
- f->residue_count = get_bits(f, 6)+1;
- f->residue_config = (Residue *) setup_malloc(f, f->residue_count * sizeof(f->residue_config[0]));
- if (f->residue_config == NULL) return error(f, VORBIS_outofmem);
- memset(f->residue_config, 0, f->residue_count * sizeof(f->residue_config[0]));
- for (i=0; i < f->residue_count; ++i) {
- uint8 residue_cascade[64];
- Residue *r = f->residue_config+i;
- f->residue_types[i] = get_bits(f, 16);
- if (f->residue_types[i] > 2) return error(f, VORBIS_invalid_setup);
- r->begin = get_bits(f, 24);
- r->end = get_bits(f, 24);
- if (r->end < r->begin) return error(f, VORBIS_invalid_setup);
- r->part_size = get_bits(f,24)+1;
- r->classifications = get_bits(f,6)+1;
- r->classbook = get_bits(f,8);
- if (r->classbook >= f->codebook_count) return error(f, VORBIS_invalid_setup);
- for (j=0; j < r->classifications; ++j) {
- uint8 high_bits=0;
- uint8 low_bits=get_bits(f,3);
- if (get_bits(f,1))
- high_bits = get_bits(f,5);
- residue_cascade[j] = high_bits*8 + low_bits;
- }
- r->residue_books = (short (*)[8]) setup_malloc(f, sizeof(r->residue_books[0]) * r->classifications);
- if (r->residue_books == NULL) return error(f, VORBIS_outofmem);
- for (j=0; j < r->classifications; ++j) {
- for (k=0; k < 8; ++k) {
- if (residue_cascade[j] & (1 << k)) {
- r->residue_books[j][k] = get_bits(f, 8);
- if (r->residue_books[j][k] >= f->codebook_count) return error(f, VORBIS_invalid_setup);
- } else {
- r->residue_books[j][k] = -1;
- }
- }
- }
- // precompute the classifications[] array to avoid inner-loop mod/divide
- // call it 'classdata' since we already have r->classifications
- r->classdata = (uint8 **) setup_malloc(f, sizeof(*r->classdata) * f->codebooks[r->classbook].entries);
- if (!r->classdata) return error(f, VORBIS_outofmem);
- memset(r->classdata, 0, sizeof(*r->classdata) * f->codebooks[r->classbook].entries);
- for (j=0; j < f->codebooks[r->classbook].entries; ++j) {
- int classwords = f->codebooks[r->classbook].dimensions;
- int temp = j;
- r->classdata[j] = (uint8 *) setup_malloc(f, sizeof(r->classdata[j][0]) * classwords);
- if (r->classdata[j] == NULL) return error(f, VORBIS_outofmem);
- for (k=classwords-1; k >= 0; --k) {
- r->classdata[j][k] = temp % r->classifications;
- temp /= r->classifications;
- }
- }
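- // (illustrative: with classifications==10 and classwords==3, classbook
- // entry 425 expands to the class list {4,2,5}, most significant first)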
- }
-
- f->mapping_count = get_bits(f,6)+1;
- f->mapping = (Mapping *) setup_malloc(f, f->mapping_count * sizeof(*f->mapping));
- if (f->mapping == NULL) return error(f, VORBIS_outofmem);
- memset(f->mapping, 0, f->mapping_count * sizeof(*f->mapping));
- for (i=0; i < f->mapping_count; ++i) {
- Mapping *m = f->mapping + i;
- int mapping_type = get_bits(f,16);
- if (mapping_type != 0) return error(f, VORBIS_invalid_setup);
- m->chan = (MappingChannel *) setup_malloc(f, f->channels * sizeof(*m->chan));
- if (m->chan == NULL) return error(f, VORBIS_outofmem);
- if (get_bits(f,1))
- m->submaps = get_bits(f,4)+1;
- else
- m->submaps = 1;
- if (m->submaps > max_submaps)
- max_submaps = m->submaps;
- if (get_bits(f,1)) {
- m->coupling_steps = get_bits(f,8)+1;
- if (m->coupling_steps > f->channels) return error(f, VORBIS_invalid_setup);
- for (k=0; k < m->coupling_steps; ++k) {
- m->chan[k].magnitude = get_bits(f, ilog(f->channels-1));
- m->chan[k].angle = get_bits(f, ilog(f->channels-1));
- if (m->chan[k].magnitude >= f->channels) return error(f, VORBIS_invalid_setup);
- if (m->chan[k].angle >= f->channels) return error(f, VORBIS_invalid_setup);
- if (m->chan[k].magnitude == m->chan[k].angle) return error(f, VORBIS_invalid_setup);
- }
- } else
- m->coupling_steps = 0;
-
- // reserved field
- if (get_bits(f,2)) return error(f, VORBIS_invalid_setup);
- if (m->submaps > 1) {
- for (j=0; j < f->channels; ++j) {
- m->chan[j].mux = get_bits(f, 4);
- if (m->chan[j].mux >= m->submaps) return error(f, VORBIS_invalid_setup);
- }
- } else
- // @SPECIFICATION: this case is missing from the spec
- for (j=0; j < f->channels; ++j)
- m->chan[j].mux = 0;
-
- for (j=0; j < m->submaps; ++j) {
- get_bits(f,8); // discard
- m->submap_floor[j] = get_bits(f,8);
- m->submap_residue[j] = get_bits(f,8);
- if (m->submap_floor[j] >= f->floor_count) return error(f, VORBIS_invalid_setup);
- if (m->submap_residue[j] >= f->residue_count) return error(f, VORBIS_invalid_setup);
- }
- }
-
- // Modes
- f->mode_count = get_bits(f, 6)+1;
- for (i=0; i < f->mode_count; ++i) {
- Mode *m = f->mode_config+i;
- m->blockflag = get_bits(f,1);
- m->windowtype = get_bits(f,16);
- m->transformtype = get_bits(f,16);
- m->mapping = get_bits(f,8);
- if (m->windowtype != 0) return error(f, VORBIS_invalid_setup);
- if (m->transformtype != 0) return error(f, VORBIS_invalid_setup);
- if (m->mapping >= f->mapping_count) return error(f, VORBIS_invalid_setup);
- }
-
- flush_packet(f);
-
- f->previous_length = 0;
-
- for (i=0; i < f->channels; ++i) {
- f->channel_buffers[i] = (float *) setup_malloc(f, sizeof(float) * f->blocksize_1);
- f->previous_window[i] = (float *) setup_malloc(f, sizeof(float) * f->blocksize_1/2);
- f->finalY[i] = (int16 *) setup_malloc(f, sizeof(int16) * longest_floorlist);
- if (f->channel_buffers[i] == NULL || f->previous_window[i] == NULL || f->finalY[i] == NULL) return error(f, VORBIS_outofmem);
- memset(f->channel_buffers[i], 0, sizeof(float) * f->blocksize_1);
- #ifdef STB_VORBIS_NO_DEFER_FLOOR
- f->floor_buffers[i] = (float *) setup_malloc(f, sizeof(float) * f->blocksize_1/2);
- if (f->floor_buffers[i] == NULL) return error(f, VORBIS_outofmem);
- #endif
- }
-
- if (!init_blocksize(f, 0, f->blocksize_0)) return FALSE;
- if (!init_blocksize(f, 1, f->blocksize_1)) return FALSE;
- f->blocksize[0] = f->blocksize_0;
- f->blocksize[1] = f->blocksize_1;
-
-#ifdef STB_VORBIS_DIVIDE_TABLE
- if (integer_divide_table[1][1]==0)
- for (i=0; i < DIVTAB_NUMER; ++i)
- for (j=1; j < DIVTAB_DENOM; ++j)
- integer_divide_table[i][j] = i / j;
-#endif
-
- // compute how much temporary memory is needed
-
- // 1.
- {
- uint32 imdct_mem = (f->blocksize_1 * sizeof(float) >> 1);
- uint32 classify_mem;
- int i,max_part_read=0;
- for (i=0; i < f->residue_count; ++i) {
- Residue *r = f->residue_config + i;
- unsigned int actual_size = f->blocksize_1 / 2;
- unsigned int limit_r_begin = r->begin < actual_size ? r->begin : actual_size;
- unsigned int limit_r_end = r->end < actual_size ? r->end : actual_size;
- int n_read = limit_r_end - limit_r_begin;
- int part_read = n_read / r->part_size;
- if (part_read > max_part_read)
- max_part_read = part_read;
- }
- #ifndef STB_VORBIS_DIVIDES_IN_RESIDUE
- classify_mem = f->channels * (sizeof(void*) + max_part_read * sizeof(uint8 *));
- #else
- classify_mem = f->channels * (sizeof(void*) + max_part_read * sizeof(int *));
- #endif
-
- // maximum reasonable partition size is f->blocksize_1
-
- f->temp_memory_required = classify_mem;
- if (imdct_mem > f->temp_memory_required)
- f->temp_memory_required = imdct_mem;
- }
-
-
- if (f->alloc.alloc_buffer) {
- assert(f->temp_offset == f->alloc.alloc_buffer_length_in_bytes);
- // check if there's enough temp memory so we don't error later
- if (f->setup_offset + sizeof(*f) + f->temp_memory_required > (unsigned) f->temp_offset)
- return error(f, VORBIS_outofmem);
- }
-
- // @TODO: stb_vorbis_seek_start expects first_audio_page_offset to point to a page
- // without PAGEFLAG_continued_packet, so this either points to the first page, or
- // the page after the end of the headers. It might be cleaner to point to a page
- // in the middle of the headers, when that's the page where the first audio packet
- // starts, but we'd have to also correctly skip the end of any continued packet in
- // stb_vorbis_seek_start.
- if (f->next_seg == -1) {
- f->first_audio_page_offset = stb_vorbis_get_file_offset(f);
- } else {
- f->first_audio_page_offset = 0;
- }
-
- return TRUE;
-}
-
-static void vorbis_deinit(stb_vorbis *p)
-{
- int i,j;
-
- setup_free(p, p->vendor);
- for (i=0; i < p->comment_list_length; ++i) {
- setup_free(p, p->comment_list[i]);
- }
- setup_free(p, p->comment_list);
-
- if (p->residue_config) {
- for (i=0; i < p->residue_count; ++i) {
- Residue *r = p->residue_config+i;
- if (r->classdata) {
- for (j=0; j < p->codebooks[r->classbook].entries; ++j)
- setup_free(p, r->classdata[j]);
- setup_free(p, r->classdata);
- }
- setup_free(p, r->residue_books);
- }
- }
-
- if (p->codebooks) {
- CHECK(p);
- for (i=0; i < p->codebook_count; ++i) {
- Codebook *c = p->codebooks + i;
- setup_free(p, c->codeword_lengths);
- setup_free(p, c->multiplicands);
- setup_free(p, c->codewords);
- setup_free(p, c->sorted_codewords);
- // c->sorted_values[-1] is the first entry in the array
- setup_free(p, c->sorted_values ? c->sorted_values-1 : NULL);
- }
- setup_free(p, p->codebooks);
- }
- setup_free(p, p->floor_config);
- setup_free(p, p->residue_config);
- if (p->mapping) {
- for (i=0; i < p->mapping_count; ++i)
- setup_free(p, p->mapping[i].chan);
- setup_free(p, p->mapping);
- }
- CHECK(p);
- for (i=0; i < p->channels && i < STB_VORBIS_MAX_CHANNELS; ++i) {
- setup_free(p, p->channel_buffers[i]);
- setup_free(p, p->previous_window[i]);
- #ifdef STB_VORBIS_NO_DEFER_FLOOR
- setup_free(p, p->floor_buffers[i]);
- #endif
- setup_free(p, p->finalY[i]);
- }
- for (i=0; i < 2; ++i) {
- setup_free(p, p->A[i]);
- setup_free(p, p->B[i]);
- setup_free(p, p->C[i]);
- setup_free(p, p->window[i]);
- setup_free(p, p->bit_reverse[i]);
- }
- #ifndef STB_VORBIS_NO_STDIO
- if (p->close_on_free) fclose(p->f);
- #endif
-}
-
-void stb_vorbis_close(stb_vorbis *p)
-{
- if (p == NULL) return;
- vorbis_deinit(p);
- setup_free(p,p);
-}
-
-static void vorbis_init(stb_vorbis *p, const stb_vorbis_alloc *z)
-{
- memset(p, 0, sizeof(*p)); // NULL out all malloc'd pointers to start
- if (z) {
- p->alloc = *z;
- p->alloc.alloc_buffer_length_in_bytes &= ~7;
- p->temp_offset = p->alloc.alloc_buffer_length_in_bytes;
- }
- p->eof = 0;
- p->error = VORBIS__no_error;
- p->stream = NULL;
- p->codebooks = NULL;
- p->page_crc_tests = -1;
- #ifndef STB_VORBIS_NO_STDIO
- p->close_on_free = FALSE;
- p->f = NULL;
- #endif
-}
-
-int stb_vorbis_get_sample_offset(stb_vorbis *f)
-{
- if (f->current_loc_valid)
- return f->current_loc;
- else
- return -1;
-}
-
-stb_vorbis_info stb_vorbis_get_info(stb_vorbis *f)
-{
- stb_vorbis_info d;
- d.channels = f->channels;
- d.sample_rate = f->sample_rate;
- d.setup_memory_required = f->setup_memory_required;
- d.setup_temp_memory_required = f->setup_temp_memory_required;
- d.temp_memory_required = f->temp_memory_required;
- d.max_frame_size = f->blocksize_1 >> 1;
- return d;
-}
-
-stb_vorbis_comment stb_vorbis_get_comment(stb_vorbis *f)
-{
- stb_vorbis_comment d;
- d.vendor = f->vendor;
- d.comment_list_length = f->comment_list_length;
- d.comment_list = f->comment_list;
- return d;
-}
-
-int stb_vorbis_get_error(stb_vorbis *f)
-{
- int e = f->error;
- f->error = VORBIS__no_error;
- return e;
-}
-
-static stb_vorbis * vorbis_alloc(stb_vorbis *f)
-{
- stb_vorbis *p = (stb_vorbis *) setup_malloc(f, sizeof(*p));
- return p;
-}
-
-#ifndef STB_VORBIS_NO_PUSHDATA_API
-
-void stb_vorbis_flush_pushdata(stb_vorbis *f)
-{
- f->previous_length = 0;
- f->page_crc_tests = 0;
- f->discard_samples_deferred = 0;
- f->current_loc_valid = FALSE;
- f->first_decode = FALSE;
- f->samples_output = 0;
- f->channel_buffer_start = 0;
- f->channel_buffer_end = 0;
-}
-
-static int vorbis_search_for_page_pushdata(vorb *f, uint8 *data, int data_len)
-{
- int i,n;
- for (i=0; i < f->page_crc_tests; ++i)
- f->scan[i].bytes_done = 0;
-
- // if we have room for more scans, search for them first, because
- // they may cause us to stop early if their header is incomplete
- if (f->page_crc_tests < STB_VORBIS_PUSHDATA_CRC_COUNT) {
- if (data_len < 4) return 0;
- data_len -= 3; // need to look for 4-byte sequence, so don't miss
- // one that straddles a boundary
- for (i=0; i < data_len; ++i) {
- if (data[i] == 0x4f) {
- if (0==memcmp(data+i, ogg_page_header, 4)) {
- int j,len;
- uint32 crc;
- // make sure we have the whole page header
- if (i+26 >= data_len || i+27+data[i+26] >= data_len) {
- // only read up to this page start, so hopefully we'll
- // have the whole page header start next time
- data_len = i;
- break;
- }
- // ok, we have it all; compute the length of the page
- len = 27 + data[i+26];
- for (j=0; j < data[i+26]; ++j)
- len += data[i+27+j];
- // scan everything up to the embedded crc (which we must 0)
- crc = 0;
- for (j=0; j < 22; ++j)
- crc = crc32_update(crc, data[i+j]);
- // now process 4 0-bytes
- for ( ; j < 26; ++j)
- crc = crc32_update(crc, 0);
- // len is the total number of bytes we need to scan
- n = f->page_crc_tests++;
- f->scan[n].bytes_left = len-j;
- f->scan[n].crc_so_far = crc;
- f->scan[n].goal_crc = data[i+22] + (data[i+23] << 8) + (data[i+24]<<16) + (data[i+25]<<24);
- // if the last frame on a page is continued to the next, then
- // we can't recover the sample_loc immediately
- if (data[i+27+data[i+26]-1] == 255)
- f->scan[n].sample_loc = ~0;
- else
- f->scan[n].sample_loc = data[i+6] + (data[i+7] << 8) + (data[i+ 8]<<16) + (data[i+ 9]<<24);
- f->scan[n].bytes_done = i+j;
- if (f->page_crc_tests == STB_VORBIS_PUSHDATA_CRC_COUNT)
- break;
- // keep going if we still have room for more
- }
- }
- }
- }
-
- for (i=0; i < f->page_crc_tests;) {
- uint32 crc;
- int j;
- int n = f->scan[i].bytes_done;
- int m = f->scan[i].bytes_left;
- if (m > data_len - n) m = data_len - n;
- // m is the bytes to scan in the current chunk
- crc = f->scan[i].crc_so_far;
- for (j=0; j < m; ++j)
- crc = crc32_update(crc, data[n+j]);
- f->scan[i].bytes_left -= m;
- f->scan[i].crc_so_far = crc;
- if (f->scan[i].bytes_left == 0) {
- // does it match?
- if (f->scan[i].crc_so_far == f->scan[i].goal_crc) {
- // Houston, we have a page
- data_len = n+m; // consumption amount is wherever that scan ended
- f->page_crc_tests = -1; // drop out of page scan mode
- f->previous_length = 0; // decode-but-don't-output one frame
- f->next_seg = -1; // start a new page
- f->current_loc = f->scan[i].sample_loc; // set the current sample location
- // to the amount we'd have decoded had we decoded this page
- f->current_loc_valid = f->current_loc != ~0U;
- return data_len;
- }
- // delete entry
- f->scan[i] = f->scan[--f->page_crc_tests];
- } else {
- ++i;
- }
- }
-
- return data_len;
-}
-
-// return value: number of bytes we used
-int stb_vorbis_decode_frame_pushdata(
- stb_vorbis *f, // the file we're decoding
- const uint8 *data, int data_len, // the memory available for decoding
- int *channels, // place to write number of float * buffers
- float ***output, // place to write float ** array of float * buffers
- int *samples // place to write number of output samples
- )
-{
- int i;
- int len,right,left;
-
- if (!IS_PUSH_MODE(f)) return error(f, VORBIS_invalid_api_mixing);
-
- if (f->page_crc_tests >= 0) {
- *samples = 0;
- return vorbis_search_for_page_pushdata(f, (uint8 *) data, data_len);
- }
-
- f->stream = (uint8 *) data;
- f->stream_end = (uint8 *) data + data_len;
- f->error = VORBIS__no_error;
-
- // check that we have the entire packet in memory
- if (!is_whole_packet_present(f)) {
- *samples = 0;
- return 0;
- }
-
- if (!vorbis_decode_packet(f, &len, &left, &right)) {
- // save the actual error we encountered
- enum STBVorbisError error = f->error;
- if (error == VORBIS_bad_packet_type) {
- // flush and resynch
- f->error = VORBIS__no_error;
- while (get8_packet(f) != EOP)
- if (f->eof) break;
- *samples = 0;
- return (int) (f->stream - data);
- }
- if (error == VORBIS_continued_packet_flag_invalid) {
- if (f->previous_length == 0) {
- // we may be resynching, in which case it's ok to hit one
- // of these; just discard the packet
- f->error = VORBIS__no_error;
- while (get8_packet(f) != EOP)
- if (f->eof) break;
- *samples = 0;
- return (int) (f->stream - data);
- }
- }
- // if we get an error while parsing, what to do?
- // well, it DEFINITELY won't work to continue from where we are!
- stb_vorbis_flush_pushdata(f);
- // restore the error that actually made us bail
- f->error = error;
- *samples = 0;
- return 1;
- }
-
- // success!
- len = vorbis_finish_frame(f, len, left, right);
- for (i=0; i < f->channels; ++i)
- f->outputs[i] = f->channel_buffers[i] + left;
-
- if (channels) *channels = f->channels;
- *samples = len;
- *output = f->outputs;
- return (int) (f->stream - data);
-}
-
-stb_vorbis *stb_vorbis_open_pushdata(
- const unsigned char *data, int data_len, // the memory available for decoding
- int *data_used, // only defined if result is not NULL
- int *error, const stb_vorbis_alloc *alloc)
-{
- stb_vorbis *f, p;
- vorbis_init(&p, alloc);
- p.stream = (uint8 *) data;
- p.stream_end = (uint8 *) data + data_len;
- p.push_mode = TRUE;
- if (!start_decoder(&p)) {
- if (p.eof)
- *error = VORBIS_need_more_data;
- else
- *error = p.error;
- return NULL;
- }
- f = vorbis_alloc(&p);
- if (f) {
- *f = p;
- *data_used = (int) (f->stream - data);
- *error = 0;
- return f;
- } else {
- vorbis_deinit(&p);
- return NULL;
- }
-}
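-
-// a minimal pushdata usage sketch (illustrative only; p/avail stand for the
-// caller's buffer cursor and remaining byte count):
-//
-//    int used, err, ch, samples;  float **out;
-//    stb_vorbis *v = stb_vorbis_open_pushdata(p, avail, &used, &err, NULL);
-//    // NULL with err == VORBIS_need_more_data just means feed more bytes
-//    for (;;) {
-//       used = stb_vorbis_decode_frame_pushdata(v, p, avail, &ch, &out, &samples);
-//       if (used == 0) { /* refill: append more bytes to the buffer */ continue; }
-//       p += used; avail -= used;
-//       if (samples) { /* consume samples*ch floats from out[0..ch-1] */ }
-//    }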
-#endif // STB_VORBIS_NO_PUSHDATA_API
-
-unsigned int stb_vorbis_get_file_offset(stb_vorbis *f)
-{
- #ifndef STB_VORBIS_NO_PUSHDATA_API
- if (f->push_mode) return 0;
- #endif
- if (USE_MEMORY(f)) return (unsigned int) (f->stream - f->stream_start);
- #ifndef STB_VORBIS_NO_STDIO
- return (unsigned int) (ftell(f->f) - f->f_start);
- #endif
-}
-
-#ifndef STB_VORBIS_NO_PULLDATA_API
-//
-// DATA-PULLING API
-//
-
-static uint32 vorbis_find_page(stb_vorbis *f, uint32 *end, uint32 *last)
-{
- for(;;) {
- int n;
- if (f->eof) return 0;
- n = get8(f);
- if (n == 0x4f) { // page header candidate
- unsigned int retry_loc = stb_vorbis_get_file_offset(f);
- int i;
- // check if we're off the end of a file_section stream
- if (retry_loc - 25 > f->stream_len)
- return 0;
- // check the rest of the header
- for (i=1; i < 4; ++i)
- if (get8(f) != ogg_page_header[i])
- break;
- if (f->eof) return 0;
- if (i == 4) {
- uint8 header[27];
- uint32 i, crc, goal, len;
- for (i=0; i < 4; ++i)
- header[i] = ogg_page_header[i];
- for (; i < 27; ++i)
- header[i] = get8(f);
- if (f->eof) return 0;
- if (header[4] != 0) goto invalid;
- goal = header[22] + (header[23] << 8) + (header[24]<<16) + (header[25]<<24);
- for (i=22; i < 26; ++i)
- header[i] = 0;
- crc = 0;
- for (i=0; i < 27; ++i)
- crc = crc32_update(crc, header[i]);
- len = 0;
- for (i=0; i < header[26]; ++i) {
- int s = get8(f);
- crc = crc32_update(crc, s);
- len += s;
- }
- if (len && f->eof) return 0;
- for (i=0; i < len; ++i)
- crc = crc32_update(crc, get8(f));
- // finished parsing probable page
- if (crc == goal) {
- // we could now check that it's either got the last-page flag
- // set OR is followed by the capture pattern, but I guess
- // TECHNICALLY you could have a file with garbage between each
- // ogg page and recover from it automatically? So even though
- // that paranoia might decrease the chance of an invalid decode
- // by another 2^32, it's not worth it, since it would hose those
- // invalid-but-useful files.
- if (end)
- *end = stb_vorbis_get_file_offset(f);
- if (last) {
- if (header[5] & 0x04)
- *last = 1;
- else
- *last = 0;
- }
- set_file_offset(f, retry_loc-1);
- return 1;
- }
- }
- invalid:
- // not a valid page, so rewind and look for next one
- set_file_offset(f, retry_loc);
- }
- }
-}
-
-
-#define SAMPLE_unknown 0xffffffff
-
-// seeking is implemented with a binary search, which narrows down the range to
-// 64K, before using a linear search (because finding the synchronization
-// pattern can be expensive, and the chance we'd find the end page again is
-// relatively high for small ranges)
-//
-// two initial interpolation-style probes are used at the start of the search
-// to try to bound either side of the binary search sensibly, while still
-// working in O(log n) time if they fail.
-
-static int get_seek_page_info(stb_vorbis *f, ProbedPage *z)
-{
- uint8 header[27], lacing[255];
- int i,len;
-
- // record where the page starts
- z->page_start = stb_vorbis_get_file_offset(f);
-
- // parse the header
- getn(f, header, 27);
- if (header[0] != 'O' || header[1] != 'g' || header[2] != 'g' || header[3] != 'S')
- return 0;
- getn(f, lacing, header[26]);
-
- // determine the length of the payload
- len = 0;
- for (i=0; i < header[26]; ++i)
- len += lacing[i];
-
- // this implies where the page ends
- z->page_end = z->page_start + 27 + header[26] + len;
-
- // read the last-decoded sample out of the data
- z->last_decoded_sample = header[6] + (header[7] << 8) + (header[8] << 16) + (header[9] << 24);
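- // (the ogg granule position field is actually 8 bytes; stb_vorbis tracks
- // sample positions as 32 bits, so only the low 4 bytes are read here)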
-
- // restore file state to where we were
- set_file_offset(f, z->page_start);
- return 1;
-}
-
-// rarely used function to seek back to the preceding page while finding the
-// start of a packet
-static int go_to_page_before(stb_vorbis *f, unsigned int limit_offset)
-{
- unsigned int previous_safe, end;
-
- // now we want to seek back 64K from the limit
- if (limit_offset >= 65536 && limit_offset-65536 >= f->first_audio_page_offset)
- previous_safe = limit_offset - 65536;
- else
- previous_safe = f->first_audio_page_offset;
-
- set_file_offset(f, previous_safe);
-
- while (vorbis_find_page(f, &end, NULL)) {
- if (end >= limit_offset && stb_vorbis_get_file_offset(f) < limit_offset)
- return 1;
- set_file_offset(f, end);
- }
-
- return 0;
-}
-
-// implements the search logic for finding a page and starting decoding. if
-// the function succeeds, current_loc_valid will be true and current_loc will
-// be less than or equal to the provided sample number (the closer the
-// better).
-static int seek_to_sample_coarse(stb_vorbis *f, uint32 sample_number)
-{
- ProbedPage left, right, mid;
- int i, start_seg_with_known_loc, end_pos, page_start;
- uint32 delta, stream_length, padding, last_sample_limit;
- double offset = 0.0, bytes_per_sample = 0.0;
- int probe = 0;
-
- // find the last page and validate the target sample
- stream_length = stb_vorbis_stream_length_in_samples(f);
- if (stream_length == 0) return error(f, VORBIS_seek_without_length);
- if (sample_number > stream_length) return error(f, VORBIS_seek_invalid);
-
- // this is the maximum difference between the window-center (which is the
- // actual granule position value), and the right-start (which the spec
- // indicates should be the granule position (give or take one)).
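-   // (illustrative: with blocksize_0 == 256 and blocksize_1 == 2048,
-   // padding == (2048-256)/4 == 448 samples.)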
- padding = ((f->blocksize_1 - f->blocksize_0) >> 2);
- if (sample_number < padding)
- last_sample_limit = 0;
- else
- last_sample_limit = sample_number - padding;
-
- left = f->p_first;
- while (left.last_decoded_sample == ~0U) {
- // (untested) the first page does not have a 'last_decoded_sample'
- set_file_offset(f, left.page_end);
- if (!get_seek_page_info(f, &left)) goto error;
- }
-
- right = f->p_last;
- assert(right.last_decoded_sample != ~0U);
-
- // starting from the start is handled differently
- if (last_sample_limit <= left.last_decoded_sample) {
- if (stb_vorbis_seek_start(f)) {
- if (f->current_loc > sample_number)
- return error(f, VORBIS_seek_failed);
- return 1;
- }
- return 0;
- }
-
- while (left.page_end != right.page_start) {
- assert(left.page_end < right.page_start);
- // search range in bytes
- delta = right.page_start - left.page_end;
- if (delta <= 65536) {
- // there's only 64K left to search - handle it linearly
- set_file_offset(f, left.page_end);
- } else {
- if (probe < 2) {
- if (probe == 0) {
- // first probe (interpolate)
- double data_bytes = right.page_end - left.page_start;
- bytes_per_sample = data_bytes / right.last_decoded_sample;
- offset = left.page_start + bytes_per_sample * (last_sample_limit - left.last_decoded_sample);
- } else {
- // second probe (try to bound the other side)
- double error = ((double) last_sample_limit - mid.last_decoded_sample) * bytes_per_sample;
- if (error >= 0 && error < 8000) error = 8000;
- if (error < 0 && error > -8000) error = -8000;
- offset += error * 2;
- }
-
- // ensure the offset is valid
- if (offset < left.page_end)
- offset = left.page_end;
- if (offset > right.page_start - 65536)
- offset = right.page_start - 65536;
-
- set_file_offset(f, (unsigned int) offset);
- } else {
- // binary search for large ranges (offset by 32K to ensure
- // we don't hit the right page)
- set_file_offset(f, left.page_end + (delta / 2) - 32768);
- }
-
- if (!vorbis_find_page(f, NULL, NULL)) goto error;
- }
-
- for (;;) {
- if (!get_seek_page_info(f, &mid)) goto error;
- if (mid.last_decoded_sample != ~0U) break;
- // (untested) no frames end on this page
- set_file_offset(f, mid.page_end);
- assert(mid.page_start < right.page_start);
- }
-
- // if we've just found the last page again then we're in a tricky file,
- // and we're close enough (if it wasn't an interpolation probe).
- if (mid.page_start == right.page_start) {
- if (probe >= 2 || delta <= 65536)
- break;
- } else {
- if (last_sample_limit < mid.last_decoded_sample)
- right = mid;
- else
- left = mid;
- }
-
- ++probe;
- }
-
- // seek back to start of the last packet
- page_start = left.page_start;
- set_file_offset(f, page_start);
- if (!start_page(f)) return error(f, VORBIS_seek_failed);
- end_pos = f->end_seg_with_known_loc;
- assert(end_pos >= 0);
-
- for (;;) {
- for (i = end_pos; i > 0; --i)
- if (f->segments[i-1] != 255)
- break;
-
- start_seg_with_known_loc = i;
-
- if (start_seg_with_known_loc > 0 || !(f->page_flag & PAGEFLAG_continued_packet))
- break;
-
- // (untested) the final packet begins on an earlier page
- if (!go_to_page_before(f, page_start))
- goto error;
-
- page_start = stb_vorbis_get_file_offset(f);
- if (!start_page(f)) goto error;
- end_pos = f->segment_count - 1;
- }
-
- // prepare to start decoding
- f->current_loc_valid = FALSE;
- f->last_seg = FALSE;
- f->valid_bits = 0;
- f->packet_bytes = 0;
- f->bytes_in_seg = 0;
- f->previous_length = 0;
- f->next_seg = start_seg_with_known_loc;
-
- for (i = 0; i < start_seg_with_known_loc; i++)
- skip(f, f->segments[i]);
-
- // start decoding (optimizable - this frame is generally discarded)
- if (!vorbis_pump_first_frame(f))
- return 0;
- if (f->current_loc > sample_number)
- return error(f, VORBIS_seek_failed);
- return 1;
-
-error:
- // try to restore the file to a valid state
- stb_vorbis_seek_start(f);
- return error(f, VORBIS_seek_failed);
-}
-
-// the same as vorbis_decode_initial, but without advancing
-static int peek_decode_initial(vorb *f, int *p_left_start, int *p_left_end, int *p_right_start, int *p_right_end, int *mode)
-{
- int bits_read, bytes_read;
-
- if (!vorbis_decode_initial(f, p_left_start, p_left_end, p_right_start, p_right_end, mode))
- return 0;
-
- // either 1 or 2 bytes were read, figure out which so we can rewind
- bits_read = 1 + ilog(f->mode_count-1);
- if (f->mode_config[*mode].blockflag)
- bits_read += 2;
- bytes_read = (bits_read + 7) / 8;
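-   // (e.g. with 2 modes, ilog(1) == 1, so a long-block packet reads
-   // 1 + 1 + 2 == 4 bits, hence bytes_read == 1.)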
-
- f->bytes_in_seg += bytes_read;
- f->packet_bytes -= bytes_read;
- skip(f, -bytes_read);
- if (f->next_seg == -1)
- f->next_seg = f->segment_count - 1;
- else
- f->next_seg--;
- f->valid_bits = 0;
-
- return 1;
-}
-
-int stb_vorbis_seek_frame(stb_vorbis *f, unsigned int sample_number)
-{
- uint32 max_frame_samples;
-
- if (IS_PUSH_MODE(f)) return error(f, VORBIS_invalid_api_mixing);
-
- // fast page-level search
- if (!seek_to_sample_coarse(f, sample_number))
- return 0;
-
- assert(f->current_loc_valid);
- assert(f->current_loc <= sample_number);
-
- // linear search for the relevant packet
- max_frame_samples = (f->blocksize_1*3 - f->blocksize_0) >> 2;
- while (f->current_loc < sample_number) {
- int left_start, left_end, right_start, right_end, mode, frame_samples;
- if (!peek_decode_initial(f, &left_start, &left_end, &right_start, &right_end, &mode))
- return error(f, VORBIS_seek_failed);
- // calculate the number of samples returned by the next frame
- frame_samples = right_start - left_start;
- if (f->current_loc + frame_samples > sample_number) {
- return 1; // the next frame will contain the sample
- } else if (f->current_loc + frame_samples + max_frame_samples > sample_number) {
- // there's a chance the frame after this could contain the sample
- vorbis_pump_first_frame(f);
- } else {
- // this frame is too early to be relevant
- f->current_loc += frame_samples;
- f->previous_length = 0;
- maybe_start_packet(f);
- flush_packet(f);
- }
- }
- // the next frame should start with the sample
- if (f->current_loc != sample_number) return error(f, VORBIS_seek_failed);
- return 1;
-}
-
-int stb_vorbis_seek(stb_vorbis *f, unsigned int sample_number)
-{
- if (!stb_vorbis_seek_frame(f, sample_number))
- return 0;
-
- if (sample_number != f->current_loc) {
- int n;
- uint32 frame_start = f->current_loc;
- stb_vorbis_get_frame_float(f, &n, NULL);
- assert(sample_number > frame_start);
- assert(f->channel_buffer_start + (int) (sample_number-frame_start) <= f->channel_buffer_end);
- f->channel_buffer_start += (sample_number - frame_start);
- }
-
- return 1;
-}
-
-int stb_vorbis_seek_start(stb_vorbis *f)
-{
- if (IS_PUSH_MODE(f)) { return error(f, VORBIS_invalid_api_mixing); }
- set_file_offset(f, f->first_audio_page_offset);
- f->previous_length = 0;
- f->first_decode = TRUE;
- f->next_seg = -1;
- return vorbis_pump_first_frame(f);
-}
-
-unsigned int stb_vorbis_stream_length_in_samples(stb_vorbis *f)
-{
- unsigned int restore_offset, previous_safe;
- unsigned int end, last_page_loc;
-
- if (IS_PUSH_MODE(f)) return error(f, VORBIS_invalid_api_mixing);
- if (!f->total_samples) {
- unsigned int last;
- uint32 lo,hi;
- char header[6];
-
- // first, store the current decode position so we can restore it
- restore_offset = stb_vorbis_get_file_offset(f);
-
- // now we want to seek back 64K from the end (the last page must
- // be at most a little less than 64K, but let's allow a little slop)
- if (f->stream_len >= 65536 && f->stream_len-65536 >= f->first_audio_page_offset)
- previous_safe = f->stream_len - 65536;
- else
- previous_safe = f->first_audio_page_offset;
-
- set_file_offset(f, previous_safe);
- // previous_safe is now our candidate 'earliest known place that seeking
- // to will lead to the final page'
-
- if (!vorbis_find_page(f, &end, &last)) {
- // if we can't find a page, we're hosed!
- f->error = VORBIS_cant_find_last_page;
- f->total_samples = 0xffffffff;
- goto done;
- }
-
- // check if there are more pages
- last_page_loc = stb_vorbis_get_file_offset(f);
-
- // stop when the last_page flag is set, not when we reach eof;
- // this allows us to stop short of a 'file_section' end without
- // explicitly checking the length of the section
- while (!last) {
- set_file_offset(f, end);
- if (!vorbis_find_page(f, &end, &last)) {
- // the last page we found didn't have the 'last page' flag
- // set. whoops!
- break;
- }
- previous_safe = last_page_loc+1;
- last_page_loc = stb_vorbis_get_file_offset(f);
- }
-
- set_file_offset(f, last_page_loc);
-
- // parse the header
- getn(f, (unsigned char *)header, 6);
- // extract the absolute granule position
- lo = get32(f);
- hi = get32(f);
- if (lo == 0xffffffff && hi == 0xffffffff) {
- f->error = VORBIS_cant_find_last_page;
- f->total_samples = SAMPLE_unknown;
- goto done;
- }
- if (hi)
- lo = 0xfffffffe; // saturate
- f->total_samples = lo;
-
- f->p_last.page_start = last_page_loc;
- f->p_last.page_end = end;
- f->p_last.last_decoded_sample = lo;
-
- done:
- set_file_offset(f, restore_offset);
- }
- return f->total_samples == SAMPLE_unknown ? 0 : f->total_samples;
-}
-
-float stb_vorbis_stream_length_in_seconds(stb_vorbis *f)
-{
- return stb_vorbis_stream_length_in_samples(f) / (float) f->sample_rate;
-}
-
-
-
-int stb_vorbis_get_frame_float(stb_vorbis *f, int *channels, float ***output)
-{
- int len, right,left,i;
- if (IS_PUSH_MODE(f)) return error(f, VORBIS_invalid_api_mixing);
-
- if (!vorbis_decode_packet(f, &len, &left, &right)) {
- f->channel_buffer_start = f->channel_buffer_end = 0;
- return 0;
- }
-
- len = vorbis_finish_frame(f, len, left, right);
- for (i=0; i < f->channels; ++i)
- f->outputs[i] = f->channel_buffers[i] + left;
-
- f->channel_buffer_start = left;
- f->channel_buffer_end = left+len;
-
- if (channels) *channels = f->channels;
- if (output) *output = f->outputs;
- return len;
-}
-
-#ifndef STB_VORBIS_NO_STDIO
-
-stb_vorbis * stb_vorbis_open_file_section(FILE *file, int close_on_free, int *error, const stb_vorbis_alloc *alloc, unsigned int length)
-{
- stb_vorbis *f, p;
- vorbis_init(&p, alloc);
- p.f = file;
- p.f_start = (uint32) ftell(file);
- p.stream_len = length;
- p.close_on_free = close_on_free;
- if (start_decoder(&p)) {
- f = vorbis_alloc(&p);
- if (f) {
- *f = p;
- vorbis_pump_first_frame(f);
- return f;
- }
- }
- if (error) *error = p.error;
- vorbis_deinit(&p);
- return NULL;
-}
-
-stb_vorbis * stb_vorbis_open_file(FILE *file, int close_on_free, int *error, const stb_vorbis_alloc *alloc)
-{
- unsigned int len, start;
- start = (unsigned int) ftell(file);
- fseek(file, 0, SEEK_END);
- len = (unsigned int) (ftell(file) - start);
- fseek(file, start, SEEK_SET);
- return stb_vorbis_open_file_section(file, close_on_free, error, alloc, len);
-}
-
-stb_vorbis * stb_vorbis_open_filename(const char *filename, int *error, const stb_vorbis_alloc *alloc)
-{
- FILE *f;
-#if defined(_WIN32) && defined(__STDC_WANT_SECURE_LIB__)
- if (0 != fopen_s(&f, filename, "rb"))
- f = NULL;
-#else
- f = fopen(filename, "rb");
-#endif
- if (f)
- return stb_vorbis_open_file(f, TRUE, error, alloc);
- if (error) *error = VORBIS_file_open_failure;
- return NULL;
-}
-#endif // STB_VORBIS_NO_STDIO
-
-stb_vorbis * stb_vorbis_open_memory(const unsigned char *data, int len, int *error, const stb_vorbis_alloc *alloc)
-{
- stb_vorbis *f, p;
- if (data == NULL) return NULL;
- vorbis_init(&p, alloc);
- p.stream = (uint8 *) data;
- p.stream_end = (uint8 *) data + len;
- p.stream_start = (uint8 *) p.stream;
- p.stream_len = len;
- p.push_mode = FALSE;
- if (start_decoder(&p)) {
- f = vorbis_alloc(&p);
- if (f) {
- *f = p;
- vorbis_pump_first_frame(f);
- if (error) *error = VORBIS__no_error;
- return f;
- }
- }
- if (error) *error = p.error;
- vorbis_deinit(&p);
- return NULL;
-}
-
-#ifndef STB_VORBIS_NO_INTEGER_CONVERSION
-#define PLAYBACK_MONO 1
-#define PLAYBACK_LEFT 2
-#define PLAYBACK_RIGHT 4
-
-#define L (PLAYBACK_LEFT | PLAYBACK_MONO)
-#define C (PLAYBACK_LEFT | PLAYBACK_RIGHT | PLAYBACK_MONO)
-#define R (PLAYBACK_RIGHT | PLAYBACK_MONO)
-
-static int8 channel_position[7][6] =
-{
- { 0 },
- { C },
- { L, R },
- { L, C, R },
- { L, R, L, R },
- { L, C, R, L, R },
- { L, C, R, L, R, C },
-};
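-// (e.g. the 5-channel row maps the Vorbis order front-left, center,
-// front-right, rear-left, rear-right onto left/both/right playback.)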
-
-
-#ifndef STB_VORBIS_NO_FAST_SCALED_FLOAT
- typedef union {
- float f;
- int i;
- } float_conv;
- typedef char stb_vorbis_float_size_test[sizeof(float)==4 && sizeof(int) == 4];
- #define FASTDEF(x) float_conv x
- // add (1<<23) to convert to int, then divide by 2^SHIFT, then add 0.5/2^SHIFT to round
- #define MAGIC(SHIFT) (1.5f * (1 << (23-SHIFT)) + 0.5f/(1 << SHIFT))
- #define ADDEND(SHIFT) (((150-SHIFT) << 23) + (1 << 22))
- #define FAST_SCALED_FLOAT_TO_INT(temp,x,s) (temp.f = (x) + MAGIC(s), temp.i - ADDEND(s))
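-   // (worked example for s == 15: temp.f = (x) + MAGIC(15) ~= x + 384 puts x's
-   // scaled bits in the mantissa, and temp.i - ADDEND(15) recovers roughly
-   // x * 32768, with the 0.5/2^s term in MAGIC providing the rounding.)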
- #define check_endianness()
-#else
- #define FAST_SCALED_FLOAT_TO_INT(temp,x,s) ((int) ((x) * (1 << (s))))
- #define check_endianness()
- #define FASTDEF(x)
-#endif
-
-static void copy_samples(short *dest, float *src, int len)
-{
- int i;
- check_endianness();
- for (i=0; i < len; ++i) {
- FASTDEF(temp);
- int v = FAST_SCALED_FLOAT_TO_INT(temp, src[i],15);
- if ((unsigned int) (v + 32768) > 65535)
- v = v < 0 ? -32768 : 32767;
- dest[i] = v;
- }
-}
-
-static void compute_samples(int mask, short *output, int num_c, float **data, int d_offset, int len)
-{
- #define BUFFER_SIZE 32
- float buffer[BUFFER_SIZE];
- int i,j,o,n = BUFFER_SIZE;
- check_endianness();
- for (o = 0; o < len; o += BUFFER_SIZE) {
- memset(buffer, 0, sizeof(buffer));
- if (o + n > len) n = len - o;
- for (j=0; j < num_c; ++j) {
- if (channel_position[num_c][j] & mask) {
- for (i=0; i < n; ++i)
- buffer[i] += data[j][d_offset+o+i];
- }
- }
- for (i=0; i < n; ++i) {
- FASTDEF(temp);
- int v = FAST_SCALED_FLOAT_TO_INT(temp,buffer[i],15);
- if ((unsigned int) (v + 32768) > 65535)
- v = v < 0 ? -32768 : 32767;
- output[o+i] = v;
- }
- }
-}
-
-static void compute_stereo_samples(short *output, int num_c, float **data, int d_offset, int len)
-{
- #define BUFFER_SIZE 32
- float buffer[BUFFER_SIZE];
- int i,j,o,n = BUFFER_SIZE >> 1;
- // o is the offset in the source data
- check_endianness();
- for (o = 0; o < len; o += BUFFER_SIZE >> 1) {
- // o2 is the offset in the output data
- int o2 = o << 1;
- memset(buffer, 0, sizeof(buffer));
- if (o + n > len) n = len - o;
- for (j=0; j < num_c; ++j) {
- int m = channel_position[num_c][j] & (PLAYBACK_LEFT | PLAYBACK_RIGHT);
- if (m == (PLAYBACK_LEFT | PLAYBACK_RIGHT)) {
- for (i=0; i < n; ++i) {
- buffer[i*2+0] += data[j][d_offset+o+i];
- buffer[i*2+1] += data[j][d_offset+o+i];
- }
- } else if (m == PLAYBACK_LEFT) {
- for (i=0; i < n; ++i) {
- buffer[i*2+0] += data[j][d_offset+o+i];
- }
- } else if (m == PLAYBACK_RIGHT) {
- for (i=0; i < n; ++i) {
- buffer[i*2+1] += data[j][d_offset+o+i];
- }
- }
- }
- for (i=0; i < (n<<1); ++i) {
- FASTDEF(temp);
- int v = FAST_SCALED_FLOAT_TO_INT(temp,buffer[i],15);
- if ((unsigned int) (v + 32768) > 65535)
- v = v < 0 ? -32768 : 32767;
- output[o2+i] = v;
- }
- }
-}
-
-static void convert_samples_short(int buf_c, short **buffer, int b_offset, int data_c, float **data, int d_offset, int samples)
-{
- int i;
- if (buf_c != data_c && buf_c <= 2 && data_c <= 6) {
- static int channel_selector[3][2] = { {0}, {PLAYBACK_MONO}, {PLAYBACK_LEFT, PLAYBACK_RIGHT} };
- for (i=0; i < buf_c; ++i)
- compute_samples(channel_selector[buf_c][i], buffer[i]+b_offset, data_c, data, d_offset, samples);
- } else {
- int limit = buf_c < data_c ? buf_c : data_c;
- for (i=0; i < limit; ++i)
- copy_samples(buffer[i]+b_offset, data[i]+d_offset, samples);
- for ( ; i < buf_c; ++i)
- memset(buffer[i]+b_offset, 0, sizeof(short) * samples);
- }
-}
-
-int stb_vorbis_get_frame_short(stb_vorbis *f, int num_c, short **buffer, int num_samples)
-{
- float **output = NULL;
- int len = stb_vorbis_get_frame_float(f, NULL, &output);
- if (len > num_samples) len = num_samples;
- if (len)
- convert_samples_short(num_c, buffer, 0, f->channels, output, 0, len);
- return len;
-}
-
-static void convert_channels_short_interleaved(int buf_c, short *buffer, int data_c, float **data, int d_offset, int len)
-{
- int i;
- check_endianness();
- if (buf_c != data_c && buf_c <= 2 && data_c <= 6) {
- assert(buf_c == 2);
- for (i=0; i < buf_c; ++i)
- compute_stereo_samples(buffer, data_c, data, d_offset, len);
- } else {
- int limit = buf_c < data_c ? buf_c : data_c;
- int j;
- for (j=0; j < len; ++j) {
- for (i=0; i < limit; ++i) {
- FASTDEF(temp);
- float f = data[i][d_offset+j];
-            int v = FAST_SCALED_FLOAT_TO_INT(temp, f, 15);
- if ((unsigned int) (v + 32768) > 65535)
- v = v < 0 ? -32768 : 32767;
- *buffer++ = v;
- }
- for ( ; i < buf_c; ++i)
- *buffer++ = 0;
- }
- }
-}
-
-int stb_vorbis_get_frame_short_interleaved(stb_vorbis *f, int num_c, short *buffer, int num_shorts)
-{
- float **output;
- int len;
- if (num_c == 1) return stb_vorbis_get_frame_short(f,num_c,&buffer, num_shorts);
- len = stb_vorbis_get_frame_float(f, NULL, &output);
- if (len) {
- if (len*num_c > num_shorts) len = num_shorts / num_c;
- convert_channels_short_interleaved(num_c, buffer, f->channels, output, 0, len);
- }
- return len;
-}
-
-int stb_vorbis_get_samples_short_interleaved(stb_vorbis *f, int channels, short *buffer, int num_shorts)
-{
- float **outputs;
- int len = num_shorts / channels;
- int n=0;
- int z = f->channels;
- if (z > channels) z = channels;
- while (n < len) {
- int k = f->channel_buffer_end - f->channel_buffer_start;
- if (n+k >= len) k = len - n;
- if (k)
- convert_channels_short_interleaved(channels, buffer, f->channels, f->channel_buffers, f->channel_buffer_start, k);
- buffer += k*channels;
- n += k;
- f->channel_buffer_start += k;
- if (n == len) break;
- if (!stb_vorbis_get_frame_float(f, NULL, &outputs)) break;
- }
- return n;
-}
-
-int stb_vorbis_get_samples_short(stb_vorbis *f, int channels, short **buffer, int len)
-{
- float **outputs;
- int n=0;
- int z = f->channels;
- if (z > channels) z = channels;
- while (n < len) {
- int k = f->channel_buffer_end - f->channel_buffer_start;
- if (n+k >= len) k = len - n;
- if (k)
- convert_samples_short(channels, buffer, n, f->channels, f->channel_buffers, f->channel_buffer_start, k);
- n += k;
- f->channel_buffer_start += k;
- if (n == len) break;
- if (!stb_vorbis_get_frame_float(f, NULL, &outputs)) break;
- }
- return n;
-}
-
-#ifndef STB_VORBIS_NO_STDIO
-int stb_vorbis_decode_filename(const char *filename, int *channels, int *sample_rate, short **output)
-{
- int data_len, offset, total, limit, error;
- short *data;
- stb_vorbis *v = stb_vorbis_open_filename(filename, &error, NULL);
- if (v == NULL) return -1;
- limit = v->channels * 4096;
- *channels = v->channels;
- if (sample_rate)
- *sample_rate = v->sample_rate;
- offset = data_len = 0;
- total = limit;
- data = (short *) malloc(total * sizeof(*data));
- if (data == NULL) {
- stb_vorbis_close(v);
- return -2;
- }
- for (;;) {
- int n = stb_vorbis_get_frame_short_interleaved(v, v->channels, data+offset, total-offset);
- if (n == 0) break;
- data_len += n;
- offset += n * v->channels;
- if (offset + limit > total) {
- short *data2;
- total *= 2;
- data2 = (short *) realloc(data, total * sizeof(*data));
- if (data2 == NULL) {
- free(data);
- stb_vorbis_close(v);
- return -2;
- }
- data = data2;
- }
- }
- *output = data;
- stb_vorbis_close(v);
- return data_len;
-}
-#endif // STB_VORBIS_NO_STDIO
-
-int stb_vorbis_decode_memory(const uint8 *mem, int len, int *channels, int *sample_rate, short **output)
-{
- int data_len, offset, total, limit, error;
- short *data;
- stb_vorbis *v = stb_vorbis_open_memory(mem, len, &error, NULL);
- if (v == NULL) return -1;
- limit = v->channels * 4096;
- *channels = v->channels;
- if (sample_rate)
- *sample_rate = v->sample_rate;
- offset = data_len = 0;
- total = limit;
- data = (short *) malloc(total * sizeof(*data));
- if (data == NULL) {
- stb_vorbis_close(v);
- return -2;
- }
- for (;;) {
- int n = stb_vorbis_get_frame_short_interleaved(v, v->channels, data+offset, total-offset);
- if (n == 0) break;
- data_len += n;
- offset += n * v->channels;
- if (offset + limit > total) {
- short *data2;
- total *= 2;
- data2 = (short *) realloc(data, total * sizeof(*data));
- if (data2 == NULL) {
- free(data);
- stb_vorbis_close(v);
- return -2;
- }
- data = data2;
- }
- }
- *output = data;
- stb_vorbis_close(v);
- return data_len;
-}
-#endif // STB_VORBIS_NO_INTEGER_CONVERSION
-
-int stb_vorbis_get_samples_float_interleaved(stb_vorbis *f, int channels, float *buffer, int num_floats)
-{
- float **outputs;
- int len = num_floats / channels;
- int n=0;
- int z = f->channels;
- if (z > channels) z = channels;
- while (n < len) {
- int i,j;
- int k = f->channel_buffer_end - f->channel_buffer_start;
- if (n+k >= len) k = len - n;
- for (j=0; j < k; ++j) {
- for (i=0; i < z; ++i)
- *buffer++ = f->channel_buffers[i][f->channel_buffer_start+j];
- for ( ; i < channels; ++i)
- *buffer++ = 0;
- }
- n += k;
- f->channel_buffer_start += k;
- if (n == len)
- break;
- if (!stb_vorbis_get_frame_float(f, NULL, &outputs))
- break;
- }
- return n;
-}
-
-int stb_vorbis_get_samples_float(stb_vorbis *f, int channels, float **buffer, int num_samples)
-{
- float **outputs;
- int n=0;
- int z = f->channels;
- if (z > channels) z = channels;
- while (n < num_samples) {
- int i;
- int k = f->channel_buffer_end - f->channel_buffer_start;
- if (n+k >= num_samples) k = num_samples - n;
- if (k) {
- for (i=0; i < z; ++i)
- memcpy(buffer[i]+n, f->channel_buffers[i]+f->channel_buffer_start, sizeof(float)*k);
- for ( ; i < channels; ++i)
- memset(buffer[i]+n, 0, sizeof(float) * k);
- }
- n += k;
- f->channel_buffer_start += k;
- if (n == num_samples)
- break;
- if (!stb_vorbis_get_frame_float(f, NULL, &outputs))
- break;
- }
- return n;
-}
-#endif // STB_VORBIS_NO_PULLDATA_API
-
-/* Version history
- 1.17 - 2019-07-08 - fix CVE-2019-13217, -13218, -13219, -13220, -13221, -13222, -13223
- found with Mayhem by ForAllSecure
- 1.16 - 2019-03-04 - fix warnings
- 1.15 - 2019-02-07 - explicit failure if Ogg Skeleton data is found
- 1.14 - 2018-02-11 - delete bogus dealloca usage
- 1.13 - 2018-01-29 - fix truncation of last frame (hopefully)
- 1.12 - 2017-11-21 - limit residue begin/end to blocksize/2 to avoid large temp allocs in bad/corrupt files
- 1.11 - 2017-07-23 - fix MinGW compilation
- 1.10 - 2017-03-03 - more robust seeking; fix negative ilog(); clear error in open_memory
- 1.09 - 2016-04-04 - back out 'avoid discarding last frame' fix from previous version
- 1.08 - 2016-04-02 - fixed multiple warnings; fix setup memory leaks;
- avoid discarding last frame of audio data
- 1.07 - 2015-01-16 - fixed some warnings, fix mingw, const-correct API
- some more crash fixes when out of memory or with corrupt files
- 1.06 - 2015-08-31 - full, correct support for seeking API (Dougall Johnson)
- some crash fixes when out of memory or with corrupt files
- 1.05 - 2015-04-19 - don't define __forceinline if it's redundant
- 1.04 - 2014-08-27 - fix missing const-correct case in API
- 1.03 - 2014-08-07 - Warning fixes
- 1.02 - 2014-07-09 - Declare qsort compare function _cdecl on windows
- 1.01 - 2014-06-18 - fix stb_vorbis_get_samples_float
- 1.0 - 2014-05-26 - fix memory leaks; fix warnings; fix bugs in multichannel
- (API change) report sample rate for decode-full-file funcs
- 0.99996 - bracket #include <malloc.h> for macintosh compilation by Laurent Gomila
- 0.99995 - use union instead of pointer-cast for fast-float-to-int to avoid alias-optimization problem
- 0.99994 - change fast-float-to-int to work in single-precision FPU mode, remove endian-dependence
- 0.99993 - remove assert that fired on legal files with empty tables
- 0.99992 - rewind-to-start
- 0.99991 - bugfix to stb_vorbis_get_samples_short by Bernhard Wodo
- 0.9999 - (should have been 0.99990) fix no-CRT support, compiling as C++
- 0.9998 - add a full-decode function with a memory source
- 0.9997 - fix a bug in the read-from-FILE case in 0.9996 addition
- 0.9996 - query length of vorbis stream in samples/seconds
- 0.9995 - bugfix to another optimization that only happened in certain files
- 0.9994 - bugfix to one of the optimizations that caused significant (but inaudible?) errors
- 0.9993 - performance improvements; runs in 99% to 104% of time of reference implementation
- 0.9992 - performance improvement of IMDCT; now performs close to reference implementation
- 0.9991 - performance improvement of IMDCT
- 0.999 - (should have been 0.9990) performance improvement of IMDCT
- 0.998 - no-CRT support from Casey Muratori
- 0.997 - bugfixes for bugs found by Terje Mathisen
- 0.996 - bugfix: fast-huffman decode initialized incorrectly for sparse codebooks; fixing gives 10% speedup - found by Terje Mathisen
- 0.995 - bugfix: fix to 'effective' overrun detection - found by Terje Mathisen
- 0.994 - bugfix: garbage decode on final VQ symbol of a non-multiple - found by Terje Mathisen
- 0.993 - bugfix: pushdata API required 1 extra byte for empty page (failed to consume final page if empty) - found by Terje Mathisen
- 0.992 - fixes for MinGW warning
- 0.991 - turn fast-float-conversion on by default
- 0.990 - fix push-mode seek recovery if you seek into the headers
- 0.98b - fix to bad release of 0.98
- 0.98 - fix push-mode seek recovery; robustify float-to-int and support non-fast mode
- 0.97 - builds under c++ (typecasting, don't use 'class' keyword)
- 0.96 - somehow MY 0.95 was right, but the web one was wrong, so here's my 0.95 rereleased as 0.96, fixes a typo in the clamping code
- 0.95 - clamping code for 16-bit functions
-      0.94 - not publicly released
- 0.93 - fixed all-zero-floor case (was decoding garbage)
- 0.92 - fixed a memory leak
- 0.91 - conditional compiles to omit parts of the API and the infrastructure to support them: STB_VORBIS_NO_PULLDATA_API, STB_VORBIS_NO_PUSHDATA_API, STB_VORBIS_NO_STDIO, STB_VORBIS_NO_INTEGER_CONVERSION
- 0.90 - first public release
-*/
-
-#endif // STB_VORBIS_HEADER_ONLY
-
-
-/*
-------------------------------------------------------------------------------
-This software is available under 2 licenses -- choose whichever you prefer.
-------------------------------------------------------------------------------
-ALTERNATIVE A - MIT License
-Copyright (c) 2017 Sean Barrett
-Permission is hereby granted, free of charge, to any person obtaining a copy of
-this software and associated documentation files (the "Software"), to deal in
-the Software without restriction, including without limitation the rights to
-use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
-of the Software, and to permit persons to whom the Software is furnished to do
-so, subject to the following conditions:
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-------------------------------------------------------------------------------
-ALTERNATIVE B - Public Domain (www.unlicense.org)
-This is free and unencumbered software released into the public domain.
-Anyone is free to copy, modify, publish, use, compile, sell, or distribute this
-software, either in source code form or as a compiled binary, for any purpose,
-commercial or non-commercial, and by any means.
-In jurisdictions that recognize copyright laws, the author or authors of this
-software dedicate any and all copyright interest in the software to the public
-domain. We make this dedication for the benefit of the public at large and to
-the detriment of our heirs and successors. We intend this dedication to be an
-overt act of relinquishment in perpetuity of all present and future rights to
-this software under copyright law.
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
-ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-------------------------------------------------------------------------------
-*/
diff --git a/thirdparty/misc/stb_vorbis.h b/thirdparty/misc/stb_vorbis.h
deleted file mode 100644
index 357efcd5fc..0000000000
--- a/thirdparty/misc/stb_vorbis.h
+++ /dev/null
@@ -1,2 +0,0 @@
-#define STB_VORBIS_HEADER_ONLY
-#include "stb_vorbis.c"
diff --git a/thirdparty/msdfgen/CHANGELOG.md b/thirdparty/msdfgen/CHANGELOG.md
new file mode 100644
index 0000000000..4b9a752650
--- /dev/null
+++ b/thirdparty/msdfgen/CHANGELOG.md
@@ -0,0 +1,82 @@
+
+## Version 1.9 (2021-05-28)
+
+- Error correction of multi-channel distance fields has been completely reworked
+- Added new edge coloring strategy that optimizes colors based on distances between edges
+- Added some minor functions for the library API
+- Minor code refactor and optimizations
+
+## Version 1.8 (2020-10-17)
+
+- Integrated the Skia library into the project, which is used to preprocess the shape geometry and eliminate any self-intersections and other irregularities previously unsupported by the software
+  - The scanline pass and overlapping contour mode are made obsolete by this step and have been disabled by default. The preprocess step can be disabled by the new `-nopreprocess` switch, and the scanline pass and overlap mode can be re-enabled by the `-scanline` and `-overlap` switches respectively.
+ - The project can be built without the Skia library, forgoing the geometry preprocessing feature. This is controlled by the macro definition `MSDFGEN_USE_SKIA`
+- Significantly improved performance of the core algorithm by reusing results from previously computed pixels
+- Introduced an additional error correction routine which eliminates MSDF artifacts by analytically predicting results of bilinear interpolation
+- Added the possibility to load font glyphs by their index rather than a Unicode value (use the prefix `g` before the character code in the `-font` argument)
+- Added `-distanceshift` argument that can be used to adjust the center of the distance range in the output distance field
+- Fixed several errors in the evaluation of curve distances
+- Fixed an issue with paths containing convergent corners (those whose inner angle is zero)
+- The algorithm for pseudo-distance computation slightly changed, fixing certain rare edge cases and improving consistency
+- Added the ability to supply one's own `FT_Face` handle to the msdfgen library
+- Minor refactor of the core algorithm
+
+### Version 1.7.1 (2020-03-09)
+
+- Fixed an edge case bug in scanline rasterization
+
+## Version 1.7 (2020-03-07)
+
+- Added `mtsdf` mode - a combination of `msdf` with `sdf` in the alpha channel
+- Distance fields can now be stored as uncompressed TIFF image files with floating point precision
+- Bitmap class refactor - template argument split into data type and number of channels, bitmap reference classes introduced
+- Added a secondary "ink trap" edge coloring heuristic, can be selected using `-coloringstrategy inktrap`
+- Added computation of estimated rendering error for a given SDF
+- Added computation of bounding box that includes sharp mitered corners
+- The API for bounds computation of the `Shape` class changed for clarity
+- Fixed several edge case bugs
+
+## Version 1.6 (2019-04-08)
+
+- Core algorithm rewritten to split up advanced edge selection logic into modular template arguments.
+- Pseudo-distance evaluation reworked to eliminate discontinuities at the midpoint between edges.
+- MSDF error correction reworked to also fix distances away from edges and consider diagonal pairs. Code simplified.
+- Added scanline rasterization support for `Shape`.
+- Added a scanline pass in the standalone version, which corrects the signs in the distance field according to the selected fill rule (`-fillrule`). Can be disabled using `-noscanline`.
+- Fixed autoframe scaling, which previously caused the output to have an unnecessary empty border.
+- `-guessorder` switch no longer enabled by default, as the functionality is now provided by the scanline pass.
+- Updated FreeType and other libraries, changed to static linkage
+- Added 64-bit and static library builds to the Visual Studio solution
+
+## Version 1.5 (2017-07-23)
+
+- Fixed rounding error in cubic curve splitting.
+- SVG parser fixes and support for additional path commands.
+- Added CMake build script.
+
+## Version 1.4 (2017-02-09)
+
+- Reworked contour combining logic to support overlapping contours. Original algorithm preserved in functions with `_legacy` suffix, which are invoked by the new `-legacy` switch.
+- Fixed a severe bug in cubic curve distance computation, where a control point lies at the endpoint.
+- Standalone version now automatically detects if the input has the wrong orientation and adjusts the distance field accordingly. Can be disabled by `-keeporder` or `-reverseorder` switch.
+- SVG parser fixes and improvements.
+
+## Version 1.3 (2016-12-07)
+
+- Fixed `-reverseorder` switch.
+- Fixed glyph loading to use the proper method of acquiring outlines from FreeType.
+
+## Version 1.2 (2016-07-20)
+
+- Added option to specify that shape vertices are listed in reverse order (`-reverseorder`).
+- Added option to set a seed for the edge coloring heuristic (`-seed <n>`), which can be used to adjust the output.
+- Fixed parsing of glyph contours that start with a curve control point.
+
+## Version 1.1 (2016-05-08)
+
+- Switched to MIT license due to popular demand.
+- Fixed SDF rendering anti-aliasing when the output is smaller than the distance field.
+
+## Version 1.0 (2016-04-28)
+
+- Project published.
diff --git a/thirdparty/msdfgen/LICENSE.txt b/thirdparty/msdfgen/LICENSE.txt
new file mode 100644
index 0000000000..5fb05446bc
--- /dev/null
+++ b/thirdparty/msdfgen/LICENSE.txt
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2016 Viktor Chlumsky
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/thirdparty/msdfgen/core/Bitmap.h b/thirdparty/msdfgen/core/Bitmap.h
new file mode 100644
index 0000000000..14407d6c34
--- /dev/null
+++ b/thirdparty/msdfgen/core/Bitmap.h
@@ -0,0 +1,50 @@
+
+#pragma once
+
+#include "BitmapRef.hpp"
+
+namespace msdfgen {
+
+/// A 2D image bitmap with N channels of type T. Pixel memory is managed by the class.
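+/// (For example, RGB multi-channel distance fields are stored as Bitmap<float, 3>.)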
+template <typename T, int N = 1>
+class Bitmap {
+
+public:
+ Bitmap();
+ Bitmap(int width, int height);
+ Bitmap(const BitmapConstRef<T, N> &orig);
+ Bitmap(const Bitmap<T, N> &orig);
+#ifdef MSDFGEN_USE_CPP11
+ Bitmap(Bitmap<T, N> &&orig);
+#endif
+ ~Bitmap();
+ Bitmap<T, N> & operator=(const BitmapConstRef<T, N> &orig);
+ Bitmap<T, N> & operator=(const Bitmap<T, N> &orig);
+#ifdef MSDFGEN_USE_CPP11
+ Bitmap<T, N> & operator=(Bitmap<T, N> &&orig);
+#endif
+ /// Bitmap width in pixels.
+ int width() const;
+ /// Bitmap height in pixels.
+ int height() const;
+ T * operator()(int x, int y);
+ const T * operator()(int x, int y) const;
+#ifdef MSDFGEN_USE_CPP11
+ explicit operator T *();
+ explicit operator const T *() const;
+#else
+ operator T *();
+ operator const T *() const;
+#endif
+ operator BitmapRef<T, N>();
+ operator BitmapConstRef<T, N>() const;
+
+private:
+ T *pixels;
+ int w, h;
+
+};
+
+}
+
+#include "Bitmap.hpp"
diff --git a/thirdparty/msdfgen/core/Bitmap.hpp b/thirdparty/msdfgen/core/Bitmap.hpp
new file mode 100644
index 0000000000..cb16cac8d4
--- /dev/null
+++ b/thirdparty/msdfgen/core/Bitmap.hpp
@@ -0,0 +1,117 @@
+
+#include "Bitmap.h"
+
+#include <cstdlib>
+#include <cstring>
+
+namespace msdfgen {
+
+template <typename T, int N>
+Bitmap<T, N>::Bitmap() : pixels(NULL), w(0), h(0) { }
+
+template <typename T, int N>
+Bitmap<T, N>::Bitmap(int width, int height) : w(width), h(height) {
+ pixels = new T[N*w*h];
+}
+
+template <typename T, int N>
+Bitmap<T, N>::Bitmap(const BitmapConstRef<T, N> &orig) : w(orig.width), h(orig.height) {
+ pixels = new T[N*w*h];
+ memcpy(pixels, orig.pixels, sizeof(T)*N*w*h);
+}
+
+template <typename T, int N>
+Bitmap<T, N>::Bitmap(const Bitmap<T, N> &orig) : w(orig.w), h(orig.h) {
+ pixels = new T[N*w*h];
+ memcpy(pixels, orig.pixels, sizeof(T)*N*w*h);
+}
+
+#ifdef MSDFGEN_USE_CPP11
+template <typename T, int N>
+Bitmap<T, N>::Bitmap(Bitmap<T, N> &&orig) : pixels(orig.pixels), w(orig.w), h(orig.h) {
+ orig.pixels = NULL;
+ orig.w = 0, orig.h = 0;
+}
+#endif
+
+template <typename T, int N>
+Bitmap<T, N>::~Bitmap() {
+ delete [] pixels;
+}
+
+template <typename T, int N>
+Bitmap<T, N> & Bitmap<T, N>::operator=(const BitmapConstRef<T, N> &orig) {
+ if (pixels != orig.pixels) {
+ delete [] pixels;
+ w = orig.width, h = orig.height;
+ pixels = new T[N*w*h];
+ memcpy(pixels, orig.pixels, sizeof(T)*N*w*h);
+ }
+ return *this;
+}
+
+template <typename T, int N>
+Bitmap<T, N> & Bitmap<T, N>::operator=(const Bitmap<T, N> &orig) {
+ if (this != &orig) {
+ delete [] pixels;
+ w = orig.w, h = orig.h;
+ pixels = new T[N*w*h];
+ memcpy(pixels, orig.pixels, sizeof(T)*N*w*h);
+ }
+ return *this;
+}
+
+#ifdef MSDFGEN_USE_CPP11
+template <typename T, int N>
+Bitmap<T, N> & Bitmap<T, N>::operator=(Bitmap<T, N> &&orig) {
+ if (this != &orig) {
+ delete [] pixels;
+ pixels = orig.pixels;
+ w = orig.w, h = orig.h;
+ orig.pixels = NULL;
+ }
+ return *this;
+}
+#endif
+
+template <typename T, int N>
+int Bitmap<T, N>::width() const {
+ return w;
+}
+
+template <typename T, int N>
+int Bitmap<T, N>::height() const {
+ return h;
+}
+
+template <typename T, int N>
+T * Bitmap<T, N>::operator()(int x, int y) {
+ return pixels+N*(w*y+x);
+}
+
+template <typename T, int N>
+const T * Bitmap<T, N>::operator()(int x, int y) const {
+ return pixels+N*(w*y+x);
+}
+
+template <typename T, int N>
+Bitmap<T, N>::operator T *() {
+ return pixels;
+}
+
+template <typename T, int N>
+Bitmap<T, N>::operator const T *() const {
+ return pixels;
+}
+
+template <typename T, int N>
+Bitmap<T, N>::operator BitmapRef<T, N>() {
+ return BitmapRef<T, N>(pixels, w, h);
+}
+
+template <typename T, int N>
+Bitmap<T, N>::operator BitmapConstRef<T, N>() const {
+ return BitmapConstRef<T, N>(pixels, w, h);
+}
+
+}
diff --git a/thirdparty/msdfgen/core/BitmapRef.hpp b/thirdparty/msdfgen/core/BitmapRef.hpp
new file mode 100644
index 0000000000..6f9620dcdf
--- /dev/null
+++ b/thirdparty/msdfgen/core/BitmapRef.hpp
@@ -0,0 +1,43 @@
+
+#pragma once
+
+#include <cstdlib>
+
+namespace msdfgen {
+
+typedef unsigned char byte;
+
+/// Reference to a 2D image bitmap or a buffer acting as one. Pixel storage not owned or managed by the object.
+template <typename T, int N = 1>
+struct BitmapRef {
+
+ T *pixels;
+ int width, height;
+
+ inline BitmapRef() : pixels(NULL), width(0), height(0) { }
+ inline BitmapRef(T *pixels, int width, int height) : pixels(pixels), width(width), height(height) { }
+
+ inline T * operator()(int x, int y) const {
+ return pixels+N*(width*y+x);
+ }
+
+};
+
+/// Constant reference to a 2D image bitmap or a buffer acting as one. Pixel storage not owned or managed by the object.
+template <typename T, int N = 1>
+struct BitmapConstRef {
+
+ const T *pixels;
+ int width, height;
+
+ inline BitmapConstRef() : pixels(NULL), width(0), height(0) { }
+ inline BitmapConstRef(const T *pixels, int width, int height) : pixels(pixels), width(width), height(height) { }
+ inline BitmapConstRef(const BitmapRef<T, N> &orig) : pixels(orig.pixels), width(orig.width), height(orig.height) { }
+
+ inline const T * operator()(int x, int y) const {
+ return pixels+N*(width*y+x);
+ }
+
+};
+
+}
diff --git a/thirdparty/msdfgen/core/Contour.cpp b/thirdparty/msdfgen/core/Contour.cpp
new file mode 100644
index 0000000000..ca80d3c55a
--- /dev/null
+++ b/thirdparty/msdfgen/core/Contour.cpp
@@ -0,0 +1,90 @@
+
+#include "Contour.h"
+
+#include "arithmetics.hpp"
+
+namespace msdfgen {
+
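+// One term of the trapezoid form of the shoelace formula; summed around a
+// closed contour, the sign of the total gives the contour's orientation.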
+static double shoelace(const Point2 &a, const Point2 &b) {
+ return (b.x-a.x)*(a.y+b.y);
+}
+
+void Contour::addEdge(const EdgeHolder &edge) {
+ edges.push_back(edge);
+}
+
+#ifdef MSDFGEN_USE_CPP11
+void Contour::addEdge(EdgeHolder &&edge) {
+ edges.push_back((EdgeHolder &&) edge);
+}
+#endif
+
+EdgeHolder & Contour::addEdge() {
+ edges.resize(edges.size()+1);
+ return edges.back();
+}
+
+static void boundPoint(double &l, double &b, double &r, double &t, Point2 p) {
+ if (p.x < l) l = p.x;
+ if (p.y < b) b = p.y;
+ if (p.x > r) r = p.x;
+ if (p.y > t) t = p.y;
+}
+
+void Contour::bound(double &l, double &b, double &r, double &t) const {
+ for (std::vector<EdgeHolder>::const_iterator edge = edges.begin(); edge != edges.end(); ++edge)
+ (*edge)->bound(l, b, r, t);
+}
+
+void Contour::boundMiters(double &l, double &b, double &r, double &t, double border, double miterLimit, int polarity) const {
+ if (edges.empty())
+ return;
+ Vector2 prevDir = edges.back()->direction(1).normalize(true);
+ for (std::vector<EdgeHolder>::const_iterator edge = edges.begin(); edge != edges.end(); ++edge) {
+ Vector2 dir = -(*edge)->direction(0).normalize(true);
+ if (polarity*crossProduct(prevDir, dir) >= 0) {
+ double miterLength = miterLimit;
+ double q = .5*(1-dotProduct(prevDir, dir));
+ if (q > 0)
+ miterLength = min(1/sqrt(q), miterLimit);
+ Point2 miter = (*edge)->point(0)+border*miterLength*(prevDir+dir).normalize(true);
+ boundPoint(l, b, r, t, miter);
+ }
+ prevDir = (*edge)->direction(1).normalize(true);
+ }
+}
+
+int Contour::winding() const {
+ if (edges.empty())
+ return 0;
+ double total = 0;
+ if (edges.size() == 1) {
+ Point2 a = edges[0]->point(0), b = edges[0]->point(1/3.), c = edges[0]->point(2/3.);
+ total += shoelace(a, b);
+ total += shoelace(b, c);
+ total += shoelace(c, a);
+ } else if (edges.size() == 2) {
+ Point2 a = edges[0]->point(0), b = edges[0]->point(.5), c = edges[1]->point(0), d = edges[1]->point(.5);
+ total += shoelace(a, b);
+ total += shoelace(b, c);
+ total += shoelace(c, d);
+ total += shoelace(d, a);
+ } else {
+ Point2 prev = edges.back()->point(0);
+ for (std::vector<EdgeHolder>::const_iterator edge = edges.begin(); edge != edges.end(); ++edge) {
+ Point2 cur = (*edge)->point(0);
+ total += shoelace(prev, cur);
+ prev = cur;
+ }
+ }
+ return sign(total);
+}
+
+void Contour::reverse() {
+ for (int i = (int) edges.size()/2; i > 0; --i)
+ EdgeHolder::swap(edges[i-1], edges[edges.size()-i]);
+ for (std::vector<EdgeHolder>::iterator edge = edges.begin(); edge != edges.end(); ++edge)
+ (*edge)->reverse();
+}
+
+}
diff --git a/thirdparty/msdfgen/core/Contour.h b/thirdparty/msdfgen/core/Contour.h
new file mode 100644
index 0000000000..f79b269582
--- /dev/null
+++ b/thirdparty/msdfgen/core/Contour.h
@@ -0,0 +1,34 @@
+
+#pragma once
+
+#include <vector>
+#include "EdgeHolder.h"
+
+namespace msdfgen {
+
+/// A single closed contour of a shape.
+class Contour {
+
+public:
+ /// The sequence of edges that make up the contour.
+ std::vector<EdgeHolder> edges;
+
+ /// Adds an edge to the contour.
+ void addEdge(const EdgeHolder &edge);
+#ifdef MSDFGEN_USE_CPP11
+ void addEdge(EdgeHolder &&edge);
+#endif
+ /// Creates a new edge in the contour and returns its reference.
+ EdgeHolder & addEdge();
+ /// Adjusts the bounding box to fit the contour.
+ void bound(double &l, double &b, double &r, double &t) const;
+ /// Adjusts the bounding box to fit the contour border's mitered corners.
+ void boundMiters(double &l, double &b, double &r, double &t, double border, double miterLimit, int polarity) const;
+ /// Computes the winding of the contour. Returns 1 if positive, -1 if negative.
+ int winding() const;
+ /// Reverses the sequence of edges on the contour.
+ void reverse();
+
+};
+
+}
diff --git a/thirdparty/msdfgen/core/EdgeColor.h b/thirdparty/msdfgen/core/EdgeColor.h
new file mode 100644
index 0000000000..9d49a5a89e
--- /dev/null
+++ b/thirdparty/msdfgen/core/EdgeColor.h
@@ -0,0 +1,18 @@
+
+#pragma once
+
+namespace msdfgen {
+
+/// Edge color specifies which color channels an edge belongs to.
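+/// (Values combine as bit flags: bit 0 = red, bit 1 = green, bit 2 = blue; e.g. YELLOW == RED|GREEN.)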
+enum EdgeColor {
+ BLACK = 0,
+ RED = 1,
+ GREEN = 2,
+ YELLOW = 3,
+ BLUE = 4,
+ MAGENTA = 5,
+ CYAN = 6,
+ WHITE = 7
+};
+
+}
diff --git a/thirdparty/msdfgen/core/EdgeHolder.cpp b/thirdparty/msdfgen/core/EdgeHolder.cpp
new file mode 100644
index 0000000000..1a8c5f66e9
--- /dev/null
+++ b/thirdparty/msdfgen/core/EdgeHolder.cpp
@@ -0,0 +1,77 @@
+
+#include "EdgeHolder.h"
+
+namespace msdfgen {
+
+void EdgeHolder::swap(EdgeHolder &a, EdgeHolder &b) {
+ EdgeSegment *tmp = a.edgeSegment;
+ a.edgeSegment = b.edgeSegment;
+ b.edgeSegment = tmp;
+}
+
+EdgeHolder::EdgeHolder() : edgeSegment(NULL) { }
+
+EdgeHolder::EdgeHolder(EdgeSegment *segment) : edgeSegment(segment) { }
+
+EdgeHolder::EdgeHolder(Point2 p0, Point2 p1, EdgeColor edgeColor) : edgeSegment(new LinearSegment(p0, p1, edgeColor)) { }
+
+EdgeHolder::EdgeHolder(Point2 p0, Point2 p1, Point2 p2, EdgeColor edgeColor) : edgeSegment(new QuadraticSegment(p0, p1, p2, edgeColor)) { }
+
+EdgeHolder::EdgeHolder(Point2 p0, Point2 p1, Point2 p2, Point2 p3, EdgeColor edgeColor) : edgeSegment(new CubicSegment(p0, p1, p2, p3, edgeColor)) { }
+
+EdgeHolder::EdgeHolder(const EdgeHolder &orig) : edgeSegment(orig.edgeSegment ? orig.edgeSegment->clone() : NULL) { }
+
+#ifdef MSDFGEN_USE_CPP11
+EdgeHolder::EdgeHolder(EdgeHolder &&orig) : edgeSegment(orig.edgeSegment) {
+ orig.edgeSegment = NULL;
+}
+#endif
+
+EdgeHolder::~EdgeHolder() {
+ delete edgeSegment;
+}
+
+EdgeHolder & EdgeHolder::operator=(const EdgeHolder &orig) {
+ if (this != &orig) {
+ delete edgeSegment;
+ edgeSegment = orig.edgeSegment ? orig.edgeSegment->clone() : NULL;
+ }
+ return *this;
+}
+
+#ifdef MSDFGEN_USE_CPP11
+EdgeHolder & EdgeHolder::operator=(EdgeHolder &&orig) {
+ if (this != &orig) {
+ delete edgeSegment;
+ edgeSegment = orig.edgeSegment;
+ orig.edgeSegment = NULL;
+ }
+ return *this;
+}
+#endif
+
+EdgeSegment & EdgeHolder::operator*() {
+ return *edgeSegment;
+}
+
+const EdgeSegment & EdgeHolder::operator*() const {
+ return *edgeSegment;
+}
+
+EdgeSegment * EdgeHolder::operator->() {
+ return edgeSegment;
+}
+
+const EdgeSegment * EdgeHolder::operator->() const {
+ return edgeSegment;
+}
+
+EdgeHolder::operator EdgeSegment *() {
+ return edgeSegment;
+}
+
+EdgeHolder::operator const EdgeSegment *() const {
+ return edgeSegment;
+}
+
+}
diff --git a/thirdparty/msdfgen/core/EdgeHolder.h b/thirdparty/msdfgen/core/EdgeHolder.h
new file mode 100644
index 0000000000..c4c5be7616
--- /dev/null
+++ b/thirdparty/msdfgen/core/EdgeHolder.h
@@ -0,0 +1,41 @@
+
+#pragma once
+
+#include "edge-segments.h"
+
+namespace msdfgen {
+
+/// Container for a single edge of dynamic type.
+class EdgeHolder {
+
+public:
+ /// Swaps the edges held by a and b.
+ static void swap(EdgeHolder &a, EdgeHolder &b);
+
+ EdgeHolder();
+ EdgeHolder(EdgeSegment *segment);
+ EdgeHolder(Point2 p0, Point2 p1, EdgeColor edgeColor = WHITE);
+ EdgeHolder(Point2 p0, Point2 p1, Point2 p2, EdgeColor edgeColor = WHITE);
+ EdgeHolder(Point2 p0, Point2 p1, Point2 p2, Point2 p3, EdgeColor edgeColor = WHITE);
+ EdgeHolder(const EdgeHolder &orig);
+#ifdef MSDFGEN_USE_CPP11
+ EdgeHolder(EdgeHolder &&orig);
+#endif
+ ~EdgeHolder();
+ EdgeHolder & operator=(const EdgeHolder &orig);
+#ifdef MSDFGEN_USE_CPP11
+ EdgeHolder & operator=(EdgeHolder &&orig);
+#endif
+ EdgeSegment & operator*();
+ const EdgeSegment & operator*() const;
+ EdgeSegment * operator->();
+ const EdgeSegment * operator->() const;
+ operator EdgeSegment *();
+ operator const EdgeSegment *() const;
+
+private:
+ EdgeSegment *edgeSegment;
+
+};
+
+}
diff --git a/thirdparty/msdfgen/core/MSDFErrorCorrection.cpp b/thirdparty/msdfgen/core/MSDFErrorCorrection.cpp
new file mode 100644
index 0000000000..7918597fd2
--- /dev/null
+++ b/thirdparty/msdfgen/core/MSDFErrorCorrection.cpp
@@ -0,0 +1,495 @@
+
+#include "MSDFErrorCorrection.h"
+
+#include <cstring>
+#include "arithmetics.hpp"
+#include "equation-solver.h"
+#include "EdgeColor.h"
+#include "bitmap-interpolation.hpp"
+#include "edge-selectors.h"
+#include "contour-combiners.h"
+#include "ShapeDistanceFinder.h"
+#include "generator-config.h"
+
+namespace msdfgen {
+
+#define ARTIFACT_T_EPSILON .01
+#define PROTECTION_RADIUS_TOLERANCE 1.001
+
+#define CLASSIFIER_FLAG_CANDIDATE 0x01
+#define CLASSIFIER_FLAG_ARTIFACT 0x02
+
+const double ErrorCorrectionConfig::defaultMinDeviationRatio = 1.11111111111111111;
+const double ErrorCorrectionConfig::defaultMinImproveRatio = 1.11111111111111111;
+
+/// The base artifact classifier recognizes artifacts based on the contents of the SDF alone.
+class BaseArtifactClassifier {
+public:
+ inline BaseArtifactClassifier(double span, bool protectedFlag) : span(span), protectedFlag(protectedFlag) { }
+ /// Evaluates if the median value xm interpolated at xt in the range between am at at and bm at bt indicates an artifact.
+ inline int rangeTest(double at, double bt, double xt, float am, float bm, float xm) const {
+ // For protected texels, only consider inversion artifacts (interpolated median has different sign than boundaries). For the rest, it is sufficient that the interpolated median is outside its boundaries.
+ if ((am > .5f && bm > .5f && xm <= .5f) || (am < .5f && bm < .5f && xm >= .5f) || (!protectedFlag && median(am, bm, xm) != xm)) {
+ double axSpan = (xt-at)*span, bxSpan = (bt-xt)*span;
+ // Check if the interpolated median's value is in the expected range based on its distance (span) from boundaries a, b.
+ if (!(xm >= am-axSpan && xm <= am+axSpan && xm >= bm-bxSpan && xm <= bm+bxSpan))
+ return CLASSIFIER_FLAG_CANDIDATE|CLASSIFIER_FLAG_ARTIFACT;
+ return CLASSIFIER_FLAG_CANDIDATE;
+ }
+ return 0;
+ }
+ /// Returns true if the combined results of the tests performed on the median value m interpolated at t indicate an artifact.
+ inline bool evaluate(double t, float m, int flags) const {
+        return (flags&CLASSIFIER_FLAG_ARTIFACT) != 0;
+ }
+private:
+ double span;
+ bool protectedFlag;
+};
+
+/// The shape distance checker evaluates the exact shape distance to find additional artifacts at a significant performance cost.
+template <template <typename> class ContourCombiner, int N>
+class ShapeDistanceChecker {
+public:
+ class ArtifactClassifier : public BaseArtifactClassifier {
+ public:
+ inline ArtifactClassifier(ShapeDistanceChecker *parent, const Vector2 &direction, double span) : BaseArtifactClassifier(span, parent->protectedFlag), parent(parent), direction(direction) { }
+ /// Returns true if the combined results of the tests performed on the median value m interpolated at t indicate an artifact.
+ inline bool evaluate(double t, float m, int flags) const {
+ if (flags&CLASSIFIER_FLAG_CANDIDATE) {
+ // Skip expensive distance evaluation if the point has already been classified as an artifact by the base classifier.
+ if (flags&CLASSIFIER_FLAG_ARTIFACT)
+ return true;
+ Vector2 tVector = t*direction;
+ float oldMSD[N], newMSD[3];
+ // Compute the color that would be currently interpolated at the artifact candidate's position.
+ Point2 sdfCoord = parent->sdfCoord+tVector;
+ interpolate(oldMSD, parent->sdf, sdfCoord);
+ // Compute the color that would be interpolated at the artifact candidate's position if error correction was applied on the current texel.
+ double aWeight = (1-fabs(tVector.x))*(1-fabs(tVector.y));
+ float aPSD = median(parent->msd[0], parent->msd[1], parent->msd[2]);
+ newMSD[0] = float(oldMSD[0]+aWeight*(aPSD-parent->msd[0]));
+ newMSD[1] = float(oldMSD[1]+aWeight*(aPSD-parent->msd[1]));
+ newMSD[2] = float(oldMSD[2]+aWeight*(aPSD-parent->msd[2]));
+ // Compute the evaluated distance (interpolated median) before and after error correction, as well as the exact shape distance.
+ float oldPSD = median(oldMSD[0], oldMSD[1], oldMSD[2]);
+ float newPSD = median(newMSD[0], newMSD[1], newMSD[2]);
+ float refPSD = float(parent->invRange*parent->distanceFinder.distance(parent->shapeCoord+tVector*parent->texelSize)+.5);
+ // Compare the differences of the exact distance and the before and after distances.
+ return parent->minImproveRatio*fabsf(newPSD-refPSD) < double(fabsf(oldPSD-refPSD));
+ }
+ return false;
+ }
+ private:
+ ShapeDistanceChecker *parent;
+ Vector2 direction;
+ };
+ Point2 shapeCoord, sdfCoord;
+ const float *msd;
+ bool protectedFlag;
+ inline ShapeDistanceChecker(const BitmapConstRef<float, N> &sdf, const Shape &shape, const Projection &projection, double invRange, double minImproveRatio) : distanceFinder(shape), sdf(sdf), invRange(invRange), minImproveRatio(minImproveRatio) {
+ texelSize = projection.unprojectVector(Vector2(1));
+ }
+ inline ArtifactClassifier classifier(const Vector2 &direction, double span) {
+ return ArtifactClassifier(this, direction, span);
+ }
+private:
+ ShapeDistanceFinder<ContourCombiner<PseudoDistanceSelector> > distanceFinder;
+ BitmapConstRef<float, N> sdf;
+ double invRange;
+ Vector2 texelSize;
+ double minImproveRatio;
+};
+
+MSDFErrorCorrection::MSDFErrorCorrection() { }
+
+MSDFErrorCorrection::MSDFErrorCorrection(const BitmapRef<byte, 1> &stencil, const Projection &projection, double range) : stencil(stencil), projection(projection) {
+ invRange = 1/range;
+ minDeviationRatio = ErrorCorrectionConfig::defaultMinDeviationRatio;
+ minImproveRatio = ErrorCorrectionConfig::defaultMinImproveRatio;
+ memset(stencil.pixels, 0, sizeof(byte)*stencil.width*stencil.height);
+}
+
+void MSDFErrorCorrection::setMinDeviationRatio(double minDeviationRatio) {
+ this->minDeviationRatio = minDeviationRatio;
+}
+
+void MSDFErrorCorrection::setMinImproveRatio(double minImproveRatio) {
+ this->minImproveRatio = minImproveRatio;
+}
+
+void MSDFErrorCorrection::protectCorners(const Shape &shape) {
+ for (std::vector<Contour>::const_iterator contour = shape.contours.begin(); contour != shape.contours.end(); ++contour)
+ if (!contour->edges.empty()) {
+ const EdgeSegment *prevEdge = contour->edges.back();
+ for (std::vector<EdgeHolder>::const_iterator edge = contour->edges.begin(); edge != contour->edges.end(); ++edge) {
+ int commonColor = prevEdge->color&(*edge)->color;
+ // If the color changes from prevEdge to edge, this is a corner.
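+                // (commonColor&(commonColor-1) clears the lowest set bit, so the
+                // test below passes iff at most one color channel is shared.)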
+ if (!(commonColor&(commonColor-1))) {
+ // Find the four texels that envelop the corner and mark them as protected.
+ Point2 p = projection.project((*edge)->point(0));
+ if (shape.inverseYAxis)
+ p.y = stencil.height-p.y;
+ int l = (int) floor(p.x-.5);
+ int b = (int) floor(p.y-.5);
+ int r = l+1;
+ int t = b+1;
+ // Check that the positions are within bounds.
+ if (l < stencil.width && b < stencil.height && r >= 0 && t >= 0) {
+ if (l >= 0 && b >= 0)
+ *stencil(l, b) |= (byte) PROTECTED;
+ if (r < stencil.width && b >= 0)
+ *stencil(r, b) |= (byte) PROTECTED;
+ if (l >= 0 && t < stencil.height)
+ *stencil(l, t) |= (byte) PROTECTED;
+ if (r < stencil.width && t < stencil.height)
+ *stencil(r, t) |= (byte) PROTECTED;
+ }
+ }
+ prevEdge = *edge;
+ }
+ }
+}
+
+/// Determines if the channel contributes to an edge between the two texels a, b.
+static bool edgeBetweenTexelsChannel(const float *a, const float *b, int channel) {
+ // Find interpolation ratio t (0 < t < 1) where an edge is expected (mix(a[channel], b[channel], t) == 0.5).
+ double t = (a[channel]-.5)/(a[channel]-b[channel]);
+ if (t > 0 && t < 1) {
+ // Interpolate channel values at t.
+ float c[3] = {
+ mix(a[0], b[0], t),
+ mix(a[1], b[1], t),
+ mix(a[2], b[2], t)
+ };
+ // This is only an edge if the zero-distance channel is the median.
+ return median(c[0], c[1], c[2]) == c[channel];
+ }
+ return false;
+}
+
+/// Returns a bit mask of which channels contribute to an edge between the two texels a, b.
+static int edgeBetweenTexels(const float *a, const float *b) {
+ return (
+ RED*edgeBetweenTexelsChannel(a, b, 0)+
+ GREEN*edgeBetweenTexelsChannel(a, b, 1)+
+ BLUE*edgeBetweenTexelsChannel(a, b, 2)
+ );
+}
+
+/// Marks texel as protected if one of its non-median channels is present in the channel mask.
+static void protectExtremeChannels(byte *stencil, const float *msd, float m, int mask) {
+ if (
+ (mask&RED && msd[0] != m) ||
+ (mask&GREEN && msd[1] != m) ||
+ (mask&BLUE && msd[2] != m)
+ )
+ *stencil |= (byte) MSDFErrorCorrection::PROTECTED;
+}
+
+template <int N>
+void MSDFErrorCorrection::protectEdges(const BitmapConstRef<float, N> &sdf) {
+ float radius;
+ // Horizontal texel pairs
+ radius = float(PROTECTION_RADIUS_TOLERANCE*projection.unprojectVector(Vector2(invRange, 0)).length());
+ for (int y = 0; y < sdf.height; ++y) {
+ const float *left = sdf(0, y);
+ const float *right = sdf(1, y);
+ for (int x = 0; x < sdf.width-1; ++x) {
+ float lm = median(left[0], left[1], left[2]);
+ float rm = median(right[0], right[1], right[2]);
+ if (fabsf(lm-.5f)+fabsf(rm-.5f) < radius) {
+ int mask = edgeBetweenTexels(left, right);
+ protectExtremeChannels(stencil(x, y), left, lm, mask);
+ protectExtremeChannels(stencil(x+1, y), right, rm, mask);
+ }
+ left += N, right += N;
+ }
+ }
+ // Vertical texel pairs
+ radius = float(PROTECTION_RADIUS_TOLERANCE*projection.unprojectVector(Vector2(0, invRange)).length());
+ for (int y = 0; y < sdf.height-1; ++y) {
+ const float *bottom = sdf(0, y);
+ const float *top = sdf(0, y+1);
+ for (int x = 0; x < sdf.width; ++x) {
+ float bm = median(bottom[0], bottom[1], bottom[2]);
+ float tm = median(top[0], top[1], top[2]);
+ if (fabsf(bm-.5f)+fabsf(tm-.5f) < radius) {
+ int mask = edgeBetweenTexels(bottom, top);
+ protectExtremeChannels(stencil(x, y), bottom, bm, mask);
+ protectExtremeChannels(stencil(x, y+1), top, tm, mask);
+ }
+ bottom += N, top += N;
+ }
+ }
+ // Diagonal texel pairs
+ radius = float(PROTECTION_RADIUS_TOLERANCE*projection.unprojectVector(Vector2(invRange)).length());
+ for (int y = 0; y < sdf.height-1; ++y) {
+ const float *lb = sdf(0, y);
+ const float *rb = sdf(1, y);
+ const float *lt = sdf(0, y+1);
+ const float *rt = sdf(1, y+1);
+ for (int x = 0; x < sdf.width-1; ++x) {
+ float mlb = median(lb[0], lb[1], lb[2]);
+ float mrb = median(rb[0], rb[1], rb[2]);
+ float mlt = median(lt[0], lt[1], lt[2]);
+ float mrt = median(rt[0], rt[1], rt[2]);
+ if (fabsf(mlb-.5f)+fabsf(mrt-.5f) < radius) {
+ int mask = edgeBetweenTexels(lb, rt);
+ protectExtremeChannels(stencil(x, y), lb, mlb, mask);
+ protectExtremeChannels(stencil(x+1, y+1), rt, mrt, mask);
+ }
+ if (fabsf(mrb-.5f)+fabsf(mlt-.5f) < radius) {
+ int mask = edgeBetweenTexels(rb, lt);
+ protectExtremeChannels(stencil(x+1, y), rb, mrb, mask);
+ protectExtremeChannels(stencil(x, y+1), lt, mlt, mask);
+ }
+ lb += N, rb += N, lt += N, rt += N;
+ }
+ }
+}
+
+void MSDFErrorCorrection::protectAll() {
+ byte *end = stencil.pixels+stencil.width*stencil.height;
+ for (byte *mask = stencil.pixels; mask < end; ++mask)
+ *mask |= (byte) PROTECTED;
+}
+
+/// Returns the median of the linear interpolation of texels a, b at t.
+static float interpolatedMedian(const float *a, const float *b, double t) {
+ return median(
+ mix(a[0], b[0], t),
+ mix(a[1], b[1], t),
+ mix(a[2], b[2], t)
+ );
+}
+/// Returns the median of the bilinear interpolation with the given constant, linear, and quadratic terms at t.
+static float interpolatedMedian(const float *a, const float *l, const float *q, double t) {
+ return float(median(
+ t*(t*q[0]+l[0])+a[0],
+ t*(t*q[1]+l[1])+a[1],
+ t*(t*q[2]+l[2])+a[2]
+ ));
+}
+
+/// Determines if the interpolated median xm is an artifact.
+static bool isArtifact(bool isProtected, double axSpan, double bxSpan, float am, float bm, float xm) {
+ return (
+ // For protected texels, only report an artifact if it would cause fill inversion (change between positive and negative distance).
+ (!isProtected || (am > .5f && bm > .5f && xm <= .5f) || (am < .5f && bm < .5f && xm >= .5f)) &&
+ // This is an artifact if the interpolated median is outside the range of possible values based on its distance from a, b.
+ !(xm >= am-axSpan && xm <= am+axSpan && xm >= bm-bxSpan && xm <= bm+bxSpan)
+ );
+}
+
+/// Checks if a linear interpolation artifact will occur at a point where two specific color channels are equal - such points have extreme median values.
+template <class ArtifactClassifier>
+static bool hasLinearArtifactInner(const ArtifactClassifier &artifactClassifier, float am, float bm, const float *a, const float *b, float dA, float dB) {
+ // Find interpolation ratio t (0 < t < 1) where two color channels are equal (mix(dA, dB, t) == 0).
+ double t = (double) dA/(dA-dB);
+ if (t > ARTIFACT_T_EPSILON && t < 1-ARTIFACT_T_EPSILON) {
+ // Interpolate median at t and let the classifier decide if its value indicates an artifact.
+ float xm = interpolatedMedian(a, b, t);
+ return artifactClassifier.evaluate(t, xm, artifactClassifier.rangeTest(0, 1, t, am, bm, xm));
+ }
+ return false;
+}
+
+/// Checks if a bilinear interpolation artifact will occur at a point where two specific color channels are equal - such points have extreme median values.
+template <class ArtifactClassifier>
+static bool hasDiagonalArtifactInner(const ArtifactClassifier &artifactClassifier, float am, float dm, const float *a, const float *l, const float *q, float dA, float dBC, float dD, double tEx0, double tEx1) {
+ // Find interpolation ratios t (0 < t[i] < 1) where two color channels are equal.
+ double t[2];
+ int solutions = solveQuadratic(t, dD-dBC+dA, dBC-dA-dA, dA);
+ for (int i = 0; i < solutions; ++i) {
+ // Solutions t[i] == 0 and t[i] == 1 are singularities and occur very often because two channels are usually equal at texels.
+ if (t[i] > ARTIFACT_T_EPSILON && t[i] < 1-ARTIFACT_T_EPSILON) {
+ // Interpolate median xm at t.
+ float xm = interpolatedMedian(a, l, q, t[i]);
+ // Determine if xm deviates too much from medians of a, d.
+ int rangeFlags = artifactClassifier.rangeTest(0, 1, t[i], am, dm, xm);
+ // Additionally, check xm against the interpolated medians at the local extremes tEx0, tEx1.
+ double tEnd[2];
+ float em[2];
+ // tEx0
+ if (tEx0 > 0 && tEx0 < 1) {
+ tEnd[0] = 0, tEnd[1] = 1;
+ em[0] = am, em[1] = dm;
+ tEnd[tEx0 > t[i]] = tEx0;
+ em[tEx0 > t[i]] = interpolatedMedian(a, l, q, tEx0);
+ rangeFlags |= artifactClassifier.rangeTest(tEnd[0], tEnd[1], t[i], em[0], em[1], xm);
+ }
+ // tEx1
+ if (tEx1 > 0 && tEx1 < 1) {
+ tEnd[0] = 0, tEnd[1] = 1;
+ em[0] = am, em[1] = dm;
+ tEnd[tEx1 > t[i]] = tEx1;
+ em[tEx1 > t[i]] = interpolatedMedian(a, l, q, tEx1);
+ rangeFlags |= artifactClassifier.rangeTest(tEnd[0], tEnd[1], t[i], em[0], em[1], xm);
+ }
+ if (artifactClassifier.evaluate(t[i], xm, rangeFlags))
+ return true;
+ }
+ }
+ return false;
+}
+
+/// Checks if a linear interpolation artifact will occur in between two horizontally or vertically adjacent texels a, b.
+template <class ArtifactClassifier>
+static bool hasLinearArtifact(const ArtifactClassifier &artifactClassifier, float am, const float *a, const float *b) {
+ float bm = median(b[0], b[1], b[2]);
+ return (
+ // Out of the pair, only report artifacts for the texel further from the edge to minimize side effects.
+ fabsf(am-.5f) >= fabsf(bm-.5f) && (
+ // Check points where each pair of color channels meets.
+ hasLinearArtifactInner(artifactClassifier, am, bm, a, b, a[1]-a[0], b[1]-b[0]) ||
+ hasLinearArtifactInner(artifactClassifier, am, bm, a, b, a[2]-a[1], b[2]-b[1]) ||
+ hasLinearArtifactInner(artifactClassifier, am, bm, a, b, a[0]-a[2], b[0]-b[2])
+ )
+ );
+}
+
+/// Checks if a bilinear interpolation artifact will occur in between two diagonally adjacent texels a, d (with b, c forming the other diagonal).
+template <class ArtifactClassifier>
+static bool hasDiagonalArtifact(const ArtifactClassifier &artifactClassifier, float am, const float *a, const float *b, const float *c, const float *d) {
+ float dm = median(d[0], d[1], d[2]);
+ // Out of the pair, only report artifacts for the texel further from the edge to minimize side effects.
+ if (fabsf(am-.5f) >= fabsf(dm-.5f)) {
+ float abc[3] = {
+ a[0]-b[0]-c[0],
+ a[1]-b[1]-c[1],
+ a[2]-b[2]-c[2]
+ };
+ // Compute the linear terms for bilinear interpolation.
+ float l[3] = {
+ -a[0]-abc[0],
+ -a[1]-abc[1],
+ -a[2]-abc[2]
+ };
+ // Compute the quadratic terms for bilinear interpolation.
+ float q[3] = {
+ d[0]+abc[0],
+ d[1]+abc[1],
+ d[2]+abc[2]
+ };
+ // Compute interpolation ratios tEx (0 < tEx[i] < 1) for the local extremes of each color channel (the derivative 2*q[i]*tEx[i]+l[i] == 0).
+ double tEx[3] = {
+ -.5*l[0]/q[0],
+ -.5*l[1]/q[1],
+ -.5*l[2]/q[2]
+ };
+ // Check points where each pair of color channels meets.
+ return (
+ hasDiagonalArtifactInner(artifactClassifier, am, dm, a, l, q, a[1]-a[0], b[1]-b[0]+c[1]-c[0], d[1]-d[0], tEx[0], tEx[1]) ||
+ hasDiagonalArtifactInner(artifactClassifier, am, dm, a, l, q, a[2]-a[1], b[2]-b[1]+c[2]-c[1], d[2]-d[1], tEx[1], tEx[2]) ||
+ hasDiagonalArtifactInner(artifactClassifier, am, dm, a, l, q, a[0]-a[2], b[0]-b[2]+c[0]-c[2], d[0]-d[2], tEx[2], tEx[0])
+ );
+ }
+ return false;
+}
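Aside: the abc/l/q arrays above encode the restriction of bilinear interpolation to the diagonal x = y = t, which collapses to the quadratic a + l*t + q*t*t. A minimal standalone check of that identity (not part of the patch; the corner values are arbitrary):

```cpp
#include <cassert>
#include <cmath>
#include <cstdio>

// Standard bilinear interpolation over the unit square with corners
// a=(0,0), b=(1,0), c=(0,1), d=(1,1).
static double bilinear(double a, double b, double c, double d, double x, double y) {
    return a*(1-x)*(1-y)+b*x*(1-y)+c*(1-x)*y+d*x*y;
}

int main() {
    double a = .2, b = .9, c = .1, d = .7;
    double l = b+c-2*a; // linear term: -a-(a-b-c)
    double q = a-b-c+d; // quadratic term: d+(a-b-c)
    for (double t = 0; t <= 1; t += .125) {
        double viaQuadratic = t*(t*q+l)+a; // same form as interpolatedMedian's terms
        assert(std::fabs(bilinear(a, b, c, d, t, t)-viaQuadratic) < 1e-12);
    }
    std::printf("diagonal reduces to quadratic: OK\n");
    return 0;
}
```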
+
+template <int N>
+void MSDFErrorCorrection::findErrors(const BitmapConstRef<float, N> &sdf) {
+ // Compute the expected deltas between values of horizontally, vertically, and diagonally adjacent texels.
+ double hSpan = minDeviationRatio*projection.unprojectVector(Vector2(invRange, 0)).length();
+ double vSpan = minDeviationRatio*projection.unprojectVector(Vector2(0, invRange)).length();
+ double dSpan = minDeviationRatio*projection.unprojectVector(Vector2(invRange)).length();
+ // Inspect all texels.
+ for (int y = 0; y < sdf.height; ++y) {
+ for (int x = 0; x < sdf.width; ++x) {
+ const float *c = sdf(x, y);
+ float cm = median(c[0], c[1], c[2]);
+ bool protectedFlag = (*stencil(x, y)&PROTECTED) != 0;
+ const float *l = NULL, *b = NULL, *r = NULL, *t = NULL;
+ // Mark current texel c with the error flag if an artifact occurs when it's interpolated with any of its 8 neighbors.
+ *stencil(x, y) |= (byte) (ERROR*(
+ (x > 0 && ((l = sdf(x-1, y)), hasLinearArtifact(BaseArtifactClassifier(hSpan, protectedFlag), cm, c, l))) ||
+ (y > 0 && ((b = sdf(x, y-1)), hasLinearArtifact(BaseArtifactClassifier(vSpan, protectedFlag), cm, c, b))) ||
+ (x < sdf.width-1 && ((r = sdf(x+1, y)), hasLinearArtifact(BaseArtifactClassifier(hSpan, protectedFlag), cm, c, r))) ||
+ (y < sdf.height-1 && ((t = sdf(x, y+1)), hasLinearArtifact(BaseArtifactClassifier(vSpan, protectedFlag), cm, c, t))) ||
+ (x > 0 && y > 0 && hasDiagonalArtifact(BaseArtifactClassifier(dSpan, protectedFlag), cm, c, l, b, sdf(x-1, y-1))) ||
+ (x < sdf.width-1 && y > 0 && hasDiagonalArtifact(BaseArtifactClassifier(dSpan, protectedFlag), cm, c, r, b, sdf(x+1, y-1))) ||
+ (x > 0 && y < sdf.height-1 && hasDiagonalArtifact(BaseArtifactClassifier(dSpan, protectedFlag), cm, c, l, t, sdf(x-1, y+1))) ||
+ (x < sdf.width-1 && y < sdf.height-1 && hasDiagonalArtifact(BaseArtifactClassifier(dSpan, protectedFlag), cm, c, r, t, sdf(x+1, y+1)))
+ ));
+ }
+ }
+}
+
+template <template <typename> class ContourCombiner, int N>
+void MSDFErrorCorrection::findErrors(const BitmapConstRef<float, N> &sdf, const Shape &shape) {
+ // Compute the expected deltas between values of horizontally, vertically, and diagonally adjacent texels.
+ double hSpan = minDeviationRatio*projection.unprojectVector(Vector2(invRange, 0)).length();
+ double vSpan = minDeviationRatio*projection.unprojectVector(Vector2(0, invRange)).length();
+ double dSpan = minDeviationRatio*projection.unprojectVector(Vector2(invRange)).length();
+#ifdef MSDFGEN_USE_OPENMP
+ #pragma omp parallel
+#endif
+ {
+ ShapeDistanceChecker<ContourCombiner, N> shapeDistanceChecker(sdf, shape, projection, invRange, minImproveRatio);
+ bool rightToLeft = false;
+ // Inspect all texels.
+#ifdef MSDFGEN_USE_OPENMP
+ #pragma omp for
+#endif
+ for (int y = 0; y < sdf.height; ++y) {
+ int row = shape.inverseYAxis ? sdf.height-y-1 : y;
+ for (int col = 0; col < sdf.width; ++col) {
+ int x = rightToLeft ? sdf.width-col-1 : col;
+ if ((*stencil(x, row)&ERROR))
+ continue;
+ const float *c = sdf(x, row);
+ shapeDistanceChecker.shapeCoord = projection.unproject(Point2(x+.5, y+.5));
+ shapeDistanceChecker.sdfCoord = Point2(x+.5, row+.5);
+ shapeDistanceChecker.msd = c;
+ shapeDistanceChecker.protectedFlag = (*stencil(x, row)&PROTECTED) != 0;
+ float cm = median(c[0], c[1], c[2]);
+ const float *l = NULL, *b = NULL, *r = NULL, *t = NULL;
+ // Mark current texel c with the error flag if an artifact occurs when it's interpolated with any of its 8 neighbors.
+ *stencil(x, row) |= (byte) (ERROR*(
+ (x > 0 && ((l = sdf(x-1, row)), hasLinearArtifact(shapeDistanceChecker.classifier(Vector2(-1, 0), hSpan), cm, c, l))) ||
+ (row > 0 && ((b = sdf(x, row-1)), hasLinearArtifact(shapeDistanceChecker.classifier(Vector2(0, -1), vSpan), cm, c, b))) ||
+ (x < sdf.width-1 && ((r = sdf(x+1, row)), hasLinearArtifact(shapeDistanceChecker.classifier(Vector2(+1, 0), hSpan), cm, c, r))) ||
+ (row < sdf.height-1 && ((t = sdf(x, row+1)), hasLinearArtifact(shapeDistanceChecker.classifier(Vector2(0, +1), vSpan), cm, c, t))) ||
+ (x > 0 && row > 0 && hasDiagonalArtifact(shapeDistanceChecker.classifier(Vector2(-1, -1), dSpan), cm, c, l, b, sdf(x-1, row-1))) ||
+ (x < sdf.width-1 && row > 0 && hasDiagonalArtifact(shapeDistanceChecker.classifier(Vector2(+1, -1), dSpan), cm, c, r, b, sdf(x+1, row-1))) ||
+ (x > 0 && row < sdf.height-1 && hasDiagonalArtifact(shapeDistanceChecker.classifier(Vector2(-1, +1), dSpan), cm, c, l, t, sdf(x-1, row+1))) ||
+ (x < sdf.width-1 && row < sdf.height-1 && hasDiagonalArtifact(shapeDistanceChecker.classifier(Vector2(+1, +1), dSpan), cm, c, r, t, sdf(x+1, row+1)))
+ ));
+ }
+ rightToLeft = !rightToLeft;
+ }
+ }
+}
+
+template <int N>
+void MSDFErrorCorrection::apply(const BitmapRef<float, N> &sdf) const {
+ int texelCount = sdf.width*sdf.height;
+ const byte *mask = stencil.pixels;
+ float *texel = sdf.pixels;
+ for (int i = 0; i < texelCount; ++i) {
+ if (*mask&ERROR) {
+ // Set all color channels to the median.
+ float m = median(texel[0], texel[1], texel[2]);
+ texel[0] = m, texel[1] = m, texel[2] = m;
+ }
+ ++mask;
+ texel += N;
+ }
+}
+
+BitmapConstRef<byte, 1> MSDFErrorCorrection::getStencil() const {
+ return stencil;
+}
+
+template void MSDFErrorCorrection::protectEdges(const BitmapConstRef<float, 3> &sdf);
+template void MSDFErrorCorrection::protectEdges(const BitmapConstRef<float, 4> &sdf);
+template void MSDFErrorCorrection::findErrors(const BitmapConstRef<float, 3> &sdf);
+template void MSDFErrorCorrection::findErrors(const BitmapConstRef<float, 4> &sdf);
+template void MSDFErrorCorrection::findErrors<SimpleContourCombiner>(const BitmapConstRef<float, 3> &sdf, const Shape &shape);
+template void MSDFErrorCorrection::findErrors<SimpleContourCombiner>(const BitmapConstRef<float, 4> &sdf, const Shape &shape);
+template void MSDFErrorCorrection::findErrors<OverlappingContourCombiner>(const BitmapConstRef<float, 3> &sdf, const Shape &shape);
+template void MSDFErrorCorrection::findErrors<OverlappingContourCombiner>(const BitmapConstRef<float, 4> &sdf, const Shape &shape);
+template void MSDFErrorCorrection::apply(const BitmapRef<float, 3> &sdf) const;
+template void MSDFErrorCorrection::apply(const BitmapRef<float, 4> &sdf) const;
+
+}
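For context, the class is driven from outside; below is a hedged sketch of the intended call order for an SDF-only correction pass. The real driver is msdf-error-correction.cpp elsewhere in this patch; the Bitmap container, its BitmapRef conversions, and the byte typedef are assumed from Bitmap.h/BitmapRef.hpp, and correctSketch is a hypothetical helper name.

```cpp
#include "MSDFErrorCorrection.h"
#include "Bitmap.h"

// Hypothetical helper, not part of msdfgen: SDF-only error correction pass.
static void correctSketch(const msdfgen::BitmapRef<float, 3> &sdf, const msdfgen::Shape &shape,
                          const msdfgen::Projection &projection, double range) {
    msdfgen::Bitmap<msdfgen::byte, 1> stencilBuffer(sdf.width, sdf.height);
    msdfgen::MSDFErrorCorrection ec(stencilBuffer, projection, range);
    ec.protectCorners(shape);                                // keep corner texels intact
    ec.protectEdges(msdfgen::BitmapConstRef<float, 3>(sdf)); // keep edge-forming texels intact
    ec.findErrors(msdfgen::BitmapConstRef<float, 3>(sdf));   // flag artifact candidates in the stencil
    ec.apply(sdf);                                           // collapse flagged texels to their median
}
```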
diff --git a/thirdparty/msdfgen/core/MSDFErrorCorrection.h b/thirdparty/msdfgen/core/MSDFErrorCorrection.h
new file mode 100644
index 0000000000..c2e92fbce7
--- /dev/null
+++ b/thirdparty/msdfgen/core/MSDFErrorCorrection.h
@@ -0,0 +1,56 @@
+
+#pragma once
+
+#include "Projection.h"
+#include "Shape.h"
+#include "BitmapRef.hpp"
+
+namespace msdfgen {
+
+/// Performs error correction on a computed MSDF to eliminate interpolation artifacts. This is a low-level class; you may want to use the API in msdf-error-correction.h instead.
+class MSDFErrorCorrection {
+
+public:
+ /// Stencil flags.
+ enum Flags {
+ /// Texel marked as potentially causing interpolation errors.
+ ERROR = 1,
+ /// Texel marked as protected. Protected texels are only given the error flag if they cause inversion artifacts.
+ PROTECTED = 2
+ };
+
+ MSDFErrorCorrection();
+ explicit MSDFErrorCorrection(const BitmapRef<byte, 1> &stencil, const Projection &projection, double range);
+ /// Sets the minimum ratio between the actual and maximum expected distance delta to be considered an error.
+ void setMinDeviationRatio(double minDeviationRatio);
+ /// Sets the minimum ratio between the pre-correction distance error and the post-correction distance error.
+ void setMinImproveRatio(double minImproveRatio);
+ /// Flags all texels that are interpolated at corners as protected.
+ void protectCorners(const Shape &shape);
+ /// Flags all texels that contribute to edges as protected.
+ template <int N>
+ void protectEdges(const BitmapConstRef<float, N> &sdf);
+ /// Flags all texels as protected.
+ void protectAll();
+ /// Flags texels that are expected to cause interpolation artifacts based on analysis of the SDF only.
+ template <int N>
+ void findErrors(const BitmapConstRef<float, N> &sdf);
+ /// Flags texels that are expected to cause interpolation artifacts based on analysis of the SDF and comparison with the exact shape distance.
+ template <template <typename> class ContourCombiner, int N>
+ void findErrors(const BitmapConstRef<float, N> &sdf, const Shape &shape);
+ /// Modifies the MSDF so that all texels with the error flag are converted to single-channel.
+ template <int N>
+ void apply(const BitmapRef<float, N> &sdf) const;
+ /// Returns the stencil in its current state (see Flags).
+ BitmapConstRef<byte, 1> getStencil() const;
+
+private:
+ BitmapRef<byte, 1> stencil;
+ Projection projection;
+ double invRange;
+ double minDeviationRatio;
+ double minImproveRatio;
+
+};
+
+}
diff --git a/thirdparty/msdfgen/core/Projection.cpp b/thirdparty/msdfgen/core/Projection.cpp
new file mode 100644
index 0000000000..fa2f5e2592
--- /dev/null
+++ b/thirdparty/msdfgen/core/Projection.cpp
@@ -0,0 +1,42 @@
+
+#include "Projection.h"
+
+namespace msdfgen {
+
+Projection::Projection() : scale(1), translate(0) { }
+
+Projection::Projection(const Vector2 &scale, const Vector2 &translate) : scale(scale), translate(translate) { }
+
+Point2 Projection::project(const Point2 &coord) const {
+ return scale*(coord+translate);
+}
+
+Point2 Projection::unproject(const Point2 &coord) const {
+ return coord/scale-translate;
+}
+
+Vector2 Projection::projectVector(const Vector2 &vector) const {
+ return scale*vector;
+}
+
+Vector2 Projection::unprojectVector(const Vector2 &vector) const {
+ return vector/scale;
+}
+
+double Projection::projectX(double x) const {
+ return scale.x*(x+translate.x);
+}
+
+double Projection::projectY(double y) const {
+ return scale.y*(y+translate.y);
+}
+
+double Projection::unprojectX(double x) const {
+ return x/scale.x-translate.x;
+}
+
+double Projection::unprojectY(double y) const {
+ return y/scale.y-translate.y;
+}
+
+}
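Sanity sketch (not part of the patch): project and unproject are exact inverses, pixel = scale*(shape+translate), so coordinates round-trip.

```cpp
#include <cassert>
#include "Projection.h"

int main() {
    msdfgen::Projection projection(msdfgen::Vector2(2, 2), msdfgen::Vector2(8, 8));
    msdfgen::Point2 shapePoint(3.5, -1.25);
    msdfgen::Point2 pixel = projection.project(shapePoint); // (2*(3.5+8), 2*(-1.25+8)) == (23, 13.5)
    msdfgen::Point2 back = projection.unproject(pixel);     // (23/2-8, 13.5/2-8) == (3.5, -1.25)
    assert(back == shapePoint);
    return 0;
}
```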
diff --git a/thirdparty/msdfgen/core/Projection.h b/thirdparty/msdfgen/core/Projection.h
new file mode 100644
index 0000000000..7cdb1c307a
--- /dev/null
+++ b/thirdparty/msdfgen/core/Projection.h
@@ -0,0 +1,37 @@
+
+#pragma once
+
+#include "Vector2.h"
+
+namespace msdfgen {
+
+/// A transformation from shape coordinates to pixel coordinates.
+class Projection {
+
+public:
+ Projection();
+ Projection(const Vector2 &scale, const Vector2 &translate);
+ /// Converts the shape coordinate to pixel coordinate.
+ Point2 project(const Point2 &coord) const;
+ /// Converts the pixel coordinate to shape coordinate.
+ Point2 unproject(const Point2 &coord) const;
+ /// Converts the vector to pixel coordinate space.
+ Vector2 projectVector(const Vector2 &vector) const;
+ /// Converts the vector from pixel coordinate space.
+ Vector2 unprojectVector(const Vector2 &vector) const;
+ /// Converts the X-coordinate from shape to pixel coordinate space.
+ double projectX(double x) const;
+ /// Converts the Y-coordinate from shape to pixel coordinate space.
+ double projectY(double y) const;
+ /// Converts the X-coordinate from pixel to shape coordinate space.
+ double unprojectX(double x) const;
+ /// Converts the Y-coordinate from pixel to shape coordinate space.
+ double unprojectY(double y) const;
+
+private:
+ Vector2 scale;
+ Vector2 translate;
+
+};
+
+}
diff --git a/thirdparty/msdfgen/core/Scanline.cpp b/thirdparty/msdfgen/core/Scanline.cpp
new file mode 100644
index 0000000000..8e5352dbf6
--- /dev/null
+++ b/thirdparty/msdfgen/core/Scanline.cpp
@@ -0,0 +1,125 @@
+
+#include "Scanline.h"
+
+#include <algorithm>
+#include "arithmetics.hpp"
+
+namespace msdfgen {
+
+static int compareIntersections(const void *a, const void *b) {
+ return sign(reinterpret_cast<const Scanline::Intersection *>(a)->x-reinterpret_cast<const Scanline::Intersection *>(b)->x);
+}
+
+bool interpretFillRule(int intersections, FillRule fillRule) {
+ switch (fillRule) {
+ case FILL_NONZERO:
+ return intersections != 0;
+ case FILL_ODD:
+ return intersections&1;
+ case FILL_POSITIVE:
+ return intersections > 0;
+ case FILL_NEGATIVE:
+ return intersections < 0;
+ }
+ return false;
+}
+
+double Scanline::overlap(const Scanline &a, const Scanline &b, double xFrom, double xTo, FillRule fillRule) {
+ double total = 0;
+ bool aInside = false, bInside = false;
+ int ai = 0, bi = 0;
+ double ax = !a.intersections.empty() ? a.intersections[ai].x : xTo;
+ double bx = !b.intersections.empty() ? b.intersections[bi].x : xTo;
+ while (ax < xFrom || bx < xFrom) {
+ double xNext = min(ax, bx);
+ if (ax == xNext && ai < (int) a.intersections.size()) {
+ aInside = interpretFillRule(a.intersections[ai].direction, fillRule);
+ ax = ++ai < (int) a.intersections.size() ? a.intersections[ai].x : xTo;
+ }
+ if (bx == xNext && bi < (int) b.intersections.size()) {
+ bInside = interpretFillRule(b.intersections[bi].direction, fillRule);
+ bx = ++bi < (int) b.intersections.size() ? b.intersections[bi].x : xTo;
+ }
+ }
+ double x = xFrom;
+ while (ax < xTo || bx < xTo) {
+ double xNext = min(ax, bx);
+ if (aInside == bInside)
+ total += xNext-x;
+ if (ax == xNext && ai < (int) a.intersections.size()) {
+ aInside = interpretFillRule(a.intersections[ai].direction, fillRule);
+ ax = ++ai < (int) a.intersections.size() ? a.intersections[ai].x : xTo;
+ }
+ if (bx == xNext && bi < (int) b.intersections.size()) {
+ bInside = interpretFillRule(b.intersections[bi].direction, fillRule);
+ bx = ++bi < (int) b.intersections.size() ? b.intersections[bi].x : xTo;
+ }
+ x = xNext;
+ }
+ if (aInside == bInside)
+ total += xTo-x;
+ return total;
+}
+
+Scanline::Scanline() : lastIndex(0) { }
+
+void Scanline::preprocess() {
+ lastIndex = 0;
+ if (!intersections.empty()) {
+ qsort(&intersections[0], intersections.size(), sizeof(Intersection), compareIntersections);
+ int totalDirection = 0;
+ for (std::vector<Intersection>::iterator intersection = intersections.begin(); intersection != intersections.end(); ++intersection) {
+ totalDirection += intersection->direction;
+ intersection->direction = totalDirection;
+ }
+ }
+}
+
+void Scanline::setIntersections(const std::vector<Intersection> &intersections) {
+ this->intersections = intersections;
+ preprocess();
+}
+
+#ifdef MSDFGEN_USE_CPP11
+void Scanline::setIntersections(std::vector<Intersection> &&intersections) {
+ this->intersections = (std::vector<Intersection> &&) intersections;
+ preprocess();
+}
+#endif
+
+int Scanline::moveTo(double x) const {
+ if (intersections.empty())
+ return -1;
+ int index = lastIndex;
+ if (x < intersections[index].x) {
+ do {
+ if (index == 0) {
+ lastIndex = 0;
+ return -1;
+ }
+ --index;
+ } while (x < intersections[index].x);
+ } else {
+ while (index < (int) intersections.size()-1 && x >= intersections[index+1].x)
+ ++index;
+ }
+ lastIndex = index;
+ return index;
+}
+
+int Scanline::countIntersections(double x) const {
+ return moveTo(x)+1;
+}
+
+int Scanline::sumIntersections(double x) const {
+ int index = moveTo(x);
+ if (index >= 0)
+ return intersections[index].direction;
+ return 0;
+}
+
+bool Scanline::filled(double x, FillRule fillRule) const {
+ return interpretFillRule(sumIntersections(x), fillRule);
+}
+
+}
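Illustrative sketch (not part of the patch) of how the running direction totals computed in preprocess() interact with the fill rules: two nested, same-winding contours crossed by one scanline, where NONZERO fills the overlap but ODD (even-odd) treats it as a hole.

```cpp
#include <cassert>
#include <vector>
#include "Scanline.h"

int main() {
    // Enter the outer contour at x=0, the inner at x=1; exit at x=2 and x=3.
    msdfgen::Scanline::Intersection entries[] = { { 0, 1 }, { 1, 1 }, { 2, -1 }, { 3, -1 } };
    msdfgen::Scanline scanline;
    // preprocess() sorts and turns directions into running totals: 1, 2, 1, 0.
    scanline.setIntersections(std::vector<msdfgen::Scanline::Intersection>(entries, entries+4));
    assert(scanline.filled(1.5, msdfgen::FILL_NONZERO)); // running total is 2 (nonzero)
    assert(!scanline.filled(1.5, msdfgen::FILL_ODD));    // 2 is even: a hole
    assert(scanline.filled(2.5, msdfgen::FILL_ODD));     // total back to 1
    return 0;
}
```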
diff --git a/thirdparty/msdfgen/core/Scanline.h b/thirdparty/msdfgen/core/Scanline.h
new file mode 100644
index 0000000000..9c8f34044b
--- /dev/null
+++ b/thirdparty/msdfgen/core/Scanline.h
@@ -0,0 +1,55 @@
+
+#pragma once
+
+#include <vector>
+
+namespace msdfgen {
+
+/// Fill rule dictates how intersection total is interpreted during rasterization.
+enum FillRule {
+ FILL_NONZERO,
+ FILL_ODD, // "even-odd"
+ FILL_POSITIVE,
+ FILL_NEGATIVE
+};
+
+/// Resolves the number of intersections into a binary fill value based on the fill rule.
+bool interpretFillRule(int intersections, FillRule fillRule);
+
+/// Represents a horizontal scanline intersecting a shape.
+class Scanline {
+
+public:
+ /// An intersection with the scanline.
+ struct Intersection {
+ /// X coordinate.
+ double x;
+ /// Normalized Y direction of the oriented edge at the point of intersection.
+ int direction;
+ };
+
+ static double overlap(const Scanline &a, const Scanline &b, double xFrom, double xTo, FillRule fillRule);
+
+ Scanline();
+ /// Populates the intersection list.
+ void setIntersections(const std::vector<Intersection> &intersections);
+#ifdef MSDFGEN_USE_CPP11
+ void setIntersections(std::vector<Intersection> &&intersections);
+#endif
+ /// Returns the number of intersections left of x.
+ int countIntersections(double x) const;
+ /// Returns the total sign of intersections left of x.
+ int sumIntersections(double x) const;
+ /// Decides whether the scanline is filled at x based on fill rule.
+ bool filled(double x, FillRule fillRule) const;
+
+private:
+ std::vector<Intersection> intersections;
+ mutable int lastIndex;
+
+ void preprocess();
+ int moveTo(double x) const;
+
+};
+
+}
diff --git a/thirdparty/msdfgen/core/Shape.cpp b/thirdparty/msdfgen/core/Shape.cpp
new file mode 100644
index 0000000000..8d6f47c807
--- /dev/null
+++ b/thirdparty/msdfgen/core/Shape.cpp
@@ -0,0 +1,183 @@
+
+#include "Shape.h"
+
+#include <algorithm>
+#include "arithmetics.hpp"
+
+namespace msdfgen {
+
+Shape::Shape() : inverseYAxis(false) { }
+
+void Shape::addContour(const Contour &contour) {
+ contours.push_back(contour);
+}
+
+#ifdef MSDFGEN_USE_CPP11
+void Shape::addContour(Contour &&contour) {
+ contours.push_back((Contour &&) contour);
+}
+#endif
+
+Contour & Shape::addContour() {
+ contours.resize(contours.size()+1);
+ return contours.back();
+}
+
+bool Shape::validate() const {
+ for (std::vector<Contour>::const_iterator contour = contours.begin(); contour != contours.end(); ++contour) {
+ if (!contour->edges.empty()) {
+ Point2 corner = contour->edges.back()->point(1);
+ for (std::vector<EdgeHolder>::const_iterator edge = contour->edges.begin(); edge != contour->edges.end(); ++edge) {
+ if (!*edge)
+ return false;
+ if ((*edge)->point(0) != corner)
+ return false;
+ corner = (*edge)->point(1);
+ }
+ }
+ }
+ return true;
+}
+
+static void deconvergeEdge(EdgeHolder &edgeHolder, int param) {
+ {
+ const QuadraticSegment *quadraticSegment = dynamic_cast<const QuadraticSegment *>(&*edgeHolder);
+ if (quadraticSegment)
+ edgeHolder = quadraticSegment->convertToCubic();
+ }
+ {
+ CubicSegment *cubicSegment = dynamic_cast<CubicSegment *>(&*edgeHolder);
+ if (cubicSegment)
+ cubicSegment->deconverge(param, MSDFGEN_DECONVERGENCE_FACTOR);
+ }
+}
+
+void Shape::normalize() {
+ for (std::vector<Contour>::iterator contour = contours.begin(); contour != contours.end(); ++contour) {
+ if (contour->edges.size() == 1) {
+ EdgeSegment *parts[3] = { };
+ contour->edges[0]->splitInThirds(parts[0], parts[1], parts[2]);
+ contour->edges.clear();
+ contour->edges.push_back(EdgeHolder(parts[0]));
+ contour->edges.push_back(EdgeHolder(parts[1]));
+ contour->edges.push_back(EdgeHolder(parts[2]));
+ } else {
+ EdgeHolder *prevEdge = &contour->edges.back();
+ for (std::vector<EdgeHolder>::iterator edge = contour->edges.begin(); edge != contour->edges.end(); ++edge) {
+ Vector2 prevDir = (*prevEdge)->direction(1).normalize();
+ Vector2 curDir = (*edge)->direction(0).normalize();
+ if (dotProduct(prevDir, curDir) < MSDFGEN_CORNER_DOT_EPSILON-1) {
+ deconvergeEdge(*prevEdge, 1);
+ deconvergeEdge(*edge, 0);
+ }
+ prevEdge = &*edge;
+ }
+ }
+ }
+}
+
+void Shape::bound(double &l, double &b, double &r, double &t) const {
+ for (std::vector<Contour>::const_iterator contour = contours.begin(); contour != contours.end(); ++contour)
+ contour->bound(l, b, r, t);
+}
+
+void Shape::boundMiters(double &l, double &b, double &r, double &t, double border, double miterLimit, int polarity) const {
+ for (std::vector<Contour>::const_iterator contour = contours.begin(); contour != contours.end(); ++contour)
+ contour->boundMiters(l, b, r, t, border, miterLimit, polarity);
+}
+
+Shape::Bounds Shape::getBounds(double border, double miterLimit, int polarity) const {
+ static const double LARGE_VALUE = 1e240;
+ Shape::Bounds bounds = { +LARGE_VALUE, +LARGE_VALUE, -LARGE_VALUE, -LARGE_VALUE };
+ bound(bounds.l, bounds.b, bounds.r, bounds.t);
+ if (border > 0) {
+ bounds.l -= border, bounds.b -= border;
+ bounds.r += border, bounds.t += border;
+ if (miterLimit > 0)
+ boundMiters(bounds.l, bounds.b, bounds.r, bounds.t, border, miterLimit, polarity);
+ }
+ return bounds;
+}
+
+void Shape::scanline(Scanline &line, double y) const {
+ std::vector<Scanline::Intersection> intersections;
+ double x[3];
+ int dy[3];
+ for (std::vector<Contour>::const_iterator contour = contours.begin(); contour != contours.end(); ++contour) {
+ for (std::vector<EdgeHolder>::const_iterator edge = contour->edges.begin(); edge != contour->edges.end(); ++edge) {
+ int n = (*edge)->scanlineIntersections(x, dy, y);
+ for (int i = 0; i < n; ++i) {
+ Scanline::Intersection intersection = { x[i], dy[i] };
+ intersections.push_back(intersection);
+ }
+ }
+ }
+#ifdef MSDFGEN_USE_CPP11
+ line.setIntersections((std::vector<Scanline::Intersection> &&) intersections);
+#else
+ line.setIntersections(intersections);
+#endif
+}
+
+int Shape::edgeCount() const {
+ int total = 0;
+ for (std::vector<Contour>::const_iterator contour = contours.begin(); contour != contours.end(); ++contour)
+ total += (int) contour->edges.size();
+ return total;
+}
+
+void Shape::orientContours() {
+ struct Intersection {
+ double x;
+ int direction;
+ int contourIndex;
+
+ static int compare(const void *a, const void *b) {
+ return sign(reinterpret_cast<const Intersection *>(a)->x-reinterpret_cast<const Intersection *>(b)->x);
+ }
+ };
+
+ const double ratio = .5*(sqrt(5)-1); // an irrational number to minimize chance of intersecting a corner or other point of interest
+ std::vector<int> orientations(contours.size());
+ std::vector<Intersection> intersections;
+ for (int i = 0; i < (int) contours.size(); ++i) {
+ if (!orientations[i] && !contours[i].edges.empty()) {
+ // Find a Y that crosses the contour
+ double y0 = contours[i].edges.front()->point(0).y;
+ double y1 = y0;
+ for (std::vector<EdgeHolder>::const_iterator edge = contours[i].edges.begin(); edge != contours[i].edges.end() && y0 == y1; ++edge)
+ y1 = (*edge)->point(1).y;
+ for (std::vector<EdgeHolder>::const_iterator edge = contours[i].edges.begin(); edge != contours[i].edges.end() && y0 == y1; ++edge)
+ y1 = (*edge)->point(ratio).y; // in case all endpoints are in a horizontal line
+ double y = mix(y0, y1, ratio);
+ // Scanline through whole shape at Y
+ double x[3];
+ int dy[3];
+ for (int j = 0; j < (int) contours.size(); ++j) {
+ for (std::vector<EdgeHolder>::const_iterator edge = contours[j].edges.begin(); edge != contours[j].edges.end(); ++edge) {
+ int n = (*edge)->scanlineIntersections(x, dy, y);
+ for (int k = 0; k < n; ++k) {
+ Intersection intersection = { x[k], dy[k], j };
+ intersections.push_back(intersection);
+ }
+ }
+ }
+ qsort(&intersections[0], intersections.size(), sizeof(Intersection), &Intersection::compare);
+ // Disqualify multiple intersections
+ for (int j = 1; j < (int) intersections.size(); ++j)
+ if (intersections[j].x == intersections[j-1].x)
+ intersections[j].direction = intersections[j-1].direction = 0;
+ // Inspect scanline and deduce orientations of intersected contours
+ for (int j = 0; j < (int) intersections.size(); ++j)
+ if (intersections[j].direction)
+ orientations[intersections[j].contourIndex] += 2*((j&1)^(intersections[j].direction > 0))-1;
+ intersections.clear();
+ }
+ }
+ // Reverse contours that have the opposite orientation
+ for (int i = 0; i < (int) contours.size(); ++i)
+ if (orientations[i] < 0)
+ contours[i].reverse();
+}
+
+}
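Construction sketch (not part of the patch): a unit square pushed through the validate/normalize/orientContours pipeline above. The EdgeHolder point-pair constructor and Contour::addEdge are assumed from EdgeHolder.h and Contour.h elsewhere in this patch.

```cpp
#include <cassert>
#include "Shape.h"

int main() {
    msdfgen::Shape shape;
    msdfgen::Contour &contour = shape.addContour();
    msdfgen::Point2 corners[4] = {
        msdfgen::Point2(0, 0), msdfgen::Point2(1, 0),
        msdfgen::Point2(1, 1), msdfgen::Point2(0, 1)
    };
    for (int i = 0; i < 4; ++i) // close the loop: each edge starts where the previous ended
        contour.addEdge(msdfgen::EdgeHolder(corners[i], corners[(i+1)%4]));
    assert(shape.validate()); // edges are non-null and form a closed loop
    shape.normalize();        // splits single-edge contours, deconverges corners
    shape.orientContours();   // enforce consistent (non-zero) winding
    assert(shape.edgeCount() == 4);
    return 0;
}
```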
diff --git a/thirdparty/msdfgen/core/Shape.h b/thirdparty/msdfgen/core/Shape.h
new file mode 100644
index 0000000000..7539921ce7
--- /dev/null
+++ b/thirdparty/msdfgen/core/Shape.h
@@ -0,0 +1,55 @@
+
+#pragma once
+
+#include <vector>
+#include "Contour.h"
+#include "Scanline.h"
+
+namespace msdfgen {
+
+// Threshold of the dot product of adjacent edge directions to be considered convergent.
+#define MSDFGEN_CORNER_DOT_EPSILON .000001
+// The proportional amount by which a curve's control point will be adjusted to eliminate convergent corners.
+#define MSDFGEN_DECONVERGENCE_FACTOR .000001
+
+/// Vector shape representation.
+class Shape {
+
+public:
+ struct Bounds {
+ double l, b, r, t;
+ };
+
+ /// The list of contours the shape consists of.
+ std::vector<Contour> contours;
+ /// Specifies whether the shape uses bottom-to-top (false) or top-to-bottom (true) Y coordinates.
+ bool inverseYAxis;
+
+ Shape();
+ /// Adds a contour.
+ void addContour(const Contour &contour);
+#ifdef MSDFGEN_USE_CPP11
+ void addContour(Contour &&contour);
+#endif
+ /// Adds a blank contour and returns its reference.
+ Contour & addContour();
+ /// Normalizes the shape geometry for distance field generation.
+ void normalize();
+ /// Performs basic checks to determine if the object represents a valid shape.
+ bool validate() const;
+ /// Adjusts the bounding box to fit the shape.
+ void bound(double &l, double &b, double &r, double &t) const;
+ /// Adjusts the bounding box to fit the shape border's mitered corners.
+ void boundMiters(double &l, double &b, double &r, double &t, double border, double miterLimit, int polarity) const;
+ /// Computes the minimum bounding box that fits the shape, optionally with a (mitered) border.
+ Bounds getBounds(double border = 0, double miterLimit = 0, int polarity = 0) const;
+ /// Outputs the scanline that intersects the shape at y.
+ void scanline(Scanline &line, double y) const;
+ /// Returns the total number of edge segments.
+ int edgeCount() const;
+ /// Assumes its contours are unoriented (even-odd fill rule). Attempts to orient them to conform to the non-zero winding rule.
+ void orientContours();
+
+};
+
+}
diff --git a/thirdparty/msdfgen/core/ShapeDistanceFinder.h b/thirdparty/msdfgen/core/ShapeDistanceFinder.h
new file mode 100644
index 0000000000..57df8d8e72
--- /dev/null
+++ b/thirdparty/msdfgen/core/ShapeDistanceFinder.h
@@ -0,0 +1,37 @@
+
+#pragma once
+
+#include <vector>
+#include "Vector2.h"
+#include "edge-selectors.h"
+#include "contour-combiners.h"
+
+namespace msdfgen {
+
+/// Finds the distance between a point and a Shape. ContourCombiner dictates the distance metric and its data type.
+template <class ContourCombiner>
+class ShapeDistanceFinder {
+
+public:
+ typedef typename ContourCombiner::DistanceType DistanceType;
+
+ // Passed shape object must persist until the distance finder is destroyed!
+ explicit ShapeDistanceFinder(const Shape &shape);
+ /// Finds the distance from origin. Not thread-safe! Is fastest when subsequent queries are close together.
+ DistanceType distance(const Point2 &origin);
+
+ /// Finds the distance between shape and origin. Does not allocate result cache used to optimize performance of multiple queries.
+ static DistanceType oneShotDistance(const Shape &shape, const Point2 &origin);
+
+private:
+ const Shape &shape;
+ ContourCombiner contourCombiner;
+ std::vector<typename ContourCombiner::EdgeSelectorType::EdgeCache> shapeEdgeCache;
+
+};
+
+typedef ShapeDistanceFinder<SimpleContourCombiner<TrueDistanceSelector> > SimpleTrueShapeDistanceFinder;
+
+}
+
+#include "ShapeDistanceFinder.hpp"
diff --git a/thirdparty/msdfgen/core/ShapeDistanceFinder.hpp b/thirdparty/msdfgen/core/ShapeDistanceFinder.hpp
new file mode 100644
index 0000000000..028738e5c3
--- /dev/null
+++ b/thirdparty/msdfgen/core/ShapeDistanceFinder.hpp
@@ -0,0 +1,56 @@
+
+#include "ShapeDistanceFinder.h"
+
+namespace msdfgen {
+
+template <class ContourCombiner>
+ShapeDistanceFinder<ContourCombiner>::ShapeDistanceFinder(const Shape &shape) : shape(shape), contourCombiner(shape), shapeEdgeCache(shape.edgeCount()) { }
+
+template <class ContourCombiner>
+typename ShapeDistanceFinder<ContourCombiner>::DistanceType ShapeDistanceFinder<ContourCombiner>::distance(const Point2 &origin) {
+ contourCombiner.reset(origin);
+ typename ContourCombiner::EdgeSelectorType::EdgeCache *edgeCache = &shapeEdgeCache[0];
+
+ for (std::vector<Contour>::const_iterator contour = shape.contours.begin(); contour != shape.contours.end(); ++contour) {
+ if (!contour->edges.empty()) {
+ typename ContourCombiner::EdgeSelectorType &edgeSelector = contourCombiner.edgeSelector(int(contour-shape.contours.begin()));
+
+ const EdgeSegment *prevEdge = contour->edges.size() >= 2 ? *(contour->edges.end()-2) : *contour->edges.begin();
+ const EdgeSegment *curEdge = contour->edges.back();
+ for (std::vector<EdgeHolder>::const_iterator edge = contour->edges.begin(); edge != contour->edges.end(); ++edge) {
+ const EdgeSegment *nextEdge = *edge;
+ edgeSelector.addEdge(*edgeCache++, prevEdge, curEdge, nextEdge);
+ prevEdge = curEdge;
+ curEdge = nextEdge;
+ }
+ }
+ }
+
+ return contourCombiner.distance();
+}
+
+template <class ContourCombiner>
+typename ShapeDistanceFinder<ContourCombiner>::DistanceType ShapeDistanceFinder<ContourCombiner>::oneShotDistance(const Shape &shape, const Point2 &origin) {
+ ContourCombiner contourCombiner(shape);
+ contourCombiner.reset(origin);
+
+ for (std::vector<Contour>::const_iterator contour = shape.contours.begin(); contour != shape.contours.end(); ++contour) {
+ if (!contour->edges.empty()) {
+ typename ContourCombiner::EdgeSelectorType &edgeSelector = contourCombiner.edgeSelector(int(contour-shape.contours.begin()));
+
+ const EdgeSegment *prevEdge = contour->edges.size() >= 2 ? *(contour->edges.end()-2) : *contour->edges.begin();
+ const EdgeSegment *curEdge = contour->edges.back();
+ for (std::vector<EdgeHolder>::const_iterator edge = contour->edges.begin(); edge != contour->edges.end(); ++edge) {
+ const EdgeSegment *nextEdge = *edge;
+ typename ContourCombiner::EdgeSelectorType::EdgeCache dummy;
+ edgeSelector.addEdge(dummy, prevEdge, curEdge, nextEdge);
+ prevEdge = curEdge;
+ curEdge = nextEdge;
+ }
+ }
+ }
+
+ return contourCombiner.distance();
+}
+
+}
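Usage sketch (not part of the patch): one-shot true-distance queries via the SimpleTrueShapeDistanceFinder typedef, taking a prepared shape such as the unit square built in the Shape.cpp sketch above. The sign encodes inside/outside per the selector's convention, so the asserts only check magnitudes; centerDistanceSketch is a hypothetical name.

```cpp
#include <cassert>
#include <cmath>
#include "ShapeDistanceFinder.h"

double centerDistanceSketch(const msdfgen::Shape &unitSquare) {
    double center = msdfgen::SimpleTrueShapeDistanceFinder::oneShotDistance(
        unitSquare, msdfgen::Point2(.5, .5));
    double outside = msdfgen::SimpleTrueShapeDistanceFinder::oneShotDistance(
        unitSquare, msdfgen::Point2(2, .5));
    assert(std::fabs(std::fabs(center)-.5) < 1e-9); // nearest edge is .5 away
    assert(std::fabs(std::fabs(outside)-1) < 1e-9); // nearest edge (x == 1) is 1 away
    return center;
}
```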
diff --git a/thirdparty/msdfgen/core/SignedDistance.cpp b/thirdparty/msdfgen/core/SignedDistance.cpp
new file mode 100644
index 0000000000..18c9d2c424
--- /dev/null
+++ b/thirdparty/msdfgen/core/SignedDistance.cpp
@@ -0,0 +1,30 @@
+
+#include "SignedDistance.h"
+
+#include <cmath>
+
+namespace msdfgen {
+
+const SignedDistance SignedDistance::INFINITE(-1e240, 1);
+
+SignedDistance::SignedDistance() : distance(-1e240), dot(1) { }
+
+SignedDistance::SignedDistance(double dist, double d) : distance(dist), dot(d) { }
+
+bool operator<(SignedDistance a, SignedDistance b) {
+ return fabs(a.distance) < fabs(b.distance) || (fabs(a.distance) == fabs(b.distance) && a.dot < b.dot);
+}
+
+bool operator>(SignedDistance a, SignedDistance b) {
+ return fabs(a.distance) > fabs(b.distance) || (fabs(a.distance) == fabs(b.distance) && a.dot > b.dot);
+}
+
+bool operator<=(SignedDistance a, SignedDistance b) {
+ return fabs(a.distance) < fabs(b.distance) || (fabs(a.distance) == fabs(b.distance) && a.dot <= b.dot);
+}
+
+bool operator>=(SignedDistance a, SignedDistance b) {
+ return fabs(a.distance) > fabs(b.distance) || (fabs(a.distance) == fabs(b.distance) && a.dot >= b.dot);
+}
+
+}
diff --git a/thirdparty/msdfgen/core/SignedDistance.h b/thirdparty/msdfgen/core/SignedDistance.h
new file mode 100644
index 0000000000..034210f751
--- /dev/null
+++ b/thirdparty/msdfgen/core/SignedDistance.h
@@ -0,0 +1,25 @@
+
+#pragma once
+
+namespace msdfgen {
+
+/// Represents a signed distance and alignment, which together can be compared to uniquely determine the closest edge segment.
+class SignedDistance {
+
+public:
+ static const SignedDistance INFINITE;
+
+ double distance;
+ double dot;
+
+ SignedDistance();
+ SignedDistance(double dist, double d);
+
+ friend bool operator<(SignedDistance a, SignedDistance b);
+ friend bool operator>(SignedDistance a, SignedDistance b);
+ friend bool operator<=(SignedDistance a, SignedDistance b);
+ friend bool operator>=(SignedDistance a, SignedDistance b);
+
+};
+
+}
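Sketch (not part of the patch) of why the dot member exists: at a corner, two candidate edges can report the same absolute distance, and the comparison operators above break the tie toward the smaller dot, i.e. the candidate whose edge runs more perpendicular to the query direction.

```cpp
#include <cassert>
#include "SignedDistance.h"

int main() {
    msdfgen::SignedDistance nearlyParallel(3, .9); // candidate almost along the edge
    msdfgen::SignedDistance wellAligned(-3, .1);   // same magnitude, smaller dot
    assert(wellAligned < nearlyParallel);          // tie on |distance| broken by dot
    assert(msdfgen::SignedDistance(-1, .5) < wellAligned); // smaller |distance| always wins
    return 0;
}
```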
diff --git a/thirdparty/msdfgen/core/Vector2.cpp b/thirdparty/msdfgen/core/Vector2.cpp
new file mode 100644
index 0000000000..896963ff2c
--- /dev/null
+++ b/thirdparty/msdfgen/core/Vector2.cpp
@@ -0,0 +1,146 @@
+
+#include "Vector2.h"
+
+namespace msdfgen {
+
+Vector2::Vector2(double val) : x(val), y(val) { }
+
+Vector2::Vector2(double x, double y) : x(x), y(y) { }
+
+void Vector2::reset() {
+ x = 0, y = 0;
+}
+
+void Vector2::set(double x, double y) {
+ Vector2::x = x, Vector2::y = y;
+}
+
+double Vector2::length() const {
+ return sqrt(x*x+y*y);
+}
+
+double Vector2::direction() const {
+ return atan2(y, x);
+}
+
+Vector2 Vector2::normalize(bool allowZero) const {
+ double len = length();
+ if (len == 0)
+ return Vector2(0, !allowZero);
+ return Vector2(x/len, y/len);
+}
+
+Vector2 Vector2::getOrthogonal(bool polarity) const {
+ return polarity ? Vector2(-y, x) : Vector2(y, -x);
+}
+
+Vector2 Vector2::getOrthonormal(bool polarity, bool allowZero) const {
+ double len = length();
+ if (len == 0)
+ return polarity ? Vector2(0, !allowZero) : Vector2(0, -!allowZero);
+ return polarity ? Vector2(-y/len, x/len) : Vector2(y/len, -x/len);
+}
+
+Vector2 Vector2::project(const Vector2 &vector, bool positive) const {
+ Vector2 n = normalize(true);
+ double t = dotProduct(vector, n);
+ if (positive && t <= 0)
+ return Vector2();
+ return t*n;
+}
+
+Vector2::operator const void*() const {
+ return x || y ? this : NULL;
+}
+
+bool Vector2::operator!() const {
+ return !x && !y;
+}
+
+bool Vector2::operator==(const Vector2 &other) const {
+ return x == other.x && y == other.y;
+}
+
+bool Vector2::operator!=(const Vector2 &other) const {
+ return x != other.x || y != other.y;
+}
+
+Vector2 Vector2::operator+() const {
+ return *this;
+}
+
+Vector2 Vector2::operator-() const {
+ return Vector2(-x, -y);
+}
+
+Vector2 Vector2::operator+(const Vector2 &other) const {
+ return Vector2(x+other.x, y+other.y);
+}
+
+Vector2 Vector2::operator-(const Vector2 &other) const {
+ return Vector2(x-other.x, y-other.y);
+}
+
+Vector2 Vector2::operator*(const Vector2 &other) const {
+ return Vector2(x*other.x, y*other.y);
+}
+
+Vector2 Vector2::operator/(const Vector2 &other) const {
+ return Vector2(x/other.x, y/other.y);
+}
+
+Vector2 Vector2::operator*(double value) const {
+ return Vector2(x*value, y*value);
+}
+
+Vector2 Vector2::operator/(double value) const {
+ return Vector2(x/value, y/value);
+}
+
+Vector2 & Vector2::operator+=(const Vector2 &other) {
+ x += other.x, y += other.y;
+ return *this;
+}
+
+Vector2 & Vector2::operator-=(const Vector2 &other) {
+ x -= other.x, y -= other.y;
+ return *this;
+}
+
+Vector2 & Vector2::operator*=(const Vector2 &other) {
+ x *= other.x, y *= other.y;
+ return *this;
+}
+
+Vector2 & Vector2::operator/=(const Vector2 &other) {
+ x /= other.x, y /= other.y;
+ return *this;
+}
+
+Vector2 & Vector2::operator*=(double value) {
+ x *= value, y *= value;
+ return *this;
+}
+
+Vector2 & Vector2::operator/=(double value) {
+ x /= value, y /= value;
+ return *this;
+}
+
+double dotProduct(const Vector2 &a, const Vector2 &b) {
+ return a.x*b.x+a.y*b.y;
+}
+
+double crossProduct(const Vector2 &a, const Vector2 &b) {
+ return a.x*b.y-a.y*b.x;
+}
+
+Vector2 operator*(double value, const Vector2 &vector) {
+ return Vector2(value*vector.x, value*vector.y);
+}
+
+Vector2 operator/(double value, const Vector2 &vector) {
+ return Vector2(value/vector.x, value/vector.y);
+}
+
+}
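Arithmetic sketch (not part of the patch): for unit vectors, the dotProduct/crossProduct pair gives the cosine and sine of the turn angle, which is exactly how isCorner in edge-coloring.cpp later in this patch classifies joins.

```cpp
#include <cassert>
#include <cmath>
#include "Vector2.h"

int main() {
    msdfgen::Vector2 along(1, 0);
    msdfgen::Vector2 turned = msdfgen::Vector2(1, 1).normalize(); // 45-degree turn
    double d = msdfgen::dotProduct(along, turned);   // cos of the turn angle
    double c = msdfgen::crossProduct(along, turned); // sin of the turn angle
    assert(std::fabs(d-std::sqrt(.5)) < 1e-12 && std::fabs(c-std::sqrt(.5)) < 1e-12);
    // The isCorner test: a 45-degree turn clears any smaller angle threshold.
    double crossThreshold = std::sin(.1); // ~5.7 degree threshold
    assert(d <= 0 || std::fabs(c) > crossThreshold);
    return 0;
}
```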
diff --git a/thirdparty/msdfgen/core/Vector2.h b/thirdparty/msdfgen/core/Vector2.h
new file mode 100644
index 0000000000..47ca637c3d
--- /dev/null
+++ b/thirdparty/msdfgen/core/Vector2.h
@@ -0,0 +1,66 @@
+
+#pragma once
+
+#include <cstdlib>
+#include <cmath>
+
+namespace msdfgen {
+
+/**
+* A 2-dimensional euclidean vector with double precision.
+* Implementation based on the Vector2 template from Artery Engine.
+* @author Viktor Chlumsky
+*/
+struct Vector2 {
+
+ double x, y;
+
+ Vector2(double val = 0);
+ Vector2(double x, double y);
+ /// Sets the vector to zero.
+ void reset();
+ /// Sets individual elements of the vector.
+ void set(double x, double y);
+ /// Returns the vector's length.
+ double length() const;
+ /// Returns the angle of the vector in radians (atan2).
+ double direction() const;
+ /// Returns the normalized vector - one that has the same direction but unit length.
+ Vector2 normalize(bool allowZero = false) const;
+ /// Returns a vector with the same length that is orthogonal to this one.
+ Vector2 getOrthogonal(bool polarity = true) const;
+ /// Returns a vector with unit length that is orthogonal to this one.
+ Vector2 getOrthonormal(bool polarity = true, bool allowZero = false) const;
+ /// Returns a vector projected along this one.
+ Vector2 project(const Vector2 &vector, bool positive = false) const;
+ operator const void *() const;
+ bool operator!() const;
+ bool operator==(const Vector2 &other) const;
+ bool operator!=(const Vector2 &other) const;
+ Vector2 operator+() const;
+ Vector2 operator-() const;
+ Vector2 operator+(const Vector2 &other) const;
+ Vector2 operator-(const Vector2 &other) const;
+ Vector2 operator*(const Vector2 &other) const;
+ Vector2 operator/(const Vector2 &other) const;
+ Vector2 operator*(double value) const;
+ Vector2 operator/(double value) const;
+ Vector2 & operator+=(const Vector2 &other);
+ Vector2 & operator-=(const Vector2 &other);
+ Vector2 & operator*=(const Vector2 &other);
+ Vector2 & operator/=(const Vector2 &other);
+ Vector2 & operator*=(double value);
+ Vector2 & operator/=(double value);
+ /// Dot product of two vectors.
+ friend double dotProduct(const Vector2 &a, const Vector2 &b);
+ /// A special version of the cross product for 2D vectors (returns scalar value).
+ friend double crossProduct(const Vector2 &a, const Vector2 &b);
+ friend Vector2 operator*(double value, const Vector2 &vector);
+ friend Vector2 operator/(double value, const Vector2 &vector);
+
+};
+
+/// A vector may also represent a point, which shall be differentiated semantically using the alias Point2.
+typedef Vector2 Point2;
+
+}
diff --git a/thirdparty/msdfgen/core/arithmetics.hpp b/thirdparty/msdfgen/core/arithmetics.hpp
new file mode 100644
index 0000000000..78c21d658e
--- /dev/null
+++ b/thirdparty/msdfgen/core/arithmetics.hpp
@@ -0,0 +1,63 @@
+
+#pragma once
+
+#include <cstdlib>
+#include <cmath>
+
+namespace msdfgen {
+
+/// Returns the smaller of the arguments.
+template <typename T>
+inline T min(T a, T b) {
+ return b < a ? b : a;
+}
+
+/// Returns the larger of the arguments.
+template <typename T>
+inline T max(T a, T b) {
+ return a < b ? b : a;
+}
+
+/// Returns the middle out of three values.
+template <typename T>
+inline T median(T a, T b, T c) {
+ return max(min(a, b), min(max(a, b), c));
+}
+
+/// Returns the weighted average of a and b.
+template <typename T, typename S>
+inline T mix(T a, T b, S weight) {
+ return T((S(1)-weight)*a+weight*b);
+}
+
+/// Clamps the number to the interval from 0 to 1.
+template <typename T>
+inline T clamp(T n) {
+ return n >= T(0) && n <= T(1) ? n : T(n > T(0));
+}
+
+/// Clamps the number to the interval from 0 to b.
+template <typename T>
+inline T clamp(T n, T b) {
+ return n >= T(0) && n <= b ? n : T(n > T(0))*b;
+}
+
+/// Clamps the number to the interval from a to b.
+template <typename T>
+inline T clamp(T n, T a, T b) {
+ return n >= a && n <= b ? n : n < a ? a : b;
+}
+
+/// Returns 1 for positive values, -1 for negative values, and 0 for zero.
+template <typename T>
+inline int sign(T n) {
+ return (T(0) < n)-(n < T(0));
+}
+
+/// Returns 1 for non-negative values and -1 for negative values.
+template <typename T>
+inline int nonZeroSign(T n) {
+ return 2*(n > T(0))-1;
+}
+
+}
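Sketch (not part of the patch) of these helpers in action; taking the median of the three channels is the core MSDF decode step used throughout the error-correction code above.

```cpp
#include <cassert>
#include "arithmetics.hpp"

int main() {
    // One artifact-free texel: two channels agree on "inside" (> .5),
    // so the median, not the mean, carries the distance.
    float r = .75f, g = .62f, b = .31f;
    assert(msdfgen::median(r, g, b) == .62f);
    assert(msdfgen::mix(0.f, 10.f, .25) == 2.5f);  // weighted average at t = .25
    assert(msdfgen::clamp(1.7) == 1.0);            // clamped to [0, 1]
    assert(msdfgen::sign(-3) == -1);
    return 0;
}
```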
diff --git a/thirdparty/msdfgen/core/bitmap-interpolation.hpp b/thirdparty/msdfgen/core/bitmap-interpolation.hpp
new file mode 100644
index 0000000000..a14b0fb534
--- /dev/null
+++ b/thirdparty/msdfgen/core/bitmap-interpolation.hpp
@@ -0,0 +1,25 @@
+
+#pragma once
+
+#include "arithmetics.hpp"
+#include "Vector2.h"
+#include "BitmapRef.hpp"
+
+namespace msdfgen {
+
+template <typename T, int N>
+static void interpolate(T *output, const BitmapConstRef<T, N> &bitmap, Point2 pos) {
+ pos -= .5;
+ int l = (int) floor(pos.x);
+ int b = (int) floor(pos.y);
+ int r = l+1;
+ int t = b+1;
+ double lr = pos.x-l;
+ double bt = pos.y-b;
+ l = clamp(l, bitmap.width-1), r = clamp(r, bitmap.width-1);
+ b = clamp(b, bitmap.height-1), t = clamp(t, bitmap.height-1);
+ for (int i = 0; i < N; ++i)
+ output[i] = mix(mix(bitmap(l, b)[i], bitmap(r, b)[i], lr), mix(bitmap(l, t)[i], bitmap(r, t)[i], lr), bt);
+}
+
+}
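Usage sketch (not part of the patch): texel centers sit at half-integer coordinates, hence the -.5 shift in interpolate above; a 2x2 single-channel bitmap sampled dead center averages all four texels.

```cpp
#include <cassert>
#include "bitmap-interpolation.hpp"

int main() {
    const float pixels[4] = { 0, 1, 2, 3 }; // 2x2, single channel, row-major
    msdfgen::BitmapConstRef<float, 1> bitmap(pixels, 2, 2);
    float out;
    msdfgen::interpolate(&out, bitmap, msdfgen::Point2(1, 1)); // dead center
    assert(out == 1.5f); // mix(mix(0,1,.5), mix(2,3,.5), .5)
    return 0;
}
```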
diff --git a/thirdparty/msdfgen/core/contour-combiners.cpp b/thirdparty/msdfgen/core/contour-combiners.cpp
new file mode 100644
index 0000000000..d0c5b46d74
--- /dev/null
+++ b/thirdparty/msdfgen/core/contour-combiners.cpp
@@ -0,0 +1,133 @@
+
+#include "contour-combiners.h"
+
+#include "arithmetics.hpp"
+
+namespace msdfgen {
+
+static void initDistance(double &distance) {
+ distance = SignedDistance::INFINITE.distance;
+}
+
+static void initDistance(MultiDistance &distance) {
+ distance.r = SignedDistance::INFINITE.distance;
+ distance.g = SignedDistance::INFINITE.distance;
+ distance.b = SignedDistance::INFINITE.distance;
+}
+
+static double resolveDistance(double distance) {
+ return distance;
+}
+
+static double resolveDistance(const MultiDistance &distance) {
+ return median(distance.r, distance.g, distance.b);
+}
+
+template <class EdgeSelector>
+SimpleContourCombiner<EdgeSelector>::SimpleContourCombiner(const Shape &shape) { }
+
+template <class EdgeSelector>
+void SimpleContourCombiner<EdgeSelector>::reset(const Point2 &p) {
+ shapeEdgeSelector.reset(p);
+}
+
+template <class EdgeSelector>
+EdgeSelector & SimpleContourCombiner<EdgeSelector>::edgeSelector(int) {
+ return shapeEdgeSelector;
+}
+
+template <class EdgeSelector>
+typename SimpleContourCombiner<EdgeSelector>::DistanceType SimpleContourCombiner<EdgeSelector>::distance() const {
+ return shapeEdgeSelector.distance();
+}
+
+template class SimpleContourCombiner<TrueDistanceSelector>;
+template class SimpleContourCombiner<PseudoDistanceSelector>;
+template class SimpleContourCombiner<MultiDistanceSelector>;
+template class SimpleContourCombiner<MultiAndTrueDistanceSelector>;
+
+template <class EdgeSelector>
+OverlappingContourCombiner<EdgeSelector>::OverlappingContourCombiner(const Shape &shape) {
+ windings.reserve(shape.contours.size());
+ for (std::vector<Contour>::const_iterator contour = shape.contours.begin(); contour != shape.contours.end(); ++contour)
+ windings.push_back(contour->winding());
+ edgeSelectors.resize(shape.contours.size());
+}
+
+template <class EdgeSelector>
+void OverlappingContourCombiner<EdgeSelector>::reset(const Point2 &p) {
+ this->p = p;
+ for (typename std::vector<EdgeSelector>::iterator contourEdgeSelector = edgeSelectors.begin(); contourEdgeSelector != edgeSelectors.end(); ++contourEdgeSelector)
+ contourEdgeSelector->reset(p);
+}
+
+template <class EdgeSelector>
+EdgeSelector & OverlappingContourCombiner<EdgeSelector>::edgeSelector(int i) {
+ return edgeSelectors[i];
+}
+
+template <class EdgeSelector>
+typename OverlappingContourCombiner<EdgeSelector>::DistanceType OverlappingContourCombiner<EdgeSelector>::distance() const {
+ int contourCount = (int) edgeSelectors.size();
+ EdgeSelector shapeEdgeSelector;
+ EdgeSelector innerEdgeSelector;
+ EdgeSelector outerEdgeSelector;
+ shapeEdgeSelector.reset(p);
+ innerEdgeSelector.reset(p);
+ outerEdgeSelector.reset(p);
+ for (int i = 0; i < contourCount; ++i) {
+ DistanceType edgeDistance = edgeSelectors[i].distance();
+ shapeEdgeSelector.merge(edgeSelectors[i]);
+ if (windings[i] > 0 && resolveDistance(edgeDistance) >= 0)
+ innerEdgeSelector.merge(edgeSelectors[i]);
+ if (windings[i] < 0 && resolveDistance(edgeDistance) <= 0)
+ outerEdgeSelector.merge(edgeSelectors[i]);
+ }
+
+ DistanceType shapeDistance = shapeEdgeSelector.distance();
+ DistanceType innerDistance = innerEdgeSelector.distance();
+ DistanceType outerDistance = outerEdgeSelector.distance();
+ double innerScalarDistance = resolveDistance(innerDistance);
+ double outerScalarDistance = resolveDistance(outerDistance);
+ DistanceType distance;
+ initDistance(distance);
+
+ int winding = 0;
+ if (innerScalarDistance >= 0 && fabs(innerScalarDistance) <= fabs(outerScalarDistance)) {
+ distance = innerDistance;
+ winding = 1;
+ for (int i = 0; i < contourCount; ++i)
+ if (windings[i] > 0) {
+ DistanceType contourDistance = edgeSelectors[i].distance();
+ if (fabs(resolveDistance(contourDistance)) < fabs(outerScalarDistance) && resolveDistance(contourDistance) > resolveDistance(distance))
+ distance = contourDistance;
+ }
+ } else if (outerScalarDistance <= 0 && fabs(outerScalarDistance) < fabs(innerScalarDistance)) {
+ distance = outerDistance;
+ winding = -1;
+ for (int i = 0; i < contourCount; ++i)
+ if (windings[i] < 0) {
+ DistanceType contourDistance = edgeSelectors[i].distance();
+ if (fabs(resolveDistance(contourDistance)) < fabs(innerScalarDistance) && resolveDistance(contourDistance) < resolveDistance(distance))
+ distance = contourDistance;
+ }
+ } else
+ return shapeDistance;
+
+ for (int i = 0; i < contourCount; ++i)
+ if (windings[i] != winding) {
+ DistanceType contourDistance = edgeSelectors[i].distance();
+ if (resolveDistance(contourDistance)*resolveDistance(distance) >= 0 && fabs(resolveDistance(contourDistance)) < fabs(resolveDistance(distance)))
+ distance = contourDistance;
+ }
+ if (resolveDistance(distance) == resolveDistance(shapeDistance))
+ distance = shapeDistance;
+ return distance;
+}
+
+template class OverlappingContourCombiner<TrueDistanceSelector>;
+template class OverlappingContourCombiner<PseudoDistanceSelector>;
+template class OverlappingContourCombiner<MultiDistanceSelector>;
+template class OverlappingContourCombiner<MultiAndTrueDistanceSelector>;
+
+}
diff --git a/thirdparty/msdfgen/core/contour-combiners.h b/thirdparty/msdfgen/core/contour-combiners.h
new file mode 100644
index 0000000000..944b119aba
--- /dev/null
+++ b/thirdparty/msdfgen/core/contour-combiners.h
@@ -0,0 +1,47 @@
+
+#pragma once
+
+#include "Shape.h"
+#include "edge-selectors.h"
+
+namespace msdfgen {
+
+/// Simply selects the nearest contour.
+template <class EdgeSelector>
+class SimpleContourCombiner {
+
+public:
+ typedef EdgeSelector EdgeSelectorType;
+ typedef typename EdgeSelector::DistanceType DistanceType;
+
+ explicit SimpleContourCombiner(const Shape &shape);
+ void reset(const Point2 &p);
+ EdgeSelector & edgeSelector(int i);
+ DistanceType distance() const;
+
+private:
+ EdgeSelector shapeEdgeSelector;
+
+};
+
+/// Selects the nearest contour that actually forms a border between filled and unfilled area.
+template <class EdgeSelector>
+class OverlappingContourCombiner {
+
+public:
+ typedef EdgeSelector EdgeSelectorType;
+ typedef typename EdgeSelector::DistanceType DistanceType;
+
+ explicit OverlappingContourCombiner(const Shape &shape);
+ void reset(const Point2 &p);
+ EdgeSelector & edgeSelector(int i);
+ DistanceType distance() const;
+
+private:
+ Point2 p;
+ std::vector<int> windings;
+ std::vector<EdgeSelector> edgeSelectors;
+
+};
+
+}
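
Both combiners deliberately expose the same compile-time interface (reset, edgeSelector, distance, and the nested DistanceType), so generator code can be written once and instantiated with either. A sketch of the pattern, with the edge-feeding loop elided (it is the same as in the sketch after contour-combiners.cpp above):

    #include "contour-combiners.h"

    // Sketch: SimpleContourCombiner is the fast path; OverlappingContourCombiner
    // is the drop-in replacement when contours may overlap.
    template <template <typename> class ContourCombiner, class EdgeSelector>
    static typename EdgeSelector::DistanceType evaluate(const msdfgen::Shape &shape, msdfgen::Point2 p) {
        ContourCombiner<EdgeSelector> combiner(shape);
        combiner.reset(p);
        // ... feed every edge of every contour to combiner.edgeSelector(i).addEdge(...) ...
        return combiner.distance();
    }
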
diff --git a/thirdparty/msdfgen/core/edge-coloring.cpp b/thirdparty/msdfgen/core/edge-coloring.cpp
new file mode 100644
index 0000000000..370f9aa38d
--- /dev/null
+++ b/thirdparty/msdfgen/core/edge-coloring.cpp
@@ -0,0 +1,499 @@
+
+#include "edge-coloring.h"
+
+#include <cstdlib>
+#include <cmath>
+#include <cstring>
+#include <queue>
+#include "arithmetics.hpp"
+
+namespace msdfgen {
+
+static bool isCorner(const Vector2 &aDir, const Vector2 &bDir, double crossThreshold) {
+ return dotProduct(aDir, bDir) <= 0 || fabs(crossProduct(aDir, bDir)) > crossThreshold;
+}
+
+static double estimateEdgeLength(const EdgeSegment *edge) {
+ double len = 0;
+ Point2 prev = edge->point(0);
+ for (int i = 1; i <= MSDFGEN_EDGE_LENGTH_PRECISION; ++i) {
+ Point2 cur = edge->point(1./MSDFGEN_EDGE_LENGTH_PRECISION*i);
+ len += (cur-prev).length();
+ prev = cur;
+ }
+ return len;
+}
+
+static void switchColor(EdgeColor &color, unsigned long long &seed, EdgeColor banned = BLACK) {
+ EdgeColor combined = EdgeColor(color&banned);
+ if (combined == RED || combined == GREEN || combined == BLUE) {
+ color = EdgeColor(combined^WHITE);
+ return;
+ }
+ if (color == BLACK || color == WHITE) {
+ static const EdgeColor start[3] = { CYAN, MAGENTA, YELLOW };
+ color = start[seed%3];
+ seed /= 3;
+ return;
+ }
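+ // Remaining case: color has exactly two channels set; rotate its three channel bits left by one or two positions, consuming one bit of the seed.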
+ int shifted = color<<(1+(seed&1));
+ color = EdgeColor((shifted|shifted>>3)&WHITE);
+ seed >>= 1;
+}
+
+void edgeColoringSimple(Shape &shape, double angleThreshold, unsigned long long seed) {
+ double crossThreshold = sin(angleThreshold);
+ std::vector<int> corners;
+ for (std::vector<Contour>::iterator contour = shape.contours.begin(); contour != shape.contours.end(); ++contour) {
+ // Identify corners
+ corners.clear();
+ if (!contour->edges.empty()) {
+ Vector2 prevDirection = contour->edges.back()->direction(1);
+ int index = 0;
+ for (std::vector<EdgeHolder>::const_iterator edge = contour->edges.begin(); edge != contour->edges.end(); ++edge, ++index) {
+ if (isCorner(prevDirection.normalize(), (*edge)->direction(0).normalize(), crossThreshold))
+ corners.push_back(index);
+ prevDirection = (*edge)->direction(1);
+ }
+ }
+
+ // Smooth contour
+ if (corners.empty())
+ for (std::vector<EdgeHolder>::iterator edge = contour->edges.begin(); edge != contour->edges.end(); ++edge)
+ (*edge)->color = WHITE;
+ // "Teardrop" case
+ else if (corners.size() == 1) {
+ EdgeColor colors[3] = { WHITE, WHITE };
+ switchColor(colors[0], seed);
+ switchColor(colors[2] = colors[0], seed);
+ int corner = corners[0];
+ if (contour->edges.size() >= 3) {
+ int m = (int) contour->edges.size();
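+ // Distribute the three colors along the contour in approximate thirds, so that colors[0] and colors[2] meet at the lone corner.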
+ for (int i = 0; i < m; ++i)
+ contour->edges[(corner+i)%m]->color = (colors+1)[int(3+2.875*i/(m-1)-1.4375+.5)-3];
+ } else if (contour->edges.size() >= 1) {
+ // Less than three edge segments for three colors => edges must be split
+ EdgeSegment *parts[7] = { };
+ contour->edges[0]->splitInThirds(parts[0+3*corner], parts[1+3*corner], parts[2+3*corner]);
+ if (contour->edges.size() >= 2) {
+ contour->edges[1]->splitInThirds(parts[3-3*corner], parts[4-3*corner], parts[5-3*corner]);
+ parts[0]->color = parts[1]->color = colors[0];
+ parts[2]->color = parts[3]->color = colors[1];
+ parts[4]->color = parts[5]->color = colors[2];
+ } else {
+ parts[0]->color = colors[0];
+ parts[1]->color = colors[1];
+ parts[2]->color = colors[2];
+ }
+ contour->edges.clear();
+ for (int i = 0; parts[i]; ++i)
+ contour->edges.push_back(EdgeHolder(parts[i]));
+ }
+ }
+ // Multiple corners
+ else {
+ int cornerCount = (int) corners.size();
+ int spline = 0;
+ int start = corners[0];
+ int m = (int) contour->edges.size();
+ EdgeColor color = WHITE;
+ switchColor(color, seed);
+ EdgeColor initialColor = color;
+ for (int i = 0; i < m; ++i) {
+ int index = (start+i)%m;
+ if (spline+1 < cornerCount && corners[spline+1] == index) {
+ ++spline;
+ switchColor(color, seed, EdgeColor((spline == cornerCount-1)*initialColor));
+ }
+ contour->edges[index]->color = color;
+ }
+ }
+ }
+}
+
+struct EdgeColoringInkTrapCorner {
+ int index;
+ double prevEdgeLengthEstimate;
+ bool minor;
+ EdgeColor color;
+};
+
+void edgeColoringInkTrap(Shape &shape, double angleThreshold, unsigned long long seed) {
+ typedef EdgeColoringInkTrapCorner Corner;
+ double crossThreshold = sin(angleThreshold);
+ std::vector<Corner> corners;
+ for (std::vector<Contour>::iterator contour = shape.contours.begin(); contour != shape.contours.end(); ++contour) {
+ // Identify corners
+ double splineLength = 0;
+ corners.clear();
+ if (!contour->edges.empty()) {
+ Vector2 prevDirection = contour->edges.back()->direction(1);
+ int index = 0;
+ for (std::vector<EdgeHolder>::const_iterator edge = contour->edges.begin(); edge != contour->edges.end(); ++edge, ++index) {
+ if (isCorner(prevDirection.normalize(), (*edge)->direction(0).normalize(), crossThreshold)) {
+ Corner corner = { index, splineLength };
+ corners.push_back(corner);
+ splineLength = 0;
+ }
+ splineLength += estimateEdgeLength(*edge);
+ prevDirection = (*edge)->direction(1);
+ }
+ }
+
+ // Smooth contour
+ if (corners.empty())
+ for (std::vector<EdgeHolder>::iterator edge = contour->edges.begin(); edge != contour->edges.end(); ++edge)
+ (*edge)->color = WHITE;
+ // "Teardrop" case
+ else if (corners.size() == 1) {
+ EdgeColor colors[3] = { WHITE, WHITE };
+ switchColor(colors[0], seed);
+ switchColor(colors[2] = colors[0], seed);
+ int corner = corners[0].index;
+ if (contour->edges.size() >= 3) {
+ int m = (int) contour->edges.size();
+ for (int i = 0; i < m; ++i)
+ contour->edges[(corner+i)%m]->color = (colors+1)[int(3+2.875*i/(m-1)-1.4375+.5)-3];
+ } else if (contour->edges.size() >= 1) {
+ // Less than three edge segments for three colors => edges must be split
+ EdgeSegment *parts[7] = { };
+ contour->edges[0]->splitInThirds(parts[0+3*corner], parts[1+3*corner], parts[2+3*corner]);
+ if (contour->edges.size() >= 2) {
+ contour->edges[1]->splitInThirds(parts[3-3*corner], parts[4-3*corner], parts[5-3*corner]);
+ parts[0]->color = parts[1]->color = colors[0];
+ parts[2]->color = parts[3]->color = colors[1];
+ parts[4]->color = parts[5]->color = colors[2];
+ } else {
+ parts[0]->color = colors[0];
+ parts[1]->color = colors[1];
+ parts[2]->color = colors[2];
+ }
+ contour->edges.clear();
+ for (int i = 0; parts[i]; ++i)
+ contour->edges.push_back(EdgeHolder(parts[i]));
+ }
+ }
+ // Multiple corners
+ else {
+ int cornerCount = (int) corners.size();
+ int majorCornerCount = cornerCount;
+ if (cornerCount > 3) {
+ corners.begin()->prevEdgeLengthEstimate += splineLength;
+ for (int i = 0; i < cornerCount; ++i) {
+ if (
+ corners[i].prevEdgeLengthEstimate > corners[(i+1)%cornerCount].prevEdgeLengthEstimate &&
+ corners[(i+1)%cornerCount].prevEdgeLengthEstimate < corners[(i+2)%cornerCount].prevEdgeLengthEstimate
+ ) {
+ corners[i].minor = true;
+ --majorCornerCount;
+ }
+ }
+ }
+ EdgeColor color = WHITE;
+ EdgeColor initialColor = BLACK;
+ for (int i = 0; i < cornerCount; ++i) {
+ if (!corners[i].minor) {
+ --majorCornerCount;
+ switchColor(color, seed, EdgeColor(!majorCornerCount*initialColor));
+ corners[i].color = color;
+ if (!initialColor)
+ initialColor = color;
+ }
+ }
+ for (int i = 0; i < cornerCount; ++i) {
+ if (corners[i].minor) {
+ EdgeColor nextColor = corners[(i+1)%cornerCount].color;
+ corners[i].color = EdgeColor((color&nextColor)^WHITE);
+ } else
+ color = corners[i].color;
+ }
+ int spline = 0;
+ int start = corners[0].index;
+ color = corners[0].color;
+ int m = (int) contour->edges.size();
+ for (int i = 0; i < m; ++i) {
+ int index = (start+i)%m;
+ if (spline+1 < cornerCount && corners[spline+1].index == index)
+ color = corners[++spline].color;
+ contour->edges[index]->color = color;
+ }
+ }
+ }
+}
+
+// EDGE COLORING BY DISTANCE - EXPERIMENTAL IMPLEMENTATION - WORK IN PROGRESS
+#define MAX_RECOLOR_STEPS 16
+#define EDGE_DISTANCE_PRECISION 16
+
+static double edgeToEdgeDistance(const EdgeSegment &a, const EdgeSegment &b, int precision) {
+ if (a.point(0) == b.point(0) || a.point(0) == b.point(1) || a.point(1) == b.point(0) || a.point(1) == b.point(1))
+ return 0;
+ double iFac = 1./precision;
+ double minDistance = (b.point(0)-a.point(0)).length();
+ for (int i = 0; i <= precision; ++i) {
+ double t = iFac*i;
+ double d = fabs(a.signedDistance(b.point(t), t).distance);
+ minDistance = min(minDistance, d);
+ }
+ for (int i = 0; i <= precision; ++i) {
+ double t = iFac*i;
+ double d = fabs(b.signedDistance(a.point(t), t).distance);
+ minDistance = min(minDistance, d);
+ }
+ return minDistance;
+}
+
+static double splineToSplineDistance(EdgeSegment * const *edgeSegments, int aStart, int aEnd, int bStart, int bEnd, int precision) {
+ double minDistance = fabs(SignedDistance::INFINITE.distance);
+ for (int ai = aStart; ai < aEnd; ++ai)
+ for (int bi = bStart; bi < bEnd && minDistance; ++bi) {
+ double d = edgeToEdgeDistance(*edgeSegments[ai], *edgeSegments[bi], precision);
+ minDistance = min(minDistance, d);
+ }
+ return minDistance;
+}
+
+static void colorSecondDegreeGraph(int *coloring, const int * const *edgeMatrix, int vertexCount, unsigned long long seed) {
+ for (int i = 0; i < vertexCount; ++i) {
+ int possibleColors = 7;
+ for (int j = 0; j < i; ++j) {
+ if (edgeMatrix[i][j])
+ possibleColors &= ~(1<<coloring[j]);
+ }
+ int color = 0;
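+ // possibleColors is a bitmask over the three colors {0, 1, 2}; when more than one remains, consume seed bits to choose among them.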
+ switch (possibleColors) {
+ case 1:
+ color = 0;
+ break;
+ case 2:
+ color = 1;
+ break;
+ case 3:
+ color = (int) seed&1;
+ seed >>= 1;
+ break;
+ case 4:
+ color = 2;
+ break;
+ case 5:
+ color = ((int) seed+1&1)<<1;
+ seed >>= 1;
+ break;
+ case 6:
+ color = ((int) seed&1)+1;
+ seed >>= 1;
+ break;
+ case 7:
+ color = int((seed+i)%3);
+ seed /= 3;
+ break;
+ }
+ coloring[i] = color;
+ }
+}
+
+static int vertexPossibleColors(const int *coloring, const int *edgeVector, int vertexCount) {
+ int usedColors = 0;
+ for (int i = 0; i < vertexCount; ++i)
+ if (edgeVector[i])
+ usedColors |= 1<<coloring[i];
+ return 7&~usedColors;
+}
+
+static void uncolorSameNeighbors(std::queue<int> &uncolored, int *coloring, const int * const *edgeMatrix, int vertex, int vertexCount) {
+ for (int i = vertex+1; i < vertexCount; ++i) {
+ if (edgeMatrix[vertex][i] && coloring[i] == coloring[vertex]) {
+ coloring[i] = -1;
+ uncolored.push(i);
+ }
+ }
+ for (int i = 0; i < vertex; ++i) {
+ if (edgeMatrix[vertex][i] && coloring[i] == coloring[vertex]) {
+ coloring[i] = -1;
+ uncolored.push(i);
+ }
+ }
+}
+
+static bool tryAddEdge(int *coloring, int * const *edgeMatrix, int vertexCount, int vertexA, int vertexB, int *coloringBuffer) {
+ static const int FIRST_POSSIBLE_COLOR[8] = { -1, 0, 1, 0, 2, 2, 1, 0 };
+ edgeMatrix[vertexA][vertexB] = 1;
+ edgeMatrix[vertexB][vertexA] = 1;
+ if (coloring[vertexA] != coloring[vertexB])
+ return true;
+ int bPossibleColors = vertexPossibleColors(coloring, edgeMatrix[vertexB], vertexCount);
+ if (bPossibleColors) {
+ coloring[vertexB] = FIRST_POSSIBLE_COLOR[bPossibleColors];
+ return true;
+ }
+ memcpy(coloringBuffer, coloring, sizeof(int)*vertexCount);
+ std::queue<int> uncolored;
+ {
+ int *coloring = coloringBuffer;
+ coloring[vertexB] = FIRST_POSSIBLE_COLOR[7&~(1<<coloring[vertexA])];
+ uncolorSameNeighbors(uncolored, coloring, edgeMatrix, vertexB, vertexCount);
+ int step = 0;
+ while (!uncolored.empty() && step < MAX_RECOLOR_STEPS) {
+ int i = uncolored.front();
+ uncolored.pop();
+ int possibleColors = vertexPossibleColors(coloring, edgeMatrix[i], vertexCount);
+ if (possibleColors) {
+ coloring[i] = FIRST_POSSIBLE_COLOR[possibleColors];
+ continue;
+ }
+ do {
+ coloring[i] = step++%3;
+ } while (edgeMatrix[i][vertexA] && coloring[i] == coloring[vertexA]);
+ uncolorSameNeighbors(uncolored, coloring, edgeMatrix, i, vertexCount);
+ }
+ }
+ if (!uncolored.empty()) {
+ edgeMatrix[vertexA][vertexB] = 0;
+ edgeMatrix[vertexB][vertexA] = 0;
+ return false;
+ }
+ memcpy(coloring, coloringBuffer, sizeof(int)*vertexCount);
+ return true;
+}
+
+static int cmpDoublePtr(const void *a, const void *b) {
+ return sign(**reinterpret_cast<const double * const *>(a)-**reinterpret_cast<const double * const *>(b));
+}
+
+void edgeColoringByDistance(Shape &shape, double angleThreshold, unsigned long long seed) {
+
+ std::vector<EdgeSegment *> edgeSegments;
+ std::vector<int> splineStarts;
+
+ double crossThreshold = sin(angleThreshold);
+ std::vector<int> corners;
+ for (std::vector<Contour>::iterator contour = shape.contours.begin(); contour != shape.contours.end(); ++contour)
+ if (!contour->edges.empty()) {
+ // Identify corners
+ corners.clear();
+ Vector2 prevDirection = contour->edges.back()->direction(1);
+ int index = 0;
+ for (std::vector<EdgeHolder>::const_iterator edge = contour->edges.begin(); edge != contour->edges.end(); ++edge, ++index) {
+ if (isCorner(prevDirection.normalize(), (*edge)->direction(0).normalize(), crossThreshold))
+ corners.push_back(index);
+ prevDirection = (*edge)->direction(1);
+ }
+
+ splineStarts.push_back((int) edgeSegments.size());
+ // Smooth contour
+ if (corners.empty())
+ for (std::vector<EdgeHolder>::iterator edge = contour->edges.begin(); edge != contour->edges.end(); ++edge)
+ edgeSegments.push_back(&**edge);
+ // "Teardrop" case
+ else if (corners.size() == 1) {
+ int corner = corners[0];
+ if (contour->edges.size() >= 3) {
+ int m = (int) contour->edges.size();
+ for (int i = 0; i < m; ++i) {
+ if (i == m/2)
+ splineStarts.push_back((int) edgeSegments.size());
+ if (int(3+2.875*i/(m-1)-1.4375+.5)-3)
+ edgeSegments.push_back(&*contour->edges[(corner+i)%m]);
+ else
+ contour->edges[(corner+i)%m]->color = WHITE;
+ }
+ } else if (contour->edges.size() >= 1) {
+ // Less than three edge segments for three colors => edges must be split
+ EdgeSegment *parts[7] = { };
+ contour->edges[0]->splitInThirds(parts[0+3*corner], parts[1+3*corner], parts[2+3*corner]);
+ if (contour->edges.size() >= 2) {
+ contour->edges[1]->splitInThirds(parts[3-3*corner], parts[4-3*corner], parts[5-3*corner]);
+ edgeSegments.push_back(parts[0]);
+ edgeSegments.push_back(parts[1]);
+ parts[2]->color = parts[3]->color = WHITE;
+ splineStarts.push_back((int) edgeSegments.size());
+ edgeSegments.push_back(parts[4]);
+ edgeSegments.push_back(parts[5]);
+ } else {
+ edgeSegments.push_back(parts[0]);
+ parts[1]->color = WHITE;
+ splineStarts.push_back((int) edgeSegments.size());
+ edgeSegments.push_back(parts[2]);
+ }
+ contour->edges.clear();
+ for (int i = 0; parts[i]; ++i)
+ contour->edges.push_back(EdgeHolder(parts[i]));
+ }
+ }
+ // Multiple corners
+ else {
+ int cornerCount = (int) corners.size();
+ int spline = 0;
+ int start = corners[0];
+ int m = (int) contour->edges.size();
+ for (int i = 0; i < m; ++i) {
+ int index = (start+i)%m;
+ if (spline+1 < cornerCount && corners[spline+1] == index) {
+ splineStarts.push_back((int) edgeSegments.size());
+ ++spline;
+ }
+ edgeSegments.push_back(&*contour->edges[index]);
+ }
+ }
+ }
+ splineStarts.push_back((int) edgeSegments.size());
+
+ int segmentCount = (int) edgeSegments.size();
+ int splineCount = (int) splineStarts.size()-1;
+ if (!splineCount)
+ return;
+
+ std::vector<double> distanceMatrixStorage(splineCount*splineCount);
+ std::vector<double *> distanceMatrix(splineCount);
+ for (int i = 0; i < splineCount; ++i)
+ distanceMatrix[i] = &distanceMatrixStorage[i*splineCount];
+ const double *distanceMatrixBase = &distanceMatrixStorage[0];
+
+ for (int i = 0; i < splineCount; ++i) {
+ distanceMatrix[i][i] = -1;
+ for (int j = i+1; j < splineCount; ++j) {
+ double dist = splineToSplineDistance(&edgeSegments[0], splineStarts[i], splineStarts[i+1], splineStarts[j], splineStarts[j+1], EDGE_DISTANCE_PRECISION);
+ distanceMatrix[i][j] = dist;
+ distanceMatrix[j][i] = dist;
+ }
+ }
+
+ std::vector<const double *> graphEdgeDistances;
+ graphEdgeDistances.reserve(splineCount*(splineCount-1)/2);
+ for (int i = 0; i < splineCount; ++i)
+ for (int j = i+1; j < splineCount; ++j)
+ graphEdgeDistances.push_back(&distanceMatrix[i][j]);
+ int graphEdgeCount = (int) graphEdgeDistances.size();
+ if (!graphEdgeDistances.empty())
+ qsort(&graphEdgeDistances[0], graphEdgeDistances.size(), sizeof(const double *), &cmpDoublePtr);
+
+ std::vector<int> edgeMatrixStorage(splineCount*splineCount);
+ std::vector<int *> edgeMatrix(splineCount);
+ for (int i = 0; i < splineCount; ++i)
+ edgeMatrix[i] = &edgeMatrixStorage[i*splineCount];
+ int nextEdge = 0;
+ for (; nextEdge < graphEdgeCount && !*graphEdgeDistances[nextEdge]; ++nextEdge) {
+ int elem = graphEdgeDistances[nextEdge]-distanceMatrixBase;
+ int row = elem/splineCount;
+ int col = elem%splineCount;
+ edgeMatrix[row][col] = 1;
+ edgeMatrix[col][row] = 1;
+ }
+
+ std::vector<int> coloring(2*splineCount);
+ colorSecondDegreeGraph(&coloring[0], &edgeMatrix[0], splineCount, seed);
+ for (; nextEdge < graphEdgeCount; ++nextEdge) {
+ int elem = graphEdgeDistances[nextEdge]-distanceMatrixBase;
+ tryAddEdge(&coloring[0], &edgeMatrix[0], splineCount, elem/splineCount, elem%splineCount, &coloring[splineCount]);
+ }
+
+ const EdgeColor colors[3] = { YELLOW, CYAN, MAGENTA };
+ int spline = -1;
+ for (int i = 0; i < segmentCount; ++i) {
+ if (splineStarts[spline+1] == i)
+ ++spline;
+ edgeSegments[i]->color = colors[coloring[spline]];
+ }
+}
+
+}
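
Whichever strategy is used, coloring happens once per shape, before any distance field is generated. A minimal usage sketch, assuming the shape has already been loaded elsewhere:

    #include "edge-coloring.h"

    // Minimal sketch: normalize the shape, then assign edge colors.
    // 3.0 radians is a commonly used angle threshold (see the header below).
    static void prepareForMSDF(msdfgen::Shape &shape) {
        shape.normalize();
        msdfgen::edgeColoringSimple(shape, 3.0);
    }
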
diff --git a/thirdparty/msdfgen/core/edge-coloring.h b/thirdparty/msdfgen/core/edge-coloring.h
new file mode 100644
index 0000000000..ffd5e6dce8
--- /dev/null
+++ b/thirdparty/msdfgen/core/edge-coloring.h
@@ -0,0 +1,29 @@
+
+#pragma once
+
+#include "Shape.h"
+
+#define MSDFGEN_EDGE_LENGTH_PRECISION 4
+
+namespace msdfgen {
+
+/** Assigns colors to edges of the shape in accordance with the multi-channel distance field technique.
+ * May split some edges if necessary.
+ * angleThreshold specifies the maximum angle (in radians) to be considered a corner, for example 3 (~172 degrees).
+ * Values below 1/2 PI will be treated as the external angle.
+ */
+void edgeColoringSimple(Shape &shape, double angleThreshold, unsigned long long seed = 0);
+
+/** The alternative "ink trap" coloring strategy is designed for better results with typefaces
+ * that use ink traps as a design feature. It guarantees that even if all edges that are shorter than
+ * both their neighboring edges are removed, the coloring remains consistent with the established rules.
+ */
+void edgeColoringInkTrap(Shape &shape, double angleThreshold, unsigned long long seed = 0);
+
+/** The alternative coloring by distance tries to use different colors for edges that are close together.
+ * This should theoretically be the best strategy on average. However, since it needs to compute the distance
+ * between all pairs of edges and perform a graph optimization task, it is much slower than the rest.
+ */
+void edgeColoringByDistance(Shape &shape, double angleThreshold, unsigned long long seed = 0);
+
+}
diff --git a/thirdparty/msdfgen/core/edge-segments.cpp b/thirdparty/msdfgen/core/edge-segments.cpp
new file mode 100644
index 0000000000..5274a9a5a1
--- /dev/null
+++ b/thirdparty/msdfgen/core/edge-segments.cpp
@@ -0,0 +1,504 @@
+
+#include "edge-segments.h"
+
+#include "arithmetics.hpp"
+#include "equation-solver.h"
+
+namespace msdfgen {
+
+void EdgeSegment::distanceToPseudoDistance(SignedDistance &distance, Point2 origin, double param) const {
+ if (param < 0) {
+ Vector2 dir = direction(0).normalize();
+ Vector2 aq = origin-point(0);
+ double ts = dotProduct(aq, dir);
+ if (ts < 0) {
+ double pseudoDistance = crossProduct(aq, dir);
+ if (fabs(pseudoDistance) <= fabs(distance.distance)) {
+ distance.distance = pseudoDistance;
+ distance.dot = 0;
+ }
+ }
+ } else if (param > 1) {
+ Vector2 dir = direction(1).normalize();
+ Vector2 bq = origin-point(1);
+ double ts = dotProduct(bq, dir);
+ if (ts > 0) {
+ double pseudoDistance = crossProduct(bq, dir);
+ if (fabs(pseudoDistance) <= fabs(distance.distance)) {
+ distance.distance = pseudoDistance;
+ distance.dot = 0;
+ }
+ }
+ }
+}
+
+LinearSegment::LinearSegment(Point2 p0, Point2 p1, EdgeColor edgeColor) : EdgeSegment(edgeColor) {
+ p[0] = p0;
+ p[1] = p1;
+}
+
+QuadraticSegment::QuadraticSegment(Point2 p0, Point2 p1, Point2 p2, EdgeColor edgeColor) : EdgeSegment(edgeColor) {
+ if (p1 == p0 || p1 == p2)
+ p1 = 0.5*(p0+p2);
+ p[0] = p0;
+ p[1] = p1;
+ p[2] = p2;
+}
+
+CubicSegment::CubicSegment(Point2 p0, Point2 p1, Point2 p2, Point2 p3, EdgeColor edgeColor) : EdgeSegment(edgeColor) {
+ if ((p1 == p0 || p1 == p3) && (p2 == p0 || p2 == p3)) {
+ p1 = mix(p0, p3, 1/3.);
+ p2 = mix(p0, p3, 2/3.);
+ }
+ p[0] = p0;
+ p[1] = p1;
+ p[2] = p2;
+ p[3] = p3;
+}
+
+LinearSegment * LinearSegment::clone() const {
+ return new LinearSegment(p[0], p[1], color);
+}
+
+QuadraticSegment * QuadraticSegment::clone() const {
+ return new QuadraticSegment(p[0], p[1], p[2], color);
+}
+
+CubicSegment * CubicSegment::clone() const {
+ return new CubicSegment(p[0], p[1], p[2], p[3], color);
+}
+
+Point2 LinearSegment::point(double param) const {
+ return mix(p[0], p[1], param);
+}
+
+Point2 QuadraticSegment::point(double param) const {
+ return mix(mix(p[0], p[1], param), mix(p[1], p[2], param), param);
+}
+
+Point2 CubicSegment::point(double param) const {
+ Vector2 p12 = mix(p[1], p[2], param);
+ return mix(mix(mix(p[0], p[1], param), p12, param), mix(p12, mix(p[2], p[3], param), param), param);
+}
+
+Vector2 LinearSegment::direction(double param) const {
+ return p[1]-p[0];
+}
+
+Vector2 QuadraticSegment::direction(double param) const {
+ Vector2 tangent = mix(p[1]-p[0], p[2]-p[1], param);
+ if (!tangent)
+ return p[2]-p[0];
+ return tangent;
+}
+
+Vector2 CubicSegment::direction(double param) const {
+ Vector2 tangent = mix(mix(p[1]-p[0], p[2]-p[1], param), mix(p[2]-p[1], p[3]-p[2], param), param);
+ if (!tangent) {
+ if (param == 0) return p[2]-p[0];
+ if (param == 1) return p[3]-p[1];
+ }
+ return tangent;
+}
+
+Vector2 LinearSegment::directionChange(double param) const {
+ return Vector2();
+}
+
+Vector2 QuadraticSegment::directionChange(double param) const {
+ return (p[2]-p[1])-(p[1]-p[0]);
+}
+
+Vector2 CubicSegment::directionChange(double param) const {
+ return mix((p[2]-p[1])-(p[1]-p[0]), (p[3]-p[2])-(p[2]-p[1]), param);
+}
+
+double LinearSegment::length() const {
+ return (p[1]-p[0]).length();
+}
+
+double QuadraticSegment::length() const {
+ Vector2 ab = p[1]-p[0];
+ Vector2 br = p[2]-p[1]-ab;
+ double abab = dotProduct(ab, ab);
+ double abbr = dotProduct(ab, br);
+ double brbr = dotProduct(br, br);
+ double abLen = sqrt(abab);
+ double brLen = sqrt(brbr);
+ double crs = crossProduct(ab, br);
+ double h = sqrt(abab+abbr+abbr+brbr);
+ return (
+ brLen*((abbr+brbr)*h-abbr*abLen)+
+ crs*crs*log((brLen*h+abbr+brbr)/(brLen*abLen+abbr))
+ )/(brbr*brLen);
+}
+
+SignedDistance LinearSegment::signedDistance(Point2 origin, double &param) const {
+ Vector2 aq = origin-p[0];
+ Vector2 ab = p[1]-p[0];
+ param = dotProduct(aq, ab)/dotProduct(ab, ab);
+ Vector2 eq = p[param > .5]-origin;
+ double endpointDistance = eq.length();
+ if (param > 0 && param < 1) {
+ double orthoDistance = dotProduct(ab.getOrthonormal(false), aq);
+ if (fabs(orthoDistance) < endpointDistance)
+ return SignedDistance(orthoDistance, 0);
+ }
+ return SignedDistance(nonZeroSign(crossProduct(aq, ab))*endpointDistance, fabs(dotProduct(ab.normalize(), eq.normalize())));
+}
+
+SignedDistance QuadraticSegment::signedDistance(Point2 origin, double &param) const {
+ Vector2 qa = p[0]-origin;
+ Vector2 ab = p[1]-p[0];
+ Vector2 br = p[2]-p[1]-ab;
+ double a = dotProduct(br, br);
+ double b = 3*dotProduct(ab, br);
+ double c = 2*dotProduct(ab, ab)+dotProduct(qa, br);
+ double d = dotProduct(qa, ab);
+ double t[3];
+ int solutions = solveCubic(t, a, b, c, d);
+
+ Vector2 epDir = direction(0);
+ double minDistance = nonZeroSign(crossProduct(epDir, qa))*qa.length(); // distance from A
+ param = -dotProduct(qa, epDir)/dotProduct(epDir, epDir);
+ {
+ epDir = direction(1);
+ double distance = (p[2]-origin).length(); // distance from B
+ if (distance < fabs(minDistance)) {
+ minDistance = nonZeroSign(crossProduct(epDir, p[2]-origin))*distance;
+ param = dotProduct(origin-p[1], epDir)/dotProduct(epDir, epDir);
+ }
+ }
+ for (int i = 0; i < solutions; ++i) {
+ if (t[i] > 0 && t[i] < 1) {
+ Point2 qe = qa+2*t[i]*ab+t[i]*t[i]*br;
+ double distance = qe.length();
+ if (distance <= fabs(minDistance)) {
+ minDistance = nonZeroSign(crossProduct(ab+t[i]*br, qe))*distance;
+ param = t[i];
+ }
+ }
+ }
+
+ if (param >= 0 && param <= 1)
+ return SignedDistance(minDistance, 0);
+ if (param < .5)
+ return SignedDistance(minDistance, fabs(dotProduct(direction(0).normalize(), qa.normalize())));
+ else
+ return SignedDistance(minDistance, fabs(dotProduct(direction(1).normalize(), (p[2]-origin).normalize())));
+}
+
+SignedDistance CubicSegment::signedDistance(Point2 origin, double &param) const {
+ Vector2 qa = p[0]-origin;
+ Vector2 ab = p[1]-p[0];
+ Vector2 br = p[2]-p[1]-ab;
+ Vector2 as = (p[3]-p[2])-(p[2]-p[1])-br;
+
+ Vector2 epDir = direction(0);
+ double minDistance = nonZeroSign(crossProduct(epDir, qa))*qa.length(); // distance from A
+ param = -dotProduct(qa, epDir)/dotProduct(epDir, epDir);
+ {
+ epDir = direction(1);
+ double distance = (p[3]-origin).length(); // distance from B
+ if (distance < fabs(minDistance)) {
+ minDistance = nonZeroSign(crossProduct(epDir, p[3]-origin))*distance;
+ param = dotProduct(epDir-(p[3]-origin), epDir)/dotProduct(epDir, epDir);
+ }
+ }
+ // Iterative minimum distance search
+ for (int i = 0; i <= MSDFGEN_CUBIC_SEARCH_STARTS; ++i) {
+ double t = (double) i/MSDFGEN_CUBIC_SEARCH_STARTS;
+ Vector2 qe = qa+3*t*ab+3*t*t*br+t*t*t*as;
+ for (int step = 0; step < MSDFGEN_CUBIC_SEARCH_STEPS; ++step) {
+ // Improve t
+ Vector2 d1 = 3*ab+6*t*br+3*t*t*as;
+ Vector2 d2 = 6*br+6*t*as;
+ t -= dotProduct(qe, d1)/(dotProduct(d1, d1)+dotProduct(qe, d2));
+ if (t <= 0 || t >= 1)
+ break;
+ qe = qa+3*t*ab+3*t*t*br+t*t*t*as;
+ double distance = qe.length();
+ if (distance < fabs(minDistance)) {
+ minDistance = nonZeroSign(crossProduct(d1, qe))*distance;
+ param = t;
+ }
+ }
+ }
+
+ if (param >= 0 && param <= 1)
+ return SignedDistance(minDistance, 0);
+ if (param < .5)
+ return SignedDistance(minDistance, fabs(dotProduct(direction(0).normalize(), qa.normalize())));
+ else
+ return SignedDistance(minDistance, fabs(dotProduct(direction(1).normalize(), (p[3]-origin).normalize())));
+}
+
+int LinearSegment::scanlineIntersections(double x[3], int dy[3], double y) const {
+ if ((y >= p[0].y && y < p[1].y) || (y >= p[1].y && y < p[0].y)) {
+ double param = (y-p[0].y)/(p[1].y-p[0].y);
+ x[0] = mix(p[0].x, p[1].x, param);
+ dy[0] = sign(p[1].y-p[0].y);
+ return 1;
+ }
+ return 0;
+}
+
+int QuadraticSegment::scanlineIntersections(double x[3], int dy[3], double y) const {
+ int total = 0;
+ int nextDY = y > p[0].y ? 1 : -1;
+ x[total] = p[0].x;
+ if (p[0].y == y) {
+ if (p[0].y < p[1].y || (p[0].y == p[1].y && p[0].y < p[2].y))
+ dy[total++] = 1;
+ else
+ nextDY = 1;
+ }
+ {
+ Vector2 ab = p[1]-p[0];
+ Vector2 br = p[2]-p[1]-ab;
+ double t[2];
+ int solutions = solveQuadratic(t, br.y, 2*ab.y, p[0].y-y);
+ // Sort solutions
+ double tmp;
+ if (solutions >= 2 && t[0] > t[1])
+ tmp = t[0], t[0] = t[1], t[1] = tmp;
+ for (int i = 0; i < solutions && total < 2; ++i) {
+ if (t[i] >= 0 && t[i] <= 1) {
+ x[total] = p[0].x+2*t[i]*ab.x+t[i]*t[i]*br.x;
+ if (nextDY*(ab.y+t[i]*br.y) >= 0) {
+ dy[total++] = nextDY;
+ nextDY = -nextDY;
+ }
+ }
+ }
+ }
+ if (p[2].y == y) {
+ if (nextDY > 0 && total > 0) {
+ --total;
+ nextDY = -1;
+ }
+ if ((p[2].y < p[1].y || (p[2].y == p[1].y && p[2].y < p[0].y)) && total < 2) {
+ x[total] = p[2].x;
+ if (nextDY < 0) {
+ dy[total++] = -1;
+ nextDY = 1;
+ }
+ }
+ }
+ if (nextDY != (y >= p[2].y ? 1 : -1)) {
+ if (total > 0)
+ --total;
+ else {
+ if (fabs(p[2].y-y) < fabs(p[0].y-y))
+ x[total] = p[2].x;
+ dy[total++] = nextDY;
+ }
+ }
+ return total;
+}
+
+int CubicSegment::scanlineIntersections(double x[3], int dy[3], double y) const {
+ int total = 0;
+ int nextDY = y > p[0].y ? 1 : -1;
+ x[total] = p[0].x;
+ if (p[0].y == y) {
+ if (p[0].y < p[1].y || (p[0].y == p[1].y && (p[0].y < p[2].y || (p[0].y == p[2].y && p[0].y < p[3].y))))
+ dy[total++] = 1;
+ else
+ nextDY = 1;
+ }
+ {
+ Vector2 ab = p[1]-p[0];
+ Vector2 br = p[2]-p[1]-ab;
+ Vector2 as = (p[3]-p[2])-(p[2]-p[1])-br;
+ double t[3];
+ int solutions = solveCubic(t, as.y, 3*br.y, 3*ab.y, p[0].y-y);
+ // Sort solutions
+ double tmp;
+ if (solutions >= 2) {
+ if (t[0] > t[1])
+ tmp = t[0], t[0] = t[1], t[1] = tmp;
+ if (solutions >= 3 && t[1] > t[2]) {
+ tmp = t[1], t[1] = t[2], t[2] = tmp;
+ if (t[0] > t[1])
+ tmp = t[0], t[0] = t[1], t[1] = tmp;
+ }
+ }
+ for (int i = 0; i < solutions && total < 3; ++i) {
+ if (t[i] >= 0 && t[i] <= 1) {
+ x[total] = p[0].x+3*t[i]*ab.x+3*t[i]*t[i]*br.x+t[i]*t[i]*t[i]*as.x;
+ if (nextDY*(ab.y+2*t[i]*br.y+t[i]*t[i]*as.y) >= 0) {
+ dy[total++] = nextDY;
+ nextDY = -nextDY;
+ }
+ }
+ }
+ }
+ if (p[3].y == y) {
+ if (nextDY > 0 && total > 0) {
+ --total;
+ nextDY = -1;
+ }
+ if ((p[3].y < p[2].y || (p[3].y == p[2].y && (p[3].y < p[1].y || (p[3].y == p[1].y && p[3].y < p[0].y)))) && total < 3) {
+ x[total] = p[3].x;
+ if (nextDY < 0) {
+ dy[total++] = -1;
+ nextDY = 1;
+ }
+ }
+ }
+ if (nextDY != (y >= p[3].y ? 1 : -1)) {
+ if (total > 0)
+ --total;
+ else {
+ if (fabs(p[3].y-y) < fabs(p[0].y-y))
+ x[total] = p[3].x;
+ dy[total++] = nextDY;
+ }
+ }
+ return total;
+}
+
+static void pointBounds(Point2 p, double &l, double &b, double &r, double &t) {
+ if (p.x < l) l = p.x;
+ if (p.y < b) b = p.y;
+ if (p.x > r) r = p.x;
+ if (p.y > t) t = p.y;
+}
+
+void LinearSegment::bound(double &l, double &b, double &r, double &t) const {
+ pointBounds(p[0], l, b, r, t);
+ pointBounds(p[1], l, b, r, t);
+}
+
+void QuadraticSegment::bound(double &l, double &b, double &r, double &t) const {
+ pointBounds(p[0], l, b, r, t);
+ pointBounds(p[2], l, b, r, t);
+ Vector2 bot = (p[1]-p[0])-(p[2]-p[1]);
+ if (bot.x) {
+ double param = (p[1].x-p[0].x)/bot.x;
+ if (param > 0 && param < 1)
+ pointBounds(point(param), l, b, r, t);
+ }
+ if (bot.y) {
+ double param = (p[1].y-p[0].y)/bot.y;
+ if (param > 0 && param < 1)
+ pointBounds(point(param), l, b, r, t);
+ }
+}
+
+void CubicSegment::bound(double &l, double &b, double &r, double &t) const {
+ pointBounds(p[0], l, b, r, t);
+ pointBounds(p[3], l, b, r, t);
+ Vector2 a0 = p[1]-p[0];
+ Vector2 a1 = 2*(p[2]-p[1]-a0);
+ Vector2 a2 = p[3]-3*p[2]+3*p[1]-p[0];
+ double params[2];
+ int solutions;
+ solutions = solveQuadratic(params, a2.x, a1.x, a0.x);
+ for (int i = 0; i < solutions; ++i)
+ if (params[i] > 0 && params[i] < 1)
+ pointBounds(point(params[i]), l, b, r, t);
+ solutions = solveQuadratic(params, a2.y, a1.y, a0.y);
+ for (int i = 0; i < solutions; ++i)
+ if (params[i] > 0 && params[i] < 1)
+ pointBounds(point(params[i]), l, b, r, t);
+}
+
+void LinearSegment::reverse() {
+ Point2 tmp = p[0];
+ p[0] = p[1];
+ p[1] = tmp;
+}
+
+void QuadraticSegment::reverse() {
+ Point2 tmp = p[0];
+ p[0] = p[2];
+ p[2] = tmp;
+}
+
+void CubicSegment::reverse() {
+ Point2 tmp = p[0];
+ p[0] = p[3];
+ p[3] = tmp;
+ tmp = p[1];
+ p[1] = p[2];
+ p[2] = tmp;
+}
+
+void LinearSegment::moveStartPoint(Point2 to) {
+ p[0] = to;
+}
+
+void QuadraticSegment::moveStartPoint(Point2 to) {
+ Vector2 origSDir = p[0]-p[1];
+ Point2 origP1 = p[1];
+ p[1] += crossProduct(p[0]-p[1], to-p[0])/crossProduct(p[0]-p[1], p[2]-p[1])*(p[2]-p[1]);
+ p[0] = to;
+ if (dotProduct(origSDir, p[0]-p[1]) < 0)
+ p[1] = origP1;
+}
+
+void CubicSegment::moveStartPoint(Point2 to) {
+ p[1] += to-p[0];
+ p[0] = to;
+}
+
+void LinearSegment::moveEndPoint(Point2 to) {
+ p[1] = to;
+}
+
+void QuadraticSegment::moveEndPoint(Point2 to) {
+ Vector2 origEDir = p[2]-p[1];
+ Point2 origP1 = p[1];
+ p[1] += crossProduct(p[2]-p[1], to-p[2])/crossProduct(p[2]-p[1], p[0]-p[1])*(p[0]-p[1]);
+ p[2] = to;
+ if (dotProduct(origEDir, p[2]-p[1]) < 0)
+ p[1] = origP1;
+}
+
+void CubicSegment::moveEndPoint(Point2 to) {
+ p[2] += to-p[3];
+ p[3] = to;
+}
+
+void LinearSegment::splitInThirds(EdgeSegment *&part1, EdgeSegment *&part2, EdgeSegment *&part3) const {
+ part1 = new LinearSegment(p[0], point(1/3.), color);
+ part2 = new LinearSegment(point(1/3.), point(2/3.), color);
+ part3 = new LinearSegment(point(2/3.), p[1], color);
+}
+
+void QuadraticSegment::splitInThirds(EdgeSegment *&part1, EdgeSegment *&part2, EdgeSegment *&part3) const {
+ part1 = new QuadraticSegment(p[0], mix(p[0], p[1], 1/3.), point(1/3.), color);
+ part2 = new QuadraticSegment(point(1/3.), mix(mix(p[0], p[1], 5/9.), mix(p[1], p[2], 4/9.), .5), point(2/3.), color);
+ part3 = new QuadraticSegment(point(2/3.), mix(p[1], p[2], 2/3.), p[2], color);
+}
+
+void CubicSegment::splitInThirds(EdgeSegment *&part1, EdgeSegment *&part2, EdgeSegment *&part3) const {
+ part1 = new CubicSegment(p[0], p[0] == p[1] ? p[0] : mix(p[0], p[1], 1/3.), mix(mix(p[0], p[1], 1/3.), mix(p[1], p[2], 1/3.), 1/3.), point(1/3.), color);
+ part2 = new CubicSegment(point(1/3.),
+ mix(mix(mix(p[0], p[1], 1/3.), mix(p[1], p[2], 1/3.), 1/3.), mix(mix(p[1], p[2], 1/3.), mix(p[2], p[3], 1/3.), 1/3.), 2/3.),
+ mix(mix(mix(p[0], p[1], 2/3.), mix(p[1], p[2], 2/3.), 2/3.), mix(mix(p[1], p[2], 2/3.), mix(p[2], p[3], 2/3.), 2/3.), 1/3.),
+ point(2/3.), color);
+ part3 = new CubicSegment(point(2/3.), mix(mix(p[1], p[2], 2/3.), mix(p[2], p[3], 2/3.), 2/3.), p[2] == p[3] ? p[3] : mix(p[2], p[3], 2/3.), p[3], color);
+}
+
+EdgeSegment * QuadraticSegment::convertToCubic() const {
+ return new CubicSegment(p[0], mix(p[0], p[1], 2/3.), mix(p[1], p[2], 1/3.), p[2], color);
+}
+
+void CubicSegment::deconverge(int param, double amount) {
+ Vector2 dir = direction(param);
+ Vector2 normal = dir.getOrthonormal();
+ double h = dotProduct(directionChange(param)-dir, normal);
+ switch (param) {
+ case 0:
+ p[1] += amount*(dir+sign(h)*sqrt(fabs(h))*normal);
+ break;
+ case 1:
+ p[2] -= amount*(dir-sign(h)*sqrt(fabs(h))*normal);
+ break;
+ }
+}
+
+}
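
Each segment type implements the same distance query: the magnitude is the Euclidean distance to the nearest point of the curve, the sign tells which side of the oriented edge the origin lies on, and param receives the curve parameter of that nearest point. A small sketch with hypothetical control points:

    #include "edge-segments.h"

    static double exampleSignedDistance() {
        msdfgen::QuadraticSegment seg(msdfgen::Point2(0, 0), msdfgen::Point2(1, 2), msdfgen::Point2(2, 0));
        double param = 0;
        msdfgen::SignedDistance sd = seg.signedDistance(msdfgen::Point2(1, 0), param);
        return sd.distance; // sign depends on the edge's orientation relative to the origin
    }
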
diff --git a/thirdparty/msdfgen/core/edge-segments.h b/thirdparty/msdfgen/core/edge-segments.h
new file mode 100644
index 0000000000..1c8fb599ff
--- /dev/null
+++ b/thirdparty/msdfgen/core/edge-segments.h
@@ -0,0 +1,122 @@
+
+#pragma once
+
+#include "Vector2.h"
+#include "SignedDistance.h"
+#include "EdgeColor.h"
+
+namespace msdfgen {
+
+// Parameters for iterative search of closest point on a cubic Bezier curve. Increase for higher precision.
+#define MSDFGEN_CUBIC_SEARCH_STARTS 4
+#define MSDFGEN_CUBIC_SEARCH_STEPS 4
+
+/// An abstract edge segment.
+class EdgeSegment {
+
+public:
+ EdgeColor color;
+
+ EdgeSegment(EdgeColor edgeColor = WHITE) : color(edgeColor) { }
+ virtual ~EdgeSegment() { }
+ /// Creates a copy of the edge segment.
+ virtual EdgeSegment * clone() const = 0;
+ /// Returns the point on the edge specified by the parameter (between 0 and 1).
+ virtual Point2 point(double param) const = 0;
+ /// Returns the direction the edge has at the point specified by the parameter.
+ virtual Vector2 direction(double param) const = 0;
+ /// Returns the change of direction (second derivative) at the point specified by the parameter.
+ virtual Vector2 directionChange(double param) const = 0;
+ /// Returns the minimum signed distance between origin and the edge.
+ virtual SignedDistance signedDistance(Point2 origin, double &param) const = 0;
+ /// Converts a previously retrieved signed distance from origin to pseudo-distance.
+ virtual void distanceToPseudoDistance(SignedDistance &distance, Point2 origin, double param) const;
+ /// Outputs a list of (at most three) intersections (their X coordinates) with an infinite horizontal scanline at y and returns how many there are.
+ virtual int scanlineIntersections(double x[3], int dy[3], double y) const = 0;
+ /// Adjusts the bounding box to fit the edge segment.
+ virtual void bound(double &l, double &b, double &r, double &t) const = 0;
+
+ /// Reverses the edge (swaps its start point and end point).
+ virtual void reverse() = 0;
+ /// Moves the start point of the edge segment.
+ virtual void moveStartPoint(Point2 to) = 0;
+ /// Moves the end point of the edge segment.
+ virtual void moveEndPoint(Point2 to) = 0;
+ /// Splits the edge segment into thirds which together represent the original edge.
+ virtual void splitInThirds(EdgeSegment *&part1, EdgeSegment *&part2, EdgeSegment *&part3) const = 0;
+
+};
+
+/// A line segment.
+class LinearSegment : public EdgeSegment {
+
+public:
+ Point2 p[2];
+
+ LinearSegment(Point2 p0, Point2 p1, EdgeColor edgeColor = WHITE);
+ LinearSegment * clone() const;
+ Point2 point(double param) const;
+ Vector2 direction(double param) const;
+ Vector2 directionChange(double param) const;
+ double length() const;
+ SignedDistance signedDistance(Point2 origin, double &param) const;
+ int scanlineIntersections(double x[3], int dy[3], double y) const;
+ void bound(double &l, double &b, double &r, double &t) const;
+
+ void reverse();
+ void moveStartPoint(Point2 to);
+ void moveEndPoint(Point2 to);
+ void splitInThirds(EdgeSegment *&part1, EdgeSegment *&part2, EdgeSegment *&part3) const;
+
+};
+
+/// A quadratic Bezier curve.
+class QuadraticSegment : public EdgeSegment {
+
+public:
+ Point2 p[3];
+
+ QuadraticSegment(Point2 p0, Point2 p1, Point2 p2, EdgeColor edgeColor = WHITE);
+ QuadraticSegment * clone() const;
+ Point2 point(double param) const;
+ Vector2 direction(double param) const;
+ Vector2 directionChange(double param) const;
+ double length() const;
+ SignedDistance signedDistance(Point2 origin, double &param) const;
+ int scanlineIntersections(double x[3], int dy[3], double y) const;
+ void bound(double &l, double &b, double &r, double &t) const;
+
+ void reverse();
+ void moveStartPoint(Point2 to);
+ void moveEndPoint(Point2 to);
+ void splitInThirds(EdgeSegment *&part1, EdgeSegment *&part2, EdgeSegment *&part3) const;
+
+ EdgeSegment * convertToCubic() const;
+
+};
+
+/// A cubic Bezier curve.
+class CubicSegment : public EdgeSegment {
+
+public:
+ Point2 p[4];
+
+ CubicSegment(Point2 p0, Point2 p1, Point2 p2, Point2 p3, EdgeColor edgeColor = WHITE);
+ CubicSegment * clone() const;
+ Point2 point(double param) const;
+ Vector2 direction(double param) const;
+ Vector2 directionChange(double param) const;
+ SignedDistance signedDistance(Point2 origin, double &param) const;
+ int scanlineIntersections(double x[3], int dy[3], double y) const;
+ void bound(double &l, double &b, double &r, double &t) const;
+
+ void reverse();
+ void moveStartPoint(Point2 to);
+ void moveEndPoint(Point2 to);
+ void splitInThirds(EdgeSegment *&part1, EdgeSegment *&part2, EdgeSegment *&part3) const;
+
+ void deconverge(int param, double amount);
+
+};
+
+}
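
scanlineIntersections is the primitive behind both rasterization and winding computation. A sketch of querying one edge against a horizontal scanline, with hypothetical coordinates:

    #include "edge-segments.h"

    static int exampleCrossings() {
        msdfgen::LinearSegment seg(msdfgen::Point2(0, 0), msdfgen::Point2(1, 1));
        double x[3];
        int dy[3];
        // Returns 1 here: one crossing at x[0] == 0.5, going upward (dy[0] == 1).
        return seg.scanlineIntersections(x, dy, .5);
    }
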
diff --git a/thirdparty/msdfgen/core/edge-selectors.cpp b/thirdparty/msdfgen/core/edge-selectors.cpp
new file mode 100644
index 0000000000..aee78847fb
--- /dev/null
+++ b/thirdparty/msdfgen/core/edge-selectors.cpp
@@ -0,0 +1,261 @@
+
+#include "edge-selectors.h"
+
+#include "arithmetics.hpp"
+
+namespace msdfgen {
+
+#define DISTANCE_DELTA_FACTOR 1.001
+
+TrueDistanceSelector::EdgeCache::EdgeCache() : absDistance(0) { }
+
+void TrueDistanceSelector::reset(const Point2 &p) {
+ double delta = DISTANCE_DELTA_FACTOR*(p-this->p).length();
+ minDistance.distance += nonZeroSign(minDistance.distance)*delta;
+ this->p = p;
+}
+
+void TrueDistanceSelector::addEdge(EdgeCache &cache, const EdgeSegment *prevEdge, const EdgeSegment *edge, const EdgeSegment *nextEdge) {
+ double delta = DISTANCE_DELTA_FACTOR*(p-cache.point).length();
+ if (cache.absDistance-delta <= fabs(minDistance.distance)) {
+ double dummy;
+ SignedDistance distance = edge->signedDistance(p, dummy);
+ if (distance < minDistance)
+ minDistance = distance;
+ cache.point = p;
+ cache.absDistance = fabs(distance.distance);
+ }
+}
+
+void TrueDistanceSelector::merge(const TrueDistanceSelector &other) {
+ if (other.minDistance < minDistance)
+ minDistance = other.minDistance;
+}
+
+TrueDistanceSelector::DistanceType TrueDistanceSelector::distance() const {
+ return minDistance.distance;
+}
+
+PseudoDistanceSelectorBase::EdgeCache::EdgeCache() : absDistance(0), aDomainDistance(0), bDomainDistance(0), aPseudoDistance(0), bPseudoDistance(0) { }
+
+bool PseudoDistanceSelectorBase::getPseudoDistance(double &distance, const Vector2 &ep, const Vector2 &edgeDir) {
+ double ts = dotProduct(ep, edgeDir);
+ if (ts > 0) {
+ double pseudoDistance = crossProduct(ep, edgeDir);
+ if (fabs(pseudoDistance) < fabs(distance)) {
+ distance = pseudoDistance;
+ return true;
+ }
+ }
+ return false;
+}
+
+PseudoDistanceSelectorBase::PseudoDistanceSelectorBase() : minNegativePseudoDistance(-fabs(minTrueDistance.distance)), minPositivePseudoDistance(fabs(minTrueDistance.distance)), nearEdge(NULL), nearEdgeParam(0) { }
+
+void PseudoDistanceSelectorBase::reset(double delta) {
+ minTrueDistance.distance += nonZeroSign(minTrueDistance.distance)*delta;
+ minNegativePseudoDistance = -fabs(minTrueDistance.distance);
+ minPositivePseudoDistance = fabs(minTrueDistance.distance);
+ nearEdge = NULL;
+ nearEdgeParam = 0;
+}
+
+bool PseudoDistanceSelectorBase::isEdgeRelevant(const EdgeCache &cache, const EdgeSegment *edge, const Point2 &p) const {
+ double delta = DISTANCE_DELTA_FACTOR*(p-cache.point).length();
+ return (
+ cache.absDistance-delta <= fabs(minTrueDistance.distance) ||
+ fabs(cache.aDomainDistance) < delta ||
+ fabs(cache.bDomainDistance) < delta ||
+ (cache.aDomainDistance > 0 && (cache.aPseudoDistance < 0 ?
+ cache.aPseudoDistance+delta >= minNegativePseudoDistance :
+ cache.aPseudoDistance-delta <= minPositivePseudoDistance
+ )) ||
+ (cache.bDomainDistance > 0 && (cache.bPseudoDistance < 0 ?
+ cache.bPseudoDistance+delta >= minNegativePseudoDistance :
+ cache.bPseudoDistance-delta <= minPositivePseudoDistance
+ ))
+ );
+}
+
+void PseudoDistanceSelectorBase::addEdgeTrueDistance(const EdgeSegment *edge, const SignedDistance &distance, double param) {
+ if (distance < minTrueDistance) {
+ minTrueDistance = distance;
+ nearEdge = edge;
+ nearEdgeParam = param;
+ }
+}
+
+void PseudoDistanceSelectorBase::addEdgePseudoDistance(double distance) {
+ if (distance <= 0 && distance > minNegativePseudoDistance)
+ minNegativePseudoDistance = distance;
+ if (distance >= 0 && distance < minPositivePseudoDistance)
+ minPositivePseudoDistance = distance;
+}
+
+void PseudoDistanceSelectorBase::merge(const PseudoDistanceSelectorBase &other) {
+ if (other.minTrueDistance < minTrueDistance) {
+ minTrueDistance = other.minTrueDistance;
+ nearEdge = other.nearEdge;
+ nearEdgeParam = other.nearEdgeParam;
+ }
+ if (other.minNegativePseudoDistance > minNegativePseudoDistance)
+ minNegativePseudoDistance = other.minNegativePseudoDistance;
+ if (other.minPositivePseudoDistance < minPositivePseudoDistance)
+ minPositivePseudoDistance = other.minPositivePseudoDistance;
+}
+
+double PseudoDistanceSelectorBase::computeDistance(const Point2 &p) const {
+ double minDistance = minTrueDistance.distance < 0 ? minNegativePseudoDistance : minPositivePseudoDistance;
+ if (nearEdge) {
+ SignedDistance distance = minTrueDistance;
+ nearEdge->distanceToPseudoDistance(distance, p, nearEdgeParam);
+ if (fabs(distance.distance) < fabs(minDistance))
+ minDistance = distance.distance;
+ }
+ return minDistance;
+}
+
+SignedDistance PseudoDistanceSelectorBase::trueDistance() const {
+ return minTrueDistance;
+}
+
+void PseudoDistanceSelector::reset(const Point2 &p) {
+ double delta = DISTANCE_DELTA_FACTOR*(p-this->p).length();
+ PseudoDistanceSelectorBase::reset(delta);
+ this->p = p;
+}
+
+void PseudoDistanceSelector::addEdge(EdgeCache &cache, const EdgeSegment *prevEdge, const EdgeSegment *edge, const EdgeSegment *nextEdge) {
+ if (isEdgeRelevant(cache, edge, p)) {
+ double param;
+ SignedDistance distance = edge->signedDistance(p, param);
+ addEdgeTrueDistance(edge, distance, param);
+ cache.point = p;
+ cache.absDistance = fabs(distance.distance);
+
+ Vector2 ap = p-edge->point(0);
+ Vector2 bp = p-edge->point(1);
+ Vector2 aDir = edge->direction(0).normalize(true);
+ Vector2 bDir = edge->direction(1).normalize(true);
+ Vector2 prevDir = prevEdge->direction(1).normalize(true);
+ Vector2 nextDir = nextEdge->direction(0).normalize(true);
+ double add = dotProduct(ap, (prevDir+aDir).normalize(true));
+ double bdd = -dotProduct(bp, (bDir+nextDir).normalize(true));
+ if (add > 0) {
+ double pd = distance.distance;
+ if (getPseudoDistance(pd, ap, -aDir))
+ addEdgePseudoDistance(pd = -pd);
+ cache.aPseudoDistance = pd;
+ }
+ if (bdd > 0) {
+ double pd = distance.distance;
+ if (getPseudoDistance(pd, bp, bDir))
+ addEdgePseudoDistance(pd);
+ cache.bPseudoDistance = pd;
+ }
+ cache.aDomainDistance = add;
+ cache.bDomainDistance = bdd;
+ }
+}
+
+PseudoDistanceSelector::DistanceType PseudoDistanceSelector::distance() const {
+ return computeDistance(p);
+}
+
+void MultiDistanceSelector::reset(const Point2 &p) {
+ double delta = DISTANCE_DELTA_FACTOR*(p-this->p).length();
+ r.reset(delta);
+ g.reset(delta);
+ b.reset(delta);
+ this->p = p;
+}
+
+void MultiDistanceSelector::addEdge(EdgeCache &cache, const EdgeSegment *prevEdge, const EdgeSegment *edge, const EdgeSegment *nextEdge) {
+ if (
+ (edge->color&RED && r.isEdgeRelevant(cache, edge, p)) ||
+ (edge->color&GREEN && g.isEdgeRelevant(cache, edge, p)) ||
+ (edge->color&BLUE && b.isEdgeRelevant(cache, edge, p))
+ ) {
+ double param;
+ SignedDistance distance = edge->signedDistance(p, param);
+ if (edge->color&RED)
+ r.addEdgeTrueDistance(edge, distance, param);
+ if (edge->color&GREEN)
+ g.addEdgeTrueDistance(edge, distance, param);
+ if (edge->color&BLUE)
+ b.addEdgeTrueDistance(edge, distance, param);
+ cache.point = p;
+ cache.absDistance = fabs(distance.distance);
+
+ Vector2 ap = p-edge->point(0);
+ Vector2 bp = p-edge->point(1);
+ Vector2 aDir = edge->direction(0).normalize(true);
+ Vector2 bDir = edge->direction(1).normalize(true);
+ Vector2 prevDir = prevEdge->direction(1).normalize(true);
+ Vector2 nextDir = nextEdge->direction(0).normalize(true);
+ double add = dotProduct(ap, (prevDir+aDir).normalize(true));
+ double bdd = -dotProduct(bp, (bDir+nextDir).normalize(true));
+ if (add > 0) {
+ double pd = distance.distance;
+ if (PseudoDistanceSelectorBase::getPseudoDistance(pd, ap, -aDir)) {
+ pd = -pd;
+ if (edge->color&RED)
+ r.addEdgePseudoDistance(pd);
+ if (edge->color&GREEN)
+ g.addEdgePseudoDistance(pd);
+ if (edge->color&BLUE)
+ b.addEdgePseudoDistance(pd);
+ }
+ cache.aPseudoDistance = pd;
+ }
+ if (bdd > 0) {
+ double pd = distance.distance;
+ if (PseudoDistanceSelectorBase::getPseudoDistance(pd, bp, bDir)) {
+ if (edge->color&RED)
+ r.addEdgePseudoDistance(pd);
+ if (edge->color&GREEN)
+ g.addEdgePseudoDistance(pd);
+ if (edge->color&BLUE)
+ b.addEdgePseudoDistance(pd);
+ }
+ cache.bPseudoDistance = pd;
+ }
+ cache.aDomainDistance = add;
+ cache.bDomainDistance = bdd;
+ }
+}
+
+void MultiDistanceSelector::merge(const MultiDistanceSelector &other) {
+ r.merge(other.r);
+ g.merge(other.g);
+ b.merge(other.b);
+}
+
+MultiDistanceSelector::DistanceType MultiDistanceSelector::distance() const {
+ MultiDistance multiDistance;
+ multiDistance.r = r.computeDistance(p);
+ multiDistance.g = g.computeDistance(p);
+ multiDistance.b = b.computeDistance(p);
+ return multiDistance;
+}
+
+SignedDistance MultiDistanceSelector::trueDistance() const {
+ SignedDistance distance = r.trueDistance();
+ if (g.trueDistance() < distance)
+ distance = g.trueDistance();
+ if (b.trueDistance() < distance)
+ distance = b.trueDistance();
+ return distance;
+}
+
+MultiAndTrueDistanceSelector::DistanceType MultiAndTrueDistanceSelector::distance() const {
+ MultiDistance multiDistance = MultiDistanceSelector::distance();
+ MultiAndTrueDistance mtd;
+ mtd.r = multiDistance.r;
+ mtd.g = multiDistance.g;
+ mtd.b = multiDistance.b;
+ mtd.a = trueDistance().distance;
+ return mtd;
+}
+
+}
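
The pseudo-distance used throughout these selectors is the true distance with the edge conceptually extended past its endpoints along the endpoint tangents, which keeps distances meaningful in the wedge outside a corner. A minimal sketch of the conversion for one edge:

    #include "edge-selectors.h"

    static double pseudoDistanceTo(const msdfgen::EdgeSegment &edge, msdfgen::Point2 p) {
        double param = 0;
        msdfgen::SignedDistance sd = edge.signedDistance(p, param);
        // Only modifies sd when the nearest point falls outside the parameter range [0, 1].
        edge.distanceToPseudoDistance(sd, p, param);
        return sd.distance;
    }
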
diff --git a/thirdparty/msdfgen/core/edge-selectors.h b/thirdparty/msdfgen/core/edge-selectors.h
new file mode 100644
index 0000000000..3620999f82
--- /dev/null
+++ b/thirdparty/msdfgen/core/edge-selectors.h
@@ -0,0 +1,117 @@
+
+#pragma once
+
+#include "Vector2.h"
+#include "SignedDistance.h"
+#include "edge-segments.h"
+
+namespace msdfgen {
+
+struct MultiDistance {
+ double r, g, b;
+};
+struct MultiAndTrueDistance : MultiDistance {
+ double a;
+};
+
+/// Selects the nearest edge by its true distance.
+class TrueDistanceSelector {
+
+public:
+ typedef double DistanceType;
+
+ struct EdgeCache {
+ Point2 point;
+ double absDistance;
+
+ EdgeCache();
+ };
+
+ void reset(const Point2 &p);
+ void addEdge(EdgeCache &cache, const EdgeSegment *prevEdge, const EdgeSegment *edge, const EdgeSegment *nextEdge);
+ void merge(const TrueDistanceSelector &other);
+ DistanceType distance() const;
+
+private:
+ Point2 p;
+ SignedDistance minDistance;
+
+};
+
+class PseudoDistanceSelectorBase {
+
+public:
+ struct EdgeCache {
+ Point2 point;
+ double absDistance;
+ double aDomainDistance, bDomainDistance;
+ double aPseudoDistance, bPseudoDistance;
+
+ EdgeCache();
+ };
+
+ static bool getPseudoDistance(double &distance, const Vector2 &ep, const Vector2 &edgeDir);
+
+ PseudoDistanceSelectorBase();
+ void reset(double delta);
+ bool isEdgeRelevant(const EdgeCache &cache, const EdgeSegment *edge, const Point2 &p) const;
+ void addEdgeTrueDistance(const EdgeSegment *edge, const SignedDistance &distance, double param);
+ void addEdgePseudoDistance(double distance);
+ void merge(const PseudoDistanceSelectorBase &other);
+ double computeDistance(const Point2 &p) const;
+ SignedDistance trueDistance() const;
+
+private:
+ SignedDistance minTrueDistance;
+ double minNegativePseudoDistance;
+ double minPositivePseudoDistance;
+ const EdgeSegment *nearEdge;
+ double nearEdgeParam;
+
+};
+
+/// Selects the nearest edge by its pseudo-distance.
+class PseudoDistanceSelector : public PseudoDistanceSelectorBase {
+
+public:
+ typedef double DistanceType;
+
+ void reset(const Point2 &p);
+ void addEdge(EdgeCache &cache, const EdgeSegment *prevEdge, const EdgeSegment *edge, const EdgeSegment *nextEdge);
+ DistanceType distance() const;
+
+private:
+ Point2 p;
+
+};
+
+/// Selects the nearest edge for each of the three channels by its pseudo-distance.
+class MultiDistanceSelector {
+
+public:
+ typedef MultiDistance DistanceType;
+ typedef PseudoDistanceSelectorBase::EdgeCache EdgeCache;
+
+ void reset(const Point2 &p);
+ void addEdge(EdgeCache &cache, const EdgeSegment *prevEdge, const EdgeSegment *edge, const EdgeSegment *nextEdge);
+ void merge(const MultiDistanceSelector &other);
+ DistanceType distance() const;
+ SignedDistance trueDistance() const;
+
+private:
+ Point2 p;
+ PseudoDistanceSelectorBase r, g, b;
+
+};
+
+/// Selects the nearest edge for each of the three color channels by its pseudo-distance and by true distance for the alpha channel.
+class MultiAndTrueDistanceSelector : public MultiDistanceSelector {
+
+public:
+ typedef MultiAndTrueDistance DistanceType;
+
+ DistanceType distance() const;
+
+};
+
+}
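
The MultiDistance produced here is collapsed back into a single signed distance at sampling time by taking the median of the three channels, which is what lets the multi-channel field keep sharp corners. The usual formulation (msdfgen's arithmetics.hpp carries an equivalent helper):

    #include <algorithm>

    static inline double median(double r, double g, double b) {
        return std::max(std::min(r, g), std::min(std::max(r, g), b));
    }
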
diff --git a/thirdparty/msdfgen/core/equation-solver.cpp b/thirdparty/msdfgen/core/equation-solver.cpp
new file mode 100644
index 0000000000..fbe906428b
--- /dev/null
+++ b/thirdparty/msdfgen/core/equation-solver.cpp
@@ -0,0 +1,77 @@
+
+#include "equation-solver.h"
+
+#define _USE_MATH_DEFINES
+#include <cmath>
+
+#define TOO_LARGE_RATIO 1e12
+
+namespace msdfgen {
+
+int solveQuadratic(double x[2], double a, double b, double c) {
+ // a = 0 -> linear equation
+ if (a == 0 || fabs(b)+fabs(c) > TOO_LARGE_RATIO*fabs(a)) {
+ // a, b = 0 -> no solution
+ if (b == 0 || fabs(c) > TOO_LARGE_RATIO*fabs(b)) {
+ if (c == 0)
+ return -1; // 0 = 0
+ return 0;
+ }
+ x[0] = -c/b;
+ return 1;
+ }
+ double dscr = b*b-4*a*c;
+ if (dscr > 0) {
+ dscr = sqrt(dscr);
+ x[0] = (-b+dscr)/(2*a);
+ x[1] = (-b-dscr)/(2*a);
+ return 2;
+ } else if (dscr == 0) {
+ x[0] = -b/(2*a);
+ return 1;
+ } else
+ return 0;
+}
+
+static int solveCubicNormed(double x[3], double a, double b, double c) {
+ double a2 = a*a;
+ double q = (a2 - 3*b)/9;
+ double r = (a*(2*a2-9*b) + 27*c)/54;
+ double r2 = r*r;
+ double q3 = q*q*q;
+ double A, B;
+ if (r2 < q3) {
+ double t = r/sqrt(q3);
+ if (t < -1) t = -1;
+ if (t > 1) t = 1;
+ t = acos(t);
+ a /= 3; q = -2*sqrt(q);
+ x[0] = q*cos(t/3)-a;
+ x[1] = q*cos((t+2*M_PI)/3)-a;
+ x[2] = q*cos((t-2*M_PI)/3)-a;
+ return 3;
+ } else {
+ A = -pow(fabs(r)+sqrt(r2-q3), 1/3.);
+ if (r < 0) A = -A;
+ B = A == 0 ? 0 : q/A;
+ a /= 3;
+ x[0] = (A+B)-a;
+ x[1] = -0.5*(A+B)-a;
+ x[2] = 0.5*sqrt(3.)*(A-B);
+ if (fabs(x[2]) < 1e-14)
+ return 2;
+ return 1;
+ }
+}
+
+int solveCubic(double x[3], double a, double b, double c, double d) {
+ if (a != 0) {
+ double bn = b/a, cn = c/a, dn = d/a;
+ // Check that a isn't "almost zero"
+ if (fabs(bn) < TOO_LARGE_RATIO && fabs(cn) < TOO_LARGE_RATIO && fabs(dn) < TOO_LARGE_RATIO)
+ return solveCubicNormed(x, bn, cn, dn);
+ }
+ return solveQuadratic(x, b, c, d);
+}
+
+}
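
A quick way to convince oneself the solvers behave: feed solveCubic a cubic with known roots (values here are illustrative only):

    #include <cassert>
    #include "equation-solver.h"

    static void solverSmokeTest() {
        double x[3];
        // (x-1)(x-2)(x-3) = x^3 - 6x^2 + 11x - 6: three distinct real roots.
        int n = msdfgen::solveCubic(x, 1, -6, 11, -6);
        assert(n == 3); // roots 1, 2, 3 in unspecified order
    }
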
diff --git a/thirdparty/msdfgen/core/equation-solver.h b/thirdparty/msdfgen/core/equation-solver.h
new file mode 100644
index 0000000000..bae097b2b9
--- /dev/null
+++ b/thirdparty/msdfgen/core/equation-solver.h
@@ -0,0 +1,12 @@
+
+#pragma once
+
+namespace msdfgen {
+
+// ax^2 + bx + c = 0
+int solveQuadratic(double x[2], double a, double b, double c);
+
+// ax^3 + bx^2 + cx + d = 0
+int solveCubic(double x[3], double a, double b, double c, double d);
+
+}
diff --git a/thirdparty/msdfgen/core/generator-config.h b/thirdparty/msdfgen/core/generator-config.h
new file mode 100644
index 0000000000..ddcad961f2
--- /dev/null
+++ b/thirdparty/msdfgen/core/generator-config.h
@@ -0,0 +1,63 @@
+
+#pragma once
+
+#include <cstdlib>
+#include "BitmapRef.hpp"
+
+namespace msdfgen {
+
+/// The configuration of the MSDF error correction pass.
+struct ErrorCorrectionConfig {
+ /// The default value of minDeviationRatio.
+ static const double defaultMinDeviationRatio;
+ /// The default value of minImproveRatio.
+ static const double defaultMinImproveRatio;
+
+ /// Mode of operation.
+ enum Mode {
+ /// Skips error correction pass.
+ DISABLED,
+ /// Corrects all discontinuities of the distance field regardless of whether edges are adversely affected.
+ INDISCRIMINATE,
+ /// Corrects artifacts at edges and other discontinuous distances only if it does not affect edges or corners.
+ EDGE_PRIORITY,
+ /// Only corrects artifacts at edges.
+ EDGE_ONLY
+ } mode;
+ /// Specifies whether to use an algorithm that computes the exact shape distance at the positions of suspected artifacts. This algorithm can be much slower.
+ enum DistanceCheckMode {
+ /// Never computes exact shape distance.
+ DO_NOT_CHECK_DISTANCE,
+ /// Only computes exact shape distance at edges. Provides a good balance between speed and precision.
+ CHECK_DISTANCE_AT_EDGE,
+ /// Computes and compares the exact shape distance for each suspected artifact.
+ ALWAYS_CHECK_DISTANCE
+ } distanceCheckMode;
+ /// The minimum ratio between the actual and maximum expected distance delta to be considered an error.
+ double minDeviationRatio;
+ /// The minimum ratio between the pre-correction distance error and the post-correction distance error. Has no effect for DO_NOT_CHECK_DISTANCE.
+ double minImproveRatio;
+ /// An optional buffer to avoid dynamic allocation. Must have at least as many bytes as the MSDF has pixels.
+ byte *buffer;
+
+ inline explicit ErrorCorrectionConfig(Mode mode = EDGE_PRIORITY, DistanceCheckMode distanceCheckMode = CHECK_DISTANCE_AT_EDGE, double minDeviationRatio = defaultMinDeviationRatio, double minImproveRatio = defaultMinImproveRatio, byte *buffer = NULL) : mode(mode), distanceCheckMode(distanceCheckMode), minDeviationRatio(minDeviationRatio), minImproveRatio(minImproveRatio), buffer(buffer) { }
+};
+
+/// The configuration of the distance field generator algorithm.
+struct GeneratorConfig {
+ /// Specifies whether to use the version of the algorithm that supports overlapping contours with the same winding. May be set to false to improve performance when no such contours are present.
+ bool overlapSupport;
+
+ inline explicit GeneratorConfig(bool overlapSupport = true) : overlapSupport(overlapSupport) { }
+};
+
+/// The configuration of the multi-channel distance field generator algorithm.
+struct MSDFGeneratorConfig : GeneratorConfig {
+ /// Configuration of the error correction pass.
+ ErrorCorrectionConfig errorCorrection;
+
+ inline MSDFGeneratorConfig() { }
+ inline explicit MSDFGeneratorConfig(bool overlapSupport, const ErrorCorrectionConfig &errorCorrection = ErrorCorrectionConfig()) : GeneratorConfig(overlapSupport), errorCorrection(errorCorrection) { }
+};
+
+}
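
A sketch of putting the pieces together: a config with overlap support enabled and edge-priority correction that verifies exact distances only at edges (these happen to match what the default constructors produce):

    #include "generator-config.h"

    static msdfgen::MSDFGeneratorConfig makeConfig() {
        msdfgen::ErrorCorrectionConfig ec(
            msdfgen::ErrorCorrectionConfig::EDGE_PRIORITY,
            msdfgen::ErrorCorrectionConfig::CHECK_DISTANCE_AT_EDGE);
        return msdfgen::MSDFGeneratorConfig(true, ec);
    }
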
diff --git a/thirdparty/msdfgen/core/msdf-error-correction.cpp b/thirdparty/msdfgen/core/msdf-error-correction.cpp
new file mode 100644
index 0000000000..21ddff8c85
--- /dev/null
+++ b/thirdparty/msdfgen/core/msdf-error-correction.cpp
@@ -0,0 +1,154 @@
+
+#include "msdf-error-correction.h"
+
+#include <vector>
+#include "arithmetics.hpp"
+#include "Bitmap.h"
+#include "contour-combiners.h"
+#include "MSDFErrorCorrection.h"
+
+namespace msdfgen {
+
+template <int N>
+static void msdfErrorCorrectionInner(const BitmapRef<float, N> &sdf, const Shape &shape, const Projection &projection, double range, const MSDFGeneratorConfig &config) {
+ if (config.errorCorrection.mode == ErrorCorrectionConfig::DISABLED)
+ return;
+ Bitmap<byte, 1> stencilBuffer;
+ if (!config.errorCorrection.buffer)
+ stencilBuffer = Bitmap<byte, 1>(sdf.width, sdf.height);
+ BitmapRef<byte, 1> stencil;
+ stencil.pixels = config.errorCorrection.buffer ? config.errorCorrection.buffer : (byte *) stencilBuffer;
+ stencil.width = sdf.width, stencil.height = sdf.height;
+ MSDFErrorCorrection ec(stencil, projection, range);
+ ec.setMinDeviationRatio(config.errorCorrection.minDeviationRatio);
+ ec.setMinImproveRatio(config.errorCorrection.minImproveRatio);
+ switch (config.errorCorrection.mode) {
+ case ErrorCorrectionConfig::DISABLED:
+ case ErrorCorrectionConfig::INDISCRIMINATE:
+ break;
+ case ErrorCorrectionConfig::EDGE_PRIORITY:
+ ec.protectCorners(shape);
+ ec.protectEdges<N>(sdf);
+ break;
+ case ErrorCorrectionConfig::EDGE_ONLY:
+ ec.protectAll();
+ break;
+ }
+ if (config.errorCorrection.distanceCheckMode == ErrorCorrectionConfig::DO_NOT_CHECK_DISTANCE || (config.errorCorrection.distanceCheckMode == ErrorCorrectionConfig::CHECK_DISTANCE_AT_EDGE && config.errorCorrection.mode != ErrorCorrectionConfig::EDGE_ONLY)) {
+ ec.findErrors<N>(sdf);
+ if (config.errorCorrection.distanceCheckMode == ErrorCorrectionConfig::CHECK_DISTANCE_AT_EDGE)
+ ec.protectAll();
+ }
+ if (config.errorCorrection.distanceCheckMode == ErrorCorrectionConfig::ALWAYS_CHECK_DISTANCE || config.errorCorrection.distanceCheckMode == ErrorCorrectionConfig::CHECK_DISTANCE_AT_EDGE) {
+ if (config.overlapSupport)
+ ec.findErrors<OverlappingContourCombiner, N>(sdf, shape);
+ else
+ ec.findErrors<SimpleContourCombiner, N>(sdf, shape);
+ }
+ ec.apply(sdf);
+}
+
+template <int N>
+static void msdfErrorCorrectionShapeless(const BitmapRef<float, N> &sdf, const Projection &projection, double range, double minDeviationRatio, bool protectAll) {
+ Bitmap<byte, 1> stencilBuffer(sdf.width, sdf.height);
+ MSDFErrorCorrection ec(stencilBuffer, projection, range);
+ ec.setMinDeviationRatio(minDeviationRatio);
+ if (protectAll)
+ ec.protectAll();
+ ec.findErrors<N>(sdf);
+ ec.apply(sdf);
+}
+
+void msdfErrorCorrection(const BitmapRef<float, 3> &sdf, const Shape &shape, const Projection &projection, double range, const MSDFGeneratorConfig &config) {
+ msdfErrorCorrectionInner(sdf, shape, projection, range, config);
+}
+void msdfErrorCorrection(const BitmapRef<float, 4> &sdf, const Shape &shape, const Projection &projection, double range, const MSDFGeneratorConfig &config) {
+ msdfErrorCorrectionInner(sdf, shape, projection, range, config);
+}
+
+void msdfFastDistanceErrorCorrection(const BitmapRef<float, 3> &sdf, const Projection &projection, double range, double minDeviationRatio) {
+ msdfErrorCorrectionShapeless(sdf, projection, range, minDeviationRatio, false);
+}
+void msdfFastDistanceErrorCorrection(const BitmapRef<float, 4> &sdf, const Projection &projection, double range, double minDeviationRatio) {
+ msdfErrorCorrectionShapeless(sdf, projection, range, minDeviationRatio, false);
+}
+
+void msdfFastEdgeErrorCorrection(const BitmapRef<float, 3> &sdf, const Projection &projection, double range, double minDeviationRatio) {
+ msdfErrorCorrectionShapeless(sdf, projection, range, minDeviationRatio, true);
+}
+void msdfFastEdgeErrorCorrection(const BitmapRef<float, 4> &sdf, const Projection &projection, double range, double minDeviationRatio) {
+ msdfErrorCorrectionShapeless(sdf, projection, range, minDeviationRatio, true);
+}
+
+
+// Legacy version
+
+inline static bool detectClash(const float *a, const float *b, double threshold) {
+    // Sort channels so that pairs (a0, b0), (a1, b1), (a2, b2) go from largest to smallest absolute difference
+ float a0 = a[0], a1 = a[1], a2 = a[2];
+ float b0 = b[0], b1 = b[1], b2 = b[2];
+ float tmp;
+ if (fabsf(b0-a0) < fabsf(b1-a1)) {
+ tmp = a0, a0 = a1, a1 = tmp;
+ tmp = b0, b0 = b1, b1 = tmp;
+ }
+ if (fabsf(b1-a1) < fabsf(b2-a2)) {
+ tmp = a1, a1 = a2, a2 = tmp;
+ tmp = b1, b1 = b2, b2 = tmp;
+ if (fabsf(b0-a0) < fabsf(b1-a1)) {
+ tmp = a0, a0 = a1, a1 = tmp;
+ tmp = b0, b0 = b1, b1 = tmp;
+ }
+ }
+ return (fabsf(b1-a1) >= threshold) &&
+ !(b0 == b1 && b0 == b2) && // Ignore if other pixel has been equalized
+ fabsf(a2-.5f) >= fabsf(b2-.5f); // Out of the pair, only flag the pixel farther from a shape edge
+}
+
+template <int N>
+static void msdfErrorCorrectionInner_legacy(const BitmapRef<float, N> &output, const Vector2 &threshold) {
+ std::vector<std::pair<int, int> > clashes;
+ int w = output.width, h = output.height;
+ for (int y = 0; y < h; ++y)
+ for (int x = 0; x < w; ++x) {
+ if (
+ (x > 0 && detectClash(output(x, y), output(x-1, y), threshold.x)) ||
+ (x < w-1 && detectClash(output(x, y), output(x+1, y), threshold.x)) ||
+ (y > 0 && detectClash(output(x, y), output(x, y-1), threshold.y)) ||
+ (y < h-1 && detectClash(output(x, y), output(x, y+1), threshold.y))
+ )
+ clashes.push_back(std::make_pair(x, y));
+ }
+ for (std::vector<std::pair<int, int> >::const_iterator clash = clashes.begin(); clash != clashes.end(); ++clash) {
+ float *pixel = output(clash->first, clash->second);
+ float med = median(pixel[0], pixel[1], pixel[2]);
+ pixel[0] = med, pixel[1] = med, pixel[2] = med;
+ }
+#ifndef MSDFGEN_NO_DIAGONAL_CLASH_DETECTION
+ clashes.clear();
+ for (int y = 0; y < h; ++y)
+ for (int x = 0; x < w; ++x) {
+ if (
+ (x > 0 && y > 0 && detectClash(output(x, y), output(x-1, y-1), threshold.x+threshold.y)) ||
+ (x < w-1 && y > 0 && detectClash(output(x, y), output(x+1, y-1), threshold.x+threshold.y)) ||
+ (x > 0 && y < h-1 && detectClash(output(x, y), output(x-1, y+1), threshold.x+threshold.y)) ||
+ (x < w-1 && y < h-1 && detectClash(output(x, y), output(x+1, y+1), threshold.x+threshold.y))
+ )
+ clashes.push_back(std::make_pair(x, y));
+ }
+ for (std::vector<std::pair<int, int> >::const_iterator clash = clashes.begin(); clash != clashes.end(); ++clash) {
+ float *pixel = output(clash->first, clash->second);
+ float med = median(pixel[0], pixel[1], pixel[2]);
+ pixel[0] = med, pixel[1] = med, pixel[2] = med;
+ }
+#endif
+}
+
+void msdfErrorCorrection_legacy(const BitmapRef<float, 3> &output, const Vector2 &threshold) {
+ msdfErrorCorrectionInner_legacy(output, threshold);
+}
+void msdfErrorCorrection_legacy(const BitmapRef<float, 4> &output, const Vector2 &threshold) {
+ msdfErrorCorrectionInner_legacy(output, threshold);
+}
+
+}
diff --git a/thirdparty/msdfgen/core/msdf-error-correction.h b/thirdparty/msdfgen/core/msdf-error-correction.h
new file mode 100644
index 0000000000..d5384c9329
--- /dev/null
+++ b/thirdparty/msdfgen/core/msdf-error-correction.h
@@ -0,0 +1,28 @@
+
+#pragma once
+
+#include "Vector2.h"
+#include "Projection.h"
+#include "Shape.h"
+#include "BitmapRef.hpp"
+#include "generator-config.h"
+
+namespace msdfgen {
+
+/// Predicts potential artifacts caused by the interpolation of the MSDF and corrects them by converting nearby texels to single-channel.
+void msdfErrorCorrection(const BitmapRef<float, 3> &sdf, const Shape &shape, const Projection &projection, double range, const MSDFGeneratorConfig &config = MSDFGeneratorConfig());
+void msdfErrorCorrection(const BitmapRef<float, 4> &sdf, const Shape &shape, const Projection &projection, double range, const MSDFGeneratorConfig &config = MSDFGeneratorConfig());
+
+/// Applies the simplified error correction to all discontinuous distances (INDISCRIMINATE mode). Does not need shape or translation.
+void msdfFastDistanceErrorCorrection(const BitmapRef<float, 3> &sdf, const Projection &projection, double range, double minDeviationRatio = ErrorCorrectionConfig::defaultMinDeviationRatio);
+void msdfFastDistanceErrorCorrection(const BitmapRef<float, 4> &sdf, const Projection &projection, double range, double minDeviationRatio = ErrorCorrectionConfig::defaultMinDeviationRatio);
+
+/// Applies the simplified error correction to edges only (EDGE_ONLY mode). Does not need shape or translation.
+void msdfFastEdgeErrorCorrection(const BitmapRef<float, 3> &sdf, const Projection &projection, double range, double minDeviationRatio = ErrorCorrectionConfig::defaultMinDeviationRatio);
+void msdfFastEdgeErrorCorrection(const BitmapRef<float, 4> &sdf, const Projection &projection, double range, double minDeviationRatio = ErrorCorrectionConfig::defaultMinDeviationRatio);
+
+/// The original version of the error correction algorithm.
+void msdfErrorCorrection_legacy(const BitmapRef<float, 3> &output, const Vector2 &threshold);
+void msdfErrorCorrection_legacy(const BitmapRef<float, 4> &output, const Vector2 &threshold);
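+
+/* Illustrative call sequence (editorial addition, not upstream msdfgen code);
+ * assumes an already generated Bitmap<float, 3> msdf (see Bitmap.h) together
+ * with the shape, projection and range it was generated from:
+ *
+ *     MSDFGeneratorConfig config;
+ *     config.errorCorrection.distanceCheckMode =
+ *         ErrorCorrectionConfig::ALWAYS_CHECK_DISTANCE;
+ *     msdfErrorCorrection(msdf, shape, projection, range, config);
+ */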
+
+}
diff --git a/thirdparty/msdfgen/core/msdfgen.cpp b/thirdparty/msdfgen/core/msdfgen.cpp
new file mode 100644
index 0000000000..0289295f14
--- /dev/null
+++ b/thirdparty/msdfgen/core/msdfgen.cpp
@@ -0,0 +1,288 @@
+
+#include "../msdfgen.h"
+
+#include <vector>
+#include "edge-selectors.h"
+#include "contour-combiners.h"
+#include "ShapeDistanceFinder.h"
+
+namespace msdfgen {
+
+template <typename DistanceType>
+class DistancePixelConversion;
+
+template <>
+class DistancePixelConversion<double> {
+ double invRange;
+public:
+ typedef BitmapRef<float, 1> BitmapRefType;
+ inline explicit DistancePixelConversion(double range) : invRange(1/range) { }
+ inline void operator()(float *pixels, double distance) const {
+ *pixels = float(invRange*distance+.5);
+ }
+};
+
+template <>
+class DistancePixelConversion<MultiDistance> {
+ double invRange;
+public:
+ typedef BitmapRef<float, 3> BitmapRefType;
+ inline explicit DistancePixelConversion(double range) : invRange(1/range) { }
+ inline void operator()(float *pixels, const MultiDistance &distance) const {
+ pixels[0] = float(invRange*distance.r+.5);
+ pixels[1] = float(invRange*distance.g+.5);
+ pixels[2] = float(invRange*distance.b+.5);
+ }
+};
+
+template <>
+class DistancePixelConversion<MultiAndTrueDistance> {
+ double invRange;
+public:
+ typedef BitmapRef<float, 4> BitmapRefType;
+ inline explicit DistancePixelConversion(double range) : invRange(1/range) { }
+ inline void operator()(float *pixels, const MultiAndTrueDistance &distance) const {
+ pixels[0] = float(invRange*distance.r+.5);
+ pixels[1] = float(invRange*distance.g+.5);
+ pixels[2] = float(invRange*distance.b+.5);
+ pixels[3] = float(invRange*distance.a+.5);
+ }
+};
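+
+// Editorial note on the conversions above: with range = 4 (invRange = .25), a
+// signed distance of 0 (exactly on an edge) stores as .5, +2 stores as 1 and
+// -2 stores as 0, so the band [-range/2, +range/2] maps linearly onto [0, 1].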
+
+template <class ContourCombiner>
+void generateDistanceField(const typename DistancePixelConversion<typename ContourCombiner::DistanceType>::BitmapRefType &output, const Shape &shape, const Projection &projection, double range) {
+ DistancePixelConversion<typename ContourCombiner::DistanceType> distancePixelConversion(range);
+#ifdef MSDFGEN_USE_OPENMP
+ #pragma omp parallel
+#endif
+ {
+ ShapeDistanceFinder<ContourCombiner> distanceFinder(shape);
+ bool rightToLeft = false;
+#ifdef MSDFGEN_USE_OPENMP
+ #pragma omp for
+#endif
+ for (int y = 0; y < output.height; ++y) {
+ int row = shape.inverseYAxis ? output.height-y-1 : y;
+ for (int col = 0; col < output.width; ++col) {
+ int x = rightToLeft ? output.width-col-1 : col;
+ Point2 p = projection.unproject(Point2(x+.5, y+.5));
+ typename ContourCombiner::DistanceType distance = distanceFinder.distance(p);
+ distancePixelConversion(output(x, row), distance);
+ }
+ rightToLeft = !rightToLeft;
+ }
+ }
+}
+
+void generateSDF(const BitmapRef<float, 1> &output, const Shape &shape, const Projection &projection, double range, const GeneratorConfig &config) {
+ if (config.overlapSupport)
+ generateDistanceField<OverlappingContourCombiner<TrueDistanceSelector> >(output, shape, projection, range);
+ else
+ generateDistanceField<SimpleContourCombiner<TrueDistanceSelector> >(output, shape, projection, range);
+}
+
+void generatePseudoSDF(const BitmapRef<float, 1> &output, const Shape &shape, const Projection &projection, double range, const GeneratorConfig &config) {
+ if (config.overlapSupport)
+ generateDistanceField<OverlappingContourCombiner<PseudoDistanceSelector> >(output, shape, projection, range);
+ else
+ generateDistanceField<SimpleContourCombiner<PseudoDistanceSelector> >(output, shape, projection, range);
+}
+
+void generateMSDF(const BitmapRef<float, 3> &output, const Shape &shape, const Projection &projection, double range, const MSDFGeneratorConfig &config) {
+ if (config.overlapSupport)
+ generateDistanceField<OverlappingContourCombiner<MultiDistanceSelector> >(output, shape, projection, range);
+ else
+ generateDistanceField<SimpleContourCombiner<MultiDistanceSelector> >(output, shape, projection, range);
+ msdfErrorCorrection(output, shape, projection, range, config);
+}
+
+void generateMTSDF(const BitmapRef<float, 4> &output, const Shape &shape, const Projection &projection, double range, const MSDFGeneratorConfig &config) {
+ if (config.overlapSupport)
+ generateDistanceField<OverlappingContourCombiner<MultiAndTrueDistanceSelector> >(output, shape, projection, range);
+ else
+ generateDistanceField<SimpleContourCombiner<MultiAndTrueDistanceSelector> >(output, shape, projection, range);
+ msdfErrorCorrection(output, shape, projection, range, config);
+}
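+
+/* Illustrative end-to-end sketch (editorial addition, not upstream msdfgen
+ * code): generating a 32x32 MSDF with a 4-unit range for a Shape shape whose
+ * edge colors have already been assigned (see edge-coloring.h); Bitmap is
+ * declared in Bitmap.h.
+ *
+ *     Bitmap<float, 3> msdf(32, 32);
+ *     generateMSDF(msdf, shape, Projection(Vector2(1, 1), Vector2(4, 4)), 4);
+ */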
+
+// Legacy API
+
+void generateSDF(const BitmapRef<float, 1> &output, const Shape &shape, double range, const Vector2 &scale, const Vector2 &translate, bool overlapSupport) {
+ generateSDF(output, shape, Projection(scale, translate), range, GeneratorConfig(overlapSupport));
+}
+
+void generatePseudoSDF(const BitmapRef<float, 1> &output, const Shape &shape, double range, const Vector2 &scale, const Vector2 &translate, bool overlapSupport) {
+ generatePseudoSDF(output, shape, Projection(scale, translate), range, GeneratorConfig(overlapSupport));
+}
+
+void generateMSDF(const BitmapRef<float, 3> &output, const Shape &shape, double range, const Vector2 &scale, const Vector2 &translate, const ErrorCorrectionConfig &errorCorrectionConfig, bool overlapSupport) {
+ generateMSDF(output, shape, Projection(scale, translate), range, MSDFGeneratorConfig(overlapSupport, errorCorrectionConfig));
+}
+
+void generateMTSDF(const BitmapRef<float, 4> &output, const Shape &shape, double range, const Vector2 &scale, const Vector2 &translate, const ErrorCorrectionConfig &errorCorrectionConfig, bool overlapSupport) {
+ generateMTSDF(output, shape, Projection(scale, translate), range, MSDFGeneratorConfig(overlapSupport, errorCorrectionConfig));
+}
+
+// Legacy version
+
+void generateSDF_legacy(const BitmapRef<float, 1> &output, const Shape &shape, double range, const Vector2 &scale, const Vector2 &translate) {
+#ifdef MSDFGEN_USE_OPENMP
+ #pragma omp parallel for
+#endif
+ for (int y = 0; y < output.height; ++y) {
+ int row = shape.inverseYAxis ? output.height-y-1 : y;
+ for (int x = 0; x < output.width; ++x) {
+ double dummy;
+ Point2 p = Vector2(x+.5, y+.5)/scale-translate;
+ SignedDistance minDistance;
+ for (std::vector<Contour>::const_iterator contour = shape.contours.begin(); contour != shape.contours.end(); ++contour)
+ for (std::vector<EdgeHolder>::const_iterator edge = contour->edges.begin(); edge != contour->edges.end(); ++edge) {
+ SignedDistance distance = (*edge)->signedDistance(p, dummy);
+ if (distance < minDistance)
+ minDistance = distance;
+ }
+ *output(x, row) = float(minDistance.distance/range+.5);
+ }
+ }
+}
+
+void generatePseudoSDF_legacy(const BitmapRef<float, 1> &output, const Shape &shape, double range, const Vector2 &scale, const Vector2 &translate) {
+#ifdef MSDFGEN_USE_OPENMP
+ #pragma omp parallel for
+#endif
+ for (int y = 0; y < output.height; ++y) {
+ int row = shape.inverseYAxis ? output.height-y-1 : y;
+ for (int x = 0; x < output.width; ++x) {
+ Point2 p = Vector2(x+.5, y+.5)/scale-translate;
+ SignedDistance minDistance;
+ const EdgeHolder *nearEdge = NULL;
+ double nearParam = 0;
+ for (std::vector<Contour>::const_iterator contour = shape.contours.begin(); contour != shape.contours.end(); ++contour)
+ for (std::vector<EdgeHolder>::const_iterator edge = contour->edges.begin(); edge != contour->edges.end(); ++edge) {
+ double param;
+ SignedDistance distance = (*edge)->signedDistance(p, param);
+ if (distance < minDistance) {
+ minDistance = distance;
+ nearEdge = &*edge;
+ nearParam = param;
+ }
+ }
+ if (nearEdge)
+ (*nearEdge)->distanceToPseudoDistance(minDistance, p, nearParam);
+ *output(x, row) = float(minDistance.distance/range+.5);
+ }
+ }
+}
+
+void generateMSDF_legacy(const BitmapRef<float, 3> &output, const Shape &shape, double range, const Vector2 &scale, const Vector2 &translate, ErrorCorrectionConfig errorCorrectionConfig) {
+#ifdef MSDFGEN_USE_OPENMP
+ #pragma omp parallel for
+#endif
+ for (int y = 0; y < output.height; ++y) {
+ int row = shape.inverseYAxis ? output.height-y-1 : y;
+ for (int x = 0; x < output.width; ++x) {
+ Point2 p = Vector2(x+.5, y+.5)/scale-translate;
+
+ struct {
+ SignedDistance minDistance;
+ const EdgeHolder *nearEdge;
+ double nearParam;
+ } r, g, b;
+ r.nearEdge = g.nearEdge = b.nearEdge = NULL;
+ r.nearParam = g.nearParam = b.nearParam = 0;
+
+ for (std::vector<Contour>::const_iterator contour = shape.contours.begin(); contour != shape.contours.end(); ++contour)
+ for (std::vector<EdgeHolder>::const_iterator edge = contour->edges.begin(); edge != contour->edges.end(); ++edge) {
+ double param;
+ SignedDistance distance = (*edge)->signedDistance(p, param);
+ if ((*edge)->color&RED && distance < r.minDistance) {
+ r.minDistance = distance;
+ r.nearEdge = &*edge;
+ r.nearParam = param;
+ }
+ if ((*edge)->color&GREEN && distance < g.minDistance) {
+ g.minDistance = distance;
+ g.nearEdge = &*edge;
+ g.nearParam = param;
+ }
+ if ((*edge)->color&BLUE && distance < b.minDistance) {
+ b.minDistance = distance;
+ b.nearEdge = &*edge;
+ b.nearParam = param;
+ }
+ }
+
+ if (r.nearEdge)
+ (*r.nearEdge)->distanceToPseudoDistance(r.minDistance, p, r.nearParam);
+ if (g.nearEdge)
+ (*g.nearEdge)->distanceToPseudoDistance(g.minDistance, p, g.nearParam);
+ if (b.nearEdge)
+ (*b.nearEdge)->distanceToPseudoDistance(b.minDistance, p, b.nearParam);
+ output(x, row)[0] = float(r.minDistance.distance/range+.5);
+ output(x, row)[1] = float(g.minDistance.distance/range+.5);
+ output(x, row)[2] = float(b.minDistance.distance/range+.5);
+ }
+ }
+
+ errorCorrectionConfig.distanceCheckMode = ErrorCorrectionConfig::DO_NOT_CHECK_DISTANCE;
+ msdfErrorCorrection(output, shape, Projection(scale, translate), range, MSDFGeneratorConfig(false, errorCorrectionConfig));
+}
+
+void generateMTSDF_legacy(const BitmapRef<float, 4> &output, const Shape &shape, double range, const Vector2 &scale, const Vector2 &translate, ErrorCorrectionConfig errorCorrectionConfig) {
+#ifdef MSDFGEN_USE_OPENMP
+ #pragma omp parallel for
+#endif
+ for (int y = 0; y < output.height; ++y) {
+ int row = shape.inverseYAxis ? output.height-y-1 : y;
+ for (int x = 0; x < output.width; ++x) {
+ Point2 p = Vector2(x+.5, y+.5)/scale-translate;
+
+ SignedDistance minDistance;
+ struct {
+ SignedDistance minDistance;
+ const EdgeHolder *nearEdge;
+ double nearParam;
+ } r, g, b;
+ r.nearEdge = g.nearEdge = b.nearEdge = NULL;
+ r.nearParam = g.nearParam = b.nearParam = 0;
+
+ for (std::vector<Contour>::const_iterator contour = shape.contours.begin(); contour != shape.contours.end(); ++contour)
+ for (std::vector<EdgeHolder>::const_iterator edge = contour->edges.begin(); edge != contour->edges.end(); ++edge) {
+ double param;
+ SignedDistance distance = (*edge)->signedDistance(p, param);
+ if (distance < minDistance)
+ minDistance = distance;
+ if ((*edge)->color&RED && distance < r.minDistance) {
+ r.minDistance = distance;
+ r.nearEdge = &*edge;
+ r.nearParam = param;
+ }
+ if ((*edge)->color&GREEN && distance < g.minDistance) {
+ g.minDistance = distance;
+ g.nearEdge = &*edge;
+ g.nearParam = param;
+ }
+ if ((*edge)->color&BLUE && distance < b.minDistance) {
+ b.minDistance = distance;
+ b.nearEdge = &*edge;
+ b.nearParam = param;
+ }
+ }
+
+ if (r.nearEdge)
+ (*r.nearEdge)->distanceToPseudoDistance(r.minDistance, p, r.nearParam);
+ if (g.nearEdge)
+ (*g.nearEdge)->distanceToPseudoDistance(g.minDistance, p, g.nearParam);
+ if (b.nearEdge)
+ (*b.nearEdge)->distanceToPseudoDistance(b.minDistance, p, b.nearParam);
+ output(x, row)[0] = float(r.minDistance.distance/range+.5);
+ output(x, row)[1] = float(g.minDistance.distance/range+.5);
+ output(x, row)[2] = float(b.minDistance.distance/range+.5);
+ output(x, row)[3] = float(minDistance.distance/range+.5);
+ }
+ }
+
+ errorCorrectionConfig.distanceCheckMode = ErrorCorrectionConfig::DO_NOT_CHECK_DISTANCE;
+ msdfErrorCorrection(output, shape, Projection(scale, translate), range, MSDFGeneratorConfig(false, errorCorrectionConfig));
+}
+
+}
diff --git a/thirdparty/msdfgen/core/pixel-conversion.hpp b/thirdparty/msdfgen/core/pixel-conversion.hpp
new file mode 100644
index 0000000000..7e9b6d08f0
--- /dev/null
+++ b/thirdparty/msdfgen/core/pixel-conversion.hpp
@@ -0,0 +1,18 @@
+
+#pragma once
+
+#include "arithmetics.hpp"
+
+namespace msdfgen {
+
+typedef unsigned char byte;
+
+inline byte pixelFloatToByte(float x) {
+ return byte(clamp(256.f*x, 255.f));
+}
+
+inline float pixelByteToFloat(byte x) {
+ return 1.f/255.f*float(x);
+}
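+
+// Editorial note: pixelFloatToByte(1.f) yields 255 (256.f clamps to 255.f),
+// and pixelByteToFloat inverts the mapping up to quantization, e.g.
+// pixelByteToFloat(pixelFloatToByte(.5f)) == 128.f/255.f (~.502).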
+
+}
diff --git a/thirdparty/msdfgen/core/rasterization.cpp b/thirdparty/msdfgen/core/rasterization.cpp
new file mode 100644
index 0000000000..9aa695a8c1
--- /dev/null
+++ b/thirdparty/msdfgen/core/rasterization.cpp
@@ -0,0 +1,115 @@
+
+#include "rasterization.h"
+
+#include <vector>
+#include "arithmetics.hpp"
+
+namespace msdfgen {
+
+void rasterize(const BitmapRef<float, 1> &output, const Shape &shape, const Projection &projection, FillRule fillRule) {
+ Scanline scanline;
+ for (int y = 0; y < output.height; ++y) {
+ int row = shape.inverseYAxis ? output.height-y-1 : y;
+ shape.scanline(scanline, projection.unprojectY(y+.5));
+ for (int x = 0; x < output.width; ++x)
+ *output(x, row) = (float) scanline.filled(projection.unprojectX(x+.5), fillRule);
+ }
+}
+
+void distanceSignCorrection(const BitmapRef<float, 1> &sdf, const Shape &shape, const Projection &projection, FillRule fillRule) {
+ Scanline scanline;
+ for (int y = 0; y < sdf.height; ++y) {
+ int row = shape.inverseYAxis ? sdf.height-y-1 : y;
+ shape.scanline(scanline, projection.unprojectY(y+.5));
+ for (int x = 0; x < sdf.width; ++x) {
+ bool fill = scanline.filled(projection.unprojectX(x+.5), fillRule);
+ float &sd = *sdf(x, row);
+ if ((sd > .5f) != fill)
+ sd = 1.f-sd;
+ }
+ }
+}
+
+template <int N>
+static void multiDistanceSignCorrection(const BitmapRef<float, N> &sdf, const Shape &shape, const Projection &projection, FillRule fillRule) {
+ int w = sdf.width, h = sdf.height;
+ if (!(w*h))
+ return;
+ Scanline scanline;
+ bool ambiguous = false;
+ std::vector<char> matchMap;
+ matchMap.resize(w*h);
+ char *match = &matchMap[0];
+ for (int y = 0; y < h; ++y) {
+ int row = shape.inverseYAxis ? h-y-1 : y;
+ shape.scanline(scanline, projection.unprojectY(y+.5));
+ for (int x = 0; x < w; ++x) {
+ bool fill = scanline.filled(projection.unprojectX(x+.5), fillRule);
+ float *msd = sdf(x, row);
+ float sd = median(msd[0], msd[1], msd[2]);
+ if (sd == .5f)
+ ambiguous = true;
+ else if ((sd > .5f) != fill) {
+ msd[0] = 1.f-msd[0];
+ msd[1] = 1.f-msd[1];
+ msd[2] = 1.f-msd[2];
+ *match = -1;
+ } else
+ *match = 1;
+ if (N >= 4 && (msd[3] > .5f) != fill)
+ msd[3] = 1.f-msd[3];
+ ++match;
+ }
+ }
+    // This step is necessary to avoid artifacts when the whole shape is inverted
+ if (ambiguous) {
+ match = &matchMap[0];
+ for (int y = 0; y < h; ++y) {
+ int row = shape.inverseYAxis ? h-y-1 : y;
+ for (int x = 0; x < w; ++x) {
+ if (!*match) {
+ int neighborMatch = 0;
+ if (x > 0) neighborMatch += *(match-1);
+ if (x < w-1) neighborMatch += *(match+1);
+ if (y > 0) neighborMatch += *(match-w);
+ if (y < h-1) neighborMatch += *(match+w);
+ if (neighborMatch < 0) {
+ float *msd = sdf(x, row);
+ msd[0] = 1.f-msd[0];
+ msd[1] = 1.f-msd[1];
+ msd[2] = 1.f-msd[2];
+ }
+ }
+ ++match;
+ }
+ }
+ }
+}
+
+void distanceSignCorrection(const BitmapRef<float, 3> &sdf, const Shape &shape, const Projection &projection, FillRule fillRule) {
+ multiDistanceSignCorrection(sdf, shape, projection, fillRule);
+}
+
+void distanceSignCorrection(const BitmapRef<float, 4> &sdf, const Shape &shape, const Projection &projection, FillRule fillRule) {
+ multiDistanceSignCorrection(sdf, shape, projection, fillRule);
+}
+
+// Legacy API
+
+void rasterize(const BitmapRef<float, 1> &output, const Shape &shape, const Vector2 &scale, const Vector2 &translate, FillRule fillRule) {
+ rasterize(output, shape, Projection(scale, translate), fillRule);
+}
+
+void distanceSignCorrection(const BitmapRef<float, 1> &sdf, const Shape &shape, const Vector2 &scale, const Vector2 &translate, FillRule fillRule) {
+ distanceSignCorrection(sdf, shape, Projection(scale, translate), fillRule);
+}
+
+void distanceSignCorrection(const BitmapRef<float, 3> &sdf, const Shape &shape, const Vector2 &scale, const Vector2 &translate, FillRule fillRule) {
+ distanceSignCorrection(sdf, shape, Projection(scale, translate), fillRule);
+}
+
+void distanceSignCorrection(const BitmapRef<float, 4> &sdf, const Shape &shape, const Vector2 &scale, const Vector2 &translate, FillRule fillRule) {
+ distanceSignCorrection(sdf, shape, Projection(scale, translate), fillRule);
+}
+
+}
diff --git a/thirdparty/msdfgen/core/rasterization.h b/thirdparty/msdfgen/core/rasterization.h
new file mode 100644
index 0000000000..82d0c73d95
--- /dev/null
+++ b/thirdparty/msdfgen/core/rasterization.h
@@ -0,0 +1,25 @@
+
+#pragma once
+
+#include "Vector2.h"
+#include "Shape.h"
+#include "Projection.h"
+#include "Scanline.h"
+#include "BitmapRef.hpp"
+
+namespace msdfgen {
+
+/// Rasterizes the shape into a monochrome bitmap.
+void rasterize(const BitmapRef<float, 1> &output, const Shape &shape, const Projection &projection, FillRule fillRule = FILL_NONZERO);
+/// Fixes the sign of the input signed distance field, so that it matches the shape's rasterized fill.
+void distanceSignCorrection(const BitmapRef<float, 1> &sdf, const Shape &shape, const Projection &projection, FillRule fillRule = FILL_NONZERO);
+void distanceSignCorrection(const BitmapRef<float, 3> &sdf, const Shape &shape, const Projection &projection, FillRule fillRule = FILL_NONZERO);
+void distanceSignCorrection(const BitmapRef<float, 4> &sdf, const Shape &shape, const Projection &projection, FillRule fillRule = FILL_NONZERO);
+
+// Old versions of the function API, kept for backwards compatibility
+void rasterize(const BitmapRef<float, 1> &output, const Shape &shape, const Vector2 &scale, const Vector2 &translate, FillRule fillRule = FILL_NONZERO);
+void distanceSignCorrection(const BitmapRef<float, 1> &sdf, const Shape &shape, const Vector2 &scale, const Vector2 &translate, FillRule fillRule = FILL_NONZERO);
+void distanceSignCorrection(const BitmapRef<float, 3> &sdf, const Shape &shape, const Vector2 &scale, const Vector2 &translate, FillRule fillRule = FILL_NONZERO);
+void distanceSignCorrection(const BitmapRef<float, 4> &sdf, const Shape &shape, const Vector2 &scale, const Vector2 &translate, FillRule fillRule = FILL_NONZERO);
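+
+/* Illustrative usage (editorial addition, not upstream msdfgen code):
+ * rasterizing the reference fill and fixing inverted signs of a generated
+ * 3-channel field msdf against the same shape and projection:
+ *
+ *     Bitmap<float, 1> raster(64, 64);
+ *     rasterize(raster, shape, projection);            // defaults to FILL_NONZERO
+ *     distanceSignCorrection(msdf, shape, projection);
+ */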
+
+}
diff --git a/thirdparty/msdfgen/core/render-sdf.cpp b/thirdparty/msdfgen/core/render-sdf.cpp
new file mode 100644
index 0000000000..e282285e59
--- /dev/null
+++ b/thirdparty/msdfgen/core/render-sdf.cpp
@@ -0,0 +1,108 @@
+
+#include "render-sdf.h"
+
+#include "arithmetics.hpp"
+#include "pixel-conversion.hpp"
+#include "bitmap-interpolation.hpp"
+
+namespace msdfgen {
+
+static float distVal(float dist, double pxRange, float midValue) {
+ if (!pxRange)
+ return (float) (dist > midValue);
+ return (float) clamp((dist-midValue)*pxRange+.5);
+}
+
+void renderSDF(const BitmapRef<float, 1> &output, const BitmapConstRef<float, 1> &sdf, double pxRange, float midValue) {
+ Vector2 scale((double) sdf.width/output.width, (double) sdf.height/output.height);
+ pxRange *= (double) (output.width+output.height)/(sdf.width+sdf.height);
+ for (int y = 0; y < output.height; ++y)
+ for (int x = 0; x < output.width; ++x) {
+ float sd;
+ interpolate(&sd, sdf, scale*Point2(x+.5, y+.5));
+ *output(x, y) = distVal(sd, pxRange, midValue);
+ }
+}
+
+void renderSDF(const BitmapRef<float, 3> &output, const BitmapConstRef<float, 1> &sdf, double pxRange, float midValue) {
+ Vector2 scale((double) sdf.width/output.width, (double) sdf.height/output.height);
+ pxRange *= (double) (output.width+output.height)/(sdf.width+sdf.height);
+ for (int y = 0; y < output.height; ++y)
+ for (int x = 0; x < output.width; ++x) {
+ float sd;
+ interpolate(&sd, sdf, scale*Point2(x+.5, y+.5));
+ float v = distVal(sd, pxRange, midValue);
+ output(x, y)[0] = v;
+ output(x, y)[1] = v;
+ output(x, y)[2] = v;
+ }
+}
+
+void renderSDF(const BitmapRef<float, 1> &output, const BitmapConstRef<float, 3> &sdf, double pxRange, float midValue) {
+ Vector2 scale((double) sdf.width/output.width, (double) sdf.height/output.height);
+ pxRange *= (double) (output.width+output.height)/(sdf.width+sdf.height);
+ for (int y = 0; y < output.height; ++y)
+ for (int x = 0; x < output.width; ++x) {
+ float sd[3];
+ interpolate(sd, sdf, scale*Point2(x+.5, y+.5));
+ *output(x, y) = distVal(median(sd[0], sd[1], sd[2]), pxRange, midValue);
+ }
+}
+
+void renderSDF(const BitmapRef<float, 3> &output, const BitmapConstRef<float, 3> &sdf, double pxRange, float midValue) {
+ Vector2 scale((double) sdf.width/output.width, (double) sdf.height/output.height);
+ pxRange *= (double) (output.width+output.height)/(sdf.width+sdf.height);
+ for (int y = 0; y < output.height; ++y)
+ for (int x = 0; x < output.width; ++x) {
+ float sd[3];
+ interpolate(sd, sdf, scale*Point2(x+.5, y+.5));
+ output(x, y)[0] = distVal(sd[0], pxRange, midValue);
+ output(x, y)[1] = distVal(sd[1], pxRange, midValue);
+ output(x, y)[2] = distVal(sd[2], pxRange, midValue);
+ }
+}
+
+void renderSDF(const BitmapRef<float, 1> &output, const BitmapConstRef<float, 4> &sdf, double pxRange, float midValue) {
+ Vector2 scale((double) sdf.width/output.width, (double) sdf.height/output.height);
+ pxRange *= (double) (output.width+output.height)/(sdf.width+sdf.height);
+ for (int y = 0; y < output.height; ++y)
+ for (int x = 0; x < output.width; ++x) {
+ float sd[4];
+ interpolate(sd, sdf, scale*Point2(x+.5, y+.5));
+ *output(x, y) = distVal(median(sd[0], sd[1], sd[2]), pxRange, midValue);
+ }
+}
+
+void renderSDF(const BitmapRef<float, 4> &output, const BitmapConstRef<float, 4> &sdf, double pxRange, float midValue) {
+ Vector2 scale((double) sdf.width/output.width, (double) sdf.height/output.height);
+ pxRange *= (double) (output.width+output.height)/(sdf.width+sdf.height);
+ for (int y = 0; y < output.height; ++y)
+ for (int x = 0; x < output.width; ++x) {
+ float sd[4];
+ interpolate(sd, sdf, scale*Point2(x+.5, y+.5));
+ output(x, y)[0] = distVal(sd[0], pxRange, midValue);
+ output(x, y)[1] = distVal(sd[1], pxRange, midValue);
+ output(x, y)[2] = distVal(sd[2], pxRange, midValue);
+ output(x, y)[3] = distVal(sd[3], pxRange, midValue);
+ }
+}
+
+void simulate8bit(const BitmapRef<float, 1> &bitmap) {
+ const float *end = bitmap.pixels+1*bitmap.width*bitmap.height;
+ for (float *p = bitmap.pixels; p < end; ++p)
+ *p = pixelByteToFloat(pixelFloatToByte(*p));
+}
+
+void simulate8bit(const BitmapRef<float, 3> &bitmap) {
+ const float *end = bitmap.pixels+3*bitmap.width*bitmap.height;
+ for (float *p = bitmap.pixels; p < end; ++p)
+ *p = pixelByteToFloat(pixelFloatToByte(*p));
+}
+
+void simulate8bit(const BitmapRef<float, 4> &bitmap) {
+ const float *end = bitmap.pixels+4*bitmap.width*bitmap.height;
+ for (float *p = bitmap.pixels; p < end; ++p)
+ *p = pixelByteToFloat(pixelFloatToByte(*p));
+}
+
+}
diff --git a/thirdparty/msdfgen/core/render-sdf.h b/thirdparty/msdfgen/core/render-sdf.h
new file mode 100644
index 0000000000..7f2d270b67
--- /dev/null
+++ b/thirdparty/msdfgen/core/render-sdf.h
@@ -0,0 +1,22 @@
+
+#pragma once
+
+#include "Vector2.h"
+#include "BitmapRef.hpp"
+
+namespace msdfgen {
+
+/// Reconstructs the shape's appearance into output from the distance field sdf.
+void renderSDF(const BitmapRef<float, 1> &output, const BitmapConstRef<float, 1> &sdf, double pxRange = 0, float midValue = .5f);
+void renderSDF(const BitmapRef<float, 3> &output, const BitmapConstRef<float, 1> &sdf, double pxRange = 0, float midValue = .5f);
+void renderSDF(const BitmapRef<float, 1> &output, const BitmapConstRef<float, 3> &sdf, double pxRange = 0, float midValue = .5f);
+void renderSDF(const BitmapRef<float, 3> &output, const BitmapConstRef<float, 3> &sdf, double pxRange = 0, float midValue = .5f);
+void renderSDF(const BitmapRef<float, 1> &output, const BitmapConstRef<float, 4> &sdf, double pxRange = 0, float midValue = .5f);
+void renderSDF(const BitmapRef<float, 4> &output, const BitmapConstRef<float, 4> &sdf, double pxRange = 0, float midValue = .5f);
+
+/// Snaps the values of the floating-point bitmap to one of the 256 values representable in a standard 8-bit bitmap.
+void simulate8bit(const BitmapRef<float, 1> &bitmap);
+void simulate8bit(const BitmapRef<float, 3> &bitmap);
+void simulate8bit(const BitmapRef<float, 4> &bitmap);
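+
+/* Illustrative usage (editorial addition, not upstream msdfgen code):
+ * previewing a 32x32 MSDF msdf as a 256x256 antialiased image and snapping it
+ * to 8-bit levels; pxRange is given relative to the SDF resolution and is
+ * rescaled to the output resolution internally:
+ *
+ *     Bitmap<float, 1> preview(256, 256);
+ *     renderSDF(preview, msdf, 4.0);
+ *     simulate8bit(preview);
+ */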
+
+}
diff --git a/thirdparty/msdfgen/core/save-bmp.cpp b/thirdparty/msdfgen/core/save-bmp.cpp
new file mode 100644
index 0000000000..f761ee4f88
--- /dev/null
+++ b/thirdparty/msdfgen/core/save-bmp.cpp
@@ -0,0 +1,169 @@
+
+#define _CRT_SECURE_NO_WARNINGS
+
+#include "save-bmp.h"
+
+#include <cstdio>
+
+#ifdef MSDFGEN_USE_CPP11
+ #include <cstdint>
+#else
+ typedef int int32_t;
+ typedef unsigned uint32_t;
+ typedef unsigned short uint16_t;
+ typedef unsigned char uint8_t;
+#endif
+
+#include "pixel-conversion.hpp"
+
+namespace msdfgen {
+
+template <typename T>
+static bool writeValue(FILE *file, T value) {
+ #ifdef __BIG_ENDIAN__
+ T reverse = 0;
+ for (int i = 0; i < sizeof(T); ++i) {
+ reverse <<= 8;
+ reverse |= value&T(0xff);
+ value >>= 8;
+ }
+ return fwrite(&reverse, sizeof(T), 1, file) == 1;
+ #else
+ return fwrite(&value, sizeof(T), 1, file) == 1;
+ #endif
+}
+
+static bool writeBmpHeader(FILE *file, int width, int height, int &paddedWidth) {
+ paddedWidth = (3*width+3)&~3;
+ const uint32_t bitmapStart = 54;
+ const uint32_t bitmapSize = paddedWidth*height;
+ const uint32_t fileSize = bitmapStart+bitmapSize;
+
+ writeValue<uint16_t>(file, 0x4d42u);
+ writeValue<uint32_t>(file, fileSize);
+ writeValue<uint16_t>(file, 0);
+ writeValue<uint16_t>(file, 0);
+ writeValue<uint32_t>(file, bitmapStart);
+
+ writeValue<uint32_t>(file, 40);
+ writeValue<int32_t>(file, width);
+ writeValue<int32_t>(file, height);
+ writeValue<uint16_t>(file, 1);
+ writeValue<uint16_t>(file, 24);
+ writeValue<uint32_t>(file, 0);
+ writeValue<uint32_t>(file, bitmapSize);
+ writeValue<uint32_t>(file, 2835);
+ writeValue<uint32_t>(file, 2835);
+ writeValue<uint32_t>(file, 0);
+ writeValue<uint32_t>(file, 0);
+
+ return true;
+}
+
+bool saveBmp(const BitmapConstRef<byte, 1> &bitmap, const char *filename) {
+ FILE *file = fopen(filename, "wb");
+ if (!file)
+ return false;
+
+ int paddedWidth;
+ writeBmpHeader(file, bitmap.width, bitmap.height, paddedWidth);
+
+ const uint8_t padding[4] = { };
+ int padLength = paddedWidth-3*bitmap.width;
+ for (int y = 0; y < bitmap.height; ++y) {
+ for (int x = 0; x < bitmap.width; ++x) {
+ uint8_t px = (uint8_t) *bitmap(x, y);
+ fwrite(&px, sizeof(uint8_t), 1, file);
+ fwrite(&px, sizeof(uint8_t), 1, file);
+ fwrite(&px, sizeof(uint8_t), 1, file);
+ }
+ fwrite(padding, 1, padLength, file);
+ }
+
+ return !fclose(file);
+}
+
+bool saveBmp(const BitmapConstRef<byte, 3> &bitmap, const char *filename) {
+ FILE *file = fopen(filename, "wb");
+ if (!file)
+ return false;
+
+ int paddedWidth;
+ writeBmpHeader(file, bitmap.width, bitmap.height, paddedWidth);
+
+ const uint8_t padding[4] = { };
+ int padLength = paddedWidth-3*bitmap.width;
+ for (int y = 0; y < bitmap.height; ++y) {
+ for (int x = 0; x < bitmap.width; ++x) {
+ uint8_t bgr[3] = {
+ (uint8_t) bitmap(x, y)[2],
+ (uint8_t) bitmap(x, y)[1],
+ (uint8_t) bitmap(x, y)[0]
+ };
+ fwrite(bgr, sizeof(uint8_t), 3, file);
+ }
+ fwrite(padding, 1, padLength, file);
+ }
+
+ return !fclose(file);
+}
+
+bool saveBmp(const BitmapConstRef<byte, 4> &bitmap, const char *filename) {
+    // RGBA is not supported by this BMP writer, which only emits 24-bit BGR
+ return false;
+}
+
+bool saveBmp(const BitmapConstRef<float, 1> &bitmap, const char *filename) {
+ FILE *file = fopen(filename, "wb");
+ if (!file)
+ return false;
+
+ int paddedWidth;
+ writeBmpHeader(file, bitmap.width, bitmap.height, paddedWidth);
+
+ const uint8_t padding[4] = { };
+ int padLength = paddedWidth-3*bitmap.width;
+ for (int y = 0; y < bitmap.height; ++y) {
+ for (int x = 0; x < bitmap.width; ++x) {
+ uint8_t px = (uint8_t) pixelFloatToByte(*bitmap(x, y));
+ fwrite(&px, sizeof(uint8_t), 1, file);
+ fwrite(&px, sizeof(uint8_t), 1, file);
+ fwrite(&px, sizeof(uint8_t), 1, file);
+ }
+ fwrite(padding, 1, padLength, file);
+ }
+
+ return !fclose(file);
+}
+
+bool saveBmp(const BitmapConstRef<float, 3> &bitmap, const char *filename) {
+ FILE *file = fopen(filename, "wb");
+ if (!file)
+ return false;
+
+ int paddedWidth;
+ writeBmpHeader(file, bitmap.width, bitmap.height, paddedWidth);
+
+ const uint8_t padding[4] = { };
+ int padLength = paddedWidth-3*bitmap.width;
+ for (int y = 0; y < bitmap.height; ++y) {
+ for (int x = 0; x < bitmap.width; ++x) {
+ uint8_t bgr[3] = {
+ (uint8_t) pixelFloatToByte(bitmap(x, y)[2]),
+ (uint8_t) pixelFloatToByte(bitmap(x, y)[1]),
+ (uint8_t) pixelFloatToByte(bitmap(x, y)[0])
+ };
+ fwrite(bgr, sizeof(uint8_t), 3, file);
+ }
+ fwrite(padding, 1, padLength, file);
+ }
+
+ return !fclose(file);
+}
+
+bool saveBmp(const BitmapConstRef<float, 4> &bitmap, const char *filename) {
+    // RGBA is not supported by this BMP writer, which only emits 24-bit BGR
+ return false;
+}
+
+}
diff --git a/thirdparty/msdfgen/core/save-bmp.h b/thirdparty/msdfgen/core/save-bmp.h
new file mode 100644
index 0000000000..98f852921f
--- /dev/null
+++ b/thirdparty/msdfgen/core/save-bmp.h
@@ -0,0 +1,16 @@
+
+#pragma once
+
+#include "BitmapRef.hpp"
+
+namespace msdfgen {
+
+/// Saves the bitmap as a BMP file.
+bool saveBmp(const BitmapConstRef<byte, 1> &bitmap, const char *filename);
+bool saveBmp(const BitmapConstRef<byte, 3> &bitmap, const char *filename);
+bool saveBmp(const BitmapConstRef<byte, 4> &bitmap, const char *filename);
+bool saveBmp(const BitmapConstRef<float, 1> &bitmap, const char *filename);
+bool saveBmp(const BitmapConstRef<float, 3> &bitmap, const char *filename);
+bool saveBmp(const BitmapConstRef<float, 4> &bitmap, const char *filename);
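+
+/* Illustrative usage (editorial addition, not upstream msdfgen code); note
+ * that the 4-channel overloads always fail because this writer only emits
+ * 24-bit BGR:
+ *
+ *     if (!saveBmp(msdf, "msdf.bmp")) // msdf: BitmapConstRef<float, 3>
+ *         fputs("failed to write msdf.bmp\n", stderr);
+ */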
+
+}
diff --git a/thirdparty/msdfgen/core/save-tiff.cpp b/thirdparty/msdfgen/core/save-tiff.cpp
new file mode 100644
index 0000000000..71405e00e5
--- /dev/null
+++ b/thirdparty/msdfgen/core/save-tiff.cpp
@@ -0,0 +1,190 @@
+
+#define _CRT_SECURE_NO_WARNINGS
+
+#include "save-tiff.h"
+
+#include <cstdio>
+
+#ifdef MSDFGEN_USE_CPP11
+ #include <cstdint>
+#else
+ typedef int int32_t;
+ typedef unsigned uint32_t;
+ typedef unsigned short uint16_t;
+ typedef unsigned char uint8_t;
+#endif
+
+namespace msdfgen {
+
+template <typename T>
+static bool writeValue(FILE *file, T value) {
+ return fwrite(&value, sizeof(T), 1, file) == 1;
+}
+template <typename T>
+static void writeValueRepeated(FILE *file, T value, int times) {
+ for (int i = 0; i < times; ++i)
+ writeValue(file, value);
+}
+
+static bool writeTiffHeader(FILE *file, int width, int height, int channels) {
+ #ifdef __BIG_ENDIAN__
+ writeValue<uint16_t>(file, 0x4d4du);
+ #else
+ writeValue<uint16_t>(file, 0x4949u);
+ #endif
+ writeValue<uint16_t>(file, 42);
+ writeValue<uint32_t>(file, 0x0008u); // Offset of first IFD
+ // Offset = 0x0008
+
+ writeValue<uint16_t>(file, 15); // Number of IFD entries
+
+ // ImageWidth
+ writeValue<uint16_t>(file, 0x0100u);
+ writeValue<uint16_t>(file, 0x0004u);
+ writeValue<uint32_t>(file, 1);
+ writeValue<int32_t>(file, width);
+ // ImageLength
+ writeValue<uint16_t>(file, 0x0101u);
+ writeValue<uint16_t>(file, 0x0004u);
+ writeValue<uint32_t>(file, 1);
+ writeValue<int32_t>(file, height);
+ // BitsPerSample
+ writeValue<uint16_t>(file, 0x0102u);
+ writeValue<uint16_t>(file, 0x0003u);
+ writeValue<uint32_t>(file, channels);
+ if (channels > 1)
+ writeValue<uint32_t>(file, 0x00c2u); // Offset of 32, 32, ...
+ else {
+ writeValue<uint16_t>(file, 32);
+ writeValue<uint16_t>(file, 0);
+ }
+ // Compression
+ writeValue<uint16_t>(file, 0x0103u);
+ writeValue<uint16_t>(file, 0x0003u);
+ writeValue<uint32_t>(file, 1);
+ writeValue<uint16_t>(file, 1);
+ writeValue<uint16_t>(file, 0);
+ // PhotometricInterpretation
+ writeValue<uint16_t>(file, 0x0106u);
+ writeValue<uint16_t>(file, 0x0003u);
+ writeValue<uint32_t>(file, 1);
+ writeValue<uint16_t>(file, channels >= 3 ? 2 : 1);
+ writeValue<uint16_t>(file, 0);
+ // StripOffsets
+ writeValue<uint16_t>(file, 0x0111u);
+ writeValue<uint16_t>(file, 0x0004u);
+ writeValue<uint32_t>(file, 1);
+ writeValue<uint32_t>(file, 0x00d2u+(channels > 1)*channels*12); // Offset of pixel data
+ // SamplesPerPixel
+ writeValue<uint16_t>(file, 0x0115u);
+ writeValue<uint16_t>(file, 0x0003u);
+ writeValue<uint32_t>(file, 1);
+ writeValue<uint16_t>(file, channels);
+ writeValue<uint16_t>(file, 0);
+ // RowsPerStrip
+ writeValue<uint16_t>(file, 0x0116u);
+ writeValue<uint16_t>(file, 0x0004u);
+ writeValue<uint32_t>(file, 1);
+ writeValue<int32_t>(file, height);
+ // StripByteCounts
+ writeValue<uint16_t>(file, 0x0117u);
+ writeValue<uint16_t>(file, 0x0004u);
+ writeValue<uint32_t>(file, 1);
+ writeValue<int32_t>(file, sizeof(float)*channels*width*height);
+ // XResolution
+ writeValue<uint16_t>(file, 0x011au);
+ writeValue<uint16_t>(file, 0x0005u);
+ writeValue<uint32_t>(file, 1);
+ writeValue<uint32_t>(file, 0x00c2u+(channels > 1)*channels*2); // Offset of 300, 1
+ // YResolution
+ writeValue<uint16_t>(file, 0x011bu);
+ writeValue<uint16_t>(file, 0x0005u);
+ writeValue<uint32_t>(file, 1);
+ writeValue<uint32_t>(file, 0x00cau+(channels > 1)*channels*2); // Offset of 300, 1
+ // ResolutionUnit
+ writeValue<uint16_t>(file, 0x0128u);
+ writeValue<uint16_t>(file, 0x0003u);
+ writeValue<uint32_t>(file, 1);
+ writeValue<uint16_t>(file, 2);
+ writeValue<uint16_t>(file, 0);
+ // SampleFormat
+ writeValue<uint16_t>(file, 0x0153u);
+ writeValue<uint16_t>(file, 0x0003u);
+ writeValue<uint32_t>(file, channels);
+ if (channels > 1)
+ writeValue<uint32_t>(file, 0x00d2u+channels*2); // Offset of 3, 3, ...
+ else {
+ writeValue<uint16_t>(file, 3);
+ writeValue<uint16_t>(file, 0);
+ }
+ // SMinSampleValue
+ writeValue<uint16_t>(file, 0x0154u);
+ writeValue<uint16_t>(file, 0x000bu);
+ writeValue<uint32_t>(file, channels);
+ if (channels > 1)
+ writeValue<uint32_t>(file, 0x00d2u+channels*4); // Offset of 0.f, 0.f, ...
+ else
+ writeValue<float>(file, 0.f);
+ // SMaxSampleValue
+ writeValue<uint16_t>(file, 0x0155u);
+ writeValue<uint16_t>(file, 0x000bu);
+ writeValue<uint32_t>(file, channels);
+ if (channels > 1)
+ writeValue<uint32_t>(file, 0x00d2u+channels*8); // Offset of 1.f, 1.f, ...
+ else
+ writeValue<float>(file, 1.f);
+ // Offset = 0x00be
+
+ writeValue<uint32_t>(file, 0);
+
+ if (channels > 1) {
+ // 0x00c2 BitsPerSample data
+ writeValueRepeated<uint16_t>(file, 32, channels);
+ // 0x00c2 + 2*N XResolution data
+ writeValue<uint32_t>(file, 300);
+ writeValue<uint32_t>(file, 1);
+ // 0x00ca + 2*N YResolution data
+ writeValue<uint32_t>(file, 300);
+ writeValue<uint32_t>(file, 1);
+ // 0x00d2 + 2*N SampleFormat data
+ writeValueRepeated<uint16_t>(file, 3, channels);
+ // 0x00d2 + 4*N SMinSampleValue data
+ writeValueRepeated<float>(file, 0.f, channels);
+ // 0x00d2 + 8*N SMaxSampleValue data
+ writeValueRepeated<float>(file, 1.f, channels);
+ // Offset = 0x00d2 + 12*N
+ } else {
+ // 0x00c2 XResolution data
+ writeValue<uint32_t>(file, 300);
+ writeValue<uint32_t>(file, 1);
+ // 0x00ca YResolution data
+ writeValue<uint32_t>(file, 300);
+ writeValue<uint32_t>(file, 1);
+ // Offset = 0x00d2
+ }
+
+ return true;
+}
+
+template <int N>
+bool saveTiffFloat(const BitmapConstRef<float, N> &bitmap, const char *filename) {
+ FILE *file = fopen(filename, "wb");
+ if (!file)
+ return false;
+ writeTiffHeader(file, bitmap.width, bitmap.height, N);
+ for (int y = bitmap.height-1; y >= 0; --y)
+ fwrite(bitmap(0, y), sizeof(float), N*bitmap.width, file);
+ return !fclose(file);
+}
+
+bool saveTiff(const BitmapConstRef<float, 1> &bitmap, const char *filename) {
+ return saveTiffFloat(bitmap, filename);
+}
+bool saveTiff(const BitmapConstRef<float, 3> &bitmap, const char *filename) {
+ return saveTiffFloat(bitmap, filename);
+}
+bool saveTiff(const BitmapConstRef<float, 4> &bitmap, const char *filename) {
+ return saveTiffFloat(bitmap, filename);
+}
+
+}
diff --git a/thirdparty/msdfgen/core/save-tiff.h b/thirdparty/msdfgen/core/save-tiff.h
new file mode 100644
index 0000000000..072cd71d50
--- /dev/null
+++ b/thirdparty/msdfgen/core/save-tiff.h
@@ -0,0 +1,13 @@
+
+#pragma once
+
+#include "BitmapRef.hpp"
+
+namespace msdfgen {
+
+/// Saves the bitmap as an uncompressed floating-point TIFF file.
+bool saveTiff(const BitmapConstRef<float, 1> &bitmap, const char *filename);
+bool saveTiff(const BitmapConstRef<float, 3> &bitmap, const char *filename);
+bool saveTiff(const BitmapConstRef<float, 4> &bitmap, const char *filename);
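+
+/* Illustrative usage (editorial addition, not upstream msdfgen code): TIFF
+ * output keeps the raw float samples, making it the lossless option for
+ * inspecting a generated field:
+ *
+ *     saveTiff(msdf, "msdf.tif"); // msdf: BitmapConstRef<float, 3>
+ */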
+
+}
diff --git a/thirdparty/msdfgen/core/sdf-error-estimation.cpp b/thirdparty/msdfgen/core/sdf-error-estimation.cpp
new file mode 100644
index 0000000000..7c00c449a9
--- /dev/null
+++ b/thirdparty/msdfgen/core/sdf-error-estimation.cpp
@@ -0,0 +1,192 @@
+
+#include "sdf-error-estimation.h"
+
+#include <cmath>
+#include "arithmetics.hpp"
+
+namespace msdfgen {
+
+void scanlineSDF(Scanline &line, const BitmapConstRef<float, 1> &sdf, const Projection &projection, double y, bool inverseYAxis) {
+ if (!(sdf.width > 0 && sdf.height > 0))
+ return line.setIntersections(std::vector<Scanline::Intersection>());
+ double pixelY = clamp(projection.projectY(y)-.5, double(sdf.height-1));
+ if (inverseYAxis)
+ pixelY = sdf.height-1-pixelY;
+ int b = (int) floor(pixelY);
+ int t = b+1;
+ double bt = pixelY-b;
+ if (t >= sdf.height) {
+ b = sdf.height-1;
+ t = sdf.height-1;
+ bt = 1;
+ }
+ bool inside = false;
+ std::vector<Scanline::Intersection> intersections;
+ float lv, rv = mix(*sdf(0, b), *sdf(0, t), bt);
+ if ((inside = rv > .5f)) {
+ Scanline::Intersection intersection = { -1e240, 1 };
+ intersections.push_back(intersection);
+ }
+ for (int l = 0, r = 1; r < sdf.width; ++l, ++r) {
+ lv = rv;
+ rv = mix(*sdf(r, b), *sdf(r, t), bt);
+ if (lv != rv) {
+ double lr = double(.5f-lv)/double(rv-lv);
+ if (lr >= 0 && lr <= 1) {
+ Scanline::Intersection intersection = { projection.unprojectX(l+lr+.5), sign(rv-lv) };
+ intersections.push_back(intersection);
+ }
+ }
+ }
+#ifdef MSDFGEN_USE_CPP11
+ line.setIntersections((std::vector<Scanline::Intersection> &&) intersections);
+#else
+ line.setIntersections(intersections);
+#endif
+}
+
+template <int N>
+void scanlineMSDF(Scanline &line, const BitmapConstRef<float, N> &sdf, const Projection &projection, double y, bool inverseYAxis) {
+ if (!(sdf.width > 0 && sdf.height > 0))
+ return line.setIntersections(std::vector<Scanline::Intersection>());
+ double pixelY = clamp(projection.projectY(y)-.5, double(sdf.height-1));
+ if (inverseYAxis)
+ pixelY = sdf.height-1-pixelY;
+ int b = (int) floor(pixelY);
+ int t = b+1;
+ double bt = pixelY-b;
+ if (t >= sdf.height) {
+ b = sdf.height-1;
+ t = sdf.height-1;
+ bt = 1;
+ }
+ bool inside = false;
+ std::vector<Scanline::Intersection> intersections;
+ float lv[3], rv[3];
+ rv[0] = mix(sdf(0, b)[0], sdf(0, t)[0], bt);
+ rv[1] = mix(sdf(0, b)[1], sdf(0, t)[1], bt);
+ rv[2] = mix(sdf(0, b)[2], sdf(0, t)[2], bt);
+ if ((inside = median(rv[0], rv[1], rv[2]) > .5f)) {
+ Scanline::Intersection intersection = { -1e240, 1 };
+ intersections.push_back(intersection);
+ }
+ for (int l = 0, r = 1; r < sdf.width; ++l, ++r) {
+ lv[0] = rv[0], lv[1] = rv[1], lv[2] = rv[2];
+ rv[0] = mix(sdf(r, b)[0], sdf(r, t)[0], bt);
+ rv[1] = mix(sdf(r, b)[1], sdf(r, t)[1], bt);
+ rv[2] = mix(sdf(r, b)[2], sdf(r, t)[2], bt);
+ Scanline::Intersection newIntersections[4];
+ int newIntersectionCount = 0;
+ for (int i = 0; i < 3; ++i) {
+ if (lv[i] != rv[i]) {
+ double lr = double(.5f-lv[i])/double(rv[i]-lv[i]);
+ if (lr >= 0 && lr <= 1) {
+ float v[3] = {
+ mix(lv[0], rv[0], lr),
+ mix(lv[1], rv[1], lr),
+ mix(lv[2], rv[2], lr)
+ };
+ if (median(v[0], v[1], v[2]) == v[i]) {
+ newIntersections[newIntersectionCount].x = projection.unprojectX(l+lr+.5);
+ newIntersections[newIntersectionCount].direction = sign(rv[i]-lv[i]);
+ ++newIntersectionCount;
+ }
+ }
+ }
+ }
+ // Sort new intersections
+ if (newIntersectionCount >= 2) {
+ if (newIntersections[0].x > newIntersections[1].x)
+ newIntersections[3] = newIntersections[0], newIntersections[0] = newIntersections[1], newIntersections[1] = newIntersections[3];
+ if (newIntersectionCount >= 3 && newIntersections[1].x > newIntersections[2].x) {
+ newIntersections[3] = newIntersections[1], newIntersections[1] = newIntersections[2], newIntersections[2] = newIntersections[3];
+ if (newIntersections[0].x > newIntersections[1].x)
+ newIntersections[3] = newIntersections[0], newIntersections[0] = newIntersections[1], newIntersections[1] = newIntersections[3];
+ }
+ }
+ for (int i = 0; i < newIntersectionCount; ++i) {
+ if ((newIntersections[i].direction > 0) == !inside) {
+ intersections.push_back(newIntersections[i]);
+ inside = !inside;
+ }
+ }
+ // Consistency check
+ float rvScalar = median(rv[0], rv[1], rv[2]);
+ if ((rvScalar > .5f) != inside && rvScalar != .5f && !intersections.empty()) {
+ intersections.pop_back();
+ inside = !inside;
+ }
+ }
+#ifdef MSDFGEN_USE_CPP11
+ line.setIntersections((std::vector<Scanline::Intersection> &&) intersections);
+#else
+ line.setIntersections(intersections);
+#endif
+}
+
+void scanlineSDF(Scanline &line, const BitmapConstRef<float, 3> &sdf, const Projection &projection, double y, bool inverseYAxis) {
+ scanlineMSDF(line, sdf, projection, y, inverseYAxis);
+}
+void scanlineSDF(Scanline &line, const BitmapConstRef<float, 4> &sdf, const Projection &projection, double y, bool inverseYAxis) {
+ scanlineMSDF(line, sdf, projection, y, inverseYAxis);
+}
+
+template <int N>
+double estimateSDFErrorInner(const BitmapConstRef<float, N> &sdf, const Shape &shape, const Projection &projection, int scanlinesPerRow, FillRule fillRule) {
+ if (sdf.width <= 1 || sdf.height <= 1 || scanlinesPerRow < 1)
+ return 0;
+ double subRowSize = 1./scanlinesPerRow;
+ double xFrom = projection.unprojectX(.5);
+ double xTo = projection.unprojectX(sdf.width-.5);
+ double overlapFactor = 1/(xTo-xFrom);
+ double error = 0;
+ Scanline refScanline, sdfScanline;
+ for (int row = 0; row < sdf.height-1; ++row) {
+ for (int subRow = 0; subRow < scanlinesPerRow; ++subRow) {
+ double bt = (subRow+.5)*subRowSize;
+ double y = projection.unprojectY(row+bt+.5);
+ shape.scanline(refScanline, y);
+ scanlineSDF(sdfScanline, sdf, projection, y, shape.inverseYAxis);
+ error += 1-overlapFactor*Scanline::overlap(refScanline, sdfScanline, xFrom, xTo, fillRule);
+ }
+ }
+ return error/((sdf.height-1)*scanlinesPerRow);
+}
+
+double estimateSDFError(const BitmapConstRef<float, 1> &sdf, const Shape &shape, const Projection &projection, int scanlinesPerRow, FillRule fillRule) {
+ return estimateSDFErrorInner(sdf, shape, projection, scanlinesPerRow, fillRule);
+}
+double estimateSDFError(const BitmapConstRef<float, 3> &sdf, const Shape &shape, const Projection &projection, int scanlinesPerRow, FillRule fillRule) {
+ return estimateSDFErrorInner(sdf, shape, projection, scanlinesPerRow, fillRule);
+}
+double estimateSDFError(const BitmapConstRef<float, 4> &sdf, const Shape &shape, const Projection &projection, int scanlinesPerRow, FillRule fillRule) {
+ return estimateSDFErrorInner(sdf, shape, projection, scanlinesPerRow, fillRule);
+}
+
+// Legacy API
+
+void scanlineSDF(Scanline &line, const BitmapConstRef<float, 1> &sdf, const Vector2 &scale, const Vector2 &translate, bool inverseYAxis, double y) {
+ scanlineSDF(line, sdf, Projection(scale, translate), y, inverseYAxis);
+}
+
+void scanlineSDF(Scanline &line, const BitmapConstRef<float, 3> &sdf, const Vector2 &scale, const Vector2 &translate, bool inverseYAxis, double y) {
+ scanlineSDF(line, sdf, Projection(scale, translate), y, inverseYAxis);
+}
+
+void scanlineSDF(Scanline &line, const BitmapConstRef<float, 4> &sdf, const Vector2 &scale, const Vector2 &translate, bool inverseYAxis, double y) {
+ scanlineSDF(line, sdf, Projection(scale, translate), y, inverseYAxis);
+}
+
+double estimateSDFError(const BitmapConstRef<float, 1> &sdf, const Shape &shape, const Vector2 &scale, const Vector2 &translate, int scanlinesPerRow, FillRule fillRule) {
+ return estimateSDFError(sdf, shape, Projection(scale, translate), scanlinesPerRow, fillRule);
+}
+
+double estimateSDFError(const BitmapConstRef<float, 3> &sdf, const Shape &shape, const Vector2 &scale, const Vector2 &translate, int scanlinesPerRow, FillRule fillRule) {
+ return estimateSDFError(sdf, shape, Projection(scale, translate), scanlinesPerRow, fillRule);
+}
+
+double estimateSDFError(const BitmapConstRef<float, 4> &sdf, const Shape &shape, const Vector2 &scale, const Vector2 &translate, int scanlinesPerRow, FillRule fillRule) {
+ return estimateSDFError(sdf, shape, Projection(scale, translate), scanlinesPerRow, fillRule);
+}
+
+}
diff --git a/thirdparty/msdfgen/core/sdf-error-estimation.h b/thirdparty/msdfgen/core/sdf-error-estimation.h
new file mode 100644
index 0000000000..d2fd40d2b8
--- /dev/null
+++ b/thirdparty/msdfgen/core/sdf-error-estimation.h
@@ -0,0 +1,30 @@
+
+#pragma once
+
+#include "Vector2.h"
+#include "Shape.h"
+#include "Projection.h"
+#include "Scanline.h"
+#include "BitmapRef.hpp"
+
+namespace msdfgen {
+
+/// Analytically constructs a scanline at y, evaluating fill by linear interpolation of the SDF.
+void scanlineSDF(Scanline &line, const BitmapConstRef<float, 1> &sdf, const Projection &projection, double y, bool inverseYAxis = false);
+void scanlineSDF(Scanline &line, const BitmapConstRef<float, 3> &sdf, const Projection &projection, double y, bool inverseYAxis = false);
+void scanlineSDF(Scanline &line, const BitmapConstRef<float, 4> &sdf, const Projection &projection, double y, bool inverseYAxis = false);
+
+/// Estimates the portion of the area that will be filled incorrectly when rendering using the SDF.
+double estimateSDFError(const BitmapConstRef<float, 1> &sdf, const Shape &shape, const Projection &projection, int scanlinesPerRow, FillRule fillRule = FILL_NONZERO);
+double estimateSDFError(const BitmapConstRef<float, 3> &sdf, const Shape &shape, const Projection &projection, int scanlinesPerRow, FillRule fillRule = FILL_NONZERO);
+double estimateSDFError(const BitmapConstRef<float, 4> &sdf, const Shape &shape, const Projection &projection, int scanlinesPerRow, FillRule fillRule = FILL_NONZERO);
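+
+/* Illustrative usage (editorial addition, not upstream msdfgen code):
+ * sampling 4 scanlines per pixel row to estimate the misfilled area fraction:
+ *
+ *     double error = estimateSDFError(msdf, shape, projection, 4);
+ */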
+
+// Old versions of the function API, kept for backwards compatibility
+void scanlineSDF(Scanline &line, const BitmapConstRef<float, 1> &sdf, const Vector2 &scale, const Vector2 &translate, bool inverseYAxis, double y);
+void scanlineSDF(Scanline &line, const BitmapConstRef<float, 3> &sdf, const Vector2 &scale, const Vector2 &translate, bool inverseYAxis, double y);
+void scanlineSDF(Scanline &line, const BitmapConstRef<float, 4> &sdf, const Vector2 &scale, const Vector2 &translate, bool inverseYAxis, double y);
+double estimateSDFError(const BitmapConstRef<float, 1> &sdf, const Shape &shape, const Vector2 &scale, const Vector2 &translate, int scanlinesPerRow, FillRule fillRule = FILL_NONZERO);
+double estimateSDFError(const BitmapConstRef<float, 3> &sdf, const Shape &shape, const Vector2 &scale, const Vector2 &translate, int scanlinesPerRow, FillRule fillRule = FILL_NONZERO);
+double estimateSDFError(const BitmapConstRef<float, 4> &sdf, const Shape &shape, const Vector2 &scale, const Vector2 &translate, int scanlinesPerRow, FillRule fillRule = FILL_NONZERO);
+
+}
diff --git a/thirdparty/msdfgen/core/shape-description.cpp b/thirdparty/msdfgen/core/shape-description.cpp
new file mode 100644
index 0000000000..a096fa2541
--- /dev/null
+++ b/thirdparty/msdfgen/core/shape-description.cpp
@@ -0,0 +1,284 @@
+
+#define _CRT_SECURE_NO_WARNINGS
+#include "shape-description.h"
+
+namespace msdfgen {
+
+int readCharF(FILE *input) {
+ int c = '\0';
+ do {
+ c = fgetc(input);
+ } while (c == ' ' || c == '\t' || c == '\r' || c == '\n');
+ return c;
+}
+
+int readCharS(const char **input) {
+ int c = '\0';
+ do {
+ c = *(*input)++;
+ } while (c == ' ' || c == '\t' || c == '\r' || c == '\n');
+ if (!c) {
+ --c;
+ return EOF;
+ }
+ return c;
+}
+
+int readCoordF(FILE *input, Point2 &coord) {
+ return fscanf(input, "%lf,%lf", &coord.x, &coord.y);
+}
+
+int readCoordS(const char **input, Point2 &coord) {
+ int read = 0;
+ int result = sscanf(*input, "%lf,%lf%n", &coord.x, &coord.y, &read);
+ *input += read;
+ return result;
+}
+
+static bool writeCoord(FILE *output, Point2 coord) {
+ fprintf(output, "%.12g, %.12g", coord.x, coord.y);
+ return true;
+}
+
+template <typename T, int (*readChar)(T *), int (*readCoord)(T *, Point2 &)>
+static int readControlPoints(T *input, Point2 *output) {
+ int result = readCoord(input, output[0]);
+ if (result == 2) {
+ switch (readChar(input)) {
+ case ')':
+ return 1;
+ case ';':
+ break;
+ default:
+ return -1;
+ }
+ result = readCoord(input, output[1]);
+ if (result == 2 && readChar(input) == ')')
+ return 2;
+ } else if (result != 1 && readChar(input) == ')')
+ return 0;
+ return -1;
+}
+
+template <typename T, int (*readChar)(T *), int (*readCoord)(T *, Point2 &)>
+static bool readContour(T *input, Contour &output, const Point2 *first, int terminator, bool &colorsSpecified) {
+ Point2 p[4], start;
+ if (first)
+ p[0] = *first;
+ else {
+ int result = readCoord(input, p[0]);
+ if (result != 2)
+ return result != 1 && readChar(input) == terminator;
+ }
+ start = p[0];
+ int c = '\0';
+ while ((c = readChar(input)) != terminator) {
+ if (c != ';')
+ return false;
+ EdgeColor color = WHITE;
+ int result = readCoord(input, p[1]);
+ if (result == 2) {
+ output.addEdge(EdgeHolder(p[0], p[1], color));
+ p[0] = p[1];
+ continue;
+ } else if (result == 1)
+ return false;
+ else {
+ int controlPoints = 0;
+ switch ((c = readChar(input))) {
+ case '#':
+ output.addEdge(EdgeHolder(p[0], start, color));
+ p[0] = start;
+ continue;
+ case ';':
+ goto FINISH_EDGE;
+ case '(':
+ goto READ_CONTROL_POINTS;
+ case 'C': case 'c':
+ color = CYAN;
+ colorsSpecified = true;
+ break;
+ case 'M': case 'm':
+ color = MAGENTA;
+ colorsSpecified = true;
+ break;
+ case 'Y': case 'y':
+ color = YELLOW;
+ colorsSpecified = true;
+ break;
+ case 'W': case 'w':
+ color = WHITE;
+ colorsSpecified = true;
+ break;
+ default:
+ return c == terminator;
+ }
+ switch (readChar(input)) {
+ case ';':
+ goto FINISH_EDGE;
+ case '(':
+ READ_CONTROL_POINTS:
+ if ((controlPoints = readControlPoints<T, readChar, readCoord>(input, p+1)) < 0)
+ return false;
+ break;
+ default:
+ return false;
+ }
+ if (readChar(input) != ';')
+ return false;
+ FINISH_EDGE:
+ result = readCoord(input, p[1+controlPoints]);
+ if (result != 2) {
+ if (result == 1)
+ return false;
+ else {
+ if (readChar(input) == '#')
+ p[1+controlPoints] = start;
+ else
+ return false;
+ }
+ }
+ switch (controlPoints) {
+ case 0:
+ output.addEdge(EdgeHolder(p[0], p[1], color));
+ p[0] = p[1];
+ continue;
+ case 1:
+ output.addEdge(EdgeHolder(p[0], p[1], p[2], color));
+ p[0] = p[2];
+ continue;
+ case 2:
+ output.addEdge(EdgeHolder(p[0], p[1], p[2], p[3], color));
+ p[0] = p[3];
+ continue;
+ }
+ }
+ }
+ return true;
+}
+
+bool readShapeDescription(FILE *input, Shape &output, bool *colorsSpecified) {
+ bool locColorsSpec = false;
+ output.contours.clear();
+ output.inverseYAxis = false;
+ Point2 p;
+ int result = readCoordF(input, p);
+ if (result == 2) {
+ return readContour<FILE, readCharF, readCoordF>(input, output.addContour(), &p, EOF, locColorsSpec);
+ } else if (result == 1)
+ return false;
+ else {
+ int c = readCharF(input);
+ if (c == '@') {
+ char after = '\0';
+ if (fscanf(input, "invert-y%c", &after) != 1)
+ return feof(input) != 0;
+ output.inverseYAxis = true;
+ c = after;
+ if (c == ' ' || c == '\t' || c == '\r' || c == '\n')
+ c = readCharF(input);
+ }
+ for (; c == '{'; c = readCharF(input))
+ if (!readContour<FILE, readCharF, readCoordF>(input, output.addContour(), NULL, '}', locColorsSpec))
+ return false;
+ if (colorsSpecified)
+ *colorsSpecified = locColorsSpec;
+ return c == EOF && feof(input);
+ }
+}
+
+bool readShapeDescription(const char *input, Shape &output, bool *colorsSpecified) {
+ bool locColorsSpec = false;
+ output.contours.clear();
+ output.inverseYAxis = false;
+ Point2 p;
+ int result = readCoordS(&input, p);
+ if (result == 2) {
+ return readContour<const char *, readCharS, readCoordS>(&input, output.addContour(), &p, EOF, locColorsSpec);
+ } else if (result == 1)
+ return false;
+ else {
+ int c = readCharS(&input);
+ if (c == '@') {
+ for (int i = 0; i < (int) sizeof("invert-y")-1; ++i)
+ if (input[i] != "invert-y"[i])
+ return false;
+ output.inverseYAxis = true;
+ input += sizeof("invert-y")-1;
+ c = readCharS(&input);
+ }
+ for (; c == '{'; c = readCharS(&input))
+ if (!readContour<const char *, readCharS, readCoordS>(&input, output.addContour(), NULL, '}', locColorsSpec))
+ return false;
+ if (colorsSpecified)
+ *colorsSpecified = locColorsSpec;
+ return c == EOF;
+ }
+}
+
+static bool isColored(const Shape &shape) {
+ for (std::vector<Contour>::const_iterator contour = shape.contours.begin(); contour != shape.contours.end(); ++contour)
+ for (std::vector<EdgeHolder>::const_iterator edge = contour->edges.begin(); edge != contour->edges.end(); ++edge)
+ if ((*edge)->color != WHITE)
+ return true;
+ return false;
+}
+
+bool writeShapeDescription(FILE *output, const Shape &shape) {
+ if (!shape.validate())
+ return false;
+ bool writeColors = isColored(shape);
+ if (shape.inverseYAxis)
+ fprintf(output, "@invert-y\n");
+ for (std::vector<Contour>::const_iterator contour = shape.contours.begin(); contour != shape.contours.end(); ++contour) {
+ fprintf(output, "{\n");
+ if (!contour->edges.empty()) {
+ for (std::vector<EdgeHolder>::const_iterator edge = contour->edges.begin(); edge != contour->edges.end(); ++edge) {
+ char colorCode = '\0';
+ if (writeColors) {
+ switch ((*edge)->color) {
+ case YELLOW: colorCode = 'y'; break;
+ case MAGENTA: colorCode = 'm'; break;
+ case CYAN: colorCode = 'c'; break;
+ case WHITE: colorCode = 'w'; break;
+ default:;
+ }
+ }
+ if (const LinearSegment *e = dynamic_cast<const LinearSegment *>(&**edge)) {
+ fprintf(output, "\t");
+ writeCoord(output, e->p[0]);
+ fprintf(output, ";\n");
+ if (colorCode)
+ fprintf(output, "\t\t%c;\n", colorCode);
+ }
+ if (const QuadraticSegment *e = dynamic_cast<const QuadraticSegment *>(&**edge)) {
+ fprintf(output, "\t");
+ writeCoord(output, e->p[0]);
+ fprintf(output, ";\n\t\t");
+ if (colorCode)
+ fprintf(output, "%c", colorCode);
+ fprintf(output, "(");
+ writeCoord(output, e->p[1]);
+ fprintf(output, ");\n");
+ }
+ if (const CubicSegment *e = dynamic_cast<const CubicSegment *>(&**edge)) {
+ fprintf(output, "\t");
+ writeCoord(output, e->p[0]);
+ fprintf(output, ";\n\t\t");
+ if (colorCode)
+ fprintf(output, "%c", colorCode);
+ fprintf(output, "(");
+ writeCoord(output, e->p[1]);
+ fprintf(output, "; ");
+ writeCoord(output, e->p[2]);
+ fprintf(output, ");\n");
+ }
+ }
+ fprintf(output, "\t#\n");
+ }
+ fprintf(output, "}\n");
+ }
+ return true;
+}
+
+}
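
For reference, a description the parser above accepts (an illustrative example; the grammar is inferred from readContour): each { ... } block is one contour of ;-separated points, with an optional color letter (c, m, y, w) before an edge, parenthesized control points for quadratic or cubic edges, and # to close the contour back to its start point; @invert-y before the first contour flips the Y axis.

    @invert-y
    { -1,-1; 1,-1; y(0,0); 1,1; -1,1; # }

A hedged parse call (Shape::normalize is declared in Shape.h earlier in this commit):

    Shape shape;
    bool colorsSpecified = false;
    if (readShapeDescription("{ -1,-1; 1,-1; y(0,0); 1,1; -1,1; # }", shape, &colorsSpecified))
        shape.normalize(); // prepares the geometry for distance field generation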
diff --git a/thirdparty/msdfgen/core/shape-description.h b/thirdparty/msdfgen/core/shape-description.h
new file mode 100644
index 0000000000..5df7c50a03
--- /dev/null
+++ b/thirdparty/msdfgen/core/shape-description.h
@@ -0,0 +1,16 @@
+
+#pragma once
+
+#include <cstdlib>
+#include <cstdio>
+#include "Shape.h"
+
+namespace msdfgen {
+
+/// Deserializes a text description of a vector shape into output.
+bool readShapeDescription(FILE *input, Shape &output, bool *colorsSpecified = NULL);
+bool readShapeDescription(const char *input, Shape &output, bool *colorsSpecified = NULL);
+/// Serializes a shape object into a text description.
+bool writeShapeDescription(FILE *output, const Shape &shape);
+
+}
diff --git a/thirdparty/msdfgen/msdfgen.h b/thirdparty/msdfgen/msdfgen.h
new file mode 100644
index 0000000000..fb36bd7e1d
--- /dev/null
+++ b/thirdparty/msdfgen/msdfgen.h
@@ -0,0 +1,65 @@
+
+#pragma once
+
+/*
+ * MULTI-CHANNEL SIGNED DISTANCE FIELD GENERATOR v1.9 (2021-05-28)
+ * ---------------------------------------------------------------
+ * A utility by Viktor Chlumsky, (c) 2014 - 2021
+ *
+ * The technique used to generate multi-channel distance fields in this code
+ * was developed by Viktor Chlumsky in 2014 for his master's thesis,
+ * "Shape Decomposition for Multi-Channel Distance Fields". It provides improved
+ * quality of sharp corners in glyphs and other 2D shapes compared to monochrome
+ * distance fields. To reconstruct an image of the shape, apply the median-of-three
+ * operation to the triplet of sampled signed distance values.
+ *
+ */
+
+#include "core/arithmetics.hpp"
+#include "core/Vector2.h"
+#include "core/Projection.h"
+#include "core/Scanline.h"
+#include "core/Shape.h"
+#include "core/BitmapRef.hpp"
+#include "core/Bitmap.h"
+#include "core/bitmap-interpolation.hpp"
+#include "core/pixel-conversion.hpp"
+#include "core/edge-coloring.h"
+#include "core/generator-config.h"
+#include "core/msdf-error-correction.h"
+#include "core/render-sdf.h"
+#include "core/rasterization.h"
+#include "core/sdf-error-estimation.h"
+#include "core/save-bmp.h"
+#include "core/save-tiff.h"
+#include "core/shape-description.h"
+
+#define MSDFGEN_VERSION "1.9"
+
+namespace msdfgen {
+
+/// Generates a conventional single-channel signed distance field.
+void generateSDF(const BitmapRef<float, 1> &output, const Shape &shape, const Projection &projection, double range, const GeneratorConfig &config = GeneratorConfig());
+
+/// Generates a single-channel signed pseudo-distance field.
+void generatePseudoSDF(const BitmapRef<float, 1> &output, const Shape &shape, const Projection &projection, double range, const GeneratorConfig &config = GeneratorConfig());
+
+/// Generates a multi-channel signed distance field. Edge colors must be assigned first! (See edgeColoringSimple)
+void generateMSDF(const BitmapRef<float, 3> &output, const Shape &shape, const Projection &projection, double range, const MSDFGeneratorConfig &config = MSDFGeneratorConfig());
+
+/// Generates a multi-channel signed distance field with true distance in the alpha channel. Edge colors must be assigned first.
+void generateMTSDF(const BitmapRef<float, 4> &output, const Shape &shape, const Projection &projection, double range, const MSDFGeneratorConfig &config = MSDFGeneratorConfig());
+
+// Old versions of these functions, kept for backwards compatibility
+void generateSDF(const BitmapRef<float, 1> &output, const Shape &shape, double range, const Vector2 &scale, const Vector2 &translate, bool overlapSupport = true);
+void generatePseudoSDF(const BitmapRef<float, 1> &output, const Shape &shape, double range, const Vector2 &scale, const Vector2 &translate, bool overlapSupport = true);
+void generateMSDF(const BitmapRef<float, 3> &output, const Shape &shape, double range, const Vector2 &scale, const Vector2 &translate, const ErrorCorrectionConfig &errorCorrectionConfig = ErrorCorrectionConfig(), bool overlapSupport = true);
+void generateMTSDF(const BitmapRef<float, 4> &output, const Shape &shape, double range, const Vector2 &scale, const Vector2 &translate, const ErrorCorrectionConfig &errorCorrectionConfig = ErrorCorrectionConfig(), bool overlapSupport = true);
+
+// Original, simpler versions of the previous functions, which work well under normal circumstances but cannot deal with overlapping contours.
+void generateSDF_legacy(const BitmapRef<float, 1> &output, const Shape &shape, double range, const Vector2 &scale, const Vector2 &translate);
+void generatePseudoSDF_legacy(const BitmapRef<float, 1> &output, const Shape &shape, double range, const Vector2 &scale, const Vector2 &translate);
+void generateMSDF_legacy(const BitmapRef<float, 3> &output, const Shape &shape, double range, const Vector2 &scale, const Vector2 &translate, ErrorCorrectionConfig errorCorrectionConfig = ErrorCorrectionConfig());
+void generateMTSDF_legacy(const BitmapRef<float, 4> &output, const Shape &shape, double range, const Vector2 &scale, const Vector2 &translate, ErrorCorrectionConfig errorCorrectionConfig = ErrorCorrectionConfig());
+
+}
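
A hedged end-to-end sketch tying the header together (sizes, range, and the projection are arbitrary; edgeColoringSimple, Bitmap, and median come from the headers included above; buildMSDF is a hypothetical helper name):

    #include "msdfgen.h"
    using namespace msdfgen;

    // Hypothetical helper: generate a small MSDF and reconstruct one texel.
    void buildMSDF(Shape &shape) {
        shape.normalize();
        edgeColoringSimple(shape, 3.0); // edge colors must be assigned first
        Bitmap<float, 3> msdf(32, 32);
        generateMSDF(msdf, shape, Projection(Vector2(8, 8), Vector2(2, 2)), 0.25);
        // Reconstruct the shape at a texel with the median-of-three operation
        // from the banner comment; stored values place the shape's edge at 0.5.
        const float *px = msdf(16, 16);
        float inside = median(px[0], px[1], px[2]); // > 0.5 means inside
        (void)inside;
    }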
diff --git a/thirdparty/spirv-reflect/patches/specialization-constants.patch b/thirdparty/spirv-reflect/patches/specialization-constants.patch
index 8ff1dcc2e5..efd89a76af 100644
--- a/thirdparty/spirv-reflect/patches/specialization-constants.patch
+++ b/thirdparty/spirv-reflect/patches/specialization-constants.patch
@@ -1,18 +1,18 @@
diff --git a/thirdparty/spirv-reflect/spirv_reflect.c b/thirdparty/spirv-reflect/spirv_reflect.c
-index 0fc979a8a4..3e3643717a 100644
+index 1c94a2e00e..2786a7f3ad 100644
--- a/thirdparty/spirv-reflect/spirv_reflect.c
+++ b/thirdparty/spirv-reflect/spirv_reflect.c
-@@ -124,6 +124,9 @@ typedef struct Decorations {
- NumberDecoration location;
- NumberDecoration offset;
- NumberDecoration uav_counter_buffer;
+@@ -124,6 +124,9 @@ typedef struct SpvReflectPrvDecorations {
+ SpvReflectPrvNumberDecoration location;
+ SpvReflectPrvNumberDecoration offset;
+ SpvReflectPrvNumberDecoration uav_counter_buffer;
+// -- GODOT begin --
-+ NumberDecoration specialization_constant;
++ SpvReflectPrvNumberDecoration specialization_constant;
+// -- GODOT end --
- StringDecoration semantic;
- uint32_t array_stride;
- uint32_t matrix_stride;
-@@ -610,6 +613,9 @@ static SpvReflectResult ParseNodes(Parser* p_parser)
+ SpvReflectPrvStringDecoration semantic;
+ uint32_t array_stride;
+ uint32_t matrix_stride;
+@@ -629,6 +632,9 @@ static SpvReflectResult ParseNodes(SpvReflectPrvParser* p_parser)
p_parser->nodes[i].decorations.offset.value = (uint32_t)INVALID_VALUE;
p_parser->nodes[i].decorations.uav_counter_buffer.value = (uint32_t)INVALID_VALUE;
p_parser->nodes[i].decorations.built_in = (SpvBuiltIn)INVALID_VALUE;
@@ -22,7 +22,7 @@ index 0fc979a8a4..3e3643717a 100644
}
// Mark source file id node
p_parser->source_file_id = (uint32_t)INVALID_VALUE;
-@@ -800,10 +806,16 @@ static SpvReflectResult ParseNodes(Parser* p_parser)
+@@ -819,10 +825,16 @@ static SpvReflectResult ParseNodes(SpvReflectPrvParser* p_parser)
CHECKED_READU32(p_parser, p_node->word_offset + 2, p_node->result_id);
}
break;
@@ -41,7 +41,16 @@ index 0fc979a8a4..3e3643717a 100644
case SpvOpSpecConstantComposite:
case SpvOpSpecConstantOp: {
CHECKED_READU32(p_parser, p_node->word_offset + 1, p_node->result_type_id);
-@@ -1309,6 +1321,9 @@ static SpvReflectResult ParseDecorations(Parser* p_parser)
+@@ -854,7 +866,7 @@ static SpvReflectResult ParseNodes(SpvReflectPrvParser* p_parser)
+ CHECKED_READU32(p_parser, p_node->word_offset + 3, p_access_chain->base_id);
+ //
+ // SPIRV_ACCESS_CHAIN_INDEX_OFFSET (4) is the number of words up until the first index:
+- // [Node, Result Type Id, Result Id, Base Id, <Indexes>]
++ // [SpvReflectPrvNode, Result Type Id, Result Id, Base Id, <Indexes>]
+ //
+ p_access_chain->index_count = (node_word_count - SPIRV_ACCESS_CHAIN_INDEX_OFFSET);
+ if (p_access_chain->index_count > 0) {
+@@ -1334,6 +1346,9 @@ static SpvReflectResult ParseDecorations(SpvReflectPrvParser* p_parser)
skip = true;
}
break;
@@ -51,7 +60,7 @@ index 0fc979a8a4..3e3643717a 100644
case SpvDecorationBlock:
case SpvDecorationBufferBlock:
case SpvDecorationColMajor:
-@@ -1441,7 +1456,14 @@ static SpvReflectResult ParseDecorations(Parser* p_parser)
+@@ -1466,7 +1481,14 @@ static SpvReflectResult ParseDecorations(SpvReflectPrvParser* p_parser)
p_target_decorations->input_attachment_index.word_offset = word_offset;
}
break;
@@ -67,7 +76,7 @@ index 0fc979a8a4..3e3643717a 100644
case SpvReflectDecorationHlslCounterBufferGOOGLE: {
uint32_t word_offset = p_node->word_offset + member_offset+ 3;
CHECKED_READU32(p_parser, word_offset, p_target_decorations->uav_counter_buffer.value);
-@@ -1731,6 +1753,13 @@ static SpvReflectResult ParseType(
+@@ -1766,6 +1788,13 @@ static SpvReflectResult ParseType(
p_type->type_flags |= SPV_REFLECT_TYPE_FLAG_EXTERNAL_ACCELERATION_STRUCTURE;
}
break;
@@ -81,17 +90,17 @@ index 0fc979a8a4..3e3643717a 100644
}
if (result == SPV_REFLECT_RESULT_SUCCESS) {
-@@ -3187,6 +3216,69 @@ static SpvReflectResult ParseExecutionModes(Parser* p_parser, SpvReflectShaderMo
+@@ -3236,6 +3265,69 @@ static SpvReflectResult ParseExecutionModes(
return SPV_REFLECT_RESULT_SUCCESS;
}
+// -- GODOT begin --
-+static SpvReflectResult ParseSpecializationConstants(Parser* p_parser, SpvReflectShaderModule* p_module)
++static SpvReflectResult ParseSpecializationConstants(SpvReflectPrvParser* p_parser, SpvReflectShaderModule* p_module)
+{
+ p_module->specialization_constant_count = 0;
+ p_module->specialization_constants = NULL;
+ for (size_t i = 0; i < p_parser->node_count; ++i) {
-+ Node* p_node = &(p_parser->nodes[i]);
++ SpvReflectPrvNode* p_node = &(p_parser->nodes[i]);
+ if (p_node->op == SpvOpSpecConstantTrue || p_node->op == SpvOpSpecConstantFalse || p_node->op == SpvOpSpecConstant) {
+ p_module->specialization_constant_count++;
+ }
@@ -106,7 +115,7 @@ index 0fc979a8a4..3e3643717a 100644
+ uint32_t index = 0;
+
+ for (size_t i = 0; i < p_parser->node_count; ++i) {
-+ Node* p_node = &(p_parser->nodes[i]);
++ SpvReflectPrvNode* p_node = &(p_parser->nodes[i]);
+ switch(p_node->op) {
+ default: continue;
+ case SpvOpSpecConstantTrue: {
@@ -124,7 +133,7 @@ index 0fc979a8a4..3e3643717a 100644
+ IF_READU32(result, p_parser, p_node->word_offset + 1, element_type_id);
+ IF_READU32(result, p_parser, p_node->word_offset + 3, default_value);
+
-+ Node* p_next_node = FindNode(p_parser, element_type_id);
++ SpvReflectPrvNode* p_next_node = FindNode(p_parser, element_type_id);
+
+ if (p_next_node->op == SpvOpTypeInt) {
+ p_module->specialization_constants[index].constant_type = SPV_REFLECT_SPECIALIZATION_CONSTANT_INT;
@@ -148,10 +157,10 @@ index 0fc979a8a4..3e3643717a 100644
+}
+// -- GODOT end --
+
- static SpvReflectResult ParsePushConstantBlocks(Parser* p_parser, SpvReflectShaderModule* p_module)
- {
- for (size_t i = 0; i < p_parser->node_count; ++i) {
-@@ -3562,6 +3654,12 @@ SpvReflectResult spvReflectCreateShaderModule(
+ static SpvReflectResult ParsePushConstantBlocks(
+ SpvReflectPrvParser* p_parser,
+ SpvReflectShaderModule* p_module)
+@@ -3613,6 +3705,12 @@ SpvReflectResult spvReflectCreateShaderModule(
result = ParsePushConstantBlocks(&parser, p_module);
SPV_REFLECT_ASSERT(result == SPV_REFLECT_RESULT_SUCCESS);
}
@@ -164,7 +173,7 @@ index 0fc979a8a4..3e3643717a 100644
if (result == SPV_REFLECT_RESULT_SUCCESS) {
result = ParseEntryPoints(&parser, p_module);
SPV_REFLECT_ASSERT(result == SPV_REFLECT_RESULT_SUCCESS);
-@@ -3691,6 +3789,9 @@ void spvReflectDestroyShaderModule(SpvReflectShaderModule* p_module)
+@@ -3742,6 +3840,9 @@ void spvReflectDestroyShaderModule(SpvReflectShaderModule* p_module)
SafeFree(p_entry->used_push_constants);
}
SafeFree(p_module->entry_points);
@@ -174,14 +183,14 @@ index 0fc979a8a4..3e3643717a 100644
// Push constants
for (size_t i = 0; i < p_module->push_constant_block_count; ++i) {
-@@ -3959,6 +4060,38 @@ SpvReflectResult spvReflectEnumerateEntryPointInterfaceVariables(
+@@ -4010,6 +4111,38 @@ SpvReflectResult spvReflectEnumerateEntryPointInterfaceVariables(
return SPV_REFLECT_RESULT_SUCCESS;
}
+// -- GODOT begin --
+SpvReflectResult spvReflectEnumerateSpecializationConstants(
-+ const SpvReflectShaderModule* p_module,
-+ uint32_t* p_count,
++ const SpvReflectShaderModule* p_module,
++ uint32_t* p_count,
+ SpvReflectSpecializationConstant** pp_constants
+)
+{
@@ -214,7 +223,7 @@ index 0fc979a8a4..3e3643717a 100644
const SpvReflectShaderModule* p_module,
uint32_t* p_count,
diff --git a/thirdparty/spirv-reflect/spirv_reflect.h b/thirdparty/spirv-reflect/spirv_reflect.h
-index a5a956e9e8..21f8160770 100644
+index da05400973..50cc65222b 100644
--- a/thirdparty/spirv-reflect/spirv_reflect.h
+++ b/thirdparty/spirv-reflect/spirv_reflect.h
@@ -292,6 +292,28 @@ typedef struct SpvReflectTypeDescription {
@@ -247,9 +256,9 @@ index a5a956e9e8..21f8160770 100644
/*! @struct SpvReflectInterfaceVariable
@@ -439,6 +461,10 @@ typedef struct SpvReflectShaderModule {
- SpvReflectInterfaceVariable* interface_variables;
- uint32_t push_constant_block_count;
- SpvReflectBlockVariable* push_constant_blocks;
+ SpvReflectInterfaceVariable* interface_variables; // Uses value(s) from first entry point
+ uint32_t push_constant_block_count; // Uses value(s) from first entry point
+ SpvReflectBlockVariable* push_constant_blocks; // Uses value(s) from first entry point
+ // -- GODOT begin --
+ uint32_t specialization_constant_count;
+ SpvReflectSpecializationConstant* specialization_constants;
+ // -- GODOT end --
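
A hedged usage sketch for the enumeration entry point this patch adds, assuming it follows SPIRV-Reflect's usual two-call convention (a null array pointer on the first call yields the count); listSpecConstants is a hypothetical helper, and the constant_id and name members are assumptions taken from the header hunk that is only partially shown here:

    #include <cstdio>
    #include <vector>
    #include "spirv_reflect.h"

    // module is assumed to be a successfully created SpvReflectShaderModule.
    void listSpecConstants(const SpvReflectShaderModule &module) {
        uint32_t count = 0;
        // First call: query the count only.
        spvReflectEnumerateSpecializationConstants(&module, &count, nullptr);
        std::vector<SpvReflectSpecializationConstant *> constants(count);
        // Second call: fill the array with pointers into the module.
        spvReflectEnumerateSpecializationConstants(&module, &count, constants.data());
        for (const SpvReflectSpecializationConstant *c : constants)
            std::printf("constant_id=%u name=%s\n", c->constant_id, c->name ? c->name : "");
    }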
diff --git a/thirdparty/spirv-reflect/spirv_reflect.c b/thirdparty/spirv-reflect/spirv_reflect.c
index 8f614c8874..2786a7f3ad 100644
--- a/thirdparty/spirv-reflect/spirv_reflect.c
+++ b/thirdparty/spirv-reflect/spirv_reflect.c
@@ -76,153 +76,157 @@ enum {
// clang-format on
// clang-format off
-typedef struct ArrayTraits {
- uint32_t element_type_id;
- uint32_t length_id;
-} ArrayTraits;
+typedef struct SpvReflectPrvArrayTraits {
+ uint32_t element_type_id;
+ uint32_t length_id;
+} SpvReflectPrvArrayTraits;
// clang-format on
// clang-format off
-typedef struct ImageTraits {
- uint32_t sampled_type_id;
- SpvDim dim;
- uint32_t depth;
- uint32_t arrayed;
- uint32_t ms;
- uint32_t sampled;
- SpvImageFormat image_format;
-} ImageTraits;
+typedef struct SpvReflectPrvImageTraits {
+ uint32_t sampled_type_id;
+ SpvDim dim;
+ uint32_t depth;
+ uint32_t arrayed;
+ uint32_t ms;
+ uint32_t sampled;
+ SpvImageFormat image_format;
+} SpvReflectPrvImageTraits;
// clang-format on
// clang-format off
-typedef struct NumberDecoration {
- uint32_t word_offset;
- uint32_t value;
-} NumberDecoration;
+typedef struct SpvReflectPrvNumberDecoration {
+ uint32_t word_offset;
+ uint32_t value;
+} SpvReflectPrvNumberDecoration;
// clang-format on
// clang-format off
-typedef struct StringDecoration {
- uint32_t word_offset;
- const char* value;
-} StringDecoration;
+typedef struct SpvReflectPrvStringDecoration {
+ uint32_t word_offset;
+ const char* value;
+} SpvReflectPrvStringDecoration;
// clang-format on
// clang-format off
-typedef struct Decorations {
- bool is_block;
- bool is_buffer_block;
- bool is_row_major;
- bool is_column_major;
- bool is_built_in;
- bool is_noperspective;
- bool is_flat;
- bool is_non_writable;
- NumberDecoration set;
- NumberDecoration binding;
- NumberDecoration input_attachment_index;
- NumberDecoration location;
- NumberDecoration offset;
- NumberDecoration uav_counter_buffer;
+typedef struct SpvReflectPrvDecorations {
+ bool is_block;
+ bool is_buffer_block;
+ bool is_row_major;
+ bool is_column_major;
+ bool is_built_in;
+ bool is_noperspective;
+ bool is_flat;
+ bool is_non_writable;
+ SpvReflectPrvNumberDecoration set;
+ SpvReflectPrvNumberDecoration binding;
+ SpvReflectPrvNumberDecoration input_attachment_index;
+ SpvReflectPrvNumberDecoration location;
+ SpvReflectPrvNumberDecoration offset;
+ SpvReflectPrvNumberDecoration uav_counter_buffer;
// -- GODOT begin --
- NumberDecoration specialization_constant;
+ SpvReflectPrvNumberDecoration specialization_constant;
// -- GODOT end --
- StringDecoration semantic;
- uint32_t array_stride;
- uint32_t matrix_stride;
- SpvBuiltIn built_in;
-} Decorations;
+ SpvReflectPrvStringDecoration semantic;
+ uint32_t array_stride;
+ uint32_t matrix_stride;
+ SpvBuiltIn built_in;
+} SpvReflectPrvDecorations;
// clang-format on
// clang-format off
-typedef struct Node {
- uint32_t result_id;
- SpvOp op;
- uint32_t result_type_id;
- uint32_t type_id;
- SpvStorageClass storage_class;
- uint32_t word_offset;
- uint32_t word_count;
- bool is_type;
-
- ArrayTraits array_traits;
- ImageTraits image_traits;
- uint32_t image_type_id;
-
- const char* name;
- Decorations decorations;
- uint32_t member_count;
- const char** member_names;
- Decorations* member_decorations;
-} Node;
+typedef struct SpvReflectPrvNode {
+ uint32_t result_id;
+ SpvOp op;
+ uint32_t result_type_id;
+ uint32_t type_id;
+ SpvStorageClass storage_class;
+ uint32_t word_offset;
+ uint32_t word_count;
+ bool is_type;
+
+ SpvReflectPrvArrayTraits array_traits;
+ SpvReflectPrvImageTraits image_traits;
+ uint32_t image_type_id;
+
+ const char* name;
+ SpvReflectPrvDecorations decorations;
+ uint32_t member_count;
+ const char** member_names;
+ SpvReflectPrvDecorations* member_decorations;
+} SpvReflectPrvNode;
// clang-format on
// clang-format off
-typedef struct String {
- uint32_t result_id;
- const char* string;
-} String;
+typedef struct SpvReflectPrvString {
+ uint32_t result_id;
+ const char* string;
+} SpvReflectPrvString;
// clang-format on
// clang-format off
-typedef struct Function {
- uint32_t id;
- uint32_t callee_count;
- uint32_t* callees;
- struct Function** callee_ptrs;
- uint32_t accessed_ptr_count;
- uint32_t* accessed_ptrs;
-} Function;
+typedef struct SpvReflectPrvFunction {
+ uint32_t id;
+ uint32_t callee_count;
+ uint32_t* callees;
+ struct SpvReflectPrvFunction** callee_ptrs;
+ uint32_t accessed_ptr_count;
+ uint32_t* accessed_ptrs;
+} SpvReflectPrvFunction;
// clang-format on
// clang-format off
-typedef struct AccessChain {
- uint32_t result_id;
- uint32_t result_type_id;
+typedef struct SpvReflectPrvAccessChain {
+ uint32_t result_id;
+ uint32_t result_type_id;
//
// Pointing to the base of a composite object.
// Generally the id of descriptor block variable
- uint32_t base_id;
+ uint32_t base_id;
//
// From spec:
// The first index in Indexes will select the
// top-level member/element/component/element
// of the base composite
- uint32_t index_count;
- uint32_t* indexes;
-} AccessChain;
+ uint32_t index_count;
+ uint32_t* indexes;
+} SpvReflectPrvAccessChain;
// clang-format on
// clang-format off
-typedef struct Parser {
- size_t spirv_word_count;
- uint32_t* spirv_code;
- uint32_t string_count;
- String* strings;
- SpvSourceLanguage source_language;
- uint32_t source_language_version;
- uint32_t source_file_id;
- const char* source_embedded;
- size_t node_count;
- Node* nodes;
- uint32_t entry_point_count;
- uint32_t function_count;
- Function* functions;
- uint32_t access_chain_count;
- AccessChain* access_chains;
-
- uint32_t type_count;
- uint32_t descriptor_count;
- uint32_t push_constant_count;
-} Parser;
+typedef struct SpvReflectPrvParser {
+ size_t spirv_word_count;
+ uint32_t* spirv_code;
+ uint32_t string_count;
+ SpvReflectPrvString* strings;
+ SpvSourceLanguage source_language;
+ uint32_t source_language_version;
+ uint32_t source_file_id;
+ const char* source_embedded;
+ size_t node_count;
+ SpvReflectPrvNode* nodes;
+ uint32_t entry_point_count;
+ uint32_t function_count;
+ SpvReflectPrvFunction* functions;
+ uint32_t access_chain_count;
+ SpvReflectPrvAccessChain* access_chains;
+
+ uint32_t type_count;
+ uint32_t descriptor_count;
+ uint32_t push_constant_count;
+} SpvReflectPrvParser;
// clang-format on
-static uint32_t Max(uint32_t a, uint32_t b)
+static uint32_t Max(
+ uint32_t a,
+ uint32_t b)
{
return a > b ? a : b;
}
-static uint32_t RoundUp(uint32_t value, uint32_t multiple)
+static uint32_t RoundUp(
+ uint32_t value,
+ uint32_t multiple)
{
assert(multiple && ((multiple & (multiple - 1)) == 0));
return (value + multiple - 1) & ~(multiple - 1);
@@ -242,7 +246,9 @@ static uint32_t RoundUp(uint32_t value, uint32_t multiple)
} \
}
-static int SortCompareUint32(const void* a, const void* b)
+static int SortCompareUint32(
+ const void* a,
+ const void* b)
{
const uint32_t* p_a = (const uint32_t*)a;
const uint32_t* p_b = (const uint32_t*)b;
@@ -272,7 +278,10 @@ static size_t DedupSortedUint32(uint32_t* arr, size_t size)
return dedup_idx+1;
}
-static bool SearchSortedUint32(const uint32_t* arr, size_t size, uint32_t target)
+static bool SearchSortedUint32(
+ const uint32_t* arr,
+ size_t size,
+ uint32_t target)
{
size_t lo = 0;
size_t hi = size;
@@ -341,7 +350,9 @@ static SpvReflectResult IntersectSortedUint32(
}
-static bool InRange(const Parser* p_parser, uint32_t index)
+static bool InRange(
+ const SpvReflectPrvParser* p_parser,
+ uint32_t index)
{
bool in_range = false;
if (IsNotNull(p_parser)) {
@@ -350,7 +361,10 @@ static bool InRange(const Parser* p_parser, uint32_t index)
return in_range;
}
-static SpvReflectResult ReadU32(Parser* p_parser, uint32_t word_offset, uint32_t* p_value)
+static SpvReflectResult ReadU32(
+ SpvReflectPrvParser* p_parser,
+ uint32_t word_offset,
+ uint32_t* p_value)
{
assert(IsNotNull(p_parser));
assert(IsNotNull(p_parser->spirv_code));
@@ -399,12 +413,12 @@ static SpvReflectResult ReadU32(Parser* p_parser, uint32_t word_offset, uint32_t
}
static SpvReflectResult ReadStr(
- Parser* p_parser,
- uint32_t word_offset,
- uint32_t word_index,
- uint32_t word_count,
- uint32_t* p_buf_size,
- char* p_buf
+ SpvReflectPrvParser* p_parser,
+ uint32_t word_offset,
+ uint32_t word_index,
+ uint32_t word_count,
+ uint32_t* p_buf_size,
+ char* p_buf
)
{
uint32_t limit = (word_offset + word_count);
@@ -445,7 +459,7 @@ static SpvReflectResult ReadStr(
return result;
}
-static SpvReflectDecorationFlags ApplyDecorations(const Decorations* p_decoration_fields)
+static SpvReflectDecorationFlags ApplyDecorations(const SpvReflectPrvDecorations* p_decoration_fields)
{
SpvReflectDecorationFlags decorations = SPV_REFLECT_DECORATION_NONE;
if (p_decoration_fields->is_block) {
@@ -485,11 +499,13 @@ static void ApplyArrayTraits(const SpvReflectTypeDescription* p_type, SpvReflect
memcpy(p_array_traits, &p_type->traits.array, sizeof(p_type->traits.array));
}
-static Node* FindNode(Parser* p_parser, uint32_t result_id)
+static SpvReflectPrvNode* FindNode(
+ SpvReflectPrvParser* p_parser,
+ uint32_t result_id)
{
- Node* p_node = NULL;
+ SpvReflectPrvNode* p_node = NULL;
for (size_t i = 0; i < p_parser->node_count; ++i) {
- Node* p_elem = &(p_parser->nodes[i]);
+ SpvReflectPrvNode* p_elem = &(p_parser->nodes[i]);
if (p_elem->result_id == result_id) {
p_node = p_elem;
break;
@@ -511,7 +527,10 @@ static SpvReflectTypeDescription* FindType(SpvReflectShaderModule* p_module, uin
return p_type;
}
-static SpvReflectResult CreateParser(size_t size, void* p_code, Parser* p_parser)
+static SpvReflectResult CreateParser(
+ size_t size,
+ void* p_code,
+ SpvReflectPrvParser* p_parser)
{
if (p_code == NULL) {
return SPV_REFLECT_RESULT_ERROR_NULL_POINTER;
@@ -534,12 +553,12 @@ static SpvReflectResult CreateParser(size_t size, void* p_code, Parser* p_parser
return SPV_REFLECT_RESULT_SUCCESS;
}
-static void DestroyParser(Parser* p_parser)
+static void DestroyParser(SpvReflectPrvParser* p_parser)
{
if (!IsNull(p_parser->nodes)) {
// Free nodes
for (size_t i = 0; i < p_parser->node_count; ++i) {
- Node* p_node = &(p_parser->nodes[i]);
+ SpvReflectPrvNode* p_node = &(p_parser->nodes[i]);
if (IsNotNull(p_node->member_names)) {
SafeFree(p_node->member_names);
}
@@ -569,7 +588,7 @@ static void DestroyParser(Parser* p_parser)
}
}
-static SpvReflectResult ParseNodes(Parser* p_parser)
+static SpvReflectResult ParseNodes(SpvReflectPrvParser* p_parser)
{
assert(IsNotNull(p_parser));
assert(IsNotNull(p_parser->spirv_code));
@@ -599,7 +618,7 @@ static SpvReflectResult ParseNodes(Parser* p_parser)
// Allocate nodes
p_parser->node_count = node_count;
- p_parser->nodes = (Node*)calloc(p_parser->node_count, sizeof(*(p_parser->nodes)));
+ p_parser->nodes = (SpvReflectPrvNode*)calloc(p_parser->node_count, sizeof(*(p_parser->nodes)));
if (IsNull(p_parser->nodes)) {
return SPV_REFLECT_RESULT_ERROR_ALLOC_FAILED;
}
@@ -626,7 +645,7 @@ static SpvReflectResult ParseNodes(Parser* p_parser)
// Allocate access chain
if (p_parser->access_chain_count > 0) {
- p_parser->access_chains = (AccessChain*)calloc(p_parser->access_chain_count, sizeof(*(p_parser->access_chains)));
+ p_parser->access_chains = (SpvReflectPrvAccessChain*)calloc(p_parser->access_chain_count, sizeof(*(p_parser->access_chains)));
if (IsNull(p_parser->access_chains)) {
return SPV_REFLECT_RESULT_ERROR_ALLOC_FAILED;
}
@@ -641,7 +660,7 @@ static SpvReflectResult ParseNodes(Parser* p_parser)
SpvOp op = (SpvOp)(word & 0xFFFF);
uint32_t node_word_count = (word >> 16) & 0xFFFF;
- Node* p_node = &(p_parser->nodes[node_index]);
+ SpvReflectPrvNode* p_node = &(p_parser->nodes[node_index]);
p_node->op = op;
p_node->word_offset = spirv_word_index;
p_node->word_count = node_word_count;
@@ -841,13 +860,13 @@ static SpvReflectResult ParseNodes(Parser* p_parser)
case SpvOpAccessChain:
{
- AccessChain* p_access_chain = &(p_parser->access_chains[access_chain_index]);
+ SpvReflectPrvAccessChain* p_access_chain = &(p_parser->access_chains[access_chain_index]);
CHECKED_READU32(p_parser, p_node->word_offset + 1, p_access_chain->result_type_id);
CHECKED_READU32(p_parser, p_node->word_offset + 2, p_access_chain->result_id);
CHECKED_READU32(p_parser, p_node->word_offset + 3, p_access_chain->base_id);
//
// SPIRV_ACCESS_CHAIN_INDEX_OFFSET (4) is the number of words up until the first index:
- // [Node, Result Type Id, Result Id, Base Id, <Indexes>]
+ // [SpvReflectPrvNode, Result Type Id, Result Id, Base Id, <Indexes>]
//
p_access_chain->index_count = (node_word_count - SPIRV_ACCESS_CHAIN_INDEX_OFFSET);
if (p_access_chain->index_count > 0) {
@@ -861,7 +880,7 @@ static SpvReflectResult ParseNodes(Parser* p_parser)
uint32_t index_id = 0;
CHECKED_READU32(p_parser, p_node->word_offset + SPIRV_ACCESS_CHAIN_INDEX_OFFSET + index_index, index_id);
// Find OpConstant node that contains index value
- Node* p_index_value_node = FindNode(p_parser, index_id);
+ SpvReflectPrvNode* p_index_value_node = FindNode(p_parser, index_id);
if ((p_index_value_node != NULL) && (p_index_value_node->op == SpvOpConstant)) {
// Read index value
uint32_t index_value = UINT32_MAX;
@@ -890,7 +909,7 @@ static SpvReflectResult ParseNodes(Parser* p_parser)
case SpvOpLabel:
{
if (function_node != (uint32_t)INVALID_VALUE) {
- Node* p_func_node = &(p_parser->nodes[function_node]);
+ SpvReflectPrvNode* p_func_node = &(p_parser->nodes[function_node]);
CHECKED_READU32(p_parser, p_func_node->word_offset + 2, p_func_node->result_id);
++(p_parser->function_count);
}
@@ -914,7 +933,7 @@ static SpvReflectResult ParseNodes(Parser* p_parser)
return SPV_REFLECT_RESULT_SUCCESS;
}
-static SpvReflectResult ParseStrings(Parser* p_parser)
+static SpvReflectResult ParseStrings(SpvReflectPrvParser* p_parser)
{
assert(IsNotNull(p_parser));
assert(IsNotNull(p_parser->spirv_code));
@@ -927,11 +946,11 @@ static SpvReflectResult ParseStrings(Parser* p_parser)
if (IsNotNull(p_parser) && IsNotNull(p_parser->spirv_code) && IsNotNull(p_parser->nodes)) {
// Allocate string storage
- p_parser->strings = (String*)calloc(p_parser->string_count, sizeof(*(p_parser->strings)));
+ p_parser->strings = (SpvReflectPrvString*)calloc(p_parser->string_count, sizeof(*(p_parser->strings)));
uint32_t string_index = 0;
for (size_t i = 0; i < p_parser->node_count; ++i) {
- Node* p_node = &(p_parser->nodes[i]);
+ SpvReflectPrvNode* p_node = &(p_parser->nodes[i]);
if (p_node->op != SpvOpString) {
continue;
}
@@ -943,7 +962,7 @@ static SpvReflectResult ParseStrings(Parser* p_parser)
}
// Result id
- String* p_string = &(p_parser->strings[string_index]);
+ SpvReflectPrvString* p_string = &(p_parser->strings[string_index]);
CHECKED_READU32(p_parser, p_node->word_offset + 1, p_string->result_id);
// String
@@ -958,7 +977,7 @@ static SpvReflectResult ParseStrings(Parser* p_parser)
return SPV_REFLECT_RESULT_SUCCESS;
}
-static SpvReflectResult ParseSource(Parser* p_parser, SpvReflectShaderModule* p_module)
+static SpvReflectResult ParseSource(SpvReflectPrvParser* p_parser, SpvReflectShaderModule* p_module)
{
assert(IsNotNull(p_parser));
assert(IsNotNull(p_parser->spirv_code));
@@ -967,7 +986,7 @@ static SpvReflectResult ParseSource(Parser* p_parser, SpvReflectShaderModule* p_
// Source file
if (IsNotNull(p_parser->strings)) {
for (uint32_t i = 0; i < p_parser->string_count; ++i) {
- String* p_string = &(p_parser->strings[i]);
+ SpvReflectPrvString* p_string = &(p_parser->strings[i]);
if (p_string->result_id == p_parser->source_file_id) {
p_module->source_file = p_string->string;
break;
@@ -998,7 +1017,11 @@ static SpvReflectResult ParseSource(Parser* p_parser, SpvReflectShaderModule* p_
return SPV_REFLECT_RESULT_SUCCESS;
}
-static SpvReflectResult ParseFunction(Parser* p_parser, Node* p_func_node, Function* p_func, size_t first_label_index)
+static SpvReflectResult ParseFunction(
+ SpvReflectPrvParser* p_parser,
+ SpvReflectPrvNode* p_func_node,
+ SpvReflectPrvFunction* p_func,
+ size_t first_label_index)
{
p_func->id = p_func_node->result_id;
@@ -1006,7 +1029,7 @@ static SpvReflectResult ParseFunction(Parser* p_parser, Node* p_func_node, Funct
p_func->accessed_ptr_count = 0;
for (size_t i = first_label_index; i < p_parser->node_count; ++i) {
- Node* p_node = &(p_parser->nodes[i]);
+ SpvReflectPrvNode* p_node = &(p_parser->nodes[i]);
if (p_node->op == SpvOpFunctionEnd) {
break;
}
@@ -1056,7 +1079,7 @@ static SpvReflectResult ParseFunction(Parser* p_parser, Node* p_func_node, Funct
p_func->callee_count = 0;
p_func->accessed_ptr_count = 0;
for (size_t i = first_label_index; i < p_parser->node_count; ++i) {
- Node* p_node = &(p_parser->nodes[i]);
+ SpvReflectPrvNode* p_node = &(p_parser->nodes[i]);
if (p_node->op == SpvOpFunctionEnd) {
break;
}
@@ -1119,14 +1142,16 @@ static SpvReflectResult ParseFunction(Parser* p_parser, Node* p_func_node, Funct
return SPV_REFLECT_RESULT_SUCCESS;
}
-static int SortCompareFunctions(const void* a, const void* b)
+static int SortCompareFunctions(
+ const void* a,
+ const void* b)
{
- const Function* af = (const Function*)a;
- const Function* bf = (const Function*)b;
+ const SpvReflectPrvFunction* af = (const SpvReflectPrvFunction*)a;
+ const SpvReflectPrvFunction* bf = (const SpvReflectPrvFunction*)b;
return (int)af->id - (int)bf->id;
}
-static SpvReflectResult ParseFunctions(Parser* p_parser)
+static SpvReflectResult ParseFunctions(SpvReflectPrvParser* p_parser)
{
assert(IsNotNull(p_parser));
assert(IsNotNull(p_parser->spirv_code));
@@ -1137,15 +1162,15 @@ static SpvReflectResult ParseFunctions(Parser* p_parser)
return SPV_REFLECT_RESULT_SUCCESS;
}
- p_parser->functions = (Function*)calloc(p_parser->function_count,
- sizeof(*(p_parser->functions)));
+ p_parser->functions = (SpvReflectPrvFunction*)calloc(p_parser->function_count,
+ sizeof(*(p_parser->functions)));
if (IsNull(p_parser->functions)) {
return SPV_REFLECT_RESULT_ERROR_ALLOC_FAILED;
}
size_t function_index = 0;
for (size_t i = 0; i < p_parser->node_count; ++i) {
- Node* p_node = &(p_parser->nodes[i]);
+ SpvReflectPrvNode* p_node = &(p_parser->nodes[i]);
if (p_node->op != SpvOpFunction) {
continue;
}
@@ -1167,7 +1192,7 @@ static SpvReflectResult ParseFunctions(Parser* p_parser)
continue;
}
- Function* p_function = &(p_parser->functions[function_index]);
+ SpvReflectPrvFunction* p_function = &(p_parser->functions[function_index]);
SpvReflectResult result = ParseFunction(p_parser, p_node, p_function, i);
if (result != SPV_REFLECT_RESULT_SUCCESS) {
@@ -1183,12 +1208,12 @@ static SpvReflectResult ParseFunctions(Parser* p_parser)
// Once they're sorted, link the functions with pointers to improve graph
// traversal efficiency
for (size_t i = 0; i < p_parser->function_count; ++i) {
- Function* p_func = &(p_parser->functions[i]);
+ SpvReflectPrvFunction* p_func = &(p_parser->functions[i]);
if (p_func->callee_count == 0) {
continue;
}
- p_func->callee_ptrs = (Function**)calloc(p_func->callee_count,
- sizeof(*(p_func->callee_ptrs)));
+ p_func->callee_ptrs = (SpvReflectPrvFunction**)calloc(p_func->callee_count,
+ sizeof(*(p_func->callee_ptrs)));
for (size_t j = 0, k = 0; j < p_func->callee_count; ++j) {
while (p_parser->functions[k].id != p_func->callees[j]) {
++k;
@@ -1205,7 +1230,7 @@ static SpvReflectResult ParseFunctions(Parser* p_parser)
return SPV_REFLECT_RESULT_SUCCESS;
}
-static SpvReflectResult ParseMemberCounts(Parser* p_parser)
+static SpvReflectResult ParseMemberCounts(SpvReflectPrvParser* p_parser)
{
assert(IsNotNull(p_parser));
assert(IsNotNull(p_parser->spirv_code));
@@ -1213,7 +1238,7 @@ static SpvReflectResult ParseMemberCounts(Parser* p_parser)
if (IsNotNull(p_parser) && IsNotNull(p_parser->spirv_code) && IsNotNull(p_parser->nodes)) {
for (size_t i = 0; i < p_parser->node_count; ++i) {
- Node* p_node = &(p_parser->nodes[i]);
+ SpvReflectPrvNode* p_node = &(p_parser->nodes[i]);
if ((p_node->op != SpvOpMemberName) && (p_node->op != SpvOpMemberDecorate)) {
continue;
}
@@ -1222,7 +1247,7 @@ static SpvReflectResult ParseMemberCounts(Parser* p_parser)
uint32_t member_index = (uint32_t)INVALID_VALUE;
CHECKED_READU32(p_parser, p_node->word_offset + 1, target_id);
CHECKED_READU32(p_parser, p_node->word_offset + 2, member_index);
- Node* p_target_node = FindNode(p_parser, target_id);
+ SpvReflectPrvNode* p_target_node = FindNode(p_parser, target_id);
// Not all nodes get parsed, so FindNode returning NULL is expected.
if (IsNull(p_target_node)) {
continue;
@@ -1236,7 +1261,7 @@ static SpvReflectResult ParseMemberCounts(Parser* p_parser)
}
for (uint32_t i = 0; i < p_parser->node_count; ++i) {
- Node* p_node = &(p_parser->nodes[i]);
+ SpvReflectPrvNode* p_node = &(p_parser->nodes[i]);
if (p_node->member_count == 0) {
continue;
}
@@ -1246,7 +1271,7 @@ static SpvReflectResult ParseMemberCounts(Parser* p_parser)
return SPV_REFLECT_RESULT_ERROR_ALLOC_FAILED;
}
- p_node->member_decorations = (Decorations*)calloc(p_node->member_count, sizeof(*(p_node->member_decorations)));
+ p_node->member_decorations = (SpvReflectPrvDecorations*)calloc(p_node->member_count, sizeof(*(p_node->member_decorations)));
if (IsNull(p_node->member_decorations)) {
return SPV_REFLECT_RESULT_ERROR_ALLOC_FAILED;
}
@@ -1255,7 +1280,7 @@ static SpvReflectResult ParseMemberCounts(Parser* p_parser)
return SPV_REFLECT_RESULT_SUCCESS;
}
-static SpvReflectResult ParseNames(Parser* p_parser)
+static SpvReflectResult ParseNames(SpvReflectPrvParser* p_parser)
{
assert(IsNotNull(p_parser));
assert(IsNotNull(p_parser->spirv_code));
@@ -1263,14 +1288,14 @@ static SpvReflectResult ParseNames(Parser* p_parser)
if (IsNotNull(p_parser) && IsNotNull(p_parser->spirv_code) && IsNotNull(p_parser->nodes)) {
for (size_t i = 0; i < p_parser->node_count; ++i) {
- Node* p_node = &(p_parser->nodes[i]);
+ SpvReflectPrvNode* p_node = &(p_parser->nodes[i]);
if ((p_node->op != SpvOpName) && (p_node->op != SpvOpMemberName)) {
continue;
}
uint32_t target_id = 0;
CHECKED_READU32(p_parser, p_node->word_offset + 1, target_id);
- Node* p_target_node = FindNode(p_parser, target_id);
+ SpvReflectPrvNode* p_target_node = FindNode(p_parser, target_id);
// Not all nodes get parsed, so FindNode returning NULL is expected.
if (IsNull(p_target_node)) {
continue;
@@ -1289,10 +1314,10 @@ static SpvReflectResult ParseNames(Parser* p_parser)
return SPV_REFLECT_RESULT_SUCCESS;
}
-static SpvReflectResult ParseDecorations(Parser* p_parser)
+static SpvReflectResult ParseDecorations(SpvReflectPrvParser* p_parser)
{
for (uint32_t i = 0; i < p_parser->node_count; ++i) {
- Node* p_node = &(p_parser->nodes[i]);
+ SpvReflectPrvNode* p_node = &(p_parser->nodes[i]);
if (((uint32_t)p_node->op != (uint32_t)SpvOpDecorate) &&
((uint32_t)p_node->op != (uint32_t)SpvOpMemberDecorate) &&
@@ -1352,12 +1377,12 @@ static SpvReflectResult ParseDecorations(Parser* p_parser)
// Find target target node
uint32_t target_id = 0;
CHECKED_READU32(p_parser, p_node->word_offset + 1, target_id);
- Node* p_target_node = FindNode(p_parser, target_id);
+ SpvReflectPrvNode* p_target_node = FindNode(p_parser, target_id);
if (IsNull(p_target_node)) {
return SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_ID_REFERENCE;
}
// Get decorations
- Decorations* p_target_decorations = &(p_target_node->decorations);
+ SpvReflectPrvDecorations* p_target_decorations = &(p_target_node->decorations);
// Update pointer if this is a member member decoration
if (p_node->op == SpvOpMemberDecorate) {
uint32_t member_index = (uint32_t)INVALID_VALUE;
@@ -1507,11 +1532,11 @@ static SpvReflectResult EnumerateAllUniforms(
}
static SpvReflectResult ParseType(
- Parser* p_parser,
- Node* p_node,
- Decorations* p_struct_member_decorations,
- SpvReflectShaderModule* p_module,
- SpvReflectTypeDescription* p_type
+ SpvReflectPrvParser* p_parser,
+ SpvReflectPrvNode* p_node,
+ SpvReflectPrvDecorations* p_struct_member_decorations,
+ SpvReflectShaderModule* p_module,
+ SpvReflectTypeDescription* p_type
)
{
SpvReflectResult result = SPV_REFLECT_RESULT_SUCCESS;
@@ -1574,7 +1599,7 @@ static SpvReflectResult ParseType(
IF_READU32(result, p_parser, p_node->word_offset + 2, component_type_id);
IF_READU32(result, p_parser, p_node->word_offset + 3, p_type->traits.numeric.vector.component_count);
// Parse component type
- Node* p_next_node = FindNode(p_parser, component_type_id);
+ SpvReflectPrvNode* p_next_node = FindNode(p_parser, component_type_id);
if (IsNotNull(p_next_node)) {
result = ParseType(p_parser, p_next_node, NULL, p_module, p_type);
}
@@ -1590,7 +1615,7 @@ static SpvReflectResult ParseType(
uint32_t column_type_id = (uint32_t)INVALID_VALUE;
IF_READU32(result, p_parser, p_node->word_offset + 2, column_type_id);
IF_READU32(result, p_parser, p_node->word_offset + 3, p_type->traits.numeric.matrix.column_count);
- Node* p_next_node = FindNode(p_parser, column_type_id);
+ SpvReflectPrvNode* p_next_node = FindNode(p_parser, column_type_id);
if (IsNotNull(p_next_node)) {
result = ParseType(p_parser, p_next_node, NULL, p_module, p_type);
}
@@ -1609,6 +1634,15 @@ static SpvReflectResult ParseType(
case SpvOpTypeImage: {
p_type->type_flags |= SPV_REFLECT_TYPE_FLAG_EXTERNAL_IMAGE;
+ uint32_t sampled_type_id = (uint32_t)INVALID_VALUE;
+ IF_READU32(result, p_parser, p_node->word_offset + 2, sampled_type_id);
+ SpvReflectPrvNode* p_next_node = FindNode(p_parser, sampled_type_id);
+ if (IsNotNull(p_next_node)) {
+ result = ParseType(p_parser, p_next_node, NULL, p_module, p_type);
+ }
+ else {
+ result = SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_ID_REFERENCE;
+ }
IF_READU32_CAST(result, p_parser, p_node->word_offset + 3, SpvDim, p_type->traits.image.dim);
IF_READU32(result, p_parser, p_node->word_offset + 4, p_type->traits.image.depth);
IF_READU32(result, p_parser, p_node->word_offset + 5, p_type->traits.image.arrayed);
@@ -1627,7 +1661,7 @@ static SpvReflectResult ParseType(
p_type->type_flags |= SPV_REFLECT_TYPE_FLAG_EXTERNAL_SAMPLED_IMAGE;
uint32_t image_type_id = (uint32_t)INVALID_VALUE;
IF_READU32(result, p_parser, p_node->word_offset + 2, image_type_id);
- Node* p_next_node = FindNode(p_parser, image_type_id);
+ SpvReflectPrvNode* p_next_node = FindNode(p_parser, image_type_id);
if (IsNotNull(p_next_node)) {
result = ParseType(p_parser, p_next_node, NULL, p_module, p_type);
}
@@ -1649,7 +1683,7 @@ static SpvReflectResult ParseType(
// OpMemberDecorate, even if the array is apart of a struct.
p_type->traits.array.stride = p_node->decorations.array_stride;
// Get length for current dimension
- Node* p_length_node = FindNode(p_parser, length_id);
+ SpvReflectPrvNode* p_length_node = FindNode(p_parser, length_id);
if (IsNotNull(p_length_node)) {
if (p_length_node->op == SpvOpSpecConstant ||
p_length_node->op == SpvOpSpecConstantOp) {
@@ -1668,7 +1702,7 @@ static SpvReflectResult ParseType(
}
}
// Parse next dimension or element type
- Node* p_next_node = FindNode(p_parser, element_type_id);
+ SpvReflectPrvNode* p_next_node = FindNode(p_parser, element_type_id);
if (IsNotNull(p_next_node)) {
result = ParseType(p_parser, p_next_node, NULL, p_module, p_type);
}
@@ -1682,10 +1716,11 @@ static SpvReflectResult ParseType(
break;
case SpvOpTypeRuntimeArray: {
+ p_type->type_flags |= SPV_REFLECT_TYPE_FLAG_ARRAY;
uint32_t element_type_id = (uint32_t)INVALID_VALUE;
IF_READU32(result, p_parser, p_node->word_offset + 2, element_type_id);
// Parse next dimension or element type
- Node* p_next_node = FindNode(p_parser, element_type_id);
+ SpvReflectPrvNode* p_next_node = FindNode(p_parser, element_type_id);
if (IsNotNull(p_next_node)) {
result = ParseType(p_parser, p_next_node, NULL, p_module, p_type);
}
@@ -1705,7 +1740,7 @@ static SpvReflectResult ParseType(
uint32_t member_id = (uint32_t)INVALID_VALUE;
IF_READU32(result, p_parser, p_node->word_offset + word_index, member_id);
// Find member node
- Node* p_member_node = FindNode(p_parser, member_id);
+ SpvReflectPrvNode* p_member_node = FindNode(p_parser, member_id);
if (IsNull(p_member_node)) {
result = SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_ID_REFERENCE;
SPV_REFLECT_ASSERT(false);
@@ -1713,7 +1748,7 @@ static SpvReflectResult ParseType(
}
// Member decorations
- Decorations* p_member_decorations = &p_node->member_decorations[member_index];
+ SpvReflectPrvDecorations* p_member_decorations = &p_node->member_decorations[member_index];
assert(member_index < p_type->member_count);
// Parse member type
@@ -1738,7 +1773,7 @@ static SpvReflectResult ParseType(
uint32_t type_id = (uint32_t)INVALID_VALUE;
IF_READU32(result, p_parser, p_node->word_offset + 3, type_id);
// Parse type
- Node* p_next_node = FindNode(p_parser, type_id);
+ SpvReflectPrvNode* p_next_node = FindNode(p_parser, type_id);
if (IsNotNull(p_next_node)) {
result = ParseType(p_parser, p_next_node, NULL, p_module, p_type);
}
@@ -1774,7 +1809,9 @@ static SpvReflectResult ParseType(
return result;
}
-static SpvReflectResult ParseTypes(Parser* p_parser, SpvReflectShaderModule* p_module)
+static SpvReflectResult ParseTypes(
+ SpvReflectPrvParser* p_parser,
+ SpvReflectShaderModule* p_module)
{
if (p_parser->type_count == 0) {
return SPV_REFLECT_RESULT_SUCCESS;
@@ -1797,7 +1834,7 @@ static SpvReflectResult ParseTypes(Parser* p_parser, SpvReflectShaderModule* p_m
size_t type_index = 0;
for (size_t i = 0; i < p_parser->node_count; ++i) {
- Node* p_node = &(p_parser->nodes[i]);
+ SpvReflectPrvNode* p_node = &(p_parser->nodes[i]);
if (! p_node->is_type) {
continue;
}
@@ -1826,11 +1863,13 @@ static int SortCompareDescriptorBinding(const void* a, const void* b)
return value;
}
-static SpvReflectResult ParseDescriptorBindings(Parser* p_parser, SpvReflectShaderModule* p_module)
+static SpvReflectResult ParseDescriptorBindings(
+ SpvReflectPrvParser* p_parser,
+ SpvReflectShaderModule* p_module)
{
p_module->descriptor_binding_count = 0;
for (size_t i = 0; i < p_parser->node_count; ++i) {
- Node* p_node = &(p_parser->nodes[i]);
+ SpvReflectPrvNode* p_node = &(p_parser->nodes[i]);
if ((p_node->op != SpvOpVariable) ||
((p_node->storage_class != SpvStorageClassUniform) &&
(p_node->storage_class != SpvStorageClassStorageBuffer) &&
@@ -1866,7 +1905,7 @@ static SpvReflectResult ParseDescriptorBindings(Parser* p_parser, SpvReflectShad
size_t descriptor_index = 0;
for (size_t i = 0; i < p_parser->node_count; ++i) {
- Node* p_node = &(p_parser->nodes[i]);
+ SpvReflectPrvNode* p_node = &(p_parser->nodes[i]);
if ((p_node->op != SpvOpVariable) ||
((p_node->storage_class != SpvStorageClassUniform) &&
(p_node->storage_class != SpvStorageClassStorageBuffer) &&
@@ -1888,7 +1927,7 @@ static SpvReflectResult ParseDescriptorBindings(Parser* p_parser, SpvReflectShad
if (p_type->op == SpvOpTypePointer) {
pointer_storage_class = p_type->storage_class;
// Find the type's node
- Node* p_type_node = FindNode(p_parser, p_type->id);
+ SpvReflectPrvNode* p_type_node = FindNode(p_parser, p_type->id);
if (IsNull(p_type_node)) {
return SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_ID_REFERENCE;
}
@@ -2116,7 +2155,7 @@ static SpvReflectResult ParseUAVCounterBindings(SpvReflectShaderModule* p_module
}
static SpvReflectResult ParseDescriptorBlockVariable(
- Parser* p_parser,
+ SpvReflectPrvParser* p_parser,
SpvReflectShaderModule* p_module,
SpvReflectTypeDescription* p_type,
SpvReflectBlockVariable* p_var
@@ -2131,7 +2170,7 @@ static SpvReflectResult ParseDescriptorBlockVariable(
return SPV_REFLECT_RESULT_ERROR_ALLOC_FAILED;
}
- Node* p_type_node = FindNode(p_parser, p_type->id);
+ SpvReflectPrvNode* p_type_node = FindNode(p_parser, p_type->id);
if (IsNull(p_type_node)) {
return SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_ID_REFERENCE;
}
@@ -2195,12 +2234,12 @@ static SpvReflectResult ParseDescriptorBlockVariable(
}
static SpvReflectResult ParseDescriptorBlockVariableSizes(
- Parser* p_parser,
- SpvReflectShaderModule* p_module,
- bool is_parent_root,
- bool is_parent_aos,
- bool is_parent_rta,
- SpvReflectBlockVariable* p_var
+ SpvReflectPrvParser* p_parser,
+ SpvReflectShaderModule* p_module,
+ bool is_parent_root,
+ bool is_parent_aos,
+ bool is_parent_rta,
+ SpvReflectBlockVariable* p_var
)
{
if (p_var->member_count == 0) {
@@ -2351,12 +2390,12 @@ static void MarkSelfAndAllMemberVarsAsUsed(SpvReflectBlockVariable* p_var)
}
static SpvReflectResult ParseDescriptorBlockVariableUsage(
- Parser* p_parser,
- SpvReflectShaderModule* p_module,
- AccessChain* p_access_chain,
- uint32_t index_index,
- SpvOp override_op_type,
- SpvReflectBlockVariable* p_var
+ SpvReflectPrvParser* p_parser,
+ SpvReflectShaderModule* p_module,
+ SpvReflectPrvAccessChain* p_access_chain,
+ uint32_t index_index,
+ SpvOp override_op_type,
+ SpvReflectBlockVariable* p_var
)
{
(void)p_parser;
@@ -2381,7 +2420,7 @@ static SpvReflectResult ParseDescriptorBlockVariableUsage(
SpvReflectTypeDescription* p_type = p_var->type_description;
while ((p_type->op == SpvOpTypeArray) && (index_index < p_access_chain->index_count)) {
// Find the array element type id
- Node* p_node = FindNode(p_parser, p_type->id);
+ SpvReflectPrvNode* p_node = FindNode(p_parser, p_type->id);
if (p_node == NULL) {
return SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_ID_REFERENCE;
}
@@ -2480,7 +2519,9 @@ static SpvReflectResult ParseDescriptorBlockVariableUsage(
return SPV_REFLECT_RESULT_SUCCESS;
}
-static SpvReflectResult ParseDescriptorBlocks(Parser* p_parser, SpvReflectShaderModule* p_module)
+static SpvReflectResult ParseDescriptorBlocks(
+ SpvReflectPrvParser* p_parser,
+ SpvReflectShaderModule* p_module)
{
if (p_module->descriptor_binding_count == 0) {
return SPV_REFLECT_RESULT_SUCCESS;
@@ -2504,7 +2545,7 @@ static SpvReflectResult ParseDescriptorBlocks(Parser* p_parser, SpvReflectShader
}
for (uint32_t access_chain_index = 0; access_chain_index < p_parser->access_chain_count; ++access_chain_index) {
- AccessChain* p_access_chain = &(p_parser->access_chains[access_chain_index]);
+ SpvReflectPrvAccessChain* p_access_chain = &(p_parser->access_chains[access_chain_index]);
// Skip any access chains that aren't touching this descriptor block
if (p_descriptor->spirv_id != p_access_chain->base_id) {
continue;
@@ -2557,6 +2598,8 @@ static SpvReflectResult ParseFormat(
case 4: *p_format = SPV_REFLECT_FORMAT_R32G32B32A32_SFLOAT; break;
}
}
+ break;
+
case 64: {
switch (component_count) {
case 2: *p_format = SPV_REFLECT_FORMAT_R64G64_SFLOAT; break;
@@ -2576,6 +2619,8 @@ static SpvReflectResult ParseFormat(
case 4: *p_format = signedness ? SPV_REFLECT_FORMAT_R32G32B32A32_SINT : SPV_REFLECT_FORMAT_R32G32B32A32_UINT; break;
}
}
+ break;
+
case 64: {
switch (component_count) {
case 2: *p_format = signedness ? SPV_REFLECT_FORMAT_R64G64_SINT : SPV_REFLECT_FORMAT_R64G64_UINT; break;
@@ -2616,15 +2661,15 @@ static SpvReflectResult ParseFormat(
}
static SpvReflectResult ParseInterfaceVariable(
- Parser* p_parser,
- const Decorations* p_type_node_decorations,
- SpvReflectShaderModule* p_module,
- SpvReflectTypeDescription* p_type,
- SpvReflectInterfaceVariable* p_var,
- bool* p_has_built_in
+ SpvReflectPrvParser* p_parser,
+ const SpvReflectPrvDecorations* p_type_node_decorations,
+ SpvReflectShaderModule* p_module,
+ SpvReflectTypeDescription* p_type,
+ SpvReflectInterfaceVariable* p_var,
+ bool* p_has_built_in
)
{
- Node* p_type_node = FindNode(p_parser, p_type->id);
+ SpvReflectPrvNode* p_type_node = FindNode(p_parser, p_type->id);
if (IsNull(p_type_node)) {
return SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_ID_REFERENCE;
}
@@ -2637,7 +2682,7 @@ static SpvReflectResult ParseInterfaceVariable(
}
for (uint32_t member_index = 0; member_index < p_type_node->member_count; ++member_index) {
- Decorations* p_member_decorations = &p_type_node->member_decorations[member_index];
+ SpvReflectPrvDecorations* p_member_decorations = &p_type_node->member_decorations[member_index];
SpvReflectTypeDescription* p_member_type = &p_type->members[member_index];
SpvReflectInterfaceVariable* p_member_var = &p_var->members[member_index];
SpvReflectResult result = ParseInterfaceVariable(p_parser, p_member_decorations, p_module, p_member_type, p_member_var, p_has_built_in);
@@ -2673,7 +2718,7 @@ static SpvReflectResult ParseInterfaceVariable(
}
static SpvReflectResult ParseInterfaceVariables(
- Parser* p_parser,
+ SpvReflectPrvParser* p_parser,
SpvReflectShaderModule* p_module,
SpvReflectEntryPoint* p_entry,
uint32_t interface_variable_count,
@@ -2689,7 +2734,7 @@ static SpvReflectResult ParseInterfaceVariables(
p_entry->output_variable_count = 0;
for (size_t i = 0; i < interface_variable_count; ++i) {
uint32_t var_result_id = *(p_interface_variable_ids + i);
- Node* p_node = FindNode(p_parser, var_result_id);
+ SpvReflectPrvNode* p_node = FindNode(p_parser, var_result_id);
if (IsNull(p_node)) {
return SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_ID_REFERENCE;
}
@@ -2727,7 +2772,7 @@ static SpvReflectResult ParseInterfaceVariables(
size_t output_index = 0;
for (size_t i = 0; i < interface_variable_count; ++i) {
uint32_t var_result_id = *(p_interface_variable_ids + i);
- Node* p_node = FindNode(p_parser, var_result_id);
+ SpvReflectPrvNode* p_node = FindNode(p_parser, var_result_id);
if (IsNull(p_node)) {
return SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_ID_REFERENCE;
}
@@ -2739,7 +2784,7 @@ static SpvReflectResult ParseInterfaceVariables(
// If the type is a pointer, resolve it
if (p_type->op == SpvOpTypePointer) {
// Find the type's node
- Node* p_type_node = FindNode(p_parser, p_type->id);
+ SpvReflectPrvNode* p_type_node = FindNode(p_parser, p_type->id);
if (IsNull(p_type_node)) {
return SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_ID_REFERENCE;
}
@@ -2750,7 +2795,7 @@ static SpvReflectResult ParseInterfaceVariables(
}
}
- Node* p_type_node = FindNode(p_parser, p_type->id);
+ SpvReflectPrvNode* p_type_node = FindNode(p_parser, p_type->id);
if (IsNull(p_type_node)) {
return SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_ID_REFERENCE;
}
@@ -2831,11 +2876,11 @@ static SpvReflectResult EnumerateAllPushConstants(
}
static SpvReflectResult TraverseCallGraph(
- Parser* p_parser,
- Function* p_func,
- size_t* p_func_count,
- uint32_t* p_func_ids,
- uint32_t depth
+ SpvReflectPrvParser* p_parser,
+ SpvReflectPrvFunction* p_func,
+ size_t* p_func_count,
+ uint32_t* p_func_ids,
+ uint32_t depth
)
{
if (depth > p_parser->function_count) {
@@ -2860,7 +2905,7 @@ static SpvReflectResult TraverseCallGraph(
}
static SpvReflectResult ParseStaticallyUsedResources(
- Parser* p_parser,
+ SpvReflectPrvParser* p_parser,
SpvReflectShaderModule* p_module,
SpvReflectEntryPoint* p_entry,
size_t uniform_count,
@@ -2870,7 +2915,7 @@ static SpvReflectResult ParseStaticallyUsedResources(
)
{
// Find function with the right id
- Function* p_func = NULL;
+ SpvReflectPrvFunction* p_func = NULL;
for (size_t i = 0; i < p_parser->function_count; ++i) {
if (p_parser->functions[i].id == p_entry->id) {
p_func = &(p_parser->functions[i]);
@@ -3004,7 +3049,9 @@ static SpvReflectResult ParseStaticallyUsedResources(
return SPV_REFLECT_RESULT_SUCCESS;
}
-static SpvReflectResult ParseEntryPoints(Parser* p_parser, SpvReflectShaderModule* p_module)
+static SpvReflectResult ParseEntryPoints(
+ SpvReflectPrvParser* p_parser,
+ SpvReflectShaderModule* p_module)
{
if (p_parser->entry_point_count == 0) {
return SPV_REFLECT_RESULT_SUCCESS;
@@ -3033,7 +3080,7 @@ static SpvReflectResult ParseEntryPoints(Parser* p_parser, SpvReflectShaderModul
size_t entry_point_index = 0;
for (size_t i = 0; entry_point_index < p_parser->entry_point_count && i < p_parser->node_count; ++i) {
- Node* p_node = &(p_parser->nodes[i]);
+ SpvReflectPrvNode* p_node = &(p_parser->nodes[i]);
if (p_node->op != SpvOpEntryPoint) {
continue;
}
@@ -3118,7 +3165,9 @@ static SpvReflectResult ParseEntryPoints(Parser* p_parser, SpvReflectShaderModul
return SPV_REFLECT_RESULT_SUCCESS;
}
-static SpvReflectResult ParseExecutionModes(Parser* p_parser, SpvReflectShaderModule* p_module)
+static SpvReflectResult ParseExecutionModes(
+ SpvReflectPrvParser* p_parser,
+ SpvReflectShaderModule* p_module)
{
assert(IsNotNull(p_parser));
assert(IsNotNull(p_parser->nodes));
@@ -3126,7 +3175,7 @@ static SpvReflectResult ParseExecutionModes(Parser* p_parser, SpvReflectShaderMo
if (IsNotNull(p_parser) && IsNotNull(p_parser->spirv_code) && IsNotNull(p_parser->nodes)) {
for (size_t node_idx = 0; node_idx < p_parser->node_count; ++node_idx) {
- Node* p_node = &(p_parser->nodes[node_idx]);
+ SpvReflectPrvNode* p_node = &(p_parser->nodes[node_idx]);
if (p_node->op != SpvOpExecutionMode) {
continue;
}
@@ -3217,12 +3266,12 @@ static SpvReflectResult ParseExecutionModes(Parser* p_parser, SpvReflectShaderMo
}
// -- GODOT begin --
-static SpvReflectResult ParseSpecializationConstants(Parser* p_parser, SpvReflectShaderModule* p_module)
+static SpvReflectResult ParseSpecializationConstants(SpvReflectPrvParser* p_parser, SpvReflectShaderModule* p_module)
{
p_module->specialization_constant_count = 0;
p_module->specialization_constants = NULL;
for (size_t i = 0; i < p_parser->node_count; ++i) {
- Node* p_node = &(p_parser->nodes[i]);
+ SpvReflectPrvNode* p_node = &(p_parser->nodes[i]);
if (p_node->op == SpvOpSpecConstantTrue || p_node->op == SpvOpSpecConstantFalse || p_node->op == SpvOpSpecConstant) {
p_module->specialization_constant_count++;
}
@@ -3237,7 +3286,7 @@ static SpvReflectResult ParseSpecializationConstants(Parser* p_parser, SpvReflec
uint32_t index = 0;
for (size_t i = 0; i < p_parser->node_count; ++i) {
- Node* p_node = &(p_parser->nodes[i]);
+ SpvReflectPrvNode* p_node = &(p_parser->nodes[i]);
switch(p_node->op) {
default: continue;
case SpvOpSpecConstantTrue: {
@@ -3255,7 +3304,7 @@ static SpvReflectResult ParseSpecializationConstants(Parser* p_parser, SpvReflec
IF_READU32(result, p_parser, p_node->word_offset + 1, element_type_id);
IF_READU32(result, p_parser, p_node->word_offset + 3, default_value);
- Node* p_next_node = FindNode(p_parser, element_type_id);
+ SpvReflectPrvNode* p_next_node = FindNode(p_parser, element_type_id);
if (p_next_node->op == SpvOpTypeInt) {
p_module->specialization_constants[index].constant_type = SPV_REFLECT_SPECIALIZATION_CONSTANT_INT;
@@ -3279,10 +3328,12 @@ static SpvReflectResult ParseSpecializationConstants(Parser* p_parser, SpvReflec
}
// -- GODOT end --
-static SpvReflectResult ParsePushConstantBlocks(Parser* p_parser, SpvReflectShaderModule* p_module)
+static SpvReflectResult ParsePushConstantBlocks(
+ SpvReflectPrvParser* p_parser,
+ SpvReflectShaderModule* p_module)
{
for (size_t i = 0; i < p_parser->node_count; ++i) {
- Node* p_node = &(p_parser->nodes[i]);
+ SpvReflectPrvNode* p_node = &(p_parser->nodes[i]);
if ((p_node->op != SpvOpVariable) || (p_node->storage_class != SpvStorageClassPushConstant)) {
continue;
}
@@ -3301,7 +3352,7 @@ static SpvReflectResult ParsePushConstantBlocks(Parser* p_parser, SpvReflectShad
uint32_t push_constant_index = 0;
for (size_t i = 0; i < p_parser->node_count; ++i) {
- Node* p_node = &(p_parser->nodes[i]);
+ SpvReflectPrvNode* p_node = &(p_parser->nodes[i]);
if ((p_node->op != SpvOpVariable) || (p_node->storage_class != SpvStorageClassPushConstant)) {
continue;
}
@@ -3313,7 +3364,7 @@ static SpvReflectResult ParsePushConstantBlocks(Parser* p_parser, SpvReflectShad
// If the type is a pointer, resolve it
if (p_type->op == SpvOpTypePointer) {
// Find the type's node
- Node* p_type_node = FindNode(p_parser, p_type->id);
+ SpvReflectPrvNode* p_type_node = FindNode(p_parser, p_type->id);
if (IsNull(p_type_node)) {
return SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_ID_REFERENCE;
}
@@ -3324,7 +3375,7 @@ static SpvReflectResult ParsePushConstantBlocks(Parser* p_parser, SpvReflectShad
}
}
- Node* p_type_node = FindNode(p_parser, p_type->id);
+ SpvReflectPrvNode* p_type_node = FindNode(p_parser, p_type->id);
if (IsNull(p_type_node)) {
return SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_ID_REFERENCE;
}
@@ -3577,7 +3628,7 @@ SpvReflectResult spvReflectCreateShaderModule(
}
memcpy(p_module->_internal->spirv_code, p_code, size);
- Parser parser = { 0 };
+ SpvReflectPrvParser parser = { 0 };
SpvReflectResult result = CreateParser(p_module->_internal->spirv_size,
p_module->_internal->spirv_code,
&parser);
@@ -4062,8 +4113,8 @@ SpvReflectResult spvReflectEnumerateEntryPointInterfaceVariables(
// -- GODOT begin --
SpvReflectResult spvReflectEnumerateSpecializationConstants(
- const SpvReflectShaderModule* p_module,
- uint32_t* p_count,
+ const SpvReflectShaderModule* p_module,
+ uint32_t* p_count,
SpvReflectSpecializationConstant** pp_constants
)
{
diff --git a/thirdparty/spirv-reflect/spirv_reflect.h b/thirdparty/spirv-reflect/spirv_reflect.h
index 21f8160770..50cc65222b 100644
--- a/thirdparty/spirv-reflect/spirv_reflect.h
+++ b/thirdparty/spirv-reflect/spirv_reflect.h
@@ -152,7 +152,7 @@ typedef enum SpvReflectFormat {
SPV_REFLECT_FORMAT_R64G64_SFLOAT = 115, // = VK_FORMAT_R64G64_SFLOAT
SPV_REFLECT_FORMAT_R64G64B64_UINT = 116, // = VK_FORMAT_R64G64B64_UINT
SPV_REFLECT_FORMAT_R64G64B64_SINT = 117, // = VK_FORMAT_R64G64B64_SINT
- SPV_REFLECT_FORMAT_R64G64B64_SFLOAT = 118, // = VK_FORMAT_R64G64B64_FLOAT
+ SPV_REFLECT_FORMAT_R64G64B64_SFLOAT = 118, // = VK_FORMAT_R64G64B64_SFLOAT
SPV_REFLECT_FORMAT_R64G64B64A64_UINT = 119, // = VK_FORMAT_R64G64B64A64_UINT
SPV_REFLECT_FORMAT_R64G64B64A64_SINT = 120, // = VK_FORMAT_R64G64B64A64_SINT
SPV_REFLECT_FORMAT_R64G64B64A64_SFLOAT = 121, // = VK_FORMAT_R64G64B64A64_SFLOAT
@@ -447,20 +447,20 @@ typedef struct SpvReflectShaderModule {
uint32_t source_language_version;
const char* source_file;
const char* source_source;
- SpvExecutionModel spirv_execution_model;
- SpvReflectShaderStageFlagBits shader_stage;
- uint32_t descriptor_binding_count;
- SpvReflectDescriptorBinding* descriptor_bindings;
- uint32_t descriptor_set_count;
- SpvReflectDescriptorSet descriptor_sets[SPV_REFLECT_MAX_DESCRIPTOR_SETS];
- uint32_t input_variable_count;
- SpvReflectInterfaceVariable** input_variables;
- uint32_t output_variable_count;
- SpvReflectInterfaceVariable** output_variables;
- uint32_t interface_variable_count;
- SpvReflectInterfaceVariable* interface_variables;
- uint32_t push_constant_block_count;
- SpvReflectBlockVariable* push_constant_blocks;
+ SpvExecutionModel spirv_execution_model; // Uses value(s) from first entry point
+ SpvReflectShaderStageFlagBits shader_stage; // Uses value(s) from first entry point
+ uint32_t descriptor_binding_count; // Uses value(s) from first entry point
+ SpvReflectDescriptorBinding* descriptor_bindings; // Uses value(s) from first entry point
+ uint32_t descriptor_set_count; // Uses value(s) from first entry point
+ SpvReflectDescriptorSet descriptor_sets[SPV_REFLECT_MAX_DESCRIPTOR_SETS]; // Uses value(s) from first entry point
+ uint32_t input_variable_count; // Uses value(s) from first entry point
+ SpvReflectInterfaceVariable** input_variables; // Uses value(s) from first entry point
+ uint32_t output_variable_count; // Uses value(s) from first entry point
+ SpvReflectInterfaceVariable** output_variables; // Uses value(s) from first entry point
+ uint32_t interface_variable_count; // Uses value(s) from first entry point
+ SpvReflectInterfaceVariable* interface_variables; // Uses value(s) from first entry point
+ uint32_t push_constant_block_count; // Uses value(s) from first entry point
+ SpvReflectBlockVariable* push_constant_blocks; // Uses value(s) from first entry point
// -- GODOT begin --
uint32_t specialization_constant_count;
SpvReflectSpecializationConstant* specialization_constants;
@@ -1426,6 +1426,9 @@ public:
ShaderModule(const std::vector<uint32_t>& code);
~ShaderModule();
+ ShaderModule(ShaderModule&& other);
+ ShaderModule& operator=(ShaderModule&& other);
+
SpvReflectResult GetResult() const;
const SpvReflectShaderModule& GetShaderModule() const;
@@ -1437,8 +1440,9 @@ public:
const char* GetSourceFile() const;
- uint32_t GetEntryPointCount() const;
- const char* GetEntryPointName(uint32_t index) const;
+ uint32_t GetEntryPointCount() const;
+ const char* GetEntryPointName(uint32_t index) const;
+ SpvReflectShaderStageFlagBits GetEntryPointShaderStage(uint32_t index) const;
SpvReflectShaderStageFlagBits GetShaderStage() const;
SPV_REFLECT_DEPRECATED("Renamed to GetShaderStage")
@@ -1568,6 +1572,20 @@ inline ShaderModule::~ShaderModule() {
}
+inline ShaderModule::ShaderModule(ShaderModule&& other)
+{
+ *this = std::move(other);
+}
+
+inline ShaderModule& ShaderModule::operator=(ShaderModule&& other)
+{
+ m_result = std::move(other.m_result);
+ m_module = std::move(other.m_module);
+
+ other.m_module = {};
+ return *this;
+}
+
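The move operations above transfer ownership and clear `other.m_module`, so only one ShaderModule instance releases the underlying SpvReflectShaderModule on destruction; without that reset, both destructors would tear down the same module. A short usage sketch (`LoadSpirv()` is a hypothetical loader returning valid SPIR-V words):

\code
std::vector<uint32_t> spirv = LoadSpirv(); // hypothetical loader
spv_reflect::ShaderModule a(spirv);
spv_reflect::ShaderModule b(std::move(a)); // 'b' now owns the module; 'a' is emptied
assert(b.GetResult() == SPV_REFLECT_RESULT_SUCCESS);
\endcode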
/*! @fn GetResult
@return
@@ -1644,9 +1662,18 @@ inline const char* ShaderModule::GetEntryPointName(uint32_t index) const {
return m_module.entry_points[index].name;
}
+/*! @fn GetEntryPointShaderStage
+
+ @param index
+ @return Returns the shader stage for the entry point at \b index
+*/
+inline SpvReflectShaderStageFlagBits ShaderModule::GetEntryPointShaderStage(uint32_t index) const {
+ return m_module.entry_points[index].shader_stage;
+}
+
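With this accessor, the stage of each entry point can be inspected directly instead of relying on the module-wide value; a sketch, reusing `spirv` from above:

\code
spv_reflect::ShaderModule module(spirv);
for (uint32_t i = 0; i < module.GetEntryPointCount(); ++i) {
    printf("entry point '%s' has stage 0x%x\n",
        module.GetEntryPointName(i),
        (unsigned)module.GetEntryPointShaderStage(i));
}
\endcode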
/*! @fn GetShaderStage
- @return Returns Vulkan shader stage
+ @return Returns shader stage for the first entry point
*/
inline SpvReflectShaderStageFlagBits ShaderModule::GetShaderStage() const {
diff --git a/thirdparty/vulkan/patches/VMA-assert-remove.patch b/thirdparty/vulkan/patches/VMA-assert-remove.patch
deleted file mode 100644
index 3d57ab7d42..0000000000
--- a/thirdparty/vulkan/patches/VMA-assert-remove.patch
+++ /dev/null
@@ -1,29 +0,0 @@
-diff --git a/thirdparty/vulkan/vk_mem_alloc.h b/thirdparty/vulkan/vk_mem_alloc.h
-index 0dfb66efc6..8a42699e7f 100644
---- a/thirdparty/vulkan/vk_mem_alloc.h
-+++ b/thirdparty/vulkan/vk_mem_alloc.h
-@@ -17508,24 +17508,6 @@ VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBuffer(
- allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
- requiresDedicatedAllocation, prefersDedicatedAllocation);
-
-- // Make sure alignment requirements for specific buffer usages reported
-- // in Physical Device Properties are included in alignment reported by memory requirements.
-- if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) != 0)
-- {
-- VMA_ASSERT(vkMemReq.alignment %
-- allocator->m_PhysicalDeviceProperties.limits.minTexelBufferOffsetAlignment == 0);
-- }
-- if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) != 0)
-- {
-- VMA_ASSERT(vkMemReq.alignment %
-- allocator->m_PhysicalDeviceProperties.limits.minUniformBufferOffsetAlignment == 0);
-- }
-- if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) != 0)
-- {
-- VMA_ASSERT(vkMemReq.alignment %
-- allocator->m_PhysicalDeviceProperties.limits.minStorageBufferOffsetAlignment == 0);
-- }
--
- // 3. Allocate memory using allocator.
- res = allocator->AllocateMemory(
- vkMemReq,
diff --git a/thirdparty/vulkan/vk_mem_alloc.h b/thirdparty/vulkan/vk_mem_alloc.h
index 26f9faa6e4..9890f20f7c 100644
--- a/thirdparty/vulkan/vk_mem_alloc.h
+++ b/thirdparty/vulkan/vk_mem_alloc.h
@@ -1,5 +1,5 @@
//
-// Copyright (c) 2017-2019 Advanced Micro Devices, Inc. All rights reserved.
+// Copyright (c) 2017-2021 Advanced Micro Devices, Inc. All rights reserved.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -23,15 +23,11 @@
#ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
#define AMD_VULKAN_MEMORY_ALLOCATOR_H
-#ifdef __cplusplus
-extern "C" {
-#endif
-
/** \mainpage Vulkan Memory Allocator
-<b>Version 2.3.0</b> (2019-12-04)
+<b>Version 3.0.0-development</b> (2021-06-21)
-Copyright (c) 2017-2019 Advanced Micro Devices, Inc. All rights reserved. \n
+Copyright (c) 2017-2021 Advanced Micro Devices, Inc. All rights reserved. \n
License: MIT
Documentation of all members: vk_mem_alloc.h
@@ -57,6 +53,7 @@ Documentation of all members: vk_mem_alloc.h
- \subpage staying_within_budget
- [Querying for budget](@ref staying_within_budget_querying_for_budget)
- [Controlling memory usage](@ref staying_within_budget_controlling_memory_usage)
+ - \subpage resource_aliasing
- \subpage custom_memory_pools
- [Choosing memory type index](@ref custom_memory_pools_MemTypeIndex)
- [Linear allocation algorithm](@ref linear_algorithm)
@@ -66,10 +63,10 @@ Documentation of all members: vk_mem_alloc.h
- [Ring buffer](@ref linear_algorithm_ring_buffer)
- [Buddy allocation algorithm](@ref buddy_algorithm)
- \subpage defragmentation
- - [Defragmenting CPU memory](@ref defragmentation_cpu)
- - [Defragmenting GPU memory](@ref defragmentation_gpu)
- - [Additional notes](@ref defragmentation_additional_notes)
- - [Writing custom allocation algorithm](@ref defragmentation_custom_algorithm)
+ - [Defragmenting CPU memory](@ref defragmentation_cpu)
+ - [Defragmenting GPU memory](@ref defragmentation_gpu)
+ - [Additional notes](@ref defragmentation_additional_notes)
+ - [Writing custom allocation algorithm](@ref defragmentation_custom_algorithm)
- \subpage lost_allocations
- \subpage statistics
- [Numeric statistics](@ref statistics_numeric_statistics)
@@ -92,6 +89,8 @@ Documentation of all members: vk_mem_alloc.h
- [Device memory allocation callbacks](@ref allocation_callbacks)
- [Device heap memory limit](@ref heap_memory_limit)
- \subpage vk_khr_dedicated_allocation
+ - \subpage enabling_buffer_device_address
+ - \subpage vk_amd_device_coherent_memory
- \subpage general_considerations
- [Thread safety](@ref general_considerations_thread_safety)
- [Validation layer warnings](@ref general_considerations_validation_layer_warnings)
@@ -123,7 +122,7 @@ To do it properly:
-# Include "vk_mem_alloc.h" file in each CPP file where you want to use the library.
This includes declarations of all members of the library.
--# In exacly one CPP file define following macro before this include.
+-# In exactly one CPP file define following macro before this include.
It also enables internal definitions.
\code
@@ -143,24 +142,42 @@ before including these headers (like `WIN32_LEAN_AND_MEAN` or
`WINVER` for Windows, `VK_USE_PLATFORM_WIN32_KHR` for Vulkan), you must define
them before every `#include` of this library.
+You may need to configure the way you import Vulkan functions.
+
- By default, VMA assumes you link statically with the Vulkan API. If this is not the case,
+ `#define VMA_STATIC_VULKAN_FUNCTIONS 0` before `#include` of the VMA implementation and use another way.
+- You can `#define VMA_DYNAMIC_VULKAN_FUNCTIONS 1` and make sure `vkGetInstanceProcAddr` and `vkGetDeviceProcAddr` globals are defined.
+ All the remaining Vulkan functions will be fetched automatically.
+- Finally, you can provide your own pointers to all Vulkan functions needed by VMA using structure member
+ VmaAllocatorCreateInfo::pVulkanFunctions, if you fetched them in some custom way e.g. using some loader like [Volk](https://github.com/zeux/volk).
+
\section quick_start_initialization Initialization
At program startup:
--# Initialize Vulkan to have `VkPhysicalDevice` and `VkDevice` object.
+-# Initialize Vulkan to have `VkPhysicalDevice`, `VkDevice` and `VkInstance` object.
-# Fill VmaAllocatorCreateInfo structure and create #VmaAllocator object by
calling vmaCreateAllocator().
\code
VmaAllocatorCreateInfo allocatorInfo = {};
+allocatorInfo.vulkanApiVersion = VK_API_VERSION_1_2;
allocatorInfo.physicalDevice = physicalDevice;
allocatorInfo.device = device;
+allocatorInfo.instance = instance;
VmaAllocator allocator;
vmaCreateAllocator(&allocatorInfo, &allocator);
\endcode
+Only members `physicalDevice`, `device`, `instance` are required.
+However, you should inform the library which Vulkan version you use by setting
+VmaAllocatorCreateInfo::vulkanApiVersion and which extensions you enabled
+by setting VmaAllocatorCreateInfo::flags (like #VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT for VK_KHR_buffer_device_address).
+Otherwise, VMA would use only features of Vulkan 1.0 core with no extensions.
+
+
\section quick_start_resource_allocation Resource allocation
When you want to create a buffer or image:
@@ -302,6 +319,7 @@ VmaAllocation allocation;
vmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &buffer, &allocation, nullptr);
\endcode
+
\section choosing_memory_type_custom_memory_pools Custom memory pools
If you allocate from custom memory pool, all the ways of specifying memory
@@ -422,12 +440,10 @@ There are some exceptions though, when you should consider mapping memory only f
block is migrated by WDDM to system RAM, which degrades performance. It doesn't
matter if that particular memory block is actually used by the command buffer
being submitted.
-- On Mac/MoltenVK there is a known bug - [Issue #175](https://github.com/KhronosGroup/MoltenVK/issues/175)
- which requires unmapping before GPU can see updated texture.
- Keeping many large memory blocks mapped may impact performance or stability of some debugging tools.
\section memory_mapping_cache_control Cache flush and invalidate
-
+
Memory in Vulkan doesn't need to be unmapped before using it on GPU,
but unless a memory types has `VK_MEMORY_PROPERTY_HOST_COHERENT_BIT` flag set,
you need to manually **invalidate** cache before reading of mapped pointer
@@ -436,7 +452,8 @@ Map/unmap operations don't do that automatically.
Vulkan provides following functions for this purpose `vkFlushMappedMemoryRanges()`,
`vkInvalidateMappedMemoryRanges()`, but this library provides more convenient
functions that refer to given allocation object: vmaFlushAllocation(),
-vmaInvalidateAllocation().
+vmaInvalidateAllocation(),
+or multiple objects at once: vmaFlushAllocations(), vmaInvalidateAllocations().
Regions of memory specified for flush/invalidate must be aligned to
`VkPhysicalDeviceLimits::nonCoherentAtomSize`. This is automatically ensured by the library.
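For example, after writing through a mapped pointer, one call makes the whole allocation visible to the device; a sketch assuming `alloc` lives in HOST_VISIBLE, possibly non-coherent memory:

\code
void* mappedData;
vmaMapMemory(allocator, alloc, &mappedData);
memcpy(mappedData, &constants, sizeof(constants));
vmaUnmapMemory(allocator, alloc);
// Needed only if the memory type lacks HOST_COHERENT; harmless otherwise.
vmaFlushAllocation(allocator, alloc, 0, VK_WHOLE_SIZE);
\endcode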
@@ -478,7 +495,7 @@ vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allo
VkMemoryPropertyFlags memFlags;
vmaGetMemoryTypeProperties(allocator, allocInfo.memoryType, &memFlags);
-if((memFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
+if((memFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
{
// Allocation ended up in mappable memory. You can map it and access it directly.
void* mappedData;
@@ -513,7 +530,7 @@ VmaAllocation alloc;
VmaAllocationInfo allocInfo;
vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
-if(allocInfo.pUserData != nullptr)
+if(allocInfo.pMappedData != nullptr)
{
// Allocation ended up in mappable memory.
// It's persistently mapped. You can access it directly.
@@ -599,6 +616,114 @@ set to more than 0 will try to allocate memory blocks without checking whether t
fit within budget.
+\page resource_aliasing Resource aliasing (overlap)
+
+New explicit graphics APIs (Vulkan and Direct3D 12), thanks to manual memory
+management, give an opportunity to alias (overlap) multiple resources in the
+same region of memory - a feature not available in the old APIs (Direct3D 11, OpenGL).
+It can be useful to save video memory, but it must be used with caution.
+
+For example, if you know the flow of your whole render frame in advance, you
+are going to use some intermediate textures or buffers only during a small range of render passes,
+and you know these ranges don't overlap in time, you can bind these resources to
+the same place in memory, even if they have completely different parameters (width, height, format etc.).
+
+![Resource aliasing (overlap)](../gfx/Aliasing.png)
+
+Such a scenario is possible with VMA, but you need to create your images manually.
+Then you need to calculate the parameters of the allocation to be made using this formula:
+
+- allocation size = max(size of each image)
+- allocation alignment = max(alignment of each image)
+- allocation memoryTypeBits = bitwise AND(memoryTypeBits of each image)
+
+The following example shows two different images bound to the same place in memory,
+allocated to fit the largest of them.
+
+\code
+// A 512x512 texture to be sampled.
+VkImageCreateInfo img1CreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
+img1CreateInfo.imageType = VK_IMAGE_TYPE_2D;
+img1CreateInfo.extent.width = 512;
+img1CreateInfo.extent.height = 512;
+img1CreateInfo.extent.depth = 1;
+img1CreateInfo.mipLevels = 10;
+img1CreateInfo.arrayLayers = 1;
+img1CreateInfo.format = VK_FORMAT_R8G8B8A8_SRGB;
+img1CreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
+img1CreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
+img1CreateInfo.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT;
+img1CreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;
+
+// A full screen texture to be used as color attachment.
+VkImageCreateInfo img2CreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
+img2CreateInfo.imageType = VK_IMAGE_TYPE_2D;
+img2CreateInfo.extent.width = 1920;
+img2CreateInfo.extent.height = 1080;
+img2CreateInfo.extent.depth = 1;
+img2CreateInfo.mipLevels = 1;
+img2CreateInfo.arrayLayers = 1;
+img2CreateInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
+img2CreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
+img2CreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
+img2CreateInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
+img2CreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;
+
+VkImage img1;
+res = vkCreateImage(device, &img1CreateInfo, nullptr, &img1);
+VkImage img2;
+res = vkCreateImage(device, &img2CreateInfo, nullptr, &img2);
+
+VkMemoryRequirements img1MemReq;
+vkGetImageMemoryRequirements(device, img1, &img1MemReq);
+VkMemoryRequirements img2MemReq;
+vkGetImageMemoryRequirements(device, img2, &img2MemReq);
+
+VkMemoryRequirements finalMemReq = {};
+finalMemReq.size = std::max(img1MemReq.size, img2MemReq.size);
+finalMemReq.alignment = std::max(img1MemReq.alignment, img2MemReq.alignment);
+finalMemReq.memoryTypeBits = img1MemReq.memoryTypeBits & img2MemReq.memoryTypeBits;
+// Validate if(finalMemReq.memoryTypeBits != 0)
+
+VmaAllocationCreateInfo allocCreateInfo = {};
+allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
+
+VmaAllocation alloc;
+res = vmaAllocateMemory(allocator, &finalMemReq, &allocCreateInfo, &alloc, nullptr);
+
+res = vmaBindImageMemory(allocator, alloc, img1);
+res = vmaBindImageMemory(allocator, alloc, img2);
+
+// You can use img1, img2 here, but not at the same time!
+
+vmaFreeMemory(allocator, alloc);
+vkDestroyImage(device, img2, nullptr);
+vkDestroyImage(device, img1, nullptr);
+\endcode
+
+Remember that using resources that alias in memory requires proper synchronization.
+You need to issue a memory barrier to make sure commands that use `img1` and `img2`
+don't overlap on GPU timeline.
+You also need to treat a resource after aliasing as uninitialized - containing garbage data.
+For example, if you use `img1` and then want to use `img2`, you need to issue
+an image memory barrier for `img2` with `oldLayout` = `VK_IMAGE_LAYOUT_UNDEFINED`.
+
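A sketch of such a barrier for `img2`, recorded between the last commands using `img1` and the first commands using `img2` (the stage and access masks shown are illustrative and depend on actual usage; `cmdBuf` is an already-recording command buffer):

\code
VkImageMemoryBarrier barrier = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER };
barrier.srcAccessMask = VK_ACCESS_SHADER_READ_BIT;
barrier.dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
barrier.oldLayout = VK_IMAGE_LAYOUT_UNDEFINED; // previous contents are garbage
barrier.newLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
barrier.image = img2;
barrier.subresourceRange = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1 };
vkCmdPipelineBarrier(cmdBuf,
    VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
    VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
    0, 0, nullptr, 0, nullptr, 1, &barrier);
\endcode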
+Additional considerations:
+
+- Vulkan also allows interpreting the contents of memory between aliasing resources consistently in some cases.
+See chapter 11.8. "Memory Aliasing" of the Vulkan specification or the `VK_IMAGE_CREATE_ALIAS_BIT` flag.
+- You can create a more complex layout where different images and buffers are bound
+at different offsets inside one large allocation. For example, one can imagine
+a big texture used in some render passes, aliasing with a set of many small buffers
+used in some further passes. To bind a resource at a non-zero offset of an allocation,
+use vmaBindBufferMemory2() / vmaBindImageMemory2() (a sketch follows this list).
+- Before allocating memory for the resources you want to alias, check `memoryTypeBits`
+returned in memory requirements of each resource to make sure the bits overlap.
+Some GPUs may expose multiple memory types suitable e.g. only for buffers or
+images with `COLOR_ATTACHMENT` usage, so the sets of memory types supported by your
+resources may be disjoint. Aliasing them is not possible in that case.
+
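A sketch of the offset binding mentioned above, assuming `smallBuf` is a hypothetical buffer and `alloc` is large enough at that offset:

\code
VkDeviceSize offset = 65536; // must satisfy the buffer's alignment requirement
res = vmaBindBufferMemory2(allocator, alloc, offset, smallBuf, nullptr);
\endcode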
+
\page custom_memory_pools Custom memory pools
A memory pool contains a number of `VkDeviceMemory` blocks.
@@ -822,7 +947,7 @@ allocations.
To mitigate this problem, you can use defragmentation feature:
structure #VmaDefragmentationInfo2, function vmaDefragmentationBegin(), vmaDefragmentationEnd().
-Given set of allocations,
+Given set of allocations,
this function can move them to compact used memory, ensure more continuous free
space and possibly also free some `VkDeviceMemory` blocks.
@@ -888,9 +1013,9 @@ for(uint32_t i = 0; i < allocCount; ++i)
// Create new buffer with same parameters.
VkBufferCreateInfo bufferInfo = ...;
vkCreateBuffer(device, &bufferInfo, nullptr, &buffers[i]);
-
+
// You can make dummy call to vkGetBufferMemoryRequirements here to silence validation layer warning.
-
+
// Bind new buffer to new memory region. Data contained in it is already moved.
VmaAllocationInfo allocInfo;
vmaGetAllocationInfo(allocator, allocations[i], &allocInfo);
@@ -966,9 +1091,9 @@ for(uint32_t i = 0; i < allocCount; ++i)
// Create new buffer with same parameters.
VkBufferCreateInfo bufferInfo = ...;
vkCreateBuffer(device, &bufferInfo, nullptr, &buffers[i]);
-
+
// You can make dummy call to vkGetBufferMemoryRequirements here to silence validation layer warning.
-
+
// Bind new buffer to new memory region. Data contained in it is already moved.
VmaAllocationInfo allocInfo;
vmaGetAllocationInfo(allocator, allocations[i], &allocInfo);
@@ -1005,7 +1130,7 @@ See [Validation layer warnings](@ref general_considerations_validation_layer_war
Please don't expect memory to be fully compacted after defragmentation.
Algorithms inside are based on some heuristics that try to maximize number of Vulkan
-memory blocks to make totally empty to release them, as well as to maximimze continuous
+memory blocks to make totally empty to release them, as well as to maximize continuous
empty space inside remaining blocks, while minimizing the number and size of allocations that
need to be moved. Some fragmentation may still remain - this is normal.
@@ -1268,6 +1393,9 @@ printf("Image name: %s\n", imageName);
That string is also printed in JSON report created by vmaBuildStatsString().
+\note Passing string name to VMA allocation doesn't automatically set it to the Vulkan buffer or image created with it.
+You must do it manually using an extension like VK_EXT_debug_utils, which is independent of this library.
+
\page debugging_memory_usage Debugging incorrect memory usage
@@ -1359,7 +1487,7 @@ which indicates a serious bug.
You can also explicitly request checking margins of all allocations in all memory blocks
that belong to specified memory types by using function vmaCheckCorruption(),
-or in memory blocks that belong to specified custom pool, by using function
+or in memory blocks that belong to specified custom pool, by using function
vmaCheckPoolCorruption().
Margin validation (corruption detection) works only for memory types that are
@@ -1500,6 +1628,7 @@ This is a more complex situation. Different solutions are possible,
and the best one depends on specific GPU type, but you can use this simple approach for the start.
Prefer to write to such resource sequentially (e.g. using `memcpy`).
Don't perform random access or any reads from it on CPU, as it may be very slow.
+Also note that textures written directly from the host through a mapped pointer need to use LINEAR, not OPTIMAL, layout.
\subsection usage_patterns_readback Readback
@@ -1532,17 +1661,17 @@ directly instead of submitting explicit transfer (see below).
For resources that you frequently write on CPU and read on GPU, many solutions are possible:
-# Create one copy in video memory using #VMA_MEMORY_USAGE_GPU_ONLY,
- second copy in system memory using #VMA_MEMORY_USAGE_CPU_ONLY and submit explicit tranfer each time.
--# Create just single copy using #VMA_MEMORY_USAGE_CPU_TO_GPU, map it and fill it on CPU,
+ second copy in system memory using #VMA_MEMORY_USAGE_CPU_ONLY and submit explicit transfer each time.
+-# Create just a single copy using #VMA_MEMORY_USAGE_CPU_TO_GPU, map it and fill it on CPU,
read it directly on GPU.
--# Create just single copy using #VMA_MEMORY_USAGE_CPU_ONLY, map it and fill it on CPU,
+-# Create just a single copy using #VMA_MEMORY_USAGE_CPU_ONLY, map it and fill it on CPU,
read it directly on GPU.
Which solution is the most efficient depends on your resource and especially on the GPU.
It is best to measure it and then make the decision.
Some general recommendations:
-- On integrated graphics use (2) or (3) to avoid unnecesary time and memory overhead
+- On integrated graphics use (2) or (3) to avoid unnecessary time and memory overhead
related to using a second copy and making transfer.
- For small resources (e.g. constant buffers) use (2).
Discrete AMD cards have special 256 MiB pool of video memory that is directly mappable.
@@ -1563,6 +1692,10 @@ solutions are possible:
You should take some measurements to decide which option is faster in case of your specific
resource.
+Note that textures accessed directly from the host through a mapped pointer need to be in LINEAR layout,
+which may slow down their usage on the device.
+Textures accessed only by the device and transfer operations can use OPTIMAL layout.
+
If you don't want to specialize your code for specific types of GPUs, you can still make
a simple optimization for cases when your resource ends up in mappable memory: use it
directly in that case instead of creating a CPU-side staging copy.
@@ -1585,12 +1718,31 @@ and empty otherwise.
\section config_Vulkan_functions Pointers to Vulkan functions
-The library uses Vulkan functions straight from the `vulkan.h` header by default.
-If you want to provide your own pointers to these functions, e.g. fetched using
-`vkGetInstanceProcAddr()` and `vkGetDeviceProcAddr()`:
+There are multiple ways to import pointers to Vulkan functions in the library.
+In the simplest case you don't need to do anything.
+If the compilation or linking of your program or the initialization of the #VmaAllocator
+doesn't work for you, you can try to reconfigure it.
+
+First, the allocator tries to fetch pointers to Vulkan functions linked statically,
+like this:
+
+\code
+m_VulkanFunctions.vkAllocateMemory = (PFN_vkAllocateMemory)vkAllocateMemory;
+\endcode
+
+If you want to disable this feature, set configuration macro: `#define VMA_STATIC_VULKAN_FUNCTIONS 0`.
+
+Second, you can provide the pointers yourself by setting member VmaAllocatorCreateInfo::pVulkanFunctions.
+You can fetch them e.g. using functions `vkGetInstanceProcAddr` and `vkGetDeviceProcAddr` or
+by using a helper library like [volk](https://github.com/zeux/volk).
+
+Third, VMA tries to fetch remaining pointers that are still null by calling
+`vkGetInstanceProcAddr` and `vkGetDeviceProcAddr` on its own.
+If you want to disable this feature, set configuration macro: `#define VMA_DYNAMIC_VULKAN_FUNCTIONS 0`.
+
+Finally, all the function pointers required by the library (considering selected
+Vulkan version and enabled extensions) are checked with `VMA_ASSERT` if they are not null.
--# Define `VMA_STATIC_VULKAN_FUNCTIONS 0`.
--# Provide valid pointers through VmaAllocatorCreateInfo::pVulkanFunctions.
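A partial sketch of the second option, mixing statically linked and dynamically fetched pointers (only two of the many members shown):

\code
VmaVulkanFunctions vulkanFunctions = {};
vulkanFunctions.vkGetPhysicalDeviceProperties = vkGetPhysicalDeviceProperties; // statically linked
vulkanFunctions.vkAllocateMemory =
    (PFN_vkAllocateMemory)vkGetDeviceProcAddr(device, "vkAllocateMemory"); // fetched dynamically
// ... fill the remaining members the same way ...
allocatorInfo.pVulkanFunctions = &vulkanFunctions;
\endcode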
\section custom_memory_allocator Custom host memory allocator
@@ -1612,7 +1764,7 @@ VmaAllocatorCreateInfo::pDeviceMemoryCallbacks.
When device memory of certain heap runs out of free space, new allocations may
fail (returning error code) or they may succeed, silently pushing some existing
memory blocks from GPU VRAM to system RAM (which degrades performance). This
-behavior is implementation-dependant - it depends on GPU vendor and graphics
+behavior is implementation-dependent - it depends on GPU vendor and graphics
driver.
On AMD cards it can be controlled while creating Vulkan device object by using
@@ -1670,11 +1822,115 @@ unaware of it.
To learn more about this extension, see:
-- [VK_KHR_dedicated_allocation in Vulkan specification](https://www.khronos.org/registry/vulkan/specs/1.0-extensions/html/vkspec.html#VK_KHR_dedicated_allocation)
+- [VK_KHR_dedicated_allocation in Vulkan specification](https://www.khronos.org/registry/vulkan/specs/1.2-extensions/html/chap50.html#VK_KHR_dedicated_allocation)
- [VK_KHR_dedicated_allocation unofficial manual](http://asawicki.info/articles/VK_KHR_dedicated_allocation.php5)
+\page vk_amd_device_coherent_memory VK_AMD_device_coherent_memory
+
+VK_AMD_device_coherent_memory is a device extension that enables access to
+additional memory types with `VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD` and
+`VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD` flags. It is useful mostly for
+allocation of buffers intended for writing "breadcrumb markers" in between passes
+or draw calls, which in turn are useful for debugging GPU crash/hang/TDR cases.
+
+When the extension is available but has not been enabled, the Vulkan physical device
+still exposes those memory types, but their usage is forbidden. VMA automatically
+takes care of that - it returns `VK_ERROR_FEATURE_NOT_PRESENT` when an attempt
+to allocate memory of such type is made.
+
+If you want to use this extension in connection with VMA, follow these steps:
+
+\section vk_amd_device_coherent_memory_initialization Initialization
+
+1) Call `vkEnumerateDeviceExtensionProperties` for the physical device.
+Check if the extension is supported - whether the returned array of `VkExtensionProperties` contains "VK_AMD_device_coherent_memory".
+
+2) Call `vkGetPhysicalDeviceFeatures2` for the physical device instead of old `vkGetPhysicalDeviceFeatures`.
+Attach additional structure `VkPhysicalDeviceCoherentMemoryFeaturesAMD` to `VkPhysicalDeviceFeatures2::pNext` to be returned.
+Check if the device feature is really supported - check if `VkPhysicalDeviceCoherentMemoryFeaturesAMD::deviceCoherentMemory` is true.
+
+3) While creating device with `vkCreateDevice`, enable this extension - add "VK_AMD_device_coherent_memory"
+to the list passed as `VkDeviceCreateInfo::ppEnabledExtensionNames`.
+
+4) While creating the device, also don't set `VkDeviceCreateInfo::pEnabledFeatures`.
+Fill in `VkPhysicalDeviceFeatures2` structure instead and pass it as `VkDeviceCreateInfo::pNext`.
+Enable this device feature - attach additional structure `VkPhysicalDeviceCoherentMemoryFeaturesAMD` to
+`VkPhysicalDeviceFeatures2::pNext` and set its member `deviceCoherentMemory` to `VK_TRUE`.
+
+5) While creating #VmaAllocator with vmaCreateAllocator() inform VMA that you
+have enabled this extension and feature - add #VMA_ALLOCATOR_CREATE_AMD_DEVICE_COHERENT_MEMORY_BIT
+to VmaAllocatorCreateInfo::flags.
+
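A sketch of the feature query from step 2); steps 3) and 4) can reuse the same structure chain when creating the device:

\code
VkPhysicalDeviceCoherentMemoryFeaturesAMD coherentMemoryFeatures = {
    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COHERENT_MEMORY_FEATURES_AMD };
VkPhysicalDeviceFeatures2 features2 = { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2 };
features2.pNext = &coherentMemoryFeatures;
vkGetPhysicalDeviceFeatures2(physicalDevice, &features2);
if (coherentMemoryFeatures.deviceCoherentMemory == VK_TRUE) {
    // The feature can be enabled at device creation time (steps 3-4).
}
\endcode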
+\section vk_amd_device_coherent_memory_usage Usage
+
+After following steps described above, you can create VMA allocations and custom pools
+out of the special `DEVICE_COHERENT` and `DEVICE_UNCACHED` memory types on eligible
+devices. There are multiple ways to do it, for example:
+
+- You can request or prefer to allocate out of such memory types by adding
+ `VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD` to VmaAllocationCreateInfo::requiredFlags
+ or VmaAllocationCreateInfo::preferredFlags. Those flags can be freely mixed with
+ other ways of \ref choosing_memory_type, like setting VmaAllocationCreateInfo::usage.
+- If you manually found memory type index to use for this purpose, force allocation
+ from this specific index by setting VmaAllocationCreateInfo::memoryTypeBits `= 1u << index`.
+
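For instance, to require both properties for a single allocation (a sketch; remaining VmaAllocationCreateInfo members as usual):

\code
VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
allocCreateInfo.requiredFlags = VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD |
    VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD;
\endcode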
+\section vk_amd_device_coherent_memory_more_information More information
+
+To learn more about this extension, see [VK_AMD_device_coherent_memory in Vulkan specification](https://www.khronos.org/registry/vulkan/specs/1.2-extensions/html/chap44.html#VK_AMD_device_coherent_memory)
+
+Example use of this extension can be found in the code of the sample and test suite
+accompanying this library.
+
+
+\page enabling_buffer_device_address Enabling buffer device address
+
+Device extension VK_KHR_buffer_device_address
+allows fetching a raw GPU pointer to a buffer and passing it for use in shader code.
+It is promoted to core Vulkan 1.2.
+
+If you want to use this feature in connection with VMA, follow these steps:
+
+\section enabling_buffer_device_address_initialization Initialization
+
+1) (For Vulkan version < 1.2) Call `vkEnumerateDeviceExtensionProperties` for the physical device.
+Check if the extension is supported - whether the returned array of `VkExtensionProperties` contains
+"VK_KHR_buffer_device_address".
+
+2) Call `vkGetPhysicalDeviceFeatures2` for the physical device instead of old `vkGetPhysicalDeviceFeatures`.
+Attach additional structure `VkPhysicalDeviceBufferDeviceAddressFeatures*` to `VkPhysicalDeviceFeatures2::pNext` to be returned.
+Check if the device feature is really supported - check if `VkPhysicalDeviceBufferDeviceAddressFeatures*::bufferDeviceAddress` is true.
+
+3) (For Vulkan version < 1.2) While creating device with `vkCreateDevice`, enable this extension - add
+"VK_KHR_buffer_device_address" to the list passed as `VkDeviceCreateInfo::ppEnabledExtensionNames`.
+
+4) While creating the device, also don't set `VkDeviceCreateInfo::pEnabledFeatures`.
+Fill in `VkPhysicalDeviceFeatures2` structure instead and pass it as `VkDeviceCreateInfo::pNext`.
+Enable this device feature - attach additional structure `VkPhysicalDeviceBufferDeviceAddressFeatures*` to
+`VkPhysicalDeviceFeatures2::pNext` and set its member `bufferDeviceAddress` to `VK_TRUE`.
+
+5) While creating #VmaAllocator with vmaCreateAllocator() inform VMA that you
+have enabled this feature - add #VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT
+to VmaAllocatorCreateInfo::flags.
+
+\section enabling_buffer_device_address_usage Usage
+
+After following steps described above, you can create buffers with `VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT*` using VMA.
+The library automatically adds `VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT*` to
+allocated memory blocks wherever it might be needed.
+
+Please note that the library supports only `VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT*`.
+The second part of this functionality related to "capture and replay" is not supported,
+as it is intended for usage in debugging tools like RenderDoc, not in everyday Vulkan usage.
+
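A sketch of creating such a buffer and fetching its device address (Vulkan 1.2 names; use the KHR-suffixed equivalents on older versions):

\code
VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
bufCreateInfo.size = 65536;
bufCreateInfo.usage = VK_BUFFER_USAGE_STORAGE_BUFFER_BIT |
    VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT;

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

VkBuffer buf;
VmaAllocation alloc;
vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, nullptr);

VkBufferDeviceAddressInfo addressInfo = { VK_STRUCTURE_TYPE_BUFFER_DEVICE_ADDRESS_INFO };
addressInfo.buffer = buf;
VkDeviceAddress gpuAddress = vkGetBufferDeviceAddress(device, &addressInfo);
\endcode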
+\section enabling_buffer_device_address_more_information More information
+
+To learn more about this extension, see [VK_KHR_buffer_device_address in Vulkan specification](https://www.khronos.org/registry/vulkan/specs/1.2-extensions/html/chap46.html#VK_KHR_buffer_device_address)
+
+Example use of this extension can be found in the code of the sample and test suite
+accompanying this library.
+
\page general_considerations General considerations
\section general_considerations_thread_safety Thread safety
@@ -1708,7 +1964,7 @@ to just ignore them.
up together, especially on GPUs with unified memory like Intel.
- *Non-linear image 0xebc91 is aliased with linear buffer 0xeb8e4 which may indicate a bug.*
- It happens when you use lost allocations, and a new image or buffer is
- created in place of an existing object that bacame lost.
+ created in place of an existing object that became lost.
- It may happen also when you use [defragmentation](@ref defragmentation).
\section general_considerations_allocation_algorithm Allocation algorithm
@@ -1731,7 +1987,7 @@ The library uses following algorithm for allocation, in order:
Features deliberately excluded from the scope of this library:
-- Data transfer. Uploading (straming) and downloading data of buffers and images
+- Data transfer. Uploading (streaming) and downloading data of buffers and images
between CPU and GPU memory and related synchronization is responsibility of the user.
Defining some "texture" object that would automatically stream its data from a
staging copy in CPU memory to GPU memory would rather be a feature of another,
@@ -1740,6 +1996,8 @@ Features deliberately excluded from the scope of this library:
explicit memory type index and dedicated allocation anyway, so they don't
interact with main features of this library. Such special purpose allocations
should be made manually, using `vkCreateBuffer()` and `vkAllocateMemory()`.
+- Sub-allocation of parts of one large buffer. Although recommended as a good practice,
+ it is the user's responsibility to implement such logic on top of VMA.
- Recreation of buffers and images. Although the library has functions for
buffer and image creation (vmaCreateBuffer(), vmaCreateImage()), you need to
recreate these objects yourself after defragmentation. That's because the big
@@ -1749,8 +2007,9 @@ Features deliberately excluded from the scope of this library:
objects in CPU memory (not Vulkan memory), allocation failures are not checked
and handled gracefully, because that would complicate code significantly and
is usually not needed in desktop PC applications anyway.
+ Success of an allocation is just checked with an assert.
- Code free of any compiler warnings. Maintaining the library to compile and
- work correctly on so many different platforms is hard enough. Being free of
+ work correctly on so many different platforms is hard enough. Being free of
any warnings, on any version of any compiler, is simply not feasible.
- This is a C++ library with C interface.
Bindings or ports to any other programming languages are welcomed as external projects and
@@ -1758,6 +2017,10 @@ Features deliberately excluded from the scope of this library:
*/
+#ifdef __cplusplus
+extern "C" {
+#endif
+
/*
Define this macro to 0/1 to disable/enable support for recording functionality,
available through VmaAllocatorCreateInfo::pRecordSettings.
@@ -1766,10 +2029,39 @@ available through VmaAllocatorCreateInfo::pRecordSettings.
#define VMA_RECORDING_ENABLED 0
#endif
-#ifndef NOMINMAX
+#if !defined(NOMINMAX) && defined(VMA_IMPLEMENTATION)
#define NOMINMAX // For windows.h
#endif
+#if defined(__ANDROID__) && defined(VK_NO_PROTOTYPES) && VMA_STATIC_VULKAN_FUNCTIONS
+ extern PFN_vkGetInstanceProcAddr vkGetInstanceProcAddr;
+ extern PFN_vkGetDeviceProcAddr vkGetDeviceProcAddr;
+ extern PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
+ extern PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
+ extern PFN_vkAllocateMemory vkAllocateMemory;
+ extern PFN_vkFreeMemory vkFreeMemory;
+ extern PFN_vkMapMemory vkMapMemory;
+ extern PFN_vkUnmapMemory vkUnmapMemory;
+ extern PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;
+ extern PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;
+ extern PFN_vkBindBufferMemory vkBindBufferMemory;
+ extern PFN_vkBindImageMemory vkBindImageMemory;
+ extern PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
+ extern PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
+ extern PFN_vkCreateBuffer vkCreateBuffer;
+ extern PFN_vkDestroyBuffer vkDestroyBuffer;
+ extern PFN_vkCreateImage vkCreateImage;
+ extern PFN_vkDestroyImage vkDestroyImage;
+ extern PFN_vkCmdCopyBuffer vkCmdCopyBuffer;
+ #if VMA_VULKAN_VERSION >= 1001000
+ extern PFN_vkGetBufferMemoryRequirements2 vkGetBufferMemoryRequirements2;
+ extern PFN_vkGetImageMemoryRequirements2 vkGetImageMemoryRequirements2;
+ extern PFN_vkBindBufferMemory2 vkBindBufferMemory2;
+ extern PFN_vkBindImageMemory2 vkBindImageMemory2;
+ extern PFN_vkGetPhysicalDeviceMemoryProperties2 vkGetPhysicalDeviceMemoryProperties2;
+ #endif // #if VMA_VULKAN_VERSION >= 1001000
+#endif // #if defined(__ANDROID__) && VMA_STATIC_VULKAN_FUNCTIONS && VK_NO_PROTOTYPES
+
#ifndef VULKAN_H_
#ifdef USE_VOLK
#include <volk.h>
@@ -1778,15 +2070,13 @@ available through VmaAllocatorCreateInfo::pRecordSettings.
#endif
#endif
-#if VMA_RECORDING_ENABLED
- #include <windows.h>
-#endif
-
// Define this macro to declare maximum supported Vulkan version in format AAABBBCCC,
// where AAA = major, BBB = minor, CCC = patch.
// If you want to use version > 1.0, it still needs to be enabled via VmaAllocatorCreateInfo::vulkanApiVersion.
#if !defined(VMA_VULKAN_VERSION)
- #if defined(VK_VERSION_1_1)
+ #if defined(VK_VERSION_1_2)
+ #define VMA_VULKAN_VERSION 1002000
+ #elif defined(VK_VERSION_1_1)
#define VMA_VULKAN_VERSION 1001000
#else
#define VMA_VULKAN_VERSION 1000000
@@ -1817,9 +2107,36 @@ available through VmaAllocatorCreateInfo::pRecordSettings.
#endif
#endif
+// Defined to 1 when VK_KHR_buffer_device_address device extension or equivalent core Vulkan 1.2 feature is defined in its headers.
+#if !defined(VMA_BUFFER_DEVICE_ADDRESS)
+ #if VK_KHR_buffer_device_address || VMA_VULKAN_VERSION >= 1002000
+ #define VMA_BUFFER_DEVICE_ADDRESS 1
+ #else
+ #define VMA_BUFFER_DEVICE_ADDRESS 0
+ #endif
+#endif
+
+// Defined to 1 when VK_EXT_memory_priority device extension is defined in Vulkan headers.
+#if !defined(VMA_MEMORY_PRIORITY)
+ #if VK_EXT_memory_priority
+ #define VMA_MEMORY_PRIORITY 1
+ #else
+ #define VMA_MEMORY_PRIORITY 0
+ #endif
+#endif
+
+// Defined to 1 when VK_KHR_external_memory device extension is defined in Vulkan headers.
+#if !defined(VMA_EXTERNAL_MEMORY)
+ #if VK_KHR_external_memory
+ #define VMA_EXTERNAL_MEMORY 1
+ #else
+ #define VMA_EXTERNAL_MEMORY 0
+ #endif
+#endif
+
// Define these macros to decorate all public functions with additional code,
// before and after returned type, appropriately. This may be useful for
-// exporing the functions when compiling VMA as a separate library. Example:
+// exporting the functions when compiling VMA as a separate library. Example:
// #define VMA_CALL_PRE __declspec(dllexport)
// #define VMA_CALL_POST __cdecl
#ifndef VMA_CALL_PRE
@@ -1829,6 +2146,59 @@ available through VmaAllocatorCreateInfo::pRecordSettings.
#define VMA_CALL_POST
#endif
+// Define this macro to decorate pointers with an attribute specifying the
+// length of the array they point to if they are not null.
+//
+// The length may be one of
+// - The name of another parameter in the argument list where the pointer is declared
+// - The name of another member in the struct where the pointer is declared
+// - The name of a member of a struct type, meaning the value of that member in
+// the context of the call. For example
+// VMA_LEN_IF_NOT_NULL("VkPhysicalDeviceMemoryProperties::memoryHeapCount"),
+// this means the number of memory heaps available in the device associated
+// with the VmaAllocator being dealt with.
+#ifndef VMA_LEN_IF_NOT_NULL
+ #define VMA_LEN_IF_NOT_NULL(len)
+#endif
+
+// The VMA_NULLABLE macro is defined to be _Nullable when compiling with Clang.
+// see: https://clang.llvm.org/docs/AttributeReference.html#nullable
+#ifndef VMA_NULLABLE
+ #ifdef __clang__
+ #define VMA_NULLABLE _Nullable
+ #else
+ #define VMA_NULLABLE
+ #endif
+#endif
+
+// The VMA_NOT_NULL macro is defined to be _Nonnull when compiling with Clang.
+// see: https://clang.llvm.org/docs/AttributeReference.html#nonnull
+#ifndef VMA_NOT_NULL
+ #ifdef __clang__
+ #define VMA_NOT_NULL _Nonnull
+ #else
+ #define VMA_NOT_NULL
+ #endif
+#endif
+
+// If non-dispatchable handles are represented as pointers then we can give
+// them nullability annotations
+#ifndef VMA_NOT_NULL_NON_DISPATCHABLE
+ #if defined(__LP64__) || defined(_WIN64) || (defined(__x86_64__) && !defined(__ILP32__) ) || defined(_M_X64) || defined(__ia64) || defined (_M_IA64) || defined(__aarch64__) || defined(__powerpc64__)
+ #define VMA_NOT_NULL_NON_DISPATCHABLE VMA_NOT_NULL
+ #else
+ #define VMA_NOT_NULL_NON_DISPATCHABLE
+ #endif
+#endif
+
+#ifndef VMA_NULLABLE_NON_DISPATCHABLE
+ #if defined(__LP64__) || defined(_WIN64) || (defined(__x86_64__) && !defined(__ILP32__) ) || defined(_M_X64) || defined(__ia64) || defined (_M_IA64) || defined(__aarch64__) || defined(__powerpc64__)
+ #define VMA_NULLABLE_NON_DISPATCHABLE VMA_NULLABLE
+ #else
+ #define VMA_NULLABLE_NON_DISPATCHABLE
+ #endif
+#endif
+
/** \struct VmaAllocator
\brief Represents main object of this library initialized.
@@ -1842,16 +2212,18 @@ VK_DEFINE_HANDLE(VmaAllocator)
/// Callback function called after successful vkAllocateMemory.
typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
- VmaAllocator allocator,
- uint32_t memoryType,
- VkDeviceMemory memory,
- VkDeviceSize size);
+ VmaAllocator VMA_NOT_NULL allocator,
+ uint32_t memoryType,
+ VkDeviceMemory VMA_NOT_NULL_NON_DISPATCHABLE memory,
+ VkDeviceSize size,
+ void* VMA_NULLABLE pUserData);
/// Callback function called before vkFreeMemory.
typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
- VmaAllocator allocator,
- uint32_t memoryType,
- VkDeviceMemory memory,
- VkDeviceSize size);
+ VmaAllocator VMA_NOT_NULL allocator,
+ uint32_t memoryType,
+ VkDeviceMemory VMA_NOT_NULL_NON_DISPATCHABLE memory,
+ VkDeviceSize size,
+ void* VMA_NULLABLE pUserData);
/** \brief Set of callbacks that the library will call for `vkAllocateMemory` and `vkFreeMemory`.
@@ -1862,9 +2234,11 @@ Used in VmaAllocatorCreateInfo::pDeviceMemoryCallbacks.
*/
typedef struct VmaDeviceMemoryCallbacks {
/// Optional, can be null.
- PFN_vmaAllocateDeviceMemoryFunction pfnAllocate;
+ PFN_vmaAllocateDeviceMemoryFunction VMA_NULLABLE pfnAllocate;
+ /// Optional, can be null.
+ PFN_vmaFreeDeviceMemoryFunction VMA_NULLABLE pfnFree;
/// Optional, can be null.
- PFN_vmaFreeDeviceMemoryFunction pfnFree;
+ void* VMA_NULLABLE pUserData;
} VmaDeviceMemoryCallbacks;
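The new `pUserData` member is passed through to both callbacks, so per-allocator state can be threaded in without globals; a sketch assuming a user-defined `MyMemoryStats` struct:

\code
struct MyMemoryStats { VkDeviceSize totalAllocated = 0; };

static void VKAPI_PTR MyAllocateCallback(VmaAllocator allocator, uint32_t memoryType,
    VkDeviceMemory memory, VkDeviceSize size, void* pUserData)
{
    ((MyMemoryStats*)pUserData)->totalAllocated += size;
}

MyMemoryStats stats;
VmaDeviceMemoryCallbacks callbacks = {};
callbacks.pfnAllocate = MyAllocateCallback;
callbacks.pUserData = &stats;
// Assign to VmaAllocatorCreateInfo::pDeviceMemoryCallbacks before vmaCreateAllocator().
\endcode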
/// Flags for created #VmaAllocator.
@@ -1879,7 +2253,7 @@ typedef enum VmaAllocatorCreateFlagBits {
The flag works only if VmaAllocatorCreateInfo::vulkanApiVersion `== VK_API_VERSION_1_0`.
When it's `VK_API_VERSION_1_1`, the flag is ignored because the extension has been promoted to Vulkan 1.1.
- Using this extenion will automatically allocate dedicated blocks of memory for
+ Using this extension will automatically allocate dedicated blocks of memory for
some buffers and images instead of suballocating place for them out of bigger
memory blocks (as if you explicitly used #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT
flag) when it is recommended by the driver. It may improve performance on some
@@ -1926,6 +2300,59 @@ typedef enum VmaAllocatorCreateFlagBits {
be more accurate than an estimation used by the library otherwise.
*/
VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT = 0x00000008,
+ /**
+ Enables usage of VK_AMD_device_coherent_memory extension.
+
+ You may set this flag only if you:
+
+ - found out that this device extension is supported and enabled it while creating Vulkan device passed as VmaAllocatorCreateInfo::device,
+ - checked that `VkPhysicalDeviceCoherentMemoryFeaturesAMD::deviceCoherentMemory` is true and set it while creating the Vulkan device,
+ - want it to be used internally by this library.
+
+ The extension and accompanying device feature provide access to memory types with
+ `VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD` and `VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD` flags.
+ They are useful mostly for writing breadcrumb markers - a common method for debugging GPU crash/hang/TDR.
+
+ When the extension is not enabled, such memory types are still enumerated, but their usage is illegal.
+ To protect from this error, if you don't create the allocator with this flag, it will refuse to allocate any memory or create a custom pool in such memory type,
+ returning `VK_ERROR_FEATURE_NOT_PRESENT`.
+ */
+ VMA_ALLOCATOR_CREATE_AMD_DEVICE_COHERENT_MEMORY_BIT = 0x00000010,
+ /**
+ Enables usage of "buffer device address" feature, which allows you to use function
+ `vkGetBufferDeviceAddress*` to get raw GPU pointer to a buffer and pass it for usage inside a shader.
+
+ You may set this flag only if you:
+
+ 1. (For Vulkan version < 1.2) Found as available and enabled device extension
+ VK_KHR_buffer_device_address.
+ This extension is promoted to core Vulkan 1.2.
+ 2. Found as available and enabled device feature `VkPhysicalDeviceBufferDeviceAddressFeatures::bufferDeviceAddress`.
+
+ When this flag is set, you can create buffers with `VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT` using VMA.
+ The library automatically adds `VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT` to
+ allocated memory blocks wherever it might be needed.
+
+ For more information, see documentation chapter \ref enabling_buffer_device_address.
+ */
+ VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT = 0x00000020,
+ /**
+ Enables usage of VK_EXT_memory_priority extension in the library.
+
+ You may set this flag only if you found available and enabled this device extension,
+ along with `VkPhysicalDeviceMemoryPriorityFeaturesEXT::memoryPriority == VK_TRUE`,
+ while creating Vulkan device passed as VmaAllocatorCreateInfo::device.
+
+ When this flag is used, VmaAllocationCreateInfo::priority and VmaPoolCreateInfo::priority
+ are used to set priorities of allocated Vulkan memory. Without it, these variables are ignored.
+
+ A priority must be a floating-point value between 0 and 1, indicating the priority of the allocation relative to other memory allocations.
+ Larger values are higher priority. The granularity of the priorities is implementation-dependent.
+ It is automatically passed to every call to `vkAllocateMemory` done by the library using structure `VkMemoryPriorityAllocateInfoEXT`.
+ The value to be used for default priority is 0.5.
+ For more details, see the documentation of the VK_EXT_memory_priority extension.
+ */
+ VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT = 0x00000040,
VMA_ALLOCATOR_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaAllocatorCreateFlagBits;
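A sketch of opting in and raising the priority of one allocation; VmaAllocationCreateInfo::priority is assumed to exist alongside this flag, per the description above:

\code
allocatorInfo.flags |= VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT;
// ... create the allocator ...

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
allocCreateInfo.priority = 0.9f; // e.g. a render target that should stay resident
\endcode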
@@ -1936,33 +2363,33 @@ typedef VkFlags VmaAllocatorCreateFlags;
Used in VmaAllocatorCreateInfo::pVulkanFunctions.
*/
typedef struct VmaVulkanFunctions {
- PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
- PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
- PFN_vkAllocateMemory vkAllocateMemory;
- PFN_vkFreeMemory vkFreeMemory;
- PFN_vkMapMemory vkMapMemory;
- PFN_vkUnmapMemory vkUnmapMemory;
- PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;
- PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;
- PFN_vkBindBufferMemory vkBindBufferMemory;
- PFN_vkBindImageMemory vkBindImageMemory;
- PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
- PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
- PFN_vkCreateBuffer vkCreateBuffer;
- PFN_vkDestroyBuffer vkDestroyBuffer;
- PFN_vkCreateImage vkCreateImage;
- PFN_vkDestroyImage vkDestroyImage;
- PFN_vkCmdCopyBuffer vkCmdCopyBuffer;
+ PFN_vkGetPhysicalDeviceProperties VMA_NULLABLE vkGetPhysicalDeviceProperties;
+ PFN_vkGetPhysicalDeviceMemoryProperties VMA_NULLABLE vkGetPhysicalDeviceMemoryProperties;
+ PFN_vkAllocateMemory VMA_NULLABLE vkAllocateMemory;
+ PFN_vkFreeMemory VMA_NULLABLE vkFreeMemory;
+ PFN_vkMapMemory VMA_NULLABLE vkMapMemory;
+ PFN_vkUnmapMemory VMA_NULLABLE vkUnmapMemory;
+ PFN_vkFlushMappedMemoryRanges VMA_NULLABLE vkFlushMappedMemoryRanges;
+ PFN_vkInvalidateMappedMemoryRanges VMA_NULLABLE vkInvalidateMappedMemoryRanges;
+ PFN_vkBindBufferMemory VMA_NULLABLE vkBindBufferMemory;
+ PFN_vkBindImageMemory VMA_NULLABLE vkBindImageMemory;
+ PFN_vkGetBufferMemoryRequirements VMA_NULLABLE vkGetBufferMemoryRequirements;
+ PFN_vkGetImageMemoryRequirements VMA_NULLABLE vkGetImageMemoryRequirements;
+ PFN_vkCreateBuffer VMA_NULLABLE vkCreateBuffer;
+ PFN_vkDestroyBuffer VMA_NULLABLE vkDestroyBuffer;
+ PFN_vkCreateImage VMA_NULLABLE vkCreateImage;
+ PFN_vkDestroyImage VMA_NULLABLE vkDestroyImage;
+ PFN_vkCmdCopyBuffer VMA_NULLABLE vkCmdCopyBuffer;
#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
- PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
- PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
+ PFN_vkGetBufferMemoryRequirements2KHR VMA_NULLABLE vkGetBufferMemoryRequirements2KHR;
+ PFN_vkGetImageMemoryRequirements2KHR VMA_NULLABLE vkGetImageMemoryRequirements2KHR;
#endif
#if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000
- PFN_vkBindBufferMemory2KHR vkBindBufferMemory2KHR;
- PFN_vkBindImageMemory2KHR vkBindImageMemory2KHR;
+ PFN_vkBindBufferMemory2KHR VMA_NULLABLE vkBindBufferMemory2KHR;
+ PFN_vkBindImageMemory2KHR VMA_NULLABLE vkBindImageMemory2KHR;
#endif
#if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000
- PFN_vkGetPhysicalDeviceMemoryProperties2KHR vkGetPhysicalDeviceMemoryProperties2KHR;
+ PFN_vkGetPhysicalDeviceMemoryProperties2KHR VMA_NULLABLE vkGetPhysicalDeviceMemoryProperties2KHR;
#endif
} VmaVulkanFunctions;
@@ -1974,7 +2401,7 @@ typedef enum VmaRecordFlagBits {
It may degrade performance though.
*/
VMA_RECORD_FLUSH_AFTER_CALL_BIT = 0x00000001,
-
+
VMA_RECORD_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaRecordFlagBits;
typedef VkFlags VmaRecordFlags;
@@ -1991,7 +2418,7 @@ typedef struct VmaRecordSettings
It will be opened for the whole time #VmaAllocator object is alive.
If opening this file fails, creation of the whole allocator object fails.
*/
- const char* pFilePath;
+ const char* VMA_NOT_NULL pFilePath;
} VmaRecordSettings;
/// Description of an Allocator to be created.
@@ -2001,19 +2428,19 @@ typedef struct VmaAllocatorCreateInfo
VmaAllocatorCreateFlags flags;
/// Vulkan physical device.
/** It must be valid throughout whole lifetime of created allocator. */
- VkPhysicalDevice physicalDevice;
+ VkPhysicalDevice VMA_NOT_NULL physicalDevice;
/// Vulkan device.
/** It must be valid throughout whole lifetime of created allocator. */
- VkDevice device;
+ VkDevice VMA_NOT_NULL device;
/// Preferred size of a single `VkDeviceMemory` block to be allocated from large heaps > 1 GiB. Optional.
/** Set to 0 to use default, which is currently 256 MiB. */
VkDeviceSize preferredLargeHeapBlockSize;
/// Custom CPU memory allocation callbacks. Optional.
/** Optional, can be null. When specified, will also be used for all CPU-side memory allocations. */
- const VkAllocationCallbacks* pAllocationCallbacks;
+ const VkAllocationCallbacks* VMA_NULLABLE pAllocationCallbacks;
/// Informative callbacks for `vkAllocateMemory`, `vkFreeMemory`. Optional.
/** Optional, can be null. */
- const VmaDeviceMemoryCallbacks* pDeviceMemoryCallbacks;
+ const VmaDeviceMemoryCallbacks* VMA_NULLABLE pDeviceMemoryCallbacks;
/** \brief Maximum number of additional frames that are in use at the same time as current frame.
This value is used only when you make allocations with
@@ -2052,67 +2479,100 @@ typedef struct VmaAllocatorCreateInfo
blocks to system RAM. This driver behavior can also be controlled using
VK_AMD_memory_overallocation_behavior extension.
*/
- const VkDeviceSize* pHeapSizeLimit;
- /** \brief Pointers to Vulkan functions. Can be null if you leave define `VMA_STATIC_VULKAN_FUNCTIONS 1`.
-
- If you leave define `VMA_STATIC_VULKAN_FUNCTIONS 1` in configuration section,
- you can pass null as this member, because the library will fetch pointers to
- Vulkan functions internally in a static way, like:
+ const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL("VkPhysicalDeviceMemoryProperties::memoryHeapCount") pHeapSizeLimit;
- vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
+ /** \brief Pointers to Vulkan functions. Can be null.
- Fill this member if you want to provide your own pointers to Vulkan functions,
- e.g. fetched using `vkGetInstanceProcAddr()` and `vkGetDeviceProcAddr()`.
+ For details see [Pointers to Vulkan functions](@ref config_Vulkan_functions).
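+
+ A sketch of filling it manually, e.g. under `VK_NO_PROTOTYPES` (hypothetical `device` and
+ `allocatorInfo` variables; only two members shown, the remaining ones are fetched the same way):
+
+     VmaVulkanFunctions vulkanFunctions = {};
+     vulkanFunctions.vkAllocateMemory = (PFN_vkAllocateMemory)vkGetDeviceProcAddr(device, "vkAllocateMemory");
+     vulkanFunctions.vkFreeMemory = (PFN_vkFreeMemory)vkGetDeviceProcAddr(device, "vkFreeMemory");
+     // ...fetch every remaining member likewise, then:
+     allocatorInfo.pVulkanFunctions = &vulkanFunctions;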
*/
- const VmaVulkanFunctions* pVulkanFunctions;
+ const VmaVulkanFunctions* VMA_NULLABLE pVulkanFunctions;
/** \brief Parameters for recording of VMA calls. Can be null.
If not null, it enables recording of calls to VMA functions to a file.
If support for recording is not enabled using `VMA_RECORDING_ENABLED` macro,
creation of the allocator object fails with `VK_ERROR_FEATURE_NOT_PRESENT`.
*/
- const VmaRecordSettings* pRecordSettings;
- /** \brief Optional handle to Vulkan instance object.
+ const VmaRecordSettings* VMA_NULLABLE pRecordSettings;
+ /** \brief Handle to Vulkan instance object.
- Optional, can be null. Must be set if #VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT flas is used
- or if `vulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)`.
+ Starting from version 3.0.0 this member is no longer optional; it must be set!
*/
- VkInstance instance;
+ VkInstance VMA_NOT_NULL instance;
/** \brief Optional. The highest version of Vulkan that the application is designed to use.
-
+
It must be a value in the format as created by macro `VK_MAKE_VERSION` or a constant like: `VK_API_VERSION_1_1`, `VK_API_VERSION_1_0`.
The patch version number specified is ignored. Only the major and minor versions are considered.
- It must be less or euqal (preferably equal) to value as passed to `vkCreateInstance` as `VkApplicationInfo::apiVersion`.
- Only versions 1.0 and 1.1 are supported by the current implementation.
+ It must be less than or equal (preferably equal) to the value passed to `vkCreateInstance` as `VkApplicationInfo::apiVersion`.
+ Only versions 1.0, 1.1, 1.2 are supported by the current implementation.
Leaving it initialized to zero is equivalent to `VK_API_VERSION_1_0`.
*/
uint32_t vulkanApiVersion;
+#if VMA_EXTERNAL_MEMORY
+ /** \brief Either null or a pointer to an array of external memory handle types for each Vulkan memory type.
+
+ If not NULL, it must be a pointer to an array of `VkPhysicalDeviceMemoryProperties::memoryTypeCount`
+ elements, defining external memory handle types of particular Vulkan memory type,
+ to be passed using `VkExportMemoryAllocateInfoKHR`.
+
+ Any of the elements may be equal to 0, which means not to use `VkExportMemoryAllocateInfoKHR` on this memory type.
+ This is also the default in case of `pTypeExternalMemoryHandleTypes` = NULL.
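+
+ A sketch, assuming `memProps` was filled by `vkGetPhysicalDeviceMemoryProperties` and an
+ export extension (opaque fd here, as an example) was enabled on the device:
+
+     std::vector<VkExternalMemoryHandleTypeFlagsKHR> handleTypes(
+         memProps.memoryTypeCount, VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT);
+     allocatorInfo.pTypeExternalMemoryHandleTypes = handleTypes.data();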
+ */
+ const VkExternalMemoryHandleTypeFlagsKHR* VMA_NULLABLE VMA_LEN_IF_NOT_NULL("VkPhysicalDeviceMemoryProperties::memoryTypeCount") pTypeExternalMemoryHandleTypes;
+#endif // #if VMA_EXTERNAL_MEMORY
} VmaAllocatorCreateInfo;
/// Creates Allocator object.
VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAllocator(
- const VmaAllocatorCreateInfo* pCreateInfo,
- VmaAllocator* pAllocator);
+ const VmaAllocatorCreateInfo* VMA_NOT_NULL pCreateInfo,
+ VmaAllocator VMA_NULLABLE * VMA_NOT_NULL pAllocator);
/// Destroys allocator object.
VMA_CALL_PRE void VMA_CALL_POST vmaDestroyAllocator(
- VmaAllocator allocator);
+ VmaAllocator VMA_NULLABLE allocator);
+
+/** \brief Information about existing #VmaAllocator object.
+*/
+typedef struct VmaAllocatorInfo
+{
+ /** \brief Handle to Vulkan instance object.
+
+ This is the same value as has been passed through VmaAllocatorCreateInfo::instance.
+ */
+ VkInstance VMA_NOT_NULL instance;
+ /** \brief Handle to Vulkan physical device object.
+
+ This is the same value as has been passed through VmaAllocatorCreateInfo::physicalDevice.
+ */
+ VkPhysicalDevice VMA_NOT_NULL physicalDevice;
+ /** \brief Handle to Vulkan device object.
+
+ This is the same value as has been passed through VmaAllocatorCreateInfo::device.
+ */
+ VkDevice VMA_NOT_NULL device;
+} VmaAllocatorInfo;
+
+/** \brief Returns information about existing #VmaAllocator object - handle to Vulkan device etc.
+
+It might be useful if you want to keep just the #VmaAllocator handle and fetch other required handles to
+`VkPhysicalDevice`, `VkDevice` etc. every time using this function.
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocatorInfo(VmaAllocator VMA_NOT_NULL allocator, VmaAllocatorInfo* VMA_NOT_NULL pAllocatorInfo);
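+
+/*
+Usage sketch (hypothetical `allocator`):
+
+    VmaAllocatorInfo info;
+    vmaGetAllocatorInfo(allocator, &info);
+    VkDevice device = info.device; // Same handle as passed in VmaAllocatorCreateInfo::device.
+*/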
/**
PhysicalDeviceProperties are fetched from physicalDevice by the allocator.
You can access it here, without fetching it again on your own.
*/
VMA_CALL_PRE void VMA_CALL_POST vmaGetPhysicalDeviceProperties(
- VmaAllocator allocator,
- const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);
+ VmaAllocator VMA_NOT_NULL allocator,
+ const VkPhysicalDeviceProperties* VMA_NULLABLE * VMA_NOT_NULL ppPhysicalDeviceProperties);
/**
PhysicalDeviceMemoryProperties are fetched from physicalDevice by the allocator.
You can access it here, without fetching it again on your own.
*/
VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryProperties(
- VmaAllocator allocator,
- const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);
+ VmaAllocator VMA_NOT_NULL allocator,
+ const VkPhysicalDeviceMemoryProperties* VMA_NULLABLE * VMA_NOT_NULL ppPhysicalDeviceMemoryProperties);
/**
\brief Given Memory Type Index, returns Property Flags of this memory type.
@@ -2121,9 +2581,9 @@ This is just a convenience function. Same information can be obtained using
vmaGetMemoryProperties().
*/
VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryTypeProperties(
- VmaAllocator allocator,
+ VmaAllocator VMA_NOT_NULL allocator,
uint32_t memoryTypeIndex,
- VkMemoryPropertyFlags* pFlags);
+ VkMemoryPropertyFlags* VMA_NOT_NULL pFlags);
/** \brief Sets index of the current frame.
@@ -2134,7 +2594,7 @@ when a new frame begins. Allocations queried using vmaGetAllocationInfo() cannot
become lost in the current frame.
*/
VMA_CALL_PRE void VMA_CALL_POST vmaSetCurrentFrameIndex(
- VmaAllocator allocator,
+ VmaAllocator VMA_NOT_NULL allocator,
uint32_t frameIndex);
/** \brief Calculated statistics of memory usage in entire allocator.
@@ -2173,8 +2633,8 @@ Note that when using allocator from multiple threads, returned information may i
become outdated.
*/
VMA_CALL_PRE void VMA_CALL_POST vmaCalculateStats(
- VmaAllocator allocator,
- VmaStats* pStats);
+ VmaAllocator VMA_NOT_NULL allocator,
+ VmaStats* VMA_NOT_NULL pStats);
/** \brief Statistics of current memory usage and available budget, in bytes, for specific memory heap.
*/
@@ -2183,32 +2643,32 @@ typedef struct VmaBudget
/** \brief Sum size of all `VkDeviceMemory` blocks allocated from particular heap, in bytes.
*/
VkDeviceSize blockBytes;
-
+
/** \brief Sum size of all allocations created in particular heap, in bytes.
-
+
Usually less than or equal to `blockBytes`.
Difference `blockBytes - allocationBytes` is the amount of memory allocated but unused -
available for new allocations or wasted due to fragmentation.
-
+
It might be greater than `blockBytes` if there are some allocations in lost state, as they account
to this value as well.
*/
VkDeviceSize allocationBytes;
-
+
/** \brief Estimated current memory usage of the program, in bytes.
-
+
Fetched from system using `VK_EXT_memory_budget` extension if enabled.
-
+
It might be different than `blockBytes` (usually higher) due to additional implicit objects
also occupying the memory, like swapchain, pipelines, descriptor heaps, command buffers, or
`VkDeviceMemory` blocks allocated outside of this library, if any.
*/
VkDeviceSize usage;
-
+
/** \brief Estimated amount of memory available to the program, in bytes.
-
+
Fetched from system using `VK_EXT_memory_budget` extension if enabled.
-
+
It might be different (most probably smaller) than `VkMemoryHeap::size[heapIndex]` due to factors
external to the program, like other programs also consuming system resources.
Difference `budget - usage` is the amount of additional memory that can probably
@@ -2228,8 +2688,8 @@ Note that when using allocator from multiple threads, returned information may i
become outdated.
*/
VMA_CALL_PRE void VMA_CALL_POST vmaGetBudget(
- VmaAllocator allocator,
- VmaBudget* pBudget);
+ VmaAllocator VMA_NOT_NULL allocator,
+ VmaBudget* VMA_NOT_NULL pBudget);
#ifndef VMA_STATS_STRING_ENABLED
#define VMA_STATS_STRING_ENABLED 1
@@ -2241,13 +2701,13 @@ VMA_CALL_PRE void VMA_CALL_POST vmaGetBudget(
/** @param[out] ppStatsString Must be freed using vmaFreeStatsString() function.
*/
VMA_CALL_PRE void VMA_CALL_POST vmaBuildStatsString(
- VmaAllocator allocator,
- char** ppStatsString,
+ VmaAllocator VMA_NOT_NULL allocator,
+ char* VMA_NULLABLE * VMA_NOT_NULL ppStatsString,
VkBool32 detailedMap);
VMA_CALL_PRE void VMA_CALL_POST vmaFreeStatsString(
- VmaAllocator allocator,
- char* pStatsString);
+ VmaAllocator VMA_NOT_NULL allocator,
+ char* VMA_NULLABLE pStatsString);
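+
+/*
+A sketch of dumping the statistics JSON (hypothetical `allocator`):
+
+    char* statsString = nullptr;
+    vmaBuildStatsString(allocator, &statsString, VK_TRUE); // VK_TRUE = include detailed map.
+    printf("%s\n", statsString);
+    vmaFreeStatsString(allocator, statsString);
+*/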
#endif // #if VMA_STATS_STRING_ENABLED
@@ -2273,7 +2733,7 @@ typedef enum VmaMemoryUsage
It is roughly equivalent of `D3D12_HEAP_TYPE_DEFAULT`.
Usage:
-
+
- Resources written and read by device, e.g. images used as attachments.
- Resources transferred from host once (immutable) or infrequently and read by
device multiple times, e.g. textures to be sampled, vertex buffers, uniform
@@ -2298,7 +2758,7 @@ typedef enum VmaMemoryUsage
Memory that is both mappable on host (guarantees to be `HOST_VISIBLE`) and preferably fast to access by GPU.
CPU access is typically uncached. Writes may be write-combined.
- Usage: Resources written frequently by host (dynamic), read by device. E.g. textures, vertex buffers, uniform buffers updated every frame or every draw call.
+ Usage: Resources written frequently by host (dynamic), read by device. E.g. textures (with LINEAR layout), vertex buffers, uniform buffers updated every frame or every draw call.
*/
VMA_MEMORY_USAGE_CPU_TO_GPU = 3,
/** Memory mappable on host (guarantees to be `HOST_VISIBLE`) and cached.
@@ -2318,7 +2778,7 @@ typedef enum VmaMemoryUsage
VMA_MEMORY_USAGE_CPU_COPY = 5,
/** Lazily allocated GPU memory having `VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT`.
Exists mostly on mobile platforms. Using it on desktop PC or other GPUs with no such memory type present will fail the allocation.
-
+
Usage: Memory for transient attachment images (color attachments, depth attachments etc.), created with `VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT`.
Allocations with this usage are always created as dedicated - it implies #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT.
@@ -2331,28 +2791,28 @@ typedef enum VmaMemoryUsage
/// Flags to be passed as VmaAllocationCreateInfo::flags.
typedef enum VmaAllocationCreateFlagBits {
/** \brief Set this flag if the allocation should have its own memory block.
-
+
Use it for special, big resources, like fullscreen images used as attachments.
-
+
You should not use this flag if VmaAllocationCreateInfo::pool is not null.
*/
VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT = 0x00000001,
/** \brief Set this flag to only try to allocate from existing `VkDeviceMemory` blocks and never create new such block.
-
+
If new allocation cannot be placed in any of the existing blocks, allocation
fails with `VK_ERROR_OUT_OF_DEVICE_MEMORY` error.
-
+
You should not use #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT and
#VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT at the same time. It makes no sense.
-
+
If VmaAllocationCreateInfo::pool is not null, this flag is implied and ignored. */
VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT = 0x00000002,
/** \brief Set this flag to use a memory that will be persistently mapped and retrieve pointer to it.
-
+
Pointer to mapped memory will be returned through VmaAllocationInfo::pMappedData.
- Is it valid to use this flag for allocation made from memory type that is not
+ It is valid to use this flag for allocation made from memory type that is not
`HOST_VISIBLE`. This flag is then ignored and memory is not mapped. This is
useful if you need an allocation that is efficient to use on GPU
(`DEVICE_LOCAL`) and still want to map it directly if possible on platforms that
@@ -2445,19 +2905,19 @@ typedef struct VmaAllocationCreateInfo
/// Use #VmaAllocationCreateFlagBits enum.
VmaAllocationCreateFlags flags;
/** \brief Intended usage of memory.
-
+
You can leave #VMA_MEMORY_USAGE_UNKNOWN if you specify memory requirements in other way. \n
If `pool` is not null, this member is ignored.
*/
VmaMemoryUsage usage;
/** \brief Flags that must be set in a Memory Type chosen for an allocation.
-
+
Leave 0 if you specify memory requirements in other way. \n
If `pool` is not null, this member is ignored.*/
VkMemoryPropertyFlags requiredFlags;
/** \brief Flags that preferably should be set in a memory type chosen for an allocation.
-
- Set to 0 if no additional flags are prefered. \n
+
+ Set to 0 if no additional flags are preferred. \n
If `pool` is not null, this member is ignored. */
VkMemoryPropertyFlags preferredFlags;
/** \brief Bitmask containing one bit set for every memory type acceptable for this allocation.
@@ -2473,14 +2933,21 @@ typedef struct VmaAllocationCreateInfo
Leave `VK_NULL_HANDLE` to allocate from default pool. If not null, members:
`usage`, `requiredFlags`, `preferredFlags`, `memoryTypeBits` are ignored.
*/
- VmaPool pool;
+ VmaPool VMA_NULLABLE pool;
/** \brief Custom general-purpose pointer that will be stored in #VmaAllocation, can be read as VmaAllocationInfo::pUserData and changed using vmaSetAllocationUserData().
-
+
If #VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT is used, it must be either
null or pointer to a null-terminated string. The string will be then copied to
internal buffer, so it doesn't need to be valid after allocation call.
*/
- void* pUserData;
+ void* VMA_NULLABLE pUserData;
+ /** \brief A floating-point value between 0 and 1, indicating the priority of the allocation relative to other memory allocations.
+
+ It is used only when #VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT flag was used during creation of the #VmaAllocator object
+ and this allocation ends up as dedicated or is explicitly forced as dedicated using #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT.
+ Otherwise, it has the priority of a memory block where it is placed and this variable is ignored.
+ */
+ float priority;
} VmaAllocationCreateInfo;
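+
+/*
+A sketch of a typical fill of this structure, including a name attached via pUserData
+(values are illustrative only):
+
+    VmaAllocationCreateInfo allocCreateInfo = {};
+    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
+    allocCreateInfo.flags = VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT;
+    allocCreateInfo.pUserData = (void*)"MyTexture"; // Copied internally thanks to the flag above.
+*/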
/**
@@ -2500,10 +2967,10 @@ type of resource you want to use it for. Please check parameters of your
resource, like image layout (OPTIMAL versus LINEAR) or mip level count.
*/
VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndex(
- VmaAllocator allocator,
+ VmaAllocator VMA_NOT_NULL allocator,
uint32_t memoryTypeBits,
- const VmaAllocationCreateInfo* pAllocationCreateInfo,
- uint32_t* pMemoryTypeIndex);
+ const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
+ uint32_t* VMA_NOT_NULL pMemoryTypeIndex);
/**
\brief Helps to find memoryTypeIndex, given VkBufferCreateInfo and VmaAllocationCreateInfo.
@@ -2518,10 +2985,10 @@ It is just a convenience function, equivalent to calling:
- `vkDestroyBuffer`
*/
VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForBufferInfo(
- VmaAllocator allocator,
- const VkBufferCreateInfo* pBufferCreateInfo,
- const VmaAllocationCreateInfo* pAllocationCreateInfo,
- uint32_t* pMemoryTypeIndex);
+ VmaAllocator VMA_NOT_NULL allocator,
+ const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo,
+ const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
+ uint32_t* VMA_NOT_NULL pMemoryTypeIndex);
/**
\brief Helps to find memoryTypeIndex, given VkImageCreateInfo and VmaAllocationCreateInfo.
@@ -2536,10 +3003,10 @@ It is just a convenience function, equivalent to calling:
- `vkDestroyImage`
*/
VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForImageInfo(
- VmaAllocator allocator,
- const VkImageCreateInfo* pImageCreateInfo,
- const VmaAllocationCreateInfo* pAllocationCreateInfo,
- uint32_t* pMemoryTypeIndex);
+ VmaAllocator VMA_NOT_NULL allocator,
+ const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo,
+ const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
+ uint32_t* VMA_NOT_NULL pMemoryTypeIndex);
/// Flags to be passed as VmaPoolCreateInfo::flags.
typedef enum VmaPoolCreateFlagBits {
@@ -2626,7 +3093,7 @@ typedef struct VmaPoolCreateInfo {
/** \brief Maximum number of blocks that can be allocated in this pool. Optional.
Set to 0 to use default, which is `SIZE_MAX`, which means no limit.
-
+
Set to same value as VmaPoolCreateInfo::minBlockCount to have fixed amount of memory allocated
throughout whole lifetime of this pool.
*/
@@ -2645,6 +3112,29 @@ typedef struct VmaPoolCreateInfo {
become lost, set this value to 0.
*/
uint32_t frameInUseCount;
+ /** \brief A floating-point value between 0 and 1, indicating the priority of the allocations in this pool relative to other memory allocations.
+
+ It is used only when #VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT flag was used during creation of the #VmaAllocator object.
+ Otherwise, this variable is ignored.
+ */
+ float priority;
+ /** \brief Additional minimum alignment to be used for all allocations created from this pool. Can be 0.
+
+ Leave 0 (default) not to impose any additional alignment. If not 0, it must be a power of two.
+ It can be useful in cases where the alignment returned by Vulkan functions like `vkGetBufferMemoryRequirements` is not enough,
+ e.g. when doing interop with OpenGL.
+ */
+ VkDeviceSize minAllocationAlignment;
+ /** \brief Additional `pNext` chain to be attached to `VkMemoryAllocateInfo` used for every allocation made by this pool. Optional.
+
+ Optional, can be null. If not null, it must point to a `pNext` chain of structures that can be attached to `VkMemoryAllocateInfo`.
+ It can be useful for special needs such as adding `VkExportMemoryAllocateInfoKHR`.
+ Structures pointed by this member must remain alive and unchanged for the whole lifetime of the custom pool.
+
+ Please note that some structures, e.g. `VkMemoryPriorityAllocateInfoEXT`, `VkMemoryDedicatedAllocateInfoKHR`,
+ can be attached automatically by this library when using other, more convenient features of it.
+ */
+ void* VMA_NULLABLE pMemoryAllocateNext;
} VmaPoolCreateInfo;
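+
+/*
+A sketch of creating a custom pool with extra alignment, e.g. for OpenGL interop
+(hypothetical `allocator` and `memoryTypeIndex`, the latter found earlier e.g. with
+vmaFindMemoryTypeIndexForBufferInfo(); the alignment value is illustrative):
+
+    VmaPoolCreateInfo poolCreateInfo = {};
+    poolCreateInfo.memoryTypeIndex = memoryTypeIndex;
+    poolCreateInfo.minAllocationAlignment = 4096;
+    VmaPool pool;
+    vmaCreatePool(allocator, &poolCreateInfo, &pool);
+*/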
/** \brief Describes parameter of existing #VmaPool.
@@ -2681,15 +3171,15 @@ typedef struct VmaPoolStats {
@param[out] pPool Handle to created pool.
*/
VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreatePool(
- VmaAllocator allocator,
- const VmaPoolCreateInfo* pCreateInfo,
- VmaPool* pPool);
+ VmaAllocator VMA_NOT_NULL allocator,
+ const VmaPoolCreateInfo* VMA_NOT_NULL pCreateInfo,
+ VmaPool VMA_NULLABLE * VMA_NOT_NULL pPool);
/** \brief Destroys #VmaPool object and frees Vulkan device memory.
*/
VMA_CALL_PRE void VMA_CALL_POST vmaDestroyPool(
- VmaAllocator allocator,
- VmaPool pool);
+ VmaAllocator VMA_NOT_NULL allocator,
+ VmaPool VMA_NULLABLE pool);
/** \brief Retrieves statistics of existing #VmaPool object.
@@ -2698,9 +3188,9 @@ VMA_CALL_PRE void VMA_CALL_POST vmaDestroyPool(
@param[out] pPoolStats Statistics of specified pool.
*/
VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolStats(
- VmaAllocator allocator,
- VmaPool pool,
- VmaPoolStats* pPoolStats);
+ VmaAllocator VMA_NOT_NULL allocator,
+ VmaPool VMA_NOT_NULL pool,
+ VmaPoolStats* VMA_NOT_NULL pPoolStats);
/** \brief Marks all allocations in given pool as lost if they are not used in current frame or VmaPoolCreateInfo::frameInUseCount back from now.
@@ -2709,9 +3199,9 @@ VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolStats(
@param[out] pLostAllocationCount Number of allocations marked as lost. Optional - pass null if you don't need this information.
*/
VMA_CALL_PRE void VMA_CALL_POST vmaMakePoolAllocationsLost(
- VmaAllocator allocator,
- VmaPool pool,
- size_t* pLostAllocationCount);
+ VmaAllocator VMA_NOT_NULL allocator,
+ VmaPool VMA_NOT_NULL pool,
+ size_t* VMA_NULLABLE pLostAllocationCount);
/** \brief Checks magic number in margins around all allocations in given memory pool in search for corruptions.
@@ -2727,7 +3217,7 @@ Possible return values:
`VMA_ASSERT` is also fired in that case.
- Other value: Error returned by Vulkan, e.g. memory mapping failure.
*/
-VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool);
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckPoolCorruption(VmaAllocator VMA_NOT_NULL allocator, VmaPool VMA_NOT_NULL pool);
/** \brief Retrieves name of a custom pool.
@@ -2736,9 +3226,9 @@ containing name of the pool that was previously set. The pointer becomes invalid
destroyed or its name is changed using vmaSetPoolName().
*/
VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolName(
- VmaAllocator allocator,
- VmaPool pool,
- const char** ppName);
+ VmaAllocator VMA_NOT_NULL allocator,
+ VmaPool VMA_NOT_NULL pool,
+ const char* VMA_NULLABLE * VMA_NOT_NULL ppName);
/** \brief Sets name of a custom pool.
@@ -2746,9 +3236,9 @@ VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolName(
Function makes internal copy of the string, so it can be changed or freed immediately after this call.
*/
VMA_CALL_PRE void VMA_CALL_POST vmaSetPoolName(
- VmaAllocator allocator,
- VmaPool pool,
- const char* pName);
+ VmaAllocator VMA_NOT_NULL allocator,
+ VmaPool VMA_NOT_NULL pool,
+ const char* VMA_NULLABLE pName);
/** \struct VmaAllocation
\brief Represents single memory allocation.
@@ -2780,20 +3270,25 @@ VK_DEFINE_HANDLE(VmaAllocation)
*/
typedef struct VmaAllocationInfo {
/** \brief Memory type index that this allocation was allocated from.
-
+
It never changes.
*/
uint32_t memoryType;
/** \brief Handle to Vulkan memory object.
Same memory object can be shared by multiple allocations.
-
+
It can change after call to vmaDefragment() if this allocation is passed to the function, or if allocation is lost.
If the allocation is lost, it is equal to `VK_NULL_HANDLE`.
*/
- VkDeviceMemory deviceMemory;
- /** \brief Offset into deviceMemory object to the beginning of this allocation, in bytes. (deviceMemory, offset) pair is unique to this allocation.
+ VkDeviceMemory VMA_NULLABLE_NON_DISPATCHABLE deviceMemory;
+ /** \brief Offset in `VkDeviceMemory` object to the beginning of this allocation, in bytes. `(deviceMemory, offset)` pair is unique to this allocation.
+
+ You usually don't need to use this offset. If you create a buffer or an image together with the allocation using e.g. function
+ vmaCreateBuffer(), vmaCreateImage(), functions that operate on these resources refer to the beginning of the buffer or image,
+ not entire device memory block. Functions like vmaMapMemory(), vmaBindBufferMemory() also refer to the beginning of the allocation
+ and apply this offset automatically.
It can change after call to vmaDefragment() if this allocation is passed to the function, or if allocation is lost.
*/
@@ -2801,22 +3296,28 @@ typedef struct VmaAllocationInfo {
/** \brief Size of this allocation, in bytes.
It never changes, unless allocation is lost.
+
+ \note Allocation size returned in this variable may be greater than the size
+ requested for the resource e.g. as `VkBufferCreateInfo::size`. Whole size of the
+ allocation is accessible for operations on memory e.g. using a pointer after
+ mapping with vmaMapMemory(), but operations on the resource e.g. using
+ `vkCmdCopyBuffer` must be limited to the size of the resource.
*/
VkDeviceSize size;
/** \brief Pointer to the beginning of this allocation as mapped data.
If the allocation hasn't been mapped using vmaMapMemory() and hasn't been
- created with #VMA_ALLOCATION_CREATE_MAPPED_BIT flag, this value null.
+ created with #VMA_ALLOCATION_CREATE_MAPPED_BIT flag, this value is null.
It can change after call to vmaMapMemory(), vmaUnmapMemory().
It can also change after call to vmaDefragment() if this allocation is passed to the function.
*/
- void* pMappedData;
+ void* VMA_NULLABLE pMappedData;
/** \brief Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vmaSetAllocationUserData().
It can change after call to vmaSetAllocationUserData() for this allocation.
*/
- void* pUserData;
+ void* VMA_NULLABLE pUserData;
} VmaAllocationInfo;
/** \brief General purpose memory allocation.
@@ -2830,11 +3331,11 @@ It is recommended to use vmaAllocateMemoryForBuffer(), vmaAllocateMemoryForImage
vmaCreateBuffer(), vmaCreateImage() instead whenever possible.
*/
VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemory(
- VmaAllocator allocator,
- const VkMemoryRequirements* pVkMemoryRequirements,
- const VmaAllocationCreateInfo* pCreateInfo,
- VmaAllocation* pAllocation,
- VmaAllocationInfo* pAllocationInfo);
+ VmaAllocator VMA_NOT_NULL allocator,
+ const VkMemoryRequirements* VMA_NOT_NULL pVkMemoryRequirements,
+ const VmaAllocationCreateInfo* VMA_NOT_NULL pCreateInfo,
+ VmaAllocation VMA_NULLABLE * VMA_NOT_NULL pAllocation,
+ VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);
/** \brief General purpose memory allocation for multiple allocation objects at once.
@@ -2856,12 +3357,12 @@ If any allocation fails, all allocations already made within this function call
returned result is not `VK_SUCCESS`, `pAllocation` array is always entirely filled with `VK_NULL_HANDLE`.
*/
VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryPages(
- VmaAllocator allocator,
- const VkMemoryRequirements* pVkMemoryRequirements,
- const VmaAllocationCreateInfo* pCreateInfo,
+ VmaAllocator VMA_NOT_NULL allocator,
+ const VkMemoryRequirements* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pVkMemoryRequirements,
+ const VmaAllocationCreateInfo* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pCreateInfo,
size_t allocationCount,
- VmaAllocation* pAllocations,
- VmaAllocationInfo* pAllocationInfo);
+ VmaAllocation VMA_NULLABLE * VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pAllocations,
+ VmaAllocationInfo* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) pAllocationInfo);
/**
@param[out] pAllocation Handle to allocated memory.
@@ -2870,27 +3371,27 @@ VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryPages(
You should free the memory using vmaFreeMemory().
*/
VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForBuffer(
- VmaAllocator allocator,
- VkBuffer buffer,
- const VmaAllocationCreateInfo* pCreateInfo,
- VmaAllocation* pAllocation,
- VmaAllocationInfo* pAllocationInfo);
+ VmaAllocator VMA_NOT_NULL allocator,
+ VkBuffer VMA_NOT_NULL_NON_DISPATCHABLE buffer,
+ const VmaAllocationCreateInfo* VMA_NOT_NULL pCreateInfo,
+ VmaAllocation VMA_NULLABLE * VMA_NOT_NULL pAllocation,
+ VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);
/// Function similar to vmaAllocateMemoryForBuffer().
VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForImage(
- VmaAllocator allocator,
- VkImage image,
- const VmaAllocationCreateInfo* pCreateInfo,
- VmaAllocation* pAllocation,
- VmaAllocationInfo* pAllocationInfo);
+ VmaAllocator VMA_NOT_NULL allocator,
+ VkImage VMA_NOT_NULL_NON_DISPATCHABLE image,
+ const VmaAllocationCreateInfo* VMA_NOT_NULL pCreateInfo,
+ VmaAllocation VMA_NULLABLE * VMA_NOT_NULL pAllocation,
+ VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);
/** \brief Frees memory previously allocated using vmaAllocateMemory(), vmaAllocateMemoryForBuffer(), or vmaAllocateMemoryForImage().
Passing `VK_NULL_HANDLE` as `allocation` is valid. Such function call is just skipped.
*/
VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemory(
- VmaAllocator allocator,
- VmaAllocation allocation);
+ VmaAllocator VMA_NOT_NULL allocator,
+ const VmaAllocation VMA_NULLABLE allocation);
/** \brief Frees memory and destroys multiple allocations.
@@ -2903,24 +3404,13 @@ Allocations in `pAllocations` array can come from any memory pools and types.
Passing `VK_NULL_HANDLE` as elements of `pAllocations` array is valid. Such entries are just skipped.
*/
VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemoryPages(
- VmaAllocator allocator,
+ VmaAllocator VMA_NOT_NULL allocator,
size_t allocationCount,
- VmaAllocation* pAllocations);
-
-/** \brief Deprecated.
-
-In version 2.2.0 it used to try to change allocation's size without moving or reallocating it.
-In current version it returns `VK_SUCCESS` only if `newSize` equals current allocation's size.
-Otherwise returns `VK_ERROR_OUT_OF_POOL_MEMORY`, indicating that allocation's size could not be changed.
-*/
-VMA_CALL_PRE VkResult VMA_CALL_POST vmaResizeAllocation(
- VmaAllocator allocator,
- VmaAllocation allocation,
- VkDeviceSize newSize);
+ const VmaAllocation VMA_NULLABLE * VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pAllocations);
/** \brief Returns current information about specified allocation and atomically marks it as used in current frame.
-Current paramters of given allocation are returned in `pAllocationInfo`.
+Current parameters of given allocation are returned in `pAllocationInfo`.
This function also atomically "touches" allocation - marks it as used in current frame,
just like vmaTouchAllocation().
@@ -2935,9 +3425,9 @@ you can avoid calling it too often.
- If you just want to check if allocation is not lost, vmaTouchAllocation() will work faster.
*/
VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationInfo(
- VmaAllocator allocator,
- VmaAllocation allocation,
- VmaAllocationInfo* pAllocationInfo);
+ VmaAllocator VMA_NOT_NULL allocator,
+ VmaAllocation VMA_NOT_NULL allocation,
+ VmaAllocationInfo* VMA_NOT_NULL pAllocationInfo);
/** \brief Returns `VK_TRUE` if allocation is not lost and atomically marks it as used in current frame.
@@ -2954,8 +3444,8 @@ If the allocation has been created without #VMA_ALLOCATION_CREATE_CAN_BECOME_LOS
this function always returns `VK_TRUE`.
*/
VMA_CALL_PRE VkBool32 VMA_CALL_POST vmaTouchAllocation(
- VmaAllocator allocator,
- VmaAllocation allocation);
+ VmaAllocator VMA_NOT_NULL allocator,
+ VmaAllocation VMA_NOT_NULL allocation);
/** \brief Sets pUserData in given allocation to new value.
@@ -2971,9 +3461,9 @@ allocation's `pUserData`. It is opaque, so you can use it however you want - e.g
as a pointer, ordinal number or some handle to your own data.
*/
VMA_CALL_PRE void VMA_CALL_POST vmaSetAllocationUserData(
- VmaAllocator allocator,
- VmaAllocation allocation,
- void* pUserData);
+ VmaAllocator VMA_NOT_NULL allocator,
+ VmaAllocation VMA_NOT_NULL allocation,
+ void* VMA_NULLABLE pUserData);
/** \brief Creates new allocation that is in lost state from the beginning.
@@ -2986,15 +3476,15 @@ not bound to any image or buffer. It has size = 0. It cannot be turned into
a real, non-empty allocation.
*/
VMA_CALL_PRE void VMA_CALL_POST vmaCreateLostAllocation(
- VmaAllocator allocator,
- VmaAllocation* pAllocation);
+ VmaAllocator VMA_NOT_NULL allocator,
+ VmaAllocation VMA_NULLABLE * VMA_NOT_NULL pAllocation);
/** \brief Maps memory represented by given allocation and returns pointer to it.
Maps memory represented by given allocation to make it accessible to CPU code.
When succeeded, `*ppData` contains pointer to first byte of this memory.
If the allocation is part of bigger `VkDeviceMemory` block, the pointer is
-correctly offseted to the beginning of region assigned to this particular
+correctly offset to the beginning of region assigned to this particular
allocation.
Mapping is internally reference-counted and synchronized, so despite raw Vulkan
@@ -3028,9 +3518,9 @@ If the allocation is made from a memory types that is not `HOST_COHERENT`,
you also need to use vmaInvalidateAllocation() / vmaFlushAllocation(), as required by Vulkan specification.
*/
VMA_CALL_PRE VkResult VMA_CALL_POST vmaMapMemory(
- VmaAllocator allocator,
- VmaAllocation allocation,
- void** ppData);
+ VmaAllocator VMA_NOT_NULL allocator,
+ VmaAllocation VMA_NOT_NULL allocation,
+ void* VMA_NULLABLE * VMA_NOT_NULL ppData);
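+
+/*
+A sketch of a map-write-unmap cycle (hypothetical `allocator`, `allocation`, `srcData`,
+`srcDataSize`):
+
+    void* mappedData;
+    vmaMapMemory(allocator, allocation, &mappedData);
+    memcpy(mappedData, srcData, srcDataSize);
+    vmaUnmapMemory(allocator, allocation);
+    // Required only if the memory type is not HOST_COHERENT:
+    vmaFlushAllocation(allocator, allocation, 0, VK_WHOLE_SIZE);
+*/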
/** \brief Unmaps memory represented by given allocation, mapped previously using vmaMapMemory().
@@ -3041,8 +3531,8 @@ If the allocation is made from a memory types that is not `HOST_COHERENT`,
you also need to use vmaInvalidateAllocation() / vmaFlushAllocation(), as required by Vulkan specification.
*/
VMA_CALL_PRE void VMA_CALL_POST vmaUnmapMemory(
- VmaAllocator allocator,
- VmaAllocation allocation);
+ VmaAllocator VMA_NOT_NULL allocator,
+ VmaAllocation VMA_NOT_NULL allocation);
/** \brief Flushes memory of given allocation.
@@ -3061,8 +3551,15 @@ Unmap operation doesn't do that automatically.
Warning! `offset` and `size` are relative to the contents of given `allocation`.
If you mean whole allocation, you can pass 0 and `VK_WHOLE_SIZE`, respectively.
Do not pass allocation's offset as `offset`!!!
+
+This function returns the `VkResult` from `vkFlushMappedMemoryRanges` if it is
+called, otherwise `VK_SUCCESS`.
*/
-VMA_CALL_PRE void VMA_CALL_POST vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaFlushAllocation(
+ VmaAllocator VMA_NOT_NULL allocator,
+ VmaAllocation VMA_NOT_NULL allocation,
+ VkDeviceSize offset,
+ VkDeviceSize size);
/** \brief Invalidates memory of given allocation.
@@ -3081,8 +3578,57 @@ Map operation doesn't do that automatically.
Warning! `offset` and `size` are relative to the contents of given `allocation`.
If you mean whole allocation, you can pass 0 and `VK_WHOLE_SIZE`, respectively.
Do not pass allocation's offset as `offset`!!!
+
+This function returns the `VkResult` from `vkInvalidateMappedMemoryRanges` if
+it is called, otherwise `VK_SUCCESS`.
*/
-VMA_CALL_PRE void VMA_CALL_POST vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaInvalidateAllocation(
+ VmaAllocator VMA_NOT_NULL allocator,
+ VmaAllocation VMA_NOT_NULL allocation,
+ VkDeviceSize offset,
+ VkDeviceSize size);
+
+/** \brief Flushes memory of given set of allocations.
+
+Calls `vkFlushMappedMemoryRanges()` for memory associated with given ranges of given allocations.
+For more information, see documentation of vmaFlushAllocation().
+
+\param allocator
+\param allocationCount
+\param allocations
+\param offsets If not null, it must point to an array of offsets of regions to flush, relative to the beginning of respective allocations. Null means all offsets are zero.
+\param sizes If not null, it must point to an array of sizes of regions to flush in respective allocations. Null means `VK_WHOLE_SIZE` for all allocations.
+
+This function returns the `VkResult` from `vkFlushMappedMemoryRanges` if it is
+called, otherwise `VK_SUCCESS`.
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaFlushAllocations(
+ VmaAllocator VMA_NOT_NULL allocator,
+ uint32_t allocationCount,
+ const VmaAllocation VMA_NOT_NULL * VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) allocations,
+ const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) offsets,
+ const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) sizes);
+
+/** \brief Invalidates memory of given set of allocations.
+
+Calls `vkInvalidateMappedMemoryRanges()` for memory associated with given ranges of given allocations.
+For more information, see documentation of vmaInvalidateAllocation().
+
+\param allocator
+\param allocationCount
+\param allocations
+\param offsets If not null, it must point to an array of offsets of regions to invalidate, relative to the beginning of respective allocations. Null means all offsets are zero.
+\param sizes If not null, it must point to an array of sizes of regions to invalidate in respective allocations. Null means `VK_WHOLE_SIZE` for all allocations.
+
+This function returns the `VkResult` from `vkInvalidateMappedMemoryRanges` if it is
+called, otherwise `VK_SUCCESS`.
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaInvalidateAllocations(
+ VmaAllocator VMA_NOT_NULL allocator,
+ uint32_t allocationCount,
+ const VmaAllocation VMA_NOT_NULL * VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) allocations,
+ const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) offsets,
+ const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) sizes);
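+
+/*
+A sketch of flushing two allocations in one call; passing null `offsets`/`sizes` means
+offset 0 and `VK_WHOLE_SIZE` for each (`allocA`, `allocB` are hypothetical):
+
+    VmaAllocation allocs[2] = { allocA, allocB };
+    VkResult res = vmaFlushAllocations(allocator, 2, allocs, nullptr, nullptr);
+*/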
/** \brief Checks magic number in margins around all allocations in given memory types (in both default and custom pools) in search for corruptions.
@@ -3100,7 +3646,7 @@ Possible return values:
`VMA_ASSERT` is also fired in that case.
- Other value: Error returned by Vulkan, e.g. memory mapping failure.
*/
-VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits);
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckCorruption(VmaAllocator VMA_NOT_NULL allocator, uint32_t memoryTypeBits);
/** \struct VmaDefragmentationContext
\brief Opaque object that represents a started defragmentation process.
@@ -3112,6 +3658,7 @@ VK_DEFINE_HANDLE(VmaDefragmentationContext)
/// Flags to be used in vmaDefragmentationBegin(). None at the moment. Reserved for future use.
typedef enum VmaDefragmentationFlagBits {
+ VMA_DEFRAGMENTATION_FLAG_INCREMENTAL = 0x1,
VMA_DEFRAGMENTATION_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaDefragmentationFlagBits;
typedef VkFlags VmaDefragmentationFlags;
@@ -3135,13 +3682,13 @@ typedef struct VmaDefragmentationInfo2 {
It is safe to pass allocations that are in the lost state - they are ignored.
All allocations not present in this array are considered non-moveable during this defragmentation.
*/
- VmaAllocation* pAllocations;
+ const VmaAllocation VMA_NOT_NULL * VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) pAllocations;
/** \brief Optional, output. Pointer to array that will be filled with information whether the allocation at certain index has been changed during defragmentation.
The array should have `allocationCount` elements.
You can pass null if you are not interested in this information.
*/
- VkBool32* pAllocationsChanged;
+ VkBool32* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) pAllocationsChanged;
/** \brief Number of pools in `pPools` array.
*/
uint32_t poolCount;
@@ -3160,9 +3707,9 @@ typedef struct VmaDefragmentationInfo2 {
Using this array is equivalent to specifying all allocations from the pools in `pAllocations`.
It might be more efficient.
*/
- VmaPool* pPools;
+ const VmaPool VMA_NOT_NULL * VMA_NULLABLE VMA_LEN_IF_NOT_NULL(poolCount) pPools;
/** \brief Maximum total number of bytes that can be copied while moving allocations to different places using transfers on CPU side, like `memcpy()`, `memmove()`.
-
+
`VK_WHOLE_SIZE` means no limit.
*/
VkDeviceSize maxCpuBytesToMove;
@@ -3172,7 +3719,7 @@ typedef struct VmaDefragmentationInfo2 {
*/
uint32_t maxCpuAllocationsToMove;
/** \brief Maximum total number of bytes that can be copied while moving allocations to different places using transfers on GPU side, posted to `commandBuffer`.
-
+
`VK_WHOLE_SIZE` means no limit.
*/
VkDeviceSize maxGpuBytesToMove;
@@ -3189,16 +3736,31 @@ typedef struct VmaDefragmentationInfo2 {
Passing null means that only CPU defragmentation will be performed.
*/
- VkCommandBuffer commandBuffer;
+ VkCommandBuffer VMA_NULLABLE commandBuffer;
} VmaDefragmentationInfo2;
+typedef struct VmaDefragmentationPassMoveInfo {
+ VmaAllocation VMA_NOT_NULL allocation;
+ VkDeviceMemory VMA_NOT_NULL_NON_DISPATCHABLE memory;
+ VkDeviceSize offset;
+} VmaDefragmentationPassMoveInfo;
+
+/** \brief Parameters for incremental defragmentation steps.
+
+To be used with function vmaBeginDefragmentationPass().
+*/
+typedef struct VmaDefragmentationPassInfo {
+ uint32_t moveCount;
+ VmaDefragmentationPassMoveInfo* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(moveCount) pMoves;
+} VmaDefragmentationPassInfo;
+
/** \brief Deprecated. Optional configuration parameters to be passed to function vmaDefragment().
\deprecated This is a part of the old interface. It is recommended to use structure #VmaDefragmentationInfo2 and function vmaDefragmentationBegin() instead.
*/
typedef struct VmaDefragmentationInfo {
/** \brief Maximum total number of bytes that can be copied while moving allocations to different places.
-
+
Default is `VK_WHOLE_SIZE`, which means no limit.
*/
VkDeviceSize maxBytesToMove;
@@ -3251,10 +3813,10 @@ For more information and important limitations regarding defragmentation, see do
[Defragmentation](@ref defragmentation).
*/
VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragmentationBegin(
- VmaAllocator allocator,
- const VmaDefragmentationInfo2* pInfo,
- VmaDefragmentationStats* pStats,
- VmaDefragmentationContext *pContext);
+ VmaAllocator VMA_NOT_NULL allocator,
+ const VmaDefragmentationInfo2* VMA_NOT_NULL pInfo,
+ VmaDefragmentationStats* VMA_NULLABLE pStats,
+ VmaDefragmentationContext VMA_NULLABLE * VMA_NOT_NULL pContext);
/** \brief Ends defragmentation process.
@@ -3262,8 +3824,18 @@ Use this function to finish defragmentation started by vmaDefragmentationBegin()
It is safe to pass `context == null`. The function then does nothing.
*/
VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragmentationEnd(
- VmaAllocator allocator,
- VmaDefragmentationContext context);
+ VmaAllocator VMA_NOT_NULL allocator,
+ VmaDefragmentationContext VMA_NULLABLE context);
+
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaBeginDefragmentationPass(
+ VmaAllocator VMA_NOT_NULL allocator,
+ VmaDefragmentationContext VMA_NULLABLE context,
+ VmaDefragmentationPassInfo* VMA_NOT_NULL pInfo
+);
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaEndDefragmentationPass(
+ VmaAllocator VMA_NOT_NULL allocator,
+ VmaDefragmentationContext VMA_NULLABLE context
+);
/** \brief Deprecated. Compacts memory by moving allocations.
@@ -3306,12 +3878,12 @@ you should measure that on your platform.
For more information, see [Defragmentation](@ref defragmentation) chapter.
*/
VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragment(
- VmaAllocator allocator,
- VmaAllocation* pAllocations,
+ VmaAllocator VMA_NOT_NULL allocator,
+ const VmaAllocation VMA_NOT_NULL * VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pAllocations,
size_t allocationCount,
- VkBool32* pAllocationsChanged,
- const VmaDefragmentationInfo *pDefragmentationInfo,
- VmaDefragmentationStats* pDefragmentationStats);
+ VkBool32* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) pAllocationsChanged,
+ const VmaDefragmentationInfo* VMA_NULLABLE pDefragmentationInfo,
+ VmaDefragmentationStats* VMA_NULLABLE pDefragmentationStats);
/** \brief Binds buffer to allocation.
@@ -3326,26 +3898,26 @@ allocations, calls to `vkBind*Memory()` or `vkMapMemory()` won't happen from mul
It is recommended to use function vmaCreateBuffer() instead of this one.
*/
VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory(
- VmaAllocator allocator,
- VmaAllocation allocation,
- VkBuffer buffer);
+ VmaAllocator VMA_NOT_NULL allocator,
+ VmaAllocation VMA_NOT_NULL allocation,
+ VkBuffer VMA_NOT_NULL_NON_DISPATCHABLE buffer);
/** \brief Binds buffer to allocation with additional parameters.
-@param allocationLocalOffset Additional offset to be added while binding, relative to the beginnig of the `allocation`. Normally it should be 0.
+@param allocationLocalOffset Additional offset to be added while binding, relative to the beginning of the `allocation`. Normally it should be 0.
@param pNext A chain of structures to be attached to `VkBindBufferMemoryInfoKHR` structure used internally. Normally it should be null.
This function is similar to vmaBindBufferMemory(), but it provides additional parameters.
If `pNext` is not null, #VmaAllocator object must have been created with #VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT flag
-or with VmaAllocatorCreateInfo::vulkanApiVersion `== VK_API_VERSION_1_1`. Otherwise the call fails.
+or with VmaAllocatorCreateInfo::vulkanApiVersion `>= VK_API_VERSION_1_1`. Otherwise the call fails.
*/
VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory2(
- VmaAllocator allocator,
- VmaAllocation allocation,
+ VmaAllocator VMA_NOT_NULL allocator,
+ VmaAllocation VMA_NOT_NULL allocation,
VkDeviceSize allocationLocalOffset,
- VkBuffer buffer,
- const void* pNext);
+ VkBuffer VMA_NOT_NULL_NON_DISPATCHABLE buffer,
+ const void* VMA_NULLABLE pNext);
/** \brief Binds image to allocation.
@@ -3360,26 +3932,26 @@ allocations, calls to `vkBind*Memory()` or `vkMapMemory()` won't happen from mul
It is recommended to use function vmaCreateImage() instead of this one.
*/
VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory(
- VmaAllocator allocator,
- VmaAllocation allocation,
- VkImage image);
+ VmaAllocator VMA_NOT_NULL allocator,
+ VmaAllocation VMA_NOT_NULL allocation,
+ VkImage VMA_NOT_NULL_NON_DISPATCHABLE image);
/** \brief Binds image to allocation with additional parameters.
-@param allocationLocalOffset Additional offset to be added while binding, relative to the beginnig of the `allocation`. Normally it should be 0.
+@param allocationLocalOffset Additional offset to be added while binding, relative to the beginning of the `allocation`. Normally it should be 0.
@param pNext A chain of structures to be attached to `VkBindImageMemoryInfoKHR` structure used internally. Normally it should be null.
This function is similar to vmaBindImageMemory(), but it provides additional parameters.
If `pNext` is not null, #VmaAllocator object must have been created with #VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT flag
-or with VmaAllocatorCreateInfo::vulkanApiVersion `== VK_API_VERSION_1_1`. Otherwise the call fails.
+or with VmaAllocatorCreateInfo::vulkanApiVersion `>= VK_API_VERSION_1_1`. Otherwise the call fails.
*/
VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory2(
- VmaAllocator allocator,
- VmaAllocation allocation,
+ VmaAllocator VMA_NOT_NULL allocator,
+ VmaAllocation VMA_NOT_NULL allocation,
VkDeviceSize allocationLocalOffset,
- VkImage image,
- const void* pNext);
+ VkImage VMA_NOT_NULL_NON_DISPATCHABLE image,
+ const void* VMA_NULLABLE pNext);
/**
@param[out] pBuffer Buffer that was created.
@@ -3399,21 +3971,25 @@ If the function succeeded, you must destroy both buffer and allocation when you
no longer need them using either convenience function vmaDestroyBuffer() or
separately, using `vkDestroyBuffer()` and vmaFreeMemory().
-If VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT flag was used,
+If #VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT flag was used,
VK_KHR_dedicated_allocation extension is used internally to query driver whether
it requires or prefers the new buffer to have dedicated allocation. If yes,
and if dedicated allocation is possible (VmaAllocationCreateInfo::pool is null
-and VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT is not used), it creates dedicated
+and #VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT is not used), it creates dedicated
allocation for this buffer, just like when using
-VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT.
+#VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT.
+
+\note This function creates a new `VkBuffer`. Sub-allocation of parts of one large buffer,
+although recommended as a good practice, is out of scope of this library and could be implemented
+by the user as a higher-level logic on top of VMA.
*/
VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBuffer(
- VmaAllocator allocator,
- const VkBufferCreateInfo* pBufferCreateInfo,
- const VmaAllocationCreateInfo* pAllocationCreateInfo,
- VkBuffer* pBuffer,
- VmaAllocation* pAllocation,
- VmaAllocationInfo* pAllocationInfo);
+ VmaAllocator VMA_NOT_NULL allocator,
+ const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo,
+ const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
+ VkBuffer VMA_NULLABLE_NON_DISPATCHABLE * VMA_NOT_NULL pBuffer,
+ VmaAllocation VMA_NULLABLE * VMA_NOT_NULL pAllocation,
+ VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);
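+
+/*
+A minimal sketch of creating a buffer together with its memory and destroying both
+(hypothetical `allocator`; error handling omitted):
+
+    VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
+    bufCreateInfo.size = 65536;
+    bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
+    VmaAllocationCreateInfo allocCreateInfo = {};
+    allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;
+    VkBuffer buffer; VmaAllocation allocation;
+    vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buffer, &allocation, nullptr);
+    // ...use the buffer...
+    vmaDestroyBuffer(allocator, buffer, allocation);
+*/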
/** \brief Destroys Vulkan buffer and frees allocated memory.
@@ -3427,18 +4003,18 @@ vmaFreeMemory(allocator, allocation);
It is safe to pass null as buffer and/or allocation.
*/
VMA_CALL_PRE void VMA_CALL_POST vmaDestroyBuffer(
- VmaAllocator allocator,
- VkBuffer buffer,
- VmaAllocation allocation);
+ VmaAllocator VMA_NOT_NULL allocator,
+ VkBuffer VMA_NULLABLE_NON_DISPATCHABLE buffer,
+ VmaAllocation VMA_NULLABLE allocation);
/// Function similar to vmaCreateBuffer().
VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateImage(
- VmaAllocator allocator,
- const VkImageCreateInfo* pImageCreateInfo,
- const VmaAllocationCreateInfo* pAllocationCreateInfo,
- VkImage* pImage,
- VmaAllocation* pAllocation,
- VmaAllocationInfo* pAllocationInfo);
+ VmaAllocator VMA_NOT_NULL allocator,
+ const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo,
+ const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
+ VkImage VMA_NULLABLE_NON_DISPATCHABLE * VMA_NOT_NULL pImage,
+ VmaAllocation VMA_NULLABLE * VMA_NOT_NULL pAllocation,
+ VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);
/** \brief Destroys Vulkan image and frees allocated memory.
@@ -3452,9 +4028,9 @@ vmaFreeMemory(allocator, allocation);
It is safe to pass null as image and/or allocation.
*/
VMA_CALL_PRE void VMA_CALL_POST vmaDestroyImage(
- VmaAllocator allocator,
- VkImage image,
- VmaAllocation allocation);
+ VmaAllocator VMA_NOT_NULL allocator,
+ VkImage VMA_NULLABLE_NON_DISPATCHABLE image,
+ VmaAllocation VMA_NULLABLE allocation);
#ifdef __cplusplus
}
@@ -3473,6 +4049,17 @@ VMA_CALL_PRE void VMA_CALL_POST vmaDestroyImage(
#include <cstdint>
#include <cstdlib>
#include <cstring>
+#include <utility>
+
+#if VMA_RECORDING_ENABLED
+ #include <chrono>
+ #if defined(_WIN32)
+ #include <windows.h>
+ #else
+ #include <sstream>
+ #include <thread>
+ #endif
+#endif
/*******************************************************************************
CONFIGURATION SECTION
@@ -3486,12 +4073,23 @@ Define this macro to 1 to make the library fetch pointers to Vulkan functions
internally, like:
vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
-
-Define to 0 if you are going to provide you own pointers to Vulkan functions via
-VmaAllocatorCreateInfo::pVulkanFunctions.
*/
#if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
-#define VMA_STATIC_VULKAN_FUNCTIONS 1
+ #define VMA_STATIC_VULKAN_FUNCTIONS 1
+#endif
+
+/*
+Define this macro to 1 to make the library fetch pointers to Vulkan functions
+internally, like:
+
+    vulkanFunctions.vkAllocateMemory = (PFN_vkAllocateMemory)vkGetDeviceProcAddr(m_hDevice, "vkAllocateMemory");
+*/
+#if !defined(VMA_DYNAMIC_VULKAN_FUNCTIONS)
+ #define VMA_DYNAMIC_VULKAN_FUNCTIONS 1
+ #if defined(VK_NO_PROTOTYPES)
+ extern PFN_vkGetInstanceProcAddr vkGetInstanceProcAddr;
+ extern PFN_vkGetDeviceProcAddr vkGetDeviceProcAddr;
+ #endif
#endif
// Define this macro to 1 to make the library use STL containers instead of its own implementation.
@@ -3554,7 +4152,7 @@ remove them if not needed.
#if defined(__ANDROID_API__) && (__ANDROID_API__ < 16)
#include <cstdlib>
-void *aligned_alloc(size_t alignment, size_t size)
+static void* vma_aligned_alloc(size_t alignment, size_t size)
{
// alignment must be >= sizeof(void*)
if(alignment < sizeof(void*))
@@ -3566,8 +4164,25 @@ void *aligned_alloc(size_t alignment, size_t size)
}
#elif defined(__APPLE__) || defined(__ANDROID__) || (defined(__linux__) && defined(__GLIBCXX__) && !defined(_GLIBCXX_HAVE_ALIGNED_ALLOC))
#include <cstdlib>
-void *aligned_alloc(size_t alignment, size_t size)
-{
+
+#if defined(__APPLE__)
+#include <AvailabilityMacros.h>
+#endif
+
+static void* vma_aligned_alloc(size_t alignment, size_t size)
+{
+#if defined(__APPLE__) && (defined(MAC_OS_X_VERSION_10_16) || defined(__IPHONE_14_0))
+#if MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_16 || __IPHONE_OS_VERSION_MAX_ALLOWED >= __IPHONE_14_0
+    // For C++14, usr/include/malloc/_malloc.h declares aligned_alloc() only
+    // with the MacOSX11.0 SDK in Xcode 12 (which is what adds
+    // MAC_OS_X_VERSION_10_16), even though the function is marked
+    // available for 10.15. That's why the preprocessor checks for 10.16 but
+ // the __builtin_available checks for 10.15.
+ // People who use C++17 could call aligned_alloc with the 10.15 SDK already.
+ if (__builtin_available(macOS 10.15, iOS 13, *))
+ return aligned_alloc(alignment, size);
+#endif
+#endif
// alignment must be >= sizeof(void*)
if(alignment < sizeof(void*))
{
@@ -3579,6 +4194,28 @@ void *aligned_alloc(size_t alignment, size_t size)
return pointer;
return VMA_NULL;
}
+#elif defined(_WIN32)
+static void* vma_aligned_alloc(size_t alignment, size_t size)
+{
+ return _aligned_malloc(size, alignment);
+}
+#else
+static void* vma_aligned_alloc(size_t alignment, size_t size)
+{
+ return aligned_alloc(alignment, size);
+}
+#endif
+
+#if defined(_WIN32)
+static void vma_aligned_free(void* ptr)
+{
+ _aligned_free(ptr);
+}
+#else
+static void vma_aligned_free(void* VMA_NULLABLE ptr)
+{
+ free(ptr);
+}
#endif
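/*
Illustrative sketch, not part of this patch: the two helpers above are meant to
be used strictly as a pair, because on Win32 a pointer obtained from
_aligned_malloc() may only be released with _aligned_free(), never plain free():

    void* p = vma_aligned_alloc(64, 256); // 256 bytes with 64-byte alignment
    if(p != VMA_NULL)
    {
        // ... use p ...
        vma_aligned_free(p);
    }
*/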
// If your compiler is not compatible with C++11 and definition of
@@ -3588,20 +4225,20 @@ void *aligned_alloc(size_t alignment, size_t size)
// Normal assert to check for programmer's errors, especially in Debug configuration.
#ifndef VMA_ASSERT
- #ifdef _DEBUG
- #define VMA_ASSERT(expr) assert(expr)
- #else
+ #ifdef NDEBUG
#define VMA_ASSERT(expr)
+ #else
+ #define VMA_ASSERT(expr) assert(expr)
#endif
#endif
// Assert that will be called very often, like inside data structures e.g. operator[].
// Making it non-empty can make the program slow.
#ifndef VMA_HEAVY_ASSERT
- #ifdef _DEBUG
- #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
- #else
+ #ifdef NDEBUG
#define VMA_HEAVY_ASSERT(expr)
+ #else
+ #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
#endif
#endif
@@ -3610,19 +4247,16 @@ void *aligned_alloc(size_t alignment, size_t size)
#endif
#ifndef VMA_SYSTEM_ALIGNED_MALLOC
- #if defined(_WIN32)
- #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
- #else
- #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size) ))
- #endif
+ #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) vma_aligned_alloc((alignment), (size))
#endif
-#ifndef VMA_SYSTEM_FREE
- #if defined(_WIN32)
- #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
+#ifndef VMA_SYSTEM_ALIGNED_FREE
+ // VMA_SYSTEM_FREE is the old name, but might have been defined by the user
+ #if defined(VMA_SYSTEM_FREE)
+ #define VMA_SYSTEM_ALIGNED_FREE(ptr) VMA_SYSTEM_FREE(ptr)
#else
- #define VMA_SYSTEM_FREE(ptr) free(ptr)
- #endif
+ #define VMA_SYSTEM_ALIGNED_FREE(ptr) vma_aligned_free(ptr)
+ #endif
#endif
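/*
Illustrative override, not part of this patch; my_aligned_alloc/my_aligned_free
are hypothetical user functions. Defining these macros before including the
header replaces the defaults, and the legacy VMA_SYSTEM_FREE spelling is still
honored by the fallback above:

    #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) my_aligned_alloc((alignment), (size))
    #define VMA_SYSTEM_ALIGNED_FREE(ptr) my_aligned_free(ptr)
*/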
#ifndef VMA_MIN
@@ -3653,15 +4287,15 @@ void *aligned_alloc(size_t alignment, size_t size)
// Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
#if VMA_STATS_STRING_ENABLED
- static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
+ static inline void VmaUint32ToStr(char* VMA_NOT_NULL outStr, size_t strLen, uint32_t num)
{
snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
}
- static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
+ static inline void VmaUint64ToStr(char* VMA_NOT_NULL outStr, size_t strLen, uint64_t num)
{
snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
}
- static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
+ static inline void VmaPtrToStr(char* VMA_NOT_NULL outStr, size_t strLen, const void* ptr)
{
snprintf(outStr, strLen, "%p", ptr);
}
@@ -3673,6 +4307,7 @@ void *aligned_alloc(size_t alignment, size_t size)
public:
void Lock() { m_Mutex.lock(); }
void Unlock() { m_Mutex.unlock(); }
+ bool TryLock() { return m_Mutex.try_lock(); }
private:
std::mutex m_Mutex;
};
@@ -3689,8 +4324,10 @@ void *aligned_alloc(size_t alignment, size_t size)
public:
void LockRead() { m_Mutex.lock_shared(); }
void UnlockRead() { m_Mutex.unlock_shared(); }
+ bool TryLockRead() { return m_Mutex.try_lock_shared(); }
void LockWrite() { m_Mutex.lock(); }
void UnlockWrite() { m_Mutex.unlock(); }
+ bool TryLockWrite() { return m_Mutex.try_lock(); }
private:
std::shared_mutex m_Mutex;
};
@@ -3704,8 +4341,10 @@ void *aligned_alloc(size_t alignment, size_t size)
VmaRWMutex() { InitializeSRWLock(&m_Lock); }
void LockRead() { AcquireSRWLockShared(&m_Lock); }
void UnlockRead() { ReleaseSRWLockShared(&m_Lock); }
+ bool TryLockRead() { return TryAcquireSRWLockShared(&m_Lock) != FALSE; }
void LockWrite() { AcquireSRWLockExclusive(&m_Lock); }
void UnlockWrite() { ReleaseSRWLockExclusive(&m_Lock); }
+ bool TryLockWrite() { return TryAcquireSRWLockExclusive(&m_Lock) != FALSE; }
private:
SRWLOCK m_Lock;
};
@@ -3717,8 +4356,10 @@ void *aligned_alloc(size_t alignment, size_t size)
public:
void LockRead() { m_Mutex.Lock(); }
void UnlockRead() { m_Mutex.Unlock(); }
+ bool TryLockRead() { return m_Mutex.TryLock(); }
void LockWrite() { m_Mutex.Lock(); }
void UnlockWrite() { m_Mutex.Unlock(); }
+ bool TryLockWrite() { return m_Mutex.TryLock(); }
private:
VMA_MUTEX m_Mutex;
};
@@ -3747,12 +4388,16 @@ If providing your own implementation, you need to implement a subset of std::ato
#define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
#endif
-#ifndef VMA_DEBUG_ALIGNMENT
+#ifndef VMA_MIN_ALIGNMENT
/**
Minimum alignment of all allocations, in bytes.
- Set to more than 1 for debugging purposes only. Must be power of two.
+ Set to more than 1 for debugging purposes. Must be power of two.
*/
- #define VMA_DEBUG_ALIGNMENT (1)
+ #ifdef VMA_DEBUG_ALIGNMENT // Old name
+ #define VMA_MIN_ALIGNMENT VMA_DEBUG_ALIGNMENT
+ #else
+ #define VMA_MIN_ALIGNMENT (1)
+ #endif
#endif
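/*
Illustrative, not part of this patch: a build that still defines the old name
keeps working thanks to the fallback above.

    #define VMA_DEBUG_ALIGNMENT 16 // old name, defined before including the header
    // VMA_MIN_ALIGNMENT then evaluates to 16.
*/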
#ifndef VMA_DEBUG_MARGIN
@@ -3796,6 +4441,14 @@ If providing your own implementation, you need to implement a subset of std::ato
#define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
#endif
+#ifndef VMA_DEBUG_DONT_EXCEED_MAX_MEMORY_ALLOCATION_COUNT
+ /*
+ Set this to 1 to make VMA never exceed VkPhysicalDeviceLimits::maxMemoryAllocationCount
+ and return error instead of leaving up to Vulkan implementation what to do in such cases.
+ */
+ #define VMA_DEBUG_DONT_EXCEED_MAX_MEMORY_ALLOCATION_COUNT (0)
+#endif
+
#ifndef VMA_SMALL_HEAP_MAX_SIZE
/// Maximum size of a memory heap in Vulkan to consider it "small".
#define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
@@ -3825,6 +4478,12 @@ static const uint8_t VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF;
END OF CONFIGURATION
*/
+// # Copy of some Vulkan definitions so we don't need to check their existence just to handle a few constants.
+
+static const uint32_t VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY = 0x00000040;
+static const uint32_t VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY = 0x00000080;
+static const uint32_t VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_COPY = 0x00020000;
+
static const uint32_t VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET = 0x10000000u;
static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
@@ -3833,51 +4492,53 @@ static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
// Returns number of bits set to 1 in (v).
static inline uint32_t VmaCountBitsSet(uint32_t v)
{
- uint32_t c = v - ((v >> 1) & 0x55555555);
- c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
- c = ((c >> 4) + c) & 0x0F0F0F0F;
- c = ((c >> 8) + c) & 0x00FF00FF;
- c = ((c >> 16) + c) & 0x0000FFFF;
- return c;
+ uint32_t c = v - ((v >> 1) & 0x55555555);
+ c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
+ c = ((c >> 4) + c) & 0x0F0F0F0F;
+ c = ((c >> 8) + c) & 0x00FF00FF;
+ c = ((c >> 16) + c) & 0x0000FFFF;
+ return c;
+}
+
+/*
+Returns true if given number is a power of two.
+T must be an unsigned integer, or a signed integer that is always nonnegative.
+For 0 returns true.
+*/
+template <typename T>
+inline bool VmaIsPow2(T x)
+{
+ return (x & (x-1)) == 0;
}
// Aligns given value up to the nearest multiple of align value. For example: VmaAlignUp(11, 8) = 16.
// Use types like uint32_t, uint64_t as T.
template <typename T>
-static inline T VmaAlignUp(T val, T align)
+static inline T VmaAlignUp(T val, T alignment)
{
- return (val + align - 1) / align * align;
+ VMA_HEAVY_ASSERT(VmaIsPow2(alignment));
+ return (val + alignment - 1) & ~(alignment - 1);
}
// Aligns given value down to the nearest multiple of align value. For example: VmaAlignDown(11, 8) = 8.
// Use types like uint32_t, uint64_t as T.
template <typename T>
-static inline T VmaAlignDown(T val, T align)
+static inline T VmaAlignDown(T val, T alignment)
{
- return val / align * align;
+ VMA_HEAVY_ASSERT(VmaIsPow2(alignment));
+ return val & ~(alignment - 1);
}
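/*
Worked example (illustrative) of the power-of-two mask trick used above:

    VmaAlignUp(11, 8)   -> (11 + 7) & ~7 = 18 & ~7 = 16
    VmaAlignDown(11, 8) ->  11 & ~7      = 8

The new VMA_HEAVY_ASSERT(VmaIsPow2(alignment)) guards the precondition: the
mask form is only equivalent to the previous divide/multiply form when the
alignment is a power of two.
*/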
// Division with mathematical rounding to nearest number.
template <typename T>
static inline T VmaRoundDiv(T x, T y)
{
- return (x + (y / (T)2)) / y;
-}
-
-/*
-Returns true if given number is a power of two.
-T must be unsigned integer number or signed integer but always nonnegative.
-For 0 returns true.
-*/
-template <typename T>
-inline bool VmaIsPow2(T x)
-{
- return (x & (x-1)) == 0;
+ return (x + (y / (T)2)) / y;
}
// Returns smallest power of 2 greater or equal to v.
static inline uint32_t VmaNextPow2(uint32_t v)
{
- v--;
+ v--;
v |= v >> 1;
v |= v >> 2;
v |= v >> 4;
@@ -3888,7 +4549,7 @@ static inline uint32_t VmaNextPow2(uint32_t v)
}
static inline uint64_t VmaNextPow2(uint64_t v)
{
- v--;
+ v--;
v |= v >> 1;
v |= v >> 2;
v |= v >> 4;
@@ -4033,7 +4694,7 @@ static inline bool VmaIsBufferImageGranularityConflict(
{
VMA_SWAP(suballocType1, suballocType2);
}
-
+
switch(suballocType1)
{
case VMA_SUBALLOCATION_TYPE_FREE:
@@ -4167,7 +4828,7 @@ static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, co
size_t down = 0, up = (end - beg);
while(down < up)
{
- const size_t mid = (down + up) / 2;
+ const size_t mid = down + (up - down) / 2; // Overflow-safe midpoint calculation
if(cmp(*(beg+mid), key))
{
down = mid + 1;
@@ -4219,15 +4880,23 @@ static bool VmaValidatePointerArray(uint32_t count, const T* arr)
return true;
}
+template<typename MainT, typename NewT>
+static inline void VmaPnextChainPushFront(MainT* mainStruct, NewT* newStruct)
+{
+ newStruct->pNext = mainStruct->pNext;
+ mainStruct->pNext = newStruct;
+}
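/*
Illustrative sketch, not part of this patch: pushing an extension struct onto a
Vulkan pNext chain with the helper above.

    VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
    VkMemoryAllocateFlagsInfoKHR flagsInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO_KHR };
    VmaPnextChainPushFront(&allocInfo, &flagsInfo);
    // allocInfo.pNext == &flagsInfo; flagsInfo.pNext preserves the previous chain head.
*/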
+
////////////////////////////////////////////////////////////////////////////////
// Memory allocation
static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
{
+ void* result = VMA_NULL;
if((pAllocationCallbacks != VMA_NULL) &&
(pAllocationCallbacks->pfnAllocation != VMA_NULL))
{
- return (*pAllocationCallbacks->pfnAllocation)(
+ result = (*pAllocationCallbacks->pfnAllocation)(
pAllocationCallbacks->pUserData,
size,
alignment,
@@ -4235,8 +4904,10 @@ static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t
}
else
{
- return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
+ result = VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
}
+ VMA_ASSERT(result != VMA_NULL && "CPU memory allocation failed.");
+ return result;
}
static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
@@ -4248,7 +4919,7 @@ static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr
}
else
{
- VMA_SYSTEM_FREE(ptr);
+ VMA_SYSTEM_ALIGNED_FREE(ptr);
}
}
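/*
Illustrative sketch, not part of this patch: both helpers route through the
user's VkAllocationCallbacks when provided and fall back to the system aligned
allocator otherwise, so passing null callbacks is valid:

    void* p = VmaMalloc(VMA_NULL, 128, 16); // 128 bytes, 16-byte alignment
    VmaFree(VMA_NULL, p);
*/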
@@ -4319,7 +4990,7 @@ class VmaStlAllocator
public:
const VkAllocationCallbacks* const m_pCallbacks;
typedef T value_type;
-
+
VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }
@@ -4338,6 +5009,7 @@ public:
}
VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
+ VmaStlAllocator(const VmaStlAllocator&) = default;
};
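/*
Illustrative sketch, not part of this patch: the adapter models enough of the
standard allocator requirements that it also works with STL containers, not
just VmaVector; pAllocationCallbacks is assumed to be in scope:

    std::vector<uint32_t, VmaStlAllocator<uint32_t> > v(
        VmaStlAllocator<uint32_t>(pAllocationCallbacks));
    v.push_back(42u);
*/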
#if VMA_USE_STL_VECTOR
@@ -4382,12 +5054,12 @@ public:
m_Capacity(count)
{
}
-
+
// This version of the constructor is here for compatibility with pre-C++14 std::vector.
// value is unused.
VmaVector(size_t count, const T& value, const AllocatorT& allocator)
: VmaVector(count, allocator) {}
-
+
VmaVector(const VmaVector<T, AllocatorT>& src) :
m_Allocator(src.m_Allocator),
m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
@@ -4399,7 +5071,7 @@ public:
memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
}
}
-
+
~VmaVector()
{
VmaFree(m_Allocator.m_pCallbacks, m_pArray);
@@ -4417,12 +5089,12 @@ public:
}
return *this;
}
-
+
bool empty() const { return m_Count == 0; }
size_t size() const { return m_Count; }
T* data() { return m_pArray; }
const T* data() const { return m_pArray; }
-
+
T& operator[](size_t index)
{
VMA_HEAVY_ASSERT(index < m_Count);
@@ -4458,12 +5130,12 @@ public:
void reserve(size_t newCapacity, bool freeMemory = false)
{
newCapacity = VMA_MAX(newCapacity, m_Count);
-
+
if((newCapacity < m_Capacity) && !freeMemory)
{
newCapacity = m_Capacity;
}
-
+
if(newCapacity != m_Capacity)
{
T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator, newCapacity) : VMA_NULL;
@@ -4477,17 +5149,13 @@ public:
}
}
- void resize(size_t newCount, bool freeMemory = false)
+ void resize(size_t newCount)
{
size_t newCapacity = m_Capacity;
if(newCount > m_Capacity)
{
newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
}
- else if(freeMemory)
- {
- newCapacity = newCount;
- }
if(newCapacity != m_Capacity)
{
@@ -4505,9 +5173,25 @@ public:
m_Count = newCount;
}
- void clear(bool freeMemory = false)
+ void clear()
+ {
+ resize(0);
+ }
+
+ void shrink_to_fit()
{
- resize(0, freeMemory);
+ if(m_Capacity > m_Count)
+ {
+ T* newArray = VMA_NULL;
+ if(m_Count > 0)
+ {
+ newArray = VmaAllocateArray<T>(m_Allocator.m_pCallbacks, m_Count);
+ memcpy(newArray, m_pArray, m_Count * sizeof(T));
+ }
+ VmaFree(m_Allocator.m_pCallbacks, m_pArray);
+ m_Capacity = m_Count;
+ m_pArray = newArray;
+ }
}
void insert(size_t index, const T& src)
@@ -4558,9 +5242,14 @@ public:
}
typedef T* iterator;
+ typedef const T* const_iterator;
iterator begin() { return m_pArray; }
iterator end() { return m_pArray + m_Count; }
+ const_iterator cbegin() const { return m_pArray; }
+ const_iterator cend() const { return m_pArray + m_Count; }
+ const_iterator begin() const { return cbegin(); }
+ const_iterator end() const { return cend(); }
private:
AllocatorT m_Allocator;
@@ -4614,6 +5303,187 @@ bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type&
}
////////////////////////////////////////////////////////////////////////////////
+// class VmaSmallVector
+
+/*
+This is a vector (a variable-sized array), optimized for the case when the array is small.
+
+It contains some number of elements in-place, which allows it to avoid heap allocation
+when the actual number of elements is below that threshold. This allows normal "small"
+cases to be fast without losing generality for large inputs.
+*/
+
+template<typename T, typename AllocatorT, size_t N>
+class VmaSmallVector
+{
+public:
+ typedef T value_type;
+
+ VmaSmallVector(const AllocatorT& allocator) :
+ m_Count(0),
+ m_DynamicArray(allocator)
+ {
+ }
+ VmaSmallVector(size_t count, const AllocatorT& allocator) :
+ m_Count(count),
+ m_DynamicArray(count > N ? count : 0, allocator)
+ {
+ }
+ template<typename SrcT, typename SrcAllocatorT, size_t SrcN>
+ VmaSmallVector(const VmaSmallVector<SrcT, SrcAllocatorT, SrcN>& src) = delete;
+ template<typename SrcT, typename SrcAllocatorT, size_t SrcN>
+ VmaSmallVector<T, AllocatorT, N>& operator=(const VmaSmallVector<SrcT, SrcAllocatorT, SrcN>& rhs) = delete;
+
+ bool empty() const { return m_Count == 0; }
+ size_t size() const { return m_Count; }
+ T* data() { return m_Count > N ? m_DynamicArray.data() : m_StaticArray; }
+ const T* data() const { return m_Count > N ? m_DynamicArray.data() : m_StaticArray; }
+
+ T& operator[](size_t index)
+ {
+ VMA_HEAVY_ASSERT(index < m_Count);
+ return data()[index];
+ }
+ const T& operator[](size_t index) const
+ {
+ VMA_HEAVY_ASSERT(index < m_Count);
+ return data()[index];
+ }
+
+ T& front()
+ {
+ VMA_HEAVY_ASSERT(m_Count > 0);
+ return data()[0];
+ }
+ const T& front() const
+ {
+ VMA_HEAVY_ASSERT(m_Count > 0);
+ return data()[0];
+ }
+ T& back()
+ {
+ VMA_HEAVY_ASSERT(m_Count > 0);
+ return data()[m_Count - 1];
+ }
+ const T& back() const
+ {
+ VMA_HEAVY_ASSERT(m_Count > 0);
+ return data()[m_Count - 1];
+ }
+
+ void resize(size_t newCount, bool freeMemory = false)
+ {
+ if(newCount > N && m_Count > N)
+ {
+ // Any direction, staying in m_DynamicArray
+ m_DynamicArray.resize(newCount);
+ if(freeMemory)
+ {
+ m_DynamicArray.shrink_to_fit();
+ }
+ }
+ else if(newCount > N && m_Count <= N)
+ {
+ // Growing, moving from m_StaticArray to m_DynamicArray
+ m_DynamicArray.resize(newCount);
+ if(m_Count > 0)
+ {
+ memcpy(m_DynamicArray.data(), m_StaticArray, m_Count * sizeof(T));
+ }
+ }
+ else if(newCount <= N && m_Count > N)
+ {
+ // Shrinking, moving from m_DynamicArray to m_StaticArray
+ if(newCount > 0)
+ {
+ memcpy(m_StaticArray, m_DynamicArray.data(), newCount * sizeof(T));
+ }
+ m_DynamicArray.resize(0);
+ if(freeMemory)
+ {
+ m_DynamicArray.shrink_to_fit();
+ }
+ }
+ else
+ {
+ // Any direction, staying in m_StaticArray - nothing to do here
+ }
+ m_Count = newCount;
+ }
+
+ void clear(bool freeMemory = false)
+ {
+ m_DynamicArray.clear();
+ if(freeMemory)
+ {
+ m_DynamicArray.shrink_to_fit();
+ }
+ m_Count = 0;
+ }
+
+ void insert(size_t index, const T& src)
+ {
+ VMA_HEAVY_ASSERT(index <= m_Count);
+ const size_t oldCount = size();
+ resize(oldCount + 1);
+ T* const dataPtr = data();
+ if(index < oldCount)
+ {
+ // I know, this could be more optimal for the case where the memmove can be a memcpy directly from m_StaticArray to m_DynamicArray.
+ memmove(dataPtr + (index + 1), dataPtr + index, (oldCount - index) * sizeof(T));
+ }
+ dataPtr[index] = src;
+ }
+
+ void remove(size_t index)
+ {
+ VMA_HEAVY_ASSERT(index < m_Count);
+ const size_t oldCount = size();
+ if(index < oldCount - 1)
+ {
+ // I know, this could be more optimal for the case where the memmove can be a memcpy directly from m_DynamicArray to m_StaticArray.
+ T* const dataPtr = data();
+ memmove(dataPtr + index, dataPtr + (index + 1), (oldCount - index - 1) * sizeof(T));
+ }
+ resize(oldCount - 1);
+ }
+
+ void push_back(const T& src)
+ {
+ const size_t newIndex = size();
+ resize(newIndex + 1);
+ data()[newIndex] = src;
+ }
+
+ void pop_back()
+ {
+ VMA_HEAVY_ASSERT(m_Count > 0);
+ resize(size() - 1);
+ }
+
+ void push_front(const T& src)
+ {
+ insert(0, src);
+ }
+
+ void pop_front()
+ {
+ VMA_HEAVY_ASSERT(m_Count > 0);
+ remove(0);
+ }
+
+ typedef T* iterator;
+
+ iterator begin() { return data(); }
+ iterator end() { return data() + m_Count; }
+
+private:
+ size_t m_Count;
+ T m_StaticArray[N]; // Used when m_Count <= N
+ VmaVector<T, AllocatorT> m_DynamicArray; // Used when m_Count > N
+};
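/*
Usage sketch, not part of this patch: with N = 8, the first 8 elements live in
m_StaticArray with no heap traffic; the 9th push_back migrates the contents
into m_DynamicArray. pAllocationCallbacks is assumed to be in scope:

    VmaSmallVector<uint32_t, VmaStlAllocator<uint32_t>, 8> ids(
        VmaStlAllocator<uint32_t>(pAllocationCallbacks));
    for(uint32_t i = 0; i < 9; ++i)
        ids.push_back(i);
*/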
+
+////////////////////////////////////////////////////////////////////////////////
// class VmaPoolAllocator
/*
@@ -4628,7 +5498,7 @@ class VmaPoolAllocator
public:
VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity);
~VmaPoolAllocator();
- T* Alloc();
+ template<typename... Types> T* Alloc(Types... args);
void Free(T* ptr);
private:
@@ -4644,7 +5514,7 @@ private:
uint32_t Capacity;
uint32_t FirstFreeIndex;
};
-
+
const VkAllocationCallbacks* m_pAllocationCallbacks;
const uint32_t m_FirstBlockCapacity;
VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;
@@ -4670,7 +5540,7 @@ VmaPoolAllocator<T>::~VmaPoolAllocator()
}
template<typename T>
-T* VmaPoolAllocator<T>::Alloc()
+template<typename... Types> T* VmaPoolAllocator<T>::Alloc(Types... args)
{
for(size_t i = m_ItemBlocks.size(); i--; )
{
@@ -4681,7 +5551,7 @@ T* VmaPoolAllocator<T>::Alloc()
Item* const pItem = &block.pItems[block.FirstFreeIndex];
block.FirstFreeIndex = pItem->NextFreeIndex;
T* result = (T*)&pItem->Value;
- new(result)T(); // Explicit constructor call.
+ new(result)T(std::forward<Types>(args)...); // Explicit constructor call.
return result;
}
}
@@ -4691,7 +5561,7 @@ T* VmaPoolAllocator<T>::Alloc()
Item* const pItem = &newBlock.pItems[0];
newBlock.FirstFreeIndex = pItem->NextFreeIndex;
T* result = (T*)&pItem->Value;
- new(result)T(); // Explicit constructor call.
+ new(result)T(std::forward<Types>(args)...); // Explicit constructor call.
return result;
}
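/*
Usage sketch, not part of this patch: arguments are now forwarded to T's
constructor, which lets pooled objects be created fully initialized;
currentFrameIndex is an assumed local:

    VmaPoolAllocator<VmaAllocation_T> pool(pAllocationCallbacks, 16);
    VmaAllocation_T* alloc = pool.Alloc(currentFrameIndex, false); // ctor args forwarded
    pool.Free(alloc); // calls ~VmaAllocation_T() and recycles the slot
*/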
@@ -4702,11 +5572,11 @@ void VmaPoolAllocator<T>::Free(T* ptr)
for(size_t i = m_ItemBlocks.size(); i--; )
{
ItemBlock& block = m_ItemBlocks[i];
-
+
// Casting to union.
Item* pItemPtr;
memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));
-
+
// Check if pItemPtr is in address range of this block.
if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + block.Capacity))
{
@@ -4783,7 +5653,7 @@ public:
ItemType* PushFront(const T& value);
void PopBack();
void PopFront();
-
+
// Item can be null - it means PushBack.
ItemType* InsertBefore(ItemType* pItem);
// Item can be null - it means PushFront.
@@ -5093,7 +5963,7 @@ public:
VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
return m_pItem != rhs.m_pItem;
}
-
+
private:
VmaRawList<T>* m_pList;
VmaListItem<T>* m_pItem;
@@ -5121,7 +5991,7 @@ public:
m_pItem(src.m_pItem)
{
}
-
+
const T& operator*() const
{
VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
@@ -5176,7 +6046,7 @@ public:
VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
return m_pItem != rhs.m_pItem;
}
-
+
private:
const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
m_pList(pList),
@@ -5201,6 +6071,9 @@ public:
const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }
+ const_iterator begin() const { return cbegin(); }
+ const_iterator end() const { return cend(); }
+
void clear() { m_RawList.Clear(); }
void push_back(const T& value) { m_RawList.PushBack(value); }
void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
@@ -5213,6 +6086,222 @@ private:
#endif // #if VMA_USE_STL_LIST
////////////////////////////////////////////////////////////////////////////////
+// class VmaIntrusiveLinkedList
+
+/*
+Expected interface of ItemTypeTraits:
+struct MyItemTypeTraits
+{
+ typedef MyItem ItemType;
+ static ItemType* GetPrev(const ItemType* item) { return item->myPrevPtr; }
+ static ItemType* GetNext(const ItemType* item) { return item->myNextPtr; }
+ static ItemType*& AccessPrev(ItemType* item) { return item->myPrevPtr; }
+ static ItemType*& AccessNext(ItemType* item) { return item->myNextPtr; }
+};
+*/
+template<typename ItemTypeTraits>
+class VmaIntrusiveLinkedList
+{
+public:
+ typedef typename ItemTypeTraits::ItemType ItemType;
+ static ItemType* GetPrev(const ItemType* item) { return ItemTypeTraits::GetPrev(item); }
+ static ItemType* GetNext(const ItemType* item) { return ItemTypeTraits::GetNext(item); }
+ // Movable, not copyable.
+ VmaIntrusiveLinkedList() { }
+ VmaIntrusiveLinkedList(const VmaIntrusiveLinkedList<ItemTypeTraits>& src) = delete;
+ VmaIntrusiveLinkedList(VmaIntrusiveLinkedList<ItemTypeTraits>&& src) :
+ m_Front(src.m_Front), m_Back(src.m_Back), m_Count(src.m_Count)
+ {
+ src.m_Front = src.m_Back = VMA_NULL;
+ src.m_Count = 0;
+ }
+ ~VmaIntrusiveLinkedList()
+ {
+ VMA_HEAVY_ASSERT(IsEmpty());
+ }
+ VmaIntrusiveLinkedList<ItemTypeTraits>& operator=(const VmaIntrusiveLinkedList<ItemTypeTraits>& src) = delete;
+ VmaIntrusiveLinkedList<ItemTypeTraits>& operator=(VmaIntrusiveLinkedList<ItemTypeTraits>&& src)
+ {
+ if(&src != this)
+ {
+ VMA_HEAVY_ASSERT(IsEmpty());
+ m_Front = src.m_Front;
+ m_Back = src.m_Back;
+ m_Count = src.m_Count;
+ src.m_Front = src.m_Back = VMA_NULL;
+ src.m_Count = 0;
+ }
+ return *this;
+ }
+ void RemoveAll()
+ {
+ if(!IsEmpty())
+ {
+ ItemType* item = m_Back;
+ while(item != VMA_NULL)
+ {
+ ItemType* const prevItem = ItemTypeTraits::AccessPrev(item);
+ ItemTypeTraits::AccessPrev(item) = VMA_NULL;
+ ItemTypeTraits::AccessNext(item) = VMA_NULL;
+ item = prevItem;
+ }
+ m_Front = VMA_NULL;
+ m_Back = VMA_NULL;
+ m_Count = 0;
+ }
+ }
+ size_t GetCount() const { return m_Count; }
+ bool IsEmpty() const { return m_Count == 0; }
+ ItemType* Front() { return m_Front; }
+ const ItemType* Front() const { return m_Front; }
+ ItemType* Back() { return m_Back; }
+ const ItemType* Back() const { return m_Back; }
+ void PushBack(ItemType* item)
+ {
+ VMA_HEAVY_ASSERT(ItemTypeTraits::GetPrev(item) == VMA_NULL && ItemTypeTraits::GetNext(item) == VMA_NULL);
+ if(IsEmpty())
+ {
+ m_Front = item;
+ m_Back = item;
+ m_Count = 1;
+ }
+ else
+ {
+ ItemTypeTraits::AccessPrev(item) = m_Back;
+ ItemTypeTraits::AccessNext(m_Back) = item;
+ m_Back = item;
+ ++m_Count;
+ }
+ }
+ void PushFront(ItemType* item)
+ {
+ VMA_HEAVY_ASSERT(ItemTypeTraits::GetPrev(item) == VMA_NULL && ItemTypeTraits::GetNext(item) == VMA_NULL);
+ if(IsEmpty())
+ {
+ m_Front = item;
+ m_Back = item;
+ m_Count = 1;
+ }
+ else
+ {
+ ItemTypeTraits::AccessNext(item) = m_Front;
+ ItemTypeTraits::AccessPrev(m_Front) = item;
+ m_Front = item;
+ ++m_Count;
+ }
+ }
+ ItemType* PopBack()
+ {
+ VMA_HEAVY_ASSERT(m_Count > 0);
+ ItemType* const backItem = m_Back;
+ ItemType* const prevItem = ItemTypeTraits::GetPrev(backItem);
+ if(prevItem != VMA_NULL)
+ {
+ ItemTypeTraits::AccessNext(prevItem) = VMA_NULL;
+ }
+ m_Back = prevItem;
+ --m_Count;
+ ItemTypeTraits::AccessPrev(backItem) = VMA_NULL;
+ ItemTypeTraits::AccessNext(backItem) = VMA_NULL;
+ return backItem;
+ }
+ ItemType* PopFront()
+ {
+ VMA_HEAVY_ASSERT(m_Count > 0);
+ ItemType* const frontItem = m_Front;
+ ItemType* const nextItem = ItemTypeTraits::GetNext(frontItem);
+ if(nextItem != VMA_NULL)
+ {
+ ItemTypeTraits::AccessPrev(nextItem) = VMA_NULL;
+ }
+ m_Front = nextItem;
+ --m_Count;
+ ItemTypeTraits::AccessPrev(frontItem) = VMA_NULL;
+ ItemTypeTraits::AccessNext(frontItem) = VMA_NULL;
+ return frontItem;
+ }
+
+ // existingItem can be null - it means PushBack.
+ void InsertBefore(ItemType* existingItem, ItemType* newItem)
+ {
+ VMA_HEAVY_ASSERT(newItem != VMA_NULL && ItemTypeTraits::GetPrev(newItem) == VMA_NULL && ItemTypeTraits::GetNext(newItem) == VMA_NULL);
+ if(existingItem != VMA_NULL)
+ {
+ ItemType* const prevItem = ItemTypeTraits::GetPrev(existingItem);
+ ItemTypeTraits::AccessPrev(newItem) = prevItem;
+ ItemTypeTraits::AccessNext(newItem) = existingItem;
+ ItemTypeTraits::AccessPrev(existingItem) = newItem;
+ if(prevItem != VMA_NULL)
+ {
+ ItemTypeTraits::AccessNext(prevItem) = newItem;
+ }
+ else
+ {
+ VMA_HEAVY_ASSERT(m_Front == existingItem);
+ m_Front = newItem;
+ }
+ ++m_Count;
+ }
+ else
+ PushBack(newItem);
+ }
+ // existingItem can be null - it means PushFront.
+ void InsertAfter(ItemType* existingItem, ItemType* newItem)
+ {
+ VMA_HEAVY_ASSERT(newItem != VMA_NULL && ItemTypeTraits::GetPrev(newItem) == VMA_NULL && ItemTypeTraits::GetNext(newItem) == VMA_NULL);
+ if(existingItem != VMA_NULL)
+ {
+ ItemType* const nextItem = ItemTypeTraits::GetNext(existingItem);
+ ItemTypeTraits::AccessNext(newItem) = nextItem;
+ ItemTypeTraits::AccessPrev(newItem) = existingItem;
+ ItemTypeTraits::AccessNext(existingItem) = newItem;
+ if(nextItem != VMA_NULL)
+ {
+ ItemTypeTraits::AccessPrev(nextItem) = newItem;
+ }
+ else
+ {
+ VMA_HEAVY_ASSERT(m_Back == existingItem);
+ m_Back = newItem;
+ }
+ ++m_Count;
+ }
+ else
+ PushFront(newItem);
+ }
+ void Remove(ItemType* item)
+ {
+ VMA_HEAVY_ASSERT(item != VMA_NULL && m_Count > 0);
+ if(ItemTypeTraits::GetPrev(item) != VMA_NULL)
+ {
+ ItemTypeTraits::AccessNext(ItemTypeTraits::AccessPrev(item)) = ItemTypeTraits::GetNext(item);
+ }
+ else
+ {
+ VMA_HEAVY_ASSERT(m_Front == item);
+ m_Front = ItemTypeTraits::GetNext(item);
+ }
+
+ if(ItemTypeTraits::GetNext(item) != VMA_NULL)
+ {
+ ItemTypeTraits::AccessPrev(ItemTypeTraits::AccessNext(item)) = ItemTypeTraits::GetPrev(item);
+ }
+ else
+ {
+ VMA_HEAVY_ASSERT(m_Back == item);
+ m_Back = ItemTypeTraits::GetPrev(item);
+ }
+ ItemTypeTraits::AccessPrev(item) = VMA_NULL;
+ ItemTypeTraits::AccessNext(item) = VMA_NULL;
+ --m_Count;
+ }
+private:
+ ItemType* m_Front = VMA_NULL;
+ ItemType* m_Back = VMA_NULL;
+ size_t m_Count = 0;
+};
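/*
Usage sketch, not part of this patch: an item type carrying its own links plus
the traits struct documented above. The list never owns memory, and its
destructor heavy-asserts emptiness, so it must be drained before destruction:

    struct Node { Node* prev = VMA_NULL; Node* next = VMA_NULL; };
    struct NodeTraits
    {
        typedef Node ItemType;
        static Node* GetPrev(const Node* n) { return n->prev; }
        static Node* GetNext(const Node* n) { return n->next; }
        static Node*& AccessPrev(Node* n) { return n->prev; }
        static Node*& AccessNext(Node* n) { return n->next; }
    };

    VmaIntrusiveLinkedList<NodeTraits> list;
    Node a, b;
    list.PushBack(&a);
    list.PushFront(&b); // order: b, a
    list.RemoveAll();   // unlinks both; no deallocation happens
*/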
+
+////////////////////////////////////////////////////////////////////////////////
// class VmaMap
// Unused in this version.
@@ -5255,7 +6344,7 @@ public:
void insert(const PairType& pair);
iterator find(const KeyT& key);
void erase(iterator it);
-
+
private:
VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
};
@@ -5342,25 +6431,24 @@ public:
This struct is allocated using VmaPoolAllocator.
*/
- void Ctor(uint32_t currentFrameIndex, bool userDataString)
+ VmaAllocation_T(uint32_t currentFrameIndex, bool userDataString) :
+ m_Alignment{1},
+ m_Size{0},
+ m_pUserData{VMA_NULL},
+ m_LastUseFrameIndex{currentFrameIndex},
+ m_MemoryTypeIndex{0},
+ m_Type{(uint8_t)ALLOCATION_TYPE_NONE},
+ m_SuballocationType{(uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN},
+ m_MapCount{0},
+ m_Flags{userDataString ? (uint8_t)FLAG_USER_DATA_STRING : (uint8_t)0}
{
- m_Alignment = 1;
- m_Size = 0;
- m_MemoryTypeIndex = 0;
- m_pUserData = VMA_NULL;
- m_LastUseFrameIndex = currentFrameIndex;
- m_Type = (uint8_t)ALLOCATION_TYPE_NONE;
- m_SuballocationType = (uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN;
- m_MapCount = 0;
- m_Flags = userDataString ? (uint8_t)FLAG_USER_DATA_STRING : 0;
-
#if VMA_STATS_STRING_ENABLED
m_CreationFrameIndex = currentFrameIndex;
m_BufferImageUsage = 0;
#endif
}
- void Dtor()
+ ~VmaAllocation_T()
{
VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");
@@ -5405,7 +6493,7 @@ public:
void ChangeBlockAllocation(
VmaAllocator hAllocator,
VmaDeviceMemoryBlock* block,
- VkDeviceSize offset);
+ VkDeviceSize offset);
void ChangeOffset(VkDeviceSize newOffset);
@@ -5427,6 +6515,8 @@ public:
m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
m_DedicatedAllocation.m_hMemory = hMemory;
m_DedicatedAllocation.m_pMappedData = pMappedData;
+ m_DedicatedAllocation.m_Prev = VMA_NULL;
+ m_DedicatedAllocation.m_Next = VMA_NULL;
}
ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
@@ -5448,7 +6538,7 @@ public:
bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
void* GetMappedData() const;
bool CanBecomeLost() const;
-
+
uint32_t GetLastUseFrameIndex() const
{
return m_LastUseFrameIndex.load();
@@ -5461,7 +6551,7 @@ public:
- If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
- Else, returns false.
-
+
If hAllocation is already lost, assert - you should not call it then.
If hAllocation was not created with CAN_BECOME_LOST_BIT, assert.
*/
@@ -5524,6 +6614,8 @@ private:
{
VkDeviceMemory m_hMemory;
void* m_pMappedData; // Not null means memory is mapped.
+ VmaAllocation_T* m_Prev;
+ VmaAllocation_T* m_Next;
};
union
@@ -5540,6 +6632,32 @@ private:
#endif
void FreeUserDataString(VmaAllocator hAllocator);
+
+ friend struct VmaDedicatedAllocationListItemTraits;
+};
+
+struct VmaDedicatedAllocationListItemTraits
+{
+ typedef VmaAllocation_T ItemType;
+ static ItemType* GetPrev(const ItemType* item)
+ {
+ VMA_HEAVY_ASSERT(item->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
+ return item->m_DedicatedAllocation.m_Prev;
+ }
+ static ItemType* GetNext(const ItemType* item)
+ {
+ VMA_HEAVY_ASSERT(item->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
+ return item->m_DedicatedAllocation.m_Next;
+ }
+ static ItemType*& AccessPrev(ItemType* item)
+ {
+ VMA_HEAVY_ASSERT(item->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
+ return item->m_DedicatedAllocation.m_Prev;
+ }
+ static ItemType*& AccessNext(ItemType* item)
+ {
+ VMA_HEAVY_ASSERT(item->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
+ return item->m_DedicatedAllocation.m_Next;
+ }
};
/*
@@ -5757,7 +6875,7 @@ public:
////////////////////////////////////////////////////////////////////////////////
// For defragmentation
-
+
bool IsBufferImageGranularityConflictPossible(
VkDeviceSize bufferImageGranularity,
VmaSuballocationType& inOutPrevSuballocType) const;
@@ -5968,7 +7086,7 @@ private:
SuballocationVectorType& AccessSuballocations2nd() { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
const SuballocationVectorType& AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
const SuballocationVectorType& AccessSuballocations2nd() const { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
-
+
// Number of items in 1st vector with hAllocation = null at the beginning.
size_t m_1stNullItemsBeginCount;
// Number of other items in 1st vector with hAllocation = null somewhere in the middle.
@@ -6006,7 +7124,7 @@ private:
- m_UsableSize is this size aligned down to a power of two.
All allocations and calculations happen relative to m_UsableSize.
- GetUnusableSize() is the difference between them.
- It is repoted as separate, unused range, not available for allocations.
+ It is reported as separate, unused range, not available for allocations.
Node at level 0 has size = m_UsableSize.
Each next level contains nodes with size 2 times smaller than current level.
@@ -6148,6 +7266,8 @@ private:
#endif
};
+struct VmaBlockVector;
+
/*
Represents a single block of device memory (`VkDeviceMemory`) with all the
data about its regions (aka suballocations, #VmaAllocation), assigned and free.
@@ -6171,6 +7291,7 @@ public:
// Always call after construction.
void Init(
VmaAllocator hAllocator,
+ VmaBlockVector* parentBlockVector,
VmaPool hParentPool,
uint32_t newMemoryTypeIndex,
VkDeviceMemory newMemory,
@@ -6179,7 +7300,8 @@ public:
uint32_t algorithm);
// Always call before destruction.
void Destroy(VmaAllocator allocator);
-
+
+ VmaBlockVector* GetParentBlockVector() const { return m_ParentBlockVector; }
VmaPool GetParentPool() const { return m_hParentPool; }
VkDeviceMemory GetDeviceMemory() const { return m_hMemory; }
uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
@@ -6212,10 +7334,11 @@ public:
const void* pNext);
private:
- VmaPool m_hParentPool; // VK_NULL_HANDLE if not belongs to custom pool.
- uint32_t m_MemoryTypeIndex;
- uint32_t m_Id;
- VkDeviceMemory m_hMemory;
+ VmaBlockVector* m_ParentBlockVector = VMA_NULL;
+ VmaPool m_hParentPool = VK_NULL_HANDLE; // VK_NULL_HANDLE if it doesn't belong to a custom pool.
+ uint32_t m_MemoryTypeIndex = UINT32_MAX;
+ uint32_t m_Id = 0;
+ VkDeviceMemory m_hMemory = VK_NULL_HANDLE;
/*
Protects access to m_hMemory so it's not used by multiple threads simultaneously, e.g. vkMapMemory, vkBindBufferMemory.
@@ -6223,16 +7346,8 @@ private:
Allocations, deallocations, any change in m_pMetadata is protected by parent's VmaBlockVector::m_Mutex.
*/
VMA_MUTEX m_Mutex;
- uint32_t m_MapCount;
- void* m_pMappedData;
-};
-
-struct VmaPointerLess
-{
- bool operator()(const void* lhs, const void* rhs) const
- {
- return lhs < rhs;
- }
+ uint32_t m_MapCount = 0;
+ void* m_pMappedData = VMA_NULL;
};
struct VmaDefragmentationMove
@@ -6242,6 +7357,9 @@ struct VmaDefragmentationMove
VkDeviceSize srcOffset;
VkDeviceSize dstOffset;
VkDeviceSize size;
+ VmaAllocation hAllocation;
+ VmaDeviceMemoryBlock* pSrcBlock;
+ VmaDeviceMemoryBlock* pDstBlock;
};
class VmaDefragmentationAlgorithm;
@@ -6266,7 +7384,10 @@ public:
VkDeviceSize bufferImageGranularity,
uint32_t frameInUseCount,
bool explicitBlockSize,
- uint32_t algorithm);
+ uint32_t algorithm,
+ float priority,
+ VkDeviceSize minAllocationAlignment,
+ void* pMemoryAllocateNext);
~VmaBlockVector();
VkResult CreateMinBlocks();
@@ -6311,12 +7432,21 @@ public:
// Saves results in pCtx->res.
void Defragment(
class VmaBlockVectorDefragmentationContext* pCtx,
- VmaDefragmentationStats* pStats,
+ VmaDefragmentationStats* pStats, VmaDefragmentationFlags flags,
VkDeviceSize& maxCpuBytesToMove, uint32_t& maxCpuAllocationsToMove,
VkDeviceSize& maxGpuBytesToMove, uint32_t& maxGpuAllocationsToMove,
VkCommandBuffer commandBuffer);
void DefragmentationEnd(
class VmaBlockVectorDefragmentationContext* pCtx,
+ uint32_t flags,
+ VmaDefragmentationStats* pStats);
+
+ uint32_t ProcessDefragmentations(
+ class VmaBlockVectorDefragmentationContext *pCtx,
+ VmaDefragmentationPassMoveInfo* pMove, uint32_t maxMoves);
+
+ void CommitDefragmentations(
+ class VmaBlockVectorDefragmentationContext *pCtx,
VmaDefragmentationStats* pStats);
////////////////////////////////////////////////////////////////////////////////
@@ -6340,6 +7470,9 @@ private:
const uint32_t m_FrameInUseCount;
const bool m_ExplicitBlockSize;
const uint32_t m_Algorithm;
+ const float m_Priority;
+ const VkDeviceSize m_MinAllocationAlignment;
+ void* const m_pMemoryAllocateNext;
VMA_RW_MUTEX m_Mutex;
/* There can be at most one allocation that is completely empty (except when minBlockCount > 0) -
@@ -6387,7 +7520,7 @@ private:
// Saves result to pCtx->res.
void ApplyDefragmentationMovesGpu(
class VmaBlockVectorDefragmentationContext* pDefragCtx,
- const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
+ VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
VkCommandBuffer commandBuffer);
/*
@@ -6424,6 +7557,18 @@ public:
private:
uint32_t m_Id;
char* m_Name;
+ VmaPool_T* m_PrevPool = VMA_NULL;
+ VmaPool_T* m_NextPool = VMA_NULL;
+ friend struct VmaPoolListItemTraits;
+};
+
+struct VmaPoolListItemTraits
+{
+ typedef VmaPool_T ItemType;
+ static ItemType* GetPrev(const ItemType* item) { return item->m_PrevPool; }
+ static ItemType* GetNext(const ItemType* item) { return item->m_NextPool; }
+ static ItemType*& AccessPrev(ItemType* item) { return item->m_PrevPool; }
+ static ItemType*& AccessNext(ItemType* item) { return item->m_NextPool; }
};
/*
@@ -6456,7 +7601,8 @@ public:
virtual VkResult Defragment(
VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
VkDeviceSize maxBytesToMove,
- uint32_t maxAllocationsToMove) = 0;
+ uint32_t maxAllocationsToMove,
+ VmaDefragmentationFlags flags) = 0;
virtual VkDeviceSize GetBytesMoved() const = 0;
virtual uint32_t GetAllocationsMoved() const = 0;
@@ -6501,7 +7647,8 @@ public:
virtual VkResult Defragment(
VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
VkDeviceSize maxBytesToMove,
- uint32_t maxAllocationsToMove);
+ uint32_t maxAllocationsToMove,
+ VmaDefragmentationFlags flags);
virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
@@ -6602,7 +7749,8 @@ private:
VkResult DefragmentRound(
VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
VkDeviceSize maxBytesToMove,
- uint32_t maxAllocationsToMove);
+ uint32_t maxAllocationsToMove,
+ bool freeOldAllocations);
size_t CalcBlocksWithNonMovableCount() const;
@@ -6628,7 +7776,8 @@ public:
virtual VkResult Defragment(
VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
VkDeviceSize maxBytesToMove,
- uint32_t maxAllocationsToMove);
+ uint32_t maxAllocationsToMove,
+ VmaDefragmentationFlags flags);
virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
@@ -6708,7 +7857,7 @@ private:
}
}
}
-
+
if(bestIndex != SIZE_MAX)
{
outBlockInfoIndex = m_FreeSpaces[bestIndex].blockInfoIndex;
@@ -6776,6 +7925,10 @@ public:
VkResult res;
bool mutexLocked;
VmaVector< VmaBlockDefragmentationContext, VmaStlAllocator<VmaBlockDefragmentationContext> > blockContexts;
+ VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> > defragmentationMoves;
+ uint32_t defragmentationMovesProcessed;
+ uint32_t defragmentationMovesCommitted;
+ bool hasDefragmentationPlan;
VmaBlockVectorDefragmentationContext(
VmaAllocator hAllocator,
@@ -6791,7 +7944,7 @@ public:
void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
void AddAll() { m_AllAllocations = true; }
- void Begin(bool overlappingMoveSupported);
+ void Begin(bool overlappingMoveSupported, VmaDefragmentationFlags flags);
private:
const VmaAllocator m_hAllocator;
@@ -6825,28 +7978,37 @@ public:
VmaDefragmentationStats* pStats);
~VmaDefragmentationContext_T();
- void AddPools(uint32_t poolCount, VmaPool* pPools);
+ void AddPools(uint32_t poolCount, const VmaPool* pPools);
void AddAllocations(
uint32_t allocationCount,
- VmaAllocation* pAllocations,
+ const VmaAllocation* pAllocations,
VkBool32* pAllocationsChanged);
/*
Returns:
- `VK_SUCCESS` if succeeded and object can be destroyed immediately.
- `VK_NOT_READY` if succeeded but the object must remain alive until vmaDefragmentationEnd().
- - Negative value if error occured and object can be destroyed immediately.
+ - Negative value if error occurred and object can be destroyed immediately.
*/
VkResult Defragment(
VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove,
VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove,
- VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats);
+ VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats, VmaDefragmentationFlags flags);
+
+ VkResult DefragmentPassBegin(VmaDefragmentationPassInfo* pInfo);
+ VkResult DefragmentPassEnd();
private:
const VmaAllocator m_hAllocator;
const uint32_t m_CurrFrameIndex;
const uint32_t m_Flags;
VmaDefragmentationStats* const m_pStats;
+
+ VkDeviceSize m_MaxCpuBytesToMove;
+ uint32_t m_MaxCpuAllocationsToMove;
+ VkDeviceSize m_MaxGpuBytesToMove;
+ uint32_t m_MaxGpuAllocationsToMove;
+
// Owner of these objects.
VmaBlockVectorDefragmentationContext* m_DefaultPoolContexts[VK_MAX_MEMORY_TYPES];
// Owner of these objects.
@@ -6866,7 +8028,8 @@ public:
uint32_t vulkanApiVersion,
bool dedicatedAllocationExtensionEnabled,
bool bindMemory2ExtensionEnabled,
- bool memoryBudgetExtensionEnabled);
+ bool memoryBudgetExtensionEnabled,
+ bool deviceCoherentMemoryExtensionEnabled);
~VmaRecorder();
void RecordCreateAllocator(uint32_t frameIndex);
@@ -6963,8 +8126,7 @@ private:
VmaRecordFlags m_Flags;
FILE* m_File;
VMA_MUTEX m_FileMutex;
- int64_t m_Freq;
- int64_t m_StartCounter;
+ std::chrono::time_point<std::chrono::high_resolution_clock> m_RecordingStartTime;
void GetBasicParams(CallParams& outParams);
@@ -6997,7 +8159,7 @@ class VmaAllocationObjectAllocator
public:
VmaAllocationObjectAllocator(const VkAllocationCallbacks* pAllocationCallbacks);
- VmaAllocation Allocate();
+ template<typename... Types> VmaAllocation Allocate(Types... args);
void Free(VmaAllocation hAlloc);
private:
@@ -7064,13 +8226,16 @@ public:
bool m_UseKhrDedicatedAllocation; // Can be set only if m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0).
bool m_UseKhrBindMemory2; // Can be set only if m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0).
bool m_UseExtMemoryBudget;
+ bool m_UseAmdDeviceCoherentMemory;
+ bool m_UseKhrBufferDeviceAddress;
+ bool m_UseExtMemoryPriority;
VkDevice m_hDevice;
VkInstance m_hInstance;
bool m_AllocationCallbacksSpecified;
VkAllocationCallbacks m_AllocationCallbacks;
VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
VmaAllocationObjectAllocator m_AllocationObjectAllocator;
-
+
// Each bit (1 << i) is set if HeapSizeLimit is enabled for that heap, so cannot allocate more than the heap size.
uint32_t m_HeapSizeLimitMask;
@@ -7079,13 +8244,14 @@ public:
// Default pools.
VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];
+ VmaBlockVector* m_pSmallBufferBlockVectors[VK_MAX_MEMORY_TYPES];
- // Each vector is sorted by memory (handle value).
- typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
- AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES];
+ typedef VmaIntrusiveLinkedList<VmaDedicatedAllocationListItemTraits> DedicatedAllocationLinkedList;
+ DedicatedAllocationLinkedList m_DedicatedAllocations[VK_MAX_MEMORY_TYPES];
VMA_RW_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];
VmaCurrentBudgetData m_Budget;
+ VMA_ATOMIC_UINT32 m_DeviceMemoryCount; // Total number of VkDeviceMemory objects.
VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
VkResult Init(const VmaAllocatorCreateInfo* pCreateInfo);
@@ -7100,6 +8266,8 @@ public:
return m_VulkanFunctions;
}
+ VkPhysicalDevice GetPhysicalDevice() const { return m_PhysicalDevice; }
+
VkDeviceSize GetBufferImageGranularity() const
{
return VMA_MAX(
@@ -7125,8 +8293,8 @@ public:
VkDeviceSize GetMemoryTypeMinAlignment(uint32_t memTypeIndex) const
{
return IsMemoryTypeNonCoherent(memTypeIndex) ?
- VMA_MAX((VkDeviceSize)VMA_DEBUG_ALIGNMENT, m_PhysicalDeviceProperties.limits.nonCoherentAtomSize) :
- (VkDeviceSize)VMA_DEBUG_ALIGNMENT;
+ VMA_MAX((VkDeviceSize)VMA_MIN_ALIGNMENT, m_PhysicalDeviceProperties.limits.nonCoherentAtomSize) :
+ (VkDeviceSize)VMA_MIN_ALIGNMENT;
}
bool IsIntegratedGpu() const
@@ -7134,6 +8302,8 @@ public:
return m_PhysicalDeviceProperties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
}
+ uint32_t GetGlobalMemoryTypeBits() const { return m_GlobalMemoryTypeBits; }
+
#if VMA_RECORDING_ENABLED
VmaRecorder* GetRecorder() const { return m_pRecorder; }
#endif
@@ -7155,6 +8325,7 @@ public:
bool requiresDedicatedAllocation,
bool prefersDedicatedAllocation,
VkBuffer dedicatedBuffer,
+ VkBufferUsageFlags dedicatedBufferUsage, // UINT32_MAX when unknown.
VkImage dedicatedImage,
const VmaAllocationCreateInfo& createInfo,
VmaSuballocationType suballocType,
@@ -7166,10 +8337,6 @@ public:
size_t allocationCount,
const VmaAllocation* pAllocations);
- VkResult ResizeAllocation(
- const VmaAllocation alloc,
- VkDeviceSize newSize);
-
void CalculateStats(VmaStats* pStats);
void GetBudget(
@@ -7186,6 +8353,12 @@ public:
VkResult DefragmentationEnd(
VmaDefragmentationContext context);
+ VkResult DefragmentationPassBegin(
+ VmaDefragmentationPassInfo* pInfo,
+ VmaDefragmentationContext context);
+ VkResult DefragmentationPassEnd(
+ VmaDefragmentationContext context);
+
void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
bool TouchAllocation(VmaAllocation hAllocation);
@@ -7235,10 +8408,15 @@ public:
VkImage hImage,
const void* pNext);
- void FlushOrInvalidateAllocation(
+ VkResult FlushOrInvalidateAllocation(
VmaAllocation hAllocation,
VkDeviceSize offset, VkDeviceSize size,
VMA_CACHE_OPERATION op);
+ VkResult FlushOrInvalidateAllocations(
+ uint32_t allocationCount,
+ const VmaAllocation* allocations,
+ const VkDeviceSize* offsets, const VkDeviceSize* sizes,
+ VMA_CACHE_OPERATION op);
void FillAllocation(const VmaAllocation hAllocation, uint8_t pattern);
@@ -7248,26 +8426,52 @@ public:
*/
uint32_t GetGpuDefragmentationMemoryTypeBits();
+#if VMA_EXTERNAL_MEMORY
+ VkExternalMemoryHandleTypeFlagsKHR GetExternalMemoryHandleTypeFlags(uint32_t memTypeIndex) const
+ {
+ return m_TypeExternalMemoryHandleTypes[memTypeIndex];
+ }
+#endif // #if VMA_EXTERNAL_MEMORY
+
private:
VkDeviceSize m_PreferredLargeHeapBlockSize;
VkPhysicalDevice m_PhysicalDevice;
VMA_ATOMIC_UINT32 m_CurrentFrameIndex;
VMA_ATOMIC_UINT32 m_GpuDefragmentationMemoryTypeBits; // UINT32_MAX means uninitialized.
-
+#if VMA_EXTERNAL_MEMORY
+ VkExternalMemoryHandleTypeFlagsKHR m_TypeExternalMemoryHandleTypes[VK_MAX_MEMORY_TYPES];
+#endif // #if VMA_EXTERNAL_MEMORY
+
VMA_RW_MUTEX m_PoolsMutex;
- // Protected by m_PoolsMutex. Sorted by pointer value.
- VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
+ typedef VmaIntrusiveLinkedList<VmaPoolListItemTraits> PoolList;
+ // Protected by m_PoolsMutex.
+ PoolList m_Pools;
uint32_t m_NextPoolId;
VmaVulkanFunctions m_VulkanFunctions;
+ // Global bit mask AND-ed with any memoryTypeBits to disallow certain memory types.
+ uint32_t m_GlobalMemoryTypeBits;
+
#if VMA_RECORDING_ENABLED
VmaRecorder* m_pRecorder;
#endif
void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);
+#if VMA_STATIC_VULKAN_FUNCTIONS == 1
+ void ImportVulkanFunctions_Static();
+#endif
+
+ void ImportVulkanFunctions_Custom(const VmaVulkanFunctions* pVulkanFunctions);
+
+#if VMA_DYNAMIC_VULKAN_FUNCTIONS == 1
+ void ImportVulkanFunctions_Dynamic();
+#endif
+
+ void ValidateVulkanFunctions();
+
VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);
VkResult AllocateMemoryOfType(
@@ -7275,6 +8479,7 @@ private:
VkDeviceSize alignment,
bool dedicatedAllocation,
VkBuffer dedicatedBuffer,
+ VkBufferUsageFlags dedicatedBufferUsage,
VkImage dedicatedImage,
const VmaAllocationCreateInfo& createInfo,
uint32_t memTypeIndex,
@@ -7302,7 +8507,9 @@ private:
bool map,
bool isUserDataString,
void* pUserData,
+ float priority,
VkBuffer dedicatedBuffer,
+ VkBufferUsageFlags dedicatedBufferUsage,
VkImage dedicatedImage,
size_t allocationCount,
VmaAllocation* pAllocations);
@@ -7315,6 +8522,13 @@ private:
*/
uint32_t CalculateGpuDefragmentationMemoryTypeBits() const;
+ uint32_t CalculateGlobalMemoryTypeBits() const;
+
+ bool GetFlushOrInvalidateRange(
+ VmaAllocation allocation,
+ VkDeviceSize offset, VkDeviceSize size,
+ VkMappedMemoryRange& outRange) const;
+
#if VMA_MEMORY_BUDGET
void UpdateVulkanBudget();
#endif // #if VMA_MEMORY_BUDGET
@@ -7451,10 +8665,10 @@ public:
void BeginObject(bool singleLine = false);
void EndObject();
-
+
void BeginArray(bool singleLine = false);
void EndArray();
-
+
void WriteString(const char* pStr);
void BeginString(const char* pStr = VMA_NULL);
void ContinueString(const char* pStr);
@@ -7462,7 +8676,7 @@ public:
void ContinueString(uint64_t n);
void ContinueString_Pointer(const void* ptr);
void EndString(const char* pStr = VMA_NULL);
-
+
void WriteNumber(uint32_t n);
void WriteNumber(uint64_t n);
void WriteBool(bool b);
@@ -7710,7 +8924,7 @@ void VmaJsonWriter::WriteIndent(bool oneLess)
if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
{
m_SB.AddNewLine();
-
+
size_t count = m_Stack.size();
if(count > 0 && oneLess)
{
@@ -8124,7 +9338,7 @@ void VmaBlockMetadata::PrintDetailedMap_Allocation(class VmaJsonWriter& json,
VmaAllocation hAllocation) const
{
json.BeginObject(true);
-
+
json.WriteString("Offset");
json.WriteNumber(offset);
@@ -8138,7 +9352,7 @@ void VmaBlockMetadata::PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
VkDeviceSize size) const
{
json.BeginObject(true);
-
+
json.WriteString("Offset");
json.WriteNumber(offset);
@@ -8198,7 +9412,7 @@ void VmaBlockMetadata_Generic::Init(VkDeviceSize size)
bool VmaBlockMetadata_Generic::Validate() const
{
VMA_VALIDATE(!m_Suballocations.empty());
-
+
// Expected offset of new suballocation as calculated from previous ones.
VkDeviceSize calculatedOffset = 0;
// Expected number of free suballocations as calculated from traversing their list.
@@ -8211,12 +9425,8 @@ bool VmaBlockMetadata_Generic::Validate() const
// True if previous visited suballocation was free.
bool prevFree = false;
- for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
- suballocItem != m_Suballocations.cend();
- ++suballocItem)
+ for(const auto& subAlloc : m_Suballocations)
{
- const VmaSuballocation& subAlloc = *suballocItem;
-
// Actual offset of this suballocation doesn't match expected one.
VMA_VALIDATE(subAlloc.offset == calculatedOffset);
@@ -8259,7 +9469,7 @@ bool VmaBlockMetadata_Generic::Validate() const
for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
{
VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];
-
+
// Only free suballocations can be registered in m_FreeSuballocationsBySize.
VMA_VALIDATE(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE);
// They must be sorted by size ascending.
@@ -8268,7 +9478,7 @@ bool VmaBlockMetadata_Generic::Validate() const
lastSize = suballocItem->size;
}
- // Check if totals match calculacted values.
+ // Check if totals match calculated values.
VMA_VALIDATE(ValidateFreeSuballocationList());
VMA_VALIDATE(calculatedOffset == GetSize());
VMA_VALIDATE(calculatedSumFreeSize == m_SumFreeSize);
@@ -8301,7 +9511,7 @@ void VmaBlockMetadata_Generic::CalcAllocationStatInfo(VmaStatInfo& outInfo) cons
const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
outInfo.allocationCount = rangeCount - m_FreeCount;
outInfo.unusedRangeCount = m_FreeCount;
-
+
outInfo.unusedBytes = m_SumFreeSize;
outInfo.usedBytes = GetSize() - outInfo.unusedBytes;
@@ -8310,11 +9520,8 @@ void VmaBlockMetadata_Generic::CalcAllocationStatInfo(VmaStatInfo& outInfo) cons
outInfo.unusedRangeSizeMin = UINT64_MAX;
outInfo.unusedRangeSizeMax = 0;
- for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
- suballocItem != m_Suballocations.cend();
- ++suballocItem)
+ for(const auto& suballoc : m_Suballocations)
{
- const VmaSuballocation& suballoc = *suballocItem;
if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
{
outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
@@ -8349,17 +9556,15 @@ void VmaBlockMetadata_Generic::PrintDetailedMap(class VmaJsonWriter& json) const
m_FreeCount); // unusedRangeCount
size_t i = 0;
- for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
- suballocItem != m_Suballocations.cend();
- ++suballocItem, ++i)
+ for(const auto& suballoc : m_Suballocations)
{
- if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
+ if(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE)
{
- PrintDetailedMap_UnusedRange(json, suballocItem->offset, suballocItem->size);
+ PrintDetailedMap_UnusedRange(json, suballoc.offset, suballoc.size);
}
else
{
- PrintDetailedMap_Allocation(json, suballocItem->offset, suballocItem->hAllocation);
+ PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
}
}
@@ -8560,7 +9765,7 @@ bool VmaBlockMetadata_Generic::MakeRequestedAllocationsLost(
VMA_HEAVY_ASSERT(Validate());
VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);
-
+
return true;
}
@@ -8584,18 +9789,16 @@ uint32_t VmaBlockMetadata_Generic::MakeAllocationsLost(uint32_t currentFrameInde
VkResult VmaBlockMetadata_Generic::CheckCorruption(const void* pBlockData)
{
- for(VmaSuballocationList::iterator it = m_Suballocations.begin();
- it != m_Suballocations.end();
- ++it)
+ for(auto& suballoc : m_Suballocations)
{
- if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
+ if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
{
- if(!VmaValidateMagicValue(pBlockData, it->offset - VMA_DEBUG_MARGIN))
+ if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
{
VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
return VK_ERROR_VALIDATION_FAILED_EXT;
}
- if(!VmaValidateMagicValue(pBlockData, it->offset + it->size))
+ if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
{
VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
return VK_ERROR_VALIDATION_FAILED_EXT;
@@ -8737,7 +9940,7 @@ bool VmaBlockMetadata_Generic::CheckAllocation(
VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
VMA_ASSERT(suballocItem != m_Suballocations.cend());
VMA_ASSERT(pOffset != VMA_NULL);
-
+
*itemsToMakeLostCount = 0;
*pSumFreeSize = 0;
*pSumItemSize = 0;
@@ -8770,19 +9973,19 @@ bool VmaBlockMetadata_Generic::CheckAllocation(
// Start from offset equal to beginning of this suballocation.
*pOffset = suballocItem->offset;
-
+
// Apply VMA_DEBUG_MARGIN at the beginning.
if(VMA_DEBUG_MARGIN > 0)
{
*pOffset += VMA_DEBUG_MARGIN;
}
-
+
// Apply alignment.
*pOffset = VmaAlignUp(*pOffset, allocAlignment);
// Check previous suballocations for BufferImageGranularity conflicts.
// Make bigger alignment if necessary.
- if(bufferImageGranularity > 1)
+ if(bufferImageGranularity > 1 && bufferImageGranularity != allocAlignment)
{
bool bufferImageGranularityConflict = false;
VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
@@ -8807,14 +10010,14 @@ bool VmaBlockMetadata_Generic::CheckAllocation(
*pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
}
}
-
+
// Now that we have final *pOffset, check if we are past suballocItem.
// If yes, return false - this function should be called for another suballocItem as starting point.
if(*pOffset >= suballocItem->offset + suballocItem->size)
{
return false;
}
-
+
// Calculate padding at the beginning based on current offset.
const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;
@@ -8866,7 +10069,7 @@ bool VmaBlockMetadata_Generic::CheckAllocation(
// Check next suballocations for BufferImageGranularity conflicts.
// If conflict exists, we must mark more allocations lost or fail.
- if(bufferImageGranularity > 1)
+ if(allocSize % bufferImageGranularity || *pOffset % bufferImageGranularity)
{
VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
++nextSuballocItem;
@@ -8913,19 +10116,19 @@ bool VmaBlockMetadata_Generic::CheckAllocation(
// Start from offset equal to beginning of this suballocation.
*pOffset = suballoc.offset;
-
+
// Apply VMA_DEBUG_MARGIN at the beginning.
if(VMA_DEBUG_MARGIN > 0)
{
*pOffset += VMA_DEBUG_MARGIN;
}
-
+
// Apply alignment.
*pOffset = VmaAlignUp(*pOffset, allocAlignment);
-
+
// Check previous suballocations for BufferImageGranularity conflicts.
// Make bigger alignment if necessary.
- if(bufferImageGranularity > 1)
+ if(bufferImageGranularity > 1 && bufferImageGranularity != allocAlignment)
{
bool bufferImageGranularityConflict = false;
VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
@@ -8950,7 +10153,7 @@ bool VmaBlockMetadata_Generic::CheckAllocation(
*pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
}
}
-
+
// Calculate padding at the beginning based on current offset.
const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;
@@ -8965,7 +10168,7 @@ bool VmaBlockMetadata_Generic::CheckAllocation(
// Check next suballocations for BufferImageGranularity conflicts.
// If conflict exists, allocation cannot be made here.
- if(bufferImageGranularity > 1)
+ if(allocSize % bufferImageGranularity || *pOffset % bufferImageGranularity)
{
VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
++nextSuballocItem;
@@ -8997,7 +10200,7 @@ void VmaBlockMetadata_Generic::MergeFreeWithNext(VmaSuballocationList::iterator
{
VMA_ASSERT(item != m_Suballocations.end());
VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
-
+
VmaSuballocationList::iterator nextItem = item;
++nextItem;
VMA_ASSERT(nextItem != m_Suballocations.end());
@@ -9014,7 +10217,7 @@ VmaSuballocationList::iterator VmaBlockMetadata_Generic::FreeSuballocation(VmaSu
VmaSuballocation& suballoc = *suballocItem;
suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
suballoc.hAllocation = VK_NULL_HANDLE;
-
+
// Update totals.
++m_FreeCount;
m_SumFreeSize += suballoc.size;
@@ -9022,7 +10225,7 @@ VmaSuballocationList::iterator VmaBlockMetadata_Generic::FreeSuballocation(VmaSu
// Merge with previous and/or next suballocation if it's also free.
bool mergeWithNext = false;
bool mergeWithPrev = false;
-
+
VmaSuballocationList::iterator nextItem = suballocItem;
++nextItem;
if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
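The merge logic here (continued in the next hunk) keeps the suballocation list canonical: no two adjacent free entries. A reduced sketch of the same coalescing on a plain std::list, with a hypothetical Range type standing in for VmaSuballocation:

    #include <cstdint>
    #include <iterator>
    #include <list>

    struct Range { uint64_t offset, size; bool free; };

    // Mark the range free, then merge it with a free successor and/or
    // predecessor so adjacent free ranges never coexist.
    static void FreeRange(std::list<Range>& ranges, std::list<Range>::iterator it)
    {
        it->free = true;

        auto next = std::next(it);
        if(next != ranges.end() && next->free)
        {
            it->size += next->size;       // absorb the free successor
            ranges.erase(next);
        }
        if(it != ranges.begin())
        {
            auto prev = std::prev(it);
            if(prev->free)
            {
                prev->size += it->size;   // let the free predecessor absorb us
                ranges.erase(it);
            }
        }
    }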
@@ -9129,14 +10332,12 @@ bool VmaBlockMetadata_Generic::IsBufferImageGranularityConflictPossible(
VkDeviceSize minAlignment = VK_WHOLE_SIZE;
bool typeConflictFound = false;
- for(VmaSuballocationList::const_iterator it = m_Suballocations.cbegin();
- it != m_Suballocations.cend();
- ++it)
+ for(const auto& suballoc : m_Suballocations)
{
- const VmaSuballocationType suballocType = it->type;
+ const VmaSuballocationType suballocType = suballoc.type;
if(suballocType != VMA_SUBALLOCATION_TYPE_FREE)
{
- minAlignment = VMA_MIN(minAlignment, it->hAllocation->GetAlignment());
+ minAlignment = VMA_MIN(minAlignment, suballoc.hAllocation->GetAlignment());
if(VmaIsBufferImageGranularityConflict(inOutPrevSuballocType, suballocType))
{
typeConflictFound = true;
@@ -9320,7 +10521,7 @@ VkDeviceSize VmaBlockMetadata_Linear::GetUnusedRangeSizeMax() const
{
return size;
}
-
+
const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
switch(m_2ndVectorMode)
@@ -9328,7 +10529,7 @@ VkDeviceSize VmaBlockMetadata_Linear::GetUnusedRangeSizeMax() const
case SECOND_VECTOR_EMPTY:
/*
Available space is after end of 1st, as well as before beginning of 1st (which
- whould make it a ring buffer).
+ would make it a ring buffer).
*/
{
const size_t suballocations1stCount = suballocations1st.size();
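A hedged sketch of the SECOND_VECTOR_EMPTY case above: with only the 1st vector populated, the block's free space is split between a hole before the first allocation (using it would turn the block into a ring buffer) and a hole after the last one:

    #include <algorithm>
    #include <cstdint>

    // Largest contiguous free range for the single-vector case; firstOffset is
    // the offset of the first allocation and lastEnd the end of the last one.
    static uint64_t LargestFreeRange(uint64_t blockSize,
                                     uint64_t firstOffset,
                                     uint64_t lastEnd)
    {
        const uint64_t before = firstOffset;          // hole at the start
        const uint64_t after  = blockSize - lastEnd;  // hole at the end
        return std::max(before, after);
    }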
@@ -9407,7 +10608,7 @@ void VmaBlockMetadata_Linear::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
if(nextAlloc2ndIndex < suballoc2ndCount)
{
const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
-
+
// 1. Process free space before this allocation.
if(lastOffset < suballoc.offset)
{
@@ -9418,13 +10619,13 @@ void VmaBlockMetadata_Linear::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
}
-
+
// 2. Process this allocation.
// There is allocation with suballoc.offset, suballoc.size.
outInfo.usedBytes += suballoc.size;
outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
-
+
// 3. Prepare for next iteration.
lastOffset = suballoc.offset + suballoc.size;
++nextAlloc2ndIndex;
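The numbered comments above follow a pattern repeated throughout these stats functions: walk the allocations in offset order carrying lastOffset, so the gap between the previous end and the next start is exactly one unused range. A compact sketch of that walk:

    #include <cstdint>
    #include <vector>

    struct Alloc { uint64_t offset, size; };

    // `allocs` must be sorted by offset, as the suballocation vectors are.
    static void WalkRanges(const std::vector<Alloc>& allocs, uint64_t blockSize)
    {
        uint64_t lastOffset = 0;
        for(const Alloc& a : allocs)
        {
            if(lastOffset < a.offset)
            {
                const uint64_t unused = a.offset - lastOffset; // 1. free gap
                (void)unused; // accumulate unused-range min/max/count here
            }
            // 2. the allocation itself: accumulate usedBytes, size min/max here
            lastOffset = a.offset + a.size; // 3. prepare for next iteration
        }
        if(lastOffset < blockSize)
        {
            // trailing free space up to the end of the block
        }
    }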
@@ -9464,7 +10665,7 @@ void VmaBlockMetadata_Linear::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
if(nextAlloc1stIndex < suballoc1stCount)
{
const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
-
+
// 1. Process free space before this allocation.
if(lastOffset < suballoc.offset)
{
@@ -9475,13 +10676,13 @@ void VmaBlockMetadata_Linear::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
}
-
+
// 2. Process this allocation.
// There is allocation with suballoc.offset, suballoc.size.
outInfo.usedBytes += suballoc.size;
outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
-
+
// 3. Prepare for next iteration.
lastOffset = suballoc.offset + suballoc.size;
++nextAlloc1stIndex;
@@ -9520,7 +10721,7 @@ void VmaBlockMetadata_Linear::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
if(nextAlloc2ndIndex != SIZE_MAX)
{
const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
-
+
// 1. Process free space before this allocation.
if(lastOffset < suballoc.offset)
{
@@ -9531,13 +10732,13 @@ void VmaBlockMetadata_Linear::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
}
-
+
// 2. Process this allocation.
// There is allocation with suballoc.offset, suballoc.size.
outInfo.usedBytes += suballoc.size;
outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
-
+
// 3. Prepare for next iteration.
lastOffset = suballoc.offset + suballoc.size;
--nextAlloc2ndIndex;
@@ -9593,7 +10794,7 @@ void VmaBlockMetadata_Linear::AddPoolStats(VmaPoolStats& inoutStats) const
if(nextAlloc2ndIndex < suballoc2ndCount)
{
const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
-
+
// 1. Process free space before this allocation.
if(lastOffset < suballoc.offset)
{
@@ -9603,11 +10804,11 @@ void VmaBlockMetadata_Linear::AddPoolStats(VmaPoolStats& inoutStats) const
++inoutStats.unusedRangeCount;
inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
}
-
+
// 2. Process this allocation.
// There is allocation with suballoc.offset, suballoc.size.
++inoutStats.allocationCount;
-
+
// 3. Prepare for next iteration.
lastOffset = suballoc.offset + suballoc.size;
++nextAlloc2ndIndex;
@@ -9646,7 +10847,7 @@ void VmaBlockMetadata_Linear::AddPoolStats(VmaPoolStats& inoutStats) const
if(nextAlloc1stIndex < suballoc1stCount)
{
const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
-
+
// 1. Process free space before this allocation.
if(lastOffset < suballoc.offset)
{
@@ -9656,11 +10857,11 @@ void VmaBlockMetadata_Linear::AddPoolStats(VmaPoolStats& inoutStats) const
++inoutStats.unusedRangeCount;
inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
}
-
+
// 2. Process this allocation.
// There is allocation with suballoc.offset, suballoc.size.
++inoutStats.allocationCount;
-
+
// 3. Prepare for next iteration.
lastOffset = suballoc.offset + suballoc.size;
++nextAlloc1stIndex;
@@ -9698,7 +10899,7 @@ void VmaBlockMetadata_Linear::AddPoolStats(VmaPoolStats& inoutStats) const
if(nextAlloc2ndIndex != SIZE_MAX)
{
const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
-
+
// 1. Process free space before this allocation.
if(lastOffset < suballoc.offset)
{
@@ -9708,11 +10909,11 @@ void VmaBlockMetadata_Linear::AddPoolStats(VmaPoolStats& inoutStats) const
++inoutStats.unusedRangeCount;
inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
}
-
+
// 2. Process this allocation.
// There is allocation with suballoc.offset, suballoc.size.
++inoutStats.allocationCount;
-
+
// 3. Prepare for next iteration.
lastOffset = suballoc.offset + suballoc.size;
--nextAlloc2ndIndex;
@@ -9770,19 +10971,19 @@ void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const
if(nextAlloc2ndIndex < suballoc2ndCount)
{
const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
-
+
// 1. Process free space before this allocation.
if(lastOffset < suballoc.offset)
{
// There is free space from lastOffset to suballoc.offset.
++unusedRangeCount;
}
-
+
// 2. Process this allocation.
// There is allocation with suballoc.offset, suballoc.size.
++alloc2ndCount;
usedBytes += suballoc.size;
-
+
// 3. Prepare for next iteration.
lastOffset = suballoc.offset + suballoc.size;
++nextAlloc2ndIndex;
@@ -9819,19 +11020,19 @@ void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const
if(nextAlloc1stIndex < suballoc1stCount)
{
const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
-
+
// 1. Process free space before this allocation.
if(lastOffset < suballoc.offset)
{
// There is free space from lastOffset to suballoc.offset.
++unusedRangeCount;
}
-
+
// 2. Process this allocation.
// There is allocation with suballoc.offset, suballoc.size.
++alloc1stCount;
usedBytes += suballoc.size;
-
+
// 3. Prepare for next iteration.
lastOffset = suballoc.offset + suballoc.size;
++nextAlloc1stIndex;
@@ -9866,19 +11067,19 @@ void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const
if(nextAlloc2ndIndex != SIZE_MAX)
{
const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
-
+
// 1. Process free space before this allocation.
if(lastOffset < suballoc.offset)
{
// There is free space from lastOffset to suballoc.offset.
++unusedRangeCount;
}
-
+
// 2. Process this allocation.
// There is allocation with suballoc.offset, suballoc.size.
++alloc2ndCount;
usedBytes += suballoc.size;
-
+
// 3. Prepare for next iteration.
lastOffset = suballoc.offset + suballoc.size;
--nextAlloc2ndIndex;
@@ -9921,7 +11122,7 @@ void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const
if(nextAlloc2ndIndex < suballoc2ndCount)
{
const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
-
+
// 1. Process free space before this allocation.
if(lastOffset < suballoc.offset)
{
@@ -9929,11 +11130,11 @@ void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const
const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
}
-
+
// 2. Process this allocation.
// There is allocation with suballoc.offset, suballoc.size.
PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
-
+
// 3. Prepare for next iteration.
lastOffset = suballoc.offset + suballoc.size;
++nextAlloc2ndIndex;
@@ -9968,7 +11169,7 @@ void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const
if(nextAlloc1stIndex < suballoc1stCount)
{
const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
-
+
// 1. Process free space before this allocation.
if(lastOffset < suballoc.offset)
{
@@ -9976,11 +11177,11 @@ void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const
const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
}
-
+
// 2. Process this allocation.
// There is allocation with suballoc.offset, suballoc.size.
PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
-
+
// 3. Prepare for next iteration.
lastOffset = suballoc.offset + suballoc.size;
++nextAlloc1stIndex;
@@ -10016,7 +11217,7 @@ void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const
if(nextAlloc2ndIndex != SIZE_MAX)
{
const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
-
+
// 1. Process free space before this allocation.
if(lastOffset < suballoc.offset)
{
@@ -10024,11 +11225,11 @@ void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const
const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
}
-
+
// 2. Process this allocation.
// There is allocation with suballoc.offset, suballoc.size.
PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
-
+
// 3. Prepare for next iteration.
lastOffset = suballoc.offset + suballoc.size;
--nextAlloc2ndIndex;
@@ -10133,7 +11334,7 @@ bool VmaBlockMetadata_Linear::CreateAllocationRequest_UpperAddress(
// Check next suballocations from 2nd for BufferImageGranularity conflicts.
// Make bigger alignment if necessary.
- if(bufferImageGranularity > 1 && !suballocations2nd.empty())
+ if(bufferImageGranularity > 1 && bufferImageGranularity != allocAlignment && !suballocations2nd.empty())
{
bool bufferImageGranularityConflict = false;
for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
@@ -10238,7 +11439,7 @@ bool VmaBlockMetadata_Linear::CreateAllocationRequest_LowerAddress(
// Check previous suballocations for BufferImageGranularity conflicts.
// Make bigger alignment if necessary.
- if(bufferImageGranularity > 1 && !suballocations1st.empty())
+ if(bufferImageGranularity > 1 && bufferImageGranularity != allocAlignment && !suballocations1st.empty())
{
bool bufferImageGranularityConflict = false;
for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
@@ -10270,7 +11471,7 @@ bool VmaBlockMetadata_Linear::CreateAllocationRequest_LowerAddress(
{
// Check next suballocations for BufferImageGranularity conflicts.
// If conflict exists, allocation cannot be made here.
- if(bufferImageGranularity > 1 && m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
+ if((allocSize % bufferImageGranularity || resultOffset % bufferImageGranularity) && m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
{
for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
{
@@ -10328,7 +11529,7 @@ bool VmaBlockMetadata_Linear::CreateAllocationRequest_LowerAddress(
// Check previous suballocations for BufferImageGranularity conflicts.
// Make bigger alignment if necessary.
- if(bufferImageGranularity > 1 && !suballocations2nd.empty())
+ if(bufferImageGranularity > 1 && bufferImageGranularity != allocAlignment && !suballocations2nd.empty())
{
bool bufferImageGranularityConflict = false;
for(size_t prevSuballocIndex = suballocations2nd.size(); prevSuballocIndex--; )
@@ -10386,7 +11587,7 @@ bool VmaBlockMetadata_Linear::CreateAllocationRequest_LowerAddress(
// Check next suballocations for BufferImageGranularity conflicts.
// If conflict exists, we must mark more allocations lost or fail.
- if(bufferImageGranularity > 1)
+ if(allocSize % bufferImageGranularity || resultOffset % bufferImageGranularity)
{
while(index1st < suballocations1st.size())
{
@@ -10432,7 +11633,7 @@ bool VmaBlockMetadata_Linear::CreateAllocationRequest_LowerAddress(
{
// Check next suballocations for BufferImageGranularity conflicts.
// If conflict exists, allocation cannot be made here.
- if(bufferImageGranularity > 1)
+ if(allocSize % bufferImageGranularity || resultOffset % bufferImageGranularity)
{
for(size_t nextSuballocIndex = index1st;
nextSuballocIndex < suballocations1st.size();
@@ -10480,7 +11681,7 @@ bool VmaBlockMetadata_Linear::MakeRequestedAllocationsLost(
}
VMA_ASSERT(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER);
-
+
// We always start from 1st.
SuballocationVectorType* suballocations = &AccessSuballocations1st();
size_t index = m_1stNullItemsBeginCount;
@@ -10528,15 +11729,15 @@ bool VmaBlockMetadata_Linear::MakeRequestedAllocationsLost(
}
CleanupAfterFree();
- //VMA_HEAVY_ASSERT(Validate()); // Already called by ClanupAfterFree().
-
+ //VMA_HEAVY_ASSERT(Validate()); // Already called by CleanupAfterFree().
+
return true;
}
uint32_t VmaBlockMetadata_Linear::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
{
uint32_t lostAllocationCount = 0;
-
+
SuballocationVectorType& suballocations1st = AccessSuballocations1st();
for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
{
@@ -10960,7 +12161,7 @@ bool VmaBlockMetadata_Buddy::Validate() const
node = node->free.next)
{
VMA_VALIDATE(node->type == Node::TYPE_FREE);
-
+
if(node->free.next == VMA_NULL)
{
VMA_VALIDATE(m_FreeList[level].back == node);
@@ -11146,7 +12347,7 @@ void VmaBlockMetadata_Buddy::Alloc(
const uint32_t targetLevel = AllocSizeToLevel(allocSize);
uint32_t currLevel = (uint32_t)(uintptr_t)request.customData;
-
+
Node* currNode = m_FreeList[currLevel].front;
VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
while(currNode->offset != request.offset)
@@ -11154,14 +12355,14 @@ void VmaBlockMetadata_Buddy::Alloc(
currNode = currNode->free.next;
VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
}
-
+
// Go down, splitting free nodes.
while(currLevel < targetLevel)
{
// currNode is already first free node at currLevel.
// Remove it from list of free nodes at this currLevel.
RemoveFromFreeList(currLevel, currNode);
-
+
const uint32_t childrenLevel = currLevel + 1;
// Create two free sub-nodes.
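For orientation, the split loop beginning here descends the buddy tree: each iteration halves the current free node into two buddies, keeps the left half and publishes the right half as free one level down. A heavily reduced sketch with stand-in types (free-list and parent-node bookkeeping elided):

    #include <cstdint>

    struct Node { uint64_t offset; Node* buddy; };

    // Split a free node of size nodeSize at currLevel until it matches the
    // requested level; returns the node that becomes the allocation.
    static Node* SplitDown(Node* node, uint32_t currLevel, uint32_t targetLevel,
                           uint64_t nodeSize)
    {
        while(currLevel < targetLevel)
        {
            const uint64_t childSize = nodeSize / 2;
            Node* left  = new Node{ node->offset,             nullptr };
            Node* right = new Node{ node->offset + childSize, nullptr };
            left->buddy  = right;   // buddies can find each other for merging
            right->buddy = left;    // back when either of them is freed
            // (the real code re-types `node` as a split parent and pushes
            // `right` onto the free list one level down)
            node = left;
            nodeSize = childSize;
            ++currLevel;
        }
        return node; // exactly the requested size
    }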
@@ -11323,7 +12524,7 @@ void VmaBlockMetadata_Buddy::FreeAtOffset(VmaAllocation alloc, VkDeviceSize offs
vma_delete(GetAllocationCallbacks(), node->buddy);
vma_delete(GetAllocationCallbacks(), node);
parent->type = Node::TYPE_FREE;
-
+
node = parent;
--level;
//m_SumFreeSize += LevelToNodeSize(level) % 2; // Useful only when level node sizes can be non power of 2.
@@ -11437,7 +12638,7 @@ void VmaBlockMetadata_Buddy::PrintDetailedMapNode(class VmaJsonWriter& json, con
PrintDetailedMap_UnusedRange(json, node->offset, levelNodeSize);
break;
case Node::TYPE_ALLOCATION:
- {
+ {
PrintDetailedMap_Allocation(json, node->offset, node->allocation.alloc);
const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
if(allocSize < levelNodeSize)
@@ -11465,18 +12666,13 @@ void VmaBlockMetadata_Buddy::PrintDetailedMapNode(class VmaJsonWriter& json, con
////////////////////////////////////////////////////////////////////////////////
// class VmaDeviceMemoryBlock
-VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
- m_pMetadata(VMA_NULL),
- m_MemoryTypeIndex(UINT32_MAX),
- m_Id(0),
- m_hMemory(VK_NULL_HANDLE),
- m_MapCount(0),
- m_pMappedData(VMA_NULL)
+VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator)
{
}
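The emptied initializer list above only stays correct if the members now carry in-class default initializers; presumably the declaration side of this change looks roughly like the following sketch (reconstructed from the removed list plus the new m_ParentBlockVector member, not copied from the header):

    class VmaDeviceMemoryBlockSketch
    {
        VmaBlockVector* m_ParentBlockVector = VMA_NULL; // new member, set in Init()
        VmaBlockMetadata* m_pMetadata = VMA_NULL;
        uint32_t m_MemoryTypeIndex = UINT32_MAX;
        uint32_t m_Id = 0;
        VkDeviceMemory m_hMemory = VK_NULL_HANDLE;
        uint32_t m_MapCount = 0;
        void* m_pMappedData = VMA_NULL;
    };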
void VmaDeviceMemoryBlock::Init(
VmaAllocator hAllocator,
+ VmaBlockVector* parentBlockVector,
VmaPool hParentPool,
uint32_t newMemoryTypeIndex,
VkDeviceMemory newMemory,
@@ -11484,8 +12680,10 @@ void VmaDeviceMemoryBlock::Init(
uint32_t id,
uint32_t algorithm)
{
+ VMA_ASSERT(parentBlockVector != VMA_NULL);
VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
+ m_ParentBlockVector = parentBlockVector;
m_hParentPool = hParentPool;
m_MemoryTypeIndex = newMemoryTypeIndex;
m_Id = id;
@@ -11526,7 +12724,7 @@ bool VmaDeviceMemoryBlock::Validate() const
{
VMA_VALIDATE((m_hMemory != VK_NULL_HANDLE) &&
(m_pMetadata->GetSize() != 0));
-
+
return m_pMetadata->Validate();
}
@@ -11731,7 +12929,10 @@ VmaPool_T::VmaPool_T(
(createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
createInfo.frameInUseCount,
createInfo.blockSize != 0, // explicitBlockSize
- createInfo.flags & VMA_POOL_CREATE_ALGORITHM_MASK), // algorithm
+ createInfo.flags & VMA_POOL_CREATE_ALGORITHM_MASK, // algorithm
+ createInfo.priority,
+ VMA_MAX(hAllocator->GetMemoryTypeMinAlignment(createInfo.memoryTypeIndex), createInfo.minAllocationAlignment),
+ createInfo.pMemoryAllocateNext),
m_Id(0),
m_Name(VMA_NULL)
{
@@ -11739,13 +12940,14 @@ VmaPool_T::VmaPool_T(
VmaPool_T::~VmaPool_T()
{
+ VMA_ASSERT(m_PrevPool == VMA_NULL && m_NextPool == VMA_NULL);
}
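A hedged usage sketch for the three new pool parameters threaded through this constructor; the field names follow the createInfo members referenced above, everything else is illustrative:

    // Assumes vk_mem_alloc.h with this patch applied.
    static VkResult CreateTunedPool(VmaAllocator allocator, uint32_t memTypeIndex, VmaPool* pPool)
    {
        VmaPoolCreateInfo poolCreateInfo = {};
        poolCreateInfo.memoryTypeIndex = memTypeIndex;   // chosen via vmaFindMemoryTypeIndex*
        poolCreateInfo.priority = 1.0f;                  // honored when VK_EXT_memory_priority is enabled
        poolCreateInfo.minAllocationAlignment = 256;     // extra alignment forced on every allocation
        poolCreateInfo.pMemoryAllocateNext = nullptr;    // optional pNext chained into vkAllocateMemory
        return vmaCreatePool(allocator, &poolCreateInfo, pPool);
    }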
void VmaPool_T::SetName(const char* pName)
{
const VkAllocationCallbacks* allocs = m_BlockVector.GetAllocator()->GetAllocationCallbacks();
VmaFreeString(allocs, m_Name);
-
+
if(pName != VMA_NULL)
{
m_Name = VmaCreateStringCopy(allocs, pName);
@@ -11770,7 +12972,10 @@ VmaBlockVector::VmaBlockVector(
VkDeviceSize bufferImageGranularity,
uint32_t frameInUseCount,
bool explicitBlockSize,
- uint32_t algorithm) :
+ uint32_t algorithm,
+ float priority,
+ VkDeviceSize minAllocationAlignment,
+ void* pMemoryAllocateNext) :
m_hAllocator(hAllocator),
m_hParentPool(hParentPool),
m_MemoryTypeIndex(memoryTypeIndex),
@@ -11781,6 +12986,9 @@ VmaBlockVector::VmaBlockVector(
m_FrameInUseCount(frameInUseCount),
m_ExplicitBlockSize(explicitBlockSize),
m_Algorithm(algorithm),
+ m_Priority(priority),
+ m_MinAllocationAlignment(minAllocationAlignment),
+ m_pMemoryAllocateNext(pMemoryAllocateNext),
m_HasEmptyBlock(false),
m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
m_NextBlockId(0)
@@ -11860,6 +13068,8 @@ VkResult VmaBlockVector::Allocate(
size_t allocIndex;
VkResult res = VK_SUCCESS;
+ alignment = VMA_MAX(alignment, m_MinAllocationAlignment);
+
if(IsCorruptionDetectionEnabled())
{
size = VmaAlignUp<VkDeviceSize>(size, sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE));
@@ -11887,9 +13097,13 @@ VkResult VmaBlockVector::Allocate(
if(res != VK_SUCCESS)
{
// Free all already created allocations.
+ const uint32_t heapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex);
while(allocIndex--)
{
- Free(pAllocations[allocIndex]);
+ VmaAllocation_T* const alloc = pAllocations[allocIndex];
+ const VkDeviceSize allocSize = alloc->GetSize();
+ Free(alloc);
+ m_hAllocator->m_Budget.RemoveAllocation(heapIndex, allocSize);
}
memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
}
@@ -11909,8 +13123,7 @@ VkResult VmaBlockVector::AllocatePage(
bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
-
- const bool withinBudget = (createInfo.flags & VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT) != 0;
+
VkDeviceSize freeMemory;
{
const uint32_t heapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex);
@@ -11918,7 +13131,7 @@ VkResult VmaBlockVector::AllocatePage(
m_hAllocator->GetBudget(&heapBudget, heapIndex, 1);
freeMemory = (heapBudget.usage < heapBudget.budget) ? (heapBudget.budget - heapBudget.usage) : 0;
}
-
+
const bool canFallbackToDedicated = !IsCustomPool();
const bool canCreateNewBlock =
((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
@@ -12229,8 +13442,7 @@ VkResult VmaBlockVector::AllocatePage(
&bestRequest))
{
// Allocate from this pBlock.
- *pAllocation = m_hAllocator->m_AllocationObjectAllocator.Allocate();
- (*pAllocation)->Ctor(currentFrameIndex, isUserDataString);
+ *pAllocation = m_hAllocator->m_AllocationObjectAllocator.Allocate(currentFrameIndex, isUserDataString);
pBestRequestBlock->m_pMetadata->Alloc(bestRequest, suballocType, size, *pAllocation);
UpdateHasEmptyBlock();
(*pAllocation)->InitBlockAllocation(
@@ -12280,6 +13492,8 @@ VkResult VmaBlockVector::AllocatePage(
void VmaBlockVector::Free(
const VmaAllocation hAllocation)
{
+ VMA_ASSERT(hAllocation->GetBlock()->GetParentBlockVector() == this);
+
VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
bool budgetExceeded = false;
@@ -12433,9 +13647,8 @@ VkResult VmaBlockVector::AllocateFromBlock(
return res;
}
}
-
- *pAllocation = m_hAllocator->m_AllocationObjectAllocator.Allocate();
- (*pAllocation)->Ctor(currentFrameIndex, isUserDataString);
+
+ *pAllocation = m_hAllocator->m_AllocationObjectAllocator.Allocate(currentFrameIndex, isUserDataString);
pBlock->m_pMetadata->Alloc(currRequest, suballocType, size, *pAllocation);
UpdateHasEmptyBlock();
(*pAllocation)->InitBlockAllocation(
@@ -12467,8 +13680,39 @@ VkResult VmaBlockVector::AllocateFromBlock(
VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
{
VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
+ allocInfo.pNext = m_pMemoryAllocateNext;
allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
allocInfo.allocationSize = blockSize;
+
+#if VMA_BUFFER_DEVICE_ADDRESS
+ // Every standalone block can potentially contain a buffer with VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT - always enable the feature.
+ VkMemoryAllocateFlagsInfoKHR allocFlagsInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO_KHR };
+ if(m_hAllocator->m_UseKhrBufferDeviceAddress)
+ {
+ allocFlagsInfo.flags = VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT_KHR;
+ VmaPnextChainPushFront(&allocInfo, &allocFlagsInfo);
+ }
+#endif // #if VMA_BUFFER_DEVICE_ADDRESS
+
+#if VMA_MEMORY_PRIORITY
+ VkMemoryPriorityAllocateInfoEXT priorityInfo = { VK_STRUCTURE_TYPE_MEMORY_PRIORITY_ALLOCATE_INFO_EXT };
+ if(m_hAllocator->m_UseExtMemoryPriority)
+ {
+ priorityInfo.priority = m_Priority;
+ VmaPnextChainPushFront(&allocInfo, &priorityInfo);
+ }
+#endif // #if VMA_MEMORY_PRIORITY
+
+#if VMA_EXTERNAL_MEMORY
+ // Attach VkExportMemoryAllocateInfoKHR if necessary.
+ VkExportMemoryAllocateInfoKHR exportMemoryAllocInfo = { VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO_KHR };
+ exportMemoryAllocInfo.handleTypes = m_hAllocator->GetExternalMemoryHandleTypeFlags(m_MemoryTypeIndex);
+ if(exportMemoryAllocInfo.handleTypes != 0)
+ {
+ VmaPnextChainPushFront(&allocInfo, &exportMemoryAllocInfo);
+ }
+#endif // #if VMA_EXTERNAL_MEMORY
+
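VmaPnextChainPushFront, used for each extension struct above, prepends a struct to allocInfo's pNext chain; its shape is roughly:

    // Link newStruct at the head of mainStruct's pNext chain.
    template<typename MainT, typename NewT>
    static inline void PnextChainPushFront(MainT* mainStruct, NewT* newStruct)
    {
        newStruct->pNext = mainStruct->pNext; // inherit the existing chain
        mainStruct->pNext = newStruct;        // and become its new head
    }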
VkDeviceMemory mem = VK_NULL_HANDLE;
VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
if(res < 0)
@@ -12482,6 +13726,7 @@ VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIn
VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
pBlock->Init(
m_hAllocator,
+ this, // parentBlockVector
m_hParentPool,
m_MemoryTypeIndex,
mem,
@@ -12619,7 +13864,7 @@ void VmaBlockVector::ApplyDefragmentationMovesCpu(
void VmaBlockVector::ApplyDefragmentationMovesGpu(
class VmaBlockVectorDefragmentationContext* pDefragCtx,
- const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
+ VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
VkCommandBuffer commandBuffer)
{
const size_t blockCount = m_Blocks.size();
@@ -12632,8 +13877,13 @@ void VmaBlockVector::ApplyDefragmentationMovesGpu(
for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
{
const VmaDefragmentationMove& move = moves[moveIndex];
- pDefragCtx->blockContexts[move.srcBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED;
- pDefragCtx->blockContexts[move.dstBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED;
+
+ //if(move.type == VMA_ALLOCATION_TYPE_UNKNOWN)
+ {
+ // Old-school moves still require us to map the whole block.
+ pDefragCtx->blockContexts[move.srcBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED;
+ pDefragCtx->blockContexts[move.dstBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED;
+ }
}
VMA_ASSERT(pDefragCtx->res == VK_SUCCESS);
@@ -12807,13 +14057,13 @@ void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
void VmaBlockVector::Defragment(
class VmaBlockVectorDefragmentationContext* pCtx,
- VmaDefragmentationStats* pStats,
+ VmaDefragmentationStats* pStats, VmaDefragmentationFlags flags,
VkDeviceSize& maxCpuBytesToMove, uint32_t& maxCpuAllocationsToMove,
VkDeviceSize& maxGpuBytesToMove, uint32_t& maxGpuAllocationsToMove,
VkCommandBuffer commandBuffer)
{
pCtx->res = VK_SUCCESS;
-
+
const VkMemoryPropertyFlags memPropFlags =
m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags;
const bool isHostVisible = (memPropFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0;
@@ -12844,19 +14094,28 @@ void VmaBlockVector::Defragment(
if(m_hAllocator->m_UseMutex)
{
- m_Mutex.LockWrite();
- pCtx->mutexLocked = true;
+ if(flags & VMA_DEFRAGMENTATION_FLAG_INCREMENTAL)
+ {
+ if(!m_Mutex.TryLockWrite())
+ {
+ pCtx->res = VK_ERROR_INITIALIZATION_FAILED;
+ return;
+ }
+ }
+ else
+ {
+ m_Mutex.LockWrite();
+ pCtx->mutexLocked = true;
+ }
}
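The try-lock branch above exists so an incremental defragmentation never stalls the application's allocation path: if the write lock is contended, the operation reports failure instead of waiting. The same pattern on a std::shared_mutex, as a sketch:

    #include <shared_mutex>

    static bool TryBeginIncremental(std::shared_mutex& m)
    {
        if(!m.try_lock())   // contended: fail fast instead of blocking
            return false;   // the caller maps this to VK_ERROR_INITIALIZATION_FAILED
        // ... build the defragmentation plan under the exclusive lock ...
        m.unlock();         // released again before the app performs the moves
        return true;
    }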
- pCtx->Begin(overlappingMoveSupported);
+ pCtx->Begin(overlappingMoveSupported, flags);
// Defragment.
const VkDeviceSize maxBytesToMove = defragmentOnGpu ? maxGpuBytesToMove : maxCpuBytesToMove;
const uint32_t maxAllocationsToMove = defragmentOnGpu ? maxGpuAllocationsToMove : maxCpuAllocationsToMove;
- VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> > moves =
- VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >(VmaStlAllocator<VmaDefragmentationMove>(m_hAllocator->GetAllocationCallbacks()));
- pCtx->res = pCtx->GetAlgorithm()->Defragment(moves, maxBytesToMove, maxAllocationsToMove);
+ pCtx->res = pCtx->GetAlgorithm()->Defragment(pCtx->defragmentationMoves, maxBytesToMove, maxAllocationsToMove, flags);
// Accumulate statistics.
if(pStats != VMA_NULL)
@@ -12878,16 +14137,27 @@ void VmaBlockVector::Defragment(
maxCpuAllocationsToMove -= allocationsMoved;
}
}
-
+
+ if(flags & VMA_DEFRAGMENTATION_FLAG_INCREMENTAL)
+ {
+ if(m_hAllocator->m_UseMutex)
+ m_Mutex.UnlockWrite();
+
+ if(pCtx->res >= VK_SUCCESS && !pCtx->defragmentationMoves.empty())
+ pCtx->res = VK_NOT_READY;
+
+ return;
+ }
+
if(pCtx->res >= VK_SUCCESS)
{
if(defragmentOnGpu)
{
- ApplyDefragmentationMovesGpu(pCtx, moves, commandBuffer);
+ ApplyDefragmentationMovesGpu(pCtx, pCtx->defragmentationMoves, commandBuffer);
}
else
{
- ApplyDefragmentationMovesCpu(pCtx, moves);
+ ApplyDefragmentationMovesCpu(pCtx, pCtx->defragmentationMoves);
}
}
}
@@ -12895,22 +14165,36 @@ void VmaBlockVector::Defragment(
void VmaBlockVector::DefragmentationEnd(
class VmaBlockVectorDefragmentationContext* pCtx,
+ uint32_t flags,
VmaDefragmentationStats* pStats)
{
- // Destroy buffers.
- for(size_t blockIndex = pCtx->blockContexts.size(); blockIndex--; )
+ if(flags & VMA_DEFRAGMENTATION_FLAG_INCREMENTAL && m_hAllocator->m_UseMutex)
{
- VmaBlockDefragmentationContext& blockCtx = pCtx->blockContexts[blockIndex];
- if(blockCtx.hBuffer)
- {
- (*m_hAllocator->GetVulkanFunctions().vkDestroyBuffer)(
- m_hAllocator->m_hDevice, blockCtx.hBuffer, m_hAllocator->GetAllocationCallbacks());
- }
+ VMA_ASSERT(pCtx->mutexLocked == false);
+
+ // Incremental defragmentation doesn't hold the lock, so when we enter here nothing
+ // is protecting the state. Since we mutate it below, we have to take the lock now.
+ m_Mutex.LockWrite();
+ pCtx->mutexLocked = true;
}
- if(pCtx->res >= VK_SUCCESS)
+ // If the mutex isn't locked we didn't do any work and there is nothing to delete.
+ if(pCtx->mutexLocked || !m_hAllocator->m_UseMutex)
{
- FreeEmptyBlocks(pStats);
+ // Destroy buffers.
+ for(size_t blockIndex = pCtx->blockContexts.size(); blockIndex--;)
+ {
+ VmaBlockDefragmentationContext &blockCtx = pCtx->blockContexts[blockIndex];
+ if(blockCtx.hBuffer)
+ {
+ (*m_hAllocator->GetVulkanFunctions().vkDestroyBuffer)(m_hAllocator->m_hDevice, blockCtx.hBuffer, m_hAllocator->GetAllocationCallbacks());
+ }
+ }
+
+ if(pCtx->res >= VK_SUCCESS)
+ {
+ FreeEmptyBlocks(pStats);
+ }
}
if(pCtx->mutexLocked)
@@ -12920,6 +14204,48 @@ void VmaBlockVector::DefragmentationEnd(
}
}
+uint32_t VmaBlockVector::ProcessDefragmentations(
+ class VmaBlockVectorDefragmentationContext *pCtx,
+ VmaDefragmentationPassMoveInfo* pMove, uint32_t maxMoves)
+{
+ VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
+
+ const uint32_t moveCount = VMA_MIN(uint32_t(pCtx->defragmentationMoves.size()) - pCtx->defragmentationMovesProcessed, maxMoves);
+
+ for(uint32_t i = 0; i < moveCount; ++ i)
+ {
+ VmaDefragmentationMove& move = pCtx->defragmentationMoves[pCtx->defragmentationMovesProcessed + i];
+
+ pMove->allocation = move.hAllocation;
+ pMove->memory = move.pDstBlock->GetDeviceMemory();
+ pMove->offset = move.dstOffset;
+
+ ++ pMove;
+ }
+
+ pCtx->defragmentationMovesProcessed += moveCount;
+
+ return moveCount;
+}
+
+void VmaBlockVector::CommitDefragmentations(
+ class VmaBlockVectorDefragmentationContext *pCtx,
+ VmaDefragmentationStats* pStats)
+{
+ VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
+
+ for(uint32_t i = pCtx->defragmentationMovesCommitted; i < pCtx->defragmentationMovesProcessed; ++ i)
+ {
+ const VmaDefragmentationMove &move = pCtx->defragmentationMoves[i];
+
+ move.pSrcBlock->m_pMetadata->FreeAtOffset(move.srcOffset);
+ move.hAllocation->ChangeBlockAllocation(m_hAllocator, move.pDstBlock, move.dstOffset);
+ }
+
+ pCtx->defragmentationMovesCommitted = pCtx->defragmentationMovesProcessed;
+ FreeEmptyBlocks(pStats);
+}
+
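ProcessDefragmentations and CommitDefragmentations back the incremental pass API added by this change. A hedged usage sketch, assuming the vmaBeginDefragmentationPass/vmaEndDefragmentationPass entry points and the VmaDefragmentationPassMoveInfo fields (allocation, memory, offset) filled in above:

    // Assumes vk_mem_alloc.h with this patch applied and an already-begun
    // defragmentation context created with VMA_DEFRAGMENTATION_FLAG_INCREMENTAL.
    static void RunIncrementalDefrag(VmaAllocator allocator, VmaDefragmentationContext ctx)
    {
        VmaDefragmentationPassMoveInfo moves[64];
        for(;;)
        {
            VmaDefragmentationPassInfo pass = {};
            pass.moveCount = 64;
            pass.pMoves = moves;
            vmaBeginDefragmentationPass(allocator, ctx, &pass);
            if(pass.moveCount == 0)
                break; // nothing left to move
            for(uint32_t i = 0; i < pass.moveCount; ++i)
            {
                // Recreate/rebind each resource at moves[i].memory + moves[i].offset
                // and copy its contents from the old location.
            }
            if(vmaEndDefragmentationPass(allocator, ctx) == VK_SUCCESS)
                break; // all planned moves have been committed
        }
    }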
size_t VmaBlockVector::CalcAllocationCount() const
{
size_t result = 0;
@@ -13070,7 +14396,8 @@ void VmaDefragmentationAlgorithm_Generic::AddAllocation(VmaAllocation hAlloc, Vk
VkResult VmaDefragmentationAlgorithm_Generic::DefragmentRound(
VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
VkDeviceSize maxBytesToMove,
- uint32_t maxAllocationsToMove)
+ uint32_t maxAllocationsToMove,
+ bool freeOldAllocations)
{
if(m_Blocks.empty())
{
@@ -13125,7 +14452,7 @@ VkResult VmaDefragmentationAlgorithm_Generic::DefragmentRound(
srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
}
}
-
+
BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];
@@ -13162,12 +14489,16 @@ VkResult VmaDefragmentationAlgorithm_Generic::DefragmentRound(
return VK_SUCCESS;
}
- VmaDefragmentationMove move;
+ VmaDefragmentationMove move = {};
move.srcBlockIndex = pSrcBlockInfo->m_OriginalBlockIndex;
move.dstBlockIndex = pDstBlockInfo->m_OriginalBlockIndex;
move.srcOffset = srcOffset;
move.dstOffset = dstAllocRequest.offset;
move.size = size;
+ move.hAllocation = allocInfo.m_hAllocation;
+ move.pSrcBlock = pSrcBlockInfo->m_pBlock;
+ move.pDstBlock = pDstBlockInfo->m_pBlock;
+
moves.push_back(move);
pDstBlockInfo->m_pBlock->m_pMetadata->Alloc(
@@ -13175,9 +14506,12 @@ VkResult VmaDefragmentationAlgorithm_Generic::DefragmentRound(
suballocType,
size,
allocInfo.m_hAllocation);
- pSrcBlockInfo->m_pBlock->m_pMetadata->FreeAtOffset(srcOffset);
-
- allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);
+
+ if(freeOldAllocations)
+ {
+ pSrcBlockInfo->m_pBlock->m_pMetadata->FreeAtOffset(srcOffset);
+ allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);
+ }
if(allocInfo.m_pChanged != VMA_NULL)
{
@@ -13230,7 +14564,8 @@ size_t VmaDefragmentationAlgorithm_Generic::CalcBlocksWithNonMovableCount() cons
VkResult VmaDefragmentationAlgorithm_Generic::Defragment(
VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
VkDeviceSize maxBytesToMove,
- uint32_t maxAllocationsToMove)
+ uint32_t maxAllocationsToMove,
+ VmaDefragmentationFlags flags)
{
if(!m_AllAllocations && m_AllocationCount == 0)
{
@@ -13258,7 +14593,7 @@ VkResult VmaDefragmentationAlgorithm_Generic::Defragment(
}
pBlockInfo->CalcHasNonMovableAllocations();
-
+
// This is a choice based on research.
// Option 1:
pBlockInfo->SortAllocationsByOffsetDescending();
@@ -13276,7 +14611,7 @@ VkResult VmaDefragmentationAlgorithm_Generic::Defragment(
VkResult result = VK_SUCCESS;
for(uint32_t round = 0; (round < roundCount) && (result == VK_SUCCESS); ++round)
{
- result = DefragmentRound(moves, maxBytesToMove, maxAllocationsToMove);
+ result = DefragmentRound(moves, maxBytesToMove, maxAllocationsToMove, !(flags & VMA_DEFRAGMENTATION_FLAG_INCREMENTAL));
}
return result;
@@ -13328,7 +14663,8 @@ VmaDefragmentationAlgorithm_Fast::~VmaDefragmentationAlgorithm_Fast()
VkResult VmaDefragmentationAlgorithm_Fast::Defragment(
VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
VkDeviceSize maxBytesToMove,
- uint32_t maxAllocationsToMove)
+ uint32_t maxAllocationsToMove,
+ VmaDefragmentationFlags flags)
{
VMA_ASSERT(m_AllAllocations || m_pBlockVector->CalcAllocationCount() == m_AllocationCount);
@@ -13384,6 +14720,7 @@ VkResult VmaDefragmentationAlgorithm_Fast::Defragment(
}
const VkDeviceSize srcAllocOffset = srcSuballocIt->offset;
+ VmaDefragmentationMove move = {};
// Try to place it in one of free spaces from the database.
size_t freeSpaceInfoIndex;
VkDeviceSize dstAllocOffset;
@@ -13406,7 +14743,7 @@ VkResult VmaDefragmentationAlgorithm_Fast::Defragment(
suballoc.hAllocation->ChangeOffset(dstAllocOffset);
m_BytesMoved += srcAllocSize;
++m_AllocationsMoved;
-
+
VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
++nextSuballocIt;
pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
@@ -13414,10 +14751,12 @@ VkResult VmaDefragmentationAlgorithm_Fast::Defragment(
InsertSuballoc(pFreeSpaceMetadata, suballoc);
- VmaDefragmentationMove move = {
- srcOrigBlockIndex, freeSpaceOrigBlockIndex,
- srcAllocOffset, dstAllocOffset,
- srcAllocSize };
+ move.srcBlockIndex = srcOrigBlockIndex;
+ move.dstBlockIndex = freeSpaceOrigBlockIndex;
+ move.srcOffset = srcAllocOffset;
+ move.dstOffset = dstAllocOffset;
+ move.size = srcAllocSize;
+
moves.push_back(move);
}
// Different block
@@ -13440,10 +14779,12 @@ VkResult VmaDefragmentationAlgorithm_Fast::Defragment(
InsertSuballoc(pFreeSpaceMetadata, suballoc);
- VmaDefragmentationMove move = {
- srcOrigBlockIndex, freeSpaceOrigBlockIndex,
- srcAllocOffset, dstAllocOffset,
- srcAllocSize };
+ move.srcBlockIndex = srcOrigBlockIndex;
+ move.dstBlockIndex = freeSpaceOrigBlockIndex;
+ move.srcOffset = srcAllocOffset;
+ move.dstOffset = dstAllocOffset;
+ move.size = srcAllocSize;
+
moves.push_back(move);
}
}
@@ -13498,10 +14839,13 @@ VkResult VmaDefragmentationAlgorithm_Fast::Defragment(
m_BytesMoved += srcAllocSize;
++m_AllocationsMoved;
++srcSuballocIt;
- VmaDefragmentationMove move = {
- srcOrigBlockIndex, dstOrigBlockIndex,
- srcAllocOffset, dstAllocOffset,
- srcAllocSize };
+
+ move.srcBlockIndex = srcOrigBlockIndex;
+ move.dstBlockIndex = dstOrigBlockIndex;
+ move.srcOffset = srcAllocOffset;
+ move.dstOffset = dstAllocOffset;
+ move.size = srcAllocSize;
+
moves.push_back(move);
}
}
@@ -13527,10 +14871,12 @@ VkResult VmaDefragmentationAlgorithm_Fast::Defragment(
pDstMetadata->m_Suballocations.push_back(suballoc);
- VmaDefragmentationMove move = {
- srcOrigBlockIndex, dstOrigBlockIndex,
- srcAllocOffset, dstAllocOffset,
- srcAllocSize };
+ move.srcBlockIndex = srcOrigBlockIndex;
+ move.dstBlockIndex = dstOrigBlockIndex;
+ move.srcOffset = srcAllocOffset;
+ move.dstOffset = dstAllocOffset;
+ move.size = srcAllocSize;
+
moves.push_back(move);
}
}
@@ -13538,7 +14884,7 @@ VkResult VmaDefragmentationAlgorithm_Fast::Defragment(
}
m_BlockInfos.clear();
-
+
PostprocessMetadata();
return VK_SUCCESS;
@@ -13580,7 +14926,7 @@ void VmaDefragmentationAlgorithm_Fast::PostprocessMetadata()
VmaBlockMetadata_Generic* const pMetadata =
(VmaBlockMetadata_Generic*)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata;
const VkDeviceSize blockSize = pMetadata->GetSize();
-
+
// No allocations in this block - entire area is free.
if(pMetadata->m_Suballocations.empty())
{
@@ -13680,6 +15026,10 @@ VmaBlockVectorDefragmentationContext::VmaBlockVectorDefragmentationContext(
res(VK_SUCCESS),
mutexLocked(false),
blockContexts(VmaStlAllocator<VmaBlockDefragmentationContext>(hAllocator->GetAllocationCallbacks())),
+ defragmentationMoves(VmaStlAllocator<VmaDefragmentationMove>(hAllocator->GetAllocationCallbacks())),
+ defragmentationMovesProcessed(0),
+ defragmentationMovesCommitted(0),
+ hasDefragmentationPlan(0),
m_hAllocator(hAllocator),
m_hCustomPool(hCustomPool),
m_pBlockVector(pBlockVector),
@@ -13701,7 +15051,7 @@ void VmaBlockVectorDefragmentationContext::AddAllocation(VmaAllocation hAlloc, V
m_Allocations.push_back(info);
}
-void VmaBlockVectorDefragmentationContext::Begin(bool overlappingMoveSupported)
+void VmaBlockVectorDefragmentationContext::Begin(bool overlappingMoveSupported, VmaDefragmentationFlags flags)
{
const bool allAllocations = m_AllAllocations ||
m_Allocations.size() == m_pBlockVector->CalcAllocationCount();
@@ -13715,10 +15065,12 @@ void VmaBlockVectorDefragmentationContext::Begin(bool overlappingMoveSupported)
- VMA_DEBUG_MARGIN is 0.
- All allocations in this block vector are moveable.
- There is no possibility of image/buffer granularity conflict.
+ - The defragmentation is not incremental
*/
if(VMA_DEBUG_MARGIN == 0 &&
allAllocations &&
- !m_pBlockVector->IsBufferImageGranularityConflictPossible())
+ !m_pBlockVector->IsBufferImageGranularityConflictPossible() &&
+ !(flags & VMA_DEFRAGMENTATION_FLAG_INCREMENTAL))
{
m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Fast)(
m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported);
@@ -13764,7 +15116,7 @@ VmaDefragmentationContext_T::~VmaDefragmentationContext_T()
for(size_t i = m_CustomPoolContexts.size(); i--; )
{
VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[i];
- pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_pStats);
+ pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_Flags, m_pStats);
vma_delete(m_hAllocator, pBlockVectorCtx);
}
for(size_t i = m_hAllocator->m_MemProps.memoryTypeCount; i--; )
@@ -13772,13 +15124,13 @@ VmaDefragmentationContext_T::~VmaDefragmentationContext_T()
VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[i];
if(pBlockVectorCtx)
{
- pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_pStats);
+ pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_Flags, m_pStats);
vma_delete(m_hAllocator, pBlockVectorCtx);
}
}
}
-void VmaDefragmentationContext_T::AddPools(uint32_t poolCount, VmaPool* pPools)
+void VmaDefragmentationContext_T::AddPools(uint32_t poolCount, const VmaPool* pPools)
{
for(uint32_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
{
@@ -13788,7 +15140,7 @@ void VmaDefragmentationContext_T::AddPools(uint32_t poolCount, VmaPool* pPools)
if(pool->m_BlockVector.GetAlgorithm() == 0)
{
VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL;
-
+
for(size_t i = m_CustomPoolContexts.size(); i--; )
{
if(m_CustomPoolContexts[i]->GetCustomPool() == pool)
@@ -13797,7 +15149,7 @@ void VmaDefragmentationContext_T::AddPools(uint32_t poolCount, VmaPool* pPools)
break;
}
}
-
+
if(!pBlockVectorDefragCtx)
{
pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
@@ -13815,7 +15167,7 @@ void VmaDefragmentationContext_T::AddPools(uint32_t poolCount, VmaPool* pPools)
void VmaDefragmentationContext_T::AddAllocations(
uint32_t allocationCount,
- VmaAllocation* pAllocations,
+ const VmaAllocation* pAllocations,
VkBool32* pAllocationsChanged)
{
// Dispatch pAllocations among defragmentators. Create them when necessary.
@@ -13885,13 +15237,30 @@ void VmaDefragmentationContext_T::AddAllocations(
VkResult VmaDefragmentationContext_T::Defragment(
VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove,
VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove,
- VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats)
+ VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats, VmaDefragmentationFlags flags)
{
if(pStats)
{
memset(pStats, 0, sizeof(VmaDefragmentationStats));
}
+ if(flags & VMA_DEFRAGMENTATION_FLAG_INCREMENTAL)
+ {
+ // For incremental defragmentations we just earmark how much we are allowed to move.
+ // The real work happens in the individual defragmentation passes.
+ m_MaxCpuBytesToMove = maxCpuBytesToMove;
+ m_MaxCpuAllocationsToMove = maxCpuAllocationsToMove;
+
+ m_MaxGpuBytesToMove = maxGpuBytesToMove;
+ m_MaxGpuAllocationsToMove = maxGpuAllocationsToMove;
+
+ if(m_MaxCpuBytesToMove == 0 && m_MaxCpuAllocationsToMove == 0 &&
+ m_MaxGpuBytesToMove == 0 && m_MaxGpuAllocationsToMove == 0)
+ return VK_SUCCESS;
+
+ return VK_NOT_READY;
+ }
+
if(commandBuffer == VK_NULL_HANDLE)
{
maxGpuBytesToMove = 0;
@@ -13911,7 +15280,7 @@ VkResult VmaDefragmentationContext_T::Defragment(
VMA_ASSERT(pBlockVectorCtx->GetBlockVector());
pBlockVectorCtx->GetBlockVector()->Defragment(
pBlockVectorCtx,
- pStats,
+ pStats, flags,
maxCpuBytesToMove, maxCpuAllocationsToMove,
maxGpuBytesToMove, maxGpuAllocationsToMove,
commandBuffer);
@@ -13931,7 +15300,7 @@ VkResult VmaDefragmentationContext_T::Defragment(
VMA_ASSERT(pBlockVectorCtx && pBlockVectorCtx->GetBlockVector());
pBlockVectorCtx->GetBlockVector()->Defragment(
pBlockVectorCtx,
- pStats,
+ pStats, flags,
maxCpuBytesToMove, maxCpuAllocationsToMove,
maxGpuBytesToMove, maxGpuAllocationsToMove,
commandBuffer);
@@ -13944,6 +15313,132 @@ VkResult VmaDefragmentationContext_T::Defragment(
return res;
}
+VkResult VmaDefragmentationContext_T::DefragmentPassBegin(VmaDefragmentationPassInfo* pInfo)
+{
+ VmaDefragmentationPassMoveInfo* pCurrentMove = pInfo->pMoves;
+ uint32_t movesLeft = pInfo->moveCount;
+
+ // Process default pools.
+ for(uint32_t memTypeIndex = 0;
+ memTypeIndex < m_hAllocator->GetMemoryTypeCount();
+ ++memTypeIndex)
+ {
+ VmaBlockVectorDefragmentationContext *pBlockVectorCtx = m_DefaultPoolContexts[memTypeIndex];
+ if(pBlockVectorCtx)
+ {
+ VMA_ASSERT(pBlockVectorCtx->GetBlockVector());
+
+ if(!pBlockVectorCtx->hasDefragmentationPlan)
+ {
+ pBlockVectorCtx->GetBlockVector()->Defragment(
+ pBlockVectorCtx,
+ m_pStats, m_Flags,
+ m_MaxCpuBytesToMove, m_MaxCpuAllocationsToMove,
+ m_MaxGpuBytesToMove, m_MaxGpuAllocationsToMove,
+ VK_NULL_HANDLE);
+
+ if(pBlockVectorCtx->res < VK_SUCCESS)
+ continue;
+
+ pBlockVectorCtx->hasDefragmentationPlan = true;
+ }
+
+ const uint32_t processed = pBlockVectorCtx->GetBlockVector()->ProcessDefragmentations(
+ pBlockVectorCtx,
+ pCurrentMove, movesLeft);
+
+ movesLeft -= processed;
+ pCurrentMove += processed;
+ }
+ }
+
+ // Process custom pools.
+ for(size_t customCtxIndex = 0, customCtxCount = m_CustomPoolContexts.size();
+ customCtxIndex < customCtxCount;
+ ++customCtxIndex)
+ {
+ VmaBlockVectorDefragmentationContext *pBlockVectorCtx = m_CustomPoolContexts[customCtxIndex];
+ VMA_ASSERT(pBlockVectorCtx && pBlockVectorCtx->GetBlockVector());
+
+ if(!pBlockVectorCtx->hasDefragmentationPlan)
+ {
+ pBlockVectorCtx->GetBlockVector()->Defragment(
+ pBlockVectorCtx,
+ m_pStats, m_Flags,
+ m_MaxCpuBytesToMove, m_MaxCpuAllocationsToMove,
+ m_MaxGpuBytesToMove, m_MaxGpuAllocationsToMove,
+ VK_NULL_HANDLE);
+
+ if(pBlockVectorCtx->res < VK_SUCCESS)
+ continue;
+
+ pBlockVectorCtx->hasDefragmentationPlan = true;
+ }
+
+ const uint32_t processed = pBlockVectorCtx->GetBlockVector()->ProcessDefragmentations(
+ pBlockVectorCtx,
+ pCurrentMove, movesLeft);
+
+ movesLeft -= processed;
+ pCurrentMove += processed;
+ }
+
+ pInfo->moveCount = pInfo->moveCount - movesLeft;
+
+ return VK_SUCCESS;
+}
+VkResult VmaDefragmentationContext_T::DefragmentPassEnd()
+{
+ VkResult res = VK_SUCCESS;
+
+ // Process default pools.
+ for(uint32_t memTypeIndex = 0;
+ memTypeIndex < m_hAllocator->GetMemoryTypeCount();
+ ++memTypeIndex)
+ {
+ VmaBlockVectorDefragmentationContext *pBlockVectorCtx = m_DefaultPoolContexts[memTypeIndex];
+ if(pBlockVectorCtx)
+ {
+ VMA_ASSERT(pBlockVectorCtx->GetBlockVector());
+
+ if(!pBlockVectorCtx->hasDefragmentationPlan)
+ {
+ res = VK_NOT_READY;
+ continue;
+ }
+
+ pBlockVectorCtx->GetBlockVector()->CommitDefragmentations(
+ pBlockVectorCtx, m_pStats);
+
+ if(pBlockVectorCtx->defragmentationMoves.size() != pBlockVectorCtx->defragmentationMovesCommitted)
+ res = VK_NOT_READY;
+ }
+ }
+
+ // Process custom pools.
+ for(size_t customCtxIndex = 0, customCtxCount = m_CustomPoolContexts.size();
+ customCtxIndex < customCtxCount;
+ ++customCtxIndex)
+ {
+ VmaBlockVectorDefragmentationContext *pBlockVectorCtx = m_CustomPoolContexts[customCtxIndex];
+ VMA_ASSERT(pBlockVectorCtx && pBlockVectorCtx->GetBlockVector());
+
+ if(!pBlockVectorCtx->hasDefragmentationPlan)
+ {
+ res = VK_NOT_READY;
+ continue;
+ }
+
+ pBlockVectorCtx->GetBlockVector()->CommitDefragmentations(
+ pBlockVectorCtx, m_pStats);
+
+ if(pBlockVectorCtx->defragmentationMoves.size() != pBlockVectorCtx->defragmentationMovesCommitted)
+ res = VK_NOT_READY;
+ }
+
+ return res;
+}
+
////////////////////////////////////////////////////////////////////////////////
// VmaRecorder
@@ -13953,8 +15448,7 @@ VmaRecorder::VmaRecorder() :
m_UseMutex(true),
m_Flags(0),
m_File(VMA_NULL),
- m_Freq(INT64_MAX),
- m_StartCounter(INT64_MAX)
+ m_RecordingStartTime(std::chrono::high_resolution_clock::now())
{
}
@@ -13963,15 +15457,23 @@ VkResult VmaRecorder::Init(const VmaRecordSettings& settings, bool useMutex)
m_UseMutex = useMutex;
m_Flags = settings.flags;
- QueryPerformanceFrequency((LARGE_INTEGER*)&m_Freq);
- QueryPerformanceCounter((LARGE_INTEGER*)&m_StartCounter);
-
+#if defined(_WIN32)
// Open file for writing.
errno_t err = fopen_s(&m_File, settings.pFilePath, "wb");
+
if(err != 0)
{
return VK_ERROR_INITIALIZATION_FAILED;
}
+#else
+ // Open file for writing.
+ m_File = fopen(settings.pFilePath, "wb");
+
+ if(m_File == 0)
+ {
+ return VK_ERROR_INITIALIZATION_FAILED;
+ }
+#endif
// Write header.
fprintf(m_File, "%s\n", "Vulkan Memory Allocator,Calls recording");
@@ -14431,7 +15933,8 @@ VmaRecorder::UserDataString::UserDataString(VmaAllocationCreateFlags allocFlags,
}
else
{
- sprintf_s(m_PtrStr, "%p", pUserData);
+ // If VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT is not specified, pUserData is an opaque pointer, so format its address as a string and store that.
+ snprintf(m_PtrStr, 17, "%p", pUserData);
m_Str = m_PtrStr;
}
}
@@ -14447,7 +15950,8 @@ void VmaRecorder::WriteConfiguration(
uint32_t vulkanApiVersion,
bool dedicatedAllocationExtensionEnabled,
bool bindMemory2ExtensionEnabled,
- bool memoryBudgetExtensionEnabled)
+ bool memoryBudgetExtensionEnabled,
+ bool deviceCoherentMemoryExtensionEnabled)
{
fprintf(m_File, "Config,Begin\n");
@@ -14480,9 +15984,10 @@ void VmaRecorder::WriteConfiguration(
fprintf(m_File, "Extension,VK_KHR_dedicated_allocation,%u\n", dedicatedAllocationExtensionEnabled ? 1 : 0);
fprintf(m_File, "Extension,VK_KHR_bind_memory2,%u\n", bindMemory2ExtensionEnabled ? 1 : 0);
fprintf(m_File, "Extension,VK_EXT_memory_budget,%u\n", memoryBudgetExtensionEnabled ? 1 : 0);
+ fprintf(m_File, "Extension,VK_AMD_device_coherent_memory,%u\n", deviceCoherentMemoryExtensionEnabled ? 1 : 0);
fprintf(m_File, "Macro,VMA_DEBUG_ALWAYS_DEDICATED_MEMORY,%u\n", VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ? 1 : 0);
- fprintf(m_File, "Macro,VMA_DEBUG_ALIGNMENT,%llu\n", (VkDeviceSize)VMA_DEBUG_ALIGNMENT);
+ fprintf(m_File, "Macro,VMA_MIN_ALIGNMENT,%llu\n", (VkDeviceSize)VMA_MIN_ALIGNMENT);
fprintf(m_File, "Macro,VMA_DEBUG_MARGIN,%llu\n", (VkDeviceSize)VMA_DEBUG_MARGIN);
fprintf(m_File, "Macro,VMA_DEBUG_INITIALIZE_ALLOCATIONS,%u\n", VMA_DEBUG_INITIALIZE_ALLOCATIONS ? 1 : 0);
fprintf(m_File, "Macro,VMA_DEBUG_DETECT_CORRUPTION,%u\n", VMA_DEBUG_DETECT_CORRUPTION ? 1 : 0);
@@ -14496,11 +16001,22 @@ void VmaRecorder::WriteConfiguration(
void VmaRecorder::GetBasicParams(CallParams& outParams)
{
- outParams.threadId = GetCurrentThreadId();
+ #if defined(_WIN32)
+ outParams.threadId = GetCurrentThreadId();
+ #else
+ // Use C++11 features to get thread id and convert it to uint32_t.
+ // There is room for optimization since sstream is quite slow.
+ // Is there a better way to convert std::this_thread::get_id() to uint32_t?
+ std::thread::id thread_id = std::this_thread::get_id();
+ std::stringstream thread_id_to_string_converter;
+ thread_id_to_string_converter << thread_id;
+ std::string thread_id_as_string = thread_id_to_string_converter.str();
+ outParams.threadId = static_cast<uint32_t>(std::stoi(thread_id_as_string.c_str()));
+ #endif
+
+ auto current_time = std::chrono::high_resolution_clock::now();
- LARGE_INTEGER counter;
- QueryPerformanceCounter(&counter);
- outParams.time = (double)(counter.QuadPart - m_StartCounter) / (double)m_Freq;
+ outParams.time = std::chrono::duration<double, std::chrono::seconds::period>(current_time - m_RecordingStartTime).count();
}
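One possible answer to the question in the comment above, avoiding the stringstream round-trip entirely, is to hash the thread id. Collisions are theoretically possible but harmless for a trace identifier; a sketch:

    #include <cstdint>
    #include <functional>
    #include <thread>

    static uint32_t ThreadIdAsU32()
    {
        const size_t h = std::hash<std::thread::id>{}(std::this_thread::get_id());
        return static_cast<uint32_t>(h);
    }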
void VmaRecorder::PrintPointerList(uint64_t count, const VmaAllocation* pItems)
@@ -14533,10 +16049,10 @@ VmaAllocationObjectAllocator::VmaAllocationObjectAllocator(const VkAllocationCal
{
}
-VmaAllocation VmaAllocationObjectAllocator::Allocate()
+template<typename... Types> VmaAllocation VmaAllocationObjectAllocator::Allocate(Types... args)
{
VmaMutexLock mutexLock(m_Mutex);
- return m_Allocator.Alloc();
+ return m_Allocator.Alloc<Types...>(std::forward<Types>(args)...);
}
void VmaAllocationObjectAllocator::Free(VmaAllocation hAlloc)
@@ -14554,6 +16070,9 @@ VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
m_UseKhrBindMemory2((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT) != 0),
m_UseExtMemoryBudget((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT) != 0),
+ m_UseAmdDeviceCoherentMemory((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_AMD_DEVICE_COHERENT_MEMORY_BIT) != 0),
+ m_UseKhrBufferDeviceAddress((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT) != 0),
+ m_UseExtMemoryPriority((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT) != 0),
m_hDevice(pCreateInfo->device),
m_hInstance(pCreateInfo->instance),
m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
@@ -14561,12 +16080,13 @@ VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
*pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
m_AllocationObjectAllocator(&m_AllocationCallbacks),
m_HeapSizeLimitMask(0),
+ m_DeviceMemoryCount(0),
m_PreferredLargeHeapBlockSize(0),
m_PhysicalDevice(pCreateInfo->physicalDevice),
m_CurrentFrameIndex(0),
m_GpuDefragmentationMemoryTypeBits(UINT32_MAX),
- m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks())),
- m_NextPoolId(0)
+ m_NextPoolId(0),
+ m_GlobalMemoryTypeBits(UINT32_MAX)
#if VMA_RECORDING_ENABLED
,m_pRecorder(VMA_NULL)
#endif
@@ -14583,7 +16103,7 @@ VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
VMA_ASSERT(VMA_DEBUG_MARGIN % sizeof(uint32_t) == 0);
}
- VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
+ VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device && pCreateInfo->instance);
if(m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0))
{
@@ -14606,23 +16126,46 @@ VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT set but required extension is disabled by preprocessor macros.");
}
#endif
+#if !(VMA_BUFFER_DEVICE_ADDRESS)
+ if(m_UseKhrBufferDeviceAddress)
+ {
+ VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT is set but required extension or Vulkan 1.2 is not available in your Vulkan header or its support in VMA has been disabled by a preprocessor macro.");
+ }
+#endif
+#if VMA_VULKAN_VERSION < 1002000
+ if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 2, 0))
+ {
+ VMA_ASSERT(0 && "vulkanApiVersion >= VK_API_VERSION_1_2 but required Vulkan version is disabled by preprocessor macros.");
+ }
+#endif
#if VMA_VULKAN_VERSION < 1001000
if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
{
VMA_ASSERT(0 && "vulkanApiVersion >= VK_API_VERSION_1_1 but required Vulkan version is disabled by preprocessor macros.");
}
#endif
+#if !(VMA_MEMORY_PRIORITY)
+ if(m_UseExtMemoryPriority)
+ {
+ VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT is set but required extension is not available in your Vulkan header or its support in VMA has been disabled by a preprocessor macro.");
+ }
+#endif
memset(&m_DeviceMemoryCallbacks, 0 ,sizeof(m_DeviceMemoryCallbacks));
memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
memset(&m_MemProps, 0, sizeof(m_MemProps));
-
+
memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
- memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
+ memset(&m_pSmallBufferBlockVectors, 0, sizeof(m_pSmallBufferBlockVectors));
memset(&m_VulkanFunctions, 0, sizeof(m_VulkanFunctions));
+#if VMA_EXTERNAL_MEMORY
+ memset(&m_TypeExternalMemoryHandleTypes, 0, sizeof(m_TypeExternalMemoryHandleTypes));
+#endif // #if VMA_EXTERNAL_MEMORY
+
if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
{
+ m_DeviceMemoryCallbacks.pUserData = pCreateInfo->pDeviceMemoryCallbacks->pUserData;
m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
}
@@ -14632,7 +16175,7 @@ VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
(*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
(*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
- VMA_ASSERT(VmaIsPow2(VMA_DEBUG_ALIGNMENT));
+ VMA_ASSERT(VmaIsPow2(VMA_MIN_ALIGNMENT));
VMA_ASSERT(VmaIsPow2(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY));
VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.bufferImageGranularity));
VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.nonCoherentAtomSize));
@@ -14640,6 +16183,16 @@ VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
+ m_GlobalMemoryTypeBits = CalculateGlobalMemoryTypeBits();
+
+#if VMA_EXTERNAL_MEMORY
+ if(pCreateInfo->pTypeExternalMemoryHandleTypes != VMA_NULL)
+ {
+ memcpy(m_TypeExternalMemoryHandleTypes, pCreateInfo->pTypeExternalMemoryHandleTypes,
+ sizeof(VkExternalMemoryHandleTypeFlagsKHR) * GetMemoryTypeCount());
+ }
+#endif // #if VMA_EXTERNAL_MEMORY
+
if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
{
for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
@@ -14670,11 +16223,26 @@ VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
GetBufferImageGranularity(),
pCreateInfo->frameInUseCount,
false, // explicitBlockSize
- false); // linearAlgorithm
+ false, // linearAlgorithm
+ 0.5f, // priority (0.5 is the default per Vulkan spec)
+ GetMemoryTypeMinAlignment(memTypeIndex), // minAllocationAlignment
+            VMA_NULL); // pMemoryAllocateNext
+ m_pSmallBufferBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
+ this,
+ VK_NULL_HANDLE, // hParentPool
+ memTypeIndex,
+ preferredBlockSize,
+ 0,
+ SIZE_MAX,
+ 1, // bufferImageGranularity forced to 1 !!!
+ pCreateInfo->frameInUseCount,
+ false, // explicitBlockSize
+ false, // linearAlgorithm
+ 0.5f, // priority (0.5 is the default per Vulkan spec)
+ GetMemoryTypeMinAlignment(memTypeIndex), // minAllocationAlignment
+            VMA_NULL); // pMemoryAllocateNext
// No need to call m_pBlockVectors[memTypeIndex][blockVectorTypeIndex]->CreateMinBlocks here,
        // because minBlockCount is 0.
- m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
-
}
}
@@ -14698,7 +16266,8 @@ VkResult VmaAllocator_T::Init(const VmaAllocatorCreateInfo* pCreateInfo)
m_VulkanApiVersion,
m_UseKhrDedicatedAllocation,
m_UseKhrBindMemory2,
- m_UseExtMemoryBudget);
+ m_UseExtMemoryBudget,
+ m_UseAmdDeviceCoherentMemory);
m_pRecorder->RecordCreateAllocator(GetCurrentFrameIndex());
#else
VMA_ASSERT(0 && "VmaAllocatorCreateInfo::pRecordSettings used, but not supported due to VMA_RECORDING_ENABLED not defined to 1.");
@@ -14725,24 +16294,44 @@ VmaAllocator_T::~VmaAllocator_T()
vma_delete(this, m_pRecorder);
}
#endif
-
- VMA_ASSERT(m_Pools.empty());
- for(size_t i = GetMemoryTypeCount(); i--; )
+ VMA_ASSERT(m_Pools.IsEmpty());
+
+ for(size_t memTypeIndex = GetMemoryTypeCount(); memTypeIndex--; )
{
- if(m_pDedicatedAllocations[i] != VMA_NULL && !m_pDedicatedAllocations[i]->empty())
+ if(!m_DedicatedAllocations[memTypeIndex].IsEmpty())
{
VMA_ASSERT(0 && "Unfreed dedicated allocations found.");
}
- vma_delete(this, m_pDedicatedAllocations[i]);
- vma_delete(this, m_pBlockVectors[i]);
+ vma_delete(this, m_pSmallBufferBlockVectors[memTypeIndex]);
+ vma_delete(this, m_pBlockVectors[memTypeIndex]);
}
}
void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
{
#if VMA_STATIC_VULKAN_FUNCTIONS == 1
+ ImportVulkanFunctions_Static();
+#endif
+
+ if(pVulkanFunctions != VMA_NULL)
+ {
+ ImportVulkanFunctions_Custom(pVulkanFunctions);
+ }
+
+#if VMA_DYNAMIC_VULKAN_FUNCTIONS == 1
+ ImportVulkanFunctions_Dynamic();
+#endif
+
+ ValidateVulkanFunctions();
+}
+
+#if VMA_STATIC_VULKAN_FUNCTIONS == 1
+
+void VmaAllocator_T::ImportVulkanFunctions_Static()
+{
+ // Vulkan 1.0
m_VulkanFunctions.vkGetPhysicalDeviceProperties = (PFN_vkGetPhysicalDeviceProperties)vkGetPhysicalDeviceProperties;
m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = (PFN_vkGetPhysicalDeviceMemoryProperties)vkGetPhysicalDeviceMemoryProperties;
m_VulkanFunctions.vkAllocateMemory = (PFN_vkAllocateMemory)vkAllocateMemory;
@@ -14760,89 +16349,137 @@ void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunc
m_VulkanFunctions.vkCreateImage = (PFN_vkCreateImage)vkCreateImage;
m_VulkanFunctions.vkDestroyImage = (PFN_vkDestroyImage)vkDestroyImage;
m_VulkanFunctions.vkCmdCopyBuffer = (PFN_vkCmdCopyBuffer)vkCmdCopyBuffer;
+
+ // Vulkan 1.1
#if VMA_VULKAN_VERSION >= 1001000
if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
{
- VMA_ASSERT(m_hInstance != VK_NULL_HANDLE);
- m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR =
- (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2");
- m_VulkanFunctions.vkGetImageMemoryRequirements2KHR =
- (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2");
- m_VulkanFunctions.vkBindBufferMemory2KHR =
- (PFN_vkBindBufferMemory2KHR)vkGetDeviceProcAddr(m_hDevice, "vkBindBufferMemory2");
- m_VulkanFunctions.vkBindImageMemory2KHR =
- (PFN_vkBindImageMemory2KHR)vkGetDeviceProcAddr(m_hDevice, "vkBindImageMemory2");
- m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties2KHR =
- (PFN_vkGetPhysicalDeviceMemoryProperties2KHR)vkGetInstanceProcAddr(m_hInstance, "vkGetPhysicalDeviceMemoryProperties2");
+ m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR = (PFN_vkGetBufferMemoryRequirements2)vkGetBufferMemoryRequirements2;
+ m_VulkanFunctions.vkGetImageMemoryRequirements2KHR = (PFN_vkGetImageMemoryRequirements2)vkGetImageMemoryRequirements2;
+ m_VulkanFunctions.vkBindBufferMemory2KHR = (PFN_vkBindBufferMemory2)vkBindBufferMemory2;
+ m_VulkanFunctions.vkBindImageMemory2KHR = (PFN_vkBindImageMemory2)vkBindImageMemory2;
+ m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties2KHR = (PFN_vkGetPhysicalDeviceMemoryProperties2)vkGetPhysicalDeviceMemoryProperties2;
}
#endif
+}
+
+#endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1
+
+void VmaAllocator_T::ImportVulkanFunctions_Custom(const VmaVulkanFunctions* pVulkanFunctions)
+{
+ VMA_ASSERT(pVulkanFunctions != VMA_NULL);
+
+#define VMA_COPY_IF_NOT_NULL(funcName) \
+ if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;
+
+ VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
+ VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
+ VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
+ VMA_COPY_IF_NOT_NULL(vkFreeMemory);
+ VMA_COPY_IF_NOT_NULL(vkMapMemory);
+ VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
+ VMA_COPY_IF_NOT_NULL(vkFlushMappedMemoryRanges);
+ VMA_COPY_IF_NOT_NULL(vkInvalidateMappedMemoryRanges);
+ VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
+ VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
+ VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
+ VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
+ VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
+ VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
+ VMA_COPY_IF_NOT_NULL(vkCreateImage);
+ VMA_COPY_IF_NOT_NULL(vkDestroyImage);
+ VMA_COPY_IF_NOT_NULL(vkCmdCopyBuffer);
+
+#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
+ VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
+ VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
+#endif
+
+#if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000
+ VMA_COPY_IF_NOT_NULL(vkBindBufferMemory2KHR);
+ VMA_COPY_IF_NOT_NULL(vkBindImageMemory2KHR);
+#endif
+
+#if VMA_MEMORY_BUDGET
+ VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties2KHR);
+#endif
+
+#undef VMA_COPY_IF_NOT_NULL
+}
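+
+// Usage sketch (illustrative, not part of this patch): an application may supply its
+// own pointers, e.g. fetched through volk or another loader; members left null fall
+// through to the static/dynamic import paths. The loader name below is an assumption.
+//
+//     VmaVulkanFunctions funcs = {};
+//     funcs.vkAllocateMemory = myLoadedVkAllocateMemory; // hypothetical loader result
+//     VmaAllocatorCreateInfo allocatorInfo = {};
+//     allocatorInfo.pVulkanFunctions = &funcs;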
+
+#if VMA_DYNAMIC_VULKAN_FUNCTIONS == 1
+
+void VmaAllocator_T::ImportVulkanFunctions_Dynamic()
+{
+#define VMA_FETCH_INSTANCE_FUNC(memberName, functionPointerType, functionNameString) \
+ if(m_VulkanFunctions.memberName == VMA_NULL) \
+ m_VulkanFunctions.memberName = \
+ (functionPointerType)vkGetInstanceProcAddr(m_hInstance, functionNameString);
+#define VMA_FETCH_DEVICE_FUNC(memberName, functionPointerType, functionNameString) \
+ if(m_VulkanFunctions.memberName == VMA_NULL) \
+ m_VulkanFunctions.memberName = \
+ (functionPointerType)vkGetDeviceProcAddr(m_hDevice, functionNameString);
+
+ VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceProperties, PFN_vkGetPhysicalDeviceProperties, "vkGetPhysicalDeviceProperties");
+ VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties, PFN_vkGetPhysicalDeviceMemoryProperties, "vkGetPhysicalDeviceMemoryProperties");
+ VMA_FETCH_DEVICE_FUNC(vkAllocateMemory, PFN_vkAllocateMemory, "vkAllocateMemory");
+ VMA_FETCH_DEVICE_FUNC(vkFreeMemory, PFN_vkFreeMemory, "vkFreeMemory");
+ VMA_FETCH_DEVICE_FUNC(vkMapMemory, PFN_vkMapMemory, "vkMapMemory");
+ VMA_FETCH_DEVICE_FUNC(vkUnmapMemory, PFN_vkUnmapMemory, "vkUnmapMemory");
+ VMA_FETCH_DEVICE_FUNC(vkFlushMappedMemoryRanges, PFN_vkFlushMappedMemoryRanges, "vkFlushMappedMemoryRanges");
+ VMA_FETCH_DEVICE_FUNC(vkInvalidateMappedMemoryRanges, PFN_vkInvalidateMappedMemoryRanges, "vkInvalidateMappedMemoryRanges");
+ VMA_FETCH_DEVICE_FUNC(vkBindBufferMemory, PFN_vkBindBufferMemory, "vkBindBufferMemory");
+ VMA_FETCH_DEVICE_FUNC(vkBindImageMemory, PFN_vkBindImageMemory, "vkBindImageMemory");
+ VMA_FETCH_DEVICE_FUNC(vkGetBufferMemoryRequirements, PFN_vkGetBufferMemoryRequirements, "vkGetBufferMemoryRequirements");
+ VMA_FETCH_DEVICE_FUNC(vkGetImageMemoryRequirements, PFN_vkGetImageMemoryRequirements, "vkGetImageMemoryRequirements");
+ VMA_FETCH_DEVICE_FUNC(vkCreateBuffer, PFN_vkCreateBuffer, "vkCreateBuffer");
+ VMA_FETCH_DEVICE_FUNC(vkDestroyBuffer, PFN_vkDestroyBuffer, "vkDestroyBuffer");
+ VMA_FETCH_DEVICE_FUNC(vkCreateImage, PFN_vkCreateImage, "vkCreateImage");
+ VMA_FETCH_DEVICE_FUNC(vkDestroyImage, PFN_vkDestroyImage, "vkDestroyImage");
+ VMA_FETCH_DEVICE_FUNC(vkCmdCopyBuffer, PFN_vkCmdCopyBuffer, "vkCmdCopyBuffer");
+
+#if VMA_VULKAN_VERSION >= 1001000
+ if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
+ {
+ VMA_FETCH_DEVICE_FUNC(vkGetBufferMemoryRequirements2KHR, PFN_vkGetBufferMemoryRequirements2, "vkGetBufferMemoryRequirements2");
+ VMA_FETCH_DEVICE_FUNC(vkGetImageMemoryRequirements2KHR, PFN_vkGetImageMemoryRequirements2, "vkGetImageMemoryRequirements2");
+ VMA_FETCH_DEVICE_FUNC(vkBindBufferMemory2KHR, PFN_vkBindBufferMemory2, "vkBindBufferMemory2");
+ VMA_FETCH_DEVICE_FUNC(vkBindImageMemory2KHR, PFN_vkBindImageMemory2, "vkBindImageMemory2");
+ VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties2KHR, PFN_vkGetPhysicalDeviceMemoryProperties2, "vkGetPhysicalDeviceMemoryProperties2");
+ }
+#endif
+
#if VMA_DEDICATED_ALLOCATION
if(m_UseKhrDedicatedAllocation)
{
- m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR =
- (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2KHR");
- m_VulkanFunctions.vkGetImageMemoryRequirements2KHR =
- (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2KHR");
+ VMA_FETCH_DEVICE_FUNC(vkGetBufferMemoryRequirements2KHR, PFN_vkGetBufferMemoryRequirements2KHR, "vkGetBufferMemoryRequirements2KHR");
+ VMA_FETCH_DEVICE_FUNC(vkGetImageMemoryRequirements2KHR, PFN_vkGetImageMemoryRequirements2KHR, "vkGetImageMemoryRequirements2KHR");
}
#endif
+
#if VMA_BIND_MEMORY2
if(m_UseKhrBindMemory2)
{
- m_VulkanFunctions.vkBindBufferMemory2KHR =
- (PFN_vkBindBufferMemory2KHR)vkGetDeviceProcAddr(m_hDevice, "vkBindBufferMemory2KHR");
- m_VulkanFunctions.vkBindImageMemory2KHR =
- (PFN_vkBindImageMemory2KHR)vkGetDeviceProcAddr(m_hDevice, "vkBindImageMemory2KHR");
+ VMA_FETCH_DEVICE_FUNC(vkBindBufferMemory2KHR, PFN_vkBindBufferMemory2KHR, "vkBindBufferMemory2KHR");
+ VMA_FETCH_DEVICE_FUNC(vkBindImageMemory2KHR, PFN_vkBindImageMemory2KHR, "vkBindImageMemory2KHR");
}
#endif // #if VMA_BIND_MEMORY2
+
#if VMA_MEMORY_BUDGET
- if(m_UseExtMemoryBudget && m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0))
+ if(m_UseExtMemoryBudget)
{
- VMA_ASSERT(m_hInstance != VK_NULL_HANDLE);
- m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties2KHR =
- (PFN_vkGetPhysicalDeviceMemoryProperties2KHR)vkGetInstanceProcAddr(m_hInstance, "vkGetPhysicalDeviceMemoryProperties2KHR");
+ VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties2KHR, PFN_vkGetPhysicalDeviceMemoryProperties2KHR, "vkGetPhysicalDeviceMemoryProperties2KHR");
}
#endif // #if VMA_MEMORY_BUDGET
-#endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1
-
-#define VMA_COPY_IF_NOT_NULL(funcName) \
- if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;
- if(pVulkanFunctions != VMA_NULL)
- {
- VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
- VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
- VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
- VMA_COPY_IF_NOT_NULL(vkFreeMemory);
- VMA_COPY_IF_NOT_NULL(vkMapMemory);
- VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
- VMA_COPY_IF_NOT_NULL(vkFlushMappedMemoryRanges);
- VMA_COPY_IF_NOT_NULL(vkInvalidateMappedMemoryRanges);
- VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
- VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
- VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
- VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
- VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
- VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
- VMA_COPY_IF_NOT_NULL(vkCreateImage);
- VMA_COPY_IF_NOT_NULL(vkDestroyImage);
- VMA_COPY_IF_NOT_NULL(vkCmdCopyBuffer);
-#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
- VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
- VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
-#endif
-#if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000
- VMA_COPY_IF_NOT_NULL(vkBindBufferMemory2KHR);
- VMA_COPY_IF_NOT_NULL(vkBindImageMemory2KHR);
-#endif
-#if VMA_MEMORY_BUDGET
- VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties2KHR);
-#endif
- }
+#undef VMA_FETCH_DEVICE_FUNC
+#undef VMA_FETCH_INSTANCE_FUNC
+}
-#undef VMA_COPY_IF_NOT_NULL
+#endif // #if VMA_DYNAMIC_VULKAN_FUNCTIONS == 1
- // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
- // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
+void VmaAllocator_T::ValidateVulkanFunctions()
+{
VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
@@ -14860,6 +16497,7 @@ void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunc
VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
VMA_ASSERT(m_VulkanFunctions.vkCmdCopyBuffer != VMA_NULL);
+
#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0) || m_UseKhrDedicatedAllocation)
{
@@ -14867,6 +16505,7 @@ void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunc
VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
}
#endif
+
#if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000
if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0) || m_UseKhrBindMemory2)
{
@@ -14874,6 +16513,7 @@ void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunc
VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory2KHR != VMA_NULL);
}
#endif
+
#if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000
if(m_UseExtMemoryBudget || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
{
@@ -14895,6 +16535,7 @@ VkResult VmaAllocator_T::AllocateMemoryOfType(
VkDeviceSize alignment,
bool dedicatedAllocation,
VkBuffer dedicatedBuffer,
+ VkBufferUsageFlags dedicatedBufferUsage,
VkImage dedicatedImage,
const VmaAllocationCreateInfo& createInfo,
uint32_t memTypeIndex,
@@ -14919,7 +16560,8 @@ VkResult VmaAllocator_T::AllocateMemoryOfType(
finalCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
}
- VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
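+    // Small buffers (<= 4096 B for now) are routed to a separate per-type block vector,
+    // created in the allocator constructor with bufferImageGranularity forced to 1,
+    // so they pack tightly without granularity padding.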
+ bool isSmallBuffer = dedicatedBuffer != VK_NULL_HANDLE && size <= 4096; // TODO
+ VmaBlockVector* const blockVector = isSmallBuffer ? m_pSmallBufferBlockVectors[memTypeIndex] : m_pBlockVectors[memTypeIndex];
VMA_ASSERT(blockVector);
const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
@@ -14952,7 +16594,9 @@ VkResult VmaAllocator_T::AllocateMemoryOfType(
(finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
(finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
finalCreateInfo.pUserData,
+ finalCreateInfo.priority,
dedicatedBuffer,
+ dedicatedBufferUsage,
dedicatedImage,
allocationCount,
pAllocations);
@@ -14978,32 +16622,40 @@ VkResult VmaAllocator_T::AllocateMemoryOfType(
{
return VK_ERROR_OUT_OF_DEVICE_MEMORY;
}
+
+            // Protection against creating every allocation as dedicated once heap size/budget
+            // is reached or exceeded, which would quickly deplete maxMemoryAllocationCount:
+            // don't attempt dedicated allocations above 3/4 of the maximum allocation count.
+ if(m_DeviceMemoryCount.load() > m_PhysicalDeviceProperties.limits.maxMemoryAllocationCount * 3 / 4)
+ {
+ return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+ }
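+    // (With the spec-minimum maxMemoryAllocationCount of 4096, for example, dedicated
+    // allocations stop being attempted once more than 3072 VkDeviceMemory objects are live.)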
+
+ res = AllocateDedicatedMemory(
+ size,
+ suballocType,
+ memTypeIndex,
+ (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT) != 0,
+ (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
+ (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
+ finalCreateInfo.pUserData,
+ finalCreateInfo.priority,
+ dedicatedBuffer,
+ dedicatedBufferUsage,
+ dedicatedImage,
+ allocationCount,
+ pAllocations);
+ if(res == VK_SUCCESS)
+ {
+            // Succeeded: AllocateDedicatedMemory function already filled pAllocations, nothing more to do here.
+ VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
+ return VK_SUCCESS;
+ }
else
{
- res = AllocateDedicatedMemory(
- size,
- suballocType,
- memTypeIndex,
- (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT) != 0,
- (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
- (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
- finalCreateInfo.pUserData,
- dedicatedBuffer,
- dedicatedImage,
- allocationCount,
- pAllocations);
- if(res == VK_SUCCESS)
- {
- // Succeeded: AllocateDedicatedMemory function already filld pMemory, nothing more to do here.
- VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
- return VK_SUCCESS;
- }
- else
- {
- // Everything failed: Return error code.
- VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
- return res;
- }
+ // Everything failed: Return error code.
+ VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
+ return res;
}
}
}
@@ -15016,7 +16668,9 @@ VkResult VmaAllocator_T::AllocateDedicatedMemory(
bool map,
bool isUserDataString,
void* pUserData,
+ float priority,
VkBuffer dedicatedBuffer,
+ VkBufferUsageFlags dedicatedBufferUsage,
VkImage dedicatedImage,
size_t allocationCount,
VmaAllocation* pAllocations)
@@ -15046,16 +16700,57 @@ VkResult VmaAllocator_T::AllocateDedicatedMemory(
{
VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
dedicatedAllocInfo.buffer = dedicatedBuffer;
- allocInfo.pNext = &dedicatedAllocInfo;
+ VmaPnextChainPushFront(&allocInfo, &dedicatedAllocInfo);
}
else if(dedicatedImage != VK_NULL_HANDLE)
{
dedicatedAllocInfo.image = dedicatedImage;
- allocInfo.pNext = &dedicatedAllocInfo;
+ VmaPnextChainPushFront(&allocInfo, &dedicatedAllocInfo);
}
}
#endif // #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
+#if VMA_BUFFER_DEVICE_ADDRESS
+ VkMemoryAllocateFlagsInfoKHR allocFlagsInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO_KHR };
+ if(m_UseKhrBufferDeviceAddress)
+ {
+ bool canContainBufferWithDeviceAddress = true;
+ if(dedicatedBuffer != VK_NULL_HANDLE)
+ {
+ canContainBufferWithDeviceAddress = dedicatedBufferUsage == UINT32_MAX || // Usage flags unknown
+ (dedicatedBufferUsage & VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_EXT) != 0;
+ }
+ else if(dedicatedImage != VK_NULL_HANDLE)
+ {
+ canContainBufferWithDeviceAddress = false;
+ }
+ if(canContainBufferWithDeviceAddress)
+ {
+ allocFlagsInfo.flags = VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT_KHR;
+ VmaPnextChainPushFront(&allocInfo, &allocFlagsInfo);
+ }
+ }
+#endif // #if VMA_BUFFER_DEVICE_ADDRESS
+
+#if VMA_MEMORY_PRIORITY
+ VkMemoryPriorityAllocateInfoEXT priorityInfo = { VK_STRUCTURE_TYPE_MEMORY_PRIORITY_ALLOCATE_INFO_EXT };
+ if(m_UseExtMemoryPriority)
+ {
+ priorityInfo.priority = priority;
+ VmaPnextChainPushFront(&allocInfo, &priorityInfo);
+ }
+#endif // #if VMA_MEMORY_PRIORITY
+
+#if VMA_EXTERNAL_MEMORY
+ // Attach VkExportMemoryAllocateInfoKHR if necessary.
+ VkExportMemoryAllocateInfoKHR exportMemoryAllocInfo = { VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO_KHR };
+ exportMemoryAllocInfo.handleTypes = GetExternalMemoryHandleTypeFlags(memTypeIndex);
+ if(exportMemoryAllocInfo.handleTypes != 0)
+ {
+ VmaPnextChainPushFront(&allocInfo, &exportMemoryAllocInfo);
+ }
+#endif // #if VMA_EXTERNAL_MEMORY
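+    // Note: each extension struct above is pushed to the front of allocInfo's pNext
+    // chain, so dedicated-allocation, device-address, priority, and export info can
+    // all be attached to the same vkAllocateMemory call without overwriting one another.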
+
size_t allocIndex;
VkResult res = VK_SUCCESS;
for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
@@ -15077,14 +16772,13 @@ VkResult VmaAllocator_T::AllocateDedicatedMemory(
if(res == VK_SUCCESS)
{
- // Register them in m_pDedicatedAllocations.
+ // Register them in m_DedicatedAllocations.
{
VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
- AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
- VMA_ASSERT(pDedicatedAllocations);
+ DedicatedAllocationLinkedList& dedicatedAllocations = m_DedicatedAllocations[memTypeIndex];
for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
{
- VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, pAllocations[allocIndex]);
+ dedicatedAllocations.PushBack(pAllocations[allocIndex]);
}
}
@@ -15097,7 +16791,7 @@ VkResult VmaAllocator_T::AllocateDedicatedMemory(
{
VmaAllocation currAlloc = pAllocations[allocIndex];
VkDeviceMemory hMemory = currAlloc->GetMemory();
-
+
/*
There is no need to call this, because Vulkan spec allows to skip vkUnmapMemory
before vkFreeMemory.
@@ -15107,11 +16801,10 @@ VkResult VmaAllocator_T::AllocateDedicatedMemory(
(*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
}
*/
-
+
FreeVulkanMemory(memTypeIndex, currAlloc->GetSize(), hMemory);
m_Budget.RemoveAllocation(MemoryTypeIndexToHeapIndex(memTypeIndex), currAlloc->GetSize());
currAlloc->SetUserData(this, VMA_NULL);
- currAlloc->Dtor();
m_AllocationObjectAllocator.Free(currAlloc);
}
@@ -15157,8 +16850,7 @@ VkResult VmaAllocator_T::AllocateDedicatedMemoryPage(
}
}
- *pAllocation = m_AllocationObjectAllocator.Allocate();
- (*pAllocation)->Ctor(m_CurrentFrameIndex.load(), isUserDataString);
+ *pAllocation = m_AllocationObjectAllocator.Allocate(m_CurrentFrameIndex.load(), isUserDataString);
(*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
(*pAllocation)->SetUserData(this, pUserData);
m_Budget.AddAllocation(MemoryTypeIndexToHeapIndex(memTypeIndex), size);
@@ -15185,7 +16877,7 @@ void VmaAllocator_T::GetBufferMemoryRequirements(
VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
- memReq2.pNext = &memDedicatedReq;
+ VmaPnextChainPushFront(&memReq2, &memDedicatedReq);
(*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
@@ -15217,7 +16909,7 @@ void VmaAllocator_T::GetImageMemoryRequirements(
VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
- memReq2.pNext = &memDedicatedReq;
+ VmaPnextChainPushFront(&memReq2, &memDedicatedReq);
(*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
@@ -15239,6 +16931,7 @@ VkResult VmaAllocator_T::AllocateMemory(
bool requiresDedicatedAllocation,
bool prefersDedicatedAllocation,
VkBuffer dedicatedBuffer,
+ VkBufferUsageFlags dedicatedBufferUsage,
VkImage dedicatedImage,
const VmaAllocationCreateInfo& createInfo,
VmaSuballocationType suballocType,
@@ -15287,10 +16980,6 @@ VkResult VmaAllocator_T::AllocateMemory(
if(createInfo.pool != VK_NULL_HANDLE)
{
- const VkDeviceSize alignmentForPool = VMA_MAX(
- vkMemReq.alignment,
- GetMemoryTypeMinAlignment(createInfo.pool->m_BlockVector.GetMemoryTypeIndex()));
-
VmaAllocationCreateInfo createInfoForPool = createInfo;
// If memory type is not HOST_VISIBLE, disable MAPPED.
if((createInfoForPool.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
@@ -15302,7 +16991,7 @@ VkResult VmaAllocator_T::AllocateMemory(
return createInfo.pool->m_BlockVector.Allocate(
m_CurrentFrameIndex.load(),
vkMemReq.size,
- alignmentForPool,
+ vkMemReq.alignment,
createInfoForPool,
suballocType,
allocationCount,
@@ -15316,15 +17005,12 @@ VkResult VmaAllocator_T::AllocateMemory(
VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
if(res == VK_SUCCESS)
{
- VkDeviceSize alignmentForMemType = VMA_MAX(
- vkMemReq.alignment,
- GetMemoryTypeMinAlignment(memTypeIndex));
-
res = AllocateMemoryOfType(
vkMemReq.size,
- alignmentForMemType,
+ vkMemReq.alignment,
requiresDedicatedAllocation || prefersDedicatedAllocation,
dedicatedBuffer,
+ dedicatedBufferUsage,
dedicatedImage,
createInfo,
memTypeIndex,
@@ -15347,15 +17033,12 @@ VkResult VmaAllocator_T::AllocateMemory(
res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
if(res == VK_SUCCESS)
{
- alignmentForMemType = VMA_MAX(
- vkMemReq.alignment,
- GetMemoryTypeMinAlignment(memTypeIndex));
-
res = AllocateMemoryOfType(
vkMemReq.size,
- alignmentForMemType,
+ vkMemReq.alignment,
requiresDedicatedAllocation || prefersDedicatedAllocation,
dedicatedBuffer,
+ dedicatedBufferUsage,
dedicatedImage,
createInfo,
memTypeIndex,
@@ -15415,8 +17098,7 @@ void VmaAllocator_T::FreeMemory(
}
else
{
- const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
- pBlockVector = m_pBlockVectors[memTypeIndex];
+ pBlockVector = allocation->GetBlock()->GetParentBlockVector();
}
pBlockVector->Free(allocation);
}
@@ -15432,28 +17114,11 @@ void VmaAllocator_T::FreeMemory(
// Do this regardless of whether the allocation is lost. Lost allocations still account to Budget.AllocationBytes.
m_Budget.RemoveAllocation(MemoryTypeIndexToHeapIndex(allocation->GetMemoryTypeIndex()), allocation->GetSize());
allocation->SetUserData(this, VMA_NULL);
- allocation->Dtor();
m_AllocationObjectAllocator.Free(allocation);
}
}
}
-VkResult VmaAllocator_T::ResizeAllocation(
- const VmaAllocation alloc,
- VkDeviceSize newSize)
-{
- // This function is deprecated and so it does nothing. It's left for backward compatibility.
- if(newSize == 0 || alloc->GetLastUseFrameIndex() == VMA_FRAME_INDEX_LOST)
- {
- return VK_ERROR_VALIDATION_FAILED_EXT;
- }
- if(newSize == alloc->GetSize())
- {
- return VK_SUCCESS;
- }
- return VK_ERROR_OUT_OF_POOL_MEMORY;
-}
-
void VmaAllocator_T::CalculateStats(VmaStats* pStats)
{
// Initialize.
@@ -15462,21 +17127,25 @@ void VmaAllocator_T::CalculateStats(VmaStats* pStats)
InitStatInfo(pStats->memoryType[i]);
for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
InitStatInfo(pStats->memoryHeap[i]);
-
+
// Process default pools.
for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
{
VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
VMA_ASSERT(pBlockVector);
pBlockVector->AddStats(pStats);
+
+ VmaBlockVector* const pSmallBufferBlockVector = m_pSmallBufferBlockVectors[memTypeIndex];
+ VMA_ASSERT(pSmallBufferBlockVector);
+ pSmallBufferBlockVector->AddStats(pStats);
}
// Process custom pools.
{
VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
- for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
+ for(VmaPool pool = m_Pools.Front(); pool != VMA_NULL; pool = m_Pools.GetNext(pool))
{
- m_Pools[poolIndex]->m_BlockVector.AddStats(pStats);
+ pool->m_BlockVector.AddStats(pStats);
}
}
@@ -15485,12 +17154,12 @@ void VmaAllocator_T::CalculateStats(VmaStats* pStats)
{
const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
- AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
- VMA_ASSERT(pDedicatedAllocVector);
- for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
+ DedicatedAllocationLinkedList& dedicatedAllocList = m_DedicatedAllocations[memTypeIndex];
+ for(VmaAllocation alloc = dedicatedAllocList.Front();
+ alloc != VMA_NULL; alloc = dedicatedAllocList.GetNext(alloc))
{
VmaStatInfo allocationStatInfo;
- (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
+ alloc->DedicatedAllocCalcStatsInfo(allocationStatInfo);
VmaAddStatInfo(pStats->total, allocationStatInfo);
VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
@@ -15579,7 +17248,7 @@ VkResult VmaAllocator_T::DefragmentationBegin(
VkResult res = (*pContext)->Defragment(
info.maxCpuBytesToMove, info.maxCpuAllocationsToMove,
info.maxGpuBytesToMove, info.maxGpuAllocationsToMove,
- info.commandBuffer, pStats);
+ info.commandBuffer, pStats, info.flags);
if(res != VK_NOT_READY)
{
@@ -15597,6 +17266,19 @@ VkResult VmaAllocator_T::DefragmentationEnd(
return VK_SUCCESS;
}
+VkResult VmaAllocator_T::DefragmentationPassBegin(
+ VmaDefragmentationPassInfo* pInfo,
+ VmaDefragmentationContext context)
+{
+ return context->DefragmentPassBegin(pInfo);
+}
+
+VkResult VmaAllocator_T::DefragmentationPassEnd(
+    VmaDefragmentationContext context)
+{
+    return context->DefragmentPassEnd();
+}
+
void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
{
if(hAllocation->CanBecomeLost())
@@ -15727,6 +17409,12 @@ VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPoo
VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
+    // Protection against an uninitialized new structure member. If garbage data were left there, dereferencing this pointer could crash.
+ if(pCreateInfo->pMemoryAllocateNext)
+ {
+ VMA_ASSERT(((const VkBaseInStructure*)pCreateInfo->pMemoryAllocateNext)->sType != 0);
+ }
+
if(newCreateInfo.maxBlockCount == 0)
{
newCreateInfo.maxBlockCount = SIZE_MAX;
@@ -15735,6 +17423,16 @@ VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPoo
{
return VK_ERROR_INITIALIZATION_FAILED;
}
+ // Memory type index out of range or forbidden.
+ if(pCreateInfo->memoryTypeIndex >= GetMemoryTypeCount() ||
+ ((1u << pCreateInfo->memoryTypeIndex) & m_GlobalMemoryTypeBits) == 0)
+ {
+ return VK_ERROR_FEATURE_NOT_PRESENT;
+ }
+ if(newCreateInfo.minAllocationAlignment > 0)
+ {
+ VMA_ASSERT(VmaIsPow2(newCreateInfo.minAllocationAlignment));
+ }
const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
@@ -15752,7 +17450,7 @@ VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPoo
{
VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
(*pPool)->SetId(m_NextPoolId++);
- VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
+ m_Pools.PushBack(*pPool);
}
return VK_SUCCESS;
@@ -15763,8 +17461,7 @@ void VmaAllocator_T::DestroyPool(VmaPool pool)
// Remove from m_Pools.
{
VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
- bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
- VMA_ASSERT(success && "Pool not found in Allocator.");
+ m_Pools.Remove(pool);
}
vma_delete(this, pool);
@@ -15829,11 +17526,11 @@ VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits)
// Process custom pools.
{
VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
- for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
+ for(VmaPool pool = m_Pools.Front(); pool != VMA_NULL; pool = m_Pools.GetNext(pool))
{
- if(((1u << m_Pools[poolIndex]->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0)
+ if(((1u << pool->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0)
{
- VkResult localRes = m_Pools[poolIndex]->m_BlockVector.CheckCorruption();
+ VkResult localRes = pool->m_BlockVector.CheckCorruption();
switch(localRes)
{
case VK_ERROR_FEATURE_NOT_PRESENT:
@@ -15853,13 +17550,46 @@ VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits)
void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
{
- *pAllocation = m_AllocationObjectAllocator.Allocate();
- (*pAllocation)->Ctor(VMA_FRAME_INDEX_LOST, false);
+ *pAllocation = m_AllocationObjectAllocator.Allocate(VMA_FRAME_INDEX_LOST, false);
(*pAllocation)->InitLost();
}
+// An object that increments a given atomic counter but decrements it back in its destructor unless Commit() is called.
+template<typename T>
+struct AtomicTransactionalIncrement
+{
+public:
+ typedef std::atomic<T> AtomicT;
+ ~AtomicTransactionalIncrement()
+ {
+ if(m_Atomic)
+ --(*m_Atomic);
+ }
+ T Increment(AtomicT* atomic)
+ {
+ m_Atomic = atomic;
+ return m_Atomic->fetch_add(1);
+ }
+ void Commit()
+ {
+ m_Atomic = nullptr;
+ }
+
+private:
+ AtomicT* m_Atomic = nullptr;
+};
+
VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
{
+ AtomicTransactionalIncrement<uint32_t> deviceMemoryCountIncrement;
+ const uint64_t prevDeviceMemoryCount = deviceMemoryCountIncrement.Increment(&m_DeviceMemoryCount);
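+    // If this function returns early (too many objects, budget exceeded, or a failed
+    // vkAllocateMemory), the destructor of deviceMemoryCountIncrement rolls the counter
+    // back; Commit() below makes the increment permanent only on success.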
+#if VMA_DEBUG_DONT_EXCEED_MAX_MEMORY_ALLOCATION_COUNT
+ if(prevDeviceMemoryCount >= m_PhysicalDeviceProperties.limits.maxMemoryAllocationCount)
+ {
+ return VK_ERROR_TOO_MANY_OBJECTS;
+ }
+#endif
+
const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
// HeapSizeLimit is in effect for this heap.
@@ -15897,8 +17627,10 @@ VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAlloc
// Informative callback.
if(m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
{
- (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
+ (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize, m_DeviceMemoryCallbacks.pUserData);
}
+
+ deviceMemoryCountIncrement.Commit();
}
else
{
@@ -15913,13 +17645,15 @@ void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, Vk
// Informative callback.
if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
{
- (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
+ (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size, m_DeviceMemoryCallbacks.pUserData);
}
// VULKAN CALL vkFreeMemory.
(*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
m_Budget.m_BlockBytes[MemoryTypeIndexToHeapIndex(memoryType)] -= size;
+
+ --m_DeviceMemoryCount;
}
VkResult VmaAllocator_T::BindVulkanBuffer(
@@ -16082,80 +17816,71 @@ VkResult VmaAllocator_T::BindImageMemory(
return res;
}
-void VmaAllocator_T::FlushOrInvalidateAllocation(
+VkResult VmaAllocator_T::FlushOrInvalidateAllocation(
VmaAllocation hAllocation,
VkDeviceSize offset, VkDeviceSize size,
VMA_CACHE_OPERATION op)
{
- const uint32_t memTypeIndex = hAllocation->GetMemoryTypeIndex();
- if(size > 0 && IsMemoryTypeNonCoherent(memTypeIndex))
- {
- const VkDeviceSize allocationSize = hAllocation->GetSize();
- VMA_ASSERT(offset <= allocationSize);
-
- const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
+ VkResult res = VK_SUCCESS;
- VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
- memRange.memory = hAllocation->GetMemory();
-
- switch(hAllocation->GetType())
+ VkMappedMemoryRange memRange = {};
+ if(GetFlushOrInvalidateRange(hAllocation, offset, size, memRange))
+ {
+ switch(op)
{
- case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
- memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
- if(size == VK_WHOLE_SIZE)
- {
- memRange.size = allocationSize - memRange.offset;
- }
- else
- {
- VMA_ASSERT(offset + size <= allocationSize);
- memRange.size = VMA_MIN(
- VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize),
- allocationSize - memRange.offset);
- }
+ case VMA_CACHE_FLUSH:
+ res = (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange);
break;
-
- case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
- {
- // 1. Still within this allocation.
- memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
- if(size == VK_WHOLE_SIZE)
- {
- size = allocationSize - offset;
- }
- else
- {
- VMA_ASSERT(offset + size <= allocationSize);
- }
- memRange.size = VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize);
-
- // 2. Adjust to whole block.
- const VkDeviceSize allocationOffset = hAllocation->GetOffset();
- VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0);
- const VkDeviceSize blockSize = hAllocation->GetBlock()->m_pMetadata->GetSize();
- memRange.offset += allocationOffset;
- memRange.size = VMA_MIN(memRange.size, blockSize - memRange.offset);
-
+ case VMA_CACHE_INVALIDATE:
+ res = (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, 1, &memRange);
break;
- }
-
default:
VMA_ASSERT(0);
}
+ }
+ // else: Just ignore this call.
+ return res;
+}
+
+VkResult VmaAllocator_T::FlushOrInvalidateAllocations(
+ uint32_t allocationCount,
+ const VmaAllocation* allocations,
+ const VkDeviceSize* offsets, const VkDeviceSize* sizes,
+ VMA_CACHE_OPERATION op)
+{
+ typedef VmaStlAllocator<VkMappedMemoryRange> RangeAllocator;
+ typedef VmaSmallVector<VkMappedMemoryRange, RangeAllocator, 16> RangeVector;
+ RangeVector ranges = RangeVector(RangeAllocator(GetAllocationCallbacks()));
+
+ for(uint32_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
+ {
+ const VmaAllocation alloc = allocations[allocIndex];
+ const VkDeviceSize offset = offsets != VMA_NULL ? offsets[allocIndex] : 0;
+ const VkDeviceSize size = sizes != VMA_NULL ? sizes[allocIndex] : VK_WHOLE_SIZE;
+ VkMappedMemoryRange newRange;
+ if(GetFlushOrInvalidateRange(alloc, offset, size, newRange))
+ {
+ ranges.push_back(newRange);
+ }
+ }
+ VkResult res = VK_SUCCESS;
+ if(!ranges.empty())
+ {
switch(op)
{
case VMA_CACHE_FLUSH:
- (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange);
+ res = (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, (uint32_t)ranges.size(), ranges.data());
break;
case VMA_CACHE_INVALIDATE:
- (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, 1, &memRange);
+ res = (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, (uint32_t)ranges.size(), ranges.data());
break;
default:
VMA_ASSERT(0);
}
}
// else: Just ignore this call.
+ return res;
}
void VmaAllocator_T::FreeDedicatedMemory(const VmaAllocation allocation)
@@ -16165,14 +17890,12 @@ void VmaAllocator_T::FreeDedicatedMemory(const VmaAllocation allocation)
const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
{
VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
- AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
- VMA_ASSERT(pDedicatedAllocations);
- bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
- VMA_ASSERT(success);
+ DedicatedAllocationLinkedList& dedicatedAllocations = m_DedicatedAllocations[memTypeIndex];
+ dedicatedAllocations.Remove(allocation);
}
VkDeviceMemory hMemory = allocation->GetMemory();
-
+
/*
There is no need to call this, because Vulkan spec allows to skip vkUnmapMemory
before vkFreeMemory.
@@ -16182,7 +17905,7 @@ void VmaAllocator_T::FreeDedicatedMemory(const VmaAllocation allocation)
(*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
}
*/
-
+
FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
@@ -16213,6 +17936,91 @@ uint32_t VmaAllocator_T::CalculateGpuDefragmentationMemoryTypeBits() const
return memoryTypeBits;
}
+uint32_t VmaAllocator_T::CalculateGlobalMemoryTypeBits() const
+{
+ // Make sure memory information is already fetched.
+ VMA_ASSERT(GetMemoryTypeCount() > 0);
+
+ uint32_t memoryTypeBits = UINT32_MAX;
+
+ if(!m_UseAmdDeviceCoherentMemory)
+ {
+ // Exclude memory types that have VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD.
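+        // (Illustrative: if types 3 and 5 carried that flag, bits 3 and 5 would be
+        // cleared here, leaving every other memory type usable.)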
+ for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
+ {
+ if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY) != 0)
+ {
+ memoryTypeBits &= ~(1u << memTypeIndex);
+ }
+ }
+ }
+
+ return memoryTypeBits;
+}
+
+bool VmaAllocator_T::GetFlushOrInvalidateRange(
+ VmaAllocation allocation,
+ VkDeviceSize offset, VkDeviceSize size,
+ VkMappedMemoryRange& outRange) const
+{
+ const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
+ if(size > 0 && IsMemoryTypeNonCoherent(memTypeIndex))
+ {
+ const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
+ const VkDeviceSize allocationSize = allocation->GetSize();
+ VMA_ASSERT(offset <= allocationSize);
+
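+        // Worked example (illustrative): with nonCoherentAtomSize = 64, offset = 100,
+        // size = 40, the dedicated case below yields outRange.offset = 64 and
+        // outRange.size = VmaAlignUp(40 + 36, 64) = 128; both bounds are rounded
+        // outward to atom boundaries, then clamped to the allocation (or block) size.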
+ outRange.sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
+ outRange.pNext = VMA_NULL;
+ outRange.memory = allocation->GetMemory();
+
+ switch(allocation->GetType())
+ {
+ case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
+ outRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
+ if(size == VK_WHOLE_SIZE)
+ {
+ outRange.size = allocationSize - outRange.offset;
+ }
+ else
+ {
+ VMA_ASSERT(offset + size <= allocationSize);
+ outRange.size = VMA_MIN(
+ VmaAlignUp(size + (offset - outRange.offset), nonCoherentAtomSize),
+ allocationSize - outRange.offset);
+ }
+ break;
+ case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
+ {
+ // 1. Still within this allocation.
+ outRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
+ if(size == VK_WHOLE_SIZE)
+ {
+ size = allocationSize - offset;
+ }
+ else
+ {
+ VMA_ASSERT(offset + size <= allocationSize);
+ }
+ outRange.size = VmaAlignUp(size + (offset - outRange.offset), nonCoherentAtomSize);
+
+ // 2. Adjust to whole block.
+ const VkDeviceSize allocationOffset = allocation->GetOffset();
+ VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0);
+ const VkDeviceSize blockSize = allocation->GetBlock()->m_pMetadata->GetSize();
+ outRange.offset += allocationOffset;
+ outRange.size = VMA_MIN(outRange.size, blockSize - outRange.offset);
+
+ break;
+ }
+ default:
+ VMA_ASSERT(0);
+ }
+ return true;
+ }
+ return false;
+}
+
#if VMA_MEMORY_BUDGET
void VmaAllocator_T::UpdateVulkanBudget()
@@ -16222,7 +18030,7 @@ void VmaAllocator_T::UpdateVulkanBudget()
VkPhysicalDeviceMemoryProperties2KHR memProps = { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PROPERTIES_2_KHR };
VkPhysicalDeviceMemoryBudgetPropertiesEXT budgetProps = { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_BUDGET_PROPERTIES_EXT };
- memProps.pNext = &budgetProps;
+ VmaPnextChainPushFront(&memProps, &budgetProps);
GetVulkanFunctions().vkGetPhysicalDeviceMemoryProperties2KHR(m_PhysicalDevice, &memProps);
@@ -16234,6 +18042,20 @@ void VmaAllocator_T::UpdateVulkanBudget()
m_Budget.m_VulkanUsage[heapIndex] = budgetProps.heapUsage[heapIndex];
m_Budget.m_VulkanBudget[heapIndex] = budgetProps.heapBudget[heapIndex];
m_Budget.m_BlockBytesAtBudgetFetch[heapIndex] = m_Budget.m_BlockBytes[heapIndex].load();
+
+        // Some buggy drivers return the budget incorrectly, e.g. 0 or a value much bigger than the heap size.
+        if(m_Budget.m_VulkanBudget[heapIndex] == 0)
+        {
+            m_Budget.m_VulkanBudget[heapIndex] = m_MemProps.memoryHeaps[heapIndex].size * 8 / 10; // 80% heuristic.
+ }
+ else if(m_Budget.m_VulkanBudget[heapIndex] > m_MemProps.memoryHeaps[heapIndex].size)
+ {
+ m_Budget.m_VulkanBudget[heapIndex] = m_MemProps.memoryHeaps[heapIndex].size;
+ }
+ if(m_Budget.m_VulkanUsage[heapIndex] == 0 && m_Budget.m_BlockBytesAtBudgetFetch[heapIndex] > 0)
+ {
+ m_Budget.m_VulkanUsage[heapIndex] = m_Budget.m_BlockBytesAtBudgetFetch[heapIndex];
+ }
}
m_Budget.m_OperationsSinceBudgetFetch = 0;
}
@@ -16281,9 +18103,8 @@ void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
{
VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
- AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
- VMA_ASSERT(pDedicatedAllocVector);
- if(pDedicatedAllocVector->empty() == false)
+ DedicatedAllocationLinkedList& dedicatedAllocList = m_DedicatedAllocations[memTypeIndex];
+ if(!dedicatedAllocList.IsEmpty())
{
if(dedicatedAllocationsStarted == false)
{
@@ -16295,14 +18116,14 @@ void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
json.BeginString("Type ");
json.ContinueString(memTypeIndex);
json.EndString();
-
+
json.BeginArray();
- for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
+ for(VmaAllocation alloc = dedicatedAllocList.Front();
+ alloc != VMA_NULL; alloc = dedicatedAllocList.GetNext(alloc))
{
json.BeginObject(true);
- const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
- hAlloc->PrintParameters(json);
+ alloc->PrintParameters(json);
json.EndObject();
}
@@ -16314,6 +18135,7 @@ void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
json.EndObject();
}
+ // Default pools
{
bool allocationsStarted = false;
for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
@@ -16340,21 +18162,47 @@ void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
}
}
+ // Small buffer pools
+ {
+ bool allocationsStarted = false;
+ for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
+ {
+ if(m_pSmallBufferBlockVectors[memTypeIndex]->IsEmpty() == false)
+ {
+ if(allocationsStarted == false)
+ {
+ allocationsStarted = true;
+ json.WriteString("SmallBufferPools");
+ json.BeginObject();
+ }
+
+ json.BeginString("Type ");
+ json.ContinueString(memTypeIndex);
+ json.EndString();
+
+ m_pSmallBufferBlockVectors[memTypeIndex]->PrintDetailedMap(json);
+ }
+ }
+ if(allocationsStarted)
+ {
+ json.EndObject();
+ }
+ }
+
// Custom pools
{
VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
- const size_t poolCount = m_Pools.size();
- if(poolCount > 0)
+ if(!m_Pools.IsEmpty())
{
json.WriteString("Pools");
json.BeginObject();
- for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
+ for(VmaPool pool = m_Pools.Front(); pool != VMA_NULL; pool = m_Pools.GetNext(pool))
{
json.BeginString();
- json.ContinueString(m_Pools[poolIndex]->GetId());
+ json.ContinueString(pool->GetId());
json.EndString();
- m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
+ pool->m_BlockVector.PrintDetailedMap(json);
}
json.EndObject();
}
@@ -16372,7 +18220,7 @@ VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAllocator(
{
VMA_ASSERT(pCreateInfo && pAllocator);
VMA_ASSERT(pCreateInfo->vulkanApiVersion == 0 ||
- (VK_VERSION_MAJOR(pCreateInfo->vulkanApiVersion) == 1 && VK_VERSION_MINOR(pCreateInfo->vulkanApiVersion) <= 1));
+ (VK_VERSION_MAJOR(pCreateInfo->vulkanApiVersion) == 1 && VK_VERSION_MINOR(pCreateInfo->vulkanApiVersion) <= 2));
VMA_DEBUG_LOG("vmaCreateAllocator");
*pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
return (*pAllocator)->Init(pCreateInfo);
@@ -16389,6 +18237,14 @@ VMA_CALL_PRE void VMA_CALL_POST vmaDestroyAllocator(
}
}
+VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocatorInfo(VmaAllocator allocator, VmaAllocatorInfo* pAllocatorInfo)
+{
+ VMA_ASSERT(allocator && pAllocatorInfo);
+ pAllocatorInfo->instance = allocator->m_hInstance;
+ pAllocatorInfo->physicalDevice = allocator->GetPhysicalDevice();
+ pAllocatorInfo->device = allocator->m_hDevice;
+}
+
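+// Usage sketch (illustrative): retrieve the handles the allocator was created with.
+//
+//     VmaAllocatorInfo info;
+//     vmaGetAllocatorInfo(allocator, &info);
+//     // info.instance, info.physicalDevice and info.device are now filled.
+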
VMA_CALL_PRE void VMA_CALL_POST vmaGetPhysicalDeviceProperties(
VmaAllocator allocator,
const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
@@ -16468,7 +18324,7 @@ VMA_CALL_PRE void VMA_CALL_POST vmaBuildStatsString(
json.WriteString("Total");
VmaPrintStatInfo(json, stats.total);
-
+
for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
{
json.BeginString("Heap ");
@@ -16540,6 +18396,22 @@ VMA_CALL_PRE void VMA_CALL_POST vmaBuildStatsString(
{
json.WriteString("LAZILY_ALLOCATED");
}
+#if VMA_VULKAN_VERSION >= 1001000
+ if((flags & VK_MEMORY_PROPERTY_PROTECTED_BIT) != 0)
+ {
+ json.WriteString("PROTECTED");
+ }
+#endif // #if VMA_VULKAN_VERSION >= 1001000
+#if VK_AMD_device_coherent_memory
+ if((flags & VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY) != 0)
+ {
+ json.WriteString("DEVICE_COHERENT");
+ }
+ if((flags & VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY) != 0)
+ {
+ json.WriteString("DEVICE_UNCACHED");
+ }
+#endif // #if VK_AMD_device_coherent_memory
json.EndArray();
if(stats.memoryType[typeIndex].blockCount > 0)
@@ -16599,11 +18471,13 @@ VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndex(
VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
+ memoryTypeBits &= allocator->GetGlobalMemoryTypeBits();
+
if(pAllocationCreateInfo->memoryTypeBits != 0)
{
memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
}
-
+
uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
uint32_t notPreferredFlags = 0;
@@ -16644,6 +18518,13 @@ VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndex(
break;
}
+ // Avoid DEVICE_COHERENT unless explicitly requested.
+ if(((pAllocationCreateInfo->requiredFlags | pAllocationCreateInfo->preferredFlags) &
+ (VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY | VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY)) == 0)
+ {
+ notPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY;
+ }
+
*pMemoryTypeIndex = UINT32_MAX;
uint32_t minCost = UINT32_MAX;
for(uint32_t memTypeIndex = 0, memTypeBit = 1;
@@ -16744,25 +18625,25 @@ VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForImageInfo(
}
VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreatePool(
- VmaAllocator allocator,
- const VmaPoolCreateInfo* pCreateInfo,
- VmaPool* pPool)
+ VmaAllocator allocator,
+ const VmaPoolCreateInfo* pCreateInfo,
+ VmaPool* pPool)
{
VMA_ASSERT(allocator && pCreateInfo && pPool);
-
+
VMA_DEBUG_LOG("vmaCreatePool");
-
+
VMA_DEBUG_GLOBAL_MUTEX_LOCK
-
+
VkResult res = allocator->CreatePool(pCreateInfo, pPool);
-
+
#if VMA_RECORDING_ENABLED
if(allocator->GetRecorder() != VMA_NULL)
{
allocator->GetRecorder()->RecordCreatePool(allocator->GetCurrentFrameIndex(), *pCreateInfo, *pPool);
}
#endif
-
+
return res;
}
@@ -16771,16 +18652,16 @@ VMA_CALL_PRE void VMA_CALL_POST vmaDestroyPool(
VmaPool pool)
{
VMA_ASSERT(allocator);
-
+
if(pool == VK_NULL_HANDLE)
{
return;
}
-
+
VMA_DEBUG_LOG("vmaDestroyPool");
-
+
VMA_DEBUG_GLOBAL_MUTEX_LOCK
-
+
#if VMA_RECORDING_ENABLED
if(allocator->GetRecorder() != VMA_NULL)
{
@@ -16838,8 +18719,8 @@ VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolName(
VmaPool pool,
const char** ppName)
{
- VMA_ASSERT(allocator && pool);
-
+ VMA_ASSERT(allocator && pool && ppName);
+
VMA_DEBUG_LOG("vmaGetPoolName");
VMA_DEBUG_GLOBAL_MUTEX_LOCK
@@ -16881,11 +18762,12 @@ VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemory(
VMA_DEBUG_GLOBAL_MUTEX_LOCK
- VkResult result = allocator->AllocateMemory(
+ VkResult result = allocator->AllocateMemory(
*pVkMemoryRequirements,
false, // requiresDedicatedAllocation
false, // prefersDedicatedAllocation
VK_NULL_HANDLE, // dedicatedBuffer
+ UINT32_MAX, // dedicatedBufferUsage
VK_NULL_HANDLE, // dedicatedImage
*pCreateInfo,
VMA_SUBALLOCATION_TYPE_UNKNOWN,
@@ -16902,13 +18784,13 @@ VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemory(
*pAllocation);
}
#endif
-
+
if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
{
allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
}
- return result;
+ return result;
}
VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryPages(
@@ -16930,11 +18812,12 @@ VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryPages(
VMA_DEBUG_GLOBAL_MUTEX_LOCK
- VkResult result = allocator->AllocateMemory(
+ VkResult result = allocator->AllocateMemory(
*pVkMemoryRequirements,
false, // requiresDedicatedAllocation
false, // prefersDedicatedAllocation
VK_NULL_HANDLE, // dedicatedBuffer
+ UINT32_MAX, // dedicatedBufferUsage
VK_NULL_HANDLE, // dedicatedImage
*pCreateInfo,
VMA_SUBALLOCATION_TYPE_UNKNOWN,
@@ -16952,7 +18835,7 @@ VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryPages(
pAllocations);
}
#endif
-
+
if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
{
for(size_t i = 0; i < allocationCount; ++i)
@@ -16961,7 +18844,7 @@ VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryPages(
}
}
- return result;
+ return result;
}
VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForBuffer(
@@ -16989,6 +18872,7 @@ VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForBuffer(
requiresDedicatedAllocation,
prefersDedicatedAllocation,
buffer, // dedicatedBuffer
+ UINT32_MAX, // dedicatedBufferUsage
VK_NULL_HANDLE, // dedicatedImage
*pCreateInfo,
VMA_SUBALLOCATION_TYPE_BUFFER,
@@ -17013,7 +18897,7 @@ VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForBuffer(
allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
}
- return result;
+ return result;
}
VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForImage(
@@ -17040,6 +18924,7 @@ VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForImage(
requiresDedicatedAllocation,
prefersDedicatedAllocation,
VK_NULL_HANDLE, // dedicatedBuffer
+ UINT32_MAX, // dedicatedBufferUsage
image, // dedicatedImage
*pCreateInfo,
VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
@@ -17064,7 +18949,7 @@ VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForImage(
allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
}
- return result;
+ return result;
}
VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemory(
@@ -17072,14 +18957,14 @@ VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemory(
VmaAllocation allocation)
{
VMA_ASSERT(allocator);
-
+
if(allocation == VK_NULL_HANDLE)
{
return;
}
-
+
VMA_DEBUG_LOG("vmaFreeMemory");
-
+
VMA_DEBUG_GLOBAL_MUTEX_LOCK
#if VMA_RECORDING_ENABLED
@@ -17090,7 +18975,7 @@ VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemory(
allocation);
}
#endif
-
+
allocator->FreeMemory(
1, // allocationCount
&allocation);
@@ -17099,7 +18984,7 @@ VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemory(
VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemoryPages(
VmaAllocator allocator,
size_t allocationCount,
- VmaAllocation* pAllocations)
+ const VmaAllocation* pAllocations)
{
if(allocationCount == 0)
{
@@ -17107,9 +18992,9 @@ VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemoryPages(
}
VMA_ASSERT(allocator);
-
+
VMA_DEBUG_LOG("vmaFreeMemoryPages");
-
+
VMA_DEBUG_GLOBAL_MUTEX_LOCK
#if VMA_RECORDING_ENABLED
@@ -17121,22 +19006,8 @@ VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemoryPages(
pAllocations);
}
#endif
-
- allocator->FreeMemory(allocationCount, pAllocations);
-}
-VMA_CALL_PRE VkResult VMA_CALL_POST vmaResizeAllocation(
- VmaAllocator allocator,
- VmaAllocation allocation,
- VkDeviceSize newSize)
-{
- VMA_ASSERT(allocator && allocation);
-
- VMA_DEBUG_LOG("vmaResizeAllocation");
-
- VMA_DEBUG_GLOBAL_MUTEX_LOCK
-
- return allocator->ResizeAllocation(allocation, newSize);
+ allocator->FreeMemory(allocationCount, pAllocations);
}
VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationInfo(
@@ -17265,7 +19136,7 @@ VMA_CALL_PRE void VMA_CALL_POST vmaUnmapMemory(
allocator->Unmap(allocation);
}
-VMA_CALL_PRE void VMA_CALL_POST vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
{
VMA_ASSERT(allocator && allocation);
@@ -17273,7 +19144,7 @@ VMA_CALL_PRE void VMA_CALL_POST vmaFlushAllocation(VmaAllocator allocator, VmaAl
VMA_DEBUG_GLOBAL_MUTEX_LOCK
- allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_FLUSH);
+ const VkResult res = allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_FLUSH);
#if VMA_RECORDING_ENABLED
if(allocator->GetRecorder() != VMA_NULL)
@@ -17283,9 +19154,11 @@ VMA_CALL_PRE void VMA_CALL_POST vmaFlushAllocation(VmaAllocator allocator, VmaAl
allocation, offset, size);
}
#endif
+
+ return res;
}
-VMA_CALL_PRE void VMA_CALL_POST vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
{
VMA_ASSERT(allocator && allocation);
@@ -17293,7 +19166,7 @@ VMA_CALL_PRE void VMA_CALL_POST vmaInvalidateAllocation(VmaAllocator allocator,
VMA_DEBUG_GLOBAL_MUTEX_LOCK
- allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_INVALIDATE);
+ const VkResult res = allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_INVALIDATE);
#if VMA_RECORDING_ENABLED
if(allocator->GetRecorder() != VMA_NULL)
@@ -17303,6 +19176,72 @@ VMA_CALL_PRE void VMA_CALL_POST vmaInvalidateAllocation(VmaAllocator allocator,
allocation, offset, size);
}
#endif
+
+ return res;
+}
+
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaFlushAllocations(
+ VmaAllocator allocator,
+ uint32_t allocationCount,
+ const VmaAllocation* allocations,
+ const VkDeviceSize* offsets,
+ const VkDeviceSize* sizes)
+{
+ VMA_ASSERT(allocator);
+
+ if(allocationCount == 0)
+ {
+ return VK_SUCCESS;
+ }
+
+ VMA_ASSERT(allocations);
+
+ VMA_DEBUG_LOG("vmaFlushAllocations");
+
+ VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
+ const VkResult res = allocator->FlushOrInvalidateAllocations(allocationCount, allocations, offsets, sizes, VMA_CACHE_FLUSH);
+
+#if VMA_RECORDING_ENABLED
+ if(allocator->GetRecorder() != VMA_NULL)
+ {
+ //TODO
+ }
+#endif
+
+ return res;
+}
+
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaInvalidateAllocations(
+ VmaAllocator allocator,
+ uint32_t allocationCount,
+ const VmaAllocation* allocations,
+ const VkDeviceSize* offsets,
+ const VkDeviceSize* sizes)
+{
+ VMA_ASSERT(allocator);
+
+ if(allocationCount == 0)
+ {
+ return VK_SUCCESS;
+ }
+
+ VMA_ASSERT(allocations);
+
+ VMA_DEBUG_LOG("vmaInvalidateAllocations");
+
+ VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
+ const VkResult res = allocator->FlushOrInvalidateAllocations(allocationCount, allocations, offsets, sizes, VMA_CACHE_INVALIDATE);
+
+#if VMA_RECORDING_ENABLED
+ if(allocator->GetRecorder() != VMA_NULL)
+ {
+ //TODO
+ }
+#endif
+
+ return res;
}
VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
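// Editor's sketch, not part of the upstream patch: using the batch variants
// added above. Passing null for `offsets`/`sizes` follows VMA's documented
// convention of offset 0 and VK_WHOLE_SIZE for every allocation. Assuming
// three live, non-coherent allocations:
static VkResult ExampleFlushBatch(VmaAllocator allocator,
    VmaAllocation a, VmaAllocation b, VmaAllocation c)
{
    const VmaAllocation allocs[] = { a, b, c };
    return vmaFlushAllocations(allocator, 3, allocs, VMA_NULL, VMA_NULL);
}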
@@ -17318,7 +19257,7 @@ VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckCorruption(VmaAllocator allocator, u
VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragment(
VmaAllocator allocator,
- VmaAllocation* pAllocations,
+ const VmaAllocation* pAllocations,
size_t allocationCount,
VkBool32* pAllocationsChanged,
const VmaDefragmentationInfo *pDefragmentationInfo,
@@ -17415,6 +19354,42 @@ VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragmentationEnd(
}
}
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaBeginDefragmentationPass(
+ VmaAllocator allocator,
+ VmaDefragmentationContext context,
+ VmaDefragmentationPassInfo* pInfo
+ )
+{
+ VMA_ASSERT(allocator);
+ VMA_ASSERT(pInfo);
+
+ VMA_DEBUG_LOG("vmaBeginDefragmentationPass");
+
+ VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
+ if(context == VK_NULL_HANDLE)
+ {
+ pInfo->moveCount = 0;
+ return VK_SUCCESS;
+ }
+
+ return allocator->DefragmentationPassBegin(pInfo, context);
+}
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaEndDefragmentationPass(
+ VmaAllocator allocator,
+ VmaDefragmentationContext context)
+{
+ VMA_ASSERT(allocator);
+
+ VMA_DEBUG_LOG("vmaEndDefragmentationPass");
+ VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
+ if(context == VK_NULL_HANDLE)
+ return VK_SUCCESS;
+
+ return allocator->DefragmentationPassEnd(context);
+}
+
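// Editor's sketch, not part of the upstream patch: driving the incremental
// defragmentation pass API added above. `ctx` is assumed to come from
// vmaDefragmentationBegin(); processing the moves exchanged through
// `passInfo` (recording the copies on a command buffer) is elided:
static VkResult ExampleRunOnePass(VmaAllocator allocator, VmaDefragmentationContext ctx)
{
    VmaDefragmentationPassInfo passInfo = {};
    VkResult res = vmaBeginDefragmentationPass(allocator, ctx, &passInfo);
    if(res != VK_SUCCESS)
        return res; // error handling simplified for this sketch
    // ...perform the passInfo.moveCount moves reported for this pass...
    return vmaEndDefragmentationPass(allocator, ctx);
}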
VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory(
VmaAllocator allocator,
VmaAllocation allocation,
@@ -17489,9 +19464,15 @@ VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBuffer(
{
return VK_ERROR_VALIDATION_FAILED_EXT;
}
-
+ if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_COPY) != 0 &&
+ !allocator->m_UseKhrBufferDeviceAddress)
+ {
+ VMA_ASSERT(0 && "Creating a buffer with VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT is not valid if VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT was not used.");
+ return VK_ERROR_VALIDATION_FAILED_EXT;
+ }
+
VMA_DEBUG_LOG("vmaCreateBuffer");
-
+
VMA_DEBUG_GLOBAL_MUTEX_LOCK
*pBuffer = VK_NULL_HANDLE;
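// Editor's sketch, not part of the upstream patch: the validation added above
// expects the allocator to opt in via
// VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT before buffers may carry
// VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT. Assuming valid Vulkan handles:
static VkResult ExampleCreateBdaAllocator(VkInstance instance,
    VkPhysicalDevice physicalDevice, VkDevice device, VmaAllocator* pAllocator)
{
    VmaAllocatorCreateInfo info = {};
    info.instance = instance;
    info.physicalDevice = physicalDevice;
    info.device = device;
    info.flags = VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT;
    return vmaCreateAllocator(&info, pAllocator);
}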
@@ -17518,6 +19499,7 @@ VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBuffer(
requiresDedicatedAllocation,
prefersDedicatedAllocation,
*pBuffer, // dedicatedBuffer
+ pBufferCreateInfo->usage, // dedicatedBufferUsage
VK_NULL_HANDLE, // dedicatedImage
*pAllocationCreateInfo,
VMA_SUBALLOCATION_TYPE_BUFFER,
@@ -17645,7 +19627,7 @@ VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateImage(
VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
-
+
// 2. Allocate memory using allocator.
VkMemoryRequirements vkMemReq = {};
bool requiresDedicatedAllocation = false;
@@ -17658,6 +19640,7 @@ VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateImage(
requiresDedicatedAllocation,
prefersDedicatedAllocation,
VK_NULL_HANDLE, // dedicatedBuffer
+ UINT32_MAX, // dedicatedBufferUsage
*pImage, // dedicatedImage
*pAllocationCreateInfo,
suballocType,