Diffstat (limited to 'thirdparty/bullet/BulletCollision/BroadphaseCollision/btQuantizedBvh.cpp')
-rw-r--r--  thirdparty/bullet/BulletCollision/BroadphaseCollision/btQuantizedBvh.cpp  594
1 file changed, 274 insertions, 320 deletions
diff --git a/thirdparty/bullet/BulletCollision/BroadphaseCollision/btQuantizedBvh.cpp b/thirdparty/bullet/BulletCollision/BroadphaseCollision/btQuantizedBvh.cpp index 875d89c53e..b814fd84d8 100644 --- a/thirdparty/bullet/BulletCollision/BroadphaseCollision/btQuantizedBvh.cpp +++ b/thirdparty/bullet/BulletCollision/BroadphaseCollision/btQuantizedBvh.cpp @@ -21,43 +21,38 @@ subject to the following restrictions: #define RAYAABB2 -btQuantizedBvh::btQuantizedBvh() : - m_bulletVersion(BT_BULLET_VERSION), - m_useQuantization(false), - //m_traversalMode(TRAVERSAL_STACKLESS_CACHE_FRIENDLY) - m_traversalMode(TRAVERSAL_STACKLESS) - //m_traversalMode(TRAVERSAL_RECURSIVE) - ,m_subtreeHeaderCount(0) //PCK: add this line +btQuantizedBvh::btQuantizedBvh() : m_bulletVersion(BT_BULLET_VERSION), + m_useQuantization(false), + //m_traversalMode(TRAVERSAL_STACKLESS_CACHE_FRIENDLY) + m_traversalMode(TRAVERSAL_STACKLESS) + //m_traversalMode(TRAVERSAL_RECURSIVE) + , + m_subtreeHeaderCount(0) //PCK: add this line { - m_bvhAabbMin.setValue(-SIMD_INFINITY,-SIMD_INFINITY,-SIMD_INFINITY); - m_bvhAabbMax.setValue(SIMD_INFINITY,SIMD_INFINITY,SIMD_INFINITY); + m_bvhAabbMin.setValue(-SIMD_INFINITY, -SIMD_INFINITY, -SIMD_INFINITY); + m_bvhAabbMax.setValue(SIMD_INFINITY, SIMD_INFINITY, SIMD_INFINITY); } - - - - void btQuantizedBvh::buildInternal() { ///assumes that caller filled in the m_quantizedLeafNodes m_useQuantization = true; int numLeafNodes = 0; - + if (m_useQuantization) { //now we have an array of leafnodes in m_leafNodes numLeafNodes = m_quantizedLeafNodes.size(); - m_quantizedContiguousNodes.resize(2*numLeafNodes); - + m_quantizedContiguousNodes.resize(2 * numLeafNodes); } m_curNodeIndex = 0; - buildTree(0,numLeafNodes); + buildTree(0, numLeafNodes); ///if the entire tree is small then subtree size, we need to create a header info for the tree - if(m_useQuantization && !m_SubtreeHeaders.size()) + if (m_useQuantization && !m_SubtreeHeaders.size()) { btBvhSubtreeInfo& subtree = m_SubtreeHeaders.expand(); subtree.setAabbFromQuantizeNode(m_quantizedContiguousNodes[0]); @@ -73,29 +68,24 @@ void btQuantizedBvh::buildInternal() m_leafNodes.clear(); } - - ///just for debugging, to visualize the individual patches/subtrees #ifdef DEBUG_PATCH_COLORS -btVector3 color[4]= -{ - btVector3(1,0,0), - btVector3(0,1,0), - btVector3(0,0,1), - btVector3(0,1,1) -}; -#endif //DEBUG_PATCH_COLORS - - +btVector3 color[4] = + { + btVector3(1, 0, 0), + btVector3(0, 1, 0), + btVector3(0, 0, 1), + btVector3(0, 1, 1)}; +#endif //DEBUG_PATCH_COLORS -void btQuantizedBvh::setQuantizationValues(const btVector3& bvhAabbMin,const btVector3& bvhAabbMax,btScalar quantizationMargin) +void btQuantizedBvh::setQuantizationValues(const btVector3& bvhAabbMin, const btVector3& bvhAabbMax, btScalar quantizationMargin) { //enlarge the AABB to avoid division by zero when initializing the quantization values - btVector3 clampValue(quantizationMargin,quantizationMargin,quantizationMargin); + btVector3 clampValue(quantizationMargin, quantizationMargin, quantizationMargin); m_bvhAabbMin = bvhAabbMin - clampValue; m_bvhAabbMax = bvhAabbMax + clampValue; btVector3 aabbSize = m_bvhAabbMax - m_bvhAabbMin; - m_bvhQuantization = btVector3(btScalar(65533.0),btScalar(65533.0),btScalar(65533.0)) / aabbSize; + m_bvhQuantization = btVector3(btScalar(65533.0), btScalar(65533.0), btScalar(65533.0)) / aabbSize; m_useQuantization = true; @@ -103,25 +93,22 @@ void btQuantizedBvh::setQuantizationValues(const btVector3& bvhAabbMin,const btV unsigned short vecIn[3]; btVector3 v; { 
- quantize(vecIn,m_bvhAabbMin,false); + quantize(vecIn, m_bvhAabbMin, false); v = unQuantize(vecIn); - m_bvhAabbMin.setMin(v-clampValue); + m_bvhAabbMin.setMin(v - clampValue); } - aabbSize = m_bvhAabbMax - m_bvhAabbMin; - m_bvhQuantization = btVector3(btScalar(65533.0),btScalar(65533.0),btScalar(65533.0)) / aabbSize; + aabbSize = m_bvhAabbMax - m_bvhAabbMin; + m_bvhQuantization = btVector3(btScalar(65533.0), btScalar(65533.0), btScalar(65533.0)) / aabbSize; { - quantize(vecIn,m_bvhAabbMax,true); + quantize(vecIn, m_bvhAabbMax, true); v = unQuantize(vecIn); - m_bvhAabbMax.setMax(v+clampValue); + m_bvhAabbMax.setMax(v + clampValue); } aabbSize = m_bvhAabbMax - m_bvhAabbMin; - m_bvhQuantization = btVector3(btScalar(65533.0),btScalar(65533.0),btScalar(65533.0)) / aabbSize; + m_bvhQuantization = btVector3(btScalar(65533.0), btScalar(65533.0), btScalar(65533.0)) / aabbSize; } } - - - btQuantizedBvh::~btQuantizedBvh() { } @@ -129,104 +116,100 @@ btQuantizedBvh::~btQuantizedBvh() #ifdef DEBUG_TREE_BUILDING int gStackDepth = 0; int gMaxStackDepth = 0; -#endif //DEBUG_TREE_BUILDING +#endif //DEBUG_TREE_BUILDING -void btQuantizedBvh::buildTree (int startIndex,int endIndex) +void btQuantizedBvh::buildTree(int startIndex, int endIndex) { #ifdef DEBUG_TREE_BUILDING gStackDepth++; if (gStackDepth > gMaxStackDepth) gMaxStackDepth = gStackDepth; -#endif //DEBUG_TREE_BUILDING - +#endif //DEBUG_TREE_BUILDING int splitAxis, splitIndex, i; - int numIndices =endIndex-startIndex; + int numIndices = endIndex - startIndex; int curIndex = m_curNodeIndex; - btAssert(numIndices>0); + btAssert(numIndices > 0); - if (numIndices==1) + if (numIndices == 1) { #ifdef DEBUG_TREE_BUILDING gStackDepth--; -#endif //DEBUG_TREE_BUILDING - - assignInternalNodeFromLeafNode(m_curNodeIndex,startIndex); +#endif //DEBUG_TREE_BUILDING + + assignInternalNodeFromLeafNode(m_curNodeIndex, startIndex); m_curNodeIndex++; - return; + return; } //calculate Best Splitting Axis and where to split it. Sort the incoming 'leafNodes' array within range 'startIndex/endIndex'. - - splitAxis = calcSplittingAxis(startIndex,endIndex); - splitIndex = sortAndCalcSplittingIndex(startIndex,endIndex,splitAxis); + splitAxis = calcSplittingAxis(startIndex, endIndex); + + splitIndex = sortAndCalcSplittingIndex(startIndex, endIndex, splitAxis); int internalNodeIndex = m_curNodeIndex; - + //set the min aabb to 'inf' or a max value, and set the max aabb to a -inf/minimum value. 
//the aabb will be expanded during buildTree/mergeInternalNodeAabb with actual node values - setInternalNodeAabbMin(m_curNodeIndex,m_bvhAabbMax);//can't use btVector3(SIMD_INFINITY,SIMD_INFINITY,SIMD_INFINITY)) because of quantization - setInternalNodeAabbMax(m_curNodeIndex,m_bvhAabbMin);//can't use btVector3(-SIMD_INFINITY,-SIMD_INFINITY,-SIMD_INFINITY)) because of quantization - - - for (i=startIndex;i<endIndex;i++) + setInternalNodeAabbMin(m_curNodeIndex, m_bvhAabbMax); //can't use btVector3(SIMD_INFINITY,SIMD_INFINITY,SIMD_INFINITY)) because of quantization + setInternalNodeAabbMax(m_curNodeIndex, m_bvhAabbMin); //can't use btVector3(-SIMD_INFINITY,-SIMD_INFINITY,-SIMD_INFINITY)) because of quantization + + for (i = startIndex; i < endIndex; i++) { - mergeInternalNodeAabb(m_curNodeIndex,getAabbMin(i),getAabbMax(i)); + mergeInternalNodeAabb(m_curNodeIndex, getAabbMin(i), getAabbMax(i)); } m_curNodeIndex++; - //internalNode->m_escapeIndex; - + int leftChildNodexIndex = m_curNodeIndex; //build left child tree - buildTree(startIndex,splitIndex); + buildTree(startIndex, splitIndex); int rightChildNodexIndex = m_curNodeIndex; //build right child tree - buildTree(splitIndex,endIndex); + buildTree(splitIndex, endIndex); #ifdef DEBUG_TREE_BUILDING gStackDepth--; -#endif //DEBUG_TREE_BUILDING +#endif //DEBUG_TREE_BUILDING int escapeIndex = m_curNodeIndex - curIndex; if (m_useQuantization) { //escapeIndex is the number of nodes of this subtree - const int sizeQuantizedNode =sizeof(btQuantizedBvhNode); + const int sizeQuantizedNode = sizeof(btQuantizedBvhNode); const int treeSizeInBytes = escapeIndex * sizeQuantizedNode; if (treeSizeInBytes > MAX_SUBTREE_SIZE_IN_BYTES) { - updateSubtreeHeaders(leftChildNodexIndex,rightChildNodexIndex); + updateSubtreeHeaders(leftChildNodexIndex, rightChildNodexIndex); } - } else + } + else { - } - setInternalNodeEscapeIndex(internalNodeIndex,escapeIndex); - + setInternalNodeEscapeIndex(internalNodeIndex, escapeIndex); } -void btQuantizedBvh::updateSubtreeHeaders(int leftChildNodexIndex,int rightChildNodexIndex) +void btQuantizedBvh::updateSubtreeHeaders(int leftChildNodexIndex, int rightChildNodexIndex) { btAssert(m_useQuantization); btQuantizedBvhNode& leftChildNode = m_quantizedContiguousNodes[leftChildNodexIndex]; int leftSubTreeSize = leftChildNode.isLeafNode() ? 1 : leftChildNode.getEscapeIndex(); - int leftSubTreeSizeInBytes = leftSubTreeSize * static_cast<int>(sizeof(btQuantizedBvhNode)); - + int leftSubTreeSizeInBytes = leftSubTreeSize * static_cast<int>(sizeof(btQuantizedBvhNode)); + btQuantizedBvhNode& rightChildNode = m_quantizedContiguousNodes[rightChildNodexIndex]; int rightSubTreeSize = rightChildNode.isLeafNode() ? 
1 : rightChildNode.getEscapeIndex(); - int rightSubTreeSizeInBytes = rightSubTreeSize * static_cast<int>(sizeof(btQuantizedBvhNode)); + int rightSubTreeSizeInBytes = rightSubTreeSize * static_cast<int>(sizeof(btQuantizedBvhNode)); - if(leftSubTreeSizeInBytes <= MAX_SUBTREE_SIZE_IN_BYTES) + if (leftSubTreeSizeInBytes <= MAX_SUBTREE_SIZE_IN_BYTES) { btBvhSubtreeInfo& subtree = m_SubtreeHeaders.expand(); subtree.setAabbFromQuantizeNode(leftChildNode); @@ -234,7 +217,7 @@ void btQuantizedBvh::updateSubtreeHeaders(int leftChildNodexIndex,int rightChild subtree.m_subtreeSize = leftSubTreeSize; } - if(rightSubTreeSizeInBytes <= MAX_SUBTREE_SIZE_IN_BYTES) + if (rightSubTreeSizeInBytes <= MAX_SUBTREE_SIZE_IN_BYTES) { btBvhSubtreeInfo& subtree = m_SubtreeHeaders.expand(); subtree.setAabbFromQuantizeNode(rightChildNode); @@ -246,32 +229,31 @@ void btQuantizedBvh::updateSubtreeHeaders(int leftChildNodexIndex,int rightChild m_subtreeHeaderCount = m_SubtreeHeaders.size(); } - -int btQuantizedBvh::sortAndCalcSplittingIndex(int startIndex,int endIndex,int splitAxis) +int btQuantizedBvh::sortAndCalcSplittingIndex(int startIndex, int endIndex, int splitAxis) { int i; - int splitIndex =startIndex; + int splitIndex = startIndex; int numIndices = endIndex - startIndex; btScalar splitValue; - btVector3 means(btScalar(0.),btScalar(0.),btScalar(0.)); - for (i=startIndex;i<endIndex;i++) + btVector3 means(btScalar(0.), btScalar(0.), btScalar(0.)); + for (i = startIndex; i < endIndex; i++) { - btVector3 center = btScalar(0.5)*(getAabbMax(i)+getAabbMin(i)); - means+=center; + btVector3 center = btScalar(0.5) * (getAabbMax(i) + getAabbMin(i)); + means += center; } - means *= (btScalar(1.)/(btScalar)numIndices); - + means *= (btScalar(1.) / (btScalar)numIndices); + splitValue = means[splitAxis]; - + //sort leafNodes so all values larger then splitValue comes first, and smaller values start from 'splitIndex'. 
- for (i=startIndex;i<endIndex;i++) + for (i = startIndex; i < endIndex; i++) { - btVector3 center = btScalar(0.5)*(getAabbMax(i)+getAabbMin(i)); + btVector3 center = btScalar(0.5) * (getAabbMax(i) + getAabbMin(i)); if (center[splitAxis] > splitValue) { //swap - swapLeafNodes(i,splitIndex); + swapLeafNodes(i, splitIndex); splitIndex++; } } @@ -281,56 +263,53 @@ int btQuantizedBvh::sortAndCalcSplittingIndex(int startIndex,int endIndex,int sp //unbalanced1 is unsafe: it can cause stack overflows //bool unbalanced1 = ((splitIndex==startIndex) || (splitIndex == (endIndex-1))); - //unbalanced2 should work too: always use center (perfect balanced trees) + //unbalanced2 should work too: always use center (perfect balanced trees) //bool unbalanced2 = true; //this should be safe too: - int rangeBalancedIndices = numIndices/3; - bool unbalanced = ((splitIndex<=(startIndex+rangeBalancedIndices)) || (splitIndex >=(endIndex-1-rangeBalancedIndices))); - + int rangeBalancedIndices = numIndices / 3; + bool unbalanced = ((splitIndex <= (startIndex + rangeBalancedIndices)) || (splitIndex >= (endIndex - 1 - rangeBalancedIndices))); + if (unbalanced) { - splitIndex = startIndex+ (numIndices>>1); + splitIndex = startIndex + (numIndices >> 1); } - bool unbal = (splitIndex==startIndex) || (splitIndex == (endIndex)); + bool unbal = (splitIndex == startIndex) || (splitIndex == (endIndex)); (void)unbal; btAssert(!unbal); return splitIndex; } - -int btQuantizedBvh::calcSplittingAxis(int startIndex,int endIndex) +int btQuantizedBvh::calcSplittingAxis(int startIndex, int endIndex) { int i; - btVector3 means(btScalar(0.),btScalar(0.),btScalar(0.)); - btVector3 variance(btScalar(0.),btScalar(0.),btScalar(0.)); - int numIndices = endIndex-startIndex; + btVector3 means(btScalar(0.), btScalar(0.), btScalar(0.)); + btVector3 variance(btScalar(0.), btScalar(0.), btScalar(0.)); + int numIndices = endIndex - startIndex; - for (i=startIndex;i<endIndex;i++) + for (i = startIndex; i < endIndex; i++) { - btVector3 center = btScalar(0.5)*(getAabbMax(i)+getAabbMin(i)); - means+=center; + btVector3 center = btScalar(0.5) * (getAabbMax(i) + getAabbMin(i)); + means += center; } - means *= (btScalar(1.)/(btScalar)numIndices); - - for (i=startIndex;i<endIndex;i++) + means *= (btScalar(1.) / (btScalar)numIndices); + + for (i = startIndex; i < endIndex; i++) { - btVector3 center = btScalar(0.5)*(getAabbMax(i)+getAabbMin(i)); - btVector3 diff2 = center-means; + btVector3 center = btScalar(0.5) * (getAabbMax(i) + getAabbMin(i)); + btVector3 diff2 = center - means; diff2 = diff2 * diff2; variance += diff2; } - variance *= (btScalar(1.)/ ((btScalar)numIndices-1) ); - + variance *= (btScalar(1.) 
/ ((btScalar)numIndices - 1)); + return variance.maxAxis(); } - - -void btQuantizedBvh::reportAabbOverlappingNodex(btNodeOverlapCallback* nodeCallback,const btVector3& aabbMin,const btVector3& aabbMax) const +void btQuantizedBvh::reportAabbOverlappingNodex(btNodeOverlapCallback* nodeCallback, const btVector3& aabbMin, const btVector3& aabbMax) const { //either choose recursive traversal (walkTree) or stackless (walkStacklessTree) @@ -339,38 +318,37 @@ void btQuantizedBvh::reportAabbOverlappingNodex(btNodeOverlapCallback* nodeCallb ///quantize query AABB unsigned short int quantizedQueryAabbMin[3]; unsigned short int quantizedQueryAabbMax[3]; - quantizeWithClamp(quantizedQueryAabbMin,aabbMin,0); - quantizeWithClamp(quantizedQueryAabbMax,aabbMax,1); + quantizeWithClamp(quantizedQueryAabbMin, aabbMin, 0); + quantizeWithClamp(quantizedQueryAabbMax, aabbMax, 1); switch (m_traversalMode) { - case TRAVERSAL_STACKLESS: - walkStacklessQuantizedTree(nodeCallback,quantizedQueryAabbMin,quantizedQueryAabbMax,0,m_curNodeIndex); - break; - case TRAVERSAL_STACKLESS_CACHE_FRIENDLY: - walkStacklessQuantizedTreeCacheFriendly(nodeCallback,quantizedQueryAabbMin,quantizedQueryAabbMax); - break; - case TRAVERSAL_RECURSIVE: + case TRAVERSAL_STACKLESS: + walkStacklessQuantizedTree(nodeCallback, quantizedQueryAabbMin, quantizedQueryAabbMax, 0, m_curNodeIndex); + break; + case TRAVERSAL_STACKLESS_CACHE_FRIENDLY: + walkStacklessQuantizedTreeCacheFriendly(nodeCallback, quantizedQueryAabbMin, quantizedQueryAabbMax); + break; + case TRAVERSAL_RECURSIVE: { const btQuantizedBvhNode* rootNode = &m_quantizedContiguousNodes[0]; - walkRecursiveQuantizedTreeAgainstQueryAabb(rootNode,nodeCallback,quantizedQueryAabbMin,quantizedQueryAabbMax); + walkRecursiveQuantizedTreeAgainstQueryAabb(rootNode, nodeCallback, quantizedQueryAabbMin, quantizedQueryAabbMax); } break; - default: - //unsupported - btAssert(0); + default: + //unsupported + btAssert(0); } - } else + } + else { - walkStacklessTree(nodeCallback,aabbMin,aabbMax); + walkStacklessTree(nodeCallback, aabbMin, aabbMax); } } - int maxIterations = 0; - -void btQuantizedBvh::walkStacklessTree(btNodeOverlapCallback* nodeCallback,const btVector3& aabbMin,const btVector3& aabbMax) const +void btQuantizedBvh::walkStacklessTree(btNodeOverlapCallback* nodeCallback, const btVector3& aabbMin, const btVector3& aabbMax) const { btAssert(!m_useQuantization); @@ -384,24 +362,25 @@ void btQuantizedBvh::walkStacklessTree(btNodeOverlapCallback* nodeCallback,const while (curIndex < m_curNodeIndex) { //catch bugs in tree data - btAssert (walkIterations < m_curNodeIndex); + btAssert(walkIterations < m_curNodeIndex); walkIterations++; - aabbOverlap = TestAabbAgainstAabb2(aabbMin,aabbMax,rootNode->m_aabbMinOrg,rootNode->m_aabbMaxOrg); + aabbOverlap = TestAabbAgainstAabb2(aabbMin, aabbMax, rootNode->m_aabbMinOrg, rootNode->m_aabbMaxOrg); isLeafNode = rootNode->m_escapeIndex == -1; - + //PCK: unsigned instead of bool if (isLeafNode && (aabbOverlap != 0)) { - nodeCallback->processNode(rootNode->m_subPart,rootNode->m_triangleIndex); - } - + nodeCallback->processNode(rootNode->m_subPart, rootNode->m_triangleIndex); + } + //PCK: unsigned instead of bool if ((aabbOverlap != 0) || isLeafNode) { rootNode++; curIndex++; - } else + } + else { escapeIndex = rootNode->m_escapeIndex; rootNode += escapeIndex; @@ -410,7 +389,6 @@ void btQuantizedBvh::walkStacklessTree(btNodeOverlapCallback* nodeCallback,const } if (maxIterations < walkIterations) maxIterations = walkIterations; - } /* @@ -434,39 +412,38 @@ void 
btQuantizedBvh::walkTree(btOptimizedBvhNode* rootNode,btNodeOverlapCallback } */ -void btQuantizedBvh::walkRecursiveQuantizedTreeAgainstQueryAabb(const btQuantizedBvhNode* currentNode,btNodeOverlapCallback* nodeCallback,unsigned short int* quantizedQueryAabbMin,unsigned short int* quantizedQueryAabbMax) const +void btQuantizedBvh::walkRecursiveQuantizedTreeAgainstQueryAabb(const btQuantizedBvhNode* currentNode, btNodeOverlapCallback* nodeCallback, unsigned short int* quantizedQueryAabbMin, unsigned short int* quantizedQueryAabbMax) const { btAssert(m_useQuantization); - + bool isLeafNode; //PCK: unsigned instead of bool unsigned aabbOverlap; //PCK: unsigned instead of bool - aabbOverlap = testQuantizedAabbAgainstQuantizedAabb(quantizedQueryAabbMin,quantizedQueryAabbMax,currentNode->m_quantizedAabbMin,currentNode->m_quantizedAabbMax); + aabbOverlap = testQuantizedAabbAgainstQuantizedAabb(quantizedQueryAabbMin, quantizedQueryAabbMax, currentNode->m_quantizedAabbMin, currentNode->m_quantizedAabbMax); isLeafNode = currentNode->isLeafNode(); - + //PCK: unsigned instead of bool if (aabbOverlap != 0) { if (isLeafNode) { - nodeCallback->processNode(currentNode->getPartId(),currentNode->getTriangleIndex()); - } else + nodeCallback->processNode(currentNode->getPartId(), currentNode->getTriangleIndex()); + } + else { //process left and right children - const btQuantizedBvhNode* leftChildNode = currentNode+1; - walkRecursiveQuantizedTreeAgainstQueryAabb(leftChildNode,nodeCallback,quantizedQueryAabbMin,quantizedQueryAabbMax); + const btQuantizedBvhNode* leftChildNode = currentNode + 1; + walkRecursiveQuantizedTreeAgainstQueryAabb(leftChildNode, nodeCallback, quantizedQueryAabbMin, quantizedQueryAabbMax); - const btQuantizedBvhNode* rightChildNode = leftChildNode->isLeafNode() ? leftChildNode+1:leftChildNode+leftChildNode->getEscapeIndex(); - walkRecursiveQuantizedTreeAgainstQueryAabb(rightChildNode,nodeCallback,quantizedQueryAabbMin,quantizedQueryAabbMax); + const btQuantizedBvhNode* rightChildNode = leftChildNode->isLeafNode() ? 
leftChildNode + 1 : leftChildNode + leftChildNode->getEscapeIndex(); + walkRecursiveQuantizedTreeAgainstQueryAabb(rightChildNode, nodeCallback, quantizedQueryAabbMin, quantizedQueryAabbMax); } - } + } } - - -void btQuantizedBvh::walkStacklessTreeAgainstRay(btNodeOverlapCallback* nodeCallback, const btVector3& raySource, const btVector3& rayTarget, const btVector3& aabbMin, const btVector3& aabbMax, int startNodeIndex,int endNodeIndex) const +void btQuantizedBvh::walkStacklessTreeAgainstRay(btNodeOverlapCallback* nodeCallback, const btVector3& raySource, const btVector3& rayTarget, const btVector3& aabbMin, const btVector3& aabbMax, int startNodeIndex, int endNodeIndex) const { btAssert(!m_useQuantization); @@ -475,11 +452,11 @@ void btQuantizedBvh::walkStacklessTreeAgainstRay(btNodeOverlapCallback* nodeCall int walkIterations = 0; bool isLeafNode; //PCK: unsigned instead of bool - unsigned aabbOverlap=0; - unsigned rayBoxOverlap=0; + unsigned aabbOverlap = 0; + unsigned rayBoxOverlap = 0; btScalar lambda_max = 1.0; - - /* Quick pruning by quantized box */ + + /* Quick pruning by quantized box */ btVector3 rayAabbMin = raySource; btVector3 rayAabbMax = raySource; rayAabbMin.setMin(rayTarget); @@ -490,15 +467,15 @@ void btQuantizedBvh::walkStacklessTreeAgainstRay(btNodeOverlapCallback* nodeCall rayAabbMax += aabbMax; #ifdef RAYAABB2 - btVector3 rayDir = (rayTarget-raySource); - rayDir.normalize (); - lambda_max = rayDir.dot(rayTarget-raySource); + btVector3 rayDir = (rayTarget - raySource); + rayDir.normalize(); + lambda_max = rayDir.dot(rayTarget - raySource); ///what about division by zero? --> just set rayDirection[i] to 1.0 btVector3 rayDirectionInverse; rayDirectionInverse[0] = rayDir[0] == btScalar(0.0) ? btScalar(BT_LARGE_FLOAT) : btScalar(1.0) / rayDir[0]; rayDirectionInverse[1] = rayDir[1] == btScalar(0.0) ? btScalar(BT_LARGE_FLOAT) : btScalar(1.0) / rayDir[1]; rayDirectionInverse[2] = rayDir[2] == btScalar(0.0) ? btScalar(BT_LARGE_FLOAT) : btScalar(1.0) / rayDir[2]; - unsigned int sign[3] = { rayDirectionInverse[0] < 0.0, rayDirectionInverse[1] < 0.0, rayDirectionInverse[2] < 0.0}; + unsigned int sign[3] = {rayDirectionInverse[0] < 0.0, rayDirectionInverse[1] < 0.0, rayDirectionInverse[2] < 0.0}; #endif btVector3 bounds[2]; @@ -507,7 +484,7 @@ void btQuantizedBvh::walkStacklessTreeAgainstRay(btNodeOverlapCallback* nodeCall { btScalar param = 1.0; //catch bugs in tree data - btAssert (walkIterations < m_curNodeIndex); + btAssert(walkIterations < m_curNodeIndex); walkIterations++; @@ -517,34 +494,35 @@ void btQuantizedBvh::walkStacklessTreeAgainstRay(btNodeOverlapCallback* nodeCall bounds[0] -= aabbMax; bounds[1] -= aabbMin; - aabbOverlap = TestAabbAgainstAabb2(rayAabbMin,rayAabbMax,rootNode->m_aabbMinOrg,rootNode->m_aabbMaxOrg); + aabbOverlap = TestAabbAgainstAabb2(rayAabbMin, rayAabbMax, rootNode->m_aabbMinOrg, rootNode->m_aabbMaxOrg); //perhaps profile if it is worth doing the aabbOverlap test first #ifdef RAYAABB2 - ///careful with this check: need to check division by zero (above) and fix the unQuantize method - ///thanks Joerg/hiker for the reproduction case! - ///http://www.bulletphysics.com/Bullet/phpBB3/viewtopic.php?f=9&t=1858 - rayBoxOverlap = aabbOverlap ? btRayAabb2 (raySource, rayDirectionInverse, sign, bounds, param, 0.0f, lambda_max) : false; + ///careful with this check: need to check division by zero (above) and fix the unQuantize method + ///thanks Joerg/hiker for the reproduction case! 
+ ///http://www.bulletphysics.com/Bullet/phpBB3/viewtopic.php?f=9&t=1858 + rayBoxOverlap = aabbOverlap ? btRayAabb2(raySource, rayDirectionInverse, sign, bounds, param, 0.0f, lambda_max) : false; #else btVector3 normal; - rayBoxOverlap = btRayAabb(raySource, rayTarget,bounds[0],bounds[1],param, normal); + rayBoxOverlap = btRayAabb(raySource, rayTarget, bounds[0], bounds[1], param, normal); #endif isLeafNode = rootNode->m_escapeIndex == -1; - + //PCK: unsigned instead of bool if (isLeafNode && (rayBoxOverlap != 0)) { - nodeCallback->processNode(rootNode->m_subPart,rootNode->m_triangleIndex); - } - + nodeCallback->processNode(rootNode->m_subPart, rootNode->m_triangleIndex); + } + //PCK: unsigned instead of bool if ((rayBoxOverlap != 0) || isLeafNode) { rootNode++; curIndex++; - } else + } + else { escapeIndex = rootNode->m_escapeIndex; rootNode += escapeIndex; @@ -553,15 +531,12 @@ void btQuantizedBvh::walkStacklessTreeAgainstRay(btNodeOverlapCallback* nodeCall } if (maxIterations < walkIterations) maxIterations = walkIterations; - } - - -void btQuantizedBvh::walkStacklessQuantizedTreeAgainstRay(btNodeOverlapCallback* nodeCallback, const btVector3& raySource, const btVector3& rayTarget, const btVector3& aabbMin, const btVector3& aabbMax, int startNodeIndex,int endNodeIndex) const +void btQuantizedBvh::walkStacklessQuantizedTreeAgainstRay(btNodeOverlapCallback* nodeCallback, const btVector3& raySource, const btVector3& rayTarget, const btVector3& aabbMin, const btVector3& aabbMax, int startNodeIndex, int endNodeIndex) const { btAssert(m_useQuantization); - + int curIndex = startNodeIndex; int walkIterations = 0; int subTreeSize = endNodeIndex - startNodeIndex; @@ -569,7 +544,7 @@ void btQuantizedBvh::walkStacklessQuantizedTreeAgainstRay(btNodeOverlapCallback* const btQuantizedBvhNode* rootNode = &m_quantizedContiguousNodes[startNodeIndex]; int escapeIndex; - + bool isLeafNode; //PCK: unsigned instead of bool unsigned boxBoxOverlap = 0; @@ -578,14 +553,14 @@ void btQuantizedBvh::walkStacklessQuantizedTreeAgainstRay(btNodeOverlapCallback* btScalar lambda_max = 1.0; #ifdef RAYAABB2 - btVector3 rayDirection = (rayTarget-raySource); - rayDirection.normalize (); - lambda_max = rayDirection.dot(rayTarget-raySource); + btVector3 rayDirection = (rayTarget - raySource); + rayDirection.normalize(); + lambda_max = rayDirection.dot(rayTarget - raySource); ///what about division by zero? --> just set rayDirection[i] to 1.0 rayDirection[0] = rayDirection[0] == btScalar(0.0) ? btScalar(BT_LARGE_FLOAT) : btScalar(1.0) / rayDirection[0]; rayDirection[1] = rayDirection[1] == btScalar(0.0) ? btScalar(BT_LARGE_FLOAT) : btScalar(1.0) / rayDirection[1]; rayDirection[2] = rayDirection[2] == btScalar(0.0) ? 
btScalar(BT_LARGE_FLOAT) : btScalar(1.0) / rayDirection[2]; - unsigned int sign[3] = { rayDirection[0] < 0.0, rayDirection[1] < 0.0, rayDirection[2] < 0.0}; + unsigned int sign[3] = {rayDirection[0] < 0.0, rayDirection[1] < 0.0, rayDirection[2] < 0.0}; #endif /* Quick pruning by quantized box */ @@ -600,37 +575,36 @@ void btQuantizedBvh::walkStacklessQuantizedTreeAgainstRay(btNodeOverlapCallback* unsigned short int quantizedQueryAabbMin[3]; unsigned short int quantizedQueryAabbMax[3]; - quantizeWithClamp(quantizedQueryAabbMin,rayAabbMin,0); - quantizeWithClamp(quantizedQueryAabbMax,rayAabbMax,1); + quantizeWithClamp(quantizedQueryAabbMin, rayAabbMin, 0); + quantizeWithClamp(quantizedQueryAabbMax, rayAabbMax, 1); while (curIndex < endNodeIndex) { - //#define VISUALLY_ANALYZE_BVH 1 #ifdef VISUALLY_ANALYZE_BVH //some code snippet to debugDraw aabb, to visually analyze bvh structure static int drawPatch = 0; //need some global access to a debugDrawer extern btIDebugDraw* debugDrawerPtr; - if (curIndex==drawPatch) + if (curIndex == drawPatch) { - btVector3 aabbMin,aabbMax; + btVector3 aabbMin, aabbMax; aabbMin = unQuantize(rootNode->m_quantizedAabbMin); aabbMax = unQuantize(rootNode->m_quantizedAabbMax); - btVector3 color(1,0,0); - debugDrawerPtr->drawAabb(aabbMin,aabbMax,color); + btVector3 color(1, 0, 0); + debugDrawerPtr->drawAabb(aabbMin, aabbMax, color); } -#endif//VISUALLY_ANALYZE_BVH +#endif //VISUALLY_ANALYZE_BVH //catch bugs in tree data - btAssert (walkIterations < subTreeSize); + btAssert(walkIterations < subTreeSize); walkIterations++; //PCK: unsigned instead of bool // only interested if this is closer than any previous hit btScalar param = 1.0; rayBoxOverlap = 0; - boxBoxOverlap = testQuantizedAabbAgainstQuantizedAabb(quantizedQueryAabbMin,quantizedQueryAabbMax,rootNode->m_quantizedAabbMin,rootNode->m_quantizedAabbMax); + boxBoxOverlap = testQuantizedAabbAgainstQuantizedAabb(quantizedQueryAabbMin, quantizedQueryAabbMax, rootNode->m_quantizedAabbMin, rootNode->m_quantizedAabbMax); isLeafNode = rootNode->isLeafNode(); if (boxBoxOverlap) { @@ -655,24 +629,25 @@ void btQuantizedBvh::walkStacklessQuantizedTreeAgainstRay(btNodeOverlapCallback* ///http://www.bulletphysics.com/Bullet/phpBB3/viewtopic.php?f=9&t=1858 //BT_PROFILE("btRayAabb2"); - rayBoxOverlap = btRayAabb2 (raySource, rayDirection, sign, bounds, param, 0.0f, lambda_max); - + rayBoxOverlap = btRayAabb2(raySource, rayDirection, sign, bounds, param, 0.0f, lambda_max); + #else - rayBoxOverlap = true;//btRayAabb(raySource, rayTarget, bounds[0], bounds[1], param, normal); + rayBoxOverlap = true; //btRayAabb(raySource, rayTarget, bounds[0], bounds[1], param, normal); #endif } - + if (isLeafNode && rayBoxOverlap) { - nodeCallback->processNode(rootNode->getPartId(),rootNode->getTriangleIndex()); + nodeCallback->processNode(rootNode->getPartId(), rootNode->getTriangleIndex()); } - + //PCK: unsigned instead of bool if ((rayBoxOverlap != 0) || isLeafNode) { rootNode++; curIndex++; - } else + } + else { escapeIndex = rootNode->getEscapeIndex(); rootNode += escapeIndex; @@ -681,13 +656,12 @@ void btQuantizedBvh::walkStacklessQuantizedTreeAgainstRay(btNodeOverlapCallback* } if (maxIterations < walkIterations) maxIterations = walkIterations; - } -void btQuantizedBvh::walkStacklessQuantizedTree(btNodeOverlapCallback* nodeCallback,unsigned short int* quantizedQueryAabbMin,unsigned short int* quantizedQueryAabbMax,int startNodeIndex,int endNodeIndex) const +void btQuantizedBvh::walkStacklessQuantizedTree(btNodeOverlapCallback* nodeCallback, 
unsigned short int* quantizedQueryAabbMin, unsigned short int* quantizedQueryAabbMax, int startNodeIndex, int endNodeIndex) const { btAssert(m_useQuantization); - + int curIndex = startNodeIndex; int walkIterations = 0; int subTreeSize = endNodeIndex - startNodeIndex; @@ -695,49 +669,49 @@ void btQuantizedBvh::walkStacklessQuantizedTree(btNodeOverlapCallback* nodeCallb const btQuantizedBvhNode* rootNode = &m_quantizedContiguousNodes[startNodeIndex]; int escapeIndex; - + bool isLeafNode; //PCK: unsigned instead of bool unsigned aabbOverlap; while (curIndex < endNodeIndex) { - //#define VISUALLY_ANALYZE_BVH 1 #ifdef VISUALLY_ANALYZE_BVH //some code snippet to debugDraw aabb, to visually analyze bvh structure static int drawPatch = 0; //need some global access to a debugDrawer extern btIDebugDraw* debugDrawerPtr; - if (curIndex==drawPatch) + if (curIndex == drawPatch) { - btVector3 aabbMin,aabbMax; + btVector3 aabbMin, aabbMax; aabbMin = unQuantize(rootNode->m_quantizedAabbMin); aabbMax = unQuantize(rootNode->m_quantizedAabbMax); - btVector3 color(1,0,0); - debugDrawerPtr->drawAabb(aabbMin,aabbMax,color); + btVector3 color(1, 0, 0); + debugDrawerPtr->drawAabb(aabbMin, aabbMax, color); } -#endif//VISUALLY_ANALYZE_BVH +#endif //VISUALLY_ANALYZE_BVH //catch bugs in tree data - btAssert (walkIterations < subTreeSize); + btAssert(walkIterations < subTreeSize); walkIterations++; //PCK: unsigned instead of bool - aabbOverlap = testQuantizedAabbAgainstQuantizedAabb(quantizedQueryAabbMin,quantizedQueryAabbMax,rootNode->m_quantizedAabbMin,rootNode->m_quantizedAabbMax); + aabbOverlap = testQuantizedAabbAgainstQuantizedAabb(quantizedQueryAabbMin, quantizedQueryAabbMax, rootNode->m_quantizedAabbMin, rootNode->m_quantizedAabbMax); isLeafNode = rootNode->isLeafNode(); - + if (isLeafNode && aabbOverlap) { - nodeCallback->processNode(rootNode->getPartId(),rootNode->getTriangleIndex()); - } - + nodeCallback->processNode(rootNode->getPartId(), rootNode->getTriangleIndex()); + } + //PCK: unsigned instead of bool if ((aabbOverlap != 0) || isLeafNode) { rootNode++; curIndex++; - } else + } + else { escapeIndex = rootNode->getEscapeIndex(); rootNode += escapeIndex; @@ -746,40 +720,36 @@ void btQuantizedBvh::walkStacklessQuantizedTree(btNodeOverlapCallback* nodeCallb } if (maxIterations < walkIterations) maxIterations = walkIterations; - } //This traversal can be called from Playstation 3 SPU -void btQuantizedBvh::walkStacklessQuantizedTreeCacheFriendly(btNodeOverlapCallback* nodeCallback,unsigned short int* quantizedQueryAabbMin,unsigned short int* quantizedQueryAabbMax) const +void btQuantizedBvh::walkStacklessQuantizedTreeCacheFriendly(btNodeOverlapCallback* nodeCallback, unsigned short int* quantizedQueryAabbMin, unsigned short int* quantizedQueryAabbMax) const { btAssert(m_useQuantization); int i; - - for (i=0;i<this->m_SubtreeHeaders.size();i++) + for (i = 0; i < this->m_SubtreeHeaders.size(); i++) { const btBvhSubtreeInfo& subtree = m_SubtreeHeaders[i]; //PCK: unsigned instead of bool - unsigned overlap = testQuantizedAabbAgainstQuantizedAabb(quantizedQueryAabbMin,quantizedQueryAabbMax,subtree.m_quantizedAabbMin,subtree.m_quantizedAabbMax); + unsigned overlap = testQuantizedAabbAgainstQuantizedAabb(quantizedQueryAabbMin, quantizedQueryAabbMax, subtree.m_quantizedAabbMin, subtree.m_quantizedAabbMax); if (overlap != 0) { - walkStacklessQuantizedTree(nodeCallback,quantizedQueryAabbMin,quantizedQueryAabbMax, - subtree.m_rootNodeIndex, - subtree.m_rootNodeIndex+subtree.m_subtreeSize); + 
walkStacklessQuantizedTree(nodeCallback, quantizedQueryAabbMin, quantizedQueryAabbMax, + subtree.m_rootNodeIndex, + subtree.m_rootNodeIndex + subtree.m_subtreeSize); } } } - -void btQuantizedBvh::reportRayOverlappingNodex (btNodeOverlapCallback* nodeCallback, const btVector3& raySource, const btVector3& rayTarget) const +void btQuantizedBvh::reportRayOverlappingNodex(btNodeOverlapCallback* nodeCallback, const btVector3& raySource, const btVector3& rayTarget) const { - reportBoxCastOverlappingNodex(nodeCallback,raySource,rayTarget,btVector3(0,0,0),btVector3(0,0,0)); + reportBoxCastOverlappingNodex(nodeCallback, raySource, rayTarget, btVector3(0, 0, 0), btVector3(0, 0, 0)); } - -void btQuantizedBvh::reportBoxCastOverlappingNodex(btNodeOverlapCallback* nodeCallback, const btVector3& raySource, const btVector3& rayTarget, const btVector3& aabbMin,const btVector3& aabbMax) const +void btQuantizedBvh::reportBoxCastOverlappingNodex(btNodeOverlapCallback* nodeCallback, const btVector3& raySource, const btVector3& rayTarget, const btVector3& aabbMin, const btVector3& aabbMax) const { //always use stackless @@ -803,31 +773,31 @@ void btQuantizedBvh::reportBoxCastOverlappingNodex(btNodeOverlapCallback* nodeCa reportAabbOverlappingNodex(nodeCallback,qaabbMin,qaabbMax); } */ - } - -void btQuantizedBvh::swapLeafNodes(int i,int splitIndex) +void btQuantizedBvh::swapLeafNodes(int i, int splitIndex) { if (m_useQuantization) { - btQuantizedBvhNode tmp = m_quantizedLeafNodes[i]; - m_quantizedLeafNodes[i] = m_quantizedLeafNodes[splitIndex]; - m_quantizedLeafNodes[splitIndex] = tmp; - } else + btQuantizedBvhNode tmp = m_quantizedLeafNodes[i]; + m_quantizedLeafNodes[i] = m_quantizedLeafNodes[splitIndex]; + m_quantizedLeafNodes[splitIndex] = tmp; + } + else { - btOptimizedBvhNode tmp = m_leafNodes[i]; - m_leafNodes[i] = m_leafNodes[splitIndex]; - m_leafNodes[splitIndex] = tmp; + btOptimizedBvhNode tmp = m_leafNodes[i]; + m_leafNodes[i] = m_leafNodes[splitIndex]; + m_leafNodes[splitIndex] = tmp; } } -void btQuantizedBvh::assignInternalNodeFromLeafNode(int internalNode,int leafNodeIndex) +void btQuantizedBvh::assignInternalNodeFromLeafNode(int internalNode, int leafNodeIndex) { if (m_useQuantization) { m_quantizedContiguousNodes[internalNode] = m_quantizedLeafNodes[leafNodeIndex]; - } else + } + else { m_contiguousNodes[internalNode] = m_leafNodes[leafNodeIndex]; } @@ -844,11 +814,10 @@ static const unsigned BVH_ALIGNMENT_MASK = BVH_ALIGNMENT-1; static const unsigned BVH_ALIGNMENT_BLOCKS = 2; #endif - unsigned int btQuantizedBvh::getAlignmentSerializationPadding() { // I changed this to 0 since the extra padding is not needed or used. 
- return 0;//BVH_ALIGNMENT_BLOCKS * BVH_ALIGNMENT; + return 0; //BVH_ALIGNMENT_BLOCKS * BVH_ALIGNMENT; } unsigned btQuantizedBvh::calculateSerializeBufferSize() const @@ -862,12 +831,12 @@ unsigned btQuantizedBvh::calculateSerializeBufferSize() const return baseSize + m_curNodeIndex * sizeof(btOptimizedBvhNode); } -bool btQuantizedBvh::serialize(void *o_alignedDataBuffer, unsigned /*i_dataBufferSize */, bool i_swapEndian) const +bool btQuantizedBvh::serialize(void* o_alignedDataBuffer, unsigned /*i_dataBufferSize */, bool i_swapEndian) const { btAssert(m_subtreeHeaderCount == m_SubtreeHeaders.size()); m_subtreeHeaderCount = m_SubtreeHeaders.size(); -/* if (i_dataBufferSize < calculateSerializeBufferSize() || o_alignedDataBuffer == NULL || (((unsigned)o_alignedDataBuffer & BVH_ALIGNMENT_MASK) != 0)) + /* if (i_dataBufferSize < calculateSerializeBufferSize() || o_alignedDataBuffer == NULL || (((unsigned)o_alignedDataBuffer & BVH_ALIGNMENT_MASK) != 0)) { ///check alignedment for buffer? btAssert(0); @@ -875,7 +844,7 @@ bool btQuantizedBvh::serialize(void *o_alignedDataBuffer, unsigned /*i_dataBuffe } */ - btQuantizedBvh *targetBvh = (btQuantizedBvh *)o_alignedDataBuffer; + btQuantizedBvh* targetBvh = (btQuantizedBvh*)o_alignedDataBuffer; // construct the class so the virtual function table, etc will be set up // Also, m_leafNodes and m_quantizedLeafNodes will be initialized to default values by the constructor @@ -885,10 +854,9 @@ bool btQuantizedBvh::serialize(void *o_alignedDataBuffer, unsigned /*i_dataBuffe { targetBvh->m_curNodeIndex = static_cast<int>(btSwapEndian(m_curNodeIndex)); - - btSwapVector3Endian(m_bvhAabbMin,targetBvh->m_bvhAabbMin); - btSwapVector3Endian(m_bvhAabbMax,targetBvh->m_bvhAabbMax); - btSwapVector3Endian(m_bvhQuantization,targetBvh->m_bvhQuantization); + btSwapVector3Endian(m_bvhAabbMin, targetBvh->m_bvhAabbMin); + btSwapVector3Endian(m_bvhAabbMax, targetBvh->m_bvhAabbMax); + btSwapVector3Endian(m_bvhQuantization, targetBvh->m_bvhQuantization); targetBvh->m_traversalMode = (btTraversalMode)btSwapEndian(m_traversalMode); targetBvh->m_subtreeHeaderCount = static_cast<int>(btSwapEndian(m_subtreeHeaderCount)); @@ -905,12 +873,12 @@ bool btQuantizedBvh::serialize(void *o_alignedDataBuffer, unsigned /*i_dataBuffe targetBvh->m_useQuantization = m_useQuantization; - unsigned char *nodeData = (unsigned char *)targetBvh; + unsigned char* nodeData = (unsigned char*)targetBvh; nodeData += sizeof(btQuantizedBvh); - - unsigned sizeToAdd = 0;//(BVH_ALIGNMENT-((unsigned)nodeData & BVH_ALIGNMENT_MASK))&BVH_ALIGNMENT_MASK; + + unsigned sizeToAdd = 0; //(BVH_ALIGNMENT-((unsigned)nodeData & BVH_ALIGNMENT_MASK))&BVH_ALIGNMENT_MASK; nodeData += sizeToAdd; - + int nodeCount = m_curNodeIndex; if (m_useQuantization) @@ -936,7 +904,6 @@ bool btQuantizedBvh::serialize(void *o_alignedDataBuffer, unsigned /*i_dataBuffe { for (int nodeIndex = 0; nodeIndex < nodeCount; nodeIndex++) { - targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[0] = m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[0]; targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[1] = m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[1]; targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[2] = m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[2]; @@ -946,8 +913,6 @@ bool btQuantizedBvh::serialize(void *o_alignedDataBuffer, unsigned /*i_dataBuffe targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[2] = 
m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[2]; targetBvh->m_quantizedContiguousNodes[nodeIndex].m_escapeIndexOrTriangleIndex = m_quantizedContiguousNodes[nodeIndex].m_escapeIndexOrTriangleIndex; - - } } nodeData += sizeof(btQuantizedBvhNode) * nodeCount; @@ -993,7 +958,7 @@ bool btQuantizedBvh::serialize(void *o_alignedDataBuffer, unsigned /*i_dataBuffe targetBvh->m_contiguousNodes.initializeFromBuffer(NULL, 0, 0); } - sizeToAdd = 0;//(BVH_ALIGNMENT-((unsigned)nodeData & BVH_ALIGNMENT_MASK))&BVH_ALIGNMENT_MASK; + sizeToAdd = 0; //(BVH_ALIGNMENT-((unsigned)nodeData & BVH_ALIGNMENT_MASK))&BVH_ALIGNMENT_MASK; nodeData += sizeToAdd; // Now serialize the subtree headers @@ -1048,14 +1013,13 @@ bool btQuantizedBvh::serialize(void *o_alignedDataBuffer, unsigned /*i_dataBuffe return true; } -btQuantizedBvh *btQuantizedBvh::deSerializeInPlace(void *i_alignedDataBuffer, unsigned int i_dataBufferSize, bool i_swapEndian) +btQuantizedBvh* btQuantizedBvh::deSerializeInPlace(void* i_alignedDataBuffer, unsigned int i_dataBufferSize, bool i_swapEndian) { - - if (i_alignedDataBuffer == NULL)// || (((unsigned)i_alignedDataBuffer & BVH_ALIGNMENT_MASK) != 0)) + if (i_alignedDataBuffer == NULL) // || (((unsigned)i_alignedDataBuffer & BVH_ALIGNMENT_MASK) != 0)) { return NULL; } - btQuantizedBvh *bvh = (btQuantizedBvh *)i_alignedDataBuffer; + btQuantizedBvh* bvh = (btQuantizedBvh*)i_alignedDataBuffer; if (i_swapEndian) { @@ -1077,12 +1041,12 @@ btQuantizedBvh *btQuantizedBvh::deSerializeInPlace(void *i_alignedDataBuffer, un return NULL; } - unsigned char *nodeData = (unsigned char *)bvh; + unsigned char* nodeData = (unsigned char*)bvh; nodeData += sizeof(btQuantizedBvh); - - unsigned sizeToAdd = 0;//(BVH_ALIGNMENT-((unsigned)nodeData & BVH_ALIGNMENT_MASK))&BVH_ALIGNMENT_MASK; + + unsigned sizeToAdd = 0; //(BVH_ALIGNMENT-((unsigned)nodeData & BVH_ALIGNMENT_MASK))&BVH_ALIGNMENT_MASK; nodeData += sizeToAdd; - + int nodeCount = bvh->m_curNodeIndex; // Must call placement new to fill in virtual function table, etc, but we don't want to overwrite most data, so call a special version of the constructor @@ -1120,7 +1084,7 @@ btQuantizedBvh *btQuantizedBvh::deSerializeInPlace(void *i_alignedDataBuffer, un { btUnSwapVector3Endian(bvh->m_contiguousNodes[nodeIndex].m_aabbMinOrg); btUnSwapVector3Endian(bvh->m_contiguousNodes[nodeIndex].m_aabbMaxOrg); - + bvh->m_contiguousNodes[nodeIndex].m_escapeIndex = static_cast<int>(btSwapEndian(bvh->m_contiguousNodes[nodeIndex].m_escapeIndex)); bvh->m_contiguousNodes[nodeIndex].m_subPart = static_cast<int>(btSwapEndian(bvh->m_contiguousNodes[nodeIndex].m_subPart)); bvh->m_contiguousNodes[nodeIndex].m_triangleIndex = static_cast<int>(btSwapEndian(bvh->m_contiguousNodes[nodeIndex].m_triangleIndex)); @@ -1129,7 +1093,7 @@ btQuantizedBvh *btQuantizedBvh::deSerializeInPlace(void *i_alignedDataBuffer, un nodeData += sizeof(btOptimizedBvhNode) * nodeCount; } - sizeToAdd = 0;//(BVH_ALIGNMENT-((unsigned)nodeData & BVH_ALIGNMENT_MASK))&BVH_ALIGNMENT_MASK; + sizeToAdd = 0; //(BVH_ALIGNMENT-((unsigned)nodeData & BVH_ALIGNMENT_MASK))&BVH_ALIGNMENT_MASK; nodeData += sizeToAdd; // Now serialize the subtree headers @@ -1155,13 +1119,11 @@ btQuantizedBvh *btQuantizedBvh::deSerializeInPlace(void *i_alignedDataBuffer, un } // Constructor that prevents btVector3's default constructor from being called -btQuantizedBvh::btQuantizedBvh(btQuantizedBvh &self, bool /* ownsMemory */) : -m_bvhAabbMin(self.m_bvhAabbMin), -m_bvhAabbMax(self.m_bvhAabbMax), -m_bvhQuantization(self.m_bvhQuantization), 
-m_bulletVersion(BT_BULLET_VERSION) +btQuantizedBvh::btQuantizedBvh(btQuantizedBvh& self, bool /* ownsMemory */) : m_bvhAabbMin(self.m_bvhAabbMin), + m_bvhAabbMax(self.m_bvhAabbMax), + m_bvhQuantization(self.m_bvhQuantization), + m_bulletVersion(BT_BULLET_VERSION) { - } void btQuantizedBvh::deSerializeFloat(struct btQuantizedBvhFloatData& quantizedBvhFloatData) @@ -1171,8 +1133,8 @@ void btQuantizedBvh::deSerializeFloat(struct btQuantizedBvhFloatData& quantizedB m_bvhQuantization.deSerializeFloat(quantizedBvhFloatData.m_bvhQuantization); m_curNodeIndex = quantizedBvhFloatData.m_curNodeIndex; - m_useQuantization = quantizedBvhFloatData.m_useQuantization!=0; - + m_useQuantization = quantizedBvhFloatData.m_useQuantization != 0; + { int numElem = quantizedBvhFloatData.m_numContiguousLeafNodes; m_contiguousNodes.resize(numElem); @@ -1181,7 +1143,7 @@ void btQuantizedBvh::deSerializeFloat(struct btQuantizedBvhFloatData& quantizedB { btOptimizedBvhNodeFloatData* memPtr = quantizedBvhFloatData.m_contiguousNodesPtr; - for (int i=0;i<numElem;i++,memPtr++) + for (int i = 0; i < numElem; i++, memPtr++) { m_contiguousNodes[i].m_aabbMaxOrg.deSerializeFloat(memPtr->m_aabbMaxOrg); m_contiguousNodes[i].m_aabbMinOrg.deSerializeFloat(memPtr->m_aabbMinOrg); @@ -1195,11 +1157,11 @@ void btQuantizedBvh::deSerializeFloat(struct btQuantizedBvhFloatData& quantizedB { int numElem = quantizedBvhFloatData.m_numQuantizedContiguousNodes; m_quantizedContiguousNodes.resize(numElem); - + if (numElem) { btQuantizedBvhNodeData* memPtr = quantizedBvhFloatData.m_quantizedContiguousNodesPtr; - for (int i=0;i<numElem;i++,memPtr++) + for (int i = 0; i < numElem; i++, memPtr++) { m_quantizedContiguousNodes[i].m_escapeIndexOrTriangleIndex = memPtr->m_escapeIndexOrTriangleIndex; m_quantizedContiguousNodes[i].m_quantizedAabbMax[0] = memPtr->m_quantizedAabbMax[0]; @@ -1213,16 +1175,16 @@ void btQuantizedBvh::deSerializeFloat(struct btQuantizedBvhFloatData& quantizedB } m_traversalMode = btTraversalMode(quantizedBvhFloatData.m_traversalMode); - + { int numElem = quantizedBvhFloatData.m_numSubtreeHeaders; m_SubtreeHeaders.resize(numElem); if (numElem) { btBvhSubtreeInfoData* memPtr = quantizedBvhFloatData.m_subTreeInfoPtr; - for (int i=0;i<numElem;i++,memPtr++) + for (int i = 0; i < numElem; i++, memPtr++) { - m_SubtreeHeaders[i].m_quantizedAabbMax[0] = memPtr->m_quantizedAabbMax[0] ; + m_SubtreeHeaders[i].m_quantizedAabbMax[0] = memPtr->m_quantizedAabbMax[0]; m_SubtreeHeaders[i].m_quantizedAabbMax[1] = memPtr->m_quantizedAabbMax[1]; m_SubtreeHeaders[i].m_quantizedAabbMax[2] = memPtr->m_quantizedAabbMax[2]; m_SubtreeHeaders[i].m_quantizedAabbMin[0] = memPtr->m_quantizedAabbMin[0]; @@ -1242,8 +1204,8 @@ void btQuantizedBvh::deSerializeDouble(struct btQuantizedBvhDoubleData& quantize m_bvhQuantization.deSerializeDouble(quantizedBvhDoubleData.m_bvhQuantization); m_curNodeIndex = quantizedBvhDoubleData.m_curNodeIndex; - m_useQuantization = quantizedBvhDoubleData.m_useQuantization!=0; - + m_useQuantization = quantizedBvhDoubleData.m_useQuantization != 0; + { int numElem = quantizedBvhDoubleData.m_numContiguousLeafNodes; m_contiguousNodes.resize(numElem); @@ -1252,7 +1214,7 @@ void btQuantizedBvh::deSerializeDouble(struct btQuantizedBvhDoubleData& quantize { btOptimizedBvhNodeDoubleData* memPtr = quantizedBvhDoubleData.m_contiguousNodesPtr; - for (int i=0;i<numElem;i++,memPtr++) + for (int i = 0; i < numElem; i++, memPtr++) { m_contiguousNodes[i].m_aabbMaxOrg.deSerializeDouble(memPtr->m_aabbMaxOrg); 
m_contiguousNodes[i].m_aabbMinOrg.deSerializeDouble(memPtr->m_aabbMinOrg); @@ -1266,11 +1228,11 @@ void btQuantizedBvh::deSerializeDouble(struct btQuantizedBvhDoubleData& quantize { int numElem = quantizedBvhDoubleData.m_numQuantizedContiguousNodes; m_quantizedContiguousNodes.resize(numElem); - + if (numElem) { btQuantizedBvhNodeData* memPtr = quantizedBvhDoubleData.m_quantizedContiguousNodesPtr; - for (int i=0;i<numElem;i++,memPtr++) + for (int i = 0; i < numElem; i++, memPtr++) { m_quantizedContiguousNodes[i].m_escapeIndexOrTriangleIndex = memPtr->m_escapeIndexOrTriangleIndex; m_quantizedContiguousNodes[i].m_quantizedAabbMax[0] = memPtr->m_quantizedAabbMax[0]; @@ -1284,16 +1246,16 @@ void btQuantizedBvh::deSerializeDouble(struct btQuantizedBvhDoubleData& quantize } m_traversalMode = btTraversalMode(quantizedBvhDoubleData.m_traversalMode); - + { int numElem = quantizedBvhDoubleData.m_numSubtreeHeaders; m_SubtreeHeaders.resize(numElem); if (numElem) { btBvhSubtreeInfoData* memPtr = quantizedBvhDoubleData.m_subTreeInfoPtr; - for (int i=0;i<numElem;i++,memPtr++) + for (int i = 0; i < numElem; i++, memPtr++) { - m_SubtreeHeaders[i].m_quantizedAabbMax[0] = memPtr->m_quantizedAabbMax[0] ; + m_SubtreeHeaders[i].m_quantizedAabbMax[0] = memPtr->m_quantizedAabbMax[0]; m_SubtreeHeaders[i].m_quantizedAabbMax[1] = memPtr->m_quantizedAabbMax[1]; m_SubtreeHeaders[i].m_quantizedAabbMax[2] = memPtr->m_quantizedAabbMax[2]; m_SubtreeHeaders[i].m_quantizedAabbMin[0] = memPtr->m_quantizedAabbMin[0]; @@ -1304,32 +1266,29 @@ void btQuantizedBvh::deSerializeDouble(struct btQuantizedBvhDoubleData& quantize } } } - } - - ///fills the dataBuffer and returns the struct name (and 0 on failure) -const char* btQuantizedBvh::serialize(void* dataBuffer, btSerializer* serializer) const +const char* btQuantizedBvh::serialize(void* dataBuffer, btSerializer* serializer) const { btQuantizedBvhData* quantizedData = (btQuantizedBvhData*)dataBuffer; - + m_bvhAabbMax.serialize(quantizedData->m_bvhAabbMax); m_bvhAabbMin.serialize(quantizedData->m_bvhAabbMin); m_bvhQuantization.serialize(quantizedData->m_bvhQuantization); quantizedData->m_curNodeIndex = m_curNodeIndex; quantizedData->m_useQuantization = m_useQuantization; - + quantizedData->m_numContiguousLeafNodes = m_contiguousNodes.size(); - quantizedData->m_contiguousNodesPtr = (btOptimizedBvhNodeData*) (m_contiguousNodes.size() ? serializer->getUniquePointer((void*)&m_contiguousNodes[0]) : 0); + quantizedData->m_contiguousNodesPtr = (btOptimizedBvhNodeData*)(m_contiguousNodes.size() ? serializer->getUniquePointer((void*)&m_contiguousNodes[0]) : 0); if (quantizedData->m_contiguousNodesPtr) { int sz = sizeof(btOptimizedBvhNodeData); int numElem = m_contiguousNodes.size(); - btChunk* chunk = serializer->allocate(sz,numElem); + btChunk* chunk = serializer->allocate(sz, numElem); btOptimizedBvhNodeData* memPtr = (btOptimizedBvhNodeData*)chunk->m_oldPtr; - for (int i=0;i<numElem;i++,memPtr++) + for (int i = 0; i < numElem; i++, memPtr++) { m_contiguousNodes[i].m_aabbMaxOrg.serialize(memPtr->m_aabbMaxOrg); m_contiguousNodes[i].m_aabbMinOrg.serialize(memPtr->m_aabbMinOrg); @@ -1339,19 +1298,19 @@ const char* btQuantizedBvh::serialize(void* dataBuffer, btSerializer* serializer // Fill padding with zeros to appease msan. 
memset(memPtr->m_pad, 0, sizeof(memPtr->m_pad)); } - serializer->finalizeChunk(chunk,"btOptimizedBvhNodeData",BT_ARRAY_CODE,(void*)&m_contiguousNodes[0]); + serializer->finalizeChunk(chunk, "btOptimizedBvhNodeData", BT_ARRAY_CODE, (void*)&m_contiguousNodes[0]); } quantizedData->m_numQuantizedContiguousNodes = m_quantizedContiguousNodes.size(); -// printf("quantizedData->m_numQuantizedContiguousNodes=%d\n",quantizedData->m_numQuantizedContiguousNodes); - quantizedData->m_quantizedContiguousNodesPtr =(btQuantizedBvhNodeData*) (m_quantizedContiguousNodes.size() ? serializer->getUniquePointer((void*)&m_quantizedContiguousNodes[0]) : 0); + // printf("quantizedData->m_numQuantizedContiguousNodes=%d\n",quantizedData->m_numQuantizedContiguousNodes); + quantizedData->m_quantizedContiguousNodesPtr = (btQuantizedBvhNodeData*)(m_quantizedContiguousNodes.size() ? serializer->getUniquePointer((void*)&m_quantizedContiguousNodes[0]) : 0); if (quantizedData->m_quantizedContiguousNodesPtr) { int sz = sizeof(btQuantizedBvhNodeData); int numElem = m_quantizedContiguousNodes.size(); - btChunk* chunk = serializer->allocate(sz,numElem); + btChunk* chunk = serializer->allocate(sz, numElem); btQuantizedBvhNodeData* memPtr = (btQuantizedBvhNodeData*)chunk->m_oldPtr; - for (int i=0;i<numElem;i++,memPtr++) + for (int i = 0; i < numElem; i++, memPtr++) { memPtr->m_escapeIndexOrTriangleIndex = m_quantizedContiguousNodes[i].m_escapeIndexOrTriangleIndex; memPtr->m_quantizedAabbMax[0] = m_quantizedContiguousNodes[i].m_quantizedAabbMax[0]; @@ -1361,20 +1320,20 @@ const char* btQuantizedBvh::serialize(void* dataBuffer, btSerializer* serializer memPtr->m_quantizedAabbMin[1] = m_quantizedContiguousNodes[i].m_quantizedAabbMin[1]; memPtr->m_quantizedAabbMin[2] = m_quantizedContiguousNodes[i].m_quantizedAabbMin[2]; } - serializer->finalizeChunk(chunk,"btQuantizedBvhNodeData",BT_ARRAY_CODE,(void*)&m_quantizedContiguousNodes[0]); + serializer->finalizeChunk(chunk, "btQuantizedBvhNodeData", BT_ARRAY_CODE, (void*)&m_quantizedContiguousNodes[0]); } quantizedData->m_traversalMode = int(m_traversalMode); quantizedData->m_numSubtreeHeaders = m_SubtreeHeaders.size(); - quantizedData->m_subTreeInfoPtr = (btBvhSubtreeInfoData*) (m_SubtreeHeaders.size() ? serializer->getUniquePointer((void*)&m_SubtreeHeaders[0]) : 0); + quantizedData->m_subTreeInfoPtr = (btBvhSubtreeInfoData*)(m_SubtreeHeaders.size() ? serializer->getUniquePointer((void*)&m_SubtreeHeaders[0]) : 0); if (quantizedData->m_subTreeInfoPtr) { int sz = sizeof(btBvhSubtreeInfoData); int numElem = m_SubtreeHeaders.size(); - btChunk* chunk = serializer->allocate(sz,numElem); + btChunk* chunk = serializer->allocate(sz, numElem); btBvhSubtreeInfoData* memPtr = (btBvhSubtreeInfoData*)chunk->m_oldPtr; - for (int i=0;i<numElem;i++,memPtr++) + for (int i = 0; i < numElem; i++, memPtr++) { memPtr->m_quantizedAabbMax[0] = m_SubtreeHeaders[i].m_quantizedAabbMax[0]; memPtr->m_quantizedAabbMax[1] = m_SubtreeHeaders[i].m_quantizedAabbMax[1]; @@ -1386,12 +1345,7 @@ const char* btQuantizedBvh::serialize(void* dataBuffer, btSerializer* serializer memPtr->m_rootNodeIndex = m_SubtreeHeaders[i].m_rootNodeIndex; memPtr->m_subtreeSize = m_SubtreeHeaders[i].m_subtreeSize; } - serializer->finalizeChunk(chunk,"btBvhSubtreeInfoData",BT_ARRAY_CODE,(void*)&m_SubtreeHeaders[0]); + serializer->finalizeChunk(chunk, "btBvhSubtreeInfoData", BT_ARRAY_CODE, (void*)&m_SubtreeHeaders[0]); } return btQuantizedBvhDataName; } - - - - - |
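
The reformatted setQuantizationValues() above keeps Bullet's 65533-step quantization grid: the BVH AABB is padded by a margin, a per-axis scale m_bvhQuantization = 65533 / aabbSize is computed, and points are clamped into unsigned short coordinates. Below is a minimal sketch of that scheme, using a stand-in Vec3 type and a hypothetical quantizeClamped() helper rather than btVector3 and Bullet's quantizeWithClamp(); it is illustrative only, not the library's implementation.

#include <algorithm>
#include <cstdio>

struct Vec3 { float x, y, z; };

// Per-axis scale: 65533 quantization steps across the BVH AABB,
// mirroring m_bvhQuantization = btVector3(65533,65533,65533) / aabbSize.
Vec3 makeQuantization(const Vec3& aabbMin, const Vec3& aabbMax)
{
	return { 65533.0f / (aabbMax.x - aabbMin.x),
	         65533.0f / (aabbMax.y - aabbMin.y),
	         65533.0f / (aabbMax.z - aabbMin.z) };
}

// Quantize one point into unsigned short grid coordinates, clamping to the grid.
void quantizeClamped(unsigned short out[3], const Vec3& p, const Vec3& aabbMin, const Vec3& q)
{
	float v[3] = { (p.x - aabbMin.x) * q.x,
	               (p.y - aabbMin.y) * q.y,
	               (p.z - aabbMin.z) * q.z };
	for (int i = 0; i < 3; i++)
		out[i] = (unsigned short)std::min(std::max(v[i], 0.0f), 65533.0f);
}

int main()
{
	Vec3 aabbMin{ -10.0f, -10.0f, -10.0f }, aabbMax{ 10.0f, 10.0f, 10.0f };
	Vec3 q = makeQuantization(aabbMin, aabbMax);
	unsigned short cell[3];
	quantizeClamped(cell, Vec3{ 1.0f, 2.0f, 3.0f }, aabbMin, q);
	std::printf("%d %d %d\n", (int)cell[0], (int)cell[1], (int)cell[2]);
	return 0;
}

Quantizing both query AABBs and node AABBs onto the same grid is what lets the traversal routines compare unsigned short values instead of floats.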
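
buildTree() above picks its split axis with calcSplittingAxis(), which measures how spread out the leaf AABB centers are along each axis and splits along the widest one; sortAndCalcSplittingIndex() then partitions the leaves around the mean on that axis. The following is a small stand-alone sketch of the axis choice, with a stand-in LeafBox type instead of Bullet's node types; the 1/(n-1) normalization in the original is dropped because it does not change which axis wins.

#include <cstdio>
#include <vector>

struct LeafBox { float min[3]; float max[3]; };

int calcSplittingAxis(const std::vector<LeafBox>& leaves)
{
	float mean[3] = { 0, 0, 0 }, variance[3] = { 0, 0, 0 };
	const int n = (int)leaves.size();

	// mean of the AABB centers, per axis
	for (const LeafBox& l : leaves)
		for (int a = 0; a < 3; a++)
			mean[a] += 0.5f * (l.min[a] + l.max[a]);
	for (int a = 0; a < 3; a++)
		mean[a] /= (float)n;

	// accumulated squared deviation of the centers, per axis
	for (const LeafBox& l : leaves)
		for (int a = 0; a < 3; a++)
		{
			float d = 0.5f * (l.min[a] + l.max[a]) - mean[a];
			variance[a] += d * d;
		}

	// split along the axis with the largest spread (0 = x, 1 = y, 2 = z)
	int best = 0;
	for (int a = 1; a < 3; a++)
		if (variance[a] > variance[best])
			best = a;
	return best;
}

int main()
{
	std::vector<LeafBox> leaves = {
		{ { 0, 0, 0 }, { 1, 1, 1 } },
		{ { 5, 0, 0 }, { 6, 1, 1 } },
		{ { 10, 0, 0 }, { 11, 1, 1 } }
	};
	std::printf("split axis: %d\n", calcSplittingAxis(leaves));  // expect 0 (x)
	return 0;
}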
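
The walkStacklessTree/walkStacklessQuantizedTree functions above rely on the nodes being stored in depth-first order with an escape index: an internal node records how many nodes its subtree occupies, so a subtree whose AABB does not overlap the query is skipped with a single pointer jump instead of recursion. Here is a minimal sketch of that pattern, with a simplified Node whose overlap result is precomputed (in Bullet the test is testQuantizedAabbAgainstQuantizedAabb or TestAabbAgainstAabb2); it is not the library's node layout.

#include <cstdio>
#include <vector>

struct Node
{
	int escapeIndex;    // -1 for a leaf, otherwise the node count of this subtree
	int triangleIndex;  // leaf payload
	bool overlaps;      // AABB-vs-query result, precomputed here for brevity
	bool isLeaf() const { return escapeIndex == -1; }
};

void walkStackless(const std::vector<Node>& nodes)
{
	int curIndex = 0;
	const Node* node = nodes.data();
	while (curIndex < (int)nodes.size())
	{
		if (node->isLeaf() && node->overlaps)
			std::printf("hit triangle %d\n", node->triangleIndex);

		if (node->overlaps || node->isLeaf())
		{
			// overlapping internal node: descend; leaf: step past it
			node++;
			curIndex++;
		}
		else
		{
			// no overlap: skip this whole subtree in one jump
			int escape = node->escapeIndex;
			node += escape;
			curIndex += escape;
		}
	}
}

int main()
{
	// root (subtree of 3 nodes) with two leaves: triangle 7 overlaps, triangle 8 does not
	std::vector<Node> nodes = {
		{ 3, -1, true },
		{ -1, 7, true },
		{ -1, 8, false }
	};
	walkStackless(nodes);  // prints: hit triangle 7
	return 0;
}

The MAX_SUBTREE_SIZE_IN_BYTES bookkeeping in updateSubtreeHeaders() exists so that the cache-friendly traversal can run this same walk over small, independently addressable subtrees.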